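"""Transfer-learning trainer: adapt the icefall LibriSpeech
pruned_transducer_stateless7_streaming Zipformer encoder to Amharic ASR.

Pipeline: download the 2022-12-29 English streaming checkpoint, transplant
its encoder weights (encoder-only transfer), then fine-tune with a fresh
stateless decoder/joiner using CTC + pruned RNN-T losses on Lhotse cuts.
"""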
import argparse
import inspect
import logging
import math
import os
import sys
from pathlib import Path

import sentencepiece as spm
import torch
import torch.nn as nn
import torch.nn.functional as F
from huggingface_hub import hf_hub_download
from lhotse import CutSet
from lhotse.dataset import (
    DynamicBucketingSampler,
    K2SpeechRecognitionDataset,
    SimpleCutSampler,
)
from lhotse.dataset.input_strategies import OnTheFlyFeatures
from lhotse.features import Fbank, FbankConfig
from torch.cuda.amp import GradScaler, autocast
from torch.utils.data import DataLoader

logging.basicConfig(
    level=logging.INFO,
    format="%(asctime)s [%(levelname)s] %(message)s",
    datefmt="%H:%M:%S",
)
log = logging.getLogger("amharic_asr_pr787")

ICEFALL_DIR = os.environ.get(
    "ICEFALL_DIR", "/teamspace/studios/this_studio/icefall-transfer"
).strip()
STREAMING_DIR = os.path.join(
    ICEFALL_DIR, "egs/librispeech/ASR/pruned_transducer_stateless7_streaming"
)
if STREAMING_DIR not in sys.path:
    sys.path.insert(0, STREAMING_DIR)

try:
    import k2
    HAVE_K2 = True
except Exception as e:
    HAVE_K2 = False
    raise RuntimeError(f"k2 import failed: {e}")

try:
    from zipformer import Zipformer
except Exception as e:
    raise RuntimeError(f"Old recipe-local Zipformer import failed: {e}")

try:
    from icefall.utils import AttributeDict
except Exception:
    class AttributeDict(dict):
        def __getattr__(self, key):
            try:
                return self[key]
            except KeyError as e:
                raise AttributeError(key) from e

        __setattr__ = dict.__setitem__

try:
    from optim import ScaledAdam
    HAVE_SCALED_ADAM = True
except Exception:
    HAVE_SCALED_ADAM = False


def _split_int(s):
    return [int(x.strip()) for x in str(s).split(",") if x.strip()]
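# Boolean padding mask in the usual PyTorch convention: True marks PAD frames.
# Example: lengths=[3, 1], max_len=4 ->
#   [[F, F, F, T],
#    [F, T, T, T]]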
def _make_pad_mask(lengths, max_len=None):
    if max_len is None:
        max_len = int(lengths.max().item())
    return torch.arange(max_len, device=lengths.device).unsqueeze(0) >= lengths.unsqueeze(1)
def get_params():
    p = AttributeDict()
    p.require_zipformer = True
    p.require_pretrained = True
    p.encoder_only_transfer = True
    p.allow_scratch_if_incompatible = False
    p.min_pretrained_loaded_keys = 50
    p.min_pretrained_loaded_ratio = 0.05
    p.use_fp16 = True
    p.num_workers = 2
    p.pin_memory = True
    p.max_duration_train = 120
    p.max_duration_valid = 200
    p.num_buckets = 6
    p.accum_grad = 4
    p.grad_clip = 5.0
    p.feature_dim = 80

    p.zipformer_num_encoder_layers = "2,4,3,2,4"
    p.zipformer_encoder_dims = "384,384,384,384,384"
    p.zipformer_attention_dims = "192,192,192,192,192"
    p.zipformer_encoder_unmasked_dims = "256,256,256,256,256"
    p.zipformer_num_heads = "8,8,8,8,8"
    p.zipformer_feedforward_dims = "1024,1024,2048,2048,1024"
    p.zipformer_cnn_module_kernels = "31,31,31,31,31"
    p.zipformer_downsampling_factors = "1,2,4,8,2"
    p.zipformer_output_downsampling_factor = 2
    p.chunk_size = "32"
    p.left_context_frames = "64"
    p.short_chunk_size = 50
    p.num_left_chunks = 4

    p.encoder_dim = 256
    p.decoder_dim = 512
    p.joiner_dim = 512
    p.context_size = 2
    p.blank_id = 0
    p.vocab_size = 730

    p.ctc_loss_scale = 0.3
    p.simple_loss_scale = 0.5
    p.pruned_loss_scale = 1.0
    p.prune_range = 5
    p.lm_only_scale = 0.25
    p.am_only_scale = 0.0

    p.base_lr = 0.03
    p.lr_warmup_steps = 3000
    p.lr_total_steps = 100000
    p.weight_decay = 6.1e-5
    p.freeze_encoder_epochs = 3
    p.encoder_lr_factor = 0.1

    # Augmentation defaults consumed by build_dataloader(). These were missing
    # from the original and would raise at runtime; the values below are
    # assumed reasonable starting points, not tuned results.
    p.speed_perturb = True
    p.volume_perturb = True
    p.volume_low = 0.8
    p.volume_high = 1.2
    p.rir_prob = 0.5
    p.musan_noise_snr = 10
    p.musan_noise_prob = 0.5
    p.musan_music_snr = 10
    p.musan_music_prob = 0.3
    p.musan_speech_snr = 15
    p.musan_speech_prob = 0.3

    p.num_epochs = 40
    p.log_interval = 100
    p.valid_interval = 3000
    p.save_every_n = 3000
    return p
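# Fallback Conv2d front-end (two stride-2 convs => 4x time subsampling, so
# out_lens is halved twice via (n + 1) // 2). Unused by default: ASRModel sets
# encoder_embed = None because the old recipe-local Zipformer ships its own
# Conv2dSubsampling; kept for variants that expect pre-subsampled input.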
class ZipformerInputEmbed(nn.Module):
    def __init__(self, num_features, out_dim):
        super().__init__()
        self.conv = nn.Sequential(
            nn.Conv2d(1, 8, 3, stride=2, padding=1),
            nn.ReLU(),
            nn.Conv2d(8, 32, 3, stride=2, padding=1),
            nn.ReLU(),
        )
        self.proj = nn.LazyLinear(out_dim)
        self.norm = nn.LayerNorm(out_dim)

    def forward(self, x, x_lens):
        x = self.conv(x.unsqueeze(1))
        b, c, t, f = x.shape
        x = x.permute(0, 2, 1, 3).reshape(b, t, c * f)
        x = self.norm(self.proj(x))
        out_lens = x_lens
        for _ in range(2):
            out_lens = (out_lens + 1) // 2
        return x, out_lens.clamp(min=1)
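# Stateless transducer "decoder" as in the icefall pruned_transducer recipes:
# an embedding plus a depthwise Conv1d over the last `context_size` labels,
# i.e. a fixed n-gram context instead of an LSTM state.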
class StatelessDecoder(nn.Module):
    def __init__(self, vocab_size, decoder_dim, context_size=2, blank_id=0):
        super().__init__()
        self.embedding = nn.Embedding(vocab_size, decoder_dim, padding_idx=blank_id)
        self.context_size = context_size
        if context_size > 1:
            self.conv = nn.Conv1d(
                decoder_dim,
                decoder_dim,
                kernel_size=context_size,
                groups=decoder_dim,
                bias=False,
            )
        self.output_proj = nn.Linear(decoder_dim, decoder_dim, bias=False)

    def forward(self, y, need_pad=True):
        emb = self.embedding(y)
        if self.context_size > 1:
            emb = emb.permute(0, 2, 1)
            if need_pad:
                emb = F.pad(emb, (self.context_size - 1, 0))
            emb = self.conv(emb)
            emb = emb.permute(0, 2, 1)
        return self.output_proj(emb)
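# Transducer joiner. project_input=False is used on the pruned path, where
# encoder_proj/decoder_proj have already been applied before k2.do_rnnt_pruning.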
class Joiner(nn.Module):
    def __init__(self, encoder_dim, decoder_dim, joiner_dim, vocab_size):
        super().__init__()
        self.encoder_proj = nn.Linear(encoder_dim, joiner_dim)
        self.decoder_proj = nn.Linear(decoder_dim, joiner_dim)
        self.output_linear = nn.Linear(joiner_dim, vocab_size)

    def forward(self, encoder_out, decoder_out, project_input=True):
        if project_input:
            x = self.encoder_proj(encoder_out) + self.decoder_proj(decoder_out)
        else:
            x = encoder_out + decoder_out
        return self.output_linear(torch.tanh(x))
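# Maps text to token IDs via a tokens.txt file ("<token> <id>" per line).
# If the transcript is not already space-separated tokens, falls back to the
# SentencePiece model (if present) and keeps only pieces found in tokens.txt.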
class TokenEncoder:
    def __init__(self, tokens_path, bpe_model_path=None):
        self.token2id = {}
        self.id2token = {}
        with open(tokens_path, "r", encoding="utf-8") as f:
            for line in f:
                parts = line.strip().split()
                if len(parts) >= 2:
                    tok, tid = parts[0], int(parts[1])
                    self.token2id[tok] = tid
                    self.id2token[tid] = tok
        self.blank_id = self.token2id.get("<blk>", 0)
        self.vocab_size = len(self.token2id)
        self.sp = None
        if bpe_model_path and os.path.exists(bpe_model_path):
            self.sp = spm.SentencePieceProcessor()
            self.sp.load(bpe_model_path)

    def encode_text(self, text):
        toks = text.strip().split()
        ids = [self.token2id[t] for t in toks if t in self.token2id]
        if ids:
            return ids
        if self.sp is not None:
            raw = text.replace("\u2581", " ").strip()
            pieces = self.sp.encode(raw, out_type=str)
            return [self.token2id[p] for p in pieces if p in self.token2id]
        return []
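# Checkpoints in the wild nest weights under different keys ("model",
# "model_avg", ...). Pick whichever candidate dict holds the most tensors.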
def _pick_best_state_dict(ckpt):
    candidates = []
    if isinstance(ckpt, dict):
        for k in ("model", "model_avg", "state_dict", "model_state_dict", "avg_model", "ema_model"):
            if isinstance(ckpt.get(k), dict):
                candidates.append((k, ckpt[k]))
        candidates.append(("root", ckpt))
    best_name, best_sd, best_n = "none", {}, -1
    for name, sd in candidates:
        n = sum(1 for v in sd.values() if torch.is_tensor(v))
        if n > best_n:
            best_name, best_sd, best_n = name, sd, n
    return best_sd, best_name, max(best_n, 0)


def _read_profile(checkpoint_path):
    profile_path = os.path.join(os.path.dirname(checkpoint_path), "pretrained_profile.txt")
    if os.path.exists(profile_path):
        return Path(profile_path).read_text(encoding="utf-8").strip()
    return ""
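# Fetch exp/pretrained.pt from known Hugging Face mirrors of the 2022-12-29
# streaming model, trying each repo in order, and record which profile we got.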
def download_pretrained(save_dir, force=False):
    path = os.path.join(save_dir, "pretrained.pt")
    profile_path = os.path.join(save_dir, "pretrained_profile.txt")
    if force and os.path.exists(path):
        os.remove(path)
    if os.path.exists(path):
        return path

    os.makedirs(save_dir, exist_ok=True)
    for item in [
        {
            "repo_id": "Zengwei/icefall-asr-librispeech-pruned-transducer-stateless7-streaming-2022-12-29",
            "filename": "exp/pretrained.pt",
            "profile": "2022-12-29",
        },
        {
            "repo_id": "csukuangfj/icefall-asr-librispeech-pruned-transducer-stateless7-streaming-2022-12-29",
            "filename": "exp/pretrained.pt",
            "profile": "2022-12-29",
        },
    ]:
        try:
            log.info("Downloading pretrained from %s", item["repo_id"])
            downloaded = hf_hub_download(
                repo_id=item["repo_id"],
                filename=item["filename"],
                local_dir=save_dir,
                local_dir_use_symlinks=False,
            )
            if os.path.exists(path):
                os.remove(path)
            os.replace(downloaded, path)
            Path(profile_path).write_text(item["profile"], encoding="utf-8")
            return path
        except Exception as e:
            log.warning("Download failed for %s: %s", item["repo_id"], e)
    return None
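# The recipe-local Zipformer constructor has changed argument names across
# icefall revisions (e.g. encoder_dim vs encoder_dims). Build kwargs by
# inspecting the actual signature and filling only the names it accepts.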
def build_old_zipformer_kwargs(sig, params):
    enc_dims = _split_int(params.zipformer_encoder_dims)
    att_dims = _split_int(params.zipformer_attention_dims)
    unmasked = _split_int(params.zipformer_encoder_unmasked_dims)
    n_layers = _split_int(params.zipformer_num_encoder_layers)
    n_heads = _split_int(params.zipformer_num_heads)
    ff_dims = _split_int(params.zipformer_feedforward_dims)
    cnn_k = _split_int(params.zipformer_cnn_module_kernels)
    ds = _split_int(params.zipformer_downsampling_factors)

    mapping = {
        "num_features": params.feature_dim,
        "output_downsampling_factor": params.zipformer_output_downsampling_factor,
        "downsampling_factor": tuple(ds),
        "zipformer_downsampling_factors": tuple(ds),
        "num_encoder_layers": tuple(n_layers),
        "encoder_dim": tuple(enc_dims),
        "encoder_dims": tuple(enc_dims),
        "attention_dim": tuple(att_dims),
        "attention_dims": tuple(att_dims),
        "encoder_unmasked_dim": tuple(unmasked),
        "encoder_unmasked_dims": tuple(unmasked),
        "num_heads": tuple(n_heads),
        "nhead": tuple(n_heads),
        "feedforward_dim": tuple(ff_dims),
        "feedforward_dims": tuple(ff_dims),
        "cnn_module_kernel": tuple(cnn_k),
        "cnn_module_kernels": tuple(cnn_k),
        "query_head_dim": 32,
        "value_head_dim": 12,
        "pos_head_dim": 4,
        "pos_dim": 48,
        "causal": True,
        "chunk_size": (_split_int(params.chunk_size)[0],),
        "left_context_frames": (_split_int(params.left_context_frames)[0],),
        "short_chunk_size": params.short_chunk_size,
        "num_left_chunks": params.num_left_chunks,
        "decode_chunk_len": _split_int(params.chunk_size)[0],
    }

    kw = {}
    for name in sig:
        if name in ("self", "args", "kwargs"):
            continue
        if name in mapping:
            kw[name] = mapping[name]
    return kw
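# Full model: old-recipe streaming Zipformer encoder plus a linear projection,
# with three heads trained jointly: an auxiliary CTC head, and the simple +
# pruned RNN-T losses from k2 (as in pruned_transducer_stateless7). The
# constructor runs a dummy forward probe to discover the encoder's output dim
# and its preferred input/mask layout.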
class ASRModel(nn.Module):
    def __init__(self, params):
        super().__init__()
        self.params = params
        self.blank_id = params.blank_id
        self.vocab_size = params.vocab_size

        sig = inspect.signature(Zipformer.__init__).parameters
        kw = build_old_zipformer_kwargs(sig, params)
        log.info("Old Zipformer ctor config: keys=%s", sorted(kw.keys()))
        self.encoder = Zipformer(**kw)
        self.encoder_embed = None
        self._encoder_accepts_mask = (
            "src_key_padding_mask" in inspect.signature(self.encoder.forward).parameters
        )
        self._encoder_mask_layout = None
        self._encoder_input_layout = "BT"

        with torch.no_grad():
            x = torch.randn(2, 100, params.feature_dim)
            xl = torch.tensor([100, 80], dtype=torch.long)
            out, _ = self._call_zipformer_with_mask(x, xl)
            if out.shape[0] != 2:
                raise RuntimeError(f"Zipformer probe failed, batch={out.shape[0]}")
            self.encoder_output_dim = out.shape[-1]

        self.encoder_proj = nn.Linear(self.encoder_output_dim, params.encoder_dim)
        self.ctc_output = nn.Linear(params.encoder_dim, self.vocab_size)
        self.ctc_loss_fn = nn.CTCLoss(blank=self.blank_id, reduction="sum", zero_infinity=True)

        self.decoder = StatelessDecoder(
            vocab_size=self.vocab_size,
            decoder_dim=params.decoder_dim,
            context_size=params.context_size,
            blank_id=self.blank_id,
        )
        self.joiner = Joiner(params.encoder_dim, params.decoder_dim, params.joiner_dim, self.vocab_size)
        self.simple_am_proj = nn.Linear(params.encoder_dim, self.vocab_size)
        self.simple_lm_proj = nn.Linear(params.decoder_dim, self.vocab_size)
        self.encoder_type = "zipformer_streaming"

    def _call_zipformer_with_mask(self, x, x_lens):
        mask_bt = _make_pad_mask(x_lens, x.size(1))
        last_err = None
        for layout in ["BT", "TBC"]:
            self._encoder_input_layout = layout
            x_in = x if layout == "BT" else x.transpose(0, 1)
            if self._encoder_accepts_mask:
                for mask_layout in ["BT", "TB"]:
                    mask = mask_bt if mask_layout == "BT" else mask_bt.transpose(0, 1)
                    try:
                        out, out_lens = self.encoder(x_in, x_lens, src_key_padding_mask=mask)
                        self._encoder_mask_layout = mask_layout
                        if layout == "TBC":
                            out = out.transpose(0, 1)
                        return out, out_lens
                    except Exception as e:
                        last_err = e
            try:
                out, out_lens = self.encoder(x_in, x_lens)
                if layout == "TBC":
                    out = out.transpose(0, 1)
                return out, out_lens
            except Exception as e:
                last_err = e
        raise last_err

    def _encode(self, x, x_lens):
        if self.encoder_embed is not None:
            x, x_lens = self.encoder_embed(x, x_lens)
        enc_out, enc_lens = self._call_zipformer_with_mask(x, x_lens)
        enc_out = self.encoder_proj(enc_out)
        return enc_out, enc_lens

    def forward(self, x, x_lens, y, y_lens):
        enc_out, enc_lens = self._encode(x, x_lens)
        b = enc_out.size(0)

        ctc_logits = self.ctc_output(enc_out)
        ctc_log_probs = F.log_softmax(ctc_logits, dim=-1)
        ctc_loss = self.ctc_loss_fn(
            ctc_log_probs.permute(1, 0, 2),
            y,
            enc_lens.clamp(max=enc_out.size(1)),
            y_lens,
        ) / b

        sos = torch.full((b, 1), self.blank_id, device=y.device, dtype=y.dtype)
        sos_y = torch.cat([sos, y], dim=1)
        dec_out = self.decoder(sos_y)

        simple_am = self.simple_am_proj(enc_out)
        simple_lm = self.simple_lm_proj(dec_out)

        boundary = torch.zeros((b, 4), dtype=torch.int64, device=y.device)
        boundary[:, 2] = y_lens
        boundary[:, 3] = enc_lens

        with autocast(enabled=False):
            simple_loss, (px_grad, py_grad) = k2.rnnt_loss_smoothed(
                lm=simple_lm.float(),
                am=simple_am.float(),
                symbols=y,
                termination_symbol=self.blank_id,
                lm_only_scale=self.params.lm_only_scale,
                am_only_scale=self.params.am_only_scale,
                boundary=boundary,
                reduction="sum",
                return_grad=True,
            )

        ranges = k2.get_rnnt_prune_ranges(
            px_grad=px_grad,
            py_grad=py_grad,
            boundary=boundary,
            s_range=self.params.prune_range,
        )
        am_pruned, lm_pruned = k2.do_rnnt_pruning(
            am=self.joiner.encoder_proj(enc_out),
            lm=self.joiner.decoder_proj(dec_out),
            ranges=ranges,
        )
        logits = self.joiner(am_pruned, lm_pruned, project_input=False)

        with autocast(enabled=False):
            pruned_loss = k2.rnnt_loss_pruned(
                logits=logits.float(),
                symbols=y,
                ranges=ranges,
                termination_symbol=self.blank_id,
                boundary=boundary,
                reduction="sum",
            )

        loss = (
            self.params.ctc_loss_scale * ctc_loss
            + self.params.simple_loss_scale * (simple_loss / b)
            + self.params.pruned_loss_scale * (pruned_loss / b)
        )
        return loss
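# Encoder-only transfer: copy every encoder.*/encoder_embed.* tensor whose
# shape matches, then fail loudly if fewer than min_pretrained_loaded_keys
# keys (or too small a ratio) actually transferred.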
def load_pretrained(model, path, params):
    if not path or not os.path.exists(path):
        raise RuntimeError("No pretrained checkpoint found")
    ckpt = torch.load(path, map_location="cpu")
    pre, source, count = _pick_best_state_dict(ckpt)
    log.info("Pretrained state-dict source: %s (tensor_keys=%d)", source, count)

    cur = model.state_dict()
    used = set()
    loaded = 0
    skipped = 0
    shape_mm = 0

    for name, param in pre.items():
        if not (name.startswith("encoder.") or name.startswith("encoder_embed.")):
            skipped += 1
            continue
        if name in cur and cur[name].shape == param.shape:
            cur[name] = param
            used.add(name)
            loaded += 1
        elif name in cur:
            shape_mm += 1
        else:
            skipped += 1

    model.load_state_dict(cur, strict=False)
    encoder_model_keys = sum(1 for k in cur if k.startswith("encoder.") or k.startswith("encoder_embed."))
    encoder_loaded = sum(1 for k in used if k.startswith("encoder.") or k.startswith("encoder_embed."))
    ratio = encoder_loaded / max(1, encoder_model_keys)

    log.info(
        "Transfer: loaded=%d skipped=%d shape_mismatch=%d total_model_keys=%d "
        "loaded_ratio=%.3f encoder_loaded=%d encoder_model_keys=%d encoder_loaded_ratio=%.3f",
        loaded,
        skipped,
        shape_mm,
        len(cur),
        loaded / max(1, len(cur)),
        encoder_loaded,
        encoder_model_keys,
        ratio,
    )

    if params.require_pretrained:
        if encoder_loaded < params.min_pretrained_loaded_keys or ratio < params.min_pretrained_loaded_ratio:
            raise RuntimeError(
                f"Pretrained transfer too low: encoder_loaded={encoder_loaded} ratio={ratio:.3f}"
            )
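# Convert a Lhotse batch into padded model inputs, dropping any utterance
# whose transcript produces no token IDs.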
def process_batch(batch, tok, device):
    features = batch["inputs"].to(device)
    feat_lens = batch["supervisions"]["num_frames"].to(device).long()
    texts = batch["supervisions"]["text"]

    targets = []
    tgt_lens = []
    keep = []
    for i, text in enumerate(texts):
        ids = tok.encode_text(text)
        if ids:
            targets.append(torch.tensor(ids, dtype=torch.long))
            tgt_lens.append(len(ids))
            keep.append(i)
    if not targets:
        return None

    max_len = max(tgt_lens)
    y = torch.zeros(len(targets), max_len, dtype=torch.long)
    for i, t in enumerate(targets):
        y[i, : len(t)] = t

    return {
        "features": features[keep],
        "feat_lens": feat_lens[keep],
        "targets": y.to(device),
        "tgt_lens": torch.tensor(tgt_lens, dtype=torch.long, device=device),
    }
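# Build a DataLoader with on-the-fly 80-dim fbank features. For training,
# optionally applies speed/volume perturbation, RIR reverb, and MUSAN
# noise/music/speech mixing, logging exactly what was and was not applied.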
def build_dataloader(cuts_path, params, is_training=False, musan_noise="", musan_music="", musan_speech="", rir_path=""):
    cuts = CutSet.from_file(cuts_path).to_eager()
    log.info("Loaded %d cuts from %s", len(cuts), Path(cuts_path).name)

    if is_training:
        log.info("==== TRAIN AUGMENTATION CHECK ====")
        log.info("speed_perturb=%s", params.speed_perturb)
        log.info(
            "volume_perturb=%s range=[%.2f, %.2f]",
            params.volume_perturb,
            params.volume_low,
            params.volume_high,
        )
        log.info("musan_noise=%s", musan_noise)
        log.info("musan_music=%s", musan_music)
        log.info("musan_speech=%s", musan_speech)
        log.info("rir_path=%s", rir_path)
        log.info(
            "path_exists noise=%s music=%s speech=%s rir=%s",
            os.path.exists(musan_noise) if musan_noise else False,
            os.path.exists(musan_music) if musan_music else False,
            os.path.exists(musan_speech) if musan_speech else False,
            os.path.exists(rir_path) if rir_path else False,
        )

        if params.speed_perturb:
            before = len(cuts)
            cuts = cuts + cuts.perturb_speed(0.9) + cuts.perturb_speed(1.1)
            log.info("Applied speed perturbation: cuts %d -> %d", before, len(cuts))
        else:
            log.info("Speed perturbation not applied")

        if params.volume_perturb:
            import numpy as np

            factors = np.random.uniform(params.volume_low, params.volume_high, len(cuts))
            cuts = CutSet.from_cuts([c.perturb_volume(float(f)) for c, f in zip(cuts, factors)])
            log.info("Applied volume perturbation to %d cuts", len(cuts))
        else:
            log.info("Volume perturbation not applied")

        if rir_path and os.path.exists(rir_path):
            try:
                rir_cuts = CutSet.from_file(rir_path)
                cuts = cuts.reverb_rir(rir_cuts, p=params.rir_prob)
                log.info("Applied RIR augmentation with p=%.2f", params.rir_prob)
            except Exception as e:
                log.warning("RIR failed: %s", e)
        else:
            log.info("RIR not applied")

        for name, path, snr, prob in [
            ("noise", musan_noise, params.musan_noise_snr, params.musan_noise_prob),
            ("music", musan_music, params.musan_music_snr, params.musan_music_prob),
            ("speech", musan_speech, params.musan_speech_snr, params.musan_speech_prob),
        ]:
            if path and os.path.exists(path):
                try:
                    m = CutSet.from_file(path)
                    try:
                        cuts = cuts.mix(cuts=m, snr=snr, mix_prob=prob)
                    except TypeError:
                        cuts = cuts.mix(cuts=m, snr=snr, prob=prob)
                    log.info("Applied MUSAN %s with snr=%s prob=%.2f", name, snr, prob)
                except Exception as e:
                    log.warning("MUSAN %s failed: %s", name, e)
            else:
                log.info("MUSAN %s not applied", name)

        log.info("==== END TRAIN AUGMENTATION CHECK ====")

    fbank = OnTheFlyFeatures(Fbank(FbankConfig(num_mel_bins=80)))
    max_dur = params.max_duration_train if is_training else params.max_duration_valid
    if is_training:
        sampler = DynamicBucketingSampler(
            cuts,
            max_duration=max_dur,
            shuffle=True,
            num_buckets=params.num_buckets,
            drop_last=True,
        )
    else:
        sampler = SimpleCutSampler(cuts, max_duration=max_dur, shuffle=False)

    dataset = K2SpeechRecognitionDataset(input_strategy=fbank, return_cuts=True)
    return DataLoader(
        dataset,
        sampler=sampler,
        batch_size=None,
        num_workers=params.num_workers,
        pin_memory=params.pin_memory,
        persistent_workers=params.num_workers > 0,
    )
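# Two parameter groups: the pretrained encoder at base_lr * encoder_lr_factor,
# everything else (decoder, joiner, projections) at base_lr.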
def make_optimizer(model, params):
    # Match only the pretrained encoder module by prefix; the original
    # substring test ("encoder" in n) also caught new layers such as
    # joiner.encoder_proj and gave them the reduced LR unintentionally.
    enc = [p for n, p in model.named_parameters() if n.startswith("encoder.") and p.requires_grad]
    head = [p for n, p in model.named_parameters() if not n.startswith("encoder.") and p.requires_grad]
    groups = []
    if enc:
        groups.append({"params": enc, "lr": params.base_lr * params.encoder_lr_factor})
    if head:
        groups.append({"params": head, "lr": params.base_lr})
    return torch.optim.AdamW(
        groups,
        lr=params.base_lr,
        betas=(0.9, 0.98),
        weight_decay=params.weight_decay,
    )
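# LR schedule: linear warmup for lr_warmup_steps, then cosine decay
#   factor(t) = max(0.01, 0.5 * (1 + cos(pi * t'))),
# where t' = (step - warmup) / (lr_total_steps - warmup), clamped to 1.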
def make_scheduler(optimizer, params):
    def fn(step):
        w = params.lr_warmup_steps
        if step < w:
            return step / max(1, w)
        p = (step - w) / max(1, params.lr_total_steps - w)
        return max(0.01, 0.5 * (1 + math.cos(math.pi * min(1.0, p))))

    return torch.optim.lr_scheduler.LambdaLR(optimizer, fn)
def get_notebook_default_args():
    return AttributeDict(
        tokens="/teamspace/studios/this_studio/amharic_asr/tokens.txt",
        tokens_sha256="",
        train_cuts="/teamspace/studios/this_studio/amharic_asr/amharic_data/cuts_train.jsonl.gz",
        valid_cuts="/teamspace/studios/this_studio/amharic_asr/amharic_data/cuts_valid.jsonl.gz",
        initial_model="",
        exp_dir="./exp_amharic_pr787",
        resume=False,
        require_zipformer=True,
        require_pretrained=True,
        encoder_only_transfer=True,
        allow_scratch_if_incompatible=False,
        min_pretrained_loaded_keys=50,
        min_pretrained_loaded_ratio=0.05,
        force_redownload_pretrained=True,
        musan_noise="/teamspace/studios/this_studio/amharic_asr/musan_noise_cuts.jsonl.gz",
        musan_music="/teamspace/studios/this_studio/amharic_asr/musan_music_cuts.jsonl.gz",
        musan_speech="/teamspace/studios/this_studio/amharic_asr/musan_speech_cuts.jsonl.gz",
        rir_path="/teamspace/studios/this_studio/amharic_asr/rir_cuts.jsonl.gz",
    )
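# Training driver: freeze the encoder for the first freeze_encoder_epochs,
# then rebuild the optimizer/scheduler to fine-tune it at a reduced LR.
# Uses AMP (GradScaler) and gradient accumulation over accum_grad batches.
# Note: valid_cuts/valid_interval are currently unused; no validation loop runs.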
def train(args):
    params = get_params()
    params.exp_dir = args.exp_dir
    os.makedirs(args.exp_dir, exist_ok=True)

    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    log.info("============================================================")
    log.info("Dependency check:")
    log.info(" Icefall dir: %s", ICEFALL_DIR)
    log.info(" k2: %s", "YES" if HAVE_K2 else "NO")
    log.info(" icefall: YES")
    log.info(" Zipformer: YES")
    log.info(" ScaledAdam: %s", "YES" if HAVE_SCALED_ADAM else "NO")
    log.info(" Mode: pruned_transducer_stateless7_streaming")
    log.info("============================================================")
    log.info("Arguments: %s", dict(args) if isinstance(args, dict) else vars(args))

    bpe_model = os.path.join(os.path.dirname(args.tokens), "bpe_500", "bpe.model")
    tok = TokenEncoder(args.tokens, bpe_model)
    params.vocab_size = tok.vocab_size

    pre_path = args.initial_model or download_pretrained(
        os.path.join(args.exp_dir, "pretrained"),
        force=bool(args.force_redownload_pretrained),
    )
    if not pre_path:
        raise RuntimeError("Could not download pretrained checkpoint")
    log.info("Applied built-in 2022 stateless7-streaming compatibility profile.")
    log.info("Checkpoint profile: %s", _read_profile(pre_path))

    model = ASRModel(params).to(device)
    load_pretrained(model, pre_path, params)

    if params.freeze_encoder_epochs > 0:
        for p in model.encoder.parameters():
            p.requires_grad = False

    optimizer = make_optimizer(model, params)
    scheduler = make_scheduler(optimizer, params)
    scaler = GradScaler(enabled=params.use_fp16)
    train_dl = build_dataloader(
        args.train_cuts,
        params,
        is_training=True,
        musan_noise=getattr(args, "musan_noise", ""),
        musan_music=getattr(args, "musan_music", ""),
        musan_speech=getattr(args, "musan_speech", ""),
        rir_path=getattr(args, "rir_path", ""),
    )
    global_step = 0
    for epoch in range(params.num_epochs):
        if epoch == params.freeze_encoder_epochs:
            for p in model.encoder.parameters():
                p.requires_grad = True
            optimizer = make_optimizer(model, params)
            scheduler = make_scheduler(optimizer, params)

        model.train()
        for batch in train_dl:
            processed = process_batch(batch, tok, device)
            if processed is None:
                continue
            with autocast(enabled=params.use_fp16):
                loss = model(
                    processed["features"],
                    processed["feat_lens"],
                    processed["targets"],
                    processed["tgt_lens"],
                )
            loss = loss / params.accum_grad
            scaler.scale(loss).backward()

            if (global_step + 1) % params.accum_grad == 0:
                scaler.unscale_(optimizer)
                torch.nn.utils.clip_grad_norm_(model.parameters(), params.grad_clip)
                scaler.step(optimizer)
                scaler.update()
                optimizer.zero_grad(set_to_none=True)
                scheduler.step()

            if global_step % params.log_interval == 0:
                log.info("epoch=%d step=%d loss=%.4f", epoch, global_step, float(loss.item()) * params.accum_grad)

            if global_step > 0 and global_step % params.save_every_n == 0:
                ckpt = {
                    "model": model.state_dict(),
                    "optimizer": optimizer.state_dict(),
                    "scheduler": scheduler.state_dict(),
                    "scaler": scaler.state_dict(),
                    "epoch": epoch,
                    "global_step": global_step,
                    "encoder_type": model.encoder_type,
                }
                torch.save(ckpt, os.path.join(args.exp_dir, f"checkpoint-{global_step}.pt"))

            global_step += 1
def main():
    if "ipykernel" in sys.modules:
        args = get_notebook_default_args()
    else:
        parser = argparse.ArgumentParser()
        parser.add_argument("--tokens", required=True)
        parser.add_argument("--tokens-sha256", default="")
        parser.add_argument("--train-cuts", required=True)
        parser.add_argument("--valid-cuts", required=True)
        parser.add_argument("--initial-model", default="")
        parser.add_argument("--exp-dir", default="./exp_amharic_pr787")
        parser.add_argument("--resume", action="store_true")
        parser.add_argument("--force-redownload-pretrained", action="store_true")
        # Augmentation manifests (optional); without these flags the CLI path
        # had no way to reach the MUSAN/RIR branches in build_dataloader.
        parser.add_argument("--musan-noise", default="")
        parser.add_argument("--musan-music", default="")
        parser.add_argument("--musan-speech", default="")
        parser.add_argument("--rir-path", default="")
        args = parser.parse_args()
        args.require_zipformer = True
        args.require_pretrained = True
        args.encoder_only_transfer = True
        args.allow_scratch_if_incompatible = False
        args.min_pretrained_loaded_keys = 50
        args.min_pretrained_loaded_ratio = 0.05
    train(args)
|
| if __name__ == "__main__": |
| main() |
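# Example invocation (script name and paths are illustrative):
#   python train_amharic_pr787.py \
#       --tokens /path/to/tokens.txt \
#       --train-cuts /path/to/cuts_train.jsonl.gz \
#       --valid-cuts /path/to/cuts_valid.jsonl.gz \
#       --exp-dir ./exp_amharic_pr787 \
#       --musan-noise /path/to/musan_noise_cuts.jsonl.gz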