| """The IWSLT Challenge Dataset, adapted to punctuation as described by Ueffing et al. (2013)""" |

from enum import Enum
import logging

import nltk
import datasets
import numpy as np

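# TweetTokenizer keeps punctuation marks as separate tokens, so each word can
# later be paired with the punctuation symbol that follows it.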
nltk.download("punkt")
tknzr = nltk.tokenize.TweetTokenizer()

_CITATION = """\
@inproceedings{Ueffing2013,
  title={Improved models for automatic punctuation prediction for spoken and written text},
  author={N. Ueffing and M. Bisani and P. Vozila},
  booktitle={INTERSPEECH},
  year={2013}
}
@article{Federico2011,
  author = {M. Federico and L. Bentivogli and M. Paul and S. Stüker},
  year = {2011},
  month = {01},
  title = {Overview of the IWSLT 2011 Evaluation Campaign},
  journal = {Proceedings of the International Workshop on Spoken Language Translation (IWSLT), San Francisco, CA}
}
"""

_DESCRIPTION = """\
Both the manual transcripts and the ASR outputs from the IWSLT 2011 speech translation evaluation campaign are often used \
for the related punctuation annotation task. This dataset takes care of preprocessing those transcripts and automatically \
inserts the punctuation marks given in the manual transcripts into the ASR outputs using Levenshtein alignment.
"""

_VERSION = "0.0.1"

def window(a, w=4, o=2):
    """Return the length-``w`` windows of the 1-D array ``a``, keeping every
    ``o``-th window (i.e. a sliding window with stride ``o``)."""
    sh = (a.size - w + 1, w)
    st = a.strides * 2
    view = np.lib.stride_tricks.as_strided(a, strides=st, shape=sh)[0::o]
    return view.copy()
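# For example (a quick sanity check, not used by the loading script itself):
#     window(np.arange(6), w=4, o=2)  ->  [[0, 1, 2, 3], [2, 3, 4, 5]]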


class Punctuation(Enum):
    NONE = "<none>"
    PERIOD = "<period>"
    COMMA = "<comma>"
    QUESTION = "<question>"


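# The label given to non-initial subword tokens: IGNORE marks them to be
# skipped (e.g. masked out of a downstream loss), while NONE treats them as
# ordinary no-punctuation positions.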
class LabelSubword(Enum):
    IGNORE = "<ignore>"
    NONE = "<none>"


class Task(Enum):
    TAGGING = 0
    SEQ2SEQ = 1


class TaggingTask:
    """Treat punctuation prediction as a sequence tagging problem."""

    def __eq__(self, other):
        return Task.TAGGING == other


class IWSLT11Config(datasets.BuilderConfig):
    """BuilderConfig for the IWSLT11 dataset."""

    def __init__(
        self,
        task=TaggingTask(),
        segmented: bool = False,
        asr_or_ref: str = "ref",
        tokenizer=None,
        label_subword=LabelSubword.IGNORE,
        window_size=120,
        window_stride=60,
        **kwargs
    ):
| """BuilderConfig for IWSLT2011. |
| Args: |
| task: the task to prepare the dataset for. |
| segmented: if segmentation present in IWSLT2011 should be respected. removes segmenation by default. |
| **kwargs: keyword arguments forwarded to super. |
| """ |
        self.task = task
        self.window_size = window_size
        self.window_stride = window_stride
        self.segmented = segmented
        self.asr_or_ref = asr_or_ref
        # IGNORE is always part of the label set, since padding positions are
        # labelled with it regardless of ``label_subword``.
        self.punctuation = [
            Punctuation.NONE,
            Punctuation.PERIOD,
            Punctuation.COMMA,
            Punctuation.QUESTION,
            LabelSubword.IGNORE,
        ]
        self.label_subword = label_subword
        self.tokenizer = tokenizer
        super().__init__(**kwargs)

    def __eq__(self, other):
        # Note: all IWSLT11Config instances compare equal, so configs that
        # differ only in runtime options (e.g. the tokenizer) are treated as
        # the same configuration.
        return True


class IWSLT11(datasets.GeneratorBasedBuilder):
    """The IWSLT11 dataset, adapted for punctuation prediction."""

    BUILDER_CONFIGS = [
        IWSLT11Config(name="ref", asr_or_ref="ref"),
        IWSLT11Config(name="asr", asr_or_ref="asr"),
    ]

    def __init__(self, *args, **kwargs):
        # Allow ``label_subword`` to be passed as a plain string.
        label_subword = kwargs.get('label_subword')
        if isinstance(label_subword, str):
            if label_subword.lower() == 'ignore':
                kwargs['label_subword'] = LabelSubword.IGNORE
            elif label_subword.lower() == 'none':
                kwargs['label_subword'] = LabelSubword.NONE
        super().__init__(*args, **kwargs)

    def _info(self):
        if self.config.task != Task.TAGGING:
            # Only the tagging task is implemented so far; fail loudly rather
            # than silently returning None.
            raise NotImplementedError(f"unsupported task {self.config.task}")
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=datasets.Features(
                {
                    "ids": datasets.Sequence(datasets.Value("int32")),
                    "tokens": datasets.Sequence(datasets.Value("int32")),
                    "labels": datasets.Sequence(
                        datasets.features.ClassLabel(
                            names=[p.name for p in self.config.punctuation]
                        )
                    ),
                }
            ),
            supervised_keys=None,
            homepage="http://iwslt2011.org/doku.php",
            citation=_CITATION,
            version=_VERSION,
        )

    def _split_generators(self, dl_manager):
        """Returns SplitGenerators."""
        urls_to_download = {
            "train": "https://raw.githubusercontent.com/IsaacChanghau/neural_sequence_labeling/master/data/raw/LREC_converted/train.txt",
            "valid": "https://github.com/IsaacChanghau/neural_sequence_labeling/blob/master/data/raw/LREC_converted/dev.txt?raw=true",
            "test_ref": "https://github.com/IsaacChanghau/neural_sequence_labeling/raw/master/data/raw/LREC_converted/ref.txt",
            "test_asr": "https://github.com/IsaacChanghau/neural_sequence_labeling/raw/master/data/raw/LREC_converted/asr.txt",
        }
        files = dl_manager.download_and_extract(urls_to_download)

        # The train and validation splits are shared; only the test split
        # differs between the ASR and reference configurations.
        test_file = files["test_asr"] if self.config.asr_or_ref == "asr" else files["test_ref"]
        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={"filepath": files["train"]},
            ),
            datasets.SplitGenerator(
                name=datasets.Split.VALIDATION,
                gen_kwargs={"filepath": files["valid"]},
            ),
            datasets.SplitGenerator(
                name=datasets.Split.TEST,
                gen_kwargs={"filepath": test_file},
            ),
        ]

    def _generate_examples(self, filepath):
        logging.info("⏳ Generating examples from = %s", filepath)
        # The LREC-converted files mark punctuation with composite tokens such
        # as ",COMMA"; map these back to plain punctuation characters first.
        with open(filepath, encoding="utf-8") as f:
            text = f.read()
        text = (
            text
            .replace(',COMMA', ',')
            .replace('.PERIOD', '.')
            .replace('?QUESTIONMARK', '?')
        )
        tokens = []
        labels = []
        for token in tknzr.tokenize(text):
            if token in [',', '.', '?']:
                # Attach the punctuation mark to the preceding word as its
                # label; punctuation before any word has nothing to attach to.
                if not labels:
                    continue
                if token == ',':
                    labels[-1] = Punctuation.COMMA
                elif token == '.':
                    labels[-1] = Punctuation.PERIOD
                elif token == '?':
                    labels[-1] = Punctuation.QUESTION
            else:
                labels.append(Punctuation.NONE)
                tokens.append(token)

        tokens = np.array(tokens)
        labels = np.array(labels)
        token_len = len(tokens)
        assert len(tokens) == len(labels)

        if self.config.task == Task.TAGGING:
            if self.config.tokenizer is None:
                # Check up front, before the tokenizer is first used below.
                raise ValueError('tokenizer argument has to be passed to load_dataset')

            def apply_window(l):
                return window(
                    l,
                    self.config.window_size,
                    self.config.window_stride
                )

            ids = apply_window(np.arange(len(tokens)))
            tokens = apply_window(tokens)
            tokens = self.config.tokenizer(
                [t.tolist() for t in tokens],
                is_split_into_words=True,
                return_offsets_mapping=True,
                padding=True,
                truncation=True,
                max_length=int(self.config.window_size * 2),
                pad_to_multiple_of=int(self.config.window_size * 2)
            )
            labels = apply_window(labels)
            for i, (window_ids, window_labels) in enumerate(zip(ids, labels)):
                # Align the word-level labels with the subword tokens: the
                # first subword of each word carries the word's label, the
                # remaining subwords get the configured subword label, and
                # padding is always IGNORE.
                words = tokens[i].words
                input_ids = tokens['input_ids'][i]
                enc_labels = np.array([self.config.label_subword.name] * len(input_ids), dtype=object)
                count = 0
                for j, word_id in enumerate(words):
                    if word_id is not None and (j == 0 or words[j - 1] != word_id):
                        enc_labels[j] = window_labels[count].name
                        count += 1
                    elif input_ids[j] == self.config.tokenizer.pad_token_id:
                        enc_labels[j] = LabelSubword.IGNORE.name
                yield i, {
                    "ids": window_ids,
                    "tokens": input_ids,
                    "labels": enc_labels,
                }
        logging.info(f"Loaded number of tokens = {token_len}")