import os
import time
from datetime import timedelta

import numpy as np
import pickle as pkl
import torch
from tqdm import tqdm

MAX_VOCAB_SIZE = 10000  # maximum number of tokens kept in the vocabulary
UNK, PAD = '<UNK>', '<PAD>'  # special tokens for unknown words and padding

def build_vocab(file_path, tokenizer, max_size, min_freq):
    """Build a vocabulary mapping each token to an integer id."""
    vocab_dic = {}
    with open(file_path, 'r', encoding='UTF-8') as f:
        for line in tqdm(f):
            lin = line.strip()
            if not lin:
                continue
            content = lin.split('\t')[0]  # the text comes before the tab, the label after
            for word in tokenizer(content):
                vocab_dic[word] = vocab_dic.get(word, 0) + 1
    # Keep the max_size most frequent tokens occurring at least min_freq times.
    vocab_list = sorted(
        [item for item in vocab_dic.items() if item[1] >= min_freq],
        key=lambda x: x[1], reverse=True
    )[:max_size]
    vocab_dic = {word_count[0]: idx for idx, word_count in enumerate(vocab_list)}
    # The special tokens take the last two ids.
    vocab_dic.update({UNK: len(vocab_dic), PAD: len(vocab_dic) + 1})
    return vocab_dic
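
# Example usage (a minimal sketch; the data path and the char_tokenizer name
# are hypothetical, assuming one "text<TAB>label" pair per line):
#
#     char_tokenizer = lambda x: [ch for ch in x]
#     vocab = build_vocab('data/train.txt', char_tokenizer,
#                         max_size=MAX_VOCAB_SIZE, min_freq=1)
#     # Ids follow descending frequency; the special tokens come last, so
#     # vocab[UNK] == len(vocab) - 2 and vocab[PAD] == len(vocab) - 1.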
def load_dataset(path, vocab, tokenizer, pad_size=32):
    """Load a dataset file into a list of (token ids, label, seq_len) triples."""
    contents = []
    with open(path, 'r', encoding='UTF-8') as f:
        for line in tqdm(f, desc=f"Loading {os.path.basename(path)}"):
            lin = line.strip()
            if not lin:
                continue
            content, label = lin.split('\t')
            words_line = []
            token = tokenizer(content)
            seq_len = len(token)
            if pad_size:
                # Pad short sequences with PAD and truncate long ones; seq_len
                # keeps the true length, capped at pad_size.
                if len(token) < pad_size:
                    token.extend([PAD] * (pad_size - len(token)))
                else:
                    token = token[:pad_size]
                    seq_len = pad_size

            # Map tokens to ids, falling back to UNK for out-of-vocabulary tokens.
            for word in token:
                words_line.append(vocab.get(word, vocab.get(UNK)))
            contents.append((words_line, int(label), seq_len))
    return contents
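
# Example record (a sketch, reusing the hypothetical path and tokenizer above):
#
#     data = load_dataset('data/train.txt', vocab, char_tokenizer, pad_size=32)
#     # Each record is (ids padded/truncated to pad_size, int label, true length),
#     # e.g. ([5, 12, 7, ..., pad_id, pad_id], 3, 14) for a 14-token line.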
def build_dataset(config, use_word=False):
    """Build the vocabulary and the train/dev/test datasets from config paths."""
    if use_word:
        def tokenizer(x):
            return x.split(' ')  # word level: tokens are space-separated
    else:
        def tokenizer(x):
            return [y for y in x]  # char level: every character is a token

    # Reuse a cached vocabulary if one exists; otherwise build and cache it.
    if os.path.exists(config.vocab_path):
        with open(config.vocab_path, 'rb') as f:
            vocab = pkl.load(f)
    else:
        vocab = build_vocab(config.train_path, tokenizer=tokenizer,
                            max_size=MAX_VOCAB_SIZE, min_freq=1)
        with open(config.vocab_path, 'wb') as f:
            pkl.dump(vocab, f)

    print(f"Vocab size: {len(vocab)}")

    train = load_dataset(config.train_path, vocab, tokenizer, config.pad_size)
    dev = load_dataset(config.dev_path, vocab, tokenizer, config.pad_size)
    test = load_dataset(config.test_path, vocab, tokenizer, config.pad_size)

    return vocab, train, dev, test
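
# A minimal sketch of the config object build_dataset expects; the class name
# and concrete paths are hypothetical, shown only for illustration:
#
#     class Config:
#         train_path = 'data/train.txt'
#         dev_path = 'data/dev.txt'
#         test_path = 'data/test.txt'
#         vocab_path = 'data/vocab.pkl'
#         pad_size = 32
#         batch_size = 128
#         device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
#
#     vocab, train, dev, test = build_dataset(Config(), use_word=False)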
class DatasetIterator(object):
    """Batch iterator over a list of (token ids, label, seq_len) records."""
    def __init__(self, batches, batch_size, device):
        self.batch_size = batch_size
        self.batches = batches
        self.n_batches = len(batches) // batch_size
        # True when the dataset size is not an exact multiple of batch_size;
        # the leftover samples then form one final, smaller batch.
        self.residue = len(batches) % batch_size != 0
        self.index = 0
        self.device = device

    def _to_tensor(self, datas):
        x = torch.LongTensor([_[0] for _ in datas]).to(self.device)
        y = torch.LongTensor([_[1] for _ in datas]).to(self.device)
        # seq_len is the true (pre-padding) length of each sequence.
        seq_len = torch.LongTensor([_[2] for _ in datas]).to(self.device)
        return (x, seq_len), y

    def __next__(self):
        if self.residue and self.index == self.n_batches:
            # Final, smaller batch holding the leftover samples.
            batches = self.batches[self.index * self.batch_size:]
            self.index += 1
            return self._to_tensor(batches)
        elif self.index >= self.n_batches:
            self.index = 0
            raise StopIteration
        else:
            batches = self.batches[self.index * self.batch_size: (self.index + 1) * self.batch_size]
            self.index += 1
            return self._to_tensor(batches)

    def __iter__(self):
        return self

    def __len__(self):
        return self.n_batches + 1 if self.residue else self.n_batches
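
# Worked example of the residue logic (a sketch): with 100 samples and
# batch_size=32, n_batches == 3 and the leftover 4 samples form a final
# residue batch, so len(iterator) == 4.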
def build_iterator(dataset, config):
    """Wrap a dataset in a DatasetIterator using the configured batch size and device."""
    return DatasetIterator(dataset, config.batch_size, config.device)
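
# Example usage (a sketch, reusing the hypothetical Config above):
#
#     train_iter = build_iterator(train, Config())
#     for (x, seq_len), y in train_iter:
#         ...  # x: LongTensor [batch, pad_size], seq_len: [batch], y: [batch]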
def get_time_dif(start_time):
    """Return the wall-clock time elapsed since start_time, rounded to whole seconds."""
    end_time = time.time()
    time_dif = end_time - start_time
    return timedelta(seconds=int(round(time_dif)))
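
# Example usage (a sketch):
#
#     start_time = time.time()
#     ...  # training loop
#     print(f"Time usage: {get_time_dif(start_time)}")  # e.g. 0:01:23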
def get_labels_from_dataset(dataset):
    """Extract the label of every record in a dataset as a numpy array."""
    return np.array([label for _, label, _ in dataset])
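
# Example usage (a sketch): inspect the class distribution of a split.
#
#     labels = get_labels_from_dataset(train)
#     classes, counts = np.unique(labels, return_counts=True)
#     print(dict(zip(classes.tolist(), counts.tolist())))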