""" Dataset for embedding inversion training. Reads pre-converted numpy .npy files for instant loading. """ import os import glob import bisect import json import numpy as np import torch from torch.utils.data import Dataset, DataLoader from transformers import AutoTokenizer class EmbeddingInversionDataset(Dataset): """ Dataset đọc dữ liệu từ file .pt đã được preprocess. """ def __init__(self, pt_file_path, tokenizer, max_seq_len=32, val=False, val_split=0.01): self.tokenizer = tokenizer self.max_seq_len = max_seq_len # Load dữ liệu từ file .pt print(f"Loading data from {pt_file_path}...") self.raw_data = torch.load(pt_file_path) # List[Dict] # Tạo mapping để truy cập chunk theo index phẳng (flat index) # Vì mỗi cuốn sách có số lượng chunk khác nhau self.flat_indices = [] for book_idx, book in enumerate(self.raw_data): num_chunks = len(book["chunks_text"]) for chunk_idx in range(num_chunks): self.flat_indices.append((book_idx, chunk_idx)) self.total_rows = len(self.flat_indices) # Chia Train/Val val_count = int(self.total_rows * val_split) if val: self.start_idx = self.total_rows - val_count self.length = val_count else: self.start_idx = 0 self.length = self.total_rows - val_count def __len__(self): return self.length def __getitem__(self, idx): global_idx = self.start_idx + idx book_idx, chunk_in_book_idx = self.flat_indices[global_idx] # Lấy dữ liệu book_data = self.raw_data[book_idx] text = book_data["chunks_text"][chunk_in_book_idx] embedding = book_data["embeddings"][chunk_in_book_idx] # Tensor đã có sẵn # Tokenize lại text để lấy token_ids (vì file .pt chỉ lưu text) encoding = self.tokenizer( text, max_length=self.max_seq_len, padding="max_length", truncation=True, return_tensors="pt" ) token_ids = encoding["input_ids"].squeeze(0) # (max_seq_len) # Tạo padding mask # Thông thường 0 hoặc 1 tùy model, Qwen thường dùng padding_mask từ tokenizer padding_mask = (token_ids == self.tokenizer.pad_token_id) return { "token_ids": token_ids.long(), "embedding": embedding.float(), "padding_mask": padding_mask, } # /kaggle/embedding-inversion-demo/dataset.py def create_dataloaders(config): dc = config["data"] tc = config["training"] mc = config["model"] # Sửa lỗi KeyError: Thử lấy 'encoder_model', nếu không có thì lấy 'encoder_model_name' model_name = mc.get("encoder_model") or mc.get("encoder_model_name") if not model_name: raise KeyError("Config file thiếu key 'encoder_model' hoặc 'encoder_model_name' trong phần 'model'") print(f"Initializing tokenizer from: {model_name}") tokenizer = AutoTokenizer.from_pretrained(model_name, trust_remote_code=True) if tokenizer.pad_token is None: tokenizer.pad_token = tokenizer.eos_token pt_path = dc["data_dir"] train_ds = EmbeddingInversionDataset( pt_path, tokenizer, max_seq_len=mc["max_seq_len"], val=False, val_split=dc["val_split"] ) val_ds = EmbeddingInversionDataset( pt_path, tokenizer, max_seq_len=mc["max_seq_len"], val=True, val_split=dc["val_split"] ) train_loader = DataLoader( train_ds, batch_size=tc["batch_size"], shuffle=True, num_workers=tc["num_workers"], pin_memory=True, drop_last=True ) val_loader = DataLoader( val_ds, batch_size=tc["batch_size"], shuffle=False, num_workers=tc["num_workers"], pin_memory=True ) return train_loader, val_loader