# Copyright (c) 2025 CMS Manhattan
# All rights reserved.
# Author: Konstantin Vladimirovich Grabko
# Email: grabko@cmsmanhattan.com
# Phone: +1(516)777-0945
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, version 3 of the License.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
#
# Additional terms:
# Any commercial use or distribution of this software or derivative works
# requires explicit written permission from the copyright holder.
"""
Fine-tune a TorchScript (JIT) GPT model on a plain-text dialogue dataset.

Before running this script, download the GPT-2 tokenizer files into a local
folder named 'tokenizer':

    mkdir -p tokenizer
    wget -O tokenizer/tokenizer.json https://huggingface.co/gpt2/resolve/main/tokenizer.json
    wget -O tokenizer/vocab.json https://huggingface.co/gpt2/resolve/main/vocab.json
    wget -O tokenizer/merges.txt https://huggingface.co/gpt2/resolve/main/merges.txt
    wget -O tokenizer/tokenizer_config.json https://huggingface.co/gpt2/resolve/main/tokenizer_config.json
"""
import os
import torch
import torch.nn as nn
import torch.optim as optim
from torch.utils.data import IterableDataset, DataLoader
from transformers import GPT2TokenizerFast
from tqdm import tqdm
import shutil
import math
from pathlib import Path
import re

# ============================= SETTINGS =============================
TRAIN_SEQ_LEN = 256
BATCH_SIZE = 1
EPOCHS = 1
LEARNING_RATE = 6e-6
WEIGHT_DECAY = 0.01
GRAD_CLIP = 1.0
KEEP_LAST_EPOCHS = 3       # how many epoch checkpoints to retain on disk
VAL_SPLIT_RATIO = 0.05     # tail fraction of token sequences reserved for validation
VOCAB_SIZE = 50257

BASE_MODEL_PATH = Path("models/gpt_modern_1b_class.script.pt")
LAST_TRAINED_PATH = Path("models/gpt_1b_last_trained.script.pt")
BACKUP_DIR = Path("models/backups")
# parents=True: without it mkdir raises FileNotFoundError when "models/" itself
# does not exist yet (e.g. first run on a fresh checkout).
BACKUP_DIR.mkdir(parents=True, exist_ok=True)

RAW_PATH = Path("datasets/dialogues_text.txt")
CLEAN_PATH = Path("datasets/dialogues_text_clean.txt")

# Device selection
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
print(f"Using device: {device}")

# -- Dataset cleaning --
# Re-clean when the cleaned file is missing or older than the raw source.
force_clean = False
if not CLEAN_PATH.exists():
    print("Cleaned dataset not found. Performing initial cleaning...")
    force_clean = True
else:
    try:
        if RAW_PATH.stat().st_mtime > CLEAN_PATH.stat().st_mtime:
            print("Detected changes in the raw dataset. Re-cleaning...")
            force_clean = True
        else:
            print(f"Using existing cleaned dataset → {CLEAN_PATH}")
    except FileNotFoundError:
        # RAW_PATH vanished between exists() and stat(); re-clean defensively.
        print("File system synchronization error. Performing re-cleaning for safety...")
        force_clean = True

if force_clean:
    if not RAW_PATH.exists():
        raise FileNotFoundError(f"ERROR: Source file {RAW_PATH} not found. Check the path.")
    print("Cleaning up the dataset from garbage (wrong separators, extra spaces)...")
    text = RAW_PATH.read_text(encoding="utf-8")
    text = re.sub(r' {2,}', ' ', text)          # collapse runs of spaces
    text = text.replace(" \n", "\n").replace("\n ", "\n")  # strip spaces around newlines
    CLEAN_PATH.write_text(text, encoding="utf-8")
    print(f"Dataset successfully cleaned and saved → {CLEAN_PATH}")

DATASET_PATH = CLEAN_PATH
OUTPUT_DIR = Path("build/fine_tuning_output")
MODEL_SAVE_NAME = "gpt_finetuned.script.pt"


# ============================= DATASET (LAZY) =============================
class LazyTextDataset(IterableDataset):
    """Lazy memory-efficient dataset, splits on-the-fly into train and val.

    The whole file is tokenized once in __init__; iteration then yields
    (input, label) pairs of length seq_len, with labels shifted by one token.
    """

    # NOTE: tokenizer_name still defaults to "gpt2" (remote hub name), but
    # train() passes a local folder path so no network access is required.
    def __init__(self, text_file, seq_len=TRAIN_SEQ_LEN, tokenizer_name="gpt2",
                 split_type='train', val_ratio=VAL_SPLIT_RATIO):
        self.seq_len = seq_len
        # Loads the tokenizer from a local folder when a local path is given.
        self.tokenizer = GPT2TokenizerFast.from_pretrained(tokenizer_name)
        self.tokenizer.pad_token = self.tokenizer.eos_token
        self.text_file = text_file
        self.split_type = split_type
        self.val_ratio = val_ratio

        print(f"Loading and tokenizing text from {text_file}")
        with open(text_file, "r", encoding="utf-8") as f:
            self.data = f.read()
        self.tokens = self.tokenizer.encode(self.data)

        # Work out split indices
        total_tokens = len(self.tokens) - 1  # because label sequence shifted
        total_batches = total_tokens // seq_len
        val_size = int(total_batches * self.val_ratio)
        train_size = total_batches - val_size

        if split_type == 'train':
            self.start = 0
            self.stop = train_size
        elif split_type == 'val':
            self.start = train_size
            self.stop = train_size + val_size
        else:
            raise ValueError(f"split_type should be 'train' or 'val', got {split_type}")

        self.total_sequences = self.stop - self.start
        print(f"Lazy dataset: {self.total_sequences:,} sequences for {split_type} split "
              f"(from {total_batches:,} total)")

    def __iter__(self):
        """Yield (input_seq, label_seq) LongTensor pairs for this split."""
        for i in range(self.start * self.seq_len, self.stop * self.seq_len, self.seq_len):
            # Make sure last batch fits (labels need one extra token).
            if i + self.seq_len + 1 > len(self.tokens):
                break
            input_seq = torch.tensor(self.tokens[i : i + self.seq_len], dtype=torch.long)
            label_seq = torch.tensor(self.tokens[i + 1 : i + self.seq_len + 1], dtype=torch.long)
            yield input_seq, label_seq

    def __len__(self):
        return self.total_sequences


# ============================= GET LOGITS UTIL =============================
def get_logits_from_model(model, inputs):
    """
    Robust wrapper to call either a scripted JIT model or nn.Module.
    Handles models that return either (logits, kv) or just logits.
    """
    # Ensure inputs are on the same device as the model parameters/buffers.
    inputs = inputs.to(device)
    out = model(inputs)
    # Model may return logits or a (logits, kv) tuple/list; guard the length
    # so an empty container is returned as-is rather than raising IndexError.
    if isinstance(out, (tuple, list)) and len(out) > 0:
        return out[0]
    return out


# ============================= EVALUATION (VALIDATION) =============================
def evaluate(model, dataloader, criterion, device):
    """Return the average cross-entropy loss over dataloader (no gradients)."""
    model.eval()
    total_loss = 0.0
    count = 0
    with torch.no_grad():
        for inputs, targets in dataloader:
            inputs, targets = inputs.to(device), targets.to(device)
            logits = get_logits_from_model(model, inputs)
            logits = logits.contiguous().view(-1, logits.size(-1))
            # Truncate targets in case the model emitted fewer positions.
            targets = targets.contiguous().view(-1)[:logits.shape[0]]
            loss = criterion(logits, targets)
            total_loss += loss.item()
            count += 1
    avg_loss = total_loss / max(count, 1)
    model.train()
    return avg_loss


# ============================= CLEANUP OLD EPOCHS =============================
def cleanup_old_epochs(keep_last=KEEP_LAST_EPOCHS):
    """Delete all but the newest keep_last epoch checkpoint directories."""
    epochs = sorted([p for p in OUTPUT_DIR.glob("epoch*") if p.is_dir()],
                    key=lambda x: int(x.name.replace("epoch", "")))
    for old in epochs[:-keep_last]:
        if old.exists():
            shutil.rmtree(old)
            print(f"Old epoch deleted: {old.name}")


# ============================= TRAINING =============================
def train():
    """Run the full fine-tuning loop: load JIT model, train, validate, save."""
    OUTPUT_DIR.mkdir(parents=True, exist_ok=True)

    print("Loading model...")
    model = None
    if LAST_TRAINED_PATH.exists():
        print(f"Continuing training from last JIT model: {LAST_TRAINED_PATH}")
        model = torch.jit.load(LAST_TRAINED_PATH, map_location=device)
    elif BASE_MODEL_PATH.exists():
        print(f"Starting from base JIT model: {BASE_MODEL_PATH}")
        model = torch.jit.load(BASE_MODEL_PATH, map_location=device)
    else:
        print(f"ERROR: JIT model not found. Checked paths: {BASE_MODEL_PATH} and {LAST_TRAINED_PATH}")
        print("Please run the JIT export script (e.g., 'model_export.py') first.")
        return

    # Sometimes torch.jit.load with map_location doesn't move every internal buffer.
    # Force a device move for ScriptModule, wrapped in try/except for compatibility.
    try:
        model.to(device)
    except Exception:
        # If ScriptModule.to fails there is no reliable fallback; continue best-effort.
        pass

    # Extra safety: try to move stray buffers (e.g. rotary freqs) to the device.
    # NOTE(review): register_buffer rejects dotted names from named_buffers(),
    # so nested buffers are silently skipped here — the inner try absorbs it.
    try:
        for name, buf in model.named_buffers():
            if buf is not None and buf.device != device:
                try:
                    model.register_buffer(name, buf.to(device))
                except Exception:
                    # Some ScriptModule buffers may not be re-registerable; ignore.
                    pass
    except Exception:
        pass

    # Sanity check: abort with distinct exit codes if weights contain NaN/Inf.
    try:
        for n, p in model.named_parameters():
            if torch.isnan(p).any():
                print(f"[FATAL] NaN in weights: {n}")
                raise SystemExit(10)
            if torch.isinf(p).any():
                print(f"[FATAL] Inf in weights: {n}")
                raise SystemExit(11)
    except SystemExit:
        raise
    except Exception:
        # Some JIT modules may not expose named_parameters() - ignore if unavailable.
        pass

    model.train()
    try:
        model.gradient_checkpointing_enable()
        print("✅ Gradient Checkpointing Enabled.")
    except Exception:
        print("⚠️ Warning: model.gradient_checkpointing_enable() not found on JIT model. Training will proceed without GC.")

    # =========================================================================
    # FINAL FIX: use the LOCAL tokenizer folder (no Hugging Face hub access).
    # =========================================================================
    LOCAL_TOKENIZER_PATH = "./tokenizer"  # folder where the tokenizer files were downloaded

    train_dataset = LazyTextDataset(DATASET_PATH, seq_len=TRAIN_SEQ_LEN,
                                    tokenizer_name=LOCAL_TOKENIZER_PATH,
                                    split_type='train', val_ratio=VAL_SPLIT_RATIO)
    val_dataset = LazyTextDataset(DATASET_PATH, seq_len=TRAIN_SEQ_LEN,
                                  tokenizer_name=LOCAL_TOKENIZER_PATH,
                                  split_type='val', val_ratio=VAL_SPLIT_RATIO)
    # =========================================================================

    # IterableDataset: must use drop_last=True and shuffle=False, num_workers=0 on CPU/GPU
    train_dataloader = DataLoader(train_dataset, batch_size=BATCH_SIZE, shuffle=False,
                                  drop_last=True, num_workers=0)
    val_dataloader = DataLoader(val_dataset, batch_size=BATCH_SIZE, shuffle=False,
                                drop_last=True, num_workers=0)

    optimizer = optim.AdamW(model.parameters(), lr=LEARNING_RATE, weight_decay=WEIGHT_DECAY)
    criterion = nn.CrossEntropyLoss()

    total_steps = (len(train_dataset) // BATCH_SIZE) * EPOCHS
    print(f"\n=== BEGINNING LONG-TERM TRAINING ===")
    print(f"Epochs: {EPOCHS} | Steps (Train): {total_steps} | Examples (Train): {len(train_dataset)}")
    print(f"Batch Size (Effective): {BATCH_SIZE} | Precision: FP32")

    global_step = 0
    for epoch in range(1, EPOCHS + 1):
        print(f"\n--- Epoch {epoch}/{EPOCHS} ---")
        epoch_loss = 0.0

        with tqdm(train_dataloader, desc=f"Epoch {epoch} [TRAIN]", leave=False) as pbar:
            for inputs, targets in pbar:
                inputs, targets = inputs.to(device), targets.to(device)
                optimizer.zero_grad()

                logits = get_logits_from_model(model, inputs)
                logits = logits.contiguous().view(-1, logits.size(-1))
                targets_view = targets.contiguous().view(-1)[:logits.shape[0]]

                loss = criterion(logits, targets_view)
                loss.backward()
                try:
                    torch.nn.utils.clip_grad_norm_(model.parameters(), GRAD_CLIP)
                except Exception:
                    pass
                optimizer.step()

                loss_val = loss.item()
                epoch_loss += loss_val
                global_step += 1
                pbar.set_postfix({
                    "loss": f"{loss_val:.3f}",
                    # cap the exponent so perplexity stays displayable early in training
                    "ppl": f"{math.exp(min(loss_val, 10)):.1f}",
                    "step": f"{global_step}"
                })

        avg_train_loss = epoch_loss / max(1, len(train_dataset) // BATCH_SIZE)
        print(f" [TRAIN] Average loss: {avg_train_loss:.3f} | PPL: {math.exp(avg_train_loss):.1f}")

        print(" [VALIDATION] Starting evaluation...")
        val_loss = evaluate(model, val_dataloader, criterion, device)
        print(f" [VALIDATION] Average loss: {val_loss:.3f} | PPL: {math.exp(val_loss):.1f}")

        epoch_dir = OUTPUT_DIR / f"epoch{epoch}"
        epoch_dir.mkdir(exist_ok=True)
        try:
            torch.jit.save(model, epoch_dir / MODEL_SAVE_NAME)
            print(f"Model saved: {epoch_dir / MODEL_SAVE_NAME}")
        except Exception:
            # If saving scripted model fails, fallback to state_dict
            torch.save(model.state_dict(), epoch_dir / "state_dict.pt")
            print(f"State dict saved: {epoch_dir / 'state_dict.pt'}")
        cleanup_old_epochs()

    final_dir = OUTPUT_DIR / "final"
    final_dir.mkdir(exist_ok=True)
    try:
        torch.jit.save(model, final_dir / MODEL_SAVE_NAME)
    except Exception:
        torch.save(model.state_dict(), final_dir / "state_dict.pt")

    # Try to save tokenizer if available
    try:
        train_dataset.tokenizer.save_pretrained(final_dir)
    except Exception:
        pass

    # Back up the previous "last trained" model before overwriting it.
    if LAST_TRAINED_PATH.exists():
        backup_path = BACKUP_DIR / f"gpt_last_trained_backup_{int(os.path.getmtime(LAST_TRAINED_PATH))}.script.pt"
        shutil.copy(LAST_TRAINED_PATH, backup_path)
        print(f"Backup of previous model created → {backup_path.name}")
    try:
        shutil.copy(final_dir / MODEL_SAVE_NAME, LAST_TRAINED_PATH)
        print(f"Last trained model saved → {LAST_TRAINED_PATH}")
    except Exception:
        pass

    print(f"\nTRAINING COMPLETED! Model ready:")
    print(f" • For chat: {final_dir / MODEL_SAVE_NAME}")
    print(f" • For further fine-tuning: {LAST_TRAINED_PATH}")


if __name__ == "__main__":
    if not RAW_PATH.exists():
        print(f"ERROR: No file {RAW_PATH}")
        print("Put your text into datasets/dialogues_text.txt")
    else:
        train()