| """ |
| Custom Chess Tokenizer for the Chess Challenge. |
| |
| This tokenizer uses sub-structural tokenization: each move is decomposed into |
| its components (piece, source square, destination square, suffix) instead of |
| treating the whole move as a single token. |
| |
| Example: WPe2e4 -> [P, e2, e4] (color is implicit from move number) |
| BNg8f6(x) -> [N, g8, f6, (x)] |
| |
| This approach: |
| - Reduces vocabulary from ~1200 to ~80 tokens |
| - Enables generalization across similar moves |
| - Eliminates [UNK] tokens for rare moves |
| - Saves parameters in the embedding layer |
| |
| The dataset format uses: |
| - W/B prefix for White/Black (ignored - implicit from position) |
| - Piece letter: P=Pawn, N=Knight, B=Bishop, R=Rook, Q=Queen, K=King |
| - Source and destination squares (e.g., e2e4) |
| - Special suffixes: (x)=capture, (+)=check, (+*)=checkmate, (o)/(O)=castling |
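
Usage sketch (the import path is an assumption; adjust it to wherever this
module lives in your project):

    >>> from chess_tokenizer import ChessTokenizer
    >>> tok = ChessTokenizer.build_vocab()
    >>> tok.tokenize("WPe2e4 BNg8f6")
    ['P', 'e2', 'e4', 'N', 'g8', 'f6']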
| """ |

from __future__ import annotations

import json
import os
import re
from typing import Dict, List, Optional, Tuple

from transformers import PreTrainedTokenizer


# Extended UCI move: color prefix, piece letter, source square, destination
# square, and an optional parenthesized suffix (e.g., WPe2e4, BNg8f6(x)).
MOVE_PATTERN = re.compile(
    r'^([WB])([PNBRQK])([a-h])([1-8])([a-h])([1-8])(\([^)]+\))?$'
)
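# A worked example, following directly from the pattern above:
#   MOVE_PATTERN.match("BNg8f6(x)").groups()
#   -> ('B', 'N', 'g', '8', 'f', '6', '(x)')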


class ChessTokenizer(PreTrainedTokenizer):
    """
    A custom tokenizer for chess moves using sub-structural tokenization.

    Each move is decomposed into components:
    - Piece type (P, N, B, R, Q, K)
    - Source square (e2, d7, etc.)
    - Destination square (e4, f6, etc.)
    - Optional suffix for captures/checks ((x), (+), (+*), (o), (O))

    The color (W/B) is NOT tokenized as it's implicit from the move order.

    Example:
        >>> tokenizer = ChessTokenizer.build_vocab()
        >>> tokenizer.encode("WPe2e4 BPe7e5", add_special_tokens=False)
        [4, 43, 45, 4, 48, 46]  # [P, e2, e4, P, e7, e5]
    """

    model_input_names = ["input_ids", "attention_mask"]
    vocab_files_names = {"vocab_file": "vocab.json"}

    PAD_TOKEN = "[PAD]"
    BOS_TOKEN = "[BOS]"
    EOS_TOKEN = "[EOS]"
    UNK_TOKEN = "[UNK]"

    def __init__(
        self,
        vocab_file: Optional[str] = None,
        vocab: Optional[Dict[str, int]] = None,
        **kwargs,
    ):
        """
        Initialize the chess tokenizer.

        Args:
            vocab_file: Path to a JSON file containing the vocabulary mapping.
            vocab: Dictionary mapping tokens to IDs (alternative to vocab_file).
            **kwargs: Additional arguments passed to PreTrainedTokenizer.
        """
        self._pad_token = self.PAD_TOKEN
        self._bos_token = self.BOS_TOKEN
        self._eos_token = self.EOS_TOKEN
        self._unk_token = self.UNK_TOKEN

        # The special tokens are fixed for this tokenizer; drop any
        # caller-supplied overrides so they don't clash with the values
        # passed to super().__init__() below.
        kwargs.pop("pad_token", None)
        kwargs.pop("bos_token", None)
        kwargs.pop("eos_token", None)
        kwargs.pop("unk_token", None)

        if vocab is not None:
            self._vocab = vocab
        elif vocab_file is not None and os.path.exists(vocab_file):
            with open(vocab_file, "r", encoding="utf-8") as f:
                self._vocab = json.load(f)
        else:
            # No vocabulary supplied: fall back to the deterministic
            # sub-structural vocabulary.
            self._vocab = self._create_default_vocab()

        self._ids_to_tokens = {v: k for k, v in self._vocab.items()}

        # The vocab must exist before the base constructor runs, since
        # PreTrainedTokenizer may call get_vocab() and the conversion hooks.
        super().__init__(
            pad_token=self._pad_token,
            bos_token=self._bos_token,
            eos_token=self._eos_token,
            unk_token=self._unk_token,
            **kwargs,
        )

    def _create_default_vocab(self) -> Dict[str, int]:
        """
        Create the full sub-structural vocabulary.

        The vocabulary contains:
        - 4 special tokens: [PAD], [BOS], [EOS], [UNK]
        - 6 piece tokens: P, N, B, R, Q, K
        - 64 square tokens: a1, a2, ..., h8
        - 5 suffix tokens: (x), (+), (+*), (o), (O)
        - 4 promotion tokens: =Q, =R, =B, =N

        Total: 83 tokens (vs ~1200 for move-level tokenization)
        """
        tokens = []

        # Special tokens first, so [PAD] gets ID 0.
        special_tokens = [self.PAD_TOKEN, self.BOS_TOKEN, self.EOS_TOKEN, self.UNK_TOKEN]
        tokens.extend(special_tokens)

        # Piece types.
        pieces = ['P', 'N', 'B', 'R', 'Q', 'K']
        tokens.extend(pieces)

        # All 64 board squares: a1..a8, b1..b8, ..., h1..h8.
        files = 'abcdefgh'
        ranks = '12345678'
        for f in files:
            for r in ranks:
                tokens.append(f + r)

        # Move suffixes (capture, check, checkmate, castling).
        suffixes = ['(x)', '(+)', '(+*)', '(o)', '(O)']
        tokens.extend(suffixes)

        # Pawn promotion targets.
        promotion_pieces = ['=Q', '=R', '=B', '=N']
        tokens.extend(promotion_pieces)

        vocab = {token: idx for idx, token in enumerate(tokens)}
        return vocab

    @classmethod
    def build_vocab(cls) -> "ChessTokenizer":
        """
        Build a tokenizer with the pre-defined sub-structural vocabulary.

        This is the recommended way to create a tokenizer for the chess challenge.
        The vocabulary is deterministic and covers all possible moves.

        Returns:
            A ChessTokenizer with the full sub-structural vocabulary (83 tokens).
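
        Example:
            >>> tok = ChessTokenizer.build_vocab()
            >>> tok.vocab_size
            83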
| """ |
| return cls() |

    @classmethod
    def build_vocab_from_iterator(
        cls,
        iterator,
        min_frequency: int = 1,
    ) -> "ChessTokenizer":
        """
        Build a tokenizer vocabulary from an iterator of game strings.

        Note: With sub-structural tokenization, this method is mainly useful
        for analyzing token frequencies. The default vocabulary already covers
        all possible moves, so the arguments are accepted for API compatibility
        but ignored.

        Args:
            iterator: An iterator yielding game strings (space-separated moves).
            min_frequency: Minimum frequency for a token to be included.

        Returns:
            A ChessTokenizer with the default sub-structural vocabulary.
        """
        # The sub-structural vocabulary is closed; nothing needs to be
        # learned from data.
        return cls()

    @classmethod
    def build_vocab_from_dataset(
        cls,
        dataset_name: str = "dlouapre/lichess_2025-01_1M",
        split: str = "train",
        column: str = "text",
        min_frequency: int = 500,
        max_samples: Optional[int] = 100000,
    ) -> "ChessTokenizer":
        """
        Build a tokenizer vocabulary from a Hugging Face dataset.

        Note: With sub-structural tokenization, the vocabulary is pre-defined
        and doesn't need to be built from data. This method is kept for
        compatibility but simply returns a tokenizer with the default vocab.

        Args:
            dataset_name: Name of the dataset on Hugging Face Hub.
            split: Dataset split to use.
            column: Column containing the game strings.
            min_frequency: Minimum frequency for a token to be included.
            max_samples: Maximum number of samples to process.

        Returns:
            A ChessTokenizer with the full sub-structural vocabulary.
        """
        # See count_vocab_from_dataset() below for actual frequency analysis.
        return cls()

    @property
    def vocab_size(self) -> int:
        """Return the size of the vocabulary."""
        return len(self._vocab)

    def get_vocab(self) -> Dict[str, int]:
        """Return the vocabulary as a dictionary."""
        return dict(self._vocab)

    def _parse_move(self, move: str) -> List[str]:
        """
        Parse a single move into its sub-components.

        Args:
            move: A move in extended UCI notation (e.g., WPe2e4, BNg8f6(x))

        Returns:
            List of tokens: [piece, src_square, dst_square, suffix?]
            Color (W/B) is ignored as it's implicit from move order.
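
        Example (exercising both the normal and the promotion pattern):
            >>> tok = ChessTokenizer.build_vocab()
            >>> tok._parse_move("BNg8f6(x)")
            ['N', 'g8', 'f6', '(x)']
            >>> tok._parse_move("WPe7e8Q")
            ['P', 'e7', 'e8', '=Q']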
| """ |
| |
| match = MOVE_PATTERN.match(move) |
| if match: |
| color, piece, src_file, src_rank, dst_file, dst_rank, suffix = match.groups() |
| tokens = [piece, src_file + src_rank, dst_file + dst_rank] |
| if suffix: |
| tokens.append(suffix) |
| return tokens |
|
|
| |
| promo_pattern = re.match( |
| r'^([WB])P([a-h])([1-8])([a-h])([1-8])([QRBN])(\([^)]+\))?$', |
| move |
| ) |
| if promo_pattern: |
| color, src_file, src_rank, dst_file, dst_rank, promo_piece, suffix = promo_pattern.groups() |
| tokens = ['P', src_file + src_rank, dst_file + dst_rank, '=' + promo_piece] |
| if suffix: |
| tokens.append(suffix) |
| return tokens |
|
|
| |
| return [move] |

    def _tokenize(self, text: str) -> List[str]:
        """
        Tokenize a string of moves into sub-structural tokens.

        Each move is decomposed into:
        - Piece type (P, N, B, R, Q, K)
        - Source square (e2, d7, etc.)
        - Destination square (e4, f6, etc.)
        - Optional suffix ((x), (+), etc.)

        Args:
            text: A string of space-separated moves.

        Returns:
            List of sub-tokens.

        Example:
            "WPe2e4 BPe7e5" -> ['P', 'e2', 'e4', 'P', 'e7', 'e5']
| """ |
| tokens = [] |
| moves = text.strip().split() |
| for move in moves: |
| tokens.extend(self._parse_move(move)) |
| return tokens |

    def _convert_token_to_id(self, token: str) -> int:
        """Convert a token to its ID."""
        return self._vocab.get(token, self._vocab.get(self.UNK_TOKEN, 0))

    def _convert_id_to_token(self, index: int) -> str:
        """Convert an ID to its token."""
        return self._ids_to_tokens.get(index, self.UNK_TOKEN)

    def convert_tokens_to_string(self, tokens: List[str]) -> str:
        """
        Convert a list of sub-tokens back to a string of moves.

        Reconstructs moves from their components. Each move consists of:
        - Piece token (P, N, B, R, Q, K)
        - Source square (e2, d7, etc.)
        - Destination square (e4, f6, etc.)
        - Optional suffix ((x), (+), etc.) or promotion (=Q, =R, etc.)

        The W/B color prefix was never tokenized, so it is not reconstructed.

        Args:
            tokens: List of sub-tokens.

        Returns:
            Space-separated string of reconstructed moves.
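
        Example:
            >>> tok = ChessTokenizer.build_vocab()
            >>> tok.convert_tokens_to_string(['P', 'e2', 'e4', 'N', 'g8', 'f6', '(x)'])
            'Pe2e4 Ng8f6(x)'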
| """ |
| special = {self.PAD_TOKEN, self.BOS_TOKEN, self.EOS_TOKEN, self.UNK_TOKEN} |
| pieces = {'P', 'N', 'B', 'R', 'Q', 'K'} |
| suffixes = {'(x)', '(+)', '(+*)', '(o)', '(O)'} |
| promotions = {'=Q', '=R', '=B', '=N'} |
|
|
| moves = [] |
| current_move = [] |
|
|
| for token in tokens: |
| if token in special: |
| continue |
|
|
| if token in pieces: |
| |
| if current_move: |
| moves.append(''.join(current_move)) |
| current_move = [token] |
| elif token in suffixes or token in promotions: |
| |
| current_move.append(token) |
| else: |
| |
| current_move.append(token) |
|
|
| |
| if current_move: |
| moves.append(''.join(current_move)) |
|
|
| return " ".join(moves) |

    def save_vocabulary(
        self,
        save_directory: str,
        filename_prefix: Optional[str] = None,
    ) -> Tuple[str]:
        """
        Save the vocabulary to a JSON file.

        Args:
            save_directory: Directory to save the vocabulary.
            filename_prefix: Optional prefix for the filename.

        Returns:
            Tuple containing the path to the saved vocabulary file.
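
        Example (a sketch; the directory name is arbitrary):
            paths = tokenizer.save_vocabulary("./chess_tok")
            reloaded = ChessTokenizer(vocab_file=paths[0])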
| """ |
| if not os.path.isdir(save_directory): |
| os.makedirs(save_directory, exist_ok=True) |
| |
| vocab_file = os.path.join( |
| save_directory, |
| (filename_prefix + "-" if filename_prefix else "") + "vocab.json", |
| ) |
| |
| with open(vocab_file, "w", encoding="utf-8") as f: |
| json.dump(self._vocab, f, ensure_ascii=False, indent=2) |
| |
| return (vocab_file,) |


def count_vocab_from_dataset(
    dataset_name: str = "dlouapre/lichess_2025-01_1M",
    split: str = "train",
    column: str = "text",
    max_samples: Optional[int] = 10000,
) -> Dict[str, int]:
    """
    Count sub-token frequencies in a dataset (useful for vocabulary analysis).

    Args:
        dataset_name: Name of the dataset on Hugging Face Hub.
        split: Dataset split to use.
        column: Column containing the game strings.
        max_samples: Maximum number of samples to process.

    Returns:
        Dictionary mapping sub-tokens to their frequencies.
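
    Example (requires the `datasets` library and network access to the
    Hugging Face Hub; expect a download on first use):
        counts = count_vocab_from_dataset(max_samples=1000)
        top10 = sorted(counts.items(), key=lambda kv: -kv[1])[:10]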
| """ |
| from collections import Counter |
| from datasets import load_dataset |
|
|
| dataset = load_dataset(dataset_name, split=split) |
|
|
| if max_samples is not None: |
| dataset = dataset.select(range(min(max_samples, len(dataset)))) |
|
|
| |
| tokenizer = ChessTokenizer() |
| token_counts = Counter() |
|
|
| for example in dataset: |
| sub_tokens = tokenizer._tokenize(example[column]) |
| token_counts.update(sub_tokens) |
|
|
| return dict(token_counts) |
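

if __name__ == "__main__":
    # Minimal smoke test; a sketch that uses only the default vocabulary
    # and no external data. Expected vocabulary size is 83.
    tok = ChessTokenizer.build_vocab()
    print(f"Vocabulary size: {tok.vocab_size}")

    game = "WPe2e4 BPe7e5 WNg1f3 BNb8c6"
    ids = tok.encode(game, add_special_tokens=False)
    print(f"Encoded: {ids}")

    # Round-trip; the W/B color prefixes are not recoverable by design.
    print(f"Decoded: {tok.decode(ids)}")  # -> Pe2e4 Pe7e5 Ng1f3 Nb8c6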