"""
Optimized Chess Tokenizer using pure UCI notation.
This achieves ~84 vocab size by:
1. Using only squares (a1-h8) and promotion pieces (q,r,b,n)
2. Decomposing moves into from_square, to_square, (optional) promotion
3. No piece types, no color, no annotations
"""
from __future__ import annotations

import json
import os
from typing import Dict, List, Optional

from transformers import PreTrainedTokenizer


class ChessTokenizer(PreTrainedTokenizer):
    model_input_names = ["input_ids", "attention_mask"]
    vocab_files_names = {"vocab_file": "vocab.json"}

    # Special tokens
    PAD_TOKEN = "[PAD]"
    BOS_TOKEN = "[BOS]"
    EOS_TOKEN = "[EOS]"
    UNK_TOKEN = "[UNK]"

    def __init__(
        self,
        vocab_file: Optional[str] = None,
        vocab: Optional[Dict[str, int]] = None,
        **kwargs,
    ):
        # Initialize special tokens
        self._pad_token = self.PAD_TOKEN
        self._bos_token = self.BOS_TOKEN
        self._eos_token = self.EOS_TOKEN
        self._unk_token = self.UNK_TOKEN
        # Drop any special tokens passed in kwargs so they don't conflict with the class defaults
        kwargs.pop("pad_token", None)
        kwargs.pop("bos_token", None)
        kwargs.pop("eos_token", None)
        kwargs.pop("unk_token", None)
        # Load or create vocabulary
        if vocab is not None:
            self._vocab = vocab
        elif vocab_file is not None and os.path.exists(vocab_file):
            with open(vocab_file, "r", encoding="utf-8") as f:
                self._vocab = json.load(f)
        else:
            self._vocab = self._create_default_vocab()

        # Create reverse mapping
        self._ids_to_tokens = {v: k for k, v in self._vocab.items()}

        super().__init__(
            pad_token=self._pad_token,
            bos_token=self._bos_token,
            eos_token=self._eos_token,
            unk_token=self._unk_token,
            **kwargs,
        )

    def _create_default_vocab(self) -> Dict[str, int]:
        """
        Create a vocabulary with all possible squares and promotion pieces.

        This ensures a deterministic vocab size of exactly 72 tokens:
        4 special tokens + 64 squares + 4 promotion pieces.
        """
        tokens = [self.PAD_TOKEN, self.BOS_TOKEN, self.EOS_TOKEN, self.UNK_TOKEN]

        # All squares a1-h8
        for file in 'abcdefgh':
            for rank in '12345678':
                tokens.append(f"{file}{rank}")

        # Promotion pieces (lowercase for UCI)
        tokens.extend(['q', 'r', 'b', 'n'])

        vocab = {token: idx for idx, token in enumerate(tokens)}
        return vocab

    @classmethod
    def build_vocab_from_dataset(
        cls,
        dataset_name: str = "dlouapre/lichess_2025-01_1M",
        split: str = "train",
        column: str = "text",
        min_frequency: int = 1,
        max_samples: Optional[int] = 100000,
    ) -> "ChessTokenizer":
"""
Build tokenizer from dataset by converting to UCI format.
This will create a vocabulary of ~72-84 tokens.
"""
        from datasets import load_dataset
        from collections import Counter

        dataset = load_dataset(dataset_name, split=split)
        if max_samples is not None:
            dataset = dataset.select(range(min(max_samples, len(dataset))))

        token_counts = Counter()

        # Process games and extract UCI components
        for example in dataset:
            moves = example[column].strip().split()
            for move in moves:
                # Convert extended UCI to decomposed UCI
                uci_tokens = cls._extended_to_uci_tokens(move)
                token_counts.update(uci_tokens)

        # Filter by frequency
        tokens = [
            token for token, count in token_counts.items()
            if count >= min_frequency
        ]

        # Sort for reproducibility
        tokens = sorted(set(tokens))

        # Build vocabulary
        special_tokens = [cls.PAD_TOKEN, cls.BOS_TOKEN, cls.EOS_TOKEN, cls.UNK_TOKEN]
        vocab = {token: idx for idx, token in enumerate(special_tokens + tokens)}

        return cls(vocab=vocab)

    @staticmethod
    def _extended_to_uci_tokens(move: str) -> List[str]:
        """
        Convert extended UCI format to decomposed UCI tokens.
        Input: "WPe2e4" or "BQd8h4(x+)" or "WPe7e8=Q"
        Output: ["e2", "e4"] or ["d8", "h4"] or ["e7", "e8", "q"]
        """
        if len(move) < 6:
            return []
        # Extract squares (characters 2-5, i.e. move[2:6])
        from_sq = move[2:4]
        to_sq = move[4:6]
        tokens = [from_sq, to_sq]
        # Check for promotion
        if "=" in move:
            promo_idx = move.index("=")
            if promo_idx + 1 < len(move):
                promo = move[promo_idx + 1].lower()
                if promo in 'qrbn':
                    tokens.append(promo)

        return tokens

    @property
    def vocab_size(self) -> int:
        return len(self._vocab)

    def get_vocab(self) -> Dict[str, int]:
        return dict(self._vocab)

    def _tokenize(self, text: str) -> List[str]:
        """
        Tokenize a string of moves.
        Input can be either:
        - Extended UCI: "WPe2e4 BPe7e5"
        - Decomposed UCI: "e2 e4 e7 e5"
        """
        tokens = text.strip().split()

        # If tokens look like extended UCI (start with W/B and a piece letter),
        # convert them to decomposed format
        result = []
        for token in tokens:
            if len(token) >= 6 and token[0] in 'WB' and token[1] in 'PNBRQK':
                # Extended format - decompose it
                result.extend(self._extended_to_uci_tokens(token))
            else:
                # Already in simple format or is a square/promotion
                result.append(token)
        return result

    def _convert_token_to_id(self, token: str) -> int:
        return self._vocab.get(token, self._vocab.get(self.UNK_TOKEN, 0))

    def _convert_id_to_token(self, index: int) -> str:
        return self._ids_to_tokens.get(index, self.UNK_TOKEN)

    def convert_tokens_to_string(self, tokens: List[str]) -> str:
        """Convert tokens back to string (space-separated)."""
        special = {self.PAD_TOKEN, self.BOS_TOKEN, self.EOS_TOKEN, self.UNK_TOKEN}
        return " ".join(t for t in tokens if t not in special)

    def save_vocabulary(
        self,
        save_directory: str,
        filename_prefix: Optional[str] = None,
    ) -> tuple:
        if not os.path.isdir(save_directory):
            os.makedirs(save_directory, exist_ok=True)
        vocab_file = os.path.join(
            save_directory,
            (filename_prefix + "-" if filename_prefix else "") + "vocab.json",
        )
        with open(vocab_file, "w", encoding="utf-8") as f:
            json.dump(self._vocab, f, ensure_ascii=False, indent=2)
        return (vocab_file,)
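

# ---------------------------------------------------------------------------
# Minimal usage sketch (illustrative only). It assumes the default 72-token
# vocabulary from _create_default_vocab(); the example moves are arbitrary.
# ---------------------------------------------------------------------------
if __name__ == "__main__":
    tokenizer = ChessTokenizer()
    print(tokenizer.vocab_size)  # 4 special + 64 squares + 4 promotions = 72

    # Extended-UCI moves are decomposed into square tokens before id lookup.
    game = "WPe2e4 BPe7e5 WNg1f3"
    encoded = tokenizer(game)
    print(encoded["input_ids"])

    # Decoding returns the decomposed-UCI string: "e2 e4 e7 e5 g1 f3"
    print(tokenizer.decode(encoded["input_ids"]))

    # Building the vocabulary from a dataset instead (requires `datasets`):
    #   tokenizer = ChessTokenizer.build_vocab_from_dataset(max_samples=10000)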