"""
Nexus-Nano Evaluator
Ultra-lightweight 2.8M parameter CNN

Architecture informed by MobileNet efficiency principles
"""

import onnxruntime as ort
import numpy as np
import chess
import logging
from pathlib import Path

logger = logging.getLogger(__name__)


class NexusNanoEvaluator:
    """
    Lightweight evaluator for Nexus-Nano
    Optimized for speed over accuracy
    """
    
    PIECE_VALUES = {
        chess.PAWN: 100,
        chess.KNIGHT: 320,
        chess.BISHOP: 330,
        chess.ROOK: 500,
        chess.QUEEN: 900,
        chess.KING: 0
    }
    
    def __init__(self, model_path: str, num_threads: int = 1):
        """Initialize the ONNX session (single-threaded by default for lowest latency)"""
        
        self.model_path = Path(model_path)
        if not self.model_path.exists():
            raise FileNotFoundError(f"Model not found: {model_path}")
        
        # ONNX session (single-threaded for lowest latency)
        sess_options = ort.SessionOptions()
        sess_options.intra_op_num_threads = num_threads
        sess_options.inter_op_num_threads = num_threads
        sess_options.execution_mode = ort.ExecutionMode.ORT_SEQUENTIAL
        sess_options.graph_optimization_level = ort.GraphOptimizationLevel.ORT_ENABLE_ALL
        
        logger.info(f"Loading Nexus-Nano model from {self.model_path}...")
        self.session = ort.InferenceSession(
            str(self.model_path),
            sess_options=sess_options,
            providers=['CPUExecutionProvider']
        )
        
        self.input_name = self.session.get_inputs()[0].name
        self.output_name = self.session.get_outputs()[0].name
        
        logger.info(f"✅ Model loaded: {self.input_name} -> {self.output_name}")
    
    def fen_to_tensor(self, board: chess.Board) -> np.ndarray:
        """
        Fast 12-channel tensor conversion
        Channels 0-5 hold white pieces (P, N, B, R, Q, K); channels 6-11 hold black pieces
        Optimized for minimal overhead
        """
        tensor = np.zeros((1, 12, 8, 8), dtype=np.float32)
        
        # Piece to channel mapping
        piece_channels = {
            chess.PAWN: 0, chess.KNIGHT: 1, chess.BISHOP: 2,
            chess.ROOK: 3, chess.QUEEN: 4, chess.KING: 5
        }
        
        # Fast piece placement
        for square, piece in board.piece_map().items():
            rank, file = divmod(square, 8)
            channel = piece_channels[piece.piece_type]
            if piece.color == chess.BLACK:
                channel += 6
            tensor[0, channel, rank, file] = 1.0
        
        return tensor
    
    def evaluate_neural(self, board: chess.Board) -> float:
        """
        Fast neural evaluation
        Single forward pass, minimal post-processing
        """
        input_tensor = self.fen_to_tensor(board)
        outputs = self.session.run([self.output_name], {self.input_name: input_tensor})
        
        # Raw value from the tanh output head (in [-1, 1])
        raw_value = float(outputs[0][0][0])
        
        # Scale to centipawns
        return raw_value * 300.0  # Slightly lower scale for faster games
    
    def evaluate_material(self, board: chess.Board) -> int:
        """Fast material count"""
        material = 0
        
        for piece_type, value in self.PIECE_VALUES.items():
            if piece_type == chess.KING:
                continue
            white = len(board.pieces(piece_type, chess.WHITE))
            black = len(board.pieces(piece_type, chess.BLACK))
            material += (white - black) * value
        
        return material
    
    def evaluate_hybrid(self, board: chess.Board) -> float:
        """
        Fast hybrid: 85% neural + 15% material
        Higher material weight for stability in fast games
        Returns the score from the side-to-move's perspective
        """
        neural = self.evaluate_neural(board)
        material = self.evaluate_material(board)
        
        hybrid = 0.85 * neural + 0.15 * material
        
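        # Flip sign so the blended score is returned from the side-to-move's
        # perspective (negamax convention)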
        if board.turn == chess.BLACK:
            hybrid = -hybrid
        
        return hybrid
    
    def get_model_size_mb(self) -> float:
        """Return the on-disk model size in megabytes"""
        return self.model_path.stat().st_size / (1024 * 1024)
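

# --- Usage sketch (illustrative, not part of the evaluator API) --------------
# Shows how the evaluator might be wired up for a quick sanity check from the
# command line. The default path "models/nexus_nano.onnx" is an assumption for
# illustration only; point it at the actual exported ONNX file.
if __name__ == "__main__":
    import sys

    logging.basicConfig(level=logging.INFO)

    model_file = sys.argv[1] if len(sys.argv) > 1 else "models/nexus_nano.onnx"
    evaluator = NexusNanoEvaluator(model_file)

    board = chess.Board()  # standard starting position
    print(f"Model size:    {evaluator.get_model_size_mb():.2f} MB")
    print(f"Neural eval:   {evaluator.evaluate_neural(board):.1f} cp")
    print(f"Material eval: {evaluator.evaluate_material(board)} cp")
    print(f"Hybrid eval:   {evaluator.evaluate_hybrid(board):.1f} cp")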