import math

import torch
import torch.nn as nn


def GN(c, groups=16):
    """GroupNorm helper: caps the group count at the channel count so thin
    layers still normalize; every channel width used below divides evenly."""
    return nn.GroupNorm(min(groups, c), c)


class LightResNetCNN(nn.Module):
    """Lightweight CNN backbone (despite the name, there are no residual
    connections). Three pooled stages shrink H and W by 8, three stride-1
    stages refine features, then the height is pooled to a fixed value so
    each width position can later be flattened into one sequence step."""

    def __init__(self, in_channels=1, adaptive_height=8):
        super().__init__()
        self.adaptive_height = adaptive_height
        self.layer1 = nn.Sequential(nn.Conv2d(in_channels, 32, 3, 1, 1), GN(32), nn.ReLU(), nn.MaxPool2d(2, 2))
        self.layer2 = nn.Sequential(nn.Conv2d(32, 64, 3, 1, 1), GN(64), nn.ReLU(), nn.MaxPool2d(2, 2))
        self.layer3 = nn.Sequential(nn.Conv2d(64, 128, 3, 1, 1), GN(128), nn.ReLU(), nn.MaxPool2d(2, 2))
        self.layer4 = nn.Sequential(nn.Conv2d(128, 256, 3, 1, 1), GN(256), nn.ReLU())
        self.layer5 = nn.Sequential(nn.Conv2d(256, 256, 3, 1, 1), GN(256), nn.ReLU())
        self.layer6 = nn.Sequential(nn.Conv2d(256, 128, 3, 1, 1), GN(128), nn.ReLU())
        # Fix the height, leave the (variable) width untouched.
        self.adaptive_pool = nn.AdaptiveAvgPool2d((self.adaptive_height, None))

    def forward(self, x):
        for i in range(1, 7):
            x = getattr(self, f"layer{i}")(x)
        return self.adaptive_pool(x)  # (B, 128, adaptive_height, W // 8)
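

# Quick shape sketch for the backbone (illustration only; the 64x256 input
# geometry is a hypothetical line-image size, not something the model fixes):
def _demo_backbone_shapes():
    cnn = LightResNetCNN(in_channels=1, adaptive_height=8)
    feats = cnn(torch.randn(2, 1, 64, 256))
    # Three MaxPool2d(2, 2) stages: 64x256 -> 8x32; the height is already 8,
    # so the adaptive pool is a no-op here and feats.shape == (2, 128, 8, 32).
    return feats.shape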


class PositionalEncoding(nn.Module):
    """Fixed sinusoidal positional encoding, registered as a buffer so it
    moves with the module (e.g. to GPU) but is never trained."""

    def __init__(self, d_model, max_len=2000):
        super().__init__()
        pe = torch.zeros(max_len, d_model)
        position = torch.arange(0, max_len, dtype=torch.float32).unsqueeze(1)
        div_term = torch.exp(torch.arange(0, d_model, 2, dtype=torch.float32) * (-math.log(10000.0) / d_model))
        pe[:, 0::2] = torch.sin(position * div_term)  # even dimensions
        pe[:, 1::2] = torch.cos(position * div_term)  # odd dimensions
        self.register_buffer("pe", pe.unsqueeze(0))   # (1, max_len, d_model)

    def forward(self, x):
        # x: (B, T, d_model); add the first T positions, broadcast over batch.
        return x + self.pe[:, :x.size(1), :]
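

# Minimal usage sketch (batch_first layout, matching the model below): the
# buffer broadcasts over the batch axis and is truncated to the sequence
# length, so inputs shorter than max_len need no special handling.
def _demo_positional_encoding():
    posenc = PositionalEncoding(d_model=16, max_len=100)
    x = torch.zeros(4, 10, 16)  # (B, T, d_model)
    y = posenc(x)               # (4, 10, 16); y[b, t] == posenc.pe[0, t]
    return y.shape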


class CNN_Transformer_OCR(nn.Module):
    """CNN backbone + Transformer encoder for line OCR. Treats each width
    position of the feature map as one timestep and returns per-timestep
    log-probabilities of shape (B, T, num_classes); transpose to (T, B, C)
    before feeding nn.CTCLoss."""

    def __init__(self, num_classes, d_model=1280, nhead=16, num_layers=8, dropout=0.2):
        super().__init__()
        self.cnn = LightResNetCNN(in_channels=1, adaptive_height=8)
        # The backbone emits 128 channels at a fixed height of 8, so each
        # width position flattens to a 128 * 8 = 1024-dim column.
        self.proj = nn.Linear(128 * 8, d_model)
        self.posenc = PositionalEncoding(d_model)
        encoder_layer = nn.TransformerEncoderLayer(d_model, nhead, batch_first=True, dropout=dropout)
        self.transformer = nn.TransformerEncoder(encoder_layer, num_layers=num_layers)
        self.fc = nn.Linear(d_model, num_classes)

    def forward(self, x):
        f = self.cnn(x)                                 # (B, 128, 8, W')
        B, C, H, W = f.size()
        f = f.permute(0, 3, 1, 2).reshape(B, W, C * H)  # width -> sequence axis
        f = self.posenc(self.proj(f))
        out = self.transformer(f)
        out = self.fc(out)                              # (B, W', num_classes)
        return out.log_softmax(2)
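

# End-to-end training sketch. The model classes above are as defined; the
# charset size, image geometry, and blank index are assumptions made for the
# example. nn.CTCLoss expects (T, B, num_classes), hence the permute.
if __name__ == "__main__":
    num_classes = 80                                  # hypothetical charset incl. CTC blank
    model = CNN_Transformer_OCR(num_classes=num_classes)

    images = torch.randn(2, 1, 64, 512)               # (B, 1, H, W) grayscale lines
    log_probs = model(images)                         # (2, 512 // 8 = 64, 80)

    ctc = nn.CTCLoss(blank=0, zero_infinity=True)     # blank index 0 is an assumption
    targets = torch.randint(1, num_classes, (2, 20))  # dummy label sequences
    input_lengths = torch.full((2,), log_probs.size(1), dtype=torch.long)
    target_lengths = torch.full((2,), 20, dtype=torch.long)
    loss = ctc(log_probs.permute(1, 0, 2), targets, input_lengths, target_lengths)
    print(tuple(log_probs.shape), float(loss))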