upload
Browse files- .gitattributes +1 -0
- anyup.py +479 -0
- attention.py +146 -0
- config.json +44 -0
- configuration_falcon_perception.py +77 -0
- main_fig.jpg +3 -0
- model.safetensors +3 -0
- model_args.json +37 -0
- modeling_falcon_perception.py +935 -0
- processing_falcon_perception.py +423 -0
- rope.py +127 -0
- special_tokens_map.json +380 -0
- tokenizer.json +0 -0
- tokenizer_config.json +102 -0
.gitattributes
CHANGED
|
@@ -33,3 +33,4 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
|
|
| 33 |
*.zip filter=lfs diff=lfs merge=lfs -text
|
| 34 |
*.zst filter=lfs diff=lfs merge=lfs -text
|
| 35 |
*tfevents* filter=lfs diff=lfs merge=lfs -text
|
|
|
|
|
|
| 33 |
*.zip filter=lfs diff=lfs merge=lfs -text
|
| 34 |
*.zst filter=lfs diff=lfs merge=lfs -text
|
| 35 |
*tfevents* filter=lfs diff=lfs merge=lfs -text
|
| 36 |
+
main_fig.jpg filter=lfs diff=lfs merge=lfs -text
|
anyup.py
ADDED
|
@@ -0,0 +1,479 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""
|
| 2 |
+
AnyUp – flattened into a single module for HuggingFace trust_remote_code compatibility.
|
| 3 |
+
|
| 4 |
+
Original package structure:
|
| 5 |
+
anyup/layers/convolutions.py → ResBlock
|
| 6 |
+
anyup/layers/feature_unification.py → LearnedFeatureUnification
|
| 7 |
+
anyup/layers/positional_encoding.py → RoPE (AnyUp-internal)
|
| 8 |
+
anyup/layers/attention/attention_masking.py → window2d, compute_attention_mask, get_attention_mask_mod
|
| 9 |
+
anyup/layers/attention/chunked_attention.py → FlexCrossAttention, CrossAttentionBlock
|
| 10 |
+
anyup/model.py → AnyUp
|
| 11 |
+
"""
|
| 12 |
+
|
| 13 |
+
import torch
|
| 14 |
+
import torch.nn as nn
|
| 15 |
+
import torch.nn.functional as F
|
| 16 |
+
import einops as E
|
| 17 |
+
from typing import Tuple
|
| 18 |
+
from functools import lru_cache
|
| 19 |
+
from torch.nn.attention.flex_attention import flex_attention
|
| 20 |
+
from torch.distributed.tensor import DTensor, distribute_tensor
|
| 21 |
+
|
| 22 |
+
# Compile flex_attention once with dynamic shapes so a single compiled graph
# serves every (query-length, key-length) combination without recompilation.
compiled_flex_attn_prefill = torch.compile(flex_attention, dynamic=True)
|
| 23 |
+
|
| 24 |
+
# ---------------------------------------------------------------------------
|
| 25 |
+
# ResBlock (from layers/convolutions.py)
|
| 26 |
+
# ---------------------------------------------------------------------------
|
| 27 |
+
|
| 28 |
+
class ResBlock(nn.Module):
    """Pre-activation residual block: (norm -> act -> conv) twice plus a skip.

    The skip path is a 1x1 convolution when the channel count changes (or when
    ``use_conv_shortcut`` is set); otherwise it is the identity.
    """

    def __init__(
        self,
        in_channels,
        out_channels,
        kernel_size=3,
        num_groups=8,
        pad_mode="zeros",
        norm_fn=None,
        activation_fn=nn.SiLU,
        use_conv_shortcut=False,
    ):
        super().__init__()

        def make_norm(channels):
            # norm_fn is expected to take (num_groups, channels), e.g. nn.GroupNorm.
            return norm_fn(num_groups, channels) if norm_fn else nn.Identity()

        def make_conv(c_in, c_out):
            return nn.Conv2d(
                c_in,
                c_out,
                kernel_size,
                padding=kernel_size // 2,
                padding_mode=pad_mode,
                bias=False,
            )

        self.block = nn.Sequential(
            make_norm(in_channels),
            activation_fn(),
            make_conv(in_channels, out_channels),
            make_norm(out_channels),
            activation_fn(),
            make_conv(out_channels, out_channels),
        )
        if use_conv_shortcut or in_channels != out_channels:
            self.shortcut = nn.Conv2d(
                in_channels, out_channels, 1, bias=False, padding_mode=pad_mode
            )
        else:
            self.shortcut = nn.Identity()

    def forward(self, x):
        """Return the sum of the convolutional path and the shortcut path."""
        return self.block(x) + self.shortcut(x)
|
| 73 |
+
|
| 74 |
+
|
| 75 |
+
# ---------------------------------------------------------------------------
|
| 76 |
+
# LearnedFeatureUnification (from layers/feature_unification.py)
|
| 77 |
+
# ---------------------------------------------------------------------------
|
| 78 |
+
|
| 79 |
+
class LearnedFeatureUnification(nn.Module):
    """Collapse an arbitrary number of input channels to ``out_channels``.

    Every input channel is convolved with a shared learned filter bank, a
    softmax is taken over the bank dimension, and the result is averaged over
    input channels — so the output channel count is independent of the input's.
    """

    def __init__(
        self,
        out_channels: int,
        kernel_size: int = 3,
        init_gaussian_derivatives: bool = False,
    ):
        # NOTE(review): init_gaussian_derivatives is accepted but unused in
        # this flattened version — the basis is always random-normal init.
        super().__init__()
        self.out_channels = out_channels
        self.kernel_size = kernel_size
        # Filter bank shared across input channels: (out_channels, 1, k, k).
        self.basis = nn.Parameter(
            torch.randn(out_channels, 1, kernel_size, kernel_size)
        )

    def forward(self, features: torch.Tensor) -> torch.Tensor:
        b, c, h, w = features.shape
        # NOTE(review): the grouped conv output is input-channel-major while
        # this view reads it bank-major — confirm the intended grouping.
        x = self._depthwise_conv(features, self.basis, self.kernel_size).view(
            b, self.out_channels, c, h, w
        )
        attn = F.softmax(x, dim=1)
        return attn.mean(dim=2)

    @staticmethod
    def _depthwise_conv(feats, basis, k):
        """Convolve every input channel with every basis filter (depthwise),
        normalizing zero-padded borders by the per-pixel valid-kernel area."""
        b, c, h, w = feats.shape
        p = k // 2
        x = F.pad(feats, (p, p, p, p), value=0)
        x = F.conv2d(x, basis.repeat(c, 1, 1, 1), groups=c)
        # Border correction: divide by how much of the kernel window lies
        # inside the image at each position.
        mask = torch.ones(1, 1, h, w, dtype=x.dtype, device=x.device)
        denom = F.conv2d(
            F.pad(mask, (p, p, p, p), value=0),
            torch.ones(1, 1, k, k, device=x.device, dtype=x.dtype),
        )
        return x / denom
|
| 113 |
+
|
| 114 |
+
|
| 115 |
+
# ---------------------------------------------------------------------------
|
| 116 |
+
# RoPE (from layers/positional_encoding.py) – AnyUp-internal, separate from
|
| 117 |
+
# the main model's 3D RoPE
|
| 118 |
+
# ---------------------------------------------------------------------------
|
| 119 |
+
|
| 120 |
+
def _rotate_half(x):
|
| 121 |
+
x1, x2 = x.chunk(2, dim=-1)
|
| 122 |
+
return torch.cat((-x2, x1), dim=-1)
|
| 123 |
+
|
| 124 |
+
|
| 125 |
+
class AnyUpRoPE(nn.Module):
    """2D rotary positional embedding for AnyUp (separate from the main
    model's 3D RoPE).

    ``freqs`` maps normalized (row, col) coordinates to per-channel rotation
    angles: the first half of channels carries row frequencies, the second
    half column frequencies.
    """

    def __init__(
        self,
        dim: int,
        theta: int = 100,
    ):
        super().__init__()
        self.dim = dim
        self.theta = theta
        # Allocated uninitialized on purpose; populated later by
        # _device_weight_init(), possibly after DTensor sharding.
        self.freqs = nn.Parameter(torch.empty(2, self.dim))

    def _device_weight_init(self):
        """Fill ``freqs`` with the geometric RoPE frequency ladder, handling
        both plain tensors and DTensor-sharded parameters."""
        if isinstance(self.freqs, DTensor):
            # Build on the local shard's device/dtype, then redistribute.
            target_device = self.freqs.to_local().device
            target_dtype = self.freqs.to_local().dtype
        else:
            target_device = self.freqs.device
            target_dtype = self.freqs.dtype

        # Geometric ladder theta^0 .. theta^-1 over dim // 4 entries,
        # duplicated so each (cos, sin) channel pair shares a frequency.
        freqs_1d = self.theta ** torch.linspace(
            0, -1, self.dim // 4, device=target_device, dtype=target_dtype
        )
        freqs_1d = torch.cat([freqs_1d, freqs_1d])
        freqs_2d = torch.zeros(2, self.dim, device=target_device, dtype=target_dtype)
        # Row frequencies in the first half of channels, column frequencies
        # in the second half.
        freqs_2d[0, : self.dim // 2] = freqs_1d
        freqs_2d[1, -self.dim // 2 :] = freqs_1d
        freqs_2d.mul_(2 * torch.pi)

        with torch.no_grad():
            if isinstance(self.freqs, DTensor):
                dist_freqs = distribute_tensor(
                    freqs_2d, self.freqs.device_mesh, placements=self.freqs.placements
                )
                self.freqs.to_local().copy_(dist_freqs.to_local())
            else:
                self.freqs.copy_(freqs_2d)

    def forward(self, x: torch.Tensor, coords: torch.Tensor) -> torch.Tensor:
        """Rotate ``x`` by angles derived from ``coords`` (shape (..., 2))."""
        angle = coords @ self.freqs
        return x * angle.cos() + _rotate_half(x) * angle.sin()
|
| 165 |
+
|
| 166 |
+
|
| 167 |
+
# ---------------------------------------------------------------------------
|
| 168 |
+
# Attention masking (from layers/attention/attention_masking.py)
|
| 169 |
+
# ---------------------------------------------------------------------------
|
| 170 |
+
|
| 171 |
+
def window2d(
|
| 172 |
+
low_res: int | Tuple[int, int],
|
| 173 |
+
high_res: int | Tuple[int, int],
|
| 174 |
+
ratio: float,
|
| 175 |
+
*,
|
| 176 |
+
device: str = "cpu",
|
| 177 |
+
) -> torch.Tensor:
|
| 178 |
+
"""Calculate the lower and upper bounds of row and col for each pixel/position"""
|
| 179 |
+
if isinstance(high_res, int):
|
| 180 |
+
H = W = high_res
|
| 181 |
+
else:
|
| 182 |
+
H, W = high_res
|
| 183 |
+
if isinstance(low_res, int):
|
| 184 |
+
Lh = Lw = low_res
|
| 185 |
+
else:
|
| 186 |
+
Lh, Lw = low_res
|
| 187 |
+
|
| 188 |
+
r_pos = (torch.arange(H, device=device, dtype=torch.float32) + 0.5) / H
|
| 189 |
+
c_pos = (torch.arange(W, device=device, dtype=torch.float32) + 0.5) / W
|
| 190 |
+
pos_r, pos_c = torch.meshgrid(r_pos, c_pos, indexing="ij")
|
| 191 |
+
|
| 192 |
+
r_lo = (pos_r - ratio).clamp(0.0, 1.0)
|
| 193 |
+
r_hi = (pos_r + ratio).clamp(0.0, 1.0)
|
| 194 |
+
c_lo = (pos_c - ratio).clamp(0.0, 1.0)
|
| 195 |
+
c_hi = (pos_c + ratio).clamp(0.0, 1.0)
|
| 196 |
+
|
| 197 |
+
r0 = (r_lo * Lh).floor().long()
|
| 198 |
+
r1 = (r_hi * Lh).ceil().long()
|
| 199 |
+
c0 = (c_lo * Lw).floor().long()
|
| 200 |
+
c1 = (c_hi * Lw).ceil().long()
|
| 201 |
+
|
| 202 |
+
return torch.stack([r0, r1, c0, c1], dim=2)
|
| 203 |
+
|
| 204 |
+
|
| 205 |
+
@lru_cache
def compute_attention_mask(
    high_res_h, high_res_w, low_res_h, low_res_w, window_size_ratio, device="cpu"
):
    """Dense boolean window mask for cross-attention.

    Returns a (H*W, h*w) tensor where True marks key positions that must be
    BLOCKED (the mask is inverted before returning).  Results are cached per
    argument tuple via lru_cache.
    """
    bounds = window2d(
        low_res=(low_res_h, low_res_w),
        high_res=(high_res_h, high_res_w),
        ratio=window_size_ratio,
        device=device,
    )

    n_queries = high_res_h * high_res_w
    flat = bounds.reshape(n_queries, 4)
    r0, r1, c0, c1 = (flat[:, i : i + 1] for i in range(4))

    low_rows = torch.arange(low_res_h, device=device)
    low_cols = torch.arange(low_res_w, device=device)

    # Per query: which low-res rows / cols fall inside its window.
    in_rows = (low_rows >= r0) & (low_rows < r1)
    in_cols = (low_cols >= c0) & (low_cols < c1)

    allowed = (in_rows.unsqueeze(2) & in_cols.unsqueeze(1)).reshape(
        n_queries, low_res_h * low_res_w
    ).to(dtype=torch.bool)

    # Invert: True means "do not attend".
    return ~allowed
|
| 236 |
+
|
| 237 |
+
|
| 238 |
+
def get_attention_mask_mod(
    high_res_h, high_res_w, low_res_h, low_res_w, window_size_ratio=0.1, device="cpu"
):
    """Window attention as in compute_attention_mask, but expressed as a
    FlexAttention mask_mod (returns True where attention IS allowed)."""
    bounds = window2d(
        low_res=(low_res_h, low_res_w),
        high_res=(high_res_h, high_res_w),
        ratio=window_size_ratio,
        device=device,
    )
    r0, r1, c0, c1 = bounds.unbind(dim=-1)

    def _mask_mod(b_idx, h_idx, q_idx, kv_idx):
        # Recover 2D coordinates from the flattened query / key indices.
        q_row = q_idx // high_res_w
        q_col = q_idx % high_res_w
        kv_row = kv_idx // low_res_w
        kv_col = kv_idx % low_res_w
        inside_rows = (kv_row >= r0[q_row, q_col]) & (kv_row < r1[q_row, q_col])
        inside_cols = (kv_col >= c0[q_row, q_col]) & (kv_col < c1[q_row, q_col])
        return inside_rows & inside_cols

    return _mask_mod
|
| 270 |
+
|
| 271 |
+
|
| 272 |
+
# ---------------------------------------------------------------------------
|
| 273 |
+
# Cross-attention (from layers/attention/chunked_attention.py)
|
| 274 |
+
# ---------------------------------------------------------------------------
|
| 275 |
+
|
| 276 |
+
class AttentionWrapper(nn.Module):
    """Fused (q|k|v) input projection in the nn.MultiheadAttention parameter
    layout, but applying only the q and k projections; values pass through."""

    def __init__(self, qk_dim: int):
        super().__init__()
        self.in_proj_weight = nn.Parameter(torch.empty([qk_dim * 3, qk_dim]))
        self.in_proj_bias = nn.Parameter(torch.empty([qk_dim * 3]))

    def forward(self, x_q, x_k, x_v):
        """Project queries and keys; return values unchanged."""
        third = self.in_proj_weight.shape[0] // 3
        w_q = self.in_proj_weight[:third]
        w_k = self.in_proj_weight[third : 2 * third]
        b_q = self.in_proj_bias[:third]
        b_k = self.in_proj_bias[third : 2 * third]
        # NOTE: the v-projection slice of the fused parameters is intentionally
        # unused — values are returned as-is.
        return x_q @ w_q.T + b_q, x_k @ w_k.T + b_k, x_v
|
| 288 |
+
|
| 289 |
+
|
| 290 |
+
class FlexCrossAttention(nn.Module):
    """Multi-head cross-attention executed with compiled FlexAttention.

    Queries and keys are RMS-normalized and linearly projected; values are
    passed through unprojected (see AttentionWrapper).
    """

    def __init__(self, qk_dim: int, num_heads: int, **kwargs):
        super().__init__()
        self.dim = qk_dim
        self.num_head = num_heads
        self.norm_q = nn.RMSNorm(qk_dim)
        self.norm_k = nn.RMSNorm(qk_dim)
        self.attention = AttentionWrapper(qk_dim)

    def forward(self, query, key, value, mask=None, **kwargs):
        # query: (b, HW, d) high-res tokens; key/value: (b, hw, d) low-res.
        x_q = self.norm_q(query)
        x_k = self.norm_k(key)
        x_q, x_k, x_v = self.attention(x_q, x_k, value)
        x_q = E.rearrange(x_q, "b HW (h d) -> b h HW d", h=self.num_head)
        x_k = E.rearrange(x_k, "b hw (h d) -> b h hw d", h=self.num_head)

        # Values come straight from `value` — no normalization or projection.
        x_v = E.rearrange(value, "b hw (h d) -> b h hw d", h=self.num_head)
        output = compiled_flex_attn_prefill(x_q, x_k, x_v, block_mask=mask)
        output = E.rearrange(output, "b h hw d -> b hw (h d)")

        return output
|
| 311 |
+
|
| 312 |
+
|
| 313 |
+
class CrossAttentionBlock(nn.Module):
    """Window-limited cross-attention between a high-res query grid and a
    low-res key/value grid, with a 3x3 conv refining the queries first."""

    def __init__(
        self,
        qk_dim,
        num_heads,
        window_ratio: float = 0.1,
        **kwargs,
    ):
        super().__init__()
        self.cross_attn = FlexCrossAttention(qk_dim, num_heads)
        self.window_ratio = window_ratio
        self.conv2d = nn.Conv2d(
            qk_dim, qk_dim, kernel_size=3, stride=1, padding=1, bias=False
        )

    def forward(self, q, k, v, block_mask, **kwargs):
        # q: (b, c, H, W); k, v: (b, c, h, w) — lower resolution than q.
        b, _, h, w = q.shape

        q = self.conv2d(q)
        # Flatten all three grids into token sequences for attention.
        q = E.rearrange(q, "b c h w -> b (h w) c")
        k = E.rearrange(k, "b c h w -> b (h w) c")
        v = E.rearrange(v, "b c h w -> b (h w) c")

        features = self.cross_attn(q, k, v, mask=block_mask)
        # Restore the high-res spatial layout.
        return E.rearrange(features, "b (h w) c -> b c h w", h=h, w=w)
|
| 338 |
+
|
| 339 |
+
|
| 340 |
+
# ---------------------------------------------------------------------------
|
| 341 |
+
# AnyUp (from model.py)
|
| 342 |
+
# ---------------------------------------------------------------------------
|
| 343 |
+
|
| 344 |
+
# ImageNet channel statistics, shaped (1, 3, 1, 1) for NCHW broadcasting.
IMAGENET_MEAN = torch.tensor([0.485, 0.456, 0.406]).reshape(1, 3, 1, 1)
IMAGENET_STD = torch.tensor([0.229, 0.224, 0.225]).reshape(1, 3, 1, 1)
|
| 346 |
+
|
| 347 |
+
|
| 348 |
+
def create_coordinate(h, w, start=0.0, end=1.0, device=None, dtype=None):
    """Return a (1, h*w, 2) grid of (row, col) coordinates spanning [start, end]."""
    rows = torch.linspace(start, end, h, device=device, dtype=dtype)
    cols = torch.linspace(start, end, w, device=device, dtype=dtype)
    grid = torch.stack(torch.meshgrid(rows, cols, indexing="ij"), dim=-1)
    return grid.reshape(1, h * w, 2)
|
| 353 |
+
|
| 354 |
+
|
| 355 |
+
class AnyUp(nn.Module):
    """Feature upsampler: cross-attends high-resolution image-derived queries
    onto low-resolution feature keys/values to produce features at an
    arbitrary output size."""

    def __init__(
        self,
        input_dim=3,
        qk_dim=128,
        kernel_size=1,
        kernel_size_lfu=5,
        window_ratio=0.1,
        num_heads=4,
        init_gaussian_derivatives=False,
        **kwargs,
    ):
        super().__init__()
        self.qk_dim = qk_dim
        self.window_ratio = window_ratio
        # Shared ResBlock hyper-parameters used by every encoder built below.
        self._rb_args = dict(
            kernel_size=1,
            num_groups=8,
            pad_mode="reflect",
            norm_fn=nn.GroupNorm,
            activation_fn=nn.SiLU,
        )

        self.image_encoder = self._make_encoder(input_dim, kernel_size)
        self.key_encoder = self._make_encoder(qk_dim, 1)
        self.query_encoder = self._make_encoder(qk_dim, 1)
        # first_layer_k > 0 selects a LearnedFeatureUnification front-end,
        # which is input-channel agnostic (hence in_ch=None here).
        self.key_features_encoder = self._make_encoder(
            None,
            1,
            first_layer_k=kernel_size_lfu,
            init_gaussian_derivatives=init_gaussian_derivatives,
        )

        self.cross_decode = CrossAttentionBlock(
            qk_dim=qk_dim, num_heads=num_heads, window_ratio=window_ratio
        )
        self.aggregation = self._make_encoder(2 * qk_dim, 3)

        self.rope = AnyUpRoPE(qk_dim)
        self.rope._device_weight_init()

        # Guards compile() against double-wrapping the encoder submodules.
        self._compiled_encoders = False

    def compile(self, *, mode: str | None = None, dynamic: bool = True):
        # NOTE(review): shadows nn.Module.compile (which compiles the whole
        # module); here only the encoder submodules are compiled. Idempotent.
        if self._compiled_encoders:
            return self
        self.image_encoder = torch.compile(self.image_encoder, dynamic=dynamic, mode=mode)
        self.key_encoder = torch.compile(self.key_encoder, dynamic=dynamic, mode=mode)
        self.query_encoder = torch.compile(self.query_encoder, dynamic=dynamic, mode=mode)
        self.key_features_encoder = torch.compile(
            self.key_features_encoder, dynamic=dynamic, mode=mode
        )
        self.aggregation = torch.compile(self.aggregation, dynamic=dynamic, mode=mode)
        self._compiled_encoders = True
        return self

    def _make_encoder(
        self, in_ch, k, layers=2, first_layer_k=0, init_gaussian_derivatives=False
    ):
        """Build a small encoder: a projection front-end (Conv2d or
        LearnedFeatureUnification) followed by `layers` ResBlocks at qk_dim."""
        pre = (
            nn.Conv2d(
                in_ch,
                self.qk_dim,
                k,
                padding=k // 2,
                padding_mode="reflect",
                bias=False,
            )
            if first_layer_k == 0
            else LearnedFeatureUnification(
                self.qk_dim,
                first_layer_k,
                init_gaussian_derivatives=init_gaussian_derivatives,
            )
        )
        blocks = [
            ResBlock(self.qk_dim, self.qk_dim, **self._rb_args) for _ in range(layers)
        ]
        return nn.Sequential(pre, *blocks)

    def upsample(
        self, enc_img, feats, attn_mask, out_size, vis_attn=False, q_chunk_size=None
    ):
        """Cross-attend queries (at out_size) onto keys/values (at feats' size)."""
        b, c, h, w = feats.shape

        # Queries: encoded image pooled to the requested output resolution.
        q = F.adaptive_avg_pool2d(self.query_encoder(enc_img), output_size=out_size)
        # Keys: encoded image pooled to the low-res grid, concatenated with
        # encoded (L2-normalized) input features, then fused by aggregation.
        k = F.adaptive_avg_pool2d(self.key_encoder(enc_img), output_size=(h, w))
        k = torch.cat([k, self.key_features_encoder(F.normalize(feats, dim=1))], dim=1)
        k = self.aggregation(k)
        # Values are the raw input features — the upsampler only reweights them.
        v = feats

        result = self.cross_decode(
            q, k, v, attn_mask, vis_attn=vis_attn, q_chunk_size=q_chunk_size
        )
        return result

    def forward(
        self,
        images,
        features,
        attn_mask,
        output_size=None,
        vis_attn=False,
        q_chunk_size=None,
    ):
        """Upsample `features` to `output_size` (default: the image resolution).

        Assumes `images` arrive normalized to [-1, 1]: they are remapped to
        [0, 1] and then ImageNet-standardized — TODO confirm against callers.
        """
        output_size = output_size if output_size is not None else images.shape[-2:]
        images = images * 0.5 + 0.5
        images = (images - IMAGENET_MEAN.to(images)) / IMAGENET_STD.to(images)
        images = images.to(features)
        enc = self.image_encoder(images)
        h = enc.shape[-2]
        coords = create_coordinate(h, enc.shape[-1], device=enc.device, dtype=enc.dtype)
        # Apply RoPE over the flattened spatial grid, then restore NCHW.
        enc = enc.permute(0, 2, 3, 1).view(enc.shape[0], -1, enc.shape[1])
        enc = self.rope(enc, coords)
        enc = enc.view(enc.shape[0], h, -1, enc.shape[-1]).permute(0, 3, 1, 2)

        result = self.upsample(
            enc,
            features,
            attn_mask,
            output_size,
            vis_attn=vis_attn,
            q_chunk_size=q_chunk_size,
        )
        return result
|
attention.py
ADDED
|
@@ -0,0 +1,146 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import torch
|
| 2 |
+
from torch import Tensor as T
|
| 3 |
+
from torch.nn.attention.flex_attention import (
|
| 4 |
+
BlockMask,
|
| 5 |
+
_mask_mod_signature,
|
| 6 |
+
and_masks,
|
| 7 |
+
create_block_mask,
|
| 8 |
+
flex_attention,
|
| 9 |
+
or_masks,
|
| 10 |
+
)
|
| 11 |
+
|
| 12 |
+
# ---------------------------------------------------------------------------
|
| 13 |
+
# Two compiled variants of flex_attention
|
| 14 |
+
# ---------------------------------------------------------------------------
|
| 15 |
+
# _decode: fullgraph=True, static shapes.
|
| 16 |
+
# Used for decode steps (S_q == 1) where shapes are fixed and
|
| 17 |
+
# the call will be captured inside a CUDA graph. fullgraph=True
|
| 18 |
+
# avoids graph breaks that would corrupt the capture.
|
| 19 |
+
#
|
| 20 |
+
# _prefill: dynamic=True, symbolic shapes.
|
| 21 |
+
# Used for prefill steps (S_q > 1) where the sequence length
|
| 22 |
+
# varies per image. dynamic=True lets one compiled graph handle
|
| 23 |
+
# all lengths without recompilation. Prefill is never inside a
|
| 24 |
+
# CUDA graph, so symbolic shape guards are fine.
|
| 25 |
+
compiled_flex_attn_decode = torch.compile(flex_attention, fullgraph=True)
|
| 26 |
+
compiled_flex_attn_prefill = torch.compile(flex_attention, dynamic=True)
|
| 27 |
+
|
| 28 |
+
|
| 29 |
+
def offset_mask_mod(mask_mod: _mask_mod_signature, offset: int):
|
| 30 |
+
"""Get a mask mod function with an offset applied to the query positions."""
|
| 31 |
+
|
| 32 |
+
def _mask_mod(b, h, q, kv):
|
| 33 |
+
return mask_mod(b, h, q + offset, kv)
|
| 34 |
+
|
| 35 |
+
return _mask_mod
|
| 36 |
+
|
| 37 |
+
|
| 38 |
+
def get_causal_mask_mod() -> _mask_mod_signature:
|
| 39 |
+
"""Causal mask that prevents attention to future tokens."""
|
| 40 |
+
|
| 41 |
+
def _causal_mask(b: T, h: T, q_idx: T, kv_idx: T) -> T:
|
| 42 |
+
return q_idx >= kv_idx
|
| 43 |
+
|
| 44 |
+
return _causal_mask
|
| 45 |
+
|
| 46 |
+
|
| 47 |
+
def get_document_mask_mod(batch: T, eos_id: int) -> _mask_mod_signature:
|
| 48 |
+
"""Creates a document mask that prevents attention across document boundaries.
|
| 49 |
+
|
| 50 |
+
Args:
|
| 51 |
+
batch: Input batch tensor with shape [b, s, h, d]
|
| 52 |
+
eos_id: End-of-sequence token ID that marks document boundaries
|
| 53 |
+
|
| 54 |
+
Returns:
|
| 55 |
+
A mask modifier function that implements document-level masking.
|
| 56 |
+
"""
|
| 57 |
+
# batch is [b, s, h, d] shape
|
| 58 |
+
eos_mask = batch == eos_id
|
| 59 |
+
eos_mask[:, -1] = True
|
| 60 |
+
cumulative_mask = torch.cumsum(torch.where(eos_mask, 1, 0), dim=1)
|
| 61 |
+
sequence_indices = torch.zeros_like(cumulative_mask, dtype=torch.int32)
|
| 62 |
+
sequence_indices[:, 1:] = cumulative_mask[:, :-1]
|
| 63 |
+
|
| 64 |
+
def document_mask(b: T, h: T, q_idx: T, kv_idx: T) -> T:
|
| 65 |
+
return sequence_indices[b, q_idx] == sequence_indices[b, kv_idx]
|
| 66 |
+
|
| 67 |
+
return document_mask
|
| 68 |
+
|
| 69 |
+
|
| 70 |
+
def get_non_left_pad_mask_mod(batch: T, pad_id: int) -> _mask_mod_signature:
|
| 71 |
+
"""Prevent model from attending to the left-padded token required for correct batch inference."""
|
| 72 |
+
|
| 73 |
+
non_pad_mask_id = torch.cumsum(batch != pad_id, dim=1)
|
| 74 |
+
|
| 75 |
+
# Left-most pad tokens have cumulative id == 0.
|
| 76 |
+
def mask_mod(b, h, q_idx, kv_idx):
|
| 77 |
+
return non_pad_mask_id[b, kv_idx] > 0
|
| 78 |
+
|
| 79 |
+
return mask_mod
|
| 80 |
+
|
| 81 |
+
|
| 82 |
+
def get_image_prefix_mask_mod(
|
| 83 |
+
batch: T, soi_id: int, eoi_id: int
|
| 84 |
+
) -> _mask_mod_signature:
|
| 85 |
+
# batch is [b, s, h, d] shape
|
| 86 |
+
soi_mask = batch == soi_id
|
| 87 |
+
eoi_mask = batch == eoi_id
|
| 88 |
+
acc_soi_mask = torch.cumsum(soi_mask, dim=1)
|
| 89 |
+
acc_eoi_mask = torch.cumsum(eoi_mask, dim=1)
|
| 90 |
+
# Get every tokens between two soi_id and eoi_id exclusive of eoi_id
|
| 91 |
+
img_mask = (acc_soi_mask - acc_eoi_mask) > 0
|
| 92 |
+
|
| 93 |
+
# Create a tensor that assigns each token to its image number
|
| 94 |
+
# Each image starts with SOI token, so we can use acc_soi_mask to track image numbers
|
| 95 |
+
img_indices = acc_soi_mask * img_mask
|
| 96 |
+
|
| 97 |
+
def image_prefix_mask_mod(b, h, q_idx, kv_idx):
|
| 98 |
+
# Check if both tokens are image tokens and belong to the same image
|
| 99 |
+
is_img_tokens = img_mask[b, q_idx] & img_mask[b, kv_idx]
|
| 100 |
+
is_same_image = img_indices[b, q_idx] == img_indices[b, kv_idx]
|
| 101 |
+
return is_img_tokens & is_same_image
|
| 102 |
+
|
| 103 |
+
return image_prefix_mask_mod
|
| 104 |
+
|
| 105 |
+
|
| 106 |
+
# Compiled once at import time; reused by create_attention_mask below.
_compiled_create_block_mask = torch.compile(
    create_block_mask, dynamic=True
)  # Note: can't use mode = 'reduce-overhead' here because it uses internal CUDA graph trees on private streams, causing manual capture to record empty graphs


@torch.inference_mode()
def create_attention_mask(*args, **kwargs) -> BlockMask:
    """
    NOTE: We compile this for performance/memory reasons in large masks. To reduce
    recompiles due to grad_mode flips, we always run mask creation under inference_mode.
    """
    return _compiled_create_block_mask(*args, **kwargs)
|
| 118 |
+
|
| 119 |
+
|
| 120 |
+
def create_batch_attention_mask(
    input_batch: T,
    *,
    pad_token_id: int,
    eos_token_id: int,
    soi_token_id: int,
    eoi_token_id: int,
    max_len: int | None = None,
) -> BlockMask:
    """Build the combined FlexAttention mask for the batch engine.

    Text tokens get causal + per-document + non-left-pad masking; image tokens
    additionally get full bidirectional attention within their own image span
    (OR-ed in on top of the text rules).
    """
    batch_size, seq_len = input_batch.size()

    text_mask_mod = and_masks(
        get_causal_mask_mod(),
        get_document_mask_mod(input_batch, eos_token_id),
        get_non_left_pad_mask_mod(input_batch, pad_token_id),
    )
    image_mask_mod = get_image_prefix_mask_mod(
        batch=input_batch,
        soi_id=soi_token_id,
        eoi_id=eoi_token_id,
    )
    combined_mask_mod = or_masks(image_mask_mod, text_mask_mod)

    effective_len = max_len or seq_len
    return create_attention_mask(
        combined_mask_mod, batch_size, None, effective_len, effective_len
    )
|
config.json
ADDED
|
@@ -0,0 +1,44 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
{
|
| 2 |
+
"architectures": [
|
| 3 |
+
"FalconPerceptionForSegmentation"
|
| 4 |
+
],
|
| 5 |
+
"auto_map": {
|
| 6 |
+
"AutoConfig": "configuration_falcon_perception.FalconPerceptionConfig",
|
| 7 |
+
"AutoModelForCausalLM": "modeling_falcon_perception.FalconPerceptionForSegmentation"
|
| 8 |
+
},
|
| 9 |
+
"model_type": "falcon_perception",
|
| 10 |
+
"torch_dtype": "float32",
|
| 11 |
+
"dim": 1024,
|
| 12 |
+
"n_layers": 28,
|
| 13 |
+
"n_heads": 16,
|
| 14 |
+
"head_dim": 128,
|
| 15 |
+
"n_kv_heads": 8,
|
| 16 |
+
"vocab_size": 65536,
|
| 17 |
+
"ffn_dim": 3072,
|
| 18 |
+
"norm_eps": 1e-05,
|
| 19 |
+
"max_seq_len": 8192,
|
| 20 |
+
"rope_theta": 10000,
|
| 21 |
+
"channel_size": 3,
|
| 22 |
+
"spatial_patch_size": 16,
|
| 23 |
+
"temporal_patch_size": 1,
|
| 24 |
+
"do_segmentation": true,
|
| 25 |
+
"segm_out_dim": 256,
|
| 26 |
+
"num_segm_layers": 3,
|
| 27 |
+
"coord_enc_dim": 512,
|
| 28 |
+
"coord_dec_dim": 8192,
|
| 29 |
+
"coord_out_dim": 2048,
|
| 30 |
+
"coord_token_id": 240,
|
| 31 |
+
"size_enc_dim": 512,
|
| 32 |
+
"size_dec_dim": 8192,
|
| 33 |
+
"size_out_dim": 2048,
|
| 34 |
+
"size_token_id": 241,
|
| 35 |
+
"seg_token_id": 262,
|
| 36 |
+
"eos_id": 11,
|
| 37 |
+
"img_id": 227,
|
| 38 |
+
"image_cls_token_id": 244,
|
| 39 |
+
"image_reg_1_token_id": 245,
|
| 40 |
+
"image_reg_2_token_id": 246,
|
| 41 |
+
"image_reg_3_token_id": 247,
|
| 42 |
+
"image_reg_4_token_id": 248,
|
| 43 |
+
"img_end_id": 230
|
| 44 |
+
}
|
configuration_falcon_perception.py
ADDED
|
@@ -0,0 +1,77 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from transformers import PretrainedConfig
|
| 2 |
+
|
| 3 |
+
|
| 4 |
+
class FalconPerceptionConfig(PretrainedConfig):
    """Configuration for Falcon-Perception, a decoder-only multimodal model
    with coordinate, size and segmentation perception heads.

    Parameter groups:
      * transformer backbone: dim, n_layers, n_heads, head_dim, n_kv_heads,
        vocab_size, ffn_dim, norm_eps, max_seq_len, rope_theta
      * vision patchification: channel_size, spatial_patch_size,
        temporal_patch_size
      * perception heads: do_segmentation, segm_out_dim, num_segm_layers,
        and the coord_* / size_* encoder-decoder dimensions
      * special-token ids: coordinate/size/segmentation tokens, eos, and the
        image marker tokens (cls, reg_1..reg_4, img, img_end)
    """

    model_type = "falcon_perception"

    def __init__(
        self,
        # -- Transformer backbone -------------------------------------------
        dim: int = 1024,
        n_layers: int = 28,
        n_heads: int = 16,
        head_dim: int = 128,
        n_kv_heads: int = 8,
        vocab_size: int = 65536,
        ffn_dim: int = 3072,
        norm_eps: float = 1e-5,
        max_seq_len: int = 8192,
        rope_theta: int = 10000,
        # -- Vision patchification ------------------------------------------
        channel_size: int = 3,
        spatial_patch_size: int = 16,
        temporal_patch_size: int = 1,
        # -- Segmentation head ----------------------------------------------
        do_segmentation: bool = True,
        segm_out_dim: int = 256,
        num_segm_layers: int = 3,
        # -- Coordinate head ------------------------------------------------
        coord_enc_dim: int = 512,
        coord_dec_dim: int = 8192,
        coord_out_dim: int = 2048,
        coord_token_id: int = 240,
        # -- Size head ------------------------------------------------------
        size_enc_dim: int = 512,
        size_dec_dim: int = 8192,
        size_out_dim: int = 2048,
        size_token_id: int = 241,
        # -- Special-token ids ----------------------------------------------
        seg_token_id: int = 262,
        eos_id: int = 11,
        img_id: int = 227,
        image_cls_token_id: int = 244,
        image_reg_1_token_id: int = 245,
        image_reg_2_token_id: int = 246,
        image_reg_3_token_id: int = 247,
        image_reg_4_token_id: int = 248,
        img_end_id: int = 230,
        **kwargs,
    ):
        self.dim = dim
        self.n_layers = n_layers
        self.n_heads = n_heads
        self.head_dim = head_dim
        self.n_kv_heads = n_kv_heads
        self.vocab_size = vocab_size
        self.ffn_dim = ffn_dim
        self.norm_eps = norm_eps
        self.max_seq_len = max_seq_len
        self.rope_theta = rope_theta
        self.channel_size = channel_size
        self.spatial_patch_size = spatial_patch_size
        self.temporal_patch_size = temporal_patch_size
        self.do_segmentation = do_segmentation
        self.segm_out_dim = segm_out_dim
        self.num_segm_layers = num_segm_layers
        self.coord_enc_dim = coord_enc_dim
        self.coord_dec_dim = coord_dec_dim
        self.coord_out_dim = coord_out_dim
        self.coord_token_id = coord_token_id
        self.size_enc_dim = size_enc_dim
        self.size_dec_dim = size_dec_dim
        self.size_out_dim = size_out_dim
        self.size_token_id = size_token_id
        self.seg_token_id = seg_token_id
        self.eos_id = eos_id
        self.img_id = img_id
        self.image_cls_token_id = image_cls_token_id
        self.image_reg_1_token_id = image_reg_1_token_id
        self.image_reg_2_token_id = image_reg_2_token_id
        self.image_reg_3_token_id = image_reg_3_token_id
        self.image_reg_4_token_id = image_reg_4_token_id
        self.img_end_id = img_end_id
        # PretrainedConfig consumes remaining kwargs (torch_dtype, auto_map, ...).
        super().__init__(**kwargs)
|
main_fig.jpg
ADDED
|
Git LFS Details
|
model.safetensors
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:32b342ee3cb22d05a380b26aac2ddaaee0d7479093c2007ac5618d8d19f5272e
|
| 3 |
+
size 632397880
|
model_args.json
ADDED
|
@@ -0,0 +1,37 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
{
|
| 2 |
+
"channel_size": 3,
|
| 3 |
+
"coord_dec_dim": 8192,
|
| 4 |
+
"coord_enc_dim": 512,
|
| 5 |
+
"coord_out_dim": 2048,
|
| 6 |
+
"coord_token_id": 240,
|
| 7 |
+
"dim": 1024,
|
| 8 |
+
"eos_id": 11,
|
| 9 |
+
"ffn_dim": 3072,
|
| 10 |
+
"head_dim": 128,
|
| 11 |
+
"image_cls_token_id": 244,
|
| 12 |
+
"image_reg_1_token_id": 245,
|
| 13 |
+
"image_reg_2_token_id": 246,
|
| 14 |
+
"image_reg_3_token_id": 247,
|
| 15 |
+
"image_reg_4_token_id": 248,
|
| 16 |
+
"img_end_id": 230,
|
| 17 |
+
"img_id": 227,
|
| 18 |
+
"img_row_sep_id": 228,
|
| 19 |
+
"img_start_id": 229,
|
| 20 |
+
"max_seq_len": 8192,
|
| 21 |
+
"n_heads": 16,
|
| 22 |
+
"n_kv_heads": 8,
|
| 23 |
+
"n_layers": 28,
|
| 24 |
+
"norm_eps": 1e-05,
|
| 25 |
+
"num_segm_layers": 3,
|
| 26 |
+
"perception_heads": true,
|
| 27 |
+
"rope_theta": 10000,
|
| 28 |
+
"seg_token_id": 262,
|
| 29 |
+
"segm_out_dim": 256,
|
| 30 |
+
"size_dec_dim": 8192,
|
| 31 |
+
"size_enc_dim": 512,
|
| 32 |
+
"size_out_dim": 2048,
|
| 33 |
+
"size_token_id": 241,
|
| 34 |
+
"spatial_patch_size": 16,
|
| 35 |
+
"temporal_patch_size": 1,
|
| 36 |
+
"vocab_size": 65536
|
| 37 |
+
}
|
modeling_falcon_perception.py
ADDED
|
@@ -0,0 +1,935 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import math
|
| 2 |
+
from pathlib import Path
|
| 3 |
+
|
| 4 |
+
import einops as E
|
| 5 |
+
import numpy as np
|
| 6 |
+
import torch
|
| 7 |
+
import torch.nn.functional as F
|
| 8 |
+
import triton
|
| 9 |
+
import triton.language as tl
|
| 10 |
+
from PIL import Image
|
| 11 |
+
from pycocotools import mask as mask_utils
|
| 12 |
+
from torch import Tensor as T
|
| 13 |
+
from torch import nn
|
| 14 |
+
from torch.nn.attention.flex_attention import (
|
| 15 |
+
AuxRequest,
|
| 16 |
+
BlockMask,
|
| 17 |
+
)
|
| 18 |
+
from transformers import AutoTokenizer, PreTrainedModel
|
| 19 |
+
|
| 20 |
+
from .anyup import AnyUp, get_attention_mask_mod as get_upsampler_attn_mask_mod
|
| 21 |
+
from .attention import (
|
| 22 |
+
compiled_flex_attn_decode,
|
| 23 |
+
compiled_flex_attn_prefill,
|
| 24 |
+
create_attention_mask,
|
| 25 |
+
create_batch_attention_mask,
|
| 26 |
+
offset_mask_mod,
|
| 27 |
+
)
|
| 28 |
+
from .configuration_falcon_perception import FalconPerceptionConfig
|
| 29 |
+
from .processing_falcon_perception import load_image, process_batch
|
| 30 |
+
from .rope import (
|
| 31 |
+
apply_3d_rotary_emb,
|
| 32 |
+
apply_golden_freqs_cis_to_visual_pos,
|
| 33 |
+
precompute_freqs_cis,
|
| 34 |
+
)
|
| 35 |
+
|
| 36 |
+
|
| 37 |
+
# ---------------------------------------------------------------------------
|
| 38 |
+
# Sub-modules: Heads
|
| 39 |
+
# ---------------------------------------------------------------------------
|
| 40 |
+
|
| 41 |
+
class FourierEncoder(nn.Module):
    """Fourier-feature encoder for low-dimensional inputs (e.g. xy pairs).

    Pipeline: linear frequency projection -> [cos, sin] concatenation ->
    linear mapping to `out_dim` features. `feat_dim // 2` frequencies are
    learned; the cos/sin doubling restores `feat_dim` before the output map.
    """

    def __init__(self, in_dim: int, feat_dim: int, out_dim: int):
        super().__init__()
        self.embed = nn.Linear(in_dim, feat_dim // 2, bias=False)
        self.transform = nn.Linear(feat_dim, out_dim, bias=False)

    def forward(self, x):
        # Scale projections by 2*pi so they act as phases.
        phases = 2 * math.pi * self.embed(x)
        fourier_feats = torch.cat([phases.cos(), phases.sin()], dim=-1)
        return self.transform(fourier_feats)
|
| 51 |
+
|
| 52 |
+
|
| 53 |
+
class BboxDecoder(nn.Module):
    """Two-layer MLP with a squared-ReLU activation for bbox bin logits."""

    def __init__(self, in_dim: int, hidden_dim: int, out_dim: int) -> None:
        super().__init__()
        self.w1 = nn.Linear(in_dim, hidden_dim, bias=False)
        self.w2 = nn.Linear(hidden_dim, out_dim, bias=False)

    def forward(self, x: T) -> T:
        hidden = F.relu(self.w1(x))
        # Squared ReLU: relu(h)^2, then project to logits.
        return self.w2(hidden * hidden)
|
| 61 |
+
|
| 62 |
+
|
| 63 |
+
class SegmDecoder(nn.Module):
    """Stack of squared-ReLU hidden layers followed by a linear pixel head."""

    def __init__(self, in_dim: int, out_dim: int, num_layers: int) -> None:
        super().__init__()
        # num_layers - 1 hidden layers keep the width at in_dim.
        hidden_layers = [nn.Linear(in_dim, in_dim) for _ in range(num_layers - 1)]
        self.layers = nn.ModuleList(hidden_layers)
        self.pixel_layer = nn.Linear(in_dim, out_dim, bias=False)

    def forward(self, x) -> torch.Tensor:
        for hidden_layer in self.layers:
            activated = F.relu(hidden_layer(x))
            x = activated.square()
        return self.pixel_layer(x)
|
| 73 |
+
|
| 74 |
+
|
| 75 |
+
# ---------------------------------------------------------------------------
|
| 76 |
+
# Sub-modules: Attention
|
| 77 |
+
# ---------------------------------------------------------------------------
|
| 78 |
+
|
| 79 |
+
def repeat_kv(x: torch.Tensor, n_rep: int) -> torch.Tensor:
|
| 80 |
+
B, S, H, D = x.shape
|
| 81 |
+
if n_rep == 1:
|
| 82 |
+
return x
|
| 83 |
+
return torch.unsqueeze(x, dim=3).expand(B, S, H, n_rep, D).reshape(B, S, H * n_rep, D)
|
| 84 |
+
|
| 85 |
+
|
| 86 |
+
class Attention(nn.Module):
    """Grouped-query attention with QK RMSNorm, 3D rotary embeddings, a KV
    cache, and learned per-head "sink" logits, backed by flex-attention
    kernels (separate compiled paths for prefill and single-token decode).
    """

    def __init__(self, config: FalconPerceptionConfig, layer_id: int):
        super().__init__()
        self.layer_id = layer_id  # index into the shared KV cache
        self.n_kv_heads = config.n_kv_heads or config.n_heads
        self.n_rep = config.n_heads // self.n_kv_heads  # query heads per KV head
        self.head_dim = config.head_dim or config.dim // config.n_heads
        self.q_dim = config.n_heads * self.head_dim
        self.kv_dim = self.n_kv_heads * self.head_dim

        # Fused QKV projection and the output projection back to model dim.
        self.wqkv = nn.Linear(config.dim, self.q_dim + 2 * self.kv_dim, bias=False)
        self.wo = nn.Linear(config.n_heads * self.head_dim, config.dim, bias=False)
        # Per-head sink logits; torch.empty => values come from the checkpoint.
        self.sinks = nn.Parameter(torch.empty((config.n_heads,)))

    def _pre_attention_qkv(self, x) -> tuple[T, T, T]:
        """Pre-norm, project, split into heads, QK-normalize, expand KV heads."""
        qkv = self.wqkv(F.rms_norm(x, (x.size(-1),)))
        xq, xk, xv = qkv.split([self.q_dim, self.kv_dim, self.kv_dim], dim=-1)
        xq = E.rearrange(xq, "b s (h d) -> b s h d", d=self.head_dim)
        xk = E.rearrange(xk, "b s (h d) -> b s h d", d=self.head_dim)
        xv = E.rearrange(xv, "b s (h d) -> b s h d", d=self.head_dim)
        # Weightless RMSNorm over the head dim for queries and keys.
        xq = F.rms_norm(xq, (xq.size(-1),))
        xk = F.rms_norm(xk, (xk.size(-1),))
        # Materialize GQA: repeat KV heads so they match the query head count.
        xk = repeat_kv(xk, n_rep=self.n_rep)
        xv = repeat_kv(xv, n_rep=self.n_rep)
        return xq, xk, xv

    def _post_attention(self, output: T, lse: T) -> T:
        """Rescale per head using the sink logits, then project out.

        sigmoid(lse - sink) dampens each head's output — presumably an
        attention-sink mechanism where the sink logit absorbs probability
        mass; the exact semantics depend on the flex-attention lse.
        """
        sinks_BHS = self.sinks.view(1, -1, 1)
        sink_scale = torch.sigmoid(lse - sinks_BHS)
        output = (output * sink_scale.unsqueeze(-1)).to(output.dtype)
        # (B, H, S, D) -> (B, S, H*D) before the output projection.
        output = output.permute(0, 2, 1, 3).contiguous().flatten(2)
        return self.wo(output)

    def compile_attention(self, *, dynamic: bool = True, mode: str = "default"):
        """torch.compile the element-wise pre/post stages around the kernel."""
        self._pre_attention_qkv = torch.compile(self._pre_attention_qkv, dynamic=dynamic, mode=mode)
        self._post_attention = torch.compile(self._post_attention, dynamic=dynamic, mode=mode)

    def forward(
        self, x: T, attention_masks: BlockMask, freqs_cis: T,
        freqs_cis_2d: T | None = None, pos_hw: T | None = None,
        kv_cache=None, input_pos=None, batch_idx=None,
        flex_attn_kernel_options=None,  # NOTE(review): accepted but unused here
    ):
        """Attend over the cached sequence. `kv_cache` must not be None."""
        xq, xk, xv = self._pre_attention_qkv(x)
        xq, xk = apply_3d_rotary_emb(xq, xk, freqs_cis, freqs_cis_2d, pos_hw)
        xq = E.rearrange(xq, "b s h d -> b h s d")
        xk = E.rearrange(xk, "b s h d -> b h s d")
        xv = E.rearrange(xv, "b s h d -> b h s d")
        xk, xv = kv_cache.insert_kv(self.layer_id, xk, xv, input_pos=input_pos, batch_idx=batch_idx)
        # One query token => decode kernel; otherwise the prefill kernel.
        flex_fn = compiled_flex_attn_decode if xq.shape[2] == 1 else compiled_flex_attn_prefill
        output, aux_output = flex_fn(xq, xk, xv, block_mask=attention_masks, return_aux=AuxRequest(lse=True))
        return self._post_attention(output, aux_output.lse)
|
| 138 |
+
|
| 139 |
+
|
| 140 |
+
# ---------------------------------------------------------------------------
|
| 141 |
+
# Sub-modules: FeedForward
|
| 142 |
+
# ---------------------------------------------------------------------------
|
| 143 |
+
|
| 144 |
+
@triton.jit
def _squared_relu_gate_kernel(
    packed_ptr, out_ptr, n_rows, n_cols,
    in_row_stride, in_col_stride, out_row_stride, out_col_stride,
    BLOCK_SIZE: tl.constexpr,
):
    # Fused gated squared-ReLU over an interleaved (gate, up) layout.
    # Each input row stores gate/up pairs interleaved along the feature axis
    # (gate at even column 2*c, up at odd column 2*c + 1); each output element
    # is relu(gate)^2 * up. One program instance handles BLOCK_SIZE flattened
    # output elements.
    pid = tl.program_id(0)
    n_elements = n_rows * n_cols
    # Flattened output element ids covered by this program; mask off the tail.
    offsets = pid * BLOCK_SIZE + tl.arange(0, BLOCK_SIZE)
    mask = offsets < n_elements
    rows = offsets // n_cols
    cols = offsets % n_cols
    # Interleaved input layout: gate and up at adjacent columns.
    gate_idx = rows * in_row_stride + (2 * cols) * in_col_stride
    up_idx = rows * in_row_stride + (2 * cols + 1) * in_col_stride
    out_idx = rows * out_row_stride + cols * out_col_stride
    gate = tl.load(packed_ptr + gate_idx, mask=mask)
    up = tl.load(packed_ptr + up_idx, mask=mask)
    gate = tl.where(gate > 0, gate, 0.0)  # ReLU
    out = gate * gate * up  # squared-ReLU gate applied to the up projection
    tl.store(out_ptr + out_idx, out, mask=mask)
|
| 164 |
+
|
| 165 |
+
|
| 166 |
+
def squared_relu_gate(packed: T, hidden_dim: int) -> T:
    """Apply the fused squared-ReLU gate kernel to a packed (gate, up) tensor.

    `packed` carries 2 * hidden_dim features per position, interleaved as
    (gate, up) pairs; the result has `hidden_dim` features equal to
    relu(gate)^2 * up. All leading dimensions are preserved.
    """
    # Collapse leading dims so the kernel sees a 2-D (rows, features) problem.
    packed_2d = packed.flatten(0, -2)
    n_rows = packed_2d.shape[0]
    n_cols = hidden_dim
    out_2d = torch.empty((n_rows, n_cols), device=packed.device, dtype=packed.dtype)
    n = n_rows * n_cols
    # 1-D launch grid over all flattened output elements.
    grid = lambda meta: (triton.cdiv(n, meta["BLOCK_SIZE"]),)
    _squared_relu_gate_kernel[grid](
        packed_2d, out_2d, n_rows, n_cols,
        packed_2d.stride(0), packed_2d.stride(1),
        out_2d.stride(0), out_2d.stride(1),
        BLOCK_SIZE=1024,
    )
    # Restore the original leading shape with the gated feature dim.
    return out_2d.view(*packed.shape[:-1], hidden_dim)
|
| 180 |
+
|
| 181 |
+
|
| 182 |
+
class FeedForward(nn.Module):
    """Gated MLP block: RMSNorm -> packed gate/up projection -> fused
    squared-ReLU gate (Triton) -> down projection back to model dim."""

    def __init__(self, dim: int, hidden_dim: int):
        super().__init__()
        # Single matmul producing the interleaved (gate, up) features that
        # squared_relu_gate expects.
        self.w13 = nn.Linear(dim, 2 * hidden_dim, bias=False)
        self.w2 = nn.Linear(hidden_dim, dim, bias=False)
        self.hidden_dim = hidden_dim

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        # Pre-norm via the weightless functional RMSNorm.
        x = F.rms_norm(x, (x.size(-1),))
        w13_out = self.w13(x)
        return self.w2(squared_relu_gate(w13_out, self.hidden_dim))
|
| 193 |
+
|
| 194 |
+
|
| 195 |
+
# ---------------------------------------------------------------------------
|
| 196 |
+
# Sub-modules: TransformerBlock
|
| 197 |
+
# ---------------------------------------------------------------------------
|
| 198 |
+
|
| 199 |
+
class TransformerBlock(nn.Module):
    """One pre-norm decoder layer: attention and feed-forward, each with a
    residual connection (normalization happens inside the sub-modules)."""

    def __init__(self, layer_id: int, config: FalconPerceptionConfig):
        super().__init__()
        self.attention = Attention(config, layer_id)
        self.feed_forward = FeedForward(config.dim, config.ffn_dim)

    def compile(self, *, dynamic: bool = True, mode: str = "default"):
        """torch.compile the feed-forward and the attention pre/post stages."""
        self.feed_forward = torch.compile(self.feed_forward, dynamic=dynamic, mode=mode)
        self.attention.compile_attention(dynamic=dynamic, mode=mode)
        return self

    def forward(
        self, x: T, freqs_cis: T, freqs_cis_2d: T | None = None,
        pos_hw: T | None = None, attention_masks=None, kv_cache=None,
        input_pos=None, batch_idx=None, flex_attn_kernel_options=None,
    ):
        B, S, D = x.shape
        # Residual attention; rotary tables, positions and masks pass through.
        x = x + self.attention(
            x, freqs_cis=freqs_cis, freqs_cis_2d=freqs_cis_2d, pos_hw=pos_hw,
            attention_masks=attention_masks, kv_cache=kv_cache,
            input_pos=input_pos, batch_idx=batch_idx,
            flex_attn_kernel_options=flex_attn_kernel_options,
        )
        out = x + self.feed_forward(x)
        # Reshape pins the (B, S, D) output contract (no-op for valid shapes).
        return out.reshape(B, S, D)
|
| 224 |
+
|
| 225 |
+
|
| 226 |
+
# ---------------------------------------------------------------------------
|
| 227 |
+
# KV Cache
|
| 228 |
+
# ---------------------------------------------------------------------------
|
| 229 |
+
|
| 230 |
+
class KVCache:
    """Key/value cache for incremental decoding, shared across all layers.

    A single backing tensor of shape
    (num_layers, 2, max_batch, n_heads, max_seq, head_dim) is allocated
    lazily with the dtype/device of the first inserted keys.
    """

    def __init__(self, max_batch_size, max_seq_length, n_heads, head_dim, num_layers):
        self.kv_shape = (num_layers, 2, max_batch_size, n_heads, max_seq_length, head_dim)
        self.kv_cache = None  # allocated lazily on first insert_kv
        self.pos = 0  # number of cached timesteps, shared by all layers
        self.pos_t: T | None = None  # tensor-valued position, set by the caller

    def reset(self):
        # Rewind the write position; the backing buffer is reused/overwritten.
        self.pos = 0
        self.pos_t = None

    def get_pos(self):
        return self.pos

    def set_pos_t(self, pos_t):
        self.pos_t = pos_t

    def increment_and_get_pos_t(self):
        # NOTE(review): += mutates the stored tensor in place — presumably
        # deliberate so downstream code keeps a stable tensor identity; confirm.
        assert self.pos_t is not None
        self.pos_t += 1
        return self.pos_t

    def insert_kv(self, layer_id: int, k: T, v: T, **kwargs):
        """Write k/v of shape (B, H, T_add, D) at positions [pos, pos+T_add).

        Returns views over the cached prefix [0, pos+T_add). `pos` only
        advances after the LAST layer writes, so every layer of one forward
        pass shares the same write offset.
        """
        del kwargs  # input_pos / batch_idx are accepted but unused here
        assert self.pos_t is not None
        if self.kv_cache is None:
            # Lazy allocation inherits dtype/device from the first keys.
            self.kv_cache = torch.empty(self.kv_shape, dtype=k.dtype, device=k.device)
        B, H, T_add, D = k.size()
        t0, t1 = self.pos, self.pos + T_add
        self.kv_cache[layer_id, 0, :, :, t0:t1] = k
        self.kv_cache[layer_id, 1, :, :, t0:t1] = v
        key_view = self.kv_cache[layer_id, 0, :, :, :t1]
        value_view = self.kv_cache[layer_id, 1, :, :, :t1]
        if layer_id == self.kv_cache.size(0) - 1:
            self.pos = t1
        return key_view, value_view
|
| 266 |
+
|
| 267 |
+
|
| 268 |
+
# ---------------------------------------------------------------------------
|
| 269 |
+
# Sampling
|
| 270 |
+
# ---------------------------------------------------------------------------
|
| 271 |
+
|
| 272 |
+
@torch.inference_mode()
def sample_next_token(logits, rng, temperature=0.0, top_k=None):
    """Pick the next token id from a row of logits.

    temperature == 0 selects greedily via argmax. Otherwise the logits are
    temperature-scaled and sampled from a softmax, optionally restricted to
    the `top_k` highest-scoring tokens. `rng` drives the multinomial draw
    for reproducibility. Returns token ids with a trailing dim of 1.
    """
    assert temperature >= 0.0
    if temperature == 0.0:
        # Greedy decoding: deterministic, no rng consumed.
        return torch.argmax(logits, dim=-1, keepdim=True)
    if top_k is None:
        scaled = logits / temperature
        probs = F.softmax(scaled, dim=-1)
        return torch.multinomial(probs, num_samples=1, generator=rng)
    # Top-k path: sample among the k best, then map back to vocabulary ids.
    k = min(top_k, logits.size(-1))
    top_vals, top_idx = torch.topk(logits, k, dim=-1)
    probs = F.softmax(top_vals / temperature, dim=-1)
    choice = torch.multinomial(probs, num_samples=1, generator=rng)
    return top_idx.gather(1, choice)
|
| 287 |
+
|
| 288 |
+
|
| 289 |
+
# ---------------------------------------------------------------------------
|
| 290 |
+
# Main Model
|
| 291 |
+
# ---------------------------------------------------------------------------
|
| 292 |
+
|
| 293 |
+
class FalconPerceptionForSegmentation(PreTrainedModel):
|
| 294 |
+
config_class = FalconPerceptionConfig
|
| 295 |
+
_no_split_modules = ["TransformerBlock"]
|
| 296 |
+
|
| 297 |
+
    def __init__(self, config: FalconPerceptionConfig):
        """Build the multimodal decoder and its perception heads."""
        super().__init__(config)
        # Each image patch flattens temporal * spatial^2 * channel pixels.
        img_in_dim = config.temporal_patch_size * config.spatial_patch_size ** 2 * config.channel_size
        self.img_projector = nn.Linear(img_in_dim, config.dim, bias=False)
        self.tok_embeddings = nn.Embedding(config.vocab_size, config.dim)

        # ModuleDict keyed by stringified layer id.
        self.layers = nn.ModuleDict()
        for layer_id in range(config.n_layers):
            self.layers[str(layer_id)] = TransformerBlock(layer_id, config)

        self.norm = nn.RMSNorm(config.dim, eps=config.norm_eps)
        self.output = nn.Linear(config.dim, config.vocab_size, bias=False)

        # Continuous heads: Fourier encoders inject xy / hw values into the
        # token stream; BboxDecoders turn hidden states back into bin logits.
        self.coord_encoder = FourierEncoder(2, config.coord_enc_dim, config.dim)
        self.coord_decoder = BboxDecoder(config.dim, config.coord_dec_dim, config.coord_out_dim)
        self.size_encoder = FourierEncoder(2, config.size_enc_dim, config.dim)
        self.size_decoder = BboxDecoder(config.dim, config.size_dec_dim, config.size_out_dim)

        if config.do_segmentation:
            self.itok_upsampler = AnyUp()
            self.proj_segm = SegmDecoder(config.dim, config.segm_out_dim, config.num_segm_layers)
            self.conv_segm = nn.Conv2d(config.dim, config.segm_out_dim, kernel_size=3, padding=1)

        # Rotary tables: freqs_cis is deterministic and non-persistent
        # (recomputed in _ensure_device_buffers); freqs_cis_golden is a
        # persistent buffer whose values come from the checkpoint.
        rope_dim = config.head_dim // 2
        freqs_cis = precompute_freqs_cis(rope_dim, config.max_seq_len, config.rope_theta)
        freqs_cis_golden = torch.empty((config.n_heads, rope_dim // 2, 2), dtype=torch.float)
        self.register_buffer("freqs_cis", freqs_cis, persistent=False)
        self.register_buffer("freqs_cis_golden", freqs_cis_golden, persistent=True)

        self._weights_fused = False  # set by _ensure_device_buffers
        self._is_compiled = False  # set by compile_model

        self.post_init()
|
| 330 |
+
|
| 331 |
+
# -- Weight management ---------------------------------------------------
|
| 332 |
+
|
| 333 |
+
    def _ensure_device_buffers(self):
        """Recompute non-persistent buffers that HF meta-device loading may discard."""
        if self._weights_fused:
            return  # already refreshed once
        device = self.tok_embeddings.weight.device
        c = self.config
        rope_dim = c.head_dim // 2
        # freqs_cis is non-persistent, so it is not restored from the
        # checkpoint: rebuild it on the same device as the weights.
        freqs_cis = precompute_freqs_cis(rope_dim, c.max_seq_len, c.rope_theta).to(device)
        self.register_buffer("freqs_cis", freqs_cis, persistent=False)
        if self.freqs_cis_golden.device != device:
            self.freqs_cis_golden = self.freqs_cis_golden.to(device)
        self._weights_fused = True
|
| 345 |
+
|
| 346 |
+
    def compile_model(self):
        """torch.compile all transformer layers and heads (idempotent)."""
        if self._is_compiled:
            return
        # NOTE(review): cudagraphs are disabled — presumably because sequence
        # lengths vary between decode steps; confirm before re-enabling.
        torch._inductor.config.triton.cudagraphs = False
        for layer in self.layers.values():
            layer.compile(dynamic=True, mode="default")
        self.coord_encoder = torch.compile(self.coord_encoder, dynamic=True, mode="default")
        self.coord_decoder = torch.compile(self.coord_decoder, dynamic=True, mode="default")
        self.size_encoder = torch.compile(self.size_encoder, dynamic=True, mode="default")
        self.size_decoder = torch.compile(self.size_decoder, dynamic=True, mode="default")
        if self.config.do_segmentation:
            self.itok_upsampler.compile(mode="default", dynamic=True)
        self._is_compiled = True
|
| 359 |
+
|
| 360 |
+
# -- Tokenizer -----------------------------------------------------------
|
| 361 |
+
|
| 362 |
+
    def _get_tokenizer(self):
        """Lazily load and cache the tokenizer from the model's path.

        Every string-valued special token is also mirrored onto the tokenizer
        as `<name>` / `<name>_id` attributes for convenient access.
        """
        if not hasattr(self, "_tokenizer"):
            import os
            path = self.config._name_or_path
            # Local checkout => avoid hitting the Hub.
            is_local = os.path.exists(path)
            self._tokenizer = AutoTokenizer.from_pretrained(path, local_files_only=is_local, trust_remote_code=True)
            for token_name, token in self._tokenizer.special_tokens_map.items():
                if isinstance(token, str):
                    setattr(self._tokenizer, token_name, token)
                    setattr(
                        self._tokenizer, token_name + "_id",
                        self._tokenizer.convert_tokens_to_ids(token),
                    )
        return self._tokenizer
|
| 376 |
+
|
| 377 |
+
# -- Attention mask ------------------------------------------------------
|
| 378 |
+
|
| 379 |
+
    def get_attention_mask(self, input_batch: T, max_len: int | None = None):
        """Build the batched attention mask for a batch of token ids.

        NOTE(review): relies on self._pad_token_id, which is not assigned
        anywhere in this chunk — confirm it is set before the first call.
        """
        return create_batch_attention_mask(
            input_batch,
            pad_token_id=self._pad_token_id,
            eos_token_id=self.config.eos_id,
            soi_token_id=self.config.image_cls_token_id,
            eoi_token_id=self.config.img_end_id,
            max_len=max_len,
        )
|
| 388 |
+
|
| 389 |
+
    def get_upsampler_attn_mask(self, H, W, h, w, device):
        """Mask for the AnyUp upsampler: H*W pixel queries over h*w patch keys."""
        return create_attention_mask(
            get_upsampler_attn_mask_mod(H, W, h, w, device=device),
            B=None, H=None, Q_LEN=H * W, KV_LEN=h * w,
        )
|
| 394 |
+
|
| 395 |
+
# -- Embedding helpers ---------------------------------------------------
|
| 396 |
+
|
| 397 |
+
    def _scatter_img_tokens_with_projector(self, h_BSD, pixel_patches_NLC, pixel_masks_NTHW, tokens_BS):
        """Project valid pixel patches and write them over <img> embeddings.

        A patch counts as valid if ANY pixel in its (pt, ph, pw) cell is
        unmasked; the number of valid patches must match the number of
        img_id tokens in the batch (asserted below).
        """
        B, S, D = h_BSD.shape
        # Reduce the pixel mask to one flag per patch.
        pixel_patch_mask = E.reduce(
            pixel_masks_NTHW,
            "n (t pt) (h ph) (w pw) -> (n t h w)",
            reduction="any",
            pt=self.config.temporal_patch_size,
            ph=self.config.spatial_patch_size,
            pw=self.config.spatial_patch_size,
        )
        pixel_patches_flat = E.rearrange(pixel_patches_NLC, "n p c -> (n p) c")
        valid_patches = pixel_patches_flat[pixel_patch_mask]
        valid_feats = self.img_projector(valid_patches)
        # Broadcast the img-token mask over the feature dim for masked_scatter.
        img_mask_h_BSD = E.repeat(tokens_BS == self.config.img_id, "b s -> b s d", d=D)
        assert valid_feats.numel() == img_mask_h_BSD.sum()
        return torch.masked_scatter(h_BSD, img_mask_h_BSD, valid_feats)
|
| 413 |
+
|
| 414 |
+
    def _encode_coords(self, h_BSD: T, tokens_BS: T, all_xy: T):
        """Replace coord-token embeddings with Fourier encodings of (x, y).

        `all_xy` is reshaped to (-1, 2) before encoding; empty input is a
        no-op.
        """
        coord_tokens_mask = tokens_BS == self.config.coord_token_id
        if all_xy.numel() == 0:
            return h_BSD  # no coordinates in this batch
        coord_tokens = self.coord_encoder(all_xy.reshape(-1, 2))
        if coord_tokens.shape[0] == h_BSD.shape[0]:
            # Encoding count equals the batch size: reshape per-row and blend
            # out-of-place with torch.where at masked positions.
            h_BSD = torch.where(
                coord_tokens_mask.unsqueeze(-1),
                coord_tokens.view(h_BSD.shape[0], -1, h_BSD.shape[-1]),
                h_BSD,
            )
        else:
            # Variable count per row: scatter encodings in-place (mutates h_BSD).
            h_BSD = h_BSD.masked_scatter_(coord_tokens_mask.unsqueeze(-1), coord_tokens)
        return h_BSD
|
| 428 |
+
|
| 429 |
+
    def _encode_sizes(self, h_BSD, tokens_BS, all_hw: T):
        """Replace size-token embeddings with Fourier encodings of (h, w).

        Mirrors _encode_coords but for size tokens; empty input is a no-op.
        """
        size_tokens_mask = tokens_BS == self.config.size_token_id
        if all_hw.numel() == 0:
            return h_BSD  # no sizes in this batch
        size_tokens = self.size_encoder(all_hw.reshape(-1, 2))
        if size_tokens.shape[0] == h_BSD.shape[0]:
            # Encoding count equals the batch size: blend out-of-place.
            h_BSD = torch.where(
                size_tokens_mask.unsqueeze(-1),
                size_tokens.view(h_BSD.shape[0], -1, h_BSD.shape[-1]),
                h_BSD,
            )
        else:
            # Variable count per row: scatter in-place (mutates h_BSD).
            h_BSD = h_BSD.masked_scatter_(size_tokens_mask.unsqueeze(-1), size_tokens)
        return h_BSD
|
| 443 |
+
|
| 444 |
+
    def decode_coords(self, h_BSD, labels):
        """Decode hidden states at coord-token label positions into bin logits.

        Returns shape (num_coord_tokens, 2, coord_out_dim // 2): one bin
        distribution per axis (the rearrange splits the head output in two).
        """
        B, S, D = h_BSD.shape
        coord_masks = labels == self.config.coord_token_id
        coord_tokens = torch.masked_select(h_BSD, coord_masks.unsqueeze(-1))
        coord_logits = self.coord_decoder(coord_tokens.reshape(-1, D))
        return E.rearrange(coord_logits, "b (two dim) -> b two dim", two=2)
|
| 450 |
+
|
| 451 |
+
    def decode_sizes(self, h_BSD, labels):
        """Decode hidden states at size-token label positions into bin logits.

        Mirrors decode_coords; returns (num_size_tokens, 2, size_out_dim // 2).
        """
        B, S, D = h_BSD.shape
        size_masks = labels == self.config.size_token_id
        size_tokens = torch.masked_select(h_BSD, size_masks.unsqueeze(-1))
        size_logits = self.size_decoder(size_tokens.reshape(-1, D))
        return E.rearrange(size_logits, "b (two dim) -> b two dim", two=2)
|
| 457 |
+
|
| 458 |
+
def process_sizes(self, logits):
    """Convert size-bin logits to linear sizes in (0, 1].

    The argmax bin index is mapped linearly onto the log2-size range
    [log2(1/num_bins), 0] and exponentiated, so bin 0 corresponds to a
    relative size of 1/num_bins and the last bin to 1.0.
    """
    num_bins = logits.shape[-1]
    pred = torch.argmax(logits, dim=-1).float() / (num_bins - 1)
    # Allocate the range bound on the logits' device: the original
    # torch.tensor(...) defaulted to CPU, breaking accelerator inputs.
    min_size = torch.log2(torch.tensor(1 / num_bins, device=logits.device))
    max_size = 0.0
    pred = pred * (max_size - min_size) + min_size
    return torch.pow(2.0, pred)
|
| 465 |
+
|
| 466 |
+
# -- Segmentation -------------------------------------------------------
|
| 467 |
+
|
| 468 |
+
def gather_img_tokens(self, h_BSD: T, tokens_BS: T, itok_masks_NTHW: T):
    """Regroup image-token hidden states into a dense (N, T, H, W, D) grid.

    Hidden vectors at positions where ``tokens_BS`` equals the image token
    id are selected in sequence order and scattered into the True slots of
    ``itok_masks_NTHW``; masked-out slots stay zero.
    """
    D = h_BSD.shape[-1]
    # Broadcasted views replace the original einops E.repeat calls, so no
    # full-size copies of the boolean masks are materialized.
    itok_masks_BSD = (tokens_BS == self.config.img_id).unsqueeze(-1).expand_as(h_BSD)
    itok_flatten = torch.masked_select(h_BSD, itok_masks_BSD)
    itok_masks_NTHWD = itok_masks_NTHW.unsqueeze(-1).expand(*itok_masks_NTHW.shape, D)
    itok_NTHWD = torch.zeros(itok_masks_NTHWD.shape, dtype=h_BSD.dtype, device=h_BSD.device)
    itok_NTHWD = itok_NTHWD.masked_scatter_(itok_masks_NTHWD, itok_flatten)
    return itok_NTHWD
|
| 476 |
+
|
| 477 |
+
def upsample_img_features(self, h_BSD: T, tokens_BS: T, pixel_values_NTHWC: T, pixel_mask_NTHW: T):
    """Produce high-resolution per-pixel features for segmentation.

    Low-resolution image-token features are gathered from the hidden
    states, projected by ``self.conv_segm``, and upsampled to the padded
    input resolution by ``self.itok_upsampler``, guided by the raw pixels.
    """
    device = h_BSD.device
    c = self.config
    # A patch is "valid" when any pixel it covers is unmasked.
    itok_masks_NTHW = E.reduce(
        pixel_mask_NTHW,
        "n (t pt) (h ph) (w pw) -> n t h w",
        reduction="any",
        pt=c.temporal_patch_size, ph=c.spatial_patch_size, pw=c.spatial_patch_size,
    )
    N, _, h, w = itok_masks_NTHW.shape
    _, _, H, W = pixel_mask_NTHW.shape
    # NOTE: the "n 1 h w c" patterns below assert a single temporal frame
    # per image — this path handles still images, not multi-frame video.
    images = E.rearrange(pixel_values_NTHWC, "n 1 h w c -> n c h w")
    lr_img_features = self.gather_img_tokens(h_BSD, tokens_BS, itok_masks_NTHW)
    lr_img_features = E.rearrange(lr_img_features, "n 1 h w d -> n d h w")
    lr_img_features = self.conv_segm(lr_img_features)

    upsampler_attn_mask = self.get_upsampler_attn_mask(H, W, h, w, device=device)
    hr_parts = []
    # Upsample one sample at a time — presumably to bound the upsampler's
    # peak memory; confirm before batching this loop.
    for i in range(N):
        hr_i = self.itok_upsampler(
            images=images[i:i + 1], features=lr_img_features[i:i + 1], attn_mask=upsampler_attn_mask,
        )
        hr_parts.append(hr_i)
    return torch.cat(hr_parts, dim=0) if N > 1 else hr_parts[0]
|
| 501 |
+
|
| 502 |
+
@staticmethod
def _mask_to_coco_rle(binary_masks: torch.Tensor) -> list[dict]:
    """Encode a stack of binary masks as COCO run-length encodings.

    Args:
        binary_masks: (C, H, W) boolean tensor, one mask per channel.

    Returns:
        A list of COCO RLE dicts ``{"counts": str, "size": [H, W]}``, one
        per channel that has at least one foreground pixel; all-empty
        channels are skipped, so the list may be shorter than C.
    """
    C, H, W = binary_masks.shape
    # Channels with no foreground at all are dropped from the output.
    has_any = E.reduce(binary_masks, "c h w -> c", reduction="any")
    # COCO RLE is column-major: flatten as (w h) so columns are contiguous.
    binary_col = E.rearrange(binary_masks, "c h w -> c (w h)")
    # A "change point" is any index whose value differs from its predecessor.
    diffs = binary_col[:, 1:] != binary_col[:, :-1]
    nz = torch.nonzero(diffs, as_tuple=False)
    first_vals = binary_col[:, 0]
    # Move everything the per-channel loop needs to host memory once.
    nz_cpu = nz.cpu().numpy()
    has_any_cpu = has_any.cpu().numpy()
    first_vals_cpu = first_vals.cpu().numpy()
    # Free the large intermediates before the Python-side loop.
    del diffs, nz, binary_col, first_vals, has_any
    N_px = H * W
    if nz_cpu.shape[0] > 0:
        # Group change points by mask index; np.unique's return_index gives
        # the start of each contiguous [start, end) group in sorted order.
        mask_ids = nz_cpu[:, 0]
        change_cols = nz_cpu[:, 1]
        uniq, grp_starts = np.unique(mask_ids, return_index=True)
        grp_ends = np.append(grp_starts[1:], len(mask_ids))
        mask_to_grp = {int(m): (int(gs), int(ge)) for m, gs, ge in zip(uniq, grp_starts, grp_ends)}
    else:
        change_cols = np.array([], dtype=np.intp)
        mask_to_grp = {}
    results = []
    for i in range(C):
        if not has_any_cpu[i]:
            continue
        if i in mask_to_grp:
            gs, ge = mask_to_grp[i]
            cidx = change_cols[gs:ge]
        else:
            # Foreground but no change points: the mask is all-True.
            cidx = np.array([], dtype=np.intp)
        # Run starts: index 0 plus one past every change point.
        num_runs = len(cidx) + 1
        starts = np.empty(num_runs, dtype=np.intp)
        starts[0] = 0
        if len(cidx) > 0:
            starts[1:] = cidx + 1
        counts = np.empty(num_runs, dtype=np.uint32)
        if num_runs > 1:
            counts[:-1] = np.diff(starts)
        counts[-1] = N_px - starts[-1]
        if first_vals_cpu[i]:
            # COCO counts must begin with a background run; prepend a
            # zero-length run when the mask starts with foreground.
            counts = np.concatenate([[0], counts])
        rle = {"counts": counts.tolist(), "size": [H, W]}
        # Compress the uncompressed counts into COCO's byte-string format.
        rle = mask_utils.frPyObjects(rle, H, W)
        rle["counts"] = rle["counts"].decode("utf-8")
        results.append(rle)
    return results
|
| 549 |
+
|
| 550 |
+
# -- Core forward --------------------------------------------------------
|
| 551 |
+
|
| 552 |
+
def forward(
    self,
    tokens: T,
    attention_mask: BlockMask,
    kv_cache,
    rope_pos_t: T | None = None,
    rope_pos_hw: T | None = None,
    pixel_values: T | None = None,
    pixel_mask: T | None = None,
    coord_xy: T | None = None,
    size_hw: T | None = None,
):
    """Single transformer pass over ``tokens`` using ``kv_cache``.

    Handles both prefill (S > 1; rope positions required) and single-token
    decode (S == 1; positions resumed from the cache). Coordinate, size and
    image embeddings are injected into the token embeddings before the
    layer stack.

    Returns:
        (logits_BSV, h_BSD): vocabulary logits and the final normalized
        hidden states.
    """
    B, S = tokens.size()
    c = self.config
    block_mask = attention_mask

    T_pos = kv_cache.get_pos()
    # A single-token step means we are in the autoregressive decode phase.
    is_prefill = S != 1

    if is_prefill:
        assert rope_pos_t is not None and rope_pos_hw is not None
        pos_t = rope_pos_t[:, T_pos:T_pos + S].long()
        # Remember the last temporal position so decode steps can resume.
        kv_cache.pos_t = pos_t[:, -1:]
        freqs_cis = self.freqs_cis[pos_t]
        rope_pos_hw = rope_pos_hw[:, T_pos:T_pos + S]
        freqs_cis_golden = apply_golden_freqs_cis_to_visual_pos(self.freqs_cis_golden, rope_pos_hw)
        block_mask.seq_lengths = (S, S)
    else:
        pos_t = kv_cache.increment_and_get_pos_t()
        freqs_cis = self.freqs_cis[pos_t]
        # No 2D (visual) rope during decode: generated tokens carry no
        # spatial position.
        freqs_cis_golden = None
        # Restrict the block mask to the row covering the current position
        # and shift its mask_mod so query index 0 maps to T_pos.
        block_idx = T_pos // block_mask.BLOCK_SIZE[0]
        block_mask = block_mask[:, :, block_idx]
        block_mask.seq_lengths = (S, T_pos + S)
        block_mask.mask_mod = offset_mask_mod(attention_mask.mask_mod, offset=T_pos)

    h_BSD = self.tok_embeddings(tokens)

    # Inject numeric coordinate / size embeddings at their marker tokens;
    # empty tensors make the encoders a no-op.
    coord_xy = coord_xy if coord_xy is not None else h_BSD.new_empty(0)
    size_hw = size_hw if size_hw is not None else h_BSD.new_empty(0)
    h_BSD = self._encode_coords(h_BSD, tokens, coord_xy)
    h_BSD = self._encode_sizes(h_BSD, tokens, size_hw)

    if pixel_values is not None:
        assert pixel_mask is not None
        pixel_values = pixel_values.to(self.dtype)
        pixel_mask = pixel_mask.to(self.dtype)
        # Fold spatio-temporal patches into per-patch feature vectors.
        pixel_patches_NLC = E.rearrange(
            pixel_values,
            "n (t pt) (h ph) (w pw) c -> n (t h w) (pt ph pw c)",
            pt=c.temporal_patch_size, ph=c.spatial_patch_size, pw=c.spatial_patch_size,
        )
        h_BSD = self._scatter_img_tokens_with_projector(h_BSD, pixel_patches_NLC, pixel_mask, tokens)

    for layer in self.layers.values():
        h_BSD = layer(
            h_BSD, freqs_cis=freqs_cis, freqs_cis_2d=freqs_cis_golden,
            pos_hw=rope_pos_hw, attention_masks=block_mask, kv_cache=kv_cache,
        )

    h_BSD = self.norm(h_BSD)
    logits_BSV = self.output(h_BSD)
    return logits_BSV, h_BSD
|
| 615 |
+
|
| 616 |
+
# -- Main API: generate --------------------------------------------------
|
| 617 |
+
|
| 618 |
+
@torch.inference_mode()
def generate(
    self,
    images,
    queries,
    max_new_tokens: int = 2048,
    temperature: float = 0.0,
    top_k: int | None = None,
    min_dimension: int = 256,
    max_dimension: int = 1024,
    compile: bool = True,
    seed: int | None = 42,
    segm_threshold: float = 0.5,
) -> list[list[dict]]:
    """
    Segment objects in images matching the given queries.

    Args:
        images: Single PIL Image (or path/URL) or list of them.
        queries: Single query string or list of query strings (one per image).
        max_new_tokens: Maximum generation steps.
        temperature: Sampling temperature (0.0 = greedy).
        top_k: Top-k sampling (None = disabled).
        min_dimension: Min image side after resize.
        max_dimension: Max image side after resize.
        compile: Whether to torch.compile on first call.
        seed: Random seed for reproducibility (None = non-deterministic).
        segm_threshold: Sigmoid threshold for binary mask.

    Returns:
        List (per image) of lists (per detection) of dicts::

            {
                "xy": {"x": float, "y": float},
                "hw": {"h": float, "w": float},
                "mask_rle": {"counts": str, "size": [H, W]},
            }
    """
    self._ensure_device_buffers()
    if compile:
        self.compile_model()

    # Normalize inputs
    if isinstance(images, (str, Path, Image.Image)):
        images = [images]
    if isinstance(queries, str):
        queries = [queries]
    assert len(images) == len(queries), "Must provide one query per image"

    device = self.device
    tokenizer = self._get_tokenizer()
    self._pad_token_id = tokenizer.convert_tokens_to_ids("<|pad|>")
    stop_token_ids = [self.config.eos_id, tokenizer.convert_tokens_to_ids("<|end_of_query|>")]

    # Store original image sizes for mask resizing
    pil_images = [load_image(img).convert("RGB") for img in images]
    original_sizes = [(img.height, img.width) for img in pil_images]

    # Build prompts
    image_prompt_pairs = [
        (img, f"<|image|>Segment these expressions in the image:<|start_of_query|>{q}<|REF_SEG|>")
        for img, q in zip(pil_images, queries)
    ]

    # Preprocess
    batch_inputs = process_batch(
        tokenizer, self.config, image_prompt_pairs,
        max_length=4096, min_dimension=min_dimension, max_dimension=max_dimension,
    )
    batch_inputs = {k: (v.to(device) if torch.is_tensor(v) else v) for k, v in batch_inputs.items()}

    tokens = batch_inputs["tokens"]
    B, L = tokens.size()
    # Round the total budget up to a multiple of the attention block size.
    block_size = 128
    S = (L + max_new_tokens + block_size - 1) // block_size * block_size
    assert S <= self.config.max_seq_len

    rng = torch.Generator(device).manual_seed(seed) if seed is not None else None

    kv_cache = KVCache(
        max_batch_size=B, max_seq_length=S, n_heads=self.config.n_heads,
        head_dim=self.config.head_dim, num_layers=self.config.n_layers,
    )

    padded_tokens = torch.full((B, S), self._pad_token_id, dtype=tokens.dtype, device=device)
    padded_tokens[:, :L] = tokens

    attention_mask = self.get_attention_mask(padded_tokens, max_len=S)

    # No coordinates/sizes yet at prefill: empty tensors make the encoders
    # a pass-through.
    all_xy, all_hw = self._extract_coords([[]])
    coord_xy = all_xy.to(device=device, dtype=self.dtype)
    size_hw_t = all_hw.to(device=device, dtype=self.dtype)

    # Prefill
    logits_BSV, h_BSD = self.forward(
        tokens=tokens, rope_pos_t=batch_inputs["pos_t"], rope_pos_hw=batch_inputs["pos_hw"],
        attention_mask=attention_mask, kv_cache=kv_cache,
        pixel_values=batch_inputs["pixel_values"], pixel_mask=batch_inputs["pixel_mask"],
        coord_xy=coord_xy, size_hw=size_hw_t,
    )

    # High-res pixel features are computed once from the prefill states and
    # reused for every <seg> token emitted later.
    hr_img_features = self.upsample_img_features(
        h_BSD, tokens, batch_inputs["pixel_values"], batch_inputs["pixel_mask"],
    )

    # Per-sample stream of decoded artifacts; the decode loop appends coord
    # dicts, size dicts, and mask-logit tensors in emission order.
    aux_output_B = [[] for _ in range(B)]
    stop_ids = torch.tensor(stop_token_ids).to(device)
    should_stop_B = torch.full((B,), False, dtype=torch.bool, device=device)

    # Decode loop
    while not torch.all(should_stop_B) and (pos := kv_cache.get_pos()) < S:
        tokens_B1 = sample_next_token(logits_BSV[:, -1], rng, temperature, top_k)

        if torch.any(should_stop_B):
            # Finished rows keep stepping with pad tokens so batch shapes
            # stay fixed while other rows continue.
            tokens_B1 = tokens_B1.clone()
            tokens_B1[should_stop_B, :] = self._pad_token_id
        padded_tokens[:, pos] = tokens_B1[:, -1]

        # Decode coords (with deduplication to avoid repeating the same location)
        coord_logits = self.decode_coords(h_BSD[:, -1:], tokens_B1)
        sample_w_coord = torch.where(tokens_B1 == self.config.coord_token_id)[0]

        num_bins = coord_logits.size(-1)
        coord_repeat_threshold = 0.01  # coords within 1% of image size are considered duplicates
        max_coord_attempts = 100
        xy_b2 = torch.zeros(B, 2, device=device, dtype=self.dtype)

        for i, b in enumerate(sample_w_coord.tolist()):
            logits_b = coord_logits[i].clone()  # (2, num_bins)
            existing_coords = [
                item for item in aux_output_B[b]
                if isinstance(item, dict) and "x" in item and "y" in item
            ]
            pred_x, pred_y = 0.0, 0.0
            for _ in range(max_coord_attempts):
                pred_bins = torch.argmax(logits_b, dim=-1)  # (2,)
                pred_x = pred_bins[0].item() / (num_bins - 1)
                pred_y = pred_bins[1].item() / (num_bins - 1)
                is_repeat = any(
                    abs(ec["x"] - pred_x) < coord_repeat_threshold
                    and abs(ec["y"] - pred_y) < coord_repeat_threshold
                    for ec in existing_coords
                )
                if not is_repeat:
                    break
                # Mask out the duplicate bin on both axes and re-argmax.
                logits_b[0, pred_bins[0]] = float("-inf")
                logits_b[1, pred_bins[1]] = float("-inf")
            xy_b2[b, 0] = pred_x
            xy_b2[b, 1] = pred_y
            aux_output_B[b].append({"x": pred_x, "y": pred_y})

        # Decode sizes
        size_logits = self.decode_sizes(h_BSD[:, -1:], tokens_B1)
        hw_b2 = self.process_sizes(size_logits)
        size_preds = [{"h": hw[0].item(), "w": hw[1].item()} for hw in hw_b2]
        sample_w_size = torch.where(tokens_B1 == self.config.size_token_id)[0]
        for i, b in enumerate(sample_w_size.tolist()):
            aux_output_B[b].append(size_preds[i])

        # Decode segmentation
        sample_w_segm = torch.where(tokens_B1 == self.config.seg_token_id)[0]
        segm_tokens = h_BSD[sample_w_segm, -1, :]
        segm_tokens = self.proj_segm(segm_tokens)
        # Dot product of each projected seg token with its sample's HR
        # feature map yields a (H, W) mask-logit map.
        segm_masks = torch.einsum("kdhw,kd->khw", hr_img_features[sample_w_segm], segm_tokens)
        for i, b in enumerate(sample_w_segm):
            aux_output_B[b].append(segm_masks[i])

        # Next step
        # NOTE(review): hw_b2 has one row per emitted size token (not per
        # batch row) — confirm _encode_sizes handles the count mismatch
        # when only some rows emitted a size token this step.
        logits_BSV, h_BSD = self.forward(
            tokens=tokens_B1, attention_mask=attention_mask,
            coord_xy=xy_b2.to(self.dtype), size_hw=hw_b2.to(self.dtype), kv_cache=kv_cache,
        )

        hit_stop_B = torch.isin(tokens_B1, stop_ids).any(dim=-1)
        should_stop_B = should_stop_B.logical_or(hit_stop_B)

    # Post-process: convert aux outputs to structured results with RLE masks
    pixel_mask_batch = batch_inputs["pixel_mask"][:, 0]  # (B, H, W)
    results = []
    for b in range(B):
        dets = self._postprocess_aux(
            aux_output_B[b], pixel_mask_batch[b], original_sizes[b], segm_threshold,
        )
        results.append(dets)

    return results
|
| 804 |
+
|
| 805 |
+
# -- Post-processing helpers ---------------------------------------------
|
| 806 |
+
|
| 807 |
+
def _extract_coords(self, coords_BO: list[list]):
|
| 808 |
+
all_xy, all_hw = [], []
|
| 809 |
+
for coords_O in coords_BO:
|
| 810 |
+
if not coords_O:
|
| 811 |
+
continue
|
| 812 |
+
for coords in coords_O:
|
| 813 |
+
for k, v in coords.items():
|
| 814 |
+
if k.startswith(("x", "y")):
|
| 815 |
+
all_xy.append(v)
|
| 816 |
+
elif k.startswith(("h", "w")):
|
| 817 |
+
all_hw.append(v)
|
| 818 |
+
return torch.tensor(all_xy), torch.tensor(all_hw)
|
| 819 |
+
|
| 820 |
+
@staticmethod
|
| 821 |
+
def _mask_nms(
|
| 822 |
+
binary_masks: list[torch.Tensor],
|
| 823 |
+
iou_threshold: float = 0.6,
|
| 824 |
+
nms_max_side: int = 256,
|
| 825 |
+
) -> list[int]:
|
| 826 |
+
"""
|
| 827 |
+
Fast vectorised mask NMS on binary (H, W) tensors.
|
| 828 |
+
|
| 829 |
+
Returns the list of kept indices ordered by descending mask score.
|
| 830 |
+
The IoU matrix is computed via a single batched matmul; suppression
|
| 831 |
+
uses one GPU boolean op per kept mask — no .item() in the inner loop.
|
| 832 |
+
"""
|
| 833 |
+
N = len(binary_masks)
|
| 834 |
+
if N <= 1:
|
| 835 |
+
return list(range(N))
|
| 836 |
+
|
| 837 |
+
device = binary_masks[0].device
|
| 838 |
+
base_h, base_w = binary_masks[0].shape
|
| 839 |
+
scale = min(1.0, nms_max_side / max(base_h, base_w))
|
| 840 |
+
th = max(1, int(round(base_h * scale)))
|
| 841 |
+
tw = max(1, int(round(base_w * scale)))
|
| 842 |
+
|
| 843 |
+
resized = []
|
| 844 |
+
for m in binary_masks:
|
| 845 |
+
m = m.float()
|
| 846 |
+
if m.shape != (th, tw):
|
| 847 |
+
m = F.interpolate(
|
| 848 |
+
m[None, None], size=(th, tw), mode="bilinear", align_corners=False
|
| 849 |
+
).squeeze()
|
| 850 |
+
resized.append(m)
|
| 851 |
+
|
| 852 |
+
binary = torch.stack(resized) # (N, th, tw)
|
| 853 |
+
flat = binary.view(N, -1) # (N, th*tw)
|
| 854 |
+
areas = flat.sum(dim=1) # (N,)
|
| 855 |
+
scores = areas # larger mask = higher priority
|
| 856 |
+
intersection = flat @ flat.T # (N, N)
|
| 857 |
+
union = areas[:, None] + areas[None, :] - intersection
|
| 858 |
+
iou = intersection / union.clamp(min=1)
|
| 859 |
+
|
| 860 |
+
order = scores.argsort(descending=True)
|
| 861 |
+
suppressed = torch.zeros(N, dtype=torch.bool, device=device)
|
| 862 |
+
keep = []
|
| 863 |
+
for idx in order.tolist():
|
| 864 |
+
if suppressed[idx]:
|
| 865 |
+
continue
|
| 866 |
+
keep.append(idx)
|
| 867 |
+
suppressed |= iou[idx] > iou_threshold
|
| 868 |
+
|
| 869 |
+
return keep
|
| 870 |
+
|
| 871 |
+
def _postprocess_aux(
    self,
    aux_list: list,
    pixel_mask_hw: T,
    orig_hw: tuple[int, int],
    threshold: float,
    nms_iou_threshold: float = 0.6,
) -> list[dict]:
    """Convert raw aux outputs into structured detections with RLE masks.

    ``aux_list`` is consumed as consecutive (coord dict, size dict,
    mask-logit tensor) triplets; a trailing incomplete triplet is dropped.
    Masks are cropped to the unpadded image region, resized to the original
    image size, thresholded, de-duplicated by mask NMS, and RLE-encoded.
    """
    orig_h, orig_w = orig_hw

    # Find active image region from pixel mask
    nonzero = torch.nonzero(pixel_mask_hw, as_tuple=False)
    if len(nonzero) > 0:
        min_h, min_w = nonzero.min(dim=0)[0]
        max_h, max_w = nonzero.max(dim=0)[0]
        act_h = (max_h - min_h + 1).item()
        act_w = (max_w - min_w + 1).item()
    else:
        # Fully padded mask: skip cropping entirely.
        min_h = min_w = 0
        act_h = act_w = None

    # Group into triplets: coord, size, mask — build binary masks first.
    # assumes the decode loop appended artifacts in strict coord/size/mask
    # order for every detection — TODO confirm for early-stopped samples.
    candidates = []
    step = 3  # coord, size, mask
    for i in range(0, len(aux_list), step):
        if i + 2 >= len(aux_list):
            break
        xy = aux_list[i]
        hw = aux_list[i + 1]
        mask_logits = aux_list[i + 2]
        if not isinstance(mask_logits, torch.Tensor):
            # Misaligned triplet (third slot is not a mask): skip it.
            continue

        # Crop to active region
        if act_h is not None and act_w is not None:
            mask_logits = mask_logits[min_h:min_h + act_h, min_w:min_w + act_w]

        # Resize to original image size
        mask_logits = mask_logits.unsqueeze(0).unsqueeze(0).float()
        mask_logits = F.interpolate(mask_logits, size=(orig_h, orig_w), mode="bilinear", align_corners=False)
        mask_logits = mask_logits.squeeze(0).squeeze(0)

        # Threshold
        binary_mask = (torch.sigmoid(mask_logits) > threshold).bool()
        candidates.append({"xy": xy, "hw": hw, "binary_mask": binary_mask})

    if not candidates:
        return []

    # NMS on binary masks before RLE encoding
    keep_indices = self._mask_nms(
        [c["binary_mask"] for c in candidates],
        iou_threshold=nms_iou_threshold,
    )
    candidates = [candidates[i] for i in keep_indices]

    # Encode survivors as COCO RLE
    detections = []
    for c in candidates:
        rle_list = self._mask_to_coco_rle(c["binary_mask"].unsqueeze(0))
        # An all-background mask encodes to an empty list; emit an empty RLE.
        mask_rle = rle_list[0] if rle_list else {"counts": "", "size": [orig_h, orig_w]}
        detections.append({"xy": c["xy"], "hw": c["hw"], "mask_rle": mask_rle})

    return detections
|
processing_falcon_perception.py
ADDED
|
@@ -0,0 +1,423 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import io
|
| 2 |
+
import math
|
| 3 |
+
|
| 4 |
+
import einops as E
|
| 5 |
+
import numpy as np
|
| 6 |
+
import requests
|
| 7 |
+
import torch
|
| 8 |
+
from PIL import Image
|
| 9 |
+
from transformers.image_processing_utils import BaseImageProcessor
|
| 10 |
+
from transformers.image_transforms import convert_to_rgb, resize
|
| 11 |
+
from transformers.image_utils import (
|
| 12 |
+
ImageInput,
|
| 13 |
+
get_image_size,
|
| 14 |
+
infer_channel_dimension_format,
|
| 15 |
+
to_numpy_array,
|
| 16 |
+
valid_images,
|
| 17 |
+
validate_preprocess_arguments,
|
| 18 |
+
)
|
| 19 |
+
|
| 20 |
+
# Default per-channel normalization constants; combined with the processor's
# 1/255 rescale these map pixel values into [-1, 1].
IMAGE_MEAN = [0.5, 0.5, 0.5]
IMAGE_STD = [0.5, 0.5, 0.5]
|
| 22 |
+
|
| 23 |
+
|
| 24 |
+
def load_image(image):
    """Coerce a heterogeneous image reference into a PIL Image.

    Accepts: None (passed through), an existing PIL Image, an http(s) URL,
    a local file path (a ``.npy`` path is loaded and its contents decoded
    as image bytes), raw numpy bytes, or a numpy array.

    Raises:
        TypeError: for any other input type.
    """
    if image is None:
        return None
    if isinstance(image, Image.Image):
        return image
    if isinstance(image, str):
        if image.startswith(("http://", "https://")):
            resp = requests.get(image, timeout=10)
            resp.raise_for_status()
            return Image.open(io.BytesIO(resp.content))
        if image.endswith(".npy"):
            # The .npy file is assumed to hold encoded image bytes.
            return Image.open(io.BytesIO(np.load(image)))
        return Image.open(image)
    if isinstance(image, np.bytes_):
        return Image.open(io.BytesIO(image))
    if isinstance(image, np.ndarray):
        return Image.fromarray(image)
    raise TypeError(f"Unknown image format {image}")
|
| 43 |
+
|
| 44 |
+
|
| 45 |
+
def load_images(images_input, min_dimension: int, max_dimension: int):
    """Load and size-normalize a sequence of image references.

    Each item is loaded via ``load_image`` and clamped into
    [min_dimension, max_dimension]; ``None`` input yields an empty list.
    """
    if images_input is None:
        return []
    loaded = []
    for item in images_input:
        picture = load_image(item)
        loaded.append(resize_image_if_necessary(picture, min_dimension, max_dimension))
    return loaded
|
| 53 |
+
|
| 54 |
+
|
| 55 |
+
def resize_image_if_necessary(
    image,
    shortest_dimension=224,
    longest_dimension=896,
):
    """Resize ``image`` so both sides land within [shortest, longest].

    An image already inside the range is returned unchanged. Otherwise the
    constraining side is pinned to the nearest bound, the other side is
    scaled to preserve aspect ratio, and the result is re-clamped to
    ``longest_dimension``.
    """
    width, height = image.size
    aspect = width / height

    within_bounds = (
        shortest_dimension <= width <= longest_dimension
        and shortest_dimension <= height <= longest_dimension
    )
    if within_bounds:
        return image

    portrait = width < height
    if width < shortest_dimension or height < shortest_dimension:
        # Upscale: pin the smaller side to the minimum.
        if portrait:
            new_w = shortest_dimension
            new_h = int(new_w / aspect)
        else:
            new_h = shortest_dimension
            new_w = int(new_h * aspect)
    else:
        # Downscale: pin the smaller side to the maximum.
        if portrait:
            new_w = longest_dimension
            new_h = int(new_w / aspect)
        else:
            new_h = longest_dimension
            new_w = int(new_h * aspect)

    # Re-clamp: scaling the other side may have pushed it past the maximum.
    if new_w > longest_dimension:
        new_w = longest_dimension
        new_h = int(new_w / aspect)
    if new_h > longest_dimension:
        new_h = longest_dimension
        new_w = int(new_h * aspect)

    return image.resize((new_w, new_h))
|
| 94 |
+
|
| 95 |
+
|
| 96 |
+
def smart_resize(
    image,
    factor: int,
    resample,
    input_data_format,
    min_pixels: int = 56 * 56,
    max_pixels: int = 14 * 14 * 4 * 1280,
):
    """Snap image dimensions to multiples of ``factor`` within a pixel budget.

    Both sides are rounded to the nearest multiple of ``factor``; if the
    resulting area falls outside [min_pixels, max_pixels], both sides are
    rescaled by a common ratio (floor when shrinking, ceil when growing).

    Raises:
        ValueError: if either side is below ``factor`` or the aspect ratio
            exceeds 200.
    """
    height, width = get_image_size(image, channel_dim=input_data_format)
    if height < factor or width < factor:
        raise ValueError(f"{height=} or {width=} must be larger than {factor=}")
    if max(height, width) / min(height, width) > 200:
        raise ValueError(
            f"absolute aspect ratio must be smaller than 200, got {max(height, width) / min(height, width)}"
        )
    target_h = round(height / factor) * factor
    target_w = round(width / factor) * factor
    if target_h * target_w > max_pixels:
        # Too many pixels: shrink both sides by one ratio, rounding down.
        ratio = np.sqrt((height * width) / max_pixels)
        target_h = math.floor(height / ratio / factor) * factor
        target_w = math.floor(width / ratio / factor) * factor
    elif target_h * target_w < min_pixels:
        # Too few pixels: grow both sides by one ratio, rounding up.
        ratio = np.sqrt(min_pixels / (height * width))
        target_h = math.ceil(height * ratio / factor) * factor
        target_w = math.ceil(width * ratio / factor) * factor
    return resize(
        image,
        size=(target_h, target_w),
        resample=resample,
        input_data_format=input_data_format,
    )
|
| 128 |
+
|
| 129 |
+
|
| 130 |
+
class ImageProcessor(BaseImageProcessor):
|
| 131 |
+
def __init__(
|
| 132 |
+
self,
|
| 133 |
+
patch_size,
|
| 134 |
+
merge_size,
|
| 135 |
+
do_resize: bool = True,
|
| 136 |
+
resample: Image.Resampling = Image.Resampling.BICUBIC,
|
| 137 |
+
do_rescale: bool = True,
|
| 138 |
+
rescale_factor: float = 1 / 255,
|
| 139 |
+
do_normalize: bool = True,
|
| 140 |
+
image_mean: float | list[float] | None = None,
|
| 141 |
+
image_std: float | list[float] | None = None,
|
| 142 |
+
do_convert_rgb: bool = True,
|
| 143 |
+
min_pixels: int = 56 * 56,
|
| 144 |
+
max_pixels: int = 28 * 28 * 1280,
|
| 145 |
+
**kwargs,
|
| 146 |
+
) -> None:
|
| 147 |
+
super().__init__(**kwargs)
|
| 148 |
+
self.do_resize = do_resize
|
| 149 |
+
self.resample = resample
|
| 150 |
+
self.do_rescale = do_rescale
|
| 151 |
+
self.rescale_factor = rescale_factor
|
| 152 |
+
self.do_normalize = do_normalize
|
| 153 |
+
self.image_mean = image_mean or IMAGE_MEAN
|
| 154 |
+
self.image_std = image_std or IMAGE_STD
|
| 155 |
+
self.min_pixels = min_pixels
|
| 156 |
+
self.max_pixels = max_pixels
|
| 157 |
+
self.patch_size = patch_size
|
| 158 |
+
self.merge_size = merge_size
|
| 159 |
+
self.size = {"min_pixels": min_pixels, "max_pixels": max_pixels}
|
| 160 |
+
self.do_convert_rgb = do_convert_rgb
|
| 161 |
+
validate_preprocess_arguments(
|
| 162 |
+
rescale_factor=self.rescale_factor,
|
| 163 |
+
do_normalize=self.do_normalize,
|
| 164 |
+
image_mean=self.image_mean,
|
| 165 |
+
image_std=self.image_std,
|
| 166 |
+
do_resize=self.do_resize,
|
| 167 |
+
size=self.size,
|
| 168 |
+
resample=self.resample,
|
| 169 |
+
)
|
| 170 |
+
|
| 171 |
+
def _preprocess(self, image: ImageInput, do_rescale=None, do_normalize=None):
|
| 172 |
+
if self.do_convert_rgb:
|
| 173 |
+
image = convert_to_rgb(image)
|
| 174 |
+
image = to_numpy_array(image)
|
| 175 |
+
input_data_format = infer_channel_dimension_format(image)
|
| 176 |
+
if self.do_resize:
|
| 177 |
+
image = smart_resize(
|
| 178 |
+
image,
|
| 179 |
+
factor=self.patch_size * self.merge_size,
|
| 180 |
+
resample=self.resample,
|
| 181 |
+
input_data_format=input_data_format,
|
| 182 |
+
min_pixels=self.min_pixels,
|
| 183 |
+
max_pixels=self.max_pixels,
|
| 184 |
+
)
|
| 185 |
+
if do_rescale or self.do_rescale:
|
| 186 |
+
image = self.rescale(image, scale=self.rescale_factor, input_data_format=input_data_format)
|
| 187 |
+
if do_normalize or self.do_normalize:
|
| 188 |
+
image = self.normalize(
|
| 189 |
+
image=image, mean=self.image_mean, std=self.image_std,
|
| 190 |
+
input_data_format=input_data_format,
|
| 191 |
+
)
|
| 192 |
+
return image
|
| 193 |
+
|
| 194 |
+
def preprocess(self, images: list[ImageInput] | None, do_rescale=None, do_normalize=None, **kwargs):
|
| 195 |
+
del kwargs
|
| 196 |
+
if images is None:
|
| 197 |
+
return []
|
| 198 |
+
images = [item for item in images if item is not None]
|
| 199 |
+
if not valid_images(images):
|
| 200 |
+
raise ValueError(
|
| 201 |
+
"Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
|
| 202 |
+
"torch.Tensor, tf.Tensor or jax.ndarray."
|
| 203 |
+
)
|
| 204 |
+
pixel_values = []
|
| 205 |
+
for image in images:
|
| 206 |
+
processed_image = self._preprocess(image, do_rescale, do_normalize)
|
| 207 |
+
processed_image = processed_image[None, ...]
|
| 208 |
+
pixel_values.append(processed_image)
|
| 209 |
+
return pixel_values
|
| 210 |
+
|
| 211 |
+
def batch_images_with_mask(self, pixel_values, max_image_height, max_image_width):
    """Pad per-image arrays to a common (T, H, W) and stack into one batch.

    Args:
        pixel_values: Iterable of numpy arrays shaped (T, H, W, 3), or None.
        max_image_height: Target height after padding.
        max_image_width: Target width after padding.

    Returns:
        Dict with ``pixel_values`` of shape (B, T_max, H_max, W_max, 3) and a
        long ``padding_mask`` of shape (B, T_max, H_max, W_max) with 1 on
        valid pixels, or ``None`` when there is nothing to batch.

    Raises:
        ValueError: If an image does not have exactly 3 channels.
    """
    if pixel_values is None:
        return None
    tensors = [torch.from_numpy(arr) for arr in pixel_values if arr is not None and len(arr) != 0]
    if len(tensors) == 0:
        return None
    max_temporal = max(t.shape[0] for t in tensors)

    padded_list, mask_list = [], []
    for img in tensors:
        time_steps, height, width, channels = img.shape
        if channels != 3:
            raise ValueError(f"Expected 3-channel RGB images, got {channels} channels.")
        # F.pad pads trailing dims first: (C, W, H, T); channels get no padding.
        pad = (0, 0, 0, max_image_width - width, 0, max_image_height - height, 0, max_temporal - time_steps)
        padded_list.append(torch.nn.functional.pad(img, pad))
        mask = torch.zeros((max_temporal, max_image_height, max_image_width), dtype=torch.long)
        mask[:time_steps, :height, :width] = 1
        mask_list.append(mask)

    return {"pixel_values": torch.stack(padded_list), "padding_mask": torch.stack(mask_list)}
|
| 234 |
+
|
| 235 |
+
|
| 236 |
+
# ---------------------------------------------------------------------------
|
| 237 |
+
# Positional encoding helpers
|
| 238 |
+
# ---------------------------------------------------------------------------
|
| 239 |
+
|
| 240 |
+
def _compute_image_spatial_positions(
    pixel_mask_THW: torch.Tensor,
    spatial_patch_size: int,
    temporal_patch_size: int = 1,
) -> tuple[torch.Tensor, torch.Tensor]:
    """Build normalized (h, w) coordinates for every spatial patch of one image.

    The pixel mask is pooled down to patch resolution, the widest row and
    tallest column of valid patches are measured, and an evenly-spaced grid
    with aspect-ratio-aware limits (``xlim * ylim == 1``) is laid over it.

    Returns:
        Flattened ``(hpos, wpos)`` tensors, one entry per spatial patch.
    """
    patch_mask = E.reduce(
        pixel_mask_THW,
        "(t tp) (h hp) (w wp) -> t h w",
        reduction="any",
        tp=temporal_patch_size,
        hp=spatial_patch_size,
        wp=spatial_patch_size,
    )
    # Widest run of valid patches per row / per column, across all frames.
    width = E.reduce(patch_mask.sum(dim=-1).int(), "t h -> ", reduction="max")
    height = E.reduce(patch_mask.sum(dim=-2).int(), "t w -> ", reduction="max")
    # Aspect-ratio-aware limits so that xlim * ylim == 1.
    xlim = torch.sqrt(width / height)
    ylim = torch.sqrt(height / width)
    xs = torch.linspace(-xlim, xlim, int(width))
    ys = torch.linspace(-ylim, ylim, int(height))
    wgrid, hgrid = torch.meshgrid(xs, ys, indexing="xy")
    return hgrid.flatten(), wgrid.flatten()
|
| 261 |
+
|
| 262 |
+
|
| 263 |
+
def _get_image_token_masks(tokens, config):
|
| 264 |
+
spatial_mask = tokens == config.img_id
|
| 265 |
+
no_increase_mask = (
|
| 266 |
+
spatial_mask
|
| 267 |
+
| (tokens == config.image_reg_1_token_id)
|
| 268 |
+
| (tokens == config.image_reg_2_token_id)
|
| 269 |
+
| (tokens == config.image_reg_3_token_id)
|
| 270 |
+
| (tokens == config.image_reg_4_token_id)
|
| 271 |
+
| (tokens == config.img_end_id)
|
| 272 |
+
)
|
| 273 |
+
return spatial_mask, no_increase_mask
|
| 274 |
+
|
| 275 |
+
|
| 276 |
+
def get_pos_thw(
    tokens: torch.Tensor,
    pixel_masks_NTHW: torch.Tensor,
    config,
    spatial_patch_size: int,
    temporal_patch_size: int = 1,
    pad_token_id: int = None,
):
    """Build temporal and spatial position tensors for a tokenized batch.

    Args:
        tokens: (B, S) token ids, already padded.
        pixel_masks_NTHW: (N, T, H, W) per-image pixel validity masks
            (N images total across the batch).
        config: Config carrying the special image token ids.
        spatial_patch_size: Side length of a spatial patch in pixels.
        temporal_patch_size: Frames per temporal patch.
        pad_token_id: Id of the padding token (required, keyword-preferred).

    Returns:
        ``(tpos_BS, hw_pos_BS2)``: long (B, S) temporal positions, and float
        (B, S, 2) stacked (h, w) positions with NaN on non-image tokens.
    """
    assert pad_token_id is not None
    assert tokens.ndim == 2
    assert pixel_masks_NTHW.ndim == 4

    # Patch-content tokens, and the superset of image tokens that must not
    # advance the temporal position counter (registers + end-of-image).
    spatial_img_token_mask_BS, no_increase_idx_img_token_mask_BS = _get_image_token_masks(tokens, config)

    # Per-image normalized (h, w) coordinates, concatenated in image order.
    # NOTE(review): assumes images occur in `tokens` in the same order as
    # `pixel_masks_NTHW` — confirm against the caller.
    hpos_parts, wpos_parts = [], []
    for i in range(pixel_masks_NTHW.shape[0]):
        h, w = _compute_image_spatial_positions(pixel_masks_NTHW[i], spatial_patch_size, temporal_patch_size)
        hpos_parts.append(h)
        wpos_parts.append(w)

    hpos_N = torch.cat(hpos_parts) if hpos_parts else torch.empty(0)
    wpos_N = torch.cat(wpos_parts) if wpos_parts else torch.empty(0)

    # The coordinate count must match the number of <img> tokens, otherwise
    # the masked_scatter_ below would silently misalign positions.
    expected_tokens = spatial_img_token_mask_BS.sum().item()
    actual_tokens = hpos_N.numel()
    assert actual_tokens == expected_tokens, (
        f"Mismatch between spatial image tokens ({expected_tokens}) and generated positions ({actual_tokens})."
    )

    # Scatter coordinates into the (B, S) grid; non-image tokens keep NaN,
    # which downstream code uses to identify text tokens.
    hpos_BS = torch.full_like(tokens, fill_value=torch.nan, dtype=torch.float, device=tokens.device)
    wpos_BS = torch.full_like(tokens, fill_value=torch.nan, dtype=torch.float, device=tokens.device)
    hpos_BS = hpos_BS.masked_scatter_(spatial_img_token_mask_BS, hpos_N)
    wpos_BS = wpos_BS.masked_scatter_(spatial_img_token_mask_BS, wpos_N)

    # Temporal positions: running count of "advancing" tokens; tokens in the
    # no-increase mask contribute zero to the cumulative sum. Pad positions
    # are zeroed out afterwards.
    tpos_BS = torch.ones_like(tokens, dtype=torch.float, device=tokens.device)
    tpos_BS[no_increase_idx_img_token_mask_BS] = 0
    tpos_BS = torch.cumsum(tpos_BS, dim=1) - 1
    tpos_BS[tokens == pad_token_id] = 0

    hw_pos_BS2 = torch.stack([hpos_BS, wpos_BS], dim=-1)
    return tpos_BS.long(), hw_pos_BS2
|
| 317 |
+
|
| 318 |
+
|
| 319 |
+
def calculate_image_tokens(image, patch_size, merge_size):
    """Number of patch tokens an image produces after spatial merging.

    Presumably height and width are already multiples of
    ``patch_size * merge_size`` (see ``smart_resize`` usage) — verify.
    """
    height, width = get_image_size(image)
    side = patch_size * merge_size
    return int((height * width) / (side * side))
|
| 322 |
+
|
| 323 |
+
|
| 324 |
+
def tokenize_inputs(prompt, images, tokenizer, config, patch_size, merge_size, max_length):
    """Tokenize ``prompt``, expanding each image placeholder into a token block.

    Every occurrence of the image token in ``prompt`` is replaced by
    ``[cls, reg_1..reg_4, <img> * n_patches, end]``. Image blocks that would
    push the sequence past ``max_length`` are dropped together with their
    images.

    Returns:
        ``(input_ids, selected_images)``: a LongTensor of token ids and the
        list of images whose blocks were actually inserted.
    """
    reg_ids = [
        config.image_reg_1_token_id,
        config.image_reg_2_token_id,
        config.image_reg_3_token_id,
        config.image_reg_4_token_id,
    ]

    if images is None or len(images) == 0:
        counts = []
    else:
        counts = [calculate_image_tokens(im, patch_size, merge_size) for im in images]

    image_token = tokenizer.convert_ids_to_tokens(config.img_id)
    chunks = [tokenizer.encode(part) for part in prompt.split(image_token)]

    def _interleave(texts, blocks):
        # c0, b0, c1, b1, ..., then drop the trailing block.
        return [piece for pair in zip(texts, blocks) for piece in pair][:-1]

    token_ids = []
    offset = 0
    bos_id = getattr(tokenizer, "bos_token_id", None)
    # The tokenizer may prepend BOS to every chunk: keep a single leading BOS
    # and strip it from each text chunk via ``offset``.
    if chunks and chunks[0] and bos_id is not None and chunks[0][0] == bos_id:
        offset = 1
        token_ids.append(chunks[0][0])

    image_blocks = [
        [config.image_cls_token_id, *reg_ids, *([config.img_id] * n), config.img_end_id]
        for n in counts
    ]
    if image_blocks and len(image_blocks) != len(chunks):
        image_blocks.append(image_blocks[-1])

    selected_images = []
    if not image_blocks:
        token_ids = chunks[0]
    else:
        for index, piece in enumerate(_interleave(chunks, image_blocks)):
            if index % 2 == 0:
                token_ids.extend(piece[offset:])
            elif (len(token_ids) + len(piece)) < max_length:
                token_ids.extend(piece)
                selected_images.append(images[index // 2])

    return torch.LongTensor(token_ids), selected_images
|
| 373 |
+
|
| 374 |
+
|
| 375 |
+
def process_batch(
    tokenizer,
    config,
    image_prompt_pairs,
    max_length,
    min_dimension,
    max_dimension,
    patch_size=16,
    merge_size=1,
):
    """
    Process a batch of images with text prompts.
    Uses LEFT PADDING for proper batch generation with causal models.

    Args:
        tokenizer: Tokenizer providing ``encode`` and token-id conversions.
        config: Config carrying the special image token ids.
        image_prompt_pairs: Iterable of ``(image, prompt)`` pairs; an image
            entry may resolve to ``None`` via ``load_image``.
        max_length: Maximum sequence length; overflowing image blocks are dropped.
        min_dimension / max_dimension: Resize bounds applied to each image.
        patch_size / merge_size: Vision patching parameters.

    Returns:
        Dict with batched ``tokens``, ``pixel_values``, ``pixel_mask``,
        positional tensors ``pos_t`` / ``pos_hw``, and ``pad_token_id``.
    """
    all_input_ids = []
    all_selected_images = []
    processor_local = ImageProcessor(patch_size, merge_size)

    for img_input, prompt in image_prompt_pairs:
        img = load_image(img_input)
        if img is not None:
            img = resize_image_if_necessary(img, min_dimension, max_dimension)
        # Fix: compare against None explicitly — ``if img`` relies on object
        # truthiness, which is ambiguous for array-like images and was
        # inconsistent with the ``img is not None`` check above.
        images = processor_local.preprocess(images=[img] if img is not None else [])
        input_ids, selected_images = tokenize_inputs(
            prompt, images, tokenizer, config, patch_size, merge_size, max_length,
        )
        all_input_ids.append(input_ids)
        all_selected_images.extend(selected_images)

    pad_token_id = tokenizer.convert_tokens_to_ids("<|pad|>")
    # Left padding so causal generation lines all sequences up on the right.
    padded_input_ids = torch.nn.utils.rnn.pad_sequence(
        all_input_ids, batch_first=True, padding_value=pad_token_id, padding_side="left",
    )

    processed = processor_local.batch_images_with_mask(all_selected_images, max_dimension, max_dimension)
    assert processed is not None

    pos_t, pos_hw = get_pos_thw(
        padded_input_ids, processed["padding_mask"], config, patch_size, pad_token_id=pad_token_id,
    )

    return {
        "tokens": padded_input_ids,
        "pixel_values": processed["pixel_values"],
        "pixel_mask": processed["padding_mask"],
        "pos_t": pos_t,
        "pos_hw": pos_hw,
        "pad_token_id": pad_token_id,
    }
|
rope.py
ADDED
|
@@ -0,0 +1,127 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import einops as E
|
| 2 |
+
import torch
|
| 3 |
+
|
| 4 |
+
|
| 5 |
+
def precompute_freqs_cis(dim: int, end: int, theta: float = 10000.0) -> torch.Tensor:
    """Precompute the rotary-embedding complex exponential table.

    For every position ``0..end-1`` and each of ``dim // 2`` frequency bands,
    produces the unit-magnitude complex number ``exp(i * pos * freq)``, where
    the band frequencies follow the standard geometric progression in
    ``theta``.

    Args:
        dim: Head dimension; ``dim // 2`` frequency bands are generated.
        end: Number of positions to precompute.
        theta: Base of the geometric frequency progression.

    Returns:
        Complex64 tensor of shape ``(end, dim // 2)``.
    """
    exponents = torch.arange(0, dim, 2)[: (dim // 2)].float() / dim
    band_freqs = 1.0 / (theta ** exponents)
    positions = torch.arange(end, device=band_freqs.device)
    angles = torch.outer(positions, band_freqs).float()
    # polar(1, angle) == exp(i * angle), yielding complex64 values.
    return torch.polar(torch.ones_like(angles), angles)
|
| 26 |
+
|
| 27 |
+
|
| 28 |
+
def apply_rotary_emb(
    xq: torch.Tensor,
    xk: torch.Tensor,
    freqs_cis: torch.Tensor,
) -> tuple[torch.Tensor, torch.Tensor]:
    """Apply 1D rotary embedding to query and key tensors.

    ``freqs_cis`` must already be gathered per position id, with shape
    ``(B, S, D//2)``; it is broadcast over the head axis.
    """
    def _to_complex(t: torch.Tensor) -> torch.Tensor:
        # Pair adjacent features into complex numbers: (..., D) -> (..., D//2).
        return torch.view_as_complex(t.float().reshape(*t.shape[:-1], -1, 2))

    q_complex = _to_complex(xq)
    k_complex = _to_complex(xk)
    assert freqs_cis.ndim == 3, (
        "Freqs_cis must be indexed by position ids already and has shape (B,S,D)"
    )
    rot = E.rearrange(freqs_cis, "b s d -> b s 1 d")
    q_rotated = torch.view_as_real(q_complex * rot).flatten(3)
    k_rotated = torch.view_as_real(k_complex * rot).flatten(3)
    return q_rotated.type_as(xq), k_rotated.type_as(xk)
|
| 43 |
+
|
| 44 |
+
|
| 45 |
+
###### 2D golden rope
|
| 46 |
+
"""
|
| 47 |
+
Dimension key:
|
| 48 |
+
B: batch size
|
| 49 |
+
S: number of tokens per sample, Seqlen
|
| 50 |
+
T: Number of selected Tokens
|
| 51 |
+
P: pos_dim
|
| 52 |
+
h: n_heads
|
| 53 |
+
d: head_dim
|
| 54 |
+
F: num_freqs == head_dim // 2
|
| 55 |
+
"""
|
| 56 |
+
|
| 57 |
+
|
| 58 |
+
def apply_golden_freqs_cis_to_visual_pos(freqs_hFP, pos_BSP) -> torch.Tensor:
    """Compute per-image-token complex rotations from 2D positions.

    Applied once per input batch; the cached result is passed to every layer.
    Avoids boolean indexing on symbolic tensors, so it is Torch-Inductor
    safe. Image tokens are those whose ``pos_BSP`` entries are all finite.
    """
    # Boolean mask -> integer indices (no unbacked shapes).
    is_image_BS = E.reduce(~torch.isnan(pos_BSP), 'b s p -> b s', reduction='all')
    batch_idx, seq_idx = torch.nonzero(is_image_BS, as_tuple=True)  # each: (N,)

    # Gather the positions of the selected tokens only.
    positions_tP = pos_BSP[batch_idx, seq_idx].float()  # (N, p)

    # Project positions onto the per-head frequency table -> angles.
    angles_thF = torch.einsum("tp,hfp->thf", positions_tP, freqs_hFP.float())

    # Unit-circle complex numbers encoding the rotations.
    return torch.polar(torch.ones_like(angles_thF), angles_thF)
|
| 77 |
+
|
| 78 |
+
|
| 79 |
+
def apply_golden_rotary_emb(input_BShd, freqs_cis_thF, pos_BSP) -> torch.Tensor:
    """Rotate only the image tokens of ``input_BShd`` by ``freqs_cis_thF``.

    Tokens with any NaN in ``pos_BSP`` are left untouched. Uses integer
    gather/scatter rather than boolean indexing, so it is Torch-Inductor safe.
    """
    is_image_BS = E.reduce(~torch.isnan(pos_BSP), 'b s p -> b s', reduction='all')
    batch_idx, seq_idx = torch.nonzero(is_image_BS, as_tuple=True)  # (N,)

    selected_thd = input_BShd[batch_idx, seq_idx].float()  # (N, h, d)
    even = selected_thd[..., 0::2]  # (N, h, F)
    odd = selected_thd[..., 1::2]  # (N, h, F)

    cos_thF = freqs_cis_thF.real
    sin_thF = freqs_cis_thF.imag

    # Complex multiply in real arithmetic: (a+ib)(c+id) = (ac-bd) + i(ad+bc).
    rotated = torch.empty_like(selected_thd)
    rotated[..., 0::2] = even * cos_thF - odd * sin_thF
    rotated[..., 1::2] = even * sin_thF + odd * cos_thF

    output_BShd = input_BShd.clone()
    output_BShd[batch_idx, seq_idx] = rotated.type_as(input_BShd)
    return output_BShd
|
| 107 |
+
|
| 108 |
+
|
| 109 |
+
def apply_3d_rotary_emb(
    xq: torch.Tensor,  # (B, S, H, D)
    xk: torch.Tensor,  # (B, S, H, D)
    freqs_cis: torch.Tensor,
    freqs_cis_2d: torch.Tensor | None,
    pos_hw: torch.Tensor | None,  # (B, S, 2) h/w positions, NaN on text tokens
) -> tuple[torch.Tensor, torch.Tensor]:
    """Apply combined temporal (1D) and spatial (2D) rotary embeddings.

    The head dimension is split in half: the first half is rotated with the
    standard 1D RoPE (``freqs_cis``); the second half is rotated with the 2D
    "golden" RoPE over image tokens (``freqs_cis_2d`` / ``pos_hw``). When the
    2D inputs are absent, the spatial half passes through unrotated.
    """
    # Removed unused ``B, S, H, D = xq.shape`` unpacking; also corrected the
    # pos_hw shape comment — get_pos_thw stacks exactly two coordinates.
    xq_t, xq_hw = xq.chunk(chunks=2, dim=-1)
    xk_t, xk_hw = xk.chunk(chunks=2, dim=-1)

    xq_t, xk_t = apply_rotary_emb(xq_t, xk_t, freqs_cis)
    if freqs_cis_2d is not None and pos_hw is not None:
        xq_hw = apply_golden_rotary_emb(xq_hw, freqs_cis_2d, pos_hw)
        xk_hw = apply_golden_rotary_emb(xk_hw, freqs_cis_2d, pos_hw)

    xq_out = torch.concat([xq_t, xq_hw], dim=-1).type_as(xq)
    xk_out = torch.concat([xk_t, xk_hw], dim=-1).type_as(xk)
    return xq_out, xk_out
|
special_tokens_map.json
ADDED
|
@@ -0,0 +1,380 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
{
|
| 2 |
+
"absence_token": "<|absence|>",
|
| 3 |
+
"additional_special_tokens": [
|
| 4 |
+
"<|pad|>",
|
| 5 |
+
">>ABSTRACT<<",
|
| 6 |
+
">>INTRODUCTION<<",
|
| 7 |
+
">>SUMMARY<<",
|
| 8 |
+
">>COMMENT<<",
|
| 9 |
+
">>ANSWER<<",
|
| 10 |
+
">>QUESTION<<",
|
| 11 |
+
">>DOMAIN<<",
|
| 12 |
+
">>PREFIX<<",
|
| 13 |
+
">>SUFFIX<<",
|
| 14 |
+
">>MIDDLE<<",
|
| 15 |
+
"<|finetune_right_pad_id|>",
|
| 16 |
+
"<|start_header_id|>",
|
| 17 |
+
"<|end_header_id|>",
|
| 18 |
+
"<|eom_id|>",
|
| 19 |
+
"<|eot_id|>",
|
| 20 |
+
"<|begin_of_text|>",
|
| 21 |
+
">>TITLE<<",
|
| 22 |
+
"<tool_response>",
|
| 23 |
+
"</tool_response>",
|
| 24 |
+
"<tool_call>",
|
| 25 |
+
"</tool_call>",
|
| 26 |
+
"<schema>",
|
| 27 |
+
"</schema>",
|
| 28 |
+
"<scratch_pad>",
|
| 29 |
+
"</scratch_pad>",
|
| 30 |
+
"<thinking>",
|
| 31 |
+
"</thinking>",
|
| 32 |
+
"<explanation>",
|
| 33 |
+
"</explanation>",
|
| 34 |
+
"<file_sep>",
|
| 35 |
+
"<repo_name>",
|
| 36 |
+
">>UNUSED_119<<",
|
| 37 |
+
">>UNUSED_120<<",
|
| 38 |
+
"<|image|>",
|
| 39 |
+
"<|image_row_sep|>",
|
| 40 |
+
"<|start_of_image|>",
|
| 41 |
+
"<|end_of_image|>",
|
| 42 |
+
"<|start_of_video|>",
|
| 43 |
+
"<|end_of_video|>",
|
| 44 |
+
"<|frame_sep|>",
|
| 45 |
+
"<|start_of_turn|>",
|
| 46 |
+
"<|end_of_turn|>",
|
| 47 |
+
"<|start_of_diffusion_query|>",
|
| 48 |
+
"<|end_of_diffusion_query|>",
|
| 49 |
+
"<|diffusion_query|>",
|
| 50 |
+
"<|object|>",
|
| 51 |
+
"<|coord|>",
|
| 52 |
+
"<|size|>",
|
| 53 |
+
"<|perceive|>",
|
| 54 |
+
"<|image_mask_token|>",
|
| 55 |
+
"<|image_cls|>",
|
| 56 |
+
"<|image_reg_1|>",
|
| 57 |
+
"<|image_reg_2|>",
|
| 58 |
+
"<|image_reg_3|>",
|
| 59 |
+
"<|image_reg_4|>",
|
| 60 |
+
"<|image_reg_5|>",
|
| 61 |
+
"<|image_reg_6|>",
|
| 62 |
+
"<|image_reg_7|>",
|
| 63 |
+
"<|image_reg_8|>",
|
| 64 |
+
"<|DET|>",
|
| 65 |
+
"<|POINTING|>",
|
| 66 |
+
"<|OCR_GROUNDING|>",
|
| 67 |
+
"<|OCR_DOC_PARSER|>",
|
| 68 |
+
"<|OCR_PLAIN|>",
|
| 69 |
+
"<|REF_SEG|>",
|
| 70 |
+
"<|POINT_REF_SEG|>",
|
| 71 |
+
"<|CAPTION|>",
|
| 72 |
+
"<|DETAILED_CAPTION|>",
|
| 73 |
+
"<|seg|>",
|
| 74 |
+
"<|end_of_query|>",
|
| 75 |
+
"<|start_of_query|>",
|
| 76 |
+
"<|task_sep|>",
|
| 77 |
+
"<|SEMANTIC_SEG_TASK|>",
|
| 78 |
+
"<|semantic_seg|>",
|
| 79 |
+
"<|presence|>",
|
| 80 |
+
"<|absence|>",
|
| 81 |
+
">>UNUSED_258<<",
|
| 82 |
+
">>UNUSED_259<<",
|
| 83 |
+
">>UNUSED_260<<",
|
| 84 |
+
">>UNUSED_261<<",
|
| 85 |
+
">>UNUSED_262<<",
|
| 86 |
+
">>UNUSED_263<<",
|
| 87 |
+
">>UNUSED_264<<",
|
| 88 |
+
">>UNUSED_265<<",
|
| 89 |
+
">>UNUSED_266<<",
|
| 90 |
+
">>UNUSED_267<<",
|
| 91 |
+
">>UNUSED_268<<",
|
| 92 |
+
">>UNUSED_269<<",
|
| 93 |
+
">>UNUSED_270<<",
|
| 94 |
+
">>UNUSED_271<<",
|
| 95 |
+
">>UNUSED_272<<",
|
| 96 |
+
">>UNUSED_273<<",
|
| 97 |
+
">>UNUSED_274<<",
|
| 98 |
+
">>UNUSED_275<<",
|
| 99 |
+
">>UNUSED_276<<",
|
| 100 |
+
">>UNUSED_277<<",
|
| 101 |
+
">>UNUSED_278<<",
|
| 102 |
+
">>UNUSED_279<<",
|
| 103 |
+
">>UNUSED_280<<",
|
| 104 |
+
">>UNUSED_281<<",
|
| 105 |
+
">>UNUSED_282<<",
|
| 106 |
+
">>UNUSED_283<<",
|
| 107 |
+
">>UNUSED_284<<",
|
| 108 |
+
">>UNUSED_285<<",
|
| 109 |
+
">>UNUSED_286<<",
|
| 110 |
+
">>UNUSED_287<<",
|
| 111 |
+
">>UNUSED_288<<",
|
| 112 |
+
">>UNUSED_289<<",
|
| 113 |
+
">>UNUSED_290<<",
|
| 114 |
+
">>UNUSED_291<<",
|
| 115 |
+
">>UNUSED_292<<",
|
| 116 |
+
">>UNUSED_293<<",
|
| 117 |
+
">>UNUSED_294<<",
|
| 118 |
+
">>UNUSED_295<<",
|
| 119 |
+
">>UNUSED_296<<",
|
| 120 |
+
">>UNUSED_297<<",
|
| 121 |
+
">>UNUSED_298<<",
|
| 122 |
+
">>UNUSED_299<<",
|
| 123 |
+
">>UNUSED_300<<",
|
| 124 |
+
">>UNUSED_301<<",
|
| 125 |
+
">>UNUSED_302<<",
|
| 126 |
+
">>UNUSED_303<<",
|
| 127 |
+
">>UNUSED_304<<",
|
| 128 |
+
">>UNUSED_305<<",
|
| 129 |
+
">>UNUSED_306<<",
|
| 130 |
+
">>UNUSED_307<<",
|
| 131 |
+
">>UNUSED_308<<",
|
| 132 |
+
">>UNUSED_309<<",
|
| 133 |
+
">>UNUSED_310<<",
|
| 134 |
+
">>UNUSED_311<<",
|
| 135 |
+
">>UNUSED_312<<",
|
| 136 |
+
">>UNUSED_313<<",
|
| 137 |
+
">>UNUSED_314<<",
|
| 138 |
+
">>UNUSED_315<<",
|
| 139 |
+
">>UNUSED_316<<",
|
| 140 |
+
">>UNUSED_317<<",
|
| 141 |
+
">>UNUSED_318<<",
|
| 142 |
+
">>UNUSED_319<<",
|
| 143 |
+
">>UNUSED_320<<",
|
| 144 |
+
">>UNUSED_321<<",
|
| 145 |
+
">>UNUSED_322<<",
|
| 146 |
+
">>UNUSED_323<<",
|
| 147 |
+
">>UNUSED_324<<",
|
| 148 |
+
">>UNUSED_325<<",
|
| 149 |
+
">>UNUSED_326<<",
|
| 150 |
+
">>UNUSED_327<<",
|
| 151 |
+
">>UNUSED_328<<",
|
| 152 |
+
">>UNUSED_329<<",
|
| 153 |
+
">>UNUSED_330<<",
|
| 154 |
+
">>UNUSED_331<<",
|
| 155 |
+
">>UNUSED_332<<",
|
| 156 |
+
">>UNUSED_333<<",
|
| 157 |
+
">>UNUSED_334<<",
|
| 158 |
+
">>UNUSED_335<<",
|
| 159 |
+
">>UNUSED_336<<",
|
| 160 |
+
">>UNUSED_337<<",
|
| 161 |
+
">>UNUSED_338<<",
|
| 162 |
+
">>UNUSED_339<<",
|
| 163 |
+
">>UNUSED_340<<",
|
| 164 |
+
">>UNUSED_341<<",
|
| 165 |
+
">>UNUSED_342<<",
|
| 166 |
+
">>UNUSED_343<<",
|
| 167 |
+
">>UNUSED_344<<",
|
| 168 |
+
">>UNUSED_345<<",
|
| 169 |
+
">>UNUSED_346<<",
|
| 170 |
+
">>UNUSED_347<<",
|
| 171 |
+
">>UNUSED_348<<",
|
| 172 |
+
">>UNUSED_349<<",
|
| 173 |
+
">>UNUSED_350<<",
|
| 174 |
+
">>UNUSED_351<<",
|
| 175 |
+
">>UNUSED_352<<",
|
| 176 |
+
">>UNUSED_353<<",
|
| 177 |
+
">>UNUSED_354<<",
|
| 178 |
+
">>UNUSED_355<<",
|
| 179 |
+
">>UNUSED_356<<",
|
| 180 |
+
">>UNUSED_357<<",
|
| 181 |
+
">>UNUSED_358<<",
|
| 182 |
+
">>UNUSED_359<<",
|
| 183 |
+
">>UNUSED_360<<",
|
| 184 |
+
">>UNUSED_361<<",
|
| 185 |
+
">>UNUSED_362<<",
|
| 186 |
+
">>UNUSED_363<<",
|
| 187 |
+
">>UNUSED_364<<",
|
| 188 |
+
">>UNUSED_365<<",
|
| 189 |
+
">>UNUSED_366<<",
|
| 190 |
+
">>UNUSED_367<<",
|
| 191 |
+
">>UNUSED_368<<",
|
| 192 |
+
">>UNUSED_369<<",
|
| 193 |
+
">>UNUSED_370<<",
|
| 194 |
+
">>UNUSED_371<<",
|
| 195 |
+
">>UNUSED_372<<",
|
| 196 |
+
">>UNUSED_373<<",
|
| 197 |
+
">>UNUSED_374<<",
|
| 198 |
+
">>UNUSED_375<<",
|
| 199 |
+
">>UNUSED_376<<",
|
| 200 |
+
">>UNUSED_377<<",
|
| 201 |
+
">>UNUSED_378<<",
|
| 202 |
+
">>UNUSED_379<<",
|
| 203 |
+
">>UNUSED_380<<",
|
| 204 |
+
">>UNUSED_381<<",
|
| 205 |
+
">>UNUSED_382<<",
|
| 206 |
+
">>UNUSED_383<<",
|
| 207 |
+
">>UNUSED_384<<",
|
| 208 |
+
">>UNUSED_385<<",
|
| 209 |
+
">>UNUSED_386<<",
|
| 210 |
+
">>UNUSED_387<<",
|
| 211 |
+
">>UNUSED_388<<",
|
| 212 |
+
">>UNUSED_389<<",
|
| 213 |
+
">>UNUSED_390<<",
|
| 214 |
+
">>UNUSED_391<<",
|
| 215 |
+
">>UNUSED_392<<",
|
| 216 |
+
">>UNUSED_393<<",
|
| 217 |
+
">>UNUSED_394<<",
|
| 218 |
+
">>UNUSED_395<<",
|
| 219 |
+
">>UNUSED_396<<",
|
| 220 |
+
">>UNUSED_397<<",
|
| 221 |
+
">>UNUSED_398<<",
|
| 222 |
+
">>UNUSED_399<<",
|
| 223 |
+
">>UNUSED_400<<",
|
| 224 |
+
">>UNUSED_401<<",
|
| 225 |
+
">>UNUSED_402<<",
|
| 226 |
+
">>UNUSED_403<<",
|
| 227 |
+
">>UNUSED_404<<",
|
| 228 |
+
">>UNUSED_405<<",
|
| 229 |
+
">>UNUSED_406<<",
|
| 230 |
+
">>UNUSED_407<<",
|
| 231 |
+
">>UNUSED_408<<",
|
| 232 |
+
">>UNUSED_409<<",
|
| 233 |
+
">>UNUSED_410<<",
|
| 234 |
+
">>UNUSED_411<<",
|
| 235 |
+
">>UNUSED_412<<",
|
| 236 |
+
">>UNUSED_413<<",
|
| 237 |
+
">>UNUSED_414<<",
|
| 238 |
+
">>UNUSED_415<<",
|
| 239 |
+
">>UNUSED_416<<",
|
| 240 |
+
">>UNUSED_417<<",
|
| 241 |
+
">>UNUSED_418<<",
|
| 242 |
+
">>UNUSED_419<<",
|
| 243 |
+
">>UNUSED_420<<",
|
| 244 |
+
">>UNUSED_421<<",
|
| 245 |
+
">>UNUSED_422<<",
|
| 246 |
+
">>UNUSED_423<<",
|
| 247 |
+
">>UNUSED_424<<",
|
| 248 |
+
">>UNUSED_425<<",
|
| 249 |
+
">>UNUSED_426<<",
|
| 250 |
+
">>UNUSED_427<<",
|
| 251 |
+
">>UNUSED_428<<",
|
| 252 |
+
">>UNUSED_429<<",
|
| 253 |
+
">>UNUSED_430<<",
|
| 254 |
+
">>UNUSED_431<<",
|
| 255 |
+
">>UNUSED_432<<",
|
| 256 |
+
">>UNUSED_433<<",
|
| 257 |
+
">>UNUSED_434<<",
|
| 258 |
+
">>UNUSED_435<<",
|
| 259 |
+
">>UNUSED_436<<",
|
| 260 |
+
">>UNUSED_437<<",
|
| 261 |
+
">>UNUSED_438<<",
|
| 262 |
+
">>UNUSED_439<<",
|
| 263 |
+
">>UNUSED_440<<",
|
| 264 |
+
">>UNUSED_441<<",
|
| 265 |
+
">>UNUSED_442<<",
|
| 266 |
+
">>UNUSED_443<<",
|
| 267 |
+
">>UNUSED_444<<",
|
| 268 |
+
">>UNUSED_445<<",
|
| 269 |
+
">>UNUSED_446<<",
|
| 270 |
+
">>UNUSED_447<<",
|
| 271 |
+
">>UNUSED_448<<",
|
| 272 |
+
">>UNUSED_449<<",
|
| 273 |
+
">>UNUSED_450<<",
|
| 274 |
+
">>UNUSED_451<<",
|
| 275 |
+
">>UNUSED_452<<",
|
| 276 |
+
">>UNUSED_453<<",
|
| 277 |
+
">>UNUSED_454<<",
|
| 278 |
+
">>UNUSED_455<<",
|
| 279 |
+
">>UNUSED_456<<",
|
| 280 |
+
">>UNUSED_457<<",
|
| 281 |
+
">>UNUSED_458<<",
|
| 282 |
+
">>UNUSED_459<<",
|
| 283 |
+
">>UNUSED_460<<",
|
| 284 |
+
">>UNUSED_461<<",
|
| 285 |
+
">>UNUSED_462<<",
|
| 286 |
+
">>UNUSED_463<<",
|
| 287 |
+
">>UNUSED_464<<",
|
| 288 |
+
">>UNUSED_465<<",
|
| 289 |
+
">>UNUSED_466<<",
|
| 290 |
+
">>UNUSED_467<<",
|
| 291 |
+
">>UNUSED_468<<",
|
| 292 |
+
">>UNUSED_469<<",
|
| 293 |
+
">>UNUSED_470<<",
|
| 294 |
+
">>UNUSED_471<<",
|
| 295 |
+
">>UNUSED_472<<",
|
| 296 |
+
">>UNUSED_473<<",
|
| 297 |
+
">>UNUSED_474<<",
|
| 298 |
+
">>UNUSED_475<<",
|
| 299 |
+
">>UNUSED_476<<",
|
| 300 |
+
">>UNUSED_477<<",
|
| 301 |
+
">>UNUSED_478<<",
|
| 302 |
+
">>UNUSED_479<<",
|
| 303 |
+
">>UNUSED_480<<",
|
| 304 |
+
">>UNUSED_481<<",
|
| 305 |
+
">>UNUSED_482<<",
|
| 306 |
+
">>UNUSED_483<<",
|
| 307 |
+
">>UNUSED_484<<",
|
| 308 |
+
">>UNUSED_485<<",
|
| 309 |
+
">>UNUSED_486<<",
|
| 310 |
+
">>UNUSED_487<<",
|
| 311 |
+
">>UNUSED_488<<",
|
| 312 |
+
">>UNUSED_489<<",
|
| 313 |
+
">>UNUSED_490<<",
|
| 314 |
+
">>UNUSED_491<<",
|
| 315 |
+
">>UNUSED_492<<",
|
| 316 |
+
">>UNUSED_493<<",
|
| 317 |
+
">>UNUSED_494<<",
|
| 318 |
+
">>UNUSED_495<<",
|
| 319 |
+
">>UNUSED_496<<",
|
| 320 |
+
">>UNUSED_497<<",
|
| 321 |
+
">>UNUSED_498<<",
|
| 322 |
+
">>UNUSED_499<<",
|
| 323 |
+
">>UNUSED_500<<",
|
| 324 |
+
">>UNUSED_501<<",
|
| 325 |
+
">>UNUSED_502<<",
|
| 326 |
+
">>UNUSED_503<<",
|
| 327 |
+
">>UNUSED_504<<",
|
| 328 |
+
">>UNUSED_505<<",
|
| 329 |
+
">>UNUSED_506<<",
|
| 330 |
+
">>UNUSED_507<<",
|
| 331 |
+
">>UNUSED_508<<",
|
| 332 |
+
">>UNUSED_509<<",
|
| 333 |
+
">>UNUSED_510<<",
|
| 334 |
+
">>UNUSED_511<<"
|
| 335 |
+
],
|
| 336 |
+
"caption_token": "<|CAPTION|>",
|
| 337 |
+
"coord_token": "<|coord|>",
|
| 338 |
+
"det_token": "<|DET|>",
|
| 339 |
+
"detailed_caption_token": "<|DETAILED_CAPTION|>",
|
| 340 |
+
"diffusion_query_token": "<|diffusion_query|>",
|
| 341 |
+
"end_of_diffusion_query_token": "<|end_of_diffusion_query|>",
|
| 342 |
+
"end_of_image_token": "<|end_of_image|>",
|
| 343 |
+
"end_of_query_token": "<|end_of_query|>",
|
| 344 |
+
"end_of_turn_token": "<|end_of_turn|>",
|
| 345 |
+
"end_of_video_token": "<|end_of_video|>",
|
| 346 |
+
"eos_token": "<|end_of_text|>",
|
| 347 |
+
"frame_sep_token": "<|frame_sep|>",
|
| 348 |
+
"image_cls_token": "<|image_cls|>",
|
| 349 |
+
"image_mask_token": "<|image_mask_token|>",
|
| 350 |
+
"image_reg_1_token": "<|image_reg_1|>",
|
| 351 |
+
"image_reg_2_token": "<|image_reg_2|>",
|
| 352 |
+
"image_reg_3_token": "<|image_reg_3|>",
|
| 353 |
+
"image_reg_4_token": "<|image_reg_4|>",
|
| 354 |
+
"image_reg_5_token": "<|image_reg_5|>",
|
| 355 |
+
"image_reg_6_token": "<|image_reg_6|>",
|
| 356 |
+
"image_reg_7_token": "<|image_reg_7|>",
|
| 357 |
+
"image_reg_8_token": "<|image_reg_8|>",
|
| 358 |
+
"image_row_sep_token": "<|image_row_sep|>",
|
| 359 |
+
"image_token": "<|image|>",
|
| 360 |
+
"object_token": "<|object|>",
|
| 361 |
+
"ocr_doc_parser_token": "<|OCR_DOC_PARSER|>",
|
| 362 |
+
"ocr_grounding_token": "<|OCR_GROUNDING|>",
|
| 363 |
+
"ocr_plain_token": "<|OCR_PLAIN|>",
|
| 364 |
+
"pad_token": "<|pad|>",
|
| 365 |
+
"perceive_token": "<|perceive|>",
|
| 366 |
+
"point_ref_seg_token": "<|POINT_REF_SEG|>",
|
| 367 |
+
"pointing_token": "<|POINTING|>",
|
| 368 |
+
"presence_token": "<|presence|>",
|
| 369 |
+
"ref_seg_token": "<|REF_SEG|>",
|
| 370 |
+
"seg_token": "<|seg|>",
|
| 371 |
+
"semantic_seg_task_token": "<|SEMANTIC_SEG_TASK|>",
|
| 372 |
+
"semantic_seg_token": "<|semantic_seg|>",
|
| 373 |
+
"size_token": "<|size|>",
|
| 374 |
+
"start_of_diffusion_query_token": "<|start_of_diffusion_query|>",
|
| 375 |
+
"start_of_image_token": "<|start_of_image|>",
|
| 376 |
+
"start_of_query_token": "<|start_of_query|>",
|
| 377 |
+
"start_of_turn_token": "<|start_of_turn|>",
|
| 378 |
+
"start_of_video_token": "<|start_of_video|>",
|
| 379 |
+
"task_sep_token": "<|task_sep|>"
|
| 380 |
+
}
|
tokenizer.json
ADDED
|
The diff for this file is too large to render.
See raw diff
|
|
|
tokenizer_config.json
ADDED
|
@@ -0,0 +1,102 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
{
|
| 2 |
+
"absence_token": "<|absence|>",
|
| 3 |
+
"backend": "tokenizers",
|
| 4 |
+
"caption_token": "<|CAPTION|>",
|
| 5 |
+
"clean_up_tokenization_spaces": true,
|
| 6 |
+
"coord_token": "<|coord|>",
|
| 7 |
+
"det_token": "<|DET|>",
|
| 8 |
+
"detailed_caption_token": "<|DETAILED_CAPTION|>",
|
| 9 |
+
"diffusion_query_token": "<|diffusion_query|>",
|
| 10 |
+
"end_of_diffusion_query_token": "<|end_of_diffusion_query|>",
|
| 11 |
+
"end_of_image_token": "<|end_of_image|>",
|
| 12 |
+
"end_of_query_token": "<|end_of_query|>",
|
| 13 |
+
"end_of_turn_token": "<|end_of_turn|>",
|
| 14 |
+
"end_of_video_token": "<|end_of_video|>",
|
| 15 |
+
"eos_token": "<|end_of_text|>",
|
| 16 |
+
"frame_sep_token": "<|frame_sep|>",
|
| 17 |
+
"image_cls_token": "<|image_cls|>",
|
| 18 |
+
"image_mask_token": "<|image_mask_token|>",
|
| 19 |
+
"image_reg_1_token": "<|image_reg_1|>",
|
| 20 |
+
"image_reg_2_token": "<|image_reg_2|>",
|
| 21 |
+
"image_reg_3_token": "<|image_reg_3|>",
|
| 22 |
+
"image_reg_4_token": "<|image_reg_4|>",
|
| 23 |
+
"image_reg_5_token": "<|image_reg_5|>",
|
| 24 |
+
"image_reg_6_token": "<|image_reg_6|>",
|
| 25 |
+
"image_reg_7_token": "<|image_reg_7|>",
|
| 26 |
+
"image_reg_8_token": "<|image_reg_8|>",
|
| 27 |
+
"image_row_sep_token": "<|image_row_sep|>",
|
| 28 |
+
"image_token": "<|image|>",
|
| 29 |
+
"is_local": true,
|
| 30 |
+
"model_input_names": [
|
| 31 |
+
"input_ids",
|
| 32 |
+
"attention_mask"
|
| 33 |
+
],
|
| 34 |
+
"model_max_length": 1000000000000000019884624838656,
|
| 35 |
+
"model_specific_special_tokens": {
|
| 36 |
+
"absence_token": "<|absence|>",
|
| 37 |
+
"caption_token": "<|CAPTION|>",
|
| 38 |
+
"coord_token": "<|coord|>",
|
| 39 |
+
"det_token": "<|DET|>",
|
| 40 |
+
"detailed_caption_token": "<|DETAILED_CAPTION|>",
|
| 41 |
+
"diffusion_query_token": "<|diffusion_query|>",
|
| 42 |
+
"end_of_diffusion_query_token": "<|end_of_diffusion_query|>",
|
| 43 |
+
"end_of_image_token": "<|end_of_image|>",
|
| 44 |
+
"end_of_query_token": "<|end_of_query|>",
|
| 45 |
+
"end_of_turn_token": "<|end_of_turn|>",
|
| 46 |
+
"end_of_video_token": "<|end_of_video|>",
|
| 47 |
+
"frame_sep_token": "<|frame_sep|>",
|
| 48 |
+
"image_cls_token": "<|image_cls|>",
|
| 49 |
+
"image_mask_token": "<|image_mask_token|>",
|
| 50 |
+
"image_reg_1_token": "<|image_reg_1|>",
|
| 51 |
+
"image_reg_2_token": "<|image_reg_2|>",
|
| 52 |
+
"image_reg_3_token": "<|image_reg_3|>",
|
| 53 |
+
"image_reg_4_token": "<|image_reg_4|>",
|
| 54 |
+
"image_reg_5_token": "<|image_reg_5|>",
|
| 55 |
+
"image_reg_6_token": "<|image_reg_6|>",
|
| 56 |
+
"image_reg_7_token": "<|image_reg_7|>",
|
| 57 |
+
"image_reg_8_token": "<|image_reg_8|>",
|
| 58 |
+
"image_row_sep_token": "<|image_row_sep|>",
|
| 59 |
+
"image_token": "<|image|>",
|
| 60 |
+
"object_token": "<|object|>",
|
| 61 |
+
"ocr_doc_parser_token": "<|OCR_DOC_PARSER|>",
|
| 62 |
+
"ocr_grounding_token": "<|OCR_GROUNDING|>",
|
| 63 |
+
"ocr_plain_token": "<|OCR_PLAIN|>",
|
| 64 |
+
"pad_token": "<|pad|>",
|
| 65 |
+
"perceive_token": "<|perceive|>",
|
| 66 |
+
"point_ref_seg_token": "<|POINT_REF_SEG|>",
|
| 67 |
+
"pointing_token": "<|POINTING|>",
|
| 68 |
+
"presence_token": "<|presence|>",
|
| 69 |
+
"ref_seg_token": "<|REF_SEG|>",
|
| 70 |
+
"seg_token": "<|seg|>",
|
| 71 |
+
"semantic_seg_task_token": "<|SEMANTIC_SEG_TASK|>",
|
| 72 |
+
"semantic_seg_token": "<|semantic_seg|>",
|
| 73 |
+
"size_token": "<|size|>",
|
| 74 |
+
"start_of_diffusion_query_token": "<|start_of_diffusion_query|>",
|
| 75 |
+
"start_of_image_token": "<|start_of_image|>",
|
| 76 |
+
"start_of_query_token": "<|start_of_query|>",
|
| 77 |
+
"start_of_turn_token": "<|start_of_turn|>",
|
| 78 |
+
"start_of_video_token": "<|start_of_video|>",
|
| 79 |
+
"task_sep_token": "<|task_sep|>"
|
| 80 |
+
},
|
| 81 |
+
"object_token": "<|object|>",
|
| 82 |
+
"ocr_doc_parser_token": "<|OCR_DOC_PARSER|>",
|
| 83 |
+
"ocr_grounding_token": "<|OCR_GROUNDING|>",
|
| 84 |
+
"ocr_plain_token": "<|OCR_PLAIN|>",
|
| 85 |
+
"pad_token": "<|pad|>",
|
| 86 |
+
"perceive_token": "<|perceive|>",
|
| 87 |
+
"point_ref_seg_token": "<|POINT_REF_SEG|>",
|
| 88 |
+
"pointing_token": "<|POINTING|>",
|
| 89 |
+
"presence_token": "<|presence|>",
|
| 90 |
+
"ref_seg_token": "<|REF_SEG|>",
|
| 91 |
+
"seg_token": "<|seg|>",
|
| 92 |
+
"semantic_seg_task_token": "<|SEMANTIC_SEG_TASK|>",
|
| 93 |
+
"semantic_seg_token": "<|semantic_seg|>",
|
| 94 |
+
"size_token": "<|size|>",
|
| 95 |
+
"start_of_diffusion_query_token": "<|start_of_diffusion_query|>",
|
| 96 |
+
"start_of_image_token": "<|start_of_image|>",
|
| 97 |
+
"start_of_query_token": "<|start_of_query|>",
|
| 98 |
+
"start_of_turn_token": "<|start_of_turn|>",
|
| 99 |
+
"start_of_video_token": "<|start_of_video|>",
|
| 100 |
+
"task_sep_token": "<|task_sep|>",
|
| 101 |
+
"tokenizer_class": "TokenizersBackend"
|
| 102 |
+
}
|