id
int64
0
190k
prompt
stringlengths
21
13.4M
docstring
stringlengths
1
12k
1,859
import torch import torch.nn as nn import torch.nn.functional as F import os import glob import json import tqdm import functools from torch.utils.data.distributed import DistributedSampler from einops import rearrange from vilt.modules.dist_utils import all_gather def compute_mppd(pl_module, batch): infer = pl_module.infer(batch, mask_text=False, mask_image=True) mppd_logits = pl_module.mppd_score(infer["image_feats"]) mppd_labels = infer["image_labels_mppd"] filter_to_train = infer["image_labels"].float().mean(dim=-1) != -100 labels = mppd_labels[filter_to_train] logits = mppd_logits[filter_to_train] mppd_loss = F.mse_loss(logits, labels) ret = { "mppd_loss": mppd_loss, "mppd_logits": mppd_logits, "mppd_labels": mppd_labels, } phase = "train" if pl_module.training else "val" loss = getattr(pl_module, f"{phase}_mppd_loss")(ret["mppd_loss"]) pl_module.log(f"mppd/{phase}/loss", loss) return ret
null
1,860
import torch import torch.nn as nn import torch.nn.functional as F import os import glob import json import tqdm import functools from torch.utils.data.distributed import DistributedSampler from einops import rearrange from vilt.modules.dist_utils import all_gather def compute_mpfr(pl_module, batch): infer = pl_module.infer(batch, mask_text=False, mask_image=True) mpfr_logits = pl_module.mpfr_score(infer["image_feats"]) mpfr_labels = infer["image_labels_mpfr"] filter_to_train = infer["image_labels"].float().mean(dim=-1) != -100 labels = mpfr_labels[filter_to_train] logits = mpfr_logits[filter_to_train] mpfr_loss = F.mse_loss(logits, labels) ret = { "mpfr_loss": mpfr_loss, "mpfr_logits": mpfr_logits, "mpfr_labels": mpfr_labels, } phase = "train" if pl_module.training else "val" loss = getattr(pl_module, f"{phase}_mpfr_loss")(ret["mpfr_loss"]) pl_module.log(f"mpfr/{phase}/loss", loss) return ret
null
1,861
import torch import torch.nn as nn import torch.nn.functional as F import os import glob import json import tqdm import functools from torch.utils.data.distributed import DistributedSampler from einops import rearrange from vilt.modules.dist_utils import all_gather def cost_matrix_cosine(x, y, eps=1e-5): """Compute cosine distnace across every pairs of x, y (batched) [B, L_x, D] [B, L_y, D] -> [B, Lx, Ly]""" assert x.dim() == y.dim() assert x.size(0) == y.size(0) assert x.size(2) == y.size(2) x_norm = F.normalize(x, p=2, dim=-1, eps=eps) y_norm = F.normalize(y, p=2, dim=-1, eps=eps) cosine_sim = x_norm.matmul(y_norm.transpose(1, 2)) cosine_dist = 1 - cosine_sim return cosine_dist def trace(x): """ compute trace of input tensor (batched) """ b, m, n = x.size() assert m == n mask = torch.eye(n, dtype=torch.bool, device=x.device).unsqueeze(0).expand_as(x) trace = x.masked_select(mask).contiguous().view(b, n).sum(dim=-1, keepdim=False) return trace def ipot(C, x_len, x_pad, y_len, y_pad, joint_pad, beta, iteration, k): """ [B, M, N], [B], [B, M], [B], [B, N], [B, M, N]""" b, m, n = C.size() sigma = torch.ones(b, m, dtype=C.dtype, device=C.device) / x_len.unsqueeze(1) T = torch.ones(b, n, m, dtype=C.dtype, device=C.device) A = torch.exp(-C.transpose(1, 2) / beta) # mask padded positions sigma.masked_fill_(x_pad, 0) joint_pad = joint_pad.transpose(1, 2) T.masked_fill_(joint_pad, 0) A.masked_fill_(joint_pad, 0) # broadcastable lengths x_len = x_len.unsqueeze(1).unsqueeze(2) y_len = y_len.unsqueeze(1).unsqueeze(2) # mask to zero out padding in delta and sigma x_mask = (x_pad.to(C.dtype) * 1e4).unsqueeze(1) y_mask = (y_pad.to(C.dtype) * 1e4).unsqueeze(1) for _ in range(iteration): Q = A * T # bs * n * m sigma = sigma.view(b, m, 1) for _ in range(k): delta = 1 / (y_len * Q.matmul(sigma).view(b, 1, n) + y_mask) sigma = 1 / (x_len * delta.matmul(Q) + x_mask) T = delta.view(b, n, 1) * Q * sigma T.masked_fill_(joint_pad, 0) return T def compute_itm_wpa(pl_module, batch): pos_len = len(batch["text"]) // 2 neg_len = len(batch["text"]) - pos_len itm_labels = torch.cat([torch.ones(pos_len), torch.zeros(neg_len)]).to( pl_module.device ) itm_labels = itm_labels[torch.randperm(itm_labels.size(0))] itm_images = [ torch.stack( [ ti if itm_labels[i] == 1 else fi for i, (ti, fi) in enumerate(zip(bti, bfi)) ] ) for bti, bfi in zip(batch["image"], batch["false_image_0"]) ] batch = {k: v for k, v in batch.items()} batch["image"] = itm_images infer = pl_module.infer(batch, mask_text=False, mask_image=False) with torch.cuda.amp.autocast(enabled=False): txt_emb, img_emb = infer["text_feats"], infer["image_feats"] txt_mask, img_mask = infer["text_masks"].bool(), infer["image_masks"].bool() for i, _len in enumerate(txt_mask.sum(dim=1)): txt_mask[i, _len - 1] = False txt_mask[:, 0] = False img_mask[:, 0] = False if "deit" in pl_module.hparams.config["vit"]: img_mask[:, 1] = False txt_pad, img_pad = ~txt_mask, ~img_mask cost = cost_matrix_cosine(txt_emb.float(), img_emb.float()) joint_pad = txt_pad.unsqueeze(-1) | img_pad.unsqueeze(-2) cost.masked_fill_(joint_pad, 0) txt_len = (txt_pad.size(1) - txt_pad.sum(dim=1, keepdim=False)).to( dtype=cost.dtype ) img_len = (img_pad.size(1) - img_pad.sum(dim=1, keepdim=False)).to( dtype=cost.dtype ) T = ipot( cost.detach(), txt_len, txt_pad, img_len, img_pad, joint_pad, 0.5, 50, 1 ) distance = trace(cost.matmul(T.detach())) dist_pos = distance.masked_select(itm_labels == 1) dist_neg = distance.masked_select(itm_labels == 0) ot_loss = (dist_pos.sum() - dist_neg.sum()) / (dist_pos.size(0) + dist_neg.size(0)) itm_logits = pl_module.itm_score(infer["cls_feats"]) itm_loss = F.cross_entropy(itm_logits, itm_labels.long()) ret = { "itm_loss": itm_loss, "itm_wpa_loss": 0.1 * ot_loss, "itm_logits": itm_logits, "itm_labels": itm_labels, } phase = "train" if pl_module.training else "val" loss = getattr(pl_module, f"{phase}_itm_loss")(ret["itm_loss"]) wpa_loss = getattr(pl_module, f"{phase}_itm_wpa_loss")(ret["itm_wpa_loss"]) acc = getattr(pl_module, f"{phase}_itm_accuracy")( ret["itm_logits"], ret["itm_labels"] ) pl_module.log(f"itm/{phase}/loss", loss) pl_module.log(f"itm/{phase}/wpa_loss", wpa_loss) pl_module.log(f"itm/{phase}/accuracy", acc) return ret
null
1,862
import torch import torch.nn as nn import torch.nn.functional as F import os import glob import json import tqdm import functools from torch.utils.data.distributed import DistributedSampler from einops import rearrange from vilt.modules.dist_utils import all_gather def compute_imgcls(pl_module, batch): infer = pl_module.infer(batch, mask_text=False, mask_image=False) imgcls_logits = pl_module.img_classifier(infer["cls_feats"]) imgcls_labels = batch["label"] imgcls_labels = torch.tensor(imgcls_labels).to(pl_module.device).long() imgcls_loss = F.cross_entropy(imgcls_logits, imgcls_labels) ret = { "imgcls_loss": imgcls_loss, "imgcls_logits": imgcls_logits, "imgcls_labels": imgcls_labels, } phase = "train" if pl_module.training else "val" loss = getattr(pl_module, f"{phase}_imgcls_loss")(ret["imgcls_loss"]) acc = getattr(pl_module, f"{phase}_imgcls_accuracy")( ret["imgcls_logits"], ret["imgcls_labels"] ) pl_module.log(f"imgcls/{phase}/loss", loss) pl_module.log(f"imgcls/{phase}/accuracy", acc) return ret
null
1,863
import torch import torch.nn as nn import torch.nn.functional as F import os import glob import json import tqdm import functools from torch.utils.data.distributed import DistributedSampler from einops import rearrange from vilt.modules.dist_utils import all_gather def compute_vqa(pl_module, batch): infer = pl_module.infer(batch, mask_text=False, mask_image=False) vqa_logits = pl_module.vqa_classifier(infer["cls_feats"]) vqa_targets = torch.zeros( len(vqa_logits), pl_module.hparams.config["vqav2_label_size"] ).to(pl_module.device) vqa_labels = batch["vqa_labels"] vqa_scores = batch["vqa_scores"] for i, (_label, _score) in enumerate(zip(vqa_labels, vqa_scores)): for l, s in zip(_label, _score): vqa_targets[i, l] = s vqa_loss = ( F.binary_cross_entropy_with_logits(vqa_logits, vqa_targets) * vqa_targets.shape[1] ) # https://github.com/jnhwkim/ban-vqa/blob/master/train.py#L19 ret = { "vqa_loss": vqa_loss, "vqa_logits": vqa_logits, "vqa_targets": vqa_targets, "vqa_labels": vqa_labels, "vqa_scores": vqa_scores, } phase = "train" if pl_module.training else "val" loss = getattr(pl_module, f"{phase}_vqa_loss")(ret["vqa_loss"]) score = getattr(pl_module, f"{phase}_vqa_score")( ret["vqa_logits"], ret["vqa_targets"] ) pl_module.log(f"vqa/{phase}/loss", loss) pl_module.log(f"vqa/{phase}/score", score) return ret
null
1,864
import torch import torch.nn as nn import torch.nn.functional as F import os import glob import json import tqdm import functools from torch.utils.data.distributed import DistributedSampler from einops import rearrange from vilt.modules.dist_utils import all_gather def compute_nlvr2(pl_module, batch): infer1 = pl_module.infer( batch, mask_text=False, mask_image=False, image_token_type_idx=1 ) infer2 = pl_module.infer( batch, mask_text=False, mask_image=False, image_token_type_idx=2 ) cls_feats = torch.cat([infer1["cls_feats"], infer2["cls_feats"]], dim=-1) nlvr2_logits = pl_module.nlvr2_classifier(cls_feats) nlvr2_labels = batch["answers"] nlvr2_labels = torch.tensor(nlvr2_labels).to(pl_module.device).long() nlvr2_loss = F.cross_entropy(nlvr2_logits, nlvr2_labels) ret = { "nlvr2_loss": nlvr2_loss, "nlvr2_logits": nlvr2_logits, "nlvr2_labels": nlvr2_labels, } phase = "train" if pl_module.training else "val" if phase == "train": loss = getattr(pl_module, f"{phase}_nlvr2_loss")(ret["nlvr2_loss"]) acc = getattr(pl_module, f"{phase}_nlvr2_accuracy")( ret["nlvr2_logits"], ret["nlvr2_labels"] ) pl_module.log(f"nlvr2/{phase}/loss", loss) pl_module.log(f"nlvr2/{phase}/accuracy", acc) else: dev_batches = [i for i, n in enumerate(batch["table_name"]) if "dev" in n] test_batches = [i for i, n in enumerate(batch["table_name"]) if "test" in n] if dev_batches: dev_loss = getattr(pl_module, f"dev_nlvr2_loss")( F.cross_entropy( ret["nlvr2_logits"][dev_batches], ret["nlvr2_labels"][dev_batches] ) ) dev_acc = getattr(pl_module, f"dev_nlvr2_accuracy")( ret["nlvr2_logits"][dev_batches], ret["nlvr2_labels"][dev_batches] ) pl_module.log(f"nlvr2/dev/loss", dev_loss) pl_module.log(f"nlvr2/dev/accuracy", dev_acc) if test_batches: test_loss = getattr(pl_module, f"test_nlvr2_loss")( F.cross_entropy( ret["nlvr2_logits"][test_batches], ret["nlvr2_labels"][test_batches] ) ) test_acc = getattr(pl_module, f"test_nlvr2_accuracy")( ret["nlvr2_logits"][test_batches], ret["nlvr2_labels"][test_batches] ) pl_module.log(f"nlvr2/test/loss", test_loss) pl_module.log(f"nlvr2/test/accuracy", test_acc) return ret
null
1,865
import torch import torch.nn as nn import torch.nn.functional as F import os import glob import json import tqdm import functools from torch.utils.data.distributed import DistributedSampler from einops import rearrange from vilt.modules.dist_utils import all_gather def compute_irtr(pl_module, batch): is_training_phase = pl_module.training _bs, _c, _h, _w = batch["image"][0].shape false_len = pl_module.hparams.config["draw_false_text"] text_ids = torch.stack( [batch[f"false_text_{i}_ids"] for i in range(false_len)], dim=1 ) text_masks = torch.stack( [batch[f"false_text_{i}_masks"] for i in range(false_len)], dim=1 ) text_labels = torch.stack( [batch[f"false_text_{i}_labels"] for i in range(false_len)], dim=1 ) text_ids = torch.cat([batch["text_ids"].unsqueeze(1), text_ids], dim=1) text_masks = torch.cat([batch["text_masks"].unsqueeze(1), text_masks], dim=1) text_labels = torch.cat([batch["text_labels"].unsqueeze(1), text_labels], dim=1) images = batch["image"][0].unsqueeze(1).expand(_bs, false_len + 1, _c, _h, _w) infer = pl_module.infer( { "image": [rearrange(images, "bs fs c h w -> (bs fs) c h w")], "text_ids": rearrange(text_ids, "bs fs tl -> (bs fs) tl"), "text_masks": rearrange(text_masks, "bs fs tl -> (bs fs) tl"), "text_labels": rearrange(text_labels, "bs fs tl -> (bs fs) tl"), } ) score = pl_module.rank_output(infer["cls_feats"])[:, 0] score = rearrange(score, "(bs fs) -> bs fs", bs=_bs, fs=false_len + 1) answer = torch.zeros(_bs).to(score).long() irtr_loss = F.cross_entropy(score, answer) ret = { "irtr_loss": irtr_loss, } phase = "train" if pl_module.training else "val" irtr_loss = getattr(pl_module, f"{phase}_irtr_loss")(ret["irtr_loss"]) pl_module.log(f"irtr/{phase}/irtr_loss", irtr_loss) return ret
null
1,866
import torch import torch.nn as nn import torch.nn.functional as F import os import glob import json import tqdm import functools from torch.utils.data.distributed import DistributedSampler from einops import rearrange from vilt.modules.dist_utils import all_gather def init_weights(module): if isinstance(module, (nn.Linear, nn.Embedding)): module.weight.data.normal_(mean=0.0, std=0.02) elif isinstance(module, nn.LayerNorm): module.bias.data.zero_() module.weight.data.fill_(1.0) if isinstance(module, nn.Linear) and module.bias is not None: module.bias.data.zero_()
null
1,867
import torch import torch.nn as nn import torch.nn.functional as F import os import glob import json import tqdm import functools from torch.utils.data.distributed import DistributedSampler from einops import rearrange from vilt.modules.dist_utils import all_gather def vqa_test_step(pl_module, batch, output): id2answer = ( pl_module.trainer.datamodule.dm_dicts["vqa_trainval"].id2answer if "vqa_trainval" in pl_module.trainer.datamodule.dm_dicts else pl_module.trainer.datamodule.dm_dicts["vqa"].id2answer ) vqa_logits = output["vqa_logits"] vqa_preds = vqa_logits.argmax(dim=-1) vqa_preds = [id2answer[pred.item()] for pred in vqa_preds] questions = batch["text"] qids = batch["qid"] return {"qids": qids, "preds": vqa_preds}
null
1,868
import torch import torch.nn as nn import torch.nn.functional as F import os import glob import json import tqdm import functools from torch.utils.data.distributed import DistributedSampler from einops import rearrange from vilt.modules.dist_utils import all_gather def arc_test_step(pl_module, batch, output): return output
null
1,869
import torch import torch.nn as nn import torch.nn.functional as F import os import glob import json import tqdm import functools from torch.utils.data.distributed import DistributedSampler from einops import rearrange from vilt.modules.dist_utils import all_gather def vqa_test_wrapup(outs, model_name): rank = torch.distributed.get_rank() qids, preds = list(), list() for out in outs: qids += out["qids"] preds += out["preds"] rets = list() for qid, pred in zip(qids, preds): rets.append({"question_id": qid, "answer": pred}) with open(f"vqa_submit_{rank}.json", "w") as fp: json.dump(rets, fp, indent=4) torch.distributed.barrier() if rank == 0: jsons = list() paths = list(glob.glob("vqa_submit_*.json")) for path in paths: with open(path, "r") as fp: jsons += json.load(fp) os.makedirs("result", exist_ok=True) with open(f"result/vqa_submit_{model_name}.json", "w") as fp: json.dump(jsons, fp, indent=4) torch.distributed.barrier() os.remove(f"vqa_submit_{rank}.json")
null
1,870
import torch import torch.nn as nn import torch.nn.functional as F import os import glob import json import tqdm import functools from torch.utils.data.distributed import DistributedSampler from einops import rearrange from vilt.modules.dist_utils import all_gather def arc_test_wrapup(outs, caplen, model_name): rank = torch.distributed.get_rank() iids, captions = list(), list() for out in outs: iids += out["iid"] captions += out["captions"] rets = list() for iid, caption in zip(iids, captions): rets.append({"image_id": iid, "caption": caption}) with open(f"coco_cap_len{caplen}_{rank}.json", "w") as fp: json.dump(rets, fp, indent=4) torch.distributed.barrier() if rank == 0: jsons = list() paths = list(glob.glob(f"coco_cap_len{caplen}_*.json")) for path in paths: with open(path, "r") as fp: jsons += json.load(fp) os.makedirs("result/arc", exist_ok=True) jsons = sorted(jsons, key=lambda x: x["image_id"]) with open(f"result/arc/coco_cap_{model_name}_len{caplen}.json", "w") as fp: json.dump(jsons, fp, indent=4) torch.distributed.barrier() os.remove(f"coco_cap_len{caplen}_{rank}.json")
null
1,871
import math import logging from functools import partial import torch import torch.nn as nn import torch.nn.functional as F import hashlib import os import urllib import warnings from functools import partial from tqdm import tqdm from timm.data import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD from timm.models.helpers import load_pretrained from timm.models.layers import StdConv2dSame, DropPath, to_2tuple, trunc_normal_ from timm.models.resnet import resnet26d, resnet50d from timm.models.resnetv2 import ResNetV2 from timm.models.registry import register_model from torchvision import transforms def download_clip( url: str = "https://openaipublic.azureedge.net/clip/models/40d365715913c9da98579312b702a82c18be219cc2a73407c4526f58eba950af/ViT-B-32.pt", root: str = os.path.expanduser("~/.cache/clip"), ): os.makedirs(root, exist_ok=True) filename = os.path.basename(url) expected_sha256 = url.split("/")[-2] download_target = os.path.join(root, filename) if os.path.exists(download_target) and not os.path.isfile(download_target): raise RuntimeError(f"{download_target} exists and is not a regular file") if os.path.isfile(download_target): if ( hashlib.sha256(open(download_target, "rb").read()).hexdigest() == expected_sha256 ): return download_target else: warnings.warn( f"{download_target} exists, but the SHA256 checksum does not match; re-downloading the file" ) with urllib.request.urlopen(url) as source, open(download_target, "wb") as output: with tqdm(total=int(source.info().get("Content-Length")), ncols=80) as loop: while True: buffer = source.read(8192) if not buffer: break output.write(buffer) loop.update(len(buffer)) if ( hashlib.sha256(open(download_target, "rb").read()).hexdigest() != expected_sha256 ): raise RuntimeError( f"Model has been downloaded but the SHA256 checksum does not not match" ) return download_target
null
1,872
import math import logging from functools import partial import torch import torch.nn as nn import torch.nn.functional as F import hashlib import os import urllib import warnings from functools import partial from tqdm import tqdm from timm.data import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD from timm.models.helpers import load_pretrained from timm.models.layers import StdConv2dSame, DropPath, to_2tuple, trunc_normal_ from timm.models.resnet import resnet26d, resnet50d from timm.models.resnetv2 import ResNetV2 from timm.models.registry import register_model from torchvision import transforms def _cfg(url="", **kwargs): return { "url": url, "num_classes": 1000, "input_size": (3, 224, 224), "pool_size": None, "crop_pct": 0.9, "interpolation": "bicubic", "mean": IMAGENET_DEFAULT_MEAN, "std": IMAGENET_DEFAULT_STD, "first_conv": "patch_embed.proj", "classifier": "head", **kwargs, }
null
1,873
import math import logging from functools import partial import torch import torch.nn as nn import torch.nn.functional as F import hashlib import os import urllib import warnings from functools import partial from tqdm import tqdm from timm.data import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD from timm.models.helpers import load_pretrained from timm.models.layers import StdConv2dSame, DropPath, to_2tuple, trunc_normal_ from timm.models.resnet import resnet26d, resnet50d from timm.models.resnetv2 import ResNetV2 from timm.models.registry import register_model from torchvision import transforms def _create_vision_transformer(variant, pretrained=False, distilled=False, **kwargs): default_cfg = default_cfgs[variant] default_num_classes = default_cfg["num_classes"] default_img_size = default_cfg["input_size"][-1] num_classes = kwargs.pop("num_classes", default_num_classes) img_size = kwargs.pop("img_size", default_img_size) repr_size = kwargs.pop("representation_size", None) if repr_size is not None and num_classes != default_num_classes: # Remove representation layer if fine-tuning. This may not always be the desired action, # but I feel better than doing nothing by default for fine-tuning. Perhaps a better interface? _logger.warning("Removing representation layer for fine-tuning.") repr_size = None model_cls = DistilledVisionTransformer if distilled else VisionTransformer model = model_cls( img_size=img_size, num_classes=num_classes, representation_size=repr_size, **kwargs, ) model.default_cfg = default_cfg if pretrained: load_pretrained( model, num_classes=num_classes, in_chans=kwargs.get("in_chans", 3), filter_fn=partial(checkpoint_filter_fn, model=model), strict=False, ) return model The provided code snippet includes necessary dependencies for implementing the `vit_small_patch16_224` function. Write a Python function `def vit_small_patch16_224(pretrained=False, **kwargs)` to solve the following problem: My custom 'small' ViT model. Depth=8, heads=8= mlp_ratio=3. Here is the function: def vit_small_patch16_224(pretrained=False, **kwargs): """ My custom 'small' ViT model. Depth=8, heads=8= mlp_ratio=3.""" model_kwargs = dict( patch_size=16, embed_dim=768, depth=8, num_heads=8, mlp_ratio=3.0, qkv_bias=False, norm_layer=nn.LayerNorm, **kwargs, ) if pretrained: # NOTE my scale was wrong for original weights, leaving this here until I have better ones for this model model_kwargs.setdefault("qk_scale", 768 ** -0.5) model = _create_vision_transformer( "vit_small_patch16_224", pretrained=pretrained, **model_kwargs ) return model
My custom 'small' ViT model. Depth=8, heads=8= mlp_ratio=3.
1,874
import math import logging from functools import partial import torch import torch.nn as nn import torch.nn.functional as F import hashlib import os import urllib import warnings from functools import partial from tqdm import tqdm from timm.data import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD from timm.models.helpers import load_pretrained from timm.models.layers import StdConv2dSame, DropPath, to_2tuple, trunc_normal_ from timm.models.resnet import resnet26d, resnet50d from timm.models.resnetv2 import ResNetV2 from timm.models.registry import register_model from torchvision import transforms def _create_vision_transformer(variant, pretrained=False, distilled=False, **kwargs): default_cfg = default_cfgs[variant] default_num_classes = default_cfg["num_classes"] default_img_size = default_cfg["input_size"][-1] num_classes = kwargs.pop("num_classes", default_num_classes) img_size = kwargs.pop("img_size", default_img_size) repr_size = kwargs.pop("representation_size", None) if repr_size is not None and num_classes != default_num_classes: # Remove representation layer if fine-tuning. This may not always be the desired action, # but I feel better than doing nothing by default for fine-tuning. Perhaps a better interface? _logger.warning("Removing representation layer for fine-tuning.") repr_size = None model_cls = DistilledVisionTransformer if distilled else VisionTransformer model = model_cls( img_size=img_size, num_classes=num_classes, representation_size=repr_size, **kwargs, ) model.default_cfg = default_cfg if pretrained: load_pretrained( model, num_classes=num_classes, in_chans=kwargs.get("in_chans", 3), filter_fn=partial(checkpoint_filter_fn, model=model), strict=False, ) return model The provided code snippet includes necessary dependencies for implementing the `vit_base_patch16_224` function. Write a Python function `def vit_base_patch16_224(pretrained=False, **kwargs)` to solve the following problem: ViT-Base (ViT-B/16) from original paper (https://arxiv.org/abs/2010.11929). ImageNet-1k weights fine-tuned from in21k @ 224x224, source https://github.com/google-research/vision_transformer. Here is the function: def vit_base_patch16_224(pretrained=False, **kwargs): """ ViT-Base (ViT-B/16) from original paper (https://arxiv.org/abs/2010.11929). ImageNet-1k weights fine-tuned from in21k @ 224x224, source https://github.com/google-research/vision_transformer. """ model_kwargs = dict(patch_size=16, embed_dim=768, depth=12, num_heads=12, **kwargs) model = _create_vision_transformer( "vit_base_patch16_224", pretrained=pretrained, **model_kwargs ) return model
ViT-Base (ViT-B/16) from original paper (https://arxiv.org/abs/2010.11929). ImageNet-1k weights fine-tuned from in21k @ 224x224, source https://github.com/google-research/vision_transformer.
1,875
import math import logging from functools import partial import torch import torch.nn as nn import torch.nn.functional as F import hashlib import os import urllib import warnings from functools import partial from tqdm import tqdm from timm.data import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD from timm.models.helpers import load_pretrained from timm.models.layers import StdConv2dSame, DropPath, to_2tuple, trunc_normal_ from timm.models.resnet import resnet26d, resnet50d from timm.models.resnetv2 import ResNetV2 from timm.models.registry import register_model from torchvision import transforms def _create_vision_transformer(variant, pretrained=False, distilled=False, **kwargs): default_cfg = default_cfgs[variant] default_num_classes = default_cfg["num_classes"] default_img_size = default_cfg["input_size"][-1] num_classes = kwargs.pop("num_classes", default_num_classes) img_size = kwargs.pop("img_size", default_img_size) repr_size = kwargs.pop("representation_size", None) if repr_size is not None and num_classes != default_num_classes: # Remove representation layer if fine-tuning. This may not always be the desired action, # but I feel better than doing nothing by default for fine-tuning. Perhaps a better interface? _logger.warning("Removing representation layer for fine-tuning.") repr_size = None model_cls = DistilledVisionTransformer if distilled else VisionTransformer model = model_cls( img_size=img_size, num_classes=num_classes, representation_size=repr_size, **kwargs, ) model.default_cfg = default_cfg if pretrained: load_pretrained( model, num_classes=num_classes, in_chans=kwargs.get("in_chans", 3), filter_fn=partial(checkpoint_filter_fn, model=model), strict=False, ) return model The provided code snippet includes necessary dependencies for implementing the `vit_base_patch32_224` function. Write a Python function `def vit_base_patch32_224(pretrained=False, **kwargs)` to solve the following problem: ViT-Base (ViT-B/32) from original paper (https://arxiv.org/abs/2010.11929). No pretrained weights. Here is the function: def vit_base_patch32_224(pretrained=False, **kwargs): """ ViT-Base (ViT-B/32) from original paper (https://arxiv.org/abs/2010.11929). No pretrained weights. """ model_kwargs = dict(patch_size=32, embed_dim=768, depth=12, num_heads=12, **kwargs) model = _create_vision_transformer( "vit_base_patch32_224", pretrained=pretrained, **model_kwargs ) return model
ViT-Base (ViT-B/32) from original paper (https://arxiv.org/abs/2010.11929). No pretrained weights.
1,876
import math import logging from functools import partial import torch import torch.nn as nn import torch.nn.functional as F import hashlib import os import urllib import warnings from functools import partial from tqdm import tqdm from timm.data import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD from timm.models.helpers import load_pretrained from timm.models.layers import StdConv2dSame, DropPath, to_2tuple, trunc_normal_ from timm.models.resnet import resnet26d, resnet50d from timm.models.resnetv2 import ResNetV2 from timm.models.registry import register_model from torchvision import transforms def _create_vision_transformer(variant, pretrained=False, distilled=False, **kwargs): default_cfg = default_cfgs[variant] default_num_classes = default_cfg["num_classes"] default_img_size = default_cfg["input_size"][-1] num_classes = kwargs.pop("num_classes", default_num_classes) img_size = kwargs.pop("img_size", default_img_size) repr_size = kwargs.pop("representation_size", None) if repr_size is not None and num_classes != default_num_classes: # Remove representation layer if fine-tuning. This may not always be the desired action, # but I feel better than doing nothing by default for fine-tuning. Perhaps a better interface? _logger.warning("Removing representation layer for fine-tuning.") repr_size = None model_cls = DistilledVisionTransformer if distilled else VisionTransformer model = model_cls( img_size=img_size, num_classes=num_classes, representation_size=repr_size, **kwargs, ) model.default_cfg = default_cfg if pretrained: load_pretrained( model, num_classes=num_classes, in_chans=kwargs.get("in_chans", 3), filter_fn=partial(checkpoint_filter_fn, model=model), strict=False, ) return model The provided code snippet includes necessary dependencies for implementing the `vit_base_patch16_384` function. Write a Python function `def vit_base_patch16_384(pretrained=False, **kwargs)` to solve the following problem: ViT-Base model (ViT-B/16) from original paper (https://arxiv.org/abs/2010.11929). ImageNet-1k weights fine-tuned from in21k @ 384x384, source https://github.com/google-research/vision_transformer. Here is the function: def vit_base_patch16_384(pretrained=False, **kwargs): """ ViT-Base model (ViT-B/16) from original paper (https://arxiv.org/abs/2010.11929). ImageNet-1k weights fine-tuned from in21k @ 384x384, source https://github.com/google-research/vision_transformer. """ model_kwargs = dict(patch_size=16, embed_dim=768, depth=12, num_heads=12, **kwargs) model = _create_vision_transformer( "vit_base_patch16_384", pretrained=pretrained, **model_kwargs ) return model
ViT-Base model (ViT-B/16) from original paper (https://arxiv.org/abs/2010.11929). ImageNet-1k weights fine-tuned from in21k @ 384x384, source https://github.com/google-research/vision_transformer.
1,877
import math import logging from functools import partial import torch import torch.nn as nn import torch.nn.functional as F import hashlib import os import urllib import warnings from functools import partial from tqdm import tqdm from timm.data import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD from timm.models.helpers import load_pretrained from timm.models.layers import StdConv2dSame, DropPath, to_2tuple, trunc_normal_ from timm.models.resnet import resnet26d, resnet50d from timm.models.resnetv2 import ResNetV2 from timm.models.registry import register_model from torchvision import transforms def _create_vision_transformer(variant, pretrained=False, distilled=False, **kwargs): default_cfg = default_cfgs[variant] default_num_classes = default_cfg["num_classes"] default_img_size = default_cfg["input_size"][-1] num_classes = kwargs.pop("num_classes", default_num_classes) img_size = kwargs.pop("img_size", default_img_size) repr_size = kwargs.pop("representation_size", None) if repr_size is not None and num_classes != default_num_classes: # Remove representation layer if fine-tuning. This may not always be the desired action, # but I feel better than doing nothing by default for fine-tuning. Perhaps a better interface? _logger.warning("Removing representation layer for fine-tuning.") repr_size = None model_cls = DistilledVisionTransformer if distilled else VisionTransformer model = model_cls( img_size=img_size, num_classes=num_classes, representation_size=repr_size, **kwargs, ) model.default_cfg = default_cfg if pretrained: load_pretrained( model, num_classes=num_classes, in_chans=kwargs.get("in_chans", 3), filter_fn=partial(checkpoint_filter_fn, model=model), strict=False, ) return model The provided code snippet includes necessary dependencies for implementing the `vit_base_patch32_384` function. Write a Python function `def vit_base_patch32_384(pretrained=False, **kwargs)` to solve the following problem: ViT-Base model (ViT-B/32) from original paper (https://arxiv.org/abs/2010.11929). ImageNet-1k weights fine-tuned from in21k @ 384x384, source https://github.com/google-research/vision_transformer. Here is the function: def vit_base_patch32_384(pretrained=False, **kwargs): """ ViT-Base model (ViT-B/32) from original paper (https://arxiv.org/abs/2010.11929). ImageNet-1k weights fine-tuned from in21k @ 384x384, source https://github.com/google-research/vision_transformer. """ model_kwargs = dict(patch_size=32, embed_dim=768, depth=12, num_heads=12, **kwargs) model = _create_vision_transformer( "vit_base_patch32_384", pretrained=pretrained, **model_kwargs ) return model
ViT-Base model (ViT-B/32) from original paper (https://arxiv.org/abs/2010.11929). ImageNet-1k weights fine-tuned from in21k @ 384x384, source https://github.com/google-research/vision_transformer.
1,878
import math import logging from functools import partial import torch import torch.nn as nn import torch.nn.functional as F import hashlib import os import urllib import warnings from functools import partial from tqdm import tqdm from timm.data import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD from timm.models.helpers import load_pretrained from timm.models.layers import StdConv2dSame, DropPath, to_2tuple, trunc_normal_ from timm.models.resnet import resnet26d, resnet50d from timm.models.resnetv2 import ResNetV2 from timm.models.registry import register_model from torchvision import transforms def _create_vision_transformer(variant, pretrained=False, distilled=False, **kwargs): default_cfg = default_cfgs[variant] default_num_classes = default_cfg["num_classes"] default_img_size = default_cfg["input_size"][-1] num_classes = kwargs.pop("num_classes", default_num_classes) img_size = kwargs.pop("img_size", default_img_size) repr_size = kwargs.pop("representation_size", None) if repr_size is not None and num_classes != default_num_classes: # Remove representation layer if fine-tuning. This may not always be the desired action, # but I feel better than doing nothing by default for fine-tuning. Perhaps a better interface? _logger.warning("Removing representation layer for fine-tuning.") repr_size = None model_cls = DistilledVisionTransformer if distilled else VisionTransformer model = model_cls( img_size=img_size, num_classes=num_classes, representation_size=repr_size, **kwargs, ) model.default_cfg = default_cfg if pretrained: load_pretrained( model, num_classes=num_classes, in_chans=kwargs.get("in_chans", 3), filter_fn=partial(checkpoint_filter_fn, model=model), strict=False, ) return model The provided code snippet includes necessary dependencies for implementing the `vit_large_patch16_224` function. Write a Python function `def vit_large_patch16_224(pretrained=False, **kwargs)` to solve the following problem: ViT-Large model (ViT-L/32) from original paper (https://arxiv.org/abs/2010.11929). ImageNet-1k weights fine-tuned from in21k @ 224x224, source https://github.com/google-research/vision_transformer. Here is the function: def vit_large_patch16_224(pretrained=False, **kwargs): """ ViT-Large model (ViT-L/32) from original paper (https://arxiv.org/abs/2010.11929). ImageNet-1k weights fine-tuned from in21k @ 224x224, source https://github.com/google-research/vision_transformer. """ model_kwargs = dict(patch_size=16, embed_dim=1024, depth=24, num_heads=16, **kwargs) model = _create_vision_transformer( "vit_large_patch16_224", pretrained=pretrained, **model_kwargs ) return model
ViT-Large model (ViT-L/32) from original paper (https://arxiv.org/abs/2010.11929). ImageNet-1k weights fine-tuned from in21k @ 224x224, source https://github.com/google-research/vision_transformer.
1,879
import math import logging from functools import partial import torch import torch.nn as nn import torch.nn.functional as F import hashlib import os import urllib import warnings from functools import partial from tqdm import tqdm from timm.data import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD from timm.models.helpers import load_pretrained from timm.models.layers import StdConv2dSame, DropPath, to_2tuple, trunc_normal_ from timm.models.resnet import resnet26d, resnet50d from timm.models.resnetv2 import ResNetV2 from timm.models.registry import register_model from torchvision import transforms def _create_vision_transformer(variant, pretrained=False, distilled=False, **kwargs): default_cfg = default_cfgs[variant] default_num_classes = default_cfg["num_classes"] default_img_size = default_cfg["input_size"][-1] num_classes = kwargs.pop("num_classes", default_num_classes) img_size = kwargs.pop("img_size", default_img_size) repr_size = kwargs.pop("representation_size", None) if repr_size is not None and num_classes != default_num_classes: # Remove representation layer if fine-tuning. This may not always be the desired action, # but I feel better than doing nothing by default for fine-tuning. Perhaps a better interface? _logger.warning("Removing representation layer for fine-tuning.") repr_size = None model_cls = DistilledVisionTransformer if distilled else VisionTransformer model = model_cls( img_size=img_size, num_classes=num_classes, representation_size=repr_size, **kwargs, ) model.default_cfg = default_cfg if pretrained: load_pretrained( model, num_classes=num_classes, in_chans=kwargs.get("in_chans", 3), filter_fn=partial(checkpoint_filter_fn, model=model), strict=False, ) return model The provided code snippet includes necessary dependencies for implementing the `vit_large_patch32_224` function. Write a Python function `def vit_large_patch32_224(pretrained=False, **kwargs)` to solve the following problem: ViT-Large model (ViT-L/32) from original paper (https://arxiv.org/abs/2010.11929). No pretrained weights. Here is the function: def vit_large_patch32_224(pretrained=False, **kwargs): """ ViT-Large model (ViT-L/32) from original paper (https://arxiv.org/abs/2010.11929). No pretrained weights. """ model_kwargs = dict(patch_size=32, embed_dim=1024, depth=24, num_heads=16, **kwargs) model = _create_vision_transformer( "vit_large_patch32_224", pretrained=pretrained, **model_kwargs ) return model
ViT-Large model (ViT-L/32) from original paper (https://arxiv.org/abs/2010.11929). No pretrained weights.
1,880
import math import logging from functools import partial import torch import torch.nn as nn import torch.nn.functional as F import hashlib import os import urllib import warnings from functools import partial from tqdm import tqdm from timm.data import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD from timm.models.helpers import load_pretrained from timm.models.layers import StdConv2dSame, DropPath, to_2tuple, trunc_normal_ from timm.models.resnet import resnet26d, resnet50d from timm.models.resnetv2 import ResNetV2 from timm.models.registry import register_model from torchvision import transforms def _create_vision_transformer(variant, pretrained=False, distilled=False, **kwargs): default_cfg = default_cfgs[variant] default_num_classes = default_cfg["num_classes"] default_img_size = default_cfg["input_size"][-1] num_classes = kwargs.pop("num_classes", default_num_classes) img_size = kwargs.pop("img_size", default_img_size) repr_size = kwargs.pop("representation_size", None) if repr_size is not None and num_classes != default_num_classes: # Remove representation layer if fine-tuning. This may not always be the desired action, # but I feel better than doing nothing by default for fine-tuning. Perhaps a better interface? _logger.warning("Removing representation layer for fine-tuning.") repr_size = None model_cls = DistilledVisionTransformer if distilled else VisionTransformer model = model_cls( img_size=img_size, num_classes=num_classes, representation_size=repr_size, **kwargs, ) model.default_cfg = default_cfg if pretrained: load_pretrained( model, num_classes=num_classes, in_chans=kwargs.get("in_chans", 3), filter_fn=partial(checkpoint_filter_fn, model=model), strict=False, ) return model The provided code snippet includes necessary dependencies for implementing the `vit_large_patch16_384` function. Write a Python function `def vit_large_patch16_384(pretrained=False, **kwargs)` to solve the following problem: ViT-Large model (ViT-L/16) from original paper (https://arxiv.org/abs/2010.11929). ImageNet-1k weights fine-tuned from in21k @ 384x384, source https://github.com/google-research/vision_transformer. Here is the function: def vit_large_patch16_384(pretrained=False, **kwargs): """ ViT-Large model (ViT-L/16) from original paper (https://arxiv.org/abs/2010.11929). ImageNet-1k weights fine-tuned from in21k @ 384x384, source https://github.com/google-research/vision_transformer. """ model_kwargs = dict(patch_size=16, embed_dim=1024, depth=24, num_heads=16, **kwargs) model = _create_vision_transformer( "vit_large_patch16_384", pretrained=pretrained, **model_kwargs ) return model
ViT-Large model (ViT-L/16) from original paper (https://arxiv.org/abs/2010.11929). ImageNet-1k weights fine-tuned from in21k @ 384x384, source https://github.com/google-research/vision_transformer.
1,881
import math import logging from functools import partial import torch import torch.nn as nn import torch.nn.functional as F import hashlib import os import urllib import warnings from functools import partial from tqdm import tqdm from timm.data import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD from timm.models.helpers import load_pretrained from timm.models.layers import StdConv2dSame, DropPath, to_2tuple, trunc_normal_ from timm.models.resnet import resnet26d, resnet50d from timm.models.resnetv2 import ResNetV2 from timm.models.registry import register_model from torchvision import transforms def _create_vision_transformer(variant, pretrained=False, distilled=False, **kwargs): default_cfg = default_cfgs[variant] default_num_classes = default_cfg["num_classes"] default_img_size = default_cfg["input_size"][-1] num_classes = kwargs.pop("num_classes", default_num_classes) img_size = kwargs.pop("img_size", default_img_size) repr_size = kwargs.pop("representation_size", None) if repr_size is not None and num_classes != default_num_classes: # Remove representation layer if fine-tuning. This may not always be the desired action, # but I feel better than doing nothing by default for fine-tuning. Perhaps a better interface? _logger.warning("Removing representation layer for fine-tuning.") repr_size = None model_cls = DistilledVisionTransformer if distilled else VisionTransformer model = model_cls( img_size=img_size, num_classes=num_classes, representation_size=repr_size, **kwargs, ) model.default_cfg = default_cfg if pretrained: load_pretrained( model, num_classes=num_classes, in_chans=kwargs.get("in_chans", 3), filter_fn=partial(checkpoint_filter_fn, model=model), strict=False, ) return model The provided code snippet includes necessary dependencies for implementing the `vit_large_patch32_384` function. Write a Python function `def vit_large_patch32_384(pretrained=False, **kwargs)` to solve the following problem: ViT-Large model (ViT-L/32) from original paper (https://arxiv.org/abs/2010.11929). ImageNet-1k weights fine-tuned from in21k @ 384x384, source https://github.com/google-research/vision_transformer. Here is the function: def vit_large_patch32_384(pretrained=False, **kwargs): """ ViT-Large model (ViT-L/32) from original paper (https://arxiv.org/abs/2010.11929). ImageNet-1k weights fine-tuned from in21k @ 384x384, source https://github.com/google-research/vision_transformer. """ model_kwargs = dict(patch_size=32, embed_dim=1024, depth=24, num_heads=16, **kwargs) model = _create_vision_transformer( "vit_large_patch32_384", pretrained=pretrained, **model_kwargs ) return model
ViT-Large model (ViT-L/32) from original paper (https://arxiv.org/abs/2010.11929). ImageNet-1k weights fine-tuned from in21k @ 384x384, source https://github.com/google-research/vision_transformer.
1,882
import math import logging from functools import partial import torch import torch.nn as nn import torch.nn.functional as F import hashlib import os import urllib import warnings from functools import partial from tqdm import tqdm from timm.data import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD from timm.models.helpers import load_pretrained from timm.models.layers import StdConv2dSame, DropPath, to_2tuple, trunc_normal_ from timm.models.resnet import resnet26d, resnet50d from timm.models.resnetv2 import ResNetV2 from timm.models.registry import register_model from torchvision import transforms def _create_vision_transformer(variant, pretrained=False, distilled=False, **kwargs): default_cfg = default_cfgs[variant] default_num_classes = default_cfg["num_classes"] default_img_size = default_cfg["input_size"][-1] num_classes = kwargs.pop("num_classes", default_num_classes) img_size = kwargs.pop("img_size", default_img_size) repr_size = kwargs.pop("representation_size", None) if repr_size is not None and num_classes != default_num_classes: # Remove representation layer if fine-tuning. This may not always be the desired action, # but I feel better than doing nothing by default for fine-tuning. Perhaps a better interface? _logger.warning("Removing representation layer for fine-tuning.") repr_size = None model_cls = DistilledVisionTransformer if distilled else VisionTransformer model = model_cls( img_size=img_size, num_classes=num_classes, representation_size=repr_size, **kwargs, ) model.default_cfg = default_cfg if pretrained: load_pretrained( model, num_classes=num_classes, in_chans=kwargs.get("in_chans", 3), filter_fn=partial(checkpoint_filter_fn, model=model), strict=False, ) return model The provided code snippet includes necessary dependencies for implementing the `vit_base_patch16_224_in21k` function. Write a Python function `def vit_base_patch16_224_in21k(pretrained=False, **kwargs)` to solve the following problem: ViT-Base model (ViT-B/16) from original paper (https://arxiv.org/abs/2010.11929). ImageNet-21k weights @ 224x224, source https://github.com/google-research/vision_transformer. Here is the function: def vit_base_patch16_224_in21k(pretrained=False, **kwargs): """ ViT-Base model (ViT-B/16) from original paper (https://arxiv.org/abs/2010.11929). ImageNet-21k weights @ 224x224, source https://github.com/google-research/vision_transformer. """ model_kwargs = dict( patch_size=16, embed_dim=768, depth=12, num_heads=12, representation_size=768, **kwargs, ) model = _create_vision_transformer( "vit_base_patch16_224_in21k", pretrained=pretrained, **model_kwargs ) return model
ViT-Base model (ViT-B/16) from original paper (https://arxiv.org/abs/2010.11929). ImageNet-21k weights @ 224x224, source https://github.com/google-research/vision_transformer.
1,883
import math import logging from functools import partial import torch import torch.nn as nn import torch.nn.functional as F import hashlib import os import urllib import warnings from functools import partial from tqdm import tqdm from timm.data import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD from timm.models.helpers import load_pretrained from timm.models.layers import StdConv2dSame, DropPath, to_2tuple, trunc_normal_ from timm.models.resnet import resnet26d, resnet50d from timm.models.resnetv2 import ResNetV2 from timm.models.registry import register_model from torchvision import transforms def _create_vision_transformer(variant, pretrained=False, distilled=False, **kwargs): default_cfg = default_cfgs[variant] default_num_classes = default_cfg["num_classes"] default_img_size = default_cfg["input_size"][-1] num_classes = kwargs.pop("num_classes", default_num_classes) img_size = kwargs.pop("img_size", default_img_size) repr_size = kwargs.pop("representation_size", None) if repr_size is not None and num_classes != default_num_classes: # Remove representation layer if fine-tuning. This may not always be the desired action, # but I feel better than doing nothing by default for fine-tuning. Perhaps a better interface? _logger.warning("Removing representation layer for fine-tuning.") repr_size = None model_cls = DistilledVisionTransformer if distilled else VisionTransformer model = model_cls( img_size=img_size, num_classes=num_classes, representation_size=repr_size, **kwargs, ) model.default_cfg = default_cfg if pretrained: load_pretrained( model, num_classes=num_classes, in_chans=kwargs.get("in_chans", 3), filter_fn=partial(checkpoint_filter_fn, model=model), strict=False, ) return model The provided code snippet includes necessary dependencies for implementing the `vit_base_patch32_224_in21k` function. Write a Python function `def vit_base_patch32_224_in21k(pretrained=False, **kwargs)` to solve the following problem: ViT-Base model (ViT-B/32) from original paper (https://arxiv.org/abs/2010.11929). ImageNet-21k weights @ 224x224, source https://github.com/google-research/vision_transformer. Here is the function: def vit_base_patch32_224_in21k(pretrained=False, **kwargs): """ ViT-Base model (ViT-B/32) from original paper (https://arxiv.org/abs/2010.11929). ImageNet-21k weights @ 224x224, source https://github.com/google-research/vision_transformer. """ model_kwargs = dict( patch_size=32, embed_dim=768, depth=12, num_heads=12, representation_size=768, **kwargs, ) model = _create_vision_transformer( "vit_base_patch32_224_in21k", pretrained=pretrained, **model_kwargs ) return model
ViT-Base model (ViT-B/32) from original paper (https://arxiv.org/abs/2010.11929). ImageNet-21k weights @ 224x224, source https://github.com/google-research/vision_transformer.
1,884
import math import logging from functools import partial import torch import torch.nn as nn import torch.nn.functional as F import hashlib import os import urllib import warnings from functools import partial from tqdm import tqdm from timm.data import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD from timm.models.helpers import load_pretrained from timm.models.layers import StdConv2dSame, DropPath, to_2tuple, trunc_normal_ from timm.models.resnet import resnet26d, resnet50d from timm.models.resnetv2 import ResNetV2 from timm.models.registry import register_model from torchvision import transforms def _create_vision_transformer(variant, pretrained=False, distilled=False, **kwargs): default_cfg = default_cfgs[variant] default_num_classes = default_cfg["num_classes"] default_img_size = default_cfg["input_size"][-1] num_classes = kwargs.pop("num_classes", default_num_classes) img_size = kwargs.pop("img_size", default_img_size) repr_size = kwargs.pop("representation_size", None) if repr_size is not None and num_classes != default_num_classes: # Remove representation layer if fine-tuning. This may not always be the desired action, # but I feel better than doing nothing by default for fine-tuning. Perhaps a better interface? _logger.warning("Removing representation layer for fine-tuning.") repr_size = None model_cls = DistilledVisionTransformer if distilled else VisionTransformer model = model_cls( img_size=img_size, num_classes=num_classes, representation_size=repr_size, **kwargs, ) model.default_cfg = default_cfg if pretrained: load_pretrained( model, num_classes=num_classes, in_chans=kwargs.get("in_chans", 3), filter_fn=partial(checkpoint_filter_fn, model=model), strict=False, ) return model The provided code snippet includes necessary dependencies for implementing the `vit_large_patch16_224_in21k` function. Write a Python function `def vit_large_patch16_224_in21k(pretrained=False, **kwargs)` to solve the following problem: ViT-Large model (ViT-L/16) from original paper (https://arxiv.org/abs/2010.11929). ImageNet-21k weights @ 224x224, source https://github.com/google-research/vision_transformer. Here is the function: def vit_large_patch16_224_in21k(pretrained=False, **kwargs): """ ViT-Large model (ViT-L/16) from original paper (https://arxiv.org/abs/2010.11929). ImageNet-21k weights @ 224x224, source https://github.com/google-research/vision_transformer. """ model_kwargs = dict( patch_size=16, embed_dim=1024, depth=24, num_heads=16, representation_size=1024, **kwargs, ) model = _create_vision_transformer( "vit_large_patch16_224_in21k", pretrained=pretrained, **model_kwargs ) return model
ViT-Large model (ViT-L/16) from original paper (https://arxiv.org/abs/2010.11929). ImageNet-21k weights @ 224x224, source https://github.com/google-research/vision_transformer.
1,885
import math import logging from functools import partial import torch import torch.nn as nn import torch.nn.functional as F import hashlib import os import urllib import warnings from functools import partial from tqdm import tqdm from timm.data import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD from timm.models.helpers import load_pretrained from timm.models.layers import StdConv2dSame, DropPath, to_2tuple, trunc_normal_ from timm.models.resnet import resnet26d, resnet50d from timm.models.resnetv2 import ResNetV2 from timm.models.registry import register_model from torchvision import transforms def _create_vision_transformer(variant, pretrained=False, distilled=False, **kwargs): default_cfg = default_cfgs[variant] default_num_classes = default_cfg["num_classes"] default_img_size = default_cfg["input_size"][-1] num_classes = kwargs.pop("num_classes", default_num_classes) img_size = kwargs.pop("img_size", default_img_size) repr_size = kwargs.pop("representation_size", None) if repr_size is not None and num_classes != default_num_classes: # Remove representation layer if fine-tuning. This may not always be the desired action, # but I feel better than doing nothing by default for fine-tuning. Perhaps a better interface? _logger.warning("Removing representation layer for fine-tuning.") repr_size = None model_cls = DistilledVisionTransformer if distilled else VisionTransformer model = model_cls( img_size=img_size, num_classes=num_classes, representation_size=repr_size, **kwargs, ) model.default_cfg = default_cfg if pretrained: load_pretrained( model, num_classes=num_classes, in_chans=kwargs.get("in_chans", 3), filter_fn=partial(checkpoint_filter_fn, model=model), strict=False, ) return model The provided code snippet includes necessary dependencies for implementing the `vit_large_patch32_224_in21k` function. Write a Python function `def vit_large_patch32_224_in21k(pretrained=False, **kwargs)` to solve the following problem: ViT-Large model (ViT-L/32) from original paper (https://arxiv.org/abs/2010.11929). ImageNet-21k weights @ 224x224, source https://github.com/google-research/vision_transformer. Here is the function: def vit_large_patch32_224_in21k(pretrained=False, **kwargs): """ ViT-Large model (ViT-L/32) from original paper (https://arxiv.org/abs/2010.11929). ImageNet-21k weights @ 224x224, source https://github.com/google-research/vision_transformer. """ model_kwargs = dict( patch_size=32, embed_dim=1024, depth=24, num_heads=16, representation_size=1024, **kwargs, ) model = _create_vision_transformer( "vit_large_patch32_224_in21k", pretrained=pretrained, **model_kwargs ) return model
ViT-Large model (ViT-L/32) from original paper (https://arxiv.org/abs/2010.11929). ImageNet-21k weights @ 224x224, source https://github.com/google-research/vision_transformer.
1,886
import math import logging from functools import partial import torch import torch.nn as nn import torch.nn.functional as F import hashlib import os import urllib import warnings from functools import partial from tqdm import tqdm from timm.data import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD from timm.models.helpers import load_pretrained from timm.models.layers import StdConv2dSame, DropPath, to_2tuple, trunc_normal_ from timm.models.resnet import resnet26d, resnet50d from timm.models.resnetv2 import ResNetV2 from timm.models.registry import register_model from torchvision import transforms def _create_vision_transformer(variant, pretrained=False, distilled=False, **kwargs): default_cfg = default_cfgs[variant] default_num_classes = default_cfg["num_classes"] default_img_size = default_cfg["input_size"][-1] num_classes = kwargs.pop("num_classes", default_num_classes) img_size = kwargs.pop("img_size", default_img_size) repr_size = kwargs.pop("representation_size", None) if repr_size is not None and num_classes != default_num_classes: # Remove representation layer if fine-tuning. This may not always be the desired action, # but I feel better than doing nothing by default for fine-tuning. Perhaps a better interface? _logger.warning("Removing representation layer for fine-tuning.") repr_size = None model_cls = DistilledVisionTransformer if distilled else VisionTransformer model = model_cls( img_size=img_size, num_classes=num_classes, representation_size=repr_size, **kwargs, ) model.default_cfg = default_cfg if pretrained: load_pretrained( model, num_classes=num_classes, in_chans=kwargs.get("in_chans", 3), filter_fn=partial(checkpoint_filter_fn, model=model), strict=False, ) return model The provided code snippet includes necessary dependencies for implementing the `vit_huge_patch14_224_in21k` function. Write a Python function `def vit_huge_patch14_224_in21k(pretrained=False, **kwargs)` to solve the following problem: ViT-Huge model (ViT-H/14) from original paper (https://arxiv.org/abs/2010.11929). ImageNet-21k weights @ 224x224, source https://github.com/google-research/vision_transformer. NOTE: converted weights not currently available, too large for github release hosting. Here is the function: def vit_huge_patch14_224_in21k(pretrained=False, **kwargs): """ ViT-Huge model (ViT-H/14) from original paper (https://arxiv.org/abs/2010.11929). ImageNet-21k weights @ 224x224, source https://github.com/google-research/vision_transformer. NOTE: converted weights not currently available, too large for github release hosting. """ model_kwargs = dict( patch_size=14, embed_dim=1280, depth=32, num_heads=16, representation_size=1280, **kwargs, ) model = _create_vision_transformer( "vit_huge_patch14_224_in21k", pretrained=pretrained, **model_kwargs ) return model
ViT-Huge model (ViT-H/14) from original paper (https://arxiv.org/abs/2010.11929). ImageNet-21k weights @ 224x224, source https://github.com/google-research/vision_transformer. NOTE: converted weights not currently available, too large for github release hosting.
1,887
import math import logging from functools import partial import torch import torch.nn as nn import torch.nn.functional as F import hashlib import os import urllib import warnings from functools import partial from tqdm import tqdm from timm.data import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD from timm.models.helpers import load_pretrained from timm.models.layers import StdConv2dSame, DropPath, to_2tuple, trunc_normal_ from timm.models.resnet import resnet26d, resnet50d from timm.models.resnetv2 import ResNetV2 from timm.models.registry import register_model from torchvision import transforms def _create_vision_transformer(variant, pretrained=False, distilled=False, **kwargs): default_cfg = default_cfgs[variant] default_num_classes = default_cfg["num_classes"] default_img_size = default_cfg["input_size"][-1] num_classes = kwargs.pop("num_classes", default_num_classes) img_size = kwargs.pop("img_size", default_img_size) repr_size = kwargs.pop("representation_size", None) if repr_size is not None and num_classes != default_num_classes: # Remove representation layer if fine-tuning. This may not always be the desired action, # but I feel better than doing nothing by default for fine-tuning. Perhaps a better interface? _logger.warning("Removing representation layer for fine-tuning.") repr_size = None model_cls = DistilledVisionTransformer if distilled else VisionTransformer model = model_cls( img_size=img_size, num_classes=num_classes, representation_size=repr_size, **kwargs, ) model.default_cfg = default_cfg if pretrained: load_pretrained( model, num_classes=num_classes, in_chans=kwargs.get("in_chans", 3), filter_fn=partial(checkpoint_filter_fn, model=model), strict=False, ) return model The provided code snippet includes necessary dependencies for implementing the `vit_base_resnet50_224_in21k` function. Write a Python function `def vit_base_resnet50_224_in21k(pretrained=False, **kwargs)` to solve the following problem: R50+ViT-B/16 hybrid model from original paper (https://arxiv.org/abs/2010.11929). ImageNet-21k weights @ 224x224, source https://github.com/google-research/vision_transformer. Here is the function: def vit_base_resnet50_224_in21k(pretrained=False, **kwargs): """ R50+ViT-B/16 hybrid model from original paper (https://arxiv.org/abs/2010.11929). ImageNet-21k weights @ 224x224, source https://github.com/google-research/vision_transformer. """ # create a ResNetV2 w/o pre-activation, that uses StdConv and GroupNorm and has 3 stages, no head backbone = ResNetV2( layers=(3, 4, 9), num_classes=0, global_pool="", in_chans=kwargs.get("in_chans", 3), preact=False, stem_type="same", conv_layer=StdConv2dSame, ) model_kwargs = dict( embed_dim=768, depth=12, num_heads=12, hybrid_backbone=backbone, representation_size=768, **kwargs, ) model = _create_vision_transformer( "vit_base_resnet50_224_in21k", pretrained=pretrained, **model_kwargs ) return model
R50+ViT-B/16 hybrid model from original paper (https://arxiv.org/abs/2010.11929). ImageNet-21k weights @ 224x224, source https://github.com/google-research/vision_transformer.
1,888
import math import logging from functools import partial import torch import torch.nn as nn import torch.nn.functional as F import hashlib import os import urllib import warnings from functools import partial from tqdm import tqdm from timm.data import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD from timm.models.helpers import load_pretrained from timm.models.layers import StdConv2dSame, DropPath, to_2tuple, trunc_normal_ from timm.models.resnet import resnet26d, resnet50d from timm.models.resnetv2 import ResNetV2 from timm.models.registry import register_model from torchvision import transforms def _create_vision_transformer(variant, pretrained=False, distilled=False, **kwargs): default_cfg = default_cfgs[variant] default_num_classes = default_cfg["num_classes"] default_img_size = default_cfg["input_size"][-1] num_classes = kwargs.pop("num_classes", default_num_classes) img_size = kwargs.pop("img_size", default_img_size) repr_size = kwargs.pop("representation_size", None) if repr_size is not None and num_classes != default_num_classes: # Remove representation layer if fine-tuning. This may not always be the desired action, # but I feel better than doing nothing by default for fine-tuning. Perhaps a better interface? _logger.warning("Removing representation layer for fine-tuning.") repr_size = None model_cls = DistilledVisionTransformer if distilled else VisionTransformer model = model_cls( img_size=img_size, num_classes=num_classes, representation_size=repr_size, **kwargs, ) model.default_cfg = default_cfg if pretrained: load_pretrained( model, num_classes=num_classes, in_chans=kwargs.get("in_chans", 3), filter_fn=partial(checkpoint_filter_fn, model=model), strict=False, ) return model The provided code snippet includes necessary dependencies for implementing the `vit_base_resnet50_384` function. Write a Python function `def vit_base_resnet50_384(pretrained=False, **kwargs)` to solve the following problem: R50+ViT-B/16 hybrid from original paper (https://arxiv.org/abs/2010.11929). ImageNet-1k weights fine-tuned from in21k @ 384x384, source https://github.com/google-research/vision_transformer. Here is the function: def vit_base_resnet50_384(pretrained=False, **kwargs): """ R50+ViT-B/16 hybrid from original paper (https://arxiv.org/abs/2010.11929). ImageNet-1k weights fine-tuned from in21k @ 384x384, source https://github.com/google-research/vision_transformer. """ # create a ResNetV2 w/o pre-activation, that uses StdConv and GroupNorm and has 3 stages, no head backbone = ResNetV2( layers=(3, 4, 9), num_classes=0, global_pool="", in_chans=kwargs.get("in_chans", 3), preact=False, stem_type="same", conv_layer=StdConv2dSame, ) model_kwargs = dict( embed_dim=768, depth=12, num_heads=12, hybrid_backbone=backbone, **kwargs ) model = _create_vision_transformer( "vit_base_resnet50_384", pretrained=pretrained, **model_kwargs ) return model
R50+ViT-B/16 hybrid from original paper (https://arxiv.org/abs/2010.11929). ImageNet-1k weights fine-tuned from in21k @ 384x384, source https://github.com/google-research/vision_transformer.
1,889
import math import logging from functools import partial import torch import torch.nn as nn import torch.nn.functional as F import hashlib import os import urllib import warnings from functools import partial from tqdm import tqdm from timm.data import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD from timm.models.helpers import load_pretrained from timm.models.layers import StdConv2dSame, DropPath, to_2tuple, trunc_normal_ from timm.models.resnet import resnet26d, resnet50d from timm.models.resnetv2 import ResNetV2 from timm.models.registry import register_model from torchvision import transforms def _create_vision_transformer(variant, pretrained=False, distilled=False, **kwargs): default_cfg = default_cfgs[variant] default_num_classes = default_cfg["num_classes"] default_img_size = default_cfg["input_size"][-1] num_classes = kwargs.pop("num_classes", default_num_classes) img_size = kwargs.pop("img_size", default_img_size) repr_size = kwargs.pop("representation_size", None) if repr_size is not None and num_classes != default_num_classes: # Remove representation layer if fine-tuning. This may not always be the desired action, # but I feel better than doing nothing by default for fine-tuning. Perhaps a better interface? _logger.warning("Removing representation layer for fine-tuning.") repr_size = None model_cls = DistilledVisionTransformer if distilled else VisionTransformer model = model_cls( img_size=img_size, num_classes=num_classes, representation_size=repr_size, **kwargs, ) model.default_cfg = default_cfg if pretrained: load_pretrained( model, num_classes=num_classes, in_chans=kwargs.get("in_chans", 3), filter_fn=partial(checkpoint_filter_fn, model=model), strict=False, ) return model The provided code snippet includes necessary dependencies for implementing the `vit_small_resnet26d_224` function. Write a Python function `def vit_small_resnet26d_224(pretrained=False, **kwargs)` to solve the following problem: Custom ViT small hybrid w/ ResNet26D stride 32. No pretrained weights. Here is the function: def vit_small_resnet26d_224(pretrained=False, **kwargs): """ Custom ViT small hybrid w/ ResNet26D stride 32. No pretrained weights. """ backbone = resnet26d( pretrained=pretrained, in_chans=kwargs.get("in_chans", 3), features_only=True, out_indices=[4], ) model_kwargs = dict( embed_dim=768, depth=8, num_heads=8, mlp_ratio=3, hybrid_backbone=backbone, **kwargs, ) model = _create_vision_transformer( "vit_small_resnet26d_224", pretrained=pretrained, **model_kwargs ) return model
Custom ViT small hybrid w/ ResNet26D stride 32. No pretrained weights.
1,890
import math import logging from functools import partial import torch import torch.nn as nn import torch.nn.functional as F import hashlib import os import urllib import warnings from functools import partial from tqdm import tqdm from timm.data import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD from timm.models.helpers import load_pretrained from timm.models.layers import StdConv2dSame, DropPath, to_2tuple, trunc_normal_ from timm.models.resnet import resnet26d, resnet50d from timm.models.resnetv2 import ResNetV2 from timm.models.registry import register_model from torchvision import transforms def _create_vision_transformer(variant, pretrained=False, distilled=False, **kwargs): default_cfg = default_cfgs[variant] default_num_classes = default_cfg["num_classes"] default_img_size = default_cfg["input_size"][-1] num_classes = kwargs.pop("num_classes", default_num_classes) img_size = kwargs.pop("img_size", default_img_size) repr_size = kwargs.pop("representation_size", None) if repr_size is not None and num_classes != default_num_classes: # Remove representation layer if fine-tuning. This may not always be the desired action, # but I feel better than doing nothing by default for fine-tuning. Perhaps a better interface? _logger.warning("Removing representation layer for fine-tuning.") repr_size = None model_cls = DistilledVisionTransformer if distilled else VisionTransformer model = model_cls( img_size=img_size, num_classes=num_classes, representation_size=repr_size, **kwargs, ) model.default_cfg = default_cfg if pretrained: load_pretrained( model, num_classes=num_classes, in_chans=kwargs.get("in_chans", 3), filter_fn=partial(checkpoint_filter_fn, model=model), strict=False, ) return model The provided code snippet includes necessary dependencies for implementing the `vit_small_resnet50d_s3_224` function. Write a Python function `def vit_small_resnet50d_s3_224(pretrained=False, **kwargs)` to solve the following problem: Custom ViT small hybrid w/ ResNet50D 3-stages, stride 16. No pretrained weights. Here is the function: def vit_small_resnet50d_s3_224(pretrained=False, **kwargs): """ Custom ViT small hybrid w/ ResNet50D 3-stages, stride 16. No pretrained weights. """ backbone = resnet50d( pretrained=pretrained, in_chans=kwargs.get("in_chans", 3), features_only=True, out_indices=[3], ) model_kwargs = dict( embed_dim=768, depth=8, num_heads=8, mlp_ratio=3, hybrid_backbone=backbone, **kwargs, ) model = _create_vision_transformer( "vit_small_resnet50d_s3_224", pretrained=pretrained, **model_kwargs ) return model
Custom ViT small hybrid w/ ResNet50D 3-stages, stride 16. No pretrained weights.
1,891
import math import logging from functools import partial import torch import torch.nn as nn import torch.nn.functional as F import hashlib import os import urllib import warnings from functools import partial from tqdm import tqdm from timm.data import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD from timm.models.helpers import load_pretrained from timm.models.layers import StdConv2dSame, DropPath, to_2tuple, trunc_normal_ from timm.models.resnet import resnet26d, resnet50d from timm.models.resnetv2 import ResNetV2 from timm.models.registry import register_model from torchvision import transforms def _create_vision_transformer(variant, pretrained=False, distilled=False, **kwargs): default_cfg = default_cfgs[variant] default_num_classes = default_cfg["num_classes"] default_img_size = default_cfg["input_size"][-1] num_classes = kwargs.pop("num_classes", default_num_classes) img_size = kwargs.pop("img_size", default_img_size) repr_size = kwargs.pop("representation_size", None) if repr_size is not None and num_classes != default_num_classes: # Remove representation layer if fine-tuning. This may not always be the desired action, # but I feel better than doing nothing by default for fine-tuning. Perhaps a better interface? _logger.warning("Removing representation layer for fine-tuning.") repr_size = None model_cls = DistilledVisionTransformer if distilled else VisionTransformer model = model_cls( img_size=img_size, num_classes=num_classes, representation_size=repr_size, **kwargs, ) model.default_cfg = default_cfg if pretrained: load_pretrained( model, num_classes=num_classes, in_chans=kwargs.get("in_chans", 3), filter_fn=partial(checkpoint_filter_fn, model=model), strict=False, ) return model The provided code snippet includes necessary dependencies for implementing the `vit_base_resnet26d_224` function. Write a Python function `def vit_base_resnet26d_224(pretrained=False, **kwargs)` to solve the following problem: Custom ViT base hybrid w/ ResNet26D stride 32. No pretrained weights. Here is the function: def vit_base_resnet26d_224(pretrained=False, **kwargs): """ Custom ViT base hybrid w/ ResNet26D stride 32. No pretrained weights. """ backbone = resnet26d( pretrained=pretrained, in_chans=kwargs.get("in_chans", 3), features_only=True, out_indices=[4], ) model_kwargs = dict( embed_dim=768, depth=12, num_heads=12, hybrid_backbone=backbone, **kwargs ) model = _create_vision_transformer( "vit_base_resnet26d_224", pretrained=pretrained, **model_kwargs ) return model
Custom ViT base hybrid w/ ResNet26D stride 32. No pretrained weights.
1,892
import math import logging from functools import partial import torch import torch.nn as nn import torch.nn.functional as F import hashlib import os import urllib import warnings from functools import partial from tqdm import tqdm from timm.data import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD from timm.models.helpers import load_pretrained from timm.models.layers import StdConv2dSame, DropPath, to_2tuple, trunc_normal_ from timm.models.resnet import resnet26d, resnet50d from timm.models.resnetv2 import ResNetV2 from timm.models.registry import register_model from torchvision import transforms def _create_vision_transformer(variant, pretrained=False, distilled=False, **kwargs): default_cfg = default_cfgs[variant] default_num_classes = default_cfg["num_classes"] default_img_size = default_cfg["input_size"][-1] num_classes = kwargs.pop("num_classes", default_num_classes) img_size = kwargs.pop("img_size", default_img_size) repr_size = kwargs.pop("representation_size", None) if repr_size is not None and num_classes != default_num_classes: # Remove representation layer if fine-tuning. This may not always be the desired action, # but I feel better than doing nothing by default for fine-tuning. Perhaps a better interface? _logger.warning("Removing representation layer for fine-tuning.") repr_size = None model_cls = DistilledVisionTransformer if distilled else VisionTransformer model = model_cls( img_size=img_size, num_classes=num_classes, representation_size=repr_size, **kwargs, ) model.default_cfg = default_cfg if pretrained: load_pretrained( model, num_classes=num_classes, in_chans=kwargs.get("in_chans", 3), filter_fn=partial(checkpoint_filter_fn, model=model), strict=False, ) return model The provided code snippet includes necessary dependencies for implementing the `vit_base_resnet50d_224` function. Write a Python function `def vit_base_resnet50d_224(pretrained=False, **kwargs)` to solve the following problem: Custom ViT base hybrid w/ ResNet50D stride 32. No pretrained weights. Here is the function: def vit_base_resnet50d_224(pretrained=False, **kwargs): """ Custom ViT base hybrid w/ ResNet50D stride 32. No pretrained weights. """ backbone = resnet50d( pretrained=pretrained, in_chans=kwargs.get("in_chans", 3), features_only=True, out_indices=[4], ) model_kwargs = dict( embed_dim=768, depth=12, num_heads=12, hybrid_backbone=backbone, **kwargs ) model = _create_vision_transformer( "vit_base_resnet50d_224", pretrained=pretrained, **model_kwargs ) return model
Custom ViT base hybrid w/ ResNet50D stride 32. No pretrained weights.
1,893
import math import logging from functools import partial import torch import torch.nn as nn import torch.nn.functional as F import hashlib import os import urllib import warnings from functools import partial from tqdm import tqdm from timm.data import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD from timm.models.helpers import load_pretrained from timm.models.layers import StdConv2dSame, DropPath, to_2tuple, trunc_normal_ from timm.models.resnet import resnet26d, resnet50d from timm.models.resnetv2 import ResNetV2 from timm.models.registry import register_model from torchvision import transforms def _create_vision_transformer(variant, pretrained=False, distilled=False, **kwargs): default_cfg = default_cfgs[variant] default_num_classes = default_cfg["num_classes"] default_img_size = default_cfg["input_size"][-1] num_classes = kwargs.pop("num_classes", default_num_classes) img_size = kwargs.pop("img_size", default_img_size) repr_size = kwargs.pop("representation_size", None) if repr_size is not None and num_classes != default_num_classes: # Remove representation layer if fine-tuning. This may not always be the desired action, # but I feel better than doing nothing by default for fine-tuning. Perhaps a better interface? _logger.warning("Removing representation layer for fine-tuning.") repr_size = None model_cls = DistilledVisionTransformer if distilled else VisionTransformer model = model_cls( img_size=img_size, num_classes=num_classes, representation_size=repr_size, **kwargs, ) model.default_cfg = default_cfg if pretrained: load_pretrained( model, num_classes=num_classes, in_chans=kwargs.get("in_chans", 3), filter_fn=partial(checkpoint_filter_fn, model=model), strict=False, ) return model The provided code snippet includes necessary dependencies for implementing the `vit_deit_tiny_patch16_224` function. Write a Python function `def vit_deit_tiny_patch16_224(pretrained=False, **kwargs)` to solve the following problem: DeiT-tiny model @ 224x224 from paper (https://arxiv.org/abs/2012.12877). ImageNet-1k weights from https://github.com/facebookresearch/deit. Here is the function: def vit_deit_tiny_patch16_224(pretrained=False, **kwargs): """ DeiT-tiny model @ 224x224 from paper (https://arxiv.org/abs/2012.12877). ImageNet-1k weights from https://github.com/facebookresearch/deit. """ model_kwargs = dict(patch_size=16, embed_dim=192, depth=12, num_heads=3, **kwargs) model = _create_vision_transformer( "vit_deit_tiny_patch16_224", pretrained=pretrained, **model_kwargs ) return model
DeiT-tiny model @ 224x224 from paper (https://arxiv.org/abs/2012.12877). ImageNet-1k weights from https://github.com/facebookresearch/deit.
1,894
import math import logging from functools import partial import torch import torch.nn as nn import torch.nn.functional as F import hashlib import os import urllib import warnings from functools import partial from tqdm import tqdm from timm.data import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD from timm.models.helpers import load_pretrained from timm.models.layers import StdConv2dSame, DropPath, to_2tuple, trunc_normal_ from timm.models.resnet import resnet26d, resnet50d from timm.models.resnetv2 import ResNetV2 from timm.models.registry import register_model from torchvision import transforms def _create_vision_transformer(variant, pretrained=False, distilled=False, **kwargs): default_cfg = default_cfgs[variant] default_num_classes = default_cfg["num_classes"] default_img_size = default_cfg["input_size"][-1] num_classes = kwargs.pop("num_classes", default_num_classes) img_size = kwargs.pop("img_size", default_img_size) repr_size = kwargs.pop("representation_size", None) if repr_size is not None and num_classes != default_num_classes: # Remove representation layer if fine-tuning. This may not always be the desired action, # but I feel better than doing nothing by default for fine-tuning. Perhaps a better interface? _logger.warning("Removing representation layer for fine-tuning.") repr_size = None model_cls = DistilledVisionTransformer if distilled else VisionTransformer model = model_cls( img_size=img_size, num_classes=num_classes, representation_size=repr_size, **kwargs, ) model.default_cfg = default_cfg if pretrained: load_pretrained( model, num_classes=num_classes, in_chans=kwargs.get("in_chans", 3), filter_fn=partial(checkpoint_filter_fn, model=model), strict=False, ) return model The provided code snippet includes necessary dependencies for implementing the `vit_deit_small_patch16_224` function. Write a Python function `def vit_deit_small_patch16_224(pretrained=False, **kwargs)` to solve the following problem: DeiT-small model @ 224x224 from paper (https://arxiv.org/abs/2012.12877). ImageNet-1k weights from https://github.com/facebookresearch/deit. Here is the function: def vit_deit_small_patch16_224(pretrained=False, **kwargs): """ DeiT-small model @ 224x224 from paper (https://arxiv.org/abs/2012.12877). ImageNet-1k weights from https://github.com/facebookresearch/deit. """ model_kwargs = dict(patch_size=16, embed_dim=384, depth=12, num_heads=6, **kwargs) model = _create_vision_transformer( "vit_deit_small_patch16_224", pretrained=pretrained, **model_kwargs ) return model
DeiT-small model @ 224x224 from paper (https://arxiv.org/abs/2012.12877). ImageNet-1k weights from https://github.com/facebookresearch/deit.
1,895
import math import logging from functools import partial import torch import torch.nn as nn import torch.nn.functional as F import hashlib import os import urllib import warnings from functools import partial from tqdm import tqdm from timm.data import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD from timm.models.helpers import load_pretrained from timm.models.layers import StdConv2dSame, DropPath, to_2tuple, trunc_normal_ from timm.models.resnet import resnet26d, resnet50d from timm.models.resnetv2 import ResNetV2 from timm.models.registry import register_model from torchvision import transforms def _create_vision_transformer(variant, pretrained=False, distilled=False, **kwargs): default_cfg = default_cfgs[variant] default_num_classes = default_cfg["num_classes"] default_img_size = default_cfg["input_size"][-1] num_classes = kwargs.pop("num_classes", default_num_classes) img_size = kwargs.pop("img_size", default_img_size) repr_size = kwargs.pop("representation_size", None) if repr_size is not None and num_classes != default_num_classes: # Remove representation layer if fine-tuning. This may not always be the desired action, # but I feel better than doing nothing by default for fine-tuning. Perhaps a better interface? _logger.warning("Removing representation layer for fine-tuning.") repr_size = None model_cls = DistilledVisionTransformer if distilled else VisionTransformer model = model_cls( img_size=img_size, num_classes=num_classes, representation_size=repr_size, **kwargs, ) model.default_cfg = default_cfg if pretrained: load_pretrained( model, num_classes=num_classes, in_chans=kwargs.get("in_chans", 3), filter_fn=partial(checkpoint_filter_fn, model=model), strict=False, ) return model The provided code snippet includes necessary dependencies for implementing the `vit_deit_base_patch16_224` function. Write a Python function `def vit_deit_base_patch16_224(pretrained=False, **kwargs)` to solve the following problem: DeiT base model @ 224x224 from paper (https://arxiv.org/abs/2012.12877). ImageNet-1k weights from https://github.com/facebookresearch/deit. Here is the function: def vit_deit_base_patch16_224(pretrained=False, **kwargs): """ DeiT base model @ 224x224 from paper (https://arxiv.org/abs/2012.12877). ImageNet-1k weights from https://github.com/facebookresearch/deit. """ model_kwargs = dict(patch_size=16, embed_dim=768, depth=12, num_heads=12, **kwargs) model = _create_vision_transformer( "vit_deit_base_patch16_224", pretrained=pretrained, **model_kwargs ) return model
DeiT base model @ 224x224 from paper (https://arxiv.org/abs/2012.12877). ImageNet-1k weights from https://github.com/facebookresearch/deit.
1,896
import math import logging from functools import partial import torch import torch.nn as nn import torch.nn.functional as F import hashlib import os import urllib import warnings from functools import partial from tqdm import tqdm from timm.data import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD from timm.models.helpers import load_pretrained from timm.models.layers import StdConv2dSame, DropPath, to_2tuple, trunc_normal_ from timm.models.resnet import resnet26d, resnet50d from timm.models.resnetv2 import ResNetV2 from timm.models.registry import register_model from torchvision import transforms def _create_vision_transformer(variant, pretrained=False, distilled=False, **kwargs): default_cfg = default_cfgs[variant] default_num_classes = default_cfg["num_classes"] default_img_size = default_cfg["input_size"][-1] num_classes = kwargs.pop("num_classes", default_num_classes) img_size = kwargs.pop("img_size", default_img_size) repr_size = kwargs.pop("representation_size", None) if repr_size is not None and num_classes != default_num_classes: # Remove representation layer if fine-tuning. This may not always be the desired action, # but I feel better than doing nothing by default for fine-tuning. Perhaps a better interface? _logger.warning("Removing representation layer for fine-tuning.") repr_size = None model_cls = DistilledVisionTransformer if distilled else VisionTransformer model = model_cls( img_size=img_size, num_classes=num_classes, representation_size=repr_size, **kwargs, ) model.default_cfg = default_cfg if pretrained: load_pretrained( model, num_classes=num_classes, in_chans=kwargs.get("in_chans", 3), filter_fn=partial(checkpoint_filter_fn, model=model), strict=False, ) return model The provided code snippet includes necessary dependencies for implementing the `vit_deit_base_patch16_384` function. Write a Python function `def vit_deit_base_patch16_384(pretrained=False, **kwargs)` to solve the following problem: DeiT base model @ 384x384 from paper (https://arxiv.org/abs/2012.12877). ImageNet-1k weights from https://github.com/facebookresearch/deit. Here is the function: def vit_deit_base_patch16_384(pretrained=False, **kwargs): """ DeiT base model @ 384x384 from paper (https://arxiv.org/abs/2012.12877). ImageNet-1k weights from https://github.com/facebookresearch/deit. """ model_kwargs = dict(patch_size=16, embed_dim=768, depth=12, num_heads=12, **kwargs) model = _create_vision_transformer( "vit_deit_base_patch16_384", pretrained=pretrained, **model_kwargs ) return model
DeiT base model @ 384x384 from paper (https://arxiv.org/abs/2012.12877). ImageNet-1k weights from https://github.com/facebookresearch/deit.
1,897
import math import logging from functools import partial import torch import torch.nn as nn import torch.nn.functional as F import hashlib import os import urllib import warnings from functools import partial from tqdm import tqdm from timm.data import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD from timm.models.helpers import load_pretrained from timm.models.layers import StdConv2dSame, DropPath, to_2tuple, trunc_normal_ from timm.models.resnet import resnet26d, resnet50d from timm.models.resnetv2 import ResNetV2 from timm.models.registry import register_model from torchvision import transforms def _create_vision_transformer(variant, pretrained=False, distilled=False, **kwargs): default_cfg = default_cfgs[variant] default_num_classes = default_cfg["num_classes"] default_img_size = default_cfg["input_size"][-1] num_classes = kwargs.pop("num_classes", default_num_classes) img_size = kwargs.pop("img_size", default_img_size) repr_size = kwargs.pop("representation_size", None) if repr_size is not None and num_classes != default_num_classes: # Remove representation layer if fine-tuning. This may not always be the desired action, # but I feel better than doing nothing by default for fine-tuning. Perhaps a better interface? _logger.warning("Removing representation layer for fine-tuning.") repr_size = None model_cls = DistilledVisionTransformer if distilled else VisionTransformer model = model_cls( img_size=img_size, num_classes=num_classes, representation_size=repr_size, **kwargs, ) model.default_cfg = default_cfg if pretrained: load_pretrained( model, num_classes=num_classes, in_chans=kwargs.get("in_chans", 3), filter_fn=partial(checkpoint_filter_fn, model=model), strict=False, ) return model The provided code snippet includes necessary dependencies for implementing the `vit_deit_tiny_distilled_patch16_224` function. Write a Python function `def vit_deit_tiny_distilled_patch16_224(pretrained=False, **kwargs)` to solve the following problem: DeiT-tiny distilled model @ 224x224 from paper (https://arxiv.org/abs/2012.12877). ImageNet-1k weights from https://github.com/facebookresearch/deit. Here is the function: def vit_deit_tiny_distilled_patch16_224(pretrained=False, **kwargs): """ DeiT-tiny distilled model @ 224x224 from paper (https://arxiv.org/abs/2012.12877). ImageNet-1k weights from https://github.com/facebookresearch/deit. """ model_kwargs = dict(patch_size=16, embed_dim=192, depth=12, num_heads=3, **kwargs) model = _create_vision_transformer( "vit_deit_tiny_distilled_patch16_224", pretrained=pretrained, distilled=True, **model_kwargs, ) return model
DeiT-tiny distilled model @ 224x224 from paper (https://arxiv.org/abs/2012.12877). ImageNet-1k weights from https://github.com/facebookresearch/deit.
1,898
import math import logging from functools import partial import torch import torch.nn as nn import torch.nn.functional as F import hashlib import os import urllib import warnings from functools import partial from tqdm import tqdm from timm.data import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD from timm.models.helpers import load_pretrained from timm.models.layers import StdConv2dSame, DropPath, to_2tuple, trunc_normal_ from timm.models.resnet import resnet26d, resnet50d from timm.models.resnetv2 import ResNetV2 from timm.models.registry import register_model from torchvision import transforms def _create_vision_transformer(variant, pretrained=False, distilled=False, **kwargs): default_cfg = default_cfgs[variant] default_num_classes = default_cfg["num_classes"] default_img_size = default_cfg["input_size"][-1] num_classes = kwargs.pop("num_classes", default_num_classes) img_size = kwargs.pop("img_size", default_img_size) repr_size = kwargs.pop("representation_size", None) if repr_size is not None and num_classes != default_num_classes: # Remove representation layer if fine-tuning. This may not always be the desired action, # but I feel better than doing nothing by default for fine-tuning. Perhaps a better interface? _logger.warning("Removing representation layer for fine-tuning.") repr_size = None model_cls = DistilledVisionTransformer if distilled else VisionTransformer model = model_cls( img_size=img_size, num_classes=num_classes, representation_size=repr_size, **kwargs, ) model.default_cfg = default_cfg if pretrained: load_pretrained( model, num_classes=num_classes, in_chans=kwargs.get("in_chans", 3), filter_fn=partial(checkpoint_filter_fn, model=model), strict=False, ) return model The provided code snippet includes necessary dependencies for implementing the `vit_deit_small_distilled_patch16_224` function. Write a Python function `def vit_deit_small_distilled_patch16_224(pretrained=False, **kwargs)` to solve the following problem: DeiT-small distilled model @ 224x224 from paper (https://arxiv.org/abs/2012.12877). ImageNet-1k weights from https://github.com/facebookresearch/deit. Here is the function: def vit_deit_small_distilled_patch16_224(pretrained=False, **kwargs): """ DeiT-small distilled model @ 224x224 from paper (https://arxiv.org/abs/2012.12877). ImageNet-1k weights from https://github.com/facebookresearch/deit. """ model_kwargs = dict(patch_size=16, embed_dim=384, depth=12, num_heads=6, **kwargs) model = _create_vision_transformer( "vit_deit_small_distilled_patch16_224", pretrained=pretrained, distilled=True, **model_kwargs, ) return model
DeiT-small distilled model @ 224x224 from paper (https://arxiv.org/abs/2012.12877). ImageNet-1k weights from https://github.com/facebookresearch/deit.
1,899
import math import logging from functools import partial import torch import torch.nn as nn import torch.nn.functional as F import hashlib import os import urllib import warnings from functools import partial from tqdm import tqdm from timm.data import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD from timm.models.helpers import load_pretrained from timm.models.layers import StdConv2dSame, DropPath, to_2tuple, trunc_normal_ from timm.models.resnet import resnet26d, resnet50d from timm.models.resnetv2 import ResNetV2 from timm.models.registry import register_model from torchvision import transforms def _create_vision_transformer(variant, pretrained=False, distilled=False, **kwargs): default_cfg = default_cfgs[variant] default_num_classes = default_cfg["num_classes"] default_img_size = default_cfg["input_size"][-1] num_classes = kwargs.pop("num_classes", default_num_classes) img_size = kwargs.pop("img_size", default_img_size) repr_size = kwargs.pop("representation_size", None) if repr_size is not None and num_classes != default_num_classes: # Remove representation layer if fine-tuning. This may not always be the desired action, # but I feel better than doing nothing by default for fine-tuning. Perhaps a better interface? _logger.warning("Removing representation layer for fine-tuning.") repr_size = None model_cls = DistilledVisionTransformer if distilled else VisionTransformer model = model_cls( img_size=img_size, num_classes=num_classes, representation_size=repr_size, **kwargs, ) model.default_cfg = default_cfg if pretrained: load_pretrained( model, num_classes=num_classes, in_chans=kwargs.get("in_chans", 3), filter_fn=partial(checkpoint_filter_fn, model=model), strict=False, ) return model The provided code snippet includes necessary dependencies for implementing the `vit_deit_base_distilled_patch16_224` function. Write a Python function `def vit_deit_base_distilled_patch16_224(pretrained=False, **kwargs)` to solve the following problem: DeiT-base distilled model @ 224x224 from paper (https://arxiv.org/abs/2012.12877). ImageNet-1k weights from https://github.com/facebookresearch/deit. Here is the function: def vit_deit_base_distilled_patch16_224(pretrained=False, **kwargs): """ DeiT-base distilled model @ 224x224 from paper (https://arxiv.org/abs/2012.12877). ImageNet-1k weights from https://github.com/facebookresearch/deit. """ model_kwargs = dict(patch_size=16, embed_dim=768, depth=12, num_heads=12, **kwargs) model = _create_vision_transformer( "vit_deit_base_distilled_patch16_224", pretrained=pretrained, distilled=True, **model_kwargs, ) return model
DeiT-base distilled model @ 224x224 from paper (https://arxiv.org/abs/2012.12877). ImageNet-1k weights from https://github.com/facebookresearch/deit.
1,900
import math import logging from functools import partial import torch import torch.nn as nn import torch.nn.functional as F import hashlib import os import urllib import warnings from functools import partial from tqdm import tqdm from timm.data import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD from timm.models.helpers import load_pretrained from timm.models.layers import StdConv2dSame, DropPath, to_2tuple, trunc_normal_ from timm.models.resnet import resnet26d, resnet50d from timm.models.resnetv2 import ResNetV2 from timm.models.registry import register_model from torchvision import transforms def _create_vision_transformer(variant, pretrained=False, distilled=False, **kwargs): default_cfg = default_cfgs[variant] default_num_classes = default_cfg["num_classes"] default_img_size = default_cfg["input_size"][-1] num_classes = kwargs.pop("num_classes", default_num_classes) img_size = kwargs.pop("img_size", default_img_size) repr_size = kwargs.pop("representation_size", None) if repr_size is not None and num_classes != default_num_classes: # Remove representation layer if fine-tuning. This may not always be the desired action, # but I feel better than doing nothing by default for fine-tuning. Perhaps a better interface? _logger.warning("Removing representation layer for fine-tuning.") repr_size = None model_cls = DistilledVisionTransformer if distilled else VisionTransformer model = model_cls( img_size=img_size, num_classes=num_classes, representation_size=repr_size, **kwargs, ) model.default_cfg = default_cfg if pretrained: load_pretrained( model, num_classes=num_classes, in_chans=kwargs.get("in_chans", 3), filter_fn=partial(checkpoint_filter_fn, model=model), strict=False, ) return model The provided code snippet includes necessary dependencies for implementing the `vit_deit_base_distilled_patch16_384` function. Write a Python function `def vit_deit_base_distilled_patch16_384(pretrained=False, **kwargs)` to solve the following problem: DeiT-base distilled model @ 384x384 from paper (https://arxiv.org/abs/2012.12877). ImageNet-1k weights from https://github.com/facebookresearch/deit. Here is the function: def vit_deit_base_distilled_patch16_384(pretrained=False, **kwargs): """ DeiT-base distilled model @ 384x384 from paper (https://arxiv.org/abs/2012.12877). ImageNet-1k weights from https://github.com/facebookresearch/deit. """ model_kwargs = dict(patch_size=16, embed_dim=768, depth=12, num_heads=12, **kwargs) model = _create_vision_transformer( "vit_deit_base_distilled_patch16_384", pretrained=pretrained, distilled=True, **model_kwargs, ) return model
DeiT-base distilled model @ 384x384 from paper (https://arxiv.org/abs/2012.12877). ImageNet-1k weights from https://github.com/facebookresearch/deit.
1,901
import torch from pytorch_lightning import LightningDataModule from torch.utils.data import DataLoader from transformers import ( DataCollatorForLanguageModeling, DataCollatorForWholeWordMask, BertTokenizer, ) def get_pretrained_tokenizer(from_pretrained): if torch.distributed.is_initialized(): if torch.distributed.get_rank() == 0: BertTokenizer.from_pretrained( from_pretrained, do_lower_case="uncased" in from_pretrained ) torch.distributed.barrier() return BertTokenizer.from_pretrained( from_pretrained, do_lower_case="uncased" in from_pretrained )
null
1,902
from sacred import Experiment def _loss_names(d): def config(): exp_name = "vilt" seed = 0 datasets = ["coco", "vg", "sbu", "gcc"] loss_names = _loss_names({"itm": 1, "mlm": 1}) batch_size = 4096 # this is a desired batch size; pl trainer will accumulate gradients when per step batch is smaller. # Image setting train_transform_keys = ["pixelbert"] val_transform_keys = ["pixelbert"] image_size = 384 max_image_len = -1 patch_size = 32 draw_false_image = 1 image_only = False # Text Setting vqav2_label_size = 3129 max_text_len = 40 tokenizer = "bert-base-uncased" vocab_size = 30522 whole_word_masking = False mlm_prob = 0.15 draw_false_text = 0 # Transformer Setting vit = "vit_base_patch32_384" hidden_size = 768 num_heads = 12 num_layers = 12 mlp_ratio = 4 drop_rate = 0.1 # Optimizer Setting optim_type = "adamw" learning_rate = 1e-4 weight_decay = 0.01 decay_power = 1 max_epoch = 100 max_steps = 25000 warmup_steps = 2500 end_lr = 0 lr_mult = 1 # multiply lr for downstream heads # Downstream Setting get_recall_metric = False # PL Trainer Setting resume_from = None fast_dev_run = False val_check_interval = 1.0 test_only = False # below params varies with the environment data_root = "" log_dir = "result" per_gpu_batchsize = 0 # you should define this manually with per_gpu_batch_size=# num_gpus = 1 num_nodes = 1 load_path = "" num_workers = 8 precision = 16
null
1,903
from sacred import Experiment def env_dandelin(): data_root = "/data2/dsets/dataset" log_dir = "/data2/vilt/result" num_gpus = 8 num_nodes = 1
null
1,904
from sacred import Experiment def _loss_names(d): ret = { "itm": 0, "mlm": 0, "mpp": 0, "vqa": 0, "nlvr2": 0, "irtr": 0, } ret.update(d) return ret def task_mlm_itm(): exp_name = "mlm_itm" datasets = ["coco", "vg", "sbu", "gcc"] loss_names = _loss_names({"itm": 1, "mlm": 1}) batch_size = 4096 max_epoch = 10 max_image_len = 200
null
1,905
from sacred import Experiment def _loss_names(d): ret = { "itm": 0, "mlm": 0, "mpp": 0, "vqa": 0, "nlvr2": 0, "irtr": 0, } ret.update(d) return ret def task_mlm_itm_randaug(): exp_name = "mlm_itm_randaug" datasets = ["coco", "vg", "sbu", "gcc"] train_transform_keys = ["pixelbert_randaug"] loss_names = _loss_names({"itm": 1, "mlm": 1}) batch_size = 4096 max_epoch = 10 max_image_len = 200
null
1,906
from sacred import Experiment def _loss_names(d): ret = { "itm": 0, "mlm": 0, "mpp": 0, "vqa": 0, "nlvr2": 0, "irtr": 0, } ret.update(d) return ret def task_mlm_itm_mpp(): exp_name = "mlm_itm_mpp" datasets = ["coco", "vg", "sbu", "gcc"] loss_names = _loss_names({"itm": 1, "mlm": 1, "mpp": 1}) batch_size = 4096 max_epoch = 10 max_image_len = 200
null
1,907
from sacred import Experiment def _loss_names(d): ret = { "itm": 0, "mlm": 0, "mpp": 0, "vqa": 0, "nlvr2": 0, "irtr": 0, } ret.update(d) return ret def task_finetune_nlvr2(): exp_name = "finetune_nlvr2" datasets = ["nlvr2"] loss_names = _loss_names({"nlvr2": 1}) batch_size = 128 max_epoch = 10 max_steps = None warmup_steps = 0.1 draw_false_image = 0 learning_rate = 1e-4
null
1,908
from sacred import Experiment def _loss_names(d): ret = { "itm": 0, "mlm": 0, "mpp": 0, "vqa": 0, "nlvr2": 0, "irtr": 0, } ret.update(d) return ret def task_finetune_nlvr2_randaug(): exp_name = "finetune_nlvr2_randaug" datasets = ["nlvr2"] train_transform_keys = ["pixelbert_randaug"] loss_names = _loss_names({"nlvr2": 1}) batch_size = 128 max_epoch = 10 max_steps = None warmup_steps = 0.1 draw_false_image = 0 learning_rate = 1e-4
null
1,909
from sacred import Experiment def _loss_names(d): ret = { "itm": 0, "mlm": 0, "mpp": 0, "vqa": 0, "nlvr2": 0, "irtr": 0, } ret.update(d) return ret def task_finetune_vqa(): exp_name = "finetune_vqa" datasets = ["vqa"] loss_names = _loss_names({"vqa": 1}) batch_size = 256 max_epoch = 10 max_steps = None warmup_steps = 0.1 draw_false_image = 0 learning_rate = 1e-4 val_check_interval = 0.1 lr_mult = 10
null
1,910
from sacred import Experiment def _loss_names(d): ret = { "itm": 0, "mlm": 0, "mpp": 0, "vqa": 0, "nlvr2": 0, "irtr": 0, } ret.update(d) return ret def task_finetune_vqa_randaug(): exp_name = "finetune_vqa_randaug" datasets = ["vqa"] train_transform_keys = ["pixelbert_randaug"] loss_names = _loss_names({"vqa": 1}) batch_size = 256 max_epoch = 10 max_steps = None warmup_steps = 0.1 draw_false_image = 0 learning_rate = 1e-4 val_check_interval = 0.1 lr_mult = 10
null
1,911
from sacred import Experiment def _loss_names(d): def task_finetune_irtr_coco(): exp_name = "finetune_irtr_coco" datasets = ["coco"] loss_names = _loss_names({"itm": 0.5, "irtr": 1}) batch_size = 256 max_epoch = 10 max_steps = None warmup_steps = 0.1 get_recall_metric = True draw_false_text = 15 learning_rate = 1e-4
null
1,912
from sacred import Experiment def _loss_names(d): ret = { "itm": 0, "mlm": 0, "mpp": 0, "vqa": 0, "nlvr2": 0, "irtr": 0, } ret.update(d) return ret def task_finetune_irtr_coco_randaug(): exp_name = "finetune_irtr_coco_randaug" datasets = ["coco"] train_transform_keys = ["pixelbert_randaug"] loss_names = _loss_names({"itm": 0.5, "irtr": 1}) batch_size = 256 max_epoch = 10 max_steps = None warmup_steps = 0.1 get_recall_metric = True draw_false_text = 15 learning_rate = 1e-4
null
1,913
from sacred import Experiment def _loss_names(d): ret = { "itm": 0, "mlm": 0, "mpp": 0, "vqa": 0, "nlvr2": 0, "irtr": 0, } ret.update(d) return ret def task_finetune_irtr_f30k(): exp_name = "finetune_irtr_f30k" datasets = ["f30k"] loss_names = _loss_names({"itm": 0.5, "irtr": 1}) batch_size = 256 max_epoch = 10 max_steps = None warmup_steps = 0.1 get_recall_metric = True draw_false_text = 15 learning_rate = 1e-4
null
1,914
from sacred import Experiment def _loss_names(d): ret = { "itm": 0, "mlm": 0, "mpp": 0, "vqa": 0, "nlvr2": 0, "irtr": 0, } ret.update(d) return ret def task_finetune_irtr_f30k_randaug(): exp_name = "finetune_irtr_f30k_randaug" datasets = ["f30k"] train_transform_keys = ["pixelbert_randaug"] loss_names = _loss_names({"itm": 0.5, "irtr": 1}) batch_size = 256 max_epoch = 10 max_steps = None warmup_steps = 0.1 get_recall_metric = True draw_false_text = 15 learning_rate = 1e-4
null
1,915
from sacred import Experiment def step25k(): max_epoch = 100 max_steps = 25000
null
1,916
from sacred import Experiment def step50k(): max_epoch = 100 max_steps = 50000
null
1,917
from sacred import Experiment def step100k(): max_epoch = 100 max_steps = 100000
null
1,918
from sacred import Experiment def step200k(): max_epoch = 200 max_steps = 200000
null
1,919
from sacred import Experiment def vit32_base(): vit = "vit_base_patch32_384" patch_size = 32 hidden_size = 768 num_heads = 12 num_layers = 12
null
1,920
import random import PIL, PIL.ImageOps, PIL.ImageEnhance, PIL.ImageDraw import numpy as np import torch from PIL import Image def TranslateX(img, v): # [-150, 150] => percentage: [-0.45, 0.45] assert -0.45 <= v <= 0.45 if random.random() > 0.5: v = -v v = v * img.size[0] return img.transform(img.size, PIL.Image.AFFINE, (1, 0, v, 0, 1, 0))
null
1,921
import random import PIL, PIL.ImageOps, PIL.ImageEnhance, PIL.ImageDraw import numpy as np import torch from PIL import Image def TranslateY(img, v): # [-150, 150] => percentage: [-0.45, 0.45] assert -0.45 <= v <= 0.45 if random.random() > 0.5: v = -v v = v * img.size[1] return img.transform(img.size, PIL.Image.AFFINE, (1, 0, 0, 0, 1, v))
null
1,922
import random import PIL, PIL.ImageOps, PIL.ImageEnhance, PIL.ImageDraw import numpy as np import torch from PIL import Image def Invert(img, _): return PIL.ImageOps.invert(img)
null
1,923
import random import PIL, PIL.ImageOps, PIL.ImageEnhance, PIL.ImageDraw import numpy as np import torch from PIL import Image def Flip(img, _): # not from the paper return PIL.ImageOps.mirror(img)
null
1,924
import random import PIL, PIL.ImageOps, PIL.ImageEnhance, PIL.ImageDraw import numpy as np import torch from PIL import Image def CutoutAbs(img, v): # [0, 60] => percentage: [0, 0.2] # assert 0 <= v <= 20 if v < 0: return img w, h = img.size x0 = np.random.uniform(w) y0 = np.random.uniform(h) x0 = int(max(0, x0 - v / 2.0)) y0 = int(max(0, y0 - v / 2.0)) x1 = min(w, x0 + v) y1 = min(h, y0 + v) xy = (x0, y0, x1, y1) color = (125, 123, 114) # color = (0, 0, 0) img = img.copy() PIL.ImageDraw.Draw(img).rectangle(xy, color) return img def Cutout(img, v): # [0, 60] => percentage: [0, 0.2] assert 0.0 <= v <= 0.2 if v <= 0.0: return img v = v * img.size[0] return CutoutAbs(img, v)
null
1,925
import random import PIL, PIL.ImageOps, PIL.ImageEnhance, PIL.ImageDraw import numpy as np import torch from PIL import Image def SamplePairing(imgs): # [0, 0.4] def f(img1, v): i = np.random.choice(len(imgs)) img2 = PIL.Image.fromarray(imgs[i]) return PIL.Image.blend(img1, img2, v) return f
null
1,926
import random import PIL, PIL.ImageOps, PIL.ImageEnhance, PIL.ImageDraw import numpy as np import torch from PIL import Image def Identity(img, v): return img
null
1,927
import random import PIL, PIL.ImageOps, PIL.ImageEnhance, PIL.ImageDraw import numpy as np import torch from PIL import Image def ShearX(img, v): # [-0.3, 0.3] assert -0.3 <= v <= 0.3 if random.random() > 0.5: v = -v return img.transform(img.size, PIL.Image.AFFINE, (1, v, 0, 0, 1, 0)) def ShearY(img, v): # [-0.3, 0.3] assert -0.3 <= v <= 0.3 if random.random() > 0.5: v = -v return img.transform(img.size, PIL.Image.AFFINE, (1, 0, 0, v, 1, 0)) def TranslateXabs(img, v): # [-150, 150] => percentage: [-0.45, 0.45] assert 0 <= v if random.random() > 0.5: v = -v return img.transform(img.size, PIL.Image.AFFINE, (1, 0, v, 0, 1, 0)) def TranslateYabs(img, v): # [-150, 150] => percentage: [-0.45, 0.45] assert 0 <= v if random.random() > 0.5: v = -v return img.transform(img.size, PIL.Image.AFFINE, (1, 0, 0, 0, 1, v)) def Rotate(img, v): # [-30, 30] assert -30 <= v <= 30 if random.random() > 0.5: v = -v return img.rotate(v) def AutoContrast(img, _): return PIL.ImageOps.autocontrast(img) def Equalize(img, _): return PIL.ImageOps.equalize(img) def Solarize(img, v): # [0, 256] assert 0 <= v <= 256 return PIL.ImageOps.solarize(img, v) def SolarizeAdd(img, addition=0, threshold=128): img_np = np.array(img).astype(np.int) img_np = img_np + addition img_np = np.clip(img_np, 0, 255) img_np = img_np.astype(np.uint8) img = Image.fromarray(img_np) return PIL.ImageOps.solarize(img, threshold) def Posterize(img, v): # [4, 8] v = int(v) v = max(1, v) return PIL.ImageOps.posterize(img, v) def Contrast(img, v): # [0.1,1.9] assert 0.1 <= v <= 1.9 return PIL.ImageEnhance.Contrast(img).enhance(v) def Color(img, v): # [0.1,1.9] assert 0.1 <= v <= 1.9 return PIL.ImageEnhance.Color(img).enhance(v) def Brightness(img, v): # [0.1,1.9] assert 0.1 <= v <= 1.9 return PIL.ImageEnhance.Brightness(img).enhance(v) def Sharpness(img, v): # [0.1,1.9] assert 0.1 <= v <= 1.9 return PIL.ImageEnhance.Sharpness(img).enhance(v) def augment_list(): # 16 oeprations and their ranges # https://github.com/google-research/uda/blob/master/image/randaugment/policies.py#L57 # l = [ # (Identity, 0., 1.0), # (ShearX, 0., 0.3), # 0 # (ShearY, 0., 0.3), # 1 # (TranslateX, 0., 0.33), # 2 # (TranslateY, 0., 0.33), # 3 # (Rotate, 0, 30), # 4 # (AutoContrast, 0, 1), # 5 # (Invert, 0, 1), # 6 # (Equalize, 0, 1), # 7 # (Solarize, 0, 110), # 8 # (Posterize, 4, 8), # 9 # # (Contrast, 0.1, 1.9), # 10 # (Color, 0.1, 1.9), # 11 # (Brightness, 0.1, 1.9), # 12 # (Sharpness, 0.1, 1.9), # 13 # # (Cutout, 0, 0.2), # 14 # # (SamplePairing(imgs), 0, 0.4), # 15 # ] # https://github.com/tensorflow/tpu/blob/8462d083dd89489a79e3200bcc8d4063bf362186/models/official/efficientnet/autoaugment.py#L505 l = [ (AutoContrast, 0, 1), (Equalize, 0, 1), # (Invert, 0, 1), (Rotate, 0, 30), (Posterize, 0, 4), (Solarize, 0, 256), (SolarizeAdd, 0, 110), (Color, 0.1, 1.9), (Contrast, 0.1, 1.9), (Brightness, 0.1, 1.9), (Sharpness, 0.1, 1.9), (ShearX, 0.0, 0.3), (ShearY, 0.0, 0.3), # (CutoutAbs, 0, 40), (TranslateXabs, 0.0, 100), (TranslateYabs, 0.0, 100), ] return l
null
1,928
from .utils import ( inception_normalize, MinMaxResize, ) from torchvision import transforms from .randaug import RandAugment class MinMaxResize: def __init__(self, shorter=800, longer=1333): self.min = shorter self.max = longer def __call__(self, x): w, h = x.size scale = self.min / min(w, h) if h < w: newh, neww = self.min, scale * w else: newh, neww = scale * h, self.min if max(newh, neww) > self.max: scale = self.max / max(newh, neww) newh = newh * scale neww = neww * scale newh, neww = int(newh + 0.5), int(neww + 0.5) newh, neww = newh // 32 * 32, neww // 32 * 32 return x.resize((neww, newh), resample=Image.BICUBIC) inception_normalize = transforms.Compose( [transforms.Normalize(mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5])] ) def pixelbert_transform(size=800): longer = int((1333 / 800) * size) return transforms.Compose( [ MinMaxResize(shorter=size, longer=longer), transforms.ToTensor(), inception_normalize, ] )
null
1,929
from .utils import ( inception_normalize, MinMaxResize, ) from torchvision import transforms from .randaug import RandAugment class MinMaxResize: def __init__(self, shorter=800, longer=1333): self.min = shorter self.max = longer def __call__(self, x): w, h = x.size scale = self.min / min(w, h) if h < w: newh, neww = self.min, scale * w else: newh, neww = scale * h, self.min if max(newh, neww) > self.max: scale = self.max / max(newh, neww) newh = newh * scale neww = neww * scale newh, neww = int(newh + 0.5), int(neww + 0.5) newh, neww = newh // 32 * 32, neww // 32 * 32 return x.resize((neww, newh), resample=Image.BICUBIC) inception_normalize = transforms.Compose( [transforms.Normalize(mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5])] ) class RandAugment: def __init__(self, n, m): self.n = n self.m = m # [0, 30] self.augment_list = augment_list() def __call__(self, img): ops = random.choices(self.augment_list, k=self.n) for op, minval, maxval in ops: val = (float(self.m) / 30) * float(maxval - minval) + minval img = op(img, val) return img def pixelbert_transform_randaug(size=800): longer = int((1333 / 800) * size) trs = transforms.Compose( [ MinMaxResize(shorter=size, longer=longer), transforms.ToTensor(), inception_normalize, ] ) trs.transforms.insert(0, RandAugment(2, 9)) return trs
null
1,930
import json import pandas as pd import pyarrow as pa import os from tqdm import tqdm from collections import defaultdict def process(root, iden, row): texts = [r["sentence"] for r in row] labels = [r["label"] for r in row] split = iden.split("-")[0] if iden.startswith("train"): directory = row[0]["directory"] path = f"{root}/images/train/{directory}/{iden}" else: path = f"{root}/{split}/{iden}" with open(f"{path}-img0.png", "rb") as fp: img0 = fp.read() with open(f"{path}-img1.png", "rb") as fp: img1 = fp.read() return [img0, img1, texts, labels, iden] def make_arrow(root, dataset_root): train_data = list( map(json.loads, open(f"{root}/nlvr2/data/train.json").readlines()) ) test1_data = list( map(json.loads, open(f"{root}/nlvr2/data/test1.json").readlines()) ) dev_data = list(map(json.loads, open(f"{root}/nlvr2/data/dev.json").readlines())) balanced_test1_data = list( map( json.loads, open(f"{root}/nlvr2/data/balanced/balanced_test1.json").readlines(), ) ) balanced_dev_data = list( map( json.loads, open(f"{root}/nlvr2/data/balanced/balanced_dev.json").readlines(), ) ) unbalanced_test1_data = list( map( json.loads, open(f"{root}/nlvr2/data/unbalanced/unbalanced_test1.json").readlines(), ) ) unbalanced_dev_data = list( map( json.loads, open(f"{root}/nlvr2/data/unbalanced/unbalanced_dev.json").readlines(), ) ) splits = [ "train", "dev", "test1", "balanced_dev", "balanced_test1", "unbalanced_dev", "unbalanced_test1", ] datas = [ train_data, dev_data, test1_data, balanced_dev_data, balanced_test1_data, unbalanced_dev_data, unbalanced_test1_data, ] annotations = dict() for split, data in zip(splits, datas): _annot = defaultdict(list) for row in tqdm(data): _annot["-".join(row["identifier"].split("-")[:-1])].append(row) annotations[split] = _annot for split in splits: bs = [ process(root, iden, row) for iden, row in tqdm(annotations[split].items()) ] dataframe = pd.DataFrame( bs, columns=["image_0", "image_1", "questions", "answers", "identifier"], ) table = pa.Table.from_pandas(dataframe) os.makedirs(dataset_root, exist_ok=True) with pa.OSFile(f"{dataset_root}/nlvr2_{split}.arrow", "wb") as sink: with pa.RecordBatchFileWriter(sink, table.schema) as writer: writer.write_table(table)
null
1,931
import json import pandas as pd import pyarrow as pa import random import os from tqdm import tqdm from glob import glob from collections import defaultdict def path2rest(path, iid2captions, iid2split): name = path.split("/")[-1] with open(path, "rb") as fp: binary = fp.read() captions = iid2captions[name] split = iid2split[name] return [binary, captions, name, split] def make_arrow(root, dataset_root): with open(f"{root}/karpathy/dataset_flickr30k.json", "r") as fp: captions = json.load(fp) captions = captions["images"] iid2captions = defaultdict(list) iid2split = dict() for cap in tqdm(captions): filename = cap["filename"] iid2split[filename] = cap["split"] for c in cap["sentences"]: iid2captions[filename].append(c["raw"]) paths = list(glob(f"{root}/flickr30k-images/*.jpg")) random.shuffle(paths) caption_paths = [path for path in paths if path.split("/")[-1] in iid2captions] if len(paths) == len(caption_paths): print("all images have caption annotations") else: print("not all images have caption annotations") print( len(paths), len(caption_paths), len(iid2captions), ) bs = [path2rest(path, iid2captions, iid2split) for path in tqdm(caption_paths)] for split in ["train", "val", "test"]: batches = [b for b in bs if b[-1] == split] dataframe = pd.DataFrame( batches, columns=["image", "caption", "image_id", "split"], ) table = pa.Table.from_pandas(dataframe) os.makedirs(dataset_root, exist_ok=True) with pa.OSFile( f"{dataset_root}/f30k_caption_karpathy_{split}.arrow", "wb" ) as sink: with pa.RecordBatchFileWriter(sink, table.schema) as writer: writer.write_table(table)
null
1,932
import json import os import pandas as pd import pyarrow as pa import random from tqdm import tqdm from glob import glob from collections import defaultdict def path2rest(path, iid2captions, iid2split): name = path.split("/")[-1] with open(path, "rb") as fp: binary = fp.read() captions = iid2captions[name] split = iid2split[name] return [binary, captions, name, split] def make_arrow(root, dataset_root): with open(f"{root}/karpathy/dataset_coco.json", "r") as fp: captions = json.load(fp) captions = captions["images"] iid2captions = defaultdict(list) iid2split = dict() for cap in tqdm(captions): filename = cap["filename"] iid2split[filename] = cap["split"] for c in cap["sentences"]: iid2captions[filename].append(c["raw"]) paths = list(glob(f"{root}/train2014/*.jpg")) + list(glob(f"{root}/val2014/*.jpg")) random.shuffle(paths) caption_paths = [path for path in paths if path.split("/")[-1] in iid2captions] if len(paths) == len(caption_paths): print("all images have caption annotations") else: print("not all images have caption annotations") print( len(paths), len(caption_paths), len(iid2captions), ) bs = [path2rest(path, iid2captions, iid2split) for path in tqdm(caption_paths)] for split in ["train", "val", "restval", "test"]: batches = [b for b in bs if b[-1] == split] dataframe = pd.DataFrame( batches, columns=["image", "caption", "image_id", "split"], ) table = pa.Table.from_pandas(dataframe) os.makedirs(dataset_root, exist_ok=True) with pa.OSFile( f"{dataset_root}/coco_caption_karpathy_{split}.arrow", "wb" ) as sink: with pa.RecordBatchFileWriter(sink, table.schema) as writer: writer.write_table(table)
null
1,933
import json import pandas as pd import pyarrow as pa import random import os from tqdm import tqdm from glob import glob from collections import defaultdict def path2rest(path, iid2captions): name = path.split("/")[-1] iid = int(name[:-4]) with open(path, "rb") as fp: binary = fp.read() cdicts = iid2captions[iid] captions = [c["phrase"] for c in cdicts] widths = [c["width"] for c in cdicts] heights = [c["height"] for c in cdicts] xs = [c["x"] for c in cdicts] ys = [c["y"] for c in cdicts] return [ binary, captions, widths, heights, xs, ys, str(iid), ] def make_arrow(root, dataset_root): with open(f"{root}/annotations/region_descriptions.json", "r") as fp: captions = json.load(fp) iid2captions = defaultdict(list) for cap in tqdm(captions): cap = cap["regions"] for c in cap: iid2captions[c["image_id"]].append(c) paths = list(glob(f"{root}/images/VG_100K/*.jpg")) + list( glob(f"{root}/images/VG_100K_2/*.jpg") ) random.shuffle(paths) caption_paths = [ path for path in paths if int(path.split("/")[-1][:-4]) in iid2captions ] if len(paths) == len(caption_paths): print("all images have caption annotations") else: print("not all images have caption annotations") print( len(paths), len(caption_paths), len(iid2captions), ) bs = [path2rest(path, iid2captions) for path in tqdm(caption_paths)] dataframe = pd.DataFrame( bs, columns=["image", "caption", "width", "height", "x", "y", "image_id"], ) table = pa.Table.from_pandas(dataframe) os.makedirs(dataset_root, exist_ok=True) with pa.OSFile(f"{dataset_root}/vg.arrow", "wb") as sink: with pa.RecordBatchFileWriter(sink, table.schema) as writer: writer.write_table(table)
null
1,934
import json import pandas as pd import pyarrow as pa import random import os from tqdm import tqdm from glob import glob from collections import defaultdict, Counter from .glossary import normalize_word def get_score(occurences): if occurences == 0: return 0.0 elif occurences == 1: return 0.3 elif occurences == 2: return 0.6 elif occurences == 3: return 0.9 else: return 1.0 def path2rest(path, split, annotations, label2ans): iid = int(path.split("/")[-1].split("_")[-1][:-4]) with open(path, "rb") as fp: binary = fp.read() _annot = annotations[split][iid] _annot = list(_annot.items()) qids, qas = [a[0] for a in _annot], [a[1] for a in _annot] questions = [qa[0] for qa in qas] answers = [qa[1] for qa in qas] if "test" not in split else list(list()) answer_labels = ( [a["labels"] for a in answers] if "test" not in split else list(list()) ) answer_scores = ( [a["scores"] for a in answers] if "test" not in split else list(list()) ) answers = ( [[label2ans[l] for l in al] for al in answer_labels] if "test" not in split else list(list()) ) return [binary, questions, answers, answer_labels, answer_scores, iid, qids, split] def normalize_word(token): _token = token for p in punct: if (p + " " in token or " " + p in token) or ( re.search(comma_strip, token) != None ): _token = _token.replace(p, "") else: _token = _token.replace(p, " ") token = period_strip.sub("", _token, re.UNICODE) _token = [] temp = token.lower().split() for word in temp: word = manual_map.setdefault(word, word) if word not in articles: _token.append(word) for i, word in enumerate(_token): if word in contractions: _token[i] = contractions[word] token = " ".join(_token) token = token.replace(",", "") return token def make_arrow(root, dataset_root): with open(f"{root}/v2_OpenEnded_mscoco_train2014_questions.json", "r") as fp: questions_train2014 = json.load(fp)["questions"] with open(f"{root}/v2_OpenEnded_mscoco_val2014_questions.json", "r") as fp: questions_val2014 = json.load(fp)["questions"] with open(f"{root}/v2_OpenEnded_mscoco_test2015_questions.json", "r") as fp: questions_test2015 = json.load(fp)["questions"] with open(f"{root}/v2_OpenEnded_mscoco_test-dev2015_questions.json", "r") as fp: questions_test_dev2015 = json.load(fp)["questions"] with open(f"{root}/v2_mscoco_train2014_annotations.json", "r") as fp: annotations_train2014 = json.load(fp)["annotations"] with open(f"{root}/v2_mscoco_val2014_annotations.json", "r") as fp: annotations_val2014 = json.load(fp)["annotations"] annotations = dict() for split, questions in zip( ["train", "val", "test", "test-dev"], [ questions_train2014, questions_val2014, questions_test2015, questions_test_dev2015, ], ): _annot = defaultdict(dict) for q in tqdm(questions): _annot[q["image_id"]][q["question_id"]] = [q["question"]] annotations[split] = _annot all_major_answers = list() for split, annots in zip( ["train", "val"], [annotations_train2014, annotations_val2014], ): _annot = annotations[split] for q in tqdm(annots): all_major_answers.append(q["multiple_choice_answer"]) all_major_answers = [normalize_word(word) for word in tqdm(all_major_answers)] counter = {k: v for k, v in Counter(all_major_answers).items() if v >= 9} ans2label = {k: i for i, k in enumerate(counter.keys())} label2ans = list(counter.keys()) for split, annots in zip( ["train", "val"], [annotations_train2014, annotations_val2014], ): _annot = annotations[split] for q in tqdm(annots): answers = q["answers"] answer_count = {} for answer in answers: answer_ = answer["answer"] answer_count[answer_] = answer_count.get(answer_, 0) + 1 labels = [] scores = [] for answer in answer_count: if answer not in ans2label: continue labels.append(ans2label[answer]) score = get_score(answer_count[answer]) scores.append(score) _annot[q["image_id"]][q["question_id"]].append( {"labels": labels, "scores": scores,} ) for split in ["train", "val"]: filtered_annot = dict() for ik, iv in annotations[split].items(): new_q = dict() for qk, qv in iv.items(): if len(qv[1]["labels"]) != 0: new_q[qk] = qv if len(new_q) != 0: filtered_annot[ik] = new_q annotations[split] = filtered_annot for split in [ "train", "val", "test", "test-dev", ]: annot = annotations[split] split_name = { "train": "train2014", "val": "val2014", "test": "test2015", "test-dev": "test2015", }[split] paths = list(glob(f"{root}/{split_name}/*.jpg")) random.shuffle(paths) annot_paths = [ path for path in paths if int(path.split("/")[-1].split("_")[-1][:-4]) in annot ] if len(paths) == len(annot_paths): print("all images have caption annotations") else: print("not all images have caption annotations") print( len(paths), len(annot_paths), len(annot), ) bs = [ path2rest(path, split, annotations, label2ans) for path in tqdm(annot_paths) ] dataframe = pd.DataFrame( bs, columns=[ "image", "questions", "answers", "answer_labels", "answer_scores", "image_id", "question_id", "split", ], ) table = pa.Table.from_pandas(dataframe) os.makedirs(dataset_root, exist_ok=True) with pa.OSFile(f"{dataset_root}/vqav2_{split}.arrow", "wb") as sink: with pa.RecordBatchFileWriter(sink, table.schema) as writer: writer.write_table(table) table = pa.ipc.RecordBatchFileReader( pa.memory_map(f"{dataset_root}/vqav2_val.arrow", "r") ).read_all() pdtable = table.to_pandas() df1 = pdtable[:-1000] df2 = pdtable[-1000:] df1 = pa.Table.from_pandas(df1) df2 = pa.Table.from_pandas(df2) with pa.OSFile(f"{dataset_root}/vqav2_trainable_val.arrow", "wb") as sink: with pa.RecordBatchFileWriter(sink, df1.schema) as writer: writer.write_table(df1) with pa.OSFile(f"{dataset_root}/vqav2_rest_val.arrow", "wb") as sink: with pa.RecordBatchFileWriter(sink, df2.schema) as writer: writer.write_table(df2)
null
1,935
import json import pandas as pd import pyarrow as pa import gc import random import os from tqdm import tqdm from glob import glob def path2rest(path, iid2captions): def make_arrow(root, dataset_root): with open(f"{root}/annot.json", "r") as fp: captions = json.load(fp) iid2captions = dict() for cap in tqdm(captions): iid = cap[0].split("/")[-1] iid2captions[iid] = [cap[1]] paths = list(glob(f"{root}/images_train/*/*")) random.shuffle(paths) caption_paths = [path for path in paths if path.split("/")[-1] in iid2captions] if len(paths) == len(caption_paths): print("all images have caption annotations") else: print("not all images have caption annotations") print( len(paths), len(caption_paths), len(iid2captions), ) sub_len = int(len(caption_paths) // 100000) subs = list(range(sub_len + 1)) for sub in subs: sub_paths = caption_paths[sub * 100000 : (sub + 1) * 100000] bs = [path2rest(path, iid2captions) for path in tqdm(sub_paths)] dataframe = pd.DataFrame(bs, columns=["image", "caption", "image_id", "split"],) table = pa.Table.from_pandas(dataframe) os.makedirs(dataset_root, exist_ok=True) with pa.OSFile(f"{dataset_root}/sbu_{sub}.arrow", "wb") as sink: with pa.RecordBatchFileWriter(sink, table.schema) as writer: writer.write_table(table) del dataframe del table del bs gc.collect()
null
1,936
import json import pandas as pd import pyarrow as pa import gc import random import os from tqdm import tqdm from glob import glob def path2rest(path, iid2captions): def make_arrow(root, dataset_root): for split in ["val", "train"]: with open(f"{root}/{split}_annot.json", "r") as fp: captions = json.load(fp) iid2captions = dict() for cap in tqdm(captions): iid = cap[0].split("/")[-1] iid2captions[iid] = [cap[1]] paths = list(glob(f"{root}/images_{split}/*/*")) random.shuffle(paths) caption_paths = [path for path in paths if path.split("/")[-1] in iid2captions] if len(paths) == len(caption_paths): print("all images have caption annotations") else: print("not all images have caption annotations") print( len(paths), len(caption_paths), len(iid2captions), ) sub_len = int(len(caption_paths) // 100000) subs = list(range(sub_len + 1)) for sub in subs: sub_paths = caption_paths[sub * 100000 : (sub + 1) * 100000] bs = [path2rest(path, iid2captions) for path in tqdm(sub_paths)] dataframe = pd.DataFrame( bs, columns=["image", "caption", "image_id", "split"], ) table = pa.Table.from_pandas(dataframe) os.makedirs(dataset_root, exist_ok=True) with pa.OSFile( f"{dataset_root}/conceptual_caption_{split}_{sub}.arrow", "wb" ) as sink: with pa.RecordBatchFileWriter(sink, table.schema) as writer: writer.write_table(table) del dataframe del table del bs gc.collect()
null
1,937
import glob import os from setuptools import find_packages, setup import torch from torch.utils.cpp_extension import CUDA_HOME, CppExtension, CUDAExtension torch_ver = [int(x) for x in torch.__version__.split(".")[:2]] assert torch_ver >= [1, 3], "Requires PyTorch >= 1.3" def get_extensions(): this_dir = os.path.dirname(os.path.abspath(__file__)) extensions_dir = os.path.join(this_dir, "adet", "layers", "csrc") main_source = os.path.join(extensions_dir, "vision.cpp") sources = glob.glob(os.path.join(extensions_dir, "**", "*.cpp")) source_cuda = glob.glob(os.path.join(extensions_dir, "**", "*.cu")) + glob.glob( os.path.join(extensions_dir, "*.cu") ) sources = [main_source] + sources extension = CppExtension extra_compile_args = {"cxx": []} define_macros = [] if (torch.cuda.is_available() and CUDA_HOME is not None) or os.getenv("FORCE_CUDA", "0") == "1": extension = CUDAExtension sources += source_cuda define_macros += [("WITH_CUDA", None)] extra_compile_args["nvcc"] = [ "-DCUDA_HAS_FP16=1", "-D__CUDA_NO_HALF_OPERATORS__", "-D__CUDA_NO_HALF_CONVERSIONS__", "-D__CUDA_NO_HALF2_OPERATORS__", ] if torch_ver < [1, 7]: # supported by https://github.com/pytorch/pytorch/pull/43931 CC = os.environ.get("CC", None) if CC is not None: extra_compile_args["nvcc"].append("-ccbin={}".format(CC)) sources = [os.path.join(extensions_dir, s) for s in sources] include_dirs = [extensions_dir] ext_modules = [ extension( "adet._C", sources, include_dirs=include_dirs, define_macros=define_macros, extra_compile_args=extra_compile_args, ) ] return ext_modules
null
1,938
import os import sys import mock import sphinx_rtd_theme from recommonmark.parser import CommonMarkParser import adet def autodoc_skip_member(app, what, name, obj, skip, options): # we hide something deliberately if getattr(obj, "__HIDE_SPHINX_DOC__", False): return True # Hide some names that are deprecated or not intended to be used if name in _DEPRECATED_NAMES: return True return None def url_resolver(url): if ".html" not in url: url = url.replace("../", "") return "https://github.com/aim-uofa/adet/blob/master/" + url else: if DEPLOY: return "http://adet.readthedocs.io/" + url else: return "/" + url def setup(app): from recommonmark.transform import AutoStructify app.connect("autodoc-skip-member", autodoc_skip_member) # app.connect('autodoc-skip-member', autodoc_skip_member) app.add_config_value( "recommonmark_config", { "url_resolver": url_resolver, "enable_math": True, "enable_inline_math": True, "enable_eval_rst": True, }, True, ) app.add_transform(AutoStructify)
null
1,939
from torch import nn from detectron2.layers import Conv2d from .deform_conv import DFConv2d from detectron2.layers.batch_norm import get_norm class DFConv2d(nn.Module): def __init__( self, in_channels, out_channels, with_modulated_dcn=True, kernel_size=3, stride=1, groups=1, dilation=1, deformable_groups=1, bias=False, padding=None ): def forward(self, x, return_offset=False): def conv_with_kaiming_uniform( norm=None, activation=None, use_deformable=False, use_sep=False): def make_conv( in_channels, out_channels, kernel_size, stride=1, dilation=1 ): if use_deformable: conv_func = DFConv2d else: conv_func = Conv2d if use_sep: assert in_channels == out_channels groups = in_channels else: groups = 1 conv = conv_func( in_channels, out_channels, kernel_size=kernel_size, stride=stride, padding=dilation * (kernel_size - 1) // 2, dilation=dilation, groups=groups, bias=(norm is None) ) if not use_deformable: # Caffe2 implementation uses XavierFill, which in fact # corresponds to kaiming_uniform_ in PyTorch nn.init.kaiming_uniform_(conv.weight, a=1) if norm is None: nn.init.constant_(conv.bias, 0) module = [conv,] if norm is not None and len(norm) > 0: if norm == "GN": norm_module = nn.GroupNorm(32, out_channels) else: norm_module = get_norm(norm, out_channels) module.append(norm_module) if activation is not None: module.append(nn.ReLU(inplace=True)) if len(module) > 1: return nn.Sequential(*module) return conv return make_conv
null
1,940
from detectron2.layers import batched_nms The provided code snippet includes necessary dependencies for implementing the `ml_nms` function. Write a Python function `def ml_nms(boxlist, nms_thresh, max_proposals=-1, score_field="scores", label_field="labels")` to solve the following problem: Performs non-maximum suppression on a boxlist, with scores specified in a boxlist field via score_field. Args: boxlist (detectron2.structures.Boxes): nms_thresh (float): max_proposals (int): if > 0, then only the top max_proposals are kept after non-maximum suppression score_field (str): Here is the function: def ml_nms(boxlist, nms_thresh, max_proposals=-1, score_field="scores", label_field="labels"): """ Performs non-maximum suppression on a boxlist, with scores specified in a boxlist field via score_field. Args: boxlist (detectron2.structures.Boxes): nms_thresh (float): max_proposals (int): if > 0, then only the top max_proposals are kept after non-maximum suppression score_field (str): """ if nms_thresh <= 0: return boxlist boxes = boxlist.pred_boxes.tensor scores = boxlist.scores labels = boxlist.pred_classes keep = batched_nms(boxes, scores, labels, nms_thresh) if max_proposals > 0: keep = keep[: max_proposals] boxlist = boxlist[keep] return boxlist
Performs non-maximum suppression on a boxlist, with scores specified in a boxlist field via score_field. Args: boxlist (detectron2.structures.Boxes): nms_thresh (float): max_proposals (int): if > 0, then only the top max_proposals are kept after non-maximum suppression score_field (str):
1,941
import random import numpy as np from fvcore.transforms import transform as T from detectron2.data.transforms import RandomCrop, StandardAugInput from detectron2.structures import BoxMode def adjust_crop(x0, y0, crop_size, instances, eps=1e-3): modified = False x1 = x0 + crop_size[1] y1 = y0 + crop_size[0] for bbox in instances: if bbox[0] < x0 - eps and bbox[2] > x0 + eps: crop_size[1] += x0 - bbox[0] x0 = bbox[0] modified = True if bbox[0] < x1 - eps and bbox[2] > x1 + eps: crop_size[1] += bbox[2] - x1 x1 = bbox[2] modified = True if bbox[1] < y0 - eps and bbox[3] > y0 + eps: crop_size[0] += y0 - bbox[1] y0 = bbox[1] modified = True if bbox[1] < y1 - eps and bbox[3] > y1 + eps: crop_size[0] += bbox[3] - y1 y1 = bbox[3] modified = True return modified, x0, y0, crop_size The provided code snippet includes necessary dependencies for implementing the `gen_crop_transform_with_instance` function. Write a Python function `def gen_crop_transform_with_instance(crop_size, image_size, instances, crop_box=True)` to solve the following problem: Generate a CropTransform so that the cropping region contains the center of the given instance. Args: crop_size (tuple): h, w in pixels image_size (tuple): h, w instance (dict): an annotation dict of one instance, in Detectron2's dataset format. Here is the function: def gen_crop_transform_with_instance(crop_size, image_size, instances, crop_box=True): """ Generate a CropTransform so that the cropping region contains the center of the given instance. Args: crop_size (tuple): h, w in pixels image_size (tuple): h, w instance (dict): an annotation dict of one instance, in Detectron2's dataset format. """ bbox = random.choice(instances) bbox[::2] = np.clip(bbox[::2], 0, image_size[1]) bbox[1::2] = np.clip(bbox[1::2], 0, image_size[0]) crop_size = np.asarray(crop_size, dtype=np.int32) center_yx = (bbox[1] + bbox[3]) * 0.5, (bbox[0] + bbox[2]) * 0.5 assert ( image_size[0] >= center_yx[0] and image_size[1] >= center_yx[1] ), "The annotation bounding box is outside of the image!" assert ( image_size[0] >= crop_size[0] and image_size[1] >= crop_size[1] ), "Crop size is larger than image size!" min_yx = np.maximum(np.floor(center_yx).astype(np.int32) - crop_size, 0) max_yx = np.maximum(np.asarray(image_size, dtype=np.int32) - crop_size, 0) max_yx = np.minimum(max_yx, np.ceil(center_yx).astype(np.int32)) y0 = np.random.randint(min_yx[0], max_yx[0] + 1) x0 = np.random.randint(min_yx[1], max_yx[1] + 1) # if some instance is cropped extend the box if not crop_box: num_modifications = 0 modified = True # convert crop_size to float crop_size = crop_size.astype(np.float32) while modified: modified, x0, y0, crop_size = adjust_crop(x0, y0, crop_size, instances) num_modifications += 1 if num_modifications > 100: raise ValueError( "Cannot finished cropping adjustment within 100 tries (#instances {}).".format( len(instances) ) ) return T.CropTransform(0, 0, image_size[1], image_size[0]) return T.CropTransform(*map(int, (x0, y0, crop_size[1], crop_size[0])))
Generate a CropTransform so that the cropping region contains the center of the given instance. Args: crop_size (tuple): h, w in pixels image_size (tuple): h, w instance (dict): an annotation dict of one instance, in Detectron2's dataset format.
1,942
import copy import logging import os.path as osp import numpy as np import torch from fvcore.common.file_io import PathManager from PIL import Image from pycocotools import mask as maskUtils from detectron2.data import detection_utils as utils from detectron2.data import transforms as T from detectron2.data.dataset_mapper import DatasetMapper from detectron2.data.detection_utils import SizeMismatchError from detectron2.structures import BoxMode from .augmentation import RandomCropWithInstance from .detection_utils import (annotations_to_instances, build_augmentation, transform_instance_annotations) def segmToRLE(segm, img_size): h, w = img_size if type(segm) == list: # polygon -- a single object might consist of multiple parts # we merge all parts into one mask rle code rles = maskUtils.frPyObjects(segm, h, w) rle = maskUtils.merge(rles) elif type(segm["counts"]) == list: # uncompressed RLE rle = maskUtils.frPyObjects(segm, h, w) else: # rle rle = segm return rle def segmToMask(segm, img_size): rle = segmToRLE(segm, img_size) m = maskUtils.decode(rle) return m
null
1,943
import os from detectron2.data.datasets.register_coco import register_coco_instances from detectron2.data.datasets.builtin_meta import _get_builtin_metadata from .datasets.text import register_text_instances _PREDEFINED_SPLITS_PIC = { "pic_person_train": ("pic/image/train", "pic/annotations/train_person.json"), "pic_person_val": ("pic/image/val", "pic/annotations/val_person.json"), } metadata_pic = { "thing_classes": ["person"] } _PREDEFINED_SPLITS_TEXT = { "totaltext_train": ("totaltext/train_images", "totaltext/train.json"), "totaltext_val": ("totaltext/test_images", "totaltext/test.json"), "ctw1500_word_train": ("CTW1500/ctwtrain_text_image", "CTW1500/annotations/train_ctw1500_maxlen100_v2.json"), "ctw1500_word_test": ("CTW1500/ctwtest_text_image","CTW1500/annotations/test_ctw1500_maxlen100.json"), "syntext1_train": ("syntext1/images", "syntext1/annotations/train.json"), "syntext2_train": ("syntext2/images", "syntext2/annotations/train.json"), "mltbezier_word_train": ("mlt2017/images","mlt2017/annotations/train.json"), "rects_train": ("ReCTS/ReCTS_train_images", "ReCTS/annotations/rects_train.json"), "rects_val": ("ReCTS/ReCTS_val_images", "ReCTS/annotations/rects_val.json"), "rects_test": ("ReCTS/ReCTS_test_images", "ReCTS/annotations/rects_test.json"), "art_train": ("ArT/rename_artimg_train", "ArT/annotations/abcnet_art_train.json"), "lsvt_train": ("LSVT/rename_lsvtimg_train", "LSVT/annotations/abcnet_lsvt_train.json"), "chnsyn_train": ("ChnSyn/syn_130k_images", "ChnSyn/annotations/chn_syntext.json"), "icdar2013_train": ("icdar2013/train_images", "icdar2013/ic13_train.json"), "icdar2015_train": ("icdar2015/train_images", "icdar2015/ic15_train.json"), "icdar2015_test": ("icdar2015/test_images", "icdar2015/ic15_test.json"), } metadata_text = { "thing_classes": ["text"] } def register_text_instances(name, metadata, json_file, image_root): """ Register a dataset in json annotation format for text detection and recognition. Args: name (str): a name that identifies the dataset, e.g. "lvis_v0.5_train". metadata (dict): extra metadata associated with this dataset. It can be an empty dict. json_file (str): path to the json instance annotation file. image_root (str or path-like): directory which contains all the images. """ DatasetCatalog.register(name, lambda: load_text_json(json_file, image_root, name)) MetadataCatalog.get(name).set( json_file=json_file, image_root=image_root, evaluator_type="text", **metadata ) def register_all_coco(root="datasets"): for key, (image_root, json_file) in _PREDEFINED_SPLITS_PIC.items(): # Assume pre-defined datasets live in `./datasets`. register_coco_instances( key, metadata_pic, os.path.join(root, json_file) if "://" not in json_file else json_file, os.path.join(root, image_root), ) for key, (image_root, json_file) in _PREDEFINED_SPLITS_TEXT.items(): # Assume pre-defined datasets live in `./datasets`. register_text_instances( key, metadata_text, os.path.join(root, json_file) if "://" not in json_file else json_file, os.path.join(root, image_root), )
null
1,944
import logging import numpy as np import torch from detectron2.data import transforms as T from detectron2.data.detection_utils import \ annotations_to_instances as d2_anno_to_inst from detectron2.data.detection_utils import \ transform_instance_annotations as d2_transform_inst_anno import math def transform_beziers_annotations(beziers, transforms): """ Transform keypoint annotations of an image. Args: beziers (list[float]): Nx16 float in Detectron2 Dataset format. transforms (TransformList): """ # (N*2,) -> (N, 2) beziers = np.asarray(beziers, dtype="float64").reshape(-1, 2) beziers = transforms.apply_coords(beziers).reshape(-1) # This assumes that HorizFlipTransform is the only one that does flip do_hflip = ( sum(isinstance(t, T.HFlipTransform) for t in transforms.transforms) % 2 == 1 ) if do_hflip: raise ValueError("Flipping text data is not supported (also disencouraged).") return beziers def transform_instance_annotations( annotation, transforms, image_size, *, keypoint_hflip_indices=None ): annotation = d2_transform_inst_anno( annotation, transforms, image_size, keypoint_hflip_indices=keypoint_hflip_indices, ) if "beziers" in annotation: beziers = transform_beziers_annotations(annotation["beziers"], transforms) annotation["beziers"] = beziers return annotation
null
1,945
import logging import numpy as np import torch from detectron2.data import transforms as T from detectron2.data.detection_utils import \ annotations_to_instances as d2_anno_to_inst from detectron2.data.detection_utils import \ transform_instance_annotations as d2_transform_inst_anno import math def annotations_to_instances(annos, image_size, mask_format="polygon"): instance = d2_anno_to_inst(annos, image_size, mask_format) if not annos: return instance # add attributes if "beziers" in annos[0]: beziers = [obj.get("beziers", []) for obj in annos] instance.beziers = torch.as_tensor(beziers, dtype=torch.float32) if "rec" in annos[0]: text = [obj.get("rec", []) for obj in annos] instance.text = torch.as_tensor(text, dtype=torch.int32) return instance
null
1,946
import logging import numpy as np import torch from detectron2.data import transforms as T from detectron2.data.detection_utils import \ annotations_to_instances as d2_anno_to_inst from detectron2.data.detection_utils import \ transform_instance_annotations as d2_transform_inst_anno import math The provided code snippet includes necessary dependencies for implementing the `build_augmentation` function. Write a Python function `def build_augmentation(cfg, is_train)` to solve the following problem: With option to don't use hflip Returns: list[Augmentation] Here is the function: def build_augmentation(cfg, is_train): """ With option to don't use hflip Returns: list[Augmentation] """ if is_train: min_size = cfg.INPUT.MIN_SIZE_TRAIN max_size = cfg.INPUT.MAX_SIZE_TRAIN sample_style = cfg.INPUT.MIN_SIZE_TRAIN_SAMPLING else: min_size = cfg.INPUT.MIN_SIZE_TEST max_size = cfg.INPUT.MAX_SIZE_TEST sample_style = "choice" if sample_style == "range": assert ( len(min_size) == 2 ), "more than 2 ({}) min_size(s) are provided for ranges".format(len(min_size)) logger = logging.getLogger(__name__) augmentation = [] augmentation.append(T.ResizeShortestEdge(min_size, max_size, sample_style)) if is_train: if cfg.INPUT.HFLIP_TRAIN: augmentation.append(T.RandomFlip()) logger.info("Augmentations used in training: " + str(augmentation)) return augmentation
With option to don't use hflip Returns: list[Augmentation]
1,947
import torch from torch.nn import functional as F from detectron2.layers import cat from detectron2.modeling.poolers import ROIPooler class Blender(object): def __init__(self, cfg): # fmt: off self.pooler_resolution = cfg.MODEL.BLENDMASK.BOTTOM_RESOLUTION sampling_ratio = cfg.MODEL.BLENDMASK.POOLER_SAMPLING_RATIO pooler_type = cfg.MODEL.BLENDMASK.POOLER_TYPE pooler_scales = cfg.MODEL.BLENDMASK.POOLER_SCALES self.attn_size = cfg.MODEL.BLENDMASK.ATTN_SIZE self.top_interp = cfg.MODEL.BLENDMASK.TOP_INTERP num_bases = cfg.MODEL.BASIS_MODULE.NUM_BASES # fmt: on self.attn_len = num_bases * self.attn_size * self.attn_size self.pooler = ROIPooler( output_size=self.pooler_resolution, scales=pooler_scales, sampling_ratio=sampling_ratio, pooler_type=pooler_type, canonical_level=2) def __call__(self, bases, proposals, gt_instances): if gt_instances is not None: # training # reshape attns dense_info = proposals["instances"] attns = dense_info.top_feats pos_inds = dense_info.pos_inds if pos_inds.numel() == 0: return None, {"loss_mask": sum([x.sum() * 0 for x in attns]) + bases[0].sum() * 0} gt_inds = dense_info.gt_inds rois = self.pooler(bases, [x.gt_boxes for x in gt_instances]) rois = rois[gt_inds] pred_mask_logits = self.merge_bases(rois, attns) # gen targets gt_masks = [] for instances_per_image in gt_instances: if len(instances_per_image.gt_boxes.tensor) == 0: continue gt_mask_per_image = instances_per_image.gt_masks.crop_and_resize( instances_per_image.gt_boxes.tensor, self.pooler_resolution ).to(device=pred_mask_logits.device) gt_masks.append(gt_mask_per_image) gt_masks = cat(gt_masks, dim=0) gt_masks = gt_masks[gt_inds] N = gt_masks.size(0) gt_masks = gt_masks.view(N, -1) gt_ctr = dense_info.gt_ctrs loss_denorm = proposals["loss_denorm"] mask_losses = F.binary_cross_entropy_with_logits( pred_mask_logits, gt_masks.to(dtype=torch.float32), reduction="none") mask_loss = ((mask_losses.mean(dim=-1) * gt_ctr).sum() / loss_denorm) return None, {"loss_mask": mask_loss} else: # no proposals total_instances = sum([len(x) for x in proposals]) if total_instances == 0: # add empty pred_masks results for box in proposals: box.pred_masks = box.pred_classes.view( -1, 1, self.pooler_resolution, self.pooler_resolution) return proposals, {} rois = self.pooler(bases, [x.pred_boxes for x in proposals]) attns = cat([x.top_feat for x in proposals], dim=0) pred_mask_logits = self.merge_bases(rois, attns).sigmoid() pred_mask_logits = pred_mask_logits.view( -1, 1, self.pooler_resolution, self.pooler_resolution) start_ind = 0 for box in proposals: end_ind = start_ind + len(box) box.pred_masks = pred_mask_logits[start_ind:end_ind] start_ind = end_ind return proposals, {} def merge_bases(self, rois, coeffs, location_to_inds=None): # merge predictions N = coeffs.size(0) if location_to_inds is not None: rois = rois[location_to_inds] N, B, H, W = rois.size() coeffs = coeffs.view(N, -1, self.attn_size, self.attn_size) coeffs = F.interpolate(coeffs, (H, W), mode=self.top_interp).softmax(dim=1) masks_preds = (rois * coeffs).sum(dim=1) return masks_preds.view(N, -1) def build_blender(cfg): return Blender(cfg)
null
1,948
from typing import Dict from torch import nn from torch.nn import functional as F from detectron2.utils.registry import Registry from detectron2.layers import ShapeSpec from adet.layers import conv_with_kaiming_uniform BASIS_MODULE_REGISTRY = Registry("BASIS_MODULE") BASIS_MODULE_REGISTRY.__doc__ = """ Registry for basis module, which produces global bases from feature maps. The registered object will be called with `obj(cfg, input_shape)`. The call should return a `nn.Module` object. """ def build_basis_module(cfg, input_shape): name = cfg.MODEL.BASIS_MODULE.NAME return BASIS_MODULE_REGISTRY.get(name)(cfg, input_shape)
null
1,949
import torch.distributed as dist from detectron2.utils.comm import get_world_size from torch.nn import functional as F from torch import nn import torch from detectron2.structures import ImageList from adet.utils.comm import reduce_sum from fvcore.nn import sigmoid_focal_loss_jit def aligned_bilinear(tensor, factor): assert tensor.dim() == 4 assert factor >= 1 assert int(factor) == factor if factor == 1: return tensor h, w = tensor.size()[2:] tensor = F.pad(tensor, pad=(0, 1, 0, 1), mode="replicate") oh = factor * h + 1 ow = factor * w + 1 tensor = F.interpolate( tensor, size=(oh, ow), mode='bilinear', align_corners=True ) tensor = F.pad( tensor, pad=(factor // 2, 0, factor // 2, 0), mode="replicate" ) return tensor[:, :, :oh - 1, :ow - 1]
null
1,950
import torch.distributed as dist from detectron2.utils.comm import get_world_size from torch.nn import functional as F from torch import nn import torch from detectron2.structures import ImageList from adet.utils.comm import reduce_sum from fvcore.nn import sigmoid_focal_loss_jit def compute_basis_stride(images, basis_out): im_h, im_w = images.tensor.size()[-2:] assert len(basis_out["bases"]) == 1 base_h, base_w = basis_out["bases"][0].size()[2:] base_stride_h, base_stride_w = im_h // base_h, im_w // base_w assert base_stride_h == base_stride_w base_stride = base_stride_w return base_stride
null
1,951
import torch.distributed as dist from detectron2.utils.comm import get_world_size from torch.nn import functional as F from torch import nn import torch from detectron2.structures import ImageList from adet.utils.comm import reduce_sum from fvcore.nn import sigmoid_focal_loss_jit class folder(nn.Module): def __init__(self): super().__init__() def forward(self, feature_map): N,_,H,W = feature_map.size() feature_map = F.unfold(feature_map,kernel_size=3,padding=1) feature_map = feature_map.view(N,-1,H,W) return feature_map def top_module(in_channels, attn_len): return folder()
null
1,952
import torch.distributed as dist from detectron2.utils.comm import get_world_size from torch.nn import functional as F from torch import nn import torch from detectron2.structures import ImageList from adet.utils.comm import reduce_sum from fvcore.nn import sigmoid_focal_loss_jit def process_gt_instances(gt_instances, gt_stride, device): basis_heatmap_list = [] head_heatmap_list = [] p3_heatmap_list = [] for instances in gt_instances: one_frame_instances = instances.keypoint_heatmap.to(device = device, dtype = torch.float) one_basis_heatmap = one_frame_instances.max(dim = 0)[0]#.clamp(0,1) basis_heatmap_list.append(one_basis_heatmap) p3_output_list = instances.p3_output_list.to(device = device, dtype = torch.float) p3_output_list = p3_output_list.max(dim = 0)[0]#.clamp(0,1) p3_heatmap_list.append(p3_output_list) one_frame_instances = instances.head_heatmap.to(device = device, dtype = torch.float) for index_instence in range(len(instances)): head_heatmap_list.append(one_frame_instances[index_instence]) basis_heatmap_list = ImageList.from_tensors(basis_heatmap_list) p3_heatmap_list = ImageList.from_tensors(p3_heatmap_list) head_heatmap_list = ImageList.from_tensors(head_heatmap_list) return basis_heatmap_list.tensor, head_heatmap_list.tensor.bool(), p3_heatmap_list.tensor,
null
1,953
import torch.distributed as dist from detectron2.utils.comm import get_world_size from torch.nn import functional as F from torch import nn import torch from detectron2.structures import ImageList from adet.utils.comm import reduce_sum from fvcore.nn import sigmoid_focal_loss_jit def reduce_sum(tensor): world_size = get_world_size() if world_size < 2: return tensor tensor = tensor.clone() dist.all_reduce(tensor, op=dist.ReduceOp.SUM) return tensor def compute_loss(p1_heatmap_list, p3_heatmap_list, p1_logits, p3_logits): # gt_bitmasks = gt_bitmasks.float() # mask_logits = mask_logits.sigmoid() num_gpus = get_world_size() num_dice = (p1_heatmap_list**2).sum() num_dice = reduce_sum(p1_logits.new_tensor([num_dice])).item() num_dice = max(num_dice / num_gpus, 1.0) p1_loss = F.mse_loss(p1_heatmap_list, p1_logits, reduction='sum') / num_dice num_dice = (p3_heatmap_list**2).sum() num_dice = reduce_sum(p3_logits.new_tensor([num_dice])).item() num_dice = max(num_dice / num_gpus, 1.0) p3_loss = F.mse_loss(p3_heatmap_list, p3_logits, reduction='sum') / num_dice # loss = (p1_loss + p3_loss) / 2 return p1_loss, p3_loss
null
1,954
import torch.distributed as dist from detectron2.utils.comm import get_world_size from torch.nn import functional as F from torch import nn import torch from detectron2.structures import ImageList from adet.utils.comm import reduce_sum from fvcore.nn import sigmoid_focal_loss_jit def reduce_sum(tensor): def compute_loss_softmax(gt_bitmasks, mask_logits, num_loss, num_instances, direction, direction_mask_logits, gt_keypoint, max_ranges, distance_norm): assert not torch.isnan(mask_logits).any() assert not torch.isnan(direction).any() assert not torch.isnan(direction_mask_logits).any() # direction_mask_logits = direction_mask_logits.detach() N,K,H,W = gt_bitmasks.size() # gt_bitmasks = gt_bitmasks.float() num_gpus = get_world_size() assert not (num_loss == 0).any() loss_weight = 1/num_loss #TODO num_loss can be 0 sum_loss_weight = loss_weight.sum() assert sum_loss_weight!=0 loss_weight = loss_weight[:,None].repeat(1,17).flatten() gt_bitmasks = gt_bitmasks.reshape(N*K,H*W) mask_logits = mask_logits.reshape(N*K,H*W) gt_bitmasks_visible_mask = gt_bitmasks.sum(dim=1).bool() # assert gt_bitmasks_visible_mask.sum()!=0 #TODO AssertionError if gt_bitmasks_visible_mask.sum()!=0: loss_weight = loss_weight[gt_bitmasks_visible_mask] mask_logits = mask_logits[gt_bitmasks_visible_mask] gt_bitmasks = gt_bitmasks[gt_bitmasks_visible_mask] mask_logits = F.log_softmax(mask_logits,dim=1) total_instances = reduce_sum(mask_logits.new_tensor([num_instances])).item() gpu_balence_factor = num_instances/total_instances loss = (- mask_logits[gt_bitmasks]) loss = (loss*loss_weight).sum()/17 loss = (loss/sum_loss_weight)*gpu_balence_factor max_ranges = max_ranges[:,None].repeat(1,17).flatten()[gt_bitmasks_visible_mask] gt_keypoint = gt_keypoint[:,:,[0,1]] N,H,W,K,_ = direction_mask_logits.size() direction = direction - gt_keypoint[:,None,None,:,:] direction = direction.permute(0,3,1,2,4).reshape(N*17,H,W,2) direction = direction[gt_bitmasks_visible_mask] direction = (direction[:,:,:,0] ** 2 + direction[:,:,:,1] ** 2).sqrt()[:,:,:,None] assert (max_ranges != 0).all() direction = direction / max_ranges[:,None,None,None] direction = direction * distance_norm direction = (direction.sigmoid()-0.5) * 2 direction_mask_logits = direction_mask_logits.permute(0,3,1,2,4).reshape(N*17,H,W,1) direction_mask_logits = direction_mask_logits[gt_bitmasks_visible_mask] direction = direction * direction_mask_logits direction = direction.flatten(start_dim=1).sum(dim=1) direction = direction * loss_weight assert distance_norm != 0 direction_loss = (direction/sum_loss_weight * gpu_balence_factor) / distance_norm direction_loss = direction_loss.sum() assert not torch.isnan(direction_loss).any() assert not torch.isnan(loss).any() return loss, direction_loss else: print('gt_bitmasks_visible_mask.sum()==0') total_instances = reduce_sum(mask_logits.new_tensor([num_instances])).item() loss = mask_logits.sum() + direction.sum() + direction_mask_logits.sum() loss = loss*0.0 return loss, loss
null
1,955
import torch from torch.nn import functional as F from torch import nn from detectron2.layers import cat from detectron2.modeling.poolers import ROIPooler from .utils import aligned_bilinear, compute_loss, compute_loss_softmax from fvcore.nn import sigmoid_focal_loss_jit from adet.utils.comm import reduce_sum from detectron2.utils.comm import get_world_size from detectron2.layers import ConvTranspose2d from detectron2.structures.instances import Instances import logging def build_blender(cfg): return Blender(cfg)
null
1,956
import torch from torch.nn import functional as F from torch import nn from detectron2.layers import cat from detectron2.modeling.poolers import ROIPooler from .utils import aligned_bilinear, compute_loss, compute_loss_softmax from fvcore.nn import sigmoid_focal_loss_jit from adet.utils.comm import reduce_sum from detectron2.utils.comm import get_world_size from detectron2.layers import ConvTranspose2d from detectron2.structures.instances import Instances import logging def compute_locations_per_level(h, w, stride, device): shifts_x = torch.arange( 0, w * stride, step=stride, dtype=torch.float32, device=device ) shifts_y = torch.arange( 0, h * stride, step=stride, dtype=torch.float32, device=device ) shift_y, shift_x = torch.meshgrid(shifts_y, shifts_x) shift_x = shift_x.reshape(-1) shift_y = shift_y.reshape(-1) locations = torch.stack((shift_x, shift_y), dim=1) + stride // 2 return locations
null
1,957
import torch from torch.nn import functional as F from torch import nn from detectron2.layers import cat from detectron2.modeling.poolers import ROIPooler from .utils import aligned_bilinear, compute_loss, compute_loss_softmax from fvcore.nn import sigmoid_focal_loss_jit from adet.utils.comm import reduce_sum from detectron2.utils.comm import get_world_size from detectron2.layers import ConvTranspose2d from detectron2.structures.instances import Instances import logging def dice_coefficient(x, target): eps = 1e-5 n_inst = x.size(0) x = x.reshape(n_inst, -1) target = target.reshape(n_inst, -1) intersection = (x * target).sum(dim=1) union = (x ** 2.0).sum(dim=1) + (target ** 2.0).sum(dim=1) + eps loss = 1. - (2 * intersection / union) return loss
null
1,958
import torch from torch.nn import functional as F from torch import nn from detectron2.layers import cat from detectron2.modeling.poolers import ROIPooler from .utils import aligned_bilinear, compute_loss, compute_loss_softmax from fvcore.nn import sigmoid_focal_loss_jit from adet.utils.comm import reduce_sum from detectron2.utils.comm import get_world_size from detectron2.layers import ConvTranspose2d from detectron2.structures.instances import Instances import logging def get_subnetworks_params(attns, num_bases, channels): assert attns.dim() == 2 n_inst = attns.size(0) w0, b0, w1, b1, w2, b2 = torch.split_with_sizes(attns, [ (2 + num_bases) * channels, channels, channels * channels, channels, channels * 17, 17 ], dim=1) # out_channels x in_channels x 1 x 1 w0 = w0.reshape(n_inst * channels, 2 + num_bases, 1, 1) b0 = b0.reshape(n_inst * channels) w1 = w1.reshape(n_inst * channels, channels, 1, 1) b1 = b1.reshape(n_inst * channels) w2 = w2.reshape(n_inst * 17, channels, 1, 1) b2 = b2.reshape(n_inst*17) return [w0, w1, w2], [b0, b1, b2]
null