| python_code (string, 0–4.04M chars) | repo_name (string, 8–58 chars) | file_path (string, 5–147 chars) |
|---|---|---|
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
from logging import getLogger
from random import randrange
import os
import numpy as np
from sklearn.feature_extraction ... | DeeperCluster-main | src/data/loader.py |
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
import os
import zipfile
import numpy as np
from PIL import Image
from PIL import ImageFile
ImageFile.LOAD_TRUNCATED_IMA... | DeeperCluster-main | src/data/YFCC100M.py |
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
import glob
import os
from collections import defaultdict
from PIL import Image
from PIL import ImageFile
ImageFile.LOAD... | DeeperCluster-main | src/data/VOC2007.py |
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import torch
import torch.nn as nn
import torch.optim as optim
from torch.utils.tensorboard import SummaryWriter
import numpy as np
import time
import os
from six.moves import cPickle
import traceback
from co... | connect-caption-and-trace-main | tools/train.py |
| connect-caption-and-trace-main | captioning/__init__.py |
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import torch
import torch.nn as nn
import numpy as np
import torch.optim as optim
import os
import torch.nn.functional as F
import six
from six.moves import cPickle
bad_endings = ['with','... | connect-caption-and-trace-main | captioning/utils/misc.py |
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import torch
import torch.nn as nn
import torch.nn.functional as F
import numpy as np
import json
from json import encoder
import random
import string
import time
import os
import sys
from . import misc as uti... | connect-caption-and-trace-main | captioning/utils/eval_utils_orig.py |
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import torch
import torch.nn as nn
import numpy as np
import json
from json import encoder
import random
import string
import time
import os
import sys
from . import misc as utils
from eval_utils import getCOC... | connect-caption-and-trace-main | captioning/utils/eval_multi.py |
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
# Copied from fvcore
import logging
import os
from typing import Any
import yaml
from yacs.config import CfgNode as _CfgNode
import io as PathManager
BASE_KEY = "_BASE_"
class CfgNode(_CfgNode):
"""
Our own extended version of :class:`ya... | connect-caption-and-trace-main | captioning/utils/config.py |
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import torch
import torch.nn as nn
import torch.nn.functional as F
import numpy as np
import json
from json import encoder
import random
import string
import time
import os
import sys
from . import misc as uti... | connect-caption-and-trace-main | captioning/utils/eval_utils_for_coco_caption.py |
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import torch
import torch.nn as nn
import torch.nn.functional as F
import numpy as np
import json
from json import encoder
import random
import string
import time
import os
import sys
from . import misc as uti... | connect-caption-and-trace-main | captioning/utils/eval_utils_show_control_tell.py |
| connect-caption-and-trace-main | captioning/utils/__init__.py |
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import torch
import torch.nn as nn
import torch.nn.functional as F
import numpy as np
import json
from json import encoder
import random
import string
import time
import os
import sys
from captioning.utils imp... | connect-caption-and-trace-main | captioning/utils/for_debug_eval_spice.py |
import torch
import torch.nn as nn
import torch.nn.functional as F
class myResnet(nn.Module):
def __init__(self, resnet):
super(myResnet, self).__init__()
self.resnet = resnet
def forward(self, img, att_size=14):
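        # img: a single (3, H, W) image tensor; add a batch dim, run it through
        # the ResNet trunk, then pool into a global fc feature and an
        # att_size x att_size attention feature map.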
x = img.unsqueeze(0)
x = self.resnet.conv1(x)
x = self.r... | connect-caption-and-trace-main | captioning/utils/resnet_utils.py |
import torch
import torch.nn as nn
import torchvision.models.resnet
from torchvision.models.resnet import BasicBlock, Bottleneck
class ResNet(torchvision.models.resnet.ResNet):
def __init__(self, block, layers, num_classes=1000):
super(ResNet, self).__init__(block, layers, num_classes)
self.maxpool... | connect-caption-and-trace-main | captioning/utils/resnet.py |
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import torch
import torch.nn as nn
import torch.nn.functional as F
import numpy as np
import json
from json import encoder
import random
import string
import time
import os
import sys
from . import misc as uti... | connect-caption-and-trace-main | captioning/utils/eval_utils_joint.py |
from __future__ import print_function
import argparse
def if_use_feat(caption_model):
    # Decide whether to load attention features according to the caption model
if caption_model in ['show_tell', 'all_img', 'fc', 'newfc']:
use_att, use_fc = False, True
elif caption_model == 'language_model':
use_att, us... | connect-caption-and-trace-main | captioning/utils/opts.py |
import torch
import scipy.optimize
import numpy as np
def local_OT(D, window=0):
p = D.shape[1]; m = D.shape[2] # p < m, e.g., p = 10, m = 20
# construct the cx, ax=b
x = torch.rand([10,p*m])
A = torch.zeros([p,p*m])
b = torch.ones([p])
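    # One equality constraint per source position: row i of A selects the m
    # transport variables of position i, so A x = b forces each row of the
    # (p x m) transport plan to sum to one.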
for i in range(p):
A[i, (i)... | connect-caption-and-trace-main | captioning/utils/local_optimal_transport.py |
from random import uniform
import numpy as np
from collections import OrderedDict, defaultdict
from itertools import tee
import time
# -----------------------------------------------
def find_ngrams(input_list, n):
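    # zip n shifted copies of the list; each resulting tuple is one n-gram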
return zip(*[input_list[i:] for i in range(n)])
def compute_div_n(caps,n=1):
aggr_div = []
for ... | connect-caption-and-trace-main | captioning/utils/div_utils.py |
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import time
from collections import OrderedDict
import torch
import sys
try:
sys.path.append("cider")
from pyciderevalcap.ciderD.ciderD import CiderD
from pyciderevalcap.cider.ci... | connect-caption-and-trace-main | captioning/utils/rewards.py |
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import torch
import torch.nn as nn
import torch.nn.functional as F
import numpy as np
import json
from json import encoder
import random
import string
import time
import os
import sys
from . import misc as uti... | connect-caption-and-trace-main | captioning/utils/eval_utils.py |
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import torch
import torch.nn as nn
import torch.nn.functional as F
import numpy as np
import json
from json import encoder
import random
import string
import time
import os
import sys
from . import misc as uti... | connect-caption-and-trace-main | captioning/utils/eval_utils_caption_generation.py |
# This file contains Transformer network
# Most of the code is copied from http://nlp.seas.harvard.edu/2018/04/03/attention.html
# The cfg name correspondence:
# N=num_layers
# d_model=input_encoding_size
# d_ff=rnn_size
# h is always 8
from __future__ import absolute_import
from __future__ import division
from __fut... | connect-caption-and-trace-main | captioning/models/TransformerModel_trace_generation_caption_to_encoder.py |
# This file contains Transformer network
# Most of the code is copied from http://nlp.seas.harvard.edu/2018/04/03/attention.html
# The cfg name correspondence:
# N=num_layers
# d_model=input_encoding_size
# d_ff=rnn_size
# h is always 8
from __future__ import absolute_import
from __future__ import division
from __fut... | connect-caption-and-trace-main | captioning/models/cachedTransformer.py |
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.autograd import *
from . import utils
from .CaptionModel import CaptionModel
class ShowTellModel(CaptionModel):
def __init__(s... | connect-caption-and-trace-main | captioning/models/ShowTellModel.py |
# This file contains Att2in2, AdaAtt, AdaAttMO, UpDown model
# AdaAtt is from Knowing When to Look: Adaptive Attention via A Visual Sentinel for Image Captioning
# https://arxiv.org/abs/1612.01887
# AdaAttMO is a modified version with maxout lstm
# Att2in is from Self-critical Sequence Training for Image Captioning
#... | connect-caption-and-trace-main | captioning/models/AttModel_both_backup_2020_11_11.py |
# This file contains Att2in2, AdaAtt, AdaAttMO, UpDown model
# AdaAtt is from Knowing When to Look: Adaptive Attention via A Visual Sentinel for Image Captioning
# https://arxiv.org/abs/1612.01887
# AdaAttMO is a modified version with maxout lstm
# Att2in is from Self-critical Sequence Training for Image Captioning
#... | connect-caption-and-trace-main | captioning/models/AttModel.py |
# This file contains Att2in2, AdaAtt, AdaAttMO, UpDown model
# AdaAtt is from Knowing When to Look: Adaptive Attention via A Visual Sentinel for Image Captioning
# https://arxiv.org/abs/1612.01887
# AdaAttMO is a modified version with maxout lstm
# Att2in is from Self-critical Sequence Training for Image Captioning
#... | connect-caption-and-trace-main | captioning/models/AttModel_both.py |
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import copy
import numpy as np
import torch
from .ShowTellModel import ShowTellModel
from .FCModel import FCModel
from .AttModel_both import *
from .TransformerModel_mitr import TransformerModel
#... | connect-caption-and-trace-main | captioning/models/__init__.py |
# This file contains our mirrored Transformer network
# The branch for extracted visual features is implemented in "encoder",
# and then branches for trace and caption are implemented in "decoder"
# The cfg name correspondence:
# N_layer=num_layers
# d_model=input_encoding_size
# d_ff=rnn_size
# h is always 8
from __... | connect-caption-and-trace-main | captioning/models/TransformerModel_mitr.py |
# This file contains Att2in2, AdaAtt, AdaAttMO, UpDown model
# AdaAtt is from Knowing When to Look: Adaptive Attention via A Visual Sentinel for Image Captioning
# https://arxiv.org/abs/1612.01887
# AdaAttMO is a modified version with maxout lstm
# Att2in is from Self-critical Sequence Training for Image Captioning
#... | connect-caption-and-trace-main | captioning/models/AttModel_encoder_trace.py |
# This file contains Att2in2, AdaAtt, AdaAttMO, UpDown model
# AdaAtt is from Knowing When to Look: Adaptive Attention via A Visual Sentinel for Image Captioning
# https://arxiv.org/abs/1612.01887
# AdaAttMO is a modified version with maxout lstm
# Att2in is from Self-critical Sequence Training for Image Captioning
#... | connect-caption-and-trace-main | captioning/models/AttModel_standard_enco_deco_both.py |
"""
Instructions for using meshed_memory_transformer (https://arxiv.org/abs/1912.08226)
pip install git+https://github.com/ruotianluo/meshed-memory-transformer.git
Note:
Currently m2transformer is not performing as well as the original transformer. Not sure why; still investigating.
"""
from __future__ import absolute_impor... | connect-caption-and-trace-main | captioning/models/M2Transformer.py |
import torch
def repeat_tensors(n, x):
"""
For a tensor of size Bx..., we repeat it n times, and make it Bnx...
For collections, do nested repeat
"""
if torch.is_tensor(x):
x = x.unsqueeze(1) # Bx1x...
x = x.expand(-1, n, *([-1]*len(x.shape[2:]))) # Bxnx...
x = x.reshape(x.s... | connect-caption-and-trace-main | captioning/models/utils.py |
# This file contains ShowAttendTell and AllImg model
# ShowAttendTell is from Show, Attend and Tell: Neural Image Caption Generation with Visual Attention
# https://arxiv.org/abs/1502.03044
# AllImg is a model where
# img feature is concatenated with word embedding at every time step as the input of lstm
from __futur... | connect-caption-and-trace-main | captioning/models/CaptionModel_orig.py |
# This file contains Att2in2, AdaAtt, AdaAttMO, UpDown model
# AdaAtt is from Knowing When to Look: Adaptive Attention via A Visual Sentinel for Image Captioning
# https://arxiv.org/abs/1612.01887
# AdaAttMO is a modified version with maxout lstm
# Att2in is from Self-critical Sequence Training for Image Captioning
#... | connect-caption-and-trace-main | captioning/models/AttModel_caption_generation.py |
# Implementation for paper 'Attention on Attention for Image Captioning'
# https://arxiv.org/abs/1908.06954
# RT: Code from original author's repo: https://github.com/husthuaan/AoANet/
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import torch
import tor... | connect-caption-and-trace-main | captioning/models/AoAModel.py |
# This file contains Att2in2, AdaAtt, AdaAttMO, UpDown model
# AdaAtt is from Knowing When to Look: Adaptive Attention via A Visual Sentinel for Image Captioning
# https://arxiv.org/abs/1612.01887
# AdaAttMO is a modified version with maxout lstm
# Att2in is from Self-critical Sequence Training for Image Captioning
#... | connect-caption-and-trace-main | captioning/models/AttModel_orig.py |
# This file contains Transformer network
# Most of the code is copied from http://nlp.seas.harvard.edu/2018/04/03/attention.html
# The cfg name correspondence:
# N=num_layers
# d_model=input_encoding_size
# d_ff=rnn_size
# h is always 8
from __future__ import absolute_import
from __future__ import division
from __fut... | connect-caption-and-trace-main | captioning/models/TransformerModel_standard_enco_deco_both.py |
# This file contains ShowAttendTell and AllImg model
# ShowAttendTell is from Show, Attend and Tell: Neural Image Caption Generation with Visual Attention
# https://arxiv.org/abs/1502.03044
# AllImg is a model where
# img feature is concatenated with word embedding at every time step as the input of lstm
from __futur... | connect-caption-and-trace-main | captioning/models/CaptionModel.py |
# This file is the implementation for ensemble evaluation.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.autograd import *
from .CaptionModel import CaptionM... | connect-caption-and-trace-main | captioning/models/AttEnsemble.py |
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.autograd import *
from . import utils
from .CaptionModel import CaptionModel
class LSTMCore(nn.Module):
def __init__(self, opt... | connect-caption-and-trace-main | captioning/models/FCModel.py |
# This file contains Att2in2, AdaAtt, AdaAttMO, UpDown model
# AdaAtt is from Knowing When to Look: Adaptive Attention via A Visual Sentinel for Image Captioning
# https://arxiv.org/abs/1612.01887
# AdaAttMO is a modified version with maxout lstm
# Att2in is from Self-critical Sequence Training for Image Captioning
#... | connect-caption-and-trace-main | captioning/models/AttModel_both_backup_2020_11_07.py |
"""
BertCapModel uses the huggingface transformers BERT model as a seq2seq model.
The result is not as good as the original transformer.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import torch
import torch.nn as nn
import torch.nn.functional as F
import... | connect-caption-and-trace-main | captioning/models/BertCapModel.py |
# This file contains Att2in2, AdaAtt, AdaAttMO, UpDown model
# AdaAtt is from Knowing When to Look: Adaptive Attention via A Visual Sentinel for Image Captioning
# https://arxiv.org/abs/1612.01887
# AdaAttMO is a modified version with maxout lstm
# Att2in is from Self-critical Sequence Training for Image Captioning
#... | connect-caption-and-trace-main | captioning/models/AttModel_for_coco_caption_baseline.py |
# This file contains Att2in2, AdaAtt, AdaAttMO, UpDown model
# AdaAtt is from Knowing When to Look: Adaptive Attention via A Visual Sentinel for Image Captioning
# https://arxiv.org/abs/1612.01887
# AdaAttMO is a modified version with maxout lstm
# Att2in is from Self-critical Sequence Training for Image Captioning
#... | connect-caption-and-trace-main | captioning/models/AttModel_for_coco_caption_task.py |
import torch
from . import losses
from ..utils.rewards import init_scorer, get_self_critical_reward
class LossWrapper(torch.nn.Module):
def __init__(self, model, opt):
super(LossWrapper, self).__init__()
self.opt = opt
self.model = model
if opt.label_smoothing > 0:
self.... | connect-caption-and-trace-main | captioning/modules/loss_wrapper_caption_generation.py |
import torch
import torch.nn.functional as F
from . import losses
from ..utils.rewards import init_scorer, get_self_critical_reward
class LossWrapper(torch.nn.Module):
def __init__(self, model, opt):
super(LossWrapper, self).__init__()
self.opt = opt
self.model = model
if opt.label_... | connect-caption-and-trace-main | captioning/modules/loss_wrapper_show_control_tell.py |
import torch
import torch.nn.functional as F
from . import losses
from ..utils.rewards import init_scorer, get_self_critical_reward
from ..utils.local_optimal_transport import local_OT
class LossWrapper(torch.nn.Module):
def __init__(self, model, opt):
super(LossWrapper, self).__init__()
self.opt ... | connect-caption-and-trace-main | captioning/modules/loss_wrapper_trace_generation.py |
import torch
import torch.nn.functional as F
from . import losses
from ..utils.rewards import init_scorer, get_self_critical_reward
import numpy as np
import random
class LossWrapper(torch.nn.Module):
def __init__(self, model, opt):
super(LossWrapper, self).__init__()
self.opt = opt
self.mo... | connect-caption-and-trace-main | captioning/modules/loss_wrapper_joint.py |
import torch
import torch.nn.functional as F
from . import losses
from ..utils.rewards import init_scorer, get_self_critical_reward
class LossWrapper(torch.nn.Module):
def __init__(self, model, opt):
super(LossWrapper, self).__init__()
self.opt = opt
self.model = model
if opt.label_... | connect-caption-and-trace-main | captioning/modules/loss_wrapper_for_coco_caption.py |
import torch
import torch.nn as nn
from ..utils.rewards import get_scores, get_self_cider_scores
class RewardCriterion(nn.Module):
def __init__(self):
super(RewardCriterion, self).__init__()
def forward(self, input, seq, reward):
input = input.gather(2, seq.unsqueeze(2)).squeeze(2)
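        # input: (B, T, V) word log-probs; gather picks the log-prob of each
        # sampled word in seq, giving a (B, T) matrix to be weighted by reward.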
... | connect-caption-and-trace-main | captioning/modules/losses.py |
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import json
import h5py
import lmdb
import os
import numpy as np
import numpy.random as npr
import random
import torch
import torch.utils.data as data
import multiprocessing
import six
class HybridLoader:
... | connect-caption-and-trace-main | captioning/data/pth_loader.py |
| connect-caption-and-trace-main | captioning/data/__init__.py |
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import json
import h5py
import os
import numpy as np
import random
import torch
import skimage
import skimage.io
import scipy.misc
from torchvision import transforms as trn
preprocess = trn.Compose([
#... | connect-caption-and-trace-main | captioning/data/dataloaderraw.py |
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import json
import h5py
import lmdb
import os
import numpy as np
import numpy.random as npr
import random
import torch
import torch.utils.data as data
import multiprocessing
import six
class HybridLoader:
... | connect-caption-and-trace-main | captioning/data/dataloader.py |
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import json
import h5py
import lmdb
import os
import numpy as np
import numpy.random as npr
import random
import torch
import torch.utils.data as data
import multiprocessing
import six
class HybridLoader:
... | connect-caption-and-trace-main | captioning/data/dataloader_show_control_tell.py |
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import base64
import numpy as np
import csv
import sys
import zlib
import time
import mmap
import argparse
parser = argparse.ArgumentParser()
# output_dir
parser.add_argument('--downloaded_feats', d... | connect-caption-and-trace-main | scripts/make_bu_data.py |
"""
Preprocess a raw json dataset into feature files for use in data_loader.py
Input: json file that has the form
[{ file_path: 'path/img.jpg', captions: ['a caption', ...] }, ...]
example element in this list would look like
{'captions': [u'A man with a red helmet on a small moped on a dirt road. ', u'Man riding a m... | connect-caption-and-trace-main | scripts/prepro_feats.py |
# coding: utf-8
"""
Create a reference json file used for evaluation with `coco-caption` repo.
Used when a reference json is not provided (e.g., flickr30k, or when you have your own train/val/test split)
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
impo... | connect-caption-and-trace-main | scripts/prepro_reference_json.py |
"""
Precompute ngram counts of captions to accelerate CIDEr computation during training.
"""
import os
import json
import argparse
from six.moves import cPickle
import captioning.utils.misc as utils
from collections import defaultdict
import sys
sys.path.append("cider")
from pyciderevalcap.ciderD.ciderD_scorer ... | connect-caption-and-trace-main | scripts/prepro_ngrams.py |
import argparse
import h5py
import os
import numpy as np
import json
from tqdm import tqdm
def main(params):
imgs = json.load(open(params['input_json'], 'r'))
imgs = imgs['images']
N = len(imgs)
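    # when fc features are given, write them all into one hdf5 file, one entry per image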
if params['fc_input_dir'] is not None:
print('processing fc')
with h5py.File(params['fc_o... | connect-caption-and-trace-main | scripts/dump_to_h5df.py |
"""
Preprocess a raw json dataset into hdf5/json files for use in data_loader.py
Input: json file that has the form
[{ file_path: 'path/img.jpg', captions: ['a caption', ...] }, ...]
example element in this list would look like
{'captions': [u'A man with a red helmet on a small moped on a dirt road. ', u'Man riding a ... | connect-caption-and-trace-main | scripts/prepro_labels.py |
import torch
import scipy.optimize
import numpy as np
m = 10
pred = torch.rand([10, m, 4])
label = torch.rand([10, m, 4])
def local_OT(D):
p = D.shape[1]; m = D.shape[2]
# construct the cx, ax=b
x = torch.rand([10,m*m])
A = torch.zeros([m+m,m*m])
b = torch.ones([m+m])
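    # A stacks m row-sum constraints and m column-sum constraints, so A x = b
    # forces the (m x m) transport plan to have uniform marginals.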
for i in range(p):
... | connect-caption-and-trace-main | scripts/my_local_optimal_transport.py |
# copied from https://github.com/Lyken17/Efficient-PyTorch/tools
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import os.path as osp
import sys
from PIL import Image
import six
import string
import lmdb
import pickle
imp... | connect-caption-and-trace-main | scripts/dump_to_lmdb.py |
import numpy as np
import os
import h5py
import jsonlines
import re
import json
# The first directory should point to your feature files extracted by Detectron; box_only and feats_only are the new folders for saving bounding boxes and features (used during training).
i = 0
for... | connect-caption-and-trace-main | scripts/prepare_feats_boxes_from_npz.py |
"""
Preprocess a raw json dataset into hdf5/json files for use in data_loader.lua
Input: json file that has the form
[{ file_path: 'path/img.jpg', captions: ['a caption', ...] }, ...]
example element in this list would look like
{'captions': [u'A man with a red helmet on a small moped on a dirt road. ', u'Man riding a... | connect-caption-and-trace-main | scripts/build_bpe_subword_nmt.py |
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""
This is the main script used for training Classy Vision jobs.
This can be used for training on your local machine,... | cv_bias_amplification-main | my-project-release/my-project/classy_train.py |
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from pathlib import Path
from classy_vision.generic.registry_utils import import_all_modules
FILE_ROOT = Path(__file... | cv_bias_amplification-main | my-project-release/my-project/losses/__init__.py |
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import torch.nn.functional as F
from classy_vision.losses import ClassyLoss, register_loss
@register_loss("one_hot_bi... | cv_bias_amplification-main | my-project-release/my-project/losses/one_hot_binary_ce_loss.py |
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import contextlib
import copy
import enum
import json
import logging
import math
import multiprocessing as mp
import ti... | cv_bias_amplification-main | my-project-release/my-project/tasks/biasamp_classification_task.py |
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import traceback
from pathlib import Path
from classy_vision.generic.registry_utils import import_all_modules
from cl... | cv_bias_amplification-main | my-project-release/my-project/tasks/__init__.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from torchvision.datasets import FashionMNIST
import torch.utils.data
import torch
from torchvision import datasets, transforms
import clas... | cv_bias_amplification-main | my-project-release/my-project/datasets/cifar100_random_sample.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from PIL import Image
import numpy as np
from typing import Any, Callable, Dict, List, Optional, Tuple, Union
import json
from classy_visio... | cv_bias_amplification-main | my-project-release/my-project/datasets/inversion_transforms.py |
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from typing import Any, Callable, Dict, Optional, Union
from classy_vision.dataset import ClassyDataset, register_dat... | cv_bias_amplification-main | my-project-release/my-project/datasets/cifar100.py |
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from pathlib import Path
from classy_vision.generic.registry_utils import import_all_modules
FILE_ROOT = Path(__file... | cv_bias_amplification-main | my-project-release/my-project/datasets/__init__.py |
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from typing import Any, Callable, Dict, Optional, Union
from classy_vision.dataset import ClassyDataset, register_dat... | cv_bias_amplification-main | my-project-release/my-project/datasets/fashionmnist.py |
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from typing import Any, Callable, Dict, Optional, Union
from classy_vision.dataset import ClassyDataset, register_dat... | cv_bias_amplification-main | my-project-release/my-project/datasets/cifar10_overlay.py |
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from pathlib import Path
from classy_vision.generic.registry_utils import import_all_modules
FILE_ROOT = Path(__file... | cv_bias_amplification-main | my-project-release/my-project/models/__init__.py |
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import torch
from torch import Tensor
import torch.nn as nn
from typing import Type, Any, Callable, Union, List, Option... | cv_bias_amplification-main | my-project-release/my-project/models/custom_resnet.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from copy import error
import torch
import torch.utils.data
import ast
import itertools
import json
import numpy as np
import pandas as pd... | cv_bias_amplification-main | my-project-release/my-project/configs/fashionmnist/scripts/training_measurements.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
# Run from within the /scripts folder.
import json
import numpy as np
import pandas as pd
import classy_vision.generic.util as util
import... | cv_bias_amplification-main | my-project-release/my-project/configs/fashionmnist/scripts/generate_experiment_configs.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from copy import error
import torch
import torch.utils.data
import ast
import itertools
import json
import numpy as np
import pandas as pd... | cv_bias_amplification-main | my-project-release/my-project/configs/cifar100_width/scripts/training_measurements.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import json
import numpy as np
import classy_vision.generic.util as util
import random
import pandas as pd
import os
CONFIG_PATH = os.pat... | cv_bias_amplification-main | my-project-release/my-project/configs/cifar100_width/scripts/generate_experiment_configs.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from copy import error
import torch
import torch.utils.data
import ast
import itertools
import json
import numpy as np
import pandas as pd... | cv_bias_amplification-main | my-project-release/my-project/configs/cifar100/scripts/training_measurements_checkpoints.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import json
import numpy as np
import classy_vision.generic.util as util
import random
import pandas as pd
import os
CONFIG_PATH = os.pat... | cv_bias_amplification-main | my-project-release/my-project/configs/cifar100/scripts/generate_experiment_configs.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from copy import error
import torch
import torch.utils.data
import ast
import itertools
import json
import numpy as np
import pandas as pd... | cv_bias_amplification-main | my-project-release/my-project/configs/cifar10_overlay/scripts/training_measurements.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import json
import numpy as np
import classy_vision.generic.util as util
import random
import pandas as pd
import os
CONFIG_PATH = os.path... | cv_bias_amplification-main | my-project-release/my-project/configs/cifar10_overlay/scripts/generate_experiment_configs.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from copy import error
import torch
import torch.utils.data
import ast
import itertools
import json
import numpy as np
import pandas as pd... | cv_bias_amplification-main | my-project-release/my-project/configs/cifar100_trainingsize/scripts/training_measurements.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import json
import numpy as np
import classy_vision.generic.util as util
import random
import pandas as pd
import os
CONFIG_PATH = os.pat... | cv_bias_amplification-main | my-project-release/my-project/configs/cifar100_trainingsize/scripts/generate_experiment_configs.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from copy import error
import torch
import torch.utils.data
import ast
import itertools
import json
import numpy as np
import pandas as pd... | cv_bias_amplification-main | my-project-release/my-project/configs/cifar100_regularization/scripts/training_measurements.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import json
import numpy as np
import classy_vision.generic.util as util
import random
import pandas as pd
import os
CONFIG_PATH = os.pat... | cv_bias_amplification-main | my-project-release/my-project/configs/cifar100_regularization/scripts/generate_experiment_configs.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from copy import error
import torch
import torch.utils.data
import ast
import itertools
import json
import numpy as np
import pandas as pd... | cv_bias_amplification-main | my-project-release/my-project/configs/cifar100_swapped/scripts/training_measurements_checkpoints.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import json
import numpy as np
import classy_vision.generic.util as util
import random
import pandas as pd
import os
CONFIG_PATH = os.pat... | cv_bias_amplification-main | my-project-release/my-project/configs/cifar100_swapped/scripts/generate_experiment_configs.py |
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from pathlib import Path
from classy_vision.generic.registry_utils import import_all_modules
FILE_ROOT = Path(__file... | cv_bias_amplification-main | my-project-release/my-project/trainer/__init__.py |
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import logging
from typing import Optional
from classy_vision.generic.distributed_util import set_cpu_device, set_cuda... | cv_bias_amplification-main | my-project-release/my-project/trainer/gpu_trainer.py |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
| Clockwork-main | __init__.py |
#!/usr/bin/env python3
# pyre-strict
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import asyncio
from planner.config import (
PlannerConfig,
get_algorithm,
get_task_fetche... | Clockwork-main | main.py |
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from __future__ import annotations
import logging
import sys
import traceback
from common.data_types import UnixtimeA... | Clockwork-main | planner/planner.py |
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from __future__ import annotations
from dataclasses import dataclass
from typing import Optional
from algorithm.algor... | Clockwork-main | planner/config.py |
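Each row above is only a preview: the full `python_code` cell stores the complete source of one file, keyed by `repo_name` and `file_path`. Below is a minimal sketch of how such a three-column dump can be loaded and regrouped into per-repo file trees, assuming the table has been exported to a Parquet file; the `code_dump.parquet` name is hypothetical, while the column names follow the header above.

```python
import pandas as pd

# Hypothetical export of the table above; columns match the header:
# python_code, repo_name, file_path
df = pd.read_parquet("code_dump.parquet")

# Rebuild a per-repo view: repo_name -> {file_path: python_code}
repos = {
    repo: dict(zip(group["file_path"], group["python_code"]))
    for repo, group in df.groupby("repo_name")
}

print(len(repos), "repos,", len(df), "files")
# e.g., list a few files of one repo seen above
print(sorted(repos.get("connect-caption-and-trace-main", {}))[:5])
```

Grouping by `repo_name` first makes it cheap to materialize any single repository to disk (one `open(path, "w")` per entry) without scanning the whole dump repeatedly.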