text
stringlengths
1
93.6k
# Columns with few distinct values: storing them as pandas categoricals
# shrinks the HDF5 file and speeds up reads/writes.
catfields = ['chrom', 'strand', 'codon', 'orftype']
if not opts.noregress:
    if opts.verbose:
        logprint('Calculating regression results by chromosome')
    workers = mp.Pool(opts.numproc)
    # _regress_chrom returns one tuple of DataFrames per chromosome;
    # zip(*...) regroups them by table kind, then each kind is concatenated.
    # With --startonly the workers emit (orf, start); otherwise (orf, start, stop).
    table_names = (['orf_strengths', 'start_strengths'] if opts.startonly
                   else ['orf_strengths', 'start_strengths', 'stop_strengths'])
    tables = [pd.concat(res_dfs).reset_index()
              for res_dfs in zip(*workers.map(_regress_chrom, chroms))]
    # Mirror the original tuple-unpack: a worker returning the wrong number
    # of tables is an error, not something to silently truncate.
    if len(tables) != len(table_names):
        raise ValueError('expected %d result tables per chromosome, got %d'
                         % (len(table_names), len(tables)))
    if opts.verbose:
        logprint('Saving results')
    for df in tables:
        for catfield in catfields:
            if catfield in df.columns:
                df[catfield] = df[catfield].astype('category')  # saves disk space and read/write time
    with pd.HDFStore(regressfilename, mode='w') as outstore:
        for name, df in zip(table_names, tables):
            outstore.put(name, df, format='t', data_columns=True)
    workers.close()
if opts.verbose:
    logprint('Tasks complete')
# <FILESEP>
import os, sys, argparse, time, random
from functools import partial
sys.path.append('./')
import numpy as np
import torch
import torch.nn.functional as F
from torch.utils.data import DataLoader
from models.cifar10.resnet_DuBIN import ResNet18_DuBIN
from models.cifar10.wideresnet_DuBIN import WRN40_DuBIN
from models.cifar10.resnext_DuBIN import ResNeXt29_DuBIN
from models.imagenet.resnet_DuBIN import ResNet18_DuBIN as INResNet18_DuBIN
from dataloaders.cifar10 import cifar_dataloaders, cifar_c_testloader, cifar10_1_testloader, cifar_random_affine_test_set
from dataloaders.tiny_imagenet import tiny_imagenet_dataloaders, tiny_imagenet_c_testloader
from dataloaders.imagenet import imagenet_dataloaders, imagenet_c_testloader
from utils.utils import *
# Command-line interface for the robustness evaluation script.
parser = argparse.ArgumentParser(description='Trains a CIFAR Classifier')
parser.add_argument('--gpu', default='0')
parser.add_argument('--cpus', type=int, default=4)
# dataset:
parser.add_argument('--dataset', '--ds', default='cifar10', choices=['cifar10', 'cifar100', 'tin', 'IN'], help='which dataset to use')
parser.add_argument('--data_root_path', '--drp', default='/ssd1/haotao/datasets/', help='Where you save all your datasets.')
# NOTE: default must be a member of choices — argparse does not validate
# defaults, and the old default 'WRN40' fell through every branch of the
# model dispatch below, leaving model_fn undefined.
parser.add_argument('--model', '--md', default='WRN40_DuBIN', choices=['ResNet18_DuBIN', 'WRN40_DuBIN', 'ResNeXt29_DuBIN'], help='which model to use')
parser.add_argument('--widen_factor', '--widen', default=2, type=int, help='widen factor for WRN')
#
parser.add_argument('--test_batch_size', '--tb', type=int, default=1000)
parser.add_argument('--ckpt_path', default='')
parser.add_argument('--mode', default='clean', choices=['clean', 'c', 'v2', 'sta', 'all'], help='Which dataset to evaluate on')
parser.add_argument('--k', default=10, type=int, help='hyperparameter k in worst-of-k spatial attack')
parser.add_argument('--save_root_path', '--srp', default='/ssd1/haotao', help='where you save the outputs')
args = parser.parse_args()
print(args)
# Restrict CUDA to the requested device(s) before any torch CUDA init.
os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu
# The 15 standard common-corruption types used by CIFAR-10-C / ImageNet-C
# (Hendrycks & Dietterich, "Benchmarking Neural Network Robustness", 2019).
CORRUPTIONS = (
    'gaussian_noise shot_noise impulse_noise defocus_blur glass_blur '
    'motion_blur zoom_blur snow frost fog brightness contrast '
    'elastic_transform pixelate jpeg_compression'
).split()
# model:
if args.dataset == 'IN':
model_fn = INResNet18_DuBIN
else:
if args.model == 'ResNet18_DuBIN':
model_fn = ResNet18_DuBIN
if args.model == 'WRN40_DuBIN':
model_fn = partial(WRN40_DuBIN, widen_factor=args.widen_factor)