| repo_name (stringlengths 6-97) | path (stringlengths 3-341) | text (stringlengths 8-1.02M) |
|---|---|---|
CornellLenard/Deep-Learning-Course-Exercises
|
Exercise 06/exercise_code/networks/loss.py
|
<gh_stars>0
import numpy as np
class Loss(object):
def __init__(self):
self.grad_history = []
def forward(self, y_out, y_truth):
        raise NotImplementedError
def backward(self, y_out, y_truth, upstream_grad=1.):
        raise NotImplementedError
def __call__(self, y_out, y_truth):
loss = self.forward(y_out, y_truth)
grad = self.backward(y_out, y_truth)
return loss, grad
class L1(Loss):
def forward(self, y_out, y_truth, reduction="mean"):
"""
Performs the forward pass of the L1 loss function.
:param y_out: [N, ] array predicted value of your model.
:param y_truth: [N, ] array ground truth value of your training set.
        :param reduction: 'mean' (default), 'sum' or 'none'.
        :return: L1 loss, reduced according to `reduction`; a scalar for
            'mean'/'sum', an [N, ] array of per-sample losses for 'none'.
"""
result = np.abs(y_out - y_truth)
if reduction == "mean":
result = result.mean()
elif reduction == "sum":
result = result.sum()
elif reduction == "none":
pass
else:
raise NotImplementedError
return result
def backward(self, y_out, y_truth):
"""
Performs the backward pass of the L1 loss function.
:param y_out: [N, ] array predicted value of your model.
:param y_truth: [N, ] array ground truth value of your training set.
:return: [N, ] array of L1 loss gradients w.r.t y_out for
each sample of your training set.
"""
gradient = y_out - y_truth
zero_loc = np.where(gradient == 0)
negative_loc = np.where(gradient < 0)
positive_loc = np.where(gradient > 0)
gradient[zero_loc] = 0
gradient[positive_loc] = 1
gradient[negative_loc] = -1
return gradient
class MSE(Loss):
def forward(self, y_out, y_truth, reduction="mean"):
"""
Performs the forward pass of the MSE loss function.
:param y_out: [N, ] array predicted value of your model.
:param y_truth: [N, ] array ground truth value of your training set.
        :param reduction: 'mean' (default), 'sum' or 'none'.
        :return: MSE loss, reduced according to `reduction`; a scalar for
            'mean'/'sum', an [N, ] array of per-sample losses for 'none'.
"""
result = (y_out - y_truth) ** 2
if reduction == "mean":
result = result.mean()
elif reduction == "sum":
result = result.sum()
elif reduction == "none":
pass
else:
raise NotImplementedError
return result
def backward(self, y_out, y_truth):
"""
Performs the backward pass of the MSE loss function.
:param y_out: [N, ] array predicted value of your model.
:param y_truth: [N, ] array ground truth value of your training set.
:return: [N, ] array of MSE loss gradients w.r.t y_out for
each sample of your training set.
"""
gradient = 2 * (y_out - y_truth)
return gradient
class BCE(Loss):
def forward(self, y_out, y_truth, reduction="mean"):
"""
Performs the forward pass of the binary cross entropy loss function.
:param y_out: [N, ] array predicted value of your model.
:param y_truth: [N, ] array ground truth value of your training set.
        :param reduction: 'mean' (default), 'sum' or 'none'.
        :return: binary cross entropy loss, reduced according to `reduction`;
            a scalar for 'mean'/'sum', an [N, ] array of per-sample losses for 'none'.
"""
result = -y_truth * np.log(y_out) - (1 - y_truth) * np.log(1 - y_out)
if reduction == "mean":
result = result.mean()
elif reduction == "sum":
result = result.sum()
elif reduction == "none":
pass
else:
raise NotImplementedError
return result
def backward(self, y_out, y_truth):
"""
Performs the backward pass of the loss function.
:param y_out: [N, ] array predicted value of your model.
:param y_truth: [N, ] array ground truth value of your training set.
:return: [N, ] array of binary cross entropy loss gradients w.r.t y_out for
each sample of your training set.
"""
gradient = -(y_truth / y_out) + (1 - y_truth) / (1 - y_out)
return gradient
class CrossEntropyFromLogits(Loss):
    def __init__(self):
        super().__init__()
        self.cache = {}
def forward(self, y_out, y_truth, reduction="mean"):
"""
Performs the forward pass of the cross entropy loss function.
:param y_out: [N, C] array with the predicted logits of the model
(i.e. the value before applying any activation)
:param y_truth: [N, ] array with ground truth labels.
        :param reduction: unused; the loss is always averaged over the batch.
        :return: float, the cross-entropy loss averaged over the batch.
"""
# Transform the ground truth labels into one hot encodings.
N = y_out.shape[0]
y_truth_one_hot = np.zeros_like(y_out)
y_truth_one_hot[np.arange(N), y_truth] = 1
# Transform the logits into a distribution using softmax.
y_out_exp = np.exp(y_out - np.max(y_out, axis=1, keepdims=True))
y_out_probabilities = y_out_exp / np.sum(y_out_exp, axis=1, keepdims=True)
# Compute the loss for each element in the batch.
loss = -y_truth_one_hot * np.log(y_out_probabilities)
loss = loss.sum(axis=1).mean()
self.cache["probs"] = y_out_probabilities
return loss
def backward(self, y_out, y_truth):
N = y_out.shape[0]
        # Copy the cached probabilities so repeated backward calls stay correct.
        gradient = self.cache["probs"].copy()
gradient[np.arange(N), y_truth] -= 1
gradient /= N
return gradient
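if __name__ == "__main__":
    # Editor's sketch, not part of the original exercise code: a minimal sanity
    # check of the losses above on tiny hand-made arrays.
    y_hat = np.array([0.8, 0.3])
    y = np.array([1.0, 0.0])
    print(L1()(y_hat, y))    # loss 0.25, gradients [-1., 1.]
    print(MSE()(y_hat, y))   # loss 0.065, gradients [-0.4, 0.6]
    print(BCE()(y_hat, y))   # loss ~0.29, gradients [-1.25, ~1.43]
    logits = np.array([[2.0, 1.0, 0.1]])
    print(CrossEntropyFromLogits()(logits, np.array([0])))  # loss ~0.417, gradient row sums to ~0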
|
CornellLenard/Deep-Learning-Course-Exercises
|
Exercise 08/exercise_code/Util.py
|
<reponame>CornellLenard/Deep-Learning-Course-Exercises
import os
import torch
import pickle
from exercise_code.models import Encoder, Classifier
PARAM_LIMIT = 5e6
SIZE_LIMIT_MB = 20
ACC_THRESHOLD = 0.5
def checkParams(model):
n_params = sum(p.numel() for p in model.parameters())
if n_params > PARAM_LIMIT:
print(
"Your model has {:.3f} mio. params but must have less than 5 mio. params. Simplify your model before submitting it. You won't need that many params :)".format(
n_params / 1e6))
return False
print("FYI: Your model has {:.3f} mio. params.".format(n_params / 1e6))
return True
def checkLayers(model):
'''
Important Note: convolutional layers are not allowed in this exercise, as they have not been covered yet in the lecture.
    Using these would be highly unfair towards students who haven't heard about them yet.
'''
forbidden_layers = [torch.nn.modules.conv.Conv2d]
for key, module in model.encoder._modules.items():
for i in range(len(module)):
            if type(module[i]) in forbidden_layers:
print(
"Please don't use convolutions! For now, only use layers that have been already covered in the lecture!")
return False
return True
def checkSize(path="./models/classifier_pytorch.torch"):
size = os.path.getsize(path)
sizeMB = size / 1e6
if sizeMB > SIZE_LIMIT_MB:
print(
"Your model is too large! The size is {:.1f} MB, but it must be less than 20 MB. Please simplify your model before submitting.".format(
sizeMB))
return False
print("Great! Your model size is less than 20 MB and will be accepted :)")
return True
def printModelInfo(model):
accepted = checkParams(model) & checkLayers(model)
print("Model accepted!") if accepted else print(
"Model not accepted. Please follow the instructions.")
return accepted
def load_model(model_path):
model_dict = pickle.load(open(model_path, 'rb'))["classifier_pt1"]
encoder = Encoder(model_dict['encoder_hparam'], model_dict['encoder_inputsize'], model_dict['encoder_latent_dim'])
model = Classifier(model_dict['hparams'], encoder)
model.load_state_dict(model_dict["state_dict"])
return model
def save_model(model, file_name, directory="models"):
model = model.cpu()
model_dict = {"classifier_pt1": {
"state_dict": model.state_dict(),
"hparams": model.hparams,
'encoder_hparam': model.encoder.hparams,
'encoder_inputsize': model.encoder.input_size,
'encoder_latent_dim': model.encoder.latent_dim,
'encoder_state_dict': model.encoder.state_dict()
}}
if not os.path.exists(directory):
os.makedirs(directory)
    with open(os.path.join(directory, file_name), 'wb') as f:
        pickle.dump(model_dict, f, protocol=4)
def test_and_save(model):
_, val_acc = model.getAcc(model.val_dataloader())
print("Validation-Accuracy: {}%".format(val_acc * 100))
if val_acc < ACC_THRESHOLD:
print(
"That's too low! Please tune your model in order to reach at least {}% before running on the test set and submitting!".format(
ACC_THRESHOLD * 100))
return
if not (checkParams(model) & checkLayers(model)):
return
save_model(model, "classifier_pytorch.p")
if not checkSize("./models/classifier_pytorch.p"):
return
print("Your model has been saved and is ready to be submitted. NOW, let's check the test-accuracy.")
_, test_acc = model.getAcc()
print("Test-Accuracy: {}%".format(test_acc * 100))
|
CornellLenard/Deep-Learning-Course-Exercises
|
Exercise 10/exercise_code/networks/segmentation_nn.py
|
<gh_stars>0
"""SegmentationNN"""
import torch
import torch.nn as nn
from torchvision import models
class SegmentationNN(nn.Module):
def __init__(self, num_classes=23, hparams=None):
super().__init__()
self.hparams = hparams
self.num_classes = num_classes
#######################################################################
# YOUR CODE #
#######################################################################
# The encoder part
self.encoder = models.alexnet(pretrained=True).features
# The decoder part
self.decoder = nn.Sequential(
nn.Conv2d(256, 4096, kernel_size=1, padding=0, stride=1),
nn.BatchNorm2d(4096),
nn.ReLU(),
nn.Dropout(p=0.2),
nn.Upsample(scale_factor=8, mode="bilinear"),
nn.Conv2d(4096, 256, kernel_size=1, padding=0, stride=1),
nn.BatchNorm2d(256),
nn.ReLU(),
nn.Dropout(p=0.2),
nn.Upsample(scale_factor=5, mode="bilinear"),
nn.Conv2d(256, self.num_classes, kernel_size=3, padding=1, stride=1),
nn.BatchNorm2d(self.num_classes),
nn.ReLU(),
nn.Dropout(p=0.2),
nn.Conv2d(self.num_classes, self.num_classes, kernel_size=3, padding=1, stride=1),
)
self.initialize()
def initialize(self):
for m in self.decoder.modules():
if isinstance(m, nn.Conv2d):
nn.init.kaiming_uniform_(m.weight, nonlinearity="relu")
elif isinstance(m, nn.BatchNorm2d):
nn.init.constant_(m.weight, 1)
nn.init.constant_(m.bias, 0)
#######################################################################
# END OF YOUR CODE #
#######################################################################
def forward(self, x):
"""
Forward pass of the convolutional neural network. Should not be called
manually but by calling a model instance directly.
Inputs:
- x: PyTorch input Variable
"""
#######################################################################
# YOUR CODE #
#######################################################################
x = self.encoder(x)
x = self.decoder(x)
#######################################################################
# END OF YOUR CODE #
#######################################################################
return x
@property
def is_cuda(self):
"""
Check if model parameters are allocated on the GPU.
"""
return next(self.parameters()).is_cuda
def save(self, path):
"""
Save model with its parameters to the given path. Conventionally the
path should end with "*.model".
Inputs:
- path: path string
"""
print('Saving model... %s' % path)
torch.save(self, path)
class DummySegmentationModel(nn.Module):
def __init__(self, target_image):
super().__init__()
def _to_one_hot(y, num_classes):
scatter_dim = len(y.size())
y_tensor = y.view(*y.size(), -1)
zeros = torch.zeros(*y.size(), num_classes, dtype=y.dtype)
return zeros.scatter(scatter_dim, y_tensor, 1)
target_image[target_image == -1] = 1
self.prediction = _to_one_hot(target_image, 23).permute(2, 0, 1).unsqueeze(0)
def forward(self, x):
return self.prediction.float()
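if __name__ == "__main__":
    # Editor's sketch, not part of the original exercise code: a quick shape check.
    # Assumes 240x240 RGB inputs as used in the segmentation exercise; building
    # the model downloads the pretrained AlexNet weights on first use.
    model = SegmentationNN(num_classes=23)
    model.eval()
    with torch.no_grad():
        out = model(torch.randn(1, 3, 240, 240))
    print(out.shape)  # expected: torch.Size([1, 23, 240, 240])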
|
CornellLenard/Deep-Learning-Course-Exercises
|
Exercise 10/exercise_code/util/__init__.py
|
"""Util functions"""
from .vis_utils import visualizer
from .save_model import save_model
from .Util import checkParams, checkSize, test
|
CornellLenard/Deep-Learning-Course-Exercises
|
Exercise 08/exercise_code/tests/eval_utils.py
|
<gh_stars>1-10
import pickle
import os
import numpy as np
def evaluate(x):
sum_exp = np.sum(x)
if sum_exp > 4.53:
print('Hurray, you passed!! Now save your model and submit it!')
return 75
else:
print('I think you can do better...')
return 0
def save_pickle(data_dict, file_name):
"""Save given data dict to pickle file file_name in models/"""
directory = 'models'
if not os.path.exists(directory):
os.makedirs(directory)
    with open(os.path.join(directory, file_name), 'wb') as f:
        pickle.dump(data_dict, f, protocol=4)
|
CornellLenard/Deep-Learning-Course-Exercises
|
Exercise 09/exercise_code/tests/__init__.py
|
<gh_stars>0
"""Unit tests and evaluation tools"""
from .keypoint_nn_tests import test_keypoint_nn
|
CornellLenard/Deep-Learning-Course-Exercises
|
Exercise 09/exercise_code/util/vis_utils.py
|
<reponame>CornellLenard/Deep-Learning-Course-Exercises
"""Utils for visualizations in notebooks"""
import matplotlib.pyplot as plt
def show_all_keypoints(image, keypoints, pred_kpts=None):
"""Show image with predicted keypoints"""
image = (image.clone() * 255).view(96, 96)
plt.imshow(image, cmap='gray')
keypoints = keypoints.clone() * 48 + 48
plt.scatter(keypoints[:, 0], keypoints[:, 1], s=200, marker='.', c='m')
if pred_kpts is not None:
pred_kpts = pred_kpts.clone() * 48 + 48
plt.scatter(pred_kpts[:, 0], pred_kpts[:, 1], s=200, marker='.', c='r')
plt.show()
|
CornellLenard/Deep-Learning-Course-Exercises
|
Exercise 09/exercise_code/tests/spatial_batchnorm_tests.py
|
import numpy as np
from .base_tests import UnitTest, CompositeTest
from .. import layers
from .gradient_check import (
eval_numerical_gradient_array,
eval_numerical_gradient,
rel_error,
)
class SpatialBatchnormForwardTest(UnitTest):
def __init__(self, shape, mean, scale, beta, gamma, mode, test_name):
np.random.seed(0)
self.x = scale * np.random.randn(*shape) + mean
self.beta = beta
self.gamma = gamma
self.bn_param = {'mode' : mode}
self.test_name = test_name
# Check the test-time forward pass by running the training-time
# forward pass many times to warm up the running averages, and then
# checking the means and variances of activations after a test-time
# forward pass.
if mode == 'test':
self.bn_param['mode'] = 'train'
for t in range(50):
x = scale * np.random.randn(*shape) + mean
layers.spatial_batchnorm_forward(x, gamma, beta, self.bn_param)
self.bn_param['mode'] = 'test'
def test(self):
out, _ = layers.spatial_batchnorm_forward(
self.x, self.gamma, self.beta, self.bn_param)
out_mean = out.mean(axis=(0, 2, 3))
out_std = out.std(axis=(0, 2, 3))
atol = 1e-5 if self.bn_param['mode'] == 'train' else 0.15
return np.all(np.isclose(self.beta, out_mean, atol=atol)) and \
np.all(np.isclose(self.gamma, out_std, atol=atol))
def define_failure_message(self):
return '%s failed.' % self.test_name
def define_success_message(self):
return '%s passed.' % self.test_name
class SpatialBatchnormBackwardTest(UnitTest):
def __init__(self, shape, mean, scale, beta, gamma, mode):
np.random.seed(0)
self.x = scale * np.random.randn(*shape) + mean
self.dout = np.random.randn(*shape)
self.beta = beta
self.gamma = gamma
self.bn_param = {'mode' : mode}
def test(self):
fx = lambda x: layers.spatial_batchnorm_forward(
x, self.gamma, self.beta, self.bn_param)[0]
fg = lambda a: layers.spatial_batchnorm_forward(
self.x, a, self.beta, self.bn_param)[0]
fb = lambda b: layers.spatial_batchnorm_forward(
self.x, self.gamma, b, self.bn_param)[0]
dx_num = eval_numerical_gradient_array(fx, self.x, self.dout)
da_num = eval_numerical_gradient_array(fg, self.gamma, self.dout)
db_num = eval_numerical_gradient_array(fb, self.beta, self.dout)
_, cache = layers.spatial_batchnorm_forward(
self.x, self.gamma, self.beta, self.bn_param)
dx, dgamma, dbeta = layers.spatial_batchnorm_backward(
self.dout, cache)
return np.isclose(rel_error(dx_num, dx), 0, atol=1e-6) and \
np.isclose(rel_error(da_num, dgamma), 0, atol=1e-6) and \
np.isclose(rel_error(db_num, dbeta), 0, atol=1e-6)
class SpatialBatchnormForwardTests(CompositeTest):
def define_tests(self):
return [
SpatialBatchnormForwardTest(shape=(2, 3, 4, 5),
mean=10,
scale=4,
beta=np.zeros(3),
gamma=np.ones(3),
mode='train',
test_name='SpatialBatchnormForwardTest with trivial beta and gamma (train)'),
SpatialBatchnormForwardTest(shape=(2, 3, 4, 5),
mean=10,
scale=4,
beta=np.array([6, 7, 8]),
gamma=np.array([3, 4, 5]),
mode='train',
test_name='SpatialBatchnormForwardTest with nontrivial beta and gamma (train)'),
SpatialBatchnormForwardTest(shape=(10, 4, 11, 12),
mean=13,
scale=2.3,
beta=np.zeros(4),
gamma=np.ones(4),
mode='test',
test_name='SpatialBatchnormForwardTest with trivial beta and gamma (test)')
]
def define_failure_message(self):
return "Some tests failed for your spatial batchnorm implementation."
def define_success_message(self):
return "All tests passed for your spatial batchnorm implementation."
def test_spatial_batchnorm_forward():
SpatialBatchnormForwardTests()()
def test_spatial_batchnorm_backward():
SpatialBatchnormBackwardTest(shape=(2, 3, 4, 5),
mean=12,
scale=5,
beta=np.random.randn(3),
gamma=np.random.randn(3),
mode='train')()
|
CornellLenard/Deep-Learning-Course-Exercises
|
Exercise 10/exercise_code/tests/eval_utils.py
|
<reponame>CornellLenard/Deep-Learning-Course-Exercises
import pickle
import os
def save_pickle(data_dict, file_name):
"""Save given data dict to pickle file file_name in models/"""
directory = 'models'
if not os.path.exists(directory):
os.makedirs(directory)
    with open(os.path.join(directory, file_name), 'wb') as f:
        pickle.dump(data_dict, f, protocol=4)
|
CornellLenard/Deep-Learning-Course-Exercises
|
Exercise 09/exercise_code/networks/SpatialBatchNormModel.py
|
<gh_stars>0
import pytorch_lightning as pl
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.utils.data import DataLoader
import torchvision
import torchvision.transforms as transforms
import numpy as np
class AbstractNetwork(pl.LightningModule):
def general_step(self, batch, batch_idx, mode):
images, targets = batch
# load X, y to device!
images, targets = images.to(self.device), targets.to(self.device)
# forward pass
out = self.forward(images)
# loss
loss = F.cross_entropy(out, targets)
predictions = out.argmax(axis=1)
n_correct = (targets == predictions).sum()
return loss, n_correct
def general_end(self, outputs, mode):
# average over all batches aggregated during one epoch
avg_loss = torch.stack([x[mode + "_loss"] for x in outputs]).mean()
total_correct = torch.stack([x[mode + "_n_correct"] for x in outputs]).sum().cpu().numpy()
acc = total_correct / len(self.dataset[mode])
return avg_loss, acc
def training_step(self, batch, batch_idx):
loss, n_correct = self.general_step(batch, batch_idx, "train")
tensorboard_logs = {"train/loss": loss}
return {
"loss": loss,
"train_n_correct": n_correct,
"log": tensorboard_logs}
def validation_step(self, batch, batch_idx):
loss, n_correct = self.general_step(batch, batch_idx, "val")
return {"val_loss": loss, "val_n_correct": n_correct}
def test_step(self, batch, batch_idx):
loss, n_correct = self.general_step(batch, batch_idx, "test")
return {"test_loss": loss, "test_n_correct": n_correct}
def validation_epoch_end(self, outputs):
avg_loss, acc = self.general_end(outputs, "val")
print("Val-Acc={}".format(acc))
tensorboard_logs = {"val/loss": avg_loss, "val/acc": acc}
return {"val_loss": avg_loss, "val_acc": acc, "log": tensorboard_logs}
def prepare_data(self):
# create dataset
fashion_mnist_train = torchvision.datasets.FashionMNIST(
root="../datasets", train=True, transform=transforms.ToTensor(), download=True)
fashion_mnist_test = torchvision.datasets.FashionMNIST(
root="../datasets", train=False, transform=transforms.ToTensor())
torch.manual_seed(0)
N = len(fashion_mnist_train)
fashion_mnist_train, fashion_mnist_val = torch.utils.data.random_split(
fashion_mnist_train, [int(N * 0.8), int(N * 0.2)])
torch.manual_seed(torch.initial_seed())
# assign to use in data loaders
self.dataset = {}
self.dataset["train"], self.dataset["val"], self.dataset["test"] = fashion_mnist_train, fashion_mnist_val, fashion_mnist_test
def train_dataloader(self):
return DataLoader(
self.dataset["train"],
shuffle=True,
batch_size=self.batch_size)
def val_dataloader(self):
return DataLoader(self.dataset["val"], batch_size=self.batch_size)
def test_dataloader(self):
return DataLoader(self.dataset["test"], batch_size=self.batch_size)
def configure_optimizers(self):
optimizer = torch.optim.Adam(self.model.parameters(), self.learning_rate)
return optimizer
def getTestAcc(self, loader=None):
if not loader:
loader = self.test_dataloader()
scores = []
labels = []
for batch in loader:
X, y = batch
X, y = X.to(self.device), y.to(self.device)
score = self.forward(X)
scores.append(score.detach().cpu().numpy())
labels.append(y.detach().cpu().numpy())
scores = np.concatenate(scores, axis=0)
labels = np.concatenate(labels, axis=0)
predictions = scores.argmax(axis=1)
acc = (labels == predictions).mean()
return predictions, acc
class SimpleNetwork(AbstractNetwork):
def __init__(
self,
batch_size,
learning_rate,
num_classes=10):
super().__init__()
# set hyper parameters
self.batch_size = batch_size
self.learning_rate = learning_rate
self.model = nn.Sequential(
nn.Conv2d(1, 16, kernel_size=3, stride=1, padding=1),
nn.ReLU(),
nn.MaxPool2d(2, 2),
nn.Conv2d(16, 32, kernel_size=3, stride=1, padding=1),
nn.ReLU(),
nn.MaxPool2d(2, 2),
)
self.fc = nn.Linear(32 * 7 * 7, num_classes)
def forward(self, x):
# x.shape = [batch_size, 1, 28, 28]
# load to device!
x = x.to(self.device)
# feed x into model!
x = self.model(x)
x = x.view(x.shape[0], -1)
x = self.fc(x)
return x
class SpatialBatchNormNetwork(AbstractNetwork):
def __init__(
self,
batch_size,
learning_rate,
num_classes=10):
super().__init__()
# set hyper parameters
self.batch_size = batch_size
self.learning_rate = learning_rate
self.model = nn.Sequential(
nn.Conv2d(1, 16, kernel_size=3, stride=1, padding=1),
nn.BatchNorm2d(16),
nn.ReLU(),
nn.MaxPool2d(kernel_size=2, stride=2),
nn.Conv2d(16, 32, kernel_size=3, stride=1, padding=1),
nn.BatchNorm2d(32),
nn.ReLU(),
nn.MaxPool2d(2, 2),
)
self.fc = nn.Linear(32 * 7 * 7, num_classes)
def forward(self, x):
# x.shape = [batch_size, 1, 28, 28]
# load to device!
x = x.to(self.device)
# feed x into model!
x = self.model(x)
x = x.view(x.shape[0], -1)
x = self.fc(x)
return x
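if __name__ == "__main__":
    # Editor's sketch, not part of the original exercise code: the Lightning
    # modules above define their own dataloaders in prepare_data(), so a Trainer
    # can drive them directly. Assumes FashionMNIST can be downloaded to ../datasets.
    model = SpatialBatchNormNetwork(batch_size=64, learning_rate=1e-3)
    trainer = pl.Trainer(max_epochs=1)
    trainer.fit(model)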
|
CornellLenard/Deep-Learning-Course-Exercises
|
Exercise 08/exercise_code/tests/grad_tests.py
|
<filename>Exercise 08/exercise_code/tests/grad_tests.py
import numpy as np
from .base_tests import UnitTest, CompositeTest, test_results_to_score
import math
from .gradient_check_utils import eval_numerical_gradient_array
epsilon = 1e-7
class TestForwardPass(UnitTest):
    """Test whether the batchnorm forward pass normalizes its input correctly"""
    def __init__(self, layer):
        np.random.seed(0)
        self.layer = layer
        # Random data with non-zero mean and non-unit variance per feature.
        self.x = 4.0 * np.random.randn(200, 5) + 12.0
        self.gamma = np.ones(5)
        self.beta = np.zeros(5)
        self.bn_params = {'mode': 'train'}
        self.out = None
    def test(self):
        self.out, _ = self.layer(self.x, self.gamma, self.beta, self.bn_params)
        # With gamma=1 and beta=0 the normalized output should have
        # (approximately) zero mean and unit standard deviation per feature.
        return np.allclose(self.out.mean(axis=0), 0.0, atol=epsilon) and \
            np.allclose(self.out.std(axis=0), 1.0, atol=1e-4)
    def define_failure_message(self):
        return "Batchnorm forward output is not correctly normalized."
class RunAllBatchNormTests(CompositeTest):
    def define_tests(self, layer):
        return [
            TestForwardPass(layer),
        ]
    def define_success_message(self):
        return "Congratulations, you have passed all the unit tests!!!"
    def define_failure_message(self):
        return "Test cases are still failing!"
class BatchNormTestWrapper:
    def __init__(self, layer):
        self.bn_tests = RunAllBatchNormTests(layer)
    def __call__(self, *args, **kwargs):
        return "You secured a score of: " + str(test_results_to_score(self.bn_tests()))
|
CornellLenard/Deep-Learning-Course-Exercises
|
Exercise 09/exercise_code/tests/keypoint_nn_tests.py
|
<gh_stars>0
"""Tests for facial keypoint detection models"""
import os
import torch
from exercise_code.tests.base_tests import UnitTest, CompositeTest
from exercise_code.util.save_model import save_model
class KeypointShapeTest(UnitTest):
"""Test whether model returns correct keypoint shape"""
def __init__(
self, model, img_shape=(2, 1, 96, 96), kpts_shape=(2, 30)):
self.model = model
self.img_shape = img_shape
self.kpts_shape = kpts_shape
self.pred_shape = None
def test(self):
images = torch.randn(*self.img_shape) # simulate batch of images
preds = self.model(images)
self.pred_shape = tuple(list(torch.squeeze(preds).size()))
return self.pred_shape == self.kpts_shape
def define_failure_message(self):
return "The output of your model do not have the correct shape." \
" Expected shape %s, but received %s." \
% (self.kpts_shape, self.pred_shape)
def define_exception_message(self, exception):
return "Inferencing your model failed. Input was an image batch of" \
" size %s. Please make sure your model inherits from either" \
" torch.nn.Module or pytorch_lightning.LightningModule, and" \
" implements a working forward() function." % self.img_shape
class ParamCountTest(UnitTest):
"""Test whether number of model params smaller than limit"""
def __init__(self, model, limit=5e6):
self.model = model
self.limit = limit
self.n_params = 0
def test(self):
self.n_params = sum(p.numel() for p in self.model.parameters())
return self.n_params < self.limit
def define_success_message(self):
n_params_mio = self.n_params / 1e6
return "ParamCountTest passed. Your model has {:.3f} mio. params." \
.format(n_params_mio)
def define_failure_message(self):
n_params_mio = self.n_params / 1e6
limit_mio = self.limit / 1e6
return "Your model has {:.3f} mio. params but must have less than" \
" {:.3f} mio. params. Simplify your model before submitting" \
" it. You won't need that many params :)" \
.format(n_params_mio, limit_mio)
class FileSizeTest(UnitTest):
"""Test whether file size of saved model smaller than limit"""
def __init__(self, model, limit=20):
self.model = model
self.limit = limit
self.size = 0
def test(self):
model_path = save_model(self.model, "model.p", ".tmp")
size = os.path.getsize(model_path)
self.size = size / 1e6
return self.size < self.limit
def define_success_message(self):
return "FileSizeTest passed. Your model is %.1f MB large" % self.size
def define_failure_message(self):
return "Your model is too large! The size is {:.1f} MB, but it must" \
" be less than {:.1f} MB. Please simplify your model before" \
" submitting.".format(self.size, self.limit)
def define_exception_message(self, exception):
return "Your model could not be saved. lease make sure your model" \
" inherits from either torch.nn.Module or" \
" pytorch_lightning.LightningModule."
class KeypointModelTest(CompositeTest):
"""Composite test for KeypointModel"""
def define_tests(self, model):
return [
KeypointShapeTest(model),
ParamCountTest(model),
FileSizeTest(model)
]
def define_failure_message(self):
return "Some tests failed for your model."
def define_success_message(self):
return "All tests passed for your model."
def test_keypoint_nn(model):
"""Wrapper for KeypointModelTest"""
KeypointModelTest(model)()
|
CornellLenard/Deep-Learning-Course-Exercises
|
Exercise 08/exercise_code/image_folder_dataset.py
|
<reponame>CornellLenard/Deep-Learning-Course-Exercises
"""
Definition of ImageFolderDataset dataset class
"""
# pylint: disable=too-few-public-methods
import os
import torch
from .base_dataset import Dataset
class ImageFolderDataset(Dataset):
"""CIFAR-10 dataset class"""
def __init__(self, *args,
root=None,
images=None,
labels=None,
transform=None,
download_url="http://i2dl.vc.in.tum.de/static/data/mnist.zip",
**kwargs):
super().__init__(*args,
download_url=download_url,
root=root,
**kwargs)
print(download_url)
self.images = torch.load(os.path.join(root, images))
if labels is not None:
self.labels = torch.load(os.path.join(root, labels))
else:
self.labels = None
# transform function that we will apply later for data preprocessing
self.transform = transform
def __len__(self):
return len(self.images)
def __getitem__(self, index):
image = self.images[index]
if self.transform is not None:
image = self.transform(image)
if self.labels is not None:
return image, self.labels[index]
else:
return image
|
CornellLenard/Deep-Learning-Course-Exercises
|
Exercise 06/exercise_code/networks/__init__.py
|
"""Definition of all datasets"""
from .classification_net import MyOwnNetwork, ClassificationNet
from .loss import L1, MSE, BCE, CrossEntropyFromLogits
|
CornellLenard/Deep-Learning-Course-Exercises
|
Exercise 10/exercise_code/tests/__init__.py
|
<reponame>CornellLenard/Deep-Learning-Course-Exercises<filename>Exercise 10/exercise_code/tests/__init__.py
"""Unit tests and evaluation tools"""
from .segmentation_nn_tests import test_seg_nn
|
CornellLenard/Deep-Learning-Course-Exercises
|
Exercise 07/exercise_code/lightning_models.py
|
<gh_stars>0
import matplotlib.pyplot as plt
import pytorch_lightning as pl
import torch
import torch.nn.functional as F
import torch.nn as nn
import torchvision
from torchvision import transforms
from torch.utils.data import DataLoader, random_split
class TwoLayerNet(pl.LightningModule):
def __init__(self, hparams, input_size=1 * 28 * 28, hidden_size=512, num_classes=10):
super().__init__()
self.hparams = hparams
self.model = nn.Sequential(
nn.Linear(input_size, hidden_size),
nn.Sigmoid(),
nn.Linear(hidden_size, num_classes),
)
def forward(self, x):
# flatten the image before sending as input to the model
N, _, _, _ = x.shape
x = x.view(N, -1)
x = self.model(x)
return x
def training_step(self, batch, batch_idx):
images, targets = batch
# Perform a forward pass on the network with inputs
out = self.forward(images)
# calculate the loss with the network predictions and ground truth targets
loss = F.cross_entropy(out, targets)
        # Find the predicted class from the probabilities of the image belonging to each of the classes
# from the network output
_, preds = torch.max(out, 1)
# Calculate the accuracy of predictions
acc = preds.eq(targets).sum().float() / targets.size(0)
# Log the accuracy and loss values to the tensorboard
self.log('loss', loss)
self.log('acc', acc)
return {'loss': loss}
def validation_step(self, batch, batch_idx):
images, targets = batch
# Perform a forward pass on the network with inputs
out = self.forward(images)
# calculate the loss with the network predictions and ground truth targets
loss = F.cross_entropy(out, targets)
        # Find the predicted class from the probabilities of the image belonging to each of the classes
# from the network output
_, preds = torch.max(out, 1)
# Calculate the accuracy of predictions
acc = preds.eq(targets).sum().float() / targets.size(0)
# Visualise the predictions of the model
if batch_idx == 0:
self.visualize_predictions(images.detach(), out.detach(), targets)
return {'val_loss': loss, 'val_acc': acc}
def validation_epoch_end(self, outputs):
        # Average the loss over the entire validation data from its mini-batches
avg_loss = torch.stack([x['val_loss'] for x in outputs]).mean()
avg_acc = torch.stack([x['val_acc'] for x in outputs]).mean()
# Log the validation accuracy and loss values to the tensorboard
self.log('val_loss', avg_loss)
self.log('val_acc', avg_acc)
def configure_optimizers(self):
optim = torch.optim.SGD(self.model.parameters(
), self.hparams["learning_rate"], momentum=0.9)
return optim
def visualize_predictions(self, images, preds, targets):
# Helper function to help us visualize the predictions of the
# validation data by the model
class_names = ['t-shirts', 'trouser', 'pullover', 'dress',
'coat', 'sandal', 'shirt', 'sneaker', 'bag', 'ankle boot']
        # determine the size of the grid based on the given batch size
        num_rows = int(torch.tensor(len(images)).float().sqrt().floor())
        fig = plt.figure(figsize=(10, 10))
        for i in range(len(images)):
            plt.subplot(num_rows, len(images) // num_rows + 1, i + 1)
plt.imshow(images[i].cpu().numpy().squeeze(0))
plt.title(class_names[torch.argmax(preds, axis=-1)
[i]] + f'\n[{class_names[targets[i]]}]')
plt.axis('off')
self.logger.experiment.add_figure(
'predictions', fig, global_step=self.global_step)
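if __name__ == "__main__":
    # Editor's sketch, not part of the original exercise code: this module
    # defines no dataloaders itself, so they are passed to Trainer.fit. The
    # "learning_rate" key is the one read in configure_optimizers above.
    hparams = {"learning_rate": 1e-2}
    model = TwoLayerNet(hparams)
    transform = transforms.ToTensor()
    train_set = torchvision.datasets.FashionMNIST("../datasets", train=True, download=True, transform=transform)
    val_set = torchvision.datasets.FashionMNIST("../datasets", train=False, download=True, transform=transform)
    trainer = pl.Trainer(max_epochs=1)
    trainer.fit(model, DataLoader(train_set, batch_size=64, shuffle=True), DataLoader(val_set, batch_size=64))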
|
CornellLenard/Deep-Learning-Course-Exercises
|
Exercise 08/exercise_code/models.py
|
<reponame>CornellLenard/Deep-Learning-Course-Exercises
import pytorch_lightning as pl
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.utils.data import DataLoader
import numpy as np
class Encoder(nn.Module):
def __init__(self, hparams, input_size=28 * 28, latent_dim=24):
super().__init__()
# set hyper parameters
self.latent_dim = latent_dim
self.input_size = input_size
self.hparams = hparams
########################################################################
# TODO: Initialize your encoder! #
########################################################################
self.n_hidden = self.hparams["n_hidden"]
self.encoder = nn.Sequential(
nn.Linear(input_size, self.n_hidden),
nn.BatchNorm1d(self.n_hidden),
nn.LeakyReLU(),
nn.Dropout(p=0.5),
nn.Linear(self.n_hidden, self.n_hidden),
nn.BatchNorm1d(self.n_hidden),
nn.LeakyReLU(),
nn.Dropout(p=0.5),
nn.Linear(self.n_hidden, self.latent_dim)
)
for m in self.encoder.modules():
if isinstance(m, nn.Linear):
nn.init.xavier_uniform_(m.weight)
########################################################################
# END OF YOUR CODE #
########################################################################
def forward(self, x):
# feed x into encoder!
return self.encoder(x)
class Decoder(nn.Module):
def __init__(self, hparams, latent_dim=24, output_size=28 * 28):
super().__init__()
# set hyper parameters
self.hparams = hparams
self.output_size = output_size
self.latent_dim = latent_dim
########################################################################
# TODO: Initialize your decoder! #
########################################################################
self.n_hidden = self.hparams["n_hidden"]
self.decoder = nn.Sequential(
nn.Linear(self.latent_dim, self.n_hidden),
nn.BatchNorm1d(self.n_hidden),
nn.LeakyReLU(),
nn.Dropout(p=0.5),
nn.Linear(self.n_hidden, self.n_hidden),
nn.BatchNorm1d(self.n_hidden),
nn.LeakyReLU(),
nn.Dropout(p=0.5),
nn.Linear(self.n_hidden, self.output_size)
)
for m in self.decoder.modules():
if isinstance(m, nn.Linear):
nn.init.xavier_uniform_(m.weight)
########################################################################
# END OF YOUR CODE #
########################################################################
def forward(self, x):
# feed x into decoder!
return self.decoder(x)
class Autoencoder(pl.LightningModule):
def __init__(self, hparams, encoder, decoder, train_set, val_set, logger):
super().__init__()
self.hparams = hparams
# set hyper parameters
self.encoder = encoder
self.decoder = decoder
self.train_set = train_set
self.val_set = val_set
self.logger = logger
self.model = nn.Sequential(
self.encoder,
self.decoder
)
def forward(self, x):
########################################################################
# TODO: Feed the input image to your encoder to generate the latent #
# vector. Then decode the latent vector and get your reconstruction #
# of the input. #
########################################################################
reconstruction = self.model(x)
########################################################################
# END OF YOUR CODE #
########################################################################
return reconstruction
def general_step(self, batch, batch_idx, mode):
images = batch
flattened_images = images.view(images.shape[0], -1)
# forward pass
reconstruction = self.forward(flattened_images)
# loss
loss = F.mse_loss(reconstruction, flattened_images)
return loss, reconstruction
def general_end(self, outputs, mode):
# average over all batches aggregated during one epoch
avg_loss = torch.stack([x[mode + "_loss"] for x in outputs]).mean()
return avg_loss
def training_step(self, batch, batch_idx):
loss, _ = self.general_step(batch, batch_idx, "train")
tensorboard_logs = {"loss": loss}
return {"loss": loss, "log": tensorboard_logs}
def validation_step(self, batch, batch_idx):
images = batch
flattened_images = images.view(images.shape[0], -1)
reconstruction = self.forward(flattened_images)
loss = F.mse_loss(reconstruction, flattened_images)
reconstruction = reconstruction.view(reconstruction.shape[0], 28, 28).cpu().numpy()
images = np.zeros((len(reconstruction), 3, 28, 28))
for i in range(len(reconstruction)):
images[i, 0] = reconstruction[i]
images[i, 2] = reconstruction[i]
images[i, 1] = reconstruction[i]
self.logger.experiment.add_images("reconstructions", images, self.current_epoch, dataformats="NCHW")
return loss
def train_dataloader(self):
return torch.utils.data.DataLoader(self.train_set, shuffle=True, batch_size=self.hparams["batch_size"])
def val_dataloader(self):
return torch.utils.data.DataLoader(self.val_set, batch_size=self.hparams["batch_size"])
def configure_optimizers(self):
########################################################################
# TODO: Define your optimizer. #
########################################################################
optimizer = torch.optim.Adam(self.model.parameters(), lr=self.hparams["learning_rate"])
########################################################################
# END OF YOUR CODE #
########################################################################
return optimizer
def getReconstructions(self, loader=None):
self.eval()
self = self.to(self.device)
if not loader:
loader = self.val_dataloader()
reconstructions = []
for batch in loader:
X = batch
X = X.to(self.device)
flattened_X = X.view(X.shape[0], -1)
reconstruction = self.forward(flattened_X)
reconstructions.append(
reconstruction.view(-1, 28, 28).cpu().detach().numpy())
return np.concatenate(reconstructions, axis=0)
class Classifier(pl.LightningModule):
def __init__(self, hparams, encoder, train_set=None, val_set=None, test_set=None):
super().__init__()
# set hyper parameters
self.hparams = hparams
self.encoder = encoder
self.model = nn.Identity()
self.data = {"train": train_set,
"val": val_set,
"test": test_set}
########################################################################
# TODO: Initialize your classifier! #
# Remember that it must have the same input size as the output size #
# of your encoder #
########################################################################
self.n_hidden = self.hparams["n_hidden"]
self.classifier = nn.Sequential(
nn.Linear(encoder.latent_dim, self.n_hidden),
nn.BatchNorm1d(self.n_hidden),
nn.LeakyReLU(),
nn.Dropout(p=0.5),
nn.Linear(self.n_hidden, self.n_hidden),
nn.BatchNorm1d(self.n_hidden),
nn.LeakyReLU(),
nn.Dropout(p=0.5),
nn.Linear(self.n_hidden, 10)
)
for m in self.classifier.modules():
if isinstance(m, nn.Linear):
nn.init.xavier_uniform_(m.weight)
self.model = nn.Sequential(
self.encoder,
self.classifier
)
########################################################################
# END OF YOUR CODE #
########################################################################
def forward(self, x):
x = self.model(x)
return x
def general_step(self, batch, batch_idx, mode):
images, targets = batch
flattened_images = images.view(images.shape[0], -1)
# forward pass
out = self.forward(flattened_images)
# loss
loss = F.cross_entropy(out, targets)
predictions = out.argmax(axis=1)
n_correct = (targets == predictions).sum()
return loss, n_correct
def general_end(self, outputs, mode):
# average over all batches aggregated during one epoch
avg_loss = torch.stack([x[mode + "_loss"] for x in outputs]).mean()
total_correct = torch.stack([x[mode + "_n_correct"] for x in outputs]).sum().cpu().numpy()
acc = total_correct / len(self.data[mode])
return avg_loss, acc
def training_step(self, batch, batch_idx):
loss, n_correct = self.general_step(batch, batch_idx, "train")
tensorboard_logs = {"loss": loss}
return {"loss": loss, "train_n_correct": n_correct, "log": tensorboard_logs}
def validation_step(self, batch, batch_idx):
loss, n_correct = self.general_step(batch, batch_idx, "val")
return {"val_loss": loss, "val_n_correct": n_correct}
def test_step(self, batch, batch_idx):
loss, n_correct = self.general_step(batch, batch_idx, "test")
return {"test_loss": loss, "test_n_correct": n_correct}
def validation_end(self, outputs):
avg_loss, acc = self.general_end(outputs, "val")
# print("Val-Acc={}".format(acc))
tensorboard_logs = {"val_loss": avg_loss, "val_acc": acc}
return {"val_loss": avg_loss, "val_acc": acc, "log": tensorboard_logs}
def train_dataloader(self):
return torch.utils.data.DataLoader(self.data["train"], shuffle=True, batch_size=self.hparams["batch_size"])
def val_dataloader(self):
return torch.utils.data.DataLoader(self.data["val"], batch_size=self.hparams["batch_size"])
def test_dataloader(self):
return torch.utils.data.DataLoader(self.data["test"], batch_size=self.hparams["batch_size"])
def configure_optimizers(self):
########################################################################
# TODO: Define your optimizer. #
########################################################################
optimizer = torch.optim.Adam(self.model.parameters(), lr=self.hparams["learning_rate"])
########################################################################
# END OF YOUR CODE #
########################################################################
return optimizer
def getAcc(self, loader=None):
self.eval()
self = self.to(self.device)
if not loader:
loader = self.test_dataloader()
scores = []
labels = []
for batch in loader:
X, y = batch
X = X.to(self.device)
flattened_X = X.view(X.shape[0], -1)
score = self.forward(flattened_X)
scores.append(score.detach().cpu().numpy())
labels.append(y.detach().cpu().numpy())
scores = np.concatenate(scores, axis=0)
labels = np.concatenate(labels, axis=0)
predictions = scores.argmax(axis=1)
acc = (labels == predictions).mean()
return predictions, acc
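if __name__ == "__main__":
    # Editor's sketch, not part of the original exercise code: wiring the pieces
    # together. "n_hidden", "batch_size" and "learning_rate" are the hparams keys
    # the classes above actually read.
    hparams = {"n_hidden": 256, "batch_size": 64, "learning_rate": 1e-3}
    encoder = Encoder(hparams, input_size=28 * 28, latent_dim=24)
    classifier = Classifier(hparams, encoder)
    logits = classifier(torch.randn(8, 28 * 28))
    print(logits.shape)  # expected: torch.Size([8, 10])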
|
CornellLenard/Deep-Learning-Course-Exercises
|
Exercise 09/exercise_code/networks/keypoint_nn.py
|
<gh_stars>0
"""Models for facial keypoint detection"""
import torch
import torch.nn as nn
from torch.nn import functional as F
import pytorch_lightning as pl
class ResidualBlock(nn.Module):
def __init__(self, in_channel_num, out_channel_num, use_11conv=False, stride=1):
super(ResidualBlock, self).__init__()
self.main_path = nn.Sequential(
nn.Conv2d(in_channel_num, out_channel_num, kernel_size=3, padding=1, stride=stride),
nn.BatchNorm2d(out_channel_num),
nn.ReLU(),
nn.Conv2d(out_channel_num, out_channel_num, kernel_size=3, padding=1),
nn.BatchNorm2d(out_channel_num)
)
if use_11conv:
self.side_path = nn.Conv2d(in_channel_num, out_channel_num, kernel_size=1, stride=stride)
else:
self.side_path = None
def forward(self, x):
if self.side_path:
return F.relu(self.main_path(x) + self.side_path(x))
else:
return F.relu(self.main_path(x) + x)
class KeypointModel(nn.Module):
"""Facial keypoint detection model"""
def __init__(self, hparams):
"""
Initialize your model from a given dict containing all your hyper parameters
Warning: Don't change the method declaration (i.e. by adding more
arguments), otherwise it might not work on the submission server
"""
super(KeypointModel, self).__init__()
self.hparams = hparams
########################################################################
# TODO: Define all the layers of your CNN, the only requirements are: #
# 1. The network takes in a batch of images of shape (Nx1x96x96) #
# 2. It ends with a linear layer that represents the key points. #
# Thus, the output layer needs to have shape (Nx30), #
# with 2 values representing each of the 15 keypoint (x, y) pairs #
# #
# Some layers you might consider including: #
# max pooling layers, multiple conv layers, fully-connected layers, #
# and other layers (such as dropout or batch normalization) to avoid #
# over fitting. #
########################################################################
self.block1 = nn.Sequential(
nn.Conv2d(1, 64, kernel_size=7, padding=3, stride=2),
nn.BatchNorm2d(64),
nn.ReLU(),
nn.MaxPool2d(kernel_size=3, padding=1, stride=2)
)
self.block2 = nn.Sequential(*self.resnet_block(64, 64, 2, first_block=True))
self.block3 = nn.Sequential(*self.resnet_block(64, 128, 2))
self.block4 = nn.Sequential(*self.resnet_block(128, 224, 2))
self.net = nn.Sequential(
self.block1,
self.block2,
self.block3,
self.block4,
nn.AdaptiveAvgPool2d((1, 1)),
nn.Flatten(),
nn.Linear(224, 30)
)
self.initialize()
########################################################################
# END OF YOUR CODE #
########################################################################
def resnet_block(self, in_channel_num, out_channel_num, residual_block_num, first_block=False):
blocks = []
for i in range(0, residual_block_num):
if i == 0 and not first_block:
blocks.append(ResidualBlock(in_channel_num, out_channel_num, use_11conv=True, stride=2))
else:
blocks.append(ResidualBlock(out_channel_num, out_channel_num))
return blocks
def initialize(self):
for m in self.modules():
if isinstance(m, nn.Linear):
nn.init.xavier_uniform_(m.weight)
elif isinstance(m, nn.Conv2d):
nn.init.kaiming_uniform_(m.weight, nonlinearity="relu")
elif isinstance(m, nn.BatchNorm2d):
nn.init.constant_(m.weight, 1)
nn.init.constant_(m.bias, 0)
def forward(self, x):
# check dimensions to use show_keypoint_predictions later
if x.dim() == 3:
x = torch.unsqueeze(x, 0)
########################################################################
# TODO: Define the forward pass behavior of your model #
# for an input image x, forward(x) should return the #
# corresponding predicted key points #
########################################################################
x = self.net(x)
########################################################################
# END OF YOUR CODE #
########################################################################
return x
class DummyKeypointModel(pl.LightningModule):
"""Dummy model always predicting the key points of the first train sample"""
def __init__(self):
super().__init__()
self.prediction = torch.tensor([[
0.4685, -0.2319,
-0.4253, -0.1953,
0.2908, -0.2214,
0.5992, -0.2214,
-0.2685, -0.2109,
-0.5873, -0.1900,
0.1967, -0.3827,
0.7656, -0.4295,
-0.2035, -0.3758,
-0.7389, -0.3573,
0.0086, 0.2333,
0.4163, 0.6620,
-0.3521, 0.6985,
0.0138, 0.6045,
0.0190, 0.9076,
]])
def forward(self, x):
return self.prediction.repeat(x.size()[0], 1, 1, 1)
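if __name__ == "__main__":
    # Editor's sketch, not part of the original exercise code: the keypoint tests
    # feed batches of shape (N, 1, 96, 96) and expect (N, 30) outputs. The hparams
    # dict is only stored here, so an empty placeholder is enough for a shape check.
    model = KeypointModel({})
    model.eval()
    with torch.no_grad():
        print(model(torch.randn(2, 1, 96, 96)).shape)  # expected: torch.Size([2, 30])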
|
CornellLenard/Deep-Learning-Course-Exercises
|
Exercise 09/exercise_code/layers.py
|
import numpy as np
def batchnorm_forward(x, gamma, beta, bn_param):
"""
Forward pass for batch normalization.
During training the sample mean and (uncorrected) sample variance are
computed from mini-batch statistics and used to normalize the incoming data.
During training we also keep an exponentially decaying running mean of the mean
and variance of each feature, and these averages are used to normalize data
at test-time.
At each time step we update the running averages for mean and variance using
an exponential decay based on the momentum parameter:
running_mean = momentum * running_mean + (1 - momentum) * sample_mean
running_var = momentum * running_var + (1 - momentum) * sample_var
Note that the batch normalization paper suggests a different test-time
behavior: they compute sample mean and variance for each feature using a
large number of training images rather than using a running average. For
this implementation we have chosen to use running averages instead since
they do not require an additional estimation step; the torch7 implementation
of batch normalization also uses running averages.
Input:
- x: Data of shape (N, D)
- gamma: Scale parameter of shape (D,)
- beta: Shift parameter of shape (D,)
- bn_param: Dictionary with the following keys:
- mode: 'train' or 'test'; required
- eps: Constant for numeric stability
- momentum: Constant for running mean / variance.
- running_mean: Array of shape (D,) giving running mean of features
- running_var Array of shape (D,) giving running variance of features
Returns a tuple of:
- out: of shape (N, D)
- cache: A tuple of values needed in the backward pass
"""
mode = bn_param["mode"]
eps = bn_param.get("eps", 1e-5)
momentum = bn_param.get("momentum", 0.9)
N, D = x.shape
running_mean = bn_param.get("running_mean", np.zeros(D, dtype=x.dtype))
running_var = bn_param.get("running_var", np.zeros(D, dtype=x.dtype))
out, cache = None, None
if mode == "train":
sample_mean = np.mean(x, axis=0)
x_minus_mean = x - sample_mean
sq = x_minus_mean ** 2
var = 1.0 / N * np.sum(sq, axis=0)
std = np.sqrt(var + eps)
ivar = 1.0 / std
x_norm = x_minus_mean * ivar
gamma_x = gamma * x_norm
out = gamma_x + beta
running_var = momentum * running_var + (1 - momentum) * var
running_mean = momentum * running_mean + (1 - momentum) * sample_mean
cache = (out, x_norm, beta, gamma, x_minus_mean, ivar, std, var, eps)
elif mode == "test":
        x = (x - running_mean) / np.sqrt(running_var + eps)
out = x * gamma + beta
else:
raise ValueError('Invalid forward batchnorm mode "%s"' % mode)
# Store the updated running means back into bn_param
bn_param["running_mean"] = running_mean
bn_param["running_var"] = running_var
return out, cache
def batchnorm_backward(d_out, cache):
"""
Backward pass for batch normalization.
For this implementation, you should write out a computation graph for
batch normalization on paper and propagate gradients backward through
intermediate nodes.
Inputs:
- d_out: Upstream derivatives, of shape (N, D)
- cache: Variable of intermediates from batchnorm_forward.
Returns a tuple of:
- dx: Gradient with respect to inputs x, of shape (N, D)
- d_gamma: Gradient with respect to scale parameter gamma, of shape (D,)
- d_beta: Gradient with respect to shift parameter beta, of shape (D,)
"""
N, D = d_out.shape
out, x_norm, beta, gamma, xmu, ivar, std, var, eps = cache
dx_norm = d_out * gamma
d_ivar = np.sum(dx_norm * xmu, axis=0)
dx_mu1 = dx_norm * ivar
d_std = -1.0 / (std ** 2) * d_ivar
d_var = 0.5 * 1. / np.sqrt(var + eps) * d_std
dsq = 1.0 / N * np.ones((N, D)) * d_var
dx_mu2 = 2 * xmu * dsq
dx1 = dx_mu1 + dx_mu2
d_mean = -1.0 * np.sum(dx1, axis=0)
dx2 = 1. / N * np.ones((N, D)) * d_mean
dx = dx1 + dx2
d_beta = np.sum(d_out, axis=0)
d_gamma = np.sum(d_out * x_norm, axis=0)
return dx, d_gamma, d_beta
def batchnorm_backward_alt(d_out, cache):
"""
Alternative backward pass for batch normalization.
For this implementation you should work out the derivatives for the batch
normalization backward pass on paper and simplify as much as possible. You
should be able to derive a simple expression for the backward pass.
Note: This implementation should expect to receive the same cache variable
as batchnorm_backward, but might not use all of the values in the cache.
Inputs / outputs: Same as batchnorm_backward
"""
out, x_norm, beta, gamma, xmu, ivar, std, var, eps = cache
N, D = d_out.shape
    d_gamma = np.sum(d_out * x_norm, axis=0)
    d_beta = np.sum(d_out, axis=0)
    # Simplified closed-form expression for the input gradient:
    # dx = gamma / (N * std) * (N * d_out - d_beta - x_norm * d_gamma)
    dx = gamma * ivar / N * (N * d_out - d_beta - x_norm * d_gamma)
return dx, d_gamma, d_beta
def spatial_batchnorm_forward(x, gamma, beta, bn_param):
"""
Computes the forward pass for spatial batch normalization.
Inputs:
- x: Input data of shape (N, C, H, W)
- gamma: Scale parameter, of shape (C,)
- beta: Shift parameter, of shape (C,)
- bn_param: Dictionary with the following keys:
- mode: 'train' or 'test'; required
- eps: Constant for numeric stability
- momentum: Constant for running mean / variance. momentum=0 means that
old information is discarded completely at every time step, while
momentum=1 means that new information is never incorporated. The
default of momentum=0.9 should work well in most situations.
    - running_mean: Array of shape (C,) giving running mean of features
    - running_var: Array of shape (C,) giving running variance of features
Returns a tuple of:
- out: Output data, of shape (N, C, H, W)
- cache: Values needed for the backward pass
"""
########################################################################
# TODO: Implement the forward pass for spatial batch normalization. #
# #
# HINT: You can implement spatial batch normalization using the #
# vanilla version of batch normalization defined above. Your #
# implementation should be very short; ours is less than six lines. #
########################################################################
x_swapped = np.transpose(x, (0, 2, 3, 1))
x_swapped_reshaped = np.reshape(x_swapped, (-1, x_swapped.shape[-1]))
out_temp, cache = batchnorm_forward(x_swapped_reshaped, gamma, beta, bn_param)
out = np.transpose(np.reshape(out_temp, x_swapped.shape), (0, 3, 1, 2))
########################################################################
# END OF YOUR CODE #
########################################################################
return out, cache
def spatial_batchnorm_backward(d_out, cache):
"""
Computes the backward pass for spatial batch normalization.
Inputs:
- d_out: Upstream derivatives, of shape (N, C, H, W)
- cache: Values from the forward pass
Returns a tuple of:
- dx: Gradient with respect to inputs, of shape (N, C, H, W)
- d_gamma: Gradient with respect to scale parameter, of shape (C,)
- d_beta: Gradient with respect to shift parameter, of shape (C,)
"""
########################################################################
# TODO: Implement the backward pass for spatial batch normalization. #
# #
# HINT: You can implement spatial batch normalization using the #
# vanilla version of batch normalization defined above. Your #
# implementation should be very short; ours is less than six lines. #
########################################################################
d_out_swapped = np.transpose(d_out, (0, 2, 3, 1))
d_out_swapped_reshaped = np.reshape(d_out_swapped, (-1, d_out_swapped.shape[-1]))
dx_sr, d_gamma, d_beta = batchnorm_backward(d_out_swapped_reshaped, cache)
dx = np.transpose(np.reshape(dx_sr, d_out_swapped.shape), (0, 3, 1, 2))
########################################################################
# END OF YOUR CODE #
########################################################################
return dx, d_gamma, d_beta
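if __name__ == "__main__":
    # Editor's sketch, not part of the original exercise code: in train mode with
    # gamma=1 and beta=0, every channel of the output should be roughly zero-mean
    # with unit standard deviation, mirroring the spatial batchnorm unit tests.
    np.random.seed(0)
    x = 4.0 * np.random.randn(2, 3, 4, 5) + 10.0
    out, _ = spatial_batchnorm_forward(x, np.ones(3), np.zeros(3), {"mode": "train"})
    print(out.mean(axis=(0, 2, 3)))  # ~[0, 0, 0]
    print(out.std(axis=(0, 2, 3)))   # ~[1, 1, 1]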
|
CornellLenard/Deep-Learning-Course-Exercises
|
Exercise 08/exercise_code/tests/__init__.py
|
"""Define tests, sanity checks, and evaluation"""
from .grad_tests import *
|
CornellLenard/Deep-Learning-Course-Exercises
|
Exercise 09/exercise_code/networks/__init__.py
|
"""Neural network definitions"""
from .keypoint_nn import (
KeypointModel,
DummyKeypointModel
)
|
CornellLenard/Deep-Learning-Course-Exercises
|
Exercise 07/exercise_code/MyPytorchModel.py
|
<reponame>CornellLenard/Deep-Learning-Course-Exercises
import pytorch_lightning as pl
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.utils.data import DataLoader
from torch.utils.data.sampler import SubsetRandomSampler
import torchvision
import torchvision.transforms as transforms
import numpy as np
class MyPytorchModel(pl.LightningModule):
def __init__(self, hparams, input_size=3 * 32 * 32, num_classes=10):
super().__init__()
# set hyper-parameters
self.hparams = hparams
########################################################################
# TODO: Initialize your model! #
########################################################################
n_hidden = self.hparams["n_hidden"]
self.lr_decay = self.hparams["lr_decay"]
self.model = nn.Sequential(
nn.Linear(input_size, n_hidden),
nn.BatchNorm1d(n_hidden),
nn.LeakyReLU(),
nn.Linear(n_hidden, n_hidden),
nn.BatchNorm1d(n_hidden),
nn.LeakyReLU(),
nn.Linear(n_hidden, num_classes)
)
for m in self.model.modules():
if isinstance(m, nn.Linear):
nn.init.xavier_uniform_(m.weight)
########################################################################
# END OF YOUR CODE #
########################################################################
def forward(self, x):
# x.shape = [batch_size, 3, 32, 32] -> flatten the image first
x = x.view(x.shape[0], -1)
# feed x into model!
x = self.model(x)
return x
def general_step(self, batch, batch_idx, mode):
images, targets = batch
# forward pass
out = self.forward(images)
# loss
loss = F.cross_entropy(out, targets)
predictions = out.argmax(axis=1)
n_correct = (targets == predictions).sum()
return loss, n_correct
def general_end(self, outputs, mode):
# average over all batches aggregated during one epoch
avg_loss = torch.stack([x[mode + "_loss"] for x in outputs]).mean()
total_correct = torch.stack([x[mode + "_n_correct"] for x in outputs]).sum().cpu().numpy()
acc = total_correct / len(self.sampler[mode])
return avg_loss, acc
def training_step(self, batch, batch_idx):
loss, n_correct = self.general_step(batch, batch_idx, "train")
# tensorboard_logs = {'loss': loss}
self.log("loss", loss)
return {"loss": loss, "train_n_correct": n_correct} # , 'log': tensorboard_logs}
def validation_step(self, batch, batch_idx):
loss, n_correct = self.general_step(batch, batch_idx, "val")
self.log("val_loss", loss)
return {"val_loss": loss, "val_n_correct": n_correct}
def test_step(self, batch, batch_idx):
loss, n_correct = self.general_step(batch, batch_idx, "test")
return {"test_loss": loss, "test_n_correct": n_correct}
def validation_end(self, outputs):
avg_loss, acc = self.general_end(outputs, "val")
# print("Val-Acc={}".format(acc))
# tensorboard_logs = {'val_loss': avg_loss}
self.log("val_loss", avg_loss)
self.log("val_acc", acc)
return {"val_loss": avg_loss, "val_acc": acc} # , 'log': tensorboard_logs}
def prepare_data(self):
# create dataset
CIFAR_ROOT = "../datasets/cifar10"
mean = [0.485, 0.456, 0.406]
std = [0.229, 0.224, 0.225]
########################################################################
# TODO: Define your transforms (convert to tensors, normalize). #
# If you want, you can also perform data augmentation! #
########################################################################
my_transform = transforms.Compose([transforms.RandomHorizontalFlip(p=0.5),
transforms.ToTensor(),
transforms.Normalize(mean, std)])
########################################################################
# END OF YOUR CODE #
########################################################################
train_val_transform = transforms.Compose([transforms.ToTensor(), transforms.Normalize(mean, std)])
cifar_complete_augmented = torchvision.datasets.ImageFolder(root=CIFAR_ROOT, transform=my_transform)
cifar_complete_train_val = torchvision.datasets.ImageFolder(root=CIFAR_ROOT, transform=train_val_transform)
N = len(cifar_complete_augmented)
num_train, num_val = int(N * 0.9), int(N * 0.05)
np.random.seed(0)
indices = np.random.permutation(N)
train_idx = indices[:num_train]
val_idx = indices[num_train:num_train + num_val]
test_idx = indices[num_train + num_val:]
train_sampler = SubsetRandomSampler(train_idx)
val_sampler = SubsetRandomSampler(val_idx)
test_sampler = SubsetRandomSampler(test_idx)
self.sampler = {"train": train_sampler, "val": val_sampler, "test": test_sampler}
# assign to use in data loaders
self.dataset = {}
self.dataset["train"] = cifar_complete_augmented
self.dataset["val"] = cifar_complete_train_val
self.dataset["test"] = cifar_complete_train_val
def train_dataloader(self):
return DataLoader(self.dataset["train"], batch_size=self.hparams["batch_size"], sampler=self.sampler["train"])
def val_dataloader(self):
return DataLoader(self.dataset["val"], batch_size=self.hparams["batch_size"], sampler=self.sampler["val"])
def test_dataloader(self):
return DataLoader(self.dataset["test"], batch_size=self.hparams["batch_size"], sampler=self.sampler["test"])
def configure_optimizers(self):
########################################################################
# TODO: Define your optimizer. #
########################################################################
optimizer = torch.optim.Adam(self.model.parameters(), lr=self.hparams["learning_rate"])
scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau(optimizer, mode="min", factor=self.lr_decay, patience=1)
optim = {"optimizer": optimizer, "scheduler": scheduler, "monitor": "val_loss"}
########################################################################
# END OF YOUR CODE #
########################################################################
return optim
def getTestAcc(self, loader=None):
self.model.eval()
self.model = self.model.to(self.device)
if not loader:
loader = self.test_dataloader()
scores = []
labels = []
for batch in loader:
X, y = batch
X = X.to(self.device)
score = self.forward(X)
scores.append(score.detach().cpu().numpy())
labels.append(y.detach().cpu().numpy())
scores = np.concatenate(scores, axis=0)
labels = np.concatenate(labels, axis=0)
predictions = scores.argmax(axis=1)
acc = (labels == predictions).mean()
return predictions, acc
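# Hedged usage sketch (added for illustration, not part of the original exercise
# code): how this LightningModule might be trained end to end. The hyper-parameter
# values below are illustrative assumptions, not the course's reference settings,
# and the run assumes the CIFAR-10 folder referenced in prepare_data() exists and
# that the installed pytorch_lightning version accepts the hparams handling above.
if __name__ == "__main__":
    hparams = {
        "n_hidden": 512,
        "lr_decay": 0.5,
        "batch_size": 128,
        "learning_rate": 1e-3,
    }
    model = MyPytorchModel(hparams)
    trainer = pl.Trainer(max_epochs=5)
    trainer.fit(model)
    _, test_acc = model.getTestAcc()
    print("Test accuracy: {:.3f}".format(test_acc))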
|
CornellLenard/Deep-Learning-Course-Exercises
|
Exercise 07/exercise_code/data/base_dataset.py
|
<filename>Exercise 07/exercise_code/data/base_dataset.py
"""Dataset Base Class"""
from abc import ABC, abstractmethod
from .download_utils import download_dataset
class Dataset(ABC):
"""
Abstract Dataset Base Class
All subclasses must define __getitem__() and __len__()
"""
def __init__(self, root, download_url=None, force_download=False):
self.root_path = root
# The actual archive name should be all the text of the url after the
# last '/'.
if download_url is not None:
dataset_zip_name = download_url[download_url.rfind('/')+1:]
self.dataset_zip_name = dataset_zip_name
download_dataset(
url=download_url,
data_dir=root,
dataset_zip_name=dataset_zip_name,
force_download=force_download,
)
@abstractmethod
def __getitem__(self, index):
"""Return data sample at given index"""
@abstractmethod
def __len__(self):
"""Return size of the dataset"""
class DummyDataset(Dataset):
"""
Simple dummy dataset
Contains all integers from 1 to a given limit which are divisible by a given divisor
"""
def __init__(self, divisor, limit, **kwargs):
"""
:param divisor: common divisor of all integers in the dataset
:param limit: upper limit of integers in the dataset
"""
super().__init__(**kwargs)
self.data = [i for i in range(1, limit + 1) if i % divisor == 0]
def __len__(self):
return len(self.data)
def __getitem__(self, index):
return {"data": self.data[index]}
|
CornellLenard/Deep-Learning-Course-Exercises
|
Exercise 09/exercise_code/data/facial_keypoints_dataset.py
|
"""Dataset for facial keypoint detection"""
import os
import pandas as pd
import numpy as np
import torch
from .base_dataset import BaseDataset
class FacialKeypointsDataset(BaseDataset):
"""Dataset for facial keypoint detection"""
def __init__(self, *args, train=True, transform=None, **kwargs):
super().__init__(*args, **kwargs)
file_name = "training.csv" if train else "val.csv"
csv_file = os.path.join(self.root_path, file_name)
self.key_pts_frame = pd.read_csv(csv_file)
self.key_pts_frame.dropna(inplace=True)
self.key_pts_frame.reset_index(drop=True, inplace=True)
self.transform = transform
@staticmethod
def _get_image(idx, key_pts_frame):
img_str = key_pts_frame.loc[idx]['Image']
img = np.array([
int(item) for item in img_str.split()
]).reshape((96, 96))
return np.expand_dims(img, axis=2).astype(np.uint8)
@staticmethod
def _get_keypoints(idx, key_pts_frame, shape=(15, 2)):
keypoint_cols = list(key_pts_frame.columns)[:-1]
key_pts = key_pts_frame.iloc[idx][keypoint_cols].values.reshape(shape)
key_pts = (key_pts.astype(np.float32) - 48.0) / 48.0  # np.float is removed in newer NumPy versions
return torch.from_numpy(key_pts).float()
def __len__(self):
return self.key_pts_frame.shape[0]
def __getitem__(self, idx):
image = self._get_image(idx, self.key_pts_frame)
keypoints = self._get_keypoints(idx, self.key_pts_frame)
if self.transform:
image = self.transform(image)
return {'image': image, 'keypoints': keypoints}
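# Hedged usage sketch (not part of the original file): reading one sample. The
# `root` keyword is assumed to be accepted by the BaseDataset parent and to point
# at a folder containing training.csv / val.csv.
if __name__ == "__main__":
    dataset = FacialKeypointsDataset(root="../datasets/facial_keypoints", train=True)
    sample = dataset[0]
    print(sample['image'].shape)      # (96, 96, 1), uint8 grayscale image
    print(sample['keypoints'].shape)  # torch.Size([15, 2]), normalized to [-1, 1]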
|
CornellLenard/Deep-Learning-Course-Exercises
|
Exercise 10/exercise_code/util/vis_utils.py
|
<reponame>CornellLenard/Deep-Learning-Course-Exercises
"""Utils for visualizations in notebooks"""
import matplotlib.pyplot as plt
import torch
from math import sqrt, ceil
import numpy as np
from exercise_code.data.segmentation_dataset import label_img_to_rgb
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
def visualizer(model, test_data=None):
num_example_imgs = 4
plt.figure(figsize=(15, 5 * num_example_imgs))
for i, (img, target) in enumerate(test_data[:num_example_imgs]):
inputs = img.unsqueeze(0)
inputs = inputs.to(device)
outputs = model.forward(inputs)
_, preds = torch.max(outputs, 1)
pred = preds[0].data.cpu()
img, target, pred = img.numpy(), target.numpy(), pred.numpy()
# img
plt.subplot(num_example_imgs, 3, i * 3 + 1)
plt.axis('off')
plt.imshow(img.transpose(1, 2, 0))
if i == 0:
plt.title("Input image")
# target
plt.subplot(num_example_imgs, 3, i * 3 + 2)
plt.axis('off')
plt.imshow(label_img_to_rgb(target))
if i == 0:
plt.title("Target image")
# pred
plt.subplot(num_example_imgs, 3, i * 3 + 3)
plt.axis('off')
plt.imshow(label_img_to_rgb(pred))
if i == 0:
plt.title("Prediction image")
plt.show()
def visualize_grid(Xs, ubound=255.0, padding=1):
"""
Reshape a 4D tensor of image data to a grid for easy visualization.
Inputs:
- Xs: Data of shape (N, H, W, C)
- ubound: Output grid will have values scaled to the range [0, ubound]
- padding: The number of blank pixels between elements of the grid
"""
(N, H, W, C) = Xs.shape
grid_size = int(ceil(sqrt(N)))
grid_height = H * grid_size + padding * (grid_size - 1)
grid_width = W * grid_size + padding * (grid_size - 1)
grid = np.zeros((grid_height, grid_width, C))
next_idx = 0
y0, y1 = 0, H
for y in range(grid_size):
x0, x1 = 0, W
for x in range(grid_size):
if next_idx < N:
img = Xs[next_idx]
low, high = np.min(img), np.max(img)
grid[y0:y1, x0:x1] = ubound * (img - low) / (high - low)
next_idx += 1
x0 += W + padding
x1 += W + padding
y0 += H + padding
y1 += H + padding
return grid
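# Hedged example (added for illustration): visualize_grid expects channels-last
# data of shape (N, H, W, C) and returns one grid image that matplotlib can show
# directly. The random data below is a stand-in for real samples.
if __name__ == "__main__":
    fake_imgs = np.random.rand(9, 32, 32, 3)
    grid = visualize_grid(fake_imgs, ubound=1.0, padding=2)
    plt.imshow(grid)
    plt.axis('off')
    plt.show()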
|
CornellLenard/Deep-Learning-Course-Exercises
|
Exercise 10/exercise_code/tests/segmentation_nn_tests.py
|
"""Tests for facial keypoint detection models"""
import os
import torch
from exercise_code.tests.base_tests import UnitTest, CompositeTest
from exercise_code.util.save_model import save_model
class ParamCountTest(UnitTest):
"""Test whether number of model params smaller than limit"""
def __init__(self, model, limit=5e6):
self.model = model
self.limit = limit
self.n_params = 0
def test(self):
self.n_params = sum(p.numel() for p in self.model.parameters())
return self.n_params < self.limit
def define_success_message(self):
n_params_mio = self.n_params / 1e6
return "ParamCountTest passed. Your model has {:.3f} mio. params." \
.format(n_params_mio)
def define_failure_message(self):
n_params_mio = self.n_params / 1e6
limit_mio = self.limit / 1e6
return "Your model has {:.3f} mio. params but must have less than" \
" {:.3f} mio. params. Simplify your model before submitting" \
" it. You won't need that many params :)" \
.format(n_params_mio, limit_mio)
class FileSizeTest(UnitTest):
"""Test whether file size of saved model smaller than limit"""
def __init__(self, model, limit=20):
self.model = model
self.limit = limit
self.size = 0
def test(self):
model_path = save_model(self.model, "model.model", ".tmp")
size = os.path.getsize(model_path)
self.size = size / 1e6
return self.size < self.limit
def define_success_message(self):
return "FileSizeTest passed. Your model is %.1f MB large" % self.size
def define_failure_message(self):
return "Your model is too large! The size is {:.1f} MB, but it must" \
" be less than {:.1f} MB. Please simplify your model before" \
" submitting.".format(self.size, self.limit)
def define_exception_message(self, exception):
return "Your model could not be saved. Please make sure your model" \
" inherits from either torch.nn.Module or" \
" pytorch_lightning.LightningModule."
class SegModelTest(CompositeTest):
"""Composite test for SegModel"""
def define_tests(self, model):
return [
ParamCountTest(model),
FileSizeTest(model, 50)
]
def define_failure_message(self):
return "Some tests failed for your model."
def define_success_message(self):
return "All tests passed for your model."
def test_seg_nn(model):
"""Wrapper for SegModelTest"""
SegModelTest(model)()
|
CornellLenard/Deep-Learning-Course-Exercises
|
Exercise 10/exercise_code/util/save_model.py
|
<filename>Exercise 10/exercise_code/util/save_model.py
"""Utils for model saving"""
import os
import pickle
import torch
def save_model(model, file_name, directory="models"):
"""Save model as pickle"""
model = model.cpu()
if not os.path.exists(directory):
os.makedirs(directory)
model_path = os.path.join(directory, file_name)
torch.save(model, model_path)
return model_path
|
CornellLenard/Deep-Learning-Course-Exercises
|
Exercise 07/exercise_code/data_class.py
|
import pytorch_lightning as pl
import torch
import torchvision
from torchvision import transforms
from torch.utils.data import DataLoader, random_split
class FashionMNISTDataModule(pl.LightningDataModule):
def __init__(self, batch_size=4):
super().__init__()
self.batch_size = batch_size
def prepare_data(self):
# Define the transform
transform = transforms.Compose([transforms.ToTensor(),
transforms.Normalize((0.5,), (0.5,))])
# Download the Fashion-MNIST dataset
fashion_mnist_train_val = torchvision.datasets.FashionMNIST(root='../datasets', train=True,
download=True, transform=transform)
self.fashion_mnist_test = torchvision.datasets.FashionMNIST(root='../datasets', train=False,
download=True, transform=transform)
# Apply the Transforms
transform = transforms.Compose([transforms.ToTensor(),
transforms.Normalize((0.5,), (0.5,))])
# Perform the training and validation split
torch.manual_seed(0)
self.train_dataset, self.val_dataset = random_split(
fashion_mnist_train_val, [50000, 10000])
torch.manual_seed(torch.initial_seed())
# Define the data loaders that can be called from the trainers
def train_dataloader(self):
return DataLoader(self.train_dataset, batch_size=self.batch_size)
def val_dataloader(self):
return DataLoader(self.val_dataset, batch_size=self.batch_size)
def test_dataloader(self):
return DataLoader(self.fashion_mnist_test, batch_size=self.batch_size)
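# Hedged usage sketch (illustrative only): the data module is normally handed to a
# pytorch_lightning.Trainer together with a LightningModule; here it is only
# exercised directly to show the tensor shapes it produces.
if __name__ == "__main__":
    dm = FashionMNISTDataModule(batch_size=64)
    dm.prepare_data()
    images, labels = next(iter(dm.train_dataloader()))
    print(images.shape)  # torch.Size([64, 1, 28, 28])
    print(labels.shape)  # torch.Size([64])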
|
CornellLenard/Deep-Learning-Course-Exercises
|
Exercise 07/exercise_code/data/csv_dataset.py
|
<gh_stars>0
from .base_dataset import Dataset
import numpy as np
import pandas as pd
import os.path
class CSVDataset(Dataset):
"""
CSVDataset class.
Provide access to the Boston Housing Prices dataset.
"""
def __init__(self, target_column, transform=None, mode="train", input_data=None, *args, **kwargs):
super().__init__(*args, **kwargs)
# The name of the .csv dataset file should be the same as the name
# of the archive, but with a different extension.
if input_data is not None:
self.df = input_data
else:
name_prefix = self.dataset_zip_name[:self.dataset_zip_name.find('.')]
dataset_csv_name = name_prefix + '.csv'
data_path = os.path.join(self.root_path, dataset_csv_name)
self.df = pd.read_csv(data_path)
self.target_column = target_column
# split the dataset into train - val - test with the ratio 60 - 20 - 20
assert mode in ["train", "val", "test"], "wrong mode for dataset given"
train, val, test = np.split(self.df.sample(frac=1, random_state=0), [
int(.6 * len(self.df)), int(.8 * len(self.df))])
if mode == "train":
self.df = train
elif mode == "val":
self.df = val
elif mode == "test":
self.df = test
self.data = self.df.loc[:, self.df.columns != self.target_column]
self.targets = self.df[self.target_column]
self.transforms = transform if transform is not None else lambda x: x
self.data.iloc[0]['OverallQual'] = np.nan
def __len__(self):
return len(self.data)
def __getitem__(self, index):
"""
Create a dict of the data at the given index in your dataset.
The dict should have the following format:
{ "features" : <i-th row of the dataframe (except TARGET_COLUMN)>,
"label" : <value of TARGET_COLUMN for i-th row> }
"""
data_dict = {}
data_dict['features'] = self.data.iloc[index]
data_dict['target'] = self.targets.iloc[index]
return self.transforms(data_dict)
class FeatureSelectorAndNormalizationTransform:
"""
Select some numerical features and normalize them between 0 and 1.
"""
def __init__(self, column_stats, target_column):
"""
:param column_stats: a dictionary mapping the column name to the
relevant statistics for normalization (min and max on that column).
It should also include the statistics for the target column.
"""
self.column_stats = column_stats
self.target_column = target_column
def __call__(self, data_dict):
def normalize_column(old_value, column_name):
mn = self.column_stats[column_name]['min']
mx = self.column_stats[column_name]['max']
return (old_value - mn) / (mx - mn)
# For every feature column, normalize it if it's one of the columns
# we want to keep.
feature_columns = []
for column_idx in data_dict['features'].index:
if column_idx in self.column_stats and column_idx != self.target_column:
feature_columns.append(column_idx)
if np.isnan(data_dict['features'][column_idx]):
mean_col_val = self.column_stats[column_idx]['mean']
data_dict['features'][column_idx] = mean_col_val
old_value = data_dict['features'][column_idx]
normalized = normalize_column(old_value, column_idx)
data_dict['features'][column_idx] = normalized
# Drop the rest of the columns.
data_dict['features'] = data_dict['features'][feature_columns]
data_dict['features'] = data_dict['features'].values.astype(np.float32)
# Also normalize the target.
old_value = data_dict['target']
normalized = normalize_column(old_value, self.target_column)
data_dict['target'] = np.array([normalized])
return data_dict
class FeatureSelectorTransform:
"""
Select some numerical features without normalizing them, i.e. return their original values.
This class is used for the binarized data, to convert it to the format expected by a CSVDataset object
so that it can be loaded by our dataloader.
"""
def __init__(self, column_stats, target_column):
"""
:param column_stats: a dictionary mapping the column name to the
relevant statistics for normalization (min and max on that column).
It should also include the statistics for the target column.
"""
self.column_stats = column_stats
self.target_column = target_column
def __call__(self, data_dict):
# For every feature column, just keep its old values
feature_columns = []
for column_idx in data_dict['features'].index:
if column_idx in self.column_stats and column_idx != self.target_column:
feature_columns.append(column_idx)
if np.isnan(data_dict['features'][column_idx]):
mean_col_val = self.column_stats[column_idx]['mean']
data_dict['features'][column_idx] = mean_col_val
data_dict['features'] = data_dict['features'][feature_columns]
data_dict['features'] = data_dict['features'].values.astype(np.float32)
data_dict['target'] = np.array([data_dict['target']])
return data_dict
def get_exercise5_transform():
# dataloading and preprocessing steps as in ex04 2_logistic_regression.ipynb
target_column = 'SalePrice'
i2dl_exercises_path = os.path.dirname(os.path.abspath(os.getcwd()))
root_path = os.path.join(i2dl_exercises_path, "datasets", 'housing')
housing_file_path = os.path.join(root_path, "housing_train.csv")
download_url = 'https://cdn3.vision.in.tum.de/~dl4cv/housing_train.zip'
# Always make sure this line was run at least once before trying to
# access the data manually, as the data is downloaded in the
# constructor of CSVDataset.
train_dataset = CSVDataset(target_column=target_column, root=root_path, download_url=download_url, mode="train")
# For the data transformations, compute min, max and mean for each feature column. We perform the same transformation
# on the training, validation, and test data.
df = train_dataset.df
# Select only 2 features to keep plus the target column.
selected_columns = ['OverallQual', 'GrLivArea', target_column]
# selected_columns = ['GrLivArea', target_column]
mn, mx, mean = df.min(), df.max(), df.mean()
column_stats = {}
for column in selected_columns:
crt_col_stats = {'min': mn[column],
'max': mx[column],
'mean': mean[column]}
column_stats[column] = crt_col_stats
transform = FeatureSelectorAndNormalizationTransform(column_stats, target_column)
return transform
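# Hedged usage sketch (not part of the original file): combining CSVDataset with
# the exercise-5 transform. The paths and download URL mirror the defaults used in
# get_exercise5_transform() above; the local directory layout is an assumption.
if __name__ == "__main__":
    transform = get_exercise5_transform()
    i2dl_exercises_path = os.path.dirname(os.path.abspath(os.getcwd()))
    root_path = os.path.join(i2dl_exercises_path, "datasets", "housing")
    val_dataset = CSVDataset(target_column='SalePrice', root=root_path,
                             download_url='https://cdn3.vision.in.tum.de/~dl4cv/housing_train.zip',
                             mode="val", transform=transform)
    sample = val_dataset[0]
    print(sample['features'])  # selected features, normalized to [0, 1], float32
    print(sample['target'])    # normalized SalePrice as a 1-element array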
|
CornellLenard/Deep-Learning-Course-Exercises
|
Exercise 06/exercise_code/tests/__init__.py
|
<filename>Exercise 06/exercise_code/tests/__init__.py
"""Define tests, sanity checks, and evaluation"""
from .eval_utils import save_pickle
|
CornellLenard/Deep-Learning-Course-Exercises
|
Exercise 09/exercise_code/util/__init__.py
|
<reponame>CornellLenard/Deep-Learning-Course-Exercises
"""Util functions"""
from .vis_utils import show_all_keypoints
from .save_model import save_model
|
Callifrey/ACGAN-Paddle
|
dataset.py
|
import paddle.vision.transforms as tran
from paddle.io import Dataset, DataLoader
from PIL import Image
import os
import os.path
IMG_EXTENSIONS = ['.jpg', '.jpeg', '.png', '.ppm', '.bmp', '.pgm']
def is_image_file(filename):
filename_lower = filename.lower()
return any(filename_lower.endswith(ext) for ext in IMG_EXTENSIONS)
def find_classes(dir, classes_idx=None):
classes = [d for d in os.listdir(dir) if os.path.isdir(os.path.join(dir, d))]
classes.sort()
if classes_idx is not None:
assert type(classes_idx) == tuple
start, end = classes_idx
classes = classes[start:end]
class_to_idx = {classes[i]: i for i in range(len(classes))}
return classes, class_to_idx
def make_dataset(dir, class_to_idx):
images = []
dir = os.path.expanduser(dir)
for target in sorted(os.listdir(dir)):
if target not in class_to_idx:
continue
d = os.path.join(dir, target)
if not os.path.isdir(d):
continue
for root, _, fnames in sorted(os.walk(d)):
for fname in sorted(fnames):
if is_image_file(fname):
path = os.path.join(root, fname)
item = (path, class_to_idx[target])
images.append(item)
return images
def pil_loader(path):
with open(path, 'rb') as f:
with Image.open(f) as img:
return img.convert('RGB')
class ImageFolder(Dataset):
def __init__(self, root, transform=None, target_transform=None, loader=pil_loader, classes_idx=None):
super().__init__()
self.classes_idx = classes_idx
classes, class_to_idx = find_classes(root, self.classes_idx)
imgs = make_dataset(root, class_to_idx)
if len(imgs) == 0:
raise(RuntimeError("Found 0 images in subfolders of: " + root + "\n"
"Supported image extensions are: " + ",".join(IMG_EXTENSIONS)))
self.root = root
self.imgs = imgs
self.classes = classes
self.class_to_idx = class_to_idx
self.transform = transform
self.target_transform = target_transform
self.loader = loader
def __getitem__(self, index):
path, target = self.imgs[index]
img = self.loader(path)
if self.transform is not None:
img = self.transform(img)
if self.target_transform is not None:
target = self.target_transform(target)
return img, target
def __len__(self):
return len(self.imgs)
if __name__ == '__main__':
trans = tran.ToTensor()
dataset = ImageFolder(root='/media/gallifrey/DJW/Dataset/Imagenet/train', transform=tran.Compose([
tran.Resize(128),
tran.CenterCrop(128),
tran.ToTensor(),
tran.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5)),
]),
classes_idx=(10, 20))
dataloader = DataLoader(dataset)
print(len(dataloader))
for data in dataloader:
img, target = data
print(img.shape)
print(target.shape)
break
|
Callifrey/ACGAN-Paddle
|
test.py
|
<reponame>Callifrey/ACGAN-Paddle<gh_stars>0
import argparse
from utils import *
from network import Generator
parser = argparse.ArgumentParser()
parser.add_argument('--dataroot', type=str, default='/media/gallifrey/DJW/Dataset/Imagenet/train', help='path to dataset')
parser.add_argument('--batchSize', type=int, default=100, help='input batch size')
parser.add_argument('--nz', type=int, default=110, help='size of the latent z vector')
parser.add_argument('--check_path', default='./checkpoints/class-10-20', help='folder to output images and model checkpoints')
parser.add_argument('--result_path', type=str, default='./results', help='folder to save results picture')
parser.add_argument('--class_result_path', type=str, default='./class_results', help='folder to save results picture')
parser.add_argument('--num_classes', type=int, default=10, help='Number of classes for AC-GAN')
parser.add_argument('--which_epoch', type=int, default=499, help='Test checkpoints')
opt = parser.parse_args()
print(opt)
# Load the pretrained generator
G = Generator(nz=opt.nz)
state_dict = paddle.load(os.path.join(opt.check_path, 'G_{}.pdparams'.format(opt.which_epoch)))
G.load_dict(state_dict)
G.eval()
# Generate random noise
batch_size = opt.batchSize
noise = paddle.randn(shape=[batch_size, opt.nz])
label = paddle.randint(0, opt.num_classes, shape=[batch_size])
class_onehot = paddle.zeros([batch_size, opt.num_classes])
class_onehot[np.arange(batch_size), label] = 1
noise[0:batch_size, :opt.num_classes] = class_onehot[0:batch_size]
noise = paddle.reshape(noise, shape=[batch_size, opt.nz, 1, 1])
# Generate fake images
fake = G(noise)
save_samples(fake, path=os.path.join(opt.result_path, 'sample_epoch_{}.png'.format(opt.which_epoch)))
|
Callifrey/ACGAN-Paddle
|
network.py
|
<gh_stars>0
import paddle
import paddle.nn as nn
class Generator(nn.Layer):
def __init__(self, nz):
super(Generator, self).__init__()
self.nz = nz
# first linear layer
self.fc1 = nn.Linear(110, 768)
# Transposed Convolution 2
self.tconv2 = nn.Sequential(
nn.Conv2DTranspose(768, 384, 4, 2, 0),
nn.BatchNorm2D(384),
nn.ReLU(True),
)
# Transposed Convolution 3
self.tconv3 = nn.Sequential(
nn.Conv2DTranspose(384, 256, 4, 2, 1),
nn.BatchNorm2D(256),
nn.ReLU(True),
)
# Transposed Convolution 4
self.tconv4 = nn.Sequential(
nn.Conv2DTranspose(256, 192, 4, 2, 1),
nn.BatchNorm2D(192),
nn.ReLU(True),
)
# Transposed Convolution 5
self.tconv5 = nn.Sequential(
nn.Conv2DTranspose(192, 64, 4, 2, 1),
nn.BatchNorm2D(64),
nn.ReLU(True),
)
# Transposed Convolution 6
self.tconv6 = nn.Sequential(
nn.Conv2DTranspose(64, 32, 4, 2, 1),
nn.BatchNorm2D(32),
nn.ReLU(True),
)
# Transposed Convolution 7
self.tconv7 = nn.Sequential(
nn.Conv2DTranspose(32, 3, 4, 2, 1),
nn.Tanh(),
)
def forward(self, input):
input = paddle.reshape(input, shape=[-1, self.nz])
fc1 = self.fc1(input)
fc1 = paddle.reshape(fc1, shape=[-1, 768, 1, 1])
tconv2 = self.tconv2(fc1)
tconv3 = self.tconv3(tconv2)
tconv4 = self.tconv4(tconv3)
tconv5 = self.tconv5(tconv4)
tconv6 = self.tconv6(tconv5)
tconv7 = self.tconv7(tconv6)
output = tconv7
return output
class Discriminator(nn.Layer):
def __init__(self, num_classes=10):
super(Discriminator, self).__init__()
# Convolution 1
self.conv1 = nn.Sequential(
nn.Conv2D(3, 16, 3, 2, 1),
nn.LeakyReLU(0.2),
nn.Dropout(0.5),
)
# Convolution 2
self.conv2 = nn.Sequential(
nn.Conv2D(16, 32, 3, 1, 1),
nn.BatchNorm2D(32),
nn.LeakyReLU(0.2),
nn.Dropout(0.5),
)
# Convolution 3
self.conv3 = nn.Sequential(
nn.Conv2D(32, 64, 3, 2, 1),
nn.BatchNorm2D(64),
nn.LeakyReLU(0.2),
nn.Dropout(0.5),
)
# Convolution 4
self.conv4 = nn.Sequential(
nn.Conv2D(64, 128, 3, 1, 1),
nn.BatchNorm2D(128),
nn.LeakyReLU(0.2),
nn.Dropout(0.5),
)
# Convolution 5
self.conv5 = nn.Sequential(
nn.Conv2D(128, 256, 3, 2, 1),
nn.BatchNorm2D(256),
nn.LeakyReLU(0.2),
nn.Dropout(0.5),
)
# Convolution 6
self.conv6 = nn.Sequential(
nn.Conv2D(256, 512, 3, 1, 1),
nn.BatchNorm2D(512),
nn.LeakyReLU(0.2),
nn.Dropout(0.5),
)
# discriminator fc
self.fc_dis = nn.Linear(16*16*512, 1)
# aux-classifier fc
self.fc_aux = nn.Linear(16*16*512, num_classes)
# softmax and sigmoid
self.softmax = nn.Softmax()
self.sigmoid = nn.Sigmoid()
def forward(self, input):
conv1 = self.conv1(input)
conv2 = self.conv2(conv1)
conv3 = self.conv3(conv2)
conv4 = self.conv4(conv3)
conv5 = self.conv5(conv4)
conv6 = self.conv6(conv5)
flat6 = paddle.reshape(conv6, [-1, 16*16*512])
fc_dis = self.fc_dis(flat6)
fc_aux = self.fc_aux(flat6)
classes = self.softmax(fc_aux)
realfake = paddle.reshape(self.sigmoid(fc_dis), [-1, 1]).squeeze(1)
return realfake, classes
if __name__ == '__main__':
# test Generator
noise = paddle.randn((1, 110, 1, 1))
G = Generator(nz=110)
img = G(noise)
print(img.shape) # [1,3,128,128]
# test Discriminator
D = Discriminator(num_classes=10)
real_fake, classes = D(img)
print(real_fake.shape)  # [1] for a single input image
print(classes.shape)  # [1, 10]
|
Callifrey/ACGAN-Paddle
|
utils.py
|
<filename>utils.py<gh_stars>0
import paddle
import numpy as np
from PIL import Image
import os
# Compute the current classification accuracy
def compute_acc(preds, labels):
correct = 0
preds_ = paddle.argmax(preds, axis=1)
correct = paddle.to_tensor(np.sum(np.array(preds_ == labels)))
acc = float(correct) / float(labels.shape[0]) * 100.0
return acc
def save_samples(images, path):
images = paddle.nn.functional.pad(images, pad=[2, 2, 2, 2])
b, c, h, w = images.shape
results = np.zeros((1, 3, 10*h, 10*w))
count = 0
for i in range(10):
for j in range(10):
results[:, :, i*h:(i+1)*h, j*w:(j+1)*w] = images[count].unsqueeze(0)
count += 1
results = 255 * (results + 1) / 2
result = np.array(results[0].transpose(1, 2, 0), dtype=np.uint8)
save_result = Image.fromarray(result)
save_result.save(path)
def get_same_class_samples(net, opt, cal=0):
net.eval()
# Sample random noise
noise_ = paddle.randn(shape=[100, opt.nz - opt.num_classes])
label = paddle.zeros(shape=[opt.num_classes])
label[cal] = 1
noise = paddle.zeros(shape=[100, opt.nz])
noise[:, 0:opt.nz - opt.num_classes] = noise_
noise[tuple(np.arange(100)), opt.nz - opt.num_classes:] = label
img = net(noise)
result = np.zeros((1, 3, opt.imageSize * opt.num_classes, opt.imageSize * 10))
for i in range(10):
for j in range(10):
result[:, :, j * opt.imageSize:(j + 1) * opt.imageSize, i * opt.imageSize:(i + 1) * opt.imageSize] = img[i*10+j].squeeze(0)
result = 255 * (result + 1) / 2
result = np.array(result[0].transpose(1, 2, 0), dtype=np.uint8)
save_result = Image.fromarray(result)
save_result.save(os.path.join(opt.class_result_path, "class_samples_class_{}.png".format(cal)))
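# Hedged example (added for illustration, not part of the original file):
# compute_acc takes class scores and integer labels, compares the argmax
# predictions against the labels and returns the accuracy as a percentage.
# The values below are made up.
if __name__ == '__main__':
    preds = paddle.to_tensor([[0.9, 0.1], [0.2, 0.8], [0.6, 0.4]])
    labels = paddle.to_tensor([0, 1, 1])
    print(compute_acc(preds, labels))  # 2 of 3 correct -> 66.66...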
|
Callifrey/ACGAN-Paddle
|
train.py
|
<gh_stars>0
import paddle.vision.transforms as tran
from paddle.io import Dataset, DataLoader
import argparse
import paddle.nn as nn
from visualdl import LogWriter
from utils import *
from network import Generator, Discriminator
from dataset import ImageFolder
parser = argparse.ArgumentParser()
parser.add_argument('--dataroot', type=str, default='/media/gallifrey/DJW/Dataset/Imagenet/train', help='path to dataset')
parser.add_argument('--workers', default=4, type=int, help='number of data loading workers')
parser.add_argument('--batchSize', type=int, default=100, help='input batch size')
parser.add_argument('--imageSize', type=int, default=128, help='the height / width of the input image to network')
parser.add_argument('--nz', type=int, default=110, help='size of the latent z vector')
parser.add_argument('--ngf', type=int, default=64)
parser.add_argument('--ndf', type=int, default=64)
parser.add_argument('--niter', type=int, default=500, help='number of epochs to train for')
parser.add_argument('--lr', type=float, default=0.0002, help='learning rate, default=0.0002')
parser.add_argument('--beta1', type=float, default=0.5, help='beta1 for adam. default=0.5')
parser.add_argument('--check_path', default='./checkpoints', help='folder to output images and model checkpoints')
parser.add_argument('--result_path', type=str, default='./results', help='folder to save results picture')
parser.add_argument('--class_result_path', type=str, default='./class_results', help='folder to save fix class results picture')
parser.add_argument('--log_path', type=str, default='./log', help='folder to save log file')
parser.add_argument('--save_freq', type=int, default=5, help='frequency for save')
parser.add_argument('--num_classes', type=int, default=10, help='Number of classes for AC-GAN')
opt = parser.parse_args()
print(opt)
# Load the data
dataset = ImageFolder(
root=opt.dataroot,
transform=tran.Compose([
tran.Resize(opt.imageSize),
tran.CenterCrop(opt.imageSize),
tran.ToTensor(),
tran.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5)),
]),
classes_idx=(90,100)
)
dataloader = DataLoader(dataset, batch_size=opt.batchSize, shuffle=True, num_workers=int(opt.workers), drop_last=True)
# Instantiate the models
netG = Generator(opt.nz)
netD = Discriminator(opt.num_classes)
# Define the loss functions
dis_criterion = nn.BCELoss()
aux_criterion = nn.NLLLoss()
real_label = 1
fake_label = 0
# Define the optimizers
optimizerG = paddle.optimizer.Adam(learning_rate=opt.lr, beta1=opt.beta1, beta2=0.999, parameters=netG.parameters())
optimizerD = paddle.optimizer.Adam(learning_rate=opt.lr, beta1=opt.beta1, beta2=0.999, parameters=netD.parameters())
avg_loss_D = 0.0
avg_loss_G = 0.0
avg_loss_A = 0.0
with LogWriter(logdir=opt.log_path) as writer:
for epoch in range(opt.niter):
for i, data in enumerate(dataloader, 0):
############################
# (1) Update the discriminator: maximize log(D(x)) + log(1 - D(G(z)))
###########################
# train with real
optimizerD.clear_grad()
real_cpu, label = data
batch_size = real_cpu.shape[0]
dis_label = paddle.full([batch_size], real_label)
dis_output, aux_output = netD(real_cpu)
dis_errD_real = dis_criterion(dis_output, dis_label)
aux_errD_real = aux_criterion(aux_output, label)
errD_real = dis_errD_real + aux_errD_real
errD_real.backward()
D_x = dis_output.mean()
# compute the current classification accuracy
accuracy = compute_acc(aux_output, label)
# train with fake
noise = paddle.randn(shape=[batch_size, opt.nz])
label = paddle.randint(0, opt.num_classes, shape=[batch_size])
class_onehot = paddle.zeros([batch_size, opt.num_classes])
class_onehot[np.arange(batch_size), label] = 1
noise[0:batch_size, :opt.num_classes] = class_onehot[0:batch_size]
noise = paddle.reshape(noise, shape=[batch_size, opt.nz, 1, 1])
fake = netG(noise)
dis_label = paddle.full([batch_size], fake_label)
aux_label = label
dis_output, aux_output = netD(fake.detach())
dis_errD_fake = dis_criterion(dis_output, dis_label)
aux_errD_fake = aux_criterion(aux_output, aux_label)
errD_fake = dis_errD_fake + aux_errD_fake
errD_fake.backward()
D_G_z1 = dis_output.mean()
errD = errD_real + errD_fake
optimizerD.step()
############################
# (2) Update the generator: maximize log(D(G(z)))
###########################
optimizerG.clear_grad()
dis_label = paddle.full([batch_size], real_label)
dis_output, aux_output = netD(fake)
dis_errG = dis_criterion(dis_output, dis_label)
aux_errG = aux_criterion(aux_output, aux_label)
errG = dis_errG + aux_errG
errG.backward()
D_G_z2 = dis_output.mean()
optimizerG.step()
# Compute running average losses / classification accuracy
curr_iter = epoch * len(dataloader) + i
all_loss_G = avg_loss_G * curr_iter
all_loss_D = avg_loss_D * curr_iter
all_loss_A = avg_loss_A * curr_iter
all_loss_G += errG.item()
all_loss_D += errD.item()
all_loss_A += accuracy
avg_loss_G = all_loss_G / (curr_iter + 1)
avg_loss_D = all_loss_D / (curr_iter + 1)
avg_loss_A = all_loss_A / (curr_iter + 1)
writer.add_scalar('D_loss', value=errD.item(), step=curr_iter)
writer.add_scalar('G_loss', value=errG.item(), step=curr_iter)
writer.add_scalar('Acc', value=accuracy, step=curr_iter)
print('[%d/%d][%d/%d] Loss_D: %.4f (%.4f) Loss_G: %.4f (%.4f) D(x): %.4f D(G(z)): %.4f / %.4f Acc: %.4f (%.4f)'
% (epoch, opt.niter, i, len(dataloader),
errD.item(), avg_loss_D, errG.item(), avg_loss_G, D_x, D_G_z1, D_G_z2, accuracy, avg_loss_A))
# Save sample images and checkpoints
if epoch % opt.save_freq == 0 or epoch == 499:
save_samples(real_cpu, path=os.path.join(opt.result_path, 'real_samples_epoch_{}.png'.format(epoch)))
save_samples(fake, path=os.path.join(opt.result_path, 'fake_samples_epoch_{}.png'.format(epoch)))
paddle.save(netG.state_dict(), os.path.join(opt.check_path, 'G_{}.pdparams'.format(epoch)))
paddle.save(netD.state_dict(), os.path.join(opt.check_path, 'D_{}.pdparams'.format(epoch)))
|
CodeSelfStudy/python-project-cookiecutter
|
{{cookiecutter.directory_name}}/setup.py
|
<gh_stars>0
"""Configuration for {{ cookiecutter.project_name }}."""
try:
from setuptools import setup
except ImportError:
from distutils.core import setup
config = {
'description': '{{ cookiecutter.description }}',
'author': '{{ cookiecutter.author }}',
'url': '{{ cookiecutter.url }}',
'download_url': '{{ cookiecutter.download_url }}',
'author_email': '{{ cookiecutter.author_email }}',
'version': '0.1.0',
'install_requires': ['nose'],
'packages': ['{{ cookiecutter.directory_name }}'],
'scripts': [],
'name': '{{ cookiecutter.project_name }}'
}
setup(**config)
|
CodeSelfStudy/python-project-cookiecutter
|
{{cookiecutter.directory_name}}/tests/{{cookiecutter.directory_name}}_tests.py
|
from nose.tools import *
import NAME
def setup():
print('setup')
def teardown():
print('teardown')
def test_basic():
print('test ran')
|
jordimarinvalle/requeues
|
pimpamqueues/simplequeue.py
|
<reponame>jordimarinvalle/requeues
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import redis
from pimpamqueues import QUEUE_COLLECTION_OF_ELEMENTS
from pimpamqueues import Tools
from pimpamqueues.exceptions import PimPamQueuesError
from pimpamqueues.exceptions import PimPamQueuesElementWithoutValueError
class SimpleQueue(object):
'''
A lightweight queue. Simple Queue.
'''
QUEUE_TYPE_NAME = 'simple'
def __init__(self, id_args, collection_of=QUEUE_COLLECTION_OF_ELEMENTS,
keep_previous=True, redis_conn=None):
'''
Create a SimpleQueue object.
Arguments:
:id_args -- list, list's values will be used to name the queue
:collection_of -- string (default: QUEUE_COLLECTION_OF_ELEMENTS),
a type descriptor of queued elements
:keep_previous -- boolean (default: true),
a flag to create a fresh queue or not
:redis_conn -- redis.client.Redis (default: None), a redis
connection will be created using the default
redis.client.Redis connection params.
'''
self.id_args = id_args
self.collection_of = collection_of
if redis_conn is None:
redis_conn = redis.Redis()
self.redis = redis_conn
self.key_queue = self.get_key_queue()
if keep_previous is False:
self.delete()
def __str__(self):
'''
Return a string representation of the class.
Returns: string
'''
return '<SimpleQueue: %s (%s)>' % (self.key_queue, self.num())
def get_key_queue(self):
'''
Get a key id that will be used to store/retrieve data from
the redis server.
Returns: string
'''
return 'queue:%s:type:%s:of:%s' % ('.'.join(self.id_args),
SimpleQueue.QUEUE_TYPE_NAME,
self.collection_of)
def push(self, element, to_first=False):
'''
Push an element into the queue. The element can be pushed to the first or
last position (by default it is pushed to the last position).
Arguments:
:element -- string
:to_first -- boolean (default: False)
Raise:
:PimPamQueuesElementWithoutValueError, if the element has no value
Returns: long, the number of queued elements
'''
if element in ('', None):
raise PimPamQueuesElementWithoutValueError()
return self.push_some([element, ], to_first)
def push_some(self, elements, to_first=False, num_block_size=None):
'''
Push a bunch of elements into the queue. Elements can be pushed to the
first or last position (by default are pushed to the last position).
Arguments:
:elements -- a collection of strings
:to_first -- boolean (default: false)
:num_block_size -- integer (default: none)
Returns: long, the number of queued elements
'''
try:
elements = list(elements)
if to_first:
elements.reverse()
block_slices = Tools.get_block_slices(
num_elements=len(elements),
num_block_size=num_block_size
)
pipe = self.redis.pipeline()
for s in block_slices:
some_elements = elements[s[0]:s[1]]
if to_first:
pipe.lpush(self.key_queue, *some_elements)
else:
pipe.rpush(self.key_queue, *some_elements)
return pipe.execute().pop()
except Exception as e:
raise PimPamQueuesError(str(e))
def pop(self, last=False):
'''
Pop an element from the queue. The element can be popped from the beginning
or the end of the queue (by default it pops from the beginning).
If no element is popped, it returns None.
Arguments:
:last -- boolean (default: false)
Returns: string, the popped element, or None if no element is popped
'''
if last:
return self.redis.rpop(self.key_queue)
return self.redis.lpop(self.key_queue)
def num(self):
'''
Get the number of elements that are queued.
Returns: integer, the number of elements that are queued
'''
return self.redis.llen(self.key_queue)
def is_empty(self):
'''
Check if the queue is empty.
Returns: boolean, true if queue is empty, otherwise false
'''
return self.num() == 0
def is_not_empty(self):
'''
Check if the queue is not empty.
Returns: boolean, true if queue is not empty, otherwise false
'''
return not self.is_empty()
def elements(self, queue_from=0, queue_to=-1):
'''
Get some (or even all) queued elements, in the order that they were
queued. By default it returns all queued elements.
Note
====
Elements are not popped.
Arguments:
:queue_from -- integer (default: 0)
:queue_to -- integer (default: -1)
Returns: list
'''
return self.redis.lrange(self.key_queue, queue_from, queue_to)
def first_elements(self, num_elements=10):
'''
Get the first N queued elements, in the order that they were
queued. By default it returns the first ten elements.
Note
====
Elements are not popped.
Arguments:
:num_elements -- integer (default: 10)
Returns: list
'''
queue_to = num_elements - 1
return self.elements(queue_to=queue_to)
def remove(self, element):
'''
Remove an element from the queue.
Arguments:
:element -- string
Returns: boolean, true if the element was removed, otherwise false
'''
return True if self.redis.lrem(self.key_queue, element) else False
def delete(self):
'''
Delete the queue with all its elements.
Returns: boolean, true if queue has been deleted, otherwise false
'''
return True if self.redis.delete(self.key_queue) else False
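# Hedged usage sketch (not part of the library): a basic push/pop round trip.
# It assumes a reachable Redis server on the default localhost:6379; note that
# recent redis-py clients return elements as bytes under Python 3.
if __name__ == '__main__':
    queue = SimpleQueue(id_args=['docs', 'demo'], keep_previous=False)
    queue.push('egg')
    queue.push_some(['bacon', 'spam'])
    print(queue.num())  # 3
    print(queue.pop())  # b'egg' (FIFO by default)
    queue.delete()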
|
jordimarinvalle/requeues
|
tests/test_bucketqueue.py
|
<gh_stars>1-10
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import pytest
from tests import redis_conn
from pimpamqueues.bucketqueue import BucketQueue
ELEMENT_EGG = b'egg'
ELEMENT_BACON = b'bacon'
ELEMENT_SPAM = b'spam'
ELEMENT_42 = b'42'
ELEMENT_UNEXISTENT_ELEMENT = b'utopia'
some_elements = [
ELEMENT_EGG,
ELEMENT_BACON,
ELEMENT_SPAM,
ELEMENT_SPAM,
ELEMENT_SPAM,
ELEMENT_42,
ELEMENT_SPAM,
]
class TestBucketQueue(object):
def setup(self):
self.queue = BucketQueue(
id_args=['test', 'testing'],
redis_conn=redis_conn
)
def test_empty(self):
assert self.queue.num() is 0
assert self.queue.is_empty() is True
assert self.queue.is_not_empty() is False
def test_push(self):
assert self.queue.push(ELEMENT_EGG) == ELEMENT_EGG
assert self.queue.push(ELEMENT_EGG) == ''
def test_push_some(self):
queued_elements = self.queue.push_some(some_elements)
assert len(queued_elements) == len(set(some_elements))
def test_pop(self):
self.queue.push_some(some_elements)
assert self.queue.pop() is not None
def test_pop_empty_queue(self):
assert self.queue.pop() is None
def test_is_element(self):
self.queue.push_some(some_elements)
assert self.queue.is_element(some_elements[0]) is True
def test_is_not_element(self):
self.queue.push_some(some_elements)
assert self.queue.is_element(ELEMENT_UNEXISTENT_ELEMENT) is False
def test_elements(self):
self.queue.push_some(some_elements)
elements = self.queue.elements()
for i, some_element in enumerate(list(set(some_elements))):
elements.discard(some_element)
assert len(elements) is 0
def test_n_elements(self):
self.queue.push_some(some_elements)
num_remaining = 1
elements = self.queue.elements(len(set(some_elements)) - num_remaining)
assert len(set(some_elements).difference(elements)) is num_remaining
def test_fresh_queue(self):
self.queue.push(ELEMENT_EGG)
assert self.queue.is_not_empty() is True
queue_y = BucketQueue(
id_args=['test', 'testing'],
keep_previous=False,
redis_conn=redis_conn
)
assert queue_y.is_empty() is True
def test_delete(self):
self.queue.push(element=ELEMENT_42)
assert self.queue.num() == 1
assert self.queue.delete() is True
assert self.queue.num() == 0
def teardown(self):
self.queue.delete()
if __name__ == '__main__':
pytest.main()
|
jordimarinvalle/requeues
|
pimpamqueues/exceptions.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
class PimPamQueuesError(Exception):
MESSAGE = 'Unexpected error'
def __init__(self, message=''):
self.message = message if message else self.MESSAGE
def __str__(self):
return '<%s %s>' % (self.__class__.__name__, self.message)
class PimPamQueuesElementWithoutValueError(PimPamQueuesError):
MESSAGE = 'Element does not have a value'
class PimPamQueuesDisambiguatorInvalidError(PimPamQueuesError):
MESSAGE = 'Disambiguator has to contain a disambiguate() static method ' \
'which returns a string'
|
jordimarinvalle/requeues
|
tests/test_tools.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import pytest
from pimpamqueues import Tools
class TestTools(object):
def setup(self):
self.block_slices_300_1 = Tools.get_block_slices(300, 1)
self.block_slices_10_10 = Tools.get_block_slices(10, 10)
self.block_slices_27_10 = Tools.get_block_slices(27, 10)
self.block_slices_13_2 = Tools.get_block_slices(13, 2)
self.block_slices_3_141592 = Tools.get_block_slices(3, 141592)
self.block_slices_0_1000 = Tools.get_block_slices(0, 1000)
def test_block_slices(self):
assert len(self.block_slices_300_1) == 300
assert len(self.block_slices_10_10) == 1
assert len(self.block_slices_27_10) == 3
assert len(self.block_slices_13_2) == 7
assert len(self.block_slices_3_141592) == 1
assert len(self.block_slices_0_1000) == 1
def test_block_slices_a_slice(self):
assert self.block_slices_300_1[0] == [0, 1]
assert self.block_slices_300_1[299] == [299, 300]
assert self.block_slices_10_10[0] == [0, 10]
assert self.block_slices_27_10[2] == [20, 30]
assert self.block_slices_13_2[1] == [2, 4]
assert self.block_slices_3_141592[0] == [0, 3]
assert self.block_slices_0_1000[0] == [0, 0]
if __name__ == '__main__':
pytest.main()
|
jordimarinvalle/requeues
|
tests/__init__.py
|
<reponame>jordimarinvalle/requeues
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import redis
# REDISLABS FREE ACCOUNT
# https://redislabs.com/
REDIS_HOST = 'pub-redis-10356.us-east-1-3.6.ec2.redislabs.com'
REDIS_PORT = '10356'
REDIS_PASSWORD = '<PASSWORD>'
REDIS_DATABASE = '0'
redis_conn = redis.Redis(
host=REDIS_HOST,
port=REDIS_PORT,
password=REDIS_PASSWORD,
db=REDIS_DATABASE,
)
|
jordimarinvalle/requeues
|
tests/test_smartqueue.py
|
<filename>tests/test_smartqueue.py
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import pytest
from tests import redis_conn
from pimpamqueues.smartqueue import SmartQueue
from pimpamqueues.exceptions import PimPamQueuesDisambiguatorInvalidError
ELEMENT_EGG = b'egg'
ELEMENT_BACON = b'bacon'
ELEMENT_SPAM = b'spam'
ELEMENT_42 = b'42'
ELEMENT_SPAM_UPPERCASED = b'SPAM'
some_elements = [
ELEMENT_EGG,
ELEMENT_BACON,
ELEMENT_SPAM,
ELEMENT_SPAM,
ELEMENT_SPAM,
ELEMENT_42,
ELEMENT_SPAM,
ELEMENT_SPAM_UPPERCASED,
]
class Disambiguator(object):
@staticmethod
def disambiguate(element):
return element.lower()
class DisambiguatorInvalid(object):
@staticmethod
def invalid(element):
return element.lower()
class TestSmartQueue(object):
def setup(self):
self.queue = SmartQueue(
id_args=['test', 'testing'],
redis_conn=redis_conn
)
def test_empty(self):
assert self.queue.num() is 0
assert self.queue.is_empty() is True
assert self.queue.is_not_empty() is False
def test_push(self):
assert self.queue.push(ELEMENT_EGG) == ELEMENT_EGG
def test_push_to_first(self):
self.queue.push(ELEMENT_EGG)
self.queue.push(ELEMENT_BACON)
self.queue.push(ELEMENT_SPAM)
self.queue.push(ELEMENT_42, to_first=True)
assert self.queue.pop() == ELEMENT_42
def test_push_some(self):
queued_elements = self.queue.push_some(some_elements)
assert (set(queued_elements) - set(some_elements)) == set()
def test_push_smart(self):
self.queue.push(ELEMENT_EGG)
self.queue.push(ELEMENT_BACON)
self.queue.push(ELEMENT_SPAM)
assert self.queue.push(ELEMENT_SPAM) == ''
def test_push_smart_force(self):
self.queue.push(ELEMENT_EGG)
self.queue.push(ELEMENT_BACON)
self.queue.push(ELEMENT_SPAM)
assert self.queue.push(element=ELEMENT_SPAM, force=True) != ''
def test_push_smart_some_force(self):
queued_elements = self.queue.push_some(
elements=some_elements,
force=True
)
assert len(some_elements) == len(queued_elements)
def test_push_smart_force_push_smart_some_force(self):
self.queue.push(ELEMENT_EGG)
self.queue.push(ELEMENT_BACON)
self.queue.push(ELEMENT_SPAM)
self.queue.push(ELEMENT_SPAM)
num_elements = self.queue.num()
self.queue.push(element=ELEMENT_SPAM, force=True)
queued_elements = self.queue.push_some(
elements=some_elements,
force=True
)
self.queue.push(element=ELEMENT_SPAM)
assert self.queue.num() == (num_elements + 1 + len(queued_elements))
def test_push_some_to_first(self):
self.queue.push(ELEMENT_42)
self.queue.push_some(
elements=[ELEMENT_EGG, ELEMENT_BACON],
to_first=True
)
assert self.queue.pop() == ELEMENT_EGG
def test_pop(self):
self.queue.push(ELEMENT_EGG)
assert self.queue.pop() == ELEMENT_EGG
def test_pop_none(self):
assert self.queue.pop() is None
def test_elements(self):
self.queue.push_some(some_elements)
elements = self.queue.elements()
assert len(set(some_elements).difference(set(elements))) is 0
def test_elements_first_elements(self):
self.queue.push_some(some_elements)
assert self.queue.first_elements(3) == some_elements[0:3]
def test_disambiguate(self):
self.queue = SmartQueue(
id_args=['test', 'testing'],
redis_conn=redis_conn,
disambiguator=Disambiguator,
)
assert self.queue.push(ELEMENT_SPAM) == ELEMENT_SPAM
assert self.queue.push(ELEMENT_SPAM_UPPERCASED) == ''
def test_disambiguate_some(self):
self.queue = SmartQueue(
id_args=['test', 'testing'],
redis_conn=redis_conn,
disambiguator=Disambiguator,
)
queued_elements = self.queue.push_some(some_elements)
assert (set(queued_elements) - (set(some_elements)) == set())
assert self.queue.push(ELEMENT_SPAM_UPPERCASED) == ''
def test_disambiguate_invalid(self):
with pytest.raises(PimPamQueuesDisambiguatorInvalidError):
self.queue = SmartQueue(
id_args=['test', 'testing'],
redis_conn=redis_conn,
disambiguator=DisambiguatorInvalid,
)
def test_delete(self):
self.queue.push(element=ELEMENT_42)
assert self.queue.num() == 1
assert self.queue.delete() is True
assert self.queue.num() == 0
def test_queue_new_queue_remove_queued_elements(self):
self.queue.push(ELEMENT_EGG)
assert self.queue.is_not_empty() is True
queue = SmartQueue(
id_args=['test', 'testing'],
keep_previous=False,
redis_conn=redis_conn
)
assert queue.is_empty() is True
def teardown(self):
self.queue.delete()
if __name__ == '__main__':
pytest.main()
|
jordimarinvalle/requeues
|
pimpamqueues/bucketqueue.py
|
<filename>pimpamqueues/bucketqueue.py
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import redis
from pimpamqueues import QUEUE_COLLECTION_OF_ELEMENTS
from pimpamqueues import Tools
from pimpamqueues.exceptions import PimPamQueuesError
from pimpamqueues.exceptions import PimPamQueuesElementWithoutValueError
class BucketQueue(object):
'''
A lightweight queue. Bucket Queue, uniqueness and super-fast queue
element checking.
'''
QUEUE_TYPE_NAME = 'bucket'
def __init__(self, id_args, collection_of=QUEUE_COLLECTION_OF_ELEMENTS,
keep_previous=True, redis_conn=None):
'''
Create a BucketQueue object.
Arguments:
:id_args -- list, list's values will be used to name the queue
:collection_of -- string (default: QUEUE_COLLECTION_OF_ELEMENTS),
a type descriptor of queued elements
:keep_previous -- boolean (default: true),
a flag to create a fresh queue or not
:redis_conn -- redis.client.Redis (default: None), a redis
connection will be created using the default
redis.client.Redis connection params.
'''
self.id_args = id_args
self.collection_of = collection_of
if redis_conn is None:
redis_conn = redis.Redis()
self.redis = redis_conn
self.key_queue_bucket = self.get_key_bucket()
if keep_previous is False:
self.delete()
def __str__(self):
'''
Return a string representation of the class.
Returns: string
'''
return '<BucketQueue: %s (%s)>' % (self.key_queue_bucket, self.num())
def get_key_bucket(self):
'''
Get a key id that will be used to store/retrieve data from
the redis server.
Returns: string
'''
return 'queue:%s:type:%s:of:%s' % ('.'.join(self.id_args),
BucketQueue.QUEUE_TYPE_NAME,
self.collection_of)
def push(self, element):
'''
Push an element into the queue.
Arguments:
:element -- string
Returns: string, the element if it was queued, otherwise an empty string
'''
if element in ('', None):
raise PimPamQueuesElementWithoutValueError()
return element if self.push_some([element, ]) else ''
def push_some(self, elements, num_block_size=None):
'''
Push a bunch of elements into the queue.
Arguments:
:elements -- a collection of strings
:num_block_size -- integer (default: none)
Returns: list of strings, list of queued elements
'''
try:
elements = list(elements)
block_slices = Tools.get_block_slices(
num_elements=len(elements),
num_block_size=num_block_size
)
queued_elements = []
for s in block_slices:
queued_elements.extend(self.__push_some(elements[s[0]:s[1]]))
return queued_elements
except Exception as e:
raise PimPamQueuesError(str(e))
def pop(self):
'''
Pop a random element from the queue.
If no element is popped, it returns None.
Returns: string, the popped element, or None if no element is popped
'''
return self.redis.spop(self.key_queue_bucket)
def num(self):
'''
Get the number of elements that are queued.
Returns: integer, the number of elements that are queued
'''
return self.redis.scard(self.key_queue_bucket)
def is_empty(self):
'''
Check if the queue is empty.
Returns: boolean, true if queue is empty, otherwise false
'''
return self.num() == 0
def is_not_empty(self):
'''
Check if the queue is not empty.
Returns: boolean, true if queue is not empty, otherwise false
'''
return not self.is_empty()
def is_element(self, element):
'''
Check if an element is in the queue. It returns true if the element is in
the queue, otherwise false.
Arguments:
:element -- string
Returns: boolean
'''
return self.redis.sismember(self.key_queue_bucket, element)
def elements(self, num_elements=-1):
'''
Get some (or even all) unordered queued elements.
By default it returns all queued elements.
Note
====
Elements are not popped.
Arguments:
:num_elements -- integer (default: -1).
Returns: set
'''
if num_elements == -1:
return self.redis.smembers(self.key_queue_bucket)
return set(self.redis.srandmember(self.key_queue_bucket, num_elements))
def delete(self):
'''
Delete the queue with all its elements.
Returns: boolean, true if queue has been deleted, otherwise false
'''
return True if self.redis.delete(self.key_queue_bucket) else False
def __push_some(self, elements):
'''
Push some elements into the queue.
Arguments:
:elements -- a collection of strings
Returns: list of strings, a list with queued elements
'''
keys = [self.key_queue_bucket, ]
return self.redis.eval(self.__lua_push(), len(keys),
*(keys + elements))
def __lua_push(self):
return """
local elements = {}
for i=1, #ARGV do
if redis.call('SADD', KEYS[1], ARGV[i]) == 1 then
table.insert(elements, ARGV[i])
end
end
return elements
"""
|
jordimarinvalle/requeues
|
pimpamqueues/__init__.py
|
<filename>pimpamqueues/__init__.py
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import math
NUM_BLOCK_SIZE = 1000
QUEUE_COLLECTION_OF_URLS = 'urls'
QUEUE_COLLECTION_OF_JOBS = 'jobs'
QUEUE_COLLECTION_OF_TASKS = 'tasks'
QUEUE_COLLECTION_OF_ITEMS = 'items'
QUEUE_COLLECTION_OF_ELEMENTS = 'elements'
VERSION_MAJOR = 1
VERSION_MINOR = 0
VERSION_MICRO = 2
__version__ = "%s" % (".".join(str(v) for v in [VERSION_MAJOR, VERSION_MINOR,
VERSION_MICRO]))
class Tools(object):
@staticmethod
def get_block_slices(num_elements, num_block_size=None):
'''
Get how many loops are needed and, for each loop, the from/to positions for a
bunch of elements that are going to be pushed. It is useful when a big
amount of elements is pipelined to the Redis server.
Arguments:
:num_elements -- integer, number of elements that are going to
be pushed
:num_block_size -- integer (default: none), how big are going to be
the Redis pipeline blocks
Returns: list of lists
'''
block_slices = []
if num_block_size is None:
num_block_size = NUM_BLOCK_SIZE
if num_block_size > num_elements or num_elements == 0:
return [[0, num_elements]]
num_loops = int(math.ceil(num_elements / float(num_block_size)))
for i in range(0, num_loops):
position_from = i * num_block_size
position_to = position_from + num_block_size
block_slices.append([position_from, position_to])
return block_slices
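# Hedged example (illustrative): how get_block_slices splits a push of 27
# elements into Redis pipeline blocks of 10 elements each, and how it falls back
# to a single block when fewer elements than NUM_BLOCK_SIZE are pushed.
if __name__ == '__main__':
    print(Tools.get_block_slices(27, 10))  # [[0, 10], [10, 20], [20, 30]]
    print(Tools.get_block_slices(5))       # [[0, 5]]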
|
jordimarinvalle/requeues
|
setup.py
|
<gh_stars>1-10
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import sys
from setuptools import setup
from setuptools.command.test import test as TestCommand
from pimpamqueues import __version__
class TestTox(TestCommand):
def finalize_options(self):
TestCommand.finalize_options(self)
self.test_args = []
self.test_suite = True
def run_tests(self):
import tox
sys.exit(tox.cmdline(self.test_args))
class Setup(object):
@staticmethod
def long_description(filenames=['README.rst']):
try:
descriptions = []
for filename in filenames:
descriptions.append(open(filename).read())
return "\n\n".join(descriptions)
except Exception:
return ''
setup(
name='pimpamqueues',
version='%s' % (__version__, ),
description='Lightweight queue interfaces with Redis super powers for '
'distributed and non-distributed systems',
long_description='%s' % (Setup.long_description(['README.rst',
'HISTORY.rst'])),
author='<NAME>',
author_email='<EMAIL>',
url='https://github.com/jordimarinvalle/pimpamqueues',
license='MIT',
platforms='all',
packages=[
'pimpamqueues',
],
install_requires=[
'redis',
],
extras_require={
'redis': ['redis', ],
'testing': ['pytest', ],
},
tests_require=[
'pytest',
],
cmdclass={
'test': TestTox,
},
test_suite="tests",
classifiers=[
'Development Status :: 5 - Production/Stable',
'Intended Audience :: Developers',
'License :: OSI Approved :: MIT License',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
'Topic :: System :: Distributed Computing',
'Topic :: Utilities',
],
keywords=[
'queue',
'queues',
'distributed system',
'distributed systems',
'redis',
'lua',
],
)
|
jordimarinvalle/requeues
|
tests/test_simplequeue.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import pytest
from tests import redis_conn
from pimpamqueues.simplequeue import SimpleQueue
ELEMENT_EGG = b'egg'
ELEMENT_BACON = b'bacon'
ELEMENT_SPAM = b'spam'
ELEMENT_42 = b'42'
some_elements = [
ELEMENT_EGG,
ELEMENT_BACON,
ELEMENT_SPAM,
ELEMENT_42,
ELEMENT_SPAM,
]
class TestSimpleQueue(object):
def setup(self):
self.queue = SimpleQueue(
id_args=['test', 'testing'],
redis_conn=redis_conn
)
def test_empty(self):
assert self.queue.num() is 0
assert self.queue.is_empty() is True
assert self.queue.is_not_empty() is False
def test_push(self):
assert self.queue.push(ELEMENT_EGG) == 1
def test_push_to_first(self):
self.queue.push(ELEMENT_EGG)
self.queue.push(ELEMENT_BACON)
self.queue.push(ELEMENT_SPAM)
self.queue.push(ELEMENT_42, to_first=True)
assert self.queue.pop() == ELEMENT_42
def test_push_some(self):
assert self.queue.push_some(some_elements) == len(some_elements)
def test_push_some_to_first(self):
self.queue.push(ELEMENT_42)
self.queue.push_some(elements=[ELEMENT_EGG, ELEMENT_BACON],
to_first=True)
assert self.queue.pop() == ELEMENT_EGG
def test_pop(self):
self.queue.push(ELEMENT_EGG)
assert self.queue.pop() == ELEMENT_EGG
def test_pop_none(self):
assert self.queue.pop() is None
def test_elements(self):
self.queue.push_some(some_elements)
elements = self.queue.elements()
assert len(set(some_elements).difference(set(elements))) is 0
def test_elements_first_elements(self):
self.queue.push_some(some_elements)
assert self.queue.first_elements(3) == some_elements[0:3]
def test_remove(self):
self.queue.push_some([ELEMENT_EGG, ELEMENT_BACON, ELEMENT_SPAM])
assert self.queue.remove(ELEMENT_SPAM) is True
assert self.queue.remove(ELEMENT_SPAM) is False
def test_delete(self):
self.queue.push(element=ELEMENT_42)
assert self.queue.num() == 1
assert self.queue.delete() is True
assert self.queue.num() == 0
def test_queue_new_queue_remove_queued_elements(self):
self.queue.push(ELEMENT_EGG)
assert self.queue.is_not_empty() is True
queue = SimpleQueue(
id_args=['test', 'testing'],
keep_previous=False,
redis_conn=redis_conn
)
assert queue.is_empty() is True
def teardown(self):
self.queue.delete()
if __name__ == '__main__':
pytest.main()
|
jordimarinvalle/requeues
|
pimpamqueues/smartqueue.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import redis
from pimpamqueues import QUEUE_COLLECTION_OF_ELEMENTS
from pimpamqueues import Tools
from pimpamqueues.simplequeue import SimpleQueue
from pimpamqueues.bucketqueue import BucketQueue
from pimpamqueues.exceptions import PimPamQueuesError
from pimpamqueues.exceptions import PimPamQueuesElementWithoutValueError
from pimpamqueues.exceptions import PimPamQueuesDisambiguatorInvalidError
class SmartQueue(SimpleQueue, BucketQueue):
'''
A lightweight queue. Smart Queue. It only adds a unique element once
during the queue's lifetime. If an element is pushed more than once,
the queue will not be altered.
'''
QUEUE_TYPE_NAME = 'smart'
def __init__(self, id_args, collection_of=QUEUE_COLLECTION_OF_ELEMENTS,
keep_previous=True, redis_conn=None, disambiguator=None):
'''
Create a SmartQueue object.
Arguments:
:id_args -- list, list's values will be used to name the queue
:collection_of -- string (default: QUEUE_COLLECTION_OF_ELEMENTS),
a type descriptor of queued elements
:keep_previous -- boolean (default: true),
a flag to create a fresh queue or not
:redis_conn -- redis.client.Redis (default: None), a redis
connection will be created using the default
redis.client.Redis connection params.
:disambiguator -- class (default: none), a class with a disambiguate
static method which receives a string as an argument
and returns a string. It is used to filter out
those elements that do not need to be pushed again.
Raise:
:PimPamQueuesDisambiguatorInvalidError(), if disambiguator argument
is invalid
'''
self.id_args = id_args
self.collection_of = collection_of
if disambiguator and not disambiguator.__dict__.get('disambiguate'):
raise PimPamQueuesDisambiguatorInvalidError()
self.disambiguator = disambiguator
if redis_conn is None:
redis_conn = redis.Redis()
self.redis = redis_conn
self.key_queue = self.get_key_queue()
self.key_queue_bucket = self.get_key_bucket()
self.keys = [self.key_queue, self.key_queue_bucket, ]
if keep_previous is False:
self.delete()
def __str__(self):
'''
Return a string representation of the class.
Returns: string
'''
return '<SmartQueue: %s (%s)>' % (self.key_queue, self.num())
def push(self, element, to_first=False, force=False):
'''
Push an element into the queue. The element can be pushed to the first
or last position (by default it is pushed to the last position).
Arguments:
:element -- string
:to_first -- boolean (default: False)
:force -- boolean (default: False)
Raise:
:PimPamQueuesError(), if element can not be pushed
:PimPamQueuesElementWithoutValueError(), if element has no value
Returns: string, the queued element if it was queued,
otherwise an empty string
'''
if element in ('', None):
raise PimPamQueuesElementWithoutValueError()
try:
if self.push_some([element, ], to_first, force):
return element
return ''
except Exception:
raise PimPamQueuesError("%s was not pushed" % (element))
def push_some(self, elements, to_first=False, force=False,
num_block_size=None):
'''
Push a bunch of elements into the queue. Elements can be pushed to the
first or last position (by default are pushed to the last position).
Arguments:
:elements -- a collection of strings
:to_first -- boolean (default: false)
:force -- boolean (default: False)
:num_block_size -- integer (default: none)
Raise:
:PimPamQueuesError(), if element can not be pushed
Returns: list of strings, a list with queued elements
'''
try:
elements = self.disambiguate_some(list(elements))
if to_first:
elements.reverse()
block_slices = Tools.get_block_slices(
num_elements=len(elements),
num_block_size=num_block_size
)
queued_elements = []
for s in block_slices:
some_elements = self.__push_some(
elements=elements[s[0]:s[1]],
to_first=to_first,
force=force
)
queued_elements.extend(some_elements)
return queued_elements
except Exception as e:
raise PimPamQueuesError(str(e))
def disambiguate(self, element):
'''
Disambiguates an element, if a disambiguator is configured.
Arguments:
:element -- string
Returns: string
'''
if self.__has_to_disambiguate():
return self.disambiguator.disambiguate(element)
return element
def disambiguate_some(self, elements):
'''
Disambiguates a list of elements, if a disambiguator is configured.
Arguments:
:elements -- a collection of strings
Returns: list of strings
'''
if self.__has_to_disambiguate():
return [self.disambiguate(element) for element in elements]
return elements
def delete(self):
'''
Delete the queue with all its elements.
Returns: boolean, true if queue has been deleted, otherwise false
'''
pipe = self.redis.pipeline()
for key in self.keys:
pipe.delete(key)
return len(pipe.execute()) == len(self.keys)
def __has_to_disambiguate(self):
'''
Check if disambiguation code has to be triggered.
Returns: boolean, true if queue needs to disambiguate, otherwise false
'''
return bool(self.disambiguator)
def __push_some(self, elements, to_first=False, force=False):
'''
Push some elements into the queue. Elements can be pushed to the
first or last position (by default are pushed to the last position).
Arguments:
:elements -- a collection of strings
:to_first -- boolean (default: false)
:force -- boolean (default: False)
Returns: list of strings, a list with queued elements
'''
push_to = 'lpush' if to_first is True else 'rpush'
keys = [self.key_queue_bucket, self.key_queue, push_to]
return self.redis.eval(self.__lua_push(force), len(keys),
*(keys + elements))
def __lua_push(self, force=False):
if force:
return """
local elements = {}
for i=1, #ARGV do
redis.call('SADD', KEYS[1], ARGV[i])
table.insert(elements, ARGV[i])
end
for i=1, #elements do
redis.call(KEYS[3], KEYS[2], elements[i])
end
return elements
"""
return """
local elements = {}
for i=1, #ARGV do
if redis.call('SADD', KEYS[1], ARGV[i]) == 1 then
table.insert(elements, ARGV[i])
end
end
for i=1, #elements do
redis.call(KEYS[3], KEYS[2], elements[i])
end
return elements
"""
|
Cog-Creators/Red-GitHubBot
|
red_githubbot/routers/auto_apply_changelog_entry_pending.py
|
<filename>red_githubbot/routers/auto_apply_changelog_entry_pending.py<gh_stars>0
from gidgethub import sansio
from .. import utils
from . import gh_router
@gh_router.register("pull_request", action="closed")
async def auto_apply_changelog_entry_pending(event: sansio.Event) -> None:
pr_data = event.data["pull_request"]
if not pr_data["merged"] or pr_data["base"]["ref"] != "V3/develop":
return
for label in pr_data["labels"]:
if label["name"].startswith("Changelog Entry: "):
return
installation_id = event.data["installation"]["id"]
gh = await utils.get_gh_client(installation_id)
await gh.post(f"{pr_data['issue_url']}/labels", data=["Changelog Entry: Pending"])
|
Cog-Creators/Red-GitHubBot
|
red_githubbot/tasks.py
|
<reponame>Cog-Creators/Red-GitHubBot
import logging
import os
import subprocess
from aiohttp import web
from apscheduler.schedulers.asyncio import AsyncIOScheduler
from . import utils
from .constants import FORK_REPO, GIT_EMAIL, GIT_NAME, REPO_NAME, UPSTREAM_REPO
_log = logging.getLogger(__name__)
# consider adding kwarg:
# job_defaults={"misfire_grace_time": None}
scheduler = AsyncIOScheduler()
async def on_startup(app: web.Application) -> None:
_prepare_red_git_repo()
# https://help.heroku.com/ZKNTJQSK
database_url = os.environ["DATABASE_URL"]
if database_url.startswith("postgres://"):
database_url = database_url.replace("postgres://", "postgresql://", 1)
scheduler.add_jobstore("sqlalchemy", alias="default", url=database_url)
scheduler.add_jobstore("memory", alias="memory")
scheduler.start()
def _prepare_git() -> None:
subprocess.check_output(("git", "config", "--global", "user.name", GIT_NAME))
subprocess.check_output(("git", "config", "--global", "user.email", GIT_EMAIL))
def _prepare_red_git_repo() -> None:
_log.info(f"Setting up {REPO_NAME} repository...")
if REPO_NAME in os.listdir("."):
os.chdir(f"./{REPO_NAME}")
_log.info("%s directory already exists.", REPO_NAME)
return
_prepare_git()
subprocess.check_output(
(
"git",
"clone",
f"https://{utils.machine_gh.oauth_token}:x-oauth-basic@github.com/{FORK_REPO}",
)
)
os.chdir(f"./{REPO_NAME}")
subprocess.check_output(
("git", "remote", "add", "upstream", f"https://github.com/{UPSTREAM_REPO}")
)
_log.info("Finished setting up %s repository.", REPO_NAME)
|
Cog-Creators/Red-GitHubBot
|
red_githubbot/routers/backport_pr.py
|
import asyncio
import logging
from cherry_picker import cherry_picker
from gidgethub import sansio
from .. import utils
from ..constants import MAINTENANCE_BRANCHES, UPSTREAM_REPO, UPSTREAM_USERNAME
from . import gh_router
log = logging.getLogger(__name__)
CHERRY_PICKER_CONFIG = {
"team": UPSTREAM_USERNAME,
"repo": "Red-DiscordBot",
"check_sha": "6251c585e4ec0a53813a9993ede3ab5309024579",
"fix_commit_msg": False,
"default_branch": "V3/develop",
}
@gh_router.register("pull_request", action="closed")
@gh_router.register("pull_request", action="labeled")
async def backport_pr(event: sansio.Event) -> None:
if not event.data["pull_request"]["merged"]:
return
installation_id = event.data["installation"]["id"]
gh = await utils.get_gh_client(installation_id)
pr_number = event.data["pull_request"]["number"]
sender = event.data["sender"]["login"]
head_sha = event.data["pull_request"]["head"]["sha"]
commit_hash = event.data["pull_request"]["merge_commit_sha"]
pr_labels = []
if event.data["action"] == "labeled":
pr_labels = [event.data["label"]]
else:
gh_issue = await gh.getitem(
event.data["repository"]["issues_url"],
{"number": f"{event.data['pull_request']['number']}"},
)
pr_labels = await gh.getitem(gh_issue["labels_url"])
unsupported_branches = []
branches = []
for label in pr_labels:
if label["name"].startswith("Needs Backport To"):
branch = label["name"].rsplit(maxsplit=1)[1]
if branch not in MAINTENANCE_BRANCHES:
unsupported_branches.append(branch)
continue
branches.append(branch)
if unsupported_branches:
log.warning(
"Seen a Needs Backport label with unsupported branches (%s)",
", ".join(unsupported_branches),
)
await utils.leave_comment(
gh,
pr_number,
f"Sorry @{sender}, {'some of' if branches else ''} the branches you want to backport"
f" to ({', '.join(unsupported_branches)}) seem to not be maintenance branches."
" Please consider reporting this to Red-GitHubBot's issue tracker"
" and backport using [cherry_picker](https://pypi.org/project/cherry-picker/)"
" on command line.\n"
"```\n"
f"cherry_picker {commit_hash} <branches...>\n"
"```",
)
if branches:
check_run_id = await utils.post_check_run(
gh,
name=f"Backport to {branch}",
head_sha=head_sha,
status=utils.CheckRunStatus.IN_PROGRESS,
)
sorted_branches = sorted(
branches, reverse=True, key=lambda v: tuple(map(int, v.split(".")))
)
for branch in sorted_branches:
try:
utils.add_job(
backport_task,
installation_id=installation_id,
commit_hash=commit_hash,
branch=branch,
pr_number=pr_number,
sender=sender,
check_run_id=check_run_id,
)
except utils.DB_ERRORS as exc:
await utils.leave_comment(
gh,
pr_number,
f"I'm having trouble backporting to `{branch}`.\n"
f"Reason '`{exc}`'.\n"
f"Please retry by removing and re-adding the"
f" `Needs Backport To {branch}` label.",
)
async def backport_task(
*,
installation_id: int,
commit_hash: str,
branch: str,
pr_number: int,
sender: str,
check_run_id: int,
) -> None:
async with utils.git_lock:
gh = await utils.get_gh_client(installation_id)
try:
cp = await asyncio.to_thread(backport, commit_hash=commit_hash, branch=branch)
except cherry_picker.BranchCheckoutException:
summary = (
f"Sorry @{sender}, I had trouble checking out the `{branch}` backport branch."
" Please backport using [cherry_picker](https://pypi.org/project/cherry-picker/)"
" on command line.\n"
"```\n"
f"cherry_picker {commit_hash} {branch}\n"
"```"
)
conclusion = utils.CheckRunConclusion.FAILURE
output = utils.CheckRunOutput(
title="Failed to backport due to checkout failure.", summary=summary
)
await utils.leave_comment(gh, pr_number, summary)
except cherry_picker.CherryPickException:
summary = (
f"Sorry, @{sender}, I could not cleanly backport this to `{branch}`"
" due to a conflict."
" Please backport using [cherry_picker](https://pypi.org/project/cherry-picker/)"
" on command line.\n"
"```\n"
f"cherry_picker {commit_hash} {branch}\n"
"```"
)
conclusion = utils.CheckRunConclusion.FAILURE
output = utils.CheckRunOutput(
title="Failed to backport due to a conflict.", summary=summary
)
await utils.leave_comment(gh, pr_number, summary)
except Exception:
summary = (
f"Sorry, @{sender}, I'm having trouble backporting this to `{branch}`.\n"
f"Please retry by removing and re-adding the **Needs Backport To {branch}** label.\n"
"If this issue persists, please report this to Red-GitHubBot's issue tracker"
" and backport using [cherry_picker](https://pypi.org/project/cherry-picker/)"
" on command line.\n"
"```\n"
f"cherry_picker {commit_hash} {branch}\n"
"```"
)
conclusion = utils.CheckRunConclusion.FAILURE
output = utils.CheckRunOutput(
title="Failed to backport due to an unexpected error.", summary=summary
)
await utils.leave_comment(gh, pr_number, summary)
await utils.patch_check_run(
gh,
check_run_id=check_run_id,
conclusion=utils.CheckRunConclusion.FAILURE,
output=output,
)
raise
else:
conclusion = utils.CheckRunConclusion.SUCCESS
output = utils.CheckRunOutput(
title=f"Backport PR (#{cp.pr_number}) created.",
summary=(
f"#{cp.pr_number} is a backport of this pull request to"
f" [Red {branch}](https://github.com/{UPSTREAM_REPO}/tree/{branch})."
),
details_url=f"https://github.com/{UPSTREAM_REPO}/pull/{cp.pr_number}",
)
await utils.patch_check_run(
gh, check_run_id=check_run_id, conclusion=conclusion, output=output
)
def backport(*, commit_hash: str, branch: str) -> cherry_picker.CherryPicker:
cp = _get_cherry_picker(commit_hash=commit_hash, branch=branch)
try:
cp.backport()
except cherry_picker.BranchCheckoutException:
# We need to set the state to BACKPORT_PAUSED so that CherryPicker allows us to
# abort it, switch back to the default branch, and clean up the backport branch.
#
# Ideally, we would be able to do it in a less-hacky way but that will require some changes
# in the upstream, so for now this is probably the best we can do here.
cp.initial_state = cherry_picker.WORKFLOW_STATES.BACKPORT_PAUSED
cp.abort_cherry_pick()
raise
except cherry_picker.CherryPickException:
# We need to get a new CherryPicker here to get an up-to-date (PAUSED) state.
cp = _get_cherry_picker(commit_hash=commit_hash, branch=branch)
cp.abort_cherry_pick()
raise
return cp
def _get_cherry_picker(*, commit_hash: str, branch: str) -> cherry_picker.CherryPicker:
return cherry_picker.CherryPicker(
pr_remote="origin",
commit_sha1=commit_hash,
branches=[branch],
config=CHERRY_PICKER_CONFIG,
)
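# Hedged note on the branch ordering above: maintenance branches are "X.Y"
# strings, so the tuple(map(int, ...)) key sorts them numerically instead of
# lexicographically, e.g.
#
#     sorted(["3.4", "3.10", "3.5"], reverse=True,
#            key=lambda v: tuple(map(int, v.split("."))))
#     # -> ["3.10", "3.5", "3.4"]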
|
Cog-Creators/Red-GitHubBot
|
red_githubbot/routers/__init__.py
|
import importlib
import pkgutil
from gidgethub import routing
DISABLED_ROUTERS = {"fix_committed_and_released"}
gh_router = routing.Router()
# import all submodules in order to fill `gh_router` with all the routes
for loader, module_name, is_pkg in pkgutil.iter_modules(__path__, __name__ + "."):
_, _, router_name = module_name.rpartition(".")
if router_name not in DISABLED_ROUTERS:
importlib.import_module(module_name)
|
Cog-Creators/Red-GitHubBot
|
red_githubbot/routers/maintenance_branch_actions.py
|
import logging
import re
from typing import Any
from gidgethub import sansio
from .. import utils
from ..constants import MAINTENANCE_BRANCHES, UPSTREAM_REPO
from . import gh_router
log = logging.getLogger(__name__)
TITLE_RE = re.compile(r"\s*\[(?P<branch>\d+\.\d+)\].+\(#(?P<pr_number>\d+)\)")
MAINTENANCE_BRANCH_TITLE_RE = re.compile(
r"^\s*\[(?P<branch>\d+\.\d+)\].+?(\(#(?P<pr_number>\d+)\))?\s*$"
)
CHECK_RUN_NAME = "Verify title of PR to maintenance branch"
@gh_router.register("pull_request", action="opened")
@gh_router.register("pull_request", action="edited")
async def handle_backport_prs(event: sansio.Event) -> None:
"""
Handle backport and its original PR.
This will remove a "Needs Backport To" label from the original PR
and comment on it about this backport.
It will also update the backport with labels from the original PR.
"""
if event.data["action"] == "edited" and "title" not in event.data["changes"]:
return
pr_data = event.data["pull_request"]
installation_id = event.data["installation"]["id"]
title = utils.normalize_title(pr_data["title"], pr_data["body"])
if (match := TITLE_RE.match(title)) is None:
return
gh = await utils.get_gh_client(installation_id)
branch = match.group("branch")
original_pr_number = match.group("pr_number")
original_pr_data = await gh.getitem(
event.data["repository"]["issues_url"], {"number": original_pr_number}
)
await _remove_backport_label(
gh,
original_pr_data=original_pr_data,
branch=branch,
backport_pr_number=event.data["number"],
)
await utils.copy_over_labels(
gh, source_issue_data=original_pr_data, target_issue_number=event.data["number"]
)
async def _remove_backport_label(
gh: utils.GitHubAPI,
*,
original_pr_data: dict[str, Any],
branch: str,
backport_pr_number: int,
) -> None:
"""
Remove the appropriate "Needs Backport To" label on the original PR.
Also leave a comment on the original PR referencing the backport PR.
"""
backport_label = f"Needs Backport To {branch}"
if not any(label_data["name"] == backport_label for label_data in original_pr_data["labels"]):
return
await gh.delete(original_pr_data["labels_url"], {"name": backport_label})
message = (
f"#{backport_pr_number} is a backport of this pull request to"
f" [Red {branch}](https://github.com/{UPSTREAM_REPO}/tree/{branch})."
)
await gh.post(original_pr_data["comments_url"], data={"body": message})
@gh_router.register("pull_request", action="opened")
@gh_router.register("pull_request", action="reopened")
@gh_router.register("pull_request", action="edited")
@gh_router.register("pull_request", action="synchronize")
@gh_router.register("check_run", action="rerequested")
async def validate_maintenance_branch_pr(event: sansio.Event) -> None:
"""
Check the PR title for maintenance branch pull requests.
If the PR was made against a maintenance branch and the title does not
match the maintenance branch PR pattern, then post a failure status.
A maintenance branch PR's title has to start with `[X.Y]`.
"""
if event.event == "pull_request":
if event.data["action"] == "edited" and "title" not in event.data["changes"]:
return
installation_id = event.data["installation"]["id"]
gh = await utils.get_gh_client(installation_id)
pr_data, head_sha = await utils.get_pr_data_for_check_run(
gh, event=event, check_run_name=CHECK_RUN_NAME, get_pr_data=True
)
if pr_data is None:
return
base_branch = pr_data["base"]["ref"]
if base_branch not in MAINTENANCE_BRANCHES:
return
title = utils.normalize_title(pr_data["title"], pr_data["body"])
match = MAINTENANCE_BRANCH_TITLE_RE.match(title)
original_pr_number = match and match.group("pr_number")
if match is None:
conclusion = utils.CheckRunConclusion.FAILURE
title = f"[{base_branch}] {title}"
output = utils.CheckRunOutput(
title="PR title is not prefixed with the branch's name.",
summary=(
"Title of a PR made to a maintenance branch must be prefixed"
f" with the branch's name, for example:\n```\n{title}\n```"
),
)
elif match.group("branch") != base_branch:
conclusion = utils.CheckRunConclusion.FAILURE
title = f"[{base_branch}] " + title.replace(f"[{match.group('branch')}] ", "", 1)
output = utils.CheckRunOutput(
title="PR title is prefixed with incorrect branch's name.",
summary=(
"Title of a PR made to a maintenance branch must be prefixed"
f" with the branch's name, for example:\n```\n{title}\n```"
),
)
else:
conclusion = utils.CheckRunConclusion.SUCCESS
output = utils.CheckRunOutput(
title="PR title is prefixed with maintenance branch's name.",
summary="Title of a PR has a proper prefix.",
)
if original_pr_number is None:
output.summary += (
"\n\n"
"Note: If this is a backport of a different PR,"
" you should also include the original PR number, for example:\n"
f"```\n{title} (#123)\n```"
)
if conclusion is utils.CheckRunConclusion.SUCCESS:
conclusion = utils.CheckRunConclusion.NEUTRAL
output.title = f"{output.title[:-1]}, but it does not include original PR number."
await utils.post_check_run(
gh, name=CHECK_RUN_NAME, head_sha=head_sha, conclusion=conclusion, output=output
)
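# Hedged examples of titles the patterns above accept (titles are
# illustrative; "3.4" is the only maintenance branch in constants.py):
#
#     TITLE_RE.match("[3.4] Fix crash in audio cog (#1234)")
#         # backport PR: branch="3.4", pr_number="1234"
#     MAINTENANCE_BRANCH_TITLE_RE.match("[3.4] Bump version")
#         # direct PR to the branch: branch="3.4", pr_number=None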
|
Cog-Creators/Red-GitHubBot
|
red_githubbot/routers/auto_delete_pr_branch.py
|
import contextlib
import gidgethub
from gidgethub import sansio
from .. import utils
from ..constants import FORK_REPO, MACHINE_USERNAME
from . import gh_router
@gh_router.register("pull_request", action="closed")
async def auto_delete_pr_branch(event: sansio.Event) -> None:
pr_data = event.data["pull_request"]
if pr_data["user"]["login"] == MACHINE_USERNAME:
branch_name = pr_data["head"]["ref"]
branch_url = f"/repos/{FORK_REPO}/git/refs/heads/{branch_name}"
if pr_data["merged"]:
with contextlib.suppress(gidgethub.InvalidField):
await utils.machine_gh.delete(branch_url)
else:
# this is delayed to ensure that the bot doesn't remove the branch
# if PR was closed and reopened to rerun checks (or similar)
utils.run_job_in(
60, maybe_delete_pr_branch, pr_url=pr_data["url"], branch_url=branch_url
)
async def maybe_delete_pr_branch(*, pr_url: str, branch_url: str) -> None:
pr_data = await utils.machine_gh.getitem(pr_url)
if pr_data["state"] == "closed":
with contextlib.suppress(gidgethub.InvalidField):
await utils.machine_gh.delete(branch_url)
|
Cog-Creators/Red-GitHubBot
|
red_githubbot/routers/fix_committed_and_released.py
|
<gh_stars>0
import logging
import re
from typing import Any
import graphql_builder
from gidgethub import sansio
from .. import utils
from ..constants import REPO_NAME, UPSTREAM_REPO, UPSTREAM_USERNAME
from . import gh_router
log = logging.getLogger(__name__)
GET_CLOSED_EVENT_QUERY = utils.minify_graphql_call(
"""
query getClosedEvent($owner: String! $name: String! $issue_number: Int!) {
repository(owner: $owner name: $name) {
issue(number: $issue_number) {
timelineItems(itemTypes: [CLOSED_EVENT] last: 1) {
nodes {
... on ClosedEvent {
closer {
__typename
}
}
}
}
}
}
}
)
MAINTENANCE_BRANCH_TITLE_RE = re.compile(
r"^\s*\[(?P<branch>\d+\.\d+)\].+?(?:\(#(?P<pr_number>\d+)\))"
)
class GetClosedIssues(graphql_builder.OperationBuilder):
OPERATION_TYPE = graphql_builder.OperationType.QUERY
MAX_COST = 100
class Repository(graphql_builder.NestableFieldBuilder):
TEMPLATE = """
repository(owner: ${owner} name: ${name}) {
${nested_call}
}
"""
class FromCommit(graphql_builder.FieldBuilder):
TEMPLATE = """
commit_${commit_oid:literal}: object(oid: ${commit_oid}) {
... on Commit {
associatedPullRequests(first: 1) {
nodes {
number
closingIssuesReferences(last: 10) {
nodes {
id
number
closed
labels(last: 100) {
nodes {
name
}
}
timelineItems(itemTypes: [CLOSED_EVENT] last: 1) {
nodes {
... on ClosedEvent {
closer {
... on PullRequest {
id
number
}
}
}
}
}
}
}
}
}
}
}
"""
class FromPR(graphql_builder.FieldBuilder):
TEMPLATE = """
pr_${pr_number:literal}: pullRequest(number: ${pr_number}) {
number
closingIssuesReferences(last: 10) {
nodes {
id
number
closed
labels(last: 100) {
nodes {
name
}
}
timelineItems(itemTypes: [CLOSED_EVENT] last: 1) {
nodes {
... on ClosedEvent {
closer {
... on PullRequest {
id
number
}
}
}
}
}
}
}
}
"""
class AddAndRemoveLabels(graphql_builder.OperationBuilder):
OPERATION_TYPE = graphql_builder.OperationType.MUTATION
MAX_COST = 50
class Mutation(graphql_builder.FieldBuilder):
COST = 2
TEMPLATE = """
add${unique_id}: addLabelsToLabelable(
input: {
labelIds: ${labels_to_add}
labelableId: ${labelable_id}
}
) {
clientMutationId
}
remove${unique_id}: removeLabelsFromLabelable(
input: {
labelIds: ${labels_to_remove}
labelableId: ${labelable_id}
}
) {
clientMutationId
}
"""
@gh_router.register("issue", action="closed")
async def apply_resolution_if_closed_by_pr_or_commit(event: sansio.Event) -> None:
"""
Apply a resolution label automatically to issues that were closed by a PR.
"""
issue_data = event.data["issue"]
for label_data in issue_data["labels"]:
if label_data["name"].startswith("Resolution: "):
return
installation_id = event.data["installation"]["id"]
gh = await utils.get_gh_client(installation_id)
if await _has_closer(gh, issue_number=issue_data["number"]):
await gh.post(issue_data["labels_url"], data=["Resolution: Fix Committed"])
async def _has_closer(gh: utils.GitHubAPI, *, issue_number: int) -> bool:
data = await gh.graphql(
GET_CLOSED_EVENT_QUERY, owner=UPSTREAM_USERNAME, name=REPO_NAME, issue_number=issue_number
)
return data["repository"]["issue"]["timelineItems"]["nodes"][0]["closer"] is not None
@gh_router.register("workflow", action="completed")
async def apply_resolution_merged_on_release(event: sansio.Event) -> None:
workflow_data = event.data["workflow"]
if workflow_data["path"] != ".github/workflows/publish_release.yml":
return
if workflow_data["conclusion"] != "success":
return
workflow_run_data = event.data["workflow_run"]
tag_name = workflow_run_data["head_branch"]
if tag_name is None:
log.error("No tag name found for workflow run with ID: %s", workflow_run_data["id"])
return
utils.add_job(apply_resolution_merged_on_release_task, event=event, tag_name=tag_name)
async def apply_resolution_merged_on_release_task(*, event: sansio.Event, tag_name: str) -> None:
backport_commits, commits = await _get_git_commits(tag_name)
builder = _get_builder(backport_commits, commits)
installation_id = event.data["installation"]["id"]
gh = await utils.get_gh_client(installation_id)
label_builder = await _fetch_issues_resolved_by_release(gh, tag_name=tag_name, builder=builder)
await _update_resolution_labels(gh, tag_name=tag_name, label_builder=label_builder)
@utils.async_with_context(utils.git_lock)
async def _get_git_commits(tag_name: str) -> tuple[set[str], list[tuple[str, str]]]:
await utils.check_call("git", "fetch", "upstream")
previous_tag = await utils.check_output(
"git", "describe", "--abbrev=0", "--tags", f"{tag_name}~"
)
rev_range = f"{previous_tag}..{tag_name}"
backport_commits: set[str] = set(
(
await utils.check_output(
"git", "log", "--first-parent", "--format=%H", rev_range, "^V3/develop"
)
).splitlines()
)
commits: list[tuple[str, str]] = [
tuple(line.split(maxsplit=1)) # type: ignore[misc]
for line in (
await utils.check_output("git", "log", "--first-parent", "--format=%H %s", rev_range)
).splitlines()
]
return backport_commits, commits
def _get_builder(backport_commits: set[str], commits: list[tuple[str, str]]) -> GetClosedIssues:
operation_builder = GetClosedIssues()
repo_builder = operation_builder.Repository(owner=UPSTREAM_USERNAME, name=REPO_NAME)
for commit_oid, commit_header in commits:
if commit_oid not in backport_commits:
repo_builder.FromCommit(commit_oid=commit_oid)
continue
if match := MAINTENANCE_BRANCH_TITLE_RE.match(commit_header):
repo_builder.FromPR(pr_number=int(match.group("pr_number")))
else:
repo_builder.FromCommit(commit_oid=commit_oid)
return operation_builder
async def _get_label_ids(gh: utils.GitHubAPI) -> dict[str, list[str]]:
labels = {}
labels_url = f"/repos/{UPSTREAM_REPO}/labels{{/name}}"
labels["labels_to_add"] = [
(await gh.getitem(labels_url, {"name": "Resolution: Fix Released"}))["node_id"]
]
labels["labels_to_remove"] = [
(await gh.getitem(labels_url, {"name": "Resolution: Fix Committed"}))["node_id"]
]
return labels
async def _fetch_issues_resolved_by_release(
gh: utils.GitHubAPI, *, tag_name: str, builder: GetClosedIssues
) -> AddAndRemoveLabels:
issue_numbers_to_label: list[int] = []
label_ids = await _get_label_ids(gh)
label_builder = AddAndRemoveLabels()
for call in builder.iter_calls():
data = await gh.graphql(call)
repository_data = data["repository"]
for key, inner_data in repository_data.items():
if key.startswith("commit_"):
associated_prs = inner_data["associatedPullRequests"]["nodes"]
if not associated_prs:
continue
associated_pr_data = associated_prs[0]
else:
associated_pr_data = inner_data
for issue_data in _get_valid_closing_issue_refs(associated_pr_data):
issue_numbers_to_label.append(issue_data["number"])
label_builder.Mutation.append(labelable_id=issue_data["id"], **label_ids)
log.info(
"Finished fetching issues resolved by release %s:\n%r", tag_name, issue_numbers_to_label
)
return label_builder
def _get_valid_closing_issue_refs(associated_pr_data: dict[str, Any]) -> list[dict[str, Any]]:
valid_issues = []
closing_issue_refs = associated_pr_data["closingIssuesReferences"]["nodes"]
for issue_data in closing_issue_refs:
if not issue_data["closed"]:
log.info(
"Issue %s (related to PR %s) is not closed. Skipping...",
issue_data["number"],
associated_pr_data["number"],
)
continue
closer_data = issue_data["timelineItems"]["nodes"][0]["closer"]
if closer_data is None:
log.info(
"Issue %s (related to PR %s) was not closed by a PR. Skipping...",
issue_data["number"],
associated_pr_data["number"],
)
elif "number" not in closer_data:
log.info(
"Issue %s (related to PR %s) was closed by a commit, not a PR. Skipping...",
issue_data["number"],
associated_pr_data["number"],
)
elif closer_data["number"] != associated_pr_data["number"]:
log.info(
"Issue %s (related to PR %s) was closed by a different PR (%s). Skipping...",
issue_data["number"],
associated_pr_data["number"],
closer_data["number"],
)
elif _has_resolution_fix_committed(issue_data, associated_pr_data):
valid_issues.append(issue_data)
return valid_issues
def _has_resolution_fix_committed(
issue_data: dict[str, Any], associated_pr_data: dict[str, Any]
) -> bool:
resolution = None
for label_data in issue_data["labels"]["nodes"]:
if label_data["name"].startswith("Resolution: "):
resolution = label_data["name"]
break
else:
# `resolution` is always None here: the for-else branch only runs when no
# "Resolution: " label was found.
log.info(
"Issue %s (related to PR %s) does not have any resolution, skipping...",
issue_data["number"],
associated_pr_data["number"],
)
return False
if resolution == "Resolution: Fix Committed":
return True
else:
log.info(
"Issue %s (related to PR %s) is not closed, skipping...",
issue_data["number"],
associated_pr_data["number"],
)
return False
async def _update_resolution_labels(
gh: utils.GitHubAPI, *, tag_name: str, label_builder: AddAndRemoveLabels
) -> None:
for call in label_builder.iter_calls():
await gh.graphql(call)
log.info("Labels of all issues resolved by release %s have been updated.", tag_name)
|
Cog-Creators/Red-GitHubBot
|
red_githubbot/issue_parser/actions.py
|
<filename>red_githubbot/issue_parser/actions.py
ACTIONS = {
"close": (
"close",
"closes",
"closed",
"fix",
"fixes",
"fixed",
"resolve",
"resolves",
"resolved",
),
}
KEYWORDS = {
keyword: action for action, keyword_list in ACTIONS.items() for keyword in keyword_list
}
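# Hedged illustration of the inverted mapping above: every synonym points at
# its canonical action name, so the parser can normalise keywords, e.g.
#
#     KEYWORDS["fixes"]     # -> "close"
#     KEYWORDS["resolved"]  # -> "close"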
|
Cog-Creators/Red-GitHubBot
|
red_githubbot/constants.py
|
<reponame>Cog-Creators/Red-GitHubBot
MACHINE_USERNAME = "Red-GitHubBot"
UPSTREAM_USERNAME = "Cog-Creators"
REQUESTER = f"{UPSTREAM_USERNAME}/Red-GitHubBot"
REPO_NAME = "Red-DiscordBot"
FORK_REPO = f"{MACHINE_USERNAME}/{REPO_NAME}"
UPSTREAM_REPO = f"{UPSTREAM_USERNAME}/{REPO_NAME}"
GIT_NAME = MACHINE_USERNAME
GIT_EMAIL = "87398303+red-githubbot[bot]@<EMAIL>"
MAINTENANCE_BRANCHES = {"3.4"}
|
Cog-Creators/Red-GitHubBot
|
red_githubbot/__main__.py
|
import logging
import os
from aiohttp import web
from .web import app as web_app
log = logging.getLogger("red_githubbot")
def main() -> None:
if _sentry_dsn := os.environ.get("SENTRY_DSN"):
# pylint: disable=import-outside-toplevel
import sentry_sdk
from sentry_sdk.integrations.pure_eval import PureEvalIntegration
sentry_sdk.init(
_sentry_dsn,
release=os.environ["HEROKU_SLUG_COMMIT"],
traces_sample_rate=0.1,
integrations=[PureEvalIntegration()],
)
logging.basicConfig(
format="[{levelname}] {name}: {message}",
style="{",
level=logging.INFO,
)
port = int(os.environ.get("PORT", 8080))
# in aiohttp 4.0, we will need to pass `loop` kwarg here
web.run_app(web_app, port=port)
if __name__ == "__main__":
main()
|
Cog-Creators/Red-GitHubBot
|
red_githubbot/routers/blocked_labels_check.py
|
<gh_stars>0
from gidgethub import sansio
from .. import utils
from . import gh_router
CHECK_RUN_NAME = "Blocked status"
@gh_router.register("pull_request", action="opened")
@gh_router.register("pull_request", action="reopened")
@gh_router.register("pull_request", action="synchronize")
@gh_router.register("pull_request", action="labeled")
@gh_router.register("pull_request", action="unlabeled")
@gh_router.register("check_run", action="rerequested")
async def check_for_blocked_labels(event: sansio.Event) -> None:
installation_id = event.data["installation"]["id"]
gh = await utils.get_gh_client(installation_id)
pr_data, head_sha = await utils.get_pr_data_for_check_run(
gh, event=event, check_run_name=CHECK_RUN_NAME
)
if pr_data is None or pr_data["merged"]:
return
blocked_labels = [
label_data
for label_data in pr_data["labels"]
if label_data["name"] == "Blocked" or label_data["name"].startswith("Blocked By: ")
]
if blocked_labels:
conclusion = utils.CheckRunConclusion.FAILURE
summary = "The PR is labeled with these Blocked labels:\n" + "\n".join(
f"- {label_data['name']} - {label_data['description'] or 'No description'}"
for label_data in blocked_labels
)
output = utils.CheckRunOutput(
title=(
"PR is blocked by something, see labels and PR description for more information."
),
summary=summary,
)
else:
conclusion = utils.CheckRunConclusion.SUCCESS
output = utils.CheckRunOutput(
title="PR is not blocked by anything.",
summary="The PR is not labeled with any Blocked labels.",
)
await utils.post_check_run(
gh, name=CHECK_RUN_NAME, head_sha=head_sha, conclusion=conclusion, output=output
)
|
Cog-Creators/Red-GitHubBot
|
red_githubbot/issue_parser/__init__.py
|
<reponame>Cog-Creators/Red-GitHubBot
from ._parser import parse_issue_body
__all__ = ("parse_issue_body",)
|
Cog-Creators/Red-GitHubBot
|
red_githubbot/routers/keep_up_to_date_application_ids.py
|
<gh_stars>0
import logging
from gidgethub import sansio
from .. import utils
from . import gh_router
log = logging.getLogger(__name__)
@gh_router.register("installation", action="created")
@gh_router.register("installation", action="deleted")
async def update_installation_id_cache(event: sansio.Event) -> None:
installation_data = event.data["installation"]
login = installation_data["account"]["login"].lower()
if event.data["action"] == "created":
utils.gh_installation_id_cache[login] = installation_data["id"]
else:
utils.gh_installation_id_cache.pop(login, None)
|
Cog-Creators/Red-GitHubBot
|
red_githubbot/web.py
|
import asyncio
import logging
import os
import gidgethub
from aiohttp import web
from gidgethub import sansio
from . import tasks, utils
from .constants import UPSTREAM_REPO
from .routers import gh_router
log = logging.getLogger(__name__)
routes = web.RouteTableDef()
@routes.get("/")
async def hello(_request: web.Request) -> web.Response:
# maybe one day there will be some front-facing UI, you can never know...
return web.Response(text="Hello, world")
@routes.post("/webhook")
async def webhook(request: web.Request) -> web.Response:
try:
body = await request.read()
secret = os.environ["GH_WEBHOOK_SECRET"]
try:
event = sansio.Event.from_http(request.headers, body, secret=secret)
except gidgethub.ValidationFailure as exc:
log.info("GH webhook failed secret validation: %s", exc)
return web.Response(status=401, text=str(exc))
except gidgethub.BadRequest as exc:
log.info("GH webhook received a bad request (%d): %s", exc.status_code, exc)
return web.Response(status=exc.status_code.value, text=str(exc))
log.info("GH delivery ID: %s", event.delivery_id)
if event.event == "ping":
return web.Response(status=200)
# We don't want to handle events received from the bot's fork
repo_full_name = event.data.get("repository", {}).get("full_name")
if repo_full_name is not None and repo_full_name != UPSTREAM_REPO:
return web.Response(status=200)
# Give GitHub some time to reach internal consistency.
await asyncio.sleep(1)
await gh_router.dispatch(event)
return web.Response(status=200)
except Exception as exc:
log.error("The app did not handle an exception", exc_info=exc)
return web.Response(status=500)
async def on_startup(app: web.Application) -> None:
await tasks.on_startup(app)
async def on_cleanup(app: web.Application) -> None:
await utils.session.close()
app = web.Application()
app.add_routes(routes)
app.on_startup.append(on_startup)
app.on_cleanup.append(on_cleanup)
|
Cog-Creators/Red-GitHubBot
|
red_githubbot/issue_parser/_regexes.py
|
import regex
from .actions import KEYWORDS
# partial regex pattern strings
_KEYWORD_PATTERN = rf"""
(?:
(?P<keyword_name>{'|'.join(map(regex.escape, KEYWORDS))})
# the issue reference needs to be delimited from the keyword by
# any amount of whitespace and optionally one colon in it
(?:\ |\t)*
(?:\ |\t|:)
(?:\ |\t)*
)
"""
_AUTO_REF_PATTERN = rf"""
# match (optional) keyword
{_KEYWORD_PATTERN}?
# match issue prefix
(?:
# match GitHub issue/pull URL
#
# domain is optional on purpose as GitHub autolinks without the domain too
(?:https://github\.com/)?
(?P<slug>[\w\-\.]+/[\w\-\.]+)/
(?:issues|pull)/
|
# match autolinked reference with an optional repo name
(?P<slug>[\w\-\.]+/[\w\-\.]+)?
(?:\#|gh-)
)
# match issue number
(?P<issue_number>\d+)
# ensure the match doesn't end with a word character
(?!\w)
"""
_MENTION_PATTERN = r"@(?P<mention>[\w\-\.]+)"
# Pattern objects
TEXT_RE = regex.compile(
rf"""
# ensure the match doesn't start with a word character
(?:[^\w\n\v\r]|^)
(?:
{_AUTO_REF_PATTERN}
|
{_MENTION_PATTERN}
)
""",
regex.IGNORECASE | regex.MULTILINE | regex.VERBOSE,
)
KEYWORD_RE = regex.compile(
rf"""
# ensure the match doesn't start with a word character
(?:[^\w\n\v\r]|^)
{_KEYWORD_PATTERN}
$
""",
regex.IGNORECASE | regex.VERBOSE,
)
ISSUE_URL_RE = regex.compile(
r"""
^
(?:https://github\.com/)
(?P<slug>[\w\-\.]+/[\w\-\.]+)/
(?:issues|pull)/
(?P<issue_number>\d+)
(?!\w)
""",
regex.IGNORECASE | regex.VERBOSE,
)
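# Hedged examples of what TEXT_RE picks out of plain issue text (only the
# relevant named groups are shown for each match):
#
#     text = "Fixes #123, see also org/repo#7 and thanks @someone"
#     [m.groupdict() for m in TEXT_RE.finditer(text)]
#     # 1st match: keyword_name="Fixes", slug=None,       issue_number="123"
#     # 2nd match: keyword_name=None,    slug="org/repo", issue_number="7"
#     # 3rd match: mention="someone"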
|
Cog-Creators/Red-GitHubBot
|
red_githubbot/issue_parser/_parser.py
|
<reponame>Cog-Creators/Red-GitHubBot
from typing import Any, Optional
import regex
from ..utils import parse_markdown
from ._regexes import ISSUE_URL_RE, KEYWORD_RE, TEXT_RE
from .actions import ACTIONS, KEYWORDS
from .wrappers import ParsedIssue, ParsedIssueAction, ParsedIssueMention, ParsedIssueRef
def parse_issue_body(body: str) -> ParsedIssue:
issue = ParsedIssue(ACTIONS.keys())
_parse_children(issue, parse_markdown(body))
return issue
def _parse_children(issue: ParsedIssue, nodes: list[dict[str, Any]]) -> None:
for idx, node in enumerate(nodes):
node_type = node["type"]
if node_type in ("codespan", "inline_html", "block_code", "block_html"):
continue
if node_type == "link":
_parse_link(issue, previous_node=nodes[idx - 1] if idx else None, node=node)
continue
if text := node.get("text"):
_parse_text(issue, text)
if children := node.get("children"):
_parse_children(issue, children)
def _parse_text(issue: ParsedIssue, text: str) -> None:
for match in TEXT_RE.finditer(text):
username = match.group("mention")
if username is not None:
fragment = ParsedIssueMention(username=username)
issue.fragments.append(fragment)
issue.mentions.append(fragment)
continue
_append_parsed_ref(issue, match=match, keyword_name=match.group("keyword_name"))
def _parse_link(
issue: ParsedIssue, *, previous_node: Optional[dict[str, Any]], node: dict[str, Any]
) -> None:
match = ISSUE_URL_RE.match(node["link"])
if match is None:
return
keyword_name = None
if previous_node is not None and previous_node["type"] == "text":
keyword_match = KEYWORD_RE.search(previous_node["text"])
keyword_name = keyword_match and keyword_match.group("keyword_name")
_append_parsed_ref(issue, match=match, keyword_name=keyword_name)
def _append_parsed_ref(
issue: ParsedIssue, *, match: regex.Match[str], keyword_name: Optional[str]
) -> None:
issue_number = int(match.group("issue_number"))
slug = match.group("slug")
if keyword_name is None:
ref = ParsedIssueRef(slug=slug, issue_number=issue_number)
issue.refs.append(ref)
else:
action = KEYWORDS[keyword_name.lower()]
ref = ParsedIssueAction(slug=slug, issue_number=issue_number, action=action)
issue.actions[action].append(ref)
issue.fragments.append(ref)
issue.refs_and_actions.append(ref)
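# Hedged end-to-end sketch (assumes a mistune AST whose text nodes expose a
# "text" key, as _parse_children expects; the issue body is illustrative):
#
#     issue = parse_issue_body("Fixes #123 and mentions @octocat, see org/repo#7")
#     issue.actions["close"]   # [ParsedIssueAction(slug=None, issue_number=123, action='close')]
#     issue.refs               # [ParsedIssueRef(slug='org/repo', issue_number=7)]
#     issue.mentions           # [ParsedIssueMention(username='octocat')]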
|
Cog-Creators/Red-GitHubBot
|
red_githubbot/issue_parser/wrappers.py
|
<gh_stars>0
from collections.abc import Iterable
from dataclasses import InitVar, dataclass, field
from typing import Optional
@dataclass
class ParsedIssueFragment:
pass
@dataclass
class ParsedIssueRef(ParsedIssueFragment):
slug: Optional[str]
issue_number: int
@dataclass
class ParsedIssueAction(ParsedIssueRef):
action: str
@dataclass
class ParsedIssueMention(ParsedIssueFragment):
username: str
@dataclass
class ParsedIssue:
action_names: InitVar[Iterable[str]]
actions: dict[str, list[ParsedIssueAction]] = field(default_factory=dict)
refs: list[ParsedIssueRef] = field(default_factory=list)
refs_and_actions: list[ParsedIssueRef] = field(default_factory=list, repr=False)
mentions: list[ParsedIssueMention] = field(default_factory=list)
fragments: list[ParsedIssueFragment] = field(default_factory=list, repr=False)
def __post_init__(self, action_names: Iterable[str]) -> None:
for action_name in action_names:
self.actions.setdefault(action_name, [])
|
Cog-Creators/Red-GitHubBot
|
red_githubbot/utils.py
|
import asyncio
import dataclasses
import datetime
import enum
import functools
import logging
import os
import subprocess
from collections.abc import Callable, Coroutine, Mapping, MutableMapping
from contextlib import AbstractAsyncContextManager, AbstractContextManager
from typing import Any, Optional, TypeVar
import aiohttp
import cachetools
import mistune
import sqlalchemy.exc
from apscheduler.job import Job
from apscheduler.triggers.interval import IntervalTrigger
from gidgethub import aiohttp as gh_aiohttp, apps, sansio
from typing_extensions import ParamSpec
from . import tasks
from .constants import MACHINE_USERNAME, REQUESTER, UPSTREAM_REPO
log = logging.getLogger(__name__)
DB_ERRORS = (sqlalchemy.exc.OperationalError,)
git_lock = asyncio.Lock()
session = aiohttp.ClientSession()
_gh_cache: MutableMapping[Any, Any] = cachetools.LRUCache(maxsize=500)
_gh_installation_tokens_cache: MutableMapping[int, str] = cachetools.TTLCache(
maxsize=100, ttl=55 * 60
)
gh_installation_id_cache: MutableMapping[str, int] = cachetools.LRUCache(100)
parse_markdown = mistune.create_markdown(
renderer="ast", plugins=("strikethrough", "table", "task_lists")
)
class GitHubAPI(gh_aiohttp.GitHubAPI):
def __init__(self, client_name: str, *, oauth_token: Optional[str] = None) -> None:
"""
GitHub API client that logs current rate limit status after each request.
This class requires the developer to pass a client name so that it can be included in the logs.
"""
self.client_name = client_name
super().__init__(session, REQUESTER, cache=_gh_cache, oauth_token=oauth_token)
async def _request(
self, method: str, url: str, headers: Mapping[str, str], body: bytes = b""
) -> tuple[int, Mapping[str, str], bytes]:
ret = await super()._request(method, url, headers, body)
# rate limit information is carried in the *response* headers
rate_limit = sansio.RateLimit.from_http(ret[1])
if rate_limit is None:
log.info(
"Processing GitHub API response...\n"
" - Client name: %s\n"
" - Request Method: %s\n"
" - Request URL: %s",
self.client_name,
method,
url,
)
else:
log.info(
"Processing GitHub API response...\n"
" - Client Name: %s\n"
" - Request Method: %s\n"
" - Request URL: %s\n"
" - Rate Limit Points Remaining: %d/%d\n"
" - Rate Limit Resets At: %s",
self.client_name,
method,
url,
rate_limit.remaining,
rate_limit.limit,
rate_limit.reset_datetime,
)
return ret
machine_gh = GitHubAPI(
f"{MACHINE_USERNAME} (Machine account)", oauth_token=os.environ.get("GH_AUTH")
)
class CheckRunStatus(enum.Enum):
QUEUED = "queued"
IN_PROGRESS = "in_progress"
COMPLETED = "completed"
class CheckRunConclusion(enum.Enum):
ACTION_REQUIRED = "action_required"
CANCELLED = "cancelled"
FAILURE = "failure"
NEUTRAL = "neutral"
SUCCESS = "success"
SKIPPED = "skipped"
TIMED_OUT = "timed_out"
def _noneless_dict_factory(result: list[tuple[str, Any]]) -> dict[str, Any]:
return dict((key, value) for key, value in result if value is not None)
@dataclasses.dataclass
class CheckRunOutput:
title: str
summary: str
text: Optional[str] = None
# Output can also contain `annotations` and `images` but they can always be added in the future
def to_dict(self) -> dict[str, Any]:
return dataclasses.asdict(self, dict_factory=_noneless_dict_factory)
async def get_gh_client(
installation_id: Optional[int] = None, *, slug: str = UPSTREAM_REPO
) -> GitHubAPI:
if installation_id is None:
installation_id = await get_installation_id_by_repo(slug)
return GitHubAPI(
f"Installation {installation_id}",
oauth_token=await get_installation_access_token(installation_id),
)
async def get_installation_id_by_repo(slug: str, *, force_refresh: bool = False) -> int:
owner = slug.split("/", maxsplit=1)[0].lower()
if force_refresh:
jwt = apps.get_jwt(
app_id=os.environ["GH_APP_ID"], private_key=os.environ["GH_PRIVATE_KEY"]
)
installation_data = await machine_gh.getitem(f"/repos/{slug}/installation", jwt=jwt)
installation_id = installation_data["id"]
gh_installation_id_cache[owner] = installation_id
return installation_id
try:
return gh_installation_id_cache[owner]
except KeyError:
return await get_installation_id_by_repo(slug, force_refresh=True)
async def get_installation_access_token(
installation_id: int, *, force_refresh: bool = False
) -> str:
if force_refresh:
token_data = await apps.get_installation_access_token(
machine_gh,
installation_id=str(installation_id),
app_id=os.environ["GH_APP_ID"],
private_key=os.environ["GH_PRIVATE_KEY"],
)
token = token_data["token"]
_gh_installation_tokens_cache[installation_id] = token
return token
try:
return _gh_installation_tokens_cache[installation_id]
except KeyError:
return await get_installation_access_token(installation_id, force_refresh=True)
async def leave_comment(gh: GitHubAPI, issue_number: int, body: str) -> None:
issue_comment_url = f"/repos/{UPSTREAM_REPO}/issues/{issue_number}/comments"
data = {"body": body}
await gh.post(issue_comment_url, data=data)
async def post_check_run(
gh: GitHubAPI,
*,
name: str,
head_sha: str,
status: Optional[CheckRunStatus] = None,
conclusion: Optional[CheckRunConclusion] = None,
details_url: Optional[str] = None,
output: Optional[CheckRunOutput] = None,
) -> int:
check_run_url = f"/repos/{UPSTREAM_REPO}/check-runs"
data: dict[str, Any] = {"name": name, "head_sha": head_sha}
if status is not None:
if conclusion is not None and status is not CheckRunStatus.COMPLETED:
raise RuntimeError("`status` needs to be `COMPLETED` when `conclusion` is provided.")
data["status"] = status.value
if conclusion is not None:
data["conclusion"] = conclusion.value
if details_url is not None:
data["details_url"] = details_url
if output is not None:
data["output"] = output.to_dict()
return (await gh.post(check_run_url, data=data))["id"]
async def patch_check_run(
gh: GitHubAPI,
*,
check_run_id: int,
status: Optional[CheckRunStatus] = None,
conclusion: Optional[CheckRunConclusion] = None,
details_url: Optional[str] = None,
output: Optional[CheckRunOutput] = None,
) -> None:
check_run_updates_url = f"/repos/{UPSTREAM_REPO}/check-runs/{check_run_id}"
data = {}
if status is not None:
if conclusion is not None and status is not CheckRunStatus.COMPLETED:
raise RuntimeError("`status` needs to be `COMPLETED` when `conclusion` is provided.")
data["status"] = status.value
if conclusion is not None:
data["conclusion"] = conclusion.value
if details_url is not None:
data["details_url"] = details_url
if output is not None:
data["output"] = output.to_dict()
await gh.patch(check_run_updates_url, data=data)
async def get_open_pr_for_commit(
gh: GitHubAPI, sha: str, *, get_pr_data: bool = False
) -> Optional[dict[str, Any]]:
"""
Get the most recently updated open PR associated with the given commit.
This is needed for `check_run` hooks because GitHub does not provide associated PR
when the PR is made from a fork.
Note: This is like getting a PR from the issues endpoint
so some PR attributes might be missing.
To get full PR data, you need to set the `get_pr_data` kwarg to `True`.
"""
search_results = await gh.getitem(
"/search/issues{?q,sort}",
{"q": f"type:pr repo:{UPSTREAM_REPO} sha:{sha} is:open", "sort": "updated"},
)
if search_results["total_count"] > 0:
if search_results["total_count"] > 1:
log.warning(
"Found more than one possible candidate when searching for an open PR"
" associated with the commit `%s`. Choosing the most recently updated one...",
sha,
)
issue_data = search_results["items"][0]
if get_pr_data:
return await gh.getitem(issue_data["pull_request"]["url"])
return issue_data
return None
async def get_pr_data_for_check_run(
gh: GitHubAPI,
*,
event: sansio.Event,
check_run_name: str,
get_pr_data: bool = False,
) -> tuple[Optional[dict[str, Any]], str]:
if event.event == "pull_request":
pr_data = event.data["pull_request"]
head_sha = pr_data["head"]["sha"]
else:
check_run_data = event.data["check_run"]
head_sha = check_run_data["head_sha"]
if check_run_data["name"] != check_run_name:
return None, head_sha
pull_requests = check_run_data["pull_requests"]
if len(pull_requests) > 1:
# if this happens, I want this on Sentry
log.error(
"Check run with ID %s was rerequested but multiple PRs were found:\n%r",
check_run_data["id"],
pull_requests,
)
return None, head_sha
elif pull_requests:
pr_data = pull_requests[0]
else:
pr_data = await get_open_pr_for_commit(gh, head_sha, get_pr_data=get_pr_data)
if pr_data is None:
log.error(
"Could not find an open PR for the rerequested check run with ID %s",
check_run_data["id"],
)
return None, head_sha
return pr_data, head_sha
async def copy_over_labels(
gh: GitHubAPI,
*,
source_issue_data: dict[str, Any],
target_issue_number: int,
copyable_labels_prefixes: tuple[str, ...] = (
"Type: ",
"Release Blocker",
"High Priority",
"Breaking Change",
),
) -> None:
"""Copy over relevant labels from one issue/PR to another."""
labels = [
label_data["name"]
for label_data in source_issue_data["labels"]
if label_data["name"].startswith(copyable_labels_prefixes)
]
if labels:
labels_url = f"/repos/{UPSTREAM_REPO}/issues/{target_issue_number}/labels"
await gh.post(labels_url, data=labels)
def minify_graphql_call(call: str) -> str:
"""
Minify GraphQL call.
Right now this just strips leading whitespace from all lines
which is enough to reduce size by ~50%.
"""
return "\n".join(line.lstrip() for line in call.strip().splitlines())
def normalize_title(title: str, body: str) -> str:
"""Normalize the title if it spills over into the PR's body."""
if not (title.endswith("…") and body.startswith("…")):
return title
else:
return title[:-1] + body[1:].partition("\n")[0].rstrip("\r")
_P = ParamSpec("_P")
def add_job(func: Callable[_P, Any], *args: _P.args, **kwargs: _P.kwargs) -> Job:
return tasks.scheduler.add_job(func, args=args, kwargs=kwargs)
def run_job_in(seconds: int, func: Callable[_P, Any], *args: _P.args, **kwargs: _P.kwargs) -> Job:
td = datetime.timedelta(seconds=seconds)
return tasks.scheduler.add_job(
func, "date", run_date=datetime.datetime.now() + td, args=args, kwargs=kwargs
)
_NoArgsCallableT = TypeVar("_NoArgsCallableT", bound=Callable[[], Any])
def interval_job(
job_id: Optional[str] = None,
*,
weeks: int = 0,
days: int = 0,
hours: int = 0,
minutes: int = 0,
seconds: int = 0,
) -> Callable[[_NoArgsCallableT], Any]:
def decorator(func: _NoArgsCallableT) -> _NoArgsCallableT:
nonlocal job_id
if job_id is None:
module_name = getattr(func, "__module__", None)
job_id = func.__name__
if module_name is not None:
job_id = f"{module_name}.{job_id}"
tasks.scheduler.add_job(
func,
IntervalTrigger(
weeks=weeks,
days=days,
hours=hours,
minutes=minutes,
seconds=seconds,
),
id=job_id,
jobstore="memory",
replace_existing=True,
)
return func
return decorator
async def call(program: str, *args: str) -> int:
process = await asyncio.create_subprocess_exec(program, *args)
return await process.wait()
async def check_call(program: str, *args: str) -> None:
process = await asyncio.create_subprocess_exec(program, *args)
await process.wait()
if process.returncode:
raise subprocess.CalledProcessError(process.returncode, (program, *args))
async def check_output(program: str, *args: str) -> str:
process = await asyncio.create_subprocess_exec(program, *args, stdout=asyncio.subprocess.PIPE)
stdout_data, stderr_data = await process.communicate()
stdout = stdout_data.decode().strip()
stderr = stderr_data.decode().strip() if stderr_data is not None else None
if process.returncode:
raise subprocess.CalledProcessError(process.returncode, (program, *args), stdout, stderr)
return stdout
_T = TypeVar("_T")
def async_with_context(
context_manager: AbstractAsyncContextManager,
) -> Callable[[Callable[_P, Coroutine[Any, Any, _T]]], Callable[_P, Coroutine[Any, Any, _T]]]:
def deco(func: Callable[_P, Coroutine[Any, Any, _T]]) -> Callable[_P, Coroutine[Any, Any, _T]]:
@functools.wraps(func)
async def inner(*args: _P.args, **kwargs: _P.kwargs) -> _T:
async with context_manager:
return await func(*args, **kwargs)
return inner
return deco
def with_context(
context_manager: AbstractContextManager,
) -> Callable[[Callable[_P, _T]], Callable[_P, _T]]:
def deco(func: Callable[_P, _T]) -> Callable[_P, _T]:
@functools.wraps(func)
def inner(*args: _P.args, **kwargs: _P.kwargs) -> _T:
with context_manager:
return func(*args, **kwargs)
return inner
return deco
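# Hedged usage sketches for the scheduling and context helpers above
# (function names and bodies below are illustrative only):
#
#     @interval_job(minutes=30)
#     def refresh_caches() -> None:
#         ...  # runs every 30 minutes from the in-memory jobstore
#
#     @async_with_context(git_lock)
#     async def refresh_git_checkout() -> None:
#         await check_call("git", "fetch", "upstream")  # whole call holds git_lock
#
#     run_job_in(60, refresh_git_checkout)  # schedule a one-off run in a minute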
|
uxcn/x2x
|
setup.py
|
from distutils.core import setup
setup(
url = 'https://github.com/uxcn/x2x',
name = 'x2x',
version = '0.9',
fullname = 'x2x',
description = 'commands to convert radixes',
long_description = '''
x2x
Commands to convert radixes.
* x2b - convert to binary
* x2o - convert to octal
* x2d - convert to decimal
* x2h - convert to hexadecimal
* x2x - convert any radix to any radix
Installing
----------
PyPI
::
pip install x2x
From source
::
python setup.py install
Versions
--------
0.9 (Jan, 2016)
* first release
''',
classifiers = ['Development Status :: 4 - Beta',
'Environment :: Console',
'Intended Audience :: Science/Research',
'Intended Audience :: Information Technology',
'Intended Audience :: System Administrators',
'Topic :: Scientific/Engineering',
'Topic :: Scientific/Engineering :: Mathematics',
'Topic :: Software Development',
'Topic :: System :: Shells',
'Natural Language :: English',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.4',
'License :: OSI Approved :: MIT License'],
author = '<NAME>',
maintainer = '<NAME>',
author_email = '<EMAIL>',
maintainer_email = '<EMAIL>',
keywords = 'radix base convert binary octal decimal hexadecimal',
license = 'MIT',
scripts = ['x2x', 'x2b', 'x2o', 'x2d', 'x2h'],
)
|
syed-ahmed/tensorflow
|
tensorflow/contrib/estimator/python/estimator/multi_head_test.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for head."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import six
from tensorflow.contrib.estimator.python.estimator import head as head_lib
from tensorflow.contrib.estimator.python.estimator import multi_head as multi_head_lib
from tensorflow.core.framework import summary_pb2
from tensorflow.python.estimator import model_fn
from tensorflow.python.estimator.canned import metric_keys
from tensorflow.python.estimator.canned import prediction_keys
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import ops
from tensorflow.python.ops import string_ops
from tensorflow.python.platform import test
from tensorflow.python.saved_model import signature_constants
_DEFAULT_SERVING_KEY = signature_constants.DEFAULT_SERVING_SIGNATURE_DEF_KEY
def _initialize_variables(test_case, scaffold):
scaffold.finalize()
test_case.assertIsNone(scaffold.init_feed_dict)
test_case.assertIsNone(scaffold.init_fn)
scaffold.init_op.run()
scaffold.ready_for_local_init_op.eval()
scaffold.local_init_op.run()
scaffold.ready_op.eval()
test_case.assertIsNotNone(scaffold.saver)
def _assert_simple_summaries(test_case, expected_summaries, summary_str,
tol=1e-6):
"""Assert summary the specified simple values.
Args:
test_case: test case.
expected_summaries: Dict of expected tags and simple values.
summary_str: Serialized `summary_pb2.Summary`.
tol: Tolerance for relative and absolute.
"""
summary = summary_pb2.Summary()
summary.ParseFromString(summary_str)
test_case.assertAllClose(expected_summaries, {
v.tag: v.simple_value for v in summary.value
}, rtol=tol, atol=tol)
def _assert_no_hooks(test_case, spec):
test_case.assertAllEqual([], spec.training_chief_hooks)
test_case.assertAllEqual([], spec.training_hooks)
def _sigmoid(logits):
return 1 / (1 + np.exp(-logits))
class MultiHeadTest(test.TestCase):
def setUp(self):
ops.reset_default_graph()
def test_no_heads(self):
with self.assertRaisesRegexp(
ValueError, r'Must specify heads\. Given: \[\]'):
multi_head_lib.multi_head(heads=[])
def test_head_name_missing(self):
head1 = head_lib.multi_label_head(n_classes=2, name='head1')
head2 = head_lib.multi_label_head(n_classes=3)
with self.assertRaisesRegexp(
ValueError, r'All given heads must have name specified\.'):
multi_head_lib.multi_head([head1, head2])
def test_head_weights_wrong_size(self):
head1 = head_lib.multi_label_head(n_classes=2, name='head1')
head2 = head_lib.multi_label_head(n_classes=3, name='head2')
with self.assertRaisesRegexp(
ValueError,
r'heads and head_weights must have the same size\. '
r'Given len\(heads\): 2. Given len\(head_weights\): 1\.'):
multi_head_lib.multi_head([head1, head2], head_weights=[1.])
def test_name(self):
head1 = head_lib.multi_label_head(n_classes=2, name='head1')
head2 = head_lib.multi_label_head(n_classes=3, name='head2')
multi_head = multi_head_lib.multi_head([head1, head2])
self.assertEqual('head1_head2', multi_head.name)
def test_predict_two_heads_logits_dict(self):
"""Tests predict with logits as dict."""
head1 = head_lib.multi_label_head(n_classes=2, name='head1')
head2 = head_lib.multi_label_head(n_classes=3, name='head2')
multi_head = multi_head_lib.multi_head([head1, head2])
logits = {
'head1': np.array([[-1., 1.], [-1.5, 1.]], dtype=np.float32),
'head2': np.array([[2., -2., 2.], [-3., 2., -2.]], dtype=np.float32)
}
expected_probabilities = {
'head1': _sigmoid(logits['head1']),
'head2': _sigmoid(logits['head2']),
}
spec = multi_head.create_estimator_spec(
features={'x': np.array(((42,),), dtype=np.int32)},
mode=model_fn.ModeKeys.PREDICT,
logits=logits)
self.assertItemsEqual(
(_DEFAULT_SERVING_KEY, 'predict', 'head1', 'classification/head1',
'predict/head1', 'head2', 'classification/head2', 'predict/head2'),
spec.export_outputs.keys())
# Assert predictions and export_outputs.
with self.test_session() as sess:
_initialize_variables(self, spec.scaffold)
self.assertIsNone(spec.scaffold.summary_op)
predictions = sess.run(spec.predictions)
self.assertAllClose(
logits['head1'],
predictions[('head1', prediction_keys.PredictionKeys.LOGITS)])
self.assertAllClose(
logits['head2'],
predictions[('head2', prediction_keys.PredictionKeys.LOGITS)])
self.assertAllClose(
expected_probabilities['head1'],
predictions[('head1', prediction_keys.PredictionKeys.PROBABILITIES)])
self.assertAllClose(
expected_probabilities['head2'],
predictions[('head2', prediction_keys.PredictionKeys.PROBABILITIES)])
self.assertAllClose(
expected_probabilities['head1'],
sess.run(spec.export_outputs[_DEFAULT_SERVING_KEY].scores))
self.assertAllClose(
expected_probabilities['head1'],
sess.run(spec.export_outputs['head1'].scores))
self.assertAllClose(
expected_probabilities['head2'],
sess.run(spec.export_outputs['head2'].scores))
self.assertAllClose(
expected_probabilities['head1'],
sess.run(
spec.export_outputs['predict'].outputs['head1/probabilities']))
self.assertAllClose(
expected_probabilities['head2'],
sess.run(
spec.export_outputs['predict'].outputs['head2/probabilities']))
self.assertAllClose(
expected_probabilities['head1'],
sess.run(
spec.export_outputs['predict/head1'].outputs['probabilities']))
self.assertAllClose(
expected_probabilities['head2'],
sess.run(
spec.export_outputs['predict/head2'].outputs['probabilities']))
def test_predict_two_heads_logits_tensor(self):
"""Tests predict with logits as Tensor."""
head1 = head_lib.multi_label_head(n_classes=2, name='head1')
head2 = head_lib.multi_label_head(n_classes=3, name='head2')
multi_head = multi_head_lib.multi_head([head1, head2])
logits = np.array(
[[-1., 1., 2., -2., 2.], [-1.5, 1., -3., 2., -2.]], dtype=np.float32)
expected_logits1 = np.array([[-1., 1.], [-1.5, 1.]], dtype=np.float32)
expected_logits2 = np.array([[2., -2., 2.], [-3., 2., -2.]],
dtype=np.float32)
expected_probabilities = {
'head1': _sigmoid(expected_logits1),
'head2': _sigmoid(expected_logits2),
}
spec = multi_head.create_estimator_spec(
features={'x': np.array(((42,),), dtype=np.int32)},
mode=model_fn.ModeKeys.PREDICT,
logits=logits)
self.assertItemsEqual(
(_DEFAULT_SERVING_KEY, 'predict', 'head1', 'classification/head1',
'predict/head1', 'head2', 'classification/head2', 'predict/head2'),
spec.export_outputs.keys())
# Assert predictions and export_outputs.
with self.test_session() as sess:
_initialize_variables(self, spec.scaffold)
self.assertIsNone(spec.scaffold.summary_op)
predictions = sess.run(spec.predictions)
self.assertAllClose(
expected_logits1,
predictions[('head1', prediction_keys.PredictionKeys.LOGITS)])
self.assertAllClose(
expected_logits2,
predictions[('head2', prediction_keys.PredictionKeys.LOGITS)])
self.assertAllClose(
expected_probabilities['head1'],
predictions[('head1', prediction_keys.PredictionKeys.PROBABILITIES)])
self.assertAllClose(
expected_probabilities['head2'],
predictions[('head2', prediction_keys.PredictionKeys.PROBABILITIES)])
self.assertAllClose(
expected_probabilities['head1'],
sess.run(spec.export_outputs[_DEFAULT_SERVING_KEY].scores))
self.assertAllClose(
expected_probabilities['head1'],
sess.run(spec.export_outputs['head1'].scores))
self.assertAllClose(
expected_probabilities['head2'],
sess.run(spec.export_outputs['head2'].scores))
def test_predict_two_heads_logits_tensor_multi_dim(self):
"""Tests predict with multi-dimensional logits of shape [2, 2, 5]."""
head1 = head_lib.regression_head(label_dimension=2, name='head1')
head2 = head_lib.regression_head(label_dimension=3, name='head2')
multi_head = multi_head_lib.multi_head([head1, head2])
logits = np.array(
[[[-1., 1., 2., -2., 2.], [-1., 1., 2., -2., 2.]],
[[-1.5, 1., -3., 2., -2.], [-1.5, 1., -3., 2., -2.]]],
dtype=np.float32)
expected_logits1 = np.array(
[[[-1., 1.], [-1., 1.]],
[[-1.5, 1.], [-1.5, 1.]]],
dtype=np.float32)
expected_logits2 = np.array(
[[[2., -2., 2.], [2., -2., 2.]],
[[-3., 2., -2.], [-3., 2., -2.]]],
dtype=np.float32)
spec = multi_head.create_estimator_spec(
features={'x': np.array(((42,),), dtype=np.int32)},
mode=model_fn.ModeKeys.PREDICT,
logits=logits)
self.assertItemsEqual(
(_DEFAULT_SERVING_KEY, 'predict', 'head1', 'regression/head1',
'predict/head1', 'head2', 'regression/head2', 'predict/head2'),
spec.export_outputs.keys())
# Assert predictions and export_outputs.
with self.test_session() as sess:
_initialize_variables(self, spec.scaffold)
self.assertIsNone(spec.scaffold.summary_op)
predictions = sess.run(spec.predictions)
self.assertAllClose(
expected_logits1,
predictions[('head1', prediction_keys.PredictionKeys.PREDICTIONS)])
self.assertAllClose(
expected_logits2,
predictions[('head2', prediction_keys.PredictionKeys.PREDICTIONS)])
self.assertAllClose(
expected_logits1,
sess.run(spec.export_outputs[_DEFAULT_SERVING_KEY].value))
self.assertAllClose(
expected_logits1,
sess.run(spec.export_outputs['head1'].value))
self.assertAllClose(
expected_logits2,
sess.run(spec.export_outputs['head2'].value))
def test_eval_two_heads_with_weights(self):
head1 = head_lib.multi_label_head(n_classes=2, name='head1')
head2 = head_lib.multi_label_head(n_classes=3, name='head2')
multi_head = multi_head_lib.multi_head(
[head1, head2], head_weights=[1., 2.])
logits = {
'head1': np.array([[-10., 10.], [-15., 10.]], dtype=np.float32),
'head2': np.array([[20., -20., 20.], [-30., 20., -20.]],
dtype=np.float32),
}
labels = {
'head1': np.array([[1, 0], [1, 1]], dtype=np.int64),
'head2': np.array([[0, 1, 0], [1, 1, 0]], dtype=np.int64),
}
# For large logits, sigmoid cross entropy loss is approximated as:
# loss = labels * (logits < 0) * (-logits) +
# (1 - labels) * (logits > 0) * logits =>
# head1: expected_unweighted_loss = [[10., 10.], [15., 0.]]
# loss = ( (10 + 10) / 2 + (15 + 0) / 2 ) / 2 = 8.75
# head2: expected_unweighted_loss = [[20., 20., 20.], [30., 0., 0]]
# loss = ( (20 + 20 + 20) / 3 + (30 + 0 + 0) / 3 ) / 2 = 15
expected_loss_head1 = 8.75
expected_loss_head2 = 15.
expected_loss = 1. * expected_loss_head1 + 2. * expected_loss_head2
spec = multi_head.create_estimator_spec(
features={'x': np.array(((42,),), dtype=np.int32)},
mode=model_fn.ModeKeys.EVAL,
logits=logits,
labels=labels)
keys = metric_keys.MetricKeys
expected_metrics = {
keys.LOSS + '/head1': expected_loss_head1,
keys.LOSS + '/head2': expected_loss_head2,
# Average loss over examples.
keys.LOSS_MEAN + '/head1': expected_loss_head1,
keys.LOSS_MEAN + '/head2': expected_loss_head2,
# auc and auc_pr cannot be reliably calculated for only 4-6 samples, but
# this assert tests that the algorithm remains consistent.
keys.AUC + '/head1': 0.1667,
keys.AUC + '/head2': 0.3333,
keys.AUC_PR + '/head1': 0.6667,
keys.AUC_PR + '/head2': 0.5000,
}
# Assert spec contains expected tensors.
self.assertIsNotNone(spec.loss)
self.assertItemsEqual(expected_metrics.keys(), spec.eval_metric_ops.keys())
self.assertIsNone(spec.train_op)
self.assertIsNone(spec.export_outputs)
_assert_no_hooks(self, spec)
# Assert predictions, loss, and metrics.
tol = 1e-3
with self.test_session() as sess:
_initialize_variables(self, spec.scaffold)
self.assertIsNone(spec.scaffold.summary_op)
value_ops = {k: spec.eval_metric_ops[k][0] for k in spec.eval_metric_ops}
update_ops = {k: spec.eval_metric_ops[k][1] for k in spec.eval_metric_ops}
loss, metrics = sess.run((spec.loss, update_ops))
self.assertAllClose(expected_loss, loss, rtol=tol, atol=tol)
# Check results of both update (in `metrics`) and value ops.
self.assertAllClose(expected_metrics, metrics, rtol=tol, atol=tol)
self.assertAllClose(
expected_metrics, {k: value_ops[k].eval() for k in value_ops},
rtol=tol,
atol=tol)
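  # Added illustration (not part of the original test suite): a minimal numeric
  # check of the large-logit approximation used in the loss comments of the
  # tests above, i.e.
  #   -y*log(sigmoid(z)) - (1-y)*log(1-sigmoid(z)) ~= y*max(-z, 0) + (1-y)*max(z, 0).
  def _sigmoid_xent_approximation_example(self):
    logits = np.array([[-10., 10.], [-15., 10.]], dtype=np.float32)
    labels = np.array([[1., 0.], [1., 1.]], dtype=np.float32)
    exact = -(labels * np.log(_sigmoid(logits)) +
              (1. - labels) * np.log(1. - _sigmoid(logits)))
    approx = (labels * np.maximum(-logits, 0.) +
              (1. - labels) * np.maximum(logits, 0.))
    self.assertAllClose(exact, approx, atol=1e-3)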
def test_train_create_loss_one_head(self):
head1 = head_lib.multi_label_head(n_classes=2, name='head1')
multi_head = multi_head_lib.multi_head([head1])
logits = {'head1': np.array([[-10., 10.], [-15., 10.]], dtype=np.float32)}
labels = {'head1': np.array([[1, 0], [1, 1]], dtype=np.int64)}
loss = multi_head.create_loss(
features={'x': np.array(((42,),), dtype=np.int32)},
mode=model_fn.ModeKeys.TRAIN,
logits=logits,
labels=labels)[0]
tol = 1e-3
with self.test_session():
      # Loss of the head is [[(10 + 10) / 2], [(15 + 0) / 2]] = [[10.], [7.5]]
      # (averaged over classes); averaged over examples this gives 8.75.
self.assertAllClose(8.75, loss.eval(), rtol=tol, atol=tol)
def test_train_create_loss_two_heads_with_weights(self):
    # Use different example weights for each head.
weights1 = np.array([[1.], [2.]], dtype=np.float32)
weights2 = np.array([[2.], [3.]])
head1 = head_lib.multi_label_head(n_classes=2, name='head1',
weight_column='weights1')
head2 = head_lib.multi_label_head(n_classes=3, name='head2',
weight_column='weights2')
multi_head = multi_head_lib.multi_head(
[head1, head2], head_weights=[1., 2.])
logits = {
'head1': np.array([[-10., 10.], [-15., 10.]], dtype=np.float32),
'head2': np.array([[20., -20., 20.], [-30., 20., -20.]],
dtype=np.float32),
}
labels = {
'head1': np.array([[1, 0], [1, 1]], dtype=np.int64),
'head2': np.array([[0, 1, 0], [1, 1, 0]], dtype=np.int64),
}
training_loss, unreduced_losses, weights, _ = multi_head.create_loss(
features={
'x': np.array(((42,),), dtype=np.int32),
'weights1': weights1,
'weights2': weights2
},
mode=model_fn.ModeKeys.TRAIN,
logits=logits,
labels=labels)
tol = 1e-3
with self.test_session():
# loss of the first head is [[(10 + 10) / 2], [(15 + 0) / 2]]
# = [10, 7.5]
# training_loss = (1 * 10 + 2 * 7.5) / 2 = 12.5
# head-weighted unreduced_loss = 1 * [10, 7.5]
self.assertAllClose(
[[10.], [7.5]], unreduced_losses['head1'].eval(), rtol=tol, atol=tol)
# loss of the second head is [[(20 + 20 + 20) / 3], [(30 + 0 + 0) / 3]]
# = [20, 10]
# training_loss = (2 * 20 + 3 * 10) / 2 = 35
# head-weighted unreduced_loss = 2 * [20, 10]
self.assertAllClose(
[[40.], [20.]], unreduced_losses['head2'].eval(), rtol=tol, atol=tol)
# head-weighted training_loss = 1 * 12.5 + 2 * 35 = 82.5
self.assertAllClose(82.5, training_loss.eval(), rtol=tol, atol=tol)
# head-weighted example weights
self.assertAllClose(
[[1.], [2.]], weights['head1'].eval(), rtol=tol, atol=tol)
self.assertAllClose(
[[4.], [6.]], weights['head2'].eval(), rtol=tol, atol=tol)
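  # Added illustration (not part of the original test suite): the head-weighted
  # training loss asserted above is plain arithmetic over the per-head losses,
  # reproduced here with numpy for clarity.
  def _head_weighted_loss_arithmetic_example(self):
    unreduced1 = np.array([10., 7.5])   # per-example loss of head1
    unreduced2 = np.array([20., 10.])   # per-example loss of head2
    weights1 = np.array([1., 2.])       # example weights for head1
    weights2 = np.array([2., 3.])       # example weights for head2
    loss1 = np.sum(weights1 * unreduced1) / 2.  # (1*10 + 2*7.5) / 2 = 12.5
    loss2 = np.sum(weights2 * unreduced2) / 2.  # (2*20 + 3*10) / 2 = 35.
    self.assertAllClose(82.5, 1. * loss1 + 2. * loss2)  # head weights [1., 2.]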
def test_train_create_loss_logits_tensor(self):
"""Tests create_loss with logits Tensor."""
weights1 = np.array([[1.], [2.]], dtype=np.float32)
weights2 = np.array([[2.], [3.]])
head1 = head_lib.multi_label_head(n_classes=2, name='head1',
weight_column='weights1')
head2 = head_lib.multi_label_head(n_classes=3, name='head2',
weight_column='weights2')
multi_head = multi_head_lib.multi_head(
[head1, head2], head_weights=[1., 2.])
logits = np.array([[-10., 10., 20., -20., 20.],
[-15., 10., -30., 20., -20.]], dtype=np.float32)
labels = {
'head1': np.array([[1, 0], [1, 1]], dtype=np.int64),
'head2': np.array([[0, 1, 0], [1, 1, 0]], dtype=np.int64),
}
training_loss, unreduced_losses, weights, _ = multi_head.create_loss(
features={
'x': np.array(((42,),), dtype=np.int32),
'weights1': weights1,
'weights2': weights2
},
mode=model_fn.ModeKeys.TRAIN,
logits=logits,
labels=labels)
tol = 1e-3
with self.test_session():
# loss of the first head is [[(10 + 10) / 2], [(15 + 0) / 2]]
# = [10, 7.5]
# training_loss = (1 * 10 + 2 * 7.5) / 2 = 12.5
# head-weighted unreduced_loss = 1 * [10, 7.5]
self.assertAllClose(
[[10.], [7.5]], unreduced_losses['head1'].eval(), rtol=tol, atol=tol)
# loss of the second head is [[(20 + 20 + 20) / 3], [(30 + 0 + 0) / 3]]
# = [20, 10]
# training_loss = (2 * 20 + 3 * 10) / 2 = 35
# head-weighted unreduced_loss = 2 * [20, 10]
self.assertAllClose(
[[40.], [20.]], unreduced_losses['head2'].eval(), rtol=tol, atol=tol)
# head-weighted training_loss = 1 * 12.5 + 2 * 35 = 82.5
self.assertAllClose(82.5, training_loss.eval(), rtol=tol, atol=tol)
# head-weighted example weights
self.assertAllClose(
[[1.], [2.]], weights['head1'].eval(), rtol=tol, atol=tol)
self.assertAllClose(
[[4.], [6.]], weights['head2'].eval(), rtol=tol, atol=tol)
def test_train_create_loss_logits_tensor_multi_dim(self):
"""Tests create_loss with multi-dimensional logits of shape [2, 2, 5]."""
head1 = head_lib.regression_head(label_dimension=2, name='head1')
head2 = head_lib.regression_head(label_dimension=3, name='head2')
multi_head = multi_head_lib.multi_head([head1, head2])
logits = np.array(
[[[-1., 1., 2., -2., 2.], [-1., 1., 2., -2., 2.]],
[[-1.5, 1.5, -2., 2., -2.], [-1.5, 1.5, -2., 2., -2.]]],
dtype=np.float32)
labels = {
'head1': np.array([[[1., 0.], [1., 0.]],
[[1.5, 1.5], [1.5, 1.5]]], dtype=np.float32),
'head2': np.array([[[0., 1., 0.], [0., 1., 0.]],
[[2., 2., 0.], [2., 2., 0.]]], dtype=np.float32),
}
# Loss for the first head:
# loss1 = ((1+1)^2 + (0-1)^2 + (1+1)^2 + (0-1)^2 +
# (1.5+1.5)^2 + (1.5-1.5)^2 + (1.5+1.5)^2 + (1.5-1.5)^2) / 8
# = 3.5
# Loss for the second head:
# loss2 = ((0-2)^2 + (1+2)^2 + (0-2)^2 + (0-2)^2 + (1+2)^2 + (0-2)^2 +
# (2+2)^2 + (2-2)^2 + (0+2)^2 + (2+2)^2 + (2-2)^2 + (0+2)^2) / 12
# = 6.167
expected_training_loss = 3.5 + 6.167
training_loss = multi_head.create_loss(
features={},
mode=model_fn.ModeKeys.TRAIN,
logits=logits,
labels=labels)[0]
tol = 1e-3
with self.test_session():
self.assertAllClose(
expected_training_loss, training_loss.eval(), rtol=tol, atol=tol)
def test_train_one_head(self):
head1 = head_lib.multi_label_head(n_classes=2, name='head1')
multi_head = multi_head_lib.multi_head([head1])
logits = {'head1': np.array([[-10., 10.], [-15., 10.]], dtype=np.float32)}
labels = {'head1': np.array([[1, 0], [1, 1]], dtype=np.int64)}
# For large logits, sigmoid cross entropy loss is approximated as:
# loss = labels * (logits < 0) * (-logits) +
# (1 - labels) * (logits > 0) * logits =>
# expected_unweighted_loss = [[10., 10.], [15., 0.]]
# loss = ( (10 + 10) / 2 + (15 + 0) / 2 ) / 2 = 8.75
expected_loss = 8.75
expected_train_result = 'my_train_op'
def _train_op_fn(loss):
return string_ops.string_join(
[constant_op.constant(expected_train_result),
string_ops.as_string(loss, precision=3)])
spec = multi_head.create_estimator_spec(
features={'x': np.array(((42,),), dtype=np.int32)},
mode=model_fn.ModeKeys.TRAIN,
logits=logits,
labels=labels,
train_op_fn=_train_op_fn)
self.assertIsNotNone(spec.loss)
self.assertEqual({}, spec.eval_metric_ops)
self.assertIsNotNone(spec.train_op)
self.assertIsNone(spec.export_outputs)
_assert_no_hooks(self, spec)
# Assert predictions, loss, train_op, and summaries.
tol = 1e-3
with self.test_session() as sess:
_initialize_variables(self, spec.scaffold)
self.assertIsNotNone(spec.scaffold.summary_op)
loss, train_result, summary_str = sess.run((spec.loss, spec.train_op,
spec.scaffold.summary_op))
self.assertAllClose(expected_loss, loss, rtol=tol, atol=tol)
self.assertEqual(
six.b('{0:s}{1:.3f}'.format(expected_train_result, expected_loss)),
train_result)
_assert_simple_summaries(self, {
metric_keys.MetricKeys.LOSS: expected_loss,
metric_keys.MetricKeys.LOSS + '/head1': expected_loss,
}, summary_str, tol)
def test_train_one_head_with_optimizer(self):
head1 = head_lib.multi_label_head(n_classes=2, name='head1')
multi_head = multi_head_lib.multi_head([head1])
logits = {'head1': np.array([[-10., 10.], [-15., 10.]], dtype=np.float32)}
labels = {'head1': np.array([[1, 0], [1, 1]], dtype=np.int64)}
# For large logits, sigmoid cross entropy loss is approximated as:
# loss = labels * (logits < 0) * (-logits) +
# (1 - labels) * (logits > 0) * logits =>
# expected_unweighted_loss = [[10., 10.], [15., 0.]]
# loss = ( (10 + 10) / 2 + (15 + 0) / 2 ) / 2 = 8.75
expected_loss = 8.75
expected_train_result = 'my_train_op'
class _Optimizer(object):
def minimize(self, loss, global_step):
del global_step
return string_ops.string_join(
[constant_op.constant(expected_train_result),
string_ops.as_string(loss, precision=3)])
spec = multi_head.create_estimator_spec(
features={'x': np.array(((42,),), dtype=np.int32)},
mode=model_fn.ModeKeys.TRAIN,
logits=logits,
labels=labels,
optimizer=_Optimizer())
tol = 1e-3
with self.test_session() as sess:
_initialize_variables(self, spec.scaffold)
loss, train_result = sess.run((spec.loss, spec.train_op))
self.assertAllClose(expected_loss, loss, rtol=tol, atol=tol)
self.assertEqual(
six.b('{0:s}{1:.3f}'.format(expected_train_result, expected_loss)),
train_result)
def test_train_two_heads_with_weights(self):
head1 = head_lib.multi_label_head(n_classes=2, name='head1')
head2 = head_lib.multi_label_head(n_classes=3, name='head2')
multi_head = multi_head_lib.multi_head(
[head1, head2], head_weights=[1., 2.])
logits = {
'head1': np.array([[-10., 10.], [-15., 10.]], dtype=np.float32),
'head2': np.array([[20., -20., 20.], [-30., 20., -20.]],
dtype=np.float32),
}
labels = {
'head1': np.array([[1, 0], [1, 1]], dtype=np.int64),
'head2': np.array([[0, 1, 0], [1, 1, 0]], dtype=np.int64),
}
# For large logits, sigmoid cross entropy loss is approximated as:
# loss = labels * (logits < 0) * (-logits) +
# (1 - labels) * (logits > 0) * logits =>
# head1: expected_unweighted_loss = [[10., 10.], [15., 0.]]
# loss = ( (10 + 10) / 2 + (15 + 0) / 2 ) / 2 = 8.75
# head2: expected_unweighted_loss = [[20., 20., 20.], [30., 0., 0]]
# loss = ( (20 + 20 + 20) / 3 + (30 + 0 + 0) / 3 ) / 2 = 15
# Average over classes, weighted sum over batch and heads.
expected_loss_head1 = 8.75
expected_loss_head2 = 15.0
expected_loss = 1. * expected_loss_head1 + 2. * expected_loss_head2
expected_train_result = 'my_train_op'
def _train_op_fn(loss):
return string_ops.string_join(
[constant_op.constant(expected_train_result),
string_ops.as_string(loss, precision=3)])
spec = multi_head.create_estimator_spec(
features={'x': np.array(((42,),), dtype=np.int32)},
mode=model_fn.ModeKeys.TRAIN,
logits=logits,
labels=labels,
train_op_fn=_train_op_fn)
self.assertIsNotNone(spec.loss)
self.assertEqual({}, spec.eval_metric_ops)
self.assertIsNotNone(spec.train_op)
self.assertIsNone(spec.export_outputs)
_assert_no_hooks(self, spec)
# Assert predictions, loss, train_op, and summaries.
tol = 1e-3
with self.test_session() as sess:
_initialize_variables(self, spec.scaffold)
self.assertIsNotNone(spec.scaffold.summary_op)
loss, train_result, summary_str = sess.run((spec.loss, spec.train_op,
spec.scaffold.summary_op))
self.assertAllClose(expected_loss, loss, rtol=tol, atol=tol)
self.assertEqual(
six.b('{0:s}{1:.3f}'.format(expected_train_result, expected_loss)),
train_result)
_assert_simple_summaries(self, {
metric_keys.MetricKeys.LOSS: expected_loss,
metric_keys.MetricKeys.LOSS + '/head1': expected_loss_head1,
metric_keys.MetricKeys.LOSS + '/head2': expected_loss_head2,
}, summary_str, tol)
if __name__ == '__main__':
test.main()
|
imonursahin/python-simple-student-management-system
|
obs.py
|
import time
import cagir
print("""
==============================================
= =
= Öğrenci Bilgi Sistemi - Akademisyen Girişi =
= (k_adi: onur - sifre: 1903) =
= =
==============================================
""")
ogr_kullanici = "onur"
ogr_sifre = "1903"
giris_hakki = 3
while True:
o_adi = input("Kullanıcı Adı :")
o_sifre = input("Parola :")
if (giris_hakki == 0 ):
print("Giriş Hakkınız Bitti... Daha sonra tekrar deneyin.")
break
elif (o_adi != ogr_kullanici and o_sifre == ogr_sifre):
print("Bilgiler Sorgulanıyor...")
time.sleep(1)
print("Kullanıcı Adı Hatalı...")
giris_hakki -= 1
print("Giriş Hakkı: ", giris_hakki)
elif (o_adi == ogr_kullanici and o_sifre != ogr_sifre):
print("Bilgiler Sorgulanıyor...")
time.sleep(1)
print("Parola Hatalı...")
giris_hakki -= 1
print("Giriş Hakkı: ", giris_hakki)
elif (o_adi != ogr_kullanici and o_sifre != ogr_sifre):
print("Bilgiler Sorgulanıyor...")
time.sleep(1)
print("Kullanıcı Adı ve Parola Hatalı...")
giris_hakki -= 1
print("Giriş Hakkı: ", giris_hakki)
else:
print("Bilgiler Sorgulanıyor...")
time.sleep(1)
print("Başarıyla Sisteme Giriş Yaptınız...Lütfen Bekleyin.")
time.sleep(1)
while True:
print("""
[1] Öğrenci Bilgisi
[2] Akademisyen Bilgisi
[0] Çıkış Yap
""")
islem=int(input("Lütfen Yapmak İstediğiniz İşleme Ait Numarayı Girin :"))
if islem==1: # Öğrenci Bilgisi
print("-------Kayıtlı Öğrenciler-----")
print('[Ad: Ali]','[Soyad: Veli]','[Numarası: 1998]','[Bölümü: YBS]')
print('[Ad: Onur]','[Soyad: Şahin]','[Numarası: 2040]','[Bölümü: YBS]')
ogrenciNo=input("İşlem Yapmak İstediğiniz Ogrencinin Numarası :")
cagir.ogrencibilgisi(ogrenciNo,)
elif islem ==2: #Akademisyen Bilgisi
print("")
cagir.akademisyenBilgisi()
elif islem==0:
print("Çıkış Yaptınız")
break
|
imonursahin/python-simple-student-management-system
|
cagir.py
|
import time
class Ogrenci(object):
def __init__(self, isim=None, soyisim=None, numara=None, bölüm=None, vizeNot=None, finalNot=None, topDersSaat=None, devamsızlıkBil=None, danisman=None):
self.isim = isim
self.soyisim = soyisim
self.numara = numara
self.bölüm = bölüm
self.vizeNot = vizeNot
self.finalNot = finalNot
self.topDersSaat = topDersSaat
        self.devamsızlıkBil = devamsızlıkBil
        self.danisman = danisman
def notlar(vize,final):
vizeNotu=vize
finalNotu=final
ortalamaNot=int((0.4*vizeNotu)+(0.6*finalNotu))
if(ortalamaNot>=88):
print("[Vize Notu: {}] [Final Notu: {}] [Ortalaması: int{}] [Harf Notu: AA] [GEÇTİ]".format(vizeNotu,finalNotu,ortalamaNot))
elif ortalamaNot>=82 and ortalamaNot<88 :
print("[Vize Notu: {}] [Final Notu: {}] [Ortalaması: {}] [Harf Notu: BA] [GEÇTİ]".format(vizeNotu,finalNotu,ortalamaNot))
elif ortalamaNot >= 76 and ortalamaNot < 82:
print("[Vize Notu: {}] [Final Notu: {}] [Ortalaması: {}] [Harf Notu: BB] [GEÇTİ]".format(vizeNotu,finalNotu,ortalamaNot))
elif ortalamaNot >= 66 and ortalamaNot < 76:
print("[Vize Notu: {}] [Final Notu: {}] [Ortalaması: {}] [Harf Notu: CB] [GEÇTİ]".format(vizeNotu,finalNotu,ortalamaNot))
elif ortalamaNot >= 60 and ortalamaNot < 66:
print("[Vize Notu: {}] [Final Notu: {}] [Ortalaması: {}] [Harf Notu: CC] [GEÇTİ]".format(vizeNotu,finalNotu,ortalamaNot))
elif ortalamaNot >= 55 and ortalamaNot < 60:
print("[Vize Notu: {}] [Final Notu: {}] [Ortalaması: {}] [Harf Notu: DC] [KOŞULLU GEÇTİ]".format(vizeNotu,finalNotu,ortalamaNot))
elif ortalamaNot >= 45 and ortalamaNot < 55:
print("[Vize Notu: {}] [Final Notu: {}] [Ortalaması: {}] [Harf Notu: DD] [KOŞULLU GEÇTİ]".format(vizeNotu,finalNotu,ortalamaNot))
else:
print("[Vize Notu: {}] [Final Notu: {}] [Ortalaması: {}] [Harf Notu: FF] [KALDI]".format(vizeNotu,finalNotu,ortalamaNot))
def devamsizlik(haftaDersSaat,devamsızlık):
hafta=haftaDersSaat
devam=devamsızlık
d_hakki = (hafta*70/100)
d_hakki=hafta-d_hakki
if (d_hakki <= devam):
print("[Devamsızlıktan Kaldı]")
else:
print("[Geçti]")
def ogrencibilgisi(ogrenciNo):
ogrenciList=[]
ogrenciList.append(Ogrenci('Ali','Veli','1998','YBS'))
ogrenciList.append(Ogrenci('Onur','Şahin','2040','YBS'))
for ogr in ogrenciList:
if(ogr.numara==ogrenciNo):
print("[Adı: {}]".format(ogr.isim))
print("[Soyadı: {}]".format(ogr.soyisim))
print("[No: {}]".format(ogr.numara))
print("[Bölüm: {}]".format(ogr.bölüm))
print("------Danışman Bilgisi----")
akademisyenBilgisi()
while True:
print("""
[1] Not Giriş
[2] Devamsızlık Giriş
[0] Çıkış Yap
""")
islem=int(input("Lütfen Yapmak İstediğiniz İşleme Ait Numarayı Girin :"))
if islem==1: # Öğrenci Bilgisi
vizeNotu=int(input("Vize: "))
finalNotu=int(input("Final: "))
notlar(vizeNotu,finalNotu)
elif islem==2:
dersSaati=int(input("<NAME>: "))
devamsiz=int(input("Devamsızlık: "))
devamsizlik(dersSaati,devamsiz)
elif islem==0:
print("Çıkış yaptınız")
time.sleep(1)
break
else:
print("Yanlış tuşlama yaptınız")
pass
break
    else:
        print("Bu numaraya sahip öğrenci bulunmamaktadır.")
def akademisyenBilgisi():
print("Adı: Onur")
print("Soyadı: Şahin")
print("Bölümü: YBS")
|
sakurai-youhei/rpsowmi
|
rpsowmi.py
|
<reponame>sakurai-youhei/rpsowmi
'''
Created on 2017/04/06
@author: sakurai
'''
import _winapi # TODO(X): It might be better to use ctypes.windll instead
from base64 import encodebytes
from collections import defaultdict
from collections import namedtuple
from concurrent.futures import ThreadPoolExecutor
from contextlib import closing
from enum import IntEnum
from logging import getLogger
from multiprocessing.connection import BUFSIZE
from multiprocessing.connection import PipeConnection
from threading import Timer
from uuid import uuid4
from xml.dom.minidom import parseString
VERSION = (2017, 4, 11)
VERSION_TEXT = ".".join(map(str, VERSION))
__version__ = VERSION_TEXT
__license__ = "MIT"
__author__ = "<NAME>"
__email__ = "<EMAIL>"
__all__ = ["RemotePowerShellOverWmi"]
RPSOWMI_PS1 = """\
trap [Exception] {{
Add-Content "{LOGFILE}" -value $error[0].exception
exit {CODE_ON_EXC}
}}
Function Write-Log {{
Param ([string]$Message)
$Stamp = (Get-Date).toString("yyyy-MM-dd HH:mm:ss")
echo "$Stamp $Message" >> "{LOGFILE}"
}}
Write-Log "* NPIPE_HOST: {NPIPE_HOST}"
Write-Log "* NPIPE_IN: {NPIPE_IN}"
Write-Log "* NPIPE_OUT: {NPIPE_OUT}"
Write-Log "* LOGFILE: {LOGFILE}"
Write-Log "* EXEC: {EXEC}"
Write-Log "* TIMEOUT: {TIMEOUT}"
Write-Log "* ENCODING: {ENCODING}"
Write-Log "* CODE_ON_EXC: {CODE_ON_EXC}"
$psi = New-Object System.Diagnostics.ProcessStartInfo
$psi.CreateNoWindow = $true
$psi.LoadUserProfile = $false
$psi.UseShellExecute = $false
$psi.StandardOutputEncoding = {ENCODING}
$psi.StandardErrorEncoding = {ENCODING}
$psi.RedirectStandardInput = $true
$psi.RedirectStandardOutput = $true
$psi.RedirectStandardError = $true
$psi.FileName = "{EXEC}"
$psi.Arguments = "{ARGS}"
Write-Log "[{NPIPE_IN}] Opening"
$npipe_in = New-Object System.IO.Pipes.NamedPipeClientStream(
"{NPIPE_HOST}", "{NPIPE_IN}", [System.IO.Pipes.PipeDirection]::In)
$npipe_in.Connect({TIMEOUT})
Write-Log "[{NPIPE_OUT}] Opening"
$npipe_out = New-Object System.IO.Pipes.NamedPipeClientStream(
"{NPIPE_HOST}", "{NPIPE_OUT}", [System.IO.Pipes.PipeDirection]::Out)
$npipe_out.Connect({TIMEOUT})
$proc = New-Object System.Diagnostics.Process
$proc.StartInfo = $psi
$proc.EnableRaisingEvents = $true
$stdout = New-Object -TypeName System.Text.StringBuilder
$stderr = New-Object -TypeName System.Text.StringBuilder
$action = {{
$line = $Event.SourceEventArgs.Data
if (-not [String]::IsNullOrEmpty($line)) {{
$Event.MessageData.AppendLine($line)
}}
}}
$evt_stdout = Register-ObjectEvent `
-InputObject $proc `
-EventName OutputDataReceived `
-Action $action `
-MessageData $stdout
$evt_stderr = Register-ObjectEvent `
-InputObject $proc `
-EventName ErrorDataReceived `
-Action $action `
-MessageData $stderr
Write-Log "Starting {EXEC}"
$proc.Start()
$proc.BeginOutputReadLine()
$proc.BeginErrorReadLine()
$reader = New-Object System.IO.StreamReader($npipe_in, {ENCODING})
$proc_stdin = New-Object System.IO.StreamWriter(
$proc.StandardInput.BaseStream, {ENCODING})
$proc_stdin.Write($reader.ReadToEnd())
$proc_stdin.Flush()
$proc.StandardInput.Close()
$reader.Close()
Write-Log "[{NPIPE_IN}] Closed"
Write-Log ("Waiting for exit of {EXEC} pid=" + $proc.Id)
if ($proc.WaitForExit({TIMEOUT}) -eq $False) {{
$proc.Kill()
$npipe_out.Close()
throw ("Timeout fired, {TIMEOUT} ms - {EXEC} pid=" + $proc.Id)
}}
Write-Log ("{EXEC} exited with " + $proc.ExitCode)
$proc.CancelOutputRead()
$proc.CancelErrorRead()
Unregister-Event -SourceIdentifier $evt_stdout.Name
Unregister-Event -SourceIdentifier $evt_stderr.Name
$xml = [XML]@'
<?xml version="1.0" encoding="UTF-8"?>
<!DOCTYPE Result [
<!ELEMENT output (#PCDATA)>
<!ATTLIST output id ID #IMPLIED>
]>
<Result>
<output id="code"/>
<output id="stdout"/>
<output id="stderr"/>
</Result>
'@
$xml.SelectSingleNode("/Result/output[@id='code']").InnerText = $proc.ExitCode
$xml.SelectSingleNode("/Result/output[@id='stdout']").InnerText = $stdout
$xml.SelectSingleNode("/Result/output[@id='stderr']").InnerText = $stderr
$writer = New-Object System.IO.StreamWriter($npipe_out, {ENCODING})
$xml.WriteContentTo((New-Object system.xml.XmlTextWriter($writer)))
$writer.Close()
Write-Log "[{NPIPE_OUT}] Closed"
""" # Curly brackets must be {{ and }} on the inside of above PS code.
ResultSet = namedtuple("ResultSet", ["pid", "code", "stdout", "stderr"])
ReturnValues = defaultdict(lambda: (
"Other, see "
"WMI Error Constants - "
"https://msdn.microsoft.com/en-us/library/aa394559.aspx"
", WbemErrorEnum - "
"https://msdn.microsoft.com/en-us/library/aa393978.aspx"
" or System Error Codes - "
"https://msdn.microsoft.com/en-us/library/ms681381.aspx"
"."
))
ReturnValues[0] = "Successful completion"
ReturnValues[2] = "Access denied"
ReturnValues[3] = "Insufficient privilege"
ReturnValues[8] = "Unknown failure"
ReturnValues[9] = "Path not found"
ReturnValues[21] = "Invalid parameter"
class ShowWindow(IntEnum):
"""https://msdn.microsoft.com/en-us/library/aa394375.aspx"""
SW_HIDE = 0
SW_NORMAL = 1
SW_SHOWMINIMIZED = 2
SW_SHOWMAXIMIZED = 3
SW_SHOWNOACTIVATE = 4
SW_SHOW = 5
SW_MINIMIZE = 6
SW_SHOWMINNOACTIVE = 7
SW_SHOWNA = 8
SW_RESTORE = 9
SW_SHOWDEFAULT = 10
SW_FORCEMINIMIZE = 11
class dwOpenMode(IntEnum):
PIPE_ACCESS_DUPLEX = 0x00000003
PIPE_ACCESS_INBOUND = 0x00000001
PIPE_ACCESS_OUTBOUND = 0x00000002
FILE_FLAG_FIRST_PIPE_INSTANCE = 0x00080000
FILE_FLAG_WRITE_THROUGH = 0x80000000
FILE_FLAG_OVERLAPPED = 0x40000000
WRITE_DAC = 0x00040000
WRITE_OWNER = 0x00080000
ACCESS_SYSTEM_SECURITY = 0x01000000
class dwPipeMode(IntEnum):
PIPE_TYPE_BYTE = 0x00000000
PIPE_TYPE_MESSAGE = 0x00000004
PIPE_READMODE_BYTE = 0x00000000
PIPE_READMODE_MESSAGE = 0x00000002
PIPE_WAIT = 0x00000000
PIPE_NOWAIT = 0x00000001
PIPE_ACCEPT_REMOTE_CLIENTS = 0x00000000
PIPE_REJECT_REMOTE_CLIENTS = 0x00000008
class nMaxInstances(IntEnum):
PIPE_UNLIMITED_INSTANCES = 255
class nDefaultTimeOut(IntEnum):
NMPWAIT_USE_DEFAULT_WAIT = 0x00000000
NMPWAIT_WAIT_FOREVER = 0xffffffff
class TimeoutTimer(Timer):
def __init__(self, interval, function, args=None, kwargs=None):
Timer.__init__(self, interval, function, args=args, kwargs=kwargs)
self.setDaemon(True)
def __enter__(self):
self.start()
def __exit__(self, exc_type, exc_value, traceback):
self.cancel()
def PipeServerConnection(address, readable, writable,
timeout=nDefaultTimeOut.NMPWAIT_WAIT_FOREVER):
open_mode = (
0x00000000
| dwOpenMode.FILE_FLAG_OVERLAPPED
| dwOpenMode.FILE_FLAG_FIRST_PIPE_INSTANCE
| (readable and dwOpenMode.PIPE_ACCESS_INBOUND or 0x00000000)
| (writable and dwOpenMode.PIPE_ACCESS_OUTBOUND or 0x00000000)
)
pipe_mode = (
0x00000000
| (readable and dwPipeMode.PIPE_READMODE_BYTE or 0x00000000)
| (writable and dwPipeMode.PIPE_TYPE_BYTE or 0x00000000)
| dwPipeMode.PIPE_WAIT
)
# https://msdn.microsoft.com/en-US/library/windows/desktop/aa365150.aspx
handle = _winapi.CreateNamedPipe(address, open_mode, pipe_mode, 1,
BUFSIZE, BUFSIZE, timeout, 0x0)
overlapped = _winapi.ConnectNamedPipe(handle, overlapped=True)
if (nDefaultTimeOut.NMPWAIT_USE_DEFAULT_WAIT
< timeout < nDefaultTimeOut.NMPWAIT_WAIT_FOREVER):
timer = TimeoutTimer(timeout / 1000, overlapped.cancel)
else:
timer = TimeoutTimer(0, lambda: None)
with timer:
_, err = overlapped.GetOverlappedResult(True) # Can block forever
assert err == 0
return PipeConnection(handle, readable=readable, writable=writable)
class RemotePowerShellOverWmi(object):
def __init__(self, wmiconn, localhost=".", timeout=60, logfile="$NUL",
code_on_exc=255, logger=getLogger("RPSoWMI")):
"""Enable you to execute PowerShell script on remote host via WMI.
:param wmiconn: Any object behaving like wmi.WMI().
:type wmiconn: win32com.client.Dispatch("WbemScripting.SWbemLocator")
:param localhost: Host name where named pipes are being created. The
host name must be referable from remote host because remote host
accesses to the named pipes using the host name.
:type localhost: str
:param timeout: Timeout seconds of execution. If None is provided,
timeout is not set; i.e. The execution could be blocked forever.
:type timeout: int or float or None
:param logfile: Path to log file to be updated with utf-16le encoding
by remote PowerShell process. By default, no log file is generated
because of '$NUL'.
:type logfile: str
        :param code_on_exc: Exit code used when the wrapper PowerShell code
            hits an exception such as a timeout, IO error, etc.
:type code_on_exc: int
:param logger: Logger to be used for debug logging.
:type logger: logging.Logger
"""
assert all([ # Minimum requirements of wmiconn object
hasattr(wmiconn, "Win32_ProcessStartup"),
hasattr(wmiconn.Win32_ProcessStartup, "new"),
callable(wmiconn.Win32_ProcessStartup.new),
hasattr(wmiconn, "Win32_Process"),
hasattr(wmiconn.Win32_Process, "Create"),
callable(wmiconn.Win32_Process.Create),
]), "Incompatible wmiconn object, %r" % wmiconn
self.wmiconn = wmiconn
self.localhost = localhost
self.timeout = timeout
self.logfile = logfile
self.code_on_exc = code_on_exc
self.logger = logger
self.encoding = "utf-8"
self.ps_cmd = "powershell.exe"
self.ps_opts = "-NonInteractive -NoProfile -NoLogo -encodedCommand"
self.no_stdin = "-InputFormat none"
self.ps_encoding = "[System.Text.Encoding]::UTF8"
self.ps_prepend = (
"[Console]::OutputEncoding = {ENCODING};"
"[Console]::InputEncoding = {ENCODING};"
)
self.npipe_in = r"\\.\pipe\%s" % uuid4()
self.npipe_out = r"\\.\pipe\%s" % uuid4()
@staticmethod
def encode_ps_code(ps_code):
"""Encode PowerShell code into one-line using utf-16le and base64."""
return encodebytes(
ps_code.encode("utf-16le")).decode("ascii").replace("\n", "")
@staticmethod
def parse_ps_result(s):
"""Parse XML formatted output from remote PowerShell execution."""
targets = (("code", int), ("stdout", str), ("stderr", str))
with parseString(s) as dom:
for ident, cast in targets:
try:
yield cast(
dom.getElementById(ident).childNodes[0].nodeValue)
except IndexError:
try:
yield cast("") # empty stdout and stderr are ok.
except ValueError: # empty code is invalid.
                        raise RuntimeError(
                            "No valid %s found (%r)" % (ident, cast), s)
@property
def timeout_ms_or_forever(self):
if self.timeout is None or self.timeout < 0:
return nDefaultTimeOut.NMPWAIT_WAIT_FOREVER
else:
return int(self.timeout * 1000)
def _handle_write(self, stdin):
"""Write data connected to STDIN through named pipe.
:param stdin: String to be provided to STDIN.
:type stdin: str
"""
data = stdin.encode(self.encoding)
self.logger.debug("[%s] Opening", self.npipe_in)
with closing(PipeServerConnection(
address=self.npipe_in,
readable=False, writable=True,
timeout=self.timeout_ms_or_forever
)) as pipe:
self.logger.debug("[%s] Established", self.npipe_in)
pipe.send_bytes(data)
self.logger.debug("[%s] Sent %d bytes", self.npipe_in, len(data))
self.logger.debug("[%s] Closed", self.npipe_in)
def _handle_read(self):
"""Read data of exit code, STDOUT and STDERR through named pipe.
:return: XML string containing exit code, STDOUT and STDERR.
:rtype: str
"""
data = b""
self.logger.debug("[%s] Opening", self.npipe_out)
with closing(PipeServerConnection(
address=self.npipe_out,
readable=True, writable=False,
timeout=self.timeout_ms_or_forever
)) as pipe:
self.logger.debug("[%s] Established", self.npipe_out)
while True:
try:
recv = pipe.recv_bytes(BUFSIZE)
self.logger.debug("[%s] Received %d bytes",
self.npipe_out, len(recv))
data += recv
except (BrokenPipeError, EOFError):
break
self.logger.debug("[%s] Closed", self.npipe_out)
return data.decode(self.encoding)
def execute(self, ps_code, stdin=None):
"""Execute PowerShell code through Win32_Process.Create().
TODO(X): Line separators in stdin are transformed to '\n' somewhere
regardless of original formats such as '\r', '\n' or '\r\n'.
TODO(X): '\n' is always appended at the end of stdout and maybe also
stderr.
:param ps_code: PowerShell code to be executed.
:type ps_code: str
:param stdin: String to be provided to PowerShell process.
:type stdin: str
:return: Named tuple of pid, code, stdout and stderr as an execution
result of PowerShell code.
:rtype: rpsowmi.ResultSet
        :raises RuntimeError: Process creation fails or remote execution of
            the wrapper PowerShell code raises an exception, which may include
            a timeout on the remote host.
:raises concurrent.futures.TimeoutError: Timeout on local host.
"""
ps_code_encoded = self.encode_ps_code(
self.ps_prepend.format(ENCODING=self.ps_encoding) + ps_code)
wrapper = self.encode_ps_code(RPSOWMI_PS1.format(
EXEC=self.ps_cmd,
ARGS=" ".join([self.ps_opts, ps_code_encoded]),
NPIPE_HOST=self.localhost,
NPIPE_IN=self.npipe_in.rsplit("\\", 1)[-1],
NPIPE_OUT=self.npipe_out.rsplit("\\", 1)[-1],
LOGFILE=self.logfile,
ENCODING=self.ps_encoding,
TIMEOUT="" if not self.timeout else int(self.timeout * 1000),
CODE_ON_EXC=self.code_on_exc,
))
cmdline = " ".join([self.ps_cmd, self.no_stdin, self.ps_opts, wrapper])
ps_info = self.wmiconn.Win32_ProcessStartup.new()
ps_info.ShowWindow = ShowWindow.SW_HIDE.value
with ThreadPoolExecutor(2) as pool:
f_write = pool.submit(self._handle_write, stdin=stdin or "")
f_read = pool.submit(self._handle_read)
self.logger.debug("Creating new process with %d bytes command",
len(cmdline))
pid, result = self.wmiconn.Win32_Process.Create(
CommandLine=cmdline, ProcessStartupInformation=ps_info)
if result != 0:
f_write.cancel()
f_read.cancel()
raise RuntimeError(
"Creating new process failed with %d" % result,
"%d - %s" % (result, ReturnValues[result]),
cmdline, repr(self.wmiconn))
else:
f_write.result(timeout=self.timeout)
self.logger.debug("Waiting for result set information from "
"process pid=%d", pid)
r = ResultSet(pid, *self.parse_ps_result(
f_read.result(timeout=self.timeout)
))
if r.code == self.code_on_exc:
raise RuntimeError("Exception is recorded in wrapper code",
r, cmdline, repr(self.wmiconn))
else:
return r
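if __name__ == "__main__":
    # Illustrative usage only (added example): the `wmi` third-party package,
    # the remote host name and the credentials below are assumptions, not part
    # of this module.
    import wmi
    conn = wmi.WMI("remote-host", user="administrator", password="secret")
    rps = RemotePowerShellOverWmi(conn, localhost="my-workstation")
    result = rps.execute('Write-Output "hello from remote"')
    print(result.pid, result.code, result.stdout, result.stderr)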
|
sakurai-youhei/rpsowmi
|
setup.py
|
<gh_stars>1-10
from setuptools import setup
import rpsowmi
classifiers = [line.rstrip() for line in """\
License :: OSI Approved :: MIT License
Development Status :: 3 - Alpha
Programming Language :: Python :: 3.4
Programming Language :: Python :: 3.5
Programming Language :: Python :: 3.6
Operating System :: Microsoft :: Windows
Intended Audience :: Developers
""".splitlines()]
keywords = [line.rstrip() for line in """\
WMI
PowerShell
Remote
""".splitlines()]
with open("README.rst") as fp:
long_description = fp.read()
setup(
version=rpsowmi.__version__,
name=rpsowmi.__name__,
license=rpsowmi.__license__,
url="https://github.com/sakurai-youhei/rpsowmi",
description="Remote PowerShell over WMI (RPSoWMI)",
long_description=long_description,
classifiers=classifiers,
keywords=keywords,
author=rpsowmi.__author__,
author_email=rpsowmi.__email__,
py_modules=[rpsowmi.__name__],
test_suite="test.suite",
)
|
naamaf/hw5_2019
|
hw5.py
|
import pathlib
import os
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from typing import Union, Tuple
class QuestionnaireAnalysis:
"""
Reads and analyzes data generated by the questionnaire experiment.
Should be able to accept strings and pathlib.Path objects.
"""
def __init__(self, data_fname: Union[pathlib.Path, str]):
if not(os.path.exists(data_fname)):
raise ValueError
if type(data_fname) == str:
self.data_fname = pathlib.Path(data_fname)
else:
self.data_fname = data_fname
def read_data(self):
"""
Reads the json data located in self.data_fname into memory, to
the attribute self.data.
"""
self.data = pd.read_json(self.data_fname)
def show_age_distrib(self) -> Tuple[np.ndarray, np.ndarray]:
"""
Calculates and plots the age distribution of the participants.
Returns a tuple containing two numpy arrays:
The first item being the number of people in a given bin.
The second item being the bin edges.
"""
good_data_mask = self.data['age'].notna()
age_values = self.data['age'][good_data_mask]
hist_values, bin_values = np.histogram(age_values, bins = 10, range = (0, 100))
plt.hist(age_values, bins = bin_values)
plt.xlabel('Age')
        plt.ylabel('No. of participants')
plt.show()
return hist_values, bin_values
def remove_rows_without_mail(self) -> pd.DataFrame:
"""
Checks self.data for rows with invalid emails, and removes them.
Returns the corrected DataFrame, i.e. the same table but with
the erroneous rows removed and the (ordinal) index after a reset.
"""
        good_data_mask = self.data['email'].str.contains(r'[0-9A-Za-z]+@[0-9A-Za-z]+\.[A-Za-z]+')
valid_email_data = self.data[good_data_mask]
new_max_index = len(valid_email_data.index)
valid_email_data.index = np.arange(new_max_index)
return valid_email_data
def fill_na_with_mean(self) -> Union[pd.DataFrame, np.ndarray]:
"""
        Finds, in the original DataFrame, the subjects that didn't answer
        all of the questions, and replaces each missing value with the mean
        of that student's other grades. Returns the corrected DataFrame,
        as well as the row indices of the students whose grades were
        filled in.
"""
number_of_questions = 5 #for flexibility
question_indices = []
for idx in range(number_of_questions):
question_indices.append('q' + str(idx+1))
missing_idx = np.array([])
work_data = self.data.copy()
for question in question_indices:
missing_answers = work_data[question].isna()
missing_idx = np.union1d(missing_idx, missing_answers[missing_answers].index)
mean_scores = np.nanmean(work_data[question_indices][missing_answers], axis = 1)
work_data.loc[missing_answers, question] = mean_scores
return work_data, missing_idx
def correlate_gender_age(self) -> pd.DataFrame:
"""
Looks for a correlation between the gender of the subject, their age
and the score for all five questions.
Returns a DataFrame with a MultiIndex containing the gender and whether
the subject is above 40 years of age, and the average score in each of
the five questions.
"""
number_of_questions = 5 #for flexibility
question_indices = []
for idx in range(number_of_questions):
question_indices.append('q' + str(idx+1))
relevant_columns = ['gender', 'age'] + question_indices
relevant_data = self.data[relevant_columns]
relevant_data = relevant_data.groupby(['gender', relevant_data.age >= 40])
corr_scores = relevant_data.mean()
return corr_scores
if __name__ == '__main__':
QA = QuestionnaireAnalysis(pathlib.Path('data.json'))
QA.read_data()
QA.show_age_distrib()
valid_mail_data = QA.remove_rows_without_mail()
scores_fixed_data, fixed_indices = QA.fill_na_with_mean()
corr_scores = QA.correlate_gender_age()
|
Yugal41735/sample-repoasitory
|
Hello.py
|
print("hello World")
print("This is the sample repository")
|
Lispython/httpecho
|
setup.py
|
<gh_stars>0
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
httpecho
~~~~~~~~
HTTP Request & Response service
:copyright: (c) 2014 by <NAME> (<EMAIL>).
:license: BSD, see LICENSE for more details.
"""
import sys
import os
from setuptools import setup, find_packages
try:
readme_content = open(os.path.join(os.path.abspath(
os.path.dirname(__file__)), "README.rst")).read()
except Exception:
exc = sys.exc_info()[1]
# Current exception is 'exc'
print(exc)
readme_content = __doc__
VERSION = "0.0.6"
def run_tests():
from tests import suite
return suite()
py_ver = sys.version_info
#: Python 2.x?
is_py2 = (py_ver[0] == 2)
#: Python 3.x?
is_py3 = (py_ver[0] == 3)
tests_require = [
'nose',
'mock==1.0.1']
install_requires = [
"tornado==2.4.1",
"commandor==0.1.5"]
if not (is_py3 or (is_py2 and py_ver[1] >= 7)):
install_requires.append("importlib==1.0.2")
PACKAGE_DATA = []
PROJECT = 'httpecho'
for folder in ['static', 'templates']:
for root, dirs, files in os.walk(os.path.join(PROJECT, folder)):
for filename in files:
PACKAGE_DATA.append("%s/%s" % (root[len(PROJECT) + 1:], filename))
setup(
name="httpecho",
version=VERSION,
description="HTTP Request & Response service",
long_description=readme_content,
author="<NAME>",
author_email="<EMAIL>",
maintainer="<NAME>",
maintainer_email="<EMAIL>",
url="https://github.com/Lispython/httpecho",
packages=find_packages(),
package_data={'': PACKAGE_DATA},
entry_points={
'console_scripts': [
'httpecho = httpecho.app:main',
]},
install_requires=install_requires,
tests_require=tests_require,
license="BSD",
platforms = ['Linux', 'Mac'],
classifiers=[
"Environment :: Web Environment",
"License :: OSI Approved :: BSD License",
"Programming Language :: Python",
"Programming Language :: Python :: 2.6",
"Programming Language :: Python :: 2.7",
"Programming Language :: Python :: 3.2",
"Programming Language :: Python :: 3.3",
"Operating System :: MacOS :: MacOS X",
"Operating System :: POSIX",
"Topic :: Internet",
"Topic :: Software Development :: Libraries",
],
test_suite = '__main__.run_tests'
)
|
Lispython/httpecho
|
httpecho/app.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
httpecho.app
~~~~~~~~~~~~
Core of HTTP Request & Response service
:copyright: (c) 2011 by <NAME> (<EMAIL>).
:license: BSD, see LICENSE for more details.
:github: http://github.com/Lispython/httpecho
"""
import os
import sys
import time
import tornado.ioloop
import tornado
try:
import urlparse
except ImportError:
# Python3
from urllib import parse as urlparse
try:
from urlparse import parse_qs
parse_qs # placate pyflakes
except ImportError:
try:
# Python3
from urllib.parse import parse_qs
except ImportError:
# fall back for Python 2.5
from cgi import parse_qs
from tornado.web import Application
from tornado.options import define, options
from tornado import httpserver
from tornado import autoreload
from tornado.escape import utf8
import logging as logging_module
from tornado.options import _LogFormatter
from logging import StreamHandler
define("port", default=8889, help="run HTTP on the given port", type=int)
define("ssl_port", default=8890, help="run HTTPS on the given port", type=int)
logger = logging_module.getLogger('httpecho')
def configure_logging(logging):
"""Configure logging handler"""
if logging.upper() not in ['DEBUG', 'INFO', 'CRITICAL',
'WARNING', 'ERROR']:
return
logger.setLevel(getattr(logging_module, logging.upper()))
if not logger.handlers:
channel = StreamHandler()
channel.setFormatter(_LogFormatter(color=False))
logger.addHandler(channel)
logger.info("Logging handler configured with level {0}".format(logging))
PROJECT_ROOT = os.path.abspath(os.path.dirname(__file__))
from pprint import pprint
def rel(*args):
return os.path.join(PROJECT_ROOT, *args)
class HTTPApplication(Application):
"""Base application
"""
def __init__(self):
self.dirty_handlers = [
(r"/(?P<path>.*)", MainHandler),
]
settings = dict(
site_title="HTTP echo & debug",
template_path=os.path.join(os.path.dirname(__file__), "templates"),
static_path=os.path.join(os.path.dirname(__file__), "static"),
xsrf_cookies=False,
cookie_secret="<KEY>
autoescape=None,
)
tornado.web.Application.__init__(self, [(h[0], h[1]) for h in self.dirty_handlers], **settings)
class CustomHandler(tornado.web.RequestHandler):
"""Custom handler with good methods
"""
def __init__(self, *args, **kwargs):
super(CustomHandler, self).__init__(*args, **kwargs)
self.set_header("Server", "LightBeer/0.568")
def json_response(self, data, finish=True):
output_json = utf8(tornado.escape.json_encode(data))
# self.set_header("Content-Type", "application/json")
if finish is True:
self.finish(output_json)
else:
return output_json
def get_data(self):
data = {}
data['args'] = dict([(k, v) for k, v in self.request.arguments.items()])
data['headers'] = dict([(k, v) for k, v in self.request.headers.items()])
data['ip'] = self.request.headers.get(
"X-Real-Ip", self.request.headers.get(
"X-RealI-IP",
self.request.headers.get("X-Forwarded-For", self.request.remote_ip)))
data['url'] = self.request.full_url()
data['request_time'] = self.request.request_time()
data['start_time'] = self.request._start_time
if self.request.method in ("POST", "PUT", "PATCH"):
data['body'] = self.request.body
data['files'] = {}
for k, v in self.request.files.items():
data['files'][k] = [dict(filename=x['filename'],
content_type=x['content_type'],
body=x['body'] if len(x['body']) < 500 else x['body'][:500])
for x in v]
logger.debug(data)
logger.debug(self.request)
return data
class MainHandler(CustomHandler):
"""GET method
"""
def get(self, *args, **kwargs):
self.json_response(self.get_data())
    def post(self, *args, **kwargs):
        return self.json_response(self.get_data())
    def put(self, *args, **kwargs):
        return self.json_response(self.get_data())
    def delete(self, *args, **kwargs):
        return self.json_response(self.get_data())
application = HTTPApplication()
def main():
tornado.options.parse_command_line()
configure_logging('debug')
http_server = httpserver.HTTPServer(application)
certfile = rel("server.crt")
keyfile = rel("server.key")
if os.path.exists(certfile) and os.path.exists(keyfile):
https_server = httpserver.HTTPServer(application, ssl_options={
"certfile": certfile,
"keyfile": keyfile})
https_server.listen(options.ssl_port)
http_server.listen(options.port)
ioloop = tornado.ioloop.IOLoop.instance()
autoreload.start(io_loop=ioloop, check_time=100)
logger.debug("starting server {0}".format(options.port))
ioloop.start()
if __name__ == "__main__":
main()
|
Lispython/httpecho
|
httpecho/utils.py
|
<reponame>Lispython/httpecho
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
httphq.utils
~~~~~~~~~~~~
Helpers for handlers
:copyright: (c) 2011 - 2013 by <NAME> (<EMAIL>).
:license: BSD, see LICENSE for more details.
:github: http://github.com/Lispython/httphq
"""
import sys
try:
from urllib2 import parse_http_list
except ImportError:
from urllib.request import parse_http_list
from base64 import b64decode
from hashlib import md5
from tornado.escape import utf8
def parse_dict_header(value):
"""Parse key=value pairs from value list
"""
result = {}
for item in parse_http_list(value):
if "=" not in item:
result[item] = None
continue
name, value = item.split('=', 1)
if value[:1] == value[-1:] == '"':
value = value[1:-1] # strip "
result[name] = value
return result
def parse_authorization_header(header):
"""Parse authorization header and build Authorization object
Authorization: Digest username="Mufasa",
realm="<EMAIL>",
nonce="dcd98b7102dd2f0e8b11d0f600bfb0c093",
uri="/dir/index.html",
qop=auth, # not required
nc=00000001, # required if qop is auth or auth-int
cnonce="0a4f113b", # required if qop is auth or auth-int
response="6629fae49393a05397450978507c4ef1",
opaque="5ccc069c403ebaf9f0171e9517f40e41"
"""
if not header:
return
try:
auth_type, auth_info = header.split(None, 1) # separate auth type and values
auth_type = auth_type.lower()
except ValueError:
print(sys.exc_info()[0])
return
if auth_type == 'basic':
try:
            username, password = b64decode(auth_info).decode("utf-8").split(':', 1)
except Exception:
return
return Authorization('basic', {'username': username,
'password': password})
elif auth_type == 'digest':
auth_map = parse_dict_header(auth_info)
required_map = {
'auth': ("username", "realm", "nonce", "uri", "response", "opaque"),
'auth-int': ("realm", "nonce", "uri", "qop", "nc", "cnonce", "response", "opaque")}
required = required_map.get(auth_map.get('qop', 'auth'))
for key in required:
            if key not in auth_map:
return
return Authorization('digest', auth_map)
elif auth_type == 'oauth':
auth_map = parse_dict_header(auth_info)
return Authorization('OAuth', auth_map)
else:
raise ValueError("Unknown auth type %s" % auth_type)
def parse_authenticate_header(header):
"""Parse WWW-Authenticate response header
WWW-Authenticate: Digest
realm="<EMAIL>",
qop="auth,auth-int",
nonce="dcd98b7102dd2f0e8b11d0f600bfb0c093",
opaque="5ccc069c403ebaf9f0171e9517f40e41"
"""
if not header:
return
try:
auth_type, auth_info = header.split(None, 1)
auth_type = auth_type.lower()
except ValueError:
print(sys.exc_info()[0])
return
return WWWAuthentication(auth_type, parse_dict_header(auth_info))
class WWWAuthentication(dict):
"""WWWAuthentication header object
"""
AUTH_TYPES = ("Digest", "Basic", "OAuth")
def __init__(self, auth_type='basic', data=None):
if auth_type.lower() not in [t.lower() for t in self.AUTH_TYPES]:
raise RuntimeError("Unsupported auth type: %s" % auth_type)
dict.__init__(self, data or {})
self._auth_type = auth_type
@staticmethod
def from_string(value):
"""Build Authenticate object from header value
- `value`: Authorization field value
"""
return parse_authenticate_header(value)
def to_header(self):
"""Convert values into WWW-Authenticate header value
"""
d = dict(self)
return "%s %s" % (self._auth_type.title(), ", ".join("%s=\"%s\"" % (k, v)
for k, v in d.items()))
class Authorization(dict):
"""Authorization header object
"""
AUTH_TYPES = ("Digest", "Basic", "OAuth")
def __init__(self, auth_type='basic', data=None):
if auth_type.lower() not in [t.lower() for t in self.AUTH_TYPES]:
raise RuntimeError("Unsupported auth type: %s" % auth_type)
dict.__init__(self, data or {})
self._auth_type = auth_type
@staticmethod
def from_string(value):
"""Build Authorization object from header value
- `value`: Authorization field value
"""
return parse_authorization_header(value)
def to_header(self):
"""Convert values into WWW-Authenticate header value
"""
d = dict(self)
return "%s %s" % (self._auth_type.title(), ", ".join("%s=\"%s\"" % (k, v)
for k, v in d.items()))
# Digest auth properties http://tools.ietf.org/html/rfc2069#page-4
realm = property(lambda x: x.get('realm'), doc="""
A string to be displayed to users so they know which username and
password to use. This string should contain at least the name of
the host performing the authentication and might additionally
indicate the collection of users who might have access. An example
might be "<EMAIL>". The realm is a
"quoted-string" as specified in section 2.2 of the HTTP/1.1
specification.""")
domain = property(lambda x: x.get('domain'), doc="""domain
A comma-separated list of URIs, as specified for HTTP/1.0. The
intent is that the client could use this information to know the
set of URIs for which the same authentication information should be
sent. The URIs in this list may exist on different servers. If
this keyword is omitted or empty, the client should assume that the
domain consists of all URIs on the responding server.""")
nonce = property(lambda x: x.get('nonce'))
opaque = property(lambda x: x.get('opaque'))
username = property(lambda x: x.get('username'))
password = property(lambda x: x.get('password'))
uri = property(lambda x: x.get('uri'))
qop = property(lambda x: x.get('qop'))
cnonce = property(lambda x: x.get('cnonce'))
responce = property(lambda x: x.get('responce'))
nc = property(lambda x: x.get('nc'))
stale = property(lambda x: x.get('stale'))
    algorithm = property(lambda x: x.get('algorithm'))
# Digest auth helpers
# qop is a quality of protection
def H(data):
return md5(utf8(data)).hexdigest()
def HA1(realm, username, password):
"""Create HA1 hash by realm, username, password
HA1 = md5(A1) = MD5(username:realm:password)
"""
return H("%s:%s:%s" % (username,
realm,
password))
def HA2(credentials, request):
    """Create HA2 md5 hash
    If the qop directive's value is "auth" or is unspecified, then HA2 is:
    HA2 = md5(A2) = MD5(method:digestURI)
    If the qop directive's value is "auth-int", then HA2 is:
    HA2 = md5(A2) = MD5(method:digestURI:MD5(entityBody))
    """
    if credentials.get("qop") == "auth" or credentials.get('qop') is None:
        return H("%s:%s" % (request['method'], request['uri']))
    elif credentials.get("qop") == "auth-int":
        for k in 'method', 'uri', 'body':
            if k not in request:
                raise ValueError("%s required" % k)
        return H("%s:%s:%s" % (request['method'],
                               request['uri'],
                               H(request['body'])))
    raise ValueError("unsupported qop value: %r" % credentials.get("qop"))
def response(credentials, password, request):
    """Compile digest auth response
    If the qop directive's value is "auth" or "auth-int", then compute the response as follows:
    RESPONSE = MD5(HA1:nonce:nonceCount:clientNonce:qop:HA2)
    Else if the qop directive is unspecified, then compute the response as follows:
    RESPONSE = MD5(HA1:nonce:HA2)
    Arguments:
    - `credentials`: credentials dict
    - `password`: <PASSWORD>
    - `request`: request dict
    """
    response = None
    HA1_value = HA1(credentials.get('realm'), credentials.get('username'), password)
    HA2_value = HA2(credentials, request)
    if credentials.get('qop') is None:
        response = H(":".join([HA1_value, credentials.get('nonce'), HA2_value]))
    elif credentials.get('qop') == 'auth' or credentials.get('qop') == 'auth-int':
        for k in 'nonce', 'nc', 'cnonce', 'qop':
            if k not in credentials:
                raise ValueError("%s required for response H" % k)
        response = H(":".join([HA1_value,
                               credentials.get('nonce'),
                               credentials.get('nc'),
                               credentials.get('cnonce'),
                               credentials.get('qop'),
                               HA2_value]))
    else:
        raise ValueError("unsupported qop value: %r" % credentials.get('qop'))
    return response
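# Minimal usage sketch (not part of the original module; it assumes the helpers
# above plus the module's own md5/utf8 imports). The credential values below are
# made up purely for illustration.
if __name__ == '__main__':
    _credentials = {'realm': 'testrealm', 'username': 'Mufasa', 'nonce': 'abc123',
                    'nc': '00000001', 'cnonce': '0a4f113b', 'qop': 'auth'}
    _request = {'method': 'GET', 'uri': '/dir/index.html'}
    print(response(_credentials, 'example-password', _request))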
|
Lispython/httpecho
|
debug.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
httphq.debug
~~~~~~~~~~~~
Local debug runner
:copyright: (c) 2011 - 2013 by <NAME> (<EMAIL>).
:license: BSD, see LICENSE for more details.
"""
import tornado.ioloop
from tornado import httpserver, autoreload
from tornado.options import options, parse_command_line
from httphq.app import application, rel
if __name__ == '__main__':
parse_command_line()
http_server = httpserver.HTTPServer(application)
https_server = httpserver.HTTPServer(application, ssl_options={
"certfile": rel("..", "server.crt"),
"keyfile": rel("..", "server.key"),
})
http_server.listen(options.port)
https_server.listen(options.ssl_port)
ioloop = tornado.ioloop.IOLoop.instance()
autoreload.start(io_loop=ioloop, check_time=100)
ioloop.start()
|
Lispython/httpecho
|
httpecho/compat.py
|
<gh_stars>1-10
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Compatibility module
~~~~~~~~~~~~~~~~~~~~
Add utilities to support python2 and python3
:copyright: (c) 2013 by <NAME> (<EMAIL>).
:license: BSD, see LICENSE for more details.
:github: http://github.com/Lispython/httphq
"""
import sys
py_ver = sys.version_info
#: Python 2.x?
is_py2 = (py_ver[0] == 2)
#: Python 3.x?
is_py3 = (py_ver[0] == 3)
if is_py2:
from urllib import unquote, urlencode, quote
try:
from cStringIO import StringIO
BytesIO = StringIO
except ImportError:
from StringIO import StringIO
BytesIO = StringIO
else:
# Python3
from urllib.parse import urlencode, unquote, quote
from io import StringIO, BytesIO
|
fellipematos/TAG
|
tag/app.py
|
<filename>tag/app.py
from flask import Flask, render_template, request, url_for
import json
app = Flask(__name__)
@app.route('/')
def index():
return render_template('index.html')
@app.route('/etiqueta', methods=['GET', 'POST'])
def tag():
if request.method == 'POST':
dados = {
'dnome': request.form['dnome'],
'dcep': request.form['dcep'],
'drua': request.form['drua'],
'dnum': request.form['dnum'],
'dcom': request.form['dcom'],
'dbairro': request.form['dbairro'],
'dcidade': request.form['dcidade'],
'duf': request.form['duf'],
'rnome': request.form['rnome'],
'rcep': request.form['rcep'],
'rrua': request.form['rrua'],
'rnum': request.form['rnum'],
'rcom': request.form['rcom'],
'rbairro': request.form['rbairro'],
'rcidade': request.form['rcidade'],
'ruf': request.form['ruf']
}
        return render_template('tag.html', result=dados)
    # For plain GET requests fall back to the form page (assumes index.html holds the form).
    return render_template('index.html')
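# Local run helper (illustrative, not part of the original file): starts Flask's
# development server so the form at '/' and the POST handler at '/etiqueta' can be tried.
if __name__ == '__main__':
    app.run(debug=True)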
|
boyob/YOLO4
|
utils/utils_fit.py
|
<filename>utils/utils_fit.py
import torch
from tqdm import tqdm
from utils.utils import get_lr
def fit_one_epoch(model_train, model, yolo_loss, loss_history, optimizer, epoch, epoch_step, epoch_step_val, gen,
gen_val, Epoch, cuda):
loss = 0
val_loss = 0
model_train.train()
print('Start Train')
with tqdm(total=epoch_step, desc=f'Epoch {epoch + 1}/{Epoch}', postfix=dict, mininterval=0.3) as pbar:
for iteration, batch in enumerate(gen):
if iteration >= epoch_step:
break
images, targets = batch[0], batch[1]
with torch.no_grad():
if cuda:
images = torch.from_numpy(images).type(torch.FloatTensor).cuda()
targets = [torch.from_numpy(ann).type(torch.FloatTensor).cuda() for ann in targets]
else:
images = torch.from_numpy(images).type(torch.FloatTensor)
targets = [torch.from_numpy(ann).type(torch.FloatTensor) for ann in targets]
            # ----------------------#
            #   Zero the gradients
            # ----------------------#
optimizer.zero_grad()
            # ----------------------#
            #   Forward pass
            # ----------------------#
outputs = model_train(images)
loss_value_all = 0
num_pos_all = 0
            # ----------------------#
            #   Compute the loss
            # ----------------------#
for l in range(len(outputs)):
loss_item, num_pos = yolo_loss(l, outputs[l], targets)
loss_value_all += loss_item
num_pos_all += num_pos
loss_value = loss_value_all / num_pos_all
            # ----------------------#
            #   Backward pass
            # ----------------------#
loss_value.backward()
optimizer.step()
loss += loss_value.item()
pbar.set_postfix(**{'loss': loss / (iteration + 1),
'lr': get_lr(optimizer)})
pbar.update(1)
print('Finish Train')
model_train.eval()
print('Start Validation')
with tqdm(total=epoch_step_val, desc=f'Epoch {epoch + 1}/{Epoch}', postfix=dict, mininterval=0.3) as pbar:
for iteration, batch in enumerate(gen_val):
if iteration >= epoch_step_val:
break
images, targets = batch[0], batch[1]
with torch.no_grad():
if cuda:
images = torch.from_numpy(images).type(torch.FloatTensor).cuda()
targets = [torch.from_numpy(ann).type(torch.FloatTensor).cuda() for ann in targets]
else:
images = torch.from_numpy(images).type(torch.FloatTensor)
targets = [torch.from_numpy(ann).type(torch.FloatTensor) for ann in targets]
            # ----------------------#
            #   Zero the gradients
            # ----------------------#
optimizer.zero_grad()
            # ----------------------#
            #   Forward pass
            # ----------------------#
outputs = model_train(images)
loss_value_all = 0
num_pos_all = 0
            # ----------------------#
            #   Compute the loss
            # ----------------------#
for l in range(len(outputs)):
loss_item, num_pos = yolo_loss(l, outputs[l], targets)
loss_value_all += loss_item
num_pos_all += num_pos
loss_value = loss_value_all / num_pos_all
val_loss += loss_value.item()
pbar.set_postfix(**{'val_loss': val_loss / (iteration + 1)})
pbar.update(1)
print('Finish Validation')
loss_history.append_loss(loss / epoch_step, val_loss / epoch_step_val)
print('Epoch:' + str(epoch + 1) + '/' + str(Epoch))
print('Total Loss: %.3f || Val Loss: %.3f ' % (loss / epoch_step, val_loss / epoch_step_val))
torch.save(model.state_dict(),
'logs/ep%03d-loss%.3f-val_loss%.3f.pth' % (epoch + 1, loss / epoch_step, val_loss / epoch_step_val))
|
boyob/YOLO4
|
utils/dataloader.py
|
<gh_stars>0
from random import sample, shuffle
import cv2
import numpy as np
from PIL import Image
from torch.utils.data.dataset import Dataset
from utils.utils import cvtColor, preprocess_input
class YoloDataset(Dataset):
def __init__(self, annotation_lines, input_shape, num_classes, mosaic, train):
super(YoloDataset, self).__init__()
self.annotation_lines = annotation_lines
self.input_shape = input_shape
self.num_classes = num_classes
self.length = len(self.annotation_lines)
self.mosaic = mosaic
self.train = train
def __len__(self):
return self.length
def __getitem__(self, index):
index = index % self.length
        #---------------------------------------------------#
        #   Random data augmentation is applied during training;
        #   no random augmentation is applied during validation.
        #---------------------------------------------------#
if self.mosaic:
if self.rand() < 0.5:
lines = sample(self.annotation_lines, 3)
lines.append(self.annotation_lines[index])
shuffle(lines)
image, box = self.get_random_data_with_Mosaic(lines, self.input_shape)
else:
image, box = self.get_random_data(self.annotation_lines[index], self.input_shape, random = self.train)
else:
image, box = self.get_random_data(self.annotation_lines[index], self.input_shape, random = self.train)
image = np.transpose(preprocess_input(np.array(image, dtype=np.float32)), (2, 0, 1))
box = np.array(box, dtype=np.float32)
if len(box) != 0:
box[:, [0, 2]] = box[:, [0, 2]] / self.input_shape[1]
box[:, [1, 3]] = box[:, [1, 3]] / self.input_shape[0]
box[:, 2:4] = box[:, 2:4] - box[:, 0:2]
box[:, 0:2] = box[:, 0:2] + box[:, 2:4] / 2
return image, box
def rand(self, a=0, b=1):
return np.random.rand()*(b-a) + a
def get_random_data(self, annotation_line, input_shape, jitter=.3, hue=.1, sat=1.5, val=1.5, random=True):
line = annotation_line.split()
        #------------------------------#
        #   Read the image and convert it to RGB
        #------------------------------#
image = Image.open(line[0])
image = cvtColor(image)
        #------------------------------#
        #   Get the image size and the target input size
        #------------------------------#
iw, ih = image.size
h, w = input_shape
        #------------------------------#
        #   Get the ground-truth boxes
        #------------------------------#
box = np.array([np.array(list(map(int,box.split(',')))) for box in line[1:]])
if not random:
scale = min(w/iw, h/ih)
nw = int(iw*scale)
nh = int(ih*scale)
dx = (w-nw)//2
dy = (h-nh)//2
            #---------------------------------#
            #   Pad the unused area of the image with gray bars
            #---------------------------------#
image = image.resize((nw,nh), Image.BICUBIC)
new_image = Image.new('RGB', (w,h), (128,128,128))
new_image.paste(image, (dx, dy))
image_data = np.array(new_image, np.float32)
            #---------------------------------#
            #   Adjust the ground-truth boxes
            #---------------------------------#
if len(box)>0:
np.random.shuffle(box)
box[:, [0,2]] = box[:, [0,2]]*nw/iw + dx
box[:, [1,3]] = box[:, [1,3]]*nh/ih + dy
box[:, 0:2][box[:, 0:2]<0] = 0
box[:, 2][box[:, 2]>w] = w
box[:, 3][box[:, 3]>h] = h
box_w = box[:, 2] - box[:, 0]
box_h = box[:, 3] - box[:, 1]
box = box[np.logical_and(box_w>1, box_h>1)] # discard invalid box
return image_data, box
        #------------------------------------------#
        #   Resize the image and distort its aspect ratio
        #------------------------------------------#
new_ar = w/h * self.rand(1-jitter,1+jitter) / self.rand(1-jitter,1+jitter)
scale = self.rand(.25, 2)
if new_ar < 1:
nh = int(scale*h)
nw = int(nh*new_ar)
else:
nw = int(scale*w)
nh = int(nw/new_ar)
image = image.resize((nw,nh), Image.BICUBIC)
        #------------------------------------------#
        #   Pad the unused area of the image with gray bars
        #------------------------------------------#
dx = int(self.rand(0, w-nw))
dy = int(self.rand(0, h-nh))
new_image = Image.new('RGB', (w,h), (128,128,128))
new_image.paste(image, (dx, dy))
image = new_image
        #------------------------------------------#
        #   Flip the image
        #------------------------------------------#
flip = self.rand()<.5
if flip: image = image.transpose(Image.FLIP_LEFT_RIGHT)
        #------------------------------------------#
        #   Color-space (HSV) distortion
        #------------------------------------------#
hue = self.rand(-hue, hue)
sat = self.rand(1, sat) if self.rand()<.5 else 1/self.rand(1, sat)
val = self.rand(1, val) if self.rand()<.5 else 1/self.rand(1, val)
x = cv2.cvtColor(np.array(image,np.float32)/255, cv2.COLOR_RGB2HSV)
x[..., 0] += hue*360
x[..., 0][x[..., 0]>1] -= 1
x[..., 0][x[..., 0]<0] += 1
x[..., 1] *= sat
x[..., 2] *= val
x[x[:,:, 0]>360, 0] = 360
x[:, :, 1:][x[:, :, 1:]>1] = 1
x[x<0] = 0
image_data = cv2.cvtColor(x, cv2.COLOR_HSV2RGB)*255
        #---------------------------------#
        #   Adjust the ground-truth boxes
        #---------------------------------#
if len(box)>0:
np.random.shuffle(box)
box[:, [0,2]] = box[:, [0,2]]*nw/iw + dx
box[:, [1,3]] = box[:, [1,3]]*nh/ih + dy
if flip: box[:, [0,2]] = w - box[:, [2,0]]
box[:, 0:2][box[:, 0:2]<0] = 0
box[:, 2][box[:, 2]>w] = w
box[:, 3][box[:, 3]>h] = h
box_w = box[:, 2] - box[:, 0]
box_h = box[:, 3] - box[:, 1]
box = box[np.logical_and(box_w>1, box_h>1)]
return image_data, box
def merge_bboxes(self, bboxes, cutx, cuty):
merge_bbox = []
for i in range(len(bboxes)):
for box in bboxes[i]:
tmp_box = []
x1, y1, x2, y2 = box[0], box[1], box[2], box[3]
if i == 0:
if y1 > cuty or x1 > cutx:
continue
if y2 >= cuty and y1 <= cuty:
y2 = cuty
if x2 >= cutx and x1 <= cutx:
x2 = cutx
if i == 1:
if y2 < cuty or x1 > cutx:
continue
if y2 >= cuty and y1 <= cuty:
y1 = cuty
if x2 >= cutx and x1 <= cutx:
x2 = cutx
if i == 2:
if y2 < cuty or x2 < cutx:
continue
if y2 >= cuty and y1 <= cuty:
y1 = cuty
if x2 >= cutx and x1 <= cutx:
x1 = cutx
if i == 3:
if y1 > cuty or x2 < cutx:
continue
if y2 >= cuty and y1 <= cuty:
y2 = cuty
if x2 >= cutx and x1 <= cutx:
x1 = cutx
tmp_box.append(x1)
tmp_box.append(y1)
tmp_box.append(x2)
tmp_box.append(y2)
tmp_box.append(box[-1])
merge_bbox.append(tmp_box)
return merge_bbox
def get_random_data_with_Mosaic(self, annotation_line, input_shape, max_boxes=100, hue=.1, sat=1.5, val=1.5):
h, w = input_shape
min_offset_x = self.rand(0.25, 0.75)
min_offset_y = self.rand(0.25, 0.75)
nws = [ int(w * self.rand(0.4, 1)), int(w * self.rand(0.4, 1)), int(w * self.rand(0.4, 1)), int(w * self.rand(0.4, 1))]
nhs = [ int(h * self.rand(0.4, 1)), int(h * self.rand(0.4, 1)), int(h * self.rand(0.4, 1)), int(h * self.rand(0.4, 1))]
place_x = [int(w*min_offset_x) - nws[0], int(w*min_offset_x) - nws[1], int(w*min_offset_x), int(w*min_offset_x)]
place_y = [int(h*min_offset_y) - nhs[0], int(h*min_offset_y), int(h*min_offset_y), int(h*min_offset_y) - nhs[3]]
image_datas = []
box_datas = []
index = 0
for line in annotation_line:
            # Split each annotation line
            line_content = line.split()
            # Open the image
            image = Image.open(line_content[0])
            image = cvtColor(image)
            # Image size
            iw, ih = image.size
            # Store the box positions
            box = np.array([np.array(list(map(int,box.split(',')))) for box in line_content[1:]])
            # Whether to flip the image
flip = self.rand()<.5
if flip and len(box)>0:
image = image.transpose(Image.FLIP_LEFT_RIGHT)
box[:, [0,2]] = iw - box[:, [2,0]]
nw = nws[index]
nh = nhs[index]
image = image.resize((nw,nh), Image.BICUBIC)
            # Place the image at its position, one of the four mosaic quadrants
dx = place_x[index]
dy = place_y[index]
new_image = Image.new('RGB', (w,h), (128,128,128))
new_image.paste(image, (dx, dy))
image_data = np.array(new_image)
index = index + 1
box_data = []
            # Re-process the boxes
if len(box)>0:
np.random.shuffle(box)
box[:, [0,2]] = box[:, [0,2]]*nw/iw + dx
box[:, [1,3]] = box[:, [1,3]]*nh/ih + dy
box[:, 0:2][box[:, 0:2]<0] = 0
box[:, 2][box[:, 2]>w] = w
box[:, 3][box[:, 3]>h] = h
box_w = box[:, 2] - box[:, 0]
box_h = box[:, 3] - box[:, 1]
box = box[np.logical_and(box_w>1, box_h>1)]
box_data = np.zeros((len(box),5))
box_data[:len(box)] = box
image_datas.append(image_data)
box_datas.append(box_data)
        # Cut the four images and stitch them together
cutx = int(w * min_offset_x)
cuty = int(h * min_offset_y)
new_image = np.zeros([h, w, 3])
new_image[:cuty, :cutx, :] = image_datas[0][:cuty, :cutx, :]
new_image[cuty:, :cutx, :] = image_datas[1][cuty:, :cutx, :]
new_image[cuty:, cutx:, :] = image_datas[2][cuty:, cutx:, :]
new_image[:cuty, cutx:, :] = image_datas[3][:cuty, cutx:, :]
        # Apply the color-space transformation
hue = self.rand(-hue, hue)
sat = self.rand(1, sat) if self.rand()<.5 else 1/self.rand(1, sat)
val = self.rand(1, val) if self.rand()<.5 else 1/self.rand(1, val)
x = cv2.cvtColor(np.array(new_image/255,np.float32), cv2.COLOR_RGB2HSV)
x[..., 0] += hue*360
x[..., 0][x[..., 0]>1] -= 1
x[..., 0][x[..., 0]<0] += 1
x[..., 1] *= sat
x[..., 2] *= val
x[x[:, :, 0]>360, 0] = 360
x[:, :, 1:][x[:, :, 1:]>1] = 1
x[x<0] = 0
new_image = cv2.cvtColor(x, cv2.COLOR_HSV2RGB)*255
        # Further process the boxes
new_boxes = self.merge_bboxes(box_datas, cutx, cuty)
return new_image, new_boxes
# collate_fn used by the DataLoader
def yolo_dataset_collate(batch):
images = []
bboxes = []
for img, box in batch:
images.append(img)
bboxes.append(box)
images = np.array(images)
return images, bboxes
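# Usage sketch (not from the original file): wiring YoloDataset into a PyTorch
# DataLoader with the collate function above. The annotation file name, input
# shape and class count are placeholder assumptions for illustration only.
if __name__ == '__main__':
    from torch.utils.data import DataLoader
    with open('2007_train.txt') as f:          # assumed annotation file, one image per line
        annotation_lines = f.readlines()
    dataset = YoloDataset(annotation_lines, input_shape=[416, 416], num_classes=20,
                          mosaic=True, train=True)
    loader = DataLoader(dataset, batch_size=8, shuffle=True,
                        collate_fn=yolo_dataset_collate)
    images, bboxes = next(iter(loader))        # images: (8, 3, 416, 416) array, bboxes: list of arrays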
|
HotIce0/ht_docs
|
app_repo/models.py
|
from datetime import datetime
from django.db import models
from django.conf import settings
class Repository(models.Model):
"""
    Repository table
"""
class Meta:
db_table = 't_repository'
user = models.ForeignKey(verbose_name='仓库所有者', to=settings.AUTH_USER_MODEL, on_delete=models.CASCADE,
db_column='user_id', null=False)
name = models.CharField(verbose_name='名称', max_length=32, null=False, default='')
remarks = models.CharField(verbose_name='描述', max_length=2048, null=False, default='')
git_ssh_url = models.CharField(verbose_name='ssh地址', max_length=4096, null=False, default='')
create_at = models.DateTimeField(verbose_name='创建时间', null=False, default=datetime.now)
update_at = models.DateTimeField(verbose_name='更新时间', null=True, default=None)
update_msg = models.CharField(verbose_name='更新消息', max_length=4096, null=False, default='')
class SharedRepo(models.Model):
"""
    Shared repository table.
    Implementation: a separate directory containing a clone of the original git repository, pinned to the specified branch.
"""
class Meta:
db_table = 't_shared_repo'
repo = models.ForeignKey(verbose_name='原仓库', to='app_repo.Repository', on_delete=models.CASCADE,
db_column='repo_id', null=False)
user = models.ForeignKey(verbose_name='共享仓库所有者', to=settings.AUTH_USER_MODEL, on_delete=models.CASCADE,
db_column='user_id', null=False)
name = models.CharField(verbose_name='名称', max_length=32, null=False, default='')
remarks = models.CharField(verbose_name='描述', max_length=2048, null=False, default='')
branch = models.CharField(verbose_name='分支名称', max_length=512, null=False, default='')
create_at = models.DateTimeField(verbose_name='创建时间', null=False, default=datetime.now)
update_at = models.DateTimeField(verbose_name='更新时间', null=True, default=None)
update_msg = models.CharField(verbose_name='更新消息', max_length=4096, null=False, default='')
is_public = models.BooleanField(verbose_name='是否公开', null=False, default=False)
class SharedRepoAccessCtrl(models.Model):
"""
    Shared repository access control table
"""
class Meta:
db_table = 't_shared_repo_access_ctrl'
user = models.ForeignKey(verbose_name='用户', to=settings.AUTH_USER_MODEL, on_delete=models.CASCADE,
db_column='user_id', null=False)
shared_repo = models.ForeignKey(verbose_name='共享仓库', to='app_repo.SharedRepo', on_delete=models.CASCADE,
db_column='shared_repo_id', null=False)
expired_at = models.DateTimeField(verbose_name='授权过期时间', null=False, default=datetime.now)
class SharedRepoStar(models.Model):
"""
    Shared repository star (favorites) table
"""
class Meta:
db_table = 't_shared_repo_star'
user = models.ForeignKey(verbose_name='用户', to=settings.AUTH_USER_MODEL, on_delete=models.CASCADE,
db_column='user_id', null=False)
shared_repo = models.ForeignKey(verbose_name='共享仓库', to='app_repo.SharedRepo', on_delete=models.CASCADE,
db_column='shared_repo_id', null=False)
create_at = models.DateTimeField(verbose_name='收藏时间', null=False, default=datetime.now)
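# Usage sketch (illustrative only; requires a configured Django project and an
# existing user object, here called `some_user` as a placeholder):
#
#   repo = Repository.objects.create(user=some_user, name='docs', git_ssh_url='git@host:docs.git')
#   shared = SharedRepo.objects.create(repo=repo, user=some_user, name='docs', branch='main', is_public=True)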
|
HotIce0/ht_docs
|
app_user/models.py
|
from datetime import datetime
from django.contrib.auth.models import AbstractBaseUser, BaseUserManager, PermissionsMixin
from django.db import models
class UserManager(BaseUserManager):
def create_user(self, username, **extra_fields):
"""
Create and save a user with the given username, email, and password.
"""
if not username:
raise ValueError('The given username must be set')
        password = extra_fields.pop('password', None)
        user = self.model(username=username, nick=username, **extra_fields)
        if password is None:
            user.set_unusable_password()
        else:
            user.set_password(password)
user.save(using=self._db)
return user
class User(AbstractBaseUser, PermissionsMixin):
class Meta:
db_table = 't_user'
USERNAME_FIELD = 'username'
objects = UserManager()
username = models.CharField(verbose_name='用户名', max_length=32, null=False, unique=True)
nick = models.CharField(verbose_name='昵称', max_length=32, null=False, default='')
email = models.CharField(verbose_name='邮箱', max_length=2048, null=False, default='')
ssh_key_public = models.CharField(verbose_name='SSH公钥', max_length=4096, null=False, default='')
ssh_key_private = models.CharField(verbose_name='SSH私钥', max_length=4096, null=False, default='')
create_at = models.DateTimeField(verbose_name='创建时间', null=False, default=datetime.now)
def __str__(self):
return self.username
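# Usage sketch (illustrative; requires a configured Django project, e.g. run it
# from `python manage.py shell`):
#
#   from app_user.models import User
#   user = User.objects.create_user('alice', password='example-pass', email='alice@example.com')
#   user.check_password('example-pass')   # -> True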
|
HotIce0/ht_docs
|
app_repo/migrations/0001_initial.py
|
<gh_stars>0
# Generated by Django 3.2.9 on 2021-11-27 18:47
import datetime
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name='Repository',
fields=[
('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(default='', max_length=32, verbose_name='名称')),
('remarks', models.CharField(default='', max_length=2048, verbose_name='描述')),
('git_ssh_url', models.CharField(default='', max_length=4096, verbose_name='ssh地址')),
('create_at', models.DateTimeField(default=datetime.datetime.now, verbose_name='创建时间')),
('update_at', models.DateTimeField(default=None, null=True, verbose_name='更新时间')),
('update_msg', models.CharField(default='', max_length=4096, verbose_name='更新消息')),
('user', models.ForeignKey(db_column='user_id', on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL, verbose_name='仓库所有者')),
],
options={
'db_table': 't_repository',
},
),
migrations.CreateModel(
name='SharedRepo',
fields=[
('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(default='', max_length=32, verbose_name='名称')),
('remarks', models.CharField(default='', max_length=2048, verbose_name='描述')),
('branch', models.CharField(default='', max_length=512, verbose_name='分支名称')),
('create_at', models.DateTimeField(default=datetime.datetime.now, verbose_name='创建时间')),
('update_at', models.DateTimeField(default=None, null=True, verbose_name='更新时间')),
('update_msg', models.CharField(default='', max_length=4096, verbose_name='更新消息')),
('is_public', models.BooleanField(default=False, verbose_name='是否公开')),
('repo', models.ForeignKey(db_column='repo_id', on_delete=django.db.models.deletion.CASCADE, to='app_repo.repository', verbose_name='原仓库')),
('user', models.ForeignKey(db_column='user_id', on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL, verbose_name='共享仓库所有者')),
],
options={
'db_table': 't_shared_repo',
},
),
migrations.CreateModel(
name='SharedRepoStar',
fields=[
('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('create_at', models.DateTimeField(default=datetime.datetime.now, verbose_name='收藏时间')),
('shared_repo', models.ForeignKey(db_column='shared_repo_id', on_delete=django.db.models.deletion.CASCADE, to='app_repo.sharedrepo', verbose_name='共享仓库')),
('user', models.ForeignKey(db_column='user_id', on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL, verbose_name='用户')),
],
options={
'db_table': 't_shared_repo_star',
},
),
migrations.CreateModel(
name='SharedRepoAccessCtrl',
fields=[
('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('expired_at', models.DateTimeField(default=datetime.datetime.now, verbose_name='授权过期时间')),
('shared_repo', models.ForeignKey(db_column='shared_repo_id', on_delete=django.db.models.deletion.CASCADE, to='app_repo.sharedrepo', verbose_name='共享仓库')),
('user', models.ForeignKey(db_column='user_id', on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL, verbose_name='用户')),
],
options={
'db_table': 't_shared_repo_access_ctrl',
},
),
]
|
georghe-crihan/AlgoWaveGraph
|
wave.py
|
<reponame>georghe-crihan/AlgoWaveGraph<filename>wave.py<gh_stars>0
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# This example demonstrates finding the shortest path in a maze.
# It is _not_ the most optimal implementation of the wave (flood-fill)
# algorithm; it is intended only to demonstrate its principles.
#
# The original C version should compile with any C or C++ compiler; it was
# written with Watcom C 1.6 for _REAL_ mode (i.e. wcl386 source.c).
#
# Use it wherever and as much as you like.
#
# For any questions contact <NAME>sov 2:5030/140.777
# See: http://algolist.manual.ru/maths/graphs/shortpath/wave.php
import curses
from locale import setlocale, LC_ALL
LAB_DIM = 10
move_cost = (
(0, 0, 0, 0, 0, 0, 0, 0, 0, 0),
(0, 1, 6, 6, 6, 6, 6, 1, 1, 0),
(0, 1, 0, 0, 0, 0, 6, 0, 0, 0),
(0, 1, 0, 1, 1, 1, 1, 1, 1, 0),
    (0, 1, 0, 1, 1, 0, 0, 0, 1, 0),  # This is the maze itself
    (0, 1, 0, 1, 0, 0, 1, 0, 1, 0),  # 0 - wall
    (0, 1, 0, 1, 0, 1, 1, 0, 1, 0),  # any other number is the
    (0, 1, 0, 0, 0, 0, 0, 0, 1, 0),  # degree of passability
    (0, 1, 8, 1, 1, 1, 1, 1, 1, 0),  # 1 - best passability
(0, 0, 0, 0, 0, 0, 0, 0, 0, 0)
)
fill_map = [[0 for i in range(0, LAB_DIM)] for i in range(0, LAB_DIM)]  # Size == size of the maze!
buf = [type('', (), {'x': 0, 'y': 0})() for i in range(0, 256)]  # Coordinates in the maze
# The bigger the maze, the bigger
# this array has to be
buf_ptr = 0
buf_end = 0  # Indices into buf
# Curses window handle
w = None
#
# THIS PART HANDLES SCREEN OUTPUT AND
# HAS NOTHING TO DO WITH THE ALGORITHM
#
vga_color_map = (
curses.COLOR_BLACK, curses.COLOR_BLUE, curses.COLOR_GREEN, curses.COLOR_CYAN, # 0-3
curses.COLOR_RED, curses.COLOR_MAGENTA, curses.COLOR_YELLOW, curses.COLOR_WHITE, # 4-7
curses.COLOR_WHITE, curses.COLOR_BLUE, curses.COLOR_GREEN, # 8-0xa
curses.COLOR_CYAN, curses.COLOR_RED, # 0xb-0xc
curses.COLOR_MAGENTA, curses.COLOR_YELLOW, # 0xd-0xe
curses.COLOR_WHITE # 0xf
)
vga_attr_map = (
curses.A_NORMAL, curses.A_NORMAL, curses.A_NORMAL, curses.A_NORMAL, # 0-3
curses.A_NORMAL, curses.A_NORMAL, curses.A_NORMAL, curses.A_BOLD, # 4-7
curses.A_NORMAL, curses.A_BOLD, curses.A_BOLD, # 8-0xa
curses.A_BOLD, curses.A_BOLD, # 0xb-0xc
curses.A_BOLD, curses.A_BOLD, # 0xd-0xe
curses.A_BOLD # 0xf
)
vga_attrs = {}
def clr_scr():
"""Очистить экpан"""
global w
setlocale(LC_ALL, "")
w = curses.initscr()
curses.start_color()
curses.noecho()
# Below is to initialize VGA Attribute to NCurses map
for a in (0, 0x80): # blink, bit 7
for f in range(0, 16):
for b in range(0, 8):
def _idx(_f, _b): return _f | (_b << 4)
def _attr(_a, _f): return vga_attr_map[_f] | (curses.A_BLINK if _a == 0x80 else 0)
if f != 0 or b != 0:
"""curses.color_pair(0) is read-only!"""
curses.init_pair(_idx(f, b), vga_color_map[f], vga_color_map[b])
vga_attrs[a | _idx(f, b)] = \
curses.color_pair(_idx(f, b)) | \
_attr(a, f)
def scr_chr(y, x, ch):
w.addch(y, x, ch)
def scr_attr(y, x, attr):
if attr in vga_attrs:
w.chgat(y, x, 1, vga_attrs[attr])
def writestr(x, y, s, attr):
"""Hапечатать стpоку str в кооpдинатах (x,y) цветом attr"""
w.addstr(y, x, s, vga_attrs[attr])
def draw_maze():
"""Pмсует начальную каpтинку лабиpинта"""
for j in range(0, LAB_DIM):
for i in range(0, LAB_DIM):
scr_attr(j, i*2 , 16*(7-move_cost[j][i])+7+8*((i+j)&1))
scr_attr(j, i*2+1, 16*(7-move_cost[j][i])+7+8*((i+j)&1))
scr_chr(asy, asx*2, '[')
scr_chr(asy, asx*2+1, ']')
scr_chr(aty, atx*2, '<')
scr_chr(aty, atx*2+1, '>')
scr_attr(1, 40, 16*(7-1))
writestr(45, 1, "Пустое место", 7)
scr_attr(3, 40, 16*(7-0))
writestr(45, 3, "Стена",7)
scr_attr(5, 40, 16*(7-6))
writestr(45, 5, "Болото",7)
writestr(40, 7, "[] Hачальная точка", 7)
writestr(40, 9, "<> Цель пути", 7)
#
# AND FROM HERE ON IS THE ACTUAL IMPLEMENTATION OF THE ALGORITHM
#
def push(x, y, n):
"""Эта функция пpовеpяет является ли пpедлогаемый путь в точку более
коpотким,
чем найденый pанее, и если да, то запоминает точку в buf."""
global buf_end, buf, fill_map
if fill_map[y][x] <= n:
return # Если новый путь не коpоче-нафиг его
fill_map[y][x] = n # Запоминаем новую длину пути
buf[buf_end].x = x #
buf[buf_end].y = y # Запоминаем точку
buf_end += 1 # Pазмеp buf-256 buf_end - byte, зациклится само,
# иначе надо писать bufe=(buf_end+1)%(pазмеp buf)
scr_chr(y, x*2 , n/10+48) #
# Это пpосто pисование и ожидание нажатия кнопки
scr_chr(y, x*2+1, (n % 10)+48)
w.getch() #
def pop(x, y):
"""Здесь беpется очеpедная точка из buf и возвpащается True,
если бpать нечего, то возвpащается False"""
global buf_ptr, buf_end, buf
if buf_ptr == buf_end:
return False
x.i = buf[buf_ptr].x
y.i = buf[buf_ptr].y
buf_ptr += 1 # То же, что и с buf_end !!! см. ^
return True
# ВHИМАHИЕ !!! Hе смотpя на названия функций (push и pop)
# buf это не stack ! Это кольцевой FIFO-шный буфеp !
# Вот, она самая, она-то путь и ищет
def fill(sx, sy, tx, ty):
global buf_ptr, buf_end, fill_map
x = type('', (), {})()
y = type('', (), {})()
n = 0
t = 0
    # First, fill_map is filled with the max value
    fill_map = [[0xFF for i in range(0, LAB_DIM)] for i in range(0, LAB_DIM)]
    buf_ptr = 0
    buf_end = 0  # Self-explanatory, I think...
    push(sx, sy, 0)  # The path to the start point = 0, makes sense, right?
    while pop(x, y):  # Loop while there are points in the buffer
        if (x.i == tx) and (y.i == ty):
            writestr(0, 20, "Path found, length ", 15)
            scr_chr(20, 19, n // 10 + 48)
            scr_chr(20, 20, (n % 10) + 48)
            # break  # If you uncomment this break, the loop exits as soon as
            # the very first path is found. That is the sensible thing to do
            # if the traversal cost of all cells is the same.
        # n = path length to any neighboring cell
        n = fill_map[y.i][x.i]+move_cost[y.i][x.i]
        # Check the 4 neighboring cells
if move_cost[y.i+1][x.i ]:
push(x.i , y.i+1, n) #
if move_cost[y.i-1][x.i ]:
push(x.i , y.i-1, n) #
if move_cost[y.i ][x.i+1]:
push(x.i+1, y.i , n) #
if move_cost[y.i ][x.i-1]:
push(x.i-1, y.i , n) #
    # Either we found the first path and left the loop via break,
    # or we have already flooded the whole map
    if fill_map[ty][tx] == 0xFF:
        writestr(0, 20, "No path exists !!!", 15)
        return
    else:
        writestr(0, 20, "Flood fill finished, let's walk the path !!!", 15)
    x.i = tx
    y.i = ty
    n = 0xFF  # We started the flood fill from (sx, sy), so
    # the path has to be walked from (tx, ty)
    while (x.i != sx) or (y.i != sy):  # Until we arrive at (sx, sy)
        scr_attr(y.i, x.i*2, 2*16)
        scr_attr(y.i, x.i*2+1, 2*16)  # Drawing
        # Here we look for the neighboring
if fill_map[y.i+1][x.i ] < n:
tx = x.i
ty = y.i+1
t = fill_map[y.i+1][x.i ]
        # cell containing
if fill_map[y.i-1][x.i ] < n:
tx = x.i
ty = y.i-1
t = fill_map[y.i-1][x.i ]
        # the minimum value
if fill_map[y.i ][x.i+1] < n:
tx = x.i+1
ty = y.i
t = fill_map[y.i ][x.i+1]
if fill_map[y.i ][x.i-1] < n:
tx = x.i-1
ty = y.i
t = fill_map[y.i ][x.i-1]
x.i = tx
y.i = ty
        n = t  # Move to the found cell
    # That's it! The path has been found!
# Start and end coordinates of the path
asx = 1
asy = 1  # Start point
atx = 3
aty = 3  # Path target
clr_scr()  # All of this is drawing
draw_maze()  #
w.getch()  #
fill(asx, asy, atx, aty)  # Find the path
w.refresh()
w.getch()  # Wait for a key press
curses.endwin()
|
mbarkhau/markdown-katex
|
src/markdown_katex/__init__.py
|
# This file is part of the markdown-katex project
# https://github.com/mbarkhau/markdown-katex
#
# Copyright (c) 2019-2021 <NAME> (<EMAIL>) - MIT License
# SPDX-License-Identifier: MIT
"""markdown_katex extension.
This is an extension for Python-Markdown which
uses KaTeX to generate html from tex.
"""
__version__ = "v202112.1034"
from markdown_katex.wrapper import tex2html
from markdown_katex.wrapper import get_bin_cmd
from markdown_katex.extension import KatexExtension
def _make_extension(**kwargs) -> KatexExtension:
return KatexExtension(**kwargs)
# Name that conforms with the Markdown extension API
# https://python-markdown.github.io/extensions/api/#dot_notation
makeExtension = _make_extension
TEST_FORMULAS = r"""
f(x) = \int_{-\infty}^\infty
\hat f(\xi)\,e^{2 \pi i \xi x}
\,d\xi
---
\displaystyle
\frac{1}{
\Bigl(\sqrt{\phi \sqrt{5}}-\phi\Bigr) e^{\frac25 \pi}
} =
1+\frac{e^{-2\pi}} {
1+\frac{e^{-4\pi}} {
1+\frac{e^{-6\pi}} {
1+\frac{e^{-8\pi}}{
1+\cdots
}
}
}
}
---
\displaystyle
\left
( \sum_{k=1}^n a_k b_k
\right)^2
\leq
\left(
\sum_{k=1}^n a_k^2
\right)
\left(
\sum_{k=1}^n b_k^2
\right)
---
\overbrace{x + \cdots + x}^{n\rm\ times}
-
\underbrace{x + \cdots + x}_{n\rm\ times}
---
\oiiint \oiint \oint \frac ab + {\scriptscriptstyle \frac cd + \frac ef} + \frac gh
---
\Overrightarrow{ABCDE}
-
\overrightharpoon{abcdec}
-
\overgroup{ABCDEF}
-
\undergroup{abcde}
-
\undergroup{efgp}
-
\utilde{AB}
-
\utilde{\utilde{\utilde{AB}}}
-
\widecheck{AB\widecheck{CD}EF}
-
\widehat{AB\widehat{CD}EF}
""".split(
"---"
)
__all__ = ['makeExtension', '__version__', 'get_bin_cmd', 'tex2html', 'TEST_FORMULAS']
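# Usage sketch (illustrative, not part of this file): rendering text with the
# extension enabled. It assumes the `markdown` package is installed and that a
# KaTeX binary is available, since markdown-katex shells out to KaTeX.
#
#   import markdown
#   html = markdown.markdown("$`E = mc^2`$", extensions=['markdown_katex'])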
|
TakeshiKishita/kadai_HNSon
|
summation_of_divisors.py
|
<filename>summation_of_divisors.py
# coding=utf-8
""""
int numを引数として渡すと、numの約数の総和を返す
"""
import math
import sys
def get_prime_number(num):
"""Summary line.
エラトステネスの篩を使い、素数を列挙する
Args:
num (int): 素因数分解を行う数値
Returns:
list: numまでの素数リスト
"""
sequence_list = [i for i in range(2, num + 1)]
    # List of integers from 2 up to the input value
    prime_list = []
    # List of primes
while True:
prime = min(sequence_list)
if float(prime) > math.sqrt(num):
            # Everything above the square root of the input is prime; add it all and stop
prime_list.extend(sequence_list)
break
else:
prime_list.append(prime)
i = 0
while i < len(sequence_list):
if sequence_list[i] % prime == 0:
                    # Remove multiples of the prime from the list
sequence_list.pop(i)
continue
i += 1
return prime_list
def _prime_factorization(num):
"""Summary line.
素数を使い、素因数分解を行う
Args:
num (int): 素因数分解を行う数値
Returns:
list: 素因数分解の解
"""
prime_list = get_prime_number(num)
    # Build the list of primes from 2 up to num
temp_num = num
ans_list = []
i = 0
while num >= prime_list[i] ** 2:
        # Process up to the square root of the target number
if temp_num % prime_list[i] == 0:
ans_list.append(str(prime_list[i]))
temp_num /= prime_list[i]
else:
i += 1
if temp_num != 1:
ans_list.append(str(int(temp_num)))
return ans_list
num = int(sys.argv[1])
if num == 0 or num == 1:
    # For 0 or 1, just print num and exit
    print(num)
    sys.exit()
prime_list = _prime_factorization(num)
# Build the prime factorization list
temp_num = 0
# Sum for the current prime factor
power_num = 0
# Exponent counter
sum_divisor = 0
# Sum of divisors
for i, val in enumerate(prime_list):
    # Formula for the sum of divisors, using the prime factorization
if i == 0:
temp_num = 1 + int(val)
power_num = 2
else:
if val == prime_list[i - 1]:
            # If the prime is the same as the previous one
temp_num += int(val) ** power_num
power_num += 1
else:
if sum_divisor == 0:
sum_divisor = temp_num
else:
sum_divisor *= temp_num
temp_num = 1 + int(val)
power_num = 2
if sum_divisor == 0:
    # If all factors were the same prime
sum_divisor = temp_num
else:
sum_divisor *= temp_num
print(sum_divisor)
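# Worked example (added for illustration): for num = 12 = 2^2 * 3 the factor list
# is ['2', '2', '3'], so the loop above computes (1 + 2 + 2^2) * (1 + 3) = 7 * 4 = 28,
# which equals the sum of 12's divisors 1 + 2 + 3 + 4 + 6 + 12.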
|
terrotim/alpha-zero-general
|
pit.py
|
<gh_stars>0
import os
os.environ['CUDA_VISIBLE_DEVICES'] = '0'
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'
import Arena
from MCTS import MCTS
from sotf.SotfGame import SotfGame, display
from sotf.SotfPlayers import *
from sotf.tensorflow.NNet import NNetWrapper as NNet
"""
from connect4.Connect4Game import Connect4Game, display
from connect4.Connect4Players import *
from connect4.tensorflow.NNet import NNetWrapper as NNet
"""
"""
from othello.OthelloGame import OthelloGame
from othello.OthelloPlayers import *
from othello.pytorch.NNet import NNetWrapper as NNet
"""
import numpy as np
from utils import *
"""
use this script to play any two agents against each other, or play manually with
any agent.
"""
g = SotfGame()
#g = Connect4Game()
# all players
rp = RandomPlayer(g).play
rp2 = RandomPlayer(g).play
#hp = HumanPlayer(g).play
#hp2 = HumanPlayer(g).play
"""
mini_othello = False # Play in 6x6 instead of the normal 8x8.
human_vs_cpu = True
if mini_othello:
g = OthelloGame(6)
else:
g = OthelloGame(8)
# all players
rp = RandomPlayer(g).play
gp = GreedyOthelloPlayer(g).play
hp = HumanOthelloPlayer(g).play
"""
# nnet players
"""
n1 = NNet(g)
n1.load_checkpoint('./temp/','temp.pth.tar')
args1 = dotdict({'numMCTSSims': 25, 'cpuct':1.0})
mcts1 = MCTS(g, n1, args1)
n1p = lambda x: np.argmax(mcts1.getActionProb(x, temp=0))
"""
"""
if mini_othello:
n1.load_checkpoint('./pretrained_models/othello/pytorch/','6x100x25_best.pth.tar')
else:
n1.load_checkpoint('./pretrained_models/othello/pytorch/','8x8_100checkpoints_best.pth.tar')
args1 = dotdict({'numMCTSSims': 50, 'cpuct':1.0})
"""
"""
if human_vs_cpu:
player2 = hp
else:
n2 = NNet(g)
n2.load_checkpoint('./pretrained_models/othello/pytorch/', '8x8_100checkpoints_best.pth.tar')
args2 = dotdict({'numMCTSSims': 50, 'cpuct': 1.0})
mcts2 = MCTS(g, n2, args2)
n2p = lambda x: np.argmax(mcts2.getActionProb(x, temp=0))
player2 = n2p # Player 2 is neural network if it's cpu vs cpu.
"""
n2 = NNet(g)
n2.load_checkpoint('./temp/','best.pth.tar')
args2 = dotdict({'numMCTSSims': 25, 'cpuct':1.0})
mcts2 = MCTS(g, n2, args2)
n2p = lambda x: np.argmax(mcts2.getActionProb(x, temp=0))
n3 = NNet(g)
n3.load_checkpoint('./pretrained_models/sotf/fixed','best.pth.tar')
args3 = dotdict({'numMCTSSims': 25, 'cpuct':1.0})
mcts3 = MCTS(g, n3, args3)
n3p = lambda x: np.argmax(mcts3.getActionProb(x, temp=0))
#arena = Arena.Arena(rp, rp2, g, display=display)
arena = Arena.Arena(n2p, rp, g, display=display)
print(arena.playGames(100, verbose=True))
"""
arena = Arena.Arena(n1p, player2, g, display=OthelloGame.display)
print(arena.playGames(2, verbose=True))
"""
|
terrotim/alpha-zero-general
|
sotf/SotfGame.py
|
<reponame>terrotim/alpha-zero-general<gh_stars>0
import sys
import numpy as np
import colorama
from itertools import permutations,product
sys.path.append('..')
from Game import Game
from .SotfLogic import Board
from termcolor import colored, cprint
colorama.init()
class SotfGame(Game):
"""
    Sotf game class implementing the alpha-zero-general Game interface.
"""
def __init__(self, height=4, width=12, tiles=None):
self.height = height
self.width = width
self.board = Board(self.height,self.width)
def getInitBoard(self):
# main 4x12 board, 2 pboards size 51 (both with a 3 reserve board and a 48 tile board), 1 first_claim, and 1 action_num
np.random.shuffle(self.board.layout[:self.height*self.width])
return self.board.layout
def getBoardSize(self):
#48 + 51 + 51 + 1 + 1
return self.board.layout.size,1
def getActionSize(self):
# user can use three spirits in any way on the 4x12 board, and they can pass
return (self.board.height*self.board.width * 3) + 1
def getNextState(self, board, player, action):
"""Returns a copy of the board with updated move, original board is unmodified."""
b = Board(self.height,self.width)
b.layout = np.copy(board)
#move = np.unravel(action,[self.height,self.width])
#b.layout = b.execute_move(move,player)
if action < self.getActionSize()-1:
b.layout = b.execute_move(action,player)
#print('action',action,player)
#print('b.layout',b.layout)
if b.layout[-1] < 3:
b.layout[-1] += 1
return b.layout, player
else:
b.layout[-1] = 1
return b.layout, -player
def getValidMoves(self, board, player):
b = Board(self.height,self.width)
b.layout = np.copy(board)
validMoves = [0]*self.getActionSize()
for i in range(self.getActionSize() - 1):
validMoves[i] = b.is_legal_move(i,player)
#spirit_used,tile_row,tile_column = np.unravel_index(i,[self.board.height,self.board.width,3])
#move = (spirit_used,tile_row,tile_column
#validMoves[i] = b.is_legal_move(move,player)
#if not valid moves, user can pass
if not 1 in validMoves:
validMoves[-1] = 1
#if its the second or third move of a user's turn, they may pass
if b.layout[-1] > 1:
validMoves[-1] = 1
#print('validmoves', validMoves)
return validMoves
def getGameEnded(self, board, player):
b = Board(self.height,self.width)
b.layout = np.copy(board)
allTilesTaken = b.all_tiles_taken()
if allTilesTaken:
p1score, p2score = b.get_scores(player)
if p1score>p2score:
return 1
if p2score>p1score:
return -1
return 1e-4
else:
return 0
def getCanonicalForm(self, board, player):
# main 4x12 board, 2 pboards size 51 (both with a 3 reserve board and a 48 tile board), and 1 action_num
b = np.copy(board)
tiles = b[:self.height*self.width]
p_tiles = b[self.height*self.width:self.height*self.width*3]
p_spirits = b[self.height*self.width*3:-2]
p_tiles = p_tiles.reshape(2,-1)
p_spirits = p_spirits.reshape(2,-1)
first_claim = b[-2]
action_num = b[-1]
return np.concatenate((tiles,p_tiles[::player].flatten(),p_spirits[::player].flatten(),[first_claim],[action_num]))
def getSymmetries(self, board, pi):
b = np.copy(board)
tiles = b[:self.height*self.width]
p_tiles = b[self.height*self.width:self.height*self.width*3]
p_spirits = b[self.height*self.width*3:-2]
first_claim = b[-2]
action_num = b[-1]
tile_board = tiles.reshape(self.height,self.width)
pi_board = np.asarray(pi[:-1]).reshape(self.height,self.width,3)
#t_perms = list(permutations(tile_board))
#p_perms = list(permutations(pi_board))
t_perms = [tile_board]
p_perms = [pi_board]
assert len(t_perms) == len(p_perms)
syms = []
combs = list(product(range(-1,2,2),repeat=4))
for p in range(len(t_perms)):
t_perm = t_perms[p]
p_perm = p_perms[p]
for c in combs:
new_t = np.concatenate([t_perm[i][::c[i]] for i in range(len(c))])
new_p = np.concatenate([p_perm[i][::c[i]] for i in range(len(c))])
sym = (np.concatenate((new_t,p_tiles,p_spirits,[first_claim],[action_num])).reshape(self.getBoardSize()),np.concatenate((np.concatenate(new_p),[pi[-1]])))
syms.append(sym)
return syms
#return [(board.reshape(self.getBoardSize()),pi)]
def stringRepresentation(self, board):
        return board.tobytes()  # tostring() is deprecated in NumPy; tobytes() is the equivalent
def display(board):
height = 4
width = 12
b = Board(height,width)
tile_data = b.tile_data
tiles = board[:height*width]
p_tiles = board[height*width:height*width*3]
p_spirits = board[height*width*3:-2]
taken_tiles = p_tiles.reshape(2,height*width)
spirit_board = p_spirits.reshape(2,3)
print(p_spirits)
for ind,t in enumerate(tiles):
tile = tile_data[t]
if len(tile)>1:
space = '\t'
else:
space = '\t\t'
if t in spirit_board[0]:
bcolor = 'on_red'
elif t in spirit_board[1]:
bcolor = 'on_cyan'
else:
bcolor = 'on_grey'
if t in taken_tiles[0]:
cprint(colored(tile,'red',bcolor),end=space)
elif t in taken_tiles[1]:
cprint(colored(tile,'cyan',bcolor),end=space)
else:
cprint(colored(tile,'white',bcolor),end=space)
if (ind+1) % 12 == 0:
print()
|
terrotim/alpha-zero-general
|
sotf/SotfLogic.py
|
from collections import namedtuple
import numpy as np
DEFAULT_HEIGHT = 6
DEFAULT_WIDTH = 7
DEFAULT_WIN_LENGTH = 4
WinState = namedtuple('WinState', 'is_ended winner')
class Board():
"""
    Sotf game board (adapted from a Connect4 board implementation).
"""
def __init__(self, height=None, width=None, tiles=None):
self.height = height
self.width = width
if tiles is None:
self.tiles = np.arange(height*width)
#Randomize tiles
#np.random.shuffle(self.tiles)
self.p_tiles = np.full(height*width*2,-1)
self.p_spirits = np.full(6,-1)
else:
self.tiles = tiles
self.initiate_tiles()
self.layout = np.concatenate((self.tiles,self.p_tiles,self.p_spirits,[-1],[1]))
def execute_move(self, action, player):
tiles = self.layout[:self.height*self.width]
p_tiles = self.layout[self.height*self.width:self.height*self.width*3]
p_spirits = self.layout[self.height*self.width*3:-2]
first_claim = self.layout[-2]
action_num = self.layout[-1]
spirit_used,tile_index = np.unravel_index(action,[3,self.height*self.width])
taken_tiles = p_tiles.reshape(2,self.height*self.width)
spirit_board = p_spirits.reshape(2,3)
own_tiles_list = taken_tiles[0 if player == 1 else 1]
own_spirits = spirit_board[0 if player == 1 else 1]
other_spirits = spirit_board[1 if player == 1 else 0]
if action_num < 3:
#if it is action 1 or 2, its a claim action
own_tiles = [t for t in own_tiles_list if not t == -1]
own_tiles.append(tile_index)
own_tiles.sort()
while len(own_tiles) < len(own_tiles_list):
own_tiles.append(-1)
if player == 1:
taken_tiles[0] = own_tiles
else:
taken_tiles[1] = own_tiles
p_tiles = taken_tiles.flatten()
#if the tiles[tile_index] was reserved in p_spirits
if tile_index in p_spirits:
#print('p_spirits',p_spirits)
if tile_index in own_spirits:
s_used = np.where(own_spirits == tile_index)[0]
own_spirits[s_used] = -1
else:
own_spirits[spirit_used] = -2
s_used = np.where(other_spirits == tile_index)[0]
other_spirits[s_used] = -1
#print('p_spirits2',p_spirits)
"""
if player == 1:
spirit_board[0] = own_spirits
spirit_board[1] = other_spirits
else:
spirit_board[1] = own_spirits
spirit_board[0] = other_spirits
p_spirits = spirit_board.flatten()
print('p_spirits3',p_spirits)
"""
if action_num == 1:
first_claim = tile_index
else:
first_claim = -1
return np.concatenate((tiles,p_tiles,p_spirits,[first_claim],[action_num]))
else:
#its a reserve action
first_claim = -1
own_spirits[spirit_used] = tile_index
if player == 1:
spirit_board[0] = own_spirits
else:
spirit_board[1] = own_spirits
p_spirits = spirit_board.flatten()
return np.concatenate((tiles,p_tiles,p_spirits,[first_claim],[action_num]))
def is_legal_move(self,action,player):
tiles = self.layout[:self.height*self.width]
p_tiles = self.layout[self.height*self.width:self.height*self.width*3]
p_spirits = self.layout[self.height*self.width*3:-2]
first_claim = self.layout[-2]
action_num = self.layout[-1]
spirit_used,tile_index = np.unravel_index(action,[3,self.height*self.width])
taken_tiles = p_tiles.reshape(2,self.height*self.width)
spirit_board = p_spirits.reshape(2,3)
own_tiles_list = taken_tiles[0 if player == 1 else 1]
own_spirits = spirit_board[0 if player == 1 else 1]
other_spirits = spirit_board[1 if player == 1 else 0]
if action_num < 3:
if tile_index in p_tiles:
return 0
if tile_index in other_spirits and own_spirits[spirit_used] == -2:
return 0
if action_num == 2:
#print(action)
if first_claim == -1:
return 0
spirits1 = self.tile_data[first_claim]
spirits2 = self.tile_data[tile_index]
if first_claim == tile_index:
return 0
if not spirits1[0] == spirits2[0]:
return 0
if len(spirits1) > 1 and spirits1[0] == spirits1[1]:
return 0
if len(spirits2) > 1 and spirits2[0] == spirits2[1]:
return 0
tile_board = tiles.reshape(self.height,self.width)
edge_tiles = []
for row in tile_board:
fr = [t for t in row if not t in p_tiles]
if len(fr) > 0:
if fr[0] not in edge_tiles:
edge_tiles.append(fr[0])
if fr[-1] not in edge_tiles:
edge_tiles.append(fr[-1])
if not tile_index in edge_tiles:
return 0
return 1
else:
if tile_index in p_tiles or tile_index in p_spirits:
return 0
if own_spirits[spirit_used] == -2:
return 0
return 1
def all_tiles_taken(self):
p_tiles = self.layout[self.height*self.width:self.height*self.width*3]
all_taken = [t for t in p_tiles if not t == -1]
return len(all_taken) == self.height*self.width
def get_scores(self,player):
p1score = 0
p2score = 0
p1spirits = []
p2spirits = []
p_tiles = self.layout[self.height*self.width:self.height*self.width*3]
taken_tiles = p_tiles.reshape(2,self.height*self.width)
p1_tiles = taken_tiles[0 if player == 1 else 1]
p2_tiles = taken_tiles[1 if player == 1 else 0]
p1_spirits = np.concatenate([self.tile_data[t] for t in p1_tiles if not t == -1])
p2_spirits = np.concatenate([self.tile_data[t] for t in p2_tiles if not t == -1])
unique1, counts1 = np.unique(p1_spirits, return_counts=True)
unique2, counts2 = np.unique(p2_spirits, return_counts=True)
p1dict = dict(zip(unique1, counts1))
p2dict = dict(zip(unique2, counts2))
"""
print('self.layout',self.layout)
print('p1dict',p1dict)
print('p2dict',p2dict)
"""
for tile in ['a','b','c','d','e','f','g','h','i','x','y','z']:
p1count = p1dict.get(tile,0)
p2count = p2dict.get(tile,0)
if p1count == 0:
p1score -= 3
if p2count == 0:
p2score -= 3
if p1count >= p2count:
p1score += p1count
if p2count >= p1count:
p2score += p2count
"""
print('p1score:',p1score)
print('p2score:',p2score)
input("Press any button to continue...")
"""
return p1score, p2score
def initiate_tiles(self):
# initiates each tile's symbols
# 5 = green, 4 tiles, a
# 6 = red, 5 tiles, b
# 6 = black, 4 tiles, c
# 7 = blue, 5 tiles, d
# 7 = brown, 5 tiles, e
# 8 = tan, 6 tiles, f
# 8 = maroon, 6 tiles, g
# 8 = orange, 6 tiles, h
# 10 = purple, 7 tiles, i
# sun = x,
# moon = y,
# fire = z,
data = {}
data[0] = ['a','a']
data[1] = ['a','x']
data[2] = ['a','y']
data[3] = ['a','z']
data[4] = ['b','b']
data[5] = ['b','x']
data[6] = ['b','y']
data[7] = ['b','z']
data[8] = ['b','z']
data[9] = ['c','c']
data[10] = ['c','c']
data[11] = ['c','x']
data[12] = ['c','y']
data[13] = ['d','d']
data[14] = ['d','d']
data[15] = ['d','x']
data[16] = ['d','y']
data[17] = ['d','z']
data[18] = ['e','e']
data[19] = ['e','e']
data[20] = ['e','x']
data[21] = ['e','y']
data[22] = ['e','z']
data[23] = ['f']
data[24] = ['f','f']
data[25] = ['f','f']
data[26] = ['f','x']
data[27] = ['f','y']
data[28] = ['f','z']
data[29] = ['g']
data[30] = ['g','g']
data[31] = ['g','g']
data[32] = ['g','x']
data[33] = ['g','y']
data[34] = ['g','z']
data[35] = ['h']
data[36] = ['h','h']
data[37] = ['h','h']
data[38] = ['h','x']
data[39] = ['h','y']
data[40] = ['h','z']
data[41] = ['i']
data[42] = ['i','i']
data[43] = ['i','i']
data[44] = ['i','i']
data[45] = ['i','x']
data[46] = ['i','y']
data[47] = ['i','z']
self.tile_data = data
|
terrotim/alpha-zero-general
|
main.py
|
import os
os.environ['CUDA_VISIBLE_DEVICES'] = '0'
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'
import logging
import coloredlogs
"""
from Coach import Coach
from connect4.Connect4Game import Connect4Game as Game
from connect4.tensorflow.NNet import NNetWrapper as nn
from utils import dotdict
"""
from Coach import Coach
from sotf.SotfGame import SotfGame as Game
from sotf.tensorflow.NNet import NNetWrapper as nn
from utils import dotdict
log = logging.getLogger(__name__)
coloredlogs.install(level='INFO') # Change this to DEBUG to see more info.
args = dotdict({
'numIters': 100,
'numEps': 200,
'tempThreshold': 15,
'updateThreshold': 0.55,
'maxlenOfQueue': 2000000,
'numMCTSSims': 25,
'arenaCompare': 100,
'cpuct': 1,
'dirichletAlpha': 1.0,
'checkpoint': './temp/',
'load_model': False,
'load_folder_file': ('./temp/','best.pth.tar'),
'numItersForTrainExamplesHistory': 20,
})
def main():
log.info('Loading %s...', Game.__name__)
g = Game()
#g = SotfGame()
#g = Connect4Game()
log.info('Loading %s...', nn.__name__)
nnet = nn(g)
if args.load_model:
        log.info('Loading checkpoint "%s/%s"...', args.load_folder_file[0], args.load_folder_file[1])
nnet.load_checkpoint(args.load_folder_file[0], args.load_folder_file[1])
else:
log.warning('Not loading a checkpoint!')
log.info('Loading the Coach...')
c = Coach(g, nnet, args)
if args.load_model:
log.info("Loading 'trainExamples' from file...")
c.loadTrainExamples()
log.info('Starting the learning process 🎉')
c.learn()
if __name__ == "__main__":
main()
|
MarcusTL12/PyCalc
|
imports.py
|
import sys
sys.path.append('./PyScr/')
import numpy as np
import matplotlib.pyplot as plt
import cmath
import math
import copy
import pfprint
import primefac
from scipy.special import comb as choose
_ = None
def ppprint(arg):
global _
pfprint.pfprint(arg)
_ = arg
def frac():
pfprint.frac = not pfprint.frac
return _
def denom_lim(n):
pfprint.denom_lim = n
def num_dec(n):
pfprint.num_dec = n
def pltclr():
plt.cla()
plt.clf()
plt.close()
def fact(n):
return math.factorial(n)
def primesS(n):
return primefac.primes_to_string(primefac.primes(n))
def primes(n):
return primefac.primes(n)
sys.displayhook = ppprint
pi = np.pi
e = np.exp(1)
sqrt = np.sqrt
predef_globals = len(globals()) + 1
def loc():
return dict(list(globals().items())[predef_globals:])
|
MarcusTL12/PyCalc
|
PyScr/primefac.py
|
import math
def primes(n:int) -> list:
factors = []
if n < 0:
factors.append(-1)
factors.append(1)
n *= -1
more = False
i = 2
while i <= math.sqrt(n):
while n % i == 0:
if not more:
factors.append(i)
factors.append(1)
more = True
else:
factors[-1] += 1
            n //= i  # integer division keeps n exact for large inputs
more = False
if i == 2:
i += 1
else:
i += 2
if n != 1:
factors.append(int(n))
factors.append(1)
return factors if len(factors) != 0 else [1, 1]
def primes_to_string(p:list) -> str:
ret = ''
for i in range(int(len(p) / 2)):
if p[2 * i] == -1:
ret += '-'
else:
if len(ret) > 0 and ret != '-':
ret += ' * '
ret += str(p[2 * i]) + ('^' + str(p[2 * i + 1]) if p[2 * i + 1] > 1 else '')
return ret
|
MarcusTL12/PyCalc
|
PyScr/pifrac.py
|
from fractions import Fraction
import numpy as np
a = [Fraction(22, 7)]  # Fraction(22, 7), not Fraction(22 / 7): the latter would capture the binary float error
for i in range(8, 1000000):
frac = Fraction(np.pi).limit_denominator(i)
if abs(float(a[-1]) - np.pi) > abs(float(frac) - np.pi):
a.append(frac)
|
MarcusTL12/PyCalc
|
PyScr/dicemap.py
|
<filename>PyScr/dicemap.py
a = [[[str(i + 1) + str(j + 1) + 2 * str(k + 1) if (i != j and i != k and j != k) else ' ' for k in range(6)] for j in range(6)] for i in range(6)]
b = [[[[str(i + 1) + str(j + 1) + str(k + 1) + str(l + 1) if (i != j and i != k and i != l and j != k and j != l and k != l) else ' ' for l in range(6)] for k in range(6)] for j in range(6)] for i in range(6)]
|
MarcusTL12/PyCalc
|
PyScr/jsonparse.py
|
import os
def parse(filename):
with open(filename, "r") as i, open("tempjson.py", "w") as o:
o.write("a = ")
for l in i:
o.write(l.replace("true", "True").replace("false", "False").replace("null", "None"))
import tempjson
os.remove("tempjson.py")
return tempjson.a
|
MarcusTL12/PyCalc
|
test.py
|
import PyScr.jsonparse as json
# import json
import time
t1 = time.time()
# a = json.load(open("enemies.json", "r"))
a = json.parse("enemies.json")
t2 = time.time()
print(t2 - t1)
print(a[0]["randomshit"])
|
MarcusTL12/PyCalc
|
PyScr/pfprint.py
|
import numpy as np
import copy
from pprint import pprint
from fractions import Fraction
frac = True
denom_lim = 100000
num_dec = 12
def toFrac(arg):
return Fraction(arg).limit_denominator(denom_lim)
def chkFrac(fra, arg):
return abs(float(fra) - arg) < 10**(-14)
def floatformat(arg):
if frac:
fra = toFrac(arg)
if chkFrac(fra, arg):
return str(fra)
narg = arg / np.pi
fra = toFrac(narg)
if chkFrac(fra, narg):
return str(fra) + ' π'
narg = arg / np.exp(1)
fra = toFrac(narg)
if chkFrac(fra, narg):
return str(fra) + ' e'
narg = arg**2
fra = toFrac(narg)
if chkFrac(float(fra), narg):
return '√( ' + str(fra) + ' )'
return round(arg, num_dec)
def listformat(arg, prevpoints=[]):
prevpoints = copy.copy(prevpoints)
isnparray = isinstance(arg, np.ndarray)
if isnparray:
arg = list(np.asarray(arg))
if isinstance(arg, (list, tuple, dict)):
prevpoints.append(arg)
istup = isinstance(arg, tuple)
isdict = isinstance(arg, dict)
ret = list(arg.items()) if isdict else list(copy.copy(arg))
if isdict:
arg = list(arg.items())
for i in range(len(arg)):
seen_before = False
for j in prevpoints:
if id(arg[i]) == id(j):
ret[i] = '[...]'
seen_before = True
break
if not seen_before:
if isinstance(arg[i], float):
ret[i] = floatformat(arg[i])
elif isinstance(arg[i], (list, tuple, np.ndarray)):
ret[i] = listformat(arg[i], prevpoints)
if isnparray:
return np.array(ret)
elif istup:
return tuple(ret)
elif isdict:
return dict(ret)
else:
return ret
return arg
def pfprint(arg):
if isinstance(arg, float):
print(floatformat(arg))
elif isinstance(arg, (list, tuple, dict, np.ndarray)):
data = listformat(arg, [])
if isinstance(arg, np.ndarray):
print(data)
else:
pprint(data)
else:
pprint(arg)
|