from django import forms
class ContentManageableModelForm(forms.ModelForm):
class Meta:
fields = []
def __init__(self, request=None, *args, **kwargs):
self.request = request
super().__init__(*args, **kwargs)
def save(self, commit=True):
obj = super().save(commit=False)
if self.request is not None and self.request.user.is_authenticated():
if not obj.pk:
obj.creator = self.request.user
else:
obj.last_modified_by = self.request.user
if commit:
obj.save()
return obj
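# A minimal usage sketch (the Event model, EventForm, and view below are hypothetical,
# not part of this module): the key point is that the view passes ``request`` so that
# save() can stamp ``creator`` on create and ``last_modified_by`` on update.
#
#   class EventForm(ContentManageableModelForm):
#       class Meta:
#           model = Event
#           fields = ['title', 'body']
#
#   def edit_event(request, pk):
#       event = Event.objects.get(pk=pk)
#       form = EventForm(request=request, data=request.POST or None, instance=event)
#       if request.method == 'POST' and form.is_valid():
#           form.save()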
#!/usr/bin/python
import os
import dense_correspondence_manipulation.utils.utils as utils
import logging
utils.add_dense_correspondence_to_python_path()
import matplotlib.pyplot as plt
import cv2
import numpy as np
import pandas as pd
import random
import scipy.stats as ss
import torch
from torch.autograd import Variable
from torchvision import transforms
from dense_correspondence_manipulation.utils.constants import *
from dense_correspondence_manipulation.utils.utils import CameraIntrinsics
from dense_correspondence.dataset.spartan_dataset_masked import SpartanDataset
import dense_correspondence.correspondence_tools.correspondence_plotter as correspondence_plotter
import dense_correspondence.correspondence_tools.correspondence_finder as correspondence_finder
from dense_correspondence.network.dense_correspondence_network import DenseCorrespondenceNetwork
from dense_correspondence.loss_functions.pixelwise_contrastive_loss import PixelwiseContrastiveLoss
import dense_correspondence.evaluation.plotting as dc_plotting
from dense_correspondence.correspondence_tools.correspondence_finder import random_sample_from_masked_image
class PandaDataFrameWrapper(object):
"""
A simple wrapper around a single-row pandas DataFrame that guards against read/write errors on unknown columns
"""
def __init__(self, columns):
data = [np.nan] * len(columns)
self._columns = columns
self._df = pd.DataFrame(data=[data], columns=columns)
def set_value(self, key, value):
if key not in self._columns:
raise KeyError("%s is not in the index" %(key))
self._df[key] = value
def get_value(self, key):
return self._df[key]
@property
def dataframe(self):
return self._df
@dataframe.setter
def dataframe(self, value):
self._df = value
class DCNEvaluationPandaTemplate(PandaDataFrameWrapper):
columns = ['scene_name',
'img_a_idx',
'img_b_idx',
'is_valid',
'is_valid_masked',
'norm_diff_descriptor_ground_truth',
'norm_diff_descriptor',
'norm_diff_descriptor_masked',
'norm_diff_ground_truth_3d',
'norm_diff_pred_3d',
'norm_diff_pred_3d_masked',
'pixel_match_error_l2',
'pixel_match_error_l2_masked',
'pixel_match_error_l1',
'fraction_pixels_closer_than_ground_truth',
'fraction_pixels_closer_than_ground_truth_masked',
'average_l2_distance_for_false_positives',
'average_l2_distance_for_false_positives_masked']
def __init__(self):
PandaDataFrameWrapper.__init__(self, DCNEvaluationPandaTemplate.columns)
class DCNEvaluationPandaTemplateAcrossObject(PandaDataFrameWrapper):
columns = ['scene_name_a',
'scene_name_b',
'img_a_idx',
'img_b_idx',
'object_id_a',
'object_id_b',
'norm_diff_descriptor_best_match']
def __init__(self):
PandaDataFrameWrapper.__init__(self, DCNEvaluationPandaTemplateAcrossObject.columns)
class SIFTKeypointMatchPandaTemplate(PandaDataFrameWrapper):
columns = ['scene_name',
'img_a_idx',
'img_b_idx',
'is_valid',
'norm_diff_pred_3d']
def __init__(self):
PandaDataFrameWrapper.__init__(self, SIFTKeypointMatchPandaTemplate.columns)
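# Usage sketch for the template classes above (assumed workflow, mirroring the
# evaluation loops below): each template wraps a single-row DataFrame; rows are
# collected in a list and combined with pd.concat before being written to csv.
def _example_template_usage():
    pd_template = DCNEvaluationPandaTemplate()
    pd_template.set_value('scene_name', 'example_scene')
    pd_template.set_value('img_a_idx', 0)
    pd_template.set_value('img_b_idx', 1)
    pd_template.set_value('pixel_match_error_l2', 3.5)
    rows = [pd_template.dataframe]
    return pd.concat(rows)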
class DenseCorrespondenceEvaluation(object):
"""
Samples image pairs from the given scenes. Then uses the network to compute dense
descriptors. Records the results of this in a Pandas.DataFrame object.
"""
def __init__(self, config):
self._config = config
self._dataset = None
def load_network_from_config(self, name):
"""
Loads a network from config file. Puts it in eval mode by default
:param name:
:type name:
:return: DenseCorrespondenceNetwork
:rtype:
"""
if name not in self._config["networks"]:
raise ValueError("Network %s is not in config file" %(name))
path_to_network_params = self._config["networks"][name]["path_to_network_params"]
path_to_network_params = utils.convert_to_absolute_path(path_to_network_params)
model_folder = os.path.dirname(path_to_network_params)
dcn = DenseCorrespondenceNetwork.from_model_folder(model_folder, model_param_file=path_to_network_params)
dcn.eval()
return dcn
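# A hypothetical sketch of the config structure this class expects (keys taken from
# the lookups in this file; the network name and paths below are placeholders):
#
#   networks:
#     my_network:
#       path_to_network_params: path/to/trained_models/my_network/model.pth
#   params:
#     num_image_pairs: 100
#     num_matches_per_image_pair: 100
#   output_dir: path/to/evaluation_output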
def load_dataset_for_network(self, network_name):
"""
Loads a dataset for the network specified in the config file
:param network_name: string
:type network_name:
:return: SpartanDataset
:rtype:
"""
if network_name not in self._config["networks"]:
raise ValueError("Network %s is not in config file" %(network_name))
network_folder = os.path.dirname(self._config["networks"][network_name]["path_to_network_params"])
network_folder = utils.convert_to_absolute_path(network_folder)
dataset_config = utils.getDictFromYamlFilename(os.path.join(network_folder, "dataset.yaml"))
dataset = SpartanDataset(config=dataset_config)
return dataset
def load_dataset(self):
"""
Loads a SpartanDatasetMasked object
For now we use a default one
:return:
:rtype: SpartanDatasetMasked
"""
config_file = os.path.join(utils.getDenseCorrespondenceSourceDir(), 'config', 'dense_correspondence', 'dataset',
'spartan_dataset_masked.yaml')
config = utils.getDictFromYamlFilename(config_file)
dataset = SpartanDataset(mode="test", config=config)
return dataset
@property
def dataset(self):
if self._dataset is None:
self._dataset = self.load_dataset()
return self._dataset
@dataset.setter
def dataset(self, value):
self._dataset = value
def get_output_dir(self):
return utils.convert_to_absolute_path(self._config['output_dir'])
@staticmethod
def get_image_pair_with_poses_diff_above_threshold(dataset, scene_name, threshold=0.05,
max_num_attempts=100):
"""
Given a dataset and scene name find a random pair of images with
poses that are different above a threshold
:param dataset:
:type dataset:
:param scene_name:
:type scene_name:
:param threshold:
:type threshold:
:param max_num_attempts:
:type max_num_attempts:
:return:
:rtype:
"""
img_a_idx = dataset.get_random_image_index(scene_name)
pose_a = dataset.get_pose_from_scene_name_and_idx(scene_name, img_a_idx)
pos_a = pose_a[0:3, 3]
for i in xrange(0, max_num_attempts):
img_b_idx = dataset.get_random_image_index(scene_name)
pose_b = dataset.get_pose_from_scene_name_and_idx(scene_name, img_b_idx)
pos_b = pose_b[0:3, 3]
if np.linalg.norm(pos_a - pos_b) > threshold:
return (img_a_idx, img_b_idx)
return None
def evaluate_single_network(self, network_name, mode="train", save=True):
"""
Evaluates a single network; the network must be listed in the config file.
:param network_name:
:type network_name:
:return:
:rtype:
"""
DCE = DenseCorrespondenceEvaluation
dcn = self.load_network_from_config(network_name)
dcn.eval()
dataset = self.dataset
if mode == "train":
dataset.set_train_mode()
if mode == "test":
dataset.set_test_mode()
num_image_pairs = self._config['params']['num_image_pairs']
num_matches_per_image_pair = self._config['params']['num_matches_per_image_pair']
pd_dataframe_list, df = DCE.evaluate_network(dcn, dataset, num_image_pairs=num_image_pairs,
num_matches_per_image_pair=num_matches_per_image_pair)
# save pandas.DataFrame to csv
if save:
output_dir = os.path.join(self.get_output_dir(), network_name, mode)
data_file = os.path.join(output_dir, "data.csv")
if not os.path.isdir(output_dir):
os.makedirs(output_dir)
df.to_csv(data_file)
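# Usage sketch (hedged; assumes an evaluation yaml matching the structure noted
# above load_network_from_config, with a network named 'my_network'):
#
#   eval_config = utils.getDictFromYamlFilename('evaluation.yaml')
#   dce = DenseCorrespondenceEvaluation(eval_config)
#   dce.evaluate_single_network('my_network', mode='test', save=True)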
def evaluate_single_network_cross_scene(self, network_name, save=True):
"""
Simple wrapper that uses class config and then calls static method
"""
dcn = self.load_network_from_config(network_name)
dcn.eval()
dataset = dcn.load_training_dataset()
df = DenseCorrespondenceEvaluation.evaluate_network_cross_scene(dcn, dataset, save=False)
if save:
    output_dir = os.path.join(self.get_output_dir(), network_name, "cross_scene")
    if not os.path.isdir(output_dir):
        os.makedirs(output_dir)
    df.to_csv(os.path.join(output_dir, "data.csv"))
return df
@staticmethod
def evaluate_network_cross_scene(dcn=None, dataset=None, save=True):
"""
This will search for the "evaluation_labeled_data_path" in the dataset.yaml,
and use pairs of images that have been human-labeled across scenes.
"""
utils.reset_random_seed()
cross_scene_data = DenseCorrespondenceEvaluation.parse_cross_scene_data(dataset)
pd_dataframe_list = []
for annotated_pair in cross_scene_data:
scene_name_a = annotated_pair["image_a"]["scene_name"]
scene_name_b = annotated_pair["image_b"]["scene_name"]
image_a_idx = annotated_pair["image_a"]["image_idx"]
image_b_idx = annotated_pair["image_b"]["image_idx"]
img_a_pixels = annotated_pair["image_a"]["pixels"]
img_b_pixels = annotated_pair["image_b"]["pixels"]
dataframe_list_temp =\
DenseCorrespondenceEvaluation.single_cross_scene_image_pair_quantitative_analysis(dcn,
dataset, scene_name_a, image_a_idx, scene_name_b, image_b_idx,
img_a_pixels, img_b_pixels)
assert dataframe_list_temp is not None
pd_dataframe_list += dataframe_list_temp
df = pd.concat(pd_dataframe_list)
# NOTE: saving is left to the caller (see evaluate_single_network_cross_scene);
# this staticmethod has no access to the instance config (output dir / network name).
if save:
    logging.warning("evaluate_network_cross_scene: ignoring save=True, "
                    "the caller is responsible for writing the DataFrame to disk")
return df
@staticmethod
def evaluate_network_across_objects(dcn=None, dataset=None, num_image_pairs=25):
"""
This grabs different objects and computes a small set of statistics on their distribution.
"""
utils.reset_random_seed()
pd_dataframe_list = []
for i in xrange(num_image_pairs):
object_id_a, object_id_b = dataset.get_two_different_object_ids()
scene_name_a = dataset.get_random_single_object_scene_name(object_id_a)
scene_name_b = dataset.get_random_single_object_scene_name(object_id_b)
image_a_idx = dataset.get_random_image_index(scene_name_a)
image_b_idx = dataset.get_random_image_index(scene_name_b)
dataframe_list_temp =\
DenseCorrespondenceEvaluation.single_across_object_image_pair_quantitative_analysis(dcn,
dataset, scene_name_a, scene_name_b, image_a_idx, image_b_idx, object_id_a, object_id_b)
# if the list is empty, don't bother +=ing it, just continue
if len(dataframe_list_temp) == 0:
continue
assert dataframe_list_temp is not None
pd_dataframe_list += dataframe_list_temp
df = pd.concat(pd_dataframe_list)
return df
@staticmethod
def evaluate_network(dcn, dataset, num_image_pairs=25, num_matches_per_image_pair=100):
"""
:param nn: A neural network DenseCorrespondenceNetwork
:param test_dataset: DenseCorrespondenceDataset
the dataset to draw samples from
:return:
"""
utils.reset_random_seed()
DCE = DenseCorrespondenceEvaluation
dcn.eval()
logging_rate = 5
pd_dataframe_list = []
for i in xrange(0, num_image_pairs):
scene_name = dataset.get_random_scene_name()
# grab random scene
if i % logging_rate == 0:
print "computing statistics for image %d of %d, scene_name %s" %(i, num_image_pairs, scene_name)
print "scene"
idx_pair = DCE.get_image_pair_with_poses_diff_above_threshold(dataset, scene_name)
if idx_pair is None:
logging.info("no satisfactory image pair found, continuing")
continue
img_idx_a, img_idx_b = idx_pair
dataframe_list_temp =\
DCE.single_same_scene_image_pair_quantitative_analysis(dcn, dataset, scene_name,
img_idx_a,
img_idx_b,
num_matches=num_matches_per_image_pair,
debug=False)
if dataframe_list_temp is None:
print "no matches found, skipping"
continue
pd_dataframe_list += dataframe_list_temp
# pd_series_list.append(series_list_temp)
df = pd.concat(pd_dataframe_list)
return pd_dataframe_list, df
@staticmethod
def plot_descriptor_colormaps(res_a, res_b, descriptor_image_stats=None,
mask_a=None, mask_b=None, plot_masked=False,descriptor_norm_type="mask_image"):
"""
Plots the colormaps of descriptors for a pair of images
:param res_a: descriptors for img_a
:type res_a: numpy.ndarray
:param res_b:
:type res_b: numpy.ndarray
:param descriptor_norm_type: what type of normalization to use for the
full descriptor image
:type : str
:return: None
:rtype: None
"""
if plot_masked:
nrows = 2
ncols = 2
else:
nrows = 1
ncols = 2
fig, axes = plt.subplots(nrows=nrows, ncols=ncols)
fig.set_figheight(5)
fig.set_figwidth(15)
if descriptor_image_stats is None:
res_a_norm, res_b_norm = dc_plotting.normalize_descriptor_pair(res_a, res_b)
else:
res_a_norm = dc_plotting.normalize_descriptor(res_a, descriptor_image_stats[descriptor_norm_type])
res_b_norm = dc_plotting.normalize_descriptor(res_b, descriptor_image_stats[descriptor_norm_type])
if plot_masked:
ax = axes[0,0]
else:
ax = axes[0]
ax.imshow(res_a_norm)
if plot_masked:
ax = axes[0,1]
else:
ax = axes[1]
ax.imshow(res_b_norm)
if plot_masked:
assert mask_a is not None
assert mask_b is not None
fig.set_figheight(10)
fig.set_figwidth(15)
D = np.shape(res_a)[2]
mask_a_repeat = np.repeat(mask_a[:,:,np.newaxis], D, axis=2)
mask_b_repeat = np.repeat(mask_b[:,:,np.newaxis], D, axis=2)
res_a_mask = mask_a_repeat * res_a
res_b_mask = mask_b_repeat * res_b
if descriptor_image_stats is None:
res_a_norm_mask, res_b_norm_mask = dc_plotting.normalize_masked_descriptor_pair(res_a, res_b, mask_a, mask_b)
else:
res_a_norm_mask = dc_plotting.normalize_descriptor(res_a_mask, descriptor_image_stats['mask_image'])
res_b_norm_mask = dc_plotting.normalize_descriptor(res_b_mask, descriptor_image_stats['mask_image'])
res_a_norm_mask = res_a_norm_mask * mask_a_repeat
res_b_norm_mask = res_b_norm_mask * mask_b_repeat
axes[1,0].imshow(res_a_norm_mask)
axes[1,1].imshow(res_b_norm_mask)
@staticmethod
def clip_pixel_to_image_size_and_round(uv, image_width, image_height):
u = min(int(round(uv[0])), image_width - 1)
v = min(int(round(uv[1])), image_height - 1)
return (u,v)
@staticmethod
def single_cross_scene_image_pair_quantitative_analysis(dcn, dataset, scene_name_a,
img_a_idx, scene_name_b, img_b_idx,
img_a_pixels, img_b_pixels):
"""
Quantitative analysis of a dcn on a pair of images from different scenes (requires human labeling).
There is some code duplication with single_same_scene_image_pair_quantitative_analysis, but the
structure differs: the sparse, human-labeled matches are passed in, and we then try to generate
additional views of these labeled pixel matches.
:param dcn:
:type dcn: DenseCorrespondenceNetwork
:param dataset:
:type dataset: SpartanDataset
:param scene_name:
:type scene_name: str
:param img_a_idx:
:type img_a_idx: int
:param img_b_idx:
:type img_b_idx: int
:param img_a_pixels, img_b_pixels: lists of dicts, where each dict contains keys for "u" and "v"
the lists should be the same length and index i from each list constitutes a match pair
:return: Dict with relevant data
:rtype:
"""
rgb_a, depth_a, mask_a, pose_a = dataset.get_rgbd_mask_pose(scene_name_a, img_a_idx)
rgb_b, depth_b, mask_b, pose_b = dataset.get_rgbd_mask_pose(scene_name_b, img_b_idx)
depth_a = np.asarray(depth_a)
depth_b = np.asarray(depth_b)
mask_a = np.asarray(mask_a)
mask_b = np.asarray(mask_b)
# compute dense descriptors
rgb_a_tensor = dataset.rgb_image_to_tensor(rgb_a)
rgb_b_tensor = dataset.rgb_image_to_tensor(rgb_b)
# these are Variables holding torch.FloatTensors, first grab the data, then convert to numpy
res_a = dcn.forward_single_image_tensor(rgb_a_tensor).data.cpu().numpy()
res_b = dcn.forward_single_image_tensor(rgb_b_tensor).data.cpu().numpy()
camera_intrinsics_a = dataset.get_camera_intrinsics(scene_name_a)
camera_intrinsics_b = dataset.get_camera_intrinsics(scene_name_b)
if not np.allclose(camera_intrinsics_a.K, camera_intrinsics_b.K):
print "Currently cannot handle two different camera K matrices in different scenes!"
print "But you could add this..."
camera_intrinsics_matrix = camera_intrinsics_a.K
assert len(img_a_pixels) == len(img_b_pixels)
print "Expanding amount of matches between:"
print "scene_name_a", scene_name_a
print "scene_name_b", scene_name_b
print "originally had", len(img_a_pixels), "matches"
image_height, image_width = dcn.image_shape
DCE = DenseCorrespondenceEvaluation
dataframe_list = []
# Loop over the labeled pixel matches once, before using different views
# This lets us keep depth_a, depth_b, res_a, res_b without reloading
for i in range(len(img_a_pixels)):
print "now, index of pixel match:", i
uv_a = (img_a_pixels[i]["u"], img_a_pixels[i]["v"])
uv_b = (img_b_pixels[i]["u"], img_b_pixels[i]["v"])
uv_a = DCE.clip_pixel_to_image_size_and_round(uv_a, image_width, image_height)
uv_b = DCE.clip_pixel_to_image_size_and_round(uv_b, image_width, image_height)
print uv_a
print uv_b
# Reminder: this function wants only a single uv_a, uv_b
pd_template = DenseCorrespondenceEvaluation.compute_descriptor_match_statistics(depth_a,
depth_b, mask_a, mask_b, uv_a, uv_b, pose_a,pose_b, res_a,
res_b, camera_intrinsics_matrix,
rgb_a=rgb_a, rgb_b=rgb_b, debug=False)
pd_template.set_value('scene_name', scene_name_a+"+"+scene_name_b)
pd_template.set_value('img_a_idx', int(img_a_idx))
pd_template.set_value('img_b_idx', int(img_b_idx))
dataframe_list.append(pd_template.dataframe)
# Loop a second time over the labeled pixel matches
# But this time try,
# for each I labeled pixel match pairs,
# for each J different views for image a, and
# for each K different views for image b
# This will lead to I*J+I*K attempts at new pairs!
# Could also do the cubic version...
J = 10
K = 10
# Loop over labeled pixel matches
for i in range(len(img_a_pixels)):
uv_a = (img_a_pixels[i]["u"], img_a_pixels[i]["v"])
uv_b = (img_b_pixels[i]["u"], img_b_pixels[i]["v"])
uv_a = DCE.clip_pixel_to_image_size_and_round(uv_a, image_width, image_height)
uv_b = DCE.clip_pixel_to_image_size_and_round(uv_b, image_width, image_height)
# Loop over J different views for image a
for j in range(J):
different_view_a_idx = dataset.get_img_idx_with_different_pose(scene_name_a, pose_a, num_attempts=50)
if different_view_a_idx is None:
logging.info("no frame with sufficiently different pose found, continuing")
continue
diff_rgb_a, diff_depth_a, diff_mask_a, diff_pose_a = dataset.get_rgbd_mask_pose(scene_name_a, different_view_a_idx)
diff_depth_a = np.asarray(diff_depth_a)
diff_mask_a = np.asarray(diff_mask_a)
(uv_a_vec, diff_uv_a_vec) = correspondence_finder.batch_find_pixel_correspondences(depth_a, pose_a, diff_depth_a, diff_pose_a,
uv_a=uv_a)
if uv_a_vec is None:
logging.info("no matches found, continuing")
continue
diff_rgb_a_tensor = dataset.rgb_image_to_tensor(diff_rgb_a)
diff_res_a = dcn.forward_single_image_tensor(diff_rgb_a_tensor).data.cpu().numpy()
diff_uv_a = (diff_uv_a_vec[0][0], diff_uv_a_vec[1][0])
diff_uv_a = DCE.clip_pixel_to_image_size_and_round(diff_uv_a, image_width, image_height)
pd_template = DenseCorrespondenceEvaluation.compute_descriptor_match_statistics(diff_depth_a,
depth_b, diff_mask_a, mask_b, diff_uv_a, uv_b, diff_pose_a, pose_b,
diff_res_a, res_b, camera_intrinsics_matrix,
rgb_a=diff_rgb_a, rgb_b=rgb_b, debug=False)
pd_template.set_value('scene_name', scene_name_a+"+"+scene_name_b)
pd_template.set_value('img_a_idx', int(different_view_a_idx))
pd_template.set_value('img_b_idx', int(img_b_idx))
dataframe_list.append(pd_template.dataframe)
# Loop over K different views for image b
for k in range(K):
different_view_b_idx = dataset.get_img_idx_with_different_pose(scene_name_b, pose_b, num_attempts=50)
if different_view_b_idx is None:
logging.info("no frame with sufficiently different pose found, continuing")
continue
diff_rgb_b, diff_depth_b, diff_mask_b, diff_pose_b = dataset.get_rgbd_mask_pose(scene_name_b, different_view_b_idx)
diff_depth_b = np.asarray(diff_depth_b)
diff_mask_b = np.asarray(diff_mask_b)
(uv_b_vec, diff_uv_b_vec) = correspondence_finder.batch_find_pixel_correspondences(depth_b, pose_b, diff_depth_b, diff_pose_b,
uv_a=uv_b)
if uv_b_vec is None:
logging.info("no matches found, continuing")
continue
diff_rgb_b_tensor = dataset.rgb_image_to_tensor(diff_rgb_b)
diff_res_b = dcn.forward_single_image_tensor(diff_rgb_b_tensor).data.cpu().numpy()
diff_uv_b = (diff_uv_b_vec[0][0], diff_uv_b_vec[1][0])
diff_uv_b = DCE.clip_pixel_to_image_size_and_round(diff_uv_b, image_width, image_height)
pd_template = DenseCorrespondenceEvaluation.compute_descriptor_match_statistics(depth_a,
diff_depth_b, mask_a, diff_mask_b, uv_a, diff_uv_b, pose_a, diff_pose_b,
res_a, diff_res_b, camera_intrinsics_matrix,
rgb_a=rgb_a, rgb_b=diff_rgb_b, debug=False)
pd_template.set_value('scene_name', scene_name_a+"+"+scene_name_b)
pd_template.set_value('img_a_idx', int(img_a_idx))
pd_template.set_value('img_b_idx', int(different_view_b_idx))
dataframe_list.append(pd_template.dataframe)
return dataframe_list
@staticmethod
def single_across_object_image_pair_quantitative_analysis(dcn, dataset, scene_name_a, scene_name_b,
img_a_idx, img_b_idx, object_id_a, object_id_b, num_uv_a_samples=100,
debug=False):
"""
Quantitative analysis of a dcn on a pair of images of two different objects, drawn from different single-object scenes.
:param dcn:
:type dcn: DenseCorrespondenceNetwork
:param dataset:
:type dataset: SpartanDataset
:param scene_name_a, scene_name_b: scene names for image a and image b
:type scene_name_a, scene_name_b: str
:param img_a_idx:
:type img_a_idx: int
:param img_b_idx:
:type img_b_idx: int
:param object_id_a, object_id_b: ids of the two (different) objects
:param num_uv_a_samples: number of pixels to sample from the mask of image a
:return: Dict with relevant data
:rtype:
"""
rgb_a, depth_a, mask_a, _ = dataset.get_rgbd_mask_pose(scene_name_a, img_a_idx)
rgb_b, depth_b, mask_b, _ = dataset.get_rgbd_mask_pose(scene_name_b, img_b_idx)
depth_a = np.asarray(depth_a)
depth_b = np.asarray(depth_b)
mask_a = np.asarray(mask_a)
mask_b = np.asarray(mask_b)
# compute dense descriptors
rgb_a_tensor = dataset.rgb_image_to_tensor(rgb_a)
rgb_b_tensor = dataset.rgb_image_to_tensor(rgb_b)
# these are Variables holding torch.FloatTensors, first grab the data, then convert to numpy
res_a = dcn.forward_single_image_tensor(rgb_a_tensor).data.cpu().numpy()
res_b = dcn.forward_single_image_tensor(rgb_b_tensor).data.cpu().numpy()
# container to hold a list of pandas dataframe
# will eventually combine them all with concat
dataframe_list = []
logging_rate = 100
image_height, image_width = dcn.image_shape
DCE = DenseCorrespondenceEvaluation
sampled_idx_list = random_sample_from_masked_image(mask_a, num_uv_a_samples)
# If the list is empty, return an empty list
if len(sampled_idx_list) == 0:
return dataframe_list
for i in range(num_uv_a_samples):
uv_a = [sampled_idx_list[1][i], sampled_idx_list[0][i]]
pd_template = DCE.compute_descriptor_match_statistics_no_ground_truth(uv_a, res_a,
res_b,
rgb_a=rgb_a,
rgb_b=rgb_b,
depth_a=depth_a,
depth_b=depth_b,
debug=debug)
pd_template.set_value('scene_name_a', scene_name_a)
pd_template.set_value('scene_name_b', scene_name_b)
pd_template.set_value('object_id_a', object_id_a)
pd_template.set_value('object_id_b', object_id_b)
pd_template.set_value('img_a_idx', int(img_a_idx))
pd_template.set_value('img_b_idx', int(img_b_idx))
dataframe_list.append(pd_template.dataframe)
return dataframe_list
@staticmethod
def single_same_scene_image_pair_quantitative_analysis(dcn, dataset, scene_name,
img_a_idx, img_b_idx,
camera_intrinsics_matrix=None,
num_matches=100,
debug=False):
"""
Quantitative analysis of a dcn on a pair of images from the same scene.
:param dcn:
:type dcn: DenseCorrespondenceNetwork
:param dataset:
:type dataset: SpartanDataset
:param scene_name:
:type scene_name: str
:param img_a_idx:
:type img_a_idx: int
:param img_b_idx:
:type img_b_idx: int
:param camera_intrinsics_matrix: Optionally set camera intrinsics, otherwise will get it from the dataset
:type camera_intrinsics_matrix: 3 x 3 numpy array
:return: Dict with relevant data
:rtype:
"""
rgb_a, depth_a, mask_a, pose_a = dataset.get_rgbd_mask_pose(scene_name, img_a_idx)
rgb_b, depth_b, mask_b, pose_b = dataset.get_rgbd_mask_pose(scene_name, img_b_idx)
depth_a = np.asarray(depth_a)
depth_b = np.asarray(depth_b)
mask_a = np.asarray(mask_a)
mask_b = np.asarray(mask_b)
# compute dense descriptors
rgb_a_tensor = dataset.rgb_image_to_tensor(rgb_a)
rgb_b_tensor = dataset.rgb_image_to_tensor(rgb_b)
# these are Variables holding torch.FloatTensors, first grab the data, then convert to numpy
res_a = dcn.forward_single_image_tensor(rgb_a_tensor).data.cpu().numpy()
res_b = dcn.forward_single_image_tensor(rgb_b_tensor).data.cpu().numpy()
if camera_intrinsics_matrix is None:
camera_intrinsics = dataset.get_camera_intrinsics(scene_name)
camera_intrinsics_matrix = camera_intrinsics.K
# find correspondences
(uv_a_vec, uv_b_vec) = correspondence_finder.batch_find_pixel_correspondences(depth_a, pose_a, depth_b, pose_b,
device='CPU', img_a_mask=mask_a)
if uv_a_vec is None:
print "no matches found, returning"
return None
# container to hold a list of pandas dataframe
# will eventually combine them all with concat
dataframe_list = []
total_num_matches = len(uv_a_vec[0])
num_matches = min(num_matches, total_num_matches)
match_list = random.sample(range(0, total_num_matches), num_matches)
if debug:
match_list = [50]
logging_rate = 100
image_height, image_width = dcn.image_shape
DCE = DenseCorrespondenceEvaluation
for i in match_list:
uv_a = (uv_a_vec[0][i], uv_a_vec[1][i])
uv_b_raw = (uv_b_vec[0][i], uv_b_vec[1][i])
uv_b = DCE.clip_pixel_to_image_size_and_round(uv_b_raw, image_width, image_height)
pd_template = DCE.compute_descriptor_match_statistics(depth_a,
depth_b,
mask_a,
mask_b,
uv_a,
uv_b,
pose_a,
pose_b,
res_a,
res_b,
camera_intrinsics_matrix,
rgb_a=rgb_a,
rgb_b=rgb_b,
debug=debug)
pd_template.set_value('scene_name', scene_name)
pd_template.set_value('img_a_idx', int(img_a_idx))
pd_template.set_value('img_b_idx', int(img_b_idx))
dataframe_list.append(pd_template.dataframe)
return dataframe_list
@staticmethod
def is_depth_valid(depth):
"""
Checks whether a depth value is valid; missing depth values are usually either 0 or MAX_RANGE
:param depth: depth in meters
:type depth:
:return:
:rtype: bool
"""
MAX_DEPTH = 10.0
return ((depth > 0) and (depth < MAX_DEPTH))
@staticmethod
def compute_descriptor_match_statistics_no_ground_truth(uv_a, res_a, res_b, rgb_a=None, rgb_b=None,
depth_a=None, depth_b=None, debug=False):
"""
Computes statistics of descriptor pixelwise match when there is zero ground truth data.
:param res_a: descriptor for image a, of shape (H,W,D)
:type res_a: numpy array
:param res_b: descriptor for image b, of shape (H,W,D)
:param debug: whether or not to print visualization
:type debug:
"""
DCE = DenseCorrespondenceEvaluation
# compute best match
uv_b, best_match_diff, norm_diffs =\
DenseCorrespondenceNetwork.find_best_match(uv_a, res_a,
res_b, debug=debug)
if debug:
correspondence_plotter.plot_correspondences_direct(rgb_a, depth_a, rgb_b, depth_b,
uv_a, uv_b, show=True)
pd_template = DCNEvaluationPandaTemplateAcrossObject()
pd_template.set_value('norm_diff_descriptor_best_match', best_match_diff)
return pd_template
@staticmethod
def compute_descriptor_match_statistics(depth_a, depth_b, mask_a, mask_b, uv_a, uv_b, pose_a, pose_b,
res_a, res_b, camera_matrix, params=None,
rgb_a=None, rgb_b=None, debug=False):
"""
Computes statistics of descriptor pixelwise match.
:param uv_a: a single pixel index in (u,v) coordinates, from image a
:type uv_a: tuple of 2 ints
:param uv_b: a single pixel index in (u,v) coordinates, from image b
:type uv_b: tuple of 2 ints
:param camera_matrix: camera intrinsics matrix
:type camera_matrix: 3 x 3 numpy array
:param rgb_a:
:type rgb_a:
:param rgb_b:
:type rgb_b:
:param depth_a: depth is assumed to be in mm (see conversion to meters below)
:type depth_a: numpy array
:param depth_b:
:type depth_b:
:param pose_a:
:type pose_a: 4 x 4 numpy array
:param pose_b:
:type pose_b:
:param res_a: descriptor for image a, of shape (H,W,D)
:type res_a: numpy array
:param res_b: descriptor for image b, of shape (H,W,D)
:type res_b: numpy array
:param params:
:type params:
:param debug: whether or not to print visualization
:type debug:
:return:
:rtype:
"""
DCE = DenseCorrespondenceEvaluation
# compute best match
uv_b_pred, best_match_diff, norm_diffs =\
DenseCorrespondenceNetwork.find_best_match(uv_a, res_a,
res_b, debug=debug)
# norm_diffs shape is (H,W)
# compute best match on mask only
mask_b_inv = 1-mask_b
masked_norm_diffs = norm_diffs + mask_b_inv*1e6
best_match_flattened_idx_masked = np.argmin(masked_norm_diffs)
best_match_xy_masked = np.unravel_index(best_match_flattened_idx_masked, masked_norm_diffs.shape)
best_match_diff_masked = masked_norm_diffs[best_match_xy_masked]
uv_b_pred_masked = (best_match_xy_masked[1], best_match_xy_masked[0])
# compute pixel space difference
pixel_match_error_l2 = np.linalg.norm((np.array(uv_b) - np.array(uv_b_pred)), ord=2)
pixel_match_error_l2_masked = np.linalg.norm((np.array(uv_b) - np.array(uv_b_pred_masked)), ord=2)
pixel_match_error_l1 = np.linalg.norm((np.array(uv_b) - np.array(uv_b_pred)), ord=1)
# extract the ground truth descriptors
des_a = res_a[uv_a[1], uv_a[0], :]
des_b_ground_truth = res_b[uv_b[1], uv_b[0], :]
norm_diff_descriptor_ground_truth = np.linalg.norm(des_a - des_b_ground_truth)
# from Schmidt et al 2017:
"""
We then determine the number of pixels in the target image that are closer in
descriptor space to the source point than the manually-labelled corresponding point.
"""
# compute this
(v_indices_better_than_ground_truth, u_indices_better_than_ground_truth) = np.where(norm_diffs < norm_diff_descriptor_ground_truth)
num_pixels_closer_than_ground_truth = len(u_indices_better_than_ground_truth)
num_pixels_in_image = res_a.shape[0] * res_a.shape[1]
fraction_pixels_closer_than_ground_truth = num_pixels_closer_than_ground_truth*1.0/num_pixels_in_image
(v_indices_better_than_ground_truth_masked, u_indices_better_than_ground_truth_masked) = np.where(masked_norm_diffs < norm_diff_descriptor_ground_truth)
num_pixels_closer_than_ground_truth_masked = len(u_indices_better_than_ground_truth_masked)
num_pixels_in_masked_image = len(np.nonzero(mask_b)[0])
fraction_pixels_closer_than_ground_truth_masked = num_pixels_closer_than_ground_truth_masked*1.0/num_pixels_in_masked_image
# new metric: average l2 distance of the pixels better than ground truth
if num_pixels_closer_than_ground_truth == 0:
average_l2_distance_for_false_positives = 0.0
else:
l2_distances = np.sqrt((u_indices_better_than_ground_truth - uv_b[0])**2 + (v_indices_better_than_ground_truth - uv_b[1])**2)
average_l2_distance_for_false_positives = np.average(l2_distances)
# new metric: average l2 distance of the pixels better than ground truth
if num_pixels_closer_than_ground_truth_masked == 0:
average_l2_distance_for_false_positives_masked = 0.0
else:
l2_distances_masked = np.sqrt((u_indices_better_than_ground_truth_masked - uv_b[0])**2 + (v_indices_better_than_ground_truth_masked - uv_b[1])**2)
average_l2_distance_for_false_positives_masked = np.average(l2_distances_masked)
# extract depth values, note the indexing order of u,v has to be reversed
uv_a_depth = depth_a[uv_a[1], uv_a[0]] / DEPTH_IM_SCALE # check if this is not None
uv_b_depth = depth_b[uv_b[1], uv_b[0]] / DEPTH_IM_SCALE
uv_b_pred_depth = depth_b[uv_b_pred[1], uv_b_pred[0]] / DEPTH_IM_SCALE
uv_b_pred_depth_is_valid = DenseCorrespondenceEvaluation.is_depth_valid(uv_b_pred_depth)
uv_b_pred_depth_masked = depth_b[uv_b_pred_masked[1], uv_b_pred_masked[0]] / DEPTH_IM_SCALE
uv_b_pred_depth_is_valid_masked = DenseCorrespondenceEvaluation.is_depth_valid(uv_b_pred_depth_masked)
is_valid = uv_b_pred_depth_is_valid
is_valid_masked = uv_b_pred_depth_is_valid_masked
uv_a_pos = DCE.compute_3d_position(uv_a, uv_a_depth, camera_matrix, pose_a)
uv_b_pos = DCE.compute_3d_position(uv_b, uv_b_depth, camera_matrix, pose_b)
uv_b_pred_pos = DCE.compute_3d_position(uv_b_pred, uv_b_pred_depth, camera_matrix, pose_b)
uv_b_pred_pos_masked = DCE.compute_3d_position(uv_b_pred_masked, uv_b_pred_depth_masked, camera_matrix, pose_b)
diff_ground_truth_3d = uv_b_pos - uv_a_pos
diff_pred_3d = uv_b_pos - uv_b_pred_pos
diff_pred_3d_masked = uv_b_pos - uv_b_pred_pos_masked
if DCE.is_depth_valid(uv_b_depth):
norm_diff_ground_truth_3d = np.linalg.norm(diff_ground_truth_3d)
else:
norm_diff_ground_truth_3d = np.nan
if DCE.is_depth_valid(uv_b_depth) and DCE.is_depth_valid(uv_b_pred_depth):
norm_diff_pred_3d = np.linalg.norm(diff_pred_3d)
else:
norm_diff_pred_3d = np.nan
if DCE.is_depth_valid(uv_b_depth) and DCE.is_depth_valid(uv_b_pred_depth_masked):
norm_diff_pred_3d_masked = np.linalg.norm(diff_pred_3d_masked)
else:
norm_diff_pred_3d_masked = np.nan
if debug:
fig, axes = correspondence_plotter.plot_correspondences_direct(rgb_a, depth_a, rgb_b, depth_b,
uv_a, uv_b, show=False)
correspondence_plotter.plot_correspondences_direct(rgb_a, depth_a, rgb_b, depth_b,
uv_a, uv_b_pred,
use_previous_plot=(fig, axes),
show=True,
circ_color='purple')
pd_template = DCNEvaluationPandaTemplate()
pd_template.set_value('norm_diff_descriptor', best_match_diff)
pd_template.set_value('norm_diff_descriptor_masked', best_match_diff_masked)
pd_template.set_value('is_valid', is_valid)
pd_template.set_value('is_valid_masked', is_valid_masked)
pd_template.set_value('norm_diff_ground_truth_3d', norm_diff_ground_truth_3d)
if is_valid:
pd_template.set_value('norm_diff_pred_3d', norm_diff_pred_3d)
else:
pd_template.set_value('norm_diff_pred_3d', np.nan)
if is_valid_masked:
pd_template.set_value('norm_diff_pred_3d_masked', norm_diff_pred_3d_masked)
else:
pd_template.set_value('norm_diff_pred_3d_masked', np.nan)
pd_template.set_value('norm_diff_descriptor_ground_truth', norm_diff_descriptor_ground_truth)
pd_template.set_value('pixel_match_error_l2', pixel_match_error_l2)
pd_template.set_value('pixel_match_error_l2_masked', pixel_match_error_l2_masked)
pd_template.set_value('pixel_match_error_l1', pixel_match_error_l1)
pd_template.set_value('fraction_pixels_closer_than_ground_truth', fraction_pixels_closer_than_ground_truth)
pd_template.set_value('fraction_pixels_closer_than_ground_truth_masked', fraction_pixels_closer_than_ground_truth_masked)
pd_template.set_value('average_l2_distance_for_false_positives', average_l2_distance_for_false_positives)
pd_template.set_value('average_l2_distance_for_false_positives_masked', average_l2_distance_for_false_positives_masked)
return pd_template
@staticmethod
def compute_3d_position(uv, depth, camera_intrinsics_matrix, camera_to_world):
"""
:param uv: pixel location in (u, v) = (column, row) ordering
:type uv: tuple of 2 ints
:param depth: depth-value
:type depth:
:param camera_intrinsics_matrix: the camera intrinsics matrix
:type camera_intrinsics_matrix:
:param camera_to_world: camera-to-world transform as a homogeneous transform matrix
:type camera_to_world: 4 x 4 numpy array
:return:
:rtype: np.array with shape (3,)
"""
pos_in_camera_frame = correspondence_finder.pinhole_projection_image_to_world(uv, depth, camera_intrinsics_matrix)
pos_in_world_frame = np.dot(camera_to_world, np.append(pos_in_camera_frame, 1))[:3]
return pos_in_world_frame
@staticmethod
def single_same_scene_image_pair_qualitative_analysis(dcn, dataset, scene_name,
img_a_idx, img_b_idx,
num_matches=10):
"""
Wrapper for single_image_pair_qualitative_analysis, when images are from same scene.
See that function for remaining documentation.
:param scene_name: scene name to use
:param img_a_idx: index of image_a in the dataset
:param img_b_idx: index of image_b in the dataset
:type scene_name: str
:type img_a_idx: int
:type img_b_idx: int
:return: None
"""
rgb_a, _, mask_a, _ = dataset.get_rgbd_mask_pose(scene_name, img_a_idx)
rgb_b, _, mask_b, _ = dataset.get_rgbd_mask_pose(scene_name, img_b_idx)
DenseCorrespondenceEvaluation.single_image_pair_qualitative_analysis(dcn, dataset, rgb_a, rgb_b, mask_a, mask_b, num_matches)
@staticmethod
def single_cross_scene_image_pair_qualitative_analysis(dcn, dataset, scene_name_a,
img_a_idx, scene_name_b, img_b_idx,
num_matches=10):
"""
Wrapper for single_image_pair_qualitative_analysis, when images are NOT from same scene.
See that function for remaining documentation.
:param scene_name: scene name to use
:param img_a_idx: index of image_a in the dataset
:param img_b_idx: index of image_b in the dataset
:type scene_name: str
:type img_a_idx: int
:type img_b_idx: int
:return: the images a and b
:rtype: PIL.Image, PIL.Image
"""
rgb_a, _, mask_a, _ = dataset.get_rgbd_mask_pose(scene_name_a, img_a_idx)
rgb_b, _, mask_b, _ = dataset.get_rgbd_mask_pose(scene_name_b, img_b_idx)
DenseCorrespondenceEvaluation.single_image_pair_qualitative_analysis(dcn, dataset, rgb_a, rgb_b, mask_a, mask_b, num_matches)
return rgb_a, rgb_b
@staticmethod
def single_image_pair_qualitative_analysis(dcn, dataset, rgb_a, rgb_b, mask_a, mask_b,
num_matches):
"""
Computes a qualitative assessment of DCN performance for a pair of
images
:param dcn: dense correspondence network to use
:param dataset: dataset to use
:param rgb_a, rgb_b: two rgb images for which to do matching
:param mask_a, mask_b: masks of these two images
:param num_matches: number of matches to generate
:type dcn: DenseCorrespondenceNetwork
:type dataset: DenseCorrespondenceDataset
:type rgb_a, rgb_b: PIL.Images
:type mask_a, mask_b: PIL.Images
:type num_matches: int
:return: None
"""
mask_a = np.asarray(mask_a)
mask_b = np.asarray(mask_b)
# compute dense descriptors
rgb_a_tensor = dataset.rgb_image_to_tensor(rgb_a)
rgb_b_tensor = dataset.rgb_image_to_tensor(rgb_b)
# these are Variables holding torch.FloatTensors, first grab the data, then convert to numpy
res_a = dcn.forward_single_image_tensor(rgb_a_tensor).data.cpu().numpy()
res_b = dcn.forward_single_image_tensor(rgb_b_tensor).data.cpu().numpy()
# sample points on img_a. Compute best matches on img_b
# note that this is in (x,y) format
# TODO: if this mask is empty, this function will not be happy
# de-prioritizing since this is only for qualitative evaluation plots
sampled_idx_list = random_sample_from_masked_image(mask_a, num_matches)
# list of cv2.KeyPoint
kp1 = []
kp2 = []
matches = [] # list of cv2.DMatch
# placeholder constants for opencv
diam = 0.01
dist = 0.01
try:
descriptor_image_stats = dcn.descriptor_image_stats
except:
print "Could not find descriptor image stats..."
print "Only normalizing pairs of images!"
descriptor_image_stats = None
for i in xrange(0, num_matches):
# convert to (u,v) format
pixel_a = [sampled_idx_list[1][i], sampled_idx_list[0][i]]
best_match_uv, best_match_diff, norm_diffs =\
DenseCorrespondenceNetwork.find_best_match(pixel_a, res_a,
res_b)
# be careful, OpenCV format is (u,v) = (right, down)
kp1.append(cv2.KeyPoint(pixel_a[0], pixel_a[1], diam))
kp2.append(cv2.KeyPoint(best_match_uv[0], best_match_uv[1], diam))
matches.append(cv2.DMatch(i, i, dist))
gray_a_numpy = cv2.cvtColor(np.asarray(rgb_a), cv2.COLOR_BGR2GRAY)
gray_b_numpy = cv2.cvtColor(np.asarray(rgb_b), cv2.COLOR_BGR2GRAY)
img3 = cv2.drawMatches(gray_a_numpy, kp1, gray_b_numpy, kp2, matches, flags=2, outImg=gray_b_numpy)
fig, axes = plt.subplots(nrows=1, ncols=1)
fig.set_figheight(10)
fig.set_figwidth(15)
axes.imshow(img3)
# show colormap if possible (i.e. if descriptor dimension is 1 or 3)
if dcn.descriptor_dimension in [1,3]:
DenseCorrespondenceEvaluation.plot_descriptor_colormaps(res_a, res_b,
descriptor_image_stats=descriptor_image_stats,
mask_a=mask_a,
mask_b=mask_b,
plot_masked=True)
plt.show()
@staticmethod
def compute_sift_keypoints(img, mask=None):
"""
Compute SIFT keypoints given a grayscale img
:param img:
:type img:
:param mask:
:type mask:
:return:
:rtype:
"""
# convert to grayscale image if needed
if len(img.shape) > 2:
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
else:
gray = img
sift = cv2.xfeatures2d.SIFT_create()
kp = sift.detect(gray, mask)
kp, des = sift.compute(gray, kp)
img_w_kp = 0 * img
cv2.drawKeypoints(gray, kp, img_w_kp)
return kp, des, gray, img_w_kp
@staticmethod
def single_image_pair_sift_analysis(dataset, scene_name,
img_a_idx, img_b_idx,
cross_match_threshold=0.75,
num_matches=10,
visualize=True,
camera_intrinsics_matrix=None):
"""
Computes SIFT features and does statistics
:param dcn:
:type dcn:
:param dataset:
:type dataset:
:param scene_name:
:type scene_name:
:param img_a_idx:
:type img_a_idx:
:param img_b_idx:
:type img_b_idx:
:param num_matches:
:type num_matches:
:return:
:rtype:
"""
DCE = DenseCorrespondenceEvaluation
rgb_a, depth_a, mask_a, pose_a = dataset.get_rgbd_mask_pose(scene_name, img_a_idx)
rgb_a = np.array(rgb_a) # converts PIL image to rgb
rgb_b, depth_b, mask_b, pose_b = dataset.get_rgbd_mask_pose(scene_name, img_b_idx)
rgb_b = np.array(rgb_b) # converts PIL image to rgb
kp1, des1, gray1, img_1_kp = DCE.compute_sift_keypoints(rgb_a, mask_a)
kp2, des2, gray2, img_2_kp = DCE.compute_sift_keypoints(rgb_b, mask_b)
img1 = gray1
img2 = gray2
if visualize:
fig, axes = plt.subplots(nrows=1, ncols=2)
fig.set_figheight(10)
fig.set_figwidth(15)
axes[0].imshow(img_1_kp)
axes[1].imshow(img_2_kp)
plt.title("SIFT Keypoints")
plt.show()
# compute matches
# Match descriptors.
# BFMatcher with default params
bf = cv2.BFMatcher()
matches = bf.knnMatch(des1, des2, k=2)  # k=2 so we can apply the ratio test below
total_num_matches = len(matches)
# Apply ratio test
good = []
for m, n in matches:
# m is the best match
# n is the second best match
if m.distance < cross_match_threshold * n.distance:
good.append([m])
if visualize:
good_vis = random.sample(good, min(5, len(good)))
outImg = 0 * img1 # placeholder
fig, axes = plt.subplots(nrows=1, ncols=1)
fig.set_figheight(10)
fig.set_figwidth(15)
img3 = cv2.drawMatchesKnn(img1, kp1, img2, kp2, good_vis, outImg, flags=2)
plt.imshow(img3)
plt.title("SIFT Keypoint Matches")
plt.show()
if camera_intrinsics_matrix is None:
camera_intrinsics = dataset.get_camera_intrinsics(scene_name)
camera_intrinsics_matrix = camera_intrinsics.K
dataframe_list = []
for idx, val in enumerate(good):
match = val[0]
kp_a = kp1[match.queryIdx]
kp_b = kp2[match.trainIdx]
df = DCE.compute_single_sift_match_statistics(depth_a, depth_b, kp_a, kp_b,
pose_a, pose_b, camera_intrinsics_matrix)
dataframe_list.append(df)
returnData = dict()
returnData['kp1'] = kp1
returnData['kp2'] = kp2
returnData['matches'] = matches
returnData['good'] = good
returnData['dataframe_list'] = dataframe_list
return returnData
@staticmethod
def compute_single_sift_match_statistics(depth_a, depth_b, kp_a, kp_b, pose_a, pose_b,
camera_matrix, params=None,
rgb_a=None, rgb_b=None, debug=False):
"""
Compute some statistics of the SIFT match
:param depth_a:
:type depth_a:
:param depth_b:
:type depth_b:
:param kp_a: kp_a.pt is the (u,v) = (column, row) coordinates in the image
:type kp_a: cv2.KeyPoint
:param kp_b:
:type kp_b:
:param pose_a:
:type pose_a:
:param pose_b:
:type pose_b:
:param camera_matrix:
:type camera_matrix:
:param params:
:type params:
:param rgb_a:
:type rgb_a:
:param rgb_b:
:type rgb_b:
:param debug:
:type debug:
:return:
:rtype:
"""
DCE = DenseCorrespondenceEvaluation
# first compute location of kp_a in world frame
image_height, image_width = depth_a.shape[0], depth_a.shape[1]
def clip_pixel_to_image_size_and_round(uv):
u = min(int(round(uv[0])), image_width - 1)
v = min(int(round(uv[1])), image_height - 1)
return [u,v]
uv_a = clip_pixel_to_image_size_and_round((kp_a.pt[0], kp_a.pt[1]))
uv_a_depth = depth_a[uv_a[1], uv_a[0]] / DEPTH_IM_SCALE
# print "uv_a", uv_a
# print "uv_a_depth", uv_a_depth
# print "camera_matrix", camera_matrix
# print "pose_a", pose_a
kp_a_3d = DCE.compute_3d_position(uv_a, uv_a_depth, camera_matrix, pose_a)
uv_b = clip_pixel_to_image_size_and_round((kp_b.pt[0], kp_b.pt[1]))
uv_b_depth = depth_b[uv_b[1], uv_b[0]] / DEPTH_IM_SCALE
uv_b_depth_valid = DCE.is_depth_valid(uv_b_depth)
kp_b_3d = DCE.compute_3d_position(uv_b, uv_b_depth, camera_matrix, pose_b)
# uv_b_ground_truth = correspondence_finder.pinhole_projection_world_to_image(kp_b_3d, camera_matrix, camera_to_world=pose_b)
is_valid = uv_b_depth_valid
if debug:
print "\n\n"
print "uv_a", uv_a
print "kp_a_3d", kp_a_3d
print "kp_b_3d", kp_b_3d
print "is_valid", is_valid
norm_diff_pred_3d = np.linalg.norm(kp_b_3d - kp_a_3d)
pd_template = SIFTKeypointMatchPandaTemplate()
pd_template.set_value('is_valid', is_valid)
if is_valid:
pd_template.set_value('norm_diff_pred_3d', norm_diff_pred_3d)
return pd_template
@staticmethod
def single_image_pair_keypoint_analysis(dcn, dataset, scene_name,
img_a_idx, img_b_idx,
params=None,
camera_intrinsics_matrix=None, visualize=True):
DCE = DenseCorrespondenceEvaluation
# first compute SIFT stuff
sift_data = DCE.single_image_pair_sift_analysis(dataset, scene_name,
img_a_idx, img_b_idx, visualize=visualize)
kp1 = sift_data['kp1']
kp2 = sift_data['kp2']
rgb_a, depth_a, mask_a, pose_a = dataset.get_rgbd_mask_pose(scene_name, img_a_idx)
rgb_a = np.array(rgb_a) # converts PIL image to rgb
rgb_b, depth_b, mask_b, pose_b = dataset.get_rgbd_mask_pose(scene_name, img_b_idx)
rgb_b = np.array(rgb_b) # converts PIL image to rgb
# compute the best matches among the SIFT keypoints
des1 = dcn.evaluate_descriptor_at_keypoints(rgb_a, kp1)
des2 = dcn.evaluate_descriptor_at_keypoints(rgb_b, kp2)
bf = cv2.BFMatcher()
matches = bf.knnMatch(des1, des2, k=2)  # k=2 so we can apply the ratio test below
total_num_matches = len(matches)
good = []
for idx, val in enumerate(matches):
m, n = val
if (m.distance < 0.5 * n.distance) and m.distance < 0.01:
print "\n\n"
print "m.distance", m.distance
print "n.distance", n.distance
good.append([m])
print "total keypoints = ", len(kp1)
print "num good matches = ", len(good)
print "SIFT good matches = ", len(sift_data['good'])
if visualize:
img1 = cv2.cvtColor(rgb_a, cv2.COLOR_BGR2GRAY)
img2 = cv2.cvtColor(rgb_b, cv2.COLOR_BGR2GRAY)
good_vis = random.sample(good, min(5, len(good)))
outImg = 0 * img1 # placeholder
fig, axes = plt.subplots(nrows=1, ncols=1)
fig.set_figheight(10)
fig.set_figwidth(15)
img3 = cv2.drawMatchesKnn(img1, kp1, img2, kp2, good_vis, outImg, flags=2)
plt.imshow(img3)
plt.title("Dense Correspondence Keypoint Matches")
plt.show()
returnData = dict()
returnData['kp1'] = kp1
returnData['kp2'] = kp2
returnData['matches'] = matches
returnData['des1'] = des1
returnData['des2'] = des2
return returnData
@staticmethod
def parse_cross_scene_data(dataset):
"""
Takes a dataset's config and concatenates the annotated cross-scene image-pair
data from all of the listed yaml files.
"""
evaluation_labeled_data_paths = []
# add the multi object list
evaluation_labeled_data_paths += dataset.config["multi_object"]["evaluation_labeled_data_path"]
# add all of the single object lists
for object_key, val in dataset.config["single_object"].iteritems():
if "evaluation_labeled_data_path" in val:
evaluation_labeled_data_paths += val["evaluation_labeled_data_path"]
if len(evaluation_labeled_data_paths) == 0:
print "Could not find labeled cross scene data for this dataset."
print "It needs to be set in the dataset.yaml of the folder from which"
print "this network is loaded from."
return
cross_scene_data = []
home = os.path.dirname(utils.getDenseCorrespondenceSourceDir())
for i in evaluation_labeled_data_paths:
cross_scene_data_full_path = os.path.join(home, i)
this_cross_scene_data = utils.getDictFromYamlFilename(cross_scene_data_full_path)
cross_scene_data += this_cross_scene_data
return cross_scene_data
@staticmethod
def evaluate_network_qualitative_cross_scene(dcn, dataset, draw_human_annotations=True):
"""
This will search for the "evaluation_labeled_data_path" in the dataset.yaml,
and use pairs of images that have been human-labeled across scenes.
"""
dcn.eval()
cross_scene_data = DenseCorrespondenceEvaluation.parse_cross_scene_data(dataset)
for annotated_pair in cross_scene_data:
scene_name_a = annotated_pair["image_a"]["scene_name"]
scene_name_b = annotated_pair["image_b"]["scene_name"]
image_a_idx = annotated_pair["image_a"]["image_idx"]
image_b_idx = annotated_pair["image_b"]["image_idx"]
rgb_a, rgb_b = DenseCorrespondenceEvaluation.single_cross_scene_image_pair_qualitative_analysis(\
dcn, dataset, scene_name_a, image_a_idx, scene_name_b, image_b_idx)
if draw_human_annotations:
img_a_points_picked = annotated_pair["image_a"]["pixels"]
img_b_points_picked = annotated_pair["image_b"]["pixels"]
# note here: converting the rgb_a to numpy format, but not inverting
# the RGB <--> BGR colors as cv2 would expect, because all I'm going to do is then
# plot this as an image in matplotlib, in which case
# would just need to switch the colors back.
rgb_a = dc_plotting.draw_correspondence_points_cv2(np.asarray(rgb_a), img_a_points_picked)
rgb_b = dc_plotting.draw_correspondence_points_cv2(np.asarray(rgb_b), img_b_points_picked)
fig, axes = plt.subplots(nrows=1, ncols=2)
fig.set_figheight(10)
fig.set_figwidth(15)
axes[0].imshow(rgb_a)
axes[1].imshow(rgb_b)
plt.show()
@staticmethod
def get_random_image_pairs(dataset):
"""
Given a dataset, chose a random scene, and a handful of image pairs from
that scene.
:param dataset: dataset from which to draw a scene and image pairs
:type dataset: SpartanDataset
:return: scene_name, img_pairs
:rtype: str, list of lists, where each of the lists are [img_a_idx, img_b_idx], for example:
[[113,220],
[114,225]]
"""
scene_name = dataset.get_random_scene_name()
img_pairs = []
for _ in range(5):
img_a_idx = dataset.get_random_image_index(scene_name)
pose_a = dataset.get_pose_from_scene_name_and_idx(scene_name, img_a_idx)
img_b_idx = dataset.get_img_idx_with_different_pose(scene_name, pose_a, num_attempts=100)
if img_b_idx is None:
continue
img_pairs.append([img_a_idx, img_b_idx])
return scene_name, img_pairs
@staticmethod
def get_random_scenes_and_image_pairs(dataset):
"""
Given a dataset, choose a variety of random scenes and image pairs
:param dataset: dataset from which to draw a scene and image pairs
:type dataset: SpartanDataset
:return: scene_names, img_pairs
:rtype: list[str], list of lists, where each of the lists are [img_a_idx, img_b_idx], for example:
[[113,220],
[114,225]]
"""
scene_names = []
img_pairs = []
for _ in range(5):
scene_name = dataset.get_random_scene_name()
img_a_idx = dataset.get_random_image_index(scene_name)
pose_a = dataset.get_pose_from_scene_name_and_idx(scene_name, img_a_idx)
img_b_idx = dataset.get_img_idx_with_different_pose(scene_name, pose_a, num_attempts=100)
if img_b_idx is None:
continue
img_pairs.append([img_a_idx, img_b_idx])
scene_names.append(scene_name)
return scene_names, img_pairs
@staticmethod
def evaluate_network_qualitative(dcn, dataset, num_image_pairs=5, randomize=False,
scene_type=None):
dcn.eval()
# Train Data
print "\n\n-----------Train Data Evaluation----------------"
if randomize:
scene_names, img_pairs = DenseCorrespondenceEvaluation.get_random_scenes_and_image_pairs(dataset)
else:
if scene_type == "caterpillar":
scene_name = '2018-04-10-16-06-26'
img_pairs = []
img_pairs.append([0,753])
img_pairs.append([812, 1218])
img_pairs.append([1430, 1091])
img_pairs.append([1070, 649])
elif scene_type == "drill":
scene_name = '13_drill_long_downsampled'
img_pairs = []
img_pairs.append([0, 737])
img_pairs.append([409, 1585])
img_pairs.append([2139, 1041])
img_pairs.append([235, 1704])
else:
raise ValueError("scene_type must be one of [drill, caterpillar], it was %s)" %(scene_type))
scene_names = [scene_name]*len(img_pairs)
for scene_name, img_pair in zip(scene_names, img_pairs):
print "Image pair (%d, %d)" %(img_pair[0], img_pair[1])
DenseCorrespondenceEvaluation.single_same_scene_image_pair_qualitative_analysis(dcn,
dataset,
scene_name,
img_pair[0],
img_pair[1])
# Test Data
print "\n\n-----------Test Data Evaluation----------------"
dataset.set_test_mode()
if randomize:
scene_names, img_pairs = DenseCorrespondenceEvaluation.get_random_scenes_and_image_pairs(dataset)
else:
if scene_type == "caterpillar":
scene_name = '2018-04-10-16-08-46'
img_pairs = []
img_pairs.append([0, 754])
img_pairs.append([813, 1219])
img_pairs.append([1429, 1092])
img_pairs.append([1071, 637])
elif scene_type == "drill":
scene_name = '06_drill_long_downsampled'
img_pairs = []
img_pairs.append([0, 617])
img_pairs.append([270, 786])
img_pairs.append([1001, 2489])
img_pairs.append([1536, 1917])
else:
raise ValueError("scene_type must be one of [drill, caterpillar], it was %s)" % (scene_type))
scene_names = [scene_name] * len(img_pairs)
for scene_name, img_pair in zip(scene_names, img_pairs):
print "Image pair (%d, %d)" %(img_pair[0], img_pair[1])
DenseCorrespondenceEvaluation.single_same_scene_image_pair_qualitative_analysis(dcn,
dataset,
scene_name,
img_pair[0],
img_pair[1])
if scene_type == "caterpillar":
# More Test Data
print "\n\n-----------More Test Data Evaluation----------------"
if randomize:
scene_name, img_pairs = DenseCorrespondenceEvaluation.get_random_image_pairs(dataset)
else:
scene_name = '2018-04-16-14-25-19'
img_pairs = []
img_pairs.append([0,1553])
img_pairs.append([1729, 2386])
img_pairs.append([2903, 1751])
img_pairs.append([841, 771])
for img_pair in img_pairs:
print "Image pair (%d, %d)" %(img_pair[0], img_pair[1])
DenseCorrespondenceEvaluation.single_same_scene_image_pair_qualitative_analysis(dcn,
dataset,
scene_name,
img_pair[0],
img_pair[1])
@staticmethod
def compute_loss_on_dataset(dcn, data_loader, loss_config, num_iterations=500):
"""
Computes the loss for the given number of iterations
:param dcn:
:type dcn:
:param data_loader:
:type data_loader:
:param num_iterations:
:type num_iterations:
:return:
:rtype:
"""
dcn.eval()
# loss_vec = np.zeros(num_iterations)
loss_vec = []
match_loss_vec = []
non_match_loss_vec = []
counter = 0
pixelwise_contrastive_loss = PixelwiseContrastiveLoss(dcn.image_shape, config=loss_config)
batch_size = 1
for i, data in enumerate(data_loader, 0):
# get the inputs
data_type, img_a, img_b, matches_a, matches_b, non_matches_a, non_matches_b, metadata = data
data_type = data_type[0]
if len(matches_a[0]) == 0:
print "didn't have any matches, continuing"
continue
img_a = Variable(img_a.cuda(), requires_grad=False)
img_b = Variable(img_b.cuda(), requires_grad=False)
if data_type == "matches":
matches_a = Variable(matches_a.cuda().squeeze(0), requires_grad=False)
matches_b = Variable(matches_b.cuda().squeeze(0), requires_grad=False)
non_matches_a = Variable(non_matches_a.cuda().squeeze(0), requires_grad=False)
non_matches_b = Variable(non_matches_b.cuda().squeeze(0), requires_grad=False)
# run both images through the network
image_a_pred = dcn.forward(img_a)
image_a_pred = dcn.process_network_output(image_a_pred, batch_size)
image_b_pred = dcn.forward(img_b)
image_b_pred = dcn.process_network_output(image_b_pred, batch_size)
# get loss
if data_type == "matches":
loss, match_loss, non_match_loss = \
pixelwise_contrastive_loss.get_loss(image_a_pred,
image_b_pred,
matches_a,
matches_b,
non_matches_a,
non_matches_b)
loss_vec.append(loss.data[0])
non_match_loss_vec.append(non_match_loss.data[0])
match_loss_vec.append(match_loss.data[0])
if i > num_iterations:
break
loss_vec = np.array(loss_vec)
match_loss_vec = np.array(match_loss_vec)
non_match_loss_vec = np.array(non_match_loss_vec)
loss = np.average(loss_vec)
match_loss = np.average(match_loss_vec)
non_match_loss = np.average(non_match_loss_vec)
return loss, match_loss, non_match_loss
@staticmethod
def compute_descriptor_statistics_on_dataset(dcn, dataset, num_images=100,
save_to_file=True, filename=None):
"""
Computes the statistics of the descriptors on the dataset
:param dcn:
:type dcn:
:param dataset:
:type dataset:
:param save_to_file:
:type save_to_file:
:return:
:rtype:
"""
utils.reset_random_seed()
dcn.eval()
to_tensor = transforms.ToTensor()
# helper to compute per-channel min / max / mean (full image and masked)
def compute_descriptor_statistics(res, mask_tensor):
"""
Computes
:param res: The output of the DCN
:type res: torch.FloatTensor with shape [H,W,D]
:return: min, max, mean
:rtype: each is torch.FloatTensor of shape [D]
"""
# convert to [W*H, D]
D = res.shape[2]
# convert to torch.FloatTensor instead of variable
if isinstance(res, torch.autograd.Variable):
res = res.data
res_reshape = res.contiguous().view(-1,D)
channel_mean = res_reshape.mean(0) # shape [D]
channel_min, _ = res_reshape.min(0) # shape [D]
channel_max, _ = res_reshape.max(0) # shape [D]
mask_flat = mask_tensor.view(-1,1).squeeze(1)
# now do the same for the masked image
# gracefully handle the case where the mask is all zeros
mask_indices_flat = torch.nonzero(mask_flat)
if len(mask_indices_flat) == 0:
return None, None
mask_indices_flat = mask_indices_flat.squeeze(1)
# print "mask_flat.shape", mask_flat.shape
res_masked_flat = res_reshape.index_select(0, mask_indices_flat) # shape [mask_size, D]
mask_channel_mean = res_masked_flat.mean(0)
mask_channel_min, _ = res_masked_flat.min(0)
mask_channel_max, _ = res_masked_flat.max(0)
entire_image_stats = (channel_min, channel_max, channel_mean)
mask_image_stats = (mask_channel_min, mask_channel_max, mask_channel_mean)
return entire_image_stats, mask_image_stats
def compute_descriptor_std_dev(res, channel_mean):
"""
Computes the std deviation of a descriptor image, given a channel mean
:param res:
:type res:
:param channel_mean:
:type channel_mean:
:return:
:rtype:
"""
D = res.shape[2]
res_reshape = res.view(-1, D) # shape [W*H,D]
v = res_reshape - channel_mean  # use the flattened [H*W, D] view so std is per-channel
std_dev = torch.std(v, 0) # shape [D]
return std_dev
def update_stats(stats_dict, single_img_stats):
"""
Update the running mean, min and max
:param stats_dict:
:type stats_dict:
:param single_img_stats:
:type single_img_stats:
:return:
:rtype:
"""
min_temp, max_temp, mean_temp = single_img_stats
if stats_dict['min'] is None:
stats_dict['min'] = min_temp
else:
stats_dict['min'] = torch.min(stats_dict['min'], min_temp)
if stats_dict['max'] is None:
stats_dict['max'] = max_temp
else:
stats_dict['max'] = torch.max(stats_dict['max'], max_temp)
if stats_dict['mean'] is None:
stats_dict['mean'] = mean_temp
else:
stats_dict['mean'] += mean_temp
stats = dict()
stats['entire_image'] = {'mean': None, 'max': None, 'min': None}
stats['mask_image'] = {'mean': None, 'max': None, 'min': None}
for i in xrange(0,num_images):
rgb, depth, mask, _ = dataset.get_random_rgbd_mask_pose()
img_tensor = dataset.rgb_image_to_tensor(rgb)
res = dcn.forward_single_image_tensor(img_tensor) # [H, W, D]
mask_tensor = to_tensor(mask).cuda()
entire_image_stats, mask_image_stats = compute_descriptor_statistics(res, mask_tensor)
# handles the case of an empty mask
if mask_image_stats is None:
logging.info("Mask was empty, skipping")
continue
update_stats(stats['entire_image'], entire_image_stats)
update_stats(stats['mask_image'], mask_image_stats)
for key, val in stats.iteritems():
val['mean'] = 1.0/num_images * val['mean']
for field in val:
val[field] = val[field].tolist()
if save_to_file:
if filename is None:
path_to_params_folder = dcn.config['path_to_network_params_folder']
path_to_params_folder = utils.convert_to_absolute_path(path_to_params_folder)
filename = os.path.join(path_to_params_folder, 'descriptor_statistics.yaml')
utils.saveToYaml(stats, filename)
return stats
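    # Illustrative usage sketch (the model folder path below is an assumption,
    # everything else uses functions defined in this module):
    #
    #   dcn = DenseCorrespondenceNetwork.from_model_folder('/path/to/trained/model')
    #   dataset = dcn.load_training_dataset()
    #   stats = DenseCorrespondenceEvaluation.compute_descriptor_statistics_on_dataset(
    #       dcn, dataset, num_images=50, save_to_file=False)
    #   print stats['entire_image']['min'], stats['entire_image']['max']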
@staticmethod
def run_evaluation_on_network(model_folder, num_image_pairs=100,
num_matches_per_image_pair=100,
save_folder_name="analysis",
compute_descriptor_statistics=True,
cross_scene=True,
dataset=None):
"""
Runs all the quantitative evaluations on the model folder
Creates a folder model_folder/analysis that stores the information.
Performs several steps:
1. compute dataset descriptor stats
2. compute quantitative eval csv files
3. make quantitative plots, save as a png for easy viewing
:param model_folder:
:type model_folder:
:return:
:rtype:
"""
utils.reset_random_seed()
DCE = DenseCorrespondenceEvaluation
model_folder = utils.convert_to_absolute_path(model_folder)
        # set up the output directories for the csv files and plots
        output_dir = os.path.join(model_folder, save_folder_name)
train_output_dir = os.path.join(output_dir, "train")
test_output_dir = os.path.join(output_dir, "test")
cross_scene_output_dir = os.path.join(output_dir, "cross_scene")
# create the necessary directories
for dir in [output_dir, train_output_dir, test_output_dir, cross_scene_output_dir]:
if not os.path.isdir(dir):
os.makedirs(dir)
dcn = DenseCorrespondenceNetwork.from_model_folder(model_folder)
dcn.eval()
if dataset is None:
dataset = dcn.load_training_dataset()
# compute dataset statistics
if compute_descriptor_statistics:
logging.info("Computing descriptor statistics on dataset")
DCE.compute_descriptor_statistics_on_dataset(dcn, dataset, num_images=100, save_to_file=True)
# evaluate on training data and on test data
logging.info("Evaluating network on train data")
dataset.set_train_mode()
pd_dataframe_list, df = DCE.evaluate_network(dcn, dataset, num_image_pairs=num_image_pairs,
num_matches_per_image_pair=num_matches_per_image_pair)
train_csv = os.path.join(train_output_dir, "data.csv")
df.to_csv(train_csv)
logging.info("Evaluating network on test data")
dataset.set_test_mode()
pd_dataframe_list, df = DCE.evaluate_network(dcn, dataset, num_image_pairs=num_image_pairs,
num_matches_per_image_pair=num_matches_per_image_pair)
test_csv = os.path.join(test_output_dir, "data.csv")
df.to_csv(test_csv)
if cross_scene:
logging.info("Evaluating network on cross scene data")
df = DCE.evaluate_network_cross_scene(dcn=dcn, dataset=dataset, save=False)
cross_scene_csv = os.path.join(cross_scene_output_dir, "data.csv")
df.to_csv(cross_scene_csv)
logging.info("Making plots")
DCEP = DenseCorrespondenceEvaluationPlotter
fig_axes = DCEP.run_on_single_dataframe(train_csv, label="train", save=False)
fig_axes = DCEP.run_on_single_dataframe(test_csv, label="test", save=False, previous_fig_axes=fig_axes)
if cross_scene:
fig_axes = DCEP.run_on_single_dataframe(cross_scene_csv, label="cross_scene", save=False, previous_fig_axes=fig_axes)
fig, _ = fig_axes
save_fig_file = os.path.join(output_dir, "quant_plots.png")
fig.savefig(save_fig_file)
        # only do across-object analysis if we have multiple single objects
if dataset.get_number_of_unique_single_objects() > 1:
across_object_output_dir = os.path.join(output_dir, "across_object")
if not os.path.isdir(across_object_output_dir):
os.makedirs(across_object_output_dir)
logging.info("Evaluating network on across object data")
df = DCE.evaluate_network_across_objects(dcn=dcn, dataset=dataset)
across_object_csv = os.path.join(across_object_output_dir, "data.csv")
df.to_csv(across_object_csv)
DCEP.run_on_single_dataframe_across_objects(across_object_csv, label="across_object", save=True)
logging.info("Finished running evaluation on network")
@staticmethod
def make_2d_cluster_plot(dcn, dataset, plot_background=False):
"""
This function randomly samples many points off of different objects and the background,
and makes an object-labeled scatter plot of where these descriptors are.
"""
print "Checking to make sure this is a 2D or 3D descriptor"
print "If you'd like you could add projection methods for higher dimension descriptors"
assert ((dcn.descriptor_dimension == 2) or (dcn.descriptor_dimension == 3))
if dcn.descriptor_dimension == 3:
use_3d = True
d = 3
print "This descriptor_dimension is 3d"
print "I'm going to make 3 plots for you: xy, yz, xz"
else:
use_3d = False
d = 2
# randomly grab object ID, and scene
# Fixing random state for reproducibility
np.random.seed(19680801)
descriptors_known_objects_samples = dict()
if use_3d:
descriptors_known_objects_samples_xy = dict()
descriptors_known_objects_samples_yz = dict()
descriptors_known_objects_samples_xz = dict()
descriptors_background_samples = np.zeros((0,d))
if use_3d:
descriptors_background_samples_xy = np.zeros((0,2))
descriptors_background_samples_yz = np.zeros((0,2))
descriptors_background_samples_xz = np.zeros((0,2))
num_objects = dataset.get_number_of_unique_single_objects()
num_samples_per_image = 100
for i in range(100):
object_id, object_id_int = dataset.get_random_object_id_and_int()
scene_name = dataset.get_random_single_object_scene_name(object_id)
img_idx = dataset.get_random_image_index(scene_name)
rgb = dataset.get_rgb_image_from_scene_name_and_idx(scene_name, img_idx)
mask = dataset.get_mask_image_from_scene_name_and_idx(scene_name, img_idx)
mask_torch = torch.from_numpy(np.asarray(mask)).long()
mask_inv = 1 - mask_torch
object_uv_samples = correspondence_finder.random_sample_from_masked_image_torch(mask_torch, num_samples_per_image)
background_uv_samples = correspondence_finder.random_sample_from_masked_image_torch(mask_inv, num_samples_per_image/num_objects)
object_u_samples = object_uv_samples[0].numpy()
object_v_samples = object_uv_samples[1].numpy()
background_u_samples = background_uv_samples[0].numpy()
background_v_samples = background_uv_samples[1].numpy()
# This snippet will plot where the samples are coming from in the image
# plt.scatter(object_u_samples, object_v_samples, c="g", alpha=0.5, label="object")
# plt.scatter(background_u_samples, background_v_samples, c="k", alpha=0.5, label="background")
# plt.legend()
# plt.show()
img_tensor = dataset.rgb_image_to_tensor(rgb)
res = dcn.forward_single_image_tensor(img_tensor) # [H, W, D]
res = res.data.cpu().numpy()
descriptors_object = np.zeros((len(object_u_samples),d))
for j in range(len(object_u_samples)):
descriptors_object[j,:] = res[object_v_samples[j], object_u_samples[j], :]
if use_3d:
descriptors_object_xy = np.zeros((len(object_u_samples),2))
descriptors_object_yz = np.zeros((len(object_u_samples),2))
descriptors_object_xz = np.zeros((len(object_u_samples),2))
for j in range(len(object_u_samples)):
descriptors_object_xy[j,:] = res[object_v_samples[j], object_u_samples[j], 0:2]
descriptors_object_yz[j,:] = res[object_v_samples[j], object_u_samples[j], 1:3]
descriptors_object_xz[j,:] = res[object_v_samples[j], object_u_samples[j], 0::2]
descriptors_background = np.zeros((len(background_u_samples),d))
for j in range(len(background_u_samples)):
descriptors_background[j,:] = res[background_v_samples[j], background_u_samples[j], :]
if use_3d:
descriptors_background_xy = np.zeros((len(background_u_samples),2))
descriptors_background_yz = np.zeros((len(background_u_samples),2))
descriptors_background_xz = np.zeros((len(background_u_samples),2))
for j in range(len(background_u_samples)):
descriptors_background_xy[j,:] = res[background_v_samples[j], background_u_samples[j], 0:2]
descriptors_background_yz[j,:] = res[background_v_samples[j], background_u_samples[j], 1:3]
descriptors_background_xz[j,:] = res[background_v_samples[j], background_u_samples[j], 0::2]
# This snippet will plot the descriptors just from this image
# plt.scatter(descriptors_object[:,0], descriptors_object[:,1], c="g", alpha=0.5, label=object_id)
# plt.scatter(descriptors_background[:,0], descriptors_background[:,1], c="k", alpha=0.5, label="background")
# plt.legend()
# plt.show()
if object_id not in descriptors_known_objects_samples:
descriptors_known_objects_samples[object_id] = descriptors_object
if use_3d:
descriptors_known_objects_samples_xy[object_id] = descriptors_object_xy
descriptors_known_objects_samples_yz[object_id] = descriptors_object_yz
descriptors_known_objects_samples_xz[object_id] = descriptors_object_xz
else:
descriptors_known_objects_samples[object_id] = np.vstack((descriptors_known_objects_samples[object_id], descriptors_object))
if use_3d:
descriptors_known_objects_samples_xy[object_id] = np.vstack((descriptors_known_objects_samples_xy[object_id], descriptors_object_xy))
descriptors_known_objects_samples_yz[object_id] = np.vstack((descriptors_known_objects_samples_yz[object_id], descriptors_object_yz))
descriptors_known_objects_samples_xz[object_id] = np.vstack((descriptors_known_objects_samples_xz[object_id], descriptors_object_xz))
descriptors_background_samples = np.vstack((descriptors_background_samples, descriptors_background))
if use_3d:
descriptors_background_samples_xy = np.vstack((descriptors_background_samples_xy, descriptors_background_xy))
descriptors_background_samples_yz = np.vstack((descriptors_background_samples_yz, descriptors_background_yz))
descriptors_background_samples_xz = np.vstack((descriptors_background_samples_xz, descriptors_background_xz))
print "ALL"
if not use_3d:
for key, value in descriptors_known_objects_samples.iteritems():
plt.scatter(value[:,0], value[:,1], alpha=0.5, label=key)
if plot_background:
plt.scatter(descriptors_background_samples[:,0], descriptors_background_samples[:,1], alpha=0.5, label="background")
plt.legend()
plt.show()
if use_3d:
for key, value in descriptors_known_objects_samples_xy.iteritems():
plt.scatter(value[:,0], value[:,1], alpha=0.5, label=key)
if plot_background:
plt.scatter(descriptors_background_samples_xy[:,0], descriptors_background_samples_xy[:,1], alpha=0.5, label="background")
plt.legend()
plt.show()
for key, value in descriptors_known_objects_samples_yz.iteritems():
plt.scatter(value[:,0], value[:,1], alpha=0.5, label=key)
if plot_background:
plt.scatter(descriptors_background_samples_yz[:,0], descriptors_background_samples_yz[:,1], alpha=0.5, label="background")
plt.legend()
plt.show()
for key, value in descriptors_known_objects_samples_xz.iteritems():
plt.scatter(value[:,0], value[:,1], alpha=0.5, label=key)
if plot_background:
plt.scatter(descriptors_background_samples_xz[:,0], descriptors_background_samples_xz[:,1], alpha=0.5, label="background")
plt.legend()
plt.show()
print "done"
@staticmethod
def make_default():
"""
Makes a DenseCorrespondenceEvaluation object using the default config
:return:
:rtype: DenseCorrespondenceEvaluation
"""
config_filename = os.path.join(utils.getDenseCorrespondenceSourceDir(), 'config', 'dense_correspondence', 'evaluation', 'evaluation.yaml')
config = utils.getDictFromYamlFilename(config_filename)
return DenseCorrespondenceEvaluation(config)
############ TESTING ################
@staticmethod
def test(dcn, dataset, data_idx=1, visualize=False, debug=False, match_idx=10):
scene_name = '13_drill_long_downsampled'
img_idx_a = utils.getPaddedString(0)
img_idx_b = utils.getPaddedString(737)
DenseCorrespondenceEvaluation.single_same_scene_image_pair_qualitative_analysis(dcn, dataset,
scene_name, img_idx_a,
img_idx_b)
class DenseCorrespondenceEvaluationPlotter(object):
"""
This class contains plotting utilities. They are all
encapsulated as static methods
"""
def __init__(self):
pass
@staticmethod
def make_cdf_plot(ax, data, num_bins, label=None, x_axis_scale_factor=1):
"""
Plots the empirical CDF of the data
:param ax: axis of a matplotlib plot to plot on
:param data:
:type data:
:param num_bins:
:type num_bins:
:return:
:rtype:
"""
cumhist, l, b, e = ss.cumfreq(data, num_bins)
cumhist *= 1.0 / len(data)
x_axis = l + b * np.arange(0, num_bins)
x_axis /= x_axis_scale_factor
plot = ax.plot(x_axis, cumhist, label=label)
return plot
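    # Illustrative usage sketch (synthetic data, purely for demonstration):
    #
    #   fig, ax = plt.subplots()
    #   data = np.random.exponential(scale=5.0, size=1000)
    #   DenseCorrespondenceEvaluationPlotter.make_cdf_plot(ax, data, num_bins=100, label="demo")
    #   ax.legend()
    #   plt.show()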
@staticmethod
def make_pixel_match_error_plot(ax, df, label=None, num_bins=100, masked=False):
"""
:param ax: axis of a matplotlib plot to plot on
:param df: pandas dataframe, i.e. generated from quantitative
:param num_bins:
:type num_bins:
:return:
:rtype:
"""
DCEP = DenseCorrespondenceEvaluationPlotter
if masked:
data_string = 'pixel_match_error_l2_masked'
else:
data_string = 'pixel_match_error_l2'
data = df[data_string]
        # rescale the pixel distance by the image diagonal so the x-axis reads as a
        # fraction of the image; the diagonal is hard-coded to 800 pixels (a 640x480 image)
        x_axis_scale_factor = 800
plot = DCEP.make_cdf_plot(ax, data, num_bins=num_bins, label=label, x_axis_scale_factor=x_axis_scale_factor)
        if masked:
            ax.set_xlabel('Pixel match error (masked), L2 (fraction of image diagonal)')
        else:
            ax.set_xlabel('Pixel match error, L2 (fraction of image diagonal)')
ax.set_ylabel('Fraction of images')
# ax.set_xlim([0,200])
return plot
@staticmethod
def make_across_object_best_match_plot(ax, df, label=None, num_bins=100):
"""
:param ax: axis of a matplotlib plot to plot on
:param df: pandas dataframe, i.e. generated from quantitative
:param num_bins:
:type num_bins:
:return:
:rtype:
"""
DCEP = DenseCorrespondenceEvaluationPlotter
data = df['norm_diff_descriptor_best_match']
plot = DCEP.make_cdf_plot(ax, data, num_bins=num_bins, label=label)
ax.set_xlabel('Best descriptor match, L2 norm')
ax.set_ylabel('Fraction of pixel samples from images')
return plot
@staticmethod
def make_descriptor_accuracy_plot(ax, df, label=None, num_bins=100, masked=False):
"""
Makes a plot of best match accuracy.
Drops nans
:param ax: axis of a matplotlib plot to plot on
:param df:
:type df:
:param num_bins:
:type num_bins:
:return:
:rtype:
"""
DCEP = DenseCorrespondenceEvaluationPlotter
if masked:
data_string = 'norm_diff_pred_3d_masked'
else:
data_string = 'norm_diff_pred_3d'
data = df[data_string]
data = data.dropna()
data *= 100 # convert to cm
plot = DCEP.make_cdf_plot(ax, data, num_bins=num_bins, label=label)
if masked:
ax.set_xlabel('3D match error (masked), L2 (cm)')
else:
ax.set_xlabel('3D match error, L2 (cm)')
ax.set_ylabel('Fraction of images')
#ax.set_title("3D Norm Diff Best Match")
return plot
@staticmethod
def make_norm_diff_ground_truth_plot(ax, df, label=None, num_bins=100, masked=False):
"""
:param ax: axis of a matplotlib plot to plot on
:param df:
:type df:
:param num_bins:
:type num_bins:
:return:
:rtype:
"""
DCEP = DenseCorrespondenceEvaluationPlotter
data = df['norm_diff_descriptor_ground_truth']
plot = DCEP.make_cdf_plot(ax, data, num_bins=num_bins, label=label)
ax.set_xlabel('Descriptor match error, L2')
ax.set_ylabel('Fraction of images')
return plot
@staticmethod
def make_fraction_false_positives_plot(ax, df, label=None, num_bins=100, masked=False):
"""
:param ax: axis of a matplotlib plot to plot on
:param df:
:type df:
:param num_bins:
:type num_bins:
:return:
:rtype:
"""
DCEP = DenseCorrespondenceEvaluationPlotter
if masked:
data_string = 'fraction_pixels_closer_than_ground_truth_masked'
else:
data_string = 'fraction_pixels_closer_than_ground_truth'
data = df[data_string]
plot = DCEP.make_cdf_plot(ax, data, num_bins=num_bins, label=label)
if masked:
ax.set_xlabel('Fraction false positives (masked)')
else:
ax.set_xlabel('Fraction false positives')
ax.set_ylabel('Fraction of images')
ax.set_xlim([0, 1])
ax.set_ylim([0, 1])
return plot
@staticmethod
def make_average_l2_false_positives_plot(ax, df, label=None, num_bins=100, masked=False):
"""
:param ax: axis of a matplotlib plot to plot on
:param df:
:type df:
:param num_bins:
:type num_bins:
:return:
:rtype:
"""
DCEP = DenseCorrespondenceEvaluationPlotter
if masked:
data_string = 'average_l2_distance_for_false_positives_masked'
else:
data_string = 'average_l2_distance_for_false_positives'
data = df[data_string]
plot = DCEP.make_cdf_plot(ax, data, num_bins=num_bins, label=label)
if masked:
ax.set_xlabel('Average l2 pixel distance for false positives (masked)')
else:
ax.set_xlabel('Average l2 pixel distance for false positives')
ax.set_ylabel('Fraction of images')
# ax.set_xlim([0,200])
return plot
@staticmethod
def compute_area_above_curve(df, field, num_bins=100):
"""
Computes AOC for the entries in that field
:param df:
:type df: Pandas.DataFrame
:param field: specifies which column of the DataFrame to use
:type field: str
:return:
:rtype:
"""
data = df[field]
data = data.dropna()
cumhist, l, b, e = ss.cumfreq(data, num_bins)
cumhist *= 1.0 / len(data)
# b is bin width
area_above_curve = b * np.sum((1-cumhist))
return area_above_curve
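    # Worked example (synthetic numbers, for intuition only): if the CDF of
    # 'norm_diff_pred_3d' rises to 1.0 almost immediately, the area above the
    # curve is close to 0, so a smaller value means better matches overall.
    #
    #   df_demo = pd.DataFrame({'norm_diff_pred_3d': np.random.exponential(0.01, 1000)})
    #   aac = DenseCorrespondenceEvaluationPlotter.compute_area_above_curve(df_demo, 'norm_diff_pred_3d')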
@staticmethod
def run_on_single_dataframe(path_to_df_csv, label=None, output_dir=None, save=True, previous_fig_axes=None):
"""
This method is intended to be called from an ipython notebook for plotting.
Usage notes:
- after calling this function, you can still change many things about the plot
- for example you can still call plt.title("New title") to change the title
- if you'd like to plot multiple lines on the same axes, then take the return arg of a previous call to this function,
- and pass it into previous_plot, i.e.:
fig = run_on_single_dataframe("thing1.csv")
run_on_single_dataframe("thing2.csv", previous_plot=fig)
plt.title("both things")
plt.show()
- if you'd like each line to have a label in the plot, then use pass a string to label, i.e.:
fig = run_on_single_dataframe("thing1.csv", label="thing1")
run_on_single_dataframe("thing2.csv", label="thing2", previous_plot=fig)
plt.title("both things")
plt.show()
:param path_to_df_csv: full path to csv file
:type path_to_df_csv: string
:param label: name that will show up labeling this line in the legend
:type label: string
:param save: whether or not you want to save a .png
:type save: bool
:param previous_plot: a previous matplotlib figure to keep building on
:type previous_plot: None or matplotlib figure
"""
DCEP = DenseCorrespondenceEvaluationPlotter
path_to_csv = utils.convert_to_absolute_path(path_to_df_csv)
if output_dir is None:
output_dir = os.path.dirname(path_to_csv)
df = pd.read_csv(path_to_csv, index_col=0, parse_dates=True)
if 'is_valid_masked' not in df:
use_masked_plots = False
else:
use_masked_plots = True
        if previous_fig_axes is None:
N = 5
if use_masked_plots:
fig, axes = plt.subplots(nrows=N, ncols=2, figsize=(15,N*5))
else:
fig, axes = plt.subplots(N, figsize=(10,N*5))
else:
[fig, axes] = previous_fig_axes
def get_ax(axes, index):
if use_masked_plots:
return axes[index,0]
else:
return axes[index]
# pixel match error
ax = get_ax(axes, 0)
plot = DCEP.make_pixel_match_error_plot(ax, df, label=label)
if use_masked_plots:
plot = DCEP.make_pixel_match_error_plot(axes[0,1], df, label=label, masked=True)
ax.legend()
# 3D match error
ax = get_ax(axes, 1)
plot = DCEP.make_descriptor_accuracy_plot(ax, df, label=label)
if use_masked_plots:
plot = DCEP.make_descriptor_accuracy_plot(axes[1,1], df, label=label, masked=True)
# if save:
# fig_file = os.path.join(output_dir, "norm_diff_pred_3d.png")
# fig.savefig(fig_file)
aac = DCEP.compute_area_above_curve(df, 'norm_diff_pred_3d')
d = dict()
d['norm_diff_3d_area_above_curve'] = float(aac)
# norm difference of the ground truth match (should be 0)
ax = get_ax(axes,2)
plot = DCEP.make_norm_diff_ground_truth_plot(ax, df, label=label)
# fraction false positives
ax = get_ax(axes,3)
plot = DCEP.make_fraction_false_positives_plot(ax, df, label=label)
if use_masked_plots:
plot = DCEP.make_fraction_false_positives_plot(axes[3,1], df, label=label, masked=True)
# average l2 false positives
ax = get_ax(axes, 4)
plot = DCEP.make_average_l2_false_positives_plot(ax, df, label=label)
if use_masked_plots:
plot = DCEP.make_average_l2_false_positives_plot(axes[4,1], df, label=label, masked=True)
yaml_file = os.path.join(output_dir, 'stats.yaml')
utils.saveToYaml(d, yaml_file)
return [fig, axes]
@staticmethod
def run_on_single_dataframe_across_objects(path_to_df_csv, label=None, output_dir=None, save=True, previous_fig_axes=None):
"""
This method is intended to be called from an ipython notebook for plotting.
See run_on_single_dataframe() for documentation.
The only difference is that for this one, we only have across object data.
"""
DCEP = DenseCorrespondenceEvaluationPlotter
path_to_csv = utils.convert_to_absolute_path(path_to_df_csv)
if output_dir is None:
output_dir = os.path.dirname(path_to_csv)
df = pd.read_csv(path_to_csv, index_col=0, parse_dates=True)
        if previous_fig_axes is None:
N = 1
fig, ax = plt.subplots(N, figsize=(10,N*5))
else:
[fig, ax] = previous_fig_axes
# pixel match error
plot = DCEP.make_across_object_best_match_plot(ax, df, label=label)
ax.legend()
if save:
fig_file = os.path.join(output_dir, "across_objects.png")
fig.savefig(fig_file)
return [fig, ax]
def run():
pass
def main(config):
eval = DenseCorrespondenceEvaluation(config)
dcn = eval.load_network_from_config("10_scenes_drill")
test_dataset = SpartanDataset(mode="test")
DenseCorrespondenceEvaluation.test(dcn, test_dataset)
def test():
config_filename = os.path.join(utils.getDenseCorrespondenceSourceDir(), 'config', 'evaluation', 'evaluation.yaml')
config = utils.getDictFromYamlFilename(config_filename)
default_config = utils.get_defaults_config()
utils.set_cuda_visible_devices(default_config['cuda_visible_devices'])
main(config)
if __name__ == "__main__":
test()
|
import unittest
from cupy import cuda
from cupy.testing import attr
@unittest.skipUnless(cuda.nccl_enabled, 'nccl is not installed')
class TestNCCL(unittest.TestCase):
@attr.gpu
def test_single_proc_ring(self):
id = cuda.nccl.get_unique_id()
comm = cuda.nccl.NcclCommunicator(1, id, 0)
assert 0 == comm.rank_id()
comm.destroy()
@attr.gpu
@unittest.skipUnless(cuda.nccl_enabled and
cuda.nccl.get_version() >= 2400, 'Using old NCCL')
def test_abort(self):
id = cuda.nccl.get_unique_id()
comm = cuda.nccl.NcclCommunicator(1, id, 0)
comm.abort()
@attr.gpu
@unittest.skipUnless(cuda.nccl_enabled and
cuda.nccl.get_version() >= 2400, 'Using old NCCL')
def test_check_async_error(self):
id = cuda.nccl.get_unique_id()
comm = cuda.nccl.NcclCommunicator(1, id, 0)
comm.check_async_error()
comm.destroy()
|
# -*- coding: utf-8 -*-
"""
chanjo_report._compat
~~~~~~~~~~~~~~~~~~~~~~
Python 2.7.x, 3.2+ compatibility module.
"""
from __future__ import absolute_import, unicode_literals
import operator
import sys
is_py2 = sys.version_info[0] == 2
if not is_py2:
# Python 3
# strings and ints
text_type = str
string_types = (str,)
integer_types = (int,)
# lazy iterators
zip = zip
range = range
iteritems = operator.methodcaller('items')
iterkeys = operator.methodcaller('keys')
itervalues = operator.methodcaller('values')
else:
# Python 2
# strings and ints
text_type = unicode
string_types = (str, unicode)
integer_types = (int, long)
# lazy iterators
range = xrange
from itertools import izip as zip
iteritems = operator.methodcaller('iteritems')
iterkeys = operator.methodcaller('iterkeys')
itervalues = operator.methodcaller('itervalues')
|
import pytest
from .. import exceptions
class TestAccessTokenException:
def test_exception(self):
ex = exceptions.AccessTokenException()
assert 'Error retrieving' in str(ex)
class TestDebugTokenException:
def test_exception(self):
ex = exceptions.DebugTokenException({})
assert 'Unable to' in str(ex)
ex = exceptions.DebugTokenException({'message': 'Foo'})
assert str(ex) == 'Foo'
class TestUserEmailException:
def test_exception(self):
ex = exceptions.UserEmailException()
assert 'Email access' in str(ex)
class TestFacebookRequestException:
def test_exception(self):
ex = exceptions.FacebookRequestException({})
assert 'Unknown error' in str(ex)
ex = exceptions.FacebookRequestException({'message': 'Foo'})
assert str(ex) == 'Foo'
class TestFacebookNetworkException:
def test_exception(self):
ex = exceptions.FacebookNetworkException()
assert 'Unable to' in str(ex)
|
"""
Compare Plot
============
_thumb: .5, .5
"""
import arviz as az
import numpy as np
import pymc3 as pm
az.style.use('arviz-darkgrid')
# Data of the Eight Schools Model
J = 8
y = np.array([28., 8., -3., 7., -1., 1., 18., 12.])
sigma = np.array([15., 10., 16., 11., 9., 11., 10., 18.])
with pm.Model('Centered Eight Schools') as centered_eight:
mu = pm.Normal('mu', mu=0, sd=5)
tau = pm.HalfCauchy('tau', beta=5)
theta = pm.Normal('theta', mu=mu, sd=tau, shape=J)
obs = pm.Normal('obs', mu=theta, sd=sigma, observed=y)
centered_eight_trace = pm.sample()
with pm.Model('Non-Centered Eight Schools') as non_centered:
mu = pm.Normal('mu', mu=0, sd=5)
tau = pm.HalfCauchy('tau', beta=5)
theta_tilde = pm.Normal('theta_t', mu=0, sd=1, shape=J)
theta = pm.Deterministic('theta', mu + tau * theta_tilde)
obs = pm.Normal('obs', mu=theta, sd=sigma, observed=y)
non_centered_eight_trace = pm.sample()
model_compare = az.compare({
centered_eight: centered_eight_trace,
non_centered: non_centered_eight_trace
})
az.compareplot(model_compare, figsize=(12, 4))
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import ssl, socket
import pprint
import argparse
from datetime import datetime
URLS_TO_CHECK = []
# parse the arguments
parser = argparse.ArgumentParser()
parser.add_argument('-d', action="store_true", default=False, dest="discover", help="Zabbix discover mode.")
parser.add_argument('-u', action="store", default="", dest="url", help="URL of cert to check")
args = parser.parse_args()
def loadURLList():
global URLS_TO_CHECK
for line in open("/usr/lib/zabbix/externalscripts/certlist.txt"):
URLS_TO_CHECK.append(line.strip())
def discover():
output = ""
chunks = []
for line in open("/usr/lib/zabbix/externalscripts/certlist.txt"):
chunks.append("{\"{#CERTURL}\": \"%s\"}" % line.strip() )
print "{\"data\": [" + ','.join(chunks) + "]}"
def checkCertURL():
global URLS_TO_CHECK
loadURLList()
ctx = ssl.create_default_context()
s = ctx.wrap_socket(socket.socket(), server_hostname=args.url)
s.connect((args.url, 443))
cert = s.getpeercert()
expire_date = datetime.strptime(cert['notAfter'], "%b %d %H:%M:%S %Y GMT")
expire_in = expire_date - datetime.now()
print expire_in.days
def main():
if args.discover:
discover()
else:
checkCertURL()
# Start program
if __name__ == "__main__":
main()
|
from com.jcraft.jsch import JSchException
from com.jcraft.jsch import JSch
from org.python.core.util import FileUtil
import sys
failed = []
exclude = "'Rollback point not found\|No space left on device\|Permission denied\|Already exists\|Unsupported key supplied'"
def run(command, session):
output = []
error = []
try:
_ssh_client = session.openChannel("exec")
_ssh_client.setInputStream(None)
_ssh_client.setErrStream(None)
instream = _ssh_client.getInputStream()
errstream = _ssh_client.getErrStream()
_ssh_client.setCommand(command)
_ssh_client.connect()
fu1 = FileUtil.wrap(instream)
for line in fu1.readlines():
output.append(line)
fu1.close()
fu2 = FileUtil.wrap(errstream)
for line in fu2.readlines():
error.append(line)
fu2.close()
_ssh_client.disconnect()
except JSchException as e:
print("JSch exception on %s: %s" % (server, str(e)))
return output, error
def connection(server):
try:
jsch = JSch()
session = jsch.getSession("root", server, 22)
session.setPassword("couchbase")
session.setConfig("StrictHostKeyChecking", "no")
session.connect(10000)
return session
except:
failed.append(server)
return None
def scan_all_slaves():
all_slaves = ["172.23.123.80","172.23.107.117","172.23.107.116","172.23.107.120","172.23.106.136","172.23.121.65",
"172.23.105.66","172.23.108.94","172.23.104.254",
"172.23.120.172","172.23.100.195","172.23.109.166","172.23.122.36","172.23.122.37",
"172.23.122.38","172.23.99.156","172.23.120.106","172.23.123.184","172.23.120.84",
"172.23.120.223","172.23.120.26","172.23.120.85","172.23.120.90","172.23.120.103",
"172.23.120.104","172.23.120.105","172.23.105.131","172.23.106.231","172.23.105.170",
"172.23.98.7","172.23.105.169","172.23.106.94","172.23.96.83","172.23.109.38",
"172.23.106.41","172.23.106.43","172.23.106.34","172.23.105.209","172.23.107.165",
"172.23.104.30","172.23.108.6","172.23.106.230","172.23.96.110","172.23.107.166",
"172.23.109.52","172.23.104.35","172.23.105.174","172.23.211.37","172.23.106.193",
"172.23.107.226","172.23.106.162","172.23.105.95","172.23.105.40","172.23.108.33",
"172.23.105.135","172.23.105.136","172.23.96.232","172.23.96.246","172.23.96.248",
"172.23.96.255","172.23.97.0","172.23.96.206","172.23.105.248","172.23.107.249",
"172.23.107.21","172.23.107.68","172.23.121.16","172.23.120.254","172.23.120.31",
"172.23.120.228","172.23.108.27","172.23.104.76","172.23.105.144","172.23.108.222",
"172.23.107.238","172.23.106.205","172.23.105.131","172.23.105.131","172.23.105.115",
"172.23.123.88","172.23.123.91","172.23.123.69","172.23.123.69","172.23.123.71",
"172.23.123.72","172.23.123.75","172.23.123.73","172.23.123.74","172.23.123.70",
"172.23.123.77","172.23.123.76","172.23.123.78","172.23.123.80",
"172.23.123.79","172.23.97.128","172.23.99.156","172.23.104.136","172.23.97.128",
"172.23.99.156","172.23.97.101","172.23.107.216","172.23.104.34"
]
count = 1
for server in all_slaves:
print("--+--+--+--+-- %s. CHECKING ON SLAVE: %s --+--+--+--+--" % (count, server))
count += 1
session = connection(server)
if session is None:
continue
cmds = ["find /data/workspace/ -iname '*collect*2021*.zip'", "find /data/workspace/ -iname '*2021*diag*.zip'"]
if len(sys.argv) > 1:
cmds = ["find /data/workspace/ -iname '*collect*{}*.zip'".format(sys.argv[1]),
"find /data/workspace/ -iname '*{}*diag*.zip'".format(sys.argv[1].replace("-", ""))]
for cmd in cmds:
output, _ = run(cmd, session)
try:
for cbcollect_zips in output:
flag = True
log_files, _ = run("zipinfo -1 {}".format(cbcollect_zips), session)
for file in log_files:
if file.rstrip().endswith("dmp"):
print "#######################"
print "checking: %s" % cbcollect_zips.rstrip()
print "#######################"
print file.rstrip()
flag = False
break
run("rm -rf /root/cbcollect*", session)[0]
run("unzip {}".format(cbcollect_zips), session)[0]
memcached = "/root/cbcollect*/memcached.log*"
o, _ = run("grep 'CRITICAL\| ERROR ' {} | grep -v {}".format(memcached, exclude), session)
if o:
if flag:
print "#######################"
print "checking: %s" % cbcollect_zips.rstrip()
print "#######################"
print "".join(o)
except:
pass
session.disconnect()
def check_coredump_exist(server):
binCb = "/opt/couchbase/bin/"
libCb = "/opt/couchbase/var/lib/couchbase/"
dmpmsg = ""
session = connection(server)
if session is None:
return
def findIndexOf(strList, subString):
for i in range(len(strList)):
if subString in strList[i]:
return i
return -1
def get_gdb(dmpPath, dmpName):
dmpFile = dmpPath + dmpName
        # note: str.strip(".dmp") removes characters, not the suffix, so build the name explicitly
        coreFile = dmpPath + dmpName.replace(".dmp", "") + ".core"
run("rm -rf " + coreFile, session)
run("/" + binCb + "minidump-2-core " + dmpFile + " > " + coreFile, session)
gdbOut = run("gdb --batch " + binCb + "memcached -c " + coreFile + " -ex \"bt full\" -ex quit", session)[0]
index = findIndexOf(gdbOut, "Core was generated by")
gdbOut = gdbOut[index:]
gdbOut = " ".join(gdbOut)
return gdbOut
print(server + " : SSH Successful")
print(server + " : Looking for crash dump files")
crashDir = libCb + "crash/"
dmpFiles = run("ls -lt " + crashDir, session)[0]
dmpFiles = [f for f in dmpFiles if ".core" not in f]
dmpFiles = [f for f in dmpFiles if "total" not in f]
dmpFiles = [f.split()[-1] for f in dmpFiles if ".core" not in f]
dmpFiles = [f.strip("\n") for f in dmpFiles]
if dmpFiles:
print(run("cat /opt/couchbase/VERSION.txt", session)[0])
msg = "Node %s - Core dump seen: %s" % (server, str(len(dmpFiles)))
dmpmsg += msg + "\n"
print(msg)
print(server + " : Stack Trace of first crash: " + dmpFiles[-1])
print(get_gdb(crashDir, dmpFiles[-1]))
else:
print(server + " : No crash files found")
print(server + " : Looking for CRITICAL messages in log")
logsDir = libCb + "logs/"
logFiles = run("ls " + logsDir + "memcached.log.*", session)[0]
for logFile in logFiles:
criticalMessages = run("grep -r 'CRITICAL\| ERROR ' {} | grep -v {}".format(logFile.strip("\n"), exclude), session)[0]
index = findIndexOf(criticalMessages, "Fatal error encountered during exception handling")
criticalMessages = criticalMessages[:index]
if (criticalMessages):
print(server + " : Found message in " + logFile.strip("\n"))
print("".join(criticalMessages))
break
session.disconnect()
def scan_all_servers():
from java.time import Duration
from com.couchbase.client.java import Cluster, ClusterOptions
from com.couchbase.client.java.env import ClusterEnvironment
from com.couchbase.client.core.env import TimeoutConfig, IoConfig
cluster_env = ClusterEnvironment.builder().ioConfig(IoConfig.numKvConnections(25)).timeoutConfig(TimeoutConfig.builder().connectTimeout(Duration.ofSeconds(20)).kvTimeout(Duration.ofSeconds(10)))
cluster_options = ClusterOptions.clusterOptions("Administrator", "esabhcuoc").environment(cluster_env.build())
cluster = Cluster.connect("172.23.104.162", cluster_options)
STATEMENT = "select meta().id from `QE-server-pool` where os='centos' and '12hrreg' in poolId or 'regression' in poolId or 'magmareg' in poolId;"
result = cluster.query(STATEMENT);
count = 1
for server in result.rowsAsObject():
print("--+--+--+--+-- %s. CHECKING ON SERVER: %s --+--+--+--+--"
% (count, server.get("id")))
count += 1
check_coredump_exist(server.get("id"))
if __name__ == "__main__":
scan_all_slaves()
scan_all_servers()
if failed:
for server in failed:
print("ssh failed: %s" % server)
|
import sqlite3
import functools
import inspect
import hashlib
import os, errno
import logging
from os import path
import numpy
def make_dirs(p):
try:
os.makedirs(p)
except OSError as e:
if e.errno == errno.EEXIST:
return
else:
raise
PREFIX = 'sisfft-cached-tests'
make_dirs(PREFIX)
def disk_memo(f):
@functools.wraps(f)
def g(*args, **kwargs):
dirname = path.join(PREFIX, f.__name__)
fname = 'f'
for a in args:
if isinstance(a, numpy.ndarray):
# hash a numpy array, don't use it directly, because
# it could be quite long
h = hashlib.sha256()
h.update(a.data)
fname += '_' + h.hexdigest()
else:
# use everything else in the directory name, so
# finding the relevant files for a GC is easier
dirname = path.join(dirname, repr(a))
for k,v in kwargs.items():
if isinstance(v, numpy.ndarray):
h = hashlib.sha256()
                h.update(v.data)
fname += '_%s=%s' % (k, h.hexdigest())
else:
dirname = path.join(dirname, '%s=%s' % (k, repr(v)))
fname += '.npy'
logging.info('looking in %s for %s' % (dirname, fname))
make_dirs(dirname)
full_path = path.join(dirname, fname)
try:
loaded = numpy.load(full_path)
logging.info(' found')
return loaded
        except (IOError, OSError, ValueError):
logging.info(' not found, recomputing')
value = f(*args, **kwargs)
numpy.save(full_path, value)
return value
if os.environ.get('SISFFT_NO_CACHE', '0') == '1':
return f
else:
return g
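# Illustrative usage sketch; the function below is hypothetical and only shows
# how disk_memo is meant to be applied (it is not used by the tests themselves).
@disk_memo
def _example_moving_average(a, n):
    # The result is cached on disk under PREFIX, keyed by a sha256 of the array
    # argument and the repr of the scalar argument.
    return numpy.convolve(a, numpy.ones(n) / float(n))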
|
from django.urls import path
from .views import AccountsSignup, AccountsPanel, AccountsUpdate, AccountsUsersList, AccountsLogin,\
choice_location_manual, choice_location_api
from django.contrib.auth import views
from django.contrib.auth.decorators import login_required
urlpatterns = [
path('signup/', AccountsSignup.as_view(), name='signup'),
path('login/', AccountsLogin.as_view(), name='login'),
path('panel/', login_required(AccountsPanel.as_view()), name='panel'),
path('panel/<int:pk>', AccountsUpdate.as_view(), name='user_update'),
path('panel/userslist', AccountsUsersList.as_view(), name='users_list'),
path('ajax/choice_location_manual/', choice_location_manual, name='choice_location_manual'),
path('ajax/choice_location_api/', choice_location_api, name='choice_location_api'),
# standard django.contrib.auth templates
path('logout/', views.LogoutView.as_view(), name='logout'),
path('password_change/', views.PasswordChangeView.as_view(), name='password_change'),
path('password_change/done/', views.PasswordChangeDoneView.as_view(), name='password_change_done'),
path('password_reset/', views.PasswordResetView.as_view(), name='password_reset'),
path('password_reset/done/', views.PasswordResetDoneView.as_view(), name='password_reset_done'),
path('reset/<uidb64>/<token>/', views.PasswordResetConfirmView.as_view(), name='password_reset_confirm'),
path('reset/done/', views.PasswordResetCompleteView.as_view(), name='password_reset_complete'),
]
|
"""Some useful utilities when dealing with neural nets w/ tensorflow.
Parag K. Mital, Jan. 2016
"""
import tensorflow as tf
import numpy as np
def montage_batch(images):
"""Draws all filters (n_input * n_output filters) as a
montage image separated by 1 pixel borders.
Parameters
----------
    images : Tensor
        Input tensor to create montage of.
Returns
-------
m : numpy.ndarray
Montage image.
"""
img_h = images.shape[1]
img_w = images.shape[2]
n_plots = int(np.ceil(np.sqrt(images.shape[0])))
m = np.ones(
(images.shape[1] * n_plots + n_plots + 1,
images.shape[2] * n_plots + n_plots + 1, 3)) * 0.5
for i in range(n_plots):
for j in range(n_plots):
this_filter = i * n_plots + j
if this_filter < images.shape[0]:
this_img = images[this_filter, ...]
m[1 + i + i * img_h:1 + i + (i + 1) * img_h,
1 + j + j * img_w:1 + j + (j + 1) * img_w, :] = this_img
return m
# %%
def montage(W):
"""Draws all filters (n_input * n_output filters) as a
montage image separated by 1 pixel borders.
Parameters
----------
W : Tensor
Input tensor to create montage of.
Returns
-------
m : numpy.ndarray
Montage image.
"""
W = np.reshape(W, [W.shape[0], W.shape[1], 1, W.shape[2] * W.shape[3]])
n_plots = int(np.ceil(np.sqrt(W.shape[-1])))
m = np.ones(
(W.shape[0] * n_plots + n_plots + 1,
W.shape[1] * n_plots + n_plots + 1)) * 0.5
for i in range(n_plots):
for j in range(n_plots):
this_filter = i * n_plots + j
if this_filter < W.shape[-1]:
m[1 + i + i * W.shape[0]:1 + i + (i + 1) * W.shape[0],
1 + j + j * W.shape[1]:1 + j + (j + 1) * W.shape[1]] = (
np.squeeze(W[:, :, :, this_filter]))
return m
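# Illustrative sketch (assumption): visualizing a montage of random convolution
# weights with matplotlib; not executed anywhere in this module.
#
#   import matplotlib.pyplot as plt
#   W = np.random.randn(5, 5, 1, 16)   # [filter_h, filter_w, n_input, n_output]
#   plt.imshow(montage(W), cmap='gray')
#   plt.show()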
# %%
def corrupt(x):
"""Take an input tensor and add uniform masking.
Parameters
----------
x : Tensor/Placeholder
Input to corrupt.
Returns
-------
x_corrupted : Tensor
50 pct of values corrupted.
"""
return tf.mul(x, tf.cast(tf.random_uniform(shape=tf.shape(x),
minval=0,
maxval=2,
dtype=tf.int32), tf.float32))
# %%
def weight_variable(shape):
'''Helper function to create a weight variable initialized with
a normal distribution
Parameters
----------
shape : list
Size of weight variable
'''
initial = tf.random_normal(shape, mean=0.0, stddev=0.01)
return tf.Variable(initial)
# %%
def bias_variable(shape):
'''Helper function to create a bias variable initialized with
a constant value.
Parameters
----------
shape : list
Size of weight variable
'''
initial = tf.random_normal(shape, mean=0.0, stddev=0.01)
return tf.Variable(initial)
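# Illustrative sketch (assumption): wiring these helpers into a single
# fully-connected layer over a corrupted input; not executed here.
#
#   x = tf.placeholder(tf.float32, [None, 784])
#   W = weight_variable([784, 256])
#   b = bias_variable([256])
#   h = tf.nn.relu(tf.matmul(corrupt(x), W) + b)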
|
# This is an example of file handling using Python
def game():
return int(input("enter score "))
score=game()
with open("Highscore.txt") as f:
hiScoreStr=f.read()
if hiScoreStr=='':
with open("Highscore.txt","w") as f:
f.write(str(score))
print("updated")
elif int(hiScoreStr)<score:
with open("Highscore.txt","w") as f:
f.write(str(score))
print("updated")
elif int(hiScoreStr)>=score:
print(hiScoreStr)
print("do you want to reset?")
choice = input("enter yes or no in lowercase ")
if choice == "yes":
score = int(input("enter new value "))
with open("Highscore.txt","w") as f:
f.write(str(score))
print("updated")
else:
print("Appreciate your patience")
# re-open the file to show the final stored high score (the earlier handles are closed)
with open("Highscore.txt") as f:
    print(f.read())
|
# Copyright 2010 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from Structure import Structure;
class GIF_IMAGE(Structure):
type_name = 'GIF_IMAGE';
def __init__(self, stream, offset, max_size, parent, name):
import C;
from GIF_BLOCK import GIF_BLOCK;
from GIF_COLORTABLE import GIF_COLORTABLE;
from GIF_IMAGE_DESCRIPTOR import GIF_IMAGE_DESCRIPTOR;
from LZW_compressed_data import LZW_compressed_data;
Structure.__init__(self, stream, offset, max_size, parent, name);
self._descriptor = self.Member(GIF_IMAGE_DESCRIPTOR, 'descriptor');
flags = self._descriptor._Flags;
self._has_local_color_table = flags._LocalColorTable.value == 1;
if self._has_local_color_table:
self._local_color_table_entries = \
2 ** (flags._SizeLocalColorTable.value + 1);
self._local_color_table_sorted = flags._Sort.value == 1;
self._local_color_table = self.Member(GIF_COLORTABLE, \
'local_color_table', self._local_color_table_entries, \
self._local_color_table_sorted);
else:
self._local_color_table = None;
self._lzw_minimum_code_size = self.Member(C.BYTE, 'LZW_minimum_code_size');
if self._lzw_minimum_code_size.value == 0:
self._lzw_minimum_code_size.warnings.append('expected value > 0');
self._compressed_pixel_data_container = self.Member(GIF_BLOCK, 'pixel_data');
self._pixel_data_container = \
self._compressed_pixel_data_container.ContainMember( \
LZW_compressed_data, 'pixel_data', \
self._lzw_minimum_code_size.value);
self._pixel_data = self._pixel_data_container.ContainMember( \
C.STRING, 'pixel_data', \
self._descriptor._Width.value * self._descriptor._Height.value);
|
# -*- coding: utf-8 -*-
# Copyright 2017, IBM.
#
# This source code is licensed under the Apache License, Version 2.0 found in
# the LICENSE.txt file in the root directory of this source tree.
"""Backends Test."""
import json
import jsonschema
from qiskit import IBMQ, Aer
from qiskit.backends.aer import AerProvider
from .common import Path, QiskitTestCase, requires_qe_access
class TestBackends(QiskitTestCase):
"""Qiskit Backends (Object) Tests."""
def test_aer_backends_exist(self):
"""Test if there are local backends.
If all correct some should exists.
"""
aer_provider = AerProvider()
local = aer_provider.backends()
self.assertTrue(len(local) > 0)
@requires_qe_access
def test_remote_backends_exist(self, qe_token, qe_url):
"""Test if there are remote backends.
If all correct some should exists.
"""
IBMQ.enable_account(qe_token, qe_url)
remotes = IBMQ.backends()
self.assertTrue(len(remotes) > 0)
@requires_qe_access
def test_remote_backends_exist_real_device(self, qe_token, qe_url):
"""Test if there are remote backends that are devices.
If all correct some should exists.
"""
IBMQ.enable_account(qe_token, qe_url)
remotes = IBMQ.backends(simulator=False)
self.assertTrue(remotes)
@requires_qe_access
def test_remote_backends_exist_simulator(self, qe_token, qe_url):
"""Test if there are remote backends that are simulators.
If all correct some should exists.
"""
IBMQ.enable_account(qe_token, qe_url)
remotes = IBMQ.backends(simulator=True)
self.assertTrue(remotes)
def test_get_backend(self):
"""Test get backends.
If all correct should return a name the same as input.
"""
backend = Aer.backends(name='qasm_simulator_py')[0]
self.assertEqual(backend.name(), 'qasm_simulator_py')
def test_aer_backend_status(self):
"""Test backend_status.
If all correct should pass the validation.
"""
schema_path = self._get_resource_path(
'backend_status_schema.json', path=Path.SCHEMAS)
with open(schema_path, 'r') as schema_file:
schema = json.load(schema_file)
for backend in Aer.backends():
status = backend.status()
jsonschema.validate(status.to_dict(), schema)
@requires_qe_access
def test_remote_backend_status(self, qe_token, qe_url):
"""Test backend_status.
If all correct should pass the validation.
"""
schema_path = self._get_resource_path(
'backend_status_schema.json', path=Path.SCHEMAS)
with open(schema_path, 'r') as schema_file:
schema = json.load(schema_file)
IBMQ.enable_account(qe_token, qe_url)
for backend in IBMQ.backends():
status = backend.status()
jsonschema.validate(status.to_dict(), schema)
def test_aer_backend_configuration(self):
"""Test backend configuration.
If all correct should pass the validation.
"""
schema_path = self._get_resource_path(
'backend_configuration_schema.json', path=Path.SCHEMAS)
with open(schema_path, 'r') as schema_file:
schema = json.load(schema_file)
aer_backends = Aer.backends()
for backend in aer_backends:
configuration = backend.configuration()
jsonschema.validate(configuration.to_dict(), schema)
@requires_qe_access
def test_remote_backend_configuration(self, qe_token, qe_url):
"""Test backend configuration.
If all correct should pass the validation.
"""
schema_path = self._get_resource_path(
'backend_configuration_schema.json', path=Path.SCHEMAS)
with open(schema_path, 'r') as schema_file:
schema = json.load(schema_file)
IBMQ.enable_account(qe_token, qe_url)
remotes = IBMQ.backends()
for backend in remotes:
configuration = backend.configuration()
jsonschema.validate(configuration.to_dict(), schema)
def test_aer_backend_properties(self):
"""Test backend properties.
If all correct should pass the validation.
"""
aer_backends = Aer.backends()
for backend in aer_backends:
properties = backend.properties()
self.assertEqual(properties, None)
@requires_qe_access
def test_remote_backend_properties(self, qe_token, qe_url):
"""Test backend properties.
If all correct should pass the validation.
"""
schema_path = self._get_resource_path(
'backend_properties_schema.json', path=Path.SCHEMAS)
with open(schema_path, 'r') as schema_file:
schema = json.load(schema_file)
IBMQ.enable_account(qe_token, qe_url)
remotes = IBMQ.backends(simulator=False)
for backend in remotes:
properties = backend.properties()
if backend.configuration().simulator:
self.assertEqual(properties, None)
else:
jsonschema.validate(properties.to_dict(), schema)
|
from flask import render_template, current_app, copy_current_request_context
from flask_mail import Message
from . import mail
from threading import Thread
def send_email(to, subject, template, **kwargs):
msg = Message(current_app.config['FLASKY_MAIL_SUBJECT_PREFIX'] + subject,
sender=current_app.config['FLASKY_MAIL_SENDER'], recipients=[to])
msg.body = render_template(template + '.txt', **kwargs)
msg.html = render_template(template + '.html', **kwargs)
@copy_current_request_context
def send_asc_mail(msg):
mail.send(msg)
thr = Thread(name='emails',target=send_asc_mail, args=(msg,))
thr.start()
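# Illustrative usage sketch (recipient, subject, template name and kwargs are
# assumptions): renders templates/email/welcome.txt and .html and sends the
# message on a background thread.
#
#   send_email('user@example.com', 'Welcome!', 'email/welcome', user=user)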
|
word = input("Give me a word: ")
wrong = True
while wrong:
if word == "banana":
wrong = False
print("END GAME")
else:
print("WRONG")
word = input("Give me a word: ")
|
from django.db import models
from employes import Employes
E_TYPE = (
(0, 'SICKNESS'),
(1, 'HOLIDAY'),
(2, 'MOVING')
)
class Absences(models.Model):
user = models.ForeignKey(Employes)
From = models.DateTimeField()
to = models.DateTimeField()
type = models.CharField(max_length=1, choices=E_TYPE)
justification = models.TextField(blank=True)
accepted = models.BooleanField(default=False)
|
from sqlite_utils import Database
db = Database("cat_database.db")
# This line creates a "cats" table if one does not already exist:
db["cats"].insert_all([
{"id": 1, "age": 4, "name": "Mittens"},
{"id": 2, "age": 2, "name": "Fluffy"}
], pk="id")
|
from __future__ import absolute_import
from erlpack.types import Atom
from erlpack import pack
def test_small_atom():
atm = Atom('hello world')
assert pack(atm) == b'\x83s\x0bhello world'
def test_large_atom():
atm = Atom('test ' * 100)
assert pack(atm) == (
b'\x83d\x01\xf4test test test test test test test test test test test test test test test test test test test '
b'test test test test test test test test test test test test test test test test test test test test test '
b'test test test test test test test test test test test test test test test test test test test test test '
b'test test test test test test test test test test test test test test test test test test test test test '
b'test test test test test test test test test test test test test test test test test test '
)
|
from yaml_config.configuration import cut_protocol, open_config
|
"""
Notify Linux.
Copyright (c) 2013 - 2016 Isaac Muse <isaacmuse@gmail.com>
License: MIT
"""
import subprocess
import os
from . import util
__all__ = ("get_notify", "alert", "setup", "destroy")
PLAYERS = ('paplay', 'aplay', 'play')
class Options:
"""Notification options."""
icon = None
notify = None
app_name = ""
sound = None
player = None
@classmethod
def clear(cls):
"""Clear."""
cls.icon = None
cls.notify = None
cls.app_name = ""
cls.sound = None
cls.player = None
def _alert(sound=None, player=None):
"""Play an alert sound for the OS."""
if sound is None and Options.sound is not None:
sound = Options.sound
if player is None and Options.player is not None:
player = Options.player
if player is not None and sound is not None:
try:
if player == 'play':
subprocess.call([player, '-q', sound])
else:
subprocess.call([player, sound])
except Exception:
pass
def alert():
"""Alert."""
_alert()
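# The functions below are wrapped in staticmethod because they get stored on the
# Options class (Options.notify); without the wrapper, Python 2 would turn a
# plain function assigned there into an unbound method on attribute access,
# which appears to be why they are decorated this way at module level.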
@staticmethod
def notify_osd_fallback(title, message, sound, fallback):
"""Ubuntu Notify OSD notifications fallback (just sound)."""
# Fallback to wxPython notification
fallback(title, message, sound)
try:
if subprocess.call(["notify-send", "--version"]) != 0:
raise ValueError("Notification support does not appear to be available")
@staticmethod
def notify_osd_call(title, message, sound, fallback):
"""Ubuntu Notify OSD notifications."""
try:
params = ["notify-send", "-a", Options.app_name, "-t", "3000"]
if Options.icon is not None:
params += ["-i", Options.icon]
if message is not None:
params += [title, message]
subprocess.call(params)
if sound:
# Play sound if desired
alert()
except Exception:
# Fallback to wxPython notification
fallback(title, message, sound)
except Exception:
notify_osd_call = None
print("no notify osd")
def setup_notify_osd(app_name):
"""Setup Notify OSD."""
if notify_osd_call is not None:
Options.app_name = app_name
Options.notify = notify_osd_call
def setup(app_name, icon, **kwargs):
"""Setup."""
Options.icon = None
sound = kwargs.get('sound')
if sound is not None and os.path.exists(sound):
Options.sound = sound
player = kwargs.get('sound_player')
if player is not None and player in PLAYERS and util.which(player):
Options.player = player
try:
if icon is None or not os.path.exists(icon):
raise ValueError("Icon does not appear to be valid")
Options.icon = icon
except Exception:
pass
if notify_osd_call is not None:
Options.app_name = app_name
Options.notify = notify_osd_call
def destroy():
"""Destroy."""
Options.clear()
Options.notify = notify_osd_fallback
def get_notify():
"""Get notification."""
return Options.notify
Options.notify = notify_osd_fallback
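# Illustrative usage sketch (application name, icon and sound paths are
# assumptions). get_notify() returns a callable taking
# (title, message, sound, fallback):
#
#   setup("MyApp", "/usr/share/icons/myapp.png",
#         sound="/usr/share/sounds/alert.wav", sound_player="paplay")
#   notify = get_notify()
#   notify("Job done", "All tasks completed", True, fallback=lambda *args: None)
#   destroy()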
|
# -*- coding: utf-8 -*-
from argparse import ArgumentParser, Namespace, ArgumentDefaultsHelpFormatter
import os
from dotenv import load_dotenv
import logging
import logging.config
import yaml
from pyconsem import Convertor
log = logging.getLogger(__name__)
DEFAULT_CONFIG_FILE = 'pyconsem.yml'
load_dotenv()
def get_arg_parser():
""" Defines the arguments to this script by using Python's [argparse](https://docs.python.org/3/library/argparse.html)
"""
parser = ArgumentParser(description='Py Project to perform data conversion using semantics and knowledge-graphs',
formatter_class=ArgumentDefaultsHelpFormatter)
parser.add_argument(
'-l', '--logconf',
type=str,
metavar="LOGCONF.FILE",
action='store',
help='location of the logging config (yml) to use',
)
# TODO define your own command line arguments
parser.add_argument(
'-c', '--config',
type=str,
metavar="CONFIG.FILE",
action='store',
help='Config file for the conversion',
)
return parser
def yml_to_dict(fname):
with open(fname, 'r') as ymlf:
return yaml.load(ymlf, Loader=yaml.SafeLoader)
def enable_logging(args: Namespace):
if args.logconf is None:
return
logging.config.dictConfig(yml_to_dict(args.logconf))
log.info(f"Logging enabled according to config in {args.logconf}")
def load_config(args: Namespace):
config_file = args.config if args.config else os.environ.get('CONSEM_CONFIG', DEFAULT_CONFIG_FILE)
log.info(f"Using Convertor config from {config_file}")
log.info(f"env == {os.environ}")
return yml_to_dict(config_file)
def main():
""" The main entry point to this module.
"""
args = get_arg_parser().parse_args()
enable_logging(args)
config = load_config(args)
Convertor(config).run()
if __name__ == '__main__':
main()
|
import sys
import logging
import os
from datetime import datetime
from argparse import ArgumentParser, ArgumentDefaultsHelpFormatter
import numpy as NP
import pandas as PD
logger = logging.getLogger('pyrsss.mag.fm2iaga')
HEADER_TEMPLATE = """\
Format IAGA-2002 |
Source of Data CARISMA |
IAGA CODE {stn} |
Geodetic Latitude {lat:<8.3f} |
Geodetic Longitude {lon:<8.3f} |
Elevation {el:<8.3f} |
Reported XYZF |
DATE TIME DOY {stn}X {stn}Y {stn}Z {stn}F |
"""
def parse(fname):
"""
Parse FGM format data *fname*. Return :class:`DataFrame`
containing all information found in the file.
The FGM file format is used by CARISMA to store data and is
described here:
http://www.carisma.ca/carisma-data/fgm-data-format.
"""
with open(fname) as fid:
        siteid, lat, lon, date, pos_format, units, sample_rate = next(fid).split()
dt = []
x = []
y = []
z = []
flag = []
for line in fid:
cols = line.split()
dt.append(datetime.strptime(cols[0], '%Y%m%d%H%M%S'))
x.append(float(cols[1]))
y.append(float(cols[2]))
z.append(float(cols[3]))
if cols[4] == '.':
flag.append(False)
elif cols[4] == 'x':
flag.append(True)
else:
raise ValueError('unknown flag value {} encountered in {}'.format(cols[4], fname))
f = NP.hypot(x, NP.hypot(y, z))
df = PD.DataFrame(data={'x': x, 'y': y, 'z': z, 'f': f, 'flag': flag},
index=dt)
df.siteid = siteid
df.lat = float(lat)
df.lon = float(lon)
df.date = datetime.strptime(date, '%Y%m%d')
df.pos_format = pos_format
df.units = units
df.sample_rate = sample_rate
return df
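# Illustrative sketch of the FGM input this parser expects (values are made up;
# the format itself is documented at http://www.carisma.ca/carisma-data/fgm-data-format):
#
#   GILL 56.376 265.360 20150101 GEO nT 1.0
#   20150101000000 10077.00 432.10 58110.20 .
#   20150101000001 10077.10 432.00 58110.40 x
#
# The trailing '.' marks a clean sample and 'x' marks a flagged one.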
def fgm2iaga(path,
fgm_fname,
ftype='v',
output_template='{stn}{date:%Y%m%d}{ftype}{interval}.{interval}'):
"""
Parse FGM format file *fgm_fname* and reformat it to IAGA2002 and
    save at *path* (using *output_template* to form the file
name). Return the file name. The *ftype* denotes the file type: p
- provisional, d - definitive, q - quasi-definitive, or v -
variation.
"""
df = parse(fgm_fname)
delta = (df.index[1] - df.index[0]).total_seconds()
if delta == 1.0:
interval = 'sec'
elif delta == 60.0:
interval = 'min'
else:
raise ValueError('unknown data interval found in {}'.format(fgm_fname))
stn = df.siteid[:3].upper()
out_fname = os.path.join(path,
output_template.format(stn=stn.lower(),
date=df.date,
ftype=ftype,
interval=interval))
with open(out_fname, 'w') as fid:
fid.write(HEADER_TEMPLATE.format(stn=stn.upper(),
lat=df.lat,
lon=df.lon,
el=0))
for row in df.itertuples():
dt = row.Index
if row.flag:
X = Y = Z = F = 99999
else:
X = row.x
Y = row.y
Z = row.z
F = NP.linalg.norm([X, Y, Z])
fid.write('{date:%Y-%m-%d %H:%M:%S.000} {date:%j}'
' {X:>9.2f} {Y:>9.2f} {Z:>9.2f} {F:>9.2f}\n'.format(date=dt,
X=X,
Y=Y,
Z=Z,
F=F))
return out_fname
def main(argv=None):
if argv is None:
argv = sys.argv
    parser = ArgumentParser(description='Convert FGM format data (CARISMA) to IAGA2002 format.',
                            formatter_class=ArgumentDefaultsHelpFormatter)
parser.add_argument('output_path',
type=str,
help='path to store daily IAGA2002 format files')
parser.add_argument('fgm_fnames',
type=str,
nargs='+',
metavar='fgm_fname',
help='FGM format file')
args = parser.parse_args(argv[1:])
for fgm_fname in args.fgm_fnames:
iaga_fname = fgm2iaga(args.output_path, fgm_fname)
logger.info('{} -> {}'.format(fgm_fname, iaga_fname))
if __name__ == '__main__':
logging.basicConfig(level=logging.INFO)
sys.exit(main())
|
# Date: 12/28/2018
# Author: Mohamed
# Description: A list that will manage proxies
class ProxyList(object):
def __init__(self):
self.list = []
def __contains__(self, proxy):
for _proxy in self.list:
if _proxy.ip == proxy['ip'] and _proxy.port == proxy['port']:
return True
return False
def append(self, proxy):
self.list.append(proxy)
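# Illustrative usage sketch (the Proxy object below is an assumption -- append()
# expects an object with `ip` and `port` attributes, while `in` checks against a
# dict with 'ip' and 'port' keys):
#
#   proxies = ProxyList()
#   proxies.append(Proxy(ip='127.0.0.1', port=8080))
#   {'ip': '127.0.0.1', 'port': 8080} in proxies   # -> True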
|
#!/usr/bin/env python
'''
This software was created by United States Government employees at
The Center for Cybersecurity and Cyber Operations (C3O)
at the Naval Postgraduate School NPS. Please note that within the
United States, copyright protection is not available for any works
created by United States Government employees, pursuant to Title 17
United States Code Section 105. This software is in the public
domain and is not subject to copyright.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions
are met:
1. Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT,
INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
POSSIBILITY OF SUCH DAMAGE.
'''
# Filename: transfer.py
# Description:
# This is the script to be run by the student to transfer file
# to/from the container from/to the host.
# Note:
# 1. It needs 'start.config' file, where
# <labname> is given as a parameter to the script.
#
# It will perform the following tasks:
# a. If 'direction' is not specified, then 'direction' defaults to 'TOHOST',
#    i.e., the default transfer is from the container to the host.
# b. If 'direction' is 'TOCONTAINER', then the transfer is from the host to the container.
import glob
import json
import md5
import os
import re
import subprocess
import sys
import time
import zipfile
import ParseStartConfig
import labutils
import logging
import LabtainerLogging
LABS_ROOT = os.path.abspath("../../labs/")
def usage():
sys.stderr.write("Usage: transfer.py <labname> <filename> [<container>] [TOHOST|TOCONTAINER]\n")
exit(1)
# Usage: (see usage)
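# Accepted argument layouts (direction defaults to TOHOST when omitted):
#   transfer.py <labname> <filename>
#   transfer.py <labname> <filename> <container>
#   transfer.py <labname> <filename> TOHOST|TOCONTAINER
#   transfer.py <labname> <filename> <container> TOHOST|TOCONTAINER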
def main():
num_args = len(sys.argv)
print "Number of argument is %d" % num_args
container = None
requested_direction = "TOHOST"
if num_args < 3:
usage()
elif num_args == 3:
container = sys.argv[1]
elif num_args == 4:
# Assume the third argument is 'TOHOST|TOCONTAINER'
requested_direction = sys.argv[3]
if requested_direction == "TOHOST":
container = sys.argv[1]
elif requested_direction == "TOCONTAINER":
container = sys.argv[1]
else:
# If third argument is not 'TOHOST|TOCONTAINER' then
# it must be the container name
# and requested_direction defaults to 'TOHOST'
container = sys.argv[3]
elif num_args == 5:
requested_direction = sys.argv[4]
if requested_direction == "TOHOST":
container = sys.argv[3]
elif requested_direction == "TOCONTAINER":
container = sys.argv[3]
else:
usage()
else:
usage()
labname = sys.argv[1]
filename = sys.argv[2]
labutils.logger = LabtainerLogging.LabtainerLogging("labtainer.log", labname, "../../config/labtainer.config")
labutils.logger.info("Begin logging transfer.py for %s lab" % labname)
labutils.DoTransfer(labname, "student", container, filename, requested_direction)
return 0
if __name__ == '__main__':
sys.exit(main())
|
from fineract.objects.note import Note
def test_document_creation(fineract):
note = Note.create(fineract.request_handler, Note.CLIENTS, 1, 'Test Note')
assert isinstance(note, Note)
def test_retrieve_all_documents(fineract):
notes = Note.get_all(fineract.request_handler, Note.CLIENTS, 1)
assert notes.total_count > 0
def test_retrieve_single_note(fineract):
note = Note.create(fineract.request_handler, Note.CLIENTS, 1, 'Test Note')
note = Note.get(fineract.request_handler, Note.CLIENTS, 1, note.id)
assert isinstance(note, Note)
|
'''
MAP Client, a program to generate detailed musculoskeletal models for OpenSim.
Copyright (C) 2012 University of Auckland
This file is part of MAP Client. (http://launchpad.net/mapclient)
MAP Client is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
MAP Client is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with MAP Client. If not, see <http://www.gnu.org/licenses/>.
'''
import os
os.environ['ETS_TOOLKIT'] = 'qt'
from PySide2.QtWidgets import QDialog, QAbstractItemView, QTableWidgetItem
from PySide2.QtCore import Qt
from PySide2.QtCore import QThread, Signal
from mapclientplugins.fieldworkmeshfittingstep.ui_mayavifittingviewerwidget import Ui_Dialog
from traits.api import on_trait_change
from gias2.mappluginutils.mayaviviewer import MayaviViewerObjectsContainer, MayaviViewerDataPoints, \
MayaviViewerFieldworkModel, colours
import copy
class _ExecThread(QThread):
finalUpdate = Signal(tuple)
update = Signal(tuple)
def __init__(self, func):
QThread.__init__(self)
self.func = func
def run(self):
output = self.func(self.update)
self.finalUpdate.emit(output)
class MayaviFittingViewerWidget(QDialog):
'''
Configure dialog to present the user with the options to configure this step.
'''
defaultColor = colours['bone']
objectTableHeaderColumns = {'visible': 0, 'type': 1}
backgroundColour = (0.0, 0.0, 0.0)
_dataRenderArgs = {'mode': 'point', 'scale_factor': 0.1, 'color': (0, 1, 0)}
_GFUnfittedRenderArgs = {'color': (1, 0, 0)}
_GFFittedRenderArgs = {'color': (1, 1, 0)}
_GFD = [15, 15]
_fitParamTableRows = ('fit mode', 'mesh discretisation', 'sobelov discretisation', \
'sobelov weight', 'normal discretisation', 'normal weight', \
'max iterations', 'max sub-iterations', 'xtol', 'kdtree args', \
'n closest points', 'verbose', 'fixed nodes', 'GUI')
def __init__(self, data, GFUnfitted, config, fitFunc, resetCallback, parent=None):
'''
Constructor
'''
QDialog.__init__(self, parent)
self._ui = Ui_Dialog()
self._ui.setupUi(self)
self._scene = self._ui.MayaviScene.visualisation.scene
self._scene.background = self.backgroundColour
self.selectedObjectName = None
self._data = data
self._GFUnfitted = GFUnfitted
self._GFFitted = copy.deepcopy(self._GFUnfitted)
self._fitFunc = fitFunc
self._config = config
self._resetCallback = resetCallback
self._worker = _ExecThread(self._fitFunc)
self._worker.finalUpdate.connect(self._fitUpdate)
self._worker.update.connect(self._fitCallback)
# create self._objects
self._objects = MayaviViewerObjectsContainer()
self._objects.addObject('data', MayaviViewerDataPoints('data', self._data, renderArgs=self._dataRenderArgs))
self._objects.addObject('GF Unfitted', MayaviViewerFieldworkModel('GF Unfitted', self._GFUnfitted, self._GFD,
renderArgs=self._GFUnfittedRenderArgs))
self._objects.addObject('GF Fitted', MayaviViewerFieldworkModel('GF Fitted', self._GFFitted, self._GFD,
renderArgs=self._GFFittedRenderArgs))
self._makeConnections()
self._initialiseObjectTable()
self._initialiseSettings()
self._refresh()
# self.testPlot()
# self.drawObjects()
def _makeConnections(self):
self._ui.tableWidget.itemClicked.connect(self._tableItemClicked)
self._ui.tableWidget.itemChanged.connect(self._visibleBoxChanged)
self._ui.screenshotSaveButton.clicked.connect(self._saveScreenShot)
# self._ui.fitButton.clicked.connect(self._fit)
self._ui.fitButton.clicked.connect(self._worker.start)
self._ui.fitButton.clicked.connect(self._fitLockUI)
self._ui.resetButton.clicked.connect(self._reset)
self._ui.abortButton.clicked.connect(self._abort)
self._ui.acceptButton.clicked.connect(self._accept)
# connect up changes to params table
self._ui.fitParamsTableWidget.itemChanged.connect(self._fitParamsTableChanged)
def _initialiseSettings(self):
# set values for the params table
for row, param in enumerate(self._fitParamTableRows):
self._ui.fitParamsTableWidget.setItem(row, 0, QTableWidgetItem(self._config[param]))
def _fitParamsTableChanged(self, item):
param = self._fitParamTableRows[item.row()]
self._config[param] = item.text()
def _initialiseObjectTable(self):
self._ui.tableWidget.setRowCount(self._objects.getNumberOfObjects())
self._ui.tableWidget.verticalHeader().setVisible(False)
self._ui.tableWidget.setEditTriggers(QAbstractItemView.NoEditTriggers)
self._ui.tableWidget.setSelectionBehavior(QAbstractItemView.SelectRows)
self._ui.tableWidget.setSelectionMode(QAbstractItemView.SingleSelection)
self._addObjectToTable(0, 'data', self._objects.getObject('data'))
self._addObjectToTable(1, 'GF Unfitted', self._objects.getObject('GF Unfitted'))
self._addObjectToTable(2, 'GF Fitted', self._objects.getObject('GF Fitted'), checked=False)
self._ui.tableWidget.resizeColumnToContents(self.objectTableHeaderColumns['visible'])
self._ui.tableWidget.resizeColumnToContents(self.objectTableHeaderColumns['type'])
def _addObjectToTable(self, row, name, obj, checked=True):
typeName = obj.typeName
print(typeName)
print(name)
tableItem = QTableWidgetItem(name)
if checked:
tableItem.setCheckState(Qt.Checked)
else:
tableItem.setCheckState(Qt.Unchecked)
self._ui.tableWidget.setItem(row, self.objectTableHeaderColumns['visible'], tableItem)
self._ui.tableWidget.setItem(row, self.objectTableHeaderColumns['type'], QTableWidgetItem(typeName))
def _tableItemClicked(self):
selectedRow = self._ui.tableWidget.currentRow()
self.selectedObjectName = self._ui.tableWidget.item(selectedRow,
self.objectTableHeaderColumns['visible']).text()
self._populateScalarsDropDown(self.selectedObjectName)
print(selectedRow)
print(self.selectedObjectName)
def _visibleBoxChanged(self, tableItem):
# checked changed item is actually the checkbox
if tableItem.column() == self.objectTableHeaderColumns['visible']:
# get visible status
name = tableItem.text()
visible = tableItem.checkState() == Qt.CheckState.Checked
print('visibleboxchanged name', name)
print('visibleboxchanged visible', visible)
# toggle visibility
obj = self._objects.getObject(name)
print(obj.name)
if obj.sceneObject:
print('changing existing visibility')
obj.setVisibility(visible)
else:
print('drawing new')
obj.draw(self._scene)
def _getSelectedObjectName(self):
return self.selectedObjectName
def _getSelectedScalarName(self):
return 'none'
def drawObjects(self):
for name in self._objects.getObjectNames():
self._objects.getObject(name).draw(self._scene)
def _fitUpdate(self, fitOutput):
GFFitted, GFParamsFitted, RMSEFitted, errorsFitted = fitOutput
self._GFFitted = copy.deepcopy(GFFitted)
# update error fields
self._ui.RMSELineEdit.setText(str(RMSEFitted))
self._ui.meanErrorLineEdit.setText(str(errorsFitted.mean()))
self._ui.SDLineEdit.setText(str(errorsFitted.std()))
# update fitted GF
fittedObj = self._objects.getObject('GF Fitted')
fittedObj.updateGeometry(GFParamsFitted, self._scene)
fittedTableItem = self._ui.tableWidget.item(2, self.objectTableHeaderColumns['visible'])
fittedTableItem.setCheckState(Qt.Checked)
# unlock reg ui
self._fitUnlockUI()
def _fitLockUI(self):
self._ui.fitParamsTableWidget.setEnabled(False)
self._ui.fitButton.setEnabled(False)
self._ui.resetButton.setEnabled(False)
self._ui.acceptButton.setEnabled(False)
self._ui.abortButton.setEnabled(False)
def _fitUnlockUI(self):
self._ui.fitParamsTableWidget.setEnabled(True)
self._ui.fitButton.setEnabled(True)
self._ui.resetButton.setEnabled(True)
self._ui.acceptButton.setEnabled(True)
self._ui.abortButton.setEnabled(True)
def _fitCallback(self, output):
GFParamsFitted = output[1]
fittedObj = self._objects.getObject('GF Fitted')
fittedObj.updateGeometry(GFParamsFitted, self._scene)
fittedTableItem = self._ui.tableWidget.item(2, self.objectTableHeaderColumns['visible'])
fittedTableItem.setCheckState(Qt.Checked)
def _reset(self):
self._resetCallback()
fittedObj = self._objects.getObject('GF Fitted')
fittedObj.updateGeometry(self._GFUnfitted.field_parameters.copy(), self._scene)
fittedTableItem = self._ui.tableWidget.item(2, self.objectTableHeaderColumns['visible'])
fittedTableItem.setCheckState(Qt.Unchecked)
# clear error fields
self._ui.RMSELineEdit.clear()
self._ui.meanErrorLineEdit.clear()
self._ui.SDLineEdit.clear()
def _accept(self):
self._close()
def _abort(self):
self._reset()
self._close()
def _close(self):
for name in self._objects.getObjectNames():
self._objects.getObject(name).remove()
self._objects._objects = {}
        self._objects = None
# for r in xrange(self._ui.tableWidget.rowCount()):
# self._ui.tableWidget.removeRow(r)
def _refresh(self):
for r in range(self._ui.tableWidget.rowCount()):
tableItem = self._ui.tableWidget.item(r, self.objectTableHeaderColumns['visible'])
name = tableItem.text()
visible = tableItem.checkState() == Qt.CheckState.Checked
obj = self._objects.getObject(name)
print(obj.name)
if obj.sceneObject:
print('changing existing visibility')
obj.setVisibility(visible)
else:
print('drawing new')
obj.draw(self._scene)
def _saveScreenShot(self):
filename = self._ui.screenshotFilenameLineEdit.text()
width = int(self._ui.screenshotPixelXLineEdit.text())
height = int(self._ui.screenshotPixelYLineEdit.text())
self._scene.mlab.savefig(filename, size=(width, height))
# ================================================================#
@on_trait_change('scene.activated')
def testPlot(self):
# This function is called when the view is opened. We don't
# populate the scene when the view is not yet open, as some
# VTK features require a GLContext.
print('trait_changed')
# We can do normal mlab calls on the embedded scene.
self._scene.mlab.test_points3d()
# def _saveImage_fired( self ):
# self.scene.mlab.savefig( str(self.saveImageFilename), size=( int(self.saveImageWidth), int(self.saveImageLength) ) )
|
import math
import numpy as np
from math import sqrt
from shapely.geometry import Point, LineString, MultiLineString
"""
A series of math functions for angle computations.
Adapted for LineStrings from Abhinav Ramakrishnan's answer at https://stackoverflow.com/a/28261304/7375309.
"""
def _dot(vA, vB):
return vA[0]*vB[0]+vA[1]*vB[1]
def get_coord_angle(origin, distance, angle):
"""
    Returns the coordinates of the point that lies at *distance* from *origin*,
    along a direction that forms the given *angle* (in degrees) with the y axis.
Parameters
----------
origin: tuple of float
tuple of coordinates
distance: float
the distance from the origin coordinates
angle: float
the desired angle
Returns:
----------
coords: tuple
the resulting coordinates
"""
(disp_x, disp_y) = (distance * math.sin(math.radians(angle)), distance * math.cos(math.radians(angle)))
coord = (origin[0] + disp_x, origin[1] + disp_y)
return coord
def angle_line_geometries(line_geometryA, line_geometryB, degree = False, deflection = False, angular_change = False):
"""
    Given two LineStrings, computes the angle between them (as vectors, as deflection, or as angular change, depending on the flags). Returns the value in degrees or radians.
Parameters
----------
line_geometryA: LineString
the first line
line_geometryB: LineString
the other line; it must share a vertex with line_geometryA
degree: boolean
if True it returns value in degree, otherwise in radians
deflection: boolean
if True it computes angle of incidence between the two lines, otherwise angle between vectors
angular_change: boolean
        Each LineString runs from a "from" vertex to a "to" vertex; the two lines share one vertex, and that shared vertex is always used to compute the angle.
        If True, the angle of incidence is computed from the shared vertex and the vertex immediately adjacent to it on each line (an intermediate vertex, if one exists).
        For example, if line_geometryA is composed of three vertexes (from, to and an intermediate one), the intermediate vertex is used together with the shared vertex.
        When False, the angle is computed using only the from and to vertexes of each line, ignoring any intermediate vertexes.
Returns:
----------
angle: float
the resulting angle in radians or degrees
"""
    # extracting coordinates and creating lines
coordsA = list(line_geometryA.coords)
x_originA, y_originA = float("{0:.10f}".format(coordsA[0][0])), float("{0:.10f}".format(coordsA[0][1]))
x_secondA, y_secondA = float("{0:.10f}".format(coordsA[1][0])), float("{0:.10f}".format(coordsA[1][1]))
x_destinationA, y_destinationA = float("{0:.10f}".format(coordsA[-1][0])), float("{0:.10f}".format(coordsA[-1][1]))
x_second_lastA, y_second_lastA = float("{0:.10f}".format(coordsA[-2][0])), float("{0:.10f}".format(coordsA[-2][1]))
coordsB = list(line_geometryB.coords)
x_originB, y_originB = float("{0:.10f}".format(coordsB[0][0])), float("{0:.10f}".format(coordsB[0][1]))
x_secondB, y_secondB = float("{0:.10f}".format(coordsB[1][0])), float("{0:.10f}".format(coordsB[1][1]))
x_destinationB, y_destinationB = float("{0:.10f}".format(coordsB[-1][0])), float("{0:.10f}".format(coordsB[-1][1]))
x_second_lastB, y_second_lastB = float("{0:.10f}".format(coordsB[-2][0])), float("{0:.10f}".format(coordsB[-2][1]))
if angular_change:
if (x_destinationA, y_destinationA) == (x_destinationB, y_destinationB):
lineA = ((x_second_lastA, y_second_lastA), (x_destinationA, y_destinationA))
lineB = ((x_destinationB, y_destinationB), (x_second_lastB, y_second_lastB))
elif (x_destinationA, y_destinationA) == (x_originB, y_originB):
lineA = ((x_second_lastA, y_second_lastA), (x_destinationA, y_destinationA))
lineB = ((x_originB, y_originB), (x_secondB, y_secondB))
elif (x_originA, y_originA) == (x_originB, y_originB):
lineA = ((x_secondA, y_secondA), (x_originA, y_originA))
lineB = ((x_originB, y_originB), (x_secondB, y_secondB))
elif (x_originA, y_originA) == (x_destinationB, y_destinationB):
lineA = ((x_secondA, y_secondA), (x_originA, y_originA))
lineB = ((x_destinationB, y_destinationB), (x_second_lastB, y_second_lastB))
# no common vertex
        else: raise AngleError("The lines do not intersect; provide lines which share a common vertex")
# deflection on the entire lines
elif deflection:
if (x_destinationA, y_destinationA) == (x_destinationB, y_destinationB):
lineA = ((x_originA, y_originA), (x_destinationA, y_destinationA))
lineB = ((x_destinationB, y_destinationB), (x_originB, y_originB))
elif (x_destinationA, y_destinationA) == (x_originB, y_originB):
lineA = ((x_originA, y_originA), (x_destinationA, y_destinationA))
lineB = ((x_originB, y_originB), (x_destinationB, y_destinationB))
elif (x_originA, y_originA) == (x_originB, y_originB):
lineA = ((x_destinationA, y_destinationA), (x_originA, y_originA))
lineB = ((x_originB, y_originB), (x_destinationB, y_destinationB))
elif (x_originA, y_originA) == (x_destinationB, y_destinationB):
lineA = ((x_destinationA, y_destinationA), (x_originA, y_originA))
lineB = ((x_destinationB, y_destinationB), (x_originB, y_originB))
# no common vertex
        else: raise AngleError("The lines do not intersect; provide lines which share a common vertex")
# angle between vectors
else:
if (x_destinationA, y_destinationA) == (x_destinationB, y_destinationB):
lineA = ((x_destinationA, y_destinationA), (x_originA, y_originA))
lineB = ((x_destinationB, y_destinationB), (x_originB, y_originB))
elif (x_destinationA, y_destinationA) == (x_originB, y_originB):
lineA = ((x_destinationA, y_destinationA), (x_originA, y_originA))
lineB = ((x_originB, y_originB), (x_destinationB, y_destinationB))
elif (x_originA, y_originA) == (x_originB, y_originB):
lineA = ((x_originA, y_originA), (x_destinationA, y_destinationA))
lineB = ((x_originB, y_originB), (x_destinationB, y_destinationB))
elif (x_originA, y_originA) == (x_destinationB, y_destinationB):
lineA = ((x_originA, y_originA), (x_destinationA, y_destinationA))
lineB = ((x_destinationB, y_destinationB),(x_originB, y_originB))
# no common vertex
        else: raise AngleError("The lines do not intersect; provide lines which share a common vertex")
# Get nicer vector form
vA = [(lineA[0][0]-lineA[1][0]), (lineA[0][1]-lineA[1][1])]
vB = [(lineB[0][0]-lineB[1][0]), (lineB[0][1]-lineB[1][1])]
try:
# Get dot prod
dot_prod = _dot(vA, vB)
# Get magnitudes
magA = _dot(vA, vA)**0.5
magB = _dot(vB, vB)**0.5
        # Get cosine value
        cos_ = dot_prod/magA/magB
        # Get angle in radians and then convert to degrees
        angle_rad = math.acos(cos_)
# Basically doing angle <- angle mod 360
angle_deg = math.degrees(angle_rad)%360
    except (ValueError, ZeroDivisionError):
angle_deg = 0.0
angle_rad = 0.0
angle = angle_rad
if degree:
angle = angle_deg
return angle
def difference_angle_line_geometries(line_geometryA, line_geometryB):
"""
    Given two LineStrings, computes the absolute difference of the angles that they form with the y axis. Returns the value in degrees.
Parameters
----------
line_geometryA: LineString
the first line
line_geometryB: LineString
the other line; it must share a vertex with line_geometryA
Returns:
----------
difference_angle: float
the resulting difference in degrees
"""
    # extracting coordinates and creating lines
coordsA = list(line_geometryA.coords)
x_originA, y_originA = float("{0:.10f}".format(coordsA[0][0])), float("{0:.10f}".format(coordsA[0][1]))
x_destinationA, y_destinationA = float("{0:.10f}".format(coordsA[-1][0])), float("{0:.10f}".format(coordsA[-1][1]))
coordsB = list(line_geometryB.coords)
x_originB, y_originB = float("{0:.10f}".format(coordsB[0][0])), float("{0:.10f}".format(coordsB[0][1]))
x_destinationB, y_destinationB = float("{0:.10f}".format(coordsB[-1][0])), float("{0:.10f}".format(coordsB[-1][1]))
if x_originA == x_destinationA:
angle_A = np.pi/2
else: angle_A = np.arctan((y_destinationA-y_originA)/(x_destinationA-x_originA))
if x_originB == x_destinationB:
angle_B = np.pi/2
else: angle_B = np.arctan((y_destinationB-y_originB)/(x_destinationB-x_originB))
angle_A = math.degrees(angle_A)%360
angle_B = math.degrees(angle_B)%360
if angle_A > 180:
angle_A = angle_A-180
if angle_B > 180:
angle_B = angle_B-180
difference_angle = abs(angle_A - angle_B)
return difference_angle
def is_parallel(line_geometry_A, line_geometry_B, hard = False):
difference_angle = difference_angle_line_geometries(line_geometry_A, line_geometry_B)
if (difference_angle <= 30):
return True
line_coordsA = list(line_geometry_A.coords)
line_coordsB = list(line_geometry_B.coords)
if ((len(line_coordsA) == 2) | (len(line_coordsB) == 2)):
return False
if not hard:
# remove first coordinates (A,B)
line_geometry_A = LineString([coor for coor in line_coordsA[1:]])
line_geometry_B = LineString([coor for coor in line_coordsB[1:]])
difference_angle = difference_angle_line_geometries(line_geometry_A, line_geometry_B)
if (difference_angle <= 20) & (difference_angle >= -20):
return True
# remove first (A) and last (B)
line_geometry_B = LineString([coor for coor in line_coordsB[:-1]])
difference_angle = difference_angle_line_geometries(line_geometry_A, line_geometry_B)
if (difference_angle <= 20) & (difference_angle >= -20):
return True
# remove last (A) and first (B)
line_geometry_A = LineString([coor for coor in line_coordsA[:-1]])
line_geometry_B = LineString([coor for coor in line_coordsB[1:]])
difference_angle = difference_angle_line_geometries(line_geometry_A, line_geometry_B)
if (difference_angle <= 20) & (difference_angle >= -20):
return True
# remove last coordinates (A, B)
line_geometry_A = LineString([coor for coor in line_coordsA[:-1]])
line_geometry_B = LineString([coor for coor in line_coordsB[:-1]])
difference_angle = difference_angle_line_geometries(line_geometry_A, line_geometry_B)
if (difference_angle <= 20) & (difference_angle >= -20):
return True
if ((len(line_coordsA) == 3) | (len(line_coordsB) == 3)):
return False
line_geometry_A = LineString([coor for coor in line_coordsA[1:-1]])
line_geometry_B = LineString([coor for coor in line_coordsB[1:-1]])
difference_angle = difference_angle_line_geometries(line_geometry_A, line_geometry_B)
if (difference_angle <= 20) & (difference_angle >= -20):
return True
return False
def is_continuation(ix_lineA, ix_lineB, edges_gdf):
nameA = edges_gdf.loc[ix_lineA]['name']
nameB = edges_gdf.loc[ix_lineB]['name']
line_geometry_A = edges_gdf.loc[ix_lineA]['geometry']
line_geometry_B = edges_gdf.loc[ix_lineB]['geometry']
if is_parallel(line_geometry_A, line_geometry_B, hard = True):
return True
return ((nameA == nameB) & (is_parallel(line_geometry_A, line_geometry_B, hard = False)))
class Error(Exception):
"""Base class for other exceptions"""
class AngleError(Error):
"""Raised when not-intersecting lines are provided for computing angles"""
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('cms_lab_publications', '0002_auto_20150527_1148'),
]
operations = [
migrations.AddField(
model_name='publicationset',
name='bulk_pubmed_query',
field=models.TextField(verbose_name='Bulk Query', help_text='Enter PubMed IDs and/or PubMed URLs to get or create multiple Publications and add them to this Publication Set.<br>PubMed IDs/URLs must be separated by commas or whitespace.<br>To add files and tags to publication records, create publications individually via the Publication Admin (or below).', blank=True),
preserve_default=True,
),
migrations.AlterField(
model_name='publication',
name='redo_query',
field=models.BooleanField(verbose_name='redo PubMed query?', default=False, help_text='Check this box to redo the PubMed query.<br>Any manual changes to the PubMed metadata will be overwritten.'),
preserve_default=True,
),
migrations.AlterField(
model_name='publicationset',
name='label',
field=models.CharField(verbose_name='label', default='Publications', help_text='Enter a label for this Publication Set.<br>This will be the heading displayed above the publications.', max_length=255),
preserve_default=True,
),
migrations.AlterField(
model_name='publicationset',
name='name',
field=models.CharField(help_text="Enter a unique name for this Publication Set.<br>This won't be displayed on the site.", verbose_name='name', unique=True, max_length=255),
preserve_default=True,
),
migrations.AlterField(
model_name='publicationset',
name='pagination',
field=models.PositiveIntegerField(verbose_name='pagination', default=0, help_text="How many publications should be displayed per page? To show all at once, enter '0'.<br>Server may need to be restarted for changes to take effect."),
preserve_default=True,
),
migrations.AlterField(
model_name='publicationset',
name='publications',
field=models.ManyToManyField(to='cms_lab_publications.Publication', null=True, blank=True),
preserve_default=True,
),
]
|
"""Notifications API"""
import logging
from django.conf import settings
from django.db.models import Q
from django.contrib.auth.models import User
from channels.models import Subscription, ChannelGroupRole, Channel
from channels.api import get_admin_api
from channels.constants import ROLE_MODERATORS
from notifications.notifiers.exceptions import (
UnsupportedNotificationTypeError,
CancelNotificationError,
)
from notifications.models import (
EmailNotification,
PostEvent,
NotificationSettings,
NOTIFICATION_TYPE_FRONTPAGE,
NOTIFICATION_TYPE_COMMENTS,
NOTIFICATION_TYPE_MODERATOR,
FREQUENCY_IMMEDIATE,
FREQUENCY_DAILY,
FREQUENCY_WEEKLY,
FREQUENCY_NEVER,
)
from notifications.notifiers import comments, frontpage, moderator_posts
from notifications import tasks
from open_discussions.utils import chunks
log = logging.getLogger()
def ensure_notification_settings(user, skip_moderator_setting=False):
"""
Populates user with notification settings
Args:
user (User): user to create settings for
        skip_moderator_setting (boolean): Skip moderator notification creation
"""
existing_notification_types = NotificationSettings.objects.filter(
user=user
).values_list("notification_type", flat=True)
if NOTIFICATION_TYPE_FRONTPAGE not in existing_notification_types:
NotificationSettings.objects.get_or_create(
user=user,
notification_type=NOTIFICATION_TYPE_FRONTPAGE,
defaults={"trigger_frequency": FREQUENCY_DAILY},
)
if NOTIFICATION_TYPE_COMMENTS not in existing_notification_types:
NotificationSettings.objects.get_or_create(
user=user,
notification_type=NOTIFICATION_TYPE_COMMENTS,
defaults={"trigger_frequency": FREQUENCY_IMMEDIATE},
)
if not skip_moderator_setting:
for channel_group_role in ChannelGroupRole.objects.filter(
group__user=user, role=ROLE_MODERATORS
):
NotificationSettings.objects.get_or_create(
user=user,
notification_type=NOTIFICATION_TYPE_MODERATOR,
channel=channel_group_role.channel,
defaults={"trigger_frequency": FREQUENCY_IMMEDIATE},
)
def attempt_send_notification_batch(notification_settings_ids):
"""
Attempts to send notification for the given batch of ids
Args:
notification_settings_ids (list of int): list of NotificationSettings.ids
"""
notification_settings = NotificationSettings.objects.filter(
id__in=notification_settings_ids
)
for notification_setting in notification_settings:
try:
notifier = frontpage.FrontpageDigestNotifier(notification_setting)
notifier.attempt_notify()
except: # pylint: disable=bare-except
log.exception(
"Error attempting notification for user %s", notification_setting.user
)
def get_daily_frontpage_settings_ids():
"""Returns daily frontpage digest NotificationSettings"""
return (
NotificationSettings.frontpage_settings()
.filter(trigger_frequency=FREQUENCY_DAILY)
.filter(user__is_active=True)
.values_list("id", flat=True)
.order_by("id")
.iterator()
)
def get_weekly_frontpage_settings_ids():
"""Returns weekly frontpage digest NotificationSettings"""
return (
NotificationSettings.frontpage_settings()
.filter(trigger_frequency=FREQUENCY_WEEKLY)
.filter(user__is_active=True)
.values_list("id", flat=True)
.order_by("id")
.iterator()
)
def _get_notifier_for_notification(notification):
"""
Get the notifier for the notification's type
Args:
notification (NotificationBase): the notification to get a notifier for
Returns:
Notifier: instance of the notifier to use
"""
if notification.notification_type == NOTIFICATION_TYPE_MODERATOR:
channel_api = get_admin_api()
event = PostEvent.objects.get(email_notification=notification)
channel_name = channel_api.get_post(event.post_id).subreddit.display_name
notification_settings = NotificationSettings.objects.get(
user=notification.user,
notification_type=notification.notification_type,
channel__name=channel_name,
)
else:
notification_settings = NotificationSettings.objects.get(
user=notification.user, notification_type=notification.notification_type
)
if notification.notification_type == NOTIFICATION_TYPE_FRONTPAGE:
return frontpage.FrontpageDigestNotifier(notification_settings)
elif notification.notification_type == NOTIFICATION_TYPE_COMMENTS:
return comments.CommentNotifier(notification_settings)
elif notification.notification_type == NOTIFICATION_TYPE_MODERATOR:
return moderator_posts.ModeratorPostsNotifier(notification_settings)
else:
raise UnsupportedNotificationTypeError(
"Notification type '{}' is unsupported".format(
notification.notification_type
)
)
def send_unsent_email_notifications():
"""
Send all notifications that haven't been sent yet
"""
for notification_ids in chunks(
EmailNotification.objects.filter(state=EmailNotification.STATE_PENDING)
.exclude(notification_type=NOTIFICATION_TYPE_FRONTPAGE)
.values_list("id", flat=True),
chunk_size=settings.NOTIFICATION_SEND_CHUNK_SIZE,
):
EmailNotification.objects.filter(id__in=notification_ids).update(
state=EmailNotification.STATE_SENDING
)
tasks.send_email_notification_batch.delay(notification_ids)
for notification_ids in chunks(
EmailNotification.objects.filter(
state=EmailNotification.STATE_PENDING,
notification_type=NOTIFICATION_TYPE_FRONTPAGE,
).values_list("id", flat=True),
chunk_size=settings.NOTIFICATION_SEND_CHUNK_SIZE,
):
EmailNotification.objects.filter(id__in=notification_ids).update(
state=EmailNotification.STATE_SENDING
)
tasks.send_frontpage_email_notification_batch.delay(notification_ids)
def send_email_notification_batch(notification_ids):
"""
Sends a batch of notifications
Args:
notification_ids (list of int): notification ids to send
"""
for notification in EmailNotification.objects.filter(id__in=notification_ids):
try:
notifier = _get_notifier_for_notification(notification)
notifier.send_notification(notification)
except CancelNotificationError:
log.debug("EmailNotification canceled: %s", notification.id)
notification.state = EmailNotification.STATE_CANCELED
notification.save()
except: # pylint: disable=bare-except
log.exception("Error sending notification %s", notification)
def send_comment_notifications(post_id, comment_id, new_comment_id):
"""
Sends notifications for a reply to a given post notification
Args:
post_id (str): base36 post id
comment_id (str): base36 comment id
new_comment_id (str): base36 comment id of the new comment
"""
for subscription in (
Subscription.objects.filter(post_id=post_id)
.filter(Q(comment_id=comment_id) | Q(comment_id=None))
.distinct("user")
.iterator()
):
try:
notification_settings = NotificationSettings.objects.get(
user_id=subscription.user_id,
notification_type=NOTIFICATION_TYPE_COMMENTS,
)
except NotificationSettings.DoesNotExist:
log.exception(
"NotificationSettings didn't exist for subscription %s", subscription.id
)
continue
notifier = comments.CommentNotifier(notification_settings)
notifier.create_comment_event(subscription, new_comment_id)
def send_moderator_notifications(post_id, channel_name):
"""
Sends post notifications to channel moderators
Args:
post_id (str): base36 post id
channel_name (str): channel_name
"""
channel_api = get_admin_api()
for moderator in channel_api.list_moderators(channel_name):
self_user = User.objects.get(username=moderator.name)
try:
notification_setting = NotificationSettings.objects.get(
user=self_user,
notification_type=NOTIFICATION_TYPE_MODERATOR,
channel__name=channel_name,
)
except NotificationSettings.DoesNotExist:
channel = Channel.objects.get(name=channel_name)
notification_setting = NotificationSettings.objects.create(
user=self_user,
notification_type=NOTIFICATION_TYPE_MODERATOR,
channel=channel,
trigger_frequency=FREQUENCY_NEVER,
)
notifier = moderator_posts.ModeratorPostsNotifier(notification_setting)
notifier.create_moderator_post_event(self_user, post_id)
|
import csv
import random
full_events = []
with open('../../data/fullevents.csv') as csv_file:
csv_reader = csv.reader(csv_file)
next(csv_reader)
for row in csv_reader:
full_events.append(row)
shot_score = {}
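# Per-player list layout built below (indices into shot_score[name]):
#   0: on-target shots, 1: blocked shots, 2: total shots,
#   3: on-target free kick shots, 4: total free kick shots;
# later appended: 5: shot accuracy, 6: free kick accuracy,
#   7-9: intermediate score terms, 10: final position-weighted shot score.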
for i, item in enumerate(full_events):
if item[1] != 'Huskies':
continue
name = item[2]
if shot_score.get(name) is None:
shot_score[name] = [0, 0, 0, 0, 0]
if item[6] == 'Shot':
shots_add = 1
shots_on_add = 0
shots_block_add = 0
if full_events[i + 1][6] == 'Save attempt':
shots_on_add = 1
if full_events[i + 1][6] == 'Others on the ball' or \
((full_events[i + 1][6] == 'Pass' or full_events[i + 1][6] == 'Duel') and float(full_events[i + 1][5]) - float(item[5]) < 25):
shots_block_add = 1
cur = shot_score[name]
shot_score[name] = [cur[0] + shots_on_add, cur[1] + shots_block_add, cur[2] + shots_add,
cur[3], cur[4]]
if item[7] == 'Free kick shot':
free_kick_shots_add = 1
free_kick_shots_on_add = 0
if full_events[i + 1][6] == 'Save attempt':
free_kick_shots_on_add = 1
cur = shot_score[name]
shot_score[name] = [cur[0], cur[1], cur[2],
cur[3] + free_kick_shots_on_add, cur[4] + free_kick_shots_add]
for key in shot_score:
score = shot_score[key]
if score[2] - score[1] != 0:
shot_score[key].append(score[0] / (score[2] - score[1]))
else:
shot_score[key].append(0)
if score[4] != 0:
shot_score[key].append(score[3] / score[4])
else:
shot_score[key].append(0)
shot_score[key].append(shot_score[key][2] + 60)
if shot_score[key][2] == 0:
shot_score[key].append(random.randint(40, 50))
elif shot_score[key][2] <= 10:
shot_score[key].append(shot_score[key][5] * 25 + 45)
else:
shot_score[key].append(shot_score[key][5] * 60 + 40)
if shot_score[key][4] == 0:
shot_score[key].append(random.randint(40, 50))
else:
shot_score[key].append(shot_score[key][6] * 40 + 60)
score = shot_score[key]
if key.find('D') != -1:
shot_score[key].append(score[7] * 0.2 + score[8] * 0.5 + score[9] * 0.3)
elif key.find('M') != -1:
shot_score[key].append(score[7] * 0.3 + score[8] * 0.3 + score[9] * 0.4)
elif key.find('G') != -1:
shot_score[key].append(random.randint(20, 40))
elif key.find('F') != -1:
shot_score[key].append(score[7] * 0.3 + score[8] * 0.4 + score[9] * 0.3)
headers = ['Name', 'On-Target Shots', 'Blocked Shots', 'Total Shots', 'On-Target Free Kick Shots',
'Total Free Kick Shots', 'Shots Accuracy', 'Free Kick Shots Accuracy',
'Total Shot Score', 'Shot Accuracy Score', 'Free Kick Accuracy Score', 'Shot Score']
with open('../../data/Player_shot_data.csv', 'w', newline='') as csv_file:
csv_writer = csv.writer(csv_file, dialect='excel')
csv_writer.writerow(headers)
for key in shot_score:
row = shot_score[key]
row.insert(0, key)
csv_writer.writerow(row)
|
# -*- coding: utf-8 -*-
import shutil
import pytest
from click.testing import CliRunner
from parboil.parboil import boil
def test_boil_list_help(boil_runner):
# help message
result = boil_runner("list", "--help")
assert result.exit_code == 0
assert result.output.startswith("Usage: boil list [OPTIONS]")
def test_boil_list_config(boil_runner, repo_path, tpl_path, config_file):
runner = CliRunner()
# Install necessary templates
result = boil_runner.invoke(boil, ["install", f"{tpl_path}/test"])
assert result.exit_code == 0
result = boil_runner.invoke(boil, ["install", f"{tpl_path}/license"])
assert result.exit_code == 0
# normal use
result = boil_runner.invoke(boil, ["list"])
assert result.exit_code == 0
# wrong tpldir
result = runner.invoke(boil, ["--tpldir", "SOME/UNKNOWN/DIR", "list"])
assert result.exit_code == 1
assert "Template folder does not exist." in result.output
# custom tpldir
result = runner.invoke(boil, ["--tpldir", str(repo_path), "list"])
assert result.exit_code == 0
assert "Listing templates in " + str(repo_path) in result.output
assert "test" in result.output
assert "license" in result.output
# custom tpldir via envvar
result = runner.invoke(boil, ["list"], env=dict(BOIL_TPLDIR=str(repo_path)))
assert result.exit_code == 0
assert "Listing templates in " + str(repo_path) in result.output
assert "test" in result.output
assert "license" in result.output
# missing config file
result = runner.invoke(boil, ["-c", "SOME/UNKNOWN/FILE.json", "list"])
assert result.exit_code == 2
assert "No such file or directory" in result.output
# custom config file
result = runner.invoke(boil, ["-c", str(config_file), "list"])
assert result.exit_code == 0
assert "Listing templates in " + str(repo_path) in result.output
assert "test" in result.output
assert "license" in result.output
@pytest.mark.repo_path_contents('hello_world', 'test')
def test_boil_list_plain(boil_runner, repo_path):
result = boil_runner("--tpldir", str(repo_path), "list", "-p")
assert result.exit_code == 0
assert "hello_world\ntest\n" == result.output
|
# Author: Nikolaos Perrakis <nikos@nannyml.com>
#
# License: Apache Software License 2.0
"""NannyML Dataset module."""
from .datasets import (
load_modified_california_housing_dataset,
load_synthetic_binary_classification_dataset,
load_synthetic_multiclass_classification_dataset,
)
|
"""
Django settings for vauhtijuoksu project.
Generated by 'django-admin startproject' using Django 3.2.5.
For more information on this file, see
https://docs.djangoproject.com/en/3.2/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/3.2/ref/settings/
"""
from pathlib import Path
from decouple import config
BASE_DIR = Path(__file__).resolve().parent.parent.parent
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/3.2/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'django-insecure-8uvqb1l%txv#3u56j&@3#d8z8abp2e3$jqife%8@ze8djlj&dc'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = False
ALLOWED_HOSTS = ['*']
# Required for django.contrib.sites
SITE_ID = 1
# Application definition
INSTALLED_APPS = [
'djangocms_admin_style',
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'django.contrib.sites',
'cms',
'djangocms_link',
'djangocms_file',
'djangocms_picture',
'djangocms_snippet',
'djangocms_style',
'djangocms_text_ckeditor',
'menus',
'treebeard',
'sekizai',
'filer',
'easy_thumbnails',
'mptt',
'sass_processor',
'vj_cms'
]
MIDDLEWARE = [
'cms.middleware.utils.ApphookReloadMiddleware',
'django.middleware.security.SecurityMiddleware',
'whitenoise.middleware.WhiteNoiseMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
'django.middleware.locale.LocaleMiddleware',
'cms.middleware.user.CurrentUserMiddleware',
'cms.middleware.page.CurrentPageMiddleware',
'cms.middleware.toolbar.ToolbarMiddleware',
'cms.middleware.language.LanguageCookieMiddleware',
]
ROOT_URLCONF = 'vauhtijuoksu.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': ['templates'],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'sekizai.context_processors.sekizai',
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
'cms.context_processors.cms_settings',
],
},
},
]
# List of templates that can be used for CMS pages
CMS_TEMPLATES = [
('vauhtijuoksu/vj2021plus.html', 'VJ 2021+ theme'),
('vauhtijuoksu/vj2021plus_fullscreen.html', 'VJ 2021+ fullscreen theme'),
('vauhtijuoksu/vj2021.html', 'VJ 2021 theme'),
]
X_FRAME_OPTIONS = 'SAMEORIGIN'
WSGI_APPLICATION = 'vauhtijuoksu.wsgi.application'
# Database
# https://docs.djangoproject.com/en/3.2/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': BASE_DIR / 'db.sqlite3',
}
}
# Internationalization
# https://docs.djangoproject.com/en/3.2/topics/i18n/
LANGUAGES = [
('en', 'English'),
('fi', 'Suomi')
]
CMS_LANGUAGES = {
1: [
{
'code': 'fi',
'name': 'Suomi',
'public': True,
'fallbacks': ['en']
}
]
}
LANGUAGE_CODE = 'fi'
TIME_ZONE = config('TIME_ZONE', default='Europe/Helsinki')
USE_I18N = False
USE_L10N = False
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.2/howto/static-files/
STATICFILES_STORAGE = 'vauhtijuoksu.storage.WhiteNoiseStaticFilesStorage'
# manage.py compilescss -> this folder
SASS_PROCESSOR_ROOT = config('DJANGO_SASS_ROOT', default=str(BASE_DIR / 'sassfiles'))
STATIC_URL = config('DJANGO_STATIC_URL', default='/static/')
STATIC_ROOT = config('DJANGO_STATIC_ROOT', default=BASE_DIR / 'staticfiles')
STATICFILES_DIRS = [
BASE_DIR / 'static',
BASE_DIR / 'node_modules' / 'bootstrap' / 'dist' / 'js',
SASS_PROCESSOR_ROOT,
]
STATICFILES_FINDERS = [
'django.contrib.staticfiles.finders.FileSystemFinder',
'django.contrib.staticfiles.finders.AppDirectoriesFinder',
'sass_processor.finders.CssFinder',
]
# Include node_modules to import Bootstrap styles in SASS
SASS_PROCESSOR_INCLUDE_DIRS = [
str(BASE_DIR / 'node_modules')
]
SASS_PRECISION = 8
# Media
MEDIA_URL = config('DJANGO_MEDIA_URL', '/media/')
MEDIA_ROOT = config('DJANGO_MEDIA_ROOT', default=BASE_DIR / 'media')
THUMBNAIL_PROCESSORS = (
'easy_thumbnails.processors.colorspace',
'easy_thumbnails.processors.autocrop',
'filer.thumbnail_processors.scale_and_crop_with_subject_location',
'easy_thumbnails.processors.filters'
)
# Default primary key field type
# https://docs.djangoproject.com/en/3.2/ref/settings/#default-auto-field
DEFAULT_AUTO_FIELD = 'django.db.models.AutoField'
## Vauhtijuoksu API
VJ_API_URL = 'https://api.dev.vauhtijuoksu.fi'
VJ_LEGACY_API_URL = 'https://legacy.vauhtijuoksu.fi/api'
|
from django.db import models
class CustomManager(models.Manager):
"""
    Manager that excludes soft-deleted and inactive objects from the default queryset
"""
def get_queryset(self):
return super(CustomManager, self).get_queryset().filter(deleted=False, is_active=True)
class CommonFieldsMixin(models.Model):
"""
Contains Common fields for every model
"""
created_at = models.DateTimeField(auto_now_add=True)
updated_at = models.DateTimeField(auto_now=True)
deleted = models.BooleanField(default=False,
help_text="This is for soft delete")
is_active = models.BooleanField(default=True)
# everything will be used to query deleted objects e.g Model.everything.all()
everything = models.Manager()
objects = CustomManager()
def delete(self, *args, **kwargs):
self.deleted = True
self.is_active = False
self.save()
class Meta:
ordering = ['-updated_at', '-created_at']
abstract = True
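# Hedged usage sketch (the Article model below is hypothetical and not part of
# this module):
#
#     class Article(CommonFieldsMixin):
#         title = models.CharField(max_length=255)
#
#     article.delete()           # soft delete: sets deleted=True, is_active=False
#     Article.objects.all()      # default manager excludes soft-deleted/inactive rows
#     Article.everything.all()   # includes every row, deleted or not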
|
# 6. Create (by hand, not programmatically) a text file where each line describes a school
# subject and the number of lecture, practical and laboratory classes for that subject.
# Not every subject has to have all class types. Build a dictionary that maps each
# subject name to the total number of classes for it, and print the dictionary.
# Example file lines:
# Информатика: 100(л) 50(пр) 20(лаб).
# Физика: 30(л) — 10(лаб)
# Физкультура: — 30(пр) —
# Example dictionary: {“Информатика”: 170, “Физика”: 40, “Физкультура”: 30}
def clean_words(string):
return "".join(filter(str.isdigit, string))
print("")
dicter = {}
sum_m = 0
try:
with open(r"dz6.txt", "r", encoding="utf-8") as my_file:
for line in my_file:
clean_line = line.replace("\n", "")
lister = clean_line.split(" ")
if lister[1] != "—" and lister[1] != "-":
sum_m += int(clean_words(lister[1]))
if lister[2] != "—" and lister[2] != "-":
sum_m += int(clean_words(lister[2]))
if lister[3] != "—" and lister[3] != "-":
sum_m += int(clean_words(lister[3]))
dicter[lister[0][:-1]] = sum_m
sum_m = 0
except IOError:
print("Error")
print(dicter)
|
# Generated by Django 3.1.12 on 2021-06-11 20:45
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [("core", "0001_initial")]
operations = [
migrations.AlterField(
model_name="event",
name="worth",
field=models.IntegerField(
choices=[
(0, "Не учитывается"),
(1, "Творчество"),
(2, "Спорт"),
(3, "Волонтерство"),
(4, "Образовательный"),
(5, "Городское"),
],
default=0,
verbose_name="Ценность блоков",
),
)
]
|
import logging
import numpy as np
from scipy.sparse import load_npz, csr_matrix, save_npz
class NeighborGraph(object):
def __init__(self, ids, path=None):
self.ids = ids
self.ids_to_indexes = { id : i for i, id in enumerate(ids) }
self.path = path
n = len(self.ids)
if path:
logging.info('loading neighbors from %s', path)
self.graph = load_npz(path)
else:
self.graph = csr_matrix((n, n))
def merge(self, otherGraph):
self.graph = self.graph + otherGraph.graph
def neighbors(self, id):
if id not in self.ids_to_indexes:
return set()
row = self.graph[self.ids_to_indexes[id],:] # row is sparse!
rows, cols = row.nonzero()
return set([self.ids[i] for i in cols])
def index_neighbors(self, index):
row = self.graph[index,:] # row is sparse!
return row.nonzero()[1] # columns
def index_neighbors_and_weights(self, index, n=10):
coo = self.graph.getrow(index).tocoo()
indexes = np.argsort(coo.data)[:n]
# angular distance is sqrt(2 * (1 - cos(u,v)))
# we solve for cos below as 1 - ang-dist**2 / 2
weights = (1 - coo.data ** 2 / 2)[indexes]
# As an ad-hoc thing, it's good to make the weights drop off more quickly
return coo.col[indexes], weights * weights
def save_npz(self, path):
save_npz(path, self.graph)
def num_edges(self):
return len(self.graph.nonzero()[0])
def num_nodes(self):
return len(self.nodes())
def nodes(self):
return [self.ids[i] for i in set(self.graph.nonzero()[0])]
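# Hedged usage sketch with a tiny hand-built graph (not part of the original module):
if __name__ == '__main__':
    ids = ['a', 'b', 'c']
    g = NeighborGraph(ids)  # starts out with an empty 3x3 sparse matrix
    g.graph = csr_matrix(np.array([[0.0, 0.5, 0.0],
                                   [0.5, 0.0, 1.0],
                                   [0.0, 1.0, 0.0]]))
    print(g.neighbors('b'))              # {'a', 'c'}
    print(g.num_edges(), g.num_nodes())  # 4 3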
|
# -*- coding: utf-8 -*-
"""
Helpers for functional programming.
The :func:`identity` function simply returns its own inputs. This is useful for
bypassing print statements and many other cases. I also think it looks a little
nicer than ``lambda x: x``.
The :func:`inject_method` function "injects" another function into a class
instance as a method. This is useful for monkey patching.
"""
def identity(arg=None, *args, **kwargs):
"""
The identity function. Simply returns the value of its first input.
All other inputs are ignored. Defaults to None if called without args.
Args:
arg (object, default=None): some value
*args: ignored
**kwargs: ignored
Returns:
object: arg - the same value
Example:
>>> import ubelt as ub
>>> ub.identity(42)
42
>>> ub.identity(42, 42)
42
>>> ub.identity()
None
"""
return arg
def inject_method(self, func, name=None):
"""
Injects a function into an object instance as a bound method
The main use case of this function is for monkey patching. While monkey
patching is sometimes necessary it should generally be avoided. Thus, we
simply remind the developer that there might be a better way.
Args:
self (T):
Instance to inject a function into.
func (Callable[..., Any]):
The function to inject (must contain an arg for self).
name (str, default=None):
Name of the method. optional. If not specified the name of the
function is used.
Example:
>>> import ubelt as ub
>>> class Foo(object):
>>> def bar(self):
>>> return 'bar'
>>> def baz(self):
>>> return 'baz'
>>> self = Foo()
>>> assert self.bar() == 'bar'
>>> assert not hasattr(self, 'baz')
>>> ub.inject_method(self, baz)
>>> assert not hasattr(Foo, 'baz'), 'should only change one instance'
>>> assert self.baz() == 'baz'
>>> ub.inject_method(self, baz, 'bar')
>>> assert self.bar() == 'baz'
"""
# TODO: if func is a bound method we should probably unbind it
new_method = func.__get__(self, self.__class__)
if name is None:
name = func.__name__
setattr(self, name, new_method)
def compatible(config, func, start=0):
"""
Take the subset of dict items that can be passed to function as kwargs
Args:
config (Dict[str, Any]):
a flat configuration dictionary
func (Callable):
a function or method
start (int, default=0):
Only take args after this position. Set to 1 if calling with an
unbound method to avoid the ``self`` argument.
Returns:
Dict[str, Any] :
a subset of ``config`` that only contains items compatible with the
signature of ``func``.
Example:
>>> # An example use case is to select a subset of of a config
>>> # that can be passed to some function as kwargs
>>> import ubelt as ub
>>> # Define a function with args that match some keys in a config.
>>> def func(a, e, f):
>>> return a * e * f
>>> # Define a config that has a superset of items needed by the func
>>> config = {
... 'a': 2, 'b': 3, 'c': 7,
... 'd': 11, 'e': 13, 'f': 17,
... }
>>> # Call the function only with keys that are compatible
>>> func(**ub.compatible(config, func))
442
Example:
>>> # Test case with kwargs
>>> import ubelt as ub
>>> def func(a, e, f, *args, **kwargs):
>>> return a * e * f
>>> config = {
... 'a': 2, 'b': 3, 'c': 7,
... 'd': 11, 'e': 13, 'f': 17,
... }
>>> func(**ub.compatible(config, func))
Ignore:
# xdoctest: +REQUIRES(PY3)
# Test case with positional only 3.6 +
import ubelt as ub
def func(a, e, /, f):
return a * e * f
config = {
'a': 2, 'b': 3, 'c': 7,
'd': 11, 'e': 13, 'f': 17,
}
import pytest
with pytest.raises(ValueError):
func(**ub.compatible(config, func))
"""
import inspect
if hasattr(inspect, 'signature'): # pragma :nobranch
sig = inspect.signature(func)
argnames = []
has_kwargs = False
for arg in sig.parameters.values():
if arg.kind == inspect.Parameter.VAR_KEYWORD:
has_kwargs = True
elif arg.kind == inspect.Parameter.VAR_POSITIONAL:
# Ignore variadic positional args
pass
elif arg.kind == inspect.Parameter.POSITIONAL_ONLY:
raise ValueError('this does not work with positional only')
elif arg.kind in {inspect.Parameter.POSITIONAL_OR_KEYWORD,
inspect.Parameter.KEYWORD_ONLY}:
argnames.append(arg.name)
else: # nocover
raise TypeError(arg.kind)
else: # nocover
# For Python 2.7
spec = inspect.getargspec(func)
argnames = spec.args
has_kwargs = spec.keywords
if has_kwargs:
# kwargs could be anything, so keep everything
common = config
else:
common = {k: config[k] for k in argnames[start:]
if k in config} # dict-isect
return common
|
data = """F10
N3
F7
R90
F11"""
with open('input.txt') as file:
data = file.read()
instructions = []
for line in data.splitlines():
line = line.strip()
d, v = line[0], int(line[1:])
instructions.append((d, v))
dirs = ['E', 'S', 'W', 'N']
ship_dir = 'E'
x, y = 0, 0
def turn(d, v):
global ship_dir
turns = v // 90
ci = dirs.index(ship_dir)
if d == 'R':
ship_dir = dirs[(ci + turns) % len(dirs)]
    else:  # 'L' turns the ship left
ship_dir = dirs[(ci - turns) % len(dirs)]
def swim(d, v):
global x, y
if d == 'N':
y += v
elif d == 'S':
y -= v
elif d == 'E':
x += v
elif d == 'W':
x -= v
else:
assert False
for d, v in instructions:
if d in 'LR':
turn(d, v)
elif d in 'NSWE':
swim(d, v)
elif d == 'F':
swim(ship_dir, v)
print(x, y, abs(x) + abs(y))
|
"""
Regression tutorial
==================================================
This tutorial demonstrates that hardware-compatible Akida models can perform
regression tasks at the same accuracy level as a native CNN network.
This is illustrated through an age estimation problem using the
`UTKFace dataset <https://susanqq.github.io/UTKFace/>`__.
"""
######################################################################
# 1. Load the dataset
# ~~~~~~~~~~~~~~~~~~~
#
from akida_models.utk_face.preprocessing import load_data
# Load the dataset using akida_models preprocessing tool
x_train, y_train, x_test, y_test = load_data()
# For Akida inference, use uint8 raw data
x_test_akida = x_test.astype('uint8')
######################################################################
# 2. Load a pre-trained native Keras model
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#
# The model is a simplified version inspired by the `VGG <https://arxiv.org/abs/1409.1556>`__
# architecture. It consists of a succession of convolutional and pooling layers
# and ends with two fully connected layers that output a single value
# corresponding to the estimated age. This model architecture is compatible with
# the `design constraints <../../user_guide/cnn2snn.html#design-compatibility-constraints>`__
# before quantization. It is the starting point for a model runnable on the
# Akida NSoC.
#
# The pre-trained native Keras model loaded below was trained for 300 epochs.
# The model file is available on the BrainChip data server.
#
# The performance of the model is evaluated using the "Mean Absolute Error"
# (MAE). The MAE, a common metric for regression problems, is calculated as an
# average of absolute differences between the target values and the predictions.
# The MAE is a linear score, i.e. all the individual differences are equally
# weighted in the average.
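# As a quick standalone illustration of the MAE metric (toy values, not part of
# the original tutorial):
import numpy as np
_y_true = np.array([25.0, 40.0, 31.0])
_y_pred = np.array([27.0, 38.0, 35.0])
print("Toy MAE example: {0:.2f}".format(np.mean(np.abs(_y_true - _y_pred))))  # 2.67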
from tensorflow.keras.utils import get_file
from tensorflow.keras.models import load_model
# Retrieve the model file from the BrainChip data server
model_file = get_file("vgg_utk_face.h5",
"http://data.brainchip.com/models/vgg/vgg_utk_face.h5",
cache_subdir='models')
# Load the native Keras pre-trained model
model_keras = load_model(model_file)
model_keras.summary()
######################################################################
# Compile the native Keras model (required to evaluate the MAE)
model_keras.compile(optimizer='Adam', loss='mae')
# Check Keras model performance
mae_keras = model_keras.evaluate(x_test, y_test, verbose=0)
print("Keras MAE: {0:.4f}".format(mae_keras))
######################################################################
# 3. Load a pre-trained quantized Keras model satisfying Akida NSoC requirements
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#
# The above native Keras model is quantized and fine-tuned to get a quantized
# Keras model satisfying the
# `Akida NSoC requirements <../../user_guide/hw_constraints.html>`__.
# The first convolutional layer of our model uses 8-bit weights and other
# layers are quantized using 2-bit weights. All activations are 2 bits.
#
# The pre-trained model was obtained after two fine-tuning episodes:
#
# * the model is first quantized and fine-tuned with 4-bit weights and
# activations (first convolutional weights are 8 bits)
# * the model is then quantized and fine-tuned with 2-bit weights and
# activations (first convolutional weights are still 8 bits).
#
# The table below summarizes the "Mean Absolute Error" (MAE) results obtained
# after every training episode.
#
# +---------+----------------+---------------+------+--------+
# | Episode | Weights Quant. | Activ. Quant. | MAE | Epochs |
# +=========+================+===============+======+========+
# | 1 | N/A | N/A | 5.80 | 300 |
# +---------+----------------+---------------+------+--------+
# | 2 | 8/4 bits | 4 bits | 5.79 | 30 |
# +---------+----------------+---------------+------+--------+
# | 3 | 8/2 bits | 2 bits | 6.15 | 30 |
# +---------+----------------+---------------+------+--------+
#
# Here, we directly load the pre-trained quantized Keras model using the
# akida_models helper.
from akida_models import vgg_utk_face_pretrained
# Load the pre-trained quantized model
model_quantized_keras = vgg_utk_face_pretrained()
model_quantized_keras.summary()
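######################################################################
# For reference, the first quantization step described above could be
# reproduced from the native model with the cnn2snn quantization helper.
# This is only a sketch: the keyword names below are assumed from the legacy
# cnn2snn API, and the fine-tuning runs listed in the table are omitted.
from cnn2snn import quantize
model_quantized_sketch = quantize(model_keras,
                                  input_weight_quantization=8,
                                  weight_quantization=4,
                                  activ_quantization=4)
model_quantized_sketch.summary()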
######################################################################
# Compile the quantized Keras model (required to evaluate the MAE)
model_quantized_keras.compile(optimizer='Adam', loss='mae')
# Check Keras model performance
mae_quant = model_quantized_keras.evaluate(x_test, y_test, verbose=0)
print("Keras MAE: {0:.4f}".format(mae_quant))
######################################################################
# 4. Conversion to Akida
# ~~~~~~~~~~~~~~~~~~~~~~
#
# The quantized Keras model is now converted into an Akida model.
# After conversion, we evaluate the performance on the UTKFace dataset.
#
# Since activation sparsity has a great impact on Akida inference time, we
# also have a look at the average input and output sparsity of each layer on
# a subset of the dataset.
from cnn2snn import convert
# Convert the model
model_akida = convert(model_quantized_keras)
model_akida.summary()
#####################################################################
import numpy as np
# Check Akida model performance
y_akida = model_akida.evaluate(x_test_akida)
# Compute and display the MAE
mae_akida = np.sum(np.abs(y_test.squeeze() - y_akida.squeeze())) / len(y_test)
print("Akida MAE: {0:.4f}".format(mae_akida))
# For non-regression purposes
assert abs(mae_keras - mae_akida) < 0.5
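######################################################################
# The per-layer sparsity mentioned above would normally come from the Akida
# tooling; as a simple proxy (a sketch computed directly from the raw images),
# the fraction of zero-valued pixels gives the input sparsity of the test set.
print("Input sparsity: {:.4f}".format(np.mean(x_test_akida == 0)))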
######################################################################
# Let's summarize the MAE performance for the native Keras, the quantized Keras
# and the Akida model.
#
# +-----------------+------+
# | Model | MAE |
# +=================+======+
# | native Keras | 5.80 |
# +-----------------+------+
# | quantized Keras | 6.15 |
# +-----------------+------+
# | Akida | 6.21 |
# +-----------------+------+
######################################################################
# 5. Estimate age on a single image
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
import matplotlib.pyplot as plt
# Estimate age on a random single image and display Keras and Akida outputs
id = np.random.randint(0, len(y_test))
age_keras = model_keras.predict(x_test[id:id + 1])
plt.imshow(x_test_akida[id], interpolation='bicubic')
plt.xticks([]), plt.yticks([])
plt.show()
print("Keras estimated age: {0:.1f}".format(age_keras.squeeze()))
print("Akida estimated age: {0:.1f}".format(y_akida[id].squeeze()))
print(f"Actual age: {y_test[id].squeeze()}")
|
# Copyright 2020 The PEGASUS Authors..
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for pegasus.layers.embedding."""
from absl.testing import absltest
from pegasus.layers import embedding
import tensorflow as tf
class EmbeddingTest(tf.test.TestCase):
def test_embedding_layer_input(self):
embedding_layer = embedding.Embedding(12, 64, "test", tf.float32)
outputs = embedding_layer(tf.ones((5, 7), tf.int64), True)
self.assertEqual(outputs.shape, [5, 7, 64])
def test_embedding_layer_output(self):
embedding_layer = embedding.Embedding(12, 64, "test", tf.float32)
logits = embedding_layer(tf.ones((5, 7, 64)), False)
self.assertEqual(logits.shape, [5, 7, 12])
if __name__ == "__main__":
tf.compat.v1.enable_eager_execution()
absltest.main()
|
from django.shortcuts import render
from rest_framework.permissions import IsAuthenticated
# Third party import
from rest_framework.response import Response
# from rest_framework.views import APIView
from rest_framework.viewsets import ModelViewSet
# Local import
from main_api.models import (
About, Skill, Education, Experience, Recommendation, Interested, Client, Article
)
from main_api.serializers import (
AboutSerializer, SkillsSerializer, EducationsSerializer, ExperienceSerializer, RecommendationSerializer, InterestedSerializer, ClientsSerializer, ArticleSerializer
)
class AboutViews(ModelViewSet):
# permission_classes = [IsAuthenticated]
# def get(self, request, *args, **kwrgs):
queryset = About.objects.all()
serializer_class = AboutSerializer
# qs = About.objects.all()
# serializer = AboutSerializer(qs, many=True)
# return Response(serializer.data)
class SkillsViews(ModelViewSet):
# permission_classes = [IsAuthenticated]
queryset = Skill.objects.all()
serializer_class = SkillsSerializer
class EducationsViews(ModelViewSet):
# permission_classes = [IsAuthenticated]
queryset = Education.objects.all()
serializer_class = EducationsSerializer
class ExperienceViews(ModelViewSet):
# permission_classes = [IsAuthenticated]
queryset = Experience.objects.all()
serializer_class = ExperienceSerializer
class RecommendationViews(ModelViewSet):
# permission_classes = [IsAuthenticated]
queryset = Recommendation.objects.all()
serializer_class = RecommendationSerializer
class InterestedViews(ModelViewSet):
# permission_classes = [IsAuthenticated]
queryset = Interested.objects.all()
serializer_class = InterestedSerializer
class ClientsViews(ModelViewSet):
# permission_classes = [IsAuthenticated]
queryset = Client.objects.all()
serializer_class = ClientsSerializer
class ArticleViews(ModelViewSet):
# permission_classes = [IsAuthenticated]
queryset = Article.objects.all()
serializer_class = ArticleSerializer
|
import os
from decouple import config, Csv
import dj_database_url
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
SECRET_KEY = config('SECRET_KEY')
DEBUG = config('DEBUG', default=False, cast=bool)
ALLOWED_HOSTS = config('ALLOWED_HOSTS', cast=Csv())
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'chatbotapp.accounts',
'chatbotapp.chatbot',
'chatbotapp.core',
'corsheaders'
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'corsheaders.middleware.CorsMiddleware',
'whitenoise.middleware.WhiteNoiseMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'chatbotapp.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [os.path.join(BASE_DIR, 'chatbotapp/templates')],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'chatbotapp.wsgi.application'
#CORS_ORIGIN_ALLOW_ALL = True
CORS_ORIGIN_WHITELIST = config('CORS_ORIGIN_WHITELIST', cast=Csv())
# Database
DATABASES = {
'default': dj_database_url.config(default=config('DATABASE_URL'))
}
# DATABASES = {
# 'default': {
# 'ENGINE': 'django.db.backends.postgresql',
# 'NAME': config('DB_NAME'),
# 'USER': config('DB_USER'),
# 'PASSWORD': config('DB_PASSWORD'),
# 'HOST': config('DB_HOST'),
# 'PORT': config('DB_PORT'),
# }
# }
# Static files (CSS, JavaScript, Images)
STATIC_URL = '/static/'
STATICFILES_DIRS = [os.path.join(BASE_DIR, 'chatbotapp/static')]
STATIC_ROOT = os.path.join(BASE_DIR, 'staticfiles')
STATICFILES_STORAGE = 'whitenoise.storage.CompressedManifestStaticFilesStorage'
# Password validation
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'chatbotapp.core.utils.CustomMinimumLengthValidator',
},
{
'NAME': 'chatbotapp.core.utils.CustomNumericPasswordValidator',
}
]
# Internationalization
LANGUAGE_CODE = 'pt-br'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# EMAIL
EMAIL_BACKEND = 'django.core.mail.backends.smtp.EmailBackend'
# EMAIL_BACKEND = 'django.core.mail.backends.console.EmailBackend'
DEFAULT_FROM_EMAIL = config('EMAIL')
EMAIL_FROM = config('EMAIL')
EMAIL_USE_TLS = True
EMAIL_HOST = 'smtp.gmail.com'
EMAIL_HOST_USER = config('EMAIL')
EMAIL_HOST_PASSWORD = config('EMAIL_PASSWORD')
EMAIL_PORT = config('EMAIL_PORT', cast=int)
CONTACT_EMAIL = config('EMAIL')
# Auth
LOGIN_URL = 'core:login'
LOGIN_REDIRECT_URL = 'chatbot:conversation'
LOGOUT_URL = 'core:logout'
AUTH_USER_MODEL = 'accounts.User'
|
# Stores classes & functions needed to check harmony of a piece
from pyknon.music import Note, NoteSeq
from CheckMyChords.models import MusicPiece
from CheckMyChords.pyknon_extension import *
class Chord(object):
# a class storing a chord (as Note objects) and additional info about it
# (as well as methods for getting that info)
def __init__(self, soprano, alto, tenor, bass): # TODO: pass more arguments (key)
if (not isinstance(soprano, Note)) or \
(not isinstance(alto, Note)) or \
(not isinstance(tenor, Note)) or \
(not isinstance(bass, Note)):
raise TypeError(
'A Chord object should be built using Note objects'
+ ' as arguments.')
# TODO: add an iterator (over parts) and delete self.parts
self.soprano = soprano
self.alto = alto
self.tenor = tenor
self.bass = bass
self.parts = {"S": self.soprano,
"A": self.alto,
"T": self.tenor,
"B": self.bass}
self.root = None # int 0-11 or None
self.mode = None # 'M', 'm', 'M7', 'm7' or None
self.structure = {"S": None, # as intervals from the root
"A": None, # in "standard" notation (e.g. 5==fifth)
"T": None, # None == 'not recognised'
"B": None}
self._read_chord()
def __str__(self):
return "S:{s}, A:{a}, T:{t}, B:{b}".format(
s = self.soprano.to_str,
a = self.alto.to_str,
t = self.tenor.to_str,
b = self.bass.to_str)
def _read_chord(self):
# determines chord detailed info
self._find_root()
self._find_structure()
self._find_mode()
def _find_root(self):
        # method deducing chord details from the notes given
# TODO: Rewrite it to use values instead of midi_numbers
# (will possibly simplify conditions)
b = self.bass.midi_number
t = self.tenor.midi_number
a = self.alto.midi_number
s = self.soprano.midi_number
# finding root
        # if conditions ordered by decreasing "importance"
# I. looking for a fifth (including crossed voices)
if (t-b) in (7, 19, 31) \
or (a-b) in (7, 19, 31, 43) \
or (s-b) in (7, 19, 31, 43):
self.root = self.bass.value
elif (b-t) in (7,) \
or (a-t) in (7, 19, 31) \
or (s-t) in (7, 19, 31, 43):
self.root = self.tenor.value
elif (b-a) in (7,) \
or (t-a) in (7, 19) \
or (s-a) in (7, 19, 31):
self.root = self.alto.value
elif (b-s) in (7,) or (t-s) in (7,) or (a-s) in (7,):
self.root = self.soprano.value
# II. looking for a fourth (tonic only above fifth)
elif (b-t) in (5,) or (b-a) in (5,) or (b-s) in (5,):
self.root = self.bass.value
elif (t-b) in (5, 17, 29, 41) \
or (t-a) in (5, 17) \
or (t-s) in (5,):
self.root = self.tenor.value
elif (a-b) in (5, 17, 29, 41) \
or (a-t) in (5, 17, 29, 41) \
or (a-s) in (5,):
self.root = self.alto.value
elif (s-b) in (5, 17, 29, 41, 53) \
or (s-t) in (5, 17, 29, 41) \
or (s-a) in (5, 17, 29, 41):
self.root = self.soprano.value
# III. the fifth is missing, looking for a doubled interval
# (1113 chord or similar)
elif (b%12 == t%12) or (b%12 == a%12) or (b%12 == s%12):
self.root = self.bass.value
elif (t%12 == a%12) or (t%12 == s%12):
self.root = self.tenor.value
elif (a%12 == s%12):
self.root = self.alto.value
        # IV. no note is doubled (and the 5th is missing), assuming the bass
        # note is the root (to modify, should D9(>) be included)
else:
self.root = self.bass.value
def _find_structure(self):
        # method deducing the chord structure from the notes given (needs root)
        # leaves the initial value (None) for unrecognised notes
# minor/major thirds are distinguished
if self.root == None:
return
intervals = {
"0": "1", # root
"3": "3>", # minor third
"4": "3", # major third
"7": "5", # fith
"10": "7", # minor seventh
}
for voice, note in self.parts.items():
dist_from_root = str((note.midi_number - self.root) % 12)
if dist_from_root in intervals:
self.structure[voice] = intervals[dist_from_root]
def _find_mode(self):
# method determining chord mode (M, m, M7 or m7) from the chord
# structure
if "3>" in self.structure.values() and "3" in self.structure.values():
# both minor and major thirds in a chord at the same time
self.mode = None
elif "3>" in self.structure.values():
self.mode = "m7" if "7" in self.structure.values() else "m"
elif "3" in self.structure.values():
self.mode = "M7" if "7" in self.structure.values() else "M"
else: # no third in a chord (or find_structure failed)
self.mode = None
def harmonic_function(self, key):
# Tonic must be of correct mode
        # Tonic6 (TVI) must be of the opposite mode (e.g. an A minor chord in C major)
# Allows both minor and major subdominants in minor and major keys
# Allows minor and major dominants in minor keys
# Allows only major dominant in major keys
# Allows sevenths in dominant only ("D7")
# harm_fs = ("T", "S", "D", "D7", "T6")
# NOTE - this method will recognise function if foreign notes are
# present (C E F# G) will be recognised as a Tonic in C major,
# but (C E G Bb) won't
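        # e.g. in C major (key == [0, 1]) a chord recognised with root 7 and
        # mode "M" is reported as "D", and with mode "M7" as "D7"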
if self.root == key[0]:
if key[1] == 1 and self.mode == "M": # major
return "T"
elif key[1] == 0 and self.mode =='m': # minor
return "T"
else:
return "" # unrecognised chord or seventh present
elif (self.root - key[0]) % 12 == 5:
if self.mode in ("M", "m"):
return "S"
else:
return ""
elif (self.root - key[0]) % 12 == 7:
if key[1] == 1 and self.mode == "M":
return "D"
elif key[1] == 1 and self.mode == "M7":
return "D7"
elif key[1] == 0 and self.mode in ("M", "m"):
return "D"
elif key[1] == 0 and self.mode in ("M7", "m7"):
return "D7"
else:
return ""
elif (self.root - key[0]) % 12 == 8: #6th in minor scale
if self.mode == "M":
return "TVI"
else:
return ""
elif (self.root - key[0]) % 12 == 9: #6th in major key
if self.mode == "m":
return "TVI"
else:
return ""
else:
return ""
class Piece(object):
    # A class analogous to MusicPiece, but stores parts as NoteSeq objects;
    # it also stores the harmony rule methods and the results of their checks
# TODO: add an iterator (over parts) and delete self.parts
def __init__(self, piece):
if not isinstance(piece, MusicPiece):
raise TypeError(
'A Piece object should be built using MusicPiece object'
+ ' as an argument.')
self.soprano = NoteSeq(piece.soprano)
self.alto = NoteSeq(piece.alto)
self.tenor = NoteSeq(piece.tenor)
self.bass = NoteSeq(piece.bass)
self.title = piece.title
self.parts = {"S": self.soprano,
"A": self.alto,
"T": self.tenor,
"B": self.bass}
self._key = [None, None]
self._chords = []
self._err_count = 0
self._war_count = 0
self._err_detailed = []
self._war_detailed = []
self._read_chords()
self._set_key()
@property
def err_count(self):
return self._err_count
@property
def war_count(self):
return self._war_count
@property
def err_detailed(self):
return self._err_detailed
@property
def war_detailed(self):
return self._war_detailed
@property
def key(self):
return self._key
@property
def chords(self):
return self._chords
@property
def parts_hr(self):
# human-readable representation of the part
result = {}
for voice, part in self.parts.items():
result[voice] = "|" + part.to_hrstr + "||"
return result
@property
def key_hr(self):
# human-readable version of key
if None in self.key:
return "Unknown key"
else:
result =" ".join((
("C","C#","D","D#","E","F","F#","G","G#","A",
"A#","B")[self.key[0]],
("minor", "major")[self.key[1]]
))
return result
@property
def functions_hr(self):
# gives harmonic functions set to print under score (compatible with
# parts_hr)
result = "|"
for chord in self.chords:
ch = chord.harmonic_function(self.key)
while len(ch) < 4:
ch += " " # ensures correct spacing
result += ch
result = result[:-1] + "||"
return result
@property
def chord_n_hr(self):
# chord numbers to print above score (compatible with parts_hr)
result = " "
for idx in enumerate(self.chords, 1):
num = str(idx[0])
while len(num) < 4:
num += " "
result += num
return result
def _read_chords(self):
# converts noteSeqs to chords
for i in range(len(self.soprano)):
chord = Chord(self.soprano[i],
self.alto[i],
self.tenor[i],
self.bass[i])
self._chords.append(chord)
def _set_key(self):
        # method identifies the key (based on the first chord)
        # the key is stored as a pair - the first element determines the tonic
        # (integer 0-11), the second determines the mode (1 == major, 0 == minor)
        # the method falls back to C major if it fails to read the chord
if self.chords[0].root != None:
self._key[0] = self.chords[0].root
else:
self._key[0] = 0 # C as a fallback value
if self.chords[0].mode in ("M","M7"):
self._key[1] = 1
elif self.chords[0].mode in ("m", "m7"):
self._key[1] = 0
else:
self._key[1] = 1 # major as a fallback value
def check_harmony(self, rules=['ALL']):
# main method for checking harmony of a piece, should call methods
# for checking each rule
if 'ALL' in rules or 'RANGE' in rules:
self._check_range()
if 'ALL' in rules or 'LEAPS' in rules:
self._check_leaps()
if 'ALL' in rules or 'DISTANCES' in rules:
self._check_distances()
if 'ALL' in rules or 'PARALELS' in rules:
self._check_paralels()
if 'ALL' in rules or 'CHORDS' in rules:
self._check_chords()
if 'ALL' in rules or "CHORDS_IN_CTX" in rules:
self._check_chords_in_context()
    # Methods checking individual rules. Each method should:
    # increase self.err_count by the number of mistakes found and
    # append a 3-element tuple to self.err_detailed, matching the pattern:
    # ( <mistake type (str)>, <err_count (int)>, <list of strings with details
    # about each mistake> )
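    # e.g. ("Voice range errors", 2,
    #       ["Chord 3: S too high", "Chord 7: B too low"])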
def _check_range(self):
# checking vocal range for each voice in the piece
err_count = 0
errs = []
ranges = {
"S" : (58, 81),
"A" : (53, 74),
"T" : (46, 69),
"B" : (40, 62)
}
for voice, part in self.parts.items():
v_range = ranges[voice]
for idx, note in enumerate(part, 1):
if note.midi_number > v_range[1]:
err_count += 1
errs.append("Chord {0}: {1} too high".format(idx, voice))
elif note.midi_number < v_range[0]:
err_count += 1
errs.append("Chord {0}: {1} too low".format(idx, voice))
if err_count:
errs.sort()
self._err_count += err_count
self._err_detailed.append(("Voice range errors", err_count, errs))
def _check_leaps(self):
# checking for restricted intervals: leaps of a 7th, or >=9th
err_count = 0
errs = []
for voice, part in self.parts.items():
for i in range(len(part)-1):
distance = abs(part[i+1].midi_number - part[i].midi_number)
if distance == 10:
err_count +=1
errs.append("Chords {0}/{1}: Restricted leap in {2} - 7".
format(i+1, i+2, voice))
elif distance == 11:
err_count +=1
errs.append("Chords {0}/{1}: Restricted leap in {2} - 7<".
format(i+1, i+2, voice))
elif distance > 12:
err_count +=1
errs.append(
"Chords {0}/{1}: Restricted leap in {2} - over an octave".
format(i+1, i+2, voice))
if err_count:
errs.sort()
self._err_count += err_count
self._err_detailed.append(("Restricted leaps", err_count, errs))
def _check_distances(self):
        # checking each chord for too wide spacing between voices and
        # for voice overlaps (crossed voices)
err_count = 0
errs = []
war_count = 0
wars = []
for i in range(len(self.soprano)):
if self.soprano[i].midi_number - self.alto[i].midi_number > 12:
err_count += 1
errs.append("Chord {0}: S/A interval to wide".format(i+1))
elif self.soprano[i].midi_number - self.alto[i].midi_number < 0:
err_count += 1
errs.append("Chord {0}: S/A overlap".format(i+1))
if self.alto[i].midi_number - self.tenor[i].midi_number >= 12:
err_count += 1
errs.append("Chord {0}: A/T interval to wide".format(i+1))
elif self.alto[i].midi_number - self.tenor[i].midi_number < 0:
err_count += 1
errs.append("Chord {0}: A/T overlap".format(i+1))
if self.tenor[i].midi_number - self.bass[i].midi_number > 24:
err_count += 1
errs.append("Chord {0}: T/B interval to wide".format(i+1))
elif self.tenor[i].midi_number - self.bass[i].midi_number > 19:
war_count += 1
wars.append("Chord {0}: T/B interval to wide".format(i+1))
elif self.tenor[i].midi_number - self.bass[i].midi_number < 0:
err_count += 1
errs.append("Chord {0}: T/B overlap".format(i+1))
if err_count:
self._err_count += err_count
self._err_detailed.append(
("Voice distance errors", err_count, errs)
)
if war_count:
self._war_count += war_count
self._war_detailed.append(
("Voice distance warnings", war_count, wars)
)
def _check_paralels(self):
# checking for restricted (anti)consecutive intervals (1, 5, 8)
err_count = 0
errs = []
for i in range(len(self.soprano)-1):
s1 = self.soprano[i].midi_number
s2 = self.soprano[i+1].midi_number
a1 = self.alto[i].midi_number
a2 = self.alto[i+1].midi_number
t1 = self.tenor[i].midi_number
t2 = self.tenor[i+1].midi_number
b1 = self.bass[i].midi_number
b2 = self.bass[i+1].midi_number
            # conditions written using "in" so as not to falsely trigger when
            # voices move in consecutive fourths, A above S
            # REVISE IT!
            # The distances are extended above those allowed by check_distance
            # (by a reasonable amount) to identify both errors if they occur
            # simultaneously
            # parallels should be checked only when a note changes, hence:
if s1 != s2 and a1 != a2:
if (s1 - a1) % 12 == 0 and (s2 - a2) % 12 == 0:
err_count += 1
errs.append("Chords {0}/{1}: S/A consecutive Unison/Octave".
format(i+1, i+2))
elif s1 - a1 in (7, 19, 31) and s2 - a2 in (7, 19, 31):
err_count += 1
errs.append("Chords {0}/{1}: S/A consecutive Fifths".
format(i+1, i+2))
if s1 != s2 and t1 != t2:
if (s1 - t1) % 12 == 0 and (s2 - t2) % 12 == 0:
err_count += 1
errs.append("Chords {0}/{1}: S/T consecutive Unison/Octave".
format(i+1, i+2))
elif s1 - t1 in (7, 19, 31, 43) and s2 - t2 in (7, 19, 31, 43):
err_count += 1
errs.append("Chords {0}/{1}: S/T consecutive Fifths".
format(i+1, i+2))
if s1 != s2 and b1 != b2:
if (s1 - b1) % 12 == 0 and (s2 - b2) % 12 == 0:
err_count += 1
errs.append("Chords {0}/{1}: S/B consecutive Unison/Octave".
format(i+1, i+2))
elif s1 - b1 in (7, 19, 31, 43) and s2 - b2 in (7, 19, 31, 43):
err_count += 1
errs.append("Chords {0}/{1}: S/B consecutive Fifths".
format(i+1, i+2))
if a1 != a2 and t1 != t2:
if (a1 - t1) % 12 == 0 and (a2 - t2) % 12 == 0:
err_count += 1
errs.append("Chords {0}/{1}: A/T consecutive Unison/Octave".
format(i+1, i+2))
elif a1 - t1 in (7, 19, 31) and a2 - t2 in (7, 19, 31):
err_count += 1
errs.append("Chords {0}/{1}: A/T consecutive Fifths".
format(i+1, i+2))
if a1 != a2 and b1 != b2:
if (a1 - b1) % 12 == 0 and (a2 - b2) % 12 == 0:
err_count += 1
errs.append("Chords {0}/{1}: A/B consecutive Unison/Octave".
format(i+1, i+2))
elif a1 - b1 in (7, 19, 31, 43) and a2 - b2 in (7, 19, 31, 43):
err_count += 1
errs.append("Chords {0}/{1}: A/B consecutive Fifths".
format(i+1, i+2))
if t1 != t2 and b1 != b2:
if (t1 - b1) % 12 == 0 and (t2 - b2) % 12 == 0:
err_count += 1
errs.append("Chords {0}/{1}: T/B consecutive Unison/Octave".
format(i+1, i+2))
elif t1 - b1 in (7, 19, 31) and t2 - b2 in (7, 19, 31):
err_count += 1
errs.append("Chords {0}/{1}: T/B consecutive Fifths".
format(i+1, i+2))
if err_count:
self._err_count += err_count
self._err_detailed.append(
("Consecutive intervals", err_count, errs)
)
def _check_chords(self):
        # checking for wrong chords (unrecognisable, or with wrong doubling)
        # if a chord is unrecognisable, the other conditions aren't checked
        # e.g. the chord c,d,e,g will get a warning (d doesn't belong to the C
        # chord), but c,d,g,c will not get a warning - it will get an error
        # instead (chord mode unknown)
err_count = 0
errs = []
war_count = 0
wars = []
for idx, chord in enumerate(self.chords, 1):
if chord.mode == None:
err_count += 1
errs.append("Chord {0}: Chord mode unknown".format(idx))
else:
roots = 0
thirds = 0
fifths = 0
sevenths = 0
for voice, interval in chord.structure.items():
if interval == "1":
roots += 1
elif interval == "3>" or interval == "3":
thirds += 1
elif interval == "5":
fifths += 1
elif interval == "7":
sevenths += 1
else:
war_count += 1
wars.append(
"Chord {0}: {1} note doesn't belong to the chord".
format(idx, voice)
)
if thirds > 1 and (not chord.harmonic_function(self.key) == "TVI"):
war_count += 1
wars.append("Chord {0}: more than one third in the chord".
format(idx))
elif fifths >1:
war_count += 1
wars.append("Chord {0}: more than one fifth in the chord".
format(idx))
elif sevenths > 1:
err_count += 1
errs.append("Chord {0}: more than one seventh in the chord".
format(idx))
if err_count:
self._err_count += err_count
self._err_detailed.append(("Unnown chords", err_count, errs))
if war_count:
self._war_count += war_count
self._war_detailed.append(
("Foreign notes in chords and wrong doubling", war_count, wars)
)
def _check_chords_in_context(self):
err_count = 0
errs = []
war_count = 0
wars = []
war_count += 1
wars.append("Checking chords in context not yet implemented!")
self._war_count += war_count
self._war_detailed.append(
("Checking chords in context", war_count, wars)
)
def check_harmony_rules(music_piece, rules=['ALL']):
piece = Piece(music_piece)
piece.check_harmony(rules)
return piece
def make_piece(music_piece):
    # turns a MusicPiece object into a Piece object (without checking rules)
piece = Piece(music_piece)
return piece
|
#!/usr/bin/env python2.7
# -*- coding: utf-8 -*-
# Copyright 2015-2016 Telefónica Investigación y Desarrollo, S.A.U
#
# This file is part of FIWARE project.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
#
# You may obtain a copy of the License at:
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#
# See the License for the specific language governing permissions and
# limitations under the License.
#
# For those usages not covered by the Apache version 2.0 License please
# contact with opensource@tid.es
"""Generate a summary report including the sanity status of the regions.
Usage:
{prog} XUNIT_RESULTS_FILE [BUILD_NUMBER]
Environment:
SANITY_CHECKS_SETTINGS (Optional) Path to settings file
Files:
etc/settings.json Default settings file
test_results.xml Default xUnit results file
"""
from xml.dom import minidom
from constants import DEFAULT_SETTINGS_FILE, PROPERTIES_CONFIG_KEY_TEST_CASES, PROPERTIES_CONFIG_OPT_TEST_CASES
import os.path
import json
import sys
import re
DEFAULT_RESULTS_FILE = "test_results.xml"
ATTR_TESTS_TOTAL = "tests"
ATTR_TESTS_SKIP = "skip"
ATTR_TESTS_ERROR = "errors"
ATTR_TESTS_FAILURE = "failures"
CHILD_NODE_SKIP = "skipped"
CHILD_NODE_ERROR = "error"
CHILD_NODE_FAILURE = "failure"
CHILD_NODE_OTHER = None
TEST_STATUS_NOT_OK = "NOK"
TEST_STATUS_SKIP = "N/A"
TEST_STATUS_OK = "OK"
GLOBAL_STATUS_PARTIAL_OK = "POK"
GLOBAL_STATUS_NOT_OK = TEST_STATUS_NOT_OK
GLOBAL_STATUS_OK = TEST_STATUS_OK
class ResultAnalyzer(object):
def __init__(self, conf, file=DEFAULT_RESULTS_FILE, build_number=None):
self.build = build_number
self.conf = conf
self.file = file
self.dict = {}
def get_results(self):
"""
Parse report file (xUnit test result report) to get total results per each Region.
"""
doc = minidom.parse(self.file)
testsuite = doc.getElementsByTagName("testsuite")[0]
# Print a summary of the test results
build_info = " | BUILD_NUMBER=%s" % self.build if self.build else ""
print "[Tests: {}, Errors: {}, Failures: {}, Skipped: {}]{}".format(
testsuite.getAttribute(ATTR_TESTS_TOTAL),
testsuite.getAttribute(ATTR_TESTS_ERROR),
testsuite.getAttribute(ATTR_TESTS_FAILURE),
testsuite.getAttribute(ATTR_TESTS_SKIP),
build_info)
# Count errors/failures/skips
for testcase in doc.getElementsByTagName('testcase'):
status = TEST_STATUS_OK
child_node_list = testcase.childNodes
if child_node_list is not None and len(child_node_list) != 0:
if child_node_list[0].localName in [CHILD_NODE_FAILURE, CHILD_NODE_ERROR, CHILD_NODE_OTHER]:
status = TEST_STATUS_NOT_OK
elif child_node_list[0].localName == CHILD_NODE_SKIP:
status = TEST_STATUS_SKIP
# Filter out the "regular" test cases (__main__.<Region>)
testclass = testcase.getAttribute('classname')
if re.match("^__main__\.[A-Z].+$", testclass):
testregion = testclass.split(".")[1]
info_test = {"test_name": testcase.getAttribute('name'), "status": status}
if testregion in self.dict:
self.dict[testregion].append(info_test)
else:
self.dict.update({testregion: [info_test]})
def print_results(self):
"""
Print report to standard output
"""
print "\n*********************************\n"
print "REGION TEST SUMMARY REPORT: "
for item in self.dict:
print "\n >> {}".format(item)
for result_value in self.dict[item]:
print " {status}\t {name}".format(name=result_value['test_name'], status=result_value['status'])
def print_global_status(self):
"""
This method will parse test results for each Region and will take into account whether all key and/or optional
test cases are successful, according to the patterns defined in `settings.json`.
How it works:
* Configuration properties:
key_test_cases_pattern = [a, b]
opt_test_cases_pattern = [b, f, g]
* Logic:
key_test_cases list: It will have *ANY* test that matches with pattern
defined in `key_test_cases_pattern`. If all the tests in this list have got
'OK' status, then the region will pass this validation (*OK* status). i.e:
If test results are:
OK a
NOK b
NOK c
OK d
NOK e
OK f
OK g
                > key_test_cases list will have these values: [a, b].
> Due to b=NOK, global region status is not complying with key test cases.
non_opt_test_cases list: It will have *ALL* Key tests that do *NOT* match
with pattern defined in `opt_test_cases_pattern`: All non-optional 'key' test cases.
If all the tests in this list (non optional 'key' tests) have got *OK* status,
            then the region will pass this validation (*POK* status). These checks are only
            performed when the former check is not successful. i.e:
If test results are:
OK a
NOK b
NOK c
OK d
NOK e
OK f
OK g
                > non_opt_test_cases list will have these values: [a]
> Due to a=OK, global region status is complying with this check and it will
have *POK* (partial OK) status.
:return:
"""
key_test_cases = self.conf[PROPERTIES_CONFIG_KEY_TEST_CASES]
opt_test_cases = self.conf[PROPERTIES_CONFIG_OPT_TEST_CASES]
# dictionary holding global status according either key or optional test cases
global_status = {
GLOBAL_STATUS_OK: {
'caption': 'Regions satisfying all key test cases: %s' % key_test_cases,
'empty_msg': 'NONE!!!!!!!',
'region_list': []
},
GLOBAL_STATUS_PARTIAL_OK: {
'caption': 'Regions only failing in optional test cases: %s' % opt_test_cases,
'empty_msg': 'N/A',
'region_list': []
}
}
# check status
key_test_cases_patterns = [re.compile(item) for item in key_test_cases]
opt_test_cases_patterns = [re.compile(item) for item in opt_test_cases]
for region, results in self.dict.iteritems():
key_test_cases = [
item for item in results
if any(pattern.match(item['test_name']) for pattern in key_test_cases_patterns)
]
non_opt_test_cases = [
item for item in key_test_cases
if all(not pattern.match(item['test_name']) for pattern in opt_test_cases_patterns)
]
if all(item['status'] == TEST_STATUS_OK for item in key_test_cases):
global_status[GLOBAL_STATUS_OK]['region_list'].append(region)
elif all(item['status'] == TEST_STATUS_OK for item in non_opt_test_cases):
global_status[GLOBAL_STATUS_PARTIAL_OK]['region_list'].append(region)
# print status
print "\nREGION GLOBAL STATUS"
for status in [GLOBAL_STATUS_OK, GLOBAL_STATUS_PARTIAL_OK]:
region_list = global_status[status]['region_list']
print "\n", global_status[status]['caption']
print " >> %s" % ", ".join(region_list) if len(region_list) else " %s" % global_status[status]['empty_msg']
if __name__ == "__main__":
if len(sys.argv) not in [2, 3]:
usage = re.findall(r'.*{prog}.*', __doc__)[0].format(prog=os.path.basename(__file__)).strip()
print "Usage: %s" % usage
sys.exit(-1)
results_file = sys.argv[1]
build_number = sys.argv[2] if len(sys.argv) > 2 else None
parentdir = os.path.abspath(os.path.join(os.path.dirname(__file__), '..'))
settings_file = os.environ.get('SANITY_CHECKS_SETTINGS', os.path.join(parentdir, DEFAULT_SETTINGS_FILE))
with open(settings_file) as settings:
try:
conf = json.load(settings)
except Exception as e:
print "Error parsing config file '{}': {}".format(settings_file, e)
sys.exit(-1)
checker = ResultAnalyzer(conf, results_file, build_number)
checker.get_results()
checker.print_global_status()
checker.print_results()
|
# -*- coding: utf-8 -*-
from random import randint
from string import ascii_letters
def reikna(atkvaedi, fjoldi_fulltrua):
    # atkvaedi has the form {party1: number of votes for party1, party2: number of votes ..., ...}
    # fjoldi_fulltrua: total number of seats for the constituency
    atkvaedi_kjordaemi = sum([atkvaedi[f] for f in atkvaedi])
    nidurstodur = {}
    for f in atkvaedi:
        hlutfall = 1.0 * fjoldi_fulltrua * atkvaedi[f]/atkvaedi_kjordaemi
        nidurstodur[f] = [int(hlutfall), hlutfall-int(hlutfall)]
    return nidurstodur
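# Minimal usage sketch with made-up numbers: 3 seats split between two parties
# with 10000 and 5000 votes gives 2 and 1 whole seats and zero remainders.
print(reikna({'A': 10000, 'B': 5000}, 3))  # -> {'A': [2, 0.0], 'B': [1, 0.0]}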
kjordaemi = {
'RN': [9,2],
'RS': [9,2],
'SV': [11,2],
'SU': [9,1],
'NV': [7,1],
'NA': [9,1]}
class Flokkur:
    def __init__(self, bokstafur):
        self.bokstafur = bokstafur # e.g. 'a'
        self.atkvaedi = {} # {constituency1: number of votes in that constituency, constituency2: number ..., ...}
    def gefa_atkvaedi(self, kjordaemi, atkvaedi):
        self.atkvaedi[kjordaemi] = atkvaedi # kjordaemi = 'RN', atkvaedi = 123
class Kosningar:
    def __init__(self, flokkar, kjordaemi):
        self.flokkar = flokkar # [Flokkur('a'), Flokkur('b'), ...]
        self.kjordaemi = kjordaemi # {'RN': [9,2], 'SU': [9,1], ...}. [number of constituency seats, number of leveling seats]
    def get_fjoldi_fulltrua(self, kjordaemi):
        return self.kjordaemi[kjordaemi][0]
    def get_fjoldi_jofnun(self, kjordaemi):
        return self.kjordaemi[kjordaemi][1]
    def reikna_kjordaemi(self, kjordaemi):
        # e.g. kjordaemi = 'RN'
        # total number of votes in the constituency
        fj_atkvaeda = 0
        for f in self.flokkar:
            try:
                fj_atkvaeda += f.atkvaedi[kjordaemi]
            except KeyError:
                # party has no votes in this constituency
                pass
        # build whole-seat counts and fractional remainders for each party
        nidurstodur = {}
        for f in self.flokkar:
            try:
                fulltrua_hlutfall = self.get_fjoldi_fulltrua(kjordaemi) * 1.0 * f.atkvaedi[kjordaemi]/fj_atkvaeda
                nidurstodur[f.bokstafur] = [int(fulltrua_hlutfall), fulltrua_hlutfall-int(fulltrua_hlutfall)]
            except KeyError:
                # party not running in this constituency
                pass
        return nidurstodur
#flokkar = [a for a in ascii_letters[0:randint(1,len(ascii_letters)/2-1)]]
flokkar = [Flokkur(a) for a in ascii_letters[0:4]]
kosningar = Kosningar(flokkar, kjordaemi)
for k in kosningar.kjordaemi:
    for f in kosningar.flokkar:
        if randint(0,20) == 0:
            # party not running in this constituency
            pass
        else:
            # random number of votes for the given party
            a = randint(0,50000)
            f.gefa_atkvaedi(k,a)
print(u"Voting results")
for f in kosningar.flokkar:
    print(f.bokstafur)
    print(f.atkvaedi)
class Nidurstodur:
def __init__(self, kjordaemi, flokkar):
self.nidurstodur = {}
for k in kjordaemi:
self.nidurstodur[k] = {}
for f in flokkar:
self.nidurstodur[k][f] = 0
kosninga_nidurstodur = {}
nidurstodur = Nidurstodur(kosningar.kjordaemi, kosningar.flokkar)
# allocate constituency seats
for k in kosningar.kjordaemi:
    # e.g. k = 'RN'
kjordaemi_nidurstodur = kosningar.reikna_kjordaemi(k)
print(kjordaemi_nidurstodur)
|
import pytest
import dpnp
import numpy
def test_choose():
a = numpy.r_[:4]
ia = dpnp.array(a)
b = numpy.r_[-4:0]
ib = dpnp.array(b)
c = numpy.r_[100:500:100]
ic = dpnp.array(c)
expected = numpy.choose([0, 0, 0, 0], [a, b, c])
result = dpnp.choose([0, 0, 0, 0], [ia, ib, ic])
numpy.testing.assert_array_equal(expected, result)
@pytest.mark.parametrize("offset",
[0, 1],
ids=['0', '1'])
@pytest.mark.parametrize("array",
[[[0, 0], [0, 0]],
[[1, 2], [1, 2]],
[[1, 2], [3, 4]],
[[0, 1, 2], [3, 4, 5], [6, 7, 8]],
[[0, 1, 2, 3, 4], [5, 6, 7, 8, 9]],
[[[1, 2], [3, 4]], [[1, 2], [2, 1]], [[1, 3], [3, 1]]],
[[[[1, 2], [3, 4]], [[1, 2], [2, 1]]], [[[1, 3], [3, 1]], [[0, 1], [1, 3]]]],
[[[[1, 2, 3], [3, 4, 5]], [[1, 2, 3], [2, 1, 0]]], [
[[1, 3, 5], [3, 1, 0]], [[0, 1, 2], [1, 3, 4]]]],
[[[[1, 2, 3], [4, 5, 6]], [[7, 8, 9], [10, 11, 12]]], [[[13, 14, 15], [16, 17, 18]], [[19, 20, 21], [22, 23, 24]]]]],
ids=['[[0, 0], [0, 0]]',
'[[1, 2], [1, 2]]',
'[[1, 2], [3, 4]]',
'[[0, 1, 2], [3, 4, 5], [6, 7, 8]]',
'[[0, 1, 2, 3, 4], [5, 6, 7, 8, 9]]',
'[[[1, 2], [3, 4]], [[1, 2], [2, 1]], [[1, 3], [3, 1]]]',
'[[[[1, 2], [3, 4]], [[1, 2], [2, 1]]], [[[1, 3], [3, 1]], [[0, 1], [1, 3]]]]',
'[[[[1, 2, 3], [3, 4, 5]], [[1, 2, 3], [2, 1, 0]]], [[[1, 3, 5], [3, 1, 0]], [[0, 1, 2], [1, 3, 4]]]]',
'[[[[1, 2, 3], [4, 5, 6]], [[7, 8, 9], [10, 11, 12]]], [[[13, 14, 15], [16, 17, 18]], [[19, 20, 21], [22, 23, 24]]]]'])
def test_diagonal(array, offset):
a = numpy.array(array)
ia = dpnp.array(a)
expected = numpy.diagonal(a, offset)
result = dpnp.diagonal(ia, offset)
numpy.testing.assert_array_equal(expected, result)
@pytest.mark.parametrize("val",
[-1, 0, 1],
ids=['-1', '0', '1'])
@pytest.mark.parametrize("array",
[[[0, 0], [0, 0]],
[[1, 2], [1, 2]],
[[1, 2], [3, 4]],
[[0, 1, 2], [3, 4, 5], [6, 7, 8]],
[[0, 1, 2, 3, 4], [5, 6, 7, 8, 9]],
[[[[1, 2], [3, 4]], [[1, 2], [2, 1]]], [[[1, 3], [3, 1]], [[0, 1], [1, 3]]]]],
ids=['[[0, 0], [0, 0]]',
'[[1, 2], [1, 2]]',
'[[1, 2], [3, 4]]',
'[[0, 1, 2], [3, 4, 5], [6, 7, 8]]',
'[[0, 1, 2, 3, 4], [5, 6, 7, 8, 9]]',
'[[[[1, 2], [3, 4]], [[1, 2], [2, 1]]], [[[1, 3], [3, 1]], [[0, 1], [1, 3]]]]'])
def test_fill_diagonal(array, val):
a = numpy.array(array)
ia = dpnp.array(a)
expected = numpy.fill_diagonal(a, val)
result = dpnp.fill_diagonal(ia, val)
numpy.testing.assert_array_equal(expected, result)
@pytest.mark.parametrize("dimension",
[(1, ), (2, ), (1, 2), (2, 3), (3, 2), [1], [2], [1, 2], [2, 3], [3, 2]],
ids=['(1, )', '(2, )', '(1, 2)', '(2, 3)', '(3, 2)',
'[1]', '[2]', '[1, 2]', '[2, 3]', '[3, 2]'])
def test_indices(dimension):
expected = numpy.indices(dimension)
result = dpnp.indices(dimension)
numpy.testing.assert_array_equal(expected, result)
@pytest.mark.parametrize("array",
[[],
[[0, 0], [0, 0]],
[[1, 0], [1, 0]],
[[1, 2], [3, 4]],
[[0, 1, 2], [3, 0, 5], [6, 7, 0]],
[[0, 1, 0, 3, 0], [5, 0, 7, 0, 9]],
[[[1, 2], [0, 4]], [[0, 2], [0, 1]], [[0, 0], [3, 1]]],
[[[[1, 2, 3], [3, 4, 5]], [[1, 2, 3], [2, 1, 0]]], [
[[1, 3, 5], [3, 1, 0]], [[0, 1, 2], [1, 3, 4]]]]],
ids=['[]',
'[[0, 0], [0, 0]]',
'[[1, 0], [1, 0]]',
'[[1, 2], [3, 4]]',
'[[0, 1, 2], [3, 0, 5], [6, 7, 0]]',
'[[0, 1, 0, 3, 0], [5, 0, 7, 0, 9]]',
'[[[1, 2], [0, 4]], [[0, 2], [0, 1]], [[0, 0], [3, 1]]]',
'[[[[1, 2, 3], [3, 4, 5]], [[1, 2, 3], [2, 1, 0]]], [[[1, 3, 5], [3, 1, 0]], [[0, 1, 2], [1, 3, 4]]]]'])
def test_nonzero(array):
a = numpy.array(array)
ia = dpnp.array(array)
expected = numpy.nonzero(a)
result = dpnp.nonzero(ia)
numpy.testing.assert_array_equal(expected, result)
@pytest.mark.parametrize("vals",
[[100, 200],
(100, 200)],
ids=['[100, 200]',
'(100, 200)'])
@pytest.mark.parametrize("mask",
[[[True, False], [False, True]],
[[False, True], [True, False]],
[[False, False], [True, True]]],
ids=['[[True, False], [False, True]]',
'[[False, True], [True, False]]',
'[[False, False], [True, True]]'])
@pytest.mark.parametrize("arr",
[[[0, 0], [0, 0]],
[[1, 2], [1, 2]],
[[1, 2], [3, 4]]],
ids=['[[0, 0], [0, 0]]',
'[[1, 2], [1, 2]]',
'[[1, 2], [3, 4]]'])
def test_place1(arr, mask, vals):
a = numpy.array(arr)
ia = dpnp.array(a)
m = numpy.array(mask)
im = dpnp.array(m)
numpy.place(a, m, vals)
dpnp.place(ia, im, vals)
numpy.testing.assert_array_equal(a, ia)
@pytest.mark.parametrize("vals",
[[100, 200],
[100, 200, 300, 400, 500, 600],
[100, 200, 300, 400, 500, 600, 800, 900]],
ids=['[100, 200]',
'[100, 200, 300, 400, 500, 600]',
'[100, 200, 300, 400, 500, 600, 800, 900]'])
@pytest.mark.parametrize("mask",
[[[[True, False], [False, True]], [[False, True], [True, False]], [[False, False], [True, True]]]],
ids=['[[[True, False], [False, True]], [[False, True], [True, False]], [[False, False], [True, True]]]'])
@pytest.mark.parametrize("arr",
[[[[1, 2], [3, 4]], [[1, 2], [2, 1]], [[1, 3], [3, 1]]]],
ids=['[[[1, 2], [3, 4]], [[1, 2], [2, 1]], [[1, 3], [3, 1]]]'])
def test_place2(arr, mask, vals):
a = numpy.array(arr)
ia = dpnp.array(a)
m = numpy.array(mask)
im = dpnp.array(m)
numpy.place(a, m, vals)
dpnp.place(ia, im, vals)
numpy.testing.assert_array_equal(a, ia)
@pytest.mark.parametrize("vals",
[[100, 200],
[100, 200, 300, 400, 500, 600],
[100, 200, 300, 400, 500, 600, 800, 900]],
ids=['[100, 200]',
'[100, 200, 300, 400, 500, 600]',
'[100, 200, 300, 400, 500, 600, 800, 900]'])
@pytest.mark.parametrize("mask",
[[[[[False, False], [True, True]], [[True, True], [True, True]]], [
[[False, False], [True, True]], [[False, False], [False, False]]]]],
ids=['[[[[False, False], [True, True]], [[True, True], [True, True]]], [[[False, False], [True, True]], [[False, False], [False, False]]]]'])
@pytest.mark.parametrize("arr",
[[[[[1, 2], [3, 4]], [[1, 2], [2, 1]]], [[[1, 3], [3, 1]], [[0, 1], [1, 3]]]]],
ids=['[[[[1, 2], [3, 4]], [[1, 2], [2, 1]]], [[[1, 3], [3, 1]], [[0, 1], [1, 3]]]]'])
def test_place3(arr, mask, vals):
a = numpy.array(arr)
ia = dpnp.array(a)
m = numpy.array(mask)
im = dpnp.array(m)
numpy.place(a, m, vals)
dpnp.place(ia, im, vals)
numpy.testing.assert_array_equal(a, ia)
@pytest.mark.parametrize("v",
[0, 1, 2, 3, 4],
ids=['0', '1', '2', '3', '4'])
@pytest.mark.parametrize("ind",
[0, 1, 2, 3],
ids=['0', '1', '2', '3'])
@pytest.mark.parametrize("array",
[[[0, 0], [0, 0]],
[[1, 2], [1, 2]],
[[1, 2], [3, 4]],
[[[1, 2], [3, 4]], [[1, 2], [2, 1]], [[1, 3], [3, 1]]],
[[[[1, 2], [3, 4]], [[1, 2], [2, 1]]], [[[1, 3], [3, 1]], [[0, 1], [1, 3]]]]],
ids=['[[0, 0], [0, 0]]',
'[[1, 2], [1, 2]]',
'[[1, 2], [3, 4]]',
'[[[1, 2], [3, 4]], [[1, 2], [2, 1]], [[1, 3], [3, 1]]]',
'[[[[1, 2], [3, 4]], [[1, 2], [2, 1]]], [[[1, 3], [3, 1]], [[0, 1], [1, 3]]]]'])
def test_put(array, ind, v):
a = numpy.array(array)
ia = dpnp.array(a)
numpy.put(a, ind, v)
dpnp.put(ia, ind, v)
numpy.testing.assert_array_equal(a, ia)
@pytest.mark.parametrize("v",
[[10, 20], [30, 40]],
ids=['[10, 20]', '[30, 40]'])
@pytest.mark.parametrize("ind",
[[0, 1], [2, 3]],
ids=['[0, 1]', '[2, 3]'])
@pytest.mark.parametrize("array",
[[[0, 0], [0, 0]],
[[1, 2], [1, 2]],
[[1, 2], [3, 4]],
[[[1, 2], [3, 4]], [[1, 2], [2, 1]], [[1, 3], [3, 1]]],
[[[[1, 2], [3, 4]], [[1, 2], [2, 1]]], [[[1, 3], [3, 1]], [[0, 1], [1, 3]]]]],
ids=['[[0, 0], [0, 0]]',
'[[1, 2], [1, 2]]',
'[[1, 2], [3, 4]]',
'[[[1, 2], [3, 4]], [[1, 2], [2, 1]], [[1, 3], [3, 1]]]',
'[[[[1, 2], [3, 4]], [[1, 2], [2, 1]]], [[[1, 3], [3, 1]], [[0, 1], [1, 3]]]]'])
def test_put2(array, ind, v):
a = numpy.array(array)
ia = dpnp.array(a)
numpy.put(a, ind, v)
dpnp.put(ia, ind, v)
numpy.testing.assert_array_equal(a, ia)
def test_put3():
a = numpy.arange(5)
ia = dpnp.array(a)
dpnp.put(ia, [0, 2], [-44, -55])
numpy.put(a, [0, 2], [-44, -55])
numpy.testing.assert_array_equal(a, ia)
def test_put_along_axis_val_int():
a = numpy.arange(16).reshape(4, 4)
ai = dpnp.array(a)
ind_r = numpy.array([[3, 0, 2, 1]])
ind_r_i = dpnp.array(ind_r)
for axis in range(2):
numpy.put_along_axis(a, ind_r, 777, axis)
dpnp.put_along_axis(ai, ind_r_i, 777, axis)
numpy.testing.assert_array_equal(a, ai)
def test_put_along_axis1():
a = numpy.arange(64).reshape(4, 4, 4)
ai = dpnp.array(a)
ind_r = numpy.array([[[3, 0, 2, 1]]])
ind_r_i = dpnp.array(ind_r)
for axis in range(3):
numpy.put_along_axis(a, ind_r, 777, axis)
dpnp.put_along_axis(ai, ind_r_i, 777, axis)
numpy.testing.assert_array_equal(a, ai)
def test_put_along_axis2():
a = numpy.arange(64).reshape(4, 4, 4)
ai = dpnp.array(a)
ind_r = numpy.array([[[3, 0, 2, 1]]])
ind_r_i = dpnp.array(ind_r)
for axis in range(3):
numpy.put_along_axis(a, ind_r, [100, 200, 300, 400], axis)
dpnp.put_along_axis(ai, ind_r_i, [100, 200, 300, 400], axis)
numpy.testing.assert_array_equal(a, ai)
@pytest.mark.parametrize("vals",
[[100, 200]],
ids=['[100, 200]'])
@pytest.mark.parametrize("mask",
[[[True, False], [False, True]],
[[False, True], [True, False]],
[[False, False], [True, True]]],
ids=['[[True, False], [False, True]]',
'[[False, True], [True, False]]',
'[[False, False], [True, True]]'])
@pytest.mark.parametrize("arr",
[[[0, 0], [0, 0]],
[[1, 2], [1, 2]],
[[1, 2], [3, 4]]],
ids=['[[0, 0], [0, 0]]',
'[[1, 2], [1, 2]]',
'[[1, 2], [3, 4]]'])
def test_putmask1(arr, mask, vals):
a = numpy.array(arr)
ia = dpnp.array(a)
m = numpy.array(mask)
im = dpnp.array(m)
v = numpy.array(vals)
iv = dpnp.array(v)
numpy.putmask(a, m, v)
dpnp.putmask(ia, im, iv)
numpy.testing.assert_array_equal(a, ia)
@pytest.mark.parametrize("vals",
[[100, 200],
[100, 200, 300, 400, 500, 600],
[100, 200, 300, 400, 500, 600, 800, 900]],
ids=['[100, 200]',
'[100, 200, 300, 400, 500, 600]',
'[100, 200, 300, 400, 500, 600, 800, 900]'])
@pytest.mark.parametrize("mask",
[[[[True, False], [False, True]], [[False, True], [True, False]], [[False, False], [True, True]]]],
ids=['[[[True, False], [False, True]], [[False, True], [True, False]], [[False, False], [True, True]]]'])
@pytest.mark.parametrize("arr",
[[[[1, 2], [3, 4]], [[1, 2], [2, 1]], [[1, 3], [3, 1]]]],
ids=['[[[1, 2], [3, 4]], [[1, 2], [2, 1]], [[1, 3], [3, 1]]]'])
def test_putmask2(arr, mask, vals):
a = numpy.array(arr)
ia = dpnp.array(a)
m = numpy.array(mask)
im = dpnp.array(m)
v = numpy.array(vals)
iv = dpnp.array(v)
numpy.putmask(a, m, v)
dpnp.putmask(ia, im, iv)
numpy.testing.assert_array_equal(a, ia)
@pytest.mark.parametrize("vals",
[[100, 200],
[100, 200, 300, 400, 500, 600],
[100, 200, 300, 400, 500, 600, 800, 900]],
ids=['[100, 200]',
'[100, 200, 300, 400, 500, 600]',
'[100, 200, 300, 400, 500, 600, 800, 900]'])
@pytest.mark.parametrize("mask",
[[[[[False, False], [True, True]], [[True, True], [True, True]]], [
[[False, False], [True, True]], [[False, False], [False, False]]]]],
ids=['[[[[False, False], [True, True]], [[True, True], [True, True]]], [[[False, False], [True, True]], [[False, False], [False, False]]]]'])
@pytest.mark.parametrize("arr",
[[[[[1, 2], [3, 4]], [[1, 2], [2, 1]]], [[[1, 3], [3, 1]], [[0, 1], [1, 3]]]]],
ids=['[[[[1, 2], [3, 4]], [[1, 2], [2, 1]]], [[[1, 3], [3, 1]], [[0, 1], [1, 3]]]]'])
def test_putmask3(arr, mask, vals):
a = numpy.array(arr)
ia = dpnp.array(a)
m = numpy.array(mask)
im = dpnp.array(m)
v = numpy.array(vals)
iv = dpnp.array(v)
numpy.putmask(a, m, v)
dpnp.putmask(ia, im, iv)
numpy.testing.assert_array_equal(a, ia)
def test_select():
cond_val1 = numpy.array([True, True, True, False, False, False, False, False, False, False])
cond_val2 = numpy.array([False, False, False, False, False, True, True, True, True, True])
icond_val1 = dpnp.array(cond_val1)
icond_val2 = dpnp.array(cond_val2)
condlist = [cond_val1, cond_val2]
icondlist = [icond_val1, icond_val2]
choice_val1 = numpy.full(10, -2)
choice_val2 = numpy.full(10, -1)
ichoice_val1 = dpnp.array(choice_val1)
ichoice_val2 = dpnp.array(choice_val2)
choicelist = [choice_val1, choice_val2]
ichoicelist = [ichoice_val1, ichoice_val2]
expected = numpy.select(condlist, choicelist)
result = dpnp.select(icondlist, ichoicelist)
numpy.testing.assert_array_equal(expected, result)
@pytest.mark.parametrize("indices",
[[[0, 0], [0, 0]],
[[1, 2], [1, 2]],
[[1, 2], [3, 4]]],
ids=['[[0, 0], [0, 0]]',
'[[1, 2], [1, 2]]',
'[[1, 2], [3, 4]]'])
@pytest.mark.parametrize("array",
[[[0, 1, 2], [3, 4, 5], [6, 7, 8]],
[[0, 1, 2, 3, 4], [5, 6, 7, 8, 9]],
[[[1, 2], [3, 4]], [[1, 2], [2, 1]], [[1, 3], [3, 1]]],
[[[[1, 2], [3, 4]], [[1, 2], [2, 1]]], [[[1, 3], [3, 1]], [[0, 1], [1, 3]]]],
[[[[1, 2, 3], [3, 4, 5]], [[1, 2, 3], [2, 1, 0]]], [
[[1, 3, 5], [3, 1, 0]], [[0, 1, 2], [1, 3, 4]]]],
[[[[1, 2, 3], [4, 5, 6]], [[7, 8, 9], [10, 11, 12]]], [[[13, 14, 15], [16, 17, 18]], [[19, 20, 21], [22, 23, 24]]]]],
ids=['[[0, 1, 2], [3, 4, 5], [6, 7, 8]]',
'[[0, 1, 2, 3, 4], [5, 6, 7, 8, 9]]',
'[[[1, 2], [3, 4]], [[1, 2], [2, 1]], [[1, 3], [3, 1]]]',
'[[[[1, 2], [3, 4]], [[1, 2], [2, 1]]], [[[1, 3], [3, 1]], [[0, 1], [1, 3]]]]',
'[[[[1, 2, 3], [3, 4, 5]], [[1, 2, 3], [2, 1, 0]]], [[[1, 3, 5], [3, 1, 0]], [[0, 1, 2], [1, 3, 4]]]]',
'[[[[1, 2, 3], [4, 5, 6]], [[7, 8, 9], [10, 11, 12]]], [[[13, 14, 15], [16, 17, 18]], [[19, 20, 21], [22, 23, 24]]]]'])
def test_take(array, indices):
a = numpy.array(array)
ind = numpy.array(indices)
ia = dpnp.array(a)
iind = dpnp.array(ind)
expected = numpy.take(a, ind)
result = dpnp.take(ia, iind)
numpy.testing.assert_array_equal(expected, result)
def test_take_along_axis():
a = numpy.arange(16).reshape(4, 4)
ai = dpnp.array(a)
ind_r = numpy.array([[3, 0, 2, 1]])
ind_r_i = dpnp.array(ind_r)
for axis in range(2):
expected = numpy.take_along_axis(a, ind_r, axis)
result = dpnp.take_along_axis(ai, ind_r_i, axis)
numpy.testing.assert_array_equal(expected, result)
def test_take_along_axis1():
a = numpy.arange(64).reshape(4, 4, 4)
ai = dpnp.array(a)
ind_r = numpy.array([[[3, 0, 2, 1]]])
ind_r_i = dpnp.array(ind_r)
for axis in range(3):
expected = numpy.take_along_axis(a, ind_r, axis)
result = dpnp.take_along_axis(ai, ind_r_i, axis)
numpy.testing.assert_array_equal(expected, result)
@pytest.mark.parametrize("m",
[None, 0, 1, 2, 3, 4],
ids=['None', '0', '1', '2', '3', '4'])
@pytest.mark.parametrize("k",
[0, 1, 2, 3, 4, 5],
ids=['0', '1', '2', '3', '4', '5'])
@pytest.mark.parametrize("n",
[1, 2, 3, 4, 5, 6],
ids=['1', '2', '3', '4', '5', '6'])
def test_tril_indices(n, k, m):
result = dpnp.tril_indices(n, k, m)
expected = numpy.tril_indices(n, k, m)
numpy.testing.assert_array_equal(expected, result)
@pytest.mark.parametrize("k",
[0, 1, 2, 3, 4, 5],
ids=['0', '1', '2', '3', '4', '5'])
@pytest.mark.parametrize("array",
[[[0, 0], [0, 0]],
[[1, 2], [1, 2]],
[[1, 2], [3, 4]], ],
ids=['[[0, 0], [0, 0]]',
'[[1, 2], [1, 2]]',
'[[1, 2], [3, 4]]'])
def test_tril_indices_from(array, k):
a = numpy.array(array)
ia = dpnp.array(a)
result = dpnp.tril_indices_from(ia, k)
expected = numpy.tril_indices_from(a, k)
numpy.testing.assert_array_equal(expected, result)
@pytest.mark.parametrize("m",
[None, 0, 1, 2, 3, 4],
ids=['None', '0', '1', '2', '3', '4'])
@pytest.mark.parametrize("k",
[0, 1, 2, 3, 4, 5],
ids=['0', '1', '2', '3', '4', '5'])
@pytest.mark.parametrize("n",
[1, 2, 3, 4, 5, 6],
ids=['1', '2', '3', '4', '5', '6'])
def test_triu_indices(n, k, m):
result = dpnp.triu_indices(n, k, m)
expected = numpy.triu_indices(n, k, m)
numpy.testing.assert_array_equal(expected, result)
@pytest.mark.parametrize("k",
[0, 1, 2, 3, 4, 5],
ids=['0', '1', '2', '3', '4', '5'])
@pytest.mark.parametrize("array",
[[[0, 0], [0, 0]],
[[1, 2], [1, 2]],
[[1, 2], [3, 4]], ],
ids=['[[0, 0], [0, 0]]',
'[[1, 2], [1, 2]]',
'[[1, 2], [3, 4]]'])
def test_triu_indices_from(array, k):
a = numpy.array(array)
ia = dpnp.array(a)
result = dpnp.triu_indices_from(ia, k)
expected = numpy.triu_indices_from(a, k)
numpy.testing.assert_array_equal(expected, result)
|
import numpy as np
from scipy.special import eval_hermite, factorial
def hermite_functions(n, x, all_n=True, move_axes=(), method="recursive"):
"""
Calculate the Hermite functions up to the nth order at position x, psi_n(x).
For details see:
https://en.wikipedia.org/wiki/Hermite_polynomials#Hermite_functions
If all_n == True, then return all Hermite functions up to n
If all_n == False, only return nth Hermite function
    When using the recursive method, the latter is more memory efficient, as it
    only stores psi_n, psi_{n-1}, and psi_{n-2}.
The 'move_axes' option causes the output dimensions to be swapped around
using np.moveaxis.
Uses one of three possible calculation methods:
'recursive' - Uses recursive method. Most efficient for n > 5.
'direct' - Calculates directly using Hermite polynomials.
Inefficient due to factorial and Hermite polynomial,
although useful for comparison when testing
'analytic' - Uses analytic expressions (only for n <= 5)
Recursion relation:
psi_n(x) = sqrt(2/n) * x * psi_{n-1}(x) - sqrt((n-1)/n) * psi_{n-2}(x)
Examples:
>>> x = np.mgrid[-2:3, 0:4]
>>> x.shape
(2, 5, 4)
>>> n = 5
>>> psi = hermite_functions(n, x, all_n=False)
>>> psi.shape
(2, 5, 4)
>>> psi = hermite_functions(n, x, all_n=True)
>>> psi.shape
(6, 2, 5, 4)
>>> reshape = ([0, 1, 2, 3], [1, 3, 2, 0])
>>> psi = hermite_functions(n, x, all_n=True, move_axes=reshape)
>>> psi.shape
(4, 6, 5, 2)
"""
if method not in ["recursive", "analytic", "direct"]:
raise ValueError("Method not recognized.")
if not (issubclass(type(n), int) or issubclass(type(n), np.integer)):
raise TypeError("n must be an integer.")
if n < 0:
raise ValueError("n must be non-negative.")
if method == "analytic" and (n > 5):
raise ValueError("n must not be greater than 5 for analytic calculation.")
if all_n:
psi_n = _Hermite_all_n(n, x, method)
else:
psi_n = _Hermite_single_n(n, x, method)
if move_axes:
psi_n = np.moveaxis(psi_n, move_axes[0], move_axes[1])
return psi_n
def _Hermite_single_n(n, x, method):
"""
Calculates psi_n(x) for a single value of n.
"""
if method == "analytic":
return _H_analytic(n, x)
if method == "direct":
return _H_direct(n, x)
psi_m_minus_2 = _H_analytic(0, x)
if n == 0:
return psi_m_minus_2
psi_m_minus_1 = _H_analytic(1, x)
if n == 1:
return psi_m_minus_1
for m in range(2, n + 1):
psi_m = _H_recursive(m, x, psi_m_minus_2, psi_m_minus_1)
psi_m_minus_2 = psi_m_minus_1
psi_m_minus_1 = psi_m
return psi_m
def _Hermite_all_n(n, x, method):
"""
    Calculates psi_m(x) for all 0 <= m <= n.
"""
try:
psi_n = np.zeros((n + 1,) + x.shape)
except AttributeError: # x does not have property 'shape'
psi_n = np.zeros((n + 1, 1))
if method == "analytic":
for m in range(n + 1):
psi_n[m, :] = _H_analytic(m, x)
return psi_n
if method == "direct":
for m in range(n + 1):
psi_n[m, :] = _H_direct(m, x)
return psi_n
psi_n[0, :] = _H_analytic(0, x)
if n == 0:
return psi_n
psi_n[1, :] = _H_analytic(1, x)
if n == 1:
return psi_n
for m in range(2, n + 1):
psi_n[m, :] = _H_recursive(m, x, psi_n[m - 2, :], psi_n[m - 1, :])
return psi_n
def _H_recursive(m, x, psi_m_minus_2, psi_m_minus_1):
"""
Calculate psi_m(x) using recursion relation.
"""
return np.sqrt(2 / m) * x * psi_m_minus_1 - np.sqrt((m - 1) / m) * psi_m_minus_2
def _H_analytic(n, x):
"""
Analytic expressions for psi_n(x) for 0 <= n <= 5.
"""
if n == 0:
return np.pi ** (-1 / 4) * np.exp(-(x ** 2) / 2)
if n == 1:
return np.sqrt(2) * np.pi ** (-1 / 4) * x * np.exp(-(x ** 2) / 2)
if n == 2:
return (
(np.sqrt(2) * np.pi ** (1 / 4)) ** (-1)
* (2 * x ** 2 - 1)
* np.exp(-(x ** 2) / 2)
)
if n == 3:
return (
(np.sqrt(3) * np.pi ** (1 / 4)) ** (-1)
* (2 * x ** 3 - 3 * x)
* np.exp(-(x ** 2) / 2)
)
if n == 4:
return (
(2 * np.sqrt(6) * np.pi ** (1 / 4)) ** (-1)
* (4 * x ** 4 - 12 * x ** 2 + 3)
* np.exp(-(x ** 2) / 2)
)
if n == 5:
return (
(2 * np.sqrt(15) * np.pi ** (1 / 4)) ** (-1)
* (4 * x ** 5 - 20 * x ** 3 + 15 * x)
* np.exp(-(x ** 2) / 2)
)
raise ValueError("n must be an integer between 0 and 5")
def _H_direct(n, x):
"""
Calculate psi_n(x) using explicit definition.
"""
return (
1
/ np.sqrt(2 ** n * factorial(n))
* np.pi ** (-1 / 4)
* np.exp(-(x ** 2) / 2)
* eval_hermite(n, x)
)
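# A minimal consistency sketch: the recursive and direct methods should agree
# to numerical precision on a small grid of positions.
def _consistency_check(n=5):
    x = np.linspace(-2, 2, 9)
    return np.allclose(hermite_functions(n, x, method="recursive"),
                       hermite_functions(n, x, method="direct"))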
if __name__ == "__main__":
import doctest
doctest.testmod()
|
import math
import random
import numpy as np
import logging
import time
class exp3_m(object):
def __init__(self, choices, reward_min=0, reward_max=1, gamma=0.07, reward_function=None, model_path=None):
self.reward_min = reward_min
self.reward_max = reward_max
self.gamma = gamma
self.S_0 = set()
self.probabilities = []
self.reward_function = reward_function
if model_path is not None:
self.weights = np.load(model_path)
else:
self.weights = np.ones(len(choices))
def depround(self, probabilities):
one_probs = set()
candidates = set(range(len(probabilities)))
probs = probabilities.copy()
# Calling np.random.uniform in the loop doubles execution time, allocate ahead of time
randoms = set(np.random.uniform(0, 1, len(candidates)))
        # We assume that all probabilities initially are 0 < p < 1
while len(candidates) > 1:
i = candidates.pop()
j = candidates.pop()
alpha = min(1 - probs[i], probs[j])
beta = min(probs[i], 1 - probs[j])
threshold = randoms.pop()
if threshold > (beta/(alpha+beta)):
probs[i] = probs[i] + alpha
probs[j] = probs[j] - alpha
else:
probs[i] = probs[i] - beta
probs[j] = probs[j] + beta
# Put back into pool or element has been chosen
if probs[i] == 1:
one_probs.add(i)
elif probs[i] > 0:
candidates.add(i)
if probs[j] == 1:
one_probs.add(j)
elif probs[j] > 0:
candidates.add(j)
return np.array(list(one_probs))
def draw(self, k):
max_j = np.argmax(self.weights)
K = len(self.weights)
self.S_0 = set()
# Step 1
sorted_weight_indices = np.argsort(self.weights)[::-1]
s_0_candidates = set()
if self.weights[max_j] >= (1/k - self.gamma/K) * (np.sum(self.weights)/(1-self.gamma)):
rhs = (1/k - self.gamma/K)/(1 - self.gamma)
alpha_t = 0
# Find alpha_t
for i, index in enumerate(sorted_weight_indices):
x = i
y = np.sum(self.weights[sorted_weight_indices[i:]])
alpha_t_candidate = -(y * rhs)/(x*rhs - 1)
s_0_candidates.add(index)
if alpha_t_candidate == rhs:
alpha_t = alpha_t_candidate
self.S_0 = s_0_candidates
break
# Step 2
W = set(sorted_weight_indices)
weights_prime = np.zeros(K)
diff_indices = list(W.difference(self.S_0))
weights_prime[diff_indices] = self.weights[diff_indices]
if len(self.S_0) > 0:
S0_indices = list(self.S_0)
weights_prime[S0_indices] = alpha_t
# Step 3
w_prime_sum = np.sum(weights_prime)
gamma_factor = (1 - self.gamma)
gamma_term = self.gamma/K
self.probabilities = 1/w_prime_sum * weights_prime * gamma_factor
self.probabilities = self.probabilities + gamma_term
self.probabilities = self.probabilities * k
# Step 4
self.choices = self.depround(self.probabilities)
return self.choices
def give_reward(self, reward_data):
rewards = [self.reward_function(choice, reward_data)
for i, choice in enumerate(self.choices)]
regret = np.sum([self.reward_max - r for r in rewards])
        for i, reward in enumerate(rewards):
            # update the weight of the arm that was actually drawn, not position i
            self.update_probability(reward, self.choices[i], len(self.choices))
return regret/len(self.choices)
def update_probability(self, reward, i, k):
x_hat = reward/self.probabilities[i]
if i not in self.S_0:
self.weights[i] = self.weights[i] * \
math.exp(k * self.gamma * x_hat/(len(self.probabilities)))
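# A hedged usage sketch (added for illustration, not part of the original
# module): pull k arms per round with Exp3.M and feed rewards back through a
# user-supplied reward function. The Bernoulli reward model below is illustrative.
if __name__ == "__main__":
    n_arms, k, rounds = 10, 3, 50
    true_means = np.random.uniform(0, 1, n_arms)
    bandit = exp3_m(choices=list(range(n_arms)),
                    reward_function=lambda arm, data: data[arm])
    for _ in range(rounds):
        selected = bandit.draw(k)  # indices of the k arms chosen this round
        observed = {arm: float(np.random.rand() < true_means[arm]) for arm in selected}
        avg_regret = bandit.give_reward(observed)  # also updates the weights
    print("final weights:", np.round(bandit.weights, 3))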
|
# Generated by Django 2.1.7 on 2019-03-22 18:44
from django.db import migrations, models
import django.db.models.deletion
import django.utils.timezone
import modelcluster.contrib.taggit
import modelcluster.fields
import wagtail.core.blocks
import wagtail.core.fields
import wagtail.images.blocks
class Migration(migrations.Migration):
dependencies = [
('taggit', '0002_auto_20150616_2121'),
('wagtailimages', '0001_squashed_0021'),
('wagtailcore', '0041_group_collection_permissions_verbose_name_plural'),
('index', '0011_storymainpage_categories'),
]
operations = [
migrations.CreateModel(
name='ArticleGalleryImage',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('sort_order', models.IntegerField(blank=True, editable=False, null=True)),
('caption', models.CharField(blank=True, max_length=200)),
],
options={
'ordering': ['sort_order'],
'abstract': False,
},
),
migrations.CreateModel(
name='BlogArticlePage',
fields=[
('page_ptr', models.OneToOneField(auto_created=True, on_delete=django.db.models.deletion.CASCADE, parent_link=True, primary_key=True, serialize=False, to='wagtailcore.Page')),
('tagline', wagtail.core.fields.RichTextField(blank=True)),
('published_date', models.DateTimeField(default=django.utils.timezone.now)),
('body', wagtail.core.fields.StreamField([('intro', wagtail.core.blocks.RichTextBlock()), ('textblock', wagtail.core.blocks.RichTextBlock()), ('image', wagtail.images.blocks.ImageChooserBlock())])),
('categories', modelcluster.fields.ParentalManyToManyField(blank=True, to='index.StoryCategory')),
('tags', modelcluster.contrib.taggit.ClusterTaggableManager(blank=True, help_text='A comma-separated list of tags.', through='index.StoryMainTags', to='taggit.Tag', verbose_name='Tags')),
],
options={
'abstract': False,
},
bases=('wagtailcore.page',),
),
migrations.CreateModel(
name='BlogIndexPage',
fields=[
('page_ptr', models.OneToOneField(auto_created=True, on_delete=django.db.models.deletion.CASCADE, parent_link=True, primary_key=True, serialize=False, to='wagtailcore.Page')),
('intro', wagtail.core.fields.RichTextField(blank=True)),
],
options={
'abstract': False,
},
bases=('wagtailcore.page',),
),
migrations.AlterModelOptions(
name='storyindexpage',
options={'verbose_name': 'StoryIndexPage'},
),
migrations.AlterModelOptions(
name='storymainpage',
options={'verbose_name': 'StoryMainPage'},
),
migrations.AddField(
model_name='articlegalleryimage',
name='article',
field=modelcluster.fields.ParentalKey(on_delete=django.db.models.deletion.CASCADE, related_name='article_images', to='index.BlogArticlePage'),
),
migrations.AddField(
model_name='articlegalleryimage',
name='image',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='+', to='wagtailimages.Image'),
),
]
|
"""
PRONTO
Ordena lista em ordem alfabetica humana - http://nedbatchelder.com/blog/200712/human_sorting.html
Fonte: https://stackoverflow.com/questions/5967500/how-to-correctly-sort-a-string-with-a-number-inside
"""
import re
def atoi(text):
"""
    If the text is a digit, return it as an integer; otherwise return the value unchanged
"""
return int(text) if text.isdigit() else text
def natural_keys(text):
"""
    Sort key for human ("natural") alphabetical ordering
"""
return [atoi(c) for c in re.split(r'(\d+)', text)]
'''
# Example
a_list=[
"something1",
"something12",
"something17",
"something2",
"something25",
"something29"]
a_list.sort(key=natural_keys)
print(a_list)
'''
|
from .detection_tools import *
|
# author: Paul Galatic
#
# Program to benchmark graph coloring heuristics (greedy, differential
# evolution, particle swarm optimization, and firefly) on random graphs.
# STD LIB
import os
import copy
import time
import random
import argparse
import linecache
from pathlib import Path
# REQUIRED LIB
import numpy as np
import grinpy as gp
import networkx as nx
import tabulate as tab
# PROJECT LIB
import de as de
import pso as pso
import firefly as ff
# import aco as aco
from greedy_color import greedy_color
# CONSTANTS
TIME_FORMAT = '%H:%M:%S'
GRAPH_DIR = Path('graph/')
SML = 10
MED = 23
LRG = 100
NUM = 1000
SML_NAME = f'n{SML}.g6'
MED_NAME = f'n{MED}.g6'
LRG_NAME = f'n{LRG}.g6'
def log(s):
'''More informative print debugging'''
print('[%s]: %s' % (time.strftime(TIME_FORMAT, time.localtime()), str(s)))
def parse_args():
'''Parses arguments'''
ap = argparse.ArgumentParser()
ap.add_argument('mode', type=str,
help='[s|m|l] -- small, medium, or large')
ap.add_argument('n', type=int,
help='the number of graphs to analyze')
ap.add_argument('--gtskip', action='store_true',
help='set true to skip ground truth calculations')
return ap.parse_args()
def gen_rand_graphs(dim, theta=0.85):
'''Generates random graphs based on parameter theta'''
graphs = []
log(f'generating {NUM} Erdős-Rényi graphs of size {dim}...')
for _ in range(NUM):
# create a blank adjacency matrix
graph = np.zeros(shape=(dim, dim))
# add edges according to a random process
for idx in range(dim - 1):
            for idy in range(idx + 1, dim):
rand = random.random()
if rand <= theta:
# make sure the graphs are symmetrical
graph[idx][idy] = 1
graph[idy][idx] = 1
graphs.append(graph)
return [nx.from_numpy_array(graph) for graph in graphs]
def record_graphs(graphs, path):
'''writes graphs in g6 format to the designated filename'''
    for graph in graphs:
        # write the graph to a scratch file, then append its contents to `path`
        nx.write_graph6(graph, 'temp', header=False)
        with open('temp', 'r') as temp:
            with open(path, 'a') as out:
                out.write(''.join(temp.readlines()))
with open(path, 'r') as f:
log('Recorded %d graphs' % len(f.readlines()))
os.remove('temp')
def load_graphs(mode, num):
'''
Loads a random sample of graphs from the graphs file. Returns a mapping of
the graph to its chromatic number.
'''
log('loading graphs...')
if 's' in mode:
fname = str(GRAPH_DIR / SML_NAME)
elif 'm' in mode:
fname = str(GRAPH_DIR / MED_NAME)
elif 'l' in mode:
fname = str(GRAPH_DIR / LRG_NAME)
else:
raise Exception(f'Could not recognize mode: {mode}.')
with open(fname, 'r', newline='\n', encoding='ISO-8859-1') as f:
length = sum(1 for _ in f)
if num >= length:
num = length - 1
log(f'WARNING: Can only load up to {length} graphs from this file.')
with open(fname, 'r') as f:
lines = f.readlines()
raw_graphs = random.sample(lines, num)
# create temporary file so that nx can read it
temp_path = 'temp'
with open(temp_path, 'w', newline='\n') as f:
f.writelines(raw_graphs)
graphs = nx.read_graph6(temp_path)
os.remove(temp_path)
log(f'...{len(graphs)} graphs loaded.')
return graphs
def compute_chi(optim_module, graphs):
'''
Computes the set of best found chi numbers for a given graph
'''
results = dict()
for graph in graphs:
gbest = optim_module.Optimizer(graph=graph).run()
results[graph] = greedy_color(graph, gbest.get_order(gbest.position))
return results
def main():
'''Driver program'''
args = parse_args()
log('Starting...')
# load graphs, or generate them if they don't exist
if not os.path.exists(str(GRAPH_DIR)):
os.mkdir(str(GRAPH_DIR))
if not os.path.exists(str(GRAPH_DIR / SML_NAME)):
record_graphs(gen_rand_graphs(SML), str(GRAPH_DIR / SML_NAME))
if not os.path.exists(str(GRAPH_DIR / MED_NAME)):
record_graphs(gen_rand_graphs(MED), str(GRAPH_DIR / MED_NAME))
if not os.path.exists(str(GRAPH_DIR / LRG_NAME)):
record_graphs(gen_rand_graphs(LRG), str(GRAPH_DIR / LRG_NAME))
if args.n < 1:
log('Must select at least one graph.')
raise SystemExit
graphs = load_graphs(args.mode, args.n)
# ground truth graphs
start = time.time()
if args.gtskip:
gt_graphs = {graph: None for graph in graphs}
else:
log('Calculating ground truth...')
gt_graphs = {graph : gp.chromatic_number(graph) for graph in graphs}
log(f'{round(time.time() - start, 3)} seconds')
# color each graph with each algorithm
# each algorithm will predict the chi of the graph and this will form new
# mappings of graph -> chi
log('Calculating greedy colorings...')
start = time.time()
gr_graphs = {graph: greedy_color(graph) for graph in graphs}
log(f'{round(time.time() - start, 3)} seconds')
log('Calculating differential evolution colorings...')
start = time.time()
de_graphs = compute_chi(de, graphs)
log(f'{round(time.time() - start, 3)} seconds')
log('Calculating particle swarm optimization colorings...')
start = time.time()
pso_graphs = compute_chi(pso, graphs)
log(f'{round(time.time() - start, 3)} seconds')
log('Calculating firefly algorithm colorings...')
start = time.time()
ff_graphs = compute_chi(ff, graphs)
log(f'{round(time.time() - start, 3)} seconds')
# print results
table_1 = tab.tabulate(
zip(list(range(len(graphs))), gt_graphs.values(), gr_graphs.values(), de_graphs.values(), pso_graphs.values(),
ff_graphs.values()), headers=['id', 'truth', 'greedy', 'de', 'pso', 'firefly'])
log(f'\nChromatic numbers for graphs:\n{table_1}')
min_chi = min([min(gr_graphs.values()), min(de_graphs.values()), min(pso_graphs.values()), min(ff_graphs.values())])
max_chi = max([max(gr_graphs.values()), max(de_graphs.values()), max(pso_graphs.values()), max(ff_graphs.values())])
gr_modes = [list(gr_graphs.values()).count(idx) for idx in range(min_chi, max_chi + 1)]
de_modes = [list(de_graphs.values()).count(idx) for idx in range(min_chi, max_chi + 1)]
pso_modes = [list(pso_graphs.values()).count(idx) for idx in range(min_chi, max_chi + 1)]
firefly_mode = [list(ff_graphs.values()).count(idx) for idx in range(min_chi, max_chi + 1)]
table_2 = tab.tabulate(
zip(list(range(min_chi, max_chi + 1)), gr_modes, de_modes, pso_modes, firefly_mode),
headers=['chi', 'greedy', 'de', 'pso', 'firefly']
)
log(f'\nFrequency of chromatic numbers:\n{table_2}')
log('...finished.')
return 0
if __name__ == '__main__':
main()
|
# This file is part of PeachPy package and is licensed under the Simplified BSD license.
# See license.rst for the full text of the license.
import inspect
import peachpy.stream
from peachpy.x86_64.instructions import Instruction
from peachpy.x86_64.operand import check_operand, format_operand_type, is_r32, is_imm32
# Permitted pseudo-instructions:
#
# - [rep] cmps %nacl:(%rsi),%nacl:(%rdi),%rZP (sandboxed cmps)
# mov %esi,%esi
# lea (%rZP,%rsi,1),%rsi
# mov %edi,%edi
# lea (%rZP,%rdi,1),%rdi
# [rep] cmps (%rsi),(%rdi)
#
# - [rep] movs %nacl:(%rsi),%nacl:(%rdi),%rZP (sandboxed movs)
# mov %esi,%esi
# lea (%rZP,%rsi,1),%rsi
# mov %edi,%edi
# lea (%rZP,%rdi,1),%rdi
# [rep] movs (%rsi),(%rdi)
#
# - naclasp ...,%rZP (sandboxed stack increment)
# add ...,%esp
# add %rZP,%rsp
#
# - naclcall %eXX,%rZP (sandboxed indirect call)
# and $-32, %eXX
# add %rZP, %rXX
# call *%rXX
# Note: the assembler ensures all calls (including naclcall) will end at the bundle boundary.
#
# - nacljmp %eXX,%rZP (sandboxed indirect jump)
# and $-32,%eXX
# add %rZP,%rXX
# jmp *%rXX
#
# - naclrestbp ...,%rZP (sandboxed %ebp/rbp restore)
# mov ...,%ebp
# add %rZP,%rbp
#
# - naclrestsp ...,%rZP (sandboxed %esp/rsp restore)
# mov ...,%esp
# add %rZP,%rsp
#
# - naclrestsp_noflags ...,%rZP (sandboxed %esp/rsp restore)
# mov ...,%esp
# lea (%rsp,%rZP,1),%rsp
#
# - naclspadj $N,%rZP (sandboxed %esp/rsp restore from %rbp; includes $N offset)
# lea N(%rbp),%esp
# add %rZP,%rsp
#
# - naclssp ...,%rZP (sandboxed stack decrement)
#   sub ...,%esp
#   add %rZP,%rsp
#
# - [rep] scas %nacl:(%rdi),%?ax,%rZP (sandboxed scas)
#   mov %edi,%edi
#   lea (%rZP,%rdi,1),%rdi
#   [rep] scas (%rdi),%?ax
#
# - [rep] stos %?ax,%nacl:(%rdi),%rZP (sandboxed stos)
#   mov %edi,%edi
#   lea (%rZP,%rdi,1),%rdi
#   [rep] stos %?ax,(%rdi)
class NACLJMP(Instruction):
"""Sandboxed Indirect Jump"""
def __init__(self, *args, **kwargs):
"""Supported forms:
* NACLJMP(r32)
"""
origin = kwargs.get("origin")
prototype = kwargs.get("prototype")
if origin is None and prototype is None and peachpy.x86_64.options.get_debug_level() > 0:
origin = inspect.stack()
super(NACLJMP, self).__init__("NACLJMP", origin=origin, prototype=prototype)
self.operands = tuple(map(check_operand, args))
if len(self.operands) != 1:
raise SyntaxError("Instruction \"NACLJMP\" requires 1 operand")
self.in_regs = (True,)
self.out_regs = (False,)
self.out_operands = (True,)
self._gas_name = "nacljmp"
if not is_r32(self.operands[0]):
raise SyntaxError("Invalid operand types: NACLJMP " + ", ".join(map(format_operand_type, self.operands)))
if peachpy.stream.active_stream is not None:
peachpy.stream.active_stream.add_instruction(self)
def _lower(self):
from peachpy.stream import InstructionStream
from peachpy.x86_64.generic import AND, ADD, JMP
from peachpy.x86_64.registers import r15
with InstructionStream() as stream:
AND(self.operands[0], -32)
ADD(self.operands[0].as_qword, r15)
JMP(self.operands[0].as_qword)
return stream.instructions
def encode(self):
import operator
return bytearray().join(map(operator.methodcaller("encode"), self._lower()))
class NACLASP(Instruction):
"""Sandboxed RSP Increment (Addition)"""
def __init__(self, *args, **kwargs):
"""Supported forms:
* NACLASP(r32)
* NACLASP(imm32)
"""
origin = kwargs.get("origin")
prototype = kwargs.get("prototype")
if origin is None and prototype is None and peachpy.x86_64.options.get_debug_level() > 0:
origin = inspect.stack()
super(NACLASP, self).__init__("NACLASP", origin=origin, prototype=prototype)
self.operands = tuple(map(check_operand, args))
if len(self.operands) != 1:
raise SyntaxError("Instruction \"NACLASP\" requires 1 operand")
self.in_regs = (True,)
self.out_regs = (False,)
self.out_operands = (True,)
self._gas_name = "naclasp"
if not is_r32(self.operands[0]) and not is_imm32(self.operands[0]):
raise SyntaxError("Invalid operand types: NACLASP" + ", ".join(map(format_operand_type, self.operands)))
if peachpy.stream.active_stream is not None:
peachpy.stream.active_stream.add_instruction(self)
def _lower(self):
from peachpy.stream import InstructionStream
from peachpy.x86_64.generic import ADD
from peachpy.x86_64.registers import esp, rsp, r15
with InstructionStream() as stream:
ADD(esp, self.operands[0])
ADD(rsp, r15)
return stream.instructions
def encode(self):
import operator
return bytearray().join(map(operator.methodcaller("encode"), self._lower()))
class NACLSSP(Instruction):
"""Sandboxed RSP Decrement (Subtraction)"""
def __init__(self, *args, **kwargs):
"""Supported forms:
* NACLSSP(r32)
* NACLSSP(imm32)
"""
origin = kwargs.get("origin")
prototype = kwargs.get("prototype")
if origin is None and prototype is None and peachpy.x86_64.options.get_debug_level() > 0:
origin = inspect.stack()
super(NACLSSP, self).__init__("NACLSSP", origin=origin, prototype=prototype)
self.operands = tuple(map(check_operand, args))
if len(self.operands) != 1:
raise SyntaxError("Instruction \"NACLSSP\" requires 1 operand")
self.in_regs = (True,)
self.out_regs = (False,)
self.out_operands = (True,)
self._gas_name = "naclssp"
if not is_r32(self.operands[0]) and not is_imm32(self.operands[0]):
raise SyntaxError("Invalid operand types: NACLSSP" + ", ".join(map(format_operand_type, self.operands)))
if peachpy.stream.active_stream is not None:
peachpy.stream.active_stream.add_instruction(self)
def _lower(self):
from peachpy.stream import InstructionStream
from peachpy.x86_64.generic import SUB, ADD
from peachpy.x86_64.registers import esp, rsp, r15
with InstructionStream() as stream:
SUB(esp, self.operands[0])
ADD(rsp, r15)
return stream.instructions
def encode(self):
import operator
return bytearray().join(map(operator.methodcaller("encode"), self._lower()))
class NACLRESTSP(Instruction):
"""Sandboxed RSP Restore"""
def __init__(self, *args, **kwargs):
"""Supported forms:
* NACLRESTSP(r32)
"""
origin = kwargs.get("origin")
prototype = kwargs.get("prototype")
if origin is None and prototype is None and peachpy.x86_64.options.get_debug_level() > 0:
origin = inspect.stack()
super(NACLRESTSP, self).__init__("NACLRESTSP", origin=origin, prototype=prototype)
self.operands = tuple(map(check_operand, args))
if len(self.operands) != 1:
raise SyntaxError("Instruction \"NACLRESTSP\" requires 1 operand")
self.in_regs = (True,)
self.out_regs = (False,)
self.out_operands = (True,)
self._gas_name = "naclrestsp"
if is_r32(self.operands[0]):
pass
else:
raise SyntaxError("Invalid operand types: NACLRESTSP " + ", ".join(map(format_operand_type, self.operands)))
if peachpy.stream.active_stream is not None:
peachpy.stream.active_stream.add_instruction(self)
def _lower(self):
from peachpy.stream import InstructionStream
from peachpy.x86_64.generic import MOV, ADD
from peachpy.x86_64.registers import esp, rsp, r15
with InstructionStream() as stream:
MOV(esp, self.operands[0])
ADD(rsp, r15)
return stream.instructions
def encode(self):
import operator
return bytearray().join(map(operator.methodcaller("encode"), self._lower()))
class NACLRESTBP(Instruction):
"""Sandboxed RBP Restore"""
def __init__(self, *args, **kwargs):
"""Supported forms:
* NACLRESTBP(r32)
"""
origin = kwargs.get("origin")
prototype = kwargs.get("prototype")
if origin is None and prototype is None and peachpy.x86_64.options.get_debug_level() > 0:
origin = inspect.stack()
super(NACLRESTBP, self).__init__("NACLRESTBP", origin=origin, prototype=prototype)
self.operands = tuple(map(check_operand, args))
if len(self.operands) != 1:
raise SyntaxError("Instruction \"NACLRESTBP\" requires 1 operand")
self.in_regs = (True,)
self.out_regs = (False,)
self.out_operands = (True,)
self._gas_name = "naclrestbp"
if is_r32(self.operands[0]):
pass
else:
raise SyntaxError("Invalid operand types: NACLRESTBP " + ", ".join(map(format_operand_type, self.operands)))
if peachpy.stream.active_stream is not None:
peachpy.stream.active_stream.add_instruction(self)
def _lower(self):
from peachpy.stream import InstructionStream
from peachpy.x86_64.generic import MOV, ADD
from peachpy.x86_64.registers import ebp, rbp, r15
with InstructionStream() as stream:
MOV(ebp, self.operands[0])
ADD(rbp, r15)
return stream.instructions
def encode(self):
import operator
return bytearray().join(map(operator.methodcaller("encode"), self._lower()))
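# A hedged usage sketch (added for illustration): inside a PeachPy function
# body these pseudo-instructions expand to their sandboxed sequences via
# _lower(), e.g. (register names are from peachpy.x86_64.registers):
#
#   NACLJMP(eax)   # lowers to: AND eax, -32 ; ADD rax, r15 ; JMP rax
#   NACLASP(32)    # lowers to: ADD esp, 32  ; ADD rsp, r15
#
# encode() simply concatenates the encodings of the lowered instructions.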
|
from datetime import datetime, timedelta
import pytz
import pytest
from src.events import Events
from src.stores import MemoryStore
from src.session import Session
def test_get_event():
store = MemoryStore()
events = Events(store)
session = Session({}, store, '')
start = datetime.now(pytz.timezone("America/New_York"))
dur = timedelta(hours=1)
events = events.add('test', 'test', 30, start, dur, 'test', 'test',
'test@test.com', 'test')
with pytest.raises(Exception):
session.get_event('')
event_dict = session.get_event('test')
assert event_dict
assert 'event' in event_dict
|
import ns.core
import ns.internet
import ns.network
import numpy as np
from src.simulator.internet.sender import Sender
from src.simulator.internet.receiver import Receiver
class Communicator:
def __init__(self, ns_node, id=-1, offline_params={}, protocol='tcp', verbose=False,
global_comm_matrix=None):
self.ns_node = ns_node
self.protocol = protocol
self.verbose = verbose
self.offline_params = offline_params
self.global_comm_matrix = global_comm_matrix
self._sink_socket_list = []
self._source_socket_list = []
self._app_sender_dict = {}
self._app_receiver_dict = {}
self._upstream_app_sender_dict = {}
self._is_offline = False
self._id = id
self._next_phase = 1
def __del__(self):
self.reset()
def get_id(self):
return self._id
def reset(self):
for sink_socket in self._sink_socket_list:
sink_socket.Close()
self._sink_socket_list.clear()
for source_socket in self._source_socket_list:
source_socket.Close()
self._source_socket_list.clear()
self._app_sender_dict = {}
self._app_receiver_dict = {}
self._is_offline = False
self._id = -1
def get_offline_duration(self):
return self.offline_params.get("duration", 0)
def is_offline(self):
return self._is_offline
def get_app_receiver_dict(self):
return self._app_receiver_dict
def get_app_sender_dict(self):
return self._app_sender_dict
def get_current_time(self):
current_time = 0
for sender in self._app_sender_dict.values():
current_time = max(current_time, sender.get_current_time())
for receiver in self._app_receiver_dict.values():
current_time = max(current_time, receiver.get_current_time())
return current_time
def is_finished(self):
sender_states = [sender.is_finished() for sender in self._app_sender_dict.values()]
receiver_states = [receiver.is_finished() for receiver in self._app_receiver_dict.values()]
return np.all(sender_states) and np.all(receiver_states)
def add_app_receiver(self, comm_id, phase_rx_size, phases, port, start_time=0, stop_time=None):
sink_node = self.ns_node
sink_socket = ns.network.Socket.CreateSocket(sink_node, ns.core.TypeId.LookupByName("ns3::{:s}SocketFactory".
format(self.protocol.capitalize())))
sink_address = ns.network.InetSocketAddress(ns.network.Ipv4Address.GetAny(), port)
app_receiver = Receiver(communicator=self)
app_receiver.Setup(sink_socket, sink_address, phase_rx_size, phases=phases,
protocol=self.protocol, verbose=self.verbose, id=comm_id)
sink_node.AddApplication(app_receiver)
app_receiver.SetStartTime(ns.core.Seconds(start_time))
if stop_time is not None:
app_receiver.SetStopTime(ns.core.Seconds(stop_time))
self._sink_socket_list.append(sink_socket)
self._app_receiver_dict[comm_id] = app_receiver
return app_receiver
def add_app_sender(self, comm_id, dst_node, phase_rx_size, phases, port, packet_size, data_rate,
start_time=0, stop_time=None):
src_node = self.ns_node
ipv4 = dst_node.GetObject(ns.internet.Ipv4.GetTypeId())
ipv4_int_addr = ipv4.GetAddress(1, 0)
ip_addr = ipv4_int_addr.GetLocal()
sink_address = ns.network.InetSocketAddress(ip_addr, port)
source_socket = ns.network.Socket.CreateSocket(src_node, ns.core.TypeId.LookupByName("ns3::{:s}SocketFactory".
format(self.protocol.capitalize())))
app_sender = Sender(communicator=self)
app_sender.Setup(source_socket, sink_address, packet_size, phase_rx_size,
ns.network.DataRate("{:f}bps".format(data_rate)), phases=phases,
verbose=self.verbose, id=comm_id)
src_node.AddApplication(app_sender)
app_sender.SetStartTime(ns.core.Seconds(start_time))
if stop_time is not None:
app_sender.SetStopTime(ns.core.Seconds(stop_time))
self._source_socket_list.append(source_socket)
self._app_sender_dict[comm_id] = app_sender
return app_sender
def associate_upstream_app_sender(self, comm_id, sender):
self._upstream_app_sender_dict[comm_id] = sender
def offline_operation(self):
if self._is_offline:
return
# communicator itself and its predecessors will stop sending data
if self.verbose:
print("# At time %.6f node %d is offline in %d-th phase" %
(ns.core.Simulator.Now().GetSeconds(), self._id, self._next_phase-1))
self.deactivate_local_senders()
self.deactivate_upstream_senders()
self._is_offline = True
def online_operation(self):
if not self._is_offline:
return
# communicator itself and its predecessors will start sending data
if self.verbose:
print("@ At time %.6f node %d is online in %d-th phase" %
(ns.core.Simulator.Now().GetSeconds(), self._id, self._next_phase-1))
self._is_offline = False
self.activate_local_senders()
self.activate_upstream_senders()
# self.send_message()
# self.inform_upstream_send_message()
    def switch_offline(self, current_phase):
        p = np.random.random()
        is_offline = self.offline_params.get('probability') is not None and p < self.offline_params['probability']
        if is_offline:
            # go offline now and come back online after the configured duration
            self.offline_operation()
            online_time = ns.core.Time(ns.core.Seconds(self.get_offline_duration()))
            ns.core.Simulator.Schedule(online_time, self.online_operation)
            # UNBLOCKING: offline nodes will not participate in the updating of the current phase
# for sender in self._app_sender_list:
# self.update_global_comm_matrix(current_phase, sender.get_id())
# else:
# self.online_operation(current_phase)
return is_offline
def update_phase(self):
update = np.all([receiver.get_current_phase() >= self._next_phase for receiver
in self._app_receiver_dict.values()])
if update:
if self.verbose:
print("Node %d entered %d-th phase" % (self._id, self._next_phase))
self.generate_message()
self.send_message()
self._next_phase += 1
def get_lagging_communicator_ids(self):
lagging_list = []
for comm_id in self._app_receiver_dict:
if self._app_receiver_dict[comm_id].get_current_phase() < self._next_phase:
lagging_list.append(comm_id)
return lagging_list
def generate_message(self, message=None):
if self.verbose:
print("Node %d generate new message" % self._id)
message = self._next_phase if message is None else message
for sender in self._app_sender_dict.values():
sender.add_message(message)
def send_message(self):
if self.is_offline():
return
if self.verbose:
print("Node %d send messages to neighbours" % self._id)
for sender in self._app_sender_dict.values():
sender.ScheduleTx()
def inform_upstream_send_message(self):
for sender in self._upstream_app_sender_dict.values():
sender.get_communicator().send_message()
def deactivate_upstream_senders(self):
for sender in self._upstream_app_sender_dict.values():
sender.deactivate()
def activate_upstream_senders(self):
for sender in self._upstream_app_sender_dict.values():
sender.activate()
def deactivate_local_senders(self):
for sender in self._app_sender_dict.values():
sender.deactivate()
def activate_local_senders(self):
for sender in self._app_sender_dict.values():
sender.activate()
# def abandon_data_from(self, comm):
# abandon_phase = -1
# receiver = self._app_receiver_dict.get(comm.get_id(), None)
# sender = comm.get_app_sender_dict().get(self._id, None)
# if receiver is not None and sender is not None:
# phase_a = sender.fast_forward()
# phase_b = receiver.fast_forward()
# assert phase_a == phase_b
# abandon_phase = phase_a
# return abandon_phase
def abandon_data_from(self, comm, abandon_phase=-1):
receiver = self._app_receiver_dict.get(comm.get_id(), None)
sender = comm.get_app_sender_dict().get(self._id, None)
if receiver is None or sender is None or len(sender.message_queue) == 0 or \
sender.get_current_phase() != receiver.get_current_phase():
return None
if abandon_phase >= 0 and (sender.get_current_phase() != abandon_phase or
receiver.get_current_phase() != abandon_phase):
return None
if self.verbose:
print("Node %d started to abandoned data from node %d" %
(self._id, comm.get_id()))
phase_a = sender.fast_forward()
phase_b = receiver.fast_forward()
# print(phase_a, phase_b)
assert phase_a == phase_b
return phase_a
def update_global_comm_matrix(self, phase, sender_id):
self.global_comm_matrix[phase][self._id, sender_id] += 1
|
# Copyright (C) 2017 Red Hat, Inc.
# Darn, this needs to go away!
import argparse
class _FixMap(object):
opt_to_conf = {}
conf_to_opt = {}
def set(self, opt, conf):
self.conf_to_opt[conf] = opt
self.opt_to_conf[opt] = conf
def __init__(self):
self.set('project-name', 'name')
def opt(self, conf):
return self.conf_to_opt.get(conf, conf)
def conf(self, opt):
return self.opt_to_conf.get(opt, opt)
_fix_map = _FixMap()
class FakeDistribution:
values = {}
    def __getattr__(self, key):
        # strip the "get_" prefix; lstrip() would strip characters, not the prefix
        if key.startswith('get_'):
            key = key[len('get_'):]
        key = key.replace('-', '_')
        if key not in self.values:
            return lambda: '<<UNSET --' + _fix_map.opt(key) + ' OPTION>>'
        return lambda: self.values[key]
class FakeCommand():
def __init__(self):
self.distribution = FakeDistribution()
def getAction(self):
parent = self
class A(argparse.Action):
def __call__(self, parser, namespace, values, option_string=None):
option_string = option_string.lstrip('--')
option_string = _fix_map.conf(option_string)
option_string = option_string.replace('-', '_')
parent.distribution.values[option_string] = values
return A
|
import threading
import logging
import json
class EventHandler(threading.Thread):
log = logging.getLogger("events.EventHandler")
    def __init__(self, event):
        # event has the form "<name> <json payload>"; split off the name once
        # (lstrip() strips characters, not a prefix, so it cannot be used here)
        self.event, payload = event.split(None, 1)
        self.data = json.loads(payload)
        threading.Thread.__init__(self, name="EventHandler for event: <%s>" % event)
|
"""LDAP Source tests"""
from unittest.mock import PropertyMock, patch
from django.test import TestCase
from authentik.core.models import User
from authentik.lib.generators import generate_key
from authentik.sources.ldap.models import LDAPPropertyMapping, LDAPSource
from authentik.sources.ldap.password import LDAPPasswordChanger
from authentik.sources.ldap.tests.mock_ad import mock_ad_connection
LDAP_PASSWORD = generate_key()
LDAP_CONNECTION_PATCH = PropertyMock(return_value=mock_ad_connection(LDAP_PASSWORD))
class LDAPPasswordTests(TestCase):
"""LDAP Password tests"""
def setUp(self):
self.source = LDAPSource.objects.create(
name="ldap",
slug="ldap",
base_dn="dc=goauthentik,dc=io",
additional_user_dn="ou=users",
additional_group_dn="ou=groups",
)
self.source.property_mappings.set(LDAPPropertyMapping.objects.all())
self.source.save()
@patch("authentik.sources.ldap.models.LDAPSource.connection", LDAP_CONNECTION_PATCH)
def test_password_complexity(self):
"""Test password without user"""
pwc = LDAPPasswordChanger(self.source)
self.assertFalse(pwc.ad_password_complexity("test")) # 1 category
self.assertFalse(pwc.ad_password_complexity("test1")) # 2 categories
self.assertTrue(pwc.ad_password_complexity("test1!")) # 2 categories
@patch("authentik.sources.ldap.models.LDAPSource.connection", LDAP_CONNECTION_PATCH)
def test_password_complexity_user(self):
"""test password with user"""
pwc = LDAPPasswordChanger(self.source)
user = User.objects.create(
username="test",
attributes={"distinguishedName": "cn=user,ou=users,dc=goauthentik,dc=io"},
)
self.assertFalse(pwc.ad_password_complexity("test", user)) # 1 category
self.assertFalse(pwc.ad_password_complexity("test1", user)) # 2 categories
self.assertTrue(pwc.ad_password_complexity("test1!", user)) # 2 categories
self.assertFalse(pwc.ad_password_complexity("erin!qewrqewr", user)) # displayName token
self.assertFalse(pwc.ad_password_complexity("hagens!qewrqewr", user)) # displayName token
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
This script interpolates across gaps in a column.
"""
import sys
import numpy as np
import pandas as pd
def data_interpolate(data, column_reference, column_in, column_out):
"""
Interpolates across gaps in a column
"""
reference = data[column_reference]
values = data[column_in]
nans = np.isnan(values)
values = np.interp(reference[nans], reference[~nans], values[~nans])
data.loc[nans, column_out] = values
return data
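# A hedged usage sketch (added for illustration; column names are made up):
# gaps in 'value' are filled by linear interpolation against 'time'.
#
#   df = pd.DataFrame({"time": [0, 1, 2, 3], "value": [0.0, np.nan, 2.0, 3.0]})
#   df = data_interpolate(df, "time", "value", "value")
#   df["value"].tolist()  # -> [0.0, 1.0, 2.0, 3.0]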
if __name__ == '__main__':
"""
Main entry point into the script.
"""
    if len(sys.argv) < 6:
print('USAGE: csv_interpolate.py CSV_FILE_IN CSV_FILE_OUT COLUMN_REFERENCE COLUMN_IN COLUMN_OUT') # noqa: E501 pylint: disable=C0301
else:
file_in = sys.argv[1]
file_out = sys.argv[2]
column_reference = sys.argv[3]
column_in = sys.argv[4]
column_out = sys.argv[5]
data = pd.read_csv(file_in, low_memory=False)
data = data_interpolate(data, column_reference, column_in, column_out)
data.to_csv(file_out, index=False)
|
# Copyright 2021 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
from mmdet.core import bbox2result, bbox_mapping_back
from ..builder import DETECTORS
from .single_stage import SingleStageDetector
@DETECTORS.register_module()
class CornerNet(SingleStageDetector):
"""CornerNet.
This detector is the implementation of the paper `CornerNet: Detecting
Objects as Paired Keypoints <https://arxiv.org/abs/1808.01244>`_ .
"""
def __init__(self,
backbone,
neck,
bbox_head,
train_cfg=None,
test_cfg=None,
pretrained=None):
super(CornerNet, self).__init__(backbone, neck, bbox_head, train_cfg,
test_cfg, pretrained)
def merge_aug_results(self, aug_results, img_metas):
"""Merge augmented detection bboxes and score.
Args:
aug_results (list[list[Tensor]]): Det_bboxes and det_labels of each
image.
img_metas (list[list[dict]]): Meta information of each image, e.g.,
image size, scaling factor, etc.
Returns:
tuple: (bboxes, labels)
"""
recovered_bboxes, aug_labels = [], []
for bboxes_labels, img_info in zip(aug_results, img_metas):
img_shape = img_info[0]['img_shape'] # using shape before padding
scale_factor = img_info[0]['scale_factor']
flip = img_info[0]['flip']
bboxes, labels = bboxes_labels
bboxes, scores = bboxes[:, :4], bboxes[:, -1:]
bboxes = bbox_mapping_back(bboxes, img_shape, scale_factor, flip)
recovered_bboxes.append(torch.cat([bboxes, scores], dim=-1))
aug_labels.append(labels)
bboxes = torch.cat(recovered_bboxes, dim=0)
labels = torch.cat(aug_labels)
if bboxes.shape[0] > 0:
out_bboxes, out_labels = self.bbox_head._bboxes_nms(
bboxes, labels, self.bbox_head.test_cfg)
else:
out_bboxes, out_labels = bboxes, labels
return out_bboxes, out_labels
def aug_test(self, imgs, img_metas, rescale=False):
"""Augment testing of CornerNet.
Args:
imgs (list[Tensor]): Augmented images.
img_metas (list[list[dict]]): Meta information of each image, e.g.,
image size, scaling factor, etc.
rescale (bool): If True, return boxes in original image space.
Default: False.
Note:
            ``imgs`` must include flipped image pairs.
Returns:
list[list[np.ndarray]]: BBox results of each image and classes.
The outer list corresponds to each image. The inner list
corresponds to each class.
"""
img_inds = list(range(len(imgs)))
assert img_metas[0][0]['flip'] + img_metas[1][0]['flip'], (
'aug test must have flipped image pair')
aug_results = []
for ind, flip_ind in zip(img_inds[0::2], img_inds[1::2]):
img_pair = torch.cat([imgs[ind], imgs[flip_ind]])
x = self.extract_feat(img_pair)
outs = self.bbox_head(x)
bbox_list = self.bbox_head.get_bboxes(
*outs, [img_metas[ind], img_metas[flip_ind]], False, False)
aug_results.append(bbox_list[0])
aug_results.append(bbox_list[1])
bboxes, labels = self.merge_aug_results(aug_results, img_metas)
bbox_results = bbox2result(bboxes, labels, self.bbox_head.num_classes)
return [bbox_results]
|
# Generated by Django 2.0.13 on 2019-04-07 15:37
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('users', '0001_initial'),
]
operations = [
migrations.AddField(
model_name='user',
name='address',
field=models.CharField(blank=True, max_length=255, null=True),
),
migrations.AddField(
model_name='user',
name='applicant_number',
field=models.CharField(blank=True, max_length=80, null=True),
),
migrations.AddField(
model_name='user',
name='coupon_history',
field=models.CharField(blank=True, default='', max_length=500, null=True),
),
migrations.AddField(
model_name='user',
name='cumulative_pay_amount',
field=models.CharField(blank=True, default=0, max_length=80, null=True),
),
migrations.AddField(
model_name='user',
name='cumulative_usage_count',
field=models.CharField(blank=True, max_length=80, null=True),
),
migrations.AddField(
model_name='user',
name='fcm_pushtoken',
field=models.TextField(blank=True, null=True),
),
migrations.AddField(
model_name='user',
name='is_leave',
field=models.CharField(blank=True, choices=[('정상 회원', '정상 회원'), ('탈퇴한 회원', '탈퇴한 회원')], max_length=80, null=True),
),
migrations.AddField(
model_name='user',
name='note',
field=models.CharField(blank=True, max_length=2000, null=True),
),
migrations.AddField(
model_name='user',
name='phone',
field=models.CharField(blank=True, max_length=140, null=True),
),
migrations.AddField(
model_name='user',
name='point',
field=models.PositiveSmallIntegerField(blank=True, default=5000, null=True),
),
migrations.AddField(
model_name='user',
name='postalcode',
field=models.CharField(blank=True, max_length=80, null=True),
),
migrations.AddField(
model_name='user',
name='signature_image',
field=models.CharField(blank=True, max_length=80, null=True),
),
migrations.AddField(
model_name='user',
name='social_id',
field=models.CharField(blank=True, max_length=80, null=True),
),
migrations.AddField(
model_name='user',
name='social_number',
field=models.CharField(blank=True, max_length=80, null=True),
),
]
|
# coding=utf-8
# Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import (absolute_import, division, generators, nested_scopes, print_function,
unicode_literals, with_statement)
from collections import defaultdict
def get_jar_infos(ivy_products, confs=None):
"""Returns a list of dicts containing the paths of various jar file resources.
Keys include 'default' (normal jar path), 'sources' (path to source jar), and 'javadoc'
(path to doc jar). None of them are guaranteed to be present, but 'sources' and 'javadoc'
will never be present if 'default' isn't.
:param ivy_products: ivy_jar_products data from a context
:param confs: List of key types to return (eg ['default', 'sources']). Just returns 'default' if
left unspecified.
:returns mapping of IvyModuleRef --> {'default' : [<jar_filenames>],
'sources' : [<jar_filenames>],
'javadoc' : [<jar_filenames>]}
"""
confs = confs or ['default']
classpath_maps = defaultdict(dict)
if ivy_products:
for conf, info_group in ivy_products.items():
if conf not in confs:
continue # We don't care about it.
for info in info_group:
for module in info.modules_by_ref.values():
if module.artifacts:
classpath_maps[module.ref][conf] = [artifact.path for artifact in module.artifacts]
return classpath_maps
|
class Player:
    # This class tracks a player's statistics parsed from the killfeed
# def __init__(self, name, rounds, kills, deaths, team_kills, opening_kills, opening_deaths, clutches,
# plants, defuses, trades, headshot):
# self.trades = trades
# self.defuses = defuses
# self.plants = plants
# self.clutches = clutches
# self.opening_deaths = opening_deaths
# self.opening_kills = opening_kills
# self.team_kills = team_kills
# self.deaths = deaths
# self.kills = kills
# self.headshot = headshot
# self.rounds = rounds
# self.name = name
def __init__(self, name):
self.trades = 0
self.defuses = 0
self.plants = 0
self.clutches = 0
self.opening_deaths = 0
self.opening_kills = 0
self.team_kills = 0
self.deaths = 0
self.kills = 0
self.headshot = 0
self.rounds = 1
self.name = name
def __eq__(self, other):
"""Overrides the default implementation"""
if isinstance(other, Player):
return self.name == other.name
return NotImplemented
    def __hash__(self):
        """Hash by name to stay consistent with __eq__, which compares players by name"""
        return hash(self.name)
    def __repr__(self):
        return "name: %s kills: %s deaths: %s opening_kills: %s opening_deaths: %s trades: %s" % (
            self.name, self.kills, self.deaths, self.opening_kills, self.opening_deaths, self.trades)
|
from account.models import Team, RoleEnvironment
from logical.models import Database
def databases_by_env(qs, teams):
roles = [team.role for team in teams]
role_environments = RoleEnvironment.objects.filter(
role__in=[role.id for role in roles]
).distinct()
environments = []
for role_env in role_environments:
environments.extend(role_env.environments.all())
return qs.filter(environment__in=[env.id for env in environments])
def can_access_database(database, teams):
qs = Database.objects.filter(id=database.id)
return databases_by_env(qs, teams)
|
__all__ = ["trace_with", "val_diffs", "val_range"]
from numbers import Real
import sys
try:
from collections.abc import Mapping, Sequence, Set
from itertools import zip_longest
except ImportError:
from collections import Mapping, Sequence, Set
from itertools import izip_longest as zip_longest
def trace_with(trace, func, *args, **kwds):
old_trace = sys.gettrace()
try:
sys.settrace(trace)
return func(*args, **kwds)
finally:
sys.settrace(old_trace)
def val_diffs(name, old_value, value, sentinel=None):
if old_value is value or old_value == value:
return []
if old_value is sentinel:
return [{"type": "set", "name": name, "to": repr(value)}]
if value is sentinel:
return [{"type": "del", "name": name, "from": repr(old_value)}]
sentinel = object()
if (
isinstance(old_value, Sequence)
and isinstance(value, Sequence)
and not (isinstance(value, str) or isinstance(old_value, str))
):
diffs = []
for n, (i, j) in enumerate(zip_longest(old_value, value, fillvalue=sentinel)):
diffs.extend(val_diffs("{}[{!r}]".format(name, n), i, j, sentinel))
return diffs
if isinstance(old_value, Mapping) and isinstance(value, Mapping):
diffs = []
for i in set(old_value) | set(value):
diffs.extend(
val_diffs(
"{}[{!r}]".format(name, i),
old_value.get(i, sentinel),
value.get(i, sentinel),
sentinel,
)
)
return diffs
if isinstance(old_value, Set) and isinstance(value, Set):
diffs = []
added = {i for i in value if i not in old_value}
if added:
diffs.append({"type": "add", "name": name, "added": list(map(repr, added))})
removed = {i for i in old_value if i not in value}
if removed:
diffs.append(
{"type": "remove", "name": name, "removed": list(map(repr, removed))}
)
return diffs
return [
{"type": "change", "name": name, "from": repr(old_value), "to": repr(value)}
]
def val_range(values):
values = tuple(values)
types = sorted({type(value).__name__ for value in values})
if all(isinstance(value, Real) for value in values):
return {
"type": "real",
"types": types,
"min": str(min(values)),
"max": str(max(values)),
}
else:
return {"type": "other", "types": types}
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
create_official_data.py
A script to extract:
(1) conversation from Reddit file dumps (originally downloaded from https://files.pushshift.io/reddit/daily/)
(2) grounded data ("facts") extracted from the web, respecting robots.txt
Authors: Michel Galley and Sean Gao
"""
import sys
import time
import os.path
import re
import argparse
import traceback
import json
import bz2
import pickle
import nltk
import urllib.request
import urllib.robotparser
import hashlib
from bs4 import BeautifulSoup
from bs4.element import NavigableString
from bs4.element import CData
from multiprocessing import Pool
from nltk.tokenize import TweetTokenizer
from commoncrawl import CommonCrawl
parser = argparse.ArgumentParser()
parser.add_argument("--rsinput", help="Submission (RS) file to load.")
parser.add_argument("--rcinput", help="Comments (RC) file to load.")
parser.add_argument("--test", help="Hashes of test set convos.", default="")
parser.add_argument("--facts", help="Facts file to create.")
parser.add_argument("--convos", help="Convo file to create.")
parser.add_argument("--pickle", help="Pickle that contains conversations and facts.", default="data.pkl")
parser.add_argument("--subreddit_filter", help="List of subreddits (inoffensive, safe for work, etc.)")
parser.add_argument("--domain_filter", help="Filter on subreddits and domains.")
parser.add_argument("--nsubmissions", help="Number of submissions to process (< 0 means all)", default=-1, type=int)
parser.add_argument("--min_fact_len", help="Minimum number of tokens in each fact (reduce noise in html).", default=0, type=int)
parser.add_argument("--min_res_len", help="Min number of characters in response.", default=2, type=int)
parser.add_argument("--max_res_len", help="Max number of characters in response.", default=280, type=int)
parser.add_argument("--max_context_len", help="Max number of words in context.", default=200, type=int)
parser.add_argument("--max_depth", help="Maximum length of conversation.", default=5, type=int)
parser.add_argument("--mincomments", help="Minimum number of comments per submission.", default=10, type=int)
parser.add_argument("--minscore", help="Minimum score of each comment.", default=1, type=int)
parser.add_argument("--delay", help="Seconds of delay when crawling web pages", default=0, type=int)
parser.add_argument("--tokenize", help="Whether to tokenize facts and conversations.", default=True, type=bool)
parser.add_argument("--anchoronly", help="Filter out URLs with no named anchors.", default=False, type=bool)
parser.add_argument("--use_robots_txt", help="Whether to respect robots.txt (disable this only if urls have been previously checked by other means!)", default=True, type=bool)
parser.add_argument("--use_cc", help="Whether to download pages from Common Crawl.", default=False, type=bool)
parser.add_argument("--dryrun", help="Just collect stats about data; don't create any data.", default=False, type=bool)
parser.add_argument("--blind", help="Don't print out responses.", default=False, type=bool)
args = parser.parse_args()
fields = [ "id", "subreddit", "score", "num_comments", "domain", "title", "url", "permalink" ]
important_tags = ['title', 'h1', 'h2', 'h3', 'h4', 'h5', 'h6', 'hr', 'p']
notext_tags = ['script', 'style']
deleted_str = '[deleted]'
undisclosed_str = '__UNDISCLOSED__'
batch_download_facts = False
robotparsers = {}
tokenizer = TweetTokenizer(preserve_case=False)
cc = CommonCrawl(-2)
def get_subreddit(submission):
return submission["subreddit"]
def get_domain(submission):
return submission["domain"]
def get_url(submission):
return submission["url"]
def get_submission_text(submission):
return submission["title"]
def get_permalink(submission):
return submission["permalink"]
def get_submission_id(submission):
return submission["id"]
def get_comment_id(comment):
return "t1_" + comment["id"]
#return comment["name"]
def get_parent_comment_id(comment):
return comment["parent_id"]
def get_text(comment):
return comment["body"]
def get_user(comment):
return comment["author"]
def get_score(comment):
return comment["score"]
def get_linked_submission_id(comment):
return comment["link_id"].split("_")[1]
def get_anchor(url):
pos = url.find("#")
if (pos > 0):
label = url[pos+1:]
label = label.strip()
return label
return ""
def filter_submission(submission):
"""Determines whether to filter out this submission (over-18, deleted user, etc.)."""
if submission["num_comments"] < args.mincomments:
return True
if "num_crossposts" in submission and submission["num_crossposts"] > 0:
return True
if "locked" in submission and submission["locked"]:
return True
if "over-18" in submission and submission["over_18"]:
return True
if "brand_safe" in submission and not submission["brand_safe"]:
return True
if submission["distinguished"] != None:
return True
if "subreddit_type" in submission:
if submission["subreddit_type"] == "restricted": # filter only public
return True
if submission["subreddit_type"] == "archived":
return True
url = get_url(submission)
domain = get_domain(submission)
if domain.find("reddit.com") >= 0 or domain.find("twitter.com") >= 0 or domain.find("youtube.com") >= 0 or domain.find("youtube.com") >= 0 or domain.find("imgur.com") >= 0 or domain.find("flickr.com") >= 0 or domain.find("ebay.com") >= 0:
return True
if args.anchoronly and len(get_anchor(url)) <= 2:
return True
if url.find(" ") >= 0:
return True
if url.endswith("jpg") or url.endswith("gif") or url.endswith("png") or url.endswith("pdf"):
return True
return False
def norm_article(t):
"""Minimalistic processing with linebreaking."""
t = re.sub("\s*\n+\s*","\n", t)
t = re.sub(r'(</[pP]>)',r'\1\n', t)
t = re.sub("[ \t]+"," ", t)
t = t.strip()
return t
def norm_sentence(t):
"""Minimalistic processing: remove extra space characters."""
t = re.sub("[ \n\r\t]+", " ", t)
t = t.strip()
if args.tokenize:
t = " ".join(tokenizer.tokenize(t))
t = t.replace('[ deleted ]','[deleted]');
return t
def add_webpage(submission, year, month):
"""Retrive sentences ('facts') from submission["url"]. """
if args.use_cc:
return add_cc_webpage(submission, year, month)
return add_live_webpage(submission)
def add_cc_webpage(submission, year, month):
url = get_url(submission)
src, date = cc.download(url, year, month, False)
sys.stdout.flush()
if src == None:
src, date = cc.download(url, year, month, True)
sys.stdout.flush()
if src == None:
print("Can't fetch: [%s] submission month: [%s-%s]" % (url, year, month))
sys.stdout.flush()
return None
print("Fetching url: [%s] submission month: [%s-%s] commoncrawl date: [%s]" % (url, year, month, str(date)))
submission["source"] = src
return submission
def add_live_webpage(submission):
url = get_url(submission)
domain = get_domain(submission)
try:
if args.use_robots_txt:
if args.delay > 0:
time.sleep(args.delay)
if domain in robotparsers.keys():
rp = robotparsers[domain]
else:
rp = urllib.robotparser.RobotFileParser()
robotparsers[domain] = rp
rurl = "http://" + domain + "/robots.txt"
print("Fetching robots.txt: [%s]" % rurl)
rp.set_url(rurl)
rp.read()
if not rp.can_fetch("*", url):
print("Can't download url due to robots.txt: [%s] domain: [%s]" % (url, domain))
return None
print("Fetching url: [%s] domain: [%s]" % (url, domain))
u = urllib.request.urlopen(url)
src = u.read()
submission["source"] = src
return submission
except urllib.error.HTTPError:
return None
except urllib.error.URLError:
return None
except UnicodeEncodeError:
return None
except:
traceback.print_exc()
return None
def add_webpages(submissions, year, month):
    """Use a multiprocessing pool to retrieve multiple webpages at once."""
    print("Downloading %d pages:" % len(submissions))
    pool = Pool()
    # add_webpage() takes (submission, year, month), so use starmap
    submissions = pool.starmap(add_webpage, [(s, year, month) for s in submissions])
    print("\nDone.")
    return [s for s in submissions if s is not None]
def get_date(file_name):
m = re.search(r'(\d\d\d\d)-(\d\d)', file_name)
year = m.group(1)
month = m.group(2)
return year, month
def get_submissions(rs_file, subreddit_file, domain_file):
"""Return all submissions from a dump submission file rs_file (RS_*.bz2),
restricted to the subreddit+domain listed in filter_file."""
submissions = []
subreddit_dic = None
domain_dic = None
year, month = get_date(rs_file)
if subreddit_file != None:
with open(subreddit_file) as f:
subreddit_dic = dict([ (el.strip(), 1) for el in f.readlines() ])
if domain_file != None:
with open(domain_file) as f:
domain_dic = dict([ (el.strip(), 1) for el in f.readlines() ])
with bz2.open(rs_file, 'rt', encoding="utf-8") as f:
i = 0
for line in f:
try:
submission = json.loads(line)
if not filter_submission(submission):
subreddit = get_subreddit(submission)
domain = get_domain(submission)
scheck = subreddit_dic == None or subreddit in subreddit_dic
dcheck = domain_dic == None or domain in domain_dic
if scheck and dcheck:
s = dict([ (f, submission[f]) for f in fields ])
print("keeping: subreddit=%s\tdomain=%s" % (subreddit, domain))
if args.dryrun:
continue
if not batch_download_facts:
s = add_webpage(s, year, month)
submissions.append(s)
sys.stdout.flush()
sys.stderr.flush()
i += 1
if i == args.nsubmissions:
break
else:
print("skipping: subreddit=%s\tdomain=%s (%s %s)" % (subreddit, domain, scheck, dcheck))
pass
except json.decoder.JSONDecodeError:
pass
except Exception:
traceback.print_exc()
pass
if batch_download_facts:
        submissions = add_webpages(submissions, year, month)
else:
submissions = [s for s in submissions if s is not None]
return dict([ (get_submission_id(s), s) for s in submissions ])
def get_comments(rc_file, submissions):
"""Return all conversation triples from rc_file (RC_*.bz2),
restricted to given submissions."""
comments = {}
with bz2.open(rc_file, 'rt', encoding="utf-8") as f:
for line in f:
try:
comment = json.loads(line)
sid = get_linked_submission_id(comment)
if sid in submissions.keys():
comments[get_comment_id(comment)] = comment
except Exception:
traceback.print_exc()
pass
return comments
def load_data():
"""Load data either from a pickle file if it exists,
and otherwise from RC_* RS_* and directly from the web."""
if not os.path.isfile(args.pickle):
submissions = get_submissions(args.rsinput, args.subreddit_filter, args.domain_filter)
comments = get_comments(args.rcinput, submissions)
with open(args.pickle, 'wb') as f:
pickle.dump([submissions, comments], f, protocol=pickle.HIGHEST_PROTOCOL)
else:
with open(args.pickle, 'rb') as f:
[submissions, comments] = pickle.load(f)
return submissions, comments
def insert_escaped_tags(tags, label=None):
"""For each tag in "tags", insert contextual tags (e.g., <p> </p>) as escaped text
so that these tags are still there when html markup is stripped out."""
found = False
for tag in tags:
strs = list(tag.strings)
if len(strs) > 0:
if label != None:
l = label
else:
l = tag.name
strs[0].parent.insert(0, NavigableString("<"+l+">"))
strs[-1].parent.append(NavigableString("</"+l+">"))
found = True
return found
def save_facts(submissions, sids = None):
subs = {}
i = 0
if args.facts == '-':
return submissions
with open(args.facts, 'wt', encoding="utf-8") as f:
for id in sorted(submissions.keys()):
s = submissions[id]
url = get_url(s)
label = get_anchor(url)
print("Processing submission %s...\n\turl: %s\n\tanchor: %s\n\tpermalink: http://reddit.com%s" % (id, url, str(label), get_permalink(s)))
subs[id] = s
if sids == None or id in sids.keys():
b = BeautifulSoup(s["source"],'html.parser')
# If there is any anchor in the url, locate it in the facts:
if label != "":
if not insert_escaped_tags(b.find_all(True, attrs={"id": label}), 'anchor'):
print("\t(couldn't find anchor on page: %s)" % label)
# Remove tags whose text we don't care about (javascript, etc.):
for el in b(notext_tags):
el.decompose()
# Delete other unimportant tags, but keep the text:
for tag in b.findAll(True):
if tag.name not in important_tags:
tag.append(' ')
tag.replaceWithChildren()
# All tags left are important (e.g., <p>) so add them to the text:
insert_escaped_tags(b.find_all(True))
# Extract facts from html:
t = b.get_text(" ")
t = norm_article(t)
facts = []
for sent in filter(None, t.split("\n")):
if len(sent.split(" ")) >= args.min_fact_len:
facts.append(norm_sentence(sent))
for fact in facts:
out_str = "\t".join([get_subreddit(s), id, get_domain(s), fact])
hash_str = hashlib.sha224(out_str.encode("utf-8")).hexdigest()
f.write(hash_str + "\t" + out_str + "\n")
s["facts"] = facts
i += 1
if i == args.nsubmissions:
break
return subs
def get_convo(id, submissions, comments, depth=args.max_depth):
c = comments[id]
pid = get_parent_comment_id(c)
if pid in comments.keys() and depth > 0:
els = get_convo(pid, submissions, comments, depth-1)
else:
s = submissions[get_linked_submission_id(c)]
els = [ "START", norm_sentence(get_submission_text(s)) ]
els.append(norm_sentence(get_text(c)))
return els
def save_tuple(f, subreddit, sid, pos, user, context, message, response, score, test_hashes):
cwords = re.split("\s+", context)
mwords = re.split("\s+", message)
max_len = max(args.max_context_len, len(mwords)+1)
if len(cwords) > max_len:
ndel = len(cwords) - max_len
del cwords[:ndel]
context = "... " + " ".join(cwords)
if len(response) <= args.max_res_len and len(response) >= args.min_res_len and response != deleted_str and user != deleted_str and response.find(">") < 0:
if context.find(deleted_str) < 0:
if score >= args.minscore:
out_str = "\t".join([subreddit, sid, str(score), str(pos), context, response])
hash_str = hashlib.sha224(out_str.encode("utf-8")).hexdigest()
if test_hashes == None or hash_str in test_hashes.keys():
if args.blind:
## Note: there is no point in removing the '--blind' flag in order to peek at the reference responses (gold),
## as the organizers will rely on different responses to compute BLEU, etc.
out_str = "\t".join([subreddit, sid, str(score), str(pos), context, undisclosed_str])
f.write(hash_str + "\t" + out_str + "\n")
def save_tuples(submissions, comments, test_hashes):
has_firstturn = {}
with open(args.convos, 'wt', encoding="utf-8") as f:
for id in sorted(comments.keys()):
comment = comments[id]
user = get_user(comment)
score = get_score(comment)
sid = get_linked_submission_id(comment)
if sid in submissions.keys():
s = submissions[sid]
convo = get_convo(id, submissions, comments)
pos = len(convo) - 1
context = " EOS ".join(convo[:-1])
message = convo[-2]
response = convo[-1]
if len(convo) == 3 and not sid in has_firstturn.keys():
save_tuple(f, get_subreddit(s), sid, pos-1, "", convo[-3], convo[-3], message, 1, test_hashes)
has_firstturn[sid] = 1
save_tuple(f, get_subreddit(s), sid, pos, user, context, message, response, score, test_hashes)
def read_test_hashes(hash_file):
hashes = {}
with open(hash_file, 'r') as f:
for line in f:
hash_str = line.rstrip()
hashes[hash_str] = 1
return hashes
if __name__== "__main__":
test_hashes = None
if args.test != "":
test_hashes = read_test_hashes(args.test)
submissions, comments = load_data()
submissions = save_facts(submissions)
save_tuples(submissions, comments, test_hashes)
|
# -*- coding: utf-8 -*-
"""
Created on Tue May 29 16:08:35 2018
@author: Victor Onink
We have lon-lat positions for all the particles, but for making figures and such
it makes more sense to consider densities. Also, I want to be able to consider densities
for more than just the last time point, and instead of generating those each time
it makes more sense to just generate and save them once, so I can then call upon them
when I want to make figures or animations.
This is another example of my coding getting better so that I can improve my already existing
codes, which are a whole lot less efficient.
There will be similar scripts for the Pacific and global datasets of course.
"""
from netCDF4 import Dataset
import numpy as np
def AreaCalc(sizeLat,sizeLon): #Calculate surface area of grid cells
deg2rd = np.pi/180.
r=6378.1
lon_bins = np.linspace(0,360., sizeLon+1)
lat_bins = np.linspace(-80, 80, sizeLat+1)
Area=np.array([[deg2rd*(lon_bins[i+1]-lon_bins[i])*(np.sin(deg2rd*lat_bins[j+1])
- np.sin(deg2rd*lat_bins[j])) for i in range(len(lon_bins)-1)]
for j in range(len(lat_bins)-1)])
Area=r*r*Area*1000*1000 #convert it to m^2 instead of km^2
return Area
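# Quick sanity check of the formula above (illustrative numbers, not part of the original script):
# a cell spanning dlon radians of longitude between latitudes phi1 and phi2 on a sphere of radius
# R has area A = R**2 * dlon * (sin(phi2) - sin(phi1)). For a 1 x 1 degree cell at the equator
# with R = 6378.1 km this gives about 6378.1**2 * (pi/180) * sin(1 deg) ~ 1.24e4 km^2, i.e.
# ~1.24e10 m^2 after the km^2 -> m^2 conversion done in AreaCalc.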
def HistogramFunction(londata,latdata):
londata,latdata=londata.reshape(np.size(londata)),latdata.reshape(np.size(latdata))
binsLon=np.arange(-180,180)
binsLat=np.arange(-80,80)
density=np.zeros((len(binsLon),len(binsLat)))
for i in range(np.array(londata).shape[0]):
density[np.argmin(np.abs(londata[i]-binsLon)),np.argmin(np.abs(latdata[i]-binsLat))]+=1
#Now, normalize it by area
area=AreaCalc(len(binsLat),len(binsLon)).T
density/=area
density[density==0]=np.nan
return density
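# Usage sketch (assumptions about the inputs, not from the original script): given arrays of
# particle longitudes in [-180, 180) and latitudes in [-80, 80) (flattened internally), the
# function returns a (360, 160) array of particles per m^2 on a 1-degree grid, with empty
# cells set to NaN.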
#%%
File=[
      r'D:\Desktop\Thesis\ParcelsFigData\Data\North Atlantic\OutputFiles\Onink et al\AtlanticTotal3h.nc',
      r'D:\Desktop\Thesis\ParcelsFigData\Data\North Atlantic\OutputFiles\Onink et al\AtlanticTotal24h.nc',
      r'D:\Desktop\Thesis\ParcelsFigData\Data\North Atlantic\OutputFiles\Onink et al\AtlanticMeanTotal.nc',
      r'D:\Desktop\Thesis\ParcelsFigData\Data\North Atlantic\OutputFiles\Onink et al\NorthAtlanticEkman.nc',
      r'D:\Desktop\Thesis\ParcelsFigData\Data\North Atlantic\OutputFiles\Onink et al\AtlanticGeostrophic.nc',
#      r'D:\Desktop\Thesis\ParcelsFigData\Data\North Atlantic\OutputFiles\Onink et al\NorthAtlanticStoke.nc',
      r'D:\Desktop\Thesis\ParcelsFigData\Data\North Atlantic\OutputFiles\Onink et al\AtlanticStokeTotal3h.nc',
      r'D:\Desktop\Thesis\ParcelsFigData\Data\North Atlantic\OutputFiles\Onink et al\AtlanticStokeTotal.nc',
#      r'D:\Desktop\Thesis\ParcelsFigData\Data\North Atlantic\OutputFiles\Onink et al\NorthAtlanticStoke.nc'
]
saveFiles=[
'NorthAtlanticTotalDensity3h','NorthAtlanticTotalDensity24h','NorthAtlanticTotalMeanDensity',
'NorthAtlanticEkmanDensity','NorthAtlanticGeostrophicDensity',
# 'NorthAtlanticStokesDensity',
'NorthAtlanticStokesTotalDensity3h','NorthAtlanticStokesTotal24h',
]
location=r'D:\Desktop\Thesis\ParcelsFigData\Data\North Atlantic\OutputFiles\Onink et al\Densities/'
#location='/scratch/Victor/Densities/'
for i in range(len(File)):
    print(File[i])  # parenthesised so the script also runs under Python 3
dataset=Dataset(File[i])
lat=dataset.variables['lat'][:]
lon=dataset.variables['lon'][:]
time=dataset.variables['time'][:]
lon[lon>180]-=360
#Now, we want the last 5 years of particle positions, since in this time
#the garbage patch has largely been formed
if lon.shape[1]==4748:
lonLast=lon[:,-365*5:]
latLast=lat[:,-365*5:]
Time=time[:,-365*5:]
else:
lonLast=lon[:,-183*5:]
latLast=lat[:,-183*5:]
Time=time[:,-183*5:]
density=np.zeros((lonLast.shape[1],360,160))
for j in range(lonLast.shape[1]):
density[j,:,:]=HistogramFunction(lonLast[:,j],latLast[:,j])
density.dump(location+saveFiles[i])
Time.dump(location+saveFiles[i]+'Time')
#%%
#File=['D:\Desktop\Thesis\ParcelsFigData\Data\North Atlantic\OutputFiles\AtlanticWindage_1.nc',
# 'D:\Desktop\Thesis\ParcelsFigData\Data\North Atlantic\OutputFiles\AtlanticWindage_3.nc',
# 'D:\Desktop\Thesis\ParcelsFigData\Data\North Atlantic\OutputFiles\AtlanticWindage_5.nc']
File=[r'D:\Desktop\Thesis\ParcelsFigData\Data\North Atlantic\OutputFiles/Onink et al/AtlanticWindage0.01.nc',
      r'D:\Desktop\Thesis\ParcelsFigData\Data\North Atlantic\OutputFiles/Onink et al/AtlanticWindage0.03.nc',
      r'D:\Desktop\Thesis\ParcelsFigData\Data\North Atlantic\OutputFiles/Onink et al/AtlanticWindage0.05.nc']
saveFiles=['NorthAtlanticWindage1per','NorthAtlanticWindage3per','NorthAtlanticWindage5per']
location=r'D:\Desktop\Thesis\ParcelsFigData\Data\North Atlantic\OutputFiles/Onink et al/Densities/'
for i in range(len(File)):
    print(File[i])
dataset=Dataset(File[i])
lat=dataset.variables['lat'][:]
lon=dataset.variables['lon'][:]
time=dataset.variables['time'][:]
lon[lon>180]-=360
#Now, we want the last 5 years of particle positions, since in this time
#the garbage patch has largely been formed
if lon.shape[1]==4748:
lonLast=lon[:,-365*5:]
latLast=lat[:,-365*5:]
Time=time[:,-365*5:]
else:
lonLast=lon[:,-183*5:]
latLast=lat[:,-183*5:]
Time=time[:,-183*5:]
density=np.zeros((lonLast.shape[1],360,160))
for j in range(lonLast.shape[1]):
density[j,:,:]=HistogramFunction(lonLast[:,j],latLast[:,j])
density.dump(location+saveFiles[i])
Time.dump(location+saveFiles[i]+'Time')
#%%
File=[r'D:\Desktop\Thesis\ParcelsFigData\Data\North Atlantic\OutputFiles/Onink et al/AtlanticTotal3h_dt15m.nc',
      r'D:\Desktop\Thesis\ParcelsFigData\Data\North Atlantic\OutputFiles/Onink et al/AtlanticTotal3h_dt30m.nc',
      r'D:\Desktop\Thesis\ParcelsFigData\Data\North Atlantic\OutputFiles\Onink et al\AtlanticTotal24hBeach.nc',
      r'D:\Desktop\Thesis\ParcelsFigData\Data\North Atlantic\OutputFiles\Onink et al\AtlanticTotal24h.nc']
saveFiles=['AtlanticIntegration_30m','AtlanticIntegration_15m','AtlanticBeach','AtlanticNoBeach']
location=r'D:\Desktop\Thesis\ParcelsFigData\Data\North Atlantic\OutputFiles/Onink et al/Densities/'
for i in [2,3]:#range(len(File)):
    print(File[i])
dataset=Dataset(File[i])
lat=dataset.variables['lat'][:]
lon=dataset.variables['lon'][:]
time=dataset.variables['time'][:]
lon[lon>180]-=360
    #Remove all the beached particles, which requires them to be stuck for the last 10 output steps
for k in range(lon.shape[0]):
if lon[k,-1]==lon[k,-10]:
if lat[k,-1]==lat[k,-10]:
lat[k,:]=np.nan
lon[k,:]=np.nan
#Now, we want the last 5 years of particle positions, since in this time
#the garbage patch has largely been formed
if lon.shape[1]==4748:
lonLast=lon[:,-365*2:]
latLast=lat[:,-365*2:]
Time=time[:,-365*2:]
else:
lonLast=lon[:,-185:]
latLast=lat[:,-185:]
Time=time[:,-185:]
density=np.zeros((lonLast.shape[1],360,160))
for j in range(lonLast.shape[1]):
density[j,:,:]=HistogramFunction(lonLast[:,j],latLast[:,j])
density.dump(location+saveFiles[i])
# Time.dump(location+saveFiles[i]+'Time')
|
#!/usr/bin/env python
ANSIBLE_METADATA = {
"metadata_version": "1.2",
"status": ["preview"],
"supported_by": "community",
}
DOCUMENTATION = """
---
module: gns3_nodes_inventory
short_description: Retrieves a GNS3 project's nodes console information
version_added: '2.8'
description:
- "Retrieves nodes inventory information from a GNS3 project"
requirements: [ gns3fy ]
author:
- David Flores (@davidban77)
options:
url:
description:
- URL target of the GNS3 server
required: true
type: str
port:
description:
- TCP port to connect to server REST API
type: int
default: 3080
user:
description:
- User to connect to GNS3 server
type: str
password:
description:
- Password to connect to GNS3 server
type: str
project_name:
description:
- Project name
type: str
project_id:
description:
- Project ID
type: str
"""
EXAMPLES = """
# Retrieve the nodes inventory of a GNS3 project
- name: Get the nodes inventory
gns3_nodes_inventory:
url: http://localhost
port: 3080
project_name: test_lab
register: nodes_inventory
- debug: var=nodes_inventory
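# A project can also be targeted by its ID instead of its name (illustrative placeholder ID):
- name: Get the nodes inventory by project ID
  gns3_nodes_inventory:
    url: http://localhost
    port: 3080
    project_id: "11111111-2222-3333-4444-555555555555"
  register: nodes_inventory_by_id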
"""
RETURN = """
nodes_inventory:
    description: >
        Dictionary that contains the name, server, console_port, console_type,
        type and template of each node
    type: dict
total_nodes:
description: Total number of nodes
type: int
"""
import traceback
GNS3FY_IMP_ERR = None
try:
from gns3fy import Gns3Connector, Project, Node
HAS_GNS3FY = True
except ImportError:
GNS3FY_IMP_ERR = traceback.format_exc()
HAS_GNS3FY = False
from ansible.module_utils.basic import AnsibleModule, missing_required_lib
def main():
module = AnsibleModule(
argument_spec=dict(
url=dict(type="str", required=True),
port=dict(type="int", default=3080),
user=dict(type="str", default=None),
password=dict(type="str", default=None, no_log=True),
project_name=dict(type="str", default=None),
project_id=dict(type="str", default=None),
),
required_one_of=[["project_name", "project_id"]],
)
if not HAS_GNS3FY:
module.fail_json(msg=missing_required_lib("gns3fy"), exception=GNS3FY_IMP_ERR)
result = dict(changed=False, nodes_inventory=None, total_nodes=None)
server_url = module.params["url"]
server_port = module.params["port"]
server_user = module.params["user"]
server_password = module.params["password"]
project_name = module.params["project_name"]
project_id = module.params["project_id"]
# Create server session
server = Gns3Connector(
url="%s:%s" % (server_url, server_port), user=server_user, cred=server_password
)
# Define the project
if project_name is not None:
project = Project(name=project_name, connector=server)
elif project_id is not None:
project = Project(project_id=project_id, connector=server)
# Retrieve project info
project.get()
nodes_inventory = project.nodes
for _n in nodes_inventory:
result[_n.name] = dict(
project_id = _n.project_id,
node_id = _n.node_id,
compute_id = _n.compute_id,
node_type = _n.node_type,
# connector = _n.connector,
template_id = _n.template_id,
template = _n.template,
node_directory = _n.node_directory,
status = _n.status,
ports = _n.ports,
port_name_format = _n.port_name_format,
port_segment_size = _n.port_segment_size,
first_port_name = _n.first_port_name,
properties = _n.properties,
locked = _n.locked,
label = _n.label,
console = _n.console,
console_host = _n.console_host,
console_auto_start = _n.console_auto_start,
command_line = _n.command_line,
custom_adapters = _n.custom_adapters,
height = _n.height,
width = _n.width,
symbol = _n.symbol,
x = _n.x,
y = _n.y,
z = _n.z,
)
if isinstance(_n.properties, dict):
hd_image_names = {
"hda_disk_image": "hda_disk.qcow2",
"hdb_disk_image": "hdb_disk.qcow2",
"hdc_disk_image": "hdc_disk.qcow2",
"hdd_disk_image": "hdd_disk.qcow2",
}
for disk_image in hd_image_names:
if disk_image in _n.properties:
if _n.properties[disk_image] != "":
key = "_".join([disk_image, "real"])
_n.properties[key] = hd_image_names[disk_image]
    # populate the documented return value; it was initialized to None and never set
    result["total_nodes"] = len(nodes_inventory)
    module.exit_json(**result)
if __name__ == "__main__":
main()
|
import panther_event_type_helpers as event_type
def rule(event):
return event.udm("event_type") == event_type.MFA_DISABLED
def title(event):
# use unified data model field in title
return f"{event.get('p_log_type')}: User [{event.udm('actor_user')}] disabled MFA"
|
import math
import torch
import torch.nn as nn
from functools import partial
from .activated_batch_norm import ABN
from .activations import activation_from_name
# from pytorch_tools.modules import ABN
# from pytorch_tools.modules import activation_from_name
from pytorch_tools.modules import BlurPool
from pytorch_tools.modules import FastGlobalAvgPool2d
from pytorch_tools.utils.misc import make_divisible
from pytorch_tools.modules import SpaceToDepth
def conv3x3(in_planes, out_planes, stride=1, groups=1, dilation=1, bias=False):
"""3x3 convolution with padding"""
return nn.Conv2d(
in_planes,
out_planes,
kernel_size=3,
stride=stride,
padding=dilation,
groups=groups,
bias=bias,
dilation=dilation,
)
def conv1x1(in_planes, out_planes, stride=1, bias=False):
"""1x1 convolution"""
return nn.Conv2d(in_planes, out_planes, kernel_size=1, stride=stride, bias=bias)
class SEModule(nn.Module):
def __init__(self, channels, reduction_channels, norm_act="relu"):
super(SEModule, self).__init__()
self.pool = FastGlobalAvgPool2d()
# authors of original paper DO use bias
self.fc1 = conv1x1(channels, reduction_channels, bias=True)
self.act1 = activation_from_name(norm_act)
self.fc2 = conv1x1(reduction_channels, channels, bias=True)
def forward(self, x):
x_se = self.pool(x)
x_se = self.fc1(x_se)
x_se = self.act1(x_se)
x_se = self.fc2(x_se)
return x * x_se.sigmoid()
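# Shape sketch (illustrative, not part of the original module): SEModule pools away the spatial
# dims and re-weights channels, so the output keeps the input shape, e.g.
#   se = SEModule(channels=64, reduction_channels=16)
#   y = se(torch.randn(2, 64, 32, 32))  # y.shape == (2, 64, 32, 32)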
class SEVar3(nn.Module):
"""Variant of SE module from ECA paper (see above) which doesn't have dimensionality reduction"""
def __init__(self, channels, *args):
super().__init__()
self.pool = FastGlobalAvgPool2d()
# authors of original paper DO use bias
self.fc1 = conv1x1(channels, channels, bias=True)
def forward(self, x):
return x * self.fc1(self.pool(x)).sigmoid()
class ECAModule(nn.Module):
"""Efficient Channel Attention
This implementation is different from the paper. I've removed all hyperparameters and
use fixed kernel size of 3. If you think it may be better to use different k_size - feel free to open an issue.
upd. 12.01.2020 increase kernel size
Ref: ECA-Net: Efficient Channel Attention for Deep Convolutional Neural Networks
https://arxiv.org/abs/1910.03151
"""
def __init__(self, *args, kernel_size=3, **kwargs):
super().__init__()
self.pool = FastGlobalAvgPool2d()
self.conv = nn.Conv1d(1, 1, kernel_size=kernel_size, padding=kernel_size // 2, bias=False)
def forward(self, x):
x_s = self.pool(x)
x_s = self.conv(x_s.view(x.size(0), 1, -1))
x_s = x_s.view(x.size(0), -1, 1, 1).sigmoid()
return x * x_s.expand_as(x)
class SSEModule(nn.Module):
"""Spatial Excitation Block (sSE)
Attention which excites certain locations in spatial domain instead of channel. Works better for segmentation than SE
Ref: Recalibrating Fully Convolutional Networks with Spatial and Channel ‘Squeeze & Excitation’ Blocks
https://arxiv.org/abs/1808.08127
"""
    def __init__(self, in_ch, *args):  # parse additional args for compatibility
super().__init__()
self.conv = conv1x1(in_ch, 1, bias=True)
def forward(self, x):
return x * self.conv(x).sigmoid()
class SCSEModule(nn.Module):
"""Idea from Spatial and Channel ‘Squeeze & Excitation’ (scSE)
ECA is proven to work better than (c)SE so i'm using ECA + sSE instead of original cSE + sSE
NOTE: This modules also performs additional conv to return the same number of channels as before
Ref: Recalibrating Fully Convolutional Networks with Spatial and Channel ‘Squeeze & Excitation’ Blocks
https://arxiv.org/abs/1808.08127
Ref: ECA-Net: Efficient Channel Attention for Deep Convolutional Neural Networks
https://arxiv.org/abs/1910.03151
"""
    def __init__(self, in_ch, *args):  # parse additional args for compatibility
super().__init__()
self.sse = SSEModule(in_ch)
self.cse = ECAModule()
self.reduction_conv = conv1x1(in_ch * 2, in_ch, bias=True) # use bias because there is no BN after
def forward(self, x):
return self.reduction_conv(torch.cat([self.sse(x), self.cse(x)], dim=1))
class MSCAMModule(nn.Module):
"""Idea from Attentional Feature Fusion (MS-CAM)
Combines global and local attention in a better manner than scSE
"""
    def __init__(self, in_ch, reduced_ch, norm_layer=ABN, norm_act="relu"):  # parse additional args for compatibility
super().__init__()
self.global_attn = nn.Sequential(
FastGlobalAvgPool2d(),
conv1x1(in_ch, reduced_ch),
norm_layer(reduced_ch, activation=norm_act),
conv1x1(reduced_ch, in_ch),
norm_layer(in_ch, activation="identity"), # no last activation
)
        # local (per-pixel) attention branch
self.local_attn = nn.Sequential(
conv1x1(in_ch, reduced_ch),
norm_layer(reduced_ch, activation=norm_act),
conv1x1(reduced_ch, in_ch),
norm_layer(in_ch, activation="identity"), # no last activation
)
def forward(self, x):
xl = self.local_attn(x)
xg = self.global_attn(x)
return x * (xl + xg).sigmoid()
class FCAAttn(nn.Module):
"""Inspired by FcaNet: Frequency Channel Attention Networks (https://arxiv.org/pdf/2012.11879.pdf)
    But I view it as positional encoding and multiply by a predefined set of filters
"""
def __init__(self, channels, reduction_channels, norm_act="relu"):
super().__init__()
# self.pool = FastGlobalAvgPool2d()
# authors of original paper DO use bias
self.fc = nn.Sequential(
conv1x1(channels, reduction_channels, bias=True),
activation_from_name(norm_act),
conv1x1(reduction_channels, channels, bias=True),
nn.Sigmoid(),
)
# dummy shape. would be overwritten later. not registering as buffer intentionally
self.pos_encoding = torch.ones(1, 1, 1, 1)
def _get_pos_encoding(self, inp):
"""Want this to be generated for each input size separately"""
self.pos_encoding = torch.ones_like(inp)
c_part = inp.size(1) // 4
xx = torch.linspace(0, math.pi, inp.size(3)).cos()[None].repeat(inp.size(2), 1)
yy = torch.linspace(0, math.pi, inp.size(2)).cos()[None].repeat(inp.size(3), 1).T
xy = torch.linspace(-math.pi, math.pi, inp.size(3)).cos().neg().repeat(inp.size(2), 1)
self.pos_encoding[:, c_part * 1 : c_part * 2] = xx
self.pos_encoding[:, c_part * 2 : c_part * 3] = yy
self.pos_encoding[:, c_part * 3 : c_part * 4] = xy
return self.pos_encoding
def forward(self, x):
if x.shape != self.pos_encoding.shape:
self._get_pos_encoding(x)
x_se = (x * self.pos_encoding).sum(dim=(2, 3), keepdim=True)
x_se = self.fc(x_se)
return x * x_se
class FCA_ECA_Attn(nn.Module):
"""Inspired by FcaNet: Frequency Channel Attention Networks (https://arxiv.org/pdf/2012.11879.pdf)
    But I view it as positional encoding and multiply by a predefined set of filters
This class uses Efficient Channel Attention instead of SE
"""
def __init__(self, *args, kernel_size=3, **kwargs):
super().__init__()
self.pool = FastGlobalAvgPool2d()
self.conv = nn.Conv1d(1, 1, kernel_size=kernel_size, padding=kernel_size // 2, bias=False)
# dummy shape. would be overwritten later. not registering as buffer intentionally
self.pos_encoding = torch.ones(1, 1, 1, 1)
def _get_pos_encoding(self, inp):
"""Want this to be generated for each input size separately"""
self.pos_encoding = torch.ones_like(inp)
c_part = inp.size(1) // 4
xx = torch.linspace(0, math.pi, inp.size(3)).cos()[None].repeat(inp.size(2), 1)
yy = torch.linspace(0, math.pi, inp.size(2)).cos()[None].repeat(inp.size(3), 1).T
xy = torch.linspace(-math.pi, math.pi, inp.size(3)).cos().neg().repeat(inp.size(2), 1)
self.pos_encoding[:, c_part * 1 : c_part * 2] = xx
self.pos_encoding[:, c_part * 2 : c_part * 3] = yy
self.pos_encoding[:, c_part * 3 : c_part * 4] = xy
return self.pos_encoding
def forward(self, x):
# FCA part
if x.shape != self.pos_encoding.shape:
self._get_pos_encoding(x)
# ECA part
x_s = self.pool(x * self.pos_encoding)
x_s = self.conv(x_s.view(x.size(0), 1, -1))
x_s = x_s.view(x.size(0), -1, 1, 1).sigmoid()
return x * x_s.expand_as(x)
class MyAttn(nn.Module):
"""Idea from Attentional Feature Fusion (MS-CAM)
Combines global and local attention in a better manner than scSE
"""
    def __init__(self, in_ch, reduced_ch, norm_layer=ABN, norm_act="relu"):  # parse additional args for compatibility
super().__init__()
self.global_attn = nn.Sequential(
FastGlobalAvgPool2d(),
conv1x1(in_ch, reduced_ch),
norm_layer(reduced_ch, activation=norm_act),
conv1x1(reduced_ch, in_ch),
norm_layer(in_ch, activation="identity"), # no last activation
)
        # local (per-pixel) attention branch
self.local_attn = nn.Sequential(
conv1x1(in_ch, reduced_ch),
norm_layer(reduced_ch, activation=norm_act),
conv1x1(reduced_ch, in_ch),
norm_layer(in_ch, activation="identity"), # no last activation
)
def forward(self, x):
xl = self.local_attn(x)
xg = self.global_attn(x)
return x * (xl + xg).sigmoid()
def get_attn(attn_type):
"""Get attention by name
Args:
attn_type (Uniont[str, None]): Attention type. Supported:
`se` - Squeeze and Excitation
`eca` - Efficient Channel Attention
`sse` - Spatial Excitation
`scse` - Spatial and Channel ‘Squeeze & Excitation’
None - no attention
"""
ATT_TO_MODULE = {
"se": SEModule,
"eca": ECAModule,
"eca": ECAModule,
"eca9": partial(ECAModule, kernel_size=9),
"sse": SSEModule,
"scse": SCSEModule,
"se-var3": SEVar3,
"ms-cam": MSCAMModule,
"fca": FCAAttn,
"fca-eca": FCA_ECA_Attn,
"xca": XCA,
"esa": ESA,
}
if attn_type is None:
return nn.Identity
else:
return ATT_TO_MODULE[attn_type.lower()]
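# Usage sketch (illustrative): get_attn returns a class, so instantiation happens at the call
# site with whatever channel arguments the chosen attention expects, e.g.
#   attn = get_attn("se")(channels, channels // 4)   # SE needs reduction channels
#   attn = get_attn("eca")(channels, channels // 4)  # ECA ignores the extra positional args
#   attn = get_attn(None)()                          # nn.Identity, a no-op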
class DepthwiseSeparableConv(nn.Sequential):
"""Depthwise separable conv with BN after depthwise & pointwise."""
def __init__(self, in_channels, out_channels, stride=1, dilation=1, norm_layer=ABN, norm_act="relu", use_norm=True):
modules = [
conv3x3(in_channels, in_channels, stride=stride, groups=in_channels, dilation=dilation),
# Do we need normalization here? If yes why? If no why?
            # bias is needed for EffDet because in the head the conv is separated from the normalization
conv1x1(in_channels, out_channels, bias=not use_norm),
norm_layer(out_channels, activation=norm_act) if use_norm else nn.Identity(),
]
super().__init__(*modules)
class InvertedResidual(nn.Module):
def __init__(
self,
in_channels,
out_channels,
dw_kernel_size=3,
stride=1,
dilation=1,
attn_type=None,
expand_ratio=1.0, # expansion
keep_prob=1, # drop connect param
noskip=False,
norm_layer=ABN,
norm_act="relu",
):
super().__init__()
mid_chs = make_divisible(in_channels * expand_ratio)
self.has_residual = (in_channels == out_channels and stride == 1) and not noskip
self.has_expansion = expand_ratio != 1
if self.has_expansion:
self.conv_pw = conv1x1(in_channels, mid_chs)
self.bn1 = norm_layer(mid_chs, activation=norm_act)
self.conv_dw = nn.Conv2d(
mid_chs,
mid_chs,
dw_kernel_size,
stride=stride,
groups=mid_chs,
dilation=dilation,
bias=False,
padding=dilation * (dw_kernel_size - 1) // 2,
)
self.bn2 = norm_layer(mid_chs, activation=norm_act)
# some models like MobileNet use mid_chs here instead of in_channels. But I don't care for now
self.se = get_attn(attn_type)(mid_chs, in_channels // 4, norm_act)
self.conv_pw1 = conv1x1(mid_chs, out_channels)
self.bn3 = norm_layer(out_channels, activation="identity")
self.drop_connect = DropConnect(keep_prob) if keep_prob < 1 else nn.Identity()
def forward(self, x):
residual = x
if self.has_expansion:
x = self.conv_pw(x)
x = self.bn1(x)
x = self.conv_dw(x)
x = self.bn2(x)
x = self.se(x)
x = self.conv_pw1(x)
x = self.bn3(x)
if self.has_residual:
x = self.drop_connect(x) + residual
return x
class DropConnect(nn.Module):
"""Randomply drops samples from input.
Implements idea close to one from https://arxiv.org/abs/1603.09382"""
def __init__(self, keep_prob):
super().__init__()
self.keep_prob = keep_prob
def forward(self, x):
if not self.training:
return x
random_tensor = self.keep_prob
random_tensor += torch.rand((x.size(0),) + (1,) * (x.dim() - 1), dtype=x.dtype, device=x.device)
binary_tensor = torch.floor(random_tensor)
output = x / self.keep_prob * binary_tensor
return output
def extra_repr(self):
return f"keep_prob={self.keep_prob:.2f}"
class BasicBlock(nn.Module):
expansion = 1
def __init__(
self,
inplanes,
planes,
stride=1,
downsample=None,
groups=1,
base_width=64,
attn_type=None,
dilation=1,
norm_layer=ABN,
norm_act="relu",
antialias=False,
keep_prob=1,
):
super(BasicBlock, self).__init__()
antialias = antialias and stride == 2
assert groups == 1, "BasicBlock only supports groups of 1"
        assert base_width == 64, "BasicBlock does not support changing base width"
outplanes = planes * self.expansion
conv1_stride = 1 if antialias else stride
self.conv1 = conv3x3(inplanes, planes, conv1_stride, groups, dilation)
self.bn1 = norm_layer(planes, activation=norm_act)
self.conv2 = conv3x3(planes, outplanes)
self.bn2 = norm_layer(outplanes, activation="identity")
self.se_module = get_attn(attn_type)(outplanes, planes // 4)
self.final_act = activation_from_name(norm_act)
self.downsample = downsample
self.blurpool = BlurPool(channels=planes) if antialias else nn.Identity()
self.antialias = antialias
self.drop_connect = DropConnect(keep_prob) if keep_prob < 1 else nn.Identity()
def forward(self, x):
residual = x
if self.downsample is not None:
residual = self.downsample(x)
out = self.conv1(x)
out = self.bn1(out)
# Conv(s=2)->BN->Relu(s=1) => Conv(s=1)->BN->Relu(s=1)->BlurPool(s=2)
if self.antialias:
out = self.blurpool(out)
out = self.conv2(out)
# avoid 2 inplace ops by chaining into one long op. Needed for inplaceabn
out = self.drop_connect(self.se_module(self.bn2(out))) + residual
return self.final_act(out)
# This class is from torchvision with many (many) modifications
# it's not very intuitive. Check this article if you want to understand the code more
# https://medium.com/@erikgaas/resnet-torchvision-bottlenecks-and-layers-not-as-they-seem-145620f93096
class Bottleneck(nn.Module):
expansion = 4
def __init__(
self,
inplanes,
planes,
stride=1,
downsample=None,
groups=1,
base_width=64,
attn_type=None,
dilation=1,
norm_layer=ABN,
norm_act="relu",
antialias=False,
keep_prob=1, # for drop connect
):
super(Bottleneck, self).__init__()
antialias = antialias and stride == 2
width = int(math.floor(planes * (base_width / 64)) * groups)
outplanes = planes * self.expansion
self.conv1 = conv1x1(inplanes, width)
self.bn1 = norm_layer(width, activation=norm_act)
conv2_stride = 1 if antialias else stride
self.conv2 = conv3x3(width, width, conv2_stride, groups, dilation)
self.bn2 = norm_layer(width, activation=norm_act)
self.conv3 = conv1x1(width, outplanes)
self.bn3 = norm_layer(outplanes, activation="identity")
self.se_module = get_attn(attn_type)(outplanes, planes // 4)
self.final_act = activation_from_name(norm_act)
self.downsample = downsample
self.blurpool = BlurPool(channels=width) if antialias else nn.Identity()
self.antialias = antialias
self.drop_connect = DropConnect(keep_prob) if keep_prob < 1 else nn.Identity()
def forward(self, x):
residual = x
if self.downsample is not None:
residual = self.downsample(x)
out = self.conv1(x)
out = self.bn1(out)
# Conv(s=2)->BN->Relu(s=1) => Conv(s=1)->BN->Relu(s=1)->BlurPool(s=2)
out = self.conv2(out)
out = self.bn2(out)
if self.antialias:
out = self.blurpool(out)
out = self.conv3(out)
# avoid 2 inplace ops by chaining into one long op
out = self.drop_connect(self.se_module(self.bn3(out))) + residual
return self.final_act(out)
# TResnet models use slightly modified versions of BasicBlock and Bottleneck
# need to adjust for it
class TBasicBlock(BasicBlock):
def __init__(self, **kwargs):
super().__init__(**kwargs)
self.final_act = nn.ReLU(inplace=True)
self.bn1.activation_param = 1e-3 # needed for loading weights
if not kwargs.get("attn_type") == "se":
return
planes = kwargs["planes"]
self.se_module = SEModule(planes, max(planes // 4, 64))
class TBottleneck(Bottleneck):
def __init__(self, **kwargs):
super().__init__(**kwargs)
self.final_act = nn.ReLU(inplace=True)
self.bn1.activation_param = 1e-3 # needed for loading weights
self.bn2.activation_param = 1e-3
if not kwargs.get("attn_type") == "se":
return
planes = kwargs["planes"]
reduce_planes = max(planes * self.expansion // 8, 64)
self.se_module = SEModule(planes, reduce_planes)
# use se after 2nd conv instead of 3rd
def forward(self, x):
residual = x
if self.downsample is not None:
residual = self.downsample(x)
out = self.conv1(x)
out = self.bn1(out)
# Conv(s=2)->BN->Relu(s=1) => Conv(s=1)->BN->Relu(s=1)->BlurPool(s=2)
out = self.conv2(out)
out = self.bn2(out)
if self.antialias:
out = self.blurpool(out)
out = self.se_module(out)
out = self.conv3(out)
# avoid 2 inplace ops by chaining into one long op
out = self.drop_connect(self.bn3(out)) + residual
return self.final_act(out)
## DarkNet blocks
class DarkBasicBlock(nn.Module):
"""Basic Block for DarkNet family models"""
def __init__(
self,
in_channels,
out_channels,
bottle_ratio=0.5,
attn_type=None,
norm_layer=ABN,
norm_act="leaky_relu",
keep_prob=1,
):
super().__init__()
mid_channels = int(in_channels * bottle_ratio)
self.bn1 = norm_layer(mid_channels, activation=norm_act)
self.conv1 = conv1x1(in_channels, mid_channels)
self.bn2 = norm_layer(out_channels, activation=norm_act)
self.conv2 = conv3x3(mid_channels, out_channels, groups=32)
# In original DarkNet they have activation after second BN but the most recent papers
# (Mobilenet v2 for example) show that it is better to use linear here
# out_channels // 4 is for SE attention. other attentions don't use second parameter
self.attention = get_attn(attn_type)(out_channels, out_channels // 4)
self.drop_connect = DropConnect(keep_prob) if keep_prob < 1 else nn.Identity()
def forward(self, x):
# preAct
out = self.bn1(x)
out = self.conv1(x)
out = self.bn2(out)
out = self.conv2(out)
# out = self.bn3(out)
# out = self.conv3(out)
out = self.drop_connect(self.attention(out)) + x
return out
class CSPDarkBasicBlock(nn.Module):
"""Idea from https://github.com/WongKinYiu/CrossStagePartialNetworks
    But the implementation is different: the block is meant to divide the input in two and pass only one part through the bottleneck
"""
def __init__(
self,
in_channels,
out_channels,
        bottle_ratio=0.5,  # was missing from the signature but is used below (NameError otherwise)
        attn_type=None,
norm_layer=ABN,
norm_act="leaky_relu",
keep_prob=1,
):
super().__init__()
mid_channels = int(in_channels * bottle_ratio)
self.conv1 = conv1x1(in_channels, mid_channels)
self.bn1 = norm_layer(mid_channels, activation=norm_act)
self.conv2 = conv3x3(mid_channels, out_channels)
# In original DarkNet they have activation after second BN but the most recent papers
# (Mobilenet v2 for example) show that it is better to use linear here
self.bn2 = norm_layer(out_channels, activation="identity")
# out_channels // 4 is for SE attention. other attentions don't use second parameter
self.attention = get_attn(attn_type)(out_channels, out_channels // 4)
self.drop_connect = DropConnect(keep_prob) if keep_prob < 1 else nn.Identity()
def forward(self, x):
        # NOTE: the chunked halves are currently unused; the full input goes through the convs below
        x1, x2 = torch.chunk(x, chunks=2, dim=1)
out = self.conv1(x)
out = self.bn1(out)
out = self.conv2(out)
# avoid 2 inplace ops by chaining into one long op. Needed for inplaceabn
out = self.drop_connect(self.attention(self.bn2(out))) + x
return out
class SimpleBottleneck(nn.Module):
"""Simple Bottleneck without downsample support"""
def __init__(
self,
in_chs,
mid_chs,
out_chs,
stride=1,
# attn_type=None,
groups=1,
groups_width=None,
no_groups_with_stride=False,
norm_layer=ABN,
norm_act="relu",
keep_prob=1, # for drop connect
final_act=False, # add activation after summation with residual
):
super().__init__()
groups = mid_chs // groups_width if groups_width else groups
if no_groups_with_stride and stride == 2:
groups = 1 # no groups in first block in stage. helps to avoid representational bottleneck
self.conv1 = conv1x1(in_chs, mid_chs)
self.bn1 = norm_layer(mid_chs, activation=norm_act)
self.conv2 = conv3x3(mid_chs, mid_chs, stride=stride, groups=groups)
self.bn2 = norm_layer(mid_chs, activation=norm_act)
self.conv3 = conv1x1(mid_chs, out_chs)
self.bn3 = norm_layer(out_chs, activation="identity")
self.has_residual = in_chs == out_chs and stride == 1
self.final_act = activation_from_name(norm_act) if final_act else nn.Identity()
# self.se_module = get_attn(attn_type)(outplanes, planes // 4)
# self.drop_connect = DropConnect(keep_prob) if keep_prob < 1 else nn.Identity()
def forward(self, x):
out = self.conv1(x)
out = self.bn1(out)
out = self.conv2(out)
out = self.bn2(out)
out = self.conv3(out)
# avoid 2 inplace ops by chaining into one long op
if self.has_residual:
out = self.bn3(out) + x
else:
out = self.bn3(out)
out = self.final_act(out) # optional last activation
return out
class SimpleBasicBlock(nn.Module):
"""Simple Bottleneck without downsample support"""
def __init__(
self,
in_chs,
mid_chs,
out_chs,
stride=1,
# attn_type=None,
groups=1,
groups_width=None,
norm_layer=ABN,
norm_act="relu",
keep_prob=1, # for drop connect
dim_reduction="stride -> expand", # "expand -> stride", "stride & expand"
final_act=False, # add activation after summation with residual
):
super().__init__()
groups = in_chs // groups_width if groups_width else groups
if dim_reduction == "expand -> stride":
self.conv1 = conv3x3(in_chs, mid_chs)
self.bn1 = norm_layer(mid_chs, activation=norm_act)
self.conv2 = conv3x3(mid_chs, out_chs, stride=stride)
elif dim_reduction == "stride -> expand":
# it's ~20% faster to have stride first. maybe accuracy drop isn't that big
# TODO: test MixConv type of block here. I expect it to have the same speed and N params
# while performance should increase
self.conv1 = conv3x3(in_chs, in_chs, stride=stride)
self.bn1 = norm_layer(in_chs, activation=norm_act)
self.conv2 = conv3x3(in_chs, out_chs)
elif dim_reduction == "stride & expand":
self.conv1 = conv3x3(in_chs, mid_chs, stride=stride)
self.bn1 = norm_layer(mid_chs, activation=norm_act)
            self.conv2 = conv3x3(mid_chs, out_chs)  # conv1 outputs mid_chs, so conv2 takes mid_chs in
else:
raise ValueError(f"{dim_reduction} is not valid dim reduction in BasicBlock")
self.bn3 = norm_layer(out_chs, activation="identity")
self.has_residual = in_chs == out_chs and stride == 1
self.final_act = activation_from_name(norm_act) if final_act else nn.Identity()
def forward(self, x):
out = self.conv1(x)
out = self.bn1(out)
out = self.conv2(out)
# avoid 2 inplace ops by chaining into one long op
if self.has_residual:
out = self.bn3(out) + x
else:
out = self.bn3(out)
out = self.final_act(out) # optional last activation
return out
class SimplePreActBottleneck(nn.Module):
"""Simple Bottleneck with preactivation"""
def __init__(
self,
in_chs,
mid_chs,
out_chs,
stride=1,
groups=1,
groups_width=None,
norm_layer=ABN,
norm_act="relu",
force_residual=False, # force residual in stride=2 blocks
# keep_prob=1, # for drop connect
):
super().__init__()
groups = mid_chs // groups_width if groups_width else groups
self.bn1 = norm_layer(in_chs, activation=norm_act)
self.conv1 = conv1x1(in_chs, mid_chs)
self.bn2 = norm_layer(mid_chs, activation=norm_act)
self.conv2 = conv3x3(mid_chs, mid_chs, stride=stride, groups=groups)
self.bn3 = norm_layer(mid_chs, activation=norm_act)
# last conv is not followed by bn, but anyway bias here makes it slightly worse (on Imagenet)
self.conv3 = conv1x1(mid_chs, out_chs)
self.has_residual = in_chs == out_chs and stride == 1
self.force_residual = force_residual
if force_residual:
self.blurpool = BlurPool(channels=in_chs)
self.in_chs = in_chs
def forward(self, x):
out = self.bn1(x)
out = self.conv1(out)
out = self.bn2(out)
out = self.conv2(out)
out = self.bn3(out)
out = self.conv3(out)
if self.has_residual:
out += x
elif self.force_residual: # forces partial residual for stride=2 block
out[:, : self.in_chs] += self.blurpool(x)
return out
class MixConv(nn.Module):
def __init__(self, in_chs, out_chs):
super().__init__()
in_chs_4 = in_chs // 4
        # use the module-level BlurPool / conv1x1 imports; the `pt.` alias used before was never imported
        self.conv1x1 = nn.Sequential(
            BlurPool(in_chs_4), conv1x1(in_chs_4, out_chs // 4)
        )
self.conv3x3 = nn.Conv2d(in_chs // 4, out_chs // 4, kernel_size=3, stride=2, padding=1, bias=False)
self.conv5x5 = nn.Conv2d(in_chs // 4, out_chs // 4, kernel_size=5, stride=2, padding=2, bias=False)
self.conv7x7 = nn.Conv2d(in_chs // 4, out_chs // 4, kernel_size=7, stride=2, padding=3, bias=False)
def forward(self, x):
x_0, x_1, x_2, x_3 = x.chunk(4, dim=1)
return torch.cat([self.conv1x1(x_0), self.conv3x3(x_1), self.conv5x5(x_2), self.conv7x7(x_3)], dim=1)
class SimplePreActBasicBlock(nn.Module):
"""Simple BasicBlock with preactivatoin & without downsample support"""
def __init__(
self,
in_chs,
mid_chs,
out_chs,
stride=1,
groups=1,
groups_width=None,
norm_layer=ABN,
norm_act="relu",
keep_prob=1, # for drop connect
dim_reduction="stride & expand", # "expand -> stride", "stride & expand"
force_residual=False, # always have residual
):
super().__init__()
self.has_residual = in_chs == out_chs and stride == 1
self.force_residual = force_residual
if force_residual:
self.blurpool = BlurPool(channels=in_chs)
self.in_chs = in_chs
groups = in_chs // groups_width if groups_width else groups
if dim_reduction == "expand -> stride":
self.bn1 = norm_layer(in_chs, activation=norm_act)
self.conv1 = conv3x3(in_chs, mid_chs)
self.bn2 = norm_layer(mid_chs, activation=norm_act)
self.conv2 = conv3x3(mid_chs, out_chs, stride=stride)
elif dim_reduction == "s2d":
if stride == 2:
# BN before S2D to make sure different pixels from one channel are normalized the same way
self.bn1 = nn.Sequential(norm_layer(in_chs, activation=norm_act), SpaceToDepth(block_size=2))
self.conv1 = conv3x3(in_chs * 4, mid_chs)
self.bn2 = norm_layer(mid_chs, activation=norm_act)
self.conv2 = conv3x3(mid_chs, out_chs)
else: # same as stride & expand
self.bn1 = norm_layer(in_chs, activation=norm_act)
self.conv1 = conv3x3(in_chs, mid_chs, stride=stride)
self.bn2 = norm_layer(mid_chs, activation=norm_act)
self.conv2 = conv3x3(mid_chs, out_chs)
# elif dim_reduction == "stride -> expand":
# # it's ~20% faster to have stride first. maybe accuracy drop isn't that big
# # TODO: test MixConv type of block here. I expect it to have the same speed and N params
# # while performance should increase
# self.conv1 = conv3x3(in_chs, in_chs, stride=stride)
# self.bn1 = norm_layer(in_chs, activation=norm_act)
# self.conv2 = conv3x3(in_chs, out_chs)
elif dim_reduction == "stride & expand": # only this one is supported for now
self.bn1 = norm_layer(in_chs, activation=norm_act)
self.conv1 = conv3x3(in_chs, mid_chs, stride=stride)
self.bn2 = norm_layer(mid_chs, activation=norm_act)
self.conv2 = conv3x3(mid_chs, out_chs)
elif dim_reduction == "mixconv stride & expand":
self.bn1 = norm_layer(in_chs, activation=norm_act)
self.conv1 = conv3x3(in_chs, mid_chs, stride=stride)
self.bn2 = norm_layer(mid_chs, activation=norm_act)
self.conv2 = conv3x3(mid_chs, out_chs)
else:
raise ValueError(f"{dim_reduction} is not valid dim reduction in PreAct BasicBlock")
def forward(self, x):
out = self.bn1(x)
out = self.conv1(out)
out = self.bn2(out)
out = self.conv2(out)
# avoid 2 inplace ops by chaining into one long op
if self.has_residual:
out += x
elif self.force_residual: # forces partial residual for stride=2 block
out[:, : self.in_chs] += self.blurpool(x)
return out
class SimplePreActRes2BasicBlock(nn.Module):
"""Building block based on BasicBlock with:
preactivatoin
without downsample support
with Res2Net inspired chunking
"""
def __init__(
self,
in_chs,
mid_chs,
out_chs,
stride=1,
norm_layer=ABN,
norm_act="relu",
keep_prob=1, # for drop connect
antialias=False,
):
super().__init__()
self.has_residual = in_chs == out_chs and stride == 1
self.stride = stride
        if self.stride == 2:  # the Res2Net-style chunking below is only used for stride == 1
self.blocks = nn.Sequential(
norm_layer(in_chs, activation=norm_act),
conv3x3(in_chs, mid_chs, stride=1 if antialias else 2),
BlurPool(channels=mid_chs) if antialias else nn.Identity(),
norm_layer(mid_chs, activation=norm_act),
conv3x3(mid_chs, out_chs),
)
else:
self.bn1 = norm_layer(in_chs, activation=norm_act)
self.block_1 = nn.Sequential(
conv3x3(in_chs // 4, in_chs // 4),
norm_layer(in_chs // 4, activation=norm_act),
)
self.block_2 = nn.Sequential(
conv3x3(in_chs // 4, in_chs // 4),
norm_layer(in_chs // 4, activation=norm_act),
)
self.block_3 = nn.Sequential(
conv3x3(in_chs // 4, in_chs // 4),
norm_layer(in_chs // 4, activation=norm_act),
)
self.last_conv = conv3x3(in_chs, out_chs) # expand in last conv in block
def forward(self, x):
if self.stride == 2:
return self.blocks(x)
# split in 4
x_out0, x_inp1, x_inp2, x_inp3 = self.bn1(x).chunk(4, dim=1)
x_out1 = self.block_1(x_inp1)
x_out2 = self.block_2(x_inp2 + x_out1)
x_out3 = self.block_3(x_inp3 + x_out2)
out = torch.cat([x_out0, x_out1, x_out2, x_out3], dim=1)
out = self.last_conv(out) # always has residual
if self.has_residual:
out += x
return out
class SimpleInvertedResidual(nn.Module):
def __init__(
self,
in_chs,
mid_chs,
out_chs,
dw_kernel_size=3,
stride=1,
attn_type=None,
keep_prob=1, # drop connect param
norm_layer=ABN,
norm_act="relu",
final_act=False, # add activation after summation with residual
):
super().__init__()
self.has_residual = in_chs == out_chs and stride == 1
if in_chs != mid_chs:
self.expansion = nn.Sequential(conv1x1(in_chs, mid_chs), norm_layer(mid_chs, activation=norm_act))
else:
self.expansion = nn.Identity()
self.conv_dw = nn.Conv2d(
mid_chs,
mid_chs,
dw_kernel_size,
stride=stride,
groups=mid_chs,
bias=False,
padding=dw_kernel_size // 2,
)
self.bn2 = norm_layer(mid_chs, activation=norm_act)
# some models like MobileNet use mid_chs here instead of in_channels. But I don't care for now
self.se = get_attn(attn_type)(mid_chs, in_chs // 4, norm_act)
self.conv_pw1 = conv1x1(mid_chs, out_chs)
self.bn3 = norm_layer(out_chs, activation="identity")
self.drop_connect = DropConnect(keep_prob) if keep_prob < 1 else nn.Identity()
self.final_act = activation_from_name(norm_act) if final_act else nn.Identity()
def forward(self, x):
residual = x
x = self.expansion(x)
x = self.conv_dw(x)
x = self.bn2(x)
x = self.se(x)
x = self.conv_pw1(x)
x = self.bn3(x)
if self.has_residual:
x = self.drop_connect(x) + residual
x = self.final_act(x) # optional last activation
return x
class SimplePreActInvertedResidual(nn.Module):
def __init__(
self,
in_chs,
mid_chs,
out_chs,
dw_kernel_size=3,
dw_str2_kernel_size=3,
stride=1,
attn_type=None,
keep_prob=1, # drop connect param
norm_layer=ABN,
norm_act="relu",
force_residual=False,
force_expansion=False, # always have expansion
):
super().__init__()
self.has_residual = in_chs == out_chs and stride == 1
self.force_residual = force_residual
if force_residual:
self.blurpool = BlurPool(channels=in_chs) if stride == 2 else nn.Identity()
self.in_chs = in_chs
if in_chs != mid_chs or force_expansion:
self.expansion = nn.Sequential(norm_layer(in_chs, activation=norm_act), conv1x1(in_chs, mid_chs))
else:
self.expansion = nn.Identity()
self.bn2 = norm_layer(mid_chs, activation=norm_act)
dw_kernel_size = dw_str2_kernel_size if stride == 2 else dw_kernel_size
self.conv_dw = nn.Conv2d(
mid_chs,
mid_chs,
dw_kernel_size,
stride=stride,
groups=mid_chs,
bias=False,
padding=dw_kernel_size // 2,
)
# some models like MobileNet use mid_chs here instead of in_channels. But I don't care for now
self.se = get_attn(attn_type)(mid_chs, in_chs // 4, norm_act)
self.bn3 = norm_layer(mid_chs, activation=norm_act) # Note it's NOT identity for PreAct
self.conv_pw1 = conv1x1(mid_chs, out_chs)
self.drop_connect = DropConnect(keep_prob) if keep_prob < 1 else nn.Identity()
def forward(self, x):
out = self.expansion(x)
out = self.bn2(out)
out = self.conv_dw(out)
# x = self.se(x)
out = self.bn3(out)
out = self.conv_pw1(out)
if self.has_residual:
out = self.drop_connect(out) + x
elif self.force_residual: # forces partial residual for stride=2 block
out[:, : self.in_chs] += self.blurpool(x)
return out
class PreBlock_2(nn.Module):
"""
pw -> pw -> dw
Always has at least partial residual
"""
def __init__(
self,
in_chs,
mid_chs,
out_chs,
dw_kernel_size=3,
dw_str2_kernel_size=3,
stride=1,
attn_type=None,
keep_prob=1, # drop connect param
norm_layer=ABN,
norm_act="relu",
):
super().__init__()
self.blurpool = BlurPool(channels=in_chs) if stride == 2 else nn.Identity()
self.in_chs = in_chs
self.pw1 = nn.Sequential(norm_layer(in_chs, activation=norm_act), conv1x1(in_chs, mid_chs))
self.pw2 = nn.Sequential(norm_layer(mid_chs, activation=norm_act), conv1x1(mid_chs, out_chs))
dw_kernel_size = dw_str2_kernel_size if stride == 2 else dw_kernel_size
conv_dw = nn.Conv2d(
out_chs,
out_chs,
dw_kernel_size,
stride=stride,
groups=out_chs,
bias=False,
padding=dw_kernel_size // 2,
)
self.dw = nn.Sequential(norm_layer(out_chs, activation=norm_act), conv_dw)
self.se = get_attn(attn_type)(mid_chs, in_chs // 4, norm_act)
self.drop_connect = DropConnect(keep_prob) if keep_prob < 1 else nn.Identity()
def forward(self, x):
out = self.pw1(x)
out = self.pw2(out)
out = self.dw(out)
out[:, : self.in_chs] += self.blurpool(x)
return out
class SimpleSeparable_2(nn.Module):
def __init__(
self,
in_chs,
mid_chs,
out_chs,
dw_kernel_size=3,
stride=1,
attn_type=None,
keep_prob=1, # drop connect param
norm_layer=ABN,
norm_act="relu",
):
super().__init__()
        # actually we could have a partial residual even when in_chs != out_chs
self.has_residual = in_chs == out_chs and stride == 1
self.sep_convs = nn.Sequential(
DepthwiseSeparableConv(in_chs, out_chs, stride=stride, norm_layer=norm_layer, norm_act=norm_act),
DepthwiseSeparableConv(out_chs, out_chs, norm_layer=norm_layer, norm_act="identity"),
)
self.drop_connect = DropConnect(keep_prob) if keep_prob < 1 else nn.Identity()
def forward(self, x):
# x = self.se(x) # maybe attention at the beginning would work better?
# the idea is: it would allow to accentuate what features to process in this block
out = self.sep_convs(x)
if self.has_residual:
out = self.drop_connect(out) + x
return out
class SimplePreActSeparable_2(nn.Module):
def __init__(
self,
in_chs,
mid_chs,
out_chs,
dw_kernel_size=3,
stride=1,
attn_type=None,
keep_prob=1, # drop connect param
norm_layer=ABN,
norm_act="relu",
dim_reduction=None,
):
super().__init__()
        # actually we could have a partial residual even when in_chs != out_chs
self.has_residual = in_chs == out_chs and stride == 1
if dim_reduction == "s2d_full" and stride == 2: # redesign reduction
self.blocks = nn.Sequential(
# replace first DW -> PW with s2d -> full conv to lose less information
# gives very large increase in number of parameters
norm_layer(in_chs, activation=norm_act),
SpaceToDepth(block_size=2),
conv3x3(in_chs * 4, out_chs),
norm_layer(out_chs, activation=norm_act),
conv3x3(mid_chs, mid_chs, groups=mid_chs), # DW 2
norm_layer(mid_chs, activation=norm_act),
conv1x1(mid_chs, out_chs), # PW 2
)
elif dim_reduction == "s2d_dw" and stride == 2: # redesign reduction
self.blocks = nn.Sequential(
# BN before S2D to make sure different pixels from one channel are normalized the same way
# expand with s2d -> DW -> PW
norm_layer(in_chs, activation=norm_act),
SpaceToDepth(block_size=2),
conv3x3(in_chs * 4, in_chs * 4, groups=in_chs * 4), # DW 1
norm_layer(in_chs * 4, activation=norm_act),
conv1x1(in_chs * 4, out_chs), # PW 1
norm_layer(mid_chs, activation=norm_act),
conv3x3(mid_chs, mid_chs, groups=mid_chs), # DW 2
norm_layer(mid_chs, activation=norm_act),
conv1x1(mid_chs, out_chs), # PW 2
)
else:
self.blocks = nn.Sequential(
norm_layer(in_chs, activation=norm_act),
conv3x3(in_chs, in_chs, stride=stride, groups=in_chs), # DW 1
norm_layer(in_chs, activation=norm_act),
conv1x1(in_chs, mid_chs), # PW 1
norm_layer(mid_chs, activation=norm_act),
conv3x3(mid_chs, mid_chs, groups=mid_chs), # DW 2
norm_layer(mid_chs, activation=norm_act),
conv1x1(mid_chs, out_chs), # PW 2
)
self.drop_connect = DropConnect(keep_prob) if keep_prob < 1 else nn.Identity()
def forward(self, x):
# x = self.se(x) # maybe attention at the beginning would work better?
# the idea is: it would allow to accentuate what features to process in this block
out = self.blocks(x)
if self.has_residual:
out = self.drop_connect(out) + x
return out
class SimpleSeparable_3(nn.Module):
def __init__(
self,
in_chs,
mid_chs,
out_chs,
dw_kernel_size=3,
stride=1,
attn_type=None,
keep_prob=1, # drop connect param
norm_layer=ABN,
norm_act="relu",
):
super().__init__()
        # actually we could have a partial residual even when in_chs != out_chs
self.has_residual = in_chs == out_chs and stride == 1
self.sep_convs = nn.Sequential(
DepthwiseSeparableConv(in_chs, out_chs, stride=stride, norm_layer=norm_layer, norm_act=norm_act),
DepthwiseSeparableConv(out_chs, out_chs, norm_layer=norm_layer, norm_act=norm_act),
DepthwiseSeparableConv(out_chs, out_chs, norm_layer=norm_layer, norm_act="identity"),
)
self.drop_connect = DropConnect(keep_prob) if keep_prob < 1 else nn.Identity()
def forward(self, x):
# x = self.se(x) # maybe attention at the beginning would work better?
# the idea is: it would allow to accentuate what features to process in this block
out = self.sep_convs(x)
if self.has_residual:
out = self.drop_connect(out) + x
return out
class SimpleStage(nn.Module):
"""One stage in DarkNet models. It consists of first transition conv (with stride == 2) and
DarkBasicBlock repeated num_blocks times
Args:
in_channels (int): input channels for this stage
out_channels (int): output channels for this stage
num_blocks (int): number of residual blocks in stage
stride (int): stride for first convolution
bottle_ratio (float): how much channels are reduced inside blocks
        antialias (bool): flag to apply gaussian smoothing before conv with stride 2
Ref: TODO: add
"""
def __init__(
self,
in_chs,
out_chs,
num_blocks,
stride=2,
bottle_ratio=1.0,
# antialias=False,
block_fn=DarkBasicBlock,
attn_type=None,
norm_layer=ABN,
norm_act="leaky_relu",
keep_prob=1,
        csp_block_ratio=None,  # for compatibility
        x2_transition=None,  # for compatibility
filter_steps=0,
**block_kwargs,
):
super().__init__()
if csp_block_ratio is not None:
print("Passing csp block ratio to Simple Stage")
norm_kwarg = dict(norm_layer=norm_layer, norm_act=norm_act, **block_kwargs) # this is dirty
mid_chs = max(int(out_chs * bottle_ratio), 64)
layers = [block_fn(in_chs=in_chs, mid_chs=mid_chs, out_chs=out_chs, stride=stride, **norm_kwarg)]
block_kwargs = dict(
in_chs=out_chs, mid_chs=out_chs + filter_steps, out_chs=out_chs + filter_steps, **norm_kwarg
)
for _ in range(num_blocks - 1):
layers.append(block_fn(**block_kwargs))
block_kwargs["in_chs"] += filter_steps
block_kwargs["mid_chs"] += filter_steps
block_kwargs["out_chs"] += filter_steps
self.blocks = nn.Sequential(*layers)
def forward(self, x):
return self.blocks(x)
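# Usage sketch (illustrative, the argument values are assumptions): block_fn must accept
# in_chs / mid_chs / out_chs keyword arguments (e.g. SimpleBottleneck). A stage that halves the
# resolution and then stacks three more blocks at constant width could look like
#   stage = SimpleStage(in_chs=64, out_chs=128, num_blocks=4, stride=2,
#                       block_fn=SimpleBottleneck, norm_act="relu")
#   y = stage(torch.randn(2, 64, 56, 56))  # -> (2, 128, 28, 28)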
class CrossStage(nn.Module):
def __init__(
self,
in_chs,
out_chs,
num_blocks,
stride=2,
bottle_ratio=0.5,
antialias=False,
block_fn=SimpleBottleneck,
attn_type=None,
norm_layer=ABN,
norm_act="leaky_relu",
keep_prob=1,
csp_block_ratio=0.5, # how many channels go to blocks
x2_transition=True,
**block_kwargs,
):
super().__init__()
extra_kwarg = dict(norm_layer=norm_layer, norm_act=norm_act, **block_kwargs)
self.first_layer = block_fn(in_chs=in_chs, mid_chs=out_chs, out_chs=out_chs, stride=stride, **extra_kwarg)
        block_chs = int(csp_block_ratio * out_chs)  # todo: maybe change to make divisible or hardcode values
extra_kwarg.update(in_chs=block_chs, mid_chs=block_chs, out_chs=block_chs)
self.blocks = nn.Sequential(*[block_fn(**extra_kwarg) for _ in range(num_blocks - 1)])
# using identity activation in transition conv. the idea is the same as in Linear Bottleneck
# maybe need to test this design choice later. maybe I can simply remove this transition
self.x2_transition = (
nn.Sequential(conv1x1(block_chs, block_chs), norm_layer(block_chs, activation="identity"))
if x2_transition
else nn.Identity()
)
self.csp_block_ratio = csp_block_ratio
def forward(self, x):
x = self.first_layer(x)
if self.csp_block_ratio == 0.5:
x1, x2 = torch.chunk(x, chunks=2, dim=1)
elif self.csp_block_ratio == 0.75:
x1, x2, x3, x4 = torch.chunk(x, chunks=4, dim=1)
x2 = torch.cat([x2, x3, x4], dim=1)
x2 = self.blocks(x2)
x2 = self.x2_transition(x2)
out = torch.cat([x1, x2], dim=1)
# no explicit transition here. first conv in the next stage would perform transition
return out
class RepVGGBlock(nn.Module):
"""
This block is inspired by [1] and [2]. The idea is to have `n_heads` parallel branches of convolutions which are then summed + residual
For performance they are implemented as one conv3x3. I removed normalization by making sure this block is variance preserving
RepVGGBlock(act=Identity)( N(0, 1)) ~= N(0, 1)
    NOTE: this block requires correct initialization of conv weights to work. Highly recommended to use with ConvWS
    NOTE: for some reason SELU activation still causes an increase in variance. A possible solution is to use gain = 0.5 in ConvWS instead of 1
For inference this block could be replaced with `FusedRepVGGBlock` for significant speed-up
Args:
in_chs (int):
number of input channels
out_chs (int):
number of output channels. if out_chs > in_chs performs partial residual
n_heads (int):
number of parallel branches
act (nn.Module):
activation to use. recommended is nn.SELU
alpha (float):
parameter for skip init, balancing signal between block and residual path. output would be
x * (1 - alpha) + sum[conv3x3_i(x) * alpha], for i in range(n_heads)
trainable_alpha (bool):
if True make alpha a parameter, else it's un-trainable buffer
having trainable alpha may be beneficial. Timm repo says it could bring noticeable
slowdown for training. need to investigate
Ref:
[1] RepVGG: Making VGG-style ConvNets Great Again
[2] High-Performance Large-Scale Image Recognition Without Normalization
"""
def __init__(self, in_chs, out_chs, n_heads=2, act=nn.SELU, alpha=0.2, trainable_alpha=False):
super().__init__()
self.in_chs = in_chs
self.out_chs = out_chs
self.n_heads = n_heads
assert 0 <= alpha <= 1
self.conv = nn.Conv2d(in_chs, out_chs * n_heads, kernel_size=3, padding=1)
# it's important to carefully initialize alpha so that this block is variance preserving
branch_alpha = torch.ones(1, out_chs, 1, 1) * (alpha / n_heads) ** 0.5
# take care of extra features without residual (they appear if out_chs > in_chs)
branch_alpha[:, in_chs:] = (1 / n_heads) ** 0.5
residual_alpha = torch.tensor((1 - alpha) ** 0.5)
if trainable_alpha:
self.register_parameter("skipinit_res", nn.Parameter(residual_alpha))
self.register_parameter("skipinit_branch", nn.Parameter(branch_alpha))
else:
self.register_buffer("skipinit_res", residual_alpha)
self.register_buffer("skipinit_branch", branch_alpha)
self.activation = act()
def forward(self, x):
proj = self.conv(x)
        # sum the parallel branches; equivalent to adding up torch.chunk(proj, self.n_heads, dim=1) but faster
proj = proj.view(proj.size(0), self.n_heads, proj.size(1) // self.n_heads, proj.size(2), proj.size(3))
branch_res = proj.sum(1)
res = x * self.skipinit_res
if self.in_chs == self.out_chs:
branch_res = branch_res * self.skipinit_branch
branch_res += res
else:
branch_res = branch_res * self.skipinit_branch
branch_res[:, : self.in_chs] += res
out = self.activation(branch_res)
return out
def extra_repr(self):
return f"num_heads={self.n_heads}"
class FusedRepVGGBlock(nn.Sequential):
def __init__(self, in_chs, out_chs, act=nn.SELU):
super().__init__()
self.add_module("conv", nn.Conv2d(in_chs, out_chs, kernel_size=3, padding=1))
self.add_module("activation", act())
def load_state_dict(self, state_dict):
if state_dict.get("skipinit_res", None) is None:
super().load_state_dict(state_dict)
self_w = self.conv.weight
self_b = self.conv.bias
kernel_id = torch.zeros_like(self_w)
kernel_id[: self_w.size(1), :, 1, 1] = torch.eye(self_w.size(1))
kernel_id *= state_dict["skipinit_res"]
kernel_conv = state_dict["conv.weight"].view(-1, *self_w.shape).sum(0)
bias_conv = state_dict["conv.bias"].view(-1, *self.conv.bias.shape).sum(0)
kernel_conv *= state_dict["skipinit_branch"].view(self_w.size(0), 1, 1, 1)
bias_conv *= state_dict["skipinit_branch"].flatten()
self.conv.weight.data = kernel_id + kernel_conv
self.conv.bias.data = bias_conv
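# Fusion sketch (illustrative): load_state_dict above folds an unfused RepVGGBlock state dict
# into the single conv, roughly
#   W_fused = skipinit_res * I + skipinit_branch * sum_over_heads(W_head)
#   b_fused = skipinit_branch * sum_over_heads(b_head)
# where I is a 3x3 identity kernel (1 at the center tap for the residual channels) and
# skipinit_branch is applied per output channel, so one convolution reproduces the
# residual plus the scaled branch sum of the original block.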
# modules from XCiT paper
# XCA_Token is from timm, XCA is the same (covered by tests) but works directly on a B x C x H x W tensor
class XCA_Token(nn.Module):
"""Cross-Covariance Attention (XCA)
Operation where the channels are updated using a weighted sum. The weights are obtained from the (softmax
normalized) Cross-covariance matrix (Q^T \\cdot K \\in d_h \\times d_h)
"""
def __init__(self, dim, num_heads=8, qkv_bias=False, attn_drop=0.0, proj_drop=0.0):
super().__init__()
self.num_heads = num_heads
self.temperature = nn.Parameter(torch.ones(num_heads, 1, 1))
self.qkv = nn.Linear(dim, dim * 3, bias=qkv_bias)
self.attn_drop = nn.Dropout(attn_drop)
self.proj = nn.Linear(dim, dim)
self.proj_drop = nn.Dropout(proj_drop)
def forward(self, x):
B, N, C = x.shape
# Result of next line is (qkv, B, num (H)eads, (C')hannels per head, N)
qkv = self.qkv(x).reshape(B, N, 3, self.num_heads, C // self.num_heads).permute(2, 0, 3, 4, 1)
q, k, v = qkv[0], qkv[1], qkv[2] # make torchscript happy (cannot use tensor as tuple)
# Paper section 3.2 l2-Normalization and temperature scaling
q = torch.nn.functional.normalize(q, dim=-1)
k = torch.nn.functional.normalize(k, dim=-1)
attn = (q @ k.transpose(-2, -1)) * self.temperature
attn = attn.softmax(dim=-1)
attn = self.attn_drop(attn)
# (B, H, C', N), permute -> (B, N, H, C')
x = (attn @ v).permute(0, 3, 1, 2).reshape(B, N, C)
x = self.proj(x)
x = self.proj_drop(x)
return x
class XCA(nn.Module):
"""Cross-Covariance Attention (XCA)
Operation where the channels are updated using a weighted sum. The weights are obtained from the (softmax
normalized) Cross-covariance matrix (Q^T \\cdot K \\in d_h \\times d_h)
This could be viewed as dynamic 1x1 convolution
"""
def __init__(self, dim, num_heads=8, qkv_bias=True, attn_drop=0.0, proj_drop=0.0):
super().__init__()
self.num_heads = num_heads
self.temperature = nn.Parameter(torch.ones(num_heads, 1, 1))
self.qkv = conv1x1(dim, dim * 3, bias=qkv_bias)
self.attn_drop = nn.Dropout(attn_drop)
self.proj = conv1x1(dim, dim, bias=True)
self.proj_drop = nn.Dropout(proj_drop)
def forward(self, x):
B, C, H, W = x.shape
# C` == channels per head, Hd == num heads
# B x C x H x W -> B x 3*C x H x W -> B x 3 x Hd x C` x H*W -> 3 x B x Hd x C` x H*W
qkv = self.qkv(x).reshape(B, 3, self.num_heads, C // self.num_heads, -1).transpose(0, 1)
q, k, v = qkv[0], qkv[1], qkv[2] # make torchscript happy
# Paper section 3.2 l2-Normalization and temperature scaling
q = torch.nn.functional.normalize(q, dim=-1)
k = torch.nn.functional.normalize(k, dim=-1)
# -> B x Hd x C` x C`
attn = (q @ k.transpose(-2, -1)) * self.temperature
attn = attn.softmax(dim=-1)
attn = self.attn_drop(attn)
# B x Hd x C` x C` @ B x Hd x C` x H*W -> B x C x H x W
x_out = (attn @ v).reshape(B, C, H, W)
x_out = self.proj(x_out)
x_out = self.proj_drop(x_out)
# in original paper there is no residual here
return x + x_out
def load_state_dict(self, state_dict):
# to allow loading from Linear layer
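        # Hedged note: 2-D Linear weights of shape (out, in) from an XCA_Token-style
        # checkpoint are expanded to (out, in, 1, 1) so they fit the 1x1 convolutions
        # above; biases and the temperature parameter pass through unchanged.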
new_sd = {}
for k, v in state_dict.items():
if v.dim() == 2:
new_sd[k] = v[..., None, None]
else:
new_sd[k] = v
super().load_state_dict(new_sd)
class ESA(nn.Module):
"""
    Efficient self-attention. Performs self-attention on channels after GAP to significantly reduce the amount of required compute
Close to SE-Var3 from ECA paper and XCA above
"""
def __init__(self, dim, num_heads=8, qkv_bias=True, use_proj=True):
super().__init__()
self.num_heads = num_heads
self.pool = FastGlobalAvgPool2d()
self.qkv = conv1x1(dim, dim * 3, bias=qkv_bias)
self.temperature = nn.Parameter(torch.ones(num_heads, 1, 1))
self.proj = conv1x1(dim, dim, bias=True) if use_proj else nn.Identity()
def forward(self, x):
B, C = x.shape[:2]
# C` == channels per head, Hd == num heads
# B x C x H x W -> B x 3*C x 1 x 1 -> B x 3 x Hd x C` x 1 -> 3 x B x Hd x C` x 1
qkv = self.qkv(self.pool(x)).reshape(B, 3, self.num_heads, C // self.num_heads, -1).transpose(0, 1)
q, k, v = qkv[0], qkv[1], qkv[2] # make torchscript happy
        # Paper section 3.2 l2-Normalization and temperature scaling. BUT normalization is along the channel dim here because after pooling there is only a single spatial token
q = torch.nn.functional.normalize(q, dim=-2)
k = torch.nn.functional.normalize(k, dim=-2)
# -> B x Hd x C` x C`
attn = (q @ k.transpose(-2, -1)) * self.temperature
attn = attn.softmax(dim=-1)
# B x Hd x C` x C` @ B x Hd x C` x 1 -> B x C x 1
x_out = (attn @ v).reshape(B, C, 1, 1)
x_out = self.proj(x_out)
return x + x_out.expand_as(x)
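# Hedged usage sketch (not part of the original module): both XCA and ESA operate
# directly on B x C x H x W feature maps, so they can be dropped into a CNN stage.
# The sizes below are arbitrary examples; `dim` must be divisible by `num_heads`.
#
#   feats = torch.randn(2, 64, 32, 32)
#   xca = XCA(dim=64, num_heads=8)   # channel-wise ("cross-covariance") attention
#   esa = ESA(dim=64, num_heads=8)   # same idea applied after global average pooling
#   out = esa(xca(feats))            # shape is preserved: 2 x 64 x 32 x 32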
|
#!/usr/bin/env python2
"""
files.py: Write to files
"""
from __future__ import print_function
import os
import sys
import mylib
from mylib import log
def run_tests():
# type: () -> None
f = mylib.BufWriter()
for i in xrange(30):
f.write(chr(i + 65))
contents = f.getvalue()
log('Wrote %d bytes to StringIO', len(contents))
log('contents = %s ... %s', contents[:10], contents[-10:])
f2 = mylib.Stdout()
f2.write('stdout\n')
def run_benchmarks():
# type: () -> None
n = 10000
result = 0
i = 0
while i < n:
f = mylib.BufWriter()
for j in xrange(30):
f.write(chr(j + 65))
result += len(f.getvalue())
i += 1
log('Ran %d iterations', n)
log('result = %d', result)
if __name__ == '__main__':
if os.getenv('BENCHMARK'):
log('Benchmarking...')
run_benchmarks()
else:
run_tests()
|
import os
from dotenv import load_dotenv
load_dotenv()
class Config(object):
DEBUG=True
SQLALCHEMY_DATABASE_URI = os.environ.get('DATABASE_URL')
SQLALCHEMY_TRACK_MODIFICATIONS = True
SEND_FILE_MAX_AGE_DEFAULT = 0
SECRET_KEY = "secret"
EMAIL_API = os.environ.get('EMAIL_API')
FACEBOOK_OAUTH_CLIENT_ID = os.environ.get("FACEBOOK_OAUTH_CLIENT_ID")
FACEBOOK_OAUTH_CLIENT_SECRET = os.environ.get("FACEBOOK_OAUTH_CLIENT_SECRET")
|
from django.apps import AppConfig
class MusicsConfig(AppConfig):
name = 'nomadgram.musics'
|
__________________________________________________________________________________________________
sample 24 ms submission
class Solution:
def isAlienSorted(self, words: List[str], order: str) -> bool:
order_map={c:i for i,c in enumerate(order)} # create a hashmap
wordIndices=[[order_map[c] for c in word] for word in words]
return all(w1<=w2 for w1,w2 in zip(wordIndices,wordIndices[1:]))
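# Worked example (hedged; the classic LeetCode case): with words = ["hello", "leetcode"]
# and order = "hlabcdefgijkmnopqrstuvwxyz", each word is mapped to the list of its
# letters' indices in `order`; "hello" starts with 'h' (index 0) and "leetcode" with
# 'l' (index 1), so the pairwise list comparison returns True.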
__________________________________________________________________________________________________
sample 13008 kb submission
class Solution:
def isAlienSorted(self, words, order):
"""
:type words: List[str]
:type order: str
:rtype: bool
"""
indexList = list(order)
for wordIndex in range(len(words)-1):
letterIndex = 0
while letterIndex != len(words[wordIndex]):
if letterIndex == len(words[wordIndex+1]):
return False
if words[wordIndex][letterIndex] == words[wordIndex+1][letterIndex]:
letterIndex += 1
else:
if indexList.index(words[wordIndex][letterIndex]) < indexList.index(words[wordIndex+1][letterIndex]):
break
else:
return False
return True
__________________________________________________________________________________________________
|
import unittest
import find_ele
class test_find_ele(unittest.TestCase):
def test_array_pair_sum(self):
self.assertEqual(find_ele.finder([5,5,7,7],[5,7,7]),5)
self.assertEqual(find_ele.finder([1,2,3,4,5,6,7],[3,7,2,1,4,6]),5)
self.assertEqual(find_ele.finder([9,8,7,6,5,4,3,2,1],[9,8,7,5,4,3,2,1]),6)
print("All tests passed")
if __name__ == '__main__':
unittest.main()
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Ed Mountjoy
#
import json
from glob import glob
import gzip
def main():
# Args
out_json = 'configs/manifest.json.gz'
valid_chrom = set([str(chrom) for chrom in range(1, 23)])
method = 'conditional'
# Path patterns (server)
root = '/home/js29/genetics-finemapping'
input_pattern = root + '/tmp/filtered_input/*.json.gz'
out_path = root + '/output/study_id={0}/phenotype_id={1}/bio_feature={2}/chrom={3}'
log_path = root + '/logs/study_id={0}/phenotype_id={1}/bio_feature={2}/chrom={3}'
tmp_path = root + '/tmp/study_id={0}/phenotype_id={1}/bio_feature={2}/chrom={3}'
    # In base folder rather than genetics-finemapping for sharing with coloc pipeline
ld_ref = '/home/js29/data/ukb_v3_downsampled10k/ukb_v3_chr{chrom}.downsampled10k'
# Create manifest
manifest = []
for in_record in read_json_records(input_pattern):
# initiate output
out_record = {}
# Skip if chromosome is not valid
        if in_record['chrom'] not in valid_chrom:
continue
# Add study identifier arguments
out_record['type'] = in_record.get('type')
out_record['study_id'] = in_record.get('study_id')
out_record['phenotype_id'] = in_record.get('phenotype_id', None)
out_record['bio_feature'] = in_record.get('bio_feature', None)
out_record['chrom'] = in_record.get('chrom')
# Add input files
out_record['in_pq'] = parse_input_name(in_record.get('input_name'))
out_record['in_ld'] = ld_ref
# Add output files
out_record['out_top_loci'] = out_path.format(
out_record['study_id'], out_record['phenotype_id'],
out_record['bio_feature'], out_record['chrom']
) + '/top_loci.json.gz'
out_record['out_credset'] = out_path.format(
out_record['study_id'], out_record['phenotype_id'],
out_record['bio_feature'], out_record['chrom']
) + '/credible_set.json.gz'
out_record['out_finemap'] = out_path.format(
out_record['study_id'], out_record['phenotype_id'],
out_record['bio_feature'], out_record['chrom']
) + '/finemap_snp.tsv.gz'
out_record['out_log'] = log_path.format(
out_record['study_id'], out_record['phenotype_id'],
out_record['bio_feature'], out_record['chrom']
) + '/logfile.txt'
out_record['tmpdir'] = tmp_path.format(
out_record['study_id'], out_record['phenotype_id'],
out_record['bio_feature'], out_record['chrom']
)
# Add method
out_record['method'] = method
out_record['pval_threshold'] = in_record.get('pval_threshold')
manifest.append(out_record)
# Write manifest as a json
with gzip.open(out_json, 'w') as out_h:
for record in manifest:
out_h.write((json.dumps(record) + '\n').encode())
return 0
def read_json_records(in_pattern):
''' Globs json inputs then yields all records as dicts.
Expects inputs to be gzipped.
'''
for inf in glob(in_pattern):
with gzip.open(inf, 'r') as in_h:
for in_record in in_h:
in_record = json.loads(in_record.decode().rstrip())
yield in_record
def parse_input_name(s):
''' Parses the required input name. Spark's input_file_name() returns the
nested parquet file, I need the top level parquet.
'''
# Strip nested parquet
out_s = s.split('.parquet')[0] + '.parquet'
    # Strip the file:// prefix
out_s = out_s.replace('file://', '')
return out_s
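# Illustrative example of parse_input_name (the path below is hypothetical):
#   'file:///data/filtered_input/part-0.parquet/part-00000-xyz.snappy.parquet'
#   -> '/data/filtered_input/part-0.parquet'
# i.e. everything after the first '.parquet' is dropped and the 'file://' scheme
# prefix is removed.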
if __name__ == '__main__':
main()
|
#-------------------------------------------------------------------------------
# Author: Karthik Vegi
# Email: karthikvegi@outlook.com
# Python Version: 3.6
#-------------------------------------------------------------------------------
import math
from datetime import datetime
def send_to_destination(output, destination, delimiter):
destination.write(delimiter.join(output) + "\n")
def empty_fields(fields):
if any(map(lambda x: not x.strip(), fields)):
return True
def malformed_field(field, ideal_length):
if len(field) < ideal_length:
return True
def invalid_date(field, format):
try:
datetime.strptime(field, format)
except Exception as e:
return True
# Nearest-rank method percentile
def get_ordinal_rank(ord_list, percentile):
idx = int(math.ceil(percentile * 0.01 * len(ord_list)))
return (idx-1)
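# Hedged worked example for the nearest-rank percentile above: for a sorted list of
# 10 values and percentile = 90, idx = ceil(0.9 * 10) = 9, so the function returns
# index 8, i.e. the 9th smallest value:
#   get_ordinal_rank(sorted([3, 1, 4, 1, 5, 9, 2, 6, 5, 3]), 90)  # -> 8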
|
'''
Graph the functions 8n, 4nlogn, 2n^2, n^3, and 2^n using a logarithmic scale
for the x- and y-axes; that is, if the function value f (n) is y, plot this as a
point with x-coordinate at logn and y-coordinate at logy.
'''
import numpy
import matplotlib.pyplot
x , y, y1,y2,y3,y4 = [],[],[],[],[],[]
for i in range(2,20):
    x.append(numpy.log2(i))
    y.append(3 + x[i-2])                        # log2(8n) = 3 + log2(n)
    y1.append(2 + x[i-2] + numpy.log2(x[i-2]))  # log2(4*n*log n) = 2 + log2(n) + log2(log2(n))
    y2.append(1 + 2*x[i-2])                     # log2(2*n^2) = 1 + 2*log2(n)
    y3.append(3*x[i-2])                         # log2(n^3) = 3*log2(n)
    y4.append(i)                                # log2(2^n) = n
print(x)
#matplotlib.pyplot.plot(x,y,'ro',x,y1,'b*',x,y2,'g^',x,y3,'ks',x,y4,'m+')
matplotlib.pyplot.plot(x,y,'r',x,y1,'b',x,y2,'g',x,y3,'k',x,y4,'m')
matplotlib.pyplot.show()
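# Note (hedged): on these log-log axes every polynomial a*n^k becomes a straight line
# with slope k and intercept log2(a), which is why the curves for 8n, 4nlogn, 2n^2 and
# n^3 are (near-)linear in x = log2(n), while 2^n gives log2(2^n) = n = 2^x and
# therefore curves away from the polynomial lines.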
|
def main():
y = 3
for x in [1, 2, 3]:
s = x + y
print(s)
y -= 1
print(s)
|
from typing import Dict
import torch
from torch import Tensor
import torch.nn as nn
import torch.nn.functional as F
from .conv import Conv2dRT, Conv2dLRT, Conv3dRT, Conv3dLRT
from .linear import LinearRT, LinearLRT
from .dropout import MCDropout
class MeanFieldVI(nn.Module):
def __init__(self,
net: nn.Module,
prior: Dict[str, float] = None,
posteriors: Dict[str, float] = None,
beta: float = 1.,
kl_type: str = 'reverse',
reparam: str = 'local'):
super(MeanFieldVI, self).__init__()
self.net = net
if reparam == 'local':
self._conv3d = Conv3dLRT
self._conv2d = Conv2dLRT
self._linear = LinearLRT
else:
self._conv3d = Conv3dRT
self._conv2d = Conv2dRT
self._linear = LinearRT
self._replace_deterministic_modules(self.net, prior, posteriors, kl_type)
# self.net.kl = self.kl
self.beta = torch.tensor([beta])
def forward(self, x: Tensor) -> Tensor:
return self.net(x)
@property
def kl(self) -> Tensor:
kl = 0
for layer in self.modules():
if hasattr(layer, '_kl'):
kl += layer._kl
return self.beta.to(kl.device) * kl
def _replace_deterministic_modules(self,
module: nn.Module,
prior: Dict[str, float],
posteriors: Dict[str, float],
kl_type: str):
for key, _module in module._modules.items():
if len(_module._modules):
self._replace_deterministic_modules(_module, prior, posteriors, kl_type)
else:
if isinstance(_module, nn.Linear):
layer = self._linear(
_module.in_features,
_module.out_features,
torch.is_tensor(_module.bias))
module._modules[key] = layer
elif isinstance(_module, nn.Conv2d):
layer = self._conv2d(
in_channels=_module.in_channels,
out_channels=_module.out_channels,
kernel_size=_module.kernel_size,
bias=torch.is_tensor(_module.bias),
stride=_module.stride,
padding=_module.padding,
dilation=_module.dilation,
groups=_module.groups,
prior=prior,
posteriors=posteriors,
kl_type=kl_type)
module._modules[key] = layer
elif isinstance(_module, nn.Conv3d):
layer = self._conv3d(
in_channels=_module.in_channels,
out_channels=_module.out_channels,
kernel_size=_module.kernel_size,
bias=torch.is_tensor(_module.bias),
stride=_module.stride,
padding=_module.padding,
dilation=_module.dilation,
groups=_module.groups,
prior=prior,
posteriors=posteriors,
kl_type=kl_type)
module._modules[key] = layer
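# Hedged usage sketch (the network below is only illustrative; any nn.Module built
# from Linear/Conv2d/Conv3d layers can be wrapped):
#
#   net = nn.Sequential(nn.Conv2d(3, 16, 3, padding=1), nn.Flatten(), nn.Linear(16 * 32 * 32, 10))
#   model = MeanFieldVI(net, beta=1e-3)
#   logits = model(torch.randn(8, 3, 32, 32))
#   loss = F.cross_entropy(logits, torch.randint(0, 10, (8,))) + model.kl
#
# i.e. the variational layers accumulate their KL terms, exposed via `model.kl`
# (already scaled by `beta`), which is added to the data-fit term for an ELBO-style
# objective.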
class MCDropoutVI(nn.Module):
def __init__(self,
net: nn.Module,
dropout_type: str = '1d',
dropout_p: float = 0.5,
deterministic_output: bool = False,
output_dip_drop: bool = False):
super(MCDropoutVI, self).__init__()
self.net = net
self.dropout_type = dropout_type
self.dropout_p = dropout_p
self._replace_deterministic_modules(self.net)
# self.deterministic_output = deterministic_output
if deterministic_output:
self._make_last_layer_deterministic(self.net)
if not output_dip_drop:
self._dip_make_output_deterministic(self.net)
def forward(self, x: Tensor) -> Tensor:
return self.net(x)
def _replace_deterministic_modules(self, module: nn.Module):
for key, _module in module._modules.items():
if len(_module._modules):
self._replace_deterministic_modules(_module)
else:
if isinstance(_module, (nn.Linear, nn.Conv2d, nn.Conv3d)):
module._modules[key] = MCDropout(_module, self.dropout_type, self.dropout_p)
def _make_last_layer_deterministic(self, module: nn.Module):
for i, (key, layer) in enumerate(module._modules.items()):
if i == len(module._modules) - 1:
if isinstance(layer, MCDropout):
module._modules[key] = layer.layer
elif len(layer._modules):
self._make_last_layer_deterministic(layer)
def _dip_make_output_deterministic(self, module: nn.Module):
for i, (key, layer) in enumerate(module._modules.items()):
if type(layer) == nn.Sequential:
for name, m in layer._modules.items():
if type(m) == MCDropout:
layer._modules[name] = m.layer
|
from pyramid.view import (
notfound_view_config,
exception_view_config,
forbidden_view_config,
)
from pyramid.httpexceptions import (
HTTPServerError,
HTTPBadRequest,
HTTPUnauthorized,
)
from ..services.encoding import encode_error_message
@notfound_view_config(renderer="json")
def notfound_view(message, request):
request.response.status = 404
return encode_error_message(request.response.status_int, message)
@exception_view_config(HTTPServerError, renderer="json")
def server_error_view(message, request):
request.response.status = 500
return encode_error_message(request.response.status_int, message)
@exception_view_config(HTTPBadRequest, renderer="json")
def exc_bad_request_view(message, request):
request.response.status = 400
return encode_error_message(request.response.status_int, message)
@forbidden_view_config(renderer="json")
def forbidden_view(message, request):
request.response.status = 403
return encode_error_message(request.response.status_int, message)
|
import csv
list_workclass = ['Private', 'Self-emp-not-inc', 'Self-emp-inc', 'Federal-gov', 'Local-gov', 'State-gov',
'Without-pay', 'Never-worked']
list_education = ['Bachelors', 'Some-college', '11th', 'HS-grad', 'Prof-school', 'Assoc-acdm', 'Assoc-voc', '9th',
'7th-8th', '12th', 'Masters', '1st-4th', '10th', 'Doctorate', '5th-6th', 'Preschool']
list_marital_status = ['Married-civ-spouse', 'Divorced', 'Never-married', 'Separated', 'Widowed',
'Married-spouse-absent', 'Married-AF-spouse']
list_occupation = ['Tech-support', 'Craft-repair', 'Other-service', 'Sales', 'Exec-managerial', 'Prof-specialty',
'Handlers-cleaners', 'Machine-op-inspct', 'Adm-clerical', 'Farming-fishing', 'Transport-moving',
'Priv-house-serv', 'Protective-serv', 'Armed-Forces']
list_relationship = ['Wife', 'Own-child', 'Husband', 'Not-in-family', 'Other-relative', 'Unmarried']
list_race = ['White', 'Asian-Pac-Islander', 'Amer-Indian-Eskimo', 'Other', 'Black']
list_sex = ['Female', 'Male']
list_native_country = ['United-States', 'Cambodia', 'England', 'Puerto-Rico', 'Canada', 'Germany',
'Outlying-US(Guam-USVI-etc)', 'India', 'Japan', 'Greece', 'South', 'China', 'Cuba', 'Iran',
'Honduras', 'Philippines', 'Italy', 'Poland', 'Jamaica', 'Vietnam', 'Mexico', 'Portugal',
'Ireland', 'France', 'Dominican-Republic', 'Laos', 'Ecuador', 'Taiwan', 'Haiti', 'Columbia',
'Hungary', 'Guatemala', 'Nicaragua', 'Scotland', 'Thailand', 'Yugoslavia', 'El-Salvador',
'Trinadad&Tobago', 'Peru', 'Hong', 'Holand-Netherlands']
dict_workclass = dict(zip(list_workclass, range(0, len(list_workclass))))
dict_education = dict(zip(list_education, range(0, len(list_education))))
dict_marital_status = dict(zip(list_marital_status, range(0, len(list_marital_status))))
dict_occupation = dict(zip(list_occupation, range(0, len(list_occupation))))
dict_relationship = dict(zip(list_relationship, range(0, len(list_relationship))))
dict_race = dict(zip(list_race, range(0, len(list_race))))
dict_sex = dict(zip(list_sex, range(0, len(list_sex))))
dict_native_country = dict(zip(list_native_country, range(0, len(list_native_country))))
dict_is_earning_more_than_50 = {
"<=50K": 0,
">50K": 1
}
if __name__ == "__main__":
data_without_string = []
with open("adult.data", 'r', newline='') as csvfile:
filereaderData = csv.reader(csvfile, delimiter=',')
for i, row in enumerate(filereaderData):
for j in range(0, len(row)): row[j] = row[j].lstrip()
data_without_string.append([row[0], dict_workclass.get(row[1], "null"), row[2],
dict_education.get(row[3], "null"), dict_marital_status.get(row[5], "null"),
dict_occupation.get(row[6], "null"), dict_relationship.get(row[7], "null"),
dict_race.get(row[8], "null"), dict_sex.get(row[9], "null"), row[10], row[11],
row[12], dict_native_country.get(row[13], "null"),
dict_is_earning_more_than_50.get(row[14], "null")])
print(i)
'''
data_without_string[i][0] = row[0]
data_without_string[i][1] = dict_workclass(row[1])
data_without_string[i][2] = row[2]
data_without_string[i][3] = dict_education(row[3])
# La colonne 4 contient "education-num" que l'on remplace à l'aide de dict_education
data_without_string[i][4] = dict_marital_status(row[5])
data_without_string[i][5] = dict_occupation(row[6])
data_without_string[i][6] = dict_relationship(row[7])
data_without_string[i][7] = dict_race(row[8])
data_without_string[i][8] = dict_sex(row[9])
data_without_string[i][9] = row[10]
data_without_string[i][10] = row[11]
data_without_string[i][11] = row[12]
data_without_string[i][12] = dict_native_country(row[13])
data_without_string[i][13] = dict_is_earning_more_than_50(row[14])
'''
filename = 'adult_processed_data.data'
header = ['age', 'workclass', 'fnlwgt', 'education', 'marital-status', 'occupation', 'relationship', 'race', 'sex', 'capital-gain', 'capital-loss', 'hours-per-week', 'native-country', 'is-upper-than-50k']
with open(filename, 'w', newline='') as file:
csvwriter = csv.writer(file)
csvwriter.writerow(header)
csvwriter.writerows(data_without_string)
|
from ariadne.objects import MutationType
from chowkidar.utils import AuthError
from social_core.exceptions import MissingBackend
from social_django.views import _do_login
from social_django.utils import load_backend, load_strategy
social_auth_mutations = MutationType()
@social_auth_mutations.field('socialAuth')
def authenticate_using_social_auth(_, info, accessToken, provider):
try:
strategy = load_strategy(info.context.request)
backend = load_backend(strategy, provider, redirect_uri=None)
except MissingBackend:
raise AuthError('Auth Provider Not Supported', code='INVALID_PROVIDER')
user = backend.do_auth(accessToken, user=None)
_do_login(backend, user, user.social_user)
return {"success": True, "user": user.__dict__}
__all__ = [
'social_auth_mutations'
]
|
#!/usr/bin/env python
# coding: utf-8
import html
import os
import re
import pandas as pd
import requests
from prettyprinter import cpprint
target_url = "http://scp-jp.wikidot.com/guide-hub"
start_word = '<h1 id="toc0"><span>先ずはこれを読んでください</span></h1>'
end_word = '<div class="footnotes-footer">'
def guide_hub():
response = requests.get(target_url)
    if response.status_code != requests.codes.ok:
print(f"\trequest err : {response.status_code}")
masterpath = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
lines = response.text.split("\n")
start = lines.index(start_word)
df = pd.DataFrame(columns=['url', 'title', 'description'])
urls = []
titles = []
descriptions = []
for line in lines[start:]:
line = html.unescape(line)
if end_word in line:
break
if 'href' in line:
sp_line = re.split(r'[<>]', line)
            # Needs improvement
for i, sp in enumerate(sp_line):
if 'href' in sp:
if 'newpage' in sp_line[i]:
url = sp_line[i].replace(
'a class="newpage" href=', "").replace(
'"', "")
else:
url = sp_line[i].replace(
'a href=', "").replace(
'"', "")
urls.append(url)
titles.append(sp_line[i + 1])
descriptions.append(sp_line[i + 5].replace(": ", ''))
break
df['url'] = urls
df['title'] = titles
df['description'] = descriptions
df.to_csv(masterpath + "/data/guide_hub.csv", header=True, encoding="utf-8")
if __name__ == "__main__":
print("菖蒲:ガイドハブデータベースの更新を開始します。")
guide_hub()
print("菖蒲:ガイドハブデータベースの更新、完了しました。")
|
# Imports: standard library
import os
import logging
from typing import Dict, List, Tuple, Union, Optional
from collections import OrderedDict, defaultdict
# Imports: third party
import numpy as np
import pandas as pd
import tensorflow as tf
from tensorflow.keras.models import Model
# Imports: first party
from ml4c3.plots import (
plot_scatter,
subplot_rocs,
subplot_scatters,
plot_roc_per_class,
plot_confusion_matrix,
plot_feature_coefficients,
plot_prediction_calibration,
plot_precision_recall_per_class,
)
from ml4c3.models import SKLEARN_MODELS
from ml4c3.datasets import (
BATCH_IDS_INDEX,
BATCH_INPUT_INDEX,
BATCH_OUTPUT_INDEX,
get_array_from_dict_of_arrays,
get_dicts_of_arrays_from_dataset,
)
from definitions.types import Path, Paths, Inputs, Outputs, Predictions
from definitions.globals import CSV_EXT
from tensormap.TensorMap import TensorMap, find_negative_label_and_channel
def predict_and_evaluate(
model: Union[Model, SKLEARN_MODELS],
data: Union[
tf.data.Dataset,
Tuple[Inputs, Outputs],
Tuple[Inputs, Outputs, Paths],
],
tensor_maps_in: List[TensorMap],
tensor_maps_out: List[TensorMap],
plot_path: Path,
data_split: str,
image_ext: str,
save_coefficients: bool = False,
batch_size: Optional[int] = None,
save_predictions: bool = False,
top_features_to_plot: Optional[int] = None,
) -> Dict:
"""
Evaluate trained model on dataset, save plots, and return performance metrics
:param model: Model
:param data: tensorflow Dataset or tuple of inputs, outputs, and optionally paths
:param tensor_maps_in: Input maps
:param tensor_maps_out: Output maps
:param plot_path: Path to directory to save plots to
    :param data_split: Name of data split
    :param image_ext: File type to save images as
    :param save_coefficients: Save model coefficients
:param batch_size: Number of samples to use in a batch, required if data is a
tuple of input and output numpy arrays
:param save_predictions: If true, save predicted and actual output values to a csv
:param top_features_to_plot: Number of features to plot in features coefficients
plot.
:return: Dictionary of performance metrics
"""
performance_metrics = {}
scatters: List[Tuple[np.ndarray, np.ndarray, str, List[str]]] = []
rocs: List[Tuple[np.ndarray, np.ndarray, Dict[str, int]]] = []
layer_names = []
if isinstance(model, Model):
layer_names = [layer.name for layer in model.layers]
for tm in tensor_maps_out:
if tm.output_name not in layer_names:
raise ValueError(
"Output tensor map name not found in layers of loaded model",
)
if (
save_coefficients
and isinstance(model, Model)
and not len(model.layers)
== len(tensor_maps_in) + len(tensor_maps_out) + 1 # concat layer
):
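        # Hedged reading: when the Keras model is not a simple linear head (inputs +
        # outputs + one concat layer), there is no single coefficient vector to save,
        # so coefficient saving is skipped.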
pass
elif save_coefficients:
if isinstance(model, Model):
coefficients = [c[0].round(3) for c in model.layers[-1].get_weights()[0]]
else:
coefficients = get_sklearn_model_coefficients(model=model)
# Get feature names from TMaps
feature_names = []
for tm in tensor_maps_in:
# Use append to add single string to list
if tm.channel_map is None:
feature_names.append(tm.name)
# Use extend to add list items to list
else:
feature_names.extend(tm.channel_map)
if len(coefficients) != len(feature_names):
raise ValueError("Number of coefficient values and names differ!")
# Create dataframe of features
df = pd.DataFrame({"feature": feature_names, "coefficient": coefficients})
df = df.iloc[(-df["coefficient"]).argsort()].reset_index(drop=True)
# Create coefficients plots
if top_features_to_plot:
plot_feature_coefficients(
plot_path=plot_path,
model_name=model.name,
feature_values=df,
top_features_to_plot=top_features_to_plot,
image_ext=image_ext,
)
# Save dataframe
fname = os.path.join(plot_path, "coefficients" + ".csv")
if not os.path.exists(os.path.dirname(fname)):
os.makedirs(os.path.dirname(fname))
df.round(3).to_csv(path_or_buf=fname, index=False)
y_predictions, output_data, data_paths = _get_predictions_from_data(
model=model,
data=data,
batch_size=batch_size,
tensor_maps_in=tensor_maps_in,
tensor_maps_out=tensor_maps_out,
)
if save_predictions:
save_data = OrderedDict()
if data_paths is not None:
save_data["patient_id"] = [
os.path.splitext(os.path.basename(p))[0] for p in data_paths
]
for y_prediction, tm in zip(y_predictions, tensor_maps_out):
if tm.axes != 1:
continue
y_actual = tm.rescale(output_data[tm.output_name])
y_prediction = tm.rescale(y_prediction)
if tm.channel_map is not None:
negative_label_idx = -1
if len(tm.channel_map) == 2:
_, negative_label_idx = find_negative_label_and_channel(
tm.channel_map,
)
for cm, idx in tm.channel_map.items():
if idx == negative_label_idx:
continue
save_data[f"{tm.name}-{cm}-truth"] = y_actual[..., idx]
save_data[f"{tm.name}-{cm}-predicted"] = y_prediction[..., idx]
else:
save_data[f"{tm.name}-truth"] = y_actual.flatten()
save_data[f"{tm.name}-predicted"] = y_prediction.flatten()
path = os.path.join(plot_path, f"predictions-{data_split}{CSV_EXT}")
pd.DataFrame(save_data).round(6).to_csv(path, index=False)
logging.info(f"Saved predictions at: {path}")
# Iterate over each output tensor map and assess performance of predictions
for y, tm in zip(y_predictions, tensor_maps_out):
if isinstance(model, Model):
if tm.output_name not in layer_names:
continue
y_truth = np.array(output_data[tm.output_name])
performance_metrics.update(
evaluate_predictions(
tm=tm,
y_predictions=y,
y_truth=y_truth,
title=tm.name,
image_ext=image_ext,
folder=plot_path,
test_paths=data_paths,
rocs=rocs,
scatters=scatters,
data_split=data_split,
),
)
if len(rocs) > 1:
subplot_rocs(
rocs=rocs,
data_split=data_split,
image_ext=image_ext,
plot_path=plot_path,
)
if len(scatters) > 1:
subplot_scatters(
scatters=scatters,
data_split=data_split,
image_ext=image_ext,
plot_path=plot_path,
)
return performance_metrics
def evaluate_predictions(
tm: TensorMap,
y_predictions: np.ndarray,
y_truth: np.ndarray,
title: str,
image_ext: str,
folder: str,
test_paths: Optional[List[str]] = None,
max_melt: int = 30000,
rocs: List[Tuple[np.ndarray, np.ndarray, Dict[str, int]]] = [],
scatters: List[Tuple[np.ndarray, np.ndarray, str, List[str]]] = [],
data_split: str = "test",
) -> Dict[str, float]:
"""Evaluate predictions for a given TensorMap with truth data and plot the
appropriate metrics. Accumulates data in the rocs and scatters lists to
facilitate subplotting.
:param tm: The TensorMap predictions to evaluate
:param y_predictions: The predictions
:param y_truth: The truth
:param title: A title for the plots
:param image_ext: File type to save images as
:param folder: The folder to save the plots at
:param test_paths: The tensor paths that were predicted
:param max_melt: For multi-dimensional prediction the maximum number of
prediction to allow in the flattened array
:param rocs: (output) List of Tuples which are inputs for ROC curve plotting to
allow subplotting downstream
:param scatters: (output) List of Tuples which are inputs for scatter plots to
allow subplotting downstream
:param data_split: The data split being evaluated (train, valid, or test)
:return: Dictionary of performance metrics with string keys for labels and float
values
"""
performance_metrics = {}
if tm.is_categorical and tm.axes == 1:
logging.info(
f"{data_split} split: {tm.name} has channel map: {tm.channel_map}"
f" with {y_predictions.shape[0]} examples.\n"
f"Sum Truth:{np.sum(y_truth, axis=0)} \nSum pred"
f" :{np.sum(y_predictions, axis=0)}",
)
performance_metrics.update(
plot_roc_per_class(
prediction=y_predictions,
truth=y_truth,
labels=tm.channel_map,
title=title,
image_ext=image_ext,
prefix=folder,
data_split=data_split,
),
)
plot_precision_recall_per_class(
prediction=y_predictions,
truth=y_truth,
labels=tm.channel_map,
title=title,
image_ext=image_ext,
prefix=folder,
data_split=data_split,
)
plot_prediction_calibration(
prediction=y_predictions,
truth=y_truth,
labels=tm.channel_map,
title=title,
image_ext=image_ext,
prefix=folder,
data_split=data_split,
)
rocs.append((y_predictions, y_truth, tm.channel_map))
# For non-binary classification tasks, plot confusion matrix
if len(tm.channel_map) > 2:
plot_confusion_matrix(
prediction=y_predictions,
truth=y_truth,
labels=tm.channel_map,
title=title,
image_ext=image_ext,
prefix=folder,
data_split=data_split,
)
elif tm.is_categorical and tm.axes == 2:
melt_shape = (
y_predictions.shape[0] * y_predictions.shape[1],
y_predictions.shape[2],
)
idx = np.random.choice(
np.arange(melt_shape[0]),
min(melt_shape[0], max_melt),
replace=False,
)
y_predictions = y_predictions.reshape(melt_shape)[idx]
y_truth = y_truth.reshape(melt_shape)[idx]
performance_metrics.update(
plot_roc_per_class(
prediction=y_predictions,
truth=y_truth,
labels=tm.channel_map,
title=title,
image_ext=image_ext,
prefix=folder,
data_split=data_split,
),
)
performance_metrics.update(
plot_precision_recall_per_class(
prediction=y_predictions,
truth=y_truth,
labels=tm.channel_map,
title=title,
image_ext=image_ext,
prefix=folder,
data_split=data_split,
),
)
plot_prediction_calibration(
prediction=y_predictions,
truth=y_truth,
labels=tm.channel_map,
title=title,
image_ext=image_ext,
prefix=folder,
data_split=data_split,
)
rocs.append((y_predictions, y_truth, tm.channel_map))
elif tm.is_categorical and tm.axes == 3:
melt_shape = (
y_predictions.shape[0] * y_predictions.shape[1] * y_predictions.shape[2],
y_predictions.shape[3],
)
idx = np.random.choice(
np.arange(melt_shape[0]),
min(melt_shape[0], max_melt),
replace=False,
)
y_predictions = y_predictions.reshape(melt_shape)[idx]
y_truth = y_truth.reshape(melt_shape)[idx]
performance_metrics.update(
plot_roc_per_class(
prediction=y_predictions,
truth=y_truth,
labels=tm.channel_map,
title=title,
image_ext=image_ext,
prefix=folder,
data_split=data_split,
),
)
performance_metrics.update(
plot_precision_recall_per_class(
prediction=y_predictions,
truth=y_truth,
labels=tm.channel_map,
title=title,
image_ext=image_ext,
prefix=folder,
data_split=data_split,
),
)
plot_prediction_calibration(
prediction=y_predictions,
truth=y_truth,
labels=tm.channel_map,
title=title,
image_ext=image_ext,
prefix=folder,
data_split=data_split,
)
rocs.append((y_predictions, y_truth, tm.channel_map))
elif tm.is_categorical and tm.axes == 4:
melt_shape = (
y_predictions.shape[0]
* y_predictions.shape[1]
* y_predictions.shape[2]
* y_predictions.shape[3],
y_predictions.shape[4],
)
idx = np.random.choice(
np.arange(melt_shape[0]),
min(melt_shape[0], max_melt),
replace=False,
)
y_predictions = y_predictions.reshape(melt_shape)[idx]
y_truth = y_truth.reshape(melt_shape)[idx]
performance_metrics.update(
plot_roc_per_class(
prediction=y_predictions,
truth=y_truth,
labels=tm.channel_map,
title=title,
image_ext=image_ext,
prefix=folder,
data_split=data_split,
),
)
performance_metrics.update(
plot_precision_recall_per_class(
prediction=y_predictions,
truth=y_truth,
labels=tm.channel_map,
title=title,
image_ext=image_ext,
prefix=folder,
data_split=data_split,
),
)
plot_prediction_calibration(
prediction=y_predictions,
truth=y_truth,
labels=tm.channel_map,
title=title,
image_ext=image_ext,
prefix=folder,
data_split=data_split,
)
rocs.append((y_predictions, y_truth, tm.channel_map))
elif tm.is_language:
performance_metrics.update(
plot_roc_per_class(
prediction=y_predictions,
truth=y_truth,
labels=tm.channel_map,
title=title,
image_ext=image_ext,
prefix=folder,
data_split=data_split,
),
)
performance_metrics.update(
plot_precision_recall_per_class(
prediction=y_predictions,
truth=y_truth,
labels=tm.channel_map,
title=title,
image_ext=image_ext,
prefix=folder,
data_split=data_split,
),
)
rocs.append((y_predictions, y_truth, tm.channel_map))
elif tm.is_continuous:
performance_metrics.update(
plot_scatter(
prediction=tm.rescale(y_predictions),
truth=tm.rescale(y_truth),
title=title,
image_ext=image_ext,
prefix=folder,
paths=test_paths,
data_split=data_split,
),
)
scatters.append(
(tm.rescale(y_predictions), tm.rescale(y_truth), title, test_paths),
)
else:
logging.warning(f"No evaluation clause for tensor map {tm.name}")
return performance_metrics
def get_sklearn_model_coefficients(model: SKLEARN_MODELS) -> np.array:
if model.name == "logreg":
return model.coef_.flatten()
if model.name == "svm":
return model.LSVC.coef_.flatten()
if model.name == "randomforest" or model.name == "xgboost":
return model.feature_importances_.flatten()
raise ValueError(f"{model.name} lacks feature coefficients or importances")
def _get_predictions_from_data(
model: Union[Model, SKLEARN_MODELS],
data: Union[
tf.data.Dataset,
Tuple[Inputs, Outputs],
Tuple[Inputs, Outputs, Paths],
],
batch_size: Optional[int],
tensor_maps_in: Optional[List[TensorMap]],
tensor_maps_out: Optional[List[TensorMap]],
) -> Tuple[Predictions, Outputs, Optional[Paths]]:
"""
Get model predictions, output data, and paths from data source. Data must not
be infinite.
:param model: Model
:param data: finite tensorflow Dataset or tuple of inputs, outputs, and
optionally paths
:param batch_size: Number of samples to use in a batch, required if data is a
tuple input and output numpy arrays
:return: Tuple of predictions as a list of numpy arrays, a dictionary of
output data, and optionally paths
"""
if isinstance(data, tuple):
if len(data) == 2:
input_data, output_data = data
paths = None
elif len(data) == 3:
input_data, output_data, paths = data
else:
raise ValueError(
f"Expected 2 or 3 elements to dataset tuple, got {len(data)}",
)
if batch_size is None:
raise ValueError(
"When providing dataset as tuple of inputs and outputs, batch_size "
"is required, got {batch_size}",
)
y_predictions = model.predict(x=input_data, batch_size=batch_size)
elif isinstance(data, tf.data.Dataset):
y_prediction_batches = defaultdict(list)
output_data_batches = defaultdict(list)
id_batches = []
if isinstance(model, Model):
for batch in data:
output_data_batch = batch[BATCH_OUTPUT_INDEX]
for output_name, output_tensor in output_data_batch.items():
output_data_batches[output_name].append(output_tensor.numpy())
batch_y_predictions = model.predict(batch[BATCH_INPUT_INDEX])
if not isinstance(batch_y_predictions, list):
batch_y_predictions = [batch_y_predictions]
for prediction_idx, batch_y_prediction in enumerate(
batch_y_predictions,
):
y_prediction_batches[prediction_idx].append(batch_y_prediction)
if len(batch) == 3:
id_batches.append(batch[BATCH_IDS_INDEX].numpy().astype(str))
y_predictions = [
np.concatenate(y_prediction_batches[prediction_idx])
for prediction_idx in sorted(y_prediction_batches)
]
elif isinstance(model, SKLEARN_MODELS.__args__):
data = get_dicts_of_arrays_from_dataset(dataset=data)
assert all(tm.axes == 1 for tm in tensor_maps_in + tensor_maps_out)
assert len(tensor_maps_out) == 1
# Isolate arrays from datasets for desired tensor maps
X = get_array_from_dict_of_arrays(
tensor_maps=tensor_maps_in,
data=data[BATCH_INPUT_INDEX],
drop_redundant_columns=False,
)
y_predictions = model.predict_proba(X)
for output_name, output_tensor in data[BATCH_OUTPUT_INDEX].items():
output_data_batches[output_name].append(output_tensor)
if len(data) == 3:
id_batches.append(data[BATCH_IDS_INDEX])
else:
raise NotImplementedError(
f"Cannot perform inference on model of type {type(model).__name}",
)
# Iterate over batches and concatenate into dict of arrays
output_data = {
output_name: np.concatenate(output_data_batches[output_name])
for output_name in output_data_batches
}
paths = None if len(id_batches) == 0 else np.concatenate(id_batches).tolist()
else:
raise NotImplementedError(
"Cannot get data for inference from data of type "
"{type(data).__name__}: {data}",
)
if not isinstance(y_predictions, list):
y_predictions = [y_predictions]
return y_predictions, output_data, paths
|
import sys, sysconfig
import copy
import numpy
from distutils.extension import Extension
from distutils.util import get_platform
from distutils.dist import Distribution
from distutils.command.install_lib import install_lib
def get_openmoc_object_name():
"""Returns the name of the main openmoc shared library object"""
ext_suffix = sysconfig.get_config_var('SOABI')
if ext_suffix is None:
filename = '_openmoc.so'
else:
filename = '_openmoc.{0}.so'.format(ext_suffix)
return filename
def get_shared_object_path():
"""Returns the name of the distutils build directory"""
install_lib_command = install_lib(Distribution())
install_lib_command.initialize_options()
install_lib_command.finalize_options()
directory = install_lib_command.build_dir
return directory
def get_openmoc():
"""Returns the path and name of the main shared library object"""
return get_shared_object_path() + '/' + get_openmoc_object_name()
class configuration:
"""User-defined build configuration options for OpenMOC
Configuration options may be set using compile time flags. To view a
list of these options, run 'python setup.py install --help' in the
console. The default configuration options are shown below and should
only be revised by developers familiar with the code and its configuration
management system.
"""
#############################################################################
# User Options
#############################################################################
# Default C++ compiler for the main openmoc module is GCC
cc = 'gcc'
# Default floating point for the main openmoc module is single precision
fp = 'single'
# Compile using ccache (for developers needing fast recompilation)
with_ccache = False
# Compile code with debug symbols (ie, -g)
debug_mode = False
# Compile code with debug symbols (ie, -g, -pg)
profile_mode = False
# Build the openmoc.cuda module
with_cuda = False
  # The vector length used for the VectorizedSolver class. This will be used
  # as a hint for the Intel compiler to issue SIMD (ie, SSE, AVX, etc) vector
  # instructions. This is accomplished by adding "dummy" energy groups such
  # that the number of energy groups fits a multiple of this vector_length,
  # and by restructuring the innermost loops in the solver to loop from 0 to
  # the vector length
vector_length = 8
# The vector alignment used in the VectorizedSolver class when allocating
# aligned data structures using MM_MALLOC and MM_FREE
vector_alignment = 16
# List of C/C++/CUDA distutils.extension objects which are created based
# on which flags are specified at compile time.
extensions = list()
# List of the possible packages to install based on runtime options
packages = ['openmoc', 'openmoc.cuda', 'openmoc.compatible']
#############################################################################
# Source Code
#############################################################################
# Dictionary of source code files to compile for each extension module
sources = dict()
sources['gcc'] = ['openmoc/openmoc_wrap.cpp',
'src/Cell.cpp',
'src/Geometry.cpp',
'src/LocalCoords.cpp',
'src/log.cpp',
'src/Material.cpp',
'src/Point.cpp',
'src/PolarQuad.cpp',
'src/ExpEvaluator.cpp',
'src/Solver.cpp',
'src/CPUSolver.cpp',
'src/Surface.cpp',
'src/Timer.cpp',
'src/Track.cpp',
'src/TrackGenerator.cpp',
'src/Universe.cpp',
'src/Vector.cpp',
'src/Matrix.cpp',
'src/Cmfd.cpp',
'src/linalg.cpp']
sources['clang'] = ['openmoc/openmoc_wrap.cpp',
'src/Cell.cpp',
'src/Geometry.cpp',
'src/LocalCoords.cpp',
'src/log.cpp',
'src/Material.cpp',
'src/Point.cpp',
'src/PolarQuad.cpp',
'src/ExpEvaluator.cpp',
'src/Solver.cpp',
'src/CPUSolver.cpp',
'src/Surface.cpp',
'src/Timer.cpp',
'src/Track.cpp',
'src/TrackGenerator.cpp',
'src/Universe.cpp',
'src/Cmfd.cpp',
'src/Vector.cpp',
'src/Matrix.cpp',
'src/linalg.cpp']
sources['icpc'] = ['openmoc/openmoc_wrap.cpp',
'src/Cell.cpp',
'src/Geometry.cpp',
'src/LocalCoords.cpp',
'src/log.cpp',
'src/Material.cpp',
'src/Point.cpp',
'src/PolarQuad.cpp',
'src/ExpEvaluator.cpp',
'src/Solver.cpp',
'src/CPUSolver.cpp',
'src/VectorizedSolver.cpp',
'src/Surface.cpp',
'src/Timer.cpp',
'src/Track.cpp',
'src/TrackGenerator.cpp',
'src/Universe.cpp',
'src/Cmfd.cpp',
'src/Vector.cpp',
'src/Matrix.cpp',
'src/linalg.cpp']
sources['bgxlc'] = ['openmoc/openmoc_wrap.cpp',
'src/Cell.cpp',
'src/Geometry.cpp',
'src/LocalCoords.cpp',
'src/log.cpp',
'src/Material.cpp',
'src/Point.cpp',
'src/PolarQuad.cpp',
'src/ExpEvaluator.cpp',
'src/Solver.cpp',
'src/CPUSolver.cpp',
'src/Surface.cpp',
'src/Timer.cpp',
'src/Track.cpp',
'src/TrackGenerator.cpp',
'src/Universe.cpp',
'src/Cmfd.cpp',
'src/Vector.cpp',
'src/Matrix.cpp',
'src/linalg.cpp']
sources['nvcc'] = ['openmoc/cuda/openmoc_cuda_wrap.cpp',
'src/accel/cuda/GPUExpEvaluator.cu',
'src/accel/cuda/GPUQuery.cu',
'src/accel/cuda/clone.cu',
'src/accel/cuda/GPUSolver.cu']
#############################################################################
# Compiler Flags
#############################################################################
# A dictionary of the compiler flags to use for each compiler type
compiler_flags = dict()
compiler_flags['gcc'] = ['-c', '-O3', '-ffast-math', '-fopenmp',
'-std=c++11', '-fpic']
compiler_flags['clang'] = ['-c', '-O3', '-ffast-math', '-std=c++11',
'-fopenmp', '-fvectorize', '-fpic',
'-Qunused-arguments',
'-Wno-deprecated-register',
'-Wno-parentheses-equality']
compiler_flags['icpc'] =['-c', '-O3', '-fast', '--ccache-skip',
'-openmp', '-xhost', '-std=c++11',
'--ccache-skip', '-fpic',
'-openmp-report', '-vec-report']
compiler_flags['bgxlc'] = ['-c', '-O2', '-qarch=qp', '-qreport',
'-qsimd=auto', '-qtune=qp', '-qunroll=auto',
'-qsmp=omp', '-qpic']
compiler_flags['nvcc'] = ['--relocatable-device-code', 'true',
'-c', '-O3', '-std=c++11',
'--compiler-options', '-fpic',
'-arch=compute_20']
#############################################################################
# Linker Flags
#############################################################################
# A dictionary of the linker flags to use for each compiler type
linker_flags = dict()
if ('macosx' in get_platform()):
linker_flags['gcc'] = ['-fopenmp', '-dynamiclib', '-lpython2.7',
'-Wl,-install_name,' + get_openmoc_object_name()]
else:
linker_flags['gcc'] = ['-fopenmp', '-shared',
'-Wl,-soname,' + get_openmoc_object_name()]
if ('macosx' in get_platform()):
linker_flags['clang'] = ['-fopenmp', '-dynamiclib', '-lpython2.7',
'-Wl,-install_name,' + get_openmoc_object_name()]
else:
linker_flags['clang'] = ['-fopenmp', '-shared',
'-Wl,-soname,' + get_openmoc_object_name()]
linker_flags['icpc'] = [ '-openmp', '-shared',
'-Xlinker', '-soname=' + get_openmoc_object_name()]
linker_flags['bgxlc'] = ['-qmkshrobj', '-shared',
'-R/soft/compilers/ibmcmp-may2013/lib64/bg/bglib64',
'-Wl,-soname,' + get_openmoc_object_name()]
linker_flags['nvcc'] = ['-shared', get_openmoc()]
#############################################################################
# Shared Libraries
#############################################################################
# A dictionary of the shared libraries to use for each compiler type
shared_libraries = dict()
shared_libraries['gcc'] = ['stdc++', 'gomp', 'dl','pthread', 'm']
shared_libraries['clang'] = ['stdc++', 'gomp', 'dl','pthread', 'm']
shared_libraries['icpc'] = ['stdc++', 'iomp5', 'pthread', 'irc',
'imf','rt', 'mkl_rt','m',]
shared_libraries['bgxlc'] = ['stdc++', 'pthread', 'm', 'xlsmp', 'rt']
shared_libraries['nvcc'] = ['cudadevrt', 'cudart']
#############################################################################
# Library Directories
#############################################################################
# A dictionary of the library directories to use for each compiler type
# if not set in the LD_LIBRARY_PATH environment variable
library_directories = dict()
usr_lib = sys.exec_prefix + '/lib'
library_directories['gcc'] = [usr_lib]
library_directories['clang'] = [usr_lib]
library_directories['icpc'] = [usr_lib]
library_directories['bgxlc'] = [usr_lib]
library_directories['nvcc'] = [usr_lib, '/usr/local/cuda/lib64']
#############################################################################
# Include Directories
#############################################################################
# A dictionary of the include directories to use for each compiler type
# for header files not found from paths set in the user's environment
include_directories = dict()
include_directories['gcc'] = list()
include_directories['clang'] = list()
include_directories['icpc'] = list()
include_directories['bgxlc'] = list()
include_directories['nvcc'] = ['/usr/local/cuda/include']
###########################################################################
# SWIG Flags
###########################################################################
# A list of the flags for SWIG
swig_flags = ['-c++', '-python', '-keyword']
# Python 3 only
if sys.version_info[0] == 3:
swig_flags.append('-py3')
#############################################################################
# Macros
#############################################################################
# A dictionary of the macros to set at compile time for each compiler type
  # and floating point precision level
macros = dict()
macros['gcc'] = dict()
macros['clang'] = dict()
macros['icpc'] = dict()
macros['bgxlc'] = dict()
macros['nvcc'] = dict()
macros['gcc']['single']= [('FP_PRECISION', 'float'),
('SINGLE', None),
('GCC', None),
('VEC_LENGTH', vector_length),
('VEC_ALIGNMENT', vector_alignment)]
macros['clang']['single']= [('FP_PRECISION', 'float'),
('SINGLE', None),
('CLANG', None),
('VEC_LENGTH', vector_length),
('VEC_ALIGNMENT', vector_alignment)]
macros['icpc']['single']= [('FP_PRECISION', 'float'),
('SINGLE', None),
('ICPC', None),
('MKL_ILP64', None),
('VEC_LENGTH', vector_length),
('VEC_ALIGNMENT', vector_alignment)]
macros['bgxlc']['single'] = [('FP_PRECISION', 'float'),
('SINGLE', None),
('BGXLC', None),
('VEC_LENGTH', vector_length),
('VEC_ALIGNMENT', vector_alignment),
('CCACHE_CC', 'bgxlc++_r')]
macros['nvcc']['single'] = [('FP_PRECISION', 'float'),
('SINGLE', None),
('NVCC', None),
('CCACHE_CC', 'nvcc')]
macros['gcc']['double'] = [('FP_PRECISION', 'double'),
('DOUBLE', None),
('GCC', None),
('VEC_LENGTH', vector_length),
('VEC_ALIGNMENT', vector_alignment)]
macros['clang']['double'] = [('FP_PRECISION', 'double'),
('DOUBLE', None),
('CLANG', None),
('VEC_LENGTH', vector_length),
('VEC_ALIGNMENT', vector_alignment)]
macros['icpc']['double'] = [('FP_PRECISION', 'double'),
('DOUBLE', None),
('ICPC', None),
('MKL_ILP64', None),
('VEC_LENGTH', vector_length),
('VEC_ALIGNMENT', vector_alignment)]
macros['bgxlc']['double'] = [('FP_PRECISION', 'double'),
('DOUBLE', None),
('BGXLC', None),
('VEC_LENGTH', vector_length),
('VEC_ALIGNMENT', vector_alignment),
('CCACHE_CC', 'bgxlc++_r')]
macros['nvcc']['double'] = [('FP_PRECISION', 'double'),
('DOUBLE', None),
('NVCC', None),
('CCACHE_CC', 'nvcc')]
# define OPENMP and SWIG (for log output)
for compiler in macros:
for precision in macros[compiler]:
macros[compiler][precision].append(('OPENMP', None))
macros[compiler][precision].append(('SWIG', None))
def setup_extension_modules(self):
"""Sets up the C/C++/CUDA extension modules for this distribution.
Create list of extensions for Python modules within the openmoc
Python package based on the user-defined flags defined at compile time.
"""
# If the user wishes to compile using debug mode, append the debugging
# flag to all lists of compiler flags for all distribution types
if self.debug_mode:
for k in self.compiler_flags:
self.compiler_flags[k].append('-g')
# If the user wishes to compile using profile mode, append the profiling
# flag to all lists of compiler flags for all distribution types
if self.profile_mode:
for k in self.compiler_flags:
self.compiler_flags[k].append('-pg')
self.compiler_flags[k].append('-g')
# Obtain the NumPy include directory
try:
numpy_include = numpy.get_include()
except AttributeError:
numpy_include = numpy.get_numpy_include()
# Add the NumPy include directory to the include directories
# list for each type of compiler
for cc in self.include_directories.keys():
self.include_directories[cc].append(numpy_include)
# The main openmoc extension (defaults are gcc and single precision)
self.swig_flags += ['-D' + self.fp.upper()]
if self.fp == 'double':
self.swig_flags += ['-DFP_PRECISION=double']
else:
self.swig_flags += ['-DFP_PRECISION=float']
self.extensions.append(
Extension(name = '_openmoc',
sources = copy.deepcopy(self.sources[self.cc]),
library_dirs = self.library_directories[self.cc],
libraries = self.shared_libraries[self.cc],
extra_link_args = self.linker_flags[self.cc],
include_dirs = self.include_directories[self.cc],
define_macros = self.macros[self.cc][self.fp],
swig_opts = self.swig_flags + ['-D' + self.cc.upper()]))
# The openmoc.cuda extension if requested by the user at compile
# time (--with-cuda)
if self.with_cuda:
self.extensions.append(
Extension(name = '_openmoc_cuda',
sources = copy.deepcopy(self.sources['nvcc']),
library_dirs = self.library_directories['nvcc'],
libraries = self.shared_libraries['nvcc'],
extra_link_args = self.linker_flags['nvcc'],
include_dirs = self.include_directories['nvcc'],
define_macros = self.macros['nvcc'][self.fp],
swig_opts = self.swig_flags + ['-DNVCC'],
export_symbols = ['init_openmoc']))
|
# Generated by Django 2.1.7 on 2019-05-24 10:05
import datetime
from django.conf import settings
import django.core.validators
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name='Assessment',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=100)),
('added', models.DateTimeField(default=datetime.datetime.now)),
],
options={
'ordering': ('name',),
},
),
migrations.CreateModel(
name='Case',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=100)),
('description', models.TextField(blank=True, default='')),
],
options={
'ordering': ('name',),
},
),
migrations.CreateModel(
name='Cvss',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('attack_vector', models.CharField(choices=[('N', 'Network'), ('A', 'Adjacent'), ('L', 'Local'), ('P', 'Physical')], max_length=1)),
('attack_complexity', models.CharField(choices=[('L', 'Low'), ('H', 'High')], max_length=1)),
('privilege_required', models.CharField(choices=[('N', 'None'), ('L', 'Low'), ('H', 'High')], max_length=1)),
('user_interaction', models.CharField(choices=[('N', 'None'), ('R', 'Required')], max_length=1)),
('scope', models.CharField(choices=[('U', 'Unchanged'), ('C', 'Changed')], max_length=1)),
('confidentiality', models.CharField(choices=[('N', 'None'), ('L', 'Low'), ('H', 'High')], max_length=1)),
('integrity', models.CharField(choices=[('N', 'None'), ('L', 'Low'), ('H', 'High')], max_length=1)),
('availability', models.CharField(choices=[('N', 'None'), ('L', 'Low'), ('H', 'High')], max_length=1)),
('decimal_value', models.DecimalField(decimal_places=1, default=-1.0, max_digits=3)),
],
options={
'ordering': ('decimal_value',),
},
),
migrations.CreateModel(
name='Flag',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('title', models.CharField(max_length=100)),
('note', models.TextField(blank=True, default='')),
('asset', models.CharField(blank=True, default='', max_length=256)),
('done', models.BooleanField(default=False)),
('added', models.DateTimeField(default=datetime.datetime.now)),
('assessment', models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, to='ptart.Assessment')),
('assignee', models.ForeignKey(null=True, on_delete=django.db.models.deletion.PROTECT, to=settings.AUTH_USER_MODEL)),
],
options={
'ordering': ('title',),
},
),
migrations.CreateModel(
name='Label',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('title', models.CharField(max_length=200)),
('color', models.CharField(max_length=7)),
],
options={
'ordering': ('pk',),
},
),
migrations.CreateModel(
name='Methodology',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=100)),
('description', models.TextField(blank=True, default='')),
],
options={
'ordering': ('name',),
},
),
migrations.CreateModel(
name='Module',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=100)),
('description', models.TextField(blank=True, default='')),
('methodology', models.ForeignKey(default=None, null=True, on_delete=django.db.models.deletion.CASCADE, to='ptart.Methodology')),
],
options={
'ordering': ('name',),
},
),
migrations.CreateModel(
name='Project',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=100)),
('scope', models.TextField(blank=True, default='')),
('added', models.DateTimeField(default=datetime.datetime.now)),
('pentesters', models.ManyToManyField(related_name='project_pentesters', to=settings.AUTH_USER_MODEL)),
('viewers', models.ManyToManyField(related_name='project_viewers', to=settings.AUTH_USER_MODEL)),
],
options={
'ordering': ('name',),
},
),
migrations.CreateModel(
name='Screenshot',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('screenshot', models.ImageField(upload_to='screenshots')),
],
),
migrations.CreateModel(
name='Hit',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('title', models.CharField(max_length=200)),
('body', models.TextField(blank=True, default='')),
('asset', models.CharField(blank=True, default='', max_length=256)),
('added', models.DateTimeField(default=datetime.datetime.now)),
('severity', models.IntegerField(default=5, validators=[django.core.validators.MinValueValidator(0), django.core.validators.MaxValueValidator(5)])),
('assessment', models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, to='ptart.Assessment')),
('cvss', models.OneToOneField(null=True, on_delete=django.db.models.deletion.SET_NULL, to='ptart.Cvss')),
('labels', models.ManyToManyField(to='ptart.Label')),
],
options={
'ordering': ('severity', '-cvss', 'title'),
},
),
migrations.CreateModel(
name='Template',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=100)),
('severity', models.IntegerField(default=5, validators=[django.core.validators.MinValueValidator(0), django.core.validators.MaxValueValidator(5)])),
('body', models.TextField(blank=True, default='')),
('asset', models.CharField(blank=True, default='', max_length=256)),
],
options={
'ordering': ('severity', 'name'),
},
),
migrations.AddField(
model_name='screenshot',
name='hit',
field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, to='ptart.Hit'),
),
migrations.AddField(
model_name='case',
name='module',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='ptart.Module'),
),
migrations.AddField(
model_name='assessment',
name='project',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='ptart.Project'),
),
]
|
print('Fibonacci sequence')
termos = int( input('How many terms? ') )
seq = []
for c in range(termos):
if(c == 0 or c == 1):
seq.append(c)
else:
seq.append( seq[c-1] + seq[c-2] )
print('\n', seq)
|
from dataclasses import dataclass, field
from typing import List, Type
@dataclass
class Node:
class Meta:
name = "node"
node_or_mixed_a_or_mixed_b: List[object] = field(
default_factory=list,
metadata={
"type": "Elements",
"choices": (
{
"name": "node",
"type": Type["Node"],
},
{
"name": "mixedA",
"type": List[str],
"default_factory": list,
"tokens": True,
},
{
"name": "mixedB",
"type": List[str],
"default_factory": list,
"tokens": True,
},
),
}
)
mixed_a_attribute: List[str] = field(
default_factory=list,
metadata={
"name": "mixedA",
"type": "Attribute",
"tokens": True,
}
)
mixed_b_attribute: List[str] = field(
default_factory=list,
metadata={
"name": "mixedB",
"type": "Attribute",
"tokens": True,
}
)
@dataclass
class Doc:
class Meta:
name = "doc"
node: List[Node] = field(
default_factory=list,
metadata={
"type": "Element",
"min_occurs": 1,
}
)
|
import asyncio
from pyrogram import idle
from scp import user, bot
from scp.core.functions.plugins import (
loadUserPlugins,
loadBotPlugins,
loadPrivatePlugins,
)
from scp.utils.selfInfo import updateInfo
from scp.utils.interpreter import shell
from scp.database.Operational import InitializeDatabase
HELP_COMMANDS = {}
loop = asyncio.get_event_loop()
async def start_bot():
await bot.start()
await user.start()
await updateInfo()
await InitializeDatabase()
asyncio.create_task(shell())
await asyncio.gather(
loadBotPlugins(),
loadUserPlugins(),
loadPrivatePlugins(),
idle(),
)
if __name__ == '__main__':
loop.run_until_complete(start_bot())
|
from src.utils.variants import variants
_values_in_file = variants + ['rd_' + v for v in variants] + ['fide']
def _format_line(ranking):
rank_list = [ranking[v][0] for v in variants] + [ranking[v][1] for v in variants] + [ranking['fide']]
return ("{}," * len(_values_in_file))[:-1].format(*rank_list)
def _parse_line(line):
    values = [int(v) if v != "None" else None for v in line.strip().split(',')]
ranks = {}
for i, v in enumerate(variants):
ranks[v] = (values[i], values[i + len(variants)])
ranks['fide'] = values[2 * len(variants)]
return ranks
def save_rankings(rankings, filename):
exists = True
try:
open(filename, 'r').close()
except FileNotFoundError:
exists = False
with open(filename, 'a+') as f:
if not exists:
f.write(",".join(_values_in_file) + "\n")
for r in rankings:
f.write(_format_line(r) + "\n")
def read_rankings(filename):
ranks = []
with open(filename, 'r') as f:
        next(f)  # skip the header line
for line in f:
ranks.append(_parse_line(line))
return ranks
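# --- Editor's usage sketch (assumes `variants` imported above resolves to a
# list of variant names, and that each ranking maps every variant to a
# (rating, rd) tuple plus an int or None under 'fide'; the file name is
# illustrative only). ---
if __name__ == '__main__':
    import os
    import tempfile
    demo = {v: (1500, 350) for v in variants}
    demo['fide'] = None
    path = os.path.join(tempfile.mkdtemp(), 'rankings.csv')
    save_rankings([demo], path)
    print(read_rankings(path))  # e.g. [{'<variant>': (1500, 350), ..., 'fide': None}]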
|
import unittest
from math import pi
import numpy as np
from wisdem.ccblade.Polar import Polar, blend
class TestBlend(unittest.TestCase):
def setUp(self):
alpha = [
-3.04,
-2.03,
-1.01,
0.01,
1.03,
2.05,
3.07,
4.09,
5.11,
6.13,
7.14,
8.16,
9.17,
10.18,
11.18,
12.19,
13.18,
14.18,
15.18,
16.17,
17.14,
18.06,
19.06,
20.07,
25,
]
cl = [
-0.071,
0.044,
0.144,
0.241,
0.338,
0.435,
0.535,
0.632,
0.728,
0.813,
0.883,
0.946,
1.001,
1.054,
1.056,
1.095,
1.138,
1.114,
1.073,
1.008,
0.95,
0.902,
0.795,
0.797,
0.8,
]
cd = [
0.0122,
0.0106,
0.0114,
0.0134,
0.0136,
0.014,
0.0147,
0.0156,
0.0162,
0.0173,
0.0191,
0.0215,
0.0248,
0.0339,
0.0544,
0.0452,
0.0445,
0.067,
0.0748,
0.1028,
0.1473,
0.2819,
0.2819,
0.2819,
0.3,
]
cm = [
-0.0044,
-0.0051,
0.0018,
-0.0216,
-0.0282,
-0.0346,
-0.0405,
-0.0455,
-0.0507,
-0.0404,
-0.0321,
-0.0281,
-0.0284,
-0.0322,
-0.0361,
-0.0363,
-0.0393,
-0.0398,
-0.0983,
-0.1242,
-0.1155,
-0.1068,
-0.0981,
-0.0894,
-0.0807,
]
Re = 1
self.polar1 = Polar(Re, alpha, cl, cd, cm)
alpha = [
-3.04,
-2.03,
-1.01,
0.01,
1.03,
2.05,
3.07,
4.09,
5.11,
6.13,
7.14,
8.16,
9.17,
10.18,
11.18,
12.19,
13.18,
14.18,
15.189,
16.17,
17.14,
18.06,
19.06,
20.07,
21.08,
22.09,
23.1,
25,
]
cl = [
-0.0852,
0.0528,
0.1728,
0.2892,
0.4056,
0.522,
0.642,
0.7584,
0.8736,
0.9756,
1.0596,
1.1352,
1.2012,
1.2648,
1.2672,
1.314,
1.3656,
1.3368,
1.2876,
1.2096,
1.14,
1.0824,
0.954,
0.9564,
1,
1.2,
1.4,
1.6,
]
cd = [
0.01464,
0.01272,
0.01368,
0.01608,
0.01632,
0.0168,
0.01764,
0.01872,
0.01944,
0.02076,
0.02292,
0.0258,
0.02976,
0.04068,
0.06528,
0.05424,
0.0534,
0.0804,
0.08976,
0.12336,
0.17676,
0.33828,
0.33828,
0.33828,
0.35,
0.4,
0.45,
0.5,
]
cm = [
-0.0037,
-0.0044,
-0.0051,
0.0018,
-0.0216,
-0.0282,
-0.0346,
-0.0405,
-0.0455,
-0.0507,
-0.0404,
-0.0321,
-0.0281,
-0.0284,
-0.0322,
-0.0361,
-0.0363,
-0.0393,
-0.0398,
-0.0983,
-0.1242,
-0.1155,
-0.1068,
-0.0981,
-0.0894,
-0.0807,
-0.072,
-0.0633,
]
self.polar2 = Polar(Re, alpha, cl, cd, cm)
def test_blend1(self):
polar3 = blend(self.polar1, self.polar2, 0.5)
alpha_blend = [
-3.04,
-2.03,
-1.01,
0.01,
1.03,
2.05,
3.07,
4.09,
5.11,
6.13,
7.14,
8.16,
9.17,
10.18,
11.18,
12.19,
13.18,
14.18,
15.18,
16.17,
17.14,
18.06,
19.06,
20.07,
25,
]
cl_blend = [
-0.078,
0.048,
0.158,
0.265,
0.372,
0.479,
0.589,
0.695,
0.801,
0.894,
0.971,
1.041,
1.101,
1.159,
1.162,
1.205,
1.252,
1.225,
1.181,
1.109,
1.045,
0.992,
0.875,
0.877,
1.200,
]
cd_blend = [
0.0134,
0.0117,
0.0125,
0.0147,
0.0150,
0.0154,
0.0162,
0.0172,
0.0178,
0.0190,
0.0210,
0.0237,
0.0273,
0.0373,
0.0598,
0.0497,
0.0490,
0.0737,
0.0822,
0.1131,
0.1620,
0.3101,
0.3101,
0.3101,
0.4000,
]
cm_blend = [
-0.00405,
-0.00475,
-0.00165,
-0.0099,
-0.0249,
-0.0314,
-0.03755,
-0.043,
-0.0481,
-0.04555,
-0.03625,
-0.0301,
-0.02825,
-0.0303,
-0.03415,
-0.0362,
-0.0378,
-0.03955,
-0.06905,
-0.11125,
-0.11985,
-0.11115,
-0.10245,
-0.09375,
-0.072,
]
# re-interpolate b/c angles of attack are different
cl3 = np.interp(alpha_blend, polar3.alpha, polar3.cl)
cd3 = np.interp(alpha_blend, polar3.alpha, polar3.cd)
cm3 = np.interp(alpha_blend, polar3.alpha, polar3.cm)
# should be within 1e-3
np.testing.assert_allclose(cl3, cl_blend, atol=1e-3)
np.testing.assert_allclose(cd3, cd_blend, atol=1e-3)
np.testing.assert_allclose(cm3, cm_blend, atol=1e-3)
def test_blend2(self):
polar3 = blend(self.polar1, self.polar2, 0.7)
alpha_blend = [
-3.04,
-2.03,
-1.01,
0.01,
1.03,
2.05,
3.07,
4.09,
5.11,
6.13,
7.14,
8.16,
9.17,
10.18,
11.18,
12.19,
13.18,
14.18,
15.18,
16.17,
17.14,
18.06,
19.06,
20.07,
25,
]
cl_blend = [
-0.081,
0.050,
0.164,
0.275,
0.385,
0.496,
0.610,
0.720,
0.830,
0.927,
1.007,
1.078,
1.141,
1.202,
1.204,
1.248,
1.297,
1.270,
1.224,
1.149,
1.083,
1.028,
0.906,
0.909,
1.360,
]
cd_blend = [
0.0139,
0.0121,
0.0130,
0.0153,
0.0155,
0.0160,
0.0168,
0.0178,
0.0185,
0.0197,
0.0218,
0.0245,
0.0283,
0.0386,
0.0620,
0.0515,
0.0507,
0.0764,
0.0852,
0.1172,
0.1679,
0.3214,
0.3214,
0.3214,
0.4400,
]
cm_blend = [
-0.00391,
-0.00461,
-0.00303,
-0.00522,
-0.02358,
-0.03012,
-0.03637,
-0.042,
-0.04706,
-0.04761,
-0.03791,
-0.0309,
-0.02819,
-0.02954,
-0.03337,
-0.03616,
-0.0372,
-0.03945,
-0.057347,
-0.10607,
-0.12159,
-0.11289,
-0.10419,
-0.09549,
-0.06852,
]
# re-interpolate b/c angles of attack are different
cl3 = np.interp(alpha_blend, polar3.alpha, polar3.cl)
cd3 = np.interp(alpha_blend, polar3.alpha, polar3.cd)
cm3 = np.interp(alpha_blend, polar3.alpha, polar3.cm)
# should be within 1e-3
np.testing.assert_allclose(cl3, cl_blend, atol=1e-3)
np.testing.assert_allclose(cd3, cd_blend, atol=1e-3)
np.testing.assert_allclose(cm3, cm_blend, atol=1e-3)
def test_blend3(self):
polar3 = blend(self.polar1, self.polar2, 0.2)
alpha_blend = [
-3.04,
-2.03,
-1.01,
0.01,
1.03,
2.05,
3.07,
4.09,
5.11,
6.13,
7.14,
8.16,
9.17,
10.18,
11.18,
12.19,
13.18,
14.18,
15.18,
16.17,
17.14,
18.06,
19.06,
20.07,
25,
]
cl_blend = [
-0.074,
0.046,
0.150,
0.251,
0.352,
0.452,
0.556,
0.657,
0.757,
0.846,
0.918,
0.984,
1.041,
1.096,
1.098,
1.139,
1.184,
1.159,
1.116,
1.048,
0.988,
0.938,
0.827,
0.829,
0.960,
]
cd_blend = [
0.0127,
0.0110,
0.0119,
0.0139,
0.0141,
0.0146,
0.0153,
0.0162,
0.0168,
0.0180,
0.0199,
0.0224,
0.0258,
0.0353,
0.0566,
0.0470,
0.0463,
0.0697,
0.0778,
0.1069,
0.1532,
0.2932,
0.2932,
0.2932,
0.3400,
]
cm_blend = [
-0.00426,
-0.00496,
0.00042,
-0.01692,
-0.02688,
-0.03332,
-0.03932,
-0.0445,
-0.04966,
-0.04246,
-0.03376,
-0.0289,
-0.02834,
-0.03144,
-0.03532,
-0.03626,
-0.0387,
-0.0397,
-0.0866,
-0.11902,
-0.11724,
-0.10854,
-0.09984,
-0.09114,
-0.07722,
]
# re-interpolate b/c angles of attack are different
cl3 = np.interp(alpha_blend, polar3.alpha, polar3.cl)
cd3 = np.interp(alpha_blend, polar3.alpha, polar3.cd)
cm3 = np.interp(alpha_blend, polar3.alpha, polar3.cm)
# should be within 1e-3
np.testing.assert_allclose(cl3, cl_blend, atol=1e-3)
np.testing.assert_allclose(cd3, cd_blend, atol=1e-3)
np.testing.assert_allclose(cm3, cm_blend, atol=1e-3)
class Test3DStall(unittest.TestCase):
def setUp(self):
alpha = [
-9.000,
-8.000,
-7.000,
-6.000,
-5.000,
-4.000,
-3.000,
-2.000,
-1.000,
0.000,
1.000,
2.000,
3.000,
4.000,
5.000,
6.000,
7.000,
8.000,
9.000,
10.000,
11.000,
12.000,
13.000,
14.000,
15.000,
16.000,
17.000,
18.000,
19.000,
20.000,
30.000,
40.000,
50.000,
]
cl = [
-0.802,
-0.721,
-0.611,
-0.506,
-0.408,
-0.313,
-0.220,
-0.133,
-0.060,
0.036,
0.227,
0.342,
0.436,
0.556,
0.692,
0.715,
0.761,
0.830,
0.893,
0.954,
1.013,
1.042,
1.061,
1.083,
1.078,
0.882,
0.811,
0.793,
0.793,
0.798,
0.772,
0.757,
0.700,
]
cd = [
0.027,
0.025,
0.024,
0.023,
0.022,
0.022,
0.023,
0.025,
0.027,
0.028,
0.024,
0.019,
0.017,
0.015,
0.017,
0.019,
0.021,
0.024,
0.027,
0.031,
0.037,
0.046,
0.058,
0.074,
0.088,
0.101,
0.114,
0.128,
0.142,
0.155,
0.321,
0.525,
0.742,
]
cm = [
-0.0037,
-0.0044,
-0.0051,
0.0018,
-0.0216,
-0.0282,
-0.0346,
-0.0405,
-0.0455,
-0.0507,
-0.0404,
-0.0321,
-0.0281,
-0.0284,
-0.0322,
-0.0361,
-0.0363,
-0.0393,
-0.0398,
-0.0983,
-0.1242,
-0.1155,
-0.1068,
-0.0981,
-0.0894,
-0.0807,
-0.072,
-0.0633,
-0.054,
-0.045,
-0.036,
-0.22,
-0.13,
]
cm_zeros = np.zeros(len(cm))
Re = 1
self.polar = Polar(Re, alpha, cl, cd, cm)
self.polar2 = Polar(Re, alpha, cl, cd, cm_zeros)
def test_stall1(self):
R = 2.4
r = 0.25 * R
chord = 0.18
Omega = 200 * pi / 30
Uinf = 10.0
tsr = Omega * R / Uinf
newpolar = self.polar.correction3D(
r / R, chord / r, tsr, alpha_max_corr=30, alpha_linear_min=-4, alpha_linear_max=4
)
cl_3d = [
-0.8466,
-0.7523,
-0.6420,
-0.5342,
-0.4302,
-0.3284,
-0.2276,
-0.1303,
-0.0404,
0.0618,
0.2191,
0.3321,
0.4336,
0.5501,
0.6755,
0.7363,
0.8101,
0.8973,
0.9810,
1.0640,
1.1450,
1.2098,
1.2682,
1.3281,
1.3731,
1.3088,
1.3159,
1.3534,
1.4010,
1.4515,
1.9140,
1.8857,
1.6451,
]
# Eggers method
cd_3d = [
0.0399,
0.0334,
0.0316,
0.0293,
0.0269,
0.0254,
0.0246,
0.0246,
0.0246,
0.0252,
0.0249,
0.0200,
0.0167,
0.0157,
0.0174,
0.0183,
0.0212,
0.0255,
0.0303,
0.0367,
0.0465,
0.0615,
0.0800,
0.1047,
0.1301,
0.1695,
0.2047,
0.2384,
0.2728,
0.3081,
0.8097,
1.2625,
1.6280,
]
# # Du Selig method
# cd_3d = [0.027, 0.024, 0.023, 0.021, 0.02, 0.02, 0.021, 0.024, 0.027, 0.028, 0.023, 0.016, 0.013, 0.011, 0.013, 0.016, 0.019, 0.023, 0.027, 0.032, 0.04, 0.052, 0.068, 0.09, 0.109, 0.126, 0.144, 0.162, 0.181, 0.199, 0.422, 0.696, 0.987]
# test equality
np.testing.assert_allclose(newpolar.cl, cl_3d, atol=1e-3, rtol=1e-3)
np.testing.assert_allclose(newpolar.cd, cd_3d, atol=1e-3, rtol=1e-3)
def test_stall2(self):
R = 2.4
r = 0.75 * R
chord = 0.28
Omega = 200 * pi / 30
Uinf = 14.0
tsr = Omega * R / Uinf
newpolar = self.polar.correction3D(
r / R, chord / r, tsr, alpha_max_corr=30, alpha_linear_min=-4, alpha_linear_max=4
)
cl_3d = [
-0.81340155,
-0.72876051,
-0.61903798,
-0.51322348,
-0.41336822,
-0.31696485,
-0.22214149,
-0.13269893,
-0.05485453,
0.04222704,
0.22525537,
0.33917483,
0.43518608,
0.55464051,
0.68785835,
0.72023796,
0.77302335,
0.84665343,
0.91485674,
0.98191931,
1.04592758,
1.08446883,
1.11313747,
1.14423161,
1.15194066,
0.98921407,
0.93776667,
0.93384528,
0.94558296,
0.96199091,
1.05910388,
1.04054486,
0.93735382,
]
# Eggers method
cd_3d = [
0.03050922,
0.02712935,
0.02589588,
0.02453937,
0.02341344,
0.02320787,
0.02359745,
0.02497252,
0.02653913,
0.02751806,
0.02430795,
0.01935093,
0.01663156,
0.01552516,
0.01698944,
0.01853615,
0.02107760,
0.02443710,
0.02784230,
0.03217433,
0.03929881,
0.05021192,
0.06322801,
0.08159739,
0.09837902,
0.11798276,
0.13692472,
0.15565820,
0.17470667,
0.19368328,
0.44408310,
0.71034295,
0.96437541,
]
# # Du Selig method
# cd_3d = [0.027, 0.025, 0.024, 0.023, 0.022, 0.022, 0.023, 0.025, 0.027, 0.028, 0.024, 0.019, 0.017, 0.015, 0.017, 0.019, 0.021, 0.024, 0.027, 0.031, 0.037, 0.046, 0.059, 0.075, 0.089, 0.102, 0.116, 0.13, 0.144, 0.157, 0.326, 0.534, 0.755]
# test equality
np.testing.assert_allclose(newpolar.cl, cl_3d, atol=1e-3)
np.testing.assert_allclose(newpolar.cd, cd_3d, atol=1e-3)
def test_stall3(self):
R = 5.0
r = 0.5 * R
chord = 0.5
Omega = 100 * pi / 30
Uinf = 10.0
tsr = Omega * R / Uinf
newpolar = self.polar.correction3D(
r / R, chord / r, tsr, alpha_max_corr=30, alpha_linear_min=-4, alpha_linear_max=4
)
cl_3d = [
-0.8240,
-0.7363,
-0.6264,
-0.5199,
-0.4188,
-0.3206,
-0.2239,
-0.1319,
-0.0502,
0.0485,
0.2233,
0.3369,
0.4347,
0.5532,
0.6839,
0.7254,
0.7849,
0.8629,
0.9361,
1.0082,
1.0777,
1.1246,
1.1628,
1.2031,
1.2228,
1.0916,
1.0589,
1.0682,
1.0914,
1.1188,
1.3329,
1.3112,
1.1640,
]
# Eggers method
cd_3d = [
0.0335,
0.0291,
0.0277,
0.0261,
0.0245,
0.0239,
0.0239,
0.0249,
0.0259,
0.0268,
0.0245,
0.0195,
0.0167,
0.0156,
0.0171,
0.0185,
0.0211,
0.0248,
0.0286,
0.0336,
0.0416,
0.0538,
0.0686,
0.0890,
0.1085,
0.1345,
0.1586,
0.1822,
0.2061,
0.2303,
0.5612,
0.8872,
1.1769,
]
# # Du Selig method
# cd_3d = [0.027, 0.025, 0.024, 0.022, 0.021, 0.021, 0.022, 0.025, 0.027, 0.028, 0.024, 0.018, 0.016, 0.014, 0.016, 0.018, 0.02, 0.024, 0.027, 0.031, 0.038, 0.048, 0.061, 0.079, 0.095, 0.109, 0.123, 0.139, 0.155, 0.169, 0.353, 0.58, 0.821]
# test equality
np.testing.assert_allclose(newpolar.cl, cl_3d, atol=1e-3)
np.testing.assert_allclose(newpolar.cd, cd_3d, atol=1e-3)
def test_stall4_cm(self):
R = 5.0
r = 0.5 * R
chord = 0.5
Omega = 100 * pi / 30
Uinf = 10.0
tsr = Omega * R / Uinf
newpolar = self.polar2.correction3D(
r / R, chord / r, tsr, alpha_max_corr=30, alpha_linear_min=-4, alpha_linear_max=4
)
cl_3d = [
-0.8240,
-0.7363,
-0.6264,
-0.5199,
-0.4188,
-0.3206,
-0.2239,
-0.1319,
-0.0502,
0.0485,
0.2233,
0.3369,
0.4347,
0.5532,
0.6839,
0.7254,
0.7849,
0.8629,
0.9361,
1.0082,
1.0777,
1.1246,
1.1628,
1.2031,
1.2228,
1.0916,
1.0589,
1.0682,
1.0914,
1.1188,
1.3329,
1.3112,
1.1640,
]
# Eggers method
cd_3d = [
0.0335,
0.0291,
0.0277,
0.0261,
0.0245,
0.0239,
0.0239,
0.0249,
0.0259,
0.0268,
0.0245,
0.0195,
0.0167,
0.0156,
0.0171,
0.0185,
0.0211,
0.0248,
0.0286,
0.0336,
0.0416,
0.0538,
0.0686,
0.0890,
0.1085,
0.1345,
0.1586,
0.1822,
0.2061,
0.2303,
0.5612,
0.8872,
1.1769,
]
# # Du Selig method
# cd_3d = [0.027, 0.025, 0.024, 0.022, 0.021, 0.021, 0.022, 0.025, 0.027, 0.028, 0.024, 0.018, 0.016, 0.014, 0.016, 0.018, 0.02, 0.024, 0.027, 0.031, 0.038, 0.048, 0.061, 0.079, 0.095, 0.109, 0.123, 0.139, 0.155, 0.169, 0.353, 0.58, 0.821]
# cm = [-0.0037, -0.0044, -0.0051, 0.0018, -0.0216, -0.0282, -0.0346,
# -0.0405, -0.0455, -0.0507, -0.0404, -0.0321, -0.0281, -0.0284,
# -0.0322, -0.0361, -0.0363, -0.0393, -0.0398, -0.0983, -0.1242,
# -0.1155, -0.1068, -0.0981, -0.0894, -0.0807, -0.072, -0.0633,
# -0.054, -0.045, -0.036, -0.22, -0.13]
cm_zeros = np.zeros(len(cd_3d))
# test equality
np.testing.assert_allclose(newpolar.cl, cl_3d, atol=1e-3)
np.testing.assert_allclose(newpolar.cd, cd_3d, atol=1e-3)
np.testing.assert_allclose(newpolar.cm, cm_zeros, atol=1e-3)
class TestExtrap(unittest.TestCase):
def setUp(self):
alpha = [
-10.1,
-8.2,
-6.1,
-4.1,
-2.1,
0.1,
2,
4.1,
6.2,
8.1,
10.2,
11.3,
12.1,
13.2,
14.2,
15.3,
16.3,
17.1,
18.1,
19.1,
20.1,
]
cl = [
-0.6300,
-0.5600,
-0.6400,
-0.4200,
-0.2100,
0.0500,
0.3000,
0.5400,
0.7900,
0.9000,
0.9300,
0.9200,
0.9500,
0.9900,
1.0100,
1.0200,
1.0000,
0.9400,
0.8500,
0.7000,
0.6600,
]
cd = [
0.0390,
0.0233,
0.0131,
0.0134,
0.0119,
0.0122,
0.0116,
0.0144,
0.0146,
0.0162,
0.0274,
0.0303,
0.0369,
0.0509,
0.0648,
0.0776,
0.0917,
0.0994,
0.2306,
0.3142,
0.3186,
]
cm = [
-0.0044,
-0.0051,
0.0018,
-0.0216,
-0.0282,
-0.0346,
-0.0405,
-0.0455,
-0.0507,
-0.0404,
-0.0321,
-0.0281,
-0.0284,
-0.0322,
-0.0361,
-0.0363,
-0.0393,
-0.0398,
-0.0983,
-0.1242,
-0.1155,
]
cm_zeros = np.zeros(len(cm))
Re = 1
self.polar = Polar(Re, alpha, cl, cd, cm)
self.polar2 = Polar(Re, alpha, cl, cd, cm_zeros)
def test_extrap1(self):
cdmax = 1.29
newpolar = self.polar.extrapolate(cdmax=cdmax)
alpha_extrap = [
-180,
-170,
-160,
-150,
-140,
-130,
-120,
-110,
-100,
-90,
-80,
-70,
-60,
-50,
-40,
-30,
-20,
-10.1,
-8.2,
-6.1,
-4.1,
-2.1,
0.1,
2,
4.1,
6.2,
8.1,
10.2,
11.3,
12.1,
13.2,
14.2,
15.3,
16.3,
17.1,
18.1,
19.1,
20.1,
30,
40,
50,
60,
70,
80,
90,
100,
110,
120,
130,
140,
150,
160,
170,
180,
]
cl_extrap = [
0.0000,
0.2299,
0.4597,
0.4907,
0.5053,
0.4805,
0.4102,
0.2985,
0.1565,
0.0000,
-0.1565,
-0.2985,
-0.4102,
-0.4805,
-0.5053,
-0.4907,
-0.4637,
-0.6300,
-0.5600,
-0.6400,
-0.4200,
-0.2100,
0.0500,
0.3000,
0.5400,
0.7900,
0.9000,
0.9300,
0.9200,
0.9500,
0.9900,
1.0100,
1.0200,
1.0000,
0.9400,
0.8500,
0.7000,
0.6600,
0.7010,
0.7219,
0.6864,
0.5860,
0.4264,
0.2235,
0.0000,
-0.1565,
-0.2985,
-0.4102,
-0.4805,
-0.5053,
-0.4907,
-0.4597,
-0.2299,
0.0000,
]
cd_extrap = [
0.1770,
0.2132,
0.3173,
0.4758,
0.6686,
0.8708,
1.0560,
1.1996,
1.2818,
1.2900,
1.2818,
1.1996,
1.0560,
0.8708,
0.6686,
0.4758,
0.3158,
0.0390,
0.0233,
0.0131,
0.0134,
0.0119,
0.0122,
0.0116,
0.0144,
0.0146,
0.0162,
0.0274,
0.0303,
0.0369,
0.0509,
0.0648,
0.0776,
0.0917,
0.0994,
0.2306,
0.3142,
0.3186,
0.4758,
0.6686,
0.8708,
1.0560,
1.1996,
1.2818,
1.2900,
1.2818,
1.1996,
1.0560,
0.8708,
0.6686,
0.4758,
0.3173,
0.2132,
0.1770,
]
cm_extrap = [
0.0000,
0.4000,
0.2431,
0.2568,
0.2865,
0.3185,
0.3458,
0.3632,
0.3672,
0.3559,
0.3443,
0.3182,
0.2808,
0.2362,
0.1886,
0.1414,
0.0942,
-0.0044,
-0.0051,
0.0018,
-0.0216,
-0.0282,
-0.0346,
-0.0405,
-0.0455,
-0.0507,
-0.0404,
-0.0321,
-0.0281,
-0.0284,
-0.0322,
-0.0361,
-0.0363,
-0.0393,
-0.0398,
-0.0983,
-0.1242,
-0.1155,
-0.1710,
-0.2202,
-0.2637,
-0.3002,
-0.3284,
-0.3471,
-0.3559,
-0.3672,
-0.3632,
-0.3458,
-0.3185,
-0.2865,
-0.2568,
-0.2431,
-0.5000,
0.0000,
]
# re-interpolate b/c angles of attack are different
cl = np.interp(alpha_extrap, newpolar.alpha, newpolar.cl)
cd = np.interp(alpha_extrap, newpolar.alpha, newpolar.cd)
cm = np.interp(alpha_extrap, newpolar.alpha, newpolar.cm)
# test equality
np.testing.assert_allclose(cl, cl_extrap, atol=1.5e-4)
np.testing.assert_allclose(cd, cd_extrap, atol=1.5e-4)
np.testing.assert_allclose(cm, cm_extrap, atol=5e-3)
def test_extrap2(self):
cdmax = 1.0
newpolar = self.polar.extrapolate(cdmax=cdmax)
alpha_extrap = [
-180,
-170,
-160,
-150,
-140,
-130,
-120,
-110,
-100,
-90,
-80,
-70,
-60,
-50,
-40,
-30,
-20,
-10.1,
-8.2,
-6.1,
-4.1,
-2.1,
0.1,
2,
4.1,
6.2,
8.1,
10.2,
11.3,
12.1,
13.2,
14.2,
15.3,
16.3,
17.1,
18.1,
19.1,
20.1,
30,
40,
50,
60,
70,
80,
90,
100,
110,
120,
130,
140,
150,
160,
170,
180,
]
cl_extrap = [
0.0000,
0.2299,
0.4597,
0.4411,
0.4287,
0.3943,
0.3297,
0.2364,
0.1225,
0.0000,
-0.1225,
-0.2364,
-0.3297,
-0.3943,
-0.4287,
-0.4411,
-0.4637,
-0.6300,
-0.5600,
-0.6400,
-0.4200,
-0.2100,
0.0500,
0.3000,
0.5400,
0.7900,
0.9000,
0.9300,
0.9200,
0.9500,
0.9900,
1.0100,
1.0200,
1.0000,
0.9400,
0.8500,
0.7000,
0.6600,
0.6302,
0.6124,
0.5633,
0.4710,
0.3378,
0.1750,
0.0000,
-0.1225,
-0.2364,
-0.3297,
-0.3943,
-0.4287,
-0.4411,
-0.4597,
-0.2299,
0.0000,
]
cd_extrap = [
0.2135,
0.2404,
0.3176,
0.4349,
0.5767,
0.7241,
0.8568,
0.9560,
1.0069,
1.0000,
1.0069,
0.9560,
0.8568,
0.7241,
0.5767,
0.4349,
0.3158,
0.0390,
0.0233,
0.0131,
0.0134,
0.0119,
0.0122,
0.0116,
0.0144,
0.0146,
0.0162,
0.0274,
0.0303,
0.0369,
0.0509,
0.0648,
0.0776,
0.0917,
0.0994,
0.2306,
0.3142,
0.3186,
0.4349,
0.5767,
0.7241,
0.8568,
0.9560,
1.0069,
1.0000,
1.0069,
0.9560,
0.8568,
0.7241,
0.5767,
0.4349,
0.3176,
0.2404,
0.2135,
]
cm_extrap = [
0.0000,
0.4000,
0.2432,
0.2354,
0.2500,
0.2695,
0.2864,
0.2961,
0.2956,
0.2834,
0.2776,
0.2603,
0.2337,
0.2013,
0.1663,
0.1310,
0.0942,
-0.0044,
-0.0051,
0.0018,
-0.0216,
-0.0282,
-0.0346,
-0.0405,
-0.0455,
-0.0507,
-0.0404,
-0.0321,
-0.0281,
-0.0284,
-0.0322,
-0.0361,
-0.0363,
-0.0393,
-0.0398,
-0.0983,
-0.1242,
-0.1155,
-0.1577,
-0.1930,
-0.2239,
-0.2494,
-0.2683,
-0.2798,
-0.2834,
-0.2956,
-0.2961,
-0.2864,
-0.2695,
-0.2500,
-0.2354,
-0.2432,
-0.5000,
0.0000,
]
# re-interpolate b/c angles of attack are different
cl = np.interp(alpha_extrap, newpolar.alpha, newpolar.cl)
cd = np.interp(alpha_extrap, newpolar.alpha, newpolar.cd)
cm = np.interp(alpha_extrap, newpolar.alpha, newpolar.cm)
# test equality
np.testing.assert_allclose(cl, cl_extrap, atol=1.5e-4)
np.testing.assert_allclose(cd, cd_extrap, atol=1.5e-4)
np.testing.assert_allclose(cm, cm_extrap, atol=5e-3)
def test_extrap3(self):
cdmax = 1.5
newpolar = self.polar.extrapolate(cdmax)
alpha_extrap = [
-180,
-170,
-160,
-150,
-140,
-130,
-120,
-110,
-100,
-90,
-80,
-70,
-60,
-50,
-40,
-30,
-20,
-10.1,
-8.2,
-6.1,
-4.1,
-2.1,
0.1,
2,
4.1,
6.2,
8.1,
10.2,
11.3,
12.1,
13.2,
14.2,
15.3,
16.3,
17.1,
18.1,
19.1,
20.1,
30,
40,
50,
60,
70,
80,
90,
100,
110,
120,
130,
140,
150,
160,
170,
180,
]
cl_extrap = [
0.0000,
0.2299,
0.4597,
0.5266,
0.5608,
0.5429,
0.4685,
0.3434,
0.1810,
0.0000,
-0.1810,
-0.3434,
-0.4685,
-0.5429,
-0.5608,
-0.5266,
-0.4637,
-0.6300,
-0.5600,
-0.6400,
-0.4200,
-0.2100,
0.0500,
0.3000,
0.5400,
0.7900,
0.9000,
0.9300,
0.9200,
0.9500,
0.9900,
1.0100,
1.0200,
1.0000,
0.9400,
0.8500,
0.7000,
0.6600,
0.7523,
0.8012,
0.7756,
0.6693,
0.4906,
0.2586,
0.0000,
-0.1810,
-0.3434,
-0.4685,
-0.5429,
-0.5608,
-0.5266,
-0.4597,
-0.2299,
0.0000,
]
cd_extrap = [
0.1506,
0.1936,
0.3170,
0.5054,
0.7351,
0.9771,
1.2003,
1.3760,
1.4809,
1.5000,
1.4809,
1.3760,
1.2003,
0.9771,
0.7351,
0.5054,
0.3158,
0.0390,
0.0233,
0.0131,
0.0134,
0.0119,
0.0122,
0.0116,
0.0144,
0.0146,
0.0162,
0.0274,
0.0303,
0.0369,
0.0509,
0.0648,
0.0776,
0.0917,
0.0994,
0.2306,
0.3142,
0.3186,
0.5054,
0.7351,
0.9771,
1.2003,
1.3760,
1.4809,
1.5000,
1.4809,
1.3760,
1.2003,
0.9771,
0.7351,
0.5054,
0.3170,
0.1936,
0.1506,
]
cm_extrap = [
0.0000,
0.4000,
0.2431,
0.2723,
0.3130,
0.3540,
0.3888,
0.4118,
0.4190,
0.4084,
0.3926,
0.3602,
0.3148,
0.2614,
0.2049,
0.1488,
0.0942,
-0.0044,
-0.0051,
0.0018,
-0.0216,
-0.0282,
-0.0346,
-0.0405,
-0.0455,
-0.0507,
-0.0404,
-0.0321,
-0.0281,
-0.0284,
-0.0322,
-0.0361,
-0.0363,
-0.0393,
-0.0398,
-0.0983,
-0.1242,
-0.1155,
-0.1807,
-0.2399,
-0.2925,
-0.3370,
-0.3719,
-0.3959,
-0.4084,
-0.4190,
-0.4118,
-0.3888,
-0.3540,
-0.3130,
-0.2723,
-0.2431,
-0.5000,
0.0000,
]
# re-interpolate b/c angles of attack are different
cl = np.interp(alpha_extrap, newpolar.alpha, newpolar.cl)
cd = np.interp(alpha_extrap, newpolar.alpha, newpolar.cd)
cm = np.interp(alpha_extrap, newpolar.alpha, newpolar.cm)
# test equality
np.testing.assert_allclose(cl, cl_extrap, atol=1.5e-4)
np.testing.assert_allclose(cd, cd_extrap, atol=1.5e-4)
np.testing.assert_allclose(cm, cm_extrap, atol=5e-3)
class TestMisc(unittest.TestCase):
def setUp(self):
alpha = [
-10.1,
-8.2,
-6.1,
-4.1,
-2.1,
0.1,
2,
4.1,
6.2,
8.1,
10.2,
11.3,
12.1,
13.2,
14.2,
15.3,
16.3,
17.1,
18.1,
19.1,
20.1,
]
cl = [
-0.6300,
-0.5600,
-0.6400,
-0.4200,
-0.2100,
0.0500,
0.3000,
0.5400,
0.7900,
0.9000,
0.9300,
0.9200,
0.9500,
0.9900,
1.0100,
1.0200,
1.0000,
0.9400,
0.8500,
0.7000,
0.6600,
]
cd = [
0.0390,
0.0233,
0.0131,
0.0134,
0.0119,
0.0122,
0.0116,
0.0144,
0.0146,
0.0162,
0.0274,
0.0303,
0.0369,
0.0509,
0.0648,
0.0776,
0.0917,
0.0994,
0.2306,
0.3142,
0.3186,
]
cm = [
-0.0044,
-0.0051,
0.0018,
-0.0216,
-0.0282,
-0.0346,
-0.0405,
-0.0455,
-0.0507,
-0.0404,
-0.0321,
-0.0281,
-0.0284,
-0.0322,
-0.0361,
-0.0363,
-0.0393,
-0.0398,
-0.0983,
-0.1242,
-0.1155,
]
cm_zeros = np.zeros(len(cm))
Re = 1
self.polar = Polar(Re, alpha, cl, cd, cm)
self.polar2 = Polar(Re, alpha, cl, cd, cm_zeros)
def test_unsteady(self):
alpha0, alpha1, alpha2, cnSlope, cn1, cn2, cd0, cm0 = self.polar.unsteadyParams()
np.testing.assert_allclose(alpha0, -0.32307692307692304)
np.testing.assert_allclose(alpha1, 9.260783831245934)
np.testing.assert_allclose(alpha2, -6.779334979177289)
np.testing.assert_allclose(cnSlope, 6.4380618436681765)
np.testing.assert_allclose(cn1, 0.9201540372961516)
np.testing.assert_allclose(cn2, -0.6377683435797556)
np.testing.assert_allclose(cd0, 0.012142307692307694)
np.testing.assert_allclose(cm0, -0.03336923076923077)
def test_fully_separated(self):
cl_fs, f_st = self.polar.cl_fully_separated()
cl_fs_ref = np.array(
[
-0.63,
-0.42017185,
-0.35815607,
-0.23440711,
-0.11213462,
0.02669872,
0.15,
0.2815297,
0.41432191,
0.51685242,
0.60852946,
0.6464375,
0.68202361,
0.7299095,
0.76769179,
0.8037866,
0.82370687,
0.81723832,
0.78926905,
0.69419819,
0.65999953,
]
)
f_st_ref = np.array(
[
0.00000000e00,
2.34199688e-01,
7.26644559e-01,
7.32580663e-01,
8.34063987e-01,
8.34063987e-01,
1.00000000e00,
8.92315821e-01,
8.77625013e-01,
6.71133852e-01,
4.28392660e-01,
3.20122429e-01,
2.90558283e-01,
2.55881726e-01,
2.18728235e-01,
1.78134763e-01,
1.33254382e-01,
8.56818538e-02,
3.81986876e-02,
3.19820908e-03,
2.39632149e-07,
]
)
np.testing.assert_allclose(cl_fs, cl_fs_ref)
np.testing.assert_allclose(f_st, f_st_ref)
def test_cl_max(self):
cl_max, alpha_cl_max = self.polar.cl_max()
np.testing.assert_allclose(cl_max, 1.02)
np.testing.assert_allclose(alpha_cl_max, 15.3)
def test_linear_region(self):
alpha_linear_region, cl_linear_region, slope, alpha0 = self.polar.linear_region()
np.testing.assert_allclose(alpha_linear_region, np.array([-6.17381944, 7.43986639]))
np.testing.assert_allclose(cl_linear_region, np.array([-0.68718783, 0.91178174]))
np.testing.assert_allclose(slope, 0.11745309755638363)
np.testing.assert_allclose(alpha0, -0.32307692307692304)
def suite():
    suite = unittest.TestSuite()
    loader = unittest.TestLoader()
    suite.addTest(loader.loadTestsFromTestCase(TestBlend))
    suite.addTest(loader.loadTestsFromTestCase(Test3DStall))
    suite.addTest(loader.loadTestsFromTestCase(TestExtrap))
    suite.addTest(loader.loadTestsFromTestCase(TestMisc))
    return suite
if __name__ == "__main__":
result = unittest.TextTestRunner().run(suite())
if result.wasSuccessful():
exit(0)
else:
exit(1)
|
from flask import (Flask, request, jsonify)
from flask_jwt_extended import JWTManager
from flask_bcrypt import Bcrypt
from flask_sqlalchemy import SQLAlchemy
from flask_swagger_ui import get_swaggerui_blueprint
from datetime import (timedelta)
app = Flask(__name__)
app.config["JWT_SECRET_KEY"] = "super-secret"
app.config["SQLALCHEMY_TRACK_MODIFICATIONS"] = False
app.config["SQLALCHEMY_DATABASE_URI"] = "postgresql://postgres:bePG2jqRxmRZiz@postgres/appdb"
app.config["JWT_ACCESS_TOKEN_EXPIRES"] = timedelta(days=30)
jwt = JWTManager(app)
bcrypt = Bcrypt(app)
db = SQLAlchemy(app)
SWAGGER_URL = "/apidocs"
API_URL = "/static/swagger.yml"
bp_swagger_ui = get_swaggerui_blueprint(
SWAGGER_URL,
API_URL,
config={
'app_name': "Test application"
}
)
from .user import bp_user
from .list import bp_list
from .anime import bp_anime
app.register_blueprint(bp_swagger_ui)
app.register_blueprint(bp_user)
app.register_blueprint(bp_list)
app.register_blueprint(bp_anime)
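# --- Editor's note: hedged example of serving this app (the module/package
# name below is a placeholder, not from the original project) ---
#   FLASK_APP=<package_name> flask run
# The Swagger UI is then available at /apidocs, backed by /static/swagger.yml.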
|
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See LICENSE in the project root
# for license information.
from __future__ import absolute_import, division, print_function, unicode_literals
import pytest
import sys
from debugpy.common import log
from tests.patterns import some
NONE = None
NAN = float("nan")
def log_repr(x):
s = repr(x)
log.info("{0}", s)
VALUES = [
object(),
True,
False,
0,
-1,
-1.0,
1.23,
b"abc",
b"abcd",
"abc",
"abcd",
(),
(1, 2, 3),
[],
[1, 2, 3],
{},
{"a": 1, "b": 2},
]
@pytest.mark.parametrize("x", VALUES)
def test_value(x):
log_repr(some.object)
assert x == some.object
log_repr(some.object.equal_to(x))
assert x == some.object.equal_to(x)
log_repr(some.object.not_equal_to(x))
assert x != some.object.not_equal_to(x)
log_repr(some.object.same_as(x))
assert x == some.object.same_as(x)
log_repr(some.thing)
assert x == some.thing
log_repr(~some.thing)
assert x != ~some.thing
log_repr(~some.object)
assert x != ~some.object
log_repr(~some.object | x)
assert x == ~some.object | x
def test_none():
assert NONE == some.object
assert NONE == some.object.equal_to(None)
assert NONE == some.object.same_as(None)
assert NONE != some.thing
assert NONE == some.thing | None
def test_equal():
assert 123.0 == some.object.equal_to(123)
assert NAN != some.object.equal_to(NAN)
def test_not_equal():
assert 123.0 != some.object.not_equal_to(123)
assert NAN == some.object.not_equal_to(NAN)
def test_same():
assert 123.0 != some.object.same_as(123)
assert NAN == some.object.same_as(NAN)
def test_inverse():
pattern = ~some.object.equal_to(2)
log_repr(pattern)
assert pattern == 1
assert pattern != 2
assert pattern == 3
assert pattern == "2"
assert pattern == NONE
def test_either():
pattern = some.number | some.str
log_repr(pattern)
assert pattern == 123
pattern = some.str | 123 | some.bool
log_repr(pattern)
assert pattern == 123
def test_in_range():
pattern = some.int.in_range(-5, 5)
log_repr(pattern)
assert all([pattern == x for x in range(-5, 5)])
assert pattern != -6
assert pattern != 5
def test_str():
log_repr(some.str)
assert some.str == "abc"
if sys.version_info < (3,):
assert b"abc" == some.str
else:
assert b"abc" != some.str
def test_matching():
pattern = some.str.matching(r".(b+).")
log_repr(pattern)
assert pattern == "abbbc"
pattern = some.str.matching(r"bbb")
log_repr(pattern)
assert pattern != "abbbc"
pattern = some.bytes.matching(br".(b+).")
log_repr(pattern)
assert pattern == b"abbbc"
pattern = some.bytes.matching(br"bbb")
log_repr(pattern)
assert pattern != b"abbbc"
def test_starting_with():
pattern = some.str.starting_with("aa")
log_repr(pattern)
assert pattern == "aabbbb"
assert pattern != "bbbbaa"
assert pattern != "bbaabb"
assert pattern != "ababab"
pattern = some.bytes.starting_with(b"aa")
log_repr(pattern)
assert pattern == b"aabbbb"
assert pattern != b"bbbbaa"
assert pattern != b"bbaabb"
assert pattern != b"ababab"
def test_ending_with():
pattern = some.str.ending_with("aa")
log_repr(pattern)
assert pattern == "bbbbaa"
assert pattern == "bb\nbb\naa"
assert pattern != "aabbbb"
assert pattern != "bbaabb"
assert pattern != "ababab"
pattern = some.bytes.ending_with(b"aa")
log_repr(pattern)
assert pattern == b"bbbbaa"
assert pattern == b"bb\nbb\naa"
assert pattern != b"aabbbb"
assert pattern != b"bbaabb"
assert pattern != b"ababab"
def test_containing():
pattern = some.str.containing("aa")
log_repr(pattern)
assert pattern == "aabbbb"
assert pattern == "bbbbaa"
assert pattern == "bbaabb"
assert pattern == "bb\naa\nbb"
assert pattern != "ababab"
pattern = some.bytes.containing(b"aa")
log_repr(pattern)
assert pattern == b"aabbbb"
assert pattern == b"bbbbaa"
assert pattern == b"bbaabb"
assert pattern == b"bb\naa\nbb"
assert pattern != b"ababab"
def test_list():
assert [1, 2, 3] == [1, some.thing, 3]
assert [1, 2, 3, 4] != [1, some.thing, 4]
assert [1, 2, 3, 4] == some.list.containing(1)
assert [1, 2, 3, 4] == some.list.containing(2)
assert [1, 2, 3, 4] == some.list.containing(3)
assert [1, 2, 3, 4] == some.list.containing(4)
assert [1, 2, 3, 4] == some.list.containing(1, 2)
assert [1, 2, 3, 4] == some.list.containing(2, 3)
assert [1, 2, 3, 4] == some.list.containing(3, 4)
assert [1, 2, 3, 4] == some.list.containing(1, 2, 3)
assert [1, 2, 3, 4] == some.list.containing(2, 3, 4)
assert [1, 2, 3, 4] == some.list.containing(1, 2, 3, 4)
assert [1, 2, 3, 4] != some.list.containing(5)
assert [1, 2, 3, 4] != some.list.containing(1, 3)
assert [1, 2, 3, 4] != some.list.containing(1, 2, 4)
assert [1, 2, 3, 4] != some.list.containing(2, 3, 5)
def test_dict():
pattern = {"a": some.thing, "b": 2}
log_repr(pattern)
assert pattern == {"a": 1, "b": 2}
pattern = some.dict.containing({"a": 1})
log_repr(pattern)
assert pattern == {"a": 1, "b": 2}
def test_such_that():
pattern = some.thing.such_that(lambda x: x != 1)
log_repr(pattern)
assert 0 == pattern
assert 1 != pattern
assert 2 == pattern
def test_error():
log_repr(some.error)
assert some.error == Exception("error!")
assert some.error != {}
def test_recursive():
pattern = some.dict.containing(
{
"dict": some.dict.containing({"int": some.int.in_range(100, 200)}),
"list": [None, ~some.error, some.number | some.str],
}
)
log_repr(pattern)
assert pattern == {
"list": [None, False, 123],
"bool": True,
"dict": {"int": 123, "str": "abc"},
}
|
# Generated by Django 2.1.15 on 2021-04-20 06:47
import core.models
import datetime
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
('auth', '0009_alter_user_last_name_max_length'),
]
operations = [
migrations.CreateModel(
name='User',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('password', models.CharField(max_length=128, verbose_name='password')),
('last_login', models.DateTimeField(blank=True, null=True, verbose_name='last login')),
('is_superuser', models.BooleanField(default=False, help_text='Designates that this user has all permissions without explicitly assigning them.', verbose_name='superuser status')),
('email', models.EmailField(max_length=255, unique=True)),
('name', models.CharField(max_length=255)),
('observerInSitesIds', models.CharField(default='', max_length=255)),
('operatorInSitesIds', models.CharField(default='', max_length=255)),
('is_active', models.BooleanField(default=True)),
('is_staff', models.BooleanField(default=False)),
('image', models.ImageField(null=True, upload_to=core.models.user_image_file_path)),
('groups', models.ManyToManyField(blank=True, help_text='The groups this user belongs to. A user will get all permissions granted to each of their groups.', related_name='user_set', related_query_name='user', to='auth.Group', verbose_name='groups')),
('user_permissions', models.ManyToManyField(blank=True, help_text='Specific permissions for this user.', related_name='user_set', related_query_name='user', to='auth.Permission', verbose_name='user permissions')),
],
options={
'abstract': False,
},
),
migrations.CreateModel(
name='Device',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=255)),
('state', models.BooleanField()),
('lastRealPowerValue', models.FloatField(default=0.0)),
('lastAlgoPowerValue', models.FloatField(default=0.0)),
('cost', models.FloatField(default=0.0)),
('lastTimeValue', models.DateTimeField(null=True)),
('created_at', models.DateTimeField(default=datetime.datetime.now)),
],
),
migrations.CreateModel(
name='DeviceType',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(choices=[('HVAC', 'HVAC'), ('Lighting', 'Lighting'), ('Pump', 'Pump')], max_length=255)),
('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
),
migrations.CreateModel(
name='Ingredient',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=255)),
('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
),
migrations.CreateModel(
name='Recipe',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('title', models.CharField(max_length=255)),
('time_minutes', models.IntegerField()),
('price', models.DecimalField(decimal_places=2, max_digits=5)),
('link', models.CharField(blank=True, max_length=255)),
('image', models.ImageField(null=True, upload_to=core.models.recipe_image_file_path)),
('ingredients', models.ManyToManyField(to='core.Ingredient')),
],
),
migrations.CreateModel(
name='Sensor',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=255)),
('lastRealValue', models.FloatField(default=0.0)),
('lastAlgoValue', models.FloatField(default=0.0)),
('lastTimeValue', models.DateTimeField(null=True)),
('created_at', models.DateTimeField(default=datetime.datetime.now)),
],
),
migrations.CreateModel(
name='SensorData',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('datetime', models.DateTimeField(null=True)),
('realvalue', models.FloatField(default=0.0)),
('algovalue', models.FloatField(default=0.0)),
('name', models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, to='core.Sensor')),
('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
),
migrations.CreateModel(
name='SensorType',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(choices=[('Temp', 'Temperature'), ('SP', 'Space'), ('Hum', 'Humidity')], max_length=255)),
('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
),
migrations.CreateModel(
name='Site',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=255)),
('locationX', models.CharField(max_length=50)),
('locationY', models.CharField(max_length=50)),
('link', models.CharField(blank=True, max_length=255)),
('image', models.ImageField(null=True, upload_to=core.models.site_image_file_path)),
('created_at', models.DateTimeField(default=datetime.datetime.now)),
('devices', models.ManyToManyField(to='core.Device')),
('sensors', models.ManyToManyField(to='core.Sensor')),
('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
),
migrations.CreateModel(
name='Tag',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=255)),
('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
),
migrations.AddField(
model_name='sensor',
name='type',
field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, to='core.SensorType'),
),
migrations.AddField(
model_name='sensor',
name='user',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL),
),
migrations.AddField(
model_name='recipe',
name='tags',
field=models.ManyToManyField(to='core.Tag'),
),
migrations.AddField(
model_name='recipe',
name='user',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL),
),
migrations.AddField(
model_name='device',
name='sensors',
field=models.ManyToManyField(to='core.Sensor'),
),
migrations.AddField(
model_name='device',
name='type',
field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, to='core.DeviceType'),
),
migrations.AddField(
model_name='device',
name='user',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL),
),
]
|
from django.db import models
from robots_scraper.controller import get_robots_txt, robots_txt_extrapolation
class WebSite(models.Model):
domain = models.CharField(max_length=50)
website_url = models.CharField(max_length=50)
robots_txt_url = models.CharField(max_length=50)
websites = models.Manager()
    def __str__(self):
        # the model defines website_url; there is no plain `url` field
        return self.website_url
    def parse_robot_txt(self):
        robots_txt = get_robots_txt(self.robots_txt_url)
content = robots_txt_extrapolation(robots_txt)
return content
def after_save(self):
parsed = self.parse_robot_txt()
for user_agent, rules in parsed.items():
_user_agent = UserAgent(website=self, user_agent=user_agent)
_user_agent.save()
for rule, routes in rules.items():
_rule = Rule(user_agent=_user_agent, rule=rule)
_rule.save()
for route in routes:
_route = Route(rule=_rule, route=route)
_route.save()
def save(self, *args, **kwargs):
super().save(*args, **kwargs)
self.after_save()
class UserAgent(models.Model):
website = models.ForeignKey(WebSite, on_delete=models.CASCADE)
user_agent = models.CharField(max_length=30)
def __str__(self):
return self.user_agent
class Rule(models.Model):
user_agent = models.ForeignKey(UserAgent, on_delete=models.CASCADE)
rule = models.CharField(max_length=30)
def __str__(self):
return self.rule
class Route(models.Model):
rule = models.ForeignKey(Rule, on_delete=models.CASCADE)
route = models.CharField(max_length=50)
def __str__(self):
return self.route
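# --- Editor's usage sketch (assumes a configured Django project with these
# models migrated; the URLs below are placeholders) ---
#
#   site = WebSite(domain="example.com",
#                  website_url="https://example.com",
#                  robots_txt_url="https://example.com/robots.txt")
#   site.save()  # save() triggers after_save(), which fetches and parses
#                # robots.txt and creates the UserAgent -> Rule -> Route rows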
|
import argparse
import joblib
import json
import numpy as np
import os
import pandas as pd
import warnings
from itertools import chain
from scipy.io import mmread
from sklearn.pipeline import Pipeline
from sklearn.metrics._scorer import _check_multimetric_scoring
from sklearn.model_selection._validation import _score
from sklearn.utils import indexable, _safe_indexing
from galaxy_ml.model_validations import train_test_split
from galaxy_ml.keras_galaxy_models import (_predict_generator,
KerasGBatchClassifier)
from galaxy_ml.model_persist import load_model_from_h5, dump_model_to_h5
from galaxy_ml.utils import (SafeEval, clean_params, gen_compute_scores,
get_main_estimator, get_scoring, get_module,
read_columns)
N_JOBS = int(os.environ.get('GALAXY_SLOTS', 1))
CACHE_DIR = os.path.join(os.getcwd(), 'cached')
del os
NON_SEARCHABLE = ('n_jobs', 'pre_dispatch', 'memory', '_path', '_dir',
'nthread', 'callbacks')
ALLOWED_CALLBACKS = ('EarlyStopping', 'TerminateOnNaN', 'ReduceLROnPlateau',
'CSVLogger', 'None')
def _eval_swap_params(params_builder):
swap_params = {}
for p in params_builder['param_set']:
swap_value = p['sp_value'].strip()
if swap_value == '':
continue
param_name = p['sp_name']
if param_name.lower().endswith(NON_SEARCHABLE):
warnings.warn("Warning: `%s` is not eligible for search and was "
"omitted!" % param_name)
continue
if not swap_value.startswith(':'):
safe_eval = SafeEval(load_scipy=True, load_numpy=True)
ev = safe_eval(swap_value)
else:
            # A leading `:` before the search list asks for estimator evaluation
safe_eval_es = SafeEval(load_estimators=True)
swap_value = swap_value[1:].strip()
# TODO maybe add regular express check
ev = safe_eval_es(swap_value)
swap_params[param_name] = ev
return swap_params
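# --- Editor's note: hedged input/output sketch for _eval_swap_params; the
# parameter name 'C' and its value list are hypothetical examples only ---
#   params_builder = {'param_set': [{'sp_name': 'C', 'sp_value': '[0.1, 1.0, 10.0]'}]}
#   _eval_swap_params(params_builder)  # -> {'C': [0.1, 1.0, 10.0]}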
def train_test_split_none(*arrays, **kwargs):
"""extend train_test_split to take None arrays
and support split by group names.
"""
nones = []
new_arrays = []
for idx, arr in enumerate(arrays):
if arr is None:
nones.append(idx)
else:
new_arrays.append(arr)
if kwargs['shuffle'] == 'None':
kwargs['shuffle'] = None
group_names = kwargs.pop('group_names', None)
if group_names is not None and group_names.strip():
group_names = [name.strip() for name in
group_names.split(',')]
new_arrays = indexable(*new_arrays)
groups = kwargs['labels']
n_samples = new_arrays[0].shape[0]
index_arr = np.arange(n_samples)
test = index_arr[np.isin(groups, group_names)]
train = index_arr[~np.isin(groups, group_names)]
rval = list(chain.from_iterable(
(_safe_indexing(a, train),
_safe_indexing(a, test)) for a in new_arrays))
else:
rval = train_test_split(*new_arrays, **kwargs)
for pos in nones:
        rval[pos * 2: pos * 2] = [None, None]  # re-insert Nones at their original slots
return rval
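# --- Editor's note (hedged usage sketch; the keyword arguments shown are
# assumptions about what the wrapped splitter accepts) ---
# The point of this wrapper is that None placeholders survive the split, so a
# caller can always unpack six values even when y or groups is missing, e.g.:
#   X_tr, X_te, y_tr, y_te, g_tr, g_te = train_test_split_none(
#       X, None, None, shuffle='simple', test_size=0.25)
#   # y_tr, y_te, g_tr, g_te are all None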
def _evaluate_keras_and_sklearn_scores(estimator, data_generator, X,
y=None, sk_scoring=None,
steps=None, batch_size=32,
return_predictions=False):
"""output scores for bother keras and sklearn metrics
Parameters
-----------
estimator : object
Fitted `galaxy_ml.keras_galaxy_models.KerasGBatchClassifier`.
data_generator : object
From `galaxy_ml.preprocessors.ImageDataFrameBatchGenerator`.
X : 2-D array
        Contains indices of images that need to be evaluated.
y : None
Target value.
sk_scoring : dict
Galaxy tool input parameters.
steps : integer or None
Evaluation/prediction steps before stop.
batch_size : integer
Number of samples in a batch
return_predictions : bool, default is False
Whether to return predictions and true labels.
"""
scores = {}
generator = data_generator.flow(X, y=y, batch_size=batch_size)
# keras metrics evaluation
# handle scorer, convert to scorer dict
generator.reset()
score_results = estimator.model_.evaluate_generator(generator,
steps=steps)
metrics_names = estimator.model_.metrics_names
if not isinstance(metrics_names, list):
scores[metrics_names] = score_results
else:
scores = dict(zip(metrics_names, score_results))
if sk_scoring['primary_scoring'] == 'default' and\
not return_predictions:
return scores
generator.reset()
predictions, y_true = _predict_generator(estimator.model_,
generator,
steps=steps)
# for sklearn metrics
if sk_scoring['primary_scoring'] != 'default':
scorer = get_scoring(sk_scoring)
if not isinstance(scorer, (dict, list)):
scorer = [sk_scoring['primary_scoring']]
scorer = _check_multimetric_scoring(estimator, scoring=scorer)
sk_scores = gen_compute_scores(y_true, predictions, scorer)
scores.update(sk_scores)
if return_predictions:
return scores, predictions, y_true
else:
return scores, None, None
def main(inputs, infile_estimator, infile1, infile2,
outfile_result, outfile_object=None,
outfile_y_true=None,
outfile_y_preds=None, groups=None,
ref_seq=None, intervals=None, targets=None,
fasta_path=None):
"""
    Parameters
    ----------
inputs : str
File path to galaxy tool parameter.
infile_estimator : str
File path to estimator.
infile1 : str
File path to dataset containing features.
infile2 : str
File path to dataset containing target values.
outfile_result : str
File path to save the results, either cv_results or test result.
outfile_object : str, optional
File path to save searchCV object.
outfile_y_true : str, optional
File path to target values for prediction.
outfile_y_preds : str, optional
File path to save predictions.
groups : str
File path to dataset containing groups labels.
ref_seq : str
File path to dataset containing genome sequence file.
intervals : str
File path to dataset containing interval file.
targets : str
File path to dataset compressed target bed file.
fasta_path : str
File path to dataset containing fasta file.
"""
warnings.simplefilter('ignore')
with open(inputs, 'r') as param_handler:
params = json.load(param_handler)
# load estimator
estimator = load_model_from_h5(infile_estimator)
estimator = clean_params(estimator)
# swap hyperparameter
swapping = params['experiment_schemes']['hyperparams_swapping']
swap_params = _eval_swap_params(swapping)
estimator.set_params(**swap_params)
estimator_params = estimator.get_params()
# store read dataframe object
loaded_df = {}
input_type = params['input_options']['selected_input']
# tabular input
if input_type == 'tabular':
header = 'infer' if params['input_options']['header1'] else None
column_option = (params['input_options']['column_selector_options_1']
['selected_column_selector_option'])
if column_option in ['by_index_number', 'all_but_by_index_number',
'by_header_name', 'all_but_by_header_name']:
c = params['input_options']['column_selector_options_1']['col1']
else:
c = None
df_key = infile1 + repr(header)
df = pd.read_csv(infile1, sep='\t', header=header,
parse_dates=True)
loaded_df[df_key] = df
X = read_columns(df, c=c, c_option=column_option).astype(float)
# sparse input
elif input_type == 'sparse':
X = mmread(open(infile1, 'r'))
# fasta_file input
elif input_type == 'seq_fasta':
pyfaidx = get_module('pyfaidx')
sequences = pyfaidx.Fasta(fasta_path)
n_seqs = len(sequences.keys())
X = np.arange(n_seqs)[:, np.newaxis]
for param in estimator_params.keys():
if param.endswith('fasta_path'):
estimator.set_params(
**{param: fasta_path})
break
else:
raise ValueError(
"The selected estimator doesn't support "
"fasta file input! Please consider using "
"KerasGBatchClassifier with "
"FastaDNABatchGenerator/FastaProteinBatchGenerator "
"or having GenomeOneHotEncoder/ProteinOneHotEncoder "
"in pipeline!")
elif input_type == 'refseq_and_interval':
path_params = {
'data_batch_generator__ref_genome_path': ref_seq,
'data_batch_generator__intervals_path': intervals,
'data_batch_generator__target_path': targets
}
estimator.set_params(**path_params)
n_intervals = sum(1 for line in open(intervals))
X = np.arange(n_intervals)[:, np.newaxis]
# Get target y
header = 'infer' if params['input_options']['header2'] else None
column_option = (params['input_options']['column_selector_options_2']
['selected_column_selector_option2'])
if column_option in ['by_index_number', 'all_but_by_index_number',
'by_header_name', 'all_but_by_header_name']:
c = params['input_options']['column_selector_options_2']['col2']
else:
c = None
df_key = infile2 + repr(header)
if df_key in loaded_df:
infile2 = loaded_df[df_key]
else:
infile2 = pd.read_csv(infile2, sep='\t',
header=header, parse_dates=True)
loaded_df[df_key] = infile2
y = read_columns(
infile2,
c=c,
c_option=column_option,
sep='\t',
header=header,
parse_dates=True)
if len(y.shape) == 2 and y.shape[1] == 1:
y = y.ravel()
if input_type == 'refseq_and_interval':
estimator.set_params(
data_batch_generator__features=y.ravel().tolist())
y = None
# end y
# load groups
if groups:
groups_selector = (params['experiment_schemes']['test_split']
['split_algos']).pop('groups_selector')
header = 'infer' if groups_selector['header_g'] else None
column_option = \
(groups_selector['column_selector_options_g']
['selected_column_selector_option_g'])
if column_option in ['by_index_number', 'all_but_by_index_number',
'by_header_name', 'all_but_by_header_name']:
c = groups_selector['column_selector_options_g']['col_g']
else:
c = None
df_key = groups + repr(header)
if df_key in loaded_df:
groups = loaded_df[df_key]
groups = read_columns(
groups,
c=c,
c_option=column_option,
sep='\t',
header=header,
parse_dates=True)
groups = groups.ravel()
# del loaded_df
del loaded_df
# cache iraps_core fits could increase search speed significantly
memory = joblib.Memory(location=CACHE_DIR, verbose=0)
main_est = get_main_estimator(estimator)
if main_est.__class__.__name__ == 'IRAPSClassifier':
main_est.set_params(memory=memory)
# handle scorer, convert to scorer dict
scoring = params['experiment_schemes']['metrics']['scoring']
scorer = get_scoring(scoring)
if not isinstance(scorer, (dict, list)):
scorer = [scoring['primary_scoring']]
scorer = _check_multimetric_scoring(estimator, scoring=scorer)
# handle test (first) split
test_split_options = (params['experiment_schemes']
['test_split']['split_algos'])
if test_split_options['shuffle'] == 'group':
test_split_options['labels'] = groups
if test_split_options['shuffle'] == 'stratified':
if y is not None:
test_split_options['labels'] = y
else:
raise ValueError("Stratified shuffle split is not "
"applicable on empty target values!")
X_train, X_test, y_train, y_test, groups_train, groups_test = \
train_test_split_none(X, y, groups, **test_split_options)
exp_scheme = params['experiment_schemes']['selected_exp_scheme']
# handle validation (second) split
if exp_scheme == 'train_val_test':
val_split_options = (params['experiment_schemes']
['val_split']['split_algos'])
if val_split_options['shuffle'] == 'group':
val_split_options['labels'] = groups_train
if val_split_options['shuffle'] == 'stratified':
if y_train is not None:
val_split_options['labels'] = y_train
else:
raise ValueError("Stratified shuffle split is not "
"applicable on empty target values!")
X_train, X_val, y_train, y_val, groups_train, groups_val = \
train_test_split_none(X_train, y_train, groups_train,
**val_split_options)
# train and eval
if hasattr(estimator, 'config') and hasattr(estimator, 'model_type'):
if exp_scheme == 'train_val_test':
estimator.fit(X_train, y_train,
validation_data=(X_val, y_val))
else:
estimator.fit(X_train, y_train,
validation_data=(X_test, y_test))
else:
estimator.fit(X_train, y_train)
if isinstance(estimator, KerasGBatchClassifier):
scores = {}
steps = estimator.prediction_steps
batch_size = estimator.batch_size
data_generator = estimator.data_generator_
scores, predictions, y_true = _evaluate_keras_and_sklearn_scores(
estimator, data_generator, X_test, y=y_test,
sk_scoring=scoring, steps=steps, batch_size=batch_size,
return_predictions=bool(outfile_y_true))
else:
scores = {}
if hasattr(estimator, 'model_') \
and hasattr(estimator.model_, 'metrics_names'):
batch_size = estimator.batch_size
score_results = estimator.model_.evaluate(X_test, y=y_test,
batch_size=batch_size,
verbose=0)
metrics_names = estimator.model_.metrics_names
if not isinstance(metrics_names, list):
scores[metrics_names] = score_results
else:
scores = dict(zip(metrics_names, score_results))
if hasattr(estimator, 'predict_proba'):
predictions = estimator.predict_proba(X_test)
else:
predictions = estimator.predict(X_test)
y_true = y_test
sk_scores = _score(estimator, X_test, y_test, scorer)
scores.update(sk_scores)
# handle output
if outfile_y_true:
try:
pd.DataFrame(y_true).to_csv(outfile_y_true, sep='\t',
index=False)
pd.DataFrame(predictions).astype(np.float32).to_csv(
outfile_y_preds, sep='\t', index=False,
float_format='%g', chunksize=10000)
except Exception as e:
print("Error in saving predictions: %s" % e)
# handle output
for name, score in scores.items():
scores[name] = [score]
df = pd.DataFrame(scores)
df = df[sorted(df.columns)]
df.to_csv(path_or_buf=outfile_result, sep='\t',
header=True, index=False)
memory.clear(warn=False)
if outfile_object:
dump_model_to_h5(estimator, outfile_object)
if __name__ == '__main__':
aparser = argparse.ArgumentParser()
aparser.add_argument("-i", "--inputs", dest="inputs", required=True)
aparser.add_argument("-e", "--estimator", dest="infile_estimator")
aparser.add_argument("-X", "--infile1", dest="infile1")
aparser.add_argument("-y", "--infile2", dest="infile2")
aparser.add_argument("-O", "--outfile_result", dest="outfile_result")
aparser.add_argument("-o", "--outfile_object", dest="outfile_object")
aparser.add_argument("-l", "--outfile_y_true", dest="outfile_y_true")
aparser.add_argument("-p", "--outfile_y_preds", dest="outfile_y_preds")
aparser.add_argument("-g", "--groups", dest="groups")
aparser.add_argument("-r", "--ref_seq", dest="ref_seq")
aparser.add_argument("-b", "--intervals", dest="intervals")
aparser.add_argument("-t", "--targets", dest="targets")
aparser.add_argument("-f", "--fasta_path", dest="fasta_path")
args = aparser.parse_args()
main(args.inputs, args.infile_estimator, args.infile1, args.infile2,
args.outfile_result, outfile_object=args.outfile_object,
outfile_y_true=args.outfile_y_true,
outfile_y_preds=args.outfile_y_preds,
groups=args.groups,
ref_seq=args.ref_seq, intervals=args.intervals,
targets=args.targets, fasta_path=args.fasta_path)
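# --- Editor's note: hedged example invocation (the script and file names are
# placeholders, not from the original tool wrapper) ---
#   python <this_script>.py -i inputs.json -e estimator.h5 \
#       -X features.tsv -y targets.tsv -O results.tsv -o fitted_model.h5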
|
#!/usr/bin/env python
# coding: utf-8
"""
"""
import traceback
import time
import argparse
import sys
import datetime
from pathlib import Path
import shutil
from dotenv import load_dotenv
import pandas as pd
from column_definitions import standard_columns, key_mapping
from get_paper_info import get_paper_info, which_literature_site
from workflow_utilities import labels_fix, abstract_fix, extract, filter_by_lit_site, filter_by_count, save_status, scrape_paper_info
def update_paper_info_db(df_paper_info_db,n,filter):
# Need to keep track of the status of each attempt to get paper info
df_status = pd.DataFrame(columns=['url', 'get_paper_info_result',
'title_len', 'abstract_len', 'doi_len',
'full_doc_link_len', 'is_open_access',
'num_labels',
'error_traceback', 'scrape_time'])
    df_status = df_status.astype(int)  # keep counts as ints, no floats
    i = 0  # number of records scraped so far (bounded by -n)
for index, row in df_paper_info_db.iterrows():
if filter and filter != row['literature_site']:
continue
if n and i >= n:
break
input_record = row.to_dict()
print(f"{index}: input_record['url']: {input_record['url']}")
# check to see if anything missing
if len(input_record['title']) and len(input_record['abstract']) \
and len(input_record['full_doc_link']) and len(input_record['url']) \
and len(input_record['doi']):
print("already complete")
continue
# scrape this url
new_input_record, df_status = scrape_paper_info(input_record, df_status)
# Loop through the fields and if the same, skip. If different warn and update. If
# current was empty, just update
# for key in standard_columns:
# if not matching_record[key]:
# matching_record[key] = input_record[key]
# else:
# if matching_record[key] != input_record[key]:
# print(f"Warning! For {input_record['url']} the {key} value changed.")
# matching_record[key] = input_record[key]
# replace?
print("updating")
# df_paper_info_db.loc[df_paper_info_db['url'] == input_record['url']] = matching_record
        # replace any existing row for this url with the freshly scraped record
        drop_idx = df_paper_info_db[df_paper_info_db['url'] == input_record['url']].index
        df_paper_info_db.drop(drop_idx, inplace=True)
        df_paper_info_db = df_paper_info_db.append(input_record, ignore_index=True)
        i += 1
return df_paper_info_db, df_status
default_path_to_env = Path( Path.home(), '.petal_env')
parser = argparse.ArgumentParser(prog = sys.argv[0],
description = "scape sites and generate csv with info.")
parser.add_argument('paper_info_csv', type=str, help='main paper info CSV file to be updated')
parser.add_argument('status_csv', type=str, help='status CSV file')
parser.add_argument('-n', help='limit the number of journals to this number',
default=None, type=int)
parser.add_argument('--filter', type=str,
help='filter based on matching this search string in Primary Lit Site',
default=None)
parser.add_argument("--env_path", help = "path to .env file containing API keys",
default = default_path_to_env, type = str)
args = parser.parse_args()
load_dotenv(args.env_path)
df_paper_info_db = pd.read_csv(args.paper_info_csv)
# Make a timestamped backup copy
path = Path(args.paper_info_csv)
mtime = datetime.datetime.fromtimestamp(path.stat().st_mtime)
s = mtime.strftime('%Y%m%d-%H-%M-%S')
p = str(path.with_suffix('')) + '_' + s + path.suffix
shutil.copy2(args.paper_info_csv, p)
df_paper_info_db = df_paper_info_db.fillna("")
# Update and append paper info table
df_paper_info_db, df_status = update_paper_info_db(df_paper_info_db,args.n,args.filter)
df_paper_info_db.to_csv(args.paper_info_csv, index=False)
save_status(df_status, args.status_csv)
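# --- Editor's note: hedged example invocation (script and file names are
# placeholders; <lit_site> stands for a literature-site string to filter on) ---
#   python <this_script>.py paper_info.csv status.csv -n 10 --filter <lit_site>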
|