| max_stars_repo_path (string) | max_stars_repo_name (string) | max_stars_count (int64) | id (string) | content (string) | score (float64) | int_score (int64) |
|---|---|---|---|---|---|---|
regression/errors.py | sahitpj/MachineLearning | 2 | 12761951 | <reponame>sahitpj/MachineLearning
import numpy as np
import torch
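# Regression error metrics: mean squared error (MSE) and sum of squared errors
# (SSE), each in a NumPy and a PyTorch variant. A minimal usage sketch with
# hypothetical arrays:
#   MSE(np.array([1.0, 2.0]), np.array([1.5, 2.0]))  # -> 0.125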
def MSE(Y_predict, Y):
assert(Y_predict.shape[0] == Y.shape[0])
return np.sum((Y_predict-Y)**2)/Y.shape[0]
def MSE_torch(Y_predict, Y):
assert(Y_predict.shape[0] == Y.shape[0])
return torch.sum((Y_predict-Y)**2)/Y.shape[0]
def SSE(Y_predict, Y):
assert(Y_predict.shape[0] == Y.shape[0])
return np.sum((Y_predict-Y)**2)
def SSE_torch(Y_predict, Y):
assert(Y_predict.shape[0] == Y.shape[0])
return torch.sum((Y_predict-Y)**2) | 3.125 | 3 |
paper1_examples/tsunami_Alaska/maketopo.py | rjleveque/adjoint | 3 | 12761952 | """
Download topo and dtopo files needed for this example.
Call functions with makeplots==True to create plots of topo, slip, and dtopo.
"""
import os
import clawpack.clawutil.data
try:
CLAW = os.environ['CLAW']
except:
raise Exception("*** Must first set CLAW enviornment variable")
# Scratch directory for storing topo and dtopo files:
scratch_dir = os.path.join(CLAW, 'geoclaw', 'scratch')
def get_topo(makeplots=False):
"""
Retrieve the topo file from online.
"""
from clawpack.geoclaw import topotools
topo_fname = 'etopo1min170E124W40N61N.asc'
url = 'http://students.washington.edu/bndavis/misc/topo/' + topo_fname
clawpack.clawutil.data.get_remote_file(url, output_dir=scratch_dir,
file_name=topo_fname, verbose=True)
topo_fname = 'etopo4min120E110W0N62N.asc'
url = 'http://students.washington.edu/bndavis/misc/topo/' + topo_fname
clawpack.clawutil.data.get_remote_file(url, output_dir=scratch_dir,
file_name=topo_fname, verbose=True)
topo_fname = 'cc-1sec-c.asc'
url = 'http://students.washington.edu/bndavis/misc/topo/' + topo_fname
clawpack.clawutil.data.get_remote_file(url, output_dir=scratch_dir,
file_name=topo_fname, verbose=True)
topo_fname = 'cc-1_3sec-c_pierless.asc'
url = 'http://students.washington.edu/bndavis/misc/topo/' + topo_fname
clawpack.clawutil.data.get_remote_file(url, output_dir=scratch_dir,
file_name=topo_fname, verbose=True)
if makeplots:
from matplotlib import pyplot as plt
topo = topotools.Topography(topo_fname, topo_type=2)
topo.plot()
fname = os.path.splitext(topo_fname)[0] + '.png'
plt.savefig(fname)
print "Created ",fname
def make_dtopo(makeplots=False):
"""
Create dtopo data file for deformation of sea floor due to earthquake.
Uses the Okada model with fault parameters and mesh specified below.
"""
from clawpack.geoclaw import dtopotools
import numpy
dtopo_fname = 'AASZ04v2.tt3'
url = 'http://students.washington.edu/bndavis/misc/dtopo/alaska/' + dtopo_fname
clawpack.clawutil.data.get_remote_file(url, output_dir=scratch_dir,
file_name=dtopo_fname, verbose=True)
if makeplots:
from matplotlib import pyplot as plt
if fault.dtopo is None:
# read in the pre-existing file:
print "Reading in dtopo file..."
dtopo = dtopotools.DTopography()
dtopo.read(dtopo_fname, dtopo_type=3)
x = dtopo.x
y = dtopo.y
plt.figure(figsize=(12,7))
ax1 = plt.subplot(121)
ax2 = plt.subplot(122)
fault.plot_subfaults(axes=ax1,slip_color=True)
ax1.set_xlim(x.min(),x.max())
ax1.set_ylim(y.min(),y.max())
dtopo.plot_dz_colors(1.,axes=ax2)
fname = os.path.splitext(dtopo_fname)[0] + '.png'
plt.savefig(fname)
print "Created ",fname
if __name__=='__main__':
get_topo(False)
make_dtopo(False)
| 3.015625 | 3 |
oops_fhir/r4/value_set/immunization_evaluation_dose_status_codes.py | Mikuana/oops_fhir | 0 | 12761953 | from pathlib import Path
from fhir.resources.valueset import ValueSet as _ValueSet
from oops_fhir.utils import ValueSet
from oops_fhir.r4.code_system.immunization_evaluation_dose_status_codes import (
ImmunizationEvaluationDoseStatusCodes as ImmunizationEvaluationDoseStatusCodes_,
)
__all__ = ["ImmunizationEvaluationDoseStatusCodes"]
_resource = _ValueSet.parse_file(Path(__file__).with_suffix(".json"))
class ImmunizationEvaluationDoseStatusCodes(ImmunizationEvaluationDoseStatusCodes_):
"""
Immunization Evaluation Dose Status codes
The value set to instantiate this attribute should be drawn from a
terminologically robust code system that consists of or contains
concepts to support describing the validity of a dose relative to a
particular recommended schedule. This value set is provided as a
suggestive example.
Status: draft - Version: 4.0.1
http://hl7.org/fhir/ValueSet/immunization-evaluation-dose-status
"""
class Meta:
resource = _resource
| 2.046875 | 2 |
Preparation/Sorting Method/insertion_sort.py | jaiswalIT02/pythonprograms | 0 | 12761954 | a=[2,6,7,5,11,15]
n=len(a)
print("old array=",a)
for i in range(n-1):
if a[i]<=a[i+1]:
continue
t=a[i+1]
j=i+1
while j>=1 and a[j-1]>t:
a[j]=a[j-1]
j=j-1
a[j]=t
print("Sorted Array=",a) | 3.609375 | 4 |
eICU_tstr_evaluation.py | cliohong/RGAN | 585 | 12761955 | <reponame>cliohong/RGAN
import data_utils
import pandas as pd
import numpy as np
import tensorflow as tf
import math, random, itertools
import pickle
import time
import json
import os
from sklearn.ensemble import RandomForestClassifier
from sklearn.metrics import accuracy_score, precision_score, recall_score, roc_curve, auc, precision_recall_curve
import copy
from scipy.stats import sem
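# TSTR = "Train on Synthetic, Test on Real": random forests are trained on the
# GAN-generated eICU datasets and evaluated on the real validation/test splits,
# then compared against classifiers trained on real data and against random
# predictions.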
print ("Starting TSTR experiment.")
print ("loading data...")
samples, labels = data_utils.eICU_task()
train_seqs = samples['train'].reshape(-1,16,4)
vali_seqs = samples['vali'].reshape(-1,16,4)
test_seqs = samples['test'].reshape(-1,16,4)
train_targets = labels['train']
vali_targets = labels['vali']
test_targets = labels['test']
train_seqs, vali_seqs, test_seqs = data_utils.scale_data(train_seqs, vali_seqs, test_seqs)
print ("data loaded.")
# iterate over all dataset versions generated after running the GAN for 5 times
aurocs_all_runs = []
auprcs_all_runs = []
for oo in range(5):
print (oo)
# find the best "dataset epoch", meaning the GAN epoch that generated the dataset
# validation is only done in some of the tasks, and the others are considered unknown
# (use validation set to pick best GAN epoch, then get result on test set)
vali_seqs_r = vali_seqs.reshape((vali_seqs.shape[0], -1))
test_seqs_r = test_seqs.reshape((test_seqs.shape[0], -1))
all_aurocs_exp = []
all_auprcs_exp = []
for nn in np.arange(50,1050,50):
with open('./synthetic_eICU_datasets/samples_eICU_cdgan_synthetic_dataset_r' + str(oo) + '_' + str(nn) + '.pk', 'rb') as f:
synth_data = pickle.load(file=f)
with open('./synthetic_eICU_datasets/labels_eICU_cdgan_synthetic_dataset_r' + str(oo) + '_' + str(nn) + '.pk', 'rb') as f:
synth_labels = pickle.load(file=f)
train_seqs = synth_data
train_targets = synth_labels
train_seqs_r = train_seqs.reshape((train_seqs.shape[0], -1))
all_aurocs = []
all_auprcs = []
# in case we want to train each random forest multiple times with each dataset
for exp_num in range(1):
accuracies = []
precisions = []
recalls = []
aurocs = []
auprcs = []
for col_num in range(train_targets.shape[1]):
estimator = RandomForestClassifier(n_estimators=100)
estimator.fit(train_seqs_r, train_targets[:,col_num])
accuracies.append(estimator.score(vali_seqs_r, vali_targets[:,col_num]))
preds = estimator.predict(vali_seqs_r)
precisions.append(precision_score(y_pred=preds, y_true=vali_targets[:,col_num]))
recalls.append(recall_score(y_pred=preds, y_true=vali_targets[:,col_num]))
preds = estimator.predict_proba(vali_seqs_r)
fpr, tpr, thresholds = roc_curve(vali_targets[:,col_num], preds[:,1])
aurocs.append(auc(fpr, tpr))
precision, recall, thresholds = precision_recall_curve(vali_targets[:,col_num], preds[:,1])
auprcs.append(auc(recall, precision))
all_aurocs.append(aurocs)
all_auprcs.append(auprcs)
all_aurocs_exp.append(all_aurocs)
all_auprcs_exp.append(all_auprcs)
#with open('all_aurocs_exp_r' + str(oo) + '.pk', 'wb') as f:
# pickle.dump(file=f, obj=all_aurocs_exp)
#with open('all_auprcs_exp_r' + str(oo) + '.pk', 'wb') as f:
# pickle.dump(file=f, obj=all_auprcs_exp)
best_idx = np.argmax(np.array(all_aurocs_exp).sum(axis=1)[:,[0,2,4]].sum(axis=1) + np.array(all_auprcs_exp).sum(axis=1)[:,[0,2,4]].sum(axis=1))
best = np.arange(50,1050,50)[best_idx]
with open('./synthetic_eICU_datasets/samples_eICU_cdgan_synthetic_dataset_r' + str(oo) + '_' + str(best) + '.pk', 'rb') as f:
synth_data = pickle.load(file=f)
with open('./synthetic_eICU_datasets/labels_eICU_cdgan_synthetic_dataset_r' + str(oo) + '_' + str(best) + '.pk', 'rb') as f:
synth_labels = pickle.load(file=f)
train_seqs = synth_data
train_targets = synth_labels
train_seqs_r = train_seqs.reshape((train_seqs.shape[0], -1))
accuracies = []
precisions = []
recalls = []
aurocs = []
auprcs = []
for col_num in range(train_targets.shape[1]):
estimator = RandomForestClassifier(n_estimators=100)
estimator.fit(train_seqs_r, train_targets[:,col_num])
accuracies.append(estimator.score(test_seqs_r, test_targets[:,col_num]))
preds = estimator.predict(test_seqs_r)
precisions.append(precision_score(y_pred=preds, y_true=test_targets[:,col_num]))
recalls.append(recall_score(y_pred=preds, y_true=test_targets[:,col_num]))
preds = estimator.predict_proba(test_seqs_r)
fpr, tpr, thresholds = roc_curve(test_targets[:,col_num], preds[:,1])
aurocs.append(auc(fpr, tpr))
precision, recall, thresholds = precision_recall_curve(test_targets[:,col_num], preds[:,1])
auprcs.append(auc(recall, precision))
print(accuracies)
print(precisions)
print(recalls)
print(aurocs)
print(auprcs)
print ("----------------------------")
aurocs_all_runs.append(aurocs)
auprcs_all_runs.append(auprcs)
allr = np.vstack(aurocs_all_runs)
allp = np.vstack(auprcs_all_runs)
tstr_aurocs_mean = allr.mean(axis=0)
tstr_aurocs_sem = sem(allr, axis=0)
tstr_auprcs_mean = allp.mean(axis=0)
tstr_auprcs_sem = sem(allp, axis=0)
# get AUROC/AUPRC for real, random data
print ("Experiment with real data.")
print ("loading data...")
samples, labels = data_utils.eICU_task()
train_seqs = samples['train'].reshape(-1,16,4)
vali_seqs = samples['vali'].reshape(-1,16,4)
test_seqs = samples['test'].reshape(-1,16,4)
train_targets = labels['train']
vali_targets = labels['vali']
test_targets = labels['test']
train_seqs, vali_seqs, test_seqs = data_utils.scale_data(train_seqs, vali_seqs, test_seqs)
print ("data loaded.")
train_seqs_r = train_seqs.reshape((train_seqs.shape[0], -1))
vali_seqs_r = vali_seqs.reshape((vali_seqs.shape[0], -1))
test_seqs_r = test_seqs.reshape((test_seqs.shape[0], -1))
aurocs_all = []
auprcs_all = []
for i in range(5):
accuracies = []
precisions = []
recalls = []
aurocs = []
auprcs = []
for col_num in range(train_targets.shape[1]):
estimator = RandomForestClassifier(n_estimators=100)
estimator.fit(train_seqs_r, train_targets[:,col_num])
accuracies.append(estimator.score(test_seqs_r, test_targets[:,col_num]))
preds = estimator.predict(test_seqs_r)
precisions.append(precision_score(y_pred=preds, y_true=test_targets[:,col_num]))
recalls.append(recall_score(y_pred=preds, y_true=test_targets[:,col_num]))
preds = estimator.predict_proba(test_seqs_r)
fpr, tpr, thresholds = roc_curve(test_targets[:,col_num], preds[:,1])
aurocs.append(auc(fpr, tpr))
precision, recall, thresholds = precision_recall_curve(test_targets[:,col_num], preds[:,1])
auprcs.append(auc(recall, precision))
print(accuracies)
print(precisions)
print(recalls)
print(aurocs)
print(auprcs)
aurocs_all.append(aurocs)
auprcs_all.append(auprcs)
real_aurocs_mean = np.array(aurocs_all).mean(axis=0)
real_aurocs_sem = sem(aurocs_all, axis=0)
real_auprcs_mean = np.array(auprcs_all).mean(axis=0)
real_auprcs_sem = sem(auprcs_all, axis=0)
print ("Experiment with random predictions.")
#random score
test_targets_random = copy.deepcopy(test_targets)
random.shuffle(test_targets_random)
accuracies = []
precisions = []
recalls = []
aurocs = []
auprcs = []
for col_num in range(train_targets.shape[1]):
accuracies.append(accuracy_score(y_pred=test_targets_random[:,col_num], y_true=test_targets[:,col_num]))
precisions.append(precision_score(y_pred=test_targets_random[:,col_num], y_true=test_targets[:,col_num]))
recalls.append(recall_score(y_pred=test_targets_random[:,col_num], y_true=test_targets[:,col_num]))
preds = np.random.rand(len(test_targets[:,col_num]))
fpr, tpr, thresholds = roc_curve(test_targets[:,col_num], preds)
aurocs.append(auc(fpr, tpr))
precision, recall, thresholds = precision_recall_curve(test_targets[:,col_num], preds)
auprcs.append(auc(recall, precision))
print(accuracies)
print(precisions)
print(recalls)
print(aurocs)
print(auprcs)
random_aurocs = aurocs
random_auprcs = auprcs
print("Results")
print("------------")
print("------------")
print("TSTR")
print(tstr_aurocs_mean)
print(tstr_aurocs_sem)
print(tstr_auprcs_mean)
print(tstr_auprcs_sem)
print("------------")
print("Real")
print(real_aurocs_mean)
print(real_aurocs_sem)
print(real_auprcs_mean)
print(real_auprcs_sem)
print("------------")
print("Random")
print(random_aurocs)
print(random_auprcs) | 2.484375 | 2 |
SciFiReaders/readers/microscopy/spm/afm/pifm.py | itsalexis962/SciFiReaders | 8 | 12761956 | # -*- coding: utf-8 -*-
"""
Created on Mon Sep 13 21:46:14 2021
@author: Raj
"""
import sidpy as sid
from sidpy.sid import Reader
from sidpy.sid import Dimension
import os
import numpy as np
import h5py
from pyNSID.io.hdf_io import write_nsid_dataset
from pyNSID.io.hdf_io import create_indexed_group, write_simple_attrs
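# A minimal usage sketch (hypothetical file path; assumes the sidpy Reader
# constructor stores the path to the ANFATEC parameter .txt file):
#   reader = PiFMTranslator('/path/to/scan_params.txt')
#   datasets = reader.read()
#   reader.create_h5(overwrite=True)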
class PiFMTranslator(Reader):
"""
    Class that writes images, spectrograms, point spectra and associated ancillary data sets to an h5 file in the
    pyNSID data structure.
"""
    def read(self):
        """
        Reads the PiFM data pointed to by this reader's input parameter file.

        Returns
        -------
        sidpy.Dataset : List of sidpy.Dataset objects.
            Image layers are saved as separate Dataset objects
        """
self.get_path()
self.read_anfatec_params()
self.read_file_desc()
self.read_spectrograms()
self.read_imgs()
self.read_spectra()
self.datasets = self.make_datasets()
return self.datasets
def create_h5(self, append_path='', overwrite=False):
"""
Writes a new HDF5 file with the translated data
append_path : string (Optional)
h5_file to add these data to, must be a path to the h5_file on disk
overwrite : bool (optional, default=False)
If True, will overwrite an existing .h5 file of the same name
"""
self.create_hdf5_file(append_path, overwrite)
self.write_datasets_hdf5()
return
def get_path(self):
"""writes full path, directory, and file name as attributes to class"""
self.path = self._input_file_path
full_path = os.path.realpath(self.path)
directory = os.path.dirname(full_path)
# file name
basename = os.path.basename(self.path)
self.full_path = full_path
self.directory = directory
self.basename = basename
def read_anfatec_params(self):
"""reads the scan parameters and writes them to a dictionary"""
params_dictionary = {}
params = True
with open(self.path, 'r', encoding="ISO-8859-1") as f:
for line in f:
if params:
sline = [val.strip() for val in line.split(':')]
if len(sline) == 2 and sline[0][0] != ';':
params_dictionary[sline[0]] = sline[1]
#in ANFATEC parameter files, all attributes are written before file references.
if sline[0].startswith('FileDesc'):
params = False
f.close()
self.params_dictionary = params_dictionary
self.x_len, self.y_len = int(params_dictionary['xPixel']), int(params_dictionary['yPixel'])
def read_file_desc(self):
"""reads spectrogram, image, and spectra file descriptions and stores all to dictionary where
the key:value pairs are filename:[all descriptors]"""
spectrogram_desc = {}
img_desc = {}
spectrum_desc = {}
pspectrum_desc = {}
with open(self.path,'r', encoding="ISO-8859-1") as f:
lines = f.readlines()
for index, line in enumerate(lines):
sline = [val.strip() for val in line.split(':')]
#if true, then file describes image.
if sline[0].startswith('FileDescBegin'):
no_descriptors = 5
file_desc = []
for i in range(no_descriptors):
line_desc = [val.strip() for val in lines[index+i+1].split(':')]
file_desc.append(line_desc[1])
#img_desc['filename'] = caption, scale, physical unit, offset
img_desc[file_desc[0]] = file_desc[1:]
#if true, file describes spectrogram (ie hyperspectral image)
if sline[0].startswith('FileDesc2Begin'):
no_descriptors = 10
file_desc = []
for i in range(no_descriptors):
line_desc = [val.strip() for val in lines[index+i+1].split(':')]
file_desc.append(line_desc[1])
#caption, bytes perpixel, scale, physical unit, offset, offset, datatype, bytes per reading
#filename wavelengths, phys units wavelengths.
spectrogram_desc[file_desc[0]] = file_desc[1:]
if sline[0].startswith('AFMSpectrumDescBegin'):
file_desc = []
line_desc = [val.strip() for val in lines[index+1].split(':')][1]
if 'powerspectrum' in line_desc:
no_descriptors = 2
for i in range(no_descriptors):
line_desc = [val.strip() for val in lines[index+i+1].split(':')]
file_desc.append(line_desc[1])
#file name, position x, position y
pspectrum_desc[file_desc[0]] = file_desc[1:]
else:
no_descriptors = 7
for i in range(no_descriptors):
line_desc = [val.strip() for val in lines[index+i+1].split(':')]
file_desc.append(line_desc[1])
#file name, position x, position y
spectrum_desc[file_desc[0]] = file_desc[1:]
f.close()
self.img_desc = img_desc
self.spectrogram_desc = spectrogram_desc
self.spectrum_desc = spectrum_desc
self.pspectrum_desc = pspectrum_desc
def read_spectrograms(self):
"""reads spectrograms, associated spectral values, and saves them in two dictionaries"""
spectrograms = {}
spectrogram_spec_vals = {}
for file_name, descriptors in self.spectrogram_desc.items():
spec_vals_i = np.loadtxt(os.path.join(self.directory, file_name.strip('.int') + 'Wavelengths.txt'))
#if true, data is acquired with polarizer, with an attenuation data column
if np.array(spec_vals_i).ndim == 2:
spectrogram_spec_vals[file_name] = spec_vals_i[:, 0]
attenuation = {}
attenuation[file_name] = spec_vals_i[:, 1]
self.attenuation = attenuation
else:
spectrogram_spec_vals[file_name] = spec_vals_i
#load and save spectrograms
spectrogram_i = np.fromfile(os.path.join(self.directory, file_name), dtype='i4')
spectrograms[file_name] = np.zeros((self.x_len, self.y_len, len(spec_vals_i)))
for y, line in enumerate(np.split(spectrogram_i, self.y_len)):
for x, pt_spectrum in enumerate(np.split(line, self.x_len)):
spectrograms[file_name][x, y, :] = pt_spectrum * float(descriptors[2])
self.spectrograms = spectrograms
self.spectrogram_spec_vals = spectrogram_spec_vals
def read_imgs(self):
"""reads images and saves to dictionary"""
imgs = {}
for file_name, descriptors in self.img_desc.items():
img_i = np.fromfile(os.path.join(self.directory, file_name), dtype='i4')
imgs[file_name] = np.zeros((self.x_len, self.y_len))
for y, line in enumerate(np.split(img_i, self.y_len)):
for x, pixel in enumerate(np.split(line, self.x_len)):
imgs[file_name][x, y] = pixel * float(descriptors[1])
self.imgs = imgs
def read_spectra(self):
"""reads all point spectra and saves to dictionary"""
spectra = {}
spectra_spec_vals = {}
spectra_x_y_dim_name = {}
for file_name, descriptors in self.spectrum_desc.items():
spectrum_f = np.loadtxt(os.path.join(self.directory, file_name), skiprows=1)
spectra_spec_vals[file_name] = spectrum_f[:, 0]
spectra[file_name] = spectrum_f[:,1]
with open(os.path.join(self.directory, file_name)) as f:
spectra_x_y_dim_name[file_name] = f.readline().strip('\n').split('\t')
for file_name, descriptors in self.pspectrum_desc.items():
spectrum_f = np.loadtxt(os.path.join(self.directory, file_name), skiprows=1)
spectra_spec_vals[file_name] = spectrum_f[:, 0]
spectra[file_name] = spectrum_f[:,1]
with open(os.path.join(self.directory, file_name)) as f:
spectra_x_y_dim_name[file_name] = f.readline().strip('\n').split('\t')
self.spectra = spectra
self.spectra_spec_vals = spectra_spec_vals
self.spectra_x_y_dim_name = spectra_x_y_dim_name
def make_datasets(self):
datasets = []
self.make_dimensions()
# Spectrograms
if bool(self.spectrogram_desc):
for spectrogram_f, descriptors in self.spectrogram_desc.items():
# channel_i = create_indexed_group(self.h5_meas_grp, 'Channel_')
spec_vals_i = self.spectrogram_spec_vals[spectrogram_f]
spectrogram_data = self.spectrograms[spectrogram_f]
dset = sid.Dataset.from_array(spectrogram_data, name=descriptors[0])
dset.data_type = 'Spectrogram'
dset.set_dimension(0, self.dim0)
                dset.set_dimension(1, self.dim1)
# spectrogram_spec_dims = Dimension('Wavelength', descriptors[8], spec_vals_i)
spectrogram_dims = Dimension(values=spec_vals_i, name='Spectrogram',
units=descriptors[3], quantity='Wavelength', type='spectral' )
dset.set_dimension(2, spectrogram_dims)
dset.metadata = {'Caption': descriptors[0],
'Bytes_Per_Pixel': descriptors[1],
'Scale': descriptors[2],
'Physical_Units': descriptors[3],
'Offset': descriptors[4],
'Datatype': descriptors[5],
'Bytes_Per_Reading': descriptors[6],
'Wavelength_File': descriptors[7],
'Wavelength_Units': descriptors[8]}
datasets.append(dset)
# Images
if bool(self.img_desc):
for img_f, descriptors in self.img_desc.items():
img_data = self.imgs[img_f]
dset = sid.Dataset.from_array(img_data, name = descriptors[0])
dset.data_type = 'Image'
dset.set_dimension(0, self.dim0)
dset.set_dimension(1, self.dim1)
dset.units = descriptors[2]
dset.quantity = descriptors[0]
dset.metadata = {'Caption': descriptors[0],
'Scale': descriptors[1],
'Physical_Units': descriptors[2],
'Offset': descriptors[3]}
datasets.append(dset)
# Spectra
if bool(self.spectrum_desc):
for spec_f, descriptors in self.spectrum_desc.items():
#create new measurement group for each spectrum
x_name = self.spectra_x_y_dim_name[spec_f][0].split(' ')[0]
x_unit = self.spectra_x_y_dim_name[spec_f][0].split(' ')[1]
y_name = self.spectra_x_y_dim_name[spec_f][1].split(' ')[0]
y_unit = self.spectra_x_y_dim_name[spec_f][1].split(' ')[1]
dset = sid.Dataset.from_array(self.spectra[spec_f], name = 'Raw_Spectrum')
dset.set_dimension(0, Dimension(np.array([float(descriptors[1])]),
name='X',units=self.params_dictionary['XPhysUnit'].replace('\xb5','u'),
quantity = 'X_position'))
dset.set_dimension(1, Dimension(np.array([float(descriptors[2])]),
name='Y',units=self.params_dictionary['YPhysUnit'].replace('\xb5','u'),
quantity = 'Y_position'))
dset.data_type = 'Spectrum'
dset.units = y_unit
dset.quantity = y_name
spectra_dims = Dimension(values=self.spectra_spec_vals[spec_f], name='Wavelength',
units=x_unit, quantity=x_name, type='spectral' )
dset.set_dimension(2, spectra_dims)
dset.metadata = {'XLoc': descriptors[1], 'YLoc': descriptors[2]}
datasets.append(dset)
# Power Spectra
if bool(self.pspectrum_desc):
for spec_f, descriptors in self.pspectrum_desc.items():
#create new measurement group for each spectrum
x_name = self.spectra_x_y_dim_name[spec_f][0].split(' ')[0]
x_unit = self.spectra_x_y_dim_name[spec_f][0].split(' ')[1]
y_name = self.spectra_x_y_dim_name[spec_f][1].split(' ')[0]
y_unit = self.spectra_x_y_dim_name[spec_f][1].split(' ')[1]
dset = sid.Dataset.from_array(self.spectra[spec_f], name = 'Power_Spectrum')
dset.set_dimension(0, Dimension(np.array([0]),
name='X',units=self.params_dictionary['XPhysUnit'].replace('\xb5','u'),
quantity = 'X_position'))
dset.set_dimension(1, Dimension(np.array([0]),
name='Y',units=self.params_dictionary['YPhysUnit'].replace('\xb5','u'),
quantity = 'Y_position'))
dset.data_type = 'Spectrum'
dset.units = y_unit
dset.quantity = y_name
spectra_dims = Dimension(values=self.spectra_spec_vals[spec_f], name='Wavelength',
units=x_unit, quantity=x_name, type='spectral' )
dset.set_dimension(2, spectra_dims)
dset.metadata = {'XLoc': 0, 'YLoc': 0}
datasets.append(dset)
return datasets
def make_dimensions(self):
x_range = float(self.params_dictionary['XScanRange'])
y_range = float(self.params_dictionary['YScanRange'])
x_center = float(self.params_dictionary['xCenter'])
y_center = float(self.params_dictionary['yCenter'])
x_start = x_center-(x_range/2); x_end = x_center+(x_range/2)
y_start = y_center-(y_range/2); y_end = y_center+(y_range/2)
dx = x_range/self.x_len
dy = y_range/self.y_len
#assumes y scan direction:down; scan angle: 0 deg
y_linspace = -np.arange(y_start, y_end, step=dy)
x_linspace = np.arange(x_start, x_end, step=dx)
qtyx = self.params_dictionary['XPhysUnit'].replace('\xb5', 'u')
qtyy = self.params_dictionary['YPhysUnit'].replace('\xb5', 'u')
self.dim0 = Dimension(x_linspace, name = 'x', units = qtyx,
dimension_type = 'spatial', quantity='Length')
self.dim1 = Dimension(y_linspace, name = 'y', units = qtyy,
dimension_type = 'spatial', quantity='Length')
# self.pos_ind, self.pos_val, self.pos_dims = pos_ind, pos_val, pos_dims
return
# HDF5 creation
def create_hdf5_file(self, append_path='', overwrite=False):
""" Sets up the HDF5 file for writing
append_path : string (Optional)
h5_file to add these data to, must be a path to the h5_file on disk
overwrite : bool (optional, default=False)
If True, will overwrite an existing .h5 file of the same name
"""
if not append_path:
h5_path = os.path.join(self.directory, self.basename.replace('.txt', '.h5'))
if os.path.exists(h5_path):
if not overwrite:
                    raise FileExistsError('This file already exists. Set attribute overwrite to True.')
else:
print('Overwriting file', h5_path)
#os.remove(h5_path)
self.h5_f = h5py.File(h5_path, mode='w')
else:
if not os.path.exists(append_path):
raise Exception('File does not exist. Check pathname.')
self.h5_f = h5py.File(append_path, mode='r+')
self.h5_img_grp = create_indexed_group(self.h5_f, "Images")
self.h5_spectra_grp = create_indexed_group(self.h5_f, "Spectra")
self.h5_spectrogram_grp = create_indexed_group(self.h5_f, "Spectrogram")
write_simple_attrs(self.h5_img_grp, self.params_dictionary)
write_simple_attrs(self.h5_spectra_grp, self.params_dictionary)
write_simple_attrs(self.h5_spectrogram_grp, self.params_dictionary)
return
def write_datasets_hdf5(self):
""" Writes the datasets as pyNSID datasets to the HDF5 file"""
for dset in self.datasets:
if 'IMAGE' in dset.data_type.name:
write_nsid_dataset(dset, self.h5_img_grp)
elif 'SPECTRUM' in dset.data_type.name:
write_nsid_dataset(dset, self.h5_spectra_grp)
else:
write_nsid_dataset(dset, self.h5_spectrogram_grp)
self.h5_f.file.close()
return | 2.328125 | 2 |
bench.py | Tijani-Dia/yrouter-bench | 0 | 12761957 | from django.urls import resolve, set_urlconf
from routes.falcon import falcon_router
from routes.sanic import sanic_router
from routes.werkzeug import werkzeug_router
from routes.yrouter import y_router
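# Each bench_* function below resolves the same set of URL paths against one
# router implementation; the timeit calls at the bottom compare total matching
# time across yrouter, Django, Sanic, Falcon and Werkzeug.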
def bench():
y_router.match("/")
y_router.match("/articles/2020/")
y_router.match("/articles/2015/")
y_router.match("/articles/2015/04/12/")
y_router.match("/articles/categories/sport/newest/")
y_router.match("/users/extra")
y_router.match("catchall")
y_router.match("/int/92")
y_router.match("/articles/2015/04/12/98/")
y_router.match("/users/extra/bog")
def bench_dj():
resolve("/")
resolve("/articles/2020/")
resolve("/articles/2015/")
resolve("/articles/2015/04/12/")
resolve("/articles/categories/sport/newest/")
resolve("/users/extra/")
resolve("/catchall")
resolve("/int/92")
try:
resolve("/articles/2015/04/12/98/")
except:
pass
try:
resolve("/users/extra/bog")
except:
pass
def bench_sanic():
sanic_router.get("/", method="BASE")
sanic_router.get("/articles/2020/", method="BASE")
sanic_router.get("/articles/2015/", method="BASE")
sanic_router.get("/articles/2015/04/12/", method="BASE")
sanic_router.get("/articles/categories/sport/newest/", method="BASE")
sanic_router.get("/users/extra", method="BASE")
sanic_router.get("/catchall", method="BASE")
sanic_router.get("/int/92", method="BASE")
try:
sanic_router.get("/articles/2015/04/12/98/", method="BASE")
except:
pass
try:
sanic_router.get("/users/extra/bog", method="BASE")
except:
pass
def bench_falcon():
falcon_router.find("/")
falcon_router.find("/articles/2020/")
falcon_router.find("/articles/2015/")
falcon_router.find("/articles/2015/04/12/")
falcon_router.find("/articles/categories/sport/newest/")
falcon_router.find("/users/extra")
falcon_router.find("/catchall")
falcon_router.find("/int/92")
falcon_router.find("/articles/2015/04/12/98")
falcon_router.find("/users/extra/bog")
def bench_werkzeug():
werkzeug_router.match("/")
werkzeug_router.match("/articles/2020/")
werkzeug_router.match("/articles/2015/")
werkzeug_router.match("/articles/2015/04/12/")
werkzeug_router.match("/articles/categories/sport/newest/")
werkzeug_router.match("/users/extra/")
werkzeug_router.match("/catchall/")
werkzeug_router.match("/int/92/")
try:
werkzeug_router.match("/articles/2015/04/12/98")
except:
pass
try:
werkzeug_router.match("/users/extra/bog")
except:
pass
if __name__ == "__main__":
import timeit
set_urlconf("routes.django")
print("yrouter is running...")
ytime = timeit.timeit("bench()", globals=globals(), number=10000)
print(f"Took {ytime} seconds.\n")
print("django is running...")
djtime = timeit.timeit("bench_dj()", globals=globals(), number=10000)
print(f"Took {djtime} seconds.\n")
print("sanic is running...")
sanic_time = timeit.timeit("bench_sanic()", globals=globals(), number=10000)
print(f"Took {sanic_time} seconds.\n")
print("falcon is running...")
falcon_time = timeit.timeit("bench_falcon()", globals=globals(), number=10000)
print(f"Took {falcon_time} seconds.\n")
print("werkzeug is running...")
werkzeug_time = timeit.timeit("bench_werkzeug()", globals=globals(), number=10000)
print(f"Took {werkzeug_time} seconds.\n")
| 2.046875 | 2 |
expusers.py | ZappedC64/expimpacct | 0 | 12761958 | #! /usr/bin/env python
# Script to export users from an existing system
import os, os.path
import sys
import tarfile
# Must be run as root.
if not os.geteuid() == 0:
sys.exit('This script must be run as root (or sudo)!')
def info_message(txtmessage):
print(txtmessage, end='')
def ok_message():
print(" [ OK ]")
# Open the password, group, and shadow files
# and store their contents in lists
info_message("Reading passwd file...")
with open('/etc/passwd') as fpwd:
fpwdfile = fpwd.readlines()
ok_message()
info_message("Reading group file...")
with open('/etc/group') as fgrp:
fgrpfile = fgrp.readlines()
ok_message()
info_message("Reading shadow file...")
with open('/etc/shadow') as fsha:
fshafile = fsha.readlines()
ok_message()
# Strip out newlines
fpwdfile = [x.strip() for x in fpwdfile]
fgrpfile = [x.strip() for x in fgrpfile]
fshafile = [x.strip() for x in fshafile]
# Parse the password file. Grab only UIDs between 1000 and 65534
info_message("Parsing passwd file. Looking for accounts with UIDs >= 1000...")
countl = 0
pwdlist = []
for line in fpwdfile:
countl += 1
fpwdsplit = (line.split(':'))
uidval = int(fpwdsplit[3])
if 1000 <= uidval < 65534:
pwdlist.append(line)
# Write the output to a new file
with open('passwd_mig.txt', "a") as fpwd_write:
for item in pwdlist:
fpwd_write.write(item + "\n")
ok_message()
# Parse the group file. Grab only GIDs between 1000 and 65534
info_message("Parsing group file. Looking for accounts with GID's >= 500...")
countl = 0
grplist = []
for line in fgrpfile:
countl += 1
fgrpsplit = (line.split(':'))
uidval = int(fgrpsplit[2])
userstr = str(fgrpsplit[0])
if 1000 <= uidval < 65534:
grplist.append(line)
# Write the output to a new file
with open('group_mig.txt', "a") as fgrp_write:
for item in grplist:
fgrp_write.write(item + "\n")
ok_message()
# Create and write new shadow file
info_message("Parsing shadow file. Looking for accounts that match the UIDs...")
usernames = [entry.split(':')[0] for entry in pwdlist]
with open('shadow_mig.txt', 'a') as fsha_write:
    # Keep only the shadow entries whose username matches an exported account
    for shline in fshafile:
        if shline.split(':')[0] in usernames:
            fsha_write.write(shline + '\n')
ok_message()
# Tar it all up
with tarfile.open("user_export.tgz", "w:gz") as tar:
info_message('Creating tar archive...')
for file in ["passwd_mig.txt", "group_mig.txt", "shadow_mig.txt"]:
tar.add(os.path.basename(file))
ok_message()
# Cleanup
info_message('Cleaning up temp files...')
if os.path.exists("passwd_mig.txt"):
os.remove("passwd_mig.txt")
if os.path.exists("group_mig.txt"):
os.remove("group_mig.txt")
if os.path.exists("shadow_mig.txt"):
os.remove("shadow_mig.txt")
ok_message()
| 2.78125 | 3 |
chapter4/snaps.py | chavo1/playground-python | 0 | 12761959 | # Some pygame helper functions for simple image display
# and sound effect playback
# <NAME> July 2017
# Version 1.0
import pygame
surface = None
def setup(width=800, height=600, title=''):
'''
Sets up the pygame environment
'''
global window_size
global back_color
global text_color
global image
global surface
# Don't initialise if we already have
if surface is not None:
return
window_size = (width, height)
back_color = (255, 255, 255)
text_color = (255, 0, 0)
image = None
# pre initialise pyGame's audio engine to avoid sound latency issues
pygame.mixer.pre_init(frequency=44100)
pygame.init()
# initialise pyGame's audio engine
pygame.mixer.init()
# Create the game surface
surface = pygame.display.set_mode(window_size)
clear_display()
pygame.display.set_caption(title)
def handle_events():
'''
Consume events that are generated by the pygame window
    These are not presently used for anything
'''
setup()
for event in pygame.event.get():
pass
def play_sound(filepath):
'''
Plays the specified sound file
'''
pygame.mixer.init()
sound = pygame.mixer.Sound(filepath)
sound.play()
def display_image(filepath):
'''
Displays the image from the given filepath
Starts pygame if required
May throw exceptions
'''
global surface
global window_size
global image
handle_events()
image = pygame.image.load(filepath)
image = pygame.transform.smoothscale(image, window_size)
surface.blit(image, (0, 0))
pygame.display.flip()
def clear_display():
'''
Clears the display to the background colour
and the image (if any) on top of it
'''
global surface
global image
global back_color
handle_events()
surface.fill(back_color)
if image is not None:
surface.blit(image, (0, 0))
def get_display_lines(text, font, width):
'''
Returns a list of strings which have been split
to fit the given window width using the supplied font
'''
space_width = font.size(' ')[0]
result = []
text_lines = text.splitlines()
for text_line in text_lines:
words = text_line.split()
x = 0
line = ''
for word in words:
word_width = font.size(word)[0]
if x + word_width > width:
# Remove the trailing space from the line
# before adding to the list of lines to return
line = line.strip()
result.append(line)
line = word + ' '
x = word_width + space_width
else:
line = line + word + ' '
x = x + word_width + space_width
if line != '':
# Got a partial line to add to the end
# Remove the trailing space from the line
# before adding to the list of lines to return
line = line.strip()
result.append(line)
return result
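# A minimal usage sketch for the wrapping helper above (assumes pygame has been
# initialised via setup() and that a pygame Font object is available):
#   font = pygame.font.Font(None, 48)
#   lines = get_display_lines('a long message to wrap', font, window_size[0] - 40)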
def display_message(text, size=200, margin=20, horiz='center', vert='center',
color=(255, 0, 0)):
'''
    Displays the text as a message.
    The size argument selects the font size of the text.
'''
global window_size
global surface
handle_events()
clear_display()
# Get the text version of the input
text = str(text)
font = pygame.font.Font(None, size)
available_width = window_size[0] - (margin * 2)
lines = get_display_lines(text, font, available_width)
rendered_lines = []
height = 0
for line in lines:
rendered_line = font.render(line, 1, color)
height += rendered_line.get_height()
rendered_lines.append(rendered_line)
if height > window_size[1]:
raise Exception('Text too large for window')
if vert == 'center':
y = (window_size[1] - height) / 2.0
elif vert == 'top':
y = margin
elif vert == 'bottom':
y=(window_size[1]-margin) - height
for rendered_line in rendered_lines:
width = rendered_line.get_width()
height = rendered_line.get_height()
if horiz == 'center':
x = (available_width - width) / 2.0 + margin
elif horiz == 'left':
x = margin
elif horiz == 'right':
            x = window_size[0] - width - margin
surface.blit(rendered_line, (x, y))
y += height
pygame.display.flip()
import urllib.request
import xml.etree.ElementTree
def get_weather_temp(latitude,longitude):
'''
Uses forecast.weather.gov to get the weather
for the specified latitude and longitude
'''
url="http://forecast.weather.gov/MapClick.php?lat={0}&lon={1}&unit=0&lg=english&FcstType=dwml".format(latitude,longitude)
req=urllib.request.urlopen(url)
page=req.read()
doc=xml.etree.ElementTree.fromstring(page)
# I'm not proud of this, but by gum it works...
for child in doc:
if child.tag == 'data':
if child.attrib['type'] == 'current observations':
for item in child:
if item.tag == 'parameters':
for i in item:
if i.tag == 'temperature':
if i.attrib['type'] == 'apparent':
for t in i:
if t.tag =='value':
return int(t.text)
def get_weather_desciption(latitude,longitude):
'''
Uses forecast.weather.gov to get the weather
for the specified latitude and longitude
'''
url="http://forecast.weather.gov/MapClick.php?lat={0}&lon={1}&unit=0&lg=english&FcstType=dwml".format(latitude,longitude)
req=urllib.request.urlopen(url)
page=req.read()
doc=xml.etree.ElementTree.fromstring(page)
# I'm not proud of this, but by gum it works...
for child in doc:
if child.tag == 'data':
if child.attrib['type'] == 'current observations':
for item in child:
if item.tag == 'parameters':
for i in item:
if i.tag == 'weather':
for t in i:
if t.tag == 'weather-conditions':
if t.get('weather-summary') is not None:
return t.get('weather-summary')
| 3.421875 | 3 |
invenio_celery/config.py | ppanero/invenio-celery | 0 | 12761960 | # -*- coding: utf-8 -*-
#
# This file is part of Invenio.
# Copyright (C) 2015-2018 CERN.
#
# Invenio is free software; you can redistribute it and/or modify it
# under the terms of the MIT License; see LICENSE file for more details.
"""Default configuration values for Celery integration.
For further Celery configuration variables see
`Celery <http://docs.celeryproject.org/en/3.1/configuration.html>`_
documentation.
"""
BROKER_URL = 'redis://localhost:6379/0'
CELERY_BROKER_URL = BROKER_URL # For Celery 4
"""Broker settings."""
CELERY_RESULT_BACKEND = 'redis://localhost:6379/1'
"""The backend used to store task results."""
CELERY_ACCEPT_CONTENT = ['json', 'msgpack', 'yaml']
"""A whitelist of content-types/serializers."""
CELERY_RESULT_SERIALIZER = 'msgpack'
"""Result serialization format. Default is ``msgpack``."""
CELERY_TASK_SERIALIZER = 'msgpack'
"""The default serialization method to use. Default is ``msgpack``."""
| 1.757813 | 2 |
setup.py | gams/openkongqi | 4 | 12761961 | # -*- coding: utf-8 -*-
from codecs import open
from os import path
from setuptools import setup, find_packages
import openkongqi as okq
# local path
here = path.abspath(path.dirname(__file__))
# Get the long description from the README file
with open(path.join(here, 'README.rst')) as fd:
long_description = fd.read()
requirements = [
"beautifulsoup4==4.9.3",
"celery>=5.0.5",
"hiredis==1.1.0",
"html5lib==1.1",
"pytz>=2020.5",
"redis==3.5.3",
"six>=1.13.0",
"sqlalchemy>=1.3.23",
]
setup(
name=okq.__name__,
version=okq.__version__,
author=okq.__author__,
author_email=okq.__contact__,
license="Apache License 2.0",
packages=find_packages(exclude=['docs', 'test*']),
url="https://github.com/gams/openkongqi",
description="Outdoor air quality data",
long_description=long_description,
install_requires=requirements,
classifiers=[
'Development Status :: 1 - Planning',
'Environment :: Console',
'Framework :: Sphinx',
'Intended Audience :: Developers',
'Intended Audience :: Healthcare Industry',
'Intended Audience :: Science/Research',
'Natural Language :: Chinese (Simplified)',
'Natural Language :: English',
'Operating System :: OS Independent',
'Programming Language :: Python :: 2.7',
'Topic :: Text Processing :: Markup :: HTML',
'Topic :: Text Processing :: Markup :: XML',
],
keywords="air quality",
package_data={
'openkongqi': [
'data/user_agent_strings.json',
'data/sources/pm25.in.json',
'openkongqi/data/stations/cn/shanghai.json'
],
},
entry_points={
'console_scripts': [
"okq-server=openkongqi.bin:okq_server",
"okq-init=openkongqi.bin:okq_init",
"okq-source-test=utils.source_test:main",
]
},
include_package_data=True,
)
| 1.484375 | 1 |
spartan/model/holoscope/matricizationSVD.py | sunxiaobing1999/spartan2 | 60 | 12761962 | import sys,math
import numpy as np
import scipy.sparse.linalg as slin
from scipy.sparse import coo_matrix, csr_matrix, csc_matrix
from svddenseblock import *
from mytools.ioutil import myreadfile
from os.path import expanduser
home = expanduser("~")
def loadtensor2matricization(tensorfile, sumout=[], mtype=coo_matrix,
weighted=True, dtype=int):
'sumout: marginized (sumout) the given ways'
matcols={}
rindexcols={}
xs, ys, data = [], [], []
with myreadfile(tensorfile, 'rb') as f:
for line in f:
elems = line.strip().split(',')
elems = np.array(elems)
u = int(elems[0])
colidx = range(1,len(elems)-1) #remove sumout
colidx = set(colidx) - set(list(sumout))
colidx = sorted(list(colidx))
col=' '.join(elems[colidx])
if col not in matcols:
idx = len(matcols)
matcols[col] = idx
rindexcols[idx]=col
cid = matcols[col]
w = dtype(elems[-1])
xs.append(u)
ys.append(cid)
data.append(w)
nrow, ncol = max(xs)+1, max(ys)+1
sm = mtype( (data, (xs, ys)), shape=(nrow, ncol), dtype=dtype )
if weighted is False:
sm.data[0:] = dtype(1)
f.close()
return sm, rindexcols
def matricizeSVDdenseblock(sm, rindexcols, rbd='avg'):
A, tmpB = svddenseblock(sm, rbd=rbd)
rows = A.nonzero()[0]
cols = tmpB.nonzero()[0]
bcols = set()
for col in cols:
'col name'
cnm = rindexcols[col]
cnm = cnm.strip().split(' ')
b = int(cnm[0])
bcols.add(b)
return set(rows), set(bcols)
if __name__=="__main__":
path = home+'/Data/BeerAdvocate/'
respath= path+'results/'
tsfile = path+'userbeerts.dict'
ratefile = path+'userbeerrate.dict'
tensorfile =respath+'userbeer.tensor'
sm, rindexcols = loadtensor2matricization(tensorfile,
sumout=[3],mtype=csr_matrix,
dtype=float,weighted=True)
A, B = matricizeSVDdenseblock(sm, rindexcols, rbd='avg')
| 2.125 | 2 |
readJTAG.py | BerkeleyLab/XVC-FTDI-JTAG | 11 | 12761963 | <gh_stars>10-100
#!/usr/bin/env python
# XVC FTDI JTAG Copyright (c) 2021, The Regents of the University of
# California, through Lawrence Berkeley National Laboratory (subject to
# receipt of any required approvals from the U.S. Dept. of Energy). All
# rights reserved.
#
# If you have questions about your rights to use or distribute this software,
# please contact Berkeley Lab's Intellectual Property Office at
# <EMAIL>.
#
# NOTICE. This Software was developed under funding from the U.S. Department
# of Energy and the U.S. Government consequently retains certain rights. As
# such, the U.S. Government has been granted for itself and others acting on
# its behalf a paid-up, nonexclusive, irrevocable, worldwide license in the
# Software to reproduce, distribute copies to the public, prepare derivative
# works, and perform publicly and display publicly, and to permit others to
# do so.
from __future__ import print_function
import socket
import time
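# XVC "shift:" requests are framed as <bit count (little-endian uint32)><TMS
# vector><TDI vector>; the sequences below reset the TAP, walk it into
# Shift-DR, and clock out 32 bits to read back the device IDCODE.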
xvcGetinfo = bytearray(b'getinfo:')
xvcResetTap = bytearray(b'shift:\x06\x00\x00\x00\x1F\x3F')
xvcShifToID = bytearray(b'shift:\x05\x00\x00\x00\x02\x1F')
xvcGetID = bytearray(b'shift:\x20\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00')
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock.connect(("127.0.0.1", 2542))
for cmd in (xvcGetinfo, xvcResetTap, xvcShifToID):
sock.send(cmd)
print(sock.recv(100))
sock.send(xvcGetID)
id = bytearray(sock.recv(100))
print("%02X%02X%02X%02X"%(id[3], id[2], id[1], id[0]))
| 2.234375 | 2 |
agd/he.py | b3rt01ac3/agd-he | 0 | 12761964 | import numpy as np
from typing import Tuple
from agd.seal.seal import Evaluator, Ciphertext, CKKSEncoder, \
GaloisKeys, RelinKeys
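# acg_qp: Nesterov-style accelerated gradient descent for the quadratic
# objective f(x) = x^T Q x + p^T x with smoothness beta and strong convexity
# alpha (momentum derived from the condition number kappa = beta/alpha).
# he_acg_qp below sketches the same iteration over CKKS ciphertexts, but its
# homomorphic gradient-update step is still commented out / incomplete.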
def acg_qp(Q: np.ndarray, p: np.ndarray, beta: float, alpha: float, n: int, x0: np.ndarray) -> Tuple[np.ndarray, np.ndarray]:
def f(x): return x.T.dot(Q).dot(x) + p.dot(x)
def df(x): return Q.dot(x) + p
x = x0
y = x0
kappa = beta/alpha
gamma = (kappa**0.5-1)/(kappa**0.5+1)
for t in range(n):
y_ = y
y = x - 1/beta * df(x)
x_ = x
x = (1 + gamma) * y - gamma * y_
tol = np.abs(f(x) - f(x_))
print("The error after {} steps is: {} ".format(n, tol))
return x, tol
def he_acg_qp(Q: Ciphertext, p: Ciphertext, beta: float, kappa: float, n: int, c0: Ciphertext, evaluator: Evaluator, encoder: CKKSEncoder, gal_keys: GaloisKeys, relin_keys: RelinKeys) -> Tuple[Ciphertext, Ciphertext]:
c = c0
d = c0
gamma = (kappa**0.5-1)/(kappa**0.5+1)
for t in range(n):
d_ = d
# d = evaluator.add_inplace(c, - 1/beta * df(c)
c_ = c
c = (1 + gamma) * d - gamma * d_
tol = np.abs(f(c) - f(c_))
return c, tol | 2.390625 | 2 |
ee/api/chalicelib/blueprints/bp_ee_crons.py | nogamenofun98/openreplay | 3,614 | 12761965 | <gh_stars>1000+
from chalice import Blueprint
from chalice import Cron
from chalicelib import _overrides
app = Blueprint(__name__)
_overrides.chalice_app(app) | 1.351563 | 1 |
BurnieYilmazRS19/dataPrep/REDDIT/dailyWordFreq3.py | Charles0009/crypto_finance_analysis | 0 | 12761966 | # ----------------------------------------------------------------------------------
# # Calculating Word Frequencies
# ----------------------------------------------------------------------------------
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from text.dataHandling import DataHandling
from text.vocabCounter import vocabCounter
from collections import Counter
from itertools import chain
def dailywordfreq(start, end, name_for_saving_processed):
print("Collecting Data")
dataObj = DataHandling()
dataObj.collectData(dataPath='/mnt/c/Users/charl/Desktop/finance_perso/BurnieYilmazRS19/dataPrep/REDDIT/data/processing/terms/', regexpr=r'^subTerms', verbose=True)
dataObj.aggData()
data = dataObj.selectFirstFrame()
del dataObj
print("REMOVE SUBMISSIONS BLANK AFTER PROCESSING")
data = data[data.text.apply(lambda x: x != [])]
print("CREATE VOCAB")
vc = vocabCounter(
rawData = data,
start = start,
end = end,
# start=1640023150,
# end=1640823600,
step = 86400
)
#vc.getRaw().to_csv('/mnt/c/Users/charl/Desktop/finance_perso/BurnieYilmazRS19/dataPrep/REDDIT/data/processing/tokenFreq/daily8888.csv')
del data
print("AT MOST ONE TERM / SUBMISSION")
vc.oneTokenPerSubmission()
print(vc.getRaw())
print("CREATE COUNTER OBJECT")
vc.createCountData()
dataf = vc.getCountData()
dataf.to_pickle(name_for_saving_processed)
# df2 = pd.read_pickle('/mnt/c/Users/charl/Desktop/finance_perso/BurnieYilmazRS19/dataPrep/REDDIT/data/processing/tokenFreq/dailyTokenFreq_041218.pkl')
# df2.to_csv('/mnt/c/Users/charl/Desktop/finance_perso/BurnieYilmazRS19/dataPrep/REDDIT/data/processing/tokenFreq/dailyTokenFreq_0666.csv')
# dailywordfreq( start=1630023150,
# end=1640823600 ) | 2.828125 | 3 |
src/scheduler/serializers.py | ShizhuZhang/ontask_b | 1 | 12761967 | # -*- coding: UTF-8 -*-#
from __future__ import unicode_literals, print_function
import datetime
import pytz
from django.utils.translation import ugettext_lazy as _, ugettext
from rest_framework import serializers
from rest_framework.exceptions import APIException
from django.conf import settings
from validate_email import validate_email
from action.models import Action
from dataops.pandas_db import execute_select_on_table
from scheduler.models import ScheduledAction
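# These serializers validate incoming scheduling requests: a ScheduledAction
# needs an owning action, a future 'execute' time, an optional item column
# (with optional exclude_values), and a non-empty JSON 'payload' (e.g. an email
# 'subject' or an outgoing-request 'token', depending on the action type).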
class ScheduledActionSerializer(serializers.ModelSerializer):
"""
Serializer to take care of a few fields and the item column
"""
item_column = serializers.CharField(source='item_column_name',
required=False)
def instantiate_or_update(self,
validated_data,
action,
execute,
item_column,
exclude_values,
payload,
scheduled_obj=None):
"""
Given the validated data and a set of parameters that have been
validated, instantiate or update the object of class ScheduledAction.
:param validated_data: Data obtained by the serializer
:param action: Action object
:param execute: Execution date/time
:param item_column: Item column object (if given)
        :param exclude_values: List of values from item_column to exclude
:param payload: JSON object
:param scheduled_obj: Object to instantiate or update
:return: instantiated object
"""
if not scheduled_obj:
scheduled_obj = ScheduledAction()
scheduled_obj.user = self.context['request'].user
scheduled_obj.name = validated_data['name']
scheduled_obj.description_text = validated_data.get('description_text')
scheduled_obj.action = action
scheduled_obj.execute = execute
scheduled_obj.item_column = item_column
scheduled_obj.exclude_values = exclude_values
scheduled_obj.payload = payload
scheduled_obj.status = ScheduledAction.STATUS_PENDING
scheduled_obj.save()
return scheduled_obj
def extra_validation(self, validated_data):
"""
Checking for extra validation properties in the information contained in
the validated data. Namely:
- The action name corresponds with a valid action for the user.
- The execute time must be in the future
- The item_column, if present, must be a correct column name
- Exclude_values must be a list
- Exclude_values can only be non-empty if item_column is given.
- The received object has a payload
:param validated_data:
:return: action, execute, item_column, exclude_values, payload
"""
# Get the action
action = validated_data['action']
if action.workflow.user != self.context['request'].user:
            # The action does not belong to the requesting user.
raise APIException(_('Incorrect permission to manipulate action.'))
# Execution date must be in the future
execute = validated_data.get('execute', None)
now = datetime.datetime.now(pytz.timezone(settings.TIME_ZONE))
if not execute or execute <= now:
raise APIException(_('Invalid date/time for execution'))
# Item_column, if present has to be a correct column name
item_column = validated_data.get('item_column_name')
if item_column:
item_column = action.workflow.columns.filter(
name=item_column
).first()
if not item_column:
raise APIException(_('Invalid column name for selecting items'))
exclude_values = validated_data.get('exclude_values')
# Exclude_values has to be a list
if exclude_values and not isinstance(exclude_values, list):
raise APIException(_('Exclude_values must be a list'))
# Exclude_values can only have content if item_column is given.
if not item_column and exclude_values:
raise APIException(_('Exclude items needs a value in item_column'))
# Check that the received object has a payload
payload = validated_data.get('payload', {})
if not payload:
raise APIException(_('Scheduled objects needs a payload.'))
return action, execute, item_column, exclude_values, payload
def create(self, validated_data, **kwargs):
action, execute, item_column, exclude_values, payload = \
self.extra_validation(validated_data)
try:
scheduled_obj = self.instantiate_or_update(validated_data,
action,
execute,
item_column,
exclude_values,
payload)
except Exception as e:
raise APIException(
ugettext('Scheduled action could not be created: {0}').format(
e.message)
)
return scheduled_obj
def update(self, instance, validated_data):
action, execute, item_column, exclude_values, payload = \
self.extra_validation(validated_data)
try:
instance = self.instantiate_or_update(validated_data,
action,
execute,
item_column,
exclude_values,
payload,
instance)
# Save the object
instance.save()
except Exception as e:
raise APIException(
ugettext('Unable to update scheduled action: {0}'.format(
e.message
))
)
return instance
class Meta:
model = ScheduledAction
fields = ('id', 'name', 'description_text', 'action', 'execute',
'item_column', 'exclude_values', 'payload')
class ScheduledEmailSerializer(ScheduledActionSerializer):
def extra_validation(self, validated_data):
action, execute, item_column, exclude_values, payload = \
super(ScheduledEmailSerializer, self).extra_validation(
validated_data
)
if action.action_type != Action.PERSONALIZED_TEXT:
raise APIException(_('Incorrect type of action to schedule.'))
subject = payload.get('subject')
if not subject:
raise APIException(_('Personalized text needs a subject.'))
if not item_column:
raise APIException(_('Personalized text needs a item_column'))
# Check if the values in the email column are correct emails
try:
column_data = execute_select_on_table(
action.workflow.id,
[],
[],
column_names=[item_column.name])
if not all([validate_email(x[0]) for x in column_data]):
# column has incorrect email addresses
raise APIException(
_('The column with email addresses has incorrect values.')
)
except TypeError:
raise APIException(
_('The column with email addresses has incorrect values.')
)
if not all([validate_email(x)
for x in payload.get('cc_email', []) if x]):
raise APIException(
_('cc_email must be a comma-separated list of emails.')
)
if not all([validate_email(x)
for x in payload.get('bcc_email', []) if x]):
raise APIException(
_('bcc_email must be a comma-separated list of emails.')
)
return action, execute, item_column, exclude_values, payload
class ScheduledJSONSerializer(ScheduledActionSerializer):
def extra_validation(self, validated_data):
action, execute, item_column, exclude_values, payload = \
super(ScheduledJSONSerializer, self).extra_validation(
validated_data
)
if action.action_type != Action.PERSONALIZED_JSON:
raise APIException(_('Incorrect type of action to schedule.'))
token = payload.get('token')
if not token:
raise APIException(_('Personalized JSON needs a token in payload.'))
return action, execute, item_column, exclude_values, payload
| 2.25 | 2 |
js/angular_ui_calendar/__init__.py | fanstatic/js.angular_ui_calendar | 0 | 12761968 | from fanstatic import Library, Resource
import js.angular
import js.fullcalendar
library = Library('angular-ui-calendar', 'resources')
angular_ui_calendar = Resource(
library,
'calendar.js',
depends=[js.angular.angular, js.fullcalendar.fullcalendar])
| 1.585938 | 2 |
test/test_xhtml.py | jubalh/poezio | 50 | 12761969 | <reponame>jubalh/poezio
"""
Test the functions in the `xhtml` module
"""
import pytest
import xml
import poezio.xhtml
from poezio.xhtml import (poezio_colors_to_html, xhtml_to_poezio_colors,
_parse_css as parse_css, clean_text)
class ConfigShim:
def __init__(self):
self.value = True
def get(self, *args, **kwargs):
return self.value
def getbool(self, *args, **kwargs):
return self.value
config = ConfigShim()
poezio.xhtml.config = config
def test_clean_text():
example_string = '\x191}Toto \x192,-1}titi\x19b Tata'
assert clean_text(example_string) == 'Toto titi Tata'
clean_string = 'toto titi tata'
assert clean_text(clean_string) == clean_string
def test_poezio_colors_to_html():
base = "<body xmlns='http://www.w3.org/1999/xhtml'><p>"
end = "</p></body>"
text = '\x191}coucou'
assert poezio_colors_to_html(text) == base + '<span style="color: red;">coucou</span>' + end
text = '\x19bcoucou\x19o toto \x194}titi'
assert poezio_colors_to_html(text) == base + '<span style="font-weight: bold;">coucou</span> toto <span style="color: blue;">titi</span>' + end
text = '\x19icoucou'
assert poezio_colors_to_html(text) == base + '<span style="font-style: italic;">coucou</span>' + end
def test_xhtml_to_poezio_colors():
start = b'<body xmlns="http://www.w3.org/1999/xhtml"><p>'
end = b'</p></body>'
xhtml = start + b'test' + end
assert xhtml_to_poezio_colors(xhtml) == 'test'
xhtml = start + b'<a href="http://perdu.com">salut</a>' + end
assert xhtml_to_poezio_colors(xhtml) == '\x19usalut\x19o (http://perdu.com)'
xhtml = start + b'<a href="http://perdu.com">http://perdu.com</a>' + end
assert xhtml_to_poezio_colors(xhtml) == '\x19uhttp://perdu.com\x19o'
xhtml = start + b'<span style="font-style: italic">Test</span>' + end
assert xhtml_to_poezio_colors(xhtml) == '\x19iTest\x19o'
xhtml = b'<div style="font-weight:bold">Allo <div style="color:red">test <div style="color: blue">test2</div></div></div>'
assert xhtml_to_poezio_colors(xhtml, force=True) == '\x19bAllo \x19196}test \x1921}test2\x19o'
xhtml = (b'<div style="color:blue"><div style="color:yellow">'
b'<div style="color:blue">Allo <div style="color:red">'
b'test <div style="color: blue">test2</div></div></div></div></div>')
assert xhtml_to_poezio_colors(xhtml, force=True) == '\x1921}Allo \x19196}test \x1921}test2\x19o'
with pytest.raises(xml.sax._exceptions.SAXParseException):
xhtml_to_poezio_colors(b'<p>Invalid xml')
def test_xhtml_to_poezio_colors_disabled():
config.value = False
start = b'<body xmlns="http://www.w3.org/1999/xhtml"><p>'
end = b'</p></body>'
xhtml = start + b'test' + end
assert xhtml_to_poezio_colors(xhtml) == 'test'
xhtml = start + b'<a href="http://perdu.com">salut</a>' + end
assert xhtml_to_poezio_colors(xhtml) == '\x19usalut\x19o (http://perdu.com)'
xhtml = start + b'<a href="http://perdu.com">http://perdu.com</a>' + end
assert xhtml_to_poezio_colors(xhtml) == '\x19uhttp://perdu.com\x19o'
xhtml = b'<div style="font-weight:bold">Allo <div style="color:red">test <div style="color: blue">test2</div></div></div>'
assert xhtml_to_poezio_colors(xhtml, force=True) == 'Allo test test2'
xhtml = (b'<div style="color:blue"><div style="color:yellow">'
b'<div style="color:blue">Allo <div style="color:red">'
b'test <div style="color: blue">test2</div></div></div></div></div>')
assert xhtml_to_poezio_colors(xhtml, force=True) == 'Allo test test2'
config.value = True
def test_parse_css():
example_css = 'text-decoration: underline; color: red;'
assert parse_css(example_css) == '\x19u\x19196}'
example_css = 'text-decoration: underline coucou color: red;'
assert parse_css(example_css) == ''
| 2.671875 | 3 |
bb-master/sandbox/lib/python3.5/site-packages/buildbot/worker/hyper.py | Alecto3-D/testable-greeter | 2 | 12761970 | <reponame>Alecto3-D/testable-greeter<filename>bb-master/sandbox/lib/python3.5/site-packages/buildbot/worker/hyper.py
# This file is part of Buildbot. Buildbot is free software: you can
# redistribute it and/or modify it under the terms of the GNU General Public
# License as published by the Free Software Foundation, version 2.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
# details.
#
# You should have received a copy of the GNU General Public License along with
# this program; if not, write to the Free Software Foundation, Inc., 51
# Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
# Copyright Buildbot Team Members
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import time
from twisted.internet import reactor as global_reactor
from twisted.internet import defer
from twisted.internet import threads
from twisted.python import threadpool
from buildbot import config
from buildbot.interfaces import LatentWorkerFailedToSubstantiate
from buildbot.util import service
from buildbot.util.logger import Logger
from buildbot.worker.docker import DockerBaseWorker
try:
import docker # noqa pylint: disable=unused-import
from docker.errors import NotFound
from hyper_sh import Client as Hyper
except ImportError:
Hyper = None
log = Logger()
class HyperLatentManager(service.SharedService):
"""A shared service class that manages all the connections to the hyper cloud
There is one instance of this manager per host, accesskey, secretkey tuple.
    This manager manages its own thread pool, as Hyper_sh is blocking.
You can change the maximum number of concurrent access to hyper using
import buildbot.worker.hyper
buildbot.worker.hyper.HyperLatentManager.MAX_THREADS = 1
    This feature is undocumented for now, as we are not sure if this is the ideal API.
"""
MAX_THREADS = 5
def __init__(self, hyper_host, hyper_accesskey, hyper_secretkey):
service.SharedService.__init__(self)
# Prepare the parameters for the Docker Client object.
self._client_args = {'clouds': {
hyper_host: {
"accesskey": hyper_accesskey,
"secretkey": hyper_secretkey
}
}}
def startService(self):
self._threadPool = threadpool.ThreadPool(
minthreads=1, maxthreads=self.MAX_THREADS, name='hyper')
self._threadPool.start()
self._client = Hyper(self._client_args)
@property
def client(self):
return self._client
def stopService(self):
self.client.close()
return self._threadPool.stop()
def deferToThread(self, reactor, meth, *args, **kwargs):
return threads.deferToThreadPool(reactor, self._threadPool, meth, *args, **kwargs)
class HyperLatentWorker(DockerBaseWorker):
"""hyper.sh is a docker CaaS company"""
instance = None
ALLOWED_SIZES = ['s1', 's2', 's3', 's4',
'm1', 'm2', 'm3', 'l1', 'l2', 'l3']
image = None
reactor = global_reactor
def checkConfig(self, name, password, hyper_host,
hyper_accesskey, hyper_secretkey, image, hyper_size="s3", masterFQDN=None, **kwargs):
DockerBaseWorker.checkConfig(self, name, password, image=image, masterFQDN=masterFQDN, **kwargs)
if not Hyper:
config.error("The python modules 'docker-py>=1.4' and 'hyper_sh' are needed to use a"
" HyperLatentWorker")
if hyper_size not in self.ALLOWED_SIZES:
config.error("Size is not valid {!r} vs {!r}".format(
hyper_size, self.ALLOWED_SIZES))
@property
def client(self):
if self.manager is None:
return None
return self.manager.client
@defer.inlineCallbacks
def reconfigService(self, name, password, hyper_host,
hyper_accesskey, hyper_secretkey, image, hyper_size="s3", masterFQDN=None, **kwargs):
yield DockerBaseWorker.reconfigService(self, name, password, image=image,
masterFQDN=masterFQDN, **kwargs)
self.manager = yield HyperLatentManager.getService(self.master, hyper_host, hyper_accesskey,
hyper_secretkey)
self.size = hyper_size
def deferToThread(self, meth, *args, **kwargs):
return self.manager.deferToThread(self.reactor, meth, *args, **kwargs)
@defer.inlineCallbacks
def start_instance(self, build):
image = yield build.render(self.image)
yield self.deferToThread(self._thd_start_instance, image)
defer.returnValue(True)
def _thd_cleanup_instance(self):
container_name = self.getContainerName()
instances = self.client.containers(
all=1,
filters=dict(name=container_name))
for instance in instances:
# hyper filtering will match 'hyper12" if you search for 'hyper1' !
if "".join(instance['Names']).strip("/") != container_name:
continue
try:
self.client.remove_container(instance['Id'], v=True, force=True)
except NotFound:
pass # that's a race condition
except docker.errors.APIError as e:
if "Conflict operation on container" not in str(e):
raise
# else: also race condition.
def _thd_start_instance(self, image):
t1 = time.time()
self._thd_cleanup_instance()
t2 = time.time()
instance = self.client.create_container(
image,
environment=self.createEnvironment(),
labels={
'sh_hyper_instancetype': self.size
},
name=self.getContainerName()
)
t3 = time.time()
if instance.get('Id') is None:
raise LatentWorkerFailedToSubstantiate(
'Failed to start container'
)
instance['image'] = image
self.instance = instance
self.client.start(instance)
t4 = time.time()
log.debug('{name}:{containerid}: Container started in {total_time:.2f}', name=self.name,
containerid=self.shortid,
clean_time=t2 - t1, create_time=t3 - t2, start_time=t4 - t3, total_time=t4 - t1)
return [instance['Id'], image]
def stop_instance(self, fast=False):
if self.instance is None:
# be gentle. Something may just be trying to alert us that an
# instance never attached, and it's because, somehow, we never
# started.
return defer.succeed(None)
return self.deferToThread(self._thd_stop_instance, fast)
def _thd_stop_instance(self, fast):
if self.instance is None:
return
log.debug('{name}:{containerid}: Stopping container', name=self.name,
containerid=self.shortid)
t1 = time.time()
try:
self.client.stop(self.instance['Id'])
except NotFound:
# That's ok. container was already deleted, probably by an admin
# lets fail nicely
log.warn('{name}:{containerid}: container was already deleted!', name=self.name,
containerid=self.shortid)
self.instance = None
return
t2 = time.time()
if not fast:
self.client.wait(self.instance['Id'])
t3 = time.time()
self.client.remove_container(self.instance['Id'], v=True, force=True)
t4 = time.time()
log.debug('{name}:{containerid}: Stopped container in {total_time:.2f}', name=self.name,
containerid=self.shortid,
stop_time=t2 - t1, wait_time=t3 - t2, remove_time=t4 - t3, total_time=t4 - t1)
self.instance = None
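# Illustrative master.cfg snippet for this worker; the host, credentials and
# image below are placeholders, not values taken from this repository:
#
#   from buildbot.worker.hyper import HyperLatentWorker
#   c['workers'] = [
#       HyperLatentWorker('hyper-worker-1', 'workerpass',
#                         hyper_host='tcp://us-west-1.hyper.sh:443',
#                         hyper_accesskey='<ACCESSKEY>', hyper_secretkey='<SECRETKEY>',
#                         image='buildbot/buildbot-worker', hyper_size='s3')
#   ]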
| 1.664063 | 2 |
notifications/email_constants.py | bfortuner/VOCdetect | 336 | 12761971 | <gh_stars>100-1000
import config
import constants as c
WEBSITE_URL = config.KIBANA_URL
ADMIN_EMAIL = config.ADMIN_EMAIL
USER_EMAIL = config.USER_EMAIL
EMAIL_CHARSET = 'UTF-8'
HEADER="<html>"
FOOTER="</html>"
EXPERIMENT_STATUS_EMAIL_TEMPLATE="""
<p>Hello,</p>
<p>Your experiment has ended.</p>
<p><b>Name:</b> %s</p>
<p><b>Status:</b> %s</p>
<p><b>Status Msg:</b> %s</p>
<p><a href="%s">View Dashboard</a></p>
<p><b>Experiment Results:</b></p>
<p>%s</p>
<p><b>Experiment Config:</b></p>
<p>%s</p>
<p><b>Thanks,<br>
Team</p>
"""
EXPERIMENT_STATUS_EMAIL_BODY = (
HEADER + EXPERIMENT_STATUS_EMAIL_TEMPLATE + FOOTER
)
EXPERIMENT_STATUS_EMAIL ={
'subject' : 'New Experiment Results',
'body' : EXPERIMENT_STATUS_EMAIL_BODY
}
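# Illustrative usage; the six %s slots of the body template are, in order:
# experiment name, status, status message, dashboard URL, results, config.
# All variables except WEBSITE_URL below are placeholders.
#
# body = EXPERIMENT_STATUS_EMAIL['body'] % (
#     exp_name, status, status_msg, WEBSITE_URL, results_str, config_str)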
| 1.78125 | 2 |
SmileGAN/Smile_GAN_clustering.py | zhijian-yang/SmileGAN | 10 | 12761972 | import sys
import os
import numpy as np
from sklearn import metrics
from .model import SmileGAN
from .utils import highest_matching_clustering, consensus_clustering, parse_validation_data
from .clustering import Smile_GAN_train
__author__ = "<NAME>"
__copyright__ = "Copyright 2019-2020 The CBICA & SBIA Lab"
__credits__ = ["<NAME>"]
__license__ = "See LICENSE file"
__version__ = "0.1.0"
__maintainer__ = "<NAME>"
__email__ = "<EMAIL>"
__status__ = "Development"
def model_filtering(model_dirs, ncluster, data, covariate=None):
"""
Function used for filter out models who have significantly different clustering results with others.
This function deal with rare failing cases of Smile-GAN
Args:
model_dirs: list, list of dirs of all saved models
ncluster: int, number of defined clusters
data, data_frame, dataframe with same format as training data. CN data must be exactly same as CN data in training dataframe while
PT data can be any samples in or out of the training set.
covariate, data_frame, dataframe with same format as training covariate. CN data must be exactly same as CN data in training covariate while
PT data can be any samples in or out of the training set.
Returns: list of index indicating outlier models
"""
_, validation_data = parse_validation_data(data, covariate)
all_prediction_labels = []
for models in model_dirs:
model = SmileGAN()
model.load(models)
all_prediction_labels.append(np.argmax(model.predict_cluster(validation_data), axis=1))
model_aris = [[] for _ in range(len(model_dirs))]
filtered_models = []
for i in range(len(model_dirs)):
for j in range(len(model_dirs)):
if i!=j:
model_aris[i].append(metrics.adjusted_rand_score(all_prediction_labels[i], all_prediction_labels[j]))
median_aris = np.median(model_aris, axis=1)
for j in range(median_aris.shape[0]):
rest_aris = np.delete(median_aris,j)
if (median_aris[j]-np.mean(rest_aris))/np.std(rest_aris)<-2:
filtered_models.append(j)
return filtered_models
def calculate_ari(prediction_labels):
model_aris = []
for i in range(len(prediction_labels)):
for j in range(i+1,len(prediction_labels)):
model_aris.append(metrics.adjusted_rand_score(prediction_labels[i], prediction_labels[j]))
return np.mean(model_aris), np.std(model_aris)
def clustering_result(model_dirs, ncluster, consensus_type, data, covariate=None):
"""
    Function used to derive clustering results from several saved models.
Args:
model_dirs: list, list of dirs of all saved models
ncluster: int, number of defined clusters
consensus_type: string, the method used for deriving final clustering results with all models derived through CV
choose between 'highest_matching_clustering' and 'consensus_clustering'
data, data_frame, dataframe with same format as training data. CN data must be exactly same as CN data in training dataframe while
PT data can be any samples in or out of the training set.
covariate, data_frame, dataframe with same format as training covariate. CN data must be exactly same as CN data in training covariate while
PT data can be any samples in or out of the training set.
Returns: clustering outputs.
"""
_, validation_data = parse_validation_data(data, covariate)
all_prediction_labels = []
all_prediction_probabilities = []
for models in model_dirs:
model = SmileGAN()
model.load(models)
all_prediction_labels.append(np.argmax(model.predict_cluster(validation_data), axis=1))
all_prediction_probabilities.append(model.predict_cluster(validation_data))
if len(model_dirs) > 1:
mean_ari, std_ari = calculate_ari(all_prediction_labels)
print("Results have Adjuested_random_index (ARI) = %.2f+- %.2f" %(mean_ari, std_ari))
if mean_ari<0.3 and consensus_type == 'highest_matching_clustering':
print('mean ARI < 0.3, consensus_clustering is recommended')
if len(all_prediction_labels) == 1:
return np.array(all_prediction_labels[0]), np.array(all_prediction_probabilities[0]), 1, 0
elif consensus_type == 'highest_matching_clustering':
cluster_label, cluster_prob = highest_matching_clustering(all_prediction_labels, all_prediction_probabilities, ncluster)
return cluster_label, cluster_prob, mean_ari, std_ari
elif consensus_type == 'consensus_clustering':
return consensus_clustering(all_prediction_labels, ncluster), None, mean_ari, std_ari
else:
raise Exception("Please choose between 'highest_matching_clustering' and 'consensus_clustering'")
def single_model_clustering(data, ncluster, start_saving_epoch, max_epoch, output_dir, WD_threshold, AQ_threshold, \
cluster_loss_threshold, covariate=None, saved_model_name='converged_model', lam=9, mu=5, batchSize=25, lipschitz_k = 0.5, verbose = False, \
beta1 = 0.5, lr = 0.0002, max_gnorm = 100, eval_freq = 5, save_epoch_freq = 5):
"""
    one of the Smile-GAN core functions for clustering. Only one model will be trained (not recommended since results may not be reproducible)
Args:
data: dataframe, dataframe file with all ROI (input features) The dataframe contains
the following headers: "
"i) the first column is the participant_id;"
"iii) the second column should be the diagnosis;"
"The following column should be the extracted features. e.g., the ROI features"
covariate: dataframe, not required; dataframe file with all confounding covariates to be corrected. The dataframe contains
the following headers: "
"i) the first column is the participant_id;"
"iii) the second column should be the diagnosis;"
"The following column should be all confounding covariates. e.g., age, sex"
ncluster: int, number of defined clusters
start_saving_epoch: int, epoch number from which model will be saved and training will be stopped if stopping criteria satisfied
    max_epoch: int, maximum training epoch: training will stop even if criteria are not satisfied.
    output_dir: str, the directory under which the model and results will be saved
    WD_threshold: int, chosen WD threshold for stopping criteria
    AQ_threshold: int, chosen AQ threshold for stopping criteria
    cluster_loss_threshold: int, chosen cluster_loss threshold for stopping criteria
load_model: bool, whether load one pre-saved checkpoint
saved_model_name: str, the name of the saved model
lam: int, hyperparameter for cluster loss
mu: int, hyperparameter for change loss
    batchSize: int, batch size for training procedure
    lipschitz_k: float, hyperparameter for weight clipping of the mapping and clustering functions
verbose: bool, choose whether to print out training procedure
beta1: float, parameter of ADAM optimization method
lr: float, learning rate
max_gnorm: float, maximum gradient norm for gradient clipping
eval_freq: int, the frequency at which the model is evaluated during training procedure
save_epoch_freq: int, the frequency at which the model is saved during training procedure
Returns: clustering outputs.
"""
print('Start Smile-GAN for semi-supervised clustering')
Smile_GAN_model = Smile_GAN_train(ncluster, start_saving_epoch, max_epoch, WD_threshold, AQ_threshold, \
cluster_loss_threshold, lam=lam, mu=mu, batchSize=batchSize, lipschitz_k = lipschitz_k,
beta1 = beta1, lr = lr, max_gnorm = max_gnorm, eval_freq = eval_freq, save_epoch_freq = save_epoch_freq)
converge = Smile_GAN_model.train(saved_model_name, data, covariate, output_dir, verbose = verbose)
while not converge:
print("****** Model not converging or not converged at max interation, Start retraining ******")
converge = Smile_GAN_model.train(saved_model_name, data, covariate, output_dir, verbose = verbose)
cluster_label, cluster_prob, mean_ari, std_ari = clustering_result([os.path.join(output_dir,saved_model_name)], ncluster, 'highest_matching_clustering', data, covariate)
pt_data = data.loc[data['diagnosis'] == 1][['participant_id','diagnosis']]
pt_data['cluster_label'] = cluster_label + 1
for i in range(ncluster):
pt_data['p'+str(i+1)] = cluster_prob[:,i]
pt_data.to_csv(os.path.join(output_dir,'clustering_result.csv'), index = False)
return pt_data
def cross_validated_clustering(data, ncluster, fold_number, fraction, start_saving_epoch, max_epoch, output_dir, WD_threshold, AQ_threshold, \
cluster_loss_threshold, consensus_type, covariate=None, lam=9, mu=5, batchSize=25, lipschitz_k = 0.5, verbose = False, \
beta1 = 0.5, lr = 0.0002, max_gnorm = 100, eval_freq = 5, save_epoch_freq = 5, start_fold = 0, stop_fold = None, check_outlier = True):
"""
cross_validated clustering function using Smile-GAN (recommended)
Args:
data: dataframe, dataframe file with all ROI (input features) The dataframe contains
the following headers: "
"i) the first column is the participant_id;"
"iii) the second column should be the diagnosis;"
"The following column should be the extracted features. e.g., the ROI features"
covariate: dataframe, not required; dataframe file with all confounding covariates to be corrected. The dataframe contains
the following headers: "
"i) the first column is the participant_id;"
"iii) the second column should be the diagnosis;"
"The following column should be all confounding covariates. e.g., age, sex"
ncluster: int, number of defined clusters
fold_number: int, number of folds for leave-out cross validation
fraction: float, fraction of data used for training in each fold
start_saving_epoch: int, epoch number from which model will be saved and training will be stopped if stopping criteria satisfied
    max_epoch: int, maximum training epoch: training will stop even if criteria are not satisfied.
    output_dir: str, the directory under which the model and results will be saved
    WD_threshold: int, chosen WD threshold for stopping criteria
    AQ_threshold: int, chosen AQ threshold for stopping criteria
    cluster_loss_threshold: int, chosen cluster_loss threshold for stopping criteria
###load_model: bool, whether load one pre-saved checkpoint
consensus_type: string, the method used for deriving final clustering results with all models saved during CV
choose between 'highest_matching_clustering' and 'consensus_clustering'
saved_model_name: str, the name of the saved model
lam: int, hyperparameter for cluster loss
mu: int, hyperparameter for change loss
    batchSize: int, batch size for training procedure
    lipschitz_k: float, hyperparameter for weight clipping of the mapping and clustering functions
verbose: bool, choose whether to print out training procedure
beta1: float, parameter of ADAM optimization method
lr: float, learning rate
max_gnorm: float, maximum gradient norm for gradient clipping
eval_freq: int, the frequency at which the model is evaluated during training procedure
save_epoch_freq: int, the frequency at which the model is saved during training procedure
    start_fold: int, indicate the last saved fold index,
                    used to restart a previous half-finished cross validation; set to 0 by default, indicating a new cv process
    stop_fold: int, indicate the index of the fold at which the cv stops early,
                used to stop the cv process early and resume later; set to None by default so the cv will not stop until the end
    check_outlier: bool, whether to check for outlier models (potentially unsuccessful models) after the cv process and retrain those folds
Returns: clustering outputs.
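    Example (illustrative only; every argument value below is hypothetical and
    should be tuned for the dataset at hand; results are written to
    output_dir/clustering_result.csv):
        cross_validated_clustering(data, ncluster=3, fold_number=10, fraction=0.8,
            start_saving_epoch=500, max_epoch=2000, output_dir='./output',
            WD_threshold=0.11, AQ_threshold=0.2, cluster_loss_threshold=0.003,
            consensus_type='highest_matching_clustering', covariate=covariate)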
"""
print('Start Smile-GAN for semi-supervised clustering')
Smile_GAN_model = Smile_GAN_train(ncluster, start_saving_epoch, max_epoch, WD_threshold, AQ_threshold, \
cluster_loss_threshold, lam=lam, mu=mu, batchSize=batchSize, \
lipschitz_k = lipschitz_k, beta1 = beta1, lr = lr, max_gnorm = max_gnorm, eval_freq = eval_freq, save_epoch_freq = save_epoch_freq)
saved_models = [os.path.join(output_dir, 'coverged_model_fold'+str(i)) for i in range(fold_number)]
if stop_fold == None:
stop_fold = fold_number
for i in range(start_fold, stop_fold):
print('****** Starting training of Fold '+str(i)+" ******")
saved_model_name = 'coverged_model_fold'+str(i)
converge = Smile_GAN_model.train(saved_model_name, data, covariate, output_dir, random_seed=i, data_fraction = fraction, verbose = verbose)
while not converge:
print("****** Model not converging or not converged at max interation, Start retraining ******")
converge = Smile_GAN_model.train(saved_model_name, data, covariate, output_dir, random_seed=i, data_fraction = fraction, verbose = verbose)
if check_outlier:
print('****** Start Checking outlier models ******')
outlier_models = model_filtering(saved_models, ncluster, data, covariate)
if len(outlier_models) > 0:
print('Model', end=' ')
for model in outlier_models:
print(str(model),end=' ')
print('have low agreement with other models')
else:
print('****** There are no outlier models ******')
for i in outlier_models:
print('****** Starting training of Fold '+str(i)+" ******")
saved_model_name = 'coverged_model_fold'+str(i)
converge = Smile_GAN_model.train(saved_model_name, data, covariate, output_dir, random_seed=i, data_fraction = fraction, verbose = verbose)
while not converge:
print("****** Model not converged at max interation, Start retraining ******")
converge = Smile_GAN_model.train(saved_model_name, data, covariate, output_dir, random_seed=i, data_fraction = fraction, verbose = verbose)
cluster_label, cluster_prob, mean_ari, std_ari = clustering_result(saved_models, ncluster, consensus_type, data, covariate)
pt_data = data.loc[data['diagnosis'] == 1][['participant_id','diagnosis']]
pt_data['cluster_label'] = cluster_label + 1
if consensus_type == "highest_matching_clustering":
for i in range(ncluster):
pt_data['p'+str(i+1)] = cluster_prob[:,i]
pt_data["ARI = %.2f+- %.2f" %(mean_ari, std_ari)] = ''
pt_data.to_csv(os.path.join(output_dir,'clustering_result.csv'), index = False)
print('****** Smile-GAN clustering finished ******')
| 2.4375 | 2 |
tests/test_writer_data_expectations.py | mabel-dev/mabel | 0 | 12761973 | <gh_stars>0
import os
import sys
import pytest
sys.path.insert(1, os.path.join(sys.path[0], ".."))
from mabel.adapters.disk import DiskReader, DiskWriter
from mabel.adapters.null import NullWriter
from mabel.data import Reader, Writer
from rich import traceback
from data_expectations import Expectations
from data_expectations.errors import ExpectationNotMetError
traceback.install()
VALID_TARGET = {
"dataset": "_temp",
"set_of_expectations": [
{"expectation": "expect_column_to_exist", "column": "name"},
{"expectation": "expect_column_to_exist", "column": "alter"},
],
}
INVALID_TARGET = {
"dataset": "_temp",
"set_of_expectations": [
{"expectation": "expect_column_to_exist", "column": "show"}
],
}
def test_validator_expected_to_work():
w = Writer(inner_writer=NullWriter, **VALID_TARGET)
w.append({"name": "<NAME>", "alter": "<NAME>"})
w.append({"name": "<NAME>", "alter": "<NAME>"})
w.append({"name": "<NAME>", "alter": "<NAME>"})
w.finalize()
def test_validator_expected_to_not_work():
w = Writer(inner_writer=NullWriter, **INVALID_TARGET)
with pytest.raises(ExpectationNotMetError):
w.append({"name": "<NAME>", "alter": "<NAME>"})
with pytest.raises(ExpectationNotMetError):
w.append({"name": "<NAME>", "alter": "<NAME>"})
w.finalize()
if __name__ == "__main__": # pragma: no cover
test_validator_expected_to_work()
test_validator_expected_to_not_work()
print("okay")
| 2.109375 | 2 |
src/parsers/markdown.py | iweans/translrt | 1 | 12761974 |
class MarkdownParser:
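    """
    Splits a markdown-like text into translatable content parts and the
    markdown/whitespace delimiters that sit between them, so the parts can be
    run through a translation step and reassembled afterwards: encode()/parse()
    return the parts plus the saved delimiters, decode() rebuilds the text from
    the translated string and those delimiters.
    """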
def __init__(self, whitelist_filters=None):
self._delimiter = '\n'
self._single_delimiters = \
('#', '*', '+', '~', '-', '|', '`', '\n')
self._whitelist_filters = whitelist_filters or [str.isdigit]
def encode(self, text):
content_parts, content_delimiters = \
self.parse(text)
encoded_text = self._delimiter.join(content_parts)
return encoded_text, content_delimiters
def decode(self, text, content_delimiters):
translated_parts = ('\n\n' + text).split('\n\n')
result = ''
for content, delimiter in zip(translated_parts, content_delimiters):
result += (content + delimiter)
return result
def parse(self, text):
content_parts = []
content_delimiters = []
tmp_delimiter = tmp_part = ''
# ----------------------------------------
for char in text:
if char in self._single_delimiters:
tmp_delimiter += char
continue
# ------------------------------
if self._check_whitelist(char):
tmp_delimiter += char
continue
# ------------------------------
if char.isspace() and tmp_delimiter:
tmp_delimiter += char
continue
# ------------------------------
if not char.isspace() and tmp_delimiter:
content_parts.append(tmp_part)
content_delimiters.append(tmp_delimiter)
tmp_delimiter = tmp_part = ''
# --------------------
tmp_part += char
# ----------------------------------------
tmp_part and content_parts.append(tmp_part)
content_delimiters.append(tmp_delimiter or '\n')
return content_parts, content_delimiters
def _check_whitelist(self, char):
for filter_callable in self._whitelist_filters:
if not filter_callable(char):
return False
# ----------------------------------------
return True
| 3.078125 | 3 |
toRLior.py | Night46/toRLior | 2 | 12761975 | <reponame>Night46/toRLior
# ##################################################################################################################
# PREREQUISITES
# - tor config file 'torrc' default location /usr/local/etc/tor
# - set the tor config to allow SocksPort on port 9999
# - set the tor config to allow ControlPort on port 9991
# - set the tor config to allow HashedControlPassword
# - set the ControlPort password to <PASSWORD> (via CLI tor --hash-password <PASSWORD>)
# - set the ControlPort hash in your 'torrc' file to 16:4C46EEA1DBCFB4C96047FE8342A4C19120C5A493962943EC0F486FFC69
# ##################################################################################################################
import threading
import socket
import socks
import urllib
import time
import re
class toRLior:
def __init__(self):
self.proxy_ip = '127.0.0.1'
# IP address or DNS for the proxy server
self.proxy_port = 9999
# proxy port Default is 1080 for socks and 8080 for http
self.proxy_type = socks.PROXY_TYPE_SOCKS5
# PROXY_TYPE_SOCKS4, PROXY_TYPE_SOCKS5 and PROXY_TYPE_HTTP
self.control_ip = '127.0.0.1'
# the control ip address
self.control_port = 9991
# the control port
self.control_socket = "('127.0.0.1', 9991)"
# the socket for tor control
self.check_ip = 'http://my-ip.herokuapp.com'
# returns the external ip address
self.rdns = True
# DNS resolving remotely, False, DNS resolving locally
self.username = None
# Socks5, username / password authentication; Socks4 servers, be sent as the userid HTTP server parameter is ignored
self.passwd = None
# only for Socks5
self.c_username = 'user'
# control port user
self.c_passwd = '<PASSWORD>'
# control port pass
self.dest_socket = "('my-ip.herokuapp.com', 80)"
# destination address and destination port
self.test_count = 3
        # number of times to run the connection test function
self.post_dest = 'http://my-ip.herokuapp.com'
# destination address for tor_post
self.post_data = {'A': 'A'}
# data to be sent as the POST
self.get_dest = 'http://my-ip.herokuapp.com'
# # destination address for tor_get
self.post_data_encode = urllib.urlencode(self.post_data)
# urllib encoded representation of post_data
self.post_headers = {
'Host': 'my-ip.herokuapp.com',
'Connection': 'keep-alive',
'Cache-Control': 'max-age=0',
'Upgrade-Insecure-Requests': '1',
'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_11_5) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/52.0.2743.49 Safari/537.36',
'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,*/*;q=0.8',
'Accept-Encoding': 'gzip, deflate, sdch',
'Accept-Language': 'en-US,en;q=0.8,he;q=0.6',
'DNT': '1'
}
self.thread_dest = 'http://my-ip.herokuapp.com'
# threaded sockets destination address
self.thread_count = 5
        # the number of iterations
self.c = socket.socket()
# TCP socket for raw data
self.s = socks.setdefaultproxy(self.proxy_type, self.proxy_ip, self.proxy_port)
# set TOR proxy
self.sp = socks.socksocket()
        # SOCKS socket
self.sp.setproxy(self.proxy_type, self.control_ip, self.control_port, self.rdns, self.c_username, self.c_passwd)
# set TOR ControlPort proxy
self.ip_regex = re.compile(r'(\d{1,3})\.(\d{1,3})\.(\d{1,3})\.(\d{1,3})')
# regex search for an IP address
class connect(toRLior):
def tor_connect(self):
self.s
socket.socket = socks.socksocket
def tor_extern_ip(self):
import urllib2
request = urllib2.Request(self.check_ip)
open_url = urllib2.urlopen(request)
raw_data = open_url.read()
ip = self.ip_regex.search(raw_data)
print ip.group()
def tor_get(self):
import urllib2
request = urllib2.Request(self.get_dest)
open_url = urllib2.urlopen(request)
raw_data = open_url.read()
print raw_data
def tor_post(self):
import urllib2
request = urllib2.Request(self.post_dest, self.post_data_encode, self.post_headers)
open_url = urllib2.urlopen(request)
raw_data = open_url.read()
ip = self.ip_regex.search(raw_data)
print ip.group()
def send_close(self):
self.s.close()
class multi_thread(toRLior, threading.Thread):
def tor_thread_connect(self):
self.s
socket.socket = socks.socksocket
def threaded_get(self):
import urllib2
request = urllib2.Request(self.thread_dest)
open_url = urllib2.urlopen(request)
raw_data = open_url.read()
print raw_data
print threading.current_thread()
def threaded_get_run(self):
threads = []
for i in range (self.thread_count):
t = threading.Thread(target=self.threaded_get())
print threads
threads.append(t)
print threads
t.start()
t.join()
print threads
def threaded_get_changeIP(self):
import urllib2
request = urllib2.Request(self.thread_dest)
open_url = urllib2.urlopen(request)
raw_data = open_url.read()
print raw_data
print ''
print 'asking for a new_circuit..'
        print 'waiting for 15s before circuit change..'
time.sleep(15)
torcontrol = controller()
torcontrol.control_connect(('127.0.0.1', 9991))
torcontrol.new_circuit()
def threaded_get_changeIP_run(self):
threads = []
for i in range (self.thread_count):
t = threading.Thread(target=self.threaded_get_changeIP())
print threads
threads.append(t)
print threads
t.start()
t.join()
print threads
def threaded_post(self):
import urllib2
request = urllib2.Request(self.post_dest, self.post_data_encode, self.post_headers)
open_url = urllib2.urlopen(request)
raw_data = open_url.read()
print raw_data
def threaded_post_run(self):
threads = []
for i in range (self.thread_count):
t = threading.Thread(target=self.threaded_post())
print threads
threads.append(t)
print threads
t.start()
t.join()
print threads
def threaded_post_changeIP(self):
import urllib2
request = urllib2.Request(self.post_dest, self.post_data_encode, self.post_headers)
open_url = urllib2.urlopen(request)
raw_data = open_url.read()
print raw_data
print ''
print 'asking for a new_circuit..'
        print 'waiting for 15s before circuit change..'
time.sleep(15)
torcontrol = controller()
torcontrol.control_connect(('127.0.0.1', 9991))
torcontrol.new_circuit()
def send_thread_close(self):
self.s.close()
class controller(toRLior):
def control_connect(self, control_socket):
self.sp.setproxy()
self.sp.connect(control_socket)
self.sp.send('AUTHENTICATE "toRLior"\r\n')
def new_circuit(self):
self.sp.send('SIGNAL NEWNYM\r\n')
self.sp.close()
def clear_dns_cache(self):
self.sp.send('SIGNAL CLEARDNSCACHE\n\r')
self.sp.close()
def halt(self):
self.sp.send('SIGNAL HALT\n\r')
self.sp.close()
class test(toRLior):
def test_circuit_change(self):
for i in range(self.test_count):
print ''
torconnect = connect()
torconnect.tor_connect()
print 'tor_extern_ip -'+str(i+1)+'-'
torconnect.tor_extern_ip()
print ''
if i < 1:
torcontrol = controller()
torcontrol.control_connect(('127.0.0.1', 9991))
print 'asking for a new_circuit..'
torcontrol.new_circuit()
elif i < self.test_count-1 :
                print 'waiting for 15s before circuit change..'
time.sleep(15)
torcontrol = controller()
torcontrol.control_connect(('127.0.0.1', 9991))
print 'asking for a new_circuit..'
torcontrol.new_circuit()
i = i+1
if __name__ == '__main__':
# ##################################### #
# uncomment below to test functionality #
# ##################################### #
# print 'test'
# debug = test()
# debug.test_circuit_change()
# torconnect = connect()
# torconnect.tor_connect()
# torconnect.tor_get()
# torconnect = connect()
# torconnect.tor_connect()
# torconnect.tor_post()
# torconnect = connect()
# torconnect.tor_connect()
# torconnect.tor_extern_ip()
# torthread = multi_thread()
# torthread.tor_thread_connect()
# torthread.threaded_get_run()
# torthread = multi_thread()
# torthread.tor_thread_connect()
# torthread.threaded_get_changeIP_run()
# torcontrol = controller()
# torcontrol.control_connect(('127.0.0.1', 9991))
# torcontrol.new_circuit()
# torcontrol = controller()
# torcontrol.control_connect(('127.0.0.1', 9991))
# torcontrol.clear_dns_cache()
# torcontrol = controller()
# torcontrol.control_connect(('127.0.0.1', 9991))
# torcontrol.halt() | 2.21875 | 2 |
build/lib/TEF/dfmeta.py | tll549/TEF | 2 | 12761976 | import numpy as np
import pandas as pd
import io
import re
import sys
import warnings
from scipy.stats import skew, skewtest
from scipy.stats import rankdata
from .plot_1var import *
# from plot_1var import * # for local testing only
from IPython.display import HTML
def print_list(l, br=', '):
o = ''
for e in l:
o += str(e) + br
return o[:-len(br)]
def summary(s, max_lev=10, br_way=', ', sum_num_like_cat_if_nunique_small=5):
'''
a function that takes a series and returns a summary string
'''
if s.nunique(dropna=False) == 1:
return(f'all the same: {s.unique()[0]}')
elif s.notnull().sum() == 0:
return(f'all are NaNs')
if s.dtype.name in ['object', 'bool', 'category'] or \
(('float' in s.dtype.name or 'int' in s.dtype.name) \
and s.nunique() <= sum_num_like_cat_if_nunique_small):
if len(s.unique()) <= max_lev:
# consider drop na?
vc = s.value_counts(dropna=False, normalize=True)
# vc = s.value_counts(dropna=True, normalize=True)
s = ''
for name, v in zip(vc.index, vc.values):
s += f'{name} {v*100:>2.0f}%' + br_way
return s[:-len(br_way)]
else:
vc = s.value_counts(dropna=False, normalize=True)
# vc = s.value_counts(dropna=True, normalize=True)
s = ''
i = 0
cur_sum_perc = 0
for name, v in zip(vc.index, vc.values):
if i == max_lev or \
(i >= 5 and cur_sum_perc >= 0.8) or \
(i == 0 and cur_sum_perc < 0.05):
# break if the it has describe 80% of the data, or the
break
s += f'{name} {v*100:>2.0f}%' + br_way
i += 1
cur_sum_perc += v
s += f'other {(1-cur_sum_perc)*100:>2.0f}%'
# return s[:-len(br_way)]
return s
elif 'float' in s.dtype.name or 'int' in s.dtype.name:
qs = s.quantile(q=[0, 0.25, 0.5, 0.75, 1]).values.tolist()
cv = round(s.std()/s.mean(), 2) if s.mean() != 0 else 'nan'
sk = round(skew(s[s.notnull()]), 2) if len(s[s.notnull()]) > 0 else 'nan'
o = f'{qs}{br_way}\
mean: {s.mean():.2f} std: {s.std():.2f}{br_way}\
cv: {cv} skew: {sk}'
if sum(s.notnull()) > 8: # requirement of skewtest
p = skewtest(s[s.notnull()]).pvalue
o += f'*' if p <= 0.05 else ''
if min(s[s!=0]) > 0 and len(s[s!=0]) > 8: # take log
o += f'{br_way}log skew: {skew(np.log(s[s>0])):.2f}'
p = skewtest(np.log(s[s!=0])).pvalue
            o += f'*' if p <= 0.05 else ''
return o
elif 'datetime' in s.dtype.name:
qs = s.quantile(q=[0, 0.25, 0.5, 0.75, 1]).values
dt_range = (qs[-1]-qs[0]).astype('timedelta64[D]')
if dt_range > np.timedelta64(1, 'D'):
to_print = [np.datetime_as_string(q, unit='D') for q in qs]
else:
to_print = [np.datetime_as_string(q, unit='s') for q in qs]
return print_list(to_print, br=br_way)
else:
return ''
def possible_dup_lev(series, threshold=0.9, truncate=False):
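    '''
    Scan the unique levels of a categorical/object series with fuzzywuzzy and
    return a string listing pairs of levels whose similarity exceeds `threshold`,
    i.e. possible duplicate spellings of the same level.
    '''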
try:
from fuzzywuzzy import fuzz
except ImportError:
sys.exit("""Please install fuzzywuzzy first
install it using: pip install fuzzywuzzy
    if installing the dependency python-levenshtein fails and you are using Anaconda, try
conda install -c conda-forge python-levenshtein""")
if series.dtype.name not in ['category', 'object']:
return ''
if series.nunique() > 100 and series.dtype.name == 'object' and truncate: # maybe should adjust
# warnings.warn('Checking duplicates on a long list will take a long time', RuntimeWarning)
# simplified = series.str.lower().replace(r'\W', '')
# if simplified.nunique() < series.nunique():
# return f"too many levls, didn't check, but didn't pass a quick check"
# else:
# return ''
return ''
threshold *= 100
l = series.unique().tolist()
l = [y for y in l if type(y) == str] # remove nan, True, False
candidate = []
for i in range(len(l)):
for j in range(i+1, len(l)):
if l[i].isdigit() or l[j].isdigit():
continue
if any([fuzz.ratio(l[i], l[j]) > threshold,
fuzz.partial_ratio(l[i], l[j]) > threshold,
fuzz.token_sort_ratio(l[i], l[j]) > threshold,
fuzz.token_set_ratio(l[i], l[j]) > threshold]):
candidate.append((l[i], l[j]))
o = '; '.join(['('+', '.join(can)+')' for can in candidate])
if truncate and len(o) > 1000:
o = o[:1000] + f'...truncated, call TEF.possible_dup_lev({series.name}) for a full result'
return o
def dfmeta(df, description=None, max_lev=10, transpose=True, sample=True,
style=True, color_bg_by_type=True, highlight_nan=0.5, in_cell_next_line=True,
drop=None,
check_possible_error=True, dup_lev_prop=0.9,
fitted_feat_imp=None,
plot=True,
standard=False):
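    '''
    Build a one-stop metadata overview of `df`: dtype, NaN counts, unique
    counts, and a per-column summary (value counts for categoricals, quantiles
    and skewness for numerics, ranges for datetimes), plus optional
    descriptions, sample rows, possible-error checks and fitted feature
    importances. Returns an IPython HTML object when style=True, otherwise a
    plain DataFrame.
    '''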
# validation
assert max_lev > 2, 'max_lev should > 2'
assert sample < df.shape[0], 'sample should < nrows'
if sample == True and df.shape[0] < 3:
sample = df.shape[0]
assert drop is None or 'NaNs' not in drop, 'Cannot drop NaNs for now'
assert drop is None or 'dtype' not in drop, 'Cannot drop dtype for now'
warnings.simplefilter('ignore', RuntimeWarning) # caused from skewtest, unknown
if standard: # overwrite thise args
check_possible_error = False
sample = False
# drop=['unique levs']
# the first line, shape, dtypes, memory
buffer = io.StringIO()
df.info(verbose=False, buf=buffer)
s = buffer.getvalue()
if style == False:
print(f'shape: {df.shape}')
print(s.split('\n')[-3])
print(s.split('\n')[-2])
color_bg_by_type, highlight_nan, in_cell_next_line = False, False, False
br_way = "<br/> " if in_cell_next_line else ", " # notice a space here
o = pd.DataFrame(columns=df.columns)
o.loc['idx'] = list(range(df.shape[1]))
o.loc['dtype'] = df.dtypes
if description is not None:
o.loc['description'] = ''
for col, des in description.items():
if col in df.columns.tolist():
o.loc['description', col] = des
o.loc['NaNs'] = df.apply(lambda x: f'{sum(x.isnull())}{br_way}{sum(x.isnull())/df.shape[0]*100:.0f}%')
o.loc['unique counts'] = df.apply(lambda x: f'{len(x.unique())}{br_way}{len(x.unique())/df.shape[0]*100:.0f}%')
# def unique_index(s):
# if len(s.unique()) <= max_lev:
# o = ''
# for i in s.value_counts(dropna=False).index.tolist():
# o += str(i) + br_way
# return o[:-len(br_way)]
# else:
# return ''
# o.loc['unique levs'] = df.apply(unique_index, result_type='expand')
    o.loc['summary'] = df.apply(summary, result_type='expand', max_lev=max_lev, br_way=br_way) # need result_type='expand' or it will all convert to object dtype
# maybe us args=(arg1, ) or sth?
if plot and style:
o.loc['summary plot'] = ['__TO_PLOT_TO_FILL__'] * df.shape[1]
if fitted_feat_imp is not None:
def print_fitted_feat_imp(fitted_feat_imp, indices):
fitted_feat_imp = fitted_feat_imp[fitted_feat_imp.notnull()]
o = pd.Series(index=indices)
rank = len(fitted_feat_imp) - rankdata(fitted_feat_imp).astype(int) + 1
for i in range(len(fitted_feat_imp)):
o[fitted_feat_imp.index[i]] = f'{rank[i]:.0f}/{len(fitted_feat_imp)} {fitted_feat_imp[i]:.2f} {fitted_feat_imp[i]/sum(fitted_feat_imp)*100:.0f}%'
o.loc[o.isnull()] = ''
return o
o.loc['fitted feature importance'] = print_fitted_feat_imp(fitted_feat_imp, df.columns)
if check_possible_error:
def possible_nan(x):
if x.dtype.name not in ['category', 'object']:
return ''
check_list = ['NEED', 'nan', 'Nan', 'nAn', 'naN', 'NAn', 'nAN', 'NaN', 'NAN']
check_list_re = [r'^ +$', '^null$', r'^[^a-zA-Z0-9]*$']
o = ''
if sum(x==0) > 0:
o += f' "0": {sum(x==0)}, {sum(x==0)/df.shape[0]*100:.2f}%{br_way}'
for to_check in check_list:
if to_check in x.unique().tolist():
o += f' "{to_check}": {sum(x==to_check)}, {sum(x==to_check)/df.shape[0]*100:.2f}%{br_way}'
for to_check in check_list_re:
is_match = [re.match(to_check, str(lev), flags=re.IGNORECASE) is not None for lev in x]
if any(is_match):
to_print = ', '.join(x[is_match].unique())
o += f' "{to_print}": {sum(is_match)}, {sum(is_match)/df.shape[0]*100:.2f}%{br_way}'
if len(o) > 1000:
o = o[:5000] + f'...truncated'
return o
o.loc['possible NaNs'] = df.apply(possible_nan)
o.loc['possible dup lev'] = df.apply(possible_dup_lev, args=(dup_lev_prop, True))
if sample != False:
if sample == True and type(sample) is not int:
sample_df = df.sample(3).sort_index()
elif sample == 'head':
sample_df = df.head(3)
elif type(sample) is int:
sample_df = df.sample(sample)
sample_df.index = ['row ' + str(x) for x in sample_df.index.tolist()]
o = o.append(sample_df)
if drop:
o = o.drop(labels=drop)
if transpose:
o = o.transpose()
o = o.rename_axis('col name').reset_index()
if color_bg_by_type or highlight_nan != False:
def style_rule(data, color='yellow'):
if color_bg_by_type:
cell_rule = 'border: 1px solid white;'
# https://www.w3schools.com/colors/colors_picker.asp
# saturation 92%, lightness 95%
cmap = {'object': '#f2f2f2',
'datetime64[ns]': '#e7feee',
'int8': '#fefee7',
'int16': '#fefee7',
'int32': '#fefee7',
'int64': '#fefee7',
'uint8': '#fefee7',
'uint16': '#fefee7',
'uint32': '#fefee7',
'uint64': '#fefee7',
'float16': '#fef2e7',
'float32': '#fef2e7',
'float64': '#fef2e7',
'bool': '#e7fefe',
'category': '#e7ecfe'}
# if data.iloc[2] not in cmap: # idx 2 is dtype
if data.loc['dtype'].name not in cmap:
cell_rule += "background-color: grey"
else:
cell_rule += "background-color: {}".format(cmap[data.loc['dtype'].name])
rule = [cell_rule] * len(data)
if transpose:
rule[0] = 'background-color: white;'
else:
rule = [''] * len(data)
# if float(data.iloc[3][-3:-1])/100 > highlight_nan or data.iloc[3][-4:] == '100%': # idx 3 is NaNs
if float(data.loc['NaNs'][-3:-1])/100 > highlight_nan or data.loc['NaNs'][-4:] == '100%':
rule[np.where(data.index=='NaNs')[0][0]] += '; color: red'
if data.loc['unique counts'][:(3+len(br_way))] == f'{df.shape[0]}{br_way}': # all unique
rule[np.where(data.index=='unique counts')[0][0]] += '; color: blue'
elif data.loc['unique counts'][:(1+len(br_way))] == f'1{br_way}': # all the same
rule[np.where(data.index=='unique counts')[0][0]] += '; color: red'
if fitted_feat_imp is not None:
if data.loc['fitted feature importance'][:2] in ['1/', '2/', '3/']:
rule[np.where(data.index=='fitted feature importance')[0][0]] += '; font-weight: bold'
return rule
o = o.style.apply(style_rule, axis=int(transpose)) # axis=1 for row-wise, for transpose=True
if transpose:
o = o.hide_index()
if style: # caption
s = print_list(s.split('\n')[-3:-1], br='; ')
o = o.set_caption(f"shape: {df.shape}; {s}")
o = o.render() # convert from pandas.io.formats.style.Styler to html code
if plot and style:
for c in range(df.shape[1]):
html_1var = plot_1var_series(df, c, max_lev, log_numeric=False, save_plt=None, return_html=True)
o = o.replace('__TO_PLOT_TO_FILL__', html_1var, 1)
o = HTML(o) # convert from html to IPython.core.display.HTML
return o
def dfmeta_to_htmlfile(styled_df, filename, head=''):
'''
styled_df should be <class 'IPython.core.display.HTML'>
'''
r = f'<h1>{head}</h1>\n' + '<body>\n' + styled_df.data + '\n</body>'
with open(filename, 'w') as f:
f.write(r)
return f'{filename} saved'
# def print_html_standard(df, description):
# meta = dfmeta(df,
# description=description,
# check_possible_error=False, sample=False, drop=['unique levs'])
# dfmeta_verbose_html = ''
# buffer = io.StringIO()
# df.info(verbose=False, buf=buffer)
# s = buffer.getvalue().split('\n')
# dfmeta_verbose = f"shape: {df.shape}<br/>{s[-3]}<br/>{s[-2]}"
# dfmeta_verbose_html = '<p>' + dfmeta_verbose + '</p>'
# r = dfmeta_verbose_html + '<body>\n' + meta.data + '\n</body>'
# for e in r.split('\n'):
# print(e)
# def dfmeta_to_htmlfile_standard(df, description, filename, head):
# '''
# a function that call dfmeta and then dfmeta_to_htmlfile using a standard configuration
# '''
# meta = dfmeta(df,
# description=description,
# check_possible_error=False, sample=False, drop=['unique levs'])
# return dfmeta_to_htmlfile(meta, filename, head)
def get_desc_template(df, var_name='desc', suffix_idx=False):
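    '''Print a copy-pastable `{column: ""}` description-dict template covering every column of df.'''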
print(var_name, '= {')
max_cn = max([len(x) for x in df.columns.tolist()]) + 1
len_cn = 25 if max_cn > 25 else max_cn
for i in range(df.shape[1]):
c = df.columns[i]
c += '"'
if c[:-1] != df.columns.tolist()[-1]:
if suffix_idx == False:
print(f' "{c:{len_cn}}: "",')
else:
print(f' "{c:{len_cn}}: "", # {i}')
else:
if suffix_idx == False:
print(f' "{c:{len_cn}}: ""')
else:
print(f' "{c:{len_cn}}: "" # {i}')
print('}')
def get_desc_template_file(df, filename='desc.py', var_name='desc', suffix_idx=False):
'''%run filename.py'''
max_cn = max([len(x) for x in df.columns.tolist()]) + 1
len_cn = 25 if max_cn > 25 else max_cn
o = var_name + ' = {' + '\n'
for i in range(df.shape[1]):
c = df.columns[i]
c += '"'
if c[:-1] != df.columns.tolist()[-1]:
o += f' "{c:{len_cn}}: "", # {i}' + '\n'
else:
o += f' "{c:{len_cn}}: "" # {i}' + '\n'
o += '}'
with open(filename, 'w') as f:
f.write(o)
return f'{filename} saved' | 2.59375 | 3 |
lsf_runner/single_machine_runner.py | sebascuri/runner | 0 | 12761977 | """Definition of all runner classes."""
import multiprocessing
import os
import time
import warnings
from typing import List, Optional
from .abstract_runner import AbstractRunner
from .util import start_process
class SingleRunner(AbstractRunner):
"""Runner in a Single Machine.
The runner submits the jobs in parallel to the `num_workers'. While the workers are
working, it keeps on checking and spawns a new job every time a worker is freed up.
Parameters
----------
name: str.
Runner name.
num_threads: int, optional. (default=1)/.
Number of threads to use.
num_workers: int, optional. (default = cpu_count() // num_threads - 1).
Number of workers where to run the process.
"""
num_workers: int
def __init__(
self, name: str, num_threads: int = 1, num_workers: Optional[int] = None
):
super().__init__(name, num_threads=num_threads)
if num_workers is None:
num_workers = max(1, multiprocessing.cpu_count() // num_threads - 1)
if (num_workers >= multiprocessing.cpu_count() // num_threads) and (
num_workers > 1
):
num_workers = max(1, multiprocessing.cpu_count() // num_threads - 1)
warnings.warn(f"Too many workers requested. Limiting them to {num_workers}")
self.num_workers = num_workers
def run(self, cmd_list: List[str]) -> List[str]:
"""See `AbstractRunner.run'."""
workers_idle = [False] * self.num_workers
pool = [start_process(lambda: None) for _ in range(self.num_workers)]
tasks = cmd_list[:]
while not all(workers_idle):
for i in range(self.num_workers):
if not pool[i].is_alive():
pool[i].terminate()
if len(tasks) > 0:
time.sleep(1)
cmd = tasks.pop(0)
pool[i] = start_process(lambda x: os.system(x), (cmd,))
else:
workers_idle[i] = True
return cmd_list
def run_batch(self, cmd_list: List[str]) -> str:
"""See `AbstractRunner.run_batch'."""
return "".join(self.run(cmd_list))
| 3.8125 | 4 |
tests/integration/services/shop/order/email/test_email_on_order_placed.py | byceps/byceps | 33 | 12761978 | <filename>tests/integration/services/shop/order/email/test_email_on_order_placed.py
"""
:Copyright: 2006-2021 <NAME>
:License: Revised BSD (see `LICENSE` file for details)
"""
from datetime import datetime
from decimal import Decimal
from typing import Iterator
from unittest.mock import patch
import pytest
from byceps.services.shop.article import service as article_service
from byceps.services.shop.order.email import service as order_email_service
from byceps.services.shop.order import (
sequence_service as order_sequence_service,
service as order_service,
)
from byceps.services.shop.storefront import service as storefront_service
from byceps.services.shop.storefront.transfer.models import Storefront
from byceps.services.snippet import service as snippet_service
from tests.helpers import current_user_set
from tests.integration.services.shop.helpers import (
create_article as _create_article,
)
from .helpers import (
assert_email,
get_current_user_for_user,
place_order_with_items,
)
@pytest.fixture(scope='module')
def customer(make_user):
return make_user('Interessent', email_address='<EMAIL>')
@pytest.fixture
def storefront(
make_order_number_sequence_id, make_storefront
) -> Iterator[Storefront]:
order_number_sequence_id = make_order_number_sequence_id(252)
storefront = make_storefront(order_number_sequence_id)
yield storefront
storefront_service.delete_storefront(storefront.id)
order_sequence_service.delete_order_number_sequence(
order_number_sequence_id
)
@pytest.fixture
def article1(shop):
article = create_article(
shop.id,
'AC-14-A00003',
'Einzelticket, Kategorie Loge',
Decimal('99.00'),
123,
)
article_id = article.id
yield article
article_service.delete_article(article_id)
@pytest.fixture
def article2(shop):
article = create_article(
shop.id,
'AC-14-A00007',
'T-Shirt, Größe L',
Decimal('14.95'),
50,
)
article_id = article.id
yield article
article_service.delete_article(article_id)
@pytest.fixture
def order(
storefront,
article1,
article2,
customer,
email_payment_instructions_snippet_id,
email_footer_snippet_id,
):
created_at = datetime(2014, 8, 15, 20, 7, 43)
items_with_quantity = [
(article1, 5),
(article2, 2),
]
order = place_order_with_items(
storefront.id, customer, created_at, items_with_quantity
)
yield order
snippet_service.delete_snippet(email_payment_instructions_snippet_id)
snippet_service.delete_snippet(email_footer_snippet_id)
order_service.delete_order(order.id)
@patch('byceps.email.send')
def test_email_on_order_placed(send_email_mock, site_app, customer, order):
app = site_app
current_user = get_current_user_for_user(customer)
with current_user_set(app, current_user), app.app_context():
order_email_service.send_email_for_incoming_order_to_orderer(order.id)
expected_sender = '<EMAIL>'
expected_recipients = ['<EMAIL>']
expected_subject = 'Deine Bestellung (AC-14-B00253) ist eingegangen.'
expected_body = '''
Hallo Interessent,
vielen Dank für deine Bestellung mit der Nummer AC-14-B00253 am 15.08.2014 über unsere Website.
Folgende Artikel hast du bestellt:
Beschreibung: Einzelticket, Kategorie Loge
Anzahl: 5
Stückpreis: 99,00 €
Beschreibung: T-Shirt, Größe L
Anzahl: 2
Stückpreis: 14,95 €
Gesamtbetrag: 524,90 €
Bitte überweise den Gesamtbetrag auf folgendes Konto:
Zahlungsempfänger: <Name>
IBAN: <IBAN>
BIC: <BIC>
Bank: <Kreditinstitut>
Verwendungszweck: AC-14-B00253
Wir werden dich informieren, sobald wir deine Zahlung erhalten haben.
Hier kannst du deine Bestellungen einsehen: https://www.acmecon.test/shop/orders
Für Fragen stehen wir gerne zur Verfügung.
Viele Grüße,
das Team der Acme Entertainment Convention
--
Acme Entertainment Convention
E-Mail: <EMAIL>
'''.strip()
assert_email(
send_email_mock,
expected_sender,
expected_recipients,
expected_subject,
expected_body,
)
# helpers
def create_article(shop_id, item_number, description, price, total_quantity):
return _create_article(
shop_id,
item_number=item_number,
description=description,
price=price,
total_quantity=total_quantity,
)
| 1.914063 | 2 |
Data-Pipeline-Capstone-Project-master/lambda/s3Posts-to-ES-lambda/s3-to-es_aws.py | jrderek/Build-AWS-Data-Lake-and-Data-Pipeline-to-Elasticsearch-and-Redshift-with-Apache-Airflow-and-Spark | 0 | 12761979 | <reponame>jrderek/Build-AWS-Data-Lake-and-Data-Pipeline-to-Elasticsearch-and-Redshift-with-Apache-Airflow-and-Spark
import boto3
import json
import datetime
import urllib
import urllib3
import logging
from pprint import pprint
from requests_aws4auth import AWS4Auth
import requests
import botocore
from io import BytesIO
import re
"""
Can Override the global variables using Lambda Environment Parameters
"""
globalVars = {}
globalVars['Owner'] = "Derrick"
globalVars['Environment'] = "Dev"
globalVars['awsRegion'] = "us-east-1"
globalVars['tagName'] = "serverless-s3-to-es-log-ingester"
globalVars['service'] = "es"
globalVars['esIndexPrefix'] = "/instagram_graph_posts/"
globalVars['esIndexDocType'] = "_doc"
globalVars['esHosts'] = {
'prod': 'something more secure here',
'aws': 'https://search-social-system-kkehzvprsvgkfisnfulapobkpm.us-east-1.es.amazonaws.com'
}
# Initialize Logger
logger = logging.getLogger()
logger.setLevel(logging.INFO)
def indexDocElement(es_Url, awsauth, docData):
"""
Loads completed document to Elasticsearch index.
PARAMS:
es_url - Elasticsearch Url for PUT requests
awsauth - AWS credentials for Elasticsearch
    docData - formatted dict-like object used to update the Elasticsearch record.
"""
try:
headers = {"Content-Type": "application/json"}
# headers = {'Content-type': 'application/json', 'Accept': 'text/plain'}
resp = requests.put(es_Url, auth=awsauth,
headers=headers, json=docData)
print(resp.content)
if resp.status_code == 201:
logger.info('INFO: Successfully created element into ES')
elif resp.status_code == 200:
logger.info('INFO: Successfully updated element into ES')
else:
logger.error(f'FAILURE: Unable to index element {resp.content}')
raise
except Exception as e:
logger.error(f'ERROR: {str(e)}')
logger.error(f"ERROR: Unable to index line:{docData['content']}")
raise
def store_images(temp, bucket):
"""
Stores images from media url to S3 bucket.
Checks for existing image in S3, loads it if absent
Then checks if "children" Media urls are present and saves as such:
17841401753941377 / 10010784827803388 / original.jpg, 17870925466585765.jpg, 18015701020271224.jpg
PARAMS:
temp - dictionary object loaded from from S3 with post data
bucket - bucket to store loaded images
RETURNS:
New url of media object stored on S3
"""
s3 = boto3.client('s3')
if 'media_url' in temp.keys():
image = temp['media_url']
elif 'thumbnail_url' in temp.keys():
image = temp['thumbnail_url']
else:
return 'image load fail'
img_key = 'instagram_graph_image_store/' + \
str(temp['owner_id']) + '/' + str(temp['id'])
# Get file extension (.jpg or .mp4)
ext = re.search(r'\.\w{3,4}(?=\?)', image).group()
# look for photos already in bucket
response = s3.list_objects_v2(
Bucket=bucket,
Prefix=img_key,
)
# if key count 0, there is no photo already stored in s3
if response.get('KeyCount', []) < 1:
response = requests.get(image).content
s3.put_object(Bucket=bucket, Key=img_key +
'/original' + ext, Body=response)
logger.info(f"Added {img_key + '/original.jpg'} to S3")
# if this is a carousel post, get all child images
if 'children' in temp.keys():
for i in range(len(temp['children']['data'])):
img_id = temp['children']['data'][i]['id']
image = temp['children']['data'][i]['media_url']
ext = re.search(r'\.\w{3,4}(?=\?)', image).group()
tmp_key = img_key + '/' + img_id + ext
response = requests.get(image).content
s3.put_object(Bucket=bucket, Key=tmp_key, Body=response)
# if this is a video get the thumbnail
elif 'thumbnail_url' in temp.keys():
img_id = 'thumbnail'
image = temp['thumbnail_url']
ext = re.search(r'\.\w{3,4}(?=\?)', image).group()
tmp_key = img_key + '/' + img_id + ext
response = requests.get(image).content
s3.put_object(Bucket=bucket, Key=tmp_key, Body=response)
else:
logger.info('No carousel images found')
else:
logger.info(
f"Found the file {img_key + '/original.jpg'} already in S3")
return 'https://social-system-test.s3.amazonaws.com/' + img_key + '/original' + ext
def lambda_handler(event, context):
credentials = boto3.Session().get_credentials()
# Set up connection to S3 bucket with raw json
s3 = boto3.client('s3')
# set up connection to AWS Elasticsearch
awsauth = AWS4Auth(credentials.access_key,
credentials.secret_key,
globalVars['awsRegion'],
globalVars['service'],
session_token=credentials.token
)
logger.info("Received event: " + json.dumps(event, indent=2))
try:
bucket = event['Records'][0]['s3']['bucket']['name']
key = urllib.parse.unquote_plus(
event['Records'][0]['s3']['object']['key'])
# Get document (obj) from S3
obj = s3.get_object(Bucket=bucket, Key=key)
except Exception as e:
logger.error('ERROR: {0}'.format(str(e)))
logger.error(
'ERROR: Unable able to GET object:{0} from S3 Bucket:{1}. Verify object exists.'.format(key, bucket))
# Create read object
body = obj['Body'].read().decode("utf-8")
    logger.info('SUCCESS: Retrieved object from S3')
# Create document headers
docData = {}
docData['objectKey'] = str(key)
docData['createdDate'] = str(obj['LastModified'])
docData['content_type'] = str(obj['ContentType'])
docData['content_length'] = str(obj['ContentLength'])
# Parsing content before sending to Elasticsearch
temp = json.loads(body)
# items not needed in ES.
temp.pop("ig_id", None)
temp.pop("username", None)
temp.pop("is_comment_enabled", None)
# flatten Owner ID
temp['owner_id'] = temp['owner']['id']
# set post id to string
temp['id'] = str(temp['id'])
# look for followers, if none set to -1
try:
temp['followers'] = temp['owner']['followers_count']
except Exception as e:
logger.info(f'Failed to set followers. {e}')
temp['followers'] = -1
temp.pop('owner')
# load images to S3
temp['fohr_media'] = store_images(temp, bucket)
# package up the document data
docData['content'] = temp
# Build Elasticsearch URL
es_Url = globalVars['esHosts'].get(
'aws') + globalVars['esIndexPrefix'] + globalVars['esIndexDocType'] + '/' + temp['id']
# send to AWS ES
logger.info('Calling AWS ES...')
indexDocElement(es_Url, awsauth, docData)
logger.info('SUCCESS: Successfully indexed the entire doc into ES')
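# Illustrative shape of the S3 put-event this handler reads (bucket and key
# values are placeholders); calling lambda_handler(None, None) as below will
# fail without a real event:
# event = {'Records': [{'s3': {'bucket': {'name': 'some-bucket'},
#                              'object': {'key': 'raw/post_123.json'}}}]}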
if __name__ == '__main__':
lambda_handler(None, None)
| 2.171875 | 2 |
src/providers/commoncrawl/ScienceMuseum.py | 9LKQ7ZLC82/cccatalog | 1 | 12761980 | <reponame>9LKQ7ZLC82/cccatalog
"""
Content Provider: Science Museum - Historical exhibits and artworks
ETL Process: Identify artworks that are available under a Creative
Commons license or in the public domain.
Output: TSV file containing images of artworks and their respective meta-data.
"""
from Provider import *
logging.basicConfig(format='%(asctime)s - %(name)s: [%(levelname)s - Science Museum UK] =======> %(message)s', level=logging.INFO)
class ScienceMuseum(Provider):
def __init__(self, _name, _domain, _cc_index):
Provider.__init__(self, _name, _domain, _cc_index)
def getMetaData(self, _html, _url):
"""
Parameters
------------------
_html: string
The HTML page that was extracted from Common Crawls WARC file.
_url: string
The url for the webpage.
Returns
------------------
            A list of tab separated strings containing the meta data that was extracted from the HTML,
            or None if no suitable license was found.
"""
soup = BeautifulSoup(_html, 'html.parser')
otherMetaData = {}
src = None
license = None
version = None
imageURL = None
tags = None
extracted = []
self.clearFields()
self.provider = self.name
self.source = 'commoncrawl'
#verify the license
licenseDetails = soup.find('div', {'class': 'cite__method'}) #soup.find('svg', {'class': 'icon icon-cc-zero'})
if licenseDetails:
imgLicense = licenseDetails.findChild('img') #licenseDetails.parent
if imgLicense and 'src' in imgLicense.attrs:
imgLicense = self.validateContent('', imgLicense, 'src').split('/')
                    # remove the 'cc-' prefix explicitly (str.lstrip('cc-') would strip characters, not the prefix)
                    imgLicense = re.sub(r'^cc-', '', imgLicense[len(imgLicense)-1].split('.')[0])
license = imgLicense.lower()
if not license:
logging.warning('License not detected in url: {}'.format(_url))
return None
self.license = license
url = soup.find('meta', {'property': 'og:url'})
if url:
self.foreignLandingURL = self.validateContent(_url, url, 'content')
#get the title
title = soup.find('meta', {'property': 'og:title'})
if title:
self.title = self.validateContent('', title, 'content')
#description/summary
description = soup.find('meta', {'property': 'og:description'})
if description:
otherMetaData['description'] = self.validateContent('', description, 'content')
#credits/attribution info
makerInfo = soup.find('dl', {'class': 'record-top__dl fact-maker'})
if makerInfo:
maker = makerInfo.findChild('a')
if maker:
makerName = maker.text.strip()
if makerName.lower() != 'unknown':
self.creator = makerName
if 'href' in maker.attrs:
self.creatorURL = self.validateContent('', maker, 'href')
#meta data
timeline = soup.find('dl', {'class': 'record-top__dl fact-Made'})
if timeline:
timeline = timeline.text.strip().replace('Made:', '').replace('Maker:', '').split('in')
if len(timeline) > 1:
otherMetaData['date'] = timeline[0].strip()
otherMetaData['geography'] = timeline[1].strip()
otherDetails = soup.find_all('dl', {'class': re.compile(r'(record-details.*?)')})
if otherDetails:
for detail in otherDetails:
key = detail.findChild('dt').text.strip().lower().replace(' ', '_')
val = detail.findChild('dd').text.strip()
otherMetaData[key] = val
records = soup.find_all('img', {'class': 'carousel__image'})
if not records:
records = soup.find_all('img', {'class': 'single_image'})
if otherMetaData:
self.metaData = otherMetaData
if records:
for record in records:
self.url = ''
if 'src' in record.attrs:
self.url = record.attrs['src'].strip()
elif 'data-flickity-lazyload' in record.attrs:
self.url = record.attrs['data-flickity-lazyload'].strip()
if self.url == '':
logging.warning('Image not detected in url: {}'.format(_url))
continue
extracted.extend(self.formatOutput)
return extracted
| 2.609375 | 3 |
components/collector/src/source_collectors/gitlab/merge_requests.py | Erik-Stel/quality-time | 0 | 12761981 | """GitLab merge requests collector."""
from typing import cast
from collector_utilities.functions import match_string_or_regular_expression
from collector_utilities.type import URL, Value
from source_model import Entities, Entity, SourceResponses
from .base import GitLabBase
class GitLabMergeRequests(GitLabBase):
"""Collector class to measure the number of merge requests."""
async def _api_url(self) -> URL:
"""Override to return the merge requests API."""
return await self._gitlab_api_url("merge_requests")
async def _landing_url(self, responses: SourceResponses) -> URL:
"""Extend to add the project branches."""
return URL(f"{str(await super()._landing_url(responses))}/{self._parameter('project')}/-/merge_requests")
async def _parse_entities(self, responses: SourceResponses) -> Entities:
"""Override to parse the merge requests."""
merge_requests = []
for response in responses:
merge_requests.extend(await response.json())
return Entities(self._create_entity(mr) for mr in merge_requests if self._include_merge_request(mr))
async def _parse_total(self, responses: SourceResponses) -> Value:
"""Override to parse the total number of merge requests."""
return str(sum([len(await response.json()) for response in responses]))
@staticmethod
def _create_entity(merge_request) -> Entity:
"""Create an entity from a GitLab JSON result."""
return Entity(
key=merge_request["id"],
title=merge_request["title"],
target_branch=merge_request["target_branch"],
url=merge_request["web_url"],
state=merge_request["state"],
created=merge_request.get("created_at"),
updated=merge_request.get("updated_at"),
merged=merge_request.get("merged_at"),
closed=merge_request.get("closed_at"),
downvotes=str(merge_request.get("downvotes", 0)),
upvotes=str(merge_request.get("upvotes", 0)),
)
def _include_merge_request(self, merge_request) -> bool:
"""Return whether the merge request should be counted."""
request_matches_state = merge_request["state"] in self._parameter("merge_request_state")
branches = self._parameter("target_branches_to_include")
target_branch = merge_request["target_branch"]
request_matches_branches = match_string_or_regular_expression(target_branch, branches) if branches else True
# If the required number of upvotes is zero, merge requests are included regardless of how many upvotes they
# actually have. If the required number of upvotes is more than zero then only merge requests that have fewer
# than the minimum number of upvotes are included in the count:
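        # A worked example: with upvotes set to 2, a merge request with 0 or 1 upvotes is still counted,
        # whereas a merge request with 2 or more upvotes is excluded from the measurement.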
required_upvotes = int(cast(str, self._parameter("upvotes")))
request_has_fewer_than_min_upvotes = required_upvotes == 0 or int(merge_request["upvotes"]) < required_upvotes
return request_matches_state and request_matches_branches and request_has_fewer_than_min_upvotes
| 2.625 | 3 |
halinuxcompanion/sensors/memory.py | muniter/halinuxcompanion | 7 | 12761982 | <reponame>muniter/halinuxcompanion
from types import MethodType
from halinuxcompanion.sensor import Sensor
import psutil
Memory = Sensor()
Memory.config_name = "memory"
Memory.attributes = {
"total": 0,
"available": 0,
"used": 0,
"free": 0,
}
Memory.device_class = "power_factor"
Memory.state_class = "measurement"
Memory.icon = "mdi:memory"
Memory.name = "Memory Load"
Memory.state = 0
Memory.type = "sensor"
Memory.unique_id = "memory_usage"
Memory.unit_of_measurement = "%"
def updater(self):
data = psutil.virtual_memory()
self.state = round((data.total - data.available) / data.total * 100, 1)
self.attributes["total"] = data.total / 1024
self.attributes["available"] = data.available / 1024
self.attributes["used"] = data.used / 1024
self.attributes["free"] = data.free / 1024
Memory.updater = MethodType(updater, Memory)
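

# Minimal usage sketch (not part of the upstream module, added for illustration):
# run the bound updater once and print the computed load and attributes (in KiB).
if __name__ == "__main__":
    Memory.updater()
    print(f"memory load: {Memory.state}{Memory.unit_of_measurement}")
    print(f"attributes (KiB): {Memory.attributes}")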
| 2.625 | 3 |
erpdevs/erpdevs/doctype/erf_operational_requirements/erf_operational_requirements.py | ONE-F-M/erpdevs | 0 | 12761983 | <filename>erpdevs/erpdevs/doctype/erf_operational_requirements/erf_operational_requirements.py
# -*- coding: utf-8 -*-
# Copyright (c) 2020, One FM and contributors
# For license information, please see license.txt
from __future__ import unicode_literals
import frappe
from frappe.model.document import Document
class ERFOperationalRequirements(Document):
pass
@frappe.whitelist()
def get_designation_data(designation):
try:
skills = frappe.db.sql("""select skill from `tabDesignation Skill` where parent = %s """, designation, as_dict = True)
profile = frappe.db.sql(""" select objective_type, objective, objective_definition, days from `tabPerformance Profile` where parent = %s """, designation, as_dict = True)
return skills, profile
except Exception as e:
frappe.msgprint(e)
| 1.976563 | 2 |
logdevice/ops/ldops/exceptions.py | majra20/LogDevice | 1,831 | 12761984 | <filename>logdevice/ops/ldops/exceptions.py<gh_stars>1000+
#!/usr/bin/env python3
# pyre-strict
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
"""
ldops.exceptions
~~~~~~~~~~~
Contains LDOps-wide exceptions.
"""
class LDOpsError(Exception):
"""
Generic error in LDOps
"""
pass
class NodeNotFoundError(LDOpsError):
"""
Raised when node not found
"""
pass
class NodeIsNotASequencerError(LDOpsError):
"""
    Raised when a node which is not a sequencer was used in a context
    expecting that it is a sequencer
"""
pass
class NodeIsNotAStorageError(LDOpsError):
"""
    Raised when a node which is not a storage node is used in a context
    expecting that it is a storage node
"""
pass
| 1.757813 | 2 |
website/apps/urls.py | MAAF72/Auto-Grader-Glitch | 0 | 12761985 | <reponame>MAAF72/Auto-Grader-Glitch
from django.urls import path
from . import views
app_name = 'apps'
urlpatterns = [
path('', views.index, name='index'),
path('login', views.login, name='login'),
path('register', views.register, name='register'),
path('logout', views.logout, name='logout'),
path('profile/<str:username>', views.profile, name='profile'),
path('problems', views.problems, name='problems'),
path('problems/manage/', views.problems_manage, name='problems_manage'),
path('problem/test_case/<int:id>', views.problem_test_case, name='problem_test_case'),
path('test_case/<int:id>/create/', views.test_case_create, name='test_case_create'),
path('test_case/edit/<int:id>/', views.test_case_edit, name='test_case_edit'),
path('test_case/delete/<int:id>/', views.test_case_delete, name='test_case_delete'),
path('problem/create', views.problem_create, name='problem_create'),
path('problem/edit/<int:id>', views.problem_edit, name='problem_edit'),
path('problem/delete/<int:id>', views.problem_delete, name='problem_delete'),
path('problem/<int:id>', views.problem, name='problem'),
path('submissions', views.submissions, name='submissions'),
path('submission/<int:id>', views.submission, name='submission')
]
| 2.125 | 2 |
ev3/async.py | inductivekickback/ev3 | 5 | 12761986 | <gh_stars>1-10
"""A simple thread subclass for making ev3 function calls asynchronous.
EXAMPLE USAGE:
import time
from ev3 import *
finished = False
def keep_alive_finished(result):
global finished
print 'The keep_alive() function returned: ', result
finished = True
if ("__main__" == __name__):
try:
async_thread = async.AsyncThread()
with ev3.EV3() as brick:
async_thread.put(brick.keep_alive, keep_alive_finished)
while (not finished):
print 'Waiting...'
time.sleep(0.1)
except ev3.EV3Error as ex:
print 'An error occurred: ', ex
async_thread.stop()
"""
import threading
import Queue
class AsyncThread(threading.Thread):
"""A simple thread subclass maintains a queue of functions to call."""
_STOP_QUEUE_ITEM = 'STOP'
def __init__(self):
"""Creates and starts a new thread."""
super(AsyncThread, self).__init__()
self._daemon = True
self._queue = Queue.Queue()
self.start()
def run(self):
"""This function is called automatically by the Thread class."""
try:
while(True):
item = self._queue.get(block=True)
if (self._STOP_QUEUE_ITEM == item):
break
ev3_func, cb, args, kwargs = item
cb(ev3_func(*args, **kwargs))
except KeyboardInterrupt:
pass
def stop(self):
"""Instructs the thread to exit after the current function is
finished.
"""
with self._queue.mutex:
self._queue.queue.clear()
self._queue.put(self._STOP_QUEUE_ITEM)
def put(self, ev3_func, cb, *args, **kwargs):
"""Adds a new function to the queue. The cb (callback) parameter should
be a function that accepts the result as its only parameter.
"""
self._queue.put((ev3_func, cb, args, kwargs))
| 3.953125 | 4 |
compyle/capture_stream.py | adityapb/compyle | 65 | 12761987 | import io
import os
import sys
from tempfile import mktemp
def get_ipython_capture():
try:
# This will work inside IPython but not outside it.
name = get_ipython().__class__.__name__
if name.startswith('ZMQ'):
from IPython.utils.capture import capture_output
return capture_output
else:
return None
except NameError:
return None
class CaptureStream(object):
"""A context manager which captures any errors on a given stream (like
sys.stderr). The stream is captured and the outputs can be used.
We treat sys.stderr and stdout specially as very often these are
overridden by nose or IPython. We always wrap the underlying file
descriptors in this case as this is the intent of this context manager.
This is somewhat based on this question:
http://stackoverflow.com/questions/7018879/disabling-output-when-compiling-with-distutils
Examples
--------
See the tests in tests/test_capture_stream.py for example usage.
"""
def __init__(self, stream=sys.stderr):
self.stream = stream
if stream is sys.stderr:
self.fileno = 2
elif stream is sys.stdout:
self.fileno = 1
else:
self.fileno = stream.fileno()
self.orig_stream = None
self.tmp_stream = None
self.tmp_path = ''
self._cached_output = None
def __enter__(self):
if sys.platform.startswith('win32') and sys.version_info[:2] > (3, 5):
return self
self.orig_stream = os.dup(self.fileno)
self.tmp_path = mktemp()
self.tmp_stream = io.open(self.tmp_path, 'w+', encoding='utf-8')
os.dup2(self.tmp_stream.fileno(), self.fileno)
return self
def __exit__(self, type, value, tb):
if sys.platform.startswith('win32') and sys.version_info[:2] > (3, 5):
return
if self.orig_stream is not None:
os.dup2(self.orig_stream, self.fileno)
if self.tmp_stream is not None:
self._cache_output()
self.tmp_stream.close()
os.remove(self.tmp_path)
def _cache_output(self):
if self._cached_output is not None:
return
tmp_stream = self.tmp_stream
result = ''
if tmp_stream is not None:
tmp_stream.flush()
tmp_stream.seek(0)
result = tmp_stream.read()
self._cached_output = result
def get_output(self):
"""Return the captured output.
"""
if self._cached_output is None:
self._cache_output()
return self._cached_output
class CaptureMultipleStreams(object):
"""This lets one capture multiple streams together.
"""
def __init__(self, streams=None):
streams = (sys.stdout, sys.stderr) if streams is None else streams
self.streams = streams
self.captures = [CaptureStream(x) for x in streams]
cap = get_ipython_capture()
if cap:
self.jcap = cap(stdout=True, stderr=True, display=True)
else:
self.jcap = None
self.joutput = None
def __enter__(self):
for capture in self.captures:
capture.__enter__()
if self.jcap:
self.joutput = self.jcap.__enter__()
return self
def __exit__(self, type, value, tb):
for capture in self.captures:
capture.__exit__(type, value, tb)
if self.jcap:
self.jcap.__exit__(type, value, tb)
def get_output(self):
out = list(x.get_output() for x in self.captures)
if self.joutput:
out[0] += self.joutput.stdout
out[1] += self.joutput.stderr
return out
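

# Minimal usage sketch (not part of the original module): capture both standard
# streams around some noisy calls and read back whatever was written to them.
if __name__ == '__main__':
    with CaptureMultipleStreams() as streams:
        print('this goes to stdout')
        sys.stderr.write('this goes to stderr\n')
    stdout_text, stderr_text = streams.get_output()
    print('captured stdout:', stdout_text.strip())
    print('captured stderr:', stderr_text.strip())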
| 2.828125 | 3 |
start.py | Jerrynicki/jrery-bot | 0 | 12761988 | <filename>start.py<gh_stars>0
import subprocess
import time
while True:
proc = subprocess.Popen("python3 main.py", shell=True) # for some reason this avoids a bug where the bot doesn't play anything when not started from a shell
proc.communicate()
print("Process ended. Restarting in 5s...")
time.sleep(5) | 2.203125 | 2 |
source/utils.py | sanixa/GS-WGAN-custom | 0 | 12761989 | <reponame>sanixa/GS-WGAN-custom
import os
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import numpy as np
import torch
from torch.utils.data.dataset import Dataset
from torchvision import datasets, transforms
import torchvision.utils as vutils
def mkdir(dir):
if not os.path.exists(dir):
os.makedirs(dir)
def savefig(fname, dpi=None):
dpi = 150 if dpi == None else dpi
plt.savefig(fname, dpi=dpi, format='png')
def inf_train_gen(trainloader):
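    # cycle over the dataloader forever so the training loop can simply call next() without tracking epochs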
while True:
for images, targets in iter(trainloader):
yield (images, targets)
def generate_image_mnist(iter, netG, fix_noise, save_dir, device, num_classes=10,
img_w=28, img_h=28):
batchsize = fix_noise.size()[0]
nrows = 10
ncols = num_classes
figsize = (ncols, nrows)
noise = fix_noise.to(device)
sample_list = []
for class_id in range(num_classes):
label = torch.full((nrows,), class_id, dtype=torch.long).to(device)
sample = netG(noise, label)
sample = sample.view(batchsize, img_w, img_h)
sample = sample.cpu().data.numpy()
sample_list.append(sample)
samples = np.transpose(np.array(sample_list), [1, 0, 2, 3])
samples = np.reshape(samples, [nrows * ncols, img_w, img_h])
samples = np.clip(samples, 0, 1)
plt.figure(figsize=figsize)
for i in range(nrows * ncols):
plt.subplot(nrows, ncols, i + 1)
plt.imshow(samples[i], cmap='gray')
plt.axis('off')
savefig(os.path.join(save_dir, 'samples_{}.png'.format(iter)))
del label, noise, sample
torch.cuda.empty_cache()
def generate_image_cifar10(iter, netG, fix_noise, save_dir, device, num_classes=10,
img_w=32, img_h=32):
batchsize = fix_noise.size()[0]
nrows = 10
ncols = num_classes
figsize = (ncols, nrows)
noise = fix_noise.to(device)
sample_list = []
for class_id in range(num_classes):
label = torch.full((nrows,), class_id, dtype=torch.long).to(device)
sample = netG(noise, label)
sample = sample.view(batchsize, img_w, img_h)
sample = sample.cpu().data.numpy()
sample_list.append(sample)
samples = np.transpose(np.array(sample_list), [1, 0, 2, 3])
samples = np.reshape(samples, [nrows * ncols, img_w, img_h])
samples = np.clip(samples, 0, 1)
plt.figure(figsize=figsize)
for i in range(nrows * ncols):
plt.subplot(nrows, ncols, i + 1)
plt.imshow(samples[i], cmap='gray')
plt.axis('off')
savefig(os.path.join(save_dir, 'samples_{}.png'.format(iter)))
del label, noise, sample
torch.cuda.empty_cache()
def generate_image_celeba(iter, netG, fixed_noise, save_dir, device, num_classes=2,
img_w=64, img_h=64):
nrows = 10
ncols = 10
figsize = (ncols, nrows)
noise = fixed_noise.to(device)
sample_list = []
with torch.no_grad():
fake = netG(fixed_noise).detach().cpu()
fake = np.clip(fake, 0 , 1)
plt.figure(figsize=figsize)
for i in range(nrows * ncols):
plt.subplot(nrows, ncols, i + 1)
plt.imshow(np.transpose(fake[i],(1,2,0)))
plt.axis('off')
savefig(os.path.join(save_dir, 'samples_{}.png'.format(iter)))
del noise, fake
torch.cuda.empty_cache()
def get_device_id(id, num_discriminators, num_gpus):
partitions = np.linspace(0, 1, num_gpus, endpoint=False)[1:]
device_id = 0
for p in partitions:
if id <= num_discriminators * p:
break
device_id += 1
return device_id
| 2.15625 | 2 |
beastx/__main__.py | attack1991/Gareey | 1 | 12761990 | import logging
from pathlib import Path
from sys import argv
import var
import telethon.utils
from telethon import TelegramClient
from telethon import events,Button
import os
from var import Var
from . import beast
from telethon.tl import functions
from beastx.Configs import Config
from telethon.tl.functions.messages import AddChatUserRequest
from telethon.tl.functions.users import GetFullUserRequest
from telethon.tl.functions.channels import LeaveChannelRequest
from telethon.tl.functions.account import UpdateProfileRequest
from beastx.utils import load_module, start_assistant
import asyncio
from . import bot,sedmrunal
bot = beast
#rom . import semxx,semxxx
#####################################
plugin_channel = "@BeastX_Plugins"
#####################################
sur = Config.PRIVATE_GROUP_ID
UL = Config.TG_BOT_USER_NAME_BF_HER
VR = "Beast 0.1"
chat_id = sur
MSG = f"""
✨𝔹𝕖𝕒𝕤𝕥 ℍ𝕒𝕤 𝔹𝕖𝕖𝕟 𝔻𝕖𝕡𝕝𝕠𝕪𝕖𝕕!
☟︎︎︎ ☟︎︎︎ ☟︎︎︎ ☟︎︎︎ ☟︎︎︎ ☟︎︎︎ ☟︎︎︎ ☟︎︎︎ ☟︎︎︎
┏━━━━━━━━━━━━━━━━━
┣•Assistant➠ {UL}
┣•Status➠ `Running`
┣•Version➠ {VR}
┗━━━━━━━━━━━━━━━━━
Do `.ping` or `/alive` to check that the userbot is working
"""
sed = logging.getLogger("beastx")
async def add_bot(bot_token):
await bot.start(bot_token)
bot.me = await bot.get_me()
bot.uid = telethon.utils.get_peer_id(bot.me)
await sedmrunal.send_message(sur, MSG,
buttons=[
[Button.url("⭐Updates", url="https://t.me/BeastX_Userbot")],
[ Button.url("⚡Support",url="https://t.me/BeastX_Support")]
])
await beast(functions.channels.JoinChannelRequest(channel="@BeastX_Userbot"))
await beast(functions.channels.JoinChannelRequest(channel="@BeastX_Support"))
if len(argv) not in (1, 3, 4):
bot.disconnect()
else:
bot.tgbot = None
if Var.TG_BOT_USER_NAME_BF_HER is not None:
bot.tgbot = TelegramClient(
"TG_BOT_TOKEN", api_id=Var.APP_ID, api_hash=Var.API_HASH
).start(bot_token=Var.TG_BOT_TOKEN_BF_HER)
bot.loop.run_until_complete(add_bot(Var.TG_BOT_USER_NAME_BF_HER))
else:
bot.start()
async def a():
sed.info("Connecting...") ;
o = ""
la = 0
try:
await bot.start() ; sed.info("beastx connected") ; o = "client"
except:
sed.info("Telegram String Session Wrong or Expired Please Add new one ") ; quit(1)
import glob
async def a():
test1 = await bot.get_messages(plugin_channel, None , filter=InputMessagesFilterDocument) ; total = int(test1.total) ; total_doxx = range(0, total)
for ixo in total_doxx:
mxo = test1[ixo].id ; await bot.download_media(await bot.get_messages(client, ids=mxo), "beastx/modules/")
ar = glob.glob("beastx/modules/*.py")
f = len(ar)
sed.info(f" loading {f} modules it may take 1 minute please wait")
for i in ar:
br = os.path.basename(i)
cr = (os.path.splitext(br)[0])
import_module(f"beastx.modules.{cr}")
la += 1
sed.info(f" loaded {la}/{f} modules")
path = "beastx/modules/*.py"
files = glob.glob(path)
for name in files:
with open(name) as f:
path1 = Path(f.name)
shortname = path1.stem
load_module(shortname.replace(".py", ""))
if Config.ENABLE_ASSISTANTBOT == "ENABLE":
path = "beastx/modules/assistant/*.py"
files = glob.glob(path)
for name in files:
with open(name) as f:
path1 = Path(f.name)
shortname = path1.stem
start_assistant(shortname.replace(".py", ""))
sed.info("beastx And Assistant Bot Have Been Installed Successfully !")
sed.info("---------------------------------------")
sed.info("------------@BeastX_Userbot------------")
sed.info("---------------------------------------")
else:
sed.info("beastx Has Been Installed Sucessfully !")
sed.info("Hope you will enjoy")
#await bot.send_message(chat_id,MSG)
#else:
# sed.info("your Get_Msg disable")
if len(argv) not in (1, 3, 4):
bot.disconnect()
else:
bot.run_until_disconnected()
| 1.921875 | 2 |
setup.py | akush07/Forest-Fire-Prediction | 0 | 12761991 | from setuptools import find_packages, setup
setup(
name='src',
packages=find_packages(),
version='0.1.0',
description='The Aim is to predict forest fire before it happens based on dataset that contains tree observations from four areas of the Roosevelt National Forest in Colorado. All observations are cartographic variables (no remote sensing) from 30 meter x 30 meter sections of forest. There are over half a million measurements total!',
author='<NAME>',
license='MIT',
)
| 1.078125 | 1 |
test/federated_training/logistic_regression/he_otp_lr_ft2/test_coordinator.py | yuzhangTD/ibond_flex | 31 | 12761992 | <filename>test/federated_training/logistic_regression/he_otp_lr_ft2/test_coordinator.py<gh_stars>10-100
from flex.constants import HE_OTP_LR_FT2
from flex.api import make_protocol
from test.fed_config_example import fed_conf_coordinator
def test_he_otp_lr_ft2():
federal_info = fed_conf_coordinator
sec_param = {
"he_algo": 'paillier',
"he_key_length": 1024
}
trainer = make_protocol(HE_OTP_LR_FT2, federal_info, sec_param)
trainer.exchange()
| 1.875 | 2 |
refinery/units/meta/xfcc.py | jhhcs/refinery | 1 | 12761993 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
import collections
from refinery.units import arg, Unit
class xfcc(Unit):
"""
The cross frame chunk count unit! It computes the number of times a chunk occurs across several frames
of input. It consumes all frames in the current and counts the number of times each item occurs. It
converts a frame tree of depth 2 into a new frame tree of depth 2 where the parent of every leaf has
this leaf as its only child. The leaves of this tree have been enriched with a meta variable containing
the number of times the corresponding chunk has occurred in the input frame tree.
"""
def __init__(
self,
variable: arg(help='The variable which is used as the accumulator') = 'count',
relative: arg.switch('-r', help='Normalize the accumulator to a number between 0 and 1.') = False
):
super().__init__(variable=variable, relative=relative)
self._trunk = None
self._store = collections.defaultdict(int)
def finish(self):
if self.args.relative and self._store:
maximum = max(self._store.values())
for k, (chunk, count) in enumerate(self._store.items()):
if self.args.relative:
count /= maximum
chunk._meta[self.args.variable] = count
chunk._path = chunk.path[:-2] + (0, k)
yield chunk
self._store.clear()
def _getcount(self, chunk):
try:
count = int(chunk.meta[self.args.variable])
except (AttributeError, KeyError, TypeError):
return 1
else:
return count
def filter(self, chunks):
it = iter(chunks)
try:
head = next(it)
except StopIteration:
return
if len(head.path) < 2:
self.log_warn(F'the current frame is nested {len(head.path)} layers deep, at least two layers are required.')
yield head
for item in it:
self.log_debug(repr(item))
yield item
return
trunk = head.path[:-2]
store = self._store
if trunk != self._trunk:
yield from self.finish()
self._trunk = trunk
store[head] += self._getcount(head)
for chunk in it:
store[chunk] += self._getcount(chunk)
| 2.8125 | 3 |
anime_search/__init__.py | BotDevGroup/anime_search | 1 | 12761994 | # -*- coding: utf-8 -*-
__author__ = '<NAME>'
__email__ = '<EMAIL>'
__version__ = '0.1.0'
from anime_search.base import AnimeSearch
plugin = AnimeSearch()
| 1.195313 | 1 |
web/views.py | felmats/bill | 0 | 12761995 | <reponame>felmats/bill
from django.shortcuts import render
from django.http import HttpResponse
from web.models import Company
def company_list(request):
"""一覧"""
#return HttpResponse("一覧")
#companies = Company.objects.all().order_by('company_cd')
return render(request,
'web/company_list.html',
)
def company_edit(request, company_cd=None):
"""編集"""
return HttpResponse('編集')
def company_del(request, company_cd):
"""削除"""
return HttpResponse('削除')
| 2.046875 | 2 |
rr/migrations/0050_serviceprocider_add_jwks_and_jwks_uri.py | UniversityofHelsinki/sp-registry | 0 | 12761996 | # Generated by Django 2.2.7 on 2019-11-20 09:08
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('rr', '0049_statistics'),
]
operations = [
migrations.AlterModelOptions(
name='statistics',
options={'ordering': ['-date']},
),
migrations.AddField(
model_name='serviceprovider',
name='jwks',
field=models.TextField(blank=True, verbose_name='JSON Web Key Set'),
),
migrations.AddField(
model_name='serviceprovider',
name='jwks_uri',
field=models.URLField(blank=True, max_length=255, verbose_name='URL for the JSON Web Key Set'),
),
]
| 1.609375 | 2 |
QPT_client/Python/Lib/site-packages/qpt/memory.py | Scxw010516/Smart_container | 150 | 12761997 | # Author: <NAME>
# Datetime:2021/7/3
# Copyright belongs to the author.
# Please indicate the source for reprinting.
import platform
import os
from distutils.sysconfig import get_python_lib
from qpt.kernel.qlog import Logging
def init_wrapper(var=True):
def i_wrapper(func):
if var:
@property
def render(self):
if func.__name__ in self.memory:
out = self.memory[func.__name__]
else:
out = func(self)
self.memory[func.__name__] = out
return out
else:
def render(self, *args, **kwargs):
if func.__name__ in self.memory:
out = self.memory[func.__name__]
else:
out = func(self, *args, **kwargs)
self.memory[func.__name__] = out
return out
return render
return i_wrapper
class QPTMemory:
def __init__(self):
self.memory = dict()
def set_mem(self, name, variable):
self.memory[name] = variable
return variable
def free_mem(self, name):
self.memory.pop(name)
@init_wrapper()
def platform_bit(self):
arc = platform.machine()
Logging.debug(f"操作系统位数:{arc}")
return arc
@init_wrapper()
def platform_os(self):
p_os = platform.system()
Logging.debug(f"操作系统类型:{p_os}")
return p_os
@init_wrapper()
def site_packages_path(self):
site_package_path = os.path.abspath(get_python_lib())
return site_package_path
@init_wrapper()
def pip_tool(self):
from qpt.kernel.qinterpreter import PipTools
pip_tools = PipTools()
return pip_tools
@init_wrapper()
def get_win32con(self):
import win32con
return win32con
@init_wrapper()
def get_win32api(self):
import win32api
return win32api
@init_wrapper(var=False)
def get_env_vars(self, work_dir="."):
return get_env_vars(work_dir)
QPT_MEMORY = QPTMemory()
def check_bit():
arc = QPT_MEMORY.platform_bit
assert "64" in arc, "当前QPT不支持32位操作系统"
def check_os():
p_os = QPT_MEMORY.platform_os
assert "Windows" in p_os, "当前QPT只支持Windows系统"
IGNORE_ENV_FIELD = ["conda", "Conda", "Python", "python"]
def get_env_vars(work_dir="."):
"""
    Get the dictionary of environment variables that are about to be set.
:param work_dir:
:return: dict
"""
env_vars = dict()
# Set PATH ENV
path_env = os.environ.get("PATH").split(";")
pre_add_env = os.path.abspath("./Python/Lib/site-packages") + ";" + \
os.path.abspath("./Python/Lib") + ";" + \
os.path.abspath("./Python/Lib/ext") + ";" + \
os.path.abspath("./Python") + ";" + \
os.path.abspath("./Python/Scripts") + ";"
for pe in path_env:
if pe:
add_flag = True
for ief in IGNORE_ENV_FIELD:
if ief in pe:
add_flag = False
break
if add_flag:
pre_add_env += pe + ";"
env_vars["PATH"] = pre_add_env + \
"%SYSTEMROOT%/System32/WindowsPowerShell/v1.0;" + \
"C:/Windows/System32/WindowsPowerShell/v1.0;" + \
"%ProgramFiles%/WindowsPowerShell/Modules;" + \
"%SystemRoot%/system32/WindowsPowerShell/v1.0/Modules;" + \
f"{os.path.join(os.path.abspath(work_dir), 'opt/CUDA')};"
# Set PYTHON PATH ENV
env_vars["PYTHONPATH"] = os.path.abspath("./Python/Lib/site-packages") + ";" + \
work_dir + ";" + \
os.path.abspath("./Python")
os_env = os.environ.copy()
os_env.update(env_vars)
if QPT_MODE and QPT_MODE.lower() == "debug":
Logging.debug(msg="Python所识别到的环境变量如下:\n" +
"".join([_ek + ":" + _e_v + " \n" for _ek, _ev in env_vars.items()
for _e_v in _ev.split(";")]))
return os_env
PYTHON_IGNORE_DIRS = [".idea", ".git", ".github", "venv"]
# Python packages that are ignored
IGNORE_PACKAGES = ["virtualenv", "pip", "setuptools", "cpython"]
# QPT run mode: Run/Debug
QPT_MODE = os.getenv("QPT_MODE")
# Run mode detected by QPT: Run/local Run - used to warn developers and avoid package bloat
QPT_RUN_MODE = None
class CheckRun:
@staticmethod
def make_run_file(configs_path):
with open(os.path.join(configs_path, "run_act.lock"), "w") as f:
f.write("Run Done")
@staticmethod
def check_run_file(configs_path):
global QPT_RUN_MODE
if QPT_RUN_MODE is None:
QPT_RUN_MODE = os.path.exists(os.path.join(configs_path, "run_act.lock"))
return QPT_RUN_MODE
def check_all():
    # check the operating system
    check_os()
    # check the architecture
    check_bit()
check_all()
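

# Illustrative note (not part of the original module): accessors decorated with
# init_wrapper are memoized in QPT_MEMORY.memory, so repeated reads reuse the
# first result instead of probing the system again, e.g.:
#   QPT_MEMORY.platform_os              # first access calls platform.system()
#   QPT_MEMORY.memory['platform_os']    # later reads come from this cache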
| 2.15625 | 2 |
examples/calc/v4/calc.py | apalala/grako | 21 | 12761998 | # -*- coding: utf-8 -*-
# Copyright (C) 2017 by <NAME>
# Copyright (C) 2012-2016 by <NAME> and <NAME>
# this is calc.py
from __future__ import print_function
import sys
from calc_parser import CalcParser
class CalcSemantics(object):
def number(self, ast):
return int(ast)
def addition(self, ast):
return ast.left + ast.right
def subtraction(self, ast):
return ast.left - ast.right
def multiplication(self, ast):
return ast.left * ast.right
def division(self, ast):
return ast.left / ast.right
def calc(text):
parser = CalcParser(semantics=CalcSemantics())
return parser.parse(text)
if __name__ == '__main__':
text = open(sys.argv[1]).read()
result = calc(text)
print(text.strip(), '=', result)
| 3.421875 | 3 |
pycharm-remap/frictionless/remap.py | indika/frictionless | 0 | 12761999 | #!/usr/bin/python
"""Remap
Remaps Python keyboard mapping files between OSX and Windows
Usage:
remap.py
remap.py to_osx <osx_file> <win_file>
remap.py to_windows <osx_file> <win_file>
Options:
-h --help Show this screen.
--version Show version.
"""
__author__ = "<NAME>"
import os
import sys
import glob
import logging
from docopt import docopt
from map_keyboard import MapKeyboardLayoutFile
logger = logging.getLogger(__name__)
class Remap:
def __init__(self):
self.configure_logging()
self.win_file = 'Win_Pycharm_Frictionless.xml'
self.osx_file = 'OSX_Pycharm_Frictionless.xml'
self.keyboard_mapper = MapKeyboardLayoutFile()
def convert(self):
arguments = docopt(__doc__, version='Remap 0.1')
self.win_file = arguments['<win_file>']
self.osx_file = arguments['<osx_file>']
logger.info('Using Windows file: {0}'.format(self.win_file))
logger.info('Using OSX file: {0}'.format(self.osx_file))
if arguments['to_osx']:
self.convert_to_osx()
elif arguments['to_windows']:
self.convert_to_windows()
else:
logger.info('Determining translation path from date modified...')
return
self.last_file_updated()
pass
def convert_to_windows(self):
logger.info('Converting OSX -> Windows')
source_file = self.osx_file
target_file = self.win_file
self.keyboard_mapper.convert_to_windows(source_file, target_file)
os.utime(target_file, (self.atime(source_file), self.mtime(source_file)))
def convert_to_osx(self):
logger.info('Converting Windows -> OSX')
source_file = self.win_file
target_file = self.osx_file
self.keyboard_mapper.convert_to_osx(source_file, target_file)
os.utime(target_file, (self.atime(source_file), self.mtime(source_file)))
def last_file_updated(self):
"""
Attempt to figure out which file is the source based on last modified
time
"""
query = '*.xml'
keymap_files = glob.glob(query)
sorted_files = sorted(keymap_files, key=self.mtime, reverse=1)
last_modified_file = sorted_files[0]
second_last_modified_file = sorted_files[1]
t1 = self.mtime(last_modified_file)
t2 = self.mtime(second_last_modified_file)
logger.debug('Last modified time: {0}'.format(t1))
logger.debug('Second Last modified time: {0}'.format(t2))
last_modified_time = self.mtime(last_modified_file)
last_access_time = self.atime(last_modified_file)
if sys.platform == "win32":
logger.info('Detected Windows environment')
# self.regenerate_osx(last_access_time, last_modified_time)
elif sys.platform == 'darwin':
logger.info('Detected OSX environment')
# self.regenerate_windows(last_access_time, last_modified_time)
else:
logger.error('Unhandled platform: {0}'.format(sys.platform))
pass
@staticmethod
def configure_logging():
logger.setLevel(logging.DEBUG)
ch = logging.StreamHandler()
ch.setLevel(logging.DEBUG)
formatter = logging.Formatter(
'%(asctime)s - %(name)s - %(levelname)s - %(message)s')
ch.setFormatter(formatter)
logger.addHandler(ch)
pass
def mtime(self, filename):
return os.stat(filename).st_mtime
def atime(self, filename):
return os.stat(filename).st_atime
if __name__ == "__main__":
print "Running Remap in stand-alone-mode"
remap = Remap()
remap.convert()
| 3.25 | 3 |
source/knowledge/python/example_python/HzuNews.py | Eugene-Forest/NoteBook | 1 | 12762000 | # Used to store a single news item fetched from the Huizhou University (HZU) news site
class HzuNews:
"""一个简单的新闻信息数据结构"""
def __init__(self, title, link, time):
self.title = title
self.link = link
self.time = time
def get_title(self):
return self.title
| 2.71875 | 3 |
cert_issuer/certificate_handler.py | NunoEdgarGFlowHub/cert-issuer | 0 | 12762001 | <reponame>NunoEdgarGFlowHub/cert-issuer
import json
from abc import abstractmethod
from cert_schema import normalize_jsonld
from cert_schema import validate_v2
from cert_issuer.signer import FinalizableSigner
class CertificateHandler(object):
@abstractmethod
def validate_certificate(self, certificate_metadata):
pass
@abstractmethod
def sign_certificate(self, signer, certificate_metadata):
pass
@abstractmethod
def get_byte_array_to_issue(self, certificate_metadata):
pass
@abstractmethod
def add_proof(self, certificate_metadata, merkle_proof):
pass
class CertificateV2Handler(CertificateHandler):
def validate_certificate(self, certificate_metadata):
with open(certificate_metadata.unsigned_cert_file_name) as cert:
certificate_json = json.load(cert)
# Both tests raise exception on failure
# 1. json schema validation
validate_v2(certificate_json)
# 2. detect if there are any unmapped fields
normalize_jsonld(certificate_json, detect_unmapped_fields=True)
def sign_certificate(self, signer, certificate_metadata):
pass
def get_byte_array_to_issue(self, certificate_metadata):
certificate_json = self._get_certificate_to_issue(certificate_metadata)
normalized = normalize_jsonld(certificate_json, detect_unmapped_fields=False)
return normalized.encode('utf-8')
def add_proof(self, certificate_metadata, merkle_proof):
"""
:param certificate_metadata:
:param merkle_proof:
:return:
"""
certificate_json = self._get_certificate_to_issue(certificate_metadata)
certificate_json['signature'] = merkle_proof
with open(certificate_metadata.blockchain_cert_file_name, 'w') as out_file:
out_file.write(json.dumps(certificate_json))
def _get_certificate_to_issue(self, certificate_metadata):
with open(certificate_metadata.unsigned_cert_file_name, 'r') as unsigned_cert_file:
certificate_json = json.load(unsigned_cert_file)
return certificate_json
class CertificateBatchHandler(object):
"""
Manages a batch of certificates. Responsible for iterating certificates in a consistent order.
In this case, certificates are initialized as an Ordered Dictionary, and we iterate in insertion order.
"""
def __init__(self, secret_manager, certificate_handler, merkle_tree):
self.certificate_handler = certificate_handler
self.secret_manager = secret_manager
self.merkle_tree = merkle_tree
def set_certificates_in_batch(self, certificates_to_issue):
self.certificates_to_issue = certificates_to_issue
def prepare_batch(self):
"""
Propagates exception on failure
:return: byte array to put on the blockchain
"""
# validate batch
for _, metadata in self.certificates_to_issue.items():
self.certificate_handler.validate_certificate(metadata)
# sign batch
with FinalizableSigner(self.secret_manager) as signer:
for _, metadata in self.certificates_to_issue.items():
self.certificate_handler.sign_certificate(signer, metadata)
self.merkle_tree.populate(self.get_certificate_generator())
return self.merkle_tree.get_blockchain_data()
def get_certificate_generator(self):
"""
Returns a generator (1-time iterator) of certificates in the batch
:return:
"""
for uid, metadata in self.certificates_to_issue.items():
data_to_issue = self.certificate_handler.get_byte_array_to_issue(metadata)
yield data_to_issue
def finish_batch(self, tx_id, chain):
proof_generator = self.merkle_tree.get_proof_generator(tx_id, chain)
for uid, metadata in self.certificates_to_issue.items():
proof = next(proof_generator)
self.certificate_handler.add_proof(metadata, proof)
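

# Illustrative call order (a sketch, not part of the module; secret_manager,
# merkle_tree, certificates_to_issue, tx_id and chain are hypothetical objects):
#   handler = CertificateBatchHandler(secret_manager, CertificateV2Handler(), merkle_tree)
#   handler.set_certificates_in_batch(certificates_to_issue)   # ordered dict of certificate metadata
#   blockchain_bytes = handler.prepare_batch()                  # validate, sign, build the Merkle root data
#   ... issue blockchain_bytes in a transaction to obtain tx_id ...
#   handler.finish_batch(tx_id, chain)                          # write a Merkle proof into each certificate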
| 2.46875 | 2 |
cmsplugin_bootstrap_grid/models.py | movermeyer/cmsplugin-bootstrap | 0 | 12762002 | <gh_stars>0
# coding: utf-8
from cms.models import CMSPlugin
from cmsplugin_bootstrap_grid.utils import HtmlAttributeDict
from django.conf import settings
from django.db import models
from django.utils.translation import ugettext as _
CONFIG = {'COLUMNS': 12}
CONFIG.update(getattr(settings, 'CMSPLUGIN_GRID_CONFIG', {}))
SIZE_XS_CHOICES = [('%s' % i, 'col-xs-%s' % i) for i in range(1, CONFIG['COLUMNS'] + 1)]
SIZE_SM_CHOICES = [('%s' % i, 'col-sm-%s' % i) for i in range(1, CONFIG['COLUMNS'] + 1)]
SIZE_MD_CHOICES = [('%s' % i, 'col-md-%s' % i) for i in range(1, CONFIG['COLUMNS'] + 1)]
SIZE_LG_CHOICES = [('%s' % i, 'col-lg-%s' % i) for i in range(1, CONFIG['COLUMNS'] + 1)]
SIZE_XS_OFFSET_CHOICES = [('%s' % i, 'col-xs-offset-%s' % i) for i in range(0, CONFIG['COLUMNS'] + 1)]
SIZE_SM_OFFSET_CHOICES = [('%s' % i, 'col-sm-offset-%s' % i) for i in range(0, CONFIG['COLUMNS'] + 1)]
SIZE_MD_OFFSET_CHOICES = [('%s' % i, 'col-md-offset-%s' % i) for i in range(0, CONFIG['COLUMNS'] + 1)]
SIZE_LG_OFFSET_CHOICES = [('%s' % i, 'col-lg-offset-%s' % i) for i in range(0, CONFIG['COLUMNS'] + 1)]
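# e.g. with the default 12-column grid, SIZE_MD_CHOICES runs from ('1', 'col-md-1') to ('12', 'col-md-12')
# and SIZE_MD_OFFSET_CHOICES from ('0', 'col-md-offset-0') to ('12', 'col-md-offset-12')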
class Row(CMSPlugin):
css_classes = models.CharField(
_('css classes'), max_length=200, blank=True,
help_text=_("Add extra classes to bootstrap row. (Separate classes with space)"))
def _get_attrs(self):
if not hasattr(self, '_cached_attrs'):
self._cached_attrs = HtmlAttributeDict({"class": "row"})
self._cached_attrs.add_class(self.css_classes)
return self._cached_attrs
attrs = property(_get_attrs)
def __unicode__(self):
return ''
class Column(CMSPlugin):
size_xs = models.CharField(
_('Size xs'), choices=SIZE_XS_CHOICES,
default=None, max_length=50, null=True, blank=True,
help_text=_("Extra small devices Phones (<768px)"))
size_sm = models.CharField(
_('Size sm'), choices=SIZE_SM_CHOICES,
default=None, max_length=50, null=True, blank=True,
help_text=_("Small devices Tablets (≥768px)"))
size_md = models.CharField(
_('Size md'), choices=SIZE_MD_CHOICES,
default=None, max_length=50, null=True, blank=True,
help_text=_("Medium devices Desktops (≥992px)"))
size_lg = models.CharField(
_('Size lg'), choices=SIZE_LG_CHOICES,
default=None, max_length=50, null=True, blank=True,
help_text=_("Large devices Desktops (≥1200px)"))
size_offset_xs = models.CharField(
_('Offset xs'), choices=SIZE_XS_OFFSET_CHOICES,
default=None, max_length=50, null=True, blank=True,
help_text=_("Extra small devices Phones (<768px)"))
size_offset_sm = models.CharField(
_('Offset sm'), choices=SIZE_SM_OFFSET_CHOICES,
default=None, max_length=50, null=True, blank=True,
help_text=_("Small devices Tablets (≥768px)"))
size_offset_md = models.CharField(
_('Offset md'), choices=SIZE_MD_OFFSET_CHOICES,
default=None, max_length=50, null=True, blank=True,
help_text=_("Medium devices Desktops (≥992px)"))
size_offset_lg = models.CharField(
_('Offset lg'), choices=SIZE_LG_OFFSET_CHOICES,
default=None, max_length=50, null=True, blank=True,
help_text=_("Large devices Desktops (≥1200px)"))
css_classes = models.CharField(
_('css classes'), max_length=200, blank=True,
help_text=_("Add extra classes to bootstrap column. (Separate classes with space)"))
def _get_attrs(self):
if not hasattr(self, '_cached_attrs'):
self._cached_attrs = HtmlAttributeDict()
self._cached_attrs.add_class(self.css_classes)
self._cached_attrs.add_class(self.get_size_xs_display())
self._cached_attrs.add_class(self.get_size_sm_display())
self._cached_attrs.add_class(self.get_size_md_display())
self._cached_attrs.add_class(self.get_size_lg_display())
self._cached_attrs.add_class(self.get_size_offset_xs_display())
self._cached_attrs.add_class(self.get_size_offset_sm_display())
self._cached_attrs.add_class(self.get_size_offset_md_display())
self._cached_attrs.add_class(self.get_size_offset_lg_display())
return self._cached_attrs
attrs = property(_get_attrs)
def __unicode__(self):
return self.attrs['class'] | 1.851563 | 2 |
makeFeatureVecsForChrArmFromVcf_ogSHIC.py | jradrion/diploSHIC | 0 | 12762003 | import os
import allel
import h5py
import numpy as np
import sys
import time
from fvTools import *
if not len(sys.argv) in [13,15]:
sys.exit("usage:\npython makeFeatureVecsForChrArmFromVcf_ogSHIC.py chrArmFileName chrArm chrLen targetPop winSize numSubWins maskFileName sampleToPopFileName ancestralArmFaFileName statFileName outFileName [segmentStart segmentEnd]\n")
if len(sys.argv) == 15:
chrArmFileName, chrArm, chrLen, targetPop, winSize, numSubWins, maskFileName, unmaskedFracCutoff, sampleToPopFileName, ancestralArmFaFileName, statFileName, outfn, segmentStart, segmentEnd = sys.argv[1:]
segmentStart, segmentEnd = int(segmentStart), int(segmentEnd)
else:
chrArmFileName, chrArm, chrLen, targetPop, winSize, numSubWins, maskFileName, unmaskedFracCutoff, sampleToPopFileName, ancestralArmFaFileName, statFileName, outfn = sys.argv[1:]
segmentStart = None
unmaskedFracCutoff = float(unmaskedFracCutoff)
chrLen, winSize, numSubWins = int(chrLen), int(winSize), int(numSubWins)
assert winSize % numSubWins == 0 and numSubWins > 1
subWinSize = int(winSize/numSubWins)
def getSubWinBounds(chrLen, subWinSize):
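    # e.g. chrLen=10, subWinSize=3 -> [(1, 3), (4, 6), (7, 9)]; coordinates are 1-based and the trailing partial window is dropped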
lastSubWinEnd = chrLen - chrLen % subWinSize
lastSubWinStart = lastSubWinEnd - subWinSize + 1
subWinBounds = []
for subWinStart in range(1, lastSubWinStart+1, subWinSize):
subWinEnd = subWinStart + subWinSize - 1
subWinBounds.append((subWinStart, subWinEnd))
return subWinBounds
def getSnpIndicesInSubWins(subWinSize, lastSubWinEnd, snpLocs):
subWinStart = 1
subWinEnd = subWinStart + subWinSize - 1
snpIndicesInSubWins = [[]]
for i in range(len(snpLocs)):
while snpLocs[i] <= lastSubWinEnd and not (snpLocs[i] >= subWinStart and snpLocs[i] <= subWinEnd):
subWinStart += subWinSize
subWinEnd += subWinSize
snpIndicesInSubWins.append([])
if snpLocs[i] <= lastSubWinEnd:
snpIndicesInSubWins[-1].append(i)
while subWinEnd < lastSubWinEnd:
snpIndicesInSubWins.append([])
subWinStart += subWinSize
subWinEnd += subWinSize
return snpIndicesInSubWins
chrArmFile = allel.read_vcf(chrArmFileName)
chroms = chrArmFile["variants/CHROM"]
positions = np.extract(chroms == chrArm, chrArmFile["variants/POS"])
if maskFileName.lower() in ["none", "false"]:
sys.stderr.write("Warning: a mask.fa file for the chr arm with all masked sites N'ed out is strongly recommended" +
" (pass in the reference to remove Ns at the very least)!\n")
unmasked = [True] * chrLen
else:
unmasked = readMaskDataForScan(maskFileName, chrArm)
assert len(unmasked) == chrLen
if statFileName.lower() in ["none", "false"]:
statFileName = None
samples = chrArmFile["samples"]
if not sampleToPopFileName.lower() in ["none", "false"]:
sampleToPop = readSampleToPopFile(sampleToPopFileName)
sampleIndicesToKeep = [i for i in range(len(samples)) if sampleToPop.get(samples[i], "popNotFound!") == targetPop]
else:
sampleIndicesToKeep = [i for i in range(len(samples))]
rawgenos = np.take(chrArmFile["calldata/GT"], [i for i in range(len(chroms)) if chroms[i] == chrArm], axis=0)
genos = allel.GenotypeArray(rawgenos)
refAlleles = np.extract(chroms == chrArm, chrArmFile['variants/REF'])
altAlleles = np.extract(chroms == chrArm, chrArmFile['variants/ALT'])
if segmentStart != None:
snpIndicesToKeep = [i for i in range(len(positions)) if segmentStart <= positions[i] <= segmentEnd]
positions = [positions[i] for i in snpIndicesToKeep]
refAlleles = [refAlleles[i] for i in snpIndicesToKeep]
altAlleles = [altAlleles[i] for i in snpIndicesToKeep]
genos = allel.GenotypeArray(genos.subset(sel0=snpIndicesToKeep))
genos = allel.GenotypeArray(genos.subset(sel1=sampleIndicesToKeep))
alleleCounts = genos.count_alleles()
#remove all but mono/biallelic unmasked sites
isBiallelic = alleleCounts.is_biallelic()
for i in range(len(isBiallelic)):
if not isBiallelic[i]:
unmasked[positions[i]-1] = False
#polarize
if not ancestralArmFaFileName.lower() in ["none", "false"]:
sys.stderr.write("polarizing snps\n")
ancArm = readFaArm(ancestralArmFaFileName, chrArm).upper()
    startTime = time.perf_counter()
#NOTE: mapping specifies which alleles to swap counts for based on polarization; leaves unpolarized snps alone
#NOTE: those snps need to be filtered later on (as done below)!
# this will also remove sites that could not be polarized
mapping, unmasked = polarizeSnps(unmasked, positions, refAlleles, altAlleles, ancArm)
sys.stderr.write("took %s seconds\n" %(time.clock()-startTime))
statNames = ["pi", "thetaW", "tajD", "thetaH", "fayWuH", "maxFDA", "HapCount", "H1", "H12", "H2/H1", "ZnS", "Omega", "distVar", "distSkew", "distKurt"]
else:
statNames = ["pi", "thetaW", "tajD", "HapCount", "H1", "H12", "H2/H1", "ZnS", "Omega", "distVar", "distSkew", "distKurt"]
snpIndicesToKeep = [i for i in range(len(positions)) if unmasked[positions[i]-1]]
genos = allel.GenotypeArray(genos.subset(sel0=snpIndicesToKeep))
positions = [positions[i] for i in snpIndicesToKeep]
alleleCounts = allel.AlleleCountsArray([[alleleCounts[i][0], max(alleleCounts[i][1:])] for i in snpIndicesToKeep])
if not ancestralArmFaFileName.lower() in ["none", "false"]:
mapping = [mapping[i] for i in snpIndicesToKeep]
alleleCounts = alleleCounts.map_alleles(mapping)
haps = genos.to_haplotypes()
subWinBounds = getSubWinBounds(chrLen, subWinSize)
precomputedStats = {} #not using this
header = "chrom classifiedWinStart classifiedWinEnd bigWinRange".split()
statHeader = "chrom start end".split()
for statName in statNames:
statHeader.append(statName)
for i in range(numSubWins):
header.append("%s_win%d" %(statName, i))
statHeader = "\t".join(statHeader)
header = "\t".join(header)
outFile=open(outfn,'w')
outFile.write(header+"\n")
statVals = {}
for statName in statNames:
statVals[statName] = []
startTime = time.perf_counter()
goodSubWins = []
lastSubWinEnd = chrLen - chrLen % subWinSize
snpIndicesInSubWins = getSnpIndicesInSubWins(subWinSize, lastSubWinEnd, positions)
subWinIndex = 0
lastSubWinStart = lastSubWinEnd - subWinSize + 1
if statFileName:
statFile = open(statFileName, "w")
statFile.write(statHeader + "\n")
for subWinStart in range(1, lastSubWinStart+1, subWinSize):
subWinEnd = subWinStart + subWinSize - 1
unmaskedFrac = unmasked[subWinStart-1:subWinEnd].count(True)/float(subWinEnd-subWinStart+1)
if segmentStart == None or subWinStart >= segmentStart and subWinEnd <= segmentEnd:
sys.stderr.write("%d-%d num unmasked snps: %d; unmasked frac: %f\n" %(subWinStart, subWinEnd, len(snpIndicesInSubWins[subWinIndex]), unmaskedFrac))
if len(snpIndicesInSubWins[subWinIndex]) > 0 and unmaskedFrac >= unmaskedFracCutoff:
hapsInSubWin = allel.HaplotypeArray(haps.subset(sel0=snpIndicesInSubWins[subWinIndex]))
statValStr = []
for statName in statNames:
calcAndAppendStatValForScan(alleleCounts, positions, statName, subWinStart, \
subWinEnd, statVals, subWinIndex, hapsInSubWin, unmasked, precomputedStats)
statValStr.append("%s: %s" %(statName, statVals[statName][-1]))
sys.stderr.write("\t".join(statValStr) + "\n")
goodSubWins.append(True)
if statFileName:
statFile.write("\t".join([chrArm, str(subWinStart), str(subWinEnd)] + [str(statVals[statName][-1]) for statName in statNames]) + "\n")
else:
for statName in statNames:
appendStatValsForMonomorphicForScan(statName, statVals, subWinIndex)
goodSubWins.append(False)
if goodSubWins[-numSubWins:].count(True) == numSubWins:
outVec = []
for statName in statNames:
outVec += normalizeFeatureVec(statVals[statName][-numSubWins:])
midSubWinEnd = subWinEnd - subWinSize*(numSubWins/2)
midSubWinStart = midSubWinEnd-subWinSize+1
outFile.write("%s\t%d\t%d\t%d-%d\t" %(chrArm, midSubWinStart, midSubWinEnd, subWinEnd-winSize+1, subWinEnd) + "\t".join([str(x) for x in outVec]))
outFile.write('\n')
subWinIndex += 1
if statFileName:
statFile.close()
outFile.close()
sys.stderr.write("completed in %g seconds\n" %(time.clock()-startTime))
| 2.015625 | 2 |
getting_pixels.py | Abdulwaliy/OpenCV_Basics | 0 | 12762004 | # python getting_pixels.py --image obama.jpg
# import the necessary packages
import argparse
from collections import defaultdict
import cv2
# construct the argument parser and parse the arguments
ap = argparse.ArgumentParser()
ap.add_argument("--image", required=True,
help="path to input image")
args = vars(ap.parse_args())
# load the image, grab its spatial dimensions (width and height),
# and then display the original image to our screen
image = cv2.imread(args["image"])
(h, w) = image.shape[:2]
cv2.imshow("Original", image)
# images are simply NumPy arrays -- with the origin (0, 0) located at
# the top-left of the image
(b, g, r) = image[0, 0]
print("Pixel at (0, 0) - Red: {}, Green: {}, Blue: {}".format(r, g, b))
# access the pixel located at x=100, y=5
(b, g, r) = image[5, 100]
print("Pixel at (100, 5) - Red: {}, Green: {}, Blue: {}".format(r, g, b))
# access the pixel at x=100, y=50 and set it to blue (pixels are indexed [y, x] and stored in BGR order)
image[50, 100] = (255, 0, 0)
(b, g, r) = image[50, 100]
print("Pixel at (100, 50) - Red: {}, Green: {}, Blue: {}".format(r, g, b))
# compute the center of the image, which is simply the width and height
# divided by two
(cX, cY) = (w // 2, h // 2)
print ("the centre of the image is cX: {}, cY: {}".format(cX, cY))
cv2.waitKey(0) | 3.46875 | 3 |
bin/preview.py | lotabout/orgmark.vim | 0 | 12762005 | <reponame>lotabout/orgmark.vim
#!/usr/bin/env python3
import os
SCRIPT_PATH = os.path.dirname(os.path.realpath(__file__))
PLACE_HOLDER_STYLE = '#style#'
PLACE_HOLDER_SCRIPT = '#script#'
PLACE_HOLDER_MARKDOWN = '#markdown#'
PLACE_HOLDER_MARKDOWN_BASE64 = '#markdown-base64#'
style_files = [
'assets/highlight.css',
'assets/normalize.css',
'assets/noise.css',
'assets/tomorrow.css',
'assets/custom.css',
]
script_files = [
'assets/highlight.min.js',
'assets/marked.min.js',
'assets/MathJax.js',
'assets/custom.js',
]
template_file = os.path.join(SCRIPT_PATH, 'assets/index.html')
scripts = []
for script_file in script_files:
if script_file.startswith('http'):
line = f'<script src="{script_file}"></script>'
else:
path = script_file if script_file.startswith('/') else os.path.join(SCRIPT_PATH, script_file)
with open(path) as fp:
script_content = fp.read()
line = f'<script>{script_content}</script>'
scripts.append(line)
script_renderred = '\n'.join(scripts)
styles = []
for style_file in style_files:
if style_file.startswith('http'):
style = f'<link rel="stylesheet" href="{style_file}">'
else:
path = style_file if style_file.startswith('/') else os.path.join(SCRIPT_PATH, style_file)
with open(path) as fp:
style_content = fp.read()
style = f'<style type="text/css" media="screen">{style_content}</style>'
styles.append(style)
style_renderred = '\n'.join(styles)
with open(template_file) as fp:
template_content = fp.read()
import fileinput
import sys
import argparse
parser = argparse.ArgumentParser(description='convert markdown to previewable html')
parser.add_argument('-i', '--input', nargs = '?', type=argparse.FileType('r'), default=sys.stdin)
parser.add_argument('-o', '--output', nargs='?', type=argparse.FileType('w'), help='The file name to output', default=sys.stdout)
args = parser.parse_args()
# markdown_file = args.input
markdown = args.input.read()
import base64
markdown_base64 = base64.b64encode(markdown.encode('utf8')).decode('utf8')
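# (the base64 copy presumably lets the template embed the raw markdown without HTML/JS escaping issues)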
renderred = template_content
renderred = renderred.replace(PLACE_HOLDER_STYLE, style_renderred)
renderred = renderred.replace(PLACE_HOLDER_SCRIPT, script_renderred)
renderred = renderred.replace(PLACE_HOLDER_MARKDOWN, markdown)
renderred = renderred.replace(PLACE_HOLDER_MARKDOWN_BASE64, markdown_base64)
args.output.write(renderred)
| 2.359375 | 2 |
mofa/database_API/lang/en_US/CourseSettings.py | BoxInABoxICT/BoxPlugin | 0 | 12762006 | <filename>mofa/database_API/lang/en_US/CourseSettings.py
# This program has been developed by students from the bachelor Computer Science at Utrecht University within the
# Software and Game project course
# ©Copyright Utrecht University Department of Information and Computing Sciences.
settingStrings = {
"updateMessage": "Settings updates succesfully",
"updateErrorMessage": "Couldn't update course settings",
"deadline":
{
"title": "Deadline notifications",
"desc": "Customize whether your want to enable deadline notifications and how many hours prior to a deadline a notification should be sent.",
"valueType": "hours"
},
"inactivity":
{
"title": "Inactivity notifications",
"desc": "Customize whether your want to enable inactivity notifications and after how many days a student had no activity on this course a notification should be sent.",
"valueType": "days"
},
"unknown":
{
"title": "Unrecognized setting block",
"desc": "No description available",
"valueType": "NaN"
}
}
assistantStrings = {
"updateMessage": "Assistants updated succesfully",
"updateErrorMessage": "Couldn't update assistants",
"new_activity": {
"title": "New Activity Assistant",
"desc": "Notify a student when a new activity gets added to the course",
},
"quiz_feedback": {
"title": "Quiz Feedback Assistant",
"desc": "Send students notifications when they score below the treshold",
},
"unknown": {
"title": "Unrecognized Assistant",
"desc": "No description available",
}
}
| 2.515625 | 3 |
site-packages/keystoneclient/v3/roles.py | hariza17/freezer_libraries | 0 | 12762007 | # Copyright 2011 OpenStack Foundation
# Copyright 2011 Nebula, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from debtcollector import removals
from keystoneclient import base
from keystoneclient import exceptions
from keystoneclient.i18n import _
class Role(base.Resource):
"""Represents an Identity role.
Attributes:
* id: a uuid that identifies the role
* name: user-facing identifier
* domain: optional domain for the role
"""
pass
class InferenceRule(base.Resource):
"""Represents a rule that states one role implies another.
Attributes:
* prior_role: this role implies the other
* implied_role: this role is implied by the other
"""
pass
class RoleManager(base.CrudManager):
"""Manager class for manipulating Identity roles."""
resource_class = Role
collection_key = 'roles'
key = 'role'
deprecation_msg = 'keystoneclient.v3.roles.InferenceRuleManager'
def _role_grants_base_url(self, user, group, system, domain, project,
use_inherit_extension):
# When called, we have already checked that only one of user & group
# and one of domain & project have been specified
params = {}
if project:
params['project_id'] = base.getid(project)
base_url = '/projects/%(project_id)s'
elif domain:
params['domain_id'] = base.getid(domain)
base_url = '/domains/%(domain_id)s'
elif system:
if system == 'all':
base_url = '/system'
else:
# NOTE(lbragstad): If we've made it this far, a user is
# attempting to do something with system scope that isn't
# supported yet (e.g. 'all' is currently the only supported
# system scope). In the future that may change but until then
# we should fail like we would if a user provided a bogus
# project name or domain ID.
msg = _("Only a system scope of 'all' is currently supported")
raise exceptions.ValidationError(msg)
if use_inherit_extension:
base_url = '/OS-INHERIT' + base_url
if user:
params['user_id'] = base.getid(user)
base_url += '/users/%(user_id)s'
elif group:
params['group_id'] = base.getid(group)
base_url += '/groups/%(group_id)s'
return base_url % params
def _enforce_mutually_exclusive_group(self, system, domain, project):
if not system:
if domain and project:
msg = _('Specify either a domain or project, not both')
raise exceptions.ValidationError(msg)
elif not (domain or project):
msg = _('Must specify either system, domain, or project')
raise exceptions.ValidationError(msg)
elif system:
if domain and project:
msg = _(
'Specify either system, domain, or project, not all three.'
)
raise exceptions.ValidationError(msg)
if domain:
msg = _('Specify either system or a domain, not both')
raise exceptions.ValidationError(msg)
if project:
msg = _('Specify either a system or project, not both')
raise exceptions.ValidationError(msg)
def _require_user_xor_group(self, user, group):
if user and group:
msg = _('Specify either a user or group, not both')
raise exceptions.ValidationError(msg)
elif not (user or group):
msg = _('Must specify either a user or group')
raise exceptions.ValidationError(msg)
def create(self, name, domain=None, **kwargs):
"""Create a role.
:param str name: the name of the role.
:param domain: the domain of the role. If a value is passed it is a
domain-scoped role, otherwise it's a global role.
:type domain: str or :class:`keystoneclient.v3.domains.Domain`
:param kwargs: any other attribute provided will be passed to the
server.
:returns: the created role returned from server.
:rtype: :class:`keystoneclient.v3.roles.Role`
"""
domain_id = None
if domain:
domain_id = base.getid(domain)
return super(RoleManager, self).create(
name=name,
domain_id=domain_id,
**kwargs)
def get(self, role):
"""Retrieve a role.
:param role: the role to be retrieved from the server.
:type role: str or :class:`keystoneclient.v3.roles.Role`
:returns: the specified role returned from server.
:rtype: :class:`keystoneclient.v3.roles.Role`
"""
return super(RoleManager, self).get(role_id=base.getid(role))
def list(self, user=None, group=None, system=None, domain=None,
project=None, os_inherit_extension_inherited=False, **kwargs):
"""List roles and role grants.
:param user: filter in role grants for the specified user on a
resource. Domain or project must be specified.
User and group are mutually exclusive.
:type user: str or :class:`keystoneclient.v3.users.User`
:param group: filter in role grants for the specified group on a
resource. Domain or project must be specified.
User and group are mutually exclusive.
:type group: str or :class:`keystoneclient.v3.groups.Group`
:param domain: filter in role grants on the specified domain. Either
user or group must be specified. Project, domain, and
system are mutually exclusive.
:type domain: str or :class:`keystoneclient.v3.domains.Domain`
:param project: filter in role grants on the specified project. Either
user or group must be specified. Project, domain and
system are mutually exclusive.
:type project: str or :class:`keystoneclient.v3.projects.Project`
:param bool os_inherit_extension_inherited: OS-INHERIT will be used.
It provides the ability for
projects to inherit role
assignments from their
domains or from parent
projects in the hierarchy.
:param kwargs: any other attribute provided will filter roles on.
:returns: a list of roles.
:rtype: list of :class:`keystoneclient.v3.roles.Role`
"""
if os_inherit_extension_inherited:
kwargs['tail'] = '/inherited_to_projects'
if user or group:
self._require_user_xor_group(user, group)
self._enforce_mutually_exclusive_group(system, domain, project)
base_url = self._role_grants_base_url(
user, group, system, domain, project,
os_inherit_extension_inherited
)
return super(RoleManager, self).list(base_url=base_url,
**kwargs)
return super(RoleManager, self).list(**kwargs)
def update(self, role, name=None, **kwargs):
"""Update a role.
:param role: the role to be updated on the server.
:type role: str or :class:`keystoneclient.v3.roles.Role`
:param str name: the new name of the role.
:param kwargs: any other attribute provided will be passed to server.
:returns: the updated role returned from server.
:rtype: :class:`keystoneclient.v3.roles.Role`
"""
return super(RoleManager, self).update(
role_id=base.getid(role),
name=name,
**kwargs)
def delete(self, role):
"""Delete a role.
When a role is deleted all the role inferences that have deleted role
as prior role will be deleted as well.
:param role: the role to be deleted on the server.
:type role: str or :class:`keystoneclient.v3.roles.Role`
:returns: Response object with 204 status.
:rtype: :class:`requests.models.Response`
"""
return super(RoleManager, self).delete(
role_id=base.getid(role))
def grant(self, role, user=None, group=None, system=None, domain=None,
project=None, os_inherit_extension_inherited=False, **kwargs):
"""Grant a role to a user or group on a domain or project.
:param role: the role to be granted on the server.
:type role: str or :class:`keystoneclient.v3.roles.Role`
:param user: the specified user to have the role granted on a resource.
Domain or project must be specified. User and group are
mutually exclusive.
:type user: str or :class:`keystoneclient.v3.users.User`
:param group: the specified group to have the role granted on a
resource. Domain or project must be specified.
User and group are mutually exclusive.
:type group: str or :class:`keystoneclient.v3.groups.Group`
:param system: system information to grant the role on. Project,
domain, and system are mutually exclusive.
:type system: str
:param domain: the domain in which the role will be granted. Either
user or group must be specified. Project, domain, and
system are mutually exclusive.
:type domain: str or :class:`keystoneclient.v3.domains.Domain`
:param project: the project in which the role will be granted. Either
user or group must be specified. Project, domain, and
system are mutually exclusive.
:type project: str or :class:`keystoneclient.v3.projects.Project`
:param bool os_inherit_extension_inherited: OS-INHERIT will be used.
It provides the ability for
projects to inherit role
assignments from their
domains or from parent
projects in the hierarchy.
:param kwargs: any other attribute provided will be passed to server.
:returns: the granted role returned from server.
:rtype: :class:`keystoneclient.v3.roles.Role`
"""
self._enforce_mutually_exclusive_group(system, domain, project)
self._require_user_xor_group(user, group)
if os_inherit_extension_inherited:
kwargs['tail'] = '/inherited_to_projects'
base_url = self._role_grants_base_url(
user, group, system, domain, project,
os_inherit_extension_inherited)
return super(RoleManager, self).put(base_url=base_url,
role_id=base.getid(role),
**kwargs)
def check(self, role, user=None, group=None, system=None, domain=None,
project=None, os_inherit_extension_inherited=False, **kwargs):
"""Check if a user or group has a role on a domain or project.
:param user: check for role grants for the specified user on a
resource. Domain or project must be specified.
User and group are mutually exclusive.
:type user: str or :class:`keystoneclient.v3.users.User`
:param group: check for role grants for the specified group on a
resource. Domain or project must be specified.
User and group are mutually exclusive.
:type group: str or :class:`keystoneclient.v3.groups.Group`
:param system: check for role grants on the system. Project, domain,
and system are mutually exclusive.
:type system: str
:param domain: check for role grants on the specified domain. Either
user or group must be specified. Project, domain, and
system are mutually exclusive.
:type domain: str or :class:`keystoneclient.v3.domains.Domain`
:param project: check for role grants on the specified project. Either
user or group must be specified. Project, domain, and
system are mutually exclusive.
:type project: str or :class:`keystoneclient.v3.projects.Project`
:param bool os_inherit_extension_inherited: OS-INHERIT will be used.
It provides the ability for
projects to inherit role
assignments from their
domains or from parent
projects in the hierarchy.
:param kwargs: any other attribute provided will be passed to server.
:returns: the specified role returned from server if it exists.
:rtype: :class:`keystoneclient.v3.roles.Role`
:returns: Response object with 204 status if specified role
doesn't exist.
:rtype: :class:`requests.models.Response`
"""
self._enforce_mutually_exclusive_group(system, domain, project)
self._require_user_xor_group(user, group)
if os_inherit_extension_inherited:
kwargs['tail'] = '/inherited_to_projects'
base_url = self._role_grants_base_url(
user, group, system, domain, project,
os_inherit_extension_inherited)
return super(RoleManager, self).head(
base_url=base_url,
role_id=base.getid(role),
os_inherit_extension_inherited=os_inherit_extension_inherited,
**kwargs)
def revoke(self, role, user=None, group=None, system=None, domain=None,
project=None, os_inherit_extension_inherited=False, **kwargs):
"""Revoke a role from a user or group on a domain or project.
:param user: revoke role grants for the specified user on a
resource. Domain or project must be specified.
User and group are mutually exclusive.
:type user: str or :class:`keystoneclient.v3.users.User`
:param group: revoke role grants for the specified group on a
resource. Domain or project must be specified.
User and group are mutually exclusive.
:type group: str or :class:`keystoneclient.v3.groups.Group`
:param system: revoke role grants on the system. Project, domain, and
system are mutually exclusive.
:type system: str
:param domain: revoke role grants on the specified domain. Either
user or group must be specified. Project, domain, and
system are mutually exclusive.
:type domain: str or :class:`keystoneclient.v3.domains.Domain`
:param project: revoke role grants on the specified project. Either
user or group must be specified. Project, domain, and
system are mutually exclusive.
:type project: str or :class:`keystoneclient.v3.projects.Project`
:param bool os_inherit_extension_inherited: OS-INHERIT will be used.
It provides the ability for
projects to inherit role
assignments from their
domains or from parent
projects in the hierarchy.
:param kwargs: any other attribute provided will be passed to server.
:returns: the revoked role returned from server.
:rtype: list of :class:`keystoneclient.v3.roles.Role`
"""
self._enforce_mutually_exclusive_group(system, domain, project)
self._require_user_xor_group(user, group)
if os_inherit_extension_inherited:
kwargs['tail'] = '/inherited_to_projects'
base_url = self._role_grants_base_url(
user, group, system, domain, project,
os_inherit_extension_inherited)
return super(RoleManager, self).delete(
base_url=base_url,
role_id=base.getid(role),
os_inherit_extension_inherited=os_inherit_extension_inherited,
**kwargs)
@removals.remove(message='Use %s.create instead.' % deprecation_msg,
version='3.9.0', removal_version='4.0.0')
def create_implied(self, prior_role, implied_role, **kwargs):
return InferenceRuleManager(self.client).create(prior_role,
implied_role)
@removals.remove(message='Use %s.delete instead.' % deprecation_msg,
version='3.9.0', removal_version='4.0.0')
def delete_implied(self, prior_role, implied_role, **kwargs):
return InferenceRuleManager(self.client).delete(prior_role,
implied_role)
@removals.remove(message='Use %s.get instead.' % deprecation_msg,
version='3.9.0', removal_version='4.0.0')
def get_implied(self, prior_role, implied_role, **kwargs):
return InferenceRuleManager(self.client).get(prior_role,
implied_role)
@removals.remove(message='Use %s.check instead.' % deprecation_msg,
version='3.9.0', removal_version='4.0.0')
def check_implied(self, prior_role, implied_role, **kwargs):
return InferenceRuleManager(self.client).check(prior_role,
implied_role)
@removals.remove(message='Use %s.list_inference_roles' % deprecation_msg,
version='3.9.0', removal_version='4.0.0')
def list_role_inferences(self, **kwargs):
return InferenceRuleManager(self.client).list_inference_roles()
class InferenceRuleManager(base.CrudManager):
"""Manager class for manipulating Identity inference rules."""
resource_class = InferenceRule
collection_key = 'role_inferences'
key = 'role_inference'
def _implied_role_url_tail(self, prior_role, implied_role):
base_url = ('/%(prior_role_id)s/implies/%(implied_role_id)s' %
{'prior_role_id': base.getid(prior_role),
'implied_role_id': base.getid(implied_role)})
return base_url
def create(self, prior_role, implied_role):
"""Create an inference rule.
An inference rule is comprised of two roles, a prior role and an
implied role. The prior role will imply the implied role.
Valid HTTP return codes:
* 201: Resource is created successfully
* 404: A role cannot be found
* 409: The inference rule already exists
        :param prior_role: the role which implies ``implied_role``.
        :type prior_role: str or :class:`keystoneclient.v3.roles.Role`
        :param implied_role: the role which is implied by ``prior_role``.
        :type implied_role: str or :class:`keystoneclient.v3.roles.Role`
:returns: a newly created role inference returned from server.
:rtype: :class:`keystoneclient.v3.roles.InferenceRule`
"""
url_tail = self._implied_role_url_tail(prior_role, implied_role)
_resp, body = self.client.put("/roles" + url_tail)
return self._prepare_return_value(
_resp, self.resource_class(self, body['role_inference']))
def delete(self, prior_role, implied_role):
"""Delete an inference rule.
When deleting an inference rule, both roles are required. Note that
neither role is deleted, only the inference relationship is dissolved.
Valid HTTP return codes:
* 204: Delete request is accepted
* 404: A role cannot be found
        :param prior_role: the role which implies ``implied_role``.
        :type prior_role: str or :class:`keystoneclient.v3.roles.Role`
        :param implied_role: the role which is implied by ``prior_role``.
        :type implied_role: str or :class:`keystoneclient.v3.roles.Role`
:returns: Response object with 204 status.
:rtype: :class:`requests.models.Response`
"""
url_tail = self._implied_role_url_tail(prior_role, implied_role)
return self._delete("/roles" + url_tail)
def get(self, prior_role, implied_role):
"""Retrieve an inference rule.
Valid HTTP return codes:
* 200: Inference rule is returned
* 404: A role cannot be found
        :param prior_role: the role which implies ``implied_role``.
        :type prior_role: str or :class:`keystoneclient.v3.roles.Role`
        :param implied_role: the role which is implied by ``prior_role``.
        :type implied_role: str or :class:`keystoneclient.v3.roles.Role`
:returns: the specified role inference returned from server.
:rtype: :class:`keystoneclient.v3.roles.InferenceRule`
"""
url_tail = self._implied_role_url_tail(prior_role, implied_role)
_resp, body = self.client.get("/roles" + url_tail)
return self._prepare_return_value(
_resp, self.resource_class(self, body['role_inference']))
def list(self, prior_role):
"""List all roles that a role may imply.
Valid HTTP return codes:
* 200: List of inference rules are returned
* 404: A role cannot be found
        :param prior_role: the role whose implied roles are to be listed.
        :type prior_role: str or :class:`keystoneclient.v3.roles.Role`
        :returns: the role inference (the prior role and the roles it implies)
                  returned from the server.
        :rtype: :class:`keystoneclient.v3.roles.InferenceRule`
"""
url_tail = ('/%s/implies' % base.getid(prior_role))
_resp, body = self.client.get("/roles" + url_tail)
return self._prepare_return_value(
_resp, self.resource_class(self, body['role_inference']))
def check(self, prior_role, implied_role):
"""Check if an inference rule exists.
Valid HTTP return codes:
* 204: The rule inference exists
* 404: A role cannot be found
        :param prior_role: the role which implies ``implied_role``.
        :type prior_role: str or :class:`keystoneclient.v3.roles.Role`
        :param implied_role: the role which is implied by ``prior_role``.
        :type implied_role: str or :class:`keystoneclient.v3.roles.Role`
:returns: response object with 204 status returned from server.
:rtype: :class:`requests.models.Response`
"""
url_tail = self._implied_role_url_tail(prior_role, implied_role)
return self._head("/roles" + url_tail)
def list_inference_roles(self):
"""List all rule inferences.
Valid HTTP return codes:
* 200: All inference rules are returned
:param kwargs: attributes provided will be passed to the server.
:returns: a list of inference rules.
:rtype: list of :class:`keystoneclient.v3.roles.InferenceRule`
"""
return super(InferenceRuleManager, self).list()
def update(self, **kwargs):
raise exceptions.MethodNotImplemented(
_('Update not supported for rule inferences'))
def find(self, **kwargs):
raise exceptions.MethodNotImplemented(
_('Find not supported for rule inferences'))
def put(self, **kwargs):
raise exceptions.MethodNotImplemented(
_('Put not supported for rule inferences'))
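# --- Illustrative usage sketch (added example; not part of the original keystoneclient module) ---
# A rough outline of how the managers above are typically driven.  How ``manager``
# is obtained (normally ``client.roles`` on an authenticated v3 client) and the
# role names below are assumptions for illustration; only the method calls come
# from this module.
def _example_grant_and_imply(manager, user, project):
    """Create two roles, grant one on a project, and make it imply the other."""
    admin = manager.create(name='example-admin')
    member = manager.create(name='example-member')
    # Grant the first role to the user on the project; the target arguments are
    # validated by _enforce_mutually_exclusive_group / _require_user_xor_group.
    manager.grant(admin, user=user, project=project)
    # Role inference: every assignment of ``admin`` now also implies ``member``.
    InferenceRuleManager(manager.client).create(admin, member)
    return manager.check(admin, user=user, project=project)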
| 2.046875 | 2 |
src/api/routes.py | mihai-dobre/autoscaling | 0 | 12762008 | from flask import Blueprint, jsonify, request
from src import logger
from src.api.helpers import add
bp = Blueprint("api", __name__)
@bp.route("/ping")
def ping():
return jsonify({"status": "success", "message": "pong"})
@bp.route("/add", methods=["POST"])
def add_job():
"""
    Enqueue an addition job on the Celery worker.
    The JSON request body is forwarded to the ``add`` task, which computes the
    sum asynchronously; this endpoint only acknowledges the request with:
        {"status": "created"}
"""
logger.info("Serving add endpoint.")
data = request.json
logger.info(f"Calling celery worker with arguments {data}.")
add.delay(data)
return jsonify({'status': 'created'}), 201
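# --- Illustrative usage sketch (added example; not part of the original module) ---
# Exercising both routes with Flask's test client.  How ``app`` is built and the
# assumption that this blueprint is registered without a URL prefix are for
# illustration only.
def _example_requests(app):
    """Ping the service and enqueue one addition job."""
    client = app.test_client()
    assert client.get("/ping").get_json() == {"status": "success", "message": "pong"}
    resp = client.post("/add", json={"a": 1, "b": 2})
    # The sum itself is computed asynchronously by the Celery ``add`` task; the
    # API only acknowledges that the job was created.
    assert resp.status_code == 201
    assert resp.get_json() == {"status": "created"}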
| 2.296875 | 2 |
fds.analyticsapi.engines/fds/analyticsapi/engines/api/calculations_api.py | katsuya-horiuchi/analyticsapi-engines-python-sdk | 0 | 12762009 | <reponame>katsuya-horiuchi/analyticsapi-engines-python-sdk<filename>fds.analyticsapi.engines/fds/analyticsapi/engines/api/calculations_api.py
# coding: utf-8
"""
Engines API
Allow clients to fetch Engines Analytics through APIs. # noqa: E501
The version of the OpenAPI document: 2
Contact: <EMAIL>
Generated by: https://openapi-generator.tech
"""
from __future__ import absolute_import
import re # noqa: F401
# python 2 and python 3 compatibility library
import six
from fds.analyticsapi.engines.api_client import ApiClient
from fds.analyticsapi.engines.exceptions import (
ApiTypeError,
ApiValueError
)
class CalculationsApi(object):
"""NOTE: This class is auto generated by OpenAPI Generator
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
def __init__(self, api_client=None):
if api_client is None:
api_client = ApiClient()
self.api_client = api_client
def cancel_calculation_by_id(self, id, **kwargs): # noqa: E501
"""Cancel calculation by id # noqa: E501
This is the endpoint to cancel a previously submitted calculation request. Instead of doing a GET on the getCalculationById URL, cancel the calculation by doing a DELETE. All individual calculation units within the calculation will be canceled if they have not already finished. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.cancel_calculation_by_id(id, async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param str id: From url, provided from the location header in the Run Multiple Calculations endpoint. (required)
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: None
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
return self.cancel_calculation_by_id_with_http_info(id, **kwargs) # noqa: E501
def cancel_calculation_by_id_with_http_info(self, id, **kwargs): # noqa: E501
"""Cancel calculation by id # noqa: E501
This is the endpoint to cancel a previously submitted calculation request. Instead of doing a GET on the getCalculationById URL, cancel the calculation by doing a DELETE. All individual calculation units within the calculation will be canceled if they have not already finished. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.cancel_calculation_by_id_with_http_info(id, async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param str id: From url, provided from the location header in the Run Multiple Calculations endpoint. (required)
:param _return_http_data_only: response data without head status code
and headers
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: None
If the method is called asynchronously,
returns the request thread.
"""
local_var_params = locals()
all_params = ['id'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
for key, val in six.iteritems(local_var_params['kwargs']):
if key not in all_params:
raise ApiTypeError(
"Got an unexpected keyword argument '%s'"
" to method cancel_calculation_by_id" % key
)
local_var_params[key] = val
del local_var_params['kwargs']
# verify the required parameter 'id' is set
if self.api_client.client_side_validation and ('id' not in local_var_params or # noqa: E501
local_var_params['id'] is None): # noqa: E501
raise ApiValueError("Missing the required parameter `id` when calling `cancel_calculation_by_id`") # noqa: E501
collection_formats = {}
path_params = {}
if 'id' in local_var_params:
path_params['id'] = local_var_params['id'] # noqa: E501
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# Authentication setting
auth_settings = ['Basic'] # noqa: E501
return self.api_client.call_api(
'/analytics/engines/v2/calculations/{id}', 'DELETE',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type=None, # noqa: E501
auth_settings=auth_settings,
async_req=local_var_params.get('async_req'),
_return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
_preload_content=local_var_params.get('_preload_content', True),
_request_timeout=local_var_params.get('_request_timeout'),
collection_formats=collection_formats)
def get_calculation_status_by_id(self, id, **kwargs): # noqa: E501
"""Get calculation status by id # noqa: E501
This is the endpoint to check on the progress of a previous calculation request. Response body contains status information of the entire request and each individual calculation unit. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_calculation_status_by_id(id, async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param str id: From url, provided from the location header in the Run Multiple Calculations endpoint. (required)
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: CalculationStatus
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
return self.get_calculation_status_by_id_with_http_info(id, **kwargs) # noqa: E501
def get_calculation_status_by_id_with_http_info(self, id, **kwargs): # noqa: E501
"""Get calculation status by id # noqa: E501
This is the endpoint to check on the progress of a previous calculation request. Response body contains status information of the entire request and each individual calculation unit. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_calculation_status_by_id_with_http_info(id, async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param str id: From url, provided from the location header in the Run Multiple Calculations endpoint. (required)
:param _return_http_data_only: response data without head status code
and headers
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: tuple(CalculationStatus, status_code(int), headers(HTTPHeaderDict))
If the method is called asynchronously,
returns the request thread.
"""
local_var_params = locals()
all_params = ['id'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
for key, val in six.iteritems(local_var_params['kwargs']):
if key not in all_params:
raise ApiTypeError(
"Got an unexpected keyword argument '%s'"
" to method get_calculation_status_by_id" % key
)
local_var_params[key] = val
del local_var_params['kwargs']
# verify the required parameter 'id' is set
if self.api_client.client_side_validation and ('id' not in local_var_params or # noqa: E501
local_var_params['id'] is None): # noqa: E501
raise ApiValueError("Missing the required parameter `id` when calling `get_calculation_status_by_id`") # noqa: E501
collection_formats = {}
path_params = {}
if 'id' in local_var_params:
path_params['id'] = local_var_params['id'] # noqa: E501
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['text/plain', 'application/json', 'text/json']) # noqa: E501
# Authentication setting
auth_settings = ['Basic'] # noqa: E501
return self.api_client.call_api(
'/analytics/engines/v2/calculations/{id}', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='CalculationStatus', # noqa: E501
auth_settings=auth_settings,
async_req=local_var_params.get('async_req'),
_return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
_preload_content=local_var_params.get('_preload_content', True),
_request_timeout=local_var_params.get('_request_timeout'),
collection_formats=collection_formats)
def get_calculation_status_summaries(self, **kwargs): # noqa: E501
"""Get all calculation statuses # noqa: E501
        This endpoint returns all active calculation requests.  # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_calculation_status_summaries(async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: dict(str, CalculationStatusSummary)
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
return self.get_calculation_status_summaries_with_http_info(**kwargs) # noqa: E501
def get_calculation_status_summaries_with_http_info(self, **kwargs): # noqa: E501
"""Get all calculation statuses # noqa: E501
        This endpoint returns all active calculation requests.  # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_calculation_status_summaries_with_http_info(async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param _return_http_data_only: response data without head status code
and headers
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: tuple(dict(str, CalculationStatusSummary), status_code(int), headers(HTTPHeaderDict))
If the method is called asynchronously,
returns the request thread.
"""
local_var_params = locals()
all_params = [] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
for key, val in six.iteritems(local_var_params['kwargs']):
if key not in all_params:
raise ApiTypeError(
"Got an unexpected keyword argument '%s'"
" to method get_calculation_status_summaries" % key
)
local_var_params[key] = val
del local_var_params['kwargs']
collection_formats = {}
path_params = {}
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['text/plain', 'application/json', 'text/json']) # noqa: E501
# Authentication setting
auth_settings = ['Basic'] # noqa: E501
return self.api_client.call_api(
'/analytics/engines/v2/calculations', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='dict(str, CalculationStatusSummary)', # noqa: E501
auth_settings=auth_settings,
async_req=local_var_params.get('async_req'),
_return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
_preload_content=local_var_params.get('_preload_content', True),
_request_timeout=local_var_params.get('_request_timeout'),
collection_formats=collection_formats)
def run_calculation(self, **kwargs): # noqa: E501
"""Run calculation # noqa: E501
        This endpoint creates a new calculation and runs the set of calculation units specified in the POST body. This must be used first before get status or cancelling endpoints with a calculation id. A successful response will contain the URL to check the status of the calculation request. Remarks: • Maximum 25 points allowed per calculation and maximum 500 points allowed across all simultaneous calculations. (Refer API documentation for more information) • Any settings in POST body will act as a one-time override over the settings saved in the PA/SPAR/Vault template. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.run_calculation(async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param Calculation calculation:
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: None
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
return self.run_calculation_with_http_info(**kwargs) # noqa: E501
def run_calculation_with_http_info(self, **kwargs): # noqa: E501
"""Run calculation # noqa: E501
        This endpoint creates a new calculation and runs the set of calculation units specified in the POST body. This must be used first before get status or cancelling endpoints with a calculation id. A successful response will contain the URL to check the status of the calculation request. Remarks: • Maximum 25 points allowed per calculation and maximum 500 points allowed across all simultaneous calculations. (Refer API documentation for more information) • Any settings in POST body will act as a one-time override over the settings saved in the PA/SPAR/Vault template. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.run_calculation_with_http_info(async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param Calculation calculation:
:param _return_http_data_only: response data without head status code
and headers
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: None
If the method is called asynchronously,
returns the request thread.
"""
local_var_params = locals()
all_params = ['calculation'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
for key, val in six.iteritems(local_var_params['kwargs']):
if key not in all_params:
raise ApiTypeError(
"Got an unexpected keyword argument '%s'"
" to method run_calculation" % key
)
local_var_params[key] = val
del local_var_params['kwargs']
collection_formats = {}
path_params = {}
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'calculation' in local_var_params:
body_params = local_var_params['calculation']
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501
['application/json']) # noqa: E501
# Authentication setting
auth_settings = ['Basic'] # noqa: E501
return self.api_client.call_api(
'/analytics/engines/v2/calculations', 'POST',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type=None, # noqa: E501
auth_settings=auth_settings,
async_req=local_var_params.get('async_req'),
_return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
_preload_content=local_var_params.get('_preload_content', True),
_request_timeout=local_var_params.get('_request_timeout'),
collection_formats=collection_formats)
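# --- Illustrative usage sketch (added example; not part of the generated client) ---
# A plausible run -> poll -> cancel flow built from the methods above.  The
# ``api_client`` construction, the ``calculation`` payload, the three-tuple
# return of the *_with_http_info call and the exact Location header format are
# assumptions about the generated client, not guarantees.
def _example_run_and_poll(api_client, calculation):
    """Submit a calculation, poll its status once, and cancel it if still pending."""
    api = CalculationsApi(api_client)
    # *_with_http_info exposes the response headers, which carry the URL used to
    # poll the calculation status (see the docstrings above).
    _data, _status, headers = api.run_calculation_with_http_info(calculation=calculation)
    calculation_id = headers['Location'].rstrip('/').split('/')[-1]
    status = api.get_calculation_status_by_id(calculation_id)
    # The attribute and state names checked here are assumptions about the
    # CalculationStatus model.
    if getattr(status, 'status', None) in ('Queued', 'Executing'):
        api.cancel_calculation_by_id(calculation_id)
    return status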
| 2.375 | 2 |
Packs/ShiftManagement/Scripts/GetAwayUsers/GetAwayUsers_test.py | sorkan/content | 799 | 12762010 | import io
import json
from copy import deepcopy
import GetAwayUsers
import demistomock as demisto
def util_load_json(path):
with io.open(path, mode='r', encoding='utf-8') as f:
return json.loads(f.read())
away_user_data = util_load_json('test_data/away_user.json')
def test_script_valid(mocker):
"""
Given:
When:
    - Calling the GetAwayUsers script.
Then:
- Ensure expected outputs are returned.
"""
from GetAwayUsers import main
return_results_mock = mocker.patch.object(GetAwayUsers, 'return_results')
away_user = away_user_data
not_away_user = deepcopy(away_user_data)
not_away_user['isAway'] = False
mocker.patch.object(demisto, 'executeCommand', return_value=[{'Type': '1', 'Contents': [away_user, not_away_user]}])
main()
command_results = return_results_mock.call_args[0][0]
assert command_results.outputs == [{'email': '',
'id': 'admin',
'name': 'Admin',
'phone': '+650-123456',
'roles': {'demisto': ['Administrator']},
'username': 'admin'}]
def test_script_invalid(mocker):
"""
Given:
When:
    - Calling the GetAwayUsers script when demisto.executeCommand for getUsers returns an error entry.
Then:
- Ensure error is returned.
"""
from GetAwayUsers import main
error_entry_type: int = 4
mocker.patch.object(GetAwayUsers, 'return_error')
mocker.patch.object(demisto, 'error')
away_user = away_user_data
not_away_user = deepcopy(away_user_data)
not_away_user['isAway'] = False
mocker.patch.object(demisto, 'executeCommand',
return_value=[{'Type': error_entry_type, 'Contents': [away_user, not_away_user]}])
main()
assert GetAwayUsers.return_error.called
| 2.484375 | 2 |
app.py | jakehemmerle/beiwe-backend | 0 | 12762011 | import os
from datetime import datetime
import jinja2
from flask import Flask, redirect, render_template
from raven.contrib.flask import Sentry
from werkzeug.middleware.proxy_fix import ProxyFix
from config import load_django
from api import (admin_api, copy_study_api, dashboard_api, data_access_api, data_pipeline_api,
mobile_api, participant_administration, survey_api)
from config.settings import SENTRY_ELASTIC_BEANSTALK_DSN, SENTRY_JAVASCRIPT_DSN
from libs.admin_authentication import is_logged_in
from libs.security import set_secret_key
from pages import (admin_pages, data_access_web_form, mobile_pages, survey_designer,
system_admin_pages)
def subdomain(directory):
app = Flask(__name__, static_folder=directory + "/static")
set_secret_key(app)
loader = [app.jinja_loader, jinja2.FileSystemLoader(directory + "/templates")]
app.jinja_loader = jinja2.ChoiceLoader(loader)
app.wsgi_app = ProxyFix(app.wsgi_app)
return app
# Register pages here
app = subdomain("frontend")
app.jinja_env.globals['current_year'] = datetime.now().strftime('%Y')
app.register_blueprint(mobile_api.mobile_api)
app.register_blueprint(admin_pages.admin_pages)
app.register_blueprint(mobile_pages.mobile_pages)
app.register_blueprint(system_admin_pages.system_admin_pages)
app.register_blueprint(survey_designer.survey_designer)
app.register_blueprint(admin_api.admin_api)
app.register_blueprint(participant_administration.participant_administration)
app.register_blueprint(survey_api.survey_api)
app.register_blueprint(data_access_api.data_access_api)
app.register_blueprint(data_access_web_form.data_access_web_form)
app.register_blueprint(copy_study_api.copy_study_api)
app.register_blueprint(data_pipeline_api.data_pipeline_api)
app.register_blueprint(dashboard_api.dashboard_api)
# Don't set up Sentry for local development
if os.environ['DJANGO_DB_ENV'] != 'local':
sentry = Sentry(app, dsn=SENTRY_ELASTIC_BEANSTALK_DSN)
@app.route("/<page>.html")
def strip_dot_html(page):
# Strips away the dot html from pages
return redirect("/%s" % page)
@app.context_processor
def inject_dict_for_all_templates():
return {"SENTRY_JAVASCRIPT_DSN": SENTRY_JAVASCRIPT_DSN}
# Extra Production settings
if not __name__ == '__main__':
# Points our custom 404 page (in /frontend/templates) to display on a 404 error
@app.errorhandler(404)
def e404(e):
return render_template("404.html", is_logged_in=is_logged_in()), 404
# Extra Debugging settings
if __name__ == '__main__':
    # might be necessary if running on Windows or on the Windows Subsystem for Linux.
# from gevent.wsgi import WSGIServer
# http_server = WSGIServer(('', 8080), app)
# http_server.serve_forever()
app.run(host='0.0.0.0', port=int(os.getenv("PORT", "8080")), debug=True)
| 1.8125 | 2 |
odoo-13.0/addons/website_crm/controllers/main.py | VaibhavBhujade/Blockchain-ERP-interoperability | 0 | 12762012 | <reponame>VaibhavBhujade/Blockchain-ERP-interoperability
# -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
from odoo import http
from odoo.http import request
from odoo.addons.website_form.controllers.main import WebsiteForm
class WebsiteForm(WebsiteForm):
def _get_country(self):
country_code = request.session.geoip and request.session.geoip.get('country_code') or False
if country_code:
return request.env['res.country'].sudo().search([('code', '=', country_code)], limit=1)
return request.env['res.country']
def _get_phone_fields_to_validate(self):
return ['phone', 'mobile']
# Check and insert values from the form on the model <model> + validation phone fields
@http.route('/website_form/<string:model_name>', type='http', auth="public", methods=['POST'], website=True)
def website_form(self, model_name, **kwargs):
model_record = request.env['ir.model'].sudo().search([('model', '=', model_name), ('website_form_access', '=', True)])
if model_record and hasattr(request.env[model_name], 'phone_format'):
try:
data = self.extract_data(model_record, request.params)
except:
# no specific management, super will do it
pass
else:
record = data.get('record', {})
phone_fields = self._get_phone_fields_to_validate()
country = request.env['res.country'].browse(record.get('country_id'))
contact_country = country.exists() and country or self._get_country()
for phone_field in phone_fields:
if not record.get(phone_field):
continue
number = record[phone_field]
fmt_number = request.env[model_name].phone_format(number, contact_country)
request.params.update({phone_field: fmt_number})
if model_name == 'crm.lead' and not request.params.get('state_id'):
geoip_country_code = request.session.get('geoip', {}).get('country_code')
geoip_state_code = request.session.get('geoip', {}).get('region')
if geoip_country_code and geoip_state_code:
state = request.env['res.country.state'].search([('code', '=', geoip_state_code), ('country_id.code', '=', geoip_country_code)])
if state:
request.params['state_id'] = state.id
return super(WebsiteForm, self).website_form(model_name, **kwargs)
def insert_record(self, request, model, values, custom, meta=None):
is_lead_model = model.model == 'crm.lead'
if is_lead_model:
if 'company_id' not in values:
values['company_id'] = request.website.company_id.id
lang = request.context.get('lang', False)
lang_id = request.env["res.lang"].sudo().search([('code', '=', lang)], limit=1).id
values['lang_id'] = lang_id
result = super(WebsiteForm, self).insert_record(request, model, values, custom, meta=meta)
if is_lead_model:
visitor_sudo = request.env['website.visitor']._get_visitor_from_request()
if visitor_sudo and result:
lead_sudo = request.env['crm.lead'].browse(result).sudo()
if lead_sudo.exists():
vals = {'lead_ids': [(4, result)]}
if not visitor_sudo.lead_ids and not visitor_sudo.partner_id:
vals['name'] = lead_sudo.contact_name
visitor_sudo.write(vals)
return result
| 2.421875 | 2 |
rl_server/server/play.py | parilo/tars-rl | 9 | 12762013 | #!/usr/bin/env python
from misc.common import parse_play_args
from misc.config import load_config
from rl_server.server.agent import run_agent
from rl_server.server.run_agents import get_algo_and_agent_config
args = parse_play_args()
config = load_config(args.config)
algo_config, agent_config = get_algo_and_agent_config(
config,
args.algorithm_id,
args.agent_id,
args.seed
)
run_agent(
config,
agent_config,
checkpoint_path=args.checkpoint
)
| 1.664063 | 2 |
test/app_config.py | timgates42/r3 | 49 | 12762014 | #!/usr/bin/python
# -*- coding: utf-8 -*-
INPUT_STREAMS = [
'test.count_words_stream.CountWordsStream'
]
REDUCERS = [
'test.count_words_reducer.CountWordsReducer'
]
| 1.164063 | 1 |
src/visualization/visualize_parameters.py | mikolajsacha/tweetsclassification | 4 | 12762015 | import ast
import os
import numpy as np
import matplotlib.pyplot as plt
import itertools
from mpl_toolkits.mplot3d import Axes3D # do not remove this import
import matplotlib.patches as mpatches
from matplotlib import cm
from scipy.interpolate import griddata
from src.common import choose_classifier, DATA_FOLDER
from src.models.model_testing.grid_search import get_grid_search_results_path
from src.visualization.save_visualization import save_current_plot
def get_all_grid_searched_parameters(classifier_class):
""" Returns list of combinations of parameters as a list of pairs (param_dictionary, validation_result) """
summary_file_path = get_grid_search_results_path(DATA_FOLDER, classifier_class)
if not (os.path.exists(summary_file_path) and os.path.isfile(summary_file_path)):
print "Grid Search summary file does not exist. Please run grid_search.py at first."
return None
if os.stat(summary_file_path).st_size == 0:
print "Grid Search summary file is empty. Please run grid_search.py to get some results."
return None
all_parameters = []
print("Found Grid Search results in " + summary_file_path.split("..")[-1])
for line in open(summary_file_path, 'r'):
split_line = tuple(line.split(";"))
result = float(split_line[-1])
parameters_str = split_line[:-1]
word_embedding, word_embedding_params, sentence_embedding, classifier_params = parameters_str
parameters_dict = ast.literal_eval(classifier_params)
parameters_dict["Sentence Embedding"] = sentence_embedding
parameters_dict["Word Embedding"] = "{0}({1})".format(word_embedding, word_embedding_params)
all_parameters.append((parameters_dict, result))
return all_parameters
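# Illustrative sample (added): one grid-search summary line in the shape this
# parser expects -- four ';'-separated parameter fields followed by the
# cross-validation score.  The concrete values are made up for illustration.
_EXAMPLE_SUMMARY_LINE = ("Word2Vec;{'vector_length': 100};ConcatenationEmbedding;"
                         "{'C': 1.0, 'gamma': 0.1};78.5")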
def choose_parameters_to_analyze(parameters_list):
""" Lets user choose one or two parameters to analyze """
print "Choose one or two of the following parameters by typing a number or two numbers, e.g. '1' or '3,4': "
print "\n".join("{0} - {1}".format(i, param_name) for i, param_name in enumerate(parameters_list))
numbers = []
max_number = min(2, len(parameters_list))
while True:
try:
str_numbers = raw_input().replace(" ", "").split(',')
if len(str_numbers) > max_number:
raise ValueError()
for str_number in str_numbers:
number = int(str_number)
if len(parameters_list) > number >= 0:
numbers.append(number)
else:
raise ValueError()
break
except ValueError:
print "Please insert a correct number or two numbers"
return [parameters_list[i] for i in numbers]
def analyze_single_parameter(parameter, classifier_class, all_parameters_list):
# count average, max and min performance for each value of parameter
average_performances = {}
min_performances = {}
max_performances = {}
for parameters, result in all_parameters_list:
tested_param_value = parameters[parameter]
if tested_param_value in average_performances:
average_performances[tested_param_value] += result
if result < min_performances[tested_param_value]:
min_performances[tested_param_value] = result
if result > max_performances[tested_param_value]:
max_performances[tested_param_value] = result
else:
average_performances[tested_param_value] = result
min_performances[tested_param_value] = result
max_performances[tested_param_value] = result
param_values = sorted(average_performances.iterkeys())
param_values_count = len(param_values)
tests_count_per_param_value = len(all_parameters_list) / param_values_count
for param_value in average_performances.iterkeys():
average_performances[param_value] /= tests_count_per_param_value
# convert dictionaries to lists sorted by tested param values
average_performances = [average_performances[key] for key in param_values]
min_performances = [min_performances[key] for key in param_values]
max_performances = [max_performances[key] for key in param_values]
fig, ax = plt.subplots()
use_log_scale = False
# if parameter is numerical, plot lines and ask if to use logarithmic scale
if all(isinstance(x, int) or isinstance(x, float) for x in param_values):
use_log_answer = raw_input("Use logarithmic scale? [y/n] ").lower()
use_log_scale = use_log_answer == 'y' or use_log_answer == 'yes'
if use_log_scale:
ax.set_xscale('log')
lines = ax.plot(param_values, average_performances, 'orange', param_values, min_performances, 'r',
param_values, max_performances, 'g')
ax.scatter(param_values, average_performances, c='orange', s=150, marker='*', edgecolors='black')
ax.scatter(param_values, min_performances, c='red', s=150, marker='*', edgecolors='black')
ax.scatter(param_values, max_performances, c='green', s=150, marker='*', edgecolors='black')
plt.setp(lines, linewidth=2, markersize=8)
# if parameter is non-numerical, plot a bar chart
else:
        N, width = param_values_count, 0.15
ind = np.arange(N)
ax.bar(ind, average_performances, width, color='orange', label='Avg')
ax.bar(ind + width, min_performances, width, color='r', label='Min')
ax.bar(ind + 2 * width, max_performances, width, color='g', label='Max')
plt.xticks(ind + width, param_values)
avg_legend = mpatches.Patch(color='orange', label="Average performance")
min_legend = mpatches.Patch(color='r', label="Minimum performance")
max_legend = mpatches.Patch(color='g', label="Maximum performance")
plt.legend(handles=[avg_legend, min_legend, max_legend])
plt.title('{0} performance for different values of {1}'.format(classifier_class.__name__, parameter))
if use_log_scale:
plt.xlabel('Values of {0} (logarithmic scale)'.format(parameter))
else:
plt.xlabel('Values of {0}'.format(parameter))
plt.ylabel('Cross-validation results')
save_current_plot('parameters_{0}_{1}.svg'.format(classifier_class.__name__, parameter))
plt.show()
def analyze_two_parameters(parameter1, parameter2, classifier_class, all_parameters_list):
# count max performance for each combinations of value of parameter1 and parameter2
max_performances = {}
for parameters, result in all_parameters_list:
tested_param1_value = parameters[parameter1]
tested_param2_value = parameters[parameter2]
tested_tuple = (tested_param1_value, tested_param2_value)
if tested_tuple in max_performances:
if result > max_performances[tested_tuple]:
max_performances[tested_tuple] = result
else:
max_performances[tested_tuple] = result
param1_values = sorted([p1 for p1, p2 in max_performances.iterkeys()])
param2_values = sorted([p2 for p1, p2 in max_performances.iterkeys()])
# plot makes sense only if parameters ale numerical
if not all(isinstance(x, int) or isinstance(x, float) for x in param1_values) or \
not all(isinstance(x, int) or isinstance(x, float) for x in param2_values):
print "Tested parameters must be numerical. Non-numerical paramateres can be analyzed only individually"
return
fig = plt.figure()
ax = fig.add_subplot(111, projection='3d')
zv = np.empty((len(param1_values), len(param2_values)))
for i, param1 in enumerate(param1_values):
for j, param2 in enumerate(param2_values):
zv[i, j] = max_performances[(param1, param2)]
points = np.zeros((len(param1_values) * len(param2_values), 2))
values = np.zeros((len(param1_values) * len(param2_values)))
param1_points = param1_values[:]
param2_points = param2_values[:]
use_log_answer = raw_input("Use logarithmic scale for {0}? [y/n] ".format(parameter1)).lower()
use_log_scale1 = use_log_answer == 'y' or use_log_answer == 'yes'
if use_log_scale1:
param1_points = np.log2(param1_values)
use_log_answer = raw_input("Use logarithmic scale for {0}? [y/n] ".format(parameter2)).lower()
use_log_scale2 = use_log_answer == 'y' or use_log_answer == 'yes'
if use_log_scale2:
param2_points = np.log2(param2_values)
# interpolate for better visual effect
point_i = 0
for i, param1_val in enumerate(param1_values):
for j, param2_val in enumerate(param2_values):
points[point_i] = [param1_points[i], param2_points[j]]
values[point_i] = max_performances[(param1_val, param2_val)]
point_i += 1
grid_size = 20
grid_x, grid_y = np.meshgrid(np.linspace(param1_points[0], param1_points[-1], num=grid_size),
np.linspace(param2_points[0], param2_points[-1], num=grid_size))
grid_z = griddata(points, values, (grid_x, grid_y), method='linear')
for i in xrange(grid_z.shape[0]):
for j in xrange(grid_z.shape[1]):
if grid_z[i,j] > 100:
grid_z[i, j] = 100
ax.plot_surface(grid_x, grid_y, grid_z, cmap=cm.coolwarm, linewidth=0, alpha=0.8)
# scatter real points
xs_and_ys = list(itertools.product(param1_points, param2_points))
xs = [x for x, y in xs_and_ys]
ys = [y for x, y in xs_and_ys]
zs = [max_performances[(x,y)] for (x, y) in itertools.product(param1_values, param2_values)]
ax.scatter(xs, ys, zs, s=5)
plt.title('{0} performance for different values of {1} and {2}'
.format(classifier_class.__name__, parameter1, parameter2))
if use_log_scale1:
        ax.set_xlabel('Values of {0} (logarithmic scale: 2^x)'.format(parameter1))
else:
ax.set_xlabel('Values of {0}'.format(parameter1))
if use_log_scale2:
        ax.set_ylabel('Values of {0} (logarithmic scale: 2^x)'.format(parameter2))
else:
ax.set_ylabel('Values of {0}'.format(parameter2))
ax.set_zlabel('Cross-validation results')
save_current_plot('parameters_{0}_{1}_and_{2}.svg'.format(classifier_class.__name__, parameter1, parameter2))
plt.show()
if __name__ == "__main__":
classifier_class = choose_classifier()
parameters_list = get_all_grid_searched_parameters(classifier_class)
if not parameters_list:
exit(-1)
tested_parameters = list(parameters_list[0][0].iterkeys())
parameters_to_analyze = choose_parameters_to_analyze(tested_parameters)
if len(parameters_to_analyze) == 1:
analyze_single_parameter(parameters_to_analyze[0], classifier_class, parameters_list)
elif len(parameters_to_analyze) == 2:
analyze_two_parameters(parameters_to_analyze[0], parameters_to_analyze[1], classifier_class, parameters_list)
| 2.4375 | 2 |
tests/tests.py | DemocracyClub/eco-parser | 0 | 12762016 | import os
import unittest
from eco_parser import EcoParser, ParseError
SCHEDULE_WITH_TABLE = (
"http://www.legislation.gov.uk/uksi/2017/1067/schedule/1/made/data.xml"
)
SCHEDULE_WITHOUT_TABLE = (
"http://www.legislation.gov.uk/uksi/2017/477/schedule/1/made/data.xml"
)
ARTICLE_WITHOUT_TABLE = (
"http://www.legislation.gov.uk/uksi/2017/1270/article/3/made/data.xml"
)
TABLE_WITHOUT_HEADER = (
"http://www.legislation.gov.uk/uksi/2015/1873/schedule/1/made/data.xml"
)
ONE_ROW_TABLE_VALID = (
"http://www.legislation.gov.uk/uksi/2016/1140/schedule/1/made/data.xml"
)
ONE_ROW_TABLE_INVALID = (
"http://www.legislation.gov.uk/uksi/2016/657/schedule/1/made/data.xml"
)
UNKNOWN_TABLE_FORMAT = (
"http://www.legislation.gov.uk/uksi/no-example-of-this/schedule/1/made/data.xml"
)
# stub parser implementation we can run tests against
class StubParser(EcoParser):
def get_data(self):
fixtures = {
SCHEDULE_WITH_TABLE: "fixtures/schedule_with_table.xml",
SCHEDULE_WITHOUT_TABLE: "fixtures/schedule_without_table.xml",
ARTICLE_WITHOUT_TABLE: "fixtures/article_without_table.xml",
TABLE_WITHOUT_HEADER: "fixtures/table_without_header.xml",
ONE_ROW_TABLE_VALID: "fixtures/one_row_table_valid.xml",
ONE_ROW_TABLE_INVALID: "fixtures/one_row_table_invalid.xml",
UNKNOWN_TABLE_FORMAT: "fixtures/unknown_table_format.xml",
}
        dirname = os.path.dirname(os.path.abspath(__file__))
        if self.url in fixtures:
            # Only resolve the fixture path once we know the URL is mapped,
            # otherwise the KeyError would mask the friendlier error below.
            file_path = os.path.abspath(os.path.join(dirname, fixtures[self.url]))
            return bytes(open(file_path, "r").read(), "utf-8")
        else:
            raise Exception("no test fixture defined for url '%s'" % self.url)
class ParserTest(unittest.TestCase):
def test_no_parser_found(self):
p = StubParser("foo.bar/baz")
with self.assertRaises(ParseError):
p.parse()
def test_schedule_with_table(self):
p = StubParser(SCHEDULE_WITH_TABLE)
self.assertSequenceEqual(
[
("(1) Name of borough ward", "(2) Number of councillors"),
("Crummock & Derwent Valley", "1"),
("<NAME>", "3"),
("Warnell", "1"),
("Westward Ho!", "2"),
("Audley & Queen’s Park", "2"),
],
p.parse(),
)
def test_table_without_header(self):
p = StubParser(TABLE_WITHOUT_HEADER)
self.assertSequenceEqual(
[
("Crummock & Derwent Valley", "1"),
("St John’s", "3"),
("Warnell", "1"),
("Westward Ho!", "2"),
("Audley & Queen’s Park", "2"),
],
p.parse(),
)
def test_schedule_without_table(self):
p = StubParser(SCHEDULE_WITHOUT_TABLE)
self.assertSequenceEqual(
[
("Crummock & Derwent Valley",),
("<NAME>",),
("Warnell",),
("Westward Ho!",),
("Audley & Queen’s Park",),
],
p.parse(),
)
def test_article_without_table(self):
p = StubParser(ARTICLE_WITHOUT_TABLE)
self.assertSequenceEqual(
[
("The existing wards of the borough of Foo Town are abolished",),
("The borough of Foo Town is divided into 5 wards as follows—",),
("Crummock & Derwent Valley",),
("<NAME>",),
("Warnell",),
("Westward Ho!",),
("Audley & Queen’s Park",),
(
"Each ward comprises the area identified on the map by reference to the name of the ward",
),
("Three councillors are to be elected for each ward",),
],
p.parse(),
)
def test_unknown_table_format(self):
p = StubParser(UNKNOWN_TABLE_FORMAT)
with self.assertRaises(ParseError):
p.parse()
def test_one_row_table_valid(self):
p = StubParser(ONE_ROW_TABLE_VALID)
self.assertSequenceEqual(
[
("(1) Name of borough ward", "(2) Number of councillors"),
("Crummock & Derwent Valley", "1"),
("<NAME>’s", "3"),
("Warnell", "1"),
("Westward Ho!", "2"),
("Audley & Queen’s Park", "2"),
],
p.parse(),
)
def test_one_row_table_invalid(self):
p = StubParser(ONE_ROW_TABLE_INVALID)
with self.assertRaises(ParseError):
p.parse()
| 2.859375 | 3 |
cloudify_nsx/library/nsx_security_tag.py | cloudify-cosmo/cloudify-nsx-plugin | 2 | 12762017 | # Copyright (c) 2016 GigaSpaces Technologies Ltd. All rights reserved
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import nsx_common as common
from cloudify import exceptions as cfy_exc
def get_tag(client_session, name):
return common.nsx_search(
client_session, 'body/securityTags/securityTag',
name, 'securityTag'
)
def add_tag(client_session, name, description):
security_group = {
'securityTag': {
'name': name
}
}
if description:
security_group['securityTag']['description'] = description
result_raw = client_session.create(
'securityTag',
request_body_dict=security_group
)
common.check_raw_result(result_raw)
return result_raw['objectId']
def delete_tag(client_session, resource_id):
result = client_session.delete(
'securityTagID',
uri_parameters={'tagId': resource_id}
)
common.check_raw_result(result)
def tag_vm_to_resource_id(tag_id, vm_id):
"""Generate resource_id from tag_id/vm_id"""
if not vm_id or not tag_id:
raise cfy_exc.NonRecoverableError(
"Please recheck tag_id/vm_id"
)
return "%s|%s" % (tag_id, vm_id)
def add_tag_vm(client_session, tag_id, vm_id):
resource_id = tag_vm_to_resource_id(tag_id, vm_id)
result_raw = client_session.update(
'securityTagVM',
uri_parameters={
'tagId': tag_id,
'vmMoid': vm_id
}
)
common.check_raw_result(result_raw)
return resource_id
def delete_tag_vm(client_session, resource_id):
ids = resource_id.split("|")
if len(ids) != 2:
raise cfy_exc.NonRecoverableError(
'Unexpected error retrieving resource ID'
)
# get list of attached
attached_vms_raw = common.nsx_read(
client_session, 'body',
'securityTagVMsList', uri_parameters={'tagId': ids[0]}
)
if not attached_vms_raw:
return
attached_vms = common.nsx_struct_get_list(
attached_vms_raw, 'basicinfolist/basicinfo'
)
# delete only attached
for vm in attached_vms:
if vm.get('objectId') == ids[1]:
result_raw = client_session.delete(
'securityTagVM',
uri_parameters={
'tagId': ids[0],
'vmMoid': ids[1]
}
)
common.check_raw_result(result_raw)
break
| 1.804688 | 2 |
szyfrow/amsco.py | NeilNjae/szyfrow | 0 | 12762018 | <filename>szyfrow/amsco.py
"""Enciphering and deciphering using the [Amsco cipher](http://ericbrandel.com/2016/10/09/the-amsco-cipher/).
Also attempts to break messages that use an Amsco cipher.
The Amsco cipher is a column transpositoin cipher. The plaintext is laid out,
row by row, into columns. However, different numbers of letters are laid out
in each cell, typically in a 1-2 pattern.
It's clearer with an example. Consider we're using the keyword "perceptive",
which turns into "perctiv". The text ""It is a truth universally
acknowledged, that a single man in, possession of a good fortune, must be in
want of a wife." is laid out in seven columns like this:
p e r c t i v
--------------------
i ti s at r ut h
un i ve r sa l ly
a ck n ow l ed g
ed t ha t as i ng
l em a ni n po s
se s si o no f ag
o od f or t un e
mu s tb e in w an
t of a wi f e
The ciphertext is read out in columns, according to the order of the keyword.
In this example, the "c" column is read first, then the "e" column, and so on.
That gives the ciphertext of "atrowtnioorewi tiicktemsodsof utledipofunwe
iunaedlseomut svenhaasiftba rsalasnnotinf hlygngsagean".
"""
from enum import Enum
import multiprocessing
import itertools
from szyfrow.support.utilities import *
from szyfrow.support.language_models import *
__pdoc__ = {}
AmscoSlice = collections.namedtuple('AmscoSlice', ['index', 'start', 'end'])
__pdoc__['AmscoSlice'] = """Where each piece of plainatext ends up in the AMSCO
transpositon cipher."""
__pdoc__['AmscoSlice.index'] = """Where the slice appears in the plaintext"""
__pdoc__['AmscoSlice.start'] = """Where the slice starts in the plaintext"""
__pdoc__['AmscoSlice.end'] = """Where the slice ends in the plaintext"""
class AmscoFillStyle(Enum):
"""Different methods of filling the grid.
* `continuous`: continue the fillpattern unbroken by row boundaries
* `same_each_row`: each row has the same fillpattern
* `reverse_each_row`: each row has the reversed fillpattern to the row above
"""
continuous = 1
same_each_row = 2
reverse_each_row = 3
def amsco_positions(message, keyword,
fillpattern=(1, 2),
fillstyle=AmscoFillStyle.continuous,
fillcolumnwise=False,
emptycolumnwise=True):
"""Creates the grid for the AMSCO transposition cipher. Each element in the
grid shows the index of that slice and the start and end positions of the
plaintext that go to make it up.
>>> amsco_positions(string.ascii_lowercase, 'freddy', \
fillpattern=(1, 2)) # doctest: +NORMALIZE_WHITESPACE
[[AmscoSlice(index=3, start=4, end=6),
AmscoSlice(index=2, start=3, end=4),
AmscoSlice(index=0, start=0, end=1),
AmscoSlice(index=1, start=1, end=3),
AmscoSlice(index=4, start=6, end=7)],
[AmscoSlice(index=8, start=12, end=13),
AmscoSlice(index=7, start=10, end=12),
AmscoSlice(index=5, start=7, end=9),
AmscoSlice(index=6, start=9, end=10),
AmscoSlice(index=9, start=13, end=15)],
[AmscoSlice(index=13, start=19, end=21),
AmscoSlice(index=12, start=18, end=19),
AmscoSlice(index=10, start=15, end=16),
AmscoSlice(index=11, start=16, end=18),
AmscoSlice(index=14, start=21, end=22)],
[AmscoSlice(index=18, start=27, end=28),
AmscoSlice(index=17, start=25, end=27),
AmscoSlice(index=15, start=22, end=24),
AmscoSlice(index=16, start=24, end=25),
AmscoSlice(index=19, start=28, end=30)]]
"""
transpositions = transpositions_of(keyword)
fill_iterator = itertools.cycle(fillpattern)
indices = itertools.count()
message_length = len(message)
current_position = 0
grid = []
current_fillpattern = fillpattern
while current_position < message_length:
row = []
if fillstyle == AmscoFillStyle.same_each_row:
fill_iterator = itertools.cycle(fillpattern)
if fillstyle == AmscoFillStyle.reverse_each_row:
fill_iterator = itertools.cycle(current_fillpattern)
for _ in range(len(transpositions)):
index = next(indices)
gap = next(fill_iterator)
row += [AmscoSlice(index, current_position, current_position + gap)]
current_position += gap
grid += [row]
if fillstyle == AmscoFillStyle.reverse_each_row:
current_fillpattern = list(reversed(current_fillpattern))
return [transpose(r, transpositions) for r in grid]
def amsco_encipher(message, keyword,
fillpattern=(1,2), fillstyle=AmscoFillStyle.reverse_each_row):
"""AMSCO transposition encipher.
>>> amsco_encipher('hellothere', 'abc', fillpattern=(1, 2))
'hoteelhler'
>>> amsco_encipher('hellothere', 'abc', fillpattern=(2, 1))
'hetelhelor'
>>> amsco_encipher('hellothere', 'acb', fillpattern=(1, 2))
'hotelerelh'
>>> amsco_encipher('hellothere', 'acb', fillpattern=(2, 1))
'hetelorlhe'
>>> amsco_encipher('hereissometexttoencipher', 'encode')
'etecstthhomoerereenisxip'
>>> amsco_encipher('hereissometexttoencipher', 'cipher', fillpattern=(1, 2))
'hetcsoeisterereipexthomn'
>>> amsco_encipher('hereissometexttoencipher', 'cipher', fillpattern=(1, 2), fillstyle=AmscoFillStyle.continuous)
'hecsoisttererteipexhomen'
>>> amsco_encipher('hereissometexttoencipher', 'cipher', fillpattern=(2, 1))
'heecisoosttrrtepeixhemen'
>>> amsco_encipher('hereissometexttoencipher', 'cipher', fillpattern=(1, 3, 2))
'hxtomephescieretoeisnter'
>>> amsco_encipher('hereissometexttoencipher', 'cipher', fillpattern=(1, 3, 2), fillstyle=AmscoFillStyle.continuous)
'hxomeiphscerettoisenteer'
"""
grid = amsco_positions(message, keyword,
fillpattern=fillpattern, fillstyle=fillstyle)
ct_as_grid = [[message[s.start:s.end] for s in r] for r in grid]
return combine_every_nth(ct_as_grid)
def amsco_decipher(message, keyword,
fillpattern=(1,2), fillstyle=AmscoFillStyle.reverse_each_row):
"""AMSCO transposition decipher
>>> amsco_decipher('hoteelhler', 'abc', fillpattern=(1, 2))
'hellothere'
>>> amsco_decipher('hetelhelor', 'abc', fillpattern=(2, 1))
'hellothere'
>>> amsco_decipher('hotelerelh', 'acb', fillpattern=(1, 2))
'hellothere'
>>> amsco_decipher('hetelorlhe', 'acb', fillpattern=(2, 1))
'hellothere'
>>> amsco_decipher('etecstthhomoerereenisxip', 'encode')
'hereissometexttoencipher'
>>> amsco_decipher('hetcsoeisterereipexthomn', 'cipher', fillpattern=(1, 2))
'hereissometexttoencipher'
>>> amsco_decipher('hecsoisttererteipexhomen', 'cipher', fillpattern=(1, 2), fillstyle=AmscoFillStyle.continuous)
'hereissometexttoencipher'
>>> amsco_decipher('heecisoosttrrtepeixhemen', 'cipher', fillpattern=(2, 1))
'hereissometexttoencipher'
>>> amsco_decipher('hxtomephescieretoeisnter', 'cipher', fillpattern=(1, 3, 2))
'hereissometexttoencipher'
>>> amsco_decipher('hxomeiphscerettoisenteer', 'cipher', fillpattern=(1, 3, 2), fillstyle=AmscoFillStyle.continuous)
'hereissometexttoencipher'
"""
grid = amsco_positions(message, keyword,
fillpattern=fillpattern, fillstyle=fillstyle)
transposed_sections = [s for c in [l for l in zip(*grid)] for s in c]
plaintext_list = [''] * len(transposed_sections)
current_pos = 0
for slice in transposed_sections:
plaintext_list[slice.index] = message[current_pos:current_pos-slice.start+slice.end][:len(message[slice.start:slice.end])]
current_pos += len(message[slice.start:slice.end])
return cat(plaintext_list)
def amsco_break(message, translist=None, patterns = [(1, 2), (2, 1)],
fillstyles = [AmscoFillStyle.continuous,
AmscoFillStyle.same_each_row,
AmscoFillStyle.reverse_each_row],
fitness=Pbigrams,
chunksize=500):
"""Breaks an AMSCO transposition cipher using a dictionary and
n-gram frequency analysis.
If `translist` is not specified, use
[`szyfrow.support.langauge_models.transpositions`](support/language_models.html#szyfrow.support.language_models.transpositions).
>>> amsco_break(amsco_encipher(sanitise( \
"It is a truth universally acknowledged, that a single man in \
possession of a good fortune, must be in want of a wife. However \
little known the feelings or views of such a man may be on his \
first entering a neighbourhood, this truth is so well fixed in \
the minds of the surrounding families, that he is considered the \
rightful property of some one or other of their daughters."), \
'encipher'), \
translist={(2, 0, 5, 3, 1, 4, 6): ['encipher'], \
(5, 0, 6, 1, 3, 4, 2): ['fourteen'], \
(6, 1, 0, 4, 5, 3, 2): ['keyword']}, \
patterns=[(1, 2)]) # doctest: +ELLIPSIS
(((2, 0, 5, 3, 1, 4, 6), (1, 2), <AmscoFillStyle.continuous: 1>), -709.4646722...)
>>> amsco_break(amsco_encipher(sanitise( \
"It is a truth universally acknowledged, that a single man in \
possession of a good fortune, must be in want of a wife. However \
little known the feelings or views of such a man may be on his \
first entering a neighbourhood, this truth is so well fixed in \
the minds of the surrounding families, that he is considered the \
rightful property of some one or other of their daughters."), \
'encipher', fillpattern=(2, 1)), \
translist={(2, 0, 5, 3, 1, 4, 6): ['encipher'], \
(5, 0, 6, 1, 3, 4, 2): ['fourteen'], \
(6, 1, 0, 4, 5, 3, 2): ['keyword']}, \
patterns=[(1, 2), (2, 1)], fitness=Ptrigrams) # doctest: +ELLIPSIS
(((2, 0, 5, 3, 1, 4, 6), (2, 1), <AmscoFillStyle.continuous: 1>), -997.0129085...)
"""
if translist is None:
translist = transpositions
with multiprocessing.Pool() as pool:
helper_args = [(message, trans, pattern, fillstyle, fitness)
for trans in translist
for pattern in patterns
for fillstyle in fillstyles]
# Gotcha: the helper function here needs to be defined at the top level
# (limitation of Pool.starmap)
breaks = pool.starmap(amsco_break_worker, helper_args, chunksize)
return max(breaks, key=lambda k: k[1])
def amsco_break_worker(message, transposition,
pattern, fillstyle, fitness):
plaintext = amsco_decipher(message, transposition,
fillpattern=pattern, fillstyle=fillstyle)
fit = fitness(sanitise(plaintext))
return (transposition, pattern, fillstyle), fit
if __name__ == "__main__":
    import doctest
    doctest.testmod() | 3.15625 | 3 |
datasets/SOT/seed/Impl/TrackingNet.py | zhangzhengde0225/SwinTrack | 143 | 12762019 | import os
from datasets.types.data_split import DataSplit
from datasets.SOT.constructor.base_interface import SingleObjectTrackingDatasetConstructor
import numpy as np
def construct_TrackingNet(constructor: SingleObjectTrackingDatasetConstructor, seed):
root_path = seed.root_path
data_type = seed.data_split
enable_set_ids = seed.enable_set_ids
sequence_name_class_map_file_path = seed.sequence_name_class_map_file_path
if data_type != DataSplit.Training and enable_set_ids is not None:
raise Exception("unsupported configuration")
sequence_name_class_map = {}
if sequence_name_class_map_file_path is None:
sequence_name_class_map_file_path = os.path.join(os.path.dirname(__file__), 'data_specs', 'trackingnet_sequence_classes_map.txt')
for line in open(sequence_name_class_map_file_path, 'r', encoding='utf-8'):
line = line.strip()
name, category = line.split('\t')
sequence_name_class_map[name] = category
categories = set(sequence_name_class_map.values())
category_id_name_map = {i: v for i, v in enumerate(categories)}
category_name_id_map = {v: i for i, v in enumerate(categories)}
if enable_set_ids is not None:
trackingNetSubsets = ['TRAIN_{}'.format(v) for v in enable_set_ids]
else:
trackingNetSubsets = []
if data_type & DataSplit.Training:
trackingNetSubsets = ['TRAIN_{}'.format(v) for v in range(12)]
if data_type & DataSplit.Testing:
trackingNetSubsets.append('TEST')
sequence_list = []
for subset in trackingNetSubsets:
subset_path = os.path.join(root_path, subset)
frames_path = os.path.join(subset_path, 'frames')
anno_path = os.path.join(subset_path, 'anno')
bounding_box_annotation_files = os.listdir(anno_path)
bounding_box_annotation_files = [bounding_box_annotation_file for bounding_box_annotation_file in
bounding_box_annotation_files if bounding_box_annotation_file.endswith('.txt')]
bounding_box_annotation_files.sort()
sequences = [sequence[:-4] for sequence in bounding_box_annotation_files]
for sequence, bounding_box_annotation_file in zip(sequences, bounding_box_annotation_files):
sequence_image_path = os.path.join(frames_path, sequence)
bounding_box_annotation_file_path = os.path.join(anno_path, bounding_box_annotation_file)
sequence_list.append((sequence, sequence_image_path, bounding_box_annotation_file_path))
constructor.set_category_id_name_map(category_id_name_map)
constructor.set_total_number_of_sequences(len(sequence_list))
for sequence, sequence_image_path, sequence_bounding_box_annotation_file_path in sequence_list:
with constructor.new_sequence(category_name_id_map[sequence_name_class_map[sequence]]) as sequence_constructor:
sequence_constructor.set_name(sequence)
bounding_boxes = np.loadtxt(sequence_bounding_box_annotation_file_path, dtype=np.float, delimiter=',')
images = os.listdir(sequence_image_path)
images = [image for image in images if image.endswith('.jpg')]
if bounding_boxes.ndim == 2:
is_testing_sequence = False
assert len(images) == len(bounding_boxes)
else:
is_testing_sequence = True
assert bounding_boxes.ndim == 1 and bounding_boxes.shape[0] == 4
for i in range(len(images)):
image_file_name = '{}.jpg'.format(i)
image_file_path = os.path.join(sequence_image_path, image_file_name)
with sequence_constructor.new_frame() as frame_constructor:
frame_constructor.set_path(image_file_path)
if is_testing_sequence:
if i == 0:
frame_constructor.set_bounding_box(bounding_boxes.tolist())
else:
frame_constructor.set_bounding_box(bounding_boxes[i].tolist())
| 2.296875 | 2 |
trainer/transducer_runners.py | ishine/TensorflowASR-1 | 0 | 12762020 | <gh_stars>0
import tensorflow as tf
import tensorflow.keras.mixed_precision.experimental as mixed_precision
from trainer.base_runners import BaseTrainer
from losses.rnnt_losses import USE_TF,tf_rnnt_loss,rnnt_loss
from AMmodel.transducer_wrap import Transducer
from utils.text_featurizers import TextFeaturizer
import logging
class TransducerTrainer(BaseTrainer):
def __init__(self,
speech_featurizer,
text_featurizer: TextFeaturizer,
config: dict,
is_mixed_precision: bool = False,
strategy=None
):
"""
Args:
config: the 'running_config' part in YAML config file'
text_featurizer: the TextFeaturizer instance
is_mixed_precision: a boolean for using mixed precision or not
"""
super(TransducerTrainer, self).__init__(config)
self.speech_featurizer=speech_featurizer
self.text_featurizer = text_featurizer
self.is_mixed_precision = is_mixed_precision
self.set_strategy(strategy)
if USE_TF:
self.rnnt_loss=tf_rnnt_loss
else:
self.rnnt_loss=rnnt_loss
def set_train_metrics(self):
self.train_metrics = {
"transducer_loss": tf.keras.metrics.Mean("train_transducer_loss", dtype=tf.float32),
"ctc_loss": tf.keras.metrics.Mean("ctc_loss", dtype=tf.float32),
"ctc_acc": tf.keras.metrics.Mean("ctc_acc", dtype=tf.float32),
}
def set_eval_metrics(self):
self.eval_metrics = {
"transducer_loss": tf.keras.metrics.Mean("eval_transducer_loss", dtype=tf.float32),
"ctc_loss": tf.keras.metrics.Mean("ctc_loss", dtype=tf.float32),
"ctc_acc": tf.keras.metrics.Mean("ctc_acc", dtype=tf.float32),
}
def ctc_acc(self, labels, y_pred):
T1 = tf.shape(y_pred)[1]
T2 = tf.shape(labels)[1]
T = tf.reduce_min([T1, T2])
y_pred = y_pred[:, :T]
labels = labels[:, :T]
        mask = tf.cast(tf.not_equal(labels, 0), tf.float32)
y_pred = tf.cast(y_pred, tf.float32)
labels = tf.cast(labels, tf.float32)
value = tf.cast(labels == y_pred, tf.float32)
accs = tf.reduce_sum(value, -1) / (tf.reduce_sum(mask, -1) + 1e-6)
return accs
@tf.function(experimental_relax_shapes=True)
def _train_step(self, batch):
features, input_length, labels, label_length = batch
pred_inp=labels
target=labels[:,1:]
label_length-=1
ctc_label = tf.where(target==self.text_featurizer.blank,0,target)
with tf.GradientTape() as tape:
logits,ctc_logits = self.model([features, pred_inp], training=True)
# print(logits.shape,target.shape)
ctc_preds=tf.nn.softmax(ctc_logits,-1)
if USE_TF:
per_train_loss=self.rnnt_loss(logits=logits, labels=target
, label_length=label_length, logit_length=input_length)
# per_train_loss = tf.clip_by_value(per_train_loss, 0., 500.)
else:
per_train_loss = self.rnnt_loss(
logits=logits, labels=labels, label_length=label_length,
logit_length=(input_length // self.model.time_reduction_factor),
blank=self.text_featurizer.blank)
ctc_loss = tf.keras.backend.ctc_batch_cost(tf.cast(ctc_label, tf.int32),
tf.cast(ctc_preds, tf.float32),
tf.cast(input_length[:,tf.newaxis], tf.int32),
tf.cast(label_length[:,tf.newaxis], tf.int32),
)
# ctc_loss = tf.clip_by_value(ctc_loss, 0., 1000.)
train_loss = tf.nn.compute_average_loss(per_train_loss+ctc_loss,
global_batch_size=self.global_batch_size)
if self.is_mixed_precision:
scaled_train_loss = self.optimizer.get_scaled_loss(train_loss)
if self.is_mixed_precision:
scaled_gradients = tape.gradient(scaled_train_loss, self.model.trainable_variables)
gradients = self.optimizer.get_unscaled_gradients(scaled_gradients)
else:
gradients = tape.gradient(train_loss, self.model.trainable_variables)
self.optimizer.apply_gradients(zip(gradients, self.model.trainable_variables))
ctc_pred = tf.keras.backend.ctc_decode(ctc_preds, input_length)[0][0]
ctc_acc = self.ctc_acc(ctc_label, ctc_pred)
self.train_metrics['ctc_acc'].update_state(ctc_acc)
self.train_metrics["transducer_loss"].update_state(per_train_loss)
self.train_metrics["ctc_loss"].update_state(ctc_loss)
@tf.function(experimental_relax_shapes=True)
def _eval_step(self, batch):
features,input_length, labels, label_length = batch
pred_inp = labels
target = labels[:, 1:]
label_length -= 1
ctc_label = tf.where(target == self.text_featurizer.blank, 0, target)
logits ,ctc_logits= self.model([features, pred_inp], training=False)
ctc_preds=tf.nn.softmax(ctc_logits,-1)
if USE_TF:
eval_loss = self.rnnt_loss(logits=logits, labels=target
, label_length=label_length,
logit_length=input_length,
)
else:
eval_loss = self.rnnt_loss(
logits=logits, labels=target, label_length=label_length,
logit_length=(input_length // self.model.time_reduction_factor),
blank=self.text_featurizer.blank)
ctc_loss = tf.nn.ctc_loss(ctc_label, ctc_logits, label_length, input_length, False, blank_index=-1)
ctc_loss = tf.clip_by_value(ctc_loss, 0., 500.)
ctc_pred = tf.keras.backend.ctc_decode(ctc_preds, input_length)[0][0]
ctc_acc = self.ctc_acc(ctc_label, ctc_pred)
self.eval_metrics['ctc_acc'].update_state(ctc_acc)
self.eval_metrics["transducer_loss"].update_state(eval_loss)
self.eval_metrics["ctc_loss"].update_state(ctc_loss)
def compile(self,
model: Transducer,
optimizer: any,
max_to_keep: int = 10):
f, c = self.speech_featurizer.compute_feature_dim()
with self.strategy.scope():
self.model = model
if self.model.mel_layer is not None:
self.model._build([1, 16000 if self.config['streaming'] is False else self.model.chunk_size*3, 1])
else:
self.model._build([1, 80, f, c])
self.model.summary(line_length=100)
try:
self.load_checkpoint()
except:
                logging.info('trainer resume failed, use init state')
self.optimizer = tf.keras.optimizers.get(optimizer)
if self.is_mixed_precision:
self.optimizer = mixed_precision.LossScaleOptimizer(self.optimizer, "dynamic")
self.set_progbar()
# self.load_checkpoint()
def fit(self, epoch=None):
if epoch is not None:
self.epochs=epoch
self.train_progbar.set_description_str(
f"[Train] [Epoch {epoch}/{self.config['num_epochs']}]")
self._train_batches()
self._check_eval_interval()
def _train_batches(self):
"""Train model one epoch."""
for batch in self.train_datasets:
try:
self.strategy.run(self._train_step,args=(batch,))
self.steps+=1
self.train_progbar.update(1)
self._print_train_metrics(self.train_progbar)
self._check_log_interval()
if self._check_save_interval():
break
except tf.errors.OutOfRangeError:
continue
| 2.203125 | 2 |
msticpy/config/ce_keyvault.py | kubajir/msticpy | 820 | 12762021 | # -------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
# --------------------------------------------------------------------------
"""Key Vault component edit."""
from .._version import VERSION
from .ce_simple_settings import CESimpleSettings
__version__ = VERSION
__author__ = "<NAME>"
class CEKeyVault(CESimpleSettings):
"""Key Vault settings edit component."""
_DESCRIPTION = "Key Vault Setup"
_COMP_PATH = "KeyVault"
_HELP_TEXT = """
Set the parameters for your Key Vault here to store secret
values such as API Keys.<br>
Check <b>UseKeyring</b> if you have Keyring installed and want to be
able to cache the secrets locally. (Note: keyring is not supported
by default on many Linux distributions)<br>
The first five items are mandatory.<br>
The value for <b>Authority</b> should be set to the Azure Cloud that you use.<br>
Options are:
<ul>
<li>global (Commercial Azure cloud)</li>
<li>usgov (US Government cloud)</li>
<li>cn (China national cloud)</li>
<li>de (German national cloud)</li>
</ul>
The default is "global".<br>
"""
_HELP_URI = {
"Key Vault Settings": (
"https://msticpy.readthedocs.io/en/latest/getting_started/"
+ "msticpyconfig.html#specifying-secrets-as-key-vault-secrets"
)
}
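    # Illustrative sketch (added; not taken from msticpy's documentation): the
    # section this component edits is named by _COMP_PATH above, so the
    # corresponding msticpyconfig.yaml block looks roughly like:
    #
    #   KeyVault:
    #     ...               # the five mandatory vault/tenant settings
    #     UseKeyring: True  # cache secrets locally via keyring, if installed
    #     Authority: global
    #
    # Only "UseKeyring" and "Authority" are taken from the help text above; the
    # mandatory keys are deliberately elided here rather than guessed.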
| 2.09375 | 2 |
data.py | mpuk/VI-Recommender | 0 | 12762022 | <gh_stars>0
from pandas import read_csv
from timeit import default_timer
from cPickle import dump
from scipy.stats.stats import pearsonr
from math import isnan
from numpy import array, reshape
def prep_data(df_activities, df_deals, df_items, similarity, c_k):
start = default_timer()
users_unique = df_activities['user_id'].unique()
users_unique.sort()
items_unique = [i for i in df_items['id']]
items_unique.sort()
users = {}
items = {}
similarities = {}
stats = []
s1 = default_timer()
for i in items_unique:
for i_b in df_activities[['user_id', 'quantity']][df_activities['dealitem_id'] == i].itertuples():
index, u_id, quantity = i_b
try:
items[i][u_id] += quantity
except KeyError:
items.setdefault(i, {})[u_id] = quantity
dump(items, open("items_itembased1.p", "wb"))
e1 = default_timer()
# print "Items exec time", (e1 - s1) / 60, "min"
s2 = default_timer()
for u in users_unique:
for i in df_activities[['dealitem_id', 'quantity']][df_activities['user_id'] == u].itertuples():
index, i_id, quantity = i
try:
users[u][i_id] += quantity
except KeyError:
users.setdefault(u, {})[i_id] = quantity
dump(users, open("users_itembased1.p", "wb"))
e2 = default_timer()
# print "Users exec time", (e2 - s2) / 60, "min"
s3 = default_timer()
for i in items:
similarities[i] = []
for ii in items:
if i != ii:
common_keys = [k for k in items[i] if k in items[ii]]
if common_keys and len(common_keys) >= c_k:
if similarity.__name__ == 'cosine_similarity':
i1 = array([items[i][k] for k in common_keys])
i2 = array([items[ii][k] for k in common_keys])
t_sim = similarity(i1.reshape(1, -1), i2.reshape(1, -1))
sim = t_sim[0][0]
elif similarity.__name__ == 'pearsonr':
sim, tail = similarity([items[i][k] for k in common_keys], [items[ii][k] for k in common_keys])
elif similarity.__name__ == 'jaccard_similarity_score':
sim = similarity([items[i][k] for k in common_keys], [items[ii][k] for k in common_keys])
if not isnan(sim):
try:
similarities[i].append((ii, sim))
except KeyError:
similarities.setdefault(i, [])
similarities[i].append((ii, sim))
similarities[i].sort(key=lambda tup: tup[1], reverse=True)
dump(similarities, open("similarities_itembased1.p", "wb"))
e3 = default_timer()
# print "Similarities exec time", (e3 - s3) / 60, "min"
s4 = default_timer()
for i in items:
stats.append((i, sum([items[i][q] for q in items[i]])))
stats.sort(key=lambda tup: tup[1], reverse=True)
dump(stats, open("stats_itembased1.p", "wb"))
e4 = default_timer()
# print "Stats exec time", (e4 - s4) / 60, "min"
end = default_timer()
# print "Execution time data.py", (end - start) / 60, "min" | 2.15625 | 2 |
dia_2/tensorflow/basic_classification.py | mariogen/curso_python | 0 | 12762023 | <filename>dia_2/tensorflow/basic_classification.py
#@title Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#@title MIT License
#
# Copyright (c) 2017 <NAME>
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
# TensorFlow and tf.keras
import tensorflow as tf
from tensorflow import keras
# Helper libraries
import numpy as np
import matplotlib.pyplot as plt
print(tf.__version__)
fashion_mnist = keras.datasets.fashion_mnist
(train_images, train_labels), (test_images, test_labels) = fashion_mnist.load_data()
class_names = ['T-shirt/top', 'Trouser', 'Pullover', 'Dress', 'Coat',
'Sandal', 'Shirt', 'Sneaker', 'Bag', 'Ankle boot']
train_images.shape
len(train_labels)
train_labels
test_images.shape
len(test_labels)
plt.figure()
plt.imshow(train_images[0])
plt.colorbar()
plt.grid(False)
train_images = train_images / 255.0
test_images = test_images / 255.0
plt.figure(figsize=(10,10))
for i in range(25):
plt.subplot(5,5,i+1)
plt.xticks([])
plt.yticks([])
plt.grid(False)
plt.imshow(train_images[i], cmap=plt.cm.binary)
plt.xlabel(class_names[train_labels[i]])
model = keras.Sequential([
keras.layers.Flatten(input_shape=(28, 28)),
keras.layers.Dense(128, activation=tf.nn.relu),
keras.layers.Dense(10, activation=tf.nn.softmax)
])
model.compile(optimizer=tf.train.AdamOptimizer(),
loss='sparse_categorical_crossentropy',
metrics=['accuracy'])
model.fit(train_images, train_labels, epochs=5)
test_loss, test_acc = model.evaluate(test_images, test_labels)
print('Test accuracy:', test_acc)
predictions = model.predict(test_images)
predictions[0]
np.argmax(predictions[0])
test_labels[0]
def plot_image(i, predictions_array, true_label, img):
predictions_array, true_label, img = predictions_array[i], true_label[i], img[i]
plt.grid(False)
plt.xticks([])
plt.yticks([])
plt.imshow(img, cmap=plt.cm.binary)
predicted_label = np.argmax(predictions_array)
if predicted_label == true_label:
color = 'blue'
else:
color = 'red'
plt.xlabel("{} {:2.0f}% ({})".format(class_names[predicted_label],
100*np.max(predictions_array),
class_names[true_label]),
color=color)
def plot_value_array(i, predictions_array, true_label):
predictions_array, true_label = predictions_array[i], true_label[i]
plt.grid(False)
plt.xticks([])
plt.yticks([])
thisplot = plt.bar(range(10), predictions_array, color="#777777")
plt.ylim([0, 1])
predicted_label = np.argmax(predictions_array)
thisplot[predicted_label].set_color('red')
thisplot[true_label].set_color('blue')
i = 0
plt.figure(figsize=(6,3))
plt.subplot(1,2,1)
plot_image(i, predictions, test_labels, test_images)
plt.subplot(1,2,2)
plot_value_array(i, predictions, test_labels)
i = 12
plt.figure(figsize=(6,3))
plt.subplot(1,2,1)
plot_image(i, predictions, test_labels, test_images)
plt.subplot(1,2,2)
plot_value_array(i, predictions, test_labels)
# Plot the first X test images, their predicted label, and the true label
# Color correct predictions in blue, incorrect predictions in red
num_rows = 5
num_cols = 3
num_images = num_rows*num_cols
plt.figure(figsize=(2*2*num_cols, 2*num_rows))
for i in range(num_images):
plt.subplot(num_rows, 2*num_cols, 2*i+1)
plot_image(i, predictions, test_labels, test_images)
plt.subplot(num_rows, 2*num_cols, 2*i+2)
plot_value_array(i, predictions, test_labels)
# Grab an image from the test dataset
img = test_images[0]
print(img.shape)
# Add the image to a batch where it's the only member.
img = (np.expand_dims(img,0))
print(img.shape)
predictions_single = model.predict(img)
print(predictions_single)
plot_value_array(0, predictions_single, test_labels)
_ = plt.xticks(range(10), class_names, rotation=45)
np.argmax(predictions_single[0])
| 1.882813 | 2 |
setup.py | WALL-E/django-admin-row-actions | 100 | 12762024 | <filename>setup.py
from setuptools import find_packages
from setuptools import setup
setup(
name='django-admin-row-actions',
version='0.0.5',
description='django admin row actions',
author='<NAME>',
author_email='<EMAIL>',
url='https://github.com/DjangoAdminHackers/django-admin-row-actions',
packages=find_packages(),
install_requires=[
'six',
],
package_data={
'django_admin_row_actions': [
'static/css/*.css',
'static/js/*.js',
'templates/django_admin_row_actions/*.html',
]
},
include_package_data=True,
)
| 1.351563 | 1 |
app/grandchallenge/profiles/urls.py | pushpanjalip/grand-challenge.org | 1 | 12762025 | <reponame>pushpanjalip/grand-challenge.org
from django.conf.urls import include
from django.urls import path, re_path
from grandchallenge.profiles.forms import SignupFormExtra
from grandchallenge.profiles.views import (
PreSocialView,
UserProfileDetail,
login_redirect,
profile,
profile_edit,
profile_edit_redirect,
signin,
signup,
signup_complete,
)
urlpatterns = [
path(
"signup/",
signup,
{"signup_form": SignupFormExtra},
name="profile_signup",
),
path("signup-social/", PreSocialView.as_view(), name="pre-social"),
path("signin/", signin, name="profile_signin"),
path("signup_complete/", signup_complete, name="profile_signup_complete"),
path("login-redirect/", login_redirect, name="login_redirect"),
path("profile/edit/", profile_edit_redirect, name="profile_redirect_edit"),
path("profile/", profile, name="profile_redirect"),
re_path(
r"^(?P<username>[\@\.\+\w-]+)/edit/$",
profile_edit,
name="userena_profile_edit",
),
re_path(
r"^(?P<username>(?!(signout|signup|signin)/)[\@\.\+\w-]+)/$",
UserProfileDetail.as_view(),
name="userena_profile_detail",
),
path("", include("userena.urls")),
]
| 2.09375 | 2 |
stellar/app.py | kevinjqiu/stellar | 1 | 12762026 | import logging
import os
import sys
import click
from functools import partial
from .config import load_config
from .models import Snapshot, Table, Base
from .operations import (
copy_database,
create_database,
database_exists,
remove_database,
rename_database,
terminate_database_connections,
list_of_databases,
)
from sqlalchemy import create_engine
from sqlalchemy.orm import sessionmaker
from sqlalchemy.exc import ProgrammingError
from psutil import pid_exists
__version__ = '0.4.1'
logger = logging.getLogger(__name__)
class Operations(object):
def __init__(self, raw_connection, config):
self.terminate_database_connections = partial(
terminate_database_connections, raw_connection
)
self.create_database = partial(create_database, raw_connection)
self.copy_database = partial(copy_database, raw_connection)
self.database_exists = partial(database_exists, raw_connection)
self.rename_database = partial(rename_database, raw_connection)
self.remove_database = partial(remove_database, raw_connection)
self.list_of_databases = partial(list_of_databases, raw_connection)
class Stellar(object):
def __init__(self):
logger.debug('Initialized Stellar()')
self.load_config()
self.init_database()
def load_config(self):
self.config = load_config()
logging.basicConfig(level=self.config['logging'])
def init_database(self):
self.raw_db = create_engine(self.config['url'], echo=False)
self.raw_conn = self.raw_db.connect()
self.operations = Operations(self.raw_conn, self.config)
try:
self.raw_conn.connection.set_isolation_level(0)
except AttributeError:
logger.info('Could not set isolation level to 0')
self.db = create_engine(self.config['stellar_url'], echo=False)
self.db.session = sessionmaker(bind=self.db)()
self.raw_db.session = sessionmaker(bind=self.raw_db)()
tables_missing = self.create_stellar_database()
self.create_stellar_tables()
        # logging.getLogger('sqlalchemy.engine').setLevel(logging.WARN)
def create_stellar_database(self):
if not self.operations.database_exists('stellar_data'):
self.operations.create_database('stellar_data')
return True
else:
return False
def create_stellar_tables(self):
Base.metadata.create_all(self.db)
self.db.session.commit()
def get_snapshot(self, snapshot_name):
return self.db.session.query(Snapshot).filter(
Snapshot.snapshot_name == snapshot_name,
Snapshot.project_name == self.config['project_name']
).first()
def get_snapshots(self):
return self.db.session.query(Snapshot).filter(
Snapshot.project_name == self.config['project_name']
).order_by(
Snapshot.created_at.desc()
).all()
def get_latest_snapshot(self):
return self.db.session.query(Snapshot).filter(
Snapshot.project_name == self.config['project_name']
).order_by(Snapshot.created_at.desc()).first()
def create_snapshot(self, snapshot_name, before_copy=None):
snapshot = Snapshot(
snapshot_name=snapshot_name,
project_name=self.config['project_name']
)
self.db.session.add(snapshot)
self.db.session.flush()
for table_name in self.config['tracked_databases']:
if before_copy:
before_copy(table_name)
table = Table(
table_name=table_name,
snapshot=snapshot
)
logger.debug('Copying %s to %s' % (
table_name,
table.get_table_name('master')
))
self.operations.copy_database(
table_name,
table.get_table_name('master')
)
self.db.session.add(table)
self.db.session.commit()
self.start_background_slave_copy(snapshot)
def remove_snapshot(self, snapshot):
for table in snapshot.tables:
try:
self.operations.remove_database(
table.get_table_name('master')
)
except ProgrammingError:
pass
try:
self.operations.remove_database(
table.get_table_name('slave')
)
except ProgrammingError:
pass
self.db.session.delete(table)
self.db.session.delete(snapshot)
self.db.session.commit()
def rename_snapshot(self, snapshot, new_name):
snapshot.snapshot_name = new_name
self.db.session.commit()
def restore(self, snapshot):
for table in snapshot.tables:
click.echo("Restoring database %s" % table.table_name)
if not self.operations.database_exists(
table.get_table_name('slave')
):
click.echo(
"Database %s does not exist."
% table.get_table_name('slave')
)
sys.exit(1)
try:
self.operations.remove_database(table.table_name)
except ProgrammingError:
logger.warn('Database %s does not exist.' % table.table_name)
self.operations.rename_database(
table.get_table_name('slave'),
table.table_name
)
snapshot.worker_pid = 1
self.db.session.commit()
self.start_background_slave_copy(snapshot)
def start_background_slave_copy(self, snapshot):
logger.debug('Starting background slave copy')
snapshot_id = snapshot.id
self.raw_conn.close()
self.raw_db.session.close()
self.db.session.close()
pid = os.fork() if hasattr(os, 'fork') else None
if pid:
return
self.init_database()
self.operations = Operations(self.raw_conn, self.config)
snapshot = self.db.session.query(Snapshot).get(snapshot_id)
snapshot.worker_pid = os.getpid()
self.db.session.commit()
self.inline_slave_copy(snapshot)
sys.exit()
def inline_slave_copy(self, snapshot):
for table in snapshot.tables:
self.operations.copy_database(
table.get_table_name('master'),
table.get_table_name('slave')
)
snapshot.worker_pid = None
self.db.session.commit()
def is_copy_process_running(self, snapshot):
return pid_exists(snapshot.worker_pid)
def is_old_database(self):
for snapshot in self.db.session.query(Snapshot):
for table in snapshot.tables:
for postfix in ('master', 'slave'):
old_name = table.get_table_name(postfix=postfix, old=True)
if self.operations.database_exists(old_name):
return True
return False
def update_database_names_to_new_version(self, after_rename=None):
for snapshot in self.db.session.query(Snapshot):
for table in snapshot.tables:
for postfix in ('master', 'slave'):
old_name = table.get_table_name(postfix=postfix, old=True)
new_name = table.get_table_name(postfix=postfix, old=False)
if self.operations.database_exists(old_name):
self.operations.rename_database(old_name, new_name)
if after_rename:
after_rename(old_name, new_name)
def delete_orphan_snapshots(self, after_delete=None):
stellar_databases = set()
for snapshot in self.db.session.query(Snapshot):
for table in snapshot.tables:
stellar_databases.add(table.get_table_name('master'))
stellar_databases.add(table.get_table_name('slave'))
databases = set(self.operations.list_of_databases())
for database in filter(
lambda database: (
database.startswith('stellar_') and
database != 'stellar_data'
),
(databases-stellar_databases)
):
self.operations.remove_database(database)
if after_delete:
after_delete(database)
@property
def default_snapshot_name(self):
n = 1
while self.db.session.query(Snapshot).filter(
Snapshot.snapshot_name == 'snap%d' % n,
Snapshot.project_name == self.config['project_name']
).count():
n += 1
return 'snap%d' % n
| 2.1875 | 2 |
com/wy/example/E_Clawer01.py | mygodness100/Python | 0 | 12762027 | # Crawling with gevent; Python 3.8 has no matching gevent release yet, so this has to wait
from urllib import request  # gevent-based crawling, runs automatically; gevent must be installed
import gevent, time
from gevent import monkey
from http.cookiejar import CookieJar
from bs4 import BeautifulSoup
monkey.patch_all()  # mark all IO operations in the current program; required, because gevent cannot hook urllib's IO otherwise
def f(url):
    resp = request.urlopen(url)  # open a web page
    data = resp.read()  # read the whole page content
print('%d bytes received from %s.' % (len(data), url))
urls = ['https://www.python.org/', 'https://www.yahoo.com/', 'https://github.com/' ]
time_start = time.time()
for url in urls:  # crawl synchronously
f(url)
print("sync cost", time.time() - time_start)
async_time_start = time.time()
gevent.joinall([gevent.spawn(f, urls[0]), gevent.spawn(f, urls[1]), gevent.spawn(f, urls[2]), ])  # crawl asynchronously
print("async cost", time.time() - async_time_start)
request.urlretrieve("http address", "full local path to save to")  # download a file directly to disk
# One crawling approach: instead of calling urlopen directly, forge a Request object and add some request headers
fake1 = request.Request("url")
fake1.add_header("user_agent", "Mozilla/5.0")  # set the browser type
resp1 = request.urlopen(fake1)
print(resp1.read())
# Another crawling approach: install special handlers
# 1. Cookies required: HTTPCookieProcessor
# 2. Proxy required for access: ProxyHandler
# 3. HTTPS-encrypted access: HTTPSHandler
# 4. Automatic redirects: HTTPRedirectHandler
# specialHandler = request.build_opener(HTTPSHandler())
# request.install_opener(specialHandler)
# request.urlopen("url")
cookie = CookieJar()
fake2 = request.build_opener(request.HTTPCookieProcessor(cookie))
request.install_opener(fake2)
resp2 = request.urlopen("url")
print(resp2.read())
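# Illustrative sketch (added): the proxy case from the handler list above,
# using only urllib.request APIs; swap in a real proxy address before running.
# proxy_opener = request.build_opener(request.ProxyHandler({'http': 'http://127.0.0.1:8080'}))
# request.install_opener(proxy_opener)
# resp3 = request.urlopen("url")
# print(resp3.read())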
# BeautifulSoup: parses an HTML page; requires beautifulsoup4 (and lxml if you want the lxml parser)
soup = BeautifulSoup("<html>...</html>", "html.parser")  # second argument selects the parser; pass from_encoding= when the markup is raw bytes
| 2.84375 | 3 |
problem062_facebook_matrix.py | loghmanb/daily-coding-problem | 0 | 12762028 | '''
This problem was asked by Facebook.
There is an N by M matrix of zeroes. Given N and M, write a function to count the number of ways of starting at the top-left corner and getting to the bottom-right corner. You can only move right or down.
For example, given a 2 by 2 matrix, you should return 2, since there are two ways to get to the bottom-right:
Right, then down
Down, then right
Given a 5 by 5 matrix, there are 70 ways to get to the bottom-right.
'''
def no_of_ways(N):
ways = [[0]*N for _ in range(N)]
for i in range(N):
for j in range(N):
if i==0 or j==0:
ways[i][j] = 1
else:
ways[i][j] = ways[i-1][j] + ways[i][j-1]
return ways[-1][-1]
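# Added note (not in the original solution): the count also has a closed form.
# An N x M grid needs (N-1) down moves and (M-1) right moves in some order, so
# the answer is C((N-1)+(M-1), N-1); for the 5 by 5 case that is C(8, 4) == 70.
# no_of_ways above handles the square N x N case of the same recurrence.
def no_of_ways_closed_form(n, m):
    from math import comb  # Python 3.8+
    return comb((n - 1) + (m - 1), n - 1)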
if __name__=='__main__':
data = [
[2, 2],
[3, 6],
[5, 70],
]
    for n, expected in data:
        print('input', n, 'output', no_of_ways(n), 'expected', expected) | 3.890625 | 4 |
fonts/romfonts/vga1_8x8.py | slabua/st7789py_mpy | 153 | 12762029 | """converted from vga_8x8.bin """
WIDTH = 8
HEIGHT = 8
FIRST = 0x20
LAST = 0x7f
_FONT =\
b'\x00\x00\x00\x00\x00\x00\x00\x00'\
b'\x18\x3c\x3c\x18\x18\x00\x18\x00'\
b'\x66\x66\x24\x00\x00\x00\x00\x00'\
b'\x6c\x6c\xfe\x6c\xfe\x6c\x6c\x00'\
b'\x18\x3e\x60\x3c\x06\x7c\x18\x00'\
b'\x00\xc6\xcc\x18\x30\x66\xc6\x00'\
b'\x38\x6c\x38\x76\xdc\xcc\x76\x00'\
b'\x18\x18\x30\x00\x00\x00\x00\x00'\
b'\x0c\x18\x30\x30\x30\x18\x0c\x00'\
b'\x30\x18\x0c\x0c\x0c\x18\x30\x00'\
b'\x00\x66\x3c\xff\x3c\x66\x00\x00'\
b'\x00\x18\x18\x7e\x18\x18\x00\x00'\
b'\x00\x00\x00\x00\x00\x18\x18\x30'\
b'\x00\x00\x00\x7e\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x00\x18\x18\x00'\
b'\x06\x0c\x18\x30\x60\xc0\x80\x00'\
b'\x38\x6c\xc6\xd6\xc6\x6c\x38\x00'\
b'\x18\x38\x18\x18\x18\x18\x7e\x00'\
b'\x7c\xc6\x06\x1c\x30\x66\xfe\x00'\
b'\x7c\xc6\x06\x3c\x06\xc6\x7c\x00'\
b'\x1c\x3c\x6c\xcc\xfe\x0c\x1e\x00'\
b'\xfe\xc0\xc0\xfc\x06\xc6\x7c\x00'\
b'\x38\x60\xc0\xfc\xc6\xc6\x7c\x00'\
b'\xfe\xc6\x0c\x18\x30\x30\x30\x00'\
b'\x7c\xc6\xc6\x7c\xc6\xc6\x7c\x00'\
b'\x7c\xc6\xc6\x7e\x06\x0c\x78\x00'\
b'\x00\x18\x18\x00\x00\x18\x18\x00'\
b'\x00\x18\x18\x00\x00\x18\x18\x30'\
b'\x06\x0c\x18\x30\x18\x0c\x06\x00'\
b'\x00\x00\x7e\x00\x00\x7e\x00\x00'\
b'\x60\x30\x18\x0c\x18\x30\x60\x00'\
b'\x7c\xc6\x0c\x18\x18\x00\x18\x00'\
b'\x7c\xc6\xde\xde\xde\xc0\x78\x00'\
b'\x38\x6c\xc6\xfe\xc6\xc6\xc6\x00'\
b'\xfc\x66\x66\x7c\x66\x66\xfc\x00'\
b'\x3c\x66\xc0\xc0\xc0\x66\x3c\x00'\
b'\xf8\x6c\x66\x66\x66\x6c\xf8\x00'\
b'\xfe\x62\x68\x78\x68\x62\xfe\x00'\
b'\xfe\x62\x68\x78\x68\x60\xf0\x00'\
b'\x3c\x66\xc0\xc0\xce\x66\x3a\x00'\
b'\xc6\xc6\xc6\xfe\xc6\xc6\xc6\x00'\
b'\x3c\x18\x18\x18\x18\x18\x3c\x00'\
b'\x1e\x0c\x0c\x0c\xcc\xcc\x78\x00'\
b'\xe6\x66\x6c\x78\x6c\x66\xe6\x00'\
b'\xf0\x60\x60\x60\x62\x66\xfe\x00'\
b'\xc6\xee\xfe\xfe\xd6\xc6\xc6\x00'\
b'\xc6\xe6\xf6\xde\xce\xc6\xc6\x00'\
b'\x7c\xc6\xc6\xc6\xc6\xc6\x7c\x00'\
b'\xfc\x66\x66\x7c\x60\x60\xf0\x00'\
b'\x7c\xc6\xc6\xc6\xc6\xce\x7c\x0e'\
b'\xfc\x66\x66\x7c\x6c\x66\xe6\x00'\
b'\x3c\x66\x30\x18\x0c\x66\x3c\x00'\
b'\x7e\x7e\x5a\x18\x18\x18\x3c\x00'\
b'\xc6\xc6\xc6\xc6\xc6\xc6\x7c\x00'\
b'\xc6\xc6\xc6\xc6\xc6\x6c\x38\x00'\
b'\xc6\xc6\xc6\xd6\xd6\xfe\x6c\x00'\
b'\xc6\xc6\x6c\x38\x6c\xc6\xc6\x00'\
b'\x66\x66\x66\x3c\x18\x18\x3c\x00'\
b'\xfe\xc6\x8c\x18\x32\x66\xfe\x00'\
b'\x3c\x30\x30\x30\x30\x30\x3c\x00'\
b'\xc0\x60\x30\x18\x0c\x06\x02\x00'\
b'\x3c\x0c\x0c\x0c\x0c\x0c\x3c\x00'\
b'\x10\x38\x6c\xc6\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x00\x00\x00\xff'\
b'\x30\x18\x0c\x00\x00\x00\x00\x00'\
b'\x00\x00\x78\x0c\x7c\xcc\x76\x00'\
b'\xe0\x60\x7c\x66\x66\x66\xdc\x00'\
b'\x00\x00\x7c\xc6\xc0\xc6\x7c\x00'\
b'\x1c\x0c\x7c\xcc\xcc\xcc\x76\x00'\
b'\x00\x00\x7c\xc6\xfe\xc0\x7c\x00'\
b'\x3c\x66\x60\xf8\x60\x60\xf0\x00'\
b'\x00\x00\x76\xcc\xcc\x7c\x0c\xf8'\
b'\xe0\x60\x6c\x76\x66\x66\xe6\x00'\
b'\x18\x00\x38\x18\x18\x18\x3c\x00'\
b'\x06\x00\x06\x06\x06\x66\x66\x3c'\
b'\xe0\x60\x66\x6c\x78\x6c\xe6\x00'\
b'\x38\x18\x18\x18\x18\x18\x3c\x00'\
b'\x00\x00\xec\xfe\xd6\xd6\xd6\x00'\
b'\x00\x00\xdc\x66\x66\x66\x66\x00'\
b'\x00\x00\x7c\xc6\xc6\xc6\x7c\x00'\
b'\x00\x00\xdc\x66\x66\x7c\x60\xf0'\
b'\x00\x00\x76\xcc\xcc\x7c\x0c\x1e'\
b'\x00\x00\xdc\x76\x60\x60\xf0\x00'\
b'\x00\x00\x7e\xc0\x7c\x06\xfc\x00'\
b'\x30\x30\xfc\x30\x30\x36\x1c\x00'\
b'\x00\x00\xcc\xcc\xcc\xcc\x76\x00'\
b'\x00\x00\xc6\xc6\xc6\x6c\x38\x00'\
b'\x00\x00\xc6\xd6\xd6\xfe\x6c\x00'\
b'\x00\x00\xc6\x6c\x38\x6c\xc6\x00'\
b'\x00\x00\xc6\xc6\xc6\x7e\x06\xfc'\
b'\x00\x00\x7e\x4c\x18\x32\x7e\x00'\
b'\x0e\x18\x18\x70\x18\x18\x0e\x00'\
b'\x18\x18\x18\x18\x18\x18\x18\x00'\
b'\x70\x18\x18\x0e\x18\x18\x70\x00'\
b'\x76\xdc\x00\x00\x00\x00\x00\x00'\
b'\x00\x10\x38\x6c\xc6\xc6\xfe\x00'\
FONT = memoryview(_FONT)
| 1.9375 | 2 |
fil_finder/tests/test_widths.py | e-koch/fil_finder | 28 | 12762030 |
import pytest
from ..width import nonparam_width, gauss_model, radial_profile
from .testing_utils import generate_filament_model
import numpy as np
import numpy.testing as npt
from scipy import ndimage as nd
def generate_gaussian_profile(pts, width=3.0, amplitude=2.0, background=0.5):
return amplitude * np.exp(- pts ** 2 / (2 * width ** 2)) + background
def test_nonparam():
pts = np.linspace(0, 10, 100)
profile = generate_gaussian_profile(pts)
params, errors, fail = \
nonparam_width(pts, profile, pts, profile, 1.0, 5, 99)
# This shouldn't be failing
assert fail is False
# Check the amplitude
npt.assert_allclose(params[0], 2.5, atol=0.01)
# Width
npt.assert_allclose(params[1], 3.0, atol=0.01)
# Background
npt.assert_allclose(params[2], 0.5, atol=0.02)
def test_gaussian():
pts = np.linspace(0, 10, 100)
profile = generate_gaussian_profile(pts)
params, errors, _, _, fail = \
gauss_model(pts, profile, np.ones_like(pts), 1.0)
# Check the amplitude
npt.assert_allclose(params[0], 2.5, atol=0.01)
# Width
npt.assert_allclose(params[1], 3.0, atol=0.01)
# Background
npt.assert_allclose(params[2], 0.5, atol=0.02)
@pytest.mark.parametrize(('theta'), [(0.0)])
def test_radial_profile_output(theta):
model, skeleton = generate_filament_model(width=10.0,
amplitude=1.0, background=0.0)
dist_transform = nd.distance_transform_edt((~skeleton).astype(np.int))
dist, radprof, weights, unbin_dist, unbin_radprof = \
radial_profile(model, dist_transform, dist_transform,
((0, 0), (model.shape[0] // 2, model.shape[1] // 2)),
img_scale=1.0, auto_cut=False, max_distance=20)
params, errors, _, _, fail = \
gauss_model(dist, radprof, np.ones_like(dist), 1.0)
npt.assert_allclose(params[:-1], [1.0, 10.0, 0.0], atol=1e-1)
@pytest.mark.parametrize(('cutoff'), [(10.0), (20.0), (30.0)])
def test_radial_profile_cutoff(cutoff):
model, skeleton = generate_filament_model(width=10.0,
amplitude=1.0, background=0.0)
dist_transform = nd.distance_transform_edt((~skeleton).astype(np.int))
dist, radprof, weights, unbin_dist, unbin_radprof = \
radial_profile(model, dist_transform, dist_transform,
((0, 0), (model.shape[0] // 2, model.shape[1] // 2)),
img_scale=1.0, auto_cut=False, max_distance=cutoff)
assert unbin_dist.max() == cutoff
assert dist.max() < cutoff
@pytest.mark.parametrize(('padding'), [(5.0), (10.0), (20.0)])
def test_radial_profile_padding(padding, max_distance=20.0):
model, skeleton = generate_filament_model(width=10.0,
amplitude=1.0, background=0.0)
dist_transform = nd.distance_transform_edt((~skeleton).astype(np.int))
dist, radprof, weights, unbin_dist, unbin_radprof = \
radial_profile(model, dist_transform, dist_transform,
((0, 0), (model.shape[0] // 2, model.shape[1] // 2)),
img_scale=1.0, auto_cut=False,
max_distance=max_distance, pad_to_distance=padding)
if padding <= max_distance:
assert unbin_dist.max() == max_distance
assert dist.max() < max_distance
else:
assert unbin_dist.max() == padding
assert dist.max() < padding
@pytest.mark.xfail(raises=ValueError)
def test_radial_profile_fail_pad(padding=30.0, max_distance=20.0):
'''
Cannot pad greater than max_distance
'''
model, skeleton = generate_filament_model(width=10.0,
amplitude=1.0, background=0.0)
dist_transform = nd.distance_transform_edt((~skeleton).astype(np.int))
dist, radprof, weights, unbin_dist, unbin_radprof = \
radial_profile(model, dist_transform, dist_transform,
((0, 0), (model.shape[0] // 2, model.shape[1] // 2)),
img_scale=1.0, auto_cut=False,
max_distance=max_distance, pad_to_distance=padding)
def test_radial_profile_autocut():
'''
Test auto-cutting with a secondary offset peak.
'''
model, skeleton = generate_filament_model(width=10.0,
amplitude=1.0, background=0.0)
model += np.roll(model, -30, axis=0).copy()
model += np.roll(model, +30, axis=0).copy()
# all_skeleton += np.roll(skeleton, -30, axis=0)
dist_transform = nd.distance_transform_edt((~skeleton).astype(np.int))
dist, radprof, weights, unbin_dist, unbin_radprof = \
radial_profile(model, dist_transform, dist_transform,
((0, 0), (model.shape[0] // 2, model.shape[1] // 2)),
img_scale=1.0, auto_cut=True,
max_distance=50.0, auto_cut_kwargs={'smooth_size': 3.0,
'pad_cut': 0})
npt.assert_equal(dist.max(), 19.25)
def test_radial_profile_autocut_plateau():
'''
Test auto-cutting with a plateau and a second fall.
'''
model, skeleton = generate_filament_model(shape=160, width=10.0,
amplitude=10.0, background=5.0)
# Create a second drop-off profile 40 pixels from the center on each side.
for i, row in enumerate(model[120:].T):
model[120:, i] = generate_gaussian_profile(np.arange(row.size),
width=5.0,
amplitude=5.0,
background=0.0)
for i, row in enumerate(model[:40].T):
model[:40, i] = generate_gaussian_profile(np.arange(row.size),
width=5.0,
amplitude=5.0,
background=0.0)[::-1]
dist_transform = nd.distance_transform_edt((~skeleton).astype(np.int))
dist, radprof, weights, unbin_dist, unbin_radprof = \
radial_profile(model, dist_transform, dist_transform,
((0, 0), (model.shape[0] // 2, model.shape[1] // 2)),
img_scale=1.0, auto_cut=True,
max_distance=60.0, auto_cut_kwargs={'smooth_size': 3.0,
'pad_cut': 0,
'interp_factor': 1})
# By-eye, this should be 18-19
npt.assert_almost_equal(dist.max(), 38.201, decimal=3)
| 2.03125 | 2 |
examples/data/norm_feature.py | leilin-research/Time-series-prediction | 552 | 12762031 | <gh_stars>100-1000
import os
import joblib
import pandas as pd
from sklearn.preprocessing import StandardScaler, MinMaxScaler
class FeatureNorm(object):
def __init__(self, type='minmax'):
self.type = type
def __call__(self, x, mode='train', model_dir='../weights', name='scaler'):
assert len(x.shape) == 2, "Input rank for FeatureNorm should be 2"
if self.type == 'standard':
scaler = StandardScaler()
elif self.type == 'minmax':
scaler = MinMaxScaler()
else:
raise ValueError("Unsupported norm type yet: {}".format(self.type))
if mode == 'train':
scaler.fit(x)
joblib.dump(scaler, os.path.join(model_dir, name+'.pkl'))
else:
scaler = joblib.load(os.path.join(model_dir, name+'.pkl'))
output = scaler.transform(x)
try:
return pd.DataFrame(output, index=x.index, columns=x.columns)
except:
return output
| 2.703125 | 3 |
omtk/ui/widget_list_influences.py | CDufour909/omtk_unreal | 0 | 12762032 | # -*- coding: utf-8 -*-
# Form implementation generated from reading ui file '/home/rlessard/packages/omtk/0.4.999/python/omtk/ui/widget_list_influences.ui'
#
# Created: Tue Feb 20 10:34:53 2018
# by: pyside2-uic running on Qt 2.0.0~alpha0
#
# WARNING! All changes made in this file will be lost!
from omtk.vendor.Qt import QtCore, QtGui, QtWidgets, QtCompat
class Ui_Form(object):
def setupUi(self, Form):
Form.setObjectName("Form")
Form.resize(316, 295)
self.verticalLayout = QtWidgets.QVBoxLayout(Form)
self.verticalLayout.setContentsMargins(0, 0, 0, 0)
self.verticalLayout.setObjectName("verticalLayout")
self.horizontalLayout = QtWidgets.QHBoxLayout()
self.horizontalLayout.setObjectName("horizontalLayout")
self.lineEdit_search = QtWidgets.QLineEdit(Form)
self.lineEdit_search.setObjectName("lineEdit_search")
self.horizontalLayout.addWidget(self.lineEdit_search)
self.btn_update = QtWidgets.QPushButton(Form)
self.btn_update.setObjectName("btn_update")
self.horizontalLayout.addWidget(self.btn_update)
self.verticalLayout.addLayout(self.horizontalLayout)
self.checkBox_hideAssigned = QtWidgets.QCheckBox(Form)
self.checkBox_hideAssigned.setChecked(True)
self.checkBox_hideAssigned.setObjectName("checkBox_hideAssigned")
self.verticalLayout.addWidget(self.checkBox_hideAssigned)
self.treeWidget = QtWidgets.QTreeWidget(Form)
self.treeWidget.setContextMenuPolicy(QtCore.Qt.CustomContextMenu)
self.treeWidget.setSelectionMode(QtWidgets.QAbstractItemView.ExtendedSelection)
self.treeWidget.setObjectName("treeWidget")
self.treeWidget.headerItem().setText(0, "1")
self.treeWidget.header().setVisible(False)
self.verticalLayout.addWidget(self.treeWidget)
self.retranslateUi(Form)
QtCore.QMetaObject.connectSlotsByName(Form)
def retranslateUi(self, Form):
Form.setWindowTitle(QtCompat.translate("Form", "Form", None, -1))
self.btn_update.setText(QtCompat.translate("Form", "Update", None, -1))
self.checkBox_hideAssigned.setText(QtCompat.translate("Form", "Hide Assigned", None, -1))
| 1.359375 | 1 |
packages/w3af/w3af/core/controllers/configurable.py | ZooAtmosphereGroup/HelloPackages | 3 | 12762033 | """
configurable.py
Copyright 2006 <NAME>
This file is part of w3af, http://w3af.org/ .
w3af is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation version 2 of the License.
w3af is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with w3af; if not, write to the Free Software
Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
"""
class Configurable(object):
"""
This is mostly "an interface", this "interface" states that all
classes that implement it, should implement the following methods:
1. set_options( options_list )
2. get_options()
:author: <NAME> (<EMAIL>)
"""
def set_options(self, options_list):
"""
Sets the Options given on the options_list to self. The options
are the result of a user entering some data on a window that
was constructed using the XML Options that was retrieved from
the plugin using get_options()
This method MUST be implemented on every configurable object.
:return: No value is returned.
"""
raise NotImplementedError('Configurable object is not implementing '
'required method set_options')
def get_options(self):
"""
This method returns an OptionList containing the options
objects that the configurable object has. Using this option
list the framework will build a window, a menu, or some
other input method to retrieve the info from the user.
This method MUST be implemented on every plugin.
:return: OptionList.
"""
raise NotImplementedError('Configurable object is not implementing '
'required method get_options')
def get_name(self):
return type(self).__name__
def get_type(self):
return 'configurable'
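# Illustrative sketch only (not part of w3af): a minimal Configurable subclass
# that satisfies the interface described above. Real w3af plugins build and
# return an OptionList using the framework's option classes instead of the
# plain dictionary used here.
class ExampleConfigurable(Configurable):

    def __init__(self):
        self._options = {}

    def set_options(self, options_list):
        # Store whatever the UI layer sends back after the user edits options.
        self._options = options_list

    def get_options(self):
        # Return the options the UI layer should render for this object.
        return self._options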
| 2.328125 | 2 |
config.py | kolyat/testutils | 0 | 12762034 | <filename>config.py
import logging
from utils import service
#
# Defaults
#
DEFAULT_TARGET_CONFIG = {
'default': {
'protocol': 'http',
'server': 'test.server',
'api_version': 'v5',
'users': {
'default': {
'username': 'bot',
'userpass': 'password',
'mail_server': 'outlook.office365.com'
}
},
'platforms': {
'default': {
'name': 'bot_platform'
}
}
}
}
DEFAULT_LOGGING_CONFIG = {
'version': 1,
'disable_existing_loggers': False,
'formatters': {
'standard': {
'format': '%(asctime)s | [%(levelname)8s] | '
'%(module)s.%(funcName)s(%(lineno)d) - %(message)s'
}
},
'handlers': {
'console': {
'class': 'logging.StreamHandler',
'formatter': 'standard',
'level': 'INFO',
'stream': 'ext://sys.stdout'
},
'http_file': {
'class': 'logging.FileHandler',
'filename': './logs/http.log',
'formatter': 'standard',
'level': 'DEBUG',
'mode': 'a'
}
},
'loggers': {
'urllib3': {
'handlers': ['http_file'],
'level': 'DEBUG',
'propagate': False
},
'http.client': {
'handlers': ['http_file'],
'level': 'DEBUG',
'propagate': False
}
},
'root': {
'handlers': ['console'],
'level': 'INFO'
}
}
#
# Selenium
#
IMPLICIT_WAIT = 0
TIMEOUT = 5
POLL_FREQUENCY = 1
#
# Init
#
current_config = service.Config()
current_config.update_config('config.json')
logging.config.dictConfig(current_config.logging)
service.httpclient_logging_patch()
| 1.976563 | 2 |
PycharmProjects/pythonProject/venv/ex042.py | New-Caiocolas/Exercicios-Python | 0 | 12762035 | <reponame>New-Caiocolas/Exercicios-Python<filename>PycharmProjects/pythonProject/venv/ex042.py
print('=--=' * 20)
print('TRIANGLE ANALYZER')
print('=--=' * 20)
reta1 = int(input('Enter the first segment: '))
reta2 = int(input('Enter the second segment: '))
reta3 = int(input('Enter the third segment: '))
if reta1 < reta2 + reta3 and reta2 < reta1 + reta3 and reta3 < reta1 + reta2:
    print('The segments above can form a triangle: ', end='')
    if reta1 == reta2 == reta3:
        print('EQUILATERAL')
    elif reta1 != reta2 != reta3 != reta1:
        print('SCALENE')
    else:
        print('ISOSCELES')
else:
    print('The segments above cannot form a triangle.') | 3.953125 | 4 |
src/pretalx/common/mixins/views.py | MaxRink/pretalx | 0 | 12762036 | import urllib
from contextlib import suppress
from importlib import import_module
from urllib.parse import quote
from django.conf import settings
from django.core.exceptions import FieldDoesNotExist
from django.db.models import CharField, Q
from django.db.models.functions import Lower
from django.http import Http404
from django.shortcuts import redirect
from django.utils.functional import cached_property
from i18nfield.forms import I18nModelForm
from rules.contrib.views import PermissionRequiredMixin
from pretalx.common.forms import SearchForm
SessionStore = import_module(settings.SESSION_ENGINE).SessionStore
class ActionFromUrl:
write_permission_required = None
@cached_property
def object(self):
return self.get_object()
@cached_property
def permission_object(self):
return self.object
@cached_property
def _action(self):
if not any(_id in self.kwargs for _id in ['pk', 'code']):
return 'create'
if self.request.user.has_perm(
self.write_permission_required, self.permission_object
):
return 'edit'
return 'view'
def get_context_data(self, **kwargs):
context = super().get_context_data(**kwargs)
context['action'] = self._action
return context
def get_form_kwargs(self):
kwargs = super().get_form_kwargs()
kwargs['read_only'] = self._action == 'view'
if hasattr(self.request, 'event') and issubclass(
self.form_class, I18nModelForm
):
kwargs['locales'] = self.request.event.locales
return kwargs
class Sortable:
"""
In the main class, you'll have to call sort_queryset() in get_queryset.
In the template, do this:
{% load url_replace %}
<th>
{% trans "Title" %}
<a href="?{% url_replace request 'sort' '-title' %}"><i class="fa fa-caret-down"></i></a>
<a href="?{% url_replace request 'sort' 'title' %}"><i class="fa fa-caret-up"></i></a>
</th>
"""
sortable_fields = []
def sort_queryset(self, qs):
sort_key = self.request.GET.get('sort') or getattr(
self, 'default_sort_field', ''
)
if sort_key:
plain_key = sort_key[1:] if sort_key.startswith('-') else sort_key
reverse = not (plain_key == sort_key)
if plain_key in self.sortable_fields:
is_text = False
if '__' not in plain_key:
with suppress(FieldDoesNotExist):
is_text = isinstance(
qs.model._meta.get_field(plain_key), CharField
)
else:
split_key = plain_key.split('__')
if len(split_key) == 2:
is_text = isinstance(
qs.model._meta.get_field(
split_key[0]
).related_model._meta.get_field(split_key[1]),
CharField,
)
if is_text:
# TODO: this only sorts direct lookups case insensitively
# A sorting field like 'speaker__name' will not be found
qs = qs.annotate(key=Lower(plain_key)).order_by(
'-key' if reverse else 'key'
)
else:
qs = qs.order_by(sort_key)
return qs
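# Hedged sketch (editor's addition, not part of pretalx): one way a list view could mix
# Sortable into a Django ListView. The model is passed in and the field names
# ('title', 'state') are illustrative assumptions, not pretalx's actual columns.
def _example_sortable_view(model_class):
    from django.views.generic import ListView
    class _SortedList(Sortable, ListView):
        model = model_class
        sortable_fields = ('title', 'state')
        default_sort_field = 'title'
        def get_queryset(self):
            # Apply the ?sort=... parameter handling provided by Sortable
            return self.sort_queryset(super().get_queryset())
    return _SortedList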
class Filterable:
filter_fields = []
default_filters = []
def filter_queryset(self, qs):
if self.filter_fields:
qs = self._handle_filter(qs)
if 'q' in self.request.GET:
qs = self._handle_search(qs)
return qs
def _handle_filter(self, qs):
for key in self.request.GET: # Do NOT use items() to preserve multivalue fields
value = self.request.GET.getlist(key)
if len(value) == 1:
value = value[0]
elif len(value) > 1:
key = f'{key}__in' if not key.endswith('__in') else key
if value:
lookup_key = key.split('__')[0]
if lookup_key in self.filter_fields:
qs = qs.filter(**{key: value})
return qs
def _handle_search(self, qs):
query = urllib.parse.unquote(self.request.GET['q'])
_filters = [Q(**{field: query}) for field in self.default_filters]
if len(_filters) > 1:
_filter = _filters[0]
for additional_filter in _filters[1:]:
_filter = _filter | additional_filter
qs = qs.filter(_filter)
elif _filters:
qs = qs.filter(_filters[0])
return qs
def get_context_data(self, **kwargs):
from django import forms
context = super().get_context_data(**kwargs)
context['search_form'] = SearchForm(
self.request.GET if 'q' in self.request.GET else {}
)
if hasattr(self, 'filter_form_class'):
context['filter_form'] = self.filter_form_class(
self.request.event, self.request.GET
)
elif hasattr(self, 'get_filter_form'):
context['filter_form'] = self.get_filter_form()
elif self.filter_fields:
context['filter_form'] = forms.modelform_factory(
self.model, fields=self.filter_fields
)(self.request.GET)
for field in context['filter_form'].fields.values():
field.required = False
if hasattr(field, 'queryset'):
field.queryset = field.queryset.filter(event=self.request.event)
return context
class PermissionRequired(PermissionRequiredMixin):
def has_permission(self):
result = super().has_permission()
if not result:
request = getattr(self, 'request', None)
if request and hasattr(request, 'event'):
key = f'pretalx_event_access_{request.event.pk}'
if key in request.session:
sparent = SessionStore(request.session.get(key))
parentdata = []
with suppress(Exception):
parentdata = sparent.load()
return 'event_access' in parentdata
return result
def get_login_url(self):
"""We do this to avoid leaking data about existing pages."""
raise Http404()
def handle_no_permission(self):
request = getattr(self, 'request', None)
if (
request
and hasattr(request, 'event')
and request.user.is_anonymous
and 'cfp' in request.resolver_match.namespaces
):
params = '&' + request.GET.urlencode() if request.GET else ''
return redirect(
request.event.urls.login + f'?next={quote(request.path)}' + params
)
raise Http404()
class EventPermissionRequired(PermissionRequired):
def get_permission_object(self):
return self.request.event
| 1.75 | 2 |
posthog/models/property_definition.py | dorucioclea/posthog | 0 | 12762037 | <reponame>dorucioclea/posthog<gh_stars>0
from django.contrib.postgres.indexes import GinIndex
from django.db import models
from posthog.models.team import Team
from posthog.models.utils import UUIDModel
class PropertyType(models.TextChoices):
Datetime = "DateTime", "DateTime"
String = "String", "String"
Numeric = "Numeric", "Numeric"
Boolean = "Boolean", "Boolean"
class PropertyFormat(models.TextChoices):
UnixTimestamp = "unix_timestamp", "Unix Timestamp in seconds"
UnixTimestampMilliseconds = "unix_timestamp_milliseconds", "Unix Timestamp in milliseconds"
ISO8601Date = "YYYY-MM-DDThh:mm:ssZ", "YYYY-MM-DDThh:mm:ssZ"
FullDate = "YYYY-MM-DD hh:mm:ss", "YYYY-MM-DD hh:mm:ss"
FullDateIncreasing = "DD-MM-YYYY hh:mm:ss", "DD-MM-YYYY hh:mm:ss"
Date = "YYYY-MM-DD", "YYYY-MM-DD"
RFC822 = "rfc_822", "day, DD MMM YYYY hh:mm:ss TZ"
WithSlashes = "YYYY/MM/DD hh:mm:ss", "YYYY/MM/DD hh:mm:ss"
WithSlashesIncreasing = "DD/MM/YYYY hh:mm:ss", "DD/MM/YYYY hh:mm:ss"
class PropertyDefinition(UUIDModel):
team: models.ForeignKey = models.ForeignKey(
Team, on_delete=models.CASCADE, related_name="property_definitions", related_query_name="team",
)
name: models.CharField = models.CharField(max_length=400)
is_numerical: models.BooleanField = models.BooleanField(
default=False,
) # whether the property can be interpreted as a number, and therefore used for math aggregation operations
query_usage_30_day: models.IntegerField = models.IntegerField(
default=None, null=True,
) # Number of times the event has been used in a query in the last 30 rolling days (computed asynchronously)
property_type = models.CharField(max_length=50, choices=PropertyType.choices, blank=True, null=True)
# DEPRECATED
property_type_format = models.CharField(
max_length=50, choices=PropertyFormat.choices, blank=True, null=True
) # Deprecated in #8292
# DEPRECATED
volume_30_day: models.IntegerField = models.IntegerField(
default=None, null=True,
) # Deprecated in #4480
class Meta:
unique_together = ("team", "name")
indexes = [
GinIndex(name="index_property_definition_name", fields=["name"], opclasses=["gin_trgm_ops"]),
] # To speed up DB-based fuzzy searching
constraints = [
models.CheckConstraint(name="property_type_is_valid", check=models.Q(property_type__in=PropertyType.values))
]
def __str__(self) -> str:
return f"{self.name} / {self.team.name}"
    # This is a dynamically calculated field in api/property_definition.py. Defaults to `None` here to help serializers.
def is_event_property(self) -> None:
return None
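# Hedged sketch (editor's addition, not part of PostHog): creating a numeric property
# definition for an existing team via the standard Django manager; the property name
# "purchase_value" is an arbitrary example.
def _example_create_definition(team):
    return PropertyDefinition.objects.create(
        team=team,
        name="purchase_value",
        is_numerical=True,
        property_type=PropertyType.Numeric,
    )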
| 2.1875 | 2 |
WeatherPy/api_keys.py | Dillongrow/python-api-challenge | 0 | 12762038 | weather_api_key="my key"
| 1.179688 | 1 |
fsleyes/controls/overlaydisplaypanel.py | pauldmccarthy/fsleyes | 12 | 12762039 | #!/usr/bin/env python
#
# overlaydisplaypanel.py - The OverlayDisplayPanel.
#
# Author: <NAME> <<EMAIL>>
"""This module provides the :class:`OverlayDisplayPanel` class, a *FSLeyes
control* panel which allows the user to change overlay display settings.
"""
import logging
import functools
import collections
import collections.abc as abc
import wx
import fsleyes_props as props
import fsleyes.views.canvaspanel as canvaspanel
import fsleyes.controls.controlpanel as ctrlpanel
import fsleyes.strings as strings
import fsleyes.tooltips as fsltooltips
from . import overlaydisplaywidgets as odwidgets
log = logging.getLogger(__name__)
class OverlayDisplayPanel(ctrlpanel.SettingsPanel):
"""The ``OverlayDisplayPanel`` is a :class:`.SettingsPanel` which allows
the user to change the display settings of the currently selected
overlay (which is defined by the :attr:`.DisplayContext.selectedOverlay`
property). The display settings for an overlay are contained in the
:class:`.Display` and :class:`.DisplayOpts` instances associated with
that overlay. An ``OverlayDisplayPanel`` looks something like the
following:
.. image:: images/overlaydisplaypanel.png
:scale: 50%
:align: center
An ``OverlayDisplayPanel`` uses a :class:`.WidgetList` to organise the
settings into two main sections:
- Settings which are common across all overlays - these are defined
in the :class:`.Display` class.
- Settings which are specific to the current
:attr:`.Display.overlayType` - these are defined in the
:class:`.DisplayOpts` sub-classes.
The settings that are displayed on an ``OverlayDisplayPanel`` are
defined in the :attr:`_DISPLAY_PROPS` and :attr:`_DISPLAY_WIDGETS`
dictionaries.
"""
@staticmethod
def supportedViews():
"""Overrides :meth:`.ControlMixin.supportedViews`. The
``OverlayDisplayPanel`` is only intended to be added to
:class:`.OrthoPanel`, :class:`.LightBoxPanel`, or
:class:`.Scene3DPanel` views.
"""
return [canvaspanel.CanvasPanel]
@staticmethod
def defaultLayout():
"""Returns a dictionary containing layout settings to be passed to
:class:`.ViewPanel.togglePanel`.
"""
return {'location' : wx.LEFT}
def __init__(self, parent, overlayList, displayCtx, canvasPanel):
"""Create an ``OverlayDisplayPanel``.
:arg parent: The :mod:`wx` parent object.
:arg overlayList: The :class:`.OverlayList` instance.
:arg displayCtx: The :class:`.DisplayContext` instance.
:arg canvasPanel: The :class:`.CanvasPanel` instance.
"""
from fsleyes.views.scene3dpanel import Scene3DPanel
ctrlpanel.SettingsPanel.__init__(self,
parent,
overlayList,
displayCtx,
canvasPanel,
kbFocus=True)
displayCtx .addListener('selectedOverlay',
self.name,
self.__selectedOverlayChanged)
overlayList.addListener('overlays',
self.name,
self.__selectedOverlayChanged)
self.__threedee = isinstance(parent, Scene3DPanel)
self.__viewPanel = canvasPanel
self.__widgets = None
self.__currentOverlay = None
self.__selectedOverlayChanged()
def destroy(self):
"""Must be called when this ``OverlayDisplayPanel`` is no longer
needed. Removes property listeners, and calls the
:meth:`.SettingsPanel.destroy` method.
"""
self.displayCtx .removeListener('selectedOverlay', self.name)
self.overlayList.removeListener('overlays', self.name)
if self.__currentOverlay is not None and \
self.__currentOverlay in self.overlayList:
display = self.displayCtx.getDisplay(self.__currentOverlay)
display.removeListener('overlayType', self.name)
self.__viewPanel = None
self.__widgets = None
self.__currentOverlay = None
ctrlpanel.SettingsPanel.destroy(self)
def __selectedOverlayChanged(self, *a):
"""Called when the :class:`.OverlayList` or
:attr:`.DisplayContext.selectedOverlay` changes. Refreshes this
``OverlayDisplayPanel`` so that the display settings for the newly
selected overlay are shown.
"""
overlay = self.displayCtx.getSelectedOverlay()
lastOverlay = self.__currentOverlay
widgetList = self.getWidgetList()
if overlay is None:
self.__currentOverlay = None
self.__widgets = None
widgetList.Clear()
self.Layout()
return
if overlay is lastOverlay:
return
self.__currentOverlay = overlay
self.__widgets = collections.OrderedDict()
display = self.displayCtx.getDisplay(overlay)
opts = display.opts
if self.__threedee:
groups = ['display', 'opts', '3d']
targets = [ display, opts, opts]
labels = [strings.labels[self, display],
strings.labels[self, opts],
strings.labels[self, '3d']]
else:
groups = ['display', 'opts']
targets = [ display, opts]
labels = [strings.labels[self, display],
strings.labels[self, opts]]
keepExpanded = {g : True for g in groups}
if lastOverlay is not None and lastOverlay in self.overlayList:
lastDisplay = self.displayCtx.getDisplay(lastOverlay)
lastDisplay.removeListener('overlayType', self.name)
if lastOverlay is not None:
for g in groups:
keepExpanded[g] = widgetList.IsExpanded(g)
display.addListener('overlayType', self.name, self.__ovlTypeChanged)
widgetList.Clear()
for g, l, t in zip(groups, labels, targets):
widgetList.AddGroup(g, l)
self.__widgets[g] = self.__updateWidgets(t, g)
widgetList.Expand(g, keepExpanded[g])
self.setNavOrder()
self.Layout()
def setNavOrder(self):
        allWidgets = self.__widgets.values()
allWidgets = functools.reduce(lambda a, b: a + b, allWidgets)
ctrlpanel.SettingsPanel.setNavOrder(self, allWidgets)
def __ovlTypeChanged(self, *a):
"""Called when the :attr:`.Display.overlayType` of the current overlay
changes. Refreshes the :class:`.DisplayOpts` settings which are shown,
as a new :class:`.DisplayOpts` instance will have been created for the
overlay.
"""
opts = self.displayCtx.getOpts(self.__currentOverlay)
widgetList = self.getWidgetList()
        self.__widgets['opts'] = self.__updateWidgets(opts, 'opts')
widgetList.RenameGroup('opts', strings.labels[self, opts])
if '3d' in self.__widgets:
self.__widgets['3d'] = self.__updateWidgets(opts, '3d')
self.setNavOrder()
self.Layout()
def updateWidgets(self, target, groupName):
"""Re-generates the widgets for the given target/group. """
        self.__widgets[groupName] = self.__updateWidgets(target, groupName)
self.setNavOrder()
self.Layout()
def __updateWidgets(self, target, groupName):
"""Called by the :meth:`__selectedOverlayChanged` and
:meth:`__ovlTypeChanged` methods. Re-creates the controls on this
``OverlayDisplayPanel`` for the specified group.
:arg target: A :class:`.Display` or :class:`.DisplayOpts` instance,
which contains the properties that controls are to be
created for.
:arg groupName: Either ``'display'`` or ``'opts'``/``'3d'``,
corresponding to :class:`.Display` or
:class:`.DisplayOpts` properties.
:returns: A list containing all of the new widgets that
were created.
"""
widgetList = self.getWidgetList()
widgetList.ClearGroup(groupName)
if groupName == '3d':
dispProps = odwidgets.get3DPropertyList(target)
dispSpecs = odwidgets.get3DWidgetSpecs( target, self.displayCtx)
else:
dispProps = odwidgets.getPropertyList(target,
self.__threedee)
dispSpecs = odwidgets.getWidgetSpecs( target,
self.displayCtx,
self.__threedee)
allLabels = []
allTooltips = []
allWidgets = []
allContainers = []
for p in dispProps:
spec = dispSpecs[p]
specs = [spec]
labels = [strings .properties.get((target, p), None)]
tooltips = [fsltooltips.properties.get((target, p), None)]
if callable(spec):
                # Will either return a container
# widget/sizer and a list of widgets
# for setting the navigation order,
# or will return a list of specs
# (with an irrelevant second parameter)
container, widgets = spec(
target,
widgetList,
self,
self.overlayList,
self.displayCtx,
self.__threedee)
if isinstance(container, abc.Sequence):
specs = container
keys = [s.key for s in specs]
labels = [strings.properties.get((target, k), None)
for k in keys]
tooltips = [fsltooltips.properties.get((target, k), None)
for k in keys]
else:
allContainers.append(container)
allWidgets .extend(widgets)
specs = []
for s in specs:
widget = props.buildGUI(widgetList, target, s)
allWidgets .append(widget)
allContainers.append(widget)
allLabels .extend(labels)
allTooltips.extend(tooltips)
for widget, label, tooltip in zip(allContainers,
allLabels,
allTooltips):
if label is None:
label = ''
widgetList.AddWidget(
widget,
label,
tooltip=tooltip,
groupName=groupName)
return allWidgets
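# Hedged sketch (editor's addition, not part of FSLeyes): toggling this control panel on an
# existing view from a script. It assumes ``ortho`` is a live OrthoPanel (or other
# CanvasPanel) instance exposing the ViewPanel.togglePanel method.
def _example_toggle_display_panel(ortho):
    ortho.togglePanel(OverlayDisplayPanel)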
| 2.78125 | 3 |
process_rfi/cngi_io.py | autocorr/rfi-diagnostic-plotting | 0 | 12762040 | # CASA Next Generation Infrastructure
# Copyright (C) 2021 AUI, Inc. Washington DC, USA
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
#################################
# Helper File
#
# Not exposed in API
#
#################################
import warnings, time, os, psutil, multiprocessing, logging, re
import numpy as np
# from casatools import table as tb
from casatools import ms
from casatools import image as ia
from casatools import quanta as qa
try:
import pandas as pd
import xarray, dask, dask.array, dask.delayed, dask.distributed
except:
print('#### ERROR - dask and/or xarray dependencies are missing ####')
try:
from casacore import tables
except:
print('#### ERROR - python-casacore not found, must be manually installed by user ####')
warnings.filterwarnings('ignore', category=FutureWarning)
# TODO: python-casacore dependency is needed here
# Problems with the table tool:
# - inflates data sizes by reading everything as 64-bit float / 128-bit complex,
# - segfaults when used in dask delayed objects with non-locking reads
# - row access not available, segfaults on column access for some test data
########################################################
# helper function to initialize the processing environment
def initialize_processing(cores=None, memory_limit=None):
# setup dask.distributed based multiprocessing environment
if cores is None: cores = multiprocessing.cpu_count()
if memory_limit is None: memory_limit = str(round(((psutil.virtual_memory().available / (1024 ** 2)) * 0.75) / cores)) + 'MB'
dask.config.set({"distributed.scheduler.allowed-failures": 10})
dask.config.set({"distributed.scheduler.work-stealing": False})
dask.config.set({"distributed.scheduler.unknown-task-duration": '99m'})
dask.config.set({"distributed.worker.memory.pause": False})
dask.config.set({"distributed.worker.memory.terminate": False})
dask.config.set({"distributed.worker.memory.recent-to-old-time": '999s'})
dask.config.set({"distributed.comm.timeouts.connect": '360s'})
dask.config.set({"distributed.comm.timeouts.tcp": '360s'})
dask.config.set({"distributed.nanny.environ.OMP_NUM_THREADS": 1})
dask.config.set({"distributed.nanny.environ.MKL_NUM_THREADS": 1})
cluster = dask.distributed.LocalCluster(n_workers=cores, threads_per_worker=1, processes=True, memory_limit=memory_limit, silence_logs=logging.ERROR)
client = dask.distributed.Client(cluster)
return client
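# Hedged usage sketch (editor's addition): starting a local dask.distributed client before
# doing any reads; the core count and memory limit are arbitrary example values.
def _example_start_client():
    client = initialize_processing(cores=4, memory_limit='4GB')
    print(client)
    return client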
########################################################
# helper for reading time columns to datetime format
# pandas datetimes are referenced against a 0 of 1970-01-01
# CASA's modified julian day reference time is (of course) 1858-11-17
# this requires a correction of 3506716800 seconds which is hardcoded to save time
def convert_time(rawtimes):
correction = 3506716800.0
return pd.to_datetime(np.array(rawtimes) - correction, unit='s').values
# dt = pd.to_datetime(np.atleast_1d(rawtimes) - correction, unit='s').values
# if len(np.array(rawtimes).shape) == 0: dt = dt[0]
# return dt
def revert_time(datetimes):
return (datetimes.astype(float) / 10 ** 9) + 3506716800.0
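# Hedged sanity-check sketch (editor's addition): convert_time/revert_time should round-trip
# a CASA MJD-seconds value; 4.9e9 is an arbitrary example value (seconds since 1858-11-17).
def _example_time_roundtrip():
    casa_seconds = 4.9e9
    dt = convert_time([casa_seconds])
    assert abs(revert_time(dt)[0] - casa_seconds) < 1e-3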
#######################################################################################
# return a dictionary of table attributes created from keywords and column descriptions
def extract_table_attributes(infile):
tb_tool = tables.table(infile, readonly=True, lockoptions={'option': 'usernoread'}, ack=False)
kwd = tb_tool.getkeywords()
attrs = dict([(kk, kwd[kk]) for kk in kwd if kk not in os.listdir(infile)])
cols = tb_tool.colnames()
column_descriptions = {}
for col in cols:
column_descriptions[col] = tb_tool.getcoldesc(col)
attrs['column_descriptions'] = column_descriptions
attrs['info'] = tb_tool.info()
tb_tool.close()
return attrs
#################################################
# translate numpy dtypes to casacore type strings
def type_converter(npdtype):
cctype = 'bad'
if (npdtype == 'int64') or (npdtype == 'int32'):
cctype = 'int'
elif npdtype == 'bool':
cctype = 'bool'
elif npdtype == 'float32':
cctype = 'float'
elif (npdtype == 'float64') or (npdtype == 'datetime64[ns]'):
cctype = 'double'
elif npdtype == 'complex64':
cctype = 'complex'
elif npdtype == 'complex128':
cctype = 'dcomplex'
elif str(npdtype).startswith('<U'):
cctype = 'string'
return cctype
###############################################################################
# create and initialize new output table
def create_table(outfile, xds, max_rows, infile=None, cols=None, generic=False):
if os.path.isdir(outfile):
os.system('rm -fr %s' % outfile)
# create column descriptions for table description
if cols is None: cols = list(set(list(xds.data_vars) + list(xds.attrs['column_descriptions'].keys())) if 'column_descriptions' in xds.attrs else list(xds.data_vars))
tabledesc = {}
for col in cols:
if ('column_descriptions' in xds.attrs) and (col in xds.attrs['column_descriptions']):
coldesc = xds.attrs['column_descriptions'][col]
else:
coldesc = {'valueType': type_converter(xds[col].dtype)}
if generic or (col == 'UVW'): # will be statically shaped even if not originally
coldesc = {'shape': tuple(np.clip(xds[col].shape[1:], 1, None))}
elif xds[col].ndim > 1: # make variably shaped
coldesc = {'ndim': xds[col].ndim - 1}
coldesc['name'] = col
coldesc['desc'] = col
tabledesc[col] = coldesc
if generic:
tb_tool = tables.table(outfile, tabledesc=tabledesc, nrow=max_rows, readonly=False, lockoptions={'option': 'permanentwait'}, ack=False)
else:
tb_tool = tables.default_ms(outfile, tabledesc)
tb_tool.addrows(max_rows)
if 'DATA_DESC_ID' in cols: tb_tool.putcol('DATA_DESC_ID', np.zeros((max_rows), dtype='int32') - 1, 0, max_rows)
# write xds attributes to table keywords, skipping certain reserved attributes
existing_keywords = tb_tool.getkeywords()
for attr in xds.attrs:
if attr in ['bad_cols', 'bad_types', 'column_descriptions', 'history', 'subtables', 'info'] + list(existing_keywords.keys()): continue
tb_tool.putkeyword(attr, xds.attrs[attr])
if 'info' in xds.attrs: tb_tool.putinfo(xds.attrs['info'])
# copy subtables and add to main table
if infile:
subtables = [ss.path for ss in os.scandir(infile) if ss.is_dir() and ('SORTED_TABLE' not in ss.path)]
os.system('cp -r %s %s' % (' '.join(subtables), outfile))
for subtable in subtables:
sub_tbl = tables.table(os.path.join(outfile, subtable[subtable.rindex('/') + 1:]), readonly=False, lockoptions={'option': 'permanentwait'}, ack=False)
tb_tool.putkeyword(subtable[subtable.rindex('/') + 1:], sub_tbl, makesubrecord=True)
sub_tbl.close()
tb_tool.close()
##################################################################################################
##
## MeasurementSets
##
##################################################################################################
##################################################################
# takes a list of visibility xarray datasets and packages them as a dataset of datasets
# xds_list is a list of tuples (name, xds)
def vis_xds_packager(xds_list):
mxds = xarray.Dataset(attrs=dict(xds_list))
coords = {}
if 'ANTENNA' in mxds.attrs:
coords['antenna_ids'] = mxds.ANTENNA.row.values
coords['antennas'] = xarray.DataArray(mxds.ANTENNA.NAME.values, dims=['antenna_ids'])
if 'FIELD' in mxds.attrs:
coords['field_ids'] = mxds.FIELD.row.values
coords['fields'] = xarray.DataArray(mxds.FIELD.NAME.values, dims=['field_ids'])
if 'FEED' in mxds.attrs:
coords['feed_ids'] = mxds.FEED.FEED_ID.values
if 'OBSERVATION' in mxds.attrs:
coords['observation_ids'] = mxds.OBSERVATION.row.values
coords['observations'] = xarray.DataArray(mxds.OBSERVATION.PROJECT.values, dims=['observation_ids'])
if 'POLARIZATION' in mxds.attrs:
coords['polarization_ids'] = mxds.POLARIZATION.row.values
if 'SOURCE' in mxds.attrs:
coords['source_ids'] = mxds.SOURCE.SOURCE_ID.values
coords['sources'] = xarray.DataArray(mxds.SOURCE.NAME.values, dims=['source_ids'])
if 'SPECTRAL_WINDOW' in mxds.attrs:
coords['spw_ids'] = mxds.SPECTRAL_WINDOW.row.values
if 'STATE' in mxds.attrs:
coords['state_ids'] = mxds.STATE.row.values
mxds = mxds.assign_coords(coords)
return mxds
########################################################################################
# translates MS selection parameters into corresponding row indices and channel indices
def ms_selection(infile, outfile=None, verbose=False, spw=None, field=None, times=None, baseline=None, scan=None, scanintent=None, array=None, uvdist=None, observation=None, polarization=None):
"""
"""
infile = os.path.expanduser(infile)
mstool = ms()
mstool.open(infile)
# build the selection structure
selection = {}
if (spw is not None) and (len(spw) > 0): selection['spw'] = spw
if (field is not None) and (len(field) > 0): selection['field'] = field
if (scan is not None) and (len(scan) > 0): selection['scan'] = scan
if (baseline is not None) and (len(baseline) > 0): selection['baseline'] = baseline
if (times is not None) and (len(times) > 0): selection['time'] = times
if (scanintent is not None) and (len(scanintent) > 0): selection['scanintent'] = scanintent
if (uvdist is not None) and (len(uvdist) > 0): selection['uvdist'] = uvdist
if (polarization is not None) and (len(polarization) > 0): selection['polarization'] = polarization
if (array is not None) and (len(array) > 0): selection['array'] = array
if (observation is not None) and (len(observation) > 0): selection['observation'] = observation
# build structure of indices per DDI, intersected with selection criteria
ddis, total_rows = [], None
chanmap = {} # dict of ddis to channels
if len(selection) > 0:
if verbose: print('selecting data...')
mstool.msselect(selection)
total_rows = mstool.range('rows')['rows']
selectedindices = mstool.msselectedindices()
ddis, chanranges = selectedindices['dd'], selectedindices['channel']
for ci, cr in enumerate(chanranges):
if ddis[ci] not in chanmap: chanmap[ddis[ci]] = []
chanmap[ddis[ci]] = np.concatenate((chanmap[ddis[ci]], list(range(cr[1], cr[2] + 1, cr[3]))), axis=0).astype(int)
# copy the selected table to the outfile destination if given
if outfile is not None:
outfile = os.path.expanduser(outfile)
if verbose: print('copying selection to output...')
if len(selection) > 0:
mstool.split(outfile, whichcol='all')
else:
os.system('rm -fr %s' % outfile)
os.system('cp -r %s %s' % (infile, outfile))
mstool.reset()
if len(ddis) == 0: # selection didn't reduce ddi count, so get them all
ddis = list(mstool.range('data_desc_id')['data_desc_id'])
# figure out which selected rows are in which ddis
if verbose: print('intersecting DDI row ids...')
rowmap = {} # dict of ddis to (rows, channels)
for ddi in ddis:
mstool.selectinit(datadescid=ddi)
ddirowidxs = mstool.range('rows')['rows']
if total_rows is None:
rowmap[ddi] = (ddirowidxs, chanmap[ddi] if ddi in chanmap else None)
else:
rowmap[ddi] = (np.intersect1d(ddirowidxs, total_rows, assume_unique=True), chanmap[ddi] if ddi in chanmap else None)
mstool.reset()
mstool.close()
if verbose: print('selection complete')
return rowmap
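# Hedged usage sketch (editor's addition): restrict a later read_ms call to one spw and one
# field using the returned rowmap; the MS path and field name are hypothetical examples.
def _example_selected_read():
    rowmap = ms_selection('uid_A002.ms', spw='0', field='3C286', verbose=True)
    return read_ms('uid_A002.ms', rowmap=rowmap, subtables=True)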
##################################################################
## expand row dimension of xds to (time, baseline)
def expand_xds(xds):
txds = xds.copy()
unique_baselines, baselines = np.unique([txds.ANTENNA1.values, txds.ANTENNA2.values], axis=1, return_inverse=True)
txds['baseline'] = xarray.DataArray(baselines.astype('int32'), dims=['row'])
txds['time'] = txds['TIME'].copy()
try:
txds = txds.set_index(row=['time', 'baseline']).unstack('row').transpose('time', 'baseline', ...)
# unstack makes everything a float, so we need to reset to the proper type
for dv in txds.data_vars:
txds[dv] = txds[dv].astype(xds[dv].dtype)
except:
print("WARNING: Cannot expand rows to (time, baseline), possibly duplicate values in (time, baseline)")
txds = xds.copy()
return txds
##################################################################
## flatten (time, baseline) dimensions of xds back to single row
def flatten_xds(xds):
nan_int = np.array([np.nan]).astype('int32')[0]
txds = xds.copy()
# flatten the time x baseline dimensions of main table
if ('time' in xds.dims) and ('baseline' in xds.dims):
txds = xds.stack({'row': ('time', 'baseline')}).transpose('row', ...)
txds = txds.where((txds.STATE_ID != nan_int) & (txds.FIELD_ID != nan_int), drop=True) #.unify_chunks()
for dv in list(xds.data_vars):
txds[dv] = txds[dv].astype(xds[dv].dtype)
return txds
##################################################################
# read casacore table format in to memory
##################################################################
def read_generic_table(infile, subtables=False, timecols=None, ignore=None):
"""
read generic casacore table format to xarray dataset loaded in memory
Parameters
----------
infile : str
Input table filename. To read a subtable simply append the subtable folder name under the main table (ie infile = '/path/mytable.tbl/mysubtable')
subtables : bool
Whether or not to include subtables underneath the specified table. If true, an attribute called subtables will be added to the returned xds.
Default False
timecols : list
list of column names to convert to numpy datetime format. Default None leaves times as their original casacore format.
ignore : list
list of column names to ignore and not try to read. Default None reads all columns
Returns
-------
xarray.core.dataset.Dataset
"""
if timecols is None: timecols = []
if ignore is None: ignore = []
infile = os.path.expanduser(infile)
assert os.path.isdir(infile), "invalid input filename to read_generic_table"
attrs = extract_table_attributes(infile)
tb_tool = tables.table(infile, readonly=True, lockoptions={'option': 'usernoread'}, ack=False)
if tb_tool.nrows() == 0:
tb_tool.close()
return xarray.Dataset(attrs=attrs)
dims = ['row'] + ['d%i' % ii for ii in range(1, 20)]
cols = tb_tool.colnames()
ctype = dict([(col, tb_tool.getcell(col, 0)) for col in cols if (col not in ignore) and (tb_tool.iscelldefined(col, 0))])
mvars, mcoords, xds = {}, {}, xarray.Dataset()
tr = tb_tool.row(ignore, exclude=True)[:]
# extract data for each col
for col in ctype.keys():
if tb_tool.coldatatype(col) == 'record': continue # not supported
try:
data = np.stack([rr[col] for rr in tr]) # .astype(ctype[col].dtype)
if isinstance(tr[0][col], dict):
data = np.stack([rr[col]['array'].reshape(rr[col]['shape']) if len(rr[col]['array']) > 0 else np.array(['']) for rr in tr])
except:
# sometimes the columns are variable, so we need to standardize to the largest sizes
if len(np.unique([isinstance(rr[col], dict) for rr in tr])) > 1: continue # can't deal with this case
mshape = np.array(max([np.array(rr[col]).shape for rr in tr]))
try:
data = np.stack([np.pad(rr[col] if len(rr[col]) > 0 else np.array(rr[col]).reshape(np.arange(len(mshape)) * 0),
[(0, ss) for ss in mshape - np.array(rr[col]).shape], 'constant', constant_values=np.array([np.nan]).astype(np.array(ctype[col]).dtype)[0]) for rr in tr])
except:
data = []
if len(data) == 0: continue
        if col in timecols: data = convert_time(data)
if col.endswith('_ID'):
mcoords[col] = xarray.DataArray(data, dims=['d%i_%i' % (di, ds) for di, ds in enumerate(np.array(data).shape)])
else:
mvars[col] = xarray.DataArray(data, dims=['d%i_%i' % (di, ds) for di, ds in enumerate(np.array(data).shape)])
xds = xarray.Dataset(mvars, coords=mcoords)
xds = xds.rename(dict([(dv, dims[di]) for di, dv in enumerate(xds.dims)]))
attrs['bad_cols'] = list(np.setdiff1d([dv for dv in tb_tool.colnames()], [dv for dv in list(xds.data_vars) + list(xds.coords)]))
# if this table has subtables, use a recursive call to store them in subtables attribute
if subtables:
stbl_list = sorted([tt for tt in os.listdir(infile) if os.path.isdir(os.path.join(infile, tt))])
attrs['subtables'] = []
for ii, subtable in enumerate(stbl_list):
sxds = read_generic_table(os.path.join(infile, subtable), subtables=subtables, timecols=timecols, ignore=ignore)
if len(sxds.dims) != 0: attrs['subtables'] += [(subtable, sxds)]
xds = xds.assign_attrs(attrs)
tb_tool.close()
return xds
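# Hedged usage sketch (editor's addition): load the ANTENNA subtable of a hypothetical MS
# straight into memory and list the dish names from its NAME column.
def _example_read_antenna_names():
    ant_xds = read_generic_table('uid_A002.ms/ANTENNA')
    return list(ant_xds.NAME.values)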
##################################################################
# Summarize the contents of an MS directory in casacore table format
def describe_ms(infile):
infile = os.path.expanduser(infile) # does nothing if $HOME is unknown
assert os.path.isdir(infile), "invalid input filename to describe_ms"
# figure out characteristics of main table from select subtables (must all be present)
spw_xds = read_generic_table(os.path.join(infile, 'SPECTRAL_WINDOW'))
pol_xds = read_generic_table(os.path.join(infile, 'POLARIZATION'))
ddi_xds = read_generic_table(os.path.join(infile, 'DATA_DESCRIPTION'))
ddis = list(ddi_xds.row.values)
summary = pd.DataFrame([])
spw_ids = ddi_xds.SPECTRAL_WINDOW_ID.values
pol_ids = ddi_xds.POLARIZATION_ID.values
chans = spw_xds.NUM_CHAN.values
pols = pol_xds.NUM_CORR.values
for ddi in ddis:
print('processing ddi %i of %i' % (ddi + 1, len(ddis)), end='\r')
sorted_table = tables.taql('select * from %s where DATA_DESC_ID = %i' % (infile, ddi))
sdf = {'ddi': ddi, 'spw_id': spw_ids[ddi], 'pol_id': pol_ids[ddi], 'rows': sorted_table.nrows(),
'times': len(np.unique(sorted_table.getcol('TIME'))),
'baselines': len(np.unique(np.hstack([sorted_table.getcol(rr)[:, None] for rr in ['ANTENNA1', 'ANTENNA2']]), axis=0)),
'chans': chans[spw_ids[ddi]],
'pols': pols[pol_ids[ddi]]}
sdf['size_MB'] = np.ceil((sdf['times'] * sdf['baselines'] * sdf['chans'] * sdf['pols'] * 9) / 1024 ** 2).astype(int)
summary = pd.concat([summary, pd.DataFrame(sdf, index=[str(ddi)])], axis=0, sort=False)
sorted_table.close()
print(' ' * 50, end='\r')
return summary.set_index('ddi').sort_index()
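# Hedged usage sketch (editor's addition): summarize a hypothetical MS per DDI before picking
# chunk sizes for read_ms; the path is an example only.
def _example_summarize_ms():
    summary = describe_ms('uid_A002.ms')
    print(summary[['times', 'baselines', 'chans', 'pols', 'size_MB']])
    return summary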
#######################################################
# helper function extract data chunk for each col
# this is fed to dask.delayed
def read_flat_col_chunk(infile, col, cshape, ridxs, cstart, pstart):
tb_tool = tables.table(infile, readonly=True, lockoptions={'option': 'usernoread'}, ack=False)
rgrps = [(rr[0], rr[-1]) for rr in np.split(ridxs, np.where(np.diff(ridxs) > 1)[0] + 1)]
try:
if (len(cshape) == 1) or (col == 'UVW'): # all the scalars and UVW
data = np.concatenate([tb_tool.getcol(col, rr[0], rr[1] - rr[0] + 1) for rr in rgrps], axis=0)
elif len(cshape) == 2: # WEIGHT, SIGMA
data = np.concatenate([tb_tool.getcolslice(col, pstart, pstart + cshape[1] - 1, [], rr[0], rr[1] - rr[0] + 1) for rr in rgrps], axis=0)
elif len(cshape) == 3: # DATA and FLAG
data = np.concatenate([tb_tool.getcolslice(col, (cstart, pstart), (cstart + cshape[1] - 1, pstart + cshape[2] - 1), [], rr[0], rr[1] - rr[0] + 1) for rr in rgrps], axis=0)
except:
print('ERROR reading chunk: ', col, cshape, cstart, pstart)
tb_tool.close()
return data
##############################################################
def read_flat_main_table(infile, ddi, rowidxs=None, chunks=(22000, 512, 2)):
# get row indices relative to full main table
if rowidxs is None:
tb_tool = tables.taql('select rowid() as ROWS from %s where DATA_DESC_ID = %i' % (infile, ddi))
rowidxs = tb_tool.getcol('ROWS')
tb_tool.close()
nrows = len(rowidxs)
if nrows == 0:
return xarray.Dataset()
tb_tool = tables.taql('select * from %s where DATA_DESC_ID = %i' % (infile, ddi))
cols = tb_tool.colnames()
ignore = [col for col in cols if (not tb_tool.iscelldefined(col, 0)) or (tb_tool.coldatatype(col) == 'record')]
cdata = dict([(col, tb_tool.getcol(col, 0, 1)) for col in cols if col not in ignore])
chan_cnt, pol_cnt = [(cdata[cc].shape[1], cdata[cc].shape[2]) for cc in cdata if len(cdata[cc].shape) == 3][0]
mvars, mcoords, bvars, xds = {}, {}, {}, xarray.Dataset()
tb_tool.close()
# loop over row chunks
for rc in range(0, nrows, chunks[0]):
crlen = min(chunks[0], nrows - rc) # chunk row length
rcidxs = rowidxs[rc:rc + chunks[0]]
# loop over each column and create delayed dask arrays
for col in cdata.keys():
if col not in bvars: bvars[col] = []
if len(cdata[col].shape) == 1:
delayed_array = dask.delayed(read_flat_col_chunk)(infile, col, (crlen,), rcidxs, None, None)
bvars[col] += [dask.array.from_delayed(delayed_array, (crlen,), cdata[col].dtype)]
elif col == 'UVW':
delayed_array = dask.delayed(read_flat_col_chunk)(infile, col, (crlen, 3), rcidxs, None, None)
bvars[col] += [dask.array.from_delayed(delayed_array, (crlen, 3), cdata[col].dtype)]
elif len(cdata[col].shape) == 2:
pol_list = []
dd = 1 if cdata[col].shape[1] == chan_cnt else 2
for pc in range(0, cdata[col].shape[1], chunks[dd]):
plen = min(chunks[dd], cdata[col].shape[1] - pc)
delayed_array = dask.delayed(read_flat_col_chunk)(infile, col, (crlen, plen), rcidxs, None, pc)
pol_list += [dask.array.from_delayed(delayed_array, (crlen, plen), cdata[col].dtype)]
bvars[col] += [dask.array.concatenate(pol_list, axis=1)]
elif len(cdata[col].shape) == 3:
chan_list = []
for cc in range(0, chan_cnt, chunks[1]):
clen = min(chunks[1], chan_cnt - cc)
pol_list = []
for pc in range(0, cdata[col].shape[2], chunks[2]):
plen = min(chunks[2], cdata[col].shape[2] - pc)
delayed_array = dask.delayed(read_flat_col_chunk)(infile, col, (crlen, clen, plen), rcidxs, cc, pc)
pol_list += [dask.array.from_delayed(delayed_array, (crlen, clen, plen), cdata[col].dtype)]
chan_list += [dask.array.concatenate(pol_list, axis=2)]
bvars[col] += [dask.array.concatenate(chan_list, axis=1)]
# now concat all the dask chunks from each time to make the xds
mvars = {}
for kk in bvars.keys():
if kk == 'UVW':
mvars[kk] = xarray.DataArray(dask.array.concatenate(bvars[kk], axis=0), dims=['row', 'uvw_index'])
elif len(bvars[kk][0].shape) == 2 and (bvars[kk][0].shape[-1] == pol_cnt):
mvars[kk] = xarray.DataArray(dask.array.concatenate(bvars[kk], axis=0), dims=['row', 'pol'])
elif len(bvars[kk][0].shape) == 2 and (bvars[kk][0].shape[-1] == chan_cnt):
mvars[kk] = xarray.DataArray(dask.array.concatenate(bvars[kk], axis=0), dims=['row', 'chan'])
else:
mvars[kk] = xarray.DataArray(dask.array.concatenate(bvars[kk], axis=0), dims=['row', 'chan', 'pol'][:len(bvars[kk][0].shape)])
mvars['TIME'] = xarray.DataArray(convert_time(mvars['TIME'].values), dims=['row']).chunk({'row': chunks[0]})
attrs = extract_table_attributes(infile)
attrs['bad_cols'] = ignore
xds = xarray.Dataset(mvars, coords=mcoords).assign_attrs(attrs)
return xds
#####################################################################
def read_ms(infile, rowmap=None, subtables=False, expand=False, chunks=(22000, 512, 2)):
"""
Read legacy format MS to xarray Visibility Dataset
The MS is partitioned by DDI, which guarantees a fixed data shape per partition. This results in separate xarray
dataset (xds) partitions contained within a main xds (mxds).
Parameters
----------
infile : str
Input MS filename
rowmap : dict
Dictionary of DDI to tuple of (row indices, channel indices). Returned by ms_selection function. Default None ignores selections
subtables : bool
Also read and include subtables along with main table selection. Default False will omit subtables (faster)
expand : bool
Whether or not to return the original flat row structure of the MS (False) or expand the rows to time x baseline dimensions (True).
Expanding the rows allows for easier indexing and parallelization across time and baseline dimensions, at the cost of some conversion
time. Default False
    chunks : 3-D tuple of ints
        Shape of desired chunking in the form of (row, channel, polarization). Larger values reduce the number of chunks and
        speed up the reads at the cost of more memory. Chunk size is the product of the three numbers. Default is (22000, 512, 2)
Returns
-------
xarray.core.dataset.Dataset
Main xarray dataset of datasets for this visibility set
"""
import warnings
warnings.filterwarnings('ignore', category=FutureWarning)
# parse filename to use
infile = os.path.expanduser(infile)
assert os.path.isdir(infile), "invalid input filename to read_ms"
# we need the spectral window, polarization, and data description tables for processing the main table
spw_xds = read_generic_table(os.path.join(infile, 'SPECTRAL_WINDOW'))
pol_xds = read_generic_table(os.path.join(infile, 'POLARIZATION'))
ddi_xds = read_generic_table(os.path.join(infile, 'DATA_DESCRIPTION'))
# each DATA_DESC_ID (ddi) is a fixed shape that may differ from others
# form a list of ddis to process, each will be placed it in its own xarray dataset and partition
ddis = np.arange(ddi_xds.row.shape[0]) if rowmap is None else list(rowmap.keys())
xds_list = []
####################################################################
# process each selected DDI from the input MS, assume a fixed shape within the ddi (should always be true)
for ddi in ddis:
rowidxs = None if rowmap is None else rowmap[ddi][0]
chanidxs = None if rowmap is None else rowmap[ddi][1]
if ((rowidxs is not None) and (len(rowidxs) == 0)) or ((chanidxs is not None) and (len(chanidxs) == 0)): continue
xds = read_flat_main_table(infile, ddi, rowidxs=rowidxs, chunks=chunks)
if len(xds.dims) == 0: continue
# grab the channel frequency values from the spw table data and pol idxs from the polarization table, add spw and pol ids
chan = spw_xds.CHAN_FREQ.values[ddi_xds.SPECTRAL_WINDOW_ID.values[ddi], :xds.chan.shape[0]]
pol = pol_xds.CORR_TYPE.values[ddi_xds.POLARIZATION_ID.values[ddi], :xds.pol.shape[0]]
coords = {'chan': chan, 'pol': pol, 'spw_id': [ddi_xds['SPECTRAL_WINDOW_ID'].values[ddi]], 'pol_id': [ddi_xds['POLARIZATION_ID'].values[ddi]]}
xds = xds.assign_coords(coords) # .assign_attrs(attrs)
# filter by channel selection
if (chanidxs is not None) and (len(chanidxs) < len(xds.chan)):
xds = xds.isel(chan=chanidxs)
spw_xds['CHAN_FREQ'][ddi_xds.SPECTRAL_WINDOW_ID.values[ddi], :len(chanidxs)] = spw_xds.CHAN_FREQ[ddi_xds.SPECTRAL_WINDOW_ID.values[ddi], chanidxs]
# expand the row dimension out to (time, baseline)
if expand:
xds = expand_xds(xds)
xds_list += [('xds' + str(ddi), xds)]
# read other subtables
xds_list += [('SPECTRAL_WINDOW', spw_xds), ('POLARIZATION', pol_xds), ('DATA_DESCRIPTION', ddi_xds)]
if subtables:
skip_tables = ['SORTED_TABLE', 'SPECTRAL_WINDOW', 'POLARIZATION', 'DATA_DESCRIPTION']
stbl_list = sorted([tt for tt in os.listdir(infile) if os.path.isdir(os.path.join(infile, tt)) and tt not in skip_tables])
for ii, subtable in enumerate(stbl_list):
sxds = read_generic_table(os.path.join(infile, subtable), subtables=True, timecols=['TIME'], ignore=[])
if len(sxds.dims) != 0: xds_list += [(subtable, sxds)]
# build the master xds to return
mxds = vis_xds_packager(xds_list)
return mxds
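# Hedged usage sketch (editor's addition): read every DDI of a hypothetical MS, expand rows to
# (time, baseline), and grab the first partition; the chunk sizes are arbitrary examples.
def _example_read_ms():
    mxds = read_ms('uid_A002.ms', subtables=True, expand=True, chunks=(22000, 512, 2))
    return mxds.xds0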
############################################################################################
## write functions
############################################################################################
###################################
def write_generic_table(xds, outfile, subtable='', cols=None, verbose=False):
"""
Write generic xds contents back to casacore table format on disk
Parameters
----------
xds : xarray.Dataset
Source xarray dataset data
outfile : str
Destination filename (or parent main table if writing subtable)
subtable : str
Name of the subtable being written, triggers special logic to add subtable to parent table. Default '' for normal generic writes
cols : str or list
List of cols to write. Default None writes all columns
"""
outfile = os.path.expanduser(outfile)
if verbose: print('writing %s...' % os.path.join(outfile, subtable))
if cols is None: cols = list(set(list(xds.data_vars) + [cc for cc in xds.coords if cc not in xds.dims] + (list(xds.attrs['column_descriptions'].keys() if 'column_descriptions' in xds.attrs else []))))
cols = list(np.atleast_1d(cols))
max_rows = xds.row.shape[0] if 'row' in xds.dims else 0
create_table(os.path.join(outfile, subtable), xds, max_rows, infile=None, cols=cols, generic=True)
tb_tool = tables.table(os.path.join(outfile, subtable), readonly=False, lockoptions={'option': 'permanentwait'}, ack=False)
try:
for dv in cols:
if (dv not in xds) or (np.prod(xds[dv].shape) == 0): continue
values = xds[dv].values if xds[dv].dtype != 'datetime64[ns]' else revert_time(xds[dv].values)
tb_tool.putcol(dv, values, 0, values.shape[0], 1)
except:
print("ERROR: exception in write generic table - %s, %s, %s, %s" % (os.path.join(outfile,subtable), dv, str(values.shape), tb_tool.nrows()))
# now we have to add this subtable to the main table keywords (assuming a main table already exists)
if len(subtable) > 0:
main_tbl = tables.table(outfile, readonly=False, lockoptions={'option': 'permanentwait'}, ack=False)
main_tbl.putkeyword(subtable, tb_tool, makesubrecord=True)
main_tbl.done()
tb_tool.close()
# if this table has its own subtables, they need to be written out recursively
if 'subtables' in xds.attrs:
for st in list(xds.attrs['subtables']):
write_generic_table(st[1], os.path.join(outfile, subtable, st[0]), subtable='', verbose=verbose)
###################################
def write_main_table_slice(xda, outfile, ddi, col, full_shape, starts):
"""
Write an xds row chunk to the corresponding main table slice
"""
# trigger the DAG for this chunk and return values while the table is unlocked
values = xda.compute().values
if xda.dtype == 'datetime64[ns]':
values = revert_time(values)
tb_tool = tables.table(outfile, readonly=False, lockoptions={'option': 'permanentwait'}, ack=False)
tbs = tables.taql('select * from $tb_tool where DATA_DESC_ID = %i' % ddi)
if tbs.nrows() == 0: # this DDI has not been started yet
tbs = tables.taql('select * from $tb_tool where DATA_DESC_ID = -1')
#try:
if (values.ndim == 1) or (col == 'UVW'): # scalar columns
tbs.putcol(col, values, starts[0], len(values))
else:
if not tbs.iscelldefined(col, starts[0]): tbs.putcell(col, starts[0]+np.arange(len(values)), np.zeros((full_shape)))
tbs.putcolslice(col, values, starts[1:values.ndim], tuple(np.array(starts[1:values.ndim]) + np.array(values.shape[1:])-1), [], starts[0], len(values), 1)
#except:
# print("ERROR: write exception - %s, %s, %s" % (col, str(values.shape), str(starts)))
tbs.close()
tb_tool.close()
###################################
def write_ms(mxds, outfile, infile=None, subtables=False, modcols=None, verbose=False, execute=True):
"""
Write ms format xds contents back to casacore table format on disk
Parameters
----------
mxds : xarray.Dataset
Source multi-xarray dataset (originally created by read_ms)
outfile : str
Destination filename
infile : str
Source filename to copy subtables from. Generally faster than reading/writing through mxds via the subtables parameter. Default None
does not copy subtables to output.
subtables : bool
Also write subtables from mxds. Default of False only writes mxds attributes that begin with xdsN to the MS main table.
Setting to True will write all other mxds attributes to subtables of the main table. This is probably going to be SLOW!
Use infile instead whenever possible.
modcols : list
List of strings indicating what column(s) were modified (aka xds data_vars). Different logic can be applied to speed up processing when
a data_var has not been modified from the input. Default None assumes everything has been modified (SLOW)
verbose : bool
Whether or not to print output progress. Since writes will typically execute the DAG, if something is
going to go wrong, it will be here. Default False
execute : bool
Whether or not to actually execute the DAG, or just return it with write steps appended. Default True will execute it
"""
outfile = os.path.expanduser(outfile)
if verbose: print('initializing output...')
start = time.time()
xds_list = [flatten_xds(mxds.attrs[kk]) for kk in mxds.attrs if kk.startswith('xds')]
cols = list(set([dv for dx in xds_list for dv in dx.data_vars]))
if modcols is None: modcols = cols
modcols = list(np.atleast_1d(modcols))
# create an empty main table with enough space for all desired xds partitions
# the first selected xds partition will be passed to create_table to provide a definition of columns and table keywords
# we first need to add in additional keywords for the selected subtables that will be written as well
max_rows = np.sum([dx.row.shape[0] for dx in xds_list])
create_table(outfile, xds_list[0], max_rows=max_rows, infile=infile, cols=cols, generic=False)
# start a list of dask delayed writes to disk (to be executed later)
# the SPECTRAL_WINDOW table is assumed to always be present and will always be written since it is needed for channel frequencies
delayed_writes = [dask.delayed(write_generic_table)(mxds.SPECTRAL_WINDOW, outfile, 'SPECTRAL_WINDOW', cols=None)]
if subtables: # also write the rest of the subtables
for subtable in list(mxds.attrs.keys()):
if subtable.startswith('xds') or (subtable == 'SPECTRAL_WINDOW'): continue
if verbose: print('writing subtable %s...' % subtable)
delayed_writes += [dask.delayed(write_generic_table)(mxds.attrs[subtable], outfile, subtable, cols=None, verbose=verbose)]
for xds in xds_list:
txds = xds.copy().unify_chunks()
ddi = txds.DATA_DESC_ID[:1].values[0]
# serial write entire DDI column first so subsequent delayed writes can find their spot
if verbose: print('setting up DDI %i...' % ddi)
write_main_table_slice(txds['DATA_DESC_ID'], outfile, ddi=-1, col='DATA_DESC_ID', full_shape=None, starts=(0,))
# write each chunk of each modified data_var, triggering the DAG along the way
for col in modcols:
chunks = txds[col].chunks
dims = txds[col].dims
for d0 in range(len(chunks[0])):
d0start = ([0] + list(np.cumsum(chunks[0][:-1])))[d0]
for d1 in range(len(chunks[1]) if len(chunks) > 1 else 1):
d1start = ([0] + list(np.cumsum(chunks[1][:-1])))[d1] if len(chunks) > 1 else 0
for d2 in range(len(chunks[2]) if len(chunks) > 2 else 1):
d2start = ([0] + list(np.cumsum(chunks[2][:-1])))[d2] if len(chunks) > 2 else 0
starts = [d0start, d1start, d2start]
lengths = [chunks[0][d0], (chunks[1][d1] if len(chunks) > 1 else 0), (chunks[2][d2] if len(chunks) > 2 else 0)]
slices = [slice(starts[0], starts[0]+lengths[0]), slice(starts[1], starts[1]+lengths[1]), slice(starts[2], starts[2]+lengths[2])]
txda = txds[col].isel(dict(zip(dims, slices)), missing_dims='ignore')
delayed_writes += [dask.delayed(write_main_table_slice)(txda, outfile, ddi=ddi, col=col, full_shape=txds[col].shape[1:], starts=starts)]
# now write remaining data_vars from the xds that weren't modified
# this can be done faster by collapsing the chunking to maximum size (minimum #) possible
max_chunk_size = np.prod([txds.chunks[kk][0] for kk in txds.chunks if kk in ['row', 'chan', 'pol']])
for col in list(np.setdiff1d(cols, modcols)):
col_chunk_size = np.prod([kk[0] for kk in txds[col].chunks])
col_rows = int(np.ceil(max_chunk_size / col_chunk_size)) * txds[col].chunks[0][0]
for rr in range(0, txds[col].row.shape[0], col_rows):
txda = txds[col].isel(row=slice(rr, rr + col_rows))
delayed_writes += [dask.delayed(write_main_table_slice)(txda, outfile, ddi=ddi, col=col, full_shape=txda.shape[1:], starts=(rr,)+(0,)*(len(txda.shape)-1))]
if execute:
if verbose: print('triggering DAG...')
zs = dask.compute(delayed_writes)
if verbose: print('execution time %0.2f sec' % (time.time() - start))
else:
if verbose: print('returning delayed task list')
return delayed_writes
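# Hedged usage sketch (editor's addition): round-trip a hypothetical MS through the xarray
# representation, copying subtables from the input for speed and rewriting only FLAG.
def _example_round_trip():
    mxds = read_ms('uid_A002.ms')
    write_ms(mxds, 'copy_uid_A002.ms', infile='uid_A002.ms', modcols=['FLAG'], verbose=True)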
###########################################################################################################
def visplot(xda, axis=None, overplot=False, drawplot=True, tsize=250):
"""
Plot a preview of Visibility xarray DataArray contents
Parameters
----------
xda : xarray.core.dataarray.DataArray
input DataArray to plot
axis : str or list or xarray.core.dataarray.DataArray
Coordinate(s) within the xarray DataArray, or a second xarray DataArray to plot against. Default None uses range.
All other coordinates will be maxed across dims
overplot : bool
Overlay new plot on to existing window. Default of False makes a new window for each plot
drawplot : bool
Display plot window. Should pretty much always be True unless you want to overlay things
in a Jupyter notebook.
tsize : int
target size of the preview plot (might be smaller). Default is 250 points per axis
Returns
-------
Open matplotlib window
"""
import matplotlib.pyplot as plt
import xarray
import numpy as np
import warnings
warnings.simplefilter("ignore", category=RuntimeWarning) # suppress warnings about nan-slices
from pandas.plotting import register_matplotlib_converters
register_matplotlib_converters()
if overplot:
axes = None
else:
fig, axes = plt.subplots(1, 1)
# fast decimate to roughly the desired size
thinf = np.ceil(np.array(xda.shape) / tsize)
txda = xda.thin(dict([(xda.dims[ii], int(thinf[ii])) for ii in range(len(thinf))]))
# can't plot complex numbers, bools (sometimes), or strings
if (txda.dtype == 'complex128') or (txda.dtype == 'complex64'):
txda = (txda.real ** 2 + txda.imag ** 2) ** 0.5
elif txda.dtype == 'bool':
txda = txda.astype(int)
elif txda.dtype.type is np.int32:
txda = txda.where(txda > np.full((1), np.nan, dtype=np.int32)[0])
elif txda.dtype.type is np.str_:
txda = xarray.DataArray(np.unique(txda, return_inverse=True)[1], dims=txda.dims, coords=txda.coords, name=txda.name)
######################
# decisions based on supplied axis to plot against
# no axis - plot against range of data
# collapse all but first dimension
if axis is None:
collapse = [ii for ii in range(1, txda.ndim)]
if len(collapse) > 0: txda = txda.max(axis=collapse)
txda[txda.dims[0]] = np.arange(txda.shape[0])
txda.plot.line(ax=axes, marker='.', linewidth=0.0)
# another xarray DataArray as axis
elif type(axis) == xarray.core.dataarray.DataArray:
txda2 = axis.thin(dict([(xda.dims[ii], int(thinf[ii])) for ii in range(len(thinf))]))
if txda2.dtype.type is np.int32: txda2 = txda2.where(txda2 > np.full((1), np.nan, dtype=np.int32)[0])
xarray.Dataset({txda.name: txda, txda2.name: txda2}).plot.scatter(txda.name, txda2.name)
# single axis
elif len(np.atleast_1d(axis)) == 1:
axis = np.atleast_1d(axis)[0]
# coord ndim is 1
if txda[axis].ndim == 1:
collapse = [ii for ii in range(txda.ndim) if txda.dims[ii] not in txda[axis].dims]
if len(collapse) > 0: txda = txda.max(axis=collapse)
txda.plot.line(ax=axes, x=axis, marker='.', linewidth=0.0)
# coord ndim is 2
elif txda[axis].ndim == 2:
collapse = [ii for ii in range(txda.ndim) if txda.dims[ii] not in txda[axis].dims]
if len(collapse) > 0: txda = txda.max(axis=collapse)
txda.plot.pcolormesh(ax=axes, x=axis, y=txda.dims[0])
# two axes
elif len(axis) == 2:
collapse = [ii for ii in range(txda.ndim) if txda.dims[ii] not in (txda[axis[0]].dims + txda[axis[1]].dims)]
if len(collapse) > 0: txda = txda.max(axis=collapse)
txda.plot.pcolormesh(ax=axes, x=axis[0], y=axis[1])
plt.title(txda.name)
if drawplot:
plt.show()
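# Example usage (illustrative; assumes `xds` is a visibility Dataset produced by the
# reading routines above, with a DATA data_var and 'time'/'chan' coordinates):
#   visplot(xds.DATA)                          # preview against the index range
#   visplot(xds.DATA, axis='time')             # max over remaining dims, plotted against time
#   visplot(xds.DATA, axis=['time', 'chan'])   # 2-D pcolormesh against two coordinates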
##################################################################################################
##
## Images
##
##################################################################################################
############################################
def read_image_chunk(infile, shapes, starts):
tb_tool = tables.table(infile, readonly=True, lockoptions={'option': 'usernoread'}, ack=False)
data = tb_tool.getcellslice(tb_tool.colnames()[0], 0, starts, tuple(np.array(starts) + np.array(shapes) - 1))
tb_tool.close()
return data
############################################
def read_image_array(infile, dimorder, chunks):
tb_tool = tables.table(infile, readonly=True, lockoptions={'option': 'usernoread'}, ack=False)
cshape = eval(tb_tool.getcolshapestring(tb_tool.colnames()[0])[0])
cdata = tb_tool.getcellslice(tb_tool.colnames()[0], 0, tuple(np.repeat(0, len(cshape))), tuple(np.repeat(0, len(cshape))))
tb_tool.close()
# expand the actual data shape to the full 5 possible dims
full_shape = cshape + [1 for rr in range(5) if rr >= len(cshape)]
full_chunks = chunks[::-1] + [1 for rr in range(5) if rr >= len(chunks)]
d0slices = []
for d0 in range(0, full_shape[0], full_chunks[0]):
d0len = min(full_chunks[0], full_shape[0] - d0)
d1slices = []
for d1 in range(0, full_shape[1], full_chunks[1]):
d1len = min(full_chunks[1], full_shape[1] - d1)
d2slices = []
for d2 in range(0, full_shape[2], full_chunks[2]):
d2len = min(full_chunks[2], full_shape[2] - d2)
d3slices = []
for d3 in range(0, full_shape[3], full_chunks[3]):
d3len = min(full_chunks[3], full_shape[3] - d3)
d4slices = []
for d4 in range(0, full_shape[4], full_chunks[4]):
d4len = min(full_chunks[4], full_shape[4] - d4)
shapes = tuple([d0len, d1len, d2len, d3len, d4len][:len(cshape)])
starts = tuple([d0, d1, d2, d3, d4][:len(cshape)])
delayed_array = dask.delayed(read_image_chunk)(infile, shapes, starts)
d4slices += [dask.array.from_delayed(delayed_array, shapes, cdata.dtype)]
d3slices += [dask.array.concatenate(d4slices, axis=4)] if len(cshape) > 4 else d4slices
d2slices += [dask.array.concatenate(d3slices, axis=3)] if len(cshape) > 3 else d3slices
d1slices += [dask.array.concatenate(d2slices, axis=2)] if len(cshape) > 2 else d2slices
d0slices += [dask.array.concatenate(d1slices, axis=1)] if len(cshape) > 1 else d1slices
xda = xarray.DataArray(dask.array.concatenate(d0slices, axis=0), dims=dimorder[::-1]).transpose()
return xda
############################################
def read_image(infile, masks=True, history=True, chunks=(1000, 1000, 1, 4), verbose=False):
"""
Read casacore format Image to xarray Image Dataset format
Parameters
----------
infile : str
Input image filename (.image or .fits format)
masks : bool
Also read image masks as additional image data_vars. Default is True
history : bool
Also read history log table. Default is True
chunks: 4-D tuple of ints
Shape of desired chunking in the form of (l, m, chan, pol). Default is (1000, 1000, 1, 4)
Note: chunk size is the product of the four numbers (up to the actual size of the dimension)
Returns
-------
xarray.core.dataset.Dataset
new xarray Datasets of Image data contents
"""
infile = os.path.expanduser(infile)
IA = ia()
QA = qa()
rc = IA.open(infile)
csys = IA.coordsys()
ims = IA.shape() # image shape
attrs = extract_table_attributes(infile)
if verbose: print('opening %s with shape %s' % (infile, str(ims)))
# construct a mapping of dimension names to image indices
dimmap = [(coord[:-1], attrs['coords']['pixelmap%s' % coord[-1]][0]) for coord in attrs['coords'] if coord[:-1] in ['direction', 'stokes', 'spectral', 'linear']]
dimmap = dict([(rr[0].replace('stokes','pol').replace('spectral','chan').replace('linear','component'), rr[1]) for rr in dimmap])
if 'direction' in dimmap: dimmap['l'] = dimmap.pop('direction')
if 'l' in dimmap: dimmap['m'] = dimmap['l'] + 1
# compute world coordinates for spherical dimensions
sphr_dims = [dimmap['l'], dimmap['m']] if 'l' in dimmap else []
coord_idxs = np.mgrid[[range(ims[dd]) if dd in sphr_dims else range(1) for dd in range(len(ims))]].reshape(len(ims), -1)
coord_world = csys.toworldmany(coord_idxs.astype(float))['numeric'][sphr_dims].reshape((-1,) + tuple(ims[sphr_dims]))
coords = dict([(['right_ascension','declination'][dd], (['l', 'm'], coord_world[di])) for di, dd in enumerate(sphr_dims)])
# compute world coordinates for cartesian dimensions
cart_names, cart_dims = list(zip(*[(kk, dimmap[kk]) for kk in dimmap if kk != 'direction']))
for cd in range(len(cart_dims)):
coord_idxs = np.mgrid[[range(ims[dd]) if dd == cart_dims[cd] else range(1) for dd in range(len(ims))]].reshape(len(ims), -1)
coord_world = csys.toworldmany(coord_idxs.astype(float))['numeric'][cart_dims[cd]].reshape(-1,)
coords.update({cart_names[cd]: coord_world})
# assign values to l, m coords based on incr and refpix in metadata
if len(sphr_dims) > 0:
sphr_coord = [coord for coord in attrs['coords'] if coord.startswith('direction')][0]
coords['l'] = np.arange(-attrs['coords'][sphr_coord]['crpix'][0], ims[0]-attrs['coords'][sphr_coord]['crpix'][0]) * attrs['coords'][sphr_coord]['cdelt'][0]
coords['m'] = np.arange(-attrs['coords'][sphr_coord]['crpix'][1], ims[1]-attrs['coords'][sphr_coord]['crpix'][1]) * attrs['coords'][sphr_coord]['cdelt'][1]
rc = csys.done()
rc = IA.close()
# chunks are in (l, m, chan, pol) order, rearrange to match the actual data order
dimorder = [dd for rr in range(5) for dd in dimmap if (dimmap[dd] is not None) and (dimmap[dd] == rr)]
chunks = list(np.array(chunks + (9999999,))[[['l', 'm', 'chan', 'pol', 'component'].index(rr) for rr in dimorder]])
# wrap the actual image data reads in dask delayed calls returned as an xarray dataarray
xds = xarray.Dataset(coords=coords)
xda = read_image_array(infile, dimorder, chunks)
xda = xda.rename('IMAGE')
xds[xda.name] = xda
# add mask(s) alongside image data
if masks and 'masks' in attrs:
for ii, mask in enumerate(list(attrs['masks'].keys())):
if not os.path.isdir(os.path.join(infile, mask)): continue
xda = read_image_array(os.path.join(infile, mask), dimorder, chunks)
xda = xda.rename('IMAGE_%s' % mask)
xds[xda.name] = xda
attrs[mask+'_column_descriptions'] = extract_table_attributes(os.path.join(infile, mask))['column_descriptions']
# if also loading history, put it as another xds in the attrs
if history and os.path.isdir(os.path.join(infile, 'logtable')):
attrs['history'] = read_generic_table(os.path.join(infile, 'logtable'))
if 'coords' in attrs: attrs['icoords'] = attrs.pop('coords') # rename coord table keyword to avoid confusion with xds coords
xds = xds.assign_attrs(attrs)
return xds
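# Example usage (illustrative; the file name is hypothetical and the dimension names
# depend on the image's coordinate system):
#   xds = read_image('ngc5921.image', masks=True, chunks=(500, 500, 1, 1))
#   print(xds)                        # inspect coords plus IMAGE and IMAGE_<mask> data_vars
#   plane = xds.IMAGE.isel(chan=0)    # lazy selection; pixel values are read only on compute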
############################################
def write_image_slice(xda, outfile, col, starts):
"""
Write image xda chunk to the corresponding image table slice
"""
# trigger the DAG for this chunk and return values while the table is unlocked
values = xda.compute().values
tb_tool = tables.table(outfile, readonly=False, lockoptions={'option': 'permanentwait'}, ack=False)
tb_tool.putcellslice(col, 0, values, starts, tuple(np.array(starts) + np.array(values.shape) - 1))
tb_tool.close()
############################################
def write_image(xds, outfile, portion='IMAGE', masks=True, history=True, verbose=False, execute=True):
"""
    Write xarray Image Dataset to casacore format image
Parameters
----------
xds : xarray.Dataset
Image xarray dataset to write
outfile : str
Output image filename (.image format)
portion : str
Name of the data_var in the xds that corresponds to the image data. Default 'IMAGE'
masks : bool
        Also write the image mask data_vars to the output image. Default True
history : bool
        Also write the history log table to the output image. Default True
verbose : bool
Whether or not to print output progress. Since writes will typically execute the DAG, if something is
going to go wrong, it will be here. Default False
execute : bool
Whether or not to actually execute the DAG, or just return it with write steps appended. Default True will execute it
"""
outfile = os.path.expanduser(outfile)
start = time.time()
xds = xds.copy()
# initialize list of column names and xda's to be written. The column names are not the same as the data_var names
cols = [list(xds.attrs['column_descriptions'].keys())[0] if 'column_descriptions' in xds.attrs else list(xds.data_vars.keys())[0]]
xda_list = [xds[portion]]
subtable_list = ['']
if 'icoords' in xds.attrs: xds.attrs['coords'] = xds.attrs.pop('icoords') # rename back for proper table keyword creation
# initialize output table (must do it this way since create_table mysteriously throws image tool errors when subsequently opened)
IA = ia()
imtype = 'd' if xds[portion].dtype == 'float64' else 'c' if xds[portion].dtype == 'complex64' else 'cd' if xds[portion].dtype == 'complex128' else 'f'
IA.fromshape(outfile, list(xds[portion].shape), csys=xds.attrs['coords'], overwrite=True, log=False, type=imtype)
IA.close()
# write image history to logfile subtable (not delayed)
if history and ('history' in xds.attrs):
if verbose: print('writing history log...')
write_generic_table(xds.history, outfile, subtable='logtable')
# add masks to the list of xda's to be written
if masks and ('masks' in xds.attrs):
for mask in xds.masks:
if verbose: print('writing %s...' % mask)
mask_var = '%s_%s' % (portion, mask)
if (mask + '_column_descriptions' not in xds.attrs) or (mask_var not in xds): continue
cols += [list(xds.attrs[mask+'_column_descriptions'].keys())[0]]
xda_list += [xds[mask_var]]
subtable_list += [mask]
xds.attrs['masks'][mask]['mask'] = 'Table: %s' % os.path.abspath(os.path.join(outfile, mask))
xds.attrs[mask+'_column_descriptions'][cols[-1]]['shape'] = list(xds[mask_var].transpose().shape)
txds = xarray.Dataset({mask_var: xds[mask_var]}).assign_attrs({'column_descriptions': xds.attrs[mask+'_column_descriptions']})
create_table(os.path.join(outfile, mask), txds, max_rows=1, infile=None, cols=[cols[-1]], generic=True)
# write xds attribute to output table keywords
tb_tool = tables.table(outfile, readonly=False, lockoptions={'option': 'permanentwait'}, ack=False)
for attr in xds.attrs:
if (attr in ['bad_cols', 'bad_types', 'column_descriptions', 'history', 'subtables', 'info']) or attr.endswith('column_descriptions'): continue
tb_tool.putkeyword(attr, xds.attrs[attr])
if 'info' in xds.attrs: tb_tool.putinfo(xds.attrs['info'])
tb_tool.close()
# write each xda transposed to disk
chunks = [rr[0] for rr in xds[portion].chunks][::-1]
cshapes = xds[portion].shape[::-1]
dims = xds[portion].dims[::-1]
delayed_writes = []
for ii, xda in enumerate(xda_list):
for d0 in range(0, cshapes[0], chunks[0]):
d0len = min(chunks[0], cshapes[0] - d0)
for d1 in range(0, cshapes[1] if len(cshapes) > 1 else 1, chunks[1] if len(chunks) > 1 else 1):
d1len = min(chunks[1], cshapes[1] - d1) if len(cshapes) > 1 else 0
for d2 in range(0, cshapes[2] if len(cshapes) > 2 else 1, chunks[2] if len(chunks) > 2 else 1):
d2len = min(chunks[2], cshapes[2] - d2) if len(cshapes) > 2 else 0
for d3 in range(0, cshapes[3] if len(cshapes) > 3 else 1, chunks[3] if len(chunks) > 3 else 1):
d3len = min(chunks[3], cshapes[3] - d3) if len(cshapes) > 3 else 0
for d4 in range(0, cshapes[4] if len(cshapes) > 4 else 1, chunks[4] if len(chunks) > 4 else 1):
d4len = min(chunks[4], cshapes[4] - d4) if len(cshapes) > 4 else 0
starts = [d0, d1, d2, d3, d4][:len(cshapes)]
slices = [slice(d0, d0+d0len), slice(d1, d1+d1len), slice(d2, d2+d2len), slice(d3, d3+d3len), slice(d4, d4+d4len)]
txda = xda.transpose().isel(dict(zip(dims, slices)), missing_dims='ignore')
delayed_writes += [dask.delayed(write_image_slice)(txda, os.path.join(outfile, subtable_list[ii]), col=cols[ii], starts=starts)]
if execute:
if verbose: print('triggering DAG...')
zs = dask.compute(delayed_writes)
if verbose: print('execution time %0.2f sec' % (time.time() - start))
else:
if verbose: print('returning delayed task list')
return delayed_writes
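# Example usage (illustrative; assumes `xds` was produced by read_image above and the
# output path is hypothetical):
#   write_image(xds, 'ngc5921_copy.image', portion='IMAGE', masks=True)   # executes the DAG
#   tasks = write_image(xds, 'ngc5921_copy.image', execute=False)         # build task list only
#   dask.compute(tasks)                                                   # trigger the writes later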
| 1.796875 | 2 |
files/exercises/defensive-testing-assertions.py | mforneris/introduction_to_python_course | 0 | 12762041 | <reponame>mforneris/introduction_to_python_course
# Testing Assertions
# Given a sequence of numbers of cars,
# the function get_total returns the total number of cars.
# get_total([1, 2, 3, 4])
# outputs: 10
# get_total(['a', 'b', 'c'])
# outputs: ValueError: invalid literal for int() with base 10: 'a'
# Explain in words what the assertions in this function check, and for each one, give an example of input that will make that assertion fail.
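# Reminder (illustrative, not part of the original exercise): a failing assertion
# raises AssertionError rather than ValueError, e.g.
# assert 2 + 2 == 5
# outputs: AssertionError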
def get_total(values):
assert len(values) > 0
for element in values:
assert int(element)
values = [int(element) for element in values]
total = sum(values)
assert total > 0
return total | 4.40625 | 4 |
program/dataHandling.py | otills/embryocv | 1 | 12762042 | <filename>program/dataHandling.py
# Import dependencies
import cv2
import numpy as np
import pandas as pd
import glob
import os
import sys
import eggUI
import viewOutput
import time
import pathos
import json
import xarray as xr
from PyQt5.Qt import *
from PyQt5 import QtGui
class dataHandling(object):
#==============================================================================
    # Produce a list of embryos with results files larger than a particular size.
#==============================================================================
def filterResultsList(self,fileSize):
#self.resultsDir = os.path.dirname(self.parentPath + "phenomeData/")
self.sizeFilteredEmbryos = []
for f in range(len(self.embryoLabels)):
self.embryo = self.embryoLabels[f]
resultsStructure = self.resultsDir +'/' + self.embryo + '.pandas'
size = os.path.getsize(resultsStructure)/1000000
if size >= fileSize:
self.sizeFilteredEmbryos.append(self.embryo)
#==============================================================================
    # Import the image sequence for a given results row
#==============================================================================
def seqImport(self, n):
self.seq = np.zeros(shape=(self.results.shape[1],self.imImport(0,0).shape[0],self.imImport(0,0).shape[1]))
for f in range(self.results.shape[1]):
self.seq[f] = cv2.imread(self.results.iloc[n]['currentFolder'][f] + self.results.iloc[n]['file'][f],cv2.IMREAD_ANYDEPTH)
ran = (self.seq.max() - self.seq.min()) / 255.
self.seq = self.seq/ran
self.seq = self.seq-self.seq.min()
self.seq = np.ascontiguousarray(self.seq.astype(np.uint8))
#np.ascontiguousarray
#==============================================================================
# Import single image using results
#==============================================================================
def imImport(self,n,m):
im = cv2.imread(self.results.iloc[n]['currentFolder'][m] + self.results.iloc[n]['file'][m],cv2.IMREAD_ANYDEPTH)
ran = (im.max()-im.min())/255.
out = (im/ran)
out = out-out.min()
out = out.astype(np.uint8)
return out
#==============================================================================
# Get embryo labels from a parentPath
#==============================================================================
def getEmbryoLabels(self,parentPath):
folders = glob.glob(parentPath + "*/*/")
# Trouble with getctime .. doesn't seem well supported. Therefore switched.
folders.sort(key=os.path.getmtime)
if len(folders)<1:
print 'Data not found.'
embryoLabels = []
for p in range(len(folders)):
embryoLabels.append(os.path.basename(os.path.normpath((folders[p]))))
if self.species == 'rbalthica':
# Reduce to only the unique labels
embryoLabels = np.unique(embryoLabels)
self.embryoLabels = embryoLabels[embryoLabels != 'BLANK']
# Use to remove 'problematic embryos'
            if self.exclude != 'na':
if type(self.exclude) is str:
self.embryoLabels = self.embryoLabels[self.embryoLabels != self.exclude]
else:
self.embryoLabels = self.embryoLabels
for p in range(len(self.exclude)):
self.embryoLabels = self.embryoLabels[self.embryoLabels != str(self.exclude[p])]
elif self.species == 'ogammarellus':
# Add a filter to deal with 'copied data - such as Orchestia methods MS data'
labs = []
for p in range(len(folders)):
labs.append(str(os.path.basename(os.path.normpath((folders[p])))).split(' ')[0])
# Reduce to only the unique labels
embryoLabels = np.unique(labs)
self.embryoLabels = embryoLabels[embryoLabels != 'BLANK']
#==============================================================================
    # Get embryo labels from a results folder
#==============================================================================
def getEmbryoLabelsFromResultsFolder(self,resultsDir):
if self.dataformat:
folders = glob.glob(resultsDir + "/*.pandas")
embryoLabels = []
resultsDir +"/"
for f in range(len(folders)):
embryoLabels.append(folders[f].replace(".pandas", ""))
#embryoLabels[f] = embryoLabels[f].replace(resultsDir +"/","")
embryoLabels[f] = embryoLabels[f].replace(resultsDir,"")
# Reduce to only the unique labels (shouldn't be a problem as from results).
embryoLabels = np.unique(embryoLabels)
self.embryoLabels = embryoLabels[embryoLabels != 'BLANK']
# Use to remove 'problematic embryos'
            if self.exclude != 'na':
if type(self.exclude) is str:
self.embryoLabels = self.embryoLabels[self.embryoLabels != self.exclude]
else:
self.embryoLabels = self.embryoLabels
for p in range(len(self.exclude)):
self.embryoLabels = self.embryoLabels[self.embryoLabels != str(self.exclude[p])]
else:
# If raw does not equal True (i.e. if data do not follow the typical raw MicroManager format
            # due perhaps to being copied from elsewhere).
# For example Parent Folder/EmbryoA/Time1
# /Time2
# /EmbryoB/Time1
# /Time2
folders = glob.glob(resultsDir + "/*.pandas")
embryoLabels = []
resultsDir +"/"
for f in range(len(folders)):
embryoLabels.append(folders[f].replace(".pandas", ""))
embryoLabels[f] = embryoLabels[f].replace(resultsDir +"/","")
# Reduce to only the unique labels (shouldn't be a problem as from results).
embryoLabels = np.unique(embryoLabels)
self.embryoLabels = embryoLabels[embryoLabels != 'BLANK']
# Debug
print self.embryoLabels
# Debug
# print self.embryoLabels
#==============================================================================
    # Get embryo labels from a results folder containing XARRAY data
#==============================================================================
def getXREmbryoLabelsFromResultsFolder(self,resultsDir):
# If data in normal MM format..
folders = glob.glob(resultsDir + "/*.HDF5")
embryoLabels = []
resultsDir +"/"
for f in range(len(folders)):
embryoLabels.append(folders[f].replace(".HDF5", ""))
embryoLabels.append(folders[f].replace("dataset", ""))
embryoLabels[f] = embryoLabels[f].replace(resultsDir +"/","")
# Reduce to only the unique labels (shouldn't be a problem as from results).
embryoLabels = np.unique(embryoLabels)
self.embryoLabels = embryoLabels[embryoLabels != 'BLANK']
# Debug
# print self.embryoLabels
#==============================================================================
# Get folders for a particular embryo
#==============================================================================
def getEmbryoFolders(self, parentPath, embryo):
if self.mode == 'results':
self.parentPath = parentPath
self.embryo = embryo
self.embryoFolders = self.results.ix[:,0,'currentFolder']
self.getShortenedPaths()
if (self.mode == 'resume'):
self.parentPath = parentPath
self.embryo = embryo
if self.species == 'rbalthica':
#self.embryoFolders = self.results.items
# Oli changed (1211) to deal with corrupted HD (2017018)
#self.embryoFolders = glob.glob(parentPath + "*/" + embryo +"/")
self.embryoFolders = self.results.ix[:,0,'currentFolder']
self.embryoFolders = list(self.embryoFolders)
self.embryoFolders.sort(key=os.path.getmtime)
#self.embryoFolders.sort(key=os.path.getmtime)
self.getShortenedPaths()
elif self.species == 'ogammarellus':
self.embryoFolders = self.results.items
self.embryoFolders = glob.glob(parentPath + "*/" + embryo +" */")
self.embryoFolders.sort(key=os.path.getmtime)
self.getShortenedPaths()
if (self.mode == 'new'):
if self.species == 'rbalthica':
self.parentPath = parentPath
self.embryo = embryo
self.embryoFolders = glob.glob(parentPath + "*/" + embryo +"/")
self.embryoFolders.sort(key=os.path.getmtime)
self.getShortenedPaths()
elif self.species == 'ogammarellus':
#self.embryoFolders = self.results.items
self.embryoFolders = glob.glob(parentPath + "*/" + embryo +" */")
self.embryoFolders.sort(key=os.path.getmtime)
self.getShortenedPaths()
#==============================================================================
# Get list of shortenedPaths
#==============================================================================
def getShortenedPaths(self):
if (self.mode == 'results')|(self.mode == 'resume'):
self.shortenedPaths = self.results.items
if self.mode == 'new':
self.shortenedPaths=[]
for f in range(len(self.embryoFolders)):
self.shortenedPaths.append(os.path.relpath(self.embryoFolders[f], self.parentPath))
#==============================================================================
# Create a directory for saving results
#==============================================================================
def createResultsFolder(self):
self.resultsDir = os.path.dirname(self.parentPath + "phenomeData/")
if not os.path.exists(self.resultsDir):
os.makedirs(self.resultsDir)
#==============================================================================
# Generate a results panel suitable for experiment AND then Run egg ID
#==============================================================================
def generateResultsAndFindEggs(self, parentPath, scale, eggInt=1234):
self.eggInt = eggInt
totts = time.time()
self.compiledData = {}
self.parentPath = parentPath
self.scale = scale
self.getEmbryoLabels(self.parentPath)
self.createResultsFolder()
for e in range(len(self.embryoLabels)):
ts = time.time()
print 'Initiation started for', self.embryoLabels[e]
self.getEmbryoFolders(self.parentPath,self.embryoLabels[e])
for f in range(len(self.embryoFolders)):
print f, self.embryoFolders[f]
self.currentFolder = self.embryoFolders[f]
self.shortenedPath = self.shortenedPaths[f]
self.parseMetadata()
self.createResultsTable()
self.compiledData[self.shortenedPath] = self.results
print 'Initiation complete for', self.embryoLabels[e], ('in {} s'.format(time.time()-ts))
self.resultSheets = pd.Panel.from_dict(self.compiledData)
self.resultSheets.to_pickle(self.resultsDir + "/" + self.embryo + '.pandas')
self.results=[]
self.resultSheets=[]
self.compiledData={}
self.saveMetadata()
self.runEggID(self.eggInt)
print 'Egg identification and creation of results files is now complete. '
print 'This took ', ('{} s'.format(time.time()-totts))
#==============================================================================
# Just generate results - route for O. gammarellus analysis. Initiated upon creating
# a 'new' experiment.
#
    # Generates a results panel for the experiment; egg identification is not run here.
#==============================================================================
def generateResults(self, parentPath, scale):
totts = time.time()
self.compiledData = {}
self.parentPath = parentPath
self.scale = scale
self.getEmbryoLabels(self.parentPath)
self.createResultsFolder()
for e in range(len(self.embryoLabels)):
ts = time.time()
print 'Initiation started for', self.embryoLabels[e]
self.getEmbryoFolders(self.parentPath,self.embryoLabels[e])
for f in range(len(self.embryoFolders)):
print f, self.embryoFolders[f]
self.currentFolder = self.embryoFolders[f]
self.shortenedPath = self.shortenedPaths[f]
self.parseMetadata()
self.createResultsTable()
self.compiledData[self.shortenedPath] = self.results
print 'Results file created for ', self.embryoLabels[e], ('in {} s'.format(time.time()-ts))
self.resultSheets = pd.Panel.from_dict(self.compiledData)
self.resultSheets.to_pickle(self.resultsDir + "/" + self.embryo + '.pandas')
self.results=[]
self.resultSheets=[]
self.compiledData={}
self.saveMetadata()
#self.runEggID(self.eggInt)
print 'This took ', ('{} s'.format(time.time()-totts))
print 'Egg identification and creation of results files is now complete. Use instance.analyseAllEmbryos function to continue'
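    # Example usage (illustrative; the path and scale value are placeholders and
    # `instance` is an object of this class):
    #   instance.generateResults('/data/experiment1/', scale=1000)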
#==============================================================================
# Create results structure
#==============================================================================
def createResultsTable(self):
global da
# Store embryo outline as numpy array, but results as panda.
cols = ['embryo','currentFolder','parentPath','file','scale','UUID','dateTime','elapsedTime',
'area','centroidX','centroidY','solidity','aspect','extent',
'hullArea','bboxMincol','bboxMinrow','bboxWidth','bboxHeight', 'embryoOutline'
,'eggRotBBox','eggBoxPoints', 'blockWise']
#, 'eggBBox'
self.results = pd.DataFrame(index = range(int(len(self.filtFiles))), columns = cols)
self.results.embryo = self.embryo
self.results.currentFolder = self.currentFolder
self.results.file = self.filtFiles
self.results.parentPath = self.parentPath
self.results.scale = self.scale
self.results.dateTime = self.filtTimes
self.results.elapsedTime = self.filtElapsedTimes
self.results.UUID = self.filtUUID
#==============================================================================
# Load metadata
#==============================================================================
def parseMetadata(self):
# Search currentFolder for metadata
with open(glob.glob(self.currentFolder + "/*.txt")[0], 'r') as f:
jsonDict = json.loads(f.read())
times = []
elapsedTimes = []
files = []
UUID = []
for i in jsonDict.keys():
if i!=u'Summary':
times.append(jsonDict[i]['Time'])
elapsedTimes.append(jsonDict[i]['ElapsedTime-ms'])
files.append(jsonDict[i]['FileName'])
UUID.append(jsonDict[i]['UUID'])
meta = [(y,x,z,a) for (y,x,z,a) in sorted(zip(times,elapsedTimes,files,UUID))]
self.filtTimes = []
self.filtElapsedTimes = []
self.filtFiles = []
self.filtUUID = []
for i in range(len(meta)):
self.filtTimes.append(str(meta[i][0]))
self.filtElapsedTimes.append(str(meta[i][1]))
self.filtFiles.append(str(meta[i][2]))
self.filtUUID.append(str(meta[i][3]))
#==============================================================================
# Apply locateEgg to image sequence at specified interval
#==============================================================================
def getIntervIndicesFromSequence(self, n):
# n = Specifies how many frames to skip in between the locateEgg.
#self.intN = self.results.shape[1]-1
self.intN = n
self.eggIDIms = []
# If n = 1234 (default), only the first image from each sequence is taken.
if self.intN == 1234:
self.eggIDIms.append(0)
# If an interval is specified...
else:
seqLength = self.results.shape[1]
# Get number of IDs necessary, based on n and seqLength.
eggIDNum = seqLength/n
# Get frame indices
for i in range(eggIDNum):
self.eggIDIms.append(i*self.intN)
#==============================================================================
# Import a non continuous sequence
#==============================================================================
def nonContSeqImport(self, f):
# Create np stack of appropriate size
im = self.imImport(0,0)
self.seq = np.zeros(shape=(int(len(self.eggIDIms)),im.shape[0],im.shape[1]))
# Loop over the eggIDIms
for g in range(len(self.eggIDIms)):
# Import image one by one, from correct folder (argument) and iterated eggIDIm.
self.seq[g] = self.imImport(f,self.eggIDIms[g])
self.seq = self.seq.astype(np.uint8)
#==============================================================================
# Load results for current embryo
#==============================================================================
def loadResults(self):
# Iterate over embryo labels...
resultsStructure = self.resultsDir +'/' + self.embryo + '.pandas'
results = pd.read_pickle(resultsStructure)
# Reorder panels so they are chronological, using the first datetime from each panel.
dates = pd.to_datetime(results.ix[:,0,6])
# Sort them
dates.sort_values(inplace=True)
# Reindex axis
self.results = results.reindex_axis(dates.index, copy='False')
#==============================================================================
# Load results for current embryo - XARRAY
#==============================================================================
def loadXRResults(self):
# Iterate over embryo labels...
        if self.mode == 'xarray':
if float(str(glob.glob(self.parentPath + self.embryo + "dataset*")[0]).find('log')) > 0:
self.results = 'NoData'
else:
self.results = xr.open_mfdataset(self.parentPath + self.embryo + 'dataset.HDF5')
#==============================================================================
# Load, but return results
#==============================================================================
def returnResults(self, embryo):
# Iterate over embryo labels...
resultsStructure = self.resultsDir +'/' + embryo + '.pandas'
results = pd.read_pickle(resultsStructure)
# Reorder panels so they are chronological, using the first datetime from each panel.
dates = pd.to_datetime(results.ix[:,0,6])
# Sort them
dates.sort_values(inplace=True)
# Reindex axis
results = results.reindex_axis(dates.index, copy='False')
return results
#==============================================================================
# Apply egg ID
#==============================================================================
# Run egg ID and add to the results table before saving
# If no eggInt given then just run on the first image of each sequence
def runEggID(self, eggInt = 1234):
self.eggInt = eggInt
for e in range(len(self.embryoLabels)):
self.embryo = self.embryoLabels[e]
self.getEmbryoFolders(self.parentPath, self.embryo)
self.loadResults()
#if self.eggInt ==1234:
#self.eggIDIms = []
#self.eggIDIms.append(0)
self.getIntervIndicesFromSequence(int(self.eggInt))
for f in range(len(self.embryoFolders)):
print self.embryoFolders[f]
self.nonContSeqImport(f)
for g in range(len(self.eggIDIms)):
# If g = [0] i.e. eggInt = 1234, make g = 0.
if len(self.eggIDIms) == 1:
g =0
# NOW perform the eggID on each intervalled image
self.locateEgg(self.seq[g])
# AND store eggID output in results table
#self.results.ix[f,self.eggIDIms[g],'eggBBox'] = self.eggBBox
self.results.ix[f,self.eggIDIms[g],'eggRotBBox'] = self.eggRotBBox
self.results.ix[f,self.eggIDIms[g],'eggBoxPoints'] = self.boxPoints
self.results.to_pickle(self.resultsDir + "/" + self.embryo + '.pandas')
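    # Example usage (illustrative): by default only the first image of each sequence is
    # used for egg identification; pass an interval to identify the egg more frequently:
    #   instance.runEggID()            # first image of each sequence (eggInt=1234 default)
    #   instance.runEggID(eggInt=100)  # every 100th frame of each sequence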
#==============================================================================
# Fill missing egg measurements with ones from either earlier or later time points
# in the data panel series. Note: This is a suboptimal solution and should be
# avoided via manual checking using eggUI.
#==============================================================================
def fillEggMeasurements(self):
        # Fill frames with missing values from the most recent previously calculated frame.
for f in range(self.results.shape[0]):
#self.results.ix[f,:,'eggBBox'] = self.results.ix[f,:,'eggBBox'].fillna(method = 'ffill')
#self.results.ix[f,:,'eggBBox'] = self.results.ix[f,:,'eggBBox'].fillna(method = 'bfill')
self.results.ix[f,:,'eggBoxPoints'] = self.results.ix[f,:,'eggBoxPoints'].fillna(method = 'ffill')
self.results.ix[f,:,'eggBoxPoints'] = self.results.ix[f,:,'eggBoxPoints'].fillna(method = 'bfill')
self.results.ix[f,:,'eggRotBBox'] = self.results.ix[f,:,'eggRotBBox'].fillna(method = 'ffill')
self.results.ix[f,:,'eggRotBBox'] = self.results.ix[f,:,'eggRotBBox'].fillna(method = 'bfill')
self.results.ix[f,:,'bboxMincol'] = self.results.ix[f,:,'bboxMincol'].fillna(method = 'ffill')
self.results.ix[f,:,'bboxMinrow'] = self.results.ix[f,:,'bboxMinrow'].fillna(method = 'bfill')
# If no value in first datapoint in first panel..
for f in range(self.results.shape[0]):
null = self.results.ix[:,0,'eggBoxPoints'].isnull()
if null[f]:
for g in range(len(null)-f):
if ~null[(f+g)]:
#self.results.ix[f,0,'eggBBox'] = self.results.ix[(f+g),0,'eggBBox']
#self.results.ix[f,:,'eggBBox'] = self.results.ix[f,:,'eggBBox'].fillna(method = 'ffill')
self.results.ix[f,0,'eggBoxPoints'] = self.results.ix[(f+g),0,'eggBoxPoints']
self.results.ix[f,:,'eggBoxPoints'] = self.results.ix[f,:,'eggBoxPoints'].fillna(method = 'ffill')
self.results.ix[f,0,'eggRotBBox'] = self.results.ix[(f+g),0,'eggRotBBox']
self.results.ix[f,:,'eggRotBBox'] = self.results.ix[f,:,'eggRotBBox'].fillna(method = 'ffill')
self.results.ix[f,0,'bboxMincol'] = self.results.ix[(f+g),0,'bboxMincol']
self.results.ix[f,:,'bboxMincol'] = self.results.ix[f,:,'bboxMincol'].fillna(method = 'ffill')
self.results.ix[f,0,'bboxMinrow'] = self.results.ix[(f+g),0,'bboxMinrow']
self.results.ix[f,:,'bboxMinrow'] = self.results.ix[f,:,'bboxMinrow'].fillna(method = 'ffill')
break
# Now repeat process but filling later panels in the dataframe with data from earlier panels.
for f in range(self.results.shape[0]):
null = self.results.ix[:,0,'eggBoxPoints'].isnull()
if null[f]:
#print f
for g in range(f,0,-1):
if ~null[g]:
#self.results.ix[f,0,'eggBBox'] = self.results.ix[g,0,'eggBBox']
#self.results.ix[f,:,'eggBBox'] = self.results.ix[f,:,'eggBBox'].fillna(method = 'ffill')
self.results.ix[f,0,'eggBoxPoints'] = self.results.ix[g,0,'eggBoxPoints']
self.results.ix[f,:,'eggBoxPoints'] = self.results.ix[f,:,'eggBoxPoints'].fillna(method = 'ffill')
self.results.ix[f,0,'eggRotBBox'] = self.results.ix[g,0,'eggRotBBox']
self.results.ix[f,:,'eggRotBBox'] = self.results.ix[f,:,'eggRotBBox'].fillna(method = 'ffill')
self.results.ix[f,0,'bboxMincol'] = self.results.ix[g,0,'bboxMincol']
self.results.ix[f,:,'bboxMincol'] = self.results.ix[f,:,'bboxMincol'].fillna(method = 'ffill')
self.results.ix[f,0,'bboxMinrow'] = self.results.ix[g,0,'bboxMinrow']
self.results.ix[f,:,'bboxMinrow'] = self.results.ix[f,:,'bboxMinrow'].fillna(method = 'ffill')
break
#==============================================================================
# Apply embryo segmentation to all embryos
#==============================================================================
def segmentAllEmbryos(self):
for e in range(len(self.embryoLabels)):
print 'Starting segmentation of', self.embryoLabels[e]
            # Make embryo label current (for saving)
self.embryo = self.embryoLabels[e]
self.loadResults()
# Interpolate egg measurements to whole dataframe
self.fillEggMeasurements()
self.getEmbryoFolders(self.parentPath,self.embryoLabels[e])
self.runEmbryoSegmentation()
print 'Finished segmentation of', self.embryoLabels[e]
#==============================================================================
# Apply embryo segmentation to specific embryo
#==============================================================================
def segmentSpecificEmbryos(self, embryo):
        # Make embryo label current (for saving)
if isinstance(embryo, str):
self.embryo = embryo
self.loadResults()
self.fillEggMeasurements()
self.getEmbryoFolders(self.parentPath,self.embryo)
self.runEmbryoSegmentation()
print self.embryo, 'segmentation complete'
if isinstance(embryo, list):
for e in range(len(embryo)):
self.embryo = embryo[e]
self.loadResults()
self.getEmbryoFolders(self.parentPath,self.embryo)
self.runEmbryoSegmentation()
print self.embryo, 'segmentation complete'
#==============================================================================
# Worker function for parallel embryo segmentation
#==============================================================================
def parallelSegmentAllEmbryos(self,e):
print 'Analysis started for:', self.embryoLabels[e]
        # Make embryo label current (for saving)
ts = time.time()
self.embryo = self.embryoLabels[e]
self.loadResults()
if self.species == 'rbalthica':
# Interpolate egg measurements to whole dataframe
self.fillEggMeasurements()
self.getEmbryoFolders(self.parentPath,self.embryoLabels[e])
self.runParEmbryoSegmentation()
print 'Analysis complete for', self.embryoLabels[e], ('in {} s'.format(time.time()-ts))
#==============================================================================
    # Multiprocessing parallel embryo segmentation function
#==============================================================================
def quantifyAllEmbryos(self,par=True,exclude='na'):
# Exclude any embryos that user does not want to be analysed.
        if exclude != 'na':
if type(exclude) is str:
self.embryoLabels = self.embryoLabels[self.embryoLabels != exclude]
else:
for p in range(len(exclude)):
self.embryoLabels = self.embryoLabels[self.embryoLabels != str(exclude[p])]
if par is True:
# Four seems a good compromise for processing vs data IO on a 12 core
# MacPro, with data on a 7200 RPM SATA, connected via USB3.0/eSATA or Thunderbolt.
cpuCount = 4
# Uncomment if you want to maximise cpu usage, note that data IO from
# drives will likely become limiting and cause serious issues..
#cpuCount = pathos.multiprocessing.cpu_count()
self.getEmbryoLabels(self.parentPath)
jobSize= len(self.embryoLabels)
jobRange= range(len(self.embryoLabels))
if cpuCount > len(self.embryoLabels):
pool = pathos.multiprocessing.ProcessPool(jobSize)
pool.map(self.parallelSegmentAllEmbryos, jobRange)
else:
pool = pathos.multiprocessing.ProcessPool(cpuCount)
pool.map(self.parallelSegmentAllEmbryos, jobRange)
else:
# If par is not True then use non parallel version.
self.segmentAllEmbryos()
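    # Example usage (illustrative; the embryo labels are placeholders):
    #   instance.quantifyAllEmbryos(par=True, exclude=['E03', 'E07'])  # parallel, two embryos skipped
    #   instance.quantifyAllEmbryos(par=False)                         # serial fallback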
#==============================================================================
# Save metadata
#==============================================================================
def saveMetadata(self):
if self.species == 'rbalthica':
metadata = {'embryoLabels':self.embryoLabels, 'scale':self.scale, 'eggInt':self.eggInt}
elif self.species == 'ogammarellus':
metadata = {'embryoLabels':self.embryoLabels, 'scale':self.scale, 'eggInt':1234}
np.save(self.resultsDir + "/phenomeMetadata", metadata)
#==============================================================================
# Load metadata
#==============================================================================
def loadMetadata(self):
self.metadata = np.load(self.resultsDir + "/phenomeMetadata.npy")
self.scale = self.metadata[()]['scale']
self.embryoLabels = self.metadata[()]['embryoLabels']
# Use to remove 'problematic embryos'
        if self.exclude != 'na':
if type(self.exclude) is str:
self.embryoLabels = self.embryoLabels[self.embryoLabels != self.exclude]
else:
self.embryoLabels = self.embryoLabels
for p in range(len(self.exclude)):
self.embryoLabels = self.embryoLabels[self.embryoLabels != str(self.exclude[p])]
#==============================================================================
    # Multiprocessing parallel embryo segmentation functions
#==============================================================================
# Performs analysis for each embryo, looping over embryoFolders.
def initEmParallel(self,e):
print 'Initiation in progress for ', self.embryoLabels[e]
        # Make embryo label current (for saving)
#ts = time.time()
self.getEmbryoFolders(self.parentPath,self.embryoLabels[e])
self.embryo = self.embryoLabels[e]
for f in range(len(self.embryoFolders)):
self.currentFolder = self.embryoFolders[f]
self.shortenedPath = self.shortenedPaths[f]
#self.shortenedPath = os.path.relpath(self.currentFolder, self.parentPath)
self.parseMetadata()
self.createResultsTable()
self.compiledData[self.shortenedPath] = self.results
self.resultSheets = pd.Panel.from_dict(self.compiledData)
self.resultSheets.to_pickle(self.resultsDir + "/" + self.embryo + '.pandas')
self.results=[]
self.resultSheets=[]
self.compiledData={}
# Now the results file is created and saved open it and populate with egg measurements
self.loadResults()
self.getIntervIndicesFromSequence(int(self.eggInt))
for f in range(len(self.embryoFolders)):
self.nonContSeqImport(f)
for g in range(len(self.eggIDIms)):
if len(self.eggIDIms) == 1:
g =0
# NOW perform the eggID on each intervalled image
self.locateEgg(self.seq[g])
# AND store eggID output in results table
#self.results.ix[f,self.eggIDIms[g],'eggBBox'] = self.eggBBox
self.results.ix[f,self.eggIDIms[g],'eggRotBBox'] = self.eggRotBBox
self.results.ix[f,self.eggIDIms[g],'eggBoxPoints'] = self.boxPoints
self.results.to_pickle(self.resultsDir + "/" + self.embryo + '.pandas')
# Creates worker pool and allocates jobs.
def parallelInitiation(self):
self.getEmbryoLabels(self.parentPath)
#cpuCount = pathos.multiprocessing.cpu_count()
cpuCount = pathos.multiprocessing.cpu_count()
jobSize= len(self.embryoLabels)
jobRange= range(len(self.embryoLabels))
if cpuCount > len(self.embryoLabels):
pool = pathos.multiprocessing.ProcessPool(jobSize)
pool.map(self.initEmParallel, jobRange)
else:
pool = pathos.multiprocessing.ProcessPool(cpuCount)
pool.map(self.initEmParallel, jobRange)
# Worker function ensuring appropriate functions are called before and after parallel processing.
def parallelGenerateResultsAndFindEggs(self, parentPath, scale, eggInt=1234):
if self.species == 'rbalthica':
totts = time.time()
self.eggInt = eggInt
self.compiledData = {}
self.parentPath = parentPath
self.scale = scale
self.getEmbryoLabels(self.parentPath)
# Debug
print self.embryoLabels
print len(self.embryoLabels)
self.createResultsFolder()
self.parallelInitiation()
self.saveMetadata()
print 'Egg identification and creation of results files is now complete. '
print 'This took ', ('{} s'.format(time.time()-totts))
#==============================================================================
    # Return a pandas panel with blockwise and embryoOutline removed - can be
# useful for some downstream, restricted analysis.
#==============================================================================
def reduceData(self):
res = self.results.drop('blockWise', axis=2)
res = res.drop('embryoOutline', axis=2)
return res
#==============================================================================
    # Save a restricted dataset (excluding blockwise data and embryo outlines)
# to a Numpy dictionary.
#==============================================================================
def saveReducedResults(self, savePath):
# Use function and add output to results dict.
reducedResults = {}
for f in range(len(self.embryoLabels)):
self.embryo = self.embryoLabels[f]
self.results = self.returnResults(self.embryo)
reduced = self.reduceData()
# Take embryo label and assign results
#exec('self.%s = reduced' % self.embryo)
print self.embryoLabels[f], 'loaded'
reducedResults[self.embryoLabels[f]] = reduced
# Get an appropriate name to save..
out = self.parentPath.replace('/phenomeData/','')
out = out.split('/')
out = out[len(out)-1]
# Finally save
np.save(savePath + out + '_reducedPhenomeResults.npy', reducedResults)
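    # Example usage (illustrative; the output path is a placeholder):
    #   instance.saveReducedResults('/data/summaries/')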
#==============================================================================
    # Function to launch Egg ID UI
#==============================================================================
def validateEggs(self, eggInt = 1234):
self.eggInt = eggInt
app = 0
#QtGui.QApplication.setGraphicsSystem('raster')
app = QtGui.QApplication(sys.argv)
self.UI = eggUI.eggUI()
self.dataForUI(0)
self.UI.embryoFolders = self.embryoFolders
self.UI.showUI(self.UI.compSeq, self.results[:,self.eggIDIms,'eggRotBBox'].values, self.results[:,self.eggIDIms,'eggBoxPoints'].values,list(self.embryoLabels), self.eggInt)
#instance1.UI.showUI(instance1.UI.compSeq, instance1.results[:,instance1.eggIDIms,'eggRotBBox'].values, instance1.results[:,instance1.eggIDIms,'eggBoxPoints'].values,list(instance1.embryoLabels), instance1.eggInt)
self.UI.diag.imv.sigTimeChanged.connect(self.UI.updateOpenCVEggROICurrEmbryo)
self.UI.diag.table.itemSelectionChanged.connect(self.supplyUINewEmbryoData)
self.UI.approveROI_btn.clicked.connect(self.saveUpdatedROI)
# Update image when timeline slider is changed.
# self.UI.diag.imv.timeLine.sigPositionChanged.connect(self.UI.updateImage)
app.exec_()
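    # Example usage (illustrative): launch the egg ROI validation UI, checking only the
    # first frame of each sequence (the default), or every 50th frame:
    #   instance.validateEggs()
    #   instance.validateEggs(eggInt=50)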
#==============================================================================
# Functions for the validateEggs() user interface - to validate egg locations etc..
#==============================================================================
#==============================================================================
# Load data for the eggUI for a particular embryo
#==============================================================================
def dataForUI(self,e):
        # Make embryo label current (for saving)
self.embryo = self.embryoLabels[e]
# Load results
self.loadResults()
self.getEmbryoFolders(self.parentPath, self.embryo)
# If self.eggInt = 1234, only check the first image of each image sequence
        # /time series. This is the default. However, users can check every image if
# desired by setting an appropriate eggInt.
if self.eggInt ==1234:
self.eggIDIms = []
self.eggIDIms.append(0)
self.intN = self.results.shape[1]-1
self.getIntervIndicesFromSequence(int(self.eggInt))
else:
# Get indices for loading, based on interval (intN).
self.getIntervIndicesFromSequence(int(self.eggInt))
# Create an empty stack for the egg approval images. Use first im for dimensions
im = self.imImport(0,0)
self.UI.compSeq = np.zeros(shape=(int(len(self.eggIDIms)*len(self.embryoFolders)),im.shape[0],im.shape[1]))
self.UI.eggUIimPaths = []
for e in range(len(self.embryoFolders)):
self.UI.eggUIimPaths[e*len(self.eggIDIms):(e*len(self.eggIDIms)+len(self.eggIDIms))] = self.results.iloc[e]['currentFolder'][self.eggIDIms] + self.results.iloc[e]['file'][self.eggIDIms]
#==============================================================================
# Save changes to egg ROI to disk.
#==============================================================================
def saveUpdatedROI(self):
# Save changes for each embryo's ROI
for r in range(self.UI.eggRotBBox.shape[1]):
self.results.iloc[r]['eggRotBBox'][self.eggIDIms] = self.UI.eggRotBBox[:,r]
for r in range(self.UI.eggBoxPoints.shape[1]):
self.results.iloc[r]['eggBoxPoints'][self.eggIDIms] = self.UI.eggBoxPoints[:,r]
# Store changes to disk
self.results.to_pickle(self.resultsDir + "/" + self.embryo + '.pandas')
#==============================================================================
# When table row selection changes, load new embryo data
#==============================================================================
def supplyUINewEmbryoData(self):
# Debug
# print 'supplyUINewEmbryoData', self.UI.diag.table.currentRow()
currRow = self.UI.diag.table.currentRow()
self.dataForUI(currRow)
self.UI.updateUI(self.UI.compSeq, self.results[:,self.eggIDIms,'eggRotBBox'].values, self.results[:,self.eggIDIms,'eggBoxPoints'].values)
#self.dataforViewOutput(currRow)
#==============================================================================
# View Output functions
#==============================================================================
def viewOutput(self):
app = 0
app = QApplication(sys.argv)
self.outputUI = viewOutput.viewOutput()
self.outputUI.scale = self.scale
self.dataforViewOutput(0)
self.outputUI.showUI(list(self.embryoLabels))
#self.dataForUI(0)
#self.UI.outputUI(self.compSeq, self.results[:,self.eggIDIms,'eggRotBBox'].values, self.results[:,self.eggIDIms,'eggBoxPoints'].values,list(self.embryoLabels))
#test.UI.diag.imv.sigTimeChanged.connect(updateAlert)
#self.UI.diag.imv.sigTimeChanged.connect(self.UI.updateOpenCVEggROICurrEmbryo)
self.outputUI.diag.table.itemSelectionChanged.connect(self.supplyoutputUINewEmbryoData)
#self.UI.approveROI_btn.clicked.connect(self.saveUpdatedROI)
#test.UI.roi.sigRegionChangeFinished.connect(roiChanged)
app.exec_()
# Provide results data
def dataforViewOutput(self,n=0):
        # Make embryo label current (for saving)
self.embryo = self.embryoLabels[int(n)]
# Load results
self.loadResults()
self.outputUI.results = self.results
self.getEmbryoFolders(self.parentPath, self.embryo)
# Send timeSeriesEmbryoBB data ...
self.outputUI.embryoBBRange = self.timeSeriesEmbryoBB()
def supplyoutputUINewEmbryoData(self):
# Debug
# print 'supplyUINewEmbryoData', self.UI.diag.table.currentRow()
currRow = self.outputUI.diag.table.currentRow()
self.dataforViewOutput(currRow)
self.outputUI.updateUI()
#==============================================================================
# Get Embryo bounding box max and min locations across entire data panel..
#==============================================================================
def timeSeriesEmbryoBB(self):
maxX = np.zeros(len(self.results))
maxY = np.zeros(len(self.results))
minX = np.zeros(len(self.results))
minY = np.zeros(len(self.results))
for f in range(len(self.results)):
self.currentFolder = self.embryoFolders[f]
self.shortenedPath = self.shortenedPaths[f]
#self.getShortenedPath()
out = self.getEmbryoBB()
maxX[f], maxY[f], minX[f], minY[f] = out['maxX'],out['maxY'],out['minX'],out['minY']
maxX = max(maxX)
maxY = max(maxY)
minX = min(minX)
minY = min(minY)
return {'minX':minX, 'minY':minY, 'maxX':maxX, 'maxY':maxY}
# Get Egg bounding box max and min locations across entire data panel..
def timeSeriesEggBB(self):
maxX = np.zeros(len(self.results))
maxY = np.zeros(len(self.results))
minX = np.zeros(len(self.results))
minY = np.zeros(len(self.results))
for f in range(len(self.results)):
self.currentFolder = self.embryoFolders[f]
self.shortenedPath = self.shortenedPaths[f]
out = self.getSeqEggBB()
maxX[f], maxY[f], minX[f], minY[f] = out['eggMaxX'],out['eggMaxY'],out['eggMinX'],out['eggMinY']
maxX = max(maxX)
maxY = max(maxY)
minX = min(minX)
minY = min(minY)
return {'minX':minX, 'minY':minY, 'maxX':maxX, 'maxY':maxY}
| 2.59375 | 3 |
airone/urls.py | userlocalhost/airone-1 | 0 | 12762043 | <reponame>userlocalhost/airone-1<gh_stars>0
from django.conf import settings
from django.conf.urls import url, include
from django.contrib.auth import views as auth_views
from django.views.generic import RedirectView
from api_v1.urls import urlpatterns as api_v1_urlpatterns
from airone.auth import view as auth_view
urlpatterns = [
url(r"^$", RedirectView.as_view(url="dashboard/")),
url(r"^acl/", include(("acl.urls", "acl"))),
url(r"^user/", include(("user.urls", "user"))),
url(r"^group/", include(("group.urls", "group"))),
url(r"^entity/", include(("entity.urls", "entity"))),
url(r"^dashboard/", include(("dashboard.urls", "dashboard"))),
url(r"^new-ui/", include(("dashboard.urls_for_new_ui", "dashboard_for_new_ui"))),
url(r"^entry/", include(("entry.urls", "entry"))),
url(r"^api/v1/", include(api_v1_urlpatterns)),
url(r"^api/v2/", include(("api_v2.urls", "api_v2"))),
url(r"^job/", include(("job.urls", "job"))),
url(
r"^auth/login/",
auth_views.LoginView.as_view(
redirect_authenticated_user=True,
extra_context={
"title": settings.AIRONE["TITLE"],
"subtitle": settings.AIRONE["SUBTITLE"],
"note_desc": settings.AIRONE["NOTE_DESC"],
"note_link": settings.AIRONE["NOTE_LINK"],
},
),
name="login",
),
url(r"^auth/logout/", auth_view.logout, name="logout"),
url(r"^webhook/", include(("webhook.urls", "webhook"))),
url(r"^role/", include(("role.urls", "role"))),
# url(r'^__debug__/', include('debug_toolbar.urls')),
]
for extension in settings.AIRONE["EXTENSIONS"]:
urlpatterns.append(
url(r"^extension/%s" % extension, include(("%s.urls" % extension, extension)))
)
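# For example (illustrative), with settings.AIRONE["EXTENSIONS"] = ["myext"], requests
# matching ^extension/myext are included from myext/urls.py under the "myext" namespace.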
| 1.90625 | 2 |
cgatpipelines/tasks/counts2table.py | kevinrue/cgat-flow | 0 | 12762044 | '''counts2table.py - wrap various differential expression tools
=============================================================
:Tags: Python
Purpose
-------
This script provides a convenience wrapper for differential expression
analysis for a variety of methods.
The aim of this script is to provide a common tabular output format
that is consistent between the different methods.
The script will call the selected method and output a variety of
diagnostic plots. Generally, the analysis aims to follow published
workflows for the individual method together with outputting diagnostic
plots to spot any problems. The script will also preprocess count
data to apply some common filtering methods.
The methods implemented are:
sleuth
Application of sleuth.
deseq2
Application of DESeq2
edger
Application of EdgeR
dexseq
Application of DEXSeq
ttest
Application of Welch's ttest to FPKM values
mock
A mock analysis. No differential analysis is performed,
but fold changes are computed and output.
Use --sleuth-genewise to test at gene rather than transcript level.
For genewise analysis, also supply the --gene-biomart option. Use the
following R code to identify the correct database (e.g.
hsapiens_gene_ensembl):
> library(biomaRt)
> listDatasets(useEnsembl(biomart="ensembl"))
Use the option --use-ihw to use the independent hypothesis weighting
method to calculate a weighted FDR. Note this will replace the
unweighted BH FDR in the final results table.
Usage
-----
Input
+++++
The input to this script is a table of measurements reflecting
expression levels. For the tag counting methods such as DESeq2 or
EdgeR, these should be the raw counts, while for other methods such as
ttest, these can be normalized values such as FPKM values. In
addition, sleuth does not use an expression table but rather the
directory of expression estimates from e.g. kallisto.
See option --sleuth-counts-dir
The script further requires a design table describing the tests to
be performed. The design table has four columns::
track include group pair
CW-CD14-R1 0 CD14 1
CW-CD14-R2 0 CD14 1
CW-CD14-R3 1 CD14 1
CW-CD4-R1 1 CD4 1
FM-CD14-R1 1 CD14 2
FM-CD4-R2 0 CD4 2
FM-CD4-R3 0 CD4 2
FM-CD4-R4 0 CD4 2
These files must be tab separated; this is enforced in downstream
analyses and other delimiters will cause the script to error.
track
name of track - should correspond to column header in the counts
table.
include
flag to indicate whether or not to include this data (0, 1)
group
group indicator - experimental group
pair
pair that sample belongs to (for paired tests) - set to 0 if the
design is not paired.
Note: additional columns included after pair can be used to specify
covariates (e.g replicate number etc)
Output
++++++
The script outputs a table with the following columns:
+------------------+------------------------------------------------------+
|*Column name* |*Content* |
+------------------+------------------------------------------------------+
|test_id |Name of the test (gene name, ... |
+------------------+------------------------------------------------------+
|treatment_name |Name of the treatment condition |
+------------------+------------------------------------------------------+
|treatment_mean |Estimated expression value for treatment |
+------------------+------------------------------------------------------+
|treatment_std |Standard deviation |
+------------------+------------------------------------------------------+
|control_name |Name of the control condition |
+------------------+------------------------------------------------------+
|control_mean |Estimated expression value for control |
+------------------+------------------------------------------------------+
|control_std |Standard deviation |
+------------------+------------------------------------------------------+
|pvalue |The p value for rejecting the null hypothesis |
+------------------+------------------------------------------------------+
|qvalue |Multiple testing correction |
+------------------+------------------------------------------------------+
|l2fold |log2 foldchange of treatment/control |
+------------------+------------------------------------------------------+
|transformed_l2fold|a transformed log2 foldchange value. |
+------------------+------------------------------------------------------+
|fold |foldchange of treatment/control |
+------------------+------------------------------------------------------+
|significant |Flag, 1 if test called significant according to FDR |
+------------------+------------------------------------------------------+
|status |test status (OK|FAIL) |
+------------------+------------------------------------------------------+
Additional plots and tables are generated and method specific.
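
Example (illustrative; file names are placeholders and option values should be
adapted to the experiment)::

   python counts2table.py
          --tag-tsv-file=counts.tsv.gz
          --design-tsv-file=design.tsv
          --method=deseq2
          --contrast=group
          --fdr=0.05
          > results.tsv
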
Command line options
--------------------
To do:
-- add some E.infos
Document!!!
'''
import sys
import pandas as pd
import cgatcore.experiment as E
import cgatpipelines.tasks.expression as expression
import cgatcore.iotools as iotools
import cgatpipelines.tasks.counts as Counts
import cgatpipelines.tasks.R as R
def main(argv=None):
"""script main.
parses command line options in sys.argv, unless *argv* is given.
"""
if not argv:
argv = sys.argv
# setup command line parser
parser = E.OptionParser(version="%prog version: $Id$",
usage=globals()["__doc__"])
parser.add_option("-t", "--tag-tsv-file", dest="input_filename_tags",
type="string",
help="input file with tag counts [default=%default].")
parser.add_option("-d", "--design-tsv-file", dest="input_filename_design",
type="string",
help="input file with experimental design "
"[default=%default].")
parser.add_option("-m", "--method", dest="method", type="choice",
choices=("ttest", "sleuth", "edger", "deseq2", "mock",
"dexseq"),
help="differential expression method to apply "
"[default=%default].")
parser.add_option("--deseq2-dispersion-method",
dest="deseq2_dispersion_method",
type="choice",
choices=("pooled", "per-condition", "blind"),
help="dispersion method for deseq2 [default=%default].")
parser.add_option("--deseq2-fit-type", dest="deseq2_fit_type",
type="choice",
choices=("parametric", "local"),
help="fit type for deseq2 [default=%default].")
parser.add_option("--edger-dispersion",
dest="edger_dispersion", type="float",
help="dispersion value for edgeR if there are no "
"replicates [default=%default].")
parser.add_option("-f", "--fdr", dest="fdr", type="float",
help="fdr to apply [default=%default].")
# currently not implemented
# parser.add_option("-R", "--output-R-code", dest="save_r_environment",
# type="string",
# help="save R environment to loc [default=%default]")
parser.add_option("-r", "--reference-group", dest="ref_group",
type="string",
help="Group to use as reference to compute "
"fold changes against [default=$default]")
parser.add_option("--filter-min-counts-per-row",
dest="filter_min_counts_per_row",
type="int",
help="remove rows with less than this "
"number of counts in total [default=%default].")
parser.add_option("--filter-min-counts-per-sample",
dest="filter_min_counts_per_sample",
type="int",
help="remove samples with a maximum count per sample of "
"less than this number [default=%default].")
parser.add_option("--filter-percentile-rowsums",
dest="filter_percentile_rowsums",
type="int",
help="remove percent of rows with "
"lowest total counts [default=%default].")
parser.add_option("--model",
dest="model",
type="string",
help=("model for GLM"))
parser.add_option("--reduced-model",
dest="reduced_model",
type="string",
help=("reduced model for LRT"))
parser.add_option("--contrast",
dest="contrast",
type="string",
help=("contrast for differential expression testing"))
parser.add_option("--sleuth-counts-dir",
dest="sleuth_counts_dir",
type="string",
help=("directory containing expression estimates"
"from sleuth. Sleuth expects counts"
"files to be called abundance.h5"))
parser.add_option("--dexseq-counts-dir",
dest="dexseq_counts_dir",
type="string",
help=("directory containing counts for dexseq. DEXSeq "
"expects counts files to be called .txt and"
"to be generated by the DEXSeq_counts.py script"))
parser.add_option("--dexseq-flattened-file",
dest="dexseq_flattened_file",
type="string",
help=("directory containing flat gtf for dexseq. DEXSeq "
"expects this to be generated by the"
"DEXSeq_prepare_annotations.py script"))
parser.add_option("--outfile-sleuth-count",
dest="outfile_sleuth_count",
type="string",
help=("outfile for full count table generated by sleuth"))
parser.add_option("--outfile-sleuth-tpm",
dest="outfile_sleuth_tpm",
type="string",
help=("outfile for full tpm table generated by sleuth"))
parser.add_option("--use-ihw",
dest="use_ihw",
action="store_true",
help=("use the independent hypothesis weighting method "
"to obtain weighted FDR"))
parser.add_option("--sleuth-genewise",
dest="sleuth_genewise",
action="store_true",
help=("run genewise, rather than transcript level testing"))
parser.add_option("--gene-biomart",
dest="gene_biomart",
type="string",
help=("name of ensemble gene biomart"))
parser.add_option("--de-test",
dest="DEtest",
type="choice",
choices=("wald", "lrt"),
help=("Differential expression test"))
parser.add_option("--Rhistory",
dest="Rhistory",
type="string",
help=("Outfile for R history"))
parser.add_option("--Rimage",
dest="Rimage",
type="string",
help=("Outfile for R image"))
parser.set_defaults(
input_filename_tags="-",
input_filename_design=None,
output_filename=sys.stdout,
method="deseq2",
fdr=0.1,
deseq2_dispersion_method="pooled",
deseq2_fit_type="parametric",
edger_dispersion=0.4,
ref_group=False,
filter_min_counts_per_row=None,
filter_min_counts_per_sample=None,
filter_percentile_rowsums=None,
spike_foldchange_max=4.0,
spike_expression_max=5.0,
spike_expression_bin_width=0.5,
spike_foldchange_bin_width=0.5,
spike_max_counts_per_bin=50,
model=None,
contrast=None,
output_filename_pattern=None,
sleuth_counts_dir=None,
dexseq_counts_dir=None,
dexseq_flattened_file=None,
outfile_sleuth_count=None,
outfile_sleuth_tpm=None,
use_ihw=False,
sleuth_genewise=False,
gene_biomart=None,
DEtest="wald",
reduced_model=None,
Rhistory=None,
Rimage=None)
# add common options (-h/--help, ...) and parse command line
(options, args) = E.start(parser, argv=argv, add_output_options=True)
RH = None
if options.Rhistory or options.Rimage:
RH = R.R_with_History()
outfile_prefix = options.output_filename_pattern
    # Expression.py currently expects a reference group for edgeR and
# sleuth, regardless of which test is used
if not options.ref_group and (
            options.method == "edger" or options.method == "sleuth"):
raise ValueError("Must provide a reference group ('--reference-group')")
# create Design object
design = expression.ExperimentalDesign(
pd.read_csv(iotools.open_file(options.input_filename_design, "r"),
sep="\t", index_col=0, comment="#"))
if len(set(design.table[options.contrast])) > 2:
if options.method == "deseq2" or options.method == "sleuth":
if options.DEtest == "wald":
raise ValueError(
"Factor must have exactly two levels for Wald Test. "
"If you have more than two levels in your factor, "
"consider LRT")
else:
                E.info('''There are more than 2 levels for the contrast
                specified (%s:%s). The log2fold changes in the results table
                and MA plots will be for the first two levels in the
                contrast. The p-value will be the p-value for the overall
                significance of the contrast. Hence, some genes will have a
                significant p-value but 0-fold change between the first two
                levels''' % (options.contrast, set(design.table[options.contrast])))
# Sleuth reads in data itself so we don't need to create a counts object
if options.method == "sleuth":
assert options.sleuth_counts_dir, (
"need to specify the location of the abundance.h5 counts files "
" (--sleuth-counts-dir)")
# validate design against counts and model
design.validate(model=options.model)
experiment = expression.DEExperiment_Sleuth()
results = experiment.run(design,
base_dir=options.sleuth_counts_dir,
model=options.model,
contrast=options.contrast,
outfile_prefix=outfile_prefix,
counts=options.outfile_sleuth_count,
tpm=options.outfile_sleuth_tpm,
fdr=options.fdr,
genewise=options.sleuth_genewise,
gene_biomart=options.gene_biomart,
DE_test=options.DEtest,
ref_group=options.ref_group,
reduced_model=options.reduced_model)
# DEXSeq reads in data itself
elif options.method == "dexseq":
assert options.dexseq_counts_dir, (
"need to specify the location of the .txt counts files")
# create Design object
design = expression.ExperimentalDesign(
pd.read_csv(iotools.open_file(options.input_filename_design, "r"),
sep="\t", index_col=0, comment="#"))
# validate design against counts and model
# design.validate(model=options.model)
experiment = expression.DEExperiment_DEXSeq()
results = experiment.run(design,
base_dir=options.dexseq_counts_dir,
model=options.model,
contrast=options.contrast,
ref_group=options.ref_group,
outfile_prefix=outfile_prefix,
flattenedfile=options.dexseq_flattened_file,
fdr=options.fdr)
else:
# create Counts object
if options.input_filename_tags == "-":
counts = Counts.Counts(pd.io.parsers.read_csv(
sys.stdin, sep="\t", index_col=0, comment="#"))
else:
counts = Counts.Counts(pd.io.parsers.read_csv(
iotools.open_file(options.input_filename_tags, "r"),
sep="\t", index_col=0, comment="#"))
# validate design against counts and model
design.validate(counts, options.model)
# restrict counts to samples in design table
counts.restrict(design)
# remove sample with low counts
if options.filter_min_counts_per_sample:
counts.removeSamples(
min_counts_per_sample=options.filter_min_counts_per_sample)
# remove observations with low counts
if options.filter_min_counts_per_row:
counts.removeObservationsFreq(
min_counts_per_row=options.filter_min_counts_per_row)
# remove bottom percentile of observations
if options.filter_percentile_rowsums:
counts.removeObservationsPerc(
percentile_rowsums=options.filter_percentile_rowsums)
# check samples are the same in counts and design following counts
# filtering and, if not, restrict design table and re-validate
design.revalidate(counts, options.model)
# set up experiment and run tests
if options.method == "ttest":
experiment = expression.DEExperiment_TTest()
results = experiment.run(counts, design)
elif options.method == "edger":
experiment = expression.DEExperiment_edgeR()
results = experiment.run(counts,
design,
model=options.model,
contrast=options.contrast,
outfile_prefix=outfile_prefix,
ref_group=options.ref_group,
fdr=options.fdr,
dispersion=options.edger_dispersion)
elif options.method == "deseq2":
experiment = expression.DEExperiment_DESeq2()
results = experiment.run(counts,
design,
model=options.model,
contrast=options.contrast,
outfile_prefix=outfile_prefix,
fdr=options.fdr,
fit_type=options.deseq2_fit_type,
ref_group=options.ref_group,
DEtest=options.DEtest,
R=RH)
results.getResults(fdr=options.fdr)
if options.use_ihw:
results.calculateIHW(alpha=options.fdr)
for contrast in set(results.table['contrast']):
results.plotVolcano(contrast, outfile_prefix=outfile_prefix, R=RH)
results.plotMA(contrast, outfile_prefix=outfile_prefix, R=RH)
results.plotPvalueHist(contrast, outfile_prefix=outfile_prefix, R=RH)
results.plotPvalueQQ(contrast, outfile_prefix=outfile_prefix, R=RH)
results.table.to_csv(sys.stdout, sep="\t", na_rep="NA", index=False)
results.summariseDEResults()
# write out summary tables for each comparison/contrast
for test_group in list(results.Summary.keys()):
outf = iotools.open_file("_".join(
[outfile_prefix, test_group, "summary.tsv"]), "w")
outf.write("category\tcounts\n%s\n"
% results.Summary[test_group].asTable())
outf.close()
if options.Rhistory:
RH.saveHistory(options.Rhistory)
if options.Rimage:
RH.saveImage(options.Rimage)
E.stop()
if __name__ == "__main__":
sys.exit(main(sys.argv))
| 2.375 | 2 |
hitbasic/visitor/factory.py | pvmm/hitbasic | 0 | 12762045 | <reponame>pvmm/hitbasic<filename>hitbasic/visitor/factory.py<gh_stars>0
from ..factory import factory
from ..exceptions import *
from .. import language_clauses as clauses
from .. import language_statements as statements
class FactoryProxy:
def __init__(self):
self.current_node = None
self.current_rule = None
def create_token(self, *token, **kwargs): # *token allow a compound token
node = kwargs.pop('node', self.current_node)
return factory.create_token(*token, node=node, **kwargs)
def create_reference(self, value, params=None, **kwargs):
node = kwargs.pop('node', self.current_node)
return factory.create_reference(value, params, node=node, **kwargs)
def create_nil(self, **kwargs):
node = kwargs.pop('node', self.current_node)
return factory.create_nil(node=node, **kwargs)
def create_literal(self, value, type, **kwargs):
node = kwargs.pop('node', self.current_node)
return factory.create_literal(value, type, node=node, **kwargs)
def create_sep_list(self, *args, sep=',', list_type=clauses.REGULAR, **kwargs):
node = kwargs.pop('node', self.current_node)
return factory.create_sep_list(*args, sep=sep, list_type=list_type, node=node, **kwargs)
def create_clause(self, clause, **kwargs):
node = kwargs.pop('node', self.current_node)
return factory.create_clause(clause, node=node, **kwargs)
def create_unary_op(self, op, operand, need_parens=False, **kwargs):
node = kwargs.pop('node', self.current_node)
try:
return factory.create_unary_op(op, operand, need_parens, node=node, **kwargs)
except TypeMismatch as e:
raise e.set_location(self.parser.file_name, self.parser.context(position=node.position),
self.parser.pos_to_linecol(node.position))
def create_operation(self, op, op1, op2, need_parens=False, **kwargs):
'legacy compatible shortcut'
node = kwargs.pop('node', self.current_node)
try:
return factory.create_operation(op, op1, op2, need_parens, node=node, **kwargs)
except TypeMismatch as e:
raise e.set_location(self.parser.file_name, self.parser.context(position=node.position),
self.parser.pos_to_linecol(node.position))
def create_case_op(self, op, op2, need_rparens=False, **kwargs):
node = kwargs.pop('node', self.current_node)
try:
return factory.create_case_op(op, op2, need_rparens, node=node, **kwargs)
except TypeMismatch as e:
raise e.set_location(self.parser.file_name, self.parser.context(position=node.position),
self.parser.pos_to_linecol(node.position))
def create_tuple(self, *values, use_parentheses=False, **kwargs):
'Create a n-tuple.'
node = kwargs.pop('node', self.current_node)
return factory.create_tuple(*values, node=node, use_parentheses=use_parentheses, **kwargs)
def create_initialisation(self, dimensions, values, type, **kwargs):
'Processes Dim initialisation expressions.'
node = kwargs.pop('node', None)
return factory.create_initialisation(dimensions, values, type, node=node, **kwargs)
def create_range(self, begin, end, **kwargs):
'Processes range clauses, like "1 to 20".'
node = kwargs.pop('node', self.current_node)
return factory.create_range(begin, end, node=node, **kwargs)
def create_point(self, x, y, step, **kwargs):
'Processes range clauses, like "1 to 20".'
node = kwargs.pop('node', self.current_node)
return factory.create_point(x, y, step, node=node, **kwargs)
def create_box(self, *points, **kwargs):
'Processes range clauses, like "1 to 20".'
node = kwargs.pop('node', self.current_node)
return factory.create_box(*points, node=node, **kwargs)
def create_statement(self, *tokens, **kwargs):
node = kwargs.pop('node', self.current_node)
return factory.create_statement(*tokens, node=node, **kwargs)
def create_attribution(self, lvalue, rvalue, **kwargs):
node = kwargs.pop('node', self.current_node)
return factory.create_attribution(lvalue, rvalue, node=node, **kwargs)
def create_label(self, identifier, type=statements.INTERNAL, line_number=None, **kwargs):
'legacy compatible shortcut'
node = kwargs.pop('node', self.current_node)
return factory.create_label(identifier, type=type, line_number=line_number, node=node, **kwargs)
def create_subroutine(self, code_block, **kwargs):
node = kwargs.pop('node', self.current_node)
return factory.create_subroutine(code_block, node=node, **kwargs)
def put_location(self, exception, pos=None):
'insert location in exception for a reraise'
return exception.set_location(self.parser.file_name, self.parser.context(pos),
self.parser.pos_to_linecol(pos))
def create_exception(self, exception_type, *args, **kwargs):
'create exception with predefined location'
node = kwargs.pop('node', self.current_node)
pos = kwargs.pop('pos', node.position or None)
raise exception_type(*args, filename=self.parser.file_name, context=self.parser.context(pos),
pos=self.parser.pos_to_linecol(pos))
| 2.25 | 2 |
cm/services/data/bucket.py | adrianppg/cloudman | 0 | 12762046 | <gh_stars>0
"""
Methods for managing S3 Buckets and making them available as local file systems.
"""
import os
import urllib2
import threading
from cm.util import misc
from cm.services import service_states
from cm.util.misc import _if_not_installed
from cm.util.decorators import TestFlag
import logging
log = logging.getLogger('cloudman')
class Bucket(object):
def __init__(self, filesystem, bucket_name, a_key=None, s_key=None):
"""
        Define the properties of a given bucket and how it maps to the
local file system.
Currently, S3 buckets are the only supported data source, even if the
        current cluster is not running on AWS. If the source bucket is private
        and the cluster is running on AWS, the credentials used by the current
        CloudMan cluster are used. Alternatively, if running on a non-AWS cloud, explicit credentials
need to be passed and will be used when interacting with this bucket.
Note that this method uses ``s3fs`` for mounting S3 buckets and, if ``s3fs``
command is not available on the system, it will automatically install it.
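        A minimal usage sketch (illustrative only; ``fs`` stands for the
        Filesystem service that owns this bucket and provides the mount point)::

            bucket = Bucket(fs, 'my-bucket-name')
            bucket.mount()    # make the bucket available at fs.mount_point
            bucket.unmount()  # later, detach it again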
"""
self.fs = filesystem # Filesystem that the bucket represents
self.app = self.fs.app # A local reference to app (used by @TestFlag)
self.bucket_name = bucket_name
self.mount_point = self.fs.mount_point
if a_key is None:
self.a_key = self.app.ud.get('access_key', None)
self.s_key = self.app.ud.get('secret_key', None)
else:
self.a_key = a_key
self.s_key = s_key
log.debug("If needed, when mounting bucket {0} will use the following credentials: '{1}' & '{2}'"
.format(self.bucket_name, self.a_key, self.s_key))
# Before we can mount a bucket, s3fs needs to be installed; installing
# s3fs takes a while and it thus done in a separate thread not to block.
# However, regular status updates will kick in before s3fs is installed
# so keep up with the installation progress to be able to appropriately
# update status. Because the install runs in a separate thread, without
# much work (see Python's Queue.Queue), we'll just set the flag as if
# s3fs is installed by default and update the value directly from the
# method vs. via a return value.
self.s3fs_installed = True
threading.Thread(target=self._install_s3fs).start()
@TestFlag(True)
@_if_not_installed("s3fs")
def _install_s3fs(self):
msg = "s3fs is not installed; will install it now (this typically takes 2-5 minutes)."
self.s3fs_installed = False
log.info(msg)
self.app.msgs.info(msg)
misc.run(
"cd /tmp;wget --output-document=s3fs.sh http://s3.amazonaws.com/cloudman/pss/s3fs.sh")
if misc.run("cd /tmp;bash s3fs.sh"):
msg = "Done installing s3fs"
self.s3fs_installed = True
else:
msg = "Trouble installing sf3s; giving up."
self.fs.state = service_states.ERROR
log.debug(msg)
self.app.msgs.info(msg)
def __str__(self):
return str(self.bucket_name)
def __repr__(self):
return str(self.bucket_name)
def _get_details(self, details):
"""
Bucket-specific details for this file system
"""
details['DoT'] = "No"
details['bucket_name'] = self.bucket_name
details['access_key'] = self.a_key
# TODO: keep track of any errors
details['err_msg'] = None if details.get(
'err_msg', '') == '' else details['err_msg']
return details
def _compose_mount_cmd(self):
"""
Compose the command line used to mount the current bucket as a file system.
This method checks if a given bucket is public or private and composes
the appropriate command line.
"""
bucket_url = 'http://{0}.s3.amazonaws.com/'.format(self.bucket_name)
cl = None
is_public = False
try:
u = urllib2.urlopen(bucket_url)
if u.msg == 'OK':
is_public = True
log.debug("Bucket {0} is public".format(self.bucket_name))
except urllib2.HTTPError:
log.debug("Bucket {0} is NOT public".format(self.bucket_name))
if is_public:
cl = "s3fs -oallow_other -opublic_bucket=1 {0} {1}".format(
self.bucket_name, self.mount_point)
else:
# Create a file containing user creds - we'll create one such file
# per bucket for maximum flexibility
creds_file = os.path.join('/tmp', self.bucket_name)
with open(creds_file, 'w') as cf:
cf.write("{0}:{1}".format(self.a_key, self.s_key))
os.chmod(creds_file, 0600) # Required by s3fs
cl = "s3fs -oallow_other -opasswd_file={0} {1} {2}"\
.format(creds_file, self.bucket_name, self.mount_point)
return cl
@TestFlag(True)
def mount(self):
"""
Mount the bucket as a local file system, making it available at
``self.fs.mount_point`` (which is typically ``/mnt/filesystem_name``)
"""
if not self.s3fs_installed:
log.debug("Waiting for s3fs to install before mounting bucket {0}"
.format(self.bucket_name))
self.fs.state = service_states.UNSTARTED
return True
try:
log.debug("Mounting file system {0} from bucket {1} to {2}"
.format(self.fs.get_full_name(), self.bucket_name, self.mount_point))
if os.path.exists(self.mount_point):
if len(os.listdir(self.mount_point)) != 0:
log.warning(
"Filesystem at %s already exists and is not empty." % self.mount_point)
return False
else:
os.mkdir(self.mount_point)
mount_cmd = None
mount_cmd = self._compose_mount_cmd()
if mount_cmd is not None:
if not misc.run(mount_cmd):
msg = "Seems to have run into a problem adding bucket {0} as a local file "\
"system.".format(self.bucket_name)
log.warning(msg)
self.app.msgs.info(msg)
return False
return True
else:
log.error("Cannot compose command line for mounting bucket {0}".format(
self.bucket_name))
except Exception, e:
log.error("Trouble mounting bucket {0} as a file system at {1}: {2}"
.format(self.bucket_name, self.mount_point, e))
return False
@TestFlag(True)
def unmount(self):
"""
Unmount the local file system mounted from the current bucket
"""
log.debug("Unmounting bucket {0} from {1}".format(
self.bucket_name, self.mount_point))
return misc.run("/bin/umount {0}".format(self.mount_point))
def status(self):
"""
Check on the status of this bucket as a mounted file system
"""
# TODO
self.fs._update_size()
| 3 | 3 |
phonotactics/codas/__init__.py | shlomo-Kallner/coventreiya | 0 | 12762047 |
__package__ = "codas"
__all__ = [ "codas" , "ver_1_5_1" , "ver_1_5_7" ]
| 1.078125 | 1 |
site_info/migrations/0001_initial.py | WarwickAnimeSoc/aniMango | 0 | 12762048 | <gh_stars>0
# -*- coding: utf-8 -*-
# Generated by Django 1.10.2 on 2016-10-18 09:53
from __future__ import unicode_literals
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name='Exec',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('exec_role', models.CharField(max_length=100)),
('exec_info', models.TextField()),
('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
),
migrations.CreateModel(
name='HistoryEntry',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('title', models.CharField(max_length=200)),
('body', models.TextField()),
('academic_year', models.CharField(choices=[('1997/1998', '1997/1998'), ('1998/1999', '1998/1999'), ('1999/2000', '1999/2000'), ('2000/2001', '2000/2001'), ('2001/2002', '2001/2002'), ('2002/2003', '2002/2003'), ('2003/2004', '2003/2004'), ('2004/2005', '2004/2005'), ('2005/2006', '2005/2006'), ('2006/2007', '2006/2007'), ('2007/2008', '2007/2008'), ('2008/2009', '2008/2009'), ('2009/2010', '2009/2010'), ('2010/2011', '2010/2011'), ('2011/2012', '2011/2012'), ('2012/2013', '2012/2013'), ('2013/2014', '2013/2014'), ('2014/2015', '2014/2015'), ('2015/2016', '2015/2016'), ('2016/2017', '2016/2017')], default='year/year', max_length=9)),
],
),
]
| 1.703125 | 2 |
udyam/udyam/report/circulation_list_report/circulation_list_report.py | jsukrut/udyam | 0 | 12762049 | <gh_stars>0
# Copyright (c) 2013, Udyam and contributors
# For license information, please see license.txt
from __future__ import unicode_literals
import frappe
from frappe import _
def execute(filters=None):
columns, data = [], []
columns = get_columns()
data = get_data(filters)
return columns, data
def get_data(filters):
if filters:
circulation_list = filters['circulation_list']
data = frappe.db.sql(""" SELECT CL.name, CD.name,CD.customer,CD.customer_name,CD.company_name,CD.address,
CD.address_title,CD.address_line1,CD.city,CD.state,CD.pincode,CD.phone,CD.qty,CD.tracking_id,
CD.booking_status,CD.transaction_date,CD.delivery_date,CD.transaction_status
FROM `tabCirculation List` CL
INNER JOIN `tabCirculation Details` CD
ON CL.name =CD.circulation_list
where CL.name ='{0}'""".format(circulation_list))
return data
def get_columns():
columns = [
{
"fieldname": "circulation_list",
"label": _("Circulation List"),
"fieldtype": "Link",
"options": "Circulation List",
"width": 100
},
{
"fieldname": "circulation_Detail",
"label": _("Circulation Detail"),
"fieldtype": "Link",
"options": "Circulation Detail",
"width": 100
},
{
"fieldname": "customer",
"label": _("Customer"),
"fieldtype": "Data",
"width": 300
},
{
"fieldname": "customer_name",
"label": _("<NAME>"),
"fieldtype": "Data",
"width": 100
},
{
"fieldname": "company_name",
"label": _("Company Name"),
"fieldtype": "Data",
"width": 100
},
{
"fieldname": "address",
"label": _("Address"),
"fieldtype": "data",
"width": 90
},
{
"fieldname": "address_title",
"label": _("Address Title"),
"fieldtype": "data",
"width": 90
},
{
"fieldname": "address_line1",
"label": _("Address Line 1"),
"fieldtype": "data",
"width": 90
},
{
"fieldname": "city",
"label": _("City"),
"fieldtype": "data",
"width": 90
},
{
"fieldname": "state",
"label": _("State"),
"fieldtype": "data",
"width": 120
},
{
"fieldname": "pin_code",
"label": _("Pin Code"),
"fieldtype": "Data",
"width": 170
},
{
"fieldname": "mobile_no",
"label": _("Mobile No"),
"fieldtype": "Data",
"width": 170
},
{
"fieldname": "qty",
"label": _("Qty"),
"fieldtype": "float",
"width": 170
},
{
"fieldname": "tracking_id",
"label": _("Tracking id"),
"fieldtype": "Data",
"width": 170
},
{
"fieldname": "booking_status",
"label": _("Booking Status"),
"fieldtype": "Data",
"width": 170
},
{
"fieldname": "transaction_date",
"label": _("Transaction Date"),
"fieldtype": "Date",
"width": 170
},
{
"fieldname": "delivery_date",
"label": _("Delivery Date"),
"fieldtype": "Date",
"width": 170
},
{
"fieldname": "transaction_status",
"label": _("Transaction Status"),
"fieldtype": "Data",
"width": 170
},
]
return columns | 2.25 | 2 |
utils/image_saver.py | amirbaghi/weeker_raytracer | 1 | 12762050 | <reponame>amirbaghi/weeker_raytracer<gh_stars>1-10
import re
import numpy as np
from PIL import Image
from pathlib import Path
# Open image file, slurp the lot
contents = Path('build/image.txt').read_text()
# Make a list of anything that looks like numbers using a regex...
# ... taking first as width, second as height and remainder as pixels
w,h, *pixels = re.findall(r'[0-9]+', contents)
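# e.g. if the file begins "800 400" followed by pixel values, then w == '800',
# h == '400' and pixels is a flat list of R, G, B components as strings
# (these numbers are illustrative only)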
# Now make pixels into Numpy array of uint8 and reshape to correct height, width and depth
na = np.array(pixels, dtype=np.uint8).reshape((int(h),int(w),3))
# Now make the Numpy array into a PIL Image and save
Image.fromarray(na).save("output/result.png") | 3.140625 | 3 |