Dataset schema (39 columns):
- hexsha: string (length 40)
- size: int64 (5 to 2.06M)
- ext: string (10 classes)
- lang: string (1 class)
- max_stars_repo_path: string (length 3 to 248)
- max_stars_repo_name: string (length 5 to 125)
- max_stars_repo_head_hexsha: string (length 40 to 78)
- max_stars_repo_licenses: list (length 1 to 10)
- max_stars_count: int64 (1 to 191k, nullable)
- max_stars_repo_stars_event_min_datetime: string (length 24, nullable)
- max_stars_repo_stars_event_max_datetime: string (length 24, nullable)
- max_issues_repo_path: string (length 3 to 248)
- max_issues_repo_name: string (length 5 to 125)
- max_issues_repo_head_hexsha: string (length 40 to 78)
- max_issues_repo_licenses: list (length 1 to 10)
- max_issues_count: int64 (1 to 67k, nullable)
- max_issues_repo_issues_event_min_datetime: string (length 24, nullable)
- max_issues_repo_issues_event_max_datetime: string (length 24, nullable)
- max_forks_repo_path: string (length 3 to 248)
- max_forks_repo_name: string (length 5 to 125)
- max_forks_repo_head_hexsha: string (length 40 to 78)
- max_forks_repo_licenses: list (length 1 to 10)
- max_forks_count: int64 (1 to 105k, nullable)
- max_forks_repo_forks_event_min_datetime: string (length 24, nullable)
- max_forks_repo_forks_event_max_datetime: string (length 24, nullable)
- content: string (length 5 to 2.06M)
- avg_line_length: float64 (1 to 1.02M)
- max_line_length: int64 (3 to 1.03M)
- alphanum_fraction: float64 (0 to 1)
- count_classes: int64 (0 to 1.6M)
- score_classes: float64 (0 to 1)
- count_generators: int64 (0 to 651k)
- score_generators: float64 (0 to 1)
- count_decorators: int64 (0 to 990k)
- score_decorators: float64 (0 to 1)
- count_async_functions: int64 (0 to 235k)
- score_async_functions: float64 (0 to 1)
- count_documentation: int64 (0 to 1.04M)
- score_documentation: float64 (0 to 1)
hexsha: a71396c8eccbd499f64ee47c8235e9246d3bc275 | size: 32,867 | ext: py | lang: Python
max_stars: saber/xbrain/xbrain.py @ elenimath/saber (head 71acab9798cf3aee1c4d64b09453e5234f8fdf1e, licenses ["Apache-2.0"]) | stars: 12 | 2018-05-14T17:43:18.000Z to 2021-11-16T04:03:33.000Z
max_issues: saber/xbrain/xbrain.py @ elenimath/saber (head 71acab9798cf3aee1c4d64b09453e5234f8fdf1e, licenses ["Apache-2.0"]) | issues: 34 | 2019-05-06T19:13:36.000Z to 2021-05-06T19:12:35.000Z
max_forks: saber/xbrain/xbrain.py @ elenimath/saber (head 71acab9798cf3aee1c4d64b09453e5234f8fdf1e, licenses ["Apache-2.0"]) | forks: 3 | 2019-10-08T17:42:17.000Z to 2021-07-28T05:52:02.000Z
content:
# Copyright 2019 The Johns Hopkins University Applied Physics Laboratory
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#!/usr/bin/env python
from __future__ import (absolute_import, division, print_function,
unicode_literals)
def classify_pixel(input_data, classifier, threads=8, ram=4000):
"""
Runs a pre-trained ilastik classifier on a volume of data
Adapted from Stuart Berg's example here:
https://github.com/ilastik/ilastik/blob/master/examples/example_python_client.py
Arguments:
input_data: data to be classified - 3D numpy array
classifier: ilastik trained/classified file
threads: number of threads to use for classifying input data
ram: RAM to use in MB
Returns:
predictions: voxel-wise class probability map produced by the classifier
"""
import numpy as np
import six
import pdb
from collections import OrderedDict
import vigra
import os
import ilastik_main
from ilastik.applets.dataSelection import DatasetInfo
from ilastik.workflows.pixelClassification import PixelClassificationWorkflow
# Before we start ilastik, prepare these environment variable settings.
os.environ["LAZYFLOW_THREADS"] = str(threads)
os.environ["LAZYFLOW_TOTAL_RAM_MB"] = str(ram)
# Set the command-line arguments directly into argparse.Namespace object
# Provide your project file, and don't forget to specify headless.
args = ilastik_main.parser.parse_args([])
args.headless = True
args.project = classifier
# Instantiate the 'shell', (an instance of ilastik.shell.HeadlessShell)
# This also loads the project file into shell.projectManager
shell = ilastik_main.main(args)
assert isinstance(shell.workflow, PixelClassificationWorkflow)
# Obtain the training operator
opPixelClassification = shell.workflow.pcApplet.topLevelOperator
# Sanity checks
assert len(opPixelClassification.InputImages) > 0
assert opPixelClassification.Classifier.ready()
# Batch-process the volume passed in by the caller
print("input_data.shape", input_data.shape)
# Tag the 3D input data so that ilastik interprets the axes correctly ('xyz').
input_data = vigra.taggedView(input_data, 'xyz')
# In case you're curious about which label class is which,
# let's read the label names from the project file.
label_names = opPixelClassification.LabelNames.value
label_colors = opPixelClassification.LabelColors.value
probability_colors = opPixelClassification.PmapColors.value
print("label_names, label_colors, probability_colors", label_names, label_colors, probability_colors)
# Construct an OrderedDict of role-names -> DatasetInfos
# (See PixelClassificationWorkflow.ROLE_NAMES)
role_data_dict = OrderedDict([("Raw Data",
[DatasetInfo(preloaded_array=input_data)])])
# Run the export via the BatchProcessingApplet
# Note: If you don't provide export_to_array, then the results will
# be exported to disk according to project's DataExport settings.
# In that case, run_export() returns None.
predictions = shell.workflow.batchProcessingApplet.\
run_export(role_data_dict, export_to_array=True)
predictions = np.squeeze(predictions)
print("predictions.dtype, predictions.shape", predictions.dtype, predictions.shape)
print("DONE.")
return predictions
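# Hedged usage sketch (not part of the original file): one way classify_pixel
# might be called on a small synthetic volume. The .ilp path is a hypothetical
# placeholder for a trained ilastik pixel-classification project.
def _example_classify_pixel():
    import numpy as np
    volume = np.random.randint(0, 255, size=(128, 128, 16)).astype(np.uint8)
    # "/path/to/classifier.ilp" is an assumed path, not part of this repository
    probabilities = classify_pixel(volume, "/path/to/classifier.ilp", threads=2, ram=2000)
    print(probabilities.shape)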
# Unsupervised GMM classification
def gmm_classify_pixel(volume,numsamp,numcomp,erodesz):
import numpy as np
import sklearn.mixture
import scipy.ndimage.morphology
IM = volume
whichsamp = np.random.randint(IM.shape[0]*IM.shape[1],size=numsamp)
trainind = IM.ravel()[whichsamp].astype(float)
im_gmm = sklearn.mixture.GaussianMixture(n_components=numcomp, covariance_type='diag')
im_gmm.fit(trainind.reshape(-1, 1))
#gm = fitgmdist(traind,numcomp, 'CovType','diagonal','Options',options);
whichCell = np.argmin(im_gmm.means_)
#[~,whichCell] = min(gm.mu);
Probx = im_gmm.predict_proba(IM.ravel().astype(float).reshape(-1, 1))
#Probx = posterior(gm,IM(:));
CellMap = np.reshape(Probx[:,whichCell],IM.shape)
if erodesz > 0:
CellMapErode = scipy.ndimage.morphology.grey_erosion(CellMap,footprint=strel2D(erodesz))
#print(strel2D(args.erodesz))
#print(CellMap[0:5,0:5])
#print(CellMapErode[0:5,0:5])
else:
CellMapErode = CellMap
return CellMapErode
# Unsupervised GMM classification (3D, for xbrain data)
# cell_class=1: cells are darker than the background; cell_class=0: cells are lighter than the background
def gmm_classify_pixel3D(volume,numsamp,numcomp,vessel_thres,min_size,cell_class=1):
import numpy as np
import sklearn.mixture
import scipy.ndimage.morphology
import skimage.measure
IM = volume
whichsamp = np.random.randint(IM.size,size=numsamp)
trainind = IM.ravel()[whichsamp].astype(float)
im_gmm = sklearn.mixture.GaussianMixture(n_components=numcomp, covariance_type='diag')
im_gmm.fit(trainind.reshape(-1, 1))
#gm = fitgmdist(traind,numcomp, 'CovType','diagonal','Options',options);
if cell_class==1:
whichCell = np.argmin(im_gmm.means_)
else:
whichCell = np.argmax(im_gmm.means_)
#[~,whichCell] = min(gm.mu);
Probx = im_gmm.predict_proba(IM.ravel().astype(float).reshape(-1, 1))
#Probx = posterior(gm,IM(:));
CellMap = np.reshape(Probx[:,whichCell],IM.shape)
#Now remove vessels that have been detected
CellMapT = np.multiply(CellMap,(CellMap>vessel_thres).astype(int))
#foot = strel(1)
foot = [[[True]]]
CellMapErode = scipy.ndimage.morphology.grey_erosion((CellMapT*255).astype(int),footprint=foot)
cc_labels,num = skimage.measure.label((CellMapErode>0).astype(int),connectivity=1,background=0,return_num=True)
numslices = []
for i_label in range(1,num+1):
inds = np.where(cc_labels==i_label)
z_inds = inds[2]
numslices.append(len(np.unique(z_inds))) # np.unique(tempinds))) #unique z inds
for i_label in range(1,num+1):
if numslices[i_label-1] < np.round(min_size * CellMapErode.shape[2]):
CellMapErode[np.where(cc_labels==i_label)] = 0 #eliminate vessels
Vmap = scipy.ndimage.morphology.grey_dilation(CellMapErode,footprint=strel(1)) # note that the strel3d function is not the same as this strel
VmapT = (Vmap==0).astype(int)
ProbMap = np.multiply(CellMap,VmapT)
return ProbMap
#print(strel2D(args.erodesz))
#print(CellMap[0:5,0:5])
#print(CellMapErode[0:5,0:5])
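# Hedged usage sketch (not part of the original file): gmm_classify_pixel on a
# synthetic 2D image; the sample count, component count and erosion size are
# illustrative values only.
def _example_gmm_classify_pixel():
    import numpy as np
    image = np.random.randint(0, 255, size=(128, 128)).astype(float)
    cell_probability = gmm_classify_pixel(image, numsamp=5000, numcomp=2, erodesz=2)
    print(cell_probability.shape)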
def segment_vessels(vessel_probability, probability_threshold, dilation_size, minimum_size):
"""
This function produces a binary image with segmented vessels from a probability map (from
ilastik or another classifier).
Copyright (c) 2016, UChicago Argonne, LLC.
Parameters
----------
vessel_probability : ndarray
Nr x Nc x Nz matrix which contains the probability of each voxel being a vessel.
probability_threshold : float
threshold between (0,1) to apply to probability map (only consider voxels for which
vessel_probability(r,c,z) > probability_threshold).
dilation_size : int
Sphere Structural Element diameter size.
minimum_size : int
components smaller than this are removed from image.
Returns
-------
ndarry
Binary Image
"""
import numpy as np
import scipy.io as sio
from scipy import ndimage as ndi
from skimage import morphology
smallsize = 100 # components smaller than this size are removed. WHY Fixed Size??
unfiltered_im = (vessel_probability >= probability_threshold)
im_removed_small_objects = morphology.remove_small_objects(unfiltered_im,
min_size = smallsize, in_place = True)
dilated_im = ndi.binary_dilation(im_removed_small_objects, morphology.ball((dilation_size-1)/2))
image_out = morphology.remove_small_objects(dilated_im, min_size = minimum_size,
in_place = True)
return(image_out)
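# Hedged usage sketch (not part of the original file): segment_vessels on a
# random probability volume; the threshold and size parameters are
# illustrative only, not recommended settings.
def _example_segment_vessels():
    import numpy as np
    vessel_probability = np.random.rand(64, 64, 32)
    binary_vessels = segment_vessels(vessel_probability, probability_threshold=0.5,
                                     dilation_size=3, minimum_size=100)
    print(binary_vessels.shape, binary_vessels.dtype)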
def detect_cells2D(cell_probability, probability_threshold, stopping_criterion,
initial_template_size, dilation_size, max_no_cells):
"""
This is the top level function to infer the position (and eventually size) of all cells in a 2D
image. We assume that we already have computed a "probability map" which encodes
the probability that each pixel corresponds to a cell body.
Copyright (c) 2016, UChicago Argonne, LLC.
Parameters
----------
cell_probability : ndarray
Nr x Nc matrix which contains the probability of each pixel being a cell body.
probability_threshold : float
threshold between (0,1) to apply to probability map (only consider voxels for which
cell_probability(r,c,z) > probability_threshold)
stopping_criterion : float
stopping criterion is a value between (0,1) (minimum normalized correlation between
template and probability map) (Example = 0.47)
initial_template_size : int
initial size of spherical template (to use in sweep)
dilation_size : int
size to increase mask around each detected cell (zero out sphere of radius with
initial_template_size+dilation_size around each centroid)
max_no_cells : int
maximum number of cells (alternative stopping criterion)
Returns
-------
ndarray
centroids = D x 3 matrix, where D = number of detected cells.
The (x,y) coordinate of each cell are in columns 1-2.
The third column contains the correlation (ptest) between the template
and probability map and thus represents our "confidence" in the estimate.
The algorithm terminates when ptest<=stopping_criterion.
ndarray
new_map = Nr x Nc matrix containing labeled detected cells (1,...,D)
"""
# following imports to be updated when directory structure are finalized
#import create_synth_dict
#from compute3dvec import compute3dvec
from scipy import signal
import numpy as np
import pdb
import logging
# threshold probability map.
newtest = (cell_probability * (cell_probability > probability_threshold)).astype('float32')
#initial_template_size is an int now but could be a vector later on - convert it to an array
initial_template_size = np.atleast_1d(initial_template_size)
# create dictionary of spherical templates
box_radius = np.ceil(np.max(initial_template_size)/2) + 1
dict = create_synth_dict2D(initial_template_size, box_radius)
dilate_dict = create_synth_dict2D(initial_template_size + dilation_size, box_radius)
box_length = int(round(np.shape(dict)[0] ** (1/2)))
new_map = np.zeros((np.shape(cell_probability)), dtype='uint8')
newid = 1
centroids = np.empty((0, 3))
# run greedy search step for at most max_no_cells steps (# cells <= max_no_cells)
for ktot in range(max_no_cells):
val = np.zeros((np.shape(dict)[1], 1), dtype='float32')
id = np.zeros((np.shape(dict)[1], 1), dtype='uint32')
# loop to convolve the probability cube with each template in dict
for j in range(np.shape(dict)[1]):
convout = signal.fftconvolve(newtest, np.reshape(dict[:,j], (box_length, box_length)), mode='same')
# get the max value of the flattened convout array and its index
val[j],id[j] = np.real(np.amax(convout)), np.argmax(convout)
# find position in image with max correlation
which_atom = np.argmax(val)
which_loc = id[which_atom]
# Place the selected template, centered at which_loc, into a 2-D array the size of newtest.
x2 = compute2dvec(dict[:, which_atom], which_loc, box_length, np.shape(newtest))
xid = np.nonzero(x2)
# Place the dilated template, centered at which_loc, into a 2-D array the size of newtest.
x3 = compute2dvec(dilate_dict[:, which_atom], which_loc, box_length, np.shape(newtest))
newtest = newtest * (x3 == 0)
ptest = val/np.sum(dict, axis=0)
if ptest < stopping_criterion:
print("Cell Detection is done")
return(centroids, new_map)
# Label detected cell
new_map[xid] = newid
newid = newid + 1
#Convert flat index to indices
rr, cc = np.unravel_index(which_loc, np.shape(newtest))
new_centroid = cc, rr #Check - why cc is first? Flip indices
# insert a row into centroids
centroids = np.vstack((centroids, np.append(new_centroid, ptest)))
# for later: convert to logging and print with much less frequency
if(ktot % 10 == 0):
print('Iteration remaining = ', (max_no_cells - ktot - 1), 'Correlation = ', ptest )
print("Cell Detection is done")
return(centroids, new_map)
def detect_cells(cell_probability, probability_threshold, stopping_criterion,
initial_template_size, dilation_size, max_no_cells):
"""
This is the top level function to infer the position (and eventually size) of all cells in a 3D
volume of image data. We assume that we already have computed a "probability map" which encodes
the probability that each voxel corresponds to a cell body.
Copyright (c) 2016, UChicago Argonne, LLC.
Parameters
----------
cell_probability : ndarray
Nr x Nc x Nz matrix which contains the probability of each voxel being a cell body.
probability_threshold : float
threshold between (0,1) to apply to probability map (only consider voxels for which
cell_probability(r,c,z) > probability_threshold)
stopping_criterion : float
stopping criterion is a value between (0,1) (minimum normalized correlation between
template and probability map) (Example = 0.47)
initial_template_size : int
initial size of spherical template (to use in sweep)
dilation_size : int
size to increase mask around each detected cell (zero out sphere of radius with
initial_template_size+dilation_size around each centroid)
max_no_cells : int
maximum number of cells (alternative stopping criterion)
Returns
-------
ndarray
centroids = D x 4 matrix, where D = number of detected cells.
The (x,y,z) coordinate of each cell are in columns 1-3.
The fourth column contains the correlation (ptest) between the template
and probability map and thus represents our "confidence" in the estimate.
The algorithm terminates when ptest<=stopping_criterion.
ndarray
new_map = Nr x Nc x Nz matrix containing labeled detected cells (1,...,D)
"""
# following imports to be updated when directory structure are finalized
#import create_synth_dict
#from compute3dvec import compute3dvec
from scipy import signal
import numpy as np
import pdb
import logging
if len(cell_probability.shape) == 4:
print('Assuming Z, Chan, Y, X input')
cell_probability = np.transpose(cell_probability[:,0,:,:], (2,1,0))
# threshold probability map.
newtest = (cell_probability * (cell_probability > probability_threshold)).astype('float32')
#initial_template_size is an int now but could be a vector later on - convert it to an array
initial_template_size = np.atleast_1d(initial_template_size)
# create dictionary of spherical templates
box_radius = np.ceil(np.max(initial_template_size)/2) + 1
dict = create_synth_dict(initial_template_size, box_radius)
dilate_dict = create_synth_dict(initial_template_size + dilation_size, box_radius)
box_length = int(round(np.shape(dict)[0] ** (1/3)))
new_map = np.zeros((np.shape(cell_probability)), dtype='uint8')
newid = 1
centroids = np.empty((0, 4))
# run greedy search step for at most max_no_cells steps (# cells <= max_no_cells)
for ktot in range(max_no_cells):
val = np.zeros((np.shape(dict)[1], 1), dtype='float32')
id = np.zeros((np.shape(dict)[1], 1), dtype='uint32')
# loop to convolve the probability cube with each template in dict
for j in range(np.shape(dict)[1]):
convout = signal.fftconvolve(newtest, np.reshape(dict[:,j], (box_length, box_length,
box_length)), mode='same')
# get the max value of the flattened convout array and its index
val[j],id[j] = np.real(np.amax(convout)), np.argmax(convout)
# find position in image with max correlation
which_atom = np.argmax(val)
which_loc = id[which_atom]
# Save dict into a cube array with its center given by which_loc and place it into a 3-D array.
x2 = compute3dvec(dict[:, which_atom], which_loc, box_length, np.shape(newtest))
xid = np.nonzero(x2)
# Save dilate_dict into a cube array with its center given by which_loc and place it into a 3-D array.
x3 = compute3dvec(dilate_dict[:, which_atom], which_loc, box_length, np.shape(newtest))
newtest = newtest * (x3 == 0)
ptest = val/np.sum(dict, axis=0)
if ptest < stopping_criterion:
print("Cell Detection is done")
return(centroids, new_map)
# Label detected cell
new_map[xid] = newid
newid = newid + 1
#Convert flat index to indices
rr, cc, zz = np.unravel_index(which_loc, np.shape(newtest))
new_centroid = rr, cc, zz #Check - why cc is first?
# insert a row into centroids
centroids = np.vstack((centroids, np.append(new_centroid, ptest)))
# for later: convert to logging and print with much less frequency
if(ktot % 10 == 0):
print('Iteration remaining = ', (max_no_cells - ktot - 1), 'Correlation = ', ptest )
print("Cell Detection is done, centroids: {} map: {}".format(centroids.shape, new_map.shape))
return(centroids, new_map)
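# Hedged usage sketch (not part of the original file): detect_cells on a
# synthetic probability map; every parameter value below is illustrative,
# not a recommended setting.
def _example_detect_cells():
    import numpy as np
    cell_probability = np.random.rand(64, 64, 16).astype('float32')
    centroids, label_map = detect_cells(cell_probability, probability_threshold=0.2,
                                        stopping_criterion=0.47, initial_template_size=9,
                                        dilation_size=3, max_no_cells=50)
    print(centroids.shape, label_map.shape)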
def create_synth_dict(radii, box_radius):
"""
This function creates a collection of spherical templates of different sizes.
Copyright (c) 2016, UChicago Argonne, LLC.
Parameters
----------
radii : int
radii could be a 1xN vector but currently is an integer
box_radius : float
Returns
-------
ndarray
dictionary of template vectors, of size (box_length ** 3 x length(radii)), where
box_length = box_radius*2 +1 and radii is an input to the function which contains a vector
of different sphere sizes.
"""
import numpy as np
from numpy import linalg as LA
from scipy import ndimage as ndi
from skimage.morphology import ball
box_length = int(box_radius * 2 + 1) #used for array dimension
dict = np.zeros((box_length**3, np.size(radii)), dtype='float32')
cvox = int((box_length-1)/2 + 1)
for i in range(len(radii)):
template = np.zeros((box_length, box_length, box_length))
template[cvox, cvox, cvox] = 1
dict[:, i] = np.reshape(ndi.binary_dilation(template, ball((radii[i] - 1)/2)), (box_length**3))
dict[:, i] = dict[:, i]/(LA.norm(dict[:, i]))
return(dict)
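# Hedged illustration (not part of the original file): the dictionary returned
# by create_synth_dict holds one unit-norm, flattened spherical template per
# radius; the radii below are arbitrary examples.
def _example_create_synth_dict():
    import numpy as np
    radii = np.array([5, 9])
    box_radius = np.ceil(np.max(radii) / 2) + 1
    templates = create_synth_dict(radii, box_radius)
    box_length = int(box_radius * 2 + 1)
    print(templates.shape == (box_length ** 3, len(radii)))   # True
    print(np.allclose(np.linalg.norm(templates, axis=0), 1))  # each column has unit L2 norm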
def create_synth_dict2D(radii, box_radius):
"""
This function creates a collection of 2D (circular) templates of different sizes,
taken as the central slice of spherical templates.
Copyright (c) 2016, UChicago Argonne, LLC.
Parameters
----------
radii : int
radii could be a 1xN vector but currently is an integer
box_radius : float
Returns
-------
ndarray
dictionary of template vectors, of size (box_length ** 2 x length(radii)), where
box_length = box_radius*2 +1 and radii is an input to the function which contains a vector
of different sphere sizes.
"""
import numpy as np
from numpy import linalg as LA
from scipy import ndimage as ndi
from skimage.morphology import ball
box_length = int(box_radius * 2 + 1) #used for array dimension
dict = np.zeros((box_length**2, np.size(radii)), dtype='float32')
cvox = int((box_length-1)/2 + 1)
for i in range(len(radii)):
template = np.zeros((box_length, box_length, box_length))
template[cvox, cvox, cvox] = 1
tmp = ndi.binary_dilation(template, ball((radii[i] - 1)/2))
dict[:, i] = np.reshape(tmp[:,:,cvox], (box_length**2))
if(LA.norm(dict[:, i])>0):
dict[:, i] = dict[:, i]/(LA.norm(dict[:, i]))
return(dict)
def placeatom(vector, box_length, which_loc, stacksz):
"""
Copies the data from vector into a cube with the width of "box_length" and places the cube
into a 3-D array with the shape/size defined by the "stacksz" parameter. The center of cube is
given by the "which_loc" parameter.
Copyright (c) 2016, UChicago Argonne, LLC.
Parameters
----------
vector : ndarray
Nx1 array
box_length : int
Length of the cube side
which_loc : int
location to place atom in the flattened array
stacksz : ndarray
shape of the array (3D)
Returns
-------
ndarray
"""
import numpy as np
output_array = np.zeros((stacksz), dtype='float32')
#Convert flat index to indices
r, c, z = np.unravel_index(which_loc, (stacksz))
output_array[r, c, z] = 1
# Increase every dimension by box_length at the top and at the bottom and fill them with zeroes.
output_array = np.lib.pad(output_array, ((box_length, box_length), (box_length, box_length),
(box_length, box_length)), 'constant', constant_values=(0, 0))
# get the indices of the center of cube into increased dimensions output_array.
r, c, z = np.nonzero(output_array)
#save the output of round() function to avoid multiple calls to it.
half_length = int(round(box_length/2))
# TODO: casting to int to avoid problems downstream with indexing
c = int(c)
r = int(r)
z = int(z)
#Save the data from the cube into output_array.
output_array[(r - half_length +1) : (r + box_length - half_length +1), \
(c - half_length +1) : (c + box_length - half_length +1), \
(z - half_length +1) : (z + box_length - half_length +1)] = \
np.reshape(vector, (box_length, box_length, box_length))
return(output_array)
def placeatom2D(vector, box_length, which_loc, stacksz):
"""
Copies the data from vector into a square patch with the width of "box_length" and places the patch
into a 2-D array with the shape/size defined by the "stacksz" parameter. The center of the data is
given by the "which_loc" parameter.
Copyright (c) 2016, UChicago Argonne, LLC.
Parameters
----------
vector : ndarray
Nx1 array
box_length : int
Length of the square side
which_loc : int
location to place atom in the flattened array
stacksz : ndarray
shape of the array (2D)
Returns
-------
ndarray
"""
import numpy as np
output_array = np.zeros((stacksz), dtype='float32')
#Convert flat index to indices
r, c = np.unravel_index(which_loc, (stacksz))
output_array[r, c] = 1
# Increase every dimension by box_length at the top and at the bottom and fill them with zeroes.
output_array = np.lib.pad(output_array, ((box_length, box_length), (box_length, box_length)), 'constant', constant_values=(0, 0))
# get the indices of the center of cube into increased dimensions output_array.
r, c = np.nonzero(output_array)
#save the output of round() function to avoid multiple calls to it.
half_length = int(round(box_length/2))
# TODO: casting to int to avoid problems downstream with indexing
c = int(c)
r = int(r)
#Save the data from the cube into output_array.
output_array[(r - half_length +1) : (r + box_length - half_length +1), \
(c - half_length +1) : (c + box_length - half_length +1)] = \
np.reshape(vector, (box_length, box_length))
return(output_array)
def compute3dvec(vector, which_loc, box_length, stacksz):
"""
Resizes the array dimension returned by placeatom() to the shape/size given by "stacksz" parameter.
Copyright (c) 2016, UChicago Argonne, LLC.
Parameters
----------
vector : ndarray
Nx1 array
box_length : int
Length of the cube side
which_loc : int
location to place atom
stacksz : ndarray
shape of the array (3D)
Returns
-------
ndarray
"""
import numpy as np
output_array = placeatom(vector, box_length, which_loc, stacksz)
#delete the top "box_length" arrays for all dimensions.
x, y, z = np.shape(output_array)
output_array = output_array[box_length:x, box_length:y, box_length:z]
#delete the bottom "box_length" arrays for all dimensions.
x, y, z = np.shape(output_array)
output_array = output_array[0 : (x - box_length), 0 : (y - box_length), 0 : (z - box_length)]
return output_array
def compute2dvec(vector, which_loc, box_length, stacksz):
"""
Resizes the array dimension returned by placeatom() to the shape/size given by "stacksz" parameter.
Copyright (c) 2016, UChicago Argonne, LLC.
Parameters
----------
vector : ndarray
Nx1 array
box_length : int
Length of the square side
which_loc : int
location to place atom
stacksz : ndarray
shape of the array (2D)
Returns
-------
ndarray
"""
import numpy as np
output_array = placeatom2D(vector, box_length, which_loc, stacksz)
#delete the top "box_length" arrays for all dimensions.
x, y = np.shape(output_array)
output_array = output_array[box_length:x, box_length:y]
#delete the bottom "box_length" arrays for all dimensions.
x, y = np.shape(output_array)
output_array = output_array[0 : (x - box_length), 0 : (y - box_length)]
return output_array
def strel2D(sesize):
import numpy as np
#sw = ((sesize-1)/2)
#ses2 = int(math.ceil(sesize/2))
[y,x] = np.meshgrid(list(range(-sesize,sesize+1)), list(range(-sesize,sesize+1)))
se = ((((x/sesize)**2.0 + (y/sesize)**2.0) **(1/2.0))<=1)
return(se)
def strel(sesize):
import numpy as np
#sw = ((sesize-1)/2)
#ses2 = int(math.ceil(sesize/2))
[y,x,z] = np.meshgrid(list(range(-sesize,sesize+1)), list(range(-sesize,sesize+1)), list(range(-sesize,sesize+1)))
se = ((((x/sesize)**2.0 + (y/sesize)**2.0 + (z/sesize)**2.0) **(1/2.0))<=1)
return(se)
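# Hedged illustration (not part of the original file): strel2D/strel build
# boolean disk/sphere footprints, e.g. strel2D(1) is the 3x3 cross-shaped disk
# shown in the comments below.
def _example_strel():
    footprint = strel2D(1)
    print(footprint.astype(int))
    # [[0 1 0]
    #  [1 1 1]
    #  [0 1 0]]
    print(strel(1).shape)  # (3, 3, 3) spherical footprint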
#Call this function for centroid-level f1 score on 2d (nii) data
def cell_metrics2D(cells,im_train,initial_template_size):
import numpy as np
from skimage.measure import label, regionprops
im_truth_labeled = label(im_train)
regions = regionprops(im_truth_labeled)
C0_prev = np.empty((0, 2))
for props in regions:
y0, x0 = props.centroid
C0_prev = np.concatenate((C0_prev, [[x0,y0]]), axis=0)
#CC = measure.label(im_train)
#CC = measure.label(im_train, background=0)
#regions = regionprops(CC)
#C0_prev = np.zeros((1,2))
#for props in regions:
# y0, x0 = props.centroid
# C0_prev = np.concatenate((C0_prev, [x0,y0]), axis=0)
C1 = pad2D(cells[:,:2], initial_template_size, im_train.shape[0], im_train.shape[1])
#C1 = pad(C1_prev, args.initial_template_size, im_train.shape[0], im_train.shape[1])
C0 = pad2D(C0_prev, initial_template_size, im_train.shape[0], im_train.shape[1])
C0 = np.transpose(C0)
C1 = np.transpose(C1)
thresh = initial_template_size
f1 = centroid_f1(C0,C1,thresh)
return f1
def pad2D(C0_prev, sphere_sz, improb0_sz, improb1_sz):
import numpy as np
C0 = np.empty((0,2))
for i in range(0,C0_prev.shape[0]):
curr_row = C0_prev[i,:]
if curr_row[0] > sphere_sz and curr_row[0] < improb0_sz - sphere_sz and curr_row[1] > sphere_sz and curr_row[1] < improb1_sz - sphere_sz :
C0 = np.concatenate((C0,[curr_row]),axis=0)
return C0
# Call this function for centroid-level f1 score on 3D data
def f1_centroid3D(cells,im_train,initial_template_size):
import numpy as np
from skimage.measure import label, regionprops
cells = cells[:,0:3] #Chop off last column, which is correlation score
im_truth_labeled = label(im_train)
regions = regionprops(im_truth_labeled)
C0_prev = np.empty((0, 3))
for props in regions:
x0, y0, z0 = props.centroid #poss pull should put in x,y,z
C0_prev = np.concatenate((C0_prev, [[x0,y0,z0]]), axis=0)
#CC = measure.label(im_train)
#CC = measure.label(im_train, background=0)
#regions = regionprops(CC)
#C0_prev = np.zeros((1,2))
#for props in regions:
# y0, x0 = props.centroid
# C0_prev = np.concatenate((C0_prev, [x0,y0]), axis=0)
C1 = pad3D(cells, initial_template_size, im_train.shape[0], im_train.shape[1], im_train.shape[2])
#C1 = pad(C1_prev, args.initial_template_size, im_train.shape[0], im_train.shape[1])
C0 = pad3D(C0_prev, initial_template_size, im_train.shape[0], im_train.shape[1], im_train.shape[2])
C0 = np.transpose(C0)
C1 = np.transpose(C1)
thresh = initial_template_size
f1 = centroid_f1(C0,C1,thresh)
return f1
def pad3D(C0_prev, sphere_sz, improb0_sz, improb1_sz, improb2_sz):
import numpy as np
C0 = np.empty((0,3))
for i in range(0,C0_prev.shape[0]):
curr_row = C0_prev[i,:]
if curr_row[0] > sphere_sz and curr_row[0] < improb0_sz - sphere_sz and curr_row[1] > sphere_sz and curr_row[1] < improb1_sz - sphere_sz and curr_row[2] > sphere_sz and curr_row[2] < improb2_sz - sphere_sz :
C0 = np.concatenate((C0,[curr_row]),axis=0)
return C0
#Dense, 3D f1 score of cell detection
def dense_f1_3D(cell_map,cell_gt_map):
import numpy as np
import math
# processing params
bin_cell_map = cell_map
bin_cell_map[bin_cell_map>0]=1
bin_cell_map[bin_cell_map<=0]=0
bin_gt_map = cell_gt_map
bin_gt_map[bin_gt_map>0]=1
bin_gt_map[bin_gt_map<=0]=0
beta = 2
#cells
cell_true_detect = np.sum(np.logical_and(bin_cell_map,bin_gt_map).astype(int).ravel())
cell_detections = np.sum(bin_cell_map.ravel())
cell_true_positives = np.sum(bin_gt_map.ravel())
if(cell_detections>0):
cell_p = cell_true_detect/cell_detections
else:
cell_p = 0
if(cell_true_positives>0):
cell_r = cell_true_detect/cell_true_positives
else:
cell_r = 0
if(cell_p + cell_r >0):
f_cell = (1+math.pow(beta,2)) * (cell_p*cell_r)/(math.pow(beta,2)*cell_p + cell_r)
else:
f_cell = 0
return f_cell
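# Hedged worked example (not part of the original file): dense_f1_3D computes
# an F-beta score with beta=2 on binarised maps. On the toy arrays below the
# precision is 2/3 and the recall is 1, so F2 = 5*p*r/(4*p + r) ~= 0.909.
def _example_dense_f1_3D():
    import numpy as np
    prediction = np.zeros((4, 4, 1))
    prediction[0, 0:3, 0] = 1       # three predicted voxels
    ground_truth = np.zeros((4, 4, 1))
    ground_truth[0, 0:2, 0] = 1     # two true voxels, both predicted
    print(dense_f1_3D(prediction, ground_truth))  # ~0.909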
def centroid_f1(C0,C1,thres):
import scipy
import numpy as np
C0 = np.transpose(C0)
C1 = np.transpose(C1)
Y = scipy.spatial.distance.cdist(C0, C1, 'euclidean')
try:
vals = np.sort(np.amin(Y,axis=1))
valinds = np.argsort(np.min(Y,axis=1))
except ValueError:
print("No Detected Objects")
return 0
L = len(vals[np.where(vals<=thres)])
C0 = np.transpose(C0)
C0 = C0[:,valinds]
Y2 = scipy.spatial.distance.cdist(C1, np.transpose(C0), 'euclidean')
matches = np.zeros((2,L))
dvec = np.zeros((L,1))
for i in range(0,L):
idcol = i
valtmp = np.amin(Y2[:,i])
idrow = np.argmin(Y2[:,i])
#idrow = np.argmin(Y2[i,:])
if valtmp<=thres:
matches[:,i] = [idrow,idcol]
dvec[i] = valtmp
Y2[idrow,:]=thres+100
Y2[:,idcol]=thres+100
idd = np.where(dvec > thres)[0]
matches = np.delete(matches, idd, axis=1)  # drop matches farther than thres (MATLAB-style matches(:,idd)=[])
matches = np.asarray(matches)
numcorrect = sum([sum(x)!=0 for x in zip(*matches)])
numgt = C0.shape[1]
numrecov = C1.shape[0]
b=1 #f1 score
TP = numcorrect/numrecov
FP = 1 - TP
FN = (numgt - numcorrect)/numgt
p = TP /(TP + FP)
r = TP /(TP + FN)
f1 = (1 + b**2)*p*r/(((b**2)*p)+r)
return f1
avg_line_length: 37.562286 | max_line_length: 220 | alphanum_fraction: 0.66334 | count_classes: 0 | score_classes: 0 | count_generators: 0 | score_generators: 0 | count_decorators: 0 | score_decorators: 0 | count_async_functions: 0 | score_async_functions: 0 | count_documentation: 14,643 | score_documentation: 0.445523
hexsha: a71437b5469d3a544e7b8017e8d77b89874193c2 | size: 2,088 | ext: py | lang: Python
max_stars: migrations/versions/b846613b404e_.py @ python-02/flask-spa-CoopApp (head 8ecd9e22847401c6ee18b76a80c68c8ba5d77401, licenses ["MIT"]) | stars: 6 | 2021-04-16T06:37:04.000Z to 2021-11-11T23:37:04.000Z
max_issues: migrations/versions/b846613b404e_.py @ python-02/flask-spa-CoopApp (head 8ecd9e22847401c6ee18b76a80c68c8ba5d77401, licenses ["MIT"]) | issues: null | null to null
max_forks: migrations/versions/b846613b404e_.py @ python-02/flask-spa-CoopApp (head 8ecd9e22847401c6ee18b76a80c68c8ba5d77401, licenses ["MIT"]) | forks: 2 | 2021-06-01T15:35:17.000Z to 2022-03-05T03:50:57.000Z
content:
"""empty message
Revision ID: b846613b404e
Revises: fc25bf71d841
Create Date: 2020-01-06 21:43:28.958558
"""
from alembic import op
import sqlalchemy as sa
from sqlalchemy.dialects import postgresql
# revision identifiers, used by Alembic.
revision = 'b846613b404e'
down_revision = 'fc25bf71d841'
branch_labels = None
depends_on = None
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.create_table('loan',
sa.Column('uuid', postgresql.UUID(as_uuid=True), nullable=False),
sa.Column('code', sa.String(length=20), nullable=True),
sa.Column('borrower_code', sa.String(length=20), nullable=True),
sa.Column('type_loan', sa.String(length=20), nullable=True),
sa.Column('date_loan', sa.DateTime(), nullable=True),
sa.Column('date_start', sa.Date(), nullable=True),
sa.Column('date_end', sa.Date(), nullable=True),
sa.Column('term', sa.Integer(), nullable=True),
sa.Column('type_schedule', sa.String(length=20), nullable=True),
sa.Column('is_settled', sa.Boolean(), nullable=True),
sa.Column('amount', sa.Float(), nullable=True),
sa.Column('interest_rate', sa.Float(), nullable=True),
sa.Column('interest_amount', sa.Float(), nullable=True),
sa.Column('remarks', sa.String(length=50), nullable=True),
sa.PrimaryKeyConstraint('uuid'),
sa.UniqueConstraint('code')
)
op.create_table('loan_detail',
sa.Column('uuid', postgresql.UUID(as_uuid=True), nullable=False),
sa.Column('loan_code', sa.String(length=20), nullable=True),
sa.Column('type_line', sa.String(length=20), nullable=True),
sa.Column('amount_to_pay', sa.Float(), nullable=True),
sa.Column('amount_payed', sa.Float(), nullable=True),
sa.Column('date_to_pay', sa.Date(), nullable=True),
sa.Column('date_payed', sa.Date(), nullable=True),
sa.PrimaryKeyConstraint('uuid')
)
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.drop_table('loan_detail')
op.drop_table('loan')
# ### end Alembic commands ###
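# Hedged usage note (not part of the original file): a revision like this is
# normally applied with Alembic's CLI, e.g. `alembic upgrade head` (or
# `flask db upgrade` when Flask-Migrate manages the migrations directory), and
# rolled back with `alembic downgrade fc25bf71d841`, which runs downgrade() above.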
avg_line_length: 36.631579 | max_line_length: 69 | alphanum_fraction: 0.684866 | count_classes: 0 | score_classes: 0 | count_generators: 0 | score_generators: 0 | count_decorators: 0 | score_decorators: 0 | count_async_functions: 0 | score_async_functions: 0 | count_documentation: 651 | score_documentation: 0.311782
hexsha: a7143837d4f1b09881e05cb620fce36372532de7 | size: 2,010 | ext: py | lang: Python
max_stars: alipay/aop/api/domain/AlipayEcoCityserviceIndustryEnergySendModel.py @ antopen/alipay-sdk-python-all (head 8e51c54409b9452f8d46c7bb10eea7c8f7e8d30c, licenses ["Apache-2.0"]) | stars: null | null to null
max_issues: alipay/aop/api/domain/AlipayEcoCityserviceIndustryEnergySendModel.py @ antopen/alipay-sdk-python-all (head 8e51c54409b9452f8d46c7bb10eea7c8f7e8d30c, licenses ["Apache-2.0"]) | issues: null | null to null
max_forks: alipay/aop/api/domain/AlipayEcoCityserviceIndustryEnergySendModel.py @ antopen/alipay-sdk-python-all (head 8e51c54409b9452f8d46c7bb10eea7c8f7e8d30c, licenses ["Apache-2.0"]) | forks: null | null to null
content:
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
from alipay.aop.api.constant.ParamConstants import *
from alipay.aop.api.domain.EnergyExtRequest import EnergyExtRequest
class AlipayEcoCityserviceIndustryEnergySendModel(object):
def __init__(self):
self._ext_info = None
self._outer_no = None
self._scene = None
@property
def ext_info(self):
return self._ext_info
@ext_info.setter
def ext_info(self, value):
if isinstance(value, EnergyExtRequest):
self._ext_info = value
else:
self._ext_info = EnergyExtRequest.from_alipay_dict(value)
@property
def outer_no(self):
return self._outer_no
@outer_no.setter
def outer_no(self, value):
self._outer_no = value
@property
def scene(self):
return self._scene
@scene.setter
def scene(self, value):
self._scene = value
def to_alipay_dict(self):
params = dict()
if self.ext_info:
if hasattr(self.ext_info, 'to_alipay_dict'):
params['ext_info'] = self.ext_info.to_alipay_dict()
else:
params['ext_info'] = self.ext_info
if self.outer_no:
if hasattr(self.outer_no, 'to_alipay_dict'):
params['outer_no'] = self.outer_no.to_alipay_dict()
else:
params['outer_no'] = self.outer_no
if self.scene:
if hasattr(self.scene, 'to_alipay_dict'):
params['scene'] = self.scene.to_alipay_dict()
else:
params['scene'] = self.scene
return params
@staticmethod
def from_alipay_dict(d):
if not d:
return None
o = AlipayEcoCityserviceIndustryEnergySendModel()
if 'ext_info' in d:
o.ext_info = d['ext_info']
if 'outer_no' in d:
o.outer_no = d['outer_no']
if 'scene' in d:
o.scene = d['scene']
return o
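# Hedged usage sketch (not part of the original file): round-tripping the model
# through from_alipay_dict / to_alipay_dict; the field values are made up.
def _example_energy_send_model():
    payload = {"outer_no": "ORDER-0001", "scene": "energy_send"}
    model = AlipayEcoCityserviceIndustryEnergySendModel.from_alipay_dict(payload)
    print(model.to_alipay_dict())  # {'outer_no': 'ORDER-0001', 'scene': 'energy_send'}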
avg_line_length: 26.8 | max_line_length: 69 | alphanum_fraction: 0.584577 | count_classes: 1,825 | score_classes: 0.90796 | count_generators: 0 | score_generators: 0 | count_decorators: 895 | score_decorators: 0.445274 | count_async_functions: 0 | score_async_functions: 0 | count_documentation: 200 | score_documentation: 0.099502
hexsha: a715a55b0649d434e3e3db7475617b277a5112ae | size: 1,657 | ext: py | lang: Python
max_stars: project_receipt/receipt/urls.py @ Guilouf/django-receipt (head fb42de12311cd1a20cc28c74a732d818f28ef551, licenses ["Apache-2.0"]) | stars: null | null to null
max_issues: project_receipt/receipt/urls.py @ Guilouf/django-receipt (head fb42de12311cd1a20cc28c74a732d818f28ef551, licenses ["Apache-2.0"]) | issues: 8 | 2021-02-01T12:47:02.000Z to 2021-12-13T09:34:38.000Z
max_forks: project_receipt/receipt/urls.py @ Guilouf/django-receipt (head fb42de12311cd1a20cc28c74a732d818f28ef551, licenses ["Apache-2.0"]) | forks: null | null to null
content:
from django.urls import path
from receipt import views
urlpatterns = [
path('', views.ReceiptList.as_view(), name='home'),
path('receipt/', views.ReceiptList.as_view(), name='receipt_list'),
path('receipt/create', views.ReceiptCreate.as_view(), name='receipt_create'),
path('receipt/<int:pk>/edit', views.ReceiptUpdate.as_view(), name='receipt_update'),
path('establishment/', views.EstablishmentList.as_view(), name='establishment_list'),
path('establishment/create', views.EstablishmentCreate.as_view(), name='establishment_create'),
path('establishment/<int:pk>/edit', views.EstablishmentUpdate.as_view(), name='establishment_update'),
path('establishment/<int:pk>', views.EstablishmentDetail.as_view(), name='establishment_detail'),
path('establishment/<int:pk>/add_receipt', views.ReceiptFromEstablishmentCreate.as_view(),
name='establishment_add_receipt'),
path('company/', views.CompanyList.as_view(), name='company_list'),
path('company/create', views.CompanyCreate.as_view(), name='company_create'),
path('company/<int:pk>/edit', views.CompanyUpdate.as_view(), name='company_update'),
path('company/<int:pk>', views.CompanyDetail.as_view(), name='company_detail'),
path('company/<int:pk>/add_establishment', views.EstablishmentFromCompanyCreate.as_view(),
name='company_add_establishment'),
path('tag/', views.TagList.as_view(), name='tag_list'),
path('tag/create', views.TagCreate.as_view(), name='tag_create'),
path('tag/<int:pk>/edit', views.TagUpdate.as_view(), name='tag_update'),
path('tag/<int:pk>', views.TagDetail.as_view(), name='tag_detail'),
]
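# Hedged usage sketch (not part of the original file): the route names above can
# be resolved with django.urls.reverse; the resulting paths are relative to wherever
# this urlconf is included.
#   from django.urls import reverse
#   reverse('receipt_update', kwargs={'pk': 1})   # -> .../receipt/1/edit
#   reverse('company_detail', kwargs={'pk': 3})   # -> .../company/3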
avg_line_length: 61.37037 | max_line_length: 106 | alphanum_fraction: 0.719976 | count_classes: 0 | score_classes: 0 | count_generators: 0 | score_generators: 0 | count_decorators: 0 | score_decorators: 0 | count_async_functions: 0 | score_async_functions: 0 | count_documentation: 632 | score_documentation: 0.381412
hexsha: a71e3a4361a99f178927d847326e3096eeaee755 | size: 4,216 | ext: py | lang: Python
max_stars: utils/common/_common.py @ Pzqqt/Django_Transportation_Management_System (head f4f0905d8e007920ae190252eeaefbc6ee67ed85, licenses ["MIT"]) | stars: null | null to null
max_issues: utils/common/_common.py @ Pzqqt/Django_Transportation_Management_System (head f4f0905d8e007920ae190252eeaefbc6ee67ed85, licenses ["MIT"]) | issues: null | null to null
max_forks: utils/common/_common.py @ Pzqqt/Django_Transportation_Management_System (head f4f0905d8e007920ae190252eeaefbc6ee67ed85, licenses ["MIT"]) | forks: null | null to null
content:
from functools import partial
from itertools import chain
from collections import UserList
import logging
import traceback
from django import forms
from django.db.models import Model
from django.core.validators import validate_comma_separated_integer_list
from django.core.serializers.json import DjangoJSONEncoder
from django.db.models.fields.related import ForeignKey
from django.http import JsonResponse
from django.utils import timezone
class UnescapedDjangoJSONEncoder(DjangoJSONEncoder):
""" Custom JSON encoder that forces ensure_ascii to False so that Chinese characters are not escaped into unreadable output """
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
# Force ensure_ascii to False
self.ensure_ascii = False
UnescapedJsonResponse = partial(JsonResponse, encoder=UnescapedDjangoJSONEncoder)
class SortableModelChoiceField(forms.ModelChoiceField):
"""
Sorting the choices of a ModelChoiceField is a hassle.
Although we can sort the queryset attribute with `order_by`,
we also have to consider database optimization (avoid `using filesort` showing up in EXPLAIN as much as possible).
Therefore we add an extra optional attribute to ModelChoiceIterator that allows the choices to be sorted while they are iterated.
This sorting happens at the application layer, in order to reduce the load on the database.
"""
class _ModelChoiceIterator(forms.models.ModelChoiceIterator):
class _FakeQuerySet(UserList):
_prefetch_related_lookups = ()
def iterator(self):
yield from self
def __iter__(self):
sort_key = self.field.sort_key
if sort_key is not None:
# After sorted() (which executes the database query immediately), _prefetch_related_lookups no longer has any effect
self.queryset = self._FakeQuerySet(sorted(self.queryset, key=sort_key))
return super().__iter__()
iterator = _ModelChoiceIterator
def __init__(self, queryset, **kwargs):
super().__init__(queryset, **kwargs)
self.sort_key = kwargs.get("sort_key", None)
def multi_lines_log(logger: logging.Logger, string: str, level=logging.INFO):
""" Log a multi-line string, one log record per line """
for line in string.splitlines():
logger.log(level, line)
def traceback_log(logger: logging.Logger, level=logging.ERROR):
""" Log the current exception traceback """
multi_lines_log(logger=logger, string=traceback.format_exc(), level=level)
def traceback_and_detail_log(request, logger: logging.Logger, level=logging.ERROR):
""" Log the exception traceback along with some request details """
logger.log(level, "=" * 100)
logger.log(level, "Exception:")
logger.log(level, "Time: %s" % timezone.make_naive(timezone.now()).strftime("%Y-%m-%d %H:%M:%S"))
logger.log(level, "Url: %s" % request.path)
logger.log(level, "Method: %s" % request.method)
logger.log(level, "Cookies: %s" % request.COOKIES)
logger.log(level, "Session: %s" % dict(request.session.items()))
if request.method == "POST":
logger.log(level, "Post data: %s" % request.POST.dict())
logger.log(level, "")
traceback_log(logger=logger, level=level)
logger.log(level, "=" * 100)
def validate_comma_separated_integer_list_and_split(string: str, auto_strip=True) -> list:
""" Check whether the string is a comma-separated list of integers.
If it is, split it automatically and return the list; if not, raise a ValidationError.
:param string: the string to parse
:param auto_strip: when True (the default), strip the string before validating
:return: list
"""
if auto_strip:
string = string.strip()
validate_comma_separated_integer_list(string)
return [int(x) for x in string.split(',')]
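# Hedged usage sketch (not part of the original file):
# validate_comma_separated_integer_list_and_split(" 1,2,3 ") returns [1, 2, 3],
# while a string such as "1,a,3" raises django.core.exceptions.ValidationError.
def _example_split_integer_list():
    print(validate_comma_separated_integer_list_and_split(" 1,2,3 "))  # [1, 2, 3]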
def model_to_dict_(instance: Model) -> dict:
""" Django has a built-in django.forms.models.model_to_dict method (the "original model_to_dict" below),
which conveniently turns a model into a dict, but with a gotcha: fields marked as non-editable (editable=False) are not included in the output.
The original model_to_dict is only used when initializing a ModelForm, so for safety's sake that behaviour is understandable,
but the "model to dict" method we want should include all of the model's fields.
So, taking the original model_to_dict as a reference, we wrote this new model_to_dict_ method.
Compared with the original model_to_dict it lacks the fields and exclude parameters, because we do not need them for now.
"""
opts = instance._meta
data = {}
for f in chain(opts.concrete_fields, opts.private_fields, opts.many_to_many):
# For one-to-one and many-to-one foreign keys, return the related model instance (many-to-many fields are handled properly in the else branch)
# Note: since a ForeignKey's attname is "<field name>_id", calling value_from_object would return the id of the related object instead
if isinstance(f, ForeignKey):
data[f.name] = getattr(instance, f.name, None)
else:
data[f.name] = f.value_from_object(instance)
return data
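# Hedged usage sketch (not part of the original file): "Order" below is a
# made-up model name used only for illustration.
#   order = Order.objects.first()
#   data = model_to_dict_(order)
#   # data contains every concrete field, including editable=False ones,
#   # and ForeignKey fields map to the related model instances.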
def del_session_item(request, *items):
""" Remove the given keys from the request session """
for item in items:
request.session.pop(item, None)
avg_line_length: 35.728814 | max_line_length: 101 | alphanum_fraction: 0.708491 | count_classes: 1,540 | score_classes: 0.302079 | count_generators: 51 | score_generators: 0.010004 | count_decorators: 0 | score_decorators: 0 | count_async_functions: 0 | score_async_functions: 0 | count_documentation: 2,086 | score_documentation: 0.40918
hexsha: a71f0fb6127bf9b694c0e036c4b163b042f9e29b | size: 127 | ext: py | lang: Python
max_stars: landingpage/urls.py @ aurphillus/Django-Library-Completed (head f46e45f85c888e7694323e22f6e966c291a4a0be, licenses ["MIT"]) | stars: null | null to null
max_issues: landingpage/urls.py @ aurphillus/Django-Library-Completed (head f46e45f85c888e7694323e22f6e966c291a4a0be, licenses ["MIT"]) | issues: null | null to null
max_forks: landingpage/urls.py @ aurphillus/Django-Library-Completed (head f46e45f85c888e7694323e22f6e966c291a4a0be, licenses ["MIT"]) | forks: null | null to null
content:
from django.urls import path
from landingpage.views import *
urlpatterns = [
path('',landingpage,name="landingpage"),
]
avg_line_length: 14.111111 | max_line_length: 44 | alphanum_fraction: 0.716535 | count_classes: 0 | score_classes: 0 | count_generators: 0 | score_generators: 0 | count_decorators: 0 | score_decorators: 0 | count_async_functions: 0 | score_async_functions: 0 | count_documentation: 15 | score_documentation: 0.11811
hexsha: a71fe8e9c812b790a9f8e10c54db7ff385e01808 | size: 31,509 | ext: py | lang: Python
max_stars: cloud-v2.0/verify/verify.py @ 13242084001/api (head 71f57b485d685caae94a84b625d64be832cf8910, licenses ["Apache-2.0"]) | stars: null | null to null
max_issues: cloud-v2.0/verify/verify.py @ 13242084001/api (head 71f57b485d685caae94a84b625d64be832cf8910, licenses ["Apache-2.0"]) | issues: 1 | 2021-03-25T23:58:32.000Z to 2021-03-25T23:58:32.000Z
max_forks: cloud-v2.0/verify/verify.py @ 13242084001/api (head 71f57b485d685caae94a84b625d64be832cf8910, licenses ["Apache-2.0"]) | forks: null | null to null
content:
from common import sshClient
import time
import eventlet
from .gol import *
import requests
from common.uploadMirror import login
from common.sqlquery import Query
#import pytest
import json
def check_login_response_headers(response):
result = False
if "cloud0" in response.headers.get("Set-Cookie"):
result = True
assert result == True
def logout_ok(response):
pass
def check_stop_py_machine(response):
#print(json.dumps(response.json()))
#print(response.json().get("code"), "yyyyyyyyyyyyyyy")
assert response.json().get("code") == 0
def check_add_role(response):
body_json = response.json()
assert body_json.get("code") == 1
assert body_json.get("error") == None
def check_remove_role(response):
body = response.json()
assert body.get("code") == 1
assert body.get("error") == None
# Verify adding a zone
def check_add_zone(response):
body = response.json()
resourceIds = body.get("resourceIds")
#print(body)
assert body.get("code") == 1
#assert isinstance(resourceIds,list)
def check_query_zone(response):
body = response.json()
assert body.get("code") == 1
def check_query_cluster(response):
body = response.json()
print("####################################################")
assert body.get("code") == 1
assert isinstance(body.get("rows"), list)
# JSON validation, not used yet
def check_cluster_add(response):
body = response.json()
print(body)
def check_physicalmachine_query_ok(response):
body = response.json()
print(body)
assert body.get("code") == 1
#assert body.get("rows")
def check_physical_update_ok(response):
body = response.json()
print(body)
assert body.get("code") == 1
assert isinstance(body.get("resourceIds"), list)
def check_stop_start_pysicalmachine_ok(response):
body = response.json()
assert body.get("code") == 1
assert isinstance(body.get("resourceIds"), list)
# Verify querying primary storage
def check_mainStorage_query_ok(response):
body = response.json()
assert body.get("code") == 1
assert isinstance(body.get("rows"), list)
# Verify updating primary storage
def check_mainStorage_update_ok(response):
body = response.json()
assert body.get("code") == 1
assert isinstance(body.get("resourceIds"), list)
# Verify querying the cluster list when adding a cluster to primary storage
def check_query_clusterUnload_list_ok(response):
body = response.json()
assert body.get("code") == 1
assert isinstance(body.get("rows"), list)
# Verify adding a cluster to primary storage
def check_mainStorage_addCluster_ok(response, clusterId, uri):
assert response.json().get('code') == 1
#print(response.json())
result = Query()('SELECT * FROM `cl_host_inf` WHERE CLUSTERID="{0}" AND STATE=1 AND DELETED=0'.format(clusterId))
#print(result)
#print(555555555555555555555555)
username = "root"
password = "user@dev"
ip = "172.16.130.254"
cmd = 'kubectl get vmp|grep "{0}"|wc -l'.format(uri)
flag = False
eventlet.monkey_patch()
with eventlet.Timeout(180, False):
while 1:
time.sleep(0.5)
ret = sshClient.tunction(ip=ip, username=username, password=password, cmd=cmd)
if int(ret) == len(result):
# print(1111)
flag = True
break
assert flag
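# Hedged refactoring sketch (not part of the original file): the eventlet
# polling loop used above recurs throughout this module; a hypothetical helper
# such as the one below could encapsulate it. sshClient.tunction and its
# keyword arguments are taken from the calls above.
def _wait_for_ssh_output(ip, cmd, predicate, timeout=180, username="root", password="user@dev"):
    """Poll `cmd` over SSH until predicate(output) is true or the timeout expires."""
    import time
    import eventlet
    from common import sshClient
    eventlet.monkey_patch()
    flag = False
    with eventlet.Timeout(timeout, False):
        while True:
            time.sleep(0.5)
            ret = sshClient.tunction(ip=ip, username=username, password=password, cmd=cmd)
            if predicate(ret):
                flag = True
                break
    return flag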
def check_delete_mainStorage_ok(response, ids):
id_list = ids.split(",")
#result = Query()(
# 'SELECT COUNT(*) FROM `cl_host_inf` WHERE CLUSTERID="{0}" AND STATE=1 AND DELETED=0'.format(clusterid))
username = "root"
password = "user@dev"
ip = "172.16.130.254"
for id in id_list:
cmd = 'kubectl get vmp|grep "{0}"|wc -l'.format(id)
flag = False
eventlet.monkey_patch()
with eventlet.Timeout(180, False):
while 1:
time.sleep(0.5)
ret = sshClient.tunction(ip=ip, username=username, password=password, cmd=cmd)
try:
if not int(ret):
# print(1111)
flag = True
break
except Exception as e:
print(e)
flag = True
break
assert flag
def check_add_mirrorServer_ok(response):
print(response.json())
username = "root"
password = "user@dev"
ip = "172.16.130.254"
cmd = 'kubectl get vmp|grep vmdi|wc -l'
flag = False
eventlet.monkey_patch()
with eventlet.Timeout(180, False):
while 1:
time.sleep(0.5)
ret = sshClient.tunction(ip=ip, username=username, password=password, cmd=cmd)
if int(ret) > 1:
# print(1111)
flag = True
break
assert flag
# Verify that creating a cloud host succeeded
def check_cloudHost_add_ok(response):
body = response.json()
print(body)
assert body.get("code") == 1
id = body.get("id")
id_len = len(id.split(","))
id = id.replace(",", "|")
username = "root"
password = "user@dev"
ip = "172.16.130.254"
cmd = 'kubectl get vm|grep -E "{0}"|wc -l'.format(id)
flag = False
eventlet.monkey_patch()
with eventlet.Timeout(180, False):
while 1:
time.sleep(0.5)
ret = sshClient.tunction(ip=ip, username=username, password=password, cmd=cmd)
if int(ret) == id_len:
#print(1111)
flag = True
break
assert flag
# Verify querying cloud hosts in the running state
def check_query_vm_status_ok(response, state):
#print("zheshi jjjjjj ", state)
verify_rows = get_value("rows")
num = 0
for row in verify_rows:
if row.get("state") == state:
num += 1
local_rows = response.json().get("rows")
for row in local_rows:
assert row.get("state") == state
continue
assert len(local_rows) == num
def check_query_vm_ok(response, keyword, searchtype):
searchtype_dict = {0: "name", 2: "hostip"}
verify_rows = get_value("rows")
#print(verify_rows,"f"*30)
num = 0
for row in verify_rows:
if keyword in row.get(searchtype_dict.get(searchtype)):
num += 1
local_rows = response.json().get("rows")
for row in local_rows:
assert keyword in row.get(searchtype_dict.get(searchtype))
continue
assert len(local_rows) == num
def search_vmip_list(keyword):
des_url = "http://172.16.130.254:38080/networkCard/query.do"
vm_list = get_value("rows")
#print(vm_list, "8"*10)
vmid_list = [i.get("vmid") for i in vm_list]
result = 0
cookie = login()
for vmid in vmid_list:
params = {
"order": "asc",
"offset": 0,
"limit": 20,
"vmid": vmid
}
res = requests.get(des_url, params=params,
headers={"Content-Type": "application/x-www-form-urlencoded; charset=UTF-8",
"Cookie": cookie})
#print(res.json())
rows = res.json().get("rows")
for row in rows:
if keyword in row.get("ip"):
result += 1
return result
def check_query_vm_ip_ok(response, keyword):
cmp_num = search_vmip_list(keyword=keyword)
rows = response.json().get("rows")
#print(cmp_num, "hhhhhhh")
#print(len(rows))
assert len(rows) == cmp_num
def check_reboot_vm_ok(response):
assert response.json().get("code") == 1
def check_pause_forceStop_stop_ok(response, state, hostip):
vmid = response.json().get("id")
username = "root"
password = "user@dev"
ip = hostip
cmd = 'virsh list --all|grep {0}|grep -E "{1}"|wc -l'.format(state, vmid)
flag = False
eventlet.monkey_patch()
with eventlet.Timeout(180, False):
while 1:
time.sleep(0.5)
ret = sshClient.tunction(ip=ip, username=username, password=password, cmd=cmd)
if int(ret):
# print(1111)
flag = True
break
assert flag
des_url = "http://172.16.130.254:38080/networkCard/query.do"
params = {
"order": "asc",
"offset": 0,
"limit": 20,
"searchtype": 0,
"keyword": None,
"state": None,
}
cookie = login()
res = requests.get(des_url, params=params,
headers={"Content-Type": "application/x-www-form-urlencoded; charset=UTF-8",
"Cookie": cookie})
# print(res.json())
rows = res.json().get("rows")
if state == "shut":
st = "stopped"
elif state == "paused":
st = state
else:
st = "running"
for row in rows:
if row.get("vmid") == vmid:
assert row.get("state") == st
def check_all_vm_stop(response, ids):
username = "root"
password = "user@dev"
ip = "172.16.130.254"
for i in ids.split(","):
cmd = 'kubectl get vm|grep {0}|grep -i shut|wc -l'.format(i)
flag = False
eventlet.monkey_patch()
with eventlet.Timeout(180, False):
while 1:
time.sleep(0.5)
ret = sshClient.tunction(ip=ip, username=username, password=password, cmd=cmd)
if not (int(ret) - 3):
# print(1111)
flag = True
break
assert flag
def check_cloudDisk_add_ok(response, template=0):
id = response.json().get("id")
username = "root"
password = "user@dev"
ip = "172.16.130.254"
if template:
cmd = 'find /var/lib/libvirt/cstor/ -name {0}|wc -l'.format(id)
else:
cmd = 'kubectl get vmd|grep {0}|wc -l'.format(id)
flag = False
eventlet.monkey_patch()
with eventlet.Timeout(180, False):
while 1:
time.sleep(0.5)
ret = sshClient.tunction(ip=ip, username=username, password=password, cmd=cmd)
if int(ret):
# print(1111)
flag = True
break
assert flag
def check_cloudDiskLoad_or_unload_ok(response, vmid, volumeid, typee=1):
username = "root"
password = "user@dev"
ip = "172.16.130.254"
cmd = 'kubectl get vm {0} -o yaml|grep {1}|wc -l'.format(vmid, volumeid)
flag = False
eventlet.monkey_patch()
with eventlet.Timeout(180, False):
while 1:
time.sleep(0.5)
ret = sshClient.tunction(ip=ip, username=username, password=password, cmd=cmd)
if typee:
if int(ret):
# print(1111)
flag = True
break
else:
if not int(ret):
# print(1111)
flag = True
break
assert flag
def check_cloudDisk_queryImageserver_ok(response):
rows = response.json().get("rows")
for row in rows:
assert row.get("state") == 1
def check_cloudDisk_snapshot_add_ok(response):
id = response.json().get('id')
username = "root"
password = "user@dev"
ip = "172.16.130.254"
cmd = 'kubectl get vmd|grep {0}|wc -l'.format(id)
flag = False
eventlet.monkey_patch()
with eventlet.Timeout(180, False):
while 1:
time.sleep(0.5)
ret = sshClient.tunction(ip=ip, username=username, password=password, cmd=cmd)
if int(ret):
flag = True
break
assert flag
def check_cloudDisk_setQos_ok(response, vmid, rx, tx):
assert response.json().get("id")
username = "root"
password = "user@dev"
ip = "172.16.130.254"
for i in [rx, tx]:
        cmd = "kubectl get vm {0} -o yaml|grep 'text: {1}'|wc -l".format(vmid, i*1024*1024)
flag = False
eventlet.monkey_patch()
with eventlet.Timeout(180, False):
while 1:
time.sleep(0.5)
ret = sshClient.tunction(ip=ip, username=username, password=password, cmd=cmd)
if int(ret):
flag = True
break
assert flag
def check_cloudDisk_cancleQos_ok(response, vmid):
assert response.json().get("id")
username = "root"
password = "user@dev"
ip = "172.16.130.254"
cmd = "kubectl get vm {0} -o yaml|grep -E 'write|read'|wc -l".format(vmid)
flag = False
eventlet.monkey_patch()
with eventlet.Timeout(180, False):
while 1:
time.sleep(0.5)
ret = sshClient.tunction(ip=ip, username=username, password=password, cmd=cmd)
if int(ret):
flag = True
break
assert flag
def check_cloudDisk_expandVol_ok(response, installpath, size, hostip):
assert response.json().get("id")
username = "root"
password = "user@dev"
ip = hostip
cmd = "qemu-img info %s|grep virtual|awk '{print $3}'" % (installpath,)
flag = False
eventlet.monkey_patch()
with eventlet.Timeout(180, False):
while 1:
time.sleep(0.5)
ret = sshClient.tunction(ip=ip, username=username, password=password, cmd=cmd)
if str(ret) == size:
flag = True
break
assert flag
# This function was meant to verify the list of selectable physical machines for storage migration, but the backend does not pass clusterid, so it cannot be verified here
def verify_query_cluster_all_phymachine_ok(response):
pass
def check_cloudDisk_migrate_ok(response, installpath, pmip, msurl, msname):
cloudDiskId = response.json().get("resourceIds")[0]
username = "root"
password = "user@dev"
ip = pmip
cmd = "kubectl get vmd|grep %s|awk '{print $3}'" % (cloudDiskId,)
flag = False
eventlet.monkey_patch()
with eventlet.Timeout(180, False):
while 1:
time.sleep(0.5)
ret = sshClient.tunction(ip=ip, username=username, password=password, cmd=cmd)
if msurl in str(ret) and (msurl not in installpath):
flag = True
break
assert flag
des_url = "http://172.16.130.254:38080/cloudDisk/query.do"
params = {
"order": "asc",
"offset": 0,
"limit": 20,
"searchtype": 0,
"keyword": None,
"state": None,
}
cookie = login()
res = requests.get(des_url, params=params,
headers={"Content-Type": "application/x-www-form-urlencoded; charset=UTF-8",
"Cookie": cookie})
# print(res.json())
rows = res.json().get("rows")
for row in rows:
if row.get("volumeid") == cloudDiskId:
assert row.get("msname") == msname
break
def check_query_cloudHost_loadable_or_unloadable_disk_ok(response, vmid, load=1):
if load:
sql_result = Query()("SELECT * FROM `cl_volume_inf` where STATE = 0 and VMID is null;")
else:
sql_result = Query()('SELECT * FROM `cl_volume_inf` where VMID="{0}" and TYPE=2;'.format(vmid,))
sql_volid_list = [x.get("VOLUMEID") for x in sql_result]
json_volid_list = [x.get("volumeid") for x in response.json().get("rows")]
assert len(sql_volid_list) == len(json_volid_list)
for volid in sql_volid_list:
assert volid in json_volid_list
def check_cloudHost_setHa_ok(response, vmid, hostip, cancle=0):
username = "root"
password = "user@dev"
ip = hostip
cmd = 'kubectl get vm {0} -o yaml|grep -w ha|wc -l'.format(vmid)
flag = False
eventlet.monkey_patch()
with eventlet.Timeout(180, False):
while 1:
time.sleep(0.5)
ret = sshClient.tunction(ip=ip, username=username, password=password, cmd=cmd)
if not cancle:
if int(ret):
flag = True
break
else:
if not int(ret):
flag = True
break
assert flag
def check_cloudHost_makeSnapshot_ok(response, vmid, hostip):
id = response.json().get("id")
assert id
username = "root"
password = "user@dev"
ip = hostip
cmd = 'kubectl get vmd|grep {0}|wc -l'.format(vmid)
flag = False
eventlet.monkey_patch()
with eventlet.Timeout(180, False):
while 1:
time.sleep(0.5)
ret = sshClient.tunction(ip=ip, username=username, password=password, cmd=cmd)
if int(ret):
flag = True
break
assert flag
def check_makeVMimage_ok(response, hostip):
id = response.json().get("id")
assert id
username = "root"
password = "user@dev"
ip = hostip
cmd = 'find / -name {0}|wc -l'.format(id)
flag = False
eventlet.monkey_patch()
with eventlet.Timeout(180, False):
while 1:
time.sleep(0.5)
ret = sshClient.tunction(ip=ip, username=username, password=password, cmd=cmd)
if int(ret):
flag = True
break
assert flag
def check_modify_cpu_num_ok(response, cpunum_new, hostip):
id = response.json().get("id")
assert id
username = "root"
password = "user@dev"
ip = hostip
cmd = "virsh vcpucount %s|grep current|awk '{print $3}'|tail -1" % (id,)
flag = False
eventlet.monkey_patch()
with eventlet.Timeout(180, False):
while 1:
time.sleep(0.5)
ret = sshClient.tunction(ip=ip, username=username, password=password, cmd=cmd)
if int(ret) == cpunum_new:
flag = True
break
assert flag
def check_modify_mem_ok(response, memorysize, hostip):
#print(11111111111111111111111111111111111111111)
#print(response.json())
id = response.json().get("id")
#print("this is id....", id)
assert id
username = "root"
password = "user@dev"
ip = hostip
cmd = "virsh dominfo %s|grep Use|awk '{print $3}'" % (id,)
flag = False
eventlet.monkey_patch()
with eventlet.Timeout(180, False):
while 1:
time.sleep(0.5)
ret = sshClient.tunction(ip=ip, username=username, password=password, cmd=cmd)
if int(int(ret)/(1024*1024)) == memorysize:
flag = True
break
assert flag
def check_query_cmrom_iso(response, vmid):
mirrorid_list = Query()('SELECT MIRRORID FROM `cl_mirror_inf` WHERE status=1 and MFORMAT="iso" AND '
'DOMAINID=(SELECT DOMAINID FROM `cl_vm_inf` WHERE VMID="{0}") '
'AND MIRRORID NOT IN (SELECT ISOID FROM `cl_vmcdrom_inf` WHERE'
' VMID="{1}")'.format(vmid, vmid))
rows = response.json().get("rows")
assert len(mirrorid_list) == len(rows)
for row in rows:
assert row.get("mirrorid") in mirrorid_list
def check_addCdrom_ok(vmid, mirrorid, hostip):
username = "root"
password = "user@dev"
ip = hostip
cmd = "kubectl get vm {0} -o yaml|grep {1}.iso|wc -l".format(vmid, mirrorid)
flag = False
eventlet.monkey_patch()
with eventlet.Timeout(180, False):
while 1:
time.sleep(0.5)
ret = sshClient.tunction(ip=ip, username=username, password=password, cmd=cmd)
print("this is flag...", flag)
if int(ret):
flag = True
break
assert flag
def check_changeBootSequence_ok(response, vmid, bootSeq, hostip):
assert response.json().get("id")
username = "root"
password = "user@dev"
ip = hostip
cmd = "kubectl get vm {0} -o yaml|grep order|cut -d: -f 2".format(vmid, )
flag = False
eventlet.monkey_patch()
with eventlet.Timeout(180, False):
while 1:
time.sleep(0.5)
ret = sshClient.tunction(ip=ip, username=username, password=password, cmd=cmd)
ret = ret.decode("utf-8").replace("\n", "").replace(" ", "")
if bootSeq == 1:
if ret == "12":
flag = True
break
elif bootSeq == 2:
if ret == "21":
flag = True
break
assert flag
def check_changeSystem_querySystem_ok(response, vmid):
mirrorid_list = Query()('SELECT MIRRORID FROM `cl_mirror_inf` WHERE status=1 and MFORMAT!="iso" AND '
'DOMAINID=(SELECT DOMAINID FROM `cl_vm_inf` WHERE VMID="{0}") '
'AND MIRRORID NOT IN (SELECT ISOID FROM `cl_vmcdrom_inf` WHERE'
' VMID="{1}")'.format(vmid, vmid))
rows = response.json().get("rows")
assert len(mirrorid_list) == len(rows)
for row in rows:
assert row.get("mirrorid") in mirrorid_list
def check_changeOs_ok(response, template_url, rootvolumeid, hostip):
username = "root"
password = "user@dev"
ip = hostip
cmd = "diff %s `kubectl get vmd %s|tail -1|awk '{print $3}'`|wc -l" % (template_url, rootvolumeid)
flag = False
eventlet.monkey_patch()
with eventlet.Timeout(30, False):
while 1:
time.sleep(0.5)
ret = sshClient.tunction(ip=ip, username=username, password=password, cmd=cmd)
print("this is flag...", flag)
if not int(ret):
flag = True
break
assert flag
def check_delete_mirror_all_ok(response):
print(response.json())
def check_delete_mirrorServer_ok(response, mirrorServerId):
print(response.json())
username = "root"
password = "user@dev"
ip = "172.16.130.254"
cmd = "kubectl get vmp|grep {0}|wc -l".format(mirrorServerId)
flag = False
eventlet.monkey_patch()
with eventlet.Timeout(30, False):
while 1:
time.sleep(0.5)
ret = sshClient.tunction(ip=ip, username=username, password=password, cmd=cmd)
print("this is flag...", flag)
if not int(ret):
flag = True
break
assert flag
def check_delete_all_resource_ok(response, flag="vm"):
username = "root"
password = "user@dev"
ip = "172.16.130.254"
ids = response.json().get("id")
ids_list = ids.split(",")
    for id in ids_list:
        if flag == "vm":
            cmd = "kubectl get vm|grep {0}|wc -l".format(id)
        else:
            cmd = "kubectl get vmp|grep {0}|wc -l".format(id)
        # use a separate variable so the 'flag' argument ("vm"/"vmp") is not overwritten inside the loop
        deleted = False
        eventlet.monkey_patch()
        with eventlet.Timeout(30, False):
            while 1:
                time.sleep(0.5)
                ret = sshClient.tunction(ip=ip, username=username, password=password, cmd=cmd)
                if not int(ret):
                    deleted = True
                    break
        assert deleted
def check_delete_net(response, l2vmn_num=2):
username = "root"
password = "user@dev"
ip = "172.16.130.254"
cmd = "kubectl get vmn|grep l2network|wc -l"
flag = False
eventlet.monkey_patch()
with eventlet.Timeout(30, False):
while 1:
time.sleep(0.5)
try:
ret = sshClient.tunction(ip=ip, username=username, password=password, cmd=cmd)
ret = int(ret)
except Exception:
flag = True
break
if int(l2vmn_num) - ret == 2:
flag = True
break
assert flag
#l2vmn check
def check_creat_net_ok(response, l2vmn_num=0):
username = "root"
password = "user@dev"
ip = "172.16.130.254"
cmd = "kubectl get vmn|grep l2network|wc -l"
flag = False
eventlet.monkey_patch()
with eventlet.Timeout(30, False):
while 1:
time.sleep(0.5)
ret = sshClient.tunction(ip=ip, username=username, password=password, cmd=cmd)
if int(ret) - int(l2vmn_num) == 2:
flag = True
break
assert flag
def check_creat_l3_net_ok(response):
id = response.json().get("id")
assert id
username = "root"
password = "user@dev"
ip = "172.16.130.254"
cmd = "kubectl get vmn|grep {0}|wc -l".format(id)
flag = False
eventlet.monkey_patch()
with eventlet.Timeout(30, False):
while 1:
time.sleep(0.5)
ret = sshClient.tunction(ip=ip, username=username, password=password, cmd=cmd)
if int(ret):
flag = True
break
assert flag
def check_creat_vxlanPool_ok(response, pool_name):
#print(response)
try:
code = response.json().get("code")
if "1" in pool_name:
assert -1 == code
else:
assert 1 == code
except Exception as e:
print(e)
assert True
def check_add_vxlan_vni_ok(response, flag):
print(response.json())
if 3 == flag:
assert response.json().get("code") == 1
if 2 == flag:
assert response.json().get("code") == -1
if 1 == flag:
assert response.json().get("code") == -1
def check_delete_vni_range_ok(response, vni_list, vnistart, endvni):
    code = response.json().get("code")
    for vni in vni_list.split(","):
        # vni comes from a comma-separated string, so convert it before the range check
        if int(vni) in range(int(vnistart), int(endvni) + 1):
            assert -1 == code
            return
    assert 1 == code
def check_delete_vxlan_net_ok(response, vni, vxlan_clusterid_list):
assert response.json().get("code") == 1
#print(vxlan_clusterid_list)
#print(7777777777777777777777777)
try:
vxlan_clusterid_list = json.loads(vxlan_clusterid_list)
except Exception:
vxlan_clusterid_list = tuple(vxlan_clusterid_list.split(","))
#print(vxlan_clusterid_list)
#print(66666666666666)
if len(vxlan_clusterid_list) > 1:
sql_cmd = 'SELECT HOSTIP FROM `cl_host_inf` WHERE STATE=1 AND DELETED=0 AND `STATUS`="Ready" and CLUSTERID IN {0};'.format(str(vxlan_clusterid_list))
else:
sql_cmd = 'SELECT HOSTIP FROM `cl_host_inf` WHERE STATE=1 AND DELETED=0 AND `STATUS`="Ready" and CLUSTERID="{0}";'.format(vxlan_clusterid_list[0])
#print(sql_cmd)
#print(555555555555555555555)
result = Query()(sql_cmd)
ip_list = []
for re in result:
ip_list.append(re.get("HOSTIP"))
username = "root"
password = "user@dev"
for ip in ip_list:
cmd = "ovs-vsctl list-br|grep vx{0}|wc -l".format(vni)
flag = False
eventlet.monkey_patch()
with eventlet.Timeout(30, False):
while 1:
time.sleep(0.1)
ret = sshClient.tunction(ip=ip, username=username, password=password, cmd=cmd)
if not int(ret):
flag = True
break
assert flag
def check_modify_l3network_mtu(response, mtu):
id = response.json().get("id")
cmd = "ovn-nbctl dhcp-options-get-options `ovn-nbctl show %s|grep dhcp|awk -F\"-\" '{print $3\"-\"$4\"-\"$5\"-\"$6\"-\"$7}'`|grep mtu|cut -d\"=\" -f2" % (id,)
username = "root"
password = "user@dev"
ip = "172.16.130.254"
flag = False
eventlet.monkey_patch()
with eventlet.Timeout(30, False):
while 1:
time.sleep(0.5)
ret = sshClient.tunction(ip=ip, username=username, password=password, cmd=cmd)
if int(ret) == int(mtu):
flag = True
break
assert flag
def check_l3network_add_dns(response, mtu, rows, nid, dns_addr):
cmd = "ovn-nbctl dhcp-options-get-options `ovn-nbctl show %s|grep dhcp|awk -F\"-\" '{print $3\"-\"$4\"-\"$5\"-\"$6\"-\"$7}'`|grep -E 'mtu|dns'|sed ':a;N;s/\n/\t/;ba;'" % (nid,)
dns_list = [row.get("dns") for row in rows]
re_mtu = 0
re_dns_list = []
username = "root"
password = "user@dev"
ip = "172.16.130.254"
flag = False
eventlet.monkey_patch()
with eventlet.Timeout(30, False):
while 1:
time.sleep(0.2)
ret = sshClient.tunction(ip=ip, username=username, password=password, cmd=cmd)
tp_str = ret.split()
for i in tp_str:
if "mtu" in i:
re_mtu = int(i.split("=")[1])
elif "dns" in i:
if "," in i:
re_dns_list = i[12:-1].split(",")
else:
re_dns_list.append(i.split("=")[1])
assert int(mtu) == re_mtu
assert dns_addr in re_dns_list
flag_2 = True
for dns in dns_list:
if dns not in re_dns_list:
flag_2 = False
break
if flag_2:
flag = True
break
assert flag
def check_vpc_network_add_ok(response):
id = response.json().get("id")
assert id
cmd = "kubectl get vmn|grep {0}|wc -l".format(id,)
username = "root"
password = "user@dev"
ip = "172.16.130.254"
flag = False
eventlet.monkey_patch()
with eventlet.Timeout(40, False):
while 1:
time.sleep(0.5)
ret = sshClient.tunction(ip=ip, username=username, password=password, cmd=cmd)
if 1 == int(ret):
flag = True
break
assert flag
def check_vpc_router_stop_or_start(response):
id = response.json().get("id")
cmd = "kubectl get vm|grep {0}|grep -i shut|wc -l".format(id, )
username = "root"
password = "user@dev"
ip = "172.16.130.254"
flag = False
eventlet.monkey_patch()
with eventlet.Timeout(100, False):
while 1:
time.sleep(0.5)
ret = sshClient.tunction(ip=ip, username=username, password=password, cmd=cmd)
if 1 == int(ret):
flag = True
break
assert flag
def check_setConsolePasswd_ok(response, hostip, passwd=None):
id = response.json().get("id")
if passwd:
cmd = 'cat /tmp/%s.xml |grep passwd|awk -F"passwd=" \'{print $2}\'|cut -d"\"" -f2' % (id,)
else:
cmd = 'cat /tmp/%s.xml |grep passwd|wc -l' % (id,)
username = "root"
password = "user@dev"
ip = hostip
flag = False
eventlet.monkey_patch()
with eventlet.Timeout(30, False):
while 1:
time.sleep(0.5)
ret = sshClient.tunction(ip=ip, username=username, password=password, cmd=cmd)
if passwd:
if ret == str(passwd):
flag = True
break
else:
if not int(ret):
flag = True
break
assert flag
def check_modifyCpuNum_ok(response, hostip, cpunum):
id = response.json().get("id")
cmd = "virsh vcpucount %s|grep current|grep live|awk '{print $3}'" % (id,)
username = "root"
password = "user@dev"
ip = hostip
flag = False
eventlet.monkey_patch()
with eventlet.Timeout(30, False):
while 1:
time.sleep(0.5)
ret = sshClient.tunction(ip=ip, username=username, password=password, cmd=cmd)
if int(ret) == int(cpunum):
flag = True
break
assert flag
def check_modifyVpcMem_ok(response, memory, hostip):
id = response.json().get("id")
cmd = "virsh dominfo %s|grep 'Used mem'|awk '{print $3}'" % (id,)
username = "root"
password = "user@dev"
ip = hostip
flag = False
eventlet.monkey_patch()
with eventlet.Timeout(30, False):
while 1:
time.sleep(0.5)
ret = sshClient.tunction(ip=ip, username=username, password=password, cmd=cmd)
if int(ret)/(1024*1024) == int(memory):
flag = True
break
assert flag
| 31.8917
| 180
| 0.564061
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 6,105
| 0.192314
|
a72993531283fe9cd45b23f3481f393933bdc390
| 15,777
|
py
|
Python
|
main.py
|
chilipolygon/Amazon-Requests-Module
|
20fcfa9b9764e097bc107aa9dc5b0db772ce3ad9
|
[
"Apache-2.0"
] | 3
|
2022-01-18T20:54:08.000Z
|
2022-02-05T23:27:13.000Z
|
main.py
|
chilipolygon/Amazon-Requests-Module
|
20fcfa9b9764e097bc107aa9dc5b0db772ce3ad9
|
[
"Apache-2.0"
] | null | null | null |
main.py
|
chilipolygon/Amazon-Requests-Module
|
20fcfa9b9764e097bc107aa9dc5b0db772ce3ad9
|
[
"Apache-2.0"
] | null | null | null |
# ---------------------
from bs4 import BeautifulSoup as bs
import requests
import urllib3
import urllib
from urllib.parse import unquote
import re
import os
import sys
import json
import time
from colorama import Fore, init
from pprint import pprint
from datetime import datetime
import uuid
import threading
# ----------------------
from dhooks import Webhook, Embed
# ---------------------
init(autoreset=True)
urllib3.disable_warnings()
os.system('cls' if os.name == 'nt' else 'clear')
# ---------------------
# MUST HAVE PRIME
# MUST HAVE ONE CLICK
# MUST SELECT "Keep me signed in"
# MUST USE AGED ACCOUNT
# ====================================
# MUST HAVE THESE FOR BEST SUCCESS
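# ---------------------
# The two ./appdata files below are read by this script; their exact layout is an
# assumption inferred from how the code accesses them (cookies.json entries expose
# "name"/"value" keys, config.json exposes "webhook" and "account"). This is a
# minimal sketch for illustration, not an official format:
#
#   ./appdata/cookies.json
#   {
#       "session-id": {"name": "session-id", "value": "133-0000000-0000000"},
#       "ubid-main":  {"name": "ubid-main",  "value": "130-0000000-0000000"},
#       "at-main":    {"name": "at-main",    "value": "Atza|EXAMPLE"}
#   }
#
#   ./appdata/config.json
#   {
#       "webhook": "https://discord.com/api/webhooks/...",
#       "account": "myaccount.json"
#   }
# ---------------------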
class main:
def __init__(self, sku, code, account) -> None:
self.account = account
f = open(f'./appdata/cookies.json')
self.cookies = json.load(f)
self.sku = sku
self.code = code
print(Fore.WHITE + f"Session: {self.account} || " +
Fore.YELLOW + '[+] Making Session')
self.session = requests.Session()
print(Fore.WHITE + f"Session: {self.account} || " +
Fore.YELLOW + '[+] Fetching Cookies')
for cookie in self.cookies:
self.session.cookies.set(
self.cookies[cookie]['name'], self.cookies[cookie]['value'])
self.productPage()
def productPage(self):
print(Fore.WHITE + f"Session: {self.account} || " +
Fore.YELLOW + '[+] Getting Product Page')
self.asin_page = self.session.get(
'https://smile.amazon.com/dp/' + str(self.sku),
headers={
"User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/93.0.4577.82 Safari/537.36"}
)
print(Fore.WHITE + f"Session: {self.account} || " +
Fore.YELLOW + '[*] Getting Product Price:', end=" ")
soup = bs(self.asin_page.text, "lxml")
self.og_price = soup.find(
'span', {'class': 'a-offscreen'}).getText().strip()
print(f'{self.og_price}')
print(Fore.WHITE + f"Session: {self.account} || " +
Fore.YELLOW + '[*] Getting Session ID:', end=" ")
self.session_id = self.asin_page.text.split(
'id="session-id" name="session-id" value="')[1].split('"')[0]
print(f'{self.session_id}')
try:
print(Fore.WHITE + f"Session: {self.account} || " +
Fore.YELLOW + '[*] Getting Offer Id:', end=" ")
self.offerListingId = re.search(
"&offerListingId=(.*?)\&", self.asin_page.text).group(1)
print(f'{self.offerListingId}')
            self.promoPage()  # if we find an offerListingId, it means the listing has an UNREDEEMED coupon
except Exception as e: # This error will occur when the coupon is redeemed OR there is no coupon
print(Fore.RED + '[-] Coupon Clipped')
self.addToCart()
pass
def promoPage(self):
headers = {
"User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/93.0.4577.82 Safari/537.36",
'sec-fetch-dest': 'document',
'sec-fetch-mode': 'navigate',
'sec-fetch-site': 'none',
'sec-fetch-user': '?1',
'upgrade-insecure-requests': '1',
'accept-encoding': 'gzip, deflate, br',
}
self.productPage = self.session.get(
f'https://smile.amazon.com/gp/aod/ajax/ref=auto_load_aod?asin={self.sku}', headers=headers)
print(Fore.WHITE + f"Session: {self.account} || " +
Fore.YELLOW + '[+] Getting Promo Object')
self.promoObj = {
'promoId': re.search("&promotionId=(.*?)\&", self.productPage.text).group(1),
'merchantID': re.search(";seller=(.*?)\&", self.productPage.text).group(1),
'sku': re.search("&sku=(.*?)\&", self.productPage.text).group(1),
'anti-csrftoken-a2z': re.search("&anti-csrftoken-a2z=(.*?)\'", self.productPage.text).group(1)
}
for i in self.promoObj:
print(Fore.WHITE + f"Session: {self.account} || " + Fore.YELLOW +
f'[*] {i.title()}: ' + Fore.WHITE + f'{self.promoObj[i]}')
self.clipCoupon()
# ---------------------
def clipCoupon(self):
headers = {
'anti-csrftoken-a2z': unquote(self.promoObj['anti-csrftoken-a2z']),
'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/93.0.4577.82 Safari/537.36',
'Sec-Fetch-Site': 'same-origin',
'Sec-Fetch-Mode': 'cors',
'Sec-Fetch-Dest': 'empty',
'x-requested-with': 'XMLHttpRequest',
'referer': f'https://www.amazon.com/dp/{self.sku}'
}
params = {
'promotionId': self.promoObj['promoId'],
'asin': self.sku,
'offerListingId': self.offerListingId,
'sku': self.promoObj['sku'],
'anti-csrftoken-a2z': unquote(self.promoObj['anti-csrftoken-a2z']),
'source': 'dp_cxcw'
}
print(Fore.WHITE + f"Session: {self.account} || " +
Fore.YELLOW + '[+] Clipping Coupon')
promoUrl = f'https://www.amazon.com/promotion/redeem/?{urllib.parse.urlencode(params)}'
while True:
clipCoupon = self.session.get(promoUrl, headers=headers)
if 'SUCCESS' in clipCoupon.text:
print(
Fore.WHITE + f"Session: {self.account} || " + Fore.GREEN + '[+] Coupon Clipped')
break
self.addToCart()
def addToCart(self):
headers = {
'Connection': 'keep-alive',
'sec-ch-ua': '" Not;A Brand";v="99", "Google Chrome";v="91", "Chromium";v="91"',
'x-amz-checkout-entry-referer-url': 'https://smile.amazon.com/dp/' + self.sku,
'x-amz-turbo-checkout-dp-url': 'https://smile.amazon.com/dp/' + self.sku,
'sec-ch-ua-mobile': '?0',
'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/93.0.4577.82 Safari/537.36',
'x-amz-support-custom-signin': '1',
'x-amz-checkout-csrf-token': self.session_id,
'Origin': 'https://smile.amazon.com',
'Sec-Fetch-Site': 'same-origin',
'Sec-Fetch-Mode': 'cors',
'Sec-Fetch-Dest': 'empty',
'Referer': 'https://smile.amazon.com/dp/' + self.sku
}
payload = {
'addressID': 'nmqgnomolpkq',
'isAsync': '1',
'quantity.1': '1',
}
print(Fore.WHITE + f"Session: {self.account} || " +
Fore.YELLOW + '[+] Adding To Cart')
while True:
try:
self.session_atc = self.session.post(
f'https://smile.amazon.com/checkout/turbo-initiate?ref_=dp_start-bbf_1_glance_buyNow_2-1&referrer=detail&pipelineType=turbo&clientId=retailwebsite&weblab=RCX_CHECKOUT_TURBO_DESKTOP_PRIME_87783&temporaryAddToCart=1&asin.1={self.sku}',
data=payload, headers=headers
)
break
            except requests.exceptions.RequestException:  # the original except clause was not a valid exception class; retry on request errors instead
print(Fore.WHITE + f"Session: {self.account} || " +
Fore.RED + '[-] Error Adding To Cart', end=" ")
time.sleep(1)
print(
Fore.WHITE + f"Session: {self.account} || " + Fore.RED + '[-] Retrying', end=" ")
print(
Fore.WHITE + f"Session: {self.account} || " + Fore.GREEN + '[+] Added to Cart')
checkout_url_tuple = re.search(
'\/(.*)shipmentId=(.*)\d', self.session_atc.text).group(0)
self.checkout_url_str = ''.join(checkout_url_tuple)
print(Fore.WHITE + f"Session: {self.account} || " +
Fore.YELLOW + '[*] Getting PID:', end=" ")
self.pid = re.search(
"pid=(.*?)\&", str(self.checkout_url_str)).group(1)
print(f'{self.pid}')
print(Fore.WHITE + f"Session: {self.account} || " +
Fore.YELLOW + '[*] Getting Anti CSRF:', end=" ")
self.AntiCSRF = re.search(
"anti-csrftoken-a2z'.value='(.*?)\'", str(self.session_atc.text)).group(1)
print(f'{self.AntiCSRF}') # use this to checkout
print(Fore.WHITE + f"Session: {self.account} || " +
Fore.YELLOW + '[*] Getting SID:', end=" ")
self.sid = re.search(
"'CacheDetection.RequestID': \"(.*?)\",", self.session_atc.text).group(1)
print(f'{self.sid}')
if not self.code: # check if there is no code
print(
Fore.WHITE + f"Session: {self.account} || " + Fore.RED + '[-] No Code Found')
self.checkSummary()
else:
self.claimCode()
def claimCode(self):
        if not self.code:
            return
        else:
headers = {
'Connection': 'keep-alive',
'sec-ch-ua': '" Not;A Brand";v="99", "Google Chrome";v="91", "Chromium";v="91"',
'x-amz-checkout-entry-referer-url': 'https://smile.amazon.com/dp/' + self.sku,
'anti-csrftoken-a2z': self.AntiCSRF,
'sec-ch-ua-mobile': '?0',
'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/93.0.4577.82 Safari/537.36',
'x-amz-checkout-csrf-token': self.session_id,
'Origin': 'https://smile.amazon.com',
'Sec-Fetch-Site': 'same-origin',
'Sec-Fetch-Mode': 'cors',
'Sec-Fetch-Dest': 'empty',
'Referer': 'https://smile.amazon.com/checkout/pay?pid=' + self.pid + '&pipelineType=turbo&clientId=retailwebsite&temporaryAddToCart=1&hostPage=detail&weblab=RCX_CHECKOUT_TURBO_DESKTOP_PRIME_87783'
}
payload = {
'claimcode': self.code,
'isClientTimeBased': '1'
}
print(
Fore.WHITE + f"Session: {self.account} || " + Fore.YELLOW + '[*] Applying Code')
claimurl = f'https://smile.amazon.com/checkout/pay/add-gc-promo?ref_=chk_pay_addGcPromo&referrer=pay&temporaryAddToCart=1&hostPage=detail&weblab=RCX_CHECKOUT_TURBO_DESKTOP_PRIME_87783&_srcRID={self.sid}&clientId=retailwebsite&pipelineType=turbo&pid={self.pid}'
claim = self.session.post(
claimurl, headers=headers, data=payload, allow_redirects=True)
with open("./html/claimCode.html", "w", encoding='utf-8') as f:
f.write(claim.text)
self.checkSummary()
def checkSummary(self):
headers = {
'sec-fetch-dest': 'document',
'sec-fetch-mode': 'navigate',
'sec-fetch-site': 'none',
'sec-fetch-user': '?1',
'upgrade-insecure-requests': '1',
'user-agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/93.0.4577.82 Safari/537.36'
}
print(Fore.WHITE + f"Session: {self.account} || " +
Fore.GREEN + '[+] Reviewing Summary')
summaryUrl = f'https://www.amazon.com/checkout/ordersummary?ref_=chk_spc_select__summary&referrer=spc&pid={self.pid}&pipelineType=turbo&clientId=retailwebsite&temporaryAddToCart=1&hostPage=detail&weblab=RCX_CHECKOUT_TURBO_DESKTOP_PRIME_87783'
summary = self.session.get(summaryUrl, headers=headers)
soup = bs(summary.text, "lxml")
self.finalPrice = soup.find(
'td', {'class': 'a-color-price a-text-right a-align-bottom a-text-bold a-nowrap'}).getText().strip()
print(Fore.WHITE + f"Session: {self.account} || " + Fore.YELLOW +
'[+] Order Total: ' + Fore.WHITE + f'{self.finalPrice}')
self.checkout()
def checkout(self):
print(Fore.WHITE +
f"Session: {self.account} || " + Fore.GREEN + '[+] Checking Out')
headers = {
'accept-encoding': 'gzip, deflate, br',
'accept-language': 'en-US,en;q=0.9',
'sec-ch-ua-mobile': '?0',
'sec-fetch-dest': 'document',
'sec-fetch-mode': 'cors',
'sec-fetch-site': 'same-origin',
'sec-fetch-user': '?1',
'upgrade-insecure-requests': '1',
'user-agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/93.0.4577.82 Safari/537.36',
'anti-csrftoken-a2z': self.AntiCSRF
}
payload = {
'x-amz-checkout-csrf-token': self.session_id,
'ref_': 'chk_summary_placeOrder',
'referrer': 'summary',
'pid': self.pid,
'pipelineType': 'turbo',
'clientId': 'retailwebsite',
'temporaryAddToCart': 1,
'hostPage': 'detail',
'weblab': 'RCX_CHECKOUT_TURBO_DESKTOP_PRIME_87783',
'isClientTimeBased': 1
}
params = {
'ref_': 'chk_summary_placeOrder',
'_srcRID': self.sid,
'clientId': 'retailwebsite',
'pipelineType': 'turbo',
'pid': self.pid
}
print(Fore.WHITE + f"Session: {self.account} || " +
Fore.YELLOW + '[*] Status: ', end=' ')
checkoutUrl = f'https://www.amazon.com/checkout/spc/place-order?{urllib.parse.urlencode(params)}'
checkout = self.session.post(
checkoutUrl, data=payload, headers=headers)
if checkout.status_code == 200:
print(Fore.GREEN + 'Success')
self.sendWebhook(self.sku, self.finalPrice)
else:
print(f'something went wrong {checkout.text}')
def sendWebhook(self, sku, finalPrice):
soup = bs(self.asin_page.text, "lxml")
title = soup.find('span', {'id': 'productTitle'}).getText().strip()
        a = soup.find('div', {'id': 'imgTagWrapperId'})
        # guard against a missing image block so 'img' is always defined
        img = a.img['src'] if a and a.img else None
price = soup.find('span', {'class': 'a-offscreen'}).getText().strip()
product_url = f'https://www.amazon.com/dp/{sku}?tag=Chili'
f = open('./appdata/config.json')
data = json.load(f)
url = data['webhook']
hook = Webhook(url)
embed = Embed(
color=0x8AFF8A,
timestamp='now'
)
embed.set_title(
title='🎉Successful Checkout')
embed.set_thumbnail(img)
embed.add_field(
name='Item', value=f'[{title}]({product_url})', inline=False)
embed.add_field(name='Original Price', value=f'{price}', inline=False)
embed.add_field(name='Check Out Price',
value=f'{finalPrice}', inline=False)
embed.add_field(
name='Account', value=f'||{self.account.replace(".json", "")}||', inline=False)
embed.set_footer(
text='Made by #chili9999')
print(Fore.GREEN + '[+] Sending Webhook')
hook.send(embed=embed)
def callback(account: str):
sku = input('Put in a product asin:')
promo = input('Put in a product promo code, if none, press Enter:')
threads = []
threads.append(threading.Thread(
target=main, args=[sku, promo, account]))
for thread in threads:
thread.start()
time.sleep(.1)
for thread in threads:
thread.join()
if __name__ == "__main__":
f = open(f'./appdata/config.json')
account = json.load(f)['account']
callback(account)
# asin, promo code, email
# if you don't have a promocode, leave it as ''
| 41.518421
| 272
| 0.548647
| 14,473
| 0.917174
| 0
| 0
| 0
| 0
| 0
| 0
| 7,060
| 0.447402
|
a72b62dfb661d28b942c1bbe2cd44f6d11909efd
| 10,504
|
py
|
Python
|
tests/test_word_distance.py
|
hasibaasma/alfpy
|
c8c0c1300108015746320cede2207ac57e630d3e
|
[
"MIT"
] | 19
|
2017-02-20T17:42:02.000Z
|
2021-12-16T19:07:17.000Z
|
tests/test_word_distance.py
|
eggleader/alfpy
|
e0782e9551458ef17ab29df8af13fc0f8925e894
|
[
"MIT"
] | 3
|
2018-03-12T23:54:27.000Z
|
2020-12-09T21:53:19.000Z
|
tests/test_word_distance.py
|
eggleader/alfpy
|
e0782e9551458ef17ab29df8af13fc0f8925e894
|
[
"MIT"
] | 6
|
2016-12-06T09:12:04.000Z
|
2021-09-24T14:40:47.000Z
|
import unittest
from alfpy import word_pattern
from alfpy import word_vector
from alfpy import word_distance
from alfpy.utils import distmatrix
from . import utils
class DistanceTest(unittest.TestCase, utils.ModulesCommonTest):
def __init__(self, *args, **kwargs):
super(DistanceTest, self).__init__(*args, **kwargs)
utils.ModulesCommonTest.set_test_data()
self.pattern = word_pattern.create(self.dna_records.seq_list, 2)
self.counts = word_vector.Counts(self.dna_records.length_list,
self.pattern)
self.freqs = word_vector.Freqs(self.dna_records.length_list,
self.pattern)
def test_angle_cos_diss_freqs(self):
# The result of this method is identical to that from decaf+py.
dist = word_distance.Distance(self.freqs, 'angle_cos_diss')
matrix = distmatrix.create(self.dna_records.id_list, dist)
data = [' 3',
'seq1 0.0000000 0.2797355 0.1500672',
'seq2 0.2797355 0.0000000 0.1261027',
'seq3 0.1500672 0.1261027 0.0000000']
self.assertEqual(matrix.format(), "\n".join(data))
def test_angle_cos_evol_freqs(self):
# The result of this method is identical to that from decaf+py.
dist = word_distance.Distance(self.freqs, 'angle_cos_evol')
matrix = distmatrix.create(self.dna_records.id_list, dist)
data = [' 3',
'seq1 0.0000000 0.3281368 0.1625980',
'seq2 0.3281368 0.0000000 0.1347925',
'seq3 0.1625980 0.1347925 0.0000000']
self.assertEqual(matrix.format(), "\n".join(data))
def test_diff_abs_add_freqs(self):
# The result of this method is identical to that from decaf+py.
dist = word_distance.Distance(self.freqs, 'diff_abs_add')
matrix = distmatrix.create(self.dna_records.id_list, dist)
data = [' 3',
'seq1 0.0000000 0.0810458 0.0507937',
'seq2 0.0810458 0.0000000 0.0526611',
'seq3 0.0507937 0.0526611 0.0000000']
self.assertEqual(matrix.format(), "\n".join(data))
def test_diff_abs_mult1_freqs(self):
# The result of this method is identical to that from decaf+py.
dist = word_distance.Distance(self.freqs, 'diff_abs_mult1')
matrix = distmatrix.create(self.dna_records.id_list, dist)
data = [' 3',
'seq1 0.0000000 0.0621975 0.0501075',
'seq2 0.0621975 0.0000000 0.0955847',
'seq3 0.0501075 0.0955847 0.0000000']
self.assertEqual(matrix.format(), "\n".join(data))
def test_diff_abs_mult2_freqs(self):
# The result of this method is identical to that from decaf+py.
dist = word_distance.Distance(self.freqs, 'diff_abs_mult2')
matrix = distmatrix.create(self.dna_records.id_list, dist)
data = [' 3',
'seq1 0.0000000 0.0621975 0.0404611',
'seq2 0.0621975 0.0000000 0.0531478',
'seq3 0.0404611 0.0531478 0.0000000']
self.assertEqual(matrix.format(), "\n".join(data))
def test_euclid_seqlen1_freqs(self):
# The result of this method is identical to that from decaf+py.
dist = word_distance.Distance(self.freqs, 'euclid_seqlen1')
matrix = distmatrix.create(self.dna_records.id_list, dist)
data = [' 3',
'seq1 0.0000000 0.0065879 0.0032065',
'seq2 0.0065879 0.0000000 0.0041065',
'seq3 0.0032065 0.0041065 0.0000000']
self.assertEqual(matrix.format(), "\n".join(data))
def test_euclid_seqlen2_freqs(self):
# The result of this method is identical to that from decaf+py.
dist = word_distance.Distance(self.freqs, 'euclid_seqlen2')
matrix = distmatrix.create(self.dna_records.id_list, dist)
data = [' 3',
'seq1 0.0000000 0.0072101 0.0038263',
'seq2 0.0072101 0.0000000 0.0039866',
'seq3 0.0038263 0.0039866 0.0000000']
self.assertEqual(matrix.format(), "\n".join(data))
def test_manhattan_freqs(self):
dist = word_distance.Distance(self.freqs, 'manhattan')
matrix = distmatrix.create(self.dna_records.id_list, dist)
data = [
" 3",
"seq1 0.0000000 1.2156863 0.7619048",
"seq2 1.2156863 0.0000000 0.7899160",
"seq3 0.7619048 0.7899160 0.0000000"
]
self.assertEqual(matrix.format(), "\n".join(data))
def test_chebyshev_freqs(self):
dist = word_distance.Distance(self.freqs, 'chebyshev')
matrix = distmatrix.create(self.dna_records.id_list, dist)
data = [
" 3",
"seq1 0.0000000 0.1936275 0.1250000",
"seq2 0.1936275 0.0000000 0.1428571",
"seq3 0.1250000 0.1428571 0.0000000"
]
self.assertEqual(matrix.format(), "\n".join(data))
def test_braycurtis_freqs(self):
dist = word_distance.Distance(self.freqs, 'braycurtis')
matrix = distmatrix.create(self.dna_records.id_list, dist)
data = [
" 3",
"seq1 0.0000000 0.6078431 0.3809524",
"seq2 0.6078431 0.0000000 0.3949580",
"seq3 0.3809524 0.3949580 0.0000000"
]
self.assertEqual(matrix.format(), "\n".join(data))
def test_diff_abs_mult_freqs(self):
dist = word_distance.Distance(self.freqs, 'diff_abs_mult')
matrix = distmatrix.create(self.dna_records.id_list, dist)
data = [
" 3",
"seq1 0.0000000 0.0621975 0.0404611",
"seq2 0.0621975 0.0000000 0.0531478",
"seq3 0.0404611 0.0531478 0.0000000"
]
self.assertEqual(matrix.format(), "\n".join(data))
def test_kld_freqs(self):
dist = word_distance.Distance(self.freqs, 'kld')
matrix = distmatrix.create(self.dna_records.id_list, dist)
data = [
" 3",
"seq1 0.0000000 0.0932800 0.0435210",
"seq2 0.0932800 0.0000000 0.0447391",
"seq3 0.0435210 0.0447391 0.0000000"
]
self.assertEqual(matrix.format(), "\n".join(data))
def test_lcc_freqs(self):
dist = word_distance.Distance(self.freqs, 'lcc')
matrix = distmatrix.create(self.dna_records.id_list, dist)
data = [
" 3",
"seq1 0.0000000 0.6205496 0.4017554",
"seq2 0.6205496 0.0000000 0.2550506",
"seq3 0.4017554 0.2550506 0.0000000"
]
self.assertEqual(matrix.format(), "\n".join(data))
def test_canberra_freqs(self):
dist = word_distance.Distance(self.freqs, 'canberra')
matrix = distmatrix.create(self.dna_records.id_list, dist)
data = [
" 3",
"seq1 0.0000000 10.3372258 7.1836838",
"seq2 10.3372258 0.0000000 6.6280959",
"seq3 7.1836838 6.6280959 0.0000000"
]
self.assertEqual(matrix.format(), "\n".join(data))
def test_minkowski_freqs(self):
dist = word_distance.Distance(self.freqs, 'minkowski')
matrix = distmatrix.create(self.dna_records.id_list, dist)
data = [
" 3",
"seq1 0.0000000 0.3763512 0.2532387",
"seq2 0.3763512 0.0000000 0.2603008",
"seq3 0.2532387 0.2603008 0.0000000"
]
self.assertEqual(matrix.format(), "\n".join(data))
def test_minkowski_throws_exception(self):
dist = word_distance.Distance(self.freqs, 'minkowski')
with self.assertRaises(Exception) as context:
dist.pwdist_minkowski(0, 1, 0.2)
self.assertIn('p must be at least 1', str(context.exception))
def test_jsd_freqs(self):
dist = word_distance.Distance(self.freqs, 'jsd')
matrix = distmatrix.create(self.dna_records.id_list, dist)
data = [
" 3",
"seq1 0.0000000 0.4608882 0.2550278",
"seq2 0.4608882 0.0000000 0.2457790",
"seq3 0.2550278 0.2457790 0.0000000"
]
self.assertEqual(matrix.format(), "\n".join(data))
def test_euclid_squared_freqs(self):
# The result of this method is identical to that from decaf+py.
dist = word_distance.Distance(self.freqs, 'euclid_squared')
matrix = distmatrix.create(self.dna_records.id_list, dist)
data = [' 3',
'seq1 0.0000000 0.1416402 0.0641298',
'seq2 0.1416402 0.0000000 0.0677565',
'seq3 0.0641298 0.0677565 0.0000000']
self.assertEqual(matrix.format(), "\n".join(data))
def test_euclid_norm_counts(self):
# The result of this method is identical to that from decaf+py.
dist = word_distance.Distance(self.counts, 'euclid_norm')
matrix = distmatrix.create(self.dna_records.id_list, dist)
data = [' 3',
'seq1 0.0000000 7.5498344 5.4772256',
'seq2 7.5498344 0.0000000 4.3588989',
'seq3 5.4772256 4.3588989 0.0000000']
self.assertEqual(matrix.format(), "\n".join(data))
def test_euclid_norm_freqs(self):
# The result of this method is identical to that from decaf+py.
dist = word_distance.Distance(self.freqs, 'euclid_norm')
matrix = distmatrix.create(self.dna_records.id_list, dist)
data = [' 3',
'seq1 0.0000000 0.3763512 0.2532387',
'seq2 0.3763512 0.0000000 0.2603008',
'seq3 0.2532387 0.2603008 0.0000000']
self.assertEqual(matrix.format(), "\n".join(data))
def test_google_freqs(self):
dist = word_distance.Distance(self.freqs, 'google')
matrix = distmatrix.create(self.dna_records.id_list, dist)
data = [' 3',
'seq1 0.0000000 0.6078431 0.3809524',
'seq2 0.6078431 0.0000000 0.3949580',
'seq3 0.3809524 0.3949580 0.0000000']
self.assertEqual(matrix.format(), "\n".join(data))
if __name__ == '__main__':
unittest.main()
| 43.949791
| 77
| 0.58035
| 10,286
| 0.979246
| 0
| 0
| 0
| 0
| 0
| 0
| 3,640
| 0.346535
|
a72d7496d5e3f428cdf8342b764e52a9a68ac6a0
| 3,092
|
py
|
Python
|
cdparser/Features.py
|
opengulf/nyc-directories-support-scripts
|
e22582b8f4cb3c365e9aac1d860d9c36831277a5
|
[
"MIT"
] | 1
|
2021-09-07T20:41:00.000Z
|
2021-09-07T20:41:00.000Z
|
cdparser/Features.py
|
opengulf/nyc-directories-support-scripts
|
e22582b8f4cb3c365e9aac1d860d9c36831277a5
|
[
"MIT"
] | null | null | null |
cdparser/Features.py
|
opengulf/nyc-directories-support-scripts
|
e22582b8f4cb3c365e9aac1d860d9c36831277a5
|
[
"MIT"
] | 2
|
2021-09-07T20:49:14.000Z
|
2021-11-05T02:03:47.000Z
|
from functools import partial
class Features:
@staticmethod
def __emit_word_features(rel_pos, word):
features = {}
for f in Features.__word_feature_functions().items():
features.update({str(rel_pos) + ":" + f[0]: f[1](word)})
return features
@staticmethod
def get_word_features(sentence,i):
features = {}
for x in range(i - 2, i + 3):
if 0 <= x < len(sentence):
features.update(Features.__emit_word_features(-(i - x), sentence[x][0]))
if i == 0:
features.update({'BOS' : True})
if i == len(sentence) - 1:
features.update({'EOS': True})
return features
@staticmethod
def __word_feature_functions():
return {
"word.junior": Features.__is_junior_token,
"word.widow.token": Features.__is_widow_token,
"word.contains.digit": Features.__contains_digit,
"word.is.delimiter": Features.__is_delimiter,
"word.is.start.token": Features.__is_start,
"word.is.end.token": Features.__is_end,
"word.is.lower": str.islower,
"word.is.title": str.istitle,
"word.is.upper": str.isupper,
"word.substr[-2:]" : partial(Features.__substr, 2),
"word.substr[-1:]": partial(Features.__substr, 1)
}
@staticmethod
def get_sentence_features(sentence):
return [Features.get_word_features(sentence, i) for i in range(len(sentence))]
@staticmethod
def get_sentence_labels(sentence):
return [label for token, label in sentence]
@staticmethod
def get_sentence_tokens(sentence):
return [token for token, label in sentence]
@staticmethod
def __contains_digit(input):
for c in input:
if c.isdigit():
return True
return False
@staticmethod
def __substr(amount, word):
return word[amount:]
@staticmethod
def __is_start(input):
if input == "START":
return True
return False
@staticmethod
def __is_end(input):
if input == "END":
return True
return False
@staticmethod
def __is_delimiter(input):
for c in input:
if c == '.' or c == ',':
return True
return False
@staticmethod
def __is_known_position_adj(input):
if len(input) == 1:
if input == 'h' or input == 'r':
return True
return False
@staticmethod
def __is_junior_token(input):
dc = input.lower()
if dc == "jr":
return True
return False
@staticmethod
def __segment_of_sentence(sent, i, div):
sent_length = len(sent)
pos = i + 1
for j in range(1,div + 1):
if pos <= j*(sent_length / float(div)):
return j
@staticmethod
def __is_widow_token(input):
dc = input.lower()
if dc == "wid" or dc == "widow":
return True
return False
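# A minimal usage sketch for the helpers above; the toy (token, label) sentence and
# its label names below are assumptions for illustration, not the real cdparser tag set.
if __name__ == "__main__":
    sentence = [("START", "O"), ("Smith", "NAME"), ("John", "NAME"),
                ("carpenter", "OCC"), ("12", "ADDR"), ("Broadway", "ADDR"),
                ("END", "O")]
    X = Features.get_sentence_features(sentence)  # one feature dict per token, with +/-2 word context
    y = Features.get_sentence_labels(sentence)    # parallel list of labels for CRF training
    print(len(X), len(y))                         # 7 7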
| 28.366972
| 88
| 0.559185
| 3,061
| 0.989974
| 0
| 0
| 2,956
| 0.956016
| 0
| 0
| 245
| 0.079237
|
a73018c4b01cc941e04ea8bb39a52a6d8c243fb6
| 10,631
|
py
|
Python
|
IRIS_data_download/IRIS_download_support/obspy/core/tests/test_util_attribdict.py
|
earthinversion/Fnet_IRIS_data_automated_download
|
09a6e0c992662feac95744935e038d1c68539fa1
|
[
"MIT"
] | 2
|
2020-03-05T01:03:01.000Z
|
2020-12-17T05:04:07.000Z
|
IRIS_data_download/IRIS_download_support/obspy/core/tests/test_util_attribdict.py
|
earthinversion/Fnet_IRIS_data_automated_download
|
09a6e0c992662feac95744935e038d1c68539fa1
|
[
"MIT"
] | 4
|
2021-03-31T19:25:55.000Z
|
2021-12-13T20:32:46.000Z
|
IRIS_data_download/IRIS_download_support/obspy/core/tests/test_util_attribdict.py
|
earthinversion/Fnet_IRIS_data_automated_download
|
09a6e0c992662feac95744935e038d1c68539fa1
|
[
"MIT"
] | 2
|
2020-09-08T19:33:40.000Z
|
2021-04-05T09:47:50.000Z
|
# -*- coding: utf-8 -*-
from __future__ import (absolute_import, division, print_function,
unicode_literals)
from future.builtins import * # NOQA @UnusedWildImport
import unittest
from obspy.core import AttribDict
class AttribDictTestCase(unittest.TestCase):
"""
Test suite for obspy.core.util.attribdict
"""
def test_pop(self):
"""
Tests pop method of AttribDict class.
"""
ad = AttribDict()
ad.test = 1
ad['test2'] = 'test'
# removing via pop
temp = ad.pop('test')
self.assertEqual(temp, 1)
self.assertFalse('test' in ad)
self.assertIn('test2', ad)
self.assertFalse('test' in ad.__dict__)
self.assertIn('test2', ad.__dict__)
self.assertFalse(hasattr(ad, 'test'))
self.assertTrue(hasattr(ad, 'test2'))
# using pop() for not existing element raises a KeyError
self.assertRaises(KeyError, ad.pop, 'test')
def test_popitem(self):
"""
Tests pop method of AttribDict class.
"""
ad = AttribDict()
ad['test2'] = 'test'
# removing via popitem
temp = ad.popitem()
self.assertEqual(temp, ('test2', 'test'))
self.assertFalse('test2' in ad)
self.assertFalse('test2' in ad.__dict__)
self.assertFalse(hasattr(ad, 'test2'))
# popitem for empty AttribDict raises a KeyError
self.assertRaises(KeyError, ad.popitem)
def test_delete(self):
"""
Tests delete method of AttribDict class.
"""
ad = AttribDict()
ad.test = 1
ad['test2'] = 'test'
# deleting test using dictionary
del ad['test']
self.assertFalse('test' in ad)
self.assertIn('test2', ad)
self.assertFalse('test' in ad.__dict__)
self.assertIn('test2', ad.__dict__)
self.assertFalse(hasattr(ad, 'test'))
self.assertTrue(hasattr(ad, 'test2'))
# deleting test2 using attribute
del ad.test2
self.assertFalse('test2' in ad)
self.assertFalse('test2' in ad.__dict__)
self.assertFalse(hasattr(ad, 'test2'))
def test_init(self):
"""
Tests initialization of AttribDict class.
"""
ad = AttribDict({'test': 'NEW'})
self.assertEqual(ad['test'], 'NEW')
self.assertEqual(ad.test, 'NEW')
self.assertEqual(ad.get('test'), 'NEW')
self.assertEqual(ad.__getattr__('test'), 'NEW')
self.assertEqual(ad.__getitem__('test'), 'NEW')
self.assertEqual(ad.__dict__['test'], 'NEW')
self.assertEqual(ad.__dict__.get('test'), 'NEW')
self.assertIn('test', ad)
self.assertIn('test', ad.__dict__)
def test_setitem(self):
"""
Tests __setitem__ method of AttribDict class.
"""
# 1
ad = AttribDict()
ad['test'] = 'NEW'
self.assertEqual(ad['test'], 'NEW')
self.assertEqual(ad.test, 'NEW')
self.assertEqual(ad.get('test'), 'NEW')
self.assertEqual(ad.__getattr__('test'), 'NEW')
self.assertEqual(ad.__getitem__('test'), 'NEW')
self.assertEqual(ad.__dict__['test'], 'NEW')
self.assertEqual(ad.__dict__.get('test'), 'NEW')
self.assertIn('test', ad)
self.assertIn('test', ad.__dict__)
# 2
ad = AttribDict()
ad.__setitem__('test', 'NEW')
self.assertEqual(ad['test'], 'NEW')
self.assertEqual(ad.test, 'NEW')
self.assertEqual(ad.get('test'), 'NEW')
self.assertEqual(ad.__getattr__('test'), 'NEW')
self.assertEqual(ad.__getitem__('test'), 'NEW')
self.assertEqual(ad.__dict__['test'], 'NEW')
self.assertEqual(ad.__dict__.get('test'), 'NEW')
self.assertIn('test', ad)
self.assertIn('test', ad.__dict__)
def test_setattr(self):
"""
Tests __setattr__ method of AttribDict class.
"""
# 1
ad = AttribDict()
ad.test = 'NEW'
self.assertEqual(ad['test'], 'NEW')
self.assertEqual(ad.test, 'NEW')
self.assertEqual(ad.get('test'), 'NEW')
self.assertEqual(ad.__getattr__('test'), 'NEW')
self.assertEqual(ad.__getitem__('test'), 'NEW')
self.assertEqual(ad.__dict__['test'], 'NEW')
self.assertEqual(ad.__dict__.get('test'), 'NEW')
self.assertIn('test', ad)
self.assertIn('test', ad.__dict__)
# 2
ad = AttribDict()
ad.__setattr__('test', 'NEW')
self.assertEqual(ad['test'], 'NEW')
self.assertEqual(ad.test, 'NEW')
self.assertEqual(ad.get('test'), 'NEW')
self.assertEqual(ad.__getattr__('test'), 'NEW')
self.assertEqual(ad.__getitem__('test'), 'NEW')
self.assertEqual(ad.__dict__['test'], 'NEW')
self.assertEqual(ad.__dict__.get('test'), 'NEW')
self.assertIn('test', ad)
self.assertIn('test', ad.__dict__)
def test_setdefault(self):
"""
Tests setdefault method of AttribDict class.
"""
ad = AttribDict()
# 1
default = ad.setdefault('test', 'NEW')
self.assertEqual(default, 'NEW')
self.assertEqual(ad['test'], 'NEW')
self.assertEqual(ad.test, 'NEW')
self.assertEqual(ad.get('test'), 'NEW')
self.assertEqual(ad.__getattr__('test'), 'NEW')
self.assertEqual(ad.__getitem__('test'), 'NEW')
self.assertEqual(ad.__dict__['test'], 'NEW')
self.assertEqual(ad.__dict__.get('test'), 'NEW')
self.assertIn('test', ad)
self.assertIn('test', ad.__dict__)
# 2 - existing key should not be overwritten
default = ad.setdefault('test', 'SOMETHINGDIFFERENT')
self.assertEqual(default, 'NEW')
self.assertEqual(ad['test'], 'NEW')
self.assertEqual(ad.test, 'NEW')
self.assertEqual(ad.get('test'), 'NEW')
self.assertEqual(ad.__getattr__('test'), 'NEW')
self.assertEqual(ad.__getitem__('test'), 'NEW')
self.assertEqual(ad.__dict__['test'], 'NEW')
self.assertEqual(ad.__dict__.get('test'), 'NEW')
self.assertIn('test', ad)
self.assertIn('test', ad.__dict__)
        # 3 - default value is None
ad = AttribDict()
default = ad.setdefault('test')
self.assertEqual(default, None)
self.assertEqual(ad['test'], None)
self.assertEqual(ad.test, None)
self.assertEqual(ad.get('test'), None)
self.assertEqual(ad.__getattr__('test'), None)
self.assertEqual(ad.__getitem__('test'), None)
self.assertEqual(ad.__dict__['test'], None)
self.assertEqual(ad.__dict__.get('test'), None)
self.assertIn('test', ad)
self.assertIn('test', ad.__dict__)
def test_clear(self):
"""
Tests clear method of AttribDict class.
"""
ad = AttribDict()
ad.test = 1
ad['test2'] = 'test'
# removing via pop
ad.clear()
self.assertFalse('test' in ad)
self.assertFalse('test2' in ad)
self.assertFalse('test' in ad.__dict__)
self.assertFalse('test2' in ad.__dict__)
self.assertFalse(hasattr(ad, 'test'))
self.assertFalse(hasattr(ad, 'test2'))
# class attributes should be still present
self.assertTrue(hasattr(ad, 'readonly'))
self.assertTrue(hasattr(ad, 'defaults'))
def test_init_argument(self):
"""
Tests initialization of AttribDict with various arguments.
"""
# one dict works as expected
ad = AttribDict({'test': 1})
self.assertEqual(ad.test, 1)
# multiple dicts results into TypeError
self.assertRaises(TypeError, AttribDict, {}, {})
self.assertRaises(TypeError, AttribDict, {}, {}, blah=1)
# non-dicts results into TypeError
self.assertRaises(TypeError, AttribDict, 1)
self.assertRaises(TypeError, AttribDict, object())
def test_defaults(self):
"""
Tests default of __getitem__/__getattr__ methods of AttribDict class.
"""
# 1
ad = AttribDict()
ad['test'] = 'NEW'
self.assertEqual(ad.__getitem__('test'), 'NEW')
self.assertEqual(ad.__getitem__('xxx', 'blub'), 'blub')
self.assertEqual(ad.__getitem__('test', 'blub'), 'NEW')
self.assertEqual(ad.__getattr__('test'), 'NEW')
self.assertEqual(ad.__getattr__('xxx', 'blub'), 'blub')
self.assertEqual(ad.__getattr__('test', 'blub'), 'NEW')
# should raise KeyError without default item
self.assertRaises(KeyError, ad.__getitem__, 'xxx')
self.assertRaises(AttributeError, ad.__getattr__, 'xxx')
# 2
ad2 = AttribDict(defaults={'test2': 'NEW'})
self.assertEqual(ad2.__getitem__('test2'), 'NEW')
self.assertRaises(KeyError, ad2.__getitem__, 'xxx')
def test_set_readonly(self):
"""
Tests of setting readonly attributes.
"""
class MyAttribDict(AttribDict):
readonly = ['test']
defaults = {'test': 1}
ad = MyAttribDict()
self.assertEqual(ad.test, 1)
self.assertRaises(AttributeError, ad.__setitem__, 'test', 1)
def test_deepcopy(self):
"""
Tests __deepcopy__ method of AttribDict.
"""
class MyAttribDict(AttribDict):
defaults = {'test': 1}
ad = MyAttribDict()
ad.muh = 2
ad2 = ad.__deepcopy__()
self.assertEqual(ad2.test, 1)
self.assertEqual(ad2.muh, 2)
def test_compare_with_dict(self):
"""
Checks if AttribDict is still comparable to a dict object.
"""
adict = {'test': 1}
ad = AttribDict(adict)
self.assertEqual(ad, adict)
self.assertEqual(adict, ad)
def test_pretty_str(self):
"""
Test _pretty_str method of AttribDict.
"""
# 1
ad = AttribDict({'test1': 1, 'test2': 2})
out = ' test1: 1\n test2: 2'
self.assertEqual(ad._pretty_str(), out)
# 2
ad = AttribDict({'test1': 1, 'test2': 2})
out = ' test2: 2\n test1: 1'
self.assertEqual(ad._pretty_str(priorized_keys=['test2']), out)
# 3
ad = AttribDict({'test1': 1, 'test2': 2})
out = ' test1: 1\n test2: 2'
self.assertEqual(ad._pretty_str(min_label_length=6), out)
def suite():
return unittest.makeSuite(AttribDictTestCase, 'test')
if __name__ == '__main__':
unittest.main(defaultTest='suite')
| 35.555184
| 77
| 0.577462
| 10,246
| 0.963785
| 0
| 0
| 0
| 0
| 0
| 0
| 2,928
| 0.275421
|
a730e555a53175f843e80e26bb1889169e4678c3
| 458
|
py
|
Python
|
data/datasetFactory.py
|
dcsgfl/acceleratefl
|
9c928ff06dd4dd02eb27cb71d7d539ba4527ec58
|
[
"MIT"
] | null | null | null |
data/datasetFactory.py
|
dcsgfl/acceleratefl
|
9c928ff06dd4dd02eb27cb71d7d539ba4527ec58
|
[
"MIT"
] | null | null | null |
data/datasetFactory.py
|
dcsgfl/acceleratefl
|
9c928ff06dd4dd02eb27cb71d7d539ba4527ec58
|
[
"MIT"
] | null | null | null |
from cifar10 import CIFAR10
from mnist import MNIST
class DatasetFactory:
factories = {}
def addFactory(id, dftory):
        DatasetFactory.factories[id] = dftory
addFactory = staticmethod(addFactory)
def getDataset(id):
if id not in DatasetFactory.factories:
DatasetFactory.factories[id] = eval(id + '.Factory()')
return DatasetFactory.factories[id].get()
getDataset = staticmethod(getDataset)
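# A minimal usage sketch; it assumes the imported CIFAR10/MNIST classes each expose a
# nested Factory class with a get() method, which is exactly what getDataset() eval()s above.
if __name__ == "__main__":
    dataset = DatasetFactory.getDataset("CIFAR10")        # builds and caches CIFAR10.Factory() on first use
    dataset_again = DatasetFactory.getDataset("CIFAR10")  # served from the cached factory afterwards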
| 28.625
| 66
| 0.676856
| 405
| 0.884279
| 0
| 0
| 0
| 0
| 0
| 0
| 12
| 0.026201
|
a73131170f5bdfaf1161caf237d671d9dbf5663d
| 253
|
py
|
Python
|
jsonresume/__init__.py
|
kelvintaywl/jsonresume-validator
|
73ac162cb30ca70699c942def629188f7dfd4d3c
|
[
"MIT"
] | 42
|
2016-06-03T18:17:24.000Z
|
2021-12-09T04:13:14.000Z
|
jsonresume/__init__.py
|
kelvintaywl/jsonresume-validator
|
73ac162cb30ca70699c942def629188f7dfd4d3c
|
[
"MIT"
] | 3
|
2016-04-27T12:32:41.000Z
|
2020-09-29T16:43:35.000Z
|
jsonresume/__init__.py
|
kelvintaywl/jsonresume-validator
|
73ac162cb30ca70699c942def629188f7dfd4d3c
|
[
"MIT"
] | 9
|
2016-05-08T15:31:53.000Z
|
2021-04-28T09:17:47.000Z
|
# -*- coding: utf-8 -*-
"""
JSON Resume Validator
~~~~~~
JSON Resume Validator helps validate python dictionaries to
ensure they are valid representation of a JSON Resume.
"""
from jsonresume.resume import Resume
__all__ = ['Resume']
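# A minimal usage sketch, kept as a comment because the exact method names on Resume
# are an assumption here; the dict is a toy example whose field names follow the
# public JSON Resume schema:
#
#     from jsonresume import Resume
#
#     resume = Resume({"basics": {"name": "Ada Lovelace", "email": "ada@example.com"}})
#     resume.validate()   # assumed to raise a validation error for an invalid resume dict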
| 19.461538
| 63
| 0.675889
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 199
| 0.786561
|
a731c3353defbbffeebffba89c597908966a9fbc
| 936
|
py
|
Python
|
Catchphrase.py
|
YaruKatsaros/Catchphrase
|
5d674cc251be226e233fd427f9533a56f1a24284
|
[
"MIT"
] | null | null | null |
Catchphrase.py
|
YaruKatsaros/Catchphrase
|
5d674cc251be226e233fd427f9533a56f1a24284
|
[
"MIT"
] | null | null | null |
Catchphrase.py
|
YaruKatsaros/Catchphrase
|
5d674cc251be226e233fd427f9533a56f1a24284
|
[
"MIT"
] | null | null | null |
import glob
import os
import sys
import re
savedlines = []
def startreading():
if os.path.isdir(sys.argv[1]):
os.chdir(sys.argv[1])
target = sys.argv[2] # TODO: Multiple lines.
for file in glob.glob("*.srt"):
read(sys.argv[1], file, target)
savelines()
print("Finished!")
else:
print("Not a valid path!")
def savelines():
try:
outfile = open('result.txt', 'w')
outfile.writelines(savedlines)
outfile.close()
except Exception as e:
print("Something went wrong when saving the file: " + str(e)) # TODO: Custom exception.
def read(path, file, target):
openfile = open(file, 'r')
lines = openfile.readlines()
for line in lines:
if re.search(target, line, re.IGNORECASE):
ln = line.strip()
savedlines.append(ln + "\n")
print(ln)
openfile.close()
startreading()
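# A minimal usage sketch (the path and phrase below are made-up examples): the script
# expects a directory of .srt subtitle files and a phrase to search for, e.g.
#   python Catchphrase.py /path/to/subtitles "did I do that"
# Matching lines are printed and written to result.txt inside that directory.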
| 21.767442
| 96
| 0.573718
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 152
| 0.162393
|
a733182bb7d063e48b371c3b9b8871a0afe48521
| 19,712
|
py
|
Python
|
dashboard/api/config.py
|
x3niasweden/fomalhaut-panel
|
8b4b3d81e2c91bef8f24ccbaf9cf898a47ac38a6
|
[
"MIT"
] | 14
|
2017-08-01T08:28:00.000Z
|
2020-08-29T06:55:16.000Z
|
dashboard/api/config.py
|
x3niasweden/fomalhaut-panel
|
8b4b3d81e2c91bef8f24ccbaf9cf898a47ac38a6
|
[
"MIT"
] | 1
|
2021-03-29T06:16:34.000Z
|
2021-03-29T06:16:34.000Z
|
dashboard/api/config.py
|
x3niasweden/fomalhaut-panel
|
8b4b3d81e2c91bef8f24ccbaf9cf898a47ac38a6
|
[
"MIT"
] | 12
|
2017-07-18T02:59:03.000Z
|
2021-03-23T04:04:58.000Z
|
# !/usr/bin/env python
# -*- coding: utf-8 -*-
# created by restran on 2016/1/2
from __future__ import unicode_literals, absolute_import
import traceback
from django.views.decorators.http import require_http_methods
from django.views.decorators.csrf import csrf_protect
from django.db import transaction
from cerberus import Validator
import redis
from fomalhaut import settings
from ..forms import *
from common.utils import http_response_json, json_dumps, json_loads
from accounts.decorators import login_required
from common.utils import error_404
logger = logging.getLogger(__name__)
@login_required
@require_http_methods(["GET"])
def get_model_data(request, model_name):
logger.debug('run api_get_model_data')
return_data = {'success': False, 'msg': ''}
get_default_form = request.GET.get('get_default_form', False)
if model_name == 'client':
model = Client
model_form = ClientForm
data = model.get_all_in_json()
elif model_name == 'endpoint':
model = Endpoint
model_form = EndpointForm
data = model.get_all_in_json()
elif model_name == 'client_endpoint':
model = ClientEndpoint
model_form = None
client_id = request.GET.get('client_id')
data = model.get_all_in_json(client_id)
else:
model = None
model_form = None
data = []
if model is None:
raise error_404(request)
    # get a default form used for adding and editing data
if get_default_form:
t = model_form.get_default_form_json()
return_data['default_form'] = t
return_data['data'] = data
return_data['success'] = True
return http_response_json(return_data)
def do_create_or_update_model_data(request, model_name, is_update, post_data, form):
return_data = {'success': False, 'msg': ''}
if model_name == 'client_endpoint':
client_id = post_data.get('client_id', [])
endpoints = post_data.get('endpoints', [])
client = Client.get_client(client_id)
if client is None:
return_data['msg'] = '提交的数据有误, client_id 不存在'
return return_data
ClientEndpoint.objects.filter(client_id=client_id).delete()
endpoint_list = []
for t in endpoints:
ce = ClientEndpoint(client=client, endpoint_id=t['id'], enable=t['enable'])
endpoint_list.append(ce)
        # bulk_create does not return ids
ClientEndpoint.objects.bulk_create(endpoint_list)
return_data['success'] = True
return_data['data'] = ClientEndpoint.get_all_in_json(client_id)
return return_data
else:
form_is_valid = form.is_valid()
return_validation = {}
acl_rules = post_data.get('acl_rules', [])
if model_name == 'endpoint':
acl_rules_validation = {'data': [], 'has_error': False, 'errors': ''}
for t in acl_rules:
tf = ACLRuleForm(t)
if not tf.is_valid():
acl_rules_validation['has_error'] = True
acl_rules_validation['errors'] = '访问控制列表数据为空或不正确'
break
return_validation['acl_rules'] = acl_rules_validation
form_is_valid = form_is_valid and not acl_rules_validation['has_error']
elif model_name == 'client_endpoint':
pass
if form_is_valid:
# logger.debug(form.cleaned_data)
logger.debug('form is valid')
entry = form.save(commit=False)
if model_name == 'endpoint':
entry.save()
acl_rules = [ACLRule(endpoint_id=entry.id,
re_uri=t['re_uri'],
is_permit=t['is_permit'])
for t in acl_rules]
                # delete the old ones
ACLRule.objects.filter(endpoint_id=entry.id).delete()
                # create the ACLRule entries
ACLRule.objects.bulk_create(acl_rules)
entry.acl_rules = acl_rules
else:
entry.save()
return_data['success'] = True
return_data['data'] = entry.to_json_dict()
logger.debug(return_data['data'])
else:
return_data['msg'] = '提交的数据有误'
logger.debug('form is not valid')
logger.debug(form.get_form_json())
return_validation.update(form.get_form_json())
return_data['data'] = return_validation
return return_data
@login_required
@csrf_protect
@require_http_methods(["POST"])
def create_model_data(request, model_name):
"""
    Create or update data.
:param request:
:param model_name:
:return:
"""
logger.debug('run api_create_model_data')
post_data = json_loads(request.body)
logger.debug(post_data)
if model_name == 'client':
form = ClientForm(post_data['data'])
elif model_name == 'endpoint':
form = EndpointForm(post_data['data'])
elif model_name == 'client_endpoint':
form = None
else:
form = None
return_data = do_create_or_update_model_data(
request, model_name, False, post_data, form)
return http_response_json(return_data)
@login_required
@csrf_protect
@require_http_methods(["POST"])
def update_model_data(request, model_name, entry_id):
"""
    Create or update data.
:param request:
:param model_name:
:param entry_id:
:return:
"""
logger.debug('run api_update_model_data')
return_data = {'success': False, 'msg': ''}
if model_name == 'client':
model = Client
model_form = ClientForm
elif model_name == 'endpoint':
model = Endpoint
model_form = EndpointForm
elif model_name == 'client_endpoint':
model = None
model_form = None
else:
model = None
model_form = None
post_data = json_loads(request.body)
logger.debug(post_data)
if model_name != 'client_endpoint':
try:
entry = model.objects.get(id=entry_id)
        except model.DoesNotExist:
return_data['msg'] = '数据不存在'
return http_response_json(return_data)
if model_name == 'client':
form = model_form(post_data['data'], instance=entry)
elif model_name == 'endpoint':
form = model_form(post_data['data'], instance=entry)
else:
form = None
else:
form = None
return_data = do_create_or_update_model_data(
request, model_name, True, post_data, form)
return http_response_json(return_data)
@login_required
@csrf_protect
@require_http_methods(["POST"])
def delete_model_data(request, model_name, entry_id=None):
"""
    Delete data.
:param request:
:param model_name:
:param entry_id:
:return:
"""
logger.debug('run api_delete_model_data')
return_data = {'success': False, 'msg': ''}
if model_name == 'client':
model = Client
elif model_name == 'endpoint':
model = Endpoint
elif model_name == 'client_endpoint':
model = ClientEndpoint
else:
model = None
if model and entry_id is not None:
try:
entry = model.objects.get(id=entry_id)
entry.delete()
return_data['success'] = True
        except model.DoesNotExist:
return_data['msg'] = u'数据不存在'
return http_response_json(return_data)
@login_required
@csrf_protect
@require_http_methods(["POST"])
def update_enable_state_model_data(request, model_name, entry_id=None):
"""
    Update the enable state when the enable button is clicked.
:param request:
:param model_name:
:param entry_id:
:return:
"""
logger.debug('run api_update_enable_state_model_data')
return_data = {'success': False, 'msg': ''}
if model_name == 'client':
model = Client
elif model_name == 'endpoint':
model = Endpoint
elif model_name == 'client_endpoint':
model = ClientEndpoint
else:
model = None
post_data = json_loads(request.body)
if model and entry_id:
try:
model.objects.filter(id=entry_id).update(enable=post_data['enable'])
return_data['success'] = True
except Exception as e:
logger.error(e.message)
return_data['msg'] = u'更新启用状态失败'
return http_response_json(return_data)
def do_import_config(upload_file):
"""
    Import configuration from a JSON file.
:param upload_file:
:return:
"""
file_contents = upload_file.read()
try:
json_data = json_loads(file_contents)
except Exception as e:
logger.error(e.message)
return False, u'上传的文件不是JSON或者格式有误', []
json_data_schema = {
'clients': {
'type': 'list',
'required': True,
'schema': {
'type': 'dict',
'schema': {
'id': {
'type': 'integer',
'required': True,
},
'name': {
'type': 'string',
'required': True,
},
'app_id': {
'type': 'string',
'required': True,
},
'secret_key': {
'type': 'string',
'required': True,
},
'enable': {
'type': 'boolean',
'required': True,
},
'memo': {
'type': 'string',
'required': True,
}
}
}
},
'client_endpoints': {
'type': 'list',
'required': True,
'schema': {
'type': 'dict',
'schema': {
'id': {
'type': 'integer',
'required': True,
},
'client_id': {
'type': 'integer',
'required': True,
},
'endpoint_id': {
'type': 'integer',
'required': True,
},
'enable': {
'type': 'boolean',
'required': True,
}
}
}
},
'endpoints': {
'type': 'list',
'required': True,
'schema': {
'type': 'dict',
'schema': {
'id': {
'type': 'integer',
'required': True,
},
'unique_name': {
'type': 'string',
'required': True,
},
'name': {
'type': 'string',
'required': True,
},
'version': {
'type': 'string',
'required': True,
},
'url': {
'type': 'string',
'required': True,
},
'memo': {
'type': 'string',
'required': True,
},
'async_http_connect_timeout': {
'type': 'integer',
'required': True,
},
'async_http_request_timeout': {
'type': 'integer',
'required': True,
},
'enable_acl': {
'type': 'boolean',
'required': True,
},
'acl_rules': {
'type': 'list',
'required': True,
'schema': {
'type': 'dict',
'schema': {
'is_permit': {
'type': 'boolean',
'required': True,
},
're_uri': {
'type': 'string',
'required': True,
}
}
}
}
}
}
}
}
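    # Cerberus checks the uploaded JSON against the schema above; allow_unknown=True
    # tolerates extra keys, so only the declared fields are validated.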
validator = Validator(json_data_schema, allow_unknown=True)
if not validator.validate(json_data):
errors = []
for (k, v) in validator.errors.items():
errors.append('%s: %s' % (k, v))
return False, '上传的 JSON 配置文件格式有误,请先导出 JSON 配置文件再修改', errors
else:
success, msg, errors = False, '', []
try:
            # An exception anywhere below rolls the whole transaction back automatically
with transaction.atomic():
                # Clear out the old data, excluding Client and Endpoint rows
ClientEndpoint.objects.all().delete()
ACLRule.objects.all().delete()
old_client_list = Client.objects.all()
old_client_dict = {}
for t in old_client_list:
old_client_dict[t.app_id] = t
old_endpoint_list = Endpoint.objects.all()
old_endpoint_dict = {}
for t in old_endpoint_list:
old_endpoint_dict[t.unique_name] = t
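                # Existing rows are indexed by natural key (app_id / unique_name) so the
                # import can update matches in place and delete the leftovers afterwards.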
new_client_dict = {}
for t in json_data['clients']:
# del t['id']
old_client = old_client_dict.get(t['app_id'])
                    # Update in place if an entry with the same app_id already exists
if old_client is not None:
form = ClientForm(t, instance=old_client)
del old_client_dict[t['app_id']]
else:
form = ClientForm(t)
if not form.is_valid():
errors = []
form_errors = form.get_form_json()
for (k, v) in form_errors.items():
if v['has_error']:
errors.append('%s: %s' % (k, v['errors']))
msg, errors = '上传的 JSON 配置文件格式有误,请先导出 JSON 配置文件再修改', errors
raise Exception('error')
client = form.save()
new_client_dict[t['id']] = client
new_endpoint_dict = {}
for t in json_data['endpoints']:
# del t['id']
old_endpoint = old_endpoint_dict.get(t['unique_name'])
                    # Update in place if an entry with the same unique_name already exists
if old_endpoint is not None:
form = EndpointForm(t, instance=old_endpoint)
del old_endpoint_dict[t['unique_name']]
else:
form = EndpointForm(t)
if not form.is_valid():
errors = []
form_errors = form.get_form_json()
for (k, v) in form_errors.items():
if v['has_error']:
errors.append('%s: %s' % (k, v['errors']))
msg, errors = '上传的 JSON 配置文件格式有误,请先导出 JSON 配置文件再修改', errors
raise Exception('error')
endpoint = form.save(commit=False)
endpoint.save()
new_endpoint_dict[t['id']] = endpoint
acl_rules = t['acl_rules']
for y in acl_rules:
# del t['id']
tf = ACLRuleForm(y)
if not tf.is_valid():
msg, errors = '上传的 JSON 配置文件格式有误,请先导出 JSON 配置文件再修改', \
['访问控制列表数据为空或不正确']
raise Exception('error')
                    acl_rules = [ACLRule(endpoint_id=endpoint.id,
                                         re_uri=rule['re_uri'], is_permit=rule['is_permit'])
                                 for rule in acl_rules]
                    # Bulk-create the ACLRule rows
ACLRule.objects.bulk_create(acl_rules)
                # Rebuild the client_endpoint links, mapping the imported ids onto the newly saved rows
client_endpoint_list = []
for t in json_data['client_endpoints']:
client = new_client_dict.get(t['client_id'])
endpoint = new_endpoint_dict.get(t['endpoint_id'])
enable = t['enable']
ce = ClientEndpoint(client=client, endpoint=endpoint, enable=enable)
client_endpoint_list.append(ce)
ClientEndpoint.objects.bulk_create(client_endpoint_list)
                # Delete Clients that are absent from the imported configuration
Client.objects.filter(id__in=[t.id for t in old_client_dict.values()]).delete()
                # Delete Endpoints that are absent from the imported configuration
Endpoint.objects.filter(id__in=[t.id for t in old_endpoint_dict.values()]).delete()
success, msg = True, u'导入配置成功'
except Exception as e:
            logger.error(str(e))
return success, msg, errors
@login_required
@csrf_protect
@require_http_methods(["POST"])
def import_config(request):
"""
    Upload a file and import the configuration
"""
if request.FILES:
success, msg, errors = False, '', []
for _file in request.FILES:
            # Chunked upload is disabled, so the uploaded file arrives complete
            # Only one file may be selected; break once it has been processed
success, msg, errors = do_import_config(request.FILES[_file])
break
return http_response_json({'success': success, 'msg': msg, 'errors': errors})
else:
raise error_404(request)
@login_required
@csrf_protect
@require_http_methods(["POST"])
def transfer_to_redis(request):
"""
    Sync the configuration data to Redis
"""
success, msg = False, ''
try:
config_data = get_config_redis_json()
logger.debug(config_data)
r = redis.StrictRedis(host=settings.REDIS_HOST, port=settings.REDIS_PORT,
db=settings.REDIS_DB, password=settings.REDIS_PASSWORD)
        # transaction=True is the default
pipe = r.pipeline(transaction=True)
        # Bulk-delete existing keys matching the config prefix pattern
pattern_delete_lua = """
local keys = redis.call('keys', ARGV[1])
for i = 1, table.getn(keys) do
redis.call('del', keys[i])
end
"""
pattern_delete = r.register_script(pattern_delete_lua)
pattern_delete(keys=[''], args=['%s:*' % settings.CLIENT_CONFIG_REDIS_PREFIX], client=pipe)
for t in config_data:
logger.debug(t)
#
# client = {}
# for k, v in t.iteritems():
# if k != 'endpoints':
# client[k] = v
pipe.set('%s:%s' % (settings.CLIENT_CONFIG_REDIS_PREFIX, t['app_id']), json_dumps(t))
# for s in t['endpoints']:
# pipe.set('%s:%s:%s:%s' % (settings.PROXY_CONFIG_REDIS_PREFIX, t['access_key'], s['name'], s['version']),
# json_dumps(s))
# pipe.delete('config:*')
# the EXECUTE call sends all buffered commands to the server, returning
# a list of responses, one for each command.
pipe.execute()
success = True
except Exception as e:
msg = '同步配置数据到 Redis 出现异常'
        logger.error(str(e))
logger.error(traceback.format_exc())
return http_response_json({'success': success, 'msg': msg})
| 33.241147 | 122 | 0.491985 | 0 | 0 | 0 | 0 | 7,487 | 0.365184 | 0 | 0 | 4,712 | 0.229831 |
a733c76add330a704c87d51a39a3121429990715 | 2,209 | py | Python | WX_BG.py | boristown/WX_BG | c715d1f3ffeef60187be0289f26549204d6b963f | ["MIT"] | 1 | 2019-08-17T23:21:28.000Z | 2019-08-17T23:21:28.000Z | WX_BG.py | boristown/WX_BG | c715d1f3ffeef60187be0289f26549204d6b963f | ["MIT"] | null | null | null | WX_BG.py | boristown/WX_BG | c715d1f3ffeef60187be0289f26549204d6b963f | ["MIT"] | null | null | null |
# -*- coding: utf-8 -*-
# filename: WX_BG.py
import prices
import glob
import prediction
import os
import time
import random
# Price data files
prices_file_pattern = "Output\\prices\\*.csv"
# Prediction data files
predict_file_pattern = "Output\\predict\\*.csv"
# Price data files (second set)
prices_file_second_pattern = "Output\\prices_second\\*.csv"
# Prediction data files (second set)
predict_file_second_pattern = "Output\\predict_second\\*.csv"
modeStr = {0: "v1", 1:"v2"}
predict_batch_size = 10000
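# Main service loop: delete stale CSV output, regenerate the price input files, then poll
# for the prediction output written by the (apparently separate) predictor process.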
while True:
'''
randint = random.randint(0, 9)
if randint == 0:
modeType = 0
else:
modeType = 1
'''
modeType = 1
print( "mode = " + modeStr[modeType] )
    # Delete the old price data
prices_files = glob.glob(prices_file_pattern)
for prices_file in prices_files:
os.remove(prices_file)
prices_files_second = glob.glob(prices_file_second_pattern)
for prices_file_second in prices_files_second:
os.remove(prices_file_second)
    # Delete the old prediction data
predict_files = glob.glob(predict_file_pattern)
for predict_file in predict_files:
os.remove(predict_file)
predict_files_second = glob.glob(predict_file_second_pattern)
for predict_file_second in predict_files_second:
os.remove(predict_file_second)
time.sleep(10)
print("正在读取价格……")
    # Read prices and generate the model input data
if modeType == 0:
symbol_id_list = prices.read_prices()
else:
symbol_id_list = prices.read_pricehistory(predict_batch_size)
try:
if len(symbol_id_list) == 0:
continue
except:
continue
print("正在执行预测……")
    # Run prediction and read the results (poll until the output files appear)
while True:
time.sleep(1)
predict_files = glob.glob(predict_file_pattern)
predict_files_second = glob.glob(predict_file_second_pattern)
if len(predict_files) == 0 or len(predict_files_second) == 0:
continue
print("检测到预测文件:", predict_files[0])
print("检测到预测文件2:", predict_files_second[0])
time.sleep(2)
if modeType == 0:
prediction.get_prediction(symbol_id_list, predict_files[0])
else:
prediction.get_predictionhistory(symbol_id_list, predict_files[0], predict_files_second[0])
break
print("预测执行完毕!")
time.sleep(20)
| 26.939024 | 103 | 0.663649 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 599 | 0.249272 |
a734a04a2790536248f0af4b3c7aedde27c72873 | 929 | py | Python | hyppo/d_variate/tests/test_dhsic.py | zdbzdb123123/hyppo | c22dcfb7bdf25c9945e6d4ddd7c6bfe5fcdd0cde | ["MIT"] | 116 | 2020-02-28T10:29:22.000Z | 2022-03-22T12:19:39.000Z | hyppo/d_variate/tests/test_dhsic.py | zdbzdb123123/hyppo | c22dcfb7bdf25c9945e6d4ddd7c6bfe5fcdd0cde | ["MIT"] | 253 | 2020-02-17T16:18:56.000Z | 2022-03-30T16:55:02.000Z | hyppo/d_variate/tests/test_dhsic.py | zdbzdb123123/hyppo | c22dcfb7bdf25c9945e6d4ddd7c6bfe5fcdd0cde | ["MIT"] | 27 | 2020-03-02T21:07:41.000Z | 2022-03-08T08:33:23.000Z |
import numpy as np
import pytest
from numpy.testing import assert_almost_equal
from ...tools import linear, power
from .. import dHsic # type: ignore
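# dHsic is the d-variable Hilbert-Schmidt Independence Criterion; these tests pin its
# statistic and p-value on a linear dependence and its type-I error rate under independence.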
class TestdHsicStat:
@pytest.mark.parametrize("n, obs_stat", [(100, 0.04561), (200, 0.03911)])
@pytest.mark.parametrize("obs_pvalue", [1 / 1000])
def test_linear_oned(self, n, obs_stat, obs_pvalue):
np.random.seed(123456789)
x, y = linear(n, 1)
stat, pvalue = dHsic(gamma=0.5).test(x, y)
assert_almost_equal(stat, obs_stat, decimal=2)
assert_almost_equal(pvalue, obs_pvalue, decimal=2)
class TestdHsicTypeIError:
def test_oned(self):
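        # multimodal_independence simulates independent variables, so the estimated
        # "power" here is the empirical type-I error rate and should sit near alpha = 0.05.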
np.random.seed(123456789)
est_power = power(
"dhsic",
sim_type="multi",
sim="multimodal_independence",
n=100,
p=1,
alpha=0.05,
)
assert_almost_equal(est_power, 0.05, decimal=2)
| 27.323529 | 77 | 0.620022 | 771 | 0.829925 | 0 | 0 | 413 | 0.444564 | 0 | 0 | 78 | 0.083961 |
a7351f98fb299d1d929cbe7b4a8c9742f60b725d | 2,844 | py | Python | Pages/showHistory.py | ajaydeepsingh/ATLZoo | ab5ba27dc8602da39ce8bb47c4a050ff09d79b82 | ["MIT"] | null | null | null | Pages/showHistory.py | ajaydeepsingh/ATLZoo | ab5ba27dc8602da39ce8bb47c4a050ff09d79b82 | ["MIT"] | null | null | null | Pages/showHistory.py | ajaydeepsingh/ATLZoo | ab5ba27dc8602da39ce8bb47c4a050ff09d79b82 | ["MIT"] | null | null | null |
from tkinter import *
from PIL import ImageTk, Image
import pymysql
from tkinter import messagebox
from tkinter import ttk
from datetime import datetime, timedelta
import decimal
import sys  # needed for sys.exit() in ATLzooShowHistory.__init__
class ATLzooShowHistory:
def __init__(self):
self.createShowHistoryWindow()
self.buildShowHistoryWindow(self.showHistoryWindow)
self.showHistoryWindow.mainloop()
sys.exit()
def createShowHistoryWindow(self):
self.showHistoryWindow=Toplevel()
self.showHistoryWindow.title("Zoo Atlanta")
self.showHistoryWindow.geometry("800x600")
def buildShowHistoryWindow(self, showHistoryWindow):
titleLabel= Label(showHistoryWindow,text = "Show History", font = "Verdana 16 bold ")
titleLabel.grid(row=1,column=2,sticky=W+E)
# Labels
showLabel = Label(showHistoryWindow,text = "Name")
showLabel.grid(row=2,column=0,pady=10)
self.showNameString = StringVar()
showNameEntry = Entry(showHistoryWindow, textvariable=self.showNameString, width=20)
showNameEntry.grid(row=2,column=1,pady=10)
exhibitLabel = Label(showHistoryWindow,text = "Exhibit")
exhibitLabel.grid(row=2,column=2,pady=10)
exhibitDefault = StringVar()
exhibitDefault.set("options")
exhibitMenu = OptionMenu(showHistoryWindow, exhibitDefault, "this","will","have","options","later")
exhibitMenu.grid(row=2, column=3,pady=10)
dateLabel = Label(showHistoryWindow,text = "Date")
dateLabel.grid(row=3, column=0,pady=10)
#showDateEntry = CalendarDialog.main()
showDateEntry= Entry(showHistoryWindow)
showDateEntry.grid(row=3, column=1,pady=10)
# Button
findShowsButton = Button(showHistoryWindow, text="Search", command=self.showHistoryWindowFindShowsButtonClicked)
findShowsButton.grid(row=3,column=2,pady=10)
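        # Results table: tree column #0 holds the show name, #1 the exhibit, #2 the date.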
selectShowTree = ttk.Treeview(showHistoryWindow, columns=("Name", "Exhibit", "Date"))
selectShowTree.heading('#0', text = "Name")
selectShowTree.heading('#1', text = "Exhibit")
selectShowTree.heading('#2', text = "Date")
selectShowTree.column('#0', width = 200, anchor = "center")
selectShowTree.column('#1', width = 200, anchor = "center")
selectShowTree.column('#2', width = 200, anchor = "center")
selectShowTree.place(x=20, y=130,width=600)
backButton = Button(showHistoryWindow, text="Back", command=self.showHistoryWindowBackButtonClicked)
backButton.place(x=310,y=370)
def showHistoryWindowFindShowsButtonClicked(self):
self.showHistoryWindow.destroy()
self.createShowsDetailWindow()
def showHistoryWindowBackButtonClicked(self):
self.showHistoryWindow.withdraw()
import visitorFunctionality
a = ATLzooShowHistory()
| 37.92 | 120 | 0.688819 | 2,637 | 0.927215 | 0 | 0 | 0 | 0 | 0 | 0 | 276 | 0.097046 |
a738885fc845ac09ce24d938e1de039911e09569 | 6,061 | py | Python | python/federatedml/protobuf/generated/sample_weight_model_param_pb2.py | rubenlozanoaht3m/DataDogm | cd605e8072cca31e8418830c3300657ae2fa5b16 | ["Apache-2.0"] | 715 | 2019-01-24T10:52:03.000Z | 2019-10-31T12:19:22.000Z | python/federatedml/protobuf/generated/sample_weight_model_param_pb2.py | rubenlozanoaht3m/DataDogm | cd605e8072cca31e8418830c3300657ae2fa5b16 | ["Apache-2.0"] | 270 | 2019-02-11T02:57:36.000Z | 2019-08-29T11:22:33.000Z | python/federatedml/protobuf/generated/sample_weight_model_param_pb2.py | rubenlozanoaht3m/DataDogm | cd605e8072cca31e8418830c3300657ae2fa5b16 | ["Apache-2.0"] | 200 | 2019-01-26T14:21:35.000Z | 2019-11-01T01:14:36.000Z |
# -*- coding: utf-8 -*-
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: sample-weight-model-param.proto
import sys
_b = sys.version_info[0] < 3 and (lambda x: x) or (lambda x: x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
DESCRIPTOR = _descriptor.FileDescriptor(name='sample-weight-model-param.proto', package='com.webank.ai.fate.core.mlmodel.buffer', syntax='proto3', serialized_options=_b('B\033SampleWeightModelParamProto'), serialized_pb=_b(
'\n\x1fsample-weight-model-param.proto\x12&com.webank.ai.fate.core.mlmodel.buffer\"\xd8\x01\n\x16SampleWeightModelParam\x12\x0e\n\x06header\x18\x01 \x03(\t\x12\x13\n\x0bweight_mode\x18\x02 \x01(\t\x12\x65\n\x0c\x63lass_weight\x18\x03 \x03(\x0b\x32O.com.webank.ai.fate.core.mlmodel.buffer.SampleWeightModelParam.ClassWeightEntry\x1a\x32\n\x10\x43lassWeightEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\x01:\x02\x38\x01\x42\x1d\x42\x1bSampleWeightModelParamProtob\x06proto3'))
_SAMPLEWEIGHTMODELPARAM_CLASSWEIGHTENTRY = _descriptor.Descriptor(
name='ClassWeightEntry',
full_name='com.webank.ai.fate.core.mlmodel.buffer.SampleWeightModelParam.ClassWeightEntry',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='key',
full_name='com.webank.ai.fate.core.mlmodel.buffer.SampleWeightModelParam.ClassWeightEntry.key',
index=0,
number=1,
type=9,
cpp_type=9,
label=1,
has_default_value=False,
default_value=_b("").decode('utf-8'),
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='value',
full_name='com.webank.ai.fate.core.mlmodel.buffer.SampleWeightModelParam.ClassWeightEntry.value',
index=1,
number=2,
type=1,
cpp_type=5,
label=1,
has_default_value=False,
default_value=float(0),
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR),
],
extensions=[],
nested_types=[],
enum_types=[],
serialized_options=_b('8\001'),
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[],
serialized_start=242,
serialized_end=292,
)
_SAMPLEWEIGHTMODELPARAM = _descriptor.Descriptor(
name='SampleWeightModelParam',
full_name='com.webank.ai.fate.core.mlmodel.buffer.SampleWeightModelParam',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='header', full_name='com.webank.ai.fate.core.mlmodel.buffer.SampleWeightModelParam.header', index=0,
number=1, type=9, cpp_type=9, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='weight_mode', full_name='com.webank.ai.fate.core.mlmodel.buffer.SampleWeightModelParam.weight_mode', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='class_weight', full_name='com.webank.ai.fate.core.mlmodel.buffer.SampleWeightModelParam.class_weight', index=2,
number=3, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[_SAMPLEWEIGHTMODELPARAM_CLASSWEIGHTENTRY, ],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=76,
serialized_end=292,
)
_SAMPLEWEIGHTMODELPARAM_CLASSWEIGHTENTRY.containing_type = _SAMPLEWEIGHTMODELPARAM
_SAMPLEWEIGHTMODELPARAM.fields_by_name['class_weight'].message_type = _SAMPLEWEIGHTMODELPARAM_CLASSWEIGHTENTRY
DESCRIPTOR.message_types_by_name['SampleWeightModelParam'] = _SAMPLEWEIGHTMODELPARAM
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
SampleWeightModelParam = _reflection.GeneratedProtocolMessageType('SampleWeightModelParam', (_message.Message,), {
'ClassWeightEntry': _reflection.GeneratedProtocolMessageType('ClassWeightEntry', (_message.Message,), {
'DESCRIPTOR': _SAMPLEWEIGHTMODELPARAM_CLASSWEIGHTENTRY,
'__module__': 'sample_weight_model_param_pb2'
# @@protoc_insertion_point(class_scope:com.webank.ai.fate.core.mlmodel.buffer.SampleWeightModelParam.ClassWeightEntry)
}),
'DESCRIPTOR': _SAMPLEWEIGHTMODELPARAM,
'__module__': 'sample_weight_model_param_pb2'
# @@protoc_insertion_point(class_scope:com.webank.ai.fate.core.mlmodel.buffer.SampleWeightModelParam)
})
_sym_db.RegisterMessage(SampleWeightModelParam)
_sym_db.RegisterMessage(SampleWeightModelParam.ClassWeightEntry)
DESCRIPTOR._options = None
_SAMPLEWEIGHTMODELPARAM_CLASSWEIGHTENTRY._options = None
# @@protoc_insertion_point(module_scope)
| 42.985816 | 502 | 0.707144 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,907 | 0.314635 |
a739bd10614848db1a73028a77c6c885008e1463 | 63,679 | py | Python | postprocessing/pyplotgen/config/Case_definitions.py | larson-group/clubb_release | b4d671e3e238dbe00752c0dead6a0d4f9897350a | ["Intel", "Unlicense", "NetCDF"] | null | null | null | postprocessing/pyplotgen/config/Case_definitions.py | larson-group/clubb_release | b4d671e3e238dbe00752c0dead6a0d4f9897350a | ["Intel", "Unlicense", "NetCDF"] | null | null | null | postprocessing/pyplotgen/config/Case_definitions.py | larson-group/clubb_release | b4d671e3e238dbe00752c0dead6a0d4f9897350a | ["Intel", "Unlicense", "NetCDF"] | 1 | 2022-01-28T22:22:04.000Z | 2022-01-28T22:22:04.000Z |
"""
:author: Nicolas Strike
:date: Early 2019
This file is mostly a definition of Cases. Each case is defined in the following format
using python dictionaries (values surrounded with < > must have the < > removed to be valid).
.. code-block:: python
:linenos:
CASENAME = {'name': 'casename',
'description': "",
'start_time': <numeric value>, 'end_time': <numeric value>,
'height_min_value': <numeric value>, 'height_max_value': <numeric value>,
'blacklisted_vars': ['list', 'of', 'variable', 'names', 'to', 'exclude', 'from', 'plotting'],
'sam_benchmark_file': <path to sam file>",
'clubb_file': {'zm': <path to file>,
'zt': <path to file>,
'sfc': <path to file>},
'coamps_benchmark_file': {'sm': <path to file>,
'sw': <path to file>},
'clubb_r408_benchmark_file': {'zm': <path to file>,
'zt': <path to file>,
'sfc': <path to file>},
'clubb_hoc_benchmark_file': {'zm': <path to file>',
'zt': <path to file>',
'sfc': <path to file>},
'e3sm_file': <path to file>,
'cam_file': <path to file>,
'sam_file': <path to file>,
'wrf_file': {'zm': <path to file>,
'zt': <path to file>,
'sfc': <path to file>},
'var_groups': [VariableGroupBase, <other variable groups to plot>]}
**Important note**:
When creating a new case, add it to the CASES_TO_PLOT list at the bottom of the file. Additionally, please add it in
alphabetical order.
**Case Definition values explained**:
*name*: must be the same as the filename without the extension.
E.g. to use lba_zt.nc and lba_zm.nc the case's name must be 'lba'. Extensions are determined
by the last instance of _
*start_time*: An integer value representing which timestep to begin the time-averaging interval.
Valid options are from 1 -> last minute value. Give in terms of clubb minutes.
*end_time*: An integer value representing which timestep to end the time-averaging interval.
Valid options are from 1 -> last minute value. Give in terms of clubb minutes.
Also used to determine where to stop timeseries plots
*height_min_value*: The elevation to begin height plots at
*height_max_value*: The elevation to end height plots at
*blacklisted_vars*: List of variables to avoid plotting for this case. Names must use the clubb-name version
*<model name>_file*: The path(s) to nc files for the given model.
(please use the <model name>_OUTPUT_ROOT variables as the beginning of the path).
*var_groups*: These are the groups of variables to be plotted for the given case. var_groups is defined as a
list of python class names, where the classes use the naming scheme VariableGroup____.py and define a variable
group. An example would be: 'var_groups': [VariableGroupBase, VariableGroupWs].
The variables inside a VariableGroup can be found in the file with the same name,
i.e. config/VariableGroupBase.py. An example would be thlm in VariableGroupBase.
"""
import os
from config.VariableGroupBase import VariableGroupBase
from config.VariableGroupCorrelations import VariableGroupCorrelations
from config.VariableGroupIceMP import VariableGroupIceMP
from config.VariableGroupKKMP import VariableGroupKKMP
from config.VariableGroupLiquidMP import VariableGroupLiquidMP
from config.VariableGroupSamProfiles import VariableGroupSamProfiles
from config.VariableGroupScalars import VariableGroupScalars
from config.VariableGroupWs import VariableGroupWs
from config.VariableGroupTaus import VariableGroupTaus
from config.VariableGroupNondimMoments import VariableGroupNondimMoments
from config.VariableGroupNormalizedVariations import VariableGroupNormalizedVariations
# ---------------------------
BENCHMARK_OUTPUT_ROOT = "/home/pub/les_and_clubb_benchmark_runs/"
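# If the shared benchmark directory above is unreachable, fall back to a copy that sits
# next to the pyplotgen source tree (one level above this config package).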
if not os.path.isdir(BENCHMARK_OUTPUT_ROOT) and \
not os.path.islink(BENCHMARK_OUTPUT_ROOT):
print("Benchmark output was not found in " + BENCHMARK_OUTPUT_ROOT + ".\n\tChecking local location: " +
os.path.dirname(os.path.realpath(__file__)) + "/../les_and_clubb_benchmark_runs/")
BENCHMARK_OUTPUT_ROOT = os.path.dirname(os.path.realpath(__file__)) + "/../les_and_clubb_benchmark_runs/"
SAM_BENCHMARK_OUTPUT_ROOT = BENCHMARK_OUTPUT_ROOT + "sam_benchmark_runs"
COAMPS_BENCHMARK_OUTPUT_ROOT = BENCHMARK_OUTPUT_ROOT + "les_runs"
WRF_LASSO_BENCHMARK_OUTPUT_ROOT = BENCHMARK_OUTPUT_ROOT + "wrf_lasso_runs"
ARCHIVED_CLUBB_OUTPUT_ROOT = BENCHMARK_OUTPUT_ROOT + "archived_clubb_runs"
R408_OUTPUT_ROOT = BENCHMARK_OUTPUT_ROOT + ""
HOC_OUTPUT_ROOT = BENCHMARK_OUTPUT_ROOT + "HOC_20051217"
# These folders are passed in as command line parameters.
# They are not capitalized because they are not intended to
# be final, i.e. they are changed depending on the cmd line args.
e3sm_output_root = ""
sam_output_root = ""
wrf_output_root = ""
cam_output_root = ""
clubb_output_root = ""
# ---------------------------
# These are all the names that represent the height variable within different models
HEIGHT_VAR_NAMES = ['z', 'Z3', 'altitude', 'lev', 'CSP_Zm', 'CSP_Z8Wm'] # CSP_* added for WRF-LASSO cases
TIME_VAR_NAMES = ['time', 'XTIME']
"""
To plot only a subset of cases, regardless of what output exists
in the clubb folder, uncomment the last line of this file and
fill that array with the cases you'd like to plot. This overwrites the
CASES_TO_PLOT variable such that pyplotgen will only know about cases in that
list and ignore all others. The name must match the python variable name
below (all caps).
For example, to plot only bomex and fire:
CASES_TO_PLOT = [BOMEX, FIRE]
"""
ARM = {'name': 'arm',
'description': "Output may differ from plotgen in some models (e.g. WRF) due to a difference in the time "
"averaging interval.",
'start_time': 481, 'end_time': 540,
'height_min_value': 0, 'height_max_value': 3500,
'blacklisted_vars': ['radht'],
'sam_benchmark_file': {'sam_benchmark': SAM_BENCHMARK_OUTPUT_ROOT + "/JULY_2017/ARM_96x96x110/GCSSARM_96x96x110_67m_40m_1s.nc"},
'clubb_file': {'zm': clubb_output_root + '/arm_zm.nc',
'zt': clubb_output_root + '/arm_zt.nc',
'sfc': clubb_output_root + '/arm_sfc.nc'},
'coamps_benchmark_file': {'sm': COAMPS_BENCHMARK_OUTPUT_ROOT + "/arm_coamps_sm.nc",
'sw': COAMPS_BENCHMARK_OUTPUT_ROOT + "/arm_coamps_sw.nc"},
'wrf_benchmark_file': None,
'clubb_r408_benchmark_file': {'zm': R408_OUTPUT_ROOT + '/Chris_Golaz_best_ever/arm_zm.nc',
'zt': R408_OUTPUT_ROOT + '/Chris_Golaz_best_ever/arm_zt.nc',
'sfc': R408_OUTPUT_ROOT + '/Chris_Golaz_best_ever/arm_sfc.nc'},
'clubb_hoc_benchmark_file': {'zm': HOC_OUTPUT_ROOT + '/arm_zm.nc',
'zt': HOC_OUTPUT_ROOT + '/arm_zt.nc',
'sfc': HOC_OUTPUT_ROOT + '/arm_sfc.nc'},
'e3sm_file': { 'e3sm': e3sm_output_root + "/arm.nc"},
'cam_file': None,
'sam_file': {'sam': sam_output_root + "/GCSSARM_96x96x110_67m_40m_1s.nc"},
'wrf_file': {'zm': wrf_output_root + "/arm_zm_wrf.nc",
'zt': wrf_output_root + "/arm_zt_wrf.nc",
'sfc': wrf_output_root + "/arm_sfc_wrf.nc"
},
'var_groups': [VariableGroupBase, VariableGroupWs]}
ARM_97 = {'name': 'arm_97',
'description': "",
'start_time': 4321, 'end_time': 5580,
'height_min_value': 0, 'height_max_value': 18000,
'blacklisted_vars': ['rtp3', 'Skrt_zt', 'Skthl_zt', 'thlp3', 'rtpthvp', 'thlpthvp'],
'sam_benchmark_file': {'sam_benchmark': SAM_BENCHMARK_OUTPUT_ROOT +
"/ARM97_r1315_128x128x128_1km_Morrison/ARM9707.nc"},
'clubb_file': {'zm': clubb_output_root + '/arm_97_zm.nc',
'zt': clubb_output_root + '/arm_97_zt.nc',
'sfc': clubb_output_root + '/arm_97_sfc.nc',
'subcolumns': clubb_output_root + '/arm_97_nl_lh_sample_points_2D.nc'},
'coamps_benchmark_file': None,
'wrf_benchmark_file': None,
'clubb_r408_benchmark_file': None,
'clubb_hoc_benchmark_file': None,
'e3sm_file': None,
'cam_file': None,
'sam_file': {'sam': sam_output_root + "/ARM9707_SAM_CLUBB.nc"},
'wrf_file': None,
'var_groups': [VariableGroupBase, VariableGroupWs, VariableGroupLiquidMP, VariableGroupIceMP]}
ASTEX_A209 = {'name': 'astex_a209',
'description': "",
'start_time': 2340, 'end_time': 2400,
'height_min_value': 0, 'height_max_value': 6000,
'blacklisted_vars': [],
'sam_benchmark_file': None,
'clubb_file': {'zm': clubb_output_root + '/astex_a209_zm.nc',
'zt': clubb_output_root + '/astex_a209_zt.nc',
'sfc': clubb_output_root + '/astex_a209_sfc.nc'},
'coamps_benchmark_file': None,
'wrf_benchmark_file': None,
'clubb_r408_benchmark_file': None,
'clubb_hoc_benchmark_file': None,
'e3sm_file': None,
'cam_file': None,
'sam_file': None,
'wrf_file': None,
'var_groups': [VariableGroupBase, VariableGroupWs, VariableGroupLiquidMP, VariableGroupCorrelations,
VariableGroupKKMP]}
ATEX = {'name': 'atex',
'description': "",
'start_time': 421, 'end_time': 480,
'height_min_value': 0, 'height_max_value': 2500,
'blacklisted_vars': [],
'sam_benchmark_file': None,
'clubb_file': {'zm': clubb_output_root + '/atex_zm.nc',
'zt': clubb_output_root + '/atex_zt.nc',
'sfc': clubb_output_root + '/atex_sfc.nc'},
'coamps_benchmark_file': {'sm': COAMPS_BENCHMARK_OUTPUT_ROOT + "/atex_coamps_sm.nc",
'sw': COAMPS_BENCHMARK_OUTPUT_ROOT + "/atex_coamps_sw.nc"},
'wrf_benchmark_file': None,
'clubb_r408_benchmark_file': {'zm': R408_OUTPUT_ROOT + '/Chris_Golaz_best_ever/atex_zm.nc',
'zt': R408_OUTPUT_ROOT + '/Chris_Golaz_best_ever/atex_zt.nc',
'sfc': R408_OUTPUT_ROOT + '/Chris_Golaz_best_ever/atex_sfc.nc'},
'clubb_hoc_benchmark_file': {'zm': HOC_OUTPUT_ROOT + '/atex_zm.nc',
'zt': HOC_OUTPUT_ROOT + '/atex_zt.nc',
'sfc': HOC_OUTPUT_ROOT + '/atex_sfc.nc'},
'e3sm_file': None,
'cam_file': {'cam': cam_output_root + "/atex_cam.nc"},
'sam_file': None,
'wrf_file': {'zm': wrf_output_root + "/atex_zm_wrf.nc",
'zt': wrf_output_root + "/atex_zt_wrf.nc",
'sfc': wrf_output_root + "/atex_sfc_wrf.nc"
},
'var_groups': [VariableGroupBase, VariableGroupWs, VariableGroupLiquidMP, VariableGroupIceMP]}
BOMEX = {'name': 'bomex',
'description': "",
'start_time': 181, 'end_time': 360,
'height_min_value': 0, 'height_max_value': 2500,
'blacklisted_vars': [],
'sam_benchmark_file': {'sam_benchmark': SAM_BENCHMARK_OUTPUT_ROOT +
"/JULY_2017/BOMEX_64x64x75/BOMEX_64x64x75_100m_40m_1s.nc"},
'clubb_file': {'zm': clubb_output_root + '/bomex_zm.nc',
'zt': clubb_output_root + '/bomex_zt.nc',
'sfc': clubb_output_root + '/bomex_sfc.nc'},
'coamps_benchmark_file': {'sm': COAMPS_BENCHMARK_OUTPUT_ROOT + "/bomex_coamps_sm.nc",
'sw': COAMPS_BENCHMARK_OUTPUT_ROOT + "/bomex_coamps_sw.nc"},
'wrf_benchmark_file': None,
'clubb_r408_benchmark_file': {'zm': R408_OUTPUT_ROOT + '/Chris_Golaz_best_ever/bomex_zm.nc',
'zt': R408_OUTPUT_ROOT + '/Chris_Golaz_best_ever/bomex_zt.nc',
'sfc': R408_OUTPUT_ROOT + '/Chris_Golaz_best_ever/bomex_sfc.nc'},
'clubb_hoc_benchmark_file': {'zm': HOC_OUTPUT_ROOT + '/bomex_zm.nc',
'zt': HOC_OUTPUT_ROOT + '/bomex_zt.nc',
'sfc': HOC_OUTPUT_ROOT + '/bomex_sfc.nc'},
'e3sm_file': { 'e3sm': e3sm_output_root + '/bomex.nc'},
'cam_file': None,
'sam_file': {'sam': sam_output_root + "/BOMEX_SAM_CLUBB.nc"},
'wrf_file': {'zm': wrf_output_root + '/bomex_zm_wrf.nc',
'zt': wrf_output_root + '/bomex_zt_wrf.nc',
'sfc': wrf_output_root + '/bomex_sfc_wrf.nc'},
'var_groups': [VariableGroupBase, VariableGroupWs]}
CGILS_S6 = {'name': 'cgils_s6',
'description': "",
'start_time': 12960, 'end_time': 14400,
'height_min_value': 0, 'height_max_value': 5950,
'blacklisted_vars': ['Ngm', 'rgm', 'Skrt_zt', 'Skthl_zt', 'thlp3',
'rtpthvp', 'thlpthvp', 'wprrp', 'wpNrp'],
'sam_benchmark_file': {'sam_benchmark': SAM_BENCHMARK_OUTPUT_ROOT +
"/SAM6.6/CLOUD_FEEDBACK_s6/ctl_s6_96x96x128_100m_DRZ_N100_tqndg.nc"},
'clubb_file': {'zm': clubb_output_root + '/cgils_s6_zm.nc',
'zt': clubb_output_root + '/cgils_s6_zt.nc',
'sfc': clubb_output_root + '/cgils_s6_sfc.nc'},
'coamps_benchmark_file': None,
'wrf_benchmark_file': None,
'clubb_r408_benchmark_file': None,
'clubb_hoc_benchmark_file': None,
'e3sm_file': None,
'cam_file': None,
'sam_file': None,
'wrf_file': None,
'var_groups': [VariableGroupBase, VariableGroupLiquidMP, VariableGroupIceMP]}
CGILS_S11 = {'name': 'cgils_s11',
'description': "",
'start_time': 12960, 'end_time': 14400,
'height_min_value': 0, 'height_max_value': 5950,
'blacklisted_vars': ['Ngm', 'rgm', 'Skthl_zt', 'Skrt_zt', 'rtpthvp', 'thlpthvp', 'wprrp', 'wpNrp'],
'sam_benchmark_file': {'sam_benchmark': SAM_BENCHMARK_OUTPUT_ROOT +
"/SAM6.6/CLOUD_FEEDBACK_s11/ctl_s11_96x96x320_50m_DRZ_N100_ref.nc"},
'clubb_file': {'zm': clubb_output_root + '/cgils_s11_zm.nc',
'zt': clubb_output_root + '/cgils_s11_zt.nc',
'sfc': clubb_output_root + '/cgils_s11_sfc.nc'},
'coamps_benchmark_file': None,
'wrf_benchmark_file': None,
'clubb_r408_benchmark_file': None,
'clubb_hoc_benchmark_file': None,
'e3sm_file': None,
'cam_file': None,
'sam_file': None,
'wrf_file': None,
'var_groups': [VariableGroupBase, VariableGroupLiquidMP, VariableGroupIceMP]}
CGILS_S12 = {'name': 'cgils_s12',
'description': "",
'start_time': 12960, 'end_time': 14400,
'height_min_value': 0, 'height_max_value': 5950,
'blacklisted_vars': ['Ngm', 'rgm', 'Skrt_zt', 'Skthl_zt', 'rtpthvp', 'thlpthvp', 'wprrp', 'wpNrp'],
'sam_benchmark_file': {'sam_benchmark': SAM_BENCHMARK_OUTPUT_ROOT +
"/SAM6.6/CLOUD_FEEDBACK_s12/ctl_s12_96x96x192_25m_DRZ_N100_fixnudge.nc"},
'clubb_file': {'zm': clubb_output_root + '/cgils_s12_zm.nc',
'zt': clubb_output_root + '/cgils_s12_zt.nc',
'sfc': clubb_output_root + '/cgils_s12_sfc.nc'},
'coamps_benchmark_file': None,
'wrf_benchmark_file': None,
'clubb_r408_benchmark_file': None,
'clubb_hoc_benchmark_file': None,
'e3sm_file': None,
'cam_file': None,
'sam_file': None,
'wrf_file': None,
'var_groups': [VariableGroupBase, VariableGroupLiquidMP, VariableGroupIceMP]}
CLEX9_NOV02 = {'name': 'clex9_nov02',
'description': "",
'start_time': 181, 'end_time': 240,
'height_min_value': 4000, 'height_max_value': 6072,
'blacklisted_vars': ['Ngm'],
'sam_benchmark_file': None,
'clubb_file': {'zm': clubb_output_root + '/clex9_nov02_zm.nc',
'zt': clubb_output_root + '/clex9_nov02_zt.nc',
'sfc': clubb_output_root + '/clex9_nov02_sfc.nc'},
'coamps_benchmark_file': {'sm': COAMPS_BENCHMARK_OUTPUT_ROOT + "/clex9_nov02_coamps_sm.nc",
'sw': COAMPS_BENCHMARK_OUTPUT_ROOT + "/clex9_nov02_coamps_sw.nc"},
'wrf_benchmark_file': None,
'clubb_r408_benchmark_file': None,
'clubb_hoc_benchmark_file': None,
'e3sm_file': None,
'cam_file': None,
'sam_file': None,
'wrf_file': None,
'var_groups': [VariableGroupBase, VariableGroupLiquidMP, VariableGroupIceMP]}
CLEX9_OCT14 = {'name': 'clex9_oct14',
'description': "",
'start_time': 181, 'end_time': 240,
'height_min_value': 2230, 'height_max_value': 6688,
'blacklisted_vars': ['Ngm'],
'sam_benchmark_file': None,
'clubb_file': {'zm': clubb_output_root + '/clex9_oct14_zm.nc',
'zt': clubb_output_root + '/clex9_oct14_zt.nc',
'sfc': clubb_output_root + '/clex9_oct14_sfc.nc'},
'coamps_benchmark_file': {'sm': COAMPS_BENCHMARK_OUTPUT_ROOT + "/clex9_oct14_coamps_sm.nc",
'sw': COAMPS_BENCHMARK_OUTPUT_ROOT + "/clex9_oct14_coamps_sw.nc"},
'wrf_benchmark_file': None,
'clubb_r408_benchmark_file': None,
'clubb_hoc_benchmark_file': None,
'e3sm_file': None,
'cam_file': None,
'sam_file': None,
'wrf_file': None,
'var_groups': [VariableGroupBase, VariableGroupLiquidMP, VariableGroupIceMP]}
DYCOMS2_RF01 = {'name': 'dycoms2_rf01',
'description': "",
'start_time': 181, 'end_time': 240,
'height_min_value': 0, 'height_max_value': 1200,
'blacklisted_vars': [],
'sam_benchmark_file': {'sam_benchmark': SAM_BENCHMARK_OUTPUT_ROOT +
"/JULY_2017/DYCOMS_RF01_96x96x320/DYCOMS_RF01_96x96x320.nc"},
'clubb_file': {'zm': clubb_output_root + '/dycoms2_rf01_zm.nc',
'zt': clubb_output_root + '/dycoms2_rf01_zt.nc',
'sfc': clubb_output_root + '/dycoms2_rf01_sfc.nc'},
'coamps_benchmark_file': None,
'wrf_benchmark_file': None,
'clubb_r408_benchmark_file': {'zm': R408_OUTPUT_ROOT + '/Chris_Golaz_best_ever/dycoms2_rf01_zm.nc',
'zt': R408_OUTPUT_ROOT + '/Chris_Golaz_best_ever/dycoms2_rf01_zt.nc',
'sfc': R408_OUTPUT_ROOT + '/Chris_Golaz_best_ever/dycoms2_rf01_sfc.nc'},
'clubb_hoc_benchmark_file': {'zm': HOC_OUTPUT_ROOT + '/dycoms2_rf01_zm.nc',
'zt': HOC_OUTPUT_ROOT + '/dycoms2_rf01_zt.nc',
'sfc': HOC_OUTPUT_ROOT + '/dycoms2_rf01_sfc.nc'},
'e3sm_file': { 'e3sm': e3sm_output_root + "/dycoms2_rf01.nc"},
'cam_file': None,
'sam_file': None,
'wrf_file': None,
'var_groups': [VariableGroupBase, VariableGroupWs]}
DYCOMS2_RF01_FIXED_SST = {'name': 'dycoms2_rf01_fixed_sst',
'description': "Copied from plotgen: Ran with a 5 min timestep and a 48-level grid",
'start_time': 2520, 'end_time': 2700,
'height_min_value': 0, 'height_max_value': 1200,
'blacklisted_vars': ['rtp3', 'Skrt_zt', 'Skthl_zt', 'rtpthvp', 'thlpthvp'],
'sam_benchmark_file': {'sam_benchmark': SAM_BENCHMARK_OUTPUT_ROOT +
"/SAM6.6/DYCOMS_RF01_fixed_sst/DYCOMS_RF01_96x96x320_LES_fixed_sst.nc"},
'clubb_file': {'zm': clubb_output_root + '/dycoms2_rf01_fixed_sst_zm.nc',
'zt': clubb_output_root + '/dycoms2_rf01_fixed_sst_zt.nc',
'sfc': clubb_output_root + '/dycoms2_rf01_fixed_sst_sfc.nc'},
'coamps_benchmark_file': None,
'wrf_benchmark_file': None,
'clubb_r408_benchmark_file': None,
'clubb_hoc_benchmark_file': None,
'e3sm_file': None,
'cam_file': None,
'sam_file': None,
'wrf_file': None,
'var_groups': [VariableGroupBase]}
DYCOMS2_RF02_DO = {'name': 'dycoms2_rf02_do',
'description': "",
'start_time': 301, 'end_time': 360,
'height_min_value': 0, 'height_max_value': 1200,
'blacklisted_vars': [],
'sam_benchmark_file': {'sam_benchmark': SAM_BENCHMARK_OUTPUT_ROOT +
"/JULY_2017/DYCOMS_RF02_128x128x96_dr_nosed/DYCOMS_RF02_128x128x96_dr_nosed.nc"},
'clubb_file': {'zm': clubb_output_root + '/dycoms2_rf02_do_zm.nc',
'zt': clubb_output_root + '/dycoms2_rf02_do_zt.nc',
'sfc': clubb_output_root + '/dycoms2_rf02_do_sfc.nc'},
'coamps_benchmark_file': None,
'wrf_benchmark_file': None,
'clubb_r408_benchmark_file': {'zm': R408_OUTPUT_ROOT + '/Chris_Golaz_best_ever/dycoms2_rf02_do_zm.nc',
'zt': R408_OUTPUT_ROOT + '/Chris_Golaz_best_ever/dycoms2_rf02_do_zt.nc',
'sfc': R408_OUTPUT_ROOT + '/Chris_Golaz_best_ever/dycoms2_rf02_do_sfc.nc'},
'clubb_hoc_benchmark_file': {'zm': HOC_OUTPUT_ROOT + '/dycoms2_rf02_do_zm.nc',
'zt': HOC_OUTPUT_ROOT + '/dycoms2_rf02_do_zt.nc',
'sfc': HOC_OUTPUT_ROOT + '/dycoms2_rf02_do_sfc.nc'},
'e3sm_file': None,
'cam_file': None,
'sam_file': {'sam': sam_output_root + "/DYCOMS_RF02_SAM_CLUBB.nc"},
'wrf_file': None,
'var_groups': [VariableGroupBase, VariableGroupWs, VariableGroupLiquidMP, VariableGroupCorrelations,
VariableGroupKKMP]}
DYCOMS2_RF02_DS = {'name': 'dycoms2_rf02_ds',
'description': "",
'start_time': 301, 'end_time': 360,
'height_min_value': 0, 'height_max_value': 1200,
'blacklisted_vars': [],
'sam_benchmark_file': {'sam_benchmark': SAM_BENCHMARK_OUTPUT_ROOT +
"/JULY_2017/DYCOMS_RF02_128x128x96_dr_sed/DYCOMS_RF02_128x128x96_dr_sed.nc"},
'clubb_file': {'zm': clubb_output_root + '/dycoms2_rf02_ds_zm.nc',
'zt': clubb_output_root + '/dycoms2_rf02_ds_zt.nc',
'sfc': clubb_output_root + '/dycoms2_rf02_ds_sfc.nc'},
'coamps_benchmark_file': None,
'wrf_benchmark_file': None,
'clubb_r408_benchmark_file': None,
'clubb_hoc_benchmark_file': {'zm': HOC_OUTPUT_ROOT + '/dycoms2_rf02_ds_zm.nc',
'zt': HOC_OUTPUT_ROOT + '/dycoms2_rf02_ds_zt.nc',
'sfc': HOC_OUTPUT_ROOT + '/dycoms2_rf02_ds_sfc.nc'},
'e3sm_file': {'e3sm': e3sm_output_root + "/dycoms2_rf02_ds.nc"},
'cam_file': None,
'sam_file': None,
'wrf_file': None,
'var_groups': [VariableGroupBase, VariableGroupWs, VariableGroupLiquidMP, VariableGroupCorrelations,
VariableGroupKKMP]}
DYCOMS2_RF02_ND = {'name': 'dycoms2_rf02_nd',
'description': "Copied from plotgen: ** Generated by doing a restart run after 7200 seconds. Note: "
"t = 0 corresponds to start time of the restart run, not the original run. ** ",
'start_time': 301, 'end_time': 360,
'height_min_value': 0, 'height_max_value': 1200,
'blacklisted_vars': ['wprrp', 'wpNrp', 'corr_w_rr_1', 'corr_w_Nr_1'],
'sam_benchmark_file': {'sam_benchmark': SAM_BENCHMARK_OUTPUT_ROOT +
"/JULY_2017/DYCOMS_RF02_128x128x96_nodr_nosed/DYCOMS_RF02_128x128x96_nodr_nosed.nc"},
'clubb_file': {'zm': clubb_output_root + '/dycoms2_rf02_nd_zm.nc',
'zt': clubb_output_root + '/dycoms2_rf02_nd_zt.nc',
'sfc': clubb_output_root + '/dycoms2_rf02_nd_sfc.nc'},
'coamps_benchmark_file': None,
'wrf_benchmark_file': None,
'clubb_r408_benchmark_file': {'zm': R408_OUTPUT_ROOT + '/Chris_Golaz_best_ever/dycoms2_rf02_nd_zm.nc',
'zt': R408_OUTPUT_ROOT + '/Chris_Golaz_best_ever/dycoms2_rf02_nd_zt.nc',
'sfc': R408_OUTPUT_ROOT + '/Chris_Golaz_best_ever/dycoms2_rf02_nd_sfc.nc'},
'clubb_hoc_benchmark_file': {'zm': HOC_OUTPUT_ROOT + '/dycoms2_rf02_nd_zm.nc',
'zt': HOC_OUTPUT_ROOT + '/dycoms2_rf02_nd_zt.nc',
'sfc': HOC_OUTPUT_ROOT + '/dycoms2_rf02_nd_sfc.nc'},
'e3sm_file': None,
'cam_file': None,
'sam_file': None,
'wrf_file': None,
'var_groups': [VariableGroupBase, VariableGroupWs, VariableGroupLiquidMP, VariableGroupKKMP]}
DYCOMS2_RF02_DS_RESTART = {'name': 'dycoms2_rf02_ds_restart',
'description': "Copied from plotgen: ** Uniform, coarse verticle grid spacing of 40 m. **",
'start_time': 181, 'end_time': 240,
'height_min_value': 0, 'height_max_value': 1200,
'blacklisted_vars': [],
'sam_benchmark_file': {'sam_benchmark': SAM_BENCHMARK_OUTPUT_ROOT +
"/JULY_2017/DYCOMS_RF02_128x128x96_dr_sed/DYCOMS_RF02_128x128x96_dr_sed.nc"},
'clubb_file': {'zm': clubb_output_root + '/dycoms2_rf02_ds_restart_zm.nc',
'zt': clubb_output_root + '/dycoms2_rf02_ds_restart_zt.nc',
'sfc': clubb_output_root + '/dycoms2_rf02_ds_restart_sfc.nc'},
'coamps_benchmark_file': None,
'wrf_benchmark_file': None,
'clubb_r408_benchmark_file': {'zm': R408_OUTPUT_ROOT + '/Chris_Golaz_best_ever/dycoms2_rf02_ds_zm.nc',
'zt': R408_OUTPUT_ROOT + '/Chris_Golaz_best_ever/dycoms2_rf02_ds_zt.nc',
'sfc': R408_OUTPUT_ROOT + '/Chris_Golaz_best_ever/dycoms2_rf02_ds_sfc.nc'},
'clubb_hoc_benchmark_file': {'zm': HOC_OUTPUT_ROOT + '/dycoms2_rf02_ds_zm.nc',
'zt': HOC_OUTPUT_ROOT + '/dycoms2_rf02_ds_zt.nc',
'sfc': HOC_OUTPUT_ROOT + '/dycoms2_rf02_ds_sfc.nc'},
'e3sm_file': None,
'cam_file': None,
'sam_file': None,
'wrf_file': None,
'var_groups': [VariableGroupBase, VariableGroupWs, VariableGroupLiquidMP,
VariableGroupCorrelations, VariableGroupKKMP]}
DYCOMS2_RF02_SO = {'name': 'dycoms2_rf02_so',
'description': "Copied from plotgen: " +
"** WRF-type stretched (unevenly spaced) grid (grid_type = 3) ** ",
'start_time': 301, 'end_time': 360,
'height_min_value': 0, 'height_max_value': 1200,
'blacklisted_vars': ['wprrp', 'wpNrp'],
'sam_benchmark_file': {'sam_benchmark': SAM_BENCHMARK_OUTPUT_ROOT +
"/JULY_2017/DYCOMS_RF02_128x128x96_nodr_sed/DYCOMS_RF02_128x128x96_nodr_sed.nc"},
'clubb_file': {'zm': clubb_output_root + '/dycoms2_rf02_so_zm.nc',
'zt': clubb_output_root + '/dycoms2_rf02_so_zt.nc',
'sfc': clubb_output_root + '/dycoms2_rf02_so_sfc.nc'},
'coamps_benchmark_file': None,
'wrf_benchmark_file': None,
'clubb_r408_benchmark_file': {'zm': R408_OUTPUT_ROOT + '/Chris_Golaz_best_ever/dycoms2_rf02_so_zm.nc',
'zt': R408_OUTPUT_ROOT + '/Chris_Golaz_best_ever/dycoms2_rf02_so_zt.nc',
'sfc': R408_OUTPUT_ROOT + '/Chris_Golaz_best_ever/dycoms2_rf02_so_sfc.nc'},
'clubb_hoc_benchmark_file': {'zm': HOC_OUTPUT_ROOT + '/dycoms2_rf02_so_zm.nc',
'zt': HOC_OUTPUT_ROOT + '/dycoms2_rf02_so_zt.nc',
'sfc': HOC_OUTPUT_ROOT + '/dycoms2_rf02_so_sfc.nc'},
'e3sm_file': None,
'cam_file': None,
'sam_file': {'sam': sam_output_root + "/DYCOMS_RF02_SAM_CLUBB.nc"},
'wrf_file': None,
'var_groups': [VariableGroupBase, VariableGroupWs, VariableGroupLiquidMP, VariableGroupKKMP]}
FIRE = {'name': 'fire',
'description': "",
'start_time': 61, 'end_time': 120,
'height_min_value': 0, 'height_max_value': 1000,
'blacklisted_vars': [],
'sam_benchmark_file': None,
'clubb_file': {'zm': clubb_output_root + '/fire_zm.nc',
'zt': clubb_output_root + '/fire_zt.nc',
'sfc': clubb_output_root + '/fire_sfc.nc'},
'coamps_benchmark_file': {'sm': COAMPS_BENCHMARK_OUTPUT_ROOT + "/fire_coamps_sm.nc",
'sw': COAMPS_BENCHMARK_OUTPUT_ROOT + "/fire_coamps_sw.nc"},
'wrf_benchmark_file': None,
'clubb_r408_benchmark_file': {'zm': R408_OUTPUT_ROOT + '/Chris_Golaz_best_ever/fire_zm.nc',
'zt': R408_OUTPUT_ROOT + '/Chris_Golaz_best_ever/fire_zt.nc',
'sfc': R408_OUTPUT_ROOT + '/Chris_Golaz_best_ever/fire_sfc.nc'},
'clubb_hoc_benchmark_file': {'zm': HOC_OUTPUT_ROOT + "/fire_zm.nc",
'zt': HOC_OUTPUT_ROOT + '/fire_zt.nc',
'sfc': HOC_OUTPUT_ROOT + '/fire_sfc.nc'},
'e3sm_file': None,
'cam_file': None,
'sam_file': None,
'wrf_file': {'zm': wrf_output_root + "/fire_zm_wrf.nc",
'zt': wrf_output_root + "/fire_zt_wrf.nc",
'sfc': wrf_output_root + "/fire_sfc_wrf.nc"
},
'var_groups': [VariableGroupBase, VariableGroupWs]}
# No budgets
GABLS2 = {'name': 'gabls2',
'description': "",
'start_time': 2101, 'end_time': 2160,
'height_min_value': 0, 'height_max_value': 2500,
'blacklisted_vars': ['tau_zm', 'radht', 'Skw_zt', 'Skrt_zt', 'Skthl_zt', 'corr_w_chi_1', 'corr_chi_eta_1',
'rcp2', 'thlpthvp', 'rtpthvp'],
'sam_benchmark_file': None,
'clubb_file': {'zm': clubb_output_root + '/gabls2_zm.nc',
'zt': clubb_output_root + '/gabls2_zt.nc',
'sfc': clubb_output_root + '/gabls2_sfc.nc'},
'coamps_benchmark_file': {'sm': COAMPS_BENCHMARK_OUTPUT_ROOT + "/gabls2_coamps_sm.nc",
'sw': COAMPS_BENCHMARK_OUTPUT_ROOT + "/gabls2_coamps_sw.nc",
'sfc': COAMPS_BENCHMARK_OUTPUT_ROOT + "/gabls2_coamps_sfc.nc"},
'wrf_benchmark_file': None,
'clubb_r408_benchmark_file': None,
'clubb_hoc_benchmark_file': None,
'e3sm_file': None,
'cam_file': None,
'sam_file': None,
'wrf_file': None,
'var_groups': [VariableGroupBase]}
GABLS2_NIGHTLY = {'name': 'gabls2_nightly',
'description': "",
'start_time': 2101, 'end_time': 2160,
'height_min_value': 0, 'height_max_value': 2500,
'blacklisted_vars': [],
'sam_benchmark_file': None,
'clubb_file': {'zm': clubb_output_root + '/gabls2_zm.nc',
'zt': clubb_output_root + '/gabls2_zt.nc',
'sfc': clubb_output_root + '/gabls2_sfc.nc'},
'coamps_benchmark_file': None,
'wrf_benchmark_file': None,
'clubb_r408_benchmark_file': None,
'clubb_hoc_benchmark_file': None,
'e3sm_file': None,
'cam_file': None,
'sam_file': None,
'wrf_file': None,
'var_groups': [VariableGroupBase, VariableGroupScalars]}
GABLS3 = {'name': 'gabls3',
'description': "",
'start_time': 1081, 'end_time': 1200,
'height_min_value': 0, 'height_max_value': 4970,
'blacklisted_vars': [],
'sam_benchmark_file': None,
'clubb_file': {'zm': clubb_output_root + '/gabls3_zm.nc',
'zt': clubb_output_root + '/gabls3_zt.nc',
'sfc': clubb_output_root + '/gabls3_sfc.nc'},
'coamps_benchmark_file': None,
'wrf_benchmark_file': None,
'clubb_r408_benchmark_file': None,
'clubb_hoc_benchmark_file': None,
'e3sm_file': None,
'cam_file': None,
'sam_file': None,
'wrf_file': None,
'var_groups': [VariableGroupBase]}
GABLS3_NIGHT = {'name': 'gabls3_night',
'description': "Copied from plotgen: Uses a 5-min timestep with 48 levels",
'start_time': 421, 'end_time': 480,
'height_min_value': 0, 'height_max_value': 800,
'blacklisted_vars': [],
'sam_benchmark_file': {'sam_benchmark': SAM_BENCHMARK_OUTPUT_ROOT +
"/SAM6.6/GABLS3_NIGHT/gabls3_night.nc"},
'clubb_file': {'zm': clubb_output_root + '/gabls3_night_zm.nc',
'zt': clubb_output_root + '/gabls3_night_zt.nc',
'sfc': clubb_output_root + '/gabls3_night_sfc.nc'},
'coamps_benchmark_file': None,
'wrf_benchmark_file': None,
'clubb_r408_benchmark_file': None,
'clubb_hoc_benchmark_file': None,
'e3sm_file': None,
'cam_file': None,
'sam_file': None,
'wrf_file': None,
'var_groups': [VariableGroupBase]}
GATE_SHEAR_RLSF = {'name': 'gate_shear_rlsf',
'description': "",
'start_time': 540, 'end_time': 720,
'height_min_value': 0, 'height_max_value': 24000,
'blacklisted_vars': [],
'sam_benchmark_file': {'sam_benchmark': SAM_BENCHMARK_OUTPUT_ROOT +
"/SAM6.6/GATE_shear_rlsf/GATE_shear_rlsf_64x64x128_1km_5s.nc"},
'clubb_file': None,
'coamps_benchmark_file': None,
'wrf_benchmark_file': None,
'clubb_r408_benchmark_file': None,
'clubb_hoc_benchmark_file': None,
'e3sm_file': None,
'cam_file': None,
'sam_file': {'sam': sam_output_root + "/GATE_SAM_CLUBB.nc"},
'wrf_file': None,
'var_groups': [VariableGroupBase]}
# Use to plot IOP forced SAM runs
IOP = {'name': 'iop',
'description': "",
'start_time': 181, 'end_time': 1440,
'height_min_value': 0, 'height_max_value': 27750,
'blacklisted_vars': [],
'clubb_datasets': None,
'sam_benchmark_file': None,
'clubb_file': None,
'coamps_benchmark_file': None,
'wrf_benchmark_file': None,
'clubb_r408_benchmark_file': None,
'clubb_hoc_benchmark_file': None,
'e3sm_file': None,
'cam_file': None,
'var_groups': [VariableGroupBase, VariableGroupSamProfiles]}
JUN25_ALTOCU = {'name': 'jun25_altocu',
'description': "",
'start_time': 181, 'end_time': 240,
'height_min_value': 4825, 'height_max_value': 7290,
'blacklisted_vars': ['Ngm', 'wprrp', 'wpNrp'],
'sam_benchmark_file': None,
'clubb_file': {'zm': clubb_output_root + '/jun25_altocu_zm.nc',
'zt': clubb_output_root + '/jun25_altocu_zt.nc',
'sfc': clubb_output_root + '/jun25_altocu_sfc.nc'},
'coamps_benchmark_file': {'sm': COAMPS_BENCHMARK_OUTPUT_ROOT + "/jun25_altocu_qc3_coamps_sm.nc",
'sw': COAMPS_BENCHMARK_OUTPUT_ROOT + "/jun25_altocu_qc3_coamps_sw.nc"},
'wrf_benchmark_file': None,
'clubb_r408_benchmark_file': None,
'clubb_hoc_benchmark_file': None,
'e3sm_file': None,
'cam_file': None,
'sam_file': None,
'wrf_file': None,
'var_groups': [VariableGroupBase, VariableGroupLiquidMP, VariableGroupIceMP]}
LBA = {'name': 'lba',
'description': "Note that sam-plotgen plots up to a height of 16000 not 12000.\n"
"Copied from plotgen: SAM-LES uses Morrison microphysics " +
"and CLUBB standalone uses COAMPS microphysics",
'start_time': 300, 'end_time': 360,
'height_min_value': 0, 'height_max_value': 14000,
'blacklisted_vars': ['wprrp', 'wpNrp', 'Ngm'],
'sam_benchmark_file': {'sam_benchmark': SAM_BENCHMARK_OUTPUT_ROOT +
"/JULY_2017/LBA_128kmx128kmx128_1km_Morrison/LBA_128kmx128kmx128_1km_Morrison.nc"},
'clubb_file': {'zm': clubb_output_root + '/lba_zm.nc',
'zt': clubb_output_root + '/lba_zt.nc',
'sfc': clubb_output_root + '/lba_sfc.nc',
'subcolumns': clubb_output_root + '/lba_nl_lh_sample_points_2D.nc'},
'coamps_benchmark_file': None,
'wrf_benchmark_file': None,
'clubb_r408_benchmark_file': None,
'clubb_hoc_benchmark_file': None,
'e3sm_file': None,
'cam_file': None,
'sam_file': {'sam': sam_output_root + "/LBA_SAM_CLUBB.nc"},
'wrf_file': None,
'var_groups': [VariableGroupBase, VariableGroupLiquidMP, VariableGroupIceMP, VariableGroupWs]}
MC3E = {'name': 'mc3e',
'description': "",
'start_time': 60, 'end_time': 64800,
'height_min_value': 0, 'height_max_value': 18000,
'blacklisted_vars': ['rtp3', 'Skrt_zt', 'Skthl_zt', 'rtpthvp', 'thlpthvp', 'wprrp', 'wpNrp'],
'sam_benchmark_file': {'sam_benchmark': SAM_BENCHMARK_OUTPUT_ROOT +
"/MC3E_r1359_128x128x128_1km_Morrison/MC3E.nc"},
'clubb_file': {'zm': clubb_output_root + '/mc3e_zm.nc',
'zt': clubb_output_root + '/mc3e_zt.nc',
'sfc': clubb_output_root + '/mc3e_sfc.nc',
'subcolumns': clubb_output_root + '/mc3e_nl_lh_sample_points_2D.nc'},
'coamps_benchmark_file': None,
'wrf_benchmark_file': None,
'clubb_r408_benchmark_file': None,
'clubb_hoc_benchmark_file': None,
'e3sm_file': None,
'cam_file': None,
'sam_file': None,
'wrf_file': None,
'var_groups': [VariableGroupBase, VariableGroupLiquidMP, VariableGroupIceMP]}
MPACE_A = {'name': 'mpace_a',
'description': "Copied from plotgen: SAM-LES and CLUBB standalone use Morrison microphysics",
'start_time': 4141, 'end_time': 4320,
'height_min_value': 0, 'height_max_value': 10000,
'blacklisted_vars': ['Skrt_zt', 'Skthl_zt', 'rtpthvp', 'thlpthvp', 'Ngm', 'wpNrp'],
'sam_benchmark_file': {'sam_benchmark': SAM_BENCHMARK_OUTPUT_ROOT +
"/SAM6.6/MPACE_A/MPACE_A_128x128x69_morr_CEM.nc"},
'clubb_file': {'zm': clubb_output_root + '/mpace_a_zm.nc',
'zt': clubb_output_root + '/mpace_a_zt.nc',
'sfc': clubb_output_root + '/mpace_a_sfc.nc'},
'coamps_benchmark_file': None,
'wrf_benchmark_file': None,
'clubb_r408_benchmark_file': None,
'clubb_hoc_benchmark_file': None,
'e3sm_file': None,
'cam_file': None,
'sam_file': None,
'wrf_file': None,
'var_groups': [VariableGroupBase, VariableGroupLiquidMP, VariableGroupIceMP]}
MPACE_B = {'name': 'mpace_b',
'description': "Copied from plotgen: **The nightly simulation uses COAMPS microphysics**",
'start_time': 541, 'end_time': 720,
'height_min_value': 0, 'height_max_value': 2750,
'blacklisted_vars': ['Ngm', 'wpNrp'],
'sam_benchmark_file': None,
'clubb_file': {'zm': clubb_output_root + '/mpace_b_zm.nc',
'zt': clubb_output_root + '/mpace_b_zt.nc',
'sfc': clubb_output_root + '/mpace_b_sfc.nc'},
'coamps_benchmark_file': {'sm': COAMPS_BENCHMARK_OUTPUT_ROOT + "/mpace_b_coamps_sm.nc",
'sw': COAMPS_BENCHMARK_OUTPUT_ROOT + "/mpace_b_coamps_sw.nc",
'sfc': COAMPS_BENCHMARK_OUTPUT_ROOT + "/mpace_b_coamps_sfc.nc"},
'wrf_benchmark_file': None,
'clubb_r408_benchmark_file': None,
'clubb_hoc_benchmark_file': None,
'e3sm_file': None,
'cam_file': None,
'sam_file': None,
'wrf_file': None,
'var_groups': [VariableGroupBase, VariableGroupLiquidMP, VariableGroupIceMP]}
MPACE_B_SILHS = {'name': 'mpace_b_silhs',
'description': "",
'start_time': 541, 'end_time': 720,
'height_min_value': 0, 'height_max_value': 2750,
'blacklisted_vars': ['Ngm', 'wpNrp'],
'sam_benchmark_file': None,
'clubb_file': {'zm': clubb_output_root + '/mpace_b_silhs_zm.nc',
'zt': clubb_output_root + '/mpace_b_silhs_zt.nc',
'sfc': clubb_output_root + '/mpace_b_silhs_sfc.nc',
'subcolumns': clubb_output_root + '/mpace_b_silhs_nl_lh_sample_points_2D.nc'},
'coamps_benchmark_file': {'sm': COAMPS_BENCHMARK_OUTPUT_ROOT + "/mpace_b_coamps_sm.nc",
'sw': COAMPS_BENCHMARK_OUTPUT_ROOT + "/mpace_b_coamps_sw.nc"},
'wrf_benchmark_file': None,
'clubb_r408_benchmark_file': None,
'clubb_hoc_benchmark_file': None,
'e3sm_file': None,
'cam_file': None,
'sam_file': None,
'wrf_file': None,
'var_groups': [VariableGroupBase, VariableGroupLiquidMP, VariableGroupIceMP]}
NOV11_ALTOCU = {'name': 'nov11_altocu',
'description': "",
'start_time': 91, 'end_time': 150,
'height_min_value': 4160, 'height_max_value': 6150,
'blacklisted_vars': ['Ngm'],
'sam_benchmark_file': None,
'clubb_file': {'zm': clubb_output_root + '/nov11_altocu_zm.nc',
'zt': clubb_output_root + '/nov11_altocu_zt.nc',
'sfc': clubb_output_root + '/nov11_altocu_sfc.nc'},
'coamps_benchmark_file': {'sm': COAMPS_BENCHMARK_OUTPUT_ROOT + "/nov11_altocu_coamps_sm.nc",
'sw': COAMPS_BENCHMARK_OUTPUT_ROOT + "/nov11_altocu_coamps_sw.nc"},
'wrf_benchmark_file': None,
'clubb_r408_benchmark_file': {'zm': R408_OUTPUT_ROOT + '/Chris_Golaz_best_ever/nov11_altocu_zm.nc',
'zt': R408_OUTPUT_ROOT + '/Chris_Golaz_best_ever/nov11_altocu_zt.nc',
'sfc': R408_OUTPUT_ROOT + '/Chris_Golaz_best_ever/nov11_altocu_sfc.nc'},
'clubb_hoc_benchmark_file': {'zm': HOC_OUTPUT_ROOT + '/nov11_altocu_zm.nc',
'zt': HOC_OUTPUT_ROOT + '/nov11_altocu_zt.nc',
'sfc': HOC_OUTPUT_ROOT + '/nov11_altocu_sfc.nc'},
'e3sm_file': None,
'cam_file': None,
'sam_file': None,
'wrf_file': None,
'var_groups': [VariableGroupBase, VariableGroupLiquidMP, VariableGroupIceMP]}
RICO = {'name': 'rico',
'description': "Cam output may differ from plotgen due to a difference in time averaging.",
'start_time': 4201, 'end_time': 4320,
'height_min_value': 0, 'height_max_value': 5000,
'blacklisted_vars': [],
'sam_benchmark_file': {'sam_benchmark': SAM_BENCHMARK_OUTPUT_ROOT +
"/JULY_2017/RICO_256x256x100_drizzle/RICO_256x256x100_drizzle.nc"},
'clubb_file': {'zm': clubb_output_root + '/rico_zm.nc',
'zt': clubb_output_root + '/rico_zt.nc',
'sfc': clubb_output_root + '/rico_sfc.nc'},
'coamps_benchmark_file': {'sm': COAMPS_BENCHMARK_OUTPUT_ROOT + "/rico_coamps_sm.nc",
'sw': COAMPS_BENCHMARK_OUTPUT_ROOT + "/rico_coamps_sw.nc"},
'wrf_benchmark_file': None,
'clubb_r408_benchmark_file': None,
'clubb_hoc_benchmark_file': None,
'e3sm_file': {'e3sm': e3sm_output_root + "/rico.nc"},
'cam_file': {'cam': cam_output_root + "/rico_cam.nc"},
'sam_file': {'sam': sam_output_root + "/RICO_256x256x100_drizzle.nc"},
'wrf_file': None,
'var_groups': [VariableGroupBase, VariableGroupLiquidMP, VariableGroupWs, VariableGroupCorrelations,
VariableGroupKKMP]}
RICO_SILHS = {'name': 'rico_silhs',
'description': "Copied from plotgen: CLUBB and SAM use Khairoutdinov-Kogan microphysics",
'start_time': 4201, 'end_time': 4320,
'height_min_value': 0, 'height_max_value': 4500,
'blacklisted_vars': ['wpNrp'],
'sam_benchmark_file': {'sam_benchmark': SAM_BENCHMARK_OUTPUT_ROOT +
"/JULY_2017/RICO_256x256x100_drizzle/RICO_256x256x100_drizzle.nc"},
'clubb_file': {'zm': clubb_output_root + '/rico_silhs_zm.nc',
'zt': clubb_output_root + '/rico_silhs_zt.nc',
'sfc': clubb_output_root + '/rico_silhs_sfc.nc',
'subcolumns': clubb_output_root + '/rico_silhs_nl_lh_sample_points_2D.nc'},
'coamps_benchmark_file': {'sm': COAMPS_BENCHMARK_OUTPUT_ROOT + "/rico_coamps_sm.nc",
'sw': COAMPS_BENCHMARK_OUTPUT_ROOT + "/rico_coamps_sw.nc"},
'wrf_benchmark_file': None,
'clubb_r408_benchmark_file': None,
'clubb_hoc_benchmark_file': None,
'e3sm_file': None,
'cam_file': None,
'sam_file': None,
'wrf_file': None,
'var_groups': [VariableGroupBase, VariableGroupLiquidMP, VariableGroupWs, VariableGroupCorrelations,
VariableGroupKKMP]}
NEUTRAL = {'name': 'neutral',
'description': "",
'start_time': 181, 'end_time': 360,
'height_min_value': 0, 'height_max_value': 1500,
'blacklisted_vars': [],
'sam_benchmark_file': {'sam_benchmark': SAM_BENCHMARK_OUTPUT_ROOT +
"/NEUTRAL/NEUTRAL_96x96x96_32m_10m_LES.nc"},
'clubb_file': {'zm': clubb_output_root + '/neutral_zm.nc',
'zt': clubb_output_root + '/neutral_zt.nc',
'sfc': clubb_output_root + '/neutral_sfc.nc'},
'coamps_benchmark_file': None,
'wrf_benchmark_file': None,
'clubb_r408_benchmark_file': None,
'clubb_hoc_benchmark_file': None,
'e3sm_file': None,
'cam_file': None,
'sam_file': None,
'wrf_file': None,
'var_groups': [VariableGroupBase, VariableGroupWs]}
TWP_ICE = {'name': 'twp_ice',
'description': "Copied from plotgen: Both vertical and horizontal fluxes applied to THLM and RTM for LES. "
"LES nudged U, V, RTM and THLM toward observed values. Forcings for LES derived from 10mb "
"forcing data.",
'start_time': 60, 'end_time': 9900,
'height_min_value': 0, 'height_max_value': 19000,
'blacklisted_vars': ['rtp3', 'Skrt_zt', 'Skthl_zt', 'rtpthvp', 'thlpthvp', 'wprrp', 'wpNrp'],
'sam_benchmark_file': {'sam_benchmark': SAM_BENCHMARK_OUTPUT_ROOT +
"/TWP_ICE_r1315_128x128x128_1km_Morrison/TWP_ICE.nc"},
'clubb_file': {'zm': clubb_output_root + '/twp_ice_zm.nc',
'zt': clubb_output_root + '/twp_ice_zt.nc',
'sfc': clubb_output_root + '/twp_ice_sfc.nc',
'subcolumns': clubb_output_root + '/twp_ice_nl_lh_sample_points_2D.nc'},
'coamps_benchmark_file': None,
'wrf_benchmark_file': None,
'clubb_r408_benchmark_file': None,
'clubb_hoc_benchmark_file': None,
'e3sm_file': None,
'cam_file': None,
'sam_file': None,
'wrf_file': None,
'var_groups': [VariableGroupBase, VariableGroupWs, VariableGroupLiquidMP, VariableGroupIceMP]}
WANGARA = {'name': 'wangara',
'description': "Note that COAMPS benchmark data is actually RAMS data by default.",
'start_time': 181, 'end_time': 240,
'height_min_value': 0, 'height_max_value': 1900,
'blacklisted_vars': ['Ngm'],
'sam_benchmark_file': {'sam_benchmark': SAM_BENCHMARK_OUTPUT_ROOT +
"/WANGARA/WANGARA_64x64x80_100m_40m_LES.nc"},
'clubb_file': {'zm': clubb_output_root + '/wangara_zm.nc',
'zt': clubb_output_root + '/wangara_zt.nc',
'sfc': clubb_output_root + '/wangara_sfc.nc'},
'coamps_benchmark_file': {'sw': COAMPS_BENCHMARK_OUTPUT_ROOT + "/wangara_rams.nc",
'sm': COAMPS_BENCHMARK_OUTPUT_ROOT + "/wangara_rams.nc"},
'wrf_benchmark_file': None,
'clubb_r408_benchmark_file': {'zm': R408_OUTPUT_ROOT + '/Chris_Golaz_best_ever/wangara_zm.nc',
'zt': R408_OUTPUT_ROOT + '/Chris_Golaz_best_ever/wangara_zt.nc',
'sfc': R408_OUTPUT_ROOT + '/Chris_Golaz_best_ever/wangara_sfc.nc'},
'clubb_hoc_benchmark_file': {'zm': HOC_OUTPUT_ROOT + '/wangara_zm.nc',
'zt': HOC_OUTPUT_ROOT + '/wangara_zt.nc',
'sfc': HOC_OUTPUT_ROOT + '/wangara_sfc.nc'},
'e3sm_file': None,
'cam_file': None,
'sam_file': None,
'wrf_file': {'zm': wrf_output_root + "/wangara_zm_wrf.nc",
'zt': wrf_output_root + "/wangara_zt_wrf.nc",
'sfc': wrf_output_root + "/wangara_sfc_wrf.nc"
},
'var_groups': [VariableGroupBase, VariableGroupWs]}
LASSO_20170627 = {'name': 'lasso_20170627',
'description': "Comparing WRF-CLUBB output to WRF-LASSO output.",
'start_time': 301, 'end_time': 600,
'height_min_value': 0, 'height_max_value': 4000,
'blacklisted_vars': [],
'e3sm_file': None,
'cam_file': None,
'sam_file': None,
'wrf_benchmark_file': {'lasso_benchmark':
WRF_LASSO_BENCHMARK_OUTPUT_ROOT + "/2017-06-27/wrf_lasso_stats_2017-06-27.nc"},
'sam_benchmark_file': None,
'coamps_benchmark_file': None,
'clubb_r408_benchmark_file': None,
'clubb_hoc_benchmark_file': None,
'clubb_file': None,
'wrf_file': {'zm': clubb_output_root + '/lasso_2017-06-27_zm_wrf.nc',
'zt': clubb_output_root + '/lasso_2017-06-27_zt_wrf.nc',
'sfc': clubb_output_root + '/lasso_2017-06-27_sfc_wrf.nc',
'subcolumns': clubb_output_root + '/lasso_2017-06-27_nl_lh_sample_points_2D.nc'},
'var_groups': [VariableGroupBase, VariableGroupWs]}
LASSO_20170717 = {'name': 'lasso_20170717',
'description': "Comparing WRF-CLUBB output to WRF-LASSO output.",
'start_time': 301, 'end_time': 600,
'height_min_value': 0, 'height_max_value': 4000,
'blacklisted_vars': [],
'e3sm_file': None,
'cam_file': None,
'sam_file': None,
'wrf_benchmark_file': {'lasso_benchmark':
WRF_LASSO_BENCHMARK_OUTPUT_ROOT + "/2017-07-17/wrf_lasso_stats_2017-07-17.nc"},
'sam_benchmark_file': None,
'coamps_benchmark_file': None,
'clubb_r408_benchmark_file': None,
'clubb_hoc_benchmark_file': None,
'clubb_file': None,
'wrf_file': {'zm': clubb_output_root + '/lasso_2017-07-17_zm_wrf.nc',
'zt': clubb_output_root + '/lasso_2017-07-17_zt_wrf.nc',
'sfc': clubb_output_root + '/lasso_2017-07-17_sfc_wrf.nc',
'subcolumns': clubb_output_root + '/lasso_2017-07-17_nl_lh_sample_points_2D.nc'},
'var_groups': [VariableGroupBase, VariableGroupWs]}
LASSO_20170728 = {'name': 'lasso_20170728',
'description': "Comparing WRF-CLUBB output to WRF-LASSO output.",
'start_time': 301, 'end_time': 600,
'height_min_value': 0, 'height_max_value': 4000,
'blacklisted_vars': [],
'e3sm_file': None,
'cam_file': None,
'sam_file': None,
'wrf_benchmark_file': {'lasso_benchmark':
WRF_LASSO_BENCHMARK_OUTPUT_ROOT + "/2017-07-28/wrf_lasso_stats_2017-07-28.nc"},
'sam_benchmark_file': None,
'coamps_benchmark_file': None,
'clubb_r408_benchmark_file': None,
'clubb_hoc_benchmark_file': None,
'clubb_file': None,
'wrf_file': {'zm': clubb_output_root + '/lasso_2017-07-28_zm_wrf.nc',
'zt': clubb_output_root + '/lasso_2017-07-28_zt_wrf.nc',
'sfc': clubb_output_root + '/lasso_2017-07-28_sfc_wrf.nc',
'subcolumns': clubb_output_root + '/lasso_2017-07-28_nl_lh_sample_points_2D.nc'},
'var_groups': [VariableGroupBase, VariableGroupWs]}
LASSO_20170923 = {'name': 'lasso_20170923',
'description': "Comparing WRF-CLUBB output to WRF-LASSO output.",
'start_time': 301, 'end_time': 600,
'height_min_value': 0, 'height_max_value': 4000,
'blacklisted_vars': [],
'e3sm_file': None,
'cam_file': None,
'sam_file': None,
'wrf_benchmark_file': {'lasso_benchmark':
WRF_LASSO_BENCHMARK_OUTPUT_ROOT + "/2017-09-23/wrf_lasso_stats_2017-09-23.nc"},
'sam_benchmark_file': None,
'coamps_benchmark_file': None,
'clubb_r408_benchmark_file': None,
'clubb_hoc_benchmark_file': None,
'clubb_file': None,
'wrf_file': {'zm': clubb_output_root + '/lasso_2017-09-23_zm_wrf.nc',
'zt': clubb_output_root + '/lasso_2017-09-23_zt_wrf.nc',
'sfc': clubb_output_root + '/lasso_2017-09-23_sfc_wrf.nc',
'subcolumns': clubb_output_root + '/lasso_2017-09-23_nl_lh_sample_points_2D.nc'},
'var_groups': [VariableGroupBase, VariableGroupWs]}
LASSO_20180911 = {'name': 'lasso_20180911',
'description': "Comparing WRF-CLUBB output to WRF-LASSO output.",
'start_time': 301, 'end_time': 600,
'height_min_value': 0, 'height_max_value': 4000,
'blacklisted_vars': [],
'e3sm_file': None,
'cam_file': None,
'sam_file': None,
'wrf_benchmark_file': {'lasso_benchmark':
WRF_LASSO_BENCHMARK_OUTPUT_ROOT + "/2018-09-11/wrf_lasso_stats_2018-09-11.nc"},
'sam_benchmark_file': None,
'coamps_benchmark_file': None,
'clubb_r408_benchmark_file': None,
'clubb_hoc_benchmark_file': None,
'clubb_file': None,
'wrf_file': {'zm': clubb_output_root + '/lasso_2018-09-11_zm_wrf.nc',
'zt': clubb_output_root + '/lasso_2018-09-11_zt_wrf.nc',
'sfc': clubb_output_root + '/lasso_2018-09-11_sfc_wrf.nc',
'subcolumns': clubb_output_root + '/lasso_2018-09-11_nl_lh_sample_points_2D.nc'},
'var_groups': [VariableGroupBase, VariableGroupWs]}
LASSO_20180917 = {'name': 'lasso_20180917',
'description': "Comparing WRF-CLUBB output to WRF-LASSO output.",
'start_time': 301, 'end_time': 600,
'height_min_value': 0, 'height_max_value': 4000,
'blacklisted_vars': [],
'e3sm_file': None,
'cam_file': None,
'sam_file': None,
'wrf_benchmark_file': {'lasso_benchmark':
WRF_LASSO_BENCHMARK_OUTPUT_ROOT + "/2018-09-17/wrf_lasso_stats_2018-09-17.nc"},
'sam_benchmark_file': None,
'coamps_benchmark_file': None,
'clubb_r408_benchmark_file': None,
'clubb_hoc_benchmark_file': None,
'clubb_file': None,
'wrf_file': {'zm': clubb_output_root + '/lasso_2018-09-17_zm_wrf.nc',
'zt': clubb_output_root + '/lasso_2018-09-17_zt_wrf.nc',
'sfc': clubb_output_root + '/lasso_2018-09-17_sfc_wrf.nc',
'subcolumns': clubb_output_root + '/lasso_2018-09-17_nl_lh_sample_points_2D.nc'},
'var_groups': [VariableGroupBase, VariableGroupWs]}
LASSO_20180918 = {'name': 'lasso_20180918',
'description': "Comparing WRF-CLUBB output to WRF-LASSO output.",
'start_time': 301, 'end_time': 600,
'height_min_value': 0, 'height_max_value': 4000,
'blacklisted_vars': [],
'e3sm_file': None,
'cam_file': None,
'sam_file': None,
'wrf_benchmark_file': {'lasso_benchmark':
WRF_LASSO_BENCHMARK_OUTPUT_ROOT + "/2018-09-18/wrf_lasso_stats_2018-09-18.nc"},
'sam_benchmark_file': None,
'coamps_benchmark_file': None,
'clubb_r408_benchmark_file': None,
'clubb_hoc_benchmark_file': None,
'clubb_file': None,
'wrf_file': {'zm': clubb_output_root + '/lasso_2018-09-18_zm_wrf.nc',
'zt': clubb_output_root + '/lasso_2018-09-18_zt_wrf.nc',
'sfc': clubb_output_root + '/lasso_2018-09-18_sfc_wrf.nc',
'subcolumns': clubb_output_root + '/lasso_2018-09-18_nl_lh_sample_points_2D.nc'},
'var_groups': [VariableGroupBase, VariableGroupWs]}
LASSO_20181002 = {'name': 'lasso_20181002',
'description': "Comparing WRF-CLUBB output to WRF-LASSO output.",
'start_time': 301, 'end_time': 600,
'height_min_value': 0, 'height_max_value': 4000,
'blacklisted_vars': [],
'e3sm_file': None,
'cam_file': None,
'sam_file': None,
'wrf_benchmark_file': {'lasso_benchmark':
WRF_LASSO_BENCHMARK_OUTPUT_ROOT + "/2018-10-02/wrf_lasso_stats_2018-10-02.nc"},
'sam_benchmark_file': None,
'coamps_benchmark_file': None,
'clubb_r408_benchmark_file': None,
'clubb_hoc_benchmark_file': None,
'clubb_file': None,
'wrf_file': {'zm': clubb_output_root + '/lasso_2018-10-02_zm_wrf.nc',
'zt': clubb_output_root + '/lasso_2018-10-02_zt_wrf.nc',
'sfc': clubb_output_root + '/lasso_2018-10-02_sfc_wrf.nc',
'subcolumns': clubb_output_root + '/lasso_2018-10-02_nl_lh_sample_points_2D.nc'},
'var_groups': [VariableGroupBase, VariableGroupWs]}
# DO NOT EDIT THIS LIST UNLESS YOU ARE ADDING A NEW CASE. NEVER REMOVE CASES FROM THIS LIST.
# You may define a subset of cases at the end of this file.
ALL_CASES = [ARM, ARM_97, ASTEX_A209, ATEX,
BOMEX,
CGILS_S6, CGILS_S11, CGILS_S12, CLEX9_NOV02, CLEX9_OCT14,
DYCOMS2_RF01, DYCOMS2_RF01_FIXED_SST, DYCOMS2_RF02_DO,
DYCOMS2_RF02_DS, DYCOMS2_RF02_DS_RESTART,
DYCOMS2_RF02_ND, DYCOMS2_RF02_SO,
FIRE,
GABLS2, GABLS2_NIGHTLY, GABLS3, GABLS3_NIGHT, GATE_SHEAR_RLSF,
# IOP,
JUN25_ALTOCU,
LBA,
MC3E, MPACE_A, MPACE_B, MPACE_B_SILHS,
NEUTRAL, NOV11_ALTOCU,
RICO, RICO_SILHS,
TWP_ICE,
WANGARA,
LASSO_20170627, LASSO_20170717, LASSO_20170728, LASSO_20170923,
LASSO_20180911, LASSO_20180917, LASSO_20180918, LASSO_20181002
]
CASES_TO_PLOT = ALL_CASES
# If uncommented, the lines below will override the CASES_TO_PLOT assignment above, forcing pyplotgen to plot only some cases.
# CASES_TO_PLOT = [ARM]
# CASES_TO_PLOT = CASES_TO_PLOT[:3]
| 55.181109
| 135
| 0.56254
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 29,881
| 0.469244
|
a739e22b895dd7f5b68d4cbbe585f6f9e1e16131
| 305
|
py
|
Python
|
docker_sdk_api/domain/services/contracts/abstract_dataset_validator_service.py
|
BMW-InnovationLab/BMW-Semantic-Segmentation-Training-GUI
|
902f35a7e367e635898f687b16a830db892fbaa5
|
[
"Apache-2.0"
] | 20
|
2021-07-13T13:08:57.000Z
|
2022-03-29T09:38:00.000Z
|
docker_sdk_api/domain/services/contracts/abstract_dataset_validator_service.py
|
BMW-InnovationLab/BMW-Semantic-Segmentation-Training-GUI
|
902f35a7e367e635898f687b16a830db892fbaa5
|
[
"Apache-2.0"
] | null | null | null |
docker_sdk_api/domain/services/contracts/abstract_dataset_validator_service.py
|
BMW-InnovationLab/BMW-Semantic-Segmentation-Training-GUI
|
902f35a7e367e635898f687b16a830db892fbaa5
|
[
"Apache-2.0"
] | 2
|
2021-07-12T08:42:53.000Z
|
2022-03-04T18:41:25.000Z
|
from abc import ABC, ABCMeta, abstractmethod
from domain.models.datase_information import DatasetInformation
class AbstractDatasetValidatorService(ABC):
    __metaclass__ = ABCMeta
    @abstractmethod
    def validate_dataset(self, dataset_info: DatasetInformation) -> None: raise NotImplementedError
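# Hedged sketch (not part of the original contract module): a minimal concrete subclass
# showing how the abstract method is fulfilled; a real implementation would inspect
# dataset_info and raise on invalid datasets instead of accepting everything.
class NoOpDatasetValidatorService(AbstractDatasetValidatorService):
    def validate_dataset(self, dataset_info: DatasetInformation) -> None:
        # Accept every dataset unconditionally.
        return None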
| 27.727273
| 99
| 0.816393
| 192
| 0.629508
| 0
| 0
| 115
| 0.377049
| 0
| 0
| 0
| 0
|
a739f43b0588186a90f5d8f8245209820d58a6a6
| 1,683
|
py
|
Python
|
setup.py
|
eltonn/toki
|
22efd9ce84414380904e3a5ac84e84de9bdb5bce
|
[
"Apache-2.0"
] | 1
|
2020-11-30T16:52:50.000Z
|
2020-11-30T16:52:50.000Z
|
setup.py
|
eltonn/toki
|
22efd9ce84414380904e3a5ac84e84de9bdb5bce
|
[
"Apache-2.0"
] | 7
|
2020-05-29T23:22:21.000Z
|
2020-11-30T20:49:37.000Z
|
setup.py
|
eltonn/toki
|
22efd9ce84414380904e3a5ac84e84de9bdb5bce
|
[
"Apache-2.0"
] | 1
|
2020-04-29T21:59:25.000Z
|
2020-04-29T21:59:25.000Z
|
"""The setup script."""
from setuptools import find_packages, setup
with open('README.md') as readme_file:
readme = readme_file.read()
with open('docs/release-notes.md') as history_file:
history = history_file.read()
requirements = []
dev_requirements = [
# lint and tools
'black',
'flake8',
'isort',
'mypy',
'pre-commit',
'seed-isort-config',
# publishing
're-ver',
'twine',
# docs
'jupyter-book',
'Sphinx>=2.0,<3',
# tests
'responses',
# devops
'docker-compose',
]
extra_requires = {'dev': requirements + dev_requirements}
setup(
author="Ivan Ogasawara",
author_email='ivan.ogasawara@gmail.com',
classifiers=[
'Development Status :: 2 - Pre-Alpha',
'Intended Audience :: Developers',
'License :: OSI Approved :: Apache Software License',
'Natural Language :: English',
"Programming Language :: Python :: 2",
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
],
description="Toki: Database Expression API",
install_requires=requirements,
license="Apache Software License 2.0",
long_description=readme + '\n\n' + history,
include_package_data=True,
keywords='toki',
name='toki',
packages=find_packages(include=['toki']),
test_suite='tests',
extras_require=extra_requires,
url='https://github.com/toki-project/toki',
version='0.0.1',
zip_safe=False,
)
| 26.296875
| 61
| 0.616756
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 840
| 0.499109
|
a73aed88b329c068d8782d3c38cdfcf8ff4be7a3
| 3,109
|
py
|
Python
|
dq0/sdk/estimators/data_handler/csv.py
|
gradientzero/dq0-sdk
|
90856dd5ac56216971ffe33004447fd037a21660
|
[
"0BSD"
] | 2
|
2020-09-16T09:28:00.000Z
|
2021-03-18T21:26:29.000Z
|
dq0/sdk/estimators/data_handler/csv.py
|
gradientzero/dq0-sdk
|
90856dd5ac56216971ffe33004447fd037a21660
|
[
"0BSD"
] | 22
|
2020-04-15T10:19:33.000Z
|
2022-03-12T00:20:57.000Z
|
dq0/sdk/estimators/data_handler/csv.py
|
gradientzero/dq0-sdk
|
90856dd5ac56216971ffe33004447fd037a21660
|
[
"0BSD"
] | null | null | null |
# -*- coding: utf-8 -*-
"""Base data handler.
Copyright 2021, Gradient Zero
All rights reserved
"""
import logging
import dq0.sdk
from dq0.sdk.estimators.data_handler.base import BasicDataHandler
import pandas as pd
from sklearn.model_selection import train_test_split
logger = logging.getLogger(__name__)
class CSVDataHandler(BasicDataHandler):
"""Basic CSV Data Handler for all estimators"""
def __init__(self, pipeline_steps=None, pipeline_config_path=None, transformers_root_dir='.', log_key_string='.'):
super().__init__(pipeline_steps=pipeline_steps, pipeline_config_path=pipeline_config_path, transformers_root_dir=transformers_root_dir,
log_key_string=log_key_string)
self.log_key_string = log_key_string
def setup_data(self, data_source, train_size=0.66, **kwargs):
""" Setup data from CSV file. Using the CSV data source.
"""
# Check if the data source is of expected type
if not isinstance(data_source, dq0.sdk.data.text.csv.CSV):
raise ValueError("data_source attached to estimator and handled by the CSV data handler is not of Type: dq0.sdk.data.text.csv.CSV but: {}".format(type(data_source))) # noqa
if not hasattr(data_source, 'feature_cols') and not hasattr(data_source, 'target_cols'):
raise ValueError("CSV data source has not attribute feature_cols or target_cols. Please set this values on init or in the metadata")
self.data = super().setup_data(data_source=data_source, **kwargs)
# Check type of data, must be pandas.DataFrame
if not isinstance(self.data, pd.DataFrame):
raise ValueError("Data loaded is not of type pandas.DataFrame, but: {}".format(type(self.data)))
# run pipeline
if self.pipeline is not None:
self.data = self.pipeline.fit_transform(self.data)
X = self._get_X(self.data, data_source.feature_cols)
y = self._get_y(self.data, data_source.target_cols)
X_train, X_test, y_train, y_test = self._train_test_split(X, y, train_size=train_size)
return X_train, X_test, y_train, y_test
def get_input_dim(self, X):
if not len(X.shape) == 2:
raise ValueError("Feature Vector X is not 2-dim. The CSVDataHandler can only handle 2-dim DFs")
return X.shape[-1]
def get_output_dim(self, y):
return len(y.unique())
def _get_X(self, data, feature_cols):
"""Get X features vectors assuming data is a Pandas DataFrame"""
return data[feature_cols]
def _get_y(self, data, target_cols):
"""Get y target vector assuming data is a Pandas DataFrame"""
if len(target_cols) == 1:
return data[target_cols[-1]]
else:
raise ValueError("CSVDataHandler currently only supports one target_col (Check Metadata!); len(target_cols): {}".format(len(target_cols)))
def _train_test_split(self, X, y, train_size=0.66):
X_train, X_test, y_train, y_test = train_test_split(X, y, train_size=train_size)
return X_train, X_test, y_train, y_test
| 42.013514
| 184
| 0.690254
| 2,794
| 0.898681
| 0
| 0
| 0
| 0
| 0
| 0
| 945
| 0.303956
|
59519f91376cf89bef0bf6bdc3580d8bfb999e90
| 808
|
py
|
Python
|
python/163_missing_ranges.py
|
liaison/LeetCode
|
8b10a1f6bbeb3ebfda99248994f7c325140ee2fd
|
[
"MIT"
] | 17
|
2016-03-01T22:40:53.000Z
|
2021-04-19T02:15:03.000Z
|
python/163_missing_ranges.py
|
liaison/LeetCode
|
8b10a1f6bbeb3ebfda99248994f7c325140ee2fd
|
[
"MIT"
] | null | null | null |
python/163_missing_ranges.py
|
liaison/LeetCode
|
8b10a1f6bbeb3ebfda99248994f7c325140ee2fd
|
[
"MIT"
] | 3
|
2019-03-07T03:48:43.000Z
|
2020-04-05T01:11:36.000Z
|
from typing import List
class Solution:
def findMissingRanges(self, nums: List[int], lower: int, upper: int) -> List[str]:
range_iter = lower
num_iter = 0
ranges = []
while range_iter < upper and num_iter < len(nums):
if range_iter < nums[num_iter]:
if nums[num_iter] - 1 == range_iter:
ranges.append(str(range_iter))
else:
ranges.append("{}->{}".format(range_iter, nums[num_iter]-1))
range_iter = nums[num_iter] + 1
num_iter += 1
if num_iter >= len(nums) and range_iter == upper:
ranges.append("{}".format(range_iter))
elif range_iter < upper:
ranges.append("{}->{}".format(range_iter, upper))
return ranges
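# Hedged usage sketch (not part of the submission): for nums=[0, 1, 3, 50, 75] with
# lower=0 and upper=99, the uncovered values are reported as single numbers or "a->b" ranges.
if __name__ == "__main__":
    print(Solution().findMissingRanges([0, 1, 3, 50, 75], 0, 99))
    # expected: ['2', '4->49', '51->74', '76->99']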
| 36.727273
| 86
| 0.516089
| 808
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 20
| 0.024752
|
595209a149b488a190b55a28e227e0653341e30a
| 407
|
py
|
Python
|
core/utils/template_updater.py
|
blockomat2100/vulnman
|
835ff3aae1168d8e2fa5556279bc86efd2e46472
|
[
"MIT"
] | 3
|
2021-12-22T07:02:24.000Z
|
2022-01-27T20:19:11.000Z
|
core/utils/template_updater.py
|
vulnman/vulnman
|
d48ee022bc0e4368060a990a527b1c7a5e437504
|
[
"MIT"
] | 44
|
2021-12-14T07:24:29.000Z
|
2022-03-23T07:01:16.000Z
|
core/utils/template_updater.py
|
blockomat2100/vulnman
|
835ff3aae1168d8e2fa5556279bc86efd2e46472
|
[
"MIT"
] | 1
|
2022-01-21T16:29:56.000Z
|
2022-01-21T16:29:56.000Z
|
import os
from django.conf import settings
from git import Repo
def update_vulnerability_templates():
template_dir = os.path.join(
settings.BASE_DIR, "resources/vuln_templates")
if os.path.isdir(template_dir):
repo = Repo(template_dir)
origin = repo.remotes.origin
origin.pull()
else:
Repo.clone_from(settings.VULNERABILITY_TEMPLATE_REPO, template_dir)
| 27.133333
| 75
| 0.712531
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 26
| 0.063882
|
5952b761ae49fba6ea7b48e61c02b4ec86ac2f3e
| 209
|
py
|
Python
|
Isomorphic Strings.py
|
HalShaw/Leetcode
|
27c52aac5a8ecc5b5f02e54096a001920661b4bb
|
[
"MIT"
] | 1
|
2016-12-22T04:09:25.000Z
|
2016-12-22T04:09:25.000Z
|
Isomorphic Strings.py
|
HalShaw/Leetcode
|
27c52aac5a8ecc5b5f02e54096a001920661b4bb
|
[
"MIT"
] | null | null | null |
Isomorphic Strings.py
|
HalShaw/Leetcode
|
27c52aac5a8ecc5b5f02e54096a001920661b4bb
|
[
"MIT"
] | null | null | null |
class Solution(object):
def isIsomorphic(self, s, t):
"""
:type s: str
:type t: str
:rtype: bool
"""
        return list(map(s.index, s)) == list(map(t.index, t))  # any strings sharing the same index pattern are isomorphic (list() needed for Python 3 comparison)
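# Hedged usage sketch (not part of the submission): "egg"/"add" share the index pattern
# [0, 1, 1], while "foo"/"bar" do not.
if __name__ == "__main__":
    print(Solution().isIsomorphic("egg", "add"))  # True
    print(Solution().isIsomorphic("foo", "bar"))  # False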
| 20.9
| 54
| 0.473684
| 215
| 0.955556
| 0
| 0
| 0
| 0
| 0
| 0
| 103
| 0.457778
|
5952c5d9520173eb54626c3cf8e791dbdc5d7f03
| 656
|
py
|
Python
|
pages/basket_page.py
|
Espad/stepik_autotests_final_tasks
|
2d9e3408766cc00387a8ddd656006556cce567b4
|
[
"MIT"
] | null | null | null |
pages/basket_page.py
|
Espad/stepik_autotests_final_tasks
|
2d9e3408766cc00387a8ddd656006556cce567b4
|
[
"MIT"
] | null | null | null |
pages/basket_page.py
|
Espad/stepik_autotests_final_tasks
|
2d9e3408766cc00387a8ddd656006556cce567b4
|
[
"MIT"
] | null | null | null |
from .base_page import BasePage
from .locators import BasketPageLocators
class BasketPage(BasePage):
def should_be_empty_basket_message(self):
assert self.is_element_present(*BasketPageLocators.BASKET_EMPTY_MESSAGE), \
"Empty basket message element not found on page"
assert self.browser.find_element(*BasketPageLocators.BASKET_EMPTY_MESSAGE).text == "Your basket is empty. Continue shopping", \
"Invalid Basket empty message"
def should_be_empty_basket(self):
assert self.is_not_element_present(*BasketPageLocators.BASKET_ITEM_EXIST_SELECTOR), \
"Busket is not empty, but should be"
| 41
| 135
| 0.745427
| 580
| 0.884146
| 0
| 0
| 0
| 0
| 0
| 0
| 155
| 0.23628
|
5955db7626231d3711353993b2796474b288c67c
| 169
|
py
|
Python
|
tests/collaboration/factories.py
|
cad106uk/market-access-api
|
a357c33bbec93408b193e598a5628634126e9e99
|
[
"MIT"
] | null | null | null |
tests/collaboration/factories.py
|
cad106uk/market-access-api
|
a357c33bbec93408b193e598a5628634126e9e99
|
[
"MIT"
] | 51
|
2018-05-31T12:16:31.000Z
|
2022-03-08T09:36:48.000Z
|
tests/collaboration/factories.py
|
cad106uk/market-access-api
|
a357c33bbec93408b193e598a5628634126e9e99
|
[
"MIT"
] | 2
|
2019-12-24T09:47:42.000Z
|
2021-02-09T09:36:51.000Z
|
import factory
from api.collaboration.models import TeamMember
class TeamMemberFactory(factory.django.DjangoModelFactory):
class Meta:
model = TeamMember
| 18.777778
| 59
| 0.781065
| 102
| 0.60355
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
595945cb1c25f789695dd2fae8ba200ee3b77c80
| 1,454
|
py
|
Python
|
trypython/extlib/aiohttp/aiohttp01.py
|
devlights/try-python-extlib
|
9bfb649d3f5b249b67991a30865201be794e29a9
|
[
"MIT"
] | null | null | null |
trypython/extlib/aiohttp/aiohttp01.py
|
devlights/try-python-extlib
|
9bfb649d3f5b249b67991a30865201be794e29a9
|
[
"MIT"
] | null | null | null |
trypython/extlib/aiohttp/aiohttp01.py
|
devlights/try-python-extlib
|
9bfb649d3f5b249b67991a30865201be794e29a9
|
[
"MIT"
] | null | null | null |
"""
Sample of the aiohttp module
About its basic usage
REFERENCES:: http://bit.ly/2O2lmeU
http://bit.ly/2O08oy3
"""
import asyncio
from asyncio import Future
from typing import List, Dict
import aiohttp
from trypython.common.commoncls import SampleBase
async def fetch_async(index: int, url: str) -> Dict:
async with aiohttp.ClientSession() as session:
async with session.get(url) as response:
html = await response.read()
return {
'index': index,
'resp': response,
'length': len(html),
'url': url
}
def build_futures() -> List[Future]:
urls = [
'https://www.google.co.jp/',
'https://stackoverflow.com/',
'https://www.yahoo.co.jp/',
'https://devlights.hatenablog.com/',
'https://docs.python.org/3.7/index.html',
'https://docs.python.org/ja/3/'
]
futures = [asyncio.ensure_future(fetch_async(i, url)) for i, url in enumerate(urls, start=1)]
return futures
class Sample(SampleBase):
def exec(self):
        # Use asyncio.gather if you want the results in their original order
future = asyncio.wait(build_futures(), return_when=asyncio.ALL_COMPLETED)
done, pending = asyncio.get_event_loop().run_until_complete(future)
for r in done:
tr = r.result()
print(f'{tr["index"]} {tr["url"]} {tr["length"]} bytes')
def go():
obj = Sample()
obj.exec()
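# Hedged variant (not part of the original sample): asyncio.gather returns results in
# the order the futures were created, so no per-result index bookkeeping is needed.
def go_ordered():
    results = asyncio.get_event_loop().run_until_complete(asyncio.gather(*build_futures()))
    for tr in results:
        print(f'{tr["index"]} {tr["url"]} {tr["length"]} bytes')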
| 25.068966
| 97
| 0.592847
| 408
| 0.26528
| 0
| 0
| 0
| 0
| 358
| 0.23277
| 495
| 0.321847
|
595abb6fdb13a008e2f80cf057085a05a97b14a8
| 1,860
|
py
|
Python
|
models.py
|
camerongray1515/HackDee-2015
|
6459c5bd3ad895e0a216ff61342eb73877dc9ee5
|
[
"MIT"
] | null | null | null |
models.py
|
camerongray1515/HackDee-2015
|
6459c5bd3ad895e0a216ff61342eb73877dc9ee5
|
[
"MIT"
] | 1
|
2015-04-04T20:55:52.000Z
|
2015-12-17T23:35:08.000Z
|
models.py
|
camerongray1515/HackDee-2015
|
6459c5bd3ad895e0a216ff61342eb73877dc9ee5
|
[
"MIT"
] | null | null | null |
from sqlalchemy import Column, String, Boolean, ForeignKey, Integer
from sqlalchemy.orm import relationship
from database import Base
from string import ascii_letters
from random import choice
class Playlist(Base):
__tablename__ = "playlists"
id = Column(String, primary_key=True)
name = Column(String)
def __init__(self, name):
generate = True
while generate:
random_string = "".join(choice(ascii_letters) for i in range(5))
p = Playlist.query.get(random_string)
# Only set value and exit loop if the id is not already in use
if p == None:
generate = False
self.id = random_string
self.name = name
@staticmethod
def get_videos(playlist_id):
videos = Video.query.filter(Video.playlist_id==playlist_id).order_by("rank desc")
playlist = []
for video in videos:
playlist_entry = {
"playlist_id": playlist_id,
"slug": video.slug,
"thumbnail_url": video.thumbnail_url,
"title": video.title,
"rank": video.rank
}
playlist.append(playlist_entry)
return playlist
    def __repr__(self):
return "<Playlist ID:{0}, Name:{1}>".format(self.id, self.name)
class Video(Base):
__tablename__ = "video"
id = Column(Integer, primary_key=True)
playlist_id = Column(String, ForeignKey(Playlist.id))
playlist = relationship("Playlist")
slug = Column(String)
thumbnail_url = Column(String)
title = Column(String)
rank = Column(Integer)
def __init__(self, playlist_id, slug, thumbnail_url, title):
self.playlist_id = playlist_id
self.slug = slug
self.thumbnail_url = thumbnail_url
self.title = title
self.rank = 0
| 29.0625
| 89
| 0.614516
| 1,661
| 0.893011
| 0
| 0
| 510
| 0.274194
| 0
| 0
| 179
| 0.096237
|
595b940d98d4c9ba62ad1e7789fd5ad05f9b32ef
| 3,270
|
py
|
Python
|
Python3/726.py
|
rakhi2001/ecom7
|
73790d44605fbd51e8f7e804b9808e364fcfc680
|
[
"MIT"
] | 854
|
2018-11-09T08:06:16.000Z
|
2022-03-31T06:05:53.000Z
|
Python3/726.py
|
rakhi2001/ecom7
|
73790d44605fbd51e8f7e804b9808e364fcfc680
|
[
"MIT"
] | 29
|
2019-06-02T05:02:25.000Z
|
2021-11-15T04:09:37.000Z
|
Python3/726.py
|
rakhi2001/ecom7
|
73790d44605fbd51e8f7e804b9808e364fcfc680
|
[
"MIT"
] | 347
|
2018-12-23T01:57:37.000Z
|
2022-03-12T14:51:21.000Z
|
__________________________________________________________________________________________________
sample 24 ms submission
import collections
class Solution:
def countOfAtoms(self, formula: str) -> str:
stack, atom, dic, count, coeff, c = [], '', collections.defaultdict(int), 0, 1, 0
for i in formula[::-1]:
if i.isdigit():
count += int(i) * (10 ** c)
c += 1
elif i == ')':
stack.append(count)
coeff *= count
count = c = 0
elif i == '(':
coeff //= stack.pop()
count = c = 0
elif i.isupper():
atom += i
dic[atom[::-1]] += (count or 1) * coeff
atom = ''
count = c = 0
else:
atom += i
check = []
for atom in dic:
check.append((atom, dic[atom]))
check.sort(key=lambda x:x[0])
ans = ''
for atom, count in check:
ans += atom
if count > 1:
ans += str(count)
return ans
__________________________________________________________________________________________________
sample 13188 kb submission
class Solution:
def multiply(self,multiplier):
product=1
for c in multiplier:
product = product * c
return product
def sort_answer(self,dic):
output=[]
for key,val in sorted(dic.items()):
output.append(key)
if val==1:
continue
output.append(str(val))
return ''.join(output)
def countOfAtoms(self, formula: str) -> str:
from collections import defaultdict
multAr = []
parentheses=0
d=defaultdict(int)
element=None
elems=[]
num=None
i=len(formula)-1
while i>=0:
if formula[i].isdigit():
if i==len(formula)-1 or num==None:
num=formula[i]
else:
num= formula[i]+num
if formula[i].islower():
element = formula[i-1:i+1]
if num==None:
d[element]+= self.multiply(multAr)
else:
d[element]+=int(num) * self.multiply(multAr)
num=None
i-=2
elems.append(element)
continue
elif formula[i].isupper():
element=formula[i]
if num==None:
d[element]+= self.multiply(multAr)
else:
d[element]+=int(num) * self.multiply(multAr)
num=None
elems.append(element)
elif formula[i]==')':
multAr.append(int(num))
num=None
elif formula[i]=='(':
multAr.pop()
i-=1
return self.sort_answer(d)
__________________________________________________________________________________________________
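# Hedged usage note (not part of either submission; the separator lines above keep this
# file from being imported directly): copied into a clean module, either Solution gives
#     Solution().countOfAtoms("Mg(OH)2")  # -> "H2MgO2"
# i.e. parentheses are expanded and atoms are listed in sorted order, with counts greater
# than one appended after the element symbol.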
| 30
| 98
| 0.45107
| 2,920
| 0.892966
| 0
| 0
| 0
| 0
| 0
| 0
| 20
| 0.006116
|
595ecf0b3419dbc932591ff7beb5487e3db35f47
| 932
|
py
|
Python
|
script/rmLinebyIndFile.py
|
ASLeonard/danbing-tk
|
15540124ff408777d0665ace73698b0c2847d1cc
|
[
"BSD-3-Clause"
] | 17
|
2020-08-16T14:28:11.000Z
|
2022-03-23T23:30:47.000Z
|
script/rmLinebyIndFile.py
|
ASLeonard/danbing-tk
|
15540124ff408777d0665ace73698b0c2847d1cc
|
[
"BSD-3-Clause"
] | 7
|
2021-01-25T15:26:18.000Z
|
2022-03-31T14:30:46.000Z
|
script/rmLinebyIndFile.py
|
ASLeonard/danbing-tk
|
15540124ff408777d0665ace73698b0c2847d1cc
|
[
"BSD-3-Clause"
] | 2
|
2020-11-01T20:41:38.000Z
|
2021-05-29T03:22:24.000Z
|
#!/usr/bin/env python3
import sys
import numpy as np
if len(sys.argv) == 1 or sys.argv[1] == "-h" or sys.argv[1] == "--help":
print(
"""
Remove line indices (0-based) specified in 'index.txt'
usage: program [-k] index.txt inFile
-k Keep line indices in 'index.txt' instead of removing them.
""")
sys.exit()
rm = True
idxf = ""
infile = ""
for i, v in enumerate(sys.argv):
if i == 0:
continue
elif v == "-k":
rm = False
elif not idxf:
idxf = v
elif not infile:
infile = v
else:
assert False, f"too many arguments {v}"
if not idxf:
assert False, "index.txt not specified"
if not infile:
assert False, "inFile not specified"
ids = set(np.loadtxt(idxf, dtype=int, ndmin=1).tolist())
with open(infile) as f:
ind = 0
for line in f:
if (ind not in ids) == rm:
print(line, end='')
ind += 1
| 22.731707
| 78
| 0.55794
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 306
| 0.328326
|
595f827df47c5f2bdd1ecfb6bc095d61ca198a03
| 538
|
py
|
Python
|
dynaban/tests/postion.py
|
laukik-hase/imitation_of_human_arm_on_robotic_manipulator
|
995beb1ab41597ca6cbecd0baecdef1ef13450f9
|
[
"MIT"
] | 3
|
2021-11-13T16:54:31.000Z
|
2021-11-13T20:50:18.000Z
|
dynaban/tests/postion.py
|
laukik-hase/human_arm_imitation
|
995beb1ab41597ca6cbecd0baecdef1ef13450f9
|
[
"MIT"
] | null | null | null |
dynaban/tests/postion.py
|
laukik-hase/human_arm_imitation
|
995beb1ab41597ca6cbecd0baecdef1ef13450f9
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
import arm_control_utils
DURATION = 30000
TRAJ_POLY1 = [1000, 100, 100]
TORQUE_POLY1 = [1000, 100, 100]
MODE = 3
arm_control_utils.initialize_motors()
arm_control_utils.enable_state_torque()
arm_control_utils.set_debug(1, 0)
print("Ready to move")
arm_control_utils.set_position_trajectory(1, DURATION, TRAJ_POLY1, TORQUE_POLY1)
arm_control_utils.set_mode(1, MODE)
arm_control_utils.disable_state_torque()
arm_control_utils.stop_motors()
| 28.315789
| 80
| 0.702602
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 36
| 0.066914
|
595fa12df823f48a76595c65b488cfd3266708e8
| 5,758
|
py
|
Python
|
google-datacatalog-connectors-commons/tests/google/datacatalog_connectors/commons/prepare/base_entry_factory_test.py
|
mesmacosta/datacatalog-connectors
|
74a4b6272cb00f2831b669d1a41133913f3df3fa
|
[
"Apache-2.0"
] | 53
|
2020-04-27T21:50:47.000Z
|
2022-02-18T22:08:49.000Z
|
google-datacatalog-connectors-commons/tests/google/datacatalog_connectors/commons/prepare/base_entry_factory_test.py
|
mesmacosta/datacatalog-connectors
|
74a4b6272cb00f2831b669d1a41133913f3df3fa
|
[
"Apache-2.0"
] | 20
|
2020-05-26T13:51:45.000Z
|
2022-01-25T00:06:19.000Z
|
google-datacatalog-connectors-commons/tests/google/datacatalog_connectors/commons/prepare/base_entry_factory_test.py
|
mesmacosta/datacatalog-connectors
|
74a4b6272cb00f2831b669d1a41133913f3df3fa
|
[
"Apache-2.0"
] | 12
|
2020-04-30T22:14:02.000Z
|
2021-10-09T03:44:39.000Z
|
#!/usr/bin/python
# coding=utf-8
#
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
import mock
from google.datacatalog_connectors.commons import prepare
class BaseEntryFactoryTestCase(unittest.TestCase):
__COMMONS_PACKAGE = 'google.datacatalog_connectors.commons'
__PREPARE_PACKAGE = '{}.prepare'.format(__COMMONS_PACKAGE)
def test_format_id_should_normalize_non_compliant_id(self):
formatted_id = prepare.BaseEntryFactory._format_id(u'ã123 - b456 ')
self.assertEqual('a123_b456', formatted_id)
def test_format_id_with_hashing_should_normalize_non_compliant_id(self):
long_str = 'organization_warehouse7192ecb2__personsc3a8d512_' \
'business_area_and_segment_of_marketing'
expected_str = 'organization_warehouse7192ecb2_personsc3a8d512_' \
'business_7074c286'
formatted_id = prepare.BaseEntryFactory._format_id_with_hashing(
long_str, hash_length=8)
self.assertEqual(expected_str, formatted_id)
def test_format_id_with_provided_pattern_should_normalize_non_compliant_id( # noqa: E501
self):
long_str = 'organization__warehouse7192ecb2__personsc3a8d512_' \
'business_area_and_segment_of_marketing'
expected_str = 'organization__warehouse7192ecb2_' \
'_personsc3a8d512_businesa4f7e655'
formatted_id = prepare.BaseEntryFactory._format_id_with_hashing(
long_str, regex_pattern=r'[^a-zA-Z0-9_]+')
self.assertEqual(expected_str, formatted_id)
def test_format_display_name_should_normalize_non_compliant_name(self):
formatted_name = prepare.BaseEntryFactory._format_display_name(
u'ã123 :?: b456 ')
self.assertEqual('a123 _ b456', formatted_name)
@mock.patch(
'{}.DataCatalogStringsHelper.truncate_string'.format(__PREPARE_PACKAGE)
)
def test_format_linked_resource_should_not_normalize_compliant_string(
self, mock_truncate_string):
# Return same value received.
mock_truncate_string.side_effect = (lambda *args: args[0])
formatted_linked_resource = prepare.BaseEntryFactory.\
_format_linked_resource(
'hdfs://namenode:8020/user/hive/warehouse/table_company'
'_names_from_department_that_keeps_records_with_'
'historical_data_from_every_single_member')
self.assertEqual(
'hdfs://namenode:8020/user/hive/warehouse/'
'table_company_names_from_department_that_'
'keeps_records_with_historical_data_'
'from_every_single_member', formatted_linked_resource)
@mock.patch(
'{}.DataCatalogStringsHelper.truncate_string'.format(__PREPARE_PACKAGE)
)
def test_format_linked_resource_should_normalize_non_compliant_string(
self, mock_truncate_string):
# Return same value received.
mock_truncate_string.side_effect = (lambda *args: args[0])
formatted_linked_resource = prepare.BaseEntryFactory. \
_format_linked_resource(
'hdfs://[namenode]:8020/user/{hive}/[warehouse]/table_company'
'_names_from_?department?_that_;keeps;_records_with_'
'historical_data_from_every_single_member')
self.assertEqual(
'hdfs://_namenode_:8020/user/'
'_hive_/_warehouse_/table_company_names_from'
'__department__that__keeps__records_with_'
'historical_data_from_every_single_member',
formatted_linked_resource)
@mock.patch(
'{}.DataCatalogStringsHelper.truncate_string'.format(__PREPARE_PACKAGE)
)
def test_format_linked_resource_should_not_normalize_non_compliant_string(
self, mock_truncate_string):
# Return same value received.
mock_truncate_string.side_effect = (lambda *args: args[0])
formatted_linked_resource = prepare.BaseEntryFactory. \
_format_linked_resource(
'hdfs://[namenode]:8020/user/{hive}/[warehouse]/table_company'
'_names_from_?department?_that_;keeps;_records_with_'
'historical_data_from_every_single_member', False)
self.assertEqual(
'hdfs://[namenode]:8020/user/{hive}/[warehouse]/table_company'
'_names_from_?department?_that_;keeps;_records_with_'
'historical_data_from_every_single_member',
formatted_linked_resource)
@mock.patch(
'{}.DataCatalogStringsHelper.truncate_string'.format(__PREPARE_PACKAGE)
)
def test_format_linked_resource_should_truncate_non_compliant_string(
self, mock_truncate_string):
expected_value = 'truncated_str...'
mock_truncate_string.return_value = expected_value
formatted_linked_resource = prepare.BaseEntryFactory. \
_format_linked_resource(
'hdfs://[namenode]:8020/user/{hive}/[warehouse]/table_company'
'_names_from_?department?_that_;keeps;_records_with_'
'historical_data_from_every_single_member')
self.assertEqual(expected_value, formatted_linked_resource)
| 42.029197
| 93
| 0.70719
| 5,058
| 0.878125
| 0
| 0
| 3,383
| 0.587326
| 0
| 0
| 2,419
| 0.419965
|
5960088035b5df4aefdc1abf2b6dd9894a0c53be
| 5,978
|
py
|
Python
|
estimators.py
|
RakitinDen/pytorch-recursive-gumbel-max-trick
|
44f9854020e727946a074a6e53b20dd593f96cc1
|
[
"Apache-2.0"
] | 20
|
2021-12-03T13:20:17.000Z
|
2022-03-20T18:58:06.000Z
|
estimators.py
|
RakitinDen/pytorch-recursive-gumbel-max-trick
|
44f9854020e727946a074a6e53b20dd593f96cc1
|
[
"Apache-2.0"
] | null | null | null |
estimators.py
|
RakitinDen/pytorch-recursive-gumbel-max-trick
|
44f9854020e727946a074a6e53b20dd593f96cc1
|
[
"Apache-2.0"
] | null | null | null |
# Estimators are partially based on the "estimators.py" from the following repositories:
# https://github.com/agadetsky/pytorch-pl-variance-reduction
# https://github.com/sdrobert/pydrobert-pytorch
import torch
def uniform_to_exp(logits, uniform=None, enable_grad=False):
'''
Converts a tensor of independent uniform samples into a tensor of independent exponential samples
Tensor 'logits' contains log-means of the exponential distributions
Parameters of the exponentials can be represented as
lambda = exp(-logit), since expected value is equal to 1/lambda
'''
if uniform is not None:
assert uniform.size() == logits.size()
else:
uniform = torch.distributions.utils.clamp_probs(torch.rand_like(logits))
exp = torch.exp(logits + torch.log(-torch.log(uniform)))
if enable_grad:
exp.requires_grad_(True)
return exp
def reattach_exp_to_new_logits(logits, exp):
'''
Creates a new tensor of exponential variables that depends on logits in the same way
as if it was obtained by transforming uniform samples via 'uniform_to_exp'
Used in 'relax' to obtain gradient for the detached version of the logits
'''
exp = torch.exp(torch.log(exp.detach()) + logits - logits.detach())
return exp
def E_reinforce(loss_value, logits, exp, plus_samples=1, mask_unused_values=None, **kwargs):
'''
Returns the REINFORCE [williams1992] gradient estimate with respect to the exponential score
grad = loss(X) * (d / d logits) log p(E ; logits)
If plus_samples > 1, the estimate is E-REINFORCE+ / E-REINFORCE with LOO baseline [kool2019buy, richter2020vargrad]
'''
batch_size = logits.shape[0] // plus_samples
loss_value = loss_value.detach()
exp = exp.detach()
log_prob = -logits - torch.exp(torch.log(exp) - logits)
if mask_unused_values is not None:
log_prob = mask_unused_values(log_prob, **kwargs)
dims_except_batch = tuple(-i for i in range(1, logits.ndimension()))
log_prob = log_prob.sum(dim=dims_except_batch)
score = torch.autograd.grad([log_prob], [logits], grad_outputs=torch.ones_like(log_prob))[0]
if plus_samples > 1:
score_shape = (batch_size, plus_samples) + logits.shape[1:]
score = score.view(score_shape)
loss_value = loss_value.view(batch_size, plus_samples)
loss_value = loss_value - loss_value.mean(dim=-1)[:, None]
for i in range(logits.ndimension() - 1):
loss_value = loss_value.unsqueeze(-1)
grad = (loss_value * score).sum(dim=1) / (plus_samples - 1)
else:
for i in range(logits.ndimension() - 1):
loss_value = loss_value.unsqueeze(-1)
grad = loss_value * score
return grad
def T_reinforce(loss_value, struct_var, logits, f_log_prob, plus_samples=1, **kwargs):
'''
Returns the REINFORCE [williams1992] gradient estimate with respect to the score function of the execution trace
grad = loss(X) * (d / d logits) log p(T ; logits)
If plus_samples > 1, the estimate is T-REINFORCE+ / T-REINFORCE with LOO baseline [kool2019buy, richter2020vargrad]
'''
batch_size = logits.shape[0] // plus_samples
loss_value = loss_value.detach()
struct_var = struct_var.detach()
log_prob = f_log_prob(struct_var, logits, **kwargs)
score = torch.autograd.grad([log_prob], [logits], grad_outputs=torch.ones_like(log_prob))[0]
if plus_samples > 1:
score_shape = (batch_size, plus_samples) + logits.shape[1:]
score = score.view(score_shape)
loss_value = loss_value.view(batch_size, plus_samples)
loss_value = loss_value - loss_value.mean(dim=-1)[:, None]
for i in range(logits.ndimension() - 1):
loss_value = loss_value.unsqueeze(-1)
grad = (loss_value * score).sum(dim=1) / (plus_samples - 1)
else:
for i in range(logits.ndimension() - 1):
loss_value = loss_value.unsqueeze(-1)
grad = loss_value * score
return grad
def relax(loss_value, struct_var, logits, exp, critic, f_log_prob, f_cond, uniform=None, **kwargs):
'''
Returns the RELAX [grathwohl2017backpropagation] gradient estimate
grad = (loss(X(T)) - c(e_2)) * (d / d logits) log p(T ; logits) - (d / d logits) c(e_2) + (d / d logits) c(e_1)
e_1 ~ p(E ; logits) - exponential sample
T = T(e_1) - execution trace of the algorithm
X = X(T) - structured variable, obtained as the output of the algorithm
e_2 ~ p(E | T ; logits) - conditional exponential sample
c(.) - critic (typically, a neural network)
e_1 and e_2 are sampled using the reparameterization trick
(d / d logits) c(e_1) and (d / d logits) c(e_2) are the reparameterization gradients
In code, exp := e_1, cond_exp := e_2
'''
loss_value = loss_value.detach()
struct_var = struct_var.detach()
logits = logits.detach().requires_grad_(True)
exp = reattach_exp_to_new_logits(logits, exp)
cond_exp = f_cond(struct_var, logits, uniform, **kwargs)
baseline_exp = critic(exp)
baseline_cond = critic(cond_exp).squeeze()
diff = loss_value - baseline_cond
log_prob = f_log_prob(struct_var, logits, **kwargs)
score, = torch.autograd.grad(
[log_prob],
[logits],
grad_outputs = torch.ones_like(log_prob)
)
d_baseline_exp, = torch.autograd.grad(
[baseline_exp],
[logits],
create_graph=True,
retain_graph=True,
grad_outputs=torch.ones_like(baseline_exp)
)
d_baseline_cond, = torch.autograd.grad(
[baseline_cond],
[logits],
create_graph=True,
retain_graph=True,
grad_outputs=torch.ones_like(baseline_cond)
)
for i in range(logits.ndimension() - 1):
diff = diff.unsqueeze(-1)
grad = diff * score + d_baseline_exp - d_baseline_cond
assert grad.size() == logits.size()
return grad
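# Hedged usage sketch (not part of the original module): a toy end-to-end call of the
# E-REINFORCE estimator above; the batch size, dimensionality and loss are assumptions.
def _toy_e_reinforce_example():
    batch_size, dim = 4, 3
    logits = torch.zeros(batch_size, dim, requires_grad=True)
    exp = uniform_to_exp(logits)           # e ~ p(E ; logits) via the uniform reparameterization
    loss_value = exp.sum(dim=-1).detach()  # toy per-sample loss
    grad = E_reinforce(loss_value, logits, exp)
    assert grad.shape == logits.shape      # one gradient entry per logit
    return grad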
| 36.674847
| 119
| 0.666109
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 2,072
| 0.346604
|
596098c174bcd92a072f4a63dcf655eaaf7c83e8
| 1,332
|
py
|
Python
|
squareroot.py
|
martinaobrien/pands-problem-sets
|
5928f9ed2a743f46a9615f41192fd6dfb810b73c
|
[
"CNRI-Python"
] | null | null | null |
squareroot.py
|
martinaobrien/pands-problem-sets
|
5928f9ed2a743f46a9615f41192fd6dfb810b73c
|
[
"CNRI-Python"
] | null | null | null |
squareroot.py
|
martinaobrien/pands-problem-sets
|
5928f9ed2a743f46a9615f41192fd6dfb810b73c
|
[
"CNRI-Python"
] | null | null | null |
#Martina O'Brien 10/3/2019
#Problem Set 7 - squareroots
#Program to determine the square roots of positive floating point numbers
## Reference for try and except https://www.w3schools.com/python/python_try_except.asp
while True: # this loop will run to allow the user to input a value again if they do not enter a positive number
    try:
        num = input("Please enter a positive number: ") # Here the user will enter a positive number.
        number = float(num) # using float(num) to allow numbers with decimal points
    except ValueError:
        print('Sorry, this is not a number. Can you please try again and enter a positive number.')
        # If the value entered is not a number, ask for input again.
        continue # continue to the next iteration of the loop
    if number <= 0:
        print('Please enter a number greater than zero')
        # to ensure that the user inputs a positive number
        continue
    number_sqrt = (number ** 0.5)
    # Using ** 0.5 gives the square root of the number inputted
    # Using %0.1f returns the answer to one decimal place
    print("The square root of %0.1f is approx %0.1f" % (number, number_sqrt))
    # print the result to one decimal place, then leave the loop
    break
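# Hedged worked example (not part of the problem-set script): for an input of 2,
# 2 ** 0.5 is about 1.4142 and "%0.1f" rounds it to one decimal place.
print("The square root of %0.1f is approx %0.1f" % (2, 2 ** 0.5))  # -> The square root of 2.0 is approx 1.4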
| 45.931034
| 114
| 0.693694
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1,030
| 0.773273
|
596187b54ca231442ef296c49a1a09d46c903d01
| 2,843
|
py
|
Python
|
tests/org_group_tests.py
|
JonLMyers/MetroTransitAPI
|
d8f467570368cd563d69564b680cfdd47ad6b622
|
[
"MIT"
] | null | null | null |
tests/org_group_tests.py
|
JonLMyers/MetroTransitAPI
|
d8f467570368cd563d69564b680cfdd47ad6b622
|
[
"MIT"
] | null | null | null |
tests/org_group_tests.py
|
JonLMyers/MetroTransitAPI
|
d8f467570368cd563d69564b680cfdd47ad6b622
|
[
"MIT"
] | null | null | null |
import requests
import json
token = ''
email_token = ''
print("######## Pass ########")
target = 'http://127.0.0.1:5000/login'
headers = {'Content-type': 'application/json', 'Accept': 'text/plain'}
data = {'username': 'jon@aaxus.com', 'password': 'password125'}
r = requests.post(target, data=json.dumps(data), headers=headers)
print(r.status_code, r.reason)
print(r.text)
data = json.loads(r.text)
token = data['access_token']
print(token)
print("######## Pass ########")
target = 'http://127.0.0.1:5000/group/manage'
headers = {'Content-type': 'application/json', 'Accept': 'text/plain', 'authorization': 'Bearer ' + token}
data = {
'name': 'Dev Ops',
'description': 'Devops',
'org_name': 'Aaxus'
}
r = requests.post(target, data=json.dumps(data), headers=headers)
print(r.status_code, r.reason)
print(r.text)
print("######## Pass ########")
target = 'http://127.0.0.1:5000/group/manage'
headers = {'Content-type': 'application/json', 'Accept': 'text/plain', 'authorization': 'Bearer ' + token}
data = {
'org_name': 'Aaxus',
'id': 'Dev Ops',
'description': 'Developer Operations Organization',
'member_username': ['spiro@aaxus.com', 'anthony@aaxus.com', 'ben@aaxus.com'],
'admin_username': ['spiro@aaxus.com', 'anthony@aaxus.com']
}
r = requests.put(target, data=json.dumps(data), headers=headers)
print(r.status_code, r.reason)
print(r.text)
print("######## Pass ########")
target = 'http://127.0.0.1:5000/group/manage'
headers = {'Content-type': 'application/json', 'Accept': 'text/plain', 'authorization': 'Bearer ' + token}
data = {
'org_name': 'Aaxus',
'id': 'Dev Ops',
'remove_admin': ['spiro@aaxus.com'],
'remove_member': ['ben@aaxus.com']
}
r = requests.put(target, data=json.dumps(data), headers=headers)
print(r.status_code, r.reason)
print(r.text)
print("######## Pass ########")
target = 'http://127.0.0.1:5000/group/manage'
headers = {'Content-type': 'application/json', 'Accept': 'text/plain', 'authorization': 'Bearer ' + token}
data = {
'name': 'Executives',
'description': 'Devops',
'org_name': 'Aaxus'
}
r = requests.post(target, data=json.dumps(data), headers=headers)
print(r.status_code, r.reason)
print(r.text)
print("######## Pass ########")
target = 'http://127.0.0.1:5000/group/view'
headers = {'Content-type': 'application/json', 'Accept': 'text/plain', 'authorization': 'Bearer ' + token}
data = {
'org_name': 'Aaxus',
'id': 'Dev Ops',
}
r = requests.post(target, data=json.dumps(data), headers=headers)
print(r.status_code, r.reason)
print(r.text)
print("######## Pass ########")
target = 'http://127.0.0.1:5000/group/view?org_name=Aaxus'
headers = {'Content-type': 'application/json', 'Accept': 'text/plain', 'authorization': 'Bearer ' + token}
r = requests.get(target, headers=headers)
print(r.status_code, r.reason)
print(r.text)
| 33.05814
| 106
| 0.638762
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1,426
| 0.501583
|
5961e885fedcd68b3653416c363d4e461726bdc8
| 5,578
|
py
|
Python
|
pywbemtools/pywbemlistener/_context_obj.py
|
pywbem/pywbemtools
|
6b7c3f124324fd3ab7cffb82bc98c8f9555317e4
|
[
"Apache-2.0"
] | 8
|
2017-04-01T13:55:00.000Z
|
2022-03-15T18:28:47.000Z
|
pywbemtools/pywbemlistener/_context_obj.py
|
pywbem/pywbemtools
|
6b7c3f124324fd3ab7cffb82bc98c8f9555317e4
|
[
"Apache-2.0"
] | 918
|
2017-03-03T14:29:03.000Z
|
2022-03-29T15:32:16.000Z
|
pywbemtools/pywbemlistener/_context_obj.py
|
pywbem/pywbemtools
|
6b7c3f124324fd3ab7cffb82bc98c8f9555317e4
|
[
"Apache-2.0"
] | 2
|
2020-01-17T15:56:46.000Z
|
2020-02-12T18:49:30.000Z
|
# (C) Copyright 2021 Inova Development Inc.
# All Rights Reserved
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Click context object for the pywbemlistener command.
"""
from __future__ import absolute_import, print_function, unicode_literals
import os
import click_spinner
class ContextObj(object):
# pylint: disable=useless-object-inheritance, too-many-instance-attributes
"""
    Click context object for the pywbemlistener command.
This object is attached to the Click context, and is used as follows:
- Contains all general options for use by command functions.
- Serves as the central object for executing command functions.
- Has support for starting and stopping the Click spinner.
"""
spinner_envvar = 'PYWBEMLISTENER_SPINNER'
def __init__(self, output_format, logdir, verbose, pdb, warn):
"""
Parameters:
output_format (:term:`string` or `None`):
Value of --output-format general option, or `None` if not specified.
logdir (:term:`string` or `None`):
Value of --logdir general option, or `None` if not specified.
verbose (int):
Verbosity. See VERBOSE_* constants for a definition.
pdb (:class:`py:bool`):
Indicates whether the --pdb general option was specified.
warn (:class:`py:bool`):
Indicates whether the --warn general option was specified.
"""
self._output_format = output_format
self._logdir = logdir
self._verbose = verbose
self._pdb = pdb
self._warn = warn
self._spinner_enabled = None # Deferred init in getter
self._spinner_obj = click_spinner.Spinner()
def __repr__(self):
return 'ContextObj(at {:08x}, output_format={s.output_format}, ' \
'logdir={s.logdir}, verbose={s.verbose}, pdb={s.pdb}, ' \
'warn={s.warn}, spinner_enabled={s.spinner_enabled}' \
.format(id(self), s=self)
@property
def output_format(self):
"""
:term:`string`: String defining the output format requested. This may
be `None` meaning that the default format should be used or may be
one of the values in the TABLE_FORMATS variable.
"""
return self._output_format
@property
def logdir(self):
"""
:term:`string`: Path name of log directory for the 'run' command,
or `None` for no logging.
"""
return self._logdir
@property
def verbose(self):
"""
int: Verbosity. See VERBOSE_* constants for a definition.
"""
return self._verbose
@property
def pdb(self):
"""
bool: Indicates whether to break in the debugger.
"""
return self._pdb
@property
def warn(self):
"""
bool: Indicates whether to enable Python warnings.
"""
return self._warn
@property
def spinner_enabled(self):
"""
:class:`py:bool`: Indicates and controls whether the spinner is enabled.
If the spinner is enabled, subcommands will display a spinning wheel
while waiting for completion.
This attribute can be modified.
The initial state of the spinner is enabled, but it can be disabled by
setting the {0} environment variable to 'false', '0', or the empty
value.
""".format(self.spinner_envvar)
# Deferred initialization
if self._spinner_enabled is None:
value = os.environ.get(self.spinner_envvar, None)
if value is None:
# Default if not set
self._spinner_enabled = True
elif value == '0' or value == '' or value.lower() == 'false':
self._spinner_enabled = False
else:
self._spinner_enabled = True
return self._spinner_enabled
@spinner_enabled.setter
def spinner_enabled(self, enabled):
"""Setter method; for a description see the getter method."""
self._spinner_enabled = enabled
def spinner_start(self):
"""
Start the spinner, if the spinner is enabled.
"""
if self.spinner_enabled:
self._spinner_obj.start()
def spinner_stop(self):
"""
Stop the spinner, if the spinner is enabled.
"""
if self.spinner_enabled:
self._spinner_obj.stop()
def execute_cmd(self, cmd):
"""
Call the command function for a command, after enabling the spinner
(except when in debug mode) and after entering debug mode if desired.
"""
if not self.pdb:
self.spinner_start()
try:
if self.pdb:
import pdb # pylint: disable=import-outside-toplevel
pdb.set_trace() # pylint: disable=forgotten-debug-statement
cmd() # The command function for the pywbemlistener command
finally:
if not self.pdb:
self.spinner_stop()
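# Hedged usage sketch (not part of the original module): construct a context with the
# general options disabled and run a trivial command body through execute_cmd, which
# wraps it in the spinner handling shown above.
def _example_execute_cmd():
    ctx = ContextObj(output_format=None, logdir=None, verbose=0, pdb=False, warn=False)
    ctx.execute_cmd(lambda: None)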
| 31.693182
| 80
| 0.620115
| 4,797
| 0.859986
| 0
| 0
| 2,068
| 0.370742
| 0
| 0
| 3,434
| 0.615633
|
5962222919ba8cf295722ccc3d990ff5fdab4dcc
| 1,704
|
py
|
Python
|
ota_xml_api/util/xml_base.py
|
mihira/opentravel-xml-api
|
24d1ea4d24cf2575de474becaa665f6fc0d1971d
|
[
"MIT"
] | 3
|
2016-01-14T01:12:06.000Z
|
2021-04-16T04:00:47.000Z
|
ota_xml_api/util/xml_base.py
|
mihira/opentravel-xml-api
|
24d1ea4d24cf2575de474becaa665f6fc0d1971d
|
[
"MIT"
] | null | null | null |
ota_xml_api/util/xml_base.py
|
mihira/opentravel-xml-api
|
24d1ea4d24cf2575de474becaa665f6fc0d1971d
|
[
"MIT"
] | 2
|
2017-09-04T13:02:09.000Z
|
2018-06-09T11:10:03.000Z
|
#!/usr/bin/env python
"""
This module contains the base xml Node and Period classes
"""
from xml.dom.minidom import getDOMImplementation
from date import Period
from constants import START, END
class XmlNode(object):
"""
the name of the class will define the name of the node by default.
classes inheriting this class will have their name set.
"""
_impl = getDOMImplementation()
def __init__(self, name=None, **attributes):
if not name:
name = self.__class__.__name__
self._doc = XmlNode._impl.createDocument(None, name, None)
self.element = self._doc.documentElement
for key, value in attributes.items():
self.set_attribute(key, value)
self.parent = None
def set_attribute(self, key, value):
self.element.setAttribute(key, str(value))
def set_parent(self, parent_node):
self.parent = parent_node
def add_child(self, child_node):
child_node.set_parent(self)
self.element.appendChild(child_node.element)
return child_node
def add_text(self, data):
text = self._doc.createTextNode(data)
self.element.appendChild(text)
return text
def __repr__(self):
return self.element.toxml()
class PeriodNode(XmlNode):
def __init__(self, *args, **kwargs):
XmlNode.__init__(self, *args, **kwargs)
self._period = None
self.set_period(Period())
def get_period(self):
return self._period
def set_period(self, period):
self.set_attribute(START, period.start)
self.set_attribute(END, period.end)
self._period = period
period = property(get_period, set_period)
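# Hedged usage sketch (not part of the original module; the element names are invented
# for illustration): build a two-level tree and print its XML form via __repr__.
def _example_tree():
    root = XmlNode('Availability', Currency='USD')
    room = root.add_child(XmlNode('Room', Code='A1'))
    room.add_text('standard double')
    print(root)  # e.g. <Availability Currency="USD"><Room Code="A1">standard double</Room></Availability>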
| 27.483871
| 70
| 0.661385
| 1,502
| 0.881455
| 0
| 0
| 0
| 0
| 0
| 0
| 228
| 0.133803
|
59629f7a0c5633f940aafc1f0319ef57490ea9f2
| 9,441
|
py
|
Python
|
phl_courts_scraper/court_summary/schema.py
|
PhiladelphiaController/phl-courts-scraper
|
0c3c915a7fa355538c43a138fa7b104b8bf6ef1e
|
[
"MIT"
] | null | null | null |
phl_courts_scraper/court_summary/schema.py
|
PhiladelphiaController/phl-courts-scraper
|
0c3c915a7fa355538c43a138fa7b104b8bf6ef1e
|
[
"MIT"
] | 4
|
2020-12-09T18:25:53.000Z
|
2021-03-19T22:30:18.000Z
|
phl_courts_scraper/court_summary/schema.py
|
PhiladelphiaController/phl-courts-scraper
|
0c3c915a7fa355538c43a138fa7b104b8bf6ef1e
|
[
"MIT"
] | null | null | null |
"""Define the schema for the court summary report."""
import datetime
from dataclasses import dataclass, field, fields
from typing import Any, Iterator, List, Optional, Union
import desert
import marshmallow
import pandas as pd
from ..utils import DataclassSchema
__all__ = ["CourtSummary", "Docket", "Charge", "Sentence"]
class TimeField(marshmallow.fields.DateTime):
"""Custom time field to handle string to datetime conversion."""
def _serialize(self, value, attr, obj, **kwargs):
"""Return string representation of datetime objects."""
if not value:
return ""
if isinstance(value, datetime.datetime):
return value.strftime("%m/%d/%Y")
return super()._serialize(value, attr, obj, **kwargs)
def _deserialize(self, value, attr, data, **kwargs):
"""Convert strings to datetime objects."""
if value == "":
return None
if isinstance(value, datetime.datetime):
return value
return super()._deserialize(value, attr, data, **kwargs)
@dataclass
class Sentence(DataclassSchema):
"""
A Sentence object.
Parameters
----------
sentence_type :
the sentence type
program_period : optional
the program period
sentence_length : optional
the length of the sentence
sentence_dt :
the date of the sentence
"""
sentence_type: str
sentence_dt: str = desert.field(
TimeField(format="%m/%d/%Y", allow_none=True)
)
program_period: str = ""
sentence_length: str = ""
def __repr__(self):
cls = self.__class__.__name__
if not pd.isna(self.sentence_dt):
dt = self.sentence_dt.strftime("%m/%d/%y")
dt = f"'{dt}'"
else:
dt = "NaT"
s = f"sentence_dt={dt}, sentence_type='{self.sentence_type}'"
return f"{cls}({s})"
@dataclass
class Charge(DataclassSchema):
"""
A Charge object.
Parameters
----------
seq_no :
the charge sequence number
statute :
the statute
description : optional
description of the statute
grade : optional
the grade, e.g., felony, misdemeanor, etc.
disposition : optional
the disposition for the charge, if present
sentences : optional
list of any sentences associated with the charge
"""
seq_no: str
statute: str
description: str = ""
grade: str = ""
disposition: str = ""
sentences: List[Sentence] = field(default_factory=list)
@property
def meta(self):
"""A dict of the meta info associated with the charge"""
exclude = ["sentences"]
return {
f.name: getattr(self, f.name)
for f in fields(self)
if f.name not in exclude
}
def __iter__(self) -> Iterator[Sentence]:
"""Iterate through the sentences."""
return iter(self.sentences)
def __len__(self):
"""Return the length of the sentences."""
return len(self.sentences)
def __getitem__(self, index):
"""Index the sentences."""
return self.sentences.__getitem__(index)
def __repr__(self):
cls = self.__class__.__name__
cols = ["seq_no", "statute", "description"]
s = ", ".join([f"{col}='{getattr(self, col)}'" for col in cols])
s += f", num_sentences={len(self.sentences)}"
return f"{cls}({s})"
@dataclass
class Docket(DataclassSchema):
"""
A Docket object.
Parameters
----------
docket_number :
the docket number
proc_status :
the status of the docket proceedings
dc_no :
the DC incident number
otn :
the offense tracking number
arrest_dt :
the arrest date
county :
the PA county where case is being conducted
status :
the docket status as determined by the section on the court
summary, e.g., "Active", "Closed", etc.
extra :
list of any additional header info for the docket
psi_num : optional
pre-sentence investigation number
prob_num : optional
the probation number
disp_date : optional
date of disposition
disp_judge : optional
the disposition judge
def_atty : optional
the name of the defense attorney
trial_dt : optional
the date of the trial
legacy_no : optional
the legacy number for the docket
last_action : optional
the last action in the case
last_action_date : optional
the date of the last action
last_action_room : optional
the room where last action occurred
next_action : optional
the next action to occur
next_action_date : optional
the date of the next action
next_action_room : optional
the room where next action will occur
charges : optional
a list of charges associated with this case
"""
docket_number: str
proc_status: str
dc_no: str
otn: str
county: str
status: str
extra: List[Any]
arrest_dt: str = desert.field(
TimeField(format="%m/%d/%Y", allow_none=True)
)
psi_num: str = ""
prob_num: str = ""
disp_judge: str = ""
def_atty: str = ""
legacy_no: str = ""
last_action: str = ""
last_action_room: str = ""
next_action: str = ""
next_action_room: str = ""
next_action_date: Optional[str] = desert.field(
TimeField(format="%m/%d/%Y", allow_none=True), default=""
)
last_action_date: Optional[str] = desert.field(
TimeField(format="%m/%d/%Y", allow_none=True), default=""
)
trial_dt: Optional[str] = desert.field(
TimeField(format="%m/%d/%Y", allow_none=True), default=""
)
disp_date: Optional[str] = desert.field(
TimeField(format="%m/%d/%Y", allow_none=True), default=""
)
charges: List[Charge] = field(default_factory=list)
def to_pandas(self) -> pd.DataFrame:
"""
Return a dataframe representation of the data,
where each row represents a separate charge.
"""
# Each row is a Charge
out = pd.DataFrame([c.to_dict() for c in self])
# Convert sentences dicts to Sentence objects
out["sentences"] = out["sentences"].apply(
lambda l: [Sentence(**v) for v in l]
)
return out
@property
def meta(self):
"""A dict of the meta info associated with the docket"""
exclude = ["charges"]
return {
f.name: getattr(self, f.name)
for f in fields(self)
if f.name not in exclude
}
def __getitem__(self, index):
"""Index the charges."""
return self.charges.__getitem__(index)
def __iter__(self) -> Iterator[Charge]:
"""Iterate through the charges."""
return iter(self.charges)
def __len__(self):
"""The number of charges."""
return len(self.charges)
def __repr__(self):
cls = self.__class__.__name__
if not pd.isna(self.arrest_dt):
dt = self.arrest_dt.strftime("%m/%d/%y")
dt = f"'{dt}'"
else:
dt = "NaT"
s = [
f"{self.docket_number}",
str(self.status),
f"arrest_dt={dt}",
f"num_charges={len(self)}",
]
return f"{cls}({', '.join(s)})"
@dataclass
class CourtSummary(DataclassSchema):
"""A Court Summary object.
Parameters
----------
name :
The name of the defendant.
date_of_birth :
The defendant's date of birth.
eyes :
The defendant's eye color.
sex :
The defendant's sex.
hair :
The defendant's hair color.
race :
The defendant's race.
location :
Defendant location
aliases :
List of aliases for the defendant
dockets :
List of Docket objects on the court summary
"""
name: str
date_of_birth: str
eyes: str
sex: str
hair: str
race: str
location: str
aliases: List[str]
dockets: List[Docket]
def to_pandas(self) -> pd.DataFrame:
"""
Return a dataframe representation of the data,
where each row represents a separate docket.
"""
# Each row is a Docket
out = pd.DataFrame([c.to_dict() for c in self])
# Convert charge dicts to Charge objects
out["charges"] = out["charges"].apply(
lambda l: [Charge(**v) for v in l]
)
return out
@property
def meta(self):
"""A dict of the meta info associated with the court summary."""
exclude = ["dockets"]
return {
f.name: getattr(self, f.name)
for f in fields(self)
if f.name not in exclude
}
def __iter__(self) -> Iterator[Docket]:
"""Yield the object's dockets."""
return iter(self.dockets)
def __len__(self) -> int:
"""Return the number of dockets."""
return len(self.dockets)
def __getitem__(self, index):
"""Index the dockets."""
return self.dockets.__getitem__(index)
def __repr__(self) -> str:
"""Shorten the default dataclass representation."""
cls = self.__class__.__name__
return f"{cls}(name='{self.name}', num_dockets={len(self)})"
| 26.594366
| 72
| 0.586696
| 9,055
| 0.959115
| 0
| 0
| 8,366
| 0.886135
| 0
| 0
| 4,413
| 0.467429
|
5962e0c96855173baf9ead74168b62eef51ee37e
| 216
|
py
|
Python
|
Day_43/json_dump_python.py
|
kiranrraj/100Days_Of_Coding
|
ab75d83be9be87fb7bc83a3f3b72a4638dab22a1
|
[
"MIT"
] | null | null | null |
Day_43/json_dump_python.py
|
kiranrraj/100Days_Of_Coding
|
ab75d83be9be87fb7bc83a3f3b72a4638dab22a1
|
[
"MIT"
] | null | null | null |
Day_43/json_dump_python.py
|
kiranrraj/100Days_Of_Coding
|
ab75d83be9be87fb7bc83a3f3b72a4638dab22a1
|
[
"MIT"
] | null | null | null |
# Title : Json Module
# Author : Kiran Raj R.
# Date : 26/11/2020
python_json = {"name":"kiran", "email":"kiran@gmail.com", "isHappy": "Yes"}
import json
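# json.dumps serializes the Python dict into a JSON-formatted string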
string_j = json.dumps(python_json)
print(string_j)
| 24
| 75
| 0.680556
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 124
| 0.574074
|
5963d226f34e95078375678dfe6099b78982408c
| 573
|
py
|
Python
|
userbot/modules/trd.py
|
LUCKYRAJPUTOP/VibeXUserbot
|
257c86ff1775592688815435d8c5ce91e1dd299e
|
[
"Naumen",
"Condor-1.1",
"MS-PL"
] | null | null | null |
userbot/modules/trd.py
|
LUCKYRAJPUTOP/VibeXUserbot
|
257c86ff1775592688815435d8c5ce91e1dd299e
|
[
"Naumen",
"Condor-1.1",
"MS-PL"
] | null | null | null |
userbot/modules/trd.py
|
LUCKYRAJPUTOP/VibeXUserbot
|
257c86ff1775592688815435d8c5ce91e1dd299e
|
[
"Naumen",
"Condor-1.1",
"MS-PL"
] | null | null | null |
import asyncio
from asyncio import sleep
from random import choice
from userbot.events import register
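# Pool of Telegram usernames that the .trd command picks from at random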
T_R_D = [
"@PrajjuS",
"@Vin02vin",
"@Iamsaisharan",
"@venomsamurai",
]
@register(outgoing=True, pattern="^.trd$")
async def truthrdare(trd):
"""Truth or Dare"""
await trd.edit("`Choosing Name...`")
await sleep(1.5)
await trd.edit("`..............`")
await sleep(1.5)
msg = await trd.edit("`Name is.....`")
await sleep(3)
await trd.delete()
await msg.reply("**∆ Truth or Dare ∆**\n\n__Name:__ " + choice(T_R_D))
| 22.92
| 74
| 0.602094
| 0
| 0
| 0
| 0
| 379
| 0.656846
| 336
| 0.582322
| 173
| 0.299827
|
596512b76ad497342148f69daf0ea980f36bbf49
| 2,384
|
py
|
Python
|
collectors/nct/collector.py
|
almeidaah/collectors
|
f03096855b8d702969d22af0b20a4d6a0d820bd0
|
[
"MIT"
] | 17
|
2016-06-28T21:20:21.000Z
|
2022-03-02T16:31:25.000Z
|
collectors/nct/collector.py
|
almeidaah/collectors
|
f03096855b8d702969d22af0b20a4d6a0d820bd0
|
[
"MIT"
] | 41
|
2016-04-04T10:36:45.000Z
|
2017-04-24T10:04:57.000Z
|
collectors/nct/collector.py
|
kenferrara/collectors
|
e6c1f45df3a1ffd5d60dada1816484812eb51417
|
[
"MIT"
] | 25
|
2016-05-18T09:27:42.000Z
|
2021-03-21T14:44:31.000Z
|
# -*- coding: utf-8 -*-
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import zipfile
import logging
import requests
import tempfile
import contextlib
from .parser import parse_record
from .. import base
logger = logging.getLogger(__name__)
# Module API
def collect(conf, conn, nct_xml_dump_url):
'''
    Downloads and parses data from NCT's XML dump. For example, to get the data
from 2017-01-01 until 2017-02-01, the XML dump can be downloaded from:
https://clinicaltrials.gov/search?resultsxml=True&rcv_s=01/01/2017&rcv_e=01/02/2017
'''
base.helpers.start(conf, 'nct', {'url': nct_xml_dump_url})
with tempfile.TemporaryFile() as fp:
_download_to_file(nct_xml_dump_url, fp)
file_count = 0
for identifier, record_fp in _iter_nct_dump_files(fp):
base.config.SENTRY.extra_context({
'url': nct_xml_dump_url,
'identifier': identifier,
})
rec = parse_record(record_fp)
query = {'nct_id': rec['nct_id']}
if rec.table in conn['warehouse'].tables:
existing = conn['warehouse'][rec.table].find_one(**query)
if existing:
rec['nct_id'] = existing['nct_id']
rec.write(conf, conn)
file_count += 1
logger.info('Collected %s NCT records', file_count)
base.helpers.stop(conf, 'nct', {
'url': nct_xml_dump_url,
'collected': file_count,
})
def _download_to_file(url, fp):
CHUNK_SIZE = 1024 * 1024 # 1 MB
bytes_to_mb = lambda value: value / 1048576.0
with contextlib.closing(requests.get(url, stream=True)) as response:
completed_bytes = 0
chunk_count = 0
for block in response.iter_content(CHUNK_SIZE):
fp.write(block)
completed_bytes += len(block)
chunk_count += 1
if chunk_count % 1000 == 0:
logger.debug('Downloaded %.2f MB', bytes_to_mb(completed_bytes))
fp.seek(0)
def _iter_nct_dump_files(fp):
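    # Yield (identifier, open file handle) pairs for each XML record inside the zip dump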
with zipfile.ZipFile(fp) as archive:
for filename in archive.namelist():
identifier = filename.split('.')[0]
with archive.open(filename, 'rU') as rec_file:
yield identifier, rec_file
| 32.657534
| 87
| 0.633389
| 0
| 0
| 264
| 0.110738
| 0
| 0
| 0
| 0
| 451
| 0.189178
|
5968638622036a0684e095d3de7062e4e3ce8115
| 292
|
py
|
Python
|
bigcode-fetcher/tests/fixtures/__init__.py
|
sourcery-ai-bot/bigcode-tools
|
87aaa609998017d0312b7f4f102d41cc2942fa9d
|
[
"MIT"
] | 6
|
2017-10-15T08:21:27.000Z
|
2018-05-17T12:57:41.000Z
|
bigcode-fetcher/tests/fixtures/__init__.py
|
bdqnghi/bigcode-tools
|
94ce416fbb40b9b25d49bf88284bf7ccb6132bd3
|
[
"MIT"
] | 2
|
2017-12-17T19:02:06.000Z
|
2018-03-01T04:00:26.000Z
|
bigcode-fetcher/tests/fixtures/__init__.py
|
bdqnghi/bigcode-tools
|
94ce416fbb40b9b25d49bf88284bf7ccb6132bd3
|
[
"MIT"
] | 2
|
2017-10-18T08:17:54.000Z
|
2018-06-28T09:57:36.000Z
|
from os import path
import json
from bigcode_fetcher.project import Project
FIXTURES_DIR = path.dirname(__file__)
PROJECTS_PATH = path.join(FIXTURES_DIR, "projects.json")
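# Load the fixture projects from projects.json and wrap each entry in a Project object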
with open(PROJECTS_PATH, "r") as f:
JSON_PROJECTS = json.load(f)
PROJECTS = [Project(p) for p in JSON_PROJECTS]
| 20.857143
| 56
| 0.763699
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 18
| 0.061644
|
59692f082625d38c4980a6276af160523062869b
| 1,465
|
py
|
Python
|
examples/timeflies/timeflies_qt.py
|
yutiansut/RxPY
|
c3bbba77f9ebd7706c949141725e220096deabd4
|
[
"ECL-2.0",
"Apache-2.0"
] | 1
|
2018-11-16T09:07:13.000Z
|
2018-11-16T09:07:13.000Z
|
examples/timeflies/timeflies_qt.py
|
yutiansut/RxPY
|
c3bbba77f9ebd7706c949141725e220096deabd4
|
[
"ECL-2.0",
"Apache-2.0"
] | null | null | null |
examples/timeflies/timeflies_qt.py
|
yutiansut/RxPY
|
c3bbba77f9ebd7706c949141725e220096deabd4
|
[
"ECL-2.0",
"Apache-2.0"
] | 1
|
2021-11-04T11:13:49.000Z
|
2021-11-04T11:13:49.000Z
|
from rx.subjects import Subject
from rx.concurrency import QtScheduler
import sys
try:
from PyQt4 import QtCore
from PyQt4.QtGui import QWidget, QLabel
from PyQt4.QtGui import QApplication
except ImportError:
try:
from PyQt5 import QtCore
from PyQt5.QtWidgets import QApplication, QWidget, QLabel
except ImportError:
from PySide import QtCore
from PySide.QtGui import QWidget, QLabel
from PySide.QtGui import QApplication
class Window(QWidget):
def __init__(self):
        super(Window, self).__init__()
self.setWindowTitle("Rx for Python rocks")
self.resize(600, 600)
self.setMouseTracking(True)
# This Subject is used to transmit mouse moves to labels
self.mousemove = Subject()
def mouseMoveEvent(self, event):
self.mousemove.on_next((event.x(), event.y()))
def main():
app = QApplication(sys.argv)
scheduler = QtScheduler(QtCore)
window = Window()
window.show()
text = 'TIME FLIES LIKE AN ARROW'
labels = [QLabel(char, window) for char in text]
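    # Each label trails the mouse, offset horizontally per character and delayed by i*100 ms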
def handle_label(i, label):
def on_next(pos):
x, y = pos
label.move(x + i*12 + 15, y)
label.show()
window.mousemove.delay(i*100, scheduler=scheduler).subscribe(on_next)
for i, label in enumerate(labels):
handle_label(i, label)
sys.exit(app.exec_())
if __name__ == '__main__':
main()
| 24.416667
| 77
| 0.647099
| 398
| 0.271672
| 0
| 0
| 0
| 0
| 0
| 0
| 113
| 0.077133
|
5969ba0b61715dcc3c0755544d810b16a9ba7f4b
| 6,116
|
py
|
Python
|
src/contexts/context_local_structure.py
|
aindrila-ghosh/SmartReduce
|
b2b28055bc0b269155270c1f8206445e405e8d9b
|
[
"MIT"
] | null | null | null |
src/contexts/context_local_structure.py
|
aindrila-ghosh/SmartReduce
|
b2b28055bc0b269155270c1f8206445e405e8d9b
|
[
"MIT"
] | null | null | null |
src/contexts/context_local_structure.py
|
aindrila-ghosh/SmartReduce
|
b2b28055bc0b269155270c1f8206445e405e8d9b
|
[
"MIT"
] | null | null | null |
import numpy as np
import matplotlib.pyplot as plt
from sklearn.manifold import Isomap
from scipy.spatial.distance import pdist
from sklearn.neighbors import KNeighborsClassifier
from sklearn.model_selection import cross_val_score, LeaveOneOut
RANDOM_STATE = 42
def calculate_pairwise_distances(df_for_Box_Plot_features, points, distance='euclidean'):
"""
    Computes pairwise Euclidean distances
Parameters
----------
df_for_Box_Plot_features : list
original features
points : nD array
embedding
distance: String
distance, default value is "euclidean"
Returns
----------
distance_original : nD array
euclidean distances in the original dataset
distance_embeddings : nD array
euclidean distances in the embedding
"""
distance_original = pdist(df_for_Box_Plot_features, metric=distance)
distance_embeddings = pdist(points, metric=distance)
return distance_original, distance_embeddings
def calculate_geodesic_distance(df_for_Box_Plot_features, points):
"""
    Computes pairwise geodesic distances
Parameters
----------
df_for_Box_Plot_features : list
original features
points : nD array
embedding
Returns
----------
geo_distance_original : nD array
geodesic distances in the original dataset
geo_distance_embeddings : nD array
geodesic distances in the embedding
"""
embedding = Isomap(n_components=2)
embedding.fit(df_for_Box_Plot_features)
unsquareform = lambda a: a[np.nonzero(np.triu(a, 1))] ## define a lambda to unsquare the distance matrix
geo_distance_original = unsquareform(embedding.dist_matrix_) ## get a condensed matrix of pairwise geodesic distance among points
embedding1 = Isomap(n_components=2)
embedding1.fit(points)
embedding1.dist_matrix_[embedding1.dist_matrix_ == 0] = -9999 ## turn all 0 distances to -9999
geo_distance_embeddings = unsquareform(embedding1.dist_matrix_) ## get a condensed matrix of pairwise geodesic distance among points
geo_distance_embeddings[geo_distance_embeddings == -9999] = 0 ## turn all -9999 distances back to 0
return geo_distance_original, geo_distance_embeddings
def generate_histograms(distance_original, distance_embeddings, no_of_bins):
"""
Generates histograms
Parameters
----------
distance_original : nD array
original distances
distance_embeddings : nD array
embedding distances
no_of_bins : integer
number of bins in the histogram
Returns
----------
bin_edges_original : list
bin edges
"""
countsOriginal, bin_edges_original = np.histogram(distance_original, bins = no_of_bins)
#print("Original Distance Binned Element Counts: ", countsOriginal)
countsEmbedding, bin_edges_embedding = np.histogram(distance_embeddings, bins = no_of_bins)
#print("Embedding Distance Binned Element Counts: ", countsEmbedding)
    plt.figure()
    plt.hist(distance_original, bins = no_of_bins)
    plt.title("Pairwise distances in original data")
    plt.show()
    plt.figure()
    plt.hist(distance_embeddings, bins = no_of_bins)
    plt.title("Pairwise distances in embeddings")
    plt.show()
return bin_edges_original
def calculate_box_plot_details(distance_original, distance_embeddings, bin_edges_original):
"""
Computes the details of the Box-plots
"""
inds_original = np.digitize(distance_original, bins=bin_edges_original)
##print("number of bins = ", np.unique(inds_original))
for i in range(1,52):
globals()["array" + str(i)] = []
for j in range(0,len(inds_original)):
globals()["array" + str(inds_original[j])].append(distance_embeddings[j])
data_to_plot = [array1, array2, array3, array4, array5, array6, array7, array8, array9, array10,
array11, array12, array13, array14, array15, array16, array17, array18, array19, array20,
array21, array22, array23, array24, array25, array26, array27, array28, array29, array30,
array31, array32, array33, array34, array35, array36, array37, array38, array39, array40,
array41, array42, array43, array44, array45, array46, array47, array48, array49, array50, array51]
return data_to_plot
def generate_box_plots(data_to_plot):
"""
Generates Box-plots
"""
fig = plt.figure(1, figsize=(14, 10))
# Create an axes instance
ax = fig.add_subplot(111)
# Create the boxplot
bp = ax.boxplot(data_to_plot)
# Save the figure
fig.savefig('fig1.png', bbox_inches='tight')
## add patch_artist=True option to ax.boxplot()
## to get fill color
bp = ax.boxplot(data_to_plot, patch_artist=True)
## change outline color, fill color and linewidth of the boxes
for box in bp['boxes']:
# change outline color
box.set( color='#7570b3', linewidth=2)
# change fill color
box.set( facecolor = '#1b9e77' )
## change color and linewidth of the whiskers
for whisker in bp['whiskers']:
whisker.set(color='#7570b3', linewidth=2)
## change color and linewidth of the caps
for cap in bp['caps']:
cap.set(color='#7570b3', linewidth=2)
## change color and linewidth of the medians
for median in bp['medians']:
median.set(color='#b2df8a', linewidth=2)
## change the style of fliers and their fill
for flier in bp['fliers']:
flier.set(marker='o', color='#e7298a', alpha=0.5)
def gen_error_1_NN(embedding, labels):
"""
Computes 1-NN generalization error
Parameters
----------
embedding : nD array
embedding
labels : list
original labels
Returns
----------
gen_error : float
generalization error
"""
model = KNeighborsClassifier(n_neighbors=1)
loo = LeaveOneOut()
loo.get_n_splits(embedding)
scores = cross_val_score(model , X = embedding , y = labels, cv = loo)
gen_error = (1 - np.mean(scores))
return gen_error
| 28.985782
| 137
| 0.680347
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 2,557
| 0.418084
|
596ab002529af664473cf2cc0c9a6d46e4922281
| 849
|
py
|
Python
|
ADAMTR.py
|
akashsuper2000/codechef-archive
|
e0e4a7daf66812ab7aa3fe42132c3d067a72457b
|
[
"bzip2-1.0.6"
] | null | null | null |
ADAMTR.py
|
akashsuper2000/codechef-archive
|
e0e4a7daf66812ab7aa3fe42132c3d067a72457b
|
[
"bzip2-1.0.6"
] | null | null | null |
ADAMTR.py
|
akashsuper2000/codechef-archive
|
e0e4a7daf66812ab7aa3fe42132c3d067a72457b
|
[
"bzip2-1.0.6"
] | null | null | null |
def swap(p,j,k,n):
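    # Swap row j of matrix p with column k of p, in place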
a = p[j]
b = []
for m in range(n):
b.append(p[m][k])
for m in range(n):
p[m][k] = a[m]
p[j] = b
for i in range(int(input())):
n = int(input())
p,q = [],[]
for j in range(n):
p.append([int(k) for k in input().split()])
for j in range(n):
q.append([int(k) for k in input().split()])
f = 0
for j in range(n):
for k in range(n):
if(p[j][k]!=q[j][k] and p[j][k]==q[k][j]):
swap(p,j,k,n)
elif(p[j][k]==q[j][k]):
continue
else:
f = 1
for j in range(n):
for k in range(n):
if(p[j][k]!=q[j][k]):
f = 1
break
if(f==1):
break
if(f==1):
print('No')
else:
print('Yes')
| 22.342105
| 54
| 0.366313
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 9
| 0.010601
|
596bbf6cce06d70f6a325d7a5bf75a3e2280c89c
| 1,110
|
py
|
Python
|
hparams.py
|
TanUkkii007/vqvae
|
6ac433490fd827174e5b925780d32bea14bfb097
|
[
"MIT"
] | 2
|
2019-03-30T16:49:11.000Z
|
2019-12-18T22:50:56.000Z
|
hparams.py
|
TanUkkii007/vqvae
|
6ac433490fd827174e5b925780d32bea14bfb097
|
[
"MIT"
] | null | null | null |
hparams.py
|
TanUkkii007/vqvae
|
6ac433490fd827174e5b925780d32bea14bfb097
|
[
"MIT"
] | 1
|
2020-01-06T12:37:00.000Z
|
2020-01-06T12:37:00.000Z
|
import tensorflow as tf
default_params = tf.contrib.training.HParams(
# Encoder
encoder_num_hiddens=128,
encoder_num_residual_hiddens=32,
encoder_num_residual_layers=2,
# Decoder
decoder_num_hiddens=128,
decoder_num_residual_hiddens=32,
decoder_num_residual_layers=2,
embedding_dim=64,
num_embeddings=512,
commitment_cost=0.25,
# VectorQuantizer
vector_quantizer="VectorQuantizer",
sampling_count=10,
# Training
batch_size=32,
learning_rate=3e-4,
save_summary_steps=100,
save_checkpoints_steps=500,
keep_checkpoint_max=200,
keep_checkpoint_every_n_hours=1,
log_step_count_steps=1,
shuffle_buffer_size=4,
# Validation
num_evaluation_steps=32,
eval_start_delay_secs=3600, # 1h: disable time based evaluation
eval_throttle_secs=86400, # 24h: disable time based evaluation
# Misc
logfile="log.txt",
)
def hparams_debug_string(hparams):
values = hparams.values()
hp = [' %s: %s' % (name, values[name]) for name in sorted(values)]
return 'Hyperparameters:\n' + '\n'.join(hp)
| 23.617021
| 71
| 0.711712
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 194
| 0.174775
|
596db7d21a1d0b9384a4b3ba2a66f7f8e7dbfeba
| 1,080
|
py
|
Python
|
coroutines.py
|
PraveenMathew92/python-chatroom-asyncio
|
8b3048f17b76e649aff6bcbb7d084362cab32b58
|
[
"MIT"
] | null | null | null |
coroutines.py
|
PraveenMathew92/python-chatroom-asyncio
|
8b3048f17b76e649aff6bcbb7d084362cab32b58
|
[
"MIT"
] | null | null | null |
coroutines.py
|
PraveenMathew92/python-chatroom-asyncio
|
8b3048f17b76e649aff6bcbb7d084362cab32b58
|
[
"MIT"
] | null | null | null |
"""
File to demonstrate the coroutines api in python
"""
import asyncio
async def coroutine(caller):
print(f'entering ${caller}')
await asyncio.sleep(1)
print(f'exited {caller}')
"""
asyncio.run takes a coroutine and
A RuntimeWarning is generated if the coroutine is not awaited
Eg: coroutine('without_run')
"""
asyncio.run(coroutine('coroutine_call'))
"""
create_task creates a task which runs a coroutine in the event loop
"""
async def task_runner():
task = asyncio.create_task(coroutine('task_call'))
await task
asyncio.run(task_runner())
print("""
\t\t\tRunning with gather task
""")
async def gather_runner():
"""
asyncio.gather takes in a bunch of coroutines and runs them concurrently
"""
await asyncio.gather(
(coroutine('gather')),
(task_runner()))
asyncio.run(gather_runner())
"""
OUTPUT:
entering $coroutine_call
exited coroutine_call
entering $task_call
exited task_call
Running with gather task
entering $gather
entering $task_call
exited gather
exited task_call
"""
| 16.363636
| 76
| 0.694444
| 0
| 0
| 0
| 0
| 0
| 0
| 413
| 0.382407
| 685
| 0.634259
|
5970d34126fb063a7fca4ff450fce1eed6c84c32
| 494
|
py
|
Python
|
projects/tornado_projects/tord/tord/urls.py
|
bigfoolliu/liu_aistuff
|
aa661d37c05c257ee293285dd0868fb7e8227628
|
[
"MIT"
] | 1
|
2019-11-25T07:23:42.000Z
|
2019-11-25T07:23:42.000Z
|
projects/tornado_projects/tord/tord/urls.py
|
bigfoolliu/liu_aistuff
|
aa661d37c05c257ee293285dd0868fb7e8227628
|
[
"MIT"
] | 13
|
2020-01-07T16:09:47.000Z
|
2022-03-02T12:51:44.000Z
|
projects/tornado_projects/tord/tord/urls.py
|
bigfoolliu/liu_aistuff
|
aa661d37c05c257ee293285dd0868fb7e8227628
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
# -*- coding:utf-8 -*-
# author: bigfoolliu
from tord.handlers import (block_test, gocron, index, media, upload)
url_patterns = [
(r"/", index.IndexHandler),
(r"/books", upload.BooksHandler),
(r"/images", media.ImageHandler),
(r"/videos", media.VideoHandler),
# (r"/async/test", async_test.Handler),
(r"/block/test", block_test.BlockHandler),
# (r"/async/(?P<url>/.*)", async_demo.Handler), # FIXME:
(r"/test", gocron.TestHandler),
]
| 26
| 68
| 0.629555
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 215
| 0.435223
|
597101821b26dde66f369e5d6c9ba4029fcb1428
| 140
|
py
|
Python
|
util/emojis.py
|
Lithimlin/TeaWaiter
|
fef8d6ef19b8bd10fcd48a2bb320f6cda3ac7156
|
[
"MIT"
] | null | null | null |
util/emojis.py
|
Lithimlin/TeaWaiter
|
fef8d6ef19b8bd10fcd48a2bb320f6cda3ac7156
|
[
"MIT"
] | null | null | null |
util/emojis.py
|
Lithimlin/TeaWaiter
|
fef8d6ef19b8bd10fcd48a2bb320f6cda3ac7156
|
[
"MIT"
] | null | null | null |
statusEmojis = {'yes':'✅', 'no':'❌'}
numEmojis = {1:'1️⃣', 2:'2️⃣', 3:'3️⃣', 4:'4️⃣', 5:'5️⃣', 6:'6️⃣', 7:'7️⃣', 8:'8️⃣', 9:'9️⃣', 0:'0️⃣'}
| 46.666667
| 102
| 0.328571
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 109
| 0.592391
|
59728e393c4e17abe11271bfcc3dd74f28baee1f
| 28
|
py
|
Python
|
platehunter/platehunter/module/__init__.py
|
ZombieIce/A-Stock-Plate-Crawling
|
e0478c720513876562ebe2a48b9f3131dad63e47
|
[
"MIT"
] | 20
|
2018-10-09T18:53:01.000Z
|
2022-02-20T13:26:43.000Z
|
platehunter/platehunter/module/__init__.py
|
ZombieIce/A-Stock-Plate-Crawling
|
e0478c720513876562ebe2a48b9f3131dad63e47
|
[
"MIT"
] | 36
|
2018-09-20T19:27:54.000Z
|
2022-01-23T14:41:39.000Z
|
insta_hashtag_crawler/__init__.py
|
point1304/insta-hashtag-crawler
|
ee056f91d14e19404335fcc49360942acc2e15e8
|
[
"MIT"
] | 6
|
2021-09-25T14:03:57.000Z
|
2022-03-19T14:44:04.000Z
|
from .crawler import Crawler
| 28
| 28
| 0.857143
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
5972ea55ea758af92089d41c09629539cc06ea40
| 12,048
|
py
|
Python
|
test/test_subprocess.py
|
python-useful-helpers/exec-helpers
|
3e0adfa7dded72ac1c9c93bd88db070f4c9050b6
|
[
"Apache-2.0"
] | 12
|
2018-03-23T23:37:40.000Z
|
2021-07-16T16:07:28.000Z
|
test/test_subprocess.py
|
penguinolog/exec-helpers
|
0784a4772f6e9937540b266fdbb1f5a060fd4b76
|
[
"Apache-2.0"
] | 111
|
2018-03-26T14:10:52.000Z
|
2021-07-12T07:12:45.000Z
|
test/test_subprocess.py
|
penguinolog/exec-helpers
|
0784a4772f6e9937540b266fdbb1f5a060fd4b76
|
[
"Apache-2.0"
] | 6
|
2018-03-26T13:37:21.000Z
|
2018-09-07T03:35:09.000Z
|
# Copyright 2018 - 2020 Alexey Stepanov aka penguinolog.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
# Standard Library
import logging
import random
import subprocess
import typing
from unittest import mock
# External Dependencies
import pytest
# Package Implementation
import exec_helpers
from exec_helpers import _subprocess_helpers
from exec_helpers import proc_enums
from exec_helpers.subprocess import SubprocessExecuteAsyncResult
pytestmark = pytest.mark.skip("Rewrite whole execute tests.")
# All test coroutines will be treated as marked.
command = "ls ~\nline 2\nline 3\nline с кирилицей"
command_log = f"Executing command:\n{command.rstrip()!r}\n"
print_stdin = 'read line; echo "$line"'
default_timeout = 60 * 60 # 1 hour
class FakeFileStream:
"""Mock-like object for stream emulation."""
def __init__(self, *args):
self.__src = list(args)
self.closed = False
def __iter__(self):
"""Normally we iter over source."""
for _ in range(len(self.__src)):
yield self.__src.pop(0)
def fileno(self):
return hash(tuple(self.__src))
def close(self):
"""We enforce close."""
self.closed = True
def read_stream(stream: FakeFileStream):
return tuple([line for line in stream])
configs = {
"positive_simple": dict(
ec=0, stdout=(b" \n", b"2\n", b"3\n", b" \n"), stderr=(), stdin=None, open_stdout=True, open_stderr=True
),
"with_stderr": dict(
ec=0,
stdout=(b" \n", b"2\n", b"3\n", b" \n"),
stderr=(b" \n", b"0\n", b"1\n", b" \n"),
stdin=None,
open_stdout=True,
open_stderr=True,
),
"negative": dict(
ec=1,
stdout=(b" \n", b"2\n", b"3\n", b" \n"),
stderr=(b" \n", b"0\n", b"1\n", b" \n"),
stdin=None,
open_stdout=True,
open_stderr=True,
),
"with_stdin_str": dict(
ec=0, stdout=(b" \n", b"2\n", b"3\n", b" \n"), stderr=(), stdin="stdin", open_stdout=True, open_stderr=True
),
"with_stdin_bytes": dict(
ec=0, stdout=(b" \n", b"2\n", b"3\n", b" \n"), stderr=(), stdin=b"stdin", open_stdout=True, open_stderr=True
),
"with_stdin_bytearray": dict(
ec=0,
stdout=(b" \n", b"2\n", b"3\n", b" \n"),
stderr=(),
stdin=bytearray(b"stdin"),
open_stdout=True,
open_stderr=True,
),
"no_stderr": dict(
ec=0, stdout=(b" \n", b"2\n", b"3\n", b" \n"), stderr=(), stdin=None, open_stdout=True, open_stderr=False
),
"no_stdout": dict(ec=0, stdout=(), stderr=(), stdin=None, open_stdout=False, open_stderr=False),
}
def pytest_generate_tests(metafunc):
"""Tests parametrization."""
if "run_parameters" in metafunc.fixturenames:
metafunc.parametrize(
"run_parameters",
[
"positive_simple",
"with_stderr",
"negative",
"with_stdin_str",
"with_stdin_bytes",
"with_stdin_bytearray",
"no_stderr",
"no_stdout",
],
indirect=True,
)
@pytest.fixture
def run_parameters(request):
"""Tests configuration apply."""
return configs[request.param]
@pytest.fixture
def exec_result(run_parameters):
return exec_helpers.ExecResult(
cmd=command,
stdin=run_parameters["stdin"],
stdout=tuple([line for line in run_parameters["stdout"]]) if run_parameters["stdout"] else None,
stderr=tuple([line for line in run_parameters["stderr"]]) if run_parameters["stderr"] else None,
exit_code=run_parameters["ec"],
)
@pytest.fixture
def execute(mocker, exec_result):
return mocker.patch("exec_helpers.subprocess.Subprocess.execute", name="execute", return_value=exec_result)
@pytest.fixture
def popen(mocker, run_parameters):
mocker.patch("psutil.Process")
def create_mock(
ec: typing.Union[exec_helpers.ExitCodes, int] = exec_helpers.ExitCodes.EX_OK,
stdout: typing.Optional[typing.Tuple] = None,
stderr: typing.Optional[typing.Tuple] = None,
**kwargs,
):
"""Parametrized code."""
proc = mock.Mock()
proc.configure_mock(pid=random.randint(1025, 65536))
if stdout is None:
proc.configure_mock(stdout=None)
else:
proc.attach_mock(FakeFileStream(*stdout), "stdout")
if stderr is None:
proc.configure_mock(stderr=None)
else:
proc.attach_mock(FakeFileStream(*stderr), "stderr")
proc.attach_mock(mock.Mock(return_value=int(ec)), "wait")
proc.configure_mock(returncode=int(ec))
run_shell = mocker.patch("subprocess.Popen", name="popen", return_value=proc)
return run_shell
return create_mock(**run_parameters)
def test_001_execute_async(popen, subprocess_logger, run_parameters) -> None:
"""Test low level API."""
runner = exec_helpers.Subprocess()
res = runner._execute_async(
command,
stdin=run_parameters["stdin"],
open_stdout=run_parameters["open_stdout"],
open_stderr=run_parameters["open_stderr"],
)
assert isinstance(res, SubprocessExecuteAsyncResult)
assert res.interface.wait() == run_parameters["ec"]
assert res.interface.returncode == run_parameters["ec"]
stdout = run_parameters["stdout"]
stderr = run_parameters["stderr"]
if stdout is not None:
assert read_stream(res.stdout) == stdout
else:
assert res.stdout is stdout
if stderr is not None:
assert read_stream(res.stderr) == stderr
else:
assert res.stderr is stderr
if run_parameters["stdin"] is None:
stdin = None
elif isinstance(run_parameters["stdin"], bytes):
stdin = run_parameters["stdin"]
elif isinstance(run_parameters["stdin"], str):
stdin = run_parameters["stdin"].encode(encoding="utf-8")
else:
stdin = bytes(run_parameters["stdin"])
if stdin:
assert res.stdin is None
popen.assert_called_once_with(
args=[command],
stdout=subprocess.PIPE if run_parameters["open_stdout"] else subprocess.DEVNULL,
stderr=subprocess.PIPE if run_parameters["open_stderr"] else subprocess.DEVNULL,
stdin=subprocess.PIPE,
shell=True,
cwd=run_parameters.get("cwd", None),
env=run_parameters.get("env", None),
universal_newlines=False,
**_subprocess_helpers.subprocess_kw,
)
if stdin is not None:
res.interface.stdin.write.assert_called_once_with(stdin)
res.interface.stdin.close.assert_called_once()
def test_002_execute(popen, subprocess_logger, exec_result, run_parameters) -> None:
"""Test API without checkers."""
runner = exec_helpers.Subprocess()
res = runner.execute(
command,
stdin=run_parameters["stdin"],
open_stdout=run_parameters["open_stdout"],
open_stderr=run_parameters["open_stderr"],
)
assert isinstance(res, exec_helpers.ExecResult)
assert res == exec_result
popen().wait.assert_called_once_with(timeout=default_timeout)
assert subprocess_logger.mock_calls[0] == mock.call.log(level=logging.DEBUG, msg=command_log)
def test_003_context_manager(mocker, popen, subprocess_logger, exec_result, run_parameters) -> None:
"""Test context manager for threads synchronization."""
lock_mock = mocker.patch("threading.RLock")
with exec_helpers.Subprocess() as runner:
res = runner.execute(command, stdin=run_parameters["stdin"])
lock_mock.acquire_assert_called_once()
lock_mock.release_assert_called_once()
assert isinstance(res, exec_helpers.ExecResult)
assert res == exec_result
def test_004_check_call(execute, exec_result, subprocess_logger) -> None:
"""Test exit code validator."""
runner = exec_helpers.Subprocess()
if exec_result.exit_code == exec_helpers.ExitCodes.EX_OK:
assert runner.check_call(command, stdin=exec_result.stdin) == exec_result
else:
with pytest.raises(exec_helpers.CalledProcessError) as e:
runner.check_call(command, stdin=exec_result.stdin)
exc: exec_helpers.CalledProcessError = e.value
assert exc.cmd == exec_result.cmd
assert exc.returncode == exec_result.exit_code
assert exc.stdout == exec_result.stdout_str
assert exc.stderr == exec_result.stderr_str
assert exc.result == exec_result
assert exc.expected == (proc_enums.EXPECTED,)
assert subprocess_logger.mock_calls[-1] == mock.call.error(
msg=f"Command {exc.result.cmd!r} returned exit code {exc.result.exit_code!s} "
f"while expected {exc.expected!r}"
)
def test_005_check_call_no_raise(execute, exec_result, subprocess_logger) -> None:
"""Test exit code validator in permissive mode."""
runner = exec_helpers.Subprocess()
res = runner.check_call(command, stdin=exec_result.stdin, raise_on_err=False)
assert res == exec_result
if exec_result.exit_code != exec_helpers.ExitCodes.EX_OK:
expected = (proc_enums.EXPECTED,)
assert subprocess_logger.mock_calls[-1] == mock.call.error(
msg=f"Command {res.cmd!r} returned exit code {res.exit_code!s} while expected {expected!r}"
)
def test_006_check_call_expect(execute, exec_result, subprocess_logger) -> None:
"""Test exit code validator with custom return codes."""
runner = exec_helpers.Subprocess()
assert runner.check_call(command, stdin=exec_result.stdin, expected=[exec_result.exit_code]) == exec_result
def test_007_check_stderr(execute, exec_result, subprocess_logger) -> None:
"""Test STDERR content validator."""
runner = exec_helpers.Subprocess()
if not exec_result.stderr:
assert runner.check_stderr(command, stdin=exec_result.stdin, expected=[exec_result.exit_code]) == exec_result
else:
with pytest.raises(exec_helpers.CalledProcessError) as e:
runner.check_stderr(command, stdin=exec_result.stdin, expected=[exec_result.exit_code])
exc: exec_helpers.CalledProcessError = e.value
assert exc.result == exec_result
assert exc.cmd == exec_result.cmd
assert exc.returncode == exec_result.exit_code
assert exc.stdout == exec_result.stdout_str
assert exc.stderr == exec_result.stderr_str
assert exc.result == exec_result
assert subprocess_logger.mock_calls[-1] == mock.call.error(
msg=f"Command {exc.result.cmd!r} output contains STDERR while not expected\n"
f"\texit code: {exc.result.exit_code!s}"
)
def test_008_check_stderr_no_raise(execute, exec_result, subprocess_logger) -> None:
"""Test STDERR content validator in permissive mode."""
runner = exec_helpers.Subprocess()
assert (
runner.check_stderr(command, stdin=exec_result.stdin, expected=[exec_result.exit_code], raise_on_err=False)
== exec_result
)
def test_009_call(popen, subprocess_logger, exec_result, run_parameters) -> None:
"""Test callable."""
runner = exec_helpers.Subprocess()
res = runner(
command,
stdin=run_parameters["stdin"],
open_stdout=run_parameters["open_stdout"],
open_stderr=run_parameters["open_stderr"],
)
assert isinstance(res, exec_helpers.ExecResult)
assert res == exec_result
popen().wait.assert_called_once_with(timeout=default_timeout)
| 34.820809
| 117
| 0.664011
| 451
| 0.037403
| 140
| 0.011611
| 1,693
| 0.140405
| 0
| 0
| 2,644
| 0.219274
|
59733ab215ceaed85b6503b5568828c87eda4e73
| 1,943
|
py
|
Python
|
Code/v1.0/message.py
|
arik-le/Chips-Bits
|
fa343ea79f13ce3172292871cebd1144b2c3c1c5
|
[
"MIT"
] | 4
|
2017-11-06T15:12:07.000Z
|
2020-12-20T13:44:05.000Z
|
Code/v1.0/message.py
|
arik-le/Chips-Bits
|
fa343ea79f13ce3172292871cebd1144b2c3c1c5
|
[
"MIT"
] | 36
|
2017-11-03T12:07:40.000Z
|
2018-06-22T11:59:59.000Z
|
Code/v1.0/message.py
|
arik-le/Chips-Bits
|
fa343ea79f13ce3172292871cebd1144b2c3c1c5
|
[
"MIT"
] | null | null | null |
import pickle
import os
from constant_variable import *
# class Message
class Message:
def __init__(self,device, id, type, body): # constructor
# message will consist: type of message,content - body,device to send
self.id = id
self.type = type
self.body = body
self.device = device
# add message to queue to send in proper order
def add_to_queue(self):
file = open(MESSAGE_QUEUE_FILE,"a")
message_pickle=pickle.dumps(self)
file.write(message_pickle+BUFFER)
# update master
def update_master(self,master):
if self.device != master:
self.device = master
def __str__(self):
return "From:\t"+self.device.name+"\nMessage:\t"+self.body
# get message from queue
def get():
new_file=open(MESSAGE_QUEUE_FILE,"r")
message_list= new_file.read().split(BUFFER)
return pickle.loads(message_list[0])
# take from file and cast it to object
def file_to_objects():
if not exist():
return []
objects = []
file = open(MESSAGE_QUEUE_FILE, "r")
message_list = file.read().split(BUFFER)
for message in message_list:
# print pickle.loads(message)
try:
objects.append(pickle.loads(message))
except:
            print(message)
return objects
# remove the message from queue
def remove_from_queue():
if exist():
file = open(MESSAGE_QUEUE_FILE, "r")
message_list = file.read().split(BUFFER)
file = open(MESSAGE_QUEUE_FILE, 'w')
file.writelines(message_list[1:])
# check if there is a message in the queue
def exist():
return os.stat(MESSAGE_QUEUE_FILE).st_size != 0
def update_queue(master):#update the master in file
messages = file_to_objects()
for message in messages:
message.update_master(master)
open(MESSAGE_QUEUE_FILE,'w').write("")
for message in messages:
message.add_to_queue()
| 26.616438
| 77
| 0.65054
| 676
| 0.347916
| 0
| 0
| 0
| 0
| 0
| 0
| 391
| 0.201235
|
597345ee49817e67d67ebede702d14893a6e8c4d
| 4,732
|
py
|
Python
|
Lib/featureMan/familyFeatures.py
|
typoman/featureman
|
f115ea8d3faae042845cfca9502d91da88405c68
|
[
"MIT"
] | 13
|
2019-07-21T14:00:49.000Z
|
2019-07-29T21:43:03.000Z
|
Lib/featureMan/familyFeatures.py
|
typoman/featureman
|
f115ea8d3faae042845cfca9502d91da88405c68
|
[
"MIT"
] | 1
|
2019-07-28T12:06:23.000Z
|
2019-07-28T12:06:23.000Z
|
Lib/featureMan/familyFeatures.py
|
typoman/featureman
|
f115ea8d3faae042845cfca9502d91da88405c68
|
[
"MIT"
] | null | null | null |
from featureMan.otSingleSubFeatures import *
from featureMan.otNumberFeatures import *
from featureMan.otLanguages import *
from featureMan.otLocalized import *
from featureMan.otLigatureFeatures import *
from featureMan.otMark import mark
from featureMan.otSyntax import fontDic, GDEF
from featureMan.otKern import kern
from featureMan.otCursive import cursive
def l2str(l):
return '\n'.join(l)
def generateFeatures(f, marksToSkip=None, include=None, base="", path=""):
from time import time
start = time()
if marksToSkip == None:
marksToSkip = set("a c d e i k l n o r s t u y z A C D E G I J K L N O R S T U Y Z dotlessi acute breve caron cedilla circumflex dieresis dotaccent grave hungarumlaut macron ogonek ring tilde acute.case breve.case caron.case circumflex.case dieresis.case dotaccent.case grave.case hungarumlaut.case macron.case ring.case tilde.case caronslovak commaturnedtop commaaccent".split(" "))
fDic = fontDic(f, marksToSkip)
aaltSet = set()
interpretTime = time()
print("Elapsed time for interpreting the ufo data: %s" %(interpretTime - start))
marksSet = set()
basesSet = set()
ligaturesSet = set()
componentsSet = set()
classes = {}
allFeatures = [
ccmpFeature, smcpFeature, caseFeature, arabicFeatures,
lnumFeature, onumFeature, pnumFeature, tnumFeature,
zeroFeature, localized,
ss01Feature, ss02Feature, ss03Feature, ss04Feature, ss05Feature, ss06Feature, ss07Feature,
ss08Feature, ss09Feature, ss10Feature, ss11Feature, ss12Feature, ss13Feature, ss14Feature,
ss15Feature, ss16Feature, ss17Feature, ss18Feature, ss19Feature, ss20Feature,
rligFeature, ligaFeature, dligFeature,
cursive, kern, mark
]
middleSyntax = []
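    # Instantiate every feature writer, collect its syntax, and accumulate shared classes and glyph sets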
for feaClass in allFeatures:
fea = feaClass(fDic, classes)
feaSyntax = fea.syntax()
if feaSyntax:
middleSyntax.append((fea.tag, feaSyntax))
classes.update(fea.classes)
aaltSet.update(fea.aalt)
marksSet.update(fea.mark)
basesSet.update(fea.base)
componentsSet.update(fea.component)
ligaturesSet.update(fea.ligature)
gdef = GDEF(basesSet, ligaturesSet, marksSet, componentsSet, fDic.glyphs)
finalAalt = aaltFeature(aaltSet)
langs = languages(fDic)
allFeaturesSyntax = []
allFeaturesSyntax.append(('logs' , l2str(fDic.log)))
allFeaturesSyntax.append(('lang' , langs.syntax()))
allFeaturesSyntax.append(('aalt' , finalAalt.syntax()))
allFeaturesSyntax.extend(middleSyntax)
allFeaturesSyntax.append(('gdef', gdef.syntax()))
finaFea = base
if include is not None:
if type(include) is str:
include = set(include.split(","))
elif type(include) is list:
include = set(include)
finaFea += l2str([f[1] for f in allFeaturesSyntax if f[0] in include])
else:
finaFea += l2str([f[1] for f in allFeaturesSyntax])
featTime = time()
print("Elapsed time for generating the features: %s" %(featTime - interpretTime))
fontName = ''
fontPath = ''
if f.path:
fontName = f.path.split("/")[-1].split('.')[0]
fontPath = '/'.join(f.path.split("/")[:-1])
if path:
fontPath = path
feaPath = '%s_features.fea' %(fontPath+'/'+fontName)
relativePath = '%s_features.fea' %fontName
with open(feaPath, 'w') as File:
File.write(finaFea)
f.features.text = 'include(%s);' %relativePath
f.features.changed()
print("Elapsed time for saving the features: %s" %(time() - featTime))
print("Elapsed time for the whole process: %s" %(time() - start))
if __name__ == '__main__':
import argparse
from fontParts.fontshell.font import RFont
parser = argparse.ArgumentParser()
parser.add_argument("-u", "--ufo", help="Path to the ufo file.", type=str)
parser.add_argument("-b", "--base", help="Base features to include in the begining. It can be used to add some manual features at top of the feature file.", type=str, default="")
parser.add_argument("-o", "--only", help="Only unclude the comma seperated feature tags written here. For example: mark,gdef", type=str)
parser.add_argument("-p", "--path", help="Path to save the feature file at, default path is next to the UFO.", type=str)
args = parser.parse_args()
if args.ufo is not None:
f = RFont(args.ufo)
generateFeatures(f, marksToSkip=None, base=args.base, include=args.only, path=args.path)
else:
print('You need a UFO for the familyFeatures module to work. Use the following command for help:\npython3 "/path/to/repo/Lib/featureMan/familyFeatures.py" -h')
| 40.793103
| 391
| 0.674134
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1,137
| 0.240279
|
5975a408ae1c989c338845f71aa3900205bb24fd
| 15,265
|
py
|
Python
|
FFSP/FFSP_MatNet/FFSPModel.py
|
MinahPark/MatNet
|
63342de76f6a982bdfb5c1e8d5930d64ec3efa61
|
[
"MIT"
] | 18
|
2021-11-22T09:37:52.000Z
|
2022-03-31T03:48:00.000Z
|
FFSP/FFSP_MatNet/FFSPModel.py
|
MinahPark/MatNet
|
63342de76f6a982bdfb5c1e8d5930d64ec3efa61
|
[
"MIT"
] | 1
|
2021-12-04T05:14:26.000Z
|
2021-12-14T03:04:55.000Z
|
FFSP/FFSP_MatNet/FFSPModel.py
|
MinahPark/MatNet
|
63342de76f6a982bdfb5c1e8d5930d64ec3efa61
|
[
"MIT"
] | 5
|
2021-12-15T01:56:02.000Z
|
2022-03-07T13:13:05.000Z
|
"""
The MIT License
Copyright (c) 2021 MatNet
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
"""
import torch
import torch.nn as nn
import torch.nn.functional as F
from FFSPModel_SUB import AddAndInstanceNormalization, FeedForward, MixedScore_MultiHeadAttention
class FFSPModel(nn.Module):
def __init__(self, **model_params):
super().__init__()
self.model_params = model_params
stage_cnt = self.model_params['stage_cnt']
self.stage_models = nn.ModuleList([OneStageModel(stage_idx, **model_params) for stage_idx in range(stage_cnt)])
def pre_forward(self, reset_state):
stage_cnt = self.model_params['stage_cnt']
for stage_idx in range(stage_cnt):
problems = reset_state.problems_list[stage_idx]
model = self.stage_models[stage_idx]
model.pre_forward(problems)
def soft_reset(self):
# Nothing to reset
pass
def forward(self, state):
batch_size = state.BATCH_IDX.size(0)
pomo_size = state.BATCH_IDX.size(1)
stage_cnt = self.model_params['stage_cnt']
action_stack = torch.empty(size=(batch_size, pomo_size, stage_cnt), dtype=torch.long)
prob_stack = torch.empty(size=(batch_size, pomo_size, stage_cnt))
for stage_idx in range(stage_cnt):
model = self.stage_models[stage_idx]
action, prob = model(state)
action_stack[:, :, stage_idx] = action
prob_stack[:, :, stage_idx] = prob
gathering_index = state.stage_idx[:, :, None]
# shape: (batch, pomo, 1)
action = action_stack.gather(dim=2, index=gathering_index).squeeze(dim=2)
prob = prob_stack.gather(dim=2, index=gathering_index).squeeze(dim=2)
# shape: (batch, pomo)
return action, prob
class OneStageModel(nn.Module):
def __init__(self, stage_idx, **model_params):
super().__init__()
self.model_params = model_params
machine_cnt_list = self.model_params['machine_cnt_list']
machine_cnt = machine_cnt_list[stage_idx]
embedding_dim = self.model_params['embedding_dim']
self.encoder = FFSP_Encoder(**model_params)
self.decoder = FFSP_Decoder(**model_params)
self.encoded_col = None
# shape: (batch, machine_cnt, embedding)
self.encoded_row = None
# shape: (batch, job_cnt, embedding)
def pre_forward(self, problems):
# problems.shape: (batch, job_cnt, machine_cnt)
batch_size = problems.size(0)
job_cnt = problems.size(1)
machine_cnt = problems.size(2)
embedding_dim = self.model_params['embedding_dim']
row_emb = torch.zeros(size=(batch_size, job_cnt, embedding_dim))
# shape: (batch, job_cnt, embedding)
col_emb = torch.zeros(size=(batch_size, machine_cnt, embedding_dim))
# shape: (batch, machine_cnt, embedding)
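        # Assign each machine a distinct, randomly chosen one-hot position out of one_hot_seed_cnt slots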
seed_cnt = self.model_params['one_hot_seed_cnt']
rand = torch.rand(batch_size, seed_cnt)
batch_rand_perm = rand.argsort(dim=1)
rand_idx = batch_rand_perm[:, :machine_cnt]
b_idx = torch.arange(batch_size)[:, None].expand(batch_size, machine_cnt)
m_idx = torch.arange(machine_cnt)[None, :].expand(batch_size, machine_cnt)
col_emb[b_idx, m_idx, rand_idx] = 1
# shape: (batch, machine_cnt, embedding)
self.encoded_row, self.encoded_col = self.encoder(row_emb, col_emb, problems)
# encoded_row.shape: (batch, job_cnt, embedding)
# encoded_col.shape: (batch, machine_cnt, embedding)
self.decoder.set_kv(self.encoded_row)
def forward(self, state):
batch_size = state.BATCH_IDX.size(0)
pomo_size = state.BATCH_IDX.size(1)
encoded_current_machine = self._get_encoding(self.encoded_col, state.stage_machine_idx)
# shape: (batch, pomo, embedding)
all_job_probs = self.decoder(encoded_current_machine,
ninf_mask=state.job_ninf_mask)
# shape: (batch, pomo, job)
if self.training or self.model_params['eval_type'] == 'softmax':
while True: # to fix pytorch.multinomial bug on selecting 0 probability elements
job_selected = all_job_probs.reshape(batch_size * pomo_size, -1).multinomial(1) \
.squeeze(dim=1).reshape(batch_size, pomo_size)
# shape: (batch, pomo)
job_prob = all_job_probs[state.BATCH_IDX, state.POMO_IDX, job_selected] \
.reshape(batch_size, pomo_size)
# shape: (batch, pomo)
                job_prob[state.finished] = 1  # do not backprop finished episodes
if (job_prob != 0).all():
break
else:
job_selected = all_job_probs.argmax(dim=2)
# shape: (batch, pomo)
job_prob = torch.zeros(size=(batch_size, pomo_size)) # any number is okay
return job_selected, job_prob
def _get_encoding(self, encoded_nodes, node_index_to_pick):
# encoded_nodes.shape: (batch, problem, embedding)
# node_index_to_pick.shape: (batch, pomo)
batch_size = node_index_to_pick.size(0)
pomo_size = node_index_to_pick.size(1)
embedding_dim = self.model_params['embedding_dim']
gathering_index = node_index_to_pick[:, :, None].expand(batch_size, pomo_size, embedding_dim)
# shape: (batch, pomo, embedding)
picked_nodes = encoded_nodes.gather(dim=1, index=gathering_index)
# shape: (batch, pomo, embedding)
return picked_nodes
########################################
# ENCODER
########################################
class FFSP_Encoder(nn.Module):
def __init__(self, **model_params):
super().__init__()
encoder_layer_num = model_params['encoder_layer_num']
self.layers = nn.ModuleList([EncoderLayer(**model_params) for _ in range(encoder_layer_num)])
def forward(self, row_emb, col_emb, cost_mat):
# col_emb.shape: (batch, col_cnt, embedding)
# row_emb.shape: (batch, row_cnt, embedding)
# cost_mat.shape: (batch, row_cnt, col_cnt)
for layer in self.layers:
row_emb, col_emb = layer(row_emb, col_emb, cost_mat)
return row_emb, col_emb
class EncoderLayer(nn.Module):
def __init__(self, **model_params):
super().__init__()
self.row_encoding_block = EncodingBlock(**model_params)
self.col_encoding_block = EncodingBlock(**model_params)
def forward(self, row_emb, col_emb, cost_mat):
# row_emb.shape: (batch, row_cnt, embedding)
# col_emb.shape: (batch, col_cnt, embedding)
# cost_mat.shape: (batch, row_cnt, col_cnt)
row_emb_out = self.row_encoding_block(row_emb, col_emb, cost_mat)
col_emb_out = self.col_encoding_block(col_emb, row_emb, cost_mat.transpose(1, 2))
return row_emb_out, col_emb_out
class EncodingBlock(nn.Module):
def __init__(self, **model_params):
super().__init__()
self.model_params = model_params
embedding_dim = self.model_params['embedding_dim']
head_num = self.model_params['head_num']
qkv_dim = self.model_params['qkv_dim']
self.Wq = nn.Linear(embedding_dim, head_num * qkv_dim, bias=False)
self.Wk = nn.Linear(embedding_dim, head_num * qkv_dim, bias=False)
self.Wv = nn.Linear(embedding_dim, head_num * qkv_dim, bias=False)
self.mixed_score_MHA = MixedScore_MultiHeadAttention(**model_params)
self.multi_head_combine = nn.Linear(head_num * qkv_dim, embedding_dim)
self.add_n_normalization_1 = AddAndInstanceNormalization(**model_params)
self.feed_forward = FeedForward(**model_params)
self.add_n_normalization_2 = AddAndInstanceNormalization(**model_params)
def forward(self, row_emb, col_emb, cost_mat):
# NOTE: row and col can be exchanged, if cost_mat.transpose(1,2) is used
# input1.shape: (batch, row_cnt, embedding)
# input2.shape: (batch, col_cnt, embedding)
# cost_mat.shape: (batch, row_cnt, col_cnt)
head_num = self.model_params['head_num']
q = reshape_by_heads(self.Wq(row_emb), head_num=head_num)
# q shape: (batch, head_num, row_cnt, qkv_dim)
k = reshape_by_heads(self.Wk(col_emb), head_num=head_num)
v = reshape_by_heads(self.Wv(col_emb), head_num=head_num)
# kv shape: (batch, head_num, col_cnt, qkv_dim)
out_concat = self.mixed_score_MHA(q, k, v, cost_mat)
# shape: (batch, row_cnt, head_num*qkv_dim)
multi_head_out = self.multi_head_combine(out_concat)
# shape: (batch, row_cnt, embedding)
out1 = self.add_n_normalization_1(row_emb, multi_head_out)
out2 = self.feed_forward(out1)
out3 = self.add_n_normalization_2(out1, out2)
return out3
# shape: (batch, row_cnt, embedding)
########################################
# Decoder
########################################
class FFSP_Decoder(nn.Module):
def __init__(self, **model_params):
super().__init__()
self.model_params = model_params
embedding_dim = self.model_params['embedding_dim']
head_num = self.model_params['head_num']
qkv_dim = self.model_params['qkv_dim']
self.encoded_NO_JOB = nn.Parameter(torch.rand(1, 1, embedding_dim))
self.Wq_1 = nn.Linear(embedding_dim, head_num * qkv_dim, bias=False)
self.Wq_2 = nn.Linear(embedding_dim, head_num * qkv_dim, bias=False)
self.Wq_3 = nn.Linear(embedding_dim, head_num * qkv_dim, bias=False)
self.Wk = nn.Linear(embedding_dim, head_num * qkv_dim, bias=False)
self.Wv = nn.Linear(embedding_dim, head_num * qkv_dim, bias=False)
self.multi_head_combine = nn.Linear(head_num * qkv_dim, embedding_dim)
self.k = None # saved key, for multi-head attention
self.v = None # saved value, for multi-head_attention
self.single_head_key = None # saved key, for single-head attention
def set_kv(self, encoded_jobs):
# encoded_jobs.shape: (batch, job, embedding)
batch_size = encoded_jobs.size(0)
embedding_dim = self.model_params['embedding_dim']
head_num = self.model_params['head_num']
encoded_no_job = self.encoded_NO_JOB.expand(size=(batch_size, 1, embedding_dim))
encoded_jobs_plus_1 = torch.cat((encoded_jobs, encoded_no_job), dim=1)
# shape: (batch, job_cnt+1, embedding)
self.k = reshape_by_heads(self.Wk(encoded_jobs_plus_1), head_num=head_num)
self.v = reshape_by_heads(self.Wv(encoded_jobs_plus_1), head_num=head_num)
# shape: (batch, head_num, job+1, qkv_dim)
self.single_head_key = encoded_jobs_plus_1.transpose(1, 2)
# shape: (batch, embedding, job+1)
def forward(self, encoded_machine, ninf_mask):
# encoded_machine.shape: (batch, pomo, embedding)
# ninf_mask.shape: (batch, pomo, job_cnt+1)
head_num = self.model_params['head_num']
# Multi-Head Attention
#######################################################
q = reshape_by_heads(self.Wq_3(encoded_machine), head_num=head_num)
# shape: (batch, head_num, pomo, qkv_dim)
out_concat = self._multi_head_attention_for_decoder(q, self.k, self.v,
rank3_ninf_mask=ninf_mask)
# shape: (batch, pomo, head_num*qkv_dim)
mh_atten_out = self.multi_head_combine(out_concat)
# shape: (batch, pomo, embedding)
# Single-Head Attention, for probability calculation
#######################################################
score = torch.matmul(mh_atten_out, self.single_head_key)
# shape: (batch, pomo, job_cnt+1)
sqrt_embedding_dim = self.model_params['sqrt_embedding_dim']
logit_clipping = self.model_params['logit_clipping']
score_scaled = score / sqrt_embedding_dim
# shape: (batch, pomo, job_cnt+1)
score_clipped = logit_clipping * torch.tanh(score_scaled)
score_masked = score_clipped + ninf_mask
probs = F.softmax(score_masked, dim=2)
# shape: (batch, pomo, job_cnt+1)
return probs
def _multi_head_attention_for_decoder(self, q, k, v, rank2_ninf_mask=None, rank3_ninf_mask=None):
# q shape: (batch, head_num, n, qkv_dim) : n can be either 1 or PROBLEM_SIZE
# k,v shape: (batch, head_num, job_cnt+1, qkv_dim)
# rank2_ninf_mask.shape: (batch, job_cnt+1)
# rank3_ninf_mask.shape: (batch, n, job_cnt+1)
batch_size = q.size(0)
n = q.size(2)
job_cnt_plus_1 = k.size(2)
head_num = self.model_params['head_num']
qkv_dim = self.model_params['qkv_dim']
sqrt_qkv_dim = self.model_params['sqrt_qkv_dim']
score = torch.matmul(q, k.transpose(2, 3))
# shape: (batch, head_num, n, job_cnt+1)
score_scaled = score / sqrt_qkv_dim
if rank2_ninf_mask is not None:
score_scaled = score_scaled + rank2_ninf_mask[:, None, None, :].expand(batch_size, head_num, n, job_cnt_plus_1)
if rank3_ninf_mask is not None:
score_scaled = score_scaled + rank3_ninf_mask[:, None, :, :].expand(batch_size, head_num, n, job_cnt_plus_1)
weights = nn.Softmax(dim=3)(score_scaled)
# shape: (batch, head_num, n, job_cnt+1)
out = torch.matmul(weights, v)
# shape: (batch, head_num, n, qkv_dim)
out_transposed = out.transpose(1, 2)
# shape: (batch, n, head_num, qkv_dim)
out_concat = out_transposed.reshape(batch_size, n, head_num * qkv_dim)
# shape: (batch, n, head_num*qkv_dim)
return out_concat
########################################
# NN SUB FUNCTIONS
########################################
def reshape_by_heads(qkv, head_num):
# q.shape: (batch, n, head_num*key_dim) : n can be either 1 or PROBLEM_SIZE
batch_s = qkv.size(0)
n = qkv.size(1)
q_reshaped = qkv.reshape(batch_s, n, head_num, -1)
# shape: (batch, n, head_num, key_dim)
q_transposed = q_reshaped.transpose(1, 2)
# shape: (batch, head_num, n, key_dim)
return q_transposed
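# A minimal standalone check of reshape_by_heads with assumed toy sizes
# (batch=2, n=5, head_num=4, qkv_dim=8); illustrative only, not part of the original module.
if __name__ == '__main__':
    import torch
    _qkv = torch.randn(2, 5, 4 * 8)              # (batch, n, head_num*qkv_dim)
    _heads = reshape_by_heads(_qkv, head_num=4)
    assert _heads.shape == (2, 4, 5, 8)          # (batch, head_num, n, qkv_dim)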
| 39.141026
| 123
| 0.648411
| 13,333
| 0.873436
| 0
| 0
| 0
| 0
| 0
| 0
| 4,495
| 0.294464
|
5975bf51cf6b40314443cbac07c50fa49c107d36
| 1,697
|
py
|
Python
|
compose.py
|
lvyufeng/mindspore_poems
|
2f46afa290a8065cd1c774c26a96be76da30873e
|
[
"MIT"
] | null | null | null |
compose.py
|
lvyufeng/mindspore_poems
|
2f46afa290a8065cd1c774c26a96be76da30873e
|
[
"MIT"
] | null | null | null |
compose.py
|
lvyufeng/mindspore_poems
|
2f46afa290a8065cd1c774c26a96be76da30873e
|
[
"MIT"
] | null | null | null |
import os
import numpy as np
import mindspore
from mindspore import Tensor
from mindspore import load_checkpoint, load_param_into_net
from src.model import RNNModel, RNNModelInfer
from src.utils import process_poems
start_token = 'B'
end_token = 'E'
model_dir = './ckpt/'
corpus_file = './data/poems.txt'
def to_word(predict, vocabs):
t = np.cumsum(predict)
s = np.sum(predict)
sample = int(np.searchsorted(t, np.random.rand(1) * s))
    if sample >= len(vocabs):
sample = len(vocabs) - 1
return vocabs[sample]
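# A tiny illustration of the weighted sampling in to_word, with assumed toy
# inputs (illustrative only, not part of the original script): the entry with
# the largest probability mass is returned most often, e.g.
#     to_word(np.array([0.1, 0.7, 0.2]), ['a', 'b', 'c'])  # usually 'b'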
def gen_poem(begin_word):
    print('## loading corpus from %s' % corpus_file)
poems_vector, word_int_map, vocabularies = process_poems(corpus_file)
print(len(vocabularies))
rnn_model = RNNModel(len(vocabularies), rnn_size=128, model='lstm')
param_dict = load_checkpoint(
os.path.join(model_dir, f'poems.6.ckpt'))
param_not_load = load_param_into_net(rnn_model, param_dict)
print(param_not_load)
rnn_model = RNNModelInfer(rnn_model)
x = np.array([list(map(word_int_map.get, start_token))])
predict = rnn_model(Tensor(x, mindspore.int32))
word = begin_word or to_word(predict.asnumpy(), vocabularies)
poem_ = ''
i = 0
while word != end_token:
poem_ += word
i += 1
if i > 24:
break
x = np.array([[word_int_map[word]]])
predict = rnn_model(Tensor(x, mindspore.int32))
word = to_word(predict.asnumpy(), vocabularies)
return poem_
if __name__ == '__main__':
begin_char = input('## (输入 quit 退出)请输入第一个字 please input the first character: ')
if begin_char == 'quit':
exit()
poem = gen_poem(begin_char)
print(poem)
| 30.303571
| 83
| 0.669417
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 184
| 0.10679
|
5976b5eadcdfa649651a6db9b9bd714639c5b347
| 1,523
|
py
|
Python
|
pychemia/core/from_file.py
|
petavazohi/PyChemia
|
e779389418771c25c830aed360773c63bb069372
|
[
"MIT"
] | 67
|
2015-01-31T07:44:55.000Z
|
2022-03-21T21:43:34.000Z
|
pychemia/core/from_file.py
|
petavazohi/PyChemia
|
e779389418771c25c830aed360773c63bb069372
|
[
"MIT"
] | 13
|
2016-06-03T19:07:51.000Z
|
2022-03-31T04:20:40.000Z
|
pychemia/core/from_file.py
|
petavazohi/PyChemia
|
e779389418771c25c830aed360773c63bb069372
|
[
"MIT"
] | 37
|
2015-01-22T15:37:23.000Z
|
2022-03-21T15:38:10.000Z
|
import os
import sys
from pychemia import HAS_PYMATGEN, pcm_log
from .structure import Structure
from pychemia.code.vasp import read_poscar
from pychemia.code.abinit import AbinitInput
def structure_from_file(structure_file):
"""
    Attempts to reconstruct a PyChemia Structure from the contents of a given file.
    Valid entries are JSON, CIF, POSCAR/CONTCAR and ABINIT input files.
:param structure_file: The path to a file where the structure can be reconstructed
:type structure_file: str
:return: PyChemia Structure if succeed, None otherwise
"""
st = None
basename = os.path.basename(structure_file)
if not os.path.isfile(structure_file):
raise ValueError("ERROR: Could not open file '%s'" % structure_file)
if basename[-4:].lower() == 'json':
st = Structure.load_json(structure_file)
elif basename[-3:].lower() == 'cif' and HAS_PYMATGEN:
import pychemia.external.pymatgen
st = pychemia.external.pymatgen.cif2structure(structure_file)[0]
elif 'poscar' in basename.lower():
st = read_poscar(structure_file)
elif 'contcar' in basename.lower():
st = read_poscar(structure_file)
elif 'abinit' in basename.lower():
av = AbinitInput(structure_file)
st = av.get_structure()
else:
try:
st = read_poscar(structure_file)
except ValueError:
            raise ValueError('Could not convert file as POSCAR')
if st is None:
pcm_log.debug("ERROR: Could not extract structure from file '%s'" % structure_file)
return st
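# Hypothetical usage sketch (illustrative file name, not taken from the repository):
#     st = structure_from_file('POSCAR')
#     if st is not None:
#         print(st)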
| 37.146341
| 99
| 0.692055
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 443
| 0.290682
|
59792e136f9480b5e034aa6d01981255bd1bfdd7
| 992
|
py
|
Python
|
snptools/vc_matrix.py
|
pvanheus/variant_exploration_with_tralynca
|
4ffadc29c19d68909beed2254646e36513311847
|
[
"MIT"
] | null | null | null |
snptools/vc_matrix.py
|
pvanheus/variant_exploration_with_tralynca
|
4ffadc29c19d68909beed2254646e36513311847
|
[
"MIT"
] | null | null | null |
snptools/vc_matrix.py
|
pvanheus/variant_exploration_with_tralynca
|
4ffadc29c19d68909beed2254646e36513311847
|
[
"MIT"
] | null | null | null |
from os import listdir
import os.path
import pandas as pd
from .count_variants_per_gene import process_vcf
from .genetree import make_gene_tree
def make_variant_count_matrix(input_directory, output_filename):
gene_tree = make_gene_tree()
locus_names = sorted([ interval.data['locus'] for interval in gene_tree ])
matrix = []
futures = []
for filename in sorted(listdir(input_directory)):
if filename.endswith('.vcf.gz') or filename.endswith('.vcf'):
path = os.path.join(input_directory, filename)
counts = process_vcf(path, gene_tree)
row = [ counts.get(locus, 0) for locus in locus_names ]
matrix.append(row)
sample_names = [ filename.split('.')[0] for filename in sorted(listdir(input_directory))
if filename.endswith('.vcf.gz') or filename.endswith('.vcf') ]
data = pd.DataFrame(matrix, index=sample_names, columns=locus_names)
data.to_csv(output_filename)
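# Hypothetical invocation sketch (illustrative paths, not taken from the repository):
#     make_variant_count_matrix('vcf_dir/', 'variant_counts.csv')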
| 41.333333
| 93
| 0.676411
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 40
| 0.040323
|
5979cf5bed5000445a52e27786a6829f4458f888
| 481
|
py
|
Python
|
oarepo_records_draft/merge.py
|
oarepo/invenio-records-draft
|
6d77309996c58fde7731e5f182e9cd5400f81f14
|
[
"MIT"
] | 1
|
2020-06-03T14:44:49.000Z
|
2020-06-03T14:44:49.000Z
|
oarepo_records_draft/merge.py
|
oarepo/invenio-records-draft
|
6d77309996c58fde7731e5f182e9cd5400f81f14
|
[
"MIT"
] | 7
|
2020-06-02T14:45:48.000Z
|
2021-11-16T08:38:47.000Z
|
oarepo_records_draft/merge.py
|
oarepo/invenio-records-draft
|
6d77309996c58fde7731e5f182e9cd5400f81f14
|
[
"MIT"
] | 1
|
2019-08-15T07:59:48.000Z
|
2019-08-15T07:59:48.000Z
|
from deepmerge import Merger
def list_merge(config, path, base, nxt):
for k in range(0, min(len(base), len(nxt))):
if isinstance(base[k], (dict, list, tuple)):
draft_merger.merge(base[k], nxt[k])
else:
base[k] = nxt[k]
for k in range(len(base), len(nxt)):
base.append(nxt[k])
return base
draft_merger = Merger(
[
(list, [list_merge]),
(dict, ["merge"])
],
["override"],
["override"]
)
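# A minimal runnable check of draft_merger with assumed toy data (illustrative
# only, not taken from the repository): lists are merged element-wise and then
# extended, nested dicts are merged recursively, and scalar conflicts take the
# newer value.
if __name__ == '__main__':
    base = {'title': 'old', 'authors': [{'name': 'A'}]}
    nxt = {'title': 'new', 'authors': [{'affiliation': 'X'}, {'name': 'B'}]}
    merged = draft_merger.merge(base, nxt)
    assert merged['title'] == 'new'
    assert merged['authors'] == [{'name': 'A', 'affiliation': 'X'}, {'name': 'B'}]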
| 20.913043
| 52
| 0.534304
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 27
| 0.056133
|
597bfa5b6f7cdb21349ef3d1cce73227ae2c86fc
| 4,951
|
py
|
Python
|
source/01_make_coordinates/make_coordinates.py
|
toshi-k/kaggle-airbus-ship-detection-challenge
|
872a160057592022488b1772b6c7a8982677d1dc
|
[
"Apache-2.0"
] | 90
|
2018-11-17T21:37:41.000Z
|
2021-11-24T11:55:34.000Z
|
source/01_make_coordinates/make_coordinates.py
|
jackweiwang/kaggle-airbus-ship-detection-challenge
|
872a160057592022488b1772b6c7a8982677d1dc
|
[
"Apache-2.0"
] | 3
|
2018-11-27T14:23:15.000Z
|
2020-03-09T09:23:25.000Z
|
source/01_make_coordinates/make_coordinates.py
|
jackweiwang/kaggle-airbus-ship-detection-challenge
|
872a160057592022488b1772b6c7a8982677d1dc
|
[
"Apache-2.0"
] | 14
|
2018-11-17T21:37:44.000Z
|
2020-11-30T02:22:28.000Z
|
import os
import numpy as np
import pandas as pd
from tqdm import tqdm
from PIL import Image
from lib.img2_coord_ica import img2_coord_iter, coord2_img
from lib.log import Logger
# ref: https://www.kaggle.com/paulorzp/run-length-encode-and-decode
def rle_decode(mask_rle, shape=(768, 768)):
"""
Args:
mask_rle: run-length as string formated (start length)
shape: (height,width) of array to return
Returns:
numpy array, 1 - mask, 0 - background
"""
s = mask_rle.split()
starts, lengths = [np.asarray(x, dtype=int) for x in (s[0:][::2], s[1:][::2])]
starts -= 1
ends = starts + lengths
img = np.zeros(shape[0]*shape[1], dtype=np.uint8)
for lo, hi in zip(starts, ends):
img[lo:hi] = 255
return img.reshape(shape).T
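# A tiny worked example of rle_decode on an assumed 3x3 grid (the real masks are
# 768x768; the RLE string and size here are illustrative only). '1 2 7 1' sets
# pixels 1-2 and 7; pixel numbers are 1-based and run down the columns of the
# returned (transposed) mask, giving:
#     [[255,   0, 255],
#      [255,   0,   0],
#      [  0,   0,   0]]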
def main_test():
i = 5304 # 11, 15, 16, 5398
image_id = segmentations.iloc[i, 0]
truth_img = rle_decode(segmentations.iloc[i, 1])
print(np.max(truth_img))
coord = img2_coord_iter(truth_img / 255.0, threshold=0.05)
reconst_img = coord2_img(*coord)
sse = np.sum((reconst_img - truth_img) ** 2)
print('sum of squared error: {}'.format(sse))
os.makedirs('_result_sample', exist_ok=True)
Image.fromarray(reconst_img).save(os.path.join('_result_sample', image_id[:-4] + '_reconstruct.png'), format='PNG')
Image.fromarray(truth_img).save(os.path.join('_result_sample', image_id[:-4] + '_truth.png'), format='PNG')
def main():
logger = Logger('coord_ica')
list_mean_x = list()
list_mean_y = list()
list_height = list()
list_aspect_ratio = list()
list_rotate = list()
num_error = 0
num_zero_ship = 0
os.makedirs('_error_imgs', exist_ok=True)
sse_array = np.array([])
for i, image_id in tqdm(enumerate(segmentations.ImageId), total=len(segmentations)):
encoded = segmentations.iloc[i, 1]
if encoded == '':
list_mean_x.append(np.nan)
list_mean_y.append(np.nan)
list_height.append(np.nan)
list_aspect_ratio.append(np.nan)
list_rotate.append(np.nan)
num_zero_ship += 1
continue
truth_img = rle_decode(encoded)
        reconst_img = np.zeros(truth_img.shape)  # initialize
        matching_degree = 0.0  # initialize so the except branch below cannot hit an undefined name
threshold_iter = 0.95
threshold_last = 0.6
truth_img_norm = truth_img / 255.0
try:
mean_x, mean_y, height, aspect_ratio, rotate, img_size = img2_coord_iter(truth_img_norm, threshold_iter)
reconst_img = coord2_img(mean_x, mean_y, height, aspect_ratio, rotate, img_size)
reconst_img_norm = reconst_img / 255.0
sse = np.sum((reconst_img_norm - truth_img_norm) ** 2)
sse_array = np.append(sse_array, sse)
area_intersect = np.sum(truth_img_norm * reconst_img_norm)
area_union = np.sum(truth_img_norm) + np.sum(reconst_img_norm) - area_intersect
matching_degree = area_intersect / area_union
if matching_degree < threshold_last:
logger.info('[{}] sse: {} matching_degree: {}'.format(image_id, sse, matching_degree))
raise RuntimeError
list_mean_x.append(mean_x)
list_mean_y.append(mean_y)
list_height.append(height)
list_aspect_ratio.append(aspect_ratio)
list_rotate.append(rotate)
except (RuntimeError, ValueError):
num_error += 1
list_mean_x.append(np.nan)
list_mean_y.append(np.nan)
list_height.append(np.nan)
list_aspect_ratio.append(np.nan)
list_rotate.append(np.nan)
if matching_degree < threshold_last:
try:
Image.fromarray(reconst_img).save(
os.path.join('_error_imgs', image_id[:-4] + '_deg{:.3f}_re.png'.format(matching_degree)))
Image.fromarray(truth_img).save(
os.path.join('_error_imgs', image_id[:-4] + '_deg{:.3f}_truth.png'.format(matching_degree)))
except:
pass
logger.info('mean of reconstruct error: {:.3f}'.format(np.mean(sse_array)))
logger.info('num zero ship: {0:d} / {1:d}'.format(num_zero_ship, len(segmentations)))
logger.info('num_error: {0:d} / {1:d}'.format(num_error, len(segmentations)))
result = pd.DataFrame()
result['ImageID'] = segmentations.ImageId
result['x'] = list_mean_y
result['y'] = list_mean_x
result['height'] = list_height
result['width'] = [height / ratio for height, ratio in zip(list_height, list_aspect_ratio)]
result['rotate'] = list_rotate
result.to_csv('../../input/coordinates.csv', index=False, float_format='%.4f')
if __name__ == '__main__':
segmentations = pd.read_csv('../../dataset/train_ship_segmentations_v2.csv')
print(segmentations.head())
segmentations = segmentations.fillna('')
# main_test()
main()
| 30.006061
| 119
| 0.626338
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 769
| 0.155322
|
597ddcf7272429172b7edee0cb03c0de356cd799
| 127
|
py
|
Python
|
tests/test_main.py
|
skypaw/rconcrete
|
30bc7e5ada2afa975caabcd38461707e094d695b
|
[
"MIT"
] | null | null | null |
tests/test_main.py
|
skypaw/rconcrete
|
30bc7e5ada2afa975caabcd38461707e094d695b
|
[
"MIT"
] | 2
|
2022-02-05T18:49:44.000Z
|
2022-02-06T01:11:07.000Z
|
tests/test_main.py
|
skypaw/rconcrete
|
30bc7e5ada2afa975caabcd38461707e094d695b
|
[
"MIT"
] | null | null | null |
from src.main import sample_function
def test_addition():
test = sample_function(4)
print('test')
assert 8 == test
| 21.166667
| 36
| 0.692913
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 6
| 0.047244
|
597e7da85300fb6bd6d365c07bb2ba1dbac55565
| 1,598
|
py
|
Python
|
scripts/combine_errors.py
|
nbren12/nn_atmos_param
|
cb138f0b211fd5743e56ad659aec38c082d2b3ac
|
[
"MIT"
] | 4
|
2018-09-16T20:55:57.000Z
|
2020-12-06T11:27:50.000Z
|
scripts/combine_errors.py
|
nbren12/nn_atmos_param
|
cb138f0b211fd5743e56ad659aec38c082d2b3ac
|
[
"MIT"
] | 5
|
2018-04-07T07:40:39.000Z
|
2018-06-20T06:56:08.000Z
|
scripts/combine_errors.py
|
nbren12/nn_atmos_param
|
cb138f0b211fd5743e56ad659aec38c082d2b3ac
|
[
"MIT"
] | null | null | null |
import numpy as np
import re
import json
import xarray as xr
import pandas as pd
def read_train_loss(epoch, fname,
variables=['test_loss', 'train_loss']):
"""Read the loss.json file for the current epochs test and train loss"""
df = pd.read_json(fname)
epoch_means = df.groupby('epoch').mean()
# need to look for epoch-1 because this data is accumulated over the whole first epoch
if epoch > 0:
return epoch_means.loc[epoch-1][variables].to_dict()
else:
return {'test_loss': np.nan, 'train_loss': np.nan}
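# Assumed shape of loss.json (inferred from the code above, not from the data):
# a list of records like {"epoch": 0, "test_loss": ..., "train_loss": ...}, one
# per logged step, which pandas then averages per epoch.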
errors = []
dims = []
pattern = re.compile("data/output/model.(.*?)/(.*?)/(.*?)/error.nc")
for f in snakemake.input:
m = pattern.search(f)
if m:
model, seed, epoch = m.groups()
ds = xr.open_dataset(f)
arg_file = f"data/output/model.{model}/{seed}/arguments.json"
args = json.load(open(arg_file))
# nhidden is a list, so need to just take the first element
# since all the neural networks I fit are single layer
args['nhidden'] = args['nhidden'][0]
args.pop('seed', None)
ds = ds.assign(**args)
loss_file = f"data/output/model.{model}/{seed}/loss.json"
train_error = read_train_loss(int(epoch), loss_file)
ds = ds.assign(**train_error)
# append to lists
dims.append((model, seed, int(epoch)))
errors.append(ds)
names = ['model', 'seed', 'epoch']
dim = pd.MultiIndex.from_tuples(dims, names=names)
dim.name = 'tmp'
ds = xr.concat(errors, dim=dim).unstack('tmp')
ds.to_netcdf(snakemake.output[0])
| 30.150943
| 90
| 0.627034
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 536
| 0.335419
|
59801917a885910b96ef72a02bd5c83398abe7ef
| 705
|
py
|
Python
|
tests/acceptance/selene_collection_should_test.py
|
KalinkinaMaria/selene
|
859e1102c85740b52af8d0f08dd6b6490b4bd2ff
|
[
"MIT"
] | null | null | null |
tests/acceptance/selene_collection_should_test.py
|
KalinkinaMaria/selene
|
859e1102c85740b52af8d0f08dd6b6490b4bd2ff
|
[
"MIT"
] | 1
|
2021-06-02T04:21:17.000Z
|
2021-06-02T04:21:17.000Z
|
tests/acceptance/selene_collection_should_test.py
|
vkarpenko/selene
|
4776357430c940be38f38be9981006dd156f9730
|
[
"MIT"
] | null | null | null |
import pytest
from selenium.common.exceptions import TimeoutException
from selene.browser import *
from selene.support.conditions import have
from selene.support.jquery_style_selectors import ss
from tests.acceptance.helpers.helper import get_test_driver
from tests.acceptance.helpers.todomvc import given_active
def setup_module(m):
set_driver(get_test_driver())
def teardown_module(m):
driver().quit()
def test_assure_passes():
given_active("a", "b")
ss("#todo-list>li").should(have.exact_texts("a", "b"))
def test_assure_fails():
given_active("a", "b")
with pytest.raises(TimeoutException):
ss("#todo-list>li").should(have.exact_texts("a.", "b."), timeout=0.1)
| 25.178571
| 77
| 0.741844
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 56
| 0.079433
|
5980640bb02c2631ecc30d2c519d9ed76e0a3bab
| 2,422
|
py
|
Python
|
genomics_data_index/test/unit/variant/service/test_SQLQueryInBatcher.py
|
apetkau/genomics-data-index
|
d0cc119fd57b8cbd701affb1c84450cf7832fa01
|
[
"Apache-2.0"
] | 12
|
2021-05-03T20:56:05.000Z
|
2022-01-04T14:52:19.000Z
|
genomics_data_index/test/unit/variant/service/test_SQLQueryInBatcher.py
|
apetkau/thesis-index
|
6c96e9ed75d8e661437effe62a939727a0b473fc
|
[
"Apache-2.0"
] | 30
|
2021-04-26T23:03:40.000Z
|
2022-02-25T18:41:14.000Z
|
genomics_data_index/test/unit/variant/service/test_SQLQueryInBatcher.py
|
apetkau/genomics-data-index
|
d0cc119fd57b8cbd701affb1c84450cf7832fa01
|
[
"Apache-2.0"
] | null | null | null |
from genomics_data_index.storage.service import SQLQueryInBatcherDict, SQLQueryInBatcherList
def test_sql_query_in_batcher_dict():
in_data = ['A', 'B', 'C', 'D', 'E']
# Test batch size 1
batcher = SQLQueryInBatcherDict(in_data=in_data, batch_size=1)
results = batcher.process(lambda in_batch: {x: True for x in in_batch})
assert isinstance(results, dict)
assert 5 == len(results)
assert {'A', 'B', 'C', 'D', 'E'} == set(results.keys())
# Test batch size 2
batcher = SQLQueryInBatcherDict(in_data=in_data, batch_size=2)
results = batcher.process(lambda in_batch: {x: True for x in in_batch})
assert isinstance(results, dict)
assert 5 == len(results)
assert {'A', 'B', 'C', 'D', 'E'} == set(results.keys())
# Test batch size 5
batcher = SQLQueryInBatcherDict(in_data=in_data, batch_size=5)
results = batcher.process(lambda in_batch: {x: True for x in in_batch})
assert isinstance(results, dict)
assert 5 == len(results)
assert {'A', 'B', 'C', 'D', 'E'} == set(results.keys())
# Test batch size 6
batcher = SQLQueryInBatcherDict(in_data=in_data, batch_size=6)
results = batcher.process(lambda in_batch: {x: True for x in in_batch})
assert isinstance(results, dict)
assert 5 == len(results)
assert {'A', 'B', 'C', 'D', 'E'} == set(results.keys())
def test_sql_query_in_batcher_list():
in_data = ['A', 'B', 'C', 'D', 'E']
# Test batch size 1
batcher = SQLQueryInBatcherList(in_data=in_data, batch_size=1)
results = batcher.process(lambda in_batch: [x for x in in_batch])
assert isinstance(results, list)
assert 5 == len(results)
assert in_data == results
# Test batch size 2
batcher = SQLQueryInBatcherList(in_data=in_data, batch_size=2)
results = batcher.process(lambda in_batch: in_batch)
assert isinstance(results, list)
assert 5 == len(results)
assert in_data == results
# Test batch size 5
batcher = SQLQueryInBatcherList(in_data=in_data, batch_size=5)
results = batcher.process(lambda in_batch: in_batch)
assert isinstance(results, list)
assert 5 == len(results)
assert in_data == results
# Test batch size 6
batcher = SQLQueryInBatcherList(in_data=in_data, batch_size=6)
results = batcher.process(lambda in_batch: in_batch)
assert isinstance(results, list)
assert 5 == len(results)
assert in_data == results
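# A rough sketch of the chunking behaviour exercised above (an assumed
# illustration, not the actual SQLQueryInBatcher implementation).
def _process_in_batches_sketch(in_data, batch_size, query_func):
    results = []
    for start in range(0, len(in_data), batch_size):
        # Each slice is handed to the query callable, mirroring how the batchers
        # split the input values before querying.
        results.extend(query_func(in_data[start:start + batch_size]))
    return results


def test_sql_query_in_batcher_sketch():
    in_data = ['A', 'B', 'C', 'D', 'E']
    assert _process_in_batches_sketch(in_data, 2, lambda batch: batch) == in_data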
| 36.69697
| 92
| 0.676301
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 242
| 0.099917
|
59807967b291bcc22ce0c7c760cacd407b042fe9
| 609
|
py
|
Python
|
tipico_server/utils/constants.py
|
lbusoni/tipico_server
|
390e1ad8ca6a56ac14cebc1f9c50c9cb5803e287
|
[
"MIT"
] | null | null | null |
tipico_server/utils/constants.py
|
lbusoni/tipico_server
|
390e1ad8ca6a56ac14cebc1f9c50c9cb5803e287
|
[
"MIT"
] | null | null | null |
tipico_server/utils/constants.py
|
lbusoni/tipico_server
|
390e1ad8ca6a56ac14cebc1f9c50c9cb5803e287
|
[
"MIT"
] | null | null | null |
class Constants:
METER_2_NANOMETER= 1e9
APP_NAME= "inaf.arcetri.ao.tipico_server"
APP_AUTHOR= "INAF Arcetri Adaptive Optics"
THIS_PACKAGE= 'tipico_server'
PROCESS_MONITOR_CONFIG_SECTION= 'processMonitor'
SERVER_1_CONFIG_SECTION= 'serverOfAnInstrument'
SERVER_2_CONFIG_SECTION= 'serverOfAnotherInstrument'
    # TODO: must be the same as console_scripts in setup.py
START_PROCESS_NAME= 'tipico_start'
STOP_PROCESS_NAME= 'tipico_stop'
KILL_ALL_PROCESS_NAME= 'tipico_kill_all'
SERVER_1_PROCESS_NAME= 'tipico_server_1'
SERVER_2_PROCESS_NAME= 'tipico_server_2'
| 30.45
| 59
| 0.775041
| 605
| 0.993432
| 0
| 0
| 0
| 0
| 0
| 0
| 274
| 0.449918
|
5980a13b88db20b5e773819c926a4981f53bb21e
| 1,611
|
py
|
Python
|
mu.py
|
cool2645/shadowsocksrr
|
0a594857f4c3125ab14d27d7fd8143291b7c9fee
|
[
"Apache-2.0"
] | 2
|
2018-05-14T10:41:38.000Z
|
2020-05-22T12:40:57.000Z
|
mu.py
|
cool2645/shadowsocksrr
|
0a594857f4c3125ab14d27d7fd8143291b7c9fee
|
[
"Apache-2.0"
] | null | null | null |
mu.py
|
cool2645/shadowsocksrr
|
0a594857f4c3125ab14d27d7fd8143291b7c9fee
|
[
"Apache-2.0"
] | 1
|
2018-09-22T16:15:14.000Z
|
2018-09-22T16:15:14.000Z
|
import db_transfer
import config
import logging
from musdk.client import Client
class MuApiTransfer(db_transfer.TransferBase):
client = None
users = []
def __init__(self):
super(MuApiTransfer, self).__init__()
self.pull_ok = False
self.port_uid_table = {}
self.init_mu_client()
def init_mu_client(self):
mu_url = config.mu_uri
mu_token = config.token
node_id = config.node_id
mu_client = Client(mu_url, node_id, mu_token)
self.client = mu_client
def pull_db_all_user(self):
print("pull all users...")
return self.pull_db_users()
def pull_db_users(self):
users = self.client.get_users_res()
if users is None:
return self.users
for user in users:
self.port_uid_table[user['port']] = user['id']
self.users = users
return users
def update_all_user(self, dt_transfer):
print('call update all user')
print(dt_transfer)
update_transfer = {}
logs = []
for id in dt_transfer.keys():
transfer = dt_transfer[id]
if transfer[0] + transfer[1] < 1024:
continue
update_transfer[id] = transfer
uid = self.port_uid_table[id]
log = self.client.gen_traffic_log(uid, transfer[0], transfer[1])
logs.append(log)
print("logs ", logs)
ok = self.client.update_traffic(logs)
if ok is False:
logging.error("update traffic failed...")
return {}
return update_transfer
| 28.767857
| 76
| 0.590937
| 1,528
| 0.948479
| 0
| 0
| 0
| 0
| 0
| 0
| 84
| 0.052142
|
598126ffcc8da7b8ff9a91f8f601f2ef5306a660
| 2,001
|
py
|
Python
|
tests/test_json.py
|
NyntoFive/data_extractor
|
965e12570d6b7549aa2f8b3bd1951e06b010c444
|
[
"MIT"
] | null | null | null |
tests/test_json.py
|
NyntoFive/data_extractor
|
965e12570d6b7549aa2f8b3bd1951e06b010c444
|
[
"MIT"
] | null | null | null |
tests/test_json.py
|
NyntoFive/data_extractor
|
965e12570d6b7549aa2f8b3bd1951e06b010c444
|
[
"MIT"
] | null | null | null |
# Standard Library
import json
# Third Party Library
import pytest
from jsonpath_rw.lexer import JsonPathLexerError
# First Party Library
from data_extractor.exceptions import ExprError, ExtractError
from data_extractor.json import JSONExtractor
@pytest.fixture(scope="module")
def text():
return """
{
"foo": [
{
"baz": 1
},
{
"baz": 2
}
]
}
"""
@pytest.fixture(scope="module")
def element(text):
return json.loads(text)
@pytest.mark.parametrize(
"expr,expect",
[
("foo[*].baz", [1, 2]),
("foo.baz", []),
("foo[0].baz", [1]),
("foo[1].baz", [2]),
("foo[2].baz", []),
],
ids=repr,
)
def test_extract(element, expr, expect):
assert expect == JSONExtractor(expr).extract(element)
@pytest.mark.parametrize(
"expr,expect",
[
("foo[*].baz", 1),
("foo.baz", "default"),
("foo[0].baz", 1),
("foo[1].baz", 2),
("foo[2].baz", "default"),
],
ids=repr,
)
def test_extract_first(element, expr, expect):
assert expect == JSONExtractor(expr).extract_first(element, default="default")
@pytest.mark.parametrize("expr", ["foo.baz", "foo[2].baz"], ids=repr)
def test_extract_first_without_default(element, expr):
extractor = JSONExtractor(expr)
with pytest.raises(ExtractError) as catch:
extractor.extract_first(element)
exc = catch.value
assert len(exc.extractors) == 1
assert exc.extractors[0] is extractor
assert exc.element is element
@pytest.mark.parametrize("expr", ["foo..", "a[]", ""], ids=repr)
def test_invalid_css_selector_expr(element, expr):
extractor = JSONExtractor(expr)
with pytest.raises(ExprError) as catch:
extractor.extract(element)
exc = catch.value
assert exc.extractor is extractor
assert isinstance(exc.exc, (JsonPathLexerError, Exception))
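# A small direct-usage check mirroring the parametrized cases above (same data
# as the fixture; the expected values are taken from those cases).
def test_extract_usage_example():
    data = {"foo": [{"baz": 1}, {"baz": 2}]}
    assert JSONExtractor("foo[*].baz").extract(data) == [1, 2]
    assert JSONExtractor("foo[0].baz").extract_first(data, default=None) == 1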
| 23.267442
| 82
| 0.590705
| 0
| 0
| 0
| 0
| 1,734
| 0.866567
| 0
| 0
| 487
| 0.243378
|
59814b4554d683700762543937d73f8de4e2078a
| 938
|
py
|
Python
|
demo/predictions/visualize.py
|
qixuxiang/maskrcnn_tianchi_stage2
|
52023b64268dc91f0b5b9f085203ab00a542458a
|
[
"MIT"
] | null | null | null |
demo/predictions/visualize.py
|
qixuxiang/maskrcnn_tianchi_stage2
|
52023b64268dc91f0b5b9f085203ab00a542458a
|
[
"MIT"
] | null | null | null |
demo/predictions/visualize.py
|
qixuxiang/maskrcnn_tianchi_stage2
|
52023b64268dc91f0b5b9f085203ab00a542458a
|
[
"MIT"
] | null | null | null |
import numpy as np
from PIL import Image
import os
npy_file1 = './prediction/1110_1.npy'
npy_file2 = './prediction/1110_2.npy'
npy_file3 = './prediction/1110_3.npy'
npy_file4 = './prediction/1110_4.npy'
npy_file5 = './prediction/1110_5.npy'
arr1 = np.load(npy_file1)
arr2 = np.load(npy_file2)
arr3 = np.load(npy_file3)
arr4 = np.load(npy_file4)
arr5 = np.load(npy_file5)
print(sum(sum(arr1)))
print(sum(sum(arr2)))
print(sum(sum(arr3)))
print(sum(sum(arr4)))
print(sum(sum(arr5)))
arr1 = 50*arr1
arr2 = 50*arr2
arr3 = 50*arr3
arr4 = 50*arr4
arr5 = 50*arr5
img1 = Image.fromarray(arr1).convert("L")
img2 = Image.fromarray(arr2).convert("L")
img3 = Image.fromarray(arr3).convert("L")
img4 = Image.fromarray(arr4).convert("L")
img5 = Image.fromarray(arr5).convert("L")
img1.save("./test_pic/test1.png")
img2.save("./test_pic/test2.png")
img3.save("./test_pic/test3.png")
img4.save("./test_pic/test4.png")
img5.save("./test_pic/test5.png")
| 26.055556
| 41
| 0.715352
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 250
| 0.266525
|
59821d30d6e5bb63ead4e418643ab63f3b0a5f6b
| 1,125
|
py
|
Python
|
examples/gbdt_classifier_example.py
|
tushushu/Imilu
|
121c79574d3e6ca35b569dd58661175e5c3668e2
|
[
"Apache-2.0"
] | 407
|
2018-08-22T05:58:33.000Z
|
2022-03-31T11:44:48.000Z
|
examples/gbdt_classifier_example.py
|
tushushu/Imilu
|
121c79574d3e6ca35b569dd58661175e5c3668e2
|
[
"Apache-2.0"
] | 9
|
2018-11-07T07:44:02.000Z
|
2021-12-10T11:59:47.000Z
|
examples/gbdt_classifier_example.py
|
tushushu/Imilu
|
121c79574d3e6ca35b569dd58661175e5c3668e2
|
[
"Apache-2.0"
] | 286
|
2018-08-22T08:00:19.000Z
|
2022-03-30T00:59:20.000Z
|
# -*- coding: utf-8 -*-
"""
@Author: tushushu
@Date: 2018-08-21 14:33:11
@Last Modified by: tushushu
@Last Modified time: 2019-05-22 15:41:11
"""
import os
os.chdir(os.path.split(os.path.realpath(__file__))[0])
import sys
sys.path.append(os.path.abspath(".."))
from imylu.ensemble.gbdt_classifier import GradientBoostingClassifier
from imylu.utils.load_data import load_breast_cancer
from imylu.utils.model_selection import train_test_split, model_evaluation
from imylu.utils.utils import run_time
@run_time
def main():
"""Tesing the performance of GBDT classifier"""
print("Tesing the performance of GBDT classifier...")
# Load data
data, label = load_breast_cancer()
# Split data randomly, train set rate 70%
data_train, data_test, label_train, label_test = train_test_split(data, label, random_state=20)
# Train model
clf = GradientBoostingClassifier()
clf.fit(data_train, label_train, n_estimators=2,
learning_rate=0.8, max_depth=3, min_samples_split=2)
# Model evaluation
model_evaluation(clf, data_test, label_test)
if __name__ == "__main__":
main()
| 28.846154
| 99
| 0.731556
| 0
| 0
| 0
| 0
| 580
| 0.515556
| 0
| 0
| 336
| 0.298667
|
5985441293e6489af243c2cd16aa10e62e49c056
| 16,658
|
py
|
Python
|
gamestonk_terminal/cryptocurrency/due_diligence/pycoingecko_view.py
|
clairvoyant/GamestonkTerminal
|
7b40cfe61b32782e36f5de8a08d075532a08c294
|
[
"MIT"
] | null | null | null |
gamestonk_terminal/cryptocurrency/due_diligence/pycoingecko_view.py
|
clairvoyant/GamestonkTerminal
|
7b40cfe61b32782e36f5de8a08d075532a08c294
|
[
"MIT"
] | null | null | null |
gamestonk_terminal/cryptocurrency/due_diligence/pycoingecko_view.py
|
clairvoyant/GamestonkTerminal
|
7b40cfe61b32782e36f5de8a08d075532a08c294
|
[
"MIT"
] | null | null | null |
"""CoinGecko view"""
__docformat__ = "numpy"
import argparse
from typing import List, Tuple
import pandas as pd
from pandas.plotting import register_matplotlib_converters
import matplotlib.pyplot as plt
from tabulate import tabulate
import mplfinance as mpf
from gamestonk_terminal.helper_funcs import (
parse_known_args_and_warn,
plot_autoscale,
)
from gamestonk_terminal.feature_flags import USE_ION as ion
import gamestonk_terminal.cryptocurrency.due_diligence.pycoingecko_model as gecko
from gamestonk_terminal.cryptocurrency.dataframe_helpers import wrap_text_in_df
register_matplotlib_converters()
# pylint: disable=inconsistent-return-statements
# pylint: disable=R0904, C0302
def load(other_args: List[str]):
"""Load selected Cryptocurrency. You can pass either symbol of id of the coin
Parameters
----------
other_args : List[str]
argparse arguments
"""
parser = argparse.ArgumentParser(
add_help=False,
formatter_class=argparse.ArgumentDefaultsHelpFormatter,
prog="load",
description="""Load cryptocurrency, from CoinGecko.
You will have access to a lot of statistics on that coin like price data,
coin development stats, social media and many others. Loading coin
also will open access to technical analysis menu.""",
)
parser.add_argument(
"-c",
"--coin",
required="-h" not in other_args,
type=str,
dest="coin",
help="Coin to load data for (symbol or coin id). You can use either symbol of the coin or coinId"
"You can find all coins using command `coins` or visit https://www.coingecko.com/en. "
"To use load a coin use command load -c [symbol or coinId]",
)
try:
if other_args:
if "-" not in other_args[0]:
other_args.insert(0, "-c")
ns_parser = parse_known_args_and_warn(parser, other_args)
if not ns_parser:
return
coin = gecko.Coin(ns_parser.coin)
print("")
return coin
except KeyError:
print(f"Could not find coin with the id: {ns_parser.coin}", "\n")
return None
except SystemExit:
print("")
return None
except Exception as e:
print(e, "\n")
return None
def chart(coin: gecko.Coin, other_args: List[str]):
"""Plots chart for loaded cryptocurrency
Parameters
----------
coin : gecko_coin.Coin
Cryptocurrency
other_args : List[str]
argparse arguments
"""
parser = argparse.ArgumentParser(
add_help=False,
formatter_class=argparse.ArgumentDefaultsHelpFormatter,
prog="chart",
description="""
Display chart for loaded coin. You can specify currency vs which you want
to show chart and also number of days to get data for.
By default currency: usd and days: 30.
            E.g. if you loaded Bitcoin in the previous step and you want to see its price vs ethereum
            over the last 90 days, use `chart --vs eth --days 90`
""",
)
parser.add_argument(
"--vs", default="usd", dest="vs", help="Currency to display vs coin"
)
parser.add_argument(
"-d", "--days", default=30, dest="days", help="Number of days to get data for"
)
try:
ns_parser = parse_known_args_and_warn(parser, other_args)
if not ns_parser:
return
df = coin.get_coin_market_chart(ns_parser.vs, ns_parser.days)
df = df["price"].resample("1D").ohlc().ffill()
df.columns = [
"Open",
"High",
"Low",
"Close",
]
title = (
f"\n{coin.coin_symbol}/{ns_parser.vs} from {df.index[0].strftime('%Y/%m/%d')} "
f"to {df.index[-1].strftime('%Y/%m/%d')}",
)
mpf.plot(
df,
type="candle",
volume=False,
title=str(title[0]) if isinstance(title, tuple) else title,
xrotation=20,
style="binance",
figratio=(10, 7),
figscale=1.10,
figsize=(plot_autoscale()),
update_width_config=dict(
candle_linewidth=1.0, candle_width=0.8, volume_linewidth=1.0
),
)
if ion:
plt.ion()
plt.show()
print("")
except SystemExit:
print("")
except Exception as e:
print(e, "\n")
def load_ta_data(coin: gecko.Coin, other_args: List[str]) -> Tuple[pd.DataFrame, str]:
"""Load data for Technical Analysis
Parameters
----------
coin : gecko_coin.Coin
Cryptocurrency
other_args : List[str]
argparse arguments
Returns
----------
Tuple[pd.DataFrame, str]
dataframe with prices
quoted currency
"""
parser = argparse.ArgumentParser(
add_help=False,
formatter_class=argparse.ArgumentDefaultsHelpFormatter,
prog="ta",
description="""
Loads data for technical analysis. You can specify currency vs which you want
to show chart and also number of days to get data for.
By default currency: usd and days: 30.
            E.g. if you loaded Bitcoin in the previous step and you want to see its price vs ethereum
            over the last 90 days, use `ta --vs eth --days 90`
""",
)
parser.add_argument(
"--vs", default="usd", dest="vs", help="Currency to display vs coin"
)
parser.add_argument(
"-d", "--days", default=30, dest="days", help="Number of days to get data for"
)
try:
ns_parser = parse_known_args_and_warn(parser, other_args)
if not ns_parser:
return pd.DataFrame(), ""
df = coin.get_coin_market_chart(ns_parser.vs, ns_parser.days)
df = df["price"].resample("1D").ohlc().ffill()
df.columns = [
"Open",
"High",
"Low",
"Close",
]
df.index.name = "date"
return df, ns_parser.vs
except SystemExit:
print("")
return pd.DataFrame(), ""
except Exception as e:
print(e, "\n")
return pd.DataFrame(), ""
def info(coin: gecko.Coin, other_args: List[str]):
"""Shows basic information about loaded coin
Parameters
----------
coin : gecko_coin.Coin
Cryptocurrency
other_args : List[str]
argparse arguments
"""
parser = argparse.ArgumentParser(
add_help=False,
formatter_class=argparse.ArgumentDefaultsHelpFormatter,
prog="info",
description="""
Shows basic information about loaded coin like:
Name, Symbol, Description, Market Cap, Public Interest, Supply, and Price related metrics
""",
)
try:
ns_parser = parse_known_args_and_warn(parser, other_args)
if not ns_parser:
return
df = wrap_text_in_df(coin.base_info, w=80)
print(
tabulate(
df,
headers=df.columns,
floatfmt=".2f",
showindex=False,
tablefmt="fancy_grid",
),
"\n",
)
except SystemExit:
print("")
except Exception as e:
print(e, "\n")
def web(coin: gecko.Coin, other_args: List[str]):
"""Shows found websites corresponding to loaded coin
Parameters
----------
coin : gecko_coin.Coin
Cryptocurrency
other_args : List[str]
argparse arguments
"""
parser = argparse.ArgumentParser(
add_help=False,
formatter_class=argparse.ArgumentDefaultsHelpFormatter,
prog="web",
description="""Websites found for given Coin. You can find there urls to
homepage, forum, announcement site and others.""",
)
try:
ns_parser = parse_known_args_and_warn(parser, other_args)
if not ns_parser:
return
df = coin.websites
print(
tabulate(
df,
headers=df.columns,
floatfmt=".2f",
showindex=False,
tablefmt="fancy_grid",
),
"\n",
)
except SystemExit:
print("")
except Exception as e:
print(e, "\n")
def social(coin: gecko.Coin, other_args: List[str]):
"""Shows social media corresponding to loaded coin
Parameters
----------
coin : gecko_coin.Coin
Cryptocurrency
other_args : List[str]
argparse arguments
"""
parser = argparse.ArgumentParser(
add_help=False,
formatter_class=argparse.ArgumentDefaultsHelpFormatter,
prog="social",
description="""Shows social media corresponding to loaded coin. You can find there name of
telegram channel, urls to twitter, reddit, bitcointalk, facebook and discord.""",
)
try:
ns_parser = parse_known_args_and_warn(parser, other_args)
if not ns_parser:
return
df = coin.social_media
print(
tabulate(
df,
headers=df.columns,
floatfmt=".2f",
showindex=False,
tablefmt="fancy_grid",
),
"\n",
)
except SystemExit:
print("")
except Exception as e:
print(e, "\n")
def dev(coin: gecko.Coin, other_args: List[str]):
"""Shows developers data for loaded coin
Parameters
----------
coin : gecko_coin.Coin
Cryptocurrency
other_args : List[str]
argparse arguments
"""
parser = argparse.ArgumentParser(
add_help=False,
formatter_class=argparse.ArgumentDefaultsHelpFormatter,
prog="dev",
description="""Developers data for loaded coin. If the development data is available you can see
how the code development of given coin is going on.
There are some statistics that shows number of stars, forks, subscribers, pull requests,
commits, merges, contributors on github.""",
)
try:
ns_parser = parse_known_args_and_warn(parser, other_args)
if not ns_parser:
return
df = coin.developers_data
print(
tabulate(
df,
headers=df.columns,
floatfmt=".2f",
showindex=False,
tablefmt="fancy_grid",
),
"\n",
)
except SystemExit:
print("")
except Exception as e:
print(e, "\n")
def ath(coin: gecko.Coin, other_args: List[str]):
"""Shows all time high data for loaded coin
Parameters
----------
coin : gecko_coin.Coin
Cryptocurrency
other_args : List[str]
argparse arguments
"""
parser = argparse.ArgumentParser(
add_help=False,
formatter_class=argparse.ArgumentDefaultsHelpFormatter,
prog="ath",
description="""All time high data for loaded coin""",
)
parser.add_argument(
"--vs", dest="vs", help="currency", default="usd", choices=["usd", "btc"]
)
try:
ns_parser = parse_known_args_and_warn(parser, other_args)
if not ns_parser:
return
df = coin.all_time_high(currency=ns_parser.vs)
print(
tabulate(
df,
headers=df.columns,
floatfmt=".2f",
showindex=False,
tablefmt="fancy_grid",
),
"\n",
)
except SystemExit:
print("")
except Exception as e:
print(e, "\n")
def atl(coin: gecko.Coin, other_args: List[str]):
"""Shows all time low data for loaded coin
Parameters
----------
coin : gecko_coin.Coin
Cryptocurrency
other_args : List[str]
argparse arguments
"""
parser = argparse.ArgumentParser(
add_help=False,
formatter_class=argparse.ArgumentDefaultsHelpFormatter,
prog="atl",
description="""All time low data for loaded coin""",
)
parser.add_argument(
"--vs", dest="vs", help="currency", default="usd", choices=["usd", "btc"]
)
try:
ns_parser = parse_known_args_and_warn(parser, other_args)
if not ns_parser:
return
df = coin.all_time_low(currency=ns_parser.vs)
print(
tabulate(
df,
headers=df.columns,
floatfmt=".2f",
showindex=False,
tablefmt="fancy_grid",
),
"\n",
)
except SystemExit:
print("")
except Exception as e:
print(e, "\n")
def score(coin: gecko.Coin, other_args: List[str]):
"""Shows different kind of scores for loaded coin
Parameters
----------
coin : gecko_coin.Coin
Cryptocurrency
other_args : List[str]
argparse arguments
"""
parser = argparse.ArgumentParser(
add_help=False,
formatter_class=argparse.ArgumentDefaultsHelpFormatter,
prog="score",
description="""
            In this view you can find different kinds of scores for loaded coin.
            Those scores represent different rankings, sentiment metrics, some user stats and others.
You will see CoinGecko scores, Developer Scores, Community Scores, Sentiment, Reddit scores
and many others.
""",
)
try:
ns_parser = parse_known_args_and_warn(parser, other_args)
if not ns_parser:
return
df = coin.scores
print(
tabulate(
df,
headers=df.columns,
floatfmt=".2f",
showindex=False,
tablefmt="fancy_grid",
),
"\n",
)
except SystemExit:
print("")
except Exception as e:
print(e, "\n")
def bc(coin: gecko.Coin, other_args: List[str]):
"""Shows urls to blockchain explorers
Parameters
----------
coin : gecko_coin.Coin
Cryptocurrency
other_args : List[str]
argparse arguments
"""
parser = argparse.ArgumentParser(
add_help=False,
formatter_class=argparse.ArgumentDefaultsHelpFormatter,
prog="bc",
description="""
            Blockchain explorer URLs for loaded coin. Those are sites like etherscan.io or polkascan.io
in which you can see all blockchain data e.g. all txs, all tokens, all contracts...
""",
)
try:
ns_parser = parse_known_args_and_warn(parser, other_args)
if not ns_parser:
return
df = coin.blockchain_explorers
print(
tabulate(
df,
headers=df.columns,
floatfmt=".2f",
showindex=False,
tablefmt="fancy_grid",
),
"\n",
)
except SystemExit:
print("")
except Exception as e:
print(e, "\n")
def market(coin: gecko.Coin, other_args: List[str]):
"""Shows market data for loaded coin
Parameters
----------
coin : gecko_coin.Coin
Cryptocurrency
other_args : List[str]
argparse arguments
"""
parser = argparse.ArgumentParser(
add_help=False,
formatter_class=argparse.ArgumentDefaultsHelpFormatter,
prog="market",
description="""
Market data for loaded coin. There you find metrics like:
Market Cap, Supply, Circulating Supply, Price, Volume and many others.
""",
)
try:
ns_parser = parse_known_args_and_warn(parser, other_args)
if not ns_parser:
return
df = coin.market_data
print(
tabulate(
df,
headers=df.columns,
floatfmt=".2f",
showindex=False,
tablefmt="fancy_grid",
),
"\n",
)
except SystemExit:
print("")
except Exception as e:
print(e, "\n")
| 27.308197
| 117
| 0.551327
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 6,541
| 0.392664
|
5985716e3511f569993e2ea970c450df3042b443
| 701
|
py
|
Python
|
source/loaders/tploaders.py
|
rodsom22/gcn_refinement
|
b1b76811b145a2fa7e595cc6d131d75c0553d5a3
|
[
"MIT"
] | 24
|
2020-05-04T20:24:35.000Z
|
2022-03-21T07:57:02.000Z
|
source/loaders/tploaders.py
|
rodsom22/gcn_refinement
|
b1b76811b145a2fa7e595cc6d131d75c0553d5a3
|
[
"MIT"
] | 3
|
2020-09-02T15:54:10.000Z
|
2021-05-27T03:09:31.000Z
|
source/loaders/tploaders.py
|
rodsom22/gcn_refinement
|
b1b76811b145a2fa7e595cc6d131d75c0553d5a3
|
[
"MIT"
] | 6
|
2020-08-03T21:01:37.000Z
|
2021-02-04T02:24:46.000Z
|
"""
Data loaders based on tensorpack
"""
import numpy as np
from utilities import nparrays as arrtools
def get_pancreas_generator(sample_name, volumes_path, references_path):
sample_vol_name = volumes_path + sample_name[0]
reference_vol_name = references_path + sample_name[1]
volume = np.load(sample_vol_name)
reference = np.load(reference_vol_name)
reference[reference != 0] = 1
y, x, z = volume.shape
for i in range(z):
vol_slice = volume[:, :, i]
reference_slice = reference[:, :, i]
vol_slice = arrtools.extend2_before(vol_slice)
reference_slice = arrtools.extend2_before(reference_slice)
        yield [vol_slice, reference_slice]
| 29.208333
| 71
| 0.706134
| 0
| 0
| 594
| 0.847361
| 0
| 0
| 0
| 0
| 40
| 0.057061
|
5986324fbdcbaeae05e084715dcadf5d8b4991a3
| 1,199
|
py
|
Python
|
app/stages/management/commands/import_stages_from_csv.py
|
guilloulouis/stage_medecine
|
7ec9067402e510d812a375bbfe46f2ab545587f9
|
[
"MIT"
] | null | null | null |
app/stages/management/commands/import_stages_from_csv.py
|
guilloulouis/stage_medecine
|
7ec9067402e510d812a375bbfe46f2ab545587f9
|
[
"MIT"
] | null | null | null |
app/stages/management/commands/import_stages_from_csv.py
|
guilloulouis/stage_medecine
|
7ec9067402e510d812a375bbfe46f2ab545587f9
|
[
"MIT"
] | 1
|
2021-04-30T16:38:19.000Z
|
2021-04-30T16:38:19.000Z
|
# from django.core.management import BaseCommand
# import pandas as pd
#
# from stages.models import Category, Stage
#
#
# class Command(BaseCommand):
# help = 'Import a list of stage in the database'
#
# def add_arguments(self, parser):
# super(Command, self).add_arguments(parser)
# parser.add_argument(
# '--csv', dest='csv', default=None,
# help='Specify the csv file to parse',
# )
#
# def handle(self, *args, **options):
# csv = options.get('csv')
# csv_reader = pd.read_csv(csv)
# stages_to_create = []
# for index, item in csv_reader.iterrows():
# stage_raw = item['Stage']
# split = stage_raw.split('(')
# stage_name = split[0].strip()
# if len(split) > 1:
# category_name = split[1].replace(')', '').strip()
# category_object, created = Category.objects.get_or_create(name=category_name)
# else:
# category_object = None
# stages_to_create.append(Stage(name=stage_name, place_max=item['places'], category=category_object))
# Stage.objects.bulk_create(stages_to_create)
| 37.46875
| 113
| 0.584654
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1,168
| 0.974145
|
5986b5465c4c37fe33e19dc8df090df96c8f030d
| 3,137
|
py
|
Python
|
deep_learning/dl.py
|
remix-yh/moneycount
|
e8f35549ef96b8ebe6ca56417f0833f519179173
|
[
"MIT"
] | null | null | null |
deep_learning/dl.py
|
remix-yh/moneycount
|
e8f35549ef96b8ebe6ca56417f0833f519179173
|
[
"MIT"
] | 7
|
2020-09-26T00:46:23.000Z
|
2022-02-10T01:08:15.000Z
|
deep_learning/dl.py
|
remix-yh/moneycount
|
e8f35549ef96b8ebe6ca56417f0833f519179173
|
[
"MIT"
] | null | null | null |
import os
import io
import matplotlib as mpl
mpl.use('Agg')
import matplotlib.pyplot as plt
from matplotlib.backends.backend_agg import FigureCanvasAgg
from matplotlib.figure import Figure
from keras.applications.imagenet_utils import preprocess_input
from keras.backend.tensorflow_backend import set_session
from keras.preprocessing import image
import numpy as np
from scipy.misc import imread
import tensorflow as tf
from ssd_v2 import SSD300v2
from ssd_utils import BBoxUtility
voc_classes = ['10', '100', '5', 'Boat', 'Bottle',
'Bus', 'Car', 'Cat', 'Chair', 'Cow', 'Diningtable',
'Dog', 'Horse','Motorbike', 'Person', 'Pottedplant',
'Sheep', 'Sofa', 'Train', 'Tvmonitor']
NUM_CLASSES = len(voc_classes) + 1
def initialize(weight_file_path):
np.set_printoptions(suppress=True)
config = tf.ConfigProto()
config.gpu_options.per_process_gpu_memory_fraction = 0.45
set_session(tf.Session(config=config))
input_shape = (300, 300, 3)
model = SSD300v2(input_shape, num_classes=NUM_CLASSES)
model.load_weights(weight_file_path, by_name=True)
return model
def predict(model, img):
inputs = []
plt.cla()
img = image.img_to_array(img)
img = np.asarray(img)
inputs.append(img.copy())
inputs = np.asarray(inputs)
inputs = preprocess_input(inputs)
preds = model.predict(inputs, batch_size=1, verbose=1)
bbox_util = BBoxUtility(NUM_CLASSES)
results = bbox_util.detection_out(preds)
# Parse the outputs.
det_label = results[0][:, 0]
det_conf = results[0][:, 1]
det_xmin = results[0][:, 2]
det_ymin = results[0][:, 3]
det_xmax = results[0][:, 4]
det_ymax = results[0][:, 5]
top_indices = [i for i, conf in enumerate(det_conf) if conf >= 0.6] #0.6
top_conf = det_conf[top_indices]
top_label_indices = det_label[top_indices].tolist()
top_xmin = det_xmin[top_indices]
top_ymin = det_ymin[top_indices]
top_xmax = det_xmax[top_indices]
top_ymax = det_ymax[top_indices]
colors = plt.cm.hsv(np.linspace(0, 1, 21)).tolist()
plt.imshow(img / 255.)
currentAxis = plt.gca()
money_total = 0
money_num_list = [10, 100, 5]
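    # Note: money_num_list only covers the first three labels ('10', '100', '5'),
    # so this loop assumes the detector only ever reports those coin classes;
    # a detection of any other VOC class would index past the list.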
for i in range(top_conf.shape[0]):
xmin = int(round(top_xmin[i] * img.shape[1]))
ymin = int(round(top_ymin[i] * img.shape[0]))
xmax = int(round(top_xmax[i] * img.shape[1]))
ymax = int(round(top_ymax[i] * img.shape[0]))
score = top_conf[i]
label = int(top_label_indices[i])
label_name = voc_classes[label - 1]
display_txt = '{:0.2f}, {}'.format(score, label_name)
coords = (xmin, ymin), xmax-xmin+1, ymax-ymin+1
color = colors[label]
currentAxis.add_patch(plt.Rectangle(*coords, fill=False, edgecolor=color, linewidth=2))
currentAxis.text(xmin, ymin, display_txt, bbox={'facecolor':color, 'alpha':0.5})
money_total = money_total + money_num_list[label - 1]
plt.title(f'Total:{money_total} yen')
canvas = FigureCanvasAgg(currentAxis.figure)
buf = io.BytesIO()
plt.savefig(buf)
buf.seek(0)
return buf
| 31.37
| 95
| 0.667198
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 227
| 0.072362
|
598893a66d83b9b4e168cfcddf559998286b638d
| 469
|
py
|
Python
|
statping/exceptions.py
|
danielpalstra/pystatping
|
eb6325229d45cd452528007b440ca545bacc3e04
|
[
"Apache-2.0"
] | null | null | null |
statping/exceptions.py
|
danielpalstra/pystatping
|
eb6325229d45cd452528007b440ca545bacc3e04
|
[
"Apache-2.0"
] | null | null | null |
statping/exceptions.py
|
danielpalstra/pystatping
|
eb6325229d45cd452528007b440ca545bacc3e04
|
[
"Apache-2.0"
] | null | null | null |
class BaseExceptions(Exception):
pass
class AuthException(BaseExceptions):
"""Raised when an api method requires authentication"""
pass
class DeleteException(BaseExceptions):
"""Raised when the delete of an object fails"""
pass
class UpsertException(BaseExceptions):
"""Raised when the combined insert or update fails"""
pass
class NotFoundException(BaseExceptions):
"""Raised when objects cannot be found by the API"""
pass
| 17.37037
| 59
| 0.714286
| 456
| 0.972281
| 0
| 0
| 0
| 0
| 0
| 0
| 207
| 0.441365
|
598974722569cb3c84cf300f7c787f22839c151a
| 2,255
|
py
|
Python
|
authors/tests/test_article_filters.py
|
andela/ah-backend-odin
|
0e9ef1a10c8a3f6736999a5111736f7bd7236689
|
[
"BSD-3-Clause"
] | null | null | null |
authors/tests/test_article_filters.py
|
andela/ah-backend-odin
|
0e9ef1a10c8a3f6736999a5111736f7bd7236689
|
[
"BSD-3-Clause"
] | 43
|
2018-10-25T10:14:52.000Z
|
2022-03-11T23:33:46.000Z
|
authors/tests/test_article_filters.py
|
andela/ah-backend-odin
|
0e9ef1a10c8a3f6736999a5111736f7bd7236689
|
[
"BSD-3-Clause"
] | 4
|
2018-10-29T07:04:58.000Z
|
2020-04-02T14:15:10.000Z
|
from . import BaseAPITestCase
class TestArticleFilters(BaseAPITestCase):
def setUp(self):
super().setUp()
self.authenticate()
def test_it_filters_articles_by_article_title(self):
self.create_article()
self.create_article(title="Some article with another title")
response = self.client.get(
"/api/articles/?title=Some article with another title"
)
self.assertEqual(len(response.data['results']), 1)
def test_it_filters_articles_by_article_tag(self):
self.create_article()
self.create_article(tagList=['learning', 'django'])
self.create_article(tagList=['learning', 'vuejs', "aws", "jest"])
response = self.client.get("/api/articles/?tag=learning")
self.assertEqual(len(response.data['results']), 2)
def test_it_filters_articles_by_article_description(self):
description = "Testing django apps"
self.create_article(description=description)
response = self.client.get(
f"/api/articles/?description={description}"
)
self.assertEqual(len(response.data['results']), 1)
def test_it_filters_articles_by_author_username(self):
self.create_articles_with_diferent_authors()
response = self.client.get("/api/articles/?author=krm")
self.assertEqual(len(response.data['results']), 1)
def test_it_filters_articles_by_author_email(self):
self.create_articles_with_diferent_authors()
response = self.client.get("/api/articles/?author=krm@example.com")
self.assertEqual(len(response.data['results']), 1)
def create_articles_with_diferent_authors(self):
self.create_article()
self.authenticate(
{"username": "krm", "email": "krm@example.com"}
)
self.create_article()
def create_article(self, **kwargs):
article = {
"title": "How to train your dragon",
"description": "Ever wonder how?",
"body": "You have to believe",
"tagList": ["reactjs", "angularjs", "dragons"],
"published": True
}
data = {**article}
data.update(kwargs)
self.client.post("/api/articles/", {"article": data})
| 36.370968
| 75
| 0.640355
| 2,222
| 0.985366
| 0
| 0
| 0
| 0
| 0
| 0
| 541
| 0.239911
|
598d5551f035952fc6ef820f0bbd414d1bb129f0
| 720
|
py
|
Python
|
myexporter/tcpexporter.py
|
abh15/flower
|
7e1ab9393e0494f23df65bfa4f858cc35fea290e
|
[
"Apache-2.0"
] | null | null | null |
myexporter/tcpexporter.py
|
abh15/flower
|
7e1ab9393e0494f23df65bfa4f858cc35fea290e
|
[
"Apache-2.0"
] | null | null | null |
myexporter/tcpexporter.py
|
abh15/flower
|
7e1ab9393e0494f23df65bfa4f858cc35fea290e
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/python3
import subprocess
import time
from prometheus_client import start_http_server, Gauge
def getstat():
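    # Parses `ss -i` output for sockets on the x11 port: the rtt appears on the
    # third line, fourth column as "rtt:<avg>/<var>", and the peer address on the
    # second line, fifth column (assumed layout of ss output on the target host).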
s=subprocess.getoutput('ss -i -at \'( dport = :x11 or sport = :x11 )\' | awk \'FNR == 3 { print $4}\'')
if s == "":
return(0.0,"")
else:
rtt=s.lstrip("rtt:")
r=rtt.split("/", 1)[0]
l=subprocess.getoutput('ss -i -at \'( dport = :x11 or sport = :x11 )\' | awk \'FNR == 2 { print $5}\'')
label=l.split(":", 1)[0]
return(float(r),label)
start_http_server(9200)
latencygauge = Gauge('tcprtt', 'provides rtt to fed server using ss',['cohort'])
while True:
stat, lbl= getstat()
latencygauge.labels(cohort=lbl).set(stat)
time.sleep(2)
| 32.727273
| 112
| 0.590278
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 245
| 0.340278
|
598f144f73e5a69e09521df868c498cc54751d48
| 516
|
py
|
Python
|
tests/features/steps/roman.py
|
TestowanieAutomatyczneUG/laboratorium_14-maciejSzcz
|
b92186c574d3f21acd9f3e913e1a8ddcb5ec81fd
|
[
"MIT"
] | null | null | null |
tests/features/steps/roman.py
|
TestowanieAutomatyczneUG/laboratorium_14-maciejSzcz
|
b92186c574d3f21acd9f3e913e1a8ddcb5ec81fd
|
[
"MIT"
] | null | null | null |
tests/features/steps/roman.py
|
TestowanieAutomatyczneUG/laboratorium_14-maciejSzcz
|
b92186c574d3f21acd9f3e913e1a8ddcb5ec81fd
|
[
"MIT"
] | null | null | null |
from behave import *
use_step_matcher("re")
@given("user inputs (?P<number>.+) and (?P<guess>.+)")
def step_impl(context, number, guess):
context.number = int(number)
context.user_guess = guess
@when("we run the converter")
def step_impl(context):
try:
context.res = context.roman.check_guess(context.number, context.user_guess)
except TypeError as e:
context.e = e
@then("the result should be (?P<value>.+)")
def step_impl(context, value):
assert str(context.res) == value
| 24.571429
| 83
| 0.672481
| 0
| 0
| 0
| 0
| 463
| 0.897287
| 0
| 0
| 108
| 0.209302
|
599099e8cbd4ce7be2457cb90f171f8cb872d8d1
| 1,266
|
py
|
Python
|
main.py
|
AbirLOUARD/AspiRobot
|
0ea78bfd7c20f1371c01a0e912f5e92bed6648b7
|
[
"MIT"
] | 1
|
2022-03-31T18:37:11.000Z
|
2022-03-31T18:37:11.000Z
|
main.py
|
AbirLOUARD/AspiRobot
|
0ea78bfd7c20f1371c01a0e912f5e92bed6648b7
|
[
"MIT"
] | null | null | null |
main.py
|
AbirLOUARD/AspiRobot
|
0ea78bfd7c20f1371c01a0e912f5e92bed6648b7
|
[
"MIT"
] | null | null | null |
import functions
import Aspirobot
import time
import os
import Manoir
import Capteur
import Etat
import threading
import Case
from threading import Thread
manor_size = 5
gameIsRunning = True
clearConsole = lambda: os.system('cls' if os.name in ('nt', 'dos') else 'clear')
manoir = Manoir.Manoir(manor_size, manor_size)
caseRobot = Case.Case(1, 1)
agent = Aspirobot.Aspirobot(manoir, caseRobot)
manoir.draw()
"""
while (gameIsRunning):
clearConsole()
if (functions.shouldThereBeANewDirtySpace(dirtys_number)):
functions.generateDirt(manor_dirty)
dirtys_number += 1
if (functions.shouldThereBeANewLostJewel(jewels_number)):
functions.generateJewel(manor_jewel)
jewels_number += 1
functions.drawManor(manor_dirty, manor_jewel)
time.sleep(pause_length)
"""
for init in range(10):
manoir.initialisation()
init += 1
def runAgent():
while True:
agent.run(3)
def runManoir():
while True:
#clearConsole()
manoir.ModifierPositionRobot(agent.getCase())
manoir.run()
if __name__ == "__main__":
t1 = Thread(target = runAgent)
t2 = Thread(target = runManoir)
t1.setDaemon(True)
t2.setDaemon(True)
t1.start()
t2.start()
while True:
pass
| 21.827586
| 80
| 0.691153
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 442
| 0.349131
|
599104a205da723279b528df24bd43e2dcb5bdbb
| 1,169
|
py
|
Python
|
docs/src/newsgroups_data.py
|
vishalbelsare/RLScore
|
713f0a402f7a09e41a609f2ddcaf849b2021a0a7
|
[
"MIT"
] | 61
|
2015-03-06T08:48:01.000Z
|
2021-04-26T16:13:07.000Z
|
docs/src/newsgroups_data.py
|
andrecamara/RLScore
|
713f0a402f7a09e41a609f2ddcaf849b2021a0a7
|
[
"MIT"
] | 5
|
2016-09-08T15:47:00.000Z
|
2019-02-25T17:44:55.000Z
|
docs/src/newsgroups_data.py
|
vishalbelsare/RLScore
|
713f0a402f7a09e41a609f2ddcaf849b2021a0a7
|
[
"MIT"
] | 31
|
2015-01-28T15:05:33.000Z
|
2021-04-16T19:39:48.000Z
|
import numpy as np
from scipy import sparse as sp
from rlscore.utilities import multiclass
def load_newsgroups():
T = np.loadtxt("train.data")
#map indices from 1...n to 0...n-1
rows = T[:,0] -1
cols = T[:,1] -1
vals = T[:,2]
X_train = sp.coo_matrix((vals, (rows, cols)))
X_train = X_train.tocsc()
T = np.loadtxt("test.data")
#map indices from 1...n to 0...n-1
rows = T[:,0] -1
cols = T[:,1] -1
vals = T[:,2]
X_test = sp.coo_matrix((vals, (rows, cols)))
X_test = X_test.tocsc()
#X_test has additional features not present in X_train
X_test = X_test[:,:X_train.shape[1]]
Y_train = np.loadtxt("train.label", dtype=int)
Y_train = multiclass.to_one_vs_all(Y_train, False)
Y_test = np.loadtxt("test.label", dtype=int)
Y_test = multiclass.to_one_vs_all(Y_test, False)
return X_train, Y_train, X_test, Y_test
def print_stats():
X_train, Y_train, X_test, Y_test = load_newsgroups()
print("Train X dimensions %d %d" %X_train.shape)
print("Test X dimensions %d %d" %X_test.shape)
print("Number of labels %d" %Y_train.shape[1])
if __name__=="__main__":
print_stats()
| 30.763158
| 58
| 0.638152
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 252
| 0.215569
|
59945bb43aee8c097a1605b49beb38bfd751d29b
| 25
|
py
|
Python
|
1795.py
|
heltonricardo/URI
|
160cca22d94aa667177c9ebf2a1c9864c5e55b41
|
[
"MIT"
] | 6
|
2021-04-13T00:33:43.000Z
|
2022-02-10T10:23:59.000Z
|
1795.py
|
heltonricardo/URI
|
160cca22d94aa667177c9ebf2a1c9864c5e55b41
|
[
"MIT"
] | null | null | null |
1795.py
|
heltonricardo/URI
|
160cca22d94aa667177c9ebf2a1c9864c5e55b41
|
[
"MIT"
] | 3
|
2021-03-23T18:42:24.000Z
|
2022-02-10T10:24:07.000Z
|
print(3 ** int(input()))
| 12.5
| 24
| 0.56
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
59962bcd6324fb181e2aeed2776a6d4ee13fa678
| 1,245
|
py
|
Python
|
5hours/14_dictionaries.py
|
matiasmasca/python
|
7631583820d51e3132bdb793fed28cc83f4877a2
|
[
"MIT"
] | null | null | null |
5hours/14_dictionaries.py
|
matiasmasca/python
|
7631583820d51e3132bdb793fed28cc83f4877a2
|
[
"MIT"
] | null | null | null |
5hours/14_dictionaries.py
|
matiasmasca/python
|
7631583820d51e3132bdb793fed28cc83f4877a2
|
[
"MIT"
] | null | null | null |
# Like Ruby hashes, a dictionary stores "key": "value" pairs.
# Just like a real dictionary: the word is the key and the definition is the value.
# Keys must be unique.
nombre_de_diccionario = {} #curly brackets.
monthConversions = {
"Jan": "January",
"Feb": "February",
"Mar": "March",
"Apr": "April",
"May": "May",
"Jun": "June",
"Jul": "July",
"Ago": "August",
"Sep": "September",
"Oct": "October",
"Nov": "November",
"Dic": "December",
}
# Accessing the values of the dictionary
# There are several ways:
# put the key between brackets
print(monthConversions["Mar"])
# get() lets you define which value is returned when the key does not exist
print(monthConversions.get("Nov"))
print(monthConversions.get("Mat"))
print(monthConversions.get("Mat", "No es una clave valida"))
# Keys can be numeric, and the values can be of different types
monthConversions = {
1: ("January", "Enero", "Janeiro"), # un tupla
2: ["February", "Febrero", "Fevereiro"], #una lista
3: "March",
4: "April",
5: "May",
6: "June",
7: "July",
8: "August",
9: "September",
10: "October",
11: "November",
12: "December",
}
print(monthConversions[1])
print(monthConversions[1][1])
print(monthConversions[2][2])
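# Illustrative addition (not part of the original lesson): get() works the same way
# with numeric keys, returning the fallback value when the key is missing.
print(monthConversions.get(13, "not a valid key"))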
| 23.055556
| 98
| 0.654618
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 777
| 0.623596
|
599682564ad210bc55f3314403d4b2babc14038c
| 578
|
py
|
Python
|
tests/unit/test_runner.py
|
mariocj89/dothub
|
bcfdcc5a076e48a73c4e0827c56431522e4cc4ba
|
[
"MIT"
] | 12
|
2017-05-30T12:46:41.000Z
|
2019-08-18T18:55:43.000Z
|
tests/unit/test_runner.py
|
mariocj89/dothub
|
bcfdcc5a076e48a73c4e0827c56431522e4cc4ba
|
[
"MIT"
] | 30
|
2017-07-10T19:28:35.000Z
|
2021-11-22T11:09:25.000Z
|
tests/unit/test_runner.py
|
Mariocj89/dothub
|
bcfdcc5a076e48a73c4e0827c56431522e4cc4ba
|
[
"MIT"
] | 1
|
2017-08-02T21:04:43.000Z
|
2017-08-02T21:04:43.000Z
|
from click.testing import CliRunner
from dothub.cli import dothub
base_args = ["--user=xxx", "--token=yyy"]
def test_dothub_help():
runner = CliRunner()
result = runner.invoke(dothub, ['--help'], obj={})
assert result.exit_code == 0
def test_dothub_pull_help():
runner = CliRunner()
result = runner.invoke(dothub, base_args + ['pull', "--help"], obj={})
assert result.exit_code == 0
def test_dothub_push_help():
runner = CliRunner()
result = runner.invoke(dothub, base_args + ['push', "--help"], obj={})
assert result.exit_code == 0
| 23.12
| 74
| 0.652249
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 61
| 0.105536
|
5997a4ecb7f8086a5d0b295c0471521ff04b54f7
| 6,985
|
py
|
Python
|
graph/__init__.py
|
worldwise001/stylometry
|
b5a4cc98fb8dfb6d1600d41bb15c96aeaf4ecb72
|
[
"MIT"
] | 14
|
2015-02-24T16:14:07.000Z
|
2022-02-19T21:49:55.000Z
|
graph/__init__.py
|
worldwise001/stylometry
|
b5a4cc98fb8dfb6d1600d41bb15c96aeaf4ecb72
|
[
"MIT"
] | 1
|
2015-02-25T09:45:13.000Z
|
2015-02-25T09:45:13.000Z
|
graph/__init__.py
|
worldwise001/stylometry
|
b5a4cc98fb8dfb6d1600d41bb15c96aeaf4ecb72
|
[
"MIT"
] | 4
|
2015-11-20T10:47:11.000Z
|
2021-03-30T13:14:20.000Z
|
import matplotlib
matplotlib.use('Agg')
import statsmodels.api as sm
import statsmodels.formula.api as smf
import numpy as np
from scipy.stats import linregress
import matplotlib.pyplot as plt
from sklearn.metrics import roc_curve, auc
def hist_prebin(filename, values, width=1, x_title='', y_title='', title=None):
if title is None:
title = filename
left = [ v[0] for v in values ]
height = [ v[1] for v in values ]
plt.figure(figsize=(24,18), dpi=600)
plt.bar(left=left, height=height, width=width)
plt.xlabel(x_title)
plt.ylabel(y_title)
plt.title(title)
plt.savefig('%s.png' % filename, format='png')
plt.savefig('%s.eps' % filename, format='eps')
def hist(filename, values, x_title='', y_title='', title=None):
if title is None:
title = filename
plt.figure(figsize=(24,18), dpi=600)
plt.hist(values, bins=20)
plt.xlabel(x_title)
plt.ylabel(y_title)
plt.title(title)
plt.savefig('%s.png' % filename, format='png')
plt.savefig('%s.eps' % filename, format='eps')
def generate(filename, rows, columns, x_title='', y_title='', title=None):
rows_num = range(1, len(rows)+1)
if title is None:
title = filename
plt.figure(figsize=(24,18), dpi=600)
plt.scatter(rows_num, columns)
locs, labels = plt.xticks(rows_num, rows)
plt.setp(labels, rotation=90)
plt.plot(rows_num, columns)
plt.xlabel(x_title)
plt.ylabel(y_title)
plt.title(title)
plt.savefig('%s.png' % filename, format='png')
plt.savefig('%s.eps' % filename, format='eps')
def scatter(filename, x, y, line=True, xr=None, yr=None, x_title='', y_title='', title=None):
if title is None:
title = filename
plt.figure(figsize=(24,18), dpi=600)
plt.scatter(x, y)
if xr is not None:
plt.xlim(xr)
if yr is not None:
plt.ylim(yr)
if line:
est = sm.OLS(y, sm.add_constant(x)).fit()
x_prime = np.linspace(min(x), max(x), 100)[:, np.newaxis]
x_prime = sm.add_constant(x_prime)
y_hat = est.predict(x_prime)
line_plot1 = plt.plot(x_prime[:, 1], y_hat, 'r', alpha=0.9, label='r^2 = %s' % est.rsquared)
#res = linregress(x,y)
#line_plot2 = plt.plot([min(x), max(x)], [res[0]*min(x)+res[1], res[0]*max(x)+res[1]],
# 'g', alpha=0.9, label='r^2 = %s' % res[2])
plt.legend(['r^2 = %s' % est.rsquared])
plt.xlabel(x_title)
plt.ylabel(y_title)
plt.title(title)
plt.savefig('%s.png' % filename, format='png')
plt.savefig('%s.eps' % filename, format='eps')
plt.close()
def roc(filename, y_truth, y_predicted, title=None):
fpr, tpr, _ = roc_curve(y_truth, y_predicted, 1)
roc_auc = auc(fpr, tpr)
if title is None:
title = filename
plt.figure(figsize=(24,18), dpi=600)
plt.plot(fpr, tpr, label='ROC curve (area = %0.2f)' % roc_auc)
plt.plot([0,1], [0,1], 'k--')
plt.xlim([0.0, 1.0])
plt.ylim([0.0, 1.05])
plt.xlabel('False Positive Rate')
plt.ylabel('True Positive Rate')
plt.title('ROC: %s' % title)
plt.legend(loc="lower right")
plt.savefig('%s.png' % filename, format='png')
plt.savefig('%s.eps' % filename, format='eps')
def rocs(filename, y_truths, y_predicteds, labels, title=None):
if title is None:
title = filename
plt.figure(figsize=(24,18), dpi=600)
for i in range(0, len(y_truths)):
fpr, tpr, _ = roc_curve(y_truths[i], y_predicteds[i], 1)
roc_auc = auc(fpr, tpr)
plt.plot(fpr, tpr, label='%s (area = %0.2f)' % (labels[i], roc_auc))
plt.plot([0,1], [0,1], 'k--')
plt.xlim([0.0, 1.0])
plt.ylim([0.0, 1.05])
plt.xlabel('False Positive Rate')
plt.ylabel('True Positive Rate')
plt.title('ROC: %s' % title)
plt.legend(loc="lower right")
plt.savefig('%s.png' % filename, format='png')
plt.savefig('%s.eps' % filename, format='eps')
def setboxcol(bp, i, col):
plt.setp(bp['boxes'][i], color=col)
plt.setp(bp['caps'][i*2], color=col)
plt.setp(bp['caps'][i*2+1], color=col)
plt.setp(bp['whiskers'][i*2], color=col)
plt.setp(bp['whiskers'][i*2+1], color=col)
plt.setp(bp['fliers'][i*2], color=col)
plt.setp(bp['fliers'][i*2+1], color=col)
plt.setp(bp['medians'][i], color=col)
def boxplot_single(filename, data, xr=None, yr=None, x_title='', y_title='', title=None):
if title is None:
title = filename
author_labels = []
author_data = []
for author in data:
author_labels.append(author)
author_data.append(data[author])
for start in range(0, len(data), 50):
end = start+50
if end > len(data):
end = len(data)
width = end-start
fig = plt.figure(figsize=(width,12), dpi=600)
ax = plt.axes()
bp = plt.boxplot(author_data[start:end], positions=range(1, width+1), widths = 0.8)
plt.xlim(0, width+1)
ax.set_xticklabels(author_labels[start:end], rotation=70)
ax.set_xticks(range(1, width+1))
if xr is not None:
plt.xlim(xr)
if yr is not None:
plt.ylim(yr)
plt.xlabel(x_title)
plt.ylabel(y_title)
plt.title(title)
plt.tight_layout()
plt.savefig('%s_%d.png' % (filename,start), format='png')
plt.savefig('%s_%d.eps' % (filename,start), format='eps')
plt.close()
def boxplot(filename, data, groups, x_title='', y_title='', title=None):
if title is None:
title = filename
plt.figure(figsize=(1.5*len(data)+3,12), dpi=600)
ax = plt.axes()
colors=['blue', 'red', 'green']*10
i = 1
k = 0
interval = len(groups)
print(groups)
author_labels = []
author_label_pos = []
for author in data:
author_labels.append(author)
author_data = []
if interval == 0:
interval = len(data[author])
cols = []
for src_reddit in data[author]:
author_data.append(data[author][src_reddit])
print(groups.index(src_reddit))
cols.append(colors[groups.index(src_reddit)])
pos = [ i+j for j in range(0, interval) ]
bp = plt.boxplot(author_data, positions=pos, widths = 0.8)
for m in range(0, interval):
setboxcol(bp, m, cols[m])
author_label_pos.append(i + (interval/2.0))
i += interval + 1
k += 1
plt.xlim(0, i)
ax.set_xticklabels(author_labels, rotation=70)
ax.set_xticks(author_label_pos)
plt.xlabel(x_title)
plt.ylabel(y_title)
plt.title(title)
plt.tight_layout()
hB, = plt.plot([1,1],'b-')
hR, = plt.plot([1,1],'r-')
hG, = plt.plot([1,1],'g-')
plt.legend((hB, hR, hG),(groups[0], groups[1], groups[2]))
hB.set_visible(False)
hR.set_visible(False)
hG.set_visible(False)
plt.savefig('%s.png' % filename, format='png')
plt.savefig('%s.eps' % filename, format='eps')
plt.close()
| 30.502183
| 100
| 0.591553
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 714
| 0.102219
|
59987eb32850dcd0908c67453364b8a38745fe6e
| 68
|
py
|
Python
|
tests/unit/test_thicket/test_finders.py
|
GabrielC101/filer
|
d506ed804d10891cea33c3884896b6f0dfa08b88
|
[
"MIT"
] | null | null | null |
tests/unit/test_thicket/test_finders.py
|
GabrielC101/filer
|
d506ed804d10891cea33c3884896b6f0dfa08b88
|
[
"MIT"
] | 1
|
2017-12-19T19:38:22.000Z
|
2017-12-19T19:38:22.000Z
|
tests/unit/test_thicket/test_finders.py
|
GabrielC101/filer
|
d506ed804d10891cea33c3884896b6f0dfa08b88
|
[
"MIT"
] | null | null | null |
from thicket import finders
def test_import():
assert finders
| 11.333333
| 27
| 0.75
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
59995210d6ac282b5113ee3252c96de5a50256f9
| 2,251
|
py
|
Python
|
test/test_component.py
|
gadalang/gada
|
2dd4f4dfd5b7390c06307040cad23203a015f7a4
|
[
"MIT"
] | null | null | null |
test/test_component.py
|
gadalang/gada
|
2dd4f4dfd5b7390c06307040cad23203a015f7a4
|
[
"MIT"
] | null | null | null |
test/test_component.py
|
gadalang/gada
|
2dd4f4dfd5b7390c06307040cad23203a015f7a4
|
[
"MIT"
] | 1
|
2021-06-15T13:52:33.000Z
|
2021-06-15T13:52:33.000Z
|
__all__ = ["ComponentTestCase"]
import os
import sys
import yaml
import unittest
from gada import component
from test.utils import TestCaseBase
class ComponentTestCase(TestCaseBase):
def test_load(self):
"""Test loading the testnodes package that is in PYTHONPATH."""
# Load component configuration
config = self.write_config_and_load(TestCaseBase.CONFIG_NODES)
self.assertEqual(config["runner"], "generic", "incorrect configuration")
# Get node configuration
node_config = component.get_node_config(config, "hello")
self.assertEqual(
node_config["runner"], "generic", "incorrect node configuration"
)
self.assertEqual(node_config["bin"], "python", "incorrect node configuration")
self.assertEqual(
node_config["argv"],
r"${comp_dir}/__init__.py ${argv}",
"incorrect node configuration",
)
def test_load_not_found(self):
"""Test loading a package that is not in the PYTHONPATH."""
with self.assertRaises(Exception):
comp = component.load("invalid")
def test_load_config(self):
"""Test loading config.yml file from testnodes package."""
config = self.write_config_and_load(TestCaseBase.CONFIG_NO_NODES)
self.assertEqual(
config, TestCaseBase.CONFIG_NO_NODES, "incorrect loaded configuration"
)
def test_load_config_empty(self):
"""Test loading an existing but empty config.yml file."""
with open(TestCaseBase.CONFIG_YML, "w+") as f:
f.write("")
config = self.load_config()
self.assertIsNotNone(config, "invalid configuration")
def test_load_config_not_found(self):
"""Test loading a non existing config.yml file."""
self.remove_config()
with self.assertRaises(Exception):
component.load_config(sys)
def test_get_node_config_not_found(self):
"""Test loading a config.yml file with unknown node."""
config = self.write_config_and_load(TestCaseBase.CONFIG_NODES)
with self.assertRaises(Exception):
component.get_node_config(config, "invalid")
if __name__ == "__main__":
unittest.main()
| 32.157143
| 86
| 0.662372
| 2,055
| 0.912928
| 0
| 0
| 0
| 0
| 0
| 0
| 713
| 0.316748
|
599a3aac676f1bdb004c22bf7034b685260f3101
| 17,820
|
py
|
Python
|
color pattern with threading.py
|
HashtagInnovator/Alpha-Star
|
f69a35b1924320dfec9610d6b61acae8d9de4afa
|
[
"Apache-2.0"
] | null | null | null |
color pattern with threading.py
|
HashtagInnovator/Alpha-Star
|
f69a35b1924320dfec9610d6b61acae8d9de4afa
|
[
"Apache-2.0"
] | null | null | null |
color pattern with threading.py
|
HashtagInnovator/Alpha-Star
|
f69a35b1924320dfec9610d6b61acae8d9de4afa
|
[
"Apache-2.0"
] | null | null | null |
import time
import random
from multiprocessing import pool
from playsound import playsound
from threading import Thread
i = -1
l = 0
count = 0
class loops:
def loop(self):
print(" ", end="")
def A(self):
        global i
        global l
for j in range(i, 5):
for k in range(4, i, -1):
print(" ", end="")
print("*", end="")
if i != 0:
l = 1
for q in range(0, l):
if (i == 3):
print(" *" * 3, end="")
else:
print(" " * (i + (i - 1)), end="*")
for k in range(4, i, -1):
print(" ", end="")
x.loop()
return
def B(self):
global i
for j in range(i, 6):
print("*", end="")
if (i == 0 or i == 2 or i == 4):
print(" *" * 3, end=" ")
else:
print(" " * 6, end="*")
x.loop()
return
def C(self):
global i
for i in range(i, 5):
if (i == 0 or i == 4):
print(" " * 2, end=" *" * 3)
elif (i == 1 or i == 3):
print(" " * 1, end="*")
print(" " * 5, end=" ")
else:
print("*", end=" " *7)
x.loop()
return
def D(self):
global i
for i in range(i, 5):
print("*", end=" ")
if (i == 0 or i == 4):
print("* " * 2, end=" " * 1)
elif (i == 1 or i == 3):
print(" " * 4, end="*")
else:
print(" " * 3, end=" *")
x.loop()
return
def E(self):
global i
for i in range(i, 5):
if (i == 0 or i == 2 or i == 4):
print("* " * 3, end="*")
else:
print("* ", end=" " * 5)
x.loop()
return
def F(self):
global i
for i in range(i, 5):
if (i == 0):
print("* " * 3, end="*")
elif (i == 2):
print("* " * 3, end=" ")
else:
print("* ", end=" " * 5)
x.loop()
return
def G(self):
global i
for i in range(i, 5):
if (i == 0):
print(" " * 2, end=" *" * 3)
print(" ", end="")
elif (i == 4):
print(" " * 2, end=" * " * 2)
print(" ", end="")
elif (i == 1):
print(" " * 1, end="*")
print(" " * 7, end="")
elif (i == 3):
print(" " * 1, end="*")
print(" " * 5, end=" *")
else:
print("*", end=" " * 2)
print(" *" * 3, end="")
x.loop()
return
def H(self):
global i
for i in range(i, 5):
if (i == 2):
print("* " * 3, end="*")
else:
print("*", end=" " * 5)
print("*", end="")
x.loop()
return
def I(self):
global i
for i in range(i, 5):
if (i == 0 or i == 4):
print("* " * 3, end="*")
else:
print(" " * 3, end="*")
print(" " * 3, end="")
x.loop()
return
def J(self):
global i
for i in range(i, 5):
if (i == 0):
print("* " * 3, end="*")
elif (i == 3 or i == 2):
print("* ", end=" *")
print(" " * 3, end="")
elif (i == 4):
print(" ", end="*")
print(" " * 2, end="")
else:
print(" " * 3, end="*")
print(" " * 3, end="")
x.loop()
return
def K(self):
global i
for i in range(i, 5):
if i == 0 or i == 4:
print("*", end=" " * 3)
print("*", end="")
elif i == 1 or i == 3:
print("*", end=" " * 2)
print("* ", end=" ")
else:
print("* ", end=" *")
print(" ", end=" ")
x.loop()
return
def L(self):
global i
for i in range(i,5):
if(i==4):
print("* "*3,end="*")
else:
print("* ",end=" "*5)
x.loop()
return
def M(self):
global i
for i in range(i,5):
print("* ",end="")
if(i==1):
print("* ",end=" * ")
elif(i==2):
print(" "*2,end="* ")
else:
print(" "*3,end="")
print("*",end="")
x.loop()
return
def N(self):
global i
for i in range(i,5):
print("*",end="")
if(i==0 ):
print(" "*3,end="")
else:
print(" "*i,end="*")
print(" "*(5-i),end="")
print("*",end="")
x.loop()
return
def O(self):
global i
for i in range(i,5):
if(i==0 or i==4):
print(" "*4,end="*")
print(" "*3,end=" ")
elif(i==2):
print("*",end=" "*7)
print("*",end="")
else:
print(" ",end="*")
print(" ",end="* ")
x.loop()
return
def P(self):
global i
for i in range(i,5):
print("*",end="")
if(i==0 or i==2):
print(" *"*3,end=" ")
elif(i==1):
print(" "*6,end="*")
else:
print(" "*7,end="")
x.loop()
return
def Q(self):
global i
for i in range(i,5):
if(i==0):
print(" "*4,end="*")
print(" "*3,end=" ")
elif(i==4):
print(" "*4,end="*")
print(" "*3,end="*")
elif(i==2):
print("*",end=" "*7)
print("*",end="")
elif(i==3):
print(" ",end="*")
print(" "*3,end="* * ")
else:
print(" ",end="*")
print(" ",end="* ")
x.loop()
return
def R(self):
global i
for i in range(i,5):
print("*",end="")
if(i==0 or i==2):
print(" *"*3,end=" ")
elif(i==1):
print(" "*6,end="*")
else:
print(" "*i,end=" *")
print(" ",end=" "*(4-i))
x.loop()
return
def S(self):
global i
for i in range(i, 5):
if (i == 0):
print(" " * 2, end="* " * 3)
print("", end="")
elif (i == 4):
print(" ", end="* " * 3)
print("", end="")
elif (i == 1):
print("*", end=" " * 7)
elif (i == 2):
print(" ", end="*")
print(" " * 4, end="")
else:
print("*", end=" " * 6)
print("*", end="")
x.loop()
return
def T(self):
global i
for i in range(i, 5):
if (i == 0):
print("* " * 3, end="*")
else:
print(" " * 2, end=" *")
print(" " * 2, end=" ")
x.loop()
return
def U(self):
global i
for i in range(i, 5):
if (i == 4):
print(" " * 2, end="* " * 2)
print(" " * 2, end="")
elif (i == 3):
print(" ", end="*")
print(" " * 4, end="*")
print(" ", end="")
else:
print("* ", end=" " * 5)
print("*", end="")
x.loop()
return
def V(self):
global i
for i in range(i, 5):
if (i == 0):
print("*", end=" " * 7)
print("*", end="")
elif (i == 1):
print(" *", end=" " * 5)
print("*", end=" ")
elif (i == 2):
print(" *", end=" " * 3)
print("*", end=" ")
elif (i == 3):
print(" *", end=" ")
print("*", end=" ")
else:
print(" " * 4, end="*")
print(" " * 4, end="")
x.loop()
return
def W(self):
global i
for i in range(i, 5):
if (i == 0):
print("*", end=" " * 11)
print("*", end="")
elif i == 1:
print(" *", end=" " * 9)
print("", end="* ")
elif (i == 2):
print(" * ", end=" *")
print(" ", end=" ")
elif (i == 3):
print(" " * 3, end="*")
print(" * * ", end=" " * 2)
else:
print(" " * 3, end=" *")
print(" *", end=" " * 4)
x.loop()
return
def X(self):
global i
for i in range(i, 5):
if (i == 0 or i == 4):
print("*", end=" " * 5)
print("*", end="")
elif (i == 1 or i == 3):
print(" *", end=" " * 3)
print("* ", end="")
else:
print(" " * 3, end="*")
print(" " * 3, end="")
x.loop()
return
def Y(self):
global i
for i in range(i, 5):
if (i == 0):
print("*", end=" " * 5)
print("*", end="")
elif (i == 1):
print(" *", end=" " * 3)
print("* ", end="")
else:
print(" " * 3, end="*")
print(" " * 3, end="")
x.loop()
return
def Z(self):
global i
for i in range(i, 5):
if (i == 0 or i == 4):
print("* " * 3, end="*")
elif (i == 1):
print(" " * 5, end="*")
print(" ", end="")
elif (i == 2):
print(" " * 3, end="*")
print(" " * 2, end=" ")
else:
print(" " * 1, end="*")
print(" " * 3, end=" ")
x.loop()
return
print()
def play():
soun = input("ENTER SOUND")
time.sleep(1.8)
print("\n"*30)
# CHANGE DIRECTORY HERE ................................................................
playsound("C:\\Users\\chetan\\Desktop\\language\\playsound\\" + soun + ".mp3")
# CHANGE DIRECTORY HERE.................................................................
time.sleep(1.1)
x = loops()
# DRIVER CODE
n = input("ENTER YOUR TEXT")
print("type any song name from here ...")
lis=["birth",'rider','standard','teri mitti me','chitrakaar']
print(lis)
#WE CAN ADD birthday and rider SONG HERE
thread=Thread(target=play)
thread.start()
time.sleep(7)
k = len(n)
aa,bb,cc,dd,ee,ff,gg,hh,ii,jj,kk,ll,mm,nn,oo,pp,qq,rr,ss,tt,uu,vv,ww,xx,yy,zz=0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0
s=0.5
list=[30,31,32,33,34,35,36,37]
color=0
for o in range(5):
i = i + 1
for f in range(k):
if (n[f] == "A" or n[f] == "a"):
if(aa==0):
aa=random.choice(list)
aa=aa+1
print("\033[1;{}m".format(aa),end="")
time.sleep(s)
x.A()
elif (n[f] == "B" or n[f] == "b"):
if(bb==0):
bb=random.choice(list)
bb=bb+1
print("\033[1;{}m".format(bb),end="")
time.sleep(s)
x.B()
elif (n[f] == "C" or n[f] == "c"):
if(cc==0):
cc=random.choice(list)
cc=cc+1
print("\033[1;{}m".format(cc),end="")
time.sleep(s)
x.C()
elif (n[f] == "D" or n[f] == "d"):
if(dd==0):
dd=random.choice(list)
dd=dd+1
print("\033[1;{}m".format(dd),end="")
time.sleep(s)
x.D()
elif (n[f] == "E" or n[f] == "e"):
if(ee==0):
ee=random.choice(list)
ee=ee+1
print("\033[1;{}m".format(ee),end="")
time.sleep(s)
x.E()
elif (n[f] == "F" or n[f] == "f"):
if(ff==0):
ff=random.choice(list)
ff=ff+1
print("\033[1;{}m".format(ff),end="")
time.sleep(s)
x.F()
elif (n[f] == "G" or n[f] == "g"):
if(gg==0):
gg=random.choice(list)
gg=gg+1
print("\033[1;{}m".format(gg),end="")
time.sleep(s)
x.G()
elif (n[f] == "H" or n[f] == "h"):
if(hh==0):
hh=random.choice(list)
hh=hh+1
print("\033[1;{}m".format(hh),end="")
time.sleep(s)
x.H()
elif (n[f] == "I" or n[f] == "i"):
if(ii==0):
ii=random.choice(list)
ii=ii+1
print("\033[1;{}m".format(ii),end="")
time.sleep(s)
x.I()
elif (n[f] == "J" or n[f] == "j"):
if(jj==0):
jj=random.choice(list)
jj=jj+1
print("\033[1;{}m".format(jj),end="")
time.sleep(s)
x.J()
elif (n[f] == "K" or n[f] == "k"):
if(kk==0):
kk=random.choice(list)
kk=kk+1
print("\033[1;{}m".format(kk),end="")
time.sleep(s)
x.K()
elif (n[f] == "L" or n[f] == "l"):
if(ll==0):
ll=random.choice(list)
ll=ll+1
print("\033[1;{}m".format(ll),end="")
time.sleep(s)
x.L()
elif (n[f] == "m" or n[f] == "M"):
if(mm==0):
mm=random.choice(list)
mm=mm+1
print("\033[1;{}m".format(mm),end="")
time.sleep(s)
x.M()
elif (n[f] == "N" or n[f] == "n"):
if(nn==0):
nn=random.choice(list)
nn=nn+1
print("\033[1;{}m".format(nn),end="")
time.sleep(s)
x.N()
elif (n[f] == "O" or n[f] == "o"):
if(oo==0):
oo=random.choice(list)
oo=oo+1
print("\033[1;{}m".format(oo),end="")
time.sleep(s)
x.O()
elif (n[f] == "P" or n[f] == "p"):
if(pp==0):
pp=random.choice(list)
pp=pp+1
print("\033[1;{}m".format(pp),end="")
time.sleep(s)
x.P()
elif (n[f] == "q" or n[f] == "Q"):
if(qq==0):
qq=random.choice(list)
qq=qq+1
print("\033[1;{}m".format(qq),end="")
time.sleep(s)
x.Q()
elif (n[f] == "R" or n[f] == "r"):
if(rr==0):
rr=random.choice(list)
rr=rr+1
print("\033[1;{}m".format(rr),end="")
time.sleep(s)
x.R()
elif (n[f] == "S" or n[f] == "s"):
if(ss==0):
ss=random.choice(list)
ss=ss+1
print("\033[1;{}m".format(ss),end="")
time.sleep(s)
x.S()
elif (n[f] == "T" or n[f] == "t"):
if(tt==0):
tt=random.choice(list)
tt=tt+1
print("\033[1;{}m".format(tt),end="")
time.sleep(s)
x.T()
elif (n[f] == "U" or n[f] == "u"):
if(uu==0):
uu=random.choice(list)
uu=uu+1
print("\033[1;{}m".format(uu),end="")
time.sleep(s)
x.U()
elif (n[f] == "V" or n[f] == "v"):
if(vv==0):
vv=random.choice(list)
vv=vv+1
print("\033[1;{}m".format(vv),end="")
time.sleep(s)
x.V()
elif (n[f] == "W" or n[f] == "w"):
if(ww==0):
ww=random.choice(list)
ww=ww+1
print("\033[1;{}m".format(ww),end="")
time.sleep(s)
x.W()
elif (n[f] == "X" or n[f] == "x"):
if(xx==0):
xx=random.choice(list)
xx=xx+1
print("\033[1;{}m".format(xx),end="")
time.sleep(s)
x.X()
elif (n[f] == "Y" or n[f] == "y"):
if(yy==0):
yy=random.choice(list)
yy=yy+1
print("\033[1;{}m".format(yy),end="")
time.sleep(s)
x.Y()
elif (n[f] == "Z" or n[f] == "z"):
if(zz==0):
zz=random.choice(list)
zz=zz+1
print("\033[1;{}m".format(zz),end="")
time.sleep(s)
x.Z()
elif(n[f]==" "):
x.loop()
x.loop()
print()
time.sleep(6)
print("\n"*8)
print('THANK YOU ', end='', flush=True)
for x in range(8):
for frame in r'-\|/-\|/':
print('\b', frame, sep='', end='', flush=True)
time.sleep(0.2)
print('\b ')
thread.join()
| 26.322009
| 129
| 0.306285
| 10,632
| 0.596633
| 0
| 0
| 0
| 0
| 0
| 0
| 1,910
| 0.107183
|
599abd70ab2405fa33e84f2920872f4103dff83c
| 273
|
py
|
Python
|
tests/conftest.py
|
eddyvdaker/FlaskSimpleStarter
|
4992492ac1788d80e5914188f994b3e0ed1e75f4
|
[
"MIT"
] | null | null | null |
tests/conftest.py
|
eddyvdaker/FlaskSimpleStarter
|
4992492ac1788d80e5914188f994b3e0ed1e75f4
|
[
"MIT"
] | null | null | null |
tests/conftest.py
|
eddyvdaker/FlaskSimpleStarter
|
4992492ac1788d80e5914188f994b3e0ed1e75f4
|
[
"MIT"
] | null | null | null |
import pytest
from src.app import create_app
@pytest.fixture
def app():
app = create_app()
app.config['TESTING'] = True
ctx = app.app_context()
ctx.push()
yield app
ctx.pop()
@pytest.fixture
def client(app):
return app.test_client()
| 12.409091
| 32
| 0.6337
| 0
| 0
| 144
| 0.527473
| 221
| 0.809524
| 0
| 0
| 9
| 0.032967
|
599c63fc42e3f63659183c30e8778ab397e4a872
| 2,533
|
py
|
Python
|
amd64-linux/lib/pmon.py
|
qiyancos/Simics-3.0.31
|
9bd52d5abad023ee87a37306382a338abf7885f1
|
[
"BSD-4-Clause",
"FSFAP"
] | 1
|
2020-06-15T10:41:18.000Z
|
2020-06-15T10:41:18.000Z
|
amd64-linux/lib/pmon.py
|
qiyancos/Simics-3.0.31
|
9bd52d5abad023ee87a37306382a338abf7885f1
|
[
"BSD-4-Clause",
"FSFAP"
] | null | null | null |
amd64-linux/lib/pmon.py
|
qiyancos/Simics-3.0.31
|
9bd52d5abad023ee87a37306382a338abf7885f1
|
[
"BSD-4-Clause",
"FSFAP"
] | 3
|
2020-08-10T10:25:02.000Z
|
2021-09-12T01:12:09.000Z
|
# This file implements the PMON firmware's LEON2 boot setup. It does not
# implement the serial port boot loading, only the initial setup.
# The PMON firmware for the LEON2 comes with a number of preprocessor defines
# that the user typically changes to match the hardware configuration.
# The PMON emulation function takes all of these parameters as function arguments,
# with the exception of the clock frequency, which is read from the CPU object.
import conf
from sim_core import *
def _pmon_start(cpu, stack_init):
cpu.wim = 2
cpu.psr = 0x10e0
cpu.gprs[14] = stack_init # %sp = STACK_INIT
cpu.gprs[1] = SIM_read_phys_memory(cpu, 0x80000014, 4)
cpu.psr = cpu.psr | 7
cpu.gprs[14] = stack_init - 0x40
def _pmon_init(cpu, memcfg1, memcfg2, timer_scaler_val, uart_scaler_val):
SIM_write_phys_memory(cpu, 0x80000014, 0x1000f, 4) # cache_ctrl
SIM_write_phys_memory(cpu, 0x800000a4, 0xaa00, 4) # io_port_dir
SIM_write_phys_memory(cpu, 0x80000090, 0, 4) # irq_mask
SIM_write_phys_memory(cpu, 0x80000094, 0, 4) # irq_pending
SIM_write_phys_memory(cpu, 0x80000098, 0, 4) # irq_force
SIM_write_phys_memory(cpu, 0x80000000, memcfg1, 4) # memcfg1
SIM_write_phys_memory(cpu, 0x80000004, memcfg2, 4) # memcfg2
SIM_write_phys_memory(cpu, 0x80000060, timer_scaler_val, 4) # prescaler_counter
SIM_write_phys_memory(cpu, 0x80000064, timer_scaler_val, 4) # prescaler_reload
SIM_write_phys_memory(cpu, 0x80000044, 0xffffffff, 4) # t1_reload (-1)
SIM_write_phys_memory(cpu, 0x80000048, 7, 4) # t1_control
SIM_write_phys_memory(cpu, 0x8000007c, uart_scaler_val, 4) # uart1_scaler
SIM_write_phys_memory(cpu, 0x8000008c, uart_scaler_val, 4) # uart2_scaler
SIM_write_phys_memory(cpu, 0x80000074, 0, 4) # uart1_status
SIM_write_phys_memory(cpu, 0x80000084, 0, 4) # uart2_status
SIM_write_phys_memory(cpu, 0x80000078, 3, 4) # uart1_control
SIM_write_phys_memory(cpu, 0x80000088, 3, 4) # uart2_control
def pmon_setup(cpu, timer_scale, baud_rate, bank_size, ram_banks, mcfg1, mcfg2):
cpu_freq = int(cpu.freq_mhz * 1000000.0)
stack_init = 0x40000000 + ((bank_size * ram_banks) - 16)
timer_scaler_val = (cpu_freq/timer_scale -1)
uart_scaler_val = ((((cpu_freq*10) / (8 * baud_rate))-5)/10)
_pmon_start(cpu, stack_init)
_pmon_init(cpu, mcfg1, mcfg2, timer_scaler_val, uart_scaler_val)
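# Illustrative usage only (not part of the original file); the parameter values below are
# hypothetical and must match the actual target board configuration:
# pmon_setup(cpu, timer_scale=1000000, baud_rate=38400, bank_size=0x400000,
#            ram_banks=1, mcfg1=0x10380233, mcfg2=0x00078c60)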
| 51.693878
| 83
| 0.70075
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 682
| 0.269246
|
599d3203f355bf0108b50dc6b8026b093b4736fc
| 395
|
py
|
Python
|
scripts/test_web3.py
|
AeneasHe/eth-brownie-enhance
|
e53995924ffb93239b9fab6c1c1a07e9166dd1c6
|
[
"MIT"
] | 1
|
2021-10-04T23:34:14.000Z
|
2021-10-04T23:34:14.000Z
|
scripts/test_web3.py
|
AeneasHe/eth-brownie-enhance
|
e53995924ffb93239b9fab6c1c1a07e9166dd1c6
|
[
"MIT"
] | null | null | null |
scripts/test_web3.py
|
AeneasHe/eth-brownie-enhance
|
e53995924ffb93239b9fab6c1c1a07e9166dd1c6
|
[
"MIT"
] | null | null | null |
import wpath
from web3 import Web3, HTTPProvider, IPCProvider, WebsocketProvider
def get_web3_by_http_rpc():
address = "http://47.243.92.131:8545"
print("===>address:", address)
p = HTTPProvider(address)
web3 = Web3(p)
return web3
w3 = get_web3_by_http_rpc()
eth = w3.eth
r = eth.getBalance("0x3d32aA995FdD334c671C2d276345DE6fe2F46D88")
print(r)
| 18.809524
| 67
| 0.721519
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 85
| 0.21519
|
599f0418376070df049179da7c8e1b8f17a142f2
| 834
|
py
|
Python
|
models/sklearn_model.py
|
Ailln/stock-prediction
|
9de77de5047446ffceeed83cb610c7edd2cb1ad3
|
[
"MIT"
] | 11
|
2020-07-11T06:14:29.000Z
|
2021-12-02T08:48:53.000Z
|
models/sklearn_model.py
|
HaveTwoBrush/stock-prediction
|
9de77de5047446ffceeed83cb610c7edd2cb1ad3
|
[
"MIT"
] | null | null | null |
models/sklearn_model.py
|
HaveTwoBrush/stock-prediction
|
9de77de5047446ffceeed83cb610c7edd2cb1ad3
|
[
"MIT"
] | 8
|
2020-04-15T14:29:47.000Z
|
2021-12-19T09:26:53.000Z
|
from sklearn import svm
from sklearn import ensemble
from sklearn import linear_model
class Model(object):
def __init__(self):
self.model_dict = {
"SGDRegressor": linear_model.SGDRegressor(max_iter=1000),
"HuberRegressor": linear_model.HuberRegressor(),
"LinearRegression": linear_model.LinearRegression(),
"LinearSVR": svm.LinearSVR(),
"BaggingRegressor": ensemble.BaggingRegressor(),
"AdaBoostRegressor": ensemble.AdaBoostRegressor(),
"ExtraTreesRegressor": ensemble.ExtraTreesRegressor(),
"RandomForestRegressor": ensemble.RandomForestRegressor(),
"GradientBoostingRegressor": ensemble.GradientBoostingRegressor()
}
def sklearn_model(self, model_name):
return self.model_dict[model_name]
| 37.909091
| 77
| 0.681055
| 745
| 0.893285
| 0
| 0
| 0
| 0
| 0
| 0
| 167
| 0.20024
|
59a09df4f04358386749f3598f84da0352793936
| 189
|
py
|
Python
|
venv/Lib/site-packages/shiboken2/_config.py
|
gabistoian/Hide-Text-in-image
|
88b5ef0bd2bcb0e222cfbc7abf6ac2b869f72ec5
|
[
"X11"
] | null | null | null |
venv/Lib/site-packages/shiboken2/_config.py
|
gabistoian/Hide-Text-in-image
|
88b5ef0bd2bcb0e222cfbc7abf6ac2b869f72ec5
|
[
"X11"
] | null | null | null |
venv/Lib/site-packages/shiboken2/_config.py
|
gabistoian/Hide-Text-in-image
|
88b5ef0bd2bcb0e222cfbc7abf6ac2b869f72ec5
|
[
"X11"
] | null | null | null |
shiboken_library_soversion = str(5.15)
version = "5.15.2.1"
version_info = (5, 15, 2.1, "", "")
__build_date__ = '2022-01-07T13:13:47+00:00'
__setup_py_package_version__ = '5.15.2.1'
| 15.75
| 44
| 0.671958
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 51
| 0.269841
|
59a0a3b7aa59f29b5ba0e35ea23ff02112e179f9
| 1,023
|
py
|
Python
|
00Python/day05/basic02.py
|
HaoZhang95/PythonAndMachineLearning
|
b897224b8a0e6a5734f408df8c24846a98c553bf
|
[
"MIT"
] | 937
|
2019-05-08T08:46:25.000Z
|
2022-03-31T12:56:07.000Z
|
00Python/day05/basic02.py
|
Sakura-gh/Python24
|
b97e18867264a0647d5645c7d757a0040e755577
|
[
"MIT"
] | 47
|
2019-09-17T10:06:02.000Z
|
2022-03-11T23:46:52.000Z
|
00Python/day05/basic02.py
|
Sakura-gh/Python24
|
b97e18867264a0647d5645c7d757a0040e755577
|
[
"MIT"
] | 354
|
2019-05-10T02:15:26.000Z
|
2022-03-30T05:52:57.000Z
|
"""
list元素的排序
sort() 默认无参数是从小到大
reversed(list) 整个列表直接反过来,返回值是一个新的list
"""
import random
a_list = []
for i in range(10):
a_list.append(random.randint(0, 200))
print(a_list)
a_list.sort()
print(a_list)
a_list.sort(reverse=True)  # descending order, from largest to smallest
print(a_list)
new_list = reversed(a_list) # [12,10,7,9] -> [9,7,10,12]
print(new_list)
"""
一个学校,三个办公室, 八位老师进行随机分配办公室
"""
school = [[], [], []]
teacher_list = list("ABCDEFGH")
for name in teacher_list:
index = random.randint(0,2)
school[index].append(name)
print(school)
"""
字符串表示:"", '', """"""
list表示:[], 可修改
元组的表示:(), 元组的元素不能进行修改,
元组中如果只有一个元素的话,后面加上逗号表明是一个tuple,否则就是元素真实类型
"""
a_tuple = (1, 3.14, "Hello", True)
empty_tuple = ()
empty_tuple2 = tuple()
# special cases
b_tuple = (1) # type = int
c_tuple = (1,) # type = tuple
"""
访问元组tuple
查询的话和list一样使用count, index
"""
print(a_tuple[2])
# a_tuple[1] = "haha"  tuple elements cannot be reassigned or modified, because tuples are immutable
print(a_tuple.count(1))  # the object 1 appears 2 times in the tuple, because True is just 1 to the computer
print(a_tuple.index(3.14))
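# Illustrative addition (not part of the original notes): the trailing comma is what
# distinguishes a plain value from a one-element tuple.
print(type(b_tuple))  # <class 'int'>
print(type(c_tuple))  # <class 'tuple'>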
| 18.267857
| 60
| 0.641251
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 846
| 0.600426
|
59a69dfbb3f7dfb97929bbbc436b9c105fe9fa48
| 1,643
|
py
|
Python
|
ThreeBotPackages/unlock_service/scripts/restore.py
|
threefoldfoundation/tft-stellar
|
b36460e8dba547923778273b53fe4f0e06996db0
|
[
"Apache-2.0"
] | 7
|
2020-02-05T16:10:46.000Z
|
2021-04-28T10:39:20.000Z
|
ThreeBotPackages/unlock_service/scripts/restore.py
|
threefoldfoundation/tft-stellar
|
b36460e8dba547923778273b53fe4f0e06996db0
|
[
"Apache-2.0"
] | 379
|
2020-01-13T10:22:21.000Z
|
2022-03-23T08:59:57.000Z
|
ThreeBotPackages/unlock_service/scripts/restore.py
|
threefoldfoundation/tft-stellar
|
b36460e8dba547923778273b53fe4f0e06996db0
|
[
"Apache-2.0"
] | 3
|
2020-01-24T09:56:44.000Z
|
2020-08-03T21:02:38.000Z
|
#!/usr/bin/env python
# pylint: disable=no-value-for-parameter
import click
import os
import sys
import requests
import json
UNLOCK_SERVICE_DEFAULT_HOSTS = {"test": "https://testnet.threefold.io", "public": "https://tokenservices.threefold.io"}
@click.command()
@click.option("--source", default="export_data", help="Sourcefile to import data from")
@click.option("--network", type=click.Choice(["test", "public"], case_sensitive=False), default="public")
@click.option("--unlock_service_host", default=None, help="Destination to restore to (overrides the network parameter)")
def import_unlockhash_transaction_data(source, network, unlock_service_host):
if not unlock_service_host:
unlock_service_host = UNLOCK_SERVICE_DEFAULT_HOSTS[network]
print(f"Restoring data to {unlock_service_host} from {source}\n")
restored=[]
with open(source,mode="r") as f:
for line in f.readlines():
if line.strip() == "":
continue
unlockhash_transaction_data = json.loads(line)
unlockhash = unlockhash_transaction_data.get("unlockhash")
transaction_xdr = unlockhash_transaction_data.get("transaction_xdr")
if unlockhash in restored:
continue
r = requests.post(
f"{unlock_service_host}/threefoldfoundation/unlock_service/create_unlockhash_transaction",
json={"unlockhash": unlockhash, "transaction_xdr": transaction_xdr},
)
r.raise_for_status()
restored.append(unlockhash)
if __name__ == "__main__":
import_unlockhash_transaction_data()
| 37.340909
| 120
| 0.684114
| 0
| 0
| 0
| 0
| 1,322
| 0.804626
| 0
| 0
| 533
| 0.324407
|
59a7951eb259bc0943a926370fa409960f8cba7c
| 4,984
|
py
|
Python
|
pgdiff/diff/PgDiffConstraints.py
|
Onapsis/pgdiff
|
ee9f618bc339cbfaf7967103e95f9650273550f8
|
[
"MIT"
] | 2
|
2020-05-11T16:42:48.000Z
|
2020-08-27T04:11:49.000Z
|
diff/PgDiffConstraints.py
|
Gesha3809/PgDiffPy
|
00466429d0385eb999c32addcbe6e2746782cb5d
|
[
"MIT"
] | 1
|
2018-04-11T18:19:33.000Z
|
2018-04-13T15:18:40.000Z
|
diff/PgDiffConstraints.py
|
Gesha3809/PgDiffPy
|
00466429d0385eb999c32addcbe6e2746782cb5d
|
[
"MIT"
] | 1
|
2018-04-11T15:09:22.000Z
|
2018-04-11T15:09:22.000Z
|
from PgDiffUtils import PgDiffUtils
class PgDiffConstraints(object):
@staticmethod
def createConstraints(writer, oldSchema, newSchema, primaryKey, searchPathHelper):
for newTableName, newTable in newSchema.tables.items():
oldTable = None
if (oldSchema is not None):
oldTable = oldSchema.tables.get(newTableName)
# Add new constraints
for constraint in PgDiffConstraints.getNewConstraints(oldTable, newTable, primaryKey):
searchPathHelper.outputSearchPath(writer)
writer.writeln(constraint.getCreationSQL())
@staticmethod
def dropConstraints(writer, oldSchema, newSchema, primaryKey, searchPathHelper):
for newTableName in newSchema.tables:
oldTable = None
if oldSchema is not None:
oldTable = oldSchema.tables.get(newTableName)
newTable = newSchema.tables[newTableName]
# Drop constraints that no more exist or are modified
for constraint in PgDiffConstraints.getDropConstraints(oldTable, newTable, primaryKey):
searchPathHelper.outputSearchPath(writer)
writer.writeln(constraint.getDropSQL())
@staticmethod
def alterComments(writer, oldSchema, newSchema, searchPathHelper):
if oldSchema is None:
return
for oldTableName, oldTable in oldSchema.tables.items():
newTable = newSchema.tables.get(oldTableName)
if newTable is None:
continue
for oldConstraintName, oldConstraint in oldTable.constraints.items():
newConstraint = newTable.constraints.get(oldConstraintName)
if newConstraint is None:
continue
# sbSQL = []
if (oldConstraint.comment is None
and newConstraint.comment is not None
or oldConstraint.comment is not None
and newConstraint.comment is not None
and oldConstraint.comment != newConstraint.comment):
searchPathHelper.outputSearchPath(writer)
writer.write("COMMENT ON ")
if newConstraint.isPrimaryKeyConstraint():
writer.write("INDEX ")
writer.write(PgDiffUtils.getQuotedName(newConstraint.name))
else:
writer.write("CONSTRAINT ")
writer.write(PgDiffUtils.getQuotedName(newConstraint.name))
writer.write(" ON ")
writer.write(PgDiffUtils.getQuotedName(newConstraint.tableName))
writer.write(" IS ")
writer.write(newConstraint.comment)
writer.writeln(';')
elif (oldConstraint.comment is not None and newConstraint.comment is None):
searchPathHelper.outputSearchPath(writer)
writer.write("COMMENT ON ")
if newConstraint.isPrimaryKeyConstraint():
writer.write("INDEX ");
writer.write(PgDiffUtils.getQuotedName(newConstraint.name))
else:
writer.write("CONSTRAINT ");
writer.write(PgDiffUtils.getQuotedName(newConstraint.name))
writer.write(" ON ");
writer.write(PgDiffUtils.getQuotedName(newConstraint.tableName))
writer.writeln(" IS NULL;")
@staticmethod
def getNewConstraints(oldTable, newTable, primaryKey):
result = []
if newTable is not None:
if oldTable is None:
for constraintName, constraint in newTable.constraints.items():
if constraint.isPrimaryKeyConstraint() == primaryKey:
result.append(constraint)
else:
for constraintName, constraint in newTable.constraints.items():
if (constraint.isPrimaryKeyConstraint() == primaryKey
and (constraintName not in oldTable.constraints
or oldTable.constraints[constraintName] != constraint)):
result.append(constraint)
return result
@staticmethod
def getDropConstraints(oldTable, newTable, primaryKey):
result = list()
if newTable is not None and oldTable is not None:
for constraintName in oldTable.constraints:
oldConstraint = oldTable.constraints[constraintName]
newConstraint = newTable.constraints.get(constraintName)
if (oldConstraint.isPrimaryKeyConstraint() == primaryKey
and (newConstraint is None or newConstraint != oldConstraint)):
result.append(oldConstraint)
return result
| 41.190083
| 99
| 0.58427
| 4,947
| 0.992576
| 0
| 0
| 4,885
| 0.980136
| 0
| 0
| 186
| 0.037319
|
59a8688939bcf65bd9fa72756ce61831127d2530
| 7,715
|
py
|
Python
|
experiments/old_code/result_scripts.py
|
hytsang/cs-ranking
|
241626a6a100a27b96990b4f199087a6dc50dcc0
|
[
"Apache-2.0"
] | null | null | null |
experiments/old_code/result_scripts.py
|
hytsang/cs-ranking
|
241626a6a100a27b96990b4f199087a6dc50dcc0
|
[
"Apache-2.0"
] | null | null | null |
experiments/old_code/result_scripts.py
|
hytsang/cs-ranking
|
241626a6a100a27b96990b4f199087a6dc50dcc0
|
[
"Apache-2.0"
] | 1
|
2018-10-30T08:57:14.000Z
|
2018-10-30T08:57:14.000Z
|
import inspect
import logging
import os
from itertools import product
import numpy as np
import pandas as pd
from skopt import load, dump
from csrank.constants import OBJECT_RANKING
from csrank.util import files_with_same_name, create_dir_recursively, rename_file_if_exist
from experiments.util import dataset_options_dict, rankers_dict, lp_metric_dict
DIR_NAME = os.path.dirname(os.path.abspath(inspect.getfile(inspect.currentframe())))
def log_best_params(file):
opt = load(file)
if "ps" in opt.acq_func:
best_i = np.argmin(np.array(opt.yi)[:, 0])
best_loss = opt.yi[best_i]
best_params = opt.Xi[best_i]
logger.info(
"Best parameters so far with a loss for file {} of {:.4f} time of {:.4f}:\n {}".format(
os.path.basename(file), best_loss[0],
best_loss[1],
best_params))
else:
best_i = np.argmin(opt.yi)
best_loss = opt.yi[best_i]
best_params = opt.Xi[best_i]
logger.info(
"Best parameters so far with a loss for file {} of {:.4f}:\n {}".format(os.path.basename(file), best_loss,
best_params))
return best_loss
def remove_redundant_optimizer_models(model_path, files_list):
logger.info('Results Files {} for Path {}'.format(files_list, os.path.basename(model_path)))
minimum_error = 50000
if len(files_list) >= 2:
for file in files_list:
try:
opt = load(file)
best_loss = log_best_params(file)
if best_loss < minimum_error:
minimum_error = best_loss
if (file != model_path):
logger.info('Writing from the file {} to {}'.format(os.path.basename(file),
os.path.basename(model_path)))
os.remove(model_path)
dump(opt, model_path)
except KeyError:
logger.error('Cannot open the file {}'.format(file))
except ValueError:
logger.error('Cannot open the file {}'.format(file))
elif len(files_list) == 1:
file = files_list[0]
try:
best_loss = log_best_params(file)
except KeyError:
logger.error('Cannot open the file {}'.format(file))
except ValueError:
logger.error('Cannot open the file {}'.format(file))
if len(files_list) != 0:
files_list.remove(model_path)
for file in files_list:
logger.error('Removing the File {}'.format(file))
os.remove(file)
def remove_redundant_log_files(logs_path, logs_files_list, ranker_name, dataset):
logger.info('Log Files {} for Path {}'.format(logs_files_list, os.path.basename(logs_path)))
minimum_error = 50000
if len(logs_files_list) >= 2:
for file in logs_files_list:
lines = np.array([line.rstrip('\n') for line in open(file)])
out = 'zero_one_rank_loss'
matching = [s for s in lines if out in s]
try:
logger.info("For File {} the error is {}".format(file, matching))
err = float(matching[0].split(out + ' : ')[-1])
logger.info("For File {} the zero one rank errro is {}".format(file, err))
if err <= minimum_error:
minimum_error = err
if (file != logs_path):
logger.info('Renaming from the file {} to {}'.format(os.path.basename(file),
os.path.basename(logs_path)))
os.remove(logs_path)
os.system('mv {} {}'.format(file, logs_path))
except IndexError:
logger.error('error {} in ranker {} is not evaluated for dataset {}'.format(out, ranker_name, dataset))
except ValueError:
logger.error('error {} in ranker {} is not evaluated for dataset {}'.format(out, ranker_name, dataset))
def remove_redundant_results():
for dataset, ranker_name in product(dataset_options.keys(), ranker_options.keys()):
model_path = os.path.join(DIR_NAME, 'optimizer_results_single_fold', '{}_{}'.format(dataset, ranker_name))
files_list = files_with_same_name(model_path)
remove_redundant_optimizer_models(model_path, files_list)
logs_path = os.path.join(DIR_NAME, 'logs_single_fold', '{}_{}.log'.format(dataset, ranker_name))
logs_files_list = files_with_same_name(logs_path)
remove_redundant_log_files(logs_path, logs_files_list, ranker_name, dataset)
def generate_concise_results_for_dataset(dataset='medoid', directory='logs_single_fold', result_directory='results'):
ranker_names = list(ranker_options.keys())
ranker_names.sort()
metric_names.sort()
data = []
data.append(['**************', dataset.upper(), '**************', ""])
for ranker_name in ranker_names:
try:
log_path = os.path.join(DIR_NAME, directory, '{}_{}.log'.format(dataset, ranker_name))
lines = np.array([line.rstrip('\n') for line in open(log_path)])
except FileNotFoundError:
logger.error('File {} is not found'.format(log_path))
data.append(['NE' for i in range(len(metric_names))])
continue
one_row = []
for out in metric_names:
try:
matching = [s for s in lines if out in s][0]
if out in matching:
one_row.append(matching.split(out + ' : ')[-1])
except IndexError:
logger.error('error {} in ranker {} is not evaluated for dataset {}'.format(out, ranker_name, dataset))
one_row.append('NE')
data.append(one_row)
columns = [name.upper() for name in metric_names]
indexes = [name.upper() for name in ranker_names]
indexes.insert(0, 'DATASET')
dataFrame = pd.DataFrame(data, index=indexes, columns=columns)
file_path = os.path.join(DIR_NAME, result_directory, '{}.csv'.format(dataset))
create_dir_recursively(file_path, True)
dataFrame.to_csv(file_path)
return dataFrame
def create_concise_results(result_directory='results', directory='logs_single_fold'):
df_list = []
datasets = list(dataset_options.keys())
datasets.sort()
for dataset in datasets:
dataFrame = generate_concise_results_for_dataset(dataset=dataset, directory=directory,
result_directory=result_directory)
df_list.append(dataFrame)
full_df = pd.concat(df_list)
fout = os.path.join(DIR_NAME, result_directory, 'complete_results.csv')
full_df.to_csv(fout)
def configure_logging():
log_path = os.path.join(DIR_NAME, 'results', 'compiling_result.log')
create_dir_recursively(log_path, True)
log_path = rename_file_if_exist(log_path)
global logger
logging.basicConfig(filename=log_path, level=logging.DEBUG, format='%(asctime)s %(levelname)-8s %(message)s',
datefmt='%Y-%m-%d %H:%M:%S')
logger = logging.getLogger(name='Compiling Results')
if __name__ == '__main__':
configure_logging()
dataset_options = dataset_options_dict[OBJECT_RANKING]
ranker_options = rankers_dict[OBJECT_RANKING]
metric_names = list(lp_metric_dict[OBJECT_RANKING].keys())
remove_redundant_results()
create_concise_results()
# create_concise_results(result_directory='logs_new_experiments', directory='logs_new_experiments')
| 43.835227
| 119
| 0.608425
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1,137
| 0.147375
|
59a98cedbef2ddabf9e787d32a317a09b1db8b5e
| 13,108
|
py
|
Python
|
notochord/features/BagOfWords.py
|
jroose/notochord
|
da9a6ff5d0fabbf0694d0bee1b81a240b66fa006
|
[
"MIT"
] | null | null | null |
notochord/features/BagOfWords.py
|
jroose/notochord
|
da9a6ff5d0fabbf0694d0bee1b81a240b66fa006
|
[
"MIT"
] | null | null | null |
notochord/features/BagOfWords.py
|
jroose/notochord
|
da9a6ff5d0fabbf0694d0bee1b81a240b66fa006
|
[
"MIT"
] | null | null | null |
from .. import schema, App, QueryCache, batcher, grouper, insert_ignore, export, lookup, persist, lookup_or_persist, ABCArgumentGroup, WorkOrderArgs, filter_widgets, temptable_scope, FeatureCache
from ..ObjectStore import ABCObjectStore
from sqlalchemy import Column, Integer, String, Float, ForeignKey, UnicodeText, Unicode, LargeBinary, Boolean, Index
import collections
import csv
import os
import re
import sqlalchemy
import sys
import tempfile
import time
import stat
from sklearn.feature_extraction.text import CountVectorizer
re_word = re.compile(r'[a-zA-Z]+')
__all__ = []
class BagOfWordsArgs(ABCArgumentGroup):
def __call__(self, group):
group.add_argument("--output-feature-set", type=unicode, action="store", metavar="NAME", default=None, help="Name of output feature set (required)")
group.add_argument("--input-feature-set", type=unicode, action="store", metavar="NAME", default=None, help="Name of input feature set (required)")
group.add_argument("--input-feature", type=unicode, action="store", metavar="NAME", default=None, help="Name of input feature")
group.add_argument("--chunk-size", type=int, action="store", metavar="INT", default=None, help="Number or widgets per chunk")
@export
class BagOfWords(App):
@staticmethod
def build_parser_groups():
return [BagOfWordsArgs(), WorkOrderArgs()] + App.build_parser_groups()
def __init__(self, datadir, input_feature_set=None, output_feature_set=None, input_feature=None, min_idwidget=None, max_idwidget=None, datasources=None, chunk_size=None, **kwargs):
super(BagOfWords, self).__init__(datadir, **kwargs)
self.config['output_feature_set'] = output_feature_set or self.config['output_feature_set']
self.config['input_feature_set'] = input_feature_set or self.config['input_feature_set']
self.config['input_feature'] = input_feature or self.config.get('input_feature')
self.config['datasources'] = datasources or self.config.get('datasources')
self.config["chunk_size"] = chunk_size or self.config.get('chunk_size', 1024)
self.config['min_idwidget'] = (min_idwidget, None)[min_idwidget is None]
self.config['max_idwidget'] = (max_idwidget, None)[max_idwidget is None]
def main(self):
import MySQLdb
from warnings import filterwarnings
filterwarnings('ignore', category = MySQLdb.Warning)
import sqlalchemy
from sqlalchemy import Column, literal, tuple_, insert
from ..schema import widget as t_w
from ..schema import widget_feature as t_wf
from ..schema import feature as t_f
from ..schema import feature_set as t_fs
from ..schema import datasource as t_ds
from ..schema import object_store as t_os
with self.session_scope() as session:
self.log.info("Preparing")
fs_in = lookup(session, t_fs, name=self.config['input_feature_set'])
if fs_in is None: raise KeyError("Invalid feature set: '{}'".format(self.config['input_feature_set']))
fs_out = lookup_or_persist(session, t_fs, name=self.config['output_feature_set'])
if fs_out is None: raise KeyError("Invalid feature set: '{}'".format(self.config['output_feature_set']))
os_in = lookup(session, t_os, idobject_store=fs_in.idobject_store)
if fs_in.idobject_store is None or os_in is None:
raise ValueError("Feature set '{}' has no associated object store".format(self.config['input_feature_set']))
else:
object_store = ABCObjectStore.open(session, os_in.name)
f_in = lookup(session, t_f, name=self.config['input_feature'], idfeature_set=fs_in.idfeature_set)
if f_in is None:
if self.config['input_feature'] is not None:
raise KeyError("Invalid feature: '{}' for feature_set '{}'".format(self.config['input_feature'], self.config['input_feature_set']))
else:
raise KeyError("Invalid feature_set '{}' has no default feature".format(self.config['input_feature_set']))
q_w = session.query(t_w.idwidget)
q_w = filter_widgets(
q_w,
min_idwidget = self.config['min_idwidget'],
max_idwidget = self.config['max_idwidget'],
datasources = self.config['datasources']
)
q_wf = session.query(t_wf.idwidget, t_wf.idfeature) \
.join(t_w, t_w.idwidget == t_wf.idwidget) \
.join(t_f, t_f.idfeature == t_wf.idfeature) \
.filter(t_f.idfeature_set == fs_out.idfeature_set)
if self.config['min_idwidget'] is not None:
q_wf = q_wf.filter(t_w.idwidget >= self.config['min_idwidget'])
if self.config['max_idwidget'] is not None:
q_wf = q_wf.filter(t_w.idwidget < self.config['max_idwidget'])
if self.config['datasources'] is not None and len(self.config['datasources']) > 0:
q_wf = q_wf.join(t_ds, t_ds.iddatasource == t_w.iddatasource)
q_wf = q_wf.filter(t_ds.name.in_(self.config['datasources']))
self.log.info("Deleting old features")
#q_del = q_wf.delete()
#q_del = t_wf.__table__.delete() \
# .where(tuple_(t_wf.idwidget, t_wf.idfeature).in_(q_wf))
#self.log.debug("Delete widget query: {}".format(q_del.compile(bind=session.bind)))
#session.execute(q_del)
q_w = session.query(t_w.idwidget, t_w.uuid)
q_w = filter_widgets(
q_w,
min_idwidget = self.config['min_idwidget'],
max_idwidget = self.config['max_idwidget'],
datasources = self.config['datasources']
)
q_w = q_w.join(t_wf, t_wf.idwidget == t_w.idwidget) \
.filter(t_wf.idfeature == f_in.idfeature)
class tmp_upload(schema.TableBase):
idtmp_upload = Column(t_f.idfeature.type, nullable=False, primary_key=True)
idwidget = Column(t_wf.idwidget.type, nullable=False)
idfeature = Column(t_wf.idfeature.type, nullable=False)
value = Column(t_wf.value.type, nullable=False)
__table_args__ = ({'prefixes':["TEMPORARY"]},)
__tablename__ = "tmp_upload"
class tmp_wf(schema.TableBase):
idwidget = Column(Integer, ForeignKey('widget.idwidget', onupdate='RESTRICT', ondelete='CASCADE'), primary_key=True, nullable=False)
idfeature = Column(Integer, ForeignKey('feature.idfeature', onupdate='RESTRICT', ondelete='CASCADE'), primary_key=True, nullable=False)
value = Column(Float, nullable=True)
__table_args__ = ({'prefixes':["TEMPORARY"]},)
__tablename__ = "tmp_widget_feature"
self.log.info("Beginning Execution")
self.log.debug("Widget query: {}".format(q_w.statement.compile(bind=session.bind)))
FC = FeatureCache(1024*1024, log=self.log)
count_time = 0.0
feature_time = 0.0
widget_time = 0.0
upload_time = 0.0
primary_key_time = 0.0
num_widgets = 0
num_widget_features = 0
start_time = time.time()
if session.bind.dialect.name.lower() == 'mysql':
session.execute("SET @@foreign_key_checks=0;")
session.execute("ALTER TABLE widget_feature DISABLE KEYS;")
insert_fout, insert_file = tempfile.mkstemp()
os.close(insert_fout)
os.chmod(insert_file, stat.S_IREAD | stat.S_IWRITE | stat.S_IROTH)
begin_time = time.time()
for it, result_chunk in enumerate(grouper(q_w, self.config['chunk_size'])):
start_time = time.time()
self.log.info("Executing chunk {}".format(it))
upload_chunk = []
N = 0
words = []
widgets = []
values = []
for row in result_chunk:
if row is not None:
idwidget, uuid = row
content = object_store.get(uuid, feature=f_in.name)
if content is None:
continue
cnt = collections.Counter(x.group(0).lower() for x in re_word.finditer(content))
words.extend(cnt.iterkeys())
values.extend(cnt.itervalues())
widgets.extend(idwidget for _ in xrange(len(cnt)))
N += len(cnt)
end_time = time.time()
count_time += (end_time - start_time)
start_time = time.time()
self.log.info("Getting feature id's")
word_idents = FC(session, fs_out.idfeature_set, (w for w in words))
self.log.info("Copying into upload_chunk")
upload_chunk = [dict(idwidget=widgets[it], idfeature=word_idents[it], value=values[it]) for it in xrange(N)]
num_widget_features += len(upload_chunk)
end_time = time.time()
feature_time += (end_time - start_time)
start_time = time.time()
dialect = session.bind.dialect.name
with temptable_scope(session, tmp_upload), temptable_scope(session, tmp_wf):
self.log.info("Uploading widget_feature chunk of size: {}".format(len(upload_chunk)))
session.bulk_insert_mappings(tmp_upload, upload_chunk)
end_time = time.time()
upload_time += (end_time - start_time)
start_time = time.time()
self.log.info("Constructing primary key")
insert_stmt = insert_ignore(tmp_wf, dialect).from_select(
[tmp_wf.idwidget, tmp_wf.idfeature, tmp_wf.value],
session.query(tmp_upload.idwidget, tmp_upload.idfeature, tmp_upload.value) \
.select_from(tmp_upload) \
)
session.execute(insert_stmt)
end_time = time.time()
primary_key_time += (end_time - start_time)
start_time = time.time()
if session.bind.dialect.name.lower() == 'mysql':
with open(insert_file, 'w') as fout:
csvout = csv.writer(fout, delimiter=',', escapechar='\\')
for row in session.query(tmp_wf.idwidget, tmp_wf.idfeature, tmp_wf.value):
csvout.writerow(tuple(row))
del csvout
self.log.info("Temp file size: {}".format(os.path.getsize(insert_file)))
insert_stmt = sqlalchemy.text(r"""
LOAD DATA CONCURRENT LOCAL INFILE '{insert_file}'
IGNORE
INTO TABLE widget_feature
FIELDS TERMINATED BY ','
OPTIONALLY ENCLOSED BY '"'
ESCAPED BY '\\'
LINES TERMINATED BY '\n'
(idwidget, idfeature, value)
""".format(insert_file=insert_file))
else:
insert_stmt = insert_ignore(t_wf, dialect).from_select(
[t_wf.idwidget, t_wf.idfeature, t_wf.value],
session.query(tmp_wf.idwidget, tmp_wf.idfeature, tmp_wf.value)
)
start_time = time.time()
self.log.info("Transferring into place")
session.execute(insert_stmt)
end_time = time.time()
widget_time += (end_time - start_time)
num_widgets += len(result_chunk)
self.log.info("Average Times: {} {} {} {} {} {}".format(count_time / num_widgets, feature_time / num_widgets, upload_time / num_widgets, primary_key_time / num_widgets, widget_time / num_widgets, num_widget_features / num_widgets))
self.log.info("Average Rate: {}".format(num_widgets / (time.time() - begin_time)))
self.log.info("Max Rate: {}".format(num_widgets / widget_time))
if session.bind.dialect.name.lower() == 'mysql':
session.execute("ALTER TABLE widget_feature ENABLE KEYS;")
session.execute("SET @@foreign_key_checks=1;")
os.remove(insert_file)
tmp_upload.metadata.remove(tmp_upload.__table__)
tmp_upload.metadata.remove(tmp_wf.__table__)
if __name__ == "__main__":
A = BagOfWords.from_args(sys.argv[1:])
A.run()
| 51.403922
| 251
| 0.580333
| 12,430
| 0.948276
| 0
| 0
| 11,786
| 0.899146
| 0
| 0
| 2,369
| 0.180729
|
59ac1cf688342acfde23c07e10ca2e33caf1f078
| 450
|
py
|
Python
|
trains/ATIO.py
|
Columbine21/TFR-Net
|
1da01577542e7f477fdf7323ec0696aebc632357
|
[
"MIT"
] | 7
|
2021-11-19T01:32:01.000Z
|
2021-12-16T11:42:44.000Z
|
trains/ATIO.py
|
Columbine21/TFR-Net
|
1da01577542e7f477fdf7323ec0696aebc632357
|
[
"MIT"
] | 2
|
2021-11-25T08:28:08.000Z
|
2021-12-29T08:42:55.000Z
|
trains/ATIO.py
|
Columbine21/TFR-Net
|
1da01577542e7f477fdf7323ec0696aebc632357
|
[
"MIT"
] | 1
|
2021-12-02T09:42:51.000Z
|
2021-12-02T09:42:51.000Z
|
"""
AIO -- All Trains in One
"""
from trains.baselines import *
from trains.missingTask import *
__all__ = ['ATIO']
class ATIO():
def __init__(self):
self.TRAIN_MAP = {
# single-task
'tfn': TFN,
'mult': MULT,
'misa': MISA,
# missing-task
'tfr_net': TFR_NET,
}
def getTrain(self, args):
return self.TRAIN_MAP[args.modelName.lower()](args)
| 19.565217
| 59
| 0.52
| 330
| 0.733333
| 0
| 0
| 0
| 0
| 0
| 0
| 91
| 0.202222
|
59ac4ecc150b88338555999e74b36af7366e76c2
| 271
|
py
|
Python
|
method/boardInfo.py
|
gary920209/LightDance-RPi
|
41d3ef536f3874fd5dbe092f5c9be42f7204427d
|
[
"MIT"
] | 2
|
2020-11-14T17:13:55.000Z
|
2020-11-14T17:42:39.000Z
|
method/boardInfo.py
|
gary920209/LightDance-RPi
|
41d3ef536f3874fd5dbe092f5c9be42f7204427d
|
[
"MIT"
] | null | null | null |
method/boardInfo.py
|
gary920209/LightDance-RPi
|
41d3ef536f3874fd5dbe092f5c9be42f7204427d
|
[
"MIT"
] | null | null | null |
import os
from .baseMethod import BaseMethod
# BoardInfo
class BoardInfo(BaseMethod):
def method(self, payload):
info = [
"boardInfo",
{"name": os.name, "type": "dancer", "OK": True, "msg": "Success"},
]
return info
| 19.357143
| 78
| 0.553506
| 210
| 0.774908
| 0
| 0
| 0
| 0
| 0
| 0
| 60
| 0.221402
|
59ad06dd6ba9abadeea6a1f889a37f3edb2cafd7
| 4,928
|
py
|
Python
|
split_data.py
|
Anchorboy/PR_FinalProject
|
e744723c9c9dd55e6995ae5929eb45f90c70819b
|
[
"MIT"
] | null | null | null |
split_data.py
|
Anchorboy/PR_FinalProject
|
e744723c9c9dd55e6995ae5929eb45f90c70819b
|
[
"MIT"
] | null | null | null |
split_data.py
|
Anchorboy/PR_FinalProject
|
e744723c9c9dd55e6995ae5929eb45f90c70819b
|
[
"MIT"
] | null | null | null |
import os
import cv2
import random
import shutil
import numpy as np
def split_img(input_path):
split_ratio = 0.8
    # class directories are named "1" .. "10"
    for dir_name in range(1, 11):
        dir_name = str(dir_name)
dir_path = os.path.join(input_path, dir_name)
img_in_class = os.listdir(dir_path)
rand_train_img = set(random.sample(img_in_class, int(len(img_in_class) * split_ratio)))
rand_test_img = set(img_in_class) - rand_train_img
for img_name in rand_train_img:
img_path = os.path.join(dir_path, img_name)
if not os.path.exists("train/"+dir_name):
os.mkdir("train/"+dir_name)
shutil.copyfile(img_path, "train/"+dir_name+"/"+img_name)
for img_name in rand_test_img:
img_path = os.path.join(dir_path, img_name)
if not os.path.exists("test/"+dir_name):
os.mkdir("test/"+dir_name)
shutil.copyfile(img_path, "test/"+dir_name+"/"+img_name)
def split_data(samples):
split_rate = 0.6
train_all = []
test_all = []
for class_id, img_in_class in enumerate(samples):
        rand_ind = list(range(len(img_in_class)))
rand_train_ind = set(random.sample(rand_ind, int(len(img_in_class) * split_rate)))
rand_test_ind = set(rand_ind) - rand_train_ind
# train_in_class = []
# test_in_class = []
for ind in rand_train_ind:
img_vec = img_in_class[ind]
img_vec = img_vec.reshape(img_vec.shape[0] * img_vec.shape[1] * img_vec.shape[2],)
train_all.append((class_id, img_vec))
for ind in rand_test_ind:
img_vec = img_in_class[ind]
img_vec = img_vec.reshape(img_vec.shape[0] * img_vec.shape[1] * img_vec.shape[2],)
test_all.append((class_id, img_vec))
# train_all.append(train_in_class)
# test_all.append(test_in_class)
return train_all, test_all
def read_img(input_path):
img_size = (200, 200)
sample_all = []
    # class directories are named "1" .. "10"
    for dir_name in range(1, 11):
        dir_name = str(dir_name)
dir_path = os.path.join(input_path, dir_name)
img_in_class = []
for img_name in os.listdir(dir_path):
img_path = os.path.join(dir_path, img_name)
img_vec = cv2.imread(img_path, flags=1)
# print img_vec.shape
# res = cv2.resize(img_vec, (int(img_vec.shape[0]*0.5), int(img_vec.shape[1]*0.5)), interpolation=cv2.INTER_CUBIC)
res = cv2.resize(img_vec, img_size, interpolation=cv2.INTER_CUBIC)
nor_res = np.zeros_like(res)
nor_res = cv2.normalize(src=res, dst=nor_res, alpha=0, beta=1, norm_type=cv2.NORM_MINMAX, dtype=cv2.CV_32F)
img_in_class.append(nor_res)
sample_all.append(img_in_class)
train_all, test_all = split_data(sample_all)
return train_all, test_all
def read_data():
img_size = (200, 200)
train_all = []
test_all = []
current_base = os.path.abspath('.')
train_path = os.path.join(current_base, "train")
test_path = os.path.join(current_base, "test")
# read train
for dir_name in os.listdir(train_path):
dir_path = os.path.join(train_path, dir_name)
img_in_class = []
for img_name in os.listdir(dir_path):
img_path = os.path.join(dir_path, img_name)
img_vec = cv2.imread(img_path, flags=1)
# print img_vec.shape
# res = cv2.resize(img_vec, (int(img_vec.shape[0]*0.5), int(img_vec.shape[1]*0.5)), interpolation=cv2.INTER_CUBIC)
res = cv2.resize(img_vec, img_size, interpolation=cv2.INTER_CUBIC)
nor_res = np.zeros_like(res)
nor_res = cv2.normalize(src=res, dst=nor_res, alpha=0, beta=1, norm_type=cv2.NORM_MINMAX, dtype=cv2.CV_32F)
img_in_class.append(nor_res)
train_all.append(img_in_class)
# read test
for dir_name in os.listdir(test_path):
dir_path = os.path.join(test_path, dir_name)
img_in_class = []
for img_name in os.listdir(dir_path):
img_path = os.path.join(dir_path, img_name)
img_vec = cv2.imread(img_path, flags=1)
# print img_vec.shape
# res = cv2.resize(img_vec, (int(img_vec.shape[0]*0.5), int(img_vec.shape[1]*0.5)), interpolation=cv2.INTER_CUBIC)
res = cv2.resize(img_vec, img_size, interpolation=cv2.INTER_CUBIC)
nor_res = np.zeros_like(res)
nor_res = cv2.normalize(src=res, dst=nor_res, alpha=0, beta=1, norm_type=cv2.NORM_MINMAX, dtype=cv2.CV_32F)
img_in_class.append(nor_res)
test_all.append(img_in_class)
return train_all, test_all
if __name__ == "__main__":
current_base = os.path.abspath('.')
input_base = os.path.join(current_base, 'data')
split_img(input_base)
# train_all, test_all = read_data()
# print train_all
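# --- Illustrative sketch, not part of the original file ---
# read_img() yields (class_id, flattened_vector) pairs via split_data(); a
# typical next step is stacking them into feature/label arrays. The helper
# name is an assumption; "data" matches the directory used in __main__ above.
def build_arrays(data_dir="data"):
    train_pairs, test_pairs = read_img(data_dir)
    X_train = np.stack([vec for _, vec in train_pairs])
    y_train = np.array([cls for cls, _ in train_pairs])
    X_test = np.stack([vec for _, vec in test_pairs])
    y_test = np.array([cls for cls, _ in test_pairs])
    return X_train, y_train, X_test, y_test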
| 36.503704 | 126 | 0.631494 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 673 | 0.136567 |