| max_stars_repo_path (string, 3-269 chars) | max_stars_repo_name (string, 4-119 chars) | max_stars_count (int64, 0-191k) | id (string, 1-7 chars) | content (string, 6-1.05M chars) | score (float64, 0.23-5.13) | int_score (int64, 0-5) |
|---|---|---|---|---|---|---|
src/amuse/community/adaptb/__init__.py
|
sibonyves/amuse
| 0
|
12782051
|
from .interface import Adaptb
| 1.148438
| 1
|
sagas/nlu/legacy/corenlp_procs.py
|
samlet/stack
| 3
|
12782052
|
<gh_stars>1-10
import sagas
def words_table(sents, lang):
from sagas.nlu.corenlp_helper import get_nlp
nlp=get_nlp(lang)
doc = nlp(sents)
sentence=doc.sentences[0]
rows=[[word.text, word.lemma, word.upos, word.xpos,
word.dependency_relation, word.governor,
word.feats] for word in sentence.words]
return sagas.to_df(rows, ['text','lemma', 'upos', 'xpos', 'dep', 'head', 'feats'])
class CorenlpProcs(object):
def testings(self):
"""
$ python -m sagas.nlu.corenlp_procs testings
:return:
"""
ds=[words_table('عمري تسعة عشر عاماً.', 'ar'),
words_table('آخرین کسی که به کامپیوتر وصل شد، کی بود؟', 'fa')
]
for df in ds:
sagas.print_df(df)
if __name__ == '__main__':
import fire
fire.Fire(CorenlpProcs)
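# usage sketch (hypothetical sentence; assumes the stanfordnlp models for 'en' have been
# downloaded beforehand, which is not shown here):
# df = words_table('The quick brown fox jumps over the lazy dog.', 'en')
# sagas.print_df(df)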
| 2.75
| 3
|
mri_tools/topup_eddy/nipype_overwrite/all_peb_pipeline.py
|
movermeyer/mri-tools
| 0
|
12782053
|
from nipype.interfaces.ants.base import ANTSCommandInputSpec, ANTSCommand
from nipype.interfaces.ants.segmentation import N4BiasFieldCorrectionOutputSpec
from nipype.interfaces.base import (File, traits, isdefined)
from nipype.utils.filemanip import split_filename
import nipype.pipeline.engine as pe
import nipype.interfaces.utility as niu
import nipype.interfaces.fsl as fsl
import os
from nipype.workflows.dmri.fsl.artifacts import _xfm_jacobian, _checkrnum
from nipype.workflows.dmri.fsl.utils import b0_average, apply_all_corrections, insert_mat, \
rotate_bvecs, vsm2warp, extract_bval, recompose_xfm, recompose_dwi, _checkinitxfm, enhance
__author__ = '<NAME>'
__date__ = "2015-05-08"
__maintainer__ = "<NAME>"
__email__ = "<EMAIL>"
"""This module overwrites some parts of Nipype since they did not work correctly.
The idea is that at a low level two functions did not work correctly. To enable nipype to use the fixed versions of
these functions we have to copy the entire chain to make it work.
Also, the original implementation calculated the read out times from the EPI parameters. This implementation requires
you to predefine the read out times.
"""
def all_peb_pipeline(name='hmc_sdc_ecc',
epi_params={'read_out_times': None, 'enc_dir': 'y-'},
altepi_params={'read_out_times': None, 'enc_dir': 'y'}):
"""
Builds a pipeline including three artifact corrections: head-motion
correction (HMC), susceptibility-derived distortion correction (SDC),
and Eddy currents-derived distortion correction (ECC).
.. warning:: this workflow rotates the gradients table (*b*-vectors)
[Leemans09]_.
Examples
--------
>>> from nipype.workflows.dmri.fsl.artifacts import all_peb_pipeline
>>> allcorr = all_peb_pipeline()
>>> allcorr.inputs.inputnode.in_file = 'epi.nii'
>>> allcorr.inputs.inputnode.alt_file = 'epi_rev.nii'
>>> allcorr.inputs.inputnode.in_bval = 'diffusion.bval'
>>> allcorr.inputs.inputnode.in_bvec = 'diffusion.bvec'
>>> allcorr.run() # doctest: +SKIP
"""
inputnode = pe.Node(niu.IdentityInterface(fields=['in_file', 'in_bvec',
'in_bval', 'alt_file']), name='inputnode')
outputnode = pe.Node(niu.IdentityInterface(fields=['out_file', 'out_mask',
'out_bvec']), name='outputnode')
avg_b0_0 = pe.Node(niu.Function(input_names=['in_dwi', 'in_bval'],
output_names=['out_file'], function=b0_average),
name='b0_avg_pre')
avg_b0_1 = pe.Node(niu.Function(input_names=['in_dwi', 'in_bval'],
output_names=['out_file'], function=b0_average),
name='b0_avg_post')
bet_dwi0 = pe.Node(fsl.BET(frac=0.3, mask=True, robust=True),
name='bet_dwi_pre')
bet_dwi1 = pe.Node(fsl.BET(frac=0.3, mask=True, robust=True),
name='bet_dwi_post')
hmc = hmc_pipeline()
sdc = sdc_peb(epi_params=epi_params, altepi_params=altepi_params)
ecc = ecc_pipeline()
unwarp = apply_all_corrections()
wf = pe.Workflow(name=name)
wf.connect([
(inputnode, hmc, [('in_file', 'inputnode.in_file'),
('in_bvec', 'inputnode.in_bvec'),
('in_bval', 'inputnode.in_bval')]),
(inputnode, avg_b0_0, [('in_file', 'in_dwi'),
('in_bval', 'in_bval')]),
(avg_b0_0, bet_dwi0, [('out_file', 'in_file')]),
(bet_dwi0, hmc, [('mask_file', 'inputnode.in_mask')]),
(hmc, sdc, [('outputnode.out_file', 'inputnode.in_file')]),
(bet_dwi0, sdc, [('mask_file', 'inputnode.in_mask')]),
(inputnode, sdc, [('in_bval', 'inputnode.in_bval'),
('alt_file', 'inputnode.alt_file')]),
(inputnode, ecc, [('in_file', 'inputnode.in_file'),
('in_bval', 'inputnode.in_bval')]),
(bet_dwi0, ecc, [('mask_file', 'inputnode.in_mask')]),
(hmc, ecc, [('outputnode.out_xfms', 'inputnode.in_xfms')]),
(ecc, avg_b0_1, [('outputnode.out_file', 'in_dwi')]),
(inputnode, avg_b0_1, [('in_bval', 'in_bval')]),
(avg_b0_1, bet_dwi1, [('out_file', 'in_file')]),
(inputnode, unwarp, [('in_file', 'inputnode.in_dwi')]),
(hmc, unwarp, [('outputnode.out_xfms', 'inputnode.in_hmc')]),
(ecc, unwarp, [('outputnode.out_xfms', 'inputnode.in_ecc')]),
(sdc, unwarp, [('outputnode.out_warp', 'inputnode.in_sdc')]),
(hmc, outputnode, [('outputnode.out_bvec', 'out_bvec')]),
(unwarp, outputnode, [('outputnode.out_file', 'out_file')]),
(bet_dwi1, outputnode, [('mask_file', 'out_mask')])
])
return wf
def hmc_pipeline(name='motion_correct'):
"""
HMC stands for head-motion correction.
Creates a pipeline that corrects for head motion artifacts in dMRI
sequences.
It takes a series of diffusion weighted images and rigidly co-registers
them to one reference image. Finally, the `b`-matrix is rotated accordingly
[Leemans09]_ making use of the rotation matrix obtained by FLIRT.
Search angles have been limited to 4 degrees, based on results in
[Yendiki13]_.
A list of rigid transformation matrices is provided, so that transforms
can be chained.
This is useful to correct for artifacts with only one interpolation process
(as previously discussed `here
<https://github.com/nipy/nipype/pull/530#issuecomment-14505042>`_),
and also to compute nuisance regressors as proposed by [Yendiki13]_.
.. warning:: This workflow rotates the `b`-vectors, so please be advised
that not all the dicom converters ensure the consistency between the
resulting nifti orientation and the gradients table (e.g. dcm2nii
checks it).
.. admonition:: References
.. [Leemans09] <NAME>, and <NAME>, `The B-matrix must be rotated
when correcting for subject motion in DTI data
<http://dx.doi.org/10.1002/mrm.21890>`_,
Magn Reson Med. 61(6):1336-49. 2009. doi: 10.1002/mrm.21890.
.. [Yendiki13] <NAME> et al., `Spurious group differences due to head
motion in a diffusion MRI study
<http://dx.doi.org/10.1016/j.neuroimage.2013.11.027>`_.
Neuroimage. 21(88C):79-90. 2013. doi: 10.1016/j.neuroimage.2013.11.027
Example
-------
>>> from nipype.workflows.dmri.fsl.artifacts import hmc_pipeline
>>> hmc = hmc_pipeline()
>>> hmc.inputs.inputnode.in_file = 'diffusion.nii'
>>> hmc.inputs.inputnode.in_bvec = 'diffusion.bvec'
>>> hmc.inputs.inputnode.in_bval = 'diffusion.bval'
>>> hmc.inputs.inputnode.in_mask = 'mask.nii'
>>> hmc.run() # doctest: +SKIP
Inputs::
inputnode.in_file - input dwi file
inputnode.in_mask - weights mask of reference image (a file with data \
range in [0.0, 1.0], indicating the weight of each voxel when computing the \
metric).
inputnode.in_bvec - gradients file (b-vectors)
inputnode.ref_num (optional, default=0) index of the b0 volume that \
should be taken as reference
Outputs::
outputnode.out_file - corrected dwi file
outputnode.out_bvec - rotated gradient vectors table
outputnode.out_xfms - list of transformation matrices
"""
from nipype.workflows.data import get_flirt_schedule
params = dict(dof=6, bgvalue=0, save_log=True, no_search=True,
# cost='mutualinfo', cost_func='mutualinfo', bins=64,
schedule=get_flirt_schedule('hmc'))
inputnode = pe.Node(niu.IdentityInterface(fields=['in_file', 'ref_num',
'in_bvec', 'in_bval', 'in_mask']), name='inputnode')
split = pe.Node(niu.Function(function=hmc_split,
input_names=['in_file', 'in_bval', 'ref_num'],
output_names=['out_ref', 'out_mov', 'out_bval', 'volid']),
name='SplitDWI')
flirt = dwi_flirt(flirt_param=params)
insmat = pe.Node(niu.Function(input_names=['inlist', 'volid'],
output_names=['out'], function=insert_mat),
name='InsertRefmat')
rot_bvec = pe.Node(niu.Function(input_names=['in_bvec', 'in_matrix'],
output_names=['out_file'], function=rotate_bvecs),
name='Rotate_Bvec')
outputnode = pe.Node(niu.IdentityInterface(fields=['out_file',
'out_bvec', 'out_xfms']),
name='outputnode')
wf = pe.Workflow(name=name)
wf.connect([
(inputnode, split, [('in_file', 'in_file'),
('in_bval', 'in_bval'),
('ref_num', 'ref_num')]),
(inputnode, flirt, [('in_mask', 'inputnode.ref_mask')]),
(split, flirt, [('out_ref', 'inputnode.reference'),
('out_mov', 'inputnode.in_file'),
('out_bval', 'inputnode.in_bval')]),
(flirt, insmat, [('outputnode.out_xfms', 'inlist')]),
(split, insmat, [('volid', 'volid')]),
(inputnode, rot_bvec, [('in_bvec', 'in_bvec')]),
(insmat, rot_bvec, [('out', 'in_matrix')]),
(rot_bvec, outputnode, [('out_file', 'out_bvec')]),
(flirt, outputnode, [('outputnode.out_file', 'out_file')]),
(insmat, outputnode, [('out', 'out_xfms')])
])
return wf
def ecc_pipeline(name='eddy_correct'):
"""
ECC stands for Eddy currents correction.
Creates a pipeline that corrects for artifacts induced by Eddy currents in
dMRI sequences.
It takes a series of diffusion weighted images and linearly co-registers
them to one reference image (the average of all b0s in the dataset).
DWIs are also modulated by the determinant of the Jacobian as indicated by
[Jones10]_ and [Rohde04]_.
A list of rigid transformation matrices can be provided, sourcing from a
:func:`.hmc_pipeline` workflow, to initialize registrations in a *motion
free* framework.
A list of affine transformation matrices is available as output, so that
transforms can be chained (discussion
`here <https://github.com/nipy/nipype/pull/530#issuecomment-14505042>`_).
.. admonition:: References
.. [Jones10] Jones DK, `The signal intensity must be modulated by the
determinant of the Jacobian when correcting for eddy currents in
diffusion MRI
<http://cds.ismrm.org/protected/10MProceedings/files/1644_129.pdf>`_,
Proc. ISMRM 18th Annual Meeting, (2010).
.. [Rohde04] Rohde et al., `Comprehensive Approach for Correction of
Motion and Distortion in Diffusion-Weighted MRI
<http://stbb.nichd.nih.gov/pdf/com_app_cor_mri04.pdf>`_, MRM
51:103-114 (2004).
Example
-------
>>> from nipype.workflows.dmri.fsl.artifacts import ecc_pipeline
>>> ecc = ecc_pipeline()
>>> ecc.inputs.inputnode.in_file = 'diffusion.nii'
>>> ecc.inputs.inputnode.in_bval = 'diffusion.bval'
>>> ecc.inputs.inputnode.in_mask = 'mask.nii'
>>> ecc.run() # doctest: +SKIP
Inputs::
inputnode.in_file - input dwi file
inputnode.in_mask - weights mask of reference image (a file with data \
range in [0.0, 1.0], indicating the weight of each voxel when computing the \
metric).
inputnode.in_bval - b-values table
inputnode.in_xfms - list of matrices to initialize registration (from \
head-motion correction)
Outputs::
outputnode.out_file - corrected dwi file
outputnode.out_xfms - list of transformation matrices
"""
from nipype.workflows.data import get_flirt_schedule
params = dict(dof=12, no_search=True, interp='spline', bgvalue=0,
schedule=get_flirt_schedule('ecc'))
# cost='normmi', cost_func='normmi', bins=64,
inputnode = pe.Node(niu.IdentityInterface(fields=['in_file', 'in_bval',
'in_mask', 'in_xfms']), name='inputnode')
avg_b0 = pe.Node(niu.Function(input_names=['in_dwi', 'in_bval'],
output_names=['out_file'], function=b0_average),
name='b0_avg')
pick_dws = pe.Node(niu.Function(input_names=['in_dwi', 'in_bval', 'b'],
output_names=['out_file'], function=extract_bval),
name='ExtractDWI')
pick_dws.inputs.b = 'diff'
flirt = dwi_flirt(flirt_param=params, excl_nodiff=True)
mult = pe.MapNode(fsl.BinaryMaths(operation='mul'), name='ModulateDWIs',
iterfield=['in_file', 'operand_value'])
thres = pe.MapNode(fsl.Threshold(thresh=0.0), iterfield=['in_file'],
name='RemoveNegative')
split = pe.Node(fsl.Split(dimension='t'), name='SplitDWIs')
get_mat = pe.Node(niu.Function(input_names=['in_bval', 'in_xfms'],
output_names=['out_files'], function=recompose_xfm),
name='GatherMatrices')
merge = pe.Node(niu.Function(input_names=['in_dwi', 'in_bval', 'in_corrected'],
output_names=['out_file'], function=recompose_dwi), name='MergeDWIs')
outputnode = pe.Node(niu.IdentityInterface(fields=['out_file', 'out_xfms']),
name='outputnode')
wf = pe.Workflow(name=name)
wf.connect([
(inputnode, avg_b0, [('in_file', 'in_dwi'),
('in_bval', 'in_bval')]),
(inputnode, pick_dws, [('in_file', 'in_dwi'),
('in_bval', 'in_bval')]),
(inputnode, merge, [('in_file', 'in_dwi'),
('in_bval', 'in_bval')]),
(inputnode, flirt, [('in_mask', 'inputnode.ref_mask'),
('in_xfms', 'inputnode.in_xfms'),
('in_bval', 'inputnode.in_bval')]),
(inputnode, get_mat, [('in_bval', 'in_bval')]),
(avg_b0, flirt, [('out_file', 'inputnode.reference')]),
(pick_dws, flirt, [('out_file', 'inputnode.in_file')]),
(flirt, get_mat, [('outputnode.out_xfms', 'in_xfms')]),
(flirt, mult, [(('outputnode.out_xfms', _xfm_jacobian),
'operand_value')]),
(flirt, split, [('outputnode.out_file', 'in_file')]),
(split, mult, [('out_files', 'in_file')]),
(mult, thres, [('out_file', 'in_file')]),
(thres, merge, [('out_file', 'in_corrected')]),
(get_mat, outputnode, [('out_files', 'out_xfms')]),
(merge, outputnode, [('out_file', 'out_file')])
])
return wf
def sdc_peb(name='peb_correction',
epi_params={'read_out_times': None, 'enc_dir': 'y-'},
altepi_params={'read_out_times': None, 'enc_dir': 'y'}):
"""
SDC stands for susceptibility distortion correction. PEB stands for
phase-encoding-based.
The phase-encoding-based (PEB) method implements SDC by acquiring
diffusion images with two different encoding directions [Andersson2003]_.
The most typical case is acquiring with opposed phase-gradient blips
(e.g. *A>>>P* and *P>>>A*, or equivalently, *-y* and *y*)
as in [Chiou2000]_, but it is also possible to use orthogonal
configurations [Cordes2000]_ (e.g. *A>>>P* and *L>>>R*,
or equivalently *-y* and *x*).
This workflow uses the implementation of FSL
(`TOPUP <http://fsl.fmrib.ox.ac.uk/fsl/fslwiki/TOPUP>`_).
Example
-------
>>> from nipype.workflows.dmri.fsl.artifacts import sdc_peb
>>> peb = sdc_peb()
>>> peb.inputs.inputnode.in_file = 'epi.nii'
>>> peb.inputs.inputnode.alt_file = 'epi_rev.nii'
>>> peb.inputs.inputnode.in_bval = 'diffusion.bval'
>>> peb.inputs.inputnode.in_mask = 'mask.nii'
>>> peb.run() # doctest: +SKIP
.. admonition:: References
.. [Andersson2003] <NAME>L et al., `How to correct susceptibility
distortions in spin-echo echo-planar images: application to diffusion
tensor imaging <http://dx.doi.org/10.1016/S1053-8119(03)00336-7>`_.
Neuroimage. 2003 Oct;20(2):870-88. doi: 10.1016/S1053-8119(03)00336-7
.. [Cordes2000] <NAME> et al., Geometric distortion correction in EPI
using two images with orthogonal phase-encoding directions, in Proc.
ISMRM (8), p.1712, Denver, US, 2000.
.. [Chiou2000] <NAME>, and <NAME>, A simple method to correct
off-resonance related distortion in echo planar imaging, in Proc.
ISMRM (8), p.1712, Denver, US, 2000.
"""
inputnode = pe.Node(niu.IdentityInterface(fields=['in_file', 'in_bval',
'in_mask', 'alt_file', 'ref_num']),
name='inputnode')
outputnode = pe.Node(niu.IdentityInterface(fields=['out_file', 'out_vsm',
'out_warp']), name='outputnode')
b0_ref = pe.Node(fsl.ExtractROI(t_size=1), name='b0_ref')
b0_alt = pe.Node(fsl.ExtractROI(t_size=1), name='b0_alt')
b0_comb = pe.Node(niu.Merge(2), name='b0_list')
b0_merge = pe.Node(fsl.Merge(dimension='t'), name='b0_merged')
topup = pe.Node(fsl.TOPUP(), name='topup')
topup.inputs.encoding_direction = [epi_params['enc_dir'],
altepi_params['enc_dir']]
readout = epi_params['read_out_times']
topup.inputs.readout_times = [readout,
altepi_params['read_out_times']]
unwarp = pe.Node(fsl.ApplyTOPUP(in_index=[1], method='jac'), name='unwarp')
# scaling = pe.Node(niu.Function(input_names=['in_file', 'enc_dir'],
# output_names=['factor'], function=_get_zoom),
# name='GetZoom')
# scaling.inputs.enc_dir = epi_params['enc_dir']
vsm2dfm = vsm2warp()
vsm2dfm.inputs.inputnode.enc_dir = epi_params['enc_dir']
vsm2dfm.inputs.inputnode.scaling = readout
wf = pe.Workflow(name=name)
wf.connect([
(inputnode, b0_ref, [('in_file', 'in_file'),
(('ref_num', _checkrnum), 't_min')]),
(inputnode, b0_alt, [('alt_file', 'in_file'),
(('ref_num', _checkrnum), 't_min')]),
(b0_ref, b0_comb, [('roi_file', 'in1')]),
(b0_alt, b0_comb, [('roi_file', 'in2')]),
(b0_comb, b0_merge, [('out', 'in_files')]),
(b0_merge, topup, [('merged_file', 'in_file')]),
(topup, unwarp, [('out_fieldcoef', 'in_topup_fieldcoef'),
('out_movpar', 'in_topup_movpar'),
('out_enc_file', 'encoding_file')]),
(inputnode, unwarp, [('in_file', 'in_files')]),
(unwarp, outputnode, [('out_corrected', 'out_file')]),
# (b0_ref, scaling, [('roi_file', 'in_file')]),
# (scaling, vsm2dfm, [('factor', 'inputnode.scaling')]),
(b0_ref, vsm2dfm, [('roi_file', 'inputnode.in_ref')]),
(topup, vsm2dfm, [('out_field', 'inputnode.in_vsm')]),
(topup, outputnode, [('out_field', 'out_vsm')]),
(vsm2dfm, outputnode, [('outputnode.out_warp', 'out_warp')])
])
return wf
def hmc_split(in_file, in_bval, ref_num=0, lowbval=25.0):
"""
Selects the reference and moving volumes from a dwi dataset
for the purpose of HMC.
"""
import numpy as np
import nibabel as nb
import os.path as op
from nipype.interfaces.base import isdefined
im = nb.load(in_file)
data = im.get_data()
hdr = im.get_header().copy()
bval = np.loadtxt(in_bval)
lowbs = np.where(bval <= lowbval)[0]
volid = lowbs[0]
if (isdefined(ref_num) and (ref_num < len(lowbs))):
volid = [ref_num]
# todo add next two lines in Nipype git
if isinstance(volid, list) and len(volid) == 1:
volid = volid[0]
if volid == 0:
data = data[..., 1:]
bval = bval[1:]
elif volid == (data.shape[-1] - 1):
data = data[..., :-1]
bval = bval[:-1]
else:
data = np.concatenate((data[..., :volid], data[..., (volid + 1):]),
axis=3)
bval = np.hstack((bval[:volid], bval[(volid + 1):]))
out_ref = op.abspath('hmc_ref.nii.gz')
out_mov = op.abspath('hmc_mov.nii.gz')
out_bval = op.abspath('bval_split.txt')
refdata = data[..., volid]
hdr.set_data_shape(refdata.shape)
nb.Nifti1Image(refdata, im.get_affine(), hdr).to_filename(out_ref)
hdr.set_data_shape(data.shape)
nb.Nifti1Image(data, im.get_affine(), hdr).to_filename(out_mov)
np.savetxt(out_bval, bval)
return [out_ref, out_mov, out_bval, volid]
class N4BiasFieldCorrectionInputSpec(ANTSCommandInputSpec):
# todo dimensionality in Nipype git
dimension = traits.Enum(3, 2, argstr='--image-dimensionality %d',
usedefault=True,
desc='image dimension (2 or 3)')
input_image = File(argstr='--input-image %s', mandatory=True,
desc=('image to apply transformation to (generally a '
'coregistered functional)'))
mask_image = File(argstr='--mask-image %s')
weight_image = File(argstr='--weight-image %s')
output_image = traits.Str(argstr='--output %s',
desc='output file name', genfile=True,
hash_files=False)
bspline_fitting_distance = traits.Float(argstr="--bspline-fitting %s")
bspline_order = traits.Int(requires=['bspline_fitting_distance'])
shrink_factor = traits.Int(argstr="--shrink-factor %d")
n_iterations = traits.List(traits.Int(), argstr="--convergence %s",
requires=['convergence_threshold'])
convergence_threshold = traits.Float(requires=['n_iterations'])
save_bias = traits.Bool(False, mandatory=True, usedefault=True,
desc=('True if the estimated bias should be saved'
' to file.'), xor=['bias_image'])
bias_image = File(desc='Filename for the estimated bias.',
hash_files=False)
class N4BiasFieldCorrection(ANTSCommand):
"""N4 is a variant of the popular N3 (nonparameteric nonuniform normalization)
retrospective bias correction algorithm. Based on the assumption that the
corruption of the low frequency bias field can be modeled as a convolution of
the intensity histogram by a Gaussian, the basic algorithmic protocol is to
iterate between deconvolving the intensity histogram by a Gaussian, remapping
the intensities, and then spatially smoothing this result by a B-spline modeling
of the bias field itself. The modifications from and improvements obtained over
the original N3 algorithm are described in [Tustison2010]_.
.. [Tustison2010] <NAME> et al.,
N4ITK: Improved N3 Bias Correction, IEEE Transactions on Medical Imaging,
29(6):1310-1320, June 2010.
Examples
--------
>>> import copy
>>> from nipype.interfaces.ants import N4BiasFieldCorrection
>>> n4 = N4BiasFieldCorrection()
>>> n4.inputs.dimension = 3
>>> n4.inputs.input_image = 'structural.nii'
>>> n4.inputs.bspline_fitting_distance = 300
>>> n4.inputs.shrink_factor = 3
>>> n4.inputs.n_iterations = [50,50,30,20]
>>> n4.inputs.convergence_threshold = 1e-6
>>> n4.cmdline
'N4BiasFieldCorrection --bspline-fitting [ 300 ] \
--image-dimension 3 --input-image structural.nii \
--convergence [ 50x50x30x20, 1e-06 ] --output structural_corrected.nii \
--shrink-factor 3'
>>> n4_2 = copy.deepcopy(n4)
>>> n4_2.inputs.bspline_order = 5
>>> n4_2.cmdline
'N4BiasFieldCorrection --bspline-fitting [ 300, 5 ] \
--image-dimension 3 --input-image structural.nii \
--convergence [ 50x50x30x20, 1e-06 ] --output structural_corrected.nii \
--shrink-factor 3'
>>> n4_3 = N4BiasFieldCorrection()
>>> n4_3.inputs.input_image = 'structural.nii'
>>> n4_3.inputs.save_bias = True
>>> n4_3.cmdline
'N4BiasFieldCorrection --image-dimension 3 --input-image structural.nii \
--output [ structural_corrected.nii, structural_bias.nii ]'
"""
_cmd = 'N4BiasFieldCorrection'
input_spec = N4BiasFieldCorrectionInputSpec
output_spec = N4BiasFieldCorrectionOutputSpec
def _gen_filename(self, name):
if name == 'output_image':
output = self.inputs.output_image
if not isdefined(output):
_, name, ext = split_filename(self.inputs.input_image)
output = name + '_corrected' + ext
return output
if name == 'bias_image':
output = self.inputs.bias_image
if not isdefined(output):
_, name, ext = split_filename(self.inputs.input_image)
output = name + '_bias' + ext
return output
return None
def _format_arg(self, name, trait_spec, value):
if ((name == 'output_image') and
(self.inputs.save_bias or isdefined(self.inputs.bias_image))):
bias_image = self._gen_filename('bias_image')
output = self._gen_filename('output_image')
newval = '[ %s, %s ]' % (output, bias_image)
return trait_spec.argstr % newval
if name == 'bspline_fitting_distance':
if isdefined(self.inputs.bspline_order):
newval = '[ %g, %d ]' % (value, self.inputs.bspline_order)
else:
newval = '[ %g ]' % value
return trait_spec.argstr % newval
if ((name == 'n_iterations') and
(isdefined(self.inputs.convergence_threshold))):
newval = '[ %s, %g ]' % ('x'.join([str(elt) for elt in value]),
self.inputs.convergence_threshold)
return trait_spec.argstr % newval
return super(N4BiasFieldCorrection,
self)._format_arg(name, trait_spec, value)
def _parse_inputs(self, skip=None):
if skip is None:
skip = []
skip += ['save_bias', 'bias_image']
return super(N4BiasFieldCorrection, self)._parse_inputs(skip=skip)
def _list_outputs(self):
outputs = self._outputs().get()
outputs['output_image'] = os.path.abspath(self._gen_filename('output_image'))
if self.inputs.save_bias or isdefined(self.inputs.bias_image):
outputs['bias_image'] = os.path.abspath(self._gen_filename('bias_image'))
return outputs
# todo remove this if N4BiasFieldCorrection works again
def dwi_flirt(name='DWICoregistration', excl_nodiff=False,
flirt_param={}):
"""
Generates a workflow for linear registration of dwi volumes
"""
inputnode = pe.Node(niu.IdentityInterface(fields=['reference',
'in_file', 'ref_mask', 'in_xfms', 'in_bval']),
name='inputnode')
initmat = pe.Node(niu.Function(input_names=['in_bval', 'in_xfms',
'excl_nodiff'], output_names=['init_xfms'],
function=_checkinitxfm), name='InitXforms')
initmat.inputs.excl_nodiff = excl_nodiff
dilate = pe.Node(fsl.maths.MathsCommand(nan2zeros=True,
args='-kernel sphere 5 -dilM'), name='MskDilate')
split = pe.Node(fsl.Split(dimension='t'), name='SplitDWIs')
pick_ref = pe.Node(niu.Select(), name='Pick_b0')
n4 = pe.Node(N4BiasFieldCorrection(dimension=3), name='Bias')
enhb0 = pe.Node(niu.Function(input_names=['in_file', 'in_mask',
'clip_limit'], output_names=['out_file'],
function=enhance), name='B0Equalize')
enhb0.inputs.clip_limit = 0.015
enhdw = pe.MapNode(niu.Function(input_names=['in_file', 'in_mask'],
output_names=['out_file'], function=enhance),
name='DWEqualize', iterfield=['in_file'])
flirt = pe.MapNode(fsl.FLIRT(**flirt_param), name='CoRegistration',
iterfield=['in_file', 'in_matrix_file'])
thres = pe.MapNode(fsl.Threshold(thresh=0.0), iterfield=['in_file'],
name='RemoveNegative')
merge = pe.Node(fsl.Merge(dimension='t'), name='MergeDWIs')
outputnode = pe.Node(niu.IdentityInterface(fields=['out_file',
'out_xfms']), name='outputnode')
wf = pe.Workflow(name=name)
wf.connect([
(inputnode, split, [('in_file', 'in_file')]),
(inputnode, dilate, [('ref_mask', 'in_file')]),
(inputnode, enhb0, [('ref_mask', 'in_mask')]),
(inputnode, initmat, [('in_xfms', 'in_xfms'),
('in_bval', 'in_bval')]),
(inputnode, n4, [('reference', 'input_image'),
('ref_mask', 'mask_image')]),
(dilate, flirt, [('out_file', 'ref_weight'),
('out_file', 'in_weight')]),
(n4, enhb0, [('output_image', 'in_file')]),
(split, enhdw, [('out_files', 'in_file')]),
(dilate, enhdw, [('out_file', 'in_mask')]),
(enhb0, flirt, [('out_file', 'reference')]),
(enhdw, flirt, [('out_file', 'in_file')]),
(initmat, flirt, [('init_xfms', 'in_matrix_file')]),
(flirt, thres, [('out_file', 'in_file')]),
(thres, merge, [('out_file', 'in_files')]),
(merge, outputnode, [('merged_file', 'out_file')]),
(flirt, outputnode, [('out_matrix_file', 'out_xfms')])
])
return wf
| 1.929688
| 2
|
zeex/core/models/bookmark.py
|
zbarge/dbtrix
| 10
|
12782054
|
# -*- coding: utf-8 -*-
"""
Created on Fri Dec 2 14:02:26 2016
MIT License
Copyright (c) 2016 <NAME>
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
from zeex.core.ctrls.bookmark import BookmarkManager
from zeex.core.compat import QtGui, QtCore
Qt = QtCore.Qt
class BookMarkModel(QtGui.QStandardItemModel):
"""
A QStandardItemModel representing the data
stored in a BookmarkManager.
"""
header = ['name', 'file_path']
def __init__(self, manager: BookmarkManager):
QtGui.QStandardItemModel.__init__(self)
self.manager = manager
self.header = self.header.copy()
def headerData(self, col, orientation, role):
if role != Qt.DisplayRole:
return None
if orientation == Qt.Horizontal:
return self.header[col]
elif orientation == Qt.Vertical:
return col
return None
def rowCount(self):
return len(self.manager.names)
def columnCount(self):
return len(self.header)
def data(self, index, role):
if not index.isValid():
return None
elif role not in (Qt.DisplayRole, Qt.EditRole):
return None
mark = self.manager.names[index.row()]
row = self.manager.bookmark(mark)
name = self.header[index.column()]
return str(getattr(row, name))
def flags(self):
return Qt.ItemIsEnabled | Qt.ItemIsSelectable | Qt.ItemIsDragEnabled | Qt.ItemIsDropEnabled
| 1.78125
| 2
|
ydkgen/builder/_types_extractor.py
|
viardant/ydk-gen
| 4
|
12782055
|
# ----------------------------------------------------------------
# YDK - YANG Development Kit
# Copyright 2016-2019 Cisco Systems
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ------------------------------------------------------------------
# This file has been modified by <NAME>, YDK Solutions.
# All modifications in original under CiscoDevNet domain
# introduced since October 2019 are copyrighted.
# All rights reserved under Apache License, Version 2.0.
# ------------------------------------------------------------------
"""
_types_extractor.py
Extractor for types
"""
from pyang.types import EnumTypeSpec, BitTypeSpec, UnionTypeSpec, PathTypeSpec, IdentityrefTypeSpec
class TypesExtractor(object):
def __init__(self):
self.get_enum_type_stmt = lambda stmt: self._get_type_stmt(stmt, EnumTypeSpec)
self.get_identity_ref_type_stmt = lambda stmt: self._get_type_stmt(
stmt, IdentityrefTypeSpec)
self.get_bits_type_stmt = lambda stmt: self._get_type_stmt(stmt, BitTypeSpec)
self.get_union_type_stmt = lambda stmt: self._get_type_stmt(stmt, UnionTypeSpec)
def _get_type_stmt(self, stmt, typeSpec):
if stmt.keyword == 'type':
type_stmt = stmt
else:
type_stmt = stmt.search_one('type')
if hasattr(type_stmt, 'i_typedef') and type_stmt.i_typedef is not None:
typedef_stmt = type_stmt.i_typedef
return self._get_type_stmt(typedef_stmt, typeSpec)
elif hasattr(type_stmt, 'i_type_spec'):
type_spec = type_stmt.i_type_spec
while isinstance(type_spec, PathTypeSpec):
if not hasattr(type_spec, 'i_target_node'):
return None
type_stmt = type_spec.i_target_node.search_one('type')
type_spec = type_stmt.i_type_spec
if hasattr(type_stmt, 'i_typedef') and type_stmt.i_typedef is not None:
typedef_stmt = type_stmt.i_typedef
return self._get_type_stmt(typedef_stmt, typeSpec)
if isinstance(type_spec, typeSpec):
return type_stmt
else:
return None
else:
return None
def get_property_type(self, type_stmt):
enum_type_stmt = self.get_enum_type_stmt(type_stmt)
bits_type_stmt = self.get_bits_type_stmt(type_stmt)
union_type_stmt = self.get_union_type_stmt(type_stmt)
contained_property_type = type_stmt.i_type_spec
if isinstance(contained_property_type, IdentityrefTypeSpec):
contained_property_type = contained_property_type.idbases[0].i_identity.i_class
elif enum_type_stmt is not None:
# this is an enumeration
contained_property_type = enum_type_stmt.i_enum
elif bits_type_stmt is not None:
# bits
contained_property_type = bits_type_stmt.i_bits
elif union_type_stmt is not None:
contained_property_type = union_type_stmt
return contained_property_type
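# usage sketch (hypothetical pyang statement `leaf_stmt` carrying a 'type' substatement):
# extractor = TypesExtractor()
# enum_stmt = extractor.get_enum_type_stmt(leaf_stmt)  # None unless the resolved type is an enumeration
# prop_type = extractor.get_property_type(leaf_stmt.search_one('type'))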
| 1.578125
| 2
|
services/director-v2/src/simcore_service_director_v2/modules/dynamic_sidecar/scheduler/__init__.py
|
colinRawlings/osparc-simcore
| 25
|
12782056
|
from .task import DynamicSidecarsScheduler, setup_scheduler, shutdown_scheduler
| 1.023438
| 1
|
idest_dash_components/_imports_.py
|
idest/dash_components
| 0
|
12782057
|
<filename>idest_dash_components/_imports_.py<gh_stars>0
from .ExampleComponent import ExampleComponent
from .Import import Import
from .Slider import Slider
__all__ = [
"ExampleComponent",
"Import",
"Slider",
]
| 1.09375
| 1
|
system_tests/test_fit_predict.py
|
h2oai/dask-lightgbm
| 81
|
12782058
|
<reponame>h2oai/dask-lightgbm
import os
import dask.dataframe as dd
import pytest
from dask.distributed import Client
import dask_lightgbm.core as dlgbm
@pytest.fixture(scope='module')
def client():
with Client(os.getenv('SCHEDULER')) as client:
yield client
@pytest.fixture()
def listen_port():
listen_port.port += 10
return listen_port.port
listen_port.port = 12400
def test_classify_newsread(client, listen_port):
data = dd.read_csv('./system_tests/data/*.gz', compression='gzip', blocksize=None)
dX = data.iloc[:, :-1]
dy = data.iloc[:, -1]
d_classif = dlgbm.LGBMClassifier(n_estimators=50, local_listen_port=listen_port)
d_classif.fit(dX, dy)
dy_pred = d_classif.predict(dX, client=client)
acc_score = (dy == dy_pred).sum() / len(dy)
acc_score = acc_score.compute()
print(acc_score)
assert acc_score > 0.8
def test_regress_newsread(client, listen_port):
data = dd.read_csv('./system_tests/data/*.gz', compression='gzip', blocksize=None)
dX = data.iloc[:, 1:]
dy = data.iloc[:, 0]
d_regress = dlgbm.LGBMRegressor(n_estimators=50, local_listen_port=listen_port)
d_regress.fit(dX, dy)
dy_pred = d_regress.predict(dX, client=client)
# The dask_ml.metrics.r2_score method fails with dataframes so we compute the R2 score ourselves
numerator = ((dy - dy_pred) ** 2).sum()
denominator = ((dy - dy.mean()) ** 2).sum()
r2_score = 1 - numerator / denominator
r2_score = r2_score.compute()
print(r2_score)
assert r2_score > 0.8
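# run sketch (assumes a reachable Dask scheduler address exported in the SCHEDULER
# environment variable, as read by the client fixture above):
# SCHEDULER=tcp://scheduler:8786 pytest system_tests/test_fit_predict.py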
| 2.265625
| 2
|
AtCoder/BeginnerContest138/e.py
|
scnsh/CompetitiveProgramming
| 1
|
12782059
|
<reponame>scnsh/CompetitiveProgramming<gh_stars>1-10
import copy
s = input()
t = input()
if(set(t) - set(s)):
print(-1)
exit()
t_index = 0
s_index = 0
count = 0
copy_s = copy.deepcopy(s)
while len(t) > t_index:
index = copy_s.find(t[t_index])
if index >= 0:
t_index += 1
copy_s = copy_s[index+1:]
else:
copy_s = copy.deepcopy(s)
count += 1
print(count*len(s)+len(s)-len(copy_s))
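# worked example: with s = 'contest' and t = 'son', 's' is taken from the 1st copy of s,
# 'o' and 'n' from the 2nd copy, so count ends at 1, copy_s ends as 'test', and the
# program prints 1*7 + 7 - 4 = 10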
| 2.78125
| 3
|
dhdt/generic/mapping_io.py
|
GO-Eratosthenes/dhdt
| 0
|
12782060
|
import os
import glob
import numpy as np
import pandas as pd
# geospatial libaries
from osgeo import gdal, osr
from xml.etree import ElementTree
def read_geo_info(fname):
""" This function takes as input the geotiff name and the path of the
folder that the images are stored, reads the geographic information of
the image
Parameters
----------
fname : string
path and file name of a geotiff image
Returns
-------
spatialRef : string
osr.SpatialReference in well known text
geoTransform : tuple, size=(8,1)
affine transformation coefficients, but also giving the image dimensions
targetprj : osgeo.osr.SpatialReference() object
coordinate reference system (CRS)
rows : integer
number of rows in the image, that is its height
cols : integer
number of columns in the image, that is its width
bands : integer
number of bands in the image, that is its depth
See Also
--------
read_geo_image : basic function to import geographic imagery data
"""
assert len(glob.glob(fname)) != 0, ('file does not seem to be present')
img = gdal.Open(fname)
spatialRef = img.GetProjection()
geoTransform = img.GetGeoTransform()
targetprj = osr.SpatialReference(wkt=img.GetProjection())
rows = img.RasterYSize
cols = img.RasterXSize
bands = img.RasterCount
geoTransform += (rows, cols,)
return spatialRef, geoTransform, targetprj, rows, cols, bands
def read_geo_image(fname, boi=np.array([])):
""" This function takes as input the geotiff name and the path of the
folder that the images are stored, reads the image and returns the data as
an array
Parameters
----------
fname : string
geotiff file name and path.
boi : numpy.array, size=(k,1)
bands of interest, if a multispectral image is read, a selection can
be specified
Returns
-------
data : numpy.array, size=(m,n), ndim=2
data array of the band
spatialRef : string
osr.SpatialReference in well known text
geoTransform : tuple, size=(6,1)
affine transformation coefficients.
targetprj : osgeo.osr.SpatialReference() object
coordinate reference system (CRS)
See Also
--------
make_geo_im : basic function to write out geographic data
read_geo_info : basic function to get meta data of geographic imagery
Example
-------
>>> import os
>>> fpath = os.path.join(os.getcwd(), "data.jp2" )
>>> (I, spatialRef, geoTransform, targetPrj) = read_geo_image(fpath)
>>> I_ones = np.zeros(I.shape, dtype=bool)
>>> make_geo_im(I_ones, geoTransformM, spatialRefM, "ones.tif")
assert os.path.exists(fname), ('file must exist')
"""
assert len(glob.glob(fname)) != 0, ('file does not seem to be present')
img = gdal.Open(fname)
# imagery can consist of multiple bands
if len(boi) == 0:
for counter in range(img.RasterCount):
band = np.array(img.GetRasterBand(counter+1).ReadAsArray())
data = band if counter == 0 else np.dstack((data,
band[:,:,np.newaxis]))
else:
num_bands = img.RasterCount
assert (np.max(boi)+1)<=num_bands, 'bands of interest is out of range'
for counter, band_id in enumerate(boi):
band = np.array(img.GetRasterBand(band_id+1).ReadAsArray())
data = band if counter == 0 else np.dstack((data,
band[:, :, np.newaxis]))
spatialRef = img.GetProjection()
geoTransform = img.GetGeoTransform()
targetprj = osr.SpatialReference(wkt=img.GetProjection())
return data, spatialRef, geoTransform, targetprj
# output functions
def make_geo_im(I, R, crs, fName, meta_descr='project Eratosthenes',
no_dat=np.nan, sun_angles='az:360-zn:90', date_created='-0276-00-00'):
""" Create georeferenced tiff file (a GeoTIFF)
Parameters
----------
I : numpy.array, size=(m,n)
band image
R : list, size=(1,6)
GDAL georeference transform of an image
crs : string
coordinate reference string
fname : string
filename for the image with extension
no_dat : datatype, integer
no data value
sun_angles : string
string giving meta data about the illumination angles
date_created : string
string giving the acquisition date in YYYY-MM-DD
Example
-------
>>> import os
>>> fpath = os.path.join(os.getcwd(), "data.jp2")
>>> (I, spatialRef, geoTransform, targetPrj) = read_geo_image(fpath)
>>> I_ones = np.zeros(I.shape, dtype=bool)
>>> make_geo_im(I_ones, geoTransformM, spatialRefM, 'ones.tif')
"""
drv = gdal.GetDriverByName("GTiff") # export image
if I.ndim == 3:
bands=I.shape[2]
else:
bands = 1
# make it type dependent
if I.dtype == 'float64':
ds = drv.Create(fName,xsize=I.shape[1], ysize=I.shape[0],bands=bands,
eType=gdal.GDT_Float64)
elif I.dtype == 'float32':
ds = drv.Create(fName,xsize=I.shape[1], ysize=I.shape[0],bands=bands,
eType=gdal.GDT_Float32)
elif I.dtype == 'bool':
ds = drv.Create(fName, xsize=I.shape[1], ysize=I.shape[0], bands=bands,
eType=gdal.GDT_Byte)
else:
ds = drv.Create(fName, xsize=I.shape[1], ysize=I.shape[0], bands=bands,
eType=gdal.GDT_Int32)
# set metadata in datasource
ds.SetMetadata({'TIFFTAG_SOFTWARE':'dhdt v0.1',
'TIFFTAG_ARTIST':'bas altena and team Atlas',
'TIFFTAG_COPYRIGHT': 'contains modified Copernicus data',
'TIFFTAG_IMAGEDESCRIPTION': meta_descr,
'TIFFTAG_RESOLUTIONUNIT' : sun_angles,
'TIFFTAG_DATETIME': date_created})
# set georeferencing metadata
if len(R)!=6: R = R[:6]
ds.SetGeoTransform(R)
if not isinstance(crs, str):
crs = crs.ExportToWkt()
ds.SetProjection(crs)
if I.ndim == 3:
for count in np.arange(1,I.shape[2]+1,1):
band = ds.GetRasterBand(int(count))
band.WriteArray(I[:,:,count-1],0,0)
if count==1:
band.SetNoDataValue(no_dat)
band = None
else:
ds.GetRasterBand(1).WriteArray(I)
ds.GetRasterBand(1).SetNoDataValue(no_dat)
ds = None
del ds
def make_multispectral_vrt(df, fpath=None, fname='multispec.vrt'):
""" virtual raster tile (VRT) is a description of datasets written in an XML
format, it eases the display of multi-spectral data or other means.
Parameters
----------
df : pandas.DataFrame
organization of the different spectral bands
fpath : string
path of the directory of interest
fname : string
file name of the virtual raster tile
"""
assert isinstance(df, pd.DataFrame), ('please provide a dataframe')
assert 'filepath' in df, ('please first run "get_S2_image_locations"'+
' to find the proper file locations')
if fpath is None:
fpath = os.path.commonpath(df.filepath.tolist())
ffull = os.path.join(fpath, fname)
vrt_options = gdal.BuildVRTOptions(resampleAlg=gdal.GRA_NearestNeighbour,
addAlpha=False,
separate=True,
srcNodata=0)
my_vrt = gdal.BuildVRT(ffull, [f+'.jp2' for f in df['filepath']],
options=vrt_options)
my_vrt = None
# modify the vrt-file to include band names
tree = ElementTree.parse(ffull)
root = tree.getroot()
for idx, band in enumerate(root.iter("VRTRasterBand")):
description = ElementTree.SubElement(band, "Description")
description.text = df.common_name[idx]
tree.write(ffull) # update the file on disk
return
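# usage sketch (hypothetical directory; assumes `df` was produced by "get_S2_image_locations"
# and therefore carries 'filepath' and 'common_name' columns):
# make_multispectral_vrt(df, fpath='/tmp/s2_scene', fname='multispec.vrt')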
| 3.28125
| 3
|
SC/northwind.py
|
lfr4704/DS-Unit-3-Sprint-2-SQL-and-Databases
| 0
|
12782061
|
<filename>SC/northwind.py
import os
import sqlite3
#Part 2
# construct a path to wherever your database exists
DB_FILEPATH = os.path.join(os.path.dirname(__file__), "..", "SC", "northwind_small.sqlite3")
connection = sqlite3.connect(DB_FILEPATH)
print("CONNECTION:", connection)
#connection.row_factory = sqlite3.Row
cursor = connection.cursor()
print("CURSOR", cursor)
# - What are the ten most expensive items (per unit price) in the database?
query = """
SELECT ProductName, UnitPrice
FROM Product
ORDER BY UnitPrice DESC
LIMIT 10
"""
# - What is the average age of an employee at the time of their hiring? (Hint: a
# lot of arithmetic works with dates.)
query2 = """
SELECT AVG(hiredate - birthDate)
from Employee
"""
# - (*Stretch*) How does the average age of employee at hire vary by city?
query3 = """
SELECT AVG(hiredate - birthDate), City
from Employee
group by City
"""
#part 3
# - What are the ten most expensive items (per unit price) in the database *and*
# their suppliers?
query4 = """
SELECT ProductName, CompanyName, UnitPrice
FROM Product
JOIN Supplier on Product.SupplierId = Supplier.Id
ORDER BY UnitPrice DESC
LIMIT 10
"""
# - What is the largest category (by number of unique products in it)?
query5 = """
SELECT COUNT(CategoryId),
CategoryName
FROM Product
LEFT JOIN Category on Category.Id = Product.CategoryId
GROUP by CategoryName
ORDER BY 1 DESC
Limit 1
"""
# - (*Stretch*) Who's the employee with the most territories? Use `TerritoryId`
# (not name, region, or other fields) as the unique identifier for territories.
query6 = """
SELECT FirstName, LastName, count(EmployeeId)
FROM EmployeeTerritory
JOIN Employee on Employee.Id = EmployeeTerritory.EmployeeId
GROUP BY EmployeeId
ORDER BY COUNT(EmployeeId) DESC
LIMIT 1
"""
result = cursor.execute(query).fetchall()
result2 = cursor.execute(query2).fetchall()
result3 = cursor.execute(query3).fetchall()
result4 = cursor.execute(query4).fetchall()
result5 = cursor.execute(query5).fetchall()
result6 = cursor.execute(query6).fetchall()
print("RESULT 1:", result)
print("RESULT 2:", result2)
print("RESULT 3:", result3)
print("RESULT 4:", result4)
print("RESULT 5:", result5)
print("RESULT 6:", result6)
cursor.close()
connection.close()
| 4.03125
| 4
|
ahvl/process.py
|
gardar/ahvl
| 4
|
12782062
|
#
# import modules
#
from ahvl.helper import AhvlMsg, AhvlHelper
import subprocess
#
# helper/message
#
msg = AhvlMsg()
hlp = AhvlHelper()
#
# process
#
class Process(object):
def __init__(self, proc=None, cmd=[], failonstderr=True, shell=False):
# set process name and command
self.setprocess(proc) # set process name
self.setcmd(cmd) # set command
self.failonstderr = failonstderr # fail on stderr
self.stderr = None
self.stdout = None
self.stderrlines = []
self.stdoutlines = []
self.shell = shell
# function to remove sensitive information from commands
# by deleting the arguments from a copy of the list
def __get_safe_args(self):
# set list for unknown processes
sensitive = []
# check for which process the arguments need to be cleaned
if self.proc == "ssh-keygen":
sensitive = []#["-f", "-N", "-P"]
if self.proc == "openssl":
sensitive = ["pass:", "-passin", "-passout"]
if self.proc == "puttygen":
sensitive = ["--password-file"]
if self.proc == "gpg":
sensitive = ["--passphrase-file"]
# create a copy of the list to prevent iteration issues when removing items
safeargs = list(self.cmd)
for a in self.cmd:
if a.strip('"').strip("'").startswith(tuple(sensitive)):
safeargs.remove(a)
# return safe to print argument list
return safeargs
# useless lines removed
def __clean_stderr(self):
# remove empty lines
self.stderrlines = list(filter(None, self.stderrlines))
# function to fail on stderr messages
def __fail_on_stderr(self):
# clean output
self.__clean_stderr()
# check if stderr contains any lines
if len(self.stderrlines) > 0 and self.failonstderr:
msg.fail("the process generated an error:\n{}".format("\n".join(self.stderrlines)))
# set stderr and stdout
def __set_result(self):
# convert stdout and stderr to individual lines
self.stdoutlines = self.stdout.rstrip('\n').split("\n")
self.stderrlines = self.stderr.rstrip('\n').split("\n")
# set process to run; accepts known processes only
def setprocess(self, proc):
# sanity check
accepted = ["ssh-keygen", "openssl", "puttygen", "gpg"]
if not proc in accepted:
msg.fail("given process name [{}] is unknown".format(proc))
# set process and return
self.proc = proc
return self
# set command to run
def setcmd(self, cmd):
self.cmd = cmd
return self
# determine if process should fail if any stderr messages are generated
def setfailonstderr(self, fail):
self.failonstderr = fail
# return stdout messages
def getstdout(self):
return self.stdoutlines
# return stderr messages
def getstderr(self):
return self.stderrlines
# run the process
def run(self):
# output debug info
if self.shell == True:
msg.vvvv("about to run the following subprocess (shell): [{}]".format(self.proc))
msg.vvvv("[{}]".format(self.cmd))
else:
# remove sensitive arguments before printing debug info
printable = self.__get_safe_args()
msg.vvvv("about to run the following subprocess (sensitive information has been removed): [{}]".format(self.proc))
msg.vvvv("[{}]".format(subprocess.list2cmdline(printable)))
# spawn subprocess
sp = subprocess.Popen(self.cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=self.shell)
(self.stdout, self.stderr) = sp.communicate();
rc = sp.returncode # get the exit code
# check exit/return code
if rc != 0:
msg.fail("an error occurred for [{}]; the process exited with code [{}]\n".format(self.proc, rc) +
"the process provided the following output: [{}]".format(self.stderr))
# set result and fail on error
self.__set_result()
self.__fail_on_stderr()
# return the result
return self
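#
# usage sketch (hypothetical key path; assumes ssh-keygen is available on PATH)
#
# p = Process(proc="ssh-keygen", cmd=["ssh-keygen", "-t", "ed25519", "-f", "/tmp/id_test", "-N", ""])
# p.run()
# print(p.getstdout())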
| 2.46875
| 2
|
app/main/routes/routes.py
|
awtrimpe/socks-chat
| 0
|
12782063
|
import json
import sys
from flask import (Blueprint, Markup, flash, g, jsonify, redirect,
render_template, request, session, url_for)
from flask_login import current_user, login_user, logout_user
from app.main.admin import (get_admin_control_by_id, get_admin_control_by_name,
get_admin_controls)
from app.main.database.tables import User
from app.main.forms import LoginForm, RegisterForm
from app.main.helpers import svg_contents
from app.main.roles import change_user_permission, set_user_permission
from app.main.users import (delete_user, get_all_users_with_permissions,
get_user, is_admin, register_user)
bp = Blueprint('main', __name__)
@bp.route('/', methods=['GET', 'POST'])
def index():
'''Login form to enter a room.'''
if current_user.is_authenticated:
return redirect(url_for('.chat'))
admin = is_admin(g.session, current_user)
form = LoginForm()
if form.validate_on_submit():
username = form.username.data
password = form.password.data
try:
user = get_user(g.session, username, password)
login_user(user)
session['username'] = username
session['name'] = f'{user.first_name} {user.last_name}'
session['room'] = form.room.data
return redirect(url_for('.chat'))
except Exception as err:
return render_template('index.html',
msg=str(err),
form=form,
admin=admin,
svg=Markup(svg_contents('./app/static/socks.svg')))
elif request.method == 'GET':
form.username.data = session.get('name', '')
form.room.data = session.get('room', '')
return render_template('index.html', form=form, admin=admin, svg=Markup(svg_contents('./app/static/socks.svg')))
@bp.route('/about')
def about():
admin = is_admin(g.session, current_user)
return render_template('about.html',
svg=Markup(svg_contents('./app/static/socks.svg')),
admin=admin,
github=Markup(svg_contents('./app/static/github.svg')))
@bp.route('/admin', methods=['GET', 'PATCH', 'DELETE'])
def admin():
admin = is_admin(g.session, current_user)
if admin:
if request.method == 'GET':
users = get_all_users_with_permissions(g.session)
controls = get_admin_controls(g.session)
return render_template('admin.html',
svg=Markup(svg_contents(
'./app/static/socks.svg')),
trash=Markup(svg_contents(
'./app/static/trash.svg')),
admin=admin,
users=users,
controls=controls)
elif request.method == 'PATCH':
if request.json.get('control'):
try:
control_id = request.json.get('control')
get_admin_control_by_id(g.session, control_id).switch()
g.session.commit()
return jsonify({'msg': f'Control ID: {control_id} successfully changed'}), 200
except:
return jsonify({'msg': 'Something went wrong changing the control'}), 500
elif request.json.get('user'):
try:
user_id = request.json.get('user')
change_user_permission(g.session, user_id)
g.session.commit()
return jsonify({'msg': f'User permissions changed for ID {user_id}'}), 200
except Exception as err:
return jsonify({'msg': str(err)}), 500
except:
return jsonify({'msg': 'Something went wrong changing the user permission'}), 500
else:
return jsonify({'msg': 'A known value was not supplied'}), 400
else: # request.method == 'DELETE':
if request.json.get('user'):
user_id = request.json.get('user')
delete_user(g.session, user_id)
g.session.commit()
return jsonify({'msg': f'User with ID {user_id} successfully deleted'}), 200
else:
return jsonify({'msg': 'A known value was not supplied'}), 400
else:
return 'Access denied', 401
@bp.route('/chat')
def chat():
'''Chat room. The user's name and room must be stored in the session.'''
admin = is_admin(g.session, current_user)
username = session.get('username', '')
name = session.get('name', '')
room = session.get('room', '')
if name == '' or room == '':
flash('You must be logged in to access the chatroom')
return redirect(url_for('.index'))
return render_template('chat.html',
name=name,
room=room,
admin=admin,
svg=Markup(svg_contents('./app/static/socks.svg')),
send_logo=Markup(svg_contents('./app/static/send.svg')))
@bp.route('/logout', methods=['GET'])
def logout():
admin = is_admin(g.session, current_user)
logout_user()
session.clear()
flash('You have been successfully logged out')
return redirect(url_for('.index'))
@bp.route('/register', methods=['GET', 'POST'])
def register():
admin = is_admin(g.session, current_user)
form = RegisterForm()
# Check if 'new_users' is turned on or off
if not get_admin_control_by_name(g.session, 'new_users').value:
return render_template('register.html',
form=form,
admin=admin,
msg='New user registration has been disabled at this time',
svg=Markup(svg_contents('./app/static/socks.svg')))
if request.method == 'GET':
return render_template('register.html', form=form, svg=Markup(svg_contents('./app/static/socks.svg')))
elif request.method == 'POST':
if form.validate_on_submit():
username = request.form.get('username')
password = request.form.get('password')
password_conf = request.form.get('password_conf')
first_name = request.form.get('first_name')
last_name = request.form.get('last_name')
if password != password_conf:
return render_template('register.html',
form=form,
admin=admin,
msg='Passwords did not match',
svg=Markup(svg_contents('./app/static/socks.svg')))
try:
new_user = register_user(
g.session, username, password, first_name, last_name)
try:
# add the new user to the database
g.session.add(new_user)
g.session.commit()
except:
g.session.rollback()
raise Exception('Error adding new user')
# Set user's role as 'user'
user_permission = set_user_permission(
g.session, 'user', new_user.id)
try:
# add the new user's related permission to the database
g.session.add(user_permission)
g.session.commit()
except:
g.session.rollback()
raise Exception('Error setting user permissions')
except Exception as err:
return render_template('register.html',
form=form,
admin=admin,
msg=str(err),
svg=Markup(svg_contents('./app/static/socks.svg')))
except:
return render_template('register.html',
form=form,
admin=admin,
msg=f'Unexpected error: {sys.exc_info()[0]}',
svg=Markup(svg_contents('./app/static/socks.svg')))
else:
return render_template('register.html',
form=form,
admin=admin,
msg='Not all required fields provided',
svg=Markup(svg_contents('./app/static/socks.svg')))
flash('Registration successful')
return redirect(url_for('.index'))
| 2.265625
| 2
|
loop control statements/break_statements.py
|
shashikant-sk/python_learning
| 0
|
12782064
|
# break transfers control to the statement right after the loop
x = "<NAME>"
for i in x :
if i ==".":
break
print(i,end=" ")
| 3.59375
| 4
|
quelfilm/engine.py
|
thomas-marquis/quelfilm-chatbot-app
| 0
|
12782065
|
import os
from nlp_tools.preprocessing import Preprocessing
from nlp_tools.loaders import MdLoader
from nlp_tools.representations import MergedMatrixRepresentation
from nlp_tools.classifiers import ClassificationProcessor, NaiveBayseTfIdfClassifier
from nlp_tools.utils import get_random_message
from quelfilm.settings import *
def build_classifier():
loader = MdLoader(TRAINING_PATH)
processor = Preprocessing(loader)
repres = MergedMatrixRepresentation(processor.data)
classifier = ClassificationProcessor(NaiveBayseTfIdfClassifier(), repres.data)
classifier.train()
def predict(text: str):
message = repres.process_new_data(processor.process_sentence(text))
intent, score = classifier.predict(message)
response = get_random_message(processor.responses[intent])
return intent, score, response
return predict
class Classifier:
def __init__(self):
self.predict = build_classifier()
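# usage sketch (hypothetical message; assumes TRAINING_PATH from quelfilm.settings points
# at valid markdown training data):
# classifier = Classifier()
# intent, score, response = classifier.predict("je cherche un film pour ce soir")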
| 2.359375
| 2
|
Oops/Practics-Oops-11.py
|
Danish-Belal/Python-Basic-to-Advance
| 0
|
12782066
|
<filename>Oops/Practics-Oops-11.py<gh_stars>0
# Q1 - Create a 3D vector class using a 2D vector class.
'''
class C2dvec:
def __init__(self, i, j):
self.icap = i
self.jcap = j
def __str__(self):
return f"{self.icap}i + {self.jcap}k"
class C3dvec(C2dvec):
def __init__(self, i, j, k):
super().__init__(i, j)
self.kcap = k
def __str__(self):
return f"{self.icap}i + {self.jcap}j+ {self.kcap}k"
vec2 = C2dvec(1, 3)
vec3 = C3dvec(1, 9, 7)
print(vec2)
print(vec3)
'''
# Q2 - Create a class Pet from class Animal, and further create a class Dog from Pet. Add a method bark to class Dog.
'''
class Animal:
print("Animals Are good.")
class Pet(Animal):
print("This is a Pet")
class Dog(Pet):
@staticmethod
def bark():
print("Dog is Barking")
Tommy = Dog()
Tommy.bark()
'''
# Q3 - Use a setter and getter to change an attribute of a class Employee
'''
class Employee:
salary = 1000
increment = 1.5
@property
def salaryAfterIncrement(self):
return (self.salary*self.increment)
@salaryAfterIncrement.setter
def salaryAfterIncrement(self , inc):
self.increment = inc /self.salary
e= Employee()
print(e.salaryAfterIncrement)
print(e.increment)
e.salaryAfterIncrement = 2000
print(e.salaryAfterIncrement)
print(e.salary)
print(e.increment)
'''
# Q4 - Create a complex class to model complex-number arithmetic, overloading the + and * operators
# Multiplication of complex no --> (a+bi)(c+di) = (ac-bd) + (ad+bc)i
'''
class complex:
def __init__(self , r , i):
self.real = r
self.imagnery = i
def __add__(self , m): # overloading the + operator
return complex(self.real+ m.real , self.imagnery+m.imagnery)
def __mul__(self , m): # overloading the * operator
mulReal =(self.real*m.real - self.imagnery*m.imagnery)
mulImg = (self.real*m.imagnery + self.imagnery*m.real)
return complex(mulReal , mulImg)
def __str__(self):
if self.imagnery<0:
return f"{self.real} - {-self.imagnery}i"
else:
return f"{self.real} + {self.imagnery}i"
n1 = complex(1, -4)
n2 = complex(331 , -37)
print( n1+n2)
print(n1*n2)
'''
# Q5 - Write a __str__() method to print the vector as 7i^ + 8j^ + 10k^, a three-dimensional vector. Find the length of the list.
'''
class Vector:
def __init__(self, vec):
self.vec = vec
def __str__(self):
return f"{self.vec[0]}i + {self.vec[1]}j + {self.vec[2]}k"
def __len__(self):
return len(self.vec)
v1 = Vector([1, 4, 6 ])
v2 = Vector([1, 6, 9])
print(v1)
print(v2)
print(len(v1))
print(len(v2))
'''
# Overload the + and * operators in Vector, and find the length.
class Vector:
def __init__(self , vec):
self.vec = vec
def __str__(self):
str1 = ""
index = 0
for i in self.vec:
str1 += f"{i}a{index} +"
index +=1
return str1[:-1]
def __add__(self , vec2):
newList=[]
for i in range(len(self.vec)):
newList.append(self.vec[i] + vec2.vec[i])
return Vector(newList)
def __mul__(self , vec2):
sum = 0
for i in range(len(self.vec)):
sum += self.vec[i]*vec2.vec[i]
return sum
def __len__(self):
return len(self.vec)
v1 = Vector([2,3 ,7])
v2 = Vector([3,6,8])
print(v1+v2)
print(v1*v2)
print(len(v1))
print(len(v2))
| 4.15625
| 4
|
dffml/operation/db.py
|
purnimapatel/dffml
| 1
|
12782067
|
<filename>dffml/operation/db.py
import inspect
from typing import Dict, Any, Optional, List
from ..base import config
from ..df.base import op
from ..db.base import Conditions, BaseDatabase
from ..df.types import Definition
# definitions
QUERY_TABLE = Definition(name="query_table", primitive="str")
QUERY_DATA = Definition(name="query_data", primitive="Dict[str, Any]")
QUERY_CONDITIONS = Definition(name="query_conditions", primitive="Conditions")
QUERY_COLS = Definition(name="query_cols", primitive="Dict[str, str]")
QUERY_LOOKUPS = Definition(name="query_lookups", primitive="Dict[str, Any]")
@config
class DatabaseQueryConfig:
database: BaseDatabase
# TODO Remove this?
# TODO Figure out a way to handle defaults so that all inputs need not be passed to the
# flow on execution
# Note : Add `query_type`:str to `DatabaseQueryConfig` before use.
@op(
inputs={
"table_name": QUERY_TABLE,
"data": QUERY_DATA,
"conditions": QUERY_CONDITIONS,
"cols": QUERY_COLS,
},
outputs={"lookups": QUERY_LOOKUPS},
config_cls=DatabaseQueryConfig,
imp_enter={"database": (lambda self: self.config.database)},
ctx_enter={"dbctx": (lambda self: self.parent.database())},
)
async def db_query(
self,
*,
table_name: str,
data: Dict[str, Any] = {},
conditions: Conditions = [],
cols: List[str] = [],
) -> Optional[Dict[str, Any]]:
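    # Gather the keyword arguments that were actually supplied, then dispatch to the
    # database-context method named by ``self.config.query_type``.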
frame = inspect.currentframe()
args, _, _, values = inspect.getargvalues(frame)
kwargs = {arg: values[arg] for arg in args[1:]}
query_fn = self.config.query_type
if "create" in query_fn:
query_fn = "create_table"
allowed = ["create_table", "remove", "update", "insert", "lookup"]
    if query_fn not in allowed:
        raise ValueError(f"Only queries of type {allowed} are allowed")
query_fn = getattr(self.dbctx, query_fn)
try:
await query_fn(**kwargs)
return {"lookups": {}}
except TypeError as e:
if "async_gen" in repr(e):
result = query_fn(**kwargs)
return {"lookups": [res async for res in result]}
else:
raise e
@op(
inputs={"table_name": QUERY_TABLE, "cols": QUERY_COLS},
outputs={},
config_cls=DatabaseQueryConfig,
imp_enter={"database": (lambda self: self.config.database)},
ctx_enter={"dbctx": (lambda self: self.parent.database())},
)
async def db_query_create_table(
self, *, table_name: str, cols: List[str] = []
):
await self.dbctx.create_table(table_name=table_name, cols=cols)
@op(
inputs={"table_name": QUERY_TABLE, "data": QUERY_DATA},
outputs={},
config_cls=DatabaseQueryConfig,
imp_enter={"database": (lambda self: self.config.database)},
ctx_enter={"dbctx": (lambda self: self.parent.database())},
)
async def db_query_insert(self, *, table_name: str, data: Dict[str, Any]):
await self.dbctx.insert(table_name=table_name, data=data)
@op(
inputs={
"table_name": QUERY_TABLE,
"data": QUERY_DATA,
"conditions": QUERY_CONDITIONS,
},
outputs={},
config_cls=DatabaseQueryConfig,
imp_enter={"database": (lambda self: self.config.database)},
ctx_enter={"dbctx": (lambda self: self.parent.database())},
)
async def db_query_update(
self, *, table_name: str, data: Dict[str, Any], conditions: Conditions = []
):
await self.dbctx.update(
table_name=table_name, data=data, conditions=conditions
)
@op(
inputs={"table_name": QUERY_TABLE, "conditions": QUERY_CONDITIONS},
outputs={},
config_cls=DatabaseQueryConfig,
imp_enter={"database": (lambda self: self.config.database)},
ctx_enter={"dbctx": (lambda self: self.parent.database())},
)
async def db_query_remove(
self, *, table_name: str, conditions: Conditions = []
):
await self.dbctx.remove(table_name=table_name, conditions=conditions)
@op(
inputs={
"table_name": QUERY_TABLE,
"cols": QUERY_COLS,
"conditions": QUERY_CONDITIONS,
},
outputs={"lookups": QUERY_LOOKUPS},
config_cls=DatabaseQueryConfig,
imp_enter={"database": (lambda self: self.config.database)},
ctx_enter={"dbctx": (lambda self: self.parent.database())},
)
async def db_query_lookup(
self, *, table_name: str, cols: List[str] = [], conditions: Conditions = []
) -> Dict[str, Any]:
result = self.dbctx.lookup(
table_name=table_name, cols=cols, conditions=conditions
)
return {"lookups": [res async for res in result]}
@op(
inputs={"table_name": QUERY_TABLE, "data": QUERY_DATA},
outputs={},
config_cls=DatabaseQueryConfig,
imp_enter={"database": (lambda self: self.config.database)},
ctx_enter={"dbctx": (lambda self: self.parent.database())},
)
async def db_query_insert_or_update(
self, *, table_name: str, data: Dict[str, Any]
):
await self.dbctx.insert_or_update(table_name=table_name, data=data)
| 2.34375
| 2
|
tkdet/data/samplers/__init__.py
|
tkhe/tkdetection
| 1
|
12782068
|
from .distributed_sampler import *
from .grouped_batch_sampler import *
| 1.007813
| 1
|
Basics of the Python/DSA with Python/BST.py
|
suy1968/Hands-on-ML-Basic-to-Advance-
| 60
|
12782069
|
<gh_stars>10-100
class Node:
def __init__(self,key):
self.key=key
self.left=None
self.right=None
def insert(node,key):
if node is None:
return Node(key)
if key < node.key:
node.left = insert(node.left,key)
else:
node.right = insert(node.right,key)
return node
def search(node,key):
if node is None:
return node
if key == node.key:
return node
if key > node.key:
return search(node.right,key)
if key < node.key:
return search(node.left,key)
# Find the in-order successor (the leftmost node of the right subtree)
def minValueNode(node):
current=node
while(current.left is not None):
current=current.left
return current
def deleteNode(root, key):
# Return if the tree is empty
if root is None:
return root
# Find the node to be deleted
if key < root.key:
root.left = deleteNode(root.left, key)
elif(key > root.key):
root.right = deleteNode(root.right, key)
else:
# If the node is with only one child or no child
if root.left is None:
temp = root.right
root = None
return temp
elif root.right is None:
temp = root.left
root = None
return temp
# If the node has two children,
# place the inorder successor in position of the node to be deleted
temp = minValueNode(root.right)
root.key = temp.key
# Delete the inorder successor
root.right = deleteNode(root.right, temp.key)
return root
root = None
root = insert(root,12)
root = insert(root,23)
root = insert(root,10)
root = insert(root,100)
ok=search(root,100)
print(ok)
root=deleteNode(root,100)
ok=search(root,100)
print(ok)
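# Added sanity check, not part of the original exercise: an in-order traversal
# of a binary search tree visits the keys in ascending order.
def inorder(node):
    if node is not None:
        inorder(node.left)
        print(node.key, end=" ")
        inorder(node.right)
inorder(root)   # expected output after deleting 100: 10 12 23
print()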
| 3.734375
| 4
|
P1/HTTPClient/web_client.py
|
ykzzyk/Networking
| 0
|
12782070
|
from socket import *
import argparse
# Parameters
#TCP_IP = 'localhost'
#TCP_PORT = 12003
BUFFER_SIZE = 1024
# Arguments
parser = argparse.ArgumentParser()
parser.add_argument('server_host')
parser.add_argument('server_port')
parser.add_argument('filename')
args = parser.parse_args()
# Prepare a client socket
clientSocket = socket(AF_INET, SOCK_STREAM)
clientSocket.connect((args.server_host, int(args.server_port)))
# Send message to GET HTML file
# Filename: HelloWorld.html
MESSAGE = f'GET {args.filename}'
MESSAGE = bytes(MESSAGE, 'utf-8')
clientSocket.send(MESSAGE)
# GET the full content from the HTML file
full_content = ''
while True:
data = clientSocket.recv(BUFFER_SIZE)
if not data:
break
data = data.decode('utf-8')
full_content += data
with open('files_from_server/HelloWorld.html', 'w') as f:
f.write(full_content)
print("received data:", full_content)
# Close Client
clientSocket.close()
print("\n\nClient close successfully!")
| 3.359375
| 3
|
scvelo/plotting/scatter.py
|
fidelram/scvelo
| 0
|
12782071
|
<reponame>fidelram/scvelo<filename>scvelo/plotting/scatter.py
from .. import settings
from .. import AnnData
from .utils import make_dense, is_categorical, update_axes, set_label, set_title, interpret_colorkey, set_colorbar, \
default_basis, default_color, default_size, default_color_map, get_components, savefig_or_show, make_unique_list, \
plot_linear_fit, plot_density, default_legend_loc, make_unique_valid_list
from .docs import doc_scatter, doc_params
from matplotlib import rcParams
import matplotlib.pyplot as pl
import numpy as np
import pandas as pd
@doc_params(scatter=doc_scatter)
def scatter(adata=None, x=None, y=None, basis=None, vkey=None, color=None, use_raw=None, layer=None, color_map=None,
colorbar=True, palette=None, size=None, alpha=None, linewidth=None, perc=None, sort_order=True, groups=None,
components=None, projection='2d', legend_loc=None, legend_fontsize=None, legend_fontweight=None,
right_margin=None, left_margin=None, xlabel=None, ylabel=None, title=None, fontsize=None, figsize=None,
xlim=None, ylim=None, show_density=None, show_assigments=None, show_linear_fit=None, dpi=None, frameon=None,
show=True, save=None, ax=None, zorder=None, ncols=None, **kwargs):
"""\
Scatter plot along observations or variables axes.
Arguments
---------
adata: :class:`~anndata.AnnData`
Annotated data matrix.
x: `str`, `np.ndarray` or `None` (default: `None`)
x coordinate
y: `str`, `np.ndarray` or `None` (default: `None`)
y coordinate
{scatter}
Returns
-------
If `show==False` a `matplotlib.Axis`
"""
scatter_kwargs = {"use_raw": use_raw, "sort_order": sort_order, "alpha": alpha, "components": components,
"projection": projection, "groups": groups, "palette": palette, "legend_fontsize": legend_fontsize,
"legend_fontweight": legend_fontweight, "right_margin": right_margin, "left_margin": left_margin,
"show": False, "save": None}
adata = AnnData(np.stack([x, y]).T) if adata is None and (x is not None and y is not None) else adata
colors, layers, bases = make_unique_list(color, allow_array=True), make_unique_list(layer), make_unique_valid_list(adata, basis)
multikey = colors if len(colors) > 1 else layers if len(layers) > 1 else bases if len(bases) > 1 else None
if multikey is not None:
if isinstance(title, (list, tuple)): title *= int(np.ceil(len(multikey) / len(title)))
ncols = len(multikey) if ncols is None else min(len(multikey), ncols)
nrows = int(np.ceil(len(multikey) / ncols))
figsize = rcParams['figure.figsize'] if figsize is None else figsize
ax = []
for i, gs in enumerate(
pl.GridSpec(nrows, ncols, pl.figure(None, (figsize[0] * ncols, figsize[1] * nrows), dpi=dpi))):
if i < len(multikey):
ax.append(scatter(adata, x=x, y=y, size=size, linewidth=linewidth, xlabel=xlabel, ylabel=ylabel, vkey=vkey,
color_map=color_map, colorbar=colorbar, perc=perc, frameon=frameon, zorder=zorder,
legend_loc=legend_loc, fontsize=fontsize, xlim=xlim, ylim=ylim, ax=pl.subplot(gs),
show_density=show_density, show_assigments=show_assigments, show_linear_fit=show_linear_fit,
color=colors[i] if len(colors) > 1 else color,
layer=layers[i] if len(layers) > 1 else layer,
basis=bases[i] if len(bases) > 1 else basis,
title=title[i] if isinstance(title, (list, tuple)) else title, **scatter_kwargs, **kwargs))
savefig_or_show('' if basis is None else basis, dpi=dpi, save=save, show=show)
if not show: return ax
else:
color, layer, basis = colors[0], layers[0], bases[0]
color = default_color(adata) if color is None else color
color_map = default_color_map(adata, color) if color_map is None else color_map
is_embedding = ((x is None) | (y is None)) and basis not in adata.var_names
basis = default_basis(adata) if basis is None and is_embedding else basis
size = default_size(adata) if size is None else size
linewidth = 1 if linewidth is None else linewidth
frameon = frameon if frameon is not None else True if not is_embedding else settings._frameon
if projection == '3d':
from mpl_toolkits.mplot3d import Axes3D
ax = pl.figure(None, figsize, dpi=dpi).gca(projection=projection) if ax is None else ax
else:
ax = pl.figure(None, figsize, dpi=dpi).gca() if ax is None else ax
if is_categorical(adata, color) and is_embedding:
from scanpy.api.pl import scatter as scatter_
legend_loc = default_legend_loc(adata, color, legend_loc)
ax = scatter_(adata, basis=basis, color=color, color_map=color_map, size=size, frameon=frameon, ax=ax,
title=title, legend_loc=legend_loc, **scatter_kwargs, **kwargs)
else:
if basis in adata.var_names:
xkey, ykey = ('spliced', 'unspliced') if use_raw or 'Ms' not in adata.layers.keys() else ('Ms', 'Mu')
x = make_dense(adata[:, basis].layers[xkey]).flatten()
y = make_dense(adata[:, basis].layers[ykey]).flatten()
xlabel = 'spliced' if xlabel is None else xlabel
ylabel = 'unspliced' if ylabel is None else ylabel
title = basis if title is None else title
elif is_embedding:
X_emb = adata.obsm['X_' + basis][:, get_components(components, basis)]
x, y = X_emb[:, 0], X_emb[:, 1]
elif isinstance(x, str) and isinstance(y, str):
xlabel = x if xlabel is None else xlabel
ylabel = y if ylabel is None else ylabel
if x in adata.var_names and y in adata.var_names:
x = adata[:, x].layers[layer] if layer in adata.layers.keys() else adata[:, x].X
y = adata[:, y].layers[layer] if layer in adata.layers.keys() else adata[:, y].X
elif x in adata.var.keys() and y in adata.var.keys():
x, y = adata.var[x], adata.var[y]
elif x in adata.obs.keys() and y in adata.obs.keys():
x, y = adata.obs[x], adata.obs[y]
else:
x = x.A1 if isinstance(x, np.matrix) else x.ravel()
y = y.A1 if isinstance(y, np.matrix) else y.ravel()
if basis in adata.var_names and isinstance(color, str) and color in adata.layers.keys():
c = interpret_colorkey(adata, basis, color, perc)
else:
c = interpret_colorkey(adata, color, layer, perc)
if layer is not None and any(l in layer for l in ['spliced', 'Ms', 'Mu', 'velocity']) \
and isinstance(color, str) and color in adata.var_names:
ub = np.percentile(np.abs(c), 98)
if "vmax" not in kwargs:
kwargs.update({"vmax": ub})
if "vmin" not in kwargs and 'velocity' in layer:
kwargs.update({"vmin": -ub})
if "vmid" in kwargs:
if not isinstance(c, str) and not isinstance(c[0], str):
vmid, lb, ub = kwargs["vmid"], np.min(c), np.max(c)
crange = min(np.abs(vmid - lb), np.abs(ub - vmid))
kwargs.update({"vmin": vmid - crange, "vmax": vmid + crange})
kwargs.pop("vmid")
if groups is not None or np.any(pd.isnull(c)):
zorder = 0 if zorder is None else zorder
ax = scatter(adata, basis=basis, color='lightgrey', ax=ax, zorder=zorder, **scatter_kwargs)
zorder += 1
if basis in adata.var_names:
fits = plot_linear_fit(adata, basis, vkey, xkey, linewidth)
from .simulation import show_full_dynamics
if 'true_alpha' in adata.var.keys():
fit = show_full_dynamics(adata, basis, 'true', use_raw, linewidth)
fits.append(fit)
if 'fit_alpha' in adata.var.keys() and (vkey is None or 'dynamic' in vkey):
fit = show_full_dynamics(adata, basis, 'fit', use_raw, linewidth, show_assigments=show_assigments)
fits.append(fit)
                if len(fits) > 0 and legend_loc is not False and legend_loc != 'none':
pl.legend(fits, fontsize=legend_fontsize, loc='lower right' if legend_loc is None else legend_loc)
if use_raw and perc is not None:
pl.xlim(right=np.percentile(x, 99.9 if not isinstance(perc, int) else perc) * 1.05)
pl.ylim(top=np.percentile(y, 99.9 if not isinstance(perc, int) else perc) * 1.05)
pl.scatter(x, y, c=c, cmap=color_map, s=size, alpha=alpha, edgecolors='none', marker='.', zorder=zorder, **kwargs)
if show_density:
plot_density(x, y)
if show_linear_fit:
xnew = np.linspace(0, x.max() * 1.02)
pl.plot(xnew, xnew * (x * y).sum() / (x ** 2).sum())
set_label(xlabel, ylabel, fontsize, basis)
set_title(title, layer, color, fontsize)
ax = update_axes(ax, xlim, ylim, fontsize, is_embedding, frameon)
if colorbar and not is_categorical(adata, color): set_colorbar(ax)
savefig_or_show('' if basis is None else basis, dpi=dpi, save=save, show=show)
if not show: return ax
| 2.515625
| 3
|
python_work/Chapter8/making_pizzas.py
|
Elektra-2/python_crash_course_2nd
| 1
|
12782072
|
<filename>python_work/Chapter8/making_pizzas.py
import pizza
pizza.make_pizza(16, 'pepperoni')
pizza.make_pizza(17, 'cheese', 'ham')
| 2.3125
| 2
|
a.sikorska/CASE/case_trojkat_A_Sikorska.py
|
alsikorska/python_wprowadzenie_warsztaty_2021
| 0
|
12782073
|
class TriangleChecker():
def __init__(self, a, b, c):
self.a = a
self.b = b
self.c = c
print("Sprawdźmy czy można utworzyć trójkąt!")
def is_triangle(self):
if self.check_is_triangle() :
print("Hurra, możesz zbudować trójkąt!")
elif self.a<0 or self.b<0 or self.c<0 :
print("Przy liczbach ujemnych nic z tego nie wyjdzie!")
else:
print("Szkoda, ale nie da się z tego zrobić trójkąta.")
def check_is_triangle(self):
return self.a+self.b>self.c and self.a+self.c>self.b and self.b+self.c>self.a
def calculateArea(self):
if self.check_is_triangle():
import math
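            # Heron's formula: area = sqrt(p*(p-a)*(p-b)*(p-c)), where p is the semi-perimeter.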
p = (self.a+self.b+self.c)/2
S2 = p*(p-self.a)*(p-self.b)*(p-self.c)
pole = pow(S2, 1/2)
print(f"Pole tego trójkąta wynosi {pole}")
def calculatePerimeter(self):
if self.check_is_triangle():
obwod = self.a+self.b+self.c
print(f"Obwód trójkąta wynosi {obwod}")
def calculateCosinuses(self):
if self.check_is_triangle():
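            # Law of cosines: the cosine of the angle between sides x and y equals
            # (x**2 + y**2 - z**2) / (2*x*y), where z is the side opposite that angle.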
cos_a = (self.a**2 + self.c**2 - self.b**2) / (2*self.a*self.c)
cos_b = (self.a**2 + self.b**2 - self.c**2) / (2*self.a*self.b)
cos_y = (self.b**2 + self.c**2 - self.a**2) / (2*self.c*self.b)
print(f"Cos(a) wynosi {cos_a}, \nCos(b) wynosi {cos_b}, \ncos(y) wynosi {cos_y}.")
def start():
try:
a = float(input("Wprowadź długość pierwszego odcinka :"))
b = float(input("Wprowadź długość drugiego odcinka :"))
c = float(input("Wprowadź długość trzeciego odcinka :"))
except ValueError:
print("Wystarczy wpisać cyfry!")
        return start()
trojkat1 = TriangleChecker(a, b, c)
trojkat1.is_triangle()
trojkat1.calculateArea()
trojkat1.calculatePerimeter()
trojkat1.calculateCosinuses()
start()
| 3.9375
| 4
|
Assignements/2A_WillhelmRendahl.py
|
Dazpoet/Learning-python
| 0
|
12782074
|
<gh_stars>0
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
#Assignement:
#Create an implementation of the game "Gissa mitt tal" where the user is
#tasked with finding a random number between 1-100
#Use try-except to catch errors
import pickle
import random
import sys
def welcome_text(isfirst): #We call on this to welcome the user; if it's their first time we inform them about progression
print("Välkommen till spelet 'Gissa mitt tal'"
"\nDu kommer att få gissa vilket tal jag tänker på. Talet är som standard mellan 1 och 100."
"\nNär du gissar kommer jag att berätta om svaret är högre eller lägre än det du gissat"
"\nFörsök att lista ut numret på så få gissnigar som möjligt")
if isfirst:
print("\nNär du klarat utmaningen en gång kommer det finnas fler möjligheter.")
else:
print("\nVälj hur du vill spela ur menyn.")
def easter_egg(): #Easter egg for the initiated
print ("\n\nDu hittade den magiska Dazrullen!")
print(""" _
.--""--.___.._
( <__> ) `-.
|`--..--'| <|
| :| /
| :|--""-./
`.__ __;' o!O
""
------------------------------------------------
This ASCII art was fetched from http://www.ascii-art.de/ascii/t/toiletpaper.txt
Artist: Ojoshiro
If you are the copyright-holder of this art and would like it removed please open an issue at:
https://github.com/Dazpoet/Learning-python/issues and it will be removed with due haste.
""")
def user_input(): #We break this into its own function as to not bloat main with exception-catching
values = []
while True:
try:
start = int(input("Vilken siffra vill du att ditt spann ska börja på?: "))
stop = int(input("Vilken siffra vill du att ditt spann ska sluta på?: "))
if start > stop:
print("Startvärdet måste vara mindre än stoppvärdet. Starta om och försök igen")
else:
values.append(start)
values.append(stop)
return values
except ValueError:
print("Du måste ange heltal, annars fungerar inte spelet.")
except (EOFError, KeyboardInterrupt):
sys.exit("\nAvslutar programmet")
def number_game(first_number,last_number): #This is the actual game, we call this and supply the range we want to randomize between
CORRECT_ANSWER = random.randint(first_number,last_number)
guess_counter = 0
while True:
try:
guess = int(input("\nGissa på ett nummer så ska jag berätta om det är rätt eller ge dig en ledtråd: "))
guess_counter += 1
if guess > CORRECT_ANSWER:
if guess_counter == 1:
print("\nTalet jag tänker på är lägre än", guess, "Du har gissat", guess_counter, "gång")
else:
print("\nTalet jag tänker på är lägre än", guess, "Du har gissat", guess_counter, "gånger")
elif guess < CORRECT_ANSWER:
if guess_counter == 1:
print("\nTalet jag tänker på är högre än", guess, "Du har gissat", guess_counter, "gång")
else:
print("\nTalet jag tänker på är högre än", guess, "Du har gissat", guess_counter, "gånger")
else:
if guess_counter == 1:
print("\nDu måste ha fuskat! Jag tänkte på", CORRECT_ANSWER, "och du kom på det på första försöket. Är du en tankeläsare?")
return guess_counter
else:
print("\nRätt! Talet jag tänkte på var", CORRECT_ANSWER, "och du kom på det efter", guess_counter, "gissningar")
return guess_counter
except ValueError:
print("Du måste ange heltal")
except (EOFError, KeyboardInterrupt):
sys.exit("\nAvslutar programmet")
def open_data_store(): #Check if there is any saved data and if so, return it, else return what we expect for a first time user
try:
savefile = pickle.load(open('data_store.p','rb'))
return savefile
except FileNotFoundError: #We assume that no saved data = first time user
savefile = {'FIRST_TIME':True, 'HIGH_SCORE':100}
return savefile
except PermissionError:
print("Det går inte att öppna din sparfil, försäkra dig om att du har läsrättigheter i mappen du kör ifrån och försök igen")
sys.exit(1) #Supposedly this raises an alert of some kind in a system-log, making it better than sys.exit(0) since an exception is thrown
def save_game(savefile): #This is its own function since it's called on twice and repeat code == bad code
try:
pickle.dump(savefile,open('data_store.p','wb'))
except PermissionError:
print("Det går inte att spara din progression, försäkra dig om att du har skrivrättigheter till mappen där spelet ligger")
def main_menu():
counter = 0
while True: #We add this loop so the counter will work
try:
choice = int(input("\nVad vill du göra?"
"\n1. Spela standardspelet"
"\n2. Spela men med mitt egenvalda spann"
"\n3. Veta hur snabbt jag vunnit som snabbast"
"\n4. Återställa mina snabbaste lösning till standardvärdet (100)"
"\n5. Avsluta programmet\n"))
return choice
        except ValueError: #The code below could be put into its own function but I've chosen not to since I like toying with hiding things in errors
counter += 1
if counter <= 5:
print("Du måste ange ett existerande alternativ, det finns inga överraskningar här" + "."*(counter+1))
            elif counter > 5 and counter <= 10:
                print("\nGe dig, det finns inget här, du ödslar bara tid" * (counter - 5))
            elif counter < 15:
                print("Ok, ok... du får en ledtråd -> DET FINNS INGET HÄR!")
elif counter == 15:
easter_egg()
except (EOFError, KeyboardInterrupt):
sys.exit("\nAvslutar programmet")
def compare_to_highscore(old_hs, new_score): #Does some logic, congratulates if higher, asks to try harder if lower, returns new best score
if old_hs < new_score:
print("Tyvärr slog du inte ditt gamla rekord på", old_hs, "gissningar. Bättre lycka nästa gång!")
return old_hs
elif new_score == 1:
print("Eftersom det nu är omöjligt att slå ditt rekord av", new_score, "gissning så rekommenderar jag att du nollställer räknaren och utmanar dig själv med svårare spann")
elif old_hs == new_score:
print("Du tangerade ditt gamla rekord", old_hs, "gissningar. Försök igen för att se om du kan slå det också.")
return old_hs
else:
print("Grattis! Du har slagit ditt gamla rekord", old_hs, "gissningar, med dina",new_score, "gissningar. Det är", old_hs - new_score, "färre gissningar. Försök igen och se om du kan slå det!")
return new_score
def main():
DATA_STORE = open_data_store()
welcome_text(DATA_STORE['FIRST_TIME'])
if DATA_STORE['FIRST_TIME'] == True: #First time users get thrown directly into the game and only get one run
score = number_game(1,101)
DATA_STORE['FIRST_TIME'] = False
DATA_STORE['HIGH_SCORE'] = score
save_game(DATA_STORE)
        #Restart to make sure we catch potential errors with the savefile before the user has a lot of progress to lose
sys.exit("Du har nu klarat standardspelet, nästa gång du kör programmet kommer du kunna göra mer.")
else: #Users with saved data get access to the menu
while True:
choice = main_menu() #The menu itself is sectioned of into a function
if choice == 1: #The following choices are kept in main for better readability
score = number_game(1,100)
DATA_STORE['HIGH_SCORE'] = compare_to_highscore(DATA_STORE['HIGH_SCORE'], score)
elif choice == 2:
user_range = user_input()
score = number_game(user_range.pop(0),user_range.pop()) #There just has to be a better way to do this, using a tuple?
DATA_STORE['HIGH_SCORE'] = compare_to_highscore(DATA_STORE['HIGH_SCORE'], score)
elif choice == 3:
print("Din snabbaste lösningen var", DATA_STORE['HIGH_SCORE'], "gissningar")
elif choice == 4:
DATA_STORE['HIGH_SCORE'] = 100
print("Snabbaste lösning återställd till 100")
elif choice == 5:
save_game(DATA_STORE)
sys.exit(0)
else:
print("Du måste välja ett värde mellan 1 och 5. Lustigt nog är de siffrorna även relevanta för en helt annan del av programmet...")
main()
| 3.71875
| 4
|
src/amuse/ext/cloud.py
|
rknop/amuse
| 131
|
12782075
|
<reponame>rknop/amuse<filename>src/amuse/ext/cloud.py<gh_stars>100-1000
import inspect
import numpy
from amuse.units import generic_unit_system
from amuse import datamodel
def fill_grid_with_cloud_and_medium(
grid,
center = None,
radius = None,
rho_medium = 1.0 | generic_unit_system.mass / generic_unit_system.length**3,
rho_cloud = 0.1 | generic_unit_system.mass / generic_unit_system.length**3,
gamma = 5.0 / 3.0,
):
pass
def fill_grid_with_spherical_cloud(
grid,
center = None,
radius = None,
rho = 1.0 | generic_unit_system.mass / generic_unit_system.length**3,
rhovx = 0.0 | generic_unit_system.mass / (generic_unit_system.time * generic_unit_system.length**2),
rhovy = 0.0 | generic_unit_system.mass / (generic_unit_system.time * generic_unit_system.length**2),
rhovz = 0.0 | generic_unit_system.mass / (generic_unit_system.time * generic_unit_system.length**2),
energy = 1.0 | generic_unit_system.mass / (generic_unit_system.time**2 * generic_unit_system.length),
subgridsize = 4,
):
radii = (grid.position - center).lengths()
if subgridsize <= 1:
selection = radii <= radius
else:
dr = grid.cellsize().length()
selection = radii < (radius - dr)
grid.rho[selection] = rho(radii) if inspect.isroutine(rho) else rho
grid.rhovx[selection] = rhovx
grid.rhovy[selection] = rhovy
grid.rhovz[selection] = rhovz
grid.energy[selection] = energy
if subgridsize <= 1:
return
selection = numpy.logical_and( radii >= (radius-dr), radii <= (radius+dr))
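    # Cells straddling the cloud surface are super-sampled on a subgridsize^3 subgrid
    # below, and their quantities are replaced with subgrid averages to smooth the edge.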
subgrid = datamodel.Grid.create((subgridsize, subgridsize, subgridsize), grid.cellsize())
subgrid.x -= grid.cellsize()[0] / 2.0
subgrid.y -= grid.cellsize()[1] / 2.0
subgrid.z -= grid.cellsize()[2] / 2.0
x_indices, y_indices, z_indices = grid.indices()
x_indices = x_indices[selection]
y_indices = y_indices[selection]
z_indices = z_indices[selection]
position = subgrid.position
centers = center - grid.position[selection]
subgrid_rho = rho * numpy.ones_like(subgrid.x.number)
subgrid_rhovx = rhovx * numpy.ones_like(subgrid.x.number)
subgrid_rhovy = rhovy * numpy.ones_like(subgrid.x.number)
subgrid_rhovz = rhovz * numpy.ones_like(subgrid.x.number)
subgrid_energy = energy * numpy.ones_like(subgrid.x.number)
update_grid_rho = grid.rho[selection]
update_grid_rhovx = grid.rhovx[selection]
update_grid_rhovy = grid.rhovy[selection]
update_grid_rhovz = grid.rhovz[selection]
update_grid_energy = grid.energy[selection]
for i in range(len(x_indices)):
x_index = x_indices[i]
y_index = y_indices[i]
z_index = z_indices[i]
center_of_cloud_for_subgrid = centers[i]
radii = (position - center_of_cloud_for_subgrid).lengths()
subgrid_rho[...] = update_grid_rho[i]
subgrid_rhovx[...] = update_grid_rhovx[i]
subgrid_rhovy[...] = update_grid_rhovy[i]
subgrid_rhovz[...] = update_grid_rhovz[i]
subgrid_energy[...] = update_grid_energy[i]
subgrid_selection = radii <= radius
subgrid_rho[subgrid_selection] = rho
subgrid_rhovx[subgrid_selection] = rhovx
subgrid_rhovy[subgrid_selection] = rhovy
subgrid_rhovz[subgrid_selection] = rhovz
subgrid_energy[subgrid_selection] = energy
update_grid_rho[i] = subgrid_rho.mean()
update_grid_rhovx[i] = subgrid_rhovx.mean()
update_grid_rhovy[i] = subgrid_rhovy.mean()
update_grid_rhovz[i] = subgrid_rhovz.mean()
update_grid_energy[i] = subgrid_energy.mean()
grid.rho[selection] = update_grid_rho
grid.rhovx[selection] = update_grid_rhovx
grid.rhovy[selection] = update_grid_rhovy
grid.rhovz[selection] = update_grid_rhovz
grid.energy[selection] = update_grid_energy
def fill_grid_with_cloud_shock(
grid,
center = None,
radius = None,
ratio_densities = 10.0,
mach_number = 2.7,
gamma = 5.0/3.0,
subgridsize = 4,
):
velocity_unit = generic_unit_system.length / generic_unit_system.time
momentum_unit = generic_unit_system.mass / (generic_unit_system.time * generic_unit_system.length**2)
density_unit = generic_unit_system.mass / generic_unit_system.length**3
energy_unit = generic_unit_system.mass / (generic_unit_system.time**2 * generic_unit_system.length)
velocity_of_medium = (numpy.sqrt(gamma*(gamma-1.0)*ratio_densities) * mach_number) | velocity_unit
rho_in_cloud = 1.0 | density_unit
rhovx_in_cloud = 0.0 | momentum_unit
rhovy_in_cloud = 0.0 | momentum_unit
rhovz_in_cloud = 0.0 | momentum_unit
energy_in_cloud = 1.0 | energy_unit
rho_in_medium = 1.0 / ratio_densities | density_unit
rhovx_in_medium = 0.0 | momentum_unit
rhovy_in_medium = rho_in_medium * velocity_of_medium
rhovz_in_medium = 0.0 | momentum_unit
energy_in_medium = (1.0 | energy_unit) + (0.5* rho_in_medium * velocity_of_medium**2)
grid.rho = rho_in_medium
grid.rhovx = rhovx_in_medium
grid.rhovy = rhovy_in_medium
grid.rhovz = rhovz_in_medium
grid.energy = energy_in_medium
fill_grid_with_spherical_cloud(grid, center, radius, rho_in_cloud, rhovx_in_cloud, rhovy_in_cloud, rhovz_in_cloud, energy_in_cloud, subgridsize)
| 2.328125
| 2
|
vgablog.py
|
PeterFidelman/vga-blog
| 0
|
12782076
|
#!/usr/bin/python
import os
import datetime
import PyRSS2Gen
kInDir = "raw_post"
kTmplDir = "template"
kBlogDir = "site/blog"
kPostsDir = "site/blog/posts"
def main():
postlist = posts()
archive(postlist)
def posts():
postlist = []
# Create the output directory if it doesn't already exist
os.makedirs(kPostsDir, exist_ok=True)
postHeader = getTemplate("posthead.htm")
postFooter = getTemplate("postfoot.htm")
postTitle = getTemplate("postitle.htm")
for fInName in os.listdir(kInDir):
fInPath = os.path.join(kInDir, fInName)
fOutName = os.path.splitext(fInName)[0] + ".htm"
fOutPath = os.path.join(kPostsDir, fOutName)
fIn = open(fInPath, "r")
fOut = open(fOutPath, "w")
# emit post header
fOut.write(postHeader)
# parse & consume 1st input line -> title
title = fIn.readline()
# parse & consume 2nd input line -> date
date = fIn.readline()
# store (title, date, filename)
postlist.append((title, date, fOutName))
# emit post titlebox
fOut.write(postTitle % (len(title) + 4, title, date))
# write remaining lines
# wrapping with <pre></pre> unless input was a .htm file
if not fInName.endswith(".htm"):
fOut.write("<pre>\n")
        while True:
line = fIn.readline()
if not line:
break
fOut.write(line)
if not fInName.endswith(".htm"):
fOut.write("</pre>\n")
# emit post footer
fOut.write(postFooter)
fIn.close()
# close post htm file
fOut.close()
return postlist
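# For reference, a hypothetical file in raw_post/ looks like this (title on line 1,
# an ISO date on line 2, body from line 3 onward; non-.htm bodies get wrapped in <pre>):
#
#   Hello VGA world
#   2021-06-01
#   Body text goes here.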
def archive(postlist):
archiveHeader = getTemplate("archhead.htm")
archiveFooter = getTemplate("archfoot.htm")
archiveDiv = getTemplate("archdiv.htm")
redirectHtml = getTemplate("redirect.htm")
# sort the (title, date, filename) data structure by date
# (ASCIIbetical descending)
postlist.sort(key=lambda t: t[1], reverse=True)
# create redirect htm file
fRdOutPath = os.path.join(kBlogDir, "index.htm")
with open(fRdOutPath, "w") as f:
# emit filename of newest post
f.write(redirectHtml % postlist[0][2])
# create archive htm file
fOutPath = os.path.join(kPostsDir, "index.htm")
fOut = open(fOutPath, "w")
# create archive rss feed
rss = PyRSS2Gen.RSS2(
title = "VGA Blog",
link = "http://example.com/blog",
description = "",
lastBuildDate = datetime.datetime.now())
# emit archive header
fOut.write(archiveHeader)
# for each datum
for tup in postlist:
(title, date, filename) = tup
date = date.strip()
# emit div
s = archiveDiv % (date.strip(), filename, title.strip())
fOut.write(s)
# emit rss entry
rss.items.append(
PyRSS2Gen.RSSItem(
title = title,
link =
"https://example.com/blog/posts/%s" % filename,
description = "",
pubDate = datetime.datetime.strptime(date, "%Y-%m-%d")))
# emit archive footer
fOut.write(archiveFooter)
# close archive htm file
fOut.close()
# write rss feed
with open(os.path.join(kBlogDir, "rss.xml"), "w") as rssFile:
rss.write_xml(rssFile)
def getTemplate(name):
path = os.path.join(kTmplDir, name)
with open(path, "r") as f: contents = "".join(f.readlines())
return contents
if __name__ == "__main__":
main()
| 2.84375
| 3
|
texsymdetect/service/lib/symbol_search.py
|
andrewhead/symbol-extractor
| 1
|
12782077
|
import logging
import math
import re
from collections import defaultdict
from dataclasses import dataclass
from typing import Dict, Iterable, Iterator, List, Optional, Sequence, Set, Tuple
import cv2
import numpy as np
import scipy.spatial
from lib.image_processing import (
Point,
Rectangle,
_contains_start_graphic,
find_boxes_with_rgb,
find_in_image,
)
from lib.instrument_tex import Detectable, FontSize
from lib.parse_formula_tex import TexSymbol, TexToken
logger = logging.getLogger("texsymdetect")
PageNumber = int
MathMl = str
@dataclass(frozen=True)
class Id:
"""
To uniquely identify a symbol in the symbol search functionality (i.e., not confuse
two symbols with each other), one needs both the MathML for the symbol, and the
size it was rendered at.
"""
mathml: str
level: FontSize
@dataclass
class TokenInstance:
id_: Id
location: Rectangle
@dataclass
class SymbolInstance:
id_: Id
location: Rectangle
@dataclass
class TokenTemplate:
symbol: Id
images: List[np.array]
@dataclass
class Component:
symbol_id: Id
center: Point
" Position of center of component, relative to center of anchor component. "
@dataclass
class SymbolTemplate:
anchor: Id
" Leftmost member of the composite template. "
members: List[Component]
" All members of the composite template except for the anchor. "
def create_symbol_template(
symbol_image: np.array,
token_images: Dict[MathMl, Dict[FontSize, List[np.array]]],
token_mathmls: Iterable[str],
require_blank_border_around_tokens: bool = True,
) -> Optional[SymbolTemplate]:
# Unpack token images into a 1-D list.
token_image_list: List[np.array] = []
mathmls: List[MathMl] = []
font_sizes: List[FontSize] = []
for mathml, sizes in token_images.items():
if mathml not in token_mathmls:
continue
for font_size, images in sizes.items():
for image in images:
token_image_list.append(image)
font_sizes.append(font_size)
mathmls.append(mathml)
# Search in image for tokens.
rects = find_in_image(
token_image_list,
symbol_image,
require_blank_border=require_blank_border_around_tokens,
)
# Unroll tokens into a 1-D list.
rects_unrolled: List[Rectangle] = []
mathmls_unrolled: List[MathMl] = []
font_sizes_unrolled: List[FontSize] = []
for mathml, font_size, rect_list in zip(mathmls, font_sizes, rects):
for rect in rect_list:
rects_unrolled.append(rect)
mathmls_unrolled.append(mathml)
font_sizes_unrolled.append(font_size)
# Find positions of child symbols in the composite symbol image.
components: List[Component] = []
# Add tokens to the template left-to-right.
for (mathml, font_size, rect) in sorted(
zip(mathmls_unrolled, font_sizes_unrolled, rects_unrolled),
key=lambda t: t[2].left,
):
if mathml in token_mathmls:
center = Point(rect.left + rect.width / 2.0, rect.top + rect.height / 2.0)
component = Component(Id(mathml, font_size), center)
if component not in components:
components.append(component)
# Composite symbol needs at least one component.
if not components:
return None
# Select 'anchor' for the template as the leftmost component.
components.sort(key=lambda c: c.center.x)
anchor = components.pop(0)
# Normalize the positions of components relative to the anchor.
for component in components:
component.center.x -= anchor.center.x
component.center.y -= anchor.center.y
# assert (
# False
# ), "May want to filter out overlapping tokens... for instance, by blanking out the part of the image that matches."
return SymbolTemplate(anchor.symbol_id, components)
def extract_templates(
page_images: Dict[PageNumber, np.array], detectables: Sequence[Detectable],
) -> Tuple[Dict[Detectable, List[np.array]], Dict[Detectable, SymbolTemplate]]:
"""
Given images of pages from a paper that has been modified to include appearances of many tokens
and symbols (i.e., 'detectables'), extract templates for those tokens and symbols
that can be used to identify them in other documents.
Returns a collection of token templates (images), and symbol templates
(a flexible template format).
Note that both tokens and symbols must be passed in as detectables;
symbols cannot be found without first finding their component tokens. All
detectables should be provided in the order that they appear in the TeX,
which should include all tokens first, followed by all symbols.
"""
sorted_page_images = [page_images[pn] for pn in sorted(page_images.keys())]
def dequeue_page() -> Optional[np.array]:
" Remove image of the next page from the list of all pages in the document. "
if not sorted_page_images:
return None
image = sorted_page_images.pop(0)
return image
page_image = dequeue_page()
next_page_image = dequeue_page()
# Scan all pages until the marker is found that suggests that the original LaTeX
# document has ended, and the detectables (i.e., colorized tokens and symbols)
# are about to appear.
while True:
if not _contains_start_graphic(page_image):
page_image = next_page_image
next_page_image = dequeue_page()
continue
# Once the marker has been found, skip forward one more page so that
# symbols and tokens will be detected on the page after the marker.
page_image = next_page_image
next_page_image = dequeue_page()
break
# Templates are extracted for detecting both tokens and symbols. Templates
# for tokens are images of single letters or marks. Templates for symbols
# are groups of tokens and the expected (but somewhat flexible) spatial
# relationships between them.
token_images: Dict[Detectable, List[np.array]] = defaultdict(list)
token_images_lookup: Dict[MathMl, Dict[FontSize, List[np.array]]] = defaultdict(
dict
)
symbol_templates: Dict[Detectable, SymbolTemplate] = {}
for d in detectables:
# Find a bounding box around the token / symbol.
red, green, blue = d.color
rects = find_boxes_with_rgb(page_image, red, green, blue)
if next_page_image is not None:
if not rects:
rects = find_boxes_with_rgb(next_page_image, red, green, blue)
if not rects:
logger.warning("Could not find detectable %s.", d)
continue
page_image = next_page_image
next_page_image = dequeue_page()
else:
rects.extend(find_boxes_with_rgb(next_page_image, red, green, blue))
if len(rects) > 1:
logger.warning(
"Unexpectedly more than one instance of detectable %s. "
+ "There may have been a problem in the coloring code.",
d,
)
        if not rects:
            logger.warning("Could not find detectable %s.", d)
            continue
box = rects[0]
logger.debug(f"Found symbol at {box}.")
# Extract a cropped, black-and-white image of the token or symbol.
cropped_bw = page_image[
box.top : box.top + box.height, box.left : box.left + box.width
]
cropped_bw[
np.where(
(cropped_bw[:, :, 0] != 255)
| (cropped_bw[:, :, 1] != 255)
| (cropped_bw[:, :, 2] != 255)
)
] = [0, 0, 0]
cropped_bw = cv2.cvtColor(cropped_bw, cv2.COLOR_BGR2GRAY)
# For simple symbols, extract images.
if isinstance(d.entity, TexToken):
# Only save a template if it has a different appearance from the other templates
# saved for a symbol. This is important as a bunch of templates for the symbol
# at the same size are created to try to make sure that templates are saved for
# every way that extra space might have been introduced between characters in the
# symbol when the PDF was rendered to an image.
already_saved = False
for img in token_images[d]:
if np.array_equal(img, cropped_bw):
already_saved = True
break
if not already_saved:
token_images[d].append(cropped_bw)
lookup_dict = token_images_lookup[d.entity.mathml]
if d.font_size not in lookup_dict:
lookup_dict[d.font_size] = []
lookup_dict[d.font_size].append(cropped_bw)
# Note that, if the caller of this function did their job in ordering the list of
# detectables, symbols will be processed only after all tokens have been processed.
if isinstance(d.entity, TexSymbol):
token_mathmls = [t.mathml for t in d.entity.tokens]
template = create_symbol_template(
cropped_bw, token_images_lookup, token_mathmls
)
if template:
symbol_templates[d] = template
return token_images, symbol_templates
class TokenIndex:
" Index of appearances of all tokens on a page. "
def __init__(self, tokens: Iterable[TokenInstance]) -> None:
self._tokens: List[TokenInstance] = list(tokens)
# Build a KD search tree over symbols to support faster spatial querying.
token_centers = [
(
t.location.left + t.location.width / 2.0,
t.location.top + t.location.height / 2.0,
)
            for t in self._tokens
        ]
        if not self._tokens:
token_centers = np.empty(shape=(0, 2))
self._tree = scipy.spatial.KDTree(token_centers)
def get_instances(self, id_: Id = None) -> List[TokenInstance]:
" Get all tokens with a specific key. "
if not id_:
return list(self._tokens)
return [t for t in self._tokens if t.id_ == id_]
def find(
self, id_: Id, center: Point, tolerance: Optional[Point] = None,
) -> List[TokenInstance]:
"""
Get all tokens near a specific point matching a specification for the token
(its key and level). Matching tokens are returned if:
* its center x falls within [center[0] - tolerance[0], center[0] + tolerance[0]]
* its center y falls within [center[1] - tolerance[1], center[1] + tolerance[1]]
"""
tolerance = tolerance or Point(1.0, 1.0)
        # Initial query for candidate symbols is made using the KDTree 'query_ball_point' method,
        # as in many cases it filters symbols by their position in two-dimensional space more
        # efficiently than iteratively searching over a list of all symbols.
radius = math.sqrt(tolerance.x * tolerance.x + tolerance.y * tolerance.y)
nearby_points = self._tree.query_ball_point(x=[center.x, center.y], r=radius)
matches = []
for token_i in nearby_points:
# Rule out symbols that are not the requested symbol.
token = self._tokens[token_i]
if token.id_ != id_:
continue
# Rule out symbols that are not within the tolerated distance of the query point.
token_center_x = token.location.left + token.location.width / 2.0
token_center_y = token.location.top + token.location.height / 2.0
if (
abs(token_center_x - center.x) > tolerance.x
or abs(token_center_y - center.y) > tolerance.y
):
continue
matches.append(token)
return matches
def detect_tokens(
page_images: Dict[PageNumber, np.array],
token_images: Dict[Detectable, List[np.array]],
require_blank_border: bool = True,
) -> Dict[PageNumber, TokenIndex]:
"""
Detect appearances of tokens in images of pages. If 'require_blank_border' is set,
filter the detected tokens to just those that are surrounded with whitespace. This
option is intended to help reduce the number of false positives. See the
implementation comments below for more details.
"""
tokens: Dict[PageNumber, TokenIndex] = {}
# Unpack token images into a 1-D list.
token_image_list = []
token_list = []
for (token, images) in token_images.items():
for image in images:
token_image_list.append(image)
token_list.append(token)
for page_no, page_image in sorted(page_images.items(), key=lambda t: t[0]):
logger.debug("Detecting tokens on page %d.", page_no)
page_image_gray = cv2.cvtColor(page_image, cv2.COLOR_BGR2GRAY)
rects = find_in_image(
token_image_list,
page_image_gray,
require_blank_border=require_blank_border,
)
token_instances: List[TokenInstance] = []
for (token, rect_list) in zip(token_list, rects):
for rect in rect_list:
token_instances.append(
TokenInstance(
id_=Id(token.entity.mathml, token.font_size), location=rect
)
)
tokens[page_no] = TokenIndex(token_instances)
return tokens
def detect_symbols(
token_instances: Dict[PageNumber, TokenIndex],
symbol_templates: Dict[Detectable, SymbolTemplate],
) -> Dict[PageNumber, List[SymbolInstance]]:
symbol_instances: Dict[PageNumber, List[SymbolInstance]] = defaultdict(list)
for page_no, token_index in token_instances.items():
logger.debug("Scanning page %d for symbols.", page_no)
for detectable, template in symbol_templates.items():
for rect in find_symbols(template, token_index):
instance = SymbolInstance(
Id(detectable.entity.mathml, detectable.font_size), rect
)
# Deduplicate symbols, in case two symbols are actually the same symbol (as
# may happen if two symbols had different TeX, but the same MathML).
if instance not in symbol_instances[page_no]:
symbol_instances[page_no].append(instance)
return symbol_instances
def find_symbols(template: SymbolTemplate, index: TokenIndex) -> Iterator[Rectangle]:
"""
Search for appearances of a symbol given an index of tokens.
"""
# Search for anchors---that is, leftmost glyphs in a symbol, relative
# to which all other tokens in a composite symbol will be searched.
anchor_candidates = index.get_instances(template.anchor)
# For each anchor found, attempt to fill out the rest of the composite symbol template.
for a in anchor_candidates:
template_incomplete = False
member_matches: List[TokenInstance] = []
anchor_center_x = a.location.left + a.location.width / 2.0
anchor_center_y = a.location.top + a.location.height / 2.0
# For each expected member of the composite symbol (i.e., all simple symbols the composite
# symbol should be made up of), search for appearances of the member at the expected
# location relative to the anchor.
for member in template.members:
expected_center = Point(
anchor_center_x + member.center.x, anchor_center_y + member.center.y
)
# Note that the tolerance for the position of a member symbol is higher the further away
# that member is from the anchor, as it is assumed that TeX might insert or remove space
# between members, which will accumulate the further away the member is from the anchor.
tolerance = Point(
math.ceil(abs(member.center.x) / 5.0) + 1,
math.ceil(abs(member.center.y) / 5.0) + 1,
)
member_candidates = index.find(
id_=member.symbol_id, center=expected_center, tolerance=tolerance,
)
# If multiple symbols could fill the member slot in the composite symbol, select the
# leftmost symbol that has not yet been used to fill a slot.
member_found = False
member_candidates.sort(key=lambda c: c.location.left)
for m in member_candidates:
if m not in member_matches:
member_matches.append(m)
member_found = True
break
# If any member slot of the template cannot be filled, a composite symbol cannot be
# created. Advance to the next potential anchor.
if not member_found:
template_incomplete = True
break
# Create an instance of the composite symbol if the template has been completed.
if not template_incomplete:
tokens = [a] + member_matches
left = min([t.location.left for t in tokens])
top = min([t.location.top for t in tokens])
right = max([t.location.left + t.location.width for t in tokens])
bottom = max([t.location.top + t.location.height for t in tokens])
yield Rectangle(left, top, right - left, bottom - top)
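if __name__ == "__main__":
    # Hedged usage sketch, not part of the original module. It assumes that
    # Point(x, y) and Rectangle(left, top, width, height) take positional
    # arguments, consistent with how they are constructed above, and it treats
    # the font-size level as an opaque label.
    demo_tokens = [
        TokenInstance(id_=Id("<mi>x</mi>", "normalsize"), location=Rectangle(10, 20, 8, 10)),
        TokenInstance(id_=Id("<mn>2</mn>", "normalsize"), location=Rectangle(19, 16, 5, 7)),
    ]
    demo_index = TokenIndex(demo_tokens)
    # Query near the center of the first token; exactly one match is expected.
    print(demo_index.find(Id("<mi>x</mi>", "normalsize"), Point(14.0, 25.0), Point(2.0, 2.0)))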
| 2.296875
| 2
|
Reg_exp_Emp_fields.py
|
abhisek1985/Python-Unit-Testing
| 1
|
12782078
|
import re
def check_userID(ID):
''' Rule : UserID consists of [A-Z|a-z|0-9] '''
    match = re.search(r'^[A-Za-z0-9]+$', ID)
#print match.group(0),
if match:
return True
else:
return False
def check_jobTitleName(Job_Title):
    ''' Rule: Employee job title starts with [A-Z] followed by one or more [a-z];
    optionally, a space and a second capitalised word ([A-Z][a-z]+) may follow. '''
match = re.search(r'(^[A-Z][a-z]+)( [A-Z][a-z]+)?$', Job_Title)
if match:
'''print "Job Title", match.group(0),
print "First part of Job title:", match.group(1),
print "Second part of Job title:", match.group(2), '''
return True
else:
return False
def check_firstName(First_Name):
''' Rule: Starts with [A-Z] the multiple occurrences of [a-z]. '''
    match = re.search(r'^[A-Z][a-z]+$', First_Name)
if match:
#print match.group(0),
return True
else:
return False
def check_lastName(Last_Name):
''' Rule: Starts with [A-Z] the multiple occurrences of [a-z]. '''
    match = re.search(r'^[A-Z][a-z]+$', Last_Name)
if match:
#print match.group(0),
return True
else:
return False
def check_preferredFullName(Full_Name):
''' Rule: Combination of first and last names. '''
match = re.search(r'(^[A-Z][a-z]+) ([A-Z][a-z]+)$', Full_Name)
if match:
'''print "Full Name:", match.group(0),
print "First Name:", match.group(1),
print "Last Name:", match.group(2), '''
return True
else:
return False
def check_employeeCode(Emp_Code):
    ''' Rule: Starts with 'E' followed by one or more digits [0-9]. '''
match = re.search(r'^E\d+', Emp_Code)
if match:
#print match.group(0),
return True
else:
return False
def check_region(Working_Place):
''' Rule: Short form of states in US. '''
match = re.search(r'[A-Z]{2}', Working_Place)
if match:
#print match.group(0),
return True
else:
return False
def check_phoneNumber(Contact_Number):
    ''' Rule: Total 10 digits: a 3-digit area code, a dash, then 7 digits. '''
match = re.search(r'\d{3}-\d{7}', Contact_Number)
if match:
#print match.group(0),
return True
else:
return False
def check_emailAddress(Email_Address):
''' Rule: <host name>@<provider name>.<DNS type> '''
match = re.search(r'(^\w+\.?\w+)@(\w+\.\w+$)', Email_Address)
if match:
'''print "Email Address:", match.group(0),
print "Host part:", match.group(1),
print "Domain part:", match.group(2), '''
return True
else:
return False
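if __name__ == '__main__':
    # Added smoke test, not part of the original module: exercises a few of the
    # validators with made-up sample values.
    print(check_userID("EMP1234"))                         # True
    print(check_preferredFullName("John Smith"))           # True
    print(check_phoneNumber("123-4567890"))                # True
    print(check_emailAddress("john.smith@example.com"))    # True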
| 3.453125
| 3
|
src/hera/workflow.py
|
bchalk101/hera-workflows
| 0
|
12782079
|
"""The implementation of a Hera workflow for Argo-based workflows"""
from typing import Dict, List, Optional, Tuple
from argo_workflows.models import (
IoArgoprojWorkflowV1alpha1DAGTemplate,
IoArgoprojWorkflowV1alpha1Template,
IoArgoprojWorkflowV1alpha1VolumeClaimGC,
IoArgoprojWorkflowV1alpha1Workflow,
IoArgoprojWorkflowV1alpha1WorkflowSpec,
IoArgoprojWorkflowV1alpha1WorkflowTemplateRef,
LocalObjectReference,
ObjectMeta,
)
from hera.affinity import Affinity
from hera.host_alias import HostAlias
from hera.security_context import WorkflowSecurityContext
from hera.task import Task
from hera.ttl_strategy import TTLStrategy
from hera.volume_claim_gc import VolumeClaimGCStrategy
from hera.workflow_editors import add_head, add_tail, add_task, add_tasks, on_exit
from hera.workflow_service import WorkflowService
class Workflow:
"""A workflow representation.
The workflow is used as a functional representation for a collection of tasks and
steps. The workflow context controls the overall behaviour of tasks, such as whether to notify completion, whether
    to execute retries, overall parallelism, etc. The workflow can be constructed and submitted to multiple Argo
endpoints as long as a token can be associated with the endpoint at the given domain.
Parameters
----------
name: str
The workflow name. Note that the workflow initiation will replace underscores with dashes.
service: Optional[WorkflowService] = None
A workflow service to use for submissions. See `hera.v1.workflow_service.WorkflowService`.
parallelism: int = 50
The number of parallel tasks to run in case a task group is executed for multiple tasks.
service_account_name: Optional[str] = None
The name of the service account to use in all workflow tasks.
labels: Optional[Dict[str, str]] = None
A Dict of labels to attach to the Workflow object metadata
annotations: Optional[Dict[str, str]] = None
A Dict of annotations to attach to the Workflow object metadata
namespace: Optional[str] = 'default'
The namespace to use for creating the Workflow. Defaults to "default"
security_context: Optional[WorkflowSecurityContext] = None
Define security settings for all containers in the workflow.
image_pull_secrets: Optional[List[str]] = None
A list of image pull secrets. This is used to authenticate with the private image registry of the images
used by tasks.
workflow_template_ref: Optional[str] = None
The name of the workflowTemplate reference. WorkflowTemplateRef is a reference to a WorkflowTemplate resource.
        Whether the referenced template is cluster-scoped or not (the clusterScope boolean attribute), it can be
        reused again and again when creating new Workflows without re-specifying the same tasks and
dependencies. Official doc: https://argoproj.github.io/argo-workflows/fields/#workflowtemplateref
ttl_strategy: Optional[TTLStrategy] = None
The time to live strategy of the workflow.
volume_claim_gc_strategy: Optional[VolumeClaimGCStrategy] = None
Define how to delete volumes from completed Workflows.
host_aliases: Optional[List[HostAlias]] = None
Mappings between IP and hostnames.
node_selectors: Optional[Dict[str, str]] = None
A collection of key value pairs that denote node selectors. This is used for scheduling purposes. If the task
requires GPU resources, clients are encouraged to add a node selector for a node that can satisfy the
requested resources. In addition, clients are encouraged to specify a GPU toleration, depending on the platform
they submit the workflow to.
affinity: Optional[Affinity] = None
The task affinity. This dictates the scheduling protocol of the pods running the tasks of the workflow.
"""
def __init__(
self,
name: str,
service: Optional[WorkflowService] = None,
parallelism: int = 50,
service_account_name: Optional[str] = None,
labels: Optional[Dict[str, str]] = None,
annotations: Optional[Dict[str, str]] = None,
namespace: Optional[str] = None,
security_context: Optional[WorkflowSecurityContext] = None,
image_pull_secrets: Optional[List[str]] = None,
workflow_template_ref: Optional[str] = None,
ttl_strategy: Optional[TTLStrategy] = None,
volume_claim_gc_strategy: Optional[VolumeClaimGCStrategy] = None,
host_aliases: Optional[List[HostAlias]] = None,
node_selectors: Optional[Dict[str, str]] = None,
affinity: Optional[Affinity] = None,
):
self.name = f'{name.replace("_", "-")}' # RFC1123
self.namespace = namespace or 'default'
self.service = service or WorkflowService()
self.parallelism = parallelism
self.security_context = security_context
self.service_account_name = service_account_name
self.labels = labels
self.annotations = annotations
self.image_pull_secrets = image_pull_secrets
self.workflow_template_ref = workflow_template_ref
self.node_selector = node_selectors
self.ttl_strategy = ttl_strategy
self.affinity = affinity
self.dag_template = IoArgoprojWorkflowV1alpha1DAGTemplate(tasks=[])
self.exit_template = IoArgoprojWorkflowV1alpha1Template(
name='exit-template',
steps=[],
dag=IoArgoprojWorkflowV1alpha1DAGTemplate(tasks=[]),
parallelism=self.parallelism,
)
self.template = IoArgoprojWorkflowV1alpha1Template(
name=self.name,
steps=[],
dag=self.dag_template,
parallelism=self.parallelism,
)
if self.workflow_template_ref:
self.workflow_template = IoArgoprojWorkflowV1alpha1WorkflowTemplateRef(name=self.workflow_template_ref)
self.spec = IoArgoprojWorkflowV1alpha1WorkflowSpec(
workflow_template_ref=self.workflow_template,
entrypoint=self.workflow_template_ref,
volumes=[],
volume_claim_templates=[],
parallelism=self.parallelism,
)
else:
self.spec = IoArgoprojWorkflowV1alpha1WorkflowSpec(
templates=[self.template],
entrypoint=self.name,
volumes=[],
volume_claim_templates=[],
parallelism=self.parallelism,
)
if ttl_strategy:
setattr(self.spec, 'ttl_strategy', ttl_strategy.argo_ttl_strategy)
if volume_claim_gc_strategy:
setattr(
self.spec,
'volume_claim_gc',
IoArgoprojWorkflowV1alpha1VolumeClaimGC(strategy=volume_claim_gc_strategy.value),
)
if host_aliases:
setattr(self.spec, 'host_aliases', [h.argo_host_alias for h in host_aliases])
if self.security_context:
security_context = self.security_context.get_security_context()
setattr(self.spec, 'security_context', security_context)
if self.service_account_name:
setattr(self.template, 'service_account_name', self.service_account_name)
setattr(self.spec, 'service_account_name', self.service_account_name)
if self.image_pull_secrets:
secret_refs = [LocalObjectReference(name=name) for name in self.image_pull_secrets]
setattr(self.spec, 'image_pull_secrets', secret_refs)
if self.affinity:
setattr(self.exit_template, 'affinity', self.affinity.get_spec())
setattr(self.template, 'affinity', self.affinity.get_spec())
self.metadata = ObjectMeta(name=self.name)
if self.labels:
setattr(self.metadata, 'labels', self.labels)
if self.annotations:
setattr(self.metadata, 'annotations', self.annotations)
if self.node_selector:
setattr(self.dag_template, 'node_selector', self.node_selector)
setattr(self.template, 'node_selector', self.node_selector)
setattr(self.exit_template, 'node_selector', self.node_selector)
self.workflow = IoArgoprojWorkflowV1alpha1Workflow(metadata=self.metadata, spec=self.spec)
def add_task(self, t: Task) -> None:
add_task(self, t)
def add_tasks(self, *ts: Task) -> None:
add_tasks(self, *ts)
def add_head(self, t: Task, append: bool = True) -> None:
add_head(self, t, append=append)
def add_tail(self, t: Task, append: bool = True) -> None:
add_tail(self, t, append=append)
def create(self, namespace: Optional[str] = None) -> IoArgoprojWorkflowV1alpha1Workflow:
"""Creates the workflow"""
if namespace is None:
namespace = self.namespace
return self.service.create(self.workflow, namespace)
def on_exit(self, *t: Task) -> None:
on_exit(self, *t)
def delete(self, namespace: Optional[str] = None) -> Tuple[object, int, dict]:
"""Deletes the workflow"""
if namespace is None:
            namespace = self.namespace
return self.service.delete(self.name)
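# A minimal usage sketch of the class above (assumptions: the surrounding class is the
# `Workflow` wrapper and `task_a`/`task_b` are pre-built `Task` objects; the names and
# keyword arguments shown are illustrative, not a documented API):
#
#   w = Workflow('my-pipeline', namespace='argo')
#   w.add_tasks(task_a, task_b)
#   w.create()        # submits through the underlying WorkflowService
#   ...
#   w.delete()        # removes the workflow again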
| 2.375
| 2
|
242. Valid Anagram/solution1.py
|
sunshot/LeetCode
| 0
|
12782080
|
import collections
class Solution:
def isAnagram(self, s: str, t: str) -> bool:
if not s and not t:
return True
if not s or not t:
return False
if len(s) != len(t):
return False
s_hash = collections.defaultdict(int)
t_hash = collections.defaultdict(int)
for i in range(len(s)):
s_hash[s[i]] += 1
t_hash[t[i]] += 1
if len(s_hash) != len(t_hash):
return False
for x in s_hash:
if s_hash[x] != t_hash[x]:
return False
return True
if __name__ == '__main__':
solution = Solution()
s = "anagram"
t = "nagaram"
result = solution.isAnagram(s, t)
print(result)
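# Note: an equivalent and shorter approach (shown only as an alternative sketch, not part
# of the original solution) is to compare character counts directly:
#
#   def isAnagram(self, s: str, t: str) -> bool:
#       return collections.Counter(s) == collections.Counter(t)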
| 3.6875
| 4
|
pyyacp/testing/test_format_encoder.py
|
ODInfoBiz/pyyacp
| 1
|
12782081
|
<filename>pyyacp/testing/test_format_encoder.py
# -*- coding: utf-8 -*-
import codecs
import unittest
import string
import sys
from pyyacp.column_format_detector import translate
def for_examples(parameters):
def tuplify(x):
if not isinstance(x, tuple):
return (x,)
return x
def decorator(method, parameters=parameters):
for parameter in (tuplify(x) for x in parameters):
def method_for_parameter(self, method=method, parameter=parameter):
method(self, *parameter)
args_for_parameter = ",".join(repr(v) for v in parameter)
name_for_parameter = method.__name__ + "(" + args_for_parameter + ")"
frame = sys._getframe(1) # pylint: disable-msg=W0212
frame.f_locals[name_for_parameter] = method_for_parameter
return None
return decorator
test_cases=[ (c,'1') for c in string.digits]
test_cases.extend([ (c,'a') for c in string.ascii_lowercase])
test_cases.extend([ (c,'A') for c in string.ascii_uppercase])
encoding=[('ä','a'),('ö','a'),('ü','a'),
('Ä', 'A'), ('Ö', 'A'), ('Ü', 'A')]
special=[('?','?')]
mix=[('Wien 1110', 'Aaaa 1111')]
class TestFormatEncoder(unittest.TestCase):
@for_examples(test_cases)
def test_basic(self, x, y):
self.assertEqual(translate(x), y)
@for_examples(encoding)
def test_encoding(self, x, y):
self.assertEqual(translate(x), y)
@for_examples(special)
def test_special(self, x, y):
self.assertEqual(translate(x), y)
@for_examples(mix)
def test_mix(self, x, y):
self.assertEqual(translate(x), y)
if __name__ == '__main__':
unittest.main()
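# Note: the for_examples decorator above injects one test method per parameter tuple into
# the class body (named e.g. test_basic('0','1')), so each parameter combination shows up
# as its own case in the unittest output.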
| 2.671875
| 3
|
func_saver/func_saver_helpers.py
|
sfinktah/func_saver
| 0
|
12782082
|
<gh_stars>0
import os, sys
import re      # used by get_ea_by_any below
import struct  # used by isCallOrObfuCall below
import inspect # used by EaseCode's debug trace below
import ida_auto
import ida_bytes
import ida_funcs
import ida_ida
import ida_name
import ida_search
import ida_ua
import idaapi
import idautils
import idc
import tempfile
import json
from string_between import string_between
from superhex import hex
try:
import __builtin__ as builtins
integer_types = (int, long)
string_types = (str, unicode)
string_type = unicode
byte_type = str
long_type = long
except:
import builtins
integer_types = (int,)
string_types = (str, bytes)
byte_type = bytes
string_type = str
long_type = int
long = int
if 'debug' not in globals():
debug = 0
class ChunkFailure(Exception):
pass
class AdvanceFailure(Exception):
pass
# An iterable object is an object that implements __iter__, which is expected
# to return an iterator object.
def isIterable(o): return hasattr(o, '__iter__') and not hasattr(o, 'ljust')
def _isAnyJmp_mnem(mnem): return mnem.startswith("j")
def _isCall_mnem(mnem): return mnem.startswith("call")
def _isConditionalJmp_mnem(mnem): return mnem.startswith("j") and not mnem.startswith("jmp")
def _isInterrupt_mnem(mnem): return mnem.startswith("int")
def _isJmpOrCall(mnem): return mnem.startswith(("j", "call"))
def _isJmp_mnem(mnem): return mnem.startswith("jmp")
def _isNop_mnem(mnem): return mnem.startswith("nop") or mnem.startswith("pop")
def _isPushPop_mnem(mnem): return mnem.startswith("push") or mnem.startswith("pop")
def _isRet_mnem(mnem): return mnem.startswith("ret")
def _isUnconditionalJmpOrCall_mnem(mnem): return isUnconditionalJmp(mnem) or isCall(mnem)
def _isUnconditionalJmp_mnem(mnem): return mnem.startswith("jmp")
def isInt(o): return isinstance(o, integer_types)
def _isUnlikely_mnem(mnem): return mnem in ["in", "out", "loop", "cdq",
"lodsq", "xlat", "clc", "adc", "stc", "iret", "stosd", "bswap",
"wait", "sbb", "pause", "retf", "retnf", "test", "scasb", "cmc",
"insb", "hlt", "setnle"]
def _isFlowEnd_mnem(mnem): return mnem in ('ret', 'retn', 'jmp', 'int', 'ud2', 'leave', 'iret')
def perform(fun, *args):
return fun(*args)
def preprocessIsX(fun, arg):
if not arg:
raise Exception("Invalid argument: {}".format(type(arg)))
if isinstance(arg, str):
return perform(fun, arg)
if isinstance(arg, integer_types):
mnem = GetInsnMnem(arg)
if not mnem:
return False
return perform(fun, mnem)
raise Exception("Unknown type: {}".format(type(arg)))
def isUnlikely(arg): return preprocessIsX(_isUnlikely_mnem, arg)
def isFlowEnd(arg): return preprocessIsX(_isFlowEnd_mnem, arg)
def isAnyJmp(arg): return preprocessIsX(_isAnyJmp_mnem, arg)
def isJmpOrCall(arg): return preprocessIsX(_isJmpOrCall, arg)
def isCall(arg): return preprocessIsX(_isCall_mnem, arg)
def isJmpOrObfuJmp(ea, patch=0):
if ea is None:
return ValueError("ea was None")
if isJmp(ea):
return True
if idc.get_wide_dword(ea) == 0x24648d48:
searchstr = "55 48 8d 2d ?? ?? ?? ?? 48 87 2c 24 c3"
found = ida_search.find_binary(ea, ea + div3(len(searchstr)), searchstr, 16, idc.SEARCH_CASE | idc.SEARCH_DOWN | idc.SEARCH_NOSHOW)
if found == ea:
return True
def isCallOrObfuCall(ea, patch=0):
if isCall(ea):
return True
if idc.get_wide_dword(ea) == 0x24648d48:
searchstr = '48 8d 64 24 f8 48 89 2c 24 48 8d 2d ?? ?? ?? ?? 48 87 2c 24 55 48 8d 2d ?? ?? ?? ?? 48 87 2c 24 c3'
found = ida_search.find_binary(ea, ea + div3(len(searchstr)), searchstr, 16, idc.SEARCH_CASE | idc.SEARCH_DOWN | idc.SEARCH_NOSHOW)
if found == ea:
if patch:
l = [0xe8] + list(struct.unpack('4B', struct.pack('I', idc.get_wide_dword(ea + 0x18) + 0x17))) + \
[0xe9] + list(struct.unpack('4B', struct.pack('I', idc.get_wide_dword(ea + 0x0c) + 0x06)))
PatchBytes(ea, l)
SetFuncEnd(ea, ea + 10)
if IsFuncHead(ea):
LabelAddressPlus(ea, 'StraightCall')
return True
def isCallOrObfuCallPatch(ea): return isCallOrObfuCall(ea, 1)
def isConditionalJmp(arg): return preprocessIsX(_isConditionalJmp_mnem, arg)
def isJmp(arg): return preprocessIsX(_isJmp_mnem, arg)
def isPushPop(arg): return preprocessIsX(_isPushPop_mnem, arg)
def isNop(ea):
insn = ida_ua.insn_t()
inslen = ida_ua.decode_insn(insn, get_ea_by_any(ea))
if inslen == 0:
return None
if insn.itype == idaapi.NN_nop:
return True
return idc.get_wide_word(ea) == 0x9066
def isUnconditionalJmp(arg): return preprocessIsX(_isUnconditionalJmp_mnem, arg)
def isOpaqueJmp(ea):
if isUnconditionalJmp(ea):
opType0 = idc.get_operand_type(ea, 0)
if opType0 in (idc.o_near, idc.o_mem):
return False
if opType0 == idc.o_reg:
disasm = idc.GetDisasm(ea)
if get_ea_by_any(string_between('; ', '', disasm)) != idc.BADADDR:
return False
return True
return False
def isUnconditionalJmpOrCall(arg): return preprocessIsX(_isUnconditionalJmpOrCall_mnem, arg)
def isInterrupt(arg): return preprocessIsX(_isInterrupt_mnem, arg)
def isRet(arg): return preprocessIsX(_isRet_mnem, arg)
def IsChunkHead(ea): return GetFuncStart(get_ea_by_any(ea)) != ea and GetChunkStart(get_ea_by_any(ea)) == ea
def IsFuncHead(ea): return GetFuncStart(get_ea_by_any(ea)) == ea # idaapi.is_func(idc.get_full_flags(ea))
IsChunkStart = IsChunkHead
IsFuncStart = IsFuncHead
def IsFunc_(ea): return idaapi.get_func(get_ea_by_any(ea)) is not None
def IsCode_(ea): return (idc.get_full_flags(get_ea_by_any(ea)) & idc.MS_CLS) == idc.FF_CODE
def IsData(ea): return (idc.get_full_flags(get_ea_by_any(ea)) & idc.MS_CLS) == idc.FF_DATA
def IsTail(ea): return (idc.get_full_flags(get_ea_by_any(ea)) & idc.MS_CLS) == idc.FF_TAIL
def IsUnknown(ea): return (idc.get_full_flags(get_ea_by_any(ea)) & idc.MS_CLS) == idc.FF_UNK
def IsHead(ea): return (idc.get_full_flags(get_ea_by_any(ea)) & idc.FF_DATA) != 0
def IsFlow(ea): return (idc.get_full_flags(get_ea_by_any(ea)) & idc.FF_FLOW) != 0
def IsExtra(ea): return (idc.get_full_flags(get_ea_by_any(ea)) & idc.FF_LINE) != 0
def IsRef(ea): return (idc.get_full_flags(get_ea_by_any(ea)) & idc.FF_REF) != 0
def HasName(ea): return (idc.get_full_flags(get_ea_by_any(ea)) & idc.FF_NAME) != 0
def HasLabel(ea): return (idc.get_full_flags(get_ea_by_any(ea)) & idc.FF_LABL) != 0
def HasUserName(ea): return (idc.get_full_flags(get_ea_by_any(ea)) & idc.FF_ANYNAME) == idc.FF_NAME
def HasAnyName(ea): return (idc.get_full_flags(get_ea_by_any(ea)) & idc.FF_ANYNAME) != 0
def isString(o):
return isinstance(o, string_types)
def div3(n):
return (n + 1) // 3
def PatchBytes(ea, patch=None, comment=None, code=False):
"""
    @param ea [optional]: address to patch (or omit for screen_ea)
@param patch list|string|bytes: [0x66, 0x90] or "66 90" or b"\x66\x90" (py3)
@param comment [optional]: comment to place on first patched line
@returns int containing nbytes patched
Can be invoked as PatchBytes(ea, "66 90"), PatchBytes("66 90", ea),
or just PatchBytes("66 90").
"""
if 'record_patched_bytes' in globals():
globals()['record_patched_bytes'].append([ea, patch, comment])
if isinstance(ea, (list, bytearray) + string_types):
ea, patch = patch, ea
if ea is None:
ea = idc.get_screen_ea()
was_code = code or idc.is_code(idc.get_full_flags(ea))
if isinstance(patch, str):
# unicode for py3, bytes for py2 - but "default" form for
# passing "06 01 05" type arguments, which is all that counts.
# -- pass a `bytearray` if you want faster service :)
def int_as_byte(i, byte_len=0):
# empty byte container without using
# py3 `bytes` type
b = bytearray()
while byte_len > 0:
b.append(i & 255)
i >>= 8
byte_len -= 1
for b8bit in b:
                yield b8bit
if '?' not in patch:
# patch = hex_pattern_as_bytearray(patch.split(' '))
patch = bytearray().fromhex(patch)
else:
patch = [-1 if '?' in x else long_type(x, 16) for x in patch.split(' ')]
length = len(patch)
# deal with fixups
fx = idaapi.get_next_fixup_ea(ea - 1)
while fx < ea + length:
idaapi.del_fixup(fx)
fx = idaapi.get_next_fixup_ea(fx)
cstart, cend = idc.get_fchunk_attr(ea, idc.FUNCATTR_START), \
idc.get_fchunk_attr(ea, idc.FUNCATTR_END)
if cstart == idc.BADADDR: cstart = ea
if cend == idc.BADADDR: cend = 0
    # disable automatic tracing and such to prevent function truncation
# with InfAttr(idc.INF_AF, lambda v: v & 0xdfe60008):
# old_auto = ida_auto.enable_auto(False)
# for _ea in range(ea, ea+length):
# MyMakeUnknown(_ea, 1)
# code_heads = genAsList( NotHeads(ea, ea + length + 16, IsCode) )
# [0x140a79dfd, 0x140a79e05, 0x140a79e09, 0x140a79e0a]
if isinstance(patch, bytearray):
# fast patch
idaapi.patch_bytes(ea, byte_type(patch))
else:
# slower patch to allow for unset values
[idaapi.patch_byte(ea + i, patch[i]) for i in range(length) if patch[i] != -1]
# if was_code:
# if debug: print("was_code")
# pos = ea + length
# while code_heads:
# if code_heads[0] < pos:
# code_heads = code_heads[1:]
# else:
# break
# if code_heads:
# next_code_head = code_heads[0]
# else:
# next_code_head = idc.next_head(pos)
# if next_code_head > pos:
# idaapi.patch_bytes(pos, byte_type(bytearray([0x90] * (next_code_head - pos))))
#
if debug: print("ida_auto.plan_and_wait({:#x}, {:#x})".format(ea, ea + length))
if was_code: EaseCode(ea, ea + length, noFlow=1, forceStart=1, noExcept=1)
ida_auto.plan_and_wait(ea, ea + length)
# EaseCode(ea, next_code_head)
# ida_auto.enable_auto(old_auto)
    # this may seem superfluous, but it stops weird things from happening
# if was_code:
# remain = len(patch)
# cpos = cstart
# length = idc.create_insn(cstart)
# while length > 0:
# remain -= length
# cpos += length
# if remain <= 0:
# break
# length = idc.create_insn(cpos)
# if was_code:
# idc.auto_wait()
# EaseCode(ea, end=ea+length, create=1)
# ida_auto.plan_and_wait(cstart, cend or (cstart + length))
# ensures the resultant patch stays in the chunk and as code
# if was_code:
# ida_auto.plan_and_wait(cstart, cend or (cstart + length))
# idc.auto_wait()
return
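# Example invocations, following the forms listed in the docstring above (illustrative
# only; `ea` must be a valid address inside the open IDA database):
#
#   PatchBytes(ea, "66 90")                    # hex-string form
#   PatchBytes(ea, "66 ?? 90")                 # '?' bytes are left untouched
#   PatchBytes(ea, [0x66, 0x90])               # list form; -1 entries are skipped
#   PatchBytes(ea, bytearray(b"\x66\x90"))     # bytearray form (fast patch path)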
def MyGetMnem(ea):
if idc.get_wide_word(ea) == 0x9066:
return "nop"
mnem = idc.print_insn_mnem(ea)
return mnem
GetMnem = GetInsnMnem = MyGetMnem
def GetChunkStart(ea=None):
ea = eax(ea)
return idc.get_fchunk_attr(ea, idc.FUNCATTR_START)
def GetChunkEnd(ea=None):
ea = eax(ea)
return idc.get_fchunk_attr(ea, idc.FUNCATTR_END)
def GetChunkNumber(ea=None, funcea=None):
"""
Get number of chunk in function
@param ea: linear address
@return: chunk number
-1 - ea is not a chunk
0 - ea is in head chunk
1..n - tail chunk number
"""
ea = eax(ea)
if funcea is None:
owner = ida_funcs.get_func(ea)
# if debug: print(f"[idapy] owner = ida_funcs.get_func({ea:#x}):\n{pfh(owner)}")
    elif isinstance(funcea, ida_funcs.func_t):
        owner = funcea
else:
owner = ida_funcs.get_func(eax(funcea))
# if debug: print(f"[idapy] owner = ida_funcs.get_func({funcea:#x}):\n" + pfh(owner))
r = ida_funcs.get_func_chunknum(owner, ea)
# if debug: print(f"[idapy] ida_funcs.get_func_chunknum(owner, {ea:#x}): {r}")
return r
def SetFuncEnd(funcea, end):
# func = clone_items(ida_funcs.get_func(funcea))
# if func:
# idc.auto_wait()
if funcea == idc.BADADDR:
return False
if IsTail(end):
new_end = idc.get_item_head(end)
print("[warn] SetFuncEnd: end {:#x} is not an itemhead, did you mean {:#x}?".format(end, new_end))
globals()['warn'] += 1
# end = new_end
return False
ida_auto.plan_range(funcea, end)
if not ida_funcs.set_func_end(funcea, end):
print("ida_funcs.set_func_end(0x{:x}, 0x{:x})".format(funcea, end))
idc.auto_wait()
func_start = GetFuncStart(funcea)
func_end = GetFuncEnd(funcea)
cstart, cend = GetChunkStart(funcea), GetChunkEnd(funcea)
    # dprint("[SetFuncEnd] funcea, func_start, end, func_end")
print(
"[SetFuncEnd] funcea:{:x}, end:{:x}, func_start:{:x}, func_end:{:x}".format(funcea, end, func_start, func_end))
# if cstart != func_start:
# print("[warn] Not a head chunk, consider using SetChunkEnd | {:x}\u2013{:x}" \
# .format(
# # idc.get_func_name(func_start),
# # func_start, func_end,
# # idc.get_func_name(cstart),
# cstart, cend
# ))
# return SetChunkEnd(funcea, end)
if debug: print(
"func {}: {:x}\u2013{:x} chunk {}: {:x}\u2013{:x}".format(idc.get_name(func_start), func_start, func_end,
idc.get_name(cstart), cstart, cend))
if end == cend:
return True
if not ida_funcs.is_same_func(funcea, idc.prev_head(end)):
# if debug: print("[warn] set_func_end: end {:#x} or {:#x} should be part of function {:#x} or {:#x}".format(end, idc.prev_head(end), func_start, funcea))
print("[warn] chunk owner '{}' does not match func owner '{}' | {:x}\u2013{:x}" \
.format(
idc.get_func_name(funcea),
idc.get_func_name(idc.prev_head(end)),
cstart, cend,
))
globals()['warn'] += 1
# ptr = idc.prev_head(idc.get_item_head(end))
# ptr = idc.get_item_head(end-1)
ptr = end
happy = 0
heads = []
for r in range(16):
# print("[debug] ptr is {:#x}".format(ptr))
if IsFuncHead(ptr):
heads.append(ptr)
# print("[debug] adding head {:#x}".format(ptr))
# else:
# print("[debug] not head {:#x}".format(ptr))
ptr = idc.prev_head(ptr)
if ida_funcs.is_same_func(funcea, ptr):
happy = 1
break
if happy:
if heads:
print("[info] deleting func_heads: {}".format(hex(heads)))
for head in heads:
idc.del_func(head)
ce = GetChunkEnd(ptr)
idc.del_items(ce, ida_bytes.DELIT_NOTRUNC, end - ce)
print("idc.append_func_tail(0x{:x}, 0x{:x}, 0x{:x})".format(ptr, ce, end))
if not idc.append_func_tail(ptr, ce, end):
print("[warn] idc.append_func_tail({:#x}, {:#x}, {:#x}) failed".format(ptr, ce, end))
globals()['warn'] += 1
else:
print("[info] idc.append_func_tail({:#x}, {:#x}, {:#x}) ok".format(ptr, ce, end))
else:
if idc.set_func_end(funcea, end):
print("[info] set_func_end({:#x}, {:#x})".format(funcea, end))
else:
print("[warn] set_func_end({:#x}, {:#x}) failed".format(funcea, end))
globals()['warn'] += 1
result = GetChunkEnd(funcea)
if result != end:
print("[warn] SetFuncEnd: GetChunkEnd({:#x}) == {:#x}".format(funcea, result))
globals()['warn'] += 1
# raise Exception("Terrible")
return result == end
def IsHeadChunk(ea):
return GetChunkNumber(ea) == 0
def IsChunk(ea=None, owner=None):
"""
Is address in a tail chunk
@param ea: linear address
@return: 1-yes, 0-no
"""
# if not isInt(ea) and not isString(ea):
# print("[IsChunk] typeof ea: {}".format(type(ea)))
if isinstance(ea, ida_funcs.func_t):
return ea.flags & ida_funcs.FUNC_TAIL
ea = eax(ea)
if GetChunkNumber(ea) == 0:
return False
if GetChunkOwners(ea, includeOwner=1):
return True
return False
def SetFuncOrChunkEnd(ea, value):
if IsHeadChunk(ea):
return SetFuncEnd(ea, value)
elif IsChunk(ea, value):
return SetChunkEnd(ea, value)
else:
print("[SetFuncOrChunkEnd] {:x} Not a chunk/func head)".format(ea))
return False
def IsChunked(ea):
# return idc.get_fchunk_attr(address, FUNCATTR_START) < BADADDR
return len(list(idautils.Chunks(ea))) > 1
def GetChunk(ea=None):
"""
GetChunk
@param ea: linear address
"""
ea = eax(ea)
func = ida_funcs.get_fchunk(ea)
# if debug: print("[idapy] ida_funcs.get_fchunk(0x{:x}):\n{}".format(ea, pfh(func)))
return func
def IsChunked(ea):
# return idc.get_fchunk_attr(address, FUNCATTR_START) < BADADDR
return len(list(idautils.Chunks(ea))) > 1
def SetChunkEnd(ea, value):
# idc.set_fchunk_attr(ea, FUNCATTR_END, value)
if not IsChunked(ea):
raise TypeError("0x%x is not a chunk" % ea)
if GetChunkEnd(ea) == value:
return True
# get_fchunk(ea) # will return chunk ptr, to any function
tail = GetChunk(ea)
if tail.flags & idc.FUNC_TAIL == 0:
raise ChunkFailure("SetChunkEnd: {:x} was a funchead".format(ea))
# get_func_chunknum(GetFunc(ea), ea) -> int
return ida_funcs.set_func_end(tail.start_ea, value)
# return SetFuncEnd(ea, value)
def GetFuncEnd(ea=None):
    """
    Determine the end of the function containing ea
    @param ea: address inside the function
    @return: the end address of the function containing ea,
        or BADADDR if ea is not inside a function
    """
    ea = eax(ea)
# return idc.find_func_end(ea)
func = ida_funcs.get_func(ea)
if not func:
return idc.BADADDR
return func.end_ea
def MyMakeUnknown(ea, nbytes, flags=ida_bytes.DELIT_NOTRUNC):
r"""
@param ea: any address within the first item to delete (C++: ea_t)
@param nbytes: number of bytes in the range to be undefined (C++: asize_t)
@param flags: combination of: DELIT_EXPAND DELIT_DELNAMES
ida_bytes.DELIT_NOTRUNC DELIT_NOUNAME
DELIT_NOCMT DELIT_KEEPFUNC
@param may_destroy: optional callback invoked before deleting a head item.
if callback returns false then deletion and operation
fail. (C++: may_destroy_cb_t *)
    @return: true on successful operation, otherwise false
Convert item (instruction/data) to unexplored bytes. The whole item
(including the head and tail bytes) will be destroyed.
"""
# check if caller has invoked with (start_ea, end_ea)
if nbytes > ea:
nbytes = nbytes - ea
result = idaapi.del_items(ea, flags, nbytes)
if not result:
return result
# check for fixups that must be removed
# https://reverseengineering.stackexchange.com/questions/27339/
fx = idaapi.get_next_fixup_ea(ea - 1)
while fx < ea + nbytes:
idaapi.del_fixup(fx)
fx = idaapi.get_next_fixup_ea(fx)
return result
def GetFuncStart(ea=None):
    """
    @param ea: address inside the function
    @return: the start address of the function containing ea, or BADADDR
    """
    ea = eax(ea)
func = ida_funcs.get_func(ea)
if not func:
return idc.BADADDR
return func.start_ea
def json_load(_fn):
with open(_fn, 'r') as f:
return json_load_byteified(f)
def _byteify(data, ignore_dicts=False):
if isinstance(data, str):
return data
# if this is a list of values, return list of byteified values
if isinstance(data, list):
return [_byteify(item, ignore_dicts=True) for item in data]
# if this is a dictionary, return dictionary of byteified keys and values
# but only if we haven't already byteified it
if isinstance(data, dict) and not ignore_dicts:
return {
_byteify(key, ignore_dicts=True): _byteify(value, ignore_dicts=True)
for key, value in data.items() # changed to .items() for python 2.7/3
}
# python 3 compatible duck-typing
# if this is a unicode string, return its string representation
if str(type(data)) == "<type 'unicode'>":
return data.encode('utf-8')
# if it's anything else, return it in its original form
return data
# https://stackoverflow.com/questions/956867/how-to-get-string-objects-instead-of-unicode-from-json/33571117#33571117
def json_load_byteified(file_handle):
return _byteify(
json.load(file_handle, object_hook=_byteify),
ignore_dicts=True
)
def json_save_safe(dst, json_object):
dirname, basename = os.path.split(dst)
try:
with tempfile.NamedTemporaryFile(prefix=basename, mode='w', dir=dirname, delete=False) as filename:
filename.file.write(json.dumps(json_object))
filename.file.close()
print("replace({}, {})".format(filename.name, dst))
print("file_exists", os.path.exists(filename.name))
os.replace(filename.name, dst)
if os.path.exists(filename.name):
os.unlink(filename.name)
except IOError:
print("file not writable or some such")
except Exception as e:
print("**EXCEPTION** {}".format(e))
def get_ea_by_any(val, d=object):
"""
    Returns the address for `val`: if `val` is a name, it is resolved to an
    address; if it is already an address, it is returned unchanged.
    An easy way to accept either an address or a name as input.
    """
if isinstance(val, list):
return [get_ea_by_any(x) for x in val]
if isinstance(val, str):
r = idaapi.str2ea(val)
if r and r != idc.BADADDR:
return r
match = re.match(r'(sub|off|loc|byte|word|dword|qword|nullsub|locret)_([0-9A-F]+)$', val)
if match:
return long_type(match.group(2), 16)
return 0
if isinstance(val, idaapi.vdui_t):
val = val.cfunc
if val is None:
return idc.get_screen_ea() if d == object else d
if isinstance(val, (int, long)):
return val
try:
for attr_name in ['start_ea', 'ea', 'entry_ea', 'start', 'min_ea']:
if hasattr(val, attr_name):
return getattr(val, attr_name)
except AttributeError:
pass
raise ValueError("Don't know how to convert {} '{}' to address".format(type(val), val))
def eax(*args):
return get_ea_by_any(*args)
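# get_ea_by_any / eax accept either an address or a name (values below are illustrative):
#
#   eax(0x140001000)        # an int is returned unchanged
#   eax("sub_140001000")    # a name is resolved via idaapi.str2ea or the regex above
#   eax(None)               # falls back to idc.get_screen_ea()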
def MakeSigned(number, size = 32):
number = number & (1<<size) - 1
return number if number < 1<<size - 1 else - (1<<size) - (~number + 1)
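# Worked examples for the two's-complement conversion above (default width is 32 bits):
#   MakeSigned(0xFFFFFFFF)   -> -1
#   MakeSigned(0x7FFFFFFF)   ->  0x7FFFFFFF  (unchanged, below the sign bit)
#   MakeSigned(0xFFFF, 16)   -> -1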
def MyGetInstructionLength(*args):
if len(args) == 1:
if not isInt(args[0]):
print("return_unless: isInt(args[0])")
return
ea = args[0]
insn = ida_ua.insn_t()
inslen = ida_ua.decode_insn(insn, ea)
if inslen:
return inslen
else:
return ida_ua.decode_insn(*args)
GetInsnLen = InsnLen = MyGetInstructionLength
def IsValidEA(ea=None):
"""
IsValidEA
@param ea: linear address
"""
ea = eax(ea)
return ida_ida.cvar.inf.min_ea <= ea < ida_ida.cvar.inf.max_ea
def ValidateEA(ea=None):
if not IsValidEA(ea):
raise AdvanceFailure("Invalid Address 0x{:x}".format(ea))
def RemoveChunk(*args):
"""
@brief RemoveChunk
Removes a single chunk from a function.
@param [optional] functionAddress: any address inside the function and chunk
@param chunkAddress: any address inside the chunk
"""
from inspect import getframeinfo, currentframe, getdoc
if len(args) == 2:
funcStart = args[0]
chunkAddr = args[1]
elif len(args) == 1:
chunkAddr = args[0]
funcStart = GetFuncStart(chunkAddr)
if funcStart == idc.BADADDR:
print("Couldn't find function for chunk at {:x}".format(chunkAddr))
return
else:
# https://stackoverflow.com/questions/8822701/how-to-print-docstring-of-python-function-from-inside-the-function-itself
print(getdoc(globals()[getframeinfo(currentframe()).function]))
return idc.remove_fchunk(funcStart, chunkAddr)
def UnpatchUntilChunk(ea, _range=1024):
if ea is idc.BADADDR:
return
nextChunkStart = 0
ourFunc = idc.BADADDR
if IsFunc_(ea):
ourFunc = GetFuncStart(ea)
# print("[info] ourFunc is {:x}".format(ourFunc))
# print("[info] checking range ... {:#x}".format(ea))
for r in range(_range):
fs = GetFuncStart(ea + r)
if fs != idc.BADADDR and fs != ourFunc:
nextChunkStart = ea + r
# print("[info] stopping at {:x} because GetFuncStart is {:x}".format(nextChunkStart, GetFuncStart(ea+r)))
# print("[info] checking for patches {:#x} - {:#x}".format(ea, nextChunkStart))
break
if nextChunkStart > ea:
return UnPatch(ea, nextChunkStart)
def GetTarget(ea, flow=0, calls=1, conditionals=1, operand=0, failnone=False):
ea = eax(ea)
if isJmpOrObfuJmp(ea) and not isJmp(ea):
return MakeSigned(idc.get_wide_dword(ea + 4)) + ea + 7
mnem = idc.print_insn_mnem(ea)
disasm = idc.GetDisasm(ea)
if not mnem:
print("{:x} couldn't get mnem from '{}'".format(ea, disasm))
return None if failnone else False # idc.BADADDR
if mnem == "jmp" or (calls and mnem == "call") or (conditionals and mnem[0] == "j"):
opType = idc.get_operand_type(ea, operand)
if opType in (idc.o_near, idc.o_mem):
return idc.get_operand_value(ea, operand)
if opType == idc.o_reg:
# 'call rax ; j_smth_metric_tamper'
s = string_between('; ', '', disasm).strip()
if s:
result = eax(s)
if ida_ida.cvar.inf.min_ea <= result < ida_ida.cvar.inf.max_ea:
return result
# print("[warn] can't follow opType {} from {:x}".format(opType, ea))
if flow:
if idc.next_head(ea) == ea + idc.get_item_size(ea) and idc.is_flow(idc.get_full_flags(idc.next_head(ea))):
return idc.next_head(ea)
else:
if debug: print("{:x} no flow".format(ea))
return None if failnone else idc.BADADDR
def is_sequence(arg):
""" https://stackoverflow.com/questions/1835018/how-to-check-if-an-object-is-a-list-or-tuple-but-not-string/1835259#1835259
"""
return (not hasattr(arg, "strip") and
hasattr(arg, "__getitem__") or
hasattr(arg, "__iter__"))
patchedBytes = []
def RecordPatchedByte(ea, fpos, org_val, patch_val):
# print("%x, %x, %x, %x" % (ea, fpos, org_val, patch_val))
patchedBytes.append([ea - 0x140000000, patch_val])
# idaapi.patch_byte(ea, org_value)
def RecordPatches1(ranges):
global patchedBytes
del patchedBytes[:]
# patchedBytes=[]
# for i in ranges: idaapi.visit_patched_bytes(i[0] + 0x140000000, i[1] + i[0] + 0x140000000, RecordPatchedByte)
if ranges:
for start, end in ranges:
idaapi.visit_patched_bytes(start, end, RecordPatchedByte)
else:
idaapi.visit_patched_bytes(0, idaapi.BADADDR, RecordPatchedByte)
n = 0
c = dict()
lastEa = 0
startEa = 0
for i in patchedBytes:
a, b = i
if a == lastEa + 1:
c[startEa].append(b)
else:
startEa = a
c[a] = [b]
lastEa = a
return c
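# RecordPatches1 groups contiguous patched bytes by the offset of their first byte
# (offsets are relative to the 0x140000000 image base subtracted in RecordPatchedByte).
# A result shaped like {0x1000: [0x90, 0x90], 0x2010: [0xC3]} means two adjacent patched
# bytes at base+0x1000..0x1001 and one at base+0x2010 (addresses illustrative).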
def forceCode(start, end=None, trim=False, delay=None, origin=None):
log = []
ea = eax(start)
ValidateEA(ea)
log.append("start: {:x}".format(ea))
if ea == idc.BADADDR or not ea:
return 0, 0, 0, 0
insn_len = GetInsnLen(ea) or 15
end = end or ea + insn_len
# print("end, start: {}, {}".format(end, start))
    if end < ida_ida.cvar.inf.min_ea and end < start:
end = start + end
log.append("end: {:x}".format(end))
# if ea == forceCode.last:
# if _.all(forceCode.last, lambda x, *a: x == ea):
# raise RuntimeError("Repeated calls for forceCode for same address")
# forceCode.last.append(ea)
if debug:
# dprint("[forceCode] start, end, trim, delay")
print("[forceCode] start:{:x}, end:{:x}, trim:{}, delay:{}".format(start, end, trim, delay))
last_jmp_or_ret = 0
last_addr = 0
trimmed_end = 0
happy = 0
# dprint("[forceCode] start")
# print("[forceCode] start:{:x}".format(start))
func_end = GetFuncEnd(start)
# dprint("[forceCode] func_end")
# print("[forceCode] func_end:{:x}".format(func_end))
func_start = GetFuncStart(start)
chunk_end = GetChunkEnd(start)
chunk_start = GetChunkStart(start)
if debug:
print("func_start, func_end", hex(func_start), hex(func_end))
print("chunk_start, chunk_end", hex(func_start), hex(func_end))
# idc.del_items(start, idc.DELIT_EXPAND, end - start)
if GetInsnLen(ea) == 2 and GetInsnMnem(ea) == 'push':
log.append("{:x} insnlen == 2".format(ea))
old_type = idc.get_type(ea + 1) if not idc.get_type(ea) else None
old_name = idc.get_name(ea + 1) if HasUserName(ea + 1) and not HasUserName(ea) else None
idc.del_items(ea, idc.DELIT_DELNAMES, 2)
size = idc.create_insn(ea)
if size == 2:
if old_name:
LabelAddressPlus(ea, old_name)
if old_type:
idc.SetType(ea, old_type)
ea += 2
while ea < end:
log.append("{:x} {}".format(ea, idc.GetDisasm(ea)))
happy = 0
last_addr = ea
if idc.is_tail(idc.get_full_flags(ea)):
head = idc.get_item_head(ea)
if head == ea:
print("[warn] item_head == ea {:x} | fn: {:x} chunk: {:x}\u2013{:x}".format(ea, start, start, end))
# if not idc.del_items(ea, 0, 1):
if not idc.MakeUnknown(ea, 1, 0):
print("[warn] couldn't del item at {:x} | fn: {:x} chunk: {:x}\u2013{:x}".format(ea, start, start, end))
else:
if debug: print(
"[debug] deleted item at {:x} | fn: {:x} chunk: {:x}\u2013{:x}".format(ea, start, start, end))
if idc.is_code(idc.get_full_flags(ea)):
# seems to be that deleting the code and remaking it is the only way to ensure everything works ok
# .. and it seems that deleting and remaking triggered stupid stupid things like the generation of nullsubs out of `retn` statements
# .. but i think we will cheat and match the instruction against GetFuncEnd, since undefining the end of a chunk is what shrinks it.
insn_len = idc.get_item_size(ea)
if debug: print(
"[info] {:x} code exists for {} bytes | {}".format(ea, insn_len, idc.generate_disasm_line(ea, 0)))
ea += insn_len
happy = 1
if not happy:
insn_len = idc.create_insn(ea)
if debug: print(
"[info] idc.create_insn len: {} | fn: {:x} chunk: {:x}\u2013{:x}".format(insn_len, ea, start, end))
if not insn_len:
# this
MyMakeUnknown(ea + 1, GetInsnLen(ea) or 1, idc.DELIT_DELNAMES | ida_bytes.DELIT_NOTRUNC)
# or this (same result)
for r in range(ea + 1, GetInsnLen(ea) or 1):
if HasAnyName(r):
LabelAddressPlus(r, '')
if debug: print("[info] removing label at {:x}".format(r))
insn_len = idc.create_insn(ea)
if debug: print(
"[info] idc.create_insn len: {} | fn: {:x} chunk: {:x}\u2013{:x}".format(insn_len, ea, start, end))
# restore function end if we just removed the last insn in a chunk
if insn_len and insn_len + ea == chunk_end:
if debug: print(
"[info] restoring chunk_end to {:x} | fn: {:x} chunk: {:x}\u2013{:x}".format(chunk_end, chunk_start,
start, end))
SetFuncEnd(chunk_start, chunk_end)
if not insn_len:
# record existing code heads
existing_code = [x for x in range(ea, ea + 15) if IsCode_(x)]
idc.del_items(ea, 0, 15)
insn_len = idc.create_insn(ea)
if not insn_len and existing_code:
[idc.create_insn(x) for x in existing_code]
if not insn_len:
trimmed_end = last_jmp_or_ret + idc.get_item_size(
last_jmp_or_ret) if last_jmp_or_ret else last_addr or ea
if not trim:
print("[warn] couldn't create instruction at {:x}".format(ea))
print("\n".join(log))
UnpatchUntilChunk(ea)
if idc.create_insn(ea):
print("[info] unpatching {:x} seemed to help".format(ea))
else:
print(
"[warn] couldn't create instruction at {:x}, shortening chunk to {:x} | fn: {:x} chunk: {:x}\u2013{:x}".format(
ea, trimmed_end, ea, start, end))
if idc.get_func_name(start):
if not idc.set_func_end(start, trimmed_end):
print(
"[warn] couldn't set func end at {:x} or {:x} or {:x} or {:x} | fn: {:x} chunk: {:x}\u2013{:x}".format(
end, last_jmp_or_ret, last_addr, ea, start, start, end))
idc.del_items(end, 0, end - trimmed_end)
else:
happy = 1
ea += insn_len
if not happy:
return ea - start, start, end, trimmed_end
mnem = idc.print_insn_mnem(last_addr).split(' ', 2)[0]
if mnem in ('jmp', 'ret', 'retn', 'int'):
last_jmp_or_ret = last_addr
if func_start == start:
idc.add_func(func_start)
return ea - start, start, end, trimmed_end
def GetChunkOwner(ea=None):
"""
GetChunkOwner
@param ea: linear address
"""
ea = eax(ea)
r = idc.get_fchunk_attr(ea, idc.FUNCATTR_OWNER)
# if debug: print("[idapy] idc.get_fchunk_attr(0x{:x}, FUNCATTR_OWNER): {:x}".format(ea, r))
return r
def GetChunkOwners(ea=None, includeOwner=False):
"""
GetChunkOwners
@param ea: linear address
"""
ea = eax(ea)
# https://www.hex-rays.com/products/ida/support/sdkdoc/classfunc__parent__iterator__t.html
# func_parent_iterator_t fpi(fnt);
# for ( bool ok=fpi.first(); ok; ok=fpi.next() )
# ea_t parent = fpi.parent();
# func = GetChunk(ea)
func = ida_funcs.get_fchunk(ea)
# if debug: print("[idapy] ida_funcs.get_fchunk(0x{:x}):\n{}".format(ea, pfh(func)))
if not func:
return []
# func = ida_funcs.func_t(ea)
it = ida_funcs.func_parent_iterator_t(func)
ok = it.first()
if not ok:
return [func.start_ea]
owners = []
while ok:
parent = it.parent()
owners.append(parent)
ok = it.next()
if includeOwner:
r = idc.get_fchunk_attr(ea, idc.FUNCATTR_OWNER)
if r != idc.BADADDR:
if r not in owners:
# print("[GetChunkOwners] FUNCATTR_OWNER: {:x} not listed in owners".format(r))
# owners.append(r)
pass
for owner in owners[:]:
if owner & 0xff00000000000000:
print("[GetChunkOwners] removing BADADDR: {:x}".format(owner))
owners.remove(owner)
if not idaapi.is_func(idc.get_full_flags(owner)):
if idaapi.get_func(owner) is None:
print("[GetChunkOwners] stated owner {:x} of chunk {:x} is not a function".format(owner, ea))
else:
print("[GetChunkOwners] stated owner {:x} of chunk {:x} is not the function head".format(owner, ea))
return owners
def EaseCode(ea=None, end=None, forceStart=False, forceStartIfHead=False, noExcept=False, noFlow=False, unpatch=False, ignoreMnem=[], create=None, fixChunks=False, origin=None):
"""
EaseCode
@param ea: linear address
"""
ea = eax(ea)
if not (ida_ida.cvar.inf.min_ea <= ea < ida_ida.cvar.inf.max_ea):
raise AdvanceFailure("Invalid Address 0x{:x}".format(ea))
if debug:
print("[EaseCode] {:x}".format(ea))
stk = []
for i in range(len(inspect.stack()) - 1, 0, -1):
stk.append(inspect.stack()[i][3])
print((" -> ".join(stk)))
# d = ["{:x} {}".format(x, idc.generate_disasm_line(x, 0)) for x in range(ea, end or (ea+0x1000)) if not IsTail(x)]
# if debug:
# print("[EaseCode] pre-disasm\n{}".format("\n".join(d)))
if not IsCode_(ea):
if forceStartIfHead and IsHead(ea):
r = forceCode(ea, GetInsnLen(ea), origin=origin)
if debug: print("forceStartIfHead: {:x} {}".format(ea, diida(ea)))
elif forceStart:
r = forceCode(ea, GetInsnLen(ea), origin=origin)
if debug: print("forceStart: {:x} {}".format(ea, diida(ea)))
elif not idc.create_insn(ea):
if noExcept:
return AdvanceFailure("0x{:x} EaseCode must start at valid code head".format(ea))
else:
raise AdvanceFailure("0x{:x} EaseCode must start at valid code head".format(ea))
ida_auto.revert_ida_decisions(ea, GetInsnLen(ea))
ida_auto.auto_recreate_insn(ea)
start_ea = ea
last_ea = ea
at_end = False
at_flow_end = False
unhandled = code = tail = unknown = flow = False
owners = GetChunkOwners(ea, includeOwner=1)
_start = True
_fixChunk = False
while ea != idc.BADADDR and (end is None or ea < end):
if _start:
_start = False
else:
last_ea = ea
ea = ea + insn_len
if last_ea == start_ea and at_flow_end:
if debug:
print("[EaseCode] ignoring at_flow_end during second loop")
at_flow_end = False
if at_end or at_flow_end:
break
if unpatch:
UnPatch(ea, ea + 15)
idc.GetDisasm(ea)
insn_len = GetInsnLen(ea)
if not insn_len:
if noExcept:
return AdvanceFailure("0x{:x} EaseCode couldn't advance past 0x{:x} ".format(start_ea, ea))
raise AdvanceFailure("0x{:x} EaseCode couldn't advance past 0x{:x} ".format(start_ea, ea))
_owners = GetChunkOwners(ea, includeOwner=1)
if _owners:
if _owners != owners:
if debug: print("[EaseCode] _owners != owners; break")
break
else:
owners = _owners
unhandled = code = tail = unknown = flow = False
next_head = idc.next_head(ea)
mnem = ''
if IsCode_(ea):
# if debug: print("0x{:x} IsCode".format(ea))
code = True
mnem = idc.print_insn_mnem(ea)
if mnem.startswith(('ret', 'jmp', 'int', 'ud2')):
at_end = True
if create: # or mnem.startswith(('ret', 'jmp', 'int', 'ud2', 'leave')):
# raise RuntimeError("don't")
ida_auto.revert_ida_decisions(ea, GetInsnLen(ea))
ida_auto.auto_recreate_insn(ea)
idc.auto_wait()
else:
if IsTail(ea):
# if debug: print("0x{:x} IsTail".format(ea))
tail = True
if IsUnknown(ea) or IsData(ea):
# if debug: print("0x{:x} IsUnknown".format(ea))
unknown = True
if not (code or tail or unknown):
if debug: print("0x{:x} unhandled flags".format(ea))
if debug: debug_fflags(ea)
if IsFlow(ea):
if debug: print("0x{:x} IsFlow ({}) +{}".format(ea, mnem, insn_len))
flow = True
elif ea != start_ea:
prev_ea = last_ea
prev_mnem = idc.print_insn_mnem(prev_ea)
if prev_mnem not in ('ret', 'retn', 'jmp', 'int', 'ud2', 'leave', 'iret', 'retf'):
if prev_mnem != 'call' or ida_funcs.func_does_return(GetTarget(prev_ea)):
print("{:x} Flow ended {:x} with '{}' (fixing)".format(ea, prev_ea, prev_mnem))
if fixChunks:
_fixChunk = True
ida_auto.auto_recreate_insn(prev_ea)
ida_auto.auto_wait()
# ea1 = prev_ea
# ea2 = idc.next_head(ea)
# ida_auto.auto_apply_tail(ea1, ea2)
# print("ida_auto results: {}".format([
# ida_auto.revert_ida_decisions(ea1, ea2), #
# [ida_auto.auto_recreate_insn(x) for x in Heads(ea1, ea2)],
# [ida_auto.plan_ea(x) for x in Heads(ea1, ea2)], #
# ida_auto.auto_wait_range(ea1, ea2),
# ida_auto.plan_and_wait(ea1, ea2),
# ida_auto.plan_and_wait(ea1, ea2, True),
# ida_auto.plan_range(ea1, ea2), #
# ida_auto.auto_wait()
# ]))
# idaapi.del_items(prev_ea, ida_bytes.DELIT_NOTRUC, ea - prev_ea)
# if not idc.create_insn(prev_ea):
# print("[EaseCode] couldn't recreate insn at {:x}".format(prev_ea))
# ida_auto.auto_recreate_insn(idc.prev_head(prev_ea))
# idc.auto_wait()
                    idc.GetDisasm(prev_ea)
flow = True
# TODO: amalgamate these two, they're basically the same
if code and isFlowEnd(ea):
if debug: print("0x{:x} code and isFlowEnd; at_end".format(ea))
ida_auto.auto_recreate_insn(ea)
at_flow_end = True
elif not flow: # or isFlowEnd(ea):
if not noFlow and mnem not in ignoreMnem:
if debug: print("0x{:x} no flow; at_end".format(ea))
at_flow_end = True
if tail:
if debug: print("0x{:x} tail; break".format(ea))
break
if unknown:
# dprint("[debug] next_head, ea, insn_len")
if debug: print("[debug] next_head:{:x}, ea:{:x}, insn_len:{:x}".format(next_head, ea, insn_len))
if next_head == ea + insn_len:
pass
# print("0x{:x} next_head == ea + insn_len".format(ea))
elif next_head > ea + insn_len:
pass
# print("0x{:x} next_head > ea + insn_len".format(ea))
else:
# print("0x{:x} next_head < ea + insn_len; forcing space to instruction".format(ea))
idaapi.del_items(ea, ida_bytes.DELIT_NOTRUNC, insn_len)
if not idc.create_insn(ea):
if debug: print("0x{:x} couldn't idc.make_insn(0x{:x}); break".format(ea, ea))
break
if unpatch:
UnPatch(start_ea, ea)
# ida_auto.plan_and_wait(start_ea, ea)
# ida_auto.plan_range(start_ea, ea)
# idc.auto_wait()
if _fixChunk and GetChunkEnd(start_ea) < ea:
SetFuncOrChunkEnd(start_ea, ea)
return ea
def UnPatch(start, end = None):
if end is None:
if is_sequence(start):
try:
end = start[1]
if end is not None:
return UnPatch(start[0], end)
except TypeError:
return 0
except ValueError:
return 0
end = InsnLen(start) + start
if end < start and end < 16364:
end = start + end
count = 0
if isinstance(start, (int, long)) and isinstance(end, (int, long)):
while start < end:
if idc.get_cmt(start, 0):
idc.set_cmt(start, '', 0)
if ida_bytes.revert_byte(start):
count += 1
start += 1
return count
    print("Unexpected type: %s" % type(start))
def LabelAddressPlus(ea, name, force=False, append_once=False, unnamed=False, nousername=False, named=False, throw=False):
"""
    Label an address with `name` (optionally forced), or with a uniquified alternative (name_0, name_1, ...) if the name is taken
:param ea: address
:param name: desired name
:param force: force name (displace existing name)
:param append_once: append `name` if not already ending with `name`
:param named: [str, callable(addr, name)] name for things with existing usernames
:return: success as bool
"""
def ThrowOnFailure(result):
if not result and throw:
raise RuntimeError("Couldn't label address {:x} with \"{}\"".format(ea, name))
return result
def MakeUniqueLabel(name, ea=idc.BADADDR):
fnLoc = idc.get_name_ea_simple(name)
if fnLoc == idc.BADADDR or fnLoc == ea:
return name
fmt = "%s_%%i" % name
for i in range(100000):
tmpName = fmt % i
fnLoc = idc.get_name_ea_simple(tmpName)
if fnLoc == idc.BADADDR or fnLoc == ea:
return tmpName
return ""
if nousername:
unnamed = nousername
if ea < idc.BADADDR:
if HasUserName(ea):
if named:
if callable(named):
_name = idc.get_name(ea)
_name = named(ea, _name, name)
else:
name = named
elif unnamed:
return
fnName = idc.get_name(ea)
if append_once:
if not fnName.endswith(name):
name += fnName
else:
return ThrowOnFailure(False)
fnLoc = idc.get_name_ea_simple(name)
if fnLoc == idc.BADADDR:
return ThrowOnFailure(idc.set_name(ea, name, idc.SN_NOWARN))
elif fnLoc == ea:
return ThrowOnFailure(True)
else:
if force:
idc.set_name(fnLoc, "", idc.SN_AUTO | idc.SN_NOWARN)
                idc.auto_wait()
return ThrowOnFailure(idc.set_name(ea, name, idc.SN_NOWARN))
else:
name = MakeUniqueLabel(name, ea)
return ThrowOnFailure(idc.set_name(ea, name, idc.SN_NOWARN))
else:
print("0x0%0x: Couldn't label %s, BADADDR" % (ea, name))
return False
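# Typical calls (illustrative):
#   LabelAddressPlus(ea, "MyFunc")                # names ea; collisions get a _0/_1/... suffix
#   LabelAddressPlus(ea, "MyFunc", force=True)    # steals the name from whatever address currently holds it
#   LabelAddressPlus(ea, "Prefix_", append_once=True)  # prefixes the existing label unless it already ends with "Prefix_"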
def SkipJumps(ea, name=None, until=None, untilInclusive=0, notPatched=False, skipShort=False, skipNops=False,
iteratee=None, apply=False, *args, **kwargs):
if isIterable(ea):
return [SkipJumps(x, name=name, until=until, untilInclusive=untilInclusive, notPatched=notPatched,
skipShort=skipShort, skipNops=skipNops, iteratee=iteratee, apply=apply, *args, **kwargs)
for x in ea]
if not isInt(ea):
print("ea was not int: {}".format(type(ea)))
# apply = 0
target = ea
count = 0
jumps = [ea]
targets = [ea]
if callable(iteratee):
iteratee(ea, *args, **kwargs)
while target != idc.BADADDR:
if until:
endix = max(0, len(targets) - 2 + untilInclusive)
# dprint("[debug] endix")
# print("[debug] endix:{}".format(endix))
if isInt(until):
if target == until:
return targets[endix]
elif callable(until):
r = until(target)
if r:
if r < 0:
return r
return targets[endix]
# print(("0x%x: target: 0x%x: %s" % (ea, target, dii(target))))
insn = idautils.DecodeInstruction(target)
if not insn:
print("Couldn't find insn at {:x}".format(target))
return target
_tgt = GetTarget(target)
if _tgt == False:
print("Invalid _tgt: {:x}, {}".format(_tgt, hex(jumps)))
if not IsValidEA(_tgt):
if _tgt != idc.BADADDR:
print("Invalid _tgt: {:x}".format(_tgt))
# UnPatch(target, InsnLen(target))
ida_auto.auto_recreate_insn(target)
idc.auto_wait()
_tgt = GetTarget(target)
if count == 0 and insn.itype == idaapi.NN_call and SkipJumps(_tgt) != _tgt:
newTarget = SkipJumps(_tgt)
return newTarget
if insn.itype == idaapi.NN_jmp and (not skipShort or GetInsnLen(target) > 2):
if insn.Op1.type in (idc.o_mem, idc.o_near):
if notPatched:
if ida_bytes.get_original_byte(target) != idc.get_wide_byte(target):
break
newTarget = insn.Op1.addr
if newTarget and newTarget != idc.BADADDR:
count += 1
jumps.append(target)
if name:
LabelAddressPlus(newTarget, name, *args, **kwargs)
while skipNops and isNop(newTarget):
newTarget = newTarget + GetInsnLen(newTarget)
if not IsCode_(newTarget):
print("SkipJumps: Skipped NOPs right into a non-instruction: {:x} jumps".format(newTarget))
return -1
if iteratee:
rv = iteratee(newTarget, *args, **kwargs)
if rv and isInt(rv) and rv > 1:
newTarget = rv
targets.append(newTarget)
target = newTarget
continue
break
return target
def GetFuncName(ea, end = None):
if isinstance(ea, list):
return [GetFuncName(x) for x in ea]
if end is None:
if ea is None:
ea = idc.get_screen_ea()
if isInt(ea):
r = idc.get_func_name(ea)
# if debug: print("[idapy] idc.get_func_name(0x{:x}): {}".format(ea, r))
return r
if isInt(end):
if end > ea:
fnNames = set()
heads = idautils.Heads(ea, end)
if heads:
for head in idautils.Heads(ea, end):
fnNames.add(GetFuncName(head))
if '' in fnNames:
fnNames.remove('')
return fnNames
return ''
def GetAllNames(ea):
"""GetAllNames.
Args:
ea:
"""
fnName = GetFuncName(ea)
locName = idc.get_name(ea, ida_name.GN_VISIBLE)
if not fnName: return locName
if not locName: return fnName
if fnName == locName: return fnName
return "%s %s" % (fnName, locName)
def GetChunkAddressesZeroOffset(ea = 0):
"""GetChunkAddresses.
Args:
ea:
"""
chunks = idautils.Chunks(ea)
return [[x[0] - 0x140000000, x[1] - x[0]] for x in chunks]
| 2.421875
| 2
|
yaetos/etl_utils.py
|
mcarbonell-paymefy/yaetos
| 0
|
12782083
|
<reponame>mcarbonell-paymefy/yaetos<filename>yaetos/etl_utils.py
"""
Helper functions. Set up to run locally and on a cluster.
"""
# TODO:
# - add linter
# - finish _metadata.txt file content.
# - get inputs and output by commandline (with all related params used in yml, like 'type', 'incr'...).
# - better check that db copy is in sync with S3.
# - way to run all jobs from 1 cmd line.
import sys
import inspect
import yaml
from datetime import datetime
import os
import boto3
import argparse
from time import time
from io import StringIO
import networkx as nx
import random
import pandas as pd
from configparser import ConfigParser
import numpy as np
#from sklearn.externals import joblib # TODO: re-enable later after fixing lib versions.
import gc
from pprint import pformat
import smtplib, ssl
from pyspark.sql.window import Window
from pyspark.sql import functions as F
from pyspark.sql.types import StructType
from yaetos.git_utils import Git_Config_Manager
from dateutil.relativedelta import relativedelta
from yaetos.logger import setup_logging
logger = setup_logging('Job')
# User settable params below can be changed from command line or yml or job inputs.
JOBS_METADATA_FILE = 'conf/jobs_metadata.yml'
AWS_CONFIG_FILE = 'conf/aws_config.cfg'
CONNECTION_FILE = 'conf/connections.cfg'
CLUSTER_APP_FOLDER = '/home/hadoop/app/'
LOCAL_APP_FOLDER = os.environ.get('PYSPARK_AWS_ETL_HOME', '') # PYSPARK_AWS_ETL_HOME set to end with '/'
LOCAL_JOB_REPO_FOLDER = os.environ.get('PYSPARK_AWS_ETL_JOBS_HOME', '')
AWS_SECRET_ID = '/yaetos/connections'
JOB_FOLDER = 'jobs/'
PACKAGES_EMR = ['com.databricks:spark-redshift_2.11:2.0.1', 'org.apache.spark:spark-avro_2.11:2.4.0', 'mysql:mysql-connector-java:8.0.22', 'org.postgresql:postgresql:42.2.18'] # necessary for reading/writing to redshift, mysql & clickhouse using spark connector.
PACKAGES_EMR_ALT = ['io.github.spark-redshift-community:spark-redshift_2.12:5.0.3', 'org.apache.spark:spark-avro_2.12:3.1.1', 'mysql:mysql-connector-java:8.0.22', 'org.postgresql:postgresql:42.2.18'] # same but compatible with spark 3.
PACKAGES_LOCAL = PACKAGES_EMR + ['com.amazonaws:aws-java-sdk-pom:1.11.760', 'org.apache.hadoop:hadoop-aws:2.7.0']
PACKAGES_LOCAL_ALT = PACKAGES_EMR_ALT + ['com.amazonaws:aws-java-sdk-pom:1.11.760', 'org.apache.hadoop:hadoop-aws:2.7.0'] # will probably need to be moved to hadoop-aws:3.2.1 to work locally.
JARS = 'https://s3.amazonaws.com/redshift-downloads/drivers/jdbc/1.2.41.1065/RedshiftJDBC42-no-awssdk-1.2.41.1065.jar' # not available in public repo so cannot be put in "packages" var.
class ETL_Base(object):
TABULAR_TYPES = ('csv', 'parquet', 'df', 'mysql', 'clickhouse')
FILE_TYPES = ('csv', 'parquet', 'txt')
SUPPORTED_TYPES = set(TABULAR_TYPES).union(set(FILE_TYPES)).union({'other', 'None'})
def __init__(self, pre_jargs={}, jargs=None, loaded_inputs={}):
self.loaded_inputs = loaded_inputs
self.jargs = self.set_jargs(pre_jargs, loaded_inputs) if not jargs else jargs
if self.jargs.manage_git_info:
git_yml = Git_Config_Manager().get_config(mode=self.jargs.mode, local_app_folder=LOCAL_APP_FOLDER, cluster_app_folder=CLUSTER_APP_FOLDER)
[git_yml.pop(key, None) for key in ('diffs_current', 'diffs_yaetos') if git_yml]
logger.info('Git info {}'.format(git_yml))
    def etl(self, sc, sc_sql):
        """ Main function. If incremental, reruns the ETL process multiple times until
        fully loaded; otherwise runs the ETL once.
        This covers the case where a full incremental rebuild from scratch would need a
        larger cluster to run in one shot than a typical single increment does.
        """
try:
if not self.jargs.is_incremental:
output = self.etl_one_pass(sc, sc_sql, self.loaded_inputs)
else:
output = self.etl_multi_pass(sc, sc_sql, self.loaded_inputs)
except Exception as err:
            if self.jargs.mode in ('prod_EMR',) and self.jargs.merged_args.get('owners'):
self.send_job_failure_email(err)
raise Exception("Job failed, error: \n{}".format(err))
return output
def etl_multi_pass(self, sc, sc_sql, loaded_inputs={}):
needs_run = True
ii = 0
while needs_run: # TODO: check to rewrite as for loop. Simpler and avoiding potential infinite loops.
# TODO: isolate code below into separate functions.
ii+=1
if self.jargs.merged_args.get('job_increment') == 'daily':
if ii == 1:
first_day = self.jargs.merged_args['first_day']
last_run_period = self.get_last_run_period_daily(sc, sc_sql)
periods = Period_Builder().get_last_output_to_last_day(last_run_period, first_day)
if len(periods) == 0:
logger.info('Output up to date. Nothing to run. last processed period={} and last period from now={}'.format(last_run_period, Period_Builder.get_last_day()))
output = sc_sql.createDataFrame([], StructType([]))
self.final_inc = True # remove "self." when sandbox job doesn't depend on it.
else:
logger.info('Periods remaining to load: {}'.format(periods))
period = periods[0]
logger.info('Period to be loaded in this run: {}'.format(period))
self.period = period # to be captured in etl_one_pass, needed for in database filtering.
self.period_next = periods[1] if len(periods)>=2 else None # same
self.jargs.merged_args['file_tag'] = period
output = self.etl_one_pass(sc, sc_sql, loaded_inputs)
self.final_inc = period == periods[-1]
periods.pop(0) # for next increment.
else:
raise Exception("'job_increment' param has to be set to 'daily'")
            if self.jargs.rerun_criteria == 'last_date':  # i.e. stop once the final increment is reached (the current period is the last to process). Problem: can loop forever if data is missing.
                needs_run = not self.final_inc
            elif self.jargs.rerun_criteria == 'output_empty':  # i.e. stop when the current increment is empty. Good for late-arriving data, but a problem if some increment legitimately never gets data.
                needs_run = not self.output_empty
            elif self.jargs.rerun_criteria == 'both':
                needs_run = not (self.output_empty or self.final_inc)
if needs_run:
del(output)
gc.collect()
logger.info('Incremental build needs other run -> {}'.format(needs_run))
# TODO: check to change output to reload all outputs from inc build
return output
def etl_one_pass(self, sc, sc_sql, loaded_inputs={}):
""" Main etl function, loads inputs, runs transform, and saves output."""
logger.info("-------Starting running job '{}'--------".format(self.jargs.job_name))
start_time = time()
self.start_dt = datetime.utcnow() # attached to self so available within "transform()" func.
output, schemas = self.etl_no_io(sc, sc_sql, loaded_inputs)
if output is None:
if self.jargs.is_incremental:
logger.info("-------End job '{}', increment with empty output--------".format(self.jargs.job_name))
self.output_empty = True
else:
logger.info("-------End job '{}', no output--------".format(self.jargs.job_name))
# TODO: add process time in that case.
return None
if not self.jargs.no_fw_cache or (self.jargs.is_incremental and self.jargs.rerun_criteria == 'output_empty'):
logger.info('Output sample:')
try:
output.show()
except Exception as e:
logger.info("Warning: Failed showing table sample with error '{}'.".format(e))
pass
count = output.count()
logger.info('Output count: {}'.format(count))
logger.info("Output data types: {}".format(pformat([(fd.name, fd.dataType) for fd in output.schema.fields])))
self.output_empty = count == 0
self.save_output(output, self.start_dt)
end_time = time()
elapsed = end_time - start_time
logger.info('Process time to complete (post save to file but pre copy to db if any, also may not include processing if output not saved): {} s'.format(elapsed))
if self.jargs.save_schemas and schemas:
schemas.save_yaml(self.jargs.job_name)
# self.save_metadata(elapsed) # disable for now to avoid spark parquet reading issues. TODO: check to re-enable.
if self.jargs.merged_args.get('copy_to_redshift') and self.jargs.enable_redshift_push:
self.copy_to_redshift_using_spark(output) # to use pandas: self.copy_to_redshift_using_pandas(output, self.OUTPUT_TYPES)
if self.jargs.merged_args.get('copy_to_clickhouse') and self.jargs.enable_redshift_push: # TODO: rename enable_redshift_push to enable_db_push since not redshift here.
self.copy_to_clickhouse(output)
if self.jargs.merged_args.get('copy_to_kafka'):
self.push_to_kafka(output, self.OUTPUT_TYPES)
output.unpersist()
end_time = time()
elapsed = end_time - start_time
logger.info('Process time to complete job (post db copies if any): {} s'.format(elapsed))
logger.info("-------End job '{}'--------".format(self.jargs.job_name))
return output
def etl_no_io(self, sc, sc_sql, loaded_inputs={}, jargs=None):
""" Function to load inputs (including from live vars) and run transform. No output to disk.
Having this code isolated is useful for cases with no I/O possible, like testing."""
self.jargs = jargs or self.jargs
self.sc = sc
self.sc_sql = sc_sql
self.app_name = sc.appName
self.logger = logger
if self.jargs.job_name != self.app_name:
logger.info("... part of spark app '{}'".format(self.app_name))
loaded_datasets = self.load_inputs(loaded_inputs)
output = self.transform(**loaded_datasets)
if output and self.jargs.output['type'] in self.TABULAR_TYPES:
if self.jargs.add_created_at=='true':
output = output.withColumn('_created_at', F.lit(self.start_dt))
output.cache()
schemas = Schema_Builder()
schemas.generate_schemas(loaded_datasets, output)
else:
schemas = None
return output, schemas
def etl_no_io_function(self, sc, sc_sql, transform=None, transform_args={}, loaded_inputs={}):
""" Used for testing internal functions"""
# self.jargs = jargs
self.sc = sc
self.sc_sql = sc_sql
self.app_name = sc.appName
self.logger = logger
loaded_datasets = self.load_inputs(loaded_inputs)
output = transform(**transform_args)
return output
def transform(self, **app_args):
""" The function that needs to be overriden by each specific job."""
raise NotImplementedError
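    # A minimal sketch of a concrete job overriding transform() (names are illustrative;
    # 'some_input' would need to match a key under 'inputs' in the job's yml/args):
    #
    #   class Job(ETL_Base):
    #       def transform(self, some_input):
    #           return some_input.where(F.col('amount') > 0)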
def get_last_run_period_daily(self, sc, sc_sql):
previous_output_max_timestamp = self.get_previous_output_max_timestamp(sc, sc_sql)
last_run_period = previous_output_max_timestamp.strftime("%Y-%m-%d") if previous_output_max_timestamp else None # TODO: if get_output_max_timestamp()=None, means new build, so should delete instance in DBs.
return last_run_period
    def set_jargs(self, pre_jargs, loaded_inputs={}):
        """ jargs means job args. Called only when running the job directly, i.e. "python some_job.py". """
py_job = self.set_py_job()
job_name = Job_Yml_Parser.set_job_name_from_file(py_job)
return Job_Args_Parser(defaults_args=pre_jargs['defaults_args'], yml_args=None, job_args=pre_jargs['job_args'], cmd_args=pre_jargs['cmd_args'], job_name=job_name, loaded_inputs=loaded_inputs) # set yml_args=None so loading yml is handled in Job_Args_Parser()
    def set_py_job(self):
        """ Returns the file being executed. For example, when running "python some_job.py", this function returns "some_job.py".
        Only gives a meaningful result when the job is launched that way."""
py_job = inspect.getsourcefile(self.__class__)
logger.info("py_job: '{}'".format(py_job))
return py_job
def load_inputs(self, loaded_inputs):
app_args = {}
for item in self.jargs.inputs.keys():
# Load from memory if available
if item in loaded_inputs.keys():
app_args[item] = loaded_inputs[item]
logger.info("Input '{}' passed in memory from a previous job.".format(item))
continue
# Skip "other" types
if self.jargs.inputs[item]['type'] == "other":
app_args[item] = None
logger.info("Input '{}' not loaded since type set to 'other'.".format(item))
continue
# Load from disk
app_args[item] = self.load_input(item)
logger.info("Input '{}' loaded.".format(item))
        if self.jargs.is_incremental and self.jargs.inputs[item]['type'] not in ('mysql', 'clickhouse'):
if self.jargs.merged_args.get('motm_incremental'):
app_args = self.filter_incremental_inputs_motm(app_args)
else:
app_args = self.filter_incremental_inputs_period(app_args)
self.sql_register(app_args)
return app_args
def filter_incremental_inputs_motm(self, app_args):
"""Filter based on Min Of The Max (motm) of all inputs. Good to deal with late arriving data or async load but
gets stuck if 1 input never has any new data arriving.
Assumes increment fields are datetime."""
min_dt = self.get_previous_output_max_timestamp(self.sc, self.sc_sql) if len(app_args.keys()) > 0 else None
# Get latest timestamp in common across incremental inputs
maxes = []
for item in app_args.keys():
input_is_tabular = self.jargs.inputs[item]['type'] in self.TABULAR_TYPES
inc = self.jargs.inputs[item].get('inc_field', None)
if input_is_tabular and inc:
max_dt = app_args[item].agg({inc: "max"}).collect()[0][0]
maxes.append(max_dt)
max_dt = min(maxes) if len(maxes)>0 else None
# Filter
for item in app_args.keys():
input_is_tabular = self.jargs.inputs[item]['type'] in self.TABULAR_TYPES
inc = self.jargs.inputs[item].get('inc_field', None)
if inc:
if input_is_tabular:
# TODO: add limit to amount of input data, and set self.final_inc=False
inc_type = {k:v for k, v in app_args[item].dtypes}[inc]
logger.info("Input dataset '{}' will be filtered for min_dt={} max_dt={}".format(item, min_dt, max_dt))
if min_dt:
# min_dt = to_date(lit(s)).cast(TimestampType() # TODO: deal with dt type, as coming from parquet
app_args[item] = app_args[item].filter(app_args[item][inc] > min_dt)
if max_dt:
app_args[item] = app_args[item].filter(app_args[item][inc] <= max_dt)
else:
raise Exception("Incremental loading is not supported for unstructured input. You need to handle the incremental logic in the job code.")
return app_args
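    # Example of the min-of-the-max logic above (dates illustrative): if input A's newest
    # inc_field value is 2021-01-05 and input B's is 2021-01-03, both inputs are filtered
    # up to 2021-01-03, so an increment only covers rows that every input already has.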
    def filter_incremental_inputs_period(self, app_args):
        """Filter inputs on the period set in self.period. Simple, but can be a problem with late-arriving data or when dependencies haven't run.
        Inputs already filtered inside the source database will be filtered again."""
for item in app_args.keys():
input_is_tabular = self.jargs.inputs[item]['type'] in self.TABULAR_TYPES
inc = self.jargs.inputs[item].get('inc_field', None)
if inc:
if input_is_tabular:
# TODO: add limit to amount of input data, and set self.final_inc=False
inc_type = {k:v for k, v in app_args[item].dtypes}[inc]
logger.info("Input dataset '{}' will be filtered for {}='{}'".format(item, inc, self.period))
app_args[item] = app_args[item].filter(app_args[item][inc] == self.period)
else:
raise Exception("Incremental loading is not supported for unstructured input. You need to handle the incremental logic in the job code.")
return app_args
def sql_register(self, app_args):
for item in app_args.keys():
input_is_tabular = hasattr(app_args[item], "rdd") # assuming DataFrame will keep 'rdd' attribute
# ^ better than using self.jargs.inputs[item]['type'] in self.TABULAR_TYPES since doesn't require 'type' being defined.
if input_is_tabular:
app_args[item].createOrReplaceTempView(item)
def load_input(self, input_name):
input_type = self.jargs.inputs[input_name]['type']
if input_type in self.FILE_TYPES:
path = self.jargs.inputs[input_name]['path']
path = path.replace('s3://', 's3a://') if self.jargs.mode == 'dev_local' else path
logger.info("Input '{}' to be loaded from files '{}'.".format(input_name, path))
path = Path_Handler(path, self.jargs.base_path).expand_later(self.jargs.storage)
if input_type == 'txt':
rdd = self.sc.textFile(path)
logger.info("Input '{}' loaded from files '{}'.".format(input_name, path))
return rdd
# Tabular types
if input_type == 'csv':
delimiter = self.jargs.merged_args.get('csv_delimiter', ',')
sdf = self.sc_sql.read.option("delimiter", delimiter).csv(path, header=True)
logger.info("Input '{}' loaded from files '{}'.".format(input_name, path))
elif input_type == 'parquet':
sdf = self.sc_sql.read.parquet(path)
logger.info("Input '{}' loaded from files '{}'.".format(input_name, path))
elif input_type == 'mysql':
sdf = self.load_mysql(input_name)
logger.info("Input '{}' loaded from mysql".format(input_name))
elif input_type == 'clickhouse':
sdf = self.load_clickhouse(input_name)
logger.info("Input '{}' loaded from clickhouse".format(input_name))
else:
raise Exception("Unsupported input type '{}' for path '{}'. Supported types are: {}. ".format(input_type, self.jargs.inputs[input_name].get('path'), self.SUPPORTED_TYPES))
logger.info("Input data types: {}".format(pformat([(fd.name, fd.dataType) for fd in sdf.schema.fields])))
return sdf
def load_data_from_files(self, name, path, type, sc, sc_sql):
"""Loading any dataset (input or not) and only from file system (not from DBs). Used by incremental jobs to load previous output.
Different from load_input() which only loads input (input jargs hardcoded) and from any source."""
# TODO: integrate with load_input to remove duplicated code.
input_type = type
input_name = name
path = path.replace('s3://', 's3a://') if self.jargs.mode == 'dev_local' else path
logger.info("Dataset '{}' to be loaded from files '{}'.".format(input_name, path))
path = Path_Handler(path, self.jargs.base_path).expand_later(self.jargs.storage)
if input_type == 'txt':
rdd = self.sc.textFile(path)
logger.info("Dataset '{}' loaded from files '{}'.".format(input_name, path))
return rdd
# Tabular types
if input_type == 'csv':
sdf = sc_sql.read.csv(path, header=True) # TODO: add way to add .option("delimiter", ';'), useful for metric_budgeting.
logger.info("Dataset '{}' loaded from files '{}'.".format(input_name, path))
elif input_type == 'parquet':
# TODO: check to add ...read.option("mergeSchema", "true").parquet...
sdf = sc_sql.read.parquet(path)
logger.info("Dataset '{}' loaded from files '{}'.".format(input_name, path))
else:
raise Exception("Unsupported dataset type '{}' for path '{}'. Supported types are: {}. ".format(input_type, path, self.SUPPORTED_TYPES))
# New param "custom_schema" to work for both db and file inputs (instead of just db). TODO: finish.
# df_custom_schema = self.jargs.merged_args.get('df_custom_schema')
# if df_custom_schema:
# for field, type in df_custom_schema.items():
# table_to_copy = table_to_copy.withColumn(field, table_to_copy[field].cast(type))
logger.info("Dataset data types: {}".format(pformat([(fd.name, fd.dataType) for fd in sdf.schema.fields])))
return sdf
def load_mysql(self, input_name):
creds = Cred_Ops_Dispatcher().retrieve_secrets(self.jargs.storage, creds=self.jargs.connection_file)
creds_section = self.jargs.inputs[input_name]['creds']
db = creds[creds_section]
extra_params = '' # can use '?zeroDateTimeBehavior=CONVERT_TO_NULL' to help solve "java.sql.SQLException: Zero date value prohibited" but leads to other error msg.
url = 'jdbc:mysql://{host}:{port}/{service}{extra_params}'.format(host=db['host'], port=db['port'], service=db['service'], extra_params=extra_params)
dbtable = self.jargs.inputs[input_name]['db_table']
inc_field = self.jargs.inputs[input_name].get('inc_field')
if not inc_field:
logger.info('Pulling table "{}" from mysql'.format(dbtable))
sdf = self.sc_sql.read \
.format('jdbc') \
.option('driver', "com.mysql.cj.jdbc.Driver") \
.option("url", url) \
.option("user", db['user']) \
.option("password", db['password']) \
.option("dbtable", dbtable)\
.load()
else:
inc_field = self.jargs.inputs[input_name]['inc_field']
period = self.period
# query_str = "select * from {} where {} = '{}'".format(dbtable, inc_field, period)
higher_limit = "AND {inc_field} < '{period_next}'".format(inc_field=inc_field, period_next=self.period_next) if self.period_next else ''
query_str = "select * from {dbtable} where {inc_field} >= '{period}' {higher_limit}".format(dbtable=dbtable, inc_field=inc_field, period=self.period, higher_limit=higher_limit)
logger.info('Pulling table from mysql with query_str "{}"'.format(query_str))
# if self.jargs.merged_args.get('custom_schema', '')
# db_overridden_types_str = ', '.join([k + ' ' + v for k, v in db_overridden_types.items()])
# TODO: check if it should use com.mysql.cj.jdbc.Driver instead as above
sdf = self.sc_sql.read \
.format('jdbc') \
.option('driver', "com.mysql.jdbc.Driver") \
.option('fetchsize', 10000) \
.option('numPartitions', 3) \
.option("url", url) \
.option("user", db['user']) \
.option("password", db['password']) \
.option("customSchema", self.jargs.merged_args.get('jdbc_custom_schema', '')) \
.option("query", query_str) \
.load()
return sdf
def load_clickhouse(self, input_name):
creds = Cred_Ops_Dispatcher().retrieve_secrets(self.jargs.storage, creds=self.jargs.connection_file)
creds_section = self.jargs.inputs[input_name]['creds']
db = creds[creds_section]
url = 'jdbc:postgresql://{host}/{service}'.format(host=db['host'], service=db['service'])
dbtable = self.jargs.inputs[input_name]['db_table']
inc_field = self.jargs.inputs[input_name].get('inc_field')
if not inc_field:
logger.info('Pulling table "{}" from Clickhouse'.format(dbtable))
sdf = self.sc_sql.read \
.format('jdbc') \
.option('driver', "org.postgresql.Driver") \
.option("url", url) \
.option("user", db['user']) \
.option("password", db['password']) \
.option("dbtable", dbtable)\
.load()
else:
inc_field = self.jargs.inputs[input_name]['inc_field']
period = self.period
query_str = "select * from {} where {} = '{}'".format(dbtable, inc_field, period)
logger.info('Pulling table from Clickhouse with query_str "{}"'.format(query_str))
sdf = self.sc_sql.read \
.format('jdbc') \
.option('driver', "org.postgresql.Driver") \
.option('fetchsize', 10000) \
.option('numPartitions', 3) \
.option("url", url) \
.option("user", db['user']) \
.option("password", db['password']) \
.option("query", query_str) \
.load()
return sdf
def get_previous_output_max_timestamp(self, sc, sc_sql):
path = self.jargs.output['path'] # implies output path is incremental (no "{now}" in string.)
path += '*' if self.jargs.merged_args.get('incremental_type') == 'no_schema' else '' # '*' to go into output subfolders.
try:
df = self.load_data_from_files(name='output', path=path, type=self.jargs.output['type'], sc=sc, sc_sql=sc_sql)
except Exception as e: # TODO: don't catch all
logger.info("Previous increment could not be loaded or doesn't exist. It will be ignored. Folder '{}' failed loading with error '{}'.".format(path, e))
return None
dt = self.get_max_timestamp(df)
logger.info("Max timestamp of previous increment: '{}'".format(dt))
return dt
def get_max_timestamp(self, df):
return df.agg({self.jargs.output['inc_field']: "max"}).collect()[0][0]
def save_output(self, output, now_dt=None):
self.path = self.save(output=output,
path=self.jargs.output['path'],
base_path=self.jargs.base_path,
type=self.jargs.output['type'],
now_dt=now_dt,
is_incremental=self.jargs.is_incremental,
incremental_type=self.jargs.merged_args.get('incremental_type', 'no_schema'),
partitionby=self.jargs.output.get('inc_field') or self.jargs.merged_args.get('partitionby'),
file_tag=self.jargs.merged_args.get('file_tag')) # TODO: make param standard in cmd_args ?
def save(self, output, path, base_path, type, now_dt=None, is_incremental=None, incremental_type=None, partitionby=None, file_tag=None):
"""Used to save output to disk. Can be used too inside jobs to output 2nd output for testing."""
path = Path_Handler(path, base_path).expand_now(now_dt)
if type == 'None':
logger.info('Did not write output to disk')
return None
if is_incremental and incremental_type == 'no_schema':
current_time = now_dt.strftime('%Y%m%d_%H%M%S_utc') # no use of now_dt to make it updated for each inc.
file_tag = ('_' + file_tag) if file_tag else "" # TODO: make that param standard in cmd_args ?
path += 'inc_{}{}/'.format(current_time, file_tag)
# TODO: rename 'partitioned' to 'spark_partitions' and 'no_schema' to 'yaetos_partitions'
write_mode = 'append' if incremental_type == 'partitioned' or partitionby else 'error'
partitionby = partitionby.split(',') if partitionby else []
# TODO: deal with cases where "output" is df when expecting rdd, or at least raise issue in a cleaner way.
if type == 'txt':
output.saveAsTextFile(path)
elif type == 'parquet':
output.write.partitionBy(*partitionby).mode(write_mode).parquet(path)
elif type == 'csv':
output.write.partitionBy(*partitionby).mode(write_mode).option("header", "true").csv(path)
else:
raise Exception("Need to specify supported output type, either txt, parquet or csv.")
logger.info('Wrote output to ' + path)
return path
def save_metadata(self, elapsed):
fname = self.path + '_metadata.txt'
content = """
-- app_name: %s
-- job_name: %s
-- time (s): %s
-- cluster_setup : TBD
-- input folders : TBD
-- output folder : TBD
-- github hash: TBD
-- code: TBD
"""%(self.app_name, self.jargs.job_name, elapsed)
FS_Ops_Dispatcher().save_metadata(fname, content, self.jargs.storage)
def query(self, query_str):
logger.info('Query string:\n' + query_str)
df = self.sc_sql.sql(query_str)
df.cache()
return df
def copy_to_redshift_using_pandas(self, output, types):
# import put here below to avoid loading heavy libraries when not needed (optional feature).
from yaetos.redshift_pandas import create_table
from yaetos.db_utils import cast_col
df = output.toPandas()
df = cast_col(df, types)
connection_profile = self.jargs.copy_to_redshift['creds']
schema, name_tb = self.jargs.copy_to_redshift['table'].split('.')
schema = schema.format(schema=self.jargs.schema) if '{schema}' in schema else schema
creds = Cred_Ops_Dispatcher().retrieve_secrets(self.jargs.storage, creds=self.jargs.connection_file)
create_table(df, connection_profile, name_tb, schema, types, creds, self.jargs.is_incremental)
del(df)
def copy_to_redshift_using_spark(self, sdf):
# import put here below to avoid loading heavy libraries when not needed (optional feature).
from yaetos.redshift_spark import create_table
connection_profile = self.jargs.copy_to_redshift['creds']
schema, name_tb= self.jargs.copy_to_redshift['table'].split('.')
schema = schema.format(schema=self.jargs.schema) if '{schema}' in schema else schema
creds = Cred_Ops_Dispatcher().retrieve_secrets(self.jargs.storage, creds=self.jargs.connection_file)
create_table(sdf, connection_profile, name_tb, schema, creds, self.jargs.is_incremental, self.jargs.redshift_s3_tmp_dir, self.jargs.merged_args.get('spark_version', '2.4'))
def copy_to_clickhouse(self, sdf):
# import put here below to avoid loading heavy libraries when not needed (optional feature).
from yaetos.clickhouse import create_table
connection_profile = self.jargs.copy_to_clickhouse['creds']
schema, name_tb= self.jargs.copy_to_clickhouse['table'].split('.')
schema = schema.format(schema=self.jargs.schema) if '{schema}' in schema else schema
creds = Cred_Ops_Dispatcher().retrieve_secrets(self.jargs.storage, creds=self.jargs.connection_file)
create_table(sdf, connection_profile, name_tb, schema, creds, self.jargs.is_incremental)
def push_to_kafka(self, output, types):
""" Needs to be overriden by each specific job."""
raise NotImplementedError
def send_msg(self, msg, recipients=None):
""" Sending message to recipients (list of email addresse) or, if not specified, to yml 'owners'.
Pulling email sender account info from connection_file."""
if not recipients:
recipients = self.jargs.merged_args.get('owners')
if not recipients:
logger.error("Email can't be sent since no recipient set in {}, .\nMessage : \n{}".format(self.jargs.job_param_file, msg))
return None
creds = Cred_Ops_Dispatcher().retrieve_secrets(self.jargs.storage, creds=self.jargs.connection_file)
creds_section = self.jargs.email_cred_section
sender_email = creds.get(creds_section, 'sender_email')
password = creds.get(creds_section, 'password')
smtp_server = creds.get(creds_section, 'smtp_server')
port = creds.get(creds_section, 'port')
for recipient in recipients:
send_email(msg, recipient, sender_email, password, smtp_server, port)
logger.info('Email sent to {}'.format(recipient))
def send_job_failure_email(self, error_msg):
message = """Subject: [Data Pipeline Failure] {name}\n\nA Data pipeline named '{name}' failed.\nError message:\n{error}\n\nPlease check logs in AWS.""".format(name=self.jargs.job_name, error=error_msg)
self.send_msg(message)
@staticmethod
def check_pk(df, pks):
count = df.count()
count_pk = df.select(pks).dropDuplicates().count()
if count != count_pk:
logger.error("PKs not unique. count={}, count_pk={}".format(count, count_pk))
return False
else:
logger.info("Confirmed fields given are PKs (i.e. unique). count=count_pk={}".format(count))
return True
def identify_non_unique_pks(self, df, pks):
windowSpec = Window.partitionBy([F.col(item) for item in pks])
df = df.withColumn('_count_pk', F.count('*').over(windowSpec)) \
.where(F.col('_count_pk') >= 2)
# Debug: df.repartition(1).write.mode('overwrite').option("header", "true").csv('data/sandbox/non_unique_test/')
return df
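# Illustrative usage (not part of the original class), following the same window logic as above,
# with a made-up DataFrame:
# df = spark.createDataFrame([(1, 'a'), (1, 'b'), (2, 'c')], ['id', 'val'])
# self.check_pk(df, ['id'])                 -> False (count=3 vs count_pk=2, since id=1 appears twice)
# self.identify_non_unique_pks(df, ['id'])  -> the two rows with id=1, each carrying _count_pk=2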
class Period_Builder():
@staticmethod
def get_last_day(as_of_date=datetime.utcnow()):
last_day_dt = as_of_date + relativedelta(days=-1)
last_day = last_day_dt.strftime("%Y-%m-%d")
return last_day
@staticmethod
def get_first_to_last_day(first_day, as_of_date=datetime.utcnow()):
now = as_of_date
start = datetime.strptime(first_day, "%Y-%m-%d")
delta = now - start
number_days = delta.days
periods = []
iter_days = start
for item in range(number_days):
periods.append(iter_days.strftime("%Y-%m-%d"))
iter_days = iter_days + relativedelta(days=+1)
return periods
def get_last_output_to_last_day(self, last_run_period, first_day_input, as_of_date=datetime.utcnow()):
periods = self.get_first_to_last_day(first_day_input, as_of_date)
if last_run_period:
periods = [item for item in periods if item > last_run_period]
# periods = [item for item in periods if item < '2021-01-02'] # TODO: make end period parametrizable from args.
return periods
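# Illustrative usage (not part of the original class), with made-up dates:
# Period_Builder.get_last_day(datetime(2021, 1, 4))                          -> '2021-01-03'
# Period_Builder.get_first_to_last_day('2021-01-01', datetime(2021, 1, 4))   -> ['2021-01-01', '2021-01-02', '2021-01-03']
# Period_Builder().get_last_output_to_last_day('2021-01-02', '2021-01-01', datetime(2021, 1, 4))
#                                                                            -> ['2021-01-03']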
class Schema_Builder():
TYPES_FOLDER = 'schemas/'
def generate_schemas(self, loaded_datasets, output):
yml = {'inputs':{}}
for key, value in loaded_datasets.items():
yml['inputs'][key] = {fd.name: fd.dataType.__str__() for fd in value.schema.fields}
yml['output'] = {fd.name: fd.dataType.__str__() for fd in output.schema.fields}
self.yml = yml
def save_yaml(self, job_name):
job_name = job_name.replace('.py', '')
fname = self.TYPES_FOLDER + job_name+'.yaml'
os.makedirs(os.path.dirname(fname), exist_ok=True)
with open(fname, 'w') as file:
ignored = yaml.dump(self.yml, file)
class Job_Yml_Parser():
"""Functions to load and parse yml, and functions to get job_name, which is the key to the yml info."""
def __init__(self, job_name, job_param_file, mode, skip_job=False):
self.yml_args = self.set_job_yml(job_name, job_param_file, mode, skip_job)
self.yml_args['job_name'] = job_name
self.yml_args['py_job'] = self.yml_args.get('py_job') or self.set_py_job_from_name(job_name)
self.yml_args['sql_file'] = self.set_sql_file_from_name(job_name, mode)
@staticmethod
def set_job_name_from_file(job_file):
# when run from Flow(), job_file is full path. When run from ETL directly, job_file is "jobs/..." .
if job_file.startswith(CLUSTER_APP_FOLDER+'jobs/'):
job_name = job_file[len(CLUSTER_APP_FOLDER+'jobs/'):]
elif job_file.startswith(CLUSTER_APP_FOLDER+'scripts.zip/jobs/'):
job_name = job_file[len(CLUSTER_APP_FOLDER+'scripts.zip/jobs/'):]
elif job_file.startswith(LOCAL_APP_FOLDER+'jobs/'):
job_name = job_file[len(LOCAL_APP_FOLDER+'jobs/'):]
elif job_file.startswith(LOCAL_JOB_REPO_FOLDER+'jobs/'): # when run from external repo.
job_name = job_file[len(LOCAL_JOB_REPO_FOLDER+'jobs/'):]
elif job_file.startswith('jobs/'):
job_name = job_file[len('jobs/'):]
elif job_file.__contains__('/scripts.zip/jobs/'):
# To deal with cases like job_file = '/mnt/tmp/spark-48e465ad-cca8-4216-a77f-ce069d04766f/userFiles-b1dad8aa-76ea-4adf-97da-dc9273666263/scripts.zip/jobs/infojobs/churn_prediction/users_inscriptions_daily.py' that appeared in new emr version.
job_name = job_file[job_file.find('/scripts.zip/jobs/')+len('/scripts.zip/jobs/'):]
else:
# To deal with case when job is defined outside of this repo (and not in jobs/ folder in external folder), i.e. isn't located in 'jobs/' folder. In this case, job name in metadata file should include full path (inc job base path).
job_name = job_file
logger.info("job_name: '{}', from job_file: '{}'".format(job_name, job_file))
return job_name
@staticmethod
def set_py_job_from_name(job_name):
py_job='jobs/{}'.format(job_name)
logger.info("py_job: '{}', from job_name: '{}'".format(py_job, job_name))
return py_job
@staticmethod
def set_sql_file_from_name(job_name, mode):
if not job_name.endswith('.sql'):
return None
if mode in ('dev_EMR', 'prod_EMR'):
sql_file=CLUSTER_APP_FOLDER+'jobs/{}'.format(job_name)
elif mode == 'dev_local':
sql_file='jobs/{}'.format(job_name)
else:
raise Exception("Mode not supported in set_sql_file_from_name(): {}".format(mode))
logger.info("sql_file: '{}', from job_name: '{}'".format(sql_file, job_name))
return sql_file
def set_job_yml(self, job_name, job_param_file, yml_mode, skip_job):
if job_param_file is None:
return {}
yml = self.load_meta(job_param_file)
if job_name not in yml['jobs'] and not skip_job:
raise KeyError("Your job '{}' can't be found in jobs_metadata file '{}'. Add it there or make sure the name matches".format(job_name, job_param_file))
elif job_name not in yml['jobs'] and skip_job:
job_yml = {}
else:
job_yml = yml['jobs'][job_name]
if yml_mode not in yml['common_params']['mode_specific_params']:
raise KeyError("Your yml mode '{}' can't be found in jobs_metadata file '{}'. Add it there or make sure the name matches".format(yml_mode, job_param_file))
mode_spec_yml = yml['common_params']['mode_specific_params'][yml_mode]
out = yml['common_params']['all_mode_params']
out.update(mode_spec_yml)
out.update(job_yml)
return out
@staticmethod
def load_meta(fname):
with open(fname, 'r') as stream:
yml = yaml.safe_load(stream)
return yml
class Job_Args_Parser():
DEPLOY_ARGS_LIST = ['aws_config_file', 'aws_setup', 'leave_on', 'push_secrets', 'frequency', 'start_date',
'email', 'mode', 'deploy', 'terminate_after', 'spark_version']
def __init__(self, defaults_args, yml_args, job_args, cmd_args, job_name=None, loaded_inputs={}):
"""Mix all params, add more and tweak them when needed (like depending on storage type, execution mode...).
If yml_args not provided, it will go and get it.
Sets of params:
- defaults_args: defaults command line args, as defined in define_commandline_args()
- yml_args: args for specific job from yml. If = None, it will rebuild it using job_name param.
- job_args: args passed to "Commandliner(Job, **args)" in each job file
- cmd_args: args passed in commandline, like "python some_job.py --some_args=xxx", predefined in define_commandline_args() or not
- job_name: to use only when yml_args is set to None, to specify what section of the yml to pick.
"""
if yml_args is None:
# Getting merged args, without yml (order matters)
args = defaults_args.copy()
args.update(job_args)
args.update(cmd_args)
args.update({'job_name':job_name} if job_name else {})
args['mode'] = 'dev_EMR' if args['mode'] == 'dev_local' and args['deploy'] in ('EMR', 'EMR_Scheduled') else args['mode']
assert 'job_name' in args.keys()
yml_args = Job_Yml_Parser(args['job_name'], args['job_param_file'], args['mode'], args.get('skip_job', False)).yml_args
# Get merged args, with yml (order matters)
# TODO: need to add business of flatten/unflatten so they can be merged cleanly.
args = defaults_args.copy()
args.update(yml_args)
args.update(job_args)
args.update(cmd_args)
args['mode'] = 'dev_EMR' if args['mode'] == 'dev_local' and args['deploy'] in ('EMR', 'EMR_Scheduled') else args['mode']
args = self.update_args(args, loaded_inputs)
[setattr(self, key, value) for key, value in args.items()] # attach vars to self.*
# Other access to vars
self.merged_args = args
self.defaults_args = defaults_args
self.yml_args = yml_args
self.job_args = job_args
self.cmd_args = cmd_args
logger.info("Job args: \n{}".format(pformat(args)))
def get_deploy_args(self):
return {key: value for key, value in self.merged_args.items() if key in self.DEPLOY_ARGS_LIST}
def get_app_args(self):
return {key: value for key, value in self.merged_args.items() if key not in self.DEPLOY_ARGS_LIST or key=='mode'}
def update_args(self, args, loaded_inputs):
""" Updating params or adding new ones, according to execution environment (local, prod...)"""
args['inputs'] = self.set_inputs(args, loaded_inputs)
# args['output'] = self.set_output(cmd_args, yml_args) # TODO: fix later
args['is_incremental'] = self.set_is_incremental(args.get('inputs', {}), args.get('output', {}))
args['output']['type'] = args.pop('output.type', None) or args['output']['type']
return args
# TODO: modify later since not used now
def set_inputs(self, args, loaded_inputs):
# inputs_in_args = any([item.startswith('input_') for item in cmd_args.keys()])
# if inputs_in_args:
# # code below limited, will break in non-friendly way if not all input params are provided, doesn't support other types of inputs like db ones. TODO: make it better.
# input_paths = {key.replace('input_path_', ''): {'path': val} for key, val in cmd_args.items() if key.startswith('input_path_')}
# input_types = {key.replace('input_type_', ''): {'type': val} for key, val in cmd_args.items() if key.startswith('input_type_')}
# inputs = {key: {'path': val['path'], 'type':input_types[key]['type']} for key, val in input_paths.items()}
# return inputs
if loaded_inputs:
return {key: {'path': val, 'type': 'df'} for key, val in loaded_inputs.items()}
else:
return args.get('inputs', {})
# TODO: modify later since not used now
# def set_output(self, cmd_args, yml_args):
# output_in_args = any([item == 'output_path' for item in cmd_args.keys()])
# if output_in_args:
# # code below limited, will break in non-friendly way if not all output params are provided, doesn't support other types of outputs like db ones. TODO: make it better.
# output = {'path':cmd_args['output_path'], 'type':cmd_args['output_type']}
# return output
# elif cmd_args.get('job_param_file'): # should be before loaded_inputs to use yaml if available. Later function load_inputs uses both self.jargs.inputs and loaded_inputs, so not incompatible.
# return yml_args.get('output', {})
# elif cmd_args.get('mode_no_io'):
# output = {}
# logger.info("No output given")
# else:
# raise Exception("No output given")
# return output
def set_is_incremental(self, inputs, output):
return any(['inc_field' in inputs[item] for item in inputs.keys()]) or 'inc_field' in output
class FS_Ops_Dispatcher():
# TODO: remove the 'storage' var, not used anymore across the functions below, since it is now inferred from the path
@staticmethod
def is_s3_path(path):
return path.startswith('s3://') or path.startswith('s3a://')
# --- save_metadata set of functions ----
def save_metadata(self, fname, content, storage):
self.save_metadata_cluster(fname, content) if self.is_s3_path(fname) else self.save_metadata_local(fname, content)
@staticmethod
def save_metadata_local(fname, content):
fh = open(fname, 'w')
fh.write(content)
fh.close()
logger.info("Created file locally: {}".format(fname))
@staticmethod
def save_metadata_cluster(fname, content):
fname_parts = fname.split('s3://')[1].split('/')
bucket_name = fname_parts[0]
bucket_fname = '/'.join(fname_parts[1:])
fake_handle = StringIO(content)
s3c = boto3.Session(profile_name='default').client('s3')
s3c.put_object(Bucket=bucket_name, Key=bucket_fname, Body=fake_handle.read())
logger.info("Created file S3: {}".format(fname))
# --- save_file set of functions ----
def save_file(self, fname, content, storage):
self.save_file_cluster(fname, content) if self.is_s3_path(fname) else self.save_file_local(fname, content)
@staticmethod
def save_file_local(fname, content):
folder = os.path.dirname(fname)
if not os.path.exists(folder):
os.makedirs(folder)
joblib.dump(content, fname)
logger.info("Saved content to new file locally: {}".format(fname))
def save_file_cluster(self, fname, content):
fname_parts = fname.split('s3://')[1].split('/')
bucket_name = fname_parts[0]
bucket_fname = '/'.join(fname_parts[1:])
s3c = boto3.Session(profile_name='default').client('s3')
local_path = CLUSTER_APP_FOLDER+'tmp/local_'+fname_parts[-1]
self.save_file_local(local_path, content)
fh = open(local_path, 'rb')
s3c.put_object(Bucket=bucket_name, Key=bucket_fname, Body=fh)
logger.info("Pushed local file to S3, from '{}' to '{}' ".format(local_path, fname))
# --- load_file set of functions ----
def load_file(self, fname, storage):
return self.load_file_cluster(fname) if self.is_s3_path(fname) else self.load_file_local(fname)
@staticmethod
def load_file_local(fname):
return joblib.load(fname)
@staticmethod
def load_file_cluster(fname):
fname_parts = fname.split('s3://')[1].split('/')
bucket_name = fname_parts[0]
bucket_fname = '/'.join(fname_parts[1:])
local_path = CLUSTER_APP_FOLDER+'tmp/s3_'+fname_parts[-1]
s3c = boto3.Session(profile_name='default').client('s3')
s3c.download_file(bucket_name, bucket_fname, local_path)
logger.info("Copied file from S3 '{}' to local '{}'".format(fname, local_path))
model = joblib.load(local_path)
return model
# --- listdir set of functions ----
def listdir(self, path, storage):
return self.listdir_cluster(path) if self.is_s3_path(path) else self.listdir_local(path)
@staticmethod
def listdir_local(path):
return os.listdir(path)
@staticmethod
def listdir_cluster(path): # TODO: rename to listdir_s3, same for similar functions from FS_Ops_Dispatcher
# TODO: better handle invalid path. Crashes with "TypeError: 'NoneType' object is not iterable" at last line.
if path.startswith('s3://'):
s3_root = 's3://'
elif path.startswith('s3a://'):
s3_root = 's3a://' # necessary when pulling S3 to local automatically from spark.
else:
raise ValueError('Problem with path. Pulling from s3, it should start with "s3://" or "s3a://". Path is: {}'.format(path))
fname_parts = path.split(s3_root)[1].split('/')
bucket_name = fname_parts[0]
prefix = '/'.join(fname_parts[1:])
client = boto3.Session(profile_name='default').client('s3')
paginator = client.get_paginator('list_objects')
objects = paginator.paginate(Bucket=bucket_name, Prefix=prefix, Delimiter='/')
paths = [item['Prefix'].split('/')[-2] for item in objects.search('CommonPrefixes')]
return paths
# --- dir_exist set of functions ----
def dir_exist(self, path, storage):
return self.dir_exist_cluster(path) if self.is_s3_path(path) else self.dir_exist_local(path)
@staticmethod
def dir_exist_local(path):
return os.path.isdir(path)
@staticmethod
def dir_exist_cluster(path):
raise Exception("Not implemented")
class Cred_Ops_Dispatcher():
def retrieve_secrets(self, storage, creds='conf/connections.cfg'):
creds = self.retrieve_secrets_cluster() if storage=='s3' else self.retrieve_secrets_local(creds)
return creds
@staticmethod
def retrieve_secrets_cluster():
client = boto3.Session(profile_name='default').client('secretsmanager')
response = client.get_secret_value(SecretId=AWS_SECRET_ID)
logger.info('Read aws secret, secret_id:'+AWS_SECRET_ID)
logger.debug('get_secret_value response: '+str(response))
content = response['SecretString']
fake_handle = StringIO(content)
config = ConfigParser()
config.read_file(fake_handle)
return config
@staticmethod
def retrieve_secrets_local(creds):
config = ConfigParser()
assert os.path.isfile(creds)
config.read(creds)
return config
class Path_Handler():
def __init__(self, path, base_path=None):
if base_path:
path = path.format(base_path=base_path, latest='{latest}', now='{now}')
self.path = path
def expand_later(self, storage):
path = self.path
if '{latest}' in path:
upstream_path = path.split('{latest}')[0]
paths = FS_Ops_Dispatcher().listdir(upstream_path, storage)
latest_date = max(paths)
path = path.format(latest=latest_date)
return path
def expand_now(self, now_dt):
path = self.path
if '{now}' in path:
current_time = now_dt.strftime('date%Y%m%d_time%H%M%S_utc')
path = path.format(now=current_time)
return path
def get_base(self):
if '{latest}' in self.path:
return self.path.split('{latest}')[0]
elif '{now}' in self.path:
return self.path.split('{now}')[0]
else:
return self.path
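# Illustrative usage (not part of the original class), with made-up paths:
# ph = Path_Handler('{base_path}/tables/my_table/{now}/', base_path='s3://some-bucket/pipelines')
# ph.expand_now(datetime(2021, 1, 3, 12, 0, 0))
#   -> 's3://some-bucket/pipelines/tables/my_table/date20210103_time120000_utc/'
# For '{latest}' paths, expand_later() lists the parent folder (locally or on S3) and substitutes
# the max subfolder name found there.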
class Commandliner():
def __init__(self, Job, **job_args):
parser, defaults_args = self.define_commandline_args()
cmd_args = self.set_commandline_args(parser)
# Building "job", which will include all job args.
if Job is None: # when job run from "python launcher.py --job_name=some_name_from_job_metadata_file"
jargs = Job_Args_Parser(defaults_args=defaults_args, yml_args=None, job_args=job_args, cmd_args=cmd_args, loaded_inputs={})
Job = get_job_class(jargs.py_job)
job = Job(jargs=jargs)
else: # when job run from "python some_job.py"
job = Job(pre_jargs={'defaults_args':defaults_args, 'job_args': job_args, 'cmd_args':cmd_args}) # can provide jargs directly here since job_file (and so job_name) needs to be extracted from job first. So, letting job build jargs.
# Executing or deploying
if job.jargs.deploy == 'none': # when executing job code
self.launch_run_mode(job)
elif job.jargs.deploy in ('EMR', 'EMR_Scheduled', 'code'): # when deploying to AWS for execution there
self.launch_deploy_mode(job.jargs.get_deploy_args(), job.jargs.get_app_args())
@staticmethod
def set_commandline_args(parser):
"""Command line arguments take precedence over function ones."""
cmd_args, cmd_unknown_args = parser.parse_known_args()
cmd_args = {key: value for (key, value) in cmd_args.__dict__.items() if value is not None}
cmd_unknown_args = dict([item[2:].split('=') for item in cmd_unknown_args]) # imposes for unknown args to be defined with '=' and to start with '--'
cmd_args.update(cmd_unknown_args)
return cmd_args
@staticmethod
def define_commandline_args():
# Defined here separately for overridability.
parser = argparse.ArgumentParser()
parser.add_argument("-d", "--deploy", choices=set(['none', 'EMR', 'EMR_Scheduled', 'EMR_DataPipeTest', 'code']), help="Choose where to run the job.")
parser.add_argument("-m", "--mode", choices=set(['dev_local', 'dev_EMR', 'prod_EMR']), help="Choose which set of params to use from jobs_metadata.yml file.")
parser.add_argument("-j", "--job_param_file", help="Identify file to use. It can be set to 'False' to not load any file and provide all parameters through job or command line arguments.")
parser.add_argument("-n", "--job_name", help="Identify registry job to use.")
parser.add_argument("-q", "--sql_file", help="Path to an sql file to execute.")
parser.add_argument("--connection_file", help="Identify file to use. Default to repo one.")
parser.add_argument("--jobs_folder", help="Identify the folder where job code is. Necessary if job code is outside the repo, i.e. if this is used as an external library. By default, uses the repo 'jobs/' folder.")
parser.add_argument("-s", "--storage", choices=set(['local', 's3']), help="Choose 'local' (default) or 's3'.")
parser.add_argument("-x", "--dependencies", action='store_true', help="Run the job dependencies and then the job itself")
parser.add_argument("-c", "--rerun_criteria", choices=set(['last_date', 'output_empty', 'both']), help="Choose criteria to rerun the next increment or not. 'last_date' usefull if we know data goes to a certain date. 'output_empty' not to be used if increment may be empty but later ones not. Only relevant for incremental job.")
parser.add_argument("--chain_dependencies", action='store_true', help="Run dependant jobs in a chained way, i.e. passing output to next step without dropping to disk. Only useful if ran with dependencies (-x) and requires output to be dataframes.")
parser.add_argument("-l", "--load_connectors", choices=set(['all', 'none']), help="Load java packages to enable spark connectors (s3, redshift, mysql). Set to 'none' to have faster spark start time and smaller log when connectors are not necessary. Only useful when mode=dev_local.")
parser.add_argument("-t", "--output.type", choices=set(['csv', 'parquet']), help="Override output type. Useful for development. Can be ignored otherwise.")
# Deploy specific
parser.add_argument("--aws_config_file", help="Identify file to use. Default to repo one.")
parser.add_argument("-a", "--aws_setup", help="Choose aws setup from conf/aws_config.cfg, typically 'prod' or 'dev'. Only relevant if choosing to deploy to a cluster.")
parser.add_argument("-o", "--leave_on", action='store_true', help="Use arg to not terminate cluster after running the job. Mostly for testing. Only relevant when creating a new cluster when deploy=EMR.")
parser.add_argument("-p", "--push_secrets", action='store_true', help="Pushing secrets to cluster. Only relevant if choosing to deploy to a cluster.")
# --inputs and --output args can be set from job or commandline too, just not set here.
defaults = {
'deploy': 'none',
'mode': 'dev_local',
'job_param_file': JOBS_METADATA_FILE,
'job_name': None,
'sql_file': None,
'connection_file': CONNECTION_FILE,
'jobs_folder': JOB_FOLDER,
'storage': 'local',
# 'dependencies': False, # only set from commandline
'rerun_criteria': 'last_date',
# 'chain_dependencies': False, # only set from commandline
'load_connectors': 'all',
# 'output.type': 'csv', # skipped on purpose to avoid setting it if not set in cmd line.
#-- Deploy specific below --
'aws_config_file': AWS_CONFIG_FILE,
'aws_setup': 'dev',
'code_source': 'lib', # Other options: 'repo'
# 'leave_on': False, # only set from commandline
# 'push_secrets': False, # only set from commandline
#-- Not added in command line args:
'enable_redshift_push': True,
'base_path': '',
'save_schemas': False,
'manage_git_info': False,
'add_created_at': 'true', # set as string to be overrideable in cmdline.
'no_fw_cache': False,
}
return parser, defaults
def launch_run_mode(self, job):
app_name = job.jargs.job_name
sc, sc_sql = self.create_contexts(app_name, job.jargs) # TODO: set spark_version default upstream, remove it from here and from deploy.py.
if not job.jargs.dependencies:
job.etl(sc, sc_sql)
else:
Flow(job.jargs, app_name).run_pipeline(sc, sc_sql)
def launch_deploy_mode(self, deploy_args, app_args):
# Load deploy lib here instead of at module level to remove dependency on it when running code locally
from yaetos.deploy import DeployPySparkScriptOnAws
DeployPySparkScriptOnAws(deploy_args, app_args).run()
def create_contexts(self, app_name, jargs):
# Load spark here instead of at module level to remove dependency on spark when only deploying code to aws.
from pyspark.sql import SQLContext
from pyspark.sql import SparkSession
from pyspark import SparkConf
conf = SparkConf()
# TODO: move spark-submit params here since it is more generic than in spark submit, params like "spark.driver.memoryOverhead" cause pb in spark submit.
if jargs.merged_args.get('driver-memoryOverhead'): # For extra overhead for python in driver (typically pandas)
conf = conf.set("spark.driver.memoryOverhead", jargs.merged_args['driver-memoryOverhead'])
if jargs.mode == 'dev_local' and jargs.load_connectors == 'all':
# Env vars for S3 access
credentials = boto3.Session(profile_name='default').get_credentials()
os.environ['AWS_ACCESS_KEY_ID'] = credentials.access_key
os.environ['AWS_SECRET_ACCESS_KEY'] = credentials.secret_key
# JARs
package = PACKAGES_LOCAL if jargs.merged_args.get('spark_version', '2.4') == '2.4' else PACKAGES_LOCAL_ALT
package_str = ','.join(package)
conf = conf \
.set("spark.jars.packages", package_str) \
.set("spark.jars", JARS)
# Setup above not needed when running from EMR where setup done in spark-submit.
if jargs.merged_args.get('emr_core_instances') == 0:
conf = conf \
.set("spark.hadoop.fs.s3a.buffer.dir", '/tmp') \
.set("spark.hadoop.fs.s3a.fast.upload.active.blocks", '1')
spark = SparkSession.builder \
.appName(app_name) \
.config(conf=conf) \
.getOrCreate()
sc = spark.sparkContext
sc_sql = SQLContext(sc)
logger.info('Spark Config: {}'.format(sc.getConf().getAll()))
return sc, sc_sql
class Flow():
def __init__(self, launch_jargs, app_name):
self.app_name = app_name
df = self.create_connections_jobs(launch_jargs.storage, launch_jargs.merged_args)
logger.debug('Flow app_name : {}, connection_table: {}'.format(app_name, df))
graph = self.create_global_graph(df) # top to bottom
tree = self.create_local_tree(graph, nx.DiGraph(), app_name) # bottom to top
self.leafs = self.get_leafs(tree, leafs=[]) # bottom to top
logger.info('Sequence of jobs to be run: {}'.format(self.leafs))
logger.info('-'*80)
logger.info('-')
launch_jargs.cmd_args.pop('job_name', None) # removing since it should be pulled from yml and not be overriden by cmd_args.
launch_jargs.job_args.pop('job_name', None) # same
self.launch_jargs = launch_jargs
def run_pipeline(self, sc, sc_sql):
"""Load all job classes and run them"""
df = {}
for job_name in self.leafs:
logger.info('About to run job_name: {}'.format(job_name))
# Get yml
yml_args = Job_Yml_Parser(job_name, self.launch_jargs.job_param_file, self.launch_jargs.mode).yml_args
# Get loaded_inputs
loaded_inputs = {}
if self.launch_jargs.chain_dependencies:
if yml_args.get('inputs', 'no input') == 'no input':
raise Exception("Pb with loading job_yml or finding 'inputs' parameter in it, so 'chain_dependencies' argument not useable in this case.")
for in_name, in_properties in yml_args['inputs'].items():
if in_properties.get('from'):
loaded_inputs[in_name] = df[in_properties['from']]
# Get jargs
jargs = Job_Args_Parser(self.launch_jargs.defaults_args, yml_args, self.launch_jargs.job_args, self.launch_jargs.cmd_args, loaded_inputs=loaded_inputs)
Job = get_job_class(yml_args['py_job'])
job = Job(jargs=jargs, loaded_inputs=loaded_inputs)
df[job_name] = job.etl(sc, sc_sql) # at this point df[job_name] is unpersisted. TODO: keep it persisted.
if not self.launch_jargs.chain_dependencies:
df[job_name].unpersist()
del df[job_name]
gc.collect()
logger.info('-'*80)
logger.info('-')
@staticmethod
def create_connections_jobs(storage, args):
yml = Job_Yml_Parser.load_meta(args['job_param_file'])
connections = []
for job_name, job_meta in yml['jobs'].items():
dependencies = job_meta.get('dependencies') or []
for dependency in dependencies:
row = {'source_job': dependency, 'destination_job': job_name}
connections.append(row)
return pd.DataFrame(connections)
@staticmethod
def create_global_graph(df):
""" Directed Graph from source to target. df must contain 'source_dataset' and 'target_dataset'.
All other fields are attributed to target."""
DG = nx.DiGraph()
for ii, item in df.iterrows():
item = item.to_dict()
source_dataset = item.pop('source_job')
target_dataset = item.pop('destination_job')
item.update({'name':target_dataset})
DG.add_edge(source_dataset, target_dataset)
DG.add_node(source_dataset, name=source_dataset) # (source_dataset, **{'name':source_dataset})
DG.add_node(target_dataset, **item)
return DG
def create_local_tree(self, DG, tree, ref_node):
""" Builds tree recursively. Uses graph data structure but enforces tree to simplify downstream."""
nodes = DG.predecessors(ref_node)
tree.add_node(ref_node, name=DG.nodes[ref_node])
for item in nodes:
if not tree.has_node(item):
tree.add_edge(ref_node, item)
tree.add_node(item, name=DG.nodes[item])
self.create_local_tree(DG, tree, item)
return tree
def get_leafs(self, tree, leafs):
"""Recursive function to extract all leafs in order out of tree.
Each pass, jobs are moved from "tree" to "leafs" variables until done.
"""
cur_leafs = [node for node in tree.nodes() if tree.in_degree(node)!=0 and tree.out_degree(node)==0]
leafs += cur_leafs
for leaf in cur_leafs:
tree.remove_node(leaf)
if len(tree.nodes()) >= 2:
self.get_leafs(tree, leafs)
return leafs + list(tree.nodes())
def get_job_class(py_job):
name_import = py_job.replace('/','.').replace('.py','')
import_cmd = "from {} import Job".format(name_import)
namespace = {}
exec(import_cmd, namespace)
return namespace['Job']
def send_email(message, receiver_email, sender_email, password, smtp_server, port):
context = ssl.create_default_context()
with smtplib.SMTP(smtp_server, port) as server:
server.starttls(context=context)
server.login(sender_email, password)
server.sendmail(sender_email, receiver_email, message)
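# Minimal standalone sketch (not part of the original module), kept under a __main__ guard so it never
# runs on import. It mimics how Flow.create_local_tree()/get_leafs() peel dependency leaves off a tree
# to get an execution order; the job names below are made up for illustration.
if __name__ == '__main__':
    import networkx as _nx
    _tree = _nx.DiGraph()
    _tree.add_edges_from([('report_job', 'clean_job'), ('clean_job', 'extract_job')])  # edges point consumer -> dependency
    _order = []
    while len(_tree.nodes()) >= 2:
        _leafs = [n for n in _tree.nodes() if _tree.in_degree(n) != 0 and _tree.out_degree(n) == 0]
        _order += _leafs
        _tree.remove_nodes_from(_leafs)
    _order += list(_tree.nodes())
    print(_order)  # ['extract_job', 'clean_job', 'report_job'] -> dependencies first, consumer last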
| 1.835938
| 2
|
qubellclient/private/application.py
|
dmakhno/contrib-python-qubell-client
| 0
|
12782084
|
# Copyright (c) 2013 Qubell Inc., http://qubell.com
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
__author__ = "<NAME>"
__copyright__ = "Copyright 2013, Qubell.com"
__license__ = "Apache"
__version__ = "1.0.1"
__email__ = "<EMAIL>"
import logging as log
import requests
import simplejson as json
from qubellclient.private.organization import Organization
import qubellclient.tools as tools
class Application(Organization):
"""
Base class for applications. It should create application and services+environment requested
"""
rawResponse = None
def __parse(self, values):
ret = {}
for val in values:
ret[val['id']] = val['value']
return ret
def __init__(self, context, id=None, manifest=None, name=None):
self.context = context
self.name = name or "test-app-"+tools.rand()
self.manifest = manifest
# Create application
if not id:
newapp = self._create()
assert newapp
self.applicationId = newapp['id']
# Use existing app
else:
self.applicationId = id
self.context.applicationId = self.applicationId
def _create(self):
log.info("Creating application: %s" % self.name)
url = self.context.api+'/organizations/'+self.context.organizationId+'/applications.json'
resp = requests.post(url, files={'path': self.manifest.content}, data={'manifestSource': 'upload', 'name': self.name}, verify=False, cookies=self.context.cookies)
log.debug(resp.text)
if resp.status_code == 200:
return resp.json()
else:
return False
def delete(self):
log.info("Removing application: %s" % self.name)
url = self.context.api+'/organizations/'+self.context.organizationId+'/applications/'+self.applicationId+'.json'
resp = requests.delete(url, verify=False, cookies=self.context.cookies)
log.debug(resp.text)
if resp.status_code == 200:
return True
else:
return False
def clean(self):
instances = self.instances
import instance
if instances:
for ins in instances:
obj = instance.Instance(context=self.context, id=ins['id'])
st = obj.status
if st not in ['Destroyed', 'Destroying', 'Launching', 'Executing']: # Tests could fail and we can get any state here
log.info("Destroying instance %s" % obj.name)
obj.delete()
assert obj.destroyed(timeout=10)
revisions = self.revisions
import revision
if revisions:
for rev in revisions:
obj = revision.Revision(context=self.context, id=rev['id'])
obj.delete()
return True
def json(self, key=None):
url = self.context.api+'/organizations/'+self.context.organizationId+'/applications/'+self.applicationId+'.json'
resp = requests.get(url, cookies=self.context.cookies, data="{}", verify=False)
log.debug(resp.text)
self.rawResponse = resp
if resp.status_code == 200:
# return same way public api does
# if key and (key in ['instances', 'environments']):
# return self.__parse(resp.json()[key])
# else:
# return resp.json()[key]
return resp.json()
else:
return None
def __getattr__(self, key):
resp = self.json()
return resp[key] or False
def upload(self, manifest):
log.info("Uploading manifest")
url = self.context.api+'/organizations/'+self.context.organizationId+'/applications/'+self.applicationId+'/manifests.json'
resp = requests.post(url, files={'path': manifest.content}, data={'manifestSource': 'upload', 'name': self.name}, verify=False, cookies=self.context.cookies)
log.debug(resp.text)
self.rawResponse = resp
if resp.status_code == 200:
self.manifest = manifest
return resp.json()
else:
log.error('Cannot upload manifest: %s' % resp.content)
return False
def launch(self, **argv):
url = self.context.api+'/organizations/'+self.context.organizationId+'/applications/'+self.applicationId+'/launch.json'
headers = {'Content-Type': 'application/json'}
data = json.dumps(argv)
resp = requests.post(url, cookies=self.context.cookies, data=data, verify=False, headers=headers)
log.debug(resp.text)
self.rawResponse = resp
if resp.status_code == 200:
instance_id = resp.json()['id']
from qubellclient.private.instance import Instance
return Instance(context=self.context, id=instance_id)
else:
log.error('Unable to launch instance: %s' % resp.content)
return False
def revisionCreate(self, name, instance, parameters=[], version=None):
if not version:
version=self.getManifest()['version']
url = self.context.api+'/organizations/'+self.context.organizationId+'/applications/'+self.applicationId+'/revisions.json'
headers = {'Content-Type': 'application/json'}
payload = json.dumps({ 'name': name,
'parameters': parameters,
'submoduleRevisions': {},
'returnValues': [],
'applicationId': self.context.applicationId,
'applicationName': "api",
'version': version,
'instanceId': instance.instanceId})
resp = requests.post(url, cookies=self.context.cookies, data=payload, verify=False, headers=headers)
log.debug(resp.text)
self.rawResponse = resp
if resp.status_code==200:
import revision
return revision.Revision(context=self.context, name=name, id=resp.json()['id'])
else:
return False
def getManifest(self):
url = self.context.api+'/organizations/'+self.context.organizationId+'/applications/'+self.applicationId+'/refreshManifest.json'
headers = {'Content-Type': 'application/json'}
payload = json.dumps({})
resp = requests.post(url, cookies=self.context.cookies, data=payload, verify=False, headers=headers)
log.debug(resp.text)
self.rawResponse = resp
if resp.status_code == 200:
return resp.json()
else:
return False
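# Illustrative usage sketch (not part of the original module); 'context' and 'manifest' below are
# assumed to come from the surrounding qubellclient code (a logged-in context and a Manifest object):
# app = Application(context, manifest=manifest, name='test-app-demo')
# instance = app.launch()                      # kwargs, if any, are sent as the JSON launch payload
# rev = app.revisionCreate('rev-1', instance)  # snapshot the running instance as a revision
# app.clean()                                  # destroy instances and delete revisions
# app.delete()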
| 2.03125
| 2
|
users/migrations/0014_auto_20200313_0548.py
|
RaghibMrz/webapp-testing
| 2
|
12782085
|
<filename>users/migrations/0014_auto_20200313_0548.py
# Generated by Django 3.0.1 on 2020-03-13 05:48
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('users', '0013_auto_20200313_0546'),
]
operations = [
migrations.AlterField(
model_name='profile',
name='gotAccount',
field=models.CharField(default='0', max_length=1),
),
]
| 1.351563
| 1
|
Regex/function-compile.py
|
SpenceGuo/py3-learning
| 1
|
12782086
|
<filename>Regex/function-compile.py<gh_stars>1-10
"""
compile 函数
compile 函数用于编译正则表达式,生成一个正则表达式( Pattern )对象,供 match() 和 search() 这两个函数使用。
语法格式为:
re.compile(pattern[, flags])
参数:
pattern : 一个字符串形式的正则表达式
flags 可选,表示匹配模式,比如忽略大小写,多行模式等,具体参数为:
re.I 忽略大小写
re.L 表示特殊字符集 \w, \W, \b, \B, \s, \S 依赖于当前环境
re.M 多行模式
re.S 即为' . '并且包括换行符在内的任意字符(' . '不包括换行符)
re.U 表示特殊字符集 \w, \W, \b, \B, \d, \D, \s, \S 依赖于 Unicode 字符属性数据库
re.X 为了增加可读性,忽略空格和' # '后面的注释
"""
# 实例
# >>>import re
# >>> pattern = re.compile(r'\d+') # 用于匹配至少一个数字
# >>> m = pattern.match('one12twothree34four') # 查找头部,没有匹配
# >>> print( m )
# None
# >>> m = pattern.match('one12twothree34four', 2, 10) # 从'e'的位置开始匹配,没有匹配
# >>> print( m )
# None
# >>> m = pattern.match('one12twothree34four', 3, 10) # 从'1'的位置开始匹配,正好匹配
# >>> print( m ) # 返回一个 Match 对象
# <_sre.SRE_Match object at 0x10a42aac0>
# >>> m.group(0) # 可省略 0
# '12'
# >>> m.start(0) # 可省略 0
# 3
# >>> m.end(0) # 可省略 0
# 5
# >>> m.span(0) # 可省略 0
# (3, 5)
"""
在上面,当匹配成功时返回一个 Match 对象,其中:
group([group1, …]) 方法用于获得一个或多个分组匹配的字符串,当要获得整个匹配的子串时,可直接使用 group() 或 group(0);
start([group]) 方法用于获取分组匹配的子串在整个字符串中的起始位置(子串第一个字符的索引),参数默认值为 0;
end([group]) 方法用于获取分组匹配的子串在整个字符串中的结束位置(子串最后一个字符的索引+1),参数默认值为 0;
span([group]) 方法返回 (start(group), end(group))。
"""
import re
pattern = re.compile(r'\d+') # matches one or more digits
m = pattern.match('one12twothree34four') # match at the start of the string: no match
print(m)
m = pattern.match('one12twothree34four', 2, 10) # start matching at 'e': no match
print(m)
m = pattern.match('one12twothree34four', 3, 10) # start matching at '1': this matches
print(m) # returns a Match object
# >>> m.group(0) # the 0 can be omitted
# '12'
# >>> m.start(0) # the 0 can be omitted
# 3
# >>> m.end(0) # the 0 can be omitted
# 5
# >>> m.span(0) # the 0 can be omitted
# (3, 5)
# Above, a successful match returns a Match object, where:
#
# group([group1, ...]) returns the string matched by one or more groups; use group() or group(0) to get the whole matched substring;
# start([group]) returns the start position of the group's match within the string (index of its first character); the argument defaults to 0;
# end([group]) returns the end position of the group's match within the string (index of its last character + 1); the argument defaults to 0;
# span([group]) returns (start(group), end(group)).
# Another example:
#
# Example
# >>> import re
# >>> pattern = re.compile(r'([a-z]+) ([a-z]+)', re.I) # re.I means ignore case
# >>> m = pattern.match('Hello World Wide Web')
# >>> print( m ) # match succeeds and returns a Match object
# <_sre.SRE_Match object at 0x10bea83e8>
# >>> m.group(0) # the whole matched substring
# 'Hello World'
# >>> m.span(0) # the indices of the whole matched substring
# (0, 11)
# >>> m.group(1) # the substring matched by the first group
# 'Hello'
# >>> m.span(1) # the indices of the first group's match
# (0, 5)
# >>> m.group(2) # the substring matched by the second group
# 'World'
# >>> m.span(2) # the indices of the second group's match
# (6, 11)
# >>> m.groups() # equivalent to (m.group(1), m.group(2), ...)
# ('Hello', 'World')
# >>> m.group(3) # there is no third group
# Traceback (most recent call last):
# File "<stdin>", line 1, in <module>
# IndexError: no such group
# findall
# Finds all substrings matched by the regular expression and returns them as a list; returns an empty list if there is no match.
#
# Note: match and search match once, while findall matches everything.
#
# Syntax:
#
# re.findall(pattern, string, flags=0)
# or
# pattern.findall(string[, pos[, endpos]])
# Parameters:
#
# pattern  the pattern to match.
# string   the string to search.
# pos      optional, start position in the string, defaults to 0.
# endpos   optional, end position in the string, defaults to the length of the string.
"""
查找字符串中的所有数字:
实例
import re
result1 = re.findall(r'\d+','runoob 123 google 456')
pattern = re.compile(r'\d+') # 查找数字
result2 = pattern.findall('runoob 123 google 456')
result3 = pattern.findall('run88oob123google456', 0, 10)
print(result1)
print(result2)
print(result3)
输出结果:
['123', '456']
['123', '456']
['88', '12']
"""
"""
re.finditer
和 findall 类似,在字符串中找到正则表达式所匹配的所有子串,并把它们作为一个迭代器返回。
re.finditer(pattern, string, flags=0)
参数:
参数 描述
pattern 匹配的正则表达式
string 要匹配的字符串。
flags 标志位,用于控制正则表达式的匹配方式,如:是否区分大小写,多行匹配等等。
实例
import re
it = re.finditer(r"\d+", "12a32bc43jf3")
for match in it:
print (match.group() )
输出结果:
12
32
43
3
"""
# ---------------------------------------------------------
"""
re.split
split 方法按照能够匹配的子串将字符串分割后返回列表,它的使用形式如下:
re.split(pattern, string[, maxsplit=0, flags=0])
参数:
参数 描述
pattern 匹配的正则表达式
string 要匹配的字符串。
maxsplit 分隔次数,maxsplit=1 分隔一次,默认为 0,不限制次数。
flags 标志位,用于控制正则表达式的匹配方式,如:是否区分大小写,多行匹配等等。参见:正则表达式修饰符 - 可选标志
实例
>>>import re
>>> re.split('\W+', 'runoob, runoob, runoob.')
['runoob', 'runoob', 'runoob', '']
>>> re.split('(\W+)', ' runoob, runoob, runoob.')
['', ' ', 'runoob', ', ', 'runoob', ', ', 'runoob', '.', '']
>>> re.split('\W+', ' runoob, runoob, runoob.', 1)
['', 'runoob, runoob, runoob.']
>>> re.split('a*', 'hello world') # 对于一个找不到匹配的字符串而言,split 不会对其作出分割
['hello world']
"""
| 3.015625
| 3
|
src/tuplenet/lcp/commit_ovs.py
|
darcy-xiao/TupleNet
| 1
|
12782087
|
<gh_stars>1-10
import sys
import json
import subprocess
import logging
import threading
import struct, socket
import logicalview as lgview
from pyDatalog import pyDatalog
from onexit import on_parent_exit
from run_env import is_gateway_chassis
logger = logging.getLogger(__name__)
flow_lock = threading.Lock()
class OVSToolErr(Exception):
pass
def call_popen(cmd, commu=None, shell=False):
child = subprocess.Popen(cmd, shell=shell, stdout=subprocess.PIPE,
stdin=subprocess.PIPE, stderr=subprocess.PIPE)
if commu is None:
output = child.communicate()
else:
output = child.communicate(commu)
if child.returncode:
raise RuntimeError("error executing %s" % (cmd))
if len(output) == 0 or output[0] is None:
output = ""
else:
output = output[0].decode("utf8").strip()
return output
def call_ovsprog(prog, args_list, commu=None):
cmd = [prog, "--timeout=5"] + args_list
retry_n = 0
while True:
try:
return call_popen(cmd, commu)
except Exception as err:
retry_n += 1
if retry_n == 3:
raise err
continue
def ovs_ofctl(*args):
return call_ovsprog("ovs-ofctl", list(args))
def aggregate_flows(flows):
length = 500
flow_seg = [flows[x : x + length] for x in range(0, len(flows), length)]
for seg in flow_seg:
yield '\n'.join(seg)
def ovs_ofctl_delflows_batch(br, flows):
for flow_combind in aggregate_flows(flows):
call_ovsprog("ovs-ofctl", ['del-flows', br, '--strict', '-'],
commu=flow_combind)
def ovs_ofctl_addflows_batch(br, flows):
for flow_combind in aggregate_flows(flows):
call_ovsprog("ovs-ofctl", ['add-flow', br, '-'], commu=flow_combind)
def ovs_vsctl(*args):
return call_ovsprog("ovs-vsctl", list(args))
def parse_map(map_list):
ret_map = {}
if map_list[0] != 'map':
return None
for entry in map_list[1]:
ret_map[entry[0]] = entry[1]
return ret_map
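# Illustrative note (not part of the original code): OVSDB encodes maps as ['map', [[key, value], ...]],
# e.g. parse_map(['map', [['iface-id', 'lsp-1'], ['chassis-id', 'hv1']]])
#      -> {'iface-id': 'lsp-1', 'chassis-id': 'hv1'}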
def update_ovsport(record, entity_zoo):
action_type = record[1]
# insert action does not contain ofport, ignore it.
if action_type == 'insert':
return
elif action_type in ['new', 'initial', 'delete', 'old']:
# some operations may not contain some essential fields
if record[3] == None or record[4] == None:
logger.info('action %s does not contain enough info, msg:%s',
action_type, record)
return
else:
logger.warning("unknow action_type:%s", action_type)
return
logger.info("ovsport action type:%s", action_type)
ofport = record[2]
name = record[3]
external_ids = record[4]
external_ids = parse_map(external_ids)
if 'iface-id' in external_ids:
uuid = external_ids['iface-id']
is_remote = False
entity_type = lgview.LOGICAL_ENTITY_TYPE_OVSPORT
elif 'chassis-id' in external_ids:
uuid = external_ids['chassis-id']
is_remote = True
entity_type = lgview.LOGICAL_ENTITY_TYPE_OVSPORT_CHASSIS
else:
logger.info('external_ids has no chassis-id or iface-id, record:%s',
record)
return
if action_type in ['old', 'delete']:
logger.info("try to move port to sink %s uuid:%s", name, uuid)
entity_zoo.move_entity2sink(entity_type, name)
else:
logger.info("try to add ovsport entity %s ofport:%d, uuid:%s in zoo",
name, ofport, uuid)
entity_zoo.add_entity(entity_type, name, uuid, ofport, is_remote)
return
def monitor_ovsdb(entity_zoo, extra):
pyDatalog.Logic(extra['logic'])
cmd = ['ovsdb-client', 'monitor', 'Interface',
'ofport', 'name', 'external_ids', '--format=json']
logger.info("start ovsdb-client instance")
try:
child = subprocess.Popen(cmd, stdout=subprocess.PIPE,
preexec_fn=on_parent_exit('SIGTERM'))
with extra['lock']:
extra['ovsdb-client'] = child
logger.info("monitoring the ovsdb")
while child.poll() == None:
json_str = child.stdout.readline().strip()
output = json.loads(json_str)
with entity_zoo.lock:
for record in output['data']:
update_ovsport(record, entity_zoo)
except ValueError as err:
if json_str != "":
logger.warning("cannot parse %s to json object", json_str)
else:
logger.info('json_str is empty, maybe we should exit')
subprocess.Popen.kill(child)
return
except Exception as err:
logger.exception("exit ovsdb-client monitor, err:%s", str(err))
subprocess.Popen.kill(child)
return
def start_monitor_ovsdb(entity_zoo, extra):
t = threading.Thread(target = monitor_ovsdb,
args=(entity_zoo, extra))
t.setDaemon(True)
t.start()
return t
def clean_ovs_flows(br = 'br-int'):
try:
ovs_ofctl("del-flows", br)
except Exception as err:
logger.error("failed to clean bridge %s flows", br)
raise OVSToolErr("failed to clean ovs flows")
def system_id():
cmd = ['ovsdb-client', '-v', 'transact',
'["Open_vSwitch",{"op":"select", \
"table":"Open_vSwitch","columns":["external_ids"], \
"where":[]}]']
try:
json_str = call_popen(cmd, shell=True)
except Exception:
logger.error('failed to get system-id')
return
output = json.loads(json_str)[0]
external_ids = parse_map(output['rows'][0]['external_ids'])
    if 'system-id' in external_ids:
return external_ids['system-id']
def remove_all_tunnles():
cmd = ['ovsdb-client', '-v', 'transact',
'["Open_vSwitch",{"op":"select", \
"table":"Interface","columns":["name"], \
"where":[["type","==","geneve"]]}]']
try:
json_str = call_popen(cmd, shell=True)
except Exception:
logger.error("failed to get geneve tunnels")
raise OVSToolErr("failed to get geneve tunnels")
try:
output = json.loads(json_str)[0]
tunnel_name_list = output['rows']
for entry in tunnel_name_list:
name = entry['name']
remove_tunnel_by_name(name)
except Exception as err:
logger.error("failed to remove tunnel, err:%s", err)
raise OVSToolErr("failed to remove tunnel")
def remove_tunnel_by_name(portname):
try:
ovs_vsctl('get', 'interface', portname, 'name')
except Exception as err:
        # cannot find this port, return immediately
        logger.debug("port %s does not exist, no need to remove it", portname)
return
try:
ovs_vsctl('del-port', 'br-int', portname)
except Exception as err:
logger.info("cannot delete tunnel port:%s", err)
return
def get_tunnel_chassis_id(portname):
try:
return ovs_vsctl('get', 'interface', portname,
'external_ids:chassis-id').strip("\"")
except Exception as err:
return ""
def chassis_ip_to_portname(ip):
chassis_ip_int = struct.unpack("!L", socket.inet_aton(ip))[0]
portname = 'tupleNet-{}'.format(chassis_ip_int)
return portname
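# Illustrative sketch for chassis_ip_to_portname (the IP is an assumption):
# '10.0.0.1' packs to the integer 167772161, so the derived tunnel port name
# would be 'tupleNet-167772161'.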
def remove_tunnel_by_ip(ip):
portname = chassis_ip_to_portname(ip)
remove_tunnel_by_name(portname)
def create_tunnel(ip, uuid):
portname = chassis_ip_to_portname(ip)
if get_tunnel_chassis_id(portname) == uuid:
logger.info("found a exist tunnel ovsport has "
"same chassis-id and portname, skip adding tunnel ovsport")
return
remove_tunnel_by_name(portname)
cmd = ['add-port', 'br-int', portname, '--', 'set', 'interface',
portname, 'type=geneve', 'options:remote_ip={}'.format(ip),
'options:key=flow', 'options:csum=true',
'external_ids:chassis-id={}'.format(uuid)]
try:
ovs_vsctl(*cmd)
except Exception as err:
        logger.warning('cannot create tunnel, cmd:%s, err:%s', cmd, err)
    # if this host is a gateway, then we should enable bfd for each tunnel port
if is_gateway_chassis():
logger.info("local host is gateway, enable bfd on %s", portname)
config_ovsport_bfd(portname, 'enable=true')
def create_flowbased_tunnel(chassis_id):
portname = "tupleNet-flowbased"
remove_tunnel_by_name(portname)
cmd = ['add-port', 'br-int', portname, '--', 'set', 'interface',
portname, 'type=geneve', 'options:remote_ip=flow',
'options:key=flow', 'options:csum=true',
'external_ids:chassis-id={}'.format(chassis_id)]
try:
ovs_vsctl(*cmd)
except Exception as err:
        logger.warning('cannot create flow-based tunnel, cmd:%s, err:%s', cmd, err)
def create_patch_port(uuid, peer_bridge):
portname = 'patch-' + 'br-int' + peer_bridge + str(uuid)[0:8]
peername = 'patch-' + peer_bridge + 'br-int' + str(uuid)[0:8]
cmd = ['add-port', 'br-int', portname, '--', 'set', 'interface',
portname, 'type=patch',
'external_ids:iface-id={}'.format(uuid),
'options:peer={}'.format(peername)]
ovs_vsctl(*cmd)
cmd = ['add-port', peer_bridge, peername, '--', 'set', 'interface',
peername, 'type=patch',
'options:peer={}'.format(portname)]
ovs_vsctl(*cmd)
def commit_flows(add_flows, del_flows):
    # use the batch method to insert/delete flows first.
    # update ovs flows one by one if batch updating hits an issue.
with flow_lock:
try:
total_flow_n = len(del_flows) + len(add_flows)
if len(del_flows) > 0:
ovs_ofctl_delflows_batch('br-int', del_flows)
del_flows = []
if len(add_flows) > 0:
ovs_ofctl_addflows_batch('br-int', add_flows)
add_flows = []
return total_flow_n
except Exception as err:
logger.warn("failed to batch modify flows, "
"will try to update one by one, err:%s", err)
        # insert/delete flows one by one once the batch processing hits an error
cm_cnt = 0
for flow in del_flows:
try:
ovs_ofctl('del-flows', 'br-int', flow, '--strict')
except Exception as err:
logger.error('failed to delete flow(%s) at ovs, err:%s',
flow, err)
                continue
cm_cnt += 1
for flow in add_flows:
try:
ovs_ofctl('add-flow', 'br-int', flow)
except Exception as err:
logger.error('failed to add flow(%s) at ovs, err:%s',
flow, err)
                continue
cm_cnt += 1
        return cm_cnt
def build_br_integration(br = 'br-int'):
try:
ovs_vsctl('br-exists', br)
logger.info("the bridge %s is exist", br)
# if we hit no issue, then it means the bridge is exist
return
except Exception as err:
logger.info("the bridge %s is not exist, try to create a new one", br)
try:
ovs_vsctl('add-br', br)
logger.info("create bridge %s for integration", br)
except Exception as err:
logger.error("failed to create %s", br)
raise OVSToolErr("failed to create integration bridge")
def set_tunnel_tlv(vipclass = 0xffee, br = 'br-int'):
try:
ovs_ofctl('del-tlv-map', br)
except Exception as err:
logger.error("failed to clean %s tlv, err:%s", br, err)
raise OVSToolErr("failed to clean tlv")
tlv = "{{class={},type=0,len=8}}->tun_metadata0".format(vipclass)
try:
ovs_ofctl('add-tlv-map', br, tlv)
except Exception as err:
logger.error('failed to config tlv %s to %s, err:%s', tlv, br, err)
raise OVSToolErr("failed to config tlv")
def set_upcall_rate(br = 'br-int', rate = 100):
    # TODO: we need to limit the packet rate of upcalls to the packet_controller
pass
def config_ovsport_bfd(portname, config):
try:
ovs_vsctl('set', 'Interface', portname, 'bfd:{}'.format(config))
except Exception as err:
logger.info("failed to config %s bfd to %s, "
"port may not exist, err:%s",
portname, config, err)
def inject_pkt_to_ovsport(cmd_id, packet_data, ofport):
try:
ovs_ofctl('packet-out', 'br-int', 'NONE',
'load:{}->NXM_OF_IN_PORT[],'
'load:{}->NXM_NX_REG10[16..31],'
'load:1->NXM_NX_REG10[1],resubmit(,0)'.format(ofport, cmd_id),
packet_data)
except Exception as err:
logger.warning("failed to inject packet %s to ofport %d",
packet_data, ofport)
| 2
| 2
|
sc_config/config.py
|
Scott-Lau/sc-conf
| 1
|
12782088
|
"""Configurations
Copyright (c) 2021 <NAME>
"""
# The MIT License (MIT)
#
# Copyright (c) 2021. <NAME>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import logging
import os
from config42 import ConfigManager
class Config:
@staticmethod
def _get_cur_dir_config_file_path(environment):
"""Top priority configuration file
:param environment:
:return:
"""
filename = '{}.yml'.format(environment)
return os.path.join(os.getcwd(), filename)
@staticmethod
def _get_user_dir_config_file_path(project_name, environment):
"""Second priority configuration file
:param environment:
:return:
"""
config_directory = '{}'.format(project_name)
filename = '{}/{}.yml'.format(config_directory, environment)
return os.path.join(os.path.expanduser('~'), filename)
@staticmethod
def _get_config_file_path(project_name, environment):
"""Third priority configuration file
:param environment:
:return:
"""
config_directory = '{}'.format(project_name)
filename = '{}/{}.yml'.format(config_directory, environment)
return os.path.join('/var/opt/sc', filename)
@staticmethod
def create(*, project_name, encoding='utf-8', environment=None, defaults=None):
if defaults is None:
defaults = {}
        # load defaults from the /var/opt/sc directory
config_file = Config._get_config_file_path(project_name, "default")
found_config_file = False
if os.path.exists(config_file):
logging.getLogger(__name__).info("loading default configurations from %s", config_file)
config = ConfigManager(path=config_file, encoding=encoding, defaults=defaults)
found_config_file = True
# load environment configurations from environment variables
# fix prefix to be SC
prefix = "SC"
env_config = ConfigManager(prefix=prefix)
key_env = "environment"
if environment is None:
environment = env_config.get(key_env)
if environment is None:
            # use the production configuration when no environment is specified
environment = "production"
logging.getLogger(__name__).info("did not specify environment, using %s", environment)
else:
logging.getLogger(__name__).info("using environment: %s", environment)
# load environment configurations from /var/opt/sc directory
env_config_file = Config._get_config_file_path(project_name, environment)
if os.path.exists(env_config_file):
logging.getLogger(__name__).info("loading environmental configurations from %s", env_config_file)
if not found_config_file:
config = ConfigManager(path=env_config_file, encoding=encoding, defaults=defaults)
found_config_file = True
else:
config.set_many(ConfigManager(path=env_config_file, encoding=encoding).as_dict())
# load environment configurations from user directory
user_config_file = Config._get_user_dir_config_file_path(project_name, environment)
if os.path.exists(user_config_file):
logging.getLogger(__name__).info("loading user directory configurations from %s", user_config_file)
if not found_config_file:
config = ConfigManager(path=user_config_file, encoding=encoding, defaults=defaults)
found_config_file = True
else:
config.set_many(ConfigManager(path=user_config_file, encoding=encoding).as_dict())
# load environment configurations from current directory
current_dir_config_file = Config._get_cur_dir_config_file_path(environment)
if os.path.exists(current_dir_config_file):
logging.getLogger(__name__).info("loading current directory configurations from %s", current_dir_config_file)
logging.getLogger(__name__).info(f"found_config_file: {found_config_file}")
if not found_config_file:
config = ConfigManager(path=current_dir_config_file, encoding=encoding, defaults=defaults)
found_config_file = True
else:
config.set_many(ConfigManager(path=current_dir_config_file, encoding=encoding).as_dict())
if not found_config_file:
config = ConfigManager(defaults=defaults)
config.set_many(env_config.as_dict())
config.set(key_env, environment)
return config
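# Illustrative usage sketch for Config.create (project name, environment and defaults are assumptions):
# config = Config.create(project_name="sc-conf", environment="development",
#                        defaults={"environment": "development"})
# print(config.get("environment"))
# Later sources override earlier ones: /var/opt/sc/<project>/, then ~/<project>/,
# then the current working directory, then SC_* environment variables.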
| 2.15625
| 2
|
src/main.py
|
pqtrng/wine
| 0
|
12782089
|
import logging
from pathlib import Path
import hydra
from omegaconf import DictConfig
from omegaconf import OmegaConf
from src.evaluate import evaluate
from src.plot import plot_feature
from src.plot import plot_residual
from src.train import train
logger = logging.getLogger(__name__)
@hydra.main(config_path="../configs", config_name="default")
def main(cfg: DictConfig):
logger.info(OmegaConf.to_yaml(cfg=cfg))
path = Path(hydra.utils.get_original_cwd())
logger.info("Train model")
X_train, y_train, X_test, y_test, model = train(
current_path=path, data_config=cfg.data, model_config=cfg.model
)
logger.info("Evaluate model")
evaluate(
x_train=X_train,
y_train=y_train,
x_test=X_test,
y_test=y_test,
model=model,
)
logger.info("Plot features")
plot_feature(
model=model,
labels=X_train.columns,
image_config=cfg.visualization.image,
)
logger.info("Plot residual")
plot_residual(
x_test=X_test,
y_test=y_test,
model=model,
image_config=cfg.visualization.image,
)
if __name__ == "__main__":
main()
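# Illustrative usage sketch (config group and field names are assumptions, not taken from this repo):
#   python src/main.py model.n_estimators=200 visualization.image.dpi=150
# hydra composes configs/default.yaml with these command-line overrides before calling main().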
| 2.265625
| 2
|
ifirma/yaml_parser.py
|
pnowosie/firma-api
| 0
|
12782090
|
<filename>ifirma/yaml_parser.py
from datetime import date
from yaml import load, FullLoader
from ifirma.invoice import Invoice, InvoicePosition
today = date.today()
DEFAULT_EMAIL_MESSAGE = (
f"W załączeniu przesyłam fakturę za {today.year}/{today.month:02}.\nPozdrawiam :)"
)
def parse(text):
    doc = load(text, Loader=FullLoader)
invoice = Invoice(
issue_date=doc.get("issue_date"),
issuer=doc.get("issuer"),
place=doc.get("issue_place"),
).with_known_customer(
doc["known_customer"]["name"], doc["known_customer"]["tax_id"]
)
if doc.get("ban"):
invoice.bank_account_no = doc["ban"]
if doc.get("comments"):
invoice.comments = doc["comments"].strip()
invoice.positions = list(
map(
lambda pos: InvoicePosition(
pos["name"], pos["amount"], flat_rate=pos.get("flat_rate")
),
doc["positions"],
)
)
task = dict(invoice=invoice)
if doc.get("send_to"):
task["send_to"] = doc["send_to"]
task["message"] = doc.get("message") or DEFAULT_EMAIL_MESSAGE
if doc.get("download"):
task["download"] = doc["download"]
return task
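# Illustrative sketch of the YAML shape parse() expects (all values are assumptions):
# known_customer:
#   name: ACME Sp. z o.o.
#   tax_id: "1234567890"
# positions:
#   - name: Consulting services
#     amount: 1000
#     flat_rate: 8.5
# send_to: billing@example.com
# download: invoice.pdf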
| 2.578125
| 3
|
sidekick-seq/sidekick/seq/iter.py
|
fabiommendes/sidekick
| 32
|
12782091
|
import itertools
import operator
from functools import wraps, cached_property
from .._utils import safe_repr
from ..functions import fn
from ..typing import Iterator, Tuple, T, TYPE_CHECKING
if TYPE_CHECKING:
from .. import api as sk # noqa: F401
NOT_GIVEN = object()
_iter = iter
class Iter(Iterator[T]):
"""
Base sidekick iterator class.
This class extends classical Python iterators with a few extra operators.
    Sidekick iterators accept slicing, indexing, concatenation (with the + sign),
    repetition (with the * sign) and pretty printing.
    Operations that return new iterators (e.g., slicing, concatenation, etc.)
    consume the data stream. Operations that simply peek at data execute the
    generator (and thus may produce side-effects), but cache values and do not
    consume the data stream.
"""
__slots__ = ("_iterator", "_size_hint")
_iterator: Iterator[T]
if TYPE_CHECKING:
from .. import seq as _mod
_mod = _mod
else:
@cached_property
def _mod(self):
from .. import seq
return seq
def __new__(cls, iterator: Iterator[T], size_hint: int = None):
if isinstance(iterator, Iter):
return iterator
new = object.__new__(cls)
new._iterator = _iter(iterator)
new._size_hint = size_hint
return new
def __next__(self, _next=next):
return _next(self._iterator)
def __iter__(self):
return self._iterator
def __repr__(self):
it = self._iterator
head = []
for _ in range(7):
try:
head.append(next(it))
except StopIteration:
display = map(safe_repr, head)
self._iterator = _iter(head)
self._size_hint = len(head)
break
except Exception as ex:
ex_name = type(ex).__name__
display = [*map(safe_repr, head), f"... ({ex_name})"]
self._iterator = yield_and_raise(head, ex)
self._size_hint = len(head)
break
else:
self._iterator = itertools.chain(_iter(head), it)
display = [*map(safe_repr, head[:-1]), "..."]
data = ", ".join(display)
return f"sk.iter([{data}])"
def __getitem__(self, item, _chain=itertools.chain):
if isinstance(item, int):
if item >= 0:
head = []
for i, x in enumerate(self._iterator):
head.append(x)
if i == item:
self._iterator = _chain(head, self._iterator)
return x
else:
self._iterator = _iter(head)
self._size_hint = len(head)
raise IndexError(item)
else:
raise IndexError("negative indexes are not supported")
elif isinstance(item, slice):
            a, b, c = item.start, item.stop, item.step
            return Iter(itertools.islice(self._iterator, a, b, c))
elif callable(item):
return Iter(filter(item, self._iterator), self._size_hint)
elif isinstance(item, list):
if not item:
return []
if isinstance(item[0], bool):
self._iterator, data = itertools.tee(self._iterator, 2)
return [x for key, x in zip(item, data) if key]
elif isinstance(item[0], int):
self._iterator, data = itertools.tee(self._iterator, 2)
data = list(itertools.islice(data, max(item) + 1))
return [data[i] for i in item]
else:
raise TypeError("index must contain only integers or booleans")
else:
size = operator.length_hint(item, -1)
size = None if size == -1 else size
return Iter(compress_or_select(item, self._iterator), size)
def __add__(self, other, _chain=itertools.chain):
if hasattr(other, "__iter__"):
return Iter(_chain(self._iterator, other))
return NotImplemented
def __radd__(self, other, _chain=itertools.chain):
if hasattr(other, "__iter__"):
return Iter(_chain(other, self._iterator))
return NotImplemented
    def __iadd__(self, other, _chain=itertools.chain):
        self._iterator = _chain(self._iterator, other)
        return self
def __mul__(self, other):
if isinstance(other, int):
if other < 0:
raise ValueError("cannot multiply by negative integers")
return Iter(cycle_n(self._iterator, other))
try:
data = _iter(other)
except TypeError:
return NotImplemented
        return Iter(itertools.product(self._iterator, data))
def __rmul__(self, other):
if isinstance(other, int):
return self.__mul__(other)
try:
data = _iter(other)
except TypeError:
return NotImplemented
        return Iter(itertools.product(data, self._iterator))
def __rmatmul__(self, func):
if callable(func):
return Iter(map(func, self._iterator), self._size_hint)
return NotImplemented
def __length_hint__(self):
if self._size_hint is None:
return operator.length_hint(self._iterator)
return self._size_hint
#
# Conversion to collections
#
def list(self) -> list:
"""
Convert iterator to list consuming iterator.
        Infinite iterators do not terminate.
"""
return list(self)
def tuple(self) -> tuple:
"""
Convert iterator to tuple consuming iterator.
        Infinite iterators do not terminate.
"""
return tuple(self)
def set(self) -> set:
"""
        Convert iterator to set consuming iterator.
        Infinite iterators do not terminate.
"""
return set(self)
def frozenset(self) -> frozenset:
"""
        Convert iterator to frozenset consuming iterator.
        Infinite iterators do not terminate.
"""
return frozenset(self)
def str(self) -> str:
"""
Convert iterator to string consuming iterator and concatenating
elements.
        Infinite iterators do not terminate.
"""
return "".join(self)
    def bytes(self) -> bytes:
"""
Convert iterator to bytes consuming iterator and concatenating
elements.
        Infinite iterators do not terminate.
"""
return b"".join(self)
#
# API
#
def copy(self) -> "Iter":
"""
        Return a copy of the iterator. Consuming the copy does not consume the
        original iterator.
        Internally, this method uses itertools.tee to perform the copy. If you
        know that the iterator will be consumed, it is faster and more memory
        efficient to convert it to a list and produce multiple iterators.
"""
self._iterator, other = itertools.tee(self._iterator, 2)
return Iter(other, self._size_hint)
def tee(self, n=1) -> Tuple["Iter", ...]:
"""
Split iterator into n additional copies.
The copy method is simply an alias to iter.tee(1)[0]
"""
self._iterator, *rest = itertools.tee(self._iterator, n + 1)
n = self._size_hint
return tuple(Iter(it, n) for it in rest)
def peek(self, n: int) -> Tuple:
"""
Peek the first n elements without consuming the iterator.
"""
data = tuple(itertools.islice(self._iterator, n))
self._iterator = itertools.chain(data, self._iterator)
return data
#
# Wrapping the iterator API
#
def cycle_n(seq, n):
data = []
store = data.append
consumed = False
while n > 0:
if consumed:
yield from data
else:
for x in seq:
store(x)
yield x
if data:
consumed = True
else:
return
n -= 1
def compress(keys, seq):
for x, pred in zip(seq, keys):
if pred:
yield x
def select(keys, seq):
data = []
for i in keys:
try:
yield data[i]
except IndexError:
data.extend(itertools.islice(seq, i - len(data) + 1))
yield data[i]
def compress_or_select(keys, seq):
keys = _iter(keys)
seq = _iter(seq)
try:
key = next(keys)
if key is True:
func = compress
yield next(seq)
elif key is False:
func = compress
next(seq)
elif isinstance(key, int):
func = select
keys = itertools.chain([key], keys)
else:
raise TypeError(f"invalid key: {key!r}")
except StopIteration:
return
yield from func(keys, seq)
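# Illustrative sketch for compress_or_select (inputs are assumptions):
# compress_or_select([True, False, True], "abc") yields 'a' and 'c' (boolean-mask path),
# while compress_or_select([0, 2], "abc") yields the same pair via positional selection;
# a first key that is neither bool nor int raises TypeError.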
@fn
def generator(func):
"""
Decorates generator function to return a sidekick iterator instead of a
regular Python generator.
Examples:
>>> @sk.generator
... def fibonacci():
... x = y = 1
... while True:
... yield x
... x, y = y, x + y
>>> fibonacci()
sk.iter([1, 1, 2, 3, 5, 8, ...])
"""
@fn
@wraps(func)
def gen(*args, **kwargs):
return Iter(func(*args, **kwargs))
return gen
def stop(x=None):
"""
Raise StopIteration with the given argument.
"""
raise StopIteration(x)
def yield_and_raise(data, exc):
"""
Return content from data and then raise exception afterwards.
"""
yield from data
raise exc
fn.generator = staticmethod(generator)
| 2.78125
| 3
|
py-app/productiveware/widgets/main_window.py
|
productiveware-xhacks/productiveware
| 0
|
12782092
|
import requests
import webbrowser
from os.path import dirname, exists, join, realpath
from typing import List
from PySide6.QtCore import QTimer, Qt, Slot
from PySide6.QtGui import QIcon
from PySide6.QtWidgets import *
from win10toast import ToastNotifier
from pathlib import Path
from productiveware import encryption
from productiveware.client import base_url, check_cookie, get_headers
from productiveware.config import *
from productiveware.widgets.log import LogWidget
from productiveware.widgets.login import LoginWidget
todo_url = f'{base_url}/todo'
test_url = f'{base_url}/api/user'
icon_path = str(Path.cwd().joinpath("productiveware", "widgets", "res", "productiveware.ico"))
toaster = ToastNotifier()
class MainWidget(QMainWindow):
def __init__(self):
super().__init__()
self.setWindowTitle('productiveware')
self.setWindowIcon(
QIcon(join(dirname(realpath(__file__)), 'res/productiveware.png')))
widget = QWidget()
layout = QGridLayout()
# Backend stuff
self.status = QStatusBar()
self.status_refresh = QPushButton('Refresh Connection')
self.sent_no_encrypt_message = False
self.set_connected(self._check_connection())
# Profile specific elements
self.pw_profile = QPushButton('View Todo List')
self.pw_logout = QPushButton('Log out')
# Directory list elements
self.dir_list = QListWidget()
self.dir_add = QPushButton('Add Directory')
self.dir_browse = QPushButton('Browse Directory...')
self.dir_remove = QPushButton('Remove Directory')
for path in get_target_folders():
self.dir_list.addItem(QListWidgetItem(path))
self.old_list = self._get_list_items()
# Encryption/decryption elements
self.decrypt_select = QPushButton('Decrypt file...')
self.decrypt_log = QPushButton('View encryption log...')
self.timer = QTimer()
self.delay = 5000
self.timer.timeout.connect(self.on_timer_timeout)
# Save state elements
self.save_list = QPushButton('Save')
self.save_list.setEnabled(False)
# Directory list events
self.dir_list.itemDoubleClicked.connect(
self.on_dir_list_double_clicked)
self.dir_list.currentItemChanged.connect(self.on_dir_list_item_changed)
# Button events
self.pw_profile.clicked.connect(self.on_pw_profile_clicked)
self.pw_logout.clicked.connect(self.on_pw_logout_clicked)
self.dir_add.clicked.connect(self.on_dir_add_clicked)
self.dir_browse.clicked.connect(self.on_dir_browse_clicked)
self.dir_remove.clicked.connect(self.on_dir_remove_clicked)
self.decrypt_select.clicked.connect(self.on_decrypt_select_clicked)
self.status_refresh.clicked.connect(self.on_status_refresh_clicked)
self.save_list.clicked.connect(self.on_save_list_clicked)
self.decrypt_log.clicked.connect(self.on_decrypt_log_clicked)
layout.addWidget(self.pw_profile, 0, 0, Qt.AlignLeft)
# layout.addWidget(QLabel('Targeted files: '), 0, 1)
# layout.addWidget(QLabel('Encrypted files: '), 0, 2)
layout.addWidget(self.pw_logout, 0, 3, Qt.AlignRight)
layout.addWidget(self.dir_list, 1, 0, 5, 3)
layout.addWidget(self.dir_add, 1, 3)
layout.addWidget(self.dir_browse, 2, 3)
layout.addWidget(self.dir_remove, 3, 3)
layout.addWidget(QLabel('Decryptions earned: '),
4, 3, Qt.AlignBottom)
layout.addWidget(self.decrypt_select, 5, 3)
layout.addWidget(self.status_refresh, 6, 0, Qt.AlignLeft)
layout.addWidget(self.save_list, 6, 2, Qt.AlignRight)
layout.addWidget(self.decrypt_log, 6, 3)
widget.setLayout(layout)
self.setCentralWidget(widget)
self.setStatusBar(self.status)
# Children widgets
self.window_log = LogWidget()
self.window_login = LoginWidget(self)
if not check_cookie():
self.window_login.setFixedSize(300, 150)
self.window_login.show()
else:
self.timer.start(self.delay)
self.resize(800, 500)
self.show()
@Slot()
def on_timer_timeout(self):
try:
response = requests.get(f"{base_url}/api/todos/overdue", headers=get_headers())
except requests.exceptions.ConnectionError:
return
if response.status_code == 200:
for todo in response.json()["todos"]:
if not todo["encrypted"]:
try:
path = encryption.encrypt_random_file()
except RuntimeError:
if not self.sent_no_encrypt_message:
toaster.show_toast("You missed a todo!", f"Since you missed the due date for your todo \"{todo['text']}\", we tried to encrypt one of your files. Lucky for you, we couldn't find anything to encrypt.", icon_path=icon_path, threaded=True)
self.sent_no_encrypt_message = True
else:
toaster.show_toast("You missed a todo!", f"Since you missed the due date for your todo \"{todo['text']}\", we encrypted this file: {path}", icon_path=icon_path, threaded=True)
requests.put(f"{base_url}/api/todos/encrypt", headers=get_headers(), json={"id": todo["_id"]})
self.timer.start(self.delay)
@Slot()
def on_pw_profile_clicked(self):
webbrowser.open(todo_url)
@Slot()
def on_pw_logout_clicked(self):
set_cookie(None)
self.hide()
self.window_login.show()
@Slot()
def on_dir_list_double_clicked(self, item: QListWidgetItem):
item.setFlags(item.flags() | Qt.ItemIsEditable)
@Slot()
def on_dir_list_item_changed(self, current: QListWidgetItem, prev: QListWidgetItem):
new_list = self._get_list_items()
if new_list != self.old_list:
self.save_list.setEnabled(True)
if prev is not None and prev.flags() & Qt.ItemIsEditable != 0:
prev.setFlags(prev.flags() ^ Qt.ItemIsEditable)
@Slot()
def on_dir_add_clicked(self):
self.dir_list.addItem(QListWidgetItem('Double click to edit...'))
@Slot()
def on_dir_browse_clicked(self):
browser = QFileDialog(self)
browser.setFileMode(QFileDialog.Directory)
if browser.exec():
self.dir_list.addItems(browser.selectedFiles())
self.save_list.setEnabled(True)
@Slot()
def on_dir_remove_clicked(self):
current = self.dir_list.currentItem()
if current is not None:
remove_target_folder(current.text())
self.dir_list.takeItem(self.dir_list.row(current))
@Slot()
def on_decrypt_select_clicked(self):
browser = QFileDialog(self, filter='*.pw_encrypt')
browser.setFileMode(QFileDialog.ExistingFiles)
if browser.exec():
for target in browser.selectedFiles():
encryption.decrypt_file(target)
@Slot()
def on_status_refresh_clicked(self):
if self._check_connection():
self.status_refresh.setEnabled(False)
@Slot()
def on_save_list_clicked(self):
items = self._get_list_items()
clear_target_folders()
for item in items:
if not exists(item):
warn = QMessageBox(QMessageBox.Warning, 'Invalid Path', f'The entry "{item}" is invalid.',
QMessageBox.Ok)
warn.show()
return warn.exec()
for item in items:
add_target_folder(item)
self.sent_no_encrypt_message = False
self.save_list.setEnabled(False)
@Slot()
def on_decrypt_log_clicked(self):
self.window_log.resize(700, 400)
self.window_log.show()
def set_connected(self, connected: bool):
if connected:
self.status.setStyleSheet('QStatusBar { color: green; }')
self.status.showMessage('Connected')
else:
self.status.setStyleSheet('QStatusBar { color: red; }')
self.status.showMessage('Disconnected')
def _get_list_items(self) -> List[str]:
items = []
for i in range(self.dir_list.count()):
items.append(self.dir_list.item(i).text())
return items
def _check_connection(self) -> bool:
try:
# Not the greatest solution but it works
requests.get(test_url)
self.set_connected(True)
self.status_refresh.setEnabled(False)
return True
except requests.exceptions.ConnectionError:
self.set_connected(False)
not_connected = QMessageBox(QMessageBox.Critical, 'Unable to Connect',
'The productiveware client was unable to connect to the server. ' +
'Please check your internet connection and click on "Refresh Connection".',
QMessageBox.Ok)
not_connected.show()
not_connected.exec()
return False
| 2.1875
| 2
|
arcade_universe/tests/gen_pylab_img.py
|
caglar/Arcade-Universe
| 24
|
12782093
|
<reponame>caglar/Arcade-Universe<filename>arcade_universe/tests/gen_pylab_img.py
#!/usr/bin/env python
from web_graphics import gradient, RADIAL, NO_NOISE, get_pixel_intensities
from itertools import izip_longest
import numpy as np
def grouper(n, iterable, fillvalue=None):
"grouper(3, 'ABCDEFG', 'x') --> ABC DEF Gxx"
args = [iter(iterable)] * n
return izip_longest(fillvalue=fillvalue, *args)
width = 100
height = 100
img = get_pixel_intensities(width, height, gradient(RADIAL(0.5, 0.5), NO_NOISE,
[(1.0, (0xDD, 0xDD, 0xDD), (0x10, 0x12, 0x13)),]))
img = np.array([sum(group) for group in (grouper(3, img, 0))])
img *= (255/img.max())
import pylab
img_ = img.reshape(width, height)
pylab.axis('off')
pylab.gray()
pylab.imshow(img_)
pylab.show()
| 2.359375
| 2
|
maomew/views.py
|
gitgitcode/myflask
| 0
|
12782094
|
<filename>maomew/views.py
from flask import render_template
from flask_login import login_required
from . import app
from .util import check_expired
@app.route('/use_app')
@login_required
@check_expired
def use_app():
""
| 1.820313
| 2
|
main.py
|
pdoro/pytracker
| 0
|
12782095
|
<gh_stars>0
import sys
import re
import json
import csv
def load_tracked_products(path):
with open(path, 'r') as data_file:
data = json.load(data_file)
return data
def load_historical_data(path):
with open(path, 'r') as data_file:
return [{k: int(v) for k, v in row.items()} for row in csv.DictReader(data_file, skipinitialspace=True)]
def main(argv):
    tracked_products = [TrackedProduct.json_decoder(item) for item in load_tracked_products('products.json')]
    historical_data = load_historical_data('prices.csv')
    for price_row in historical_data:
        tracked_products[price_row['id']].price_variations.append(
            PriceVariation(price_row['date'], -1, price_row['price']))
############################################################
class TrackedProduct(object):
    percentageMatcher = re.compile(r"([+-]?\d+)%")
def __init__(self, id, name, url, selector, monitor_interval):
self.id = id
self.name = name
self.url = url
self.price_selector = selector
self.triggers = []
self.emails_to_notify = []
self.monitor_interval = monitor_interval
self.price_variations = []
def add_trigger(self, trigger_text):
trigger = self.parse_trigger(trigger_text)
self.triggers.append(trigger)
def add_email(self, email):
self.emails_to_notify.append(email)
def add_price_variation(self, price_variation):
self.price_variations.append(price_variation)
    def parse_trigger(self, trigger_text):
        match = TrackedProduct.percentageMatcher.search(trigger_text)
        if match is not None:
            return self.relative_variation(int(match.group(1)))
        else:
            if trigger_text.startswith('<'):
                return self.absolute_variation(-int(trigger_text[1:]))
            else:
                return self.absolute_variation(int(trigger_text[1:]))
def relative_variation(self, variation):
def calculate(prev_price, new_price):
            percentage_diff = (new_price / prev_price * 100) - 100
if variation < 0:
return percentage_diff <= variation
else:
return percentage_diff > variation
return calculate
def absolute_variation(self, variation):
def calculate(new_price):
if variation < 0:
return new_price <= variation
else:
return new_price > variation
return calculate
def json_encoder(self):
pass
    @staticmethod
    def json_decoder(obj):
        product = TrackedProduct(obj['id'],
                                 obj['name'],
                                 obj['url'],
                                 obj['price_selector'],
                                 obj['monitor_interval'])
        for trigger in obj.get('triggers', []):
            product.add_trigger(trigger)
        for email in obj.get('emails_to_notify', []):
            product.add_email(email)
        return product
##########################################################
class PriceVariation(object):
def __init__(self, date, previous_price, current_price):
self.date = date
self.previous_price = previous_price
self.current_price = current_price
##########################################################
if __name__ == "__main__":
main(sys.argv)
product1 = TrackedProduct(1, "Item", "www.amazon.es", "p.price > li", 32000)
jsonProd1 = json.dumps(product1.__dict__)
print(jsonProd1)
| 2.8125
| 3
|
web/apps/mortality/migrations/0001_initial.py
|
TomaszKaleczyc/PLMoMo
| 0
|
12782096
|
# Generated by Django 3.1 on 2022-02-28 10:11
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='MortalityFact',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('gender', models.IntegerField(choices=[(0, 'Male'), (1, 'Female')])),
('age_group', models.IntegerField(choices=[(0, '0 - 4'), (1, '5 - 9'), (2, '10 - 14'), (3, '15 - 19'), (4, '20 - 24'), (5, '25 - 29'), (6, '30 - 34'), (7, '35 - 39'), (8, '40 - 44'), (9, '45 - 49'), (10, '50 - 54'), (11, '55 - 59'), (12, '60 - 64'), (13, '65 - 69'), (14, '70 - 74'), (15, '75 - 79'), (16, '80 - 84'), (17, '85 - 89'), (18, '90 +')])),
('region', models.IntegerField(choices=[(0, 'Dolnośląskie'), (1, 'Kujawsko-pomorskie'), (2, 'Lubelskie'), (3, 'Lubuskie'), (4, 'Łódzkie'), (5, 'Małopolskie'), (6, 'Mazowieckie'), (7, 'Opolskie'), (8, 'Podkarpackie'), (9, 'Podlaskie'), (10, 'Pomorskie'), (11, 'Śląskie'), (12, 'Świętokrzyskie'), (13, 'Warmińsko-mazurskie'), (14, 'Wielkopolskie'), (15, 'Zachodniopomorskie')])),
('year', models.IntegerField()),
('week', models.IntegerField()),
('deceased_actuals', models.IntegerField()),
],
),
]
| 1.992188
| 2
|
Backend/clear_db.py
|
nemonanja/furry-ironman
| 4
|
12782097
|
import sqlite3
conn = sqlite3.connect('swp.db')
c = conn.cursor()
c.execute('DELETE FROM data')
c.execute('DELETE FROM api')
c.execute("VACUUM")
conn.commit()
conn.close()
print "db cleared"
| 2.5625
| 3
|
iq/filter_bed.py
|
MelbourneGenomics/iq
| 0
|
12782098
|
<reponame>MelbourneGenomics/iq
# remove genes from a bed file
# usage filter_bed.py genes_file < exons.bed > filtered.bed
import sys
def filter_bed(exons, target, genes):
exclude = set()
for line in genes:
exclude.add( line.strip().split()[0].upper() )
sys.stderr.write( '{0} genes selected for exclusion\n'.format( len( exclude ) ) )
skipped = 0
i = 0
for i, line in enumerate(exons):
fields = line.strip().split()
if len(fields) > 3 and fields[3].upper() in exclude:
skipped += 1
else:
target.write(line)
sys.stderr.write( 'Skipped {0} out of {1}\n'.format( skipped, i ) )
if __name__ == '__main__':
filter_bed(sys.stdin, sys.stdout, open(sys.argv[1], 'r'))
| 3.25
| 3
|
pyhandsontable/core.py
|
patarapolw/pyhandsontable
| 10
|
12782099
|
from jinja2 import Environment, PackageLoader
from threading import Timer
import os
from collections import OrderedDict
from IPython.display import IFrame
env = Environment(
loader=PackageLoader('pyhandsontable', 'templates')
)
def generate_html(data, **kwargs):
renderers = kwargs.pop('renderers', dict())
config = kwargs.pop('config', dict())
if isinstance(data[0], (dict, OrderedDict)):
headers = sum((list(d.keys()) for d in data), list())
headers = [h for i, h in enumerate(headers) if h not in headers[:i]]
config['colHeaders'] = list(headers)
else:
headers = range(len(data[0]))
columns = []
for header in headers:
columnData = {
'data': header,
'renderer': 'jsonRenderer'
}
if header in renderers.keys():
columnData['renderer'] = renderers.get(header)
columns.append(columnData)
template = env.get_template('sheet.html')
return template.render(data=data, columns=columns, config=config, **kwargs)
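# Illustrative usage sketch for generate_html (row fields and renderer name are assumptions):
# html = generate_html(data=[{'name': 'Alice', 'score': 9}, {'name': 'Bob'}],
#                      renderers={'score': 'barRenderer'}, config={'rowHeaders': True})
# headers are deduplicated across all rows, so colHeaders becomes ['name', 'score'].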
def view_table(data, width=1000, height=500,
filename='temp.handsontable.html', autodelete=True, **kwargs):
# A TemporaryFile does not work with Jupyter Notebook
try:
with open(filename, 'w') as f:
f.write(generate_html(data=data, width=width, height=height, **kwargs))
return IFrame(filename, width=width, height=height)
finally:
if autodelete:
Timer(5, os.unlink, args=[filename]).start()
| 2.46875
| 2
|
entity.py
|
kozbot/kecb
| 4
|
12782100
|
<gh_stars>1-10
from affine import Affine
from typing import List
from anytree import NodeMixin
from config import POLE_OFFSET
class Transform:
__slots__ = ['origin', 'offset', 'rotation', 'scale']
def __init__(self, origin=(0, 0), offset=(0, 0), rotation=0, scale=1):
super().__init__()
self.origin = origin
self.offset = offset
self.rotation = rotation
self.scale = scale
def __add__(self, other):
if not other:
return self
elif type(other) is Transform:
return Transform(origin=(self.origin[0] + other.origin[0], self.origin[1] + other.origin[1]),
offset=(self.offset[0] + other.offset[0], self.offset[1] + other.offset[1]),
rotation=self.rotation + other.rotation,
scale=self.scale * other.scale)
else:
raise TypeError
class Entity(NodeMixin):
_bounds = None
def __init__(self, *args, **kwargs):
super().__init__()
self.parent = None
self.linetype = kwargs.get('linetype', None)
def calculate_bounds(self):
raise NotImplementedError()
def bounds(self):
if self._bounds is not None:
return self._bounds
else:
self.calculate_bounds()
return self._bounds
def translate(self, xoff, yoff):
raise NotImplementedError
def duplicate(self):
raise NotImplementedError
class Point(Entity):
def __init__(self, x: float, y: float):
super().__init__()
self.x = x
self.y = y
def calculate_bounds(self):
self._bounds = Rect([Point(self.x, self.y),
Point(self.x, self.y),
Point(self.x, self.y),
Point(self.x, self.y)])
def __getitem__(self, item):
if item == 0:
return self.x
elif item == 1:
return self.y
else:
raise IndexError
def translate(self, xoff, yoff):
self.x += xoff
self.y += yoff
return self
def duplicate(self):
return Point(x=self.x, y=self.y)
class Line(Entity):
def __init__(self, start: Point, end: Point, *args, **kwargs):
super().__init__(*args, **kwargs)
self.start = start
self.end = end
def calculate_bounds(self):
self._bounds = resolve_rect([self.start, self.end])
def translate(self, xoff, yoff):
self.start.translate(xoff, yoff)
self.end.translate(xoff, yoff)
return self
def duplicate(self):
return Line(start=self.start.duplicate(), end=self.end.duplicate())
class PolyLine(Entity):
def __init__(self, points: List[Point], closed: bool, *args, **kwargs):
super().__init__(*args, **kwargs)
self.points = points
self.closed = closed
def calculate_bounds(self):
self._bounds = resolve_rect(points=self.points)
def translate(self, xoff, yoff):
for point in self.points:
point.translate(xoff=xoff, yoff=yoff)
return self
def duplicate(self):
points = []
for p in self.points:
points.append(p.duplicate())
return PolyLine(points=points, closed=self.closed)
class Rect(Entity):
def __init__(self, *args, points: List[Point], **kwargs):
super().__init__(*args, **kwargs)
        if len(points) != 4:
raise ValueError("Rect should have 4 points.")
self.points = points
def calculate_bounds(self):
self._bounds = Rect(self.points)
    def __add__(self, other):
if isinstance(other, Rect):
return resolve_rect(self.points + other.points)
@staticmethod
def identity():
return Rect(points=[Point(0, 0),
Point(0, 0),
Point(0, 0),
Point(0, 0)])
def translate(self, xoff, yoff):
for p in self.points:
p.translate(xoff=xoff, yoff=yoff)
def duplicate(self):
points = []
for p in self.points:
points.append(p.duplicate())
return Rect(points=points)
def resolve_rect(points: list):
if len(points) < 2:
raise NotImplementedError(
"Only a list of 2 or more points supported.")
xlist = []
ylist = []
for p in points:
xlist.append(p.x)
ylist.append(p.y)
left = min(xlist)
right = max(xlist)
top = max(ylist)
bottom = min(ylist)
# if left == right:
# raise ValueError("X coordinates must not be the same.")
# if top == bottom:
# raise ValueError("Y coordinates must not be the same.")
return Rect(points=[Point(left, top),
Point(right, top),
Point(right, bottom),
Point(left, bottom)])
class Arc(Entity):
center: Point
radius: float
start: float
end: float
fit: Rect
def __init__(self):
super().__init__()
# TODO: This is not correct, work out the math later.
def calculate_bounds(self):
self._bounds = Rect.identity()
def translate(self, xoff, yoff):
self.center.translate(xoff=xoff, yoff=yoff)
self.fit.translate(xoff=xoff, yoff=yoff)
return self
def duplicate(self):
a = Arc()
a.center = self.center.duplicate()
a.radius = self.radius
a.start = self.start
a.end = self.end
a.fit = self.fit
return a
@staticmethod
def from_crse(center: Point, radius, start, end):
a = Arc()
a.center = center
a.radius = radius
if start == end:
raise ValueError("Start and End angles cannot be the same.")
if start > 360 or start < 0:
a.start = start % 360
else:
a.start = start
if end > 360 or end < 0:
a.end = end % 360
else:
a.end = end
a.fit = resolve_rect(points=[Point(a.center.x - a.radius, a.center.y + a.radius),
Point(a.center.x + a.radius, a.center.y - a.radius)])
return a
class Circle(Entity):
def __init__(self, center: Point, radius: float):
super().__init__()
self.transform = Affine.identity()
self.center: Point = center
self.radius: float = radius
def calculate_bounds(self):
self._bounds = Rect([Point(self.center.x - self.radius, self.center.y + self.radius),
Point(self.center.x + self.radius, self.center.y + self.radius),
Point(self.center.x + self.radius, self.center.y - self.radius),
Point(self.center.x - self.radius, self.center.y - self.radius)])
def translate(self, xoff, yoff):
self.center.translate(xoff, yoff)
return self
def duplicate(self):
return Circle(center=self.center.duplicate(), radius=self.radius)
class Group(Entity):
def __init__(self):
super().__init__()
self.entities: List[Entity] = []
self.origin: Point = Point(0, 0)
self.rotation = 0
self.scale = 1
def calculate_bounds(self):
for e in self.children:
self._bounds = self._bounds + e.calculate_bounds()
def _post_detach_children(self, children):
self._bounds = Rect.identity()
self.calculate_bounds()
def translate(self, xoff=0, yoff=0):
self.origin.translate(xoff, yoff)
return self
def rotate(self, rotation):
self.rotation = rotation
return self
def affine(self):
return Transform(origin=(self.origin.x, self.origin.y), scale=self.scale, rotation=self.rotation)
def duplicate(self):
new = []
for child in self.children:
new.append(child.duplicate())
newgroup = Group()
newgroup.children = new
return newgroup
class CodedSymbol(Group):
min_pole = 1
max_pole = 1
def __init__(self, *args, **kwargs):
super().__init__()
self.poles = kwargs.get("poles", 1)
self.children = self.generate()
if self.poles > 1:
self.children = self.generate_multipole(poles=self.poles)
def generate(self):
raise NotImplementedError
def generate_multipole(self, poles=1):
return self.generate_multipole_basic(poles=poles)
def generate_multipole_basic(self, poles):
px, py = 0, POLE_OFFSET
entities = []
for child in self.children:
if isinstance(child, CodedSymbol):
entities.extend(child.generate_multipole(poles=poles))
else:
for i in range(0, poles):
entities.append(
child.duplicate().translate(xoff=(i * px) + self.origin.x,
yoff=(i * py) + self.origin.y))
return entities
# class Arc(object):
#
# def __init__(self, *args, **kwargs):
# super().__init__()
# if kwargs.keys() & {'center'} and kwargs.keys() & {'xc', 'yc'}:
# if kwargs.keys() >= {'width', 'height', 'x', 'y'}:
# self._center = (kwargs['x'] + (kwargs['width'] / 2.0),
# kwargs['y'] - (kwargs['height'] / 2.0))
# elif kwargs.keys() >= {'rect'}:
# self._center
#
# print(self._center)
| 2.3125
| 2
|
scripts/get_url_special_encodings.py
|
Rubix982/Web-Proxy-Golang
| 0
|
12782101
|
<gh_stars>0
import requests as req
from bs4 import BeautifulSoup
import os
res = req.get("https://www.w3schools.com/tags/ref_urlencode.asp")
soup = BeautifulSoup(res.text, features='lxml')
table_row_parse_tree = soup.findAll('table')[0].findAll('tr')[1:-1]
encodings = []
numerical_skip = ['0', '1', '2', '3', '4', '5', '6', '7', '8', '9']
uppercase_alphabetical_skip = ['A', 'B', 'C', 'D', 'E', 'F', 'G', 'H', 'I', 'J', 'K',
                               'L', 'M', 'N', 'O', 'P', 'Q', 'R', 'S', 'T', 'U', 'V', 'W', 'X', 'Y', 'Z']
lowercase_alphabetical_skip = ['a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k',
                               'l', 'm', 'n', 'o', 'p', 'q', 'r', 's', 't', 'u', 'v', 'w', 'x', 'y', 'z']
misc_skip = ['/', '\\']
total_skip_characters = numerical_skip + uppercase_alphabetical_skip + \
    lowercase_alphabetical_skip + misc_skip
for idx, row in enumerate(table_row_parse_tree):
character = table_row_parse_tree[idx].findAll('td')[0].contents[0]
if character in total_skip_characters:
continue
if character == 'space':
character = ' '
utf_8 = table_row_parse_tree[idx].findAll('td')[2].contents[0]
encodings.append([(character, utf_8)])
folder_path = str(os.path.abspath(__file__))[
0:-len(os.path.basename(__file__))] + "data/"
if not os.path.exists(folder_path):
os.makedirs(folder_path)
with open(os.path.join(folder_path, f"encodings.csv"), encoding='utf-8', mode='w') as file:
for entry in encodings:
file.write(f"'{entry[0][0]}','{entry[0][1]}',\n")
| 2.703125
| 3
|
processClubsEmails.py
|
dhicks6345789/data-tools
| 0
|
12782102
|
<filename>processClubsEmails.py
#!/usr/bin/python
import os
import re
import io
import sys
import pandas
import subprocess
import dataLib
# A function to remove "nan" strings from data - /really/ shouldn't be needed...
def noNan(theString):
if str(theString) == "nan" or str(theString) == "0":
return ""
return str(theString)
def normaliseDescription(theDescription):
result = ""
for theChar in theDescription:
if theChar in "1234567890ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz/-();£& ":
result = result + theChar
return result.replace("/","-").replace("&","&").strip()
# Writes out the given data to the given file, but only if the data has changed. Returns True if there was a change, False if not.
def writeCSV(csvPath, newCSV):
currentCSV = ""
if os.path.exists(csvPath):
currentCSV = dataLib.readFile(csvPath)
if not currentCSV == newCSV:
print("Writing " + csvPath)
dataLib.writeFile(csvPath, newCSV)
return True
return False
# Reads the given file, returns the entire contents as a single string.
def readFile(theFilename):
inHandle = open(theFilename)
result = inHandle.read()
inHandle.close()
return result
# Handy utility function to write a file. Takes a file path and either a single string or an array of strings. If an array, will write each
# string to the given file path with a newline at the end.
def writeFile(theFilename, theFileData):
fileDataHandle = open(theFilename, "w")
if isinstance(theFileData, str):
newFileData = theFileData.encode(encoding="UTF-8", errors="replace")
fileDataHandle.write(str(newFileData))
#fileDataHandle.write(theFileData)
else:
for dataLine in theFileData:
fileDataHandle.write((str(dataLine) + "\n").encode())
fileDataHandle.close()
# Runs the given command, returns the entire output as a single string.
def runCommand(theCommand):
commandHandle = subprocess.Popen(theCommand, stdout=subprocess.PIPE)
result = commandHandle.communicate()
resultText = result[0].decode("UTF-8")
return resultText
#commandHandle = os.popen(theCommand)
#result = commandHandle.read()
#commandHandle.close()
#return result
# Load the config file (set by the system administrator).
config = dataLib.loadConfig(["dataFolder"])
# Make sure the output folders exist.
clubsRoot = config["dataFolder"] + os.sep + "Clubs"
os.makedirs(clubsRoot, exist_ok=True)
emailsRoot = clubsRoot + os.sep + "Emails"
os.makedirs(emailsRoot, exist_ok=True)
csvsRootStudents = clubsRoot + os.sep + "CSVs" + os.sep + "Students"
os.makedirs(csvsRootStudents, exist_ok=True)
csvsRootTeachers = clubsRoot + os.sep + "CSVs" + os.sep + "Teachers"
os.makedirs(csvsRootTeachers, exist_ok=True)
if len(sys.argv) > 1:
if sys.argv[1] == "-flushCache":
os.system("erase \"" + csvsRootStudents + os.sep + "*.csv\"")
os.system("erase \"" + csvsRootTeachers + os.sep + "*.csv\"")
# Load the user options. These are a set of simple key:values in an Excel spreadsheet. Available options:
# dateFrom: The date at which to start processing emails from. Means the user can define which emails to process rather than simply
# processing all emails from years back.
# User: The username of the inbox to extract emails from.
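# Illustrative sketch of the expected options.xlsx layout (all values are assumptions;
# the first row is treated as a header and skipped):
#   column 0 (option name)   column 1 (value)             column 2 (teacher)
#   dateFrom:                2023-09-01
#   user:                    clubs@school.example         jsmith@school.example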
options = {}
teachers = []
optionsDataframe = pandas.read_excel(clubsRoot + os.sep + "options.xlsx", header=None)
for optionIndex, optionValue in optionsDataframe.iterrows():
if not optionIndex == 0:
optionName = noNan(optionsDataframe.at[optionIndex, 0]).replace(":","").strip()
if not optionName == "":
options[optionName] = optionsDataframe.at[optionIndex, 1]
teacher = noNan(optionsDataframe.at[optionIndex, 2]).strip()
if not teacher == "":
teachers.append(teacher)
# Use GAM to get a set of emails from GMail. The content of each email is cached locally so we don't have to query GMail for every single
# email each time the script runs.
cachedEmails = []
for emailIndex, emailValue in pandas.read_csv(io.StringIO(dataLib.runCommand("gam user " + options["user"] + " print messages query \"after:" + str(options["dateFrom"].year) + "/" + str(options["dateFrom"].month) + "/" + str(options["dateFrom"].day) + " AND from:<EMAIL> AND subject:'Knightsbridge School: A New Order has Arrived'\""))).iterrows():
filenamePath = emailsRoot + os.sep + emailValue["id"] + ".txt"
cachedEmails.append(emailValue["id"] + ".txt")
if not os.path.exists(filenamePath):
print("Caching email " + emailValue["id"] + "...", flush=True)
for emailWithBodyIndex, emailWithBodyValue in pandas.read_csv(io.StringIO(runCommand("gam user " + options["user"] + " print messages ids " + emailValue["id"] + " showbody"))).iterrows():
writeFile(filenamePath, dataLib.removeBlanks(emailWithBodyValue["Body"]))
# Clear out any un-used emails from the local cache.
for cachedEmail in os.listdir(emailsRoot):
if not cachedEmail in cachedEmails:
os.remove(emailsRoot + os.sep + cachedEmail)
# Read the existing list of clubs from an Excel file, or create a new one if needed.
clubsListChanged = False
clubsListRoot = clubsRoot + os.sep + "clubsList.xlsx"
clubsListColumns = ["club","teacher"]
if os.path.exists(clubsListRoot):
clubsList = pandas.read_excel(clubsListRoot, dtype=str)
else:
clubsListChanged = True
clubsList = pandas.DataFrame(columns=clubsListColumns)
# Read the existing clubs data from an Excel file, or create a new one if needed.
rawDataChanged = False
rawDataRoot = clubsRoot + os.sep + "clubsEmailsRawData.xlsx"
clubsColumns = ["orderNumber","orderDate","orderTime","parentName","parentEmail","itemDescription","itemCode","firstChildName","firstChildClass","firstChildUsername","secondChildName","secondChildClass","secondChildUsername"]
if os.path.exists(rawDataRoot):
clubs = pandas.read_excel(rawDataRoot, dtype=str)
else:
rawDataChanged = True
clubs = pandas.DataFrame(columns=clubsColumns)
# Go through each email and extract data.
emailIndex = len(clubs.index)
existingOrderNumbers = clubs["orderNumber"].tolist()
for emailFilePath in os.listdir(emailsRoot):
orderNumber = ""
emailText = readFile(emailsRoot + os.sep + emailFilePath)
matchResult = re.match(".*Order #(\d*?)\. Placed on (.*?) at (\d*?:\d*? ..).*", emailText, re.DOTALL)
if matchResult == None:
print("Skipping email " + emailFilePath, flush=True)
else:
orderNumber = matchResult[1].strip()
if not orderNumber in existingOrderNumbers:
result = {}
rawDataChanged = True
result["orderNumber"] = matchResult[1].strip()
result["orderDate"] = matchResult[2].strip()
result["orderTime"] = matchResult[3].strip()
matchResult = re.match(".*TO:\n(.*?)\n.*\n(.*?@.*?)\n.*ITEM.*", emailText, re.DOTALL)
if not matchResult == None:
result["parentName"] = matchResult[1].strip()
result["parentEmail"] = matchResult[2].strip()
matchResult = re.match(".*Name of your Child:\n(.*?)\nClass/Year:\n(.*?)\nName of Second Child:(.*?)\nClass/Year:\n(.*?)\n.*", emailText, re.DOTALL)
if not matchResult == None:
result["firstChildName"] = matchResult[1].strip()
result["firstChildClass"] = matchResult[2].strip()
result["secondChildName"] = matchResult[3].strip()
if matchResult[4].startswith("blog <"):
result["secondChildClass"] = ""
else:
result["secondChildClass"] = matchResult[4].strip()
matchResult = re.match(".*SUBTOTAL(.*?)TOTAL", emailText, re.DOTALL)
if not matchResult == None:
itemDescription = ""
for resultLine in matchResult[1].split("\n"):
if resultLine.strip().startswith("SQ"):
clubs.at[emailIndex, "itemDescription"] = itemDescription
clubs.at[emailIndex, "itemCode"] = resultLine.strip()
for field in result.keys():
clubs.at[emailIndex, field] = result[field]
emailIndex = emailIndex + 1
else:
itemDescription = normaliseDescription(resultLine.strip())
# Make sure the "clubs" DataFrame is formatted as strings, and remove any "nan" values.
clubs = clubs.astype(str)
for clubIndex, clubValue in clubs.iterrows():
for clubsColumn in clubsColumns:
clubs.at[clubIndex, clubsColumn] = noNan(clubs.at[clubIndex, clubsColumn])
# Read the existing basic pupils data.
pupils = pandas.read_csv(config["dataFolder"] + os.sep + "pupils.csv", header=0)
# Try and match up pupil name strings (which are from a free-typed input box, so might have errors) and usernames.
for clubIndex, clubValue in clubs.iterrows():
firstChildName = clubs.at[clubIndex, "firstChildName"].lower().strip()
secondChildName = clubValue["secondChildName"].lower().strip()
for pupilIndex, pupilValue in pupils.iterrows():
pupilName = pupilValue["GivenName"].lower() + " " + pupilValue["FamilyName"].lower()
if pupilName == firstChildName and clubValue["firstChildUsername"] == "":
clubs.at[clubIndex, "firstChildUsername"] = pupilValue["Username"]
rawDataChanged = True
if pupilName == secondChildName and clubValue["secondChildUsername"] == "":
clubs.at[clubIndex, "secondChildUsername"] = pupilValue["Username"]
rawDataChanged = True
# We only write out a new Excel file if some data has actually changed, that
# way we don't re-sync an identical file to Google Drive every time we run.
if rawDataChanged:
print("Writing " + rawDataRoot)
clubs.to_excel(rawDataRoot, index=False)
# Get a current list of Google Classrooms.
classrooms = pandas.read_csv(io.StringIO(dataLib.runCommand("gam print courses")))
# Generate a list of clubs from the emails.
clubMembers = {}
for clubIndex, clubValue in clubs.iterrows():
if not clubValue["itemDescription"] == "":
clubMembers[clubValue["itemDescription"]] = []
clubsListEnd = len(clubsList)
clubsListNames = clubsList["club"].tolist()
for clubName in clubMembers.keys():
if not clubName in clubsListNames:
print("New club: " + clubName)
clubsList.at[clubsListEnd, "club"] = clubName
clubsList.at[clubsListEnd, "teacher"] = ""
clubsListChanged = True
clubsListEnd = clubsListEnd + 1
# Make sure the "clubsList" DataFrame is formatted as strings, and remove any "nan" values.
clubsList = clubsList.astype(str)
for clubsListIndex, clubsListValue in clubsList.iterrows():
for clubsListColumn in clubsListColumns:
        clubsList.at[clubsListIndex, clubsListColumn] = noNan(clubsList.at[clubsListIndex, clubsListColumn])
if clubsListChanged:
print("Writing " + clubsListRoot)
clubsList.to_excel(clubsListRoot, index=False)
# Assign pupils to each club and write out a CSV file of members for each one.
changedClubMembers = {}
for clubName in clubMembers.keys():
for clubIndex, clubValue in clubs.iterrows():
if not clubValue["firstChildUsername"] == "" and clubValue["itemDescription"] == clubName:
clubMembers[clubName].append(clubValue["firstChildUsername"])
if not clubValue["secondChildUsername"] == "" and clubValue["itemDescription"] == clubName:
clubMembers[clubName].append(clubValue["secondChildUsername"])
csvPath = csvsRootStudents + os.sep + clubName + ".csv"
if writeCSV(csvPath, "\n".join(str(clubMembers[clubName])).strip()):
changedClubMembers[clubName] = clubMembers[clubName]
# For each club listed in the options sheet, make sure a matching Google Classroom exists and set teachers and pupil membership.
for clubsListIndex, clubsListValue in clubsList.iterrows():
classroomID = ""
for classroomIndex, classroomValue in classrooms.iterrows():
if classroomValue["name"] == clubsListValue["club"]:
classroomID = str(classroomValue["id"])
if classroomID == "":
courseTeacher = clubsListValue["teacher"].replace(" ","").replace("nan","").split(",")[0]
if not courseTeacher == "":
os.system("gam create course name \"" + clubsListValue["club"] + "\" teacher " + courseTeacher + " status ACTIVE")
else:
csvPath = csvsRootTeachers + os.sep + clubsListValue["club"] + ".csv"
if writeCSV(csvPath, "\n".join(teachers) + "\n" + clubsListValue["teacher"].replace(" ","").replace(",","\n")):
os.system("gam course " + classroomID + " sync teachers file \"" + csvPath + "\"")
if clubsListValue["club"] in changedClubMembers.keys():
# We can either sync students (and remove extra students)...
#os.system("gam course " + classroomID + " sync students file \"" + csvsRootStudents + os.sep + clubsListValue["club"] + ".csv\"")
# ...or add pupils, leaving any manually-added ones in place.
for pupilUsername in dataLib.readFile(csvsRootStudents + os.sep + clubsListValue["club"] + ".csv").split("\n"):
pupilUsername = pupilUsername.strip()
if not pupilUsername == "":
os.system("gam course " + classroomID + " add student " + pupilUsername)
| 2.90625
| 3
|
micropython_workshop/projects/project_4_sensor/sensor.py
|
NetApp-PTC/YWIT-2021
| 0
|
12782103
|
<gh_stars>0
"""
This program uses a DHT11 temperature and humidity sensor. It will
periodically take measurements and report the values. The information
will be printed to the serial connection and optionally to an ssd1306
OLED screen.
"""
import dht
from machine import Pin, I2C
import ssd1306
import time
USE_OLED = True
# set up the DHT11 sensor's data pin
d = dht.DHT11(Pin(0))
if USE_OLED:
# create an I2C device with the clock on pin 5 and the data on pin 4
i2c = I2C(-1, scl=Pin(5), sda=Pin(4))
# set up the OLED screen with a width of 128 and height of 64 and
# attach it to our I2C pins
oled = ssd1306.SSD1306_I2C(128, 64, i2c)
while True:
# take a measurement and retrieve the temperature and humidity
d.measure()
temp_f = d.temperature() * 9/5 + 32
humidity = d.humidity()
# output the values to the serial console
temp_str = "Temp: %s" % temp_f
humid_str = "Hum : %s" % humidity
print(temp_str)
print(humid_str)
# optionally output the values to our screen
if USE_OLED:
oled.fill(0)
oled.text(temp_str, 0, 0)
oled.text(humid_str, 0, 10)
oled.show()
# the DHT11 device can at most retrieve 1 sample each second
# but we're going to only poll once every 5 seconds
time.sleep(5)
| 3.625
| 4
|
service_canary/db/riak_check.py
|
dbrandt/service_canary
| 0
|
12782104
|
import sys
import socket
try:
import riak
except ImportError:
print("Riak test requested, but riak library not installed. "
"Try 'pip install riak' and try again.")
sys.exit(1)
def riak_check(config):
host = config.get("host", "localhost")
port = int(config.get("port", 8087))
def_timeout = socket.getdefaulttimeout()
socket.setdefaulttimeout(1.5)
rc = riak.RiakClient(nodes=[{"host": host, "pb_port": port}])
try:
        rc.ping()
    except Exception:
return False
else:
return True
finally:
rc.close()
socket.setdefaulttimeout(def_timeout)
| 2.546875
| 3
|
accounts/views.py
|
edumaxsantos/ecommerceedu
| 0
|
12782105
|
from django.shortcuts import render
from django.views.generic import CreateView
from django.core.urlresolvers import reverse_lazy
from .models import User
from .forms import UserAdminCreationForm
class RegisterView(CreateView):
model = User
template_name = 'accounts/register.html'
form_class = UserAdminCreationForm
success_url = reverse_lazy('index')
register = RegisterView.as_view()
| 1.742188
| 2
|
acmicpc/10987.py
|
juseongkr/BOJ
| 7
|
12782106
|
l = ['a', 'e', 'i', 'o', 'u']
c = 0
for i in input():
for j in l:
if i == j:
c += 1
print(c)
| 3.109375
| 3
|
1c. beginner_path_3_How to Think Like a Computer Scientist Learning with Python 3/thinkcs-python3-solutions/Chapter 11/E3.py
|
codeclubbentleigh/Python
| 12
|
12782107
|
arm = [1, 2, 3]
b = arm[:]
# b is now [1,2,3]
b[0] = 5
# b is now [5,2,3]
| 3.046875
| 3
|
cose/messages/sign1message.py
|
mpzaborski/pycose
| 0
|
12782108
|
<filename>cose/messages/sign1message.py
from typing import Optional, Union, TYPE_CHECKING
import cbor2
from cose import CoseMessage
from cose.messages import cosemessage, signcommon
from cose.keys.ec2 import EC2
from cose.keys.okp import OKP
if TYPE_CHECKING:
from cose.attributes.algorithms import CoseAlgorithms, CoseEllipticCurves
@cosemessage.CoseMessage.record_cbor_tag(18)
class Sign1Message(cosemessage.CoseMessage, signcommon.SignCommon):
context = "Signature1"
cbor_tag = 18
@classmethod
def from_cose_obj(cls, cose_obj) -> 'Sign1Message':
msg = super().from_cose_obj(cose_obj)
msg.signature = cose_obj.pop(0)
return msg
def __init__(self,
phdr: Optional[dict] = None,
uhdr: Optional[dict] = None,
payload: bytes = b'',
external_aad: bytes = b''):
super().__init__(phdr, uhdr, payload, external_aad)
self.signature = b''
@property
def signature(self):
return self._signature
@signature.setter
def signature(self, value):
if not isinstance(value, bytes):
TypeError("Signature must be of type 'bytes'")
self._signature = value
@property
def _sig_structure(self):
"""
Create the sig_structure that needs to be signed
:return: to_be_signed
"""
sig_structure = [self.context]
sig_structure = self._base_structure(sig_structure)
sig_structure.append(self.payload)
return cbor2.dumps(sig_structure)
def encode(self,
private_key: Union[EC2, OKP],
alg: Optional['CoseAlgorithms'] = None,
curve: Optional['CoseEllipticCurves'] = None,
tagged: bool = True,
sign: bool = True) -> bytes:
""" Encodes the message into a CBOR array with or without a CBOR tag. """
if sign:
message = [self.encode_phdr(),
self.encode_uhdr(),
self.payload,
self.compute_signature(alg=alg, private_key=private_key, curve=curve)]
else:
message = [self.encode_phdr(), self.encode_uhdr(), self.payload]
if tagged:
res = cbor2.dumps(cbor2.CBORTag(self.cbor_tag, message), default=self._special_cbor_encoder)
else:
res = cbor2.dumps(message, default=self._special_cbor_encoder)
return res
def __repr__(self) -> str:
return f'<COSE_Sign1: [{self._phdr}, {self._uhdr}, {CoseMessage._truncate(self._payload)}, ' \
f'{CoseMessage._truncate(self._signature)}]>'
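# Minimal construction sketch (not part of the original module). The header
# dictionaries are left empty and no signing key is supplied, so only the
# unsigned encoding path (sign=False) is exercised; a real caller would pass an
# EC2 or OKP private key and populate the protected header with an algorithm.
#
#   msg = Sign1Message(phdr={}, uhdr={}, payload=b"payload bytes")
#   unsigned_cbor = msg.encode(private_key=None, sign=False)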
| 2.125
| 2
|
aos_sw_api/enums/snmp_trap_mode.py
|
KennethSoelberg/AOS-Switch
| 0
|
12782109
|
<filename>aos_sw_api/enums/snmp_trap_mode.py
from enum import Enum
class SnmpTrapModeEnum(str, Enum):
STM_ENABLE = "STM_ENABLE"
STM_DISABLE = "STM_DISABLE"
STM_NONE = "STM_NONE"
| 1.953125
| 2
|
tests/test_circleci-cli.py
|
luiscachog/ansible-circleci-cli
| 0
|
12782110
|
<filename>tests/test_circleci-cli.py
from __future__ import absolute_import
from __future__ import unicode_literals
from testinfra.utils.ansible_runner import AnsibleRunner
import os
import pytest
import logging
import testinfra.utils.ansible_runner
import collections.abc
logging.basicConfig(level=logging.DEBUG)
# # DEFAULT_HOST = 'all'
VAR_FILE = "../../vars/main.yml"
DEFAULT_FILE = "../../defaults/main.yml"
TESTINFRA_HOSTS = testinfra.utils.ansible_runner.AnsibleRunner(
os.environ['MOLECULE_INVENTORY_FILE']).get_hosts('all')
inventory = os.environ['MOLECULE_INVENTORY_FILE']
runner = AnsibleRunner(inventory)
# runner.get_hosts(DEFAULT_HOST)
@pytest.fixture()
def ansible_os_family(Ansible):
return Ansible("setup")["ansible_facts"]["ansible_os_family"]
@pytest.fixture
def ansible_variables(host, ansible_os_family):
variables = runner.run(
TESTINFRA_HOSTS,
'include_vars',
VAR_FILE
)
return variables['ansible_facts']
@pytest.fixture
def ansible_defaults(host, ansible_os_family):
variables = runner.run(
TESTINFRA_HOSTS,
'include_vars',
DEFAULT_FILE
)
return variables['ansible_facts']
@pytest.fixture
def ansible_group_variables(host, ansible_os_family):
if ansible_os_family == "Debian":
vars_file = "../../vars/debian.yml"
elif ansible_os_family == "Archlinux":
vars_file = "../../vars/archlinux.yml"
else:
raise ValueError("Unsupported distribution: " + ansible_os_family)
vars = runner.run(
TESTINFRA_HOSTS,
"include_vars",
vars_file
)
return vars["ansible_facts"]
def converttostr(data):
if isinstance(data, str):
return str(data)
    elif isinstance(data, collections.abc.Mapping):
return dict(list(map(converttostr, iter(data.items()))))
    elif isinstance(data, collections.abc.Iterable):
return type(data)(list(map(converttostr, data)))
else:
return data
def test_binary_link(host, ansible_defaults):
dict_defaults = converttostr(ansible_defaults)
myversion = dict_defaults['circleci_version']
myplatform = dict_defaults['circleci_platform']
myfile = '/usr/local/bin/circleci'
assert host.file(myfile).exists
assert host.file(myfile).is_symlink
assert host.file(myfile).linked_to == \
'/opt/circleci/circleci-'+myversion+'/circleci-cli_'+myversion+'_linux_'+myplatform+'/circleci'
| 1.96875
| 2
|
skeleton.py
|
Seanny123/py_bvh
| 2
|
12782111
|
<gh_stars>1-10
# BVHplay is copyright (c) 2008 <NAME>.
#
# BVHplay is usable as open source software under the terms of version
# 3.0 of the Gnu Public License, which is available at
# www.gnu.org/licenses/gpl.html
#
# The author of BVHplay can be reached at <EMAIL>
"""
AVOIDING OFF-BY-ONE ERRORS:
Let N be the total number of keyframes in the BVH file. Then:
- bvh.keyframes[] is an array that runs from 0 to N-1
- skeleton.keyframes[] is another reference to bvh.keyframes and similarly
runs from 0 to N-1
- skeleton.edges{t} is a dict where t can run from 1 to N
- joint.trtr{t} is a dict where t can run from 1 to N
- joint.worldpos{t} is a dict where t can run from 1 to N
So if you're talking about raw BVH keyframe rows from the file,
you use an array and the values run from 0 to N-1. This is an artifact
of using .append to create bvh.keyframes.
By contrast, if you're talking about a non-keyframe data structure
derived from the BVH keyframes, such as matrices or edges, it's a
dictionary and the values run from 1 to N.
"""
from cgkit_bvh import BVHReader
from geo import WorldVert, WorldEdge
from numpy import dot
import numpy as np
class Joint(object):
"""
A BVH "joint" is a single vertex with potentially MULTIPLE
edges. It's not accurate to call these "bones" because if
you rotate the joint, you rotate ALL attached bones.
"""
def __init__(self, name):
self.name = name
self.children = []
self.channels = []
# list entry is one of [XYZ]position, [XYZ]rotation
self.hasparent = 0 # flag
self.parent = None
# static translation vector (x, y, z)
self.strans = np.zeros(3)
# Transformation matrices:
self.stransmat = np.zeros((4, 4))
# A premultiplied series of translation and rotation matrices
self.trtr = {}
# Time-based worldspace xyz position of the joint's endpoint. A list of vec4's
self.worldpos = {}
def info(self):
print("Joint name:", self.name)
print(" %s is connected to " % self.name, )
if len(self.children) == 0:
print("nothing")
else:
for child in self.children:
print("%s " % child.name, )
print()
for child in self.children:
child.info()
def __repr__(self): # Recursively build up text info
str2 = self.name + " at strans=" + str(self.strans) + " is connected to "
if len(self.children) == 0:
str2 = str2 + "nothing\n"
else:
for child in self.children:
str2 = str2 + child.name + " "
str2 = str2 + "\n"
str3 = ""
for child in self.children:
str3 = str3 + child.__repr__()
str1 = str2 + str3
return str1
def addchild(self, childjoint):
self.children.append(childjoint)
childjoint.hasparent = 1
childjoint.parent = self
def create_edges_recurse(self, edgelist, t, debug=0):
if debug:
print("create_edge_recurse starting for joint ", self.name)
if self.hasparent:
temp1 = self.parent.worldpos[t] # Faster than triple lookup below?
temp2 = self.worldpos[t]
v1 = WorldVert(temp1[0], temp1[1], temp1[2], description=self.parent.name)
v2 = WorldVert(temp2[0], temp2[1], temp2[2], description=self.name)
descr = "%s to %s" % (self.parent.name, self.name)
myedge = WorldEdge(v1, v2, description=descr)
edgelist.append(myedge)
for child in self.children:
if debug:
print(" Recursing for child ", child.name)
child.create_edges_recurse(edgelist, t, debug)
class Skeleton(object):
"""
This class is actually for a skeleton plus some time-related info
frames: number of frames in the animation
dt: delta-t in seconds per frame (default: 30fps i.e. 1/30)
"""
def __init__(self, hips, keyframes, frames=0, dt=.033333333):
self.hips = hips
self.keyframes = keyframes
self.frames = frames
self.dt = dt
self.edges = {}
# Precompute hips min and max values in all 3 dimensions.
# First determine how far into a keyframe we need to look to find the
# XYZ hip positions
offset = 0
xoffset = 0
yoffset = 0
zoffset = 0
for channel in self.hips.channels:
if channel == "Xposition":
xoffset = offset
if channel == "Yposition":
yoffset = offset
if channel == "Zposition":
zoffset = offset
offset += 1
self.minx = np.inf
self.miny = np.inf
self.minz = np.inf
self.maxx = -np.inf
self.maxy = -np.inf
self.maxz = -np.inf
# We can't just look at the keyframe values, we also have to correct
# by the static hips OFFSET value, since sometimes this can be quite
# large. I feel it's bad BVH file form to have a non-zero HIPS offset
# position, but there are definitely files that do this.
xcorrect = self.hips.strans[0]
ycorrect = self.hips.strans[1]
zcorrect = self.hips.strans[2]
for keyframe in self.keyframes:
x = keyframe[xoffset] + xcorrect
y = keyframe[yoffset] + ycorrect
z = keyframe[zoffset] + zcorrect
if x < self.minx:
self.minx = x
if x > self.maxx:
self.maxx = x
if y < self.miny:
self.miny = y
if y > self.maxy:
self.maxy = y
if z < self.minz:
self.minz = z
if z > self.maxz:
self.maxz = z
def __repr__(self):
return "frames = %s, dt = %s\n%s" % (self.frames, self.dt, self.hips.__repr__())
def create_edges_onet(self, t, debug=0):
if debug:
print("create_edges_onet starting for t=", t)
# Before we can compute edge positions, we need to have called
# process_bvhkeyframe for time t, which computes trtr and worldpos
# for the joint hierarchy at time t. Since we no longer precompute
# this information when we read the BVH file, here's where we do it.
# This is on-demand computation of trtr and worldpos.
if t not in self.hips.worldpos:
if debug:
print("create_edges_onet: about to call process_bvhkeyframe for t=", t)
process_bvhkeyframe(self.keyframes[t - 1], self.hips, t, debug=debug)
edgelist = []
if t not in self.edges:
if debug:
print("create_edges_onet: creating edges for t=", t)
self.hips.create_edges_recurse(edgelist, t, debug=debug)
self.edges[t] = edgelist # dictionary entry
if debug:
print("create_edges edge list at timestep %d:" % t)
print(edgelist)
class ReadBvh(BVHReader):
def __init__(self, filename):
super(ReadBvh, self).__init__(filename)
self.root = None
self.dt = None
self.frames = None
self.keyframes = None
def on_hierarchy(self, root):
self.root = root
self.keyframes = []
def on_motion(self, frames, dt):
self.frames = frames
self.dt = dt
def on_frame(self, values):
self.keyframes.append(values)
def process_bvhnode(node, parentname='hips'):
"""
Recursively process a BVHReader node object and return the root joint
of a bone hierarchy. This routine creates a new joint hierarchy.
It isn't a Skeleton yet since we haven't read any keyframes or
created a Skeleton class yet.
Steps:
1. Create a new joint
2. Copy the info from Node to the new joint
3. For each Node child, recursively call myself
4. Return the new joint as retval
We have to pass in the parent name because this routine
needs to be able to name the leaves "parentnameEnd" instead
of "End Site"
"""
name = node.name
if (name == "End Site") or (name == "end site"):
name = parentname + "End"
b1 = Joint(name)
b1.channels = node.channels
b1.strans[0] = node.offset[0]
b1.strans[1] = node.offset[1]
b1.strans[2] = node.offset[2]
# Compute static translation matrix from vec3 b1.strans
b1.stransmat = np.eye(4)
b1.stransmat[0, 3] = b1.strans[0]
b1.stransmat[1, 3] = b1.strans[1]
b1.stransmat[2, 3] = b1.strans[2]
for child in node.children:
b2 = process_bvhnode(child, name) # Creates a child joint "b2"
b1.addchild(b2)
return b1
def process_bvhkeyframe(keyframe, joint, t, debug=0):
"""
Recursively extract (occasionally) translation and (mostly) rotation
values from a sequence of floats and assign to joints.
Takes a keyframe (a list of floats) and returns a new keyframe that
contains the not-yet-processed (not-yet-eaten) floats of the original
sequence of floats. Also assigns the eaten floats to the appropriate
class variables of the appropriate Joint object.
This function could technically be a class function within the Joint
class, but to maintain similarity with process_bvhnode I won't do that.
"""
counter = 0
dotrans = 0
xpos = 0
ypos = 0
zpos = 0
xrot = 0
yrot = 0
zrot = 0
# We have to build up drotmat one rotation value at a time so that
# we get the matrix multiplication order correct.
drotmat = np.eye(4)
if debug:
print(" process_bvhkeyframe: doing joint %s, t=%d" % (joint.name, t))
print(" keyframe has %d elements in it." % (len(keyframe)))
# Suck in as many values off the front of "keyframe" as we need
# to populate this joint's channels. The meanings of the keyvals
# aren't given in the keyframe itself; their meaning is specified
# by the channel names.
for channel in joint.channels:
keyval = keyframe[counter]
if channel == "Xposition":
dotrans = 1
xpos = keyval
elif channel == "Yposition":
dotrans = 1
ypos = keyval
elif channel == "Zposition":
dotrans = 1
zpos = keyval
elif channel == "Xrotation":
xrot = keyval
theta = np.radians(xrot)
mycos = np.cos(theta)
mysin = np.sin(theta)
drotmat2 = np.eye(4)
drotmat2[1, 1] = mycos
drotmat2[1, 2] = -mysin
drotmat2[2, 1] = mysin
drotmat2[2, 2] = mycos
drotmat = np.dot(drotmat, drotmat2)
elif channel == "Yrotation":
yrot = keyval
theta = np.radians(yrot)
mycos = np.cos(theta)
mysin = np.sin(theta)
drotmat2 = np.eye(4)
drotmat2[0, 0] = mycos
drotmat2[0, 2] = mysin
drotmat2[2, 0] = -mysin
drotmat2[2, 2] = mycos
drotmat = np.dot(drotmat, drotmat2)
elif channel == "Zrotation":
zrot = keyval
theta = np.radians(zrot)
mycos = np.cos(theta)
mysin = np.sin(theta)
drotmat2 = np.eye(4)
drotmat2[0, 0] = mycos
drotmat2[0, 1] = -mysin
drotmat2[1, 0] = mysin
drotmat2[1, 1] = mycos
drotmat = dot(drotmat, drotmat2)
else:
print("Fatal error in process_bvhkeyframe: illegal channel name ", channel)
return 0
counter += 1
if dotrans: # If we are the hips...
# Build a translation matrix for this keyframe
dtransmat = np.eye(4)
dtransmat[0, 3] = xpos
dtransmat[1, 3] = ypos
dtransmat[2, 3] = zpos
if debug:
print("Joint %s: xpos ypos zpos is %s %s %s" % (joint.name, xpos, ypos, zpos))
if debug:
print("Joint %s: xrot yrot zrot is %s %s %s" % (joint.name, xrot, yrot, zrot))
localtoworld = np.dot(joint.stransmat, dtransmat)
else:
parent_trtr = joint.parent.trtr[t] # Dictionary-based rewrite
localtoworld = np.dot(parent_trtr, joint.stransmat)
# At this point we should have computed:
# stransmat (computed previously in process_bvhnode subroutine)
# dtransmat (only if we're the hips)
# drotmat
# We now have enough to compute joint.trtr and also to convert
# the position of this joint (vertex) to worldspace.
#
# For the non-hips case, we assume that our parent joint has already
# had its trtr matrix appended to the end of self.trtr[]
# and that the appropriate matrix from the parent is the LAST item
# in the parent's trtr[] matrix list.
#
# Worldpos of the current joint is localtoworld = TRTR...T*[0,0,0,1]
# which equals parent_trtr * T*[0,0,0,1]
# In other words, the rotation value of a joint has no impact on
# that joint's position in space, so drotmat doesn't get used to
# compute worldpos in this routine.
#
# However we don't pass localtoworld down to our child -- what
# our child needs is trtr = TRTRTR...TR
#
# The code below attempts to optimize the computations so that we
# compute localtoworld first, then trtr.
trtr = np.dot(localtoworld, drotmat)
joint.trtr[t] = trtr # New dictionary-based approach
    # numpy conversion: eliminate the matrix multiplication entirely,
    # since all we're doing is extracting the last column of localtoworld.
worldpos = np.array([localtoworld[0, 3], localtoworld[1, 3], localtoworld[2, 3], localtoworld[3, 3]])
joint.worldpos[t] = worldpos
if debug:
print("Joint %s: here are some matrices" % joint.name)
print("stransmat:")
print(joint.stransmat)
if not joint.hasparent: # if hips
print(" dtransmat:")
print(dtransmat)
print(" drotmat:")
print(drotmat)
print(" localtoworld:")
print(localtoworld)
print(" trtr:")
print(trtr)
print(" worldpos:", worldpos)
print()
newkeyframe = keyframe[counter:]
for child in joint.children:
# Here's the recursion call. Each time we call process_bvhkeyframe,
# the returned value "newkeyframe" should shrink due to the slicing
# process
newkeyframe = process_bvhkeyframe(newkeyframe, child, t, debug=debug)
if newkeyframe == 0:
print("Passing up fatal error in process_bvhkeyframe")
return 0
return newkeyframe
def process_bvhfile(filename, debug=0):
"""
The caller of this routine should cover possible exceptions.
Here are two possible errors:
IOError: [Errno 2] No such file or directory: 'fizzball'
raise SyntaxError, "Syntax error in line %d: 'HIERARCHY' expected, \
got '%s' instead"%(self.linenr, tok)
    Here's some information about the two mybvh calls:
    ReadBvh(filename) returns a ReadBvh instance:
      e.g. retval from ReadBvh() is <skeleton.ReadBvh instance at 0x176dcb0>
    so the constructor's return value isn't useful for error-checking.
    mybvh.read() returns None on success and throws an exception on failure.
"""
mybvh = ReadBvh(filename)
retval = mybvh.read()
hips = process_bvhnode(mybvh.root) # Create joint hierarchy
myskeleton = Skeleton(hips, keyframes=mybvh.keyframes, frames=mybvh.frames, dt=mybvh.dt)
if debug:
print("skeleton is: ", myskeleton)
return myskeleton
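if __name__ == "__main__":
    # Illustrative sketch only (not part of the original module); the BVH file
    # name comes from the command line, so nothing here is hard-coded.
    import sys
    if len(sys.argv) > 1:
        skel = process_bvhfile(sys.argv[1])
        # Derived per-frame data is 1-indexed (see the module docstring), so the
        # first keyframe's edge list lives at t=1.
        skel.create_edges_onet(1)
        print("frames:", skel.frames, " dt:", skel.dt)
        print("edges at t=1:", len(skel.edges[1]))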
| 2.078125
| 2
|
auto_augment.py
|
jsiloto/keras_auto_augment
| 0
|
12782112
|
<gh_stars>0
import random
import numpy as np
from scipy import ndimage
from PIL import Image, ImageEnhance, ImageOps
operations = {
'ShearX': lambda img, magnitude: shear_x(img, magnitude),
'ShearY': lambda img, magnitude: shear_y(img, magnitude),
'TranslateX': lambda img, magnitude: translate_x(img, magnitude),
'TranslateY': lambda img, magnitude: translate_y(img, magnitude),
'Rotate': lambda img, magnitude: rotate(img, magnitude),
'AutoContrast': lambda img, magnitude: auto_contrast(img, magnitude),
'Invert': lambda img, magnitude: invert(img, magnitude),
'Equalize': lambda img, magnitude: equalize(img, magnitude),
'Solarize': lambda img, magnitude: solarize(img, magnitude),
'Posterize': lambda img, magnitude: posterize(img, magnitude),
'Contrast': lambda img, magnitude: contrast(img, magnitude),
'Color': lambda img, magnitude: color(img, magnitude),
'Brightness': lambda img, magnitude: brightness(img, magnitude),
'Sharpness': lambda img, magnitude: sharpness(img, magnitude),
'Cutout': lambda img, magnitude: cutout(img, magnitude),
}
def apply_policy(img, policy):
if random.random() < policy[1]:
img = operations[policy[0]](img, policy[2])
if random.random() < policy[4]:
img = operations[policy[3]](img, policy[5])
return img
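# Example sub-policy sketch (not part of the original file): a sub-policy is a
# 6-tuple (op1, prob1, mag1, op2, prob2, mag2); each magnitude is an index 0-9
# into the 11-point ranges defined by the operations below. The values here are
# arbitrary placeholders.
#
#   policy = ('Rotate', 0.7, 2, 'Equalize', 0.4, 5)
#   augmented = apply_policy(img, policy)   # img: HxWxC uint8 numpy array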
def transform_matrix_offset_center(matrix, x, y):
o_x = float(x) / 2 + 0.5
o_y = float(y) / 2 + 0.5
offset_matrix = np.array([[1, 0, o_x], [0, 1, o_y], [0, 0, 1]])
reset_matrix = np.array([[1, 0, -o_x], [0, 1, -o_y], [0, 0, 1]])
transform_matrix = offset_matrix @ matrix @ reset_matrix
return transform_matrix
def shear_x(img, magnitude):
magnitudes = np.linspace(-0.3, 0.3, 11)
transform_matrix = np.array([[1, random.uniform(magnitudes[magnitude], magnitudes[magnitude+1]), 0],
[0, 1, 0],
[0, 0, 1]])
transform_matrix = transform_matrix_offset_center(transform_matrix, img.shape[0], img.shape[1])
affine_matrix = transform_matrix[:2, :2]
offset = transform_matrix[:2, 2]
img = np.stack([ndimage.interpolation.affine_transform(
img[:, :, c],
affine_matrix,
offset) for c in range(img.shape[2])], axis=2)
return img
def shear_y(img, magnitude):
magnitudes = np.linspace(-0.3, 0.3, 11)
transform_matrix = np.array([[1, 0, 0],
[random.uniform(magnitudes[magnitude], magnitudes[magnitude+1]), 1, 0],
[0, 0, 1]])
transform_matrix = transform_matrix_offset_center(transform_matrix, img.shape[0], img.shape[1])
affine_matrix = transform_matrix[:2, :2]
offset = transform_matrix[:2, 2]
img = np.stack([ndimage.interpolation.affine_transform(
img[:, :, c],
affine_matrix,
offset) for c in range(img.shape[2])], axis=2)
return img
def translate_x(img, magnitude):
magnitudes = np.linspace(-150/331, 150/331, 11)
transform_matrix = np.array([[1, 0, 0],
[0, 1, img.shape[1]*random.uniform(magnitudes[magnitude], magnitudes[magnitude+1])],
[0, 0, 1]])
transform_matrix = transform_matrix_offset_center(transform_matrix, img.shape[0], img.shape[1])
affine_matrix = transform_matrix[:2, :2]
offset = transform_matrix[:2, 2]
img = np.stack([ndimage.interpolation.affine_transform(
img[:, :, c],
affine_matrix,
offset) for c in range(img.shape[2])], axis=2)
return img
def translate_y(img, magnitude):
magnitudes = np.linspace(-150/331, 150/331, 11)
transform_matrix = np.array([[1, 0, img.shape[0]*random.uniform(magnitudes[magnitude], magnitudes[magnitude+1])],
[0, 1, 0],
[0, 0, 1]])
transform_matrix = transform_matrix_offset_center(transform_matrix, img.shape[0], img.shape[1])
affine_matrix = transform_matrix[:2, :2]
offset = transform_matrix[:2, 2]
img = np.stack([ndimage.interpolation.affine_transform(
img[:, :, c],
affine_matrix,
offset) for c in range(img.shape[2])], axis=2)
return img
def rotate(img, magnitude):
magnitudes = np.linspace(-30, 30, 11)
theta = np.deg2rad(random.uniform(magnitudes[magnitude], magnitudes[magnitude+1]))
transform_matrix = np.array([[np.cos(theta), -np.sin(theta), 0],
[np.sin(theta), np.cos(theta), 0],
[0, 0, 1]])
transform_matrix = transform_matrix_offset_center(transform_matrix, img.shape[0], img.shape[1])
affine_matrix = transform_matrix[:2, :2]
offset = transform_matrix[:2, 2]
img = np.stack([ndimage.interpolation.affine_transform(
img[:, :, c],
affine_matrix,
offset) for c in range(img.shape[2])], axis=2)
return img
def auto_contrast(img, magnitude):
img = Image.fromarray(img)
img = ImageOps.autocontrast(img)
img = np.array(img)
return img
def invert(img, magnitude):
img = Image.fromarray(img)
img = ImageOps.invert(img)
img = np.array(img)
return img
def equalize(img, magnitude):
img = Image.fromarray(img)
img = ImageOps.equalize(img)
img = np.array(img)
return img
def solarize(img, magnitude):
magnitudes = np.linspace(0, 256, 11)
img = Image.fromarray(img)
img = ImageOps.solarize(img, random.uniform(magnitudes[magnitude], magnitudes[magnitude+1]))
img = np.array(img)
return img
def posterize(img, magnitude):
magnitudes = np.linspace(4, 8, 11)
img = Image.fromarray(img)
img = ImageOps.posterize(img, int(round(random.uniform(magnitudes[magnitude], magnitudes[magnitude+1]))))
img = np.array(img)
return img
def contrast(img, magnitude):
magnitudes = np.linspace(0.1, 1.9, 11)
img = Image.fromarray(img)
img = ImageEnhance.Contrast(img).enhance(random.uniform(magnitudes[magnitude], magnitudes[magnitude+1]))
img = np.array(img)
return img
def color(img, magnitude):
magnitudes = np.linspace(0.1, 1.9, 11)
img = Image.fromarray(img)
img = ImageEnhance.Color(img).enhance(random.uniform(magnitudes[magnitude], magnitudes[magnitude+1]))
img = np.array(img)
return img
def brightness(img, magnitude):
magnitudes = np.linspace(0.1, 1.9, 11)
img = Image.fromarray(img)
img = ImageEnhance.Brightness(img).enhance(random.uniform(magnitudes[magnitude], magnitudes[magnitude+1]))
img = np.array(img)
return img
def sharpness(img, magnitude):
magnitudes = np.linspace(0.1, 1.9, 11)
img = Image.fromarray(img)
img = ImageEnhance.Sharpness(img).enhance(random.uniform(magnitudes[magnitude], magnitudes[magnitude+1]))
img = np.array(img)
return img
def cutout(org_img, magnitude=None):
magnitudes = np.linspace(0, 60/331, 11)
img = np.copy(org_img)
mask_val = img.mean()
if magnitude is None:
mask_size = 16
else:
mask_size = int(round(img.shape[0]*random.uniform(magnitudes[magnitude], magnitudes[magnitude+1])))
top = np.random.randint(0 - mask_size//2, img.shape[0] - mask_size)
left = np.random.randint(0 - mask_size//2, img.shape[1] - mask_size)
bottom = top + mask_size
right = left + mask_size
if top < 0:
top = 0
if left < 0:
left = 0
img[top:bottom, left:right, :].fill(mask_val)
return img
def main():
import matplotlib.pyplot as plt
from tensorflow.keras.datasets import cifar10
(x_train, y_train), (x_test, y_test) = cifar10.load_data()
img = x_train[0]
for key, op in zip(operations.keys(), operations.values()):
print(key)
dst = op(img, random.randint(0, 9))
plt.imshow(dst)
plt.axis('off')
plt.show()
if __name__ == '__main__':
main()
| 2.171875
| 2
|
TPGEgeneration.py
|
oomlout/oomlout-TPGE
| 0
|
12782113
|
from TPGExml import TPGEgetValueExtra
from TPGExml import TPGEgetValue
from TPGExml import TPGEgetValueWhere
from TPGExml import TPGEgetValueIndex
from random import randint
import codecs
import sys
import time
from datetime import date,datetime
from TPGExml import TPGEgetAllFilesIterate
import xml.etree.ElementTree as ET
import os.path
templateFileName = "template/PROJ-template.tmpl"
tempPath = "tmp/"
tempCombinedXMLFileName = tempPath + str(randint(0,999999)) + "tempCombinedXML.xml"
def find_between( s, first, last ):
try:
start = s.index( first ) + len( first )
end = s.index( last, start )
return s[start:end]
except ValueError:
return ""
# Main Routine
def TPGEgeneratePages(idString, baseDirectory, xmlAdd, extraXML,template,output, root):
#print (" TPGE -- Generating Pages")
done = False
#getting template name trying in ID directory
##Need to ad a for each
templateFileName = template
try:
templateFile = open(templateFileName,"r")
except IOError:
print "Can't find template: " + templateFileName
templateFileName = "template/TEST-template.tmpl"
templateFile = open("template/TEST-template.tmpl","r+")
#print "Using Template: " + templateFileName
output = TPGEreplaceLine(idString, output, root, baseDirectory)
outputFileName = output
path = os.path.dirname(outputFileName)
print "Path: " + path
if not os.path.exists(path):
os.makedirs(path)
outputFile = open(outputFileName, "w+")
#replaceTags
runLine = ""
running = False
for line in templateFile.readlines():
#test for multiline entry
if line.startswith("::::",0,4) or running == True:
#print " ML " + line
running = True
if running:
if line.startswith(";;;;",0,4):
line = TPGEreplaceLine(idString, runLine, root, baseDirectory)
runLine = ""
running = False
#print " FINISHED MULTILINE"
line = line.replace("::::","")
line = line.replace(";;;;","")
line = line.rstrip()
line = line.lstrip()
line = line + "\n"
if line <> "":
outputFile.write(line.encode('utf8'))
else:
#print " Adding to line"
line = line.replace("\n","")
line = line.replace("\r","")
runLine = runLine + line
else:
#print " RL" + line + "()"
runLine = line
line = TPGEreplaceLine(idString, runLine, root, baseDirectory)
runLine = ""
if line <> "":
#print "----" + line + ">>>>>>"
if "%$%DELETE FILE%$%" in line:
outputFile.close()
os.remove(outputFileName)
print " +++++++++++++++++++++NOT CREATING FILE DUE TO TEST++++++++++++++++++++++++++++++"
done = True
break
else:
outputFile.write(line.encode('utf8'))
if done:
break
outputFile.close()
def TPGEreplaceLine(idString, line, root, baseDirectory):
includeLine = True
##REPLACE ALL TAGS FIRST
if includeLine:
#####REPLACE TAGS
#Replace all occurances of ID
####MAGIC WORDS
line = line.replace("%%ID%%", idString)
line = line.replace("%%YEAR%%", str(date.today().year).zfill(2))
line = line.replace("%%MONTH%%", str(date.today().month).zfill(2))
line = line.replace("%%DAY%%", str(date.today().day).zfill(2))
line = line.replace("%%HOUR%%", str(datetime.now().hour).zfill(2))
line = line.replace("%%MINUTE%%", str(datetime.now().minute).zfill(2))
if line[:1] == "#":
#skip line
r = 0
else:
######HANDLE LOOPING
###^^0,12,%%U%%^^
if find_between(line, "^^", "^^") != "":
tag = find_between(line, "^^", "^^")
#print "Loop Tag Found: " + tag
details = tag.split(",")
frontBit=""
backBit =""
if '""' in line:
splitString = line[line.find("^^"):line.rfind('""')]
line2 = splitString
line2 = line2.replace('^^' + tag + '^^',"")
#print line2
frontBit = line[0:line.find("^^")]
backBit = line[line.rfind('""')+2:len(line)]
#print "Front Bit: " + frontBit
#print "Line 2 " + line2
#print "RESULTING STRING " + line2
#sys.stdout.write('.')
else:
splitString = line.rpartition("^^" + tag + '^^')
line2 = splitString[2]
frontBit = splitString[0]
#print "RESULTING STRING " + line2
#sys.stdout.write('.')
line = "" #reset line to nil
try:
for b in range(int(details[0]),int(details[1])+1):
#print b
#sys.stdout.write('.')
#print "Looping: " + str(b)
line3 = line2.replace(details[2],str(b))
result = TPGEreplaceLine(idString,line3,root, baseDirectory)
#print result
if result <> "" and result <> "\n":
line = line + result
includeLine = True
except:
print "Problem with line: " + line2
line = frontBit + line + TPGEreplaceLine(idString,backBit,root, baseDirectory) #Re add front bit
#print ""
####### COMPLEX TAGS WITH INDEX
while find_between(line, "!!", "!!") != "":
#find first tag
tag = find_between(line, "!!", "!!")
#print "Tag = " + tag
details = tag.split(",")
#TPGEgetValueWhere(id, tree, testField, resultField)
#TPGEgetValueWhere("BOLT-M3-M-12-01", root, "oompPart.oompID", "name")
#@@oompPart.oompID,name@@
value = TPGEgetValueIndex(details[0], root, details[1], details[2])
#print "Replacing Tag Index:" + tag + " " + value[0:20]
line = line.replace("!!" + tag + "!!", value,1)
for g in range(1,9):
while find_between(line, "@" + str(g), "@" + str(g)) != "":
#find first tag
tag = find_between(line, "@" + str(g), "@" + str(g))
details = tag.split(",")
#TPGEgetValueWhere(id, tree, testField, resultField)
#TPGEgetValueWhere("BOLT-M3-M-12-01", root, "oompPart.oompID", "name")
#@@oompPart.oompID,name@@
value = TPGEgetValueWhere(details[0], root, details[1], details[2])
#print "Replacing Tag " + tag + " " + value[0:20]
line = line.replace("@" + str(g) + tag + "@" + str(g), value,1)
while find_between(line, "@@", "@@") != "":
#find first tag
tag = find_between(line, "@@", "@@")
details = tag.split(",")
#TPGEgetValueWhere(id, tree, testField, resultField)
#TPGEgetValueWhere("BOLT-M3-M-12-01", root, "oompPart.oompID", "name")
#@@oompPart.oompID,name@@
#print "Details: "
#print " Tag:" + tag
#print " D1" + details[0]
#print " D2" + details[1]
#print " D3" + details[2]
value = TPGEgetValueWhere(details[0], root, details[1], details[2])
#print "Replacing Tag " + tag + " " + value[0:20]
line = line.replace("@@" + tag + "@@", value,1)
while find_between(line, "''", "''") != "":
tag = find_between(line, "''", "''")
details = tag.split(",")
if os.path.isfile(baseDirectory + details[0]):
value = details[1]
else:
try:
value = details[2]
except:
value = ""
line = line.replace("''" + tag + "''", value,1)
while find_between(line, "()", "()") != "":
#find first tag
tag = find_between(line, "()", "()")
details = tag.split(",")
#print "TESTING FOR CREATION: " + details[0] + " " + details[1]
if details[0] != details[1]:
replaceValue = "%$%DELETE FILE%$%"
else:
replaceValue = ""
line = line.replace("()" + tag + "()", replaceValue,1)
#print line
while find_between(line, "!)", "!)") != "":
#find first tag
tag = find_between(line, "!)", "!)")
                details = tag.split(",")
#print "TESTING FOR CREATION: " + details[0] + " " + details[1]
if details[0] == details[1]:
replaceValue = "%$%DELETE FILE%$%"
else:
replaceValue = ""
line = line.replace("!)" + tag + "!)", replaceValue,1)
#print line
while find_between(line, "(*", "(*") != "":
#find first tag
tag = find_between(line, "(*", "(*")
details = tag.split(",")
if os.path.isfile(baseDirectory + details[0]):
replaceValue = details[1]
else:
replaceValue = details[2]
line = line.replace("(*" + tag + "(*", replaceValue,1)
#print line
while find_between(line, "<<", "<<") != "":
#find first tag
tag = find_between(line, "<<", "<<")
details = tag.split(",")
#TPGEgetValueWhere(id, tree, testField, resultField)
#TPGEgetValueWhere("BOLT-M3-M-12-01", root, "oompPart.oompID", "name")
#@@oompPart.oompID,name@@
replaceValue = ""
if details[0] == details[1]:
replaceValue = details[2]
else:
try:
replaceValue = details[3]
except:
replaceValue = ""
line = line.replace("<<" + tag + "<<", replaceValue,1)
while find_between(line, ">>", ">>") != "":
#find first tag
tag = find_between(line, ">>", ">>")
details = tag.split(",")
#TPGEgetValueWhere(id, tree, testField, resultField)
#TPGEgetValueWhere("BOLT-M3-M-12-01", root, "oompPart.oompID", "name")
#@@oompPart.oompID,name@@
replaceValue = ""
if details[0] != details[1]:
replaceValue = details[2]
line = line.replace(">>" + tag + ">>", replaceValue,1)
includeLine = True
##AFTER REPLACMENT TEST FOR INCLUSION
if line[:1] == "#":
#skip line as template comment
#print"Skipping Line COMMENT " + line[0:20]
includeLine = False
elif line[:1] == "$":
#Test for file existance id directory based
testFile =find_between(line, "$$", "$$")
line = line.replace("$$" + testFile + "$$", "")
#print "Testing File: " + baseDirectory + testFile
if os.path.isfile(baseDirectory + testFile):
#includeLine = True
f=0
else:
#print" Skipping Line FILE DOESN'T EXIST " + line[0:20]
includeLine = False
elif find_between(line, "**", "**") != "":
while find_between(line, "**", "**") != "":
#Test for tag existance
#find first tag
tag = find_between(line, "**", "**")
details = tag.split(",")
#TPGEgetValueWhere(id, tree, testField, resultField)
#TPGEgetValueWhere("BOLT-M3-M-12-01", root, "oompPart.oompID", "name")
#@@oompPart.oompID,name@@
#print tag
try:
value = TPGEgetValueWhere(details[0], root, details[1], details[2])
except IndexError:
print "ERROR IN LINE: " + tag + "LINE: " + line
raise IndexError
#print "Replacing Tag " + tag + " " + value[0:20]
line = line.replace("**" + tag + "**", "",1)
if value <> "":
line = line.replace("**" + tag + "**", "")
#includeLine = True
else:
#print" Skipping Line TAG DOESN'T EXIST " + line[0:20]
#print "Skipping Due To ** " + tag
includeLine = False
elif line[:1] == "=":
#Test for tag existance
#find first tag
tag = find_between(line, "==", "==")
details = tag.split(",")
#TPGEgetValueWhere(id, tree, testField, resultField)
#TPGEgetValueWhere("BOLT-M3-M-12-01", root, "oompPart.oompID", "name")
#@@oompPart.oompID,name@@
#print tag
try:
value = TPGEgetValueIndex(details[0], root, details[1], details[2])
#print "Replacing Tag " + tag + " " + value[0:20]
line = line.replace("==" + tag + "==", "",1)
if value <> "":
line = line.replace("==" + tag + "==", "")
#includeLine = True
else:
#print" Skipping Line TAG DOESN'T EXIST " + line[0:20]
includeLine = False
except IndexError:
pass
#print "ERROR IN LINE: Bypassed for wiki formatting" + tag
#raise IndexError
#added to allow for WIKImedia style formating
elif find_between(line, "++", "++") != "":
while find_between(line, "++", "++") != "":
#find first tag
tag = find_between(line, "++", "++")
details = tag.split(",")
#TPGEgetValueWhere(id, tree, testField, resultField)
#TPGEgetValueWhere("BOLT-M3-M-12-01", root, "oompPart.oompID", "name")
#@@oompPart.oompID,name@@
#@@oompPart.oompID,name@@
#print "Testing Equal: " +details[0] + " " + details[1]
if details[0] != details[1]:
#print " EXCLUDING"
includeLine=False
#print "Skipping Due To ++ " + tag
line = line.replace("++" + tag + "++", "")
elif find_between(line, "--", "--") != "":
while find_between(line, "--", "--") != "":
#find first tag
tag = find_between(line, "--", "--")
details = tag.split(",")
#TPGEgetValueWhere(id, tree, testField, resultField)
#TPGEgetValueWhere("BOLT-M3-M-12-01", root, "oompPart.oompID", "name")
#@@oompPart.oompID,name@@
#print "Testing Equal: " +details[0] + " " + details[1]
try:
#was killing one so fixed
if details[0] == details[1]:
#print " EXCLUDING"
includeLine=False
#print "Skipping Due To --"
line = line.replace("--" + tag + "--", "")
except IndexError:
line = line.replace("--" + tag + "--", "")
elif find_between(line, "??", "??") != "":
while find_between(line, "??", "??") != "":
#find first tag
tag = find_between(line, "??", "??")
#print "Tag = " + tag
details = tag.split(",")
#TPGEgetValueWhere(id, tree, testField, resultField)
#TPGEgetValueWhere("BOLT-M3-M-12-01", root, "oompPart.oompID", "name")
#@@oompPart.oompID,name@@
#print "Testing Equal: " +details[0] + " " + details[1]
if details[0] == "inFamily":
#print "Details " + details[1] + " " + details[2]
noneCount=0
#testingType
extraItem = "Type"
partTest = TPGEgetValueWhere(details[1], root, "oompPart.oompID", "oomp"+ extraItem)
familyTest = TPGEgetValueWhere(details[2], root, "oompFamily.familyName","family"+ extraItem)
if familyTest <> "":
if partTest != familyTest:
includeLine = False
else:
noneCount += 1
#testingSize
extraItem = "Size"
partTest = TPGEgetValueWhere(details[1], root, "oompPart.oompID", "oomp"+ extraItem)
familyTest = TPGEgetValueWhere(details[2], root, "oompFamily.familyName","family"+ extraItem)
if familyTest <> "":
if partTest != familyTest:
includeLine = False
else:
noneCount += 1
#testingColor
extraItem = "Color"
partTest = TPGEgetValueWhere(details[1], root, "oompPart.oompID", "oomp"+ extraItem)
familyTest = TPGEgetValueWhere(details[2], root, "oompFamily.familyName","family"+ extraItem)
if familyTest <> "":
if partTest != familyTest:
includeLine = False
else:
noneCount += 1
#testingDesc
extraItem = "Desc"
partTest = TPGEgetValueWhere(details[1], root, "oompPart.oompID", "oomp"+ extraItem)
familyTest = TPGEgetValueWhere(details[2], root, "oompFamily.familyName","family"+ extraItem)
if familyTest <> "":
if partTest != familyTest:
includeLine = False
else:
noneCount += 1
#testingIndex
extraItem = "Index"
partTest = TPGEgetValueWhere(details[1], root, "oompPart.oompID", "oomp"+ extraItem)
familyTest = TPGEgetValueWhere(details[2], root, "oompFamily.familyName","family"+ extraItem)
if familyTest <> "":
if partTest != familyTest:
includeLine = False
else:
noneCount += 1
if noneCount > 1:
includeLine = False
line = line.replace("??" + tag + "??", "")
#special tests
else:
r=7
if includeLine:
##Add new line's
line = line.replace("~~", "\n")
####MAGIC WORDS
line = line.replace("%%ID%%", idString)
line = line.replace("%%YEAR%%", str(date.today().year).zfill(2))
line = line.replace("%%MONTH%%", str(date.today().month).zfill(2))
line = line.replace("%%DAY%%", str(date.today().day).zfill(2))
line = line.replace("%%HOUR%%", str(datetime.now().hour).zfill(2))
line = line.replace("%%MINUTE%%", str(datetime.now().minute).zfill(2))
line = line.replace("%%CRLF%%", "\n")
#print "BOTTOM " + line
return line
else:
return ""
def TPGEcreateXMLList(list, baseDirectory):
xmlFiles = list
try:
os.stat(tempPath)
except:
os.mkdir(tempPath)
f = codecs.open(tempCombinedXMLFileName,'w+', encoding='utf-8')
f.write("<xml>".encode('utf-8'))
print "---------"
for item in xmlFiles:
if item <> "":
for line in open(item):
if ("<xml>" in line) or ("</xml>" in line):
t = 0
# sys.stdout.write("S")
else:
value = unicode(line, 'utf-8', errors='ignore')
f.write(value)
# sys.stdout.write(".")
#print ""
f.write("</xml>".encode('utf-8'))
f.close()
def TPGEcreateXML(idString, baseDirectory, xmlAdd, extraXML):
if extraXML != "":
extraXML = extraXML + "," + baseDirectory
else:
extraXML = baseDirectory + ","
xmlFileName = TPGEgetAllFilesIterate("tags/", ".xml")
fileList = extraXML.split(",")
moreXML = []
for item in fileList:
print "Loading XML Files From: " + item
if os.path.isdir(item):
moreXML = moreXML + TPGEgetAllFilesIterate(item, xmlAdd)
else:
moreXML.append(item)
moreXML = moreXML + xmlFileName
TPGEcreateXMLList(moreXML, baseDirectory)
def TPGEloadXML():
print "Loading XML"
tree = ET.parse(tempCombinedXMLFileName)
print "Done Loading XML"
return tree.getroot()
| 2.453125
| 2
|
tests/test_train.py
|
hadware/pyannote-audio
| 1
|
12782114
|
import pytest
from pytorch_lightning import Trainer
from pyannote.audio.models.segmentation.debug import SimpleSegmentationModel
from pyannote.audio.tasks import (
OverlappedSpeechDetection,
Segmentation,
VoiceActivityDetection,
)
from pyannote.database import FileFinder, get_protocol
@pytest.fixture()
def protocol():
return get_protocol(
"Debug.SpeakerDiarization.Debug", preprocessors={"audio": FileFinder()}
)
def test_train_segmentation(protocol):
segmentation = Segmentation(protocol)
model = SimpleSegmentationModel(task=segmentation)
trainer = Trainer(fast_dev_run=True)
trainer.fit(model)
def test_train_voice_activity_detection(protocol):
voice_activity_detection = VoiceActivityDetection(protocol)
model = SimpleSegmentationModel(task=voice_activity_detection)
trainer = Trainer(fast_dev_run=True)
trainer.fit(model)
def test_train_overlapped_speech_detection(protocol):
overlapped_speech_detection = OverlappedSpeechDetection(protocol)
model = SimpleSegmentationModel(task=overlapped_speech_detection)
trainer = Trainer(fast_dev_run=True)
trainer.fit(model)
def test_finetune_with_task_that_does_not_need_setup_for_specs(protocol):
voice_activity_detection = VoiceActivityDetection(protocol)
model = SimpleSegmentationModel(task=voice_activity_detection)
trainer = Trainer(fast_dev_run=True)
trainer.fit(model)
voice_activity_detection = VoiceActivityDetection(protocol)
model.task = voice_activity_detection
trainer = Trainer(fast_dev_run=True)
trainer.fit(model)
def test_finetune_with_task_that_needs_setup_for_specs(protocol):
segmentation = Segmentation(protocol)
model = SimpleSegmentationModel(task=segmentation)
trainer = Trainer(fast_dev_run=True)
trainer.fit(model)
segmentation = Segmentation(protocol)
model.task = segmentation
trainer = Trainer(fast_dev_run=True)
trainer.fit(model)
def test_transfer_with_task_that_does_not_need_setup_for_specs(protocol):
segmentation = Segmentation(protocol)
model = SimpleSegmentationModel(task=segmentation)
trainer = Trainer(fast_dev_run=True)
trainer.fit(model)
voice_activity_detection = VoiceActivityDetection(protocol)
model.task = voice_activity_detection
trainer = Trainer(fast_dev_run=True)
trainer.fit(model)
def test_transfer_with_task_that_needs_setup_for_specs(protocol):
voice_activity_detection = VoiceActivityDetection(protocol)
model = SimpleSegmentationModel(task=voice_activity_detection)
trainer = Trainer(fast_dev_run=True)
trainer.fit(model)
segmentation = Segmentation(protocol)
model.task = segmentation
trainer = Trainer(fast_dev_run=True)
trainer.fit(model)
def test_finetune_freeze_with_task_that_needs_setup_for_specs(protocol):
segmentation = Segmentation(protocol)
model = SimpleSegmentationModel(task=segmentation)
trainer = Trainer(fast_dev_run=True)
trainer.fit(model)
segmentation = Segmentation(protocol)
model.task = segmentation
model.freeze_up_to("mfcc")
trainer = Trainer(fast_dev_run=True)
trainer.fit(model)
def test_finetune_freeze_with_task_that_does_not_need_setup_for_specs(protocol):
vad = VoiceActivityDetection(protocol)
model = SimpleSegmentationModel(task=vad)
trainer = Trainer(fast_dev_run=True)
trainer.fit(model)
vad = VoiceActivityDetection(protocol)
model.task = vad
model.freeze_up_to("mfcc")
trainer = Trainer(fast_dev_run=True)
trainer.fit(model)
def test_transfer_freeze_with_task_that_does_not_need_setup_for_specs(protocol):
segmentation = Segmentation(protocol)
model = SimpleSegmentationModel(task=segmentation)
trainer = Trainer(fast_dev_run=True)
trainer.fit(model)
voice_activity_detection = VoiceActivityDetection(protocol)
model.task = voice_activity_detection
model.freeze_up_to("mfcc")
trainer = Trainer(fast_dev_run=True)
trainer.fit(model)
def test_transfer_freeze_with_task_that_needs_setup_for_specs(protocol):
voice_activity_detection = VoiceActivityDetection(protocol)
model = SimpleSegmentationModel(task=voice_activity_detection)
trainer = Trainer(fast_dev_run=True)
trainer.fit(model)
segmentation = Segmentation(protocol)
model.task = segmentation
model.freeze_up_to("mfcc")
trainer = Trainer(fast_dev_run=True)
trainer.fit(model)
| 2.234375
| 2
|
play_dodge.py
|
Dolesaurus/PyDodge
| 0
|
12782115
|
<filename>play_dodge.py
# -*- coding: utf-8 -*-
"""
Description
"""
from PyQt4 import QtGui, QtCore
import sys
import pydodge
dt = 30  # timer period in milliseconds; also sets how often the game state (and bullet spawning) updates
class gui(QtGui.QMainWindow):
def __init__(self):
super().__init__()
self.resize(pydodge.WIDTH,pydodge.HEIGHT)
self.move(400,300)
self.setWindowTitle('PyDodge')
self.colorb = QtGui.QColor(0,70,170)
self.colorp = QtGui.QColor(170,70,0)
self.rp = 4 #radius player
self.rb = 3 #radius bullet
self.show()
self.t = 0
self.gaming = False
self.game = pydodge.game_body()
def new_game(self):
self.gaming = True
self.game.initialize()
self.u = 4
self.t = 0
self.tid = self.startTimer(dt)
def keyPressEvent(self,e):
if not self.gaming: self.new_game()
if e.key() == QtCore.Qt.Key_W: #consider dictionary?
self.u = 0
elif e.key() == QtCore.Qt.Key_S:
self.u = 1
elif e.key() == QtCore.Qt.Key_A:
self.u = 2
elif e.key() == QtCore.Qt.Key_D:
self.u = 3
elif e.key() == QtCore.Qt.Key_Up:
self.u = 0
elif e.key() == QtCore.Qt.Key_Down:
self.u = 1
elif e.key() == QtCore.Qt.Key_Left:
self.u = 2
elif e.key() == QtCore.Qt.Key_Right:
self.u = 3
    def keyReleaseEvent(self, e):  # TODO: handle the case where several keys are held down at once
self.u = 4
def timerEvent(self,e):
self.t += dt
if self.game.update(self.u):
self.killTimer(self.tid)
self.gaming = False
self.update()
def mousePressEvent(self, e):
if not self.gaming: self.new_game()
def paintEvent(self,e):
qp = QtGui.QPainter()
qp.begin(self)
qp.drawText(10,20,'{:.2f}'.format(self.t/1000))
qp.drawText(pydodge.WIDTH-25,20,'{:3d}'.format(len(self.game.bullets)))
qp.setPen(self.colorp)
qp.setBrush(self.colorp)
qp.drawEllipse(self.game.x-self.rp,self.game.y-self.rp,2*self.rp,2*self.rp)
qp.setPen(self.colorb)
qp.setBrush(self.colorb)
for b in self.game.bullets:
qp.drawEllipse(b.state[0]-self.rb, b.state[1]-self.rb,2*self.rb,2*self.rb)
qp.end()
def main():
app = QtGui.QApplication(sys.argv)
_ = gui()
sys.exit(app.exec_())
if __name__ == '__main__':
main()
| 2.671875
| 3
|
zkstark/quadratic_prover_test.py
|
kevaundray/research
| 1,351
|
12782116
|
import quadratic_provers as q
data = q.eval_across_field([1, 2, 3, 4], 11)
qproof = q.mk_quadratic_proof(data, 4, 11)
assert q.check_quadratic_proof(data, qproof, 4, 5, 11)
data2 = q.eval_across_field(range(36), 97)
cproof = q.mk_column_proof(data2, 36, 97)
assert q.check_column_proof(data2, cproof, 36, 10, 97)
| 2.296875
| 2
|
anealing/samples/image_search/scripts/model/__init__.py
|
arkmohba/quantum_computing_memo
| 0
|
12782117
|
from .image_feature import ImageDoubleFeature
from .base import Base
| 1
| 1
|
Assignment6/Lab6_Q1_Creating Calculator.py
|
mjmaisha527/python-projects-cse111
| 0
|
12782118
|
<filename>Assignment6/Lab6_Q1_Creating Calculator.py<gh_stars>0
class Calculator:
def __init__(self,value1,operator,value2):
self.value1=value1
self.operator=operator
self.value2=value2
def add (self):
return self.value1+self.value2
def subtract (self):
return self.value1-self.value2
def multiply (self):
return self.value1*self.value2
def divide(self):
return self.value1/self.value2
x=int(input())
sign=input()
y=int(input())
print("Let's Calculate!")
calculate=Calculator(value1=x,operator=sign,value2=y)
if calculate.operator=="+":
print("Value 1:",calculate.value1)
print("Operator:", calculate.operator)
print("Value 2:", calculate.value2)
print("Result:", calculate.add())
elif calculate.operator=="-":
print("Value 1:",calculate.value1)
print("Operator:", calculate.operator)
print("Value 2:", calculate.value2)
print("Result:", calculate.subtract())
elif calculate.operator=="*":
print("Value 1:",calculate.value1)
print("Operator:", calculate.operator)
print("Value 2:", calculate.value2)
print("Result:", calculate.multiply())
elif calculate.operator=="/":
print("Value 1:",calculate.value1)
print("Operator:", calculate.operator)
print("Value 2:", calculate.value2)
print("Result:", calculate.divide())
| 4.125
| 4
|
utils.py
|
jananises/Dwave
| 3
|
12782119
|
<gh_stars>1-10
from collections import defaultdict
from tabulate import tabulate
import os
from dimod import BINARY, INTEGER, sym, ConstrainedQuadraticModel
def print_cqm_stats(cqm: ConstrainedQuadraticModel) -> None:
"""Print some information about the CQM model
Args:
cqm: a dimod cqm model (dimod.cqm)
"""
if not isinstance(cqm, ConstrainedQuadraticModel):
raise ValueError("input instance should be a dimod CQM model")
num_binaries = sum(cqm.vartype(v) is BINARY for v in cqm.variables)
num_integers = sum(cqm.vartype(v) is INTEGER for v in cqm.variables)
num_discretes = len(cqm.discrete)
num_linear_constraints = sum(
constraint.lhs.is_linear() for constraint in cqm.constraints.values())
num_quadratic_constraints = sum(
not constraint.lhs.is_linear() for constraint in
cqm.constraints.values())
num_le_inequality_constraints = sum(
constraint.sense is sym.Sense.Le for constraint in
cqm.constraints.values())
num_ge_inequality_constraints = sum(
constraint.sense is sym.Sense.Ge for constraint in
cqm.constraints.values())
num_equality_constraints = sum(
constraint.sense is sym.Sense.Eq for constraint in
cqm.constraints.values())
assert (num_binaries + num_integers == len(cqm.variables))
assert (num_quadratic_constraints + num_linear_constraints ==
len(cqm.constraints))
print(" \n" + "=" * 25 + "MODEL INFORMATION" + "=" * 25)
print(
' ' * 10 + 'Variables' + " " * 10 + 'Constraints' + " " * 15 +
'Sensitivity')
print('-' * 20 + " " + '-' * 28 + ' ' + '-' * 18)
print(tabulate([["Binary", "Integer", "Quad", "Linear", "One-hot", "EQ ",
"LT", "GT"],
[num_binaries, num_integers, num_quadratic_constraints,
num_linear_constraints, num_discretes,
num_equality_constraints,
num_le_inequality_constraints,
num_ge_inequality_constraints]],
headers="firstrow"))
def read_instance(instance_path: str) -> dict:
"""A method that reads input instance file
Args:
instance_path: path to the job shop instance file
Returns:
Job_dict: dictionary containing jobs as keys and a list of tuple of
machines and their processing time as values.
"""
job_dict = defaultdict(list)
with open(instance_path) as f:
for i, line in enumerate(f):
if i == 0:
num_jobs = int(line.split()[-1])
elif i == 1:
num_machines = int(line.split()[-1])
elif 2 <= i <= 4:
continue
else:
job_task = list(map(int, line.split()))
job_dict[i - 5] = [x for x in
zip(job_task[1::2], # machines
job_task[2::2] # processing duration
)]
assert (len(job_dict) == num_jobs)
assert (len(job_dict[0]) == num_machines)
return job_dict
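# Expected instance layout (informal sketch inferred from the parser above; the
# example values are placeholders):
#
#   line 0:    "... <num_jobs>"          last whitespace-separated token is the job count
#   line 1:    "... <num_machines>"      last token is the machine count
#   lines 2-4: header lines, skipped
#   line 5+:   "<id> <machine> <duration> <machine> <duration> ..."
#              the first integer on each job line is ignored by the parser
#
#   jobs = read_instance("instance.txt")   # {0: [(machine, duration), ...], ...}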
def write_solution_to_file(data, solution: dict, completion: int,
solution_file_path: str) -> None:
"""Write solution to a file.
Args:
data: a class containing JSS data
solution: a dictionary containing solution
        completion: completion time or objective function of the JSS problem
solution_file_path: path to the output solution file. If doesn't exist
a new file is created
"""
main_header = " " * 10
for i in range(data.num_machines):
main_header += " " * 8 + f'machine {i}' + " " * 7
header = ['job id']
for i in range(data.num_machines):
header.extend(['task', 'start', 'dur'])
job_sol = {}
for j in range(data.num_jobs):
job_sol[j] = [j]
for i in range(data.num_machines):
job_sol[j].extend(list(solution[j, i]))
with open(solution_file_path, 'w') as f:
f.write('#Number of jobs: ' + str(data.num_jobs) + '\n')
f.write('#Number of machines: ' + str(data.num_machines) + '\n')
f.write('#Completion time: ' + str(completion) + '\n\n')
f.write(main_header)
f.write("\n")
f.write(tabulate([header, *[v for l, v in job_sol.items()]],
headers="firstrow"))
f.close()
print(f'\nSaved schedule to '
f'{os.path.join(os.getcwd(), solution_file_path)}')
def write_solution_to_file_1(data, solution: dict, completion: int,
solution_file_path: str) -> None:
"""Write solution to a file.
Args:
data: a class containing JSS data
solution: a dictionary containing solution
        completion: completion time or objective function of the JSS problem
solution_file_path: path to the output solution file. If doesn't exist
a new file is created
"""
with open(solution_file_path, 'w') as f:
f.write('#Number of jobs: ' + str(data.num_jobs) + '\n')
f.write('#Number of machines: ' + str(data.num_machines) + '\n')
f.write('#Completion time: ' + str(
completion) + '\n\n')
f.write('#' + '_' * 150 + '\n')
for j in range(data.num_jobs):
print()
f.write(' '.
join([str(int(solution[(j, i)])) + ' ' +
str(data.task_duration[
(j, data.machine_task[(j, i)])])
for i in range(data.num_machines)]) + '\n')
f.close()
print(f'Saved schedule to '
f'{os.path.join(os.getcwd(), solution_file_path)}')
| 2.609375
| 3
|
ChristmasSocks/src/client/core/consts.py
|
dsw7/ChristmasSocks
| 0
|
12782120
|
<gh_stars>0
PROJECT_TITLE = '--- ChristmasSocks Remote Server Management Software ---'
PANEL_MARGIN = 3
| 0.96875
| 1
|
shot_detector/objects/time/time_float.py
|
w495/shot_detector
| 18
|
12782121
|
<gh_stars>10-100
# -*- coding: utf8 -*-
"""
This is part of shot detector.
Produced by w495 at 2017.05.04 04:18:27
"""
from __future__ import absolute_import, division, print_function
import datetime
class TimeFloat(float):
"""
...
"""
def td(self):
"""
:return:
"""
td = datetime.timedelta(
seconds=self
)
td.total_seconds()
return td
def hms(self):
"""
:return:
"""
td = self.td()
str_td = str(td)
return str_td
def mns(self):
"""
:return:
"""
minute = int(self) // 60
second = int(self) % 60
minute_second = "%s.%s" % (minute, second)
human_minute = float(minute_second)
return human_minute
def minute(self):
"""
:return:
"""
return self / 60.0
| 2.9375
| 3
|
sgts/mad_root_distn.py
|
Tancata/phylo
| 5
|
12782122
|
<reponame>Tancata/phylo
import os, sys
# use MAD to root an MCMC sample of trees, outputting a sample of rooted trees (for analysis with e.g. RootAnnotator)
#python3 combine_treelists.py 300 treelist1 treelist2 treelist3 ...
burnin = int(sys.argv[1]) + 1
treelists = sys.argv[2:]
outfile = sys.argv[2] + ".combined"
finalfile = sys.argv[2] + ".rooted_sample"
if os.path.exists(outfile):
os.unlink(outfile)
for t in treelists:
os.system("tail -n +" + str(burnin) + " " + t + " >> " + outfile + ".tmp1")
os.system("awk '!(NR%10)' " + outfile + ".tmp1 > " + outfile) #thins the chain to take every 10th tree
os.unlink(outfile + ".tmp1")
os.system("mad.py " + outfile + " -m") #needs mad.py to be in your path
os.system("grep -v '^>>\|^<\|^>\|^$' " + outfile + ".rooted > " + finalfile)
| 2.4375
| 2
|
technology/setup_scripts/setup_openram_scn3me_subm.py
|
kabylkas/OpenRAM
| 0
|
12782123
|
<filename>technology/setup_scripts/setup_openram_scn3me_subm.py
#!/usr/bin/python
"""
This type of setup script should be placed in the setup_scripts directory in the trunk
"""
import sys
import os
TECHNOLOGY = "scn3me_subm"
##########################
# CDK paths
# os.environ["CDK_DIR"] = CDK_DIR #PDK path
# os.environ["SYSTEM_CDS_LIB_DIR"] = "{0}/cdssetup".format(CDK_DIR)
# os.environ["CDS_SITE"] = CDK_DIR
os.environ["MGC_TMPDIR"] = "/tmp"
###########################
# OpenRAM Paths
try:
DRCLVS_HOME = os.path.abspath(os.environ.get("DRCLVS_HOME"))
except:
OPENRAM_TECH=os.path.abspath(os.environ.get("OPENRAM_TECH"))
DRCLVS_HOME=OPENRAM_TECH+"/scn3me_subm/tech"
os.environ["DRCLVS_HOME"] = DRCLVS_HOME
# try:
# SPICE_MODEL_DIR = os.path.abspath(os.environ.get("SPICE_MODEL_DIR"))
# except:
OPENRAM_TECH=os.path.abspath(os.environ.get("OPENRAM_TECH"))
os.environ["SPICE_MODEL_DIR"] = "{0}/{1}/models".format(OPENRAM_TECH, TECHNOLOGY)
##########################
# Paths required for OPENRAM to function
LOCAL = "{0}/..".format(os.path.dirname(__file__))
sys.path.append("{0}/{1}".format(LOCAL,TECHNOLOGY))
| 1.929688
| 2
|
wildfire/deps/conll.py
|
speedcell4/wildfire
| 0
|
12782124
|
from pathlib import Path
from typing import NamedTuple
__all__ = [
'CTB5_CHAR_FORMAT', 'CTB5_WORD_FORMAT',
'CTB7_CHAR_FORMAT', 'CTB7_WORD_FORMAT',
'Token', 'CoNLLReader',
]
CTB5_WORD_FORMAT = '{ix}\t{form}\t{form}\t{wpos}\t{wpos}_\t{head}\t{drel}'
CTB5_CHAR_FORMAT = '{ix}\t{form}\t{form}\t{wpos}\t{cpos}_\t{head}\t{drel}'
CTB7_WORD_FORMAT = '{ix}\t{form}\t{wpos}\t{head}\t{drel}'
CTB7_CHAR_FORMAT = '{ix}\t{form}\t{wpos}\t{cpos}\t{head}\t{drel}'
class Token(NamedTuple):
form: str
wpos: str
head: int
drel: str
class CoNLLReader(object):
def __init__(self, path: Path, encoding: str = 'utf-8', separator: str = '\t',
form: int = 1, wpos: int = 3, head: int = 6, drel: int = 7) -> None:
self.path = path
self.encoding = encoding
self.separator = separator
self.form = form
self.wpos = wpos
self.head = head
self.drel = drel
def __repr__(self) -> str:
return f'<{self.__class__.__name__}, {self.path}>'
def __iter__(self):
with self.path.open(mode='r', encoding=self.encoding) as fp:
sentence = []
for line in fp:
line = line.strip()
if line.__len__() > 0:
tokens = line.split(self.separator)
form = str(tokens[self.form])
wpos = str(tokens[self.wpos])
head = int(tokens[self.head])
drel = str(tokens[self.drel])
sentence.append(Token(form, wpos, head, drel))
elif sentence.__len__() > 0:
yield sentence
sentence = []
@staticmethod
def dump_stream(stream, path: Path, format_string: str, encoding: str = 'utf-8') -> None:
with path.open(mode='w', encoding=encoding) as fp:
for sentence in stream:
for ix, token in enumerate(sentence):
                print(format_string.format(ix=ix, **token._asdict()), file=fp)  # pass ix by keyword so the '{ix}' placeholder resolves
print('\n', file=fp)
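# --- Hedged usage sketch (added for illustration; not part of the original module) ---
# Round trip: read a CoNLL file and re-dump it in CTB5 word format.
# The file names are placeholders.
def _demo_conll_roundtrip():
    reader = CoNLLReader(Path('train.conll'))            # hypothetical input file
    sentences = list(reader)                             # list of list[Token]
    CoNLLReader.dump_stream(sentences, Path('out.conll'), CTB5_WORD_FORMAT)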
| 2.640625
| 3
|
asset-transfer/wallet.py
|
defineapoorv/asset-transfer
| 0
|
12782125
|
<reponame>defineapoorv/asset-transfer
import datetime
import hashlib
import json
import pyelliptic
import random
import requests
from node import NodeMixin, BALANCE_URL, FULL_NODE_PORT, TRANSACTION_HISTORY_URL
class Client(NodeMixin):
__private_key__ = None
__public_key__ = None
def __init__(self, private_key=None, public_key=None):
if private_key is not None and public_key is not None:
self.__private_key__ = private_key.decode('hex')
self.__public_key__ = public_key.decode('hex')
self.ecc = self.generate_ecc_instance()
def generate_ecc_instance(self):
if self.__private_key__ is None or self.__public_key__ is None:
print "ECC keys not provided. Generating ECC keys"
ecc = pyelliptic.ECC(curve='secp256k1')
self.__private_key__ = ecc.get_privkey()
self.__public_key__ = ecc.get_pubkey()
else:
ecc = pyelliptic.ECC(curve='secp256k1', privkey=self.__private_key__, pubkey=self.__public_key__)
return ecc
def get_pubkey(self, hex=True):
return self.ecc.get_pubkey().encode('hex') if hex else self.ecc.get_pubkey()
def get_privkey(self, hex=True):
return self.ecc.get_privkey().encode('hex') if hex else self.ecc.get_privkey()
def sign(self, message):
return self.ecc.sign(message).encode('hex')
def verify(self, signature, message, public_key=None):
if public_key is not None:
return pyelliptic.ECC(curve='secp256k1', pubkey=public_key.decode('hex')).verify(signature.decode('hex'), message)
return self.ecc.verify(signature, message)
def get_balance(self, address=None, node=None):
if address is None:
address = self.get_pubkey()
if node is None:
node = random.sample(self.full_nodes, 1)[0]
url = BALANCE_URL.format(node, FULL_NODE_PORT, address)
try:
response = requests.get(url)
return response.json()
except requests.exceptions.RequestException as re:
pass
return None
def get_transaction_history(self, address=None, node=None):
if address is None:
address = self.get_pubkey()
if node is None:
node = random.sample(self.full_nodes, 1)[0]
url = TRANSACTION_HISTORY_URL.format(node, FULL_NODE_PORT, address)
try:
response = requests.get(url)
return response.json()
except requests.exceptions.RequestException as re:
pass
return None
def create_transaction(self, to, amount):
timestamp = datetime.datetime.utcnow().isoformat()
signature = self.sign(
self.generate_signable_transaction(
self.get_pubkey(),
to,
amount,
timestamp))
transaction = {
"from": self.get_pubkey(),
"to": to,
"amount": amount,
"signature": signature,
"timestamp": timestamp,
}
transaction["hash"] = self.calculate_transaction_hash(transaction)
return self.broadcast_transaction(transaction)
def calculate_transaction_hash(self, transaction):
"""
Calculates sha-256 hash of transaction
:param transaction: transaction
:type transaction: dict(from, to, amount, timestamp, signature, (hash))
:return: sha256 hash
:rtype: str
"""
# pop hash so method can calculate transactions pre or post hash
data = transaction.copy()
data.pop("hash", None)
data_json = json.dumps(data, sort_keys=True)
hash_object = hashlib.sha256(data_json)
return hash_object.hexdigest()
def generate_signable_transaction(self, from_address, to_address, amount, timestamp):
return ":".join((from_address, to_address, str(amount), str(timestamp)))
if __name__ == "__main__":
pass
| 2.40625
| 2
|
src/spn/algorithms/sklearn.py
|
QueensGambit/SPFlow
| 0
|
12782126
|
import logging
from typing import List
import numpy as np
import tensorflow as tf
try:
import tensorflow_probability as tfp
distributions = tfp.distributions
except:
distributions = tf.distributions
from sklearn.base import BaseEstimator, ClassifierMixin
from sklearn.utils import check_X_y
from sklearn.utils.validation import check_array, check_is_fitted
from spn.algorithms.LearningWrappers import learn_classifier, learn_parametric
from spn.algorithms.MPE import mpe
from spn.gpu.TensorFlow import optimize_tf
from spn.structure.Base import Context, get_nodes_by_type
from spn.structure.leaves.parametric.Parametric import Categorical, Gaussian, Parametric
logger = logging.getLogger(__name__)
class SPNClassifier(BaseEstimator, ClassifierMixin):
"""
:class:`SPNClassifier` wraps the SPN structure learning, tensorflow weight optimization and MPE procedures into a single
    class that follows the sklearn estimator interface. Therefore, :class:`SPNClassifier` is usable in the sklearn framework as
estimator in :meth:`sklearn.model_selection.cross_val_score`, :meth:`sklearn.model_selection.GridSearchCV` and more.
"""
def __init__(
self,
parametric_types: List[Parametric] = None,
n_jobs=-1,
tf_optimize_weights=False,
tf_n_epochs=100,
tf_batch_size: int = None,
tf_optimizer=tf.compat.v1.train.AdamOptimizer(learning_rate=0.001),
tf_pre_optimization_hook=None,
tf_post_optimization_hook=None,
):
"""
Create an :class:`SPNClassifier`.
Parameters:
parametric_types : List
Parametric types of leaf nodes. If None, all are assumed to be Gaussian
n_jobs : int
Number of parallel jobs for learning the SPN structure
tf_optimize_weights : bool
Optimize weights in tensorflow
tf_n_epochs : int
Number of tensorflow optimization epochs
tf_batch_size : int
Batch size for tensorflow optimization
tf_optimizer
Tensorflow optimizer to use for optimization
tf_pre_optimization_hook
Hook that takes an SPN and returns an SPN before the optimization step
tf_post_optimization_hook
Hook that takes an SPN and returns an SPN after the optimization step
"""
self.n_jobs = n_jobs
self.tf_optimize_weights = tf_optimize_weights
self.tf_n_epochs = tf_n_epochs
self.tf_optimizer = tf_optimizer
self.tf_batch_size = tf_batch_size
self.parametric_types = parametric_types
self.tf_pre_optimization_hook = tf_pre_optimization_hook
self.tf_post_optimization_hook = tf_post_optimization_hook
def fit(self, X, y):
"""
Fit the :class:`SPNClassifier` object.
Parameters
----------
X : np.ndarray
Training variables
y : np.ndarray
Training labels
Returns
-------
SPNClassifier
Fitted classifier
"""
# Check that X and y have correct shape
X, y = check_X_y(X, y, multi_output=True)
# Merge X and y
train_data = np.c_[X, y].astype(np.float32)
        # If no parametric types were given: assume that all leaves are Gaussian
if self.parametric_types is None:
parametric_types = [Gaussian] * X.shape[1] + [Categorical]
else:
parametric_types = self.parametric_types
# Learn classifier
self._spn = learn_classifier(
train_data,
ds_context=Context(parametric_types=parametric_types).add_domains(train_data),
spn_learn_wrapper=learn_parametric,
label_idx=X.shape[1],
cpus=self.n_jobs,
)
# If pre optimization hook has been defined, run now
if self.tf_pre_optimization_hook:
self._spn = self.tf_pre_optimization_hook(self._spn)
# If optimization flag is set: optimize weights in tf
if self.tf_optimize_weights:
self._spn, self.loss = optimize_tf(
spn=self._spn,
data=train_data,
optimizer=self.tf_optimizer,
batch_size=self.tf_batch_size,
epochs=self.tf_n_epochs,
return_loss=True,
)
# If post optimization hook has been defined, run now
if self.tf_post_optimization_hook:
self._spn = self.tf_post_optimization_hook(self._spn)
self.X_ = X
self.y_ = y
# Return the classifier
return self
def predict(self, X):
"""
Make a prediction of the given data.
Parameters
----------
X : np.ndarray
Test data
Returns
-------
np.ndarray
Label predictions for the given test data
"""
# Check is fit had been called
check_is_fitted(self, ["X_", "y_"])
# Input validation
X = check_array(X)
# Classify
n_test = X.shape[0]
y_empty = np.full((n_test, 1), fill_value=np.nan)
data = np.c_[X, y_empty]
data_filled = mpe(self._spn, data)
y_pred = data_filled[:, -1]
return y_pred
def get_params(self, deep=True):
"""Method to make SPNClassifier usable in sklearn procedures such as cross_val_score etc."""
return {
"parametric_types": self.parametric_types,
"n_jobs": self.n_jobs,
"tf_optimize_weights": self.tf_optimize_weights,
"tf_n_epochs": self.tf_n_epochs,
"tf_batch_size": self.tf_batch_size,
"tf_optimizer": self.tf_optimizer,
"tf_pre_optimization_hook": self.tf_pre_optimization_hook,
"tf_post_optimization_hook": self.tf_post_optimization_hook,
}
def set_params(self, **parameters):
"""Method to make SPNClassifier usable in sklearn procedures such as cross_val_score etc."""
for parameter, value in parameters.items():
setattr(self, parameter, value)
return self
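# --- Hedged usage sketch (added for illustration; not part of the original module) ---
# Because SPNClassifier follows the sklearn estimator interface, it can be
# dropped straight into cross_val_score. Assumes the spn and sklearn
# dependencies imported above are installed; iris is just a small demo set.
def _demo_spn_classifier():
    from sklearn.datasets import load_iris
    from sklearn.model_selection import cross_val_score
    X, y = load_iris(return_X_y=True)
    clf = SPNClassifier()                       # all-Gaussian leaves by default
    scores = cross_val_score(clf, X, y, cv=3)
    return scores.mean()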
def classification_categorical_to_tf_graph(
node, data_placeholder=None, log_space=True, variable_dict=None, dtype=np.float32
):
"""
Fix categorical to tf graph for classification problem.
For a binary class label, there will be two categorical leaf nodes in the SPN. One which one-hot encodes the first
    class as [0, 1] and one that encodes the second class as [1, 0].
    Since tf optimizes the log-likelihood, these one-hot probabilities will be projected into log space
which results in log([1,0])=[0, -inf] and therefore NaNs in further computations.
Therefore, this custom method adds a small epsilon, such that the zero probability value in the one-hot vector will
not degrade to negative infinity.
"""
with tf.compat.v1.variable_scope("%s_%s" % (node.__class__.__name__, node.id)):
p = np.array(node.p, dtype=dtype)
# Epsilon to make sure there are no zero values
eps = 1e-20
p += eps
# Renormalize such that the sum over all probabilities is one
p /= np.sum(p)
assert np.all(p > 0), "Probabilities in the class leaf nodes have to be greater than zero but were %s" % p
softmaxInverse = np.log(p / np.max(p)).astype(dtype)
probs = tf.nn.softmax(tf.constant(softmaxInverse))
variable_dict[node] = probs
if log_space:
return distributions.Categorical(probs=probs).log_prob(data_placeholder[:, node.scope[0]])
return distributions.Categorical(probs=probs).prob(data_placeholder[:, node.scope[0]])
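# --- Hedged illustration (added for illustration; not part of the original module) ---
# Why the epsilon above matters: a one-hot leaf probability vector contains an
# exact zero, and log(0) = -inf would poison the log-space optimization.
def _demo_epsilon_renormalization():
    p = np.array([1.0, 0.0])
    p += 1e-20            # same epsilon as above
    p /= np.sum(p)        # renormalize
    return np.log(p)      # finite values, roughly [0.0, -46.05] instead of [0.0, -inf]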
| 2.28125
| 2
|
productionLine.py
|
kiiski10/meatGrinder
| 0
|
12782127
|
import pygame, random, time
from machine import Machine
import utilities
from equipment import *
from character import Fighter
class Section:
def __init__(self, pos, prodLine):
#self.image = img
self.prodLine = prodLine
self.tilePos = pos
self.machine = None
self.neighbors = []
class ProductionLine:
def __init__(self, factory, inGate):
print("production line init")
self.fighters = []
self.inGate = inGate
self.outGates = [(0,0), (0,9)]
self.factory = factory
self.debugLayer = pygame.Surface(self.factory.surface.get_rect().size)
self.debugLayer.set_colorkey((255, 0, 255))
self.debugLayer.fill((255, 0, 255))
self.stats = {
"step": 0
}
self.line = {
utilities.tilePosId(self.inGate): Section(self.inGate, self),
}
for s in self.factory.getTilesByLayer("prodLine"):
newSection = Section(s, self)
self.line[utilities.tilePosId(s)] = newSection
# add connections
for section in self.line:
pos = self.line[section].tilePos
#print("section in", pos)
for n in self.neighboringSections(pos):
n.neighbors.append(self.line[section])
self.line[section].neighbors.append(n)
pygame.draw.line(
self.debugLayer,
[242, 132, 45],
utilities.tilePosToScreenPos(48, pos),
utilities.tilePosToScreenPos(48, n.tilePos),
5
)
# add machines to random sections (not on the sides)
for s in self.line:
if self.line[s].tilePos[0] not in [0, 9] and self.line[s].tilePos[1] not in [0, 9]:
if random.randint(0, 100) < 20:
self.line[s].machine = Machine(self.line[s])
def availableDirections(self, fromPos):
destSections = []
if not fromPos in self.line:
return(destSections)
destSections += self.line[fromPos].neighbors
#print("destinations from", fromPos, len(destSections))
return(destSections)
def neighboringSections(self, pos):
neighbors = []
posString = utilities.tilePosId(pos)
if posString in self.line:
for x, y in [[-1, 0], [1, 0], [0, -1], [0, 1]]:
testKey = utilities.tilePosId((pos[0] + x, pos[1] + y))
if testKey in self.line:
n = self.line[testKey]
neighbors.append(n)
#print(" is connected to", len(neighbors))
return(neighbors)
def addFighter(self, newFighter):
tilePos = utilities.screenPosToTilePos(48, newFighter.rect.center)
newFighter.prodLineLastSections = [tilePos]
posString = utilities.tilePosId(tilePos)
newFighter.state = posString
#print(self.stats["step"], "add fighter to factory tile", tilePos)
self.fighters.append(newFighter)
def fightersAt(self, pos):
posString = utilities.tilePosId(pos)
occupiers = []
for f in self.fighters:
if utilities.screenPosToTilePos(48, f.rect.center) == pos:
occupiers.append(f)
return(occupiers)
def lineAdvance(self):
# move fighters
fightersToGrinder = []
for fighter in self.fighters:
if fighter.state == "IN_MACHINE":
continue
if self.stats["step"] - fighter.timeStamps["move"] < 10 + random.randint(0, 10):
continue
if fighter.prodLineLastSections[-1] in self.outGates:
fightersToGrinder.append(fighter)
for sect in self.availableDirections(fighter.state):
if not sect.tilePos in fighter.prodLineLastSections:
if len(self.fightersAt(sect.tilePos)) == 0:
fighter.state = utilities.tilePosId(sect.tilePos)
fighter.rect.center = utilities.tilePosToScreenPos(48, sect.tilePos)
fighter.timeStamps["move"] = self.stats["step"]
fighter.prodLineLastSections.append(sect.tilePos)
break
        for f in list(fightersToGrinder):  # iterate over a copy; removing items mid-loop would skip fighters
self.fighters.remove(f)
fightersToGrinder.remove(f)
f.kill()
x, y = utilities.tilePosToScreenPos(48, f.prodLineLastSections[-1])
x = self.factory.grinder.surface.get_width() - 12
y -= 24
self.factory.grinder.fighters.append(Fighter(
world=self.factory.grinder,
team=self.factory.team,
spawnPos=[x, y],
speed=1,
selectedEquipment=[Skin(), Fist()]
))
# step all machines
for s in self.line:
if self.line[s].machine:
self.line[s].machine.step()
def step(self):
self.stats["step"] += 1
self.lineAdvance()
| 2.578125
| 3
|
client/game/panel/buttongrp.py
|
rrsilaya/spaceteam
| 0
|
12782128
|
import tkinter as tk
from utils.fonts import _getFont
from re import search
Y_OFFSET = 220
PANEL_HEIGHT = 127
PANEL_WIDTH = 140
class ButtonGroup:
def __init__(self, root, label, id, position, buttons, callback=None):
self.id = id
self.root = root
self.x, self.y = position
self.label = label
self.buttons = buttons
self.callback = callback
self._loadPanel()
def _sendPacket(self, tag):
packet = self.root.udpPacket
panel = int(search(r'([12])L?$', tag)[1])
payload = packet.CommandPacket()
payload.type = packet.COMMAND
payload.command = self.id
payload.panel = self.buttons[panel - 1].upper()
self.root.gameConnection.send(payload)
def toggleButton(self, tag, flag):
if flag:
self.root.itemconfig(tag, image=self.root.btn_on)
else:
self.root.itemconfig(tag, image=self.root.btn_off)
self._sendPacket(tag)
def _loadPanel(self):
btn_off = tk.PhotoImage(file='assets/controls/TextButtonOff.png')
btn_on = tk.PhotoImage(file='assets/controls/TextButtonOn.png')
if not hasattr(self.root, 'btn_off'):
self.root.btn_off = btn_off
self.root.btn_on = btn_on
self.root.addPanel(width=2, height=2, gridPos=(self.x, self.y))
self.root.create_text(145 + PANEL_WIDTH * self.x, Y_OFFSET + 50 + PANEL_HEIGHT * self.y, text=self.label, fill='black', font=_getFont('body'))
self.root.create_image(145 + PANEL_WIDTH * self.x, Y_OFFSET + 105 + PANEL_HEIGHT * self.y, image=self.root.btn_off, tags='{}1'.format(self.id))
self.root.create_image(145 + PANEL_WIDTH * self.x, Y_OFFSET + 180 + PANEL_HEIGHT * self.y, image=self.root.btn_off, tags='{}2'.format(self.id))
self.root.create_text(145 + PANEL_WIDTH * self.x, Y_OFFSET + 105 + PANEL_HEIGHT * self.y, text=self.buttons[0], fill='white', font=_getFont('heading-2s'), tags='{}1L'.format(self.id))
self.root.create_text(145 + PANEL_WIDTH * self.x, Y_OFFSET + 180 + PANEL_HEIGHT * self.y, text=self.buttons[1], fill='white', font=_getFont('heading-2s'), tags='{}2L'.format(self.id))
self.root.tag_bind('{}1'.format(self.id), '<Button-1>', lambda _: self.toggleButton('{}1'.format(self.id), True))
self.root.tag_bind('{}1'.format(self.id), '<ButtonRelease-1>', lambda _: self.toggleButton('{}1'.format(self.id), False))
self.root.tag_bind('{}2'.format(self.id), '<Button-1>', lambda _: self.toggleButton('{}2'.format(self.id), True))
self.root.tag_bind('{}2'.format(self.id), '<ButtonRelease-1>', lambda _: self.toggleButton('{}2'.format(self.id), False))
self.root.tag_bind('{}1L'.format(self.id), '<Button-1>', lambda _: self.toggleButton('{}1'.format(self.id), True))
self.root.tag_bind('{}1L'.format(self.id), '<ButtonRelease-1>', lambda _: self.toggleButton('{}1'.format(self.id), False))
self.root.tag_bind('{}2L'.format(self.id), '<Button-1>', lambda _: self.toggleButton('{}2'.format(self.id), True))
self.root.tag_bind('{}2L'.format(self.id), '<ButtonRelease-1>', lambda _: self.toggleButton('{}2'.format(self.id), False))
| 2.40625
| 2
|
auth/drivers/root.py
|
borysvorona/carrier-auth
| 0
|
12782129
|
# Copyright 2020
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import importlib
from base64 import b64decode
from time import time
from flask import current_app, session, request, redirect, make_response, Blueprint
from auth.drivers.oidc import _validate_basic_auth, _validate_token_auth
from auth.utils.redis_client import RedisClient
bp = Blueprint("root", __name__)
def handle_auth(auth_header: str):
redis_client = RedisClient()
if redis_client.check_auth_token(auth_header=auth_header):
return make_response("OK", 200)
try:
auth_key, auth_value = auth_header.strip().split(" ")
except ValueError:
return make_response("KO", 401)
else:
if auth_key.lower() == "basic":
username, password = b64decode(auth_value.strip()).decode().split(":", 1)
if _validate_basic_auth(username, password):
redis_client.set_auth_token(auth_header=auth_header)
return make_response("OK", 200)
elif auth_key.lower() == "bearer":
if _validate_token_auth(auth_value):
redis_client.set_auth_token(auth_header=auth_header)
return make_response("OK", 200)
return make_response("KO", 401)
@bp.route("/auth")
def auth():
if "X-Forwarded-Uri" in request.headers and request.headers["X-Forwarded-Uri"].startswith("/static"):
return make_response("OK")
# Check if need to login
target = request.args.get("target")
scope = request.args.get("scope")
for header in ("X-Forwarded-Proto", "X-Forwarded-Host", "X-Forwarded-Port", "X-Forwarded-Uri"):
if header in request.headers:
session[header] = request.headers[header]
if "Authorization" in request.headers:
return handle_auth(auth_header=request.headers.get("Authorization", ""))
if not session.get("auth_attributes") or session["auth_attributes"]["exp"] < int(time()):
return redirect(current_app.config["auth"]["login_handler"], 302)
if not session.get("auth", False) and not current_app.config["global"]["disable_auth"]:
# Redirect to login
return redirect(current_app.config["auth"].get("auth_redirect",
f"{request.base_url}{request.script_root}/login"))
if target is None:
target = "raw"
# Map auth response
response = make_response("OK")
try:
mapper = importlib.import_module(f"auth.mappers.{target}")
response = mapper.auth(scope, response)
except (ImportError, AttributeError, TypeError):
from traceback import format_exc
current_app.logger.error(f"Failed to map auth data {format_exc()}")
return response
@bp.route("/token")
def token():
return redirect(current_app.config["auth"]["token_handler"], 302)
@bp.route("/login")
def login():
return redirect(current_app.config["auth"]["login_handler"], 302)
@bp.route("/logout")
def logout():
to = request.args.get("to")
return redirect(current_app.config["auth"]["logout_handler"] + (f"?to={to}" if to is not None else ""))
| 2.34375
| 2
|
technocup/2016/elimination_round_2/b.floors.py
|
dluschan/olymp
| 0
|
12782130
|
# Read the building layout (n entrances, m floors, k flats per floor) and the two flat numbers.
n, m, k = map(int, input().split())
a, b = map(int, input().split())
a -= 1
b -= 1
# Travel costs: between adjacent entrances, per floor on foot, lift waiting time, lift per floor.
entrance_time, floor_time, lift_delay, lift_time = 15, 5, 10, 1
# Locate each flat: which entrance and which floor it is on.
entrance_a = a // (m * k)
entrance_b = b // (m * k)
floor_a = a % (m * k) // k
floor_b = b % (m * k) // k
# Walk around the circular building in whichever direction is shorter.
time = entrance_time * min(n - abs(entrance_a - entrance_b), abs(entrance_a - entrance_b))
if entrance_a == entrance_b:
    # Same entrance: move between the two floors by stairs or lift, whichever is faster.
    time += min(floor_time * abs(floor_a - floor_b), lift_time * abs(floor_a - floor_b) + lift_delay)
else:
    # Different entrances: descend to the ground floor at a, then ascend at b (stairs vs lift each way).
    time += min(floor_time * floor_a, lift_time * floor_a + lift_delay)
    time += min(floor_time * floor_b, lift_time * floor_b + lift_delay)
print(time)
| 2.625
| 3
|
pypenumbra/sinogram.py
|
Brikwerk/pypenumbra
| 0
|
12782131
|
<filename>pypenumbra/sinogram.py
"""
pypenumbra.sinogram
~~~~~~~~~~~~~~
Defines the logic to enable the construction of a sinogram
from a penumbra blob.
:copyright: 2019 <NAME>
:license: MIT
"""
import math
import numpy as np
from skimage.exposure import equalize_adapthist
from skimage import img_as_ubyte
from skimage.transform import rotate
import math
import cv2
from . import imgutil
def construct_sinogram(float_image, uint8_image, angular_steps=360, debug=False):
"""Constructs a sinogram from the detected penumbra blob
in the passed images. The uint8 image is used for blob detection
and the float image is used for value calculations.
:param float_image: A float64 image used for value calculations
:param uint8_image: A uint8 image used for blob detection
:param angular_steps: The number of slices to slice the blob into
:returns: A float64 sinogram image
"""
# Detecting penumbra blob and getting properties
threshold = imgutil.threshold(uint8_image)
center_x, center_y, radius = imgutil.get_center(threshold)
if debug:
disk_lines = cv2.cvtColor(uint8_image, cv2.COLOR_GRAY2RGB)
cv2.line(disk_lines, (center_x, center_y), (center_x+radius, center_y), (0, 255, 0), thickness=3)
cv2.circle(disk_lines, (center_x, center_y), 5, (0, 255, 0), thickness=5)
imgutil.save_debug_image("1 - original_image.png", uint8_image)
imgutil.save_debug_image("2 - threshold_raw_image.png", threshold)
imgutil.save_debug_image("3 - disk_stats.png", disk_lines)
print("---")
print("Original Image Disk Identification Results:")
print("Center X: %d | Center Y: %d | Radius: %d" % (center_x, center_y, radius))
if radius < 1:
raise ValueError("Radius is of improper length")
PADDING = int(round(radius * 0.1)) # Relative padding
# Dictates how large the area around the penumbra is when cropping
# Also dictates the ultimate x/y size of the focal spot output
radius = radius + PADDING # Padding radius
# Padding image if circle + padding doesn't fit
pad_center_x, pad_center_y, uint8_image = imgutil.pad_to_fit(radius, center_x, center_y, uint8_image)
    pad_center_x, pad_center_y, float_image = imgutil.pad_to_fit(radius, center_x, center_y, float_image)
center_x = pad_center_x
center_y = pad_center_y
# Slicing penumbra blob into sinogram
sinogram = slice_penumbra_blob(center_x, center_y, radius, angular_steps, float_image, uint8_image, debug=debug)
top, bottom, center = get_sinogram_size(sinogram, PADDING, debug=debug)
if debug:
rs_height, rs_width = sinogram.shape
rs_lines = cv2.cvtColor(img_as_ubyte(sinogram), cv2.COLOR_GRAY2RGB)
cv2.line(rs_lines, (0, top), (rs_width, top), (0, 255, 0), thickness=2)
cv2.line(rs_lines, (0, center), (rs_width, center), (0, 255, 0), thickness=2)
cv2.line(rs_lines, (0, bottom), (rs_width, bottom), (0, 255, 0), thickness=2)
imgutil.save_debug_image("5 - radial_slices.png", img_as_ubyte(sinogram))
imgutil.save_debug_image("7 - sinogram_lines.png", rs_lines)
print("---")
print("Sinogram Identification Stats:")
print("Top of Sinogram: %d | Center of Sinogram: %d | Bottom of Sinogram: %d" % (top, center, bottom))
# Applying first derivative on the vertical direction
derivative_sinogram = imgutil.apply_first_derivative(sinogram)
if debug:
imgutil.save_debug_image("8 - derivative_sinogram.png", derivative_sinogram)
# Cropping image around sinogram's center axis
height, width = derivative_sinogram.shape
crop_sinogram = derivative_sinogram[top:bottom, 0:width]
return crop_sinogram
def slice_penumbra_blob(center_x, center_y, radius, angular_steps, float_image, uint8_image, debug=False):
"""Slices a penumbra blob into a specified number of slices
:param center_x: The x-coordinate of the center of the penumbra blob
:param center_y: The y-coordinate of the center of the penumbra blob
:param radius: The radius of the penumbra blob (can include padding)
:param angular_steps: How many slices to slice the blob into
:param float_image: A float64 image used to source the slices from
:returns: Slices compiled into an image
"""
# Setting up values for sinogram extraction
ARC_ANGLE = 360.0
RADS_PER_SLICE = (math.pi/180.0) * (ARC_ANGLE/angular_steps)
sinogram = np.zeros(shape=(angular_steps, radius), dtype="float64")
outer_x = 0
outer_y = 0
if debug:
drawn_sino = img_as_ubyte(equalize_adapthist(float_image))
drawn_sino = cv2.cvtColor(drawn_sino, cv2.COLOR_GRAY2RGB)
# Assembling sinogram slices from the image
for i in range(0, angular_steps):
# Rotating around the penumbra blob in a circle by RADS_PER_SLICE
angle = i * RADS_PER_SLICE
outer_x = center_x + radius * math.cos(angle)
outer_y = center_y - radius * math.sin(angle)
if debug:
drawn_sino = cv2.line(drawn_sino,(center_x, center_y),(int(round(outer_x)), int(round(outer_y))),(0,255,0),1)
col = imgutil.get_line(center_x, center_y, outer_x, outer_y, float_image)
sinogram[i] = col
if debug:
imgutil.save_debug_image("4 - slice_lines.png", drawn_sino)
sinogram = np.rot90(sinogram, axes=(1,0))
return sinogram
def get_sinogram_size(sinogram_input, padding, debug=False):
"""Gets the top, bottom, and center Y-coordinate
:param sinogram_input: A sinogram image
:param padding: How much padding (in pixels) to put around top/bottom Y-coordinates
:returns: The top, bottom, and center of the sinogram as integers
"""
sinogram = img_as_ubyte(sinogram_input)
# Attempting to isolate the circle within the pre-sinogram image
bsize = 20
blur = cv2.bilateralFilter(sinogram, bsize, bsize*2, bsize/2)
thresh = cv2.adaptiveThreshold(blur, 255, cv2.ADAPTIVE_THRESH_GAUSSIAN_C, cv2.THRESH_BINARY, 17, 2)
thresh = cv2.morphologyEx(thresh, cv2.MORPH_CLOSE, np.ones((3,3),np.uint8))
if debug:
imgutil.save_debug_image("6 - threshold_sinogram.png", thresh)
# Iterating over columns to find the center/radius
# of the pre-sinogram. We assume that the black portion
# of the pre-sinogram is on the bottom.
thresh_rotate = rotate(thresh, 90, resize=True)
height, width = thresh_rotate.shape
sinogram_top = math.inf
sinogram_bottom = 0
# Iterating over each col (or row now, since we rotated)
for col_idx in range(0, height):
col = thresh_rotate[col_idx]
sinogram_temp_top = math.inf
sinogram_temp_bottom = 0
# Iterate through each value
for value_idx in range(0, len(col)):
value = col[value_idx]
if value == 0:
if value_idx < sinogram_temp_top:
sinogram_temp_top = value_idx
if value_idx > sinogram_bottom:
sinogram_temp_bottom = value_idx
# Update if the edge is a new low or high
if sinogram_temp_top < sinogram_top:
sinogram_top = sinogram_temp_top
if sinogram_temp_bottom > sinogram_bottom:
sinogram_bottom = sinogram_temp_bottom
top = int(round(sinogram_top - padding))
bottom = int(round(sinogram_bottom + padding))
center = int(round(sinogram_top + (sinogram_bottom - sinogram_top)/2))
# Checking if bottom or top exceed the image
# If so, we take the bigger difference between the two
# and subtract from the distance from the center.
top_diff = bottom_diff = 0
if top < 0:
top_diff = abs(top)
if bottom > width:
bottom_diff = bottom - width
diff = max(top_diff, bottom_diff)
# Adjusting top/bottom by the diff
if diff > 0:
top = top + diff
bottom = bottom - diff
return top, bottom, center
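# --- Hedged usage sketch (added for illustration; not part of the original module) ---
# construct_sinogram expects the same grayscale image twice: as float64 for
# value calculations and as uint8 for blob detection. The file name is a
# placeholder and scikit-image is assumed to be installed (it is already
# imported at the top of this module).
def _demo_construct_sinogram(path='penumbra.png'):
    from skimage import io, img_as_float, img_as_ubyte
    gray = io.imread(path, as_gray=True)
    return construct_sinogram(img_as_float(gray).astype('float64'),
                              img_as_ubyte(gray),
                              angular_steps=360)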
| 3.0625
| 3
|
render_all.py
|
alexandor91/Data-Generation-Tool
| 22
|
12782132
|
""" render_rgb.py renders obj file to rgb image
Aviable function:
- clear_mash: delete all the mesh in the secene
- scene_setting_init: set scene configurations
- node_setting_init: set node configurations
- render: render rgb image for one obj file and one viewpoint
- render_obj_by_vp_lists: wrapper function for render() render
one obj file by multiple viewpoints
- render_objs_by_one_vp: wrapper function for render() render
multiple obj file by one viewpoint
- init_all: a wrapper function, initialize all configurations
= set_image_path: reset defualt image output folder
author baiyu
"""
import sys
import os
import pickle
import numpy as np
import bpy
from mathutils import Matrix
import argparse
abs_path = os.path.abspath(__file__)
sys.path.append(os.path.dirname(abs_path))
from render_helper import *
from settings import *
from data_config import camera_setting_path, total_view_nums
def clear_mesh():
""" clear all meshes in the secene
"""
for block in bpy.data.meshes:
if block.users == 0:
bpy.data.meshes.remove(block)
for block in bpy.data.materials:
if block.users == 0:
bpy.data.materials.remove(block)
for block in bpy.data.textures:
if block.users == 0:
bpy.data.textures.remove(block)
for block in bpy.data.images:
if block.users == 0:
bpy.data.images.remove(block)
bpy.ops.object.select_all(action='DESELECT')
for obj in bpy.data.objects:
if obj.type == 'MESH' or obj.type == 'EMPTY':
obj.select = True
bpy.ops.object.delete()
def scene_setting_init(use_gpu):
"""initialize blender setting configurations
"""
sce = bpy.context.scene.name
bpy.data.scenes[sce].render.engine = g_engine_type
bpy.data.scenes[sce].cycles.film_transparent = g_use_film_transparent
#output
bpy.data.scenes[sce].render.image_settings.color_mode = g_color_mode
bpy.data.scenes[sce].render.image_settings.color_depth = g_color_depth
bpy.data.scenes[sce].render.image_settings.file_format = g_file_format
bpy.data.scenes[sce].render.use_file_extension = g_use_file_extension
#dimensions
bpy.data.scenes[sce].render.resolution_x = g_resolution_x
bpy.data.scenes[sce].render.resolution_y = g_resolution_y
bpy.data.scenes[sce].render.resolution_percentage = g_resolution_percentage
if use_gpu:
bpy.data.scenes[sce].render.engine = 'CYCLES' #only cycles engine can use gpu
bpy.data.scenes[sce].render.tile_x = g_hilbert_spiral
bpy.data.scenes[sce].render.tile_x = g_hilbert_spiral
cycles_prefs = bpy.context.user_preferences.addons['cycles'].preferences
for device in cycles_prefs.devices:
if device.type == 'CUDA':
device.use = True
bpy.context.user_preferences.addons['cycles'].preferences.compute_device_type = 'CUDA'
bpy.types.CyclesRenderSettings.device = 'GPU'
bpy.data.scenes[sce].cycles.device = 'GPU'
def node_setting_init():
"""node settings for render rgb images
mainly for compositing the background images
"""
bpy.context.scene.use_nodes = True
tree = bpy.context.scene.node_tree
links = tree.links
for node in tree.nodes:
tree.nodes.remove(node)
image_node = tree.nodes.new('CompositorNodeImage')
scale_node = tree.nodes.new('CompositorNodeScale')
alpha_over_node = tree.nodes.new('CompositorNodeAlphaOver')
render_layer_node = tree.nodes.new('CompositorNodeRLayers')
img_file_output_node = tree.nodes.new('CompositorNodeOutputFile')
depth_file_output_node = tree.nodes.new("CompositorNodeOutputFile")
scale_node.space = g_scale_space
img_file_output_node.format.color_mode = g_rgb_color_mode
img_file_output_node.format.color_depth = g_rgb_color_depth
img_file_output_node.format.file_format = g_rgb_file_format
img_file_output_node.base_path = g_syn_data_folder
depth_file_output_node.format.color_mode = g_depth_color_mode
depth_file_output_node.format.color_depth = g_depth_color_depth
depth_file_output_node.format.file_format = g_depth_file_format
depth_file_output_node.base_path = g_syn_data_folder
links.new(image_node.outputs[0], scale_node.inputs[0])
links.new(scale_node.outputs[0], alpha_over_node.inputs[1])
links.new(render_layer_node.outputs[0], alpha_over_node.inputs[2])
links.new(alpha_over_node.outputs[0], img_file_output_node.inputs[0])
links.new(render_layer_node.outputs['Depth'], depth_file_output_node.inputs[0])
def render(viewpoint, viewpoint_id, rendering_dir):
"""render rgb image and depth maps
render a object rgb image by a given camera viewpoint and
choose random image as background, only render one image
at a time.
Args:
viewpoint: a vp parameter(contains azimuth,elevation,tilt angles and distance)
viewpoint_id: the index of viewpoint
rendering_dir: path to store camera info
"""
vp = viewpoint
cam_location = camera_location(vp.azimuth, vp.elevation, vp.distance)
cam_rot = camera_rot_XYZEuler(vp.azimuth, vp.elevation, vp.tilt)
cam_obj = bpy.data.objects['Camera']
cam_obj.location[0] = cam_location[0]
cam_obj.location[1] = cam_location[1]
cam_obj.location[2] = cam_location[2]
cam_obj.rotation_euler[0] = cam_rot[0]
cam_obj.rotation_euler[1] = cam_rot[1]
cam_obj.rotation_euler[2] = cam_rot[2]
if g_background_image_path == 'TRANSPARENT':
bpy.context.scene.render.alpha_mode = g_background_image_path
else:
background_images = os.listdir(g_background_image_path)
image_name = random.choice(background_images)
image_path = os.path.join(g_background_image_path, image_name)
image_node = bpy.context.scene.node_tree.nodes[0]
image_node.image = bpy.data.images.load(image_path)
img_file_output_node = bpy.context.scene.node_tree.nodes[4]
img_file_output_node.file_slots[0].path = 'color_###.png' # blender placeholder #
depth_file_output_node = bpy.context.scene.node_tree.nodes[5]
depth_file_output_node.file_slots[0].path = 'depth_###.exr' # blender placeholder #
#start rendering
bpy.context.scene.frame_set(viewpoint_id + 1)
bpy.ops.render.render(write_still=True)
# write camera info
cam_K_file = os.path.join(cam_K_path, 'cam_K.txt')
if (not os.path.isfile(cam_K_file)) or (len(os.listdir(cam_RT_path))<total_view_nums):
K, RT = get_3x4_P_matrix_from_blender(cam_obj)
np.savetxt(cam_K_file, K)
np.savetxt(os.path.join(cam_RT_path, 'cam_RT_{0:03d}.txt'.format(viewpoint_id + 1)), RT)
print('Camera parameters written.')
def render_obj_by_vp_lists(rendering_dir, viewpoints):
""" render one obj file by a given viewpoint list
a wrapper function for render()
Args:
rendering_dir: a string variable indicate the rendering path of the model.
viewpoints: an iterable object of vp parameter(contains azimuth,elevation,tilt angles and distance)
"""
    if isinstance(viewpoints, tuple):
        vp_lists = [viewpoints]  # a single viewpoint tuple was passed
    else:
        try:
            vp_lists = iter(viewpoints)
        except TypeError:
            print("viewpoints is not an iterable object")
            return
for vp_id, vp in enumerate(vp_lists):
set_image_path(rendering_dir)
set_depth_path(rendering_dir)
render(vp, vp_id, rendering_dir)
def render_objs_by_one_vp(obj_pathes, viewpoint):
""" render multiple obj files by a given viewpoint
Args:
        obj_pathes: an iterable object containing multiple
            obj file paths
viewpoint: a namedtuple object contains azimuth,
elevation,tilt angles and distance
"""
    if isinstance(obj_pathes, str):
        obj_lists = [obj_pathes]  # a single path string was passed
    else:
        try:
            obj_lists = iter(obj_pathes)
        except TypeError:
            print("obj_pathes is not an iterable object")
            return
for obj_path in obj_lists:
rendering_dir = os.path.join(output_folder, obj_path.split('/')[4])
if not os.path.exists(rendering_dir):
os.makedirs(rendering_dir)
clear_mesh()
bpy.ops.import_scene.obj(filepath=obj_path)
set_image_path(rendering_dir)
set_depth_path(rendering_dir)
render(viewpoint, 1, rendering_dir)
def camera_setting_init():
""" camera settings for renderer
"""
bpy.data.objects['Camera'].rotation_mode = g_rotation_mode
def light_setting_init():
""" light settings for renderer
"""
# Make light just directional, disable shadows.
world = bpy.data.worlds['World']
world.use_nodes = True
# changing these values does affect the render.
bg = world.node_tree.nodes['Background']
bg.inputs[1].default_value = 10.0
def init_all():
"""init everything we need for rendering
an image
"""
scene_setting_init(g_gpu_render_enable)
camera_setting_init()
node_setting_init()
light_setting_init()
def set_image_path(new_path):
""" set image output path to new_path
Args:
new rendered image output path
"""
file_output_node = bpy.context.scene.node_tree.nodes[4]
file_output_node.base_path = new_path
def set_depth_path(new_path):
""" set image output path to new_path
Args:
new rendered depth output path
"""
file_output_node = bpy.context.scene.node_tree.nodes[5]
file_output_node.base_path = new_path
#---------------------------------------------------------------
# 3x4 P matrix from Blender camera
#---------------------------------------------------------------
# BKE_camera_sensor_size
def get_sensor_size(sensor_fit, sensor_x, sensor_y):
if sensor_fit == 'VERTICAL':
return sensor_y
return sensor_x
# BKE_camera_sensor_fit
def get_sensor_fit(sensor_fit, size_x, size_y):
if sensor_fit == 'AUTO':
if size_x >= size_y:
return 'HORIZONTAL'
else:
return 'VERTICAL'
return sensor_fit
# Build intrinsic camera parameters from Blender camera data
#
# See notes on this in
# blender.stackexchange.com/questions/15102/what-is-blenders-camera-projection-matrix-model
# as well as
# https://blender.stackexchange.com/a/120063/3581
def get_calibration_matrix_K_from_blender(camd):
if camd.type != 'PERSP':
raise ValueError('Non-perspective cameras not supported')
scene = bpy.context.scene
f_in_mm = camd.lens
scale = scene.render.resolution_percentage / 100
resolution_x_in_px = scale * scene.render.resolution_x
resolution_y_in_px = scale * scene.render.resolution_y
sensor_size_in_mm = get_sensor_size(camd.sensor_fit, camd.sensor_width, camd.sensor_height)
sensor_fit = get_sensor_fit(
camd.sensor_fit,
scene.render.pixel_aspect_x * resolution_x_in_px,
scene.render.pixel_aspect_y * resolution_y_in_px
)
pixel_aspect_ratio = scene.render.pixel_aspect_y / scene.render.pixel_aspect_x
if sensor_fit == 'HORIZONTAL':
view_fac_in_px = resolution_x_in_px
else:
view_fac_in_px = pixel_aspect_ratio * resolution_y_in_px
pixel_size_mm_per_px = sensor_size_in_mm / f_in_mm / view_fac_in_px
s_u = 1 / pixel_size_mm_per_px
s_v = 1 / pixel_size_mm_per_px / pixel_aspect_ratio
# Parameters of intrinsic calibration matrix K
u_0 = resolution_x_in_px / 2 - camd.shift_x * view_fac_in_px
v_0 = resolution_y_in_px / 2 + camd.shift_y * view_fac_in_px / pixel_aspect_ratio
skew = 0 # only use rectangular pixels
K = Matrix(
((s_u, skew, u_0),
( 0, s_v, v_0),
( 0, 0, 1)))
return K
# Returns camera rotation and translation matrices from Blender.
#
# There are 3 coordinate systems involved:
# 1. The World coordinates: "world"
# - right-handed
# 2. The Blender camera coordinates: "bcam"
# - x is horizontal
# - y is up
# - right-handed: negative z look-at direction
# 3. The desired computer vision camera coordinates: "cv"
# - x is horizontal
# - y is down (to align to the actual pixel coordinates
# used in digital images)
# - right-handed: positive z look-at direction
def get_3x4_RT_matrix_from_blender(cam):
# bcam stands for blender camera
R_blender2shapenet = Matrix(
((1, 0, 0),
(0, 0, -1),
(0, 1, 0)))
R_bcam2cv = Matrix(
((1, 0, 0),
(0, -1, 0),
(0, 0, -1)))
# Transpose since the rotation is object rotation,
# and we want coordinate rotation
# R_world2bcam = cam.rotation_euler.to_matrix().transposed()
# T_world2bcam = -1*R_world2bcam * location
#
# Use matrix_world instead to account for all constraints
location, rotation = cam.matrix_world.decompose()[0:2]
R_world2bcam = rotation.to_matrix().transposed()
# Convert camera location to translation vector used in coordinate changes
# T_world2bcam = -1*R_world2bcam*cam.location
# Use location from matrix_world to account for constraints:
T_world2bcam = -1*R_world2bcam * location
# Build the coordinate transform matrix from world to computer vision camera
R_world2cv = R_bcam2cv*R_world2bcam*R_blender2shapenet
T_world2cv = R_bcam2cv*T_world2bcam
# put into 3x4 matrix
RT = Matrix((
R_world2cv[0][:] + (T_world2cv[0],),
R_world2cv[1][:] + (T_world2cv[1],),
R_world2cv[2][:] + (T_world2cv[2],)
))
return RT
def get_3x4_P_matrix_from_blender(cam):
K = get_calibration_matrix_K_from_blender(cam.data)
RT = get_3x4_RT_matrix_from_blender(cam)
return K, RT
### YOU CAN WRITE YOUR OWN IMPLEMENTATION TO GENERATE DATA
def parse_args():
argv = sys.argv
if "--" not in argv:
argv = [] # as if no args are passed
else:
argv = argv[argv.index("--") + 1:] # get all args after "--"
parser = argparse.ArgumentParser(description='Blender renderer.')
parser.add_argument("dict", type=str,
help="model-view file for rendering.")
args = parser.parse_args(argv)
return args
if __name__ == '__main__':
args = parse_args()
init_all()
result_list = pickle.load(open(args.dict, 'rb'))
cam_K_path = os.path.join(camera_setting_path, 'cam_K')
cam_RT_path = os.path.join(camera_setting_path, 'cam_RT')
if not os.path.exists(cam_K_path):
os.makedirs(cam_K_path)
if not os.path.exists(cam_RT_path):
os.makedirs(cam_RT_path)
for model in result_list:
cat = model.path.split('/')[3]
output_folder = os.path.join(g_syn_data_folder, cat)
if not os.path.exists(output_folder):
os.makedirs(output_folder)
rendering_dir = os.path.join(output_folder, model.path.split('/')[4])
if not os.path.exists(rendering_dir):
os.makedirs(rendering_dir)
if len(os.listdir(rendering_dir)) == 40:
print('Rendering has been done with this model.')
continue
clear_mesh()
bpy.ops.import_scene.obj(filepath=model.path)
render_obj_by_vp_lists(rendering_dir, model.vps)
| 2.421875
| 2
|
src/codegen/base_code_gen.py
|
jwpttcg66/ExcelToTransfer
| 1
|
12782133
|
# -*- coding: utf-8 -*-
import os
INDENTS = [" " * (i * 4) for i in xrange(10)]
class BaseCodeGen(object):
def __init__(self, module, module_name, output_path, generator_info):
super(BaseCodeGen, self).__init__()
self.content = []
self.module = module
self.module_name = module_name
self.output_path = output_path
self.generator_info = generator_info
def run(self):
pass
def save_to_file(self, path):
content = "".join(self.content)
self.content = []
origin_content = None
if os.path.exists(path):
with open(path, "rb") as f:
origin_content = f.read()
if content == origin_content: return
print "生成", path
with open(path, "wb") as f:
f.write(content)
return
def output(self, *args):
self.content.extend(args)
def write(self, indent, *args):
assert(type(indent) == int)
if indent > 0: self.content.append(INDENTS[indent])
self.content.extend(args)
def write_line(self, indent = 0, *args):
assert(type(indent) == int)
if indent > 0: self.content.append(INDENTS[indent])
self.content.extend(args)
self.content.append("\n")
| 2.78125
| 3
|
wickedhot/__init__.py
|
thatch/wickedhot
| 0
|
12782134
|
from wickedhot.one_hot_encoder import OneHotEncoder
__version__ = "0.1.6"
| 1.117188
| 1
|
tests/test_graph.py
|
gerrymandr/graphmaker
| 0
|
12782135
|
<filename>tests/test_graph.py
class TestGraph:
pass
| 0.71875
| 1
|
src/AoC_2015/d20_elf_visits_counting_generators_factors_and_set_algebra/elf_delivery.py
|
derailed-dash/Advent-of-Code
| 9
|
12782136
|
<gh_stars>1-10
"""
Author: Darren
Date: 19/03/2021
Solving https://adventofcode.com/2015/day/20
Infinite elves deliver to infinite houses numbered sequentially.
Each elf is assigned a number and a progression.
Elf e visits every e-th house, i.e. houses e, 2e, 3e, ... E.g.
elf 1 visits 1, 2, 3, 4, 5, 6, 7, 8, 9 ...
elf 2 visits 2 4 6 8 ...
elf 3 visits 3 6 9 ...
At each house h, elf e delivers 10x presents. Thus:
house 1 gets 10, house 2 gets 30, house 3 gets 40...
Solution:
My approach seems inefficient. It takes ~30s for both parts.
Part 1:
E.g. for house 6, we must determine all factors of 6. Why?
Because the factors are the elves that will visit this house.
Thus, house 6 is visited by elves 1, 2, 3, and 6.
Use a generator to get factors for next house.
Use a map to multiply each factor by the per_elf number.
Part 2:
Elves now have a limit on the number of houses they visit. (50.)
So, we need to count the occurrences of each factor (i.e. each elf visit).
When each elf reaches its limit of visits, add them to an exclude list.
Use set difference to remove these from factors returned.
(Having the exclude list as a set rather than a list is MUCH faster!)
"""
import os
import time
from collections import defaultdict
SCRIPT_DIR = os.path.dirname(__file__)
INPUT_FILE = "input/input.txt"
SAMPLE_INPUT_FILE = "input/sample_input.txt"
TARGET = 36000000
MAX_HOUSES_PER_ELF = 50
def main():
# Part 1
gen = generate_presents_for_house(10)
presents_dropped = 0
house = 0
while presents_dropped < TARGET:
house, presents_dropped = next(gen)
print(f"Part 1: House {house} = {presents_dropped}")
# Part 2
gen = generate_presents_for_house(11, MAX_HOUSES_PER_ELF)
presents_dropped = 0
house = 0
while presents_dropped < TARGET:
house, presents_dropped = next(gen)
print(f"Part 2: House {house} = {presents_dropped}")
def generate_presents_for_house(per_elf_multiplier: int, elf_visit_limit: int = 0):
"""
Generator function that returns the number of presents dropped at a given house.
Each elf drops a certain number of presents at each house
Args:
per_elf_multiplier (int): Elves drop e*q presents per house, where e is elf number and q is the multiplier
Yields:
[tuple]: Current house number, total presents dropped at this house
"""
house_num = 1
factors_for_house = set()
factors_counter = defaultdict(int)
factors_to_exclude = set()
while True:
factors_for_house = get_factors(house_num)
factors_for_house.difference_update(factors_to_exclude)
for factor in factors_for_house:
factors_counter[factor] += 1
# if an elf has reached the limit, it won't do any more drops
if elf_visit_limit and factors_counter[factor] >= elf_visit_limit:
factors_to_exclude.add(factor)
presents_dropped = sum(map(lambda x: (x * per_elf_multiplier), factors_for_house))
# print(f"House {house_num} visited by: {factors_for_house[house_num]}")
# print(f"Presents dropped: {presents_dropped}")
# print(f"Factors counter: {factors_counter}")
yield house_num, presents_dropped
house_num += 1
def get_factors(num: int) -> set[int]:
"""
Gets the factors for a given house number.
Here, this determines the elves that visit each house.
Args:
num (int): the house number we want to get factors for
Returns:
set[int]: The set of factors (which represent elf numbers)
"""
factors = set()
# E.g. factors of 8 = 1, 2, 4, 8
# Iterate from 1 to sqrt of 8, where %=0, i.e. 1 and 2
# E.g. for i=1, we add factors 1 and 8
# for i=2, we add factors 2 and 4
# Use a set to eliminate duplicates, e.g. if i is 4, we only want one 2
for i in range(1, (int(num**0.5) + 1)):
if num%i == 0:
factors.add(i)
factors.add(num//i)
return factors
if __name__ == "__main__":
t1 = time.perf_counter()
main()
t2 = time.perf_counter()
print(f"Execution time: {t2 - t1:0.4f} seconds")
| 3.203125
| 3
|
presentations/20210526_simple_character_recognition/image.py
|
kevin-kaianalytics/bcgov-python-presentations
| 5
|
12782137
|
'''read ENVI/raw binary format. Dimensions from header, data from .bin file..
..then segment image using flood-fill segmentation'''
import os
import sys
import pickle
import numpy as np
from flood import flood
import matplotlib.pyplot as plt
from dist import normalize, to_list, centroid
def read_hdr(hdr): # read the image dimensions
cols, rows, bands = 0, 0, 0
for line in open(hdr).readlines():
chunks = line.strip().split('=')
try: # pull off two chunks delimited by '='
f, g = [x.strip() for x in chunks[0:2]]
if f == 'samples':
cols = g
if f == 'lines':
rows = g
if f == 'bands':
bands = g
except:
pass
return [int(x) for x in [cols, rows, bands]] # string to int
def read_float(fn): # read the raw binary file
return np.fromfile(fn, dtype=np.float32) / 255. # put data in range [0, 1]
'''pixel @ (row, col) = (i, j):
npx = nrow * ncol # number of pixels in image
red value: dat[ i * ncol + j]
grn value: dat[ npx + i * ncol + j]
blu value: dat[2 * npx + i * ncol + j]'''
def plot(dat, rows, cols, bands, file_name): # plot "raw binary" image
dat = dat.reshape((bands, rows * cols))
rgb = np.zeros((rows, cols, bands))
for i in range(bands):
rgb[:, :, i] = dat[i, :].reshape((rows, cols))
plt.imshow(rgb)
plt.show() # might uncomment this to zoom in to determine line numbers
plt.savefig(file_name)
plt.close()
class image:
def __init__(self, fn=None):
if fn:
self.fn = fn
self.load()
def load(self):
self.cols, self.rows, self.bands = read_hdr(self.fn[:-4] + '.hdr')
self.dat, self.npx = read_float(self.fn), self.rows * self.cols
plot(self.dat, self.rows, self.cols, self.bands, self.fn[:-4] + '.png')
def png(self):
if type(self.dat) == list:
self.dat = np.array(self.dat)
plot(self.dat, self.rows, self.cols, self.bands, self.fn + '.png')
def gather_points(self): # list points for each label
self.points = [[] for i in range(self.next_label)]
for i in range(self.rows):
for j in range(self.cols):
ix = i * self.cols + j # linear index
if self.labels[ix] > 0: # skip background
label = self.labels[ix] # label this point
self.points[label] += [[i, j]]
c = {} # count the number of pixels per segment
for point in self.points:
n = len(point)
c[n] = (c[n] + 1) if (n in c) else 1
counts = [[k, c[k]] for k in c] # sort the counts
counts.sort()
ffn = self.fn + '_seg_count.png'
if not os.path.exists(ffn):
print('+w ' + ffn)
plt.figure(figsize=(8, 8))
fig = plt.barh([str(x[0]) for x in counts],
[str(x[1]) for x in counts])
plt.title("Pixel-count vs. number of segments w that count " +
"(total segments: " + str(len(self.points)) + ")")
plt.xlabel("Number of segments with a given pixel count")
plt.ylabel("Pixel-count for a segment (total pixel counts = " +
str(len(counts)) + ")")
plt.tight_layout()
plt.savefig(ffn)
plt.close()
def segment(self, flood_lines=None, use_normalize=False):
print('segment ' + self.fn)
self.name = self.fn[:-4]
a = os.system('mkdir -p ' + self.name)
self.rgb = [[self.dat[i], # format data into list of rgb tuples
self.dat[self.npx + i],
self.dat[2 * self.npx + i]] for i in range(0, self.npx)]
c = {} # count rgb values
for x in self.rgb:
x = str(x)
c[x] = c[x] + 1 if x in c else 1
ffn = self.fn + '_rgb_count.png'
if not os.path.exists(ffn):
plt.figure()
plt.bar(c.keys(), np.log(list(c.values())) / np.log(10.))
plt.title("Log of count of color values")
print('+w ' + ffn)
plt.savefig(ffn)
plt.close()
counts = [[c[k], k] for k in c]
counts.sort()
self.max_color = counts[-1][1] # assume most-prevalent col is bg
if sys.getrecursionlimit() < self.npx: # increase recursion limit
sys.setrecursionlimit(self.npx)
# labels for segmentation
self.labels = [0 for i in range(self.npx)] # 0 == unlabelled!
self.next_label = 1
r_i = flood_lines if flood_lines else range(self.rows)
for i in r_i:
for j in range(self.cols):
flood(self, i, j)
self.gather_points() # list (i,j) points by segment
fn = None
is_truth = (self.name == 'truth') # is this truth data?
truth = None
if is_truth:
truth = [x for x in open('truth_chars.txt').read()]
for pi in range(len(self.points)): # plot image rep. of each truth
point = self.points[pi]
if pi > 0: # 0 is bg / unlabelled
try:
ns = truth[pi - 1] if is_truth else str(pi)
fn = self.name + os.path.sep + ns + '.png'
if not os.path.exists(fn):
plt.figure()
plt.scatter([x[1] for x in point],
[-x[0] for x in point])
plt.title(ns)
print('+w ' + fn)
if use_normalize:
plt.xlim([-.5, self.cols - .5])
plt.ylim([-(self.rows - .5), .5])
plt.xlabel('col ix')
plt.ylabel('-row ix')
plt.savefig(fn)
plt.close()
fn = self.name + os.path.sep + ns + '.centroid'
if not os.path.exists(fn):
print(' +w ' + fn)
xL, yL = to_list(point)
cX, cY = centroid(xL, yL)
open(fn, 'wb').write((str(cX) + ' ' +
str(cY)).encode())
# nb run cleanup.py before changing truth inputs
fn = self.name + os.path.sep + ns + '.p'
if not os.path.exists(fn):
print(' +w ' + fn)
pickle.dump(point, open(fn, 'wb'))
except:
pass # don't plot / save the background
if __name__ == "__main__": # example image data to demonstrate floodfill
args = sys.argv
if len(args) < 2:
dat = [0, 0, 1, 0, 0, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1,
0, 0, 1, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 1, 0, 0,
0, 0, 1, 1, 0, 1, 1, 1, 1, 1, 1, 0, 1, 1, 0, 0]
a = image()
a.dat, a.rows, a.cols, a.bands = dat, 4, 4, 3
a.npx = a.rows * a.cols
a.fn = '4x4.bin'
a.png()
a.segment(use_normalize=False)
else:
        a = image('truth.bin')
        a.segment(flood_lines=[745, 838, 932])
| 3.015625
| 3
|
TFile.py
|
NoOneXXX/magic-wormhole-client
| 0
|
12782138
|
<filename>TFile.py
# from src.gui.main_dialog import Ui_MainWindow
# from src.gui.main_window import Ui_MainWindow
from src.gui.windows_dialog.main_window import Ui_MainWindow
from PyQt5.QtWidgets import QApplication, QMainWindow
from PyQt5 import QtGui
from src.service.wormhole_transfer import transfer_file
import twisted.internet
import qt5reactor
import sys
from src.service import picture_rc
# Fix for pyinstaller packages app to avoid ReactorAlreadyInstalledError
# See https://github.com/kivy/kivy/issues/4182 and
# https://github.com/pyinstaller/pyinstaller/issues/3390
if "twisted.internet.reactor" in sys.modules:
del sys.modules["twisted.internet.reactor"]
app = QApplication([])
qt5reactor.install()
# class MainWindow(QMainWindow, Ui_MainWindow):
# def __init__(self, reactors_, *args, **kwargs):
# super(MainWindow, self).__init__(*args, **kwargs)
# tf = transfer_file(reactors_)
# self.setupUi(self, tf)
# self.show()
# self.setWindowIcon(QtGui.QIcon(os.path.join('src/picture', 'piecasso.ico')))
class MainWindow(QMainWindow, Ui_MainWindow):
def __init__(self, reactors_, *args, **kwargs):
super(MainWindow, self).__init__(*args, **kwargs)
tf = transfer_file(reactors_)
self.setupUi(self, tf)
self.show()
self.setWindowIcon(QtGui.QIcon(u":/pic/picture/piecasso.ico"))
# Press the green button in the gutter to run the script.
if __name__ == '__main__':
    app.setApplicationName("文件传输")  # i.e. "File Transfer"
reactor = twisted.internet.reactor
window = MainWindow(reactor)
sys.exit(reactor.run())
| 2.171875
| 2
|
client_linux/helpers/utils.py
|
carlosgprado/BrundleFuzz
| 90
|
12782139
|
##################################################################
# Utils.py
# Client side utilities
##################################################################
import platform
class Utils(object):
def __init__(self, parent):
self.parent = parent
self.ae = parent.ae
self.cfg = parent.cfg
def get_size_uint32(self):
"""
Let's do things right.
Calculate the size of an unsigned long
for this architecture. Or 4 :)
"""
try:
import ctypes
return ctypes.sizeof(ctypes.c_uint32)
except:
self.ae.m_warn("WARNING: Could not find ctypes. Assuming uint32 is 4 bytes :(")
return 4
def hit_bin(self, n):
"""
Given a hit number, return the corresponding bin
Hit bins: {1, 2, 3, 4-7, 8-15, 16-31, 32-127, 128+}
"""
# TODO: fix this monkey code!
if n < 4:
return n
        elif n >> 3 == 0:
            return 4
        elif n >> 4 == 0:
            return 5
        elif n >> 5 == 0:
            return 6
elif n >= 32 and n <= 127:
return 7
else:
return 8
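    # Worked example (illustrative, not part of the original module), assuming the
    # right-shift form above: hit_bin(1) -> 1, hit_bin(3) -> 3, hit_bin(5) -> 4,
    # hit_bin(12) -> 5, hit_bin(20) -> 6, hit_bin(100) -> 7, hit_bin(300) -> 8.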
def get_platform_info(self):
"""
Information regarding the computer
where the fuzzer is running
"""
try:
node_properties = {
'node_name' : platform.node(),
'os_release': platform.release(),
'os_version': platform.version(),
'machine' : platform.machine(),
'processor' : platform.processor()
}
except:
self.ae.m_alert('[x] Error getting platform information')
return None
return node_properties
| 2.359375
| 2
|
wagtail/admin/viewsets/chooser.py
|
PaarthAgarwal/wagtail
| 0
|
12782140
|
from django.db.models import ForeignKey
from django.urls import path
from django.utils.functional import cached_property
from django.utils.translation import gettext as _
from wagtail.admin.forms.models import register_form_field_override
from wagtail.admin.views.generic import chooser as chooser_views
from wagtail.admin.widgets.chooser import BaseChooser
from .base import ViewSet
class ChooserViewSet(ViewSet):
"""
A viewset that creates a chooser modal interface for choosing model instances.
"""
icon = "snippet" #: The icon to use in the header of the chooser modal, and on the chooser widget
choose_one_text = _(
"Choose"
) #: Label for the 'choose' button in the chooser widget when choosing an initial item
    page_title = None  #: Title text for the chooser modal (defaults to the same as ``choose_one_text``)
choose_another_text = _(
"Choose another"
) #: Label for the 'choose' button in the chooser widget, when an item has already been chosen
edit_item_text = _("Edit") #: Label for the 'edit' button in the chooser widget
#: The view class to use for the overall chooser modal; must be a subclass of ``wagtail.admin.views.generic.chooser.ChooseView``.
choose_view_class = chooser_views.ChooseView
#: The view class used to render just the results panel within the chooser modal; must be a subclass of ``wagtail.admin.views.generic.chooser.ChooseResultsView``.
choose_results_view_class = chooser_views.ChooseResultsView
#: The view class used after an item has been chosen; must be a subclass of ``wagtail.admin.views.generic.chooser.ChosenView``.
chosen_view_class = chooser_views.ChosenView
#: The base Widget class that the chooser widget will be derived from.
base_widget_class = BaseChooser
#: Defaults to True; if False, the chooser widget will not automatically be registered for use in admin forms.
register_widget = True
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
if self.page_title is None:
self.page_title = self.choose_one_text
@property
def choose_view(self):
return self.choose_view_class.as_view(
model=self.model,
chosen_url_name=self.get_url_name("chosen"),
results_url_name=self.get_url_name("choose_results"),
icon=self.icon,
page_title=self.page_title,
)
@property
def choose_results_view(self):
return self.choose_results_view_class.as_view(
model=self.model,
chosen_url_name=self.get_url_name("chosen"),
results_url_name=self.get_url_name("choose_results"),
)
@property
def chosen_view(self):
return self.chosen_view_class.as_view(
model=self.model,
)
@cached_property
def widget_class(self):
"""
Returns the form widget class for this chooser.
"""
return type(
"%sChooserWidget" % self.model.__name__,
(self.base_widget_class,),
{
"model": self.model,
"choose_one_text": self.choose_one_text,
"choose_another_text": self.choose_another_text,
"link_to_chosen_text": self.edit_item_text,
"chooser_modal_url_name": self.get_url_name("choose"),
"icon": self.icon,
},
)
def get_urlpatterns(self):
return super().get_urlpatterns() + [
path("", self.choose_view, name="choose"),
path("results/", self.choose_results_view, name="choose_results"),
path("chosen/<str:pk>/", self.chosen_view, name="chosen"),
]
def on_register(self):
if self.register_widget:
register_form_field_override(
ForeignKey, to=self.model, override={"widget": self.widget_class}
)
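# Hedged usage sketch (not part of this module): a hypothetical app could expose a
# chooser for its own model roughly like this; the model, icon and hook name below
# are illustrative assumptions rather than guaranteed API.
#   from wagtail import hooks
#   from myapp.models import Person
#   class PersonChooserViewSet(ChooserViewSet):
#       model = Person
#       icon = "user"
#       choose_one_text = "Choose a person"
#   @hooks.register("register_admin_viewset")
#   def register_person_chooser_viewset():
#       return PersonChooserViewSet("person_chooser", url_prefix="person-chooser")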
| 2.109375
| 2
|
tests/apps/orders/views/rest/test_order.py
|
leonardon473/my-dinner-backend
| 0
|
12782141
|
<filename>tests/apps/orders/views/rest/test_order.py
import pytest
from freezegun import freeze_time
from model_bakery import baker
from rest_framework.test import APIClient
from apps.users.models import Client, ClientAddress
from apps.menu.models import MenuItem, TypeOfCuisine
pytestmark = pytest.mark.django_db
order_in_available_time = freeze_time("2021-12-10 16:00 -06:00")
class TestCreateOrderView:
endpoint = "/orders/orders/"
def setup_method(self, method):
self.api_client = APIClient()
self.client = baker.make(Client)
self.client_address = baker.make(ClientAddress, client=self.client)
self.type_of_cuisine = baker.make(TypeOfCuisine)
# @order_in_available_time
# def test_repeated_elements_return_a_error(self):
# # Arrange
    #     menu_item = baker.make(MenuItem, price="200.0")
# payload = {
# "order_menu_items": [
# {"menu_item": menu_item.menu_item_id, "quantity": 1},
# {"menu_item": menu_item.menu_item_id, "quantity": 1},
# ],
# "client": self.client.client_id,
# "client_address": self.client_address.client_address_id,
# }
# # Act
# response = self.api_client.post(self.endpoint, data=payload, format="json")
# # Assert
# data = response.json()
# assert response.status_code == 400
# assert isinstance(data, dict)
@order_in_available_time
def test_raise_error_with_less_than_two_order_menu_items(self):
# Arrange
menu_item_1 = baker.make(MenuItem, price="300.0")
payload = {
"order_menu_items": [
{"menu_item": menu_item_1.menu_item_id, "quantity": 2},
],
"client": self.client.client_id,
"client_address": self.client_address.client_address_id,
}
# Act
response = self.api_client.post(self.endpoint, data=payload, format="json")
# Assert
data = response.json()
assert response.status_code == 400
assert isinstance(data, dict)
assert "order_menu_items" in data
@order_in_available_time
def test_order_amounts(self):
# Arrange
menu_item_1 = baker.make(MenuItem, price="300.0")
menu_item_2 = baker.make(MenuItem, price="400.0")
payload = {
"order_menu_items": [
{"menu_item": menu_item_1.menu_item_id, "quantity": 2},
{"menu_item": menu_item_2.menu_item_id, "quantity": 1},
],
"client": self.client.client_id,
"client_address": self.client_address.client_address_id,
}
# Act
response = self.api_client.post(self.endpoint, data=payload, format="json")
# Assert
data = response.json()
assert response.status_code == 201
assert isinstance(data, dict)
assert data["total_amount"] == "1000.00"
assert data["order_menu_items"][0]["subtotal"] == "600.00"
assert data["order_menu_items"][1]["subtotal"] == "400.00"
@freeze_time("2021-12-10 15:59 -06:00")
def test_raise_error_time_less_than_unavailable_time(self):
# Arrange
menu_item_1 = baker.make(MenuItem, price="100.0")
menu_item_2 = baker.make(MenuItem, price="100.0")
payload = {
"order_menu_items": [
{"menu_item": menu_item_1.menu_item_id, "quantity": 1},
{"menu_item": menu_item_2.menu_item_id, "quantity": 1},
],
"client": self.client.client_id,
"client_address": self.client_address.client_address_id,
}
# Act
response = self.api_client.post(self.endpoint, data=payload, format="json")
# Assert
data = response.json()
assert response.status_code == 400
assert isinstance(data, dict)
assert "non_field_errors" in data
@freeze_time("2021-12-10 21:01 -06:00")
def test_raise_error_time_greater_than_unavailable_time(self):
# Arrange
menu_item_1 = baker.make(MenuItem, price="100.0")
menu_item_2 = baker.make(MenuItem, price="100.0")
payload = {
"order_menu_items": [
{"menu_item": menu_item_1.menu_item_id, "quantity": 1},
{"menu_item": menu_item_2.menu_item_id, "quantity": 1},
],
"client": self.client.client_id,
"client_address": self.client_address.client_address_id,
}
# Act
response = self.api_client.post(self.endpoint, data=payload, format="json")
# Assert
data = response.json()
assert response.status_code == 400
assert isinstance(data, dict)
assert "non_field_errors" in data
@order_in_available_time
def test_raise_error_for_incorrect_client_address(self):
# Arrange
random_client_address = baker.make(ClientAddress)
menu_item_1 = baker.make(MenuItem, price="100.0")
menu_item_2 = baker.make(MenuItem, price="100.0")
payload = {
"order_menu_items": [
{"menu_item": menu_item_1.menu_item_id, "quantity": 1},
{"menu_item": menu_item_2.menu_item_id, "quantity": 1},
],
"client": self.client.client_id,
"client_address": random_client_address.client_address_id,
}
# Act
response = self.api_client.post(self.endpoint, data=payload, format="json")
# Assert
data = response.json()
assert response.status_code == 400
assert isinstance(data, dict)
assert "client_address" in data
| 2.359375
| 2
|
addition/addition.py
|
sowmiya710/squad-repo
| 0
|
12782142
|
<filename>addition/addition.py<gh_stars>0
a=10
b=100
print(a+b)
| 2.25
| 2
|
solutions/ex3_4.py
|
pycam/python-functions-and-modules
| 16
|
12782143
|
<filename>solutions/ex3_4.py
from ex3_3 import gc_content, extract_sub_sequences
def gc_content_along_the_chain(dna_sequence, window_size):
"""Returns a list of GC along the DNA sequence
given a DNA sequence and the size of the sliding window
"""
sub_sequences = extract_sub_sequences(dna_sequence, window_size)
gc_results = []
for sub_sequence in sub_sequences:
gc_results.append(gc_content(sub_sequence))
return gc_results
dna = 'ATGGTGCATCTGACTCCTGAGGAGAAGTCTGCCGTTACTGCCCTGTGGGGCAAGGTG'
print(gc_content(dna))
print(extract_sub_sequences(dna, 5))
print(gc_content_along_the_chain(dna, 5))
| 3.296875
| 3
|
src/media_etl.py
|
SchootHuang/DSC180B-Coronavirus-Wikipedia
| 0
|
12782144
|
import pandas as pd
import zipfile
import os
def get_media_data(**cfg):
'''retrive media dataset'''
for url in cfg['URLs']:
print(url)
infile = os.path.join(cfg['outpath'],url.split('/')[-1])
os.system(cfg['wget_fmt']%(url,infile))
print(infile)
with zipfile.ZipFile(infile, 'r') as zip_ref:
zip_ref.extractall(infile)
def process_media_data(**cfg):
'''get COVID-19 related news out of news dataset'''
result = []
    chunksize = 10 ** 5  # assumed default; `chunksize` was referenced but never defined in the original
    for chunk in pd.read_csv(cfg['infile'], usecols=range(2, 12),
                             parse_dates=['date'], chunksize=chunksize):
result.append(chunk[chunk.year==2020])
all2020 = pd.concat(result)
COVID = all2020[
(all2020.title.str.contains('virus'))|
(all2020.title.str.contains('COVID'))|
(all2020.title.str.contains('stay-at-home'))|
(all2020.title.str.contains('COVID-19'))|
# (all2020.title.str.contains('toll'))|
(all2020.title.str.contains('coronavirus'))
].reset_index(drop=True)
COVID.to_csv(cfg['outfile'])
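# Hedged usage sketch (keys inferred from the two functions above; the URL and
# paths are illustrative assumptions):
#   cfg = {
#       'URLs': ['https://example.com/news-2020.zip'],
#       'outpath': 'data/raw',
#       'wget_fmt': 'wget %s -O %s',
#       'infile': 'data/raw/all-the-news.csv',
#       'outfile': 'data/out/covid_news.csv',
#   }
#   get_media_data(**cfg)
#   process_media_data(**cfg)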
| 3.046875
| 3
|
core/mongodb.py
|
naolArega/buchi
| 0
|
12782145
|
import asyncio
from decouple import config
from motor.motor_asyncio import AsyncIOMotorClient
cs = config("MONGODB_CS")
client = AsyncIOMotorClient(cs)
client.get_io_loop = asyncio.get_running_loop
buchi = client.get_database('buchi')
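# Hedged usage sketch (the collection name and query below are illustrative assumptions):
#   async def list_documents():
#       return await buchi.get_collection('users').find().to_list(length=100)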
| 1.820313
| 2
|
socialProject/urls.py
|
sasuke22/socialProject
| 1
|
12782146
|
<reponame>sasuke22/socialProject
from django.conf.urls import include, url
from django.contrib import admin
from account.views import LoginView, AboutView, TermsView, PrivacyView
urlpatterns = [
url(r'^admin/', include(admin.site.urls)),
url(r'^i18n/', include('django.conf.urls.i18n')),
url(r'^$', LoginView.as_view(), name='login'),
url(r'^about/$', AboutView.as_view(), name='about'),
url(r'^terms/$', TermsView.as_view(), name='terms'),
url(r'^privacy/$', PrivacyView.as_view(), name='privacy'),
url(r'^', include('account.urls', namespace='account'))
]
| 1.882813
| 2
|
graph.py
|
blimmo/tfp
| 0
|
12782147
|
<filename>graph.py
import itertools
import random
from collections import defaultdict
from copy import deepcopy
from common import twos
class Graph:
def __init__(self, ln):
self.ln = ln
self.n = n = 2 ** ln
self.v = v = frozenset(range(n))
self.e = frozenset(itertools.combinations(v, 2))
self.data = defaultdict(set)
self.feedback = None
self.order = None
def __setitem__(self, key, value):
self.data[key] = value
def __getitem__(self, item):
return self.data[item]
def __str__(self):
return str(self.data)
def make_tournament(self, order=None, feedback=()):
"""Make the graph a tournament obeying a topological sort of order except feedback"""
if order is None:
order = tuple(range(self.n))
self.order = order
self.feedback = tuple(feedback)
self.data.clear()
# add all edges according to order
for u, v in self.e:
for w in order:
if w == u:
self.data[u].add(v)
break
elif w == v:
self.data[v].add(u)
break
# swap the feedback edges
for u, v in feedback:
if v in self.data[u]:
self.data[v].add(u)
self.data[u].remove(v)
else: # u in G[v]
self.data[u].add(v)
self.data[v].remove(u)
def make_from_sba(self, winner, arb_comp=lambda u, v: random.choice((True, False))):
self.data.clear()
feedback = set()
remaining = list(self.v)
remaining_e = {frozenset((u, v)) for u, v in self.e}
while len(remaining) > 1:
for u, v in twos(remaining.copy()):
if u == winner:
first, second = u, v
elif v == winner:
first, second = v, u
else:
# arbitrary
if arb_comp(u, v):
first, second = u, v
else:
first, second = v, u
self.data[first].add(second)
if first > second:
feedback.add((first, second))
remaining.remove(second)
remaining_e.remove(frozenset((u, v)))
for u, v in remaining_e:
if arb_comp(u, v):
self.data[u].add(v)
if u > v:
feedback.add((u, v))
else:
self.data[v].add(u)
if u < v:
feedback.add((u, v))
self.feedback = feedback
self.order = tuple(range(self.n))
def find_feedback(self):
for k in range(len(self.e)):
print(k)
f = self.feedback_rec(self.data, k)
if f is not None:
break
self.feedback = f
return f
def feedback_rec(self, d, k):
possible = list(itertools.chain.from_iterable(
((u, v), (v, w), (w, u))
for u, v, w in itertools.permutations(d.keys(), 3)
if v in d[u] and w in d[v] and u in d[w]
))
if len(possible) == 0:
# acyclic
return ()
if k == 0:
return None
for x, y in possible:
# swap (x, y)
d_new = deepcopy(d)
d_new[x].remove(y)
d_new[y].add(x)
ret = self.feedback_rec(d_new, k - 1)
if ret is not None:
return ret + ((x, y),)
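# Hedged usage sketch (illustrative, not part of the module): build a tournament on
# 2**2 = 4 players with one reversed ("feedback") edge and then recover it.
#   g = Graph(2)
#   g.make_tournament(order=(0, 1, 2, 3), feedback=[(3, 0)])
#   print(g.find_feedback())  # expected to recover the single reversed edge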
| 3.3125
| 3
|
day1.py
|
stefan-bordei/AOC2021
| 0
|
12782148
|
<reponame>stefan-bordei/AOC2021<gh_stars>0
with open("./data/day1.txt", "r") as scan_results:
contents = [int(x) for x in scan_results]
# Part 1
count_part1 = 0
for i in range(1, len(contents)):
    if contents[i - 1] < contents[i]:
        count_part1 += 1
print(f'Part1: {count_part1}')
# Part 2
count_part2 = 0
for i in range(len(contents) - 3):
if sum(contents[i:i+3]) < sum(contents[i+1:i+4]):
count_part2 += 1
print(f'Part2: {count_part2}')
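# Note (illustrative, not in the original): the two three-element windows share their
# middle values, so sum(contents[i:i+3]) < sum(contents[i+1:i+4]) reduces to
# contents[i] < contents[i+3], and Part 2 can equivalently be written as:
#   count_part2 = sum(1 for i in range(len(contents) - 3)
#                     if contents[i] < contents[i + 3])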
| 2.984375
| 3
|
Exercise02/2-21.py
|
ywyz/IntroducingToProgrammingUsingPython
| 0
|
12782149
|
'''
@Date: 2019-07-10 17:12:59
@Author: ywyz
@LastModifiedBy: ywyz
@Github: https://github.com/ywyz
@LastEditors: ywyz
@LastEditTime: 2019-07-10 18:05:17
'''
monthlySavingAmount = eval(input("Enter the monthly saving amount: "))
firstMonth = monthlySavingAmount * (1 + 0.00417)
secondMonth = (firstMonth + monthlySavingAmount) * (1 + 0.00417)
thirdMonth = (secondMonth + monthlySavingAmount) * (1 + 0.00417)
fourthMonth = (thirdMonth + monthlySavingAmount) * (1 + 0.00417)
fifthMonth = (fourthMonth + monthlySavingAmount) * (1 + 0.00417)
sixthMonth = (fifthMonth + monthlySavingAmount) * (1 + 0.00417)
print("After the sixth month, the account value is ", sixthMonth)
| 3.640625
| 4
|
echelon.py
|
jnurmine/tmux-echelon
| 1
|
12782150
|
<gh_stars>1-10
#!/usr/bin/python
# -*- coding: iso-8859-15 -*-
# (C) 2018 <NAME> <<EMAIL>>
#
# Echelon adds logic to your tmux panes
#
import subprocess
import sys
from pexpect import fdpexpect
class TmuxInfo:
def __init__(self, session=None, window=None, pane=None):
self.session = session
self.window = window
self.pane = pane
def __str__(self):
return "%s:%s.%s" % (self.session, self.window, self.pane)
def tmux_safe_quote(msg):
# tmux display-message expands e.g.
# ".%12" to ". %12" (string length 12)
    # Not sure what it is supposed to be but it messes the display-message output
return msg.replace("%", "%%")
def shell_safe_quote(msg):
# Avoid shell expansions, use single quotes
return msg.replace("'", "'\"'\"'")
def tmux_display_msg(msg):
global tmux_info
return subprocess.call("tmux display-message -t '%s' '%s'" %
(tmux_info, shell_safe_quote(tmux_safe_quote(msg))), shell=True)
def tmux_send_keys(msg):
global tmux_info
return subprocess.call("tmux send-keys -t '%s' '%s'" %
(tmux_info, shell_safe_quote(msg)), shell=True)
def prep_string(b):
if sys.version_info[0] >= 3:
return b.decode()
else:
return b
def send(s):
# monkeypatch to make send/sendline go to tmux
global p
s = p._coerce_send_string(s)
b = p._encoder.encode(s, final=False)
return tmux_send_keys(prep_string(b))
tmux_info = TmuxInfo(
session = sys.argv[1],
window = sys.argv[2],
pane = sys.argv[3])
s = "Listening in this pane. Target: session: %s, window: %s, pane: %s (%s)" % \
(tmux_info.session, tmux_info.window, tmux_info.pane, tmux_info)
print(s)
tmux_display_msg(s)
p = fdpexpect.fdspawn(sys.stdin)
# monkeypatch
p.send = send
# aux monkeypatch, enable feedback from the script
p.tmux_display_msg = tmux_display_msg
# Run the script
import script
while script.run(p):
pass
p.close()
print("Bye")
| 2.375
| 2
|