# File: securetea/common.py (repo: adeyosemanputra/SecureTea-Project)
# -*- coding: utf-8 -*-
u"""Common module for SecureTea.
Project:
╔═╗┌─┐┌─┐┬ ┬┬─┐┌─┐╔╦╗┌─┐┌─┐
╚═╗├┤ │ │ │├┬┘├┤ ║ ├┤ ├─┤
╚═╝└─┘└─┘└─┘┴└─└─┘ ╩ └─┘┴ ┴
Author: <NAME> <<EMAIL>>, Jan 30 2019
Version: 1.1
Module: SecureTea
"""
import time
def getdatetime():
"""Date and time.
Returns:
TYPE: String with the current date and time
"""
return str(time.strftime("%Y-%m-%d %H:%M:%S"))
def check_config(cred):
"""
Check whether the credentials are valid or not.
Args:
-----
:cred : dict
Credentials dictionary
Raises:
-------
None
Returns:
--------
TYPE: Bool
True if valid else False
"""
for key in cred:
if cred[key] == "XXXX":
return False
return True
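# Minimal usage sketch (added for illustration; the key below is a
# hypothetical placeholder):
#
#   check_config({"api_key": "XXXX"})    # False -- unfilled placeholder credential
#   check_config({"api_key": "abc123"})  # True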

# File: scripts/SCZ_RNAseq/syn4590909/prepare_individual_gene_clusters.py (repo: omarmaddouri/GCNCC_cross_validated)
import sys
from os.path import dirname, abspath
sys.path.append(dirname(dirname(abspath(__file__))))
from collections import OrderedDict
import csv
path="../../data/SCZ_RNAseq/output/syn4590909/"
dataset="PPI"
clusters = OrderedDict()
for i in range(19576):
    clusters["Center_{}".format(i)] = i
with open("{}{}.clusters_individual_gene.txt".format(path, dataset), "w", newline='', encoding="utf-8") as f:
    w_top_clusters = csv.writer(f, delimiter='\t')
    for key in clusters.keys():
        w_top_clusters.writerow([key, clusters[key]])

# File: tools/latlon_reader.py (repo: bjlittle/mint)
import netCDF4
import numpy
import vtk
from reader_base import ReaderBase
class LatLonReader(ReaderBase):
def __init__(self, filename, padding=0):
"""
Constructor
@param filename UM netCDF file
@param padding number of extra cells to add on the high end of longitudes
        @note padding adds extra cells on the high end of longitudes
"""
super(LatLonReader, self).__init__()
# read file
nc = netCDF4.Dataset(filename, 'r')
        lons_units = ''
        lats_units = ''
# gather all the latitudes and longitudes
lats, lons = None, None
lats_0, lons_0 = None, None
for varname in nc.variables:
var = nc.variables[varname]
if hasattr(var, 'standard_name'):
if var.standard_name == 'longitude':
if varname.find('_0') >= 0:
lons_0 = var[:]
else:
lons = var[:]
lons_units = var.units
elif var.standard_name == 'latitude':
if varname.find('_0') >= 0:
lats_0 = var[:]
else:
lats = var[:]
lats_units = var.units
ncells_lat, ncells_lon = len(lats_0), len(lons_0)
ncells = ncells_lat * (ncells_lon + padding)
# construct the unstructured grid as a collection of
# 2D cells
pointArray = numpy.zeros((4 * ncells, 3))
self.vtk['pointArray'] = pointArray
pointData = self.vtk['pointData']
pointData.SetNumberOfComponents(3)
pointData.SetNumberOfTuples(4 * ncells)
pointData.SetVoidArray(pointArray, 4 * ncells * 3, 1)
points = self.vtk['points']
points.SetNumberOfPoints(4 * ncells)
points.SetData(pointData)
grid = self.vtk['grid']
grid.Allocate(ncells, 1)
ptIds = vtk.vtkIdList()
ptIds.SetNumberOfIds(4)
periodicity_length = 360. # in deg
icell = 0
for j0 in range(ncells_lat):
j1 = j0 + 1
for i in range(ncells_lon + padding):
i0 = (i + 0) % ncells_lon
i1 = (i + 1) % ncells_lon
offset0 = periodicity_length * ((i + 0) // ncells_lon)
offset1 = periodicity_length * ((i + 1) // ncells_lon)
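                # Worked example of the wrap-around (added comment): with
                # ncells_lon = 4 and padding = 1, the extra column i = 4 gives
                # i0 = 0 and offset0 = 360., i.e. the first longitude repeated
                # one full period to the east.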
lon00, lat00 = lons[i0] + offset0, lats[j0]
lon10, lat10 = lons[i1] + offset1, lats[j0]
lon11, lat11 = lons[i1] + offset1, lats[j1]
lon01, lat01 = lons[i0] + offset0, lats[j1]
k0 = 4*icell
k1, k2, k3 = k0 + 1, k0 + 2, k0 + 3
# storing coords as lon, lat, 0
pointArray[k0, :] = lon00, lat00, 0.
pointArray[k1, :] = lon10, lat10, 0.
pointArray[k2, :] = lon11, lat11, 0.
pointArray[k3, :] = lon01, lat01, 0.
ptIds.SetId(0, k0)
ptIds.SetId(1, k1)
ptIds.SetId(2, k2)
ptIds.SetId(3, k3)
grid.InsertNextCell(vtk.VTK_QUAD, ptIds)
icell += 1
grid.SetPoints(points)
###############################################################################
def main():
import argparse
from numpy import pi, cos, sin, exp
parser = argparse.ArgumentParser(description='Read ugrid file')
parser.add_argument('-i', dest='input', default='ll.nc', help='Specify UM input netCDF file')
parser.add_argument('-p', dest='padding', type=int, default=0,
help='Specify by how much the grid should be padded on the high lon side')
parser.add_argument('-V', dest='vtk_file', default='lonlat.vtk', help='Save grid in VTK file')
parser.add_argument('-b', dest='binary', action='store_true', help='Write binary file')
parser.add_argument('-stream', dest='streamFunc', default='x',
help='Stream function as a function of x (longitude in rad) and y (latitude in rad)')
args = parser.parse_args()
reader = LatLonReader(filename=args.input, padding=args.padding)
if args.streamFunc:
# compute the edge velocity if user provides the stream function
x, y = reader.getLonLat()
streamData = eval(args.streamFunc)
edgeVel = reader.getEdgeFieldFromStreamData(streamData)
reader.setEdgeField('edge_integrated_velocity', edgeVel)
loopIntegrals = reader.getLoopIntegralsFromStreamData(streamData)
reader.setLoopIntegrals('cell_loop_integrals', loopIntegrals)
if args.vtk_file:
reader.saveToVtkFile(args.vtk_file, binary=args.binary)
if __name__ == '__main__':
main()

# File: selectinf/learning/Rutils.py (repo: TianXie1999/selective-inference)
import os, glob, tempfile, warnings
import numpy as np
from traitlets import (HasTraits,
                       Integer,
                       Unicode,
                       Float,
                       Instance,
                       Dict,
                       Bool,
                       default)
# Rpy
try:
import rpy2.robjects as rpy
from rpy2.robjects import numpy2ri
rpy.r('library(knockoff); library(glmnet)')
from rpy2 import rinterface
except ImportError:
warnings.warn("rpy2 with knockoff and glmnet unavailable")
def null_print(x):
pass
# Knockoff selection
methods = {}
class generic_method(HasTraits):
need_CV = False
selectiveR_method = False
    wide_ok = True  # ok for p >= n?
# Traits
q = Float(0.2)
method_name = Unicode('Generic method')
model_target = Unicode()
@classmethod
def setup(cls, feature_cov):
cls.feature_cov = feature_cov
def __init__(self, X, Y, l_theory, l_min, l_1se, sigma_reid):
(self.X,
self.Y,
self.l_theory,
self.l_min,
self.l_1se,
self.sigma_reid) = (X,
Y,
l_theory,
l_min,
l_1se,
sigma_reid)
def select(self):
raise NotImplementedError('abstract method')
@classmethod
def register(cls):
methods[cls.__name__] = cls
def selected_target(self, active, beta):
C = self.feature_cov[active]
Q = C[:,active]
return np.linalg.inv(Q).dot(C.dot(beta))
def full_target(self, active, beta):
return beta[active]
def get_target(self, active, beta):
if self.model_target not in ['selected', 'full', 'debiased']:
            raise ValueError('Gaussian methods only have selected, full or debiased targets')
if self.model_target in ['full', 'debiased']:
return self.full_target(active, beta)
else:
return self.selected_target(active, beta)
class lasso_glmnet(generic_method):
def select(self, CV=True, seed=0):
numpy2ri.activate()
rpy.r.assign('X', self.X.copy())
rpy.r.assign('Y', self.Y.copy())
rpy.r('X = as.matrix(X)')
rpy.r('Y = as.numeric(Y)')
rpy.r('set.seed(%d)' % seed)
rpy.r('cvG = cv.glmnet(X, Y, intercept=FALSE, standardize=FALSE)')
rpy.r("L1 = cvG[['lambda.min']]")
rpy.r("L2 = cvG[['lambda.1se']]")
if CV:
rpy.r("L = L1")
else:
rpy.r("L = 0.99 * L2")
rpy.r("G = glmnet(X, Y, intercept=FALSE, standardize=FALSE)")
n, p = self.X.shape
L = rpy.r('L')
rpy.r('B = as.numeric(coef(G, s=L, exact=TRUE, x=X, y=Y))[-1]')
B = np.asarray(rpy.r('B'))
selected = (B != 0)
if selected.sum():
V = np.nonzero(selected)[0]
return V, V
else:
return [], []
lasso_glmnet.register()
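# Minimal usage sketch (an illustration, not from the original file): assumes a
# working R installation with glmnet, and numpy arrays X (n x p) and Y (n,).
#
#   lasso_glmnet.setup(np.identity(p))                  # class-wide feature covariance
#   method = lasso_glmnet(X, Y, None, None, None, None) # l_theory/l_min/l_1se/sigma_reid unused by select()
#   selected, _ = method.select(CV=True, seed=0)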
def factor_knockoffs(feature_cov, method='asdp'):
numpy2ri.activate()
rpy.r.assign('Sigma', feature_cov)
rpy.r.assign('method', method)
rpy.r('''
# Compute the Cholesky -- from create.gaussian
Sigma = as.matrix(Sigma)
diag_s = diag(switch(method, equi = create.solve_equi(Sigma),
sdp = create.solve_sdp(Sigma), asdp = create.solve_asdp(Sigma)))
if (is.null(dim(diag_s))) {
diag_s = diag(diag_s, length(diag_s))
}
SigmaInv_s = solve(Sigma, diag_s)
Sigma_k = 2 * diag_s - diag_s %*% SigmaInv_s
chol_k = chol(Sigma_k)
''')
knockoff_chol = np.asarray(rpy.r('chol_k'))
SigmaInv_s = np.asarray(rpy.r('SigmaInv_s'))
diag_s = np.asarray(rpy.r('diag_s'))
np.savez('.knockoff_factorizations/%s.npz' % (os.path.split(tempfile.mkstemp()[1])[1],),
method=method,
feature_cov=feature_cov,
knockoff_chol=knockoff_chol)
return knockoff_chol
def cv_glmnet_lam(X, Y, seed=0):
"""
Some calculations that can be reused by methods:
lambda.min, lambda.1se, lambda.theory and Reid et al. estimate of noise
"""
numpy2ri.activate()
rpy.r('set.seed(%d)' % seed)
rpy.r.assign('X', X.copy())
rpy.r.assign('Y', Y.copy())
rpy.r('X=as.matrix(X)')
rpy.r('Y=as.numeric(Y)')
rpy.r('set.seed(1)')
rpy.r('G = cv.glmnet(X, Y, intercept=FALSE, standardize=FALSE)')
rpy.r("L = G[['lambda.min']]")
rpy.r("L1 = G[['lambda.1se']]")
L = rpy.r('L')
L1 = rpy.r('L1')
numpy2ri.deactivate()
return float(1.00001 * L[0]), float(1.00001 * L1[0]),
| 2.25 | 2 |
kidsdata/ftsdata.py | abeelen/kidsdata | 0 | 12765555 | import numpy as np
import warnings
from copy import deepcopy
from scipy.signal import fftconvolve, medfilt
import astropy.units as u
import astropy.constants as cst
from astropy.io import fits, registry
from astropy.wcs import WCS
from astropy.nddata import NDDataArray, StdDevUncertainty, InverseVariance
from astropy.nddata.ccddata import _known_uncertainties
from astropy.nddata.ccddata import _unc_name_to_cls, _unc_cls_to_name, _uncertainty_unit_equivalent_to_parent
def forman(M):
"""Return Forman window.
The Forman window is defined in (E-4) [1]_.
Parameters
----------
M : int
Number of points in the output window. If zero or less, an
empty array is returned.
Returns
-------
out : ndarray, shape(M,)
        The window, with the maximum value normalized to one (the value
        one appears only if `M` is even).
See Also
--------
numpy.bartlett, numpy.blackman, numpy.hamming, numpy.kaiser, numpy.hanning
References
----------
    .. [1] <NAME>., (2005) Spectral Characterization of the Herschel SPIRE
Photometer, 2005MsT..........1S
"""
if M < 1:
return np.array([])
if M == 1:
return np.ones(1, float)
n = np.arange(0, M)
return (1 - ((n - M / 2) / M) ** 2) ** 2
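# Worked example (added for illustration): a 4-point Forman window reaches
# exactly 1 because n = M/2 is an integer when M is even.
#
#   forman(4)  # -> array([0.5625, 0.87890625, 1.0, 0.87890625])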
class FTSData(NDDataArray):
"""Class to handle OPD or spectral FTS cubes.
Parameters
----------
data : `~numpy.ndarray` or `FTSData`
        The actual data contained in this `FTSData` object. Note that this
        will always be copied by *reference*, so you should make a copy of
        the ``data`` before passing it in if that's the desired behavior.
uncertainty : `~astropy.nddata.NDUncertainty`, optional
Uncertainties on the data.
mask : `~numpy.ndarray`-like, optional
Mask for the data, given as a boolean Numpy array or any object that
can be converted to a boolean Numpy array with a shape
matching that of the data. The values must be ``False`` where
the data is *valid* and ``True`` when it is not (like Numpy
masked arrays). If ``data`` is a numpy masked array, providing
``mask`` here will causes the mask from the masked array to be
ignored.
hits : `~numpy.ndarray`-like, optional
Hit map for the data, given as a int Numpy array or any object that
can be converted to a int Numpy array with a shape
matching that of the data.
flags : `~numpy.ndarray`-like or `~astropy.nddata.FlagCollection`, optional
Flags giving information about each pixel. These can be specified
either as a Numpy array of any type (or an object which can be converted
to a Numpy array) with a shape matching that of the
data, or as a `~astropy.nddata.FlagCollection` instance which has a
shape matching that of the data.
wcs : `~astropy.wcs.WCS`, optional
WCS-object containing the world coordinate system for the data.
meta : `dict`-like object, optional
Metadata for this object. "Metadata" here means all information that
is included with this object but not part of any other attribute
of this particular object. e.g., creation date, unique identifier,
simulation parameters, exposure time, telescope name, etc.
unit : `~astropy.units.UnitBase` instance or str, optional
The units of the data.
"""
__opd_idx = None
__freq_idx = None
hits = None
def __init__(self, *args, hits=None, **kwargs):
# Initialize with the parent...
super().__init__(*args, **kwargs)
        # Additional data
if hits is not None:
self.hits = np.array(hits).astype(int)
# Set Internal indexes on the wcs object
if self.wcs is not None:
opd_idx = np.argwhere("opd" == np.char.lower(self.wcs.wcs.ctype)).squeeze()
self.__opd_idx = opd_idx.item() if opd_idx.size == 1 else None
freq_idx = np.argwhere("freq" == np.char.lower(self.wcs.wcs.ctype)).squeeze()
self.__freq_idx = freq_idx.item() if freq_idx.size == 1 else None
@property
def __is_opd(self):
return self.__opd_idx is not None
@property
def __is_freq(self):
return self.__freq_idx is not None
@property
def opd_axis(self):
if self.__is_opd:
return self.wcs.sub([self.__opd_idx + 1]).pixel_to_world(np.arange(self.shape[0]))
@property
def spectral_axis(self):
if self.__is_freq:
return self.wcs.sub([self.__freq_idx + 1]).pixel_to_world(np.arange(self.shape[0]))
@property
def _is_doublesided(self):
"""Return True is the cube is double sided, also enforce positive increments."""
return (np.sum(self.wcs.sub([self.__opd_idx + 1]).all_pix2world([0, self.shape[0] - 1], 0)) == 0) & (
self.wcs.wcs.cdelt[self.__opd_idx] > 0
)
@property
def _is_onesided(self):
"""Return True is the cube is one sided, also enforce positive increments."""
return (np.sum(self.wcs.sub([self.__opd_idx + 1]).all_pix2world(0, 0)) == 0) & (
self.wcs.wcs.cdelt[self.__opd_idx] > 0
)
# from CCDData
def _slice_wcs(self, item):
"""
Override the WCS slicing behaviour so that the wcs attribute continues
to be an `astropy.wcs.WCS`.
"""
if self.wcs is None:
return None
try:
return self.wcs[item]
except Exception as err:
self._handle_wcs_slicing_error(err, item)
def _extract_doublesided(self):
"""Return the largest doublesided OPD cube from the data.
Returns
-------
output : FTSData
A doublesided interferograms cube
"""
assert self.__is_opd, "Intput should be OPD cube"
opd_wcs = self.wcs.sub([self.__opd_idx + 1])
opds = opd_wcs.all_pix2world(np.arange(self.data.shape[0]), 0)[0]
_maxopd = np.min([-opds.min(), opds.max()])
signed = np.sign(opd_wcs.wcs.cdelt[0])
slice_idx = opd_wcs.all_world2pix([-signed * _maxopd, signed * _maxopd], 0)[0].astype(int)
slice_idx += [0, 1] # Inclusive end
_slice = slice(*slice_idx)
wcs = deepcopy(self.wcs)
wcs.wcs.crpix[self.__opd_idx] -= _slice.start
meta = deepcopy(self.meta)
meta["HISTORY"] = "extract_doublesided"
mask = self.mask[_slice] if self.mask is not None else None
hits = self.hits[_slice] if self.hits is not None else None
result = self.__class__(self.data[_slice], wcs=wcs, mask=mask, meta=meta, hits=hits)
return result
def _to_onesided(self):
"""Return a onesided OPD cube from the data.
Returns
-------
output : FTSData
A onesided interferograms cube
"""
zpd_idx = self.wcs.sub([self.__opd_idx + 1]).world_to_pixel(0 * self.wcs.wcs.cunit[self.__opd_idx]).astype(int)
extrema_opd = np.abs(self.wcs.sub([self.__opd_idx + 1]).pixel_to_world([0, self.shape[0] - 1]))
if extrema_opd[1] >= extrema_opd[0]:
# Positive single sided : longer right hand side...
# Or doublesided
extract_slice = slice(zpd_idx, None)
os_slice = slice(0, zpd_idx + 1)
db_slice = slice(zpd_idx, None, -1)
elif extrema_opd[1] < extrema_opd[0]:
# Negative single sided : longer left hand side...
# Or double sided
extract_slice = slice(zpd_idx, None, -1)
os_slice = slice(0, self.data.shape[0] - zpd_idx)
db_slice = slice(zpd_idx, None)
# TODO: self.mask ??
# Extract the longest part
onesided_itg = self.data[extract_slice].copy()
onesided_hits = self.hits[extract_slice].copy() if self.hits is not None else None
# Take the mean with the other half on the double sided part
onesided_itg[os_slice] += self.data[db_slice]
onesided_itg[os_slice] /= 2
if onesided_hits is not None:
onesided_hits[os_slice] += self.hits[db_slice]
onesided_hits[os_slice] /= 2
wcs = deepcopy(self.wcs)
wcs.wcs.crpix[self.__opd_idx] = 1
output = FTSData(onesided_itg, wcs=wcs, meta=self.meta, hits=onesided_hits)
return output
def __invert_doublesided(self, apodization_function=None):
"""Invert a doublesided interferograms cube.
Parameters
----------
apodization_function : func
Apodization function to be used on the interferograms (default: None)
Returns
-------
output : FTSData
The corresponding spectral cube
Notes
-----
        Choice can be made among the functions available in numpy at [1]_, namely
`numpy.hanning`, `numpy.hamming`, `numpy.bartlett`, `numpy.blackman`, `numpy.kaiser`
or any custom routine following the same convention.
References
----------
.. [1] https://docs.scipy.org/doc/numpy/reference/routines.window.html
"""
assert self.__is_opd, "Intput should be OPD cube"
assert self._is_doublesided, "Not a doublesided interferogram cube"
cdelt_opd = self.wcs.wcs.cdelt[self.__opd_idx]
cunit_opd = u.Unit(self.wcs.wcs.cunit[self.__opd_idx])
naxis_opd = self.shape[0]
# freq = np.fft.fftfreq(naxis_opd, d=cdelt_opd * cunit_opd) * cst.c
if apodization_function is None:
apodization_function = np.ones
_cube = np.ma.array(self.data, mask=self.mask).filled(0) * np.expand_dims(
apodization_function(naxis_opd), tuple(np.arange(1, self.ndim))
)
# Spencer 2005 Eq 2.29, direct fft
spectra = np.fft.fft(np.fft.ifftshift(_cube, axes=0), axis=0)
# Factor of 2 because we used the fourier transform
spectra *= (4 * cdelt_opd * cunit_opd).decompose().value
spectra = np.fft.fftshift(spectra, axes=0)
# freq = np.fft.fftshift(freq)
# Build new wcs
wcs = deepcopy(self.wcs)
wcs.wcs.ctype[self.__opd_idx] = "FREQ"
wcs.wcs.cunit[self.__opd_idx] = "Hz"
# TODO: (cst.c / (cdelt_opd * cunit_opd) / (naxis_opd-1)).to(u.Hz).value give the 1/2L resolution, but fails in the tests
wcs.wcs.cdelt[self.__opd_idx] = (cst.c / (cdelt_opd * cunit_opd) / naxis_opd).to(u.Hz).value
wcs.wcs.crpix[self.__opd_idx] = (naxis_opd - 1) / 2 + 1
wcs.wcs.crval[self.__opd_idx] = 0
# TODO: Estimate uncertainty/hits
output = FTSData(spectra, meta=self.meta, wcs=wcs)
return output
def __invert_onesided(self, apodization_function=None):
"""Invert a onesided interferograms cube.
Parameters
----------
apodization_function : func
Apodization function to be used on the interferograms (default: None)
Returns
-------
output : FTSData
The corresponding spectral cube
Notes
-----
        Choice can be made among the functions available in numpy at [1]_, namely
`numpy.hanning`, `numpy.hamming`, `numpy.bartlett`, `numpy.blackman`, `numpy.kaiser`
or any custom routine following the same convention.
.. [1] https://docs.scipy.org/doc/numpy/reference/routines.window.html
"""
assert self.__is_opd, "Intput should be OPD cube"
assert self._is_onesided, "Not a one sided interferogram cube"
cdelt_opd = self.wcs.wcs.cdelt[self.__opd_idx]
cunit_opd = u.Unit(self.wcs.wcs.cunit[self.__opd_idx])
naxis_opd = self.shape[0]
if apodization_function is None:
apodization_function = np.ones
_cube = np.ma.array(self.data, mask=self.mask).filled(0) * np.expand_dims(
apodization_function(2 * naxis_opd)[naxis_opd:], tuple(np.arange(1, self.ndim))
)
# Spencer 2005 Eq 2.29, direct fft
# Trick is to use the unnormalized irfft
output_shape = 2 * naxis_opd - 1
spectra = np.fft.irfft(_cube, n=output_shape, axis=0) * output_shape
# Factor of 2 because we used the fourier transform
spectra *= (4 * cdelt_opd * cunit_opd).decompose().value
spectra = np.fft.fftshift(spectra, axes=0)
# Build new wcs
wcs = deepcopy(self.wcs)
wcs.wcs.ctype[self.__opd_idx] = "FREQ"
wcs.wcs.cunit[self.__opd_idx] = "Hz"
# (cst.c / (cdelt_opd * cunit_opd) / (output_shape-1)).to(u.Hz).value give the 1/2L resolution, but fails in the tests
wcs.wcs.cdelt[self.__opd_idx] = (cst.c / (cdelt_opd * cunit_opd) / output_shape).to(u.Hz).value
wcs.wcs.crpix[self.__opd_idx] = naxis_opd
wcs.wcs.crval[self.__opd_idx] = 0
# TODO: Estimate uncertainty/hits
output = FTSData(spectra, meta=self.meta, wcs=wcs)
return output
def _get_phase_correction_function(
self,
niter=1,
doublesided_apodization=None,
medfilt_size=None,
deg=None,
fitting_func="polynomial",
pcf_apodization=None,
plot=False,
**kwargs
):
"""Compute the phase correction function for the current cube
        This follows the description in [1]_ with some additional features.
Parameters
----------
niter : [int], optional
number of iterations, by default 1
doublesided_apodization : [function], optional
apodization function for the double sided inversion, by default None, but see Notes
medfilt_size : [int], optional
size of the median filtering window to be applied (before polynomial fitting), by default None
deg : [int], optional
the polynomial degree to fit to the phase, by default None
fitting_func : [str], ("polynomial"|"chebysev"), optional
fitting function class, either polynomial or chebyshev, by default, "polynomial"
pcf_apodization : [function], optional
apodization function for the phase correction function, by default None
plot : bool, optional
diagnostic plots, by default False
Returns
-------
array_like (cube shape)
the phase correction function to be used as convolution kernel for the interferograms
Notes
-----
        Choice of apodization function can be made among the functions available in numpy at [2]_, namely
`numpy.hanning`, `numpy.hamming`, `numpy.bartlett`, `numpy.blackman`, `numpy.kaiser`
or any custom routine following the same convention.
References
----------
.. [1] <NAME>., (2005) Spectral Characterization of the Herschel SPIRE
            Photometer, 2005MsT..........1S
        .. [2] https://docs.scipy.org/doc/numpy/reference/routines.window.html
"""
if pcf_apodization is None:
pcf_apodization = np.ones
# Working copy
itg = deepcopy(self._extract_doublesided())
# Reference iterferogram
itg_ma = np.ma.array(itg.data, mask=itg.mask, copy=True).filled(0)
# Null starting phase (take only the upper part)
phase = np.zeros(((itg.shape[0] - 1) // 2 + 1, *itg.shape[1:]))
# Loop Here
for i in range(niter):
cube = itg._FTSData__invert_doublesided(apodization_function=doublesided_apodization)
# Spencer 2.39 , well actually phases are -pi/pi so arctan2 or angle
_phase = np.angle(cube.data[(itg.shape[0] - 1) // 2 :])
# Replace bad phase :
_phase[np.isnan(_phase)] = 0
if plot:
import matplotlib.pyplot as plt
fig, axes = plt.subplots(ncols=4)
(freq,) = cube.wcs.sub([self.__opd_idx + 1]).all_pix2world(np.arange(cube.shape[0]), 0)
axes[1].plot(freq, cube.data[:, :, 0])
axes[2].plot(freq, _phase[:, :, 0])
if medfilt_size is not None:
# Median filtering of the phases
_phase = medfilt(_phase, kernel_size=(medfilt_size, *(1,) * (len(itg.shape) - 1)))
if deg is not None:
if fitting_func == "polynomial":
polyfit, polyval = np.polynomial.polynomial.polyfit, np.polynomial.polynomial.polyval
elif fitting_func == "chebychev":
polyfit, polyval = np.polynomial.chebyshev.chebfit, np.polynomial.chebyshev.chebval
else:
raise ValueError('fitting_func should be in ("polynomial"|"chebychev")')
# polynomial fit on the phase, weighted by the intensity
p = []
idx = np.linspace(0, 1, _phase.shape[0])
                # np.polynomial.polynomial.polyfit does not accept a (M, K) array for the weights, so we need to loop...
for spec, weight in zip(
_phase.reshape(_phase.shape[0], -1).T,
np.abs(cube.data[(itg.shape[0] - 1) // 2 :]).reshape(_phase.shape[0], -1).T,
):
p.append(polyfit(idx, spec, deg, w=weight))
p = np.asarray(p).T
# evaluate the polynomal all at once :
_phase = polyval(idx, p).T.reshape(_phase.shape)
            # Wrap the phases back to [-pi, pi]; unnecessary, but just in case
_phase = (_phase + np.pi) % (2 * np.pi) - np.pi
"""
fit data also incorporates smoothing in the
out of band region to ensure zero phase and derivative discontinuities and zero amplitude at
zero and Nyquist frequency.
"""
if plot:
axes[2].plot(freq, _phase[:, :, 0], linestyle="--")
phase += _phase
# Spencer 3.30
# Using rfft leads pure real pcf and strangely could lead to wrong results
# phase_correction_function = np.fft.irfft(np.exp(-1j * phase), axis=0, n=2*(phase.shape[0]-1)+1)
phase_correction_function = np.fft.ifft(
np.exp(-1j * np.fft.fftshift(np.concatenate([-phase[:0:-1], phase]), axes=0)), axis=0
)
# Apodization of the PCF along the first axis
phase_correction_function = (
np.fft.fftshift(phase_correction_function, axes=0).T
* pcf_apodization(phase_correction_function.shape[0])
).T
if plot:
(x,) = itg.wcs.sub([3]).all_pix2world(np.arange(itg.shape[0]), 0)
axes[3].plot(x, phase_correction_function[:, :, 0])
axes[3].set_xlim(-1, 1)
axes[0].plot(x, itg.data[:, :, 0])
axes[0].set_xlim(-1, 1)
# Correct the initial dataset with the current phase for the next iteration
corrected_itg = fftconvolve(itg_ma, phase_correction_function, mode="same", axes=0).real
itg.data[:] = corrected_itg
return phase_correction_function
def to_spectra(self, onesided_apodization=None, **kwargs):
"""Invert an interferograms cube using the (enhanced) Forman method.
        This follows the description in [1]_.
Parameters
----------
onesided_apodization : [function], optional
apodization function to be used on the one sided interferograms, by default None
niter : [int], optional
number of iterations, by default 1
doublesided_apodization : [function], optional
apodization function for the double sided inversion, by default None, but see Notes
medfilt_size : [int], optional
size of the median filtering window to be applied (before polynomial fitting), by default None
deg : [int], optional
the polynomial degree to fit to the phase, by default None
pcf_apodization : [function], optional
apodization function for the phase correction function, by default None
Returns
-------
output : FTSData
The corresponding spectral cube
Notes
-----
        Choice of apodization function can be made among the functions available in numpy at [2]_, namely
`numpy.hanning`, `numpy.hamming`, `numpy.bartlett`, `numpy.blackman`, `numpy.kaiser`
or any custom routine following the same convention.
References
----------
.. [1] <NAME>., (2005) Spectral Characterization of the Herschel SPIRE
Photometer, 2005MsT..........1S
.. [2] https://docs.scipy.org/doc/numpy/reference/routines.window.html
"""
phase_correction_function = self._get_phase_correction_function(**kwargs)
# Convolved the interferograms and hits
itg = np.ma.array(self.data, mask=self.mask).filled(0)
corrected_itg = fftconvolve(itg, phase_correction_function, mode="same", axes=0).real
corrected_hits = None
if self.hits is not None:
hits = np.ma.array(self.hits, mask=self.mask).filled(0)
corrected_hits = fftconvolve(hits, phase_correction_function, mode="same", axes=0).real
corrected = FTSData(corrected_itg, wcs=self.wcs, hits=corrected_hits)
onesided = corrected._to_onesided()
return onesided.__invert_onesided(apodization_function=onesided_apodization)
def to_hdu(
self,
hdu_mask="MASK",
hdu_uncertainty="UNCERT",
hdu_hits="HITS",
hdu_flags=None,
wcs_relax=True,
key_uncertainty_type="UTYPE",
):
"""Creates an HDUList object from a FTSData object.
Parameters
----------
hdu_mask, hdu_uncertainty, hdu_flags, hdu_hits : str or None, optional
If it is a string append this attribute to the HDUList as
`~astropy.io.fits.ImageHDU` with the string as extension name.
Flags are not supported at this time. If ``None`` this attribute
is not appended.
Default is ``'MASK'`` for mask, ``'UNCERT'`` for uncertainty, ``'HITS'`` for hits and
``None`` for flags.
wcs_relax : bool
Value of the ``relax`` parameter to use in converting the WCS to a
FITS header using `~astropy.wcs.WCS.to_header`. The common
``CTYPE`` ``RA---TAN-SIP`` and ``DEC--TAN-SIP`` requires
``relax=True`` for the ``-SIP`` part of the ``CTYPE`` to be
preserved.
key_uncertainty_type : str, optional
The header key name for the class name of the uncertainty (if any)
that is used to store the uncertainty type in the uncertainty hdu.
Default is ``UTYPE``.
.. versionadded:: 3.1
Raises
-------
ValueError
- If ``self.mask`` is set but not a `numpy.ndarray`.
- If ``self.uncertainty`` is set but not a astropy uncertainty type.
- If ``self.uncertainty`` is set but has another unit then
``self.data``.
NotImplementedError
Saving flags is not supported.
Returns
-------
hdulist : `~astropy.io.fits.HDUList`
"""
if isinstance(self.meta, fits.Header):
# Copy here so that we can modify the HDU header by adding WCS
# information without changing the header of the CCDData object.
header = self.meta.copy()
else:
header = fits.Header(self.meta)
if self.unit is not None and self.unit is not u.dimensionless_unscaled:
header["bunit"] = self.unit.to_string()
if self.wcs:
# Simply extending the FITS header with the WCS can lead to
# duplicates of the WCS keywords; iterating over the WCS
# header should be safer.
#
# Turns out if I had read the io.fits.Header.extend docs more
# carefully, I would have realized that the keywords exist to
# avoid duplicates and preserve, as much as possible, the
# structure of the commentary cards.
#
# Note that until astropy/astropy#3967 is closed, the extend
# will fail if there are comment cards in the WCS header but
# not header.
wcs_header = self.wcs.to_header(relax=wcs_relax)
header.extend(wcs_header, useblanks=False, update=True)
hdus = [fits.PrimaryHDU(self.data, header)]
if hdu_mask and self.mask is not None:
# Always assuming that the mask is a np.ndarray (check that it has
# a 'shape').
if not hasattr(self.mask, "shape"):
raise ValueError("only a numpy.ndarray mask can be saved.")
# Convert boolean mask to uint since io.fits cannot handle bool.
hduMask = fits.ImageHDU(self.mask.astype(np.uint8), name=hdu_mask)
hdus.append(hduMask)
if hdu_uncertainty and self.uncertainty is not None:
# We need to save some kind of information which uncertainty was
# used so that loading the HDUList can infer the uncertainty type.
# No idea how this can be done so only allow StdDevUncertainty.
uncertainty_cls = self.uncertainty.__class__
if uncertainty_cls not in _known_uncertainties:
raise ValueError("only uncertainties of type {} can be saved.".format(_known_uncertainties))
uncertainty_name = _unc_cls_to_name[uncertainty_cls]
hdr_uncertainty = fits.Header()
hdr_uncertainty[key_uncertainty_type] = uncertainty_name
# Assuming uncertainty is an StdDevUncertainty save just the array
# this might be problematic if the Uncertainty has a unit differing
# from the data so abort for different units. This is important for
# astropy > 1.2
if hasattr(self.uncertainty, "unit") and self.uncertainty.unit is not None and self.unit is not None:
if not _uncertainty_unit_equivalent_to_parent(uncertainty_cls, self.uncertainty.unit, self.unit):
raise ValueError(
"saving uncertainties with a unit that is not "
"equivalent to the unit from the data unit is not "
"supported."
)
hduUncert = fits.ImageHDU(self.uncertainty.array, hdr_uncertainty, name=hdu_uncertainty)
hdus.append(hduUncert)
if hdu_hits and self.hits is not None:
            # Always assuming that the hits is a np.ndarray (check that it has
            # a 'shape').
            if not hasattr(self.hits, "shape"):
                raise ValueError("only a numpy.ndarray hits can be saved.")
            # Store the hits as uint16.
hduHits = fits.ImageHDU(self.hits.astype(np.uint16), name=hdu_hits)
hdus.append(hduHits)
if hdu_flags and self.flags:
raise NotImplementedError("adding the flags to a HDU is not " "supported at this time.")
hdulist = fits.HDUList(hdus)
return hdulist
@classmethod
def from_array(cls, opd, data, hits=None, mask=None, **kwargs):
"""Construct FTS data from arrays.
Parameters
----------
opd : array_like or Quantity (M,)
the optical path difference, by default 'mm'
data : array_like (M, *)
the corresponding data, first dimension must match opd
        hits : array_like, optional
            the corresponding hits
        mask : array_like, optional
            the corresponding mask
Returns
-------
data : FTSData
the corresponding FTSData objects
"""
naxis = len(data.shape)
wcs = WCS(naxis=naxis)
if not isinstance(opd, u.Quantity):
opd = u.Quantity(opd, "mm")
zpd_idx = np.argmin(np.abs(opd))
if opd[zpd_idx] != 0:
print("Shifting opd by {} for 0".format(opd[zpd_idx]))
opd -= opd[zpd_idx]
dpd = np.diff(opd)
np.testing.assert_almost_equal(
np.median(dpd).to(dpd.unit).value, dpd.value, err_msg="Problem on opd differences"
)
wcs.wcs.ctype[naxis - 1] = "OPD"
wcs.wcs.cunit[naxis - 1] = opd.unit
wcs.wcs.crpix[naxis - 1] = zpd_idx + 1
wcs.wcs.crval[naxis - 1] = opd[zpd_idx].value
wcs.wcs.cdelt[naxis - 1] = np.median(dpd).value
if mask is None:
mask = False
return cls(data, wcs=wcs, hits=hits, mask=mask | np.isnan(data), **kwargs)
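# Minimal end-to-end sketch (added for illustration, not part of the original
# module): build a single-pixel interferogram cube on a synthetic OPD grid and
# invert it; the apodization choice is an arbitrary assumption.
#
#   opd = np.linspace(-5, 5, 201) * u.mm
#   itg = np.random.rand(201, 1, 1)                        # (n_opd, ny, nx)
#   cube = FTSData.from_array(opd, itg)
#   spectra = cube.to_spectra(onesided_apodization=np.hanning)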
def fits_ftsdata_writer(
fts_data,
filename,
hdu_mask="MASK",
hdu_uncertainty="UNCERT",
hdu_hits="HITS",
hdu_flags=None,
key_uncertainty_type="UTYPE",
**kwd
):
"""
Write CCDData object to FITS file.
Parameters
----------
filename : str
Name of file.
hdu_mask, hdu_uncertainty, hdu_hits, hdu_flags : str or None, optional
If it is a string append this attribute to the HDUList as
`~astropy.io.fits.ImageHDU` with the string as extension name.
Flags are not supported at this time. If ``None`` this attribute
is not appended.
Default is ``'MASK'`` for mask, ``'UNCERT'`` for uncertainty ``'HITS'`` for hits and
``None`` for flags.
key_uncertainty_type : str, optional
The header key name for the class name of the uncertainty (if any)
that is used to store the uncertainty type in the uncertainty hdu.
Default is ``UTYPE``.
.. versionadded:: 3.1
kwd :
All additional keywords are passed to :py:mod:`astropy.io.fits`
Raises
-------
ValueError
- If ``self.mask`` is set but not a `numpy.ndarray`.
- If ``self.uncertainty`` is set but not a
`~astropy.nddata.StdDevUncertainty`.
- If ``self.uncertainty`` is set but has another unit then
``self.data``.
NotImplementedError
Saving flags is not supported.
"""
hdu = fts_data.to_hdu(
hdu_mask=hdu_mask,
hdu_uncertainty=hdu_uncertainty,
hdu_hits=hdu_hits,
key_uncertainty_type=key_uncertainty_type,
hdu_flags=hdu_flags,
)
hdu.writeto(filename, **kwd)
with registry.delay_doc_updates(FTSData):
# registry.register_reader('fits', CCDData, fits_ccddata_reader)
registry.register_writer("fits", FTSData, fits_ftsdata_writer)
registry.register_identifier("fits", FTSData, fits.connect.is_fits)

# File: mattermost_handler/__init__.py (repo: aymericderbois/py-mattermost-webhooks-log-handler)
# coding=utf-8
name = "mattermost_handler"
import logging
import requests
logger = logging.getLogger(__name__)
class MattermostIncomeWebhookHandler(logging.Handler):
def __init__(self, url):
super(MattermostIncomeWebhookHandler, self).__init__()
self.url = url
if self.url is None:
logger.warning("Mattermost webhook url cannot be None")
def emit(self, record):
if self.url is not None:
requests.post(self.url, json={"text": self.format(record)})
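# Minimal usage sketch (added for illustration; the webhook URL is a
# hypothetical placeholder):
#
#   handler = MattermostIncomeWebhookHandler("https://mattermost.example.com/hooks/xyz")
#   handler.setLevel(logging.ERROR)
#   logging.getLogger("myapp").addHandler(handler)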

# File: 1629 Slowest Key.py (repo: AtharvRedij/leetcode-solutions)
'''
URL: https://leetcode.com/problems/slowest-key/
Difficulty: Easy
Description: Slowest Key
A newly designed keypad was tested, where a tester pressed a sequence of n keys, one at a time.
You are given a string keysPressed of length n, where keysPressed[i] was the ith key pressed in the testing sequence, and a sorted list releaseTimes, where releaseTimes[i] was the time the ith key was released. Both arrays are 0-indexed. The 0th key was pressed at the time 0, and every subsequent key was pressed at the exact time the previous key was released.
The tester wants to know the key of the keypress that had the longest duration. The ith keypress had a duration of releaseTimes[i] - releaseTimes[i - 1], and the 0th keypress had a duration of releaseTimes[0].
Note that the same key could have been pressed multiple times during the test, and these multiple presses of the same key may not have had the same duration.
Return the key of the keypress that had the longest duration. If there are multiple such keypresses, return the lexicographically largest key of the keypresses.
Example 1:
Input: releaseTimes = [9,29,49,50], keysPressed = "cbcd"
Output: "c"
Explanation: The keypresses were as follows:
Keypress for 'c' had a duration of 9 (pressed at time 0 and released at time 9).
Keypress for 'b' had a duration of 29 - 9 = 20 (pressed at time 9 right after the release of the previous character and released at time 29).
Keypress for 'c' had a duration of 49 - 29 = 20 (pressed at time 29 right after the release of the previous character and released at time 49).
Keypress for 'd' had a duration of 50 - 49 = 1 (pressed at time 49 right after the release of the previous character and released at time 50).
The longest of these was the keypress for 'b' and the second keypress for 'c', both with duration 20.
'c' is lexicographically larger than 'b', so the answer is 'c'.
Example 2:
Input: releaseTimes = [12,23,36,46,62], keysPressed = "spuda"
Output: "a"
Explanation: The keypresses were as follows:
Keypress for 's' had a duration of 12.
Keypress for 'p' had a duration of 23 - 12 = 11.
Keypress for 'u' had a duration of 36 - 23 = 13.
Keypress for 'd' had a duration of 46 - 36 = 10.
Keypress for 'a' had a duration of 62 - 46 = 16.
The longest of these was the keypress for 'a' with duration 16.
Constraints:
releaseTimes.length == n
keysPressed.length == n
2 <= n <= 1000
1 <= releaseTimes[i] <= 10^9
releaseTimes[i] < releaseTimes[i+1]
keysPressed contains only lowercase English letters.
'''
class Solution:
def slowestKey(self, releaseTimes, keysPressed):
maxDur = releaseTimes[0]
answers = set(keysPressed[0])
for i in range(1, len(releaseTimes)):
if releaseTimes[i] - releaseTimes[i-1] > maxDur:
answers = set(keysPressed[i])
maxDur = releaseTimes[i] - releaseTimes[i-1]
elif releaseTimes[i] - releaseTimes[i-1] == maxDur:
answers.add(keysPressed[i])
return max(answers)
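
# Quick check against the two examples from the problem statement (added for
# illustration):
if __name__ == "__main__":
    print(Solution().slowestKey([9, 29, 49, 50], "cbcd"))        # expected "c"
    print(Solution().slowestKey([12, 23, 36, 46, 62], "spuda"))  # expected "a"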

# File: dynamic_unet/utils.py (repo: dthiagarajan/dynamic_unet)
# AUTOGENERATED! DO NOT EDIT! File to edit: 02_utils.ipynb (unless otherwise specified).
__all__ = ['load_camvid_dataset', 'display_segmentation', 'display_segmentation_from_file', 'CamvidDataset']
# Cell
import matplotlib.pyplot as plt
import os
import torch
import torchvision.transforms.functional as tf
from PIL import Image
# Cell
def load_camvid_dataset(data_directory):
with open(os.path.join(data_directory, "valid.txt"), "r") as f:
val_names = [line.strip() for line in f]
with open(os.path.join(data_directory, "codes.txt"), "r") as f:
label_mapping = {l.strip(): i for i, l in enumerate(f)}
data = []
image_index_mapping = {}
for im_f in os.listdir(os.path.join(data_directory, "images")):
if im_f.split('.')[-1] != 'png':
continue
image_index_mapping[im_f] = len(data)
fp = os.path.join(data_directory, "images", im_f)
data.append(fp)
for label_f in os.listdir(os.path.join(data_directory, "labels")):
im_f = label_f.split('.')
im_f[0] = '_'.join(im_f[0].split('_')[:-1])
im_f = '.'.join(im_f)
index = image_index_mapping[im_f]
fp = os.path.join(data_directory, "labels", label_f)
data[index] = (data[index], fp)
val_indices = [image_index_mapping[name] for name in val_names]
return data, val_indices, label_mapping
# Cell
def display_segmentation(image, target, ax=None):
if ax:
ax.imshow(image, cmap='gray')
else:
plt.imshow(image, cmap='gray')
if ax:
ax.imshow(target, cmap='jet', alpha=0.5)
else:
plt.imshow(target, cmap='jet', alpha=0.5)
plt.show()
def display_segmentation_from_file(im_f, label_f):
im, label = Image.open(im_f), Image.open(label_f)
display_segmentation(im, label)
# Cell
class CamvidDataset(torch.utils.data.Dataset):
def __init__(self, data, resize_shape=(360, 480), is_train=True):
self.images, self.labels = [tpl[0] for tpl in data], \
[tpl[1] for tpl in data]
self.resize_shape = resize_shape
self.is_train = is_train
def transform(self, index):
input, target = map(
Image.open, (self.images[index], self.labels[index]))
input, target = (
tf.resize(input, self.resize_shape),
tf.resize(target, self.resize_shape, interpolation=Image.NEAREST)
)
if self.is_train:
horizontal_draw = torch.rand(1).item()
vertical_draw = torch.rand(1).item()
if horizontal_draw > 0.5:
input, target = tf.hflip(input), tf.hflip(target)
if vertical_draw > 0.5:
input, target = tf.vflip(input), tf.vflip(target)
input, target = map(tf.to_tensor, (input, target))
torch.clamp((255 * target), 0, 32, out=target)
return tf.normalize(input, [0.485, 0.456, 0.406], [0.229, 0.224, 0.225]), target.long()
def __getitem__(self, index):
return self.transform(index)
def __len__(self):
        return len(self.images)
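
# Minimal usage sketch (added for illustration): wire the helpers above into a
# PyTorch DataLoader; the "camvid" directory layout is the one expected by
# load_camvid_dataset.
#
#   data, val_indices, label_mapping = load_camvid_dataset("camvid")
#   train = [d for i, d in enumerate(data) if i not in set(val_indices)]
#   loader = torch.utils.data.DataLoader(CamvidDataset(train), batch_size=4)
#   images, targets = next(iter(loader))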

# File: krysztalki/workDir/tests/test matrices/test_matrices_like_3_xxx.py (repo: woblob/Crystal_Symmetry)
import matrices_new_extended as mne
import numpy as np
import sympy as sp
from equality_check import Point
x, y, z = sp.symbols("x y z")
Point.base_point = np.array([x, y, z, 1])
class Test_Axis_3_xxx:
def test_matrix_3_xxx(self):
expected = Point([ z, x, y, 1])
calculated = Point.calculate(mne._matrix_3_xxx)
assert calculated == expected
def test_matrix_3_1_mtmHxx_ttt(self):
expected = Point([ z, 1+x, 1+y, 1])
calculated = Point.calculate(mne._matrix_3_1_mtmHxx_ttt)
assert calculated == expected
def test_matrix_3_1_HxmHxx_ttt(self):
expected = Point([ 1+z, x, 1+y, 1])
calculated = Point.calculate(mne._matrix_3_1_HxmHxx_ttt)
assert calculated == expected
def test_matrix_3_1_Hxtxx_ttt(self):
expected = Point([ 1+z, 1+x, y, 1])
calculated = Point.calculate(mne._matrix_3_1_Hxtxx_ttt)
assert calculated == expected
def test_matrix_3_xxx_hhh(self):
expected = Point([ 1+z, 1+x, 1+y, 1])
calculated = Point.calculate(mne._matrix_3_xxx_hhh)
assert calculated == expected

# File: q2_convex_hull/tests/test_classifier.py (repo: dpear/convex-hull)
from unittest import TestCase, main
class ClassifierTests(TestCase):
def test_run_random_forests(self):
# take pandas series describing binary labels for samples
# take a feature table
# construct a classifier
# return measures indicating the performance of the classifier
self.fail()
def test_append_to_results_hierarchy(self):
# take classification performance results
# label the results by the ncbi taxonomy id describing the clade
# add them to a node in our results hierarchy
self.fail()
def test_save_results_hierarchy(self):
# do smart stuff
self.fail()
if __name__ == '__main__':
main()

# File: primeira_lista/ex002.py (repo: PedroSantana2/exercicios-em-python)
'''
Write a program that asks for a number and then shows the message: "O número informado foi [número]".
'''
numero = input('Digite um número: ')
print('O número informado foi: {}'.format(numero))

# File: mqtt_gateways/utils/app_properties.py (repo: ppt000/mqtt_gateways)
'''
Helpers holding application-wide properties (name, path, root logger) and
utilities to derive file paths and namespaced loggers from them.
'''
from collections import namedtuple
import logging
import os.path
import sys
_THIS = sys.modules[__name__]
AppProperties = namedtuple('AppProperties', ('name', 'path', 'root_logger', 'init', 'get_path', 'get_logger'))
def __dummy(*args, **kwargs):
pass
def __get_logger(fullmodulename):
    ''' Return a logger namespaced under the application name. '''
if fullmodulename == '__main__' or fullmodulename == _THIS.Properties.name:
logname = _THIS.Properties.name
else:
modulename = fullmodulename.split('.')[-1]
if not modulename: logname = _THIS.Properties.name
else: logname = '.'.join((_THIS.Properties.name, modulename))
return logging.getLogger(logname)
def __get_path(extension, path_given=None):
'''
Generates the full absolute path of a file.
This function builds an absolute path to a file based on 3 'default' arguments
(the basename of the file, the extension of the file, and an absolute path) and
an extra argument that represents a valid path.
Depending on what represents this path (a directory, a file, an absolute or a
relative reference) the function will generate a full absolute path, relying on the
'default' parameters if and when necessary.
The generation of the full path follows those rules:
- the default name is made of the default basename and the default extension;
- if the path given is empty, then the full path is the default absolute path
with the default filename;
- if the path given contains a filename at the end, this is the filename to be used;
- if the path given contains an absolute path at the beginning, that is the
absolute path that will be used;
- if the path given contains only a relative path at the beginning, then
the default absolute path will be prepended to the path given.
    Args:
        extension (string): the extension of the file, in the form '.xxx', i.e. with the dot
        path_given (string): the path given as alternative to the default; the
            default basename and absolute path come from the application Properties
Returns:
string: a full absolute path
'''
dfltname = ''.join((_THIS.Properties.name, extension))
if path_given == '':
filepath = os.path.join(_THIS.Properties.path, dfltname)
else:
dirname, filename = os.path.split(path_given.strip())
if dirname != '': dirname = os.path.normpath(dirname)
if filename == '': filename = dfltname
if dirname == '': dirname = _THIS.Properties.path
elif not os.path.isabs(dirname): dirname = os.path.join(_THIS.Properties.path, dirname)
filepath = os.path.join(dirname, filename)
return os.path.normpath(filepath)
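# Worked examples for __get_path (added for illustration), assuming Properties
# was initialised with name 'app' and path '/opt/app' on a POSIX system:
#
#   __get_path('.log', '')                  # -> '/opt/app/app.log'
#   __get_path('.log', 'logs/')             # -> '/opt/app/logs/app.log'
#   __get_path('.log', '/var/log/app.log')  # -> '/var/log/app.log'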
def __init_properties(full_path):
name = os.path.splitext(os.path.basename(full_path))[0] # first part of the filename, without extension
path = os.path.realpath(os.path.dirname(full_path)) # full path of the launching script
root_logger = logging.getLogger(name)
_THIS.Properties = AppProperties(name, path, root_logger, __dummy, __get_path, __get_logger)
Properties = AppProperties('', '', None, __init_properties, __dummy, __dummy)

# File: datasette/database.py (repo: heussd/datasette)
from pathlib import Path
from .utils import (
QueryInterrupted,
detect_fts,
detect_primary_keys,
detect_spatialite,
get_all_foreign_keys,
get_outbound_foreign_keys,
sqlite3,
table_columns,
)
from .inspect import inspect_hash
class Database:
def __init__(self, ds, path=None, is_mutable=False, is_memory=False):
self.ds = ds
self.path = path
self.is_mutable = is_mutable
self.is_memory = is_memory
self.hash = None
self.cached_size = None
self.cached_table_counts = None
if not self.is_mutable:
p = Path(path)
self.hash = inspect_hash(p)
self.cached_size = p.stat().st_size
# Maybe use self.ds.inspect_data to populate cached_table_counts
if self.ds.inspect_data and self.ds.inspect_data.get(self.name):
self.cached_table_counts = {
key: value["count"]
for key, value in self.ds.inspect_data[self.name]["tables"].items()
}
def connect(self):
if self.is_memory:
return sqlite3.connect(":memory:")
# mode=ro or immutable=1?
if self.is_mutable:
qs = "mode=ro"
else:
qs = "immutable=1"
return sqlite3.connect(
"file:{}?{}".format(self.path, qs), uri=True, check_same_thread=False
)
@property
def size(self):
if self.is_memory:
return 0
if self.cached_size is not None:
return self.cached_size
else:
return Path(self.path).stat().st_size
async def table_counts(self, limit=10):
if not self.is_mutable and self.cached_table_counts is not None:
return self.cached_table_counts
# Try to get counts for each table, $limit timeout for each count
counts = {}
for table in await self.table_names():
try:
table_count = (
await self.ds.execute(
self.name,
"select count(*) from [{}]".format(table),
custom_time_limit=limit,
)
).rows[0][0]
counts[table] = table_count
# In some cases I saw "SQL Logic Error" here in addition to
# QueryInterrupted - so we catch that too:
except (QueryInterrupted, sqlite3.OperationalError):
counts[table] = None
if not self.is_mutable:
self.cached_table_counts = counts
return counts
@property
def mtime_ns(self):
return Path(self.path).stat().st_mtime_ns
@property
def name(self):
if self.is_memory:
return ":memory:"
else:
return Path(self.path).stem
async def table_exists(self, table):
results = await self.ds.execute(
self.name,
"select 1 from sqlite_master where type='table' and name=?",
params=(table,),
)
return bool(results.rows)
async def table_names(self):
results = await self.ds.execute(
self.name, "select name from sqlite_master where type='table'"
)
return [r[0] for r in results.rows]
async def table_columns(self, table):
return await self.ds.execute_against_connection_in_thread(
self.name, lambda conn: table_columns(conn, table)
)
async def primary_keys(self, table):
return await self.ds.execute_against_connection_in_thread(
self.name, lambda conn: detect_primary_keys(conn, table)
)
async def fts_table(self, table):
return await self.ds.execute_against_connection_in_thread(
self.name, lambda conn: detect_fts(conn, table)
)
async def label_column_for_table(self, table):
explicit_label_column = self.ds.table_metadata(self.name, table).get(
"label_column"
)
if explicit_label_column:
return explicit_label_column
# If a table has two columns, one of which is ID, then label_column is the other one
column_names = await self.ds.execute_against_connection_in_thread(
self.name, lambda conn: table_columns(conn, table)
)
# Is there a name or title column?
name_or_title = [c for c in column_names if c in ("name", "title")]
if name_or_title:
return name_or_title[0]
if (
column_names
and len(column_names) == 2
and ("id" in column_names or "pk" in column_names)
):
return [c for c in column_names if c not in ("id", "pk")][0]
# Couldn't find a label:
return None
async def foreign_keys_for_table(self, table):
return await self.ds.execute_against_connection_in_thread(
self.name, lambda conn: get_outbound_foreign_keys(conn, table)
)
async def hidden_table_names(self):
# Mark tables 'hidden' if they relate to FTS virtual tables
hidden_tables = [
r[0]
for r in (
await self.ds.execute(
self.name,
"""
select name from sqlite_master
where rootpage = 0
and sql like '%VIRTUAL TABLE%USING FTS%'
""",
)
).rows
]
has_spatialite = await self.ds.execute_against_connection_in_thread(
self.name, detect_spatialite
)
if has_spatialite:
# Also hide Spatialite internal tables
hidden_tables += [
"ElementaryGeometries",
"SpatialIndex",
"geometry_columns",
"spatial_ref_sys",
"spatialite_history",
"sql_statements_log",
"sqlite_sequence",
"views_geometry_columns",
"virts_geometry_columns",
] + [
r[0]
for r in (
await self.ds.execute(
self.name,
"""
select name from sqlite_master
where name like "idx_%"
and type = "table"
""",
)
).rows
]
# Add any from metadata.json
db_metadata = self.ds.metadata(database=self.name)
if "tables" in db_metadata:
hidden_tables += [
t
for t in db_metadata["tables"]
if db_metadata["tables"][t].get("hidden")
]
# Also mark as hidden any tables which start with the name of a hidden table
# e.g. "searchable_fts" implies "searchable_fts_content" should be hidden
for table_name in await self.table_names():
for hidden_table in hidden_tables[:]:
if table_name.startswith(hidden_table):
hidden_tables.append(table_name)
continue
return hidden_tables
async def view_names(self):
results = await self.ds.execute(
self.name, "select name from sqlite_master where type='view'"
)
return [r[0] for r in results.rows]
async def get_all_foreign_keys(self):
return await self.ds.execute_against_connection_in_thread(
self.name, get_all_foreign_keys
)
async def get_outbound_foreign_keys(self, table):
return await self.ds.execute_against_connection_in_thread(
self.name, lambda conn: get_outbound_foreign_keys(conn, table)
)
async def get_table_definition(self, table, type_="table"):
table_definition_rows = list(
await self.ds.execute(
self.name,
"select sql from sqlite_master where name = :n and type=:t",
{"n": table, "t": type_},
)
)
if not table_definition_rows:
return None
return table_definition_rows[0][0]
async def get_view_definition(self, view):
return await self.get_table_definition(view, "view")
def __repr__(self):
tags = []
if self.is_mutable:
tags.append("mutable")
if self.is_memory:
tags.append("memory")
if self.hash:
tags.append("hash={}".format(self.hash))
if self.size is not None:
tags.append("size={}".format(self.size))
tags_str = ""
if tags:
tags_str = " ({})".format(", ".join(tags))
return "<Database: {}{}>".format(self.name, tags_str)

# File: trusat/satid.py (repo: TruSat/trusat-orb)
#!/usr/bin/env python
from __future__ import print_function
from __future__ import division # Eliminate need for decimals on whole values
import sys
# As of 28 July 2019, python3.6 is the default "python3" in apt-get install python3
if sys.version_info[0] != 3 or sys.version_info[1] < 6:
print("This script requires Python version 3.6")
sys.exit(1)
import configparser # config file parsing
import argparse # command line parsing
import os
from datetime import date, timedelta, datetime
from time import time # For performance timing
from math import acos, asin, atan, cos, sin, tan, degrees # Fast/precise math functions
import numpy as np
import logging
import string
from spacetrack import SpaceTrackClient
# These are necessary until <NAME> approves pull requests
# https://github.com/brandon-rhodes/python-sgp4/pull/35
sys.path.insert(1, '../python-sgp4')
# https://github.com/skyfielders/python-skyfield/pull/276
sys.path.insert(2, '/Users/chris/Dropbox/code/preMVP/python-skyfield')
# FIXME: Note python-skyfield is not currently compatible with cythonized python-SGP4
from skyfield.iokit import Loader, download, parse_tle
from skyfield import sgp4lib
# The following 5 lines are necessary until our modules are public
import inspect
currentdir = os.path.dirname(os.path.abspath(inspect.getfile(inspect.currentframe())))
parentdir = os.path.dirname(currentdir)
tle_path = os.path.join(parentdir, "sathunt-tle")
sys.path.insert(1,tle_path)
from tle_util import make_tle, append_tle_file
def unit_vector(vector):
""" Returns the unit vector of the vector. """
return vector / mag(vector)
def proj(v2, v1):
""" Returns the unit vector projection of v1 onto v2 """
b = np.dot(v2, v1)/np.dot(v2, v2)
temp = np.multiply(b, v2)
# Make unit vector
vp = unit_vector(temp)
return vp
def flat_proj(v1, v2):
""" Returns the flat projection of direction unit vector, v1 onto v2 """
temp1 = np.cross(v1, v2)
temp2 = np.cross(temp1, v1)
return proj(temp2, v2)
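# Worked example (added for illustration): with v1 = (1, 0, 0) and
# v2 = (1, 1, 0), flat_proj(v1, v2) returns (0., 1., 0.) -- the unit component
# of v2 perpendicular to v1.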
def angle_between(v1, v2):
""" Returns the angle in radians between vectors 'v1' and 'v2'::
>>> angle_between((1, 0, 0), (0, 1, 0))
1.5707963267948966
>>> angle_between((1, 0, 0), (1, 0, 0))
0.0
>>> angle_between((1, 0, 0), (-1, 0, 0))
3.141592653589793
Partially Ref: angle(vec1,vec2) in python-sgp4/ext.py
"""
small = 0.00000001
undefined = 999999.1
v1_u = unit_vector(v1)
v2_u = unit_vector(v2)
    magv1 = mag(v1)
    magv2 = mag(v2)
if (magv1 * magv2 > small * small):
return np.arccos(np.clip(np.dot(v1_u, v2_u), -1.0, 1.0))
else:
return undefined
def mag(v):
""" Computes the magnitude of a vector ||v||
Renamed from norm(v) used in original Scott Campbell code
to better correspond to function names in SGP4 code.
"""
mag = np.sqrt(np.dot(v, v))
return mag
def main():
""" Interactive tool for finding an unknown TLE object within a library of TLEs
TODO:
    - Implement argv[1] = unid.txt, argv[2] = refer.tle
    - Make non-interactive callable version
    - Make stand-alone version that uses python-sgp4 exclusively, not tle_util
- Incorporate logic to read the (first?) TLE from the UNID file
- Incorporate logic to warn/error the user if no TLEs found
- Incorporate Perr/Alpha inputs as command line/config flags
- Put in more compares for altitude, velocity, etc.
"""
t0 = time()
# Read commandline options
conf_parser = argparse.ArgumentParser(description='Utility to assist in ID of an unidentified (unid) satellite')
conf_parser.add_argument("-c", "--conf_file",
help="Specify configuration file. [Default configuration.ini]",
dest='conf_file',
nargs='?',
const=1,
default='configuration.ini',
type=str,
metavar="FILE")
conf_parser.add_argument("-d", "--datadir",
help="data directory [default ./data]",
dest='datadir',
default='./data',
nargs='?',
const=1,
type=str,
metavar="DIR")
conf_parser.add_argument("--tleref",
help="Specify TLE reference file. [Default refer.tle]",
dest='tle_ref',
nargs='?',
type=str,
metavar="REFTLE")
conf_parser.add_argument("--tleunid",
help="Specify TLE unid file. [Default unid.tle]",
dest='tle_unid',
nargs='?',
type=str,
metavar="UNID")
conf_parser.add_argument("--update", help="update TLEs from online sources",
action="store_true")
conf_parser.add_argument("-dbname", "--database",
help="database to USE",
dest='dbname',
default='opensatcat_dev',
nargs='?',
const=1,
type=str,
metavar="NAME")
conf_parser.add_argument("-H", "--hostname",
help="database hostname",
dest='dbhostname',
default='opensatcat.cvpypmmxjtv1.us-east-2.rds.amazonaws.com',
nargs='?',
const=1,
type=str,
metavar="HOSTNAME")
conf_parser.add_argument("-u", "--user",
help="database user name",
dest='dbusername',
nargs='?',
type=str,
metavar="USER")
conf_parser.add_argument("-p", "--password",
help="database user password",
dest='dbpassword',
nargs='?',
type=str,
metavar="PASSWD")
conf_parser.add_argument("-t", "--dbtype",
help="database type [INFILE, sqlserver, sqlite] \
default: INFILE",
dest='dbtype',
nargs='?',
choices=['INFILE', 'sqlserver', 'sqlite'],
default='INFILE',
type=str,
metavar="TYPE")
conf_parser.add_argument("-i", "--import", help="Import TLEs to database",
dest='importTLE',
action="store_true")
conf_parser.add_argument("-q", "--quiet", help="Suppress console output",
dest='quiet',
action="store_true")
conf_parser.add_argument("-V", "--verbose",
help="increase verbosity: 0 = only warnings, 1 = info, 2 = debug. No number means info. Default is no verbosity.",
const=1,
default=1,
type=int,
nargs="?")
# Process commandline options and parse configuration
cfg = configparser.ConfigParser(inline_comment_prefixes=('#', ';'))
args = conf_parser.parse_args()
log = logging.getLogger(__name__)
# make it print to the console.
console = logging.StreamHandler()
log.addHandler(console)
conf_file = args.conf_file
tle_ref = args.tle_ref
tle_unid = args.tle_unid
update = args.update
datadir = args.datadir
dbname = args.dbname
dbhostname = args.dbhostname
dbusername = args.dbusername
dbpassword = args.dbpassword
dbtype = args.dbtype
importTLE = args.importTLE
verbose = args.verbose
quiet = args.quiet
# Set our python-skyfield data directory
load = Loader(datadir)
ts = load.timescale()
    if not quiet:
if verbose == 0:
log.setLevel(logging.WARN)
elif verbose == 1:
log.setLevel(logging.INFO)
elif verbose == 2:
log.setLevel(logging.DEBUG)
log.debug("Log level set to {}".format(log.level))
if verbose:
for arg in vars(args):
log.debug("%s : %s",arg, getattr(args, arg))
cfg.read([args.conf_file])
log.info("Reading config from: {}".format(args.conf_file))
# 1st arg in original version
if not (tle_ref):
try:
tle_ref = cfg.get('Common', 'tle_ref')
        except (configparser.NoSectionError, configparser.NoOptionError):
tle_ref = "refer.tle"
# 2nd arg in original version
if not (tle_unid):
try:
tle_unid = cfg.get('Common', 'tle_unid')
        except (configparser.NoSectionError, configparser.NoOptionError):
tle_unid = "unid.txt"
# # Read single (first?) TLE from UNIDentified TLE file
# TLE_UNID = tle_util.TLEFile(tle_unid,strict=False)
# for sat_num in TLE_UNID.Satellites: # Need learn a better method to get just the first/only record
# #// id_sat comparison variables
# #// Date t1(tle);
# #// Satellite id_sat(t1.jd, ii, om, ec, ww, ma, nn, bstar);
# UNIDsat = TLE_UNID.Satellites[sat_num]
# # echo tle to screen
# log.info("{LINE1}\n{LINE2}".format(LINE1=UNIDsat.line1, LINE2=UNIDsat.line2))
# # Most popular const used by TLEs
# whichconst = sgp4.earth_gravity.wgs72
# afspc_mode = False
# (satn, epoch, xbstar, xecco, xargpo, xinclo, xmo, xno, xnodeo) = UNIDsat.satrec
# # id_satrec = sgp4init(whichconst, afspc_mode, satn, epoch, xbstar, xecco, xargpo, xinclo, xmo, xno, xnodeo)
# # (rr,vv) = sgp4(id_satrec, tsince=0, whichconst=whichconst)
# id_sat = sgp4.io.twoline2rv(UNIDsat.line1, UNIDsat.line2, whichconst, afspc_mode=False)
# (year, monnth, day, hour, minute, second) = UNIDsat.epoch.timetuple()[:6]
UNIDtle = load.tle(url=tle_unid,reload=False)
# Make sure UNID satellite appears only once
# UNIDtle = set(UNIDtle.values())
if(not UNIDtle):
log.error("No TLEs found in file: {}".format(tle_unid))
log.error("Run elfind first?")
sys.exit()
    # Get the first satellite out of the dict
    satnum = next(iter(UNIDtle))
    UNID = UNIDtle[satnum]
# t_unid = ts.ut1(jd=UNID.model.jdsatepoch)
t_unid = UNID.epoch
# Get r,v data at its own EPOCH
# (rr, vv) = id_sat.propagate(year, monnth, day, hour, minute, second)
(rr, vv, id_sat_err) = UNID._position_and_velocity_TEME_km(t_unid)
id_sat_rr = np.array(rr)
id_sat_vv = np.array(vv)
# print(id_sat_rr)
# print(id_sat_vv)
# flat projection of UNID satellite direction unit vector, vp1
vp1 = flat_proj(rr, vv)
# Set Perr error bound
err1 = input(" position error, degrees [2]: ")
err1 = err1 or 2
err1 = float(err1)
# Set alpha error bound
err2 = input(" track angle error, degrees [20]: ")
err2 = err2 or 20
err2 = float(err2)
# Read in REFERENCE element list, and loop through for potential solutions within error bounds
REFtle = load.tle(url=tle_ref,reload=False)
# Make sure REFtle satellites appears only once
REFtle = set(REFtle.values())
for ref_sat in REFtle:
# log.debug("Comparing against {}".format(sat_num))
# if(ref_sat.model.satnum == 26905):
# print("here")
# Get r,v data at UNID epoch
(rr, vv, ref_sat_err) = ref_sat._position_and_velocity_TEME_km(t_unid)
ref_sat_rr = np.array(rr)
ref_sat_vv = np.array(vv)
# delr - satellite delta r vector
delr = np.subtract(id_sat_rr, ref_sat_rr)
# delr - flat projection of delta r unit vector
delr = flat_proj(id_sat_rr, delr)
# alpha - angle between delr and id_sat.vv, radians
alpha = angle_between(delr, id_sat_vv)
# Per - angle between position unit vectors, radians
Perr = angle_between(ref_sat_rr, id_sat_rr)
# delta - magnitude of Perr in direction of id_sat.vv (UNID velocity), radians
delt = atan(tan(Perr) * cos(alpha))
# delta_t - time of flight to Closest Point of Approach (cpa) seconds
# rr, vv already in units of km, km/s. No need to convert.
delta_t = delt * mag(id_sat_rr) / mag(id_sat_vv)
# cpa - Closest Point of Approach (cpa), radians
cpa = asin(sin(alpha) * sin(Perr))
# vp2 - flat projection of REF satellite direction unit vector
vp2 = flat_proj(ref_sat_rr, ref_sat_vv)
        # alpha - angle between direction unit vectors, radians
        # (dot product clipped to [-1, 1] to guard against floating-point overshoot)
        alpha = acos(np.clip(np.dot(vp1, vp2), -1.0, 1.0))
# Calculate REF deltas from UNID
try:
alpha = acos(cos(alpha)/cos(delt))
except ValueError:
alpha = float('nan')
# Prepare for presentation to user
alpha = degrees(alpha) # angle between direction unit vectors
Perr = degrees(Perr) # angle between position unit vectors
# Compare UNID to REF using osculating elements (close enough)
if((Perr < err1) and (alpha < err2)):
# tle = None # epoch of elements in tle format
# ii = None # inclination, degrees
# om = None # right ascension of ascending node, degrees
# ec = None # eccentricity
# ww = None # argument of the perigee, degrees
# ma = None # mean anomaly, degrees
# nn = None # mean motion, revolutions/day
# uu = None # true longitude
# c2 = None # bstar coefficient
# bstar = None # BSTAR drag term
# name[81] = None
# visually check match parameters using advanced mean elements
# Write tle to screen
(tle_line0, tle_line1, tle_line2) = make_tle(
name=ref_sat.name,
ssn=ref_sat.model.satnum,
epoch_datetime=ref_sat.epoch.utc_datetime(),
xincl=ref_sat.model.inclo,
xnodeo=ref_sat.model.nodeo,
eo=ref_sat.model.ecco,
omegao=ref_sat.model.argpo,
xmo=ref_sat.model.mo,
xno=degrees(ref_sat.model.no_kozai*1440.0)/360.0,
deg=False)
log.info(" position error {:4.1f}".format(Perr))
log.info("track angle error {:4.1f}\n".format(alpha))
log.info(" time error {:4.0f}".format(delta_t))
log.info(" to closest point {:4.1f}\n".format(degrees(cpa)))
tle_file_path = os.path.join(datadir, tle_unid)
append_tle_file(tle_file_path, tle_line0, tle_line1, tle_line2)
get_continue = input("\n[Next]")
# // s_in("\n[Next]", buf);
# // } // if match
# // } // while
# // s_in("\n[Done]", buf);
get_continue = input("\n[Done]")
# // system(id_file);
# // } // end main
if __name__ == '__main__':
main() | 2.046875 | 2 |
tests/compute_diversity.py | ariel415el/GPDM | 18 | 12765565 | <filename>tests/compute_diversity.py
import os
from collections import defaultdict
import cv2
import numpy as np
def read_image_as_grayscale(path):
ref = cv2.imread(path)
return cv2.cvtColor(ref, cv2.COLOR_BGR2GRAY)
def compute_images_diversity(ref_dir, image_dirs):
"""To quantify the diversity of the generated images,
for each training example we calculated the standard devia-
tion (std) of the intensity values of each pixel over 100 gen-
erated images, averaged it over all pixels, and normalized
by the std of the intensity values of the training image.
"""
all_diversities = []
for ref_filename in os.listdir(ref_dir):
ref_gray = read_image_as_grayscale(os.path.join(ref_dir, ref_filename))
images = []
for images_dir in image_dirs:
images.append(read_image_as_grayscale(os.path.join(images_dir, ref_filename)))
images = np.stack(images)
diversity = np.std(images, axis=0).mean() / np.std(ref_gray)
all_diversities.append(diversity)
return np.mean(all_diversities)
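
# Minimal usage sketch (the directory names below are assumptions, not part
# of the repo): every dir in image_dirs must contain one generated image per
# reference filename.
if __name__ == '__main__':
    runs = ['outputs/run_%d' % i for i in range(3)]  # hypothetical paths
    print(compute_images_diversity('data/references', runs))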
| 3.046875 | 3 |
hard-gists/5720029/snippet.py | jjhenkel/dockerizeme | 21 | 12765566 | import arcpy
import os
def create_path(path):
if not os.path.exists(path):
print "Creating directory {}".format(path)
os.makedirs(path)
# default application data dir; e.g. c:\Users\scw\AppData\Roaming
app_data_path = os.getenv('APPDATA')
# get current ArcGIS version
arc_version = arcpy.GetInstallInfo()['Version']
# change this path if you'd like the spatial references to be written
# out elsewhere, this is the default directory for .prj files.
output_base = '{0}\\ESRI\\Desktop{1}\\ArcMap\\Coordinate Systems'.format(app_data_path, arc_version)
create_path(output_base)
for sr_name in arcpy.ListSpatialReferences():
    # spatial reference names use '/' separators; convert them to
    # path separators so nested .prj directories are created.
    sr_filename = "{}.prj".format(sr_name.replace("/", "\\"))
output_file = os.path.join(output_base, sr_filename)
output_dir = os.path.dirname(output_file)
# create this parent directory, if needed
create_path(output_dir)
with open(output_file, 'w') as prj_file:
sr = arcpy.SpatialReference(sr_name)
prj_file.write(sr.exportToString()) | 2.703125 | 3 |
rover_ml/behavior_cloning/src/behavior_cloning/train_model.py | shawshany/diy_driverless_car_ROS | 1 | 12765567 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#https://github.com/juano2310/CarND-Behavioral-Cloning-P3-Juan/blob/master/model.py
#https://github.com/udacity/self-driving-car/blob/master/steering-models/community-models/rambo/train.py
import os
import csv
import cv2
import numpy as np
import matplotlib.pyplot as plt
import tensorflow as tf
from keras import backend as K
from keras.models import Model, Sequential
from keras.layers import Dense, GlobalAveragePooling2D, MaxPooling2D, Lambda, Cropping2D
from keras.layers.convolutional import Convolution2D
from keras.layers.core import Flatten, Dense, Dropout, SpatialDropout2D
from keras.optimizers import Adam
from keras.callbacks import ModelCheckpoint
import sklearn
from sklearn.model_selection import train_test_split
samples = []
with open('../../../output/conde_gazebo/interpolated.csv') as csvfile:
reader = csv.reader(csvfile)
for line in reader:
samples.append(line)
sklearn.utils.shuffle(samples)
train_samples, validation_samples = train_test_split(samples, test_size=0.2)
print("Number of traing samples: ",len(train_samples))
print("Number of validation samples: ",len(validation_samples))
#index,timestamp,width,height,frame_id,filename,angle,speed
def generator(samples, batch_size=32):
num_samples = len(samples)
while 1: # Loop forever so the generator never terminates
for offset in range(0, num_samples, batch_size):
batch_samples = samples[offset:offset+batch_size]
#print(batch_samples)
images = []
angles = []
for batch_sample in batch_samples:
if batch_sample[5] != "filename":
                        # the filename field may contain Windows-style separators;
                        # keep only the basename
                        path = os.path.normpath(batch_sample[5]).split(os.path.sep)
                        name = '../../../output/conde_gazebo/center/'+path[1].split('\\')[-1]
center_image = cv2.imread(name)
                        center_image = cv2.resize(center_image, (320,180)) # resize from 1280x720 to 320x180 (w x h)
#plt.imshow(left_image)
#plt.show()
angle = float(batch_sample[6])
images.append(center_image)
angles.append(angle)
flip_image = np.fliplr(center_image)
flip_angle = -1 * angle
images.append(flip_image)
angles.append(flip_angle)
X_train = np.array(images)
y_train = np.array(angles)
yield sklearn.utils.shuffle(X_train, y_train)
# compile and train the model using the generator function
batch_size_value = 32
n_epoch = 150
train_generator = generator(train_samples, batch_size=batch_size_value)
validation_generator = generator(validation_samples, batch_size=batch_size_value)
model = Sequential()
# trim image to only see section with road
model.add(Cropping2D(cropping=((50,20), (0,0)), input_shape=(180,320,3)))
# Preprocess incoming data, centered around zero with small standard deviation
model.add(Lambda(lambda x: (x / 255.0) - 0.5))
#Nvidia model
model.add(Convolution2D(24, (5, 5), activation="relu", name="conv_1", strides=(2, 2)))
model.add(Convolution2D(36, (5, 5), activation="relu", name="conv_2", strides=(2, 2)))
model.add(Convolution2D(48, (5, 5), activation="relu", name="conv_3", strides=(2, 2)))
model.add(SpatialDropout2D(.5))  # Keras 2 removed the legacy dim_ordering kwarg; the default data_format applies
model.add(Convolution2D(64, (3, 3), activation="relu", name="conv_4", strides=(1, 1)))
model.add(Convolution2D(64, (3, 3), activation="relu", name="conv_5", strides=(1, 1)))
model.add(Flatten())
model.add(Dense(1164))
model.add(Dropout(.5))
model.add(Dense(100, activation='relu'))
model.add(Dropout(.5))
model.add(Dense(50, activation='relu'))
model.add(Dropout(.5))
model.add(Dense(10, activation='relu'))
model.add(Dropout(.5))
model.add(Dense(1))
model.compile(loss='mse', optimizer='adam')
model.summary()
# checkpoint
filepath="../../weights/weights-improvement-{epoch:02d}-{val_loss:.2f}.hdf5"
checkpoint = ModelCheckpoint(filepath, monitor='val_loss', verbose=1, save_best_only=True, mode='auto', period=1)
callbacks_list = [checkpoint]
# Fit the model
history_object = model.fit_generator(train_generator, steps_per_epoch=(len(train_samples) / batch_size_value), validation_data=validation_generator, validation_steps=(len(validation_samples)/batch_size_value), callbacks=callbacks_list, epochs=n_epoch)
# Save model
model.save('model.h5')
with open('model.json', 'w') as output_json:
output_json.write(model.to_json())
# Save TensorFlow model
tf.train.write_graph(K.get_session().graph.as_graph_def(), logdir='.', name='model.pb', as_text=False)
# Plot the training and validation loss for each epoch
print('Generating loss chart...')
plt.plot(history_object.history['loss'])
plt.plot(history_object.history['val_loss'])
plt.title('model mean squared error loss')
plt.ylabel('mean squared error loss')
plt.xlabel('epoch')
plt.legend(['training set', 'validation set'], loc='upper right')
plt.savefig('model.png')
# Done
print('Done.')
| 2.5 | 2 |
git_repos/bin/app/uix/canvasbasics/quicar.py | saidino84/kivymd_projects_2021_10_16 | 1 | 12765568 | from kivy.uix.widget import Widget
from kivy.graphics.instructions import Canvas
from kivy.graphics.vertex_instructions import Line,Ellipse ,Ellipse,Rectangle
from kivy.graphics.context_instructions import Color
from kivy.metrics import dp
from kivy.clock import Clock
from kivymd.uix.button import MDRaisedButton
class Quicar(Widget):
    # @override
    def on_size(self, *args):
        """Triggered when the window is resized."""
        print(f'{self.width}')
        # center the ball
        self.ball.pos = (self.center_x - self.ball_size / 2, self.center_y - self.ball_size / 2)
# def on_touch_down(self,kv):
# print('start')
# self.start()
def start_game(self,*args):
print(f'check play: {self.opacity}')
if(self.opacity==1):
print('play started')
self.start()
return
print('Unable to play')
def __init__(self,**k):
super().__init__(**k)
self.ball_size=dp(50)
        # velocities
self.vx=dp(3)
self.vy=dp(3)
self.bouncing=False
# self.add_widget(MDRaisedButton(text='Begin',id='btn',on_press=self.start))
with self.canvas:
            self.rect = Rectangle(pos=(200, 220), size=(60, 60))  # filled rectangle
Color(1,1,1,1)
self.ball=Ellipse(pos=self.center, size=(self.ball_size,self.ball_size))
def move(self,arg):
x,y =self.rect.pos
w,h=self.rect.size
incrementer=dp(10)
diff =self.width-(x+w)*2
if(diff < incrementer):
incrementer=diff
x+=incrementer
self.rect.pos =(x,y)
def start(self):
if(self.opacity==1):
# Clock.schedule_interval(self.move,1/60)
self.bouncing =not self.bouncing
print(self.bouncing)
if self.bouncing==True:
                Clock.unschedule(self.move_ball)
self.ball.pos=self.center
return
Clock.schedule_interval(self.move_ball,1/60)
def update_ui_txts(self,x,y):
if(self.opacity==1):
self.parent.ids['res'].text=f"""
x:{x} y:{y} pos: {self.ball.pos}
"""
else:
self.parent.ids['res'].text=''
def move_ball(self,dt):
"""TODO
fazer abolar quicar se movendo nos
eixos x e y e se passar fora do window do pai
eles devem voltar pra atras
VIDEO 2:21:10
"""
x,y=self.ball.pos
        # WHEN THE BALL HITS THE TOP
        """If ball.y + its size exceeds the window height,
        counter it by flipping the sign of vy, the ball's
        y-position incrementer.
        """
if y + self.ball_size > self.height:
self.vy=-self.vy
if x +self.ball_size >self.width:
self.vx = -self.vx
        # WHEN THE BALL HITS THE FLOOR
        """If the ball touches the floor, self.vy was negative; to flip
        it back to positive, do
        self.vy = -(self.vy)
        e.g. vy = -10, then vy = -vy == +10
        """
if x<0:
x=0
self.vx =-self.vx
if y<0:
y=0
self.vy =-self.vy
x+=self.vx
y+=self.vy
self.ball.pos=(x,y)
self.update_ui_txts(x,y)
| 2.75 | 3 |
timesheet_utils/auth.py | MirkoPs/timesheet-utils | 0 | 12765569 | from timesheet_utils.service_comunication import request
from werkzeug.exceptions import Unauthorized, Forbidden
import os
def get_logged_user():
users_service_url_port = os.path.expandvars(
os.environ.get('USERS_SERVICE_URL_PORT')
)
data = request(
'{}{}/me/'.format(
users_service_url_port,
os.environ.get('USERS_SERVICE_PREFIX')
),
check_ok=False
)
if data.status_code != 200:
print("'{}/me/' returned '{}' status".format(
os.environ.get('USERS_SERVICE_PREFIX'),
data.status_code
))
raise Unauthorized(data.json()['msg'])
return data.json()
def require_login(
only_with_roles: list = None,
only_with_all_permissions: list = None,
only_with_any_permissions: list = None,
):
def decorator(func):
def wrapper(self, *args, **kwargs):
user = get_logged_user()
kwargs['user'] = user
user_roles = user['roles']
user_permissions = user['permissions']
for role in user_roles:
if only_with_roles and role.name not in only_with_roles:
raise Forbidden(
"'{}' role is not allowed to get this content!".format(role.name)
)
            all_missing = (only_with_all_permissions and not all(
                item in user_permissions
                for item in only_with_all_permissions))
            any_missing = (only_with_any_permissions and not any(
                item in user_permissions
                for item in only_with_any_permissions))
            if all_missing or any_missing:
raise Forbidden(
"You are not allowed to get this content!"
)
return func(self, *args, **kwargs)
return wrapper
return decorator
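
# Usage sketch (hedged; the view class, method, and permission names below
# are hypothetical examples, not part of this module):
#
#   class TimesheetView:
#       @require_login(only_with_any_permissions=['timesheet.read'])
#       def get(self, *args, **kwargs):
#           user = kwargs['user']  # injected by the decorator
#           ...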
| 2.421875 | 2 |
footkit/__init__.py | KhaitovR/footkit | 1 | 12765570 | <gh_stars>1-10
from .Parser import Parser
from .Preprocess import Preprocess
from .models import Validation
from .Odds import OddsParser
from .Calculation import Calculation
from .ImageDraw import PlotPredict
# from .Insta import InstaBot
from .FeatureSelector import FeatureSelector
# InstaBot,
__all__ = ["Parser", "Preprocess", "Validation", "OddsParser", "Calculation", "PlotPredict", "FeatureSelector"]
| 1.242188 | 1 |
pyorbit/classes/model_container_multinest.py | lucaborsato/PyORBIT | 12 | 12765571 | from pyorbit.classes.common import *
from pyorbit.classes.model_container_abstract import ModelContainer
class ModelContainerMultiNest(ModelContainer):
def __init__(self):
super(self.__class__, self).__init__()
self.include_priors = False
self.nested_sampling_parameters = {'nlive_mult': 25,
'base_dir': './',
'verbose': True,
'sampling_efficiency': 0.3,
'shutdown_jitter': False,
'include_priors': False}
self.pymultinest_signature = [
'n_params',
'n_clustering_params',
'wrapped_params',
'importance_nested_sampling',
'multimodal',
'const_efficiency_mode',
'n_live_points',
'evidence_tolerance',
'sampling_efficiency',
'n_iter_before_update',
'null_log_evidence',
'max_modes',
'mode_tolerance',
'outputfiles_basename',
'seed',
'verbose',
'resume',
'context',
'write_output',
'log_zero',
'max_iter',
'init_MPI',
'dump_callback']
self.output_directory = None
def multinest_priors(self, cube, ndim, nparams):
for i in range(0, ndim):
cube[i] = nested_sampling_prior_compute(cube[i], self.priors[i][0], self.priors[i][2], self.spaces[i])
def multinest_call(self, theta1, ndim, nparams):
# Workaround for variable selection: if a variable as null index
# (i.e. it has not been included in the model)
# the numpy array will give back an empty list, the ctype will give back an error
theta = np.empty(ndim)
for i in range(0, ndim):
theta[i] = theta1[i]
chi_out = self(theta, self.include_priors)
if chi_out < -0.5e30:
return -0.5e30
return chi_out
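
    # Illustrative sketch (comments only; not part of this class): the two
    # methods above match PyMultiNest's Prior/LogLikelihood callback
    # signatures, so a run would look roughly like this -- the ndim value
    # and output basename are assumptions:
    #
    #   import pymultinest
    #   mc = ModelContainerMultiNest()
    #   pymultinest.run(mc.multinest_call, mc.multinest_priors, ndim,
    #                   outputfiles_basename='./chains/1-', verbose=True)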
| 2.125 | 2 |
torchrecipes/vision/image_classification/tests/test_train_app.py | nateanl/recipes-1 | 0 | 12765572 | # Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
#!/usr/bin/env python3
# pyre-strict
import os
from copy import deepcopy
from typing import List, Optional
import torch
from torch import nn
from torchrecipes.core.test_utils.test_base import BaseTrainAppTestCase
from torchrecipes.utils.test import tempdir
from torchrecipes.vision.core.ops.fine_tuning_wrapper import FineTuningWrapper
from torchrecipes.vision.core.utils.model_weights import load_model_weights
from torchrecipes.vision.image_classification.train_app import (
ImageClassificationTrainApp,
)
from torchvision.models.resnet import resnet18
from torchvision.ops.misc import FrozenBatchNorm2d
class TestTrainApp(BaseTrainAppTestCase):
def _get_train_app(
self, tb_save_dir: str, test_overrides: Optional[List[str]] = None
) -> ImageClassificationTrainApp:
overrides: List[str] = [
"datamodule/datamodule=fake_data",
"+module.model.num_classes=10",
f"+tb_save_dir={tb_save_dir}",
]
app = self.create_app_from_hydra(
config_module="torchrecipes.vision.image_classification.conf",
config_name="train_app",
overrides=test_overrides if test_overrides else overrides,
)
self.mock_trainer_params(app, {"logger": True})
# pyre-fixme[7]: Expected `ImageClassificationTrainApp` but got `BaseTrainApp`.
return app
@tempdir
def test_train_model(self, root_dir: str) -> None:
train_app = self._get_train_app(tb_save_dir=root_dir)
# Train the model with the config
train_app.train()
@tempdir
def test_fine_tuning(self, root_dir: str) -> None:
pretrained = resnet18()
weights_path = os.path.join(root_dir, "weights.pth")
torch.save(pretrained.state_dict(), weights_path)
# prepare model for fine-tuning
trunk = resnet18(norm_layer=FrozenBatchNorm2d)
load_model_weights(trunk, weights_path)
head = nn.Linear(in_features=512, out_features=10)
fine_tune_model = FineTuningWrapper(trunk, "flatten", head)
origin_trunk = deepcopy(fine_tune_model.trunk)
# start fine-tuning
classification_train_app = self._get_train_app(tb_save_dir=root_dir)
# pyre-ignore[16]: ImageClassificationModule has model
classification_train_app.module.model = fine_tune_model
classification_train_app.train()
with torch.no_grad():
inp = torch.randn(1, 3, 28, 28)
origin_out = origin_trunk(inp)
# pyre-ignore[16]: ImageClassificationModule has model
tuned_out = classification_train_app.module.model.trunk(inp)
self.assertTrue(torch.equal(origin_out["flatten"], tuned_out["flatten"]))
| 1.78125 | 2 |
minemeld/ft/azure.py | mikejrizzo/minemeld-core | 0 | 12765573 | # Copyright 2015-2016 Palo Alto Networks, Inc
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
import logging
import itertools
import functools
import requests
import netaddr
import lxml.etree
import bs4
from . import basepoller
LOG = logging.getLogger(__name__)
AZUREXML_URL = \
'https://www.microsoft.com/EN-US/DOWNLOAD/confirmation.aspx?id=41653'
AZUREJSON_URL = 'https://www.microsoft.com/en-us/download/confirmation.aspx?id=56519'
REGIONS_XPATH = '/AzurePublicIpAddresses/Region'
def _build_IPv4(nodename, region, iprange):
iprange = iprange.get('Subnet', None)
if iprange is None:
LOG.error('%s - No Subnet', nodename)
return {}
try:
netaddr.IPNetwork(iprange)
    except Exception:
LOG.exception('%s - Invalid ip range: %s', nodename, iprange)
return {}
item = {
'indicator': iprange,
'type': 'IPv4',
'confidence': 100,
'azure_region': region,
'sources': ['azure.xml']
}
return item
def _build_IP(nodename, address_prefix, **keywords):
try:
ap = netaddr.IPNetwork(address_prefix)
except Exception:
LOG.exception('%s - Invalid ip range: %s', nodename, address_prefix)
return {}
if ap.version == 4:
type_ = 'IPv4'
elif ap.version == 6:
type_ = 'IPv6'
else:
LOG.error('{} - Unknown IP version: {}'.format(nodename, ap.version))
return {}
item = {
'indicator': address_prefix,
'type': type_,
'confidence': 100,
'sources': [nodename]
}
item.update(keywords)
return item
class AzureXML(basepoller.BasePollerFT):
def configure(self):
super(AzureXML, self).configure()
self.polling_timeout = self.config.get('polling_timeout', 20)
self.verify_cert = self.config.get('verify_cert', True)
def _process_item(self, item):
indicator = item.pop('indicator', None)
return [[indicator, item]]
def _build_request(self, now):
r = requests.Request(
'GET',
AZUREXML_URL
)
return r.prepare()
def _build_iterator(self, now):
_iterators = []
rkwargs = dict(
stream=False,
verify=self.verify_cert,
timeout=self.polling_timeout
)
r = requests.get(
AZUREXML_URL,
**rkwargs
)
try:
r.raise_for_status()
        except Exception:
LOG.error('%s - exception in request: %s %s',
self.name, r.status_code, r.content)
raise
html_soup = bs4.BeautifulSoup(r.content, "lxml")
a = html_soup.find('a', class_='failoverLink')
if a is None:
LOG.error('%s - failoverLink not found', self.name)
raise RuntimeError('{} - failoverLink not found'.format(self.name))
LOG.debug('%s - download link: %s', self.name, a['href'])
rkwargs = dict(
stream=True,
verify=self.verify_cert,
timeout=self.polling_timeout
)
r = requests.get(
a['href'],
**rkwargs
)
try:
r.raise_for_status()
        except Exception:
LOG.error('%s - exception in request: %s %s',
self.name, r.status_code, r.content)
raise
parser = lxml.etree.XMLParser()
for chunk in r.iter_content(chunk_size=10 * 1024):
parser.feed(chunk)
rtree = parser.close()
regions = rtree.xpath(REGIONS_XPATH)
for r in regions:
LOG.debug('%s - Extracting region: %s', self.name, r.get('Name'))
ipranges = r.xpath('IpRange')
_iterators.append(itertools.imap(
functools.partial(_build_IPv4, self.name, r.get('Name')),
ipranges
))
return itertools.chain(*_iterators)
class AzureJSON(basepoller.BasePollerFT):
def configure(self):
super(AzureJSON, self).configure()
self.polling_timeout = self.config.get('polling_timeout', 20)
self.verify_cert = self.config.get('verify_cert', True)
def _process_item(self, item):
indicator = item.pop('indicator', None)
return [[indicator, item]]
def _build_request(self, now):
r = requests.Request(
'GET',
AZUREJSON_URL
)
return r.prepare()
def _build_iterator(self, now):
_iterators = []
rkwargs = dict(
stream=False,
verify=self.verify_cert,
timeout=self.polling_timeout
)
r = requests.get(
AZUREJSON_URL,
**rkwargs
)
try:
r.raise_for_status()
        except Exception:
LOG.error('%s - exception in request: %s %s',
self.name, r.status_code, r.content)
raise
html_soup = bs4.BeautifulSoup(r.content, "lxml")
a = html_soup.find('a', class_='failoverLink')
if a is None:
LOG.error('%s - failoverLink not found', self.name)
raise RuntimeError('{} - failoverLink not found'.format(self.name))
LOG.debug('%s - download link: %s', self.name, a['href'])
rkwargs = dict(
stream=True,
verify=self.verify_cert,
timeout=self.polling_timeout
)
r = requests.get(
a['href'],
**rkwargs
)
try:
r.raise_for_status()
        except Exception:
LOG.error('%s - exception in request: %s %s',
self.name, r.status_code, r.content)
raise
rtree = r.json()
values = rtree.get('values', None)
if values is None:
LOG.error('{} - no values in JSON response'.format(self.name))
return []
for v in values:
LOG.debug('{} - Extracting value: {!r}'.format(self.name, v.get('id', None)))
id_ = v.get('id', None)
name = v.get('name', None)
props = v.get('properties', None)
if props is None:
LOG.error('{} - no properties in value'.format(self.name))
continue
region = props.get('region', None)
platform = props.get('platform', None)
system_service = props.get('systemService', None)
address_prefixes = props.get('addressPrefixes', [])
_iterators.append(itertools.imap(
functools.partial(
_build_IP,
self.name,
azure_name=name,
azure_id=id_,
azure_region=region,
azure_platform=platform,
azure_system_service=system_service
),
address_prefixes
))
return itertools.chain(*_iterators)
| 1.867188 | 2 |
tests/apps/packages/test_xmlrpc.py | tranarthur/localshop | 162 | 12765574 | import xmlrpc.client as xmlrpclib
import pytest
from tests.factories import ReleaseFactory
@pytest.fixture(params=['/RPC2', '/pypi'])
def rpc_endpoint(request):
return request.param
@pytest.mark.django_db
def test_search_package_name(client, admin_user, live_server, repository,
rpc_endpoint):
ReleaseFactory(
package__name='my-package', package__repository=repository,
summary='Test summary')
client = xmlrpclib.ServerProxy(live_server + rpc_endpoint)
response = client.search({'name': 'my-package'})
assert response == [{
'_pypi_ordering': 0,
'name': 'my-package',
'summary': 'Test summary',
'version': '1.0.0'}]
@pytest.mark.django_db
def test_search_package_summary(client, admin_user, live_server, repository,
rpc_endpoint):
ReleaseFactory(
package__name='my-package', package__repository=repository,
summary='Test summary')
client = xmlrpclib.ServerProxy(live_server + rpc_endpoint)
response = client.search({'summary': ['Test summary']})
assert response == [{
'_pypi_ordering': 0,
'name': 'my-package',
'summary': 'Test summary',
'version': '1.0.0'}]
@pytest.mark.django_db
def test_search_operator_and(client, admin_user, live_server, repository,
rpc_endpoint):
ReleaseFactory(package__name='my-package-1',
package__repository=repository,
summary='Test summary')
ReleaseFactory(package__name='arcoiro',
package__repository=repository,
summary='Test summary')
ReleaseFactory(package__name='my-package-2',
package__repository=repository,
summary='arcoiro')
client = xmlrpclib.ServerProxy(live_server + rpc_endpoint)
response = client.search({'name': ['my-package'],
'summary': ['Test summary']}, 'and')
assert response == [{
'_pypi_ordering': 0,
'name': 'my-package-1',
'summary': 'Test summary',
'version': '1.0.0'}]
@pytest.mark.django_db
def test_search_operator_or(client, admin_user, live_server, repository,
rpc_endpoint):
ReleaseFactory(package__name='my-package-1',
package__repository=repository,
summary='Test summary')
ReleaseFactory(package__name='arcoiro',
package__repository=repository,
summary='Test summary')
ReleaseFactory(package__name='my-package-2',
package__repository=repository,
summary='arcoiro')
client = xmlrpclib.ServerProxy(live_server + rpc_endpoint)
response = client.search({'name': ['my-package'],
'summary': ['Test summary']}, 'or')
assert response == [{
'_pypi_ordering': 0,
'name': 'arcoiro',
'summary': 'Test summary',
'version': '1.0.0'
},
{
'_pypi_ordering': 0,
'name': 'my-package-1',
'summary': 'Test summary',
'version': '1.0.0'
},
{
'_pypi_ordering': 0,
'name': 'my-package-2',
'summary': 'arcoiro',
'version': '1.0.0'
}]
@pytest.mark.django_db
def test_search_invalid_fields_are_ignores(client, admin_user, live_server,
repository, rpc_endpoint):
ReleaseFactory(package__name='my-package',
package__repository=repository,
summary='Test summary')
client = xmlrpclib.ServerProxy(live_server + rpc_endpoint)
response = client.search({'name': ['my-package'], 'invalid': ['Ops']})
assert response == [{
'_pypi_ordering': 0,
'name': 'my-package',
'summary': 'Test summary',
'version': '1.0.0'}]
| 2.078125 | 2 |
python/python-algorithm-intervew/18-binary-search/68-two-sum-ii-input-arrayt-is-sorted-2.py | bum12ark/algorithm | 1 | 12765575 | <filename>python/python-algorithm-intervew/18-binary-search/68-two-sum-ii-input-arrayt-is-sorted-2.py
"""
* 두수의 합 II
정렬된 배열을 받아 덧셈하여 타겟을 만들 수 있는 배열의 두 숫자 인덱스를 리턴하라.
주의 : 이 문제에서 배열은 0이 아닌 1부터 시작하는 것으로 한다.
Input: numbers = [2, 7, 11, 15], target = 9
Output: [1, 2]
"""
from typing import List
class Solution:
    # Binary-search solution: for each element, binary-search the rest of the array for its complement
def twoSum(self, numbers: List[int], target: int) -> List[int]:
for i, n in enumerate(numbers):
left, right = i + 1, len(numbers) - 1
expected = target - n
while left <= right:
mid = left + (right - left) // 2
if numbers[mid] < expected:
left = mid + 1
elif numbers[mid] > expected:
right = mid - 1
else:
return i + 1, mid + 1
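
    # Alternative sketch (an addition, not part of the original solution):
    # since the array is sorted, a two-pointer scan finds the pair in O(n)
    # instead of O(n log n).
    def twoSumTwoPointer(self, numbers: List[int], target: int) -> List[int]:
        left, right = 0, len(numbers) - 1
        while left < right:
            s = numbers[left] + numbers[right]
            if s < target:
                left += 1   # need a larger sum
            elif s > target:
                right -= 1  # need a smaller sum
            else:
                return [left + 1, right + 1]  # answer is 1-indexed
        return []  # no pair found (the problem guarantees one exists)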
if __name__ == '__main__':
numbers = [2, 7, 11, 15]
target = 9
print(Solution().twoSum(numbers, target))
| 3.8125 | 4 |
mlir_linalg/dsl/tc_model.py | stellaraccident/linalgpy-hacking | 3 | 12765576 | <reponame>stellaraccident/linalgpy-hacking
"""Model classes representing a tensor-comprehension."""
from typing import Callable, Dict, List, Optional, Sequence, Set, Tuple, Union
from mlir import ir as _ir
from .affine import *
from .scalar_expr import *
from .types import *
from .yaml_helper import *
# Type aliases.
AffineDimList = Dict[str, _ir.AffineExpr]
class TensorExpression:
"""An expression that can appear on the RHS of a comprehension."""
def to_scalar_expression(self) -> ScalarExpression:
raise NotImplementedError()
def visit_affine_exprs(self, callback):
"""Visits all affine expressions reachable by the expression."""
pass
def _get_all_dim_defs(self) -> Set[DimDef]:
"""Recursively gets all DimDef affine expressions that are referenced."""
results = set()
def visitor(affine_expr):
if isinstance(affine_expr, DimDef):
results.add(affine_expr)
self.visit_affine_exprs(visitor)
return results
def collect_uses(self, uses: Set["TensorUse"]):
"""Collects all TensorUses reachable through this expression."""
pass
def __add__(self, rhs: "TensorExpression") -> "TensorExpression":
return PrimFn.add(self, rhs)
def __mul__(self, rhs) -> "TensorExpression":
return PrimFn.mul(self, rhs)
def __sub__(self, rhs) -> "TensorExpression":
return PrimFn.sub(self, rhs)
def __hash__(self):
return hash(id(self))
class TensorUse(TensorExpression):
"""A used tensor represented by its (tensor_name, indices).
Note that forming a comprehension via direct assignment is performed through
__setitem__ on the TensorDef level. However, performing a reduction with
compound ops (+=, *=, etc) is done by doing a:
TensorDef.__getitem__
TensorUse.__iadd__
TensorDef.__setitem__
"""
def __init__(self, tensor_def: "TensorDef", indices: Sequence[AffineExprDef]):
self.tensor_def = tensor_def
self.indices = tuple(indices)
def to_scalar_expression(self) -> ScalarExpression:
assert self.tensor_def.tensor_name is not None
return ScalarArg(self.tensor_def.tensor_name).expr()
@property
def tensor_name(self) -> str:
n = self.tensor_def.tensor_name
assert n is not None, "TensorDef not attached"
return n
def visit_affine_exprs(self, callback):
for ind in self.indices:
ind.visit_affine_exprs(callback)
def collect_uses(self, uses: Set["TensorUse"]):
uses.add(self)
def __iadd__(self, rhs: TensorExpression) -> TensorExpression:
return ReduceFn.add(*self._compute_reduce_dims(rhs))(rhs)
def _compute_reduce_dims(self, rhs: TensorExpression) -> Set[DimDef]:
"""For implicit reductions, computes default reduction dims.
Assumes that the rhs is the expression being reduced and self is being
reduced into. Any indices referenced on the rhs and not in self are
considered reduction dims and will be ordered as encountered on the rhs.
"""
rhs_dims = rhs._get_all_dim_defs()
lhs_dims = self._get_all_dim_defs()
return rhs_dims - lhs_dims
def __repr__(self):
return f"{self.tensor_name}[{', '.join([repr(i) for i in self.indices])}]"
class TensorDef:
"""Bookkeeping of a single registered tensor, held in dict by name."""
def __init__(self,
type_var: TypeVar,
*shape: AffineExprDef,
indexing_map: Optional[_ir.AffineMap] = None,
output: bool = False):
if not isinstance(type_var, TypeVar):
raise ValueError(f"TensorDef requires a TypeVar. Got: {repr(type_var)}")
self.owner = None # type: Optional["TcOpDef"]
self.type_var = type_var
self.shape = shape
self.indexing_map = indexing_map
self.output = output
self.tensor_name = None # type: Optional[str]
self.registered_index = -1 # type: int
@property
def rank(self) -> int:
"""The rank of the tensor."""
return len(self.shape)
def attach(self, index: int, tensor_name: str, owner: "TcOpDef"):
if self.owner:
raise ValueError(f"TensorDef already registered with op: {self}")
self.registered_index = index
self.tensor_name = tensor_name
self.owner = owner
def __getitem__(self, dims) -> TensorUse:
assert self.owner, "TensorDef is not attached to an op"
state = AffineBuildState(global_state=self.owner._affine_state,
allow_new_symbols=False)
if not isinstance(dims, tuple):
dims = (dims,) # Handle single subscript case.
# Special case: (None) is a 0d-scalar use.
if dims == (None,):
dims = ()
exprs = []
for expr_def in dims:
if not isinstance(expr_def, AffineExprDef):
raise KeyError(
"A TensorDef can only be subscripted by a tuple of affine dims")
exprs.append(expr_def)
return TensorUse(self, exprs)
def __setitem__(self, dims, value):
"""Creates a new 1:1 comprehension by binding this tensor to an expression.
Note that due to the way assignment works in Python, we have to capture
direct assignment as a setitem on the TensorDef.
"""
if not isinstance(value, TensorExpression):
raise ValueError(f"Only TensorExpressions can be assigned to TensorDefs. "
f"Got: {repr(value)}")
use = self[dims]
comp = Comprehension((use, value))
self.owner.comprehensions.append(comp)
def __hash__(self):
return hash(id(self))
def __repr__(self):
output = "OUTPUT " if self.output else ""
return (f"{self.tensor_name}:TensorDef({output}{repr(self.type_var)}, "
f"shape={self.shape})")
class Comprehension:
"""Represents a single comprehension."""
def __init__(self, *bindings: Tuple[TensorUse, TensorExpression]):
self.definitions = list() # List[TensorUse]
self.values = list() # List[TensorExpression]
# Find the lhs to reduction rhs.
for assign, value in bindings:
if isinstance(value, ReduceApply):
if value.lhs:
raise ValueError(f"Reduction expression already assigns: {value}")
value.lhs = assign
self.definitions.append(assign)
self.values.append(value)
@property
def all_reduction_dims(self) -> Set[Tuple[DimDef, ...]]:
"""Gets the reduction dims for the comprehension or None."""
result = set()
for use in self.values:
if isinstance(use, ReduceApply):
result.add(use.reduce.reduce_dims)
else:
result.add(tuple())
return result
def __repr__(self):
if len(self.definitions) > 1:
defs_repr = f"({', '.join(repr(d) for d in self.definitions)})"
values_repr = f"({', '.join(repr(v) for v in self.values)})"
else:
defs_repr = f"{repr(self.definitions[0])}"
values_repr = f"{repr(self.values[0])}"
return f"{defs_repr} = {values_repr}"
class PrimFnType:
"""Primitive operations."""
def __init__(self, prim_name: str):
self.prim_name = prim_name
def __call__(self, *args):
return PrimApply(self, args)
def reduce(self, *reduce_dims: DimDef):
"""Shortcut to create a Reduce operation from this primitive."""
return ReduceFnType(self, *reduce_dims)
def __repr__(self):
return f"{self.prim_name}"
class PrimFn:
add = PrimFnType("add")
exp = PrimFnType("exp")
log = PrimFnType("log")
mul = PrimFnType("mul")
max = PrimFnType("max")
sub = PrimFnType("sub")
class ReduceFnType:
"""A reduction operator that reduces into its LHS from its RHS."""
def __init__(self, operator: PrimFnType, *reduce_dims: DimDef):
"""Initializes the ReduceFn with a primitive function and dims."""
if not isinstance(operator, PrimFnType):
raise ValueError(f"Reduce expected a Prim operator. Got: {operator}")
self.operator = operator
self.reduce_dims = tuple(reduce_dims)
def __call__(self, *args: TensorExpression):
return ReduceApply(self, args)
def __repr__(self):
return (f"reduce_{self.operator.prim_name}"
f"({', '.join(repr(d) for d in self.reduce_dims)})")
class ReduceFn:
add = PrimFn.add.reduce
mul = PrimFn.mul.reduce
max = PrimFn.max.reduce
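
# Illustrative comprehension (hedged: D and S come from the .affine star
# import, and U would be a caller-defined TypeVar):
#
#   C[D.m, D.n] += cast(U, A[D.m, D.k]) * cast(U, B[D.k, D.n])
#
# The += binds ReduceFn.add over the rhs dims that do not appear on the
# lhs (here D.k), per TensorUse._compute_reduce_dims above.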
class PrimApply(TensorExpression):
"""Application of a primitive."""
def __init__(self, prim: PrimFnType, args: Sequence[TensorExpression]):
self.prim = prim
self.args = tuple(args)
def to_scalar_expression(self) -> ScalarExpression:
return ScalarApplyFn(self.prim.prim_name,
*[arg.to_scalar_expression() for arg in self.args
]).expr()
def visit_affine_exprs(self, callback):
for arg in self.args:
arg.visit_affine_exprs(callback)
def collect_uses(self, uses: Set["TensorUse"]):
for arg in self.args:
arg.collect_uses(uses)
def __repr__(self):
return f"{repr(self.prim)}({', '.join(repr(a) for a in self.args)})"
class cast(TensorExpression):
"""Casts the element type to a type (typically symbolic TypeVar)."""
def __init__(self, to_type: TypeVar, operand: TensorExpression):
self.to_type = to_type
self.operand = operand
def to_scalar_expression(self) -> ScalarExpression:
return ScalarSymbolicCast(self.to_type,
self.operand.to_scalar_expression()).expr()
def visit_affine_exprs(self, callback):
self.operand.visit_affine_exprs(callback)
def collect_uses(self, uses: Set["TensorUse"]):
self.operand.collect_uses(uses)
def __repr__(self):
return f"cast({self.to_type}, {repr(self.operand)})"
class ReduceApply(TensorExpression):
"""Application of a reduction.
This captures the lhs separately (initial value) separately from the rhs.
"""
def __init__(self, reduce: ReduceFnType, args: Sequence[TensorExpression]):
self.reduce = reduce
self.lhs = None # type: Optional[TensorUse]
self.args = tuple(args)
def to_scalar_expression(self) -> ScalarExpression:
if self.lhs is None:
raise ValueError(f"Cannot scalarize a ReduceApply that has not been "
f"bound to its lhs: {self}")
full_args = [self.lhs.to_scalar_expression()
] + [arg.to_scalar_expression() for arg in self.args]
return ScalarApplyFn(self.reduce.operator.prim_name, *full_args).expr()
def visit_affine_exprs(self, callback):
for ind in self.reduce.reduce_dims:
ind.visit_affine_exprs(callback)
for arg in self.args:
arg.visit_affine_exprs(callback)
def collect_uses(self, uses: Set["TensorUse"]):
for arg in self.args:
arg.collect_uses(uses)
def __repr__(self):
return f"{repr(self.reduce)}({', '.join(repr(a) for a in self.args)})"
class OpInterfaceDef:
"""An interface that an op implements."""
def __init__(self, cpp_name: str):
self.cpp_name = cpp_name
ContractionOpInterface = OpInterfaceDef("LinalgContractionOpInterface")
class OpMetadataDef(YAMLObject):
"""Metadata about the op (generally not behavior impacting)."""
yaml_tag = "!LinalgOpMetadata"
def __init__(self, name: str, cpp_op_name: Optional[str], doc: Optional[str]):
self.name = name
self.cpp_op_name = cpp_op_name if cpp_op_name is not None else name
self.doc = doc
self.implements = [] # type: List[OpInterfaceDef]
def to_yaml_custom_dict(self):
d = dict(
name=self.name,
cpp_op_name=self.cpp_op_name,
doc=self.doc,
)
if self.implements:
d["implements"] = [intr.cpp_name for intr in self.implements]
return d
class TcOpDef:
"""Definition of a named op.
>>> with _ir.Context():
... od = TcOpDef('matmul')
... A, B, C = od.add_tensor(
... A=TensorDef('f32', shape=(S.M, S.K)),
... B=TensorDef('f32', shape=(S.M, S.N)),
... C=TensorDef('f32', shape=(S.M, S.N), output=True))
... _ = od.add_comprehension(A[D.n, D.m])
... od
TcOpDef(matmul -> matmul,
A:TensorDef(type_pred=f32, shape=()[s0, s1] -> (s0, s1))
B:TensorDef(type_pred=f32, shape=()[s0, s1] -> (s0, s2))
C:TensorDef(OUTPUT type_pred=f32, shape=()[s0, s1] -> (s0, s2))
"""
def __init__(self,
name: str,
cpp_op_name: Optional[str] = None,
doc: Optional[str] = None):
self.metadata = OpMetadataDef(name=name, cpp_op_name=cpp_op_name, doc=doc)
self.registered_tensors = dict() # type: Dict[str, TensorDef]
self.comprehensions = list() # type: List[Comprehension]
self._affine_state = AffineBuildState()
@property
def inputs(self) -> Sequence[TensorDef]:
return [t for t in self.registered_tensors.values() if not t.output]
@property
def outputs(self) -> Sequence[TensorDef]:
return [t for t in self.registered_tensors.values() if t.output]
def add_tensor(self, tensor_name: str, tensor: TensorDef):
"""Registers a tensor."""
if tensor_name in self.registered_tensors:
raise ValueError(f"Tensor {tensor_name} is already registered "
f"to {self.registered_tensors['tensor_name']}")
tensor.attach(len(self.registered_tensors), tensor_name, self)
self.registered_tensors[tensor_name] = tensor
def tensor(self, name):
"""Gets a registered tensor by name."""
try:
return self.registered_tensors[name]
except KeyError:
raise KeyError(f"Tensor {name} is not registered")
def __repr__(self):
lines = [f"TcOpDef({self.metadata.name} -> {self.metadata.cpp_op_name},"]
for name, tensor in self.registered_tensors.items():
lines.append(f" {tensor}")
if self.comprehensions:
lines[-1] += " {"
for comprehension in self.comprehensions:
lines.append(f" {comprehension}")
lines.append("}")
return "\n".join(lines)
def to_yaml(self):
# TODO: This is gross but useful. Make it cleaner.
from .linalg_op_config import LinalgOpConfig
configs = LinalgOpConfig.from_tc_op_def(self)
return yaml_dump_all(configs)
if __name__ == "__main__":
import doctest
doctest.testmod()
| 2.078125 | 2 |
tripleoclient/v1/undercloud_deploy.py | mail2nsrajesh/python-tripleoclient | 0 | 12765577 | # Copyright 2016 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
from __future__ import print_function
import argparse
import logging
import netaddr
import os
import pwd
import signal
import subprocess
import sys
import tempfile
import time
import yaml
try:
from urllib2 import HTTPError
from urllib2 import URLError
from urllib2 import urlopen
except ImportError:
# python3
from urllib.error import HTTPError
from urllib.error import URLError
from urllib.request import urlopen
from cliff import command
from heatclient.common import template_utils
from openstackclient.i18n import _
from tripleoclient import constants
from tripleoclient import exceptions
from tripleoclient import fake_keystone
from tripleoclient import heat_launcher
from tripleo_common.utils import passwords as password_utils
# TODO(bogdando) rework the list by real requirements for the
# heat-container-image vs heat-native cases
# NOTE: a plain list (not iter()) so the prerequisites survive repeated use
REQUIRED_PACKAGES = [
'openstack-heat-api',
'openstack-heat-engine',
'openstack-heat-monolith',
'python-heat-agent',
'python-heat-agent-apply-config',
'python-heat-agent-hiera',
'python-heat-agent-puppet',
'python-heat-agent-docker-cmd',
'python-heat-agent-json-file',
'python-heat-agent-ansible',
'python-ipaddr',
'python-tripleoclient',
'docker',
'openvswitch',
'openstack-puppet-modules',
'yum-plugin-priorities',
'openstack-tripleo-common',
'openstack-tripleo-heat-templates',
'deltarpm'
]
class DeployUndercloud(command.Command):
"""Deploy Undercloud (experimental feature)"""
log = logging.getLogger(__name__ + ".DeployUndercloud")
auth_required = False
prerequisites = REQUIRED_PACKAGES
def _get_hostname(self):
p = subprocess.Popen(["hostname", "-s"], stdout=subprocess.PIPE)
return p.communicate()[0].rstrip()
def _install_prerequisites(self):
print('Checking for installed prerequisites ...')
processed = []
for p in self.prerequisites:
try:
subprocess.check_call(['rpm', '-q', p])
except subprocess.CalledProcessError as e:
if e.returncode == 1:
processed.append(p)
elif e.returncode != 0:
raise Exception('Failed to check for prerequisites: '
'%s, the exit status %s'
% (p, e.returncode))
if len(processed) > 0:
print('Installing prerequisites ...')
subprocess.check_call(['yum', '-y', 'install'] + processed)
def _lookup_tripleo_server_stackid(self, client, stack_id):
server_stack_id = None
for X in client.resources.list(stack_id, nested_depth=6):
if X.resource_type in (
'OS::TripleO::Server',
'OS::TripleO::UndercloudServer'):
server_stack_id = X.physical_resource_id
return server_stack_id
def _launch_os_collect_config(self, keystone_port, stack_id):
print('Launching os-collect-config ...')
os.execvp('os-collect-config',
['os-collect-config',
'--polling-interval', '3',
'--heat-auth-url', 'http://127.0.0.1:%s/v3' % keystone_port,
'--heat-password', '<PASSWORD>',
'--heat-user-id', 'admin',
'--heat-project-id', 'admin',
'--heat-stack-id', stack_id,
'--heat-resource-name', 'deployed-server', 'heat'])
def _wait_local_port_ready(self, api_port):
count = 0
while count < 30:
time.sleep(1)
count += 1
try:
urlopen("http://127.0.0.1:%s/" % api_port, timeout=1)
            except HTTPError as he:
                # A 300 Multiple Choices response means the API answered
                if he.code == 300:
                    return True
            except URLError:
                pass
return False
def _heat_deploy(self, stack_name, template_path, parameters,
environments, timeout, api_port, ks_port):
self.log.debug("Processing environment files")
env_files, env = (
template_utils.process_multiple_environments_and_files(
environments))
self.log.debug("Getting template contents")
template_files, template = template_utils.get_template_contents(
template_path)
files = dict(list(template_files.items()) + list(env_files.items()))
# NOTE(dprince): we use our own client here because we set
# auth_required=False above because keystone isn't running when this
# command starts
tripleoclients = self.app.client_manager.tripleoclient
orchestration_client = tripleoclients.local_orchestration(api_port,
ks_port)
self.log.debug("Deploying stack: %s", stack_name)
self.log.debug("Deploying template: %s", template)
self.log.debug("Deploying parameters: %s", parameters)
self.log.debug("Deploying environment: %s", env)
self.log.debug("Deploying files: %s", files)
stack_args = {
'stack_name': stack_name,
'template': template,
'environment': env,
'files': files,
}
if timeout:
stack_args['timeout_mins'] = timeout
self.log.info("Performing Heat stack create")
stack = orchestration_client.stacks.create(**stack_args)
stack_id = stack['stack']['id']
event_list_pid = self._fork_heat_event_list()
self.log.info("Looking up server stack id...")
server_stack_id = None
# NOTE(dprince) wait a bit to create the server_stack_id resource
for c in range(timeout * 60):
time.sleep(1)
server_stack_id = self._lookup_tripleo_server_stackid(
orchestration_client, stack_id)
if server_stack_id:
break
if not server_stack_id:
msg = ('Unable to find deployed server stack id. '
'See tripleo-heat-templates to ensure proper '
'"deployed-server" usage.')
raise Exception(msg)
self.log.debug("server_stack_id: %s" % server_stack_id)
pid = None
status = 'FAILED'
try:
pid = os.fork()
if pid == 0:
self._launch_os_collect_config(ks_port, server_stack_id)
else:
while True:
status = orchestration_client.stacks.get(stack_id).status
self.log.info(status)
if status in ['COMPLETE', 'FAILED']:
break
time.sleep(5)
finally:
if pid:
os.kill(pid, signal.SIGKILL)
if event_list_pid:
os.kill(event_list_pid, signal.SIGKILL)
stack_get = orchestration_client.stacks.get(stack_id)
status = stack_get.status
if status != 'FAILED':
pw_rsrc = orchestration_client.resources.get(
stack_id, 'DefaultPasswords')
passwords = {p.title().replace("_", ""): v for p, v in
pw_rsrc.attributes.get('passwords', {}).items()}
return passwords
else:
msg = "Stack create failed, reason: %s" % stack_get.reason
raise Exception(msg)
def _fork_heat_event_list(self):
pid = os.fork()
if pid == 0:
events_env = {
'OS_AUTH_URL': 'http://127.0.0.1:35358',
'OS_USERNAME': 'foo',
'OS_PROJECT_NAME': 'foo',
'OS_PASSWORD': '<PASSWORD>'
}
try:
os.setpgrp()
os.setgid(pwd.getpwnam('nobody').pw_gid)
os.setuid(pwd.getpwnam('nobody').pw_uid)
except KeyError:
raise exceptions.DeploymentError(
"Please create a 'nobody' user account before "
"proceeding.")
subprocess.check_call(['openstack', 'stack', 'event', 'list',
'undercloud', '--follow',
'--nested-depth', '6'], env=events_env)
sys.exit(0)
else:
return pid
def _fork_fake_keystone(self):
pid = os.fork()
if pid == 0:
try:
os.setpgrp()
os.setgid(pwd.getpwnam('nobody').pw_gid)
os.setuid(pwd.getpwnam('nobody').pw_uid)
except KeyError:
raise exceptions.DeploymentError(
"Please create a 'nobody' user account before "
"proceeding.")
fake_keystone.launch()
sys.exit(0)
else:
return pid
def _update_passwords_env(self, passwords=None):
pw_file = os.path.join(os.environ.get('HOME', ''),
'tripleo-undercloud-passwords.yaml')
stack_env = {'parameter_defaults': {}}
if os.path.exists(pw_file):
with open(pw_file) as pf:
stack_env = yaml.load(pf.read())
pw = password_utils.generate_passwords(stack_env=stack_env)
stack_env['parameter_defaults'].update(pw)
if passwords:
# These passwords are the DefaultPasswords so we only
# update if they don't already exist in stack_env
for p, v in passwords.items():
if p not in stack_env['parameter_defaults']:
stack_env['parameter_defaults'][p] = v
with open(pw_file, 'w') as pf:
yaml.safe_dump(stack_env, pf, default_flow_style=False)
return pw_file
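
    # Shape of the passwords file written above (key names illustrative;
    # the real keys come from tripleo_common's password generator):
    #
    #   parameter_defaults:
    #     AdminPassword: <generated>
    #     HeatStackDomainAdminPassword: <generated>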
def _generate_hosts_parameters(self):
hostname = self._get_hostname()
domain = 'undercloud'
data = {
'CloudName': hostname,
'CloudDomain': domain,
'CloudNameInternal': '%s.internalapi.%s' % (hostname, domain),
'CloudNameStorage': '%s.storage.%s' % (hostname, domain),
'CloudNameStorageManagement': ('%s.storagemgmt.%s'
% (hostname, domain)),
'CloudNameCtlplane': '%s.ctlplane.%s' % (hostname, domain),
}
return data
def _generate_portmap_parameters(self, ip_addr, cidr):
hostname = self._get_hostname()
data = {
'DeployedServerPortMap': {
('%s-ctlplane' % hostname): {
'fixed_ips': [{'ip_address': ip_addr}],
'subnets': [{'cidr': cidr}]
},
'control_virtual_ip': {
'fixed_ips': [{'ip_address': ip_addr}],
'subnets': [{'cidr': cidr}]
}
}
}
return data
def _deploy_tripleo_heat_templates(self, parsed_args):
"""Deploy the fixed templates in TripleO Heat Templates"""
parameters = {}
tht_root = parsed_args.templates
# generate jinja templates
args = ['python', 'tools/process-templates.py', '--roles-data',
'roles_data_undercloud.yaml']
subprocess.check_call(args, cwd=tht_root)
print("Deploying templates in the directory {0}".format(
os.path.abspath(tht_root)))
self.log.debug("Creating Environment file")
environments = []
resource_registry_path = os.path.join(
tht_root, 'overcloud-resource-registry-puppet.yaml')
environments.insert(0, resource_registry_path)
# this will allow the user to overwrite passwords with custom envs
pw_file = self._update_passwords_env()
environments.insert(1, pw_file)
undercloud_env_path = os.path.join(
tht_root, 'environments', 'undercloud.yaml')
environments.append(undercloud_env_path)
# use deployed-server because we run os-collect-config locally
deployed_server_env = os.path.join(
tht_root, 'environments',
'deployed-server-noop-ctlplane.yaml')
environments.append(deployed_server_env)
if parsed_args.environment_files:
environments.extend(parsed_args.environment_files)
with tempfile.NamedTemporaryFile() as tmp_env_file:
tmp_env = self._generate_hosts_parameters()
ip_nw = netaddr.IPNetwork(parsed_args.local_ip)
ip = str(ip_nw.ip)
cidr = str(ip_nw.netmask)
tmp_env.update(self._generate_portmap_parameters(ip, cidr))
with open(tmp_env_file.name, 'w') as env_file:
yaml.safe_dump({'parameter_defaults': tmp_env}, env_file,
default_flow_style=False)
environments.append(tmp_env_file.name)
undercloud_yaml = os.path.join(tht_root, 'overcloud.yaml')
passwords = self._heat_deploy(parsed_args.stack, undercloud_yaml,
parameters, environments,
parsed_args.timeout,
parsed_args.heat_api_port,
parsed_args.fake_keystone_port)
if passwords:
# Get legacy passwords/secrets generated via heat
# These need to be written to the passwords file
# to avoid re-creating them every update
self._update_passwords_env(passwords)
return True
def get_parser(self, prog_name):
parser = argparse.ArgumentParser(
description=self.get_description(),
prog=prog_name,
add_help=False
)
parser.add_argument(
'--templates', nargs='?', const=constants.TRIPLEO_HEAT_TEMPLATES,
help=_("The directory containing the Heat templates to deploy"),
)
parser.add_argument('--stack',
help=_("Stack name to create"),
default='undercloud')
parser.add_argument('-t', '--timeout', metavar='<TIMEOUT>',
type=int, default=30,
help=_('Deployment timeout in minutes.'))
parser.add_argument(
'-e', '--environment-file', metavar='<HEAT ENVIRONMENT FILE>',
action='append', dest='environment_files',
help=_('Environment files to be passed to the heat stack-create '
'or heat stack-update command. (Can be specified more than '
'once.)')
)
parser.add_argument(
'--heat-api-port', metavar='<HEAT_API_PORT>',
dest='heat_api_port',
default='8006',
help=_('Heat API port to use for the installers private'
' Heat API instance. Optional. Default: 8006.)')
)
parser.add_argument(
'--fake-keystone-port', metavar='<FAKE_KEYSTONE_PORT>',
dest='fake_keystone_port',
default='35358',
help=_('Keystone API port to use for the installers private'
' fake Keystone API instance. Optional. Default: 35358.)')
)
parser.add_argument(
'--heat-user', metavar='<HEAT_USER>',
dest='heat_user',
default='heat',
        help=_('User to execute the non-privileged heat-all process. '
               'Defaults to heat.')
)
parser.add_argument(
'--heat-container-image', metavar='<HEAT_CONTAINER_IMAGE>',
dest='heat_container_image',
default='tripleoupstream/centos-binary-heat-all',
help=_('The container image to use when launching the heat-all '
'process. Defaults to: '
'tripleoupstream/centos-binary-heat-all')
)
parser.add_argument(
'--heat-native',
action='store_true',
default=False,
help=_('Execute the heat-all process natively on this host. '
'This option requires that the heat-all binaries '
'be installed locally on this machine. '
'This option is off by default which means heat-all is '
'executed in a docker container.')
)
parser.add_argument(
'--local-ip', metavar='<LOCAL_IP>',
dest='local_ip',
help=_('Local IP/CIDR for undercloud traffic. Required.')
)
parser.add_argument(
'-k',
'--keep-running',
action='store_true',
dest='keep_running',
help=_('Keep the process running on failures for debugging')
)
return parser
def take_action(self, parsed_args):
self.log.debug("take_action(%s)" % parsed_args)
print("\nUndercloud deploy is an experimental developer focused "
"feature that does not yet replace "
"'openstack undercloud install'.")
if not parsed_args.local_ip:
            print('Please set --local-ip to the correct IP address/CIDR '
                  'for this machine.')
return
# NOTE(dprince): It would be nice if heat supported true 'noauth'
# use in a local format for our use case here (or perhaps dev testing)
# but until it does running our own lightweight shim to mock out
# the required API calls works just as well. To keep fake keystone
# light we run it in a thread.
if not os.environ.get('FAKE_KEYSTONE_PORT'):
os.environ['FAKE_KEYSTONE_PORT'] = parsed_args.fake_keystone_port
if not os.environ.get('HEAT_API_PORT'):
os.environ['HEAT_API_PORT'] = parsed_args.heat_api_port
# The main thread runs as root and we drop privs for forked
# processes below. Only the heat deploy/os-collect-config forked
# process runs as root.
if os.geteuid() != 0:
raise exceptions.DeploymentError("Please run as root.")
# Install required packages
self._install_prerequisites()
keystone_pid = self._fork_fake_keystone()
# we do this as root to chown config files properly for docker, etc.
if parsed_args.heat_native:
heat_launch = heat_launcher.HeatNativeLauncher(
parsed_args.heat_api_port,
parsed_args.fake_keystone_port,
parsed_args.heat_container_image,
parsed_args.heat_user)
else:
heat_launch = heat_launcher.HeatDockerLauncher(
parsed_args.heat_api_port,
parsed_args.fake_keystone_port,
parsed_args.heat_container_image,
parsed_args.heat_user)
heat_pid = None
try:
# NOTE(dprince): we launch heat with fork exec because
# we don't want it to inherit our args. Launching heat
# as a "library" would be cool... but that would require
# more refactoring. It runs a single process and we kill
# it always below.
heat_pid = os.fork()
if heat_pid == 0:
os.setpgrp()
if parsed_args.heat_native:
try:
uid = pwd.getpwnam(parsed_args.heat_user).pw_uid
gid = pwd.getpwnam(parsed_args.heat_user).pw_gid
except KeyError:
raise exceptions.DeploymentError(
"Please create a %s user account before "
"proceeding." % parsed_args.heat_user)
os.setgid(gid)
os.setuid(uid)
heat_launch.heat_db_sync()
heat_launch.launch_heat()
else:
heat_launch.heat_db_sync()
heat_launch.launch_heat()
else:
self._wait_local_port_ready(parsed_args.fake_keystone_port)
self._wait_local_port_ready(parsed_args.heat_api_port)
if self._deploy_tripleo_heat_templates(parsed_args):
print("\nDeploy Successful.")
else:
print("\nUndercloud deployment failed: "
"press ctrl-c to exit.")
while parsed_args.keep_running:
try:
time.sleep(1)
except KeyboardInterrupt:
break
raise exceptions.DeploymentError("Stack create failed.")
finally:
if heat_launch:
print('Log files at: %s' % heat_launch.install_tmp)
heat_launch.kill_heat(heat_pid)
if keystone_pid:
os.kill(keystone_pid, signal.SIGKILL)
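# Hedged usage sketch (not from the source): given the arguments defined in
# get_parser() above, an invocation would look roughly like this; the exact
# command name depends on how this cliff plugin is registered.
#
#   sudo openstack undercloud deploy \
#       --local-ip 192.168.24.1/24 \
#       --templates /usr/share/openstack-tripleo-heat-templates \
#       -e extra-env.yaml --heat-native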
| 1.617188 | 2 |
midistegano.py | nonameable/steganography-with-midi | 2 | 12765578 | <gh_stars>1-10
from fileIO import FileIO
from hiding import Hider
from unraveling import Unraveler
import sys
from os import path
intention = sys.argv[1]
midi_file_name = sys.argv[2]
name = None
if intention == "-hide":
message_file_name = sys.argv[3]
if path.isfile(midi_file_name):
if path.isfile(message_file_name):
            parts = midi_file_name.split(".")
            extension = parts[-1]
if extension != "mid":
print "The file must have a .mid extension"
else:
fileIO = FileIO()
message = fileIO.get_text_from(message_file_name)
hider = Hider()
hider.hide(midi_file_name,message)
print "The output file name will be: " "secret_in_"+ midi_file_name
else:
print "You must put the message file in the same directory as midistegano.py"
else:
print "You must put the .mid file in the same directory as run.py"
elif intention=="-reveal":
if path.isfile(midi_file_name):
parts=midi_file_name.split(".")
name = parts[0];
extension=parts[-1]
if extension != "mid":
print "The file must have a .mid extension"
else:
unraveler = Unraveler()
hidden_message = unraveler.get_hidden_message(midi_file_name)
print "The hidden message is: %s" % hidden_message
fileIO = FileIO()
fileIO.print_to_file('hidden_message_in_' + name, hidden_message)
else:
print "The midi file you entered does not exist. Please enter a valid midi filename"
else:
    error_message = intention + " is not a valid midistegano flag. Write either -hide or -reveal"
print error_message
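# Hedged usage sketch (inferred from the argument handling above; Python 2):
#
#   python midistegano.py -hide song.mid message.txt
#       -> writes "secret_in_song.mid" with message.txt embedded
#   python midistegano.py -reveal secret_in_song.mid
#       -> prints the hidden message and writes it to
#          "hidden_message_in_secret_in_song"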
| 3.265625 | 3 |
contest/urls.py | archimedeans/integration-bee | 0 | 12765579 | <filename>contest/urls.py
"""contest URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/3.1/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.conf.urls.static import static
from django.contrib import admin
from django.contrib.auth import views as auth_views
from django.urls import include, path
# from django.views.generic import RedirectView
from . import settings
from .forms import BootstrapAuthenticationForm, BootstrapPasswordChangeForm
urlpatterns = [
path('admin/', admin.site.urls),
path('login/',
auth_views.LoginView.as_view(
authentication_form=BootstrapAuthenticationForm, redirect_authenticated_user=True),
name='login'),
path('logout/', auth_views.LogoutView.as_view(), name='logout'),
path('change-password/',
auth_views.PasswordChangeView.as_view(
form_class=BootstrapPasswordChangeForm),
name='password_change'),
path('change-password/done/',
auth_views.PasswordChangeDoneView.as_view(),
name='password_change_done'),
# path('accounts/', include('django.contrib.auth.urls')),
path('', include('round.urls')),
# path('round-two/', include('round.urls')),
# path('', RedirectView.as_view(url='round-two/', permanent=True)),
]
if settings.DEBUG:
urlpatterns += static(settings.MEDIA_URL,
document_root=settings.MEDIA_ROOT)
| 2.71875 | 3 |
examples/pftest.py | MasterSpecter/ekmmeters | 9 | 12765580 | """ Simple example read getting 2 fields
(c) 2016 EKM Metering.
"""
from ekmmeters import *
my_port_name = "/dev/ttyO4"
my_meter_address = "000300001463"
ekm_set_log(ekm_no_log)
port = SerialPort(my_port_name)
if port.initPort():
my_meter = V4Meter(my_meter_address)
my_meter.attachPort(port)
else:
print( "Cannot open port")
exit()
for i in range(1000):
if my_meter.request():
my_read_buffer = my_meter.getReadBuffer()
        # you can also traverse the buffer yourself,
        # but this is the simplest way to get it all.
print( "*------")
print( my_meter.getField(Field.Power_Factor_Ln_1))
print( my_meter.getField(Field.Cos_Theta_Ln_1))
port.closePort()
| 2.78125 | 3 |
mojo/devtools/common/devtoolslib/shell.py | zbowling/mojo | 1 | 12765581 | <reponame>zbowling/mojo<filename>mojo/devtools/common/devtoolslib/shell.py
# Copyright 2015 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
class Shell(object):
"""Represents an abstract Mojo shell."""
def serve_local_directories(self, mappings, port, reuse_servers=False):
"""Serves the content of the local (host) directories, making it available
to the shell under the url returned by the function.
The server will run on a separate thread until the program terminates. The
call returns immediately.
Args:
mappings: List of tuples (prefix, local_base_path_list) mapping URLs that
start with |prefix| to one or more local directories enumerated in
|local_base_path_list|. The prefixes should skip the leading slash.
The first matching prefix and the first location that contains the
requested file will be used each time.
port: port at which the server will be available to the shell. On Android
this can be different from the port on which the server runs on the
host.
reuse_servers: don't actually spawn the server. Instead assume that the
server is already running on |port|, and only set up forwarding if
needed.
Returns:
The url that the shell can use to access the server.
"""
raise NotImplementedError()
def forward_host_port_to_shell(self, host_port):
"""Forwards a port on the host machine to the same port wherever the shell
is running.
This is a no-op if the shell is running locally.
"""
raise NotImplementedError()
def run(self, arguments):
"""Runs the shell with given arguments until shell exits, passing the stdout
mingled with stderr produced by the shell onto the stdout.
Returns:
Exit code retured by the shell or None if the exit code cannot be
retrieved.
"""
raise NotImplementedError()
def run_and_get_output(self, arguments, timeout=None):
"""Runs the shell with given arguments until shell exits and returns the
output.
Args:
arguments: list of arguments for the shell
timeout: maximum running time in seconds, after which the shell will be
terminated
Returns:
A tuple of (return_code, output, did_time_out). |return_code| is the exit
code returned by the shell or None if the exit code cannot be retrieved.
|output| is the stdout mingled with the stderr produced by the shell.
|did_time_out| is True iff the shell was terminated because it exceeded
the |timeout| and False otherwise.
"""
raise NotImplementedError()
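# Hedged sketch (not part of the source): a minimal concrete Shell for a
# purely local run. The 'mojo_shell' binary name, the single-mapping server
# and the Python 3 http.server API are illustrative assumptions only.
import functools
import subprocess
import threading
from http.server import HTTPServer, SimpleHTTPRequestHandler
class _LocalShellSketch(Shell):
  def serve_local_directories(self, mappings, port, reuse_servers=False):
    # Serve only the first (prefix, paths) mapping, from its first directory.
    prefix, paths = mappings[0]
    handler = functools.partial(SimpleHTTPRequestHandler, directory=paths[0])
    server = HTTPServer(('127.0.0.1', port), handler)
    threading.Thread(target=server.serve_forever, daemon=True).start()
    return 'http://127.0.0.1:%d/%s' % (port, prefix)
  def forward_host_port_to_shell(self, host_port):
    pass  # the shell runs locally, nothing to forward
  def run(self, arguments):
    return subprocess.call(['mojo_shell'] + arguments)
  def run_and_get_output(self, arguments, timeout=None):
    try:
      completed = subprocess.run(['mojo_shell'] + arguments, timeout=timeout,
                                 stdout=subprocess.PIPE,
                                 stderr=subprocess.STDOUT)
      return completed.returncode, completed.stdout, False
    except subprocess.TimeoutExpired as error:
      return None, error.output or b'', True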
| 2.25 | 2 |
network.py | IBM/oct-glaucoma-global-visual-field | 0 | 12765582 | <reponame>IBM/oct-glaucoma-global-visual-field<filename>network.py
"""
Network architecture with loss function and optimizer
"""
from keras import backend as K
from keras.optimizers import SGD, Adam, RMSprop, Nadam
from keras.layers.normalization import BatchNormalization
from keras.models import Input, Model
from keras.layers import (Dense, Dropout, Activation, GlobalAveragePooling3D,
Conv3D, MaxPooling3D, Dropout, SpatialDropout3D)
from keras.layers import concatenate
from keras import regularizers
from nutsml import KerasNetwork
import network_decorrelate
import network_bayes
def create_optimizer(cfg):
lr = 1/pow(10, cfg['LR'])
optimizers = {
0: RMSprop(lr=lr),
1: Adam(lr=lr),
2: Nadam(lr=lr),
3: SGD(lr=lr, momentum=0.9, nesterov=True)
}
return optimizers[cfg['ALGO']]
def create_subnetwork(input, tag, cfg):
N_FILTER = cfg['N_FILTER']
N_CONV = cfg['N_CONV']
N_STRIDE = cfg['N_STRIDE']
REG = cfg['REG']
BN = cfg['BN']
DROPOUT = cfg['DROPOUT']
cam_i = len(N_FILTER) - 1
params = zip(N_FILTER, N_CONV, N_STRIDE)
for i, (n_filter, n_conv, n_stride) in enumerate(params):
if i == 0:
x = Conv3D(n_filter, n_conv, strides=n_stride,
kernel_regularizer=regularizers.l2(REG),
padding='same')(input)
else:
x = Conv3D(n_filter, n_conv, strides=n_stride,
kernel_regularizer=regularizers.l2(REG),
padding='same')(x)
if BN:
x = BatchNormalization(axis=-1)(x)
if i==cam_i:
name = tag + '_CAM'
else:
name = tag + '_layer' + str(i)
x = Activation('relu', name = name)(x)
if DROPOUT:
x = SpatialDropout3D(DROPOUT)(x)
x = GlobalAveragePooling3D(name=tag+'_GAP')(x)
return x
def create_solo_network(cfg):
C, H, W = cfg['C'], cfg['H'], cfg['W']
INPUTSHAPE = (H, W, C, 1)
ROI = cfg['ROI']
WEIGHTPATH = cfg['OUTSTEM']
ins = Input(shape = INPUTSHAPE)
net = create_subnetwork(ins, ROI, cfg)
c_decorr = cfg['DECORRELATION']
if c_decorr != 0.0:
net = network_decorrelate.CorrelationRegularization(c_decorr)(net)
out = Dense(1, name=ROI + 'CWGT')(net)
out = Activation('sigmoid')(out)
model = Model(inputs = ins, outputs = out)
optimizer = create_optimizer(cfg)
model.compile(loss='mse',
optimizer=optimizer,
metrics=['mse'])
return KerasNetwork(model, WEIGHTPATH)
########## MACONH
def create_maconh_network(cfg):
C, H, W = cfg['C'], cfg['H'], cfg['W']
INPUTSHAPE = (H, W, C, 1)
WEIGHTPATH = cfg['OUTSTEM']
N_OUT = 1
in_mac = Input(shape = INPUTSHAPE)
net_mac = create_subnetwork(in_mac, 'mac', cfg)
in_onh = Input(shape = INPUTSHAPE)
net_onh = create_subnetwork(in_onh, 'onh', cfg)
    out_merged = concatenate([net_mac, net_onh], name='merged_GAP')
    c_decorr = cfg['DECORRELATION']
    if c_decorr != 0.0:
        out_merged = network_decorrelate.CorrelationRegularization(c_decorr)(out_merged)
out = Dense(N_OUT, name= 'merged' + 'CWGT')(out_merged)
out = Activation('sigmoid')(out)
model = Model(inputs = [in_mac, in_onh], outputs = out)
optimizer = create_optimizer(cfg)
model.compile(loss='mse',
optimizer=optimizer,
metrics=['mse'])
return KerasNetwork(model, WEIGHTPATH)
def create_maconh_3headed_network(cfg):
C, H, W = cfg['C'], cfg['H'], cfg['W']
INPUTSHAPE = (H, W, C, 1)
ROI = cfg['ROI']
WEIGHTPATH = cfg['OUTSTEM']
N_OUT = 1
in_mac = Input(shape = INPUTSHAPE)
net_mac = create_subnetwork(in_mac, 'mac', cfg)
in_onh = Input(shape = INPUTSHAPE)
net_onh = create_subnetwork(in_onh, 'onh', cfg)
    out_merged = concatenate([net_mac, net_onh], name='merged_GAP')
    c_decorr = cfg['DECORRELATION']
    if c_decorr != 0.0:
        out_merged = network_decorrelate.CorrelationRegularization(c_decorr)(out_merged)
if cfg['FC']:
for i, n in enumerate(cfg['FC']):
out_merged = Dense(n, name='FC_merged_'+str(i), activation = 'tanh')(out_merged)
net_mac = Dense(n, name='FC_mac_'+str(i), activation = 'tanh')(net_mac)
net_onh = Dense(n, name='FC_onh_'+str(i), activation = 'tanh')(net_onh)
out_merged = Dense(N_OUT, name= 'merged' + 'CWGT')(out_merged)
out_merged = Activation('sigmoid')(out_merged)
out_mac = Dense(N_OUT, name='macCWGT')(net_mac)
out_mac = Activation('sigmoid')(out_mac)
out_onh = Dense(N_OUT, name='onhCWGT')(net_onh)
out_onh = Activation('sigmoid')(out_onh)
model = Model(inputs = [in_mac, in_onh], outputs = [out_mac, out_merged, out_onh])
optimizer = create_optimizer(cfg)
model.compile(loss='mse',
optimizer=optimizer,
metrics=['mse'])
return KerasNetwork(model, WEIGHTPATH)
############ bayes
def create_solo_bayes_network(cfg):
C, H, W = cfg['C'], cfg['H'], cfg['W']
INPUTSHAPE = (H, W, C, 1)
ROI = cfg['ROI']
WEIGHTPATH = cfg['OUTSTEM']
lam = cfg['ALEATROPIC_WEIGHT']
ins = Input(shape = INPUTSHAPE)
net = create_subnetwork(ins, ROI, cfg)
if cfg['FC']:
for i, n in enumerate(cfg['FC']):
net = Dense(n, name='FC_'+str(i), activation = 'tanh')(net)
out_mean = Dense(1, name='CWGT_mean')(net)
out_mean = Activation('sigmoid')(out_mean)
out_var = Dense(1,
name='CWGT_var',
activity_regularizer = network_bayes.reg_aleatropic(lam))(net)
out = concatenate([out_mean, out_var])
model = Model(inputs = ins, outputs = out)
optimizer = create_optimizer(cfg)
model.compile(loss = network_bayes.bnn_loss,
optimizer = optimizer,
metrics = [network_bayes.mse_metric, network_bayes.bnn_loss])
return KerasNetwork(model, WEIGHTPATH)
##############################################################
def create_network(cfg):
network_type = cfg['TYPE']
if network_type == 'solo':
network = create_solo_network(cfg)
elif network_type == 'maconh':
network = create_maconh_network(cfg)
elif network_type == 'maconh_3heads':
network = create_maconh_3headed_network(cfg)
elif network_type == 'solo_bayes':
network = create_solo_bayes_network(cfg)
    else:
        # avoid returning an undefined name for unrecognized types
        raise ValueError("unknown network type: {}".format(network_type))
return network
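if __name__ == '__main__':
    # Hedged usage sketch (not from the source): the cfg keys below are the
    # ones read by the constructors above; the concrete values are
    # illustrative only.
    example_cfg = {
        'TYPE': 'solo', 'ROI': 'mac', 'OUTSTEM': 'weights_mac',
        'C': 64, 'H': 128, 'W': 128,
        'N_FILTER': [16, 32, 64], 'N_CONV': [3, 3, 3], 'N_STRIDE': [2, 2, 2],
        'REG': 1e-4, 'BN': True, 'DROPOUT': 0.2,
        'DECORRELATION': 0.0, 'LR': 3, 'ALGO': 1,
    }
    net = create_network(example_cfg)  # a nutsml KerasNetwork wrapper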
| 2.296875 | 2 |
dmbrl/config/reacher.py | nikkik11/handful-of-trials | 358 | 12765583 | <reponame>nikkik11/handful-of-trials<gh_stars>100-1000
from __future__ import division
from __future__ import print_function
from __future__ import absolute_import
import numpy as np
import tensorflow as tf
from dotmap import DotMap
import gym
from dmbrl.misc.DotmapUtils import get_required_argument
from dmbrl.modeling.layers import FC
import dmbrl.env
class ReacherConfigModule:
ENV_NAME = "MBRLReacher3D-v0"
TASK_HORIZON = 150
NTRAIN_ITERS = 100
NROLLOUTS_PER_ITER = 1
PLAN_HOR = 25
MODEL_IN, MODEL_OUT = 24, 17
GP_NINDUCING_POINTS = 200
def __init__(self):
self.ENV = gym.make(self.ENV_NAME)
self.ENV.reset()
cfg = tf.ConfigProto()
cfg.gpu_options.allow_growth = True
self.SESS = tf.Session(config=cfg)
self.NN_TRAIN_CFG = {"epochs": 5}
self.OPT_CFG = {
"Random": {
"popsize": 2000
},
"CEM": {
"popsize": 400,
"num_elites": 40,
"max_iters": 5,
"alpha": 0.1
}
}
self.UPDATE_FNS = [self.update_goal]
self.goal = tf.Variable(self.ENV.goal, dtype=tf.float32)
self.SESS.run(self.goal.initializer)
@staticmethod
def obs_postproc(obs, pred):
return obs + pred
@staticmethod
def targ_proc(obs, next_obs):
return next_obs - obs
def update_goal(self, sess=None):
if sess is not None:
self.goal.load(self.ENV.goal, sess)
def obs_cost_fn(self, obs):
if isinstance(obs, np.ndarray):
return np.sum(np.square(ReacherConfigModule.get_ee_pos(obs, are_tensors=False) - self.ENV.goal), axis=1)
else:
return tf.reduce_sum(tf.square(ReacherConfigModule.get_ee_pos(obs, are_tensors=True) - self.goal), axis=1)
@staticmethod
def ac_cost_fn(acs):
if isinstance(acs, np.ndarray):
return 0.01 * np.sum(np.square(acs), axis=1)
else:
return 0.01 * tf.reduce_sum(tf.square(acs), axis=1)
def nn_constructor(self, model_init_cfg):
model = get_required_argument(model_init_cfg, "model_class", "Must provide model class")(DotMap(
name="model", num_networks=get_required_argument(model_init_cfg, "num_nets", "Must provide ensemble size"),
sess=self.SESS, load_model=model_init_cfg.get("load_model", False),
model_dir=model_init_cfg.get("model_dir", None)
))
if not model_init_cfg.get("load_model", False):
model.add(FC(200, input_dim=self.MODEL_IN, activation="swish", weight_decay=0.00025))
model.add(FC(200, activation="swish", weight_decay=0.0005))
model.add(FC(200, activation="swish", weight_decay=0.0005))
model.add(FC(200, activation="swish", weight_decay=0.0005))
model.add(FC(self.MODEL_OUT, weight_decay=0.00075))
model.finalize(tf.train.AdamOptimizer, {"learning_rate": 0.00075})
return model
def gp_constructor(self, model_init_cfg):
model = get_required_argument(model_init_cfg, "model_class", "Must provide model class")(DotMap(
name="model",
kernel_class=get_required_argument(model_init_cfg, "kernel_class", "Must provide kernel class"),
kernel_args=model_init_cfg.get("kernel_args", {}),
num_inducing_points=get_required_argument(
model_init_cfg, "num_inducing_points", "Must provide number of inducing points."
),
sess=self.SESS
))
return model
@staticmethod
def get_ee_pos(states, are_tensors=False):
theta1, theta2, theta3, theta4, theta5, theta6, theta7 = \
states[:, :1], states[:, 1:2], states[:, 2:3], states[:, 3:4], states[:, 4:5], states[:, 5:6], states[:, 6:]
if are_tensors:
rot_axis = tf.concat([tf.cos(theta2) * tf.cos(theta1), tf.cos(theta2) * tf.sin(theta1), -tf.sin(theta2)],
axis=1)
rot_perp_axis = tf.concat([-tf.sin(theta1), tf.cos(theta1), tf.zeros(tf.shape(theta1))], axis=1)
cur_end = tf.concat([
0.1 * tf.cos(theta1) + 0.4 * tf.cos(theta1) * tf.cos(theta2),
0.1 * tf.sin(theta1) + 0.4 * tf.sin(theta1) * tf.cos(theta2) - 0.188,
-0.4 * tf.sin(theta2)
], axis=1)
for length, hinge, roll in [(0.321, theta4, theta3), (0.16828, theta6, theta5)]:
perp_all_axis = tf.cross(rot_axis, rot_perp_axis)
x = tf.cos(hinge) * rot_axis
y = tf.sin(hinge) * tf.sin(roll) * rot_perp_axis
z = -tf.sin(hinge) * tf.cos(roll) * perp_all_axis
new_rot_axis = x + y + z
new_rot_perp_axis = tf.cross(new_rot_axis, rot_axis)
new_rot_perp_axis = tf.where(tf.less(tf.norm(new_rot_perp_axis, axis=1), 1e-30),
rot_perp_axis, new_rot_perp_axis)
new_rot_perp_axis /= tf.norm(new_rot_perp_axis, axis=1, keepdims=True)
rot_axis, rot_perp_axis, cur_end = new_rot_axis, new_rot_perp_axis, cur_end + length * new_rot_axis
else:
rot_axis = np.concatenate([np.cos(theta2) * np.cos(theta1), np.cos(theta2) * np.sin(theta1), -np.sin(theta2)],
axis=1)
rot_perp_axis = np.concatenate([-np.sin(theta1), np.cos(theta1), np.zeros(theta1.shape)], axis=1)
cur_end = np.concatenate([
0.1 * np.cos(theta1) + 0.4 * np.cos(theta1) * np.cos(theta2),
0.1 * np.sin(theta1) + 0.4 * np.sin(theta1) * np.cos(theta2) - 0.188,
-0.4 * np.sin(theta2)
], axis=1)
for length, hinge, roll in [(0.321, theta4, theta3), (0.16828, theta6, theta5)]:
perp_all_axis = np.cross(rot_axis, rot_perp_axis)
x = np.cos(hinge) * rot_axis
y = np.sin(hinge) * np.sin(roll) * rot_perp_axis
z = -np.sin(hinge) * np.cos(roll) * perp_all_axis
new_rot_axis = x + y + z
new_rot_perp_axis = np.cross(new_rot_axis, rot_axis)
new_rot_perp_axis[np.linalg.norm(new_rot_perp_axis, axis=1) < 1e-30] = \
rot_perp_axis[np.linalg.norm(new_rot_perp_axis, axis=1) < 1e-30]
new_rot_perp_axis /= np.linalg.norm(new_rot_perp_axis, axis=1, keepdims=True)
rot_axis, rot_perp_axis, cur_end = new_rot_axis, new_rot_perp_axis, cur_end + length * new_rot_axis
return cur_end
CONFIG_MODULE = ReacherConfigModule
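if __name__ == '__main__':
    # Hedged smoke test (not from the source): get_ee_pos maps a batch of
    # observations (joint angles in the first 7 columns) to 3-D end-effector
    # positions; the zeroed states are illustrative only.
    states = np.zeros((4, 17))
    ee = ReacherConfigModule.get_ee_pos(states, are_tensors=False)
    assert ee.shape == (4, 3)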
| 1.929688 | 2 |
tests/ut/test_core.py | tubone24/fileName2SerialNumber | 4 | 12765584 | <reponame>tubone24/fileName2SerialNumber
import os
import re
import logging
from unittest.mock import call
from unittest.mock import patch
import pytest
from ebook_homebrew.core import Common
from ebook_homebrew.exceptions import InvalidExtensionTypeError, \
InvalidPathTypeError, InvalidDigitsFormatError, ChangeFileNameOSError,\
InvalidImageParameterTypeError, TargetSrcFileNotFoundError
_logger = logging.getLogger(name=__name__)
class TestCommon(object):
def setup_method(self, method):
_logger.info("method{}".format(method.__name__))
self.target = Common()
@pytest.mark.parametrize("test_input, expected", [
("jpg", ".jpg"),
(".png", ".png"),
(".part1.rar", ".part1.rar"),
("part1.rar", ".part1.rar"),
(10, ".10"),
(0.42, ".0.42")])
def test_ok__convert_extension_with_dot(self, test_input, expected):
actual = self.target._convert_extension_with_dot(test_input)
_logger.debug("\nactual: {actual}\nexpected: {expected}".format(actual=actual, expected=expected))
assert actual == expected
@pytest.mark.parametrize("test_input", [["jpg"],
{},
None])
def test_error__convert_extension_with_dot(self, test_input):
with pytest.raises(InvalidExtensionTypeError):
self.target._convert_extension_with_dot(test_input)
@pytest.mark.parametrize("test_input, expected", [
("/usr/local/common/test.py", ("/usr/local/common", "test", ".py")),
("../tests/python3.6.txt", ("../tests", "python3.6", ".txt")),
("example.nim", ("", "example", ".nim"))])
def test_ok__split_dir_root_ext(self, test_input, expected):
actual = self.target._split_dir_root_ext(test_input)
_logger.debug("\nactual: {actual}\nexpected: {expected}".format(actual=actual, expected=expected))
assert actual == expected
def test_error__split_dir_root_ext(self):
test_input = 32.445
with pytest.raises(InvalidPathTypeError):
self.target._split_dir_root_ext(test_input)
@pytest.mark.parametrize("test_input, expected", [
(("test001.txt", 3), "001"),
(("hoge00234foo.py", "3,5"), "00234"),
(("barbar002345foo.py", "3,4"), "0023")])
def test_ok__check_serial_number(self, test_input, expected):
actual = self.target._check_serial_number(*test_input).group(0)
_logger.debug("\nactual: {actual}\nexpected: {expected}".format(actual=actual, expected=expected))
assert actual == expected
def test_unmatched__check_serial_number(self):
test_input = ("hogefoobar.js", 3)
actual = self.target._check_serial_number(*test_input)
assert actual is None
@pytest.mark.parametrize("test_input, expected", [
("3,4", 4),
("3", 3),
("3,5", 5)])
def test_ok__check_digit_format(self, test_input, expected):
actual = self.target._check_digit_format(test_input)
_logger.debug("\nactual: {actual}\nexpected: {expected}".format(actual=actual, expected=expected))
assert actual == expected
@pytest.mark.parametrize("test_input", [
4,
"3, 4",
3.5])
def test_error__check_digit_format(self, test_input):
with pytest.raises(InvalidDigitsFormatError):
self.target._check_digit_format(test_input)
@pytest.fixture()
def input_num(self):
return re.search("\\d{3}", "test001foo.jpg")
@pytest.fixture()
def input_regex_ext(self):
return re.compile(".jpg")
@pytest.mark.parametrize("test_filename, expected",[
("test001foo.jpg", False),
("test001.txt", True)])
def test_1_check_skip_file(self, test_filename, input_regex_ext, input_num, expected):
actual = self.target._check_skip_file(test_filename, input_regex_ext, input_num)
assert actual == expected
def test_2_check_skip_file(self, input_regex_ext):
test_input = "test001foo.jpg"
input_num = None
expected = True
actual = self.target._check_skip_file(test_input, input_regex_ext, input_num)
assert actual == expected
def test_ok__rename_file(self):
with patch("os.rename") as mock_rename:
actual = self.target._rename_file("foo", "bar")
mock_rename.assert_called_once_with("foo", "bar")
expected = True
_logger.debug("\nactual: {actual}\nexpected: {expected}".format(actual=actual, expected=expected))
assert actual is expected
def test_error__rename_file(self):
with patch("os.rename") as mock:
mock.side_effect = OSError
with pytest.raises(ChangeFileNameOSError):
self.target._rename_file("foo", "bar")
@pytest.mark.parametrize("input_param, expected", [
("Y", True),
("y", True),
("n", False),
("N", False),
("hoge", False)])
def test_ok__remove_file(self, input_param, expected):
with patch("os.remove") as mock_remove, patch("builtins.input") as mock_input:
mock_input.return_value = input_param
actual = self.target._remove_file("foo", assume_yes=False)
if expected is True:
mock_remove.assert_called_once_with("foo")
_logger.debug("\nactual: {actual}\nexpected: {expected}".format(actual=actual, expected=expected))
assert actual is expected
def test_ok__remove_file_with_assume_yes(self):
with patch("os.remove") as mock_remove:
actual = self.target._remove_file("foo", assume_yes=True)
mock_remove.assert_called_once_with("foo")
expected = True
_logger.debug("\nactual: {actual}\nexpected: {expected}".format(actual=actual, expected=expected))
assert actual is expected
@pytest.mark.parametrize("test_input, called, input_param, expected", [
(("foo", "/bar1/bar2"), ("foo", "/bar1"), "Y", True),
(("foo", "/bar1/bar2"), ("foo", "/bar1"), "y", True),
(("foo", "bar3"), ("foo", ""), "Y", True),
(("foo", "bar3"), ("foo", ""), "y", True),
(("foo", "bar1/bar2"), ("foo", "/bar1"), "n", False),
(("foo", "bar1/bar2"), ("foo", "/bar1"), "N", False),
(("foo", "bar1/bar2"), ("foo", "/bar1"), "hoge", False)])
def test_ok__move_file(self, test_input, called, input_param, expected):
with patch("shutil.move") as mock_move, patch("builtins.input") as mock_input:
mock_input.return_value = input_param
actual = self.target._move_file(*test_input, assume_yes=False)
if expected is True:
mock_move.assert_called_once_with(*called)
_logger.debug("\nactual: {actual}\nexpected: {expected}".format(actual=actual, expected=expected))
assert actual is expected
def test_ok__move_file_with_assume_yes(self):
with patch("shutil.move") as mock_move:
actual = self.target._move_file("foo", "/bar1/bar2", assume_yes=True)
mock_move.assert_called_once_with("foo", "/bar1")
expected = True
_logger.debug("\nactual: {actual}\nexpected: {expected}".format(actual=actual, expected=expected))
assert actual is expected
def test_ok_move_file(self):
test_input_file = "test.pdf"
test_input_dst = "tests"
test_input_assume_yes = True
with patch.object(self.target, "_move_file") as mock_move_file:
self.target.move_file(test_input_file, test_input_dst, test_input_assume_yes)
destination = os.path.join(test_input_dst, test_input_file)
mock_move_file.assert_called_once_with(file=test_input_file,
dst=destination,
assume_yes=test_input_assume_yes)
@pytest.mark.parametrize("test_input, calls, expected", [
(["foo", "bar"], [call("foo"), call("bar")], True),
(["foo", "bar", "hoge"], [call("foo"), call("bar"), call("hoge")], True),
(["foo"], [call("foo")], True),
([], [], False),
(None, [], False)])
def test_ok__remove_file_bulk(self, test_input, calls, expected):
with patch("os.remove") as mock_remove:
actual = self.target._remove_file_bulk(test_input)
if expected is True:
assert mock_remove.call_args_list == calls
assert actual is expected
@pytest.mark.parametrize("test_input, expected", [
("test.jpg", None),
("test.png", None),
("test.gif", None)])
def test_ok__check_image_file(self, test_input, expected):
with patch("PIL.Image.open") as mock_pil_open, patch("PIL.Image.open.show"):
actual = self.target._check_image_file(test_input)
mock_pil_open.assert_called_once_with(test_input)
assert actual == expected
def test_error__check_image_file(self):
test_input = "test.txt"
with patch("PIL.Image.open"), patch("PIL.Image.open.show"):
with pytest.raises(InvalidImageParameterTypeError):
self.target._check_image_file(test_input)
@pytest.mark.parametrize("test_dir, test_sort, expected", [
("test", False, ["aaa.txt", "test.txt", "aaa011.txt"]),
("test", True, ["aaa.txt", "aaa011.txt", "test.txt"])])
def test_ok_make_file_list(self, test_dir, test_sort, expected):
with patch("os.listdir", return_value=["aaa.txt", "test.txt", "aaa011.txt"]) as mock_list_dir:
actual = self.target._make_file_list(test_dir, test_sort)
mock_list_dir.assert_called_once_with(test_dir)
assert actual == expected
def test_error_make_file_list(self):
with patch("os.listdir") as mock_list_dir:
mock_list_dir.side_effect = FileNotFoundError
with pytest.raises(TargetSrcFileNotFoundError):
self.target._make_file_list("test")
| 2.34375 | 2 |
epm/modeling/ml_modeling.py | EPM-LearningAnalytics/EPM_Project | 2 | 12765585 | #!/usr/bin/env python
# coding: utf-8
"""
This module subsets the certain number of important features
and detects student behavior and grouping students
"""
# Load libraries
import pandas as pd
from sklearn.ensemble import ExtraTreesClassifier
from sklearn.neighbors import KNeighborsClassifier
from sklearn.tree import DecisionTreeClassifier
from sklearn.naive_bayes import GaussianNB
from sklearn.linear_model import LogisticRegression
from sklearn.svm import SVC
from sklearn.ensemble import RandomForestClassifier
from sklearn.cluster import KMeans
def subset_important_features(data_list, num_of_features, option):
"""
Subset the certain number of statistically significant features
Parameters
----------
data_list: A list containing pandas dataframes
including sessions' and grades' data
num_of_features: The number of features that a user wants to subset
option: Different ways to subset the data_list
'common': Subset common significant features across all sessions
'different': Subset significant features from each session
Return
----------
A list containing subsetted pandas dataframes
"""
    if not isinstance(num_of_features, int):
        raise ValueError("'num_of_features' should be an integer.")
    if not isinstance(option, str):
        raise ValueError("'option' should be a string ('common' or 'different').")
    if not isinstance(data_list, list):
        raise ValueError("'data_list' should be a list including panda dataframes.")
    if num_of_features < 2:
        raise ValueError("'num_of_features' should be at least 2.")
else:
# Subset common significant features across all sessions
if option == 'different':
important_features = []
results_list = [0]*len(data_list)
for i, session in enumerate(data_list):
if i == 0:
continue
ivs = session.drop(columns=['ID', 'Y'])
outcome = session['Y']
clf = ExtraTreesClassifier(n_estimators=50)
clf = clf.fit(ivs, outcome)
feat_importances = pd.Series(clf.feature_importances_, index=ivs.columns)
features = feat_importances.nlargest(num_of_features).index[0:num_of_features]
important_features.append(pd.DataFrame(features, columns=['session' + str(i+1)]))
important_features[i-1].loc[num_of_features] = ['Y']
important_features[i-1].loc[num_of_features+1] = ['ID']
for i, session in enumerate(data_list):
if i == 0:
continue
results_list[i] = session[important_features[i-1]['session'+str(i+1)]]
return results_list
# Subset significant features from each session
elif option == 'common':
num = 0
j = 10
results_list = [0]*len(data_list)
while num < num_of_features-1:
important_features = []
for i, session in enumerate(data_list):
if i == 0:
continue
ivs = session.drop(columns=['ID', 'Y'])
outcome = session['Y']
clf = ExtraTreesClassifier(n_estimators=50)
clf = clf.fit(ivs, outcome)
feat_importances = pd.Series(clf.feature_importances_, index=ivs.columns)
features = feat_importances.nlargest(j).index[0:j]
important_features.append(features)
common_features = list(set.intersection(*map(set, important_features)))
num = len(common_features)
j += 1
if len(common_features) > num_of_features-1:
common_features = common_features[0:num_of_features-1]
for i, session in enumerate(data_list):
if i == 0:
continue
common_features.append('ID')
common_features.append('MID'+str(i+1))
results_list[i] = session[common_features]
del common_features[-2:]
if len(common_features) == num_of_features-1:
for i, session in enumerate(data_list):
if i == 0:
continue
common_features.append('ID')
common_features.append('MID'+str(i+1))
results_list[i] = session[common_features]
del common_features[-2:]
return results_list
def machine_learning_model(data_list, ml_model):
"""
Fit a machine learning model
Parameters
----------
data_list: A list containing pandas dataframes
including sessions' and grades' data
ml_model: A machine learning model that a user wants to fit
'KNN': K-nearest neighbors
'DT': Decision tree
'RF': Randome forest
'NB': Naive Bayes
'LR': Logistic regression
'SVC': Support vector classifier
Return
----------
A list containing pandas dataframes including features
and a fitted result from a machine learning model
"""
    if not isinstance(data_list, list):
        raise ValueError("'data_list' should be a list including panda dataframes.")
    if not isinstance(ml_model, str):
        raise ValueError("'ml_model' should be a string naming one of the machine "
                         "learning models ('KNN', 'DT', 'RF', 'NB', 'LR', 'SVC').")
else:
for i, session in enumerate(data_list):
if i == 0:
continue
ivs = session.drop(columns=['Y'])
outcome = session['Y']
# Fit the K-nearest neighbors
if ml_model == 'KNN':
cknn = KNeighborsClassifier(n_neighbors=10, metric='minkowski',
p=2).fit(ivs, outcome)
predict = cknn.predict(ivs)
session = session.assign(Predicted_Y=predict)
# Fit the decision tree
elif ml_model == 'DT':
cdt = DecisionTreeClassifier(criterion='entropy').fit(ivs, outcome)
predict = cdt.predict(ivs)
session = session.assign(Predicted_Y=predict)
# Fit the random forest
elif ml_model == 'RF':
crf = RandomForestClassifier(n_estimators=10,
criterion='entropy').fit(ivs, outcome)
predict = crf.predict(ivs)
session = session.assign(Predicted_Y=predict)
# Fit the naive bayes
elif ml_model == 'NB':
cnb = GaussianNB().fit(ivs, outcome)
predict = cnb.predict(ivs)
session = session.assign(Predicted_Y=predict)
# Fit the logistic regression
elif ml_model == 'LR':
clr = LogisticRegression(solver='liblinear').fit(ivs, outcome)
predict = clr.predict(ivs)
session = session.assign(Predicted_Y=predict)
            # Fit the support vector classifier
elif ml_model == 'SVC':
csvm = SVC(kernel='rbf', random_state=0).fit(ivs, outcome)
predict = csvm.predict(ivs)
session = session.assign(Predicted_Y=predict)
data_list[i] = session
return data_list
def kmean_clustering(data_list, num_of_sessions, num_of_clusters):
"""
Fit the k-means clustering
Parameters
----------
data_list: A list containing pandas dataframes
including sessions' and grades' data
num_of_sessions: The timing when a user wants to form a group
num_of_clusters: The number of clusters that a user wants to form
Return
----------
A list containing pandas dataframes including features
and results from the k-mean clustering
"""
    if not isinstance(data_list, list):
        raise ValueError("'data_list' should be a list including panda dataframes.")
    if not isinstance(num_of_sessions, int):
        raise ValueError("'num_of_sessions' should be an integer.")
    if not isinstance(num_of_clusters, int):
        raise ValueError("'num_of_clusters' should be an integer.")
    if num_of_sessions < 2:
        raise ValueError("'num_of_sessions' should be at least 2.")
else:
new_data_list = [0]*5
for i in range(0, num_of_sessions):
if i == 0:
continue
# k-mean clustering for the session 2
if i == 1:
new_data_list[i-1] = data_list[i]
kmeans = KMeans(n_clusters=num_of_clusters, init='k-means++',
max_iter=300, n_init=10)
kmeans.fit(new_data_list[i-1].loc[:, new_data_list[i-1].columns != 'ID'])
y_pred = kmeans.fit_predict(new_data_list[i-1].loc[:, new_data_list[i-1].columns != 'ID'])
new_data_list[i-1] = new_data_list[i-1].assign(group=y_pred)
# k-mean clustering for the session 3-6
if 2 <= i <= 5:
logs = data_list[i].columns[0:len(data_list[i].columns)-2]
new_data_list[i-1] = new_data_list[i-2].merge(data_list[i], how="outer", on=['ID'])
# Calculate current intermediate (mid) scores with previous scores
mid_cols = [col for col in new_data_list[i-1].columns if col.startswith('MID')]
new_data_list[i-1]['MID_Mean'] = new_data_list[i-1][mid_cols].mean(axis=1)
# Calculate current intermediate (mid) log feature(s) with previous log feature(s)
j = 0
all_log_cols = []
while j < len(logs):
log_cols = [col for col in new_data_list[i-1].columns
if col.startswith(logs[j])]
all_log_cols.append(logs[j]+'_Mean')
new_data_list[i-1][logs[j]+'_Mean'] = new_data_list[i-1][log_cols].mean(axis=1)
j += 1
cols_collection = all_log_cols+['ID', 'MID_Mean']
new_data_list[i-1] = new_data_list[i-1][cols_collection]
kmeans = KMeans(n_clusters=num_of_clusters, init='k-means++',
max_iter=300, n_init=10)
data_for_fitting = new_data_list[i-1].loc[:, new_data_list[i-1].columns != 'ID']
kmeans.fit(data_for_fitting)
y_pred = kmeans.fit_predict(data_for_fitting)
new_data_list[i-1] = new_data_list[i-1].assign(group=y_pred)
return new_data_list[num_of_sessions-2]
def main():
print('Done!')
if __name__ == '__main__':
main()
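# Hedged usage sketch (not from the source): data_list is assumed to be a
# list of pandas DataFrames, one per session, each with an 'ID' column, a
# binary 'Y' outcome, 'MID<k>' intermediate scores and per-session log
# features, matching the column names used above.
#
#   subsets = subset_important_features(data_list, num_of_features=5,
#                                       option='different')
#   fitted = machine_learning_model(subsets, ml_model='RF')
#   grouped = kmean_clustering(data_list, num_of_sessions=3, num_of_clusters=4)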
| 3.40625 | 3 |
sym2num/model.py | cea-ufmg/sym2num | 1 | 12765586 | """Symbolic model code generation.
Improvement ideas
-----------------
* Add compiled code to linecache so that tracebacks can be produced, like done
in the `IPython.core.compilerop` module.
"""
import abc
import collections
import collections.abc
import contextlib
import functools
import inspect
import itertools
import re
import types
import attrdict
import numpy as np
import jinja2
import sympy
from . import function, printing, utils, var
class Variables(var.SymbolObject):
"""Represents code generation model variables."""
pass
class Base:
"""Code generation model base."""
def __init__(self):
self.variables = Variables(self={})
"""Model variables dictionary."""
self.derivatives = {}
"""Dictionary of model derivatives, to optimize higher order diff."""
def __getattribute__(self, name):
"""Overloaded method to bind SymbolicSubsFunction objects."""
attr = super().__getattribute__(name)
if isinstance(attr, function.SymbolicSubsFunction) and attr.ismethod:
return functools.partial(attr, self)
else:
return attr
def _compute_derivative(self, fname, wrt):
assert isinstance(wrt, tuple)
if wrt == ():
return self.default_function_output(fname)
# See if the derivative is registered
dname = self.derivatives.get((fname,) + wrt)
if dname is not None:
return self.default_function_output(dname)
expr = self._compute_derivative(fname, wrt[1:])
wrt_array = self.variables[wrt[0]]
return utils.ndexpr_diff(expr, wrt_array)
def add_derivative(self, fname, wrt, dname):
if utils.isstr(wrt):
wrt = (wrt,)
elif not isinstance(wrt, tuple):
raise TypeError("argument wrt must be string or tuple")
args = self.function_codegen_arguments(fname, include_self=True)
expr = self._compute_derivative(fname, wrt)
deriv = function.SymbolicSubsFunction(args, expr)
setattr(self, dname, deriv)
self.derivatives[(fname,) + wrt] = dname
def set_default_members(self):
for key, val in self.variables['self'].items():
setattr(self, key, val)
@contextlib.contextmanager
def using_default_members(self):
"""Context manager that sets default attributes temporarily."""
set_members = {}
unset_members = []
# Get the values of the members before the entering the context
for k in self.variables['self'].keys():
try:
set_members[k] = getattr(self, k)
except AttributeError:
unset_members.append(k)
try:
# Set the members to their "default" values
self.set_default_members()
yield
finally:
# Restore previous values
for key, val in set_members.items():
setattr(self, key, val)
for key in unset_members:
delattr(self, key)
def function_codegen_arguments(self, fname, include_self=False):
f = getattr(self, fname)
param_names = inspect.signature(f).parameters.keys()
if include_self:
param_names = ['self', *param_names]
return function.Arguments((n,self.variables[n]) for n in param_names)
@utils.cached_method
def default_function_output(self, fname):
"""Function output for the default arguments."""
f = getattr(self, fname)
if isinstance(f, functools.partial):
if isinstance(f.func, function.SymbolicSubsFunction):
return f.func.default_output
args = self.function_codegen_arguments(fname)
with self.using_default_members():
return np.asarray(f(*args.values()))
def print_code(self, **options):
model_printer = ModelPrinter(self, **options)
return model_printer.print_class()
def compile_class(self, **options):
model_printer = ModelPrinter(self, **options)
return model_printer.class_obj()
def print_class(model, **options):
model_printer = ModelPrinter(model, **options)
return model_printer.print_class()
def compile_class(model, **options):
model_printer = ModelPrinter(model, **options)
return model_printer.class_obj()
model_template_src = '''\
# Model imports
import numpy as {{printer.numpy_alias}}
{% for import in m.imports -%}
import {{ import }}
{% endfor %}
class {{m.name}}({{ m.bases | join(', ') }}, metaclass={{m.metaclass}}):
"""Generated code for {{m.name}} from symbolic model."""
{% for method in m.methods %}
{{ method | indent }}
{% endfor %}
{% for name, value in m.assignments.items() -%}
{% if isndarray(value) -%}
{{ printer.print_ndarray(value, assign_to=name) }}
{% else -%}
{{ name }} = {{ value }}
{% endif -%}
{% endfor %}
'''
class ModelPrinter:
"""Generates numpy code for symbolic models."""
@utils.cached_class_property
def template(cls):
return jinja2.Template(model_template_src)
def __init__(self, model, **options):
self.model = model
"""The underlying symbolic model."""
self.options = options
"""Model printer options."""
try:
functions = options['functions']
except KeyError:
functions = getattr(model, 'generate_functions', [])
f_specs = []
for fname in functions:
output = self.model.default_function_output(fname)
arguments = self.model.function_codegen_arguments(fname, True)
f_specs.append((fname, output, arguments))
self._f_specs = f_specs
"""Function generation specifications."""
@property
def name(self):
"""Name of the generated class."""
return (getattr(self.model, 'generated_name', None)
or self.options.get('name', None)
or f'Generated{type(self.model).__name__}')
@property
def assignments(self):
"""Mapping of simple assignments to be made in the class code."""
try:
return self.options['assignments']
except KeyError:
return getattr(self.model, 'generate_assignments', {})
@property
def imports(self):
"""List of imports to include in the generated class code."""
try:
return self.options['imports']
except KeyError:
return getattr(self.model, 'generate_imports', [])
@property
def bases(self):
"""List of names of base classes for the generated model class."""
try:
return self.options['bases']
except KeyError:
return getattr(self.model, 'generated_bases', ['object'])
@property
def metaclass(self):
"""Metaclass for the generated model class."""
try:
return self.options['metaclass']
except KeyError:
return getattr(self.model, 'generated_metaclass', 'type')
@property
def methods(self):
for fname, output, arguments in self._f_specs:
fdef = function.print_function(fname, output, arguments)
yield fdef
def print_class(self):
isndarray = lambda var: isinstance(var, np.ndarray)
context = dict(m=self, printer=printing.Printer(), isndarray=isndarray)
return self.template.render(context)
def class_obj(self):
env = {}
exec(compile(self.print_class(), '<string>', 'exec'), env)
return env[self.name]
def collect_symbols(f):
sig = inspect.signature(f)
if len(sig.parameters) < 2:
raise ValueError(f"method {f.__name__} should have at least 2 "
"parameters, 'self' and the collected symbols")
params = list(sig.parameters.values())
collected_symbols_arg_name = params[-1].name
new_sig = sig.replace(parameters=params[:-1])
nargs_wrapped = len(params) - 1
@functools.wraps(f)
def wrapper(self, *args):
# Validate arguments
nargs_in = len(args) + 1
if nargs_in != nargs_wrapped:
raise TypeError(f"{f.__name__} takes {nargs_wrapped} arguments "
f"but got only {nargs_in}")
# Create substitution dictionary
subs = self.variables['self'].subs_map(self)
for param, value in zip(params[1:-1], args):
subs.update(self.variables[param.name].subs_map(value))
# Create collected symbols AttrDict
collected_symbols = attrdict.AttrDict()
for var, expr in subs.items():
collected_symbols[var.name] = expr
ret = f(self, *args, **{collected_symbols_arg_name: collected_symbols})
# Ensure function return is an ndarray
return np.asarray(ret, object)
wrapper.__signature__ = new_sig
return wrapper
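# Hedged usage sketch (not from the source): a model method decorated with
# collect_symbols receives all registered variables through its trailing
# argument; the names 't', 'x' and 'f' below are illustrative only.
#
#   class MyModel(Base):
#       @collect_symbols
#       def f(self, t, x, *, s):
#           # s.t and s.x hold the symbols substituted for the arguments
#           return [s.x**2 + s.t]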
| 2.34375 | 2 |
leetcode/path-sum-iii/path-sum-iii.py | EliahKagan/practice | 0 | 12765587 | <gh_stars>0
# LeetCode #437 - Path Sum III
# https://leetcode.com/problems/path-sum-iii/
# Prefix-sum hashing solution. Runs in linear time.
import collections
# Definition for a binary tree node.
# class TreeNode:
# def __init__(self, val=0, left=None, right=None):
# self.val = val
# self.left = left
# self.right = right
class Solution:
@staticmethod
def pathSum(root: TreeNode, targetSum: int) -> int:
history = collections.Counter((0,))
def dfs(node, acc):
if not node:
return 0
acc += node.val
count = history[acc - targetSum]
history[acc] += 1
count += dfs(node.left, acc)
count += dfs(node.right, acc)
history[acc] -= 1
return count
return dfs(root, 0)
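# Hedged usage sketch (not from the source): with a minimal TreeNode, the
# tree [1, 2, 3] has two paths summing to 3 ([1, 2] and [3]).
#
#   class TreeNode:
#       def __init__(self, val=0, left=None, right=None):
#           self.val, self.left, self.right = val, left, right
#   root = TreeNode(1, TreeNode(2), TreeNode(3))
#   assert Solution.pathSum(root, 3) == 2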
| 3.40625 | 3 |
memcards/tests/test_models.py | machine23/memmage | 0 | 12765588 | <reponame>machine23/memmage
# from ..models import Tag, Question, Answer
import pytest
from mixer.backend.django import mixer
pytestmark = pytest.mark.django_db
class TestTag:
def test_model(self):
obj = mixer.blend('memcards.Tag')
assert obj.pk == 1
def test_str(self):
tag = mixer.blend('memcards.Tag', name='tag_name')
assert str(tag) == 'tag_name'
class TestQuestion:
def test_model(self):
obj = mixer.blend('memcards.Question')
assert obj.pk == 1
def test_str(self):
test_string = 'test_question'
question = mixer.blend('memcards.Question', text=test_string)
assert str(question) == test_string
class TestAnswer:
def test_model(self):
obj = mixer.blend('memcards.Answer')
assert obj.pk == 1
def test_str(self):
test_string = 'test answer'
answer = mixer.blend('memcards.Answer', text=test_string)
assert str(answer) == test_string
| 2.4375 | 2 |
refinery/units/crypto/keyderive/__init__.py | larsborn/refinery | 0 | 12765589 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Implements key derivation routines. These are mostly meant to be used as
modifiers for multibin expressions that can be passed as key arguments to
modules in `refinery.units.crypto.cipher`.
"""
from ... import arg, Unit
from ....lib.argformats import number
try:
from Crypto.Hash import SHA as SHA1
except ImportError:
from Crypto.Hash import SHA1
from enum import Enum
from Crypto.Hash import MD2, MD4, MD5, SHA256, SHA512, SHA224, SHA384
__all__ = ['arg', 'HASH', 'KeyDerivation']
class HASH(Enum):
MD2 = MD2
MD4 = MD4
MD5 = MD5
SHA1 = SHA1
SHA256 = SHA256
SHA512 = SHA512
SHA224 = SHA224
SHA384 = SHA384
class KeyDerivation(Unit, abstract=True):
def __init__(
self,
size: arg(help='The number of bytes to generate.', type=number),
salt: arg(help='Salt for the derivation.'),
hash: arg.option(choices=HASH, metavar='hash',
help='Specify one of these algorithms (default is {default}): {choices}') = None,
iter: arg.number(metavar='iter', help='Number of iterations; default is {default}.') = None,
**kw
):
return super().__init__(salt=salt, size=size, iter=iter, hash=arg.as_option(hash, HASH), **kw)
@property
def hash(self): return self.args.hash.value
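# Hedged sketch (not part of the source): a concrete derivation unit would
# subclass KeyDerivation and implement process(); PBKDF2 is shown purely for
# illustration.
#
#   from Crypto.Protocol.KDF import PBKDF2
#
#   class pbkdf2_sketch(KeyDerivation):
#       def process(self, data):
#           return PBKDF2(data, self.args.salt, dkLen=self.args.size,
#                         count=self.args.iter or 1000)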
| 2.578125 | 3 |
utils/search_tours_data.py | aakanksha1/selenium-python-framework | 47 | 12765590 | class SearchToursData:
def __init__(self, destination, tour_type, start_year, start_month, start_day, adults_num):
self.destination = destination
self.tour_type = tour_type
self.start_year = start_year
self.start_month = start_month
self.start_day = start_day
self.adults_num = adults_num
| 2.59375 | 3 |
pset6/dna/dna.py | Oobl84/CS50-2020 | 0 | 12765591 | from sys import argv, exit
import csv
import re
if len(argv) != 3:
print("Usage: dna.py data.csv sequence.txt")
exit(1)
elif re.match(".*\.csv$", argv[1]) is None or re.match(".*\.txt$", argv[2]) is None:
print("Usage: dna.py data.csv sequence.txt")
exit(2)
else:
# opening csvfile
with open(argv[1], "r") as csvfile:
database = csv.DictReader(csvfile)
# creating dictionary to hold value counts
# getting columns from first row
seq_count = dict(next(database))
# remove name key
del seq_count['name']
# count number of keys
key_count = len(seq_count)
# set values = 0
for key in seq_count:
seq_count[key] = 0
# checking sequence for repeating patterns
with open(argv[2], "r") as txtfile:
sequence = csv.reader(txtfile)
# getting sequence length
for row in sequence:
line = row[0]
length = len(line)
# getting pattern to check
for key in seq_count:
pat_len = len(key)
flag = 0
repeat_count = 0
repeats = [0]
# checking loop for pattern
i = 0
while (length - i) >= pat_len:
# getting slice to compare
section = line[i: pat_len + i]
# actions on match
if section == key:
flag = 1
repeat_count += 1
i += pat_len
else:
# if matches found append to list
if flag == 1:
repeats.append(repeat_count)
repeat_count = 0
flag = 0
i += 1
# otherwise just check from the next sequence
else:
i += 1
                    # flush a run that reaches the end of the sequence,
                    # otherwise a trailing repeat block is never counted
                    if flag == 1:
                        repeats.append(repeat_count)
                    # set repeats value in dictionary
                    seq_count[key] = max(repeats)
# rewind file and check values in database
csvfile.seek(0)
next(csvfile)
for row in database:
checker = dict(row)
check_count = 0
for key in seq_count:
if seq_count[key] == int(checker[key]):
check_count += 1
if check_count == key_count:
print(row['name'])
exit(0)
print("No match")
exit(0) | 3.375 | 3 |
bob/db/swan/config_protocol_grandtest0_voice_bio.py | bioidiap/bob.db.swan | 0 | 12765592 | database.protocol = 'grandtest0-voice-bio'
| 0.960938 | 1 |
kttool/actions/version.py | heiseish/kt | 6 | 12765593 | <gh_stars>1-10
from kttool.version import version
from kttool.base import Action
from kttool.logger import color_cyan, log
class Version(Action):
def _act(self) -> None:
log(f'Current version: {color_cyan(version)}')
| 1.929688 | 2 |
coderdojochi/migrations/0019_auto_20180815_1658.py | rgroves/weallcode-website | 15 | 12765594 | <gh_stars>10-100
# Generated by Django 2.0.6 on 2018-08-15 21:58
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('coderdojochi', '0018_auto_20180606_0838'),
]
operations = [
migrations.AddField(
model_name='guardian',
name='birthday',
field=models.DateTimeField(null=True),
),
migrations.AddField(
model_name='guardian',
name='gender',
field=models.CharField(max_length=255, null=True),
),
migrations.AddField(
model_name='guardian',
name='race_ethnicity',
field=models.ManyToManyField(to='coderdojochi.RaceEthnicity'),
),
migrations.AddField(
model_name='mentor',
name='birthday',
field=models.DateTimeField(null=True),
),
migrations.AddField(
model_name='mentor',
name='gender',
field=models.CharField(max_length=255, null=True),
),
migrations.AddField(
model_name='mentor',
name='race_ethnicity',
field=models.ManyToManyField(to='coderdojochi.RaceEthnicity'),
),
]
| 1.765625 | 2 |
solutions/1450-number-of-students-doing-homework-at-a-given-time.py | lk-hang/leetcode | 0 | 12765595 | """
Given two integer arrays startTime and endTime and given an integer queryTime.
The ith student started doing their homework at the time startTime[i] and finished it at time endTime[i].
Return the number of students doing their homework at time queryTime. More formally, return the number of students where queryTime lays in the interval [startTime[i], endTime[i]] inclusive.
"""
from typing import List
class Solution:
def busyStudent(self, startTime: List[int], endTime: List[int], queryTime: int) -> int:
return sum(s <= queryTime <=e for s, e in zip(startTime, endTime)) | 3.734375 | 4 |
Cron_Philip/Assignments/great_number_game/g_n_g.py | webguru001/Python-Django-Web | 5 | 12765596 | from flask import Flask, render_template, request, redirect, session
import random
app = Flask(__name__)
app.secret_key = 'ThisIsSecret'
@app.route('/')
def index():
if 'number' not in session:
session['format'] = 'hide'
session['number'] = random.randrange(0, 101)
print session['number']
return render_template("index.html")
@app.route('/process', methods=['POST'])
def process():
session['answer'] = int(request.form['my_guess'])
print session['answer']
print session['answer'] == session['number']
if session['answer'] == session['number']:
session['feedback'] = 'You win!'
session['format'] = 'correct boxStyle'
session['formatG'] = 'hide'
session['formatGuess'] = 'show'
return redirect('/')
elif session['answer'] < session['number']:
session['feedback'] = 'Too low!'
session['format'] = 'guess boxStyle'
session['formatG'] = 'show'
session['formatGuess'] = 'hide'
else:
session['feedback'] = 'Too high!'
session['format'] = 'guess boxStyle'
session['formatG'] = 'show'
session['formatGuess'] = 'hide'
return redirect('/')
@app.route('/reset', methods=['POST'])
def reset():
session.clear()
return redirect('/')
app.run(debug=True)
| 2.78125 | 3 |
stereo/preprocess/qc.py | nilsmechtel/stereopy | 61 | 12765597 | #!/usr/bin/env python3
# coding: utf-8
"""
@author: <NAME> <EMAIL>
@last modified by: <NAME>
@file:qc.py
@time:2021/03/26
"""
from scipy.sparse import issparse
import numpy as np
def cal_qc(data):
"""
    calculate three QC metrics: the number of genes expressed per cell, the
    total counts per cell, and the percentage of counts in mitochondrial genes.
:param data: the StereoExpData object.
:return: StereoExpData object storing quality control results.
"""
exp_matrix = data.exp_matrix
total_count = cal_total_counts(exp_matrix)
n_gene_by_count = cal_n_genes_by_counts(exp_matrix)
pct_counts_mt = cal_pct_counts_mt(data, exp_matrix, total_count)
data.cells.total_counts = total_count
data.cells.pct_counts_mt = pct_counts_mt
data.cells.n_genes_by_counts = n_gene_by_count
return data
def cal_total_counts(exp_matrix):
"""
calculate the total gene counts of per cell.
:param exp_matrix: the express matrix.
:return:
"""
total_count = np.array(exp_matrix.sum(1)).reshape(-1)
return total_count
def cal_per_gene_counts(exp_matrix):
"""
calculate the total counts of per gene.
:param exp_matrix: the express matrix.
:return:
"""
gene_counts = np.array(exp_matrix.sum(axis=0)).reshape(-1)
return gene_counts
def cal_n_cells_by_counts(exp_matrix):
"""
total counts of each gene.
:param exp_matrix: the express matrix.
:return:
"""
n_cells_by_counts = np.array(exp_matrix.sum(0)).reshape(-1)
return n_cells_by_counts
def cal_n_cells(exp_matrix):
"""
    Number of cells in which each gene is detected.
:param exp_matrix: the express matrix.
:return:
"""
n_cells = exp_matrix.getnnz(axis=0) if issparse(exp_matrix) else np.count_nonzero(exp_matrix, axis=0)
return n_cells
def cal_n_genes_by_counts(exp_matrix):
n_genes_by_counts = exp_matrix.getnnz(axis=1) if issparse(exp_matrix) else np.count_nonzero(exp_matrix, axis=1)
return n_genes_by_counts
def cal_pct_counts_mt(data, exp_matrix, total_count):
if total_count is None:
total_count = cal_total_counts(exp_matrix)
mt_index = np.char.startswith(np.char.lower(data.gene_names), prefix='mt-')
mt_count = np.array(exp_matrix[:, mt_index].sum(1)).reshape(-1)
pct_counts_mt = mt_count / total_count * 100
return pct_counts_mt
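if __name__ == '__main__':
    # Hedged smoke test (not from the source): exercise the matrix helpers
    # alone; cal_qc itself needs a full StereoExpData object.
    from scipy.sparse import csr_matrix
    m = csr_matrix(np.array([[1, 0, 2], [0, 3, 0]]))
    print(cal_total_counts(m))       # [3 3]
    print(cal_n_genes_by_counts(m))  # [2 1]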
| 2.59375 | 3 |
e2e/aws/s3/__main__.py | bincyber/pitfall | 33 | 12765598 | <gh_stars>10-100
from datetime import datetime
import pulumi_aws as aws
import pulumi
cfg = pulumi.Config()
bucket_name = cfg.require("s3-bucket-name")
creation_date = datetime.utcnow().strftime('%Y/%m/%d')
tags = {
'Environment': cfg.require('environment'),
'BillingProject': cfg.require('billing-project'),
'CreatedBy': 'Pulumi',
'CreatedOn': creation_date,
'Owner': cfg.require('owner'),
'PulumiProject': pulumi.get_project(),
'PulumiStack': pulumi.get_stack(),
'Customer': cfg.require_secret('customer')
}
# Provision an AWS S3 Bucket
bucket = aws.s3.Bucket(resource_name=bucket_name, force_destroy=True, tags=tags)
# Export the name of the S3 bucket
pulumi.export('s3_bucket_name', bucket.id)
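# --- Setup sketch (hypothetical values) ---------------------------------------
# The config keys read above would typically be set before ``pulumi up``:
#   pulumi config set s3-bucket-name my-example-bucket
#   pulumi config set environment staging
#   pulumi config set billing-project demo
#   pulumi config set owner alice
#   pulumi config set --secret customer acme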
| 2.140625 | 2 |
djangocms_baseplugins/baseplugin/templatetags/baseplugin_tags.py | benzkji/djangocms-baseplugins | 2 | 12765599 | from django import template
from ..utils import sanitize_richtext
register = template.Library()
@register.filter
def baseplugin_pluginid(plugin_object):
return 'data-plugin-id="%s"' % plugin_object.pk
@register.filter
def baseplugin_sanitize_richtext(text):
return sanitize_richtext(text)
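# Template usage sketch (illustrative; ``instance`` stands in for whatever
# plugin object the template receives):
#   {% load baseplugin_tags %}
#   <div {{ instance|baseplugin_pluginid }}>
#     {{ instance.body|baseplugin_sanitize_richtext }}
#   </div>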
| 1.765625 | 2 |
bulk/send.py | Crazyokd/pt.csust_crawler | 4 | 12765600 | <reponame>Crazyokd/pt.csust_crawler
import csv
import os
from settings import SENDER_EMAIL, EMAIL_PASSWORD, DISPLAY_NAME
from smtplib import SMTP
import markdown
from email.mime.text import MIMEText
from email.mime.multipart import MIMEMultipart
from email.mime.base import MIMEBase
from email import encoders
import time
def get_msg(csv_file_path, template):
with open(csv_file_path, 'r') as file:
headers = file.readline().split(',')
headers[len(headers) - 1] = headers[len(headers) - 1][:-1]
# i am opening the csv file two times above and below INTENTIONALLY, changing will cause error
with open(csv_file_path, 'r') as file:
data = csv.DictReader(file)
for row in data:
required_string = template
for header in headers:
value = row[header]
required_string = required_string.replace(f'${header}', value)
yield row['EMAIL'], required_string
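# --- Expected inputs (illustrative) -------------------------------------------
# get_msg assumes ./bulk/data.csv has an EMAIL column plus one column per
# $PLACEHOLDER token used in the template, e.g.:
#
#   data.csv:   EMAIL,NAME
#               ada@example.com,Ada
#
#   template:   Greetings
#               Hello $NAME, ...
#
# The first line of the rendered message becomes the subject (see send_emails).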
def confirm_attachments():
file_contents = []
file_names = []
try:
for filename in os.listdir('./bulk/ATTACH'):
# entry = input(f"""TYPE IN 'Y' AND PRESS ENTER IF YOU CONFIRM T0 ATTACH {filename}
# TO SKIP PRESS ENTER: """)
entry = 'Y'
confirmed = True if entry.upper()[0] == 'Y' else False
if confirmed:
file_names.append(filename)
with open(f'{os.getcwd()}/bulk/ATTACH/{filename}', "rb") as f:
content = f.read()
file_contents.append(content)
return {'names': file_names, 'contents': file_contents}
except FileNotFoundError:
print('No ATTACH directory found...')
def send_emails(server: SMTP, template):
attachments = confirm_attachments()
sent_count = 0
for receiver, message in get_msg('./bulk/data.csv', template):
multipart_msg = MIMEMultipart("alternative")
multipart_msg["Subject"] = message.splitlines()[0]
multipart_msg["From"] = DISPLAY_NAME + f' <{SENDER_EMAIL}>'
multipart_msg["To"] = receiver
text = message
html = markdown.markdown(text)
part1 = MIMEText(text, "plain")
part2 = MIMEText(html, "html")
multipart_msg.attach(part1)
multipart_msg.attach(part2)
if attachments:
for content, name in zip(attachments['contents'], attachments['names']):
attach_part = MIMEBase('application', 'octet-stream')
attach_part.set_payload(content)
encoders.encode_base64(attach_part)
attach_part.add_header('Content-Disposition',
f"attachment; filename={name}")
multipart_msg.attach(attach_part)
try:
server.sendmail(SENDER_EMAIL, receiver,
multipart_msg.as_string())
except Exception as err:
            print(f'Problem occurred while sending to {receiver}')
print(err)
input("PRESS ENTER TO CONTINUE")
else:
sent_count += 1
print(f"Sent {sent_count} emails")
def send(template: str):
# host = "smtp.gmail.com"
host = "smtp.qq.com"
port = 587 # TLS replaced SSL in 1999
server = SMTP(host=host, port=port)
server.connect(host=host, port=port)
server.ehlo()
server.starttls()
server.ehlo()
    server.login(user=SENDER_EMAIL, password=EMAIL_PASSWORD)
send_emails(server, template)
server.quit()
# AAHNIK 2020
| 2.5 | 2 |
code/Datasets.py | asdqwe12011/Medical-Datasets-Classification | 0 | 12765601 | import pandas as pd
import numpy as np
def load_cancer():
# data, target, feature_names
result_dict = {'features': np.array(["Clump Thickness",
"Uniformity of Cell Size",
"Uniformity of Cell Shape",
"Marginal Adhesion",
"Single Epithelial Cell Size",
"Bare Nuclei",
"Bland Chromatin",
"Normal Nucleoli",
"Mitoses"])}
df_dict = pd.read_csv('breast_cancer_wisconsin.csv', header=0).to_dict('split')
df_data = np.array(df_dict['data'])
result_dict['data'] = df_data[:, :-1]
result_dict['target'] = df_data[:, -1]
return result_dict
def load_hepatitis():
result_dict = {'features': np.array(["AGE",
"SEX",
"STEROID",
"ANTIVIRAL",
"FATIGUE",
"MALAISE",
"ANOREXIA",
"LIVER BIG",
"LIVER FIRM",
"SPLEEN PALPABLE",
"SPIDERS",
"ASCITES",
"VARICES",
"BILIRUBIN",
"ALK PHOSPHATE",
"SGOT",
"ALBUMIN",
"PROTIME",
"HISTOLOGY"])
}
df_dict = pd.read_csv('hepatitis.csv', header=0).to_dict('split')
df_data = np.array(df_dict['data'])
result_dict['data'] = df_data[:, 1:]
result_dict['target'] = (df_data[:, 0]).astype(int)
return result_dict
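# --- Usage sketch --------------------------------------------------------------
# Assumes breast_cancer_wisconsin.csv / hepatitis.csv are in the working
# directory, as the hard-coded paths above require.
if __name__ == '__main__':
    cancer = load_cancer()
    print(cancer['data'].shape, cancer['target'].shape, len(cancer['features']))
    hepatitis = load_hepatitis()
    print(hepatitis['data'].shape, hepatitis['target'].shape)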
| 2.953125 | 3 |
networks/drox/_solution/solver.py | afnom/what-the-ctf-2020 | 0 | 12765602 | import pyshark
cap = pyshark.FileCapture('drox.pcapng')
key = b'xord'
for packet in cap:
try:
data = bytes([int(x, 16) for x in packet.tcp.payload.split(":")])
        r = range(len(data))  # XOR over the payload; the 4-byte key repeats via modulo
print(''.join([chr((key[i%len(key)]) ^ (data[i])) for i in r]))
except Exception as e:
print(e)
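# Prerequisites (not shown above): pyshark wraps tshark, so Wireshark's CLI
# must be installed, and drox.pcapng must sit in the working directory. The
# 4-byte key b'xord' is presumably recovered from the challenge itself.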
| 2.21875 | 2 |
test/test_problem1.py | jimleroux/interview_problems | 0 | 12765603 | import pytest
from enphaseAI.problem1 import find_lines_from_points, find_lines_intersection
def test_find_lines_from_points() -> None:
p0 = 0., "string_input"
p1 = 1., 2.5
# Test for string input
args = [p0, p1]
pytest.raises(AssertionError, find_lines_from_points, *args)
# Test for same points
args = [p1, p1]
pytest.raises(AssertionError, find_lines_from_points, *args)
# Test for int inputs
p0 = 0, 1
p1 = 2, 4
args = [p0, p1]
pytest.raises(AssertionError, find_lines_from_points, *args)
p0 = 0., 0.
p1 = 1., 1.
a, b = find_lines_from_points(p0, p1)
assert a == 1., "Slope should be 1"
assert b == 0., "Intersect should be 0"
p0 = 0., 0.
p1 = 1., -1.
a, b = find_lines_from_points(p0, p1)
    assert a == -1., "Slope should be -1"
    assert b == 0., "Intercept should be 0"
def test_find_lines_intersection() -> None:
l0 = 0., "string_input"
l1 = 1., 2.5
# Test for string input
args = [l0, l1]
pytest.raises(AssertionError, find_lines_intersection, *args)
# Test for same lines
args = [l1, l1]
pytest.raises(AssertionError, find_lines_intersection, *args)
# Test for int inputs
l0 = 0, 1
l1 = 2, 4
args = [l0, l1]
pytest.raises(AssertionError, find_lines_intersection, *args)
l0 = 1., 0.
l1 = -1., 0.
args = [l0, l1]
x, y = find_lines_intersection(l0, l1)
assert x == 0. and y == 0., "The intersection should be the origin"
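# Run with pytest from the repository root, e.g.:
#   pytest test/test_problem1.py
# (path taken from this file's location; requires enphaseAI.problem1 on the
# import path, as imported above).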
| 2.8125 | 3 |
attic/ngfrontman/wp_frontman/models.py | ludoo/wpkit | 0 | 12765604 | import os
import sys
import re
import datetime
import warnings
from urlparse import urlsplit, urlunsplit, SplitResult
from hashlib import md5
from threading import local
from types import ModuleType
from functools import partial
from copy import deepcopy
from warnings import warn
from django.conf import settings
from django.db import models, connection, connections, DatabaseError
from django.core.urlresolvers import set_urlconf
from django.db.utils import ConnectionDoesNotExist
from wp_frontman.lib.external.phpserialize import loads, phpobject
DB_PREFIX = getattr(settings, 'WPF_DB_PREFIX', 'wp_')
DB_CONNECTION = getattr(settings, 'WPF_DB_CONNECTION', 'default')
SITE_ID = getattr(settings, 'WPF_SITE_ID', 1)
#OPTIONS_PACKAGE = getattr(settings, 'WPF_OPTIONS_PACKAGE', 'wpf_blogs')
def strtobool(s):
"""Damn WP and its habit of using strings instead of ints for bools in MySQL"""
try:
i = int(s)
except (TypeError, ValueError):
return s
return bool(i)
php_unserialize = partial(loads, object_hook=phpobject)
class Site(object):
wp_frontman_template = dict(
db_version = None,
wp_root = None,
support_category_order = False,
use_sendfile = False,
builtin_post_types = {
'attachment': {
'_builtin': True, 'capability_type': 'post', 'description': '',
'exclude_from_search': False, 'has_archive': False, 'hierarchical': False,
'label': 'Media', 'map_meta_cap': True, 'name': 'attachment',
'public': True, 'publicly_queryable': True, 'rewrite': False,
'taxonomies': {}
},
'page': {
'_builtin': True, 'capability_type': 'page', 'description': '',
'exclude_from_search': False, 'has_archive': False, 'hierarchical': True,
'label': 'Pages', 'map_meta_cap': True, 'name': 'page',
'public': True, 'publicly_queryable': False, 'rewrite': False,
'taxonomies': {}
},
'post': {
'_builtin': True, 'capability_type': 'post', 'description': '',
'exclude_from_search': False, 'has_archive': False, 'hierarchical': False,
'label': 'Posts', 'map_meta_cap': True, 'name': 'post',
'public': True, 'publicly_queryable': True, 'rewrite': False,
'taxonomies': {}
}
},
builtin_taxonomies = {
'category': {
'_builtin': True, 'hierarchical': True, 'label': 'Categories',
'name': 'category', 'object_type': {0: 'post'},
'public': True, 'query_var': 'category_name',
'rewrite': {
'hierarchical': True, 'slug': 'category', 'with_front': True
},
'update_count_callback': '_update_post_term_count'
},
'post_format': {
'_builtin': True, 'hierarchical': False, 'label': 'Format',
'name': 'post_format', 'object_type': {0: 'post'},
'public': True, 'query_var': 'post_format',
'rewrite': {
'hierarchical': False, 'slug': 'type', 'with_front': True
},
'update_count_callback': ''
},
'post_tag': {
'_builtin': True, 'hierarchical': False, 'label': 'Post Tags',
'name': 'post_tag', 'object_type': {0: 'post'},
'public': True, 'query_var': 'tag',
'rewrite': {
'hierarchical': False, 'slug': 'tag', 'with_front': True
},
'update_count_callback': '_update_post_term_count'
}
},
wp_auth_key = None,
wp_auth_salt = None,
wp_secret_key = None,
wp_secret_salt = None,
wp_secure_auth_key = None,
wp_secure_auth_salt = None,
wp_logged_in_key = None,
wp_logged_in_salt = None,
wp_nonce_key = None,
wp_nonce_salt = None,
rewrite_vars = None,
rewrite_feeds = None,
)
meta_template = dict(
dm_hash=None, admin_email=None, admin_user_id=int,
registration=lambda v: False if v == 'none' else True,
registrationnotification=lambda v: True if v == 'yes' else False,
site_admins=php_unserialize, site_name=None, siteurl=None,
subdomain_install=strtobool, wp_frontman=php_unserialize,
auth_salt=str, cookiehash=None
)
site_id = None
db_prefix = None
using = None
key = None
mu = None
models_modname = None
_meta = None
_blog_data = None
_blog_path_map = None
_blog_domain_map = None
_blog_redirect_map = None
def __init__(self, site_id=None, db_prefix=None, using=None, mu=None):
self.site_id = site_id or SITE_ID
self.db_prefix = db_prefix or DB_PREFIX
self.using = using or DB_CONNECTION
self.key = '%s-%s-%s' % (self.using, self.db_prefix, self.site_id)
if mu is None:
# try settings
mu = getattr(settings, 'WPF_MULTIBLOG', None)
if mu is None:
# check the database for mu tables
cursor = self.get_cursor()
if cursor.execute("show tables like '%ssite'" % self.db_prefix):
mu = True
self.mu = mu or False
# sanity check
if self.mu and not bool(self.get_cursor().execute("show tables like '%ssitemeta'" % self.db_prefix)):
raise SystemError("Configured as multiblog ('WPF_MULTIBLOG' set to True in settings) but no site meta table found")
self.models_modname = '%s.site_%s' % (__name__, self.key)
def __repr__(self):
return '<Site id %s using %s db prefix %s>' % (self.site_id, self.using, self.db_prefix)
@property
def models(self):
if not self.models_modname in sys.modules:
from wp_frontman import wp_models
mod = ModuleType(self.models_modname)
manager_db = None if self.using is None or self.using == 'default' else self.using
# TODO: use a lock here so that each thread does not have to rebuild the module?
models = []
for c in wp_models.__site_models__:
model = c._wp_model_factory(self, mod, manager_db)
models.append(model.__name__)
setattr(mod, '__models__', models)
sys.modules[self.models_modname] = mod
return sys.modules[self.models_modname]
@property
def meta(self):
if self._meta is None:
meta = dict()
if self.mu:
cursor = self.get_cursor()
try:
cursor.execute("select meta_key, meta_value from %ssitemeta where site_id=%%s and left(meta_key, 1) != '_'" % self.db_prefix, (self.site_id,))
except DatabaseError:
return dict()
meta = dict()
rows = dict(cursor.fetchall())
for key, func in self.meta_template.items():
if key in rows:
try:
meta[key] = rows[key] if not callable(func) else func(rows[key])
except ValueError, e:
warn("Error in key %s for site %s: %s" % (key, self, e))
meta[key] = dict()
elif not callable(func):
meta[key] = func
else:
rows = Blog(1, self).options
for key, func in self.meta_template.items():
if key in rows:
meta[key] = rows[key]
elif not callable(func):
meta[key] = func
else:
meta[key] = None
meta['wp_frontman'] = rows.get('wp_frontman_site', dict())
t = urlsplit(meta['siteurl'])
meta['siteurl_tokens'] = t
if len(t.path) > 1 and not t.path.endswith('/'):
path = t.path + '/'
elif not t.path:
path = '/'
else:
path = t.path
        meta['pingback_url'] = urlunsplit((t.scheme, t.netloc, path + 'xmlrpc.php', '', ''))
if not meta['cookiehash']:
            meta['cookiehash'] = md5(meta['siteurl']).hexdigest()
wp_frontman = deepcopy(self.wp_frontman_template)
wp_frontman.update(meta.get('wp_frontman', dict()))
meta['wp_frontman'] = wp_frontman
self._meta = meta
return self._meta
@property
def blog_data(self):
if self._blog_data is None:
if not self.mu:
self._blog_data = {1:dict(blog_id=1, domain=None, secondary_domain=None, path=None, archived=0, lang_id=0)}
else:
cursor = self.get_cursor()
if cursor.execute("show tables like '%sdomain_mapping'" % self.db_prefix):
cursor.execute("""
select b.blog_id, if(dm.active=1, dm.domain, b.domain) as domain, if(dm.active=1, b.domain, NULL) as secondary_domain, b.path, b.archived, b.lang_id
from %sblogs b
left join %sdomain_mapping dm on dm.blog_id=b.blog_id
where site_id=%%s and deleted=0
""" % (self.db_prefix, self.db_prefix), (self.site_id,))
else:
cursor.execute("""
select blog_id, domain, NULL as secondary_domain, path, archived, lang_id
from %sblogs
where site_id=%%s and deleted=0
""" % self.db_prefix, (self.site_id,))
fields = [f[0] for f in cursor.description]
self._blog_data = dict((r[0], dict(zip(fields, [v or None for v in r]))) for r in cursor.fetchall())
return self._blog_data
@property
def blog_path_map(self):
if self._blog_path_map is None:
blogmap = dict()
for id, data in self.blog_data.items():
blogmap[(data['path'] or '').replace('/', '')] = id
self._blog_path_map = blogmap
return self._blog_path_map
@property
def blog_domain_map(self):
if self._blog_domain_map is None:
blogmap = dict()
for id, data in self.blog_data.items():
domain, secondary_domain = data['domain'], data['secondary_domain']
if domain:
blogmap[domain] = id
if secondary_domain:
blogmap[secondary_domain] = domain
self._blog_domain_map = blogmap
return self._blog_domain_map
def get_cursor(self):
if self.using and self.using != 'default':
try:
_connection = connections[self.using]
except ConnectionDoesNotExist, e:
raise ValueError("No connection named '%s' in Site object: %s" % (self.using, e))
else:
_connection = connection
return _connection.cursor()
class Blog(object):
wp_frontman_template = dict(
db_version = 1,
cache = dict(enabled=False),
preformatter = dict(enabled=False),
feedburner = dict(enabled=False),
images = dict(enabled=False),
analytics = dict(enabled=False),
custom_taxonomies = dict(),
custom_post_types = dict(),
)
options_template = dict(
admin_email=None, avatar_default=None, avatar_rating=None, blacklist_keys=None,
blog_charset=str, blog_public=strtobool, blogdescription=None, blogname=None, category_base=None,
category_children=php_unserialize, comment_max_links=int, comment_moderation=bool,
comment_order=None, comment_registration=strtobool, comment_whitelist=strtobool,
comments_notify=strtobool, comments_per_page=int, current_theme=None, date_format=None,
default_category=int, default_comment_status=None,
default_comments_page=dict(newest='last', oldest='first').get,
default_link_category=int, default_ping_status=None, default_pingback_flag=strtobool,
fileupload_url=None, home=None, html_type=None, language=None, links_recently_updated_time=int,
links_updated_date_format=None, mailserver_login=None, mailserver_pass=None,
mailserver_port=int, mailserver_url=None, moderation_notify=strtobool,
page_comments=strtobool, permalink_structure=None, ping_sites=None, post_count=None,
posts_per_page=int, posts_per_rss=int, require_name_email=strtobool,
rewrite_rules=php_unserialize, rss_language=None, rss_use_excerpt=bool,
show_avatars=strtobool, siteurl=None, start_of_week=int, tag_base=None, template=None,
thread_comments=strtobool, thread_comments_depth=int, time_format=None, timezone_string=None,
upload_path=None, upload_url_path=None, use_trackback=strtobool, WPLANG=None, wp_user_roles=php_unserialize,
wordpress_api_key=None, defensio_key=None, wp_frontman=php_unserialize, wp_frontman_site=php_unserialize,
)
wp_permalink_tokens = dict(
year='[0-9]{4}', monthnum='[0-9]{1,2}', day='[0-9]{1,2}', hour='[0-9]{1,2}',
minute='[0-9]{1,2}', second='[0-9]{1,2}', postname='[^/]+', post_id='[0-9]+',
category='.+?', tag='.+?', author='[^/]+', pagename='[^/]+?', search='.+'
)
wp_permalink_map = dict(
monthnum='month', postname='slug', post_id='id', search='q'
)
wp_permalink_re = re.compile(r'%([a-z_]+)%')
_blogs = dict()
_local = local()
site = Site()
blog_id = None
db_prefix = None
blog_key = None
models_modname = None
urlconf = 'urls'
_options = None
_post_types = None
_taxonomies = None
_cache = None
@classmethod
def factory(cls, blog_id, site=None, active=True):
site = site or cls.site
obj = cls(blog_id, site)
if active and site is cls.site:
set_urlconf(obj.urlconf)
cls._local.active_blog = obj
return obj
@classmethod
def get_active(cls):
return getattr(cls._local, 'active_blog', None)
@classmethod
def get_blogs(cls, site=None):
site = site or cls.site
for blog_id in site.blog_data:
yield(cls(blog_id))
def __new__(cls, *args, **kw):
if args:
blog_id = args[0]
elif 'blog_id' in kw:
blog_id = kw['blog_id']
else:
raise TypeError("No blog_id, cannot create Blog instance")
if len(args) == 2:
site = args[-1]
elif 'site' in kw:
site = kw['site']
else:
site = cls.site
site = site or cls.site
if blog_id not in site.blog_data:
raise ValueError("No blog with id '%s' in site '%s'" % (blog_id, site.site_id))
key = (site.key, blog_id)
if key in cls._blogs:
return cls._blogs[key]
obj = super(Blog, cls).__new__(cls)
cls._blogs[key] = obj
return obj
def __init__(self, blog_id, site=None):
if 'blog_id' in self.__dict__:
return
self.blog_id = blog_id
if site is not None:
self.site = site
self.domain = self.secondary_domain = self.path = self.archived = self.lang_id = None
for k, v in self.site.blog_data[blog_id].items():
setattr(self, k, v)
if self.path:
self.path = self.path if self.path[0] == '/' else '/' + self.path
self.path = self.path if self.path[-1] == '/' else self.path + '/'
self.db_prefix = DB_PREFIX if self.blog_id == 1 else '%s%s_' % (DB_PREFIX, self.blog_id)
self.blog_key = "%s_%s" % (self.site.key, self.blog_id)
self.models_modname = 'wp_frontman.models_blog_%s_%s' % (self.site.key, self.blog_id)
self.urlrules_modname = 'wp_frontman.blog_urls.urls_%s_%s' % (self.site.key, self.blog_id)
self._options = None
self._post_types = None
self._taxonomies = None
self._cache = dict()
@property
def cache(self):
return self._cache
@property
def post_types(self):
if self._post_types is None:
post_types = self.site.meta['wp_frontman']['builtin_post_types'].items()
if self.options['wp_frontman']['custom_post_types'] and isinstance(self.options['wp_frontman']['custom_post_types'], dict):
post_types += self.options['wp_frontman']['custom_post_types'].items()
self._post_types = dict(post_types)
return self._post_types
@property
def taxonomies(self):
if self._taxonomies is None:
taxonomies = self.site.meta['wp_frontman']['builtin_taxonomies'].items()
if self.options['wp_frontman']['custom_taxonomies'] and isinstance(self.options['wp_frontman']['custom_taxonomies'], dict):
taxonomies += self.options['wp_frontman']['custom_taxonomies'].items()
self._taxonomies = dict(taxonomies)
return self._taxonomies
@property
def options(self):
if self._options is None:
options = dict()
db_prefix = DB_PREFIX if self.blog_id == 1 else "%s%s_" % (DB_PREFIX, self.blog_id)
cursor = self.site.get_cursor()
if not cursor.execute("select option_name, option_value from %soptions where autoload='yes' order by option_name" % db_prefix):
raise SystemError("No options for blog %s" % self)
rows = dict(cursor.fetchall())
for key, func in self.options_template.items():
if key in rows:
try:
options[key] = rows[key] if not callable(func) else func(rows[key])
except ValueError, e:
warn("Error in key %s for blog %s: %s" % (key, self, e))
options[key] = dict()
del rows[key]
elif not callable(func):
options[key] = func
for k, v in rows.items():
if k in options:
continue
try:
v = int(v)
except (TypeError, ValueError):
try:
v = php_unserialize(v)
except ValueError:
pass
options[k] = v
permalink_structure = options['permalink_structure']
if not permalink_structure:
raise SystemError("No 'permalink_structure' option found in options for blog '%s'" % self)
if permalink_structure.startswith('/index.php'):
permalink_structure = permalink_structure[10:]
if permalink_structure.startswith('/'):
permalink_structure = permalink_structure[1:]
ps = permalink_structure
permalink_tokens = list()
scanner = self.wp_permalink_re.scanner(ps)
ps_tokens = list()
start = 0
while True:
m = scanner.search()
if not m:
break
ps_tokens.append(ps[start:m.start()]) #re.escape(ps[start:m.start()]))
permalink_tokens.append(self.wp_permalink_map.get(m.group(1), m.group(1)))
ps_tokens.append('(?P<%s>%s)' % (permalink_tokens[-1], self.wp_permalink_tokens[m.group(1)]))
start = m.end()
ps_tokens.append(ps[start:])
ps = ''.join(ps_tokens)
#for m in self.wp_permalink_re.findall(ps):
# permalink_tokens.append(self.wp_permalink_map.get(m, m))
# ps = ps.replace('%' + m + '%', '(?P<%s>%s)' % (permalink_tokens[-1], self.wp_permalink_tokens[m]))
if ps and ps[-1] == '/':
ps = ps[:-1]
options['permalink_structure_orig'] = options['permalink_structure']
options['permalink_structure'] = permalink_structure
options['permalink_tokens'] = permalink_tokens
options['permalink_ps'] = ps
home_tokens = urlsplit(options['home'])
if not home_tokens.path:
home_tokens = SplitResult(home_tokens.scheme, home_tokens.netloc, '/', home_tokens.query, home_tokens.fragment)
siteurl_tokens = urlsplit(options['siteurl'])
if not siteurl_tokens.path:
siteurl_tokens = SplitResult(siteurl_tokens.scheme, siteurl_tokens.netloc, '/', siteurl_tokens.query, siteurl_tokens.fragment)
if self.site.mu:
if home_tokens.netloc != self.domain:
home_tokens = SplitResult(home_tokens.scheme, self.domain, home_tokens.path, home_tokens.query, home_tokens.fragment)
options['siteurl_mapped'] = urlunsplit((siteurl_tokens.scheme, self.domain, siteurl_tokens.path, siteurl_tokens.query, siteurl_tokens.fragment))
if self.site.meta['pingback_url']:
if self.site.meta['subdomain_install']:
                        options['pingback_url'] = urlunsplit((siteurl_tokens.scheme, self.domain, '/xmlrpc.php', '', ''))
else:
pingback_tokens = urlsplit(self.site.meta['pingback_url'])
options['pingback_url'] = urlunsplit((pingback_tokens.scheme, pingback_tokens.netloc, self.path + 'xmlrpc.php', '', ''))
path = self.site.meta['siteurl_tokens'].path
else:
if not options.get('pingback_url'):
if len(siteurl_tokens.path) > 1 and not siteurl_tokens.path.endswith('/'):
path = siteurl_tokens.path + '/'
elif not siteurl_tokens.path:
path = '/'
else:
path = siteurl_tokens.path
                    options['pingback_url'] = urlunsplit((siteurl_tokens.scheme, siteurl_tokens.netloc, path + 'xmlrpc.php', '', ''))
options['siteurl_mapped'] = urlunsplit((siteurl_tokens.scheme, siteurl_tokens.netloc, siteurl_tokens.path, siteurl_tokens.query, siteurl_tokens.fragment))
path = siteurl_tokens.path
options['home'] = urlunsplit(home_tokens)
options['siteurl'] = urlunsplit(siteurl_tokens)
# the path to the wordpress files is set in the siteurl site meta value
if path == '/':
path = ''
options['admin_url'] = path + '/wp-admin/'
options['includes_url'] = path + '/wp-includes/'
options['themes_root_url'] = path + '/wp-content/themes/'
options['theme_url'] = options['themes_root_url'] + '%s/' % options['template']
options['media_url'] = path + ('/wp-content/blogs.dir/%s/' % self.blog_id)
options['upload_path'] = options['upload_path'] or 'wp-content/uploads'
if self.site.mu:
wp_root = self.site.meta.get('wp_frontman', dict()).get('wp_root')
else:
wp_root = options.get('wp_frontman_site', dict()).get('wp_root')
upload_path = options['upload_path'].replace('/', os.path.sep)
wp_root = wp_root.replace('/', os.path.sep)
if upload_path[0] != os.path.sep and wp_root and wp_root[0] == os.path.sep:
options['upload_abspath'] = wp_root + ('' if wp_root[-1] == os.path.sep else os.path.sep) + upload_path
else:
options['upload_abspath'] = os.path.abspath(upload_path)
if options.get('fileupload_url'):
options['fileupload_path'] = urlsplit(options['fileupload_url']).path
else:
options['fileupload_path'] = path + '/wp-content/uploads/'
if 'wp_user_roles' in options:
capabilities = dict()
for k, v in options['wp_user_roles'].items():
for cap, active in v['capabilities'].items():
if active:
capabilities.setdefault(cap, list()).append(k)
options['capabilities'] = capabilities
elif self.blog_id != 1:
options['capabilities'] = Blog(1, self.site).options['capabilities']
else:
raise SystemError("No capabilities found for default blog id 1.")
wp_frontman = deepcopy(self.wp_frontman_template)
wp_frontman.update(options.get('wp_frontman', dict()))
options['wp_frontman'] = wp_frontman
self._options = options
return self._options
@property
def models(self):
if not self.models_modname in sys.modules:
from wp_frontman import wp_models
mod = ModuleType(self.models_modname)
manager_db = None if self.site.using is None or self.site.using == 'default' else self.site.using
# TODO: use a lock here so that each thread does not have to rebuild the module?
for m in self.site.models.__models__:
setattr(mod, m, getattr(self.site.models, m))
for c in wp_models.__blog_models__:
c._wp_model_factory(self, mod, manager_db)
sys.modules[self.models_modname] = mod
return sys.modules[self.models_modname]
@property
def urlconf(self):
if not self.urlrules_modname in sys.modules:
try:
__import__(self.urlrules_modname)
except ImportError:
# check if we have to append or prepend the project-wide urlconf
if self.options['wp_frontman']['urlconf'] in ('append', 'prepend'):
root_urlpatterns = None
if not settings.ROOT_URLCONF in sys.modules:
try:
root_urls = __import__(settings.ROOT_URLCONF)
except ImportError, e:
warnings.warn("Cannot import root urlconf %s" % settings.ROOT_URLCONF)
else:
root_urlpatterns = sys.modules[settings.ROOT_URLCONF].urlpatterns
from django.conf.urls.defaults import patterns
mod = ModuleType(self.urlrules_modname)
urlpatterns = patterns('', *self.urlpatterns())
if self.options['wp_frontman']['urlconf'] == 'prepend' and root_urlpatterns:
urlpatterns = root_urlpatterns + urlpatterns
elif self.options['wp_frontman']['urlconf'] == 'append' and root_urlpatterns:
urlpatterns += root_urlpatterns
mod.urlpatterns = urlpatterns
sys.modules[self.urlrules_modname] = mod
return self.urlrules_modname
def urlpatterns(self):
path = urlsplit(self.options['home']).path
if not path.endswith('/'):
path += '/'
if path == '/':
path = ''
if path and path.startswith('/'):
path = path[1:]
rewrite_vars = dict((i[0], i[1]) for i in self.site.meta['wp_frontman']['rewrite_vars'].values())
#if 'rewrite' in self.options['wp_frontman']:
rewrite_prefixes = dict(self.options['wp_frontman']['rewrite'])
#else:
# rewrite_prefixes = dict(self.site.meta['wp_frontman']['rewrite'])
pattern_list = []
page_fragment = rewrite_prefixes['pagination_base'] + '/(?P<page>[0-9]+)/'
comment_page_fragment = 'comment-page-(?P<page>[0-9]+)/'
# home
pattern_list.append((r'^%s$' % path, 'wp_frontman.views.index', dict(), 'wpf_index'))
pattern_list.append((r'^%s%s$' % (path, page_fragment), 'wp_frontman.views.index', dict(), 'wpf_index'))
# favicon and robots
pattern_list.append((r'^favicon.ico$', 'wp_frontman.views.favicon', dict(), 'wpf_favicon'))
pattern_list.append((r'^robots.txt$', 'wp_frontman.views.robots', dict(), 'wpf_robots'))
# feed
pattern_list.append((r'^%sfeed/$' % path, 'wp_frontman.views.feed', dict(), 'wpf_feed'))
pattern_list.append((r'^%scomments/feed/$' % path, 'wp_frontman.views.feed_comments', dict(), 'wpf_feed_comments'))
feeds = (
'wp-atom.php|wp-rdf.php|wp-rss.php|wp-rss2.php|wp-feed.php|wp-commentsrss2.php',
'feed|rdf|rss|rss2|atom',
)
for f in feeds:
pattern_list.append((r'^%s(?:feed/)?(?:%s)%s$' % (path, f, '' if '.php' in f else '/'), 'wp_frontman.views.feed'))
# files
if self.site.mu:
pattern_list.append((r'^%sfiles/(?P<filepath>.*?)$' % path, 'wp_frontman.views.media', dict(), 'wpf_media'))
taxonomy_single = r'^%s%%s/(?P<slug>[^/]+)' % path
taxonomy_hierarchical = r'^%s%%s/(?P<hierarchy>(?:[^/]+/)+)(?P<slug>[^/]+)' % path
# category
base = rewrite_prefixes['category_base'] or 'category'
pattern_list.append(((taxonomy_single % base) + '/' + page_fragment + '$', 'wp_frontman.views.taxonomy', dict(taxonomy='category'), 'wpf_category'))
pattern_list.append(((taxonomy_single % base) + '/$', 'wp_frontman.views.taxonomy', dict(taxonomy='category'), 'wpf_category'))
pattern_list.append(((taxonomy_hierarchical % base) + '/' + page_fragment + '$', 'wp_frontman.views.taxonomy', dict(taxonomy='category'), 'wpf_category'))
pattern_list.append(((taxonomy_hierarchical % base) + '/$', 'wp_frontman.views.taxonomy', dict(taxonomy='category'), 'wpf_category'))
# tag
base = rewrite_prefixes['tag_base'] or 'tag'
pattern_list.append(((taxonomy_single % base) + '/$', 'wp_frontman.views.taxonomy', dict(taxonomy='tag'), 'wpf_tag'))
pattern_list.append(((taxonomy_single % base) + '/' + page_fragment + '$', 'wp_frontman.views.taxonomy', dict(taxonomy='tag'), 'wpf_tag'))
# custom taxonomies
for k, v in self.options['wp_frontman']['custom_taxonomies'].items():
pattern_list.append(((taxonomy_single % v['rewrite_slug']) + '/$', 'wp_frontman.views.taxonomy', dict(taxonomy=k), 'wpf_' + k))
pattern_list.append(((taxonomy_single % v['rewrite_slug']) + '/' + page_fragment + '$', 'wp_frontman.views.taxonomy', dict(taxonomy=k), 'wpf_' + k))
if v['rewrite_hierarchical']:
pattern_list.append(((taxonomy_hierarchical % v['rewrite_slug']) + '/$', 'wp_frontman.views.taxonomy', dict(taxonomy=k), 'wpf_' + k))
pattern_list.append(((taxonomy_hierarchical % v['rewrite_slug']) + '/' + page_fragment + '$', 'wp_frontman.views.taxonomy', dict(taxonomy=k), 'wpf_' + k))
# archives
pattern_list.append((r'^%s(?P<year>[0-9]{4})/(?P<month>[0-9]{1,2})/$' % path, 'wp_frontman.views.archives', dict(), 'wpf_archive'))
pattern_list.append((r'^%s(?P<year>[0-9]{4})/(?P<month>[0-9]{1,2})/%s$' % (path, page_fragment), 'wp_frontman.views.archives', dict(), 'wpf_archive'))
pattern_list.append((r'^%s(?P<year>[0-9]{4})/$' % path, 'wp_frontman.views.archives', dict(), 'wpf_archive'))
pattern_list.append((r'^%s(?P<year>[0-9]{4})/%s$' % (path, page_fragment), 'wp_frontman.views.archives', dict(), 'wpf_archive'))
# author
pattern_list.append(('^%s%s/(?P<slug>[^/]+)/$' % (path, rewrite_prefixes['author_base']), 'wp_frontman.views.author', dict(), 'wpf_author'))
pattern_list.append(('^%s%s/(?P<slug>[^/]+)/%s$' % (path, rewrite_prefixes['author_base'], page_fragment), 'wp_frontman.views.author', dict(), 'wpf_author'))
# search
pattern_list.append(('^%s%s/$' % (path, rewrite_prefixes['search_base']), 'wp_frontman.views.search', dict(), 'wpf_search'))
pattern_list.append(('^%s%s/(?P<q>.+)/$' % (path, rewrite_prefixes['search_base']), 'wp_frontman.views.search', dict(), 'wpf_search'))
pattern_list.append(('^%s%s/(?P<q>.+)/%s$' % (path, rewrite_prefixes['search_base'], page_fragment), 'wp_frontman.views.search', dict(), 'wpf_search'))
# post formats
pattern_list.append((r'^%stype/(?P<post_format>[^/]+)/$' % path, 'wp_frontman.views.taxonomy', dict(taxonomy='post_format'), 'wpf_post_format'))
pattern_list.append((r'^%stype/(?P<post_format>[^/]+)/%s$' % (path, page_fragment), 'wp_frontman.views.taxonomy', dict(taxonomy='post_format'), 'wpf_post_format'))
# posts
ps = self.options['permalink_ps']
pattern_list.append(('^%s%s/$' % (path, ps), 'wp_frontman.views.post', dict(), 'wpf_post'))
pattern_list.append(('^%s%s/%s$' % (path, ps, comment_page_fragment), 'wp_frontman.views.post', dict(), 'wpf_post'))
# attachments
pattern_list.append(('^%s%s/attachment/(?P<attachment_slug>[^/]+)/$' % (path, ps), 'wp_frontman.views.post', dict(), 'wpf_attachment'))
pattern_list.append(('^%s%s/attachment/(?P<attachment_slug>[^/]+)/%s$' % (path, ps, comment_page_fragment), 'wp_frontman.views.post', dict(), 'wpf_attachment'))
return pattern_list
@classmethod
def find_blog_id(cls, domain=None, path=None):
if not cls.site.mu:
return 1
if not cls.site.meta['subdomain_install']:
if path is None:
return
try:
path = [p for p in path.split('/') if p][0]
except IndexError:
path = ''
#file('/tmp/wpf.log', 'a+').write("--- blog_path_map %s\n" % cls.site.blog_path_map)
blog_id = cls.site.blog_path_map.get(path) or 1
#file('/tmp/wpf.log', 'a+').write("--- path %s blog_id %s\n" % (path, blog_id))
elif domain is None:
return
else:
blog_id = cls.site.blog_domain_map.get(domain)
if not blog_id:
return
if isinstance(blog_id, basestring):
            raise ValueError(blog_id)  # a string value is the primary domain to redirect to
return blog_id
def __repr__(self):
return '<Blog id %s site id %s %s db prefix %s>' % (self.blog_id, self.site.site_id, self.site.using, self.site.db_prefix)
class Job(models.Model):
blog_id = models.IntegerField(db_index=True)
process = models.CharField(max_length=48)
tstamp = models.DateTimeField(blank=True, default=datetime.datetime.now)
error = models.BooleanField(blank=True, default=False)
object_id = models.IntegerField(blank=True, null=True)
message = models.CharField(blank=True, null=True, max_length=255)
class Meta:
db_table = '%swpf_job' % DB_PREFIX
unique_together = ('blog_id', 'process')
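# --- Usage sketch (illustrative) ---------------------------------------------
# Assuming a configured Django project with the WPF_* settings above pointing
# at a WordPress database, the intended entry point looks roughly like:
#
#   from wp_frontman.models import Blog
#   blog = Blog.factory(1)                  # also activates blog.urlconf
#   print blog.options['permalink_structure']
#   blog.models                             # per-blog model module built from
#                                           # wp_frontman.wp_models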
| 1.875 | 2 |
src/datalayer/tests/test_allelefilter.py | Dabble-of-DevOps-Bio/ella | 0 | 12765605 | """
Integration/unit test for the AlleleFilter module.
Since it consists mostly of database queries, it's tested on a live database.
"""
import pytest
from datalayer import AlleleFilter
from vardb.datamodel import sample, jsonschema
FILTER_CONFIG_NUM = 0
def insert_filter_config(session, filter_config):
global FILTER_CONFIG_NUM
FILTER_CONFIG_NUM += 1
# Add dummy schema that allows for any object
jsonschema.JSONSchema.get_or_create(
session, **{"name": "filterconfig", "version": 10000, "schema": {"type": "object"}}
)
# Allelefilter expects the following to be defined. Set them if not.
for f in filter_config["filters"]:
f.setdefault("config", {})
f.setdefault("exceptions", [])
for e in f["exceptions"]:
e.setdefault("config", {})
fc = sample.FilterConfig(name="Test {}".format(FILTER_CONFIG_NUM), filterconfig=filter_config)
session.add(fc)
session.commit()
return fc.id
def create_filter_mock(to_remove):
def filter_mock(key_allele_ids):
result = dict()
for gp_key, allele_ids in key_allele_ids.items():
result[gp_key] = set(allele_ids) & set(to_remove)
return result
return filter_mock
@pytest.fixture
def allele_filter(session):
af = AlleleFilter(session, config={})
# Mock the built-in filters
def filter_one(key_allele_ids, filter_config):
return create_filter_mock([1])(key_allele_ids)
def filter_one_two(key_allele_ids, filter_config):
return create_filter_mock([1, 2])(key_allele_ids)
def filter_three_four(key_allele_ids, filter_config):
return create_filter_mock([3, 4])(key_allele_ids)
def filter_five_six(key_allele_ids, filter_config):
return create_filter_mock([5, 6])(key_allele_ids)
def filter_none(key_allele_ids, filter_config):
return create_filter_mock([])(key_allele_ids)
def filter_one_three_if_one(key_allele_ids, filter_config):
result = dict()
for gp_key, allele_ids in key_allele_ids.items():
if 1 in allele_ids:
result[gp_key] = set(allele_ids) & set([1, 3])
else:
result[gp_key] = set([])
return result
assert filter_one_three_if_one({1: [1, 3]}, None) == {1: set([1, 3])}
assert filter_one_three_if_one({1: [3]}, None) == {1: set([])}
af.filter_functions = {
"allele_one": ("allele", filter_one),
"allele_one_two": ("allele", filter_one_two),
"allele_duplicate_one_two": ("allele", filter_one_two),
"allele_three_four": ("allele", filter_three_four),
"allele_five_six": ("allele", filter_five_six),
"allele_filter_one_three_if_one": ("allele", filter_one_three_if_one),
"allele_none": ("allele", filter_none),
"analysis_one_two": ("analysis", filter_one_two),
"analysis_duplicate_one_two": ("analysis", filter_one_two),
"analysis_three_four": ("analysis", filter_three_four),
"analysis_five_six": ("analysis", filter_five_six),
"analysis_filter_one_three_if_one": ("analysis", filter_one_three_if_one),
"analysis_none": ("analysis", filter_none),
}
return af
class TestAlleleFilter(object):
@pytest.mark.aa(order=0)
def test_filter_alleles(self, session, allele_filter):
# ---------
# Test simple allele filter
filter_config = {"filters": [{"name": "allele_one_two"}], "filter_exceptions": []}
filter_config_id = insert_filter_config(session, filter_config)
testdata = {"key": [1, 2], "key2": [1, 4]}
result = allele_filter.filter_alleles(filter_config_id, testdata)
expected_result = {
"key": {"allele_ids": [], "excluded_allele_ids": {"allele_one_two": [1, 2]}},
"key2": {"allele_ids": [4], "excluded_allele_ids": {"allele_one_two": [1]}},
}
        assert result == expected_result
        # ---------
        # Test multiple allele filters
filter_config = {
"filters": [
{"name": "allele_one_two"},
{"name": "allele_duplicate_one_two"},
{"name": "allele_three_four"},
{"name": "allele_five_six"},
{"name": "allele_none"},
]
}
filter_config_id = insert_filter_config(session, filter_config)
testdata = {"key": [1, 2, 3, 4, 5, 6, 7, 8, 9]}
result = allele_filter.filter_alleles(filter_config_id, testdata)
expected_result = {
"key": {
"allele_ids": [7, 8, 9],
"excluded_allele_ids": {
"allele_one_two": [1, 2],
"allele_duplicate_one_two": [],
"allele_three_four": [3, 4],
"allele_five_six": [5, 6],
"allele_none": [],
},
}
}
assert result == expected_result
# ---------
# Test exceptions
# Test allele exception on allele filter
filter_config = {
"filters": [{"name": "allele_one_two", "exceptions": [{"name": "allele_one"}]}]
}
filter_config_id = insert_filter_config(session, filter_config)
testdata = {"key": [1, 2, 3, 4]}
result = allele_filter.filter_alleles(filter_config_id, testdata)
expected_result = {
"key": {"allele_ids": [1, 3, 4], "excluded_allele_ids": {"allele_one_two": [2]}}
}
assert result == expected_result
# ---------
# Test that analysis exception on allele filter fails
filter_config = {
"filters": [{"name": "allele_one_two", "exceptions": [{"name": "analysis_one_two"}]}]
}
filter_config_id = insert_filter_config(session, filter_config)
with pytest.raises(AssertionError):
allele_filter.filter_alleles(filter_config_id, {})
# ---------
# Test that exceptions only apply to the filter specified to
filter_config = {
"filters": [
{"name": "allele_one_two", "exceptions": [{"name": "allele_three_four"}]},
{"name": "allele_three_four", "exceptions": [{"name": "allele_one_two"}]},
]
}
filter_config_id = insert_filter_config(session, filter_config)
testdata = {"key": [1, 2, 3, 4]}
result = allele_filter.filter_alleles(filter_config_id, testdata)
expected_result = {
"key": {
"allele_ids": [],
"excluded_allele_ids": {"allele_one_two": [1, 2], "allele_three_four": [3, 4]},
}
}
assert result == expected_result
@pytest.mark.aa(order=1)
def test_filter_analysis(self, session, allele_filter):
# ---------
# Test single analysis filter
filter_config = {"filters": [{"name": "analysis_one_two"}]}
filter_config_id = insert_filter_config(session, filter_config)
testdata = [1, 2, 3, 4]
result = allele_filter.filter_analysis(filter_config_id, 1, testdata)
expected_result = {
"allele_ids": [3, 4],
"excluded_allele_ids": {"analysis_one_two": [1, 2]},
}
assert result == expected_result
# ---------
# Test multiple analysis filters
filter_config = {
"filters": [
{"name": "analysis_one_two"},
{"name": "analysis_duplicate_one_two"},
{"name": "analysis_three_four"},
{"name": "analysis_five_six"},
{"name": "analysis_none"},
]
}
filter_config_id = insert_filter_config(session, filter_config)
testdata = [1, 2, 3, 4, 5, 6, 7, 8, 9]
result = allele_filter.filter_analysis(filter_config_id, 1, testdata)
expected_result = {
"allele_ids": [7, 8, 9],
"excluded_allele_ids": {
"analysis_one_two": [1, 2],
"analysis_duplicate_one_two": [],
"analysis_three_four": [3, 4],
"analysis_five_six": [5, 6],
"analysis_none": [],
},
}
assert result == expected_result
# ---------
# Test combining analysis and allele filters
filter_config = {
"filters": [
# Overlapping allele and analysis filter
{"name": "allele_one_two"},
{"name": "analysis_one_two"},
{"name": "analysis_three_four"},
{"name": "allele_five_six"},
{"name": "analysis_none"},
]
}
filter_config_id = insert_filter_config(session, filter_config)
testdata = [1, 2, 3, 4, 5, 6, 7, 8, 9]
result = allele_filter.filter_analysis(filter_config_id, 1, testdata)
expected_result = {
"allele_ids": [7, 8, 9],
"excluded_allele_ids": {
"allele_one_two": [1, 2],
"analysis_one_two": [],
"analysis_three_four": [3, 4],
"allele_five_six": [5, 6],
"analysis_none": [],
},
}
assert result == expected_result
# ---------
# Test allele exception on analysis filter
filter_config = {
"filters": [{"name": "analysis_one_two", "exceptions": [{"name": "allele_one"}]}]
}
filter_config_id = insert_filter_config(session, filter_config)
testdata = [1, 2, 3, 4]
result = allele_filter.filter_analysis(filter_config_id, 1, testdata)
expected_result = {
"allele_ids": [1, 3, 4],
"excluded_allele_ids": {"analysis_one_two": [2]},
}
assert result == expected_result
# ---------
# Test analysis exception on analysis filter
filter_config = {
"filters": [
{"name": "analysis_one_two", "exceptions": [{"name": "analysis_one_two"}]},
{"name": "analysis_three_four", "exceptions": [{"name": "analysis_one_two"}]},
]
}
filter_config_id = insert_filter_config(session, filter_config)
testdata = [1, 2, 3, 4]
result = allele_filter.filter_analysis(filter_config_id, 1, testdata)
expected_result = {
"allele_ids": [1, 2],
"excluded_allele_ids": {"analysis_one_two": [], "analysis_three_four": [3, 4]},
}
assert result == expected_result
# ---------
filter_config = {"filters": [{"name": "analysis_one_two"}, {"name": "allele_one_two"}]}
filter_config_id = insert_filter_config(session, filter_config)
testdata = [1, 2, 3, 4]
result = allele_filter.filter_analysis(filter_config_id, 1, testdata)
expected_result = {
"allele_ids": [3, 4],
"excluded_allele_ids": {"analysis_one_two": [1, 2], "allele_one_two": []},
}
assert result == expected_result
# ---------
# Test filters working conditionally to make sure
# previously filtered are not sent to next
# Four cases
# analysis -> analysis
filter_config = {
"filters": [{"name": "analysis_one_two"}, {"name": "analysis_filter_one_three_if_one"}]
}
filter_config_id = insert_filter_config(session, filter_config)
testdata = [1, 2, 3, 4]
result = allele_filter.filter_analysis(filter_config_id, 1, testdata)
expected_result = {
"allele_ids": [3, 4],
"excluded_allele_ids": {
"analysis_one_two": [1, 2],
"analysis_filter_one_three_if_one": [],
},
}
assert result == expected_result
# allele -> analysis
filter_config = {
"filters": [{"name": "allele_one_two"}, {"name": "analysis_filter_one_three_if_one"}]
}
filter_config_id = insert_filter_config(session, filter_config)
testdata = [1, 2, 3, 4]
result = allele_filter.filter_analysis(filter_config_id, 1, testdata)
expected_result = {
"allele_ids": [3, 4],
"excluded_allele_ids": {
"allele_one_two": [1, 2],
"analysis_filter_one_three_if_one": [],
},
}
assert result == expected_result
        # analysis -> allele
filter_config = {
"filters": [{"name": "analysis_one_two"}, {"name": "allele_filter_one_three_if_one"}]
}
filter_config_id = insert_filter_config(session, filter_config)
testdata = [1, 2, 3, 4]
result = allele_filter.filter_analysis(filter_config_id, 1, testdata)
expected_result = {
"allele_ids": [3, 4],
"excluded_allele_ids": {
"analysis_one_two": [1, 2],
"allele_filter_one_three_if_one": [],
},
}
assert result == expected_result
# allele -> allele
filter_config = {
"filters": [{"name": "allele_one_two"}, {"name": "allele_filter_one_three_if_one"}]
}
filter_config_id = insert_filter_config(session, filter_config)
testdata = [1, 2, 3, 4]
result = allele_filter.filter_analysis(filter_config_id, 1, testdata)
expected_result = {
"allele_ids": [3, 4],
"excluded_allele_ids": {"allele_one_two": [1, 2], "allele_filter_one_three_if_one": []},
}
assert result == expected_result
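# These are integration tests (see the module docstring): they need a live
# test database and are typically run with pytest, e.g.:
#   pytest src/datalayer/tests/test_allelefilter.py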
| 2.59375 | 3 |
first_non_repeating_letter.py | rmayherr/python | 0 | 12765606 | def first_non_repeating_letter(string):
lowercase_string = string.lower()
result = []
for i in lowercase_string:
if (lowercase_string.count(i) == 1):
result.append(i)
if (len(result) == 0):
return ""
else:
if (string.find(result[0]) == -1 ):
return result[0].upper()
else:
return result[0]
def test_first_non_repeating_letter():
assert first_non_repeating_letter("stress") == 't'
assert first_non_repeating_letter("") == ''
assert first_non_repeating_letter("aabb") == ''
assert first_non_repeating_letter("sTreSS") == 'T'
| 3.984375 | 4 |
quoridor/selfmatch.py | nakanoi/QuoridorAlphaZero | 0 | 12765607 | <filename>quoridor/selfmatch.py<gh_stars>0
import numpy as np
import os
import time
import subprocess
from networks import Network
from montecarlo import MCTS
from copy import deepcopy
from board import Board
from logs import Log
import config
class SelfMatch:
'''
    Attributes
----------
log : Log
Logging class.
'''
def __init__(self):
'''
        Parameters
----------
None.
'''
self.log = Log()
def first_play_value(self, board):
'''
Parameters
----------
board : Board
Final Board.
Returns
-------
        value : float
            Result for the first player: win=1, draw=0.5, lose=0.
'''
        if not board.is_over():
            raise ValueError('This match has not finished yet.')
if board.is_lose():
return 0 if board.is_first() else 1
else:
return .5
def _save_history(self, hist_input, hist_policy, hist_value, epochs):
'''
Parameters
----------
hist_input : np.ndarray
Match's board history.
hist_policy : np.ndarray
Match's policy history.
hist_value : np.ndarray
Match's value history.
Returns
-------
None.
'''
os.makedirs('histories_input', exist_ok=True)
os.makedirs('histories_policy', exist_ok=True)
os.makedirs('histories_value', exist_ok=True)
path = os.path.join('histories_input', '{}.npy'.format(epochs))
np.save(path, hist_input)
path = os.path.join('histories_policy', '{}.npy'.format(epochs))
np.save(path, hist_policy)
path = os.path.join('histories_value', '{}.npy'.format(epochs))
np.save(path, hist_value)
def match(self, net, algo, simulations, gamma):
'''
Parameters
----------
net : Neural Network
Neural network class defined in network.py.
            This class has an instance variable "model",
which is tensorflow.keras.models.Model
algo : Monte-Carlo Class
Monte-Carlo algorithm class.
simulations : int
            Number of simulations to run for root node evaluation.
        gamma : int, float
            Constant used in the Boltzmann distribution.
Returns
-------
hist_input : np.ndarray
Match's board history.
hist_policy : np.ndarray
Match's policy history.
hist_value : np.ndarray
Match's value history.
ret : int
Match Value.
'''
board = Board()
hist_input, hist_policy = None, None
while not board.is_over():
probs = algo.get_probs(net, board, simulations, gamma)
policy = np.zeros(config.OUTPUT_SHAPE)
for a, p in zip(board.takable_actions(), probs):
policy[a] = p
inp = board.reshape_input()
if hist_input is None:
hist_input = inp.copy()
else:
hist_input = np.vstack([hist_input, inp])
if hist_policy is None:
hist_policy = policy.copy()
else:
hist_policy = np.vstack([hist_policy, policy])
action = np.random.choice(board.takable_actions(), p=probs)
old_board = deepcopy(board)
board = old_board.next_board(action)
value = self.first_play_value(board)
ret = value
hist_value = np.zeros(hist_policy.shape[0])
for i in range(len(hist_value)):
hist_value[i] = value
value *= -1
return hist_input, hist_policy, hist_value, ret
def selfmatch(self, net, algo, matches, simulations, gamma, epoch):
'''
Parameters
----------
net : Neural Network
Neural network class defined in network.py.
This class has instance variable "model",
which is tensorflow.keras.models.Model
algo : Monte-Carlo Class
Monte-Carlo algorithm class.
matches : int
            Number of matches to play.
        simulations : int
            Number of simulations to run for root node evaluation.
        gamma : int, float
            Constant used in the Boltzmann distribution.
epoch : int
Train epoch.
Returns
-------
None.
'''
hist_inputs, hist_policies, hist_values, results = None, None, None, []
for i in range(matches):
            print('Self Play Repeats: {}/{}'.format(i + 1, matches))
hist_input, hist_policy, hist_value, value = self.match(net, algo, simulations, gamma)
results.append(value)
if hist_inputs is None:
hist_inputs = hist_input.copy()
else:
hist_inputs = np.vstack([hist_inputs, hist_input])
if hist_policies is None:
hist_policies = hist_policy.copy()
else:
hist_policies = np.vstack([hist_policies, hist_policy])
if hist_values is None:
hist_values = hist_value.copy()
else:
hist_values = np.append(hist_values, hist_value)
        self.log.log_result(results, epoch)
        # ``epoch`` is assumed to uniquely index this batch of self-play games
        # when naming the saved history files.
        self._save_history(hist_inputs, hist_policies, hist_values, epoch)
def parallel_match(self, epoch):
'''
Parameters
----------
epoch : int
Train epoch.
Returns
-------
None.
'''
for i in range(config.SELFMATCH // config.PARALLEL_MATCH):
rs = []
for process in range(config.PARALLEL_MATCH):
sh = 'python async_match.py {} {} {}'.format('selfmatch', epoch, i * config.PARALLEL_MATCH + process)
r = subprocess.Popen(sh, shell=True, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
rs.append(r)
for r in rs:
r.wait()
print('\rSELF MATCH {} / {}'.format(i * config.PARALLEL_MATCH + process + 1, config.SELFMATCH), end='')
print()
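# --- Usage sketch (illustrative) ---------------------------------------------
# How a training loop might drive this class; ``Network`` and ``MCTS``
# construction details live in networks.py / montecarlo.py and are assumed
# here (their constructors may take arguments not shown):
#
#   net = Network()
#   algo = MCTS()
#   SelfMatch().selfmatch(net, algo, matches=config.SELFMATCH,
#                         simulations=100, gamma=1.0, epoch=0)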
| 2.359375 | 2 |
data/migrations/0098_auto_20200728_1208.py | SIXMON/peps | 5 | 12765608 | # Generated by Django 3.0.7 on 2020-07-28 12:08
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('data', '0097_auto_20200724_1157'),
]
operations = [
migrations.RenameModel(
old_name='Culture',
new_name='SimulatorCulture',
),
]
| 1.648438 | 2 |
easy_rec/python/model/easy_rec_model.py | swaitw/EasyRec | 61 | 12765609 | # -*- encoding: utf-8 -*-
# Copyright (c) Alibaba, Inc. and its affiliates.
import logging
import re
from abc import abstractmethod
import six
import tensorflow as tf
from tensorflow.python.framework import tensor_shape
from tensorflow.python.ops.variables import PartitionedVariable
from easy_rec.python.compat import regularizers
from easy_rec.python.layers import input_layer
from easy_rec.python.utils import constant
from easy_rec.python.utils import estimator_utils
from easy_rec.python.utils import restore_filter
from easy_rec.python.utils.load_class import get_register_class_meta
if tf.__version__ >= '2.0':
tf = tf.compat.v1
_EASY_REC_MODEL_CLASS_MAP = {}
_meta_type = get_register_class_meta(
_EASY_REC_MODEL_CLASS_MAP, have_abstract_class=True)
class EasyRecModel(six.with_metaclass(_meta_type, object)):
def __init__(self,
model_config,
feature_configs,
features,
labels=None,
is_training=False):
self._base_model_config = model_config
self._model_config = model_config
self._is_training = is_training
self._feature_dict = features
self._emb_reg = regularizers.l2_regularizer(self.embedding_regularization)
self._l2_reg = regularizers.l2_regularizer(self.l2_regularization)
self._feature_configs = feature_configs
self.build_input_layer(model_config, feature_configs)
self._labels = labels
self._prediction_dict = {}
self._loss_dict = {}
# add sample weight from inputs
self._sample_weight = 1.0
if constant.SAMPLE_WEIGHT in features:
self._sample_weight = features[constant.SAMPLE_WEIGHT]
@property
def embedding_regularization(self):
return self._base_model_config.embedding_regularization
@property
def kd(self):
return self._base_model_config.kd
@property
def l2_regularization(self):
model_config = getattr(self._base_model_config,
self._base_model_config.WhichOneof('model'))
l2_regularization = 0.0
if hasattr(model_config, 'dense_regularization') and \
model_config.HasField('dense_regularization'):
# backward compatibility
tf.logging.warn(
'dense_regularization is deprecated, please use l2_regularization')
l2_regularization = model_config.dense_regularization
elif hasattr(model_config, 'l2_regularization'):
l2_regularization = model_config.l2_regularization
return l2_regularization
def build_input_layer(self, model_config, feature_configs):
self._input_layer = input_layer.InputLayer(
feature_configs,
model_config.feature_groups,
use_embedding_variable=model_config.use_embedding_variable,
embedding_regularizer=self._emb_reg,
kernel_regularizer=self._l2_reg,
variational_dropout_config=model_config.variational_dropout
if model_config.HasField('variational_dropout') else None,
is_training=False)
@abstractmethod
def build_predict_graph(self):
pass
@abstractmethod
def build_loss_graph(self):
pass
@abstractmethod
def build_metric_graph(self, eval_config):
pass
@abstractmethod
def get_outputs(self):
pass
def restore(self,
ckpt_path,
include_global_step=False,
ckpt_var_map_path='',
force_restore_shape_compatible=False):
"""Restore variables from ckpt_path.
steps:
1. list the variables in graph that need to be restored
2. inspect checkpoint and find the variables that could restore from checkpoint
         substitute scope names if necessary
3. call tf.train.init_from_checkpoint to restore the variables
Args:
ckpt_path: checkpoint path to restore from
include_global_step: whether to restore global_step variable
ckpt_var_map_path: variable map from graph variables to variables in a checkpoint
each line consists of: variable name in graph variable name in ckpt
force_restore_shape_compatible: if variable shape is incompatible, clip or pad
variables in checkpoint, and then restore
Returns:
IncompatibleShapeRestoreHook if force_shape_compatible else None
"""
name2var_map = self._get_restore_vars(ckpt_var_map_path)
logging.info('start to restore from %s' % ckpt_path)
if ckpt_path.endswith('/') or tf.gfile.IsDirectory(ckpt_path + '/'):
ckpt_path = estimator_utils.latest_checkpoint(ckpt_path)
print('ckpt_path is model_dir, will use the latest checkpoint: %s' %
ckpt_path)
ckpt_reader = tf.train.NewCheckpointReader(ckpt_path)
ckpt_var2shape_map = ckpt_reader.get_variable_to_shape_map()
if not include_global_step:
ckpt_var2shape_map.pop(tf.GraphKeys.GLOBAL_STEP, None)
vars_in_ckpt = {}
incompatible_shape_var_map = {}
fail_restore_vars = []
for variable_name, variable in sorted(name2var_map.items()):
if variable_name in ckpt_var2shape_map:
print('restore %s' % variable_name)
ckpt_var_shape = ckpt_var2shape_map[variable_name]
if type(variable) == list:
shape_arr = [x.get_shape() for x in variable]
var_shape = list(shape_arr[0])
for x in shape_arr[1:]:
var_shape[0] += x[0]
var_shape = tensor_shape.TensorShape(var_shape)
variable = PartitionedVariable(
variable_name,
var_shape,
variable[0].dtype,
variable,
partitions=[len(variable)] + [1] * (len(var_shape) - 1))
else:
var_shape = variable.shape.as_list()
if ckpt_var_shape == var_shape:
vars_in_ckpt[variable_name] = list(variable) if isinstance(
variable, PartitionedVariable) else variable
elif len(ckpt_var_shape) == len(var_shape):
if force_restore_shape_compatible:
# create a variable compatible with checkpoint to restore
dtype = variable[0].dtype if isinstance(variable,
list) else variable.dtype
with tf.variable_scope('incompatible_shape_restore'):
tmp_var = tf.get_variable(
name=variable_name + '_T_E_M_P',
shape=ckpt_var_shape,
trainable=False,
# add to a special collection for easy reference
# by tf.get_collection('T_E_M_P_RESTROE')
collections=['T_E_M_P_RESTROE'],
dtype=dtype)
vars_in_ckpt[variable_name] = tmp_var
incompatible_shape_var_map[variable] = tmp_var
print('incompatible restore %s[%s, %s]' %
(variable_name, str(var_shape), str(ckpt_var_shape)))
else:
logging.warning(
'Variable [%s] is available in checkpoint, but '
'incompatible shape with model variable.', variable_name)
else:
logging.warning(
'Variable [%s] is available in checkpoint, but '
'incompatible shape dims with model variable.', variable_name)
else:
fail_restore_vars.append(variable_name)
for variable_name in fail_restore_vars:
if 'Momentum' not in variable_name:
logging.warning('Variable [%s] is not available in checkpoint',
variable_name)
tf.train.init_from_checkpoint(ckpt_path, vars_in_ckpt)
if force_restore_shape_compatible:
return estimator_utils.IncompatibleShapeRestoreHook(
incompatible_shape_var_map)
else:
return None
def _get_restore_vars(self, ckpt_var_map_path):
"""Restore by specify variable map between graph variables and ckpt variables.
Args:
ckpt_var_map_path: variable map from graph variables to variables in a checkpoint
each line consists of: variable name in graph variable name in ckpt
Returns:
the list of variables which need to restore from checkpoint
"""
    # here we must use global_variables, because variables such as moving_mean
    # and moving_variance are usually not trainable in detection models
all_vars = tf.global_variables()
PARTITION_PATTERN = '/part_[0-9]+'
    VAR_SUFFIX_PATTERN = ':[0-9]$'
name2var = {}
for one_var in all_vars:
      var_name = re.sub(VAR_SUFFIX_PATTERN, '', one_var.name)
if re.search(PARTITION_PATTERN,
var_name) and (not var_name.endswith('/AdamAsync_2') and
not var_name.endswith('/AdamAsync_3')):
var_name = re.sub(PARTITION_PATTERN, '', var_name)
is_part = True
else:
is_part = False
if var_name in name2var:
assert is_part, 'multiple vars: %s' % var_name
name2var[var_name].append(one_var)
else:
name2var[var_name] = [one_var] if is_part else one_var
if ckpt_var_map_path != '':
if not tf.gfile.Exists(ckpt_var_map_path):
        logging.warning('%s does not exist' % ckpt_var_map_path)
return name2var
# load var map
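      # Example var map file content (one mapping per line, whitespace
      # separated; the names below are purely illustrative):
      #   dnn/hidden_1/kernel  tower_1/dnn/hidden_1/kernel
      #   dnn/hidden_1/bias    tower_1/dnn/hidden_1/bias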
name_map = {}
with open(ckpt_var_map_path, 'r') as fin:
for one_line in fin:
one_line = one_line.strip()
line_tok = [x for x in one_line.split() if x != '']
if len(line_tok) != 2:
logging.warning('Failed to process: %s' % one_line)
continue
name_map[line_tok[0]] = line_tok[1]
var_map = {}
for var_name in name2var:
if var_name in name_map:
in_ckpt_name = name_map[var_name]
var_map[in_ckpt_name] = name2var[var_name]
else:
logging.warning('Failed to find in var_map_file(%s): %s' %
(ckpt_var_map_path, var_name))
      return var_map
else:
var_filter, scope_update = self.get_restore_filter()
if var_filter is not None:
        name2var = {
            var_name: name2var[var_name]
            for var_name in name2var
            if var_filter.keep(var_name)
        }
# drop scope prefix if necessary
if scope_update is not None:
name2var = {
scope_update(var_name): name2var[var_name] for var_name in name2var
}
return name2var
def get_restore_filter(self):
"""Get restore variable filter.
Return:
filter: type of Filter in restore_filter.py
      scope_update: a callable that rewrites variable scope names, or None
"""
if len(self._base_model_config.restore_filters) == 0:
return None, None
for x in self._base_model_config.restore_filters:
logging.info('restore will filter out pattern %s' % x)
all_filters = [
restore_filter.KeywordFilter(x, True)
for x in self._base_model_config.restore_filters
]
return restore_filter.CombineFilter(all_filters,
restore_filter.Logical.AND), None
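  # Example (illustrative): with restore_filters = ['Adam'] in the model
  # config, every variable whose name matches the 'Adam' keyword (optimizer
  # slot variables) would be filtered out of the checkpoint restore.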
def get_grouped_vars(self):
"""Get grouped variables, each group will be optimized by a separate optimizer.
Return:
      grouped_vars: a list of lists of variables
"""
raise NotImplementedError()
| 2.03125 | 2 |
dev/a_covidcases.py | exaxorg/pycon20-nyc-taxi-covid-movie | 2 | 12765610 | <reponame>exaxorg/pycon20-nyc-taxi-covid-movie
datasets = ('source',)
def synthesis(job):
d = {}
agg = 0
for dt, x in datasets.source.iterate('roundrobin', ('DATE_OF_INTEREST', 'CASE_COUNT')):
agg += x
d[dt] = agg
return d
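# Note (editor sketch): `synthesis` returns a dict mapping each
# DATE_OF_INTEREST to the cumulative CASE_COUNT up to that date; in the
# Accelerator framework a downstream method can read this result back,
# e.g. via job.load() (usage assumed here for illustration).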
| 2.296875 | 2 |
pyreach/mock/color_camera_mock.py | google-research/pyreach | 13 | 12765611 | # Copyright 2021 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Mock PyReach Color Camera."""
from typing import Callable, Optional
import numpy as np # type: ignore
from pyreach import calibration as cal
from pyreach import color_camera
from pyreach import core
from pyreach.mock import calibration_mock as cal_mock
class ColorFrameMock(color_camera.ColorFrame):
"""A single color camera frame taken at a specific time.
Attributes:
time: The time in seconds of the frame since 1970.
sequence: The sequence number of the color frame.
device_type: The JSON device type string.
device_name: The JSON device name string.
color_image: A color image as a (DX,DY,3) array of uint8's.
calibration: The calibration when the image is captured.
"""
def __init__(self, time: float, sequence: int,
device_type: str, device_name: str,
color_image: np.ndarray,
calibration: Optional[cal.Calibration]) -> None:
"""Initialize a MockColorFrame."""
self._time: float = time
self._sequence = sequence
self._device_type: str = device_type
self._device_name: str = device_name
self._color_image: np.ndarray = color_image
self._calibration: Optional[cal.Calibration] = calibration
@property
def time(self) -> float:
"""Return timestamp of the ColorFrame."""
return self._time
@property
def sequence(self) -> int:
"""Sequence number of the ColorFrame."""
return self._sequence
@property
def device_type(self) -> str:
"""Return the Reach device type."""
return self._device_type
@property
def device_name(self) -> str:
"""Return the Reach device name."""
return self._device_name
@property
def color_image(self) -> np.ndarray:
"""Return the color image as a (DX,DY,3)."""
return self._color_image
@property
def calibration(self) -> Optional[cal.Calibration]:
"""Return the Calibration for for the ColorFrame."""
return self._calibration
def pose(self) -> Optional[core.Pose]:
"""Return the pose of the camera when the image is taken."""
raise NotImplementedError
class ColorCameraMock(color_camera.ColorCamera):
"""Mock ColorCamera class."""
def __init__(self) -> None:
"""Init a MockColorCamera."""
pass
def add_update_callback(
self,
callback: Callable[[color_camera.ColorFrame], bool],
finished_callback: Optional[Callable[[],
None]] = None) -> Callable[[], None]:
"""Add a callback function to be invoked when a new frame is available.
Args:
      callback: A function to be invoked when a new frame is available. Return
        False to continue receiving new images; return True to stop further
        updates.
finished_callback: Optional callback, called when the callback is stopped
or if the camera is closed.
Returns:
A function that when called stops the callbacks.
"""
raise NotImplementedError
def start_streaming(self, request_period: float = 0.1) -> None:
"""Start streaming of camera images.
Args:
      request_period: The number of seconds between frames. Defaults to 0.1
        seconds.
"""
pass
def stop_streaming(self) -> None:
"""Stop streaming camera images."""
raise NotImplementedError
def supports_tagged_request(self) -> bool:
"""Return True if tagged requests are supported."""
raise NotImplementedError
def enable_tagged_request(self) -> None:
"""Enable tagged requests."""
raise NotImplementedError
def disable_tagged_request(self) -> None:
"""Disable tagged requests."""
raise NotImplementedError
def image(self) -> Optional[color_camera.ColorFrame]:
"""Return the latest image if it exists."""
color_frame_mock: ColorFrameMock = ColorFrameMock(
1.0, 0, "device_type", "device_name", np.zeros((3, 5, 3),
dtype=np.uint8),
cal_mock.CalibrationMock("device_type", "device_name",
"color_camera_link_name"))
color_frame: color_camera.ColorFrame = color_frame_mock
return color_frame
def fetch_image(self,
timeout: float = 15.0) -> Optional[color_camera.ColorFrame]:
"""Fetch a new image or possibly times out.
Args:
timeout: The optional amount of time to wait for a camera frame. If not
specified, 15 seconds is the default timeout.
Returns:
Returns the color image or None for a timeout.
"""
raise NotImplementedError
def async_fetch_image(self,
callback: Optional[Callable[[color_camera.ColorFrame],
None]] = None,
error_callback: Optional[Callable[[core.PyReachStatus],
None]] = None,
timeout: float = 30) -> None:
"""Fetch a new image asynchronously.
The callback function will be invoked when new image is available.
Args:
callback: A callback function that is called when an image arrives. If the
camera fails to load an image, the callback is not called.
error_callback: Optional callback that is called if there is an error.
timeout: Timeout for the fetch, defaults to 30 seconds.
"""
raise NotImplementedError
@property
def pose(self) -> Optional[core.Pose]:
"""Return the latest pose of the camera."""
raise NotImplementedError
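# Usage sketch (illustrative; not part of the original module):
#   camera = ColorCameraMock()
#   frame = camera.image()
#   assert frame is not None
#   assert frame.color_image.shape == (3, 5, 3)  # dummy all-zero image
#   assert frame.device_name == "device_name"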
| 2.359375 | 2 |
tests/tools/test_aux_methods_sanity_checks.py | nipy/nilabels | 15 | 12765612 | <filename>tests/tools/test_aux_methods_sanity_checks.py
from os.path import join as jph
from numpy.testing import assert_raises
from nilabels.definitions import root_dir
from nilabels.tools.aux_methods.sanity_checks import check_pfi_io, check_path_validity, is_valid_permutation
from tests.tools.decorators_tools import create_and_erase_temporary_folder_with_a_dummy_nifti_image, pfo_tmp_test
# TEST: methods sanity_checks
def test_check_pfi_io():
assert check_pfi_io(root_dir, None)
assert check_pfi_io(root_dir, root_dir)
non_existing_file = jph(root_dir, 'non_existing_file.txt')
file_in_non_existing_folder = jph(root_dir, 'non_existing_folder/non_existing_file.txt')
with assert_raises(IOError):
check_pfi_io(non_existing_file, None)
with assert_raises(IOError):
check_pfi_io(root_dir, file_in_non_existing_folder)
def test_check_path_validity_not_existing_path():
with assert_raises(IOError):
check_path_validity('/Spammer/path_to_spam')
@create_and_erase_temporary_folder_with_a_dummy_nifti_image
def test_check_path_validity_for_a_nifti_image():
assert check_path_validity(jph(pfo_tmp_test, 'dummy_image.nii.gz'))
def test_check_path_validity_root():
assert check_path_validity(root_dir)
def test_is_valid_permutation():
assert not is_valid_permutation([1, 2, 3])
assert not is_valid_permutation([[1, 2, 3, 4], [3, 1, 2]])
assert not is_valid_permutation([[1, 2, 3], [4, 5, 6]])
assert not is_valid_permutation([[1, 1, 3], [1, 3, 1]])
assert not is_valid_permutation([[1.2, 2, 3], [2, 1.2, 3]])
assert is_valid_permutation([[1.2, 2, 3], [2, 1.2, 3]], for_labels=False)
assert is_valid_permutation([[1, 2, 3], [3, 1, 2]])
if __name__ == '__main__':
test_check_pfi_io()
test_check_path_validity_not_existing_path()
test_check_path_validity_for_a_nifti_image()
test_check_path_validity_root()
test_is_valid_permutation()
| 2.25 | 2 |
fhir/resources/DSTU2/elementdefinition.py | mmabey/fhir.resources | 0 | 12765613 | <reponame>mmabey/fhir.resources
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Generated from FHIR 1.0.2.7202 (http://hl7.org/fhir/StructureDefinition/ElementDefinition) on 2019-05-14.
# 2019, SMART Health IT.
from . import (address, annotation, attachment, codeableconcept, coding,
contactpoint, element, fhirdate, fhirreference, humanname,
identifier, meta, period, quantity, range, ratio, sampleddata,
signature, timing)
class ElementDefinition(element.Element):
""" Definition of an element in a resource or extension.
Captures constraints on each element within the resource, profile, or
extension.
"""
resource_name = "ElementDefinition"
def __init__(self, jsondict=None, strict=True):
""" Initialize all valid properties.
:raises: FHIRValidationError on validation errors, unless strict is False
:param dict jsondict: A JSON dictionary to use for initialization
:param bool strict: If True (the default), invalid variables will raise a TypeError
"""
self.alias = None
""" Other names.
List of `str` items. """
self.base = None
""" Base definition information for tools.
Type `ElementDefinitionBase` (represented as `dict` in JSON). """
self.binding = None
""" ValueSet details if this is coded.
Type `ElementDefinitionBinding` (represented as `dict` in JSON). """
self.code = None
""" Defining code.
List of `Coding` items (represented as `dict` in JSON). """
self.comments = None
""" Comments about the use of this element.
Type `str`. """
self.condition = None
""" Reference to invariant about presence.
List of `str` items. """
self.constraint = None
""" Condition that must evaluate to true.
List of `ElementDefinitionConstraint` items (represented as `dict` in JSON). """
        self.defaultValueAddress = None
        """ Specified value if missing from instance.
        Type `Address` (represented as `dict` in JSON). """
        self.defaultValueAnnotation = None
        """ Specified value if missing from instance.
        Type `Annotation` (represented as `dict` in JSON). """
        self.defaultValueAttachment = None
        """ Specified value if missing from instance.
        Type `Attachment` (represented as `dict` in JSON). """
        self.defaultValueBase64Binary = None
        """ Specified value if missing from instance.
        Type `str`. """
        self.defaultValueBoolean = None
        """ Specified value if missing from instance.
        Type `bool`. """
        self.defaultValueCode = None
        """ Specified value if missing from instance.
        Type `str`. """
        self.defaultValueCodeableConcept = None
        """ Specified value if missing from instance.
        Type `CodeableConcept` (represented as `dict` in JSON). """
        self.defaultValueCoding = None
        """ Specified value if missing from instance.
        Type `Coding` (represented as `dict` in JSON). """
        self.defaultValueContactPoint = None
        """ Specified value if missing from instance.
        Type `ContactPoint` (represented as `dict` in JSON). """
        self.defaultValueDate = None
        """ Specified value if missing from instance.
        Type `FHIRDate` (represented as `str` in JSON). """
        self.defaultValueDateTime = None
        """ Specified value if missing from instance.
        Type `FHIRDate` (represented as `str` in JSON). """
        self.defaultValueDecimal = None
        """ Specified value if missing from instance.
        Type `float`. """
        self.defaultValueHumanName = None
        """ Specified value if missing from instance.
        Type `HumanName` (represented as `dict` in JSON). """
        self.defaultValueId = None
        """ Specified value if missing from instance.
        Type `str`. """
        self.defaultValueIdentifier = None
        """ Specified value if missing from instance.
        Type `Identifier` (represented as `dict` in JSON). """
        self.defaultValueInstant = None
        """ Specified value if missing from instance.
        Type `FHIRDate` (represented as `str` in JSON). """
        self.defaultValueInteger = None
        """ Specified value if missing from instance.
        Type `int`. """
        self.defaultValueMarkdown = None
        """ Specified value if missing from instance.
        Type `str`. """
        self.defaultValueMeta = None
        """ Specified value if missing from instance.
        Type `Meta` (represented as `dict` in JSON). """
        self.defaultValueOid = None
        """ Specified value if missing from instance.
        Type `str`. """
        self.defaultValuePeriod = None
        """ Specified value if missing from instance.
        Type `Period` (represented as `dict` in JSON). """
        self.defaultValuePositiveInt = None
        """ Specified value if missing from instance.
        Type `int`. """
        self.defaultValueQuantity = None
        """ Specified value if missing from instance.
        Type `Quantity` (represented as `dict` in JSON). """
        self.defaultValueRange = None
        """ Specified value if missing from instance.
        Type `Range` (represented as `dict` in JSON). """
        self.defaultValueRatio = None
        """ Specified value if missing from instance.
        Type `Ratio` (represented as `dict` in JSON). """
        self.defaultValueReference = None
        """ Specified value if missing from instance.
        Type `FHIRReference` (represented as `dict` in JSON). """
        self.defaultValueSampledData = None
        """ Specified value if missing from instance.
        Type `SampledData` (represented as `dict` in JSON). """
        self.defaultValueSignature = None
        """ Specified value if missing from instance.
        Type `Signature` (represented as `dict` in JSON). """
        self.defaultValueString = None
        """ Specified value if missing from instance.
        Type `str`. """
        self.defaultValueTime = None
        """ Specified value if missing from instance.
        Type `FHIRDate` (represented as `str` in JSON). """
        self.defaultValueTiming = None
        """ Specified value if missing from instance.
        Type `Timing` (represented as `dict` in JSON). """
        self.defaultValueUnsignedInt = None
        """ Specified value if missing from instance.
        Type `int`. """
        self.defaultValueUri = None
        """ Specified value if missing from instance.
        Type `str`. """
self.definition = None
""" Full formal definition as narrative text.
Type `str`. """
self.exampleAddress = None
""" Example value: [as defined for type].
Type `Address` (represented as `dict` in JSON). """
self.exampleAnnotation = None
""" Example value: [as defined for type].
Type `Annotation` (represented as `dict` in JSON). """
self.exampleAttachment = None
""" Example value: [as defined for type].
Type `Attachment` (represented as `dict` in JSON). """
self.exampleBase64Binary = None
""" Example value: [as defined for type].
Type `str`. """
self.exampleBoolean = None
""" Example value: [as defined for type].
Type `bool`. """
self.exampleCode = None
""" Example value: [as defined for type].
Type `str`. """
self.exampleCodeableConcept = None
""" Example value: [as defined for type].
Type `CodeableConcept` (represented as `dict` in JSON). """
self.exampleCoding = None
""" Example value: [as defined for type].
Type `Coding` (represented as `dict` in JSON). """
self.exampleContactPoint = None
""" Example value: [as defined for type].
Type `ContactPoint` (represented as `dict` in JSON). """
self.exampleDate = None
""" Example value: [as defined for type].
Type `FHIRDate` (represented as `str` in JSON). """
self.exampleDateTime = None
""" Example value: [as defined for type].
Type `FHIRDate` (represented as `str` in JSON). """
self.exampleDecimal = None
""" Example value: [as defined for type].
Type `float`. """
self.exampleHumanName = None
""" Example value: [as defined for type].
Type `HumanName` (represented as `dict` in JSON). """
self.exampleId = None
""" Example value: [as defined for type].
Type `str`. """
self.exampleIdentifier = None
""" Example value: [as defined for type].
Type `Identifier` (represented as `dict` in JSON). """
self.exampleInstant = None
""" Example value: [as defined for type].
Type `FHIRDate` (represented as `str` in JSON). """
self.exampleInteger = None
""" Example value: [as defined for type].
Type `int`. """
self.exampleMarkdown = None
""" Example value: [as defined for type].
Type `str`. """
self.exampleMeta = None
""" Example value: [as defined for type].
Type `Meta` (represented as `dict` in JSON). """
self.exampleOid = None
""" Example value: [as defined for type].
Type `str`. """
self.examplePeriod = None
""" Example value: [as defined for type].
Type `Period` (represented as `dict` in JSON). """
self.examplePositiveInt = None
""" Example value: [as defined for type].
Type `int`. """
self.exampleQuantity = None
""" Example value: [as defined for type].
Type `Quantity` (represented as `dict` in JSON). """
self.exampleRange = None
""" Example value: [as defined for type].
Type `Range` (represented as `dict` in JSON). """
self.exampleRatio = None
""" Example value: [as defined for type].
Type `Ratio` (represented as `dict` in JSON). """
self.exampleReference = None
""" Example value: [as defined for type].
Type `FHIRReference` (represented as `dict` in JSON). """
self.exampleSampledData = None
""" Example value: [as defined for type].
Type `SampledData` (represented as `dict` in JSON). """
self.exampleSignature = None
""" Example value: [as defined for type].
Type `Signature` (represented as `dict` in JSON). """
self.exampleString = None
""" Example value: [as defined for type].
Type `str`. """
self.exampleTime = None
""" Example value: [as defined for type].
Type `FHIRDate` (represented as `str` in JSON). """
self.exampleTiming = None
""" Example value: [as defined for type].
Type `Timing` (represented as `dict` in JSON). """
self.exampleUnsignedInt = None
""" Example value: [as defined for type].
Type `int`. """
self.exampleUri = None
""" Example value: [as defined for type].
Type `str`. """
self.fixedAddress = None
""" Value must be exactly this.
Type `Address` (represented as `dict` in JSON). """
self.fixedAnnotation = None
""" Value must be exactly this.
Type `Annotation` (represented as `dict` in JSON). """
self.fixedAttachment = None
""" Value must be exactly this.
Type `Attachment` (represented as `dict` in JSON). """
self.fixedBase64Binary = None
""" Value must be exactly this.
Type `str`. """
self.fixedBoolean = None
""" Value must be exactly this.
Type `bool`. """
self.fixedCode = None
""" Value must be exactly this.
Type `str`. """
self.fixedCodeableConcept = None
""" Value must be exactly this.
Type `CodeableConcept` (represented as `dict` in JSON). """
self.fixedCoding = None
""" Value must be exactly this.
Type `Coding` (represented as `dict` in JSON). """
self.fixedContactPoint = None
""" Value must be exactly this.
Type `ContactPoint` (represented as `dict` in JSON). """
self.fixedDate = None
""" Value must be exactly this.
Type `FHIRDate` (represented as `str` in JSON). """
self.fixedDateTime = None
""" Value must be exactly this.
Type `FHIRDate` (represented as `str` in JSON). """
self.fixedDecimal = None
""" Value must be exactly this.
Type `float`. """
self.fixedHumanName = None
""" Value must be exactly this.
Type `HumanName` (represented as `dict` in JSON). """
self.fixedId = None
""" Value must be exactly this.
Type `str`. """
self.fixedIdentifier = None
""" Value must be exactly this.
Type `Identifier` (represented as `dict` in JSON). """
self.fixedInstant = None
""" Value must be exactly this.
Type `FHIRDate` (represented as `str` in JSON). """
self.fixedInteger = None
""" Value must be exactly this.
Type `int`. """
self.fixedMarkdown = None
""" Value must be exactly this.
Type `str`. """
self.fixedMeta = None
""" Value must be exactly this.
Type `Meta` (represented as `dict` in JSON). """
self.fixedOid = None
""" Value must be exactly this.
Type `str`. """
self.fixedPeriod = None
""" Value must be exactly this.
Type `Period` (represented as `dict` in JSON). """
self.fixedPositiveInt = None
""" Value must be exactly this.
Type `int`. """
self.fixedQuantity = None
""" Value must be exactly this.
Type `Quantity` (represented as `dict` in JSON). """
self.fixedRange = None
""" Value must be exactly this.
Type `Range` (represented as `dict` in JSON). """
self.fixedRatio = None
""" Value must be exactly this.
Type `Ratio` (represented as `dict` in JSON). """
self.fixedReference = None
""" Value must be exactly this.
Type `FHIRReference` (represented as `dict` in JSON). """
self.fixedSampledData = None
""" Value must be exactly this.
Type `SampledData` (represented as `dict` in JSON). """
self.fixedSignature = None
""" Value must be exactly this.
Type `Signature` (represented as `dict` in JSON). """
self.fixedString = None
""" Value must be exactly this.
Type `str`. """
self.fixedTime = None
""" Value must be exactly this.
Type `FHIRDate` (represented as `str` in JSON). """
self.fixedTiming = None
""" Value must be exactly this.
Type `Timing` (represented as `dict` in JSON). """
self.fixedUnsignedInt = None
""" Value must be exactly this.
Type `int`. """
self.fixedUri = None
""" Value must be exactly this.
Type `str`. """
self.isModifier = None
""" If this modifies the meaning of other elements.
Type `bool`. """
self.isSummary = None
""" Include when _summary = true?.
Type `bool`. """
self.label = None
""" Name for element to display with or prompt for element.
Type `str`. """
self.mapping = None
""" Map element to another set of definitions.
List of `ElementDefinitionMapping` items (represented as `dict` in JSON). """
self.max = None
""" Maximum Cardinality (a number or *).
Type `str`. """
self.maxLength = None
""" Max length for strings.
Type `int`. """
self.maxValueAddress = None
""" Maximum Allowed Value (for some types).
Type `Address` (represented as `dict` in JSON). """
self.maxValueAnnotation = None
""" Maximum Allowed Value (for some types).
Type `Annotation` (represented as `dict` in JSON). """
self.maxValueAttachment = None
""" Maximum Allowed Value (for some types).
Type `Attachment` (represented as `dict` in JSON). """
self.maxValueBase64Binary = None
""" Maximum Allowed Value (for some types).
Type `str`. """
self.maxValueBoolean = None
""" Maximum Allowed Value (for some types).
Type `bool`. """
self.maxValueCode = None
""" Maximum Allowed Value (for some types).
Type `str`. """
self.maxValueCodeableConcept = None
""" Maximum Allowed Value (for some types).
Type `CodeableConcept` (represented as `dict` in JSON). """
self.maxValueCoding = None
""" Maximum Allowed Value (for some types).
Type `Coding` (represented as `dict` in JSON). """
self.maxValueContactPoint = None
""" Maximum Allowed Value (for some types).
Type `ContactPoint` (represented as `dict` in JSON). """
self.maxValueDate = None
""" Maximum Allowed Value (for some types).
Type `FHIRDate` (represented as `str` in JSON). """
self.maxValueDateTime = None
""" Maximum Allowed Value (for some types).
Type `FHIRDate` (represented as `str` in JSON). """
self.maxValueDecimal = None
""" Maximum Allowed Value (for some types).
Type `float`. """
self.maxValueHumanName = None
""" Maximum Allowed Value (for some types).
Type `HumanName` (represented as `dict` in JSON). """
self.maxValueId = None
""" Maximum Allowed Value (for some types).
Type `str`. """
self.maxValueIdentifier = None
""" Maximum Allowed Value (for some types).
Type `Identifier` (represented as `dict` in JSON). """
self.maxValueInstant = None
""" Maximum Allowed Value (for some types).
Type `FHIRDate` (represented as `str` in JSON). """
self.maxValueInteger = None
""" Maximum Allowed Value (for some types).
Type `int`. """
self.maxValueMarkdown = None
""" Maximum Allowed Value (for some types).
Type `str`. """
self.maxValueMeta = None
""" Maximum Allowed Value (for some types).
Type `Meta` (represented as `dict` in JSON). """
self.maxValueOid = None
""" Maximum Allowed Value (for some types).
Type `str`. """
self.maxValuePeriod = None
""" Maximum Allowed Value (for some types).
Type `Period` (represented as `dict` in JSON). """
self.maxValuePositiveInt = None
""" Maximum Allowed Value (for some types).
Type `int`. """
self.maxValueQuantity = None
""" Maximum Allowed Value (for some types).
Type `Quantity` (represented as `dict` in JSON). """
self.maxValueRange = None
""" Maximum Allowed Value (for some types).
Type `Range` (represented as `dict` in JSON). """
self.maxValueRatio = None
""" Maximum Allowed Value (for some types).
Type `Ratio` (represented as `dict` in JSON). """
self.maxValueReference = None
""" Maximum Allowed Value (for some types).
Type `FHIRReference` (represented as `dict` in JSON). """
self.maxValueSampledData = None
""" Maximum Allowed Value (for some types).
Type `SampledData` (represented as `dict` in JSON). """
self.maxValueSignature = None
""" Maximum Allowed Value (for some types).
Type `Signature` (represented as `dict` in JSON). """
self.maxValueString = None
""" Maximum Allowed Value (for some types).
Type `str`. """
self.maxValueTime = None
""" Maximum Allowed Value (for some types).
Type `FHIRDate` (represented as `str` in JSON). """
self.maxValueTiming = None
""" Maximum Allowed Value (for some types).
Type `Timing` (represented as `dict` in JSON). """
self.maxValueUnsignedInt = None
""" Maximum Allowed Value (for some types).
Type `int`. """
self.maxValueUri = None
""" Maximum Allowed Value (for some types).
Type `str`. """
self.meaningWhenMissing = None
""" Implicit meaning when this element is missing.
Type `str`. """
self.min = None
""" Minimum Cardinality.
Type `int`. """
self.minValueAddress = None
""" Minimum Allowed Value (for some types).
Type `Address` (represented as `dict` in JSON). """
self.minValueAnnotation = None
""" Minimum Allowed Value (for some types).
Type `Annotation` (represented as `dict` in JSON). """
self.minValueAttachment = None
""" Minimum Allowed Value (for some types).
Type `Attachment` (represented as `dict` in JSON). """
self.minValueBase64Binary = None
""" Minimum Allowed Value (for some types).
Type `str`. """
self.minValueBoolean = None
""" Minimum Allowed Value (for some types).
Type `bool`. """
self.minValueCode = None
""" Minimum Allowed Value (for some types).
Type `str`. """
self.minValueCodeableConcept = None
""" Minimum Allowed Value (for some types).
Type `CodeableConcept` (represented as `dict` in JSON). """
self.minValueCoding = None
""" Minimum Allowed Value (for some types).
Type `Coding` (represented as `dict` in JSON). """
self.minValueContactPoint = None
""" Minimum Allowed Value (for some types).
Type `ContactPoint` (represented as `dict` in JSON). """
self.minValueDate = None
""" Minimum Allowed Value (for some types).
Type `FHIRDate` (represented as `str` in JSON). """
self.minValueDateTime = None
""" Minimum Allowed Value (for some types).
Type `FHIRDate` (represented as `str` in JSON). """
self.minValueDecimal = None
""" Minimum Allowed Value (for some types).
Type `float`. """
self.minValueHumanName = None
""" Minimum Allowed Value (for some types).
Type `HumanName` (represented as `dict` in JSON). """
self.minValueId = None
""" Minimum Allowed Value (for some types).
Type `str`. """
self.minValueIdentifier = None
""" Minimum Allowed Value (for some types).
Type `Identifier` (represented as `dict` in JSON). """
self.minValueInstant = None
""" Minimum Allowed Value (for some types).
Type `FHIRDate` (represented as `str` in JSON). """
self.minValueInteger = None
""" Minimum Allowed Value (for some types).
Type `int`. """
self.minValueMarkdown = None
""" Minimum Allowed Value (for some types).
Type `str`. """
self.minValueMeta = None
""" Minimum Allowed Value (for some types).
Type `Meta` (represented as `dict` in JSON). """
self.minValueOid = None
""" Minimum Allowed Value (for some types).
Type `str`. """
self.minValuePeriod = None
""" Minimum Allowed Value (for some types).
Type `Period` (represented as `dict` in JSON). """
self.minValuePositiveInt = None
""" Minimum Allowed Value (for some types).
Type `int`. """
self.minValueQuantity = None
""" Minimum Allowed Value (for some types).
Type `Quantity` (represented as `dict` in JSON). """
self.minValueRange = None
""" Minimum Allowed Value (for some types).
Type `Range` (represented as `dict` in JSON). """
self.minValueRatio = None
""" Minimum Allowed Value (for some types).
Type `Ratio` (represented as `dict` in JSON). """
self.minValueReference = None
""" Minimum Allowed Value (for some types).
Type `FHIRReference` (represented as `dict` in JSON). """
self.minValueSampledData = None
""" Minimum Allowed Value (for some types).
Type `SampledData` (represented as `dict` in JSON). """
self.minValueSignature = None
""" Minimum Allowed Value (for some types).
Type `Signature` (represented as `dict` in JSON). """
self.minValueString = None
""" Minimum Allowed Value (for some types).
Type `str`. """
self.minValueTime = None
""" Minimum Allowed Value (for some types).
Type `FHIRDate` (represented as `str` in JSON). """
self.minValueTiming = None
""" Minimum Allowed Value (for some types).
Type `Timing` (represented as `dict` in JSON). """
self.minValueUnsignedInt = None
""" Minimum Allowed Value (for some types).
Type `int`. """
self.minValueUri = None
""" Minimum Allowed Value (for some types).
Type `str`. """
self.mustSupport = None
""" If the element must supported.
Type `bool`. """
self.name = None
""" Name for this particular element definition (reference target).
Type `str`. """
self.nameReference = None
""" To another element constraint (by element.name).
Type `str`. """
self.path = None
""" The path of the element (see the Detailed Descriptions).
Type `str`. """
self.patternAddress = None
""" Value must have at least these property values.
Type `Address` (represented as `dict` in JSON). """
self.patternAnnotation = None
""" Value must have at least these property values.
Type `Annotation` (represented as `dict` in JSON). """
self.patternAttachment = None
""" Value must have at least these property values.
Type `Attachment` (represented as `dict` in JSON). """
self.patternBase64Binary = None
""" Value must have at least these property values.
Type `str`. """
self.patternBoolean = None
""" Value must have at least these property values.
Type `bool`. """
self.patternCode = None
""" Value must have at least these property values.
Type `str`. """
self.patternCodeableConcept = None
""" Value must have at least these property values.
Type `CodeableConcept` (represented as `dict` in JSON). """
self.patternCoding = None
""" Value must have at least these property values.
Type `Coding` (represented as `dict` in JSON). """
self.patternContactPoint = None
""" Value must have at least these property values.
Type `ContactPoint` (represented as `dict` in JSON). """
self.patternDate = None
""" Value must have at least these property values.
Type `FHIRDate` (represented as `str` in JSON). """
self.patternDateTime = None
""" Value must have at least these property values.
Type `FHIRDate` (represented as `str` in JSON). """
self.patternDecimal = None
""" Value must have at least these property values.
Type `float`. """
self.patternHumanName = None
""" Value must have at least these property values.
Type `HumanName` (represented as `dict` in JSON). """
self.patternId = None
""" Value must have at least these property values.
Type `str`. """
self.patternIdentifier = None
""" Value must have at least these property values.
Type `Identifier` (represented as `dict` in JSON). """
self.patternInstant = None
""" Value must have at least these property values.
Type `FHIRDate` (represented as `str` in JSON). """
self.patternInteger = None
""" Value must have at least these property values.
Type `int`. """
self.patternMarkdown = None
""" Value must have at least these property values.
Type `str`. """
self.patternMeta = None
""" Value must have at least these property values.
Type `Meta` (represented as `dict` in JSON). """
self.patternOid = None
""" Value must have at least these property values.
Type `str`. """
self.patternPeriod = None
""" Value must have at least these property values.
Type `Period` (represented as `dict` in JSON). """
self.patternPositiveInt = None
""" Value must have at least these property values.
Type `int`. """
self.patternQuantity = None
""" Value must have at least these property values.
Type `Quantity` (represented as `dict` in JSON). """
self.patternRange = None
""" Value must have at least these property values.
Type `Range` (represented as `dict` in JSON). """
self.patternRatio = None
""" Value must have at least these property values.
Type `Ratio` (represented as `dict` in JSON). """
self.patternReference = None
""" Value must have at least these property values.
Type `FHIRReference` (represented as `dict` in JSON). """
self.patternSampledData = None
""" Value must have at least these property values.
Type `SampledData` (represented as `dict` in JSON). """
self.patternSignature = None
""" Value must have at least these property values.
Type `Signature` (represented as `dict` in JSON). """
self.patternString = None
""" Value must have at least these property values.
Type `str`. """
self.patternTime = None
""" Value must have at least these property values.
Type `FHIRDate` (represented as `str` in JSON). """
self.patternTiming = None
""" Value must have at least these property values.
Type `Timing` (represented as `dict` in JSON). """
self.patternUnsignedInt = None
""" Value must have at least these property values.
Type `int`. """
self.patternUri = None
""" Value must have at least these property values.
Type `str`. """
self.representation = None
""" How this element is represented in instances.
List of `str` items. """
self.requirements = None
""" Why is this needed?.
Type `str`. """
self.short = None
""" Concise definition for xml presentation.
Type `str`. """
self.slicing = None
""" This element is sliced - slices follow.
Type `ElementDefinitionSlicing` (represented as `dict` in JSON). """
self.type = None
""" Data type and Profile for this element.
List of `ElementDefinitionType` items (represented as `dict` in JSON). """
super(ElementDefinition, self).__init__(jsondict=jsondict, strict=strict)
def elementProperties(self):
js = super(ElementDefinition, self).elementProperties()
js.extend(
[
("alias", "alias", str, True, None, False),
("base", "base", ElementDefinitionBase, False, None, False),
("binding", "binding", ElementDefinitionBinding, False, None, False),
("code", "code", coding.Coding, True, None, False),
("comments", "comments", str, False, None, False),
("condition", "condition", str, True, None, False),
(
"constraint",
"constraint",
ElementDefinitionConstraint,
True,
None,
False,
),
(
"defaultValueAddress",
"defaultValueAddress",
address.Address,
False,
"defaultValue",
False,
),
(
"defaultValueAnnotation",
"defaultValueAnnotation",
annotation.Annotation,
False,
"defaultValue",
False,
),
(
"defaultValueAttachment",
"defaultValueAttachment",
attachment.Attachment,
False,
"defaultValue",
False,
),
(
"defaultValueBase64Binary",
"defaultValueBase64Binary",
str,
False,
"defaultValue",
False,
),
(
"defaultValueBoolean",
"defaultValueBoolean",
bool,
False,
"defaultValue",
False,
),
(
"defaultValueCode",
"defaultValueCode",
str,
False,
"defaultValue",
False,
),
(
"defaultValueCodeableConcept",
"defaultValueCodeableConcept",
codeableconcept.CodeableConcept,
False,
"defaultValue",
False,
),
(
"defaultValueCoding",
"defaultValueCoding",
coding.Coding,
False,
"defaultValue",
False,
),
(
"defaultValueContactPoint",
"defaultValueContactPoint",
contactpoint.ContactPoint,
False,
"defaultValue",
False,
),
(
"defaultValueDate",
"defaultValueDate",
fhirdate.FHIRDate,
False,
"defaultValue",
False,
),
(
"defaultValueDateTime",
"defaultValueDateTime",
fhirdate.FHIRDate,
False,
"defaultValue",
False,
),
(
"defaultValueDecimal",
"defaultValueDecimal",
float,
False,
"defaultValue",
False,
),
(
"defaultValueHumanName",
"defaultValueHumanName",
humanname.HumanName,
False,
"defaultValue",
False,
),
("defaultValueId", "defaultValueId", str, False, "defaultValue", False),
(
"defaultValueIdentifier",
"defaultValueIdentifier",
identifier.Identifier,
False,
"defaultValue",
False,
),
(
"defaultValueInstant",
"defaultValueInstant",
fhirdate.FHIRDate,
False,
"defaultValue",
False,
),
(
"defaultValueInteger",
"defaultValueInteger",
int,
False,
"defaultValue",
False,
),
(
"defaultValueMarkdown",
"defaultValueMarkdown",
str,
False,
"defaultValue",
False,
),
(
"defaultValueMeta",
"defaultValueMeta",
meta.Meta,
False,
"defaultValue",
False,
),
(
"defaultValueOid",
"defaultValueOid",
str,
False,
"defaultValue",
False,
),
(
"defaultValuePeriod",
"defaultValuePeriod",
period.Period,
False,
"defaultValue",
False,
),
(
"defaultValuePositiveInt",
"defaultValuePositiveInt",
int,
False,
"defaultValue",
False,
),
(
"defaultValueQuantity",
"defaultValueQuantity",
quantity.Quantity,
False,
"defaultValue",
False,
),
(
"defaultValueRange",
"defaultValueRange",
range.Range,
False,
"defaultValue",
False,
),
(
"defaultValueRatio",
"defaultValueRatio",
ratio.Ratio,
False,
"defaultValue",
False,
),
(
"defaultValueReference",
"defaultValueReference",
fhirreference.FHIRReference,
False,
"defaultValue",
False,
),
(
"defaultValueSampledData",
"defaultValueSampledData",
sampleddata.SampledData,
False,
"defaultValue",
False,
),
(
"defaultValueSignature",
"defaultValueSignature",
signature.Signature,
False,
"defaultValue",
False,
),
(
"defaultValueString",
"defaultValueString",
str,
False,
"defaultValue",
False,
),
(
"defaultValueTime",
"defaultValueTime",
fhirdate.FHIRDate,
False,
"defaultValue",
False,
),
(
"defaultValueTiming",
"defaultValueTiming",
timing.Timing,
False,
"defaultValue",
False,
),
(
"defaultValueUnsignedInt",
"defaultValueUnsignedInt",
int,
False,
"defaultValue",
False,
),
(
"defaultValueUri",
"defaultValueUri",
str,
False,
"defaultValue",
False,
),
("definition", "definition", str, False, None, False),
(
"exampleAddress",
"exampleAddress",
address.Address,
False,
"example",
False,
),
(
"exampleAnnotation",
"exampleAnnotation",
annotation.Annotation,
False,
"example",
False,
),
(
"exampleAttachment",
"exampleAttachment",
attachment.Attachment,
False,
"example",
False,
),
(
"exampleBase64Binary",
"exampleBase64Binary",
str,
False,
"example",
False,
),
("exampleBoolean", "exampleBoolean", bool, False, "example", False),
("exampleCode", "exampleCode", str, False, "example", False),
(
"exampleCodeableConcept",
"exampleCodeableConcept",
codeableconcept.CodeableConcept,
False,
"example",
False,
),
(
"exampleCoding",
"exampleCoding",
coding.Coding,
False,
"example",
False,
),
(
"exampleContactPoint",
"exampleContactPoint",
contactpoint.ContactPoint,
False,
"example",
False,
),
(
"exampleDate",
"exampleDate",
fhirdate.FHIRDate,
False,
"example",
False,
),
(
"exampleDateTime",
"exampleDateTime",
fhirdate.FHIRDate,
False,
"example",
False,
),
("exampleDecimal", "exampleDecimal", float, False, "example", False),
(
"exampleHumanName",
"exampleHumanName",
humanname.HumanName,
False,
"example",
False,
),
("exampleId", "exampleId", str, False, "example", False),
(
"exampleIdentifier",
"exampleIdentifier",
identifier.Identifier,
False,
"example",
False,
),
(
"exampleInstant",
"exampleInstant",
fhirdate.FHIRDate,
False,
"example",
False,
),
("exampleInteger", "exampleInteger", int, False, "example", False),
("exampleMarkdown", "exampleMarkdown", str, False, "example", False),
("exampleMeta", "exampleMeta", meta.Meta, False, "example", False),
("exampleOid", "exampleOid", str, False, "example", False),
(
"examplePeriod",
"examplePeriod",
period.Period,
False,
"example",
False,
),
(
"examplePositiveInt",
"examplePositiveInt",
int,
False,
"example",
False,
),
(
"exampleQuantity",
"exampleQuantity",
quantity.Quantity,
False,
"example",
False,
),
("exampleRange", "exampleRange", range.Range, False, "example", False),
("exampleRatio", "exampleRatio", ratio.Ratio, False, "example", False),
(
"exampleReference",
"exampleReference",
fhirreference.FHIRReference,
False,
"example",
False,
),
(
"exampleSampledData",
"exampleSampledData",
sampleddata.SampledData,
False,
"example",
False,
),
(
"exampleSignature",
"exampleSignature",
signature.Signature,
False,
"example",
False,
),
("exampleString", "exampleString", str, False, "example", False),
(
"exampleTime",
"exampleTime",
fhirdate.FHIRDate,
False,
"example",
False,
),
(
"exampleTiming",
"exampleTiming",
timing.Timing,
False,
"example",
False,
),
(
"exampleUnsignedInt",
"exampleUnsignedInt",
int,
False,
"example",
False,
),
("exampleUri", "exampleUri", str, False, "example", False),
(
"fixedAddress",
"fixedAddress",
address.Address,
False,
"fixed",
False,
),
(
"fixedAnnotation",
"fixedAnnotation",
annotation.Annotation,
False,
"fixed",
False,
),
(
"fixedAttachment",
"fixedAttachment",
attachment.Attachment,
False,
"fixed",
False,
),
("fixedBase64Binary", "fixedBase64Binary", str, False, "fixed", False),
("fixedBoolean", "fixedBoolean", bool, False, "fixed", False),
("fixedCode", "fixedCode", str, False, "fixed", False),
(
"fixedCodeableConcept",
"fixedCodeableConcept",
codeableconcept.CodeableConcept,
False,
"fixed",
False,
),
("fixedCoding", "fixedCoding", coding.Coding, False, "fixed", False),
(
"fixedContactPoint",
"fixedContactPoint",
contactpoint.ContactPoint,
False,
"fixed",
False,
),
("fixedDate", "fixedDate", fhirdate.FHIRDate, False, "fixed", False),
(
"fixedDateTime",
"fixedDateTime",
fhirdate.FHIRDate,
False,
"fixed",
False,
),
("fixedDecimal", "fixedDecimal", float, False, "fixed", False),
(
"fixedHumanName",
"fixedHumanName",
humanname.HumanName,
False,
"fixed",
False,
),
("fixedId", "fixedId", str, False, "fixed", False),
(
"fixedIdentifier",
"fixedIdentifier",
identifier.Identifier,
False,
"fixed",
False,
),
(
"fixedInstant",
"fixedInstant",
fhirdate.FHIRDate,
False,
"fixed",
False,
),
("fixedInteger", "fixedInteger", int, False, "fixed", False),
("fixedMarkdown", "fixedMarkdown", str, False, "fixed", False),
("fixedMeta", "fixedMeta", meta.Meta, False, "fixed", False),
("fixedOid", "fixedOid", str, False, "fixed", False),
("fixedPeriod", "fixedPeriod", period.Period, False, "fixed", False),
("fixedPositiveInt", "fixedPositiveInt", int, False, "fixed", False),
(
"fixedQuantity",
"fixedQuantity",
quantity.Quantity,
False,
"fixed",
False,
),
("fixedRange", "fixedRange", range.Range, False, "fixed", False),
("fixedRatio", "fixedRatio", ratio.Ratio, False, "fixed", False),
(
"fixedReference",
"fixedReference",
fhirreference.FHIRReference,
False,
"fixed",
False,
),
(
"fixedSampledData",
"fixedSampledData",
sampleddata.SampledData,
False,
"fixed",
False,
),
(
"fixedSignature",
"fixedSignature",
signature.Signature,
False,
"fixed",
False,
),
("fixedString", "fixedString", str, False, "fixed", False),
("fixedTime", "fixedTime", fhirdate.FHIRDate, False, "fixed", False),
("fixedTiming", "fixedTiming", timing.Timing, False, "fixed", False),
("fixedUnsignedInt", "fixedUnsignedInt", int, False, "fixed", False),
("fixedUri", "fixedUri", str, False, "fixed", False),
("isModifier", "isModifier", bool, False, None, False),
("isSummary", "isSummary", bool, False, None, False),
("label", "label", str, False, None, False),
("mapping", "mapping", ElementDefinitionMapping, True, None, False),
("max", "max", str, False, None, False),
("maxLength", "maxLength", int, False, None, False),
(
"maxValueAddress",
"maxValueAddress",
address.Address,
False,
"maxValue",
False,
),
(
"maxValueAnnotation",
"maxValueAnnotation",
annotation.Annotation,
False,
"maxValue",
False,
),
(
"maxValueAttachment",
"maxValueAttachment",
attachment.Attachment,
False,
"maxValue",
False,
),
(
"maxValueBase64Binary",
"maxValueBase64Binary",
str,
False,
"maxValue",
False,
),
("maxValueBoolean", "maxValueBoolean", bool, False, "maxValue", False),
("maxValueCode", "maxValueCode", str, False, "maxValue", False),
(
"maxValueCodeableConcept",
"maxValueCodeableConcept",
codeableconcept.CodeableConcept,
False,
"maxValue",
False,
),
(
"maxValueCoding",
"maxValueCoding",
coding.Coding,
False,
"maxValue",
False,
),
(
"maxValueContactPoint",
"maxValueContactPoint",
contactpoint.ContactPoint,
False,
"maxValue",
False,
),
(
"maxValueDate",
"maxValueDate",
fhirdate.FHIRDate,
False,
"maxValue",
False,
),
(
"maxValueDateTime",
"maxValueDateTime",
fhirdate.FHIRDate,
False,
"maxValue",
False,
),
("maxValueDecimal", "maxValueDecimal", float, False, "maxValue", False),
(
"maxValueHumanName",
"maxValueHumanName",
humanname.HumanName,
False,
"maxValue",
False,
),
("maxValueId", "maxValueId", str, False, "maxValue", False),
(
"maxValueIdentifier",
"maxValueIdentifier",
identifier.Identifier,
False,
"maxValue",
False,
),
(
"maxValueInstant",
"maxValueInstant",
fhirdate.FHIRDate,
False,
"maxValue",
False,
),
("maxValueInteger", "maxValueInteger", int, False, "maxValue", False),
("maxValueMarkdown", "maxValueMarkdown", str, False, "maxValue", False),
("maxValueMeta", "maxValueMeta", meta.Meta, False, "maxValue", False),
("maxValueOid", "maxValueOid", str, False, "maxValue", False),
(
"maxValuePeriod",
"maxValuePeriod",
period.Period,
False,
"maxValue",
False,
),
(
"maxValuePositiveInt",
"maxValuePositiveInt",
int,
False,
"maxValue",
False,
),
(
"maxValueQuantity",
"maxValueQuantity",
quantity.Quantity,
False,
"maxValue",
False,
),
(
"maxValueRange",
"maxValueRange",
range.Range,
False,
"maxValue",
False,
),
(
"maxValueRatio",
"maxValueRatio",
ratio.Ratio,
False,
"maxValue",
False,
),
(
"maxValueReference",
"maxValueReference",
fhirreference.FHIRReference,
False,
"maxValue",
False,
),
(
"maxValueSampledData",
"maxValueSampledData",
sampleddata.SampledData,
False,
"maxValue",
False,
),
(
"maxValueSignature",
"maxValueSignature",
signature.Signature,
False,
"maxValue",
False,
),
("maxValueString", "maxValueString", str, False, "maxValue", False),
(
"maxValueTime",
"maxValueTime",
fhirdate.FHIRDate,
False,
"maxValue",
False,
),
(
"maxValueTiming",
"maxValueTiming",
timing.Timing,
False,
"maxValue",
False,
),
(
"maxValueUnsignedInt",
"maxValueUnsignedInt",
int,
False,
"maxValue",
False,
),
("maxValueUri", "maxValueUri", str, False, "maxValue", False),
("meaningWhenMissing", "meaningWhenMissing", str, False, None, False),
("min", "min", int, False, None, False),
(
"minValueAddress",
"minValueAddress",
address.Address,
False,
"minValue",
False,
),
(
"minValueAnnotation",
"minValueAnnotation",
annotation.Annotation,
False,
"minValue",
False,
),
(
"minValueAttachment",
"minValueAttachment",
attachment.Attachment,
False,
"minValue",
False,
),
(
"minValueBase64Binary",
"minValueBase64Binary",
str,
False,
"minValue",
False,
),
("minValueBoolean", "minValueBoolean", bool, False, "minValue", False),
("minValueCode", "minValueCode", str, False, "minValue", False),
(
"minValueCodeableConcept",
"minValueCodeableConcept",
codeableconcept.CodeableConcept,
False,
"minValue",
False,
),
(
"minValueCoding",
"minValueCoding",
coding.Coding,
False,
"minValue",
False,
),
(
"minValueContactPoint",
"minValueContactPoint",
contactpoint.ContactPoint,
False,
"minValue",
False,
),
(
"minValueDate",
"minValueDate",
fhirdate.FHIRDate,
False,
"minValue",
False,
),
(
"minValueDateTime",
"minValueDateTime",
fhirdate.FHIRDate,
False,
"minValue",
False,
),
("minValueDecimal", "minValueDecimal", float, False, "minValue", False),
(
"minValueHumanName",
"minValueHumanName",
humanname.HumanName,
False,
"minValue",
False,
),
("minValueId", "minValueId", str, False, "minValue", False),
(
"minValueIdentifier",
"minValueIdentifier",
identifier.Identifier,
False,
"minValue",
False,
),
(
"minValueInstant",
"minValueInstant",
fhirdate.FHIRDate,
False,
"minValue",
False,
),
("minValueInteger", "minValueInteger", int, False, "minValue", False),
("minValueMarkdown", "minValueMarkdown", str, False, "minValue", False),
("minValueMeta", "minValueMeta", meta.Meta, False, "minValue", False),
("minValueOid", "minValueOid", str, False, "minValue", False),
(
"minValuePeriod",
"minValuePeriod",
period.Period,
False,
"minValue",
False,
),
(
"minValuePositiveInt",
"minValuePositiveInt",
int,
False,
"minValue",
False,
),
(
"minValueQuantity",
"minValueQuantity",
quantity.Quantity,
False,
"minValue",
False,
),
(
"minValueRange",
"minValueRange",
range.Range,
False,
"minValue",
False,
),
(
"minValueRatio",
"minValueRatio",
ratio.Ratio,
False,
"minValue",
False,
),
(
"minValueReference",
"minValueReference",
fhirreference.FHIRReference,
False,
"minValue",
False,
),
(
"minValueSampledData",
"minValueSampledData",
sampleddata.SampledData,
False,
"minValue",
False,
),
(
"minValueSignature",
"minValueSignature",
signature.Signature,
False,
"minValue",
False,
),
("minValueString", "minValueString", str, False, "minValue", False),
(
"minValueTime",
"minValueTime",
fhirdate.FHIRDate,
False,
"minValue",
False,
),
(
"minValueTiming",
"minValueTiming",
timing.Timing,
False,
"minValue",
False,
),
(
"minValueUnsignedInt",
"minValueUnsignedInt",
int,
False,
"minValue",
False,
),
("minValueUri", "minValueUri", str, False, "minValue", False),
("mustSupport", "mustSupport", bool, False, None, False),
("name", "name", str, False, None, False),
("nameReference", "nameReference", str, False, None, False),
("path", "path", str, False, None, True),
(
"patternAddress",
"patternAddress",
address.Address,
False,
"pattern",
False,
),
(
"patternAnnotation",
"patternAnnotation",
annotation.Annotation,
False,
"pattern",
False,
),
(
"patternAttachment",
"patternAttachment",
attachment.Attachment,
False,
"pattern",
False,
),
(
"patternBase64Binary",
"patternBase64Binary",
str,
False,
"pattern",
False,
),
("patternBoolean", "patternBoolean", bool, False, "pattern", False),
("patternCode", "patternCode", str, False, "pattern", False),
(
"patternCodeableConcept",
"patternCodeableConcept",
codeableconcept.CodeableConcept,
False,
"pattern",
False,
),
(
"patternCoding",
"patternCoding",
coding.Coding,
False,
"pattern",
False,
),
(
"patternContactPoint",
"patternContactPoint",
contactpoint.ContactPoint,
False,
"pattern",
False,
),
(
"patternDate",
"patternDate",
fhirdate.FHIRDate,
False,
"pattern",
False,
),
(
"patternDateTime",
"patternDateTime",
fhirdate.FHIRDate,
False,
"pattern",
False,
),
("patternDecimal", "patternDecimal", float, False, "pattern", False),
(
"patternHumanName",
"patternHumanName",
humanname.HumanName,
False,
"pattern",
False,
),
("patternId", "patternId", str, False, "pattern", False),
(
"patternIdentifier",
"patternIdentifier",
identifier.Identifier,
False,
"pattern",
False,
),
(
"patternInstant",
"patternInstant",
fhirdate.FHIRDate,
False,
"pattern",
False,
),
("patternInteger", "patternInteger", int, False, "pattern", False),
("patternMarkdown", "patternMarkdown", str, False, "pattern", False),
("patternMeta", "patternMeta", meta.Meta, False, "pattern", False),
("patternOid", "patternOid", str, False, "pattern", False),
(
"patternPeriod",
"patternPeriod",
period.Period,
False,
"pattern",
False,
),
(
"patternPositiveInt",
"patternPositiveInt",
int,
False,
"pattern",
False,
),
(
"patternQuantity",
"patternQuantity",
quantity.Quantity,
False,
"pattern",
False,
),
("patternRange", "patternRange", range.Range, False, "pattern", False),
("patternRatio", "patternRatio", ratio.Ratio, False, "pattern", False),
(
"patternReference",
"patternReference",
fhirreference.FHIRReference,
False,
"pattern",
False,
),
(
"patternSampledData",
"patternSampledData",
sampleddata.SampledData,
False,
"pattern",
False,
),
(
"patternSignature",
"patternSignature",
signature.Signature,
False,
"pattern",
False,
),
("patternString", "patternString", str, False, "pattern", False),
(
"patternTime",
"patternTime",
fhirdate.FHIRDate,
False,
"pattern",
False,
),
(
"patternTiming",
"patternTiming",
timing.Timing,
False,
"pattern",
False,
),
(
"patternUnsignedInt",
"patternUnsignedInt",
int,
False,
"pattern",
False,
),
("patternUri", "patternUri", str, False, "pattern", False),
("representation", "representation", str, True, None, False),
("requirements", "requirements", str, False, None, False),
("short", "short", str, False, None, False),
("slicing", "slicing", ElementDefinitionSlicing, False, None, False),
("type", "type", ElementDefinitionType, True, None, False),
]
)
return js
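# Usage sketch (illustrative; field values assumed): build an
# ElementDefinition from a DSTU2 JSON fragment and read its properties.
#   ed = ElementDefinition({"path": "Patient.name", "min": 0, "max": "1"})
#   print(ed.path, ed.min, ed.max)  # -> Patient.name 0 1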
class ElementDefinitionBase(element.Element):
""" Base definition information for tools.
Information about the base definition of the element, provided to make it
    unnecessary for tools to trace the deviation of the element through the
derived and related profiles. This information is only provided where the
element definition represents a constraint on another element definition,
and must be present if there is a base element definition.
"""
resource_name = "ElementDefinitionBase"
def __init__(self, jsondict=None, strict=True):
""" Initialize all valid properties.
:raises: FHIRValidationError on validation errors, unless strict is False
:param dict jsondict: A JSON dictionary to use for initialization
:param bool strict: If True (the default), invalid variables will raise a TypeError
"""
self.max = None
""" Max cardinality of the base element.
Type `str`. """
self.min = None
""" Min cardinality of the base element.
Type `int`. """
self.path = None
""" Path that identifies the base element.
Type `str`. """
super(ElementDefinitionBase, self).__init__(jsondict=jsondict, strict=strict)
def elementProperties(self):
js = super(ElementDefinitionBase, self).elementProperties()
js.extend(
[
("max", "max", str, False, None, True),
("min", "min", int, False, None, True),
("path", "path", str, False, None, True),
]
)
return js
class ElementDefinitionBinding(element.Element):
""" ValueSet details if this is coded.
Binds to a value set if this element is coded (code, Coding,
CodeableConcept).
"""
resource_name = "ElementDefinitionBinding"
def __init__(self, jsondict=None, strict=True):
""" Initialize all valid properties.
:raises: FHIRValidationError on validation errors, unless strict is False
:param dict jsondict: A JSON dictionary to use for initialization
:param bool strict: If True (the default), invalid variables will raise a TypeError
"""
self.description = None
""" Human explanation of the value set.
Type `str`. """
self.strength = None
""" required | extensible | preferred | example.
Type `str`. """
self.valueSetReference = None
""" Source of value set.
Type `FHIRReference` referencing `ValueSet` (represented as `dict` in JSON). """
self.valueSetUri = None
""" Source of value set.
Type `str`. """
super(ElementDefinitionBinding, self).__init__(jsondict=jsondict, strict=strict)
def elementProperties(self):
js = super(ElementDefinitionBinding, self).elementProperties()
js.extend(
[
("description", "description", str, False, None, False),
("strength", "strength", str, False, None, True),
(
"valueSetReference",
"valueSetReference",
fhirreference.FHIRReference,
False,
"valueSet",
False,
),
("valueSetUri", "valueSetUri", str, False, "valueSet", False),
]
)
return js
class ElementDefinitionConstraint(element.Element):
""" Condition that must evaluate to true.
Formal constraints such as co-occurrence and other constraints that can be
computationally evaluated within the context of the instance.
"""
resource_name = "ElementDefinitionConstraint"
def __init__(self, jsondict=None, strict=True):
""" Initialize all valid properties.
:raises: FHIRValidationError on validation errors, unless strict is False
:param dict jsondict: A JSON dictionary to use for initialization
:param bool strict: If True (the default), invalid variables will raise a TypeError
"""
self.human = None
""" Human description of constraint.
Type `str`. """
self.key = None
""" Target of 'condition' reference above.
Type `str`. """
self.requirements = None
""" Why this constraint necessary or appropriate.
Type `str`. """
self.severity = None
""" error | warning.
Type `str`. """
self.xpath = None
""" XPath expression of constraint.
Type `str`. """
super(ElementDefinitionConstraint, self).__init__(
jsondict=jsondict, strict=strict
)
def elementProperties(self):
js = super(ElementDefinitionConstraint, self).elementProperties()
js.extend(
[
("human", "human", str, False, None, True),
("key", "key", str, False, None, True),
("requirements", "requirements", str, False, None, False),
("severity", "severity", str, False, None, True),
("xpath", "xpath", str, False, None, True),
]
)
return js
class ElementDefinitionMapping(element.Element):
""" Map element to another set of definitions.
Identifies a concept from an external specification that roughly
corresponds to this element.
"""
resource_name = "ElementDefinitionMapping"
def __init__(self, jsondict=None, strict=True):
""" Initialize all valid properties.
:raises: FHIRValidationError on validation errors, unless strict is False
:param dict jsondict: A JSON dictionary to use for initialization
:param bool strict: If True (the default), invalid variables will raise a TypeError
"""
self.identity = None
""" Reference to mapping declaration.
Type `str`. """
self.language = None
""" Computable language of mapping.
Type `str`. """
self.map = None
""" Details of the mapping.
Type `str`. """
super(ElementDefinitionMapping, self).__init__(jsondict=jsondict, strict=strict)
def elementProperties(self):
js = super(ElementDefinitionMapping, self).elementProperties()
js.extend(
[
("identity", "identity", str, False, None, True),
("language", "language", str, False, None, False),
("map", "map", str, False, None, True),
]
)
return js
class ElementDefinitionSlicing(element.Element):
""" This element is sliced - slices follow.
Indicates that the element is sliced into a set of alternative definitions
(i.e. in a structure definition, there are multiple different constraints
on a single element in the base resource). Slicing can be used in any
resource that has cardinality ..* on the base resource, or any resource
with a choice of types. The set of slices is any elements that come after
this in the element sequence that have the same path, until a shorter path
occurs (the shorter path terminates the set).
"""
resource_name = "ElementDefinitionSlicing"
def __init__(self, jsondict=None, strict=True):
""" Initialize all valid properties.
:raises: FHIRValidationError on validation errors, unless strict is False
:param dict jsondict: A JSON dictionary to use for initialization
:param bool strict: If True (the default), invalid variables will raise a TypeError
"""
self.description = None
""" Text description of how slicing works (or not).
Type `str`. """
self.discriminator = None
""" Element values that used to distinguish the slices.
List of `str` items. """
self.ordered = None
""" If elements must be in same order as slices.
Type `bool`. """
self.rules = None
""" closed | open | openAtEnd.
Type `str`. """
super(ElementDefinitionSlicing, self).__init__(jsondict=jsondict, strict=strict)
def elementProperties(self):
js = super(ElementDefinitionSlicing, self).elementProperties()
js.extend(
[
("description", "description", str, False, None, False),
("discriminator", "discriminator", str, True, None, False),
("ordered", "ordered", bool, False, None, False),
("rules", "rules", str, False, None, True),
]
)
return js
class ElementDefinitionType(element.Element):
""" Data type and Profile for this element.
The data type or resource that the value of this element is permitted to
be.
"""
resource_name = "ElementDefinitionType"
def __init__(self, jsondict=None, strict=True):
""" Initialize all valid properties.
:raises: FHIRValidationError on validation errors, unless strict is False
:param dict jsondict: A JSON dictionary to use for initialization
:param bool strict: If True (the default), invalid variables will raise a TypeError
"""
self.aggregation = None
""" contained | referenced | bundled - how aggregated.
List of `str` items. """
self.code = None
""" Name of Data type or Resource.
Type `str`. """
self.profile = None
""" Profile (StructureDefinition) to apply (or IG).
List of `str` items. """
super(ElementDefinitionType, self).__init__(jsondict=jsondict, strict=strict)
def elementProperties(self):
js = super(ElementDefinitionType, self).elementProperties()
js.extend(
[
("aggregation", "aggregation", str, True, None, False),
("code", "code", str, False, None, True),
("profile", "profile", str, True, None, False),
]
)
return js
| 2.25 | 2 |
pycs/spells/beacon_of_hope.py | dwagon/pycs | 0 | 12765614 | <filename>pycs/spells/beacon_of_hope.py
"""https://www.dndbeyond.com/spells/beacon-of-hope"""
from pycs.constant import SpellType
from pycs.spell import SpellAction
##############################################################################
##############################################################################
##############################################################################
class BeaconOfHope(SpellAction):
"""This spell bestows hope and vitality. Choose any number of
creatures within range. For the duration, each target has advantage
on Wisdom saving throws and death saving throws, and regains the
maximum number of hit points possible from any healing."""
##########################################################################
def __init__(self, **kwargs):
name = "Beacon of Hope"
kwargs.update(
{
"reach": 30,
"level": 3,
"type": SpellType.BUFF,
"concentration": SpellType.CONCENTRATION,
}
)
super().__init__(name, **kwargs)
##########################################################################
def heuristic(self):
"""Should we do the spell
the more people it can effect the more we should do it"""
if not self.owner.spell_available(self):
return 0
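        # Note: this currently always returns 0, so the AI never chooses the
        # spell on its own; presumably a placeholder heuristic.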
return 0
##########################################################################
def pick_target(self):
"""Who should we do the spell to"""
return self.owner
##########################################################################
def cast(self):
"""Do the spell"""
return True
##########################################################################
def end_concentration(self):
"""What happens when we stop concentrating"""
# EOF
| 2.53125 | 3 |
pyvisdk/mo/host_system.py | Infinidat/pyvisdk | 0 | 12765615 |
from pyvisdk.base.managed_object_types import ManagedObjectTypes
from pyvisdk.mo.managed_entity import ManagedEntity
import logging
########################################
# Automatically generated, do not edit.
########################################
log = logging.getLogger(__name__)
class HostSystem(ManagedEntity):
'''The HostSystem managed object type provides access to a virtualization host
platform.Invoking destroy on a HostSystem of standalone type throws a
NotSupported fault. A standalone HostSystem can be destroyed only by invoking
destroy on its parent ComputeResource. Invoking destroy on a failover host
throws a DisallowedOperationOnFailoverHost fault. See
ClusterFailoverHostAdmissionControlPolicy.'''
def __init__(self, core, name=None, ref=None, type=ManagedObjectTypes.HostSystem):
super(HostSystem, self).__init__(core, name=name, ref=ref, type=type)
@property
def capability(self):
'''Host capabilities. This might not be available for a disconnected host.'''
return self.update('capability')
@property
def config(self):
'''Host configuration information. This might not be available for a disconnected
host.'''
return self.update('config')
@property
def configManager(self):
'''Host configuration systems.'''
return self.update('configManager')
@property
def datastore(self):
'''A collection of references to the subset of datastore objects in the datacenter
that are available in this HostSystem.'''
return self.update('datastore')
@property
def datastoreBrowser(self):
'''DatastoreBrowser to browse datastores for this host.'''
return self.update('datastoreBrowser')
@property
def hardware(self):
'''Hardware configuration of the host. This might not be available for a
disconnected host.'''
return self.update('hardware')
@property
def licensableResource(self):
'''Information about all licensable resources, currently present on this host.'''
return self.update('licensableResource')
@property
def network(self):
'''A collection of references to the subset of network objects in the datacenter
that are available in this HostSystem.'''
return self.update('network')
@property
def runtime(self):
'''Runtime state information about the host such as connection state.'''
return self.update('runtime')
@property
def summary(self):
'''Basic information about the host, including connection state.'''
return self.update('summary')
@property
def systemResources(self):
'''Reference for the system resource hierarchy, used for configuring the set of
resources reserved to the system and unavailable to virtual machines.'''
return self.update('systemResources')
@property
def vm(self):
'''List of virtual machines associated with this host.'''
return self.update('vm')
def AcquireCimServicesTicket(self):
'''Creates and returns a one-time credential used to establish a remote connection
to a CIM interface. The port to connect to is the standard well known port for
the service.
'''
return self.delegate("AcquireCimServicesTicket")()
def DisconnectHost_Task(self):
'''Disconnects from a host and instructs the server to stop sending heartbeats.
'''
return self.delegate("DisconnectHost_Task")()
def EnterLockdownMode(self):
        '''Modifies the permissions on the host, so that it will only be accessible
        through local console or an authorized centralized management application. Any
        user defined permissions found on the host are lost.
'''
return self.delegate("EnterLockdownMode")()
def EnterMaintenanceMode_Task(self, timeout, evacuatePoweredOffVms=None):
        '''Puts the host in maintenance mode. While this task is running and when the
        host is in maintenance mode, no virtual machines can be powered on and no
        provisioning operations can be performed on the host. Once the call completes,
        it is safe to turn off a host without disrupting any virtual machines.
:param timeout: The task completes when the host successfully enters maintenance mode or the timeout expires, and in the latter case the task contains a Timeout fault. If the timeout is less than or equal to zero, there is no timeout. The timeout is specified in seconds.
:param evacuatePoweredOffVms: This is a parameter only supported by VirtualCenter. If set to true, for a DRS disabled cluster, the task will not succeed unless all powered-off virtual machines have been manually reregistered; for a DRS enabled cluster, VirtualCenter will automatically reregister powered-off virtual machines and a powered-off virtual machine may remain at the host only for two reasons: (a) no compatible host found for reregistration, (b) DRS is disabled for the virtual machine. If set to false, powered-off virtual machines do not need to be moved.VI API 2.5
'''
return self.delegate("EnterMaintenanceMode_Task")(timeout, evacuatePoweredOffVms)
def ExitLockdownMode(self):
'''Restores Administrator permission for the local administrative account for the
host that was removed by prior call to EnterLockdownMode. If the operation is
successful, adminDisabled will be set to false. This API is not supported on
the host. If invoked directly on a host, a NotSupported fault will be
thrown.See AuthorizationManager
'''
return self.delegate("ExitLockdownMode")()
def ExitMaintenanceMode_Task(self, timeout):
        '''Takes the host out of maintenance mode. This blocks if any concurrently
        running maintenance-only host configuration operations are being performed. For
        example, if VMFS volumes are being upgraded.
:param timeout: Number of seconds to wait for the exit maintenance mode to succeed. If the timeout is less than or equal to zero, there is no timeout.
'''
return self.delegate("ExitMaintenanceMode_Task")(timeout)
def PowerDownHostToStandBy_Task(self, timeoutSec, evacuatePoweredOffVms=None):
        '''Puts the host in standby mode, a mode in which the host is in a standby state
        from which it can be powered up remotely. While this task is running, no
        virtual machines can be powered on and no provisioning operations can be
        performed on the host.
:param timeoutSec: The task completes when the host successfully enters standby mode and stops sending heartbeat signals. If heartbeats are still coming after timeoutSecs seconds, the host is declared timedout, and the task is assumed failed.
:param evacuatePoweredOffVms: This is a parameter used only by VirtualCenter. If set to true, for a DRS disabled cluster, the task will not succeed unless all powered-off virtual machines have been manually reregistered; for a DRS enabled cluster, VirtualCenter will automatically reregister powered-off virtual machines and a powered-off virtual machine may remain at the host only for two reasons: (a) no compatible host found for reregistration, (b) DRS is disabled for the virtual machine.
'''
return self.delegate("PowerDownHostToStandBy_Task")(timeoutSec, evacuatePoweredOffVms)
def PowerUpHostFromStandBy_Task(self, timeoutSec):
        '''Takes the host out of standby mode. If the command is successful, the host
        wakes up and starts sending heartbeats. This method may be called automatically
        by a dynamic recommendation generation module to add capacity to a cluster, if
        the host is not in maintenance mode.
:param timeoutSec: The task completes when the host successfully exits standby state and sends a heartbeat signal. If nothing is received from the host for timeoutSec seconds, the host is declared timedout, and the task is assumed failed.
'''
return self.delegate("PowerUpHostFromStandBy_Task")(timeoutSec)
def QueryHostConnectionInfo(self):
'''Connection-oriented information about a host.
'''
return self.delegate("QueryHostConnectionInfo")()
def QueryMemoryOverhead(self, memorySize, numVcpus, videoRamSize=None):
        '''Deprecated. As of VI API 2.5, use QueryMemoryOverheadEx.
Determines the amount of memory overhead necessary to power on a virtual
machine with the specified characteristics.
:param memorySize: The amount of virtual system RAM, in bytes. For an existing virtual machine, this value can be found (in megabytes) as the memoryMB property of the VirtualHardware.
:param videoRamSize: The amount of virtual video RAM, in bytes. For an existing virtual machine on a host that supports advertising this property, this value can be found (in kilobytes) as the videoRamSizeInKB property of the VirtualMachineVideoCard. If this parameter is left unset, the default video RAM size for virtual machines on this host is assumed.
:param numVcpus: The number of virtual CPUs. For an existing virtual machine, this value can be found as the numCPU property of the VirtualHardware.
'''
return self.delegate("QueryMemoryOverhead")(memorySize, videoRamSize, numVcpus)
def QueryMemoryOverheadEx(self, vmConfigInfo):
'''Determines the amount of memory overhead necessary to power on a virtual
machine with the specified characteristics.
:param vmConfigInfo: The configuration of the virtual machine.
'''
return self.delegate("QueryMemoryOverheadEx")(vmConfigInfo)
def RebootHost_Task(self, force):
        '''Reboots a host. If the command is successful, then the host has been rebooted.
        If connected directly to the host, the client never receives an indicator of
        success in the returned task but simply loses connection to the host, upon
        success.
:param force: Flag to specify whether or not the host should be rebooted regardless of whether it is in maintenance mode. If true, the host is rebooted, even if there are virtual machines running or other operations in progress.
'''
return self.delegate("RebootHost_Task")(force)
def ReconfigureHostForDAS_Task(self):
        '''Reconfigures the host for vSphere HA.
'''
return self.delegate("ReconfigureHostForDAS_Task")()
def ReconnectHost_Task(self, cnxSpec=None, reconnectSpec=None):
        '''Reconnects to a host. This process reinstalls agents and reconfigures the host,
        if it has gotten out of date with VirtualCenter. The reconnection process goes
        through many of the same steps as addHost: ensuring the correct set of licenses
        for the number of CPUs on the host, ensuring the correct set of agents is
        installed, and ensuring that networks and datastores are discovered and
        registered with VirtualCenter.
:param reconnectSpec: Includes connection parameters specific to reconnect. This will mainly be used to indicate how to handle divergence between the host settings and vCenter Server settings when the host was disconnected.vSphere API 5.0
'''
return self.delegate("ReconnectHost_Task")(cnxSpec, reconnectSpec)
def RetrieveHardwareUptime(self):
        '''Return the hardware uptime of the host in seconds. The hardware uptime of a host
is not affected by NTP and changes to its wall clock time and can be used by
clients to provide a common time reference for all hosts.
'''
return self.delegate("RetrieveHardwareUptime")()
def ShutdownHost_Task(self, force):
        '''Shuts down a host. If the command is successful, then the host has been shut
        down. Thus, the client never receives an indicator of success in the returned
        task if connected directly to the host.
:param force: Flag to specify whether or not the host should be shut down regardless of whether it is in maintenance mode. If true, the host is shut down, even if there are virtual machines running or other operations in progress.
'''
return self.delegate("ShutdownHost_Task")(force)
def UpdateFlags(self, flagInfo):
'''Update flags that are part of the HostFlagInfo object.
:param flagInfo:
'''
return self.delegate("UpdateFlags")(flagInfo)
def UpdateIpmi(self, ipmiInfo):
'''Update fields that are part of the HostIpmiInfo object.
:param ipmiInfo:
'''
return self.delegate("UpdateIpmi")(ipmiInfo)
def UpdateSystemResources(self, resourceInfo):
'''Update the configuration of the system resource hierarchy.
:param resourceInfo:
'''
return self.delegate("UpdateSystemResources")(resourceInfo)
def RetrieveManagedMethodExecuter(self):
return self.delegate("RetrieveManagedMethodExecuter")()
def RetrieveDynamicTypeManager(self):
return self.delegate("RetrieveDynamicTypeManager")()
| 2.015625 | 2 |
HomeComponents/AutoCheckBox.py | kristian5336/osr2mp4-app | 1 | 12765616 | <reponame>kristian5336/osr2mp4-app
import os
from PyQt5 import QtCore
from PyQt5.QtWidgets import QCheckBox
from abspath import abspath
from helper.helper import changesize
class AutoCheckBox(QCheckBox):
def __init__(self, parent):
super().__init__(parent)
self.main_window = parent
self.img_uncheck = os.path.join(abspath, "res/Uncheck_HD.png")
self.img_check = os.path.join(abspath, "res/Check_HD.png")
self.box_width = 20
self.box_height = 20
self.default_fontsize = 14
self.default_x = 520
self.default_y = 145
self.text = " " + "Use Auto replay"
self.setText(self.text)
self.curfont = self.font()
self.default_width = self.box_width * 1.1 + self.textwidth()
self.default_height = self.box_height * 1.1
self.setCheckState(QtCore.Qt.Unchecked)
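        # The slot defined below shadows the inherited stateChanged signal's
        # name, so the signal itself is reached via super() when connecting.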
super().stateChanged.connect(self.stateChanged)
def textwidth(self):
return self.fontMetrics().boundingRect(self.text).width() * 2
def textheight(self):
return self.fontMetrics().boundingRect(self.text).height() * 2
def changesize(self):
changesize(self)
scale = self.main_window.height() / self.main_window.default_height
self.setStyleSheet("""
QCheckBox {
font-weight: bold;
color: white;
background-color: transparent;
}
QCheckBox::indicator {
width: %fpx;
height: %fpx;
}
QCheckBox::indicator:unchecked {
border-image: url(%s);
}
QCheckBox::indicator:checked {
border-image: url(%s);
}
""" % (self.box_width * scale, self.box_height * scale, self.img_uncheck, self.img_check))
self.curfont.setPointSize(self.default_fontsize * scale)
self.setFont(self.curfont)
# super().setFixedWidth(p_int)
@QtCore.pyqtSlot(int)
def stateChanged(self, p_int):
self.main_window.toggle_auto(p_int == 2)
| 2.46875 | 2 |
model/Consulta.py | RRFreitas/Projeto_APS | 0 | 12765617 | from util.Color import *
class Consulta():
def __init__(self, preco, data, paciente_id, medico_id, realizada, paga, id=None):
self.id=id
self.preco=preco
self.data=data
self.paciente_id=paciente_id
self.medico_id=medico_id
self.realizada=realizada
self.paga=paga
def infoConsulta(self, sistema):
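        # The DAO imports are local to the method, presumably to avoid
        # circular imports at module load time; 'sistema' is accepted but
        # unused in this method.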
from database.MedicoDAO import MedicoDAO
from database.PacienteDAO import PacienteDAO
print(CYAN + "CONSULTA" + RESET)
print("Médico: " + MedicoDAO().getByID(self.medico_id).nome)
print("Paciente: " + PacienteDAO().getByID(self.paciente_id).nome) | 2.796875 | 3 |
services/storage/src/simcore_service_storage/utils.py | KZzizzle/osparc-simcore | 0 | 12765618 | <reponame>KZzizzle/osparc-simcore
import logging
import tenacity
from aiohttp import ClientSession
from yarl import URL
logger = logging.getLogger(__name__)
RETRY_WAIT_SECS = 2
RETRY_COUNT = 20
CONNECT_TIMEOUT_SECS = 30
@tenacity.retry(
wait=tenacity.wait_fixed(RETRY_WAIT_SECS),
stop=tenacity.stop_after_attempt(RETRY_COUNT),
before_sleep=tenacity.before_sleep_log(logger, logging.INFO),
)
async def assert_enpoint_is_ok(
session: ClientSession, url: URL, expected_response: int = 200
):
""" Tenace check to GET given url endpoint
Typically used to check connectivity to a given service
In sync code use as
loop.run_until_complete( check_endpoint(url) )
:param url: endpoint service URL
:type url: URL
:param expected_response: expected http status, defaults to 200 (OK)
:param expected_response: int, optional
"""
async with session.get(url) as resp:
if resp.status != expected_response:
raise AssertionError(f"{resp.status} != {expected_response}")
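# Illustrative usage sketch (the URL is a made-up placeholder):
#
#     import asyncio
#
#     async def _probe():
#         async with ClientSession() as session:
#             await assert_enpoint_is_ok(session, URL("http://localhost:8080/v0/"))
#
#     asyncio.get_event_loop().run_until_complete(_probe())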
def is_url(location):
return bool(URL(str(location)).host)
def expo(base=1.2, factor=0.1, max_value=2):
"""Generator for exponential decay.
Args:
base: The mathematical base of the exponentiation operation
factor: Factor to multiply the exponentation by.
max_value: The maximum value until it will yield
"""
n = 0
while True:
a = factor * base ** n
if max_value is None or a < max_value:
yield a
n += 1
else:
yield max_value | 2.890625 | 3 |
ZergParser.py | loginn/sc2-build-classifier | 0 | 12765619 | <filename>ZergParser.py
# This file contains parsing functions for Zerg build orders
from RaceParser import RaceParser
class ZergParser(RaceParser):
def __init__(self, player, max_tuples=50):
super().__init__(player, max_tuples)
self.name = "zerg parser"
| 2.328125 | 2 |
script_generator.py | openharmony-gitee-mirror/update_packaging_tools | 0 | 12765620 | <gh_stars>0
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright (c) 2021 Huawei Device Co., Ltd.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Description : Create script file for updater
"""
import os
import re
import tempfile
from decimal import getcontext
from decimal import Decimal
from log_exception import VendorExpandError
from log_exception import UPDATE_LOGGER
from utils import OPTIONS_MANAGER
from utils import IMAGE_FILE_MOUNT_TO_PARTITION_DICT
from utils import PARTITION_FILE
from utils import TWO_STEP
from utils import TOTAL_SCRIPT_FILE_NAME
from utils import SCRIPT_FILE_NAME
from utils import SCRIPT_KEY_LIST
class Script:
def __init__(self):
self.script = []
self.version = 0
self.info = {}
def add_command(self, cmd=None):
"""
Add command content to the script.
:param cmd: command content
:return:
"""
self.script.append(cmd)
def get_script(self):
"""
Get the script list.
:return: script list
"""
return self.script
def sha_check(self, *args, **kwargs):
raise VendorExpandError(type(self), 'sha_check')
def first_block_check(self, *args, **kwargs):
raise VendorExpandError(type(self), 'first_block_check')
def abort(self, *args, **kwargs):
raise VendorExpandError(type(self), 'abort')
def show_progress(self, *args, **kwargs):
raise VendorExpandError(type(self), 'show_progress')
def block_update(self, *args, **kwargs):
raise VendorExpandError(type(self), 'block_update')
def sparse_image_write(self, *args, **kwargs):
raise VendorExpandError(type(self), 'sparse_image_write')
def raw_image_write(self, *args, **kwargs):
raise VendorExpandError(type(self), 'raw_image_write')
def get_status(self, *args, **kwargs):
raise VendorExpandError(type(self), 'get_status')
def set_status(self, *args, **kwargs):
raise VendorExpandError(type(self), 'set_status')
def reboot_now(self, *args, **kwargs):
raise VendorExpandError(type(self), 'reboot_now')
def updater_partitions(self, *args, **kwargs):
raise VendorExpandError(type(self), 'updater_partitions')
class PreludeScript(Script):
def __init__(self):
super().__init__()
class VerseScript(Script):
def __init__(self):
super().__init__()
def sha_check(self, ranges_str, expected_sha, partition):
"""
Get the sha_check command.
:param ranges_str: ranges string
:param expected_sha: hash value
:param partition: image name
:return:
"""
cmd = ('sha_check("/{partition}", "{ranges_str}", '
'"{expected_sha}")').format(
ranges_str=ranges_str,
expected_sha=expected_sha, partition=partition)
return cmd
def first_block_check(self, partition):
"""
Get the first_block_check command.
:param partition: image name
:return:
"""
cmd = 'first_block_check("/{partition}")'.format(
partition=partition)
return cmd
def abort(self, partition):
"""
Get the abort command.
:param partition: image name
:return:
"""
cmd = 'abort("ERROR: {partition} partition ' \
'fails to incremental check!");\n'.format(
partition=partition)
return cmd
def show_progress(self, start_progress, dur):
"""
Get the show_progress command.
'dur' may be zero to advance the progress via SetProgress
:param start_progress: start progress
:param dur: seconds
:return:
"""
cmd = 'show_progress({start_progress}, {dur});\n'.format(
start_progress=start_progress, dur=dur)
return cmd
def block_update(self, partition):
"""
Get the block_update command.
:param partition: image name
:return:
"""
cmd = 'block_update("/{partition}", ' \
'"{partition}.transfer.list", "{partition}.new.dat", ' \
'"{partition}.patch.dat");\n'.format(partition=partition)
return cmd
def sparse_image_write(self, partition):
"""
Get the sparse_image_write command.
:param partition: image name
:return:
"""
cmd = 'sparse_image_write("/%s");\n' % partition
return cmd
def raw_image_write(self, partition, image_file_name):
"""
Get the raw_image_write command.
:param partition: image name
:param image_file_name: image file name
:return:
"""
if partition in IMAGE_FILE_MOUNT_TO_PARTITION_DICT.keys():
partition = IMAGE_FILE_MOUNT_TO_PARTITION_DICT.get(partition)
cmd = 'raw_image_write("/%s", "/%s");\n' % (partition, image_file_name)
return cmd
def get_status(self):
"""
Get the get_status command.
:return:
"""
cmd = 'get_status("/misc")'
return cmd
def set_status(self, status_value):
"""
Get the set_status command.
:param status_value: status value to be set
:return:
"""
cmd = 'set_status("/misc", %s);\n' % status_value
return cmd
def reboot_now(self):
"""
Get the reboot_now command.
:return:
"""
cmd = 'reboot_now();\n'
return cmd
def updater_partitions(self):
"""
Get the updater_partitions command.
:return:
"""
cmd = 'update_partitions("/%s");\n' % PARTITION_FILE
return cmd
class RefrainScript(Script):
def __init__(self):
super().__init__()
class EndingScript(Script):
def __init__(self):
super().__init__()
def write_script(script_content, opera_name):
"""
Generate the {opera}script.
:param script_content: script content
:param opera_name: Opera phase names corresponding to the script content
'prelude', 'verse', 'refrain', and 'ending'.
:return:
"""
script_file = tempfile.NamedTemporaryFile(mode='w+')
script_file.write(script_content)
script_file.seek(0)
script_file_name = ''.join([opera_name.title(), SCRIPT_FILE_NAME])
OPTIONS_MANAGER.opera_script_file_name_dict[opera_name].\
append((script_file_name, script_file))
UPDATE_LOGGER.print_log("%s generation complete!" % script_file_name)
def generate_total_script():
"""
Generate the overall script.
"""
content_list = []
for each_key, each_value in \
OPTIONS_MANAGER.opera_script_file_name_dict.items():
for each in each_value:
each_content = "LoadScript(\"%s\", %s);" % \
(each[0], SCRIPT_KEY_LIST.index(each_key))
content_list.append(each_content)
script_total = tempfile.NamedTemporaryFile(mode='w+')
script_total.write('\n'.join(content_list))
script_total.seek(0)
OPTIONS_MANAGER.total_script_file_obj = script_total
UPDATE_LOGGER.print_log("%s generation complete!" % TOTAL_SCRIPT_FILE_NAME)
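## Each line of the generated total script has the form (contents depend on
## SCRIPT_FILE_NAME and SCRIPT_KEY_LIST, so this example is illustrative):
##   LoadScript("Verse<SCRIPT_FILE_NAME>", 1);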
def get_progress_value(distributable_value=100):
"""
Allocate a progress value to each image update.
:param distributable_value: distributable value
:return:
"""
progress_value_dict = {}
full_img_list = OPTIONS_MANAGER.full_img_list
incremental_img_list = OPTIONS_MANAGER.incremental_img_list
file_size_list = []
if len(full_img_list) == 0 and len(incremental_img_list) == 0:
UPDATE_LOGGER.print_log(
"get progress value failed! > getting progress value failed!",
UPDATE_LOGGER.ERROR_LOG)
return False
for idx, _ in enumerate(incremental_img_list):
# Obtain the size of the incremental image file.
if OPTIONS_MANAGER.two_step and incremental_img_list[idx] == TWO_STEP:
# Updater images are not involved in progress calculation.
incremental_img_list.remove(TWO_STEP)
continue
file_obj = OPTIONS_MANAGER.incremental_image_file_obj_list[idx]
each_img_size = os.path.getsize(file_obj.name)
file_size_list.append(each_img_size)
for idx, _ in enumerate(full_img_list):
# Obtain the size of the full image file.
if OPTIONS_MANAGER.two_step and full_img_list[idx] == TWO_STEP:
# Updater images are not involved in progress calculation.
continue
file_obj = OPTIONS_MANAGER.full_image_file_obj_list[idx]
each_img_size = os.path.getsize(file_obj.name)
file_size_list.append(each_img_size)
if OPTIONS_MANAGER.two_step and TWO_STEP in full_img_list:
full_img_list.remove(TWO_STEP)
proportion_value_list = get_proportion_value_list(
file_size_list, distributable_value=distributable_value)
adjusted_proportion_value_list = adjust_proportion_value_list(
proportion_value_list, distributable_value)
all_img_list = incremental_img_list + full_img_list
current_progress = 40
for idx, each_img in enumerate(all_img_list):
temp_progress = current_progress + adjusted_proportion_value_list[idx]
progress_value_dict[each_img] = (current_progress, temp_progress)
current_progress = temp_progress
return progress_value_dict
def get_proportion_value_list(file_size_list, distributable_value=100):
"""
Obtain the calculated progress proportion value list
(proportion_value_list).
:param file_size_list: file size list
:param distributable_value: distributable value
:return proportion_value_list: progress proportion value list
"""
sum_size = sum(file_size_list)
getcontext().prec = 2
proportion_value_list = []
for each_size_value in file_size_list:
proportion = Decimal(str(float(each_size_value))) / Decimal(
str(float(sum_size)))
proportion_value = int(
Decimal(str(proportion)) *
Decimal(str(float(distributable_value))))
if proportion_value == 0:
proportion_value = 1
proportion_value_list.append(proportion_value)
return proportion_value_list
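## Worked example (made-up sizes): file_size_list=[1, 3] with
## distributable_value=100 gives proportions 0.25 and 0.75, hence [25, 75];
## any value that truncates to 0 is bumped up to 1.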
def adjust_proportion_value_list(proportion_value_list, distributable_value):
"""
Adjust the calculated progress proportion value list to ensure that
sum is equal to distributable_value.
:param proportion_value_list: calculated progress proportion value list
:param distributable_value: number of distributable progress values
:return proportion_value_list: new progress proportion value list
"""
if len(proportion_value_list) == 0:
return []
sum_proportion_value = sum(proportion_value_list)
if sum_proportion_value > distributable_value:
max_value = max(proportion_value_list)
max_idx = proportion_value_list.index(max_value)
proportion_value_list[max_idx] = \
max_value - (sum_proportion_value - distributable_value)
elif sum_proportion_value < distributable_value:
min_value = min(proportion_value_list)
min_idx = proportion_value_list.index(min_value)
proportion_value_list[min_idx] = \
min_value + (distributable_value - sum_proportion_value)
return proportion_value_list
def create_script(prelude_script, verse_script,
refrain_script, ending_script):
"""
Generate the script file.
:param prelude_script: prelude script
:param verse_script: verse script
:param refrain_script: refrain script
:param ending_script: ending script
:return:
"""
# Generate the prelude script.
prelude_script.add_command("\n# ---- prelude ----\n")
# Get the distribution progress.
progress_value_dict = get_progress_value()
if progress_value_dict is False:
return False
verse_script_content_list = verse_script.get_script()
updater_content = []
if OPTIONS_MANAGER.two_step:
for idx, each_cmd in enumerate(verse_script_content_list[1:]):
if "/%s" % TWO_STEP in each_cmd:
updater_content.append(each_cmd)
each_cmd = \
'\n'.join(
[' %s' % each for each in each_cmd.split('\n')])
verse_script_content_list[0] = \
verse_script_content_list[0].replace(
"UPDATER_WRITE_FLAG",
"%s\nUPDATER_WRITE_FLAG" % each_cmd)
verse_script_content_list[0] = \
verse_script_content_list[0].replace("UPDATER_WRITE_FLAG", "")
verse_script_content_list[0] = \
verse_script_content_list[0].replace("updater_WRITE_FLAG", "")
for each in updater_content:
verse_script_content_list.remove(each)
verse_script_content = '\n'.join(verse_script_content_list[1:])
else:
verse_script_content = '\n'.join(verse_script_content_list)
for key, value in progress_value_dict.items():
show_progress_content = \
verse_script.show_progress((value[1] - value[0]) / 100, 0)
verse_script_content = \
re.sub(r'%s_WRITE_FLAG' % key, '%s' % show_progress_content,
verse_script_content, count=1)
if OPTIONS_MANAGER.two_step:
verse_script_content = '\n'.join(
[' %s' % each for each in verse_script_content.split('\n')])
verse_script_content = verse_script_content_list[0].replace(
"ALL_WRITE_FLAG", verse_script_content)
# Generate the verse script.
write_script(verse_script_content, 'verse')
# Generate the refrain script.
refrain_script.add_command("\n# ---- refrain ----\n")
# Generate the ending script.
ending_script.add_command("\n# ---- ending ----\n")
generate_total_script()
| 2.109375 | 2 |
tests/integration/test_requirements.py | Darsstar/pipenv | 0 | 12765621 | import pytest
@pytest.mark.requirements
def test_requirements_generates_requirements_from_lockfile(PipenvInstance):
with PipenvInstance(chdir=True) as p:
packages = ('requests', '2.14.0')
dev_packages = ('flask', '0.12.2')
with open(p.pipfile_path, 'w') as f:
contents = f"""
[packages]
{packages[0]}= "=={packages[1]}"
[dev-packages]
{dev_packages[0]}= "=={dev_packages[1]}"
""".strip()
f.write(contents)
p.pipenv('lock')
c = p.pipenv('requirements')
assert c.returncode == 0
assert f'{packages[0]}=={packages[1]}' in c.stdout
assert f'{dev_packages[0]}=={dev_packages[1]}' not in c.stdout
d = p.pipenv('requirements --dev')
assert d.returncode == 0
assert f'{packages[0]}=={packages[1]}' in d.stdout
assert f'{dev_packages[0]}=={dev_packages[1]}' in d.stdout
e = p.pipenv('requirements --dev-only')
assert e.returncode == 0
assert f'{packages[0]}=={packages[1]}' not in e.stdout
assert f'{dev_packages[0]}=={dev_packages[1]}' in e.stdout
e = p.pipenv('requirements --hash')
assert e.returncode == 0
assert f'{packages[0]}=={packages[1]}' in e.stdout
for value in p.lockfile['default'].values():
            for hash_value in value['hashes']:
                assert f' --hash={hash_value}' in e.stdout
@pytest.mark.requirements
def test_requirements_generates_requirements_from_lockfile_multiple_sources(PipenvInstance):
with PipenvInstance(chdir=True) as p:
packages = ('requests', '2.14.0')
dev_packages = ('flask', '0.12.2')
with open(p.pipfile_path, 'w') as f:
contents = f"""
[[source]]
name = "pypi"
url = "https://pypi.org/simple"
verify_ssl = true
[[source]]
name = "other_source"
url = "https://some_other_source.org"
verify_ssl = true
[packages]
{packages[0]}= "=={packages[1]}"
[dev-packages]
{dev_packages[0]}= "=={dev_packages[1]}"
""".strip()
f.write(contents)
        lock_result = p.pipenv('lock')
        assert lock_result.returncode == 0
c = p.pipenv('requirements')
assert c.returncode == 0
assert '-i https://pypi.org/simple' in c.stdout
assert '--extra-index-url https://some_other_source.org' in c.stdout
| 2.34375 | 2 |
electoral/urls.py | RyanFleck/Django-React-Heroku-Test | 0 | 12765622 | """electoral URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/3.0/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path, re_path
from electoral_backend.views import Authenticate, FrontendAppView, TestDataView, PrivacyPolicy, TermsOfService
urlpatterns = [
path('admin/', admin.site.urls),
path('api/testdata/', TestDataView.as_view()),
path('api/authenticate/', Authenticate.as_view()),
path('privacy-policy/', PrivacyPolicy.as_view()),
path('terms-of-service/', TermsOfService.as_view()),
re_path(r'^', FrontendAppView.as_view()),
]
| 2.421875 | 2 |
PythonAppDevSamples/OrcsV1/orcsV0_0_1.py | HamzaHindRyanSofia/wargameRepo | 0 | 12765623 | '''
Created on 16 Aug 2017
@author: Hamza
'''
class MyClass(object):
'''
    Placeholder class from the IDE template; behaviour not yet implemented.
'''
def __init__(self, params):
'''
Constructor
'''
| 2.828125 | 3 |
designate/storage/impl_sqlalchemy/migrate_repo/versions/040_fix_record_data.py | infobloxopen/designate | 0 | 12765624 | <filename>designate/storage/impl_sqlalchemy/migrate_repo/versions/040_fix_record_data.py
# Copyright 2014 Hewlett-Packard Development Company, L.P.
#
# Author: <NAME> <<EMAIL>>
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from sqlalchemy import Text
from sqlalchemy.schema import Table, MetaData
meta = MetaData()
# No downgrade possible - MySQL may have performed an implicit conversion from
# text -> mediumtext depending on the particular deployments server-wide
# default charset during migration 21's conversion to utf-8.
def upgrade(migrate_engine):
meta.bind = migrate_engine
if migrate_engine.name == "mysql":
records_table = Table('records', meta, autoload=True)
records_table.c.data.alter(type=Text())
def downgrade(migrate_engine):
pass
| 1.835938 | 2 |
pyCardiac/signal/processing/filtration/fourier_filter.py | humanphysiologylab/pyCardiac | 0 | 12765625 | import numpy as np
from scipy.fftpack import rfft, irfft, rfftfreq
from ....routines import rescale
def fourier_filter(data: np.ndarray, fs: float,
lp_freq: float = None, hp_freq: float = None, bs_freqs: list = [],
trans_width: float = 1, band_width: float = 1) -> np.ndarray:
"""
    Fourier filter along the last axis of ``data`` with lowpass, highpass and bandstop options.
Parameters
----------
``data`` : np.ndarray
``fs``: float
sampling frequency
``lp_freq``: float, optional
lowpass frequency (default is None)
``hp_freq``: float, optional
highpass frequency (default is None)
``bs_freqs``: list, optional
bandstop frequencies (default is [])
``trans_width``: float, optional
width of the transition region between bands (default is 1)
in frequency units
``band_width``: float, optional
width of the band to remove (default is 1)
in frequency units
Returns
-------
np.ndarray
filtered ``data``
"""
T = data.shape[-1]
d = 1. / fs
freq = rfftfreq(T, d)
f_data = rfft(data, axis=-1)
freq_resp = create_freq_resp(freq, lp_freq,
hp_freq, bs_freqs,
trans_width, band_width)
f_data = np.apply_along_axis(lambda x: x * freq_resp, -1, f_data)
data_filtered = irfft(f_data)
return data_filtered
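# Illustrative usage sketch (the signal parameters are made up): low-pass
# filtering a noisy 5 Hz sine sampled at 1 kHz.
#
#     t = np.arange(0, 1.0, 1e-3)
#     noisy = np.sin(2 * np.pi * 5 * t) + 0.3 * np.random.randn(t.size)
#     clean = fourier_filter(noisy, fs=1000.0, lp_freq=20.0)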
def create_freq_resp(freq: np.ndarray, lp_freq: float,
hp_freq: float, bs_freqs: list,
trans_width: float, band_width: float) -> np.ndarray:
"""Calculates frequency responce for given ``freq``
Parameters
----------
``freq``: np.ndarray, shape=(N)
frequency array
``lp_freq``: float
lowpass frequency
``hp_freq``: float
highpass frequency
``bs_freqs``: list
bandstop frequencies
``trans_width``: float
width of the transition region between bands
``band_width``: float
width of the band to remove
Returns
--------
np.ndarray, shape=(N)
        frequency response array with values in the range 0 to 1
"""
freq_resp = np.ones_like(freq)
if lp_freq is not None:
freq_resp *= FR_lowpass(freq, lp_freq, trans_width)
if hp_freq is not None:
freq_resp *= FR_highpass(freq, hp_freq, trans_width)
for bs_freq in bs_freqs:
freq_resp *= FR_bandstop(freq, bs_freq, trans_width, band_width)
return freq_resp
def FR_lowpass(freq: np.ndarray, lp_freq: float,
trans_width: float) -> np.ndarray:
"""Frequency responce for lowpass filter
Parameters
----------
``freq``: np.ndarray
frequency array
``lp_freq``: float
lowpass frequency
``trans_width``: float
width of the transition region between bands
Returns
-------
np.ndarray
with values in [0, 1]
"""
sigma = trans_width / 6.
return 1 / (1 + np.exp((freq - lp_freq) / sigma))
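# Note: this is a logistic roll-off centred on lp_freq; with
# sigma = trans_width / 6 the response falls from about 0.95 to about 0.05
# over roughly one trans_width around the cut-off frequency.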
def FR_highpass(freq: np.ndarray, hp_freq: float,
trans_width: float) -> np.ndarray:
"""Frequency responce for highpass filter
Parameters
----------
``freq``: np.ndarray
frequency array
``hp_freq``: float
highpass frequency
``trans_width``: float
width of the transition region between bands
Returns
-------
np.ndarray
with values in [0, 1]
"""
sigma = trans_width / 6.
return 1 / (1 + np.exp((hp_freq - freq) / sigma))
def FR_bandstop(freq: np.ndarray, bs_freq: float,
trans_width: float, band_width: float) -> np.ndarray:
"""Frequency responce for bandstop filter
Parameters
----------
``freq``: np.ndarray
frequency array
``bs_freq``: float
bandstop frequency
    ``trans_width``: float
        width of the transition region between bands
    ``band_width``: float
        width of the band to remove
Returns
-------
np.ndarray
with values in [0, 1]
"""
left = FR_lowpass(freq, bs_freq - band_width / 2., trans_width)
right = FR_highpass(freq, bs_freq + band_width / 2., trans_width)
return rescale(left + right) | 2.734375 | 3 |
PSICT_extras/WorkerScriptManager.py | JeanOlivier/Labber-PSICT | 2 | 12765626 | import os
import sys
import re
import shutil
import importlib.util
import numpy as np
from datetime import datetime
import time
import pathlib
import logging
import PSICT_UIF._include36._LogLevels as LogLevels
## Worker script breakpoints - DO NOT MODIFY
OPTIONS_DICT_BREAKPOINT = '## OPTIONS DICT BREAKPOINT'
SCRIPT_COPY_BREAKPOINT = '## SCRIPT COPY BREAKPOINT'
##############################################################################
## Grouping formatting styles for worker script values
format_groups = {}
format_groups['GHz .6'] = ['readout_frequency_opt', 'qubit_frequency_opt', 'magnon_frequency_opt', 'pump_frequency_opt']
format_groups['MHz int'] = ['readout_IF_frequency', 'qubit_IF_frequency', 'magnon_IF_frequency', 'pump_IF_frequency']
format_groups['MHz .3'] = ['intentional_detuning', 'optimal_detuning']
format_groups['int'] = ['readout_LO_power', 'qubit_LO_power', 'magnon_LO_power', 'pump_IF_frequency', 'SQPG_truncation_range']
format_groups['.2'] = ['magnon_amplitude_alpha', 'magnon_amplitude_beta', 'magnon_phase_beta', 'n_m']
format_groups['.3'] = ['magnon_amplitude', 'pump_amplitude', 'qubit_amplitude', 'readout_amplitude']
format_groups['.4'] = ['readout_amplitude_opt']
format_groups['e rm0'] = ['N_shots', 'SQPG_sampling_rate', 'MultiPulse_sampling_rate', 'digitizer_sampling_rate', 'N_single_shots', 'N_repetitions', 'N_repetitions_2', 'N_pts']
format_groups['e-3 .6'] = ['current']
format_groups['ns'] = ['SQPG_sequence_duration', 'MultiPulse_sequence_duration', 'readout_plateau_opt', 'qubit_width_pi', 'qubit_plateau_pi', 'demodulation_skip_start', 'demodulation_length', 'qubit_width', 'qubit_plateau', 'magnon_width', 'magnon_plateau', 'tau_s', 'tau', 'tau_delay', 'digitizer_length']
format_groups['us'] = ['wait_time']
format_groups['list GHz rm0'] = ['readout_frequency_list', 'qubit_frequency_list', 'magnon_frequency_list']
format_groups['list MHz int'] = ['qubit_drive_detuning_list', 'intentional_detuning_list']
format_groups['list .3'] = ['readout_amplitude_list', 'qubit_amplitude_list', 'n_m_list', 'magnon_amplitude_alpha_list', 'magnon_real_alpha_list', 'magnon_imag_alpha_list']
format_groups['list ns'] = ['qubit_width_list', 'qubit_plateau_list', 'tau_list']
format_groups['dict ns:.6'] = ['qubit_amplitude_pi_dict', 'qubit_amplitude_pi_2_dict']
format_groups['dict ns:1.2'] = ['lambda_dict']
## Function to convert values to correct formatting style
def get_formatted_rep(key, value):
if isinstance(value, str):
value_rep = '\''+value+'\''
## Single-value formats
elif key in format_groups['GHz .6']:
value_rep = '{:.6f}e9'.format(value*1e-9)
elif key in format_groups['MHz int']:
value_rep = '{:.0f}e6'.format(value*1e-6)
elif key in format_groups['MHz .3']:
value_rep = '{:.3f}e6'.format(value*1e-6)
elif key in format_groups['int']:
value_rep = '{:d}'.format(int(value))
elif key in format_groups['.2']:
value_rep = '{:.2f}'.format(value)
elif key in format_groups['.3']:
value_rep = '{:.3f}'.format(value)
elif key in format_groups['.4']:
value_rep = '{:.4f}'.format(value)
elif key in format_groups['e rm0']:
mantissa, exponent = '{:e}'.format(value).split('e')
value_rep = mantissa.rstrip('0').rstrip('.')+'e'+exponent.lstrip('+')
elif key in format_groups['e-3 .6']:
value_rep = '{:.6f}e-3'.format(value*1e3)
elif key in format_groups['ns']:
value_rep = '{:.0f}e-9'.format(value*1e9)
elif key in format_groups['us']:
value_rep = '{:.0f}e-6'.format(value*1e6)
## List formats
elif key in format_groups['list GHz rm0']:
start = value[0]
stop = value[1]
npts = value[2]
str_start = ''.join(['{:f}'.format(start*1e-9).rstrip('0'), 'e9'])
str_stop = ''.join(['{:f}'.format(stop*1e-9).rstrip('0'), 'e9'])
str_npts = '{:d}'.format(npts)
value_rep = ''.join(['[', str_start, ', ', str_stop, ', ', str_npts, ']'])
elif key in format_groups['list MHz int']:
start = value[0]
stop = value[1]
npts = value[2]
str_start = ''.join(['{:.0f}'.format(start*1e-6), 'e6'])
str_stop = ''.join(['{:.0f}'.format(stop*1e-6), 'e6'])
str_npts = '{:d}'.format(npts)
value_rep = ''.join(['[', str_start, ', ', str_stop, ', ', str_npts, ']'])
elif key in format_groups['list .3']:
start = value[0]
stop = value[1]
npts = value[2]
str_start = '{:.3f}'.format(start)
str_stop = '{:.3f}'.format(stop)
str_npts = '{:d}'.format(npts)
value_rep = ''.join(['[', str_start, ', ', str_stop, ', ', str_npts, ']'])
elif key in format_groups['list ns']:
start = value[0]
stop = value[1]
npts = value[2]
str_start = '{:.0f}e-9'.format(start*1e9)
str_stop = '{:.0f}e-9'.format(stop*1e9)
str_npts = '{:d}'.format(npts)
value_rep = ''.join(['[', str_start, ', ', str_stop, ', ', str_npts, ']'])
## Dict formats
elif key in format_groups['dict ns:.6']:
value_rep = '{'
for inner_key, inner_value in value.items():
key_string = '{:.0f}e-9'.format(inner_key*1e9)
value_string = '{:.6f}'.format(inner_value)
value_rep += key_string+': '+value_string+', '
value_rep += '}'
elif key in format_groups['dict ns:1.2']:
value_rep = '{'
for inner_key, inner_value in value.items():
key_string = '{:.0f}e-9'.format(inner_key*1e9)
value_string = '{:1.3f}'.format(inner_value)
value_rep += key_string+': '+value_string+', '
value_rep += '}'
## new formats go here...
else:
# print(key, 'is not a special class')
value_rep = str(value)
return value_rep
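## Illustrative examples of the formatting conventions above:
##   get_formatted_rep('qubit_frequency_opt', 7.123456789e9) -> '7.123457e9'
##   get_formatted_rep('N_shots', 1000)                      -> '1e03'
##   get_formatted_rep('tau', 250e-9)                        -> '250e-9'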
##############################################################################
## Labber Data folder structure
def split_labber_data_dir(original_dir):
head, Data_MMDD_folder = os.path.split(original_dir)
head, MM_folder = os.path.split(head)
head, YYYY_folder = os.path.split(head)
return head, YYYY_folder, MM_folder, Data_MMDD_folder
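## Illustrative example (path made up): '<root>/2019/05/Data_0523' splits
## into ('<root>', '2019', '05', 'Data_0523').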
def update_labber_dates_dir(original_dir, time_obj = None):
    ## Avoid the eager default-argument pitfall: datetime.now() in the
    ## signature would be evaluated once at import time, not at each call.
    if time_obj is None:
        time_obj = datetime.now()
## Separate path into parts
head, old_year_folder, old_month_folder, old_Data_folder = split_labber_data_dir(original_dir.rstrip('/'))
## Create updated year folder
year_folder = '{:%Y}'.format(time_obj)
## Create updated month folder
month_folder = '{:%m}'.format(time_obj)
## Create updated Data_MMDD folder
Data_folder = 'Data_{:%m%d}'.format(time_obj)
return pathlib.Path(head, year_folder, month_folder, Data_folder)
def increment_filename(fname_in):
'''
Re-implementation of the PSICT-UIF filename incrementation procedure.
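    e.g. 'measurement_0042' -> 'measurement_0043' (illustrative name).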
'''
## Split the file name into a head and sequential id
fname_split = re.split(r'(\d+$)', fname_in) # split by int searching from back
if len(fname_split) < 2: # could not split properly
raise RuntimeError("Could not identify sequential ID in filename:", fname_in)
fname_head = fname_split[0]
fname_id = fname_split[1]
## Increment the id
new_id = increment_string(fname_id)
## Put path back together
new_fname = "".join([fname_head, new_id])
return new_fname
def increment_string(str_in):
'''
Increment a string, preserving leading zeros.
eg "00567" -> "00568"
'''
return str(int(str_in)+1).zfill(len(str_in))
##############################################################################
## User interaction for hardware changes
def get_user_confirmation(message, MAX_ATTEMPTS = 5):
'''
Wait for a response from the user; use to hold off experiments until hardware changes have been carried out.
'''
n_attempts = 0
positive_response = False
while n_attempts < MAX_ATTEMPTS:
## Print and ask for input
print(message)
user_response = input('Confirm? ({:d}/{:d}) [y/N] '.format(n_attempts+1, MAX_ATTEMPTS))
if user_response == '' or not user_response.lower()[0] == 'y':
print('Response negative; please try again.')
else:
print('Positive response received; continuing...')
positive_response = True
break
## Increment to prevent infinite loops
n_attempts += 1
## Raise error if number of attempts has run out
if not positive_response:
raise RuntimeError('Maximum number of confirmation attempts exceeded; stopping execution.')
##############################################################################
def scan_worker_blocks(worker_file):
'''
Scan the worker file and return the blocks corresponding to its different parts.
'''
## re matches for options dicts
re_match_pulse_sequence = re.compile('pulse_sequence ?= ?')
re_match_PSICT_options = re.compile('worker_PSICT_options ?= ?')
re_match_general_options = re.compile('worker_general_options ?= ?')
re_match_pulse_sequence_options = re.compile('worker_pulse_sequence_options ?= ?')
re_match_script_copy_breakpoint = re.compile(SCRIPT_COPY_BREAKPOINT)
## Prepare empty lists
header_block = []
PSICT_options_block = []
general_options_block = []
pulse_sequence_options_block = []
end_block = []
with open(worker_file, 'r') as worker:
line = worker.readline()
## Read up to 'pulse_sequence = ...'
while not re_match_pulse_sequence.match(line):
header_block.append(line)
line = worker.readline()
## Skip actual 'pulse_sequence = ...' line
line = worker.readline()
## Read up to worker_PSICT_options
while not re_match_PSICT_options.match(line):
line = worker.readline()
## Read up to worker_general_options and add to PSICT_options_block
while not re_match_general_options.match(line):
PSICT_options_block.append(line)
line = worker.readline()
## Read up to worker_pulse_sequence_options and add to general_options_block
while not re_match_pulse_sequence_options.match(line):
general_options_block.append(line)
line = worker.readline()
## Read up to script copy breakpoint and add to pulse_sequence_options_block
while not re_match_script_copy_breakpoint.match(line):
pulse_sequence_options_block.append(line)
line = worker.readline()
## Read the rest of the file and add to end_block
end_block = worker.readlines()
return header_block, PSICT_options_block, general_options_block, pulse_sequence_options_block, end_block
##############################################################################
##############################################################################
class WorkerScriptManager:
def __init__(self, worker_script, PSICT_config):
## Load config (log after logger initialized)
self.set_PSICT_config(PSICT_config)
## Logging
self.init_logging()
## Log config loading for debugging
self.logger.log(LogLevels.VERBOSE, 'Config file loaded from path: {}'.format(self.PSICT_config_path))
## Set up flags
self._iscopied_master = False
## Get file details for master script copying
self._master_wd = os.getcwd()
self._master_inv = sys.argv[0]
self._master_target_dir = None
## Create block placeholders to enable dict setters to function correctly
self.PSICT_options_block = []
self.general_options_block = []
self.pulse_sequence_options_block = []
## Set worker script path
self._worker_path = worker_script
self.refresh_worker()
def set_PSICT_config(self, PSICT_config_path):
self.PSICT_config_path = PSICT_config_path
## Import config file as module
config_spec = importlib.util.spec_from_file_location('', self.PSICT_config_path)
self._PSICT_config = importlib.util.module_from_spec(config_spec)
config_spec.loader.exec_module(self._PSICT_config)
#############################################################################
## Logging
def init_logging(self):
'''
Initialize logging for the WorkerScriptManager.
'''
## Add extra logging levels
logging.addLevelName(LogLevels.ALL, 'ALL')
logging.addLevelName(LogLevels.TRACE, 'TRACE')
logging.addLevelName(LogLevels.VERBOSE, 'VERBOSE')
logging.addLevelName(LogLevels.SPECIAL, 'SPECIAL')
## Init logger
logger_name = 'WSMgr'
self.logger = logging.getLogger(logger_name)
self.logger.setLevel(LogLevels.ALL) # Log all possible events
## Add handlers if there are none already added - code copied from psictUIFInterface module
if len(self.logger.handlers) == 0:
## Console stream handler
if self._PSICT_config.logging_config['console_log_enabled']:
console_handler = logging.StreamHandler(sys.stdout)
console_handler.setLevel(self._PSICT_config.logging_config['console_log_level'])
console_fmt = logging.Formatter(self._PSICT_config.logging_config['console_fmt'], \
datefmt = self._PSICT_config.logging_config['console_datefmt'])
console_handler.setFormatter(console_fmt)
## Add handler to logger
self.logger.addHandler(console_handler)
## File handler
if self._PSICT_config.logging_config['file_log_enabled']:
log_dir = self._PSICT_config.logging_config['log_dir']
if not os.path.exists(log_dir):
os.makedirs(log_dir)
log_file = self._PSICT_config.logging_config['log_file'].format(datetime.now())+'.log'
log_path = os.path.join(log_dir, log_file)
file_handler = logging.FileHandler(log_path)
file_handler.setLevel(self._PSICT_config.logging_config['file_log_level'])
file_fmt = logging.Formatter(self._PSICT_config.logging_config['file_fmt'], \
datefmt = self._PSICT_config.logging_config['file_datefmt'])
file_handler.setFormatter(file_fmt)
## Add handler to logger
self.logger.addHandler(file_handler)
## Add NullHandler if no other handlers are configured
if len(self.logger.handlers) == 0:
self.logger.addHandler(logging.NullHandler())
## Status message
self.logger.debug('Logging initialization complete.')
def log(self, msg, loglevel = 'special', *args, **kwargs):
'''
Log a message to the logger at the specified level.
This method should be used instead of bare `print` functions in scripts at the master level. This method should NOT be used internally within the WorkerScriptManager or related classes.
Log levels can be specified as an integer (the usual way), but can also be string corresponding to the name of the level. Available options are: TRACE, DEBUG, VERBOSE, INFO, SPECIAL, WARNING, ERROR, CRITICAL. Specifying an unsupported string will result in a logged ERROR-level message, but no execution error.
'''
if isinstance(loglevel, str):
## Convert to lowercase
loglevel = loglevel.lower()
## Convert string to appropriate level
if loglevel == 'trace':
lvl = LogLevels.TRACE
elif loglevel == 'debug':
lvl = LogLevels.DEBUG
elif loglevel == 'verbose':
lvl = LogLevels.VERBOSE
elif loglevel == 'info':
lvl = LogLevels.INFO
elif loglevel == 'special':
lvl = LogLevels.SPECIAL
elif loglevel == 'warning':
lvl = LogLevels.WARNING
elif loglevel == 'error':
lvl = LogLevels.ERROR
        elif loglevel == 'critical':
lvl = LogLevels.CRITICAL
else:
self.logger.error('Invalid loglevel string specified in call to log(): {}'.format(loglevel))
return
else: # loglevel is assumed to be numeric
lvl = loglevel
## Log message
self.logger.log(lvl, msg, *args, **kwargs)
#############################################################################
## Working with parameter dicts and text blocks
@property
def PSICT_options(self):
return self._PSICT_options
@PSICT_options.setter
def PSICT_options(self, new_PSICT_options):
## Update stored parameter dict
self._PSICT_options = new_PSICT_options
## Update stored block
self.update_block(self.PSICT_options_block, self._PSICT_options)
@property
def general_options(self):
return self._general_options
@general_options.setter
def general_options(self, new_general_options):
## Update stored parameter dict
self._general_options = new_general_options
## Update stored block
self.update_block(self.general_options_block, self._general_options)
@property
def pulse_sequence_options(self):
return self._pulse_sequence_options
@pulse_sequence_options.setter
def pulse_sequence_options(self, new_pulse_sequence_options):
## Update stored parameter dict
self._pulse_sequence_options = new_pulse_sequence_options
## Update stored block
self.update_block(self.pulse_sequence_options_block, self._pulse_sequence_options, nested_dicts = True)
def refresh_worker(self):
'''
Mount the worker and pull values from it.
'''
self.logger.debug('Refreshing worker...')
self.mount_worker()
self.pull_from_worker()
def mount_worker(self):
'''
(Re)-import/'mount' the worker script.
'''
## Invalidate caches as well, just in case
importlib.invalidate_caches()
## Wait 1 second before mounting the worker - avoids blocking reload of module
time.sleep(1)
## Import worker script as module
worker_spec = importlib.util.spec_from_file_location('', self._worker_path)
self._worker_script = importlib.util.module_from_spec(worker_spec)
worker_spec.loader.exec_module(self._worker_script)
## Status message
self.logger.debug('Worker file mounted as module.')
def pull_from_worker(self):
'''
Pull option values from the worker script.
'''
self.logger.log(LogLevels.TRACE, 'Pulling options dicts from worker...')
## Scan blocks from worker - done first to avoid no-matches when updating options dicts
scanned_blocks = scan_worker_blocks(self._worker_path)
## Allocate worker blocks to specific attributes
self.header_block = scanned_blocks[0]
self.PSICT_options_block = scanned_blocks[1]
self.general_options_block = scanned_blocks[2]
self.pulse_sequence_options_block = scanned_blocks[3]
self.end_block = scanned_blocks[4]
## Import options dicts from worker script
self.PSICT_options = self._worker_script.worker_PSICT_options
self.general_options = self._worker_script.worker_general_options
self.pulse_sequence_options = self._worker_script.worker_pulse_sequence_options
## Status message
self.logger.debug('Pulled options dicts from worker.')
def get_parameters(self):
'''
Convenience method for returning all three options dicts
'''
return self.PSICT_options, self.general_options, self.pulse_sequence_options
def set_parameters(self, new_PSICT_options, new_general_options, new_pulse_sequence_options):
'''
Set stored parameter dicts (and blocks).
'''
self.logger.log(LogLevels.VERBOSE, 'Setting parameters...')
## Update stored dicts and blocks
self.PSICT_options = new_PSICT_options
self.general_options = new_general_options
self.pulse_sequence_options = new_pulse_sequence_options
def update_parameters(self):
'''
Update script based on stored parameters, and then refresh stored parameters from script.
'''
self.logger.log(LogLevels.VERBOSE, 'Cycling parameters through worker...')
## Push to worker
self.update_script(copy = False)
## Refresh worker and pull
self.refresh_worker()
def update_block(self, block, options_dict = {}, nested_dicts = False):
if nested_dicts:
for outer_key, nested_dict in options_dict.items():
## Define top-level match object (pulse sequence name)
re_outer_match = re.compile('\t*[\"\']'+str(outer_key)+'[\'\"]:')
## Iterate over keys in the sub-dict
for inner_key, inner_value in nested_dict.items():
## Define inner match object
re_inner_match = re.compile('\t*[\"\']'+str(inner_key)+'[\"\'] ?: ?')
## Find sub-block by top-level match
outer_key_found = False
inner_key_found = False
for line_index, line in enumerate(block):
if outer_key_found:
## Check for inner match
match_obj = re_inner_match.match(line)
if match_obj:
self.logger.log(LogLevels.TRACE, 'Key {} matches line at index {}'.format(inner_key, line_index))
## Get specific formatting
value_rep = get_formatted_rep(inner_key, inner_value)
## Replace line in block
block[line_index] = ''.join([match_obj.group(), value_rep, ','])
## Stop searching for key
inner_key_found = True
break
else:
## Check for outer match
if re_outer_match.match(line):
outer_key_found = True
self.logger.log(LogLevels.TRACE, 'Outer key {} matches line at index {}'.format(outer_key, line_index))
continue
## End looping over lines
if not inner_key_found:
self.logger.warning('Match not found for key: {}'.format(inner_key))
else:
## Iterate over options_dict keys
for key, value in options_dict.items():
## Generate re match object
re_match = re.compile('\t*[\"\']'+str(key)+'[\"\'] ?: ?')
## Attempt to find a match in the block
key_found = False
for line_index, line in enumerate(block):
match_obj = re_match.match(line)
if match_obj:
self.logger.log(LogLevels.TRACE, 'Key {} matches line at index {}'.format(key, line_index))
## Get specific formatting
value_rep = get_formatted_rep(key, value)
## Replace line in block
block[line_index] = "".join([match_obj.group(), value_rep, ','])
## Stop searching for key
key_found = True
break
## End looping over lines
if not key_found:
self.logger.warning('Match not found for key: {}'.format(key))
return block
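    # Illustrative sketch (comment only; the key name is made up):
    # given a block containing a line like "\t'output_file': <old value>,",
    # update_block(block, {'output_file': <new value>}) rewrites that line in
    # place, formatting the new value with get_formatted_rep, and returns the
    # block; unmatched keys only produce a warning.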
#############################################################################
## Writing text blocks to new worker file
def write_block(self, stream, block):
for line in block:
stream.write(line.strip('\n')+'\n')
def write_new_script(self, new_script_path):
with open(new_script_path, 'w') as new_script:
self.write_block(new_script, self.header_block)
new_script.write('pulse_sequence = \''+self._pulse_sequence_name+'\'\n\n')
self.write_block(new_script, self.PSICT_options_block)
self.write_block(new_script, self.general_options_block)
self.write_block(new_script, self.pulse_sequence_options_block)
new_script.write(SCRIPT_COPY_BREAKPOINT+'\n')
self.write_block(new_script, self.end_block)
#############################################################################
## Update the script (ie write and copy)
def set_script_copy_target_dir(self, script_copy_target_dir):
self.target_dir = script_copy_target_dir
def set_master_copy_target_dir(self, master_copy_target_dir):
self._master_target_dir = master_copy_target_dir
def update_script(self, copy = False, target_filename = None, output_path = None):
        '''
        Rewrite the worker script from the stored blocks, optionally copying
        it to the script copy target directory under a name derived from
        either `target_filename` or `output_path`.
        '''
## Status message
self.logger.debug('Updating worker script; copy option is {}'.format(copy))
## Update the original worker script file
self.write_new_script(self._worker_path)
if copy:
## Get a target filename from either the given filename or path
if target_filename is not None:
self.target_file = target_filename
elif output_path is not None:
self.target_file = ''.join([os.path.splitext(os.path.basename(output_path))[0], self._PSICT_config.script_copy_postfix, '.py'])
else:
raise RuntimeError('The target must be specified through either a filename or a path.')
## Generate the full script target path
self.target_path = os.path.join(self.target_dir, self.target_file)
## Create target directory if it does not exist
if not os.path.exists(self.target_dir):
os.makedirs(self.target_dir)
## Copy worker script to target path
shutil.copy(self._worker_path, self.target_path)
# shutil.copy('worker_new.py', self.target_path)
#############################################################################
## Run measurement & do associated admin
def run_measurement(self, pulse_sequence_name):
        '''
        Run the named pulse sequence through the worker script, copy the
        master and worker scripts alongside the output, and increment the
        output filename for the next measurement.
        '''
## Status message
self.logger.info('Running measurement at master: {}'.format(pulse_sequence_name))
## Update pulse sequence name attribute
self._pulse_sequence_name = pulse_sequence_name
## Update parent logger name for worker script
PSICT_options = self.PSICT_options
PSICT_options['parent_logger_name'] = self.logger.name
self.PSICT_options = PSICT_options
## Update parameters: stored -> script -> stored
self.update_parameters()
## Execute measurement function
self.output_path = self._worker_script.run_pulse_sequence(self._pulse_sequence_name, \
self.PSICT_options, self.general_options, \
self.pulse_sequence_options)
## Get output filename and dir
self.output_filename = os.path.splitext(os.path.basename(self.output_path))[0]
self.output_dir = os.path.dirname(os.path.abspath(self.output_path))
# ## Log if required
# if self._logging:
# self._output_logger.add_entry(self.output_filename, self.output_dir, self._pulse_sequence_name)
## Copy master script if required
if not self._iscopied_master:
self.copy_master(self._master_target_dir)
## Update script (with copy)
self.update_script(copy = True, output_path = self.output_path)
## Increment filename in preparation for next measurement
self.PSICT_options['output_file'] = increment_filename(self.output_filename)
## Set parameters
self.set_parameters(self.PSICT_options, self.general_options, self.pulse_sequence_options)
## Update script (incremented filename), with no copy
self.update_parameters()
## Status message
self.logger.info('Running measurement completed at master.')
def update_date(self):
## Update output dir based on today's date
self.PSICT_options['output_dir'] = update_labber_dates_dir(self._PSICT_options['output_dir'])
def copy_master(self, master_dir_target = None):
'''
Copy the master script to the script_copy_target_dir
'''
## Use script_copy_target_dir if no alternative is provided
if master_dir_target is None:
master_dir_target = self.target_dir
## Create target dir if it does not exist
pathlib.Path(master_dir_target).mkdir(parents = True, exist_ok = True)
## Get full path to master file
master_path_original = os.path.join(self._master_wd, self._master_inv)
## Construct filename for target
master_file_target = ''.join([self.output_filename, '_master.py'])
## Construct full path for target
master_path_target = os.path.join(master_dir_target, master_file_target)
## Copy master file
self._master_path_new = shutil.copy(master_path_original, master_path_target)
self.logger.log(LogLevels.SPECIAL, 'Master script copied to: {:s}'.format(self._master_path_new))
## Set flag
self._iscopied_master = True
##
| 1.71875 | 2 |
2021-summer-research.py | KhoiLe01/2021-summer-research | 0 | 12765627 | import random
import math
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
import colorsys
import copy
def visualization(machines, jobs, algo):
# Declaring a figure "gnt"
fig, gnt = plt.subplots()
# Setting labels for x-axis and y-axis
gnt.set_xlabel('Processing Time')
gnt.set_ylabel('Machine')
yticks = []
ylabels = []
for i in range(len(machines)):
yt = 15 * (i + 1)
yl = i + 1
ylabels.append(str(yl))
yticks.append(yt)
color = []
for i in range(len(jobs)):
h, s, l = random.random(), 0.5 + random.random() / 2.0, 0.4 + random.random() / 5.0
r, g, b = [int(256 * i) for i in colorsys.hls_to_rgb(h, l, s)]
c = '#%02x%02x%02x' % (r, g, b)
color.append(c)
# print(color)
# color = ["#"+''.join([random.choice('0123456789ABCDEF') for j in range(6)]) for i in range(len(jobs))]
# Setting ticks on y-axis
gnt.set_yticks(yticks)
# Labelling tickes of y-axis
gnt.set_yticklabels(ylabels)
# print(yticks, ylabels)
# Setting graph attribute
gnt.grid(True)
# Declaring a bar in schedule
previous = 0
# for i in range (len(machines)):
# for j in range (len(machines[i])):
# gnt.broken_barh([(previous, machines[i][j][0])], ((i+1)*10, 9), facecolors =(color[machines[i][j][1]-1]), edgecolor = "black")
# previous += machines[i][j][0]
# previous = 0
for i in range(len(machines)):
for j in range(len(machines[i])):
if machines[i][j][1] != 0:
gnt.broken_barh([(previous, machines[i][j][0])], ((i + 1) * 10, 9),
facecolors=(color[machines[i][j][1] - 1]), edgecolor="black")
previous += machines[i][j][0]
else:
if (float(machines[i][j][0])) != 0:
gnt.broken_barh([(previous, (float(machines[i][j][0])))], ((i + 1) * 10, 9), facecolors='white',
edgecolor="black")
previous += (float(machines[i][j][0]))
previous = 0
plt.yticks([])
fig.set_size_inches(37, 21)
plt.title(algo)
plt.show()
plt.savefig("{}.png".format(algo))
# mpimg.imsave("{}.png".format(algo), fig)
def evan(machines, jobs, c):
machines[0].append([jobs[0][0], 1])
high = 0
low = 1
for index2, item in enumerate(jobs):
if index2 == 0:
continue
if makespan_machines(machines[high]) + item[0] - makespan_machines(machines[low]) <= makespan_machines(
machines[low]):
machines[high].append([item[0], index2 + 1])
elif (item[0] / 2 + c > item[0]) or min(makespan_machines(machines[low]) + item[0],
makespan_machines(machines[high])) >= max(
makespan_machines(machines[low]) + item[0], makespan_machines(machines[high])) - min(
makespan_machines(machines[low]) + item[0], makespan_machines(machines[high])):
machines[low].append([item[0], index2 + 1])
else:
machines[low].append([str(makespan_machines(machines[high]) - makespan_machines(machines[low])), 0])
machines[low].append([item[0]/2+c, index2+1])
machines[high].append([item[0] / 2 + c, index2+1])
if makespan_machines(machines[0]) > makespan_machines(machines[1]):
high = 0
low = 1
else:
high = 1
low = 0
def evan_greedy(machines, jobs, c):
low = 0
high = 1
for index2, item in enumerate(jobs):
if item[0]/2 + c < makespan_machines(machines[low]) + item[0]:
machines[low].append([str(makespan_machines(machines[high]) - makespan_machines(machines[low])), 0])
machines[low].append([item[0] / 2 + c, index2+1])
machines[high].append([item[0] / 2 + c, index2+1])
else:
machines[low].append([item[0]/2+c, index2+1])
if makespan_machines(machines[0]) > makespan_machines(machines[1]):
high = 0
low = 1
else:
high = 1
low = 0
def evan_76(machines, jobs, c):
machines[0].append([jobs[0][0], 1])
high = 0
low = 1
job_sum = jobs[0][0]
r4 = 0
for index2, item in enumerate(jobs):
if index2 == 0:
continue
job_sum += item[0]
if makespan_machines(machines[high]) + item[0] <= 7/12 * job_sum:
machines[high].append([item[0], index2+1])
elif max(makespan_machines(machines[low])+item[0], makespan_machines(machines[high])) <= 7/12 * job_sum:
machines[low].append([item[0], index2+1])
elif makespan_machines(machines[high]) + item[0]/2 + c <= 7/12 * job_sum:
machines[low].append([str(makespan_machines(machines[high]) - makespan_machines(machines[low])), 0])
machines[low].append([item[0] / 2 + c, index2 + 1])
machines[high].append([item[0] / 2 + c, index2 + 1])
else:
r4 += 1
machines[low].append([item[0], index2+1])
if makespan_machines(machines[0]) > makespan_machines(machines[1]):
high = 0
low = 1
else:
high = 1
low = 0
# print(makespan(machines)*2/sum([makespan_machines(machines[0])+makespan_machines(machines[1])]))
return r4
def LS(machines, jobs):
min = machines[0][0]
index = 0
for index2, item in enumerate(jobs):
min = machines[index][0]
for i in range(0, len(machines)):
if machines[i][0] < min:
min = machines[i][0]
index = i
insert_job(machines, item[0], index)
machines[index].append([item[0], index2 + 1])
LS_makespan = makespan(machines)
for i in range(len(machines)):
machines[i] = machines[i][1:]
return LS_makespan
def SET(machines, jobs, c):
m = len(machines) - 1
for index, item in enumerate(jobs):
kj = 0
k = min(m, math.sqrt(item[0] / c))
if k == m:
kj = m
elif (item[0] / (math.floor(k)) + (math.floor(k) - 1) * c) <= (
item[0] / (math.ceil(k)) + (math.ceil(k) - 1) * c):
kj = math.floor(k)
else:
kj = math.ceil(k)
machines.sort(key=lambda machines: machines[0])
sj = machines[kj - 1][0]
for j in range(kj):
# if machines[j][0] == 0:
# machines[j].append([sj + (item[0]/(kj)+(kj-1)*c), index+1])
# else:
machines[j].append([str(sj - machines[j][0]), 0])
machines[j].append([(item[0] / (kj) + (kj - 1) * c), index + 1])
machines[j][0] = sj + (item[0] / (kj) + (kj - 1) * c)
SET_makespan = makespan(machines)
for i in range(len(machines)):
machines[i] = machines[i][1:]
return SET_makespan
def LPT(machines, jobs):
jobs.sort(key=lambda jobs: jobs[0], reverse=True)
min = machines[0][0]
index = 0
for index2, item in enumerate(jobs):
min = machines[index][0]
for i in range(0, len(machines)):
if machines[i][0] < min:
min = machines[i][0]
index = i
insert_job(machines, item[0], index)
machines[index].append([item[0], index2 + 1])
LPT_makespan = makespan(machines)
for i in range(len(machines)):
machines[i] = machines[i][1:]
return LPT_makespan
def makespan_machines(machine):
return sum([float(machine[i][0]) for i in range(len(machine))])
def makespan(machine_list):
return max([makespan_machines(machine_list[i]) for i in range(len(machine_list))])
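# Hedged usage sketch for the makespan helpers (toy schedule, made-up numbers):
def _demo_makespan():
    # each machine holds [length, job_id] pairs; job_id 0 marks idle gaps,
    # whose lengths are stored as strings elsewhere in this file
    machine_a = [[3, 1], [2, 2]]       # total load 5
    machine_b = [[4, 3], ['1.5', 0]]   # total load 5.5
    assert makespan_machines(machine_a) == 5
    assert makespan([machine_a, machine_b]) == 5.5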
def insert_job(machine_list, job, machine_index):
machine_list[machine_index][0] += job
def main(m, nj):
machines = []
jobs = []
for i in range(m):
machines.append([0])
for i in range(nj):
jobs.append([random.randint(1, 10)])
machines_evan = [[], []]
# machines2 = copy.deepcopy(machines)
# machines3 = copy.deepcopy(machines)
#
# makespan = SET(machines, jobs, 1)
# makespan2 = LS(machines2, jobs)
# makespan3 = LPT(machines3, jobs)
#
# visualization(machines, jobs, "SET Algorithm")
# visualization(machines2, jobs, "LS Algorithm")
# visualization(machines3, jobs, "LPT Algorithm")
# print(makespan)
# for i in machines:
# print(makespan_machines(i), i)
# print(jobs)
# LS(machines, jobs)
# for i in machines:
# print(i)
evan_76(machines_evan, jobs, 3)
print(jobs)
for i in machines_evan:
print(i)
print(makespan(machines_evan)*2/sum([jobs[i][0] for i in range(len(jobs))]))
# visualization(machines_evan, jobs, "Evan algo")
def main_simulation_2machines():
maxi = 0
max_jobs = []
c_max = 0
jobs_range = 0
no_job_max = 0
no_r4 = 0
max_r4 = 0
for j in range(50000):
nj = random.randint(1000,2000)
jobs = []
jr = random.randint(1,500)
for i in range(nj):
jobs.append([random.randint(1, jr)])
machines_evan = [[], []]
cr = random.randint(2000,3000)
no_r4 = evan_76(machines_evan, jobs, cr)
k = makespan(machines_evan)*2/sum([jobs[i][0] for i in range(len(jobs))])
if k > maxi:
maxi = k
max_jobs = copy.deepcopy(jobs)
c_max = cr
jobs_range = jr
no_job_max = nj
max_r4 = no_r4
print(maxi, no_r4)
print(maxi, max_r4)
print(max_jobs)
print(c_max)
print(jobs_range)
print(no_job_max)
# main_simulation_2machines()
# c = 4
# epsilon = 0.01
# l = []
# for i in range (300):
# if i == 0:
# l.append([12*c-epsilon])
# else:
# l.append([12*(6**i)*c-epsilon])
# m = [[],[]]
# print(evan_76(m, l, c))
# print(l)
# for i in (m):
# print(i)
# avg = sum(l[i][0] for i in range(len(l)))
# print(len(l))
# print(makespan(m), avg)
# print(makespan(m)*2/avg)
def find_counter(c):
epsilon = 0.0001*c
job = [[], []]
job[0].append(12*c-epsilon)
li = 0
si = 1
for i in range (1000):
# print("Bound:", 7/5 * (sum(job[0])+sum(job[1])) - 12/5 * sum(job[si]), 12*sum(job[li]) + 12*c - 7*(sum(job[0])+sum(job[1])) - epsilon)
k = 12 * sum(job[li]) + 12 * c - 7 * (sum(job[0]) + sum(job[1])) - epsilon
while epsilon/(12*sum(job[li]) + 12*c - 7*(sum(job[0])+sum(job[1])) - epsilon) < 10**(-10):
epsilon *= 10
if math.isnan(k):
break
job[si].append(12*sum(job[li]) + 12*c - 7*(sum(job[0])+sum(job[1])) - epsilon)
if sum(job[0]) > sum(job[1]):
li = 0
si = 1
else:
si = 0
li = 1
# for i in (job):
# print(i)
fin_job = []
for i in range(len(job[1])):
for j in range(len(job)):
fin_job.append([job[j][i]])
return fin_job
max_approx = 0
for c in range(1,50):
l = find_counter(c)
m = [[],[]]
print(evan_76(m, l, c))
# print(len(l))
for i in (m):
print(i)
avg = max(sum([l[i][0] for i in range(len(l))])/2, max([l[i][0] for i in range(len(l))]))
# print(len(l))
# print(makespan(m), avg)
# print(makespan(m)/avg)
if makespan(m)/avg > max_approx:
max_approx = makespan(m)/avg
print(max_approx) | 2.796875 | 3 |
Code/Common/kitti_tfrecords_creator.py | Turgibot/NeuralNets | 0 | 12765628 | <filename>Code/Common/kitti_tfrecords_creator.py
# Copyright 2018 <NAME>. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
########################################################################################################################
'''
Create a tfrecord from given data.
As of today, tfrecord is the recommended format for TensorFlow.
It is a binary file format - a serialized tf.train.Example protobuf object:
better use of the disk cache, faster to move around, and it can handle data
of different types (image + label in one object).
Good blog post: http://warmspringwinds.github.io/tensorflow/tf-slim/2016/12/21/tfrecords-guide/
Feature: an image
Label: a number
Basic steps for a single image (a hedged sketch of these steps appears below,
after the CLASSES constant):
# Step 1: create a writer to write the tfrecord to a file
writer = tf.python_io.TFRecordWriter(out_file)
# Step 2: get the serialized shape and values of the image
shape, binary_image = get_image_binary(image_file)
# Step 3: create a tf.train.Features object
features = tf.train.Features(feature={'label': _int64_feature(label),
'shape': _bytes_feature(shape),
'image': _bytes_feature(binary_image)})
# Step 4: create a sample consisting of the features defined above
sample = tf.train.Example(features=features)
# Step 5: write the sample to the tfrecord file
writer.write(sample.SerializeToString())
writer.close()
Steps specific to the Kitti dataset:
Step 1: Split the labels into train and test labels
Step 2: Resize images according to parameter <shrink>
Step 3: Write the images and labels to tfrecord files, SAMPLES_PER_FILES
records per file
'''
import os
import sys
import re
import tensorflow as tf
import cv2
import numpy as np
import utils
# TFRecords convertion parameters.
RANDOM_SEED = 2018
SAMPLES_PER_FILES = 200
CWD = os.getcwd()
CODE_DIR = os.path.abspath(os.path.join(CWD, os.pardir))
ROOT_DIR = os.path.abspath(os.path.join(CODE_DIR, os.pardir))
PATH_TFRECORDS = os.path.join(CODE_DIR, 'TFRecords')
PATH_TFRECORDS_TRAIN = os.path.join(PATH_TFRECORDS, 'Training')
PATH_TFRECORDS_TEST = os.path.join(PATH_TFRECORDS, 'Testing')
DATA_DIR = os.path.join(ROOT_DIR, 'Data')
KITTY_DIR = os.path.join(DATA_DIR, 'Kitti')
PATH_IMAGES = os.path.join(KITTY_DIR, 'Images')
PATH_LABELS = os.path.join(KITTY_DIR, 'Labels')
CLASSES = {
'Pedestrian': 0,
'Cyclist': 1,
'Car': 2,
}
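# Hedged sketch of steps 1-5 from the module docstring for a single image.
# The helpers _int64_feature/_bytes_feature are the usual tfrecord idioms and
# are assumed here rather than taken from this project's utils module.
def _demo_single_image_tfrecord(image_file, label, out_file):
    def _int64_feature(value):
        return tf.train.Feature(int64_list=tf.train.Int64List(value=[value]))
    def _bytes_feature(value):
        return tf.train.Feature(bytes_list=tf.train.BytesList(value=[value]))
    # Step 2: serialized shape and pixel values of the image
    image = cv2.imread(image_file)
    shape = numpy.array(image.shape, numpy.int32).tobytes()
    binary_image = image.tobytes()
    # Steps 1 and 3-5: writer, features, sample, write
    writer = tf.python_io.TFRecordWriter(out_file)
    features = tf.train.Features(feature={'label': _int64_feature(label),
                                          'shape': _bytes_feature(shape),
                                          'image': _bytes_feature(binary_image)})
    sample = tf.train.Example(features=features)
    writer.write(sample.SerializeToString())
    writer.close()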
def start():
# create a folder for the tfrecords if doesn't exist'''
utils.create_dir(PATH_TFRECORDS)
utils.create_dir(PATH_TFRECORDS_TRAIN)
utils.create_dir(PATH_TFRECORDS_TEST)
train_labels, test_labels = utils.random_split_kitti(PATH_LABELS, 0.8, CLASSES, RANDOM_SEED)
# Step 1: create a writer to write tfrecord to that file
labels_tuple = (train_labels, test_labels)
for l in range(len(labels_tuple)):
files_counter = 0
labels_src = labels_tuple[l]
labels_keys = list(labels_src)
labels_num = len(labels_src)
        if l == 0:
            tfrecord_folder = PATH_TFRECORDS_TRAIN
            prefix = 'train_'
        else:
            tfrecord_folder = PATH_TFRECORDS_TEST
            prefix = 'test_'
i = 0
while i < labels_num:
sys.stdout.write('\r>> Creating TFRecord file number %d ' % files_counter)
            tfrecord_file = os.path.join(tfrecord_folder, prefix + '%d' % files_counter + '.tfrecord')
utils.create_file(tfrecord_file)
with tf.python_io.TFRecordWriter(tfrecord_file) as writer:
j = 0
while i < labels_num and j < SAMPLES_PER_FILES:
sys.stdout.write('\r>> Converting image %d/%d ' % (i + 1, labels_num))
sys.stdout.flush()
image_number = labels_keys[i]
image_path = os.path.join(PATH_IMAGES, image_number + '.png')
utils.append_to_tfrecord(image_path, labels_src[image_number], writer)
i += 1
j += 1
files_counter += 1
start()
| 2.078125 | 2 |
BackEnd/src/app/domain/collections/stocks/stocksQueries.py | JeremieBeaudoin/LavalUniversityStockExchange | 0 | 12765629 | <reponame>JeremieBeaudoin/LavalUniversityStockExchange<filename>BackEnd/src/app/domain/collections/stocks/stocksQueries.py
from src.app.database.utility.StockUtility import splitTickerAggregate
def getAllStockTickersAndSuffixes():
return """SELECT ticker, suffix
FROM stocks;"""
def getAllStocks(withDetailedAnalytics = True):
if withDetailedAnalytics:
return """SELECT
S.company_uuid,
S.ticker,
S.suffix,
S.currency,
S.regular_market_price,
S.regular_market_volume,
(100 * (S.regular_market_price - S.regular_market_previous_close) / S.regular_market_previous_close)
AS day_change,
S.regular_market_open,
S.regular_market_previous_close,
S.regular_market_day_high,
S.regular_market_day_low,
S.52_week_high,
S.52_week_low,
S.200_day_average,
S.ask,
S.ask_size,
S.bid,
S.bid_size,
S.market_cap,
S.dividend_yield,
S.held_percent_institutions,
S.held_percent_insiders,
C.long_name
FROM stocks S, companies C
WHERE S.company_uuid = C.uuid;"""
else:
return """SELECT company_uuid, ticker, suffix, currency, regular_market_price, regular_market_volume,
(100 * (regular_market_price - regular_market_open) / regular_market_open)
FROM stocks;"""
def getCompanyIdForStockTickerAndSuffix(stockTicker, stockSuffix):
    return f"""SELECT company_uuid
               FROM stocks S
               WHERE S.ticker = '{stockTicker}'
               AND S.suffix = '{stockSuffix}';"""
def getFullStockByTickerAndSuffix(stockTicker, stockSuffix):
return f"""SELECT
S.company_uuid,
S.ticker,
S.suffix,
S.currency,
S.regular_market_price,
S.regular_market_volume,
(100 * (S.regular_market_price - S.regular_market_previous_close) / S.regular_market_previous_close)
AS day_change,
S.regular_market_open,
S.regular_market_previous_close,
S.regular_market_day_high,
S.regular_market_day_low,
S.52_week_high,
S.52_week_low,
S.200_day_average,
S.ask,
S.ask_size,
S.bid,
S.bid_size,
S.market_cap,
S.dividend_yield,
S.held_percent_institutions,
S.held_percent_insiders,
C.long_name
FROM stocks S, companies C
WHERE S.ticker = '{stockTicker}'
AND S.suffix = '{stockSuffix}'
AND S.company_uuid = C.uuid;"""
def insertStock(companyUuid, stock):
stockTicker, stockSuffix = splitTickerAggregate(stock['symbol'])
return f"""INSERT INTO stocks
(company_uuid, ticker, suffix, currency, regular_market_price,
regular_market_volume, regular_market_open, regular_market_previous_close,
regular_market_day_high, regular_market_day_low, 52_week_high,
52_week_low, 200_day_average, ask, ask_size, bid, bid_size, market_cap,
dividend_yield, held_percent_institutions, held_percent_insiders)
VALUES
('{companyUuid}', '{stockTicker}', '{stockSuffix}',
'{stock['currency']}', {stock['regularMarketPrice']}, {stock['regularMarketVolume']},
{stock['regularMarketOpen']}, {stock['regularMarketPreviousClose']},
{stock['regularMarketDayHigh']}, {stock['regularMarketDayLow']},
{stock['fiftyTwoWeekHigh']}, {stock['fiftyTwoWeekLow']},
{stock['twoHundredDayAverage']}, {stock['ask']}, {stock['askSize']},
{stock['bid']}, {stock['bidSize']}, {stock['marketCap']},
{stock['dividendYield']}, {stock['heldPercentInstitutions']},
{stock['heldPercentInsiders']});"""
def updateStockByTickerAndSuffix(stockTicker, stockSuffix, patchString):
return f"""UPDATE stocks S
SET {patchString}
WHERE S.ticker = '{stockTicker}'
AND S.suffix = '{stockSuffix}';"""
def getRefreshStockQueryForTickerAndSuffix(stockTicker, stockSuffix, stock):
return f"""UPDATE stocks S SET
regular_market_price = {stock['regularMarketPrice']},
regular_market_volume = {stock['regularMarketVolume']},
regular_market_open = {stock['regularMarketOpen']},
regular_market_previous_close = {stock['regularMarketPreviousClose']},
regular_market_day_high = {stock['regularMarketDayHigh']},
regular_market_day_low = {stock['regularMarketDayLow']},
52_week_high = {stock['fiftyTwoWeekHigh']},
52_week_low = {stock['fiftyTwoWeekLow']},
200_day_average = {stock['twoHundredDayAverage']},
ask = {stock['ask']},
ask_size = {stock['askSize']},
bid = {stock['bid']},
bid_size = {stock['bidSize']},
market_cap = {stock['marketCap']},
dividend_yield = {stock['dividendYield']},
held_percent_institutions = {stock['heldPercentInstitutions']},
held_percent_insiders = {stock['heldPercentInsiders']}
WHERE S.ticker = '{stockTicker}' AND S.suffix = '{stockSuffix}';"""
def getRefreshPriceQueryString(stockTicker, stockSuffix, stockRegularMarketPrice):
return f"""UPDATE stocks S SET
S.regular_market_price = {stockRegularMarketPrice}
WHERE S.ticker = '{stockTicker}'
AND S.suffix = '{stockSuffix}';"""
def deleteStockByTickerAndSuffix(stockTicker, stockSuffix):
    return f"""DELETE FROM stocks
               WHERE ticker = '{stockTicker}'
               AND suffix = '{stockSuffix}';"""
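# Design note: the builders above interpolate values directly into the SQL
# text. A hedged sketch of the parameterized alternative (assuming a DB-API
# cursor with '%s' placeholders, e.g. a MySQL driver; the cursor is supplied
# by the caller and is not part of this module):
def deleteStockParameterized(cursor, stockTicker, stockSuffix):
    cursor.execute("DELETE FROM stocks WHERE ticker = %s AND suffix = %s;",
                   (stockTicker, stockSuffix))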
| 2.546875 | 3 |
prev_ob_models/KaplanLansner2014/plotting_and_analysis/plot_gauss.py | fameshpatel/olfactorybulb | 5 | 12765630 |
import numpy as np
import matplotlib.mlab as mlab
import pylab
p = [1.631787e+02, 6.670855e+00, 1.977871e+00, \
1.909487e+01, 1.110809e+01, 3.353855e+00, \
4.188897e+00, 4.088460e+01, 4.966478e-01]
def trimodal_gauss(x, p):
w1 = p[0]
mu1 = p[1]
sigma1 = p[2]
w2 = p[3]
mu2 = p[4]
sigma2 = p[5]
w3 = p[6]
mu3 = p[7]
sigma3 = p[8]
return w1 * mlab.normpdf(x, mu1, sigma1) + w2 * mlab.normpdf(x, mu2, sigma2) + w3 * mlab.normpdf(x, mu3, sigma3)
def gauss(x, mu, sigma):
return 1. / (sigma * np.sqrt(2 * np.pi)) * np.exp( - (x - mu)**2 / (2. * sigma ** 2))
bins = np.linspace(0, 4.330310991999920844e+01, 100)
#G1 = p[0] * gauss(bins, p[1], p[2])
G1 = p[0] * mlab.normpdf(bins, p[1], p[2])
print 'G1', G1
G2 = p[3] * gauss(bins, p[4], p[5])
print 'G2', G2
G21 = p[3] * mlab.normpdf(bins, p[4], p[5])
print 'G2 normpdf diff', G21 - G2
G3 = p[6] * gauss(bins, p[7], p[8])
print 'G3', G3
pylab.plot(bins, G1, c='k', lw=3)
pylab.plot(bins, G2, c='b', lw=3)
pylab.plot(bins, G3, c='g', lw=3)
pylab.xlim((bins[0], bins[-1]))
#pylab.ylim((0, 1))
pylab.show()
| 2.375 | 2 |
plugins/snortlabslist/komand_snortlabslist/actions/lookup/schema.py | lukaszlaszuk/insightconnect-plugins | 46 | 12765631 | # GENERATED BY KOMAND SDK - DO NOT EDIT
import komand
import json
class Component:
DESCRIPTION = "IP Check"
class Input:
ADDRESS = "address"
class Output:
ADDRESS = "address"
FOUND = "found"
STATUS = "status"
URL = "url"
class LookupInput(komand.Input):
schema = json.loads("""
{
"type": "object",
"title": "Variables",
"properties": {
"address": {
"type": "string",
"title": "Address",
"description": "IPv4 Address",
"order": 1
}
},
"required": [
"address"
]
}
""")
def __init__(self):
super(self.__class__, self).__init__(self.schema)
class LookupOutput(komand.Output):
schema = json.loads("""
{
"type": "object",
"title": "Variables",
"properties": {
"address": {
"type": "string",
"title": "Address",
"description": "IP address that was found",
"order": 3
},
"found": {
"type": "boolean",
"title": "Found",
"description": "Found status",
"order": 1
},
"status": {
"type": "string",
"title": "Status",
"description": "Error message",
"order": 4
},
"url": {
"type": "string",
"title": "URL",
"description": "URL of reputation list",
"order": 2
}
}
}
""")
def __init__(self):
super(self.__class__, self).__init__(self.schema)
| 2.375 | 2 |
envs/meta/__init__.py | twni2016/pomdp-baselines | 40 | 12765632 | from gym.envs.registration import register
## off-policy variBAD benchmark
register(
"PointRobot-v0",
entry_point="envs.meta.toy_navigation.point_robot:PointEnv",
kwargs={"max_episode_steps": 60, "n_tasks": 2},
)
register(
"PointRobotSparse-v0",
entry_point="envs.meta.toy_navigation.point_robot:SparsePointEnv",
kwargs={"max_episode_steps": 60, "n_tasks": 2, "goal_radius": 0.2},
)
register(
"Wind-v0",
entry_point="envs.meta.toy_navigation.wind:WindEnv",
)
register(
"HalfCheetahVel-v0",
entry_point="envs.meta.wrappers:mujoco_wrapper",
kwargs={
"entry_point": "envs.meta.mujoco.half_cheetah_vel:HalfCheetahVelEnv",
"max_episode_steps": 200,
},
max_episode_steps=200,
)
## on-policy variBAD benchmark
register(
"AntDir-v0",
entry_point="envs.meta.wrappers:mujoco_wrapper",
kwargs={
"entry_point": "envs.meta.mujoco.ant_dir:AntDirEnv",
"max_episode_steps": 200,
"forward_backward": True,
"n_tasks": None,
},
max_episode_steps=200,
)
register(
"CheetahDir-v0",
entry_point="envs.meta.wrappers:mujoco_wrapper",
kwargs={
"entry_point": "envs.meta.mujoco.half_cheetah_dir:HalfCheetahDirEnv",
"max_episode_steps": 200,
"n_tasks": None,
},
max_episode_steps=200,
)
register(
"HumanoidDir-v0",
entry_point="envs.meta.wrappers:mujoco_wrapper",
kwargs={
"entry_point": "envs.meta.mujoco.humanoid_dir:HumanoidDirEnv",
"max_episode_steps": 200,
"n_tasks": None,
},
max_episode_steps=200,
)
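# Hedged usage sketch: once this module is imported, the ids registered above
# can be instantiated through gym (env id taken from the registrations above):
def _demo_make_env():
    import gym
    env = gym.make("PointRobot-v0")
    obs = env.reset()
    return env, obs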
| 1.671875 | 2 |
prev_ob_models/exclude/GilraBhalla2015/generators/generate_firerates_sinusoids.py | fameshpatel/olfactorybulb | 0 | 12765633 | <reponame>fameshpatel/olfactorybulb
import sys, pickle
from pylab import *
import numpy
sys.path.extend(["..","../networks","../simulations"])
from networkConstants import *
from stimuliConstants import *
#### Generate sinusoids of different frequencies and amplitudes
#### to feed to gloms individually to obtain frequency response.
#### Cut off at zero below, and stimuliConstants has mean firing rate for odors,
#### keep amplitude less than mean firing rate to avoid cutoff,
#### and variance in firing rate of different ORNs is same as mean.
#### The firing rate as a function of time is fed
#### to a Poisson spike generator in generate_firefiles_sinusoids.py .
frateResponseList = []
sinepulsetime = arange(0,SIN_RUNTIME,FIRINGFILLDT)
len_pulsetime = len(sinepulsetime)
def firingRateSinusoid(DC,ampl,f):
## in Hz
## array of Gaussian distributed firing rates at each time point
## mean = FIRINGMEANA, standard deviation = sqrt(FIRINGMEANA)
#pulse_steps = normal(loc=FIRINGMEANA,scale=sqrt(FIRINGMEANA),size=len_pulsetime)
pulse_steps = array( [ DC + ampl*sin(2*pi*t*f) for t in sinepulsetime ] )
## clip firing rates below zero; in-place hence pulse_steps is also the output
clip(pulse_steps,0,1e6,pulse_steps)
return array(pulse_steps)
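## Hedged usage sketch (the DC/amplitude/frequency values are illustrative,
## not taken from stimuliConstants):
def _demo_firing_rate_sinusoid():
    frate = firingRateSinusoid(5.0, 3.0, 2.0) # 5 Hz mean, 3 Hz amplitude, 2 Hz
    assert len(frate) == len_pulsetime
    assert frate.min() >= 0.0 # clipped below zero
    return frate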
def sinusoid_stimuli():
## firing rates to generate Poisson input to mitrals and PGs
for glomnum in range(NUM_GLOMS):
frateResponseList.append([])
for sine_f in sine_frequencies:
## firing rates
frate = firingRateSinusoid(sine_ORN_mean,sine_amplitude,sine_f)
## important to put within [] or (...,) for extend
frateResponseList[-1].extend([frate])
if __name__ == "__main__":
### Seed only if called directly, else do not seed.
### Also seeding this way ensures seeding after importing other files that may set seeds.
### Thus this seed overrides other seeds.
seed([123.0])#[stim_rate_seednum]) ##### Seed numpy's random number generator.
sinusoid_stimuli()
filename = 'firerates/firerates_sinusoids_seed'+str(stim_rate_seednum)+\
'_ampl'+str(sine_amplitude)+'_mean'+str(sine_ORN_mean)+'.pickle'
fireratefile = open(filename,'w')
pickle.dump( frateResponseList, fireratefile)
fireratefile.close()
print "wrote",filename
figure(facecolor='w')
title('psd of sinusoid')
frate = frateResponseList[0][0]
fftsq = abs(fft(array(frate)-frate.mean()))**2.0
plot(fftsq**0.5)
# glom0 & glom1
figure(facecolor='w')
title('Glomerulus 0 & 1')
xlabel('time (s)', fontsize='large')
ylabel('firing rate (Hz)', fontsize='large')
plot(sinepulsetime, frateResponseList[0][0], color=(1,0,0))
plot(sinepulsetime, frateResponseList[1][0], color=(0,1,0))
show()
| 2.53125 | 3 |
test_pomo.py | liuxk99/sjPomotodo | 0 | 12765634 | <reponame>liuxk99/sjPomotodo<gh_stars>0
# coding=utf-8
import re
from unittest import TestCase
from pomo import Pomo
json = u'{' \
u' "uuid": "26a37b6f-6e69-4a2e-ae79-e9c264a4a653",' \
u' "created_at": "2020-03-30T12:25:26.800Z",' \
u' "updated_at": "2020-03-30T12:25:26.800Z",' \
u' "description": "#生活/行/车(福特·蒙迪欧) \'维修\'·搭火",' \
u' "started_at": "2020-03-30T11:59:20.254Z",' \
u' "ended_at": "2020-03-30T12:25:26.799Z",' \
u' "local_started_at": "2020-03-30T19:59:20.000Z",' \
u' "local_ended_at": "2020-03-30T20:25:26.000Z",' \
u' "length": 25,' \
u' "abandoned": false,' \
u' "manual": false' \
u'}'
class TestPomo(TestCase):
def test_from_json(self):
print json
Pomo.from_json(json)
# self.fail()
def testcaseActions01(self):
pomo1 = "#SW/App 'xxx' |2021/04/26"
print pomo1
pattern = "'.+'"
print pattern
self.assertIsNotNone((re.search(pattern, pomo1)))
return
def testcaseActions02(self):
pomo1 = u"#生活/日常 '天气<应用:墨迹天气>' |2021/04/26"
pomo2 = u"#社会/户籍(北京) '积分落户'·申报 |2021年"
pattern = u"'.+'"
print pattern
obj = re.search(pattern, pomo1)
self.assertIsNotNone(obj)
print obj
obj = re.search(pattern, pomo2)
self.assertIsNotNone(obj)
print obj
print pomo1
pat = u"(.+)'.+'(.+)"
obj = re.match(pat, pomo1)
if obj:
print obj.group(2)
print pomo2
obj = re.match(pat, pomo2)
if obj:
print obj.group(2)
return
| 2.671875 | 3 |
api.py | johnnymck/historical-api | 0 | 12765635 | #!/usr/bin/evn python
import sqlite3
from flask import Flask, jsonify, g
app = Flask(__name__)
DATABASE = 'union-bridge'
def query_db(query, args=(), one=False):
cur=g.db.execute(query, args)
rv = [dict((cur.description[idx][0], value)
for idx, value in enumerate(row)) for row in cur.fetchall()]
return (rv[0] if rv else None) if one else rv
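# Hedged usage sketch (comment only: query_db needs an application/request
# context so that g.db is set; the row ids are illustrative):
#   one_building = query_db('select * from buildings where id = ?', (1,), one=True)
#   all_facts = query_db('select * from facts')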
def connect_db():
return sqlite3.connect(DATABASE)
@app.before_request
def before_request():
g.db = connect_db()
@app.after_request
def after_request(response):
g.db.close()
return response
@app.route("/")
def index():
test = {
"version": "0.0.1",
"name": "historical-api",
"tables": ("buildings", "facts", "sources", "subjects"),
}
return jsonify(test)
@app.route("/buildings")
def buildings():
values = []
for building in query_db('select * from buildings'):
values.push(building)
return(jsonify(values))
@app.route("/facts")
def facts():
values = []
for fact in query_db('select * from facts'):
values.push(fact)
return(jsonify(values))
@app.route("/sources")
def sources():
values = []
for source in query_db('select * from sources'):
values.push(source)
return(jsonify(values))
@app.route("/subjects")
def subjects():
values = []
for subject in query_db('select * from subjects'):
values.push(subject)
return(jsonify(values))
@app.teardown_appcontext
def close_connection(exception):
db = getattr(g, '_database', None)
if db is not None:
db.close()
if __name__ == "__main__":
app.run(debug=True)
| 2.859375 | 3 |
lino/core/requests.py | khchine5/lino | 1 | 12765636 | <gh_stars>1-10
# -*- coding: UTF-8 -*-
# Copyright 2009-2018 Rumma & Ko Ltd
# License: BSD (see file COPYING for details)
"""
See introduction in :doc:`/dev/ar`.
"""
from builtins import str
import six
import logging
logger = logging.getLogger(__name__)
from copy import copy
from xml.sax.saxutils import escape
# from urlparse import urlsplit
# from six.moves.urllib.parse import urlencode
# try:
# from html import escape
# except ImportError:
# from cgi import escape
from django.conf import settings
from django.utils.translation import ugettext_lazy as _
from django.utils.translation import get_language
from django.utils import translation
from django.utils import timezone
from django.core import exceptions
from lino.core.utils import obj2unicode
from lino.core import constants
from lino.core.utils import navinfo
from lino.core.boundaction import BoundAction
from lino.core.signals import on_ui_created, pre_ui_save
from lino.core.diff import ChangeWatcher
from lino.core.utils import getrqdata
from lino.utils import AttrDict
from etgen.html import E, tostring
from lino.core.auth.utils import AnonymousUser
CATCHED_AJAX_EXCEPTIONS = (Warning, exceptions.ValidationError)
class ValidActionResponses(object):
"""
These are the allowed keyword arguments for :meth:`ar.set_response
<BaseRequest.set_response>`, and the action responses supported by
:js:func:`Lino.handle_action_result` (defined in
:xfile:`linolib.js`).
This class is never instantiated, but used as a placeholder for
these names and their documentation.
"""
message = None
"""
A translatable message text to be shown to the user.
"""
alert = None
"""
    True to specify that the message is rather important and should be
    presented in an alert dialog box to be confirmed by the user.
"""
success = None
errors = None
html = None
rows = None
navinfo = None
data_record = None
"""
Certain requests are expected to return detailed information about
a single data record. That's done in :attr:`data_record` which
must itself be a dict with the following keys:
    - id : the primary key of this record
- title : the title of the detail window
- data : a dict with one key for every data element
- navinfo : an object with information for the navigator
- disable_delete : either null (if that record may be deleted, or
otherwise a message explaining why.
"""
record_id = None
"""
When an action returns a `record_id`, it asks the user interface to
jump to the given record.
"""
refresh = None
refresh_all = None
close_window = None
record_deleted = None
xcallback = None
goto_url = None
"""
Leave current page and go to the given URL.
"""
open_url = None
"""
Open the given URL in a new browser window.
"""
open_webdav_url = None
info_message = None
warning_message = None
"deprecated"
eval_js = None
active_tab = None
detail_handler_name = None
"""
The name of the detail handler to be used. Application code should
not need to use this. It is automatically set by
:meth:`ActorRequest.goto_instance`.
"""
class VirtualRow(object):
def __init__(self, **kw):
self.update(**kw)
def update(self, **kw):
for k, v in list(kw.items()):
setattr(self, k, v)
def get_row_permission(self, ar, state, ba):
if ba.action.readonly:
return True
return False
from django.utils.encoding import python_2_unicode_compatible
@python_2_unicode_compatible
class PhantomRow(VirtualRow):
def __init__(self, request, **kw):
self._ar = request
VirtualRow.__init__(self, **kw)
def __str__(self):
return six.text_type(self._ar.get_action_title())
inheritable_attrs = frozenset(
'user subst_user renderer requesting_panel master_instance'.split())
def bool2text(x):
if x:
return _("Yes")
return _("No")
class BaseRequest(object):
"""
Base class of all action requests.
"""
user = None
subst_user = None
renderer = None
"""
The renderer to use when processing this request.
"""
actor = None
action_param_values = None
param_values = None
bound_action = None
known_values = {}
is_on_main_actor = True
master_instance = None
"""
The database object which acts as master. This is `None` for master
requests.
"""
request = None
"""
The incoming Django HttpRequest object which caused this action
request.
"""
selected_rows = []
content_type = 'application/json'
requesting_panel = None
def __init__(self, request=None, parent=None,
is_on_main_actor=True, **kw):
self.request = request
self.response = dict()
if request is not None:
rqdata = getrqdata(request)
kw = self.parse_req(request, rqdata, **kw)
if parent is not None:
self._confirm_answer = parent._confirm_answer
for k in inheritable_attrs:
if k in kw:
if kw[k] is None:
raise Exception("%s : %s=None" % (kw, k))
else:
kw[k] = getattr(parent, k)
kv = kw.setdefault('known_values', {})
kv.update(parent.known_values)
# kw.setdefault('user', parent.user)
# kw.setdefault('subst_user', parent.subst_user)
# kw.setdefault('renderer', parent.renderer)
# kw.setdefault('requesting_panel', parent.requesting_panel)
# if not parent.is_on_main_actor or parent.actor != kw.get('actor', None):
if not parent.is_on_main_actor:
is_on_main_actor = False
elif parent.actor is not None and parent.actor is not self.actor:
is_on_main_actor = False
# is_on_main_actor = False
self.is_on_main_actor = is_on_main_actor
self.setup(**kw)
def setup(self,
user=None,
subst_user=None,
current_project=None,
selected_pks=None,
selected_rows=None,
master_instance=None,
limit=None,
requesting_panel=None,
renderer=None):
self.requesting_panel = requesting_panel
self.master_instance = master_instance
if user is None:
self.user = AnonymousUser()
else:
self.user = user
self.current_project = current_project
if renderer is None:
renderer = settings.SITE.kernel.text_renderer
self.renderer = renderer
self.subst_user = subst_user
if selected_rows is not None:
self.selected_rows = selected_rows
assert selected_pks is None
if selected_pks is not None:
self.set_selected_pks(*selected_pks)
def parse_req(self, request, rqdata, **kw):
"""
Parse the given incoming HttpRequest and set up this action
request from it.
"""
if settings.SITE.user_model:
kw.update(user=request.user)
kw.update(subst_user=request.subst_user)
kw.update(requesting_panel=request.requesting_panel)
kw.update(current_project=rqdata.get(
constants.URL_PARAM_PROJECT, None))
# If the incoming request specifies an active tab, then the
# response must forward this information. Otherwise Lino would
# forget the current tab when a user saves a detail form for
# the first time. The `active_tab` is not (yet) used directly
# by Python code, so we don't store it as attribute on `self`,
# just in the response.
tab = rqdata.get(constants.URL_PARAM_TAB, None)
if tab is not None:
tab = int(tab)
# logger.info("20150130 b %s", tab)
self.set_response(active_tab=tab)
if not 'selected_pks' in kw:
selected = rqdata.getlist(constants.URL_PARAM_SELECTED)
kw.update(selected_pks=selected)
#~ if settings.SITE.user_model:
#~ username = rqdata.get(constants.URL_PARAM_SUBST_USER,None)
#~ if username:
#~ try:
#~ kw.update(subst_user=settings.SITE.user_model.objects.get(username=username))
#~ except settings.SITE.user_model.DoesNotExist, e:
#~ pass
# logger.info("20140503 ActionRequest.parse_req() %s", kw)
return kw
def setup_from(self, other):
"""
Copy certain values (renderer, user, subst_user & requesting_panel)
from this request to the other.
Deprecated. You should rather instantiate a request and
specify parent instead.
"""
if not self.must_execute():
return
# raise Exception("Request %r was already executed" % other)
self.renderer = other.renderer
# self.cellattrs = other.cellattrs
# self.tableattrs = other.tableattrs
self.user = other.user
self.subst_user = other.subst_user
self._confirm_answer = other._confirm_answer
# self.master_instance = other.master_instance # added 20150218
self.requesting_panel = other.requesting_panel
def spawn_request(self, **kw):
"""
Create a new of same class which inherits from this one.
"""
kw.update(parent=self)
return self.__class__(**kw)
def spawn(self, spec=None, **kw):
"""
Create a new action request using default values from this one and
the action specified by `spec`.
The first argument, `spec` can be:
- a string with the name of a model, actor or action
- a :class:`BoundAction` instance
- another action request (deprecated use)
        Deprecated. Use :meth:`spawn_request` if `spec` is another
        action request.
"""
from lino.core.actors import resolve_action
if isinstance(spec, ActionRequest): # deprecated use
# raise Exception("20160627 Deprecated")
for k, v in list(kw.items()):
assert hasattr(spec, k)
setattr(spec, k, v)
spec.setup_from(self)
elif isinstance(spec, BoundAction):
kw.update(parent=self)
spec = spec.request(**kw)
else:
kw.update(parent=self)
ba = resolve_action(spec)
spec = ba.request(**kw)
# from lino.core.menus import create_item
# mi = create_item(spec)
# spec = mi.bound_action.request(**kw)
return spec
def get_printable_context(self, **kw):
"""
Adds a series of names to the context used when rendering printable
documents. See :doc:`/user/templates_api`.
"""
# from django.conf import settings
from django.utils.translation import ugettext
from django.utils.translation import pgettext
from lino.api import dd, rt
from lino.utils import iif
from lino.utils.restify import restify
from django.db import models
# needed e.g. for polls tutorial
for n in ('Count', 'Sum', 'Max', 'Min', 'Avg', 'F'):
kw[n] = getattr(models, n)
if False: # 20150803 why was this? It disturbed e.g. for the bs3
# language selector.
sar = copy(self)
sar.renderer = settings.SITE.kernel.html_renderer
kw['ar'] = sar
else:
kw['ar'] = self
kw['_'] = ugettext
kw.update(
E=E, tostring=tostring,
dd=dd,
rt=rt,
decfmt=dd.decfmt,
fds=dd.fds,
fdm=dd.fdm,
fdl=dd.fdl,
fdf=dd.fdf,
fdmy=dd.fdmy,
iif=iif,
bool2text=bool2text,
bool2js=lambda b: "true" if b else "false",
unicode=str, # backwards-compatibility. In new template
# you should prefer `str`.
pgettext=pgettext,
now=timezone.now(),
getattr=getattr,
restify=restify,
requested_language=get_language())
def parse(s):
# Jinja doesn't like a name 'self' in the context which
# might exist there in a backwards-compatible appypod
# template:
kw.pop('self', None)
return dd.plugins.jinja.renderer.jinja_env.from_string(
s).render(**kw)
kw.update(parse=parse)
return kw
def set_selected_pks(self, *selected_pks):
"""
Given a tuple of primary keys, set :attr:`selected_rows` to a list
of corresponding database objects.
"""
#~ print 20131003, selected_pks
self.selected_rows = []
for pk in selected_pks:
if pk:
obj = self.get_row_by_pk(pk)
if obj is not None:
self.selected_rows.append(obj)
# self.selected_rows = filter(lambda x: x, self.selected_rows)
# note: ticket #523 was because the GET contained an empty pk ("&sr=")
def get_permission(self):
"""
Whether this request has permission to run. `obj` can be None if
the action is a list action (whose `select_rows` is `False`).
"""
if self.bound_action.action.select_rows:
# raise Exception("20160814 {}".format(self.bound_action))
if len(self.selected_rows) == 1:
obj = self.selected_rows[0]
state = self.bound_action.actor.get_row_state(obj)
return self.bound_action.get_row_permission(self, obj, state)
return self.bound_action.get_bound_action_permission(
self, None, None)
def set_response(self, **kw):
"""
Set (some part of) the response to be sent when the action request
finishes. Allowed keywords are documented in
:class:`ValidActionResponses`.
This does not yet respond anything, it is stored until the action
has finished. The response might be overwritten by subsequent
calls to :meth:`set_response`.
:js:func:`Lino.handle_action_result` will get these instructions
as *keywords* and thus will not know the order in which they have
been issued. This is a design decision. We *want* that, when
writing custom actions, the order of these instructions does not
matter.
"""
for k in kw.keys():
if not hasattr(ValidActionResponses, k):
raise Exception("Unknown key %r in action response." % k)
self.response.update(kw)
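    # Illustrative usage sketch (comment only; keyword names as documented
    # in ValidActionResponses):
    #   ar.set_response(refresh=True)
    #   ar.set_response(message=_("Saved"), alert=True)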
def error(self, e=None, message=None, **kw):
"""
Shortcut to :meth:`set_response` used to set an error response.
The first argument should be either an exception object or a
text with a message.
If a message is not explicitly given, Lino escapes any
characters with a special meaning in HTML. For example::
NotImplementedError: <dl> inside <text:p>
will be converted to::
        NotImplementedError: &lt;dl&gt; inside &lt;text:p&gt;
"""
kw.update(success=False)
kw.update(alert=_("Error")) # added 20140304
if isinstance(e, Exception):
if False: # useful when debugging, but otherwise rather disturbing
logger.exception(e)
if hasattr(e, 'message_dict'):
kw.update(errors=e.message_dict)
if message is None:
try:
message = six.text_type(e)
except UnicodeDecodeError as e:
message = repr(e)
message = escape(message)
kw.update(message=message)
self.set_response(**kw)
def success(self, message=None, alert=None, **kw):
"""
Tell the client to consider the action as successful. This is the
same as :meth:`set_response` with `success=True`.
First argument should be a textual message.
"""
kw.update(success=True)
if alert is not None:
if alert is True:
alert = _("Success")
kw.update(alert=alert)
if message is not None:
if 'message' in self.response and alert is None:
# ignore alert-less messages when there is already a
# message set. For example
# finan.FinancialVoucherItem.parter_changed with more
# than 1 suggestion.
pass
else:
kw.update(message=message)
self.set_response(**kw)
def append_message(self, level, msg, *args, **kw):
if args:
msg = msg % args
if kw:
msg = msg % kw
k = level + '_message'
old = self.response.get(k, None)
if old is None:
self.response[k] = msg
else:
self.response[k] = old + '\n' + msg
# return self.success(*args,**kw)
def debug(self, msg, *args, **kw):
if settings.SITE.verbose_client_info_message:
self.append_message('info', msg, *args, **kw)
def info(self, msg, *args, **kw):
# deprecated?
self.append_message('info', msg, *args, **kw)
def warning(self, msg, *args, **kw):
# deprecated?
self.append_message('warning', msg, *args, **kw)
_confirm_answer = True
def set_confirm_answer(self, ans):
"""
        Set the answer to be used by subsequent confirm() calls in a
        non-interactive renderer.
"""
self._confirm_answer = ans
def confirm(self, ok_func, *msgs):
"""
Execute the specified callable `ok_func` after the user has
confirmed the specified message.
The confirmation message may be specified as a series of
positional arguments which will be concatenated to a single
prompt.
The callable will be called with a single positional argument
which will be the action request that confirmed the
message. In a web context this will be another object than
this one.
In a non-interactive environment the `ok_func` function is
called directly (i.e. we don't ask any confirmation and act as
confirmation had been given).
"""
cb = self.add_callback(*msgs)
def noop(ar):
return ar.success(_("Aborted"))
cb.add_choice('yes', ok_func, _("Yes"))
cb.add_choice('no', noop, _("No"))
self.set_callback(cb)
if not self.renderer.is_interactive:
if self._confirm_answer:
ok_func(self)
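    # Usage sketch (hypothetical action code, not part of this module):
    # run `ok` only once the user has confirmed; in a web context the
    # callback machinery calls it with a fresh request `ar2`.
    #
    #   def run_from_ui(self, ar, **kw):
    #       def ok(ar2):
    #           ...  # do the actual work
    #           ar2.success(_("Done"))
    #       ar.confirm(ok, _("This cannot be undone. "), _("Are you sure?"))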
def parse_memo(self, txt, **context):
context.update(ar=self)
return settings.SITE.kernel.memo_parser.parse(txt, **context)
def obj2memo(self, *args, **kwargs):
"""
Calls the site's parser's :meth:`obj2memo
<lino.utils.memo.Parser.obj2memo>` method.
"""
# kwargs.update(ar=self)
return settings.SITE.kernel.memo_parser.obj2memo(*args, **kwargs)
# def parse_memo(self, html):
# return self.renderer.parse_memo(html, ar=self)
def set_callback(self, *args, **kw):
return settings.SITE.kernel.set_callback(self, *args, **kw)
def add_callback(self, *args, **kw):
return settings.SITE.kernel.add_callback(self, *args, **kw)
def goto_instance(self, *args, **kwargs):
return self.renderer.goto_instance(self, *args, **kwargs)
def close_window(self, **kw):
"""Ask client to close the current window. This is the same as
:meth:`BaseRequest.set_response` with `close_window=True`.
"""
kw.update(close_window=True)
self.set_response(**kw)
def set_content_type(self, ct):
# logger.info("20140430 set_content_type(%r)", ct)
self.content_type = ct
def must_execute(self):
return True
def get_total_count(self):
"""
TableRequest overrides this to return the number of rows.
"""
return -1
def get_data_value(self, obj, name):
"""
Return the value of the virtual field `name` for this action
request on the given object `obj`.
"""
fld = obj.get_data_elem(name)
return fld.value_from_object(obj, self)
def get_user(self):
"""
Return the :class:`User <lino.modlib.users.models.User>` instance
of the user who issued the request. If the authenticated user
is acting as somebody else, return that user's instance.
"""
return self.subst_user or self.user
def run(self, thing, *args, **kw):
"""
The first parameter `thing` may be an :class:`InstanceAction
<lino.core.utils.InstanceAction>` or a Model instance.
"""
return thing.run_from_session(self, *args, **kw)
def story2html(self, story, *args, **kwargs):
"""
Convert a story into a stream of HTML elements.
"""
# return self.renderer.show_story(self, story, *args, **kwargs)
return settings.SITE.kernel.html_renderer.show_story(
self, story, *args, **kwargs)
def story2rst(self, story, *args, **kwargs):
return self.renderer.show_story(self, story, *args, **kwargs)
def show(self, spec, master_instance=None, column_names=None,
header_level=None, language=None, nosummary=False,
stripped=True, show_links=False,
**kwargs):
"""
Show the specified table or action using the current renderer. If
the table is a :term:`slave table`, then a `master_instance`
must be specified as second argument.
The first argument specifies the table or actor to show. It is
forwarded to :meth:`spawn`.
Optional keyword arguments are:
:column_names: overrides default list of columns
:show_links: show links and other html formatting. Used
          e.g. in :ref:`avanti.specs.roles` where we want
to show whether cells are clickable or not.
:nosummary: if it is a table with :attr:`display_mode
<lino.core.tables.AbstractTable.display_mode>`
set to ``'summary'``, force rendering it as a
table.
:header_level: show also the header (using specified level)
:language: overrides the default language used for headers and
translatable data
Any other keyword arguments are forwarded to :meth:`spawn`.
Note that this function either returns a string or prints to
stdout and returns None, depending on the current renderer.
Usage in a :doc:`tested document </dev/doctests>`:
>>> from lino.api import rt
>>> rt.login('robin').show('users.UsersOverview', limit=5)
Usage in a Jinja template::
{{ar.show('users.UsersOverview')}}
"""
from lino.utils.report import Report
if master_instance is not None:
kwargs.update(master_instance=master_instance)
if isinstance(spec, BaseRequest):
assert not kwargs
ar = spec
else:
ar = self.spawn(spec, **kwargs)
def doit():
# print 20160530, ar.renderer
if issubclass(ar.actor, Report):
story = ar.actor.get_story(None, ar)
return ar.renderer.show_story(
self, story, header_level=header_level, stripped=stripped)
return ar.renderer.show_table(
ar, column_names=column_names, header_level=header_level,
nosummary=nosummary, stripped=stripped,
show_links=show_links)
if language:
with translation.override(language):
return doit()
return doit()
def show_menu(self, language=None, **kwargs):
"""Show the main menu for the requesting user using the requested
renderer.
This is used in tested docs.
:language: explicitly select another language than that
specified in the requesting user's :attr:`language
<lino.modlib.users.models.User.language>` field.
"""
user = self.get_user()
if language is None:
language = user.language
with translation.override(language):
mnu = settings.SITE.get_site_menu(None, user.user_type)
self.renderer.show_menu(self, mnu, **kwargs)
def get_home_url(self, *args, **kw):
"""Return URL to the "home page" as defined by the renderer, without
switching language to default language.
"""
if translation.get_language() != settings.SITE.DEFAULT_LANGUAGE:
kw[constants.URL_PARAM_USER_LANGUAGE] = translation.get_language()
return self.renderer.get_home_url(*args, **kw)
def get_request_url(self, *args, **kw):
"""When called on a BaseRequest, this just redirects to home.
"""
return self.renderer.get_home_url(*args, **kw)
def summary_row(self, obj, **kwargs):
return obj.summary_row(self, **kwargs)
def obj2html(self, obj, *args, **kwargs):
"""
Return a HTML element which represents a pointer to the given
database object. Depending on the renderer this will be more
or less clickable.
"""
if obj is None:
return ''
return self.renderer.obj2html(self, obj, *args, **kwargs)
def obj2str(self, *args, **kwargs):
"""Return a string with a pointer to the given object.
"""
return self.renderer.obj2str(self, *args, **kwargs)
def html_text(self, *args, **kwargs):
"""
"""
return self.renderer.html_text(*args, **kwargs)
def href_button(self, *args, **kwargs):
return self.renderer.href_button(*args, **kwargs)
def href_to_request(self, *args, **kwargs):
return self.renderer.href_to_request(self, *args, **kwargs)
def menu_item_button(self, *args, **kwargs):
"""Forwards to :meth:`lino.core.renderer.`"""
return self.renderer.menu_item_button(self, *args, **kwargs)
def window_action_button(self, *args, **kwargs):
# settings.SITE.logger.info(
# "20160529 window_action_button %s %s", args, self.renderer)
return self.renderer.window_action_button(self, *args, **kwargs)
def row_action_button(self, obj, ba, *args, **kwargs):
return self.renderer.row_action_button(
obj, None, ba, *args, **kwargs)
def row_action_button_ar(self, obj, *args, **kw):
"""Return an HTML element with a button for running this action
request on the given database object. Does not spawn another
request.
"""
return self.renderer.row_action_button_ar(obj, self, *args, **kw)
def ar2button(self, *args, **kw):
"""Return an HTML element with a button for running this action
request. Does not spawn another request. Does not check
permissions.
"""
return self.renderer.ar2button(self, *args, **kw)
def instance_action_button(self, ai, *args, **kw):
"""Return an HTML element with a button which would run the given
:class:`InstanceAction <lino.core.utils.InstanceAction>`
``ai`` on the client.
"""
# logger.info("20141106 %s", ai.instance)
return self.renderer.row_action_button(
ai.instance, self, ai.bound_action, *args, **kw)
def action_button(self, ba, obj, *args, **kwargs):
"""Returns the HTML of an action link which will run the specified
action.
``kwargs`` may contain additional html attributes like `style`.
"""
return self.renderer.action_button(obj, self, ba, *args, **kwargs)
def get_detail_title(self, elem):
return self.actor.get_detail_title(self, elem)
def as_button(self, *args, **kw):
"""Return a button which when activated executes (a copy of)
this request.
"""
return self.renderer.action_button(
None, self, self.bound_action, *args, **kw)
def elem2rec1(ar, rh, elem, **rec):
rec.update(data=rh.store.row2dict(ar, elem))
return rec
def elem2rec_insert(self, ah, elem):
"""
Returns a dict of this record, designed for usage by an InsertWindow.
"""
rec = self.elem2rec1(ah, elem)
rec.update(title=self.get_action_title())
rec.update(phantom=True)
return rec
def elem2rec_detailed(ar, elem, with_navinfo=True, **rec):
"""Adds additional information for this record, used only by detail
views.
The "navigation information" is a set of pointers to the next,
previous, first and last record relative to this record in
this report. (This information can be relatively expensive
for records that are towards the end of the queryset. See
`/blog/2010/0716`, `/blog/2010/0721`, `/blog/2010/1116`,
`/blog/2010/1207`.)
recno 0 means "the requested element exists but is not
contained in the requested queryset". This can happen after
changing the quick filter (search_change) of a detail view.
"""
rh = ar.ah
rec = ar.elem2rec1(rh, elem, **rec)
if ar.actor.hide_top_toolbar or ar.bound_action.action.hide_top_toolbar:
rec.update(title=ar.get_detail_title(elem))
else:
#~ print(ar.get_title())
#~ print(dd.obj2str(elem))
#~ print(repr(unicode(elem)))
if True: # before 20131017
rec.update(title=ar.get_title() + u" » " +
ar.get_detail_title(elem))
else: # todo
rec.update(title=tostring(ar.href_to_request(ar))
+ u" » " + ar.get_detail_title(elem))
rec.update(id=elem.pk)
if ar.actor.editable:
rec.update(disable_delete=rh.actor.disable_delete(elem, ar))
if rh.actor.show_detail_navigator and with_navinfo:
rec.update(navinfo=navinfo(ar.data_iterator, elem))
return rec
def form2obj_and_save(ar, data, elem, is_new):
"""
Parses the data from HttpRequest to the model instance and saves
it.
This is used by `ApiList.post` and `ApiElement.put`, and by
`Restful.post` and `Restful.put`.
20140505 : no longer used by ApiList and ApiElement, but still
by Restful.*
"""
if is_new:
watcher = None
else:
watcher = ChangeWatcher(elem)
ar.ah.store.form2obj(ar, data, elem, is_new)
elem.full_clean()
if is_new or watcher.is_dirty():
pre_ui_save.send(sender=elem.__class__, instance=elem, ar=ar)
elem.before_ui_save(ar)
kw2save = {}
if is_new:
kw2save.update(force_insert=True)
else:
kw2save.update(force_update=True)
elem.save(**kw2save)
if is_new:
on_ui_created.send(elem, request=ar.request)
ar.success(_("%s has been created.") % obj2unicode(elem))
else:
watcher.send_update(ar)
ar.success(_("%s has been updated.") % obj2unicode(elem))
else:
ar.success(_("%s : nothing to save.") % obj2unicode(elem))
elem.after_ui_save(ar, watcher)
def get_help_url(self, docname=None, text=None, **kw):
"""
Generate a link to the help section of the documentation (whose
base is defined by :attr:`lino.core.site.Site.help_url`)
Usage example::
help = ar.get_help_url("foo", target='_blank')
msg = _("You have a problem with foo."
"Please consult %(help)s "
"or ask your system administrator.")
msg %= dict(help=tostring(help))
kw.update(message=msg, alert=True)
"""
if text is None:
text = six.text_type(_("the documentation"))
url = settings.SITE.help_url
if docname is not None:
url = "%s/help/%s.html" % (url, docname)
return E.a(text, href=url, **kw)
class ActorRequest(BaseRequest):
"""Base for :class:`ActionRequest`, but also used directly by
:meth:`lino.core.kernel.Kernel.run_callback`.
"""
no_data_text = _("No data to display")
def create_phantom_rows(self, **kw):
# phantom row disturbs when there is an insert button in
# the toolbar
if self.actor.no_phantom_row:
return
# if self.actor.insert_layout is not None \
# and not self.actor.stay_in_grid \
# and not self.actor.force_phantom_row:
# return
if self.create_kw is None or not self.actor.editable \
or not self.actor.allow_create:
return
if not self.actor.get_create_permission(self):
return
yield PhantomRow(self, **kw)
def create_instance(self, **kw):
"""
Create a row (a model instance if this is a database table) using
the specified keyword arguments.
"""
if self.create_kw:
kw.update(self.create_kw)
if self.known_values:
kw.update(self.known_values)
obj = self.actor.create_instance(self, **kw)
return obj
def create_instance_from_request(self, **kwargs):
        elem = self.create_instance(**kwargs)
if self.actor.handle_uploaded_files is not None:
self.actor.handle_uploaded_files(elem, self.request)
if self.request is not None:
self.ah.store.form2obj(self, self.request.POST, elem, True)
elem.full_clean()
return elem
def get_status(self, **kw):
"""Return a `dict` with the "status", i.e. a json representation of
this request.
"""
if self.actor.parameters:
kw.update(
param_values=self.actor.params_layout.params_store.pv2dict(
self, self.param_values))
kw = self.bound_action.action.get_status(self, **kw)
bp = kw.setdefault('base_params', {})
if self.current_project is not None:
bp[constants.URL_PARAM_PROJECT] = self.current_project
if self.subst_user is not None:
bp[constants.URL_PARAM_SUBST_USER] = self.subst_user.id
return kw
# def spawn(self, actor, **kw):
# """Same as :meth:`BaseRequest.spawn`, except that the first positional
# argument is an `actor`.
# """
# if actor is None:
# actor = self.actor
# return super(ActorRequest, self).spawn(actor, **kw)
def summary_row(self, *args, **kw):
return self.actor.summary_row(self, *args, **kw)
def get_sum_text(self, sums):
return self.actor.get_sum_text(self, sums)
def get_row_by_pk(self, pk):
return self.actor.get_row_by_pk(self, pk)
def get_action_title(self):
return self.bound_action.action.get_action_title(self)
def get_title(self):
return self.actor.get_title(self)
def render_to_dict(self):
return self.bound_action.action.render_to_dict(self)
def get_request_url(self, *args, **kw):
return self.renderer.get_request_url(self, *args, **kw)
def absolute_uri(self, *args, **kw):
ar = self.spawn(*args, **kw)
location = ar.get_request_url()
return self.request.build_absolute_uri(location)
def build_webdav_uri(self, location):
if self.request is None:
return location
url = self.request.build_absolute_uri(location)
if settings.SITE.webdav_protocol:
url = settings.SITE.webdav_protocol + "://" + url
# url = urlsplit(url)
# url.scheme = settings.SITE.webdav_protocol
# url = url.unsplit()
print("20180410 {}", url)
return url
def pk2url(self, pk):
return self.renderer.get_detail_url(self.actor, pk)
def run(self, *args, **kw):
"""
Runs this action request.
"""
return self.bound_action.action.run_from_code(self, *args, **kw)
class ActionRequest(ActorRequest):
"""
    Holds information about an individual web request and provides
methods like
- :meth:`get_user <lino.core.actions.BaseRequest.get_user>`
- :meth:`confirm <lino.core.actions.BaseRequest.confirm>`
- :meth:`spawn <lino.core.actions.BaseRequest.spawn>`
An `ActionRequest` is also a :class:`BaseRequest` and inherits its
methods.
An ActionRequest is instantiated from different shortcut methods:
- :meth:`lino.core.actors.Actor.request`
- :meth:`lino.core.actions.Action.request`
"""
create_kw = None
renderer = None
offset = None
limit = None
order_by = None
def __init__(self, actor=None,
unused_request=None, action=None, unused_renderer=None,
rqdata=None,
**kw):
# print("20170116 ActionRequest.__init__()", actor, kw)
assert unused_renderer is None
assert unused_request is None
self.actor = actor
self.rqdata = rqdata
self.bound_action = action or actor.default_action
BaseRequest.__init__(self, **kw)
self.ah = actor.get_request_handle(self)
def setup(self,
known_values=None,
param_values=None,
action_param_values={},
**kw):
BaseRequest.setup(self, **kw)
#~ 20120111
#~ self.known_values = known_values or self.report.known_values
#~ if self.report.known_values:
#~ d = dict(self.report.known_values)
kv = dict()
for k, v in list(self.actor.known_values.items()):
kv.setdefault(k, v)
if known_values:
kv.update(known_values)
self.known_values = kv
request = self.request
if self.actor.parameters is not None:
pv = self.actor.param_defaults(self)
for k in list(pv.keys()):
if k not in self.actor.parameters:
raise Exception(
"%s.param_defaults() returned invalid keyword %r" %
(self.actor, k))
# New since 20120913. E.g. newcomers.Newcomers is a
# simple pcsw.Clients with
# known_values=dict(client_state=newcomer) and since there
# is a parameter `client_state`, we override that
# parameter's default value.
for k, v in list(self.known_values.items()):
if k in pv:
pv[k] = v
# New since 20120914. MyClientsByGroup has a `group` as
# master, this must also appear as `group` parameter
# value. Lino now understands tables where the master_key
# is also a parameter.
if self.actor.master_key is not None:
if self.actor.master_key in pv:
pv[self.actor.master_key] = self.master_instance
if param_values is None:
if request is not None:
# call get_layout_handle to make sure that
# params_store has been created:
self.actor.params_layout.get_layout_handle(
self.renderer.plugin)
ps = self.actor.params_layout.params_store
# print('20160329 requests.py', ps, self.actor.parameters)
if ps is not None:
pv.update(ps.parse_params(request))
else:
raise Exception(
"20160329 params_layout {0} has no params_store "
"in {1!r}".format(
self.actor.params_layout, self.actor))
else:
for k in list(param_values.keys()):
if k not in pv:
raise Exception(
"Invalid key '%s' in param_values of %s "
"request (possible keys are %s)" % (
k, self.actor, list(pv.keys())))
pv.update(param_values)
# print("20160329 ok", pv)
self.param_values = AttrDict(**pv)
# self.actor.check_params(self.param_values)
action = self.bound_action.action
if action.parameters is not None:
if len(self.selected_rows) == 1:
apv = action.action_param_defaults(
self, self.selected_rows[0])
else:
apv = action.action_param_defaults(self, None)
# msg = "20170116 selected_rows is {} for {!r}".format(
# self.selected_rows, action)
# raise Exception(msg)
if request is not None:
apv.update(
action.params_layout.params_store.parse_params(request))
self.action_param_values = AttrDict(**apv)
# action.check_params(action_param_values)
self.set_action_param_values(**action_param_values)
self.bound_action.setup_action_request(self)
def set_action_param_values(self, **action_param_values):
apv = self.action_param_values
for k in list(action_param_values.keys()):
if k not in apv:
raise Exception(
"Invalid key '%s' in action_param_values "
"of %s request (possible keys are %s)" %
(k, self.actor, list(apv.keys())))
apv.update(action_param_values)
def get_data_iterator(self):
raise NotImplementedError
def get_base_filename(self):
return six.text_type(self.actor)
#~ s = self.get_title()
#~ return s.encode('us-ascii','replace')
| 1.804688 | 2 |
tests/test_a0179largestnumber.py | nirofang/pyleet | 3 | 12765637 |
from solutions.a0179largestnumber import Solution
solution = Solution()
def test_largestNumber1():
nums = [10, 2]
expect = "210"
actual = solution.largestNumber(nums)
assert actual == expect
def test_largestNumber2():
nums = [3, 30, 34, 5, 9]
expect = "9534330"
actual = solution.largestNumber(nums)
assert actual == expect
def test_largestNumber3():
nums = [3, 3330, 333, 2, 9]
expect = "9333333302"
actual = solution.largestNumber(nums)
assert actual == expect
def test_largestNumber4():
nums = [0, 0]
expect = "0"
actual = solution.largestNumber(nums)
assert actual == expect
| 2.59375 | 3 |
Project_Euler/Problem 14 script.py | JasPass/Projects | 0 | 12765638 |
# Project Euler: Problem 14
#
#
# Which starting number, under one million, produces the longest chain?
import time
# Sets starting time of program
startTime = time.time()
# Function to run the sequence until 1 is reached
def sequence(n, sequence_length=1):
# Checks if (n) is even
if n % 2 == 0:
        n //= 2  # floor division keeps n an int (/= would turn it into a float)
# If (n) is not even, it must be odd
else:
n *= 3
n += 1
# Counts up the sequence length
sequence_length += 1
# Checks if number is still greater than 1
if n > 1:
# Runs the next iteration of the sequence
return sequence(n, sequence_length)
else:
# If this code is reached, (n) equals 1, and we are done
return sequence_length
# Longest found sequence length and corresponding seed
output = [1, 0]
# Loops through all numbers bellow 1 million
for i in range(2, 10**6):
# Sets the current sequence length
length = sequence(i)
# Checks if the sequence length is greater than
# the previously greatest sequence length
if length > output[1]:
# Sets the new greatest sequence length and seed
output = [i, length]
# Variable to hold the answer
ans = output[0]
# Prints out the answer
print('The answer to Project Euler problem 14 is:', ans)
# Sets finishing time of program
stopTime = time.time()
# Prints the time it took the program to execute
print('The computation took', '%.2g' % (stopTime - startTime), 'seconds')
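
# A faster variant (sketch, left commented so the script's behaviour is
# unchanged): memoize chain lengths so shared tails are computed only once.
# `cache` and `collatz_length` are illustrative names, not from the original.
#
#   cache = {1: 1}
#   def collatz_length(n):
#       if n not in cache:
#           nxt = n // 2 if n % 2 == 0 else 3 * n + 1
#           cache[n] = 1 + collatz_length(nxt)
#       return cache[n]
#
#   best = max(range(1, 10**6), key=collatz_length)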
| 3.609375 | 4 |
punkweb_boards/context_processors.py | shakedown-street/punkweb-boards | 1 | 12765639 | from punkweb_boards.conf import settings as BOARD_SETTINGS
from punkweb_boards.models import Report
def settings(request):
return {
"BOARD_SETTINGS": {
"BOARD_NAME": BOARD_SETTINGS.BOARD_NAME,
"BOARD_THEME": BOARD_SETTINGS.BOARD_THEME,
"SHOUTBOX_ENABLED": BOARD_SETTINGS.SHOUTBOX_ENABLED,
"SIGNATURES_ENABLED": BOARD_SETTINGS.SIGNATURES_ENABLED,
"USER_BIRTHDAY_MESSAGE": BOARD_SETTINGS.USER_BIRTHDAY_MESSAGE,
}
}
def base_context(request):
ctx = {}
if request.user.is_authenticated and not request.user.profile.is_banned:
ctx.update({"notifications": request.user.notifications.all()[:5]})
ctx.update({"unread_conversations": request.user.unread_conversations.count()})
ctx.update(
{
"unread_notifications": request.user.notifications.filter(
read=False
).count()
}
)
if request.user.is_staff:
unresolved_reports = Report.objects.filter(resolved=False).count()
ctx.update({"unresolved_reports": unresolved_reports})
return ctx
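
# Wiring sketch (assumes the standard Django TEMPLATES setting; the dotted
# paths match this module, but adapt them to your project layout):
#
#   TEMPLATES = [{
#       # ...
#       "OPTIONS": {
#           "context_processors": [
#               "django.template.context_processors.request",
#               "punkweb_boards.context_processors.settings",
#               "punkweb_boards.context_processors.base_context",
#           ],
#       },
#   }]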
| 2.03125 | 2 |
AlgorithmQuestionAnswering/StanfordSpacyNLP.py | zointblackbriar/QuestionAnswering | 1 | 12765640 |
#!/usr/bin/env python
# coding: utf-8
from __future__ import unicode_literals
from __future__ import print_function
# try:
# unicode_ = unicode #Python2
# except NameError:
# unicode_ = str #Python3
#This py file will use Stanford Core NLP Server
from collections import defaultdict
import nltk
from textblob import TextBlob
from textblob.parsers import PatternParser
from nltk.corpus import wordnet
#from nltk import word_tokenize
from itertools import chain
from stanfordcorenlp import StanfordCoreNLP
from textblob.sentiments import NaiveBayesAnalyzer
from spacy.lemmatizer import Lemmatizer
from spacy.lang.en import LEMMA_INDEX, LEMMA_EXC, LEMMA_RULES
from textacy import similarity
import json
import logging
from nltk.tree import *
import spacy
#import pandas as pd
from spacy.symbols import *
#import en_coref_md
#import en_coref_lg
nlp = spacy.load('en_core_web_md', disable=['ner', 'textcat'])
nlp_sm = spacy.load("en_core_web_sm", disable=['ner', 'textcat'])
logger = logging.getLogger(__name__)
class ConnectionCoreNLP(object):
def __init__(self, host="http://localhost", port=9000):
self.nlp = StanfordCoreNLP(host, port=port, timeout=30000) # quiet = False, logging_level = logging.DEBUG
self.props = {
            'annotators': 'tokenize,ssplit,pos,lemma,ner,parse,depparse,dcoref,relation,sentiment',
            'pipelineLanguage': 'en',
            'outputFormat': 'json'
}
def word_tokenize(self, sentence):
logger.info("word tokenize")
return self.nlp.word_tokenize(sentence)
def posTagger(self, sentence):
logger.info("pos tagger")
return self.nlp.pos_tag(sentence)
def ner(self, sentence):
logger.info("ner")
return self.nlp.ner(sentence)
def parse(self, sentence):
logger.info("parser activated")
return self.nlp.parse(sentence)
def dependency_parser(self, sentence):
logger.info("dependency parser activated")
return self.nlp.dependency_parse(sentence)
def deterministic_coreference(self, sentence):
logger.info("deterministic coreference activated")
return self.nlp.coref(sentence)
def annotate(self, sentence):
logger.info("annotator")
return json.loads(self.nlp.annotate(sentence, properties=self.props))
@staticmethod
def tokens_to_dict(_tokens):
tokens = defaultdict(dict)
for token in _tokens:
tokens[int(token['index'])] = {
'word' : token['word'],
'lemma' : token['lemma'],
'pos' : token['pos'],
'ner' : token['ner']
}
return tokens
class TestConnectionCoreNLP(object):
# print('Tokenize', nlpCore.word_tokenize(param_test))
# print('Part of Speech Tagger', nlpCore.posTagger(param_test))
# print('Named Entities', nlpCore.ner(param_test))
# print('ner list: ', param_test)
def __init__(self):
pass
    def dependencyParser(self, param_depend):
        dependencyParseTree = None  # avoid a NameError if the call below fails
        try:
nlpCore = ConnectionCoreNLP()
dependencyParseTree = nlpCore.dependency_parser(param_depend)
#be careful when you return dependencyParseTree
#It is not a tree
#It is a unicoded string format
except Exception as ex:
logger.exception("Dependency Parser Error")
return dependencyParseTree
def coreferenceSolutionStanford(self, sentence):
nlpCore = ConnectionCoreNLP()
coreference = nlpCore.deterministic_coreference(sentence)
return coreference
def spacyDependencyParser(self, sentence):
nlp = spacy.load('en_core_web_md')
document = nlp(unicode(sentence, "utf-8"))
return document.print_tree(light=True)
    def spacy_verb_finder(self, sentence):
        # Note: chunk.root is the head noun of each noun chunk, so despite the
        # method name this returns the chunk roots (nouns), not verbs.
        doc = nlp(sentence.decode('utf-8'))
        chunk_root = [chunk.root.text for chunk in doc.noun_chunks]
        print("chunk root:", chunk_root)
        return chunk_root
    def spacy_noun_finder(self, sentence):
        # chunk.root.head is the syntactic governor of each noun chunk (often
        # the verb); the module-level nlp is reused instead of reloading it.
        doc = nlp(sentence.decode('utf-8'))
        chunk_root_head = [chunk.root.head.text for chunk in doc.noun_chunks]
        print("chunk_root_head: ", chunk_root_head)
        return chunk_root_head
def textblob_sentiment_analysis(self, sentence):
blob = TextBlob(sentence, analyzer=NaiveBayesAnalyzer())
if blob.sentiment.p_pos > blob.sentiment.p_neg:
return True
else:
return False
def similarity_jaccard(self, first_input, second_input):
print("text similarity with jaccard")
similarity_level = similarity.jaccard(first_input, second_input)
print("similarity_level: ", similarity_level)
if similarity_level > 0.50:
return True
else:
return False
def similarity_jaro_winkler(self, first_input, second_input):
print("text similarity with jaro_winkler")
similarity_level = similarity.jaro_winkler(str(first_input), str(second_input))
print("similarity_level: ", similarity_level)
if similarity_level > 0.50:
return True
else:
return False
def similarity_levenshtein(self, first_input, second_input):
print("text similarity with levenshtein")
similarity_level = similarity.levenshtein(str(first_input), str(second_input))
print("similarity_level: ", similarity_level)
if similarity_level >= 0.53:
return True
else:
return False
def similarity_word_levensthein(self, first_input, second_input):
print("text similarity with levenshtein")
similarity_level = similarity.levenshtein(str(first_input), str(second_input))
print("similarity_level: ", similarity_level)
if similarity_level > 0.50:
return True
else:
return False
def spacyDetailedDependencyChunk(self, sentence):
nlp = spacy.load('en_core_web_md')
doc = nlp(sentence.decode('utf-8'))
chunk_text = [chunk.text for chunk in doc.noun_chunks]
chunk_root = [chunk.root.text for chunk in doc.noun_chunks]
chunk_root_dep = [chunk.root.dep_ for chunk in doc.noun_chunks]
chunk_root_head = [chunk.root.head.text for chunk in doc.noun_chunks]
print("ChunkText: ", chunk_text)
print("ChunkRoot: ", chunk_root)
print("ChunkRootHead: ", chunk_root_head)
print("Chunk root dep: ", chunk_root_dep)
return chunk_root_dep
def spacyDependencyRelations(self, sentence):
nlp = spacy.load('en_core_web_md')
doc = nlp(sentence.decode('utf-8'))
token_text = [token.text for token in doc]
token_dep = [token.dep_ for token in doc]
token_head_text = [token.head.text for token in doc]
token_head_pos = [token.pos_ for token in doc]
token_child = ([child for child in token.children] for token in doc)
# dataframe = pd.DataFrame(zip(token_text, token_dep, token_head_text, token_head_pos, token_child),
# columns = ['Token Text', 'Token Dep', 'Token Head Text', 'Token Head Pos', 'Token Child'])
# dataframe.to_html('Dependencies.html')
print("token_text: ", token_text)
print("token dep: ", token_dep)
print("token_head_text: ", token_head_text)
print("token head pos: ", token_head_pos)
print("token child: ", token_child)
#print(dataframe)
return token_head_pos ,token_text
def spacyArchMatching(self, sentence):
verbs = []
#nlp = spacy.load("en_core_web_sm", disable=['ner', 'textcat'])
doc = nlp_sm(sentence.decode('utf-8'))
for possible_verb in doc:
if possible_verb.pos == VERB:
for possible_subject in possible_verb.children:
if possible_subject.dep == nsubj or possible_subject.dep == dobj or possible_subject.dep == ccomp or possible_subject.dep == root:
verbs.append(possible_verb)
break
print('VERBS', verbs)
return verbs
def spacyDependencyChunk(self, sentence):
listChunk = []
indirectDependency = False
#A medium English model based on spacy -- Size 161 Mo -- Not Recommended en_core_web_md
#nlp = spacy.load('en_core_web_md', disable=['ner'])
doc = nlp(sentence.decode('utf-8'))
print("nlp meta", nlp.meta)
chunk_root_dep = [chunk.root.dep_ for chunk in doc.noun_chunks]
print("chunk root dep: ", chunk_root_dep)
if chunk_root_dep[-1] == u'nsubj' and len(chunk_root_dep) == 1:
indirectDependency = False
elif len(chunk_root_dep) > 1:
if chunk_root_dep[-2] == u'nsubj' and chunk_root_dep[-1] == u'dobj':
indirectDependency = True
elif chunk_root_dep[-2] == u'dobj' and chunk_root_dep[-1] == u'nsubj':
indirectDependency = False
print("indirectDependency: ", indirectDependency)
return indirectDependency
#Install en_coref_md
#pip install en_coref_sm-3.0.0.tar.gz
# def spacyCoreferenceResolution(self, sentence):
# #nlp = spacy.load('en_coref_md')
# nlp = en_coref_lg.load()
# doc = nlp(unicode(sentence, encoding="utf-8"))
# #doc = unicode_(sentence)
# if doc._.has_coref is True:
# print(doc._.coref_clusters)
# mentions = [{'start': mention.start_char,
# 'end': mention.end_char,
# 'text': mention.text,
# 'resolved': cluster.main.text
# }
# for cluster in doc._.coref_clusters
# for mention in cluster.mentions]
# clusters = list(list(span.text for span in cluster)
# for cluster in doc._.coref_clusters)
# resolved = doc._.coref_resolved
#
# return doc._.has_coref
def textblobPatternParser(self, sentence):
blob = TextBlob(sentence, parser=PatternParser())
return blob.parse()
# def spell_checker_input(self, statement):
# print("statement is: ", statement)
# result = enchant.Dict("en-US") ##Spell Checking tokenize words
# print(list(set([word.encode('ascii', 'ignore') for word in word_tokenize(result) if result.check(word) is False and re.match('^[a-zA-Z ]*$',word)])))
def stanfordCoreferenceResolution(self, statement):
nlp = StanfordCoreNLP(r'G:\AllFiles\Projeler\OpenSource\SemanticWeb&NLP\stanford-corenlp-full-2018-02-27\stanford-corenlp-full-2018-02-27', quiet=False)
props = {'annotators': 'coref', 'pipelineLanguage': 'en'}
result = json.loads(nlp.annotate(statement, properties = props))
num, mentions = result['corefs'].items()[0]
for mention in mentions:
print(mention)
def namedEntityRecognition(self, param_ner):
try:
nlpCore = ConnectionCoreNLP()
nerResult = nlpCore.ner(param_ner)
print(nerResult)
except Exception as ex:
logger.exception("Named Entity Recognition Error ")
def spacy_verb_lemmatizer(self, param_to_be_lemmatized):
lemmatizer = Lemmatizer(LEMMA_INDEX, LEMMA_EXC, LEMMA_RULES)
lemmas = lemmatizer(param_to_be_lemmatized, u'VERB')
return lemmas
def spacy_noun_lemmatizer(self, noun_to_be_lemmatized):
lemmatizer = Lemmatizer(LEMMA_INDEX, LEMMA_EXC, LEMMA_RULES)
lemmas = lemmatizer(noun_to_be_lemmatized, u'NOUN')
return lemmas
    def posTaggerSender(self, param_posTagger):
        posResult = None  # avoid a NameError if the request fails
        try:
nlpCore = ConnectionCoreNLP()
posResult = nlpCore.posTagger(param_posTagger)
except Exception as ex:
logger.exception("Pos Tagger Error")
return posResult
#Tested with Server. Stanford CoreNLP server up and running
def constituencyParser(self, param_constituent):
try:
nlpCore = ConnectionCoreNLP()
constituentParseTree = nlpCore.parse(param_constituent)
# print(constituentParseTree)
return Tree.fromstring(constituentParseTree)
except Exception as ex:
logger.exception("Constituency Parser error")
def resultSentiment(self, param_sentiment):
try:
nlpCore = ConnectionCoreNLP()
sentiment_analysis = nlpCore.annotate(param_sentiment)
print(sentiment_analysis)
except Exception as ex:
logger.exception("Sentiment Analysis error")
#Find tree's node with the following method
#get_node function has been changed with label() function
def printSubtrees(self, tree, constituentTags, extraCheckTags=None):
try:
for subtree in tree.subtrees():
if subtree.label() == constituentTags or subtree.label() == extraCheckTags:
return subtree.leaves()
except Exception as e:
logger.exception("Subtree parse problem")
def findNNSubtree(self, tree):
try:
NPs = list(tree.subtrees(filter=lambda x: x.label() == 'NP'))
NNs_insideNPs = map(lambda x: list(x.subtrees(filter=lambda x: x.label() == 'NNP' or x.label() == 'NN' or x.label() == 'NNS')), NPs)
return self.removeDuplicates([noun.leaves()[0] for nouns in NNs_insideNPs for noun in nouns])
except Exception as ex:
logger.exception("Constituency Parser error")
def findVPSubtree(self, tree):
try:
VPs = list(tree.subtrees(filter=lambda x: x.label() == 'VP' or x.label() == 'VBZ'
or x.label() == 'VBD' or x.label() == 'VBG' or x.label() == 'VBN' or x.label() == 'VBP'))
print("length of VPs", len(VPs))
print("VPs", VPs)
if len(VPs) == 0:
return None
#or x.label() == 'VB'
VBZs = map(lambda x: list(x.subtrees(filter=lambda x: x.label() == 'VBZ')), VPs)
print("VBZ", VBZs)
return self.removeDuplicates([verb.leaves()[0] for verbs in VBZs for verb in verbs])
except Exception as ex:
logger.exception("VPSubtree Parser Error")
def removeDuplicates(self, dup_list):
assignmentList = []
for elem in dup_list:
if elem not in assignmentList:
assignmentList.append(elem)
return assignmentList
#Write all nodes of NLTK Parse Tree
#This function has some errors it should be fixed.
def getNodes(self, tree):
output = []
try:
for node in tree:
if type(node) is nltk.Tree:
if node.label() == 'ROOT':
print("=====Sentence=====")
print("Sentence:", ".join(node.leaves()")
elif node.label() == 'NN':
#append all the nodes into the list of python
output.append(node.leaves())
else:
print("Label:", node.label())
print("Leaves", node.leaves())
#print("output", output)
self.getNodes(node)
except Exception as ex:
logger.exception("Get node function couldn't parse it")
yield output
#with pure nltk to find synonym
def synonym(self, input):
synonyms = wordnet.synsets(input)
lemmas = set(chain.from_iterable([word.lemma_names() for word in synonyms]))
print(lemmas)
#n-grams function in NLP
#if you want to do bigram, please send a value n=2
    #Usage is self.ngrams("What is the value of sensor1 in machine1".split(), 2)
def ngrams(self, words, n):
return [words[i:i+n] for i in range(len(words)-n+1)]
#Wordnet Latent Semantic Analysis - To test for synonyms
def wordnetLatentAnalysis(self, word1, word2, lch_threshold=2.15, verbose=False):
"""Determine if two (already lemmatized) words are similar or not"""
"""Call with verbose=True to print the Wordnet senses from each word that are considered similar"""
from nltk.corpus import wordnet as wn
results = []
for net1 in wn.synsets(word1):
for net2 in wn.synsets(word2):
try:
lch = net1.lch_similarity(net2)
except:
continue
#The value to compare the LCH to was found empirically
#The value is application dependent. Do experiment
if lch >= lch_threshold:
results.append((net1, net2))
if not results:
return False
if verbose:
for net1, net2 in results:
print(net1)
print(net1.definition)
print(net2)
print(net2.definition)
print('path similarity')
print(net1.path_similarity(net2))
print(net1.lch_similarity(net2))
print('wup similarity:')
print(net1.wup_similarity(net2))
                print('-' * 79)
return True
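
# Minimal usage sketch (assumes a Stanford CoreNLP server is listening on
# http://localhost:9000, the default of ConnectionCoreNLP above, and that the
# spaCy models are installed):
#
#   if __name__ == '__main__':
#       tester = TestConnectionCoreNLP()
#       print(tester.posTaggerSender("The sensor reports a high temperature."))
#       print(tester.spacy_verb_finder("The sensor reports a high temperature."))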
| 2.3125 | 2 |
nodes/pulsar-houdini/open_file.py | Soulayrol/Pulsar | 16 | 12765641 | import os
import hou
def main(arguments):
file = arguments["file"].replace(os.sep, '/')
if(arguments["force"] == 0):
hou.hipFile.load(file, suppress_save_prompt=True)
else:
hou.hipFile.save(file_name=None)
hou.hipFile.load(file, suppress_save_prompt=False)
# workspace_path = file.split('/scenes')[0]
# wipcache_path = os.path.split(file.replace('02_shot/3d/scenes', '03_WIP_CACHE_FX'))[0]
# wipcache_path = wipcache_path.replace('01_asset_3d/3d/scenes', '03_WIP_CACHE_FX')
# pubcache_path = os.path.split(file.replace('02_shot/3d/scenes', '04_PUBLISH_CACHE_FX'))[0]
# pubcache_path = pubcache_path.replace('01_asset_3d/3d/scenes', '04_PUBLISH_CACHE_FX')
# hou.putenv('JOB', workspace_path)
# hou.putenv('WIPCACHE', wipcache_path)
# hou.putenv('PUBCACHE', pubcache_path)
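    # Example call (hypothetical values; "file" and "force" are the only keys
    # this function reads):
    #   main({"file": "D:/projects/shot010/scenes/fx_v001.hip", "force": 0})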
| 2.25 | 2 |
driloader/__main__.py | lucasmello/Driloader | 4 | 12765642 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
# vim: tabstop=4 expandtab shiftwidth=4 softtabstop=4
# pylint: disable=too-few-public-methods
"""Driloader Command Line Interface
Using Google Python Style Guide:
http://google.github.io/styleguide/pyguide.html
"""
import argparse
import sys
from driloader.browser.exceptions import BrowserDetectionError
from driloader.factories.browser_factory import BrowserFactory
class OutputType:
"""Output Type
Enum Class to store the possible output types.
Types:
INFO: Any information non-error related.
ERROR: Any error message.
"""
INFO = 'INFO'
ERROR = 'ERROR'
class CliError(Exception):
""" CliError """
def __init__(self, message, cause):
"""Init method
Sets superclass arguments up.
Sets the cause of exception up.
"""
super().__init__(message)
self.cause = cause
# def __str__(self):
# return 'Error: {}.\nCause: {}'.format(self.args[0], self.cause)
class DriloaderCommands:
"""A facade to BrowserDetection"""
@staticmethod
def get_google_chrome_version():
""" Returns Google Chrome version.
Args:
self
Returns:
Returns an int with the browser version.
Raises:
            CliError: Raised when something goes wrong while getting the browser version.
"""
try:
return BrowserFactory('CHROME').browser.installed_browser_version()
except BrowserDetectionError as err:
raise CliError('Unable to get the Google Chrome version', str(err)) from err
@staticmethod
def get_firefox_version():
""" Returns Firefox version.
Args:
self
Returns:
Returns an int with the browser version.
Raises:
            CliError: Raised when something goes wrong while getting the browser version.
"""
try:
return BrowserFactory('FIREFOX').browser.\
installed_browser_version()
except BrowserDetectionError as err:
raise CliError('Unable to get the Firefox version', str(err)) from err
@staticmethod
def get_internet_explorer_version():
""" Returns Internet Explorer version.
Args:
self
Returns:
Returns an int with the browser version.
Raises:
            CliError: Raised when something goes wrong while getting the browser version.
"""
try:
return BrowserFactory('IE').browser.installed_browser_version()
except BrowserDetectionError as err:
raise CliError('Unable to get the Internet Explorer version',
str(err)) from err
def get_all_browsers_versions(self):
""" Returns all browser version.
Args:
self
Returns:
            Returns a string with the browser versions, e.g.:
            Firefox: 45
            Google Chrome: 58
            Internet Explorer: 11
Raises:
None
"""
result_message = 'Firefox: {}\nGoogle Chrome: ' \
'{}\nInternet Explorer: {}\n'
try:
ff_version = str(self.get_firefox_version())
except CliError as error:
ff_version = str(error)
try:
chrome_version = str(self.get_google_chrome_version())
except CliError as error:
chrome_version = str(error)
try:
ie_version = str(self.get_internet_explorer_version())
except CliError as error:
ie_version = str(error)
return result_message.format(ff_version, chrome_version, ie_version)
def parse_args():
""" Parse Arguments
Parse arguments from stdin.
Args:
Returns:
A string argument from stdin.
Raises:
None
"""
parser = argparse.ArgumentParser(prog="driloader")
action = parser.add_mutually_exclusive_group(required=True)
action.add_argument('--firefox', '-f',
help='get Firefox version.',
action='store_true')
action.add_argument('--chrome', '-c',
help='get Google Chrome version.',
action='store_true')
action.add_argument('--internet-explorer', '-i',
help='get Internet Explorer version.',
action='store_true')
action.add_argument('--all',
help='look for browser an get their versions.',
action='store_true')
args = parser.parse_args()
for key, value in args.__dict__.items():
if value is True:
return key
return None
def display_output(message, output_type=OutputType.INFO):
""" Display Output
Displays an output message to the correct file descriptor (STDIN or STDOUT) and exits
the script based on the type sent as parameter.
If output_type == OutputType.INFO sends the message to STDIN and exits with code 0.
If output_type == OutputType.ERROR sends the message to STDERR and exits with code 1.
Args:
message: The message to be displayed.
output_type: A type in OutputType class
Returns:
None
Raises:
None
"""
if output_type == OutputType.INFO:
std_descriptor = sys.stdout
exit_code = 0
else:
std_descriptor = sys.stderr
exit_code = 1
message = str(message)
if 'Cause' in message:
message = message.replace('Cause', '\tCause')
print(message, file=std_descriptor)
sys.exit(exit_code)
def main():
""" Main Function
Responsible for:
- call the parse_args() function and get the parameter sent from stdin.
- instantiate the DriloaderCommands class and call its methods based
on the argparser input.
Args:
Returns:
None
Raises:
None
"""
option = parse_args()
commands = DriloaderCommands()
options = {
'chrome': commands.get_google_chrome_version,
'firefox': commands.get_firefox_version,
'internet_explorer': commands.get_internet_explorer_version,
'all': commands.get_all_browsers_versions
}
message = ''
try:
result = options[option]()
message = result
except CliError as cli_error:
display_output(str(cli_error), OutputType.ERROR)
display_output(message, OutputType.INFO)
if __name__ == '__main__':
main()
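
# Example invocations (a sketch; because this module is driloader/__main__.py,
# `python -m driloader` works even without an installed console entry point):
#   python -m driloader --chrome    # print the installed Google Chrome version
#   python -m driloader --all       # print Firefox, Chrome and IE versions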
| 2.515625 | 3 |
services/backend/src/database/config.py | CarooSilvestri/fastapi-vue | 35 | 12765643 |
import os
TORTOISE_ORM = {
"connections": {"default": os.environ.get("DATABASE_URL")},
"apps": {
"models": {
"models": [
"src.database.models", "aerich.models"
],
"default_connection": "default"
}
}
}
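
# Example (hypothetical credentials): the environment is expected to provide
#   DATABASE_URL=postgres://user:password@db:5432/app
# which Tortoise parses into the "default" connection above.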
| 1.4375 | 1 |
4-2/race.py | mttaggart/everlive-python-intro | 0 | 12765644 | from car import Car
class Race:
    def __init__(self, racers, distance):
        # Minimal implementation sketch: car.py is not shown here, so this
        # assumes each Car exposes a move() method and a numeric `position`.
        self.racers = racers
        self.distance = distance

    def tick(self):
        # Advance every car one step.
        for car in self.racers:
            car.move()

    def run(self):
        # Tick until at least one car crosses the finish line, then return
        # the winner(s).
        while not any(car.position >= self.distance for car in self.racers):
            self.tick()
        return [car for car in self.racers if car.position >= self.distance]
| 2.3125 | 2 |
metrics_exporter.py | nfvsap/latency-tests | 0 | 12765645 | # -*- coding: utf-8 -*-
import flask
import os
import sys
import ast
import json
import argparse
app = flask.Flask(__name__)
filename = ''
@app.route('/latency_metrics')
def get_latency_percentiles():
""" Retrieves the last saved latency hdr histogram percentiles
and the average latency
Args:
-
Returns:
dict: A JSON object containing the metrics
"""
status = 200
return flask.Response(get_stats_json(),
status=status,
mimetype='application/json')
def get_stats_json():
try:
f = open(filename, 'r')
line = f.readline()
percentiles_dict = {}
while line:
percentile = line.split(' ')[0]
latency = float(line.split(' ')[1])
percentiles_dict[percentile] = latency
line = f.readline()
js = json.dumps(percentiles_dict, indent=2)
f.close()
return js
    except Exception as e:
        # Return an error payload instead of None so the Flask response
        # always gets a JSON body.
        print("An exception occurred")
        print(str(e))
        return json.dumps({"error": str(e)})
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Give configuration options')
parser.add_argument('--filename', metavar='filename', type=str,
help='the filename from which will retrieve the metrics')
parser.add_argument('--port', metavar='port', type=int, default=5000,
help='Server port (default 5000)')
args = parser.parse_args()
filename = args.filename
app.run(host='0.0.0.0', port=args.port, debug=True)
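
# Example run (hypothetical file: one "<percentile> <latency>" pair per line):
#   python metrics_exporter.py --filename latency_percentiles.txt --port 5000
#   curl http://localhost:5000/latency_metrics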
| 2.890625 | 3 |
GPyOpt/testing/util_tests/test_general_utils.py | RaulAstudillo/bocf | 9 | 12765646 | import numpy as np
import unittest
from numpy.testing import assert_array_less
from GPyOpt.core.errors import InvalidConfigError
from GPyOpt.core.task.space import Design_space
from GPyOpt.experiment_design import initial_design
class TestInitialDesign(unittest.TestCase):
def setUp(self):
self.space = [
{'name': 'var_1', 'type': 'continuous', 'domain':(-3,1), 'dimensionality': 1},
{'name': 'var_2', 'type': 'discrete', 'domain': (0,1,2,3)},
{'name': 'var_3', 'type': 'categorical', 'domain': (0,1,2)}
]
self.design_space = Design_space(self.space)
self.bandit_variable = {'name': 'stations', 'type': 'bandit', 'domain': np.array([[1, 1], [2, 2], [3, 3], [4, 4]])}
def assert_samples_against_space(self, samples):
lower_bound_var1 = self.design_space.name_to_variable['var_1'].domain[0]
upper_bound_var1 = self.design_space.name_to_variable['var_1'].domain[1]
self.assertTrue((samples[:,0] >= lower_bound_var1).all())
self.assertTrue((samples[:,0] <= upper_bound_var1).all())
var2_values = self.design_space.name_to_variable['var_2'].domain
self.assertTrue(np.in1d(samples[:,1], var2_values).all())
var3_values = self.design_space.name_to_variable['var_3'].domain
self.assertTrue(np.in1d(samples[:,2], var3_values).all())
def test_grid_design(self):
init_points_count = 3
samples = initial_design('grid', self.design_space, init_points_count)
self.assertEqual(len(samples), init_points_count)
self.assert_samples_against_space(samples)
init_points_count = 1000
samples = initial_design('grid', self.design_space, init_points_count)
self.assertEqual(len(samples), init_points_count)
self.assert_samples_against_space(samples)
def test_grid_design_with_multiple_continuous_variables(self):
self.space.extend([
{'name': 'var_5', 'type': 'continuous', 'domain':(0,5), 'dimensionality': 2},
{'name': 'var_6', 'type': 'continuous', 'domain':(-5,5), 'dimensionality': 1}
])
self.design_space = Design_space(self.space)
init_points_count = 10
samples = initial_design('grid', self.design_space, init_points_count)
self.assertEqual(len(samples), 1)
init_points_count = 100
samples = initial_design('grid', self.design_space, init_points_count)
self.assertEqual(len(samples), 3**4)
def test_random_design(self):
init_points_count = 10
samples = initial_design('random', self.design_space, init_points_count)
self.assertEqual(len(samples), init_points_count)
self.assert_samples_against_space(samples)
def test_random_design_with_constraints(self):
constraints = [{'name': 'const_1', 'constraint': 'x[:,0]**2 - 1'}]
self.design_space = Design_space(self.space, constraints=constraints)
initial_points_count = 10
samples = initial_design('random', self.design_space, initial_points_count)
self.assert_samples_against_space(samples)
self.assertTrue((samples[:,0]**2 - 1 < 0).all())
def test_random_design_with_bandit_only(self):
space = [self.bandit_variable]
self.design_space = Design_space(space)
initial_points_count = 3
samples = initial_design('random', self.design_space, initial_points_count)
self.assertEqual(len(samples), initial_points_count)
def test_nonrandom_designs_with_constrains(self):
constraints = [{'name': 'const_1', 'constraint': 'x[:,0]**2 - 1'}]
self.design_space = Design_space(self.space, constraints=constraints)
initial_points_count = 10
with self.assertRaises(InvalidConfigError):
initial_design('grid', self.design_space, initial_points_count)
with self.assertRaises(InvalidConfigError):
initial_design('latin', self.design_space, initial_points_count)
with self.assertRaises(InvalidConfigError):
initial_design('sobol', self.design_space, initial_points_count)
def test_latin_design(self):
init_points_count = 10
samples = initial_design('latin', self.design_space, init_points_count)
self.assertEqual(len(samples), init_points_count)
self.assert_samples_against_space(samples)
def test_latin_design_with_multiple_continuous_variables(self):
self.space.extend([
{'name': 'var_5', 'type': 'continuous', 'domain':(0,5), 'dimensionality': 2},
{'name': 'var_6', 'type': 'continuous', 'domain':(-5,5), 'dimensionality': 1}
])
self.design_space = Design_space(self.space)
init_points_count = 10
samples = initial_design('latin', self.design_space, init_points_count)
self.assertEqual(len(samples), init_points_count)
self.assert_samples_against_space(samples)
def test_sobol_design(self):
init_points_count = 10
samples = initial_design('sobol', self.design_space, init_points_count)
self.assertEqual(len(samples), init_points_count)
self.assert_samples_against_space(samples)
| 2.609375 | 3 |
src/pipeline/stock-news-pipeline.py | HalmonLui/msft-azure-hacks-2019 | 5 | 12765647 |
import datetime
import json
from airflow import DAG
from airflow import models
from airflow.operators.python_operator import PythonOperator
from airflow.operators.bash_operator import BashOperator
from airflow.contrib.hooks.mongo_hook import MongoHook
DEFAULT_ARGS = {
    'start_date': datetime.datetime(2019, 10, 1),  # static (arbitrary) date; a dynamic today() confuses the scheduler
'email_on_retry': False,
'email_on_failure': False,
'retries': 3,
'retry_delay': datetime.timedelta(minutes=30),
}
dag = DAG('Stock-News-Pipeline', default_args=DEFAULT_ARGS,
          schedule_interval="0 20 * * *",  # daily at 20:00; Airflow takes 5-field cron, not Quartz-style "0 0 20 ? * * *"
)
t1 = BashOperator(task_id='Stock-News-Agg', dag=dag, bash_command="python3 /Users/sonamghosh/Desktop/msft_azure_hackathon_2019/exploratory_analysis/msft-azure-hacks-2019/src/pipeline/create_tables.py")
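
# Smoke test (a sketch, using the Airflow 1.x CLI current at the time and the
# dag/task ids defined above):
#   airflow test Stock-News-Pipeline Stock-News-Agg 2019-10-01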
| 1.960938 | 2 |
yamlsql/render.py | yejianye/yamlsql | 0 | 12765648 |
import sqlparse
from funcy import omit, merge
from ruamel.yaml.comments import CommentedMap
from .parser import YAMLParser
def sql_format(sql):
sql = sql.strip()
if not sql.endswith(';'):
sql = sql + ';'
return sqlparse.format(sql.strip(), reindent=True, keyword_case='upper')
def _build_clause(name, item, key=None):
    # `key` lets the YAML key differ from the emitted SQL keyword
    # (e.g. the `when` key below renders as a WHERE clause).
    key = key or name.replace(' ', '_')
    if key not in item:
        return ''
    return '{} {}'.format(name, item[key])
class Processor(object):
when = None
def process(self, item):
if self.can_apply(item):
return self.transform(item)
return item
def transform(self, item):
raise NotImplemented()
def can_apply(self, item):
if self.when:
return self.when in item
return True
class SqlProcessor(Processor):
when = 'sql'
def transform(self, item):
return sql_format(item['sql'])
class SelectProcessor(Processor):
when = 'select'
tmpl = """
select {fields} from {from_}
{when}
{group_by}
{order_by}
{limit}
"""
def transform(self, item):
ctx = dict(
fields=','.join(item['select']),
from_=item['from'],
            when=_build_clause('where', item, key='when'),  # YAML `when` -> SQL WHERE
group_by=_build_clause('group by', item),
order_by=_build_clause('order by', item),
limit=_build_clause('limit', item))
sql = self.tmpl.format(**ctx).strip()
return merge(
omit(item, ['select', 'from', 'group_by', 'order_by', 'limit']),
{'sql': sql})
class SQLRender(object):
MAX_ITERATION = 100
DEFAULT_PROCESSORS = [SqlProcessor, SelectProcessor]
def __init__(self, content, processors=DEFAULT_PROCESSORS):
self.parser = YAMLParser(content)
if not processors:
processors = self.DEFAULT_PROCESSORS
self.processors = [cls() for cls in processors]
def render(self, query_name=None, lineno=None):
""" Render YAML to SQL
If `query_name` is specified, only render query with specified name
If `lineno` is specified, only render query around specified line
"""
if query_name:
items = [item for item in self.parser.doc
if item.get('name') == query_name]
return '\n\n'.join(self.render_item(item) for item in items)
if lineno:
return self.render_item(self.parser.find_root(lineno))
return '\n\n'.join(self.render_item(item) for item in self.parser.doc)
def render_item(self, item):
for i in xrange(self.MAX_ITERATION):
for p in self.processors:
if isinstance(item, basestring):
return item
if isinstance(item, CommentedMap):
item = dict(item)
item = p.process(item)
raise Exception(
"Cannot parse the item within {} iterations:\n{}".format(
self.MAX_ITERATION, item))
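
# Example document (a sketch based on the keys SelectProcessor handles --
# select/from/when/group_by/order_by/limit; the `when` key renders as the
# SQL WHERE clause):
#
#   - name: top_users
#     select: [user_id, "count(*)"]
#     from: events
#     when: created_at > '2020-01-01'
#     group_by: user_id
#     order_by: count(*) desc
#     limit: 10
#
# SQLRender(content).render('top_users') then emits the formatted SELECT.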
| 2.484375 | 2 |
coding_intereview/1450. Number of Students Doing Homework at a Given Time.py | Jahidul007/Python-Bootcamp | 2 | 12765649 |
from typing import List


class Solution:
def busyStudent(self, startTime: List[int], endTime: List[int], queryTime: int) -> int:
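        # Example (LeetCode sample): startTime=[1,2,3], endTime=[3,2,7],
        # queryTime=4 -> 1, since only the interval [3, 7] covers t=4.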
working = 0
for i, stu in enumerate(startTime):
if stu <= queryTime and endTime[i] >= queryTime:
working += 1
return working | 3.4375 | 3 |
setup.py | gustavla/vzlog | 3 | 12765650 |
#!/usr/bin/env python
from __future__ import division, print_function, absolute_import
from setuptools import setup
import os
if os.getenv('READTHEDOCS'):
with open('requirements_docs.txt') as f:
required = f.read().splitlines()
compile_ext = False
else:
import numpy as np
from Cython.Build import cythonize
with open('requirements.txt') as f:
required = f.read().splitlines()
compile_ext = True
CLASSIFIERS = [
'Development Status :: 2 - Pre-Alpha',
'Intended Audience :: Science/Research',
'Intended Audience :: Developers',
'License :: OSI Approved :: BSD License',
'Programming Language :: Python',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 3',
]
args = dict(
name='vzlog',
version='0.1.8',
url="https://github.com/gustavla/vzlog",
description=("Python tool for logging rich content, "
"particularly plots and images"),
author='<NAME>',
author_email='<EMAIL>',
install_requires=required,
packages=[
'vzlog',
'vzlog.image',
],
license='BSD',
classifiers=CLASSIFIERS,
)
if compile_ext:
args['setup_requires'] = ['numpy', 'cython']
args['ext_modules'] = cythonize("vzlog/image/resample.pyx")
args['include_dirs'] = [np.get_include()]
setup(**args)
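
# Local development install (standard setuptools workflow):
#   pip install -e .
# On Read the Docs the READTHEDOCS variable is set, so the Cython extension
# build above is skipped and only the docs requirements are used.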
| 1.601563 | 2 |