| max_stars_repo_path (string, 3-269 chars) | max_stars_repo_name (string, 4-119 chars) | max_stars_count (int64, 0-191k) | id (string, 1-7 chars) | content (string, 6-1.05M chars) | score (float64, 0.23-5.13) | int_score (int64, 0-5) |
|---|---|---|---|---|---|---|
tests/test_security.py
|
jaraco/jaraco.windows
| 21
|
12780551
|
from jaraco.windows import security
def test_get_security_attributes_for_user():
security.get_security_attributes_for_user()
| 1.375
| 1
|
keylime/tornado_requests.py
|
mit-ll/MIT-keylime
| 0
|
12780552
|
#!/usr/bin/env python3
'''
SPDX-License-Identifier: BSD-2-Clause
Copyright 2017 Massachusetts Institute of Technology.
'''
import asyncio
import json
import yaml
try:
from yaml import CSafeLoader as SafeLoader, CSafeDumper as SafeDumper
except ImportError:
from yaml import SafeLoader as SafeLoader, SafeDumper as SafeDumper
from tornado import httpclient, platform
from keylime import common
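# Note (added for clarity): the helper below issues requests through Tornado's
# AsyncHTTPClient. Query parameters are appended to the URL by hand, the scheme is
# switched to https when an SSL context is supplied, and both successful and failed
# requests are wrapped in tornado_response so callers can inspect status code and
# body uniformly.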
async def request(method,url,params=None,data=None,context=None):
http_client = httpclient.AsyncHTTPClient()
if params is not None and len(list(params.keys()))>0:
url+='?'
for key in list(params.keys()):
url+="%s=%s&"%(key,params[key])
url=url[:-1]
if context is not None:
url = url.replace('http://','https://',1)
try:
request = httpclient.HTTPRequest(url=url,
method=method,
ssl_options=context,
body=data)
response = await http_client.fetch(request)
except httpclient.HTTPError as e:
if e.response is None:
return tornado_response(500,str(e))
return tornado_response(e.response.code,e.response.body)
except ConnectionError as e:
return tornado_response(599,"Connection error: %s"%e)
if response is None:
return None
return tornado_response(response.code,response.body)
def is_refused(e):
if hasattr(e,'strerror'):
return "Connection refused" in e.strerror
else:
return False
class tornado_response():
def __init__(self,code,body):
self.status_code = code
self.body = body
def json(self):
try:
retval = json.loads(self.body)
except Exception as e:
retval = [self.body,str(e)]
return retval
def yaml(self):
try:
retval = yaml.load(self.body, Loader=SafeLoader)
except Exception as e:
retval = [self.body,str(e)]
return retval
| 2.296875
| 2
|
condor/particle/particle_map.py
|
Toonggg/condor
| 20
|
12780553
|
<filename>condor/particle/particle_map.py
# -----------------------------------------------------------------------------------------------------
# CONDOR
# Simulator for diffractive single-particle imaging experiments with X-ray lasers
# http://xfel.icm.uu.se/condor/
# -----------------------------------------------------------------------------------------------------
# Copyright 2016 <NAME>, <NAME>, <NAME>
# Condor is distributed under the terms of the BSD 2-Clause License
# -----------------------------------------------------------------------------------------------------
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
# ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# -----------------------------------------------------------------------------------------------------
# General note:
# All variables are in SI units by default. Exceptions explicit by variable name.
# -----------------------------------------------------------------------------------------------------
from __future__ import print_function, absolute_import # Compatibility with python 2 and 3
import sys
import numpy
#from scipy.interpolate import RegularGridInterpolator
import logging
logger = logging.getLogger(__name__)
import condor
import condor.utils.log
from condor.utils.log import log_and_raise_error,log_warning,log_info,log_debug
from condor.utils.variation import Variation
import condor.utils.spheroid_diffraction
import condor.utils.diffraction
import condor.utils.bodies
import condor.utils.emdio
from .particle_abstract import AbstractContinuousParticle
ENABLE_MAP_INTERPOLATION = False
class ParticleMap(AbstractContinuousParticle):
r"""
Class for a particle model
*Model:* Refractive index map sampled on a cubic grid (continuum approximation)
Args:
:geometry (str): Geometry type
*Choose one of the following options:*
- ``'custom'`` - provide map either with an HDF5 file (``map3d_filename``, ``map3d_dataset``) or with a numpy array (``map3d``)
- ``'icosahedron'`` - create map of a uniformly filled icosahedron
- ``'cube'`` - create map of a uniformly filled cube
- ``'sphere'`` - create map of a uniformly filled sphere
- ``'spheroid'`` - create map of a uniformly filled spheroid
:diameter (float): Particle diameter (not map diameter)
Kwargs:
:diameter_variation (str): See :meth:`condor.particle.particle_abstract.AbstractContinuousParticle.set_diameter_variation` (default ``None``)
:diameter_spread (float): See :meth:`condor.particle.particle_abstract.AbstractContinuousParticle.set_diameter_variation` (default ``None``)
:diameter_variation_n (int): See :meth:`condor.particle.particle_abstract.AbstractContinuousParticle.set_diameter_variation` (default ``None``)
:dx: Distance between grid points of the map. This needs to be specified only if ``geometry='custom'``. Depending on whether the geometry is specified by file (``map3d_filename``, ``map3d_dataset``) or by numpy array (``map3d``), see :meth:`set_custom_geometry_by_h5file` or :meth:`set_custom_geometry_by_array` respectively for more documentation (default ``None``)
:map3d: See :meth:`set_custom_geometry_by_array` (default ``None``)
:map3d_filename: See :meth:`set_custom_geometry_by_h5file` (default ``None``)
:map3d_dataset: See :meth:`set_custom_geometry_by_h5file` (default ``None``)
:emd_id: See :meth:`set_custom_geometry_by_emd_id` (default ``None``)
:rotation_values (array): See :meth:`condor.particle.particle_abstract.AbstractParticle.set_alignment` (default ``None``)
:rotation_formalism (str): See :meth:`condor.particle.particle_abstract.AbstractParticle.set_alignment` (default ``None``)
:rotation_mode (str): See :meth:`condor.particle.particle_abstract.AbstractParticle.set_alignment` (default ``None``)
:flattening (float): (Mean) value of :math:`a/c`, takes only effect if ``geometry='spheroid'`` (default ``0.75``)
:number (float): Expectation value for the number of particles in the interaction volume. (default ``1.``)
:arrival (str): Arrival of particles at the interaction volume can be either ``'random'`` or ``'synchronised'``. If ``'synchronised'``, at every event the number of particles in the interaction volume equals the rounded value of ``number``. If ``'random'``, the number of particles is Poissonian and ``number`` is the expectation value. (default ``'synchronised'``)
:position (array): See :class:`condor.particle.particle_abstract.AbstractParticle` (default ``None``)
:position_variation (str): See :meth:`condor.particle.particle_abstract.AbstractParticle.set_position_variation` (default ``None``)
:position_spread (float): See :meth:`condor.particle.particle_abstract.AbstractParticle.set_position_variation` (default ``None``)
:position_variation_n (int): See :meth:`condor.particle.particle_abstract.AbstractParticle.set_position_variation` (default ``None``)
:material_type (str): See :meth:`condor.particle.particle_abstract.AbstractContinuousParticle.set_material` (default ``'water'``)
:massdensity (float): See :meth:`condor.particle.particle_abstract.AbstractContinuousParticle.set_material` (default ``None``)
:atomic_composition (dict): See :meth:`condor.particle.particle_abstract.AbstractContinuousParticle.set_material` (default ``None``)
:electron_density (float): See :meth:`condor.particle.particle_abstract.AbstractContinuousParticle.set_material` (default ``None``)
"""
def __init__(self,
geometry, diameter = None,
diameter_variation = None, diameter_spread = None, diameter_variation_n = None,
dx = None,
map3d = None,
map3d_filename = None, map3d_dataset = None,
emd_id = None,
rotation_values = None, rotation_formalism = None, rotation_mode = "extrinsic",
flattening = 0.75,
number = 1., arrival = "synchronised",
position = None, position_variation = None, position_spread = None, position_variation_n = None,
material_type = None, massdensity = None, atomic_composition = None, electron_density = None):
# Initialise base class
AbstractContinuousParticle.__init__(self,
diameter=diameter, diameter_variation=diameter_variation, diameter_spread=diameter_spread, diameter_variation_n=diameter_variation_n,
rotation_values=rotation_values, rotation_formalism=rotation_formalism, rotation_mode=rotation_mode,
number=number, arrival=arrival,
position=position, position_variation=position_variation, position_spread=position_spread, position_variation_n=position_variation_n,
material_type=material_type, massdensity=massdensity, atomic_composition=atomic_composition, electron_density=electron_density)
# Check for valid geometry
if geometry not in ["icosahedron", "cube", "sphere", "spheroid", "custom"]:
log_and_raise_error(logger, "Cannot initialize %s because \'%s\' is not a valid argument for \'geometry\'." % (kwargs["geometry"], self.__class__.__name__))
sys.exit(1)
self.geometry = geometry
# Has effect only for spheroids
self.flattening = flattening
# Init cache
self._cache = {}
self._dx_orig = None
self._map3d_orig = None
if geometry == "custom":
if map3d is not None:
if dx is None:
log_and_raise_error(logger, "Cannot initialize custom geometry with \'map3d\' without known grid spacing (\'dx\').")
sys.exit(1)
else:
log_debug(logger, "Attempting to initialise custom geometry with \'map3d\'.")
if map3d_filename is not None or map3d_dataset is not None or emd_id is not None:
log_and_raise_error(logger, "Cannot initialize custom geometry because of ambiguous keyword arguments.")
sys.exit(1)
self.set_custom_geometry_by_array(map3d, dx)
elif map3d_filename is not None and map3d_dataset is not None:
if dx is None:
log_and_raise_error(logger, "You are trying to initialise the map with an HDF5 file. You also need to provide the grid spacing \'dx\'")
sys.exit(1)
log_debug(logger, "Attempting to initialise custom geometry with \'map3d_filename\', \'map3d_dataset\' and \'dx\'.")
if not map3d_filename.endswith(".h5"):
log_and_raise_error(logger, "Map file is not an HDF5 file!")
sys.exit(1)
if map3d is not None or emd_id is not None:
log_and_raise_error(logger, "Cannot initialize custom geometry because of ambiguous keyword arguments.")
sys.exit(1)
self.set_custom_geometry_by_h5file(map3d_filename, map3d_dataset, dx)
elif map3d_filename is not None:
if not map3d_filename.endswith(".map") and not map3d_filename.endswith(".mrc"):
log_and_raise_error(logger, "Map file is not an MRC/MAP file!")
sys.exit(1)
self.set_custom_geometry_by_mrcfile(map3d_filename)
elif emd_id is not None:
log_debug(logger, "Attempting to initialise custom geometry with \'emd_id\'.")
if map3d_filename is not None or map3d_dataset is not None or map3d is not None or dx is not None:
log_and_raise_error(logger, "Cannot initialize custom geometry because of ambiguous keyword arguments.")
sys.exit(1)
self.set_custom_geometry_by_emd_id(emd_id)
if diameter is None:
self.diameter_mean = self._dx_orig * self._map3d_orig.shape[-1]
def get_conf(self):
"""
Get configuration in form of a dictionary. Another identically configured ParticleMap instance can be initialised by:
.. code-block:: python
conf = P0.get_conf() # P0: already existing ParticleMap instance
P1 = condor.ParticleMap(**conf) # P1: new ParticleMap instance with the same configuration as P0
"""
conf = {}
conf.update(AbstractContinuousParticle.get_conf(self))
conf["geometry"] = self.geometry
if self.geometry == "custom":
m,dx = self.get_original_map()
conf["map3d"] = m
conf["dx"] = dx
if self.geometry == "spheroid":
conf["flattening"] = self.flattening
return conf
def get_next(self):
"""
Iterate the parameters and return them as a dictionary
"""
O = AbstractContinuousParticle.get_next(self)
O["particle_model"] = "map"
O["geometry"] = self.geometry
if self.geometry == "spheroid":
O["flattening"] = self.flattening
return O
def set_custom_geometry_by_array(self, map3d, dx):
"""
Set map from numpy array
Args:
:map3d (array): 4D numpy array (material index, z, y, x) of float values. If a material is defined (material not ``None``) the values of the map scale the complex refractive index of the material. If no material is defined (materials is ``None``) the map will be cast to complex values and used without any rescaling.
:dx (float): Grid spacing in unit meter
"""
# Check shape
s = numpy.array(map3d.shape)
if numpy.any(s[-3:]!=s[-1]):
log_and_raise_error(logger, "Condor only accepts maps with equal spatial dimensions. Current shape is: %s" % str(s[-3:]))
if self.materials is None:
# Complex map(s) = refractive index map
# Check input
if len(s) == 3:
map3d = [map3d]
if len(s) < 3 or len(s) > 4:
log_and_raise_error(logger, "map3d has %i dimensions but should have 3 or 4." % len(s))
return
# Load map(s)
_map3d = numpy.asarray(map3d)
else:
# Real map(s) to be scaled by material's complex refractive index
# Check input
if len(s) not in [3,4]:
log_and_raise_error(logger, "map3d has %i dimensions but it has to have either 3 or 4." % len(s))
return
# Load map(s)
if len(s) == 3:
n_mat = len(self.materials)
s = numpy.array([n_mat] + list(s))
_map3d = numpy.array(n_mat*[map3d], dtype=numpy.float64)
else:
if s[0] != len(self.materials):
log_and_raise_error(logger, "The first dimension of the map (%i) does not equal the number of specified materials (%i)." % (s[0], len(self.materials)))
return
_map3d = numpy.asarray(map3d, dtype=numpy.float64)
self._map3d_orig = _map3d
self._dx_orig = dx
self._set_cache(_map3d, dx, geometry="custom")
def set_custom_geometry_by_h5file(self, map3d_filename, map3d_dataset, dx):
"""
Load map from dataset in HDF5 file
If a material is defined (``material_type`` is not ``None``) the absolute values of the map will be rescaled by the complex refractive index of the material. If no material is defined (``material_type=None``) the map will be cast to complex values and used without any rescaling.
Args:
:map3d_filename (str): Location of the HDF5 file that contains the map data
:map3d_dataset (str): Dataset location in the file. The dataset must have three equal dimensions of float values.
:dx: Grid spacing in unit meter
"""
import h5py
with h5py.File(map3d_filename,"r") as f:
if map3d_dataset is not None:
ds = map3d_dataset
elif len(f.keys()) == 1:
ds = list(f.keys())[0]
else:
log_and_raise_error(logger, "No dataset specified where to find the map.")
if len(f[ds].shape) == 4:
map3d = numpy.array(f[ds][:,:,:,:])
elif len(f[ds].shape) == 3:
map3d = numpy.array([f[ds][:,:,:]])
else:
log_and_raise_error(logger, "Dataset has %i dimensions but it has to have either 3 or 4." % len(f[ds].shape))
return
self.set_custom_geometry_by_array(map3d, dx)
def set_custom_geometry_by_emd_id(self, emd_id, offset=None, factor=None):
"""
Fetch map from the EMD by id code.
The map will be preprocessed by applying an offset and rescaling and by padding the water background with zeros.
Finally, the average value of the map will be rescaled by the refractive index of the associated material.
Args:
:emd_id (str): EMD ID code.
:offset (float): Offset value of the map (MAP = (EM_DATA + OFFSET) X FACTOR)
:factor (float): Rescale factor of the map (MAP = (EM_DATA + OFFSET) X FACTOR)
"""
map3d, dx = condor.utils.emdio.fetch_map(emd_id)
if offset is None and factor is None:
ed_water = condor.utils.material.AtomDensityMaterial(material_type="water").get_electron_density()
if len(self.materials) > 1:
log_and_raise_error(logger, "More than one material defined. This is incompatible with automatic scaling of an EMD map.")
sys.exit(1)
ed_particle = self.materials[0].get_electron_density()
map3d = condor.utils.emdio.preproc_map_auto(map3d, ed_water=ed_water, ed_particle=ed_particle)
else:
map3d = condor.utils.emdio.preproc_map_manual(map3d, offset=offset, factor=factor)
self.set_custom_geometry_by_array(map3d, dx)
def set_custom_geometry_by_mrcfile(self, filename, offset=None, factor=None):
"""
Read map from the MRC file (CCP4 file format, see http://www.ccp4.ac.uk/html/maplib.html).
The map will be preprocessed by applying an offset and rescaling and by padding the water background with zeros.
Finally, the average value of the map will be rescaled by the refractive index of the associated material.
Args:
:filename (str): Filename of MRC file.
:offset (float): Offset value of the map (MAP = (EM_DATA + OFFSET) X FACTOR)
:factor (float): Rescale factor of the map (MAP = (EM_DATA + OFFSET) X FACTOR)
"""
map3d, dx = condor.utils.emdio.read_map(filename)
if offset is None and factor is None:
ed_water = condor.utils.material.AtomDensityMaterial(material_type="water").get_electron_density()
if len(self.materials) > 1:
log_and_raise_error(logger, "More than one material defined. This is incompatible with automatic scaling of an EMD map.")
sys.exit(1)
ed_particle = self.materials[0].get_electron_density()
map3d = condor.utils.emdio.preproc_map_auto(map3d, ed_water=ed_water, ed_particle=ed_particle)
else:
map3d = condor.utils.emdio.preproc_map_manual(map3d, offset=offset, factor=factor)
self.set_custom_geometry_by_array(map3d, dx)
def get_new_dn_map(self, O, dx_required, dx_suggested, photon_wavelength):
"""
Return a new refractive index map
Args:
:O (dict): Parameter dictionary as returned from :meth:`condor.particle.particle_map.get_next`
:dx_required (float): Required resolution (grid spacing) of the map. An error is raised if the resolution of the map is too low
:dx_suggested (float): Suggested resolution (grid spacing) of the map. If the map has a very high resolution it will be interpolated to the suggested resolution value
:photon_wavelength (float): Photon wavelength in unit meter
"""
m,dx = self.get_new_map(O=O, dx_required=dx_required, dx_suggested=dx_suggested)
if self.materials is not None:
dn = numpy.zeros(shape=(m.shape[1], m.shape[2], m.shape[3]), dtype=numpy.complex128)
for mat_i, m_i in zip(self.materials, m):
dn_i = mat_i.get_dn(photon_wavelength=photon_wavelength)
dn += m_i * dn_i
else:
dn = m[0]
return dn,dx
def get_current_map(self):
"""
Return the current map
"""
return self._map3d, self._dx
def get_original_map(self):
"""
Return the original map
"""
return self._map3d_orig, self._dx_orig
def _get_map3d(self, O = None, dx_required = None, dx_suggested = None):
if O is not None:
m,dx = self.get_new_map(O, dx_required, dx_suggested)
else:
m,dx = self.get_current_map()
return m, dx
def _set_cache(self, map3d, dx, geometry, diameter=None, flattening=None):
self._cache = {
"map3d" : map3d,
"dx" : dx,
"geometry" : geometry,
"diameter" : diameter,
"flattening" : flattening,
}
def _is_map_in_cache(self, O, dx_required):
# Empty cache?
if not self._cache:
return False
# Custom map?
elif O["geometry"] == "custom":
return False
# Correct geometry?
elif self._cache["geometry"] != O["geometry"]:
return False
# Correct size?
elif abs(self._cache["diameter"] - O["diameter"]) > 1E-10:
return False
# Correct spheroid flattening?
elif O["geometry"] == "spheroid":
if abs(self._cache["flattening"] - O["flattening"]) > 1E-10:
return False
# Sufficient resolution?
elif self._cache["dx"] > dx_required:
return False
else:
return True
def get_new_map(self, O, dx_required, dx_suggested):
"""
Return new map with given parameters
Args:
:O (dict): Parameter dictionary as returned from :meth:`condor.particle.particle_map.get_next`
:dx_required (float): Required resolution (grid spacing) of the map. An error is raised if the resolution of the map is too low
:dx_suggested (float): Suggested resolution (grid spacing) of the map. If the map has a very high resolution it will be interpolated to the suggested resolution value
"""
if O["geometry"] in ["icosahedron", "sphere", "spheroid", "cube"]:
if not self._is_map_in_cache(O, dx_required):
dx = dx_suggested
n_mat = len(self.materials)
if O["geometry"] == "icosahedron":
m_tmp = self._get_map_icosahedron(O["diameter"]/2., dx)
elif O["geometry"] == "spheroid":
a = condor.utils.spheroid_diffraction.to_spheroid_semi_diameter_a(O["diameter"],O["flattening"])
c = condor.utils.spheroid_diffraction.to_spheroid_semi_diameter_c(O["diameter"],O["flattening"])
m_tmp = self._get_map_spheroid(a, c, dx)
elif O["geometry"] == "sphere":
m_tmp = self._get_map_sphere(O["diameter"]/2., dx)
elif O["geometry"] == "cube":
m_tmp = self._get_map_cube(O["diameter"], dx)
else:
log_and_raise_error(logger, "Particle map geometry \"%s\" is not implemented. Change your configuration and try again." % O["geometry"])
sys.exit(1)
m = numpy.array(n_mat * [m_tmp])
self._set_cache(map3d=m,
dx=dx,
geometry=O["geometry"],
diameter=O["diameter"],
flattening=(None if O["geometry"] != "spheroid" else O["flattening"]))
else:
log_debug(logger, "No need for calculating a new map. Reading map from cache.")
m = self._cache["map3d"]
dx = self._cache["dx"]
elif O["geometry"] == "custom":
rescale_factor = O["diameter"] / self.diameter_mean
dx_rescaled = self._cache["dx"] * rescale_factor
# Current map too coarsely sampled?
if (dx_rescaled/dx_required > 1.) and not numpy.isclose(dx_rescaled/dx_required, 1.):
# Cached map (original) also too coarsely sampled?
if self._dx_orig/dx_required > 1. and not numpy.isclose(self._dx_orig/dx_required, 1.):
# Not fine enough -> exit
log_and_raise_error(logger, "Resolution of given custom map is insufficient for simulation. Required is at most %e m vs. provided %e m." % (dx_required, self._dx_orig))
sys.exit(1)
else:
# Change back to original fine map
self._set_cache(map3d=self._map3d_orig,
dx=self._dx_orig,
geometry="custom")
# Can we downsample current map?
# MAX: We would do this only for performance reasons but have not found a good way of downsampling without introducing artifacts
#if (dx_suggested/dx_rescaled >= 2.) and (dx_suggested/self._dx_orig >= 2.) and ENABLE_MAP_INTERPOLATION:
# print("ENABLE_MAP_INTERPOLATION=%i" % ENABLE_MAP_INTERPOLATION)
# N1 = self._map3d_orig.shape[0]
# m1 = numpy.zeros(shape=(N1,N1,N1), dtype=numpy.float64)
# m1[:self._map3d_orig.shape[0],:self._map3d_orig.shape[0],:self._map3d_orig.shape[0]] = self._map3d_orig[:,:,:]
# fm1 = numpy.fft.fftshift(numpy.fft.ifftn(m1))
# N1 = m1.shape[0]
# N2 = int(numpy.ceil(N1 * self._dx_orig / dx_suggested))
# x1 = numpy.linspace(-0.5,0.5,N2)*(1-0.5/N2)
# Z,Y,X = numpy.meshgrid(x1,x1,x1,indexing="ij")
# coords = numpy.array([[z,y,x] for z,y,x in zip(Z.ravel(),Y.ravel(),X.ravel())])
# m2 = abs(numpy.fft.fftshift(condor.utils.nfft.nfft(fm1,coords).reshape((N2,N2,N2))))
# #from pylab import *
# #imsave("m1.png", m1.sum(0))
# #imsave("m2.png", m2.sum(0))
# self._dx = self._dx_orig * float(N1)/float(N2)
# self._map3d = m2 / m2.sum() * m1.sum()
m = self._cache["map3d"]
dx = rescale_factor * self._cache["dx"]
return m,dx
def _get_map_sphere(self, radius, dx):
nR = radius/dx
N = int(round((nR*1.2)*2))
m = condor.utils.bodies.make_sphere_map(N,nR)
return numpy.asarray(m, dtype=numpy.float64)
def _get_map_spheroid(self, a, c, dx, rotation=None):
# maximum radius
Rmax = max([a,c])
# maximum radius in pixel
nRmax = Rmax/dx
# dimensions in pixel
nA = a/dx
nC = c/dx
# leaving a bit of free space around spheroid
N = int(round((nRmax*1.2)*2))
m = condor.utils.bodies.make_spheroid_map(N,nA,nC,rotation)
return numpy.asarray(m, dtype=numpy.float64)
def _get_map_icosahedron(self, radius, dx):
# icosahedon size parameter
a = radius*(16*numpy.pi/5.0/(3+numpy.sqrt(5)))**(1/3.0)
# radius at corners in meter
Rmax = numpy.sqrt(10.0+2*numpy.sqrt(5))*a/4.0
# radius at corners in pixel
nRmax = Rmax/dx
# leaving a bit of free space around icosahedron
N = int(numpy.ceil(2.3*(nRmax)))
log_info(logger,"Building icosahedron with radius %e (%i pixel) in %i x %i x %i voxel cube." % (radius,nRmax,N,N,N))
m = condor.utils.bodies.make_icosahedron_map(N,nRmax)
return numpy.asarray(m, dtype=numpy.float64)
def _get_map_cube(self, a, dx):
# edge_length in pixels
nel = a/dx
# leaving a bit of free space around
N = int(numpy.ceil(2.3*nel))
# make map
X,Y,Z = 1.0*numpy.mgrid[0:N,0:N,0:N]
X = X - (N-1)/2.
Y = Y - (N-1)/2.
Z = Z - (N-1)/2.
DX = abs(X)-nel/2.
DY = abs(Y)-nel/2.
DZ = abs(Z)-nel/2.
D = numpy.array([DZ,DY,DX])
m = numpy.zeros(shape=(N,N,N))
Dmax = D.max(0)
m[Dmax<-0.5] = 1.
temp = (Dmax<0.5)*(m==0.)
d = Dmax[temp]
m[temp] = 0.5-d
return numpy.asarray(m, dtype=numpy.float64)
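# --- Illustrative usage sketch (added here; not part of the upstream Condor source). ---
# Based on the class docstring above: build a uniformly filled sphere particle and
# round-trip its configuration through get_conf(). The diameter and material are
# arbitrary example values. (Run with `python -m condor.particle.particle_map` so the
# relative imports resolve.)
if __name__ == "__main__":
    p0 = ParticleMap(geometry="sphere", diameter=50E-9, material_type="water")
    conf = p0.get_conf()
    p1 = ParticleMap(**conf)  # identically configured instance, cf. the get_conf() docstring
    print("Rebuilt particle with geometry:", conf["geometry"])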
| 1.304688
| 1
|
menus.py
|
Talendar/neuroevolutionary_snake
| 11
|
12780554
|
<gh_stars>10-100
""" Implements the game's menus.
@author <NAME> (Talendar)
"""
import pygame
import pygame_menu
import tkinter as tk
from tkinter.filedialog import askopenfilename, askdirectory
import config
from snake_game import SnakeGame
from player import HumanPlayer
from evolution.snake_ai import SnakePopulation
from evolution.evolution_visualizer import EvolutionVisualizer
import neural_network.neural_network as nn
class MainMenu:
def __init__(self, height, width, title, theme=pygame_menu.themes.THEME_BLUE):
self._width = width
self._height = height
self._theme = theme
self._screen = pygame.display.set_mode((width, height))
self._menu = pygame_menu.Menu(height, width, title, theme=theme)
self._menu.add_button('Play', self._play_game)
self._menu.add_button('Evolve', self._evolve)
self._menu.add_button('Visualize', self._visualize)
self._menu.add_button('Settings', self._settings)
self._menu.add_vertical_margin(40)
self._menu.add_button('Quit', pygame_menu.events.EXIT)
self._menu.mainloop(self._screen)
def _play_game(self):
SnakeGame(HumanPlayer()).start()
pygame.display.set_mode((self._width, self._height))
def _evolve(self):
EvolutionMenu(self._height, self._width, theme=self._theme)
pygame.display.set_mode((self._width, self._height))
def _visualize(self):
VisualizationMenu(self._height, self._width, theme=self._theme)
pygame.display.set_mode((self._width, self._height))
def _settings(self):
SettingsMenu(self._height, self._width, theme=self._theme)
pygame.display.set_mode((self._width, self._height))
class EvolutionMenu:
def __init__(self, height, width, title="Evolution Config", theme=pygame_menu.themes.THEME_BLUE):
self._width, self._height = width, height
self._screen = pygame.display.set_mode((width, height))
self._menu = pygame_menu.Menu(height, width, title, theme=theme)
self._ev_button = self._menu.add_button("Evolve!", self._evolve)
self._menu.add_vertical_margin(40)
self._base_model_path = ""
self._fs_button = self._menu.add_button("Base model: none", None)
self._fs_button.update_callback(self._select_file)
self._pop_size = 20
self._menu.add_text_input('Pop. size: ', default=self._pop_size, input_type=pygame_menu.locals.INPUT_INT, onchange=self._update_pop_size)
self._gens = 25
self._menu.add_text_input('Generations: ', default=self._gens, input_type=pygame_menu.locals.INPUT_INT, onchange=self._update_gen_count)
self._menu.add_vertical_margin(40)
self._menu.add_button("Back", self._menu.disable)
self._menu.add_vertical_margin(40)
self._menu.add_label("(check config.py for more settings)", font_size=20)
self._menu.mainloop(self._screen)
def _select_file(self):
root = tk.Tk()
self._base_model_path = askopenfilename(initialdir="./evolution/pre_trained_models")
root.destroy()
if len(self._base_model_path) > 0:
self._fs_button.set_title("Base model: [...]" + self._base_model_path[-12:])
def _update_gen_count(self, count):
self._gens = count
def _update_pop_size(self, count):
self._pop_size = count
def _evolve(self):
pop = SnakePopulation(size=self._pop_size,
pre_trained_brain=(
None if self._fs_button.get_title() == "Base model: none"
else nn.NeuralNetwork.load(self._base_model_path)))
self._screen = pygame.display.set_mode((380, 90))
self._screen.fill((0, 0, 0))
self._screen.blit(pygame.font.SysFont("monospace", 24, bold=True).render("Evolving...", False, (255, 247, 0)), (10, 20))
self._screen.blit(pygame.font.SysFont("monospace", 16).render("(check real time logging on stdout)", False, (255, 247, 0)), (8, 55))
pygame.display.update()
pop.evolve(self._gens)
pygame.display.set_mode((self._width, self._height))
class VisualizationMenu:
def __init__(self, height, width, title="Visualize", theme=pygame_menu.themes.THEME_BLUE):
self._width, self._height = width, height
self._screen = pygame.display.set_mode((width, height))
self._menu = pygame_menu.Menu(height, width, title, theme=theme)
self._vis_button = self._menu.add_button("Visualize!", self._visualize)
self._menu.add_vertical_margin(40)
self._pop = None
self._pop_dir = ""
self._fs_button = self._menu.add_button("Pop. directory: -", None)
self._fs_button.update_callback(self._select_dir)
self._gen = 20
self._gen_txt_widget = self._menu.add_text_input('Generation: ', default=self._gen, onchange=self._update_gen,
input_type=pygame_menu.locals.INPUT_INT)
self._menu.add_vertical_margin(40)
self._menu.add_button("Back", self._menu.disable)
self._menu.mainloop(self._screen)
def _select_dir(self):
root = tk.Tk()
self._pop_dir = askdirectory(initialdir="./evolution/populations")
root.destroy()
if self._pop_dir is not None and len(self._pop_dir) > 0:
self._pop_dir += "/"
self._pop = EvolutionVisualizer(self._pop_dir, False)
self._fs_button.set_title("Pop. directory: [...]" + self._pop_dir[-15:])
self._gen_txt_widget.set_value(self._pop.best_gen)
self._update_gen(self._pop.best_gen)
def _update_gen(self, gen):
self._gen = gen
def _visualize(self):
self._pop.start(self._gen)
pygame.display.set_mode((self._width, self._height))
class SettingsMenu:
def __init__(self, height, width, title="Settings", theme=pygame_menu.themes.THEME_BLUE):
self._width, self._height = width, height
self._screen = pygame.display.set_mode((width, height))
self._menu = pygame_menu.Menu(height, width, title, theme=theme)
def update_fps(f): config.FPS = f
self._menu.add_text_input('FPS: ', default=config.FPS, input_type=pygame_menu.locals.INPUT_INT, onchange=update_fps)
self._menu.add_vertical_margin(40)
def update_head(c): config.COLOR_MAP[config.SNAKE_HEAD] = c
self._menu.add_color_input('Head color: ', color_type='rgb', default=config.COLOR_MAP[config.SNAKE_HEAD],
onchange=update_head, font_size=20)
def update_body(c): config.COLOR_MAP[config.SNAKE_BODY] = c
self._menu.add_color_input('Body color: ', color_type='rgb', default=config.COLOR_MAP[config.SNAKE_BODY],
onchange=update_body, font_size=20)
self._menu.add_vertical_margin(40)
def update_dead_head(c): config.DEAD_SNAKE_HEAD_COLOR = c
self._menu.add_color_input('Dead head color: ', color_type='rgb', default=config.DEAD_SNAKE_HEAD_COLOR,
onchange=update_dead_head, font_size=20)
def update_dead_body(c): config.DEAD_SNAKE_BODY_COLOR = c
self._menu.add_color_input('Dead body color: ', color_type='rgb', default=config.DEAD_SNAKE_BODY_COLOR,
onchange=update_dead_body, font_size=20)
self._menu.add_vertical_margin(40)
self._menu.add_button("Back", self._menu.disable)
self._menu.add_vertical_margin(40)
self._menu.add_label("(check config.py for more settings)", font_size=20)
self._menu.mainloop(self._screen)
| 2.953125
| 3
|
tests/core/test_oauth2/test_rfc7591.py
|
YPCrumble/authlib
| 3,172
|
12780555
|
from unittest import TestCase
from authlib.oauth2.rfc7591 import ClientMetadataClaims
from authlib.jose.errors import InvalidClaimError
class ClientMetadataClaimsTest(TestCase):
def test_validate_redirect_uris(self):
claims = ClientMetadataClaims({'redirect_uris': ['foo']}, {})
self.assertRaises(InvalidClaimError, claims.validate)
def test_validate_client_uri(self):
claims = ClientMetadataClaims({'client_uri': 'foo'}, {})
self.assertRaises(InvalidClaimError, claims.validate)
def test_validate_logo_uri(self):
claims = ClientMetadataClaims({'logo_uri': 'foo'}, {})
self.assertRaises(InvalidClaimError, claims.validate)
def test_validate_tos_uri(self):
claims = ClientMetadataClaims({'tos_uri': 'foo'}, {})
self.assertRaises(InvalidClaimError, claims.validate)
def test_validate_policy_uri(self):
claims = ClientMetadataClaims({'policy_uri': 'foo'}, {})
self.assertRaises(InvalidClaimError, claims.validate)
def test_validate_jwks_uri(self):
claims = ClientMetadataClaims({'jwks_uri': 'foo'}, {})
self.assertRaises(InvalidClaimError, claims.validate)
| 2.75
| 3
|
src/document/models.py
|
juliannovoa/SmartScribble
| 0
|
12780556
|
# Copyright 2020 <NAME>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from ckeditor.fields import RichTextField
from django.contrib.auth.models import User
from django.db import models
from django.utils import timezone
class Document(models.Model):
""" This model describes a document """
title = models.CharField(max_length=50)
description = models.CharField(max_length=100, blank=True)
body = RichTextField(blank=True)
user = models.ForeignKey(User, on_delete=models.CASCADE)
created = models.DateTimeField(editable=False)
def save(self, *args, **kwargs):
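# Set the creation timestamp only on the first save, i.e. when the instance has no primary key yet.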
if not self.id:
self.created = timezone.now()
return super(Document, self).save(*args, **kwargs)
| 2.15625
| 2
|
test6.py
|
Calen0wong/calen1
| 0
|
12780557
|
<gh_stars>0
from PyQt5 import QtCore, QtGui, QtWidgets
class Ui_Form(object):
def setupUi(self, Form):
Form.setObjectName("Form")
Form.resize(653, 483)
self.hxx = QtWidgets.QGraphicsView(Form)
self.hxx.setGeometry(QtCore.QRect(-5, 1, 661, 481))
self.hxx.setObjectName("hxx") # 以上代码是eirc6编译窗口后自动生成的
self.hxx.scene = QtWidgets.QGraphicsScene() # 创建一个图片元素的对象
item = QtWidgets.QGraphicsPixmapItem(p) # 创建一个变量用于承载加载后的图片
self.hxx.scene.addItem(item) # 将加载后的图片传递给scene对象
self.hxx.setScene(self.hxx.scene) # 这个我也不知道是做了个啥
self.retranslateUi(Form) # eirc6编译窗口后自动生成
QtCore.QMetaObject.connectSlotsByName(Form) # eirc6编译窗口后自动生成
def retranslateUi(self, Form):
_translate = QtCore.QCoreApplication.translate
Form.setWindowTitle(_translate("Form", "Form"))
if __name__ == "__main__":
import sys
app = QtWidgets.QApplication(sys.argv)
Form = QtWidgets.QWidget() # the code above was auto-generated by eric6
p = QtGui.QPixmap()
p.load("./newPrefix/line3.png") # 加载图片
ui = Ui_Form()
ui.setupUi(Form)
Form.show()
sys.exit(app.exec_())
| 2.21875
| 2
|
kabuka/test_kabuka.py
|
sdanaipat/kabuka
| 0
|
12780558
|
import os
from pathlib import Path
from unittest import mock
from kabuka import kabuka, get_latest_price
TEST_DATA_DIR = Path(os.path.dirname(os.path.realpath(__file__))) / "test_data"
def test_is_numeric():
assert not kabuka.is_numeric("abc")
assert not kabuka.is_numeric("123a")
assert not kabuka.is_numeric("")
assert not kabuka.is_numeric("this is not a number")
assert not kabuka.is_numeric("a123")
assert not kabuka.is_numeric("a123a")
assert not kabuka.is_numeric("1a123")
assert not kabuka.is_numeric("0.0.0.0")
assert not kabuka.is_numeric("1,234.678")
assert kabuka.is_numeric("123")
assert kabuka.is_numeric(".123")
assert kabuka.is_numeric("123.")
assert kabuka.is_numeric("123.456")
assert kabuka.is_numeric("123_456")
assert kabuka.is_numeric("0.123")
assert kabuka.is_numeric(".123_456")
assert kabuka.is_numeric("123_456.")
assert kabuka.is_numeric("123_456.789_101")
def mocked_requests_get(url):
class MockedResponse:
def __init__(self, text):
self.text = text
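# Map the Yahoo Finance quote URL onto a local HTML fixture; fall back to the
# unknown-symbol page if no fixture exists for the requested symbol.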
uri = url.replace("https://finance.yahoo.com/quote", str(TEST_DATA_DIR)) + ".html"
try:
with open(uri, "rb") as f:
return MockedResponse(f.read().decode("utf8"))
except IOError:
with open(TEST_DATA_DIR / "unknown_symbol.html") as f:
return MockedResponse(f.read())
@mock.patch("requests.get", side_effect=mocked_requests_get)
def test_get_lastest_price(mock_get_latest_price):
# stonk
price = get_latest_price("TSLA")
assert kabuka.is_numeric(price) and float(price) >= 0
# ETF
price = get_latest_price("SPY")
assert kabuka.is_numeric(price) and float(price) >= 0
# tokyo stock exchange
price = get_latest_price("4385.T")
assert kabuka.is_numeric(price) and float(price) >= 0
# invalid symbol
try:
price = get_latest_price("")
except ValueError:
assert True
else:
assert False
# invalid symbol
try:
price = get_latest_price("apple.com")
except ValueError:
assert True
else:
assert False
| 2.75
| 3
|
server/migrations/versions/f24691273ca4_.py
|
Rubilmax/netflux
| 2
|
12780559
|
"""empty message
Revision ID: f24691273ca4
Revises: <PASSWORD>
Create Date: 2019-06-18 13:45:46.250079
"""
# revision identifiers, used by Alembic.
revision = 'f24691273ca4'
down_revision = 'b<PASSWORD>'
from alembic import op
import sqlalchemy as sa
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.create_table('users',
sa.Column('user_id', sa.String(length=300), nullable=False),
sa.Column('first_name', sa.String(length=300), nullable=True),
sa.Column('last_name', sa.String(length=300), nullable=True),
sa.Column('email', sa.String(length=300), nullable=True),
sa.Column('age', sa.Integer(), nullable=True),
sa.Column('gender', sa.String(length=300), nullable=True),
sa.PrimaryKeyConstraint('user_id')
)
op.drop_table('user')
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.create_table('user',
sa.Column('first_name', sa.VARCHAR(length=300), autoincrement=False, nullable=False),
sa.Column('last_name', sa.VARCHAR(length=300), autoincrement=False, nullable=False),
sa.Column('age', sa.INTEGER(), autoincrement=False, nullable=True),
sa.Column('email', sa.VARCHAR(length=300), autoincrement=False, nullable=False),
sa.Column('gender', sa.VARCHAR(length=300), autoincrement=False, nullable=True),
sa.Column('user_id', sa.VARCHAR(length=300), autoincrement=False, nullable=False),
sa.PrimaryKeyConstraint('first_name', 'last_name', name='user_pkey')
)
op.drop_table('users')
# ### end Alembic commands ###
| 1.632813
| 2
|
python/examples/gtk_example.py
|
rambo/arDuBUS
| 3
|
12780560
|
<gh_stars>1-10
#!/usr/bin/env python
"""This is a very simple arDuBUS example reading one button and controlling PWM of one LED, use the provided YML to generate the Arduino code
1. Copy ardubus.yml.example to examples/ardubus.yml and symlink/copy examples/gtk_example_devices.yml to examples/devices.yml
2. Use bin/codegenerator.py examples/devices.yml to generate the Arduino sketch
3. Use Arduino IDE to compile and program your Arduino
4. Connect pushbutton (normally-open type) and a LED as shown in the image
5. Use bin/ardubus_launcher.py examples/ardubus.yml to launch the arDuBUS daemon
6. Launch this program and press the button, button state text should change, then use the slider to adjust the LED
"""
import pygtk
import gtk
import gobject
import dbus
import dbus.mainloop.glib
dbus.mainloop.glib.DBusGMainLoop(set_as_default=True)
from dbushelpers.call_cached import call_cached
class example_program:
def __init__(self, bus):
# Boilerplate
self.mainwindow = gtk.Window(gtk.WINDOW_TOPLEVEL)
self.mainwindow.resize(300,200)
self.mainwindow.set_title('PyGTK Example')
self.mainwindow.connect("delete_event", self.quit)
self.mainwindow.connect("destroy", self.quit)
# Connect to DBus signals
self.bus = bus
self.bus.add_signal_receiver(self.alias_changed, dbus_interface = "fi.hacklab.ardubus", signal_name = "alias_change")
# Divide widow to top & bottom halves
vbox = gtk.VBox(homogeneous=False)
self.top_half = gtk.HBox()
vbox.pack_start(self.top_half, fill=False, expand=False)
self.bottom_half = gtk.HBox()
vbox.pack_start(self.bottom_half, fill=False, expand=False)
self.mainwindow.add(vbox)
# Make a slider widget for the LED
self.led_pwm = gtk.Adjustment(value=0, lower=0, upper=255, step_incr=1.0)
self.led_pwm.connect("value_changed", self.led_pwm_changed)
self.led_pwm_slider = gtk.HScale(adjustment=self.led_pwm)
self.led_pwm_slider.set_digits(0)
sliderlabel = gtk.Label()
sliderlabel.set_markup("<span size='24000'>LED brightness</span>")
self.top_half.pack_start(sliderlabel, fill=False, expand=False)
self.top_half.pack_start(self.led_pwm_slider, fill=True, expand=True)
self.button_state_text = gtk.Label()
self.button_state_text.set_markup("<span size='32000'>Button state: unknown</span>")
self.bottom_half.pack_start(self.button_state_text)
def alias_changed(self, alias, value, sender):
if alias != 'example_button':
print "Got unknown alias %s from %s (state was %s)" % (alias, sender, repr(value))
return
# Remember, inputs are pulled up and button shorts to ground
if value:
self.button_state_text.set_markup("<span size='32000'>Button state: released</span>")
else:
self.button_state_text.set_markup("<span size='32000'>Button state: pressed</span>")
def led_pwm_changed(self, *args):
value = int(self.led_pwm.get_value())
try:
call_cached('/fi/hacklab/ardubus/gtk_example_board', 'set_alias', 'dimmable_led', value)
# Above is shorthand for this
# obj = self.bus.get_object('fi.hacklab.ardubus.gtk_example_board', '/fi/hacklab/ardubus/gtk_example_board')
# obj.set_alias('dimmable_led', value)
except dbus.exceptions.DBusException,e:
# No arduino
print "Could not set value %d via arDuBUS" % value
def quit(self, widget, data=None):
gtk.main_quit()
def mainloop(self):
self.mainwindow.show_all()
gtk.main()
if __name__ == '__main__':
bus = dbus.SessionBus()
w = example_program(bus)
w.mainloop()
| 3.625
| 4
|
Translator/Translator.py
|
ToastFreak777/Random_Projects
| 0
|
12780561
|
def translator(phrase):
translated = ""
for letter in phrase:
if letter.lower() in "aeiou":
if letter.isupper():
translated = translated + "G"
else:
translated = translated + "g"
else: # I have just experienced my first indentation error [this might become a problem... I thought it was the code, not the indentation]
translated = translated + letter
return translated
print(translator(input("Enter a phrase: ")))
| 3.84375
| 4
|
src/HABApp/util/__init__.py
|
DerOetzi/HABApp
| 0
|
12780562
|
<reponame>DerOetzi/HABApp
from . import functions
from .counter_item import CounterItem
from .period_counter import PeriodCounter
from .threshold import Threshold
from .statistics import Statistics
from . import multimode
from .listener_groups import EventListenerGroup
# 27.04.2020 - this can be removed in some time
from .multimode import MultiModeItem
| 1.007813
| 1
|
sutils/slurm_interface/tests/test_resources.py
|
t-mertz/slurm_utils
| 0
|
12780563
|
<reponame>t-mertz/slurm_utils<gh_stars>0
import unittest
from unittest.mock import Mock, patch
from .. import resources
from .. import api as slurm
SINFO_STDOUT_TWO_LINE = "node01 partition 0.00 4/0/0/4 1:4:1 idle 8192 8000 0 (null)\n"\
+"node02 partition 0.00 4/0/0/4 1:4:1 idle 8192 8000 0 (null)\n"
class TestCpuCount(unittest.TestCase):
def test_none(self):
retval = ""
data = slurm.SinfoData(slurm.SinfoResult(retval))
count = {}
self.assertEqual(resources.cpu_count(data), count)
def test_empty_lines_are_deleted(self):
retval = "\n"
data = slurm.SinfoData(slurm.SinfoResult(retval))
count = {}
self.assertEqual(resources.cpu_count(data), count)
def test_single_node(self):
retval = "node01 partition 0.00 0/4/0/4 1:4:1 idle 8192 8000 0 (null)"
data = slurm.SinfoData(slurm.SinfoResult(retval))
count = {"node01": 4}
self.assertEqual(resources.cpu_count(data), count)
def test_two_nodes(self):
retval = "node01 partition 0.00 0/4/0/4 1:4:1 idle 8192 8000 0 (null)\n" \
+"node02 partition 0.00 0/4/0/4 1:4:1 idle 8192 8000 0 (null)"
data = slurm.SinfoData(slurm.SinfoResult(retval))
count = {"node01": 4, "node02": 4}
self.assertEqual(resources.cpu_count(data), count)
class TestIsCPUCommensurate(unittest.TestCase):
def test_5_is_not_commensurate_with_4(self):
retval = "node01 partition 0.00 0/4/0/4 1:4:1 idle 8192 8000 0 (null)\n"\
+"node02 partition 0.00 0/4/0/4 1:4:1 idle 8192 8000 0 (null)\n"
data = slurm.SinfoData(slurm.SinfoResult(retval))
self.assertFalse(resources.is_cpu_commensurate(data, 5))
def test_4_is_commensurate_with_4(self):
retval = "node01 partition 0.00 0/4/0/4 1:4:1 idle 8192 8000 0 (null)\n"\
+"node02 partition 0.00 0/4/0/4 1:4:1 idle 8192 8000 0 (null)\n"
data = slurm.SinfoData(slurm.SinfoResult(retval))
self.assertTrue(resources.is_cpu_commensurate(data, 4))
def test_8_is_commensurate_with_4(self):
retval = "node01 partition 0.00 0/4/0/4 1:4:1 idle 8192 8000 0 (null)\n"\
+"node02 partition 0.00 0/4/0/4 1:4:1 idle 8192 8000 0 (null)\n"
data = slurm.SinfoData(slurm.SinfoResult(retval))
self.assertTrue(resources.is_cpu_commensurate(data, 8))
def test_5_is_not_commensurate_with_4_idle(self):
retval = "node01 partition 0.00 0/4/0/4 1:4:1 idle 8192 8000 0 (null)\n"\
+"node02 partition 0.00 0/4/0/4 1:4:1 idle 8192 8000 0 (null)\n"
data = slurm.SinfoData(slurm.SinfoResult(retval))
self.assertFalse(resources.is_cpu_commensurate(data, 5, status='idle'))
def test_4_is_commensurate_with_4_idle(self):
retval = "node01 partition 0.00 0/4/0/4 1:4:1 idle 8192 8000 0 (null)\n"\
+"node02 partition 0.00 0/4/0/4 1:4:1 idle 8192 8000 0 (null)\n"
data = slurm.SinfoData(slurm.SinfoResult(retval))
self.assertTrue(resources.is_cpu_commensurate(data, 4, status='idle'))
def test_8_is_commensurate_with_4_idle(self):
retval = "node01 partition 0.00 0/4/0/4 1:4:1 idle 8192 8000 0 (null)\n"\
+"node02 partition 0.00 0/4/0/4 1:4:1 idle 8192 8000 0 (null)\n"
data = slurm.SinfoData(slurm.SinfoResult(retval))
self.assertTrue(resources.is_cpu_commensurate(data, 8, status='idle'))
def test_zero_cpus_is_commensurate(self):
retval = "node01 partition 0.00 0/4/0/4 1:4:1 idle 8192 8000 0 (null)\n"\
+"node02 partition 0.00 0/4/0/4 1:4:1 idle 8192 8000 0 (null)\n"
data = slurm.SinfoData(slurm.SinfoResult(retval))
self.assertTrue(resources.is_cpu_commensurate(data, 0))
def test_no_idle_is_not_commensurate(self):
retval = "node01 partition 0.00 4/0/0/4 1:4:1 idle 8192 8000 0 (null)\n"\
+"node02 partition 0.00 4/0/0/4 1:4:1 idle 8192 8000 0 (null)\n"
data = slurm.SinfoData(slurm.SinfoResult(retval))
self.assertTrue(resources.is_cpu_commensurate(data, 8, status='idle'))
class TestFindResources(unittest.TestCase):
def test_zero_request_returns_zero(self):
sinfo_data = slurm.SinfoData(SINFO_STDOUT_TWO_LINE)
self.assertEqual(resources.find_resources(sinfo_data, 0), (0, 0))
def test_single_cpu_returns_four(self):
sinfo_data = slurm.SinfoData(SINFO_STDOUT_TWO_LINE)
self.assertEqual(resources.find_resources(sinfo_data, 1), (4, 1))
def test_four_cpus_returns_four(self):
sinfo_data = slurm.SinfoData(SINFO_STDOUT_TWO_LINE)
self.assertEqual(resources.find_resources(sinfo_data, 4), (4, 1))
def test_too_many_cpus_returns_none(self):
sinfo_data = slurm.SinfoData(SINFO_STDOUT_TWO_LINE)
self.assertEqual(resources.find_resources(sinfo_data, 10), None)
class TestResource(unittest.TestCase):
def test_partition_can_be_retrieved(self):
res = resources.Resource('partition', 10, 2, None)
self.assertEqual(res.partition(), 'partition')
def test_cpus_can_be_retrieved(self):
res = resources.Resource('partition', 10, 2, None)
self.assertEqual(res.cpus(), 10)
def test_nodes_can_be_retrieved(self):
res = resources.Resource('partition', 10, 2, None)
self.assertEqual(res.nodes(), 2)
def test_mem_can_be_retrieved(self):
res = resources.Resource('partition', 10, 2, 1000)
self.assertEqual(res.memory(), 1000)
def test_mem_defaults_to_none(self):
res = resources.Resource('partition', 10, 2)
self.assertEqual(res.memory(), None)
def test_mem_per_cpu_defaults_to_none(self):
res = resources.Resource('partition', 10, 2)
self.assertEqual(res.mem_per_cpu(), None)
# def test_zero_init_raises_ValueError(self):
# self.assertRaises(ValueError, resources.Resource, [])
# self.assertRaises(ValueError, resources.Resource, [0])
# def test_list_of_three(self):
# res = resources.Resource([1, 2, 3])
# self.assertEqual(len(res), 3)
def test_eq_returns_true_for_copy(self):
res1 = resources.Resource('partition', 2, 3, None)
res2 = resources.Resource('partition', 2, 3, None)
self.assertEqual(res1, res2)
def test_eq_returns_false_for_nonequal_nodes(self):
res1 = resources.Resource('partition', 1, 3, None)
res2 = resources.Resource('partition', 1, 2, None)
self.assertNotEqual(res1, res2)
def test_eq_returns_false_for_nonequal_cpus(self):
res1 = resources.Resource('partition', 1, 3, None)
res2 = resources.Resource('partition', 2, 3, None)
self.assertNotEqual(res1, res2)
def test_eq_returns_false_for_nonequal_partitions(self):
res1 = resources.Resource('partition', 1, 3, None)
res2 = resources.Resource('partition1', 1, 3, None)
self.assertNotEqual(res1, res2)
def test_eq_returns_false_for_nonequal_mem(self):
res1 = resources.Resource('partition', 1, 3, 1000)
res2 = resources.Resource('partition', 1, 3, 500)
self.assertNotEqual(res1, res2)
def test_eq_returns_false_for_nonequal_mem_per_cpu(self):
res1 = resources.Resource('partition', 1, 3, None, 100)
res2 = resources.Resource('partition', 1, 3, None, 200)
self.assertNotEqual(res1, res2)
def test_repr_returns_dict(self):
res = resources.Resource('mypartition', 12, 14, 100)
self.assertEqual(repr(res), "<Resource object, partition=mypartition, cpus=12, nodes=14, mem=100, mem_per_cpu=None>")
def test_repr_has_correct_mem_per_cpu(self):
res = resources.Resource('mypartition', 12, 14, None, 200)
self.assertEqual(repr(res), "<Resource object, partition=mypartition, cpus=12, nodes=14, mem=None, mem_per_cpu=200>")
def test_conversion_to_dict(self):
res = resources.Resource('mypartition', 12, 14, 1000)
d = {
'partition' : 'mypartition',
'ntasks' : 12,
'nodes' : 14,
'mem' : 1000,
'mem_per_cpu' : None
}
self.assertEqual(res.to_dict(), d)
def test_conversion_to_short_dict(self):
res = resources.Resource('mypartition', 12, 14, 1000)
d = {
'partition' : 'mypartition',
'ntasks' : 12,
'nodes' : 14
}
self.assertEqual(res.to_short_dict(), d)
class TestSubsetInternal(unittest.TestCase):
def test_empty_and_zero_returns_empty(self):
self.assertEqual(resources._subset_internal([], 0), [])
def test_empty_and_positive_returns_false(self):
self.assertFalse(resources._subset_internal([], 1))
def test_finite_and_zero_returns_empty(self):
self.assertEqual(resources._subset_internal([1, 2, 3], 0), [])
def test_n_eq_sum_returns_input(self):
self.assertEqual(resources._subset_internal([2, 2], 4), [2, 2])
def test_n_smaller_sum_returns_subset(self):
self.assertEqual(resources._subset_internal([2, 2, 3, 4], 4), [4])
def test_non_commensurate(self):
self.assertEqual(resources._subset_internal([2, 2, 4], 5), [2, 4])
def test_cluster_many(self):
self.assertEqual(resources._subset_internal([16, 16, 16, 20, 20, 20], 48), [16, 16 ,16])
def test_cluster_one(self):
self.assertEqual(resources._subset_internal([16, 16, 16, 20, 20, 48], 48), [48])
def test_cluster_incommensurate(self):
self.assertEqual(sorted(resources._subset_internal([16, 16, 20, 20], 48)), [16, 16, 20])
def test_xeon_cluster_48(self):
self.assertEqual(sorted(resources._subset_internal(
[16, 16, 16, 16, 16, 16, 16, 16, 16, 20, 20, 20, 20, 20, 20, 20, 24], 48)), [16, 16, 16])
def test_xeon_cluster_24(self):
self.assertEqual(sorted(resources._subset_internal(
[16, 16, 16, 16, 16, 16, 16, 16, 16, 20, 20, 20, 20, 20, 20, 20], 24)), [16, 16])
def test_big_cluster_48(self):
self.assertEqual(sorted(resources._subset_internal(
[48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 64, 64, 64, 64, 24], 48)), [48])
def test_big_cluster_64(self):
self.assertEqual(sorted(resources._subset_internal(
[48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 64, 64, 64, 64, 24], 64)), [64])
def test_big_cluster_200(self):
self.assertEqual(sorted(resources._subset_internal(
[48, 48, 48, 48, 48, 48, 48, 48, 48, 48, 64, 64, 64, 64, 24], 200)), [24, 48, 64, 64])
class TestGetMaximalResources(unittest.TestCase):
def test_returns_single_resource(self):
sinfo_data = slurm.SinfoData(SINFO_STDOUT_TWO_LINE)
self.assertEqual(resources.get_maximal_resources(sinfo_data), {'partition': resources.Resource('partition', 8, 2, 16384)})
def test_returns_multiple_resources(self):
sout = "node01 partition1 0.00 4/0/0/4 1:4:1 idle 8192 8000 0 (null)\n"\
+"node02 partition2 0.00 4/0/0/4 1:4:1 idle 8192 8000 0 (null)\n"
sinfo_data = slurm.SinfoData(sout)
res = {'partition1': resources.Resource('partition1', 4, 1, 8192), 'partition2': resources.Resource('partition2', 4, 1, 8192)}
self.assertEqual(resources.get_maximal_resources(sinfo_data), res)
def test_returns_no_resource(self):
sinfo_data = slurm.SinfoData('')
self.assertEqual(resources.get_maximal_resources(sinfo_data), {})
class TestGetMaximalMemory(unittest.TestCase):
def test_returns_total_memory(self):
sinfo_data = slurm.SinfoData(SINFO_STDOUT_TWO_LINE)
self.assertEqual(resources.get_maximal_memory(sinfo_data), {'partition': 16384})
def test_returns_sum_of_multiple(self):
sout = "node01 partition1 0.00 4/0/0/4 1:4:1 idle 8192 8000 0 (null)\n"\
+"node02 partition2 0.00 4/0/0/4 1:4:1 idle 16384 8000 0 (null)\n"\
+"node02 partition2 0.00 4/0/0/4 1:4:1 idle 8192 8000 0 (null)\n"
sinfo_data = slurm.SinfoData(sout)
res = {'partition1': 8192, 'partition2': 24576}
self.assertEqual(resources.get_maximal_memory(sinfo_data), res)
def test_returns_empty_dict_if_empty_input(self):
sinfo_data = slurm.SinfoData('')
self.assertEqual(resources.get_maximal_memory(sinfo_data), {})
class TestGetMaximalMemPerCpu(unittest.TestCase):
def test_returns_single_memory(self):
sinfo_data = slurm.SinfoData(SINFO_STDOUT_TWO_LINE)
self.assertEqual(resources.get_maximal_mem_per_cpu(sinfo_data), {'partition': 8192})
def test_returns_max_of_multiple(self):
sout = "node01 partition1 0.00 4/0/0/4 1:4:1 idle 8192 8000 0 (null)\n"\
+"node02 partition2 0.00 4/0/0/4 1:4:1 idle 16384 8000 0 (null)\n"\
+"node02 partition2 0.00 4/0/0/4 1:4:1 idle 8192 8000 0 (null)\n"
sinfo_data = slurm.SinfoData(sout)
res = {'partition1': 8192, 'partition2': 16384}
self.assertEqual(resources.get_maximal_mem_per_cpu(sinfo_data), res)
def test_returns_empty_dict_if_empty_input(self):
sinfo_data = slurm.SinfoData('')
self.assertEqual(resources.get_maximal_mem_per_cpu(sinfo_data), {})
| 2.40625
| 2
|
Compliance_minimization/Figure_6/visualization_2_design_space.py
|
julianschumann/ae-opt
| 0
|
12780564
|
import numpy as np
from mpi4py import MPI
from SIMP import TO_SIMP, make_Conn_matrix
def get_void(nely,nelx):
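# Build a boolean mask of circular void regions (radius min(nely, nelx)/15)
# centred at six fixed fractional positions of the design domain.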
v=np.zeros((nely,nelx))
R=min(nely,nelx)/15
loc=np.array([[1/3, 1/4], [2/3, 1/4],[ 1/3, 1/2], [2/3, 1/2], [1/3 , 3/4], [2/3, 3/4]])
loc=loc*np.array([[nely,nelx]])
for i in range(nely):
for j in range(nelx):
v[i,j]=R-np.min(np.sqrt(np.sum((loc-np.array([[i+1,j+1]]))**2,1)));
v=v>0
return v
def evaluate(x0,volfrac,void,Iar,cMat):
beta=0.05
epsilon_2=0.25
nelx=90
nely=45
penal=3
E0=1
nu=0.3
max_move=0.25
if np.mean(x0)>volfrac:
x0=x0*volfrac/np.mean(x0)
_,c1 = TO_SIMP(x0,nelx,nely,volfrac,penal,beta,epsilon_2,max_move,E0,nu,Iar,cMat,True,void,np.zeros((1,nely,nelx)),0,10)
_,c2 = TO_SIMP(x0,nelx,nely,volfrac,penal,beta,epsilon_2,max_move,E0,nu,Iar,cMat,True,void,np.zeros((1,nely,nelx)),0,0)
return c1,c2
comm = MPI.COMM_WORLD
rank = comm.Get_rank()
size = comm.Get_size()
nelx=90
nely=45
volfrac=0.4+0.6*0.0001
void=get_void(nely,nelx)
Iar,cMat=make_Conn_matrix(nelx,nely)
num_samples=100000
perrank=int(np.ceil(num_samples/size))
num_samples=perrank*size
C_rand_rank=np.zeros(perrank)
C_rand_opt_rank=np.zeros(perrank)
C_rand_inner_opt_rank=np.zeros(perrank)
X_rand_rank=np.zeros((perrank,nely,nelx))
for i in range(perrank):
X_rand_rank[i]=np.random.rand(nely,nelx)**1.5
X_rand_rank_inner_i=X_rand_rank[i]*0.5+0.2
C_rand_opt_rank[i],C_rand_rank[i]=evaluate(X_rand_rank[i],volfrac,void,Iar,cMat)
C_rand_inner_opt_rank[i],_=evaluate(X_rand_rank_inner_i,volfrac,void,Iar,cMat)
if rank==0:
X_rand=np.zeros((perrank*size,nely,nelx))
C_rand=np.zeros(perrank*size)
C_rand_opt=np.zeros(perrank*size)
C_rand_inner_opt=np.zeros(perrank*size)
else:
X_rand=None
C_rand=None
C_rand_opt=None
C_rand_inner_opt=None
comm.Gather(C_rand_rank,C_rand,root=0)
comm.Gather(C_rand_opt_rank,C_rand_opt,root=0)
comm.Gather(C_rand_inner_opt_rank,C_rand_inner_opt,root=0)
comm.Gather(X_rand_rank,X_rand,root=0)
if rank==0:
np.save('Sample_data/X_rand.npy',X_rand)
np.save('Sample_data/C_rand_opt.npy',C_rand_opt)
np.save('Sample_data/C_rand_inner_opt.npy',C_rand_inner_opt)
np.save('Sample_data/C_rand.npy',C_rand)
| 1.914063
| 2
|
test/test_triggers.py
|
PaleNutcrackers/cactbot
| 0
|
12780565
|
<filename>test/test_triggers.py
"""Tests individual trigger files for the raidboss Cactbot module."""
from pathlib import Path
import subprocess
import sys
from definitions import CactbotModule, DATA_DIRECTORY
def main():
"""Validates individual trigger files within the raidboss Cactbot module.
Current validation only checks that the trigger file successfully compiles.
Returns:
An exit status code of 0 or 1 if the tests passed successfully or failed, respectively.
"""
exit_status = 0
for filepath in Path(CactbotModule.RAIDBOSS.directory(), DATA_DIRECTORY).glob('**/*.js'):
exit_status |= subprocess.call(['node', str(filepath)])
return exit_status
if __name__ == '__main__':
EXIT_STATUS = main()
sys.exit(EXIT_STATUS)
| 2.265625
| 2
|
disgames/mixins/blackjack.py
|
Jerrydotpy/Disgames
| 8
|
12780566
|
<filename>disgames/mixins/blackjack.py
import discord
from discord.ext import commands
import random
class BlackJack(commands.Cog):
def __init__(self, bot):
self.bot = bot
def has_lost(self, amt):
if amt == 21:
return False
elif amt > 21:
return True
@commands.command(aliases=['bj'])
async def blackjack(self, ctx):
num = random.randint(1,10)
bot_am = random.randint(1,10)
hum_am = num
embed = discord.Embed(title='BlackJack', color=discord.Color.blurple())
embed.add_field(name=self.bot.user.display_name, value="`???`")
embed.add_field(name=ctx.author.display_name, value="`"+str(hum_am)+"`")
msg = await ctx.send(embed=embed)
for _ in range(5):
await msg.edit(f"You got {num}",embed=embed)
inp = await self.bot.wait_for('message', check = lambda m:m.author == ctx.author and m.channel == ctx.channel and m.content.lower() in ['hit','sit','h','s'])
if inp.content.lower() in ['hit','h']:
num = random.randint(1,10)
bot_am += random.randint(1,10)
hum_am += num
bot_has_lost = self.has_lost(bot_am)
hum_has_lost = self.has_lost(hum_am)
embed._fields[1]['value'] = "`"+str(hum_am)+"`"
if bot_has_lost:
embed._fields[0]['value'] = "`"+str(bot_am)+"`"
return await ctx.send("The bot went over 21", embed=embed)
elif bot_has_lost == False:
embed._fields[0]['value'] = "`"+str(bot_am)+"`"
return await ctx.send("The bot hit 21", embed=embed)
if hum_has_lost:
embed._fields[0]['value'] = "`"+str(bot_am)+"`"
return await ctx.send("You went over 21", embed=embed)
elif hum_has_lost == False:
embed._fields[0]['value'] = "`"+str(bot_am)+"`"
return await ctx.send("You hit 21", embed=embed)
else:
embed._fields[0]['value'] = "`"+str(bot_am)+"`"
if bot_am == hum_am:
return await ctx.send("Tie", embed=embed)
elif bot_am > hum_am:
return await ctx.send("The bot won",embed=embed)
else:
return await ctx.send("You won",embed=embed)
embed._fields[0]['value'] = "`"+str(bot_am)+"`"
await ctx.send("You won",embed=embed)
| 2.65625
| 3
|
solutions/rank-1/cleanup_model/predict.py
|
mattmotoki/ashrae-great-energy-predictor-3-solution-analysis
| 48
|
12780567
|
<gh_stars>10-100
#!/usr/bin/env python
# coding: utf-8
import argparse
import matplotlib.pyplot as plt
import seaborn as sns
from sklearn.metrics import mean_squared_error
import pickle
from utils import *
INPUT_DIR = '../input/'
PROCESSED_PATH = "../processed/"
OUTPUT_DIR = '../output/'
MODEL_PATH = '../models/'
parser = argparse.ArgumentParser(description='')
parser.add_argument('--debug', action='store_true', help='debug mode')
args = parser.parse_args()
drops= ["timestamp", 'wind_direction', 'wind_speed']
def predict(debug=True):
with open(MODEL_PATH + 'cleanup_model.pickle', mode='rb') as f:
[model] = pickle.load(f)
with timer("Preprocesing"):
X = combined_test_data()
X = compress_dataframe(add_time_features(X))
        X = X.drop(columns=drops)  # Raw timestamp doesn't help with prediction
    with timer("Predicting"):
predictions = pd.DataFrame({
"row_id": X.index,
"meter_reading": np.clip(np.expm1(model.predict(X)), 0, None)
})
predictions.loc[(X.site_id == 0) & (X.meter==0), 'meter_reading'] = predictions.loc[(X.site_id == 0) & (X.meter==0), 'meter_reading'] * 3.4118
del X
# Finally, write the predictions out for submission. After that, it's Miller Time (tm).
if not debug:
predictions.to_csv(OUTPUT_DIR + "submission_cleanup.csv", index=False, float_format="%.4f")
with timer("Post-procesing"):
# # LB Score
leak_df = pd.read_feather(INPUT_DIR + 'leak.feather')
leak_df.fillna(0, inplace=True)
leak_df = leak_df[(leak_df.timestamp.dt.year > 2016) & (leak_df.timestamp.dt.year < 2019)]
leak_df.loc[leak_df.meter_reading < 0, 'meter_reading'] = 0 # remove large negative values
leak_df = leak_df[leak_df.building_id!=245]
test_df = pd.read_feather(PROCESSED_PATH + 'test.feather')
building_meta_df = pd.read_feather(PROCESSED_PATH + 'building_metadata.feather')
test_df['timestamp'] = pd.to_datetime(test_df.timestamp)
test_df['pred'] = predictions.meter_reading
leak_df = leak_df.merge(test_df[['building_id', 'meter', 'timestamp', 'pred', 'row_id']], left_on = ['building_id', 'meter', 'timestamp'], right_on = ['building_id', 'meter', 'timestamp'], how = "left")
leak_df = leak_df.merge(building_meta_df[['building_id', 'site_id']], on='building_id', how='left')
leak_df['pred_l1p'] = np.log1p(leak_df.pred)
leak_df['meter_reading_l1p'] = np.log1p(leak_df.meter_reading)
sns.distplot(leak_df.pred_l1p)
sns.distplot(leak_df.meter_reading_l1p)
leak_score = np.sqrt(mean_squared_error(leak_df.pred_l1p, leak_df.meter_reading_l1p))
leak_df = leak_df[['meter_reading', 'row_id']].set_index('row_id').dropna()
predictions.loc[leak_df.index, 'meter_reading'] = leak_df['meter_reading']
if not debug:
predictions.to_csv(OUTPUT_DIR + 'submission_replaced_cleanup.csv', index=False, float_format='%.4f')
print('total score=', leak_score)
if __name__ == '__main__':
debug = args.debug
print ('debug=', debug)
predict(debug)
| 2.375
| 2
|
crawler.py
|
nkpydev/SimpleWebCrawler
| 0
|
12780568
|
import time
from datetime import date
from spydr.spydr import Spydr
from urllib.parse import urlparse
target_url = input("Enter the URL to crawl:\t")
start_time = time.time()
domain_file_name = urlparse(target_url).netloc.replace(".", "_")
result = Spydr().crawl(target_url)
end_time = time.time()
print(f"\n{len(result)} URLs Crawled in {end_time - start_time} seconds")
file_name = domain_file_name + "_" + str(date.today().isoformat()) + ".txt"
with open(file_name, 'w') as wf:
for entry in result:
wf.write(entry + "\n")
print("Result file created!")
| 3.25
| 3
|
Chapter7_CNN/Chapter7_3_CNN_Optimization/mnistData.py
|
thisisjako/UdemyTF
| 0
|
12780569
|
from typing import Tuple
import numpy as np
from sklearn.model_selection import train_test_split
from tensorflow.keras.datasets import mnist
from tensorflow.keras.preprocessing.image import ImageDataGenerator
from tensorflow.keras.utils import to_categorical
class MNIST:
def __init__(self, with_normalization: bool = True) -> None:
(x_train, y_train), (x_test, y_test) = mnist.load_data()
self.x_train_: np.ndarray = None
self.y_train_: np.ndarray = None
self.x_val_: np.ndarray = None
self.y_val_: np.ndarray = None
self.val_size = 0
self.train_splitted_size = 0
# Preprocess x data
        self.x_train = x_train.astype(np.float32)
        self.x_train = np.expand_dims(self.x_train, axis=-1)  # keep the float32 cast
        if with_normalization:
            self.x_train = self.x_train / 255.0
        self.x_test = x_test.astype(np.float32)
        self.x_test = np.expand_dims(self.x_test, axis=-1)  # keep the float32 cast
        if with_normalization:
            self.x_test = self.x_test / 255.0
# Dataset attributes
self.train_size = self.x_train.shape[0]
self.test_size = self.x_test.shape[0]
self.width = self.x_train.shape[1]
self.height = self.x_train.shape[2]
self.depth = self.x_train.shape[3]
self.img_shape = (self.width, self.height, self.depth)
self.num_classes = 10
# Preprocess y data
self.y_train = to_categorical(y_train, num_classes=self.num_classes)
self.y_test = to_categorical(y_test, num_classes=self.num_classes)
def get_train_set(self) -> Tuple[np.ndarray, np.ndarray]:
return self.x_train, self.y_train
def get_test_set(self) -> Tuple[np.ndarray, np.ndarray]:
return self.x_test, self.y_test
def get_splitted_train_validation_set(self, validation_size: float = 0.33) -> tuple:
self.x_train_, self.x_val_, self.y_train_, self.y_val_ = train_test_split(
self.x_train,
self.y_train,
test_size=validation_size
)
self.val_size = self.x_val_.shape[0]
self.train_splitted_size = self.x_train_.shape[0]
return self.x_train_, self.x_val_, self.y_train_, self.y_val_
def data_augmentation(self, augment_size: int = 5_000) -> None:
image_generator = ImageDataGenerator(
rotation_range=5,
zoom_range=0.08,
width_shift_range=0.08,
height_shift_range=0.08
)
# Fit the data generator
image_generator.fit(self.x_train, augment=True)
# Get random train images for the data augmentation
rand_idxs = np.random.randint(self.train_size, size=augment_size)
x_augmented = self.x_train[rand_idxs].copy()
y_augmented = self.y_train[rand_idxs].copy()
x_augmented = image_generator.flow(
x_augmented,
np.zeros(augment_size),
batch_size=augment_size,
shuffle=False
).next()[0]
# Append the augmented images to the train set
self.x_train = np.concatenate((self.x_train, x_augmented))
self.y_train = np.concatenate((self.y_train, y_augmented))
self.train_size = self.x_train.shape[0]
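if __name__ == '__main__':
    # Hedged usage sketch (not part of the original file): construct, optionally
    # augment, then split. The shapes in the comments assume the standard MNIST
    # download succeeds.
    data = MNIST(with_normalization=True)
    x_train, y_train = data.get_train_set()
    print(x_train.shape, y_train.shape)  # (60000, 28, 28, 1) (60000, 10)
    data.data_augmentation(augment_size=5_000)
    x_tr, x_val, y_tr, y_val = data.get_splitted_train_validation_set(validation_size=0.33)
    print(x_tr.shape, x_val.shape)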
| 3.109375
| 3
|
resize/forms.py
|
kelvins/ResizeMe
| 1
|
12780570
|
<reponame>kelvins/ResizeMe<gh_stars>1-10
from django import forms
class UploadFileForm(forms.Form):
# Image file
file = forms.ImageField()
# Get the width and height fields
width = forms.IntegerField()
height = forms.IntegerField()
# Image formats
CHOICES = [
('jpg','jpg'),
('png','png'),
('bmp','bmp')
]
image_format = forms.ChoiceField(choices=CHOICES, widget=forms.RadioSelect())
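# --- Hedged usage sketch (not part of the original form module) ---
# A minimal view that validates UploadFileForm; the template name 'upload.html'
# and the redirect target are illustrative assumptions only.
from django.shortcuts import redirect, render
def upload_view(request):
    if request.method == 'POST':
        form = UploadFileForm(request.POST, request.FILES)
        if form.is_valid():
            width = form.cleaned_data['width']
            height = form.cleaned_data['height']
            image_format = form.cleaned_data['image_format']
            # ... resize form.cleaned_data['file'] to (width, height) here ...
            return redirect('/')
    else:
        form = UploadFileForm()
    return render(request, 'upload.html', {'form': form})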
| 2.40625
| 2
|
snake_game.py
|
SuhasBRao/Snake-game
| 0
|
12780571
|
<filename>snake_game.py
######################################################################
# Below are the few changes in this file.
#
# I've added a functionality to keep track of the best score in this py
# file. The score is tracked by using a file named best_score.txt
# The program overwrites this file if the score is greater than any
# previous score.
#
# [1] After the game ends, this displays the best score and the user's score
# [2]The Welcome to snake game image displayed in the opening has also
# been modified.
######################################################################
import pygame
import random
import time
pygame.init()
#############################
# Initialize the colors######
white = (255,255,255)
blue = (0,0,255)
green = (0,255,0)
light_green = (0,200,0)
red = (255,0,0)
light_red = (200,0,0)
yellow = (255,255,0)
black = (0,0,0)
snake_green = (109,223,42)
############################
clock = pygame.time.Clock()
score_pos = [0,0]
display_width = 800
display_height = 600
snake_block = 10
snake_speed = 7
snake_color = snake_green
######## load the welcome and game-over images ######
snake_img = pygame.image.load('Welcome.png')
game_over_img = pygame.image.load('game_over.png')
font_style = pygame.font.SysFont('sarai', 24)
score_font = pygame.font.SysFont('rasa',28)
d = pygame.display.set_mode((display_width,display_height)) # for layout where d stands for the
# display surface
pygame.display.set_caption('SNAKE GAME') # sets the title of the window
# The Snake_Img function displays
#the starting snake image
def Snake_Img(w,l):
d.blit(snake_img,(w,l))
def gameOver(w,l):
d.blit(game_over_img,(w,l))
# Your_score function keeps track of the current score
def Your_score(score):
value = score_font.render(f'Your score: {score}', True, yellow)
d.blit(value, score_pos)
def message(txt,color,w,l):
mesg = font_style.render(txt,True, color)
d.blit(mesg, [w,l])
# our_snake function draws the snake
# on every frame
def our_snake(snake_block, snake_list):
global snake_color
for x in snake_list:
#pygame.draw.rect(d, snake_color, [x[0],x[1], snake_block,snake_block])
pygame.draw.circle(d, snake_color,[x[0],x[1]], 7)
def quit_game():
pygame.quit()
quit()
# Button function for display of buttons
def button(msg,x,y,w,h,ic,ac, action = None):
mouse = pygame.mouse.get_pos()
for event in pygame.event.get():
if event.type == pygame.QUIT:
pygame.quit()
if event.type == pygame.MOUSEBUTTONDOWN:
if x+w > mouse[0] > x and y+h > mouse[1] > y:
action()
if x + w > mouse[0] > x and y + h > mouse[1] > y:
pygame.draw.rect(d,ac,[x,y,w,h])
else:
pygame.draw.rect(d,ic,[x,y,w,h])
message(msg,black, x, y)
pygame.display.update()
def game_loop(): # main GAME LOOP
game_over = False
game_close = False
x1 = display_width/2
y1 = display_height/2
x_change = 0
y_change = 0
snake_list = []
length_of_snake = 1
foodx = round(random.randrange(0,display_width - snake_block) /10.0 )* 10.0
foody = round(random.randrange(0, display_height - snake_block) / 10.0 ) *10.0
while not game_over: #While loop for the screen to get displayed
pygame.display.update()
d.fill(black)
# This below works only after the game
# is over
cnt = 0
while game_close == True:
cnt += 1
mouse = pygame.mouse.get_pos()
            # This creates the exit and replay buttons
gameOver(display_width*.4,display_height*.3)
pygame.display.update()
if cnt == 1:
clock.tick(0.25)
            # This part of the code reads the current best score from the
            # file named best_score.txt in the current directory
            with open('best_score.txt') as myfile:
                best_score = int(myfile.read())
            if length_of_snake-1 > best_score:
                with open('best_score.txt', 'w') as myfile:
                    myfile.write(str(length_of_snake - 1))
message(f'Best-score:{length_of_snake-1}', white,0,70)
else:
message(f'Best-score:{best_score}', blue,0,70)
message('Do you want to play again?',green,0,100)
button('YES',10,131, 80,40,light_green,green,game_loop)
button('NO',100,130, 80,40,light_red,red,quit_game)
Your_score(length_of_snake -1)
pygame.display.update()
# The below loop is responsible for movements of the snake
for event in pygame.event.get():# until quit button is pressed
if event.type == pygame.QUIT:
game_over = True # if event is quit / if exit button is
# pressed it exits from the screen
if event.type == pygame.KEYDOWN:
if event.key == pygame.K_LEFT:
x_change = -snake_block
y_change = 0
elif event.key == pygame.K_RIGHT:
x_change = snake_block
y_change = 0
elif event.key == pygame.K_UP:
x_change = 0
y_change = -snake_block
elif event.key == pygame.K_DOWN:
x_change = 0
y_change = snake_block
x1 += x_change
y1 += y_change
# IF YOU DONT WANT END THE GAME IF SNAKE TOUCHES THE BORDER REMOVE THE BELOW COMMENTS
# if x1 > display_width:
# x1 = 0
# elif x1 < 0:
# x1 = display_width
# elif y1 > display_height:
# y1 = 0
# elif y1 < 0:
# y1 = display_height
if x1 >= display_width or x1 <= 0 or y1 >= display_height or y1<=0:
game_close = True
d.fill(black)
pygame.draw.rect(d,red, [int(foodx), int(foody), snake_block,snake_block])
#pygame.draw.circle(d,red,[int(foodx), int(foody)], 5)
snake_head =[]
snake_head.append(x1)
snake_head.append(y1)
snake_list.append(snake_head)
if len(snake_list) > length_of_snake:
del snake_list[0]
for x in snake_list[:-1]:
if x == snake_head:
game_close = True # if snake collides itself then game is over
our_snake(snake_block,snake_list)
Your_score(length_of_snake -1) # Displays current score
pygame.display.update()
if x1 == int(foodx) and y1 == int(foody):
foodx = round(random.randrange(0, display_width - snake_block) / 10.0) * 10.0
foody = round(random.randrange(0, display_height - snake_block) / 10.0) * 10.0
            length_of_snake += 1  # the snake grows by one segment each time it eats food
clock.tick(snake_speed)
pygame.quit()
quit()
Snake_Img(display_width*.4,display_height*.3)
message('SNAKE GAME',green,display_width/3 + 50,display_height/1.8)
#message('Feed the snakes',white,display_width/3 + 50,display_height/1.8 + 30)
font = pygame.font.SysFont('uroob', 22)
mesg = font.render('Feed the snakes',True, white)
d.blit(mesg, [display_width/3 + 53,display_height/1.8 + 30])
pygame.display.update()
clock.tick(.25)
game_loop()
| 3.765625
| 4
|
cyder/tests/all.py
|
ngokevin/cyder
| 1
|
12780572
|
from cyder.cydns.tests.all import *
from cyder.cybind.tests import *
| 0.96875
| 1
|
pyPLM/Widgets/LineEdit.py
|
vtta2008/pipelineTool
| 7
|
12780573
|
<gh_stars>1-10
# -*- coding: utf-8 -*-
"""
Script Name: LineEdit.py
Author: <NAME>/Jimmy - 3D artist.
Description:
"""
# -------------------------------------------------------------------------------------------------------------
from PySide2.QtWidgets import QLineEdit
from pyPLM.Gui import IntValidator
from PLM.options import PRS
class LineEdit(QLineEdit):
Type = 'DAMGUI'
key = 'LineEdit'
_name = 'DAMG Line Edit'
def __init__(self, preset=None, parent=None):
QLineEdit.__init__(self)
self.parent = parent
self.preset = preset
if self.preset:
self.buildUI()
def setValue(self, key, value):
return self.settings.initSetValue(key, value, self.key)
def getValue(self, key, decode=None):
if decode is None:
return self.settings.initValue(key, self.key)
else:
return self.settings.initValue(key, self.key, decode)
@property
def name(self):
return self._name
@name.setter
def name(self, newName):
self._name = newName
def buildUI(self):
if self.preset is None or self.preset == {}:
self.preset = {'txt': ' '}
for key, value in self.preset.items():
if key == 'fn':
self.setEchoMode(PRS[value])
elif key == 'txt':
self.setText(value)
elif key == 'validator':
if value == 'int':
self.setValidator(IntValidator())
elif key == 'echo':
if value == 'password':
self.setEchoMode(QLineEdit.Password)
else:
print("PresetKeyError at {0}: No such key registed in preset: {1}: {2}".format(__name__, key, value))
# -------------------------------------------------------------------------------------------------------------
# Created by panda on 27/10/2019 - 6:40 PM
# © 2017 - 2018 DAMGteam. All rights reserved
| 2.3125
| 2
|
events_page/remove_subpath_from_gcs.py
|
los-verdes/lv-event-pagenerator
| 0
|
12780574
|
#!/usr/bin/env python
from logzero import logger
from apis import storage
if __name__ == "__main__":
import cli
from config import cfg
cfg.load()
parser = cli.build_parser()
parser.add_argument(
"-g",
"--gcs-bucket-prefix",
default=cfg.gcs_bucket_prefix,
help="The GCS bucket prefix to publish the static site under.",
)
parser.add_argument(
"-s",
"--site-hostname",
default=cfg.hostname,
help="Fully-qualified domain name of the published site. Used in cache purging / priming methods.",
)
args = cli.parse_args(parser)
storage.remove_subpath_from_gcs(
client=storage.get_client(),
bucket_id=args.site_hostname,
prefix=args.gcs_bucket_prefix,
)
logger.info(
f"Subpath deletion {args.gcs_bucket_prefix} for {args.site_hostname} completed! 🎉"
)
| 2.25
| 2
|
smart_imports/tests/test_importer.py
|
Tiendil/smart-imports
| 32
|
12780575
|
import os
import math
import json
import uuid
import unittest
import importlib
import subprocess
from unittest import mock
from .. import rules
from .. import config
from .. import helpers
from .. import importer
from .. import constants
from .. import exceptions
from .. import scopes_tree
TEST_FIXTURES_DIR = os.path.join(os.path.dirname(__file__), 'fixtures')
class TestApplyRules(unittest.TestCase):
def setUp(self):
self.source_module = 'smart_imports.tests.fake_package.config_variables'
self.config = config.DEFAULT_CONFIG.clone(path='#config.1',
rules=[{'type': 'rule_custom',
'variables': {'config_variable': {'module': self.source_module}}},
{'type': 'rule_local_modules'},
{'type': 'rule_stdlib'},
{'type': 'rule_predefined_names'}])
self.module = type(os)
def test_command_not_found(self):
result = importer.apply_rules(module_config=self.config,
module=self.module,
variable='x')
self.assertEqual(result, None)
def test_command_found(self):
command = importer.apply_rules(module_config=self.config,
module=self.module,
variable='config_variable')
self.assertEqual(command, rules.ImportCommand(target_module=self.module,
target_attribute='config_variable',
source_module=self.source_module,
source_attribute=None))
def test_rules_priority(self):
test_config = config.DEFAULT_CONFIG.clone(path='#config.2',
rules=[{'type': 'rule_custom',
'variables': {'var_1': {'module': 'math'}}},
{'type': 'rule_custom',
'variables': {'var_1': {'module': 'json'}}}])
command = importer.apply_rules(module_config=test_config,
module=self.module,
variable='var_1')
self.assertEqual(command, rules.ImportCommand(target_module=self.module,
target_attribute='var_1',
source_module='math',
source_attribute=None))
class TestGetModuleScopesTree(unittest.TestCase):
def test(self):
source = '''
x = 1
def y(q):
return q + z
'''
scope = importer.get_module_scopes_tree(source)
self.assertEqual(scope.variables, {'x': scopes_tree.VariableInfo(constants.VARIABLE_STATE.INITIALIZED, 2),
'y': scopes_tree.VariableInfo(constants.VARIABLE_STATE.INITIALIZED, 4)})
self.assertEqual(scope.children[0].variables,
{'q': scopes_tree.VariableInfo(constants.VARIABLE_STATE.INITIALIZED, 4),
'z': scopes_tree.VariableInfo(constants.VARIABLE_STATE.UNINITIALIZED, 5)})
class TestExtractVariables(unittest.TestCase):
def test_empty_source(self):
self.assertEqual(importer.extract_variables(''), ([], {}))
def test_has_source(self):
source = '''
x = 1 + y
def y():
return x + z
'''
self.assertEqual(set(importer.extract_variables(source)[0]),
{'z', 'y'})
class TestProcessModule(unittest.TestCase):
SIMPLE_SOURCE = '''
x = 'X'
def y(z):
return z + math.log(1)
'''
def apply_commands(self, commands):
for command in commands:
command()
def test_process_simple(self):
module_name = 'process_simple_' + uuid.uuid4().hex
with helpers.test_directory() as temp_directory:
with open(os.path.join(temp_directory, module_name + '.py'), 'w') as f:
f.write(self.SIMPLE_SOURCE)
module = importlib.import_module(module_name)
self.assertEqual(getattr(module, 'math', None), None)
commands = importer.process_module(module_config=config.DEFAULT_CONFIG,
module=module)
self.assertEqual(commands,
[rules.ImportCommand(target_module=module,
target_attribute='math',
source_module='math',
source_attribute=None)])
self.apply_commands(commands)
self.assertEqual(getattr(module, 'math'), math)
def test_process_simple__cached(self):
module_name = 'process_simple_' + uuid.uuid4().hex
with helpers.test_directory() as temp_directory:
with open(os.path.join(temp_directory, module_name + '.py'), 'w') as f:
f.write(self.SIMPLE_SOURCE)
module = importlib.import_module(module_name)
self.assertEqual(getattr(module, 'math', None), None)
# not required to create other temp directory, since filenames do not intersect
test_config = config.DEFAULT_CONFIG.clone(cache_dir=temp_directory)
commands = importer.process_module(module_config=test_config,
module=module)
self.apply_commands(commands)
self.assertEqual(getattr(module, 'math'), math)
self.assertTrue(os.path.isfile(os.path.join(temp_directory, module_name + '.cache')))
with mock.patch('smart_imports.importer.extract_variables') as extract_variables:
importer.process_module(module_config=test_config,
module=module)
extract_variables.assert_not_called()
def prepair_data(self, temp_directory):
modules_names = []
for i in range(1, 5):
modules_names.append('process_module_circular_{}_{}'.format(i, uuid.uuid4().hex))
source_1 = '''
def import_hook():
from smart_imports import config
from smart_imports import importer
from smart_imports import discovering
target_module = discovering.find_target_module()
commands = importer.process_module(module_config=config.DEFAULT_CONFIG,
module=target_module)
for command in commands:
command()
import_hook()
x = 1
def y():
return {module_2_name}.z()
'''.format(module_2_name=modules_names[1])
source_2 = '''
def import_hook():
from smart_imports import config
from smart_imports import importer
from smart_imports import discovering
target_module = discovering.find_target_module()
commands = importer.process_module(module_config=config.DEFAULT_CONFIG,
module=target_module)
for command in commands:
command()
import_hook()
def z():
return {module_1_name}.x
'''.format(module_1_name=modules_names[0])
source_3 = '''
def import_hook():
from smart_imports import config
from smart_imports import importer
from smart_imports import discovering
target_module = discovering.find_target_module()
commands = importer.process_module(module_config=config.DEFAULT_CONFIG,
module=target_module)
for command in commands:
command()
import_hook()
x = 1
y = 10 + {module_4_name}.z
'''.format(module_4_name=modules_names[3])
source_4 = '''
def import_hook():
from smart_imports import config
from smart_imports import importer
from smart_imports import discovering
target_module = discovering.find_target_module()
commands = importer.process_module(module_config=config.DEFAULT_CONFIG,
module=target_module)
for command in commands:
command()
import_hook()
z = 100 + {module_1_name}.x
'''.format(module_1_name=modules_names[0])
sources = [source_1, source_2, source_3, source_4]
for name, source in zip(modules_names, sources):
with open(os.path.join(temp_directory, name + '.py'), 'w') as f:
f.write(source)
return modules_names
def test_process_circular__local_namespace(self):
with helpers.test_directory() as temp_directory:
modules_names = self.prepair_data(temp_directory)
module = importlib.import_module(modules_names[0])
self.assertTrue(hasattr(module, modules_names[1]))
self.assertEqual(module.y(), 1)
def test_process_circular__global_namespace(self):
with helpers.test_directory() as temp_directory:
modules_names = self.prepair_data(temp_directory)
module = importlib.import_module(modules_names[2])
self.assertTrue(hasattr(module, modules_names[3]))
self.assertEqual(module.y, 111)
def test_no_import_found(self):
module_name = 'process_module_no_imports_{}'.format(uuid.uuid4().hex)
source = '''
def y():
print(x)
def z():
print(x)
'''
with helpers.test_directory() as temp_directory:
with open(os.path.join(temp_directory, module_name + '.py'), 'w') as f:
f.write(source)
module = importlib.import_module(module_name)
with self.assertRaises(exceptions.NoImportFound) as error:
importer.process_module(module_config=config.DEFAULT_CONFIG,
module=module)
self.assertEqual(set(error.exception.arguments['lines']), {3, 6})
def test_no_import_found__cached_module(self):
module_name = 'process_module_no_imports_{}'.format(uuid.uuid4().hex)
source = '''
def y():
print(x)
def z():
print(x)
'''
with helpers.test_directory() as temp_directory:
with open(os.path.join(temp_directory, module_name + '.py'), 'w') as f:
f.write(source)
module = importlib.import_module(module_name)
# not required to create other temp directory, since filenames do not intersect
test_config = config.DEFAULT_CONFIG.clone(cache_dir=temp_directory)
# test repeated calls
for i in range(3):
with self.assertRaises(exceptions.NoImportFound) as error:
importer.process_module(module_config=test_config,
module=module)
self.assertEqual(set(error.exception.arguments['lines']), {3, 6})
self.assertTrue(os.path.isfile(os.path.join(temp_directory, module_name + '.cache')))
class TestAll(unittest.TestCase):
def test(self):
self.assertNotIn('string', globals())
importer.all(importlib.import_module('smart_imports.tests.test_importer'))
self.assertIn('string', globals())
self.assertEqual(string.digits, '0123456789')
class TestSimpleScript(unittest.TestCase):
def prepair_modules(self, base_directory):
os.makedirs(os.path.join(base_directory, 'a', 'b', 'c'))
script = '''
import smart_imports
smart_imports.all()
myprint((__name__, datetime.datetime.now()))
'''
with open(os.path.join(base_directory, 'a.py'), 'w') as f:
f.write(script)
config = {'rules': [{'type': 'rule_predefined_names'},
{'type': 'rule_stdlib'},
{'type': 'rule_custom',
'variables': {'myprint': {'module': 'pprint', 'attribute': 'pprint'}}}]}
with open(os.path.join(base_directory, 'smart_imports.json'), 'w') as f:
f.write(json.dumps(config))
def test(self):
with helpers.test_directory() as temp_directory:
self.prepair_modules(temp_directory)
output = subprocess.check_output(['python', os.path.join(temp_directory, 'a.py')])
self.assertIn(b"'__main__'", output)
self.assertIn(b"datetime.datetime", output)
| 2.375
| 2
|
tests/conftest.py
|
EJEmmett/functimer
| 1
|
12780576
|
from tests.test_timer import mock_timed
| 1.046875
| 1
|
images/program/ultra.py
|
FikriSatria11/project-skripsi2
| 0
|
12780577
|
<filename>images/program/ultra.py
import RPi.GPIO as GPIO
import time
GPIO.setwarnings(False)
#GPIO.setmode(GPIO.BOARD)
def jarakMobil(min, max):
GPIO.setmode(GPIO.BOARD)
TRIG = 11
ECHO = 12
jarakMinimal = min
jarakMaksimal = max
GPIO.setup(TRIG,GPIO.OUT)
GPIO.setup(ECHO,GPIO.IN)
while True:
GPIO.output(TRIG,False)
print('Pengukuran dimulai')
time.sleep(2)
GPIO.output(TRIG,True)
time.sleep(0.00001)
GPIO.output(TRIG,False)
        pulse_start = time.time()
        pulse_end = pulse_start
        while GPIO.input(ECHO) == 0:
            pulse_start = time.time()
        while GPIO.input(ECHO) == 1:
            pulse_end = time.time()
        pulse_duration = pulse_end - pulse_start
        distance = pulse_duration * 17150
        distance = round(distance, 2)
        print(distance)
        if distance > jarakMinimal and distance < jarakMaksimal:
            GPIO.cleanup()  # release the pins before returning a successful reading
            return True
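if __name__ == '__main__':
    # Hedged usage sketch (not part of the original file); the 10-50 cm window
    # is illustrative only. jarakMobil blocks until a reading falls in range.
    if jarakMobil(10, 50):
        print('Object detected between 10 cm and 50 cm')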
| 3.015625
| 3
|
main.py
|
Vyvy-vi/gh-twitter-integration
| 0
|
12780578
|
<reponame>Vyvy-vi/gh-twitter-integration
import os
import tweepy
import logging
logger = logging.getLogger()
def main():
CONSUMER_API_KEY = os.environ["CONSUMER_API_KEY"]
CONSUMER_API_SECRET = os.environ["CONSUMER_API_SECRET"]
ACCESS_TOKEN = os.environ["ACCESS_TOKEN"]
ACCESS_TOKEN_SECRET = os.environ["ACCESS_TOKEN_SECRET"]
try:
auth = tweepy.OAuthHandler(CONSUMER_API_KEY, CONSUMER_API_SECRET)
auth.set_access_token(ACCESS_TOKEN, ACCESS_TOKEN_SECRET)
api = tweepy.API(auth)
api.verify_credentials()
logger.info('Twitter Authenticated')
except Exception as e:
logger.error('Error in credentials', exc_info=True)
        raise
api.update_status()
if __name__ == "__main__":
main()
| 2.28125
| 2
|
Sorting/Quicksort/quicksort.py
|
wizh/algorithms
| 0
|
12780579
|
<filename>Sorting/Quicksort/quicksort.py
def quicksort(seq, low, high):
if low < high:
pivot = partition(seq, low, high)
quicksort(seq, low, pivot)
quicksort(seq, pivot + 1, high)
return seq
def partition(seq, low, high):
pivot = seq[low]
leftwall = low
for i in range(low + 1, high):
if seq[i] < pivot:
leftwall += 1
seq[leftwall], seq[i] = seq[i], seq[leftwall]
seq[low], seq[leftwall] = seq[leftwall], seq[low]
return leftwall
import random
l = random.sample(range(1000), 1000)
print(quicksort(l, 0, len(l)))
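# Hedged sanity check (not part of the original file): the in-place result should
# match Python's built-in sorted() on a fresh random sample.
check = random.sample(range(1000), 50)
assert quicksort(list(check), 0, len(check)) == sorted(check)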
| 3.9375
| 4
|
app_python.py
|
BernLeWal/raskeytar_webapp
| 0
|
12780580
|
<reponame>BernLeWal/raskeytar_webapp<filename>app_python.py
#!/usr/bin/python3
#
# The Website of raskeytar.at implemented in Python hosted by Flask.
#
from flask import Flask, send_from_directory
from flask import request
from flask import render_template
webapp = Flask(__name__, template_folder="app_python")
# Templates
@webapp.route("/index.html")
def send_index():
return render_template("index.html")
# Deliver static content
@webapp.route("/<path:path>")
def send_static(path):
return send_from_directory('static', path)
# The main-page
@webapp.route("/")
def main():
return render_template("index.html")
if __name__ == "__main__":
webapp.run(host="0.0.0.0", port=8080)
| 2.5
| 2
|
batch/test/test_batch.py
|
atgenomix/hail
| 1
|
12780581
|
import os
import time
import re
import unittest
import batch
from flask import Flask, Response, request
import requests
from .serverthread import ServerThread
class Test(unittest.TestCase):
def setUp(self):
self.batch = batch.client.BatchClient(url=os.environ.get('BATCH_URL'))
def test_job(self):
j = self.batch.create_job('alpine', ['echo', 'test'])
status = j.wait()
self.assertTrue('attributes' not in status)
self.assertEqual(status['state'], 'Complete')
self.assertEqual(status['exit_code'], 0)
self.assertEqual(status['log'], 'test\n')
self.assertEqual(j.log(), 'test\n')
self.assertTrue(j.is_complete())
def test_create_fails_for_closed_batch(self):
b = self.batch.create_batch()
b.close()
try:
b.create_job('alpine', ['echo', 'test'])
except requests.exceptions.HTTPError as err:
assert err.response.status_code == 400
assert re.search('.*invalid request: batch_id [0-9]+ is closed', err.response.text)
return
assert False
def test_batch_ttl(self):
b = self.batch.create_batch(ttl=1)
t = 1
while b.status()['is_open']:
if t > 64:
assert False, "took more than 128 seconds to close a batch with ttl 1"
time.sleep(t)
t = t * 2
def test_attributes(self):
a = {
'name': 'test_attributes',
'foo': 'bar'
}
j = self.batch.create_job('alpine', ['true'], attributes=a)
status = j.status()
assert(status['attributes'] == a)
def test_scratch_folder(self):
sb = 'gs://test-bucket/folder'
j = self.batch.create_job('alpine', ['true'], scratch_folder=sb)
status = j.status()
assert(status['scratch_folder'] == sb)
def test_fail(self):
j = self.batch.create_job('alpine', ['false'])
status = j.wait()
self.assertEqual(status['exit_code'], 1)
def test_deleted_job_log(self):
j = self.batch.create_job('alpine', ['echo', 'test'])
id = j.id
j.wait()
j.delete()
self.assertEqual(self.batch._get_job_log(id), 'test\n')
def test_delete_job(self):
j = self.batch.create_job('alpine', ['sleep', '30'])
id = j.id
j.delete()
# verify doesn't exist
try:
self.batch._get_job(id)
except requests.HTTPError as e:
if e.response.status_code == 404:
pass
else:
raise
def test_cancel_job(self):
j = self.batch.create_job('alpine', ['sleep', '30'])
status = j.status()
self.assertTrue(status['state'], 'Created')
j.cancel()
status = j.status()
self.assertTrue(status['state'], 'Cancelled')
self.assertTrue('log' not in status)
# cancelled job has no log
try:
j.log()
except requests.HTTPError as e:
if e.response.status_code == 404:
pass
else:
raise
def test_get_nonexistent_job(self):
try:
self.batch._get_job(666)
except requests.HTTPError as e:
if e.response.status_code == 404:
pass
else:
raise
def test_api_cancel_nonexistent_job(self):
try:
self.batch._cancel_job(666)
except requests.HTTPError as e:
if e.response.status_code == 404:
pass
else:
raise
def test_get_job(self):
j = self.batch.create_job('alpine', ['true'])
j2 = self.batch.get_job(j.id)
status2 = j2.status()
assert(status2['id'] == j.id)
def test_batch(self):
b = self.batch.create_batch()
j1 = b.create_job('alpine', ['false'])
j2 = b.create_job('alpine', ['sleep', '1'])
j3 = b.create_job('alpine', ['sleep', '30'])
# test list_jobs
jobs = self.batch.list_jobs()
self.assertTrue(
set([j.id for j in jobs]).issuperset([j1.id, j2.id, j3.id]))
# test refresh_k8s_state
self.batch._refresh_k8s_state()
j2.wait()
j3.cancel()
bstatus = b.wait()
n_cancelled = bstatus['jobs']['Cancelled']
n_complete = bstatus['jobs']['Complete']
self.assertTrue(n_cancelled <= 1)
self.assertTrue(n_cancelled + n_complete == 3)
n_failed = sum([ec > 0 for _, ec in bstatus['exit_codes'].items() if ec is not None])
self.assertTrue(n_failed == 1)
def test_callback(self):
app = Flask('test-client')
d = {}
@app.route('/test', methods=['POST'])
def test():
d['status'] = request.get_json()
return Response(status=200)
server = ServerThread(app)
try:
server.start()
j = self.batch.create_job(
'alpine',
['echo', 'test'],
attributes={'foo': 'bar'},
callback=server.url_for('/test'))
j.wait()
status = d['status']
self.assertEqual(status['state'], 'Complete')
self.assertEqual(status['attributes'], {'foo': 'bar'})
finally:
server.shutdown()
server.join()
| 2.5625
| 3
|
cardea/fhir/Specimen.py
|
sarahmish/Cardea
| 69
|
12780582
|
<filename>cardea/fhir/Specimen.py
from .fhirbase import fhirbase
class Specimen(fhirbase):
"""
A sample to be used for analysis.
Args:
resourceType: This is a Specimen resource
identifier: Id for specimen.
accessionIdentifier: The identifier assigned by the lab when
accessioning specimen(s). This is not necessarily the same as the
specimen identifier, depending on local lab procedures.
status: The availability of the specimen.
type: The kind of material that forms the specimen.
subject: Where the specimen came from. This may be from the patient(s)
or from the environment or a device.
receivedTime: Time when specimen was received for processing or
testing.
parent: Reference to the parent (source) specimen which is used when
the specimen was either derived from or a component of another
specimen.
request: Details concerning a test or procedure request that required
a specimen to be collected.
collection: Details concerning the specimen collection.
processing: Details concerning processing and processing steps for the
specimen.
container: The container holding the specimen. The recursive nature
of containers; i.e. blood in tube in tray in rack is not addressed
here.
note: To communicate any details or issues about the specimen or
during the specimen collection. (for example: broken vial, sent with
patient, frozen).
"""
__name__ = 'Specimen'
def __init__(self, dict_values=None):
self.resourceType = 'Specimen'
# type: str
# possible values: Specimen
self.accessionIdentifier = None
# reference to Identifier
self.status = None
# type: str
# possible values: available, unavailable, unsatisfactory,
# entered-in-error
self.type = None
# reference to CodeableConcept
self.subject = None
# reference to Reference: identifier
self.receivedTime = None
# type: str
self.parent = None
# type: list
# reference to Reference: identifier
self.request = None
# type: list
# reference to Reference: identifier
self.collection = None
# reference to Specimen_Collection
self.processing = None
# type: list
# reference to Specimen_Processing
self.container = None
# type: list
# reference to Specimen_Container: identifier
self.note = None
# type: list
# reference to Annotation
self.identifier = None
# type: list
# reference to Identifier
if dict_values:
self.set_attributes(dict_values)
self.assert_type()
    def assert_type(self):
        # status is a plain string, so validate it directly rather than
        # iterating over its characters
        if self.status is not None and self.status.lower() not in [
                'available', 'unavailable', 'unsatisfactory', 'entered-in-error']:
            raise ValueError('"{}" does not match possible values: {}'.format(
                self.status, 'available, unavailable, unsatisfactory, entered-in-error'))
def get_relationships(self):
return [
{'parent_entity': 'Specimen_Container',
'parent_variable': 'identifier',
'child_entity': 'Specimen',
'child_variable': 'container'},
{'parent_entity': 'Identifier',
'parent_variable': 'object_id',
'child_entity': 'Specimen',
'child_variable': 'identifier'},
{'parent_entity': 'Reference',
'parent_variable': 'identifier',
'child_entity': 'Specimen',
'child_variable': 'parent'},
{'parent_entity': 'CodeableConcept',
'parent_variable': 'object_id',
'child_entity': 'Specimen',
'child_variable': 'type'},
{'parent_entity': 'Identifier',
'parent_variable': 'object_id',
'child_entity': 'Specimen',
'child_variable': 'accessionIdentifier'},
{'parent_entity': 'Annotation',
'parent_variable': 'object_id',
'child_entity': 'Specimen',
'child_variable': 'note'},
{'parent_entity': 'Specimen_Processing',
'parent_variable': 'object_id',
'child_entity': 'Specimen',
'child_variable': 'processing'},
{'parent_entity': 'Reference',
'parent_variable': 'identifier',
'child_entity': 'Specimen',
'child_variable': 'request'},
{'parent_entity': 'Reference',
'parent_variable': 'identifier',
'child_entity': 'Specimen',
'child_variable': 'subject'},
{'parent_entity': 'Specimen_Collection',
'parent_variable': 'object_id',
'child_entity': 'Specimen',
'child_variable': 'collection'},
]
class Specimen_Collection(fhirbase):
"""
A sample to be used for analysis.
Args:
collector: Person who collected the specimen.
collectedDateTime: Time when specimen was collected from subject - the
physiologically relevant time.
collectedPeriod: Time when specimen was collected from subject - the
physiologically relevant time.
quantity: The quantity of specimen collected; for instance the volume
of a blood sample, or the physical measurement of an anatomic
pathology sample.
method: A coded value specifying the technique that is used to perform
the procedure.
bodySite: Anatomical location from which the specimen was collected
(if subject is a patient). This is the target site. This element is
not used for environmental specimens.
"""
__name__ = 'Specimen_Collection'
def __init__(self, dict_values=None):
self.collector = None
# reference to Reference: identifier
self.collectedDateTime = None
# type: str
self.collectedPeriod = None
# reference to Period
self.quantity = None
# reference to Quantity
self.method = None
# reference to CodeableConcept
self.bodySite = None
# reference to CodeableConcept
self.object_id = None
# unique identifier for object class
if dict_values:
self.set_attributes(dict_values)
def get_relationships(self):
return [
{'parent_entity': 'Period',
'parent_variable': 'object_id',
'child_entity': 'Specimen_Collection',
'child_variable': 'collectedPeriod'},
{'parent_entity': 'Reference',
'parent_variable': 'identifier',
'child_entity': 'Specimen_Collection',
'child_variable': 'collector'},
{'parent_entity': 'CodeableConcept',
'parent_variable': 'object_id',
'child_entity': 'Specimen_Collection',
'child_variable': 'bodySite'},
{'parent_entity': 'Quantity',
'parent_variable': 'object_id',
'child_entity': 'Specimen_Collection',
'child_variable': 'quantity'},
{'parent_entity': 'CodeableConcept',
'parent_variable': 'object_id',
'child_entity': 'Specimen_Collection',
'child_variable': 'method'},
]
class Specimen_Processing(fhirbase):
"""
A sample to be used for analysis.
Args:
description: Textual description of procedure.
procedure: A coded value specifying the procedure used to process the
specimen.
additive: Material used in the processing step.
timeDateTime: A record of the time or period when the specimen
processing occurred. For example the time of sample fixation or the
period of time the sample was in formalin.
timePeriod: A record of the time or period when the specimen
processing occurred. For example the time of sample fixation or the
period of time the sample was in formalin.
"""
__name__ = 'Specimen_Processing'
def __init__(self, dict_values=None):
self.description = None
# type: str
self.procedure = None
# reference to CodeableConcept
self.additive = None
# type: list
# reference to Reference: identifier
self.timeDateTime = None
# type: str
self.timePeriod = None
# reference to Period
self.object_id = None
# unique identifier for object class
if dict_values:
self.set_attributes(dict_values)
def get_relationships(self):
return [
{'parent_entity': 'Reference',
'parent_variable': 'identifier',
'child_entity': 'Specimen_Processing',
'child_variable': 'additive'},
{'parent_entity': 'Period',
'parent_variable': 'object_id',
'child_entity': 'Specimen_Processing',
'child_variable': 'timePeriod'},
{'parent_entity': 'CodeableConcept',
'parent_variable': 'object_id',
'child_entity': 'Specimen_Processing',
'child_variable': 'procedure'},
]
class Specimen_Container(fhirbase):
"""
A sample to be used for analysis.
Args:
identifier: Id for container. There may be multiple; a manufacturer's
bar code, lab assigned identifier, etc. The container ID may differ
from the specimen id in some circumstances.
description: Textual description of the container.
type: The type of container associated with the specimen (e.g. slide,
aliquot, etc.).
capacity: The capacity (volume or other measure) the container may
contain.
specimenQuantity: The quantity of specimen in the container; may be
volume, dimensions, or other appropriate measurements, depending on
the specimen type.
additiveCodeableConcept: Introduced substance to preserve, maintain or
enhance the specimen. Examples: Formalin, Citrate, EDTA.
additiveReference: Introduced substance to preserve, maintain or
enhance the specimen. Examples: Formalin, Citrate, EDTA.
"""
__name__ = 'Specimen_Container'
def __init__(self, dict_values=None):
self.description = None
# type: str
self.type = None
# reference to CodeableConcept
self.capacity = None
# reference to Quantity
self.specimenQuantity = None
# reference to Quantity
self.additiveCodeableConcept = None
# reference to CodeableConcept
self.additiveReference = None
# reference to Reference: identifier
self.identifier = None
# type: list
# reference to Identifier
if dict_values:
self.set_attributes(dict_values)
def get_relationships(self):
return [
{'parent_entity': 'CodeableConcept',
'parent_variable': 'object_id',
'child_entity': 'Specimen_Container',
'child_variable': 'additiveCodeableConcept'},
{'parent_entity': 'Quantity',
'parent_variable': 'object_id',
'child_entity': 'Specimen_Container',
'child_variable': 'specimenQuantity'},
{'parent_entity': 'Quantity',
'parent_variable': 'object_id',
'child_entity': 'Specimen_Container',
'child_variable': 'capacity'},
{'parent_entity': 'CodeableConcept',
'parent_variable': 'object_id',
'child_entity': 'Specimen_Container',
'child_variable': 'type'},
{'parent_entity': 'Reference',
'parent_variable': 'identifier',
'child_entity': 'Specimen_Container',
'child_variable': 'additiveReference'},
{'parent_entity': 'Identifier',
'parent_variable': 'object_id',
'child_entity': 'Specimen_Container',
'child_variable': 'identifier'},
]
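if __name__ == '__main__':
    # Hedged usage sketch (not part of the original module); it assumes
    # fhirbase.set_attributes simply copies matching dict keys onto the instance.
    sample = Specimen({'status': 'available', 'receivedTime': '2020-01-01T00:00:00Z'})
    print(sample.resourceType, sample.status)
    print(len(sample.get_relationships()), 'relationship mappings')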
| 2.546875
| 3
|
linux/rest/workspace/radoop.py
|
petergyorgy/virtue
| 0
|
12780583
|
<reponame>petergyorgy/virtue<gh_stars>0
#from win32com.client import Dispatch
import winshell
from pathlib import Path
import sys, os
import subprocess
import pprint
import base64
from os import listdir
from os.path import isfile, join, isdir
from shutil import rmtree
import pc
import ezfuncs
rdpskel = ""
#load the rdp template into memory
#grabs it from the API
def loadrdp():
global rdpskel
pc.pc('Grabbing RDP template')
sys.stdout.flush()
try:
rdpskel = ezfuncs.getit("/rdp.template").decode('utf-8')
except:
rdpskel = ''
pc.pc('Unable to grab RDP template', pc.FAIL)
raise
pprint.pprint(rdpskel)
# removes the shortcuts files on the desktop for virtues that no longer exist
# called approx every 10 seconds with a list of currently active virtues (aka a list of DONT DELETE ME!s)
#also called on quit of the service. to clean up any leftovers
#creates a desktop shortcut for every Virtue application.
def outputshortcuts(v, desktop = '', path = ''):
# pc.pc("Outputting shortcuts for rdps")
# print( "writing desktop stuff for {}".format( v['roleID'] ) )
if not desktop:
desktop = str(Path.home()) + '/Desktop/'
if not path:
path = str(Path.home()) + '/.virtuerdps/'
folder = join( desktop, v['roleID'] + " virtue" )
if( not( isdir( folder ) ) ):
os.mkdir( folder )
# print( "Folder is {}".format( folder ) )
    #todo a config?
for a in v['applications']:
sessionid=a['appID'] + '.' + v['virtueID']
fid=path + sessionid + '.rdp'
if 'desktop' in a and a['desktop']:
shapath = join(folder, a['desktop'])
else:
shapath = join(folder, sessionid)
if 'roleID' in v:
shpath = shapath + '-' + v['roleID'] + '.lnk'
else:
shpath = shapath + '.lnk'
iname = ''
if 'iconpath' in a:
iname = a['iconpath']
#todo dont overwrite if already exists?
with winshell.shortcut(shpath) as sh:
sh.path = fid
sh.icon_location = (iname, 0)
# sh.arguments = r"--session-conf=" +str(fid)+""
sh.working_directory = str(Path.home())
sh.description = v['virtueID']
sh.write(shpath)
#cleans the rdp files for virtues that no longer exist
#called basically right after cleanshortcuts
def cleanrdps(vs, path = ''):
if not path:
path = str(Path.home()) + '/.virtuerdps/'
try:
files = {f:join(path, f) for f in listdir(path) if f.endswith('.rdp')}
except:
#in case the path doesnt exist
return
#early out i guess?
if not files:
return
cleanups = []
for k,v in files.items():
try:
##get virtueid from filename
if all( vid not in vs for vid in k.split('.')):
cleanups.append(v)
# os.remove(v)
except:
continue
for f in cleanups:
try:
pc.pc("removing rdp file " + f);
os.remove(f)
except:
pass
def outputrdps(virtues, path = ''):
pc.pc("Outpudding rdps for")
if not path:
path = str(Path.home()) + '/.virtuerdps/'
os.makedirs(path, exist_ok=True)
#lets process the skel
#trigger the cache to grab it if it doesnt exist
if not rdpskel:
loadrdp()
for k, v in virtues.items():
vpass = ezfuncs.sshhashchain( [v['roleID'],v['virtueID'] ] )
unbinned = base64.b85encode( vpass ).decode('utf-8')
pscmd = 'ConvertTo-SecureString "{}" -AsPlainText -Force | ConvertFrom-SecureString'.format( unbinned )
si = subprocess.STARTUPINFO()
si.dwFlags = subprocess.STARTF_USESHOWWINDOW
si.wShowWindow = subprocess.SW_HIDE
psexe = subprocess.run( ["powershell.exe","-Command",pscmd], stdout=subprocess.PIPE, stdin=subprocess.DEVNULL, stderr=subprocess.DEVNULL, startupinfo=si )
bpass = psexe.stdout.strip().decode('ascii')
temp = rdpskel.replace('__VIRTUE', v['virtueID'])
temp = temp.replace('__HOST', v['host'])
temp = temp.replace('__BPASS', bpass )
temp = temp.replace('__PORT', str(v['port']))
temp = temp.replace('__KEY', (str(Path.home()) + '/.ssh/id_rsa').strip().replace("\\", "/")) #todo make this nicer?
for a in v['applications']:
sessionid=a['appID'] + '.' + v['virtueID']
fid=path + sessionid + '.rdp'
#TODO check if the file already exists!
            bemp = temp.replace('__SESSIONID', sessionid)
            bemp = bemp.replace('__CMD', a['launchCmd'])
with open(fid, 'w') as f:
f.write(bemp)
sys.stdout.flush()
| 2.484375
| 2
|
Basic/rockPaper.py
|
tusharad/python_practice_problems
| 0
|
12780584
|
# Make a two-player Rock-Paper-Scissors game. (Hint: Ask for player plays (using input),
# compare them, print out a message of congratulations to the winner, and ask if the
# players want to start a new game) Rules:
# Rock beats scissors
# Scissors beats paper
# Paper beats rock
def gameOn():
player1 = int(input("Player 1\nEnter your choice:\n1.Rock\n2.Paper\n3.Scissor: "))
player2 = int(input("Player 2\nEnter your choice:\n1.Rock\n2.Paper\n3.Scissor: "))
if(player1 == player2):
return "It's a tie!"
elif (player1 == 1 and player2 == 3):
return "player 1 won!"
elif (player1 == 3 and player2 == 2):
return "player 1 won!"
elif (player1 == 2 and player2 == 1):
return "player 1 won!"
else:
return "player 2 won!"
print(gameOn())
while(True):
print("Want play again?(yes/no)")
ans = input()
if(ans == 'yes'):
print(gameOn())
else:
break
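# --- Hedged alternative sketch (not part of the original script) ---
# The same win rules expressed as a lookup table: beats[x] is the choice x defeats
# (1 = Rock, 2 = Paper, 3 = Scissor).
def winner(player1, player2):
    beats = {1: 3, 2: 1, 3: 2}
    if player1 == player2:
        return "It's a tie!"
    return "player 1 won!" if beats[player1] == player2 else "player 2 won!"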
| 3.9375
| 4
|
custom_components/ariston/config_flow.py
|
fustom/ariston-remotethermo-home-assistant-v3
| 5
|
12780585
|
"""Config flow for Ariston integration."""
from __future__ import annotations
import logging
import voluptuous as vol
from typing import Any
from homeassistant import config_entries
from homeassistant.const import (
CONF_DEVICE,
CONF_PASSWORD,
CONF_SCAN_INTERVAL,
CONF_USERNAME,
)
from homeassistant.core import callback
from homeassistant.data_entry_flow import FlowResult
from .const import (
DEFAULT_ENERGY_SCAN_INTERVAL_MINUTES,
DEFAULT_EXTRA_ENERGY_FEATURES,
DEFAULT_SCAN_INTERVAL_SECONDS,
DOMAIN,
ENERGY_SCAN_INTERVAL,
EXTRA_ENERGY_FEATURES,
)
from .ariston import AristonAPI, DeviceAttribute
_LOGGER = logging.getLogger(__name__)
STEP_USER_DATA_SCHEMA = vol.Schema(
{
vol.Required(CONF_USERNAME): str,
vol.Required(CONF_PASSWORD): str,
}
)
class AristonConfigFlow(config_entries.ConfigFlow, domain=DOMAIN):
"""Ariston Config Flow."""
VERSION = 1
def __init__(self):
self.api: AristonAPI = None
self.cloud_username: str = None
self.cloud_password: str = None
self.cloud_devices = {}
async def async_step_user(
self, user_input: dict[str, Any] | None = None
) -> FlowResult:
"""Handle the initial step."""
if user_input is None:
return self.async_show_form(
step_id="user", data_schema=STEP_USER_DATA_SCHEMA
)
errors = {}
try:
self.cloud_username = user_input[CONF_USERNAME]
self.cloud_password = user_input[CONF_PASSWORD]
self.api = AristonAPI(self.cloud_username, self.cloud_password)
response = await self.api.async_connect()
if not response:
errors["base"] = "invalid_auth"
except Exception: # pylint: disable=broad-except
_LOGGER.exception("Unexpected exception")
errors["base"] = "unknown"
else:
cloud_devices = await self.api.async_get_devices()
if len(cloud_devices) == 1:
cloud_device = cloud_devices[0]
existing_entry = await self.async_set_unique_id(
cloud_device[DeviceAttribute.GW_ID], raise_on_progress=False
)
if existing_entry:
data = existing_entry.data.copy()
self.hass.config_entries.async_update_entry(
existing_entry, data=data
)
await self.hass.config_entries.async_reload(existing_entry.entry_id)
return self.async_abort(reason="reauth_successful")
return self.async_create_entry(
title=cloud_device[DeviceAttribute.PLANT_NAME],
data={
CONF_USERNAME: self.cloud_username,
                        CONF_PASSWORD: self.cloud_password,
CONF_DEVICE: cloud_device,
},
)
for device in cloud_devices:
name = device[DeviceAttribute.PLANT_NAME]
model = device[DeviceAttribute.GW_SERIAL]
list_name = f"{name} - {model}"
self.cloud_devices[list_name] = device
return await self.async_step_select()
return self.async_show_form(
step_id="user", data_schema=STEP_USER_DATA_SCHEMA, errors=errors
)
async def async_step_select(self, user_input=None):
"""Multiple device found, select one of them"""
errors = {}
if user_input is not None:
cloud_device = self.cloud_devices[user_input["select_device"]]
return self.async_create_entry(
title=cloud_device[DeviceAttribute.PLANT_NAME],
data={
CONF_USERNAME: self.cloud_username,
                    CONF_PASSWORD: self.cloud_password,
CONF_DEVICE: cloud_device,
},
)
select_schema = vol.Schema(
{vol.Required("select_device"): vol.In(list(self.cloud_devices))}
)
return self.async_show_form(
step_id="select", data_schema=select_schema, errors=errors
)
@staticmethod
@callback
def async_get_options_flow(config_entry):
"""Get the options flow for this handler."""
return AristonOptionsFlow(config_entry)
class AristonOptionsFlow(config_entries.OptionsFlow):
"""Handle Ariston options."""
def __init__(self, config_entry):
"""Initialize Ariston options flow."""
self.config_entry = config_entry
async def async_step_init(self, user_input=None):
"""Manage the options."""
if user_input is not None:
return self.async_create_entry(title="", data=user_input)
options = self.config_entry.options
extra_energy_features = options.get(
EXTRA_ENERGY_FEATURES, DEFAULT_EXTRA_ENERGY_FEATURES
)
scan_interval = options.get(CONF_SCAN_INTERVAL, DEFAULT_SCAN_INTERVAL_SECONDS)
energy_scan_interval = options.get(
ENERGY_SCAN_INTERVAL, DEFAULT_ENERGY_SCAN_INTERVAL_MINUTES
)
return self.async_show_form(
step_id="init",
data_schema=vol.Schema(
{
vol.Optional(
EXTRA_ENERGY_FEATURES,
default=extra_energy_features,
): bool,
vol.Optional(
CONF_SCAN_INTERVAL,
default=scan_interval,
): int,
vol.Optional(
ENERGY_SCAN_INTERVAL,
default=energy_scan_interval,
): int,
}
),
last_step=True,
)
| 2.1875
| 2
|
src/backend/libro/obtener/app.py
|
gpeitzner/SA_EZREAD
| 0
|
12780586
|
<reponame>gpeitzner/SA_EZREAD<filename>src/backend/libro/obtener/app.py
from flask import Flask, request
import os
import json
import pymongo
from bson.objectid import ObjectId
from bson.json_util import dumps
from flask_cors import CORS
app = Flask(__name__)
CORS(app)
db_host = os.environ["db_host"] if "db_host" in os.environ else "localhost"
db_password = os.environ["db_password"] if "db_password" in os.environ else ""
db_port = int(os.environ["db_port"]) if "db_port" in os.environ else 27017
db_name = os.environ["db_name"] if "db_name" in os.environ else "ezread"
db_user = os.environ["db_user"] if "db_user" in os.environ else ""
client = pymongo.MongoClient(
    host=db_host, port=db_port, username=db_user, password=db_password)
db = client[str(db_name)]
col = db["libros"]
colLogs = db["logs"]
@app.route("/")
def main2():
return "<p>libro_obtener</p>"
@app.route("/libros") # obbtener todos los libros registrados.
def main():
if request.method == 'GET':
busqueda = col.find()
if busqueda:
libros = []
for libro in busqueda:
if (libro['Activo']):
libros.append({"id": str(libro['_id']), "Titulo": str(libro['Titulo']), "Editorial": str(
libro['Editorial']), "Autor": str(libro['Autor']), "Genero": str(libro['Genero']),
"Cantidad": libro['Cantidad'], "Activo": libro['Activo'], "Precio": libro['Precio'],
"Imagen": libro['Imagen']})
return {'libros': libros}
else:
return {"libros": []}
@app.route('/libro', methods=['GET'])  # look up a book by id
def obtenerLibro():
if request.method == 'GET':
content = request.args.get('id')
libro = col.find_one({'_id': ObjectId(content)})
if libro:
if (libro['Activo']):
return {'libro': {"id": str(libro['_id']), "Titulo": str(libro['Titulo']), "Editorial": str(
libro['Editorial']), "Autor": str(libro['Autor']), "Genero": str(libro['Genero']),
"Cantidad": libro['Cantidad'], "Activo": libro['Activo'], "Precio": libro['Precio'],
"Imagen": libro['Imagen']}}
else:
return {"mensaje": "Libro fuera de stock"}
else:
return {"mensaje": "Libro no existe"}
# fetch all genres with their books
@app.route('/libros/Generos', methods=['GET'])
def obtenerGeneros():
if request.method == 'GET':
ret = col.aggregate([
{'$group': {
'_id': "$Genero",
'libros': {'$push':
{
'id': '$_id',
'Editorial': "$Editorial",
'Titulo': "$Titulo",
'Genero': "$Genero",
'Autor': "$Autor",
'Cantidad': "$Cantidad",
'Activo': "$Activo",
'Precio': "$Precio",
'Imagen': "$Imagen"
},
}
}
}
])
if ret:
l = list(ret)
list2 = []
for genero in l:
books = []
for libro in genero['libros']:
if (libro['Activo']):
books.append({"id": str(libro['id']), "Titulo": str(libro['Titulo']), "Editorial": str(
libro['Editorial']), "Autor": str(libro['Autor']),
"Cantidad": libro['Cantidad'], "Activo": libro['Activo'], "Precio": libro['Precio'],
"Imagen": libro['Imagen']})
list2.append({'Genero': genero['_id'], 'libros': books})
return {'Generos': list2}
else:
return {'Generos': []}
# get all publishers with their books
@app.route('/libros/Editoriales', methods=['GET'])
def obtenerEditoriales():
if request.method == 'GET':
ret = col.aggregate([
{'$group': {
'_id': "$Editorial",
'libros': {'$push':
{
'id': '$_id',
'Editorial': "$Editorial",
'Titulo': "$Titulo",
'Genero': "$Genero",
'Autor': "$Autor",
'Cantidad': "$Cantidad",
'Activo': "$Activo",
'Imagen': "$Imagen",
'Precio': "$Precio"
},
}
}
}
])
if ret:
l = list(ret)
list2 = []
for editorial in l:
books = []
for libro in editorial['libros']:
if (libro['Activo']):
books.append({"id": str(libro['id']), "Titulo": str(libro['Titulo']), "Genero": str(
libro['Genero']), "Autor": str(libro['Autor']),
"Cantidad": libro['Cantidad'], "Activo": libro['Activo'], "Precio": libro['Precio'],
"Imagen": libro['Imagen']})
list2.append({'Editorial': editorial['_id'], 'libros': books})
return {'Editoriales': list2}
else:
return {'Editoriales': []}
# get the books of a specific publisher
@app.route('/libros/byEditorial', methods=['GET'])
def obtenerbyEditorial():
if request.method == 'GET':
editorial = request.args.get('user')
ret = col.aggregate([
{
'$match': {'Editorial': editorial}
}
])
if ret:
l = list(ret)
books = []
for libro in l:
if (libro['Activo']):
books.append({"id": str(libro['_id']), "Titulo": str(libro['Titulo']), "Genero": str(
libro['Genero']), "Autor": str(libro['Autor']),
"Cantidad": libro['Cantidad'], "Activo": libro['Activo'], "Precio": libro['Precio'],
"Imagen": libro['Imagen']})
return {'libros': books}
else:
return {'libros': []}
@app.route("/logs", methods=['GET']) # obbtener todos los libros registrados.
def logFunction():
if request.method == 'GET':
busqueda = colLogs.find()
if busqueda:
print('busqueda', busqueda)
logs = []
for log in busqueda:
print(log)
logs.append({"id": str(log['_id']), "Operacion": str(log['Operacion']), "Libro": str(
log['Libro']), "Editorial": str(log['Editorial']), "Descripcion": str(log['Descripcion']), "Fecha": str(log['Fecha:'])})
return {'logs': logs}
else:
return {"logs": []}
| 2.125
| 2
|
tilezilla/cli/main.py
|
ceholden/landsat_tiles
| 5
|
12780587
|
import logging
from pkg_resources import iter_entry_points
import click
import click_plugins
from . import options
from .. import __version__
_context = dict(
token_normalize_func=lambda x: x.lower(),
help_option_names=['--help', '-h'],
auto_envvar_prefix='TILEZILLA'
)
LOG_FORMAT = '%(asctime)s:%(levelname)s:%(message)s'
LOG_DATE_FORMAT = '%H:%M:%S'
@click_plugins.with_plugins(ep for ep in
iter_entry_points('tilez.commands'))
@click.group(help='tilezilla command line interface',
context_settings=_context)
@options.opt_config_file
@click.option('--verbose', '-v', count=True, help='Be louder')
@click.option('--quiet', '-q', count=True, help='Be quieter')
@click.version_option(__version__)
@click.pass_context
def cli(ctx, config_file, verbose, quiet):
verbosity = verbose - quiet
log_level = 20 - 10 * verbosity
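    # map verbosity onto the standard logging levels: 0 -> INFO (20),
    # each -v subtracts 10 (towards DEBUG), each -q adds 10 (towards WARNING)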
# Logging config for tilez
logger = logging.getLogger('tilez')
formatter = logging.Formatter(LOG_FORMAT, LOG_DATE_FORMAT)
handler = logging.StreamHandler(click.get_text_stream('stdout'))
handler.setFormatter(formatter)
logger.addHandler(handler)
logger.setLevel(max(10, log_level)) # never below DEBUG (10)
# Logging for main module
main_logger = logging.getLogger('tilezilla')
    if log_level <= 0:  # log_level=NOTSET (0) sets main logger to debug
main_logger.setLevel(logging.DEBUG)
# Parse config
ctx.obj = ctx.obj or {}
if config_file:
from ..config import parse_config
ctx.obj['config'] = parse_config(config_file)
| 2.015625
| 2
|
twixtools/map_twix.py
|
mrphysics-bonn/twixtools
| 25
|
12780588
|
<reponame>mrphysics-bonn/twixtools<gh_stars>10-100
import copy
import numpy as np
import twixtools
from twixtools.recon_helpers import (
remove_oversampling, calc_regrid_traj, perform_regrid
)
# define categories in which the twix data should be sorted based on MDH flags
# that must or must not be set (True/False)
# only one 'any' per category allowed (however, it is possible to add other
# appropriate functions (even synonyms for any))
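# e.g. an mdb falls into a category only if every plain flag equals its required value
# and, for the 'any' entry, at least one of the listed flags matches (see map_twix below)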
twix_category = {
'image': {'RTFEEDBACK': False, 'HPFEEDBACK': False,
'REFPHASESTABSCAN': False, 'PHASESTABSCAN': False,
'PHASCOR': False, 'NOISEADJSCAN': False,
any: {'PATREFSCAN': False, 'PATREFANDIMASCAN': True},
'noname60': False},
'noise': {'NOISEADJSCAN': True},
'phasecorr': {'PHASCOR': True, 'PATREFSCAN': False, 'noname60': False},
'phasestab': {'PHASESTABSCAN': True, 'REFPHASESTABSCAN': False,
'noname60': False,
any: {'PATREFSCAN': False, 'PATREFANDIMASCAN': True}},
'refphasestab0': {'REFPHASESTABSCAN': True, 'PHASESTABSCAN': False,
'noname60': False,
any: {'PATREFSCAN': False, 'PATREFANDIMASCAN': True}},
'refphasestab1': {'REFPHASESTABSCAN': True, 'PHASESTABSCAN': True,
'noname60': False,
any: {'PATREFSCAN': False, 'PATREFANDIMASCAN': True}},
'refscan': {any: {'PATREFSCAN': True, 'PATREFANDIMASCAN': True},
'PHASCOR': False, 'PHASESTABSCAN': False,
'REFPHASESTABSCAN': False, 'RTFEEDBACK': False,
'HPFEEDBACK': False, 'noname60': False},
'ref_pc': {any: {'PATREFSCAN': True, 'PATREFANDIMASCAN': True},
'PHASCOR': True},
'ref_ps': {any: {'PATREFSCAN': True, 'PATREFANDIMASCAN': True},
'REFPHASESTABSCAN': False, 'PHASESTABSCAN': True},
'ref_ps_ref0': {any: {'PATREFSCAN': True, 'PATREFANDIMASCAN': True},
'REFPHASESTABSCAN': True, 'PHASESTABSCAN': False},
'ref_ps_ref1': {any: {'PATREFSCAN': True, 'PATREFANDIMASCAN': True},
'REFPHASESTABSCAN': True, 'PHASESTABSCAN': True},
'rt_feedback': {any: {'RTFEEDBACK': True, 'HPFEEDBACK': True},
'MDH_VOP': False},
'vop': {'MDH_VOP': True},
    'fidnav': {'noname60': True}  # this category is the reason for the 'noname60' checks above
}
def map_twix(input):
""" creates a list of measurements (or a single dict if input was dict)
with data for each measurement mapped to a twix_array object.
    Parameters
----------
input: string, int, list, or dict
If the filename or its measurement id are passed as a string or int,
respectively, the corresponding twix file is first parsed using
`read_twix`. Alternatively, it is possible to directly pass a scan list
(as returned by `read_twix`) to `map_twix` or to pass only a dict that
includes header information and mdb list of a single twix scan.
Returns:
----------
out: dict of twix_array objects
A twix_array object is created for each data category (as defined by
`twix_category`) that is encountered in the input.
The twix_array object includes the header information (twix_array.hdr)
as well as access to the underlying data via array slicing of a virtual
        'k-space'-like array that is designed to closely mimic a
`numpy.ndarray` object (and indeed returns a `numpy.ndarray`).
Examples:
----------
Read the data and then select only the twix_array object that contains
image data:
>>> twix = map_twix(filename)
>>> im_array = twix['image']
Now set a few optional flags that control additional features and determine
the shape of the output array:
>>> im_array.flags['remove_os'] = True # activate automatic os removal
>>> im_array.flags['regrid'] = True # activate ramp sampling regridding
>>> im_array.flags['average']['Rep'] = True # average all repetitions
>>> im_array.flags['squeeze_singletons'] = True # squeezes singleton dims
Print all available flags and their values:
>>> print(im_array.flags)
Print the shape of the data and the names of the active dimensions:
>>> print(im_array.shape)
>>> print(im_array.dims)
And finally read the data:
>>> im_data = im_array[:]
Alternatively, we can for example only select the data for the first
receiver channel:
>>> im_data0 = im_array[...,0,:]
All standard array slicing operations should be supported.
"""
if isinstance(input, list):
# assume list of measurements
twix = input
elif isinstance(input, dict):
# assume measurement dict
# return twix_array of the input (no measurement list)
twix = [input]
else:
# assume that this is the filename or a meas id
twix = twixtools.read_twix(input)
out = list()
for meas in twix:
if not isinstance(meas, dict):
continue
# append new dict to output list
out.append(dict())
# sort mdbs into categories
for mdb in meas['mdb']:
if mdb.is_flag_set('SYNCDATA'): # ignore syncdata
continue
if mdb.is_flag_set('ACQEND'):
break
for category, rqmts in twix_category.items():
include_in_cat = True
for flag in rqmts.keys():
if isinstance(flag, str): # check whether flag is set
if mdb.is_flag_set(flag) != rqmts[flag]:
include_in_cat = False
break
else: # assume a function call (probably any())
checks = list()
for flag2 in rqmts[flag].keys():
checks.append(
mdb.is_flag_set(flag2) == rqmts[flag][flag2])
if not flag(checks):
include_in_cat = False
break
if include_in_cat:
if category not in out[-1]:
out[-1][category] = list()
out[-1][category].append(mdb)
# convert each categories' mdb list to twix_array
for category in out[-1].keys():
out[-1][category] = twix_array(out[-1][category],
meas['hdr'].copy())
# include hdr in dict
out[-1]['hdr'] = meas['hdr'].copy()
out[-1]['hdr_str'] = meas['hdr_str'].copy()
# go back to dict if input was dict
if isinstance(input, dict):
out = out[0]
return out
class twix_array():
"""Memory-mapped storage class for Siemens MRI raw data.
The twix array class constructs a virtual multi-dimensional array from a
list of mdb objects, that tries to closely resemble a numpy.ndarray with
standard array slicing operations. The selected array is then read from the
twix file (all reading operations are handled by the Mdb class) and
returned in the form of a multi-dimensional numpy.ndarray.
Note that additional flags can change the shape of the virtual array.
Important Attributes
----------
ndim: int
number of output dimensions. May change depending on `flags`.
shape: tuple
shape of the output array. May change depending on `flags`.
dims: list
List of names of output dimensions. May change depending on `flags`.
non_singleton_dims: list
Returns list of non-singleton dimensions.
dim_order: tuple
List of the standard dimension order (immutable).
hdr: dict
twix header information
flags: dict
Dict of optional flags. The following flags are currently supported:
- 'average': dict of bools that determines which dimensions should
be averaged.
- 'squeeze_ave_dims': bool that determines whether averaged
dimensions should be removed/squeezed from the array's shape.
- 'squeeze_singletons': bool that determines whether singleton
dimensions should be removed ('True' makes previous option irrelevant)
- 'remove_os': oversampling removal. Reduces the number of columns
by a factor of two.
- 'regrid': bool that controls ramp-sampling regridding (if applicable)
- 'skip_empty_lead': skips to first line & partition that is found
in mdb list (e.g. if first line counter is 10, the output array
starts at line counter 10).
- 'zf_missing_lines': zero-fill k-space to include lines and partitions
that are higher than the maximum counter found in the mdb list, but
are still within the k-space matrix according to the twix header.
"""
def __init__(self, mdb_list, hdr=None, flags=None):
self.mdb_list = mdb_list.copy()
self.hdr = None
if hdr is not None:
self.hdr = copy.deepcopy(hdr)
self.rs_traj = calc_regrid_traj(self.hdr)
# delete 'ACQEND' and 'SYNCDATA' flags if present
twixtools.del_from_mdb_list(
self.mdb_list,
lambda b: b.is_flag_set('ACQEND') or b.is_flag_set('SYNCDATA'))
self._dim_order = (
"Ide", "Idd", "Idc", "Idb", "Ida", "Seg", "Set", "Rep",
"Phs", "Eco", "Par", "Sli", "Ave", "Lin", "Cha", "Col"
)
# dtype that includes all dims:
self.dt_dims = np.dtype([(name, "<u2") for name in self.dim_order])
# dtype that only includes counters (no cha & col)
self.dt_counters = np.dtype([(n, "<u2") for n in self.dim_order[:-2]])
self.key_map = {
'Ide': 'ushIde', 'Idd': 'ushIdd', 'Idc': 'ushIdc',
'Idb': 'ushIdb', 'Ida': 'ushIda', 'Seg': 'ushSeg',
'Set': 'ushSet', 'Rep': 'ushRepetition', 'Phs': 'ushPhase',
'Eco': 'ushEcho', 'Par': 'ushPartition', 'Sli': 'ushSlice',
'Ave': 'ushAcquisition', 'Lin': 'ushLine'
}
self.sorted_mdh_keys = [self.key_map[d] for d in self.dim_order[:-2]]
# determine k-space shape by finding max index
shp = np.ones(len(self.dt_dims), dtype=self.dt_dims[1])
self._first_ix = 1024 * np.ones(len(self.dt_dims)-2,
dtype=self.dt_dims[1])
for mdb in self.mdb_list:
sLC = mdb.mdh['sLC']
sLC = np.asarray(sLC[self.sorted_mdh_keys].tolist(),
dtype=sLC[0].dtype)
req_shape = 1 + sLC
# add channels & columns
req_shape = np.concatenate([req_shape,
[mdb.mdh['ushUsedChannels'],
mdb.mdh['ushSamplesInScan']]])
shp = np.maximum(shp, req_shape)
self._first_ix = np.minimum(self._first_ix, sLC)
self.base_size = np.ones(1, dtype=self.dt_dims)[0]
for key, item in enumerate(shp):
# complicated, can we do this conversion better? (proper casting?)
self.base_size[key] = item
self._flags = {'average': {item: False for item in self.dim_order},
'remove_os': False,
'regrid': False,
'squeeze_ave_dims': False,
'squeeze_singletons': False,
'skip_empty_lead': False,
'zf_missing_lines': False}
# 'Ave' should be averaged by default, Idx indices should be ignored:
for dim in ['Ide', 'Idd', 'Idc', 'Idb', 'Ida', 'Seg', 'Ave']:
self._flags['average'][dim] = True
# set flags that were passed in constructor call
if flags is not None:
for key, item in flags.items():
try:
self.flags[key] = item.copy()
except Exception:
self.flags[key] = item
def copy(self):
return self.__copy__()
def __copy__(self):
self._flags = self._flags.copy()
return twix_array(self.mdb_list, self.hdr, self.flags)
@property
def dim_order(self):
return self._dim_order
@property
def dims(self):
if self.flags['squeeze_singletons']:
return [name for name in self.dim_order if self.size[name] > 1]
elif self.flags['squeeze_ave_dims']:
return [name for name in self.dim_order if not self.flags['average'][name]]
else:
return self.dim_order
@property
def non_singleton_dims(self):
return [dim for dim in self.dim_order if self.size[dim] > 1]
@property
def ndim(self):
return len(self.dims)
@property
def flags(self):
# wip: although the flags dict itself is write-protected, its entries
# are currently not and can be overwritten by garbage!
return self._flags
@property
def size(self):
# self.size returns the shape of the data as a dtype with named
# elements for easier access
# averaged dims will be kept even if one of the 'squeeze' options is set
sz = self.base_size.copy()
if not self.flags['average']['Col'] and self.flags['remove_os']:
sz[self.dim_order.index('Col')] //= 2
if self.hdr is not None and self.flags['zf_missing_lines']:
if not self.flags['average']['Lin']:
hdr_lin = \
self.hdr['MeasYaps']['sKSpace']['lPhaseEncodingLines']
sz['Lin'] = max(sz['Lin'], hdr_lin)
if not self.flags['average']['Par']\
and self.hdr['MeasYaps']['sKSpace']['ucDimension'] > 2:
hdr_par = self.hdr['MeasYaps']['sKSpace']['lPartitions']
sz['Par'] = max(sz['Par'], hdr_par)
if self.flags['skip_empty_lead']:
if not self.flags['average']['Lin']:
sz['Lin'] -= self._first_ix[self.dim_order.index('Lin')]
if not self.flags['average']['Par']:
sz['Par'] -= self._first_ix[self.dim_order.index('Par')]
for dim in range(len(sz)):
if self.flags['average'][self.dim_order[dim]]:
sz[dim] = 1
return sz
@property
def shape(self):
# self.shape is the more numpy compatible version of self.size by
# returning a tuple
# 'squeeze_singletons': singleton dimensions are removed from shape
# 'squeeze_ave_dims': averaged dims are removed from shape
if self.flags['squeeze_singletons']:
return [sz for sz, name in zip(self.size.item(),
self.size.dtype.names) if self.size[name] > 1]
elif self.flags['squeeze_ave_dims']:
return [sz for sz, name in zip(self.size.item(),
self.size.dtype.names) if not self.flags['average'][name]]
else:
return self.size.item()
def __getitem__(self, index):
# implement array slicing here
# returns numpy.ndarray
self_dims = self.dims
self_ndim = self.ndim
self_shape = self.shape
if not isinstance(index, tuple):
index = (index,) # make sure to pass along tuple
if len(index) > self_ndim:
raise IndexError(
"too many indices for array: array is %d-dimensional, "
"but %d were indexed" % (self_ndim, len(index)))
ellipsis_in_index = False
selection = list()
remove_dim = list()
for key, item in enumerate(index):
if ellipsis_in_index:
key += self_ndim - len(index)
if item is Ellipsis:
if ellipsis_in_index:
raise IndexError(
"an index can only have a single ellipsis ('...')")
ellipsis_in_index = True
# fill selection with slice(None)
for _ in range(self_ndim - len(index) + 1):
selection.append(slice(None))
elif isinstance(item, slice):
if item == slice(None):
selection.append(item)
continue
if item.start is not None and item.start > self_shape[key]:
raise IndexError(
"index %d is out of bounds for axis %d with size %d"
% (item.start, key, self_shape[key]))
else:
ix = item.indices(self_shape[key])
selection.append(range(ix[0], ix[1], ix[2]))
else:
if isinstance(item, int):
item = [item]
remove_dim.append(key)
for k, i in enumerate(item):
if (i < -self_shape[key]) or (i >= self_shape[key]):
raise IndexError("index %d is out of bounds for axis "
"%d with size %d"
% (i, key, self_shape[key]))
# make sure to only keep positive indices
if i < 0:
item[k] = self.shape[key] + i
selection.append(item)
average_cha = self.flags['average']['Cha']
average_col = self.flags['average']['Col']
regrid = self.flags['regrid']
remove_os = self.flags['remove_os']
skip_empty_lead = self.flags['skip_empty_lead']
mdh_dims = [dim for dim in self_dims if dim not in ['Cha', 'Col']]
mdh_ndim = len(mdh_dims)
sLC_sel = [self.key_map[d] for d in mdh_dims]
dims_averaged = [self.flags['average'][dim] for dim in mdh_dims]
if skip_empty_lead:
lpos, ppos = self.dim_order.index('Lin'),\
self.dim_order.index('Par')
sLC_names = [item[0] for item in twixtools.mdh_def.mdhLC]
sLC_lpos, sLC_ppos = sLC_names.index('ushLine'),\
sLC_names.index('ushPartition')
target_sz = list(self_shape)
# to follow the python convention, single indices
# will reduce the output's dimensionality
for key, item in enumerate(selection):
if item != slice(None):
target_sz[key] = len(item)
out = np.zeros(target_sz, dtype='complex64')
# make sure that cha & col dim exist
if self.flags['squeeze_singletons']:
if self.size['Col'] == 1:
out = out[..., np.newaxis]
if self.size['Cha'] == 1:
out = out[..., np.newaxis, :]
elif self.flags['squeeze_ave_dims']:
if average_col:
out = out[..., np.newaxis]
if average_cha:
out = out[..., np.newaxis, :]
# 'vectorize' the output array for now
out = out.reshape([-1, out.shape[-2], out.shape[-1]])
# average counter to scale the data properly later
ave_counter = np.zeros(np.prod(out.shape[:-2]), dtype=np.uint16)
# now that we have our selection, we can read the data
# for this, we simply go through all mdb's and fill them in if selected
# this is not very efficient for large files, but fool-proof
for mdb in self.mdb_list:
sLC = mdb.mdh['sLC'].copy()
# # test early exit (for profiling)
# if sLC['ushRepetition']!=selection[0]:
# continue
if skip_empty_lead:
sLC[sLC_lpos] -= self._first_ix[lpos]
sLC[sLC_ppos] -= self._first_ix[ppos]
counters = sLC[sLC_sel]
# check if we have to read this mdb
do_not_read = False
for key, sel in enumerate(selection):
if sel == slice(None):
# all data selected, no counter check required for this dim
continue
if key >= mdh_ndim:
# skip col & cha
continue
if dims_averaged[key]:
# averaged dims are completely read
continue
if counters[key] not in sel:
do_not_read = True
break
if do_not_read:
# go to next mdb
continue
# read data
data = mdb.data
# average cha if requested
if average_cha:
data = data.mean(-2, keepdims=True)
# average col or apply oversampling removal if requested
if average_col:
data = data.mean(-1, keepdims=True)
else:
if regrid and self.rs_traj is not None\
and not mdb.is_flag_set('SKIP_REGRIDDING'):
data = perform_regrid(
data, self.rs_traj, mdb.mdh["fReadOutOffcentre"])
if remove_os:
data, _ = remove_oversampling(data)
# reflect data if mdh flag is set
if mdb.is_flag_set('REFLECT'):
data = data[..., ::-1]
ix = [int(0)]
for key in range(mdh_ndim):
if dims_averaged[key]:
pass # nothing to add
elif key >= len(selection) or selection[key] == slice(None):
block_sz = np.prod(target_sz[key+1:mdh_ndim])
ix = [i + int(counters[key] * block_sz) for i in ix]
else:
ix_new = list()
block_sz = np.prod(target_sz[key+1:mdh_ndim])
for sel_ix in list_indices(selection[key], counters[key]):
offset = sel_ix * block_sz
for i in ix:
ix_new.append(int(i + offset))
ix = ix_new
# only keep selected channels & columns
if 'Cha' not in self_dims:
# remove channel dim
data = data[0]
elif len(selection) > mdh_ndim:
# select channels
if 'Col' in self_dims:
data = data[selection[-2]]
else:
data = data[selection[-1]]
if 'Col' not in self_dims:
# remove column dim
data = data[..., 0]
elif len(selection) > self_ndim-1:
# select columns
data = data[:, selection[-1]]
out[ix] += data
# increment average counter for ix
ave_counter[ix] += 1
# scale data according to ave_counter:
ave_counter = np.maximum(ave_counter, 1)
out /= ave_counter[..., np.newaxis, np.newaxis]
# to follow the numpy convention,
# single indices will reduce the output's dimensionality
target_sz = [target_sz[key] for key in range(len(target_sz))
if key not in remove_dim]
return out.reshape(target_sz)
def list_indices(seq, item):
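    # return every index at which `item` occurs in the sequence `seq`
    # (assumes `item` occurs at least once, as guaranteed by the caller above)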
locs = [seq.index(item, 0)]
while True:
try:
loc = seq.index(item, 1+locs[-1])
except ValueError:
break
else:
locs.append(loc)
return locs
| 1.828125
| 2
|
recohut/datasets/retailrocket.py
|
sparsh-ai/recohut
| 0
|
12780589
|
# AUTOGENERATED! DO NOT EDIT! File to edit: nbs/datasets/datasets.retailrocket.ipynb (unless otherwise specified).
__all__ = ['RetailRocketDataset', 'RetailRocketDatasetv2']
# Cell
from typing import List, Optional, Callable, Union, Any, Tuple
import os
import os.path as osp
from collections.abc import Sequence
import sys
import numpy as np
import pandas as pd
from datetime import timezone, datetime, timedelta
import time
from ..utils.common_utils import download_url, extract_zip, makedirs
from .bases.common import Dataset
from .bases.session_graph import SessionGraphDataset
# Cell
class RetailRocketDataset(SessionGraphDataset):
train_url = "https://github.com/RecoHut-Datasets/retail_rocket/raw/v1/train.txt"
test_url = "https://github.com/RecoHut-Datasets/retail_rocket/raw/v1/test.txt"
all_train_seq_url = "https://github.com/RecoHut-Datasets/retail_rocket/raw/v1/all_train_seq.txt"
def __init__(self, root, shuffle=False, n_node=40727, is_train=True):
self.n_node = n_node
self.shuffle = shuffle
self.is_train = is_train
super().__init__(root, shuffle, n_node)
@property
def raw_file_names(self) -> str:
if self.is_train:
return ['train.txt', 'all_train_seq.txt']
return ['test.txt', 'all_train_seq.txt']
def download(self):
download_url(self.all_train_seq_url, self.raw_dir)
if self.is_train:
download_url(self.train_url, self.raw_dir)
else:
download_url(self.test_url, self.raw_dir)
# Internal Cell
def to_list(value: Any) -> Sequence:
if isinstance(value, Sequence) and not isinstance(value, str):
return value
else:
return [value]
def files_exist(files: List[str]) -> bool:
# NOTE: We return `False` in case `files` is empty, leading to a
# re-processing of files on every instantiation.
return len(files) != 0 and all([osp.exists(f) for f in files])
# Cell
class RetailRocketDatasetv2(Dataset):
r"""Load and process RetailRocket dataset.
Args:
root (string): Root directory where the dataset should be saved.
process_method (string):
last: last day => test set
last_min_date: last day => test set, but from a minimal date onwards
days_test: last N days => test set
slice: create multiple train-test-combinations with a sliding window approach
min_date (string, optional): Minimum date
        session_length (int, optional): Session idle timeout in seconds (default: 30 * 60, i.e. 30 minutes)
min_session_length (int, optional): Minimum number of items for a session to be valid
min_item_support (int, optional): Minimum number of interactions for an item to be valid
        num_slices (int, optional): Number of train-test slices to create
        days_offset (int, optional): Offset in days from the first date in the data set
        days_shift (int, optional): Number of days the training start date is shifted after creating one slice
days_train (int, optional): Days in train set in each slice
days_test (int, optional): Days in test set in each slice
"""
url = 'https://github.com/RecoHut-Datasets/retail_rocket/raw/v2/retailrocket.zip'
def __init__(self, root, process_method, min_date='2015-09-02',
session_length=30*60, min_session_length=2, min_item_support=5,
num_slices=5, days_offset=0, days_shift=27, days_train=25, days_test=2):
super().__init__(root)
self.process_method = process_method
self.min_date = min_date
self.session_length = session_length
self.min_session_length = min_session_length
self.min_item_support = min_item_support
self.num_slices = num_slices
self.days_offset = days_offset
self.days_shift = days_shift
self.days_train = days_train
self.days_test = days_test
self.data = None
self.cart = None
self._process()
@property
def raw_file_names(self) -> str:
return 'events.csv'
@property
def processed_file_names(self) -> str:
return 'data.pkl'
def download(self):
path = download_url(self.url, self.raw_dir)
extract_zip(path, self.raw_dir)
from shutil import move, rmtree
move(osp.join(self.raw_dir, 'retailrocket', 'events.csv'),
osp.join(self.raw_dir, 'events.csv'))
rmtree(osp.join(self.raw_dir, 'retailrocket'))
os.unlink(path)
def load(self):
#load csv
data = pd.read_csv(osp.join(self.raw_dir,self.raw_file_names), sep=',',
header=0, usecols=[0,1,2,3],
dtype={0:np.int64, 1:np.int32, 2:str, 3:np.int32})
#specify header names
data.columns = ['Time','UserId','Type','ItemId']
data['Time'] = (data.Time / 1000).astype(int)
data.sort_values(['UserId','Time'], ascending=True, inplace=True)
#sessionize
data['TimeTmp'] = pd.to_datetime(data.Time, unit='s')
data.sort_values(['UserId','TimeTmp'], ascending=True, inplace=True)
data['TimeShift'] = data['TimeTmp'].shift(1)
data['TimeDiff'] = (data['TimeTmp'] - data['TimeShift']).dt.total_seconds().abs()
data['SessionIdTmp'] = (data['TimeDiff'] > self.session_length).astype(int)
data['SessionId'] = data['SessionIdTmp'].cumsum( skipna=False)
del data['SessionIdTmp'], data['TimeShift'], data['TimeDiff']
data.sort_values(['SessionId','Time'], ascending=True, inplace=True)
cart = data[data.Type == 'addtocart']
data = data[data.Type == 'view']
del data['Type']
# output
print(data.Time.min())
print(data.Time.max())
data_start = datetime.fromtimestamp( data.Time.min(), timezone.utc)
data_end = datetime.fromtimestamp( data.Time.max(), timezone.utc)
del data['TimeTmp']
print('Loaded data set\n\tEvents: {}\n\tSessions: {}\n\tItems: {}\n\tSpan: {} / {}\n\n'.
format(len(data), data.SessionId.nunique(), data.ItemId.nunique(), data_start.date().isoformat(), data_end.date().isoformat()))
self.data = data
self.cart = cart
def filter_data(self):
data = self.data
#filter session length
session_lengths = data.groupby('SessionId').size()
data = data[np.in1d(data.SessionId, session_lengths[session_lengths>1].index)]
#filter item support
item_supports = data.groupby('ItemId').size()
data = data[np.in1d(data.ItemId, item_supports[item_supports>= self.min_item_support].index)]
#filter session length
session_lengths = data.groupby('SessionId').size()
data = data[np.in1d(data.SessionId, session_lengths[session_lengths>= self.min_session_length].index)]
#output
data_start = datetime.fromtimestamp(data.Time.min(), timezone.utc)
data_end = datetime.fromtimestamp(data.Time.max(), timezone.utc)
print('Filtered data set\n\tEvents: {}\n\tSessions: {}\n\tItems: {}\n\tSpan: {} / {}\n\n'.
format(len(data), data.SessionId.nunique(), data.ItemId.nunique(), data_start.date().isoformat(), data_end.date().isoformat()))
self.data = data
def filter_min_date(self):
data = self.data
min_datetime = datetime.strptime(self.min_date + ' 00:00:00', '%Y-%m-%d %H:%M:%S')
#filter
session_max_times = data.groupby('SessionId').Time.max()
session_keep = session_max_times[session_max_times > min_datetime.timestamp()].index
data = data[np.in1d(data.SessionId, session_keep)]
#output
data_start = datetime.fromtimestamp(data.Time.min(), timezone.utc)
data_end = datetime.fromtimestamp(data.Time.max(), timezone.utc)
print('Filtered data set\n\tEvents: {}\n\tSessions: {}\n\tItems: {}\n\tSpan: {} / {}\n\n'.
format(len(data), data.SessionId.nunique(), data.ItemId.nunique(), data_start.date().isoformat(), data_end.date().isoformat()))
self.data = data
def split_data_org(self):
data = self.data
tmax = data.Time.max()
session_max_times = data.groupby('SessionId').Time.max()
session_train = session_max_times[session_max_times < tmax-86400].index
session_test = session_max_times[session_max_times >= tmax-86400].index
train = data[np.in1d(data.SessionId, session_train)]
test = data[np.in1d(data.SessionId, session_test)]
test = test[np.in1d(test.ItemId, train.ItemId)]
tslength = test.groupby('SessionId').size()
test = test[np.in1d(test.SessionId, tslength[tslength>=2].index)]
print('Full train set\n\tEvents: {}\n\tSessions: {}\n\tItems: {}'.format(len(train), train.SessionId.nunique(), train.ItemId.nunique()))
train.to_csv(osp.join(self.processed_dir,'events_train_full.txt'), sep='\t', index=False)
print('Test set\n\tEvents: {}\n\tSessions: {}\n\tItems: {}'.format(len(test), test.SessionId.nunique(), test.ItemId.nunique()))
test.to_csv(osp.join(self.processed_dir,'events_test.txt'), sep='\t', index=False)
tmax = train.Time.max()
session_max_times = train.groupby('SessionId').Time.max()
session_train = session_max_times[session_max_times < tmax-86400].index
session_valid = session_max_times[session_max_times >= tmax-86400].index
train_tr = train[np.in1d(train.SessionId, session_train)]
valid = train[np.in1d(train.SessionId, session_valid)]
valid = valid[np.in1d(valid.ItemId, train_tr.ItemId)]
tslength = valid.groupby('SessionId').size()
valid = valid[np.in1d(valid.SessionId, tslength[tslength>=2].index)]
print('Train set\n\tEvents: {}\n\tSessions: {}\n\tItems: {}'.format(len(train_tr), train_tr.SessionId.nunique(), train_tr.ItemId.nunique()))
train_tr.to_csv(osp.join(self.processed_dir,'events_train_tr.txt'), sep='\t', index=False)
print('Validation set\n\tEvents: {}\n\tSessions: {}\n\tItems: {}'.format(len(valid), valid.SessionId.nunique(), valid.ItemId.nunique()))
valid.to_csv(osp.join(self.processed_dir,'events_train_valid.txt'), sep='\t', index=False)
def split_data(self):
data = self.data
data_end = datetime.fromtimestamp(data.Time.max(), timezone.utc)
test_from = data_end - timedelta(self.days_test)
session_max_times = data.groupby('SessionId').Time.max()
session_train = session_max_times[session_max_times < test_from.timestamp()].index
session_test = session_max_times[session_max_times >= test_from.timestamp()].index
train = data[np.in1d(data.SessionId, session_train)]
test = data[np.in1d(data.SessionId, session_test)]
test = test[np.in1d(test.ItemId, train.ItemId)]
tslength = test.groupby('SessionId').size()
test = test[np.in1d(test.SessionId, tslength[tslength>=2].index)]
print('Full train set\n\tEvents: {}\n\tSessions: {}\n\tItems: {}'.format(len(train), train.SessionId.nunique(), train.ItemId.nunique()))
train.to_csv(osp.join(self.processed_dir,'events_train_full.txt'), sep='\t', index=False)
print('Test set\n\tEvents: {}\n\tSessions: {}\n\tItems: {}'.format(len(test), test.SessionId.nunique(), test.ItemId.nunique()))
test.to_csv(osp.join(self.processed_dir,'events_test.txt'), sep='\t', index=False)
def slice_data(self):
for slice_id in range(0, self.num_slices):
self.split_data_slice(slice_id, self.days_offset+(slice_id*self.days_shift))
def split_data_slice(self, slice_id, days_offset):
data = self.data
data_start = datetime.fromtimestamp(data.Time.min(), timezone.utc)
data_end = datetime.fromtimestamp(data.Time.max(), timezone.utc)
print('Full data set {}\n\tEvents: {}\n\tSessions: {}\n\tItems: {}\n\tSpan: {} / {}'.
format(slice_id, len(data), data.SessionId.nunique(), data.ItemId.nunique(), data_start.isoformat(), data_end.isoformat()))
start = datetime.fromtimestamp(data.Time.min(), timezone.utc ) + timedelta(days_offset)
middle = start + timedelta(self.days_train)
end = middle + timedelta(self.days_test)
#prefilter the timespan
session_max_times = data.groupby('SessionId').Time.max()
greater_start = session_max_times[session_max_times >= start.timestamp()].index
lower_end = session_max_times[session_max_times <= end.timestamp()].index
data_filtered = data[np.in1d(data.SessionId, greater_start.intersection(lower_end))]
print('Slice data set {}\n\tEvents: {}\n\tSessions: {}\n\tItems: {}\n\tSpan: {} / {} / {}'.
format( slice_id, len(data_filtered), data_filtered.SessionId.nunique(), data_filtered.ItemId.nunique(), start.date().isoformat(), middle.date().isoformat(), end.date().isoformat() ) )
#split to train and test
session_max_times = data_filtered.groupby('SessionId').Time.max()
sessions_train = session_max_times[session_max_times < middle.timestamp()].index
sessions_test = session_max_times[session_max_times >= middle.timestamp()].index
train = data[np.in1d(data.SessionId, sessions_train)]
print('Train set {}\n\tEvents: {}\n\tSessions: {}\n\tItems: {}\n\tSpan: {} / {}'.
format( slice_id, len(train), train.SessionId.nunique(), train.ItemId.nunique(), start.date().isoformat(), middle.date().isoformat() ) )
train.to_csv(osp.join(self.processed_dir,'events_train_full.'+str(slice_id)+'.txt'), sep='\t', index=False)
test = data[np.in1d(data.SessionId, sessions_test)]
test = test[np.in1d(test.ItemId, train.ItemId)]
tslength = test.groupby('SessionId').size()
test = test[np.in1d(test.SessionId, tslength[tslength>=2].index)]
print('Test set {}\n\tEvents: {}\n\tSessions: {}\n\tItems: {}\n\tSpan: {} / {} \n\n'.
format( slice_id, len(test), test.SessionId.nunique(), test.ItemId.nunique(), middle.date().isoformat(), end.date().isoformat() ) )
test.to_csv(osp.join(self.processed_dir,'events_test.'+str(slice_id)+'.txt'), sep='\t', index=False)
def store_buys(self):
self.cart.to_csv(osp.join(self.processed_dir,'events_buys.txt'), sep='\t', index=False)
def process(self):
self.load()
self.filter_data()
if self.process_method == 'last':
self.split_data_org()
elif self.process_method == 'last_min_date':
self.filter_min_date()
self.split_data_org()
elif self.process_method == 'days_test':
self.split_data()
elif self.process_method == 'slice':
self.slice_data()
self.store_buys()
| 2.3125
| 2
|
omni_reports/facebook_reports/base.py
|
paretogroup/omni-reports
| 24
|
12780590
|
<filename>omni_reports/facebook_reports/base.py
import json
from typing import Dict, List
from aiohttp import ClientSession
from omni_reports.client import ReportClient
from omni_reports.client.errors import ReportResponseError
from omni_reports.client.models import ReportPredicate, ReportDefinitionDateRange
from omni_reports.client.types import ReportType
from omni_reports.facebook_reports.fields import FacebookReportField, FACEBOOK_OPERATORS_MAP
from omni_reports.facebook_reports.settings import (
FACEBOOK_BASE_API, FACEBOOK_VERSION_API,
REPORT_ENDPOINT_PATTERN
)
def get_base_url():
return f"{FACEBOOK_BASE_API}/{FACEBOOK_VERSION_API}"
class FacebookAdsReportType(ReportType):
REPORT_TYPE = None
FIELD_BEGIN_SEPARATOR = "{"
FIELD_END_SEPARATOR = "}"
async def resolve(self, fields, predicates, report_definition, context, client: ReportClient):
if not self.REPORT_TYPE:
            raise AttributeError('The attribute "REPORT_TYPE" is required')
if client.session is None:
raise AttributeError('Client session cannot be null.')
session = client.session
date_range: ReportDefinitionDateRange = report_definition.selector.date_range
self._validate_predicates(predicates, fields, date_range)
formatted_fields = self._get_fields(fields)
predicates = self._get_predicates(predicates)
url = self._build_request_url(context, formatted_fields, predicates, date_range)
records = await request(session, url)
return self._resolve_fields_on_records(records, fields)
def _build_request_url(self, context, fields, predicates, date_range):
token = context.get("FACEBOOK_TOKEN")
network_id = context.get("FACEBOOK_NETWORK_ID")
endpoint_pattern = REPORT_ENDPOINT_PATTERN[self.REPORT_TYPE] or ""
endpoint = self._compose_endpoint_with_predicates(endpoint_pattern, {
"network_id": network_id
})
date_range = self._compose_date_range(date_range)
predicates = self._compose_predicates(predicates)
return f"{get_base_url()}/{endpoint}?fields={fields}" \
f"&access_token={token}&{date_range}" \
f"&filtering={predicates}"
@staticmethod
def _validate_predicates(
predicates: Dict[str, ReportPredicate],
fields: dict,
date_range: ReportDefinitionDateRange
):
for predicate in predicates.values():
if predicate.field.target_group_name:
raise AttributeError(f"Fields with target group is not accepted yet. "
f"Invalid field: {predicate.field.target_group_name}"
f" - {predicate.field.target_name}")
if date_range:
has_date_field = bool([
field_name
for field_name, _ in fields.items()
if field_name == 'date'
])
if date_range.time_increment != 1 and has_date_field:
raise AttributeError(
"You can only date field with time_increment equals to 1. For"
"other time_increment values use date_start and date_end fields."
)
@staticmethod
def _get_predicates(predicates: Dict[str, ReportPredicate]) -> List[dict]:
formatted_predicates = []
for predicate in predicates.values():
operator = FACEBOOK_OPERATORS_MAP[predicate.operator]
formatted_predicates.append({
"field": predicate.field.target_name,
"operator": operator,
"value": predicate.values[0] if operator == "EQUAL" else predicate.values
})
return formatted_predicates
@staticmethod
def _resolve_fields_on_records(records, fields) -> List[dict]:
if not isinstance(records, dict):
raise ValueError(f'Invalid records response for {records}')
if 'error' in records:
raise ReportResponseError(
f"\n\nFacebook Api response error:\n{records.get('error')}"
)
only_records = records.get("data") or []
only_records = only_records if isinstance(only_records, list) else list(only_records)
updated_records = []
for record in only_records:
new_record = {}
for field_name, field in fields.items():
try:
new_record[field_name] = record[field.target_name]
except KeyError as error:
raise KeyError(f"{error}\nField '{field_name}' not mapped on record {record}.")
updated_records.append(new_record)
return updated_records
@staticmethod
def _compose_predicates(predicates: dict) -> str:
return json.dumps(predicates) if predicates else ""
@staticmethod
def _compose_date_range(date_range: ReportDefinitionDateRange) -> str:
if not date_range:
return ""
start_range = "{start_object}'since':'{since}'".format(
start_object="{",
since=date_range.start,
)
end_range = "'until': '{until}'{end_object}".format(
until=date_range.end,
end_object="}"
)
time_range = f"time_range={start_range},{end_range}"
time_increment = f"time_increment={date_range.time_increment}"
return f"{time_range}&{time_increment}"
def _get_fields(self, fields: Dict[str, FacebookReportField]):
group_fields = {
field.target_group_name
for _, field in fields.items()
if field.target_group_name
}
grouped_fields = {}
for group_field in group_fields:
filtered_fields = (
field.target_name
for _, field in fields.items()
if group_field == field.target_group_name
)
grouped_fields[group_field] = \
group_field \
+ self.FIELD_BEGIN_SEPARATOR \
+ ",".join(filtered_fields) \
+ self.FIELD_END_SEPARATOR
non_object_fields = [
field.target_name
for _, field in fields.items()
if not field.target_group_name
]
all_fields = ",".join([*non_object_fields, *grouped_fields.values()])
return all_fields
@staticmethod
def _compose_endpoint_with_predicates(pattern: str, predicates: dict):
return pattern.format(**predicates)
@staticmethod
def _get_predicates_not_used_on_endpoint_pattern(pattern: str, predicates: dict):
return {
predicate_field: predicates[predicate_field]
for predicate_field in predicates.keys()
if predicate_field not in pattern
}
async def request(session: ClientSession, url: str):
async with session.get(url) as response:
return await response.json()
| 2.046875
| 2
|
PythonExercicios/ex009.py
|
marcoantonio97/Curso-de-Python
| 0
|
12780591
|
<reponame>marcoantonio97/Curso-de-Python<gh_stars>0
n = int(input('Digite um número: '))
print('{}x1={}'.format(n, (n*1)))
print('{}x2={}'.format(n, (n*2)))
print('{}x3={}'.format(n, (n*3)))
print('{}x4={}'.format(n, (n*4)))
print('{}x5={}'.format(n, (n*5)))
print('{}x6={}'.format(n, (n*6)))
print('{}x7={}'.format(n, (n*7)))
print('{}x8={}'.format(n, (n*8)))
print('{}x9={}'.format(n, (n*9)))
print('{}x10={}'.format(n, (n*10)))
| 4.03125
| 4
|
Python/_03_Strings/_13_The_Minion_Game/solution.py
|
avtomato/HackerRank
| 0
|
12780592
|
def minion_game(string):
    # score every possible substring by its first letter: there are (len(string) - i)
    # substrings starting at index i, each worth one point to the player whose letter
    # type starts it; Kevin scores vowel starts, Stuart scores consonant starts
    a = string.strip().lower()
    v = sum(len(a) - i for i, c in enumerate(a) if c in 'aeiou')
    c = sum(len(a) - i for i, c in enumerate(a) if c not in 'aeiou')
    if v == c:
        print('Draw')
    elif c > v:
        print('Stuart {}'.format(c))
    else:
        print('Kevin {}'.format(v))
if __name__ == '__main__':
s = input()
minion_game(s)
| 3.53125
| 4
|
demo/onlyuserrole/demo/views.py
|
tangdyy/onlyuserclient
| 2
|
12780593
|
<reponame>tangdyy/onlyuserclient<gh_stars>1-10
from onlyuserclient.viewsets import RoleModelViewSet
from .serializers import DefaultDemoSerializer, CompleteDemoSerializer, HideDemoSerializer
from .models import RoleDemo
class RoleViewSet(RoleModelViewSet):
queryset = RoleDemo.objects.all()
user_relate_field = 'owner'
org_relate_field = 'organization'
serializer_classs = {
'default': DefaultDemoSerializer,
'complete_view': CompleteDemoSerializer,
'part_view': HideDemoSerializer
}
| 1.914063
| 2
|
CS362_TESTS/jacobsonUnitTest2.py
|
IsaacMarquez907/CS361_GROUP_PROJECT
| 1
|
12780594
|
<reponame>IsaacMarquez907/CS361_GROUP_PROJECT
#!/usr/bin/env python
# coding: utf-8
# import necessary libraries
import os
import sys
import unittest
#allow the script to be run directly
sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
#import function to test
from youtube_dl.utils import clean_html
#Unit test for the html clean function. Function should remove the html formatting and just leave the plain text
class test_util_html_clean(unittest.TestCase):
#test base case, no html
def test_clean_html_base (self):
self.assertEqual(clean_html('This should return the exact same text'), 'This should return the exact same text')
#short test
def test_clean_html_basic (self):
self.assertEqual(clean_html('<HTML><HEAD> this is a test</HEAD><HTML>'), 'this is a test')
#medium test
def test_clean_html_moderate (self):
self.assertEqual(clean_html('<HTML><HEAD><TITLE> this is a test </TITLE></HEAD><BR><P>here is a line of text</HTML>'), 'this is a test here is a line of text')
#test to see how links are handled
def test_clean_html_links (self):
        self.assertEqual(clean_html('<a href="http://myspace.com"> myspace </a>'), 'myspace')
#Advanced test with a variety of tags
def test_clean_html_advance (self):
self.assertEqual(clean_html('<HTML><HEAD><TITLE>TEST <TITLE></HEAD><BODY BGCOLOR="EEEEEE"><H1>this is </H1><H2>a test </H2><BR><P><BODY>checkout myspace.com<table border="1" cellpadding="10" width="80%"><img src="img_myspace.jpg" alt="myspace.com"></BODY></HTML>'), 'TEST this is a test checkout myspace.com')
if __name__ == '__main__':
unittest.main()
| 3.21875
| 3
|
STEP 2: INPUT ANALYSIS/rd_parser.py
|
amalshehu/super-slang
| 0
|
12780595
|
<reponame>amalshehu/super-slang
from abstract_syntax_tree import Operator
from abstract_syntax_tree import Expression
from abstract_syntax_tree import NumericConstant
from abstract_syntax_tree import BinaryExpression
from abstract_syntax_tree import UnaryExpression
from lexer import Lexer
from lexer import Token
| 1.09375
| 1
|
private_training/src/models.py
|
xiyueyiwan/private-ml-for-health
| 28
|
12780596
|
<gh_stars>10-100
import torch
from torch import nn
import torch.nn.functional as F
class CNNMnistRelu(nn.Module):
def __init__(self):
super().__init__()
self.conv1 = nn.Conv2d(1, 16, 8, 2, padding=3)
self.conv2 = nn.Conv2d(16, 32, 4, 2)
self.fc1 = nn.Linear(32 * 4 * 4, 32)
self.fc2 = nn.Linear(32, 10)
def forward(self, x):
# x of shape [B, 1, 28, 28]
x = F.relu(self.conv1(x)) # -> [B, 16, 14, 14]
x = F.max_pool2d(x, 2, 1) # -> [B, 16, 13, 13]
x = F.relu(self.conv2(x)) # -> [B, 32, 5, 5]
x = F.max_pool2d(x, 2, 1) # -> [B, 32, 4, 4]
x = x.view(-1, 32 * 4 * 4) # -> [B, 512]
x = F.relu(self.fc1(x)) # -> [B, 32]
x = self.fc2(x) # -> [B, 10]
return x
def name(self):
return "SampleConvNet"
class CNNMnistTanh(nn.Module):
def __init__(self):
super().__init__()
self.conv1 = nn.Conv2d(1, 16, 8, 2, padding=3)
self.conv2 = nn.Conv2d(16, 32, 4, 2)
self.fc1 = nn.Linear(32 * 4 * 4, 32)
self.fc2 = nn.Linear(32, 10)
def forward(self, x):
# x of shape [B, 1, 28, 28]
x = torch.tanh(self.conv1(x)) # -> [B, 16, 14, 14]
x = F.max_pool2d(x, 2, 1) # -> [B, 16, 13, 13]
x = torch.tanh(self.conv2(x)) # -> [B, 32, 5, 5]
x = F.max_pool2d(x, 2, 1) # -> [B, 32, 4, 4]
x = x.view(-1, 32 * 4 * 4) # -> [B, 512]
x = torch.tanh(self.fc1(x)) # -> [B, 32]
x = self.fc2(x) # -> [B, 10]
return x
def name(self):
return "SampleConvNet"
class CNNFashion_MnistRelu(nn.Module):
def __init__(self):
super().__init__()
self.conv1 = nn.Conv2d(1, 16, 8, 2, padding=3)
self.conv2 = nn.Conv2d(16, 32, 4, 2)
self.fc1 = nn.Linear(32 * 4 * 4, 32)
self.fc2 = nn.Linear(32, 10)
def forward(self, x):
# x of shape [B, 1, 28, 28]
x = F.relu(self.conv1(x)) # -> [B, 16, 14, 14]
x = F.max_pool2d(x, 2, 1) # -> [B, 16, 13, 13]
x = F.relu(self.conv2(x)) # -> [B, 32, 5, 5]
x = F.max_pool2d(x, 2, 1) # -> [B, 32, 4, 4]
x = x.view(-1, 32 * 4 * 4) # -> [B, 512]
x = F.relu(self.fc1(x)) # -> [B, 32]
x = self.fc2(x) # -> [B, 10]
return x
def name(self):
return "SampleConvNet"
class CNNFashion_MnistTanh(nn.Module):
def __init__(self):
super().__init__()
self.conv1 = nn.Conv2d(1, 16, 8, 2, padding=3)
self.conv2 = nn.Conv2d(16, 32, 4, 2)
self.fc1 = nn.Linear(32 * 4 * 4, 32)
self.fc2 = nn.Linear(32, 10)
def forward(self, x):
# x of shape [B, 1, 28, 28]
x = torch.tanh(self.conv1(x)) # -> [B, 16, 14, 14]
x = F.max_pool2d(x, 2, 1) # -> [B, 16, 13, 13]
x = torch.tanh(self.conv2(x)) # -> [B, 32, 5, 5]
x = F.max_pool2d(x, 2, 1) # -> [B, 32, 4, 4]
x = x.view(-1, 32 * 4 * 4) # -> [B, 512]
x = torch.tanh(self.fc1(x)) # -> [B, 32]
x = self.fc2(x) # -> [B, 10]
return x
def name(self):
return "SampleConvNet"
class CNNCifar10Relu(nn.Module):
def __init__(self):
super().__init__()
self.conv1 = nn.Conv2d(3, 32, 5)
self.pool = nn.MaxPool2d(2, 2)
self.conv2 = nn.Conv2d(32, 32, 5)
self.fc1 = nn.Linear(32 * 5 * 5, 120)
self.fc2 = nn.Linear(120, 84)
self.fc3 = nn.Linear(84, 10)
def forward(self, x):
x = self.pool(F.relu(self.conv1(x)))
x = self.pool(F.relu(self.conv2(x)))
x = x.view(-1, 32 * 5 * 5)
x = F.relu(self.fc1(x))
x = F.relu(self.fc2(x))
x = self.fc3(x)
return F.log_softmax(x, dim=1)
class CNNCifar10Tanh(nn.Module):
def __init__(self):
super().__init__()
self.conv1 = nn.Conv2d(3, 32, 5)
self.pool = nn.MaxPool2d(2, 2)
self.conv2 = nn.Conv2d(32, 32, 5)
self.fc1 = nn.Linear(32 * 5 * 5, 120)
self.fc2 = nn.Linear(120, 84)
self.fc3 = nn.Linear(84, 10)
def forward(self, x):
x = self.pool(torch.tanh(self.conv1(x)))
x = self.pool(torch.tanh(self.conv2(x)))
x = x.view(-1, 32 * 5 * 5)
x = torch.tanh(self.fc1(x))
x = torch.tanh(self.fc2(x))
x = self.fc3(x)
return F.log_softmax(x, dim=1)
| 2.84375
| 3
|
intro/part01-25_calculator/test/test_calculator.py
|
Hannah-Abi/python-pro-21
| 0
|
12780597
|
import unittest
from unittest.mock import patch
from tmc import points
from tmc.utils import load_module, reload_module, get_stdout, sanitize
exercise = 'src.calculator'
def parse_result(output):
if len(output) > 30:
return output[:30] + "..."
else:
return output
#add, multiply,subtract(-)
@points('1.calculator')
class CalculatorTest(unittest.TestCase):
@classmethod
def setUpClass(cls):
with patch('builtins.input', return_value = '0'):
cls.module = load_module(exercise, 'en')
def test_add1(self):
with patch('builtins.input', side_effect = [ '1', '2', 'add', AssertionError("Input is asked too many times.") ]) as prompt:
reload_module(self.module)
output = get_stdout()
expect = '1 + 2 = 3'
self.assertTrue(len(output)>0, "Your program does not print out anything with inputs 1, 2, add")
self.assertTrue(expect in output, f"With inputs 1, 2, add your program should have printed out\n{expect}\nYour program printed out:\n{output}")
def test_add2(self):
with patch('builtins.input', side_effect = [ '75', '23', 'add', AssertionError("Input is asked too many times.") ]) as prompt:
reload_module(self.module)
output = get_stdout()
self.assertTrue(len(output)>0, "Your program does not print out anything with inputs 75, 23, add")
expect = '75 + 23 = 98'
self.assertTrue(expect in output, f"With inputs 75, 23, add your program should have printed out\n{expect}\nYour program printed out:\n{output}")
def test_subtract1(self):
with patch('builtins.input', side_effect = [ '2', '1', 'subtract', AssertionError("Input is asked too many times.") ]) as prompt:
reload_module(self.module)
output = get_stdout()
self.assertTrue(len(output)>0, "Your program does not print out anything with inputs 2, 1, subtract")
expect = '2 - 1 = 1'
self.assertTrue(expect in output, f"With inputs 2, 1, subtract your program should have printed out\n{expect}\nYour program printed out:\n{output}")
def test_subtract2(self):
with patch('builtins.input', side_effect = [ '13', '34', 'subtract', AssertionError("Input is asked too many times.") ]) as prompt:
reload_module(self.module)
output = get_stdout()
expect = '13 - 34 = -21'
self.assertTrue(expect in output, f"With inputs 13, 34, subtract your program should have printed out\n{expect}\nYour program printed out:\n{output}")
def test_multiply1(self):
with patch('builtins.input', side_effect = [ '2', '3', 'multiply', AssertionError("Input is asked too many times.") ]) as prompt:
reload_module(self.module)
output = get_stdout()
expect = '2 * 3 = 6'
self.assertTrue(len(output)>0, "Your program does not print out anything with inputs 2, 3, multiply")
self.assertTrue(expect in output, f"With inputs 2, 3, multiply your program should have printed out\n{expect}\nYour program printed out:\n{output}")
def test_multiply2(self):
with patch('builtins.input', side_effect = [ '27', '-3', 'multiply', AssertionError("Input is asked too many times.") ]) as prompt:
reload_module(self.module)
output = get_stdout()
expect = '27 * -3 = -81'
self.assertTrue(expect in output, f"With inputs 27, -3, multiply your program should have printed out\n{expect}\nYour program printed out:\n{output}")
def test_xcrap(self):
with patch('builtins.input', side_effect = [ '27', '-3', 'quotient', AssertionError("Input is asked too many times.") ]) as prompt:
reload_module(self.module)
output = get_stdout()
self.assertTrue(len(output) == 0, f"With inputs 27, -3, quotient your program should not print out anything\nYour program printed out:\n{output}")
if __name__ == '__main__':
unittest.main()
| 3.171875
| 3
|
solarpv/deployment/pipeline.py
|
shivareddyiirs/solar-pv-global-inventory
| 64
|
12780598
|
"""
Run the pipeline
"""
# built-in
import os, sys, logging, datetime, json
# packages
import yaml
import descarteslabs as dl
import shapely.geometry as geometry
# lib
from deployment.cloud_dl_functions import DL_CLOUD_FUNCTIONS
# conf
logging.basicConfig(stream=sys.stdout, level=logging.INFO)
def reduce2mp(polys, verbose=False):
    ### generating one big multipolygon directly takes forever, so build small multipolygons of ~100 polygons each, then combine them
big_mps = []
big_mp = polys[0]
mod_count=0
for ii in range(1,len(polys)):
if ii%100==0:
if verbose:
print ('mod count',ii)
mod_count+=1
big_mps.append(big_mp)
big_mp=polys[ii]
else:
#print (mod_count,ii)
big_mp=big_mp.union(polys[ii])
big_mps.append(big_mp)
if verbose:
print ('n big mps',len(big_mps))
### now reduce list of big_mps
big_mp=big_mps[0]
for ii in range(1,len(big_mps)):
if verbose:
print ('big_mp: ',ii)
big_mp = big_mp.union(big_mps[ii])
return big_mp
def flatten_polys(polys):
polys_flattened=[]
for pp in polys:
if pp.type=='MultiPolygon':
polys_flattened+=list(pp)
elif pp.type=='Polygon':
polys_flattened+=[pp]
else:
print ('not poly',pp.type)
return polys_flattened
def get_shp(shp_str):
"""
Return a shapely geometry in WGS84 lon/lat
    input: shp_str - an ISO 3166-1 code (admin level 1, countries) or an ISO 3166-2 code (admin level 2, states/provinces)
"""
if len(shp_str.split('-'))>1:
load_fts = json.load(open(os.path.join(os.getcwd(),'data','ne_10m_admin_1_states_provinces.geojson'),'r'))
select_fts = [ft for ft in load_fts['features'] if ft['properties']['iso_3166_2']==shp_str]
else:
load_fts = json.load(open(os.path.join(os.getcwd(),'data','ne_10m_admin_0_countries.geojson'),'r'))
select_fts = [ft for ft in load_fts['features'] if ft['properties']['ISO_A2']==shp_str]
all_shps = [geometry.shape(ft['geometry']) for ft in select_fts]
return reduce2mp(flatten_polys(all_shps))
def shp_exclusions(shp, shp_str):
pop_json = json.load(open(os.path.join(os.getcwd(),'data','popshp_gt1_d7k.geojson'),'r')) # main population dilation shape
dnr_json = json.load(open(os.path.join(os.getcwd(),'data','do_not_run.geojson'),'r')) # removes some census-based null areas in Canada and Australia
pop_shps = []
for ii_f, ft in enumerate(pop_json['features']):
#print (ii_f)
try:
pop_shps.append(geometry.shape(ft['geometry']))
except:
pass
dnr_shps = [geometry.shape(ft['geometry']) for ft in dnr_json['features']]
pop_unions = []
for ii_s,pop_shp in enumerate(pop_shps):
if not pop_shp.intersection(shp).is_empty:
pop_unions.append(pop_shp.intersection(shp))
deployment_shp = reduce2mp(pop_unions)
for shp in dnr_shps:
deployment_shp = deployment_shp.difference(shp)
ak_poly = geometry.Polygon([[-169,0],[-169,60],[-141,60],[-141,0]])
clip_poly = geometry.Polygon([[-180,0],[-180,60],[179,60],[179,0]])
if shp_str =='US-AK':
deployment_shp = deployment_shp.intersection(ak_poly).buffer(0)
elif shp_str[0:2] in ['CA','RU']:
deployment_shp = deployment_shp.intersection(clip_poly).buffer(0)
if deployment_shp.is_empty:
logging.error('empty geom!')
return None
return deployment_shp
class Pipeline:
def __init__(self):
self.raster_client = dl.Raster()
self.catalog_client = dl.Catalog()
self.tasks_client = dl.Tasks()
self.fn_config = yaml.safe_load(open(os.path.join(os.getcwd(),'cloud_functions.yaml'),'r'))
self.prod_config = yaml.safe_load(open(os.path.join(os.getcwd(),'cloud_products.yaml'),'r'))
def run(self,which,shp_str):
if type(which)==list:
# create cloud functions from a list
for fn_key in which:
self._run_cloud_function(fn_key, shp_str)
elif type(which)==str:
# create a single cloud function
self._run_cloud_function(which, shp_str)
elif not which:
# create cloud functions for each cloud function
for fn_key, fn_conf in self.fn_config.items():
self._run_cloud_function(fn_key, shp_str)
def _run_cloud_function(self,fn_key, shp_str):
shp = get_shp(shp_str)
shp = shp_exclusions(shp, shp_str)
async_function = self.tasks_client.get_function(name=self.fn_config[fn_key]['name'])
logging.info(f"Running {self.fn_config[fn_key]['name']} for {shp_str}")
if fn_key=='S2Infer1':
sdate='2019-09-30'
edate='2019-12-31'
tiles = self.raster_client.dltiles_from_shape(
self.fn_config[fn_key]['tiledict']['resolution'],
self.fn_config[fn_key]['tiledict']['tilesize'],
self.fn_config[fn_key]['tiledict']['pad'],
geometry.mapping(shp))
logging.info(f"Running {len(tiles['features'])} tiles and storing to raster {self.prod_config['S2-R1-Primary']['cloud_id']} and vector {self.prod_config['S2-V1-Primary']['cloud_id']}")
done_scenes, null = dl.scenes.search(aoi=geometry.mapping(shp), products=[self.prod_config['S2-R1-Primary']['cloud_id']])
done_keys = [ft['key'].split(':')[3].replace('_',':') for ft in done_scenes]
run_tiles = [t for t in tiles['features'] if t.properties.key not in done_keys]
logging.info(f"Found {len(done_keys)} previously-run tiles, running remaining {len(run_tiles)} tiles")
for ii_t, tile in enumerate(run_tiles):
async_function(
dltile=tile,
src_product_id='sentinel-2:L1C',
dest_product_id=self.prod_config['S2-R1-Primary']['cloud_id'],
fc_id=self.prod_config['S2-V1-Primary']['cloud_id'],
sdate=sdate,
edate=edate,
run_ii=ii_t)
if ii_t % 10 ==0:
logging.info(f'Running tile {ii_t} of {len(run_tiles)}')
elif fn_key=='S2RNN1':
tiles = self.raster_client.dltiles_from_shape(
self.fn_config[fn_key]['tiledict']['resolution'],
self.fn_config[fn_key]['tiledict']['tilesize'],
self.fn_config[fn_key]['tiledict']['pad'],
geometry.mapping(shp))
logging.info(f"Running {len(tiles['features'])} tiles and storing to raster {self.prod_config['S2-R2-Secondary']['cloud_id']}")
logging.info(f"Input vector:{self.prod_config['S2-V1-Primary']['cloud_id']}, output vector: {self.prod_config['S2-V2-Secondary']['cloud_id']}")
done_scenes, null = dl.scenes.search(aoi=geometry.mapping(shp), products=[self.prod_config['S2-R2-Secondary']['cloud_id']])
done_keys = [ft['key'].split(':')[3].replace('_',':') for ft in done_scenes]
run_tiles = [t for t in tiles['features'] if t.properties.key not in done_keys]
logging.info(f"Found {len(done_keys)} previously-run tiles, running remaining {len(run_tiles)} tiles")
for ii_t, tile in enumerate(run_tiles):
async_function(
dltile=tile,
src_vector_id=self.prod_config['S2-V1-Primary']['cloud_id'],
dest_vector_id=self.prod_config['S2-V2-Secondary']['cloud_id'],
dest_product_id=self.prod_config['S2-R2-Secondary']['cloud_id'],
push_rast = True
)
if ii_t % 10 ==0:
logging.info(f'Running tile {ii_t} of {len(run_tiles)}')
elif fn_key=='S2Infer2':
THRESHOLD=0.5
fc_src = dl_local.vectors.FeatureCollection(self.prod_config['S2-V2-Secondary']['cloud_id'])
fc_dest = dl_local.vectors.FeatureCollection(self.prod_config['S2-V3-Deepstack']['cloud_id'])
logging.info(f"Gathering features passing RNN-1 with threshold {THRESHOLD} and running them through the full imagery stack.")
all_deep_fts = [f for f in fc_dest.filter(shp).features()]
logging.info(f"Features already run: {len(all_deep_fts)}")
deep_ft_ids = [f.properties.primary_id for f in all_deep_fts]
sec_fts = [f for f in fc_src.filter(shp).filter(properties=(dl_p_local.prediction >=THRESHOLD)).features()]
logging.info(f'Features in geography meeting threshold {THRESHOLD}: {len(sec_fts)}')
deploy_fts = [f for f in sec_fts if f.properties.primary_id not in deep_ft_ids]
logging.info(f'Features in geography to deploy: {len(deploy_fts)}')
for ii_f, f in enumerate(deploy_fts):
                    # apply a minimum-area filter (skip very small features)
f_area = area(json.dumps(geometry.mapping(f.geometry)).replace('(','[').replace(')',']'))
if f_area >=80:
try:
async_function(
storage_key=None,
dl_ft=json.dumps(f.geojson),
src_product_id='sentinel-2:L1C',
dest_fc_id=self.prod_config['S2-V3-Deepstack']['cloud_id'],
storage_flag=False
)
logging.info(f'Doing {ii_f}, p: {ii_f/len(deploy_fts)}, area:{f_area}')
except Exception as e:
logging.error(e)
storage_key = 'TMP_FT_'+f.properties['primary_id']
storage_local.set(storage_key,json.dumps(f.geojson),storage_type='data')
async_function(
storage_key=storage_key,
dl_ft=None,
src_product_id='sentinel-2:L1C',
dest_fc_id=self.prod_config['S2-V3-Deepstack']['cloud_id'],
storage_flag=True
)
logging.info(f'Doing {ii_f} via storage, key: {storage_key}, p: {ii_f/len(deploy_fts)}, area:{f_area}')
elif fn_key=='SPOTVectoriser':
# get SPOT scenes
SPOT_scenes = []
if shp.type=='Polygon':
                new_scenes, null = dl.scenes.search(aoi=geometry.mapping(shp), products=[self.prod_config['SPOT-R1-Primary']['cloud_id']])
SPOT_scenes += new_scenes
else:
for subshp in list(shp):
                    new_scenes, null = dl.scenes.search(aoi=geometry.mapping(subshp), products=[self.prod_config['SPOT-R1-Primary']['cloud_id']])
SPOT_scenes += new_scenes
logging.info(f"Retrieved {len(SPOT_scenes)} SPOT scenes for the geography")
scenes_polys = [geometry.shape(s['geometry']) for s in SPOT_scenes]
if len(scenes_polys)<1:
logging.error('No scenes!')
exit()
scenes_mp = reduce2mp(scenes_polys)
tiles = self.raster_client.dltiles_from_shape(
self.fn_config[fn_key]['tiledict']['resolution'],
self.fn_config[fn_key]['tiledict']['tilesize'],
self.fn_config[fn_key]['tiledict']['pad'],
geometry.mapping(scenes_mp))
logging.info(f"Retrieved {len(tiles['features'])} for processing.")
for tile in tiles['features']:
async_function(
dltile=tile,
src_product_id=self.prod_config['SPOT-R1-Primary']['cloud_id'],
band_names=[self.prod_config['SPOT-R1-Primary']['other']['bands'][0]['name']],
scales=[self.prod_config['SPOT-R1-Primary']['other']['bands'][0]['data_range']],
dest_fc_id=self.prod_config['SPOT-V1-Vecotrised']['cloud_id'],
shp_str=shp_str)
if __name__ == "__main__":
pl = Pipeline()
parser = argparse.ArgumentParser()
parser.add_argument("--stage", help="specify the pipeline stage, one of ['S2Infer1','S2RNN1','S2Infer2','SPOTVectoriser']", type=str)
parser.add_argument("--geography", help="specify a country geography with the iso-3166-1 2-letter code", type=str)
args = parser.parse_args()
if args.stage and args.geography:
print (args.stage, args.geography)
        pl.run(args.stage, args.geography)
| 2.375
| 2
|
util/misc.py
|
AlonFischer/SpatialDatabaseBench
| 1
|
12780599
|
<filename>util/misc.py
import decimal
def convert_decimals_to_ints_in_tuples(data):
"""data is an array of tuples"""
modified_data = []
for t in data:
new_t = t
for idx in range(len(new_t)):
if isinstance(t[idx], decimal.Decimal):
new_t = new_t[0:idx] + (int(new_t[idx]),) + new_t[idx+1:]
modified_data.append(new_t)
return modified_data
def convert_none_to_null_in_tuples(data):
"""data is an array of tuples"""
modified_data = []
for t in data:
new_t = t
for idx in range(len(new_t)):
            if t[idx] is None:
new_t = new_t[0:idx] + ("NULL",) + new_t[idx+1:]
modified_data.append(new_t)
return modified_data
def tuple_to_str(tup):
output = "("
for val in tup:
        if val is None:
output += "NULL, "
elif isinstance(val, str):
# escaped_val = val.replace("'", "\\'")
escaped_val = val.replace("'", "")
output += f"'{str(escaped_val)}', "
else:
output += f"{str(val)}, "
output = output[:-2] + ")"
return output
| 3.78125
| 4
|
lib/bridgedb/test/test_schedule.py
|
pagea/bridgedb
| 0
|
12780600
|
<reponame>pagea/bridgedb<filename>lib/bridgedb/test/test_schedule.py
# -*- coding: utf-8 -*-
#
# This file is part of BridgeDB, a Tor bridge distribution system.
#
# :authors: <NAME> 0xA3ADB67A2CDB8B35 <<EMAIL>>
# :copyright: (c) 2014, Isis Lovecruft
# (c) 2014, The Tor Project, Inc.
# :license: see LICENSE for licensing information
"""Unittests for the :mod:`bridgedb.schedule` module."""
from __future__ import print_function
from twisted.trial import unittest
from bridgedb import schedule
class UnscheduledTests(unittest.TestCase):
"""Tests for :class:`bridgedb.scheduled.Unscheduled`."""
def setUp(self):
self.sched = schedule.Unscheduled()
def test_Unscheduled_init(self):
"""The instance should be an instance of its class."""
self.assertIsInstance(self.sched, schedule.Unscheduled)
def test_Unscheduled_providesISchedule(self):
"""Unscheduled should implement the ISchedule interface."""
schedule.ISchedule.namesAndDescriptions()
self.assertTrue(schedule.ISchedule.providedBy(self.sched))
def test_Unscheduled_intervalStart_noargs(self):
time = self.sched.intervalStart()
self.assertIsInstance(time, int)
self.assertEquals(time, -62135596800)
def test_Unscheduled_getInterval_is_constant(self):
import time
now = time.time()
interval_default = self.sched.getInterval()
self.assertIsInstance(interval_default, str)
interval_zero = self.sched.getInterval(0)
self.assertIsInstance(interval_zero, str)
interval_now = self.sched.getInterval(now)
self.assertIsInstance(interval_now, str)
self.assertEquals(interval_default, interval_zero)
self.assertEquals(interval_default, interval_now)
def test_Unscheduled_nextIntervalStarts_noargs(self):
time = self.sched.nextIntervalStarts()
self.assertIsInstance(time, int)
self.assertEquals(time, 253402300799)
class ScheduledIntervalTests(unittest.TestCase):
"""Tests for :class:`bridgedb.scheduled.ScheduledInterval`."""
def setUp(self):
import time
self.now = time.time
self.sched = schedule.ScheduledInterval
def test_ScheduledInterval_providesISchedule(self):
"""ScheduledInterval should implement the ISchedule interface."""
self.assertTrue(schedule.ISchedule.providedBy(self.sched('month', 1)))
def _check_init(self, sched):
"""The instance should be an instance of its class."""
self.assertIsInstance(sched, schedule.ScheduledInterval)
def test_ScheduledInterval_init_month(self):
self._check_init(self.sched('month', 1))
def test_ScheduledInterval_init_week(self):
self._check_init(self.sched('week', 2))
def test_ScheduledInterval_init_day(self):
self._check_init(self.sched('days', 5))
def test_ScheduledInterval_init_hour(self):
self._check_init(self.sched('hours', 12))
def test_ScheduledInterval_init_minute(self):
self._check_init(self.sched('minute', 10))
def test_ScheduledInterval_init_seconds(self):
self._check_init(self.sched('seconds', 30))
def test_ScheduledInterval_init_badIntervalPeriod(self):
self.assertRaises(schedule.UnknownInterval,
self.sched, 'decades', 2)
def test_ScheduledInterval_init_badIntervalCount(self):
self.assertRaises(schedule.UnknownInterval,
self.sched, 'minutes', 'd20')
def test_ScheduledInterval_init_negativeIntervalCount(self):
sched = self.sched('days', -100000)
self.assertEquals(sched.intervalCount, 1)
self.assertEquals(sched.intervalPeriod, 'day')
def test_ScheduledInterval_init_noargs(self):
"""Check that the defaults parameters function as expected."""
sched = self.sched()
self.assertEquals(sched.intervalCount, 1)
self.assertEquals(sched.intervalPeriod, 'hour')
def _check_intervalStart(self, period='second', count=30, variance=30):
"""Test the ScheduledInterval.intervalStart() method.
:param str period: The interval type for the period.
:param int count: The number of **period**s within an interval.
:param int variance: The amount of variance (in seconds) to tolerate
between the start of the interval containing now, and now.
"""
now = int(self.now())
sched = self.sched(period, count)
time = sched.intervalStart(now)
self.assertIsInstance(time, int)
self.assertApproximates(now, time, variance)
def test_ScheduledInterval_intervalStart_month(self):
self._check_intervalStart('month', 1, 31*24*60*60)
def test_ScheduledInterval_intervalStart_week(self):
self._check_intervalStart('week', 2, 14*24*60*60)
def test_ScheduledInterval_intervalStart_day(self):
self._check_intervalStart('days', 5, 5*24*60*60)
def test_ScheduledInterval_intervalStart_hour(self):
self._check_intervalStart('hours', 12, 12*60*60)
def test_ScheduledInterval_intervalStart_minute(self):
self._check_intervalStart('minute', 10, 10*60)
def test_ScheduledInterval_intervalStart_seconds(self):
self._check_intervalStart('seconds', 30, 30)
def _check_getInterval(self, period='second', count=30, variance=30):
"""Test the ScheduledInterval.getInterval() method.
:param str period: The interval type for the period.
:param int count: The number of **period**s within an interval.
:param int variance: The amount of variance (in seconds) to tolerate
between the start of the interval containing now, and now.
"""
now = int(self.now())
sched = self.sched(period, count)
ts = sched.getInterval(now)
self.assertIsInstance(ts, str)
secs = [int(x) for x in ts.replace('-', ' ').replace(':', ' ').split()]
[secs.append(0) for _ in xrange(6-len(secs))]
secs = schedule.calendar.timegm(secs)
self.assertApproximates(now, secs, variance)
def test_ScheduledInterval_getInterval_month(self):
self._check_getInterval('month', 2, 2*31*24*60*60)
def test_ScheduledInterval_getInterval_week(self):
self._check_getInterval('week', 1, 7*24*60*60)
def test_ScheduledInterval_getInterval_day(self):
self._check_getInterval('days', 4, 4*24*60*60)
def test_ScheduledInterval_getInterval_hour(self):
self._check_getInterval('hours', 23, 23*60*60)
def test_ScheduledInterval_getInterval_minute(self):
self._check_getInterval('minutes', 15, 15*60)
def test_ScheduledInterval_getInterval_seconds(self):
self._check_getInterval('seconds', 10, 60)
def _check_nextIntervalStarts(self, period='second', count=30, variance=30):
"""Test the ScheduledInterval.nextIntervalStarts() method.
:param str period: The interval type for the period.
:param int count: The number of **period**s within an interval.
:param int variance: The amount of variance (in seconds) to tolerate
between the start of the interval containing now, and now.
"""
now = int(self.now())
sched = self.sched(period, count)
time = sched.nextIntervalStarts(now)
self.assertIsInstance(time, int)
# (now + variance - time) should be > variance
self.assertApproximates(now + variance, time, variance)
def test_ScheduledInterval_nextIntervalStarts_month(self):
self._check_nextIntervalStarts('month', 2, 2*31*24*60*60)
def test_ScheduledInterval_nextIntervalStarts_week(self):
self._check_nextIntervalStarts('week', 1, 7*24*60*60)
def test_ScheduledInterval_nextIntervalStarts_day(self):
self._check_nextIntervalStarts('days', 4, 4*24*60*60)
def test_ScheduledInterval_nextIntervalStarts_hour(self):
self._check_nextIntervalStarts('hours', 23, 23*60*60)
def test_ScheduledInterval_nextIntervalStarts_minute(self):
self._check_nextIntervalStarts('minutes', 15, 15*60)
def test_ScheduledInterval_nextIntervalStarts_seconds(self):
self._check_nextIntervalStarts('seconds', 10, 10)
| 2.296875
| 2
|
inventory/admin.py
|
brkyavuz/pfna
| 0
|
12780601
|
from django.contrib import admin
from inventory.models import Group, Host, Data
# Register your models here.
admin.site.register(Host)
admin.site.register(Group)
admin.site.register(Data)
| 1.476563
| 1
|
alipay/aop/api/response/AlipayBossFncGffundStandardvoucherBatchqueryResponse.py
|
antopen/alipay-sdk-python-all
| 0
|
12780602
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
from alipay.aop.api.response.AlipayResponse import AlipayResponse
from alipay.aop.api.domain.StandardVoucherOpenApiVO import StandardVoucherOpenApiVO
class AlipayBossFncGffundStandardvoucherBatchqueryResponse(AlipayResponse):
def __init__(self):
super(AlipayBossFncGffundStandardvoucherBatchqueryResponse, self).__init__()
self._cur_page = None
self._page_size = None
self._standard_voucher_list = None
self._success = None
self._total_items = None
self._total_pages = None
@property
def cur_page(self):
return self._cur_page
@cur_page.setter
def cur_page(self, value):
self._cur_page = value
@property
def page_size(self):
return self._page_size
@page_size.setter
def page_size(self, value):
self._page_size = value
@property
def standard_voucher_list(self):
return self._standard_voucher_list
@standard_voucher_list.setter
def standard_voucher_list(self, value):
if isinstance(value, list):
self._standard_voucher_list = list()
for i in value:
if isinstance(i, StandardVoucherOpenApiVO):
self._standard_voucher_list.append(i)
else:
self._standard_voucher_list.append(StandardVoucherOpenApiVO.from_alipay_dict(i))
@property
def success(self):
return self._success
@success.setter
def success(self, value):
self._success = value
@property
def total_items(self):
return self._total_items
@total_items.setter
def total_items(self, value):
self._total_items = value
@property
def total_pages(self):
return self._total_pages
@total_pages.setter
def total_pages(self, value):
self._total_pages = value
def parse_response_content(self, response_content):
response = super(AlipayBossFncGffundStandardvoucherBatchqueryResponse, self).parse_response_content(response_content)
if 'cur_page' in response:
self.cur_page = response['cur_page']
if 'page_size' in response:
self.page_size = response['page_size']
if 'standard_voucher_list' in response:
self.standard_voucher_list = response['standard_voucher_list']
if 'success' in response:
self.success = response['success']
if 'total_items' in response:
self.total_items = response['total_items']
if 'total_pages' in response:
self.total_pages = response['total_pages']
| 1.960938
| 2
|
pandaexcel.py
|
cenchaojun/pytorch-image-models
| 0
|
12780603
|
import pandas as pd
def deal():
    # list of company names
    company_name_list = ['12312', '141', '515', '41']
    # convert the list to a DataFrame
    df = pd.DataFrame(company_name_list)
    # save the DataFrame to a local CSV file
    df.to_csv("company_name_li.csv", index=False)
if __name__ == '__main__':
deal()
| 2.765625
| 3
|
setup.py
|
equinor/webviz-dev-sync
| 0
|
12780604
|
import os
import re
import pathlib
from setuptools import setup, find_packages
def get_long_description() -> str:
"""Converts relative repository links to absolute URLs
if GITHUB_REPOSITORY and GITHUB_SHA environment variables exist.
If not, it returns the raw content in README.md.
"""
raw_readme = pathlib.Path("README.md").read_text()
repository = os.environ.get("GITHUB_REPOSITORY")
sha = os.environ.get("GITHUB_SHA")
if repository is not None and sha is not None:
full_url = f"https://github.com/{repository}/blob/{sha}/"
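        # Example: a relative link "](docs/usage.md)" becomes
        # "](https://github.com/<repository>/blob/<sha>/docs/usage.md)".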
return re.sub(r"]\((?!https)", "](" + full_url, raw_readme)
return raw_readme
# pylint: disable=line-too-long
setup(
name="webviz-dev-sync",
description="Developer tool for syncing webviz packages",
long_description=get_long_description(),
long_description_content_type="text/markdown",
url="https://github.com/equinor/webviz-dev-sync",
author="<NAME>",
packages=find_packages(exclude=["tests"]),
package_data={
"webviz_dev_sync": [
"py.typed",
]
},
entry_points={
"console_scripts": ["webviz-dev=webviz_dev_sync.command_line:main"],
},
install_requires=[
"gitpython>=3.1.18",
"jsonschema>=4.0.0",
"Pillow>=7.0.0",
"progress>=1.6",
"PyGithub>=1.55",
"pysimplegui>=4.55.1",
"pyyaml>=5.4.1",
"types-PyYAML>=5.4.1",
],
setup_requires=["setuptools_scm~=3.2"],
python_requires="~=3.6",
use_scm_version=True,
zip_safe=False,
project_urls={
"Documentation": "https://equinor.github.io/webviz-dev-sync",
"Download": "https://equinor.github.io/webviz-dev-sync",
"Source": "https://equinor.github.io/webviz-dev-sync",
"Tracker": "https://equinor.github.io/webviz-dev-sync/issues",
},
classifiers=[
"Programming Language :: Python :: 3",
"Operating System :: OS Independent",
"Natural Language :: English",
"Environment :: Web Environment",
"Topic :: Multimedia :: Graphics",
"Topic :: Scientific/Engineering",
"Topic :: Scientific/Engineering :: Visualization",
"License :: OSI Approved :: MIT License",
],
)
| 1.960938
| 2
|
models/base_model.py
|
xuyouze/DropNet
| 1
|
12780605
|
# coding:utf-8
# @Time : 2019/5/15
# @Author : xuyouze
# @File Name : base_model.py
import importlib
import os
from abc import ABC, abstractmethod
from collections import OrderedDict
import torch
from torch import nn
from config.base_config import BaseConfig
from networks import *
class BaseModel(ABC):
def __init__(self, config: BaseConfig) -> None:
super().__init__()
self.config = config
self.net_names = []
self.optimizers = []
self.schedulers = []
self.save_path = config.checkpoints_dir
self.correct = None
self.output = None
self.attr = None
self.validate_size = None
self.pos_num = None
self.tnr = None
self.tpr = None
@abstractmethod
def set_input(self, x):
pass
@abstractmethod
def forward(self):
pass
def optimize_parameters(self):
self.forward()
for optimizer in self.optimizers:
optimizer.zero_grad()
self.backward()
for optimizer in self.optimizers:
optimizer.step()
def backward(self):
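        # Each sub-network's loss is looked up dynamically by name:
        # loss_<name> = criterion_<name>(output_<name>, attr_<name>), then backpropagated.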
for name in self.net_names:
setattr(self, "loss_%s" % name,
getattr(self, "criterion_%s" % name)(getattr(self, "output_%s" % name),
getattr(self,
"attr_%s" % name)).cuda())
getattr(self, "loss_%s" % name).backward()
def setup(self):
"""
setup the network
if Train:
set the optimizer
else:
load the pre-training models
:return:
"""
print('-----------------------------------------------')
if self.config.isTrain:
self.schedulers = [get_scheduler(optimizer, self.config) for optimizer in self.optimizers]
if not self.config.isTrain or self.config.continue_train:
load_prefix = "iter_%d" % self.config.load_iter if self.config.load_iter > 0 else self.config.last_epoch
self.load_networks(load_prefix)
self.print_networks()
def update_learning_rate(self):
for scheduler in self.schedulers:
scheduler.step()
def get_current_loss(self):
errors_map = OrderedDict()
for name in self.net_names:
if isinstance(name, str):
errors_map[name] = float(getattr(self, "loss_" + name))
return errors_map
def eval(self):
for name in self.net_names:
if isinstance(name, str):
net = getattr(self, "net_" + name)
net.eval()
def train(self):
for name in self.net_names:
if isinstance(name, str):
net = getattr(self, "net_" + name)
net.train()
def save_networks(self, epoch_prefix):
for name in self.net_names:
if isinstance(name, str):
save_filename = "%s_net_%s.pth" % (epoch_prefix, name)
save_path = os.path.join(self.save_path, save_filename)
net = getattr(self, "net_" + name)
torch.save(net.module.cpu().state_dict(), save_path)
net.cuda()
def load_networks(self, epoch_prefix):
for name in self.net_names:
if isinstance(name, str):
load_filename = "%s_net_%s.pth" % (epoch_prefix, name)
load_path = os.path.join(self.save_path, load_filename)
net = getattr(self, "net_" + name)
if isinstance(net, nn.DataParallel):
net = net.module
print('loading the model from %s' % load_path)
state_dict = torch.load(load_path)
net.load_state_dict(state_dict)
def set_requires_grad(self, nets, requires_grad=False):
if not isinstance(nets, list):
nets = [nets]
for net in nets:
if net is not None:
for param in net.parameters():
param.requires_grad = requires_grad
def print_networks(self):
for name in self.net_names:
if isinstance(name, str):
net = getattr(self, "net_" + name)
num_params = 0
for param in net.parameters():
num_params += param.numel()
self.config.logger.info('[Network %s] Total number of parameters : %.3f M' % (name, num_params / 1e6))
def get_learning_rate(self):
return self.optimizers[0].param_groups[0]["lr"]
def test(self):
with torch.no_grad():
self.forward()
self.output = torch.zeros(self.attr.size(0), self.config.dataset_config.attribute_num)
for name in self.net_names:
self.output[:, getattr(self, "attr_%s_index" % name)] = getattr(self, "output_%s" % name).cpu()
com1 = self.output > 0.5
com2 = self.attr > 0
# class_balance accuracy
accuracy = com1.eq(com2)
self.pos_num.add_(com2.sum(0).float())
tpr = (accuracy & (com2 > 0)).sum(0).float()
tnr = (accuracy & (com2 < 1)).sum(0).float()
self.tpr.add_(tpr)
self.tnr.add_(tnr)
# mean accuracy
mean_accuracy = accuracy.sum(0).float()
self.correct.add_(mean_accuracy)
def get_model_precision(self):
return self.correct / self.validate_size
def get_model_class_balance_precision(self):
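        # Balanced accuracy per attribute: the mean of the true-positive rate and the
        # true-negative rate, using the positive/negative counts accumulated in test().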
return 1 / 2 * (self.tpr / self.pos_num + self.tnr / (self.get_validate_size() - self.pos_num))
def clear_precision(self):
self.correct = torch.FloatTensor(self.config.dataset_config.attribute_num).fill_(0)
self.tpr = torch.FloatTensor(self.config.dataset_config.attribute_num).fill_(0)
self.tnr = torch.FloatTensor(self.config.dataset_config.attribute_num).fill_(0)
self.pos_num = torch.FloatTensor(self.config.dataset_config.attribute_num).fill_(0)
def create_network_model(self):
return create_network_model(self.config)
def set_validate_size(self, validate_size: int):
self.validate_size = validate_size
def get_validate_size(self):
if self.validate_size:
return self.validate_size
else:
return 0
| 2.5
| 2
|
rendering.py
|
NChechulin/telegram-renderer-bot
| 1
|
12780606
|
"""This module contains functions for converting LaTeX and Markdown files"""
import string
import random
import os
import multiprocessing
import time
from markdown import markdown
import pdfkit
MAX_WAIT_TIME = 3
POLLING_RATE = 10
def try_create_tempdir():
os.makedirs(os.getcwd() + "/TEMP", exist_ok=True)
def generate_random_name(size=120, alphabet=string.ascii_letters):
"""This fuction generates a random name from file. The length and alphabet can be changed"""
return ''.join(random.choice(alphabet) for _ in range(size))
def render_markdown(code):
"""Returns path to generated PDF or None"""
filename = 'TEMP/' + generate_random_name() + '.pdf'
try:
try_create_tempdir()
html_text = markdown(code, output_format='html4')
pdfkit.from_string(html_text, filename)
except Exception:
return None
return filename
def __run_pdflatex(code, send_end):
"""Sets path to generated PDF or None"""
filename = 'TEMP/' + generate_random_name()
tex_file_path = filename + '.tex'
try_create_tempdir()
with open(tex_file_path, 'w') as tex_file:
tex_file.write(code)
try:
command = f'pdflatex -output-directory=TEMP {tex_file_path} > /dev/null'
os.system(command)
send_end.send(filename + '.pdf')
except Exception:
send_end.send(None)
def render_latex(code):
"""Returns path to generated PDF or None"""
pdf_file_path = None
recv_end, send_end = multiprocessing.Pipe(False)
proc = multiprocessing.Process(
target=__run_pdflatex, args=(code, send_end))
proc.start()
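    # Poll up to MAX_WAIT_TIME seconds, checking POLLING_RATE times per second; if pdflatex
    # has not finished in time, the child process is killed and None is returned.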
for _ in range(POLLING_RATE * MAX_WAIT_TIME):
if proc.is_alive():
time.sleep(1 / POLLING_RATE)
else:
pdf_file_path = recv_end.recv()
break
if proc.is_alive():
proc.kill()
proc.join()
return pdf_file_path
| 2.90625
| 3
|
authz/controller/apiv1/__init__.py
|
nimatbt/Auth-Microservice
| 0
|
12780607
|
from authz.controller.apiv1.user import UserController
from authz.controller.apiv1.auth import AuthController # 20-1 : 52'
| 1.21875
| 1
|
python/challenges/datastructures/arrays-left-rotation.py
|
KoderDojo/hackerrank
| 1
|
12780608
|
"""
Given an array of integers and a number, d, perform d left rotations
on the array. Then print the updated array as a single line of
space-separated integers.
"""
_, d = map(int, input().strip().split(' '))
arr = input().strip().split(' ')
shifted_arr = arr[d:] + arr[0:d]
print(' '.join(shifted_arr))
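# Worked example: with "5 2" and "1 2 3 4 5" as input, d = 2 and
# arr[2:] + arr[0:2] == ['3', '4', '5', '1', '2'], printed as "3 4 5 1 2".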
| 3.71875
| 4
|
gendoc.py
|
Francesco149/jniproxy
| 23
|
12780609
|
#!/usr/bin/env python
import sys
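# Prints the body of the first /* ... */ block comment in jniproxy.c,
# dropping the first four characters and the trailing newline of each printed line.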
print_lines = False
with open("jniproxy.c", "r") as f:
for line in f:
if line.strip().endswith("*/"):
sys.exit(0);
if print_lines:
print(line[4:-1])
elif line.strip().startswith("/*"):
print_lines = True
| 2.546875
| 3
|
tests/backend_test.py
|
onebitaway/khal
| 0
|
12780610
|
<filename>tests/backend_test.py
import pytest
import pytz
from datetime import date, datetime, timedelta, time
import icalendar
from khal.khalendar import backend
from khal.khalendar.event import LocalizedEvent
from khal.khalendar.exceptions import OutdatedDbVersionError, UpdateFailed
from .aux import _get_text
BERLIN = pytz.timezone('Europe/Berlin')
LONDON = pytz.timezone('Europe/London')
SAMOA = pytz.timezone('Pacific/Samoa')
SYDNEY = pytz.timezone('Australia/Sydney')
LOCALE_BERLIN = {'local_timezone': BERLIN, 'default_timezone': BERLIN}
LOCALE_SAMOA = {'local_timezone': SAMOA, 'default_timezone': SAMOA}
LOCALE_SYDNEY = {'local_timezone': SYDNEY, 'default_timezone': SYDNEY}
calname = 'home'
def test_new_db_version():
dbi = backend.SQLiteDb(calname, ':memory:', locale=LOCALE_BERLIN)
backend.DB_VERSION += 1
with pytest.raises(OutdatedDbVersionError):
dbi._check_table_version()
def test_event_rrule_recurrence_id():
dbi = backend.SQLiteDb(calname, ':memory:', locale=LOCALE_BERLIN)
assert dbi.list() == list()
events = dbi.get_localized([calname],
BERLIN.localize(datetime(2014, 6, 30, 0, 0)),
BERLIN.localize(datetime(2014, 8, 26, 0, 0)))
assert list(events) == list()
dbi.update(_get_text('event_rrule_recuid'), href='12345.ics', etag='abcd')
assert dbi.list() == [('12345.ics', 'abcd')]
events = dbi.get_localized([calname],
BERLIN.localize(datetime(2014, 6, 30, 0, 0)),
BERLIN.localize(datetime(2014, 8, 26, 0, 0)))
events = sorted(events, key=lambda x: x.start)
assert len(events) == 6
assert events[0].start == BERLIN.localize(datetime(2014, 6, 30, 7, 0))
assert events[1].start == BERLIN.localize(datetime(2014, 7, 7, 9, 0))
assert events[2].start == BERLIN.localize(datetime(2014, 7, 14, 7, 0))
assert events[3].start == BERLIN.localize(datetime(2014, 7, 21, 7, 0))
assert events[4].start == BERLIN.localize(datetime(2014, 7, 28, 7, 0))
assert events[5].start == BERLIN.localize(datetime(2014, 8, 4, 7, 0))
def test_event_different_timezones():
dbi = backend.SQLiteDb(calname, ':memory:', locale=LOCALE_BERLIN)
dbi.update(_get_text('event_dt_london'), href='12345.ics', etag='abcd')
events = dbi.get_localized([calname],
BERLIN.localize(datetime(2014, 4, 9, 0, 0)),
BERLIN.localize(datetime(2014, 4, 9, 23, 59)))
events = list(events)
assert len(events) == 1
event = events[0]
assert event.start_local == LONDON.localize(datetime(2014, 4, 9, 14))
assert event.end_local == LONDON.localize(datetime(2014, 4, 9, 19))
# no event scheduled on the next day
events = dbi.get_localized([calname],
BERLIN.localize(datetime(2014, 4, 10, 0, 0)),
BERLIN.localize(datetime(2014, 4, 10, 23, 59)))
events = list(events)
assert len(events) == 0
# now setting the local_timezone to Sydney
dbi.locale = LOCALE_SYDNEY
events = dbi.get_localized([calname],
SYDNEY.localize(datetime(2014, 4, 9, 0, 0)),
SYDNEY.localize(datetime(2014, 4, 9, 23, 59)))
events = list(events)
assert len(events) == 1
event = events[0]
assert event.start_local == SYDNEY.localize(datetime(2014, 4, 9, 23))
assert event.end_local == SYDNEY.localize(datetime(2014, 4, 10, 4))
    # the event spans midnight Sydney, therefore it should also show up on the
# next day
events = dbi.get_localized([calname],
SYDNEY.localize(datetime(2014, 4, 10, 0, 0)),
SYDNEY.localize(datetime(2014, 4, 10, 23, 59)))
events = list(events)
assert len(events) == 1
assert event.start_local == SYDNEY.localize(datetime(2014, 4, 9, 23))
assert event.end_local == SYDNEY.localize(datetime(2014, 4, 10, 4))
def test_event_rrule_recurrence_id_invalid_tzid():
dbi = backend.SQLiteDb(calname, ':memory:', locale=LOCALE_BERLIN)
dbi.update(_get_text('event_rrule_recuid_invalid_tzid'), href='12345.ics', etag='abcd')
events = dbi.get_localized([calname],
BERLIN.localize(datetime(2014, 4, 30, 0, 0)),
BERLIN.localize(datetime(2014, 9, 26, 0, 0)))
events = sorted(events)
assert len(events) == 6
assert events[0].start == BERLIN.localize(datetime(2014, 6, 30, 7, 0))
assert events[1].start == BERLIN.localize(datetime(2014, 7, 7, 9, 0))
assert events[2].start == BERLIN.localize(datetime(2014, 7, 14, 7, 0))
assert events[3].start == BERLIN.localize(datetime(2014, 7, 21, 7, 0))
assert events[4].start == BERLIN.localize(datetime(2014, 7, 28, 7, 0))
assert events[5].start == BERLIN.localize(datetime(2014, 8, 4, 7, 0))
event_rrule_recurrence_id_reverse = """
BEGIN:VCALENDAR
BEGIN:VEVENT
UID:event_rrule_recurrence_id
SUMMARY:Arbeit
RECURRENCE-ID:20140707T050000Z
DTSTART;TZID=Europe/Berlin:20140707T090000
DTEND;TZID=Europe/Berlin:20140707T140000
END:VEVENT
BEGIN:VEVENT
UID:event_rrule_recurrence_id
SUMMARY:Arbeit
RRULE:FREQ=WEEKLY;COUNT=6
DTSTART;TZID=Europe/Berlin:20140630T070000
DTEND;TZID=Europe/Berlin:20140630T120000
END:VEVENT
END:VCALENDAR
"""
def test_event_rrule_recurrence_id_reverse():
"""as icalendar elements can be saved in arbitrary order, we also have to
deal with `reverse` ordered icalendar files
"""
dbi = backend.SQLiteDb(calname, ':memory:', locale=LOCALE_BERLIN)
assert dbi.list() == list()
events = dbi.get_localized([calname],
BERLIN.localize(datetime(2014, 6, 30, 0, 0)),
BERLIN.localize(datetime(2014, 8, 26, 0, 0)))
assert list(events) == list()
dbi.update(event_rrule_recurrence_id_reverse, href='12345.ics', etag='abcd')
assert dbi.list() == [('12345.ics', 'abcd')]
events = dbi.get_localized([calname],
BERLIN.localize(datetime(2014, 6, 30, 0, 0)),
BERLIN.localize(datetime(2014, 8, 26, 0, 0)))
events = sorted(events, key=lambda x: x.start)
assert len(events) == 6
assert events[0].start == BERLIN.localize(datetime(2014, 6, 30, 7, 0))
assert events[1].start == BERLIN.localize(datetime(2014, 7, 7, 9, 0))
assert events[2].start == BERLIN.localize(datetime(2014, 7, 14, 7, 0))
assert events[3].start == BERLIN.localize(datetime(2014, 7, 21, 7, 0))
assert events[4].start == BERLIN.localize(datetime(2014, 7, 28, 7, 0))
assert events[5].start == BERLIN.localize(datetime(2014, 8, 4, 7, 0))
def test_event_rrule_recurrence_id_update_with_exclude():
"""
test if updates work as they should. The updated event has the extra
RECURRENCE-ID event removed and one recurrence date excluded via EXDATE
"""
dbi = backend.SQLiteDb(calname, ':memory:', locale=LOCALE_BERLIN)
dbi.update(_get_text('event_rrule_recuid'), href='12345.ics', etag='abcd')
dbi.update(_get_text('event_rrule_recuid_update'), href='12345.ics', etag='abcd')
events = dbi.get_localized([calname],
BERLIN.localize(datetime(2014, 4, 30, 0, 0)),
BERLIN.localize(datetime(2014, 9, 26, 0, 0)))
events = sorted(events, key=lambda x: x.start)
assert len(events) == 5
assert events[0].start == BERLIN.localize(datetime(2014, 6, 30, 7, 0))
assert events[1].start == BERLIN.localize(datetime(2014, 7, 7, 7, 0))
assert events[2].start == BERLIN.localize(datetime(2014, 7, 21, 7, 0))
assert events[3].start == BERLIN.localize(datetime(2014, 7, 28, 7, 0))
assert events[4].start == BERLIN.localize(datetime(2014, 8, 4, 7, 0))
def test_no_valid_timezone():
dbi = backend.SQLiteDb(calname, ':memory:', locale=LOCALE_BERLIN)
dbi.update(_get_text('event_dt_local_missing_tz'), href='12345.ics', etag='abcd')
events = dbi.get_localized([calname],
BERLIN.localize(datetime(2014, 4, 9, 0, 0)),
BERLIN.localize(datetime(2014, 4, 10, 0, 0)))
events = sorted(list(events))
assert len(events) == 1
event = events[0]
assert event.start == BERLIN.localize(datetime(2014, 4, 9, 9, 30))
def test_event_delete():
dbi = backend.SQLiteDb(calname, ':memory:', locale=LOCALE_BERLIN)
assert dbi.list() == list()
events = dbi.get_localized([calname],
BERLIN.localize(datetime(2014, 6, 30, 0, 0)),
BERLIN.localize(datetime(2014, 8, 26, 0, 0)))
assert list(events) == list()
dbi.update(event_rrule_recurrence_id_reverse, href='12345.ics', etag='abcd')
assert dbi.list() == [('12345.ics', 'abcd')]
events = dbi.get_localized([calname],
BERLIN.localize(datetime(2014, 6, 30, 0, 0)),
BERLIN.localize(datetime(2014, 9, 26, 0, 0)))
assert len(list(events)) == 6
dbi.delete('12345.ics')
events = dbi.get_localized([calname],
BERLIN.localize(datetime(2014, 6, 30, 0, 0)),
BERLIN.localize(datetime(2014, 9, 26, 0, 0)))
assert len(list(events)) == 0
event_rrule_this_and_prior = """
BEGIN:VCALENDAR
BEGIN:VEVENT
UID:event_rrule_recurrence_id
SUMMARY:Arbeit
RRULE:FREQ=WEEKLY;UNTIL=20140806T060000Z
DTSTART;TZID=Europe/Berlin:20140630T070000
DTEND;TZID=Europe/Berlin:20140630T120000
END:VEVENT
BEGIN:VEVENT
UID:event_rrule_recurrence_id
SUMMARY:Arbeit
RECURRENCE-ID;RANGE=THISANDPRIOR:20140707T050000Z
DTSTART;TZID=Europe/Berlin:20140707T090000
DTEND;TZID=Europe/Berlin:20140707T140000
END:VEVENT
END:VCALENDAR
"""
def test_this_and_prior():
"""we do not support THISANDPRIOR, therefore this should fail"""
dbi = backend.SQLiteDb(calname, ':memory:', locale=LOCALE_BERLIN)
with pytest.raises(UpdateFailed):
dbi.update(event_rrule_this_and_prior, href='12345.ics', etag='abcd')
event_rrule_this_and_future_temp = """
BEGIN:VCALENDAR
BEGIN:VEVENT
UID:event_rrule_recurrence_id
SUMMARY:Arbeit
RRULE:FREQ=WEEKLY;UNTIL=20140806T060000Z
DTSTART;TZID=Europe/Berlin:20140630T070000
DTEND;TZID=Europe/Berlin:20140630T120000
END:VEVENT
BEGIN:VEVENT
UID:event_rrule_recurrence_id
SUMMARY:Arbeit (lang)
RECURRENCE-ID;RANGE=THISANDFUTURE:20140707T050000Z
DTSTART;TZID=Europe/Berlin:{0}
DTEND;TZID=Europe/Berlin:{1}
END:VEVENT
END:VCALENDAR
"""
event_rrule_this_and_future = \
event_rrule_this_and_future_temp.format('20140707T090000', '20140707T180000')
def test_event_rrule_this_and_future():
dbi = backend.SQLiteDb(calname, ':memory:', locale=LOCALE_BERLIN)
dbi.update(event_rrule_this_and_future, href='12345.ics', etag='abcd')
assert dbi.list() == [('12345.ics', 'abcd')]
events = dbi.get_localized([calname],
BERLIN.localize(datetime(2014, 4, 30, 0, 0)),
BERLIN.localize(datetime(2014, 9, 26, 0, 0)))
events = sorted(events, key=lambda x: x.start)
assert len(events) == 6
assert events[0].start == BERLIN.localize(datetime(2014, 6, 30, 7, 0))
assert events[1].start == BERLIN.localize(datetime(2014, 7, 7, 9, 0))
assert events[2].start == BERLIN.localize(datetime(2014, 7, 14, 9, 0))
assert events[3].start == BERLIN.localize(datetime(2014, 7, 21, 9, 0))
assert events[4].start == BERLIN.localize(datetime(2014, 7, 28, 9, 0))
assert events[5].start == BERLIN.localize(datetime(2014, 8, 4, 9, 0))
assert events[0].end == BERLIN.localize(datetime(2014, 6, 30, 12, 0))
assert events[1].end == BERLIN.localize(datetime(2014, 7, 7, 18, 0))
assert events[2].end == BERLIN.localize(datetime(2014, 7, 14, 18, 0))
assert events[3].end == BERLIN.localize(datetime(2014, 7, 21, 18, 0))
assert events[4].end == BERLIN.localize(datetime(2014, 7, 28, 18, 0))
assert events[5].end == BERLIN.localize(datetime(2014, 8, 4, 18, 0))
assert str(events[0].summary) == 'Arbeit'
for num, event in enumerate(events[1:]):
event.raw
assert str(event.summary) == 'Arbeit (lang)'
event_rrule_this_and_future_multi_day_shift = \
event_rrule_this_and_future_temp.format('20140708T090000', '20140709T150000')
def test_event_rrule_this_and_future_multi_day_shift():
dbi = backend.SQLiteDb(calname, ':memory:', locale=LOCALE_BERLIN)
dbi.update(event_rrule_this_and_future_multi_day_shift, href='12345.ics', etag='abcd')
assert dbi.list() == [('12345.ics', 'abcd')]
events = dbi.get_localized([calname],
BERLIN.localize(datetime(2014, 4, 30, 0, 0)),
BERLIN.localize(datetime(2014, 9, 26, 0, 0)))
events = sorted(events, key=lambda x: x.start)
assert len(events) == 6
assert events[0].start == BERLIN.localize(datetime(2014, 6, 30, 7, 0))
assert events[1].start == BERLIN.localize(datetime(2014, 7, 8, 9, 0))
assert events[2].start == BERLIN.localize(datetime(2014, 7, 15, 9, 0))
assert events[3].start == BERLIN.localize(datetime(2014, 7, 22, 9, 0))
assert events[4].start == BERLIN.localize(datetime(2014, 7, 29, 9, 0))
assert events[5].start == BERLIN.localize(datetime(2014, 8, 5, 9, 0))
assert events[0].end == BERLIN.localize(datetime(2014, 6, 30, 12, 0))
assert events[1].end == BERLIN.localize(datetime(2014, 7, 9, 15, 0))
assert events[2].end == BERLIN.localize(datetime(2014, 7, 16, 15, 0))
assert events[3].end == BERLIN.localize(datetime(2014, 7, 23, 15, 0))
assert events[4].end == BERLIN.localize(datetime(2014, 7, 30, 15, 0))
assert events[5].end == BERLIN.localize(datetime(2014, 8, 6, 15, 0))
assert str(events[0].summary) == 'Arbeit'
for event in events[1:]:
assert str(event.summary) == 'Arbeit (lang)'
event_rrule_this_and_future_allday_temp = """
BEGIN:VCALENDAR
BEGIN:VEVENT
UID:event_rrule_recurrence_id_allday
SUMMARY:Arbeit
RRULE:FREQ=WEEKLY;UNTIL=20140806
DTSTART;VALUE=DATE:20140630
DTEND;VALUE=DATE:20140701
END:VEVENT
BEGIN:VEVENT
UID:event_rrule_recurrence_id_allday
SUMMARY:Arbeit (lang)
RECURRENCE-ID;RANGE=THISANDFUTURE;VALUE=DATE:20140707
DTSTART;VALUE=DATE:{}
DTEND;VALUE=DATE:{}
END:VEVENT
END:VCALENDAR
"""
event_rrule_this_and_future_allday = \
event_rrule_this_and_future_allday_temp.format(20140708, 20140709)
def test_event_rrule_this_and_future_allday():
dbi = backend.SQLiteDb(calname, ':memory:', locale=LOCALE_BERLIN)
dbi.update(event_rrule_this_and_future_allday,
href='rrule_this_and_future_allday.ics', etag='abcd')
assert dbi.list() == [('rrule_this_and_future_allday.ics', 'abcd')]
events = list(dbi.get_floating([calname],
datetime(2014, 4, 30, 0, 0),
datetime(2014, 9, 27, 0, 0)))
assert len(events) == 6
assert events[0].start == date(2014, 6, 30)
assert events[1].start == date(2014, 7, 8)
assert events[2].start == date(2014, 7, 15)
assert events[3].start == date(2014, 7, 22)
assert events[4].start == date(2014, 7, 29)
assert events[5].start == date(2014, 8, 5)
assert events[0].end == date(2014, 6, 30)
assert events[1].end == date(2014, 7, 8)
assert events[2].end == date(2014, 7, 15)
assert events[3].end == date(2014, 7, 22)
assert events[4].end == date(2014, 7, 29)
assert events[5].end == date(2014, 8, 5)
assert str(events[0].summary) == 'Arbeit'
for event in events[1:]:
assert str(event.summary) == 'Arbeit (lang)'
def test_event_rrule_this_and_future_allday_prior():
event_rrule_this_and_future_allday_prior = \
event_rrule_this_and_future_allday_temp.format(20140705, 20140706)
dbi = backend.SQLiteDb(calname, ':memory:', locale=LOCALE_BERLIN)
dbi.update(event_rrule_this_and_future_allday_prior,
href='rrule_this_and_future_allday.ics', etag='abcd')
assert dbi.list() == [('rrule_this_and_future_allday.ics', 'abcd')]
events = list(dbi.get_floating([calname],
datetime(2014, 4, 30, 0, 0),
datetime(2014, 9, 27, 0, 0)))
assert len(events) == 6
assert events[0].start == date(2014, 6, 30)
assert events[1].start == date(2014, 7, 5)
assert events[2].start == date(2014, 7, 12)
assert events[3].start == date(2014, 7, 19)
assert events[4].start == date(2014, 7, 26)
assert events[5].start == date(2014, 8, 2)
assert events[0].end == date(2014, 6, 30)
assert events[1].end == date(2014, 7, 5)
assert events[2].end == date(2014, 7, 12)
assert events[3].end == date(2014, 7, 19)
assert events[4].end == date(2014, 7, 26)
assert events[5].end == date(2014, 8, 2)
assert str(events[0].summary) == 'Arbeit'
for event in events[1:]:
assert str(event.summary) == 'Arbeit (lang)'
event_rrule_multi_this_and_future_allday = """BEGIN:VCALENDAR
BEGIN:VEVENT
UID:event_multi_rrule_recurrence_id_allday
SUMMARY:Arbeit
RRULE:FREQ=WEEKLY;UNTIL=20140806
DTSTART;VALUE=DATE:20140630
DTEND;VALUE=DATE:20140701
END:VEVENT
BEGIN:VEVENT
UID:event_multi_rrule_recurrence_id_allday
SUMMARY:Arbeit (neu)
RECURRENCE-ID;RANGE=THISANDFUTURE;VALUE=DATE:20140721
DTSTART;VALUE=DATE:20140717
DTEND;VALUE=DATE:20140718
END:VEVENT
BEGIN:VEVENT
UID:event_multi_rrule_recurrence_id_allday
SUMMARY:Arbeit (lang)
RECURRENCE-ID;RANGE=THISANDFUTURE;VALUE=DATE:20140707
DTSTART;VALUE=DATE:20140712
DTEND;VALUE=DATE:20140714
END:VEVENT
END:VCALENDAR"""
def test_event_rrule_multi_this_and_future_allday():
dbi = backend.SQLiteDb(calname, ':memory:', locale=LOCALE_BERLIN)
dbi.update(event_rrule_multi_this_and_future_allday,
href='event_rrule_multi_this_and_future_allday.ics', etag='abcd')
assert dbi.list() == [('event_rrule_multi_this_and_future_allday.ics', 'abcd')]
events = sorted(dbi.get_floating([calname],
datetime(2014, 4, 30, 0, 0),
datetime(2014, 9, 27, 0, 0)))
assert len(events) == 6
assert events[0].start == date(2014, 6, 30)
assert events[1].start == date(2014, 7, 12)
assert events[2].start == date(2014, 7, 17)
assert events[3].start == date(2014, 7, 19)
assert events[4].start == date(2014, 7, 24)
assert events[5].start == date(2014, 7, 31)
assert events[0].end == date(2014, 6, 30)
assert events[1].end == date(2014, 7, 13)
assert events[2].end == date(2014, 7, 17)
assert events[3].end == date(2014, 7, 20)
assert events[4].end == date(2014, 7, 24)
assert events[5].end == date(2014, 7, 31)
assert str(events[0].summary) == 'Arbeit'
for event in [events[1], events[3]]:
assert str(event.summary) == 'Arbeit (lang)'
for event in [events[2], events[4], events[5]]:
assert str(event.summary) == 'Arbeit (neu)'
master = """BEGIN:VEVENT
UID:event_rrule_recurrence_id
SUMMARY:Arbeit
RRULE:FREQ=WEEKLY;UNTIL=20140806T060000Z
DTSTART;TZID=Europe/Berlin:20140630T070000
DTEND;TZID=Europe/Berlin:20140630T120000
END:VEVENT"""
recuid_this_future = icalendar.Event.from_ical("""BEGIN:VEVENT
UID:event_rrule_recurrence_id
SUMMARY:Arbeit
RECURRENCE-ID;RANGE=THISANDFUTURE:20140707T050000Z
DTSTART;TZID=Europe/Berlin:20140707T090000
DTEND;TZID=Europe/Berlin:20140707T140000
END:VEVENT""")
recuid_this_future_duration = icalendar.Event.from_ical("""BEGIN:VEVENT
UID:event_rrule_recurrence_id
SUMMARY:Arbeit
RECURRENCE-ID;RANGE=THISANDFUTURE:20140707T050000Z
DTSTART;TZID=Europe/Berlin:20140707T090000
DURATION:PT4H30M
END:VEVENT""")
def test_calc_shift_deltas():
assert (timedelta(hours=2), timedelta(hours=5)) == \
backend.calc_shift_deltas(recuid_this_future)
assert (timedelta(hours=2), timedelta(hours=4, minutes=30)) == \
backend.calc_shift_deltas(recuid_this_future_duration)
event_a = """BEGIN:VEVENT
UID:123
SUMMARY:event a
RRULE:FREQ=WEEKLY;UNTIL=20140806T060000Z
DTSTART;TZID=Europe/Berlin:20140630T070000
DTEND;TZID=Europe/Berlin:20140630T120000
END:VEVENT"""
event_b = """BEGIN:VEVENT
UID:123
SUMMARY:event b
RRULE:FREQ=WEEKLY;UNTIL=20140806T060000Z
DTSTART;TZID=Europe/Berlin:20140630T070000
DTEND;TZID=Europe/Berlin:20140630T120000
END:VEVENT"""
def test_two_calendars_same_uid(tmpdir):
dbpath = str(tmpdir) + '/khal.db'
dba = backend.SQLiteDb(calname, dbpath, locale=LOCALE_BERLIN)
dbb = backend.SQLiteDb('work', dbpath, locale=LOCALE_BERLIN)
assert dba.list() == []
assert dbb.list() == []
dba.update(event_a, href='12345.ics', etag='abcd')
assert dba.list() == [('12345.ics', 'abcd')]
assert dbb.list() == []
dbb.update(event_b, href='12345.ics', etag='abcd')
assert dba.list() == [('12345.ics', 'abcd')]
assert dbb.list() == [('12345.ics', 'abcd')]
events_a = list(dba.get_localized(['home'],
BERLIN.localize(datetime(2014, 6, 30, 0, 0)),
BERLIN.localize(datetime(2014, 7, 26, 0, 0))))
events_b = list(dba.get_localized(['work'],
BERLIN.localize(datetime(2014, 6, 30, 0, 0)),
BERLIN.localize(datetime(2014, 7, 26, 0, 0))))
assert len(events_a) == 4
assert len(events_b) == 4
events_c = list(dba.get_localized(['work', 'home'],
BERLIN.localize(datetime(2014, 6, 30, 0, 0)),
BERLIN.localize(datetime(2014, 7, 26, 0, 0))))
assert len(events_c) == 8
assert [event.calendar for event in events_c].count('home') == 4
assert [event.calendar for event in events_c].count('work') == 4
dba.delete('12345.ics')
events_a = list(dba.get_localized(['home'],
BERLIN.localize(datetime(2014, 6, 30, 0, 0)),
BERLIN.localize(datetime(2014, 7, 26, 0, 0))))
events_b = list(dba.get_localized(['work'],
BERLIN.localize(datetime(2014, 6, 30, 0, 0)),
BERLIN.localize(datetime(2014, 7, 26, 0, 0))))
assert len(events_a) == 0
assert len(events_b) == 4
events_c = list(dba.get_localized(['work', 'home'],
BERLIN.localize(datetime(2014, 6, 30, 0, 0)),
BERLIN.localize(datetime(2014, 7, 26, 0, 0))))
assert [event.calendar for event in events_c].count('home') == 0
assert [event.calendar for event in events_c].count('work') == 4
assert dba.list() == []
assert dbb.list() == [('12345.ics', 'abcd')]
def test_update_one_should_not_affect_others(tmpdir):
"""test if an THISANDFUTURE param effects other events as well"""
dbpath = str(tmpdir) + '/khal.db'
db = backend.SQLiteDb(calname, dbpath, locale=LOCALE_BERLIN)
db.update(_get_text('event_d_15'), href='first')
events = db.get_floating([calname], datetime(2015, 4, 9, 0, 0), datetime(2015, 4, 10, 0, 0))
assert len(list(events)) == 1
db.update(event_rrule_multi_this_and_future_allday, href='second')
events = list(db.get_floating([calname],
datetime(2015, 4, 9, 0, 0),
datetime(2015, 4, 10, 0, 0)))
assert len(events) == 1
def test_zuluv_events(tmpdir):
"""test if events in Zulu time are correctly recognized as locaized events"""
dbpath = str(tmpdir) + '/khal.db'
db = backend.SQLiteDb(calname, dbpath, locale=LOCALE_BERLIN)
db.update(_get_text('event_dt_simple_zulu'), href='event_zulu')
events = db.get_localized([calname],
BERLIN.localize(datetime(2014, 4, 9, 0, 0)),
BERLIN.localize(datetime(2014, 4, 10, 0, 0)))
events = list(events)
assert len(events) == 1
event = events[0]
assert type(event) == LocalizedEvent
assert event.start_local == BERLIN.localize(datetime(2014, 4, 9, 11, 30))
event_rdate_period = """BEGIN:VEVENT
SUMMARY:RDATE period
DTSTART:19961230T020000Z
DTEND:19961230T060000Z
UID:rdate_period
RDATE;VALUE=PERIOD:19970101T180000Z/19970102T070000Z,19970109T180000Z/PT5H30M
END:VEVENT"""
supported_events = [
event_a, event_b, event_rrule_this_and_future,
event_rrule_this_and_future_allday,
event_rrule_this_and_future_multi_day_shift
]
def test_check_support():
ical = icalendar.Calendar.from_ical(event_rrule_this_and_prior)
for cal_str in supported_events:
ical = icalendar.Calendar.from_ical(cal_str)
[backend.check_support(event, '', '') for event in ical.walk()]
ical = icalendar.Calendar.from_ical(event_rrule_this_and_prior)
with pytest.raises(UpdateFailed):
[backend.check_support(event, '', '') for event in ical.walk()]
ical = icalendar.Calendar.from_ical(event_rdate_period)
with pytest.raises(UpdateFailed):
[backend.check_support(event, '', '') for event in ical.walk()]
card = """BEGIN:VCARD
VERSION:3.0
FN:Unix
BDAY:19710311
END:VCARD
"""
card_no_year = """BEGIN:VCARD
VERSION:3.0
FN:Unix
BDAY:--0311
END:VCARD
"""
card_does_not_parse = """BEGIN:VCARD
VERSION:3.0
FN:Unix
BDAY:x
END:VCARD
"""
card_no_fn = """BEGIN:VCARD
VERSION:3.0
N:Ritchie;Dennis;MacAlistair;;
BDAY:19410909
END:VCARD
"""
day = date(1971, 3, 11)
start = datetime.combine(day, time.min)
end = datetime.combine(day, time.max)
def test_birthdays(tmpdir):
dbpath = str(tmpdir) + '/khal.db'
db = backend.SQLiteDb_Birthdays(calname, dbpath, locale=LOCALE_BERLIN)
assert list(db.get_floating([calname], start, end)) == list()
db.update(card, 'unix.vcf')
events = list(db.get_floating([calname], start, end))
assert len(events) == 1
assert events[0].summary == 'Unix\'s birthday'
def test_birthdays_no_year(tmpdir):
dbpath = str(tmpdir) + '/khal.db'
db = backend.SQLiteDb_Birthdays(calname, dbpath, locale=LOCALE_BERLIN)
assert list(db.get_floating([calname], start, end)) == list()
db.update(card_no_year, 'unix.vcf')
events = list(db.get_floating([calname], start, end))
assert len(events) == 1
assert events[0].summary == 'Unix\'s birthday'
def test_birthdays_no_fn(tmpdir):
dbpath = str(tmpdir) + '/khal.db'
db = backend.SQLiteDb_Birthdays('home', dbpath, locale=LOCALE_BERLIN)
assert list(db.get_floating(['home'],
datetime(1941, 9, 9, 0, 0),
datetime(1941, 9, 9, 23, 59, 59, 9999))) == list()
db.update(card_no_fn, 'unix.vcf')
events = list(db.get_floating(['home'],
datetime(1941, 9, 9, 0, 0),
datetime(1941, 9, 9, 23, 59, 59, 9999)))
assert len(events) == 1
assert events[0].summary == '<NAME>\'s birthday'
def test_birthday_does_not_parse(tmpdir):
dbpath = str(tmpdir) + '/khal.db'
db = backend.SQLiteDb_Birthdays(calname, dbpath, locale=LOCALE_BERLIN)
assert list(db.get_floating([calname], start, end)) == list()
db.update(card_does_not_parse, 'unix.vcf')
events = list(db.get_floating(calname, start, end))
assert len(events) == 0
| 2.078125
| 2
|
py/cidoc_crm_types/properties/p74i_is_current_or_former_residence_of.py
|
minorg/cidoc-crm-types
| 0
|
12780611
|
from dataclasses import dataclass
@dataclass
class P74iIsCurrentOrFormerResidenceOf:
URI = "http://erlangen-crm.org/current/P74i_is_current_or_former_residence_of"
| 1.820313
| 2
|
integration/tests_failed/assert_invalid_predicate_type.py
|
jleverenz/hurl
| 0
|
12780612
|
<reponame>jleverenz/hurl
from app import app
@app.route("/error-assert-invalid-predicate-type")
def error_assert_invalid_predicate_type():
return ""
| 1.882813
| 2
|
examples/applications/run_multi_functions.py
|
JokerHB/mealpy
| 1
|
12780613
|
#!/usr/bin/env python
# ------------------------------------------------------------------------------------------------------%
# Created by "Thieu" at 17:40, 06/11/2021 %
# %
# Email: <EMAIL> %
# Homepage: https://www.researchgate.net/profile/Nguyen_Thieu2 %
# Github: https://github.com/thieu1995 %
# ------------------------------------------------------------------------------------------------------%
from mealpy.bio_based import SMA
import numpy as np
def obj_function(solution):
def booth(x, y):
return (x + 2*y - 7)**2 + (2*x + y - 5)**2
def bukin(x, y):
return 100 * np.sqrt(np.abs(y - 0.01 * x**2)) + 0.01 * np.abs(x + 10)
def matyas(x, y):
return 0.26 * (x**2 + y**2) - 0.48 * x * y
return [booth(solution[0], solution[1]), bukin(solution[0], solution[1]), matyas(solution[0], solution[1])]
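# Note: obj_function returns three objective values; with "obj_weight" set in the problem
# definition below, mealpy combines them into a single fitness (assumed here to be a weighted sum).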
problem_dict1 = {
"obj_func": obj_function,
"lb": [-10, -10],
"ub": [10, 10],
"minmax": "min",
"verbose": True,
"obj_weight": [0.4, 0.1, 0.5] # Define it or default value will be [1, 1, 1]
}
## Run the algorithm
model1 = SMA.BaseSMA(problem_dict1, epoch=100, pop_size=50, pr=0.03)
model1.solve()
## You can access them all via object "history" like this:
model1.history.save_global_objectives_chart(filename="hello/goc")
model1.history.save_local_objectives_chart(filename="hello/loc")
model1.history.save_global_best_fitness_chart(filename="hello/gbfc")
model1.history.save_local_best_fitness_chart(filename="hello/lbfc")
model1.history.save_runtime_chart(filename="hello/rtc")
model1.history.save_exploration_exploitation_chart(filename="hello/eec")
model1.history.save_diversity_chart(filename="hello/dc")
model1.history.save_trajectory_chart(list_agent_idx=[3, 5], list_dimensions=[2], filename="hello/tc")
| 2.71875
| 3
|
crab/plugins/tools/rigging/joints/singulization.py
|
Mikfr83/crab
| 0
|
12780614
|
<reponame>Mikfr83/crab<gh_stars>0
import crab
import pymel.core as pm
# ------------------------------------------------------------------------------
class SingulizeSelected(crab.RigTool):
identifier = 'joints_singulize_selected'
display_name = 'Singulize Selected'
icon = 'joints.png'
# --------------------------------------------------------------------------
def run(self):
rig = crab.Rig(node=pm.selected()[0])
for node in pm.selected():
rig.add_component('Singular', pre_existing_joint=node.name())
# ------------------------------------------------------------------------------
class SingulizeAll(crab.RigTool):
identifier = 'joints_singulize_all'
display_name = 'Singulize All'
icon = 'joints.png'
# --------------------------------------------------------------------------
def run(self):
# -- Get the currently active rig
rig = crab.Rig.all()[0]
        # -- Find all the joints in the rig so we can check which
# -- require singulizing
all_joints = rig.skeleton_org().getChildren(
allDescendents=True,
type='joint',
)
for joint in all_joints:
# -- Assume the joint will need singulizing unless
# -- we find otherwise.
requires_singulizing = True
# -- If the joint has crab meta attached, then we do not
# -- want to singulize it
for possible_meta in joint.outputs(type='network'):
if possible_meta.hasAttr('crabComponent'):
requires_singulizing = False
break
# -- Singulize if required
if requires_singulizing:
rig.add_component('Singular', pre_existing_joint=joint.name())
| 2.25
| 2
|
monoensemble/version.py
|
vasselai/monoensemble
| 5
|
12780615
|
<reponame>vasselai/monoensemble
from __future__ import absolute_import, division, print_function
from os.path import join as pjoin
# Format expected by setup.py and doc/source/conf.py: string of form "X.Y.Z"
_version_major = 1
_version_minor = 3
_version_micro = 2 # use '' for first of series, number for 1 and above
_version_extra = 'dev'
_version_extra = '' # Uncomment this for full releases
# Construct full version string from these.
_ver = [_version_major, _version_minor]
if _version_micro:
_ver.append(_version_micro)
if _version_extra:
_ver.append(_version_extra)
__version__ = '.'.join(map(str, _ver))
CLASSIFIERS = ["Development Status :: 3 - Alpha",
"Environment :: Console",
"Intended Audience :: Science/Research",
"License :: BSD 3 Clause License",
"Operating System :: OS Independent",
"Programming Language :: Python",
"Topic :: Scientific/Engineering"]
# Description should be a one-liner:
description = """
monoensemble: a high performance library for monotone Gradient Boosting and Random Forest Classification.
"""
# Long description will go up on the pypi page
long_description = """
monoensemble
========
This package contains high performance implementations of
MonoGradientBoostingClassifier and MonoRandomForestClassifier. These monotone
classifiers use the fast and scaleable monotone rule approach described
in <NAME>., <NAME>., and <NAME>. \Enhanced Random Forest Algorithms for
Partially Monotone Ordinal Classication". In: Proceedings of the Thirty-Third AAAI Conference on
Articial Intelligence (AAAI-2019), Honolulu, Hawaii, USA, Jan 27 - Feb 1 PREPRESS. ed. by
AAAI Press. AAAI. 2019.
To get started, please go to the repository README_.
.. _README: https://github.com/chriswbartley/monoensemble/blob/master/README.md
License
=======
``monoensemble`` is licensed under the terms of the BSD 3 Clause License. See the
file "LICENSE" for information on the history of this software, terms &
conditions for usage, and a DISCLAIMER OF ALL WARRANTIES.
All trademarks referenced herein are property of their respective holders.
Copyright (c) 2017, <NAME>
"""
NAME = 'monoensemble'
MAINTAINER = "<NAME>"
MAINTAINER_EMAIL = "<EMAIL>"
DESCRIPTION = description
LONG_DESCRIPTION = long_description
URL = "http://github.com/chriswbartley/monoensemble"
DOWNLOAD_URL = ""
LICENSE = "BSD 3 Clause"
AUTHOR = "<NAME>"
AUTHOR_EMAIL = "<EMAIL>"
PLATFORMS = "OS Independent"
MAJOR = _version_major
MINOR = _version_minor
MICRO = _version_micro
VERSION = __version__
PACKAGE_DATA = {'monoensemble': [pjoin('data', '*')]}
REQUIRES = []
INSTALL_REQUIRES = ["numpy","scipy","scikit-learn", "cython"]
| 1.453125
| 1
|
examples/heat_example_base/options.py
|
melissa-sa/melissa
| 7
|
12780616
|
<gh_stars>1-10
###################################################################
# Melissa #
#-----------------------------------------------------------------#
# COPYRIGHT (C) 2017 by INRIA and EDF. ALL RIGHTS RESERVED. #
# #
# This source is covered by the BSD 3-Clause License. #
# Refer to the LICENCE file for further information. #
# #
#-----------------------------------------------------------------#
# Original Contributors: #
# <NAME>, #
# <NAME>, #
# <NAME>, #
# <NAME>, #
###################################################################
"""
user defined options
"""
import numpy as np
def draw_param_set():
param_set = np.zeros(STUDY_OPTIONS['nb_parameters'])
for i in range(STUDY_OPTIONS['nb_parameters']):
param_set[i] = np.random.uniform(0, 1)
return param_set
STUDY_OPTIONS = {}
STUDY_OPTIONS['user_name'] = "user"
STUDY_OPTIONS['working_directory'] = "/home/user/study"
STUDY_OPTIONS['nb_parameters'] = 5
STUDY_OPTIONS['sampling_size'] = 10
STUDY_OPTIONS['nb_time_steps'] = 100
STUDY_OPTIONS['threshold_value'] = 0.7
STUDY_OPTIONS['field_names'] = ["field1", "field2"]
STUDY_OPTIONS['server_timeout'] = 600
STUDY_OPTIONS['simulation_timeout'] = 400
STUDY_OPTIONS['coupling'] = "MELISSA_COUPLING_DEFAULT"
MELISSA_STATS = {}
MELISSA_STATS['mean'] = True
MELISSA_STATS['variance'] = True
MELISSA_STATS['skewness'] = True
MELISSA_STATS['kurtosis'] = True
MELISSA_STATS['min'] = True
MELISSA_STATS['max'] = True
MELISSA_STATS['threshold_exceedance'] = False
MELISSA_STATS['quantile'] = False
MELISSA_STATS['sobol_indices'] = True
USER_FUNCTIONS = {}
USER_FUNCTIONS['create_study'] = None
USER_FUNCTIONS['draw_parameter_set'] = draw_param_set
USER_FUNCTIONS['create_group'] = None
USER_FUNCTIONS['launch_server'] = None
USER_FUNCTIONS['launch_group'] = None
USER_FUNCTIONS['check_server_job'] = None
USER_FUNCTIONS['check_group_job'] = None
USER_FUNCTIONS['cancel_job'] = None
USER_FUNCTIONS['restart_server'] = None
USER_FUNCTIONS['restart_group'] = None
USER_FUNCTIONS['check_scheduler_load'] = None
USER_FUNCTIONS['postprocessing'] = None
USER_FUNCTIONS['finalize'] = None
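# --- Hedged example (added; not part of the original options file) ----------
# A minimal sketch of an alternative sampler that could be assigned to
# USER_FUNCTIONS['draw_parameter_set']; the Gaussian mean/sigma values below
# are illustrative assumptions, not values taken from the original study.
def draw_param_set_gaussian(mean=0.5, sigma=0.1):
    param_set = np.random.normal(mean, sigma, STUDY_OPTIONS['nb_parameters'])
    # keep the draw inside the [0, 1] range used by the uniform sampler above
    return np.clip(param_set, 0.0, 1.0)
# USER_FUNCTIONS['draw_parameter_set'] = draw_param_set_gaussian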
| 2.28125
| 2
|
boilerplate/setup.py
|
MTrajK/python-projects
| 5
|
12780617
|
<filename>boilerplate/setup.py
from setuptools import setup, find_packages
setup(
name='boilerplate',
version='0.0.1',
description='Python project boilerplate.',
author='<NAME>',
python_requires='>=3',
install_requires=[],
# If you have only one package then use: packages=['boilerplate']
packages=find_packages(exclude=('tests')),
# If your package is a single module, use this instead of 'packages':
# py_modules=['mypackage'],
include_package_data=True,
entry_points={
'console_scripts': [
'boilerplate=boilerplate.main:M'
]
}
)
| 1.664063
| 2
|
app/WebSensorGatherer.py
|
ncalligaro/GardenCity
| 0
|
12780618
|
#!/usr/bin/python
from config import config
import commonFunctions
import datetime
import traceback
import httplib
import json
import sys
import logging
from time import sleep
logging.basicConfig(level=config.get_logging_level(),
format=config.runtime_variables['log_format'],
datefmt=config.runtime_variables['log_date_format'])
def get_current_city_data():
http_connection = httplib.HTTPSConnection(config.open_map['host'])
http_connection.request("GET", ("%s?q=%s&units=%s&appid=%s" % (config.open_map['path'], config.open_map['city'], 'metric', config.open_map['api_key'])))
response = http_connection.getresponse()
#if (response.status != httplib.OK):
# print 'Error ocurred'
# print response.status, response.reason
# return None #Replace this with an exception
#else:
jsondata = response.read()
data = json.loads(jsondata)
return data
def save_openweather_map_info_to_DB(json_data, creation_time):
current_place = commonFunctions.get_from_dic(json_data, 'name')
place = "city_%s" % current_place
measurement_date = commonFunctions.get_from_dic(json_data, 'dt')
current_temperature = commonFunctions.get_from_dic(json_data, 'main', 'temp')
current_pressure = commonFunctions.get_from_dic(json_data, 'main', 'pressure')
current_humidity = commonFunctions.get_from_dic(json_data, 'main', 'humidity')
current_temperature_min = commonFunctions.get_from_dic(json_data, 'main', 'temp_min')
current_temperature_max = commonFunctions.get_from_dic(json_data, 'main', 'temp_max')
current_rain = commonFunctions.get_from_dic(json_data, 'rain', '3h')
current_visibility = commonFunctions.get_from_dic(json_data, 'visibility')
current_wind_speed = commonFunctions.get_from_dic(json_data, 'wind', 'speed')
current_wind_direction = commonFunctions.get_from_dic(json_data, 'wind', 'deg')
current_clouds = commonFunctions.get_from_dic(json_data, 'clouds', 'all')
current_sunrise = commonFunctions.get_from_dic(json_data, 'sys', 'sunrise')
current_sunset = commonFunctions.get_from_dic(json_data, 'sys', 'sunset')
commonFunctions.save_temperature_data(place, current_temperature, "FROM_UNIXTIME(%s)" % (measurement_date), creation_time)
commonFunctions.save_pressure_data(place, current_pressure, "FROM_UNIXTIME(%s)" % (measurement_date), creation_time)
commonFunctions.save_humidity_data(place, current_humidity, "FROM_UNIXTIME(%s)" % (measurement_date), creation_time)
commonFunctions.save_temperature_range_min_data(place, current_temperature_min, "FROM_UNIXTIME(%s)" % (measurement_date), creation_time)
commonFunctions.save_temperature_range_max_data(place, current_temperature_max, "FROM_UNIXTIME(%s)" % (measurement_date), creation_time)
commonFunctions.save_rain_data(place, current_rain, "FROM_UNIXTIME(%s)" % (measurement_date), creation_time)
commonFunctions.save_visibility_data(place, current_visibility, "FROM_UNIXTIME(%s)" % (measurement_date), creation_time)
commonFunctions.save_wind_data(place, current_wind_speed, current_wind_direction, "FROM_UNIXTIME(%s)" % (measurement_date), creation_time)
commonFunctions.save_clouds_data(place, current_clouds, "FROM_UNIXTIME(%s)" % (measurement_date), creation_time)
commonFunctions.save_sunrise_data(place, current_sunrise, "FROM_UNIXTIME(%s)" % (measurement_date), creation_time)
commonFunctions.save_sunset_data(place, current_sunset, "FROM_UNIXTIME(%s)" % (measurement_date), creation_time)
def main():
logging.info("Saving to file: %s" % (config.file['save_to_file']))
logging.info("Saving to DB: %s" % (config.mysql['save_to_DB']))
logging.info("Starting loop")
try:
while True:
now = datetime.datetime.utcnow()
openweathermap_jsondata = get_current_city_data()
save_openweather_map_info_to_DB(openweathermap_jsondata, now.isoformat())
sleep(config.open_map['sleep_time_in_seconds_between_reads'])
except KeyboardInterrupt:
logging.error("\nbye!")
sys.exit(1)
except Exception as e:
logging.error("\nOther error occurred")
logging.error (e)
logging.error(traceback.format_exc())
sys.exit(1)
finally:
logging.info("\nbye2!")
#print("\nCleaning GPIO port\n")
#GPIO.cleanup()
# call main
if __name__ == '__main__':
main()
| 2.59375
| 3
|
perfplot/__about__.py
|
pardha-bandaru/perfplot
| 1
|
12780619
|
# -*- coding: utf-8 -*-
#
__author__ = u"<NAME>"
__author_email__ = "<EMAIL>"
__copyright__ = u"Copyright (c) 2017-2018, {} <{}>".format(__author__, __author_email__)
__license__ = "License :: OSI Approved :: MIT License"
__version__ = "0.5.0"
__status__ = "Development Status :: 5 - Production/Stable"
| 1.25
| 1
|
training_model.py
|
puneesh00/cs-mri-gan
| 21
|
12780620
|
from keras.utils import multi_gpu_model
import numpy as np
import tensorflow as tf
import pickle
from keras.models import Model, Input
from keras.optimizers import Adam, RMSprop
from keras.layers import Dense
from keras.layers import Conv2D, Conv2DTranspose
from keras.layers import Flatten, Add
from keras.layers import Concatenate, Activation
from keras.layers import LeakyReLU, BatchNormalization, Lambda
from keras import backend as K
import os
def accw(y_true, y_pred):
y_pred=K.clip(y_pred, -1, 1)
return K.mean(K.equal(y_true, K.round(y_pred)))
def mssim(y_true, y_pred):
costs = 1.0 - tf.reduce_mean(tf.image.ssim(y_true, y_pred, 2.0))
return costs
def wloss(y_true,y_predict):
return -K.mean(y_true*y_predict)
def discriminator(inp_shape = (256,256,1), trainable = True):
gamma_init = tf.random_normal_initializer(1., 0.02)
inp = Input(shape = (256,256,1))
l0 = Conv2D(64, (4,4), strides = (2,2), padding = 'same', use_bias = True, kernel_initializer = 'he_normal', bias_initializer = 'zeros')(inp) #b_init is set to none, maybe they are not using bias here, but I am.
l0 = LeakyReLU(alpha=0.2)(l0)
l1 = Conv2D(64*2, (4,4), strides = (2,2), padding = 'same', use_bias = True, kernel_initializer = 'he_normal', bias_initializer = 'zeros')(l0)
l1 = BatchNormalization(gamma_initializer = gamma_init, trainable = trainable)(l1)
l1 = LeakyReLU(alpha=0.2)(l1)
l2 = Conv2D(64*4, (4,4), strides = (2,2), padding = 'same', use_bias = True, kernel_initializer = 'he_normal', bias_initializer = 'zeros')(l1)
l2 = BatchNormalization(gamma_initializer = gamma_init, trainable = trainable)(l2)
l2 = LeakyReLU(alpha=0.2)(l2)
l3 = Conv2D(64*8, (4,4), strides = (2,2), padding = 'same', use_bias = True, kernel_initializer = 'he_normal', bias_initializer = 'zeros')(l2)
l3 = BatchNormalization(gamma_initializer = gamma_init, trainable = trainable)(l3)
l3 = LeakyReLU(alpha=0.2)(l3)
l4 = Conv2D(64*16, (4,4), strides = (2,2), padding = 'same', use_bias = True, kernel_initializer = 'he_normal', bias_initializer = 'zeros')(l3)
l4 = BatchNormalization(gamma_initializer = gamma_init, trainable = trainable)(l4)
l4 = LeakyReLU(alpha=0.2)(l4)
l5 = Conv2D(64*32, (4,4), strides = (2,2), padding = 'same', use_bias = True, kernel_initializer = 'he_normal', bias_initializer = 'zeros')(l4)
l5 = BatchNormalization(gamma_initializer = gamma_init, trainable = trainable)(l5)
l5 = LeakyReLU(alpha=0.2)(l5)
l6 = Conv2D(64*16, (1,1), strides = (1,1), padding = 'same', use_bias = True, kernel_initializer = 'he_normal', bias_initializer = 'zeros')(l5)
l6 = BatchNormalization(gamma_initializer = gamma_init, trainable = trainable)(l6)
l6 = LeakyReLU(alpha=0.2)(l6)
l7 = Conv2D(64*8, (1,1), strides = (1,1), padding = 'same', use_bias = True, kernel_initializer = 'he_normal', bias_initializer = 'zeros')(l6)
l7 = BatchNormalization(gamma_initializer = gamma_init, trainable = trainable)(l7)
l7 = LeakyReLU(alpha=0.2)(l7)
#x
l8 = Conv2D(64*2, (1,1), strides = (1,1), padding = 'same', use_bias = True, kernel_initializer = 'he_normal', bias_initializer = 'zeros')(l7)
l8 = BatchNormalization(gamma_initializer = gamma_init, trainable = trainable)(l8)
l8 = LeakyReLU(alpha=0.2)(l8)
l9 = Conv2D(64*2, (3,3), strides = (1,1), padding = 'same', use_bias = True, kernel_initializer = 'he_normal', bias_initializer = 'zeros')(l8)
l9 = BatchNormalization(gamma_initializer = gamma_init, trainable = trainable)(l9)
l9 = LeakyReLU(alpha=0.2)(l9)
l10 = Conv2D(64*8, (3,3), strides = (1,1), padding = 'same', use_bias = True, kernel_initializer = 'he_normal', bias_initializer = 'zeros')(l9)
l10 = BatchNormalization(gamma_initializer = gamma_init, trainable = trainable)(l10)
l10 = LeakyReLU(alpha=0.2)(l10)
#y
l11 = Add()([l7,l10])
l11 = LeakyReLU(alpha = 0.2)(l11)
out=Conv2D(filters=1,kernel_size=3,strides=1,padding='same', use_bias = True, kernel_initializer = 'he_normal', bias_initializer = 'zeros')(l11)
model = Model(inputs = inp, outputs = out)
return model
def resden(x,fil,gr,beta,gamma_init,trainable):
x1=Conv2D(filters=gr,kernel_size=3,strides=1,padding='same', use_bias = True, kernel_initializer = 'he_normal', bias_initializer = 'zeros')(x)
x1=BatchNormalization(gamma_initializer = gamma_init, trainable = trainable)(x1)
x1=LeakyReLU(alpha=0.2)(x1)
x1=Concatenate(axis=-1)([x,x1])
x2=Conv2D(filters=gr,kernel_size=3,strides=1,padding='same', use_bias = True, kernel_initializer = 'he_normal', bias_initializer = 'zeros')(x1)
x2=BatchNormalization(gamma_initializer = gamma_init, trainable = trainable)(x2)
x2=LeakyReLU(alpha=0.2)(x2)
x2=Concatenate(axis=-1)([x1,x2])
x3=Conv2D(filters=gr,kernel_size=3,strides=1,padding='same', use_bias = True, kernel_initializer = 'he_normal', bias_initializer = 'zeros')(x2)
x3=BatchNormalization(gamma_initializer = gamma_init, trainable = trainable)(x3)
x3=LeakyReLU(alpha=0.2)(x3)
x3=Concatenate(axis=-1)([x2,x3])
x4=Conv2D(filters=gr,kernel_size=3,strides=1,padding='same', use_bias = True, kernel_initializer = 'he_normal', bias_initializer = 'zeros')(x3)
x4=BatchNormalization(gamma_initializer = gamma_init, trainable = trainable)(x4)
x4=LeakyReLU(alpha=0.2)(x4)
x4=Concatenate(axis=-1)([x3,x4])
x5=Conv2D(filters=fil,kernel_size=3,strides=1,padding='same', use_bias = True, kernel_initializer = 'he_normal', bias_initializer = 'zeros')(x4)
x5=Lambda(lambda x:x*beta)(x5)
xout=Add()([x5,x])
return xout
def resresden(x,fil,gr,betad,betar,gamma_init,trainable):
x1=resden(x,fil,gr,betad,gamma_init,trainable)
x2=resden(x1,fil,gr,betad,gamma_init,trainable)
x3=resden(x2,fil,gr,betad,gamma_init,trainable)
x3=Lambda(lambda x:x*betar)(x3)
xout=Add()([x3,x])
return xout
def generator(inp_shape, trainable = True):
gamma_init = tf.random_normal_initializer(1., 0.02)
fd=512
gr=32
nb=12
betad=0.2
betar=0.2
inp_real_imag = Input(inp_shape)
lay_128dn = Conv2D(64, (4,4), strides = (2,2), padding = 'same', use_bias = True, kernel_initializer = 'he_normal', bias_initializer = 'zeros')(inp_real_imag)
lay_128dn = LeakyReLU(alpha = 0.2)(lay_128dn)
lay_64dn = Conv2D(128, (4,4), strides = (2,2), padding = 'same', use_bias = True, kernel_initializer = 'he_normal', bias_initializer = 'zeros')(lay_128dn)
lay_64dn = BatchNormalization(gamma_initializer = gamma_init, trainable = trainable)(lay_64dn)
lay_64dn = LeakyReLU(alpha = 0.2)(lay_64dn)
lay_32dn = Conv2D(256, (4,4), strides = (2,2), padding = 'same', use_bias = True, kernel_initializer = 'he_normal', bias_initializer = 'zeros')(lay_64dn)
lay_32dn = BatchNormalization(gamma_initializer = gamma_init, trainable = trainable)(lay_32dn)
lay_32dn = LeakyReLU(alpha=0.2)(lay_32dn)
lay_16dn = Conv2D(512, (4,4), strides = (2,2), padding = 'same', use_bias = True, kernel_initializer = 'he_normal', bias_initializer = 'zeros')(lay_32dn)
lay_16dn = BatchNormalization(gamma_initializer = gamma_init, trainable = trainable)(lay_16dn)
lay_16dn = LeakyReLU(alpha=0.2)(lay_16dn) #16x16
lay_8dn = Conv2D(512, (4,4), strides = (2,2), padding = 'same', use_bias = True, kernel_initializer = 'he_normal', bias_initializer = 'zeros')(lay_16dn)
lay_8dn = BatchNormalization(gamma_initializer = gamma_init, trainable = trainable)(lay_8dn)
lay_8dn = LeakyReLU(alpha=0.2)(lay_8dn) #8x8
xc1=Conv2D(filters=fd,kernel_size=3,strides=1,padding='same', use_bias = True, kernel_initializer = 'he_normal', bias_initializer = 'zeros')(lay_8dn) #8x8
xrrd=xc1
for m in range(nb):
xrrd=resresden(xrrd,fd,gr,betad,betar,gamma_init,trainable)
xc2=Conv2D(filters=fd,kernel_size=3,strides=1,padding='same', use_bias = True, kernel_initializer = 'he_normal', bias_initializer = 'zeros')(xrrd)
lay_8upc=Add()([xc1,xc2])
lay_16up = Conv2DTranspose(1024, (4,4), strides = (2,2), padding = 'same', use_bias = True, kernel_initializer = 'he_normal', bias_initializer = 'zeros')(lay_8upc)
lay_16up = BatchNormalization(gamma_initializer = gamma_init, trainable = trainable)(lay_16up)
lay_16up = Activation('relu')(lay_16up) #16x16
lay_16upc = Concatenate(axis = -1)([lay_16up,lay_16dn])
lay_32up = Conv2DTranspose(256, (4,4), strides = (2,2), padding = 'same', use_bias = True, kernel_initializer = 'he_normal', bias_initializer = 'zeros')(lay_16upc)
lay_32up = BatchNormalization(gamma_initializer = gamma_init, trainable = trainable)(lay_32up)
lay_32up = Activation('relu')(lay_32up) #32x32
lay_32upc = Concatenate(axis = -1)([lay_32up,lay_32dn])
lay_64up = Conv2DTranspose(128, (4,4), strides = (2,2), padding = 'same', use_bias = True, kernel_initializer = 'he_normal', bias_initializer = 'zeros')(lay_32upc)
lay_64up = BatchNormalization(gamma_initializer = gamma_init, trainable = trainable)(lay_64up)
lay_64up = Activation('relu')(lay_64up) #64x64
lay_64upc = Concatenate(axis = -1)([lay_64up,lay_64dn])
lay_128up = Conv2DTranspose(64, (4,4), strides = (2,2), padding = 'same', use_bias = True, kernel_initializer = 'he_normal', bias_initializer = 'zeros')(lay_64upc)
lay_128up = BatchNormalization(gamma_initializer = gamma_init, trainable = trainable)(lay_128up)
lay_128up = Activation('relu')(lay_128up) #128x128
lay_128upc = Concatenate(axis = -1)([lay_128up,lay_128dn])
lay_256up = Conv2DTranspose(64, (4,4), strides = (2,2), padding = 'same', use_bias = True, kernel_initializer = 'he_normal', bias_initializer = 'zeros')(lay_128upc)
lay_256up = BatchNormalization(gamma_initializer = gamma_init, trainable = trainable)(lay_256up)
lay_256up = Activation('relu')(lay_256up) #256x256
out = Conv2D(1, (1,1), strides = (1,1), activation = 'tanh', padding = 'same', use_bias = True, kernel_initializer = 'he_normal', bias_initializer = 'zeros')(lay_256up)
model = Model(inputs = inp_real_imag, outputs = out)
return model
def define_gan_model(gen_model, dis_model, inp_shape):
dis_model.trainable = False
inp = Input(shape = inp_shape)
out_g = gen_model(inp)
out_dis = dis_model(out_g)
out_g1 = out_g
model = Model(inputs = inp, outputs = [out_dis, out_g, out_g1])
model.summary()
return model
def train(g_par, d_par, gan_model, dataset_real, u_sampled_data, n_epochs, n_batch, n_critic, clip_val, n_patch, f):
bat_per_epo = int(dataset_real.shape[0]/n_batch)
half_batch = int(n_batch/2)
for i in range(n_epochs):
for j in range(bat_per_epo):
# training the discriminator
for k in range(n_critic):
ix = np.random.randint(0, dataset_real.shape[0], half_batch)
X_real = dataset_real[ix]
y_real = np.ones((half_batch,n_patch,n_patch,1))
ix_1 = np.random.randint(0, u_sampled_data.shape[0], half_batch)
X_fake = g_par.predict(u_sampled_data[ix_1])
y_fake = -np.ones((half_batch,n_patch,n_patch,1))
X, y = np.vstack((X_real, X_fake)), np.vstack((y_real,y_fake))
d_loss, accuracy = d_par.train_on_batch(X,y)
for l in d_par.layers:
weights=l.get_weights()
weights=[np.clip(w, -clip_val,clip_val) for w in weights]
l.set_weights(weights)
# training the generator
ix = np.random.randint(0, dataset_real.shape[0], n_batch)
X_r = dataset_real[ix]
X_gen_inp = u_sampled_data[ix]
y_gan = np.ones((n_batch,n_patch,n_patch,1))
g_loss = gan_model.train_on_batch ([X_gen_inp], [y_gan, X_r, X_r])
f.write('>%d, %d/%d, d=%.3f, acc = %.3f, w=%.3f, mae=%.3f, mssim=%.3f, g=%.3f' %(i+1, j+1, bat_per_epo, d_loss, accuracy, g_loss[1], g_loss[2], g_loss[3], g_loss[0]))
f.write('\n')
print ('>%d, %d/%d, d=%.3f, acc = %.3f, g=%.3f' %(i+1, j+1, bat_per_epo, d_loss, accuracy, g_loss[0]))
filename = '/home/cs-mri-gan/gen_weights_a5_%04d.h5' % (i+1)
g_save = g_par.get_layer('model_3')
g_save.save_weights(filename)
f.close()
#hyperparameters
n_epochs = 300
n_batch = 32
n_critic = 3
clip_val = 0.05
in_shape_gen = (256,256,2)
in_shape_dis = (256,256,1)
accel = 3
d_model = discriminator (inp_shape = in_shape_dis, trainable = True)
d_model.summary()
d_par = multi_gpu_model(d_model, gpus=4, cpu_relocation = True) #for multi-gpu training
opt = Adam(lr = 0.0002, beta_1 = 0.5)
d_par.compile(loss = wloss, optimizer = opt, metrics = [accw])
g_model = generator(inp_shape = in_shape_gen , trainable = True)
g_par = multi_gpu_model(g_model, gpus=4, cpu_relocation = True) #for multi-gpu training
g_par.summary()
gan_model = define_gan_model(g_par, d_par, in_shape_gen)
opt1 = Adam(lr = 0.0001, beta_1 = 0.5)
gan_model.compile(loss = [wloss, 'mae', mssim], optimizer = opt1, loss_weights = [0.01, 20.0, 1.0]) #loss weights for generator training
n_patch=d_model.output_shape[1]
data_path='/home/cs-mri-gan/training_gt_aug.pickle' #Ground truth
usam_path='/home/cs-mri-gan/training_usamp_1dg_a5_aug.pickle' #Zero-filled reconstructions
df = open(data_path,'rb')
uf = open(usam_path,'rb')
dataset_real = pickle.load(df)
u_sampled_data = pickle.load(uf)
dataset_real = np.expand_dims(dataset_real, axis = -1)
u_sampled_data = np.expand_dims(u_sampled_data, axis = -1)
u_sampled_data_real = u_sampled_data.real
u_sampled_data_imag = u_sampled_data.imag
u_sampled_data_2c = np.concatenate((u_sampled_data_real, u_sampled_data_imag), axis = -1)
f = open('/home/cs-mri-gan/log_a5.txt', 'a')  # append mode creates the log file if it does not already exist
train(g_par, d_par, gan_model, dataset_real, u_sampled_data_2c, n_epochs, n_batch, n_critic, clip_val, n_patch, f)
| 2.09375
| 2
|
text_normalizer/library/strip.py
|
Yoctol/text-normalizer
| 16
|
12780621
|
from ..factory import Strip
pure_strip_text_normalizer = Strip()
| 1.195313
| 1
|
deprecated/water_rgb.py
|
alex-ip/agdc
| 34
|
12780622
|
<reponame>alex-ip/agdc
#!/usr/bin/env python
#===============================================================================
# Copyright (c) 2014 Geoscience Australia
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither Geoscience Australia nor the names of its contributors may be
# used to endorse or promote products derived from this software
# without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#===============================================================================
'''
Stacker subclass implementation to create PQ-masked RGB files showing water extents in blue.
NB: GDAL cannot temporally stack multi-band files, so separate files are generated.
Created on 28/06/2013
@author: u76345
'''
from osgeo import gdal
import numpy
import sys
import os
def process(input_vrt_path, output_dir):
water_rgb = (0, 169, 230)
assert os.path.exists(input_vrt_path), 'Input file %s does not exist' % input_vrt_path
    assert os.path.isdir(output_dir), 'Invalid output directory %s' % output_dir
input_dataset = gdal.Open(input_vrt_path)
assert input_dataset, 'Unable to open dataset %s' % input_vrt_path
file_list = input_dataset.GetFileList()
for band_number in range(1,input_dataset.RasterCount + 1):
input_band = input_dataset.GetRasterBand(band_number)
water_mask = (input_band.ReadAsArray() == 128) # True==WET, False==DRY
water_file_path = os.path.join(output_dir,
os.path.basename(file_list[band_number])
)
if os.path.exists(water_file_path):
print('Skipping existing dataset %s' % water_file_path)
continue
gdal_driver = gdal.GetDriverByName('GTiff')
output_dataset = gdal_driver.Create(water_file_path,
input_dataset.RasterXSize, input_dataset.RasterYSize,
3, gdal.GDT_Byte,
['INTERLEAVE=PIXEL'])
assert output_dataset, 'Unable to open output dataset %s'% water_file_path
output_dataset.SetGeoTransform(input_dataset.GetGeoTransform())
output_dataset.SetProjection(input_dataset.GetProjection())
for output_band_index in range(3):
output_band = output_dataset.GetRasterBand(output_band_index + 1)
output_array = (water_mask * water_rgb[output_band_index]).astype(numpy.uint8)
print('output_array = %s' % output_array)
print('output_array[water_mask] = %s' % output_array[water_mask])
output_band.WriteArray(output_array)
output_band.SetNoDataValue(0)
output_band.FlushCache()
output_dataset.FlushCache()
print('Finished writing output dataset %s' % water_file_path)
if __name__ == '__main__':
process(sys.argv[1], sys.argv[2])
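# --- Hedged usage note (added; not part of the original script) -------------
# Example invocation with illustrative paths; any GDAL-readable VRT whose
# bands encode WET pixels as 128 (as assumed by water_mask above) should work:
#   python water_rgb.py /data/water_extents.vrt /data/water_rgb_output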
| 1.390625
| 1
|
print_result/print_result.py
|
Josepholaidepetro/maven_kubeflow_pipeline
| 0
|
12780623
|
import argparse
def print_result(args):
# Print results
with open(args.accuracy, 'r') as f:
score = f.read()
print(f"Random forest (accuracy): {score}")
if __name__ == '__main__':
# Defining and parsing the command-line arguments
parser = argparse.ArgumentParser(description='My program description')
parser.add_argument('--data', type=str)
parser.add_argument('--accuracy', type=str)
args = parser.parse_args()
print_result(args)
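# --- Hedged usage note (added; not part of the original script) -------------
# Example invocation with illustrative file paths; --data is parsed but not
# used above, only --accuracy is read and printed:
#   python print_result.py --accuracy /tmp/accuracy.txt --data /tmp/data.csv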
| 2.984375
| 3
|
Project_Files/source/simprocedure/wcslen.py
|
SoftwareSecurityLab/Heap-Overflow-Detection
| 0
|
12780624
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Sat Feb 13 22:07:00 2021
@authors: <NAME>
<NAME>
<NAME>
"""
import angr
class wcslen(angr.SimProcedure):
def run(self, s):
print('in wcslen')
f=angr.SIM_PROCEDURES['libc']['strlen']
self.state.globals['iswchar']=True
re = self.inline_call(f,s,wchar=True).ret_expr
return re
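# --- Hedged usage sketch (added; not part of the original file) -------------
# One common way to wire this SimProcedure into an angr analysis is to hook
# the wcslen symbol of a loaded binary; the binary path is an assumption.
#
# project = angr.Project('/path/to/target_binary', auto_load_libs=False)
# project.hook_symbol('wcslen', wcslen())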
| 2.28125
| 2
|
backprop/models/st_model/model.py
|
lucky7323/backprop
| 200
|
12780625
|
from typing import Dict, List
import torch
from functools import partial
from backprop.models import PathModel
from torch.optim.adamw import AdamW
from sentence_transformers import SentenceTransformer
class STModel(PathModel):
"""
Class for models which are initialised from Sentence Transformers
Attributes:
model_path: path to ST model
name: string identifier for the model. Lowercase letters and numbers.
No spaces/special characters except dashes.
max_length: Max supported token length for vectorisation
description: String description of the model.
tasks: List of supported task strings
details: Dictionary of additional details about the model
init_model: Class used to initialise model
device: Device for model. Defaults to "cuda" if available.
"""
def __init__(self, model_path, init_model=SentenceTransformer, name: str = None,
description: str = None, tasks: List[str] = None, details: Dict = None,
max_length=512, device=None):
init_model = partial(init_model, device=device)
tasks = ["text-vectorisation"]
PathModel.__init__(self, model_path, name=name, description=description,
details=details, tasks=tasks,
init_model=init_model,
device=device)
self.max_length = max_length
@staticmethod
def list_models():
from .models_list import models
return models
@torch.no_grad()
def __call__(self, task_input, task="text-vectorisation", return_tensor=False):
"""
Uses the model for the text-vectorisation task
Args:
task_input: input dictionary according to the ``text-vectorisation`` task specification
task: text-vectorisation
"""
is_list = False
if task == "text-vectorisation":
input_ids = None
attention_mask = None
text = task_input.get("text")
if type(text) == list:
is_list = True
else:
text = [text]
features = self.model.tokenizer(text, truncation=True, padding=True, return_tensors="pt").to(self._model_device)
text_vecs = self.vectorise(features)
if not return_tensor:
text_vecs = text_vecs.tolist()
output = text_vecs
if not is_list:
output = output[0]
return output
else:
raise ValueError(f"Unsupported task '{task}'")
def training_step(self, params, task="text-vectorisation"):
text = params["text"]
return self.vectorise(text)
def process_batch(self, params, task="text-vectorisation"):
if task == "text-vectorisation":
max_length = params["max_length"] or self.max_length
if max_length > self.max_length:
raise ValueError(f"This model has a max_length limit of {self.max_length}")
text = params["text"]
return self.model.tokenizer(text, truncation=True, padding="max_length", return_tensors="pt")
def vectorise(self, features):
return self.model.forward(features)["sentence_embedding"]
def configure_optimizers(self):
return AdamW(params=self.model.parameters(), lr=2e-5, eps=1e-6, correct_bias=False)
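# --- Hedged usage sketch (added; not part of the original module) -----------
# Minimal example of vectorising text with STModel; the checkpoint name is an
# illustrative Sentence Transformers model, not one mandated by this module.
#
# model = STModel("sentence-transformers/all-MiniLM-L6-v2", name="st-example")
# single_vec = model({"text": "hello world"}, task="text-vectorisation")
# batch_vecs = model({"text": ["hello", "world"]}, task="text-vectorisation")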
| 2.671875
| 3
|
tokenizer/java/java_tokenizer.py
|
Guardian99/NeuralCodeSummarization
| 0
|
12780626
|
from c2nl.tokenizers.code_tokenizer import CodeTokenizer, Tokens, Tokenizer
import argparse
import re
from os import path
import javalang
from pathlib import Path
def get_project_root() -> str:
"""Returns project root folder."""
return str(Path(__file__).parent.parent.parent)
def get_java_method_map(tree):
"""High level model that handles initializing the underlying network
architecture, saving, updating examples, and predicting examples.
"""
method_map = []
for method in tree.types[0].methods:
if len(method.annotations) > 0:
method_map.append([method.annotations[0].position.line, method.position.line, method.annotations[0].position.column])
else:
method_map.append([method.position.line, method.position.line, method.position.column])
return method_map
def get_method_location_map(java_file_path):
method_map = []
with open(java_file_path, 'r') as java_file:
java_file_text = java_file.read()
tree = javalang.parse.parse(java_file_text)
method_map = get_java_method_map(tree)
return method_map
def process_java_file(java_file_path):
method_map = get_method_location_map(java_file_path)
total_methods = len(method_map)
method_text = []
tokenizer = CodeTokenizer(True, True)
with open(java_file_path, 'r') as process_sample_file:
current_line_no = 1
method_no = 0
current_method = []
count_open_bracket = 0
count_close_bracket = 0
verify = False
for x in process_sample_file:
if current_line_no >= method_map[method_no][0]:
current_method.append(x)
if current_line_no >= method_map[method_no][1]:
count_open_bracket = count_open_bracket + x.count('{')
count_close_bracket = count_close_bracket + x.count('}')
if count_open_bracket > 0:
verify = True
if count_open_bracket == count_close_bracket and verify:
temp_method_text = ' '.join([line.strip() for line in current_method])
temp_method_text = tokenize_java_method(tokenizer, temp_method_text)
method_text.append([method_map[method_no], temp_method_text])
current_method = []
method_no = method_no + 1
count_open_bracket = 0
count_close_bracket = 0
verify = False
if method_no == total_methods:
break
current_line_no = current_line_no + 1
return method_text
def tokenize_java_method(tokenizer, inline_method_text):
if tokenizer is None:
tokenizer = CodeTokenizer(True, True)
text = ''
for i in tokenizer.tokenize(inline_method_text).data:
        s = r'(@|\+|\-|,|\]|\[|{|}|=|!|\(|\)|>|<|;|"|/|\.)'
res = list(filter(None, re.split(s, str(i[0]))))
res = ' '.join(res)
text = text + ' ' + res
return text[1:]
def tokenize_java(java_file_path, save_data):
# check if the file exist
if path.exists(java_file_path):
print("Processing the java file : % s" % java_file_path)
else:
raise Exception('No such java file at location: %s' % java_file_path)
method_text = process_java_file(java_file_path)
if save_data:
with open(str(get_project_root()) + '/output.code', 'w+') as output_sample_file:
for line, method in method_text:
output_sample_file.write(method + '\n')
        print('Saving tokenized file into: %s' % (get_project_root() + '/output.code'))
return method_text
if __name__ == '__main__':
# Parse cmdline args and setup environment
parser = argparse.ArgumentParser(
'Java Code Tokenizer Generator',
formatter_class=argparse.ArgumentDefaultsHelpFormatter
)
# Adding Java file path argument
parser.add_argument("-p", "--file_path", help="Input file path", required=True)
# Adding Java file path argument
parser.add_argument("-f", "--file_type", help="File type", required=True,
choices=['java', 'method'], )
# Read arguments from command line
args = parser.parse_args()
if args.file_type == 'java':
print("Tokenized : % s" % tokenize_java(args.file_path, True))
if args.file_type == 'method':
if path.exists(args.file_path):
print("Processing the file : % s" % args.file_path)
with open(args.file_path, 'r') as sample_file:
java_file_content = sample_file.read()
tokenize_method_text = tokenize_java_method(None, java_file_content)
with open('../../output.code', 'w+') as output_file:
output_file.write(tokenize_method_text)
print("Tokenized : % s" % tokenize_method_text)
else:
raise Exception('No such file at location: %s' % args.file_path)
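# --- Hedged usage note (added; not part of the original script) -------------
# Example command lines with illustrative input paths; both modes write the
# tokenised result to an output.code file as implemented above:
#   python java_tokenizer.py -p /path/to/Example.java -f java
#   python java_tokenizer.py -p /path/to/method_snippet.txt -f method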
| 2.6875
| 3
|
table_border_syntax.py
|
akrabat/SublimeTableEditor
| 313
|
12780627
|
<gh_stars>100-1000
# table_border_syntax.py - Base classes for table with borders: Pandoc,
# Emacs Org mode, Simple, reStructuredText
# Copyright (C) 2012 Free Software Foundation, Inc.
# Author: <NAME>
# Package: SublimeTableEditor
# Homepage: https://github.com/vkocubinsky/SublimeTableEditor
# This file is part of SublimeTableEditor.
# SublimeTableEditor is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
# SublimeTableEditor is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with SublimeTableEditor. If not, see <http://www.gnu.org/licenses/>.
from __future__ import print_function
from __future__ import division
import re
try:
from . import table_base as tbase
except ValueError:
import table_base as tbase
class SeparatorRow(tbase.Row):
def __init__(self, table, separator='-', size=0):
tbase.Row.__init__(self, table)
self.separator = separator
for i in range(size):
self.columns.append(SeparatorColumn(self, self.separator))
def new_empty_column(self):
return SeparatorColumn(self, self.separator)
def create_column(self, text):
return SeparatorColumn(self, self.separator)
def is_header_separator(self):
return True
def is_separator(self):
return True
def render(self):
r = self.syntax.hline_out_border
for ind, column in enumerate(self.columns):
if ind != 0:
r += self.syntax.hline_in_border
r += column.render()
r += self.syntax.hline_out_border
return r
class SeparatorColumn(tbase.Column):
def __init__(self, row, separator):
tbase.Column.__init__(self, row)
self.separator = separator
def min_len(self):
# '---' or '==='
return 3
def render(self):
return self.separator * self.col_len
class BorderTableDriver(tbase.TableDriver):
def editor_insert_single_hline(self, table, table_pos):
table.rows.insert(table_pos.row_num + 1, SeparatorRow(table, '-'))
table.pack()
return ("Single separator row inserted",
tbase.TablePos(table_pos.row_num, table_pos.field_num))
def editor_insert_double_hline(self, table, table_pos):
table.rows.insert(table_pos.row_num + 1, SeparatorRow(table, '='))
table.pack()
return ("Double separator row inserted",
tbase.TablePos(table_pos.row_num, table_pos.field_num))
def editor_insert_hline_and_move(self, table, table_pos):
table.rows.insert(table_pos.row_num + 1, SeparatorRow(table, '-'))
table.pack()
if table_pos.row_num + 2 < len(table):
if table[table_pos.row_num + 2].is_separator():
table.insert_empty_row(table_pos.row_num + 2)
else:
table.insert_empty_row(table_pos.row_num + 2)
return("Single separator row inserted",
tbase.TablePos(table_pos.row_num + 2, 0))
class BorderTableParser(tbase.BaseTableParser):
def _is_single_row_separator(self, str_cols):
if len(str_cols) == 0:
return False
for col in str_cols:
if not re.match(r"^\s*[\-]+\s*$", col):
return False
return True
def _is_double_row_separator(self, str_cols):
if len(str_cols) == 0:
return False
for col in str_cols:
if not re.match(r"^\s*[\=]+\s*$", col):
return False
return True
def create_row(self, table, line):
if self._is_single_row_separator(line.str_cols()):
row = SeparatorRow(table, '-')
elif self._is_double_row_separator(line.str_cols()):
row = SeparatorRow(table, '=')
else:
row = self.create_data_row(table, line)
return row
def create_data_row(self, table, line):
return tbase.DataRow(table)
| 2.65625
| 3
|
apps/places/urls.py
|
bergran/places
| 0
|
12780628
|
# -*- coding: utf-8 -*-
from fastapi.routing import APIRouter
from apps.places.views import places
router = APIRouter()
router.include_router(places.router, prefix='/places')
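# --- Hedged usage sketch (added; not part of the original module) -----------
# The router above would normally be mounted on a FastAPI application:
#
# from fastapi import FastAPI
# app = FastAPI()
# app.include_router(router)  # exposes the /places endpoints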
| 1.84375
| 2
|
ansys/mapdl/core/_commands/misc/__init__.py
|
da1910/pymapdl
| 0
|
12780629
|
from .misc import verify
| 1.054688
| 1
|
Day5/python problems/reverse_string.py
|
abbeyperini/DC_HTML_CSS
| 0
|
12780630
|
'''
Reverse string
Reverse string, in linear time complexity.
Input: 'i like this program very much'
Output: 'hcum yrev margorp siht ekil i'
Input: 'how are you'
Output: 'uoy era woh'
'''
'''
Finish the function
'''
def reverse_sentence(sentence):
reverse = []
for i in range((len(sentence) - 1), -1, -1):
reverse.append(sentence[i])
reverse_str = ''.join(reverse)
return(reverse_str)
###########
# Testing #
###########
# Test 1
# Correct result => 'hcum yrev margorp siht ekil i'
print(reverse_sentence('i like this program very much'))
# Test 2
# Correct result => 'uoy era woh'
print(reverse_sentence('how are you'))
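# Note (added): Python slicing gives the same linear-time result in a single
# expression, e.g. 'how are you'[::-1] == 'uoy era woh'; the explicit loop
# above is kept as the exercise's intended solution.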
| 4
| 4
|
tests/test_date_serialisation.py
|
thehyve/transmart_loader
| 3
|
12780631
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Tests for the date formatter of the TransmartCopyWriter.
"""
from datetime import datetime, timezone, date
from dateutil.tz import gettz
from transmart_loader.copy_writer import format_date, microseconds
def test_date_serialization():
assert format_date(
date(2019, 6, 28)) == '2019-06-28'
assert format_date(
datetime(2019, 6, 28, 13, 2, 58,
tzinfo=timezone.utc)) == '2019-06-28 13:02:58'
assert format_date(
datetime(2019, 6, 28, 13, 2, 58, 12345,
tzinfo=timezone.utc)) == '2019-06-28 13:02:58.012345'
assert format_date(
datetime(2019, 6, 28, 13, 2, 58,
tzinfo=gettz('Europe/Amsterdam')
)) == '2019-06-28 11:02:58'
assert format_date(datetime.fromtimestamp(
microseconds(date(2019, 6, 28))/1000,
timezone.utc)) == '2019-06-28 00:00:00'
assert format_date(datetime.fromtimestamp(
microseconds(datetime(2019, 6, 28, 13, 2, 58))/1000,
timezone.utc)) == '2019-06-28 13:02:58'
| 2.609375
| 3
|
code/result_reranker.py
|
leifos/retrievability
| 0
|
12780632
|
<reponame>leifos/retrievability
# AUTHOR: <NAME>
# 12-02-2022
# Reranking result runs using retrievability scores
import os
import argparse
def check_file_exists(filename):
if filename and not os.path.exists(filename):
print("{0} Not Found".format(filename))
quit(1)
def read_ret_file(ret_file):
rets = dict()
with open(ret_file, "r") as cf:
while cf:
line = cf.readline().strip()
if not line:
break
(doc_id, ret_score) = line.split()
doc_id = doc_id.strip()
rets[doc_id] = float(ret_score)
return rets
def get_max(rets):
max_ret = 0
for key,val in rets.items():
if val > max_ret:
max_ret = val
return max_ret
def rerank(doc_list, rets, k, lam):
max_ret = get_max(rets)
max_ret_lam = max_ret * lam
reranking = []
for i,doc in enumerate(doc_list):
if i <= k:
doc_ret_score = rets.get(doc[0], 0.0)
# to make sure that the updated scores are not lower than the kth + scores,
# we add the maximum adjustment possible, and then decrease the score for the doc.
new_doc_score = float(doc[1]) + max_ret_lam - (doc_ret_score*lam)
reranking.append( (doc[0], new_doc_score) )
else:
reranking.append(doc)
    # re-sort the list in descending order --- biggest score to smallest.
reranking.sort(key=lambda x:x[1], reverse=True )
return reranking
def do_reranking(out_file_handler, topic_id, doclist, rets, run_id, k, lam):
print(f'Reranking Topic: {topic_id}')
# Perform the re-ranking for the curr_topic_id, current_topic_doclist
reranked_list = rerank(doclist, rets, k=k, lam=lam)
# output the re-ranking to the outfile
for i, doc in enumerate(reranked_list):
rank = i+1
out_file_handler.write(f'{topic_id} Q1 {doc[0]} {rank} {doc[1]} {run_id}-rr-k{k}-lam{lam}\n')
def process_results(result_file, rets, out_file, k, lam):
curr_topic_id = None
curr_topic_doclist = []
of = open(out_file, "w")
with open(result_file, "r") as rf:
while rf:
line = rf.readline().strip()
if not line:
                # before we stop, perform the re-ranking for the final topic
do_reranking(of, curr_topic_id, curr_topic_doclist, rets, run_id, k, lam)
break
(topic_id, element_type, doc_id, rank, score, run_id) = line.split()
doc_id = doc_id.strip()
score = float(score.strip())
if topic_id == curr_topic_id:
# add doc and score to list
curr_topic_doclist.append((doc_id, score))
else:
if curr_topic_id is not None:
# do the re ranking for the current topic, before moving to the next one
do_reranking(of, curr_topic_id, curr_topic_doclist, rets, run_id, k, lam)
# reset for the new topic
curr_topic_id = topic_id
curr_topic_doclist = [(doc_id, score)]
rf.close()
of.close()
def parse_args():
arg_parser = argparse.ArgumentParser(description="Reranker")
arg_parser.add_argument("result_file",
help="TREC formatted results file. Six column tab/space sep file with fields:"
" topic_id element_type doc_id rank score run_id.")
arg_parser.add_argument("ret_file", help="A retrievability file. Two colum tab/space sep file with fields:"
"doc_id retrievability_score")
arg_parser.add_argument("out_file", help="Outputs a TREC formmatted results file.")
arg_parser.add_argument("k", help="Number of results to re-rank", type=int)
arg_parser.add_argument("lam", help="Lambda", type=float)
args = arg_parser.parse_args()
return args
def main(result_file, ret_file, out_file='out.res', k=50, lam=0.5):
print(f'About to re-rank: {result_file} using the retrievability scores from: {ret_file}')
rets = read_ret_file(ret_file)
print(f'Read in {len(rets)} retrievability scores.')
print(f'About to process the files and re-rank with k={k} and lambda={lam}')
process_results(result_file, rets, out_file, k, lam)
print(f'Results written to: {out_file}')
print(f'Done!')
if __name__ == '__main__':
# performance_main()
args = parse_args()
check_file_exists(args.result_file)
check_file_exists(args.ret_file)
main(args.result_file, args.ret_file, args.out_file, args.k, args.lam)
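# --- Hedged usage note (added; not part of the original script) -------------
# Example invocation with illustrative file names, re-ranking the top 50
# results of each topic with lambda = 0.5:
#   python result_reranker.py run.res retrievability.txt reranked.res 50 0.5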
| 3.140625
| 3
|
aiogram_dialog_extras/models/__init__.py
|
SamWarden/aiogram_dialog_extras
| 1
|
12780633
|
<gh_stars>1-10
from .text import PositionalVM
| 1.007813
| 1
|
toqito/matrices/iden.py
|
paniash/toqito
| 76
|
12780634
|
"""Identity matrix."""
from scipy import sparse
import numpy as np
def iden(dim: int, is_sparse: bool = False) -> np.ndarray:
r"""
Calculate the :code:`dim`-by-:code:`dim` identity matrix [WIKID]_.
Returns the :code:`dim`-by-:code:`dim` identity matrix. If :code:`is_sparse
= False` then the matrix will be full. If :code:`is_sparse = True` then the
matrix will be sparse.
.. math::
\mathbb{I} = \begin{pmatrix}
1 & 0 & 0 & \ldots & 0 \\
0 & 1 & 0 & \ldots & 0 \\
0 & 0 & 1 & \ldots & 0 \\
\vdots & \vdots & \vdots & \ddots & \vdots \\
0 & 0 & 0 & \ldots & 1
\end{pmatrix}
Only use this function within other functions to easily get the correct
identity matrix. If you always want either the full or the sparse
identity matrix, just use numpy's built-in np.identity function.
Examples
==========
The identity matrix generated from :math:`d = 3` yields the following
matrix:
.. math::
\mathbb{I}_3 = \begin{pmatrix}
1 & 0 & 0 \\
0 & 1 & 0 \\
0 & 0 & 1
\end{pmatrix}
>>> from toqito.matrices import iden
>>> iden(3)
[[1., 0., 0.],
[0., 1., 0.],
[0., 0., 1.]])
It is also possible to create sparse identity matrices. The sparse identity
matrix generated from :math:`d = 10` yields the following matrix:
>>> from toqito.matrices import iden
>>> iden(10, True)
<10x10 sparse matrix of type '<class 'numpy.float64'>' with 10 stored
elements (1 diagonals) in DIAgonal format>
References
==========
.. [WIKID] Wikipedia: Identity matrix
https://en.wikipedia.org/wiki/Identity_matrix
:param dim: Integer representing dimension of identity matrix.
:param is_sparse: Whether or not the matrix is sparse.
:return: Sparse identity matrix of dimension :code:`dim`.
"""
if is_sparse:
id_mat = sparse.eye(dim)
else:
id_mat = np.identity(dim)
return id_mat
| 4.09375
| 4
|
MayaPythonPlugin/pyHelloMaya.py
|
WendyAndAndy/MayaDev
| 19
|
12780635
|
# coding:utf-8
# A simple Maya Python plugin. By Jason (<EMAIL>), WeChat official account: WendyAndAndy
import sys
from maya.api import OpenMaya as om
def maya_useNewAPI():
pass
__VENDOR = '<EMAIL> | <EMAIL> | iJasonLee@WeChat'
__VERSION= '2018.08.08.01'
class HelloMaya(om.MPxCommand):
command = 'pyHello'
def __init__(self):
super(HelloMaya, self).__init__()
def doIt(self, args):
print(u'Hello, Maya. 中文测试:你好,妈呀!')
@staticmethod
def creator():
return HelloMaya()
def initializePlugin(obj):
plugin = om.MFnPlugin(obj, __VENDOR, __VERSION)
try:
plugin.registerCommand(HelloMaya.command, HelloMaya.creator)
except:
sys.stderr.write('register pyHello command failed')
raise
def uninitializePlugin(obj):
plugin = om.MFnPlugin(obj, __VENDOR, __VERSION)
try:
plugin.deregisterCommand(HelloMaya.command)
except:
sys.stderr.write('deregister pyHello command failed')
raise
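# --- Hedged usage sketch (added; not part of the original plug-in) ----------
# Inside a Maya Python session the plug-in can be loaded and the command run
# roughly as follows (the plug-in file path is an assumption):
#
# from maya import cmds
# cmds.loadPlugin('pyHelloMaya.py')
# cmds.pyHello()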
| 2.53125
| 3
|
zwog/utils.py
|
tare/zwog
| 2
|
12780636
|
"""Routines for processing workouts."""
from typing import Union, Tuple, Optional
from xml.etree.ElementTree import (ElementTree, Element,
SubElement, tostring)
from lark import Lark
from lark import Transformer
class WorkoutTransformer(Transformer):
"""Class to process workout parse-trees."""
def duration(self, d: list) -> int:
"""Return duration in seconds."""
if d[1] == 'hrs' or d[1] == 'h':
# return duration in seconds
return int(d[0]*3600)
elif d[1] == 'min' or d[1] == 'm':
# return duration in seconds
return int(d[0]*60)
elif d[1] == 'sec' or d[1] == 's':
return int(d[0])
else:
# this should not happen
raise ValueError(f'Unexpected unit of time: {d[1]}')
def durations(self, d: list) -> int:
"""Return total duration."""
return sum(d)
def steady_state(self, s: list) -> dict:
"""Return steady-state."""
return dict(duration=s[0], power=s[1])
def ramp(self, s: list) -> dict:
"""Return ramp."""
return dict(duration=s[0], power=s[1])
def power(self, p: list[float]) -> Union[float, list]:
"""Return power."""
if len(p) == 1:
return p[0]
else:
return p
def repeats(self, r: list) -> Tuple[str, int]:
"""Return repeats."""
return 'repeats', r[0]
def intervals(self, i: list) -> Tuple[str, list]:
"""Return intervals."""
return 'intervals', i
def block(self, b: list) -> dict:
"""Return block."""
return dict(b)
INT = int
NUMBER = float
TIME_UNIT = str
workout = list
class ZWOG():
"""Zwift workout generator (ZWOG)."""
def __init__(self, workout: str,
author: str = ('Zwift workout generator '
'(https://github.com/tare/zwog)'),
name: str = 'Structured workout',
category: Optional[str] = None,
subcategory: Optional[str] = None):
"""Initialize ZWOG.
Args:
workout: Workout as a string.
author: Author.
name: Workout name.
category: Workout category.
subcategory: Workout subcategory.
"""
parser = Lark(r"""
workout: block*
block: [repeats "x"] intervals
intervals: (ramp|steady_state)~1 ("," (steady_state|ramp))*
steady_state: durations "@" steady_state_power "%" "FTP"
ramp: durations "from" ramp_power "%" "FTP"
durations: duration+
duration: NUMBER TIME_UNIT
time_unit: TIME_UNIT
TIME_UNIT: ("sec"|"s"|"min"|"m"|"hrs"|"h")
repeats: INT
steady_state_power: NUMBER -> power
ramp_power: NUMBER "to" NUMBER -> power
%ignore WS
%import common.WS
%import common.INT
%import common.NUMBER
""", start='workout')
self.__name = name
self.__author = author
self.__category = category
self.__subcategory = subcategory
# self.__tree_workout = parser.parse(workout)
self.__json_workout = (WorkoutTransformer()
.transform(parser.parse(workout)))
self.__pretty_workout = self._json_to_pretty(self.__json_workout)
self.__zwo_workout = self._json_to_zwo(self.__json_workout)
self.__tss = self._json_to_tss(self.__json_workout)
def save_zwo(self, filename) -> None:
"""Save the workout in the ZWO format.
Args:
filename: Filename.
"""
self.__zwo_workout.write(filename)
def __str__(self):
"""Return str."""
return self.__pretty_workout
@property
def tss(self) -> float:
"""Get TSS."""
return self.__tss
@property
def json_workout(self) -> list[dict]:
"""Return workout as JSON."""
return self.__json_workout
# @property
# def _tree_workout(self) -> str:
# """"""
# return self.__tree_workout.pretty()
@property
def zwo_workout(self) -> str:
"""Get the workout as ZWO."""
return tostring(self.__zwo_workout.getroot(),
encoding='unicode')+'\n'
def _is_ramp(self, block: dict) -> bool:
"""Tell whether the block is a ramp block.
Args:
block: Block.
Returns:
True if a ramp, False otherwise.
"""
return bool(len(block['intervals']) == 1 and
isinstance(block['intervals'][0]['power'],
list))
def _is_steady_state(self, block: dict) -> bool:
"""Tell whether the block is a steady-state block.
Args:
block: Block.
Returns:
True if a steady-state, False otherwise.
"""
return bool(len(block['intervals']) == 1 and not
isinstance(block['intervals'][0]['power'],
list))
def _is_intervalst(self, block: dict) -> bool:
"""Tell whether the block is an intervalst.
Args:
block: Block.
Returns:
True if an intervalst , False otherwise.
"""
return bool(len(block['intervals']) == 2 and not
isinstance(block['intervals'][0]['power'], list) and not
isinstance(block['intervals'][1]['power'], list))
def _interval_to_xml(self, interval: dict,
repeats: int = 1) -> Element:
"""Return the interval as a XML node.
Args:
interval: The interval.
repeats: Number of repeats.
Returns:
XML node representing the interval.
"""
if not isinstance(interval, list):
if not isinstance(interval['power'], list): # steady-state
element = Element('SteadyState')
element.set('Duration', str(interval['duration']))
element.set('Power', str(interval['power']/100))
else: # ramp
element = Element('Ramp')
element.set('Duration', str(interval['duration']))
element.set('PowerLow', str(interval['power'][0]/100))
element.set('PowerHigh', str(interval['power'][1]/100))
else: # intervalst
element = Element('IntervalsT')
element.set('Repeat', str(repeats))
element.set('OnDuration', str(interval[0]['duration']))
element.set('OnPower', str(interval[0]['power']/100))
element.set('OffDuration', str(interval[1]['duration']))
element.set('OffPower', str(interval[1]['power']/100))
return element
def _json_to_zwo(self, blocks: list[dict]) -> ElementTree:
"""Convert JSON to ZWO.
See: https://github.com/h4l/zwift-workout-file-reference/blob/master/zwift_workout_file_tag_reference.md
Args:
blocks: Blocks.
Returns:
XML tree representing the workout.
""" # pylint: disable=line-too-long # noqa
root = Element('workout_file')
# fill metadata
for child, value in [('author', self.__author),
('name', self.__name),
('description',
('This workout was generated using ZWOG.\n\n'
f'{self._json_to_pretty(blocks)}')),
('sportType', 'bike'),
('category', self.__category),
('subcategory', self.__subcategory)]:
if value is not None:
tmp = SubElement(root, child)
tmp.text = value
tmp = SubElement(root, 'workout')
for block_idx, block in enumerate(blocks):
# warmup and ramp
if block_idx in [0, (len(blocks)-1)] and self._is_ramp(block):
element = self._interval_to_xml(block['intervals'][0])
if block_idx == 0:
element.tag = 'Warmup'
else:
element.tag = 'Cooldown'
tmp.append(element)
else:
# ramp or steady state
if self._is_ramp(block) or self._is_steady_state(block):
tmp.append(self._interval_to_xml(block['intervals'][0]))
else:
if 'repeats' in block:
repeats = block['repeats']
else:
repeats = 1
if self._is_intervalst(block): # intervalst
tmp.append(self._interval_to_xml(block['intervals'],
repeats=repeats))
else: # non intervalst
for _ in range(repeats):
for interval in block['intervals']:
tmp.append(self._interval_to_xml(interval))
tree = ElementTree(root)
return tree
def _duration_to_pretty_str(self, duration: int) -> str:
"""Prettify and stringify duration given in seconds.
Args:
duration: Duration in seconds.
Returns:
Prettified and stringified duration.
"""
pretty_str = ''
if int(duration/3600) > 0:
pretty_str += f'{int(duration/3600)}h'
if int((duration % 3600)/60) > 0:
pretty_str += f'{int((duration % 3600)/60)}m'
if duration % 60 > 0:
pretty_str += f'{int((duration % 60))}s'
return pretty_str
def _interval_to_str(self, interval: dict) -> str:
"""Return the interval as a string.
Args:
interval: Interval.
Returns:
String representation of the interval.
"""
if isinstance(interval['power'], list):
return (f'{self._duration_to_pretty_str(interval["duration"])}'
f' from {interval["power"][0]:.0f} to '
f'{interval["power"][1]:.0f}% FTP')
else:
return (f'{self._duration_to_pretty_str(interval["duration"])} '
f'@ {interval["power"]:.0f}% FTP')
def _interval_to_tss(self, interval: dict) -> float:
"""Calculate TSS for an interval.
Args:
interval: Interval.
Returns:
Calculated TSS.
"""
if isinstance(interval['power'], list):
min_power = min([interval['power'][0], interval['power'][1]])
max_power = max([interval['power'][0], interval['power'][1]])
tss = interval['duration']/3600*min_power
tss += interval['duration']/3600*(max_power-min_power)/2
else:
tss = interval['duration']/3600*interval['power']
return tss
def _json_to_pretty(self, blocks: list[dict]) -> str:
"""Return the workout as a string.
Args:
blocks (list[dict]): Workout.
Returns:
str: String representation of the workout.
"""
output = []
for block in blocks:
tmp = ''
if 'repeats' in block:
tmp = f'{block["repeats"]}x '
output.append(tmp + ', '.join([
self._interval_to_str(interval)
for interval in block['intervals']]))
return '\n'.join(output)
def _json_to_tss(self, blocks: list[dict]) -> float:
"""Calculate TSS for a workout.
Args:
blocks: Workout.
Returns:
float: Calculated TSS.
"""
tss = 0
for block in blocks:
# ramp or steady state
if self._is_ramp(block) or self._is_steady_state(block):
tss += self._interval_to_tss(block['intervals'][0])
else:
if 'repeats' in block:
repeats = block['repeats']
else:
repeats = 1
tss += sum([repeats*self._interval_to_tss(interval)
for interval in block['intervals']])
return tss
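# --- Hedged usage sketch (added; not part of the original module) -----------
# A workout string that matches the grammar defined in ZWOG.__init__; the
# intervals themselves are illustrative, not a recommended session.
#
# workout = ZWOG("10min from 40 to 75% FTP\n"
#                "4x 5min @ 105% FTP, 3min @ 60% FTP\n"
#                "10min from 75 to 40% FTP")
# print(workout)                    # pretty-printed blocks
# print(workout.tss)                # estimated training stress score
# workout.save_zwo('structured_workout.zwo')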
| 3.078125
| 3
|
packages/syft/src/syft/core/tensor/autodp/row_entity_phi.py
|
pculliton/PySyft
| 2
|
12780637
|
<reponame>pculliton/PySyft
# future
from __future__ import annotations
# stdlib
from collections.abc import Sequence
from typing import Any
from typing import List
from typing import Optional
from typing import Tuple
from typing import Union
# third party
import numpy as np
import numpy.typing as npt
# relative
from ...adp.vm_private_scalar_manager import (
VirtualMachinePrivateScalarManager as TypeScalarManager,
)
from ...common.serde.serializable import serializable
from ..broadcastable import is_broadcastable
from ..passthrough import PassthroughTensor # type: ignore
from ..passthrough import implements # type: ignore
from ..passthrough import is_acceptable_simple_type # type: ignore
from ..types import AcceptableSimpleType
from .adp_tensor import ADPTensor
from .initial_gamma import InitialGammaTensor
from .single_entity_phi import SingleEntityPhiTensor
@serializable(recursive_serde=True)
class RowEntityPhiTensor(PassthroughTensor, ADPTensor):
"""This tensor is one of several tensors whose purpose is to carry metadata
relevant to automatically tracking the privacy budgets of tensor operations. This
tensor is called 'Phi' tensor because it assumes that each number in the tensor
originates from a single entity (no numbers originate from multiple entities). This
tensor is called 'RowEntity' because it additionally assumes that all entries in a row
come from the same entity (note: multiple rows may also be from the same or different
entities). The reason we have a tensor specifically for tracking row-organized entities
is that data entity-linked by row is very common and specifically accommodating it offers
significant performance benefits over other DP tracking tensors. Note that when
we refer to the number of 'rows' we simply refer to the length of the first dimension. This
tensor can have an arbitrary number of dimensions."""
# a list of attributes needed for serialization using RecursiveSerde
__attr_allowlist__ = ["child"]
def __init__(self, rows: Sequence, check_shape: bool = True):
"""Initialize a RowEntityPhiTensor
rows: the actual data organized as an iterable (can be any type of iterable)
check_shape: whether or not we are already confident that the objects in iterable
'rows' all have the same dimension (check if we're not sure).
"""
        # Container type hierarchy: https://docs.python.org/3/library/collections.abc.html
self.child: Sequence
super().__init__(rows)
        # include this check because it's expensive to check and sometimes we can skip it when
# we already know the rows are identically shaped.
if check_shape:
# shape of the first row we use for reference
shape = rows[0].shape
# check each row to make sure it's the same shape as the first
for row in rows[1:]:
if shape != row.shape:
raise Exception(
f"All rows in RowEntityPhiTensor must match: {shape} != {row.shape}"
)
@property
def scalar_manager(self) -> TypeScalarManager:
return self.child[0].scalar_manager
@property
def min_vals(self) -> np.ndarray:
return np.concatenate([x.min_vals for x in self.child]).reshape(self.shape)
@property
def max_vals(self) -> np.ndarray:
return np.concatenate([x.max_vals for x in self.child]).reshape(self.shape)
@property
def value(self) -> np.ndarray:
return np.concatenate([x.child for x in self.child]).reshape(self.shape)
@property
def entities(self) -> np.ndarray:
return np.array(
[[x.entity] * np.array(x.shape).prod() for x in self.child]
).reshape(self.shape)
@property
def gamma(self) -> InitialGammaTensor:
return self.create_gamma()
def create_gamma(
self, scalar_manager: Optional[TypeScalarManager] = None
) -> InitialGammaTensor:
if scalar_manager is None:
scalar_manager = self.scalar_manager
return InitialGammaTensor(
values=self.value, # 5 x 10 data
min_vals=self.min_vals, # 5 x 10 minimum values
max_vals=self.max_vals, # 5 x 10 maximum values
entities=self.entities, # list of 5 entities
scalar_manager=scalar_manager,
)
@property
def shape(self) -> Tuple[Any, ...]:
return tuple([len(self.child)] + list(self.child[0].shape))
def __eq__(self, other: Any) -> RowEntityPhiTensor:
if is_acceptable_simple_type(other) or len(self.child) == len(other.child):
new_list = list()
for i in range(len(self.child)):
if is_acceptable_simple_type(other):
new_list.append(self.child[i] == other)
else:
new_list.append(self.child[i] == other.child[i])
return RowEntityPhiTensor(rows=new_list, check_shape=False)
else:
raise Exception(
f"Tensor dims do not match for __eq__: {len(self.child)} != {len(other.child)}"
)
def __ne__(self, other: Any) -> RowEntityPhiTensor:
opposite_result = self.__eq__(other)
# Normal inversion on (opposite_result.child) might not work on nested lists
result = []
for row in opposite_result.child:
result.append(np.invert(row))
return RowEntityPhiTensor(rows=result)
def __add__(
self, other: Union[RowEntityPhiTensor, AcceptableSimpleType]
) -> RowEntityPhiTensor:
# TODO: Catch unacceptable types (str, dict, etc) to avoid errors for other.child below
if is_acceptable_simple_type(other) or len(self.child) == len(other.child):
new_list = list()
for i in range(len(self.child)):
if is_acceptable_simple_type(other):
new_list.append(self.child[i] + other)
else:
# Private/Public and Private/Private are handled by the underlying SEPT self.child objects.
new_list.append(self.child[i] + other.child[i])
return RowEntityPhiTensor(rows=new_list, check_shape=False)
else:
# Broadcasting is possible, but we're skipping that for now.
raise Exception(
f"Tensor dims do not match for __add__: {len(self.child)} != {len(other.child)}"
)
def __sub__(
self, other: Union[RowEntityPhiTensor, AcceptableSimpleType]
) -> RowEntityPhiTensor:
# TODO: Catch unacceptable types (str, dict, etc) to avoid errors for other.child below
if is_acceptable_simple_type(other) or len(self.child) == len(other.child):
new_list = list()
for i in range(len(self.child)):
if is_acceptable_simple_type(other):
new_list.append(self.child[i] - other)
else:
new_list.append(self.child[i] - other.child[i])
return RowEntityPhiTensor(rows=new_list, check_shape=False)
else:
raise Exception(
f"Tensor dims do not match for __sub__: {len(self.child)} != {len(other.child)}"
)
def __mul__(
self, other: Union[RowEntityPhiTensor, AcceptableSimpleType]
) -> RowEntityPhiTensor:
new_list = list()
if is_acceptable_simple_type(other):
if isinstance(other, np.ndarray):
if is_broadcastable(self.shape, other.shape):
new_list.append(
[self.child[i] * other[i] for i in range(len(self.child))]
)
else:
raise Exception(
f"Tensor dims do not match for __sub__: {getattr(self.child, 'shape', None)} != {other.shape}"
)
else: # int, float, bool, etc
new_list = [child * other for child in self.child]
elif isinstance(other, RowEntityPhiTensor):
if is_broadcastable(self.shape, other.shape):
new_list = [
self.child[i] * other.child[i] for i in range(len(self.child))
]
else:
raise Exception(
f"Tensor dims do not match for __sub__: {self.shape} != {other.shape}"
)
elif isinstance(other, SingleEntityPhiTensor):
for child in self.child:
# If even a single SEPT in the REPT isn't broadcastable, the multiplication operation doesn't work
if not is_broadcastable(child.shape, other.shape):
raise Exception(
f"Tensor dims do not match for __sub__: {self.shape} != {other.shape}"
)
new_list = [i * other for i in self.child]
else:
raise NotImplementedError
return RowEntityPhiTensor(rows=new_list)
# if is_acceptable_simple_type(other) or len(self.child) == len(other.child):
# new_list = list()
# for i in range(len(self.child)):
# if is_acceptable_simple_type(other):
# if isinstance(other, (int, bool, float)):
# new_list.append(self.child[i] * other)
# else:
# new_list.append(self.child[i] * other[i])
# else:
# if isinstance(other, RowEntityPhiTensor):
# new_list.append(self.child[i] * other.child[i])
# elif isinstance(other, SingleEntityPhiTensor):
#
# new_list.append(self.child[i] * other)
# return RowEntityPhiTensor(rows=new_list, check_shape=False)
# else:
# raise Exception(
# f"Tensor dims do not match for __mul__: {len(self.child)} != {len(other.child)}"
# )
def __pos__(self) -> RowEntityPhiTensor:
return RowEntityPhiTensor(rows=[+x for x in self.child], check_shape=False)
def __neg__(self) -> RowEntityPhiTensor:
return RowEntityPhiTensor(rows=[-x for x in self.child], check_shape=False)
def __or__(self, other: Any) -> RowEntityPhiTensor:
return RowEntityPhiTensor(
rows=[x | other for x in self.child], check_shape=False
)
def __and__(self, other: Any) -> RowEntityPhiTensor:
return RowEntityPhiTensor(
rows=[x & other for x in self.child], check_shape=False
)
def __truediv__(
self, other: Union[RowEntityPhiTensor, AcceptableSimpleType]
) -> RowEntityPhiTensor:
if is_acceptable_simple_type(other) or len(self.child) == len(other.child):
new_list = list()
for i in range(len(self.child)):
if is_acceptable_simple_type(other):
new_list.append(self.child[i] / other)
else:
new_list.append(self.child[i] / other.child[i])
return RowEntityPhiTensor(rows=new_list, check_shape=False)
else:
raise Exception(
f"Tensor dims do not match for __truediv__: {len(self.child)} != {len(other.child)}"
)
def repeat(
self, repeats: Union[int, List[int]], axis: Optional[int] = None
) -> RowEntityPhiTensor:
if not isinstance(repeats, int):
raise Exception(
f"{type(self)}.repeat for repeats: List {repeats} not implemented yet"
)
if axis is None:
raise Exception(
"Conservatively, RowEntityPhiTensor doesn't yet support repeat(axis=None)"
)
if axis == 0 or axis == -len(self.shape):
new_list = list()
# match numpy's repeat semantics: each row is duplicated consecutively
# ([r0, r0, r1, r1, ...]) rather than tiled ([r0, r1, r0, r1, ...])
for row in self.child:
for _ in range(repeats):
new_list.append(row)
return RowEntityPhiTensor(rows=new_list, check_shape=False)
elif axis > 0:
new_list = list()
for row in self.child:
new_list.append(row.repeat(repeats, axis=axis - 1))
return RowEntityPhiTensor(rows=new_list, check_shape=False)
# axis is negative
elif abs(axis) < len(self.shape):
new_list = list()
for row in self.child:
new_list.append(row.repeat(repeats, axis=axis))
return RowEntityPhiTensor(rows=new_list, check_shape=False)
else:
raise Exception(
"'axis' arg is negative and strangely large... not sure what to do."
)
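# Worked example (illustrative): for a REPT of shape (2, 3),
#   rept.repeat(2, axis=0)  -> rows [r0, r0, r1, r1], shape (4, 3)
#   rept.repeat(2, axis=1)  -> each row repeats its own entries, shape (2, 6)
# Only axis=0 changes the number of rows; other axes delegate to the child rows.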
def flatten(self, order: Optional[str] = "C") -> RowEntityPhiTensor:
new_list = list()
for tensor in self.child:
new_list.append(tensor.flatten(order))
return RowEntityPhiTensor(rows=new_list, check_shape=False)
def ravel(self, order: Optional[str] = "C") -> RowEntityPhiTensor:
new_list = list()
for tensor in self.child:
new_list.append(tensor.ravel(order))
return RowEntityPhiTensor(rows=new_list, check_shape=False)
def swapaxes(self, axis1: int, axis2: int) -> RowEntityPhiTensor:
if axis1 == 0 or axis2 == 0:
raise Exception(
"For now, you can't swap the first axis b/c that would "
"probably create a Gamma Tensor. Sorry about that!"
)
new_list = list()
for tensor in self.child:
# Axis=0 for REPT.child is Axis=1 for REPT, so subtract 1
new_list.append(tensor.swapaxes(axis1 - 1, axis2 - 1))
return RowEntityPhiTensor(rows=new_list, check_shape=False)
def squeeze(
self, axis: Optional[Union[int, Tuple[int, ...]]] = None
) -> RowEntityPhiTensor:
if axis == 0:
# If the first axis can be squeezed then there is only one
# tensor in the REPT, as such it might be a SEPT
# TODO: Check if the output type is still a REPT
# if isinstance(self.child[0], SEPT): return self.child[0]
return RowEntityPhiTensor(rows=self.child[0])
else:
new_list = list()
for tensor in self.child:
new_list.append(tensor.squeeze(axis))
# return a new tensor rather than mutating self.child in place
return RowEntityPhiTensor(rows=new_list, check_shape=False)
def reshape(
self,
shape: Union[
int,
Union[Sequence[int], Sequence[Sequence[int]]],
],
) -> RowEntityPhiTensor:
if isinstance(shape, int):
raise Exception(
f"{type(self)}.reshape for shape: int {shape} is not implemented"
)
# This is to fix the bug where shape = ([a, b, c], )
if isinstance(shape[0], Sequence):
shape = shape[0]
if shape[0] != self.shape[0]:
raise Exception(
"For now, you can't reshape the first dimension because that would"
+ "probably require creating a gamma tensor."
+ str(shape)
+ " and "
+ str(self.shape)
)
new_list = list()
for row in self.child:
new_list.append(row.reshape(shape[1:]))
return RowEntityPhiTensor(rows=new_list, check_shape=False)
def resize(
self,
new_shape: Union[int, Tuple[int, ...]],
refcheck: Optional[bool] = True,
) -> None:
"""This method is identical to reshape, but it modifies the Tensor in-place instead of returning a new one"""
if isinstance(new_shape, int):
raise Exception(f"new_shape: {new_shape} must be a Tuple for {type(self)}")
if new_shape[0] != self.shape[0]:
raise Exception(
"For now, you can't reshape the first dimension because that would"
+ "probably require creating a gamma tensor."
)
new_list = list()
for row in self.child:
new_list.append(row.reshape(new_shape[1:]))
# Modify the tensor data in-place instead of returning a new one.
self.child = new_list
def compress(
self,
condition: List[bool],
axis: Optional[int] = None,
out: Optional[np.ndarray] = None,
) -> RowEntityPhiTensor:
# TODO: Could any conditions result in GammaTensors being formed?
# TODO: Will min/max vals change upon filtering? I don't think so, since they're data independent
new_list = list()
for tensor in self.child:
new_list.append(tensor.compress(condition, axis, out))
return RowEntityPhiTensor(rows=new_list, check_shape=False)
def partition(
self,
kth: Union[int, Tuple[int, ...]],
axis: Optional[int] = -1,
kind: Optional[str] = "introselect",
order: Optional[Union[int, Tuple[int, ...]]] = None,
) -> RowEntityPhiTensor:
if axis == 0: # Unclear how to sort the SEPTs in a REPT
raise NotImplementedError
new_list = list()
for tensor in self.child:
new_list.append(tensor.partition(kth, axis, kind, order))
return RowEntityPhiTensor(rows=new_list, check_shape=False)
# Since this is being used differently compared to supertype, ignoring type annotation errors
def sum(
self, *args: Any, axis: Optional[int] = None, **kwargs: Any
) -> RowEntityPhiTensor:
if axis is None or axis == 0:
return self.gamma.sum(axis=axis)
new_list = list()
for row in self.child:
new_list.append(row.sum(*args, axis=axis - 1, **kwargs))
return RowEntityPhiTensor(rows=new_list, check_shape=False)
# Since this is being used differently compared to supertype, ignoring type annotation errors
def transpose(self, *dims: Optional[Any]) -> RowEntityPhiTensor:
if dims:
if dims[0] != 0:
raise Exception("Can't move dim 0 in RowEntityPhiTensor at this time")
# child rows lack the leading (row) axis, so shift the remaining dims down by one
new_dims = list(np.array(dims[1:]) - 1)
new_list = list()
for row in self.child:
new_list.append(row.transpose(*new_dims))
else:
new_list = list()
for row in self.child:
new_list.append(row.transpose())
return RowEntityPhiTensor(rows=new_list, check_shape=False)
def __le__(self, other: Any) -> RowEntityPhiTensor:
# if the tensor being compared is a public tensor / int / float / etc.
if is_acceptable_simple_type(other):
new_list = list()
for i in range(len(self.child)):
new_list.append(self.child[i] <= other)
return RowEntityPhiTensor(rows=new_list, check_shape=False)
if len(self.child) == len(other.child):
# tensors have different entities
if not (self.entities == other.entities).all():
raise Exception("Tensor owners do not match")
new_list = list()
for i in range(len(self.child)):
new_list.append(self.child[i] <= other.child[i])
return RowEntityPhiTensor(rows=new_list, check_shape=False)
else:
raise Exception(
f"Tensor dims do not match for __le__: {len(self.child)} != {len(other.child)}"
)
def __lt__(self, other: Any) -> RowEntityPhiTensor:
# if the tensor being compared is a public tensor / int / float / etc.
if is_acceptable_simple_type(other):
new_list = list()
for i in range(len(self.child)):
new_list.append(self.child[i] < other)
return RowEntityPhiTensor(rows=new_list, check_shape=False)
if len(self.child) == len(other.child):
# tensors have different entities
if not (self.entities == other.entities).all():
raise Exception("Tensor owners do not match")
new_list = list()
for i in range(len(self.child)):
new_list.append(self.child[i] < other.child[i])
return RowEntityPhiTensor(rows=new_list, check_shape=False)
else:
raise Exception(
f"Tensor dims do not match for __lt__: {len(self.child)} != {len(other.child)}"
)
def __gt__(self, other: Any) -> RowEntityPhiTensor:
# if the tensor being compared is a public tensor / int / float / etc.
if is_acceptable_simple_type(other):
new_list = list()
for i in range(len(self.child)):
new_list.append(self.child[i] > other)
return RowEntityPhiTensor(rows=new_list, check_shape=False)
if len(self.child) == len(other.child):
# tensors have different entities
if not (self.entities == other.entities).all():
raise Exception("Tensor owners do not match")
new_list = list()
for i in range(len(self.child)):
new_list.append(self.child[i] > other.child[i])
return RowEntityPhiTensor(rows=new_list, check_shape=False)
else:
raise Exception(
f"Tensor dims do not match for __gt__: {len(self.child)} != {len(other.child)}"
)
def __ge__(self, other: Any) -> RowEntityPhiTensor:
# if the tensor being compared is a public tensor / int / float / etc.
if is_acceptable_simple_type(other):
new_list = list()
for i in range(len(self.child)):
new_list.append(self.child[i] >= other)
return RowEntityPhiTensor(rows=new_list, check_shape=False)
if len(self.child) == len(other.child):
# tensors have different entities
if not (self.entities == other.entities).all():
raise Exception("Tensor owners do not match")
new_list = list()
for i in range(len(self.child)):
new_list.append(self.child[i] >= other.child[i])
return RowEntityPhiTensor(rows=new_list, check_shape=False)
else:
raise Exception(
f"Tensor dims do not match for __ge__: {len(self.child)} != {len(other.child)}"
)
def clip(
self, a_min: npt.ArrayLike, a_max: npt.ArrayLike, *args: Any
) -> RowEntityPhiTensor:
if a_min is None and a_max is None:
raise Exception("ValueError: clip: must set either max or min")
new_list = list()
for row in self.child:
new_list.append(row.clip(a_min=a_min, a_max=a_max, *args))
return RowEntityPhiTensor(rows=new_list, check_shape=False)
@implements(RowEntityPhiTensor, np.expand_dims)
def expand_dims(a: np.typing.ArrayLike, axis: int) -> RowEntityPhiTensor:
if axis == 0:
raise Exception(
"Currently, we don't have functionality for axis=0 but we could with a bit more work."
)
new_rows = list()
for row in a.child:
new_rows.append(np.expand_dims(row, axis - 1))
return RowEntityPhiTensor(rows=new_rows, check_shape=False)
| 1.914063
| 2
|
strar/errors.py
|
martvanrijthoven/strar
| 0
|
12780638
|
from dataclasses import dataclass
from typing import Optional
@dataclass(frozen=True)
class RegistrantNotRegisteredError(Exception):
"""Raised when registrant name does not exist in the register"""
cls: type
registrant_name: str
register: Optional[dict]
def __post_init__(self):
super().__init__(self._message())
def _message(self):
if self.register is None:
return self._empty_register_message()
return self._register_name_not_found_message()
def _prefix_message(self):
return f"Registrant name '{self.registrant_name}' is not found in the register of class '{self.cls.__name__}'"
def _empty_register_message(self):
return f"""
{self._prefix_message()} with an empty register.
"""
def _register_name_not_found_message(self):
return f"""
{self._prefix_message()} with registrant names {tuple(self.register.keys())}.
"""
| 3.234375
| 3
|
samples/use_market_hours.py
|
areed1192/td-ameritrade-api
| 40
|
12780639
|
<filename>samples/use_market_hours.py
from pprint import pprint
from datetime import datetime
from configparser import ConfigParser
from td.credentials import TdCredentials
from td.client import TdAmeritradeClient
from td.utils.enums import Markets
# Initialize the Parser.
config = ConfigParser()
# Read the file.
config.read('config/config.ini')
# Get the specified credentials.
client_id = config.get('main', 'client_id')
redirect_uri = config.get('main', 'redirect_uri')
# Initialize our `Credentials` object.
td_credentials = TdCredentials(
client_id=client_id,
redirect_uri=redirect_uri,
credential_file='config/td_credentials.json'
)
# Initialize the `TdAmeritradeClient`
td_client = TdAmeritradeClient(
credentials=td_credentials
)
# Initialize the `MarketHours` service.
market_hours_service = td_client.market_hours()
# Grab the market hours
pprint(
market_hours_service.get_multiple_market_hours(
markets=['EQUITY', Markets.Bond],
date=datetime.now()
)
)
# Grab the hours for a specific market.
pprint(
market_hours_service.get_market_hours(
market='EQUITY',
date=datetime.now()
)
)
| 2.625
| 3
|
avista_sensors/impl/vibration_processor.py
|
ommmid/sensors
| 0
|
12780640
|
<gh_stars>0
import time
from avista_sensors.sensor_processor import SensorProcessor
from mpu6050 import mpu6050
import numpy.fft as nfft
import numpy as np
class VibrationProcessor(SensorProcessor):
"""MPU6050 sensor implementation (Accelerometer and Vibration)
Attributes:
**_address (int)**: I2C address
**_sensor (:obj: `mpu6050`)**: mpu6050 sensor object
**_time_step (float)**: short periodicity to capture movement
"""
def __init__(self):
"""Constructs a new VibrationSensor instance"""
super().__init__()
self._address = None
self._sensor = None
self._time_step = 0.005
def setup(self):
"""Sets up sensor configurations that should happen after loading from the database"""
self._address = int(self._parameters['address'], 16)
self._sensor = mpu6050(self._address)
def _read_sensor(self, ts):
"""Reads data from the sensor
Args:
**ts (int)**: timestamp of when the data was read
"""
x = np.empty([400])
y = np.empty([400])
z = np.empty([400])
self._sample(x, y, z)
data = {
"x": self._find_freq(x),
"y": self._find_freq(y),
"z": self._find_freq(z)
}
return data
def _find_freq(self, data):
"""Performs an fft on the data to extract the frequency
Args:
**data (:obj: `array`)**: a collected data sample
"""
# execute the fft
w = nfft.fft(data)
freqs = nfft.fftfreq(len(data), d=self._time_step)
# get the power spectrum from the FFT output (w), not from the raw samples
pwr = np.abs(w)
# remove negatives
freqs = freqs[1:int(len(freqs) / 2)]
pwr = pwr[1:int(len(pwr) / 2)]
# find max power and its index
pmax = np.max(pwr)
condition = (pwr == pmax)
index = np.where(condition)
# return frequency
return freqs[index][0]
def _sample(self, x, y, z):
"""Samples acceleration data
Args:
**x (:obj: `array`)**: a numpy array to store x acceleration data
**y (:obj: `array`)**: a numpy array to store y acceleration data
**z (:obj: `array`)**: a numpy array to store z acceleration data
"""
for i in range(400):
data = self._sensor.get_accel_data()
x[i] = data["x"]
y[i] = data["y"]
z[i] = data["z"]
time.sleep(self._time_step)
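# Hedged, hardware-free sketch of the same FFT recipe used in _find_freq above,
# run on a synthetic 10 Hz sine wave. Note the module-level mpu6050 import still
# has to be installed for this file to load at all.
if __name__ == "__main__":
    time_step = 0.005
    t = np.arange(400) * time_step
    synthetic = np.sin(2 * np.pi * 10.0 * t)  # 10 Hz test tone
    w = nfft.fft(synthetic)
    freqs = nfft.fftfreq(len(synthetic), d=time_step)
    pwr = np.abs(w)
    # drop the DC bin and the mirrored negative-frequency half
    freqs = freqs[1:len(freqs) // 2]
    pwr = pwr[1:len(pwr) // 2]
    print("dominant frequency (Hz):", freqs[np.argmax(pwr)])  # ~10.0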
| 3.171875
| 3
|
pru/SphereTurnArc.py
|
euctrl-pru/rt-python
| 0
|
12780641
|
<gh_stars>0
# Copyright (c) 2018 Via Technology Ltd. All Rights Reserved.
# Consult your license regarding permissions and restrictions.
"""
This module supports turning arcs in Spherical Vector coordinates.
"""
import numpy as np
from via_sphere import distance_radians, Arc3d
MIN_TURN_ANGLE = np.deg2rad(1.0)
""" The minimum turn angle to model. """
MAX_TURN_ANGLE = np.deg2rad(150.0)
""" The maximum turn angle to model. """
def calculate_radius(turn_angle, anticipation_distance):
"""
Calculate the radius of a turn from its angle and anticipation_distance.
Note: turn_angle must be positive and greater than MIN_TURN_ANGLE.
Parameters
----------
turn_angle: float
The turn angle in [radians].
anticipation_distance: float
The turn anticipation distance in [radians].
Returns
-------
The radius of the turn in [radians].
"""
return anticipation_distance / np.tan(0.5 * turn_angle)
def calculate_arc_length(angle, anticipation_distance):
"""
Calculate the length of a turn arc from its angle and anticipation distance.
Parameters
----------
angle: float
The turn angle in [radians].
anticipation_distance: float
The turn anticipation distance in [radians].
Returns
-------
The length of the turn arc in [radians].
"""
turn_angle = np.abs(angle)
if turn_angle > MIN_TURN_ANGLE:
return turn_angle * calculate_radius(turn_angle, anticipation_distance)
else: # turn_angle is too small, calculate the straight distance
return 2.0 * anticipation_distance
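# Worked example (illustrative only): a 90 degree turn anticipated 5 NM before
# the waypoint (5 NM ~= 9260 m ~= 0.001454 rad on a 6371 km sphere) has
#   radius     = 0.001454 / tan(45 deg) ~= 0.001454 rad
#   arc length = (pi / 2) * 0.001454    ~= 0.002284 rad
# which is shorter than the 2 * 0.001454 = 0.002908 rad flown if the aircraft
# overflew the waypoint along the two straight legs.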
class SphereTurnArc:
"""
A class for the arc of a turn between two Great Circle arcs in ECEF coordinates.
The class contains the start, end and centre points of the arc together
with its radius [radians] and (signed) angle [radians].
"""
__slots__ = ('__start', '__centre', '__finish', '__angle', '__radius')
def __init__(self, inbound, outbound, distance):
"""
Create a new Arc from the inbound and outbound route legs.
Parameters
----------
inbound, outbound: Arc3ds
The inbound and outbound legs.
distance: float
The turn anticipation distance [radians].
"""
self.__start = outbound.a()
self.__centre = outbound.a()
self.__finish = outbound.a()
self.__angle = 0.0
self.__radius = 0.0
self.__angle = inbound.turn_angle(outbound.b())
turn_angle = abs(self.__angle)
if MIN_TURN_ANGLE < turn_angle <= MAX_TURN_ANGLE:
self.__radius = calculate_radius(turn_angle, distance)
start_point = inbound.position(inbound.length() - distance)
self.__start = start_point
r = -self.__radius if (self.__angle > 0.0) else self.__radius
self.__centre = inbound.perp_position(start_point, r)
self.__finish = outbound.position(distance)
else:
self.__angle = 0.0
@property
def start(self):
"""Accessor for the Turn Arc start point."""
return self.__start
@property
def centre(self):
"""Accessor for the Turn Arc centre point."""
return self.__centre
@property
def finish(self):
"""Accessor for the Turn Arc end point."""
return self.__finish
@property
def angle(self):
"""Accessor for the Turn angle [radians]."""
return self.__angle
@property
def radius(self):
"""Accessor for the Turn radius [radians]."""
return self.__radius
def __eq__(self, other):
"""Return True if the centre, angle and radius are the same, False otherwise."""
return (self.centre == other.centre) \
and (self.angle == other.angle) \
and (self.radius == other.radius)
def __bool__(self):
"""Return False if the radius is zero, True otherwise."""
return bool(self.__radius > 0.0)
def length(self):
"""Calculate the length of the turn arc [radians]."""
return self.__radius * np.abs(self.__angle)
def radial_distance(self, point):
"""
Calculate the distance of a point from the centre of the turn arc.
Parameters
----------
point: Point3d
The point to measure.
Returns
-------
distance: float
The distance between point and the centre of the turn [radians].
"""
return distance_radians(self.centre, point)
def cross_track_distance(self, point):
"""
Calculate the distance of a point outside (+ve) or inside (-ve) the turn.
Parameters
----------
point: Point3d
The point to measure.
Returns
-------
distance: float
The distance between point and the turn arc [radians].
"""
return self.radial_distance(point) - self.radius
def point_angle(self, point):
"""
Calculate the angle of a point from the start of the turn arc.
Parameters
----------
point: Point3d
The point to measure.
Returns
-------
angle: float
The angle between point and the start of the turn [radians].
"""
start_arc = Arc3d(self.centre, self.start)
return start_arc.start_angle(point)
def along_track_distance(self, point):
"""
Calculate the distance of a point along the turn from the start of the arc.
Parameters
----------
point: Point3d
The point to measure.
Returns
-------
distance: float
The (signed) distance between point and the start of the turn,
+ve in the direction of the arc, -ve before the start [radians].
"""
distance = self.radius * self.point_angle(point)
return -distance if (self.angle < 0.0) else distance
def position(self, angle):
"""
Calculate the position of a point along the turn at angle from the start point.
Parameters
----------
angle: float
The angle between point and the start of the turn [radians].
Returns
-------
point: Point3d
The point at angle from the start along the turn arc.
"""
start_arc = Arc3d(self.centre, self.start)
return start_arc.angle_position(angle)
| 3.109375
| 3
|
src/ModelEvaluation/roc.py
|
FDUJiaG/PyML-Course
| 1
|
12780642
|
<reponame>FDUJiaG/PyML-Course
import numpy as np
import matplotlib.pyplot as plt
from mglearn.datasets import make_blobs
from sklearn.datasets import load_digits
from sklearn.model_selection import train_test_split
from sklearn.svm import SVC
from sklearn.ensemble import RandomForestClassifier
from sklearn.metrics import roc_curve, roc_auc_score
import warnings
warnings.filterwarnings('ignore', category=Warning)
X, y = make_blobs(n_samples=(4000, 500), centers=2, cluster_std=[7.0, 2], random_state=22)
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)
svc = SVC(gamma=.05).fit(X_train, y_train)
fpr, tpr, thresholds = roc_curve(y_test, svc.decision_function(X_test))
plt.figure(figsize=(11, 4.5))
plt.subplots_adjust(left=0.32, right=0.68)
plt.plot(fpr, tpr, label="ROC Curve")
plt.xlabel("FPR")
plt.ylabel("TPR (recall)")
# find threshold closest to zero
close_zero = np.argmin(np.abs(thresholds))
plt.plot(fpr[close_zero], tpr[close_zero], 'o', markersize=10,
label="threshold zero", fillstyle="none", c='k', mew=2)
plt.legend(loc=4)
# plt.show()
rf = RandomForestClassifier(n_estimators=100, random_state=0, max_features=2)
rf.fit(X_train, y_train)
fpr_rf, tpr_rf, thresholds_rf = roc_curve(y_test, rf.predict_proba(X_test)[:, 1])
plt.figure(figsize=(11, 4.5))
plt.subplots_adjust(left=0.32, right=0.68)
plt.plot(fpr, tpr, label="ROC Curve SVC")
plt.plot(fpr_rf, tpr_rf, label="ROC Curve RF")
plt.xlabel("FPR")
plt.ylabel("TPR (recall)")
plt.plot(fpr[close_zero], tpr[close_zero], 'o', markersize=10,
label="threshold zero SVC", fillstyle="none", c='k', mew=2)
close_default_rf = np.argmin(np.abs(thresholds_rf - 0.5))
plt.plot(fpr_rf[close_default_rf], tpr_rf[close_default_rf], '^', markersize=10,
label="threshold 0.5 RF", fillstyle="none", c='k', mew=2)
plt.legend(loc=4)
# plt.show()
rf_auc = roc_auc_score(y_test, rf.predict_proba(X_test)[:, 1])
svc_auc = roc_auc_score(y_test, svc.decision_function(X_test))
print("AUC for Random Forest: {:.3f}".format(rf_auc))
print("AUC for SVC: {:.3f}".format(svc_auc))
digits = load_digits()
y = digits.target == 9
X_train, X_test, y_train, y_test = train_test_split(
digits.data, y, random_state=0)
plt.figure(figsize=(11, 4.5))
plt.subplots_adjust(left=0.32, right=0.68)
for gamma in [1, 0.05, 0.01]:
svc = SVC(gamma=gamma).fit(X_train, y_train)
accuracy = svc.score(X_test, y_test)
auc = roc_auc_score(y_test, svc.decision_function(X_test))
fpr, tpr, _ = roc_curve(y_test, svc.decision_function(X_test))
print("gamma = {:.2f} accuracy = {:.2f} AUC = {:.2f}".format(gamma, accuracy, auc))
plt.plot(fpr, tpr, label="gamma={:.3f}".format(gamma))
plt.xlabel("FPR")
plt.ylabel("TPR")
plt.xlim(-0.01, 1)
plt.ylim(0, 1.02)
plt.legend(loc="best")
plt.show()
| 2.359375
| 2
|
cursor.py
|
avysk/psgo_emitter
| 2
|
12780643
|
<filename>cursor.py
"""Cursor on the board."""
class Cursor():
"""Entity that stores a cursor."""
def __init__(self):
self._point = [0, 0]
@property
def point(self):
"""Return cursor position."""
return tuple(self._point)
def _change(self, axis, delta):
self._point[axis] += delta
self._point[axis] %= 19
def move(self, where):
"""Move cursor left, right, up or down."""
axis = {'left': 0, 'right': 0,
'up': 1, 'down': 1}[where]
delta = {'left': -1, 'down': -1,
'right': 1, 'up': 1}[where]
self._change(axis, delta)
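# Hedged usage sketch (not in the original file): movement wraps around the
# 19x19 board because of the modulo in _change().
if __name__ == "__main__":
    cur = Cursor()
    cur.move('left')   # axis 0 wraps from 0 to 18
    cur.move('up')     # axis 1 goes from 0 to 1
    print(cur.point)   # (18, 1)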
| 3.5625
| 4
|
skmob/preprocessing/clustering.py
|
LLucchini/scikit-mobility
| 0
|
12780644
|
<gh_stars>0
from ..utils import utils, constants
from ..core.trajectorydataframe import *
from sklearn.cluster import DBSCAN
import numpy as np
import inspect
kms_per_radian = 6371.0088  # Earth's mean radius in km; used to convert the
# DBSCAN eps from kilometres to radians for the haversine metric. This is an
# approximation, since the Earth is not a perfect sphere.
def cluster(tdf, cluster_radius_km=0.1, min_samples=1):
"""
Cluster stops corresponding to visits to the same location at different times, based on spatial proximity.
The clustering algorithm used is DBSCAN (by sklearn).
:param tdf: TrajDataFrame
the input TrajDataFrame that should contain the stops, i.e. the output of a `preprocessing.detection` function
:param cluster_radius_km: float (default 0.1)
the parameter `eps` of the function sklearn.cluster.DBSCAN in kilometers
:param min_samples: int (default 1)
the parameter `min_samples` of the function sklearn.cluster.DBSCAN (minimum size of a cluster)
:return: TrajDataFrame
a TrajDataFrame with the additional column 'cluster' containing the cluster labels.
Stops belonging to the same cluster have the same label.
Labels are integers corresponding to the ranks of clusters according to the frequency of visitation
(the most visited cluster has label 0, the second most visited has label 1, ...)
References:
.. [hariharan2004project] Hariharan, Ramaswamy, and <NAME>. "Project Lachesis: parsing and modeling location histories." In International Conference on Geographic Information Science, pp. 106-124. Springer, Berlin, Heidelberg, 2004.
"""
# Sort
tdf = tdf.sort_by_uid_and_datetime()
# Save function arguments and values in a dictionary
frame = inspect.currentframe()
args, _, _, arg_values = inspect.getargvalues(frame)
arguments = dict([('function', cluster.__name__)]+[(i, arg_values[i]) for i in args[1:]])
groupby = []
if utils.is_multi_user(tdf):
groupby.append(constants.UID)
# if utils.is_multi_trajectory(data):
# groupby.append(constants.TID)
stops_df = tdf
# stops_df = detection.stops(data, stop_radius_factor=0.5, \
# minutes_for_a_stop=20.0, spatial_radius=0.2, leaving_time=True)
if len(groupby) > 0:
# Apply cluster stops to each group of points
ctdf = stops_df.groupby(groupby, group_keys=False, as_index=False).apply(_cluster_trajectory,
cluster_radius_km=cluster_radius_km, min_samples=min_samples).reset_index(drop=True)
else:
ctdf = _cluster_trajectory(stops_df, cluster_radius_km=cluster_radius_km, min_samples=min_samples).reset_index(drop=True)
ctdf.parameters = tdf.parameters
ctdf.set_parameter(constants.CLUSTERING_PARAMS, arguments)
return ctdf
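# Hedged usage sketch (comments only; function and column names follow the
# scikit-mobility API as described in the docstring above, but exact signatures
# may differ between versions):
#
#   import skmob
#   from skmob.preprocessing import detection, clustering
#
#   tdf = skmob.TrajDataFrame.from_file('trajectories.csv')
#   stdf = detection.stops(tdf)                  # detect stops first
#   cstdf = clustering.cluster(stdf, cluster_radius_km=0.1, min_samples=1)
#   print(cstdf['cluster'].value_counts())       # label 0 = most visited cluster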
def _cluster_trajectory(tdf, cluster_radius_km, min_samples):
# From dataframe convert to numpy matrix
lat_lng_dtime_other = list(utils.to_matrix(tdf))
columns_order = list(tdf.columns)
l2x, cluster_IDs = _cluster_array(lat_lng_dtime_other, cluster_radius_km, min_samples)
clusters_df = nparray_to_trajdataframe(lat_lng_dtime_other, utils.get_columns(tdf), {})
# Put back to the original order
clusters_df = clusters_df[columns_order]
clusters_df.loc[:, 'cluster'] = cluster_IDs
return clusters_df
def group_by_label(X, labels):
"""
return a dictionary 'l2x' in which the elements 'x' of list 'X'
are grouped according to 'labels'
"""
l2x = dict([(l, []) for l in set(labels)])
for x, l in zip(X, labels):
l2x[l] += [x]
return l2x
def _cluster_array(lat_lng_dtime_other, cluster_radius_km, min_samples, verbose=False):
X = np.array([[point[1], point[0]] for point in lat_lng_dtime_other])
# Compute DBSCAN
eps_rad = cluster_radius_km / kms_per_radian
db = DBSCAN(eps=eps_rad, min_samples=min_samples, algorithm='ball_tree', metric='haversine')
clus = db.fit(np.radians(X))
# core_samples = clus.core_sample_indices_
labels = clus.labels_
# Number of clusters in labels, ignoring noise if present.
n_clusters_ = len(set(labels)) - (1 if -1 in labels else 0)
if verbose:
print('Estimated number of clusters: %d' % n_clusters_)
l02x = group_by_label(X, labels)
# Map cluster index to most frequent location: label2fml
c2mfl = dict([(c[1], i) for i, c in \
enumerate(sorted([[len(v), l] for l, v in l02x.items() if l > -0.5], reverse=True))])
l2x = dict([(c2mfl[k], v) for k, v in l02x.items() if k > -0.5])
try:
l2x[-1] = l02x[-1.]
except KeyError:
pass
return l2x, [c2mfl[k] for k in labels]
| 2.71875
| 3
|
base.py
|
vicenteneto/online-judge-solutions
| 0
|
12780645
|
<reponame>vicenteneto/online-judge-solutions
# -*- coding: utf-8 -*-
'''
Escreva a sua solução aqui
Code your solution here
Escriba su solución aquí
'''
| 1.15625
| 1
|
runtime/python/Lib/asyncio/format_helpers.py
|
hwaipy/InteractionFreeNode
| 207
|
12780646
|
<filename>runtime/python/Lib/asyncio/format_helpers.py<gh_stars>100-1000
import functools
import inspect
import reprlib
import sys
import traceback
from . import constants
def _get_function_source(func):
func = inspect.unwrap(func)
if inspect.isfunction(func):
code = func.__code__
return (code.co_filename, code.co_firstlineno)
if isinstance(func, functools.partial):
return _get_function_source(func.func)
if isinstance(func, functools.partialmethod):
return _get_function_source(func.func)
return None
def _format_callback_source(func, args):
func_repr = _format_callback(func, args, None)
source = _get_function_source(func)
if source:
func_repr += f' at {source[0]}:{source[1]}'
return func_repr
def _format_args_and_kwargs(args, kwargs):
"""Format function arguments and keyword arguments.
Special case for a single parameter: ('hello',) is formatted as ('hello').
"""
# use reprlib to limit the length of the output
items = []
if args:
items.extend(reprlib.repr(arg) for arg in args)
if kwargs:
items.extend(f'{k}={reprlib.repr(v)}' for k, v in kwargs.items())
return '({})'.format(', '.join(items))
def _format_callback(func, args, kwargs, suffix=''):
if isinstance(func, functools.partial):
suffix = _format_args_and_kwargs(args, kwargs) + suffix
return _format_callback(func.func, func.args, func.keywords, suffix)
if hasattr(func, '__qualname__') and func.__qualname__:
func_repr = func.__qualname__
elif hasattr(func, '__name__') and func.__name__:
func_repr = func.__name__
else:
func_repr = repr(func)
func_repr += _format_args_and_kwargs(args, kwargs)
if suffix:
func_repr += suffix
return func_repr
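# Illustrative note (not part of the original module): _format_callback(print, (1, 2), None)
# renders as "print(1, 2)"; wrapping the callable in functools.partial routes through the
# recursive branch above so the pre-bound arguments are appended as a suffix.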
def extract_stack(f=None, limit=None):
"""Replacement for traceback.extract_stack() that only does the
necessary work for asyncio debug mode.
"""
if f is None:
f = sys._getframe().f_back
if limit is None:
# Limit the amount of work to a reasonable amount, as extract_stack()
# can be called for each coroutine and future in debug mode.
limit = constants.DEBUG_STACK_DEPTH
stack = traceback.StackSummary.extract(traceback.walk_stack(f),
limit=limit,
lookup_lines=False)
stack.reverse()
return stack
| 2.59375
| 3
|
bazel/aprutil.bzl
|
arunvc/incubator-pagespeed-mod
| 535
|
12780647
|
aprutil_build_rule = """
cc_library(
name = "aprutil",
srcs = [
"@mod_pagespeed//third_party/aprutil:aprutil_pagespeed_memcache_c",
'buckets/apr_brigade.c',
'buckets/apr_buckets.c',
'buckets/apr_buckets_alloc.c',
'buckets/apr_buckets_eos.c',
'buckets/apr_buckets_file.c',
'buckets/apr_buckets_flush.c',
'buckets/apr_buckets_heap.c',
'buckets/apr_buckets_mmap.c',
'buckets/apr_buckets_pipe.c',
'buckets/apr_buckets_pool.c',
'buckets/apr_buckets_refcount.c',
'buckets/apr_buckets_simple.c',
'buckets/apr_buckets_socket.c',
'crypto/apr_md5.c',
'crypto/getuuid.c',
'crypto/uuid.c',
#'dbm/apr_dbm.c',
#'dbm/apr_dbm_sdbm.c',
#'dbm/sdbm/sdbm.c',
#'dbm/sdbm/sdbm_hash.c',
#'dbm/sdbm/sdbm_lock.c',
#'dbm/sdbm/sdbm_pair.c',
'encoding/apr_base64.c',
'hooks/apr_hooks.c',
#'ldap/apr_ldap_stub.c',
#'ldap/apr_ldap_url.c',
'memcache/apr_memcache.c',
'misc/apr_date.c',
'misc/apr_queue.c',
'misc/apr_reslist.c',
'misc/apr_rmm.c',
'misc/apr_thread_pool.c',
'misc/apu_dso.c',
'misc/apu_version.c',
'strmatch/apr_strmatch.c',
'uri/apr_uri.c',
'xlate/xlate.c',
],
hdrs = [
"@mod_pagespeed//third_party/aprutil:aprutil_pagespeed",
"crypto/crypt_blowfish.h",
#"test/test_apu.h",
#"test/abts_tests.h",
#"test/testutil.h",
#"test/abts.h",
"dbm/sdbm/sdbm_private.h",
"dbm/sdbm/sdbm_pair.h",
"dbm/sdbm/sdbm_tune.h",
"include/apr_siphash.h",
"include/apr_dbm.h",
"include/apr_xlate.h",
"include/apr_ldap_url.h",
"include/apu_version.h",
"include/apr_redis.h",
"include/private/apr_dbd_odbc_v2.h",
"include/private/apr_dbm_private.h",
"include/private/apu_internal.h",
"include/private/apr_dbd_internal.h",
"include/private/apr_crypto_internal.h",
"include/apr_md5.h",
"include/apu_errno.h",
"include/apr_xml.h",
"include/apr_sdbm.h",
"include/apr_md4.h",
"include/apr_hooks.h",
"include/apr_date.h",
"include/apr_reslist.h",
"include/apr_memcache.h",
"include/apr_uuid.h",
"include/apr_base64.h",
"include/apr_sha1.h",
"include/apr_uri.h",
"include/apr_queue.h",
"include/apr_ldap_option.h",
"include/apr_optional.h",
"include/apr_dbd.h",
"include/apr_anylock.h",
"include/apr_strmatch.h",
"include/apr_optional_hooks.h",
"include/apr_thread_pool.h",
"include/apr_buckets.h",
"include/apr_rmm.h",
"include/apr_ldap_rebind.h",
"include/apr_ldap_init.h",
"include/apr_crypto.h",
],
copts = [
"-Ithird_party/aprutil/gen/arch/linux/x64/include/",
"-Ithird_party/aprutil/gen/arch/linux/x64/include/private",
"-Iexternal/aprutil/include/",
"-Iexternal/aprutil/include/private/",
"-Iexternal/aprutil/include/arch/unix/",
"-Iexternal/aprutil/",
"-Iexternal/apr/include/",
"-Iexternal/apr/include/arch/unix/",
"-Ithird_party/apr/gen/arch/linux/x64/include/",
],
deps = [
"@apr//:apr",
],
visibility = ["//visibility:public"],
)
"""
# find | grep .h$ | while read line; do echo "\"$line\","; done
| 1.148438
| 1
|
project_optimizing_public_transportation/consumers/clean_schema.py
|
seoruosa/streaming-data-nanodegree
| 0
|
12780648
|
import requests
SCHEMA_REGISTRY = "http://localhost:8081"
def subjects():
resp = requests.get(
f"{SCHEMA_REGISTRY}/subjects",
headers={"Content-Type": "application/json"}
)
resp.raise_for_status()
return resp.json()
# curl -X DELETE http://localhost:8081/subjects/com.udacity.station.arrivals-value
def delete_subject(subject):
resp = requests.delete(
f"{SCHEMA_REGISTRY}/subjects/{subject}"
)
# surface HTTP errors instead of silently ignoring the response
resp.raise_for_status()
def main():
for subject in subjects():
delete_subject(subject)
if __name__ == '__main__':
main()
| 2.65625
| 3
|
WebBrickGateway/WebBrickGateway/panels/widgets/NumericDisplay.py
|
AndyThirtover/wb_gateway
| 0
|
12780649
|
# Copyright L.P.Klyne 2013
# Licenced under 3 clause BSD licence
# $Id: NumericDisplay.py 2696 2008-09-05 09:33:43Z graham.klyne $
#
# Widget class for simple button on a form
#
from urlparse import urljoin
from turbogears.widgets.base import Widget, CompoundWidget, WidgetsList
from turbogears.widgets.forms import FormField, Button
from EventLib.URI import EventBaseUri
SetNumericDisplayValueEvent = urljoin(EventBaseUri, "SetNumericDisplayValue")
SetNumericDisplayStateEvent = urljoin(EventBaseUri, "SetNumericDisplayState")
class NumericDisplay(FormField):
template = """
<span xmlns:py="http://purl.org/kid/ns#"
py:attrs="attrs"
class="${field_class}"
py:content="str(value)"
InitializeWidget="NumericDisplay_Init"
>
(NumericDisplay)
</span>
"""
params = ["attrs", "value_override"]
params_doc = {'attrs' : 'Dictionary containing extra (X)HTML attributes for'
' the numeric display tag'}
attrs = {}
def update_params(self, d):
super(NumericDisplay, self).update_params(d)
if self.is_named:
d['attrs']['name'] = d["name"]
d['attrs']['id'] = d["field_id"]
d['attrs']['SetNumericDisplayValueEvent'] = SetNumericDisplayValueEvent
d['attrs']['SetNumericDisplayStateEvent'] = SetNumericDisplayStateEvent
if d.get('value_override', None):
d['value'] = d['value_override']
# End.
| 2.171875
| 2
|
evaluation/nmi.py
|
kikaitech/classification_metric_learning
| 93
|
12780650
|
import faiss
import numpy as np
from sklearn.cluster import KMeans
from sklearn.metrics.cluster import normalized_mutual_info_score
from argparse import ArgumentParser
def parse_args():
"""
Helper function parsing the command line options
@retval ArgumentParser
"""
parser = ArgumentParser(description="PyTorch metric learning nmi script")
# Optional arguments for the launch helper
parser.add_argument("--num_workers", type=int, default=4,
help="The number of workers for eval")
parser.add_argument("--snap", type=str,
help="The snapshot to compute nmi")
parser.add_argument("--output", type=str, default="/data1/output/",
help="The output file")
parser.add_argument("--dataset", type=str, default="StanfordOnlineProducts",
help="The dataset for training")
parser.add_argument('--binarize', action='store_true')
return parser.parse_args()
def test_nmi(embeddings, labels, output_file):
unique_labels = np.unique(labels)
kmeans = KMeans(n_clusters=unique_labels.size, random_state=0, n_jobs=-1).fit(embeddings)
nmi = normalized_mutual_info_score(kmeans.labels_, labels)
print("NMI: {}".format(nmi))
return nmi
def test_nmi_faiss(embeddings, labels):
res = faiss.StandardGpuResources()
flat_config = faiss.GpuIndexFlatConfig()
flat_config.device = 0
unique_labels = np.unique(labels)
d = embeddings.shape[1]
kmeans = faiss.Clustering(d, unique_labels.size)
kmeans.verbose = True
kmeans.niter = 300
kmeans.nredo = 10
kmeans.seed = 0
index = faiss.GpuIndexFlatL2(res, d, flat_config)
kmeans.train(embeddings, index)
dists, pred_labels = index.search(embeddings, 1)
pred_labels = pred_labels.squeeze()
nmi = normalized_mutual_info_score(labels, pred_labels)
print("NMI: {}".format(nmi))
return nmi
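# Note (assumption based on general faiss usage, not verified against this repo):
# faiss indexes and clustering generally expect C-contiguous float32 arrays, so
# callers may need embeddings = np.ascontiguousarray(embeddings, dtype=np.float32)
# before calling test_nmi_faiss.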
if __name__ == '__main__':
args = parse_args()
embedding_file = args.snap.replace('.pth', '_embed.npy')
all_embeddings = np.load(embedding_file)
label_file = args.snap.replace('.pth', '_label.npy')
all_labels = np.load(label_file)
nmi = test_nmi_faiss(all_embeddings, all_labels)
| 2.484375
| 2
|