Dataset columns (name : type, value/length range; ⌀ = nullable):
hexsha : string, length 40
size : int64, 3 .. 1.03M
ext : string, 10 classes
lang : string, 1 class
max_stars_repo_path : string, length 3 .. 972
max_stars_repo_name : string, length 6 .. 130
max_stars_repo_head_hexsha : string, length 40 .. 78
max_stars_repo_licenses : list, length 1 .. 10
max_stars_count : int64, 1 .. 191k, ⌀
max_stars_repo_stars_event_min_datetime : string, length 24, ⌀
max_stars_repo_stars_event_max_datetime : string, length 24, ⌀
max_issues_repo_path : string, length 3 .. 972
max_issues_repo_name : string, length 6 .. 130
max_issues_repo_head_hexsha : string, length 40 .. 78
max_issues_repo_licenses : list, length 1 .. 10
max_issues_count : int64, 1 .. 116k, ⌀
max_issues_repo_issues_event_min_datetime : string, length 24, ⌀
max_issues_repo_issues_event_max_datetime : string, length 24, ⌀
max_forks_repo_path : string, length 3 .. 972
max_forks_repo_name : string, length 6 .. 130
max_forks_repo_head_hexsha : string, length 40 .. 78
max_forks_repo_licenses : list, length 1 .. 10
max_forks_count : int64, 1 .. 105k, ⌀
max_forks_repo_forks_event_min_datetime : string, length 24, ⌀
max_forks_repo_forks_event_max_datetime : string, length 24, ⌀
content : string, length 3 .. 1.03M
avg_line_length : float64, 1.13 .. 941k
max_line_length : int64, 2 .. 941k
alphanum_fraction : float64, 0 .. 1
hexsha d285baafc260e6bc15a3cb91a69a445ffa93d910 | size 12,816 | ext py | lang Python
max_stars: blocks/monitoring/evaluators.py | bartvm/blocks-fork | a5376e196e3c2df080a009e8ae8d6bf0efb57f36 | ["BSD-3-Clause"] | count 1 | 2015-10-19T07:54:34.000Z .. 2015-10-19T07:54:34.000Z
max_issues: blocks/monitoring/evaluators.py | bartvm/blocks-fork | a5376e196e3c2df080a009e8ae8d6bf0efb57f36 | ["BSD-3-Clause"] | count null | null .. null
max_forks: blocks/monitoring/evaluators.py | bartvm/blocks-fork | a5376e196e3c2df080a009e8ae8d6bf0efb57f36 | ["BSD-3-Clause"] | count null | null .. null
content:
from collections import OrderedDict
import logging
from picklable_itertools.extras import equizip
import theano
from theano import tensor
from blocks.utils import dict_subset
from blocks.monitoring.aggregation import (_DataIndependent, Mean,
TakeLast, MonitoredQuantity)
from blocks.graph import ComputationGraph
from blocks.utils import reraise_as
logger = logging.getLogger()
class MonitoredQuantityBuffer(object):
"""Intermediate results of aggregating values of monitored-quantity.
Accumulate results for a list of monitored-quantity for every
single batch. Provides initialization and readout routines to
initialize each quantity and capture its accumulated results.
Parameters
----------
quantities : list of :class:`MonitoredQuantity`
The quantity names are used as record names in the logs. Hence, all
the quantity names must be different.
Attributes
----------
requires : list of :class:`~tensor.TensorVariable`
Needed to calculate monitored-quantities.
quantity_names : list of str
Names of quantities.
inputs : list of :class:`~tensor.TensorVariable`
The list of inputs needed for variables in `requires`.
"""
def __init__(self, quantities):
self.quantities = quantities
requires = []
for quantity in quantities:
requires += quantity.requires
self.requires = list(set(requires))
self._initialized = False
self.quantity_names = [q.name for q in self.quantities]
self._computation_graph = ComputationGraph(self.requires)
self.inputs = self._computation_graph.inputs
def initialize(self):
"""Initialize the quantities."""
self._initialized = True
for quantity in self.quantities:
quantity.initialize()
def get_aggregated_values(self):
"""Readout the accumulated values."""
if not self._initialized:
raise Exception("To readout you must first initialize, then"
"process batches!")
else:
ret_vals = [q.readout() for q in self.quantities]
return dict(zip(self.quantity_names, ret_vals))
def accumulate_quantities(self, numerical_values):
"""Accumulate the results for every batch."""
if not self._initialized:
raise Exception("To readout you must first initialize, then"
"process batches!")
else:
for quantity in self.quantities:
quantity.accumulate(
*[numerical_values[self.requires.index(requirement)]
for requirement in quantity.requires])
class AggregationBuffer(object):
"""Intermediate results of aggregating values of Theano variables.
Encapsulates aggregators for a list of Theano variables. Collects
the respective updates and provides initialization and readout
routines.
Parameters
----------
variables : list of :class:`~tensor.TensorVariable`
The variable names are used as record names in the logs. Hence, all
the variable names must be different.
use_take_last : bool
When ``True``, the :class:`TakeLast` aggregation scheme is used
instead of :class:`_DataIndependent` for those variables that
do not require data to be computed.
Attributes
----------
initialization_updates : list of tuples
Initialization updates of the aggregators.
accumulation_updates : list of tuples
Accumulation updates of the aggregators.
readout_variables : dict
A dictionary of record names to :class:`~tensor.TensorVariable`
representing the aggregated values.
inputs : list of :class:`~tensor.TensorVariable`
The list of inputs needed for accumulation.
"""
def __init__(self, variables, use_take_last=False):
self.variables = variables
self.use_take_last = use_take_last
self.variable_names = [v.name for v in self.variables]
if len(set(self.variable_names)) < len(self.variables):
raise ValueError("variables should have different names")
self._computation_graph = ComputationGraph(self.variables)
self.inputs = self._computation_graph.inputs
self._initialized = False
self._create_aggregators()
self._compile()
def _create_aggregators(self):
"""Create aggregators and collect updates."""
self.initialization_updates = []
self.accumulation_updates = []
self.readout_variables = OrderedDict()
for v in self.variables:
logger.debug('variable to evaluate: %s', v.name)
if not hasattr(v.tag, 'aggregation_scheme'):
if not self._computation_graph.has_inputs(v):
scheme = (TakeLast if self.use_take_last
else _DataIndependent)
logger.debug('Using %s aggregation scheme'
' for %s since it does not depend on'
' the data', scheme.__name__, v.name)
v.tag.aggregation_scheme = scheme(v)
else:
logger.debug('Using the default '
' (average over minibatches)'
' aggregation scheme for %s', v.name)
v.tag.aggregation_scheme = Mean(v, 1.0)
aggregator = v.tag.aggregation_scheme.get_aggregator()
self.initialization_updates.extend(
aggregator.initialization_updates)
self.accumulation_updates.extend(aggregator.accumulation_updates)
self.readout_variables[v.name] = aggregator.readout_variable
def _compile(self):
"""Compiles Theano functions.
.. todo::
The current compilation method does not account for updates
attached to `ComputationGraph` elements. Compiling should
be out-sourced to `ComputationGraph` to deal with it.
"""
logger.debug("Compiling initialization and readout functions")
if self.initialization_updates:
self._initialize_fun = theano.function(
[], [], updates=self.initialization_updates)
else:
self._initialize_fun = None
# We need to call `as_tensor_variable` here
# to avoid returning `CudaNdarray`s to the user, which
# happens otherwise under some circumstances (see
# https://groups.google.com/forum/#!topic/theano-users/H3vkDN-Shok)
self._readout_fun = theano.function(
[], [tensor.as_tensor_variable(v)
for v in self.readout_variables.values()])
logger.debug("Initialization and readout functions compiled")
def initialize_aggregators(self):
"""Initialize the aggregators."""
self._initialized = True
if self._initialize_fun is not None:
self._initialize_fun()
def get_aggregated_values(self):
"""Readout the aggregated values."""
if not self._initialized:
raise Exception("To readout you must first initialize, then"
"process batches!")
ret_vals = self._readout_fun()
return dict(equizip(self.variable_names, ret_vals))
class DatasetEvaluator(object):
"""A DatasetEvaluator evaluates many Theano variables or other quantities.
The DatasetEvaluator provides a do-it-all method, :meth:`evaluate`,
which computes values of ``variables`` on a dataset.
Alternatively, methods :meth:`initialize_aggregators`,
:meth:`process_batch`, :meth:`get_aggregated_values` can be used with a
custom loop over data.
The values computed on subsets of the given dataset are aggregated
using the :class:`AggregationScheme`s provided in the
`aggregation_scheme` tags. If no tag is given, the value is **averaged
over minibatches**. However, care is taken to ensure that variables
which do not depend on data are not unnecessarily recomputed.
Parameters
----------
variables : list of :class:`~tensor.TensorVariable` and
:class:`MonitoredQuantity`
The variable names are used as record names in the logs. Hence, all
the names must be different.
Each variable can be tagged with an :class:`AggregationScheme` that
specifies how the value can be computed for a data set by
aggregating minibatches.
updates : list of tuples or :class:`~collections.OrderedDict` or None
:class:`~tensor.TensorSharedVariable` updates to be performed
during evaluation. This parameter is only for Theano variables.
Be careful not to update any model parameters as this is not
        intended to alter your model in any meaningful way. A typical
use case of this option arises when the theano function used
        for evaluation contains a call to :function:`~theano.scan` which
might have returned shared variable updates.
"""
def __init__(self, variables, updates=None):
theano_variables = []
monitored_quantities = []
for variable in variables:
if isinstance(variable, MonitoredQuantity):
monitored_quantities.append(variable)
else:
theano_variables.append(variable)
self.theano_variables = theano_variables
self.monitored_quantities = monitored_quantities
variable_names = [v.name for v in variables]
if len(set(variable_names)) < len(variables):
raise ValueError("variables should have different names")
self.theano_buffer = AggregationBuffer(theano_variables)
self.monitored_quantities_buffer = MonitoredQuantityBuffer(
monitored_quantities)
self.updates = updates
self._compile()
def _compile(self):
"""Compiles Theano functions.
.. todo::
The current compilation method does not account for updates
attached to `ComputationGraph` elements. Compiling should
be out-sourced to `ComputationGraph` to deal with it.
"""
inputs = []
outputs = []
updates = None
if self.theano_buffer.accumulation_updates:
updates = OrderedDict()
updates.update(self.theano_buffer.accumulation_updates)
if self.updates:
updates.update(self.updates)
inputs += self.theano_buffer.inputs
inputs += self.monitored_quantities_buffer.inputs
outputs = self.monitored_quantities_buffer.requires
if inputs != []:
self.unique_inputs = list(set(inputs))
self._accumulate_fun = theano.function(self.unique_inputs,
outputs,
updates=updates)
else:
self._accumulate_fun = None
def initialize_aggregators(self):
self.theano_buffer.initialize_aggregators()
self.monitored_quantities_buffer.initialize()
def process_batch(self, batch):
try:
input_names = [v.name for v in self.unique_inputs]
batch = dict_subset(batch, input_names)
except KeyError:
reraise_as(
"Not all data sources required for monitoring were"
" provided. The list of required data sources:"
" {}.".format(input_names))
if self._accumulate_fun is not None:
numerical_values = self._accumulate_fun(**batch)
self.monitored_quantities_buffer.accumulate_quantities(
numerical_values)
def get_aggregated_values(self):
values = self.theano_buffer.get_aggregated_values()
values.update(
self.monitored_quantities_buffer.get_aggregated_values())
return values
def evaluate(self, data_stream):
"""Compute the variables over a data stream.
Parameters
----------
data_stream : instance of :class:`.DataStream`
The data stream. Only the first epoch of data is used.
Returns
-------
A mapping from record names to the values computed on the provided
dataset.
"""
self.initialize_aggregators()
if self._accumulate_fun is not None:
for batch in data_stream.get_epoch_iterator(as_dict=True):
self.process_batch(batch)
else:
logger.debug(
'Only data independent variables were given,'
                ' will not iterate over the data!')
return self.get_aggregated_values()
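# A minimal usage sketch: evaluate the mean of a toy Theano expression over
# two in-memory batches. ``FakeStream`` merely stands in for a Fuel-style
# data stream; ``get_epoch_iterator(as_dict=True)`` is the only method that
# ``evaluate`` relies on. Names and values here are illustrative only.
if __name__ == '__main__':
    import numpy
    x = tensor.matrix('x')
    mean_x = x.mean()
    mean_x.name = 'mean_x'
    class FakeStream(object):
        def get_epoch_iterator(self, as_dict=False):
            # Two batches whose per-batch means are 1 and 3.
            batches = [numpy.ones((2, 3), dtype=theano.config.floatX),
                       3 * numpy.ones((2, 3), dtype=theano.config.floatX)]
            return iter([{'x': b} for b in batches])
    evaluator = DatasetEvaluator([mean_x])
    # With the default Mean scheme this is the average over minibatches,
    # i.e. (1 + 3) / 2 = 2 for the batches above.
    print(evaluator.evaluate(FakeStream()))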
avg_line_length 39.192661 | max_line_length 78 | alphanum_fraction 0.638811
hexsha 25fbd53e267bed3bd34c846fa57ed5644299a95a | size 17,380 | ext py | lang Python
max_stars: tests/cylinder_tests.py | kauevestena/sanit3Dsdi | 6f12add218a9c64b86e3eb85d865117ac07e7299 | ["MIT"] | count null | null .. null
max_issues: tests/cylinder_tests.py | kauevestena/sanit3Dsdi | 6f12add218a9c64b86e3eb85d865117ac07e7299 | ["MIT"] | count null | null .. null
max_forks: tests/cylinder_tests.py | kauevestena/sanit3Dsdi | 6f12add218a9c64b86e3eb85d865117ac07e7299 | ["MIT"] | count null | null .. null
content:
# import subprocess
# import numpy as np
# import json
# import geopandas as gpd
# from shapely.geometry import LineString
# import numpy as np
# import os
# import pymesh
# from copy import deepcopy
# # normalized_v = v/np.linalg.norm(v)
# def normalize_vec(input_vec):
# try:
# return input_vec/np.linalg.norm(input_vec)
# except:
# print(np.linalg.norm(input_vec),'check for zero norm')
# return input_vec * 0
# # class plane:
# # def __init__(self,pt_onplane:np.array,normal:np.array):
# # self.d = -pt_onplane.dot(normal)
# # self.a = normal[0]
# # self.b = normal[1]
# # self.c = normal[2]
# # def a_point(self,X,Y,Z):
# # return self.a*X + self.b*Y + self.c*Z + self.d
# def plane_as_4vec(normal:np.array,pt_onplane:np.array):
# '''
# plane as 4vec:
# - normal vector
# - point on plane
# '''
# return np.array([*normal,-np.dot(normal,pt_onplane)])
# def pt_onplane(plane4vec,X,Y):
# # plane equation, with z=f(X,Y)
# if not plane4vec[2] < 0.0001:
# Z = - (plane4vec[0]*X+plane4vec[1]*Y+plane4vec[3])/plane4vec[2]
# return np.array([X,Y,Z])
# else:
# Z = X + 0.1*X
# Y = - - (plane4vec[0]*X+plane4vec[2]*Z+plane4vec[3])/plane4vec[1]
# return np.array([X,Y,Z])
# def gdec2rad(gdec):
# return gdec * np.pi/180
# def circumference_3D(center_pt,radius,v1,v2,n_points=32):
# '''
# a circumference in 3D:
# - Center Point
# - The Radius
# thx: https://math.stackexchange.com/a/1184089/307651
# '''
# angles = np.linspace(0,2*np.pi,n_points)
# point_list = []
# for angle in angles:
# # circle_point = center_pt + (radius*np.cos(angle)*v1) + (radius*np.sin(angle)*v2)
# circle_point = center_pt + radius * (np.cos(angle)*v2 + np.sin(angle)*v1)
# point_list.append(circle_point)
# return np.array(point_list)
# def reverse_order_rangelist(a,b):
# l1 = list(range(-a+1,-b+1))
# return list(map(abs,l1))
# def segments(curve):
# '''
# code from
# https://stackoverflow.com/a/62061414/4436950
# thx Georgy
# '''
# return list(map(LineString, zip(curve.coords[:-1], curve.coords[1:])))
# def create_edgeslist(num_vertices,as_np=True):
# edgelist = []
# if num_vertices > 0:
# for i in range(num_vertices-1):
# edgelist.append([i,i+1])
# if as_np:
# return np.array(edgelist)
# else:
# return edgelist
# def get_raster_val_at_geoXY(x,y,rasterpath):
# runstring = f'gdallocationinfo -valonly -geoloc {rasterpath} {x} {y}'
# ret = subprocess.run(runstring,shell=True, stdout=subprocess.PIPE).stdout.decode('utf-8')
# return float(ret.strip('\n'))
# def pymesh_cylinder_for_cityjson(vertices,radius,zero_index=0,num_edges=16,rounding_places=4,custom_attrs=None,tol_for_simplification=None):
# '''
# creates a cylinder using pymesh
# '''
# num_vertices = vertices.shape[0]
# edges_list = create_edgeslist(num_vertices)
# wire_network = pymesh.wires.WireNetwork.create_from_data(vertices, edges_list)
# inflator = pymesh.wires.Inflator(wire_network)
# inflator.set_profile(num_edges)
# inflator.inflate(radius, per_vertex_thickness=False)
# mesh = inflator.mesh
# bbox_min, bbox_max = mesh.bbox
# mins = bbox_min.tolist()
# maxs = bbox_max.tolist()
# points = np.around(mesh.vertices,decimals=rounding_places).tolist()
# max_vert_idx = np.amax(mesh.faces)
# faces = deepcopy(mesh.faces) + zero_index
# faces = faces.tolist()
# faces = [[face] for face in faces]
# meshdata = {'mins':mins,'maxs':maxs,'points':points,'faces':faces,'zero_ref':max_vert_idx}
# if tol_for_simplification:
# # simplify based on tolerance
# mesh,info = pymesh.collapse_short_edges(mesh,tol_for_simplification)
# meshdata['edges_collapsed'] = info["num_edge_collapsed"]
# if custom_attrs:
# # for key in custom_attrs:
# # meshdata[key] = custom_attrs[key]
# meshdata['attributes'] = custom_attrs
# return meshdata
# def city_object_dict(faces,attrs_dict):
# # TODO: swap between MultiSurface/Solid
# city_obj = {
# "geometry": [
# {
# "boundaries": [],
# "lod": 1,
# "type": "MultiSurface"
# }
# ],
# "attributes": {
# },
# "type": "GenericCityObject"
# }
# city_obj['geometry'][0]['boundaries'] = faces
# city_obj['attributes'] = attrs_dict
# return city_obj
# class cylinder3D:
# '''
# DEPRECATED BY
# pymesh_cylinder(vertices,radius,num_edges=16,tol_for_simplification=None)
# maintained only because I don't want to delete it
# class to implement a cylinder in 3D to use it in cityjson
# '''
# def __init__(self,p1,p2,radius,points_per_circle=32):
# # first, its handy:
# self.p_1 = p1
# self.p_2 = p2
# self.number_of_points = points_per_circle*2
# self.circle_points_n = points_per_circle
# # the axis of the cylinder, is the difference vector:
# self.axis = p2 - p1
# # its normalized version will be used as the plane normal
# self.plane_n = normalize_vec(self.axis)
# # the plane as a 4 vec of parameters: [a,b,c,d]
# plane = plane_as_4vec(self.plane_n,p1)
# # any point on the plane
# point_on_plane = pt_onplane(plane,p1[0]+0.1*p1[0],p1[1]-0.1*p1[1])
# # first vector parallel to the plane containing the circle
# vec1_planeparalel = normalize_vec(point_on_plane-p1)
# # second vector parallel to the plane containing the circle
# vec2_planeparalel = normalize_vec(np.cross(vec1_planeparalel,self.plane_n))
# # first circumference
# # it must needs to be divisible by 4
# if points_per_circle % 4 != 0:
# points_per_circle = (points_per_circle // 4) * 4
# # the first circumference
# self.circle1 = circumference_3D(p1,radius,vec1_planeparalel,vec2_planeparalel,points_per_circle)
# # the second contains basically each point summed up with the axis
# self.circle2 = self.circle1 + self.axis
# def check_circles(self):
# centers = (self.p_1,self.p_2)
# for i,circle in enumerate((self.circle1,self.circle2)):
# print('\ncircle ',i+1,':')
# for point in circle:
# print(np.dot(point-centers[i],self.axis))
# print(np.linalg.norm(point-centers[i]))
# def get_vertices_list(self,as_list=False):
# self.justaposed = np.concatenate((self.circle1,self.circle2))
# self.mins = np.min(self.justaposed,axis=0)
# self.maxs = np.max(self.justaposed,axis=0)
# if as_list:
# return list(map(list,self.justaposed))
# else:
# return self.justaposed
# def boundaries_list(self,new_zero=0):
# # first the two circles boundaries
# zero = new_zero
# # first circle ending:
# fce = zero + self.circle_points_n
# c1 = [list(range(zero,fce))]
# # c2 = [list(range(fce,fce+self.circle_points_n))]
# c2 = [reverse_order_rangelist(fce+self.circle_points_n,fce)]
# # for the rest of the faces:
# rectangles = []
# for i in range(zero,fce):
# print(i,fce)
# p0 = i
# p1 = i + fce
# if i+1 == fce:
# p2 = fce
# p3 = zero
# else:
# p2 = i + fce + 1
# p3 = i + 1
# # the current face
# curr = [[p3,p0,p1,p2]]
# rectangles.append(curr)
# # rectangles.append(rectangles[0])
# # rectangles.pop(0)
# # print(rectangles)
# # res_list = []
# # res_list.append(c1)
# # res_list.append(rectangles)
# # res_list.append(c2)
# res_list = [c1,rectangles,c2]
# self.boundaries = res_list
# return res_list
# def as_city_object(self,attrs_dict):
# # city_obj = {name: {
# # "geometry": [
# # {
# # "boundaries": [],
# # "lod": 1,
# # "type": "Solid"
# # }
# # ],
# # "attributes": {
# # },
# # "type": "GenericCityObject"
# # }}
# # city_obj[name]['geometry'][0]['boundaries'].append(self.boundaries)
# # city_obj[name]['attributes'] = attrs_dict
# city_obj = {
# "geometry": [
# {
# "boundaries": [],
# "lod": 1,
# "type": "MultiSurface"
# }
# ],
# "attributes": {
# },
# "type": "GenericCityObject"
# }
# # city_obj['geometry'][0]['boundaries'].append(self.boundaries)
# city_obj['geometry'][0]['boundaries'] = self.boundaries
# city_obj['attributes'] = attrs_dict
# return city_obj
# # # # # THIS WAS AN ATTEMPT, MAINTAINED HERE
# # # # # class city_json_simple2:
# # # # # base = {
# # # # # "type": "CityJSON",
# # # # # "version": "1.0",
# # # # # "CityObjects": {},
# # # # # "vertices": [],
# # # # # "metadata": {
# # # # # "geographicalExtent": [
# # # # # ]}}
# # # # # # cjio validation:
# # # # # # cjio our_test_cylinder.json validate --long > test_cylinder_report.txt
# # # # # mins = []
# # # # # maxs = []
# # # # # point_list = []
# # # # # def __init__(self,axis_vertex_list,radii_list,attrs_list,pts_per_cicle=32):
# # # # # # first we will check if two list are equally-sized
# # # # # # thx: https://stackoverflow.com/a/16720915/4436950
# # # # # ref_len = len(axis_vertex_list)
# # # # # if all(len(lst) == ref_len for lst in [radii_list,attrs_list]):
# # # # # for i,pointpair in enumerate(axis_vertex_list):
# # # # # print('processing segment ',i,' of ',ref_len,' segments')
# # # # # name = f't{i}'
# # # # # p1 = pointpair[0]
# # # # # p2 = pointpair[1]
# # # # # zero = i * 2 * pts_per_cicle
# # # # # cylinder = cylinder3D(p1,p2,radii_list[i],pts_per_cicle)
# # # # # self.point_list.append(cylinder.get_vertices_list(True))
# # # # # boundaries = cylinder.boundaries_list(zero)
# # # # # self.base['CityObjects'][name] = cylinder.as_city_object(attrs_list[i])
# # # # # self.mins.append(cylinder.mins)
# # # # # self.maxs.append(cylinder.maxs)
# # # # # del cylinder
# # # # # abs_max = np.max(np.array(self.maxs),axis=0)
# # # # # abs_min = np.min(np.array(self.mins),axis=0)
# # # # # bbox = [*abs_min,*abs_max]
# # # # # # filling the bounding box:
# # # # # self.base['metadata']['geographicalExtent'] = bbox
# # # # # # filling the vertices:
# # # # # # self.base['vertices'] = list(map(list,self.point_list))
# # # # # for i,point in enumerate(self.point_list[0]):
# # # # # self.base['vertices'].append(point)
# # # # # # self.base['vertices'] = [[point.tolist()] for point in self.point_list]
# # # # # # self.plist = [[point] for point in self.point_list]
# # # # # else:
# # # # # print('input lists are in different sizes, check your data!!!')
# # # # # def dump_to_file(self,outpath):
# # # # # with open(outpath,'w+') as writer:
# # # # # json.dump(self.base,writer,indent=2)
# ##### OUR BIG CLASS:
# class city_json_simple:
# base = {
# "type": "CityJSON",
# "version": "1.0",
# "CityObjects": {},
# "vertices": [],
# "metadata": {
# "geographicalExtent": [
# ]}}
# # cjio validation:
# # cjio our_test_cylinder.json validate --long > test_cylinder_report.txt
# mins = []
# maxs = []
# point_list = []
# def __init__(self,cylinderlist,EPSG):
# # SETTING epsg
# self.base["metadata"]["referenceSystem"] = f"urn:ogc:def:crs:EPSG::{EPSG}"
# # first we will check if two list are equally-sized
# # thx: https://stackoverflow.com/a/16720915/4436950
# total_cylinders = len(cylinderlist)
# for i,cylinder in enumerate(cylinderlist):
# print('writing cylinder',i,' of ',total_cylinders,' segments')
# name = f't{i}'
# self.base['CityObjects'][name] = city_object_dict(cylinder['faces'],cylinder['attributes'])
# self.mins.append(cylinder['mins'])
# self.maxs.append(cylinder['maxs'])
# self.base['vertices'] += cylinder['points']
# abs_max = np.max(np.array(self.maxs),axis=0)
# abs_min = np.min(np.array(self.mins),axis=0)
# bbox = [*abs_min,*abs_max]
# # filling the bounding box:
# self.base['metadata']['geographicalExtent'] = bbox
# # filling the vertices:
# # self.base['vertices'] = list(map(list,self.point_list))3
# # for i,point in enumerate(self.point_list[0]):
# # self.base['vertices'].append(point)
# # self.base['vertices'] = [[point.tolist()] for point in self.point_list]
# # self.plist = [[point] for point in self.point_list]
# def dump_to_file(self,outpath):
# with open(outpath,'w+') as writer:
# json.dump(self.base,writer)
# # # ###############################################################
# # # # the points
# # # p1 = np.array([1,1,1])
# # # p2 = np.array([5,5,1])
# # # p3 = np.array([6,7,6])
# # # # c1 = cylinder3D(p1,p2,10)
# # # # p_list = c1.get_vertices_list(False)
# # # # v_list = c1.boundaries_list(64)
# # # # print(v_list[0])
# # # # print(c1.maxs)
# # # # print(c1.mins)
# # # # print(c1.justaposed)
# # # lines_list = [(p1,p2)]
# # # radius_list = [1]
# # # attrs_list = [{"function": "something"}]
# # # builder = city_json_simple(lines_list,radius_list,attrs_list,4)
# # # # print(builder.base)
# # # builder.dump_to_file('our_test_cylinder.json')
# pipes_filepath = '/home/kaue/sanit3Dsdi/tests/sample_rede_agua_tratada.geojson'
# rasterpath = os.path.join(os.environ['HOME'],'sanit3Dsdi/tests/test_vrt_dtm.vrt')
# as_gdf = gpd.read_file(pipes_filepath)
# material_key = 'MATERIAL'
# diameter_key = 'DIAMETRO'
# print(as_gdf[diameter_key].unique())
# meshinfos = []
# n_entities = as_gdf.shape[0]
# zeroindex = 0
# with open('cylinder_report.txt','w+') as writer:
# for i,feature in enumerate(as_gdf.geometry):
# if as_gdf[diameter_key][i] != '':
# if feature.geom_type == 'LineString':
# as_array = np.array(feature)
# else:
# lines = []
# for line in feature:
# lines.append(np.array(line))
# as_array = np.concatenate(lines,axis=0)
# Z_list = []
# for point in as_array:
# Z = get_raster_val_at_geoXY(*point,rasterpath) - 2
# Z_list.append(Z)
# # as_array = np.concatenate((as_array,np.array(Z_list)[:,-1:]),axis=1)
# vertices = np.column_stack((as_array,np.array(Z_list)))
# radius = float(as_gdf[diameter_key][i]) / 200 #200 transforms into radius in centimeters
# customattrs = {"diametro":as_gdf[diameter_key][i],'material':as_gdf[material_key][i]}
# try:
# print('cylinder',i,' of ',n_entities,' with zero index: ',zeroindex)
# cylinder_meshinfo = pymesh_cylinder_for_cityjson(vertices,radius,zero_index=zeroindex,custom_attrs=customattrs)
# zeroindex += (cylinder_meshinfo['zero_ref'] + 500 )
# except Exception as e:
# writer.write(f'\n{i}')
# writer.write(feature.wkt)
# writer.write(str(e))
# meshinfos.append(cylinder_meshinfo)
# if i > 50:
# break
# # "referenceSystem":"urn:ogc:def:crs:EPSG::31984"
# builder = city_json_simple(meshinfos,31984)
# outpath = os.path.join(os.environ['HOME'],'data/sanit3d_out/pipery01_50.json')
# print(outpath)
# builder.dump_to_file(outpath)
avg_line_length 27.5 | max_line_length 142 | alphanum_fraction 0.528884
hexsha 89ad019847bfc14c71a5683913afeaa3123cc30c | size 761 | ext py | lang Python
max_stars: sonarr_exporter/__init__.py | dr1s/sonarr_exporter.py | 46361ec6ad93489b6ba2bd2815e1dad8c1b39c43 | ["Apache-2.0"] | count 3 | 2019-09-15T17:02:50.000Z .. 2020-04-09T09:13:42.000Z
max_issues: sonarr_exporter/__init__.py | dr1s/sonarr_exporter.py | 46361ec6ad93489b6ba2bd2815e1dad8c1b39c43 | ["Apache-2.0"] | count 1 | 2018-12-22T00:16:30.000Z .. 2019-06-17T17:11:06.000Z
max_forks: sonarr_exporter/__init__.py | dr1s/sonarr_exporter.py | 46361ec6ad93489b6ba2bd2815e1dad8c1b39c43 | ["Apache-2.0"] | count null | null .. null
content:
#!/usr/bin/env python3
#
# Copyright 2018 Daniel Schmitz
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from sonarr_exporter import main
__NAME__ = 'sonarr_exporter'
__VERSION__ = '0.1.dev0'
__AUTHOR__ = 'Daniel Schmitz'
if __name__ == '__main__':
main()
avg_line_length 30.44 | max_line_length 74 | alphanum_fraction 0.751643
hexsha 50cfe4cbd4e8014a860556dee6ed883de0e11858 | size 13,464 | ext py | lang Python
max_stars: invenio_rdm_records/services/schemas/metadata.py | mb-wali/invenio-rdm-records | df2f2ae047ad262d189b2cb72049263a272039bc | ["MIT"] | count null | null .. null
max_issues: invenio_rdm_records/services/schemas/metadata.py | mb-wali/invenio-rdm-records | df2f2ae047ad262d189b2cb72049263a272039bc | ["MIT"] | count null | null .. null
max_forks: invenio_rdm_records/services/schemas/metadata.py | mb-wali/invenio-rdm-records | df2f2ae047ad262d189b2cb72049263a272039bc | ["MIT"] | count null | null .. null
content:
# -*- coding: utf-8 -*-
#
# Copyright (C) 2020 CERN.
# Copyright (C) 2020 Northwestern University.
# Copyright (C) 2021 Graz University of Technology.
#
# Invenio-RDM-Records is free software; you can redistribute it and/or modify
# it under the terms of the MIT License; see LICENSE file for more details.
"""RDM record schemas."""
from functools import partial
from urllib import parse
from flask import current_app
from flask_babelex import lazy_gettext as _
from marshmallow import Schema, ValidationError, fields, post_load, validate, \
validates, validates_schema
from marshmallow_utils.fields import EDTFDateString, IdentifierSet, \
SanitizedHTML, SanitizedUnicode
from marshmallow_utils.schemas import GeometryObjectSchema, IdentifierSchema
from werkzeug.local import LocalProxy
record_personorg_schemes = LocalProxy(
lambda: current_app.config["RDM_RECORDS_PERSONORG_SCHEMES"]
)
record_identifiers_schemes = LocalProxy(
lambda: current_app.config["RDM_RECORDS_IDENTIFIERS_SCHEMES"]
)
record_references_schemes = LocalProxy(
lambda: current_app.config["RDM_RECORDS_REFERENCES_SCHEMES"]
)
record_location_schemes = LocalProxy(
lambda: current_app.config["RDM_RECORDS_LOCATION_SCHEMES"]
)
def _not_blank(error_msg):
"""Returns a non-blank validation rule with custom error message."""
return validate.Length(min=1, error=error_msg)
def _valid_url(error_msg):
"""Returns a URL validation rule with custom error message."""
return validate.URL(error=error_msg)
def locale_validation(value, field_name):
"""Validates the locale value."""
valid_locales = current_app.extensions['invenio-i18n'].get_locales()
valid_locales_code = [v.language for v in valid_locales]
if value:
if len(value.keys()) > 1:
raise ValidationError(_("Only one value is accepted."), field_name)
elif list(value.keys())[0] not in valid_locales_code:
raise ValidationError(_("Not a valid locale."), field_name)
class AffiliationSchema(Schema):
"""Affiliation of a creator/contributor."""
id = SanitizedUnicode()
name = SanitizedUnicode()
@validates_schema
def validate_affiliation(self, data, **kwargs):
"""Validates that either id either name are present."""
id_ = data.get("id")
name = data.get("name")
if id_:
data = {"id": id_}
elif name:
data = {"name": name}
if not id_ and not name:
raise ValidationError(
_("An existing id or a free text name must be present"),
"affiliations"
)
class SubjectSchema(Schema):
"""Subject schema."""
id = SanitizedUnicode()
subject = SanitizedUnicode()
scheme = SanitizedUnicode()
@validates_schema
def validate_subject(self, data, **kwargs):
"""Validates that either id either name are present."""
id_ = data.get("id")
subject = data.get("subject")
if id_:
data = {"id": id_}
elif subject:
data = {"subject": subject}
if not id_ and not subject:
raise ValidationError(
_("An existing id or a free text subject must be present"),
"subjects"
)
class PersonOrOrganizationSchema(Schema):
"""Person or Organization schema."""
NAMES = [
"organizational",
"personal"
]
type = SanitizedUnicode(
required=True,
validate=validate.OneOf(
choices=NAMES,
error=_('Invalid value. Choose one of {NAMES}.')
.format(NAMES=NAMES)
),
error_messages={
# [] needed to mirror error message above
"required": [
_('Invalid value. Choose one of {NAMES}.').format(
NAMES=NAMES)]
}
)
name = SanitizedUnicode()
given_name = SanitizedUnicode()
family_name = SanitizedUnicode()
identifiers = IdentifierSet(
fields.Nested(partial(
IdentifierSchema,
# It is intended to allow org schemes to be sent as personal
            # and vice versa. This is a trade-off learnt from running
# Zenodo in production.
allowed_schemes=record_personorg_schemes
))
)
@validates_schema
def validate_names(self, data, **kwargs):
"""Validate names based on type."""
if data['type'] == "personal":
if not data.get('family_name'):
messages = [_("Family name must be filled.")]
raise ValidationError({
"family_name": messages
})
elif data['type'] == "organizational":
if not data.get('name'):
messages = [_('Name cannot be blank.')]
raise ValidationError({"name": messages})
@post_load
def update_names(self, data, **kwargs):
"""Update names for organization / person.
Fill name from given_name and family_name if person.
Remove given_name and family_name if organization.
"""
if data["type"] == "personal":
names = [data.get("family_name"), data.get("given_name")]
data["name"] = ", ".join([n for n in names if n])
elif data['type'] == "organizational":
if 'family_name' in data:
del data['family_name']
if 'given_name' in data:
del data['given_name']
return data
class VocabularySchema(Schema):
"""Invenio Vocabulary schema."""
id = SanitizedUnicode(required=True)
title = fields.Dict(dump_only=True)
class CreatorSchema(Schema):
"""Creator schema."""
person_or_org = fields.Nested(PersonOrOrganizationSchema, required=True)
role = fields.Nested(VocabularySchema)
affiliations = fields.List(fields.Nested(AffiliationSchema))
class ContributorSchema(Schema):
"""Contributor schema."""
person_or_org = fields.Nested(PersonOrOrganizationSchema, required=True)
role = fields.Nested(VocabularySchema, required=True)
affiliations = fields.List(fields.Nested(AffiliationSchema))
class TitleSchema(Schema):
"""Schema for the additional title."""
title = SanitizedUnicode(required=True, validate=validate.Length(min=3))
type = fields.Nested(VocabularySchema, required=True)
lang = fields.Nested(VocabularySchema)
class DescriptionSchema(Schema):
"""Schema for the additional descriptions."""
description = SanitizedHTML(required=True,
validate=validate.Length(min=3))
type = fields.Nested(VocabularySchema, required=True)
lang = fields.Nested(VocabularySchema)
def _is_uri(uri):
try:
parse.urlparse(uri)
return True
except AttributeError:
return False
class PropsSchema(Schema):
"""Schema for the URL schema."""
url = SanitizedUnicode(
validate=_valid_url(_('Not a valid URL.'))
)
scheme = SanitizedUnicode()
class RightsSchema(Schema):
"""License schema."""
id = SanitizedUnicode()
title = fields.Dict()
description = fields.Dict()
props = fields.Nested(PropsSchema)
link = SanitizedUnicode(
validate=_valid_url(_('Not a valid URL.'))
)
@validates("title")
def validate_title(self, value):
"""Validates that title contains only one valid locale."""
locale_validation(value, "title")
@validates("description")
def validate_description(self, value):
"""Validates that description contains only one valid locale."""
locale_validation(value, "description")
@validates_schema
def validate_rights(self, data, **kwargs):
"""Validates that id xor name are present."""
id_ = data.get("id")
title = data.get("title")
if not id_ and not title:
raise ValidationError(
_("An existing id or a free text title must be present"),
"rights"
)
elif id_ and len(data.values()) > 1:
raise ValidationError(
_("Only an existing id or free text title/description/link" +
" is accepted, but not both cases at the same time"),
"rights"
)
class DateSchema(Schema):
"""Schema for date intervals."""
date = EDTFDateString(required=True)
type = fields.Nested(VocabularySchema, required=True)
description = fields.Str()
class RelatedIdentifierSchema(IdentifierSchema):
"""Related identifier schema."""
def __init__(self, **kwargs):
"""Constructor."""
super().__init__(allowed_schemes=record_identifiers_schemes, **kwargs)
relation_type = fields.Nested(VocabularySchema)
resource_type = fields.Nested(VocabularySchema)
@validates_schema
def validate_related_identifier(self, data, **kwargs):
"""Validate the related identifiers."""
relation_type = data.get("relation_type")
errors = dict()
if not relation_type:
errors['relation_type'] = self.error_messages["required"]
if errors:
raise ValidationError(errors)
class FunderSchema(Schema):
"""Funder schema."""
name = SanitizedUnicode(
required=True,
validate=_not_blank(_('Name cannot be blank.'))
)
scheme = SanitizedUnicode()
identifier = SanitizedUnicode()
class AwardSchema(Schema):
"""Award schema."""
title = SanitizedUnicode(
required=True,
validate=_not_blank(_('Name cannot be blank.'))
)
number = SanitizedUnicode(
required=True,
validate=_not_blank(_('Name cannot be blank.'))
)
scheme = SanitizedUnicode()
identifier = SanitizedUnicode()
class FundingSchema(Schema):
"""Funding schema."""
funder = fields.Nested(FunderSchema)
award = fields.Nested(AwardSchema)
@validates_schema
def validate_data(self, data, **kwargs):
"""Validate either funder or award is present."""
funder = data.get('funder')
award = data.get('award')
if not funder and not award:
raise ValidationError(
{"funding": _("At least award or funder should be present.")})
class ReferenceSchema(IdentifierSchema):
"""Reference schema."""
def __init__(self, **kwargs):
"""Constructor."""
super().__init__(allowed_schemes=record_references_schemes,
identifier_required=False, **kwargs)
reference = SanitizedUnicode(required=True)
class PointSchema(Schema):
"""Point schema."""
lat = fields.Number(required=True)
lon = fields.Number(required=True)
class LocationSchema(Schema):
"""Location schema."""
geometry = fields.Nested(GeometryObjectSchema)
place = SanitizedUnicode()
identifiers = fields.List(
fields.Nested(partial(
IdentifierSchema, allowed_schemes=record_location_schemes))
)
description = SanitizedUnicode()
@validates_schema
def validate_data(self, data, **kwargs):
"""Validate identifier based on type."""
if not data.get('geometry') and not data.get('place') and \
not data.get('identifiers') and not data.get('description'):
raise ValidationError({
"locations": _("At least one of ['geometry', 'place', \
                    'identifiers', 'description'] should be present.")
})
class FeatureSchema(Schema):
"""Location feature schema."""
features = fields.List(fields.Nested(LocationSchema))
class MetadataSchema(Schema):
"""Schema for the record metadata."""
field_load_permissions = {
# TODO: define "can_admin" action
}
field_dump_permissions = {
# TODO: define "can_admin" action
}
# Metadata fields
resource_type = fields.Nested(VocabularySchema, required=True)
creators = fields.List(
fields.Nested(CreatorSchema),
required=True,
validate=validate.Length(
min=1, error=_("Missing data for required field.")
)
)
title = SanitizedUnicode(required=True, validate=validate.Length(min=3))
additional_titles = fields.List(fields.Nested(TitleSchema))
publisher = SanitizedUnicode()
publication_date = EDTFDateString(required=True)
subjects = fields.List(fields.Nested(SubjectSchema))
contributors = fields.List(fields.Nested(ContributorSchema))
dates = fields.List(fields.Nested(DateSchema))
languages = fields.List(fields.Nested(VocabularySchema))
# alternate identifiers
identifiers = IdentifierSet(
fields.Nested(partial(
IdentifierSchema, allowed_schemes=record_identifiers_schemes))
)
related_identifiers = fields.List(fields.Nested(RelatedIdentifierSchema))
sizes = fields.List(SanitizedUnicode(
validate=_not_blank(_('Size cannot be a blank string.'))))
formats = fields.List(SanitizedUnicode(
validate=_not_blank(_('Format cannot be a blank string.'))))
version = SanitizedUnicode()
rights = fields.List(fields.Nested(RightsSchema))
description = SanitizedHTML(validate=validate.Length(min=3))
additional_descriptions = fields.List(fields.Nested(DescriptionSchema))
locations = fields.Nested(FeatureSchema)
funding = fields.List(fields.Nested(FundingSchema))
references = fields.List(fields.Nested(ReferenceSchema))
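# A minimal usage sketch, assuming an Invenio/Flask application context: the
# schemas above read their allowed-scheme lists from ``current_app.config``,
# so they are meant to be loaded inside ``app.app_context()``. The field
# values below are illustrative only.
#
#     with app.app_context():
#         data = {
#             "resource_type": {"id": "dataset"},
#             "creators": [{"person_or_org": {
#                 "type": "personal",
#                 "given_name": "Jane",
#                 "family_name": "Doe",
#             }}],
#             "title": "A sample record",
#             "publication_date": "2021-01-01",
#         }
#         metadata = MetadataSchema().load(data)
#         # update_names fills person_or_org["name"] as "Doe, Jane".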
avg_line_length 30.392777 | max_line_length 79 | alphanum_fraction 0.645276
hexsha b32497499203c0a3e3539bd25903a1ab41581c9d | size 3,529 | ext py | lang Python
max_stars: silk/views/requests.py | JoshData/silk | f025dff691d8ce11a17e4a9499084e1c05598450 | ["MIT"] | count 1 | 2016-02-16T09:24:26.000Z .. 2016-02-16T09:24:26.000Z
max_issues: silk/views/requests.py | JoshData/silk | f025dff691d8ce11a17e4a9499084e1c05598450 | ["MIT"] | count null | null .. null
max_forks: silk/views/requests.py | JoshData/silk | f025dff691d8ce11a17e4a9499084e1c05598450 | ["MIT"] | count null | null .. null
content:
from django.core.context_processors import csrf
from django.db.models import Sum
from django.shortcuts import render_to_response
from django.utils.decorators import method_decorator
from django.views.generic import View
from silk.profiling.dynamic import _get_module
from silk.auth import login_possibly_required, permissions_possibly_required
from silk.models import Request
from silk.request_filters import BaseFilter, filters_from_request
__author__ = 'mtford'
class RequestsView(View):
show = [5, 10, 25, 100, 250]
default_show = 25
order_by = ['Recent', 'Path', 'Num. Queries', 'Time', 'Time on queries']
    default_order_by = 'Recent'
session_key_request_filters = 'request_filters'
def _get_paths(self):
return [''] + [x['path'] for x in Request.objects.values('path').distinct()]
def _get_objects(self, show=None, order_by=None, path=None, filters=None):
if not filters:
filters = []
if not show:
show = self.default_show
query_set = Request.objects.all()
if not order_by:
            order_by = self.default_order_by
if order_by == 'Recent':
query_set = query_set.order_by('-start_time')
elif order_by == 'Path':
query_set = query_set.order_by('-path')
elif order_by == 'Num. Queries':
query_set = query_set.order_by('-num_sql_queries')
elif order_by == 'Time':
query_set = query_set.order_by('-time_taken')
elif order_by == 'Time on queries':
query_set = query_set.annotate(db_time=Sum('queries__time_taken')).order_by('-db_time')
else:
raise RuntimeError('Unknown order_by: "%s"' % order_by)
if path:
query_set = query_set.filter(path=path)
for f in filters:
query_set = f.contribute_to_query_set(query_set)
query_set = query_set.filter(f)
return list(query_set[:show])
def _create_context(self, request):
show = request.GET.get('show', self.default_show)
        order_by = request.GET.get('order_by', self.default_order_by)
if show:
show = int(show)
path = request.GET.get('path', None)
raw_filters = request.session.get(self.session_key_request_filters, {})
context = {
'show': show,
'order_by': order_by,
'request': request,
'options_show': self.show,
'options_order_by': self.order_by,
'options_paths': self._get_paths(),
'view_names': [x[0] for x in Request.objects.values_list('view_name').distinct()],
'filters': raw_filters
}
context.update(csrf(request))
if path:
context['path'] = path
context['results'] = self._get_objects(show, order_by, path, filters=[BaseFilter.from_dict(x) for _, x in raw_filters.items()])
return context
@method_decorator(login_possibly_required)
@method_decorator(permissions_possibly_required)
def get(self, request):
return render_to_response('silk/requests.html', self._create_context(request))
@method_decorator(login_possibly_required)
@method_decorator(permissions_possibly_required)
def post(self, request):
filters = filters_from_request(request)
request.session[self.session_key_request_filters] = {ident: f.as_dict() for ident, f in filters.items()}
return render_to_response('silk/requests.html', self._create_context(request))
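# Usage note (illustrative, not taken from the original module): the GET
# parameters consumed by _create_context map directly onto the options above,
# e.g. a URL of the form /silk/requests/?show=25&order_by=Time&path=/some/path/
# while a POST stores the submitted filters in the session under
# 'request_filters', which later GETs re-apply through BaseFilter.from_dict.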
avg_line_length 39.211111 | max_line_length 135 | alphanum_fraction 0.65571
hexsha 8253e880e6ea28c0a08387bb10cf6c1cce4879f2 | size 105 | ext py | lang Python
max_stars: ulmo/usace/rivergages/__init__.py | cameronbracken/ulmo | 4f38995843a760e0fca4e9f07c21ffeb4552f29c | ["BSD-3-Clause"] | count null | null .. null
max_issues: ulmo/usace/rivergages/__init__.py | cameronbracken/ulmo | 4f38995843a760e0fca4e9f07c21ffeb4552f29c | ["BSD-3-Clause"] | count null | null .. null
max_forks: ulmo/usace/rivergages/__init__.py | cameronbracken/ulmo | 4f38995843a760e0fca4e9f07c21ffeb4552f29c | ["BSD-3-Clause"] | count null | null .. null
content:
from core import (
get_stations,
get_station_data,
get_station_parameters,
)
avg_line_length 17.5 | max_line_length 31 | alphanum_fraction 0.609524
hexsha c31b2e66bfcc9e5feb39a0e8e1f9c18b5bdeb64d | size 11,245 | ext py | lang Python
max_stars: board.py | cs10/Pytris-FA20-Skeleton | 9b70ae644b6fc0c39297bc08b4bb1a6b883f7502 | ["BSD-2-Clause"] | count null | null .. null
max_issues: board.py | cs10/Pytris-FA20-Skeleton | 9b70ae644b6fc0c39297bc08b4bb1a6b883f7502 | ["BSD-2-Clause"] | count null | null .. null
max_forks: board.py | cs10/Pytris-FA20-Skeleton | 9b70ae644b6fc0c39297bc08b4bb1a6b883f7502 | ["BSD-2-Clause"] | count null | null .. null
content:
class Board:
""" An object to represent a 2-Dimensional rectangular board
"""
def __init__(self, num_cols=10, num_rows=20, cell_item=None, grid=None):
""" Create a Board instance that has num cols and num rows.
The 2D board is represented with a single list, if the board looks like:
col col col
0 1 2
-------------
| 0 | 1 | 2 | row 0
----+---+----
| 3 | 4 | 5 | row 1
-------------
Where num cols = 3, num rows = 2
Then the underlying representation looks like:
[0, 1, 2, 3, 4, 5]
Parameters
----------
num_cols (int, required):
number of columns. Defaults to 10.
num_rows (int, required):
number of rows. Defaults to 20.
cell_item (any, optional):
create default items. Defaults to None.
grid (list[any], optional): a list to create the underlying board representation.
However len(grid) = num_cols * num_rows. Defaults to None.
"""
assert num_cols is not None and num_rows is not None
assert type(num_cols) == int and type(num_rows) == int
assert num_cols >= 0 and num_rows >= 0
self._num_rows = num_rows
self._num_cols = num_cols
if grid:
assert num_cols * num_rows == len(grid)
self._grid = grid[:]
else:
self._grid = [cell_item for _ in range(num_cols * num_rows)]
# ---------------------------------------------------------------------------- #
# --------------------------------- Required --------------------------------- #
# ---------------------------------------------------------------------------- #
def get_col(self, x):
"""Get a copy of column x
Parameters
----------
x (int):
column number
Returns
-------
list[any]:
a list copy of column x
>>> board = Board(3, 2, grid=[7, 6, 3, 9, 5, 2])
>>> print(board)
=====
7 6 3
9 5 2
=====
>>> board.get_col(1)
[6, 5]
>>> board2 = Board(2, 2, grid=[1, 0, 4, 3])
>>> print(board2)
===
1 0
4 3
===
>>> board2.get_col(0)
[1, 4]
"""
# TODO: your solution here
def get_item(self, x, y):
"""Get the item at coordinate (x, y)
Parameters
----------
x (int):
column number
y (int):
row number
Returns
-------
any:
actual item
>>> board = Board(3, 2, grid=[5, 4, 1, 3, 0, 6])
>>> print(board)
=====
5 4 1
3 0 6
=====
>>> [board.get_item(x, y) for y in range(2) for x in range(3)]
[5, 4, 1, 3, 0, 6]
>>> board2 = Board(4, 1, grid=[9, 2, 4, 1])
>>> print(board2)
=======
9 2 4 1
=======
>>> [board2.get_item(x, y) for y in range(1) for x in range(4)]
[9, 2, 4, 1]
"""
# TODO: your solution here
def set_item(self, x, y, item):
"""Overwrite the item at (x, y)
Parameters
----------
x (int):
column number
y (int):
row number
item (any):
new item
>>> board = Board(3, 2, grid=[i for i in range(6)])
>>> print(board)
=====
0 1 2
3 4 5
=====
>>> board.set_item(0, 1, 30)
>>> board.set_item(2, 0, 11)
>>> print(board)
=====
0 1 11
30 4 5
=====
"""
# TODO: your solution here
def insert_row_at(self, y, lst):
"""Insert lst as new row at row y. Increment num_rows by 1
Parameters
----------
y (int):
row number
lst (list[any]):
list of row items
>>> board = Board(3, 2, grid=list(range(6)))
>>> print(board)
=====
0 1 2
3 4 5
=====
>>> board.insert_row_at(1, [6, 7, 8])
>>> print(board)
=====
0 1 2
6 7 8
3 4 5
=====
>>> board.get_num_rows()
3
"""
self._num_rows += 1 # DO NOT touch this line
# TODO: your solution here
def valid_coordinate(self, coordinate):
"""Check if coordinate (x, y) is within the board
Parameters
----------
coordinate (tuple(x, y)):
an (x: int, y: int) coordinate
Returns
-------
bool:
if the coordinate is valid within *this* board
>>> board = Board(3, 2, grid=list(range(6)))
>>> print(board)
=====
0 1 2
3 4 5
=====
>>> sum([board.valid_coordinate((x, y)) for x in range(3) for y in range(2)]) == 6
True
>>> board.valid_coordinate((2, 1))
True
>>> board.valid_coordinate((1, 1))
True
>>> board.valid_coordinate((0, 2))
False
>>> board.valid_coordinate((0, -1))
False
>>> board.valid_coordinate((-1, 0))
False
>>> board.valid_coordinate((3, 0))
False
"""
# TODO: your solution here
# ---------------------------------------------------------------------------- #
# --------------------------- Helpers: Not Required -------------------------- #
# ---------------------------------------------------------------------------- #
def get_row(self, y):
"""Get a copy of row y
Parameters
----------
y (int):
row number
Returns
-------
list[any]:
A list copy of row y
>>> board = Board(3, 2, grid=[i for i in range(6)])
>>> print(board)
=====
0 1 2
3 4 5
=====
>>> board.get_row(0)
[0, 1, 2]
>>> board.get_row(1)
[3, 4, 5]
"""
assert 0 <= y < self._num_rows, f'Invalid y: {y}'
start_index = y * self._num_cols
return self._grid[start_index : start_index + self._num_cols]
def delete_row(self, y):
"""Delete row y and decremet num_rows count by 1
Parameters
----------
y (int):
row number
>>> board = Board(3, 3, grid=list(range(9)))
>>> print(board)
=====
0 1 2
3 4 5
6 7 8
=====
>>> board.delete_row(1)
>>> print(board)
=====
0 1 2
6 7 8
=====
>>> board.get_num_rows()
2
"""
index_start = y * self._num_cols
del self._grid[index_start : index_start + self._num_cols]
self._num_rows -= 1
def index_to_coordinate(self, index):
"""Convert an index to (x, y) coordinate
Parameters
----------
index (int):
index in underlying list representation
Returns
-------
tuple[int, int]:
tuple coordinate
>>> board = Board(3, 2, grid=[i for i in range(6)])
>>> print(board)
=====
0 1 2
3 4 5
=====
>>> board.index_to_coordinate(5)
(2, 1)
"""
assert 0 <= index < len(self._grid), f'Invalid index: {index}'
return (index % self._num_cols, index // self._num_cols)
def filter_coordinates(self, fn):
"""Extract coordinates of all item that satisfy fn and returns
a list of these coordinates in tuples
Parameters
----------
fn (any -> bool):
a boolean function that operates on items of *this* board
Returns
-------
list[tuple[int, int]]:
a list of tuple coordinates
>>> board = Board(3, 3, grid=[i for i in range(9)])
>>> board.filter_coordinates(lambda x: x % 2 == 1)
[(1, 0), (0, 1), (2, 1), (1, 2)]
"""
return [(i % self._num_cols, i // self._num_cols) \
for i, item in enumerate(self._grid) if fn(item)]
def update_grid(self, new_grid):
""" Overwrite existing underlying board with a new board
"""
assert len(new_grid) == len(self._grid), 'unequal grid lengths'
self._grid = new_grid
def get_num_rows(self):
return self._num_rows
def get_num_cols(self):
return self._num_cols
def get_grid(self):
""" Returns a COPY of the underlying grid
"""
return self._grid[:]
def __contains__(self, item):
""" Returns True if item is in this Board, False otherwise
>>> board = Board(2, 3, grid=list(range(6)))
>>> 5 in board
True
>>> 6 in board
False
"""
return self._grid.__contains__(item)
def __getitem__(self, key):
""" Using bracket notation e.g. [, ] and pass in either a number
or a coordinate.
>>> board = Board(3, 5, '*')
>>> board[4] == board[(1, 1)] == board[[1, 1]] == '*'
True
"""
if isinstance(key, int):
return self._grid[key]
return self.get_item(key[0], key[1])
def __setitem__(self, key, value):
""" Using bracket notation e.g. [, ] and pass in either a number
or a coordinate.
>>> board = Board(3, 5, '*')
>>> board[7] = 70
>>> board.get_item(1, 2)
70
"""
if isinstance(key, int):
self._grid[key] = value
else:
self.set_item(key[0], key[1], value)
def __iter__(self):
""" Iterate through the underlying grid in row major order
>>> board = Board(2, 2, grid=list(range(4)))
>>> list(board)
[0, 1, 2, 3]
"""
return self._grid.__iter__()
def __reversed__(self):
""" Iterate through the underlying grid in reverse row major order
Use the built-in reversed() call.
>>> board = Board(2, 2, grid=list(range(4)))
>>> list(reversed(board))
[3, 2, 1, 0]
"""
return self._grid.__reversed__()
def __len__(self):
""" Returns the total number of elements
>>> board = Board(3, 3, grid=list(range(9)))
>>> len(board)
9
"""
return self._grid.__len__()
def __repr__(self):
return f'<Board num_cols: {self._num_cols} num_rows: {self._num_rows}>'
def __str__(self):
""" Print out the board items in a grid
>>> board = Board(2, 3, grid=list(range(6)))
>>> print(board)
===
0 1
2 3
4 5
===
"""
s = '=' * (self._num_cols * 2 - 1) + '\n'
for i, val in enumerate(self._grid):
s += str(val)
if (i + 1) % self._num_cols != 0:
s += ' '
if (i + 1) % self._num_cols == 0:
s += '\n'
s += '=' * (self._num_cols * 2 - 1)
return s
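if __name__ == '__main__':
    # A small usage sketch that only exercises the helpers already implemented
    # above (the "Required" methods are intentionally left as TODOs for the
    # exercise). Values are illustrative only.
    board = Board(3, 2, grid=[0, 1, 2, 3, 4, 5])
    print(board)                          # 3x2 grid printed between '=' rules
    print(board.get_row(1))               # [3, 4, 5]
    # Row-major mapping used throughout: index = y * num_cols + x.
    print(board.index_to_coordinate(5))   # (2, 1), i.e. x=2, y=1
    print(len(board), 5 in board)         # 6 True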
avg_line_length 26.710214 | max_line_length 90 | alphanum_fraction 0.434682
hexsha 0294473b7d8733b5932d3b64c1582a1804c3bd58 | size 671 | ext py | lang Python
max_stars: app/core/management/commands/wait_for_db.py | AlfaSakan/recipe-app-api | 61c34252271f669d15d3e83980d33afd46469157 | ["MIT"] | count null | null .. null
max_issues: app/core/management/commands/wait_for_db.py | AlfaSakan/recipe-app-api | 61c34252271f669d15d3e83980d33afd46469157 | ["MIT"] | count null | null .. null
max_forks: app/core/management/commands/wait_for_db.py | AlfaSakan/recipe-app-api | 61c34252271f669d15d3e83980d33afd46469157 | ["MIT"] | count null | null .. null
content:
import time
from django.db import connections
from django.db.utils import OperationalError
from django.core.management.base import BaseCommand
class Command(BaseCommand):
"""Django command to pause execution until database is available"""
def handle(self, *args, **kwargs):
self.stdout.write('Waiting for database...')
db_conn = None
while not db_conn:
try:
db_conn = connections['default']
except OperationalError:
self.stdout.write('Database unavailable, waiting 1 second...')
time.sleep(1)
self.stdout.write(self.style.SUCCESS('Database available!'))
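# Usage note (illustrative): living under core/management/commands/, this
# module registers a manage.py subcommand, so a typical invocation before
# running migrations would be:
#
#     python manage.py wait_for_db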
avg_line_length 30.5 | max_line_length 78 | alphanum_fraction 0.651267
hexsha 4903d2c10789bacbdab8e849a562725dcff7aa68 | size 1,671 | ext py | lang Python
max_stars: doc/tools/healthcheck.py | d4wner/axefuzzer | 2c5c5abc9cea3b24c14735ba9cb159911e9f1ded | ["BSD-2-Clause"] | count 2 | 2019-03-13T12:30:48.000Z .. 2021-01-29T23:40:46.000Z
max_issues: doc/tools/healthcheck.py | d4wner/axefuzzer | 2c5c5abc9cea3b24c14735ba9cb159911e9f1ded | ["BSD-2-Clause"] | count null | null .. null
max_forks: doc/tools/healthcheck.py | d4wner/axefuzzer | 2c5c5abc9cea3b24c14735ba9cb159911e9f1ded | ["BSD-2-Clause"] | count 1 | 2019-03-13T12:30:52.000Z .. 2019-03-13T12:30:52.000Z
content:
#!/usr/bin/env python
# encoding: utf-8
'''Proxy server health check tool.
If wyproxy is in an error state, restart the proxy server:
$ python wyproxy.py -restart
Note: wyproxy.py needs to add a -conf option.
'''
from __future__ import (absolute_import, print_function, division)
import os
import json
import subprocess
"""health check conf"""
bind_addr = '106.75.199.107'
url = 'http://www.baidu.com/wy'
def read_cnf():
args = json.load(open('../.proxy.cnf', 'r'))
return args
def run_command(*arguments):
return subprocess.check_output(['curl']+ list(arguments))
def check_live():
# curl --proxy 106.75.199.107:8080 http://www.baidu.com/wyproxy
# curl --socks5-hostname 106.75.199.107:8080 http://www.baidu.com/wyproxy
try:
args = read_cnf()
if args.get('mode') == 'socks5':
opt = '--socks5-hostname'
elif args.get('mode') == 'http':
opt = '--proxy'
server = '{}:{}'.format(bind_addr, args.get('port'))
result = run_command('-q', opt, server, url, '--connect-timeout', '5')
return False if 'Failed to connect' in result else True
except Exception as e:
return False
def restart_wyproxy(pidfile):
    return subprocess.check_output(['python', 'wyproxy.py', '--restart', '--pid', pidfile])
def main():
print(check_live())
# print(result)
# print(check_live())
# print(__file__)
# print(os.path.realpath(__file__))
# print(os.path.dirname(os.path.realpath(__file__)))
# pwd = os.environ['PWD']
# print(pwd)
# print(os.getcwd())
if __name__ == '__main__':
try:
main()
    except Exception as e:
print('error: {}'.format(e))
avg_line_length 27.393443 | max_line_length 86 | alphanum_fraction 0.629563
hexsha 616427a25ab4a9af2055dcfa2757f9a003ac7293 | size 5,143 | ext py | lang Python
max_stars: pipelines/processor.py | theboxahaan/pipelines | 19c7ab18db2db6fe4d4316f8f843cf3e0e9681e2 | ["MIT"] | count 3 | 2021-05-24T02:15:05.000Z .. 2022-03-03T07:45:16.000Z
max_issues: pipelines/processor.py | theboxahaan/pipelines | 19c7ab18db2db6fe4d4316f8f843cf3e0e9681e2 | ["MIT"] | count 2 | 2021-05-24T15:33:57.000Z .. 2021-05-25T04:28:59.000Z
max_forks: pipelines/processor.py | theboxahaan/pipelines | 19c7ab18db2db6fe4d4316f8f843cf3e0e9681e2 | ["MIT"] | count 2 | 2021-07-06T02:54:14.000Z .. 2022-02-27T10:32:25.000Z
content:
import uuid
import asyncio
import logging
import traceback
class Processor:
"""
Class Processor represents a node in the processing pipeline where
node := (input Q, processor, output Q, accumulator)
pipeline := node1 -> node2 -> node3
"""
def __init__(self,
name:str = None,
input_queue:asyncio.Queue=None,
output_queue:asyncio.Queue=None,
coro=None,
input_srcs:list=None,
output_dests:list=None,
env_vars:dict=None,
queue_size:int=0,
*args, **kwargs):
if queue_size == 0 :
logging.warning('unbounded input_queue, output_queue')
self._input_queue = asyncio.Queue(maxsize=queue_size) if input_queue is None else input_queue
self._output_queue = asyncio.Queue(maxsize=queue_size) if output_queue is None else output_queue
self._processor_coro = coro
self._uuid = str(uuid.uuid4())
self._name = str(name)
self._output_accumulator = []
self._input_srcs = input_srcs
self._output_dests = output_dests
self.env_vars = env_vars
self._input_handler_task = asyncio.create_task(
self._input_handler(self._input_srcs))
self._processor_task = asyncio.create_task(
self._processor(*args, **kwargs))
self._output_handler_task = asyncio.create_task(
self._output_handler(self._output_dests))
logging.info('instantiated %s', str(self))
async def _input_handler(self, input_src:list=None):
"""
Helper Function to handle multiple input sources and populate
the input_queue of each Processor object.
The only constraint is that the number of inputs coming in from each
input source are equal in number, otherwise the processor Q's will be kept
waiting...
.
"""
try:
logging.info('%s started input handler...', repr(self))
while(True):
# acquire a single input elt from each of the source
# (liason q's)
cur_input = []
if input_src is None or len(input_src) == 0:
logging.error('input sources cannot be None or empty list ... exiting input_handler')
raise asyncio.CancelledError
for _src in input_src:
cur_input.append(await _src.get())
# put the acquired input inside the Processor's input_queue
await self._input_queue.put(tuple(cur_input))
except asyncio.CancelledError:
logging.warning('%s input_handler cancelled', str(self))
except Exception as e:
logging.error('[input_handler]\n%s', traceback.format_exc())
raise
async def _output_handler(self, output_dest:list=None):
"""
Helper Function to handle multiple output destinations and populate
the output_queue of each Processor object.
"""
try:
logging.info('%s started output handler...', repr(self))
while(True):
if output_dest is None or len(output_dest) == 0:
logging.error('output dests cannot be None or empty list... exiting output_handler')
raise asyncio.CancelledError
# acquire a single output elt from the output queue
cur_output = await self._output_queue.get()
# put the acquired output into the dest(liason) q's
for _dest in output_dest:
await _dest.put(cur_output)
except asyncio.CancelledError:
logging.warning('%s output_handler cancelled', str(self))
except Exception as e:
logging.error('[output_handler]\n%s', traceback.format_exc())
raise
async def _processor(self, *args, **kwargs):
try:
logging.info('%s started processor ...', repr(self))
while(True):
_temp = await self._input_queue.get()
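            # Each input item is handed to its own task below, so one slow
            # coroutine call does not block consumption of later queue items.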
async def _processor_task(_temp, *Aargs, **Akwargs):
logging.info('starting processor task...')
try:
_temp = await self._processor_coro(self, _temp, *Aargs, **Akwargs)
if self._output_queue is None:
self._output_accumulator.append(_temp)
else:
await self._output_queue.put(_temp)
except Exception as e:
logging.error('[processor_task]\n%s', traceback.format_exc())
asyncio.create_task(_processor_task( _temp, *args, **kwargs))
except asyncio.CancelledError:
logging.warning('%s processor cancelled', str(self))
except Exception as e:
logging.error('[processor]\n%s', traceback.format_exc())
raise
@property
def input_queue(self) -> asyncio.Queue:
return self._input_queue
@property
def output_queue(self) -> asyncio.Queue:
return self._output_queue
@property
def uuid(self) -> str:
return self._uuid
@property
def name(self) -> str:
return self._name
@property
def liason_queues(self) -> tuple:
return (self._input_srcs, self._output_dests)
@property
def processor_coro(self):
return self._processor_coro
def __repr__(self) -> str:
return f"<Processor:{self._uuid}, coro:{self._processor_coro.__qualname__}>"
def __str__(self) -> str:
return f"<Processor:{self._uuid};{self._name}>"
class InputProcessor(Processor):
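    # A source node: instead of consuming an input queue, its coroutine is given
    # the output queue directly and feeds the rest of the pipeline.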
async def _processor(self, *args, **kwargs):
try:
logging.info('%s starting IO processor ...', repr(self))
await self._processor_coro(self, self._output_queue, *args, **kwargs)
except Exception as e:
logging.error('[processor]\n%s', traceback.format_exc())
| 29.728324
| 104
| 0.698425
|
f28499f97e68f8fdd8df26964566c07bc84bfd0c
| 720
|
py
|
Python
|
wsgi.py
|
ioannova/cache_app
|
dec4985b6ca7d718eaebe99cf0c2687f03e5e3ab
|
[
"MIT"
] | null | null | null |
wsgi.py
|
ioannova/cache_app
|
dec4985b6ca7d718eaebe99cf0c2687f03e5e3ab
|
[
"MIT"
] | null | null | null |
wsgi.py
|
ioannova/cache_app
|
dec4985b6ca7d718eaebe99cf0c2687f03e5e3ab
|
[
"MIT"
] | null | null | null |
# -*- encoding: utf-8 -*-
from flask_migrate import Migrate
from app import app, db
from app.settings import Development, Production
import sqlite3
# Alembic
migrate = Migrate(app, db)
if __name__ == "__main__":
app.run()
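# reset(): wipe the SQLite database and the migrations folder, recreate the
# database file, then re-run the flask-migrate init/migrate/upgrade cycle.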
def reset():
from os import system, getcwd, path
import sqlite3
    system('rm %s' % path.join(getcwd(), 'app/database.db'))
    system('rm -rf %s' % path.join(getcwd(), 'migrations'))
    sqlite3.connect('%s' % path.join(getcwd(), 'app/database.db'))
    system('%s db init' % path.join(getcwd(), 'env/bin/flask'))
    system('%s db migrate -m "initial"' % path.join(getcwd(), 'env/bin/flask'))
    system('%s db upgrade' % path.join(getcwd(), 'env/bin/flask'))
| 30
| 82
| 0.648611
|
4778624634bc70df7a21222991c018121d4925c6
| 5,103
|
py
|
Python
|
salt/returners/memcache_return.py
|
styro/salt
|
d087d94dca02ca8bf53a6c21b94944bc7957522c
|
[
"Apache-2.0"
] | 1
|
2016-04-26T03:42:32.000Z
|
2016-04-26T03:42:32.000Z
|
salt/returners/memcache_return.py
|
styro/salt
|
d087d94dca02ca8bf53a6c21b94944bc7957522c
|
[
"Apache-2.0"
] | null | null | null |
salt/returners/memcache_return.py
|
styro/salt
|
d087d94dca02ca8bf53a6c21b94944bc7957522c
|
[
"Apache-2.0"
] | 1
|
2021-12-02T15:30:00.000Z
|
2021-12-02T15:30:00.000Z
|
# -*- coding: utf-8 -*-
'''
Return data to a memcache server
To enable this returner the minion will need the python client for memcache
installed and the following values configured in the minion or master
config, these are the defaults:
memcache.host: 'localhost'
memcache.port: '11211'
Alternative configuration values can be used by prefacing the configuration.
Any values not found in the alternative configuration will be pulled from
the default location::
alternative.memcache.host: 'localhost'
alternative.memcache.port: '11211'
python2-memcache uses 'localhost' and '11211' as syntax on connection.
To use the memcache returner, append '--return memcache' to the salt command. ex:
salt '*' test.ping --return memcache
To use the alternative configuration, append '--return_config alternative' to the salt command. ex:
salt '*' test.ping --return memcache --return_config alternative
'''
from __future__ import absolute_import
# Import python libs
import json
import logging
import salt.utils
import salt.returners
log = logging.getLogger(__name__)
# Import third party libs
try:
import memcache
HAS_MEMCACHE = True
except ImportError:
HAS_MEMCACHE = False
# Define the module's virtual name
__virtualname__ = 'memcache'
def __virtual__():
if not HAS_MEMCACHE:
return False
return __virtualname__
def _get_options(ret=None):
'''
Get the memcache options from salt.
'''
attrs = {'host': 'host',
'port': 'port'}
_options = salt.returners.get_returner_options(__virtualname__,
ret,
attrs,
__salt__=__salt__,
__opts__=__opts__)
return _options
def _get_serv(ret):
'''
Return a memcache server object
'''
_options = _get_options(ret)
host = _options.get('host')
port = _options.get('port')
log.debug('memcache server: {0}:{1}'.format(host, port))
if not host or not port:
log.error('Host or port not defined in salt config')
return
# Combine host and port to conform syntax of python memcache client
memcacheoptions = (host, port)
return memcache.Client(['{0}:{1}'.format(*memcacheoptions)], debug=0)
# # TODO: make memcacheoptions cluster aware
# Servers can be passed in two forms:
# 1. Strings of the form C{"host:port"}, which implies a default weight of 1
# 2. Tuples of the form C{("host:port", weight)}, where C{weight} is
# an integer weight value.
def prep_jid(nocache, passed_jid=None): # pylint: disable=unused-argument
'''
Do any work necessary to prepare a JID, including sending a custom id
'''
return passed_jid if passed_jid is not None else salt.utils.gen_jid()
def returner(ret):
'''
Return data to a memcache data store
'''
serv = _get_serv(ret)
serv.set('{0}:{1}'.format(ret['id'], ret['jid']), json.dumps(ret))
# The following operations are neither efficient nor atomic.
# If there is a way to make them so, this should be updated.
if ret['id'] not in get_minions():
r = serv.append('minions', ret['id'] + ',')
if not r:
serv.add('minions', ret['id'] + ',')
if ret['jid'] not in get_jids():
r = serv.append('jids', ret['jid'] + ',')
if not r:
serv.add('jids', ret['jid'] + ',')
def save_load(jid, load):
'''
Save the load to the specified jid
'''
serv = _get_serv(ret=None)
serv.set(jid, json.dumps(load))
    serv.append('jids', jid + ',')
def get_load(jid):
'''
Return the load data that marks a specified jid
'''
serv = _get_serv(ret=None)
data = serv.get(jid)
if data:
return json.loads(data)
return {}
def get_jid(jid):
'''
Return the information returned when the specified job id was executed
'''
serv = _get_serv(ret=None)
ret = {}
for minion in get_minions():
data = serv.get('{0}:{1}'.format(minion, jid))
if data:
ret[minion] = json.loads(data)
return ret
def get_fun(fun):
'''
Return a dict of the last function called for all minions
'''
serv = _get_serv(ret=None)
ret = {}
    for minion in get_minions():
ind_str = '{0}:{1}'.format(minion, fun)
try:
            jid = serv.get(ind_str)
except Exception:
continue
data = serv.get('{0}:{1}'.format(minion, jid))
if data:
ret[minion] = json.loads(data)
return ret
def get_jids():
'''
Return a list of all job ids
'''
serv = _get_serv(ret=None)
try:
return serv.get('jids').strip(',').split(',')
except AttributeError:
return []
def get_minions():
'''
Return a list of minions
'''
serv = _get_serv(ret=None)
try:
return serv.get('minions').strip(',').split(',')
except AttributeError:
return []
| 26.304124
| 101
| 0.610033
|
dc145e835fe14e9b1e67c4b334a638609d285e96
| 435
|
py
|
Python
|
api/serializers.py
|
phemmylintry/flight-book
|
dcd187e81d36fae7424c2e19142cd4f31045f129
|
[
"MIT"
] | null | null | null |
api/serializers.py
|
phemmylintry/flight-book
|
dcd187e81d36fae7424c2e19142cd4f31045f129
|
[
"MIT"
] | null | null | null |
api/serializers.py
|
phemmylintry/flight-book
|
dcd187e81d36fae7424c2e19142cd4f31045f129
|
[
"MIT"
] | null | null | null |
"""Define serializers for API."""
from rest_framework.serializers import ModelSerializer
from .models import Flight
class FlightSerializer(ModelSerializer):
"""flight object serializer."""
class Meta:
"""Meta data for flight object."""
model = Flight
fields = ("id", "origin", "destination", "departure_date",
"plane_type")
"""
Add new fields "plane"
"""
| 21.75
| 66
| 0.602299
|
459784e7d83034f035339b3054713dae105c3375
| 5,615
|
py
|
Python
|
example/app/api/client_routes.py
|
monoper/BlockchainDB
|
cda8bb65e2351553d4d09e6396796eb0715641b7
|
[
"MIT"
] | 1
|
2021-05-15T19:45:41.000Z
|
2021-05-15T19:45:41.000Z
|
example/app/api/client_routes.py
|
monoper/BlockchainDB
|
cda8bb65e2351553d4d09e6396796eb0715641b7
|
[
"MIT"
] | null | null | null |
example/app/api/client_routes.py
|
monoper/BlockchainDB
|
cda8bb65e2351553d4d09e6396796eb0715641b7
|
[
"MIT"
] | null | null | null |
import uuid
from typing import List
from fastapi import Depends, APIRouter, status, HTTPException
from .client_models import Client, LinkedProvider
from .provider_models import Provider
from .common_models import Appointment, AppointmentStatus
from .blockchain import BlockchainDb
from .util import verify_auth_header
api = APIRouter(
prefix="/api/client",
tags=["clients"],
dependencies=[Depends(BlockchainDb),Depends(verify_auth_header)],
responses={404: {"description": "Not found"}},
)
@api.get("/{client_id}", response_model=Client, status_code=status.HTTP_200_OK)
def get_client(client_id: str, database: BlockchainDb = Depends()):
result = database.find_one('Client', {'clientId': client_id})
if result is None:
raise HTTPException(status_code=404, detail='Client not found')
return Client(**result)
@api.put("/{client_id}", status_code=status.HTTP_200_OK)
def update_client(client_id: str, client: Client, database: BlockchainDb = Depends()):
if client.clientId != client_id:
raise HTTPException(status_code=400,
detail='Client id in query parameter doesn\'t match payload')
database.commit_transaction(client, 'EDIT', 'Client', 'clientId', client_id)
@api.get("/{client_id}/appointments",
response_model=List[Appointment],
status_code=status.HTTP_200_OK)
def get_client_appointments(client_id: str, database: BlockchainDb = Depends()):
result = database.find('Appointment', {'clientId': client_id})
if result is None:
return []
return result
@api.get("/{client_id}/appointments/{appointment_id}",
response_model=Appointment,
status_code=status.HTTP_200_OK)
def get_client_appointment(client_id: str, appointment_id: str, database: BlockchainDb = Depends()):
result = database.find_one('Appointment',
{'clientId': client_id, 'appointmentId': appointment_id})
if result is None:
raise HTTPException(status_code=404, detail='Appointment not found')
return result
@api.post("/{client_id}/appointments", status_code=status.HTTP_200_OK)
def add_client_appointment(client_id: str, appointment: Appointment,
database: BlockchainDb = Depends()):
if appointment.clientId != client_id:
raise HTTPException(status_code=400,
detail=f'Client id ({client_id}) in query \
parameter doesn\'t match payload \
({appointment.clientId}) \
{client_id == appointment.clientId}')
#need to add protect so that only 1 create block can exist for a given ID
appointment.appointmentId = str(uuid.uuid4())
provider = Provider(**database.find_one('Provider', {'providerId': appointment.providerId}))
client = Client(**database.find_one('Client', {'clientId': client_id}))
if not any(linked_provider.providerId == provider.providerId
for linked_provider in client.linkedProviders):
client.linkedProviders.append(LinkedProvider(providerId=provider.providerId,
hasAccess=True,
providerName=f'{provider.name.firstName} {provider.name.lastName}'))
database.commit_transaction(client, 'EDIT', 'Client', 'clientId', client_id)
database.commit_transaction(appointment, 'CREATE', 'Appointment',
'appointmentId', appointment.appointmentId)
@api.post("/{client_id}/linked-provider/{provider_id}/toggle", status_code=status.HTTP_200_OK)
def toggle_client_linked_provider(client_id: str, provider_id: str,
database: BlockchainDb = Depends()):
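    # Flip the hasAccess flag of the matching linked provider and persist the
    # updated client record as an 'EDIT' transaction.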
client = Client(**database.find_one('Client', {'clientId': client_id}))
for index, linked_provider in enumerate(client.linkedProviders):
if linked_provider.providerId == provider_id:
linked_provider.hasAccess = not linked_provider.hasAccess
client.linkedProviders[index] = linked_provider
database.commit_transaction(client, 'EDIT', 'Client', 'clientId', client_id)
@api.put("/{client_id}/appointments/{appointment_id}", status_code=status.HTTP_200_OK)
def update_client_appointment(client_id: str, appointment_id: str,
appointment: Appointment, database: BlockchainDb = Depends()):
if appointment.clientId != client_id or appointment.appointmentId != appointment_id:
raise HTTPException(status_code=400,
detail='Client id in query parameter doesn\'t match payload')
if appointment.status == AppointmentStatus.Completed \
or appointment.status == AppointmentStatus.Rejected:
raise HTTPException(status_code=400,
detail='Cannot update a completed or rejected appointment')
result = database.commit_transaction(appointment, 'EDIT',
'Appointment', 'appointmentId', appointment_id)
if result is None:
raise HTTPException(status_code=400, detail='Could not update appointment')
return result
@api.get("/{client_id}/prescribed-treatments", status_code=status.HTTP_200_OK)
def get_client_prescribed_treatments(client_id: str, database: BlockchainDb = Depends()):
appointments = database.find('Appointment', { 'clientId' : client_id})
if appointments is None:
return []
prescribed_treatments = []
    for appointment in appointments:
        prescribed_treatments.extend(appointment.prescribedTreatment)
return prescribed_treatments
| 43.527132
| 100
| 0.68602
|
086ee9c878ca501f61915e87e401f87d189cd793
| 2,164
|
py
|
Python
|
CSCI-104/hw7/hw7-test/tests/checker.py
|
liyang990803/CSCI-103
|
6f84fbc242be90f7a9c3a58bdcc6f54352e4ae5a
|
[
"MIT"
] | null | null | null |
CSCI-104/hw7/hw7-test/tests/checker.py
|
liyang990803/CSCI-103
|
6f84fbc242be90f7a9c3a58bdcc6f54352e4ae5a
|
[
"MIT"
] | null | null | null |
CSCI-104/hw7/hw7-test/tests/checker.py
|
liyang990803/CSCI-103
|
6f84fbc242be90f7a9c3a58bdcc6f54352e4ae5a
|
[
"MIT"
] | 1
|
2018-03-23T04:19:24.000Z
|
2018-03-23T04:19:24.000Z
|
import sys
from checker_bst import bst
from checker_rotate import rotate
from checker_hypercube import hypercube
from cs_grading import Grader, Homework, Problem
from cs_grading import generate_grade_report
sys.path.insert(0, '..')
import setting
RESULT_DIR = '../results/' # where test results are stored
GRADER_CONFIG = '../grader.txt'
RUBRIC_GENERAL = 'rubric/general.config'
RUBRIC_BST = 'rubric/bst.config'
RUBRIC_ROTATE = 'rubric/rotate.config'
RUBRIC_HYPERCUBE = 'rubric/hypercube.config'
RUBRIC_OTHER = '../rubric_other.tsv'
GRADE_REPORT_DIR = '../'
HOMEWORK = Homework(
7,
RESULT_DIR,
setting.REMOVE_OUTPUT,
detailed_results=setting.DETAILED_RESULT,
compile_flags=setting.COMPILE_FLAGS,
logging_level=setting.LOGGING_LEVEL,
)
P1 = Problem(HOMEWORK, 1, 'bst', 35)
P2 = Problem(HOMEWORK, 2, 'rotate', 25)
P3 = Problem(HOMEWORK, 3, 'hypercube', 40)
if setting.RUN_BST:
P1.generate_results(
bst,
setting.BST_USE_VALGRIND,
timeout=setting.BST_TIMEOUT,)
if setting.GENERATE_GRADE_REPORT:
P1.grade_problem(RUBRIC_GENERAL, RUBRIC_BST)
if setting.OPEN_RESULT:
P1.open_result(text_editor=setting.TEXT_EDITOR)
if setting.RUN_ROTATE:
P2.generate_results(
rotate,
setting.ROTATE_USE_VALGRIND,
timeout=setting.ROTATE_TIMEOUT,)
if setting.GENERATE_GRADE_REPORT:
P2.grade_problem(RUBRIC_GENERAL, RUBRIC_ROTATE)
if setting.OPEN_RESULT:
P2.open_result(text_editor=setting.TEXT_EDITOR)
if setting.RUN_HYPERCUBE:
P3.generate_results(
hypercube,
setting.HYPERCUBE_USE_VALGRIND,
timeout=setting.HYPERCUBE_TIMEOUT,)
if setting.GENERATE_GRADE_REPORT:
P3.grade_problem(RUBRIC_GENERAL, RUBRIC_HYPERCUBE)
if setting.OPEN_RESULT:
P3.open_result(text_editor=setting.TEXT_EDITOR)
if setting.GENERATE_GRADE_REPORT:
HOMEWORK.grade_other_deduction(RUBRIC_OTHER)
GRADER = Grader(GRADER_CONFIG, setting.LOGGING_LEVEL)
generate_grade_report(
HOMEWORK,
GRADER,
GRADE_REPORT_DIR,
overwrite=setting.OVERWRITE_REPORT,
logging_level=setting.LOGGING_LEVEL)
| 30.055556
| 58
| 0.737061
|
6f72209ca045276a65cf7f00b94112305dafb5d4
| 1,480
|
py
|
Python
|
src/openprocurement/tender/limited/tests/document.py
|
pontostroy/api
|
5afdd3a62a8e562cf77e2d963d88f1a26613d16a
|
[
"Apache-2.0"
] | 10
|
2020-02-18T01:56:21.000Z
|
2022-03-28T00:32:57.000Z
|
src/openprocurement/tender/limited/tests/document.py
|
pontostroy/api
|
5afdd3a62a8e562cf77e2d963d88f1a26613d16a
|
[
"Apache-2.0"
] | 26
|
2018-07-16T09:30:44.000Z
|
2021-02-02T17:51:30.000Z
|
src/openprocurement/tender/limited/tests/document.py
|
scrubele/prozorro-testing
|
42b93ea2f25d8cc40e66c596f582c7c05e2a9d76
|
[
"Apache-2.0"
] | 15
|
2019-08-08T10:50:47.000Z
|
2022-02-05T14:13:36.000Z
|
# -*- coding: utf-8 -*-
import unittest
from openprocurement.tender.belowthreshold.tests.document import (
TenderDocumentResourceTestMixin,
TenderDocumentWithDSResourceTestMixin,
)
from openprocurement.tender.limited.tests.base import (
BaseTenderContentWebTest,
test_tender_data,
test_tender_negotiation_data,
test_tender_negotiation_quick_data,
)
class TenderDocumentResourceTest(BaseTenderContentWebTest, TenderDocumentResourceTestMixin):
initial_data = test_tender_data
docservice = False
class TenderNegotiationDocumentResourceTest(TenderDocumentResourceTest):
initial_data = test_tender_negotiation_data
class TenderNegotiationQuickDocumentResourceTest(TenderNegotiationDocumentResourceTest):
initial_data = test_tender_negotiation_quick_data
class TenderDocumentWithDSResourceTest(TenderDocumentResourceTest, TenderDocumentWithDSResourceTestMixin):
docservice = True
class TenderNegotiationDocumentWithDSResourceTest(TenderDocumentWithDSResourceTest):
initial_data = test_tender_negotiation_data
class TenderNegotiationQuickDocumentWithDSResourceTest(TenderDocumentWithDSResourceTest):
initial_data = test_tender_negotiation_quick_data
def suite():
suite = unittest.TestSuite()
suite.addTest(unittest.makeSuite(TenderDocumentResourceTest))
suite.addTest(unittest.makeSuite(TenderDocumentWithDSResourceTest))
return suite
if __name__ == "__main__":
unittest.main(defaultTest="suite")
| 29.019608
| 106
| 0.833784
|
0c8b4fcabaaf735c3ebf6c034042f53501e9ed79
| 2,629
|
py
|
Python
|
helpers/WIndowsUSB.py
|
shloak17107/WhatsApp-Key-Database-Extractor
|
24fb112a81fe54d0b0f42a6d3253b3420b92976e
|
[
"MIT"
] | null | null | null |
helpers/WIndowsUSB.py
|
shloak17107/WhatsApp-Key-Database-Extractor
|
24fb112a81fe54d0b0f42a6d3253b3420b92976e
|
[
"MIT"
] | null | null | null |
helpers/WIndowsUSB.py
|
shloak17107/WhatsApp-Key-Database-Extractor
|
24fb112a81fe54d0b0f42a6d3253b3420b92976e
|
[
"MIT"
] | null | null | null |
import os
import re
from subprocess import check_output, getoutput
try:
import wget
from packaging import version
except ImportError:
    try:
        os.system('pip3 install wget packaging')
    except:
        os.system('python3 -m pip install wget packaging')
    import wget
    from packaging import version
from CustomCI import CustomPrint
# Global Variables
appURLWhatsAppCDN = 'https://www.cdn.whatsapp.net/android/2.11.431/WhatsApp.apk'
appURLWhatsCryptCDN = 'https://whatcrypt.com/WhatsApp-2.11.431.apk'
# Global command line helpers
tmp = 'tmp/'
grep = 'bin\\grep.exe'
curl = 'bin\\curl.exe'
helpers = 'helpers/'
def AfterConnect(adb):
SDKVersion = int(getoutput(
adb + ' shell getprop ro.build.version.sdk'))
if (SDKVersion <= 13):
CustomPrint(
            'Unsupported device. This method only works on Android v4.0 or higher.', 'red')
        CustomPrint('Cleaning up temporary directory.', 'red')
os.remove(tmp)
Exit()
WhatsAppapkPath = re.search('(?<=package:)(.*)(?=apk)', str(check_output(
adb + ' shell pm path com.whatsapp'))).group(1) + 'apk'
if not (WhatsAppapkPath):
CustomPrint('Looks like WhatsApp is not installed on device.', 'red')
Exit()
sdPath = getoutput(adb + ' shell "echo $EXTERNAL_STORAGE"')
# To check if APK even exists at a given path to download!
contentLength = int(re.search("(?<=Content-Length:)(.*[0-9])(?=)", str(check_output(
curl + ' -sI http://www.cdn.whatsapp.net/android/2.11.431/WhatsApp.apk'))).group(1))
versionName = re.search("(?<=versionName=)(.*?)(?=\\\\r)", str(check_output(
adb + ' shell dumpsys package com.whatsapp'))).group(1)
CustomPrint('WhatsApp V' + versionName + ' installed on device')
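    # Use the official WhatsApp CDN only when its Content-Length matches the expected
    # size of the legacy 2.11.431 APK; otherwise fall back to the WhatsCrypt mirror.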
downloadAppFrom = appURLWhatsAppCDN if(
contentLength == 18329558) else appURLWhatsCryptCDN
if (version.parse(versionName) > version.parse('2.11.431')):
if not (os.path.isfile(helpers + 'LegacyWhatsApp.apk')):
CustomPrint(
'Downloading legacy WhatsApp V2.11.431 to helpers folder')
wget.download(downloadAppFrom, helpers + 'LegacyWhatsApp.apk')
print('\n')
else:
CustomPrint('Found legacy WhatsApp V2.11.431 apk in ' +
helpers + ' folder')
return 1, SDKVersion, WhatsAppapkPath, versionName, sdPath
def Exit():
CustomPrint('\nExiting...')
os.system('bin\\adb.exe kill-server')
quit()
def WindowsUSB(adb):
CustomPrint('Connected to ' + getoutput(adb +
' shell getprop ro.product.model'))
return AfterConnect(adb)
| 36.013699
| 92
| 0.633321
|
72312b1b26acf19c60784c6ebba4d0af31e6fe8e
| 10,775
|
py
|
Python
|
models/modelszoo/ResNet3D_VAE.py
|
qgking/FRGAN
|
b6a250c46981707c43c3889f80d8cc3ec31edaaf
|
[
"MIT"
] | 2
|
2021-08-10T02:38:23.000Z
|
2021-08-10T03:04:22.000Z
|
models/modelszoo/ResNet3D_VAE.py
|
qgking/FRGAN
|
b6a250c46981707c43c3889f80d8cc3ec31edaaf
|
[
"MIT"
] | 1
|
2022-02-21T15:57:03.000Z
|
2022-02-21T15:57:03.000Z
|
models/modelszoo/ResNet3D_VAE.py
|
qgking/FRGAN
|
b6a250c46981707c43c3889f80d8cc3ec31edaaf
|
[
"MIT"
] | 2
|
2021-11-07T13:25:51.000Z
|
2022-01-18T07:09:30.000Z
|
import torch
import torch.nn as nn
from models.modelszoo.BaseModelClass import BaseModel
"""
Implementation based on the original paper https://arxiv.org/pdf/1810.11654.pdf
"""
class GreenBlock(nn.Module):
def __init__(self, in_channels, out_channels=32, norm="group"):
super(GreenBlock, self).__init__()
if norm == "batch":
norm_1 = nn.BatchNorm3d(num_features=in_channels)
norm_2 = nn.BatchNorm3d(num_features=in_channels)
elif norm == "group":
norm_1 = nn.GroupNorm(num_groups=8, num_channels=in_channels)
norm_2 = nn.GroupNorm(num_groups=8, num_channels=in_channels)
self.layer_1 = nn.Sequential(
norm_1,
nn.ReLU())
self.layer_2 = nn.Sequential(
nn.Conv3d(in_channels=in_channels, out_channels=in_channels, kernel_size=(3, 3, 3), stride=1, padding=1),
norm_2,
nn.ReLU())
self.conv_3 = nn.Conv3d(in_channels=in_channels, out_channels=in_channels, kernel_size=(3, 3, 3),
stride=1, padding=1)
def forward(self, x):
x = self.layer_1(x)
x = self.layer_2(x)
y = self.conv_3(x)
y = y + x
return y
class DownBlock(nn.Module):
def __init__(self, in_channels, out_channels):
super(DownBlock, self).__init__()
self.conv = nn.Conv3d(in_channels=in_channels, out_channels=out_channels, kernel_size=(3, 3, 3),
stride=2, padding=1)
def forward(self, x):
return self.conv(x)
class BlueBlock(nn.Module):
def __init__(self, in_channels, out_channels=32):
super(BlueBlock, self).__init__()
self.conv = nn.Conv3d(in_channels=in_channels, out_channels=out_channels, kernel_size=(3, 3, 3),
stride=1, padding=1)
def forward(self, x):
return self.conv(x)
class UpBlock1(nn.Module):
"""
TODO fix transpose conv to double spatial dim
"""
def __init__(self, in_channels, out_channels):
super(UpBlock1, self).__init__()
self.transp_conv = nn.ConvTranspose3d(in_channels=in_channels, out_channels=out_channels, kernel_size=(1, 1, 1),
stride=2, padding=1)
def forward(self, x):
return self.transp_conv(x)
class UpBlock2(nn.Module):
def __init__(self, in_channels, out_channels):
super(UpBlock2, self).__init__()
self.conv_1 = nn.Conv3d(in_channels=in_channels, out_channels=out_channels, kernel_size=(1, 1, 1),
stride=1)
# self.up_sample_1 = nn.Upsample(scale_factor=2, mode="bilinear") # TODO currently not supported in PyTorch 1.4 :(
self.up_sample_1 = nn.Upsample(scale_factor=2, mode="nearest")
def forward(self, x):
return self.up_sample_1(self.conv_1(x))
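# Reparameterization trick: sample eps ~ N(0, 1) and return mu + sigma * eps,
# so gradients can flow back through mu and logvar.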
def reparametrize(mu, logvar):
std = torch.exp(0.5 * logvar)
eps = torch.randn_like(std)
return eps.mul(std).add_(mu)
class ResNetEncoder(nn.Module):
def __init__(self, in_channels, start_channels=32):
super(ResNetEncoder, self).__init__()
self.start_channels = start_channels
self.down_channels_1 = 2 * self.start_channels
self.down_channels_2 = 2 * self.down_channels_1
self.down_channels_3 = 2 * self.down_channels_2
# print("self.down_channels_3", self.down_channels_3)
self.blue_1 = BlueBlock(in_channels=in_channels, out_channels=self.start_channels)
self.drop = nn.Dropout3d(0.2)
self.green_1 = GreenBlock(in_channels=self.start_channels)
self.down_1 = DownBlock(in_channels=self.start_channels, out_channels=self.down_channels_1)
self.green_2_1 = GreenBlock(in_channels=self.down_channels_1)
self.green_2_2 = GreenBlock(in_channels=self.down_channels_1)
self.down_2 = DownBlock(in_channels=self.down_channels_1, out_channels=self.down_channels_2)
self.green_3_1 = GreenBlock(in_channels=self.down_channels_2)
self.green_3_2 = GreenBlock(in_channels=self.down_channels_2)
self.down_3 = DownBlock(in_channels=self.down_channels_2, out_channels=self.down_channels_3)
self.green_4_1 = GreenBlock(in_channels=self.down_channels_3)
self.green_4_2 = GreenBlock(in_channels=self.down_channels_3)
self.green_4_3 = GreenBlock(in_channels=self.down_channels_3)
self.green_4_4 = GreenBlock(in_channels=self.down_channels_3)
def forward(self, x):
x = self.blue_1(x)
x = self.drop(x)
x1 = self.green_1(x)
x = self.down_1(x1)
x = self.green_2_1(x)
x2 = self.green_2_2(x)
x = self.down_2(x2)
x = self.green_3_1(x)
x3 = self.green_3_2(x)
x = self.down_3(x3)
x = self.green_4_1(x)
x = self.green_4_2(x)
x = self.green_4_3(x)
x4 = self.green_4_4(x)
return x1, x2, x3, x4
class Decoder(nn.Module):
def __init__(self, in_channels=256, classes=4):
super(Decoder, self).__init__()
out_up_1_channels = int(in_channels / 2)
out_up_2_channels = int(out_up_1_channels / 2)
out_up_3_channels = int(out_up_2_channels / 2)
self.up_1 = UpBlock2(in_channels=in_channels, out_channels=out_up_1_channels)
self.green_1 = GreenBlock(in_channels=out_up_1_channels)
self.up_2 = UpBlock2(in_channels=out_up_1_channels, out_channels=out_up_2_channels)
self.green_2 = GreenBlock(in_channels=out_up_2_channels)
self.up_3 = UpBlock2(in_channels=out_up_2_channels, out_channels=out_up_3_channels)
self.green_3 = GreenBlock(in_channels=out_up_3_channels)
self.blue = BlueBlock(in_channels=out_up_3_channels, out_channels=classes)
def forward(self, x1, x2, x3, x4):
x = self.up_1(x4)
x = self.green_1(x + x3)
x = self.up_2(x)
x = self.green_2(x + x2)
x = self.up_3(x)
x = self.green_3(x + x1)
y = self.blue(x)
return y
class VAE(nn.Module):
def __init__(self, in_channels=256, in_dim=(10, 10, 10), out_dim=(2, 64, 64, 64)):
super(VAE, self).__init__()
self.in_channels = in_channels
self.in_dim = in_dim
self.out_dim = out_dim
self.modalities = out_dim[0]
self.encoder_channels = 16 # int(in_channels >> 4)
self.split_dim = int(self.in_channels / 2)
# self.reshape_dim = (int(self.out_dim[1] / 16), int(self.out_dim[2] / 16), int(self.out_dim[3] / 16))
# self.linear_in_dim = int(16 * (in_dim[0] / 2) * (in_dim[1] / 2) * (in_dim[2] / 2))
self.reshape_dim = (int(self.out_dim[1] / self.encoder_channels), int(self.out_dim[2] / self.encoder_channels),
int(self.out_dim[3] / self.encoder_channels))
self.linear_in_dim = int(self.encoder_channels * (in_dim[0] / 2) * (in_dim[1] / 2) * (in_dim[2] / 2))
self.linear_vu_dim = self.encoder_channels * self.reshape_dim[0] * self.reshape_dim[1] * self.reshape_dim[2]
channels_vup2 = int(self.in_channels / 2) # 128
channels_vup1 = int(channels_vup2 / 2) # 64
channels_vup0 = int(channels_vup1 / 2) # 32
group_1 = nn.GroupNorm(num_groups=8, num_channels=in_channels)
relu_1 = nn.ReLU()
conv_1 = nn.Conv3d(in_channels=in_channels, out_channels=self.encoder_channels, stride=2, kernel_size=(3, 3, 3),
padding=1)
self.VD = nn.Sequential(group_1, relu_1, conv_1)
self.linear_1 = nn.Linear(self.linear_in_dim, in_channels)
# TODO VU layer here
self.linear_vu = nn.Linear(channels_vup2, self.linear_vu_dim)
relu_vu = nn.ReLU()
VUup_block = UpBlock2(in_channels=self.encoder_channels, out_channels=self.in_channels)
self.VU = nn.Sequential(relu_vu, VUup_block)
self.Vup2 = UpBlock2(in_channels, channels_vup2)
self.Vblock2 = GreenBlock(channels_vup2)
self.Vup1 = UpBlock2(channels_vup2, channels_vup1)
self.Vblock1 = GreenBlock(channels_vup1)
self.Vup0 = UpBlock2(channels_vup1, channels_vup0)
self.Vblock0 = GreenBlock(channels_vup0)
self.Vend = BlueBlock(channels_vup0, self.modalities)
def forward(self, x):
x = self.VD(x)
x = x.view(-1, self.linear_in_dim)
x = self.linear_1(x)
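        # The linear output is split in half: the first half is the mean, the
        # second half is treated as the variance (its log is taken as logvar).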
mu = x[:, :self.split_dim]
logvar = torch.log(x[:, self.split_dim:])
y = reparametrize(mu, logvar)
y = self.linear_vu(y)
y = y.view(-1, self.encoder_channels, self.reshape_dim[0], self.reshape_dim[1], self.reshape_dim[2])
y = self.VU(y)
y = self.Vup2(y)
y = self.Vblock2(y)
y = self.Vup1(y)
y = self.Vblock1(y)
y = self.Vup0(y)
y = self.Vblock0(y)
dec = self.Vend(y)
return dec, mu, logvar
class ResNet3dVAE(BaseModel):
def __init__(self, in_channels=2, classes=4, max_conv_channels=256, dim=(64, 64, 64)):
super(ResNet3dVAE, self).__init__()
self.dim = dim
vae_in_dim = (int(dim[0] >> 3), int(dim[1] >> 3), int(dim[0] >> 3))
vae_out_dim = (in_channels, dim[0], dim[1], dim[2])
self.classes = classes
self.modalities = in_channels
start_channels = 32 # int(max_conv_channels >> 3)
self.encoder = ResNetEncoder(in_channels=in_channels, start_channels=start_channels)
self.decoder = Decoder(in_channels=max_conv_channels, classes=classes)
self.vae = VAE(in_channels=max_conv_channels, in_dim=vae_in_dim, out_dim=vae_out_dim)
def forward(self, x):
x1, x2, x3, x4 = self.encoder(x)
y = self.decoder(x1, x2, x3, x4)
vae_out, mu, logvar = self.vae(x4)
return y, vae_out, mu, logvar
def test(self):
inp = torch.rand(1, self.modalities, self.dim[0], self.dim[1], self.dim[2])
ideal = torch.rand(1, self.classes, self.dim[0], self.dim[1], self.dim[2])
y, vae_out, mu, logvar = self.forward(inp)
assert vae_out.shape == inp.shape, vae_out.shape
assert y.shape == ideal.shape
assert mu.shape == logvar.shape
print("3D-RESNET VAE test OK!")
def test_enc_dec():
model = ResNetEncoder(in_channels=2)
input = torch.rand(1, 2, 80, 80, 80)
x1, x2, x3, x4 = model(input)
print(x1.shape)
print(x2.shape)
print(x3.shape)
print(x4.shape)
model2 = Decoder()
y = model2(x1, x2, x3, x4)
print("out", y.shape)
def testVAE():
input = torch.rand(1, 128, 10, 10, 10)
model = VAE(in_channels=128, in_dim=(10, 10, 10), out_dim=(2, 128, 128, 128))
out, mu, logvar = model(input)
print("Done.\n Final out shape is: ", out.shape)
| 35.444079
| 122
| 0.632854
|
1e34f2805c40edd53ed7ee9f96063dd3e144c81b
| 1,052
|
py
|
Python
|
kubernetes/test/test_v1beta1_rolling_update_daemon_set.py
|
itholic/python
|
dffe577a062e17057270ae80fa677ffd83e9d183
|
[
"Apache-2.0"
] | null | null | null |
kubernetes/test/test_v1beta1_rolling_update_daemon_set.py
|
itholic/python
|
dffe577a062e17057270ae80fa677ffd83e9d183
|
[
"Apache-2.0"
] | null | null | null |
kubernetes/test/test_v1beta1_rolling_update_daemon_set.py
|
itholic/python
|
dffe577a062e17057270ae80fa677ffd83e9d183
|
[
"Apache-2.0"
] | null | null | null |
# coding: utf-8
"""
Kubernetes
No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
OpenAPI spec version: v1.15.7
Generated by: https://openapi-generator.tech
"""
from __future__ import absolute_import
import unittest
import kubernetes.client
from kubernetes.client.models.v1beta1_rolling_update_daemon_set import V1beta1RollingUpdateDaemonSet # noqa: E501
from kubernetes.client.rest import ApiException
class TestV1beta1RollingUpdateDaemonSet(unittest.TestCase):
"""V1beta1RollingUpdateDaemonSet unit test stubs"""
def setUp(self):
pass
def tearDown(self):
pass
def testV1beta1RollingUpdateDaemonSet(self):
"""Test V1beta1RollingUpdateDaemonSet"""
# FIXME: construct object with mandatory attributes with example values
# model = kubernetes.client.models.v1beta1_rolling_update_daemon_set.V1beta1RollingUpdateDaemonSet() # noqa: E501
pass
if __name__ == '__main__':
unittest.main()
| 26.3
| 124
| 0.748099
|
7051a88e18e77b2c4292e3a10feabd5b395df914
| 19,623
|
py
|
Python
|
MIES/Spaces.py
|
caueguidotti/auto_dqn
|
0f20464fe9d67dbbddf156d97788d7fc816435bc
|
[
"MIT"
] | null | null | null |
MIES/Spaces.py
|
caueguidotti/auto_dqn
|
0f20464fe9d67dbbddf156d97788d7fc816435bc
|
[
"MIT"
] | null | null | null |
MIES/Spaces.py
|
caueguidotti/auto_dqn
|
0f20464fe9d67dbbddf156d97788d7fc816435bc
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# =============================================================================
# File : Spaces.py
# Project : Project
# Created By : Caue Guidotti
# Created Date: 7/6/2021
# =============================================================================
"""
This module provides the basic classes to create an individual and
its search space.
Possible space types are: Continuous or Integer
TODO: implement nominal (discrete) space
"""
# =============================================================================
import numpy as np
POSSIBLE_SPACE_TYPES = ['i', 'c']
class Individual(object):
"""
Individual class
An individual may be recombined with another to change its characteristics and its search space state
An individual may also go through mutation
TODO: Perhaps receive the evaluation function in this class
"""
def __init__(self, search_space):
"""
An individual is created
:param search_space: Individual's search space
"""
assert isinstance(search_space, SearchSpace), 'Individual must be supplied with a object of type SearchSpace'
self.search_space = search_space
self.characteristics = None
self.objective_value = None
self.tau_r_global = None
self.tau_r_local = None
self.tau_i_global = None
self.tau_i_local = None
def __lt__(self, other_individual):
"""
Overloads < operator for Individual class. Used for heapq.
"""
return self.objective_value < other_individual.objective_value
def __eq__(self, other_individual):
"""
Overloads == operator. Used, for instance, to check if an individual is already present in a population.
"""
return (self.characteristics == other_individual.characteristics).all()
def initialize(self):
"""
Initialize the individual
"""
# obtains a random candidate solution
self.characteristics = self.get_sample()
# Defines learning rates
# TODO - enable single and multi step-size mode
# TODO - allows learning rate to be set externally
if 'c' in self.search_space.get_types():
len_c = len(self.search_space.idx_dict['c'])
self.tau_r_global = 1.0 / np.sqrt(2.0 * len_c)
self.tau_r_local = 1.0 / np.sqrt(2 * np.sqrt(len_c)) if len_c > 1 else 0
if 'i' in self.search_space.get_types():
len_i = len(self.search_space.idx_dict['i'])
self.tau_i_global = 1.0 / np.sqrt(2.0 * len_i)
self.tau_i_local = 1.0 / np.sqrt(2 * np.sqrt(len_i)) if len_i > 1 else 0
def get_sample(self) -> np.ndarray:
"""
Obtains a random candidate solution
"""
sample = []
for space in self.search_space.spaces:
sample += [space.get_sample()]
return np.asarray(sample, dtype='object')
def get_continuous_characteristics(self):
"""
Get continuous candidate solutions
"""
return self.characteristics[self.search_space.idx_dict['c']]
def set_continuous_characteristics(self, values):
"""
Set individual continuous candidate solution
:param values: New continuous parameters values
"""
assert len(self.search_space.idx_dict['c']) == len(values), 'Characteristics size must match values size'
self.characteristics[self.search_space.idx_dict['c']] = values
def get_integer_characteristics(self):
"""
Get integer candidate solutions
"""
return self.characteristics[self.search_space.idx_dict['i']]
def set_integer_characteristics(self, values):
"""
Set individual integer candidate solution
:param values: New integer parameters values
"""
assert len(self.search_space.idx_dict['i']) == len(values), 'Characteristics size must match values size'
self.characteristics[self.search_space.idx_dict['i']] = values
def set_objective_value(self, value):
"""
Set the corresponding evaluation value for this individual characteristics
:param value: Evaluated value
:return:
"""
self.objective_value = value
def recombine(self, other_individual,
characteristics_global_recombination=True,
strategy_pars_global_recombination=True,
strategy_par_random_weight=False):
"""
This individual can be recombined with another. This individual will be changed.
In MIES, characteristics associated to the solution uses dominant (or discrete) recombination.
Intermediate recombination is used for strategy parameters
:param other_individual: another individual
:param characteristics_global_recombination: (bool) use global recombination or not for solution params
:param strategy_pars_global_recombination: (bool) use global recombination or not for strategy params
:param strategy_par_random_weight: Use random weight instead of mean (0.5)
:return:
"""
# Discrete recombination for characteristics
if characteristics_global_recombination:
chosen_characteristics = np.nonzero(np.random.rand(len(self.characteristics)) > 0.5)
self.characteristics[chosen_characteristics] = other_individual.characteristics[chosen_characteristics]
else:
chosen_characteristics = np.random.rand() > 0.5
if chosen_characteristics:
self.characteristics = other_individual.characteristics
# Intermediate recombination for strategy parameters
strategy_par_weight = 0.5 if not strategy_par_random_weight else np.random.rand()
for space, other_space in zip(self.search_space.spaces, other_individual.search_space.spaces):
if strategy_par_random_weight and strategy_pars_global_recombination:
strategy_par_weight = np.random.rand()
combined_strategy_par = space.get_strategy_parameter() * strategy_par_weight + \
other_space.get_strategy_parameter() * (1 - strategy_par_weight)
space.set_strategy_parameter(combined_strategy_par)
def mutate(self):
"""
Applies the mutation algorithm
"""
continuous_spaces = self.search_space.get_continuous_spaces()
integer_spaces = self.search_space.get_integer_spaces()
if continuous_spaces is not None:
# mutate continuous spaces characteristics
spaces_sigma = continuous_spaces.get_strategy_parameters()
spaces_sigma *= np.exp(self.tau_r_global * np.random.normal(0, 1) +
self.tau_r_local * np.random.normal(0, 1, len(spaces_sigma)))
continuous_characteristics = self.get_continuous_characteristics()
continuous_characteristics += spaces_sigma * np.random.normal(0, 1, len(continuous_characteristics))
continuous_spaces.set_strategy_parameters(spaces_sigma)
self.set_continuous_characteristics(continuous_characteristics)
if integer_spaces is not None:
# mutate integer spaces characteristics
spaces_eta = integer_spaces.get_strategy_parameters()
spaces_eta *= np.exp(self.tau_i_global * np.random.normal(0, 1) +
self.tau_i_local * np.random.normal(0, 1, len(spaces_eta)))
spaces_eta = spaces_eta.clip(min=1)
integer_characteristics = self.get_integer_characteristics()
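            # Integer mutation (MIES): add the difference g1 - g2 of two geometrically
            # distributed steps, a discrete, symmetric analogue of Gaussian mutation.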
psi = 1 - ((spaces_eta / len(integer_characteristics)) /
(1 + np.sqrt(1 + (spaces_eta / len(integer_characteristics))**2)))
(u1, u2) = (np.random.rand(), np.random.rand())
g1 = np.floor(np.log(1 - u1) / np.log(1. - psi)).astype(int)
g2 = np.floor(np.log(1 - u2) / np.log(1. - psi)).astype(int)
integer_characteristics += g1 - g2
integer_spaces.set_strategy_parameters(spaces_eta)
self.set_integer_characteristics(integer_characteristics)
self.boundary_handling()
def boundary_handling(self):
"""
Keep solution within boundaries
"""
# TODO improve boundary handling - clipping adds bias?
lower_bounds, upper_bounds = list(zip(*self.search_space.get_spaces_boundaries()))
self.characteristics = np.clip(self.characteristics, lower_bounds, upper_bounds)
class SearchSpace(object):
"""
Defines a search space which is comprised of multiple spaces
"""
def __init__(self, spaces=None, boundaries=None, strategy_pars=None):
"""
        A search space may be formed by providing Space objects directly OR boundaries and strategy pars, since space
        objects already contain their boundaries.
If boundaries and strategy pars are provided, space objects are created.
:param spaces: Spaces in this search space
:param boundaries: Spaces boundaries
:param strategy_pars: Spaces strategy parameters
"""
self._spaces = spaces
self._boundaries = boundaries
self._strategy_pars = strategy_pars
self._parameters_consolidation() # reads parameters
# following is a dict of possible space types to respective indexes on self.spaces list
self.idx_dict = {space_type: [] for space_type in POSSIBLE_SPACE_TYPES}
self.spaces = []
if self._spaces is None:
for space_idx, ((lower_bound, upper_bound), strategy_par) in enumerate(zip(self._boundaries,
self._strategy_pars)):
# Make sure types are consistent. Lower limit must be lower than upper limit. After identifying type,
# specific space class is instantiated, and index added to dict of index reference
assert type(lower_bound) == type(upper_bound), f'Types of boundaries differs: {lower_bound} ({type(lower_bound)}) != {upper_bound} ({type(upper_bound)}) '
assert lower_bound < upper_bound, f'Lower limit ({lower_bound}) must be lower than upper limit ({upper_bound})'
if type(lower_bound) == int:
self.spaces.append(IntegerSpace(lower_bound, upper_bound, strategy_par))
elif type(lower_bound) == float:
self.spaces.append(ContinuousSpace(lower_bound, upper_bound, strategy_par))
self.idx_dict[self.spaces[-1].type].append(space_idx)
else:
self.spaces = self._spaces
for space_idx, space in enumerate(self.spaces):
self.idx_dict[space.type].append(space_idx)
self.spaces = np.asarray(self.spaces, dtype='object')
def _parameters_consolidation(self):
"""
Treats the case of providing either spaces or boundaries.
If strategy pars are not provided, default are used.
"""
spaces = self._spaces
boundaries = self._boundaries
strategy_pars = self._strategy_pars
if spaces is None:
assert boundaries is not None, 'Should either supply a list of spaces or space boundaries to SearchSpace'
assert hasattr(boundaries, '__iter__') and not isinstance(boundaries, str), \
'boundaries parameter must be iterable'
# force strategy_pars to be iterable
if not hasattr(strategy_pars, '__iter__') and not isinstance(strategy_pars, str):
strategy_pars = [strategy_pars]
# making sure we have a list of tuples on boundaries
if hasattr(boundaries[0], '__iter__') and not isinstance(boundaries[0], str):
boundaries = [tuple(boundary) for boundary in boundaries]
else:
# probably single list or tuple
boundaries = [tuple(boundaries)]
# making sure we have tuples with two elements
for boundary in boundaries:
assert len(boundary) % 2 == 0, 'args should represent boundaries (pairs):' \
'(lower_limit1, upper_limit1, lower_limit2, upper_limit2)'
# making sure that, if strategy pars was supplied, it matches the number of spaces boundaries
if strategy_pars[0] is not None:
assert len(boundaries) == len(strategy_pars), 'Length of boundaries must match strategy pars'
else:
strategy_pars = strategy_pars*len(boundaries)
else:
# force strategy_pars to be iterable
if not hasattr(spaces, '__iter__') and not isinstance(spaces, str):
spaces = [spaces]
self._spaces = spaces
self._boundaries = boundaries
self._strategy_pars = strategy_pars
def __mul__(self, other_search_space):
"""
Calculates the product of search space objects
:return:
"""
if isinstance(other_search_space, SearchSpace):
return SearchSpace(spaces=self.spaces.tolist() + other_search_space.spaces.tolist())
elif isinstance(other_search_space, GenericSpace):
return SearchSpace(spaces=self.spaces.tolist() + [other_search_space])
else:
raise TypeError(f"can't multiply SearchSpace with type {type(other_search_space).__name__}")
def get_spaces_boundaries(self, as_list_of_tuples=True):
"""
Obtain all spaces boundaries within this search space
        :param as_list_of_tuples: Return as a list of (lower, upper) tuples if True, as a single flat list if False
"""
boundaries = []
for space in self.spaces:
if as_list_of_tuples:
boundaries.append(space.get_boundary())
else:
boundaries.extend(space.get_boundary())
return boundaries
def get_strategy_parameters(self):
"""
Get all search spaces strategy parameters
:return: strategy parameters list
"""
strategy_pars = []
for space in self.spaces:
strategy_pars += [space.get_strategy_parameter()]
return np.asarray(strategy_pars)
def set_strategy_parameters(self, values):
"""
Set strategy parameters for all search spaces.
:param values: Values to be set
"""
assert len(values) == len(self.spaces), 'Number of values must be the same as number of spaces.'
for space, strategy_par in zip(self.spaces, values):
space.set_strategy_parameter(strategy_par)
def get_types(self):
"""
Return all types of spaces in this search space.
        Possible candidates are defined in POSSIBLE_SPACE_TYPES
"""
return [space_type for space_type, idxs in self.idx_dict.items() if idxs]
def get_continuous_spaces(self):
"""
Return list of continuous spaces
"""
return self._get_space_type(space_type='c')
def get_integer_spaces(self):
"""
Return list of integer spaces
"""
return self._get_space_type(space_type='i')
def _get_space_type(self, space_type):
"""
        Get all spaces of a specific type. Possible types are defined in POSSIBLE_SPACE_TYPES
:param space_type: (str) selected type
:return: list of spaces with this type
"""
return SearchSpace(spaces=self.spaces[self.idx_dict[space_type]]) if self.idx_dict[space_type] else None
class GenericSpace(object):
"""
    Implements an interface class for a Space
"""
def __init__(self, lower_limit, upper_limit):
self.lower_limit = lower_limit
self.upper_limit = upper_limit
self.type = None
def get_strategy_parameter(self):
raise NotImplementedError('GenericSpace should not be used directly. Use either Integer or Continuous space.')
def set_strategy_parameter(self, value):
raise NotImplementedError('GenericSpace should not be used directly. Use either Integer or Continuous space.')
def get_boundary(self):
return self.lower_limit, self.upper_limit
def get_sample(self):
raise NotImplementedError('GenericSpace should not be used directly. Use either Integer or Continuous space.')
def __mul__(self, other_space):
if isinstance(other_space, GenericSpace):
return SearchSpace(spaces=[self, other_space])
elif isinstance(other_space, SearchSpace):
return SearchSpace(spaces=[self] + other_space.spaces.tolist())
else:
raise TypeError(f"can't multiply GenericSpace with type {type(other_space).__name__}")
class IntegerSpace(GenericSpace):
"""
Implements the integer space
"""
def __init__(self, lower_limit, upper_limit, mean_step_size=None):
super().__init__(int(lower_limit), int(upper_limit))
self.type = 'i'
if mean_step_size is not None:
self.mean_step_size = mean_step_size
else:
# default strategy parameter
self.mean_step_size = 0.05 * (self.upper_limit - self.lower_limit)
def get_strategy_parameter(self):
"""
Return this space strategy parameter
"""
return self.mean_step_size
def set_strategy_parameter(self, value):
"""
Set a new value for this space strategy parameter
:param value: Value to be set
"""
self.mean_step_size = value
def get_sample(self):
"""
Get a random value within this space using a uniform distribution
:return: Random sample
"""
return np.random.randint(self.lower_limit, self.upper_limit + 1)
class ContinuousSpace(GenericSpace):
"""
Implements the continuous space
"""
def __init__(self, lower_limit, upper_limit, std_dev=None):
super().__init__(float(lower_limit), float(upper_limit))
self.type = 'c'
if std_dev is not None:
self.std_dev = std_dev
else:
# default strategy parameter
self.std_dev = 0.05 * (self.upper_limit-self.lower_limit)
def get_strategy_parameter(self):
"""
Return this space strategy parameter
"""
return self.std_dev
def set_strategy_parameter(self, value):
"""
Set a new value for this space strategy parameter
:param value: Value to be set
"""
self.std_dev = value
def get_sample(self):
"""
Get a random value within this space using a uniform distribution
:return: Random sample
"""
return (self.upper_limit - self.lower_limit) * np.random.rand() + self.lower_limit
if __name__ == '__main__':
# Following is used for debugging.
C = ContinuousSpace(-10, 10)
I = IntegerSpace(0, 10)
search_space1 = C
search_space2 = I
search_space1 *= C
search_space1 = C * search_space1
search_space1 = search_space1 * C
search_space2 *= I
search_space2 = I * search_space2
search_space2 = search_space2 * I
search_space = I * search_space1 * search_space2 * C
individual1 = Individual(search_space=search_space)
individual1.initialize()
individual2 = Individual(search_space=search_space)
individual2.initialize()
individual1.recombine(individual2)
individual1.mutate()
exit(0)
| 39.642424
| 170
| 0.636804
|
660d6923bf082998a3ccf923f8cdd45f54e2aea0
| 701
|
py
|
Python
|
setup.py
|
skolwa/astro-realm
|
008e6e97a256cbc2fd15ca6612f7a99c818b7d34
|
[
"MIT"
] | null | null | null |
setup.py
|
skolwa/astro-realm
|
008e6e97a256cbc2fd15ca6612f7a99c818b7d34
|
[
"MIT"
] | null | null | null |
setup.py
|
skolwa/astro-realm
|
008e6e97a256cbc2fd15ca6612f7a99c818b7d34
|
[
"MIT"
] | null | null | null |
import setuptools
with open("README.md", "r") as fh:
long_description = fh.read()
setuptools.setup(
name="astrorealm",
version="0.0.1",
author="Sthabile Kolwa",
author_email="sthabile.kolwa@gmail.com",
description='''This package provides modules for
analysing the kinematics of gas in distant radio galaxies
using telescope datasets.''',
long_description=long_description,
long_description_content_type="text/markdown",
url="https://github.com/thabsko/astrorealm",
packages=setuptools.find_packages(),
classifiers=[
"Programming Language :: Python :: 3",
"Operating System :: OS Independent",
],
python_requires='>=3.6',
)
| 30.478261
| 61
| 0.686163
|
6e5dfe88cc18ff54c530c286b03047db75316d77
| 10,163
|
py
|
Python
|
spare/cmds/farm_funcs.py
|
Spare-Network/spare-blockchain
|
9ea2677c73570131cfd02447b9cdc64cf01e0909
|
[
"Apache-2.0"
] | 122
|
2021-06-18T23:51:22.000Z
|
2022-01-15T17:51:49.000Z
|
spare/cmds/farm_funcs.py
|
zcomputerwiz/spare-blockchain
|
c48fe41ac3b2aae2e76fce0e44ab0647530147ee
|
[
"Apache-2.0"
] | 165
|
2021-06-18T23:12:20.000Z
|
2021-11-14T06:02:04.000Z
|
spare/cmds/farm_funcs.py
|
zcomputerwiz/spare-blockchain
|
c48fe41ac3b2aae2e76fce0e44ab0647530147ee
|
[
"Apache-2.0"
] | 58
|
2021-06-18T23:10:50.000Z
|
2022-03-15T08:44:02.000Z
|
from typing import Any, Dict, List, Optional
import aiohttp
from spare.cmds.units import units
from spare.consensus.block_record import BlockRecord
from spare.rpc.farmer_rpc_client import FarmerRpcClient
from spare.rpc.full_node_rpc_client import FullNodeRpcClient
from spare.rpc.wallet_rpc_client import WalletRpcClient
from spare.util.config import load_config
from spare.util.default_root import DEFAULT_ROOT_PATH
from spare.util.ints import uint16
from spare.util.misc import format_bytes
from spare.util.misc import format_minutes
SECONDS_PER_BLOCK = (24 * 3600) / 4608
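# The chain targets 4608 blocks per day, so this is the expected time between blocks (~18.75 s).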
async def get_plots(farmer_rpc_port: int) -> Optional[Dict[str, Any]]:
try:
config = load_config(DEFAULT_ROOT_PATH, "config.yaml")
self_hostname = config["self_hostname"]
if farmer_rpc_port is None:
farmer_rpc_port = config["farmer"]["rpc_port"]
farmer_client = await FarmerRpcClient.create(self_hostname, uint16(farmer_rpc_port), DEFAULT_ROOT_PATH, config)
plots = await farmer_client.get_plots()
except Exception as e:
if isinstance(e, aiohttp.ClientConnectorError):
print(f"Connection error. Check if farmer is running at {farmer_rpc_port}")
else:
print(f"Exception from 'harvester' {e}")
return None
farmer_client.close()
await farmer_client.await_closed()
return plots
async def get_blockchain_state(rpc_port: int) -> Optional[Dict[str, Any]]:
blockchain_state = None
try:
config = load_config(DEFAULT_ROOT_PATH, "config.yaml")
self_hostname = config["self_hostname"]
if rpc_port is None:
rpc_port = config["full_node"]["rpc_port"]
client = await FullNodeRpcClient.create(self_hostname, uint16(rpc_port), DEFAULT_ROOT_PATH, config)
blockchain_state = await client.get_blockchain_state()
except Exception as e:
if isinstance(e, aiohttp.ClientConnectorError):
print(f"Connection error. Check if full node is running at {rpc_port}")
else:
print(f"Exception from 'full node' {e}")
client.close()
await client.await_closed()
return blockchain_state
async def get_average_block_time(rpc_port: int) -> float:
try:
blocks_to_compare = 500
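        # Estimate the block time from the timestamps of the transaction blocks
        # nearest the peak and nearest (peak height - blocks_to_compare); fall
        # back to the protocol constant when the chain is too short.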
config = load_config(DEFAULT_ROOT_PATH, "config.yaml")
self_hostname = config["self_hostname"]
if rpc_port is None:
rpc_port = config["full_node"]["rpc_port"]
client = await FullNodeRpcClient.create(self_hostname, uint16(rpc_port), DEFAULT_ROOT_PATH, config)
blockchain_state = await client.get_blockchain_state()
curr: Optional[BlockRecord] = blockchain_state["peak"]
if curr is None or curr.height < (blocks_to_compare + 100):
client.close()
await client.await_closed()
return SECONDS_PER_BLOCK
while curr is not None and curr.height > 0 and not curr.is_transaction_block:
curr = await client.get_block_record(curr.prev_hash)
if curr is None:
client.close()
await client.await_closed()
return SECONDS_PER_BLOCK
past_curr = await client.get_block_record_by_height(curr.height - blocks_to_compare)
while past_curr is not None and past_curr.height > 0 and not past_curr.is_transaction_block:
past_curr = await client.get_block_record(past_curr.prev_hash)
if past_curr is None:
client.close()
await client.await_closed()
return SECONDS_PER_BLOCK
client.close()
await client.await_closed()
return (curr.timestamp - past_curr.timestamp) / (curr.height - past_curr.height)
except Exception as e:
if isinstance(e, aiohttp.ClientConnectorError):
print(f"Connection error. Check if full node is running at {rpc_port}")
else:
print(f"Exception from 'full node' {e}")
client.close()
await client.await_closed()
return SECONDS_PER_BLOCK
async def get_wallets_stats(wallet_rpc_port: int) -> Optional[Dict[str, Any]]:
amounts = None
try:
config = load_config(DEFAULT_ROOT_PATH, "config.yaml")
self_hostname = config["self_hostname"]
if wallet_rpc_port is None:
wallet_rpc_port = config["wallet"]["rpc_port"]
wallet_client = await WalletRpcClient.create(self_hostname, uint16(wallet_rpc_port), DEFAULT_ROOT_PATH, config)
amounts = await wallet_client.get_farmed_amount()
#
# Don't catch any exceptions, the caller will handle it
#
finally:
wallet_client.close()
await wallet_client.await_closed()
return amounts
async def is_farmer_running(farmer_rpc_port: int) -> bool:
is_running = False
try:
config = load_config(DEFAULT_ROOT_PATH, "config.yaml")
self_hostname = config["self_hostname"]
if farmer_rpc_port is None:
farmer_rpc_port = config["farmer"]["rpc_port"]
farmer_client = await FarmerRpcClient.create(self_hostname, uint16(farmer_rpc_port), DEFAULT_ROOT_PATH, config)
await farmer_client.get_connections()
is_running = True
except Exception as e:
if isinstance(e, aiohttp.ClientConnectorError):
print(f"Connection error. Check if farmer is running at {farmer_rpc_port}")
else:
print(f"Exception from 'farmer' {e}")
farmer_client.close()
await farmer_client.await_closed()
return is_running
async def get_challenges(farmer_rpc_port: int) -> Optional[List[Dict[str, Any]]]:
signage_points = None
try:
config = load_config(DEFAULT_ROOT_PATH, "config.yaml")
self_hostname = config["self_hostname"]
if farmer_rpc_port is None:
farmer_rpc_port = config["farmer"]["rpc_port"]
farmer_client = await FarmerRpcClient.create(self_hostname, uint16(farmer_rpc_port), DEFAULT_ROOT_PATH, config)
signage_points = await farmer_client.get_signage_points()
except Exception as e:
if isinstance(e, aiohttp.ClientConnectorError):
print(f"Connection error. Check if farmer is running at {farmer_rpc_port}")
else:
print(f"Exception from 'farmer' {e}")
farmer_client.close()
await farmer_client.await_closed()
return signage_points
async def challenges(farmer_rpc_port: int, limit: int) -> None:
signage_points = await get_challenges(farmer_rpc_port)
if signage_points is None:
return None
signage_points.reverse()
if limit != 0:
signage_points = signage_points[:limit]
for signage_point in signage_points:
print(
(
f"Hash: {signage_point['signage_point']['challenge_hash']} "
f"Index: {signage_point['signage_point']['signage_point_index']}"
)
)
async def summary(rpc_port: int, wallet_rpc_port: int, harvester_rpc_port: int, farmer_rpc_port: int) -> None:
all_plots = await get_plots(farmer_rpc_port)
blockchain_state = await get_blockchain_state(rpc_port)
farmer_running = await is_farmer_running(farmer_rpc_port)
wallet_not_ready: bool = False
wallet_not_running: bool = False
amounts = None
try:
amounts = await get_wallets_stats(wallet_rpc_port)
except Exception as e:
if isinstance(e, aiohttp.ClientConnectorError):
wallet_not_running = True
else:
wallet_not_ready = True
print("Farming status: ", end="")
if blockchain_state is None:
print("Not available")
elif blockchain_state["sync"]["sync_mode"]:
print("Syncing")
elif not blockchain_state["sync"]["synced"]:
print("Not synced or not connected to peers")
elif not farmer_running:
print("Not running")
else:
print("Farming")
if amounts is not None:
print(f"Total spare farmed: {amounts['farmed_amount'] / units['spare']}")
print(f"User transaction fees: {amounts['fee_amount'] / units['spare']}")
print(f"Block rewards: {(amounts['farmer_reward_amount'] + amounts['pool_reward_amount']) / units['spare']}")
print(f"Last height farmed: {amounts['last_height_farmed']}")
total_plot_size = 0
total_plots = 0
if all_plots is not None:
for harvester_ip, plots in all_plots.items():
if harvester_ip == "success":
# This key is just "success": True
continue
total_plot_size_harvester = sum(map(lambda x: x["file_size"], plots["plots"]))
total_plot_size += total_plot_size_harvester
total_plots += len(plots["plots"])
print(f"Harvester {harvester_ip}:")
print(f" {len(plots['plots'])} plots of size: {format_bytes(total_plot_size_harvester)}")
print(f"Plot count for all harvesters: {total_plots}")
print("Total size of plots: ", end="")
print(format_bytes(total_plot_size))
else:
print("Plot count: Unknown")
print("Total size of plots: Unknown")
if blockchain_state is not None:
print("Estimated network space: ", end="")
print(format_bytes(blockchain_state["space"]))
else:
print("Estimated network space: Unknown")
minutes = -1
if blockchain_state is not None and all_plots is not None:
proportion = total_plot_size / blockchain_state["space"] if blockchain_state["space"] else -1
minutes = int((await get_average_block_time(rpc_port) / 60) / proportion) if proportion else -1
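        # Worked example (hypothetical numbers): ~10 TiB of plots on a 1 EiB
        # network gives proportion ≈ 1e-5; with an average block time of
        # ~18.75 s this works out to (18.75 / 60) / 1e-5 ≈ 31,250 minutes,
        # i.e. roughly three weeks between expected wins.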
if all_plots is not None and total_plots == 0:
print("Expected time to win: Never (no plots)")
else:
print("Expected time to win: " + format_minutes(minutes))
if amounts is None:
if wallet_not_running:
print("For details on farmed rewards and fees you should run 'spare start wallet' and 'spare wallet show'")
elif wallet_not_ready:
print("For details on farmed rewards and fees you should run 'spare wallet show'")
else:
print("Note: log into your key using 'spare wallet show' to see rewards for each key")
| 39.391473
| 119
| 0.672734
|
35862e91c71fda3ec0bd73b587b7f563435ee535
| 3,887
|
py
|
Python
|
tests/entities_update_tests.py
|
aswathm78/moncli
|
745672a335e61910181c4abbf28115ded4eb6f0e
|
[
"BSD-3-Clause"
] | 40
|
2019-11-22T19:52:18.000Z
|
2022-03-26T09:03:10.000Z
|
tests/entities_update_tests.py
|
aswathm78/moncli
|
745672a335e61910181c4abbf28115ded4eb6f0e
|
[
"BSD-3-Clause"
] | 35
|
2019-09-24T22:27:43.000Z
|
2022-03-31T19:54:03.000Z
|
tests/entities_update_tests.py
|
aswathm78/moncli
|
745672a335e61910181c4abbf28115ded4eb6f0e
|
[
"BSD-3-Clause"
] | 17
|
2020-04-10T18:55:10.000Z
|
2022-03-14T14:45:24.000Z
|
from unittest.mock import patch
from nose.tools import ok_, eq_
from moncli import client, entities as en
TEST_USER = en.User(**{'creds': None, 'id': '1', 'email': 'foo.bar@test.com'})
@patch('moncli.api_v2.get_updates')
def test_update_should_return_list_of_replies(get_updates):
# Arrange
get_updates.return_value = [{'id': '1', 'creator_id': '1', 'item_id': '1', 'replies': [{'id': '2', 'creator_id': '1'}]}]
# Act
updates = client.get_updates()
# Assert
ok_(updates != None)
eq_(len(updates), 1)
eq_(len(updates[0].replies), 1)
@patch('moncli.api_v2.get_updates')
@patch('moncli.api_v2.get_users')
def test_update_should_return_creator(get_users, get_updates):
# Arrange
get_updates.return_value = [{'id': '1', 'creator_id': '1', 'item_id': '1'}]
get_users.return_value = [TEST_USER.to_primitive()]
update = client.get_updates()[0]
# Assert
ok_(update != None)
eq_(update.creator.to_primitive(), TEST_USER.to_primitive())
@patch('moncli.api_v2.get_updates')
@patch('moncli.api_v2.get_users')
def test_update_should_return_creator_of_update_reply(get_users, get_updates):
# Arrange
get_updates.return_value = [{'id': '1', 'creator_id': '1', 'item_id': '1', 'replies': [{'id': '2', 'creator_id': '1'}]}]
get_users.return_value = [TEST_USER.to_primitive()]
reply = client.get_updates()[0].replies[0]
# Assert
ok_(reply != None)
eq_(reply.creator.to_primitive(), TEST_USER.to_primitive())
@patch('moncli.api_v2.get_updates')
def test_should_return_list_of_replies_for_an_update(get_updates):
# Arrange
reply_id = '12345'
reply_body = 'Reply text'
get_updates.return_value = [{'id': '1', 'creator_id': '1', 'item_id': '1'}]
update = client.get_updates()[0]
get_updates.return_value = [{'id': '1', 'creator_id': '1', 'item_id': '1', 'replies': [{'id': reply_id, 'body': reply_body}]}]
# Act
replies = update.get_replies()
# Assert
ok_(replies)
eq_(replies[0].id, reply_id)
eq_(replies[0].body, reply_body)
@patch('moncli.api_v2.get_updates')
@patch('moncli.api_v2.add_file_to_update')
def test_should_add_file_to_update(add_file_to_update, get_updates):
# Arrange
id = '12345'
name = '33.jpg'
url = 'https://test.monday.com/12345/33.jpg'
get_updates.return_value = [{'id': '1', 'item_id': '1', 'creator_id': TEST_USER.id}]
add_file_to_update.return_value = {'id': '12345', 'name': name, 'url': url}
update = client.get_updates()[0]
# Act
asset = update.add_file('/Users/test/33.jpg')
# Assert
ok_(asset != None)
eq_(asset.id, id)
eq_(asset.name, name)
eq_(asset.url, url)
@patch('moncli.api_v2.get_updates')
def test_should_get_files_from_update(get_updates):
# Arrange
id = '12345'
name = '33.jpg'
url = 'https://test.monday.com/12345/33.jpg'
get_updates.return_value = [{'id': '1', 'item_id': '1', 'creator_id': '1'}]
update = client.get_updates()[0]
get_updates.return_value = [{'id': '1', 'item_id': '1', 'creator_id': '1', 'assets': [{'id': id, 'name': name, 'url': url}]}]
# Act
assets = update.get_files()
# Assert
ok_(assets)
eq_(assets[0].id, id)
eq_(assets[0].name, name)
eq_(assets[0].url, url)
@patch('moncli.api_v2.get_updates')
@patch('moncli.api_v2.delete_update')
def test_should_delete_update(delete_update, get_updates):
# Arrange
id = '1'
item_id = '1'
creator_id = '1'
get_updates.return_value = [{'id': id, 'item_id': item_id, 'creator_id': creator_id}]
delete_update.return_value = {'id': id, 'item_id': item_id, 'creator_id': creator_id}
update = client.get_updates()[0]
# Act
update = update.delete()
# Assert
ok_(update)
eq_(update.id, id)
eq_(update.item_id, item_id)
eq_(update.creator_id, creator_id)
| 28.166667
| 130
| 0.646771
|
14cdb270096ebf0717e20664b85c6322d0620f75
| 1,155
|
py
|
Python
|
aggcat/utils.py
|
djedi/python-aggcat
|
a922ee4a7eabf3714e62cbeda73c8fece511f5ca
|
[
"Unlicense"
] | 1
|
2021-01-14T21:44:09.000Z
|
2021-01-14T21:44:09.000Z
|
aggcat/utils.py
|
djedi/python-aggcat
|
a922ee4a7eabf3714e62cbeda73c8fece511f5ca
|
[
"Unlicense"
] | null | null | null |
aggcat/utils.py
|
djedi/python-aggcat
|
a922ee4a7eabf3714e62cbeda73c8fece511f5ca
|
[
"Unlicense"
] | null | null | null |
from StringIO import StringIO
from lxml import etree
def remove_namespaces(tree):
    """Remove the namespaces from XML for easier parsing"""
    io = StringIO()
xslt = """
<xsl:stylesheet version="1.0" xmlns:xsl="http://www.w3.org/1999/XSL/Transform">
<xsl:output method="xml" indent="no"/>
<xsl:template match="/|comment()|processing-instruction()">
<xsl:copy>
<xsl:apply-templates/>
</xsl:copy>
</xsl:template>
<xsl:template match="*">
<xsl:element name="{local-name()}">
<xsl:apply-templates select="@*|node()"/>
</xsl:element>
</xsl:template>
<xsl:template match="@*">
<xsl:attribute name="{local-name()}">
<xsl:value-of select="."/>
</xsl:attribute>
</xsl:template>
</xsl:stylesheet>
"""
xslt_root = etree.XML(xslt)
transform = etree.XSLT(xslt_root)
parsed_tree = transform(tree)
parsed_tree.write(io)
return io.getvalue()
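# Minimal usage sketch (illustrative; assumes Python 2 to match the StringIO
# import above, and a hypothetical namespaced document):
if __name__ == "__main__":
    xml = '<a:root xmlns:a="http://example.com/ns"><a:item>1</a:item></a:root>'
    tree = etree.parse(StringIO(xml))
    # Prints the document with the "a:" prefixes and xmlns declaration stripped,
    # e.g. '<root><item>1</item></root>'
    print(remove_namespaces(tree))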
| 32.083333
| 91
| 0.502165
|
2708f38bdc1150aa45b31ba3dc352ee0cbd5a996
| 5,720
|
py
|
Python
|
dashboard/dashboard/doctype/dashboard_items/dashboard_items.py
|
TridotsTech/FrappeDashboardBuilder
|
2f8ff177694d13bd5941c3bcba4056f2bb5d19ac
|
[
"MIT"
] | 1
|
2019-08-18T08:16:00.000Z
|
2019-08-18T08:16:00.000Z
|
dashboard/dashboard/doctype/dashboard_items/dashboard_items.py
|
TridotsTech/FrappeDashboardBuilder
|
2f8ff177694d13bd5941c3bcba4056f2bb5d19ac
|
[
"MIT"
] | null | null | null |
dashboard/dashboard/doctype/dashboard_items/dashboard_items.py
|
TridotsTech/FrappeDashboardBuilder
|
2f8ff177694d13bd5941c3bcba4056f2bb5d19ac
|
[
"MIT"
] | 1
|
2020-10-11T13:22:51.000Z
|
2020-10-11T13:22:51.000Z
|
# -*- coding: utf-8 -*-
# Copyright (c) 2018, info@valiantsystems.com and contributors
# For license information, please see license.txt
from __future__ import unicode_literals
import frappe
from frappe.model.document import Document
class DashboardItems(Document):
# def autoname(self):
# self.name=self.display_text+' - '+self.type
def validate(self):
if self.type=='Counter':
self.query_field=set_counter_query(self)
self.css_style=set_css_property(self)
elif self.type=='Table':
self.query_field=set_table_query(self)
else:
assign_condition_query(self)
@frappe.whitelist()
def set_counter_query(self):
query=''
if self.counter_type=='Count':
query='SELECT count(doc.*) as count from `tab{doctype}` doc'.format(doctype=self.reference_doctype)
else:
query='SELECT sum(doc.{field}) as sum from `tab{doctype}` doc'.format(doctype=self.reference_doctype,field=self.referred_field)
if self.is_child_table_based==1 and self.reference_child_doc_name:
query+=',`tab{childdoc}` childdoc'.format(childdoc=self.reference_child_doc_name)
if self.date_range=='Daily':
query+=' where CAST(creation as DATE)=CURDATE()'
elif self.date_range=='Weekly':
        query+=' where creation BETWEEN DATE_SUB(CURDATE(), INTERVAL 7 DAY) and CURDATE()'
elif self.date_range=='Monthly':
query+=' where MONTH(CURDATE())=MONTH(creation)'
    if self.is_child_table_based==1 and self.date_range!="All Time":
        query+=' and childdoc.parent=doc.name'
elif self.is_child_table_based==1 and self.date_range=="All Time":
query+=' where childdoc.parent=doc.name'
docfields=frappe.get_meta(self.reference_doctype).get("fields")
if self.conditions:
for cond in self.conditions:
if not cond.fieldtype:
cond.fieldtype=next((x.fieldtype for x in docfields if x.fieldname==cond.fieldname),None)
if query.find('where')==-1:
query+=' where '
else:
query+=' and '
conditions=get_cond_query(cond)
if conditions:
query+=conditions
return query
def get_cond_query(cond):
query=''
if cond.condition=='Equals':
cond.condition_symbol='='
query+='doc.{field} = "{value}"'.format(field=cond.fieldname,value=cond.value)
elif cond.condition=='Not Equals':
cond.condition_symbol='!='
query+='doc.{field} != "{value}"'.format(field=cond.fieldname,value=cond.value)
elif cond.condition=='Like':
cond.condition_symbol='like'
query+='doc.{field} like "%{value}%"'.format(field=cond.fieldname,value=cond.value)
elif cond.condition=='Not Like':
cond.condition_symbol='not like'
query+='doc.{field} not like "%{value}%"'.format(field=cond.fieldname,value=cond.value)
elif cond.condition=='In':
cond.condition_symbol='in'
values=cond.value.split('\n')
val='"'+'","'.join(values)+'"'
query+='{field} in ({value})'.format(field=cond.fieldname,value=val)
elif cond.condition=='Not In':
cond.condition_symbol='not in'
values=cond.value.split('\n')
val='"'+'","'.join(values)+'"'
query+='{field} not in ({value})'.format(field=cond.fieldname,value=val)
else:
cond.condition_query=cond.condition
query+='doc.{field} {operator} "{value}"'.format(field=cond.fieldname,operator=cond.condition,value=cond.value)
return query
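# Example (illustrative): a condition row with condition='Like', fieldname='status'
# and value='Open' yields the SQL fragment 'doc.status like "%Open%"'.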
@frappe.whitelist()
def set_table_query(self):
query='select '
if self.fields_to_specify:
query+=self.fields_to_specify[:-1]
else:
query+='*'
query+=' from `tab{doctype}`'.format(doctype=self.reference_doctype)
query=assign_conditions(self,query)
return query
@frappe.whitelist()
def assign_conditions(self,query):
if self.conditions:
docfields=frappe.get_meta(self.reference_doctype).get("fields")
for cond in self.conditions:
if not cond.fieldtype:
cond.fieldtype=next((x.fieldtype for x in docfields if x.fieldname==cond.fieldname),None)
if query.find('where')==-1:
query+=' where '
else:
query+=' and '
conditions=get_cond_query(cond)
if conditions:
query+=conditions
return query
@frappe.whitelist()
def assign_condition_query(self):
query=''
docfields=frappe.get_meta(self.reference_doctype).get("fields")
if self.datasets:
for item in self.datasets:
if self.conditions:
for c in self.conditions:
if not c.fieldtype:
c.fieldtype=next((x.fieldtype for x in docfields if x.fieldname==c.fieldname),None)
if c.condition_for==item.name:
query+=' and '
conditions=get_cond_query(c)
if conditions:
query+=conditions
item.condition_query=query
else:
query='select '
datef=next((x.fieldname for x in docfields if x.label==self.date_fields),None)
if self.value_type=='Count':
query+='count(*) as value'
elif self.value_type=='Sum':
field=next((x.fieldname for x in docfields if x.label==self.value_fields),None)
query+='sum({field}) as value'.format(field=field)
query+=',{field} as label from `tab{doctype}`'.format(doctype=self.reference_doctype,field=datef)
query=assign_conditions(self,query)
query+=' group by {field}'.format(field=datef)
query+=' order by value {type}'.format(type=('asc' if self.order_by=='Ascending' else 'desc'))
query+=' limit {limit}'.format(limit=(self.no_of_graph_records if self.no_of_graph_records>0 else 10))
self.query_field=query
@frappe.whitelist()
def set_css_property(self):
css=''
if self.text_color:
css+='color:'+self.text_color+';'
if self.bg_type=='Image Background':
if self.background_image:
css+='background-image:url("'+self.background_image+'");'
elif self.bg_type=='Gradient Background':
if self.gradient_type=='Linear':
css+='background-image:linear-gradient(to '+self.linear_gradient_direction.lower()+','+self.bg_1+','+self.bg_2+');'
else:
css+='background-image:radial-gradient('+self.bg_1+','+self.bg_2+');'
else:
css+='background:'+self.bg_1+';'
return css
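# Example (illustrative): with text_color='#ffffff', bg_type='Gradient Background',
# gradient_type='Linear', linear_gradient_direction='Right', bg_1='#e66465' and
# bg_2='#9198e5', set_css_property returns
# 'color:#ffffff;background-image:linear-gradient(to right,#e66465,#9198e5);'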
| 35.974843
| 129
| 0.715909
|
8870165d006985a6aa9e568fff9254b5d4578c1d
| 5,913
|
py
|
Python
|
dataprep/clean/clean_iban.py
|
devinllu/dataprep
|
d56861e5bed3c608cace74983f797dc729072d0a
|
[
"MIT"
] | 1
|
2022-02-14T07:18:00.000Z
|
2022-02-14T07:18:00.000Z
|
dataprep/clean/clean_iban.py
|
devinllu/dataprep
|
d56861e5bed3c608cace74983f797dc729072d0a
|
[
"MIT"
] | null | null | null |
dataprep/clean/clean_iban.py
|
devinllu/dataprep
|
d56861e5bed3c608cace74983f797dc729072d0a
|
[
"MIT"
] | null | null | null |
"""
Clean and validate a DataFrame column containing IBAN numbers.
"""
# pylint: disable=too-many-lines, too-many-arguments, too-many-branches, unused-argument
from typing import Any, Union
from operator import itemgetter
import dask.dataframe as dd
import numpy as np
import pandas as pd
from stdnum import iban
from ..progress_bar import ProgressBar
from .utils import NULL_VALUES, to_dask
def clean_iban(
df: Union[pd.DataFrame, dd.DataFrame],
column: str,
output_format: str = "standard",
split: bool = False,
inplace: bool = False,
errors: str = "coerce",
progress: bool = True,
) -> pd.DataFrame:
"""
Clean IBAN type data in a DataFrame column.
Parameters
----------
df
A pandas or Dask DataFrame containing the data to be cleaned.
column
        The name of the column containing data of IBAN type.
output_format
The output format of standardized number string.
If output_format = 'compact', return string without any separators.
If output_format = 'standard', return string with proper separators.
(default: "standard")
split
If True,
        each component derived from the number string will be put into its own column.
(default: False)
inplace
If True, delete the column containing the data that was cleaned.
Otherwise, keep the original column.
(default: False)
errors
How to handle parsing errors.
- ‘coerce’: invalid parsing will be set to NaN.
- ‘ignore’: invalid parsing will return the input.
- ‘raise’: invalid parsing will raise an exception.
(default: 'coerce')
progress
If True, display a progress bar.
(default: True)
Examples
--------
    Clean a column of IBAN data.
    >>> df = pd.DataFrame({
            "iban": [
                "GB82 WEST 1234 5698 7654 32",
                "hello"]
        })
    >>> clean_iban(df, 'iban', inplace=True)
                        iban_clean
    0  GB82 WEST 1234 5698 7654 32
    1                          NaN
"""
if output_format not in {"compact", "standard"}:
raise ValueError(
f"output_format {output_format} is invalid. " 'It needs to be "compact", "standard".'
)
# convert to dask
df = to_dask(df)
# To clean, create a new column "clean_code_tup" which contains
# the cleaned values and code indicating how the initial value was
# changed in a tuple. Then split the column of tuples and count the
# amount of different codes to produce the report
df["clean_code_tup"] = df[column].map_partitions(
lambda srs: [_format(x, output_format, split, errors) for x in srs],
meta=object,
)
if split:
# For some reason the meta data for the last 3 components needs to be
# set. I think this is a dask bug
df = df.assign(
_temp_=df["clean_code_tup"].map(itemgetter(0), meta=("_temp", object)),
country_code=df["clean_code_tup"].map(itemgetter(1), meta=("country_code", object)),
check_digits=df["clean_code_tup"].map(itemgetter(2), meta=("check_digits", object)),
bban_code=df["clean_code_tup"].map(itemgetter(3), meta=("bban_code", object)),
)
else:
df = df.assign(
_temp_=df["clean_code_tup"].map(itemgetter(0)),
)
df = df.rename(columns={"_temp_": f"{column}_clean"})
df = df.drop(columns=["clean_code_tup"])
if inplace:
df[column] = df[f"{column}_clean"]
df = df.drop(columns=f"{column}_clean")
df = df.rename(columns={column: f"{column}_clean"})
with ProgressBar(minimum=1, disable=not progress):
df = df.compute()
    # output a report describing the result of clean_iban
return df
def validate_iban(
df: Union[str, pd.Series, dd.Series, pd.DataFrame, dd.DataFrame],
column: str = "",
) -> Union[bool, pd.Series, pd.DataFrame]:
"""
Validate if a data cell is IBAN in a DataFrame column. For each cell, return True or False.
Parameters
----------
df
A pandas or Dask DataFrame containing the data to be validated.
column
The name of the column to be validated.
"""
if isinstance(df, (pd.Series, dd.Series)):
return df.apply(iban.is_valid)
elif isinstance(df, (pd.DataFrame, dd.DataFrame)):
if column != "":
return df[column].apply(iban.is_valid)
else:
return df.applymap(iban.is_valid)
return iban.is_valid(df)
def _format(
    val: Any, output_format: str = "standard", split: bool = False, errors: str = "coerce"
) -> Any:
"""
Reformat a number string with proper separators (formats).
Parameters
----------
val
The value of number string.
output_format
If output_format = 'compact', return string without any separators.
If output_format = 'standard', return string with proper separators function.
"""
val = str(val)
result: Any = []
if val in NULL_VALUES:
if split:
return [np.nan, np.nan, np.nan, np.nan]
else:
return [np.nan]
if not validate_iban(val):
if errors == "raise":
raise ValueError(f"Unable to parse value {val}")
error_result = val if errors == "ignore" else np.nan
if split:
return [error_result, np.nan, np.nan, np.nan]
else:
return [error_result]
if split:
compacted_val = iban.compact(val)
result = [compacted_val[:2], compacted_val[2:4], compacted_val[4:]]
if output_format == "compact":
result = [iban.compact(val)] + result
elif output_format == "standard":
result = [iban.format(val)] + result
return result
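# Behaviour of the `stdnum.iban` helpers relied on above (illustrative values
# using the standard GB test IBAN; not executed by this module):
#   >>> iban.is_valid("GB82 WEST 1234 5698 7654 32")
#   True
#   >>> iban.compact("GB82 WEST 1234 5698 7654 32")
#   'GB82WEST12345698765432'
#   >>> iban.format("GB82WEST12345698765432")
#   'GB82 WEST 1234 5698 7654 32'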
| 31.121053
| 97
| 0.603754
|
47d23289200133407a8a8de01a79ac3693b8e84d
| 2,319
|
py
|
Python
|
plugins/libravatar/libravatar.py
|
mohnjahoney/website_source
|
edc86a869b90ae604f32e736d9d5ecd918088e6a
|
[
"MIT"
] | 13
|
2020-01-27T09:02:25.000Z
|
2022-01-20T07:45:26.000Z
|
plugins/libravatar/libravatar.py
|
mohnjahoney/website_source
|
edc86a869b90ae604f32e736d9d5ecd918088e6a
|
[
"MIT"
] | 29
|
2020-03-22T06:57:57.000Z
|
2022-01-24T22:46:42.000Z
|
plugins/libravatar/libravatar.py
|
mohnjahoney/website_source
|
edc86a869b90ae604f32e736d9d5ecd918088e6a
|
[
"MIT"
] | 6
|
2020-07-10T00:13:30.000Z
|
2022-01-26T08:22:33.000Z
|
"""Libravatar plugin for Pelican"""
## Copyright (C) 2015 Rafael Laboissiere <rafael@laboissiere.net>
##
## This program is free software: you can redistribute it and/or modify it
## under the terms of the GNU Affero General Public License as published by
## the Free Software Foundation, either version 3 of the License, or (at
## your option) any later version.
##
## This program is distributed in the hope that it will be useful, but
## WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
## Affero General Public License for more details.
##
## You should have received a copy of the GNU Affero General Public License
## along with this program. If not, see http://www.gnu.org/licenses/.
import hashlib
from pelican import signals
def initialize(pelicanobj):
"""Initialize the Libravatar plugin"""
pelicanobj.settings.setdefault("LIBRAVATAR_MISSING", None)
pelicanobj.settings.setdefault("LIBRAVATAR_SIZE", None)
def add_libravatar(generator, metadata):
"""Article generator connector for the Libravatar plugin"""
missing = generator.settings.get("LIBRAVATAR_MISSING")
size = generator.settings.get("LIBRAVATAR_SIZE")
## Check the presence of the Email header
if "email" not in metadata.keys():
try:
metadata["email"] = generator.settings.get("AUTHOR_EMAIL")
except:
pass
## Add the Libravatar URL
if metadata["email"]:
## Compose URL using the MD5 hash
## (the ascii encoding is necessary for Python3)
email = metadata["email"].lower().encode("ascii")
md5 = hashlib.md5(email).hexdigest()
url = "http://cdn.libravatar.org/avatar/" + md5
## Add eventual "missing picture" option
if missing or size:
url = url + "?"
if missing:
url = url + "d=" + missing
if size:
url = url + "&"
if size:
url = url + "s=" + str(size)
## Add URL to the article's metadata
metadata["author_libravatar"] = url
def register():
"""Register the Libravatar plugin with Pelican"""
signals.initialized.connect(initialize)
signals.article_generator_context.connect(add_libravatar)
| 34.102941
| 75
| 0.661492
|
dd7309a51f5a56c0054b8786a504826624e60ee0
| 8,601
|
py
|
Python
|
summarytable.py
|
dalekreitler-bnl/summarytable
|
d6a82a54b73482d0fe6e607fb37f8bd88b9fe302
|
[
"MIT"
] | null | null | null |
summarytable.py
|
dalekreitler-bnl/summarytable
|
d6a82a54b73482d0fe6e607fb37f8bd88b9fe302
|
[
"MIT"
] | null | null | null |
summarytable.py
|
dalekreitler-bnl/summarytable
|
d6a82a54b73482d0fe6e607fb37f8bd88b9fe302
|
[
"MIT"
] | null | null | null |
#!/bin/python
import xmltodict
from pathlib import PurePath
import os
from time import sleep
import fcntl
def scan_directory(dirpath,
dirs_to_avoid=["dozor"],
sort_dirs=True,
filepatterns=["fast_dp.xml","autoPROC.xml"]):
path_dict = {}
for (dirpath,dirnames,filenames) in os.walk(dirpath,topdown=True):
dirnames[:] = [d for d in dirnames if d not in dirs_to_avoid]
for f in filenames:
if f in filepatterns:
path_dict[f"{os.path.join(dirpath,f)}"] = os.path.getmtime(os.path.join(dirpath,f))
return path_dict
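# scan_directory() maps each matching XML file to its last-modified time, e.g.
# {'/data/mx/sample_1/fast_dp_dir/fast_dp.xml': 1650000000.0} (illustrative).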
class DataDirectory:
def __init__(self,dirpath):
self._dirpath = dirpath
self._observer_list = []
self._path_dict = {}
def attach(self,observer):
self._observer_list.append(observer)
def notify(self,path_set):
[observer.update(path_set) for observer in self._observer_list]
def check_directory(self):
path_dict = scan_directory(self._dirpath)
path_set_to_send = set([])
for pd in path_dict:
#check for new file
if pd in self._path_dict:
#check for update to file
if path_dict[pd] - self._path_dict[pd] > 0:
path_set_to_send.add(pd)
else:
path_set_to_send.add(pd)
if path_set_to_send:
self.notify(sorted(path_set_to_send,key=os.path.getmtime))
self._path_dict = path_dict
class FileObserver:
def __init__(self):
self._results = ''.join([make_header(),'\n'])
def update(self, paths):
new_results = '\n'.join([format_results_string(parse_fdp_xml(fp))
for fp in paths if parse_fdp_xml(fp)])
new_results = ''.join([new_results,'\n'])
self._results = ''.join([self._results,new_results])
with open('fast_dp.summary.txt','w') as f:
fcntl.flock(f,fcntl.LOCK_SH)
f.write(self._results)
class DisplayObserver:
def __init__(self):
print(make_header())
def update(self, paths): #display results in order files created
[print(format_results_string(parse_fdp_xml(f))) for f in paths
if parse_fdp_xml(f)]
def make_header():
first_row = ''.join([f"{'':34}",
f'{"|---------------Overall------------------|":^14}',
f'{"|-------------Outer-Shell----------------|":^14}\n',])
formatted_string=''.join([f'{"Sample Path":>34}',
f'{"Hi":>7}',
f'{"Lo":>7}',
f'{"R_mrg":>7}',
f'{"cc12":>7}',
f'{"comp":>7}',
f'{"mult":>7}',
f'{"Hi":>7}',
f'{"Lo":>7}',
f'{"R_mrg":>7}',
f'{"cc12":>7}',
f'{"comp":>7}',
f'{"mult":>7}',
f'{"symm":>12}',
f'{"a":>7}',
f'{"b":>7}',
f'{"c":>7}',
f'{"alpha":>7}',
f'{"beta":>7}',
f'{"gamma":>7}'])
return ''.join([first_row,formatted_string])
def parse_fdp_xml(filename):
with open(filename) as f:
fdp_xml = xmltodict.parse(f.read())
#sometimes there are multiple program attachment entries
try:
path=fdp_xml["AutoProcContainer"]\
["AutoProcProgramContainer"]\
["AutoProcProgramAttachment"]\
["filePath"]
except TypeError:
path=fdp_xml["AutoProcContainer"]\
["AutoProcProgramContainer"]\
["AutoProcProgramAttachment"][0]\
["filePath"]
path_parts = PurePath(path).parts
sample_name_path = '/'.join([path_parts[-4],path_parts[-3]])
try:
#all resolution shells
overall = fdp_xml["AutoProcContainer"]\
["AutoProcScalingContainer"]\
["AutoProcScalingStatistics"][0]
#pertinent values for table
res_lim_low_overall = float(overall["resolutionLimitLow"])
res_lim_high_overall = float(overall["resolutionLimitHigh"])
r_merge_overall = float(overall["rMerge"])
cc_half_overall = float(overall["ccHalf"])
comp_overall = float(overall["completeness"])
mult_overall = float(overall["multiplicity"])
#outer resolution shell
outer = fdp_xml["AutoProcContainer"]\
["AutoProcScalingContainer"]\
["AutoProcScalingStatistics"][2]
#pertinent values for table
res_lim_low_outer = float(outer["resolutionLimitLow"])
res_lim_high_outer = float(outer["resolutionLimitHigh"])
r_merge_outer = float(outer["rMerge"])
cc_half_outer = float(outer["ccHalf"])
comp_outer = float(outer["completeness"])
mult_outer = float(outer["multiplicity"])
#symmetry info
cell = fdp_xml["AutoProcContainer"]["AutoProc"]
space_group = cell["spaceGroup"]
a = float(cell["refinedCell_a"])
b = float(cell["refinedCell_b"])
c = float(cell["refinedCell_c"])
alpha = float(cell["refinedCell_alpha"])
beta = float(cell["refinedCell_beta"])
gamma = float(cell["refinedCell_gamma"])
return (sample_name_path,
res_lim_high_overall,
res_lim_low_overall,
r_merge_overall,
cc_half_overall,
comp_overall,
mult_overall,
res_lim_high_outer,
res_lim_low_outer,
r_merge_outer,
cc_half_outer,
comp_outer,
mult_outer,
space_group,
a,
b,
c,
alpha,
beta,
gamma)
except KeyError:
return ''
def format_results_string(*args):
result_string = args[0]
try:
formatted_string=''.join([f'{result_string[0]:>34}',#sample path
f'{result_string[1]:7.2f}',#high res cut overall
f'{result_string[2]:7.2f}',#low res cut overall
f'{result_string[3]:7.3f}',#R_merge overall
f'{result_string[4]:7.2f}',#cc12 overall
f'{result_string[5]:7.2f}',#completeness overall
f'{result_string[6]:7.2f}',#multiplicity overall
f'{result_string[7]:7.2f}',#high res cut outer
f'{result_string[8]:7.2f}',#low res cut outer
f'{result_string[9]:7.3f}',#R_merge outer
f'{result_string[10]:7.2f}',#cc12 outer
f'{result_string[11]:7.2f}',#completeness outer
f'{result_string[12]:7.2f}',#multiplicity outer
f'{result_string[13]:>12}',#space group
f'{result_string[14]:7.1f}',#a
f'{result_string[15]:7.1f}',#b
f'{result_string[16]:7.1f}',#c
f'{result_string[17]:7.1f}',#alpha
f'{result_string[18]:7.1f}',#beta
f'{result_string[19]:7.1f}'])#gamma
return formatted_string
except IndexError:
return result_string
if __name__ == "__main__":
test_dir = os.getcwd()
try:
if os.path.basename(test_dir) == "fast_dp_dir":
mx_directory = PurePath(test_dir).parent
print(mx_directory)
d = DataDirectory(mx_directory)
dobs = DisplayObserver()
fobs = FileObserver()
d.attach(dobs)
d.attach(fobs)
while True:
d.check_directory()
sleep(10)
else:
print("summarytable must be launched from fast_dp_dir...")
except KeyboardInterrupt:
print("\nExiting gracefully...")
| 38.397321
| 99
| 0.488897
|
83413188b8941713b5a3de52a46c23b1290228d8
| 9,540
|
py
|
Python
|
backend/pyrogram/utils.py
|
appheap/social-media-analyzer
|
0f9da098bfb0b4f9eb38e0244aa3a168cf97d51c
|
[
"Apache-2.0"
] | 5
|
2021-09-11T22:01:15.000Z
|
2022-03-16T21:33:42.000Z
|
backend/pyrogram/utils.py
|
iamatlasss/social-media-analyzer
|
429d1d2bbd8bfce80c50c5f8edda58f87ace668d
|
[
"Apache-2.0"
] | null | null | null |
backend/pyrogram/utils.py
|
iamatlasss/social-media-analyzer
|
429d1d2bbd8bfce80c50c5f8edda58f87ace668d
|
[
"Apache-2.0"
] | 3
|
2022-01-18T11:06:22.000Z
|
2022-02-26T13:39:28.000Z
|
import asyncio
import base64
import functools
import hashlib
import os
import struct
from concurrent.futures.thread import ThreadPoolExecutor
from getpass import getpass
from typing import Union, List, Optional, Dict
import pyrogram
from pyrogram import raw
from pyrogram import types
from pyrogram.file_id import FileId, FileType, PHOTO_TYPES, DOCUMENT_TYPES
async def ainput(prompt: str = "", *, hide: bool = False):
"""Just like the built-in input, but async"""
with ThreadPoolExecutor(1) as executor:
func = functools.partial(getpass if hide else input, prompt)
return await asyncio.get_event_loop().run_in_executor(executor, func)
def get_input_media_from_file_id(
file_id: str,
expected_file_type: FileType = None
) -> Union["raw.types.InputMediaPhoto", "raw.types.InputMediaDocument"]:
try:
decoded = FileId.decode(file_id)
except Exception:
raise ValueError(f'Failed to decode "{file_id}". The value does not represent an existing local file, '
f'HTTP URL, or valid file id.')
file_type = decoded.file_type
if expected_file_type is not None and file_type != expected_file_type:
raise ValueError(f"Expected {expected_file_type.name}, got {file_type.name} file id instead")
if file_type in (FileType.THUMBNAIL, FileType.CHAT_PHOTO):
raise ValueError(f"This file id can only be used for download: {file_id}")
if file_type in PHOTO_TYPES:
return raw.types.InputMediaPhoto(
id=raw.types.InputPhoto(
id=decoded.media_id,
access_hash=decoded.access_hash,
file_reference=decoded.file_reference
)
)
if file_type in DOCUMENT_TYPES:
return raw.types.InputMediaDocument(
id=raw.types.InputDocument(
id=decoded.media_id,
access_hash=decoded.access_hash,
file_reference=decoded.file_reference
)
)
raise ValueError(f"Unknown file id: {file_id}")
def get_offset_date(dialogs):
for m in reversed(dialogs.messages):
if isinstance(m, raw.types.MessageEmpty):
continue
else:
return m.date
else:
return 0
def unpack_inline_message_id(inline_message_id: str) -> "raw.types.InputBotInlineMessageID":
r = inline_message_id + "=" * (-len(inline_message_id) % 4)
r = struct.unpack("<iqq", base64.b64decode(r, altchars=b"-_"))
return raw.types.InputBotInlineMessageID(
dc_id=r[0],
id=r[1],
access_hash=r[2]
)
MIN_CHANNEL_ID = -1002147483647
MAX_CHANNEL_ID = -1000000000000
MIN_CHAT_ID = -2147483647
MAX_USER_ID = 2147483647
def get_raw_peer_id(peer: raw.base.Peer) -> Optional[int]:
"""Get the raw peer id from a Peer object"""
if isinstance(peer, raw.types.PeerUser):
return peer.user_id
if isinstance(peer, raw.types.PeerChat):
return peer.chat_id
if isinstance(peer, raw.types.PeerChannel):
return peer.channel_id
return None
def get_peer_id(peer: raw.base.Peer) -> int:
"""Get the non-raw peer id from a Peer object"""
if isinstance(peer, raw.types.PeerUser):
return peer.user_id
if isinstance(peer, raw.types.PeerChat):
return -peer.chat_id
if isinstance(peer, raw.types.PeerChannel):
return MAX_CHANNEL_ID - peer.channel_id
raise ValueError(f"Peer type invalid: {peer}")
def get_peer_type(peer_id: int) -> str:
if peer_id < 0:
if MIN_CHAT_ID <= peer_id:
return "chat"
if MIN_CHANNEL_ID <= peer_id < MAX_CHANNEL_ID:
return "channel"
elif 0 < peer_id <= MAX_USER_ID:
return "user"
raise ValueError(f"Peer id invalid: {peer_id}")
def get_channel_id(peer_id: int) -> int:
return MAX_CHANNEL_ID - peer_id
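# Worked example (illustrative): a raw channel id of 1234567890 corresponds to
# the client-facing peer id MAX_CHANNEL_ID - 1234567890 == -1001234567890, and
# get_channel_id(-1001234567890) recovers 1234567890.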
def btoi(b: bytes) -> int:
return int.from_bytes(b, "big")
def itob(i: int) -> bytes:
return i.to_bytes(256, "big")
def sha256(data: bytes) -> bytes:
return hashlib.sha256(data).digest()
def xor(a: bytes, b: bytes) -> bytes:
return bytes(i ^ j for i, j in zip(a, b))
def compute_password_hash(algo: raw.types.PasswordKdfAlgoSHA256SHA256PBKDF2HMACSHA512iter100000SHA256ModPow,
password: str) -> bytes:
hash1 = sha256(algo.salt1 + password.encode() + algo.salt1)
hash2 = sha256(algo.salt2 + hash1 + algo.salt2)
hash3 = hashlib.pbkdf2_hmac("sha512", hash2, algo.salt1, 100000)
return sha256(algo.salt2 + hash3 + algo.salt2)
# noinspection PyPep8Naming
def compute_password_check(r: raw.types.account.Password, password: str) -> raw.types.InputCheckPasswordSRP:
algo = r.current_algo
p_bytes = algo.p
p = btoi(algo.p)
g_bytes = itob(algo.g)
g = algo.g
B_bytes = r.srp_B
B = btoi(B_bytes)
srp_id = r.srp_id
x_bytes = compute_password_hash(algo, password)
x = btoi(x_bytes)
g_x = pow(g, x, p)
k_bytes = sha256(p_bytes + g_bytes)
k = btoi(k_bytes)
kg_x = (k * g_x) % p
while True:
a_bytes = os.urandom(256)
a = btoi(a_bytes)
A = pow(g, a, p)
A_bytes = itob(A)
u = btoi(sha256(A_bytes + B_bytes))
if u > 0:
break
g_b = (B - kg_x) % p
ux = u * x
a_ux = a + ux
S = pow(g_b, a_ux, p)
S_bytes = itob(S)
K_bytes = sha256(S_bytes)
M1_bytes = sha256(
xor(sha256(p_bytes), sha256(g_bytes))
+ sha256(algo.salt1)
+ sha256(algo.salt2)
+ A_bytes
+ B_bytes
+ K_bytes
)
return raw.types.InputCheckPasswordSRP(srp_id=srp_id, A=A_bytes, M1=M1_bytes)
async def parse_text_entities(
client: "pyrogram.Client",
text: str,
parse_mode: str,
entities: List["types.MessageEntity"]
) -> Dict[str, raw.base.MessageEntity]:
if entities:
# Inject the client instance because parsing user mentions requires it
for entity in entities:
entity._client = client
text, entities = text, [await entity.write() for entity in entities]
else:
text, entities = (await client.parser.parse(text, parse_mode)).values()
return {
"message": text,
"entities": entities
}
async def maybe_run_in_executor(func, data, length, loop, *args):
return (
func(data, *args)
if length <= pyrogram.CRYPTO_EXECUTOR_SIZE_THRESHOLD
else await loop.run_in_executor(pyrogram.crypto_executor, func, data, *args)
)
async def parse_admin_log_events(
client: "pyrogram.Client",
admin_log_results: raw.base.channels.AdminLogResults
) -> List["types.ChannelAdminLogEvent"]:
users = {i.id: i for i in admin_log_results.users}
chats = {i.id: i for i in admin_log_results.chats}
if not admin_log_results.events:
return types.List()
parsed_events = []
for event in admin_log_results.events:
parsed_event = await types.ChannelAdminLogEvent._parse(client, event, users, chats)
if parsed_event:
parsed_events.append(parsed_event)
return types.List(parsed_events) if len(parsed_events) else types.List()
async def parse_message_views(
client,
message_views: "raw.types.messages.MessageViews",
message_ids: list
) -> List["types.MessageViews"]:
users = {i.id: i for i in message_views.users}
chats = {i.id: i for i in message_views.chats}
if not message_views.views:
return types.List()
parsed_views = []
for message_id, view in zip(message_ids, message_views.views):
parsed_view = await types.MessageViews._parse(client, message_id, view, users, chats)
if parsed_view:
parsed_views.append(parsed_view)
return types.List(parsed_views)
async def parse_messages(
client,
messages: "raw.types.messages.Messages",
replies: int = 1,
is_scheduled: bool = False,
) -> List["types.Message"]:
users = {i.id: i for i in messages.users}
chats = {i.id: i for i in messages.chats}
if not messages.messages:
return types.List()
parsed_messages = []
for message in messages.messages:
parsed_messages.append(
await types.Message._parse(
client,
message,
users,
chats,
is_scheduled=is_scheduled,
replies=0
)
)
return types.List(parsed_messages)
def parse_deleted_messages(client, update) -> List["types.Message"]:
messages = update.messages
channel_id = getattr(update, "channel_id", None)
parsed_messages = []
for message in messages:
parsed_messages.append(
types.Message(
message_id=message,
chat=types.Chat(
id=get_channel_id(channel_id),
type="channel",
client=client
) if channel_id is not None else None,
client=client,
type='empty',
)
)
return types.List(parsed_messages)
def parse_search_counters(r: List["raw.types.messages.SearchCounter"]) -> List["types.SearchCounter"]:
parsed_objects = []
for raw_search_counter in r:
parsed_objects.append(
types.SearchCounter._parse(
raw_obj=raw_search_counter
)
)
return list(filter(lambda obj: obj is not None, parsed_objects))
| 27.492795
| 111
| 0.634696
|
806154c4643ec23d664f319d2d54100c301db412
| 1,487
|
py
|
Python
|
python_examples/example_box.py
|
adam-urbanczyk/cpp-solnp
|
97ac9296e39bf3dedcf3eacc1abc659e7eb7c6fa
|
[
"BSL-1.0"
] | null | null | null |
python_examples/example_box.py
|
adam-urbanczyk/cpp-solnp
|
97ac9296e39bf3dedcf3eacc1abc659e7eb7c6fa
|
[
"BSL-1.0"
] | null | null | null |
python_examples/example_box.py
|
adam-urbanczyk/cpp-solnp
|
97ac9296e39bf3dedcf3eacc1abc659e7eb7c6fa
|
[
"BSL-1.0"
] | null | null | null |
"""
To test this algorithm, then:
1) Install this package, for example through pip or by running "pip install ." from the cpp_solnp folder.
2) Run this file with Python
"""
import pysolnp
def box_objective_function(x):
result = -1 * x[0] * x[1] * x[2]
return result
def box_equality_function(x):
result = [
4 * x[0] * x[1] + 2 * x[1] * x[2] + 2 * x[2] * x[0]
]
return result
starting_point = [1.1,
1.1,
9.0]
lower_bound = [1.0,
1.0,
1.0]
upper_bound = [10.0,
10.0,
10.0]
equality_values = [100]
if __name__ == "__main__":
result = pysolnp.solve(
obj_func=box_objective_function,
par_start_value=starting_point,
par_lower_limit=lower_bound,
par_upper_limit=upper_bound,
eq_func=box_equality_function,
eq_values=equality_values)
final_parameters = result.optimum
print(final_parameters)
print(result.solve_value)
print(result.callbacks)
print(box_equality_function(final_parameters))
final_objective_value = box_objective_function(final_parameters)
equality_constaints = box_equality_function(final_parameters)
for index, value in enumerate(final_parameters):
distance_to_lower = value - lower_bound[index]
distance_to_over = upper_bound[index] - value
print(f"Distance for index {index}: lower {distance_to_lower} upper {distance_to_over}")
| 25.20339
| 105
| 0.643578
|
efb2a17bd35c02b1526c4307d92920f90e491922
| 487
|
py
|
Python
|
interview/project/tests/base.py
|
Nkarnaud/interview_mgt
|
21c0cb987135b09b37fa1f5250a2eb96d9bb61b8
|
[
"Apache-2.0"
] | 1
|
2019-08-08T10:25:48.000Z
|
2019-08-08T10:25:48.000Z
|
interview/project/tests/base.py
|
Nkarnaud/interview_mgt
|
21c0cb987135b09b37fa1f5250a2eb96d9bb61b8
|
[
"Apache-2.0"
] | null | null | null |
interview/project/tests/base.py
|
Nkarnaud/interview_mgt
|
21c0cb987135b09b37fa1f5250a2eb96d9bb61b8
|
[
"Apache-2.0"
] | null | null | null |
from flask_testing import TestCase
from project import create_app, db
# App initialisation
app = create_app()
# Defining the base test configuration
class BaseTestCase(TestCase):
def create_app(self):
app.config.from_object('project.config.TestingConfig')
return app
# Database setup
def setUp(self):
db.create_all()
db.session.commit()
# Database tear down
def tearDown(self):
db.session.remove()
db.drop_all()
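# Minimal usage sketch (illustrative, not part of the original suite): a test
# case built on BaseTestCase that only checks the app was created with the
# testing configuration, assuming project.config.TestingConfig sets
# TESTING = True, as is conventional.
class SmokeTestCase(BaseTestCase):
    def test_app_uses_testing_config(self):
        # flask_testing.TestCase exposes the app built by create_app as self.app
        self.assertTrue(self.app.config['TESTING'])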
| 19.48
| 62
| 0.671458
|
9dbebbb28a51f9d7841d3172e62feed098293d12
| 49,605
|
py
|
Python
|
scripts/slave/recipes/findit/chromium/test.py
|
mithro/chromium-build
|
98d83e124dc08510756906171922a22ba27b87fa
|
[
"BSD-3-Clause"
] | null | null | null |
scripts/slave/recipes/findit/chromium/test.py
|
mithro/chromium-build
|
98d83e124dc08510756906171922a22ba27b87fa
|
[
"BSD-3-Clause"
] | null | null | null |
scripts/slave/recipes/findit/chromium/test.py
|
mithro/chromium-build
|
98d83e124dc08510756906171922a22ba27b87fa
|
[
"BSD-3-Clause"
] | null | null | null |
# Copyright 2015 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
from collections import defaultdict
import json
from recipe_engine.config import Dict
from recipe_engine.config import List
from recipe_engine.config import Single
from recipe_engine.recipe_api import Property
DEPS = [
'adb',
'buildbucket',
'depot_tools/bot_update',
'chromium',
'chromium_android',
'chromium_checkout',
'chromium_swarming',
'chromium_tests',
'commit_position',
'depot_tools/gclient',
'depot_tools/git',
'filter',
'findit',
'isolate',
'recipe_engine/context',
'recipe_engine/json',
'recipe_engine/path',
'recipe_engine/platform',
'recipe_engine/properties',
'recipe_engine/python',
'recipe_engine/raw_io',
'recipe_engine/step',
'swarming',
'test_results',
'test_utils',
]
PROPERTIES = {
'target_mastername': Property(
kind=str, help='The target master to match compile config to.'),
'target_testername': Property(
kind=str,
help='The target tester to match test config to. If the tests are run '
'on a builder, just treat the builder as a tester.'),
'good_revision': Property(
kind=str, help='The last known good revision.'),
'bad_revision': Property(
      kind=str, help='The first known bad revision.'),
'tests': Property(
kind=Dict(value_type=list),
default={},
help='The failed tests, the test name should be full name, e.g.: {'
' "browser_tests": ['
' "suite.test1", "suite.test2"'
' ]'
'}'),
'buildbucket': Property(
default=None,
help='The buildbucket property in which we can find build id.'
'We need to use build id to get tests.'),
'use_analyze': Property(
kind=Single(bool, empty_val=False, required=False), default=True,
help='Use analyze to skip commits that do not affect tests.'),
'suspected_revisions': Property(
kind=List(basestring), default=[],
help='A list of suspected revisions from heuristic analysis.'),
'test_on_good_revision': Property(
kind=Single(bool, empty_val=False, required=False), default=True,
help='Run test on good revision as well if the first revision '
'in range is suspected.'),
'test_repeat_count': Property(
kind=Single(int, required=False), default=20,
help='How many times to repeat the tests.'),
}
def _get_reduced_test_dict(original_test_dict, failed_tests_dict):
# Remove tests that are in both dicts from the original test dict.
if not failed_tests_dict:
return original_test_dict
reduced_dict = defaultdict(list)
for step, tests in original_test_dict.iteritems():
remain_tests = list(set(tests) - set(failed_tests_dict.get(step, [])))
if remain_tests:
reduced_dict[step] = remain_tests
return reduced_dict
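  # e.g. _get_reduced_test_dict({'gl_tests': ['Test.One', 'Test.Two']},
  #                             {'gl_tests': ['Test.One']})
  # -> {'gl_tests': ['Test.Two']} (tests whose culprit is still unknown).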
def _get_flaky_tests(test_results):
# Uses pass_fail_count to get flaky tests.
flaky_tests = defaultdict(list)
if not test_results:
return flaky_tests
for step, result in test_results.iteritems():
pass_fail_counts = result.get('pass_fail_counts')
if not pass_fail_counts:
continue
for test, test_counts in pass_fail_counts.iteritems():
if test_counts.get('pass_count') and test_counts.get('fail_count'):
flaky_tests[step].append(test)
return flaky_tests
def _consolidate_flaky_tests(all_flakes, new_flakes):
for step, tests in new_flakes.iteritems():
all_flakes[step] = list(set(all_flakes[step]) | set(tests))
def RunSteps(api, target_mastername, target_testername, good_revision,
bad_revision, tests, buildbucket, use_analyze,
suspected_revisions, test_on_good_revision, test_repeat_count):
tests, target_buildername, checked_out_revision, cached_revision = (
api.findit.configure_and_sync(api, tests, buildbucket, target_mastername,
target_testername, bad_revision))
# retrieve revisions in the regression range.
revisions_to_check = api.findit.revisions_between(good_revision, bad_revision)
suspected_revision_index = [
revisions_to_check.index(r)
for r in set(suspected_revisions) if r in revisions_to_check]
# Segments revisions_to_check by suspected_revisions.
# Each sub_range will contain following elements:
# 1. Revision before a suspected_revision or None as a placeholder
# when no such revision
# 2. Suspected_revision
# 3. Revisions between a suspected_revision and the revision before next
# suspected_revision, or remaining revisions before all suspect_revisions.
# For example, if revisions_to_check are [r0, r1, ..., r6] and
# suspected_revisions are [r2, r5], sub_ranges will be:
  # [[r4, r5, r6], [r1, r2, r3], [None, r0]] (newest sub-range first, since
  # newer revisions are tested first below).
if suspected_revision_index:
# If there are consecutive revisions being suspected, include them
# in the same sub_range by only saving the oldest revision.
suspected_revision_index = [i for i in suspected_revision_index
if i - 1 not in suspected_revision_index]
sub_ranges = []
remaining_revisions = revisions_to_check[:]
for index in sorted(suspected_revision_index, reverse=True):
if index > 0:
# try job will not run linearly, sets use_analyze to False.
use_analyze = False
sub_ranges.append(remaining_revisions[index - 1:])
remaining_revisions = remaining_revisions[:index - 1]
# None is a placeholder for the last known good revision.
sub_ranges.append([None] + remaining_revisions)
else:
# Treats the entire regression range as a single sub-range.
sub_ranges = [[None] + revisions_to_check]
test_results = {}
try_job_metadata = {
'regression_range_size': len(revisions_to_check)
}
report = {
'result': test_results,
'metadata': try_job_metadata,
'previously_checked_out_revision': checked_out_revision,
'previously_cached_revision': cached_revision
}
revision_being_checked = None
found = False
flakes = defaultdict(list)
try:
culprits = defaultdict(dict)
# Tests that haven't found culprits in tested revision(s).
tests_have_not_found_culprit = tests
# Iterates through sub_ranges and find culprits for each failed test.
# Sub-ranges with newer revisions are tested first so we have better chance
# that try job will reproduce exactly the same failure as in waterfall.
for sub_range in sub_ranges:
if not tests_have_not_found_culprit: # All tests have found culprits.
break
# The revision right before the suspected revision provided by
# the heuristic result.
potential_green_rev = sub_range[0]
following_revisions = sub_range[1:]
if potential_green_rev:
revision_being_checked = potential_green_rev
test_results[potential_green_rev], tests_failed_in_potential_green = (
api.findit.compile_and_test_at_revision(
api, target_mastername, target_buildername, target_testername,
potential_green_rev, tests_have_not_found_culprit, use_analyze,
test_repeat_count=test_repeat_count))
else:
tests_failed_in_potential_green = {}
# Looks for reliably failed tests.
flaky_tests_in_potential_green = _get_flaky_tests(test_results.get(
potential_green_rev))
_consolidate_flaky_tests(flakes, flaky_tests_in_potential_green)
tests_passed_in_potential_green = _get_reduced_test_dict(
tests_have_not_found_culprit, tests_failed_in_potential_green
)
# Culprits for tests that failed in potential green should be earlier, so
# removes failed tests and only runs passed ones in following revisions.
if tests_passed_in_potential_green:
tests_to_run = tests_passed_in_potential_green
for revision in following_revisions:
revision_being_checked = revision
# Since tests_to_run are tests that passed in previous revision,
# whichever test that fails now will find current revision is the
# culprit.
test_results[revision], tests_failed_in_revision = (
api.findit.compile_and_test_at_revision(
api, target_mastername, target_buildername, target_testername,
revision, tests_to_run, use_analyze, test_repeat_count))
flaky_tests_in_revision = _get_flaky_tests(test_results[revision])
reliable_failed_tests_in_revision = _get_reduced_test_dict(
tests_failed_in_revision, flaky_tests_in_revision)
_consolidate_flaky_tests(flakes, flaky_tests_in_revision)
# Removes tests that passed in potential green and failed in
# following revisions: culprits have been found for them.
tests_have_not_found_culprit = _get_reduced_test_dict(
tests_have_not_found_culprit, tests_failed_in_revision)
# Only runs tests that have not found culprits in later revisions.
tests_to_run = _get_reduced_test_dict(
tests_to_run, tests_failed_in_revision)
# Records found culprits.
for step, test_list in reliable_failed_tests_in_revision.iteritems():
for test in test_list:
culprits[step][test] = revision
if not tests_to_run:
break
if culprits and test_on_good_revision:
# Need to deflake by running on good revision.
tests_run_on_good_revision = defaultdict(list)
for step, step_culprits in culprits.iteritems():
for test, test_culprit in step_culprits.iteritems():
if test_culprit == revisions_to_check[0]:
tests_run_on_good_revision[step].append(test)
if tests_run_on_good_revision:
test_results[good_revision], tests_failed_in_revision = (
api.findit.compile_and_test_at_revision(
api, target_mastername, target_buildername, target_testername,
good_revision, tests_run_on_good_revision, use_analyze,
test_repeat_count))
if tests_failed_in_revision:
# Some tests also failed on good revision, they should be flaky.
# Should remove them from culprits.
new_culprits = defaultdict(dict)
for step, step_culprits in culprits.iteritems():
for test, test_culprit in step_culprits.iteritems():
if test in tests_failed_in_revision.get(step, []):
continue
new_culprits[step][test] = test_culprit
culprits = new_culprits
_consolidate_flaky_tests(flakes, tests_failed_in_revision)
found = bool(culprits)
except api.step.InfraFailure:
test_results[revision_being_checked] = api.findit.TestResult.INFRA_FAILED
report['metadata']['infra_failure'] = True
raise
finally:
if found:
report['culprits'] = culprits
if flakes:
report['flakes'] = flakes
# Give the full report including test results and metadata.
api.python.succeeding_step(
'report', [json.dumps(report, indent=2)], as_log='report')
return report
def GenTests(api):
def props(
tests, platform_name, tester_name, use_analyze=False, good_revision=None,
bad_revision=None, suspected_revisions=None, buildbucket=None,
test_on_good_revision=True, test_repeat_count=20):
properties = {
'path_config': 'kitchen',
'mastername': 'tryserver.chromium.%s' % platform_name,
'buildername': '%s_chromium_variable' % platform_name,
'bot_id': 'build1-a1',
'buildnumber': 1,
'target_mastername': 'chromium.%s' % platform_name,
'target_testername': tester_name,
'good_revision': good_revision or 'r0',
'bad_revision': bad_revision or 'r1',
'use_analyze': use_analyze,
'test_on_good_revision': test_on_good_revision,
'test_repeat_count': test_repeat_count,
}
if tests:
properties['tests'] = tests
if suspected_revisions:
properties['suspected_revisions'] = suspected_revisions
if buildbucket:
properties['buildbucket'] = buildbucket
return api.properties(**properties) + api.platform.name(platform_name)
yield (
api.test('nonexistent_test_step_skipped') +
props({'newly_added_tests': ['Test.One', 'Test.Two', 'Test.Three']},
'win', 'Win7 Tests (1)') +
api.override_step_data(
'test r1.read test spec (chromium.win.json)',
api.json.output({
'Win7 Tests (1)': {
'gtest_tests': [
{
'test': 'gl_tests',
'swarming': {'can_use_on_swarming_builders': True},
},
],
},
})
)
)
yield (
api.test('unaffected_test_skipped_by_analyze') +
props({'affected_tests': ['Test.One'], 'unaffected_tests': ['Test.Two']},
'win', 'Win7 Tests (1)', use_analyze=True) +
api.override_step_data(
'test r1.read test spec (chromium.win.json)',
api.json.output({
'Win7 Tests (1)': {
'gtest_tests': [
{
'test': 'affected_tests',
'swarming': {'can_use_on_swarming_builders': True},
},
{
'test': 'unaffected_tests',
'swarming': {'can_use_on_swarming_builders': True},
},
],
},
})
) +
api.override_step_data(
'test r1.analyze',
api.json.output({
'status': 'Found dependency',
'compile_targets': ['affected_tests', 'affected_tests_run'],
'test_targets': ['affected_tests', 'affected_tests_run'],
})
) +
api.override_step_data(
'test r1.affected_tests (r1)',
api.swarming.canned_summary_output() +
api.test_utils.simulated_gtest_output(passed_test_names=['Test.One'])
)
)
yield (
api.test('test_without_targets_not_skipped') +
props({'unaffected_tests': ['Test.One'], 'checkperms': []},
'win', 'Win7 Tests (1)', use_analyze=True) +
api.override_step_data(
'test r1.read test spec (chromium.win.json)',
api.json.output({
'Win7 Tests (1)': {
'gtest_tests': [
{
'test': 'unaffected_tests',
'swarming': {'can_use_on_swarming_builders': True},
},
],
'scripts': [
{
'name': 'checkperms',
'script': 'checkperms.py'
},
]
},
})
) +
api.override_step_data(
'test r1.analyze',
api.json.output({
'status': 'No dependencies',
'compile_targets': [],
'test_targets': [],
})
)
)
yield (
api.test('all_test_failed') +
props({'gl_tests': ['Test.One', 'Test.Two', 'Test.Three']},
'win', 'Win7 Tests (1)', test_on_good_revision=False) +
api.override_step_data(
'test r1.read test spec (chromium.win.json)',
api.json.output({
'Win7 Tests (1)': {
'gtest_tests': [
{
'test': 'gl_tests',
'swarming': {'can_use_on_swarming_builders': True},
},
],
},
})
) +
api.override_step_data(
'test r1.gl_tests (r1)',
api.swarming.canned_summary_output(failure=True) +
api.test_utils.simulated_gtest_output(
failed_test_names=['Test.One', 'Test.Two', 'Test.Three'])
)
)
yield (
api.test('all_test_passed') +
props({'gl_tests': ['Test.One', 'Test.Two', 'Test.Three']},
'win', 'Win7 Tests (1)') +
api.override_step_data(
'test r1.read test spec (chromium.win.json)',
api.json.output({
'Win7 Tests (1)': {
'gtest_tests': [
{
'test': 'gl_tests',
'swarming': {'can_use_on_swarming_builders': True},
},
],
},
})
) +
api.override_step_data(
'test r1.gl_tests (r1)',
api.swarming.canned_summary_output() +
api.test_utils.simulated_gtest_output(
passed_test_names=['Test.One', 'Test.Two', 'Test.Three'])
)
)
yield (
api.test('only_one_test_passed') +
props({'gl_tests': ['Test.One', 'Test.Two', 'Test.Three']},
'win', 'Win7 Tests (1)') +
api.override_step_data(
'test r0.read test spec (chromium.win.json)',
api.json.output({
'Win7 Tests (1)': {
'gtest_tests': [
{
'test': 'gl_tests',
'swarming': {'can_use_on_swarming_builders': True},
},
],
},
})
) +
api.override_step_data(
'test r0.gl_tests (r0)',
api.swarming.canned_summary_output(failure=True) +
api.test_utils.simulated_gtest_output(
passed_test_names=['Test.One', 'Test.Two'])
) +
api.override_step_data(
'test r1.read test spec (chromium.win.json)',
api.json.output({
'Win7 Tests (1)': {
'gtest_tests': [
{
'test': 'gl_tests',
'swarming': {'can_use_on_swarming_builders': True},
},
],
},
})
) +
api.override_step_data(
'test r1.gl_tests (r1)',
api.swarming.canned_summary_output(failure=True) +
api.test_utils.simulated_gtest_output(
failed_test_names=['Test.One', 'Test.Two'],
passed_test_names=['Test.Three'])
)
)
yield (
api.test('compile_skipped') +
props({'checkperms': []}, 'win', 'Win7 Tests (1)') +
api.override_step_data(
'test r1.read test spec (chromium.win.json)',
api.json.output({
'Win7 Tests (1)': {
'scripts': [
{
'name': 'checkperms',
'script': 'checkperms.py'
},
]
},
})
)
)
yield (
api.test('none_swarming_tests') +
props({'gl_tests': ['Test.One', 'Test.Two', 'Test.Three']},
'win', 'Win7 Tests (1)', test_on_good_revision=False) +
api.override_step_data(
'test r1.read test spec (chromium.win.json)',
api.json.output({
'Win7 Tests (1)': {
'gtest_tests': [
{
'test': 'gl_tests',
'swarming': {'can_use_on_swarming_builders': False},
},
],
},
})
) +
api.override_step_data(
'test r1.gl_tests (r1)',
api.swarming.canned_summary_output(failure=True) +
api.test_utils.simulated_gtest_output(
failed_test_names=['Test.One', 'Test.Two'],
passed_test_names=['Test.Three'])
)
)
yield (
api.test('swarming_tests') +
props({'gl_tests': ['Test.One']}, 'mac', 'Mac10.9 Tests') +
api.override_step_data(
'test r1.read test spec (chromium.mac.json)',
api.json.output({
'Mac10.9 Tests': {
'gtest_tests': [
{
'test': 'gl_tests',
'swarming': {'can_use_on_swarming_builders': True},
},
],
},
})
) +
api.override_step_data(
'test r1.gl_tests (r1)',
api.swarming.canned_summary_output() +
api.test_utils.simulated_gtest_output(passed_test_names=['Test.One'])
)
)
yield (
api.test('findit_culprit_in_last_sub_range') +
props(
{'gl_tests': ['Test.One']}, 'mac', 'Mac10.9 Tests', use_analyze=False,
good_revision='r0', bad_revision='r6', suspected_revisions=['r3']) +
api.override_step_data(
'test r2.read test spec (chromium.mac.json)',
api.json.output({
'Mac10.9 Tests': {
'gtest_tests': [
{
'test': 'gl_tests',
'swarming': {'can_use_on_swarming_builders': True},
},
],
},
})
) +
api.override_step_data(
'test r3.read test spec (chromium.mac.json)',
api.json.output({
'Mac10.9 Tests': {
'gtest_tests': [
{
'test': 'gl_tests',
'swarming': {'can_use_on_swarming_builders': True},
},
],
},
})
) +
api.override_step_data(
'git commits in range',
api.raw_io.stream_output(
'\n'.join('r%d' % i for i in reversed(range(1, 7))))) +
api.override_step_data(
'test r2.gl_tests (r2)',
api.swarming.canned_summary_output() +
api.test_utils.simulated_gtest_output(
passed_test_names=['Test.One'])) +
api.override_step_data(
'test r3.gl_tests (r3)',
api.swarming.canned_summary_output(failure=True) +
api.test_utils.simulated_gtest_output(failed_test_names=['Test.One']))
)
yield (
api.test('findit_culprit_in_middle_sub_range') +
props(
{'gl_tests': ['Test.One']}, 'mac', 'Mac10.9 Tests', use_analyze=False,
good_revision='r0', bad_revision='r6',
suspected_revisions=['r3', 'r6']) +
api.override_step_data(
'test r2.read test spec (chromium.mac.json)',
api.json.output({
'Mac10.9 Tests': {
'gtest_tests': [
{
'test': 'gl_tests',
'swarming': {'can_use_on_swarming_builders': True},
},
],
},
})
) +
api.override_step_data(
'test r3.read test spec (chromium.mac.json)',
api.json.output({
'Mac10.9 Tests': {
'gtest_tests': [
{
'test': 'gl_tests',
'swarming': {'can_use_on_swarming_builders': True},
},
],
},
})
) +
api.override_step_data(
'test r5.read test spec (chromium.mac.json)',
api.json.output({
'Mac10.9 Tests': {
'gtest_tests': [
{
'test': 'gl_tests',
'swarming': {'can_use_on_swarming_builders': True},
},
],
},
})
) +
api.override_step_data(
'test r6.read test spec (chromium.mac.json)',
api.json.output({
'Mac10.9 Tests': {
'gtest_tests': [
{
'test': 'gl_tests',
'swarming': {'can_use_on_swarming_builders': True},
},
],
},
})
) +
api.override_step_data(
'git commits in range',
api.raw_io.stream_output(
'\n'.join('r%d' % i for i in reversed(range(1, 7))))) +
api.override_step_data(
'test r2.gl_tests (r2)',
api.swarming.canned_summary_output() +
api.test_utils.simulated_gtest_output(
passed_test_names=['Test.One'])) +
api.override_step_data(
'test r3.gl_tests (r3)',
api.swarming.canned_summary_output(failure=True) +
api.test_utils.simulated_gtest_output(
failed_test_names=['Test.One'])) +
api.override_step_data(
'test r5.gl_tests (r5)',
api.swarming.canned_summary_output(failure=True) +
api.test_utils.simulated_gtest_output(
passed_test_names=['Test.One'])) +
api.override_step_data(
'test r6.gl_tests (r6)',
api.swarming.canned_summary_output() +
api.test_utils.simulated_gtest_output(passed_test_names=['Test.One']))
)
yield (
api.test('findit_culprit_in_first_sub_range') +
props(
{'gl_tests': ['Test.One']}, 'mac', 'Mac10.9 Tests', use_analyze=False,
good_revision='r0', bad_revision='r6',
suspected_revisions=['r6'], test_on_good_revision=False) +
api.override_step_data(
'test r1.read test spec (chromium.mac.json)',
api.json.output({
'Mac10.9 Tests': {
'gtest_tests': [
{
'test': 'gl_tests',
'swarming': {'can_use_on_swarming_builders': True},
},
],
},
})
) +
api.override_step_data(
'test r5.read test spec (chromium.mac.json)',
api.json.output({
'Mac10.9 Tests': {
'gtest_tests': [
{
'test': 'gl_tests',
'swarming': {'can_use_on_swarming_builders': True},
},
],
},
})
) +
api.override_step_data(
'test r6.read test spec (chromium.mac.json)',
api.json.output({
'Mac10.9 Tests': {
'gtest_tests': [
{
'test': 'gl_tests',
'swarming': {'can_use_on_swarming_builders': True},
},
],
},
})
) +
api.override_step_data(
'git commits in range',
api.raw_io.stream_output(
'\n'.join('r%d' % i for i in reversed(range(1, 7))))) +
api.override_step_data(
'test r1.gl_tests (r1)',
api.swarming.canned_summary_output(failure=True) +
api.test_utils.simulated_gtest_output(
failed_test_names=['Test.One'])) +
api.override_step_data(
'test r5.gl_tests (r5)',
api.swarming.canned_summary_output() +
api.test_utils.simulated_gtest_output(
passed_test_names=['Test.One'])) +
api.override_step_data(
'test r6.gl_tests (r6)',
api.swarming.canned_summary_output() +
api.test_utils.simulated_gtest_output(passed_test_names=['Test.One']))
)
yield (
api.test('findit_steps_multiple_culprits') +
props(
{'gl_tests': ['Test.gl_One'], 'browser_tests': ['Test.browser_One']},
'mac', 'Mac10.9 Tests', use_analyze=False,
good_revision='r0', bad_revision='r6',
        suspected_revisions=['r3', 'r6']) +
api.override_step_data(
'test r2.read test spec (chromium.mac.json)',
api.json.output({
'Mac10.9 Tests': {
'gtest_tests': [
{
'test': 'gl_tests',
'swarming': {'can_use_on_swarming_builders': True},
},
{
'test': 'browser_tests',
'swarming': {'can_use_on_swarming_builders': True},
},
],
},
})
) +
api.override_step_data(
'test r3.read test spec (chromium.mac.json)',
api.json.output({
'Mac10.9 Tests': {
'gtest_tests': [
{
'test': 'gl_tests',
'swarming': {'can_use_on_swarming_builders': True},
},
{
'test': 'browser_tests',
'swarming': {'can_use_on_swarming_builders': True},
},
],
},
})
) +
api.override_step_data(
'test r5.read test spec (chromium.mac.json)',
api.json.output({
'Mac10.9 Tests': {
'gtest_tests': [
{
'test': 'gl_tests',
'swarming': {'can_use_on_swarming_builders': True},
},
{
'test': 'browser_tests',
'swarming': {'can_use_on_swarming_builders': True},
},
],
},
})
) +
api.override_step_data(
'test r6.read test spec (chromium.mac.json)',
api.json.output({
'Mac10.9 Tests': {
'gtest_tests': [
{
'test': 'gl_tests',
'swarming': {'can_use_on_swarming_builders': True},
},
{
'test': 'browser_tests',
'swarming': {'can_use_on_swarming_builders': True},
},
],
},
})
) +
api.override_step_data(
'git commits in range',
api.raw_io.stream_output(
'\n'.join('r%d' % i for i in reversed(range(1, 7))))) +
api.override_step_data(
'test r5.gl_tests (r5)',
api.swarming.canned_summary_output(failure=True) +
api.test_utils.simulated_gtest_output(
failed_test_names=['Test.gl_One'])) +
api.override_step_data(
'test r5.browser_tests (r5)',
api.swarming.canned_summary_output() +
api.test_utils.simulated_gtest_output(
passed_test_names=['Test.browser_One'])) +
api.override_step_data(
'test r6.browser_tests (r6)',
api.swarming.canned_summary_output(failure=True) +
api.test_utils.simulated_gtest_output(
            failed_test_names=['Test.browser_One'])) +
api.override_step_data(
'test r2.gl_tests (r2)',
api.swarming.canned_summary_output() +
api.test_utils.simulated_gtest_output(
passed_test_names=['Test.gl_One'])) +
api.override_step_data(
'test r3.gl_tests (r3)',
api.swarming.canned_summary_output(failure=True) +
api.test_utils.simulated_gtest_output(
failed_test_names=['Test.gl_One']))
)
yield (
api.test('findit_tests_multiple_culprits') +
props(
{'gl_tests': ['Test.One', 'Test.Two', 'Test.Three']},
'mac', 'Mac10.9 Tests', use_analyze=False,
good_revision='r0', bad_revision='r6',
suspected_revisions=['r3', 'r5']) +
api.override_step_data(
'test r2.read test spec (chromium.mac.json)',
api.json.output({
'Mac10.9 Tests': {
'gtest_tests': [
{
'test': 'gl_tests',
'swarming': {'can_use_on_swarming_builders': True},
},
],
},
})
) +
api.override_step_data(
'test r3.read test spec (chromium.mac.json)',
api.json.output({
'Mac10.9 Tests': {
'gtest_tests': [
{
'test': 'gl_tests',
'swarming': {'can_use_on_swarming_builders': True},
},
],
},
})
) +
api.override_step_data(
'test r4.read test spec (chromium.mac.json)',
api.json.output({
'Mac10.9 Tests': {
'gtest_tests': [
{
'test': 'gl_tests',
'swarming': {'can_use_on_swarming_builders': True},
},
],
},
})
) +
api.override_step_data(
'test r5.read test spec (chromium.mac.json)', api.json.output({
'Mac10.9 Tests': {
'gtest_tests': [
{
'test': 'gl_tests',
'swarming': {'can_use_on_swarming_builders': True},
},
],
},
})
) +
api.override_step_data(
'test r6.read test spec (chromium.mac.json)',
api.json.output({
'Mac10.9 Tests': {
'gtest_tests': [
{
'test': 'gl_tests',
'swarming': {'can_use_on_swarming_builders': True},
},
],
},
})
) +
api.override_step_data(
'git commits in range',
api.raw_io.stream_output(
'\n'.join('r%d' % i for i in reversed(range(1, 7))))) +
api.override_step_data(
'test r4.gl_tests (r4)',
api.swarming.canned_summary_output(failure=True) +
api.test_utils.simulated_gtest_output(
passed_test_names=['Test.One', 'Test.Three'],
failed_test_names=['Test.Two'])) +
api.override_step_data(
'test r5.gl_tests (r5)',
api.swarming.canned_summary_output(failure=True) +
api.test_utils.simulated_gtest_output(
passed_test_names=['Test.One'],
failed_test_names=['Test.Three'])) +
api.override_step_data(
'test r6.gl_tests (r6)',
api.swarming.canned_summary_output(failure=True) +
api.test_utils.simulated_gtest_output(
            failed_test_names=['Test.One'])) +
api.override_step_data(
'test r2.gl_tests (r2)',
api.swarming.canned_summary_output() +
api.test_utils.simulated_gtest_output(
passed_test_names=['Test.Two'])) +
api.override_step_data(
'test r3.gl_tests (r3)',
api.swarming.canned_summary_output(failure=True) +
api.test_utils.simulated_gtest_output(failed_test_names=['Test.Two']))
)
yield (
api.test('findit_consecutive_culprits') +
props(
{'gl_tests': ['Test.One']},
'mac', 'Mac10.9 Tests', use_analyze=False,
good_revision='r0', bad_revision='r6',
suspected_revisions=['r3', 'r4']) +
api.override_step_data(
'test r2.read test spec (chromium.mac.json)',
api.json.output({
'Mac10.9 Tests': {
'gtest_tests': [
{
'test': 'gl_tests',
'swarming': {'can_use_on_swarming_builders': True},
},
],
},
})
) +
api.override_step_data(
'test r3.read test spec (chromium.mac.json)',
api.json.output({
'Mac10.9 Tests': {
'gtest_tests': [
{
'test': 'gl_tests',
'swarming': {'can_use_on_swarming_builders': True},
},
],
},
})
) +
api.override_step_data(
'test r4.read test spec (chromium.mac.json)',
api.json.output({
'Mac10.9 Tests': {
'gtest_tests': [
{
'test': 'gl_tests',
'swarming': {'can_use_on_swarming_builders': True},
},
],
},
})
) +
api.override_step_data(
'git commits in range',
api.raw_io.stream_output(
'\n'.join('r%d' % i for i in reversed(range(1, 7))))) +
api.override_step_data(
'test r4.gl_tests (r4)',
api.swarming.canned_summary_output(failure=True) +
api.test_utils.simulated_gtest_output(
failed_test_names=['Test.One'])) +
api.override_step_data(
'test r2.gl_tests (r2)',
api.swarming.canned_summary_output() +
api.test_utils.simulated_gtest_output(
passed_test_names=['Test.One'])) +
api.override_step_data(
'test r3.gl_tests (r3)',
api.swarming.canned_summary_output() +
api.test_utils.simulated_gtest_output(passed_test_names=['Test.One']))
)
yield (
api.test('record_infra_failure') +
props({'gl_tests': ['Test.One']}, 'mac', 'Mac10.9 Tests') +
api.override_step_data(
'test r1.read test spec (chromium.mac.json)',
api.json.output({
'Mac10.9 Tests': {
'gtest_tests': [
{
'test': 'gl_tests',
'swarming': {'can_use_on_swarming_builders': True},
},
],
},
})
) +
api.override_step_data(
'test r1.preprocess_for_goma.start_goma', retcode=1) +
api.step_data(
'test r1.preprocess_for_goma.goma_jsonstatus',
api.json.output(
data={
'notice': [
{
'infra_status': {
'ping_status_code': 408,
},
},
],
}))
)
yield (
api.test('use_build_parameter_for_tests') +
props({}, 'mac', 'Mac10.9 Tests', use_analyze=False,
good_revision='r0', bad_revision='r6',
suspected_revisions=['r3', 'r4'],
buildbucket=json.dumps({'build': {'id': 'id1'}})) +
api.buildbucket.simulated_buildbucket_output({
'additional_build_parameters' : {
'tests': {
'gl_tests': ['Test.One']
}
}}) +
api.override_step_data(
'test r2.read test spec (chromium.mac.json)',
api.json.output({
'Mac10.9 Tests': {
'gtest_tests': [
{
'test': 'gl_tests',
'swarming': {'can_use_on_swarming_builders': True},
},
],
},
})
) +
api.override_step_data(
'test r3.read test spec (chromium.mac.json)',
api.json.output({
'Mac10.9 Tests': {
'gtest_tests': [
{
'test': 'gl_tests',
'swarming': {'can_use_on_swarming_builders': True},
},
],
},
})
) +
api.override_step_data(
'test r4.read test spec (chromium.mac.json)',
api.json.output({
'Mac10.9 Tests': {
'gtest_tests': [
{
'test': 'gl_tests',
'swarming': {'can_use_on_swarming_builders': True},
},
],
},
})
) +
api.override_step_data(
'git commits in range',
api.raw_io.stream_output(
'\n'.join('r%d' % i for i in reversed(range(1, 7))))) +
api.override_step_data(
'test r4.gl_tests (r4)',
api.swarming.canned_summary_output(failure=True) +
api.test_utils.simulated_gtest_output(
failed_test_names=['Test.One'])) +
api.override_step_data(
'test r2.gl_tests (r2)',
api.swarming.canned_summary_output() +
api.test_utils.simulated_gtest_output(
passed_test_names=['Test.One'])) +
api.override_step_data(
'test r3.gl_tests (r3)',
api.swarming.canned_summary_output() +
api.test_utils.simulated_gtest_output(passed_test_names=['Test.One']))
)
yield (
api.test('use_build_parameter_for_tests_non_json_buildbucket') +
props({}, 'mac', 'Mac10.9 Tests', use_analyze=False,
good_revision='r0', bad_revision='r6',
suspected_revisions=['r3', 'r4'],
buildbucket={'build': {'id': 'id1'}}) +
api.buildbucket.simulated_buildbucket_output({
'additional_build_parameters' : {
'tests': {
'gl_tests': ['Test.One']
}
}}) +
api.override_step_data(
'test r2.read test spec (chromium.mac.json)',
api.json.output({
'Mac10.9 Tests': {
'gtest_tests': [
{
'test': 'gl_tests',
'swarming': {'can_use_on_swarming_builders': True},
},
],
},
})
) +
api.override_step_data(
'test r3.read test spec (chromium.mac.json)',
api.json.output({
'Mac10.9 Tests': {
'gtest_tests': [
{
'test': 'gl_tests',
'swarming': {'can_use_on_swarming_builders': True},
},
],
},
})
) +
api.override_step_data(
'test r4.read test spec (chromium.mac.json)',
api.json.output({
'Mac10.9 Tests': {
'gtest_tests': [
{
'test': 'gl_tests',
'swarming': {'can_use_on_swarming_builders': True},
},
],
},
})
) +
api.override_step_data(
'git commits in range',
api.raw_io.stream_output(
'\n'.join('r%d' % i for i in reversed(range(1, 7))))) +
api.override_step_data(
'test r4.gl_tests (r4)',
api.swarming.canned_summary_output(failure=True) +
api.test_utils.simulated_gtest_output(
failed_test_names=['Test.One'])) +
api.override_step_data(
'test r2.gl_tests (r2)',
api.swarming.canned_summary_output() +
api.test_utils.simulated_gtest_output(
passed_test_names=['Test.One'])) +
api.override_step_data(
'test r3.gl_tests (r3)',
api.swarming.canned_summary_output() +
api.test_utils.simulated_gtest_output(passed_test_names=['Test.One']))
)
yield (
api.test('use_analyze_set_to_False_for_non_linear_try_job') +
props(
{'gl_tests': ['Test.One']}, 'mac', 'Mac10.9 Tests', use_analyze=True,
good_revision='r0', bad_revision='r6', suspected_revisions=['r3']) +
api.override_step_data(
'test r2.read test spec (chromium.mac.json)',
api.json.output({
'Mac10.9 Tests': {
'gtest_tests': [
{
'test': 'gl_tests',
'swarming': {'can_use_on_swarming_builders': True},
},
],
},
})
) +
api.override_step_data(
'test r3.read test spec (chromium.mac.json)',
api.json.output({
'Mac10.9 Tests': {
'gtest_tests': [
{
'test': 'gl_tests',
'swarming': {'can_use_on_swarming_builders': True},
},
],
},
})
) +
api.override_step_data(
'git commits in range',
api.raw_io.stream_output(
'\n'.join('r%d' % i for i in reversed(range(1, 7))))) +
api.override_step_data(
'test r2.gl_tests (r2)',
api.swarming.canned_summary_output() +
api.test_utils.simulated_gtest_output(
passed_test_names=['Test.One'])) +
api.override_step_data(
'test r3.gl_tests (r3)',
api.swarming.canned_summary_output(failure=True) +
api.test_utils.simulated_gtest_output(failed_test_names=['Test.One']))
)
yield (
api.test('flaky_tests') +
props({'gl_tests': ['Test.One', 'Test.Two', 'Test.Three']},
'win', 'Win7 Tests (1)') +
api.override_step_data(
'test r0.read test spec (chromium.win.json)',
api.json.output({
'Win7 Tests (1)': {
'gtest_tests': [
{
'test': 'gl_tests',
'swarming': {'can_use_on_swarming_builders': True},
},
],
},
})
) +
api.override_step_data(
'test r0.gl_tests (r0)',
api.swarming.canned_summary_output(failure=True) +
api.test_utils.simulated_gtest_output(
failed_test_names=['Test.One'],
passed_test_names=['Test.Two'])
) +
api.override_step_data(
'test r1.read test spec (chromium.win.json)',
api.json.output({
'Win7 Tests (1)': {
'gtest_tests': [
{
'test': 'gl_tests',
'swarming': {'can_use_on_swarming_builders': True},
},
],
},
})
) +
api.override_step_data(
'test r1.gl_tests (r1)',
api.swarming.canned_summary_output(failure=True) +
api.test_utils.simulated_gtest_output(
failed_test_names=['Test.One', 'Test.Two'],
passed_test_names=['Test.Three']))
)
yield (
api.test('use_abbreviated_revision_in_step_name') +
props(
{'gl_tests': ['Test.One']}, 'mac', 'Mac10.9 Tests', use_analyze=False,
good_revision='1234567890abcdefg', bad_revision='gfedcba0987654321',
test_on_good_revision=False) +
api.override_step_data(
'test gfedcba.read test spec (chromium.mac.json)',
api.json.output({
'Mac10.9 Tests': {
'gtest_tests': [
{
'test': 'gl_tests',
'swarming': {'can_use_on_swarming_builders': True},
},
],
},
})
) +
api.override_step_data(
'git commits in range',
api.raw_io.stream_output('gfedcba0987654321')) +
api.override_step_data(
'test gfedcba.gl_tests (gfedcba)',
api.swarming.canned_summary_output(failure=True) +
api.test_utils.simulated_gtest_output(
failed_test_names=['Test.One']))
)
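  # A flaky failure at a suspected revision should not be reported as that test's culprit.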
yield (
api.test('remove_culprits_for_flaky_failures') +
props(
{'gl_tests': ['Test.One', 'Test.Two']},
'mac', 'Mac10.9 Tests', use_analyze=False,
good_revision='r0', bad_revision='r6',
suspected_revisions=['r4']) +
api.override_step_data(
'test r3.read test spec (chromium.mac.json)',
api.json.output({
'Mac10.9 Tests': {
'gtest_tests': [
{
'test': 'gl_tests',
'swarming': {'can_use_on_swarming_builders': True},
},
],
},
})
) +
api.override_step_data(
'test r4.read test spec (chromium.mac.json)',
api.json.output({
'Mac10.9 Tests': {
'gtest_tests': [
{
'test': 'gl_tests',
'swarming': {'can_use_on_swarming_builders': True},
},
],
},
})
) +
api.override_step_data(
'git commits in range',
api.raw_io.stream_output(
'\n'.join('r%d' % i for i in reversed(range(1, 7))))) +
api.override_step_data(
'test r3.gl_tests (r3)',
api.swarming.canned_summary_output(failure=True) +
api.test_utils.simulated_gtest_output(
passed_test_names=['Test.One', 'Test.Two'])) +
api.override_step_data(
'test r4.gl_tests (r4)',
api.swarming.canned_summary_output(failure=True) +
api.test_utils.simulated_gtest_output(
flaky_test_names=['Test.One'],
failed_test_names=['Test.Two']))
)
| 36.023965 | 80 | 0.5139 |
fa8b3d62ddea2c79f0d1664ed17a6bfeca1658f7 | 4,370 | py | Python | benchmark/startQiskit1907.py | UCLA-SEAL/QDiff | d968cbc47fe926b7f88b4adf10490f1edd6f8819 | ["BSD-3-Clause"] | null | null | null | benchmark/startQiskit1907.py | UCLA-SEAL/QDiff | d968cbc47fe926b7f88b4adf10490f1edd6f8819 | ["BSD-3-Clause"] | null | null | null | benchmark/startQiskit1907.py | UCLA-SEAL/QDiff | d968cbc47fe926b7f88b4adf10490f1edd6f8819 | ["BSD-3-Clause"] | null | null | null |
# qubit number=5
# total number=53
import cirq
import qiskit
from qiskit import QuantumCircuit, QuantumRegister, ClassicalRegister
from qiskit import BasicAer, execute, transpile
from pprint import pprint
from qiskit.test.mock import FakeVigo
from math import log2, floor, sqrt, pi
import numpy as np
import networkx as nx
def build_oracle(n: int, f) -> QuantumCircuit:
# implement the oracle O_f^\pm
    # NOTE: use U1 gate (P gate) with \lambda = pi (i.e. 180 degrees) ==> CZ gate
# or multi_control_Z_gate (issue #127)
controls = QuantumRegister(n, "ofc")
oracle = QuantumCircuit(controls, name="Zf")
for i in range(2 ** n):
rep = np.binary_repr(i, n)
if f(rep) == "1":
for j in range(n):
if rep[j] == "0":
oracle.x(controls[j])
# oracle.h(controls[n])
if n >= 2:
oracle.mcu1(pi, controls[1:], controls[0])
for j in range(n):
if rep[j] == "0":
oracle.x(controls[j])
# oracle.barrier()
return oracle
def make_circuit(n:int,f) -> QuantumCircuit:
# circuit begin
input_qubit = QuantumRegister(n,"qc")
classical = ClassicalRegister(n, "qm")
prog = QuantumCircuit(input_qubit, classical)
prog.h(input_qubit[0]) # number=3
prog.h(input_qubit[1]) # number=4
prog.x(input_qubit[1]) # number=48
prog.h(input_qubit[1]) # number=26
prog.cz(input_qubit[4],input_qubit[1]) # number=27
prog.h(input_qubit[1]) # number=28
prog.h(input_qubit[2]) # number=5
prog.h(input_qubit[3]) # number=6
prog.h(input_qubit[4]) # number=21
prog.h(input_qubit[1]) # number=34
prog.cz(input_qubit[4],input_qubit[1]) # number=35
prog.z(input_qubit[4]) # number=46
prog.rx(0.8011061266653969,input_qubit[2]) # number=37
prog.h(input_qubit[1]) # number=36
prog.z(input_qubit[3]) # number=51
Zf = build_oracle(n, f)
repeat = floor(sqrt(2 ** n) * pi / 4)
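    # Grover-style schedule: the oracle Zf is appended floor(pi / 4 * sqrt(2 ** n)) times.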
for i in range(repeat):
prog.append(Zf.to_gate(), [input_qubit[i] for i in range(n)])
prog.h(input_qubit[0]) # number=1
prog.h(input_qubit[1]) # number=2
prog.h(input_qubit[2]) # number=7
prog.h(input_qubit[3]) # number=8
prog.cx(input_qubit[1],input_qubit[0]) # number=38
prog.x(input_qubit[0]) # number=39
prog.cx(input_qubit[1],input_qubit[0]) # number=40
prog.cx(input_qubit[0],input_qubit[1]) # number=42
prog.rx(-1.928937889304133,input_qubit[2]) # number=49
prog.x(input_qubit[1]) # number=43
prog.cx(input_qubit[0],input_qubit[1]) # number=44
prog.x(input_qubit[2]) # number=11
prog.y(input_qubit[1]) # number=45
prog.x(input_qubit[3]) # number=12
prog.h(input_qubit[2]) # number=41
if n>=2:
prog.mcu1(pi,input_qubit[1:],input_qubit[0])
prog.cx(input_qubit[1],input_qubit[0]) # number=22
prog.x(input_qubit[4]) # number=47
prog.x(input_qubit[0]) # number=23
prog.cx(input_qubit[1],input_qubit[0]) # number=24
prog.cx(input_qubit[0],input_qubit[1]) # number=30
prog.x(input_qubit[4]) # number=52
prog.x(input_qubit[1]) # number=31
prog.cx(input_qubit[0],input_qubit[1]) # number=32
prog.x(input_qubit[2]) # number=15
prog.h(input_qubit[4]) # number=29
prog.x(input_qubit[3]) # number=16
prog.z(input_qubit[3]) # number=50
prog.h(input_qubit[0]) # number=17
prog.h(input_qubit[1]) # number=18
prog.h(input_qubit[2]) # number=19
prog.h(input_qubit[3]) # number=20
# circuit end
for i in range(n):
prog.measure(input_qubit[i], classical[i])
return prog
if __name__ == '__main__':
key = "00000"
f = lambda rep: str(int(rep == key))
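    # f returns "1" only for the target bitstring `key` and "0" for every other input.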
prog = make_circuit(5,f)
backend = BasicAer.get_backend('qasm_simulator')
    sample_shot = 7924
info = execute(prog, backend=backend, shots=sample_shot).result().get_counts()
backend = FakeVigo()
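    # Transpile against the FakeVigo mock backend to get a device-mapped circuit whose depth is written out below.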
circuit1 = transpile(prog,backend,optimization_level=2)
writefile = open("../data/startQiskit1907.csv","w")
print(info,file=writefile)
print("results end", file=writefile)
print(circuit1.depth(),file=writefile)
print(circuit1,file=writefile)
writefile.close()
| 32.132353 | 82 | 0.61373 |
da7b5f814fa5840ffa90a2c92ca78a68ddd4f053 | 5,927 | py | Python | platform/radio/efr32_multiphy_configurator/pyradioconfig/parts/dumbo/phys/Phys_IEEE802154_GB868.py | SiliconLabs/gecko_sdk | 310814a9016b60a8012d50c62cc168a783ac102b | ["Zlib"] | 69 | 2021-12-16T01:34:09.000Z | 2022-03-31T08:27:39.000Z | platform/radio/efr32_multiphy_configurator/pyradioconfig/parts/dumbo/phys/Phys_IEEE802154_GB868.py | SiliconLabs/gecko_sdk | 310814a9016b60a8012d50c62cc168a783ac102b | ["Zlib"] | 6 | 2022-01-12T18:22:08.000Z | 2022-03-25T10:19:27.000Z | platform/radio/efr32_multiphy_configurator/pyradioconfig/parts/dumbo/phys/Phys_IEEE802154_GB868.py | SiliconLabs/gecko_sdk | 310814a9016b60a8012d50c62cc168a783ac102b | ["Zlib"] | 21 | 2021-12-20T09:05:45.000Z | 2022-03-28T02:52:28.000Z |
from pyradioconfig.calculator_model_framework.interfaces.iphy import IPhy
from pyradioconfig.parts.common.phys.phy_common import PHY_COMMON_FRAME_154
from py_2_and_3_compatibility import *
class PHYS_IEEE802154_GB868(IPhy):
def IEEE802154_GB868_154G_PHR(self, phy, model):
# Great Britain smart metering PHY from 802.15.4g
# refer to spec:
# \\silabs.com\mcuandwireless\026 Shared Docs\0260_Standards\std_body\ZigBee\docs-13-0373-12-0mwg-868-gb-smart-meter-han-technical-requirements.doc
# Override settings for 15.4g PHR and GB868 PN9 whitening
phy.profile_inputs.white_seed.value = 0x000000FF
phy.profile_inputs.white_output_bit.value = 8
phy.profile_inputs.white_poly.value = model.vars.white_poly.var_enum.PN9_BYTE
phy.profile_inputs.payload_white_en.value = True
# The whitening config above will calculate FRC->FECCTRL.BLOCKWHITEMODE
# = 2 (BYTEWHITE), but it seems we need it to be 1 (WHITE) to work as
        # 15.4g specifies it, so we must override:
phy.profile_outputs.FRC_FECCTRL_BLOCKWHITEMODE.override = 1
phy.profile_inputs.header_size.value = 2
phy.profile_inputs.var_length_numbits.value = 11
phy.profile_inputs.var_length_byteendian.value = model.vars.var_length_byteendian.var_enum.MSB_FIRST
phy.profile_inputs.var_length_bitendian.value = model.vars.var_length_bitendian.var_enum.MSB_FIRST
phy.profile_inputs.var_length_shift.value = 0
#@TODO: Bump min/maxlength by -1 after MCUW_RADIO_CFG-325 is fixed:
phy.profile_inputs.var_length_minlength.value = 4 # 15.4e's 4
phy.profile_inputs.var_length_maxlength.value = 127 # NOT 15.4g's 2047
# GB868 does NOT support 15.4g 4-byte CRC, mode switching, or FEC
def IEEE802154_GB868_Base(self, phy, model):
# Great Britain smart metering PHY from 802.15.4g
# refer to spec:
# \\silabs.com\mcuandwireless\026 Shared Docs\0260_Standards\std_body\ZigBee\docs-13-0373-12-0mwg-868-gb-smart-meter-han-technical-requirements.doc
# Copied from phy_internal_base.py::GFSK_915M_base() at commit 110a85d7
phy.profile_inputs.base_frequency_hz.value = long(915350000)
phy.profile_inputs.baudrate_tol_ppm.value = 0
phy.profile_inputs.bitrate.value = 100000
phy.profile_inputs.channel_spacing_hz.value = 200000
phy.profile_inputs.deviation.value = 35000
phy.profile_inputs.diff_encoding_mode.value = model.vars.diff_encoding_mode.var_enum.DISABLED
phy.profile_inputs.dsss_chipping_code.value = long(0)
phy.profile_inputs.dsss_len.value = 0
phy.profile_inputs.dsss_spreading_factor.value = 0
phy.profile_inputs.fsk_symbol_map.value = model.vars.fsk_symbol_map.var_enum.MAP0
phy.profile_inputs.modulation_type.value = model.vars.modulation_type.var_enum.FSK2
phy.profile_inputs.preamble_pattern.value = 1 # 010101...
phy.profile_inputs.preamble_pattern_len.value = 2
phy.profile_inputs.preamble_length.value = 64
phy.profile_inputs.rx_xtal_error_ppm.value = 40
phy.profile_inputs.shaping_filter.value = model.vars.shaping_filter.var_enum.Gaussian
phy.profile_inputs.shaping_filter_param.value = 0.5
phy.profile_inputs.syncword_0.value = long(0x904E) # 15.4g SFD0 7209 non-FEC
phy.profile_inputs.syncword_1.value = long(0x0)
phy.profile_inputs.syncword_length.value = 16
phy.profile_inputs.tx_xtal_error_ppm.value = 0
phy.profile_inputs.xtal_frequency_hz.value = 38400000
phy.profile_inputs.symbols_in_timing_window.value = 14 # ???
phy.profile_inputs.agc_period.value = 0
phy.profile_inputs.agc_speed.value = model.vars.agc_speed.var_enum.FAST
# Copied from Phys_Datasheet.py::PHY_Datasheet_915M_2GFSK_100Kbps_50K()
# at commit 4bc304d1
phy.profile_inputs.timing_detection_threshold.value = 20
phy.profile_inputs.agc_power_target.value = -8
phy.profile_inputs.errors_in_timing_window.value = 1
phy.profile_inputs.timing_sample_threshold.value = 12
phy.profile_inputs.agc_settling_delay.value = 34
# Add 15.4 Packet Configuration
PHY_COMMON_FRAME_154(phy, model)
# Additional settings
phy.profile_inputs.rssi_period.value = 3 # 2^3 = 8 bits(symbols)
phy.profile_inputs.in_2fsk_opt_scope.value = False
# RFVALREQ-42
phy.profile_inputs.symbols_in_timing_window.value = 8
phy.profile_inputs.number_of_timing_windows.value = 3
phy.profile_outputs.rx_sync_delay_ns.override = 49000
phy.profile_outputs.rx_eof_delay_ns.override = 49000
def PHY_IEEE802154_GB868_863MHz_PHR2(self, model, phy_name=None):
# Great Britain smart metering PHY from 802.15.4g
# refer to spec:
# \\silabs.com\mcuandwireless\026 Shared Docs\0260_Standards\std_body\ZigBee\docs-13-0373-12-0mwg-868-gb-smart-meter-han-technical-requirements.doc
phy = self._makePhy(model, model.profiles.Base, readable_name='PHY_IEEE802154_GB868_863MHz_PHR2', phy_name=phy_name)
self.IEEE802154_GB868_Base(phy, model)
self.IEEE802154_GB868_154G_PHR(phy, model)
phy.profile_inputs.base_frequency_hz.value = long(863250000)
def PHY_IEEE802154_GB868_915MHz_PHR2(self, model, phy_name=None):
# Great Britain smart metering PHY from 802.15.4g
# refer to spec:
# \\silabs.com\mcuandwireless\026 Shared Docs\0260_Standards\std_body\ZigBee\docs-13-0373-12-0mwg-868-gb-smart-meter-han-technical-requirements.doc
phy = self._makePhy(model, model.profiles.Base, readable_name='PHY_IEEE802154_GB868_915MHz_PHR2', phy_name=phy_name)
self.IEEE802154_GB868_Base(phy, model)
self.IEEE802154_GB868_154G_PHR(phy, model)
| 56.990385 | 155 | 0.729037 |
bf656fe2fac3cc141792dca0c26695c579314dfa | 3,574 | py | Python | google-cloud-sdk/lib/surface/compute/os_login/remove_profile.py | bopopescu/Social-Lite | ee05d6a7431c36ff582c8d6b58bb20a8c5f550bf | ["Apache-2.0"] | 2 | 2019-11-10T09:17:07.000Z | 2019-12-18T13:44:08.000Z | google-cloud-sdk/lib/surface/compute/os_login/remove_profile.py | bopopescu/Social-Lite | ee05d6a7431c36ff582c8d6b58bb20a8c5f550bf | ["Apache-2.0"] | 4 | 2020-07-21T12:51:46.000Z | 2022-01-22T10:29:25.000Z | google-cloud-sdk/lib/surface/compute/os_login/remove_profile.py | bopopescu/Social-Lite | ee05d6a7431c36ff582c8d6b58bb20a8c5f550bf | ["Apache-2.0"] | 1 | 2020-07-25T18:17:57.000Z | 2020-07-25T18:17:57.000Z |
# -*- coding: utf-8 -*- #
# Copyright 2017 Google LLC. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Implements the command for removing an OS Login profile."""
from __future__ import absolute_import
from __future__ import division
from __future__ import unicode_literals
from googlecloudsdk.api_lib.oslogin import client
from googlecloudsdk.calliope import base
from googlecloudsdk.core import log
from googlecloudsdk.core import properties
from googlecloudsdk.core import resources
from googlecloudsdk.core.console import console_io
@base.ReleaseTracks(base.ReleaseTrack.GA, base.ReleaseTrack.BETA)
class RemoveProfile(base.Command):
"""Remove the posix account information for the current user."""
def Run(self, args):
oslogin_client = client.OsloginClient(self.ReleaseTrack())
account = (properties.VALUES.auth.impersonate_service_account.Get()
or properties.VALUES.core.account.GetOrFail())
project = properties.VALUES.core.project.Get(required=True)
project_ref = resources.REGISTRY.Parse(project, params={'user': account},
collection='oslogin.users.projects')
current_profile = oslogin_client.GetLoginProfile(account)
account_id = None
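    # Look for a POSIX account whose accountId matches the current project.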
for account in current_profile.posixAccounts:
if account.accountId == project:
account_id = account.accountId
if account_id:
console_io.PromptContinue(
'Posix accounts associated with project ID [{0}] will be deleted.'
.format(project),
default=True,
cancel_on_no=True)
operating_system = getattr(args, 'operating_system', None)
res = oslogin_client.DeletePosixAccounts(project_ref, operating_system)
log.DeletedResource(account_id, details='posix account(s)')
return res
else:
log.warning('No profile found with accountId [{0}]'.format(project))
@base.ReleaseTracks(base.ReleaseTrack.ALPHA)
class RemoveProfileAlpha(RemoveProfile):
"""Remove the posix account information for the current user."""
@staticmethod
def Args(parser):
os_arg = base.ChoiceArgument(
'--operating-system',
choices=('linux', 'windows'),
required=False,
default='linux',
help_str='Specifies the profile type to remove.')
os_arg.AddToParser(parser)
RemoveProfile.detailed_help = {
'brief': 'Remove the posix account information for the current user.',
'DESCRIPTION': """
*{command}* removes the posix account information for the current
user where the account ID field is set to the current project ID.
Posix account entries for G Suite users do not set the account ID
field and can only be modified by a domain administrator.
""",
'EXAMPLES': """
To remove all POSIX accounts associated with the current user and
project, run:
$ {command}
To remove all POSIX accounts associated with the current user in the
project named `example-project`, run:
$ {command} --project=example-project
"""
}
| 37.621053 | 79 | 0.715165 |
a20fd5fb67f71511bdc5f34aec6e9bede92a42a4 | 1,091 | py | Python | src/genie/libs/parser/ironware/tests/ShowIPInterface/cli/equal/golden_output1_expected.py | balmasea/genieparser | d1e71a96dfb081e0a8591707b9d4872decd5d9d3 | ["Apache-2.0"] | 204 | 2018-06-27T00:55:27.000Z | 2022-03-06T21:12:18.000Z | src/genie/libs/parser/ironware/tests/ShowIPInterface/cli/equal/golden_output1_expected.py | balmasea/genieparser | d1e71a96dfb081e0a8591707b9d4872decd5d9d3 | ["Apache-2.0"] | 468 | 2018-06-19T00:33:18.000Z | 2022-03-31T23:23:35.000Z | src/genie/libs/parser/ironware/tests/ShowIPInterface/cli/equal/golden_output1_expected.py | balmasea/genieparser | d1e71a96dfb081e0a8591707b9d4872decd5d9d3 | ["Apache-2.0"] | 309 | 2019-01-16T20:21:07.000Z | 2022-03-30T12:56:41.000Z |
expected_output = {
'interfaces': {
'ethernet1/1': {
'ip': '10.254.32.221',
'ok': 'YES',
'method': 'NVRAM',
'status': 'admin/down',
'protocol': 'down',
'vrf': 'default-vrf'
},
'ethernet5/1': {
'ip': '10.254.32.3',
'ok': 'YES',
'method': 'NVRAM',
'status': 'up',
'protocol': 'up',
'vrf': 'default-vrf'
},
'ethernet7/1': {
'ip': '10.254.32.109',
'ok': 'YES',
'method': 'manual',
'status': 'up',
'protocol': 'up',
'vrf': 'default-vrf'
},
've150': {
'ip': '10.15.15.2',
'ok': 'YES',
'method': 'NVRAM',
'status': 'up',
'protocol': 'up',
'vrf': 'default-vrf',
'flag': 'VS'
},
'management1': {
'ip': '172.16.15.4',
'ok': 'YES',
'method': 'NVRAM',
'status': 'up',
'protocol': 'up',
'vrf': 'oob'
},
'loopback1': {
'ip': '10.69.9.9',
'ok': 'YES',
'method': 'NVRAM',
'status': 'up',
'protocol': 'up',
'vrf': 'default-vrf'
}
}
}
| 20.203704 | 29 | 0.395967 |
53a924086ad5a98167ba7b8f99bd735ce8a1c130 | 2,252 | py | Python | social_blog/blog_posts/views.py | higorspinto/Social-Blog | bec89351bf76778059f112c0e2a66de9348dda54 | ["MIT"] | null | null | null | social_blog/blog_posts/views.py | higorspinto/Social-Blog | bec89351bf76778059f112c0e2a66de9348dda54 | ["MIT"] | 4 | 2021-03-19T03:43:40.000Z | 2022-01-13T01:39:30.000Z | social_blog/blog_posts/views.py | higorspinto/Social-Blog | bec89351bf76778059f112c0e2a66de9348dda54 | ["MIT"] | null | null | null |
# blog_posts/views.py
from flask import render_template, redirect, url_for, flash, request, abort, Blueprint
from flask_login import current_user, login_required
from social_blog import db
from social_blog.models import BlogPost
from social_blog.blog_posts.forms import BlogPostForm
blog_posts = Blueprint('blog_posts', __name__)
#Create
@blog_posts.route("/create", methods=["GET","POST"])
@login_required
def create_post():
form = BlogPostForm()
if form.validate_on_submit():
blog_post = BlogPost(title=form.title.data,
text=form.text.data,
user_id=current_user.id)
db.session.add(blog_post)
db.session.commit()
flash('Blog Post created!')
return redirect(url_for("core.index"))
return render_template("create_post.html", form=form)
#Blog Post (View)
@blog_posts.route("/<int:blog_post_id>")
def blog_post(blog_post_id):
blog_post = BlogPost.query.get_or_404(blog_post_id)
return render_template("blog_post.html", title=blog_post.title,
date=blog_post.date, post=blog_post)
#Update
@blog_posts.route("/<int:blog_post_id>/update", methods=["GET","POST"])
@login_required
def update_post(blog_post_id):
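    # Only the post's author may update it; anyone else gets a 403.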
blog_post = BlogPost.query.get_or_404(blog_post_id)
if blog_post.author != current_user:
abort(403)
form = BlogPostForm()
if form.validate_on_submit():
blog_post.title = form.title.data
blog_post.text = form.text.data
db.session.commit()
flash('Blog Post updated!')
return redirect(url_for("blog_posts.blog_post", blog_post_id=blog_post.id))
elif request.method == "GET":
form.title.data = blog_post.title
form.text.data = blog_post.text
return render_template("create_post.html",title="Updating",form=form)
#Delete
@blog_posts.route("/<int:blog_post_id>/delete", methods=["GET","POST"])
@login_required
def delete_post(blog_post_id):
blog_post = BlogPost.query.get_or_404(blog_post_id)
if blog_post.author != current_user:
abort(403)
db.session.delete(blog_post)
db.session.commit()
flash("Blog Post deleted.")
return redirect(url_for("core.index"))
| 26.186047 | 83 | 0.683393 |
68ecea8f5958f9cac6b97c57a9ab69b733bf1a14 | 7,902 | py | Python | src/datasets/off_road_testsets.py | Brazilian-Institute-of-Robotics/autonomous_perception | 5645a2bc6811b33e9e6bf0f6873f496dff45ad94 | ["MIT"] | null | null | null | src/datasets/off_road_testsets.py | Brazilian-Institute-of-Robotics/autonomous_perception | 5645a2bc6811b33e9e6bf0f6873f496dff45ad94 | ["MIT"] | null | null | null | src/datasets/off_road_testsets.py | Brazilian-Institute-of-Robotics/autonomous_perception | 5645a2bc6811b33e9e6bf0f6873f496dff45ad94 | ["MIT"] | 1 | 2020-12-23T23:27:30.000Z | 2020-12-23T23:27:30.000Z |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Tue Aug 6 15:26:10 2019
@author: nelson
"""
import tensorflow_datasets.public_api as tfds
import os
import tensorflow as tf
import random
classes = [
{'name': 'ignore' , 'color': [ 0, 0, 0]},
{'name': 'road' , 'color': [128, 64,128]},
{'name': 'car' , 'color': [ 0, 0,142]},
{'name': 'person' , 'color': [220, 20, 60]},
{'name': 'truck' , 'color': [ 0, 0, 70]},
{'name': 'bus' , 'color': [ 0, 60,100]},
{'name': 'cone' , 'color': [153,153,153]},
{'name': 'motorcycle' , 'color': [ 0, 0,230]},
{'name': 'animal' , 'color': [190,153,153]},
{'name': 'bicycle' , 'color': [119, 11, 32]},
# {'name': 'dog' , 'color': [ 70,130,180]},
# {'name': 'traffic light', 'color': [250,170, 30]},
# {'name': 'traffic sign' , 'color': [220,220, 0]},
]
# TODO
# tfds.core.DatasetInfo(
# name='off_road_small',
# version=1.0.0,
# description='This is the dataset for xxx. It contains yyy. The images are kept at their original dimensions.',
# homepage='https://dataset-homepage.org',
# features=FeaturesDict({
# 'image': Image(shape=(1208, 1920, 3), dtype=tf.uint8),
# 'label': Image(shape=(1208, 1920, 1), dtype=tf.uint8),
# }),
# total_num_examples=5523,
# splits={
# 'test': 1048,
# 'train': 4027,
# 'validation': 448,
# },
# supervised_keys=('image', 'label'),
# citation="""@article{my-awesome-dataset-2020,
# author = {Nelson Alves, ...},"}""",
# redistribution_info=,
# )
class OffRoadTestsets(tfds.core.GeneratorBasedBuilder):
"""Short description of my dataset."""
VERSION = tfds.core.Version("1.0.0")
def _info(self):
return tfds.core.DatasetInfo(
builder=self,
# This is the description that will appear on the datasets page.
description=("This is the dataset for xxx. It contains yyy. The "
"images are kept at their original dimensions."),
# tfds.features.FeatureConnectors
features=tfds.features.FeaturesDict({
"image": tfds.features.Image(shape=(1208, 1920, 3)),
# Here, labels can be of 5 distinct values.
"label": tfds.features.Image(shape=(1208, 1920, 1)),
}),
# If there's a common (input, target) tuple from the features,
# specify them here. They'll be used if as_supervised=True in
# builder.as_dataset.
supervised_keys=("image", "label"),
# Homepage of the dataset for documentation
homepage="https://dataset-homepage.org",
# Bibtex citation for the dataset
citation=r"""@article{my-awesome-dataset-2020,
author = {Nelson Alves, ...},"}""",
)
def _split_generators(self, dl_manager):
# Download source data
extracted_path = "/home/nelson/projects/da_art_perception/data/dataset"
label_suffix = '*label_raw.png'
image_suffix = '.jpg'
paths = ['/off-road/evening/cimatec-industrial/small.txt',
'/off-road/rain/cimatec-industrial/small.txt',
'/unpaved/rain/jaua/small.txt',
'/unpaved/rain/praia-do-forte/small.txt',
'/unpaved/rain/estrada-dos-tropeiros/small.txt',
'/unpaved/day/jaua/small.txt',
'/unpaved/day/praia-do-forte/small.txt',
'/unpaved/day/estrada-dos-tropeiros/small.txt']
offroad_paths = ['/night_offroad_clean-test_subset.txt',
'/day_offroad_clean-test_subset.txt',
'/night_offroad_dusty-test_subset.txt',
'/day_offroad_dusty-test_subset.txt']
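        # Group image/label paths by scene condition; each key becomes a TFDS split name.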
img_list = {"evening":[], "rain":[], "day":[], "day_offroad_clean":[],
"day_offroad_dusty":[], "night_offroad_clean":[],
"night_offroad_dusty":[]}
lbl_list = {"evening":[],"rain":[],"day":[], "day_offroad_clean":[],
"day_offroad_dusty":[], "night_offroad_clean":[],
"night_offroad_dusty":[]}
for path in paths:
print(path)
lineList = open(extracted_path + path).readlines()
for name in lineList:
search_name_path = path[:path.rfind('/')]+'/'+name.replace('\n', '')
full_name_path = tf.io.gfile.glob(extracted_path + search_name_path + label_suffix)[0]
if 'test' in full_name_path:
img_list[path.split("/")[2]].append(extracted_path + search_name_path + image_suffix)
lbl_list[path.split("/")[2]].append(full_name_path)
for path in offroad_paths:
print(path)
lineList = open(extracted_path + path).readlines()
for name in lineList:
search_name_path = path[:path.rfind('/')]+'/'+name.replace('.jpg\n', '')
full_name_path = tf.io.gfile.glob(extracted_path + search_name_path + label_suffix)[0]
if 'test' in full_name_path:
img_list[path.replace('/', '').split("-")[0]].append(extracted_path + search_name_path + image_suffix)
lbl_list[path.replace('/', '').split("-")[0]].append(full_name_path)
for imgs, lbls in zip(img_list, lbl_list):
random.Random(0).shuffle(img_list[imgs])
random.Random(0).shuffle(lbl_list[lbls])
# Specify the splits
return [
tfds.core.SplitGenerator(
name="evening",
gen_kwargs={
"img_list": img_list["evening"],
"lbl_list": lbl_list["evening"],
},
),
tfds.core.SplitGenerator(
name="rain",
gen_kwargs={
"img_list": img_list["rain"],
"lbl_list": lbl_list["rain"],
},
),
tfds.core.SplitGenerator(
name="day",
gen_kwargs={
"img_list": img_list["day"],
"lbl_list": lbl_list["day"],
},
),
tfds.core.SplitGenerator(
name="day_offroad_clean",
gen_kwargs={
"img_list": img_list["day_offroad_clean"],
"lbl_list": lbl_list["day_offroad_clean"],
},
),
tfds.core.SplitGenerator(
name="day_offroad_dusty",
gen_kwargs={
"img_list": img_list["day_offroad_dusty"],
"lbl_list": lbl_list["day_offroad_dusty"],
},
),
tfds.core.SplitGenerator(
name="night_offroad_clean",
gen_kwargs={
"img_list": img_list["night_offroad_clean"],
"lbl_list": lbl_list["night_offroad_clean"],
},
),
tfds.core.SplitGenerator(
name="night_offroad_dusty",
gen_kwargs={
"img_list": img_list["night_offroad_dusty"],
"lbl_list": lbl_list["night_offroad_dusty"],
},
),
]
def _generate_examples(self, img_list, lbl_list):
# Read the input data out of the source files
key=0
for image_path, label_path in zip(img_list, lbl_list):
yield key, {
"image": image_path,
"label": label_path,
}
key+=1
| 39.118812
| 122
| 0.506328
|
e9b4ecf55f6abdc4c1c21a6deab17773fa6cfac2
| 24,352
|
py
|
Python
|
ssd_liverdet/pixel_link/model.py
|
L0SG/Liver_segmentation
|
178b2367cf606ba7d704e96f855389be4c1abd14
|
[
"MIT"
] | null | null | null |
ssd_liverdet/pixel_link/model.py
|
L0SG/Liver_segmentation
|
178b2367cf606ba7d704e96f855389be4c1abd14
|
[
"MIT"
] | null | null | null |
ssd_liverdet/pixel_link/model.py
|
L0SG/Liver_segmentation
|
178b2367cf606ba7d704e96f855389be4c1abd14
|
[
"MIT"
] | null | null | null |
import torch.nn as nn
import pixel_link.pixel_link_config as config
import numpy as np
from pixel_link.pixel_link_decode import *
from layers.dcn_v2_custom import DCN
from layers import self_attn
import os
import torch
from torch.utils.checkpoint import checkpoint
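# Note: torch.utils.checkpoint is used around the self-attention blocks in forward() to trade recomputation for memory.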
def xavier(param):
nn.init.xavier_uniform_(param)
def weights_init(m):
if isinstance(m, nn.Conv2d):
xavier(m.weight.data)
m.bias.data.zero_()
class PixelLink(nn.Module):
def __init__(self, cascade_fuse, use_fuseconv, batch_norm, use_self_attention, use_self_attention_base, num_dcn_layers, groups_dcn, dcn_cat_sab, detach_sab,
max_pool_factor=1):
super(PixelLink, self).__init__()
self.vgg_groups = config.vgg_groups
self.scale = config.feature_scale
self.cascade_fuse = cascade_fuse
self.use_self_attention = use_self_attention
self.use_self_attention_base = use_self_attention_base
self.num_dcn_layers = num_dcn_layers
self.use_fuseconv = use_fuseconv
self.batch_norm = batch_norm
# TODO: modify padding
self.conv1_1 = nn.Conv2d(12, int(64*self.scale), 3, stride=1, padding=1, groups=self.vgg_groups)
self.relu1_1 = nn.ReLU()
self.conv1_2 = nn.Conv2d(int(64*self.scale), int(64*self.scale), 3, stride=1, padding=1, groups=self.vgg_groups)
self.relu1_2 = nn.ReLU()
self.pool1 = nn.MaxPool2d(2, ceil_mode=True)
self.conv2_1 = nn.Conv2d(int(64*self.scale), int(128*self.scale), 3, stride=1, padding=1, groups=self.vgg_groups)
self.relu2_1 = nn.ReLU()
self.conv2_2 = nn.Conv2d(int(128*self.scale), int(128*self.scale), 3, stride=1, padding=1, groups=self.vgg_groups)
self.relu2_2 = nn.ReLU()
self.pool2 = nn.MaxPool2d(2, ceil_mode=True)
self.conv3_1 = nn.Conv2d(int(128*self.scale), int(256*self.scale), 3, stride=1, padding=1, groups=self.vgg_groups)
self.relu3_1 = nn.ReLU()
self.conv3_2 = nn.Conv2d(int(256*self.scale), int(256*self.scale), 3, stride=1, padding=1, groups=self.vgg_groups)
self.relu3_2 = nn.ReLU()
self.conv3_3 = nn.Conv2d(int(256*self.scale), int(256*self.scale), 3, stride=1, padding=1, groups=self.vgg_groups)
self.relu3_3 = nn.ReLU()
self.pool3 = nn.MaxPool2d(2, ceil_mode=True)
self.conv4_1 = nn.Conv2d(int(256*self.scale), int(512*self.scale), 3, stride=1, padding=1, groups=self.vgg_groups)
self.relu4_1 = nn.ReLU()
self.conv4_2 = nn.Conv2d(int(512*self.scale), int(512*self.scale), 3, stride=1, padding=1, groups=self.vgg_groups)
self.relu4_2 = nn.ReLU()
self.conv4_3 = nn.Conv2d(int(512*self.scale), int(512*self.scale), 3, stride=1, padding=1, groups=self.vgg_groups)
self.relu4_3 = nn.ReLU()
self.pool4 = nn.MaxPool2d(2, ceil_mode=True)
self.conv5_1 = nn.Conv2d(int(512*self.scale), int(512*self.scale), 3, stride=1, padding=1, groups=self.vgg_groups)
self.relu5_1 = nn.ReLU()
self.conv5_2 = nn.Conv2d(int(512*self.scale), int(512*self.scale), 3, stride=1, padding=1, groups=self.vgg_groups)
self.relu5_2 = nn.ReLU()
self.conv5_3 = nn.Conv2d(int(512*self.scale), int(512*self.scale), 3, stride=1, padding=1, groups=self.vgg_groups)
self.relu5_3 = nn.ReLU()
self.pool5 = nn.MaxPool2d(kernel_size=[3, 3], stride=1, padding=1, ceil_mode=True)
if config.dilation:
self.conv6 = nn.Conv2d(int(512*self.scale), int(1024*self.scale), 3, stride=1, padding=6, dilation=6, groups=self.vgg_groups)
else:
self.conv6 = nn.Conv2d(int(512*self.scale), int(1024*self.scale), 3, stride=1, padding=1, groups=self.vgg_groups)
self.relu6 = nn.ReLU()
self.conv7 = nn.Conv2d(int(1024*self.scale), int(1024*self.scale), 1, stride=1, padding=0, groups=self.vgg_groups)
self.relu7 = nn.ReLU()
self.modules_except_dcn = nn.ModuleList([self.conv1_1, self.relu1_1, self.conv1_2, self.relu1_2, self.pool1,
self.conv2_1, self.relu2_1, self.conv2_2, self.relu2_2, self.pool2,
self.conv3_1, self.relu3_1, self.conv3_2, self.relu3_2, self.conv3_3, self.relu3_3, self.pool3,
self.conv4_1, self.relu4_1, self.conv4_2, self.relu4_2, self.conv4_3, self.relu4_3, self.pool4,
self.conv5_1, self.relu5_1, self.conv5_2, self.relu5_2, self.conv5_3, self.relu5_3, self.pool5,
self.conv6, self.relu6, self.conv7, self.relu7])
if config.version == "2s":
self.out1_1 = nn.Conv2d(int(128*self.scale), 2, 1, stride=1, padding=0) #conv2_2
self.out1_2 = nn.Conv2d(int(128*self.scale), 16, 1, stride=1, padding=0)
self.modules_except_dcn.extend([self.out1_1, self.out1_2])
self.out2_1 = nn.Conv2d(int(256*self.scale), 2, 1, stride=1, padding=0) #conv3_3
self.out2_2 = nn.Conv2d(int(256*self.scale), 16, 1, stride=1, padding=0)
self.out3_1 = nn.Conv2d(int(512*self.scale), 2, 1, stride=1, padding=0) #conv4_3
self.out3_2 = nn.Conv2d(int(512*self.scale), 16, 1, stride=1, padding=0)
self.out4_1 = nn.Conv2d(int(512*self.scale), 2, 1, stride=1, padding=0) #conv5_3
self.out4_2 = nn.Conv2d(int(512*self.scale), 16, 1, stride=1, padding=0)
self.out5_1 = nn.Conv2d(int(1024*self.scale), 2, 1, stride=1, padding=0) #fc7
self.out5_2 = nn.Conv2d(int(1024*self.scale), 16, 1, stride=1, padding=0)
self.modules_except_dcn.extend([self.out2_1, self.out2_2, self.out3_1, self.out3_2, self.out4_1, self.out4_2, self.out5_1, self.out5_2])
if self.use_fuseconv:
if config.version == "2s":
self.fuse1 = nn.Conv2d(int(128*self.scale), int(128*self.scale), kernel_size=1)
self.modules_except_dcn.append(self.fuse1)
self.fuse2 = nn.Conv2d(int(256*self.scale), int(256*self.scale), kernel_size=1)
self.fuse3 = nn.Conv2d(int(512*self.scale), int(512*self.scale), kernel_size=1)
self.fuse4 = nn.Conv2d(int(512*self.scale), int(512*self.scale), kernel_size=1)
self.fuse5 = nn.Conv2d(int(1024*self.scale), int(1024*self.scale), kernel_size=1)
self.modules_except_dcn.extend([self.fuse2, self.fuse3, self.fuse4, self.fuse5])
if batch_norm:
if config.version == "2s":
self.bn_fuse1 = nn.BatchNorm2d(int(128*self.scale))
self.modules_except_dcn.append(self.bn_fuse1)
self.bn_fuse2 = nn.BatchNorm2d(int(256*self.scale))
self.bn_fuse3 = nn.BatchNorm2d(int(512*self.scale))
self.bn_fuse4 = nn.BatchNorm2d(int(512*self.scale))
self.bn_fuse5 = nn.BatchNorm2d(int(1024*self.scale))
self.modules_except_dcn.extend([self.bn_fuse2, self.bn_fuse3, self.bn_fuse4, self.bn_fuse5])
if self.cascade_fuse:
if config.version == "2s":
self.final_1 = nn.Conv2d(2 * 5, 2, 1, stride=1, padding=0)
self.final_2 = nn.Conv2d(16 * 5, 16, 1, stride=1, padding=0)
else:
self.final_1 = nn.Conv2d(2 * 4, 2, 1, stride=1, padding=0)
self.final_2 = nn.Conv2d(16 * 4, 16, 1, stride=1, padding=0)
else:
self.final_1 = nn.Conv2d(2, 2, 1, stride=1, padding=0)
self.final_2 = nn.Conv2d(16, 16, 1, stride=1, padding=0)
self.modules_except_dcn.extend([self.final_1, self.final_2])
# new: try adjusting maxpool factor
self.max_pool_factor = max_pool_factor
if self.use_self_attention_base:
self.self_attn_base_list = nn.ModuleList([])
self.self_attn_base_in_channel_list = [256, 512, 512, 1024]
if config.version == "2s":
self.self_attn_base_in_channel_list = [128] + self.self_attn_base_in_channel_list
self.self_attn_base_in_channel_list = [int(i * self.scale) for i in self.self_attn_base_in_channel_list]
for i in range(len(self.self_attn_base_in_channel_list)):
self.self_attn_base_list.append(self_attn.Self_Attn(in_channels=self.self_attn_base_in_channel_list[i],
max_pool_factor=max_pool_factor))
if self.use_self_attention:
self.self_attn_list = nn.ModuleList([])
self.self_attn_in_channel_list = [256, 512, 512, 1024]
if config.version == "2s":
self.self_attn_in_channel_list = [128] + self.self_attn_in_channel_list
self.self_attn_in_channel_list = [int(i * self.scale) for i in self.self_attn_in_channel_list]
for i in range(len(self.self_attn_in_channel_list)):
self.self_attn_list.append(self_attn.Self_Attn(in_channels=self.self_attn_in_channel_list[i],
max_pool_factor=max_pool_factor))
if self.num_dcn_layers > 0:
self.use_dcn = True
self.groups_dcn = groups_dcn
self.dcn_list = nn.ModuleList([])
# we try to match the offset of each phase after vgg conv7 stage
self.dcn_in_channel_list = [256]
self.dcn_in_channel_list = [int(i * self.scale) for i in self.dcn_in_channel_list]
self.dcn_cat_sab = dcn_cat_sab # concat sab (self_attention_base) and the original x before dcn input
self.detach_sab = detach_sab
if self.detach_sab:
                assert self.dcn_cat_sab is True, "detach_sab requires --dcn_cat_sab=True"
for i in range(len(self.dcn_in_channel_list)):
if self.dcn_cat_sab:
assert self.use_self_attention_base is True, "dcn_cat_sab requires use_self_attention_base=True"
self.dcn_list.append(
DCN(in_channels=self.dcn_in_channel_list[i] * 2, out_channels=self.dcn_in_channel_list[i],
kernel_size=3, stride=1, padding=1, deformable_groups=self.groups_dcn))
else:
self.dcn_list.append(
DCN(in_channels=self.dcn_in_channel_list[i], out_channels=self.dcn_in_channel_list[i],
kernel_size=3, stride=1, padding=1, deformable_groups=self.groups_dcn))
for j in range(self.num_dcn_layers-1):
self.dcn_list.append(DCN(in_channels=self.dcn_in_channel_list[i], out_channels=self.dcn_in_channel_list[i],
kernel_size=3, stride=1, padding=1, deformable_groups=self.groups_dcn))
else:
self.use_dcn = False
self.dcn_cat_sab = False
self.detach_sab = False
for m in self.modules():
weights_init(m)
def slice_and_cat(self, a, b):
# slice each tensor by 4 (4-phase), concat a & b for each phase, then merge together
# why?: we want to keep the "grouped" context of base convnet feature before feeding to next grouped conv
a = torch.split(a, int(a.size(1)/self.vgg_groups), dim=1)
b = torch.split(b, int(b.size(1)/self.vgg_groups), dim=1)
ab = [torch.cat([a[i], b[i]], dim=1) for i in range(len(a))]
ab = torch.cat(ab, dim=1)
return ab
def forward(self, x):
if self.use_self_attention_base:
sa_base_counter = 0
if self.use_self_attention:
sa_counter = 0
# print("forward1")
x = self.pool1(self.relu1_2(self.conv1_2(self.relu1_1(self.conv1_1(x)))))
# print("forward11")
x = self.relu2_2(self.conv2_2(self.relu2_1(self.conv2_1(x))))
# print("forward12")
if config.version == "2s":
if self.use_self_attention_base:
# x, attn_g, attnb = self.self_attn_base_list[sa_base_counter](x)
x, attn_g = checkpoint(self.self_attn_base_list[sa_base_counter], x)
sa_base_counter += 1
if self.dcn_cat_sab:
if self.detach_sab:
x = self.slice_and_cat(x, attn_g.detach())
else:
x = self.slice_and_cat(x, attn_g)
if self.use_dcn:
for i_dcn in range(self.num_dcn_layers):
x, offset = self.dcn_list[i_dcn](x)
s1 = x
if self.use_self_attention:
# s1, attn_g, attn = self.self_attn_list[sa_counter](s1)
                s1, attn_g = checkpoint(self.self_attn_list[sa_counter], s1)
sa_counter += 1
if self.use_fuseconv:
s1 = self.fuse1(s1)
if self.batch_norm:
s1 = self.bn_fuse1(s1)
l1_1x = self.out1_1(s1) #conv2_2
# print("forward13")
l1_2x = self.out1_2(s1) #conv2_2
# print("forward14")
x = self.relu3_3(self.conv3_3(self.relu3_2(self.conv3_2(self.relu3_1(self.conv3_1(self.pool2(x)))))))
# print("forward15")
if self.use_self_attention_base:
# x, attn_g, attnb = self.self_attn_base_list[sa_base_counter](x)
x, attn_g = checkpoint(self.self_attn_base_list[sa_base_counter], x)
sa_base_counter += 1
if config.version != "2s" and self.use_dcn:
if self.dcn_cat_sab:
if self.detach_sab:
x = self.slice_and_cat(x, attn_g.detach())
else:
x = self.slice_and_cat(x, attn_g)
for i_dcn in range(self.num_dcn_layers):
x, offset = self.dcn_list[i_dcn](x)
s2 = x
if self.use_self_attention:
# s2, attn_g, attn = self.self_attn_list[sa_counter](s2)
s2, attn_g = checkpoint(self.self_attn_list[sa_counter], s2)
sa_counter += 1
if self.use_fuseconv:
s2 = self.fuse2(s2)
if self.batch_norm:
s2 = self.bn_fuse2(s2)
l2_1x = self.out2_1(s2) #conv3_3
# print("forward16")
l2_2x = self.out2_2(s2) #conv3_3
# print("forward17")
x = self.relu4_3(self.conv4_3(self.relu4_2(self.conv4_2(self.relu4_1(self.conv4_1(self.pool3(x)))))))
if self.use_self_attention_base:
# x, attn_g, attnb = self.self_attn_base_list[sa_base_counter](x)
x, attn_g = checkpoint(self.self_attn_base_list[sa_base_counter], x)
sa_base_counter += 1
s3 = x
if self.use_self_attention:
# s3, attn_g, attn = self.self_attn_list[sa_counter](s3)
s3, attn_g = checkpoint(self.self_attn_list[sa_counter], s3)
sa_counter += 1
if self.use_fuseconv:
s3 = self.fuse3(s3)
if self.batch_norm:
s3 = self.bn_fuse3(s3)
l3_1x = self.out3_1(s3) #conv4_3
l3_2x = self.out3_2(s3) #conv4_3
x = self.relu5_3(self.conv5_3(self.relu5_2(self.conv5_2(self.relu5_1(self.conv5_1(self.pool4(x)))))))
if self.use_self_attention_base:
# x, attn_g, attnb = self.self_attn_base_list[sa_base_counter](x)
x, attn_g = checkpoint(self.self_attn_base_list[sa_base_counter], x)
sa_base_counter += 1
s4 = x
if self.use_self_attention:
# s4, attn_g, attn = self.self_attn_list[sa_counter](s4)
s4, attn_g = checkpoint(self.self_attn_list[sa_counter], s4)
sa_counter += 1
if self.use_fuseconv:
s4 = self.fuse4(s4)
if self.batch_norm:
s4 = self.bn_fuse4(s4)
l4_1x = self.out4_1(s4) #conv5_3
l4_2x = self.out4_2(s4) #conv5_3
x = self.relu7(self.conv7(self.relu6(self.conv6(self.pool5(x)))))
if self.use_self_attention_base:
# x, attn_g, attnb = self.self_attn_base_list[sa_base_counter](x)
x, attn_g = checkpoint(self.self_attn_base_list[sa_base_counter], x)
sa_base_counter += 1
s5 = x
if self.use_self_attention:
# s5, attn_g, attn = self.self_attn_list[sa_counter](s5)
s5, attn_g = checkpoint(self.self_attn_list[sa_counter], s5)
sa_counter += 1
if self.use_fuseconv:
s5 = self.fuse5(s5)
if self.batch_norm:
s5 = self.bn_fuse5(s5)
l5_1x = self.out5_1(s5) #fc7
l5_2x = self.out5_2(s5) #fc7
# print("forward3")
if self.cascade_fuse:
upsample1_1 = nn.functional.interpolate(l5_1x + l4_1x, size=l3_1x.size()[2:], mode="bilinear",
align_corners=True)
upsample2_1 = nn.functional.interpolate(upsample1_1 + l3_1x, size=l2_1x.size()[2:], mode="bilinear",
align_corners=True)
if config.version == "2s":
upsample3_1 = nn.functional.interpolate(upsample2_1 + l2_1x, size=l1_1x.size()[2:], mode="bilinear",
align_corners=True)
logit_1 = upsample3_1 + l1_1x
features = [nn.functional.interpolate(l5_1x, size=logit_1.size()[2:], mode="bilinear", align_corners=True),
nn.functional.interpolate(l5_1x + l4_1x, size=logit_1.size()[2:], mode="bilinear", align_corners=True),
nn.functional.interpolate(upsample1_1 + l3_1x, size=logit_1.size()[2:], mode="bilinear", align_corners=True),
nn.functional.interpolate(upsample2_1 + l2_1x, size=logit_1.size()[2:], mode="bilinear", align_corners=True),
logit_1]
out_1 = self.final_1(torch.cat(features, dim=1))
else:
logit_1 = upsample2_1 + l2_1x
features_1 = [nn.functional.interpolate(l5_1x, size=logit_1.size()[2:], mode="bilinear", align_corners=True),
nn.functional.interpolate(l5_1x + l4_1x, size=logit_1.size()[2:], mode="bilinear", align_corners=True),
nn.functional.interpolate(upsample1_1 + l3_1x, size=logit_1.size()[2:], mode="bilinear", align_corners=True),
logit_1]
out_1 = self.final_1(torch.cat(features_1, dim=1))
upsample1_2 = nn.functional.interpolate(l5_2x + l4_2x, size=l3_2x.size()[2:], mode="bilinear",
align_corners=True)
upsample2_2 = nn.functional.interpolate(upsample1_2 + l3_2x, size=l2_2x.size()[2:], mode="bilinear",
align_corners=True)
if config.version == "2s":
upsample3_2 = nn.functional.interpolate(upsample2_2 + l2_2x, size=l1_1x.size()[2:], mode="bilinear",
align_corners=True)
logit_2 = upsample3_2 + l1_2x
features_2 = [
nn.functional.interpolate(l5_2x, size=logit_2.size()[2:], mode="bilinear", align_corners=True),
nn.functional.interpolate(l5_2x + l4_2x, size=logit_2.size()[2:], mode="bilinear",
align_corners=True),
nn.functional.interpolate(upsample1_2 + l3_2x, size=logit_2.size()[2:], mode="bilinear",
align_corners=True),
nn.functional.interpolate(upsample2_2 + l2_2x, size=logit_2.size()[2:], mode="bilinear",
align_corners=True),
logit_2]
out_2 = self.final_2(torch.cat(features_2, dim=1))
else:
logit_2 = upsample2_2 + l2_2x
features_2 = [
nn.functional.interpolate(l5_2x, size=logit_2.size()[2:], mode="bilinear", align_corners=True),
nn.functional.interpolate(l5_2x + l4_2x, size=logit_2.size()[2:], mode="bilinear",
align_corners=True),
nn.functional.interpolate(upsample1_2 + l3_2x, size=logit_2.size()[2:], mode="bilinear",
align_corners=True),
logit_2]
out_2 = self.final_2(torch.cat(features_2, dim=1))
else:
# upsample1_1 = nn.functional.upsample(l5_1x + l4_1x, scale_factor=2, mode="bilinear", align_corners=True)
upsample1_1 = nn.functional.interpolate(l5_1x + l4_1x, size=l3_1x.size()[2:], mode="bilinear", align_corners=True)
#upsample2_1 = nn.functional.upsample(upsample1_1 + l3_1x, scale_factor=2, mode="bilinear", align_corners=True)
upsample2_1 = nn.functional.interpolate(upsample1_1 + l3_1x, size=l2_1x.size()[2:], mode="bilinear", align_corners=True)
if config.version == "2s":
# upsample3_1 = nn.functional.upsample(upsample2_1 + l2_1x, scale_factor=2, mode="bilinear", align_corners=True)
upsample3_1 = nn.functional.interpolate(upsample2_1 + l2_1x, size=l1_1x.size()[2:], mode="bilinear", align_corners=True)
out_1 = upsample3_1 + l1_1x
else:
out_1 = upsample2_1 + l2_1x
out_1 = self.final_1(out_1)
# print("forward4")
# upsample1_2 = nn.functional.upsample(l5_2x + l4_2x, scale_factor=2, mode="bilinear", align_corners=True)
upsample1_2 = nn.functional.interpolate(l5_2x + l4_2x, size=l3_2x.size()[2:], mode="bilinear", align_corners=True)
# upsample2_2 = nn.functional.upsample(upsample1_2 + l3_2x, scale_factor=2, mode="bilinear", align_corners=True)
upsample2_2 = nn.functional.interpolate(upsample1_2 + l3_2x, size=l2_2x.size()[2:], mode="bilinear", align_corners=True)
if config.version == "2s":
# upsample3_2 = nn.functional.upsample(upsample2_2 + l2_2x, scale_factor=2, mode="bilinear", align_corners=True)
upsample3_2 = nn.functional.interpolate(upsample2_2 + l2_2x, size=l1_1x.size()[2:], mode="bilinear", align_corners=True)
out_2 = upsample3_2 + l1_2x
else:
out_2 = upsample2_2 + l2_2x
out_2 = self.final_2(out_2)
# print("forward5")
return [out_1, out_2] # [ [B, 2, H, W], [B, 16, H, W] ]
def num_flat_features(self, x):
size = x.size()[1:]
num_features = 1
for s in size:
num_features *= s
return num_features
def load_weights(self, base_file):
other, ext = os.path.splitext(base_file)
        if ext in ('.pkl', '.pth'):
print('Loading weights into state dict...')
weight_pretrained = torch.load(base_file, map_location=lambda storage, loc: storage)
# https://discuss.pytorch.org/t/how-to-load-part-of-pre-trained-model/1113/32
from collections import OrderedDict
pretrained_dict = OrderedDict()
model_dict = self.state_dict()
for k, v in weight_pretrained.items():
if k.startswith("module."): # DataParallel case
name = k[7:] # remove `module.`
else:
name = k
if name in model_dict.keys():
if v.shape != model_dict[name].shape:
print(
"WARNING: shape of pretrained {} {} does not match the current model {}. this weight will be ignored.".format(
name, v.shape, model_dict[name].shape))
pretrained_dict[name] = v
filtered_dict = {k: v for k, v in pretrained_dict.items() if
(k in model_dict) and (model_dict[k].shape == pretrained_dict[k].shape)}
            # overwrite matching entries in model_dict with the filtered pretrained weights, then load
model_dict.update(filtered_dict)
self.load_state_dict(model_dict)
else:
print('Sorry only .pth and .pkl files supported.')
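    # Example call (hypothetical checkpoint path; only shape-matching tensors are kept):
    #   net.load_weights('weights/vgg16_reducedfc.pth')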
| 57.56974
| 160
| 0.589192
|
0ff9a9bae36874fc3a41a605b6109b76794154d0
| 991
|
py
|
Python
|
headerstest.py
|
mzeinstra/openAnalyser
|
859156117948eb15283c348e6f6025cae9352279
|
[
"MIT"
] | 1
|
2021-06-28T09:39:43.000Z
|
2021-06-28T09:39:43.000Z
|
headerstest.py
|
mzeinstra/openAnalyser
|
859156117948eb15283c348e6f6025cae9352279
|
[
"MIT"
] | null | null | null |
headerstest.py
|
mzeinstra/openAnalyser
|
859156117948eb15283c348e6f6025cae9352279
|
[
"MIT"
] | null | null | null |
from urllib.parse import urlparse
from bs4 import BeautifulSoup
import urllib3
from socket import timeout
import tldextract
import re
import traceback
import sys
import logging
import socket
import threading
from time import sleep
from collector import Collector
from checker import Checker
http = urllib3.PoolManager()
page = http.request('GET', "http://opennederland.nl", timeout=2)
print(page)
print("-------------------------------------------------------")
print(page.headers)
print("-------------------------------------------------------")
print(page.headers.keys())
print("-------------------------------------------------------")
print(page.headers.items())
print("-------------------------------------------------------")
t = page.headers.items()
print("-------------------------------------------------------")
d = dict((x, y) for x, y in t)
print (d)
print("-------------------------------------------------------")
if "X-Powered-By" in d:
print ("print " + d['X-Powered-By'])
| 30.030303
| 64
| 0.491423
|
13db41a3aa7fcbe8ff30af5fa8fc986a9d0efbaf
| 7,162
|
py
|
Python
|
python/kfserving/kfserving/models/v1alpha2_kf_service_list.py
|
ariefrahmansyah/kfserving
|
733e415a3715e5bf662ef9fd791fc2708d145a37
|
[
"Apache-2.0"
] | null | null | null |
python/kfserving/kfserving/models/v1alpha2_kf_service_list.py
|
ariefrahmansyah/kfserving
|
733e415a3715e5bf662ef9fd791fc2708d145a37
|
[
"Apache-2.0"
] | null | null | null |
python/kfserving/kfserving/models/v1alpha2_kf_service_list.py
|
ariefrahmansyah/kfserving
|
733e415a3715e5bf662ef9fd791fc2708d145a37
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2019 kubeflow.org.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# coding: utf-8
"""
KFServing
Python SDK for KFServing # noqa: E501
OpenAPI spec version: v0.1
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
from kfserving.models.v1alpha2_kf_service import V1alpha2KFService # noqa: F401,E501
from kubernetes.client import V1ListMeta # noqa: F401,E501
class V1alpha2KFServiceList(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'api_version': 'str',
'items': 'list[V1alpha2KFService]',
'kind': 'str',
'metadata': 'V1ListMeta'
}
attribute_map = {
'api_version': 'apiVersion',
'items': 'items',
'kind': 'kind',
'metadata': 'metadata'
}
def __init__(self, api_version=None, items=None, kind=None, metadata=None): # noqa: E501
"""V1alpha2KFServiceList - a model defined in Swagger""" # noqa: E501
self._api_version = None
self._items = None
self._kind = None
self._metadata = None
self.discriminator = None
if api_version is not None:
self.api_version = api_version
self.items = items
if kind is not None:
self.kind = kind
if metadata is not None:
self.metadata = metadata
@property
def api_version(self):
"""Gets the api_version of this V1alpha2KFServiceList. # noqa: E501
APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#resources # noqa: E501
:return: The api_version of this V1alpha2KFServiceList. # noqa: E501
:rtype: str
"""
return self._api_version
@api_version.setter
def api_version(self, api_version):
"""Sets the api_version of this V1alpha2KFServiceList.
APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#resources # noqa: E501
:param api_version: The api_version of this V1alpha2KFServiceList. # noqa: E501
:type: str
"""
self._api_version = api_version
@property
def items(self):
"""Gets the items of this V1alpha2KFServiceList. # noqa: E501
:return: The items of this V1alpha2KFServiceList. # noqa: E501
:rtype: list[V1alpha2KFService]
"""
return self._items
@items.setter
def items(self, items):
"""Sets the items of this V1alpha2KFServiceList.
:param items: The items of this V1alpha2KFServiceList. # noqa: E501
:type: list[V1alpha2KFService]
"""
if items is None:
raise ValueError("Invalid value for `items`, must not be `None`") # noqa: E501
self._items = items
@property
def kind(self):
"""Gets the kind of this V1alpha2KFServiceList. # noqa: E501
Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds # noqa: E501
:return: The kind of this V1alpha2KFServiceList. # noqa: E501
:rtype: str
"""
return self._kind
@kind.setter
def kind(self, kind):
"""Sets the kind of this V1alpha2KFServiceList.
Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds # noqa: E501
:param kind: The kind of this V1alpha2KFServiceList. # noqa: E501
:type: str
"""
self._kind = kind
@property
def metadata(self):
"""Gets the metadata of this V1alpha2KFServiceList. # noqa: E501
:return: The metadata of this V1alpha2KFServiceList. # noqa: E501
:rtype: V1ListMeta
"""
return self._metadata
@metadata.setter
def metadata(self, metadata):
"""Sets the metadata of this V1alpha2KFServiceList.
:param metadata: The metadata of this V1alpha2KFServiceList. # noqa: E501
:type: V1ListMeta
"""
self._metadata = metadata
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
if issubclass(V1alpha2KFServiceList, dict):
for key, value in self.items():
result[key] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, V1alpha2KFServiceList):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
| 33.157407
| 295
| 0.63069
|
671932eb2f238490056d3f42565ba61024eb49e3
| 7,250
|
py
|
Python
|
src/NewDouban.py
|
fugary/calibre-web-douban-api
|
8a8b7c969095a7be7a4df4c26fd82fdc8fcd85e3
|
[
"Apache-2.0"
] | 69
|
2021-11-05T05:14:10.000Z
|
2022-03-30T17:55:57.000Z
|
src/NewDouban.py
|
zhoucheng8023/calibre-web-douban-api
|
8a8b7c969095a7be7a4df4c26fd82fdc8fcd85e3
|
[
"Apache-2.0"
] | 7
|
2021-11-09T08:25:40.000Z
|
2022-03-29T05:54:39.000Z
|
src/NewDouban.py
|
zhoucheng8023/calibre-web-douban-api
|
8a8b7c969095a7be7a4df4c26fd82fdc8fcd85e3
|
[
"Apache-2.0"
] | 23
|
2021-11-05T05:14:17.000Z
|
2022-03-20T03:35:05.000Z
|
import re
import time
import requests
from concurrent.futures import ThreadPoolExecutor, as_completed
from urllib.parse import urlparse, unquote
from lxml import etree
from functools import lru_cache
from cps.services.Metadata import Metadata, MetaSourceInfo, MetaRecord
DOUBAN_SEARCH_JSON_URL = "https://www.douban.com/j/search"
DOUBAN_BOOK_CAT = "1001"
DOUBAN_BOOK_CACHE_SIZE = 500  # maximum number of cached book records
DOUBAN_CONCURRENCY_SIZE = 5  # number of concurrent detail requests
DEFAULT_HEADERS = {
'user-agent': 'Mozilla/5.0 (Windows NT 6.1; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/71.0.3573.0 Safari/537.36'
}
PROVIDER_NAME = "New Douban Books"
PROVIDER_ID = "new_douban"
class NewDouban(Metadata):
__name__ = PROVIDER_NAME
__id__ = PROVIDER_ID
def __init__(self):
self.searcher = DoubanBookSearcher()
super().__init__()
def search(self, query: str, generic_cover: str = "", locale: str = "en"):
if self.active:
return self.searcher.search_books(query)
class DoubanBookSearcher:
def __init__(self):
self.book_loader = DoubanBookLoader()
self.thread_pool = ThreadPoolExecutor(max_workers=10, thread_name_prefix='douban_async')
def calc_url(self, href):
query = urlparse(href).query
params = {item.split('=')[0]: item.split('=')[1] for item in query.split('&')}
url = unquote(params['url'])
return url
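    # Example (hypothetical redirect link from the search results):
    #   https://www.douban.com/link2/?url=https%3A%2F%2Fbook.douban.com%2Fsubject%2F1084336%2F&query=...
    #   -> calc_url(...) returns https://book.douban.com/subject/1084336/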
def load_book_urls(self, query):
url = DOUBAN_SEARCH_JSON_URL
params = {"start": 0, "cat": DOUBAN_BOOK_CAT, "q": query}
res = requests.get(url, params, headers=DEFAULT_HEADERS)
book_urls = []
if res.status_code in [200, 201]:
book_list_content = res.json()
            for item in book_list_content['items'][0:DOUBAN_CONCURRENCY_SIZE]:  # only take the first few results (5 by default)
html = etree.HTML(item)
a = html.xpath('//a[@class="nbg"]')
if len(a):
href = a[0].attrib['href']
parsed = self.calc_url(href)
book_urls.append(parsed)
return book_urls
def search_books(self, query):
book_urls = self.load_book_urls(query)
books = []
futures = [self.thread_pool.submit(self.book_loader.load_book, book_url) for book_url in book_urls]
for future in as_completed(futures):
book = future.result()
if book is not None:
books.append(future.result())
return books
class DoubanBookLoader:
def __init__(self):
self.book_parser = DoubanBookHtmlParser()
@lru_cache(maxsize=DOUBAN_BOOK_CACHE_SIZE)
def load_book(self, url):
book = None
start_time = time.time()
res = requests.get(url, headers=DEFAULT_HEADERS)
if res.status_code in [200, 201]:
print("下载书籍:{}成功,耗时{:.0f}ms".format(url, (time.time() - start_time) * 1000))
book_detail_content = res.content
book = self.book_parser.parse_book(url, book_detail_content.decode("utf8"))
return book
class DoubanBookHtmlParser:
def __init__(self):
self.id_pattern = re.compile(".*/subject/(\\d+)/?")
self.date_pattern = re.compile("(\\d{4})-(\\d+)")
self.tag_pattern = re.compile("criteria = '(.+)'")
def parse_book(self, url, book_content):
book = MetaRecord(
id="",
title="",
authors=[],
publisher="",
description="",
url="",
source=MetaSourceInfo(
id=PROVIDER_ID,
description=PROVIDER_NAME,
link="https://book.douban.com/"
)
)
html = etree.HTML(book_content)
title_element = html.xpath("//span[@property='v:itemreviewed']")
book.title = self.get_text(title_element)
share_element = html.xpath("//a[@data-url]")
if len(share_element):
url = share_element[0].attrib['data-url']
book.url = url
id_match = self.id_pattern.match(url)
if id_match:
book.id = id_match.group(1)
img_element = html.xpath("//a[@class='nbg']")
if len(img_element):
cover = img_element[0].attrib['href']
if not cover or cover.endswith('update_image'):
book.cover = ''
else:
book.cover = cover
rating_element = html.xpath("//strong[@property='v:average']")
book.rating = self.get_rating(rating_element)
elements = html.xpath("//span[@class='pl']")
for element in elements:
text = self.get_text(element)
if text.startswith("作者") or text.startswith("译者"):
book.authors.extend([self.get_text(author_element) for author_element in
filter(self.author_filter, element.findall("..//a"))])
elif text.startswith("出版社"):
book.publisher = self.get_tail(element)
elif text.startswith("副标题"):
book.title = book.title + ':' + self.get_tail(element)
elif text.startswith("出版年"):
book.publishedDate = self.get_publish_date(self.get_tail(element))
elif text.startswith("丛书"):
book.series = self.get_text(element.getnext())
summary_element = html.xpath("//div[@id='link-report']//div[@class='intro']")
if len(summary_element):
book.description = etree.tostring(summary_element[-1], encoding="utf8").decode("utf8").strip()
tag_elements = html.xpath("//a[contains(@class, 'tag')]")
if len(tag_elements):
book.tags = [self.get_text(tag_element) for tag_element in tag_elements]
else:
book.tags = self.get_tags(book_content)
return book
def get_tags(self, book_content):
tag_match = self.tag_pattern.findall(book_content)
if len(tag_match):
return [tag.replace('7:', '') for tag in
filter(lambda tag: tag and tag.startswith('7:'), tag_match[0].split('|'))]
return []
def get_publish_date(self, date_str):
if date_str:
date_match = self.date_pattern.fullmatch(date_str)
if date_match:
date_str = "{}-{}-1".format(date_match.group(1), date_match.group(2))
return date_str
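    # Example: "2019-7" is normalized to "2019-7-1"; strings like "2019" or "2019-7-1"
    # do not fully match the pattern and are returned unchanged.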
def get_rating(self, rating_element):
return float(self.get_text(rating_element, '0')) / 2
def author_filter(self, a_element):
a_href = a_element.attrib['href']
return '/author' in a_href or '/search' in a_href
def get_text(self, element, default_str=''):
text = default_str
if len(element) and element[0].text:
text = element[0].text.strip()
elif isinstance(element, etree._Element) and element.text:
text = element.text.strip()
return text if text else default_str
def get_tail(self, element, default_str=''):
text = default_str
if isinstance(element, etree._Element) and element.tail:
text = element.tail.strip()
if not text:
text = self.get_text(element.getnext(), default_str)
return text if text else default_str
| 37.958115
| 132
| 0.6
|
2e93b372d355932294deded9335115ab9e166d81
| 980
|
py
|
Python
|
main1.py
|
HieuNguyenPhi/ChatBot
|
7ae98fef40527ced15a58a4f35d3114257d32fbd
|
[
"MIT"
] | null | null | null |
main1.py
|
HieuNguyenPhi/ChatBot
|
7ae98fef40527ced15a58a4f35d3114257d32fbd
|
[
"MIT"
] | null | null | null |
main1.py
|
HieuNguyenPhi/ChatBot
|
7ae98fef40527ced15a58a4f35d3114257d32fbd
|
[
"MIT"
] | null | null | null |
import nltk
nltk.download('punkt')
nltk.download('wordnet')
from nltk.stem import WordNetLemmatizer
lemmatizer = WordNetLemmatizer()
import json
import pickle
import numpy as np
from keras.models import Sequential
from keras.layers import Dense, Activation, Dropout
from keras.optimizers import SGD
import random
import csv
words=[]
classes = []
documents = []
file = open("stoppingwords.txt")
csv_reader = csv.reader(file, delimiter = ',')
content = list(csv_reader)
file.close()
ignore_words = [word for word in content[0]]
data_file = open('intents.json').read()
intents = json.loads(data_file)
for intent in intents['intents']:
for pattern in intent['patterns']:
# take each word and tokenize it
w = nltk.word_tokenize(pattern)
words.extend(w)
# adding documents
documents.append((w, intent['tag']))
# adding classes to our class list
if intent['tag'] not in classes:
classes.append(intent['tag'])
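# Example (hypothetical intent): {"tag": "greeting", "patterns": ["Hi there"]} yields
#   words     += ['Hi', 'there']
#   documents += [(['Hi', 'there'], 'greeting')]
#   classes   += ['greeting']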
| 24.5
| 51
| 0.7
|
9770979f9a5a1e2afc56b9342f33d82ccc97a78f
| 1,676
|
py
|
Python
|
ticketsystem/mainapp/urls.py
|
malfin/ISMticket
|
a29260b008b4d1721b1986c85c9d92d703839f1f
|
[
"Apache-2.0"
] | null | null | null |
ticketsystem/mainapp/urls.py
|
malfin/ISMticket
|
a29260b008b4d1721b1986c85c9d92d703839f1f
|
[
"Apache-2.0"
] | null | null | null |
ticketsystem/mainapp/urls.py
|
malfin/ISMticket
|
a29260b008b4d1721b1986c85c9d92d703839f1f
|
[
"Apache-2.0"
] | null | null | null |
from django.urls import path
import mainapp.views as mainapp
app_name = 'mainapp'
urlpatterns = [
path('', mainapp.index, name='index'),
path('tickets/', mainapp.my_ticket, name='my_ticket'),
path('tickets/create/', mainapp.create_ticket, name='create_ticket'),
path('tickets/open/<int:pk>/', mainapp.open_ticket, name='open_ticket'),
path('tickets/close/<int:pk>/', mainapp.close_ticket, name='close_ticket'),
path('tickets/send/<int:pk>/', mainapp.send_message, name='send_message'),
path('profile/', mainapp.profile, name='profile'),
path('profile/edit/', mainapp.edit_profile, name='edit_profile'),
path('profile/edit/password/', mainapp.change_password, name='change_password'),
path('news/', mainapp.news, name='news'),
path('news/add/', mainapp.create_news, name='create_news'),
path('news/edit/<int:pk>/', mainapp.edit_news, name='edit_news'),
path('news/remove/<int:pk>/', mainapp.remove_news, name='remove_news'),
path('ticket/admin/', mainapp.ticket_admin, name='ticket_admin'),
path('ticket/admin/open/<int:pk>/', mainapp.open_ticket_admin, name='open_ticket_admin'),
path('ticket/admin/send/<int:pk>/', mainapp.send_message_admin, name='send_message_admin'),
path('ticket/admin/close/<int:pk>/', mainapp.close_ticket_admin, name='close_ticket_admin'),
path('ticket/admin/open_message/<int:pk>/', mainapp.open_ticket_message, name='open_ticket_message'),
path('userprofile/', mainapp.all_users, name='all_users'),
path('userprofile/edit/<int:pk>/', mainapp.edit_profile_admin, name='edit_profile_admin'),
path('userprofile/add/', mainapp.create_user, name='create_user'),
]
| 50.787879
| 105
| 0.708831
|
a9970ca270d93f59cdf6fb1d78f9bcaf06cf493f
| 3,951
|
py
|
Python
|
2019/05/day5.py
|
jscpeterson/advent-of-code-2020
|
69dce1b8ac752a63c4b064e46f0f7218a96bceb8
|
[
"Apache-2.0"
] | null | null | null |
2019/05/day5.py
|
jscpeterson/advent-of-code-2020
|
69dce1b8ac752a63c4b064e46f0f7218a96bceb8
|
[
"Apache-2.0"
] | null | null | null |
2019/05/day5.py
|
jscpeterson/advent-of-code-2020
|
69dce1b8ac752a63c4b064e46f0f7218a96bceb8
|
[
"Apache-2.0"
] | null | null | null |
ADD = 1
MULTIPLY = 2
SAVE = 3
OUTPUT = 4
JUMP_IF_TRUE = 5
JUMP_IF_FALSE = 6
LESS_THAN = 7
EQUALS = 8
HALT = 99
OP_PARAMS = {
ADD: 3,
MULTIPLY: 3,
SAVE: 1,
OUTPUT: 1,
JUMP_IF_TRUE: 2,
JUMP_IF_FALSE: 2,
LESS_THAN: 3,
EQUALS: 3,
HALT: 0,
}
POSITION = 0
IMMEDIATE = 1
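# Instruction decoding example (Advent of Code 2019, day 5): instruction 1002 means
#   opcode = 1002 % 100 = 2 (MULTIPLY)
#   parameter modes, read right to left from 1002 // 100 = 10: 0 (position), 1 (immediate), 0 (position)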
class IntcodeCompiler:
def __init__(self, filepath):
with open(filepath) as f:
self.program = list(map(int, f.read().strip().split(',')))
f.close()
def read(self, position, mode):
if mode == POSITION:
return self.program[position]
elif mode == IMMEDIATE:
return position
else:
raise Exception('Unrecognized mode: {}'.format(mode))
def write(self, position, input_):
self.program[position] = input_
def run(self, input_=None):
instruction_pointer = 0
output = None
while True:
            if instruction_pointer >= len(self.program):
instruction_pointer = instruction_pointer % len(self.program)
instruction = self.program[instruction_pointer]
opcode = instruction % 100
modes = instruction // 100
if opcode == ADD:
operand1 = self.read(self.program[instruction_pointer + 1], modes % 10)
modes = modes // 10
operand2 = self.read(self.program[instruction_pointer + 2], modes % 10)
modes = modes // 10
self.write(self.program[instruction_pointer + 3], operand1 + operand2)
elif opcode == MULTIPLY:
operand1 = self.read(self.program[instruction_pointer + 1], modes % 10)
modes = modes // 10
operand2 = self.read(self.program[instruction_pointer + 2], modes % 10)
modes = modes // 10
self.write(self.program[instruction_pointer + 3], operand1 * operand2)
elif opcode == SAVE:
self.write(self.program[instruction_pointer + 1], input_)
elif opcode == OUTPUT:
output = self.read(self.program[instruction_pointer + 1], modes % 10)
elif opcode == JUMP_IF_TRUE:
                if self.read(self.program[instruction_pointer + 1], modes % 10) != 0:  # jump if the parameter is non-zero
modes = modes // 10
instruction_pointer = self.read(self.program[instruction_pointer + 2], modes % 10)
continue
elif opcode == JUMP_IF_FALSE:
if self.read(self.program[instruction_pointer + 1], modes % 10) == 0:
modes = modes // 10
instruction_pointer = self.read(self.program[instruction_pointer + 2], modes % 10)
continue
elif opcode == LESS_THAN:
operand1 = self.read(self.program[instruction_pointer + 1], modes % 10)
modes = modes // 10
operand2 = self.read(self.program[instruction_pointer + 2], modes % 10)
modes = modes // 10
self.write(self.program[instruction_pointer + 3], 1 if operand1 < operand2 else 0)
elif opcode == EQUALS:
operand1 = self.read(self.program[instruction_pointer + 1], modes % 10)
modes = modes // 10
operand2 = self.read(self.program[instruction_pointer + 2], modes % 10)
modes = modes // 10
self.write(self.program[instruction_pointer + 3], 1 if operand1 == operand2 else 0)
elif opcode == HALT:
return output
else:
raise Exception('Unrecognized opcode: {}'.format(opcode))
instruction_pointer += 1 + OP_PARAMS[opcode]
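# 'test1' is assumed to contain the large example program from the day 5, part 2 puzzle,
# which outputs 999 for input < 8, 1000 for input == 8 and 1001 for input > 8.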
assert IntcodeCompiler('test1').run(input_=1) == 999
assert IntcodeCompiler('test1').run(input_=8) == 1000
assert IntcodeCompiler('test1').run(input_=9) == 1001
print(IntcodeCompiler('input').run(input_=5))
| 37.628571
| 102
| 0.564667
|
f5ba8a33eacd571f1e35c5725e6ce1c3ca3eaab3
| 471
|
py
|
Python
|
scrooge/datasource/migrations/0004_auto_20160627_2239.py
|
SpisTresci/scrooge
|
787b7d5f8ece8f3f24feb4273505e6c0ea60b5d7
|
[
"MIT"
] | 1
|
2021-01-04T04:30:24.000Z
|
2021-01-04T04:30:24.000Z
|
scrooge/datasource/migrations/0004_auto_20160627_2239.py
|
SpisTresci/scrooge
|
787b7d5f8ece8f3f24feb4273505e6c0ea60b5d7
|
[
"MIT"
] | 6
|
2019-12-21T03:19:17.000Z
|
2020-01-07T07:28:04.000Z
|
scrooge/datasource/migrations/0004_auto_20160627_2239.py
|
SpisTresci/scrooge
|
787b7d5f8ece8f3f24feb4273505e6c0ea60b5d7
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
# Generated by Django 1.9.4 on 2016-06-27 20:39
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('datasource', '0003_auto_20160627_1928'),
]
operations = [
migrations.AlterUniqueTogether(
name='xmldatafield',
unique_together=set([('name', 'data_source'), ('datafield_name', 'data_source')]),
),
]
| 23.55
| 94
| 0.63482
|
b59e05cf739be04bb254d42ffafbac9626305f41
| 379
|
py
|
Python
|
parsec/commands/tools/get_citations.py
|
erasche/parsec
|
c2f1bda7ff776f9aa121c7b94d62e3da2fad93f6
|
[
"Apache-2.0"
] | 8
|
2015-03-27T17:09:15.000Z
|
2021-07-13T15:33:02.000Z
|
parsec/commands/tools/get_citations.py
|
erasche/parsec
|
c2f1bda7ff776f9aa121c7b94d62e3da2fad93f6
|
[
"Apache-2.0"
] | 30
|
2015-02-27T21:21:47.000Z
|
2021-08-31T14:19:55.000Z
|
parsec/commands/tools/get_citations.py
|
erasche/parsec
|
c2f1bda7ff776f9aa121c7b94d62e3da2fad93f6
|
[
"Apache-2.0"
] | 12
|
2017-06-01T03:49:23.000Z
|
2021-07-13T15:33:06.000Z
|
import click
from parsec.cli import pass_context, json_loads
from parsec.decorators import custom_exception, json_output
@click.command('get_citations')
@click.argument("tool_id", type=str)
@pass_context
@custom_exception
@json_output
def cli(ctx, tool_id):
"""Get BibTeX citations for a given tool ID.
Output:
"""
return ctx.gi.tools.get_citations(tool_id)
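# Invoked through the parsec CLI, e.g. (hypothetical tool id):
#   parsec tools get_citations "toolshed.g2.bx.psu.edu/repos/devteam/bwa/bwa/0.7.17"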
| 19.947368
| 59
| 0.757256
|
bbd63a07393c3ab514b58b03d170123f960e0fc4
| 14,002
|
py
|
Python
|
common/xrd-opmon-tests/testcases/test_metaservices.py
|
ria-ee/XTM
|
6103f3f5bbba387b8b59b050c0c4f1fb2180fc37
|
[
"MIT"
] | 3
|
2018-03-15T14:22:50.000Z
|
2021-11-08T10:30:35.000Z
|
common/xrd-opmon-tests/testcases/test_metaservices.py
|
ria-ee/XTM
|
6103f3f5bbba387b8b59b050c0c4f1fb2180fc37
|
[
"MIT"
] | 11
|
2017-04-06T09:25:41.000Z
|
2018-06-04T09:08:48.000Z
|
common/xrd-opmon-tests/testcases/test_metaservices.py
|
ria-ee/XTM
|
6103f3f5bbba387b8b59b050c0c4f1fb2180fc37
|
[
"MIT"
] | 20
|
2017-03-14T07:21:58.000Z
|
2019-05-21T09:26:30.000Z
|
#!/usr/bin/env python3
# The MIT License
# Copyright (c) 2016 Estonian Information System Authority (RIA), Population Register Centre (VRK)
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
# Test case for verifying that the operational monitoring related data
# of metaservice requests are stored by the operational monitoring
# daemon.
# It is also verified that central monitoring client has full access to
# operational monitoring data.
import os
import common
# Base sizes of request and responses.
# Parameters sizes must be added to these values.
LISTMETHODS_QUERY_REQUEST_SOAP_BASE_SIZE = 1062
# Disabling responseSoapSize check, because tested subsystems may have
# additional services
# LISTMETHODS_QUERY_RESPONSE_SOAP_BASE_SIZE = 2735
GET_SS_METRICS_QUERY_REQUEST_SOAP_BASE_SIZE = 1308
def _listmethods_query_request_parameters_size(query_parameters):
# Request template: listmethods_producer_query_template.xml
return (
len(query_parameters["producer_instance"])
+ len(query_parameters["producer_class"])
+ len(query_parameters["producer_code"])
+ len(query_parameters["producer_system"])
+ len(query_parameters["client_instance"])
+ len(query_parameters["client_class"])
+ len(query_parameters["client_code"])
+ len(query_parameters["client_system"])
)
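# query_parameters is expected to be a dict carrying the X-Road identifiers and addresses
# referenced in this module, e.g. "client_instance", "client_class", "client_code",
# "client_system", the corresponding "producer_*" keys, the server addresses and "ssh_user".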
def _expected_keys_and_values_of_listmethods_query_rec(
xroad_message_id, security_server_address, security_server_type, query_parameters):
request_parameters_size = _listmethods_query_request_parameters_size(query_parameters)
print("Size of listmethods query request parameters: {}".format(request_parameters_size))
return [
("clientMemberClass", query_parameters["client_class"]),
("clientMemberCode", query_parameters["client_code"]),
("clientSecurityServerAddress", query_parameters["client_server_address"]),
("clientSubsystemCode", query_parameters["client_system"]),
("clientXRoadInstance", query_parameters["client_instance"]),
("messageId", xroad_message_id),
("messageProtocolVersion", "4.0"),
("requestAttachmentCount", 0),
("requestSoapSize", LISTMETHODS_QUERY_REQUEST_SOAP_BASE_SIZE + request_parameters_size),
("responseAttachmentCount", 0),
("securityServerInternalIp", security_server_address),
("securityServerType", security_server_type),
("serviceCode", "listMethods"),
("serviceMemberClass", query_parameters["producer_class"]),
("serviceMemberCode", query_parameters["producer_code"]),
("serviceSecurityServerAddress", query_parameters["producer_server_address"]),
("serviceSubsystemCode", query_parameters["producer_system"]),
("serviceVersion", "v1"),
("serviceXRoadInstance", query_parameters["producer_instance"]),
("succeeded", True),
]
def _get_ss_metrics_query_request_parameters_size(query_parameters):
# Request template: get_ss_metrics_query_template.xml
return (
2 * len(query_parameters["producer_instance"])
+ 2 * len(query_parameters["producer_class"])
+ 2 * len(query_parameters["producer_code"])
+ len(query_parameters["producer_server_code"])
+ len(query_parameters["client_instance"])
+ len(query_parameters["client_class"])
+ len(query_parameters["client_code"])
+ len(query_parameters["client_monitor_system"])
)
def _expected_keys_and_values_of_get_ss_metrics_query_rec(
xroad_message_id, security_server_address, security_server_type, query_parameters):
request_parameters_size = _get_ss_metrics_query_request_parameters_size(query_parameters)
print("Size of get ss metrics query request parameters: {}".format(request_parameters_size))
return [
("clientMemberClass", query_parameters["client_class"]),
("clientMemberCode", query_parameters["client_code"]),
("clientSecurityServerAddress", query_parameters["client_server_address"]),
("clientSubsystemCode", query_parameters["client_monitor_system"]),
("clientXRoadInstance", query_parameters["client_instance"]),
("messageId", xroad_message_id),
("messageProtocolVersion", "4.0"),
("requestAttachmentCount", 0),
("requestSoapSize", GET_SS_METRICS_QUERY_REQUEST_SOAP_BASE_SIZE + request_parameters_size),
("responseAttachmentCount", 0),
("securityServerInternalIp", security_server_address),
("securityServerType", security_server_type),
("serviceCode", "getSecurityServerMetrics"),
("serviceMemberClass", query_parameters["producer_class"]),
("serviceMemberCode", query_parameters["producer_code"]),
("serviceSecurityServerAddress", query_parameters["producer_server_address"]),
("serviceXRoadInstance", query_parameters["producer_instance"]),
("succeeded", True),
]
def run(request_template_dir, query_parameters):
client_security_server_address = query_parameters["client_server_ip"]
producer_security_server_address = query_parameters["producer_server_ip"]
ssh_user = query_parameters["ssh_user"]
listmethods_query_template_filename = os.path.join(
request_template_dir, "listmethods_producer_query_template.xml")
get_ss_metrics_query_template_filename = os.path.join(
request_template_dir, "get_ss_metrics_query_template.xml")
query_data_client_template_filename = os.path.join(
request_template_dir,
"query_operational_data_client_central_monitoring_template.xml")
query_data_producer_template_filename = os.path.join(
request_template_dir,
"query_operational_data_producer_central_monitoring_template.xml")
client_timestamp_before_requests = common.get_remote_timestamp(
client_security_server_address, ssh_user)
producer_timestamp_before_requests = common.get_remote_timestamp(
producer_security_server_address, ssh_user)
message_id_listmethods = common.generate_message_id()
print("\nGenerated message ID {} for listMethods request".format(message_id_listmethods))
# Regular and operational data requests and the relevant checks
print("\n---- Sending a listMethods request to the client's security server ----\n")
request_contents = common.format_xroad_request_template(
listmethods_query_template_filename, message_id_listmethods, query_parameters)
print("Generated the following listMethods request: \n")
print(request_contents)
response = common.post_xml_request(
client_security_server_address, request_contents,
get_raw_stream=True)
mime_parts, raw_response = common.parse_multipart_response(response)
if mime_parts:
soap_part = common.get_multipart_soap(mime_parts[0])
common.print_multipart_soap(soap_part)
else:
common.parse_and_check_soap_response(raw_response)
message_id_get_ss_metrics = common.generate_message_id()
print("\nGenerated message ID {} for getSecurityServerMetrics request".format(
message_id_get_ss_metrics))
print("\n---- Sending a getSecurityServerMetrics request to "
"the client's security server ----\n")
request_contents = common.format_xroad_request_template(
get_ss_metrics_query_template_filename, message_id_get_ss_metrics, query_parameters)
print("Generated the following getSecurityServerMetrics request: \n")
print(request_contents)
response = common.post_xml_request(
client_security_server_address, request_contents,
get_raw_stream=True)
mime_parts, raw_response = common.parse_multipart_response(response)
if mime_parts:
soap_part = common.get_multipart_soap(mime_parts[0])
# getSecurityServerMetrics response is large, print only headers
common.print_multipart_soap_headers(soap_part)
# Program should never get here unless getSecurityServerMetrics
# will be changed to return data in attachments instead of
# SOAP Body
raise Exception("\nWARNING!!! getSecurityServerMetrics returned attachments\n")
else:
common.parse_and_check_soap_response(raw_response)
common.wait_for_operational_data()
client_timestamp_after_requests = common.get_remote_timestamp(
client_security_server_address, ssh_user)
producer_timestamp_after_requests = common.get_remote_timestamp(
producer_security_server_address, ssh_user)
# Now make operational data requests to both security servers and
# check the response payloads.
print("\n---- Sending an operational data request to the client's security server ----\n")
message_id = common.generate_message_id()
print("Generated message ID {} for query data request".format(message_id))
request_contents = common.format_query_operational_data_request_template(
query_data_client_template_filename, message_id,
client_timestamp_before_requests, client_timestamp_after_requests, query_parameters)
print("Generated the following query data request for the client's security server: \n")
print(request_contents)
response = common.post_xml_request(
client_security_server_address, request_contents,
get_raw_stream=True)
mime_parts, raw_response = common.parse_multipart_response(response)
if mime_parts:
soap_part, record_count = common.get_multipart_soap_and_record_count(mime_parts[0])
common.print_multipart_soap_and_record_count(soap_part, record_count)
json_payload = common.get_multipart_json_payload(mime_parts[1])
# Check the presence of all the required fields in at least
# one JSON structure.
common.assert_present_in_json(
json_payload, _expected_keys_and_values_of_listmethods_query_rec(
message_id_listmethods, client_security_server_address, "Client",
query_parameters))
common.assert_present_in_json(
json_payload, _expected_keys_and_values_of_get_ss_metrics_query_rec(
message_id_get_ss_metrics, client_security_server_address, "Client",
query_parameters))
# Check if the timestamps in the response are in the expected
# range.
common.assert_expected_timestamp_values(
json_payload,
client_timestamp_before_requests, client_timestamp_after_requests)
common.print_multipart_query_data_response(json_payload)
else:
common.parse_and_check_soap_response(raw_response)
# Central monitoring client is used as a service client in
# operational data request. As central monitoring client is
# registered in client's security server, let's send the
# operational data request to producer's security server via
# client's security server.
print("\n---- Sending an operational data request from central monitoring client "
"to the producer's security server ----\n")
message_id = common.generate_message_id()
print("\nGenerated message ID {} for query data request".format(message_id))
request_contents = common.format_query_operational_data_request_template(
query_data_producer_template_filename, message_id,
producer_timestamp_before_requests, producer_timestamp_after_requests,
query_parameters)
print("Generated the following query data request for the producer's "
"security server: \n")
print(request_contents)
response = common.post_xml_request(
client_security_server_address, request_contents,
get_raw_stream=True)
mime_parts, raw_response = common.parse_multipart_response(response)
if mime_parts:
soap_part, record_count = common.get_multipart_soap_and_record_count(mime_parts[0])
common.print_multipart_soap_and_record_count(soap_part, record_count, is_client=False)
json_payload = common.get_multipart_json_payload(mime_parts[1])
# Check the presence of all the required fields in at least
# one JSON structure.
common.assert_present_in_json(
json_payload, _expected_keys_and_values_of_listmethods_query_rec(
message_id_listmethods, producer_security_server_address, "Producer",
query_parameters))
common.assert_present_in_json(
json_payload, _expected_keys_and_values_of_get_ss_metrics_query_rec(
message_id_get_ss_metrics, producer_security_server_address, "Producer",
query_parameters))
# Check timestamp values
common.assert_expected_timestamp_values(
json_payload,
producer_timestamp_before_requests, producer_timestamp_after_requests)
common.assert_equal_timestamp_values(json_payload)
common.print_multipart_query_data_response(json_payload)
else:
common.parse_and_check_soap_response(raw_response)
| 46.059211
| 99
| 0.746893
|
3b4297a55ff48c9c736a7cc8d8ccb9ba0639010c
| 2,459
|
py
|
Python
|
leetcode_python/Array/spiral-matrix-iii.py
|
yennanliu/Python_basics
|
6a597442d39468295946cefbfb11d08f61424dc3
|
[
"Unlicense"
] | 18
|
2019-08-01T07:45:02.000Z
|
2022-03-31T18:05:44.000Z
|
leetcode_python/Array/spiral-matrix-iii.py
|
yennanliu/Python_basics
|
6a597442d39468295946cefbfb11d08f61424dc3
|
[
"Unlicense"
] | null | null | null |
leetcode_python/Array/spiral-matrix-iii.py
|
yennanliu/Python_basics
|
6a597442d39468295946cefbfb11d08f61424dc3
|
[
"Unlicense"
] | 15
|
2019-12-29T08:46:20.000Z
|
2022-03-08T14:14:05.000Z
|
# V0
# V1
# https://www.jiuzhang.com/solution/spiral-matrix-iii/#tag-highlight-lang-python
def spiralMatrixIII(self, R, C, r0, c0):
"""
:type R: int
:type C: int
:type r0: int
:type c0: int
:rtype: List[List[int]]
"""
i = r0
j = c0
ans = [[i, j]]
adder = 0
while len(ans) < R * C:
adder += 1
cj = j
while j < (cj + adder):
j += 1
if 0 <= j < C and 0 <= i < R:
ans.append([i, j])
ci = i
while i < (ci + adder):
i += 1
if 0 <= j < C and 0 <= i < R:
ans.append([i, j])
adder += 1
cj = j
while j > (cj - adder):
j -= 1
if 0 <= j < C and 0 <= i < R:
ans.append([i, j])
ci = i
while i > (ci - adder):
i -= 1
if 0 <= j < C and 0 <= i < R:
ans.append([i, j])
return ans
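# Example (LeetCode 885, example 1): R=1, C=4, r0=0, c0=0 -> [[0, 0], [0, 1], [0, 2], [0, 3]]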
# V1'
# https://blog.csdn.net/XX_123_1_RJ/article/details/81952905
class Solution:
def spiralMatrixIII(self, R, C, r0, c0):
res = [[r0, c0]]
if R * C == 1: return res
for k in range(1, 2*(R+C), 2): # k = lengh of every side
for dr, dc, dk in ((0, 1, k), (1, 0, k), (0, -1, k+1), (-1, 0, k+1)): # left, up, right, down (4 directions of move)
for _ in range(dk):
r0 += dr
c0 += dc
if 0 <= r0 < R and 0 <= c0 < C: # check if out of the boader
res.append([r0, c0]) # add the new route
if len(res) == R * C: # len(res) == R * C, means already finish the process
return res
# V2
# Time: O(max(m, n)^2)
# Space: O(1)
class Solution(object):
def spiralMatrixIII(self, R, C, r0, c0):
"""
:type R: int
:type C: int
:type r0: int
:type c0: int
:rtype: List[List[int]]
"""
r, c = r0, c0
result = [[r, c]]
x, y, n, i = 0, 1, 0, 0
while len(result) < R*C:
r, c, i = r+x, c+y, i+1
if 0 <= r < R and 0 <= c < C:
result.append([r, c])
if i == n//2+1:
x, y, n, i = y, -x, n+1, 0
return result
| 28.593023
| 130
| 0.371289
|
7894d791f0bec280ba937357d1a5c6f5e63d0260
| 14,780
|
py
|
Python
|
lang/py3/avro/datafile.py
|
mkram/avro
|
f6c044e56f9de7d3e34eeb1702c11fa4add04d84
|
[
"Apache-2.0"
] | 2
|
2021-11-09T12:50:18.000Z
|
2022-01-18T00:12:38.000Z
|
lang/py3/avro/datafile.py
|
mkram/avro
|
f6c044e56f9de7d3e34eeb1702c11fa4add04d84
|
[
"Apache-2.0"
] | null | null | null |
lang/py3/avro/datafile.py
|
mkram/avro
|
f6c044e56f9de7d3e34eeb1702c11fa4add04d84
|
[
"Apache-2.0"
] | 1
|
2020-09-13T13:10:00.000Z
|
2020-09-13T13:10:00.000Z
|
#!/usr/bin/env python3
# -*- mode: python -*-
# -*- coding: utf-8 -*-
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Read/Write Avro File Object Containers."""
import io
import logging
import os
import zlib
from avro import schema
from avro import io as avro_io
try:
import snappy
has_snappy = True
except ImportError:
has_snappy = False
# ------------------------------------------------------------------------------
# Constants
# Version of the container file:
VERSION = 1
# Magic code that starts a data container file:
MAGIC = b'Obj' + bytes([VERSION])
# Size of the magic code, in number of bytes:
MAGIC_SIZE = len(MAGIC)
# Size of the synchronization marker, in number of bytes:
SYNC_SIZE = 16
# Interval between synchronization markers, in number of bytes:
# TODO: make configurable
SYNC_INTERVAL = 1000 * SYNC_SIZE
# Schema of the container header:
META_SCHEMA = schema.Parse("""
{
"type": "record", "name": "org.apache.avro.file.Header",
"fields": [{
"name": "magic",
"type": {"type": "fixed", "name": "magic", "size": %(magic_size)d}
}, {
"name": "meta",
"type": {"type": "map", "values": "bytes"}
}, {
"name": "sync",
"type": {"type": "fixed", "name": "sync", "size": %(sync_size)d}
}]
}
""" % {
'magic_size': MAGIC_SIZE,
'sync_size': SYNC_SIZE,
})
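# A container file is therefore laid out as: the header record above (magic bytes,
# metadata map, 16-byte sync marker) followed by data blocks, each of which is
# terminated by the same sync marker.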
# Codecs supported by container files:
VALID_CODECS = frozenset(['null', 'deflate'])
if has_snappy:
VALID_CODECS = frozenset.union(VALID_CODECS, ['snappy'])
# Not used yet
VALID_ENCODINGS = frozenset(['binary'])
# Metadata key associated to the codec:
CODEC_KEY = "avro.codec"
# Metadata key associated to the schema:
SCHEMA_KEY = "avro.schema"
# ------------------------------------------------------------------------------
# Exceptions
class DataFileException(schema.AvroException):
"""Problem reading or writing file object containers."""
def __init__(self, msg):
super(DataFileException, self).__init__(msg)
# ------------------------------------------------------------------------------
class DataFileWriter(object):
"""Writes Avro data files."""
@staticmethod
def GenerateSyncMarker():
"""Generates a random synchronization marker."""
return os.urandom(SYNC_SIZE)
# TODO: make 'encoder' a metadata property
def __init__(
self,
writer,
datum_writer,
writer_schema=None,
codec='null',
):
"""Constructs a new DataFileWriter instance.
If the schema is not present, presume we're appending.
Args:
writer: File-like object to write into.
datum_writer:
writer_schema: Schema
codec:
"""
self._writer = writer
self._encoder = avro_io.BinaryEncoder(writer)
self._datum_writer = datum_writer
self._buffer_writer = io.BytesIO()
self._buffer_encoder = avro_io.BinaryEncoder(self._buffer_writer)
self._block_count = 0
self._meta = {}
# Ensure we have a writer that accepts bytes:
self._writer.write(b'')
# Whether the header has already been written:
self._header_written = False
if writer_schema is not None:
if codec not in VALID_CODECS:
raise DataFileException('Unknown codec: %r' % codec)
self._sync_marker = DataFileWriter.GenerateSyncMarker()
self.SetMeta('avro.codec', codec)
self.SetMeta('avro.schema', str(writer_schema).encode('utf-8'))
self.datum_writer.writer_schema = writer_schema
else:
# open writer for reading to collect metadata
dfr = DataFileReader(writer, avro_io.DatumReader())
# TODO: collect arbitrary metadata
# collect metadata
self._sync_marker = dfr.sync_marker
self.SetMeta('avro.codec', dfr.GetMeta('avro.codec'))
# get schema used to write existing file
schema_from_file = dfr.GetMeta('avro.schema').decode('utf-8')
self.SetMeta('avro.schema', schema_from_file)
self.datum_writer.writer_schema = schema.Parse(schema_from_file)
# seek to the end of the file and prepare for writing
writer.seek(0, 2)
self._header_written = True
# read-only properties
@property
def writer(self):
return self._writer
@property
def encoder(self):
return self._encoder
@property
def datum_writer(self):
return self._datum_writer
@property
def buffer_encoder(self):
return self._buffer_encoder
@property
def sync_marker(self):
return self._sync_marker
@property
def meta(self):
return self._meta
def __enter__(self):
return self
def __exit__(self, type, value, traceback):
# Perform a close if there's no exception
if type is None:
self.close()
@property
def block_count(self):
return self._block_count
def GetMeta(self, key):
"""Reports the metadata associated to the given key.
Args:
key: Key of the metadata to report the value of.
Returns:
The metadata value, as bytes, or None if the key does not exist.
"""
return self._meta.get(key)
def SetMeta(self, key, value):
"""Sets the metadata value for the given key.
Note: metadata is persisted and retrieved as bytes.
Args:
key: Key of the metadata to set.
value: Value of the metadata, as bytes or str.
Strings are automatically converted to bytes.
"""
if isinstance(value, str):
value = value.encode('utf-8')
assert isinstance(value, bytes), (
'Invalid metadata value for key %r: %r' % (key, value))
self._meta[key] = value
def _WriteHeader(self):
header = {
'magic': MAGIC,
'meta': self.meta,
'sync': self.sync_marker,
}
logging.debug(
'Writing Avro data file header:\n%s\nAvro header schema:\n%s',
header, META_SCHEMA)
self.datum_writer.write_data(META_SCHEMA, header, self.encoder)
self._header_written = True
# TODO: make a schema for blocks and use datum_writer
def _WriteBlock(self):
if not self._header_written:
self._WriteHeader()
if self.block_count <= 0:
logging.info('Current block is empty, nothing to write.')
return
# write number of items in block
self.encoder.write_long(self.block_count)
# write block contents
uncompressed_data = self._buffer_writer.getvalue()
codec = self.GetMeta(CODEC_KEY).decode('utf-8')
if codec == 'null':
compressed_data = uncompressed_data
compressed_data_length = len(compressed_data)
elif codec == 'deflate':
# The first two characters and last character are zlib
# wrappers around deflate data.
compressed_data = zlib.compress(uncompressed_data)[2:-1]
compressed_data_length = len(compressed_data)
elif codec == 'snappy':
compressed_data = snappy.compress(uncompressed_data)
compressed_data_length = len(compressed_data) + 4 # crc32
else:
fail_msg = '"%s" codec is not supported.' % codec
raise DataFileException(fail_msg)
# Write length of block
self.encoder.write_long(compressed_data_length)
# Write block
self.writer.write(compressed_data)
# Write CRC32 checksum for Snappy
    if codec == 'snappy':
self.encoder.write_crc32(uncompressed_data)
# write sync marker
self.writer.write(self.sync_marker)
logging.debug(
'Writing block with count=%d nbytes=%d sync=%r',
self.block_count, compressed_data_length, self.sync_marker)
# reset buffer
self._buffer_writer.seek(0)
self._buffer_writer.truncate()
self._block_count = 0
def append(self, datum):
"""Append a datum to the file."""
self.datum_writer.write(datum, self.buffer_encoder)
self._block_count += 1
# if the data to write is larger than the sync interval, write the block
if self._buffer_writer.tell() >= SYNC_INTERVAL:
self._WriteBlock()
def sync(self):
"""
Return the current position as a value that may be passed to
DataFileReader.seek(long). Forces the end of the current block,
emitting a synchronization marker.
"""
self._WriteBlock()
return self.writer.tell()
def flush(self):
"""Flush the current state of the file, including metadata."""
self._WriteBlock()
self.writer.flush()
def close(self):
"""Close the file."""
self.flush()
self.writer.close()
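# Minimal usage sketch (not part of this module; the schema file, output path and
# record fields below are hypothetical and depend on your own schema):
#
#   from avro import schema, io as avro_io, datafile
#   sch = schema.Parse(open('user.avsc').read())
#   with datafile.DataFileWriter(open('users.avro', 'wb'), avro_io.DatumWriter(),
#                                writer_schema=sch, codec='deflate') as writer:
#       writer.append({'name': 'Ann', 'favorite_number': 7})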
# ------------------------------------------------------------------------------
class DataFileReader(object):
"""Read files written by DataFileWriter."""
# TODO: allow user to specify expected schema?
# TODO: allow user to specify the encoder
def __init__(self, reader, datum_reader):
"""Initializes a new data file reader.
Args:
reader: Open file to read from.
datum_reader: Avro datum reader.
"""
self._reader = reader
self._raw_decoder = avro_io.BinaryDecoder(reader)
self._datum_decoder = None # Maybe reset at every block.
self._datum_reader = datum_reader
# read the header: magic, meta, sync
self._read_header()
# ensure codec is valid
avro_codec_raw = self.GetMeta('avro.codec')
if avro_codec_raw is None:
self.codec = "null"
else:
self.codec = avro_codec_raw.decode('utf-8')
if self.codec not in VALID_CODECS:
raise DataFileException('Unknown codec: %s.' % self.codec)
self._file_length = self._GetInputFileLength()
# get ready to read
self._block_count = 0
self.datum_reader.writer_schema = (
schema.Parse(self.GetMeta(SCHEMA_KEY).decode('utf-8')))
def __enter__(self):
return self
def __exit__(self, type, value, traceback):
# Perform a close if there's no exception
if type is None:
self.close()
def __iter__(self):
return self
def __next__(self):
"""Implements the iterator interface."""
return next(self)
# read-only properties
@property
def reader(self):
return self._reader
@property
def raw_decoder(self):
return self._raw_decoder
@property
def datum_decoder(self):
return self._datum_decoder
@property
def datum_reader(self):
return self._datum_reader
@property
def sync_marker(self):
return self._sync_marker
@property
def meta(self):
return self._meta
@property
def file_length(self):
"""Length of the input file, in bytes."""
return self._file_length
# read/write properties
@property
def block_count(self):
return self._block_count
def GetMeta(self, key):
"""Reports the value of a given metadata key.
Args:
key: Metadata key (string) to report the value of.
Returns:
Value associated to the metadata key, as bytes.
"""
return self._meta.get(key)
def SetMeta(self, key, value):
"""Sets a metadata.
Args:
key: Metadata key (string) to set.
value: Metadata value to set, as bytes.
"""
if isinstance(value, str):
value = value.encode('utf-8')
self._meta[key] = value
def _GetInputFileLength(self):
"""Reports the length of the input file, in bytes.
Leaves the current position unmodified.
Returns:
The length of the input file, in bytes.
"""
current_pos = self.reader.tell()
self.reader.seek(0, 2)
file_length = self.reader.tell()
self.reader.seek(current_pos)
return file_length
def is_EOF(self):
return self.reader.tell() == self.file_length
def _read_header(self):
# seek to the beginning of the file to get magic block
self.reader.seek(0, 0)
# read header into a dict
header = self.datum_reader.read_data(
META_SCHEMA, META_SCHEMA, self.raw_decoder)
# check magic number
if header.get('magic') != MAGIC:
fail_msg = "Not an Avro data file: %s doesn't match %s."\
% (header.get('magic'), MAGIC)
raise schema.AvroException(fail_msg)
# set metadata
self._meta = header['meta']
# set sync marker
self._sync_marker = header['sync']
def _read_block_header(self):
self._block_count = self.raw_decoder.read_long()
if self.codec == "null":
# Skip a long; we don't need to use the length.
self.raw_decoder.skip_long()
self._datum_decoder = self._raw_decoder
elif self.codec == 'deflate':
# Compressed data is stored as (length, data), which
# corresponds to how the "bytes" type is encoded.
data = self.raw_decoder.read_bytes()
# -15 is the log of the window size; negative indicates
# "raw" (no zlib headers) decompression. See zlib.h.
uncompressed = zlib.decompress(data, -15)
self._datum_decoder = avro_io.BinaryDecoder(io.BytesIO(uncompressed))
elif self.codec == 'snappy':
# Compressed data includes a 4-byte CRC32 checksum
length = self.raw_decoder.read_long()
data = self.raw_decoder.read(length - 4)
uncompressed = snappy.decompress(data)
self._datum_decoder = avro_io.BinaryDecoder(io.BytesIO(uncompressed))
      self.raw_decoder.check_crc32(uncompressed)
else:
raise DataFileException("Unknown codec: %r" % self.codec)
def _skip_sync(self):
"""
    Read the next SYNC_SIZE bytes; if they match the sync marker, return True.
    Otherwise, seek back to where we started and return False.
"""
proposed_sync_marker = self.reader.read(SYNC_SIZE)
if proposed_sync_marker != self.sync_marker:
self.reader.seek(-SYNC_SIZE, 1)
return False
else:
return True
# TODO: handle block of length zero
# TODO: clean this up with recursion
def __next__(self):
"""Return the next datum in the file."""
if self.block_count == 0:
if self.is_EOF():
raise StopIteration
elif self._skip_sync():
if self.is_EOF(): raise StopIteration
self._read_block_header()
else:
self._read_block_header()
datum = self.datum_reader.read(self.datum_decoder)
self._block_count -= 1
return datum
def close(self):
"""Close this reader."""
self.reader.close()
if __name__ == '__main__':
raise Exception('Not a standalone module')
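# Illustrative usage sketch (not part of the upstream module); it assumes the
# companion ``avro.io.DatumReader`` class and a local file named ``users.avro``:
#
#   with open('users.avro', 'rb') as fo:
#     with DataFileReader(fo, avro_io.DatumReader()) as reader:
#       for record in reader:   # drives __iter__/__next__ defined above
#         print(record)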
| 27.626168
| 80
| 0.660622
|
adefc8476e326941cd9caf8aa79032dbc68547a7
| 235
|
py
|
Python
|
raissyon/raissyon/doctype/salary_register_confirmation/test_salary_register_confirmation.py
|
mhbu50/raissyon
|
73d5d7498e3e7f74b07e4c0a1c979ad10f9c37ce
|
[
"MIT"
] | null | null | null |
raissyon/raissyon/doctype/salary_register_confirmation/test_salary_register_confirmation.py
|
mhbu50/raissyon
|
73d5d7498e3e7f74b07e4c0a1c979ad10f9c37ce
|
[
"MIT"
] | null | null | null |
raissyon/raissyon/doctype/salary_register_confirmation/test_salary_register_confirmation.py
|
mhbu50/raissyon
|
73d5d7498e3e7f74b07e4c0a1c979ad10f9c37ce
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
# Copyright (c) 2021, Accurate Systems and Contributors
# See license.txt
from __future__ import unicode_literals
# import frappe
import unittest
class TestSalaryRegisterConfirmation(unittest.TestCase):
pass
| 21.363636
| 56
| 0.782979
|
ba419490fe692d6d238b3fc1ad1e05e1dddb5606
| 5,145
|
py
|
Python
|
qiskit/aqua/algorithms/single_sample/amplitude_estimation/ae_algorithm.py
|
l45k/qiskit-aqua
|
77589d5fa0da670c23a4f6730d8a5477c9b0d126
|
[
"Apache-2.0"
] | 1
|
2020-02-12T16:52:34.000Z
|
2020-02-12T16:52:34.000Z
|
qiskit/aqua/algorithms/single_sample/amplitude_estimation/ae_algorithm.py
|
l45k/qiskit-aqua
|
77589d5fa0da670c23a4f6730d8a5477c9b0d126
|
[
"Apache-2.0"
] | null | null | null |
qiskit/aqua/algorithms/single_sample/amplitude_estimation/ae_algorithm.py
|
l45k/qiskit-aqua
|
77589d5fa0da670c23a4f6730d8a5477c9b0d126
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
# This code is part of Qiskit.
#
# (C) Copyright IBM 2018, 2019.
#
# This code is licensed under the Apache License, Version 2.0. You may
# obtain a copy of this license in the LICENSE.txt file in the root directory
# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
#
# Any modifications or derivative works of this code must retain this
# copyright notice, and modified files need to carry a notice indicating
# that they have been altered from the originals.
"""
The Amplitude Estimation Algorithm.
"""
import logging
from abc import abstractmethod
from qiskit.aqua.algorithms import QuantumAlgorithm
from .q_factory import QFactory
logger = logging.getLogger(__name__)
class AmplitudeEstimationAlgorithm(QuantumAlgorithm):
r"""
The Quantum Amplitude Estimation (QAE) algorithm base class.
In general, QAE algorithms aim to approximate the amplitude of a certain, marked state.
This amplitude is encoded in the so-called A operator, performing the mapping
A \|0>_n \|0> = sqrt{1 - a} \|psi_0>_n \|0> + sqrt{a} \|psi_1>_n \|1>
where the amplitude `a` (in [0, 1]) is approximated, and \|psi_0> and \|psi_1> are two
normalized, not necessarily orthogonal, states.
In the QAE algorithms, the Grover operator Q is used, which is defined as
Q = -A S_0 A^{-1} S_psi0,
where S_0 reflects about the \|0>_n state and S_psi0 reflects about \|psi_0>_n.
See https://arxiv.org/abs/quant-ph/0005055 for more detail about QAE.
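    In the standard analysis one writes a = sin^2(theta_a); Q then acts as a rotation
    by the angle 2*theta_a in the two-dimensional subspace spanned by A \|0>_n \|0>
    and its orthogonal complement, so estimating theta_a (e.g. via phase estimation
    on Q) yields an estimate of the amplitude a.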
"""
@abstractmethod
def __init__(self, a_factory=None, q_factory=None, i_objective=None):
self._a_factory = a_factory
self._q_factory = q_factory
self._i_objective = i_objective
super().__init__()
@property
def a_factory(self):
r"""
Get the A operator encoding the amplitude `a` that's approximated, i.e.
A \|0>_n \|0> = sqrt{1 - a} \|psi_0>_n \|0> + sqrt{a} \|psi_1>_n \|1>
see the original Brassard paper (https://arxiv.org/abs/quant-ph/0005055) for more detail.
Returns:
CircuitFactory: the A operator as CircuitFactory
"""
return self._a_factory
@a_factory.setter
def a_factory(self, a_factory):
"""
Set the A operator, that encodes the amplitude to be estimated.
Args:
a_factory (CircuitFactory): the A Operator
"""
self._a_factory = a_factory
@property
def q_factory(self):
r"""
Get the Q operator, or Grover-operator for the Amplitude Estimation algorithm, i.e.
Q = -A S_0 A^{-1} S_psi0,
where S_0 reflects about the \|0>_n state and S_psi0 reflects about \|psi_0>_n.
See https://arxiv.org/abs/quant-ph/0005055 for more detail.
If the Q operator is not set, we try to build it from the A operator.
        If the A operator is not set either, None is returned.
Returns:
QFactory: returns the current Q factory of the algorithm
"""
if self._q_factory is not None:
return self._q_factory
if self._a_factory is not None:
return QFactory(self._a_factory, self.i_objective)
return None
@q_factory.setter
def q_factory(self, q_factory):
"""
Set the Q operator as QFactory.
Args:
q_factory (QFactory): the specialized Q operator
"""
self._q_factory = q_factory
@property
def i_objective(self):
r"""
Get the index of the objective qubit. The objective qubit marks the \|psi_0> state (called
'bad states' in https://arxiv.org/abs/quant-ph/0005055)
with \|0> and \|psi_1> ('good' states) with \|1>.
If the A operator performs the mapping
A \|0>_n \|0> = sqrt{1 - a} \|psi_0>_n \|0> + sqrt{a} \|psi_1>_n \|1>
then, the objective qubit is the last one (which is either \|0> or \|1>).
If the objective qubit (i_objective) is not set, we check if the Q operator (q_factory) is
set and return the index specified there. If the q_factory is not defined,
the index equals the number of qubits of the A operator (a_factory) minus one.
        If the a_factory is not set either, None is returned.
Returns:
int: the index of the objective qubit
"""
if self._i_objective is not None:
return self._i_objective
if self._q_factory is not None:
return self._q_factory.i_objective
if self._a_factory is not None:
return self.a_factory.num_target_qubits - 1
return None
@i_objective.setter
def i_objective(self, i_objective):
"""
Set the index of the objective qubit, i.e. the qubit deciding between 'good/bad' states.
Args:
i_objective (int): the index
Note:
No checks about the validity of the index are performed, since i_objective could also
be set before the A/Q operators and in that case checks cannot be done.
"""
self._i_objective = i_objective
| 32.563291
| 98
| 0.640428
|
6d2dd8e6270c241c670a8862f2de7246c19acba2
| 13,010
|
py
|
Python
|
salt/modules/freebsdservice.py
|
Jille/salt
|
286eaf923782851c9b6602583050be804c181a9a
|
[
"Apache-2.0"
] | null | null | null |
salt/modules/freebsdservice.py
|
Jille/salt
|
286eaf923782851c9b6602583050be804c181a9a
|
[
"Apache-2.0"
] | null | null | null |
salt/modules/freebsdservice.py
|
Jille/salt
|
286eaf923782851c9b6602583050be804c181a9a
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
'''
The service module for FreeBSD
.. important::
If you feel that Salt should be using this module to manage services on a
minion, and it is using a different module (or gives an error similar to
*'service.start' is not available*), see :ref:`here
<module-provider-override>`.
'''
from __future__ import absolute_import
# Import python libs
import logging
import os
import fnmatch
import re
# Import salt libs
import salt.utils
import salt.utils.decorators as decorators
from salt.exceptions import CommandNotFoundError
__func_alias__ = {
'reload_': 'reload'
}
log = logging.getLogger(__name__)
# Define the module's virtual name
__virtualname__ = 'service'
def __virtual__():
'''
Only work on FreeBSD
'''
# Disable on these platforms, specific service modules exist:
if __grains__['os'] == 'FreeBSD':
return __virtualname__
return (False, 'The freebsdservice execution module cannot be loaded: only available on FreeBSD systems.')
@decorators.memoize
def _cmd(jail=None):
'''
Return full path to service command
.. versionchanged:: 2016.3.4
Support for jail (representing jid or jail name) keyword argument in kwargs
'''
service = salt.utils.which('service')
if not service:
raise CommandNotFoundError('\'service\' command not found')
if jail:
jexec = salt.utils.which('jexec')
if not jexec:
raise CommandNotFoundError('\'jexec\' command not found')
service = '{0} {1} {2}'.format(jexec, jail, service)
return service
def _get_jail_path(jail):
'''
.. versionadded:: 2016.3.4
Return the jail's root directory (path) as shown in jls
jail
The jid or jail name
'''
jls = salt.utils.which('jls')
if not jls:
raise CommandNotFoundError('\'jls\' command not found')
jails = __salt__['cmd.run_stdout']('{0} -n jid name path'.format(jls))
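    # ``jls -n jid name path`` prints one jail per line as name=value pairs,
    # e.g. ``jid=1 name=www path=/usr/jails/www``; the loop below splits them apart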
for j in jails.splitlines():
jid, jname, path = (x.split('=')[1].strip() for x in j.split())
if jid == jail or jname == jail:
return path.rstrip('/')
    # XXX, TODO, not sure how to handle nonexistent jail
return ''
def _get_rcscript(name, jail=None):
'''
Return full path to service rc script
.. versionchanged:: 2016.3.4
Support for jail (representing jid or jail name) keyword argument in kwargs
'''
cmd = '{0} -r'.format(_cmd(jail))
prf = _get_jail_path(jail) if jail else ''
for line in __salt__['cmd.run_stdout'](cmd, python_shell=False).splitlines():
if line.endswith('{0}{1}'.format(os.path.sep, name)):
return os.path.join(prf, line.lstrip(os.path.sep))
return None
def _get_rcvar(name, jail=None):
'''
Return rcvar
.. versionchanged:: 2016.3.4
Support for jail (representing jid or jail name) keyword argument in kwargs
'''
if not available(name, jail):
log.error('Service {0} not found'.format(name))
return False
cmd = '{0} {1} rcvar'.format(_cmd(jail), name)
for line in __salt__['cmd.run_stdout'](cmd, python_shell=False).splitlines():
if '_enable="' not in line:
continue
rcvar, _ = line.split('=', 1)
return rcvar
return None
def get_enabled(jail=None):
'''
Return what services are set to run on boot
.. versionchanged:: 2016.3.4
Support for jail (representing jid or jail name) keyword argument in kwargs
CLI Example:
.. code-block:: bash
salt '*' service.get_enabled
'''
ret = []
service = _cmd(jail)
prf = _get_jail_path(jail) if jail else ''
for svc in __salt__['cmd.run']('{0} -e'.format(service)).splitlines():
ret.append(os.path.basename(svc))
# This is workaround for bin/173454 bug
for svc in get_all(jail):
if svc in ret:
continue
if not os.path.exists('{0}/etc/rc.conf.d/{1}'.format(prf, svc)):
continue
if enabled(svc, jail=jail):
ret.append(svc)
return sorted(ret)
def get_disabled(jail=None):
'''
Return what services are available but not enabled to start at boot
.. versionchanged:: 2016.3.4
Support for jail (representing jid or jail name) keyword argument in kwargs
CLI Example:
.. code-block:: bash
salt '*' service.get_disabled
'''
en_ = get_enabled(jail)
all_ = get_all(jail)
return sorted(set(all_) - set(en_))
def _switch(name, # pylint: disable=C0103
on, # pylint: disable=C0103
**kwargs):
'''
Switch on/off service start at boot.
.. versionchanged:: 2016.3.4
Support for jail (representing jid or jail name) and chroot keyword argument
in kwargs. chroot should be used when jail's /etc is mounted read-only and
should point to a root directory where jail's /etc is mounted read-write.
'''
jail = kwargs.get('jail', '')
chroot = kwargs.get('chroot', '').rstrip('/')
if not available(name, jail):
return False
rcvar = _get_rcvar(name, jail)
if not rcvar:
log.error('rcvar for service {0} not found'.format(name))
return False
if jail and not chroot:
# prepend the jail's path in config paths when referring to a jail, when
# chroot is not provided. chroot should be provided when the jail's /etc
# is mounted read-only
chroot = _get_jail_path(jail)
config = kwargs.get('config',
__salt__['config.option']('service.config',
default='{0}/etc/rc.conf'.format(chroot)
)
)
if not config:
rcdir = '{0}/etc/rc.conf.d'.format(chroot)
if not os.path.exists(rcdir) or not os.path.isdir(rcdir):
            log.error('{0} does not exist'.format(rcdir))
return False
config = os.path.join(rcdir, rcvar.replace('_enable', ''))
nlines = []
edited = False
if on:
val = 'YES'
else:
val = 'NO'
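    # The loop below rewrites (or appends) a ``<rcvar>="YES|NO"`` line in the config
    # file, e.g. ``sshd_enable="YES"`` when enabling sshd.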
if os.path.exists(config):
with salt.utils.fopen(config, 'r') as ifile:
for line in ifile:
if not line.startswith('{0}='.format(rcvar)):
nlines.append(line)
continue
rest = line[len(line.split()[0]):] # keep comments etc
nlines.append('{0}="{1}"{2}'.format(rcvar, val, rest))
edited = True
if not edited:
# Ensure that the file ends in a \n
if len(nlines) > 1 and nlines[-1][-1] != '\n':
nlines[-1] = '{0}\n'.format(nlines[-1])
nlines.append('{0}="{1}"\n'.format(rcvar, val))
with salt.utils.fopen(config, 'w') as ofile:
ofile.writelines(nlines)
return True
def enable(name, **kwargs):
'''
Enable the named service to start at boot
name
service name
config : /etc/rc.conf
Config file for managing service. If config value is
empty string, then /etc/rc.conf.d/<service> used.
See man rc.conf(5) for details.
Also service.config variable can be used to change default.
.. versionchanged:: 2016.3.4
jail (optional keyword argument)
the jail's id or name
chroot (optional keyword argument)
the jail's chroot, if the jail's /etc is not mounted read-write
CLI Example:
.. code-block:: bash
salt '*' service.enable <service name>
'''
return _switch(name, True, **kwargs)
def disable(name, **kwargs):
'''
Disable the named service to start at boot
Arguments the same as for enable()
.. versionchanged:: 2016.3.4
jail (optional keyword argument)
the jail's id or name
chroot (optional keyword argument)
the jail's chroot, if the jail's /etc is not mounted read-write
CLI Example:
.. code-block:: bash
salt '*' service.disable <service name>
'''
return _switch(name, False, **kwargs)
def enabled(name, **kwargs):
'''
Return True if the named service is enabled, false otherwise
name
Service name
.. versionchanged:: 2016.3.4
Support for jail (representing jid or jail name) keyword argument in kwargs
CLI Example:
.. code-block:: bash
salt '*' service.enabled <service name>
'''
jail = kwargs.get('jail', '')
if not available(name, jail):
log.error('Service {0} not found'.format(name))
return False
cmd = '{0} {1} rcvar'.format(_cmd(jail), name)
for line in __salt__['cmd.run_stdout'](cmd, python_shell=False).splitlines():
if '_enable="' not in line:
continue
_, state, _ = line.split('"', 2)
return state.lower() in ('yes', 'true', 'on', '1')
    # probably never reached
return False
def disabled(name, **kwargs):
'''
    Return True if the named service is disabled, False otherwise
CLI Example:
.. code-block:: bash
salt '*' service.disabled <service name>
'''
return not enabled(name, **kwargs)
def available(name, jail=None):
'''
Check that the given service is available.
.. versionchanged:: 2016.3.4
jail: optional jid or jail name
CLI Example:
.. code-block:: bash
salt '*' service.available sshd
'''
return name in get_all(jail)
def missing(name, jail=None):
'''
The inverse of service.available.
Returns ``True`` if the specified service is not available, otherwise returns
``False``.
.. versionchanged:: 2016.3.4
jail: optional jid or jail name
CLI Example:
.. code-block:: bash
salt '*' service.missing sshd
'''
return name not in get_all(jail)
def get_all(jail=None):
'''
Return a list of all available services
.. versionchanged:: 2016.3.4
jail: optional jid or jail name
CLI Example:
.. code-block:: bash
salt '*' service.get_all
'''
ret = []
service = _cmd(jail)
for srv in __salt__['cmd.run']('{0} -l'.format(service)).splitlines():
if not srv.isupper():
ret.append(srv)
return sorted(ret)
def start(name, jail=None):
'''
Start the specified service
.. versionchanged:: 2016.3.4
jail: optional jid or jail name
CLI Example:
.. code-block:: bash
salt '*' service.start <service name>
'''
cmd = '{0} {1} onestart'.format(_cmd(jail), name)
return not __salt__['cmd.retcode'](cmd, python_shell=False)
def stop(name, jail=None):
'''
Stop the specified service
.. versionchanged:: 2016.3.4
jail: optional jid or jail name
CLI Example:
.. code-block:: bash
salt '*' service.stop <service name>
'''
cmd = '{0} {1} onestop'.format(_cmd(jail), name)
return not __salt__['cmd.retcode'](cmd, python_shell=False)
def restart(name, jail=None):
'''
Restart the named service
.. versionchanged:: 2016.3.4
jail: optional jid or jail name
CLI Example:
.. code-block:: bash
salt '*' service.restart <service name>
'''
cmd = '{0} {1} onerestart'.format(_cmd(jail), name)
return not __salt__['cmd.retcode'](cmd, python_shell=False)
def reload_(name, jail=None):
'''
    Reload the named service
.. versionchanged:: 2016.3.4
jail: optional jid or jail name
CLI Example:
.. code-block:: bash
salt '*' service.reload <service name>
'''
cmd = '{0} {1} onereload'.format(_cmd(jail), name)
return not __salt__['cmd.retcode'](cmd, python_shell=False)
def status(name, sig=None, jail=None):
'''
Return the status for a service.
If the name contains globbing, a dict mapping service name to True/False
values is returned.
.. versionchanged:: 2016.3.4
.. versionchanged:: Oxygen
The service name can now be a glob (e.g. ``salt*``)
Args:
name (str): The name of the service to check
sig (str): Signature to use to find the service via ps
Returns:
bool: True if running, False otherwise
dict: Maps service name to True if running, False otherwise
CLI Example:
.. code-block:: bash
salt '*' service.status <service name> [service signature]
'''
if sig:
return bool(__salt__['status.pid'](sig))
contains_globbing = bool(re.search(r'\*|\?|\[.+\]', name))
if contains_globbing:
services = fnmatch.filter(get_all(), name)
else:
services = [name]
results = {}
for service in services:
cmd = '{0} {1} onestatus'.format(_cmd(jail), service)
results[service] = not __salt__['cmd.retcode'](cmd,
python_shell=False,
ignore_retcode=True)
if contains_globbing:
return results
return results[name]
| 25.11583
| 110
| 0.600307
|
c9505a17e3fdf556e23aeea332ca6968d6640bdf
| 3,204
|
py
|
Python
|
quotes/apis.py
|
iJohnMaged/stock-market-CMS
|
fc40470c49bf9e3328538b90510923801b13a8f4
|
[
"MIT"
] | null | null | null |
quotes/apis.py
|
iJohnMaged/stock-market-CMS
|
fc40470c49bf9e3328538b90510923801b13a8f4
|
[
"MIT"
] | null | null | null |
quotes/apis.py
|
iJohnMaged/stock-market-CMS
|
fc40470c49bf9e3328538b90510923801b13a8f4
|
[
"MIT"
] | null | null | null |
from dataclasses import dataclass
from datetime import date
import requests
@dataclass
class APIHandler:
original_url: str
post_data: dict = None
data: dict = None
updated: date = None
def __post_init__(self):
self.url = self.original_url
def get(self):
res = requests.get(self.url)
if res.status_code == 404:
raise HTTP404NotFound
self.data = res.json()
self.updated = date.today()
return self.data
def process_data(self, data=None):
raise NotImplementedError
class ExchangeRateAPI(APIHandler):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.cad_rate = None
def process_data(self, data="CAD"):
if self.cad_rate is None or self.updated != date.today():
self.get()
self.cad_rate = self.data["rates"][data]
return self.cad_rate
class IEXApi(APIHandler):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.convert_fields = [
"latestPrice",
"previousClose",
"marketCap",
"week52High",
"week52Low",
]
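        # The fields above are converted with the supplied ``cad_rate`` in
        # process_data() and rounded to two decimal places.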
def ticker_available(self, ticker):
self.url = self.original_url.format(ticker)
try:
self.get()
except HTTP404NotFound:
return False
return True
def process_data(self, data=None):
self.url = self.original_url.format(data["ticker"])
self.get()
self.data["ytdChange"] *= 100
for field in self.convert_fields:
self.data[field] = round(self.data[field] * data["cad_rate"], 2)
if data.get("ticker_item", None) is not None:
self.data["shares_owned"] = data["ticker_item"].shares_owned
self.data["market_value"] = round(
float(data["ticker_item"].shares_owned) * self.data["latestPrice"], 2
)
return self.data
class CryptoAPI(APIHandler):
def ticker_available(self, ticker):
self.url = self.original_url.format(ticker)
try:
self.get()
except HTTP404NotFound:
return False
return True
def process_data(self, data):
self.url = self.original_url.format(data["ticker"])
self.get()
crypto_data = {}
crypto_data["name"] = self.data["name"]
crypto_data["symbol"] = self.data["symbol"]
crypto_data["market_cap"] = self.data["market_data"]["market_cap"]["cad"]
crypto_data["price"] = self.data["market_data"]["current_price"]["cad"]
crypto_data["volume"] = self.data["market_data"]["total_volume"]["cad"]
crypto_data["high_24"] = self.data["market_data"]["high_24h"]["cad"]
crypto_data["low_24"] = self.data["market_data"]["low_24h"]["cad"]
if data.get("ticker_item", None) is not None:
crypto_data["shares_owned"] = data["ticker_item"].shares_owned
crypto_data["market_value"] = round(
float(data["ticker_item"].shares_owned) * crypto_data["price"], 2
)
return crypto_data
class HTTP404NotFound(Exception):
pass
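# Illustrative usage sketch (the endpoint URLs below are placeholders, not real APIs):
#
#   fx = ExchangeRateAPI("https://example.invalid/latest?base=USD")
#   cad_rate = fx.process_data("CAD")      # cached for the current day via self.updated
#
#   iex = IEXApi("https://example.invalid/stock/{}/quote")
#   quote = iex.process_data({"ticker": "AAPL", "cad_rate": cad_rate})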
| 31.106796
| 85
| 0.594881
|
fa0c72f8fa69c3ae79c4b9f3e80c0e2bae2adea0
| 9,202
|
py
|
Python
|
convlab/spec/spec_util.py
|
seungjaeryanlee/ConvLab
|
a8c90f63a86b84d45384b507663c4c6fbe8a378a
|
[
"MIT"
] | 6
|
2021-09-07T14:30:22.000Z
|
2021-12-29T05:54:18.000Z
|
convlab/spec/spec_util.py
|
seungjaeryanlee/ConvLab
|
a8c90f63a86b84d45384b507663c4c6fbe8a378a
|
[
"MIT"
] | null | null | null |
convlab/spec/spec_util.py
|
seungjaeryanlee/ConvLab
|
a8c90f63a86b84d45384b507663c4c6fbe8a378a
|
[
"MIT"
] | 1
|
2021-09-02T15:12:18.000Z
|
2021-09-02T15:12:18.000Z
|
# The spec module
# Manages specification to run things in lab
import itertools
import json
import os
from string import Template
import pydash as ps
from convlab.lib import logger, util
SPEC_DIR = 'convlab/spec'
'''
All spec values are already param, inferred automatically.
To change from a value into param range, e.g.
- single: "explore_anneal_epi": 50
- continuous param: "explore_anneal_epi": {"min": 50, "max": 100, "dist": "uniform"}
- discrete range: "explore_anneal_epi": {"values": [50, 75, 100]}
'''
SPEC_FORMAT = {
"agent": [{
"name": str,
"algorithm": dict,
# "memory": dict,
# "net": dict,
}],
"env": [{
"name": str,
"max_t": (type(None), int, float),
# "max_frame": (int, float),
}],
# "body": {
# "product": ["outer", "inner", "custom"],
# "num": (int, list),
# },
"meta": {
"eval_frequency": (int, float),
"max_session": int,
"max_trial": (type(None), int),
},
"name": str,
}
logger = logger.get_logger(__name__)
def check_comp_spec(comp_spec, comp_spec_format):
'''Base method to check component spec'''
for spec_k, spec_format_v in comp_spec_format.items():
comp_spec_v = comp_spec[spec_k]
if ps.is_list(spec_format_v):
v_set = spec_format_v
assert comp_spec_v in v_set, f'Component spec value {ps.pick(comp_spec, spec_k)} needs to be one of {util.to_json(v_set)}'
else:
v_type = spec_format_v
assert isinstance(comp_spec_v, v_type), f'Component spec {ps.pick(comp_spec, spec_k)} needs to be of type: {v_type}'
if isinstance(v_type, tuple) and int in v_type and isinstance(comp_spec_v, float):
# cast if it can be int
comp_spec[spec_k] = int(comp_spec_v)
def check_body_spec(spec):
'''Base method to check body spec for multi-agent multi-env'''
ae_product = ps.get(spec, 'body.product')
body_num = ps.get(spec, 'body.num')
if ae_product == 'outer':
pass
elif ae_product == 'inner':
agent_num = len(spec['agent'])
env_num = len(spec['env'])
        assert agent_num == env_num, f'Agent and Env spec length must be equal for body `inner` product. Given {agent_num}, {env_num}'
else: # custom
assert ps.is_list(body_num)
def check_compatibility(spec):
'''Check compatibility among spec setups'''
# TODO expand to be more comprehensive
if spec['meta'].get('distributed') == 'synced':
assert ps.get(spec, 'agent.0.net.gpu') == False, f'Distributed mode "synced" works with CPU only. Set gpu: false.'
def check(spec):
'''Check a single spec for validity'''
try:
spec_name = spec.get('name')
assert set(spec.keys()) >= set(SPEC_FORMAT.keys()), f'Spec needs to follow spec.SPEC_FORMAT. Given \n {spec_name}: {util.to_json(spec)}'
for agent_spec in spec['agent']:
check_comp_spec(agent_spec, SPEC_FORMAT['agent'][0])
for env_spec in spec['env']:
check_comp_spec(env_spec, SPEC_FORMAT['env'][0])
# check_comp_spec(spec['body'], SPEC_FORMAT['body'])
check_comp_spec(spec['meta'], SPEC_FORMAT['meta'])
# check_body_spec(spec)
check_compatibility(spec)
except Exception as e:
logger.exception(f'spec {spec_name} fails spec check')
raise e
return True
def check_all():
'''Check all spec files, all specs.'''
spec_files = ps.filter_(os.listdir(SPEC_DIR), lambda f: f.endswith('.json') and not f.startswith('_'))
for spec_file in spec_files:
spec_dict = util.read(f'{SPEC_DIR}/{spec_file}')
for spec_name, spec in spec_dict.items():
# fill-in info at runtime
spec['name'] = spec_name
spec = extend_meta_spec(spec)
try:
check(spec)
except Exception as e:
logger.exception(f'spec_file {spec_file} fails spec check')
raise e
logger.info(f'Checked all specs from: {ps.join(spec_files, ",")}')
return True
def extend_meta_spec(spec):
'''Extend meta spec with information for lab functions'''
extended_meta_spec = {
# reset lab indices to -1 so that they tick to 0
'experiment': -1,
'trial': -1,
'session': -1,
'cuda_offset': int(os.environ.get('CUDA_OFFSET', 0)),
'experiment_ts': util.get_ts(),
'prepath': None,
# ckpt extends prepath, e.g. ckpt_str = ckpt-epi10-totalt1000
'ckpt': None,
'git_sha': util.get_git_sha(),
'random_seed': None,
'eval_model_prepath': None,
}
spec['meta'].update(extended_meta_spec)
return spec
def get(spec_file, spec_name):
'''
Get an experiment spec from spec_file, spec_name.
Auto-check spec.
@example
spec = spec_util.get('base.json', 'base_case_openai')
'''
spec_file = spec_file.replace(SPEC_DIR, '') # cleanup
if 'data/' in spec_file:
        assert spec_name in spec_file, 'spec_file in data/ must be lab-generated and contain spec_name'
spec = util.read(spec_file)
else:
spec_file = f'{SPEC_DIR}/{spec_file}' # allow direct filename
spec_dict = util.read(spec_file)
assert spec_name in spec_dict, f'spec_name {spec_name} is not in spec_file {spec_file}. Choose from:\n {ps.join(spec_dict.keys(), ",")}'
spec = spec_dict[spec_name]
# fill-in info at runtime
spec['name'] = spec_name
spec = extend_meta_spec(spec)
check(spec)
return spec
def get_eval_spec(spec_file, spec_name, prename=None):
'''Get spec for eval mode'''
spec = get(spec_file, spec_name)
spec['meta']['ckpt'] = 'eval'
spec['meta']['eval_model_prepath'] = prename
return spec
def get_param_specs(spec):
'''Return a list of specs with substituted spec_params'''
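    # Illustrative example (keys and values are hypothetical): a spec containing
    #   "spec_params": {"lr": ["0.01", "0.1"]}
    # plus "$lr" placeholders elsewhere yields one spec per value, substituted via
    # string.Template and with the value appended to the spec name.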
assert 'spec_params' in spec, 'Parametrized spec needs a spec_params key'
spec_params = spec.pop('spec_params')
spec_template = Template(json.dumps(spec))
keys = spec_params.keys()
specs = []
for idx, vals in enumerate(itertools.product(*spec_params.values())):
spec_str = spec_template.substitute(dict(zip(keys, vals)))
spec = json.loads(spec_str)
spec['name'] += f'_{"_".join(vals)}'
# offset to prevent parallel-run GPU competition, to mod in util.set_cuda_id
cuda_id_gap = int(spec['meta']['max_session'] / spec['meta']['param_spec_process'])
spec['meta']['cuda_offset'] += idx * cuda_id_gap
specs.append(spec)
return specs
def override_dev_spec(spec):
spec['meta']['max_session'] = 1
spec['meta']['max_trial'] = 2
return spec
#def override_enjoy_spec(spec):
# spec['meta']['max_session'] = 1
# return spec
def override_eval_spec(spec):
spec['meta']['max_session'] = 1
# evaluate by episode is set in env clock init in env/base.py
return spec
def override_test_spec(spec):
for agent_spec in spec['agent']:
# onpolicy freq is episodic
freq = 1 if agent_spec['memory']['name'] == 'OnPolicyReplay' else 8
agent_spec['algorithm']['training_frequency'] = freq
agent_spec['algorithm']['training_start_step'] = 1
agent_spec['algorithm']['training_iter'] = 1
agent_spec['algorithm']['training_batch_iter'] = 1
for env_spec in spec['env']:
env_spec['max_frame'] = 40
env_spec['max_t'] = 12
spec['meta']['log_frequency'] = 10
spec['meta']['eval_frequency'] = 10
spec['meta']['max_session'] = 1
spec['meta']['max_trial'] = 2
return spec
def save(spec, unit='experiment'):
'''Save spec to proper path. Called at Experiment or Trial init.'''
prepath = util.get_prepath(spec, unit)
util.write(spec, f'{prepath}_spec.json')
def tick(spec, unit):
'''
Method to tick lab unit (experiment, trial, session) in meta spec to advance their indices
Reset lower lab indices to -1 so that they tick to 0
spec_util.tick(spec, 'session')
session = Session(spec)
'''
meta_spec = spec['meta']
if unit == 'experiment':
meta_spec['experiment_ts'] = util.get_ts()
meta_spec['experiment'] += 1
meta_spec['trial'] = -1
meta_spec['session'] = -1
elif unit == 'trial':
if meta_spec['experiment'] == -1:
meta_spec['experiment'] += 1
meta_spec['trial'] += 1
meta_spec['session'] = -1
elif unit == 'session':
if meta_spec['experiment'] == -1:
meta_spec['experiment'] += 1
if meta_spec['trial'] == -1:
meta_spec['trial'] += 1
meta_spec['session'] += 1
else:
raise ValueError(f'Unrecognized lab unit to tick: {unit}')
# set prepath since it is determined at this point
meta_spec['prepath'] = prepath = util.get_prepath(spec, unit)
for folder in ('graph', 'info', 'log', 'model'):
folder_prepath = util.insert_folder(prepath, folder)
os.makedirs(os.path.dirname(util.smart_path(folder_prepath)), exist_ok=True)
meta_spec[f'{folder}_prepath'] = folder_prepath
return spec
| 34.988593
| 144
| 0.621169
|
1d26f4292c443334a2de5b10112eda5b20a117b1
| 1,142
|
py
|
Python
|
knapsack_problem/file_handling.py
|
netotz/optimization
|
e688904759892bb2e2048c1778434c411f29ddb4
|
[
"Unlicense"
] | 1
|
2020-02-07T04:44:26.000Z
|
2020-02-07T04:44:26.000Z
|
knapsack_problem/file_handling.py
|
netotz/optimization
|
e688904759892bb2e2048c1778434c411f29ddb4
|
[
"Unlicense"
] | 1
|
2021-08-31T15:19:53.000Z
|
2021-08-31T15:19:53.000Z
|
knapsack_problem/file_handling.py
|
netotz/optimization
|
e688904759892bb2e2048c1778434c411f29ddb4
|
[
"Unlicense"
] | null | null | null |
"""
Module for viewing subdirectories and constructing paths and file names.
"""
from os import listdir, makedirs
from os.path import dirname, join, isfile
FOLDER = 'instances'
def generateFileName(total_items, capacity, index = 0):
'''
Generates a name for the file which will store the n items and capacity of an instance.
The index parameter distinguishes instances of same size.
'''
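    # e.g. generateFileName(50, 1000, 2) -> '50_1000_2.dat'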
return str(total_items) + '_' + str(capacity) + '_' + str(index) + '.dat'
def getFilePath(file_name):
'''
Returns the path of file_name.
'''
current_directory = dirname(__file__)
file_path = join(current_directory, FOLDER, file_name)
return file_path
def listFiles():
'''
Returns a list with the .dat files in the instances/ subdirectory.
'''
current_directory = dirname(__file__)
subdirectory = join(current_directory, FOLDER)
try:
files_list = listdir(subdirectory)
except FileNotFoundError:
makedirs(subdirectory)
return listFiles()
else:
return [file for file in files_list if isfile(join(subdirectory, file)) and file.endswith('.dat')]
| 29.282051
| 106
| 0.690893
|
c128eccd8bfccba16d7bebe299019485f27cb38e
| 336
|
py
|
Python
|
build-support/travis/generate_travis_yml_main.py
|
billybecker/pants
|
ee101f3e360b712aceb9dacf7723aaf9b5567f04
|
[
"Apache-2.0"
] | 94
|
2015-01-15T21:24:20.000Z
|
2022-02-16T16:55:43.000Z
|
build-support/travis/generate_travis_yml_main.py
|
billybecker/pants
|
ee101f3e360b712aceb9dacf7723aaf9b5567f04
|
[
"Apache-2.0"
] | 5
|
2020-07-18T01:04:43.000Z
|
2021-05-10T08:40:56.000Z
|
build-support/travis/generate_travis_yml_main.py
|
billybecker/pants
|
ee101f3e360b712aceb9dacf7723aaf9b5567f04
|
[
"Apache-2.0"
] | 47
|
2015-02-25T02:20:07.000Z
|
2022-03-21T00:59:16.000Z
|
# coding=utf-8
# Copyright 2018 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import absolute_import, division, print_function, unicode_literals
from generate_travis_yml import generate_travis_yml
if __name__ == '__main__':
generate_travis_yml()
| 28
| 82
| 0.803571
|
87a080e15a30cf0a85da6b88b6cda82511adc662
| 9,765
|
py
|
Python
|
cowrie/output/donthackme.py
|
McNinja/McCowrie
|
41d1a8fdc1f0331414004f5e1139060317f5258e
|
[
"BSD-3-Clause"
] | null | null | null |
cowrie/output/donthackme.py
|
McNinja/McCowrie
|
41d1a8fdc1f0331414004f5e1139060317f5258e
|
[
"BSD-3-Clause"
] | null | null | null |
cowrie/output/donthackme.py
|
McNinja/McCowrie
|
41d1a8fdc1f0331414004f5e1139060317f5258e
|
[
"BSD-3-Clause"
] | null | null | null |
"""Cowrie API output plugin."""
# Copyright 2016 Russell Troxel
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import json
import base64
from datetime import datetime, timedelta
from requests import Session, Request
import cowrie.core.output
# entry = {
# "entryid": "asdf",
# "session": "asdf",
# "timestamp": "isoformat",
# "src_ip": "1.2.3.4",
# "username": "asdf",
# "password": "asdf",
# "input": "cmd",
# "url": "asdf",
# "outfile": "asdf",
# "shasum": "asdf",
# "realm": "asdf",
# "version": "asdf",
# "width": 1,
# "height": 1,
# "ttylog": "asdf",
# "size": "asdf",
# "fingerprint": "asdf"
# }
def _send_request(*args, **kwargs):
req = Request(*args, **kwargs)
s = Session()
prepped = s.prepare_request(req)
resp = s.send(prepped)
if resp.status_code > 300:
raise HttpStatusCodeError(resp)
return resp.json()
class HttpStatusCodeError(Exception):
"""Error when status code was not expected."""
def __init__(self, response):
"""Initialize error from requests Response."""
msg = "Unexpected Response from Server: HTTP {0}, Body: {1}"
self.message = msg.format(response.status_code, response.text)
self.resp = response
def __str__(self):
"""Return String."""
return repr(self.message)
class Output(cowrie.core.output.Output):
"""Output over REST to the cowrie_api."""
def __init__(self, cfg):
"""Init."""
self.cfg = cfg
self.username = cfg.get('output_donthackme', "username")
self.api_key = cfg.get('output_donthackme', "api_key")
self.endpoint = cfg.get('output_donthackme', "endpoint")
self.expires = False
cowrie.core.output.Output.__init__(self, cfg)
def refresh_token(self):
"""If token is close to expiry, retrieve new."""
if self.expires and \
self.expires > datetime.utcnow() + timedelta(minutes=10):
return
agent_string = "Donthack.Me Cowrie Output Plugin v0.1, User: {0}"
headers = {
"Content-Type": "application/json",
"User-Agent": agent_string.format(self.username)
}
payload = {
"key_auth": {
"username": self.username,
"api_key": self.api_key
}
}
response = _send_request(
"GET",
"".join([self.endpoint, "/users/token"]),
data=json.dumps(payload),
headers=headers
)
expires = response["token"]["expires"]
self.token = response["token"]["id"]
self.expires = datetime.strptime(expires, "%Y-%m-%dT%H:%M:%S.%fZ")
def headers(self):
"""Prepare request headers."""
self.refresh_token()
agent_string = "Donthack.Me Cowrie Output Plugin v0.1, User: {0}"
headers = {
"Content-Type": "application/json",
"User-Agent": agent_string.format(self.username),
"X-JWT": self.token
}
return headers
def make_url(self, path):
"""Join base endpoint and path."""
return "".join((self.endpoint, "/events", path))
def prepare_entry(self, logentry, payload, path, method="PUT", popkeys=[]):
"""Prepare an entry based on base_data."""
data = {
"method": method,
"url": self.make_url(path),
"data": {
"session": logentry["session"],
"sensor_name": self.sensor,
"timestamp": logentry["timestamp"]
}
}
if "dst_ip" in logentry:
data["data"]["sensor_ip"] = logentry["dst_ip"]
data["data"].update(payload)
for key in popkeys:
data["data"].pop(key)
return data
def send_data(self, method, url, data):
"""Send data to endpoint."""
return _send_request(
method.upper(),
url,
data=json.dumps(data),
headers=self.headers()
)
def start(self):
"""Start."""
pass
def stop(self):
"""Stop."""
pass
def write(self, logentry):
"""Class write method."""
prepared_entries = []
if logentry["eventid"] == "cowrie.session.connect":
payload = {
"source_ip": logentry["src_ip"],
"start_time": logentry["timestamp"]
}
data = self.prepare_entry(
logentry,
payload,
"/session/connect",
method="POST",
popkeys=["timestamp"]
)
prepared_entries.append(data)
elif logentry["eventid"] in ["cowrie.login.success",
"cowrie.login.failed"]:
payload = {
"username": logentry["username"],
"password": logentry["password"],
"success": False
}
path = "/login/failed"
if logentry["eventid"] == "cowrie.login.success":
payload["success"] = True
path = "/login/success"
data = self.prepare_entry(
logentry,
payload,
path
)
prepared_entries.append(data)
elif logentry["eventid"] in ["cowrie.command.success",
"cowrie.command.failed"]:
payload = {
"command": logentry["input"],
"success": False
}
path = "/command/failed"
if logentry["eventid"] == "cowrie.command.success":
payload["success"] = True
path = "/command/success"
data = self.prepare_entry(
logentry,
payload,
path
)
prepared_entries.append(data)
elif logentry["eventid"] == "cowrie.session.file_download":
payload = {
"shasum": logentry["shasum"],
"url": logentry["url"],
"outfile": logentry["outfile"]
}
data = self.prepare_entry(
logentry,
payload,
"/session/file_download"
)
prepared_entries.append(data)
elif logentry["eventid"] == 'cowrie.client.version':
payload = {
"ssh_version": logentry["version"],
"ssh_kexAlgs": logentry["kexAlgs"],
"ssh_keyAlgs": logentry["keyAlgs"],
"ssh_macCS": logentry["macCS"]
}
data = self.prepare_entry(
logentry,
payload,
"/client/version",
popkeys=["timestamp"]
)
prepared_entries.append(data)
elif logentry["eventid"] == 'cowrie.client.size':
payload = {
"ttysize": {
"height": logentry["height"],
"width": logentry["width"],
}
}
data = self.prepare_entry(
logentry,
payload,
"/client/size",
popkeys=["timestamp"]
)
prepared_entries.append(data)
elif logentry["eventid"] == 'cowrie.session.closed':
data = self.prepare_entry(
logentry,
{"end_time": logentry["timestamp"]},
"/session/closed",
popkeys=["timestamp"]
)
prepared_entries.append(data)
elif logentry["eventid"] == 'cowrie.log.closed':
max_size = logentry["size"] + 1024
with open(logentry["ttylog"], "rb") as f:
ttylog = f.read(max_size)
payload = {
"ttylog": {
"size": logentry["size"],
"log_location": logentry["ttylog"],
"log_base64": base64.b64encode(ttylog)
}
}
data = self.prepare_entry(
logentry,
payload,
"/log/closed",
popkeys=["timestamp"]
)
prepared_entries.append(data)
elif logentry["eventid"] == 'cowrie.client.fingerprint':
payload = {
"username": logentry["username"],
"fingerprint": logentry["fingerprint"]
}
data = self.prepare_entry(
logentry,
payload,
"/client/fingerprint"
)
prepared_entries.append(data)
# elif logentry["eventid"] == "cowrie.direct-tcpip.request":
# payload = {
# "dest_port": logentry["dst_port"],
# "dest_ip": logentry["dst_ip"]
# }
# data = self.prepare_entry(
# logentry,
# payload,
# "/cdirect-tcpip/request"
# )
# prepared_entries.append(data)
for entry in prepared_entries:
self.send_data(**entry)
| 29.862385
| 79
| 0.498515
|
2572be34f9b825a21707b4d04f87cb18b1662ee6
| 5,794
|
py
|
Python
|
py_src/mlpiot/proto/action_execution_pb2.py
|
machine2learn/mlpiot.base
|
da0b77fccbb0e42d1ddbb6dbc490313433dc7575
|
[
"Apache-2.0"
] | 1
|
2021-03-30T20:49:54.000Z
|
2021-03-30T20:49:54.000Z
|
py_src/mlpiot/proto/action_execution_pb2.py
|
machine2learn/mlpiot.base
|
da0b77fccbb0e42d1ddbb6dbc490313433dc7575
|
[
"Apache-2.0"
] | null | null | null |
py_src/mlpiot/proto/action_execution_pb2.py
|
machine2learn/mlpiot.base
|
da0b77fccbb0e42d1ddbb6dbc490313433dc7575
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: mlpiot/proto/action_execution.proto
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
from mlpiot.proto import google_timestamp_pb2 as mlpiot_dot_proto_dot_google__timestamp__pb2
from mlpiot.proto import event_pb2 as mlpiot_dot_proto_dot_event__pb2
DESCRIPTOR = _descriptor.FileDescriptor(
name='mlpiot/proto/action_execution.proto',
package='mlpiot.proto',
syntax='proto3',
serialized_options=None,
serialized_pb=b'\n#mlpiot/proto/action_execution.proto\x12\x0cmlpiot.proto\x1a#mlpiot/proto/google_timestamp.proto\x1a\x18mlpiot/proto/event.proto\"H\n\x16\x41\x63tionExecutorMetadata\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x0f\n\x07version\x18\x02 \x01(\x05\x12\x0f\n\x07payload\x18\x03 \x01(\t\"\xab\x01\n\x0f\x41\x63tionExecution\x12*\n\ttimestamp\x18\x01 \x01(\x0b\x32\x17.mlpiot.proto.Timestamp\x12\x36\n\x08metadata\x18\x02 \x01(\x0b\x32$.mlpiot.proto.ActionExecutorMetadata\x12\x34\n\x17\x61\x63tion_execution_events\x18\x03 \x03(\x0b\x32\x13.mlpiot.proto.Eventb\x06proto3'
,
dependencies=[mlpiot_dot_proto_dot_google__timestamp__pb2.DESCRIPTOR,mlpiot_dot_proto_dot_event__pb2.DESCRIPTOR,])
_ACTIONEXECUTORMETADATA = _descriptor.Descriptor(
name='ActionExecutorMetadata',
full_name='mlpiot.proto.ActionExecutorMetadata',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='name', full_name='mlpiot.proto.ActionExecutorMetadata.name', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='version', full_name='mlpiot.proto.ActionExecutorMetadata.version', index=1,
number=2, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='payload', full_name='mlpiot.proto.ActionExecutorMetadata.payload', index=2,
number=3, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=116,
serialized_end=188,
)
_ACTIONEXECUTION = _descriptor.Descriptor(
name='ActionExecution',
full_name='mlpiot.proto.ActionExecution',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='timestamp', full_name='mlpiot.proto.ActionExecution.timestamp', index=0,
number=1, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='metadata', full_name='mlpiot.proto.ActionExecution.metadata', index=1,
number=2, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='action_execution_events', full_name='mlpiot.proto.ActionExecution.action_execution_events', index=2,
number=3, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=191,
serialized_end=362,
)
_ACTIONEXECUTION.fields_by_name['timestamp'].message_type = mlpiot_dot_proto_dot_google__timestamp__pb2._TIMESTAMP
_ACTIONEXECUTION.fields_by_name['metadata'].message_type = _ACTIONEXECUTORMETADATA
_ACTIONEXECUTION.fields_by_name['action_execution_events'].message_type = mlpiot_dot_proto_dot_event__pb2._EVENT
DESCRIPTOR.message_types_by_name['ActionExecutorMetadata'] = _ACTIONEXECUTORMETADATA
DESCRIPTOR.message_types_by_name['ActionExecution'] = _ACTIONEXECUTION
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
ActionExecutorMetadata = _reflection.GeneratedProtocolMessageType('ActionExecutorMetadata', (_message.Message,), {
'DESCRIPTOR' : _ACTIONEXECUTORMETADATA,
'__module__' : 'mlpiot.proto.action_execution_pb2'
# @@protoc_insertion_point(class_scope:mlpiot.proto.ActionExecutorMetadata)
})
_sym_db.RegisterMessage(ActionExecutorMetadata)
ActionExecution = _reflection.GeneratedProtocolMessageType('ActionExecution', (_message.Message,), {
'DESCRIPTOR' : _ACTIONEXECUTION,
'__module__' : 'mlpiot.proto.action_execution_pb2'
# @@protoc_insertion_point(class_scope:mlpiot.proto.ActionExecution)
})
_sym_db.RegisterMessage(ActionExecution)
# @@protoc_insertion_point(module_scope)
| 40.802817
| 580
| 0.774422
|
eb65a0bd69937e2cc6cd6c9a09e95f916e8e69de
| 26,145
|
py
|
Python
|
sdk/apimanagement/azure-mgmt-apimanagement/azure/mgmt/apimanagement/operations/_api_version_set_operations.py
|
mohamedshabanofficial/azure-sdk-for-python
|
81c585f310cd2ec23d2ad145173958914a075a58
|
[
"MIT"
] | 2
|
2021-03-24T06:26:11.000Z
|
2021-04-18T15:55:59.000Z
|
sdk/apimanagement/azure-mgmt-apimanagement/azure/mgmt/apimanagement/operations/_api_version_set_operations.py
|
mohamedshabanofficial/azure-sdk-for-python
|
81c585f310cd2ec23d2ad145173958914a075a58
|
[
"MIT"
] | 2
|
2021-11-03T06:10:36.000Z
|
2021-12-01T06:29:39.000Z
|
sdk/apimanagement/azure-mgmt-apimanagement/azure/mgmt/apimanagement/operations/_api_version_set_operations.py
|
mohamedshabanofficial/azure-sdk-for-python
|
81c585f310cd2ec23d2ad145173958914a075a58
|
[
"MIT"
] | 1
|
2021-05-19T02:55:10.000Z
|
2021-05-19T02:55:10.000Z
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import TYPE_CHECKING
import warnings
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.paging import ItemPaged
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import HttpRequest, HttpResponse
from azure.mgmt.core.exceptions import ARMErrorFormat
from .. import models as _models
if TYPE_CHECKING:
# pylint: disable=unused-import,ungrouped-imports
from typing import Any, Callable, Dict, Generic, Iterable, Optional, TypeVar, Union
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
class ApiVersionSetOperations(object):
"""ApiVersionSetOperations operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.apimanagement.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
def __init__(self, client, config, serializer, deserializer):
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
def list_by_service(
self,
resource_group_name, # type: str
service_name, # type: str
filter=None, # type: Optional[str]
top=None, # type: Optional[int]
skip=None, # type: Optional[int]
**kwargs # type: Any
):
# type: (...) -> Iterable["_models.ApiVersionSetCollection"]
"""Lists a collection of API Version Sets in the specified service instance.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param service_name: The name of the API Management service.
:type service_name: str
:param filter: | Field | Usage | Supported operators | Supported
functions |</br>|-------------|-------------|-------------|-------------|</br>.
:type filter: str
:param top: Number of records to return.
:type top: int
:param skip: Number of records to skip.
:type skip: int
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either ApiVersionSetCollection or the result of cls(response)
:rtype: ~azure.core.paging.ItemPaged[~azure.mgmt.apimanagement.models.ApiVersionSetCollection]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.ApiVersionSetCollection"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-06-01-preview"
accept = "application/json"
def prepare_request(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
# Construct URL
url = self.list_by_service.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'serviceName': self._serialize.url("service_name", service_name, 'str', max_length=50, min_length=1, pattern=r'^[a-zA-Z](?:[a-zA-Z0-9-]*[a-zA-Z0-9])?$'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
if filter is not None:
query_parameters['$filter'] = self._serialize.query("filter", filter, 'str')
if top is not None:
query_parameters['$top'] = self._serialize.query("top", top, 'int', minimum=1)
if skip is not None:
query_parameters['$skip'] = self._serialize.query("skip", skip, 'int', minimum=0)
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
request = self._client.get(url, query_parameters, header_parameters)
return request
def extract_data(pipeline_response):
deserialized = self._deserialize('ApiVersionSetCollection', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, iter(list_of_elem)
def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
error = self._deserialize(_models.ErrorResponse, response)
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
return pipeline_response
return ItemPaged(
get_next, extract_data
)
list_by_service.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ApiManagement/service/{serviceName}/apiVersionSets'} # type: ignore
def get_entity_tag(
self,
resource_group_name, # type: str
service_name, # type: str
version_set_id, # type: str
**kwargs # type: Any
):
# type: (...) -> bool
"""Gets the entity state (Etag) version of the Api Version Set specified by its identifier.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param service_name: The name of the API Management service.
:type service_name: str
:param version_set_id: Api Version Set identifier. Must be unique in the current API Management
service instance.
:type version_set_id: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: bool, or the result of cls(response)
:rtype: bool
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-06-01-preview"
accept = "application/json"
# Construct URL
url = self.get_entity_tag.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'serviceName': self._serialize.url("service_name", service_name, 'str', max_length=50, min_length=1, pattern=r'^[a-zA-Z](?:[a-zA-Z0-9-]*[a-zA-Z0-9])?$'),
'versionSetId': self._serialize.url("version_set_id", version_set_id, 'str', max_length=80, min_length=1, pattern=r'^[^*#&+:<>?]+$'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.head(url, query_parameters, header_parameters)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize(_models.ErrorResponse, response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
response_headers = {}
response_headers['ETag']=self._deserialize('str', response.headers.get('ETag'))
if cls:
return cls(pipeline_response, None, response_headers)
return 200 <= response.status_code <= 299
get_entity_tag.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ApiManagement/service/{serviceName}/apiVersionSets/{versionSetId}'} # type: ignore
def get(
self,
resource_group_name, # type: str
service_name, # type: str
version_set_id, # type: str
**kwargs # type: Any
):
# type: (...) -> "_models.ApiVersionSetContract"
"""Gets the details of the Api Version Set specified by its identifier.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param service_name: The name of the API Management service.
:type service_name: str
:param version_set_id: Api Version Set identifier. Must be unique in the current API Management
service instance.
:type version_set_id: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: ApiVersionSetContract, or the result of cls(response)
:rtype: ~azure.mgmt.apimanagement.models.ApiVersionSetContract
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.ApiVersionSetContract"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-06-01-preview"
accept = "application/json"
# Construct URL
url = self.get.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'serviceName': self._serialize.url("service_name", service_name, 'str', max_length=50, min_length=1, pattern=r'^[a-zA-Z](?:[a-zA-Z0-9-]*[a-zA-Z0-9])?$'),
'versionSetId': self._serialize.url("version_set_id", version_set_id, 'str', max_length=80, min_length=1, pattern=r'^[^*#&+:<>?]+$'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize(_models.ErrorResponse, response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
response_headers = {}
        response_headers['ETag'] = self._deserialize('str', response.headers.get('ETag'))
deserialized = self._deserialize('ApiVersionSetContract', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, response_headers)
return deserialized
get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ApiManagement/service/{serviceName}/apiVersionSets/{versionSetId}'} # type: ignore
def create_or_update(
self,
resource_group_name, # type: str
service_name, # type: str
version_set_id, # type: str
parameters, # type: "_models.ApiVersionSetContract"
if_match=None, # type: Optional[str]
**kwargs # type: Any
):
# type: (...) -> "_models.ApiVersionSetContract"
"""Creates or Updates a Api Version Set.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param service_name: The name of the API Management service.
:type service_name: str
:param version_set_id: Api Version Set identifier. Must be unique in the current API Management
service instance.
:type version_set_id: str
:param parameters: Create or update parameters.
:type parameters: ~azure.mgmt.apimanagement.models.ApiVersionSetContract
:param if_match: ETag of the Entity. Not required when creating an entity, but required when
updating an entity.
:type if_match: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: ApiVersionSetContract, or the result of cls(response)
:rtype: ~azure.mgmt.apimanagement.models.ApiVersionSetContract
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.ApiVersionSetContract"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-06-01-preview"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self.create_or_update.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'serviceName': self._serialize.url("service_name", service_name, 'str', max_length=50, min_length=1, pattern=r'^[a-zA-Z](?:[a-zA-Z0-9-]*[a-zA-Z0-9])?$'),
'versionSetId': self._serialize.url("version_set_id", version_set_id, 'str', max_length=80, min_length=1, pattern=r'^[^*#&+:<>?]+$'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
if if_match is not None:
header_parameters['If-Match'] = self._serialize.header("if_match", if_match, 'str')
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(parameters, 'ApiVersionSetContract')
body_content_kwargs['content'] = body_content
request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 201]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize(_models.ErrorResponse, response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
response_headers = {}
        if response.status_code == 200:
            response_headers['ETag'] = self._deserialize('str', response.headers.get('ETag'))
            deserialized = self._deserialize('ApiVersionSetContract', pipeline_response)
        if response.status_code == 201:
            response_headers['ETag'] = self._deserialize('str', response.headers.get('ETag'))
            deserialized = self._deserialize('ApiVersionSetContract', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, response_headers)
return deserialized
create_or_update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ApiManagement/service/{serviceName}/apiVersionSets/{versionSetId}'} # type: ignore
def update(
self,
resource_group_name, # type: str
service_name, # type: str
version_set_id, # type: str
if_match, # type: str
parameters, # type: "_models.ApiVersionSetUpdateParameters"
**kwargs # type: Any
):
# type: (...) -> "_models.ApiVersionSetContract"
"""Updates the details of the Api VersionSet specified by its identifier.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param service_name: The name of the API Management service.
:type service_name: str
:param version_set_id: Api Version Set identifier. Must be unique in the current API Management
service instance.
:type version_set_id: str
:param if_match: ETag of the Entity. ETag should match the current entity state from the header
response of the GET request or it should be * for unconditional update.
:type if_match: str
:param parameters: Update parameters.
:type parameters: ~azure.mgmt.apimanagement.models.ApiVersionSetUpdateParameters
:keyword callable cls: A custom type or function that will be passed the direct response
:return: ApiVersionSetContract, or the result of cls(response)
:rtype: ~azure.mgmt.apimanagement.models.ApiVersionSetContract
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.ApiVersionSetContract"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-06-01-preview"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self.update.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'serviceName': self._serialize.url("service_name", service_name, 'str', max_length=50, min_length=1, pattern=r'^[a-zA-Z](?:[a-zA-Z0-9-]*[a-zA-Z0-9])?$'),
'versionSetId': self._serialize.url("version_set_id", version_set_id, 'str', max_length=80, min_length=1, pattern=r'^[^*#&+:<>?]+$'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['If-Match'] = self._serialize.header("if_match", if_match, 'str')
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(parameters, 'ApiVersionSetUpdateParameters')
body_content_kwargs['content'] = body_content
request = self._client.patch(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize(_models.ErrorResponse, response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
response_headers = {}
        response_headers['ETag'] = self._deserialize('str', response.headers.get('ETag'))
deserialized = self._deserialize('ApiVersionSetContract', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, response_headers)
return deserialized
update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ApiManagement/service/{serviceName}/apiVersionSets/{versionSetId}'} # type: ignore
def delete(
self,
resource_group_name, # type: str
service_name, # type: str
version_set_id, # type: str
if_match, # type: str
**kwargs # type: Any
):
# type: (...) -> None
"""Deletes specific Api Version Set.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param service_name: The name of the API Management service.
:type service_name: str
:param version_set_id: Api Version Set identifier. Must be unique in the current API Management
service instance.
:type version_set_id: str
:param if_match: ETag of the Entity. ETag should match the current entity state from the header
response of the GET request or it should be * for unconditional update.
:type if_match: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: None, or the result of cls(response)
:rtype: None
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-06-01-preview"
accept = "application/json"
# Construct URL
url = self.delete.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'serviceName': self._serialize.url("service_name", service_name, 'str', max_length=50, min_length=1, pattern=r'^[a-zA-Z](?:[a-zA-Z0-9-]*[a-zA-Z0-9])?$'),
'versionSetId': self._serialize.url("version_set_id", version_set_id, 'str', max_length=80, min_length=1, pattern=r'^[^*#&+:<>?]+$'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['If-Match'] = self._serialize.header("if_match", if_match, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.delete(url, query_parameters, header_parameters)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 204]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize(_models.ErrorResponse, response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
delete.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ApiManagement/service/{serviceName}/apiVersionSets/{versionSetId}'} # type: ignore
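# ---------------------------------------------------------------------------
# Usage sketch (not part of the generated client): a hedged example of the Api
# Version Set lifecycle built from the operations above. The resource names
# below are illustrative assumptions, ``parameters`` is supplied by the caller,
# and ``client`` is assumed to be an ApiManagementClient exposing this
# operations group as ``client.api_version_set``.
# ---------------------------------------------------------------------------
def _example_api_version_set_lifecycle(client, parameters,
                                       resource_group_name="my-rg",        # assumed name
                                       service_name="my-apim-service",     # assumed name
                                       version_set_id="my-version-set"):   # assumed name
    ops = client.api_version_set
    # Create the entity; no If-Match header is needed when it does not exist yet.
    created = ops.create_or_update(resource_group_name, service_name, version_set_id, parameters)
    # HEAD request returning True when the entity exists (the ETag lands in the response headers).
    exists = ops.get_entity_tag(resource_group_name, service_name, version_set_id)
    # Full GET of the contract.
    current = ops.get(resource_group_name, service_name, version_set_id)
    # Delete unconditionally by passing "*" as the If-Match value.
    ops.delete(resource_group_name, service_name, version_set_id, if_match="*")
    return created, exists, current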
| 51.772277
| 211
| 0.659361
|
2549cf4caf3f65971678ef730aa45f89c565d1f7
| 2,170
|
py
|
Python
|
delta/data/frontend/plp_test.py
|
didichuxing/delta
|
31dfebc8f20b7cb282b62f291ff25a87e403cc86
|
[
"Apache-2.0"
] | 1,442
|
2019-07-09T07:34:28.000Z
|
2020-11-15T09:52:09.000Z
|
delta/data/frontend/plp_test.py
|
didichuxing/delta
|
31dfebc8f20b7cb282b62f291ff25a87e403cc86
|
[
"Apache-2.0"
] | 93
|
2019-07-22T09:20:20.000Z
|
2020-11-13T01:59:30.000Z
|
delta/data/frontend/plp_test.py
|
didichuxing/delta
|
31dfebc8f20b7cb282b62f291ff25a87e403cc86
|
[
"Apache-2.0"
] | 296
|
2019-07-09T07:35:28.000Z
|
2020-11-16T02:27:51.000Z
|
# Copyright (C) 2017 Beijing Didi Infinity Technology and Development Co.,Ltd.
# All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""The model tests PLP FE."""
import delta.compat as tf
import os
from pathlib import Path
import numpy as np
from delta.data.frontend.read_wav import ReadWav
from delta.data.frontend.plp import Plp
from core.ops import PACKAGE_OPS_DIR
class PlpTest(tf.test.TestCase):
"""
Plp extraction test.
"""
def test_plp(self):
wav_path = str(Path(PACKAGE_OPS_DIR).joinpath('data/sm1_cln.wav'))
with self.cached_session(use_gpu=False, force_gpu=False):
read_wav = ReadWav.params().instantiate()
input_data, sample_rate = read_wav(wav_path)
input_data = input_data / 32768
plp = Plp.params({
'window_length': 0.025,
'frame_length': 0.010,
'plp_order': 12
}).instantiate()
plp_test = plp(input_data, sample_rate)
output_true = np.array(
[[-0.209490, -0.326126, 0.010536, -0.027167, -0.117118],
[-0.020293, -0.454695, -0.104243, 0.001560, -0.234854],
[-0.015118, -0.444044, -0.156695, -0.086221, -0.319310],
[-0.031856, -0.130708, 0.047435, -0.089916, -0.160247],
[0.052763, -0.271487, 0.011329, 0.025320, 0.012851]])
self.assertEqual(tf.rank(plp_test).eval(), 2)
      # Tolerances are relaxed because the Povey window is used instead of the Hamming window when computing the spectrum.
self.assertAllClose(
plp_test.eval()[50:55, 5:10], output_true, rtol=1e-02, atol=1e-02)
if __name__ == '__main__':
tf.test.main()
| 35
| 83
| 0.652995
|
d1f2503da5f902a31e6622eab275488d9f499afd
| 12,543
|
py
|
Python
|
techminer/correlation_analysis.py
|
jdvelasq/techminer-new
|
a4b54c2783aee1a2d0d823ffd270131e42e5ff9a
|
[
"MIT"
] | 1
|
2020-04-29T20:35:13.000Z
|
2020-04-29T20:35:13.000Z
|
techminer/correlation_analysis.py
|
jdvelasq/techminer-new
|
a4b54c2783aee1a2d0d823ffd270131e42e5ff9a
|
[
"MIT"
] | null | null | null |
techminer/correlation_analysis.py
|
jdvelasq/techminer-new
|
a4b54c2783aee1a2d0d823ffd270131e42e5ff9a
|
[
"MIT"
] | null | null | null |
import matplotlib.pyplot as pyplot
import numpy as np
import pandas as pd
import techminer.core.dashboard as dash
from techminer.core import (
DASH,
Network,
TF_matrix,
add_counters_to_axis,
corpus_filter,
limit_to_exclude,
sort_by_axis,
)
from techminer.core.dashboard import min_occurrence
from techminer.plots import (
ChordDiagram,
bubble_plot,
counters_to_node_colors,
counters_to_node_sizes,
heatmap,
)
###############################################################################
##
## MODEL
##
###############################################################################
class Model:
def __init__(
self,
data,
limit_to,
exclude,
years_range,
clusters=None,
cluster=None,
):
##
if years_range is not None:
initial_year, final_year = years_range
data = data[(data.Year >= initial_year) & (data.Year <= final_year)]
#
# Filter for cluster members
#
if clusters is not None and cluster is not None:
data = corpus_filter(data=data, clusters=clusters, cluster=cluster)
self.data = data
self.limit_to = limit_to
self.exclude = exclude
def apply(self):
x = self.data.copy()
if self.column == self.by:
##
## Drop NA from column
##
w = x[[self.column, "ID"]].dropna()
##
## Computes TF_matrix with occurrence >= min_occurrence
##
A = TF_matrix(
data=w,
column=self.column,
scheme=None,
min_occurrence=self.min_occurrence,
)
##
## Limit to/Exclude
##
A = limit_to_exclude(
data=A,
axis=1,
column=self.column,
limit_to=self.limit_to,
exclude=self.exclude,
)
##
## Select max_items
##
A = add_counters_to_axis(X=A, axis=1, data=self.data, column=self.column)
A = sort_by_axis(data=A, sort_by=self.top_by, ascending=False, axis=1)
            if len(A.columns) > self.max_items:
                A = A[A.columns[: self.max_items]]
            rows = A.sum(axis=1)
            rows = rows[rows > 0]
            A = A.loc[rows.index, :]
##
## Computes correlation
##
matrix = A.corr(method=self.method)
else:
##
## Drop NA from column
##
w = x[[self.column, self.by, "ID"]].dropna()
##
## Computes TF_matrix with occurrence >= min_occurrence
##
A = TF_matrix(data=w, column=self.column, scheme=None)
##
## Limit to/Exclude
##
A = limit_to_exclude(
data=A,
axis=1,
column=self.column,
limit_to=self.limit_to,
exclude=self.exclude,
)
##
## Minimal occurrence
##
terms = A.sum(axis=0)
terms = terms.sort_values(ascending=False)
terms = terms[terms >= self.min_occurrence]
A = A.loc[:, terms.index]
##
## Select max_items
##
A = add_counters_to_axis(X=A, axis=1, data=self.data, column=self.column)
A = sort_by_axis(data=A, sort_by=self.top_by, ascending=False, axis=1)
if len(A.columns) > self.max_items:
A = A[A.columns[: self.max_items]]
##
## Computes correlation
##
B = TF_matrix(w, column=self.by, scheme=None)
matrix = np.matmul(B.transpose().values, A.values)
matrix = pd.DataFrame(matrix, columns=A.columns, index=B.columns)
matrix = matrix.corr(method=self.method)
matrix = sort_by_axis(
data=matrix,
sort_by=self.sort_r_axis_by,
ascending=self.r_axis_ascending,
axis=0,
)
matrix = sort_by_axis(
data=matrix,
sort_by=self.sort_c_axis_by,
ascending=self.c_axis_ascending,
axis=1,
)
self.X_ = matrix
def matrix(self):
self.apply()
return self.X_.style.format("{:+4.3f}").background_gradient(
cmap=self.cmap, axis=None
)
def heatmap(self):
self.apply()
return heatmap(self.X_, cmap=self.cmap, figsize=(self.width, self.height))
def bubble_plot(self):
self.apply()
return bubble_plot(
self.X_,
axis=0,
cmap=self.cmap,
figsize=(self.width, self.height),
)
def chord_diagram(self):
self.apply()
x = self.X_.copy()
terms = self.X_.columns.tolist()
node_sizes = counters_to_node_sizes(x=terms)
node_colors = counters_to_node_colors(x, cmap=pyplot.cm.get_cmap(self.cmap))
cd = ChordDiagram()
## add nodes
for idx, term in enumerate(x.columns):
cd.add_node(term, color=node_colors[idx], s=node_sizes[idx])
## add links
m = x.stack().to_frame().reset_index()
m = m[m.level_0 < m.level_1]
m.columns = ["from_", "to_", "link_"]
m = m.reset_index(drop=True)
d = {
0: {"linestyle": "-", "linewidth": 4, "color": "black"},
1: {"linestyle": "-", "linewidth": 2, "color": "black"},
2: {"linestyle": "--", "linewidth": 1, "color": "gray"},
3: {"linestyle": ":", "linewidth": 1, "color": "lightgray"},
}
for idx in range(len(m)):
key = (
0
if m.link_[idx] > 0.75
else (1 if m.link_[idx] > 0.50 else (2 if m.link_[idx] > 0.25 else 3))
)
cd.add_edge(m.from_[idx], m.to_[idx], **(d[key]))
return cd.plot(figsize=(self.width, self.height))
def correlation_map_nx(self):
self.apply()
return Network(
X=self.X_,
top_by=self.top_by,
n_labels=self.n_labels,
clustering=self.clustering,
).networkx_plot(
layout=self.layout,
iterations=self.nx_iterations,
figsize=(self.width, self.height),
)
def communities(self):
        self.apply()
return Network(
X=self.X_,
top_by=self.top_by,
n_labels=self.n_labels,
clustering=self.clustering,
).cluster_members_
def correlation_map_interactive(self):
self.apply()
return Network(
X=self.X_,
top_by=self.top_by,
n_labels=self.n_labels,
clustering=self.clustering,
).pyvis_plot()
###############################################################################
##
## DASHBOARD
##
###############################################################################
class DASHapp(DASH, Model):
def __init__(
self,
data,
limit_to=None,
exclude=None,
years_range=None,
clusters=None,
cluster=None,
):
"""Dashboard app"""
Model.__init__(
self,
data=data,
limit_to=limit_to,
exclude=exclude,
years_range=years_range,
clusters=clusters,
cluster=cluster,
)
DASH.__init__(self)
self.app_title = "Correlation Analysis"
self.menu_options = [
"Matrix",
"Heatmap",
"Bubble plot",
"Correlation map (nx)",
# "Correlation map (interactive)",
"Chord diagram",
]
COLUMNS = sorted(
[
column
for column in data.columns
if column
not in [
"Abb_Source_Title",
"Abstract",
"Affiliations",
"Authors_ID",
"Bradford_Law_Zone",
"Document_Type",
"Frac_Num_Documents",
"Global_Citations",
"Global_References",
"Historiograph_ID",
"ID",
"Local_Citations",
"Local_References",
"Num_Authors",
"Source_Title",
"Title",
"Year",
]
]
)
self.panel_widgets = [
dash.dropdown(
desc="Column:",
options=COLUMNS,
),
dash.dropdown(
desc="By:",
options=COLUMNS,
),
dash.dropdown(
desc="Method:",
options=["pearson", "kendall", "spearman"],
),
dash.min_occurrence(),
dash.max_items(),
dash.dropdown(
desc="Clustering:",
options=[
"Label propagation",
"Leiden",
"Louvain",
"Walktrap",
],
),
dash.separator(text="Visualization"),
dash.dropdown(
desc="Top by:",
options=[
"Num Documents",
"Global Citations",
],
),
dash.dropdown(
desc="Sort C-axis by:",
options=[
"Alphabetic",
"Num Documents",
"Global Citations",
],
),
dash.c_axis_ascending(),
dash.dropdown(
desc="Sort R-axis by:",
options=[
"Alphabetic",
"Num Documents",
"Global Citations",
],
),
dash.r_axis_ascending(),
dash.cmap(),
dash.nx_layout(),
dash.n_labels(),
dash.nx_iterations(),
dash.fig_width(),
dash.fig_height(),
]
super().create_grid()
def interactive_output(self, **kwargs):
DASH.interactive_output(self, **kwargs)
if self.menu in [
"Matrix",
"Heatmap",
"Bubble plot",
]:
self.set_enabled("Sort C-axis by:")
self.set_enabled("C-axis ascending:")
self.set_enabled("Sort R-axis by:")
self.set_enabled("R-axis ascending:")
else:
self.set_disabled("Sort C-axis by:")
self.set_disabled("C-axis ascending:")
self.set_disabled("Sort R-axis by:")
self.set_disabled("R-axis ascending:")
if self.menu == "Correlation map (nx)":
self.set_enabled("Layout:")
self.set_enabled("N labels:")
else:
self.set_disabled("Layout:")
self.set_disabled("N labels:")
if self.menu == "Correlation map" and self.layout == "Spring":
self.set_enabled("nx iterations:")
else:
self.set_disabled("nx iterations:")
if self.menu in ["Matrix", "Correlation map (interactive)"]:
self.set_disabled("Width:")
self.set_disabled("Height:")
else:
self.set_enabled("Width:")
self.set_enabled("Height:")
###############################################################################
##
## EXTERNAL INTERFACE
##
###############################################################################
def correlation_analysis(
input_file="techminer.csv",
limit_to=None,
exclude=None,
years_range=None,
clusters=None,
cluster=None,
):
return DASHapp(
data=pd.read_csv(input_file),
limit_to=limit_to,
exclude=exclude,
years_range=years_range,
clusters=clusters,
cluster=cluster,
).run()
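# ---------------------------------------------------------------------------
# Usage sketch (not part of the library): a hedged example of driving Model
# directly instead of through the Jupyter dashboard. The column name, CSV file
# and widget-style attribute values set below are assumptions; DASHapp normally
# fills these attributes from its panel widgets.
# ---------------------------------------------------------------------------
def _example_correlation_matrix(input_file="techminer.csv"):
    model = Model(
        data=pd.read_csv(input_file),
        limit_to=None,
        exclude=None,
        years_range=None,
    )
    # Attributes normally provided by the dashboard widgets (assumed values).
    model.column = "Authors"          # assumed column name
    model.by = "Authors"              # same column -> correlation of co-occurrence profiles
    model.method = "pearson"
    model.min_occurrence = 2
    model.max_items = 20
    model.top_by = "Num Documents"
    model.sort_r_axis_by = "Alphabetic"
    model.r_axis_ascending = True
    model.sort_c_axis_by = "Alphabetic"
    model.c_axis_ascending = True
    model.apply()
    return model.X_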
| 27.997768
| 86
| 0.456111
|
823f29991129a83bb6f71f21c15154fc6481446f
| 225
|
py
|
Python
|
python/ccxtpro/test/exchange/test_ticker.py
|
kakacaca/ccxt
|
4fdb7d450fb9c9e480b6f0a3e3866b784cf24ebf
|
[
"Unlicense"
] | 1
|
2021-09-15T11:42:59.000Z
|
2021-09-15T11:42:59.000Z
|
python/ccxtpro/test/exchange/test_ticker.py
|
bikidenny/ccxt
|
4fdb7d450fb9c9e480b6f0a3e3866b784cf24ebf
|
[
"Unlicense"
] | null | null | null |
python/ccxtpro/test/exchange/test_ticker.py
|
bikidenny/ccxt
|
4fdb7d450fb9c9e480b6f0a3e3866b784cf24ebf
|
[
"Unlicense"
] | 1
|
2022-03-12T03:02:15.000Z
|
2022-03-12T03:02:15.000Z
|
# -*- coding: utf-8 -*-
__all__ = ['test_ticker']
def test_ticker(exchange, ticker, method, symbol):
print(
exchange.id,
symbol,
method,
ticker['datetime'],
ticker['last']
)
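# ---------------------------------------------------------------------------
# Usage sketch (not part of the test suite): test_ticker only reads ``id`` from
# an exchange-like object and ``datetime``/``last`` from a ticker dict, so a
# hedged stand-alone run can use the minimal stand-ins below.
# ---------------------------------------------------------------------------
if __name__ == '__main__':
    class _FakeExchange:  # minimal stand-in, for illustration only
        id = 'fakeexchange'
    _fake_ticker = {'datetime': '2021-01-01T00:00:00Z', 'last': 123.45}
    test_ticker(_FakeExchange(), _fake_ticker, 'watchTicker', 'BTC/USDT')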
| 16.071429
| 50
| 0.528889
|
cc80cafacaa7562613b01094d3569e8dd8695c5e
| 843
|
py
|
Python
|
Project_PyCharm/maps/map3.py
|
DayongTong/ESE650LearningInRobotics
|
08d73fe237db7eee87f1e56f01b39baf11b6aa38
|
[
"MIT"
] | null | null | null |
Project_PyCharm/maps/map3.py
|
DayongTong/ESE650LearningInRobotics
|
08d73fe237db7eee87f1e56f01b39baf11b6aa38
|
[
"MIT"
] | null | null | null |
Project_PyCharm/maps/map3.py
|
DayongTong/ESE650LearningInRobotics
|
08d73fe237db7eee87f1e56f01b39baf11b6aa38
|
[
"MIT"
] | null | null | null |
import random
import numpy as np
import matplotlib.pyplot as plt
###########################maze##################################
maze = np.full((30,40), 1)
random.seed(42)
numAgents = 20
agent_pos = []
posx = []
posy = []
for i in range(120):
while True:
row = random.randint(0,29)
col = random.randint(0,39)
if maze[row,col] == 1:
maze[row,col] = 0
break
for i in range(numAgents):
while True:
row = random.randint(0,29)
col = random.randint(0,39)
if maze[row,col] == 1 and ((row,col) not in agent_pos):
posx.append(row)
posy.append(col)
agent_pos.append((row,col))
break
# plt.imshow(maze, cmap='hot', interpolation='nearest');
# plt.savefig('map3.png')
# plt.show()
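# ---------------------------------------------------------------------------
# Optional visualization sketch (separate from the map definition above): marks
# the sampled agent start cells on top of the occupancy grid. The value 2 used
# to flag agents and the colour map are illustration-only assumptions.
# ---------------------------------------------------------------------------
def plot_map_with_agents(show=False):
    display = maze.copy()
    for row, col in agent_pos:
        display[row, col] = 2  # highlight agent start cells
    plt.imshow(display, cmap='hot', interpolation='nearest')
    if show:
        plt.show()
    return display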
| 22.783784
| 66
| 0.500593
|
f6843a04c5fceaca943849d8669489f95b376e0e
| 5,775
|
py
|
Python
|
iogt/settings/base.py
|
sheralim012/iogt
|
72a43626636568b7b4603a193fdc045b581f5cd3
|
[
"BSD-2-Clause"
] | null | null | null |
iogt/settings/base.py
|
sheralim012/iogt
|
72a43626636568b7b4603a193fdc045b581f5cd3
|
[
"BSD-2-Clause"
] | 3
|
2021-07-26T07:55:17.000Z
|
2021-08-25T10:08:41.000Z
|
iogt/settings/base.py
|
sheralim012/iogt
|
72a43626636568b7b4603a193fdc045b581f5cd3
|
[
"BSD-2-Clause"
] | null | null | null |
"""
Django settings for iogt project.
Generated by 'django-admin startproject' using Django 3.1.7.
For more information on this file, see
https://docs.djangoproject.com/en/3.1/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/3.1/ref/settings/
"""
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
import os
PROJECT_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
BASE_DIR = os.path.dirname(PROJECT_DIR)
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/3.1/howto/deployment/checklist/
# Application definition
INSTALLED_APPS = [
'home',
'search',
'iogt_users',
'wagtail.contrib.forms',
'wagtail.contrib.redirects',
'wagtail.embeds',
'wagtail.sites',
'wagtail.users',
'wagtail.snippets',
'wagtail.documents',
'wagtail.images',
'wagtail.search',
'wagtail.admin',
'wagtail.core',
'wagtail.contrib.modeladmin',
'wagtailmenus',
'modelcluster',
'taggit',
'allauth',
'allauth.account',
'allauth.socialaccount',
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'django.contrib.sites'
]
MIDDLEWARE = [
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
'django.middleware.security.SecurityMiddleware',
'wagtail.contrib.redirects.middleware.RedirectMiddleware',
]
ROOT_URLCONF = 'iogt.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [
os.path.join(PROJECT_DIR, 'templates'),
],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
'wagtailmenus.context_processors.wagtailmenus',
],
},
},
]
WSGI_APPLICATION = 'iogt.wsgi.application'
# Database
# https://docs.djangoproject.com/en/3.1/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Authentication
AUTHENTICATION_BACKENDS = [
'django.contrib.auth.backends.ModelBackend',
'allauth.account.auth_backends.AuthenticationBackend'
]
AUTH_USER_MODEL = 'iogt_users.User'
# Password validation
# https://docs.djangoproject.com/en/3.1/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
'OPTIONS': {
'min_length': 4
}
}
]
# Internationalization
# https://docs.djangoproject.com/en/3.1/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.1/howto/static-files/
STATICFILES_FINDERS = [
'django.contrib.staticfiles.finders.FileSystemFinder',
'django.contrib.staticfiles.finders.AppDirectoriesFinder',
]
STATICFILES_DIRS = [
os.path.join(PROJECT_DIR, 'static'),
]
# ManifestStaticFilesStorage is recommended in production, to prevent outdated
# JavaScript / CSS assets being served from cache (e.g. after a Wagtail upgrade).
# See https://docs.djangoproject.com/en/3.1/ref/contrib/staticfiles/#manifeststaticfilesstorage
STATICFILES_STORAGE = 'django.contrib.staticfiles.storage.ManifestStaticFilesStorage'
STATIC_ROOT = os.path.join(BASE_DIR, 'static')
STATIC_URL = '/static/'
MEDIA_ROOT = os.path.join(BASE_DIR, 'media')
MEDIA_URL = '/media/'
# Allauth settings (https://django-allauth.readthedocs.io/en/latest/configuration.html)
# ACCOUNT_SIGNUP_FORM_CLASS = 'iogt_users.forms.AccountSignUpAdditionalFieldsForm'
# Control the forms that django-allauth uses
ACCOUNT_FORMS = {
"login": "allauth.account.forms.LoginForm",
"add_email": "allauth.account.forms.AddEmailForm",
"change_password": "allauth.account.forms.ChangePasswordForm",
"set_password": "allauth.account.forms.SetPasswordForm",
"reset_password": "allauth.account.forms.ResetPasswordForm",
"reset_password_from_key": "allauth.account.forms.ResetPasswordKeyForm",
"disconnect": "allauth.socialaccount.forms.DisconnectForm",
# Use our custom signup form
"signup": "iogt_users.forms.AccountSignupForm",
}
# ACCOUNT_SIGNUP_FORM_CLASS = 'iogt_users.extra_forms.AccountSignUpAdditionalFieldsForm'
# Wagtail settings
WAGTAIL_SITE_NAME = "iogt"
ACCOUNT_ADAPTER = 'iogt_users.adapters.AccountAdapter'
WAGTAIL_USER_EDIT_FORM = 'iogt_users.forms.WagtailAdminUserEditForm'
WAGTAIL_USER_CREATION_FORM = 'iogt_users.forms.WagtailAdminUserCreateForm'
WAGTAIL_USER_CUSTOM_FIELDS = ['display_name', 'terms_accepted']
# Base URL to use when referring to full URLs within the Wagtail admin backend -
# e.g. in notification emails. Don't include '/admin' or a trailing slash
BASE_URL = 'http://example.com'
# SITE ID
SITE_ID = 1
# Miscellaneous
LOGIN_REDIRECT_URL = "/"
LOGIN_URL = 'account_login'
WAGTAIL_FRONTEND_LOGIN_URL = LOGIN_URL
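# ---------------------------------------------------------------------------
# Hedged sketch: this base module deliberately omits deployment-specific values
# such as SECRET_KEY, DEBUG and ALLOWED_HOSTS, which a dev/production settings
# module is expected to supply. The helper below only illustrates one common
# way of deriving them; the environment variable names are assumptions and the
# function is not used anywhere in the project.
# ---------------------------------------------------------------------------
def example_environment_overrides(environ=None):
    environ = environ if environ is not None else os.environ
    return {
        'DEBUG': environ.get('DJANGO_DEBUG', '0') == '1',
        'SECRET_KEY': environ.get('DJANGO_SECRET_KEY', 'insecure-dev-only-key'),
        'ALLOWED_HOSTS': environ.get('DJANGO_ALLOWED_HOSTS', 'localhost').split(','),
    }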
| 28.875
| 95
| 0.719481
|
c94a07de7095e95c88d9f4695ca909cb6f258793
| 268
|
py
|
Python
|
backend/app/__init__.py
|
ecshreve/jeppy
|
d15be0e6457be7dfa30c7a91a7c1218baf58ef98
|
[
"MIT"
] | 1
|
2021-02-14T01:18:21.000Z
|
2021-02-14T01:18:21.000Z
|
backend/app/__init__.py
|
ecshreve/jeppy
|
d15be0e6457be7dfa30c7a91a7c1218baf58ef98
|
[
"MIT"
] | 3
|
2021-02-28T04:55:59.000Z
|
2021-02-28T08:51:18.000Z
|
backend/app/__init__.py
|
ecshreve/jeppy
|
d15be0e6457be7dfa30c7a91a7c1218baf58ef98
|
[
"MIT"
] | 1
|
2021-02-28T01:10:07.000Z
|
2021-02-28T01:10:07.000Z
|
from flask import Flask
from flask_sqlalchemy import SQLAlchemy
from flask_migrate import Migrate
from config import Config
app = Flask(__name__)
app.config.from_object(Config)
db = SQLAlchemy(app)
migrate = Migrate(app, db)
from app import routes, models, helpers
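# ---------------------------------------------------------------------------
# Hedged sketch (illustration only): the ``routes`` module imported above is
# expected to register view functions against the ``app`` created here, roughly
# along the lines of the hypothetical handler below. The endpoint path and
# payload are assumptions, not part of this package.
# ---------------------------------------------------------------------------
def _example_register_health_check(flask_app):
    @flask_app.route('/_health')  # hypothetical endpoint
    def _health():
        return {'status': 'ok'}
    return _health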
| 20.615385
| 39
| 0.80597
|
936585c4d58a259d5935c0023bd65988b81b76a7
| 7,987
|
py
|
Python
|
es_distributed/dist.py
|
TrevorCMorton/deep-neuroevolution
|
d6e5c5b41202e9c2e50c986fc822809b507ede64
|
[
"MIT"
] | null | null | null |
es_distributed/dist.py
|
TrevorCMorton/deep-neuroevolution
|
d6e5c5b41202e9c2e50c986fc822809b507ede64
|
[
"MIT"
] | null | null | null |
es_distributed/dist.py
|
TrevorCMorton/deep-neuroevolution
|
d6e5c5b41202e9c2e50c986fc822809b507ede64
|
[
"MIT"
] | null | null | null |
import logging
import os
import pickle
import time
from collections import deque
from pprint import pformat
import redis
logger = logging.getLogger(__name__)
EXP_KEY = 'es:exp'
TASK_ID_KEY = 'es:task_id'
TASK_DATA_KEY = 'es:task_data'
TASK_CHANNEL = 'es:task_channel'
RESULTS_KEY = 'es:results'
ARCHIVE_KEY = 'es:archive'
def serialize(x):
return pickle.dumps(x, protocol=-1)
def deserialize(x):
return pickle.loads(x)
def retry_connect(redis_cfg, tries=300, base_delay=4.):
for i in range(tries):
try:
r = redis.StrictRedis(**redis_cfg)
r.ping()
return r
except redis.ConnectionError as e:
if i == tries - 1:
raise
else:
delay = base_delay * (1 + (os.getpid() % 10) / 9)
logger.warning('Could not connect to {}. Retrying after {:.2f} sec ({}/{}). Error: {}'.format(
redis_cfg, delay, i + 2, tries, e))
time.sleep(delay)
def retry_get(pipe, key, tries=300, base_delay=4.):
for i in range(tries):
# Try to (m)get
if isinstance(key, (list, tuple)):
vals = pipe.mget(key)
if all(v is not None for v in vals):
return vals
else:
val = pipe.get(key)
if val is not None:
return val
# Sleep and retry if any key wasn't available
if i != tries - 1:
delay = base_delay * (1 + (os.getpid() % 10) / 9)
logger.warning('{} not set. Retrying after {:.2f} sec ({}/{})'.format(key, delay, i + 2, tries))
time.sleep(delay)
raise RuntimeError('{} not set'.format(key))
class MasterClient:
def __init__(self, master_redis_cfg):
self.task_counter = 0
self.master_redis = retry_connect(master_redis_cfg)
self.max_size = None
logger.info('[master] Connected to Redis: {}'.format(self.master_redis))
def declare_experiment(self, exp):
self.master_redis.set(EXP_KEY, serialize(exp))
self.max_size = exp['config']['archive_size']
logger.info('[master] Declared experiment {}'.format(pformat(exp)))
def declare_task(self, task_data):
task_id = self.task_counter
self.task_counter += 1
serialized_task_data = serialize(task_data)
(self.master_redis.pipeline()
.mset({TASK_ID_KEY: task_id, TASK_DATA_KEY: serialized_task_data})
.publish(TASK_CHANNEL, serialize((task_id, serialized_task_data)))
.execute()) # TODO: can we avoid transferring task data twice and serializing so much?
logger.debug('[master] Declared task {}'.format(task_id))
return task_id
def pop_result(self):
task_id, result = deserialize(self.master_redis.blpop(RESULTS_KEY)[1])
logger.debug('[master] Popped a result for task {}'.format(task_id))
return task_id, result
def flush_results(self):
return max(self.master_redis.pipeline().llen(RESULTS_KEY).ltrim(RESULTS_KEY, -1, -1).execute()[0] -1, 0)
def add_to_novelty_archive(self, novelty_vector):
self.master_redis.rpush(ARCHIVE_KEY, serialize(novelty_vector))
logger.info('[master] Added novelty vector to archive')
length = self.master_redis.llen(ARCHIVE_KEY)
if self.max_size is not None and length > self.max_size:
self.master_redis.lpop(ARCHIVE_KEY)
def get_archive(self):
archive = self.master_redis.lrange(ARCHIVE_KEY, 0, -1)
return [deserialize(novelty_vector) for novelty_vector in archive]
class RelayClient:
"""
Receives and stores task broadcasts from the master
Batches and pushes results from workers to the master
"""
def __init__(self, master_redis_cfg, relay_redis_cfg):
self.master_redis = retry_connect(master_redis_cfg)
logger.info('[relay] Connected to master: {}'.format(self.master_redis))
self.local_redis = retry_connect(relay_redis_cfg)
logger.info('[relay] Connected to relay: {}'.format(self.local_redis))
self.results_published = 0
def run(self):
# Initialization: read exp and latest task from master
self.local_redis.set(EXP_KEY, retry_get(self.master_redis, EXP_KEY))
self._declare_task_local(*retry_get(self.master_redis, (TASK_ID_KEY, TASK_DATA_KEY)))
# Start subscribing to tasks
p = self.master_redis.pubsub(ignore_subscribe_messages=True)
p.subscribe(**{TASK_CHANNEL: lambda msg: self._declare_task_local(*deserialize(msg['data']))})
p.run_in_thread(sleep_time=0.001)
# Loop on RESULTS_KEY and push to master
batch_sizes, last_print_time = deque(maxlen=20), time.time() # for logging
while True:
results = []
start_time = curr_time = time.time()
while curr_time - start_time < 0.001:
results.append(self.local_redis.blpop(RESULTS_KEY)[1])
curr_time = time.time()
self.results_published += len(results)
self.master_redis.rpush(RESULTS_KEY, *results)
# Log
batch_sizes.append(len(results))
if curr_time - last_print_time > 5.0:
logger.info('[relay] Average batch size {:.3f} ({} total)'.format(sum(batch_sizes) / len(batch_sizes), self.results_published))
last_print_time = curr_time
def flush_results(self):
number_flushed = max(self.local_redis.pipeline().llen(RESULTS_KEY).ltrim(RESULTS_KEY, -1, -1).execute()[0] -1, 0)
number_flushed_master = max(self.master_redis.pipeline().llen(RESULTS_KEY).ltrim(RESULTS_KEY, -1, -1).execute()[0] -1, 0)
logger.warning('[relay] Flushed {} results from worker redis and {} from master'
.format(number_flushed, number_flushed_master))
def _declare_task_local(self, task_id, task_data):
logger.info('[relay] Received task {}'.format(task_id))
self.results_published = 0
self.local_redis.mset({TASK_ID_KEY: task_id, TASK_DATA_KEY: task_data})
self.flush_results()
class WorkerClient:
def __init__(self, relay_redis_cfg, master_redis_cfg):
self.local_redis = retry_connect(relay_redis_cfg)
logger.info('[worker] Connected to relay: {}'.format(self.local_redis))
self.master_redis = retry_connect(master_redis_cfg)
logger.warning('[worker] Connected to master: {}'.format(self.master_redis))
self.cached_task_id, self.cached_task_data = None, None
def get_experiment(self):
# Grab experiment info
exp = deserialize(retry_get(self.local_redis, EXP_KEY))
logger.info('[worker] Experiment: {}'.format(exp))
return exp
def get_archive(self):
archive = self.master_redis.lrange(ARCHIVE_KEY, 0, -1)
return [deserialize(novelty_vector) for novelty_vector in archive]
def get_current_task(self):
with self.local_redis.pipeline() as pipe:
while True:
try:
pipe.watch(TASK_ID_KEY)
task_id = int(retry_get(pipe, TASK_ID_KEY))
if task_id == self.cached_task_id:
logger.debug('[worker] Returning cached task {}'.format(task_id))
break
pipe.multi()
pipe.get(TASK_DATA_KEY)
logger.info('[worker] Getting new task {}. Cached task was {}'.format(task_id, self.cached_task_id))
self.cached_task_id, self.cached_task_data = task_id, deserialize(pipe.execute()[0])
break
except redis.WatchError:
continue
return self.cached_task_id, self.cached_task_data
def push_result(self, task_id, result):
self.local_redis.rpush(RESULTS_KEY, serialize((task_id, result)))
logger.debug('[worker] Pushed result for task {}'.format(task_id))
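# ---------------------------------------------------------------------------
# Usage sketch (not part of the module): a hedged, single-process round trip
# through the master/worker protocol above. The Redis configuration and the
# task/result payloads are assumptions; in a real deployment the master, relay
# and workers run in separate processes, with workers talking to a local relay.
# ---------------------------------------------------------------------------
def _example_round_trip(redis_cfg=None):
    redis_cfg = redis_cfg or {'host': 'localhost', 'port': 6379, 'db': 0}
    master = MasterClient(redis_cfg)
    worker = WorkerClient(redis_cfg, redis_cfg)  # worker reads tasks straight from the master here
    master.declare_experiment({'config': {'archive_size': 100}})  # assumed experiment layout
    master.declare_task({'theta': [0.0, 0.0]})                    # assumed task payload
    task_id, task_data = worker.get_current_task()
    worker.push_result(task_id, {'returns': [1.0]})               # assumed result payload
    return master.pop_result()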
| 40.338384
| 143
| 0.632778
|
9f29b37ba554ae51911a54a45425d6ed00a0090b
| 7,064
|
py
|
Python
|
venv/lib/python3.6/site-packages/ansible_collections/f5networks/f5_modules/tests/unit/modules/network/f5/test_bigip_gtm_datacenter.py
|
usegalaxy-no/usegalaxy
|
75dad095769fe918eb39677f2c887e681a747f3a
|
[
"MIT"
] | 1
|
2020-01-22T13:11:23.000Z
|
2020-01-22T13:11:23.000Z
|
venv/lib/python3.6/site-packages/ansible_collections/f5networks/f5_modules/tests/unit/modules/network/f5/test_bigip_gtm_datacenter.py
|
usegalaxy-no/usegalaxy
|
75dad095769fe918eb39677f2c887e681a747f3a
|
[
"MIT"
] | 12
|
2020-02-21T07:24:52.000Z
|
2020-04-14T09:54:32.000Z
|
venv/lib/python3.6/site-packages/ansible_collections/f5networks/f5_modules/tests/unit/modules/network/f5/test_bigip_gtm_datacenter.py
|
usegalaxy-no/usegalaxy
|
75dad095769fe918eb39677f2c887e681a747f3a
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
#
# Copyright (c) 2017 F5 Networks Inc.
# GNU General Public License v3.0 (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import os
import json
import pytest
import sys
if sys.version_info < (2, 7):
pytestmark = pytest.mark.skip("F5 Ansible modules require Python >= 2.7")
from ansible.module_utils.basic import AnsibleModule
from ansible_collections.f5networks.f5_modules.plugins.modules.bigip_gtm_datacenter import (
ApiParameters, ModuleParameters, ModuleManager, ArgumentSpec
)
from ansible_collections.f5networks.f5_modules.tests.unit.compat import unittest
from ansible_collections.f5networks.f5_modules.tests.unit.compat.mock import Mock, patch
from ansible_collections.f5networks.f5_modules.tests.unit.modules.utils import set_module_args
fixture_path = os.path.join(os.path.dirname(__file__), 'fixtures')
fixture_data = {}
def load_fixture(name):
path = os.path.join(fixture_path, name)
if path in fixture_data:
return fixture_data[path]
with open(path) as f:
data = f.read()
try:
data = json.loads(data)
except Exception:
pass
fixture_data[path] = data
return data
class TestParameters(unittest.TestCase):
def test_module_parameters(self):
args = dict(
state='present',
contact='foo',
description='bar',
location='baz',
name='datacenter'
)
p = ModuleParameters(params=args)
assert p.state == 'present'
def test_api_parameters(self):
args = load_fixture('load_gtm_datacenter_default.json')
p = ApiParameters(params=args)
assert p.name == 'asd'
def test_module_parameters_state_present(self):
args = dict(
state='present'
)
p = ModuleParameters(params=args)
assert p.state == 'present'
assert p.enabled is True
def test_module_parameters_state_absent(self):
args = dict(
state='absent'
)
p = ModuleParameters(params=args)
assert p.state == 'absent'
def test_module_parameters_state_enabled(self):
args = dict(
state='enabled'
)
p = ModuleParameters(params=args)
assert p.state == 'enabled'
assert p.enabled is True
assert p.disabled is None
def test_module_parameters_state_disabled(self):
args = dict(
state='disabled'
)
p = ModuleParameters(params=args)
assert p.state == 'disabled'
assert p.enabled is None
assert p.disabled is True
class TestManager(unittest.TestCase):
def setUp(self):
self.spec = ArgumentSpec()
self.p1 = patch('ansible_collections.f5networks.f5_modules.plugins.modules.bigip_gtm_datacenter.module_provisioned')
self.m1 = self.p1.start()
self.m1.return_value = True
self.p2 = patch('ansible_collections.f5networks.f5_modules.plugins.modules.bigip_gtm_datacenter.tmos_version')
self.p3 = patch('ansible_collections.f5networks.f5_modules.plugins.modules.bigip_gtm_datacenter.send_teem')
self.m2 = self.p2.start()
self.m2.return_value = '14.1.0'
self.m3 = self.p3.start()
self.m3.return_value = True
def tearDown(self):
self.p1.stop()
self.p2.stop()
self.p3.stop()
def test_create_datacenter(self, *args):
set_module_args(dict(
name='foo',
state='present',
provider=dict(
server='localhost',
password='password',
user='admin'
)
))
module = AnsibleModule(
argument_spec=self.spec.argument_spec,
supports_check_mode=self.spec.supports_check_mode
)
mm = ModuleManager(module=module)
# Override methods to force specific logic in the module to happen
mm.exists = Mock(side_effect=[False, True])
mm.create_on_device = Mock(return_value=True)
mm.module_provisioned = Mock(return_value=True)
results = mm.exec_module()
assert results['changed'] is True
assert results['state'] == 'present'
def test_create_disabled_datacenter(self, *args):
set_module_args(dict(
name='foo',
state='disabled',
provider=dict(
server='localhost',
password='password',
user='admin'
)
))
module = AnsibleModule(
argument_spec=self.spec.argument_spec,
supports_check_mode=self.spec.supports_check_mode
)
mm = ModuleManager(module=module)
# Override methods to force specific logic in the module to happen
mm.exists = Mock(side_effect=[False, True])
mm.create_on_device = Mock(return_value=True)
mm.module_provisioned = Mock(return_value=True)
results = mm.exec_module()
assert results['changed'] is True
assert results['enabled'] is False
assert results['disabled'] is True
def test_create_enabled_datacenter(self, *args):
set_module_args(dict(
name='foo',
state='enabled',
provider=dict(
server='localhost',
password='password',
user='admin'
)
))
module = AnsibleModule(
argument_spec=self.spec.argument_spec,
supports_check_mode=self.spec.supports_check_mode
)
mm = ModuleManager(module=module)
# Override methods to force specific logic in the module to happen
mm.exists = Mock(side_effect=[False, True])
mm.create_on_device = Mock(return_value=True)
mm.module_provisioned = Mock(return_value=True)
results = mm.exec_module()
assert results['changed'] is True
assert results['enabled'] is True
assert results['disabled'] is False
def test_idempotent_disable_datacenter(self, *args):
set_module_args(dict(
name='foo',
state='disabled',
provider=dict(
server='localhost',
password='password',
user='admin'
)
))
module = AnsibleModule(
argument_spec=self.spec.argument_spec,
supports_check_mode=self.spec.supports_check_mode
)
current = ApiParameters(params=load_fixture('load_gtm_datacenter_disabled.json'))
mm = ModuleManager(module=module)
# Override methods to force specific logic in the module to happen
mm.exists = Mock(return_value=True)
mm.update_on_device = Mock(return_value=True)
mm.read_current_from_device = Mock(return_value=current)
mm.module_provisioned = Mock(return_value=True)
results = mm.exec_module()
assert results['changed'] is False
| 30.713043
| 124
| 0.625425
|
0b898e852e920e2852835108928af595b10e7928
| 14,854
|
py
|
Python
|
qcfractal/server.py
|
ChayaSt/QCFractal
|
2d3c737b0e755d6e5bac743a0beb0714b5a92d0b
|
[
"BSD-3-Clause"
] | null | null | null |
qcfractal/server.py
|
ChayaSt/QCFractal
|
2d3c737b0e755d6e5bac743a0beb0714b5a92d0b
|
[
"BSD-3-Clause"
] | null | null | null |
qcfractal/server.py
|
ChayaSt/QCFractal
|
2d3c737b0e755d6e5bac743a0beb0714b5a92d0b
|
[
"BSD-3-Clause"
] | null | null | null |
"""
The FractalServer class
"""
import asyncio
import logging
import ssl
import threading
import traceback
import tornado.httpserver
import tornado.ioloop
import tornado.log
import tornado.options
import tornado.web
from . import interface
from . import queue
from . import services
from . import storage_sockets
from . import web_handlers
myFormatter = logging.Formatter('[%(asctime)s] %(message)s', datefmt='%m/%d/%Y %I:%M:%S %p')
def _build_ssl():
from cryptography import x509
from cryptography.x509.oid import NameOID
from cryptography.hazmat.primitives import hashes
from cryptography.hazmat.backends import default_backend
from cryptography.hazmat.primitives import serialization
from cryptography.hazmat.primitives.asymmetric import rsa
import sys
import socket
import datetime
import ipaddress
import random
hostname = socket.gethostname()
public_ip = ipaddress.ip_address(socket.gethostbyname(hostname))
key = rsa.generate_private_key(public_exponent=65537, key_size=1024, backend=default_backend())
alt_name_list = [x509.DNSName(hostname), x509.IPAddress(ipaddress.ip_address(public_ip))]
alt_names = x509.SubjectAlternativeName(alt_name_list)
# Basic data
name = x509.Name([x509.NameAttribute(NameOID.COMMON_NAME, hostname)])
    basic_constraints = x509.BasicConstraints(ca=True, path_length=0)
now = datetime.datetime.utcnow()
# Build cert
cert = (x509.CertificateBuilder()
.subject_name(name)
.issuer_name(name)
.public_key(key.public_key())
.serial_number(int(random.random() * sys.maxsize))
.not_valid_before(now)
.not_valid_after(now + datetime.timedelta(days=10*365))
            .add_extension(basic_constraints, False)
.add_extension(alt_names, False)
.sign(key, hashes.SHA256(), default_backend())) # yapf: disable
# Build and return keys
cert_pem = cert.public_bytes(encoding=serialization.Encoding.PEM)
key_pem = key.private_bytes(
encoding=serialization.Encoding.PEM,
format=serialization.PrivateFormat.TraditionalOpenSSL,
encryption_algorithm=serialization.NoEncryption(),
) # yapf: disable
return cert_pem, key_pem
class FractalServer:
def __init__(
self,
# Server info options
port=8888,
loop=None,
security=None,
ssl_options=None,
# Database options
storage_uri="mongodb://localhost",
storage_project_name="molssistorage",
# Queue options
queue_socket=None,
# Log options
logfile_prefix=None,
# Queue options
max_active_services=10):
# Save local options
self.port = port
if ssl_options is False:
self._address = "http://localhost:" + str(self.port) + "/"
else:
self._address = "https://localhost:" + str(self.port) + "/"
self.max_active_services = max_active_services
# Setup logging.
if logfile_prefix is not None:
tornado.options.options['log_file_prefix'] = logfile_prefix
tornado.log.enable_pretty_logging()
self.logger = logging.getLogger("tornado.application")
# Build security layers
if security is None:
storage_bypass_security = True
elif security == "local":
storage_bypass_security = False
else:
raise KeyError("Security option '{}' not recognized.".format(security))
# Handle SSL
ssl_ctx = None
self.client_verify = True
if ssl_options is None:
self.logger.warning("No SSL files passed in, generating self-signed SSL certificate.")
self.logger.warning("Clients must use `verify=False` when connecting.\n")
cert, key = _build_ssl()
# Add quick names
cert_name = storage_project_name + "_ssl.crt"
key_name = storage_project_name + "_ssl.key"
ssl_options = {"crt": cert_name, "key": key_name}
with open(cert_name, "wb") as handle:
handle.write(cert)
with open(key_name, "wb") as handle:
handle.write(key)
ssl_ctx = ssl.create_default_context(ssl.Purpose.CLIENT_AUTH)
ssl_ctx.load_cert_chain(ssl_options["crt"], ssl_options["key"])
# Destroy keyfiles upon close
import atexit
import os
atexit.register(os.remove, cert_name)
atexit.register(os.remove, key_name)
self.client_verify = False
elif ssl_options is False:
ssl_ctx = None
elif isinstance(ssl_options, dict):
if ("crt" not in ssl_options) or ("key" not in ssl_options):
raise KeyError("'crt' (SSL Certificate) and 'key' (SSL Key) fields are required for `ssl_options`.")
ssl_ctx = ssl.create_default_context(ssl.Purpose.CLIENT_AUTH)
ssl_ctx.load_cert_chain(ssl_options["crt"], ssl_options["key"])
else:
raise KeyError("ssl_options not understood")
# Setup the database connection
self.storage = storage_sockets.storage_socket_factory(
storage_uri, project_name=storage_project_name, bypass_security=storage_bypass_security)
self.logger.info("Connected to '{}'' with database name '{}'\n.".format(storage_uri, storage_project_name))
# Pull the current loop if we need it
self.loop = loop or tornado.ioloop.IOLoop.current()
# Build up the application
self.objects = {
"storage_socket": self.storage,
"logger": self.logger,
}
endpoints = [
# Generic web handlers
(r"/molecule", web_handlers.MoleculeHandler, self.objects),
(r"/option", web_handlers.OptionHandler, self.objects),
(r"/collection", web_handlers.CollectionHandler, self.objects),
(r"/result", web_handlers.ResultHandler, self.objects),
(r"/procedure", web_handlers.ProcedureHandler, self.objects),
# Queue Schedulers
(r"/task_queue", queue.TaskQueueHandler, self.objects),
(r"/service_queue", queue.ServiceQueueHandler, self.objects),
(r"/queue_manager", queue.QueueManagerHandler, self.objects),
]
# Queue manager if direct build
if queue_socket is not None:
if security == "local":
raise ValueError("Cannot yet use local security with a internal QueueManager")
# Add the socket to passed args
client = interface.FractalClient(self._address, verify=self.client_verify)
self.objects["queue_manager"] = queue.QueueManager(
client, queue_socket, loop=loop, logger=self.logger, cluster="FractalServer")
# Build the app
app_settings = {
"compress_response": True,
"serve_traceback": True,
# "debug": True,
}
self.app = tornado.web.Application(endpoints, **app_settings)
self.endpoints = set([v[0].replace("/", "", 1) for v in endpoints])
self.http_server = tornado.httpserver.HTTPServer(self.app, ssl_options=ssl_ctx)
self.http_server.listen(self.port)
# Add periodic callback holders
self.periodic = {}
# Exit callbacks
self.exit_callbacks = []
self.logger.info("FractalServer successfully initialized at {}".format(self._address))
self.loop_active = False
def start(self):
"""
Starts up all IOLoops and processes
"""
self.logger.info("FractalServer successfully started. Starting IOLoop.\n")
# If we have a queue socket start up the nanny
if "queue_manager" in self.objects:
# Add canonical queue callback
manager = tornado.ioloop.PeriodicCallback(self.update_tasks, 2000)
manager.start()
self.periodic["queue_manager_update"] = manager
# Add services callback
nanny_services = tornado.ioloop.PeriodicCallback(self.update_services, 2000)
nanny_services.start()
self.periodic["update_services"] = nanny_services
# Soft quit with a keyboard interrupt
try:
self.loop_active = True
if not asyncio.get_event_loop().is_running(): # Only works on Py3
self.loop.start()
except KeyboardInterrupt:
self.stop()
def stop(self):
"""
Shuts down all IOLoops and periodic updates
"""
# Shut down queue manager
if "queue_manager" in self.objects:
if self.loop_active:
                # Drop this in a thread so that we are not blocking each other
thread = threading.Thread(target=self.objects["queue_manager"].shutdown, name="QueueManager Shutdown")
thread.daemon = True
thread.start()
self.loop.call_later(5, thread.join)
else:
self.objects["queue_manager"].shutdown()
# Close down periodics
for cb in self.periodic.values():
cb.stop()
# Call exit callbacks
for func, args, kwargs in self.exit_callbacks:
func(*args, **kwargs)
# Shutdown IOLoop if needed
if asyncio.get_event_loop().is_running():
self.loop.stop()
self.loop_active = False
# Final shutdown
self.loop.close(all_fds=True)
self.logger.info("FractalServer stopping gracefully. Stopped IOLoop.\n")
def add_exit_callback(self, callback, *args, **kwargs):
"""Adds additional callbacks to perform when closing down the server
Parameters
----------
callback : callable
The function to call at exit
*args
Arguments to call with the function.
**kwargs
Kwargs to call with the function.
"""
self.exit_callbacks.append((callback, args, kwargs))
def get_address(self, endpoint=None):
"""Obtains the full URI for a given function on the FractalServer
Parameters
----------
endpoint : str, optional
Specifies a endpoint to provide the URI to
"""
if endpoint and (endpoint not in self.endpoints):
raise AttributeError("Endpoint '{}' not found.".format(endpoint))
if endpoint:
return self._address + endpoint
else:
return self._address
def update_services(self):
"""Runs through all active services and examines their current status.
"""
# Grab current services
current_services = self.storage.get_services({"status": "RUNNING"})["data"]
# Grab new services if we have open slots
open_slots = max(0, self.max_active_services - len(current_services))
if open_slots > 0:
new_services = self.storage.get_services({"status": "READY"}, limit=open_slots)["data"]
current_services.extend(new_services)
# Loop over the services and iterate
running_services = 0
new_procedures = []
complete_ids = []
for data in current_services:
# Attempt to iteration and get message
try:
obj = services.build(data["service"], self.storage, data)
finished = obj.iterate()
data = obj.get_json()
except Exception as e:
print(traceback.format_exc())
data["status"] = "ERROR"
data["error_message"] = "FractalServer Service Build and Iterate Error:\n" + traceback.format_exc()
finished = False
self.storage.update_services([(data["id"], data)])
if finished is not False:
# Add results to procedures, remove complete_ids
new_procedures.append(finished)
complete_ids.append(data["id"])
else:
running_services += 1
# Add new procedures and services
self.storage.add_procedures(new_procedures)
self.storage.del_services(complete_ids)
return running_services
### Functions only available if using a local queue_adapter
def _check_manager(self, func_name):
if "queue_manager" not in self.objects:
raise AttributeError(
"{} is only available if the server was initialized with a queue manager.".format(func_name))
def update_tasks(self):
"""Pulls tasks from the queue_adapter, inserts them into the database,
and fills the queue_adapter with new tasks.
Returns
-------
bool
Return True if the operation completed successfully
"""
self._check_manager("update_tasks")
if self.loop_active:
# Drop this in a thread so that we are not blocking each other
thread = threading.Thread(target=self.objects["queue_manager"].update, name="QueueManager Update")
thread.daemon = True
thread.start()
self.loop.call_later(5, thread.join)
else:
self.objects["queue_manager"].update()
return True
def await_results(self):
"""A synchronous method for testing or small launches
that awaits task completion before adding all queued results
to the database and returning.
Returns
-------
bool
Return True if the operation completed successfully
"""
self._check_manager("await_results")
self.logger.info("Updating tasks")
return self.objects["queue_manager"].await_results()
def await_services(self, max_iter=10):
"""A synchronous method that awaits the completion of all services
before returning.
Returns
-------
bool
Return True if the operation completed successfully
"""
self._check_manager("await_services")
self.await_results()
for x in range(1, max_iter + 1):
self.logger.info("\nAwait services: Iteration {}\n".format(x))
running_services = self.update_services()
self.await_results()
if running_services == 0:
break
return True
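    # Illustrative sketch (not part of the original file): in a test or small local
    # launch with a queue manager attached, a typical synchronous flow is assumed to be
    #   server.await_results()    # run queued tasks and store their results
    #   server.await_services()   # iterate services until none are RUNNING
    # Both calls block until the corresponding work has completed.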
def list_current_tasks(self):
"""Provides a list of tasks currently in the queue along
with the associated keys
Returns
-------
ret : list of tuples
All tasks currently still in the database
"""
self._check_manager("list_current_tasks")
return self.objects["queue_manager"].list_current_tasks()
| 33.68254
| 118
| 0.61613
|
587ce641463e5b80404c88308ac402e3e7791462
| 3,603
|
py
|
Python
|
dev/website_docs/parse_tutorials.py
|
blazejdolicki/vissl
|
9c10748a19fb1c637f32687142c8cd685f2410ff
|
[
"MIT"
] | 2,512
|
2021-01-27T18:44:44.000Z
|
2022-03-31T19:33:49.000Z
|
dev/website_docs/parse_tutorials.py
|
blazejdolicki/vissl
|
9c10748a19fb1c637f32687142c8cd685f2410ff
|
[
"MIT"
] | 361
|
2021-01-27T20:12:09.000Z
|
2022-03-31T12:39:34.000Z
|
dev/website_docs/parse_tutorials.py
|
blazejdolicki/vissl
|
9c10748a19fb1c637f32687142c8cd685f2410ff
|
[
"MIT"
] | 277
|
2021-01-29T08:09:02.000Z
|
2022-03-31T07:57:35.000Z
|
# Copyright (c) Facebook, Inc. and its affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import argparse
import json
import os
import nbformat
from bs4 import BeautifulSoup
from nbconvert import HTMLExporter, ScriptExporter
TEMPLATE = """const CWD = process.cwd();
const React = require('react');
const Tutorial = require(`${{CWD}}/core/Tutorial.js`);
class TutorialPage extends React.Component {{
render() {{
const {{config: siteConfig}} = this.props;
const {{baseUrl}} = siteConfig;
return <Tutorial baseUrl={{baseUrl}} tutorialID="{}"/>;
}}
}}
module.exports = TutorialPage;
"""
JS_SCRIPTS = """
<script
src="https://cdnjs.cloudflare.com/ajax/libs/require.js/2.1.10/require.min.js">
</script>
<script
src="https://cdnjs.cloudflare.com/ajax/libs/jquery/2.0.3/jquery.min.js">
</script>
""" # noqa: E501
def gen_tutorials(repo_dir: str) -> None:
"""Generate HTML tutorials for VISSL Docusaurus site from Jupyter notebooks.
    Also creates ipynb and py versions of each tutorial in the Docusaurus
    site for download.
"""
with open(os.path.join(repo_dir, "website", "tutorials.json"), "r") as infile:
tutorial_config = json.loads(infile.read())
tutorial_ids = {x["id"] for v in tutorial_config.values() for x in v}
for tid in tutorial_ids:
print("Generating {} tutorial".format(tid))
# convert notebook to HTML
ipynb_in_path = os.path.join(repo_dir, "tutorials", "{}.ipynb".format(tid))
with open(ipynb_in_path, "r") as infile:
nb_str = infile.read()
nb = nbformat.reads(nb_str, nbformat.NO_CONVERT)
        # display_name is absent from notebook metadata; set it explicitly
nb["metadata"]["kernelspec"]["display_name"] = "python3"
exporter = HTMLExporter()
html, meta = exporter.from_notebook_node(nb)
# pull out html div for notebook
soup = BeautifulSoup(html, "html.parser")
nb_meat = soup.find("div", {"id": "notebook-container"})
del nb_meat.attrs["id"]
nb_meat.attrs["class"] = ["notebook"]
html_out = JS_SCRIPTS + str(nb_meat)
# generate html file
html_out_path = os.path.join(
repo_dir, "website", "_tutorials", "{}.html".format(tid)
)
with open(html_out_path, "w") as html_outfile:
html_outfile.write(html_out)
# generate JS file
script = TEMPLATE.format(tid)
js_out_path = os.path.join(
repo_dir, "website", "pages", "tutorials", "{}.js".format(tid)
)
with open(js_out_path, "w") as js_outfile:
js_outfile.write(script)
# output tutorial in both ipynb & py form
ipynb_out_path = os.path.join(
repo_dir, "website", "static", "files", "{}.ipynb".format(tid)
)
with open(ipynb_out_path, "w") as ipynb_outfile:
ipynb_outfile.write(nb_str)
exporter = ScriptExporter()
script, meta = exporter.from_notebook_node(nb)
py_out_path = os.path.join(
repo_dir, "website", "static", "files", "{}.py".format(tid)
)
with open(py_out_path, "w") as py_outfile:
py_outfile.write(script)
if __name__ == "__main__":
parser = argparse.ArgumentParser(
description="Generate JS, HTML, ipynb, and py files for tutorials."
)
parser.add_argument(
"--repo_dir", metavar="path", required=True, help="VISSL repo directory."
)
args = parser.parse_args()
gen_tutorials(args.repo_dir)
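# Illustrative sketch (not part of the original file): the script assumes
# website/tutorials.json maps section names to lists of tutorial entries, e.g.
#   {"Getting Started": [{"id": "my_tutorial", "title": "My Tutorial"}]}
# (only the "id" field is read above; the other fields are hypothetical).
# It would then be invoked from the repo root roughly as
#   python dev/website_docs/parse_tutorials.py --repo_dir /path/to/vissl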
| 32.754545
| 83
| 0.634749
|
7f10e1c0cb847ca10cbdef318a6bb410df71386c
| 19,517
|
py
|
Python
|
reg/tests/test_dispatch_method.py
|
sgaist/reg
|
c6d721da1d71a299063695966a8f73c4180de1a5
|
[
"BSD-3-Clause"
] | null | null | null |
reg/tests/test_dispatch_method.py
|
sgaist/reg
|
c6d721da1d71a299063695966a8f73c4180de1a5
|
[
"BSD-3-Clause"
] | null | null | null |
reg/tests/test_dispatch_method.py
|
sgaist/reg
|
c6d721da1d71a299063695966a8f73c4180de1a5
|
[
"BSD-3-Clause"
] | null | null | null |
from types import FunctionType
import pytest
from ..context import (
dispatch,
dispatch_method,
methodify,
clean_dispatch_methods,
)
from ..predicate import match_instance
from ..error import RegistrationError
def test_dispatch_method_explicit_fallback():
def obj_fallback(self, obj):
return "Obj fallback"
class Foo(object):
@dispatch_method(match_instance("obj", fallback=obj_fallback))
def bar(self, obj):
return "default"
class Alpha(object):
pass
class Beta(object):
pass
foo = Foo()
assert foo.bar(Alpha()) == "Obj fallback"
Foo.bar.register(lambda self, obj: "Alpha", obj=Alpha)
Foo.bar.register(lambda self, obj: "Beta", obj=Beta)
assert foo.bar(Alpha()) == "Alpha"
assert foo.bar(Beta()) == "Beta"
assert foo.bar(None) == "Obj fallback"
def test_dispatch_method_without_fallback():
class Foo(object):
@dispatch_method(match_instance("obj"))
def bar(self, obj):
return "default"
class Alpha(object):
pass
class Beta(object):
pass
foo = Foo()
assert foo.bar(Alpha()) == "default"
Foo.bar.register(lambda self, obj: "Alpha", obj=Alpha)
Foo.bar.register(lambda self, obj: "Beta", obj=Beta)
assert foo.bar(Alpha()) == "Alpha"
assert foo.bar(Beta()) == "Beta"
assert foo.bar(None) == "default"
def test_dispatch_method_string_predicates():
class Foo(object):
@dispatch_method("obj")
def bar(self, obj):
return "default"
class Alpha(object):
pass
class Beta(object):
pass
foo = Foo()
assert foo.bar(Alpha()) == "default"
Foo.bar.register(lambda self, obj: "Alpha", obj=Alpha)
Foo.bar.register(lambda self, obj: "Beta", obj=Beta)
assert foo.bar(Alpha()) == "Alpha"
assert foo.bar(Beta()) == "Beta"
assert foo.bar(None) == "default"
def test_dispatch_method_add_predicates():
class Foo(object):
@dispatch_method()
def bar(self, obj):
return "default"
Foo.bar.add_predicates([match_instance("obj")])
class Alpha(object):
pass
class Beta(object):
pass
foo = Foo()
assert foo.bar(Alpha()) == "default"
Foo.bar.register(lambda self, obj: "Alpha", obj=Alpha)
Foo.bar.register(lambda self, obj: "Beta", obj=Beta)
assert foo.bar(Alpha()) == "Alpha"
assert foo.bar(Beta()) == "Beta"
assert foo.bar(None) == "default"
def test_dispatch_method_register_function():
class Foo(object):
@dispatch_method(match_instance("obj"))
def bar(self, obj):
return "default"
class Alpha(object):
pass
class Beta(object):
pass
foo = Foo()
assert foo.bar(Alpha()) == "default"
Foo.bar.register(methodify(lambda obj: "Alpha"), obj=Alpha)
Foo.bar.register(methodify(lambda obj: "Beta"), obj=Beta)
assert foo.bar(Alpha()) == "Alpha"
assert foo.bar(Beta()) == "Beta"
assert foo.bar(None) == "default"
def test_dispatch_method_register_function_wrong_signature_too_long():
class Foo(object):
@dispatch_method("obj")
def bar(self, obj):
return "default"
class Alpha(object):
pass
with pytest.raises(RegistrationError):
Foo.bar.register(methodify(lambda obj, extra: "Alpha"), obj=Alpha)
def test_dispatch_method_register_function_wrong_signature_too_short():
class Foo(object):
@dispatch_method("obj")
def bar(self, obj):
return "default"
class Alpha(object):
pass
with pytest.raises(RegistrationError):
Foo.bar.register(methodify(lambda: "Alpha"), obj=Alpha)
def test_dispatch_method_register_non_callable():
class Foo(object):
@dispatch_method("obj")
def bar(self, obj):
return "default"
class Alpha(object):
pass
with pytest.raises(RegistrationError):
Foo.bar.register("cannot call this", obj=Alpha)
def test_dispatch_method_methodify_non_callable():
with pytest.raises(TypeError):
methodify("cannot call this")
def test_dispatch_method_register_auto():
class Foo(object):
x = "X"
@dispatch_method(match_instance("obj"))
def bar(self, obj):
return "default"
class Alpha(object):
pass
class Beta(object):
pass
foo = Foo()
assert foo.bar(Alpha()) == "default"
Foo.bar.register(methodify(lambda obj: "Alpha", "app"), obj=Alpha)
Foo.bar.register(
methodify(lambda app, obj: "Beta %s" % app.x, "app"), obj=Beta
)
assert foo.bar(Alpha()) == "Alpha"
assert foo.bar(Beta()) == "Beta X"
assert foo.bar(None) == "default"
def test_dispatch_method_class_method_accessed_first():
class Foo(object):
@dispatch_method(match_instance("obj"))
def bar(self, obj):
return "default"
class Alpha(object):
pass
class Beta(object):
pass
Foo.bar.register(lambda self, obj: "Alpha", obj=Alpha)
Foo.bar.register(lambda self, obj: "Beta", obj=Beta)
foo = Foo()
assert foo.bar(Alpha()) == "Alpha"
assert foo.bar(Beta()) == "Beta"
assert foo.bar(None) == "default"
def test_dispatch_method_accesses_instance():
class Foo(object):
def __init__(self, x):
self.x = x
@dispatch_method(match_instance("obj"))
def bar(self, obj):
return "default %s" % self.x
class Alpha(object):
pass
class Beta(object):
pass
Foo.bar.register(lambda self, obj: "Alpha %s" % self.x, obj=Alpha)
Foo.bar.register(lambda self, obj: "Beta %s" % self.x, obj=Beta)
foo = Foo("hello")
assert foo.bar(Alpha()) == "Alpha hello"
assert foo.bar(Beta()) == "Beta hello"
assert foo.bar(None) == "default hello"
def test_dispatch_method_inheritance_register_on_subclass():
class Foo(object):
@dispatch_method(match_instance("obj"))
def bar(self, obj):
return "default"
class Sub(Foo):
pass
class Alpha(object):
pass
class Beta(object):
pass
sub = Sub()
assert sub.bar(Alpha()) == "default"
Sub.bar.register(lambda self, obj: "Alpha", obj=Alpha)
Sub.bar.register(lambda self, obj: "Beta", obj=Beta)
assert sub.bar(Alpha()) == "Alpha"
assert sub.bar(Beta()) == "Beta"
assert sub.bar(None) == "default"
def test_dispatch_method_inheritance_separation():
class Foo(object):
@dispatch_method(match_instance("obj"))
def bar(self, obj):
return "default"
class Sub(Foo):
pass
class Alpha(object):
pass
class Beta(object):
pass
# programmatic style:
Foo.bar.register(lambda self, obj: "Foo Alpha", obj=Alpha)
# decorator style:
Foo.bar.register(obj=Beta)(lambda self, obj: "Foo Beta")
# programmatic style:
Sub.bar.register(lambda self, obj: "Sub Alpha", obj=Alpha)
# decorator style:
Sub.bar.register(obj=Beta)(lambda self, obj: "Sub Beta")
foo = Foo()
sub = Sub()
assert foo.bar(Alpha()) == "Foo Alpha"
assert foo.bar(Beta()) == "Foo Beta"
assert foo.bar(None) == "default"
assert sub.bar(Alpha()) == "Sub Alpha"
assert sub.bar(Beta()) == "Sub Beta"
assert sub.bar(None) == "default"
def test_dispatch_method_inheritance_separation_multiple():
class Foo(object):
@dispatch_method(match_instance("obj"))
def bar(self, obj):
return "bar default"
@dispatch_method(match_instance("obj"))
def qux(self, obj):
return "qux default"
class Sub(Foo):
pass
class Alpha(object):
pass
class Beta(object):
pass
Foo.bar.register(lambda self, obj: "Bar Foo Alpha", obj=Alpha)
Foo.bar.register(lambda self, obj: "Bar Foo Beta", obj=Beta)
Sub.bar.register(lambda self, obj: "Bar Sub Alpha", obj=Alpha)
Sub.bar.register(lambda self, obj: "Bar Sub Beta", obj=Beta)
Foo.qux.register(lambda self, obj: "Qux Foo Alpha", obj=Alpha)
Foo.qux.register(lambda self, obj: "Qux Foo Beta", obj=Beta)
Sub.qux.register(lambda self, obj: "Qux Sub Alpha", obj=Alpha)
Sub.qux.register(lambda self, obj: "Qux Sub Beta", obj=Beta)
foo = Foo()
sub = Sub()
assert foo.bar(Alpha()) == "Bar Foo Alpha"
assert foo.bar(Beta()) == "Bar Foo Beta"
assert foo.bar(None) == "bar default"
assert sub.bar(Alpha()) == "Bar Sub Alpha"
assert sub.bar(Beta()) == "Bar Sub Beta"
assert sub.bar(None) == "bar default"
assert foo.qux(Alpha()) == "Qux Foo Alpha"
assert foo.qux(Beta()) == "Qux Foo Beta"
assert foo.qux(None) == "qux default"
assert sub.qux(Alpha()) == "Qux Sub Alpha"
assert sub.qux(Beta()) == "Qux Sub Beta"
assert sub.qux(None) == "qux default"
def test_dispatch_method_api_available():
def obj_fallback(self, obj):
return "Obj fallback"
class Foo(object):
@dispatch_method(match_instance("obj", fallback=obj_fallback))
def bar(self, obj):
return "default"
class Alpha(object):
pass
class Beta(object):
pass
foo = Foo()
def alpha_func(self, obj):
return "Alpha"
def beta_func(self, obj):
return "Beta"
Foo.bar.register(alpha_func, obj=Alpha)
Foo.bar.register(beta_func, obj=Beta)
assert foo.bar(Alpha()) == "Alpha"
assert Foo.bar.by_args(Alpha()).component == alpha_func
assert foo.bar.by_args(Alpha()).component == alpha_func
assert foo.bar.by_args(Alpha()).all_matches == [alpha_func]
assert foo.bar.by_args(Beta()).component == beta_func
assert foo.bar.by_args(None).component is None
assert foo.bar.by_args(None).fallback is obj_fallback
assert foo.bar.by_args(None).all_matches == []
def test_dispatch_method_with_register_function_value():
class Foo(object):
@dispatch_method(match_instance("obj"))
def bar(self, obj):
return "default"
class Alpha(object):
pass
class Beta(object):
pass
foo = Foo()
assert foo.bar(Alpha()) == "default"
def alpha_func(obj):
return "Alpha"
def beta_func(obj):
return "Beta"
Foo.bar.register(methodify(alpha_func), obj=Alpha)
Foo.bar.register(methodify(beta_func), obj=Beta)
assert unmethodify(foo.bar.by_args(Alpha()).component) is alpha_func
def test_dispatch_method_with_register_auto_value():
class Foo(object):
@dispatch_method(match_instance("obj"))
def bar(self, obj):
return "default"
class Alpha(object):
pass
class Beta(object):
pass
foo = Foo()
assert foo.bar(Alpha()) == "default"
def alpha_func(obj):
return "Alpha"
def beta_func(app, obj):
return "Beta"
Foo.bar.register(methodify(alpha_func, "app"), obj=Alpha)
Foo.bar.register(methodify(beta_func, "app"), obj=Beta)
assert unmethodify(foo.bar.by_args(Alpha()).component) is alpha_func
assert unmethodify(foo.bar.by_args(Beta()).component) is beta_func
# actually since this is a method this is also unwrapped
assert foo.bar.by_args(Beta()).component is beta_func
def test_install_method():
class Target(object):
pass
def f(self, a):
return a
Target.m = f
t = Target()
assert t.m("A") == "A"
def test_install_auto_method_function_no_app_arg():
class Target(object):
pass
def f(a):
return a
Target.m = methodify(f, "app")
t = Target()
assert t.m("A") == "A"
assert unmethodify(t.m) is f
def test_install_auto_method_function_app_arg():
class Target(object):
pass
def g(app, a):
assert isinstance(app, Target)
return a
Target.m = methodify(g, "app")
t = Target()
assert t.m("A") == "A"
assert unmethodify(t.m) is g
def test_install_auto_method_method_no_app_arg():
class Target(object):
pass
class Foo(object):
def f(self, a):
return a
f = Foo().f
Target.m = methodify(f, "app")
t = Target()
assert t.m("A") == "A"
assert unmethodify(t.m) is f
def test_install_auto_method_method_app_arg():
class Target(object):
pass
class Bar(object):
def g(self, app, a):
assert isinstance(app, Target)
return a
g = Bar().g
Target.m = methodify(g, "app")
t = Target()
assert t.m("A") == "A"
assert unmethodify(t.m) is g
def test_install_instance_method():
class Target(object):
pass
class Bar(object):
def g(self, a):
assert isinstance(self, Bar)
return a
g = Bar().g
Target.m = methodify(g)
t = Target()
assert t.m("A") == "A"
assert unmethodify(t.m) is g
def test_dispatch_method_introspection():
class Foo(object):
@dispatch_method("obj")
def bar(self, obj):
"Return the bar of an object."
return "default"
assert Foo.bar.__name__ == "bar"
assert Foo.bar.__doc__ == "Return the bar of an object."
assert Foo.bar.__module__ == __name__
def test_dispatch_method_clean():
class Foo(object):
@dispatch_method(match_instance("obj"))
def bar(self, obj):
return "default"
class Qux(Foo):
pass
class Alpha(object):
pass
class Beta(object):
pass
foo = Foo()
qux = Qux()
Foo.bar.register(lambda self, obj: "Alpha", obj=Alpha)
Foo.bar.register(lambda self, obj: "Beta", obj=Beta)
Qux.bar.register(lambda self, obj: "Qux Alpha", obj=Alpha)
Qux.bar.register(lambda self, obj: "Qux Beta", obj=Beta)
assert foo.bar(Alpha()) == "Alpha"
assert foo.bar(Beta()) == "Beta"
assert foo.bar(None) == "default"
assert qux.bar(Alpha()) == "Qux Alpha"
assert qux.bar(Beta()) == "Qux Beta"
assert qux.bar(None) == "default"
Foo.bar.clean()
assert foo.bar(Alpha()) == "default"
# but hasn't affected qux registry
assert qux.bar(Alpha()) == "Qux Alpha"
def test_clean_dispatch_methods():
class Foo(object):
@dispatch_method(match_instance("obj"))
def bar(self, obj):
return "default"
class Qux(Foo):
pass
class Alpha(object):
pass
class Beta(object):
pass
foo = Foo()
qux = Qux()
Foo.bar.register(lambda self, obj: "Alpha", obj=Alpha)
Foo.bar.register(lambda self, obj: "Beta", obj=Beta)
Qux.bar.register(lambda self, obj: "Qux Alpha", obj=Alpha)
Qux.bar.register(lambda self, obj: "Qux Beta", obj=Beta)
assert foo.bar(Alpha()) == "Alpha"
assert foo.bar(Beta()) == "Beta"
assert foo.bar(None) == "default"
assert qux.bar(Alpha()) == "Qux Alpha"
assert qux.bar(Beta()) == "Qux Beta"
assert qux.bar(None) == "default"
clean_dispatch_methods(Foo)
assert foo.bar(Alpha()) == "default"
# but hasn't affected qux registry
assert qux.bar(Alpha()) == "Qux Alpha"
def test_replacing_with_normal_method():
class Foo(object):
@dispatch_method("obj")
def bar(self, obj):
return "default"
class Alpha(object):
pass
class Beta(object):
pass
# At this moment Foo.bar is still a descriptor, even though it is
# not easy to see that:
assert isinstance(vars(Foo)["bar"], dispatch_method)
# Simply using Foo.bar wouldn't have worked here, as it would
# invoke the descriptor:
assert isinstance(Foo.bar, FunctionType)
# We now replace the descriptor with the actual unbound method:
Foo.bar = Foo.bar
# Now the descriptor is gone
assert isinstance(vars(Foo)["bar"], FunctionType)
# But we can still use the generic function as usual, and even
# register new implementations:
Foo.bar.register(obj=Alpha)(lambda self, obj: "Alpha")
Foo.bar.register(obj=Beta)(lambda self, obj: "Beta")
foo = Foo()
assert foo.bar(Alpha()) == "Alpha"
assert foo.bar(Beta()) == "Beta"
assert foo.bar(None) == "default"
def test_replacing_with_normal_method_and_its_effect_on_inheritance():
class Foo(object):
@dispatch_method("obj")
def bar(self, obj):
return "default"
class SubFoo(Foo):
pass
class Alpha(object):
pass
class Beta(object):
pass
Foo.bar.register(obj=Alpha)(lambda self, obj: "Alpha")
Foo.bar.register(obj=Beta)(lambda self, obj: "Beta")
foo = Foo()
assert foo.bar(Alpha()) == "Alpha"
assert foo.bar(Beta()) == "Beta"
assert foo.bar(None) == "default"
# SubFoo has different dispatching from Foo
subfoo = SubFoo()
assert subfoo.bar(Alpha()) == "default"
assert subfoo.bar(Beta()) == "default"
assert subfoo.bar(None) == "default"
# We now replace the descriptor with the actual unbound method:
Foo.bar = Foo.bar
# Now the descriptor is gone
assert isinstance(vars(Foo)["bar"], FunctionType)
# Foo.bar works as before:
foo = Foo()
assert foo.bar(Alpha()) == "Alpha"
assert foo.bar(Beta()) == "Beta"
assert foo.bar(None) == "default"
# But now SubFoo.bar shares the dispatch registry with Foo:
subfoo = SubFoo()
assert subfoo.bar(Alpha()) == "Alpha"
assert subfoo.bar(Beta()) == "Beta"
assert subfoo.bar(None) == "default"
# This is exactly the same behavior we'd get by using dispatch
# instead of dispatch_method:
del Foo, SubFoo
class Foo(object):
@dispatch("obj")
def bar(self, obj):
return "default"
class SubFoo(Foo):
pass
# Foo and SubFoo share the same registry:
Foo.bar.register(obj=Alpha)(lambda self, obj: "Alpha")
SubFoo.bar.register(obj=Beta)(lambda self, obj: "Beta")
foo = Foo()
assert foo.bar(Alpha()) == "Alpha"
assert foo.bar(Beta()) == "Beta"
assert foo.bar(None) == "default"
subfoo = SubFoo()
assert subfoo.bar(Alpha()) == "Alpha"
assert subfoo.bar(Beta()) == "Beta"
assert subfoo.bar(None) == "default"
# Now we start again, and do the replacement for both subclass and
# parent class, in this order:
del Foo, SubFoo
class Foo(object):
@dispatch_method("obj")
def bar(self, obj):
return "default"
class SubFoo(Foo):
pass
Foo.bar.register(obj=Alpha)(lambda self, obj: "Alpha")
Foo.bar.register(obj=Beta)(lambda self, obj: "Beta")
SubFoo.bar = SubFoo.bar
Foo.bar = Foo.bar
# This has kept two separate registries:
foo = Foo()
assert foo.bar(Alpha()) == "Alpha"
assert foo.bar(Beta()) == "Beta"
assert foo.bar(None) == "default"
subfoo = SubFoo()
assert subfoo.bar(Alpha()) == "default"
assert subfoo.bar(Beta()) == "default"
assert subfoo.bar(None) == "default"
def unmethodify(func):
"""Reverses methodify operation.
Given an object that is returned from a call to
:func:`reg.methodify` return the original object. This can be used to
discover the original object that was registered. You can apply
this to a function after it was attached as a method.
:param func: the methodified function.
:returns: the original function.
"""
func = getattr(func, "__func__", func)
return func.__globals__.get("_func", func)
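# Illustrative sketch (not part of the original file): unmethodify acts as the
# inverse of methodify for the objects registered in the tests above, e.g.
#   def handler(obj):
#       return "handled"
#   m = methodify(handler, "app")
#   assert unmethodify(m) is handler
# The lookup above assumes methodify stores the original callable under "_func"
# in the generated function's globals.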
| 24.305106
| 74
| 0.614234
|
8303f491d50b034e30739abf07d18cfed5bc216b
| 36,820
|
py
|
Python
|
second/utils/eval.py
|
CMU-Light-Curtains/ObjectDetection
|
d2002f6d1ebcf05a78f179bf0474703ed0211ac0
|
[
"BSD-3-Clause"
] | null | null | null |
second/utils/eval.py
|
CMU-Light-Curtains/ObjectDetection
|
d2002f6d1ebcf05a78f179bf0474703ed0211ac0
|
[
"BSD-3-Clause"
] | null | null | null |
second/utils/eval.py
|
CMU-Light-Curtains/ObjectDetection
|
d2002f6d1ebcf05a78f179bf0474703ed0211ac0
|
[
"BSD-3-Clause"
] | null | null | null |
import io as sysio
import time
import numba
import numpy as np
from scipy.interpolate import interp1d
from second.core.non_max_suppression.nms_gpu import rotate_iou_gpu_eval
from second.core import box_np_ops
@numba.jit
def get_thresholds(scores: np.ndarray, num_gt, num_sample_pts=41):
scores.sort()
scores = scores[::-1]
current_recall = 0
thresholds = []
for i, score in enumerate(scores):
l_recall = (i + 1) / num_gt
if i < (len(scores) - 1):
r_recall = (i + 2) / num_gt
else:
r_recall = l_recall
if (((r_recall - current_recall) < (current_recall - l_recall))
and (i < (len(scores) - 1))):
continue
# recall = l_recall
thresholds.append(score)
current_recall += 1 / (num_sample_pts - 1.0)
# print(len(thresholds), len(scores), num_gt)
return thresholds
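# Illustrative note (not part of the original file): get_thresholds walks the
# detection scores in descending order and keeps roughly one score per
# 1 / (num_sample_pts - 1) increment of recall, so with num_sample_pts=41 it
# returns at most 41 score thresholds spaced ~0.025 apart in recall. These
# thresholds are later used to sample the precision/recall curve.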
def clean_data(gt_anno, dt_anno, current_class, difficulty):
CLASS_NAMES = [
'car', 'pedestrian', 'cyclist', 'van', 'person_sitting', 'car',
'tractor', 'trailer'
]
MIN_HEIGHT = [40, 25, 25]
MAX_OCCLUSION = [0, 1, 2]
MAX_TRUNCATION = [0.15, 0.3, 0.5]
dc_bboxes, ignored_gt, ignored_dt = [], [], []
current_cls_name = CLASS_NAMES[current_class].lower()
num_gt = len(gt_anno["name"])
num_dt = len(dt_anno["name"])
num_valid_gt = 0
for i in range(num_gt):
bbox = gt_anno["bbox"][i]
gt_name = gt_anno["name"][i].lower()
height = bbox[3] - bbox[1]
valid_class = -1
if (gt_name == current_cls_name):
valid_class = 1
elif (current_cls_name == "Pedestrian".lower()
and "Person_sitting".lower() == gt_name):
valid_class = 0
elif (current_cls_name == "Car".lower() and "Van".lower() == gt_name):
valid_class = 0
else:
valid_class = -1
ignore = False
if ((gt_anno["occluded"][i] > MAX_OCCLUSION[difficulty])
or (gt_anno["truncated"][i] > MAX_TRUNCATION[difficulty])
or (height <= MIN_HEIGHT[difficulty])):
# if gt_anno["difficulty"][i] > difficulty or gt_anno["difficulty"][i] == -1:
ignore = True
if valid_class == 1 and not ignore:
ignored_gt.append(0)
num_valid_gt += 1
elif (valid_class == 0 or (ignore and (valid_class == 1))):
ignored_gt.append(1)
else:
ignored_gt.append(-1)
# for i in range(num_gt):
if gt_anno["name"][i] == "DontCare":
dc_bboxes.append(gt_anno["bbox"][i])
for i in range(num_dt):
if (dt_anno["name"][i].lower() == current_cls_name):
valid_class = 1
else:
valid_class = -1
height = abs(dt_anno["bbox"][i, 3] - dt_anno["bbox"][i, 1])
if height < MIN_HEIGHT[difficulty]:
ignored_dt.append(1)
elif valid_class == 1:
ignored_dt.append(0)
else:
ignored_dt.append(-1)
return num_valid_gt, ignored_gt, ignored_dt, dc_bboxes
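# Illustrative note (not part of the original file): the ignore codes produced
# above follow the original KITTI convention:
#    0 -> evaluate this box,
#    1 -> ignore it (wrong difficulty, or a neighboring class such as Van/Car),
#   -1 -> a different class entirely, not considered at all.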
@numba.jit(nopython=True)
def image_box_overlap(boxes, query_boxes, criterion=-1):
N = boxes.shape[0]
K = query_boxes.shape[0]
overlaps = np.zeros((N, K), dtype=boxes.dtype)
for k in range(K):
qbox_area = ((query_boxes[k, 2] - query_boxes[k, 0]) *
(query_boxes[k, 3] - query_boxes[k, 1]))
for n in range(N):
iw = (min(boxes[n, 2], query_boxes[k, 2]) - max(
boxes[n, 0], query_boxes[k, 0]))
if iw > 0:
ih = (min(boxes[n, 3], query_boxes[k, 3]) - max(
boxes[n, 1], query_boxes[k, 1]))
if ih > 0:
if criterion == -1:
ua = (
(boxes[n, 2] - boxes[n, 0]) *
(boxes[n, 3] - boxes[n, 1]) + qbox_area - iw * ih)
elif criterion == 0:
ua = ((boxes[n, 2] - boxes[n, 0]) *
(boxes[n, 3] - boxes[n, 1]))
elif criterion == 1:
ua = qbox_area
else:
ua = 1.0
overlaps[n, k] = iw * ih / ua
return overlaps
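# Illustrative worked example (not part of the original file), criterion=-1 (IoU):
#   box   = [0, 0, 2, 2]  (area 4)
#   query = [1, 1, 3, 3]  (area 4)
#   intersection = 1 * 1 = 1, union = 4 + 4 - 1 = 7, overlap = 1 / 7 ~= 0.143
# criterion=0 divides by the box area only, criterion=1 by the query box area.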
def bev_box_overlap(boxes, qboxes, criterion=-1, stable=True):
# riou = box_np_ops.riou_cc(boxes, qboxes)
riou = rotate_iou_gpu_eval(boxes, qboxes, criterion)
return riou
@numba.jit(nopython=True, parallel=True)
def box3d_overlap_kernel(boxes,
qboxes,
rinc,
criterion=-1,
z_axis=1,
z_center=1.0):
"""
z_axis: the z (height) axis.
z_center: unified z (height) center of box.
"""
N, K = boxes.shape[0], qboxes.shape[0]
for i in range(N):
for j in range(K):
if rinc[i, j] > 0:
min_z = min(
boxes[i, z_axis] + boxes[i, z_axis + 3] * (1 - z_center),
qboxes[j, z_axis] + qboxes[j, z_axis + 3] * (1 - z_center))
max_z = max(
boxes[i, z_axis] - boxes[i, z_axis + 3] * z_center,
qboxes[j, z_axis] - qboxes[j, z_axis + 3] * z_center)
iw = min_z - max_z
if iw > 0:
area1 = boxes[i, 3] * boxes[i, 4] * boxes[i, 5]
area2 = qboxes[j, 3] * qboxes[j, 4] * qboxes[j, 5]
inc = iw * rinc[i, j]
if criterion == -1:
ua = (area1 + area2 - inc)
elif criterion == 0:
ua = area1
elif criterion == 1:
ua = area2
else:
ua = 1.0
rinc[i, j] = inc / ua
else:
rinc[i, j] = 0.0
def box3d_overlap(boxes, qboxes, criterion=-1, z_axis=1, z_center=1.0):
"""kitti camera format z_axis=1.
"""
bev_axes = list(range(7))
bev_axes.pop(z_axis + 3)
bev_axes.pop(z_axis)
# t = time.time()
# rinc = box_np_ops.rinter_cc(boxes[:, bev_axes], qboxes[:, bev_axes])
rinc = rotate_iou_gpu_eval(boxes[:, bev_axes], qboxes[:, bev_axes], 2)
# print("riou time", time.time() - t)
box3d_overlap_kernel(boxes, qboxes, rinc, criterion, z_axis, z_center)
return rinc
@numba.jit(nopython=True)
def compute_statistics_jit(overlaps,
gt_datas,
dt_datas,
ignored_gt,
ignored_det,
dc_bboxes,
metric,
min_overlap,
thresh=0,
compute_fp=False,
compute_aos=False):
det_size = dt_datas.shape[0]
gt_size = gt_datas.shape[0]
dt_scores = dt_datas[:, -1]
dt_alphas = dt_datas[:, 4]
gt_alphas = gt_datas[:, 4]
dt_bboxes = dt_datas[:, :4]
# gt_bboxes = gt_datas[:, :4]
    assigned_detection = [False] * det_size  # if detection has already been assigned to a GT box
ignored_threshold = [False] * det_size # detections to be ignored because score is less than thresh
if compute_fp:
for i in range(det_size):
if (dt_scores[i] < thresh):
ignored_threshold[i] = True
NO_DETECTION = -10000000
tp, fp, fn, similarity = 0, 0, 0, 0
# thresholds = [0.0]
# delta = [0.0]
thresholds = np.zeros((gt_size, ))
thresh_idx = 0
delta = np.zeros((gt_size, ))
delta_idx = 0
for i in range(gt_size):
if ignored_gt[i] == -1:
continue
det_idx = -1
valid_detection = NO_DETECTION # if this GT box has been assigned some detection (ignored_dt = 0 or 1)
max_overlap = 0
assigned_ignored_det = False # if this GT box is currently assigned a detection that is to be ignored
# This loop assigns every GT box (ignored_gt = 0 or 1) a detection conditioned on:
# - there should be some min_overlap
# - the detection box should already not have been assigned
        # (can be assigned to both ignored and unignored GT boxes: ignore_gt = 0/1)
# in the following order of preference:
# 1. Detection is not ignored (ignore_dt = 0) with the highest overlap (max_overlap)
        # 2. Detection is ignored (ignore_dt = 1) with some overlap (there is no ordering, just a greedy selection).
for j in range(det_size):
if (ignored_det[j] == -1):
continue
if (assigned_detection[j]):
continue
if (ignored_threshold[j]):
continue
overlap = overlaps[j, i]
dt_score = dt_scores[j]
if (not compute_fp and (overlap > min_overlap)
and dt_score > valid_detection):
det_idx = j
valid_detection = dt_score
elif (compute_fp and (overlap > min_overlap)
and (overlap > max_overlap or assigned_ignored_det)
and ignored_det[j] == 0):
max_overlap = overlap
det_idx = j
valid_detection = 1
assigned_ignored_det = False
elif (compute_fp and (overlap > min_overlap)
and (valid_detection == NO_DETECTION)
and ignored_det[j] == 1):
det_idx = j
valid_detection = 1
assigned_ignored_det = True
# case ignored_gt == 0 (un-ignored GT):
# case assigned detection = "no detection":
# - fn += 1
# case assigned detection = "ignored_dt = 1":
# - No tp/fp (since detection is ignored)/fn
# case assigned detection = "ignored_dt = 0":
# - tp
# case ignored_gt == 1 (ignored GT):
# case assigned detection = "no detection":
# - No tp/fp/fn
# case assigned detection = "ignored_dt = 1":
# - No tp/fp/fn
# case assigned detection = "ignored_dt = 0":
# - No tp/fp/fn
# Note: all detections (ignored or not) if assigned to any gt
# (ignored or not) will be marked as assigned (assigned_detection[] = 1)
if (valid_detection == NO_DETECTION) and ignored_gt[i] == 0:
fn += 1
elif ((valid_detection != NO_DETECTION)
and (ignored_gt[i] == 1 or ignored_det[det_idx] == 1)):
assigned_detection[det_idx] = True
elif valid_detection != NO_DETECTION:
# only a tp add a threshold.
tp += 1
# thresholds.append(dt_scores[det_idx])
thresholds[thresh_idx] = dt_scores[det_idx]
thresh_idx += 1
if compute_aos:
# delta.append(gt_alphas[i] - dt_alphas[det_idx])
delta[delta_idx] = gt_alphas[i] - dt_alphas[det_idx]
delta_idx += 1
assigned_detection[det_idx] = True
if compute_fp:
for i in range(det_size):
if (not (assigned_detection[i] or ignored_det[i] == -1
or ignored_det[i] == 1 or ignored_threshold[i])):
fp += 1
nstuff = 0
if metric == 0:
overlaps_dt_dc = image_box_overlap(dt_bboxes, dc_bboxes, 0)
for i in range(dc_bboxes.shape[0]):
for j in range(det_size):
if (assigned_detection[j]):
continue
if (ignored_det[j] == -1 or ignored_det[j] == 1):
continue
if (ignored_threshold[j]):
continue
if overlaps_dt_dc[j, i] > min_overlap:
assigned_detection[j] = True
nstuff += 1
fp -= nstuff
if compute_aos:
tmp = np.zeros((fp + delta_idx, ))
# tmp = [0] * fp
for i in range(delta_idx):
tmp[i + fp] = (1.0 + np.cos(delta[i])) / 2.0
# tmp.append((1.0 + np.cos(delta[i])) / 2.0)
# assert len(tmp) == fp + tp
# assert len(delta) == tp
if tp > 0 or fp > 0:
similarity = np.sum(tmp)
else:
similarity = -1
return tp, fp, fn, similarity, thresholds[:thresh_idx]
def get_split_parts(num, num_part):
same_part = num // num_part
remain_num = num % num_part
parts = []
if same_part > 0:
parts += [same_part] * num_part
if remain_num > 0:
parts += [remain_num]
return parts
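# Illustrative worked example (not part of the original file):
#   get_split_parts(10, 3) -> [3, 3, 3, 1]   (10 // 3 = 3 per part, remainder 1)
#   get_split_parts(8, 4)  -> [2, 2, 2, 2]   (no remainder, no extra part)
# The parts always sum to the original number of examples.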
@numba.jit(nopython=True)
def fused_compute_statistics(overlaps,
pr,
gt_nums,
dt_nums,
dc_nums,
gt_datas,
dt_datas,
dontcares,
ignored_gts,
ignored_dets,
metric,
min_overlap,
thresholds,
compute_aos=False):
gt_num = 0
dt_num = 0
dc_num = 0
for i in range(gt_nums.shape[0]):
for t, thresh in enumerate(thresholds):
overlap = overlaps[dt_num:dt_num + dt_nums[i], gt_num:gt_num +
gt_nums[i]]
gt_data = gt_datas[gt_num:gt_num + gt_nums[i]]
dt_data = dt_datas[dt_num:dt_num + dt_nums[i]]
ignored_gt = ignored_gts[gt_num:gt_num + gt_nums[i]]
ignored_det = ignored_dets[dt_num:dt_num + dt_nums[i]]
dontcare = dontcares[dc_num:dc_num + dc_nums[i]]
tp, fp, fn, similarity, _ = compute_statistics_jit(
overlap,
gt_data,
dt_data,
ignored_gt,
ignored_det,
dontcare,
metric,
min_overlap=min_overlap,
thresh=thresh,
compute_fp=True,
compute_aos=compute_aos)
pr[t, 0] += tp
pr[t, 1] += fp
pr[t, 2] += fn
if similarity != -1:
pr[t, 3] += similarity
gt_num += gt_nums[i]
dt_num += dt_nums[i]
dc_num += dc_nums[i]
def calculate_iou_partly(gt_annos,
dt_annos,
metric,
num_parts=50,
z_axis=1,
z_center=1.0):
"""fast iou algorithm. this function can be used independently to
do result analysis.
Args:
gt_annos: dict, must from get_label_annos() in kitti_common.py
dt_annos: dict, must from get_label_annos() in kitti_common.py
metric: eval type. 0: bbox, 1: bev, 2: 3d
num_parts: int. a parameter for fast calculate algorithm
z_axis: height axis. kitti camera use 1, lidar use 2.
"""
assert len(gt_annos) == len(dt_annos)
total_dt_num = np.stack([len(a["name"]) for a in dt_annos], 0)
total_gt_num = np.stack([len(a["name"]) for a in gt_annos], 0)
num_examples = len(gt_annos)
split_parts = get_split_parts(num_examples, num_parts)
parted_overlaps = []
example_idx = 0
bev_axes = list(range(3))
bev_axes.pop(z_axis)
for num_part in split_parts:
gt_annos_part = gt_annos[example_idx:example_idx + num_part]
dt_annos_part = dt_annos[example_idx:example_idx + num_part]
if metric == 0:
gt_boxes = np.concatenate([a["bbox"] for a in gt_annos_part], 0)
dt_boxes = np.concatenate([a["bbox"] for a in dt_annos_part], 0)
overlap_part = image_box_overlap(gt_boxes, dt_boxes)
elif metric == 1:
loc = np.concatenate(
[a["location"][:, bev_axes] for a in gt_annos_part], 0)
dims = np.concatenate(
[a["dimensions"][:, bev_axes] for a in gt_annos_part], 0)
rots = np.concatenate([a["rotation_y"] for a in gt_annos_part], 0)
gt_boxes = np.concatenate([loc, dims, rots[..., np.newaxis]],
axis=1)
loc = np.concatenate(
[a["location"][:, bev_axes] for a in dt_annos_part], 0)
dims = np.concatenate(
[a["dimensions"][:, bev_axes] for a in dt_annos_part], 0)
rots = np.concatenate([a["rotation_y"] for a in dt_annos_part], 0)
dt_boxes = np.concatenate([loc, dims, rots[..., np.newaxis]],
axis=1)
overlap_part = bev_box_overlap(gt_boxes,
dt_boxes).astype(np.float64)
elif metric == 2:
loc = np.concatenate([a["location"] for a in gt_annos_part], 0)
dims = np.concatenate([a["dimensions"] for a in gt_annos_part], 0)
rots = np.concatenate([a["rotation_y"] for a in gt_annos_part], 0)
gt_boxes = np.concatenate([loc, dims, rots[..., np.newaxis]],
axis=1)
loc = np.concatenate([a["location"] for a in dt_annos_part], 0)
dims = np.concatenate([a["dimensions"] for a in dt_annos_part], 0)
rots = np.concatenate([a["rotation_y"] for a in dt_annos_part], 0)
dt_boxes = np.concatenate([loc, dims, rots[..., np.newaxis]],
axis=1)
overlap_part = box3d_overlap(
gt_boxes, dt_boxes, z_axis=z_axis,
z_center=z_center).astype(np.float64)
else:
raise ValueError("unknown metric")
parted_overlaps.append(overlap_part)
example_idx += num_part
overlaps = []
example_idx = 0
for j, num_part in enumerate(split_parts):
gt_annos_part = gt_annos[example_idx:example_idx + num_part]
dt_annos_part = dt_annos[example_idx:example_idx + num_part]
gt_num_idx, dt_num_idx = 0, 0
for i in range(num_part):
gt_box_num = total_gt_num[example_idx + i]
dt_box_num = total_dt_num[example_idx + i]
overlaps.append(
parted_overlaps[j][gt_num_idx:gt_num_idx +
gt_box_num, dt_num_idx:dt_num_idx +
dt_box_num])
gt_num_idx += gt_box_num
dt_num_idx += dt_box_num
example_idx += num_part
return overlaps, parted_overlaps, total_gt_num, total_dt_num
def _prepare_data(gt_annos, dt_annos, current_class, difficulty):
gt_datas_list = []
dt_datas_list = []
total_dc_num = []
ignored_gts, ignored_dets, dontcares = [], [], []
total_num_valid_gt = 0
for i in range(len(gt_annos)):
rets = clean_data(gt_annos[i], dt_annos[i], current_class, difficulty)
num_valid_gt, ignored_gt, ignored_det, dc_bboxes = rets
ignored_gts.append(np.array(ignored_gt, dtype=np.int64))
ignored_dets.append(np.array(ignored_det, dtype=np.int64))
if len(dc_bboxes) == 0:
dc_bboxes = np.zeros((0, 4)).astype(np.float64)
else:
dc_bboxes = np.stack(dc_bboxes, 0).astype(np.float64)
total_dc_num.append(dc_bboxes.shape[0])
dontcares.append(dc_bboxes)
total_num_valid_gt += num_valid_gt
gt_datas = np.concatenate(
[gt_annos[i]["bbox"], gt_annos[i]["alpha"][..., np.newaxis]], 1)
dt_datas = np.concatenate([
dt_annos[i]["bbox"], dt_annos[i]["alpha"][..., np.newaxis],
dt_annos[i]["score"][..., np.newaxis]
], 1)
gt_datas_list.append(gt_datas)
dt_datas_list.append(dt_datas)
total_dc_num = np.stack(total_dc_num, axis=0)
return (gt_datas_list, dt_datas_list, ignored_gts, ignored_dets, dontcares,
total_dc_num, total_num_valid_gt)
def eval_class_v3(gt_annos,
dt_annos,
current_classes,
difficultys,
metric,
min_overlaps,
compute_aos=False,
z_axis=1,
z_center=1.0,
num_parts=50):
"""Kitti eval. support 2d/bev/3d/aos eval. support 0.5:0.05:0.95 coco AP.
Args:
gt_annos: dict, must from get_label_annos() in kitti_common.py
dt_annos: dict, must from get_label_annos() in kitti_common.py
current_class: int, 0: car, 1: pedestrian, 2: cyclist
difficulty: int. eval difficulty, 0: easy, 1: normal, 2: hard
metric: eval type. 0: bbox, 1: bev, 2: 3d
min_overlap: float, min overlap. official:
[[0.7, 0.5, 0.5], [0.7, 0.5, 0.5], [0.7, 0.5, 0.5]]
format: [metric, class]. choose one from matrix above.
num_parts: int. a parameter for fast calculate algorithm
Returns:
dict of recall, precision and aos
"""
assert len(gt_annos) == len(dt_annos)
num_examples = len(gt_annos)
split_parts = get_split_parts(num_examples, num_parts)
rets = calculate_iou_partly(
dt_annos,
gt_annos,
metric,
num_parts,
z_axis=z_axis,
z_center=z_center)
overlaps, parted_overlaps, total_dt_num, total_gt_num = rets
N_SAMPLE_PTS = 41
num_minoverlap = len(min_overlaps)
num_class = len(current_classes)
num_difficulty = len(difficultys)
precision = np.zeros(
[num_class, num_difficulty, num_minoverlap, N_SAMPLE_PTS])
recall = np.zeros(
[num_class, num_difficulty, num_minoverlap, N_SAMPLE_PTS])
aos = np.zeros([num_class, num_difficulty, num_minoverlap, N_SAMPLE_PTS])
all_thresholds = np.zeros([num_class, num_difficulty, num_minoverlap, N_SAMPLE_PTS])
for m, current_class in enumerate(current_classes):
for l, difficulty in enumerate(difficultys):
rets = _prepare_data(gt_annos, dt_annos, current_class, difficulty)
(gt_datas_list, dt_datas_list, ignored_gts, ignored_dets,
dontcares, total_dc_num, total_num_valid_gt) = rets
for k, min_overlap in enumerate(min_overlaps[:, metric, m]):
thresholdss = []
for i in range(len(gt_annos)):
rets = compute_statistics_jit(
overlaps[i],
gt_datas_list[i],
dt_datas_list[i],
ignored_gts[i],
ignored_dets[i],
dontcares[i],
metric,
min_overlap=min_overlap,
thresh=0.0,
compute_fp=False)
tp, fp, fn, similarity, thresholds = rets
thresholdss += thresholds.tolist()
thresholdss = np.array(thresholdss)
thresholds = get_thresholds(thresholdss, total_num_valid_gt)
thresholds = np.array(thresholds)
# print(thresholds)
all_thresholds[m, l, k, :len(thresholds)] = thresholds
pr = np.zeros([len(thresholds), 4])
idx = 0
for j, num_part in enumerate(split_parts):
gt_datas_part = np.concatenate(
gt_datas_list[idx:idx + num_part], 0)
dt_datas_part = np.concatenate(
dt_datas_list[idx:idx + num_part], 0)
dc_datas_part = np.concatenate(
dontcares[idx:idx + num_part], 0)
ignored_dets_part = np.concatenate(
ignored_dets[idx:idx + num_part], 0)
ignored_gts_part = np.concatenate(
ignored_gts[idx:idx + num_part], 0)
fused_compute_statistics(
parted_overlaps[j],
pr,
total_gt_num[idx:idx + num_part],
total_dt_num[idx:idx + num_part],
total_dc_num[idx:idx + num_part],
gt_datas_part,
dt_datas_part,
dc_datas_part,
ignored_gts_part,
ignored_dets_part,
metric,
min_overlap=min_overlap,
thresholds=thresholds,
compute_aos=compute_aos)
idx += num_part
for i in range(len(thresholds)):
# recall[m, l, k, i] = pr[i, 0] / (pr[i, 0] + pr[i, 2])
precision[m, l, k, i] = pr[i, 0] / (pr[i, 0] + pr[i, 1])
if compute_aos:
aos[m, l, k, i] = pr[i, 3] / (pr[i, 0] + pr[i, 1])
for i in range(len(thresholds)):
precision[m, l, k, i] = np.max(
precision[m, l, k, i:], axis=-1)
# recall[m, l, k, i] = np.max(recall[m, l, k, :i + 1], axis=-1)
if compute_aos:
aos[m, l, k, i] = np.max(aos[m, l, k, i:], axis=-1)
# use interp to calculate recall
"""
current_recalls = np.linspace(0, 1, 41)
prec_unique, inds = np.unique(precision[m, l, k], return_index=True)
current_recalls = current_recalls[inds]
f = interp1d(prec_unique, current_recalls)
precs_for_recall = np.linspace(0, 1, 41)
max_prec = np.max(precision[m, l, k])
valid_prec = precs_for_recall < max_prec
num_valid_prec = valid_prec.sum()
recall[m, l, k, :num_valid_prec] = f(precs_for_recall[valid_prec])
"""
ret_dict = {
"recall": recall, # [num_class, num_difficulty, num_minoverlap, N_SAMPLE_PTS]
"precision": precision,
"orientation": aos,
"thresholds": all_thresholds,
"min_overlaps": min_overlaps,
}
return ret_dict
def get_mAP(prec):
sums = 0
for i in range(0, prec.shape[-1], 4):
sums = sums + prec[..., i]
return sums / 11 * 100
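# Illustrative note (not part of the original file): with N_SAMPLE_PTS = 41
# precision values, taking every 4th entry (indices 0, 4, ..., 40) gives the
# classic 11-point interpolated AP: the 11 samples correspond to recalls
# 0.0, 0.1, ..., 1.0, and the mean is scaled to a percentage.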
def do_eval_v2(gt_annos,
dt_annos,
current_classes,
min_overlaps,
compute_aos=False,
difficultys=(0, 1, 2),
z_axis=1,
z_center=1.0):
# min_overlaps: [num_minoverlap, metric, num_class]
ret = eval_class_v3(
gt_annos,
dt_annos,
current_classes,
difficultys,
0,
min_overlaps,
compute_aos,
z_axis=z_axis,
z_center=z_center)
# ret: [num_class, num_diff, num_minoverlap, num_sample_points]
mAP_bbox = get_mAP(ret["precision"])
mAP_aos = None
if compute_aos:
mAP_aos = get_mAP(ret["orientation"])
ret = eval_class_v3(
gt_annos,
dt_annos,
current_classes,
difficultys,
1,
min_overlaps,
z_axis=z_axis,
z_center=z_center)
mAP_bev = get_mAP(ret["precision"])
ret = eval_class_v3(
gt_annos,
dt_annos,
current_classes,
difficultys,
2,
min_overlaps,
z_axis=z_axis,
z_center=z_center)
mAP_3d = get_mAP(ret["precision"])
return mAP_bbox, mAP_bev, mAP_3d, mAP_aos
def do_eval_v3(gt_annos,
dt_annos,
current_classes,
min_overlaps,
compute_aos=False,
difficultys=(0, 1, 2),
z_axis=1,
z_center=1.0):
# min_overlaps: [num_minoverlap, metric, num_class]
types = ["bbox", "bev", "3d"]
metrics = {}
for i in range(3):
ret = eval_class_v3(
gt_annos,
dt_annos,
current_classes,
difficultys,
i,
min_overlaps,
compute_aos,
z_axis=z_axis,
z_center=z_center)
metrics[types[i]] = ret
return metrics
def do_coco_style_eval(gt_annos,
dt_annos,
current_classes,
overlap_ranges,
compute_aos,
z_axis=1,
z_center=1.0):
# overlap_ranges: [range, metric, num_class]
min_overlaps = np.zeros([10, *overlap_ranges.shape[1:]])
for i in range(overlap_ranges.shape[1]):
for j in range(overlap_ranges.shape[2]):
min_overlaps[:, i, j] = np.linspace(*overlap_ranges[:, i, j])
mAP_bbox, mAP_bev, mAP_3d, mAP_aos = do_eval_v2(
gt_annos,
dt_annos,
current_classes,
min_overlaps,
compute_aos,
z_axis=z_axis,
z_center=z_center)
# ret: [num_class, num_diff, num_minoverlap]
mAP_bbox = mAP_bbox.mean(-1)
mAP_bev = mAP_bev.mean(-1)
mAP_3d = mAP_3d.mean(-1)
if mAP_aos is not None:
mAP_aos = mAP_aos.mean(-1)
return mAP_bbox, mAP_bev, mAP_3d, mAP_aos
def print_str(value, *arg, sstream=None):
if sstream is None:
sstream = sysio.StringIO()
sstream.truncate(0)
sstream.seek(0)
print(value, *arg, file=sstream)
return sstream.getvalue()
def get_official_eval_result(gt_annos,
dt_annos,
current_classes,
difficultys=[0, 1, 2],
z_axis=1,
z_center=1.0):
"""
    gt_annos and dt_annos must contain the following keys:
[bbox, location, dimensions, rotation_y, score]
"""
overlap_mod = np.array([[0.7, 0.5, 0.5, 0.7, 0.5, 0.7, 0.7, 0.7],
[0.7, 0.5, 0.5, 0.7, 0.5, 0.7, 0.7, 0.7],
[0.7, 0.5, 0.5, 0.7, 0.5, 0.7, 0.7, 0.7]])
overlap_easy = np.array([[0.7, 0.5, 0.5, 0.7, 0.5, 0.5, 0.5, 0.5],
[0.5, 0.25, 0.25, 0.5, 0.25, 0.5, 0.5, 0.5],
[0.5, 0.25, 0.25, 0.5, 0.25, 0.5, 0.5, 0.5]])
    min_overlaps = np.stack([overlap_mod, overlap_easy], axis=0)  # [2, 3, 8]
class_to_name = {
0: 'Car',
1: 'Pedestrian',
2: 'Cyclist',
3: 'Van',
4: 'Person_sitting',
5: 'car',
6: 'tractor',
7: 'trailer',
}
name_to_class = {v: n for n, v in class_to_name.items()}
if not isinstance(current_classes, (list, tuple)):
current_classes = [current_classes]
current_classes_int = []
for curcls in current_classes:
if isinstance(curcls, str):
current_classes_int.append(name_to_class[curcls])
else:
current_classes_int.append(curcls)
current_classes = current_classes_int
min_overlaps = min_overlaps[:, :, current_classes]
result = ''
# check whether alpha is valid
compute_aos = False
for anno in dt_annos:
if anno['alpha'].shape[0] != 0:
if anno['alpha'][0] != -10:
compute_aos = True
break
metrics = do_eval_v3(
gt_annos,
dt_annos,
current_classes,
min_overlaps,
compute_aos,
difficultys,
z_axis=z_axis,
z_center=z_center)
detail = {}
for j, curcls in enumerate(current_classes):
# mAP threshold array: [num_minoverlap, metric, class]
# mAP result: [num_class, num_diff, num_minoverlap]
class_name = class_to_name[curcls]
detail[class_name] = {}
for i in range(min_overlaps.shape[0]):
mAPbbox = get_mAP(metrics["bbox"]["precision"][j, :, i])
mAPbev = get_mAP(metrics["bev"]["precision"][j, :, i])
mAP3d = get_mAP(metrics["3d"]["precision"][j, :, i])
detail[class_name][f"bbox@{min_overlaps[i, 0, j]:.2f}"] = mAPbbox.tolist()
detail[class_name][f"bev@{min_overlaps[i, 1, j]:.2f}"] = mAPbev.tolist()
detail[class_name][f"3d@{min_overlaps[i, 2, j]:.2f}"] = mAP3d.tolist()
result += print_str(
(f"{class_to_name[curcls]} "
"AP(Average Precision)@{:.2f}, {:.2f}, {:.2f}:".format(*min_overlaps[i, :, j])))
mAPbbox = ", ".join(f"{v:.2f}" for v in mAPbbox)
mAPbev = ", ".join(f"{v:.2f}" for v in mAPbev)
mAP3d = ", ".join(f"{v:.2f}" for v in mAP3d)
result += print_str(f"bbox AP:{mAPbbox}")
result += print_str(f"bev AP:{mAPbev}")
result += print_str(f"3d AP:{mAP3d}")
if compute_aos:
mAPaos = get_mAP(metrics["bbox"]["orientation"][j, :, i])
detail[class_name][f"aos"] = mAPaos.tolist()
mAPaos = ", ".join(f"{v:.2f}" for v in mAPaos)
result += print_str(f"aos AP:{mAPaos}")
return {
"result": result,
"detail": detail,
}
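# Illustrative sketch (not part of the original file): a typical call is assumed
# to look like
#   res = get_official_eval_result(gt_annos, dt_annos, ["Car", "Pedestrian"])
#   print(res["result"])   # human-readable AP table
#   res["detail"]          # nested dict of AP values per class/metric/overlap
# where gt_annos/dt_annos are the annotation lists described in the docstrings above.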
def get_coco_eval_result(gt_annos,
dt_annos,
current_classes,
z_axis=1,
z_center=1.0):
class_to_name = {
0: 'Car',
1: 'Pedestrian',
2: 'Cyclist',
3: 'Van',
4: 'Person_sitting',
5: 'car',
6: 'tractor',
7: 'trailer',
}
class_to_range = {
0: [0.5, 1.0, 0.05],
1: [0.25, 0.75, 0.05],
2: [0.25, 0.75, 0.05],
3: [0.5, 1.0, 0.05],
4: [0.25, 0.75, 0.05],
5: [0.5, 1.0, 0.05],
6: [0.5, 1.0, 0.05],
7: [0.5, 1.0, 0.05],
}
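    # NOTE (added comment): the mapping above is immediately overridden by the
    # second class_to_range definition below; only the second one, whose entries
    # are consumed as np.linspace(start, stop, num) arguments, is actually used.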
class_to_range = {
0: [0.5, 0.95, 10],
1: [0.25, 0.7, 10],
2: [0.25, 0.7, 10],
3: [0.5, 0.95, 10],
4: [0.25, 0.7, 10],
5: [0.5, 0.95, 10],
6: [0.5, 0.95, 10],
7: [0.5, 0.95, 10],
}
name_to_class = {v: n for n, v in class_to_name.items()}
if not isinstance(current_classes, (list, tuple)):
current_classes = [current_classes]
current_classes_int = []
for curcls in current_classes:
if isinstance(curcls, str):
current_classes_int.append(name_to_class[curcls])
else:
current_classes_int.append(curcls)
current_classes = current_classes_int
overlap_ranges = np.zeros([3, 3, len(current_classes)])
for i, curcls in enumerate(current_classes):
overlap_ranges[:, :, i] = np.array(
class_to_range[curcls])[:, np.newaxis]
result = ''
# check whether alpha is valid
compute_aos = False
for anno in dt_annos:
if anno['alpha'].shape[0] != 0:
if anno['alpha'][0] != -10:
compute_aos = True
break
mAPbbox, mAPbev, mAP3d, mAPaos = do_coco_style_eval(
gt_annos,
dt_annos,
current_classes,
overlap_ranges,
compute_aos,
z_axis=z_axis,
z_center=z_center)
detail = {}
for j, curcls in enumerate(current_classes):
class_name = class_to_name[curcls]
detail[class_name] = {}
# mAP threshold array: [num_minoverlap, metric, class]
# mAP result: [num_class, num_diff, num_minoverlap]
o_range = np.array(class_to_range[curcls])[[0, 2, 1]]
o_range[1] = (o_range[2] - o_range[0]) / (o_range[1] - 1)
result += print_str((f"{class_to_name[curcls]} "
"coco AP@{:.2f}:{:.2f}:{:.2f}:".format(*o_range)))
result += print_str((f"bbox AP:{mAPbbox[j, 0]:.2f}, "
f"{mAPbbox[j, 1]:.2f}, "
f"{mAPbbox[j, 2]:.2f}"))
result += print_str((f"bev AP:{mAPbev[j, 0]:.2f}, "
f"{mAPbev[j, 1]:.2f}, "
f"{mAPbev[j, 2]:.2f}"))
result += print_str((f"3d AP:{mAP3d[j, 0]:.2f}, "
f"{mAP3d[j, 1]:.2f}, "
f"{mAP3d[j, 2]:.2f}"))
detail[class_name][f"bbox"] = mAPbbox[j].tolist()
detail[class_name][f"bev"] = mAPbev[j].tolist()
detail[class_name][f"3d"] = mAP3d[j].tolist()
if compute_aos:
detail[class_name][f"aos"] = mAPaos[j].tolist()
result += print_str((f"aos AP:{mAPaos[j, 0]:.2f}, "
f"{mAPaos[j, 1]:.2f}, "
f"{mAPaos[j, 2]:.2f}"))
return {
"result": result,
"detail": detail,
}
| 39.170213
| 117
| 0.514177
|
4284a9266dfb2da4cddf9c92e33d95dbb7d84c86
| 2,031
|
py
|
Python
|
amrevaluation/utils.py
|
yichao-l/amr-eager-multilingual
|
6d96445a23ff493ceedea02712fbcceffe08b879
|
[
"BSD-2-Clause"
] | null | null | null |
amrevaluation/utils.py
|
yichao-l/amr-eager-multilingual
|
6d96445a23ff493ceedea02712fbcceffe08b879
|
[
"BSD-2-Clause"
] | null | null | null |
amrevaluation/utils.py
|
yichao-l/amr-eager-multilingual
|
6d96445a23ff493ceedea02712fbcceffe08b879
|
[
"BSD-2-Clause"
] | null | null | null |
#!/usr/bin/env python
#coding=utf-8
'''
Various routines used by scores.py
'''
def disambig(lst):
lst2 = []
for v in lst:
idx = 1
v_idx = v + '_0'
while str(v_idx) in lst2:
v_idx = v + '_' + str(idx)
idx += 1
lst2.append(str(v_idx))
return lst2
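# Illustrative worked example (not part of the original file):
#   disambig(['a', 'b', 'a']) -> ['a_0', 'b_0', 'a_1']
# i.e. every variable gets an index suffix, and repeated variables receive
# increasing indices so that the resulting names are unique.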
def concepts(v2c_dict):
return [str(v) for v in v2c_dict.values()]
def namedent(v2c_dict, triples):
return [str(v2c_dict[v1]) for (l,v1,v2) in triples if l == "name"]
def negations(v2c_dict, triples):
return [v2c_dict[v1] for (l,v1,v2) in triples if l == "polarity"]
def wikification(triples):
return [v2 for (l,v1,v2) in triples if l == "wiki"]
def reentrancies(v2c_dict, triples):
lst = []
vrs = []
for n in v2c_dict.keys():
parents = [(l,v1,v2) for (l,v1,v2) in triples if v2 == n and l != "instance"]
if len(parents) > 1:
#extract triples involving this (multi-parent) node
for t in parents:
lst.append(t)
vrs.extend([t[1],t[2]])
#collect var/concept pairs for all extracted nodes
dict1 = {}
for i in v2c_dict:
if i in vrs:
dict1[i] = v2c_dict[i]
return (lst, dict1)
def srl(v2c_dict, triples):
lst = []
vrs = []
for t in triples:
if t[0].startswith("ARG"):
#although the smatch code we use inverts the -of relations
            #there seem to be cases where this is not done, so we invert
#them here
if t[0].endswith("of"):
lst.append((t[0][0:-3],t[2],t[1]))
vrs.extend([t[2],t[1]])
else:
lst.append(t)
vrs.extend([t[1],t[2]])
#collect var/concept pairs for all extracted nodes
dict1 = {}
for i in v2c_dict:
if i in vrs:
dict1[i] = v2c_dict[i]
return (lst, dict1)
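# Illustrative note (not part of the original file): srl() normalizes inverted
# argument relations, e.g. the triple ("ARG0-of", "x", "y") is rewritten to
# ("ARG0", "y", "x") before being collected, so that all extracted triples use
# the non-inverted ARG direction.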
def var2concept(amr):
v2c = {}
for n, v in zip(amr.nodes, amr.node_values):
v2c[n] = v
return v2c
| 27.08
| 85
| 0.533235
|
e1767c03220a4b2ec1d1c6c2fd773d9b3838d43e
| 9,082
|
py
|
Python
|
test/backward_compatibility/check_backward_compatibility.py
|
suphoff/pytorch
|
7cdfd86a727ce01e41f2af81931064fb664513dd
|
[
"Intel"
] | null | null | null |
test/backward_compatibility/check_backward_compatibility.py
|
suphoff/pytorch
|
7cdfd86a727ce01e41f2af81931064fb664513dd
|
[
"Intel"
] | null | null | null |
test/backward_compatibility/check_backward_compatibility.py
|
suphoff/pytorch
|
7cdfd86a727ce01e41f2af81931064fb664513dd
|
[
"Intel"
] | null | null | null |
import argparse
import datetime
import re
import sys
from collections import defaultdict
import torch
from torch._C import parse_schema
# The date specifies how long the allowlist exclusion should apply.
#
# - If we NEVER give BC guarantee for an operator, you can put the
# date arbitrarily far in the future.
# - Otherwise, pick a date that is far enough in the future that you
# believe you can land your diff before then.
#
# Allowlist entries can be removed after the date listed on them passes.
#
# Allowlist item format:
# [
# 0: function name regex
# 1: date until which the allowlist entry is valid
# 2: (optional) function argument regex
# ]
#
# NB: function name DOES NOT include overload name!
ALLOW_LIST = [
("c10_experimental", datetime.date(2222, 1, 1)),
# Internal
("static", datetime.date(9999, 1, 1)),
("prim::ModuleDictIndex", datetime.date(9999, 1, 1)),
("prim::MKLDNNRelu6", datetime.date(9999, 1, 1)),
("prim::MKLDNNRelu6_", datetime.date(9999, 1, 1)),
("prim::Concat", datetime.date(9999, 1, 1)),
# Internal, profiler-specific ops
("profiler::_call_end_callbacks_on_jit_fut*", datetime.date(9999, 1, 1)),
("profiler::_record_function_enter", datetime.date(9999, 1, 1)),
("aten::linalg_matrix_rank", datetime.date(2021, 10, 30)),
("aten::linalg_pinv", datetime.date(2021, 10, 30)),
("aten::_cholesky_helper", datetime.date(9999, 1, 1)),
("aten::_lstsq_helper", datetime.date(9999, 1, 1)),
("aten::_syevd_helper", datetime.date(9999, 1, 1)),
("aten::_lu_solve_helper", datetime.date(9999, 1, 1)),
("aten::_lu_with_info", datetime.date(9999, 1, 1)),
("aten::_linalg_solve_out_helper_", datetime.date(9999, 1, 1)),
("aten::select_backward", datetime.date(9999, 1, 1)),
("aten::slice_backward", datetime.date(9999, 1, 1)),
("aten::diagonal_backward", datetime.date(9999, 1, 1)),
("aten::rowwise_prune", datetime.date(9999, 1, 1)),
("aten::adaptive_avg_pool3d_backward", datetime.date(9999, 1, 1)),
("aten::_embedding_bag_dense_backward", datetime.date(9999, 1, 1)),
("aten::randperm", datetime.date(9999, 1, 1)),
("aten::cudnn_convolution_backward", datetime.date(2022, 1, 31)),
("aten::cudnn_convolution_backward_input", datetime.date(2022, 1, 31)),
("aten::cudnn_convolution_backward_weight", datetime.date(2022, 1, 31)),
("aten::cudnn_convolution_transpose_backward", datetime.date(2022, 1, 31)),
("aten::cudnn_convolution_transpose_backward_input", datetime.date(2022, 1, 31)),
("aten::cudnn_convolution_transpose_backward_weight", datetime.date(2022, 1, 31)),
("aten::_slow_conv2d_forward", datetime.date(2022, 1, 31)),
("aten::_slow_conv2d_backward", datetime.date(2022, 1, 31)),
("aten::slow_conv3d_forward", datetime.date(2022, 1, 31)),
("aten::slow_conv3d_backward", datetime.date(2022, 1, 31)),
("aten::slow_conv_dilated2d_backward", datetime.date(2022, 1, 31)),
("aten::slow_conv_transpose2d", datetime.date(2022, 1, 31)),
("aten::slow_conv_transpose2d_backward", datetime.date(2022, 1, 31)),
("aten::slow_conv_transpose3d", datetime.date(2022, 1, 31)),
("aten::slow_conv_transpose3d_backward", datetime.date(2022, 1, 31)),
("aten::_log_softmax_backward_data", datetime.date(2021, 10, 21)),
("aten::_softmax_backward_data", datetime.date(2021, 10, 21)),
("aten::fused_moving_avg_obs_fake_quant", datetime.date(2021, 10, 21)),
("aten::_fused_moving_avg_obs_fq_helper", datetime.date(2021, 10, 21)),
("aten::_baddbmm_mkl_", datetime.date(2021, 10, 31)),
("aten::grid_sampler_2d_backward", datetime.date(2021, 10, 21)),
("aten::index_add.alpha", datetime.date(2021, 12, 31)),
("aten::index_add_.alpha", datetime.date(2021, 12, 31)),
("prim::TensorExprDynamicGuard", datetime.date(2021, 11, 20)),
("aten::split_with_sizes", datetime.date(2021, 11, 20)),
("aten::split", datetime.date(2021, 12, 20)),
("aten::vsplit", datetime.date(2021, 11, 20)),
("aten::tensor_split", datetime.date(2021, 11, 20)),
("aten::chunk", datetime.date(2021, 11, 20)),
("aten::unbind", datetime.date(2021, 11, 20)),
("aten::hsplit", datetime.date(2021, 11, 20)),
("aten::dsplit", datetime.date(2021, 11, 20)),
("aten::_convolution_nogroup", datetime.date(9999, 1, 1)),
("aten::miopen_convolution_backward", datetime.date(9999, 1, 1)),
("aten::miopen_convolution_backward_bias", datetime.date(9999, 1, 1)),
("aten::miopen_convolution_backward_input", datetime.date(9999, 1, 1)),
("aten::miopen_convolution_backward_weight", datetime.date(9999, 1, 1)),
("aten::miopen_convolution_transpose_backward", datetime.date(9999, 1, 1)),
("aten::miopen_convolution_transpose_backward_input", datetime.date(9999, 1, 1)),
("aten::miopen_convolution_transpose_backward_weight", datetime.date(9999, 1, 1)),
("aten::miopen_depthwise_convolution_backward", datetime.date(9999, 1, 1)),
("aten::miopen_depthwise_convolution_backward_input", datetime.date(9999, 1, 1)),
("aten::miopen_depthwise_convolution_backward_weight", datetime.date(9999, 1, 1)),
("caffe2::", datetime.date(2021, 10, 23)),
("prepacked::unpack_prepacked_sizes_conv2d", datetime.date(9999, 1, 1)),
("prepacked::unpack_prepacked_sizes_linear", datetime.date(9999, 1, 1)),
("q::_FloatToBfloat16Quantized", datetime.date(2021, 12, 21)),
("q::_Bfloat16QuantizedToFloat", datetime.date(2021, 12, 21)),
("aten::_inverse_helper", datetime.date(2021, 12, 31)),
]
ALLOW_LIST_COMPILED = [
(
re.compile(item[0]),
item[1],
re.compile(item[2]) if len(item) > 2 else None,
) for item in ALLOW_LIST if item[1] >= datetime.date.today()
]
def allow_listed(schema):
for item in ALLOW_LIST_COMPILED:
if item[0].search(str(schema)):
if len(item) > 2 and item[2] is not None:
# if arguments regex is present, use it
return bool(item[2].search(str(schema)))
return True
return False
# The nightly will fail to parse newly added syntax to schema declarations
# Add new schemas that will fail the nightly here
dont_parse_list = [
("_TorchScriptTesting.*", datetime.date(2099, 9, 17)),
("test_backend", datetime.date(2099, 9, 17)),
("dist_c10d", datetime.date(2099, 9, 17)),
]
def dont_parse(schema_line):
for item in dont_parse_list:
if item[1] < datetime.date.today():
continue
regexp = re.compile(item[0])
if regexp.search(schema_line):
return True
return False
def check_bc(existing_schemas):
new_schemas = torch._C._jit_get_all_schemas()
new_schemas += torch._C._jit_get_custom_class_schemas()
new_schema_dict = defaultdict(list)
for s in new_schemas:
new_schema_dict[s.name].append(s)
is_bc = True
broken_ops = []
for existing_schema in existing_schemas:
if allow_listed(existing_schema):
print("schema: ", str(existing_schema), " found on allowlist, skipping")
continue
print("processing existing schema: ", str(existing_schema))
matching_new_schemas = new_schema_dict.get(existing_schema.name, [])
found = False
for matching_new_schema in matching_new_schemas:
if matching_new_schema.is_backward_compatible_with(existing_schema):
found = True
break
if not found:
print(
"Can NOT find backward compatible schemas after changes "
"for schema {} from the following candidates:\n[\n{}\n]".format(
str(existing_schema),
"\n\t".join(str(s) for s in matching_new_schemas),
)
)
# TODO Print out more details about why candidates don't match.
broken_ops.append(str(existing_schema))
is_bc = False
if is_bc:
print("Found backward compatible schemas for all existing schemas")
else:
print(
"The PR is introducing backward incompatible changes to the "
"operator library. Please contact PyTorch team to confirm "
"whether this change is wanted or not. \n\nBroken ops: "
"[\n\t{}\n]".format("\n\t".join(broken_ops))
)
return is_bc
if __name__ == "__main__":
parser = argparse.ArgumentParser(description="Process some integers.")
parser.add_argument(
"--existing-schemas",
help="filename to load existing schemas",
type=str,
default="schemas.txt",
)
args = parser.parse_args()
existing_schema_dict = dict()
slist = []
with open(args.existing_schemas, "r") as f:
while True:
line = f.readline()
if not line:
break
if dont_parse(line.strip()):
print("Not parsing schema line: ", line.strip())
continue
s = parse_schema(line.strip())
slist.append(s)
if not check_bc(slist):
sys.exit(1)
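# Illustrative sketch, not part of the original script: how the allowlist
# matching above behaves on schema strings. Assumes "aten::randperm" is still
# on ALLOW_LIST with an expiry of 9999-01-01, that the second operator name is
# not allowlisted, and that the schema strings themselves are made up here.
#
#   allow_listed("aten::randperm(int n) -> Tensor")      # -> True
#   allow_listed("aten::made_up_op(Tensor self) -> Tensor")  # -> False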
| 43.247619 | 86 | 0.65261 |
55139a6204a49499741ae86fde290b2327325399 | 28,546 | py | Python | pyjsdl/vector.py | jggatc/pyjsdl-ts | 393658a58d4b9ed71efaa9adbbf1d85e368a8cca | ["MIT"] | null | null | null | pyjsdl/vector.py | jggatc/pyjsdl-ts | 393658a58d4b9ed71efaa9adbbf1d85e368a8cca | ["MIT"] | null | null | null | pyjsdl/vector.py | jggatc/pyjsdl-ts | 393658a58d4b9ed71efaa9adbbf1d85e368a8cca | ["MIT"] | null | null | null |
#Pyjsdl - Copyright (C) 2021 James Garnon <https://gatc.ca/>
#Released under the MIT License <https://opensource.org/licenses/MIT>
from math import sqrt, sin, cos, atan2, pi, floor
class Vector2(object):
"""
Vector2 - 2-dimensional vector.
Operator and index functionality requires __pragma__ ('opov').
"""
__slots__ = ['_x', '_y']
# __pragma__ ('kwargs')
def __init__(self, *args, **kwargs):
l = len(args)
if l == 2:
self._x = float(args[0])
self._y = float(args[1])
elif l == 1:
if isinstance(args[0], (int, float)):
self._x = float(args[0])
self._y = float(args[0])
else:
self._x = float(args[0][0]) # __:opov
self._y = float(args[0][1]) # __:opov
else:
if len(kwargs.keys()) > 0:
if 'x' in kwargs.keys() and 'y' in kwargs.keys():
self._x = float(kwargs['x'])
self._y = float(kwargs['y'])
elif 'x' in kwargs.keys():
self._x = float(kwargs['x'])
self._y = float(kwargs['x'])
else:
self._x = float(kwargs['y'])
self._y = float(kwargs['y'])
else:
self._x = 0.0
self._y = 0.0
# __pragma__ ('nokwargs')
@property
def x(self):
return self._x
@x.setter
def x(self, val):
try:
self._x = float(val)
except ValueError:
raise TypeError('float is required')
@x.deleter
def x(self):
raise TypeError('Cannot delete the x attribute')
@property
def y(self):
return self._y
@y.setter
def y(self, val):
try:
self._y = float(val)
except ValueError:
raise TypeError('float is required')
@y.deleter
def y(self):
raise TypeError('Cannot delete the y attribute')
def __str__(self):
return '[{}, {}]'.format(self._x, self._y)
def __repr__(self):
return '<{}({}, {})>'.format(self.__class__.__name__,
self._x, self._y)
def __getitem__(self, index):
if index in (0, -2):
return self._x
elif index in (1, -1):
return self._y
elif isinstance(index, slice):
return [self._x, self._y][index]
else:
raise IndexError
def __setitem__(self, index, val):
if index == 0:
try:
self._x = float(val)
except ValueError:
raise TypeError
elif index == 1:
try:
self._y = float(val)
except ValueError:
raise TypeError
elif isinstance(index, slice):
l = [self._x, self._y]
l[index] = val
if len(l) != 2:
raise ValueError
self._x = float(l[0])
self._y = float(l[1])
else:
raise IndexError
def __delitem__(self, index):
raise TypeError('Deletion of vector components is not supported')
def __getslice__(self, start, stop):
return [self._x, self._y][start:stop]
def __setslice__(self, lower, upper, val):
l = [self._x, self._y]
l[lower:upper] = val
if len(l) != 2:
raise ValueError
self._x = float(l[0])
self._y = float(l[1])
def __iter__(self):
for val in (self._x, self._y):
yield val
def __len__(self):
return 2
def __bool__(self):
return bool(self._x or self._y)
def __nonzero__(self):
return bool(self._x or self._y)
def dot(self, vector):
"""
Return dot product with other vector.
"""
vector_x = vector[0] # __:opov
vector_y = vector[1] # __:opov
return (self._x * vector_x) + (self._y * vector_y)
def cross(self, vector):
"""
Return cross product with other vector.
"""
vector_x = vector[0] # __:opov
vector_y = vector[1] # __:opov
return (self._x * vector_y) - (self._y * vector_x)
def magnitude(self):
"""
Return magnitude of vector.
"""
return sqrt((self._x**2) + (self._y**2))
def magnitude_squared(self):
"""
Return squared magnitude of vector.
"""
return ((self._x**2) + (self._y**2))
def length(self):
#js keyword, use magnitude.
"""
Return length of vector.
"""
return sqrt((self._x**2) + (self._y**2))
def length_squared(self):
"""
Return squared length of vector.
"""
return ((self._x**2) + (self._y**2))
def normalize(self):
"""
Return normalized vector.
"""
mag = self.magnitude()
if mag == 0:
raise ValueError('Cannot normalize vector of zero length')
return Vector2(self._x/mag, self._y/mag)
def normalize_ip(self):
"""
        Normalize this vector in place.
"""
mag = self.magnitude()
if mag == 0:
raise ValueError('Cannot normalize vector of zero length')
self._x /= mag
self._y /= mag
return None
def is_normalized(self):
"""
Check whether vector is normalized.
"""
return self.magnitude() == 1
def scale_to_length(self, length):
"""
Scale vector to length.
"""
mag = self.magnitude()
if mag == 0:
raise ValueError('Cannot scale vector of zero length')
self._x = (self._x/mag) * length
self._y = (self._y/mag) * length
return None
def reflect(self, vector):
"""
Return reflected vector at given vector.
"""
vector_x = vector[0] # __:opov
vector_y = vector[1] # __:opov
vn = (self._x * vector_x) + (self._y * vector_y)
nn = (vector_x * vector_x) + (vector_y * vector_y)
if nn == 0:
raise ValueError('Cannot reflect from normal of zero length')
c = 2 * vn/nn
return Vector2(self._x-(vector_x*c), self._y-(vector_y*c))
def reflect_ip(self, vector):
"""
        Reflect this vector at the given vector in place.
"""
vector_x = vector[0] # __:opov
vector_y = vector[1] # __:opov
vn = (self._x * vector_x) + (self._y * vector_y)
nn = (vector_x * vector_x) + (vector_y * vector_y)
if nn == 0:
raise ValueError('Cannot reflect from normal of zero length')
c = 2 * vn/nn
self._x -= (vector_x*c)
self._y -= (vector_y*c)
return None
def distance_to(self, vector):
"""
Return distance to given vector.
"""
vector_x = vector[0] # __:opov
vector_y = vector[1] # __:opov
return sqrt((self._x-vector_x)**2 + (self._y-vector_y)**2)
def distance_squared_to(self, vector):
"""
Return squared distance to given vector.
"""
vector_x = vector[0] # __:opov
vector_y = vector[1] # __:opov
return (self._x-vector_x)**2 + (self._y-vector_y)**2
def lerp(self, vector, t):
"""
Return vector linear interpolated by t to the given vector.
"""
vector_x = vector[0] # __:opov
vector_y = vector[1] # __:opov
if t < 0.0 or t > 1.0:
raise ValueError('Argument t must be in range 0 to 1')
return Vector2(self._x*(1-t) + vector_x*t,
self._y*(1-t) + vector_y*t)
def slerp(self, vector, t):
"""
Return vector spherical interpolated by t to the given vector.
"""
if t < -1.0 or t > 1.0:
raise ValueError('Argument t must be in range -1 to 1')
if not hasattr(vector, '__iter__') or len(vector) != 2:
raise TypeError('The first argument must be a vector')
vector_x = vector[0] # __:opov
vector_y = vector[1] # __:opov
smag = sqrt((self._x**2) + (self._y**2))
vmag = sqrt((vector_x**2) + (vector_y**2))
if smag==0 or vmag==0:
raise ValueError('Cannot use slerp with zero-vector')
sx = self._x/smag
sy = self._y/smag
vx = vector_x/vmag
vy = vector_y/vmag
theta = atan2(vy, vx) - atan2(sy, sx)
_theta = abs(theta)
if _theta-pi > 0.000001:
theta -= (2*pi) * (theta/_theta)
elif -0.000001 < _theta-pi < 0.000001:
raise ValueError('Cannot use slerp on 180 degrees')
if t < 0.0:
t = -t
theta -= (2*pi) * (theta/abs(theta))
sin_theta = sin(theta)
if sin_theta:
a = sin((1.0-t)*theta) / sin_theta
b = sin(t*theta) / sin_theta
else:
a = 1.0
b = 0.0
v = Vector2((sx * a) + (vx * b),
(sy * a) + (vy * b))
smag = ((1.0-t)*smag) + (t*vmag)
v.x *= smag
v.y *= smag
return v
def elementwise(self):
"""
        Elementwise operation.
"""
return VectorElementwiseProxy(self._x, self._y)
def rotate(self, angle):
"""
Return vector rotated by angle in degrees.
"""
rad = angle/180.0*pi
c = round(cos(rad),6)
s = round(sin(rad),6)
return Vector2((c*self._x) - (s*self._y),
(s*self._x) + (c*self._y))
def rotate_rad(self, angle):
"""
Return vector rotated by angle in radians.
"""
c = cos(angle)
s = sin(angle)
return Vector2((c*self._x) - (s*self._y),
(s*self._x) + (c*self._y))
def rotate_ip(self, angle):
"""
Rotate vector by angle in degrees.
"""
r = angle/180.0*pi
c = round(cos(r),6)
s = round(sin(r),6)
x = self._x
y = self._y
self._x = (c*x) - (s*y)
self._y = (s*x) + (c*y)
return None
def rotate_ip_rad(self, angle):
"""
Rotate vector by angle in radians.
"""
c = cos(angle)
s = sin(angle)
x = self._x
y = self._y
self._x = (c*x) - (s*y)
self._y = (s*x) + (c*y)
return None
def angle_to(self, vector):
"""
Return angle to given vector.
"""
vector_x = vector[0] # __:opov
vector_y = vector[1] # __:opov
return (atan2(vector_y, vector_x)
- atan2(self._y, self._x)) * (180.0/pi)
def as_polar(self):
"""
Return radial distance and azimuthal angle.
"""
r = self.magnitude()
phi = atan2(self._y, self._x) * (180.0/pi)
return (r, phi)
def from_polar(self, coordinate):
"""
Set vector with polar coordinate tuple.
"""
if len(coordinate) != 2:
            raise TypeError('coordinate must be of length 2')
r = coordinate[0]
phi = coordinate[1] * (pi/180.0)
self._x = round(r * cos(phi), 6)
self._y = round(r * sin(phi), 6)
return None
# __pragma__ ('kwargs')
def update(self, *args, **kwargs):
"""
Update vector.
"""
l = len(args)
if l == 2:
self._x = float(args[0])
self._y = float(args[1])
elif l == 1:
if isinstance(args[0], (int, float)):
self._x = float(args[0])
self._y = float(args[0])
else:
self._x = float(args[0][0]) # __:opov
self._y = float(args[0][1]) # __:opov
else:
if len(kwargs.keys()) > 0:
if 'x' in kwargs.keys() and 'y' in kwargs.keys():
self._x = float(kwargs['x'])
self._y = float(kwargs['y'])
elif 'x' in kwargs.keys():
self._x = float(kwargs['x'])
self._y = float(kwargs['x'])
else:
self._x = float(kwargs['y'])
self._y = float(kwargs['y'])
else:
self._x = 0.0
self._y = 0.0
# __pragma__ ('nokwargs')
def __pos__(self):
return Vector2(self._x, self._y)
def __neg__(self):
return Vector2(-self._x, -self._y)
def __add__(self, other):
if hasattr(other, '__iter__'):
other_x = other[0] # __:opov
other_y = other[1] # __:opov
return Vector2(self._x + other_x, self._y + other_y)
else:
return Vector2(self._x + other, self._y + other)
def __sub__(self, other):
if hasattr(other, '__iter__'):
other_x = other[0] # __:opov
other_y = other[1] # __:opov
return Vector2(self._x - other_x, self._y - other_y)
else:
return Vector2(self._x - other, self._y - other)
def __mul__(self, other):
if hasattr(other, '__iter__'):
other_x = other[0] # __:opov
other_y = other[1] # __:opov
if not isinstance(other, VectorElementwiseProxy):
return (self._x * other_x) + (self._y * other_y)
else:
return Vector2(self._x * other_x, self._y * other_y)
else:
return Vector2(self._x * other, self._y * other)
def __div__(self, other):
if hasattr(other, '__iter__'):
other_x = other[0] # __:opov
other_y = other[1] # __:opov
return Vector2(self._x / other_x, self._y / other_y)
else:
return Vector2(self._x / other, self._y / other)
def __truediv__(self, other):
if hasattr(other, '__iter__'):
other_x = other[0] # __:opov
other_y = other[1] # __:opov
return Vector2(self._x / other_x, self._y / other_y)
else:
return Vector2(self._x / other, self._y / other)
def __floordiv__(self, other):
if hasattr(other, '__iter__'):
other_x = other[0] # __:opov
other_y = other[1] # __:opov
return Vector2(floor(self._x/other_x), floor(self._y/other_y))
else:
return Vector2(floor(self._x/other), floor(self._y/other))
def __eq__(self, other):
if hasattr(other, '__iter__'):
if len(other) == 2:
other_x = other[0] # __:opov
other_y = other[1] # __:opov
return ( abs(self._x-other_x) < 0.000001 and
abs(self._y-other_y) < 0.000001 )
else:
return False
else:
return ( abs(self._x-other) < 0.000001 and
abs(self._y-other) < 0.000001 )
def __ne__(self, other):
if hasattr(other, '__iter__'):
if len(other) == 2:
other_x = other[0] # __:opov
other_y = other[1] # __:opov
return ( abs(self._x-other_x) > 0.000001 or
abs(self._y-other_y) > 0.000001 )
else:
return True
else:
return ( abs(self._x-other) > 0.000001 or
abs(self._y-other) > 0.000001 )
def __gt__(self, other):
if not isinstance(other, VectorElementwiseProxy):
msg = 'This operation is not supported by vectors'
raise TypeError(msg)
return other.__lt__(self)
def __ge__(self, other):
if not isinstance(other, VectorElementwiseProxy):
msg = 'This operation is not supported by vectors'
raise TypeError(msg)
return other.__le__(self)
def __lt__(self, other):
if not isinstance(other, VectorElementwiseProxy):
msg = 'This operation is not supported by vectors'
raise TypeError(msg)
return other.__gt__(self)
def __le__(self, other):
if not isinstance(other, VectorElementwiseProxy):
msg = 'This operation is not supported by vectors'
raise TypeError(msg)
return other.__ge__(self)
def __radd__(self, other):
if hasattr(other, '__iter__'):
other_x = other[0] # __:opov
other_y = other[1] # __:opov
return Vector2(self._x + other_x, self._y + other_y)
else:
return Vector2(self._x + other, self._y + other)
def __rsub__(self, other):
if hasattr(other, '__iter__'):
other_x = other[0] # __:opov
other_y = other[1] # __:opov
return Vector2(other_x - self._x, other_y - self._y)
else:
return Vector2(other - self._x, other - self._y)
def __rmul__(self, other):
if hasattr(other, '__iter__'):
other_x = other[0] # __:opov
other_y = other[1] # __:opov
if not isinstance(other, VectorElementwiseProxy):
return (self._x * other_x) + (self._y * other_y)
else:
return Vector2(self._x * other_x, self._y * other_y)
else:
return Vector2(self._x * other, self._y * other)
def __rdiv__(self, other):
if hasattr(other, '__iter__'):
other_x = other[0] # __:opov
other_y = other[1] # __:opov
return Vector2(other_x / self._x, other_y / self._y)
else:
return Vector2(other / self._x, other / self._y)
def __rtruediv__(self, other):
if hasattr(other, '__iter__'):
other_x = other[0] # __:opov
other_y = other[1] # __:opov
return Vector2(other_x / self._x, other_y / self._y)
else:
return Vector2(other / self._x, other / self._y)
def __rfloordiv__(self, other):
if hasattr(other, '__iter__'):
other_x = other[0] # __:opov
other_y = other[1] # __:opov
return Vector2(floor(other_x/self._x), floor(other_y/self._y))
else:
return Vector2(floor(other/self._x), floor(other/self._y))
def __iadd__(self, other):
if hasattr(other, '__iter__'):
other_x = other[0] # __:opov
other_y = other[1] # __:opov
self._x += other_x
self._y += other_y
else:
self._x += other
self._y += other
return self
def __isub__(self, other):
if hasattr(other, '__iter__'):
other_x = other[0] # __:opov
other_y = other[1] # __:opov
self._x -= other_x
self._y -= other_y
else:
self._x -= other
self._y -= other
return self
def __imul__(self, other):
if hasattr(other, '__iter__'):
other_x = other[0] # __:opov
other_y = other[1] # __:opov
self._x *= other_x
self._y *= other_y
else:
self._x *= other
self._y *= other
return self
def __idiv__(self, other):
if hasattr(other, '__iter__'):
other_x = other[0] # __:opov
other_y = other[1] # __:opov
self._x /= other_x
self._y /= other_y
else:
self._x /= other
self._y /= other
return self
def __itruediv__(self, other):
if hasattr(other, '__iter__'):
other_x = other[0] # __:opov
other_y = other[1] # __:opov
self._x /= other_x
self._y /= other_y
else:
self._x /= other
self._y /= other
return self
def __ifloordiv__(self, other):
if hasattr(other, '__iter__'):
other_x = other[0] # __:opov
other_y = other[1] # __:opov
self._x = float(floor(self._x / other_x))
self._y = float(floor(self._y / other_y))
else:
self._x = float(floor(self._x / other))
self._y = float(floor(self._y / other))
return self
class VectorElementwiseProxy(object):
def __init__(self, x, y):
self._x = x
self._y = y
def __getitem__(self, index):
if index in (0, -2):
return self._x
elif index in (1, -1):
return self._y
def __iter__(self):
for val in (self._x, self._y):
yield val
def __len__(self):
return 2
def __bool__(self):
return bool(self._x or self._y)
def __nonzero__(self):
return bool(self._x or self._y)
def __pos__(self):
return Vector2(self._x, self._y)
def __neg__(self):
return Vector2(-self._x, -self._y)
def __abs__(self):
return (abs(self._x), abs(self._y))
def __add__(self, other):
if hasattr(other, '__iter__'):
other_x = other[0] # __:opov
other_y = other[1] # __:opov
return Vector2(self._x + other_x, self._y + other_y)
else:
return Vector2(self._x + other, self._y + other)
def __sub__(self, other):
if hasattr(other, '__iter__'):
other_x = other[0] # __:opov
other_y = other[1] # __:opov
return Vector2(self._x - other_x, self._y - other_y)
else:
return Vector2(self._x - other, self._y - other)
def __mul__(self, other):
if hasattr(other, '__iter__'):
other_x = other[0] # __:opov
other_y = other[1] # __:opov
return Vector2(self._x * other_x, self._y * other_y)
else:
return Vector2(self._x * other, self._y * other)
def __div__(self, other):
if hasattr(other, '__iter__'):
other_x = other[0] # __:opov
other_y = other[1] # __:opov
return Vector2(self._x / other_x, self._y / other_y)
else:
return Vector2(self._x / other, self._y / other)
def __truediv__(self, other):
if hasattr(other, '__iter__'):
other_x = other[0] # __:opov
other_y = other[1] # __:opov
return Vector2(self._x / other_x, self._y / other_y)
else:
return Vector2(self._x / other, self._y / other)
def __floordiv__(self, other):
if hasattr(other, '__iter__'):
other_x = other[0] # __:opov
other_y = other[1] # __:opov
return Vector2(floor(self._x/other_x), floor(self._y/other_y))
else:
return Vector2(floor(self._x/other), floor(self._y/other))
def __pow__(self, other):
if hasattr(other, '__iter__'):
other_x = other[0] # __:opov
other_y = other[1] # __:opov
if (other_x%1 and self._x<0) or (other_y%1 and self._y<0):
raise ValueError('negative number cannot be raised to a fractional power')
return Vector2(self._x**other_x, self._y**other_y)
else:
if other%1 and (self._x<0 or self._y<0):
raise ValueError('negative number cannot be raised to a fractional power')
return Vector2(self._x**other, self._y**other)
def __mod__(self, other):
if hasattr(other, '__iter__'):
other_x = other[0] # __:opov
other_y = other[1] # __:opov
return Vector2(self._x%other_x, self._y%other_y)
else:
return Vector2(self._x%other, self._y%other)
def __eq__(self, other):
if hasattr(other, '__iter__'):
if len(other) == 2:
other_x = other[0] # __:opov
other_y = other[1] # __:opov
return ( abs(self._x-other_x) < 0.000001 and
abs(self._y-other_y) < 0.000001 )
else:
return False
else:
return ( abs(self._x-other) < 0.000001 and
abs(self._y-other) < 0.000001 )
def __ne__(self, other):
if hasattr(other, '__iter__'):
if len(other) == 2:
other_x = other[0] # __:opov
other_y = other[1] # __:opov
return ( abs(self._x-other_x) > 0.000001 or
abs(self._y-other_y) > 0.000001 )
else:
return True
else:
return ( abs(self._x-other) > 0.000001 or
abs(self._y-other) > 0.000001 )
def __gt__(self, other):
if hasattr(other, '__iter__'):
other_x = other[0] # __:opov
other_y = other[1] # __:opov
return bool(self._x>other_x and self._y>other_y)
else:
return bool(self._x>other and self._y>other)
def __ge__(self, other):
if hasattr(other, '__iter__'):
other_x = other[0] # __:opov
other_y = other[1] # __:opov
return bool(self._x>=other_x and self._y>=other_y)
else:
return bool(self._x>=other and self._y>=other)
def __lt__(self, other):
if hasattr(other, '__iter__'):
other_x = other[0] # __:opov
other_y = other[1] # __:opov
return bool(self._x<other_x and self._y<other_y)
else:
return bool(self._x<other and self._y<other)
def __le__(self, other):
if hasattr(other, '__iter__'):
other_x = other[0] # __:opov
other_y = other[1] # __:opov
return bool(self._x<=other_x and self._y<=other_y)
else:
return bool(self._x<=other and self._y<=other)
def __radd__(self, other):
if hasattr(other, '__iter__'):
other_x = other[0] # __:opov
other_y = other[1] # __:opov
return Vector2(self._x + other_x, self._y + other_y)
else:
return Vector2(self._x + other, self._y + other)
def __rsub__(self, other):
if hasattr(other, '__iter__'):
other_x = other[0] # __:opov
other_y = other[1] # __:opov
return Vector2(other_x - self._x, other_y - self._y)
else:
return Vector2(other - self._x, other - self._y)
def __rmul__(self, other):
if hasattr(other, '__iter__'):
other_x = other[0] # __:opov
other_y = other[1] # __:opov
return Vector2(self._x * other_x, self._y * other_y)
else:
return Vector2(self._x * other, self._y * other)
def __rdiv__(self, other):
if hasattr(other, '__iter__'):
other_x = other[0] # __:opov
other_y = other[1] # __:opov
return Vector2(other_x / self._x, other_y / self._y)
else:
return Vector2(other / self._x, other / self._y)
def __rtruediv__(self, other):
if hasattr(other, '__iter__'):
other_x = other[0] # __:opov
other_y = other[1] # __:opov
return Vector2(other_x / self._x, other_y / self._y)
else:
return Vector2(other / self._x, other / self._y)
def __rfloordiv__(self, other):
if hasattr(other, '__iter__'):
other_x = other[0] # __:opov
other_y = other[1] # __:opov
return Vector2(floor(other_x/self._x), floor(other_y/self._y))
else:
return Vector2(floor(other/self._x), floor(other/self._y))
def __rpow__(self, other):
if hasattr(other, '__iter__'):
other_x = other[0] # __:opov
other_y = other[1] # __:opov
if (other_x<0 and self._x%1) or (other_y<0 and self._y%1):
raise ValueError('negative number cannot be raised to a fractional power')
return Vector2(other_x**self._x, other_y**self._y)
else:
if other<0 and (self._x%1 or self._y%1):
raise ValueError('negative number cannot be raised to a fractional power')
return Vector2(other**self._x, other**self._y)
def __rmod__(self, other):
if hasattr(other, '__iter__'):
other_x = other[0] # __:opov
other_y = other[1] # __:opov
return Vector2(other_x%self._x, other_y%self._y)
else:
return Vector2(other%self._x, other%self._y)
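# Illustrative usage sketch, not part of the original module. The values shown
# in the comments were worked out by hand; under Transcrypt the operator and
# indexing forms additionally need __pragma__ ('opov').
if __name__ == '__main__':
    v = Vector2(3.0, 4.0)
    print(v.magnitude())             # 5.0
    print(v.normalize())             # <Vector2(0.6, 0.8)>
    print(v.dot(Vector2(1.0, 0.0)))  # 3.0
    print(v.rotate(90))              # <Vector2(-4.0, 3.0)>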
| 32.365079 | 90 | 0.508933 |
c07414cafc565f90834e59efde6c25643fb556ed | 189 | py | Python | musicstore/musicapp/admin.py | hannahclee/recordreview | 73855cdc006db5170c9fca214bf5ce6d142040b3 | ["Apache-2.0"] | null | null | null | musicstore/musicapp/admin.py | hannahclee/recordreview | 73855cdc006db5170c9fca214bf5ce6d142040b3 | ["Apache-2.0"] | null | null | null | musicstore/musicapp/admin.py | hannahclee/recordreview | 73855cdc006db5170c9fca214bf5ce6d142040b3 | ["Apache-2.0"] | null | null | null |
from django.contrib import admin
from .models import Artist, Record, Review
# Register your models here.
admin.site.register(Artist)
admin.site.register(Record)
admin.site.register(Review)
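# Illustrative sketch, not part of the original file: the same registration
# could use a customized admin class instead; the list_display field names
# below are assumptions about the Record model.
#
#   @admin.register(Record)
#   class RecordAdmin(admin.ModelAdmin):
#       list_display = ("title", "artist")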
| 27 | 42 | 0.809524 |
2861e4e01e81763f3230d96375eb16ecf7fa5c84 | 972 | py | Python | a2ml/api/a2ml_dataset.py | arita37/a2ml | 3e92bede2c2ef6e63be74560cc6b904d3ec9d931 | ["Apache-2.0"] | 2 | 2020-04-09T16:59:22.000Z | 2020-04-09T17:01:10.000Z | a2ml/api/a2ml_dataset.py | arita37/a2ml | 3e92bede2c2ef6e63be74560cc6b904d3ec9d931 | ["Apache-2.0"] | null | null | null | a2ml/api/a2ml_dataset.py | arita37/a2ml | 3e92bede2c2ef6e63be74560cc6b904d3ec9d931 | ["Apache-2.0"] | null | null | null |
from a2ml.api.utils.crud_runner import CRUDRunner
from a2ml.api.utils.show_result import show_result
class A2MLDataset(object):
"""Contains the dataset CRUD operations that interact with provider."""
def __init__(self, ctx, provider):
"""Initializes a new a2ml.
Args:
provider (str): The automl provider/s you wish to run. For example 'auger,azure,google'.
Returns:
A2ML object
"""
super(A2MLDataset, self).__init__()
self.ctx = ctx
self.runner = CRUDRunner(ctx, provider, 'dataset')
@show_result
def list(self):
return self.runner.execute('list')
@show_result
def create(self, source = None):
return self.runner.execute('create', source)
@show_result
def delete(self, name = None):
return self.runner.execute('delete', name)
@show_result
def select(self, name = None):
return self.runner.execute('select', name)
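# Illustrative usage sketch, not part of the original module; `ctx` is assumed
# to be an already configured a2ml context and 'auger' an enabled provider.
#
#   dataset = A2MLDataset(ctx, 'auger')
#   dataset.create(source='data/train.csv')
#   dataset.list()
#   dataset.select('train.csv')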
| 28.588235 | 100 | 0.639918 |
994ed9ed21b6233033178db357596123c0fcc744 | 9,204 | py | Python | Pyrado/pyrado/algorithms/episodic/nes.py | theogruner/SimuRLacra | 4893514ccdeb10a736c55de9aa7753fd51c5afec | ["DOC", "Zlib", "BSD-3-Clause"] | null | null | null | Pyrado/pyrado/algorithms/episodic/nes.py | theogruner/SimuRLacra | 4893514ccdeb10a736c55de9aa7753fd51c5afec | ["DOC", "Zlib", "BSD-3-Clause"] | null | null | null | Pyrado/pyrado/algorithms/episodic/nes.py | theogruner/SimuRLacra | 4893514ccdeb10a736c55de9aa7753fd51c5afec | ["DOC", "Zlib", "BSD-3-Clause"] | null | null | null |
# Copyright (c) 2020, Fabio Muratore, Honda Research Institute Europe GmbH, and
# Technical University of Darmstadt.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# 3. Neither the name of Fabio Muratore, Honda Research Institute Europe GmbH,
# or Technical University of Darmstadt, nor the names of its contributors may
# be used to endorse or promote products derived from this software without
# specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL FABIO MURATORE, HONDA RESEARCH INSTITUTE EUROPE GMBH,
# OR TECHNICAL UNIVERSITY OF DARMSTADT BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
# OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
# IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
from typing import Optional
import numpy as np
import torch as to
import pyrado
from pyrado.algorithms.episodic.parameter_exploring import ParameterExploring
from pyrado.environments.base import Env
from pyrado.exploration.stochastic_params import NormalParamNoise, SymmParamExplStrat
from pyrado.logger.step import StepLogger
from pyrado.policies.base import Policy
from pyrado.sampling.parameter_exploration_sampler import ParameterSamplingResult
from pyrado.utils.data_processing import standardize
class NES(ParameterExploring):
"""
Simplified variant of Natural Evolution Strategies (NES)
.. seealso::
[1] D. Wierstra, T. Schaul, T. Glasmachers, Y. Sun, J. Peters, J. Schmidhuber, "Natural Evolution Strategies",
JMLR, 2014
[2] This implementation was inspired by https://github.com/pybrain/pybrain/blob/master/pybrain/optimization/distributionbased/snes.py
"""
name: str = "nes"
def __init__(
self,
save_dir: pyrado.PathLike,
env: Env,
policy: Policy,
max_iter: int,
num_init_states_per_domain: int,
expl_std_init: float,
expl_std_min: float = 0.01,
num_domains: int = 1,
pop_size: Optional[int] = None,
eta_mean: float = 1.0,
eta_std: Optional[float] = None,
symm_sampling: bool = False,
transform_returns: bool = True,
num_workers: int = 4,
logger: Optional[StepLogger] = None,
):
"""
Constructor
:param save_dir: directory to save the snapshots i.e. the results in
:param env: the environment which the policy operates
:param policy: policy to be updated
:param max_iter: maximum number of iterations (i.e. policy updates) that this algorithm runs
:param num_init_states_per_domain: number of rollouts to cover the variance over initial states
:param num_domains: number of rollouts due to the variance over domain parameters
:param expl_std_init: initial standard deviation for the exploration strategy
:param expl_std_min: minimal standard deviation for the exploration strategy
:param pop_size: number of solutions in the population
:param eta_mean: step size factor for the mean
:param eta_std: step size factor for the standard deviation
:param symm_sampling: use an exploration strategy which samples symmetric populations
:param transform_returns: use a rank-transformation of the returns to update the policy
:param num_workers: number of environments for parallel sampling
:param logger: logger for every step of the algorithm, if `None` the default logger will be created
"""
# Call ParameterExploring's constructor
super().__init__(
save_dir=save_dir,
env=env,
policy=policy,
max_iter=max_iter,
num_init_states_per_domain=num_init_states_per_domain,
num_domains=num_domains,
pop_size=pop_size,
num_workers=num_workers,
logger=logger,
)
# Store the inputs
self.transform_returns = transform_returns
# Explore using normal noise
self._expl_strat = NormalParamNoise(
self._policy.num_param,
std_init=expl_std_init,
std_min=expl_std_min,
use_cuda=self._policy.device != "cpu",
)
if symm_sampling:
# Exploration strategy based on symmetrical normally distributed noise
# Symmetric buffer needs to have an even number of samples
if self.pop_size % 2 != 0:
self.pop_size += 1
self._expl_strat = SymmParamExplStrat(self._expl_strat)
# Utility coefficients (ignored for transform_returns = False)
# Use pop_size + 1 since we are also considering the current policy
eta_std = eta_std if eta_std is not None else (3 + np.log(policy.num_param)) / np.sqrt(self.pop_size + 1) / 5.0
self.eta_mean_util, self.eta_std_util = self.compute_utilities(self.pop_size + 1, eta_mean, eta_std)
# Learning rates [2]
# Use pop_size + 1 since we are also considering the current policy
self.lr_mean = 1.0 if transform_returns else 1e-2
self.lr_std = 0.6 * (3 + np.log(self.pop_size + 1)) / 3.0 / np.sqrt(self.pop_size + 1)
@staticmethod
def compute_utilities(pop_size: Optional[int], eta_mean: float, eta_std: float):
"""
Compute the utilities as described in section 3.1 of [1] (a.k.a. Hansen ranking with uniform baseline)
:param pop_size: number of solutions in the population
:param eta_mean: step size factor for the mean
:param eta_std: step size factor for the standard deviation
:return: utility coefficient for the mean, and utility coefficient for the standard deviation
"""
# Compute common utility vector
log_half = np.log(pop_size / 2.0 + 1)
log_k = np.log(np.arange(1, pop_size + 1))
num = np.maximum(0, log_half - log_k)
utils = num / np.sum(num) - 1.0 / pop_size
# Convert to PyTorch tensors
eta_mean_util = to.from_numpy(eta_mean * utils).to(to.get_default_dtype())
eta_std_util = to.from_numpy(eta_std / 2.0 * utils).to(to.get_default_dtype())
return eta_mean_util, eta_std_util
def update(self, param_results: ParameterSamplingResult, ret_avg_curr: float = None):
# Average the return values over the rollouts
rets_avg_ros = param_results.mean_returns
# Get the perturbations (deltas from the current policy parameters)
s = param_results.parameters - self._policy.param_values
# also divide by the standard deviation to fully standardize
s /= self._expl_strat.std
if self.transform_returns:
# Ascending sort according to return values
idcs_acs = np.argsort(rets_avg_ros)[::-1]
s_asc = s[list(idcs_acs), :]
# Update the mean (see [1, 2])
delta_mean = self._expl_strat.std * (self.eta_mean_util @ s_asc)
self._policy.param_values += self.lr_mean * delta_mean
# Update the std (see [1, 2])
grad_std = self.eta_std_util @ (s_asc ** 2 - 1.0)
new_std = self._expl_strat.std * to.exp(self.lr_std * grad_std / 2.0)
self._expl_strat.adapt(std=new_std)
else:
# Standardize averaged returns over all pop_size rollouts
rets_stdized = standardize(rets_avg_ros)
rets_stdized = to.from_numpy(rets_stdized).to(to.get_default_dtype())
# delta_mean = 1./len(param_results) * (rets_stdized @ s)
delta_mean = 1.0 / (self._expl_strat.std * len(param_results)) * (rets_stdized @ s)
self._policy.param_values += self.lr_mean * delta_mean
# Update the std (monotonous exponential decay)
new_std = self._expl_strat.std * 0.999 ** self._curr_iter
self._expl_strat.adapt(std=new_std)
self.logger.add_value("min expl strat std", to.min(self._expl_strat.std), 4)
self.logger.add_value("avg expl strat std", to.mean(self._expl_strat.std), 4)
self.logger.add_value("max expl strat std", to.max(self._expl_strat.std), 4)
self.logger.add_value("expl strat entropy", self._expl_strat.get_entropy(), 4)
| 47.2 | 141 | 0.686875 |
113fdaaf14ead638f626a61a7766e9d2979329b9 | 1,244 | py | Python | superset/migrations/versions/55e910a74826_add_metadata_column_to_annotation_model_.py | Manikantan22/incubator-superset | ec325c871e60ae2a050aae595b430d6fc2888d1a | ["Apache-2.0"] | 6 | 2019-06-14T11:16:54.000Z | 2020-11-08T16:02:00.000Z | superset/migrations/versions/55e910a74826_add_metadata_column_to_annotation_model_.py | Manikantan22/incubator-superset | ec325c871e60ae2a050aae595b430d6fc2888d1a | ["Apache-2.0"] | 203 | 2019-05-31T11:13:10.000Z | 2020-03-31T02:50:54.000Z | superset/migrations/versions/55e910a74826_add_metadata_column_to_annotation_model_.py | Manikantan22/incubator-superset | ec325c871e60ae2a050aae595b430d6fc2888d1a | ["Apache-2.0"] | 14 | 2019-05-31T11:32:40.000Z | 2021-01-28T11:18:16.000Z |
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""add_metadata_column_to_annotation_model.py
Revision ID: 55e910a74826
Revises: 1a1d627ebd8e
Create Date: 2018-08-29 14:35:20.407743
"""
# revision identifiers, used by Alembic.
revision = "55e910a74826"
down_revision = "1a1d627ebd8e"
from alembic import op
import sqlalchemy as sa
def upgrade():
op.add_column("annotation", sa.Column("json_metadata", sa.Text(), nullable=True))
def downgrade():
op.drop_column("annotation", "json_metadata")
| 31.897436 | 85 | 0.766881 |
51872b7bf71a067a06ea7e2ec6458e4b7615c9a5 | 3,862 | py | Python | python/download-trade.py | Kento-Bento/binance-public-data | a4309127213f2f8af194626013598b278a541c73 | ["MIT"] | 637 | 2021-01-14T03:41:05.000Z | 2022-03-29T13:02:56.000Z | python/download-trade.py | Kento-Bento/binance-public-data | a4309127213f2f8af194626013598b278a541c73 | ["MIT"] | 115 | 2021-01-15T02:52:21.000Z | 2022-03-29T09:30:37.000Z | python/download-trade.py | Kento-Bento/binance-public-data | a4309127213f2f8af194626013598b278a541c73 | ["MIT"] | 267 | 2021-01-19T13:36:17.000Z | 2022-03-31T15:56:19.000Z |
#!/usr/bin/env python
"""
Script to download trades.
Set the absolute path destination folder for STORE_DIRECTORY, and run
e.g. STORE_DIRECTORY=/data/ ./download-trade.py
"""
import sys
from datetime import *
import pandas as pd
from enums import *
from utility import download_file, get_all_symbols, get_parser, get_start_end_date_objects, convert_to_date_object, \
get_path
def download_monthly_trades(trading_type, symbols, num_symbols, years, months, start_date, end_date, folder, checksum):
current = 0
date_range = None
if start_date and end_date:
date_range = start_date + " " + end_date
if not start_date:
start_date = START_DATE
else:
start_date = convert_to_date_object(start_date)
if not end_date:
end_date = END_DATE
else:
end_date = convert_to_date_object(end_date)
print("Found {} symbols".format(num_symbols))
for symbol in symbols:
print("[{}/{}] - start download monthly {} trades ".format(current+1, num_symbols, symbol))
for year in years:
for month in months:
current_date = convert_to_date_object('{}-{}-01'.format(year, month))
if current_date >= start_date and current_date <= end_date:
path = get_path(trading_type, "trades", "monthly", symbol)
file_name = "{}-trades-{}-{}.zip".format(symbol.upper(), year, '{:02d}'.format(month))
download_file(path, file_name, date_range, folder)
if checksum == 1:
checksum_path = get_path(trading_type, "trades", "monthly", symbol)
checksum_file_name = "{}-trades-{}-{}.zip.CHECKSUM".format(symbol.upper(), year, '{:02d}'.format(month))
download_file(checksum_path, checksum_file_name, date_range, folder)
current += 1
def download_daily_trades(trading_type, symbols, num_symbols, dates, start_date, end_date, folder, checksum):
current = 0
date_range = None
if start_date and end_date:
date_range = start_date + " " + end_date
if not start_date:
start_date = START_DATE
else:
start_date = convert_to_date_object(start_date)
if not end_date:
end_date = END_DATE
else:
end_date = convert_to_date_object(end_date)
print("Found {} symbols".format(num_symbols))
for symbol in symbols:
print("[{}/{}] - start download daily {} trades ".format(current+1, num_symbols, symbol))
for date in dates:
current_date = convert_to_date_object(date)
if current_date >= start_date and current_date <= end_date:
path = get_path(trading_type, "trades", "daily", symbol)
file_name = "{}-trades-{}.zip".format(symbol.upper(), date)
download_file(path, file_name, date_range, folder)
if checksum == 1:
checksum_path = get_path(trading_type, "trades", "daily", symbol)
checksum_file_name = "{}-trades-{}.zip.CHECKSUM".format(symbol.upper(), date)
download_file(checksum_path, checksum_file_name, date_range, folder)
current += 1
if __name__ == "__main__":
parser = get_parser('trades')
args = parser.parse_args(sys.argv[1:])
if not args.symbols:
print("fetching all symbols from exchange")
symbols = get_all_symbols(args.type)
num_symbols = len(symbols)
else:
symbols = args.symbols
num_symbols = len(symbols)
print("fetching {} symbols from exchange".format(num_symbols))
if args.dates:
dates = args.dates
else:
dates = pd.date_range(end = datetime.today(), periods = MAX_DAYS).to_pydatetime().tolist()
dates = [date.strftime("%Y-%m-%d") for date in dates]
download_monthly_trades(args.type, symbols, num_symbols, args.years, args.months, args.startDate, args.endDate, args.folder, args.checksum)
download_daily_trades(args.type, symbols, num_symbols, dates, args.startDate, args.endDate, args.folder, args.checksum)
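# Illustrative invocation sketch, not part of the original script; the exact
# flag names are defined by get_parser('trades') in utility.py and are
# assumptions here:
#
#   STORE_DIRECTORY=/data python download-trade.py -t spot -s BTCUSDT \
#       -startDate 2021-01-01 -endDate 2021-02-01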
| 34.792793 | 145 | 0.684878 |
5276ecf712911672f88b4005caab490f5941ef1b | 1,204 | py | Python | src/omnibus/app.py | hmajid2301/omnibus | 99de78329c28e99bcfefd523640f4fdd377bf8d4 | ["Apache-2.0"] | 1 | 2022-02-25T13:05:27.000Z | 2022-02-25T13:05:27.000Z | src/omnibus/app.py | hmajid2301/omnibus | 99de78329c28e99bcfefd523640f4fdd377bf8d4 | ["Apache-2.0"] | null | null | null | src/omnibus/app.py | hmajid2301/omnibus | 99de78329c28e99bcfefd523640f4fdd377bf8d4 | ["Apache-2.0"] | null | null | null |
from typing import Any, Callable, Dict, List, Type, Union
from beanie import init_beanie
from beanie.odm.documents import DocType
from fastapi import FastAPI
from fastapi_health import health
from motor import motor_asyncio
from omnibus.config.settings import OmnibusSettings
from omnibus.healthcheck import default_healthcheck
from omnibus.log.logger import get_logger, setup_logger
from omnibus.middleware.cors import add_cors
async def setup_app(
app: FastAPI,
get_settings: Callable[..., OmnibusSettings],
document_models: List[Union[Type["DocType"], str]],
healthcheck: Callable[..., Union[Dict[str, Any], bool]] = default_healthcheck,
):
config = get_settings()
setup_logger(log_level=config.LOG_LEVEL, env=config.ENVIRONMENT, uvicorn_log_level=config.UVICORN_LOG_LEVEL)
uri = config.get_mongodb_uri()
client = motor_asyncio.AsyncIOMotorClient(uri)
await init_beanie(database=client[config.DB_NAME], document_models=document_models)
add_cors(app=app, cors=config.CORS, regex_cors=config.REGEX_CORS)
app.add_api_route("/health", health([healthcheck]))
log = get_logger()
log.info(f"starting {app.title} {config.WEB_HOST}:{config.WEB_PORT}")
| 37.625 | 112 | 0.775748 |
8fa7a58c8e55a8cf09335f185287c7bbde6d6829 | 18,454 | py | Python | PAPC/models/detect/pointpillars/core/__init__.py | AgentMaker/PAPC | f5f385c6fed53d2ca2599171a317efaac348dfb5 | ["MIT"] | 70 | 2021-02-20T03:44:38.000Z | 2022-01-17T12:55:45.000Z | PAPC/models/detect/pointpillars/core/__init__.py | AgentMaker/PAPC | f5f385c6fed53d2ca2599171a317efaac348dfb5 | ["MIT"] | 1 | 2021-02-20T05:26:14.000Z | 2021-02-20T13:22:13.000Z | PAPC/models/detect/pointpillars/core/__init__.py | AgentMaker/PAPC | f5f385c6fed53d2ca2599171a317efaac348dfb5 | ["MIT"] | 8 | 2021-02-20T03:35:25.000Z | 2022-02-08T03:22:55.000Z |
import numpy as np
from functools import partial
import pickle
import paddle
from core.box_coders import (GroundBox3dCoderPaddle, BevBoxCoderPaddle)
from core.voxel_generator import VoxelGenerator
from core.target_assigner import TargetAssigner
from core.similarity_calculator import (RotateIouSimilarity,NearestIouSimilarity,DistanceSimilarity)
from core.anchor_generator import (AnchorGeneratorStride, AnchorGeneratorRange)
from core import losses
from data.dataset import KittiDataset, DatasetWrapper
from data.preprocess import prep_pointcloud
from libs.preprocess import DBFilterByMinNumPoint, DBFilterByDifficulty, DataBasePreprocessor
from libs.ops.sample_ops import DataBaseSamplerV2
from libs.tools import learning_schedules
def build_anchor_generator(anchor_config):
"""Create optimizer based on config.
Args:
optimizer_config: A Optimizer proto message.
Returns:
An optimizer and a list of variables for summary.
Raises:
ValueError: when using an unsupported input data type.
"""
if 'anchor_generator_stride' in anchor_config:
config = anchor_config.anchor_generator_stride
ag = AnchorGeneratorStride(
sizes=list(config.sizes),
anchor_strides=list(config.strides),
anchor_offsets=list(config.offsets),
rotations=list(config.rotations),
match_threshold=config.matched_threshold,
unmatch_threshold=config.unmatched_threshold,
class_id=config.class_name)
return ag
elif 'anchor_generator_range' in anchor_config:
config = anchor_config.anchor_generator_range
ag = AnchorGeneratorRange(
sizes=list(config.sizes),
anchor_ranges=list(config.anchor_ranges),
rotations=list(config.rotations),
match_threshold=config.matched_threshold,
unmatch_threshold=config.unmatched_threshold,
class_id=config.class_name)
return ag
else:
raise ValueError(" unknown anchor generator type")
def build_similarity_calculator(similarity_config):
"""Create optimizer based on config.
Args:
optimizer_config: A Optimizer proto message.
Returns:
An optimizer and a list of variables for summary.
Raises:
ValueError: when using an unsupported input data type.
"""
if 'rotate_iou_similarity' in similarity_config:
return RotateIouSimilarity()
elif 'nearest_iou_similarity' in similarity_config:
return NearestIouSimilarity()
elif 'distance_similarity' in similarity_config:
cfg = similarity_config.distance_similarity
return DistanceSimilarity(distance_norm=cfg.distance_norm,
with_rotation=cfg.with_rotation,
rotation_alpha=cfg.rotation_alpha)
else:
raise ValueError("unknown similarity type")
def build_voxel_generator(voxel_generator_config):
"""Builds a tensor dictionary based on the InputReader config.
Args:
input_reader_config: A input_reader_pb2.InputReader object.
Returns:
A tensor dict based on the input_reader_config.
Raises:
ValueError: On invalid input reader proto.
ValueError: If no input paths are specified.
"""
voxel_generator = VoxelGenerator(
voxel_size=list(voxel_generator_config.VOXEL_SIZE),
point_cloud_range=list(voxel_generator_config.POINT_CLOUD_RANGE),
max_num_points=voxel_generator_config.MAX_NUMBER_OF_POINTS_PER_VOXEL,
max_voxels=voxel_generator_config.MAX_VOXELS)
return voxel_generator
def build_box_coder(box_coder_config):
"""Create optimizer based on config.
Args:
optimizer_config: A Optimizer proto message.
Returns:
An optimizer and a list of variables for summary.
Raises:
ValueError: when using an unsupported input data type.
"""
if box_coder_config.BOX_CODER_TYPE == 'ground_box3d_coder':
return GroundBox3dCoderPaddle(box_coder_config.LINEAR_DIM, box_coder_config.ENCODE_ANGLE_VECTOR)
elif box_coder_config.BOX_CODER_TYPE == 'bev_box_coder':
return BevBoxCoderPaddle(box_coder_config.LINEAR_DIM, box_coder_config.ENCODE_ANGLE_VECTOR,
box_coder_config.Z_FIXED, box_coder_config.H_FIXED)
else:
raise ValueError("unknown box_coder type")
def build_target_assigner(target_assigner_config, bv_range, box_coder):
"""Builds a tensor dictionary based on the InputReader config.
Args:
input_reader_config: A input_reader_pb2.InputReader object.
Returns:
A tensor dict based on the input_reader_config.
Raises:
ValueError: On invalid input reader proto.
ValueError: If no input paths are specified.
"""
anchor_cfg = target_assigner_config.ANCHOR_GENERATORS
anchor_generators = []
for a_cfg in anchor_cfg:
anchor_generator = build_anchor_generator(a_cfg)
anchor_generators.append(anchor_generator)
similarity_calc = build_similarity_calculator(
target_assigner_config.REGION_SIMILARITY_CALCULATOR)
positive_fraction = target_assigner_config.SAMPLE_POSITIVE_FRACTION
if positive_fraction < 0:
positive_fraction = None
target_assigner = TargetAssigner(
box_coder=box_coder,
anchor_generators=anchor_generators,
region_similarity_calculator=similarity_calc,
positive_fraction=positive_fraction,
sample_size=target_assigner_config.SAMPLE_SIZE)
return target_assigner
def build_losses(loss_config):
"""Build losses based on the config.
Builds classification, localization losses and optionally a hard example miner
based on the config.
Args:
loss_config: A yaml.Loss object.
Returns:
classification_loss: Classification loss object.
localization_loss: Localization loss object.
classification_weight: Classification loss weight.
localization_weight: Localization loss weight.
hard_example_miner: Hard example miner object.
Raises:
ValueError: If hard_example_miner is used with sigmoid_focal_loss.
"""
classification_loss = _build_classification_loss(
loss_config.classification_loss)
localization_loss = _build_localization_loss(
loss_config.localization_loss)
classification_weight = loss_config.classification_weight
localization_weight = loss_config.localization_weight
hard_example_miner = None
return (classification_loss, localization_loss,
classification_weight,
localization_weight, hard_example_miner)
def _build_localization_loss(loss_config):
"""Builds a localization loss based on the loss config.
Args:
loss_config: A yaml.LocalizationLoss object.
Returns:
Loss based on the config.
Raises:
ValueError: On invalid loss_config.
"""
if 'weighted_l2' in loss_config:
config = loss_config.weighted_l2
if len(config.code_weight) == 0:
code_weight = None
else:
code_weight = config.code_weight
return losses.WeightedL2LocalizationLoss(code_weight)
if 'weighted_smooth_l1' in loss_config:
config = loss_config.weighted_smooth_l1
if len(config.code_weight) == 0:
code_weight = None
else:
code_weight = config.code_weight
return losses.WeightedSmoothL1LocalizationLoss(config.sigma, code_weight)
else:
raise ValueError('Empty loss config.')
def _build_classification_loss(loss_config):
"""Builds a classification loss based on the loss config.
Args:
loss_config: A yaml.ClassificationLoss object.
Returns:
Loss based on the config.
Raises:
ValueError: On invalid loss_config.
"""
if 'weighted_sigmoid' in loss_config:
return losses.WeightedSigmoidClassificationLoss()
if 'weighted_sigmoid_focal' in loss_config:
config = loss_config.weighted_sigmoid_focal
# alpha = None
# if config.HasField('alpha'):
# alpha = config.alpha
if config.alpha > 0:
alpha = config.alpha
else:
alpha = None
return losses.SigmoidFocalClassificationLoss(
gamma=config.gamma,
alpha=alpha)
if 'weighted_softmax_focal' in loss_config :
config = loss_config.weighted_softmax_focal
# alpha = None
# if config.HasField('alpha'):
# alpha = config.alpha
if config.alpha > 0:
alpha = config.alpha
else:
alpha = None
return losses.SoftmaxFocalClassificationLoss(
gamma=config.gamma,
alpha=alpha)
if 'weighted_softmax' in loss_config:
config = loss_config.weighted_softmax
return losses.WeightedSoftmaxClassificationLoss(
logit_scale=config.logit_scale)
if 'bootstrapped_sigmoid' in loss_config:
config = loss_config.bootstrapped_sigmoid
return losses.BootstrappedSigmoidClassificationLoss(
alpha=config.alpha,
bootstrap_type=('hard' if config.hard_bootstrap else 'soft'))
else:
raise ValueError('Empty loss config.')
def build_optimizer(optimizer_config, params, name=None):
"""Create optimizer based on config.
Args:
optimizer_config: A Optimizer proto message.
Returns:
An optimizer and a list of variables for summary.
Raises:
ValueError: when using an unsupported input data type.
"""
    # Initialize so the unsupported-optimizer check below does not hit an unbound name
    optimizer = None
    if optimizer_config.name == 'rms_prop_optimizer':
optimizer = paddle.optimizer.RMSProp(
parameters = params,
learning_rate=_get_base_lr_by_lr_scheduler(optimizer_config.learning_rate),
rho=optimizer_config.decay,
momentum=optimizer_config.momentum_optimizer_value,
epsilon=optimizer_config.epsilon,
weight_decay=optimizer_config.weight_decay)
if optimizer_config.name =='momentum_optimizer':
optimizer = paddle.optimizer.SGD(
parameters = params,
learning_rate=_get_base_lr_by_lr_scheduler(optimizer_config.learning_rate),
weight_decay=optimizer_config.weight_decay)
if optimizer_config.name =='adam_optimizer':
optimizer = paddle.optimizer.Adam(
parameters = params,
learning_rate=_get_base_lr_by_lr_scheduler(optimizer_config.learning_rate),
weight_decay=optimizer_config.weight_decay)
if optimizer is None:
raise ValueError('Optimizer %s not supported.' % optimizer_config.name)
if optimizer_config.use_moving_average:
raise ValueError('paddle don\'t support moving average')
if name is None:
# assign a name to optimizer for checkpoint system
optimizer.name = optimizer_config.name
else:
optimizer.name = name
return optimizer
def _get_base_lr_by_lr_scheduler(learning_rate_config):
base_lr = None
learning_rate_type = learning_rate_config.name
if learning_rate_type == 'constant_learning_rate':
base_lr = learning_rate_config.learning_rate
if learning_rate_type == 'exponential_decay_learning_rate':
base_lr = learning_rate_config.initial_learning_rate
if learning_rate_type == 'manual_step_learning_rate':
base_lr = learning_rate_config.initial_learning_rate
        if not learning_rate_config.schedule:
raise ValueError('Empty learning rate schedule.')
if learning_rate_type == 'cosine_decay_learning_rate':
base_lr = learning_rate_config.learning_rate_base
if base_lr is None:
raise ValueError(
'Learning_rate %s not supported.' % learning_rate_type)
return base_lr
def build_db_preprocess(cfg):
prepors = []
for k,v in cfg.items():
if k == 'filter_by_min_num_points':
prepor = DBFilterByMinNumPoint(v.min_num_point_pairs)
prepors.append(prepor)
elif k == 'filter_by_difficulty':
prepor = DBFilterByDifficulty(v.removed_difficulties)
prepors.append(prepor)
else:
raise ValueError("unknown database prep type")
return prepors
def build_dbsampler(db_sampler_cfg):
cfg = db_sampler_cfg
groups = cfg.sample_groups
prepors = build_db_preprocess(cfg.database_prep_steps) # list
db_prepor = DataBasePreprocessor(prepors)
rate = cfg.rate
grot_range = cfg.global_random_rotation_range_per_object
groups = [g.name_to_max_num for g in groups]
info_path = cfg.database_info_path
with open(info_path,'rb') as f:
db_infos = pickle.load(f)
if len(grot_range) == 0:
grot_range =None
sampler = DataBaseSamplerV2(db_infos , groups, db_prepor,rate,grot_range)
return sampler
def build_dataset(input_reader_config,
model_config,
training,
voxel_generator,
target_assigner=None):
"""Builds a tensor dictionary based on the InputReader config.
Returns:
A tensor dict based on the input_reader_config.
"""
generate_bev = model_config.POST_PROCESSING.use_bev
without_reflectivity = model_config.WITHOUT_REFLECTIVITY
num_point_features = model_config.NUM_POINT_FEATURES
out_size_factor = model_config.BACKBONE.layer_strides[0] //model_config.BACKBONE.upsample_strides[0]
cfg = input_reader_config
db_sampler_cfg = input_reader_config.DATABASE_SAMPLER
db_sampler = None
if len(db_sampler_cfg.sample_groups) > 0:
db_sampler = build_dbsampler(db_sampler_cfg)
try:
u_db_sampler_cfg = input_reader_config.UNLABELED_DATABASE_SAMPLER
u_db_sampler = None
if len(u_db_sampler_cfg.sample_groups) > 0:
u_db_sampler = build_dbsampler(u_db_sampler_cfg)
except:
u_db_sampler = None
grid_size = voxel_generator.grid_size #[352,400]
feature_map_size = grid_size[:2] // out_size_factor
feature_map_size = [*feature_map_size, 1][::-1]
prep_func = partial(
prep_pointcloud,
root_path = cfg.KITTI_ROOT_PATH,
class_names = cfg.CLASS_NAMES,
voxel_generator=voxel_generator,
target_assigner=target_assigner,
training=training,
max_voxels = cfg.MAX_NUMBER_OF_VOXELS,
remove_outside_points = False,
create_targets = training,
shuffle_points = cfg.SHUFFLE_POINTS,
gt_rotation_noise = cfg.GROUNDTRUTH_ROTATION_UNIFORM_NOISE,
gt_loc_noise_std = cfg.GROUNDTRUTH_LOCALIZATION_NOISE_STD,
global_rotation_noise = cfg.GLOBAL_ROTATION_UNIFORM_NOISE,
global_scaling_noise = cfg.GLOBAL_SCALING_UNIFORM_NOISE,
global_loc_noise_std = (0.2, 0.2, 0.2),
global_random_rot_range = cfg.GLOBAL_RANDOM_ROTATION_RANGE_PER_OBJECT,
db_sampler = db_sampler,
unlabeled_db_sampler = u_db_sampler,
generate_bev = generate_bev,
without_reflectivity=without_reflectivity,
num_point_features=num_point_features,
anchor_area_threshold=cfg.ANCHOR_AREA_THRESHOLD,
gt_points_drop=cfg.GROUNDTRUTH_POINTS_DROP_PERCENTAGE,
gt_drop_max_keep=cfg.GROUNDTRUTH_DROP_MAX_KEEP_POINTS,
remove_points_after_sample=cfg.REMOVE_POINTS_AFTER_SAMPLE,
remove_environment=cfg.REMOVE_ENVIRONMENT,
use_group_id=False,
out_size_factor=out_size_factor)
dataset = KittiDataset(
info_path = cfg.KITTI_INFO_PATH,
root_path=cfg.KITTI_ROOT_PATH,
num_point_features=num_point_features,
target_assigner=target_assigner,
feature_map_size=feature_map_size,
prep_func=prep_func
)
return dataset
def build_input_reader(input_reader_config,
model_config,
training,
voxel_generator,
target_assigner=None) -> DatasetWrapper:
dataset = build_dataset(input_reader_config,
model_config,
training,
voxel_generator,
target_assigner)
dataset = DatasetWrapper(dataset)
return dataset
def build_lr_schedules(optimizer_config, optimizer, last_step=-1):
return _create_learning_rate_scheduler(optimizer_config.learning_rate,
optimizer,
last_step=last_step)
def _create_learning_rate_scheduler(learning_rate_config, optimizer, last_step=-1):
"""Create optimizer learning rate scheduler based on config.
Args:
learning_rate_config: A LearningRate proto message.
Returns:
A learning rate.
Raises:
ValueError: when using an unsupported input data type.
"""
lr_scheduler = None
learning_rate_type = learning_rate_config.name
if learning_rate_type == 'constant_learning_rate':
lr_scheduler = learning_schedules.Constant(
optimizer, last_step=last_step)
if learning_rate_type == 'exponential_decay_learning_rate':
config = learning_rate_config
lr_scheduler = learning_schedules.ExponentialDecay(
optimizer, config.decay_steps,
config.decay_factor, config.staircase, last_step=last_step)
if learning_rate_type == 'manual_step_learning_rate':
config = learning_rate_config
if not config.schedule:
raise ValueError('Empty learning rate schedule.')
learning_rate_step_boundaries = [x.step for x in config.schedule]
learning_rate_sequence = [config.initial_learning_rate]
learning_rate_sequence += [x.learning_rate for x in config.schedule]
lr_scheduler = learning_schedules.ManualStepping(
optimizer, learning_rate_step_boundaries, learning_rate_sequence,
last_step=last_step)
if learning_rate_type == 'cosine_decay_learning_rate':
config = learning_rate_config.cosine_decay_learning_rate
lr_scheduler = learning_schedules.CosineDecayWithWarmup(
optimizer, config.total_steps,
config.warmup_learning_rate, config.warmup_steps,
last_step=last_step)
if lr_scheduler is None:
raise ValueError('Learning_rate %s not supported.' % learning_rate_type)
return lr_scheduler
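# Illustrative usage (a sketch; the stepping convention of the schedulers in
# learning_schedules is assumed to follow the usual PyTorch pattern, and
# `train_one_step` is a hypothetical helper):
#
#   optimizer = torch.optim.Adam(net.parameters(), lr=2e-4)
#   lr_scheduler = build_lr_schedules(optimizer_config, optimizer, last_step=-1)
#   for step in range(total_steps):
#       train_one_step(net, optimizer)
#       lr_scheduler.step()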
| 34.950758
| 104
| 0.700336
|
f1923e6a60d6703b867a3d141b6b252c7f6100ff
| 216
|
py
|
Python
|
catalog/bindings/gmd/bounded_feature_type.py
|
NIVANorge/s-enda-playground
|
56ae0a8978f0ba8a5546330786c882c31e17757a
|
[
"Apache-2.0"
] | null | null | null |
catalog/bindings/gmd/bounded_feature_type.py
|
NIVANorge/s-enda-playground
|
56ae0a8978f0ba8a5546330786c882c31e17757a
|
[
"Apache-2.0"
] | null | null | null |
catalog/bindings/gmd/bounded_feature_type.py
|
NIVANorge/s-enda-playground
|
56ae0a8978f0ba8a5546330786c882c31e17757a
|
[
"Apache-2.0"
] | null | null | null |
from dataclasses import dataclass
from bindings.gmd.abstract_feature_type import AbstractFeatureType
__NAMESPACE__ = "http://www.opengis.net/gml"
@dataclass
class BoundedFeatureType(AbstractFeatureType):
pass
| 21.6
| 66
| 0.828704
|
3a6ee928b99933fe9275310a59ba9981df8294f1
| 64,738
|
py
|
Python
|
distributed/dashboard/components/scheduler.py
|
MichaelSchreier/distributed
|
7b9fccb1f5d6e5e262500c342650afbd5bfeccd7
|
[
"BSD-3-Clause"
] | null | null | null |
distributed/dashboard/components/scheduler.py
|
MichaelSchreier/distributed
|
7b9fccb1f5d6e5e262500c342650afbd5bfeccd7
|
[
"BSD-3-Clause"
] | null | null | null |
distributed/dashboard/components/scheduler.py
|
MichaelSchreier/distributed
|
7b9fccb1f5d6e5e262500c342650afbd5bfeccd7
|
[
"BSD-3-Clause"
] | null | null | null |
from collections import defaultdict
import logging
import math
from numbers import Number
import operator
import os
from bokeh.layouts import column, row
from bokeh.models import (
ColumnDataSource,
ColorBar,
DataRange1d,
HoverTool,
ResetTool,
PanTool,
WheelZoomTool,
TapTool,
OpenURL,
Range1d,
value,
NumeralTickFormatter,
BoxZoomTool,
AdaptiveTicker,
BasicTicker,
NumberFormatter,
BoxSelectTool,
GroupFilter,
CDSView,
)
from bokeh.models.markers import Triangle
from bokeh.models.widgets import DataTable, TableColumn
from bokeh.plotting import figure
from bokeh.palettes import Viridis11
from bokeh.themes import Theme
from bokeh.transform import factor_cmap, linear_cmap
from bokeh.io import curdoc
import dask
from dask.utils import format_bytes, key_split
from toolz import pipe
from tornado import escape
try:
import numpy as np
except ImportError:
np = False
from distributed.dashboard.components import add_periodic_callback
from distributed.dashboard.components.shared import (
DashboardComponent,
ProfileTimePlot,
ProfileServer,
SystemMonitor,
)
from distributed.dashboard.utils import (
transpose,
BOKEH_VERSION,
PROFILING,
without_property_validation,
update,
)
from distributed.metrics import time
from distributed.utils import log_errors, format_time, parse_timedelta
from distributed.diagnostics.progress_stream import color_of, progress_quads
from distributed.diagnostics.graph_layout import GraphLayout
from distributed.diagnostics.task_stream import TaskStreamPlugin
try:
from cytoolz.curried import map, concat, groupby
except ImportError:
from toolz.curried import map, concat, groupby
if dask.config.get("distributed.dashboard.export-tool"):
from distributed.dashboard.export_tool import ExportTool
else:
ExportTool = None
logger = logging.getLogger(__name__)
from jinja2 import Environment, FileSystemLoader
env = Environment(
loader=FileSystemLoader(os.path.join(os.path.dirname(__file__), "..", "templates"))
)
BOKEH_THEME = Theme(os.path.join(os.path.dirname(__file__), "..", "theme.yaml"))
nan = float("nan")
inf = float("inf")
class Occupancy(DashboardComponent):
""" Occupancy (in time) per worker """
def __init__(self, scheduler, **kwargs):
with log_errors():
self.scheduler = scheduler
self.source = ColumnDataSource(
{
"occupancy": [0, 0],
"worker": ["a", "b"],
"x": [0.0, 0.1],
"y": [1, 2],
"ms": [1, 2],
"color": ["red", "blue"],
"escaped_worker": ["a", "b"],
}
)
fig = figure(
title="Occupancy",
tools="",
id="bk-occupancy-plot",
x_axis_type="datetime",
**kwargs,
)
rect = fig.rect(
source=self.source, x="x", width="ms", y="y", height=1, color="color"
)
rect.nonselection_glyph = None
fig.xaxis.minor_tick_line_alpha = 0
fig.yaxis.visible = False
fig.ygrid.visible = False
# fig.xaxis[0].formatter = NumeralTickFormatter(format='0.0s')
fig.x_range.start = 0
tap = TapTool(callback=OpenURL(url="./info/worker/@escaped_worker.html"))
hover = HoverTool()
hover.tooltips = "@worker : @occupancy s."
hover.point_policy = "follow_mouse"
fig.add_tools(hover, tap)
self.root = fig
@without_property_validation
def update(self):
with log_errors():
workers = list(self.scheduler.workers.values())
y = list(range(len(workers)))
occupancy = [ws.occupancy for ws in workers]
ms = [occ * 1000 for occ in occupancy]
x = [occ / 500 for occ in occupancy]
total = sum(occupancy)
color = []
for ws in workers:
if ws in self.scheduler.idle:
color.append("red")
elif ws in self.scheduler.saturated:
color.append("green")
else:
color.append("blue")
if total:
self.root.title.text = "Occupancy -- total time: %s wall time: %s" % (
format_time(total),
format_time(total / self.scheduler.total_nthreads),
)
else:
self.root.title.text = "Occupancy"
if occupancy:
result = {
"occupancy": occupancy,
"worker": [ws.address for ws in workers],
"ms": ms,
"color": color,
"escaped_worker": [escape.url_escape(ws.address) for ws in workers],
"x": x,
"y": y,
}
update(self.source, result)
class ProcessingHistogram(DashboardComponent):
""" How many tasks are on each worker """
def __init__(self, scheduler, **kwargs):
with log_errors():
self.last = 0
self.scheduler = scheduler
self.source = ColumnDataSource(
{"left": [1, 2], "right": [10, 10], "top": [0, 0]}
)
self.root = figure(
title="Tasks Processing (Histogram)",
id="bk-nprocessing-histogram-plot",
name="processing_hist",
y_axis_label="frequency",
tools="",
**kwargs,
)
self.root.xaxis.minor_tick_line_alpha = 0
self.root.ygrid.visible = False
self.root.toolbar.logo = None
self.root.toolbar_location = None
self.root.quad(
source=self.source,
left="left",
right="right",
bottom=0,
top="top",
color="deepskyblue",
fill_alpha=0.5,
)
@without_property_validation
def update(self):
L = [len(ws.processing) for ws in self.scheduler.workers.values()]
counts, x = np.histogram(L, bins=40)
self.source.data.update({"left": x[:-1], "right": x[1:], "top": counts})
class NBytesHistogram(DashboardComponent):
""" How many tasks are on each worker """
def __init__(self, scheduler, **kwargs):
with log_errors():
self.last = 0
self.scheduler = scheduler
self.source = ColumnDataSource(
{"left": [1, 2], "right": [10, 10], "top": [0, 0]}
)
self.root = figure(
title="Bytes Stored (Histogram)",
name="nbytes_hist",
id="bk-nbytes-histogram-plot",
y_axis_label="frequency",
tools="",
**kwargs,
)
self.root.xaxis[0].formatter = NumeralTickFormatter(format="0.0 b")
self.root.xaxis.ticker = AdaptiveTicker(mantissas=[1, 256, 512], base=1024)
self.root.xaxis.major_label_orientation = -math.pi / 12
self.root.xaxis.minor_tick_line_alpha = 0
self.root.ygrid.visible = False
self.root.toolbar.logo = None
self.root.toolbar_location = None
self.root.quad(
source=self.source,
left="left",
right="right",
bottom=0,
top="top",
color="deepskyblue",
fill_alpha=0.5,
)
@without_property_validation
def update(self):
nbytes = np.asarray([ws.nbytes for ws in self.scheduler.workers.values()])
counts, x = np.histogram(nbytes, bins=40)
d = {"left": x[:-1], "right": x[1:], "top": counts}
self.source.data.update(d)
self.root.title.text = "Bytes stored (Histogram): " + format_bytes(nbytes.sum())
class BandwidthTypes(DashboardComponent):
""" Bar chart showing bandwidth per type """
def __init__(self, scheduler, **kwargs):
with log_errors():
self.last = 0
self.scheduler = scheduler
self.source = ColumnDataSource(
{
"bandwidth": [1, 2],
"bandwidth-half": [0.5, 1],
"type": ["a", "b"],
"bandwidth_text": ["1", "2"],
}
)
fig = figure(
title="Bandwidth by Type",
tools="",
id="bk-bandwidth-type-plot",
name="bandwidth_type_histogram",
y_range=["a", "b"],
**kwargs,
)
rect = fig.rect(
source=self.source,
x="bandwidth-half",
y="type",
width="bandwidth",
height=1,
color="blue",
)
fig.x_range.start = 0
fig.xaxis[0].formatter = NumeralTickFormatter(format="0.0 b")
fig.xaxis.ticker = AdaptiveTicker(mantissas=[1, 256, 512], base=1024)
rect.nonselection_glyph = None
fig.xaxis.minor_tick_line_alpha = 0
fig.ygrid.visible = False
fig.toolbar.logo = None
fig.toolbar_location = None
hover = HoverTool()
hover.tooltips = "@type: @bandwidth_text / s"
hover.point_policy = "follow_mouse"
fig.add_tools(hover)
self.fig = fig
@without_property_validation
def update(self):
with log_errors():
bw = self.scheduler.bandwidth_types
self.fig.y_range.factors = list(sorted(bw))
result = {
"bandwidth": list(bw.values()),
"bandwidth-half": [b / 2 for b in bw.values()],
"type": list(bw.keys()),
"bandwidth_text": list(map(format_bytes, bw.values())),
}
self.fig.title.text = "Bandwidth: " + format_bytes(self.scheduler.bandwidth)
update(self.source, result)
class BandwidthWorkers(DashboardComponent):
""" How many tasks are on each worker """
def __init__(self, scheduler, **kwargs):
with log_errors():
self.last = 0
self.scheduler = scheduler
self.source = ColumnDataSource(
{
"bandwidth": [1, 2],
"source": ["a", "b"],
"destination": ["a", "b"],
"bandwidth_text": ["1", "2"],
}
)
values = [hex(x)[2:] for x in range(64, 256)][::-1]
mapper = linear_cmap(
field_name="bandwidth",
palette=["#" + x + x + "FF" for x in values],
low=0,
high=1,
)
fig = figure(
title="Bandwidth by Worker",
tools="",
id="bk-bandwidth-worker-plot",
name="bandwidth_worker_heatmap",
x_range=["a", "b"],
y_range=["a", "b"],
**kwargs,
)
fig.xaxis.major_label_orientation = -math.pi / 12
rect = fig.rect(
source=self.source,
x="source",
y="destination",
color=mapper,
height=1,
width=1,
)
self.color_map = mapper["transform"]
color_bar = ColorBar(
color_mapper=self.color_map,
label_standoff=12,
border_line_color=None,
location=(0, 0),
)
color_bar.formatter = NumeralTickFormatter(format="0.0 b")
color_bar.ticker = AdaptiveTicker(
mantissas=[1, 64, 128, 256, 512], base=1024
)
fig.add_layout(color_bar, "right")
fig.toolbar.logo = None
fig.toolbar_location = None
hover = HoverTool()
hover.tooltips = """
<div>
<p><b>Source:</b> @source </p>
<p><b>Destination:</b> @destination </p>
<p><b>Bandwidth:</b> @bandwidth_text / s</p>
</div>
"""
hover.point_policy = "follow_mouse"
fig.add_tools(hover)
self.fig = fig
@without_property_validation
def update(self):
with log_errors():
bw = self.scheduler.bandwidth_workers
if not bw:
return
def name(address):
ws = self.scheduler.workers[address]
if ws.name is not None:
return str(ws.name)
else:
return address
x, y, value = zip(*[(name(a), name(b), c) for (a, b), c in bw.items()])
self.color_map.high = max(value)
factors = list(sorted(set(x + y)))
self.fig.x_range.factors = factors
self.fig.y_range.factors = factors[::-1]
result = {
"source": x,
"destination": y,
"bandwidth": value,
"bandwidth_text": list(map(format_bytes, value)),
}
self.fig.title.text = "Bandwidth: " + format_bytes(self.scheduler.bandwidth)
update(self.source, result)
class MemoryByKey(DashboardComponent):
""" Bar chart showing memory use by key prefix"""
def __init__(self, scheduler, **kwargs):
with log_errors():
self.last = 0
self.scheduler = scheduler
self.source = ColumnDataSource(
{
"name": ["a", "b"],
"nbytes": [100, 1000],
"count": [1, 2],
"color": ["blue", "blue"],
}
)
fig = figure(
title="Memory Use",
tools="",
id="bk-memory-by-key-plot",
name="memory_by_key",
x_range=["a", "b"],
**kwargs,
)
rect = fig.vbar(
source=self.source, x="name", top="nbytes", width=0.9, color="color"
)
fig.yaxis[0].formatter = NumeralTickFormatter(format="0.0 b")
fig.yaxis.ticker = AdaptiveTicker(mantissas=[1, 256, 512], base=1024)
fig.xaxis.major_label_orientation = -math.pi / 12
rect.nonselection_glyph = None
fig.xaxis.minor_tick_line_alpha = 0
fig.ygrid.visible = False
fig.toolbar.logo = None
fig.toolbar_location = None
hover = HoverTool()
hover.tooltips = "@name: @nbytes_text"
hover.tooltips = """
<div>
<p><b>Name:</b> @name</p>
<p><b>Bytes:</b> @nbytes_text </p>
<p><b>Count:</b> @count objects </p>
</div>
"""
hover.point_policy = "follow_mouse"
fig.add_tools(hover)
self.fig = fig
@without_property_validation
def update(self):
with log_errors():
counts = defaultdict(int)
nbytes = defaultdict(int)
for ws in self.scheduler.workers.values():
for ts in ws.has_what:
ks = key_split(ts.key)
counts[ks] += 1
nbytes[ks] += ts.nbytes
names = list(sorted(counts))
self.fig.x_range.factors = names
result = {
"name": names,
"count": [counts[name] for name in names],
"nbytes": [nbytes[name] for name in names],
"nbytes_text": [format_bytes(nbytes[name]) for name in names],
"color": [color_of(name) for name in names],
}
self.fig.title.text = "Total Use: " + format_bytes(sum(nbytes.values()))
update(self.source, result)
class CurrentLoad(DashboardComponent):
""" How many tasks are on each worker """
def __init__(self, scheduler, width=600, **kwargs):
with log_errors():
self.last = 0
self.scheduler = scheduler
self.source = ColumnDataSource(
{
"nprocessing": [1, 2],
"nprocessing-half": [0.5, 1],
"nprocessing-color": ["red", "blue"],
"nbytes": [1, 2],
"nbytes-half": [0.5, 1],
"nbytes_text": ["1B", "2B"],
"cpu": [1, 2],
"cpu-half": [0.5, 1],
"worker": ["a", "b"],
"y": [1, 2],
"nbytes-color": ["blue", "blue"],
"escaped_worker": ["a", "b"],
}
)
processing = figure(
title="Tasks Processing",
tools="",
id="bk-nprocessing-plot",
name="processing_hist",
width=int(width / 2),
**kwargs,
)
rect = processing.rect(
source=self.source,
x="nprocessing-half",
y="y",
width="nprocessing",
height=1,
color="nprocessing-color",
)
processing.x_range.start = 0
rect.nonselection_glyph = None
nbytes = figure(
title="Bytes stored",
tools="",
id="bk-nbytes-worker-plot",
width=int(width / 2),
name="nbytes_hist",
**kwargs,
)
rect = nbytes.rect(
source=self.source,
x="nbytes-half",
y="y",
width="nbytes",
height=1,
color="nbytes-color",
)
rect.nonselection_glyph = None
cpu = figure(
title="CPU Utilization",
tools="",
id="bk-cpu-worker-plot",
width=int(width / 2),
name="cpu_hist",
x_range=(0, None),
**kwargs,
)
rect = cpu.rect(
source=self.source,
x="cpu-half",
y="y",
width="cpu",
height=1,
color="blue",
)
rect.nonselection_glyph = None
nbytes.axis[0].ticker = BasicTicker(mantissas=[1, 256, 512], base=1024)
nbytes.xaxis[0].formatter = NumeralTickFormatter(format="0.0 b")
nbytes.xaxis.major_label_orientation = -math.pi / 12
nbytes.x_range.start = 0
for fig in [processing, nbytes, cpu]:
fig.xaxis.minor_tick_line_alpha = 0
fig.yaxis.visible = False
fig.ygrid.visible = False
tap = TapTool(
callback=OpenURL(url="./info/worker/@escaped_worker.html")
)
fig.add_tools(tap)
fig.toolbar.logo = None
fig.toolbar_location = None
fig.yaxis.visible = False
hover = HoverTool()
hover.tooltips = "@worker : @nprocessing tasks"
hover.point_policy = "follow_mouse"
processing.add_tools(hover)
hover = HoverTool()
hover.tooltips = "@worker : @nbytes_text"
hover.point_policy = "follow_mouse"
nbytes.add_tools(hover)
hover = HoverTool()
hover.tooltips = "@worker : @cpu %"
hover.point_policy = "follow_mouse"
cpu.add_tools(hover)
self.processing_figure = processing
self.nbytes_figure = nbytes
self.cpu_figure = cpu
processing.y_range = nbytes.y_range
cpu.y_range = nbytes.y_range
@without_property_validation
def update(self):
with log_errors():
workers = list(self.scheduler.workers.values())
y = list(range(len(workers)))
cpu = [int(ws.metrics["cpu"]) for ws in workers]
nprocessing = [len(ws.processing) for ws in workers]
processing_color = []
for ws in workers:
if ws in self.scheduler.idle:
processing_color.append("red")
elif ws in self.scheduler.saturated:
processing_color.append("green")
else:
processing_color.append("blue")
nbytes = [ws.metrics["memory"] for ws in workers]
nbytes_text = [format_bytes(nb) for nb in nbytes]
nbytes_color = []
max_limit = 0
for ws, nb in zip(workers, nbytes):
limit = (
getattr(self.scheduler.workers[ws.address], "memory_limit", inf)
or inf
)
if limit > max_limit and limit != inf:
max_limit = limit
if nb > limit:
nbytes_color.append("red")
elif nb > limit / 2:
nbytes_color.append("orange")
else:
nbytes_color.append("blue")
now = time()
if any(nprocessing) or self.last + 1 < now:
self.last = now
result = {
"cpu": cpu,
"cpu-half": [c / 2 for c in cpu],
"nprocessing": nprocessing,
"nprocessing-half": [np / 2 for np in nprocessing],
"nprocessing-color": processing_color,
"nbytes": nbytes,
"nbytes-half": [nb / 2 for nb in nbytes],
"nbytes-color": nbytes_color,
"nbytes_text": nbytes_text,
"worker": [ws.address for ws in workers],
"escaped_worker": [escape.url_escape(ws.address) for ws in workers],
"y": y,
}
self.nbytes_figure.title.text = "Bytes stored: " + format_bytes(
sum(nbytes)
)
self.nbytes_figure.x_range.end = max_limit
if self.scheduler.workers:
self.cpu_figure.x_range.end = (
max(ws.nthreads or 1 for ws in self.scheduler.workers.values())
* 100
)
else:
self.cpu_figure.x_range.end = 100
update(self.source, result)
class StealingTimeSeries(DashboardComponent):
def __init__(self, scheduler, **kwargs):
self.scheduler = scheduler
self.source = ColumnDataSource(
{"time": [time(), time() + 1], "idle": [0, 0.1], "saturated": [0, 0.1]}
)
x_range = DataRange1d(follow="end", follow_interval=20000, range_padding=0)
fig = figure(
title="Idle and Saturated Workers Over Time",
x_axis_type="datetime",
y_range=[-0.1, len(scheduler.workers) + 0.1],
height=150,
tools="",
x_range=x_range,
**kwargs,
)
fig.line(source=self.source, x="time", y="idle", color="red")
fig.line(source=self.source, x="time", y="saturated", color="green")
fig.yaxis.minor_tick_line_color = None
fig.add_tools(
ResetTool(), PanTool(dimensions="width"), WheelZoomTool(dimensions="width")
)
self.root = fig
@without_property_validation
def update(self):
with log_errors():
result = {
"time": [time() * 1000],
"idle": [len(self.scheduler.idle)],
"saturated": [len(self.scheduler.saturated)],
}
if PROFILING:
curdoc().add_next_tick_callback(
lambda: self.source.stream(result, 10000)
)
else:
self.source.stream(result, 10000)
class StealingEvents(DashboardComponent):
def __init__(self, scheduler, **kwargs):
self.scheduler = scheduler
self.steal = scheduler.extensions["stealing"]
self.last = 0
self.source = ColumnDataSource(
{
"time": [time() - 20, time()],
"level": [0, 15],
"color": ["white", "white"],
"duration": [0, 0],
"radius": [1, 1],
"cost_factor": [0, 10],
"count": [1, 1],
}
)
x_range = DataRange1d(follow="end", follow_interval=20000, range_padding=0)
fig = figure(
title="Stealing Events",
x_axis_type="datetime",
y_axis_type="log",
height=250,
tools="",
x_range=x_range,
**kwargs,
)
fig.circle(
source=self.source,
x="time",
y="cost_factor",
color="color",
size="radius",
alpha=0.5,
)
fig.yaxis.axis_label = "Cost Multiplier"
hover = HoverTool()
hover.tooltips = "Level: @level, Duration: @duration, Count: @count, Cost factor: @cost_factor"
hover.point_policy = "follow_mouse"
fig.add_tools(
hover,
ResetTool(),
PanTool(dimensions="width"),
WheelZoomTool(dimensions="width"),
)
self.root = fig
def convert(self, msgs):
""" Convert a log message to a glyph """
total_duration = 0
for msg in msgs:
time, level, key, duration, sat, occ_sat, idl, occ_idl = msg
total_duration += duration
try:
color = Viridis11[level]
except (KeyError, IndexError):
color = "black"
radius = math.sqrt(min(total_duration, 10)) * 30 + 2
d = {
"time": time * 1000,
"level": level,
"count": len(msgs),
"color": color,
"duration": total_duration,
"radius": radius,
"cost_factor": min(10, self.steal.cost_multipliers[level]),
}
return d
@without_property_validation
def update(self):
with log_errors():
log = self.steal.log
n = self.steal.count - self.last
log = [log[-i] for i in range(1, n + 1) if isinstance(log[-i], list)]
self.last = self.steal.count
if log:
new = pipe(
log,
map(groupby(1)),
map(dict.values),
concat,
map(self.convert),
list,
transpose,
)
if PROFILING:
curdoc().add_next_tick_callback(
lambda: self.source.stream(new, 10000)
)
else:
self.source.stream(new, 10000)
class Events(DashboardComponent):
def __init__(self, scheduler, name, height=150, **kwargs):
self.scheduler = scheduler
self.action_ys = dict()
self.last = 0
self.name = name
self.source = ColumnDataSource(
{"time": [], "action": [], "hover": [], "y": [], "color": []}
)
x_range = DataRange1d(follow="end", follow_interval=200000)
fig = figure(
title=name,
x_axis_type="datetime",
height=height,
tools="",
x_range=x_range,
**kwargs,
)
fig.circle(
source=self.source,
x="time",
y="y",
color="color",
size=50,
alpha=0.5,
**{"legend_field" if BOKEH_VERSION >= "1.4" else "legend": "action"},
)
fig.yaxis.axis_label = "Action"
fig.legend.location = "top_left"
hover = HoverTool()
hover.tooltips = "@action<br>@hover"
hover.point_policy = "follow_mouse"
fig.add_tools(
hover,
ResetTool(),
PanTool(dimensions="width"),
WheelZoomTool(dimensions="width"),
)
self.root = fig
@without_property_validation
def update(self):
with log_errors():
log = self.scheduler.events[self.name]
n = self.scheduler.event_counts[self.name] - self.last
if log:
log = [log[-i] for i in range(1, n + 1)]
self.last = self.scheduler.event_counts[self.name]
if log:
actions = []
times = []
hovers = []
ys = []
colors = []
for msg in log:
times.append(msg["time"] * 1000)
action = msg["action"]
actions.append(action)
try:
ys.append(self.action_ys[action])
except KeyError:
self.action_ys[action] = len(self.action_ys)
ys.append(self.action_ys[action])
colors.append(color_of(action))
hovers.append("TODO")
new = {
"time": times,
"action": actions,
"hover": hovers,
"y": ys,
"color": colors,
}
if PROFILING:
curdoc().add_next_tick_callback(
lambda: self.source.stream(new, 10000)
)
else:
self.source.stream(new, 10000)
class TaskStream(DashboardComponent):
def __init__(self, scheduler, n_rectangles=1000, clear_interval="20s", **kwargs):
self.scheduler = scheduler
self.offset = 0
es = [p for p in self.scheduler.plugins if isinstance(p, TaskStreamPlugin)]
if not es:
self.plugin = TaskStreamPlugin(self.scheduler)
else:
self.plugin = es[0]
self.index = max(0, self.plugin.index - n_rectangles)
self.workers = dict()
self.n_rectangles = n_rectangles
clear_interval = parse_timedelta(clear_interval, default="ms")
self.clear_interval = clear_interval
self.last = 0
self.last_seen = 0
self.source, self.root = task_stream_figure(clear_interval, **kwargs)
# Required for update callback
self.task_stream_index = [0]
@without_property_validation
def update(self):
if self.index == self.plugin.index:
return
with log_errors():
if self.index and len(self.source.data["start"]):
start = min(self.source.data["start"])
duration = max(self.source.data["duration"])
boundary = (self.offset + start - duration) / 1000
else:
boundary = self.offset
rectangles = self.plugin.rectangles(
istart=self.index, workers=self.workers, start_boundary=boundary
)
n = len(rectangles["name"])
self.index = self.plugin.index
if not rectangles["start"]:
return
# If it has been a while since we've updated the plot
if time() > self.last_seen + self.clear_interval:
new_start = min(rectangles["start"]) - self.offset
old_start = min(self.source.data["start"])
old_end = max(
map(
operator.add,
self.source.data["start"],
self.source.data["duration"],
)
)
density = (
sum(self.source.data["duration"])
/ len(self.workers)
/ (old_end - old_start)
)
                # If the whitespace gap exceeds twice the old width, or the
                # stream has become sparse, clear the old rectangles
if (new_start - old_end) > (old_end - old_start) * 2 or density < 0.05:
self.source.data.update({k: [] for k in rectangles}) # clear
self.offset = min(rectangles["start"]) # redefine offset
rectangles["start"] = [x - self.offset for x in rectangles["start"]]
self.last_seen = time()
# Convert to numpy for serialization speed
if n >= 10 and np:
for k, v in rectangles.items():
if isinstance(v[0], Number):
rectangles[k] = np.array(v)
if PROFILING:
curdoc().add_next_tick_callback(
lambda: self.source.stream(rectangles, self.n_rectangles)
)
else:
self.source.stream(rectangles, self.n_rectangles)
def task_stream_figure(clear_interval="20s", **kwargs):
"""
kwargs are applied to the bokeh.models.plots.Plot constructor
"""
clear_interval = parse_timedelta(clear_interval, default="ms")
source = ColumnDataSource(
data=dict(
start=[time() - clear_interval],
duration=[0.1],
key=["start"],
name=["start"],
color=["white"],
duration_text=["100 ms"],
worker=["foo"],
y=[0],
worker_thread=[1],
alpha=[0.0],
)
)
x_range = DataRange1d(range_padding=0)
y_range = DataRange1d(range_padding=0)
root = figure(
name="task_stream",
title="Task Stream",
id="bk-task-stream-plot",
x_range=x_range,
y_range=y_range,
toolbar_location="above",
x_axis_type="datetime",
min_border_right=35,
tools="",
**kwargs,
)
rect = root.rect(
source=source,
x="start",
y="y",
width="duration",
height=0.4,
fill_color="color",
line_color="color",
line_alpha=0.6,
fill_alpha="alpha",
line_width=3,
)
rect.nonselection_glyph = None
root.yaxis.major_label_text_alpha = 0
root.yaxis.minor_tick_line_alpha = 0
root.yaxis.major_tick_line_alpha = 0
root.xgrid.visible = False
hover = HoverTool(
point_policy="follow_mouse",
tooltips="""
<div>
<span style="font-size: 12px; font-weight: bold;">@name:</span>
<span style="font-size: 10px; font-family: Monaco, monospace;">@duration_text</span>
</div>
""",
)
tap = TapTool(callback=OpenURL(url="/profile?key=@name"))
root.add_tools(
hover,
tap,
BoxZoomTool(),
ResetTool(),
PanTool(dimensions="width"),
WheelZoomTool(dimensions="width"),
)
if ExportTool:
export = ExportTool()
export.register_plot(root)
root.add_tools(export)
return source, root
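# Illustrative standalone use of the figure factory above (a sketch assuming a
# running Bokeh document; normally the TaskStream component wires this up itself):
#
#   source, root = task_stream_figure(clear_interval="20s")
#   curdoc().add_root(root)
#   source.stream({...})  # rectangles in the same column layout as `source`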
class TaskGraph(DashboardComponent):
"""
A dynamic node-link diagram for the task graph on the scheduler
See also the GraphLayout diagnostic at
distributed/diagnostics/graph_layout.py
"""
def __init__(self, scheduler, **kwargs):
self.scheduler = scheduler
self.layout = GraphLayout(scheduler)
self.invisible_count = 0 # number of invisible nodes
self.node_source = ColumnDataSource(
{"x": [], "y": [], "name": [], "state": [], "visible": [], "key": []}
)
self.edge_source = ColumnDataSource({
"x0": [],
"x1": [],
"cx0": [],
"cx1": [],
"cy0": [],
"cy1": [],
"y0": [],
"y1": [],
"visible": []
})
node_view = CDSView(
source=self.node_source,
filters=[GroupFilter(column_name="visible", group="True")],
)
edge_view = CDSView(
source=self.edge_source,
filters=[GroupFilter(column_name="visible", group="True")],
)
node_colors = factor_cmap(
"state",
factors=["waiting", "processing", "memory", "released", "erred"],
palette=["gray", "#4daf4a", "#e41a1c", "#377eb8", "black"],
)
self.root = figure(title="Task Graph", output_backend="webgl", **kwargs)
self.root.bezier(
x0="x0",
y0="y0",
x1="x1",
y1="y1",
cx0="cx0",
cy0="cy0",
cx1="cx1",
cy1="cy1",
source=self.edge_source,
line_width=1,
view=edge_view,
color="#377eb8",
alpha=1
)
# prevents edges intersecting with nodes
self.root.circle(
x="x",
y="y",
size=15,
line_width=0,
color='white',
name="margin",
source=self.node_source,
view=node_view,
**{"legend_field" if BOKEH_VERSION >= "1.4" else "legend": "state"},
)
arrow = Triangle(
angle=-np.pi/2,
x="x1",
y="y1",
size=8,
line_color=None,
line_width=0,
fill_color='#377eb8'
)
self.root.add_glyph(self.edge_source, arrow)
rect = self.root.circle(
x="x",
y="y",
size=10,
line_width=2,
color=node_colors,
fill_alpha=0.5,
source=self.node_source,
view=node_view,
**{"legend_field" if BOKEH_VERSION >= "1.4" else "legend": "state"},
)
self.root.xgrid.grid_line_color = None
self.root.ygrid.grid_line_color = None
self.root.xaxis.visible = False
self.root.yaxis.visible = False
hover = HoverTool(
point_policy="follow_mouse",
tooltips="<b>@name</b>: @state",
renderers=[rect],
)
tap = TapTool(callback=OpenURL(url="info/task/@key.html"), renderers=[rect])
rect.nonselection_glyph = None
self.root.add_tools(hover, tap)
@without_property_validation
def update(self):
with log_errors():
# conditionally hide "margin" if number of nodes is large
renderers = self.root.renderers
if len(self.node_source.data["x"]) > 1e3:
for renderer in renderers:
if renderer.name == "margin":
renderer.visible = False
else:
for renderer in renderers:
if renderer.name == "margin":
renderer.visible = True
# occasionally reset the column data source to remove old nodes
if self.invisible_count > len(self.node_source.data["x"]) / 2:
self.layout.reset_index()
self.invisible_count = 0
update = True
else:
update = False
new, self.layout.new = self.layout.new, []
new_edges = self.layout.new_edges
self.layout.new_edges = []
self.add_new_nodes_edges(new, new_edges, update=update)
self.patch_updates()
@without_property_validation
def add_new_nodes_edges(self, new, new_edges, update=False):
if new or update:
node_key = []
node_x = []
node_y = []
node_state = []
node_name = []
edge_x = []
edge_y = []
x = self.layout.x
y = self.layout.y
tasks = self.scheduler.tasks
for key in new:
try:
task = tasks[key]
except KeyError:
continue
xx = x[key]
yy = y[key]
node_key.append(escape.url_escape(key))
node_x.append(xx)
node_y.append(yy)
node_state.append(task.state)
node_name.append(task.prefix.name)
for a, b in new_edges:
try:
edge_x.append([x[a], x[b]])
edge_y.append([y[a], y[b]])
except KeyError:
pass
node = {
"x": node_x,
"y": node_y,
"state": node_state,
"name": node_name,
"key": node_key,
"visible": ["True"] * len(node_x),
}
control_offset = .5
end_offset = .05
edge = {
"x0": [x[0] + end_offset for x in edge_x],
"x1": [x[1] - end_offset for x in edge_x],
"cx0": [x[0] + control_offset + end_offset for x in edge_x],
"cx1": [x[1] - control_offset - end_offset for x in edge_x],
"cy0": [y[0] for y in edge_y],
"cy1": [y[1] for y in edge_y],
"y0": [y[0] for y in edge_y],
"y1": [y[1] for y in edge_y],
"visible": ["True"] * len(edge_x)
}
if update or not len(self.node_source.data["x"]):
# see https://github.com/bokeh/bokeh/issues/7523
self.node_source.data.update(node)
self.edge_source.data.update(edge)
else:
self.node_source.stream(node)
self.edge_source.stream(edge)
@without_property_validation
def patch_updates(self):
"""
Small updates like color changes or lost nodes from task transitions
"""
n = len(self.node_source.data["x"])
m = len(self.edge_source.data["x0"])
if self.layout.state_updates:
state_updates = self.layout.state_updates
self.layout.state_updates = []
updates = [(i, c) for i, c in state_updates if i < n]
self.node_source.patch({"state": updates})
if self.layout.visible_updates:
updates = self.layout.visible_updates
updates = [(i, c) for i, c in updates if i < n]
            self.layout.visible_updates = []
self.node_source.patch({"visible": updates})
self.invisible_count += len(updates)
if self.layout.visible_edge_updates:
updates = self.layout.visible_edge_updates
updates = [(i, c) for i, c in updates if i < m]
            self.layout.visible_edge_updates = []
self.edge_source.patch({"visible": updates})
def __del__(self):
self.scheduler.remove_plugin(self.layout)
class TaskProgress(DashboardComponent):
""" Progress bars per task type """
def __init__(self, scheduler, **kwargs):
self.scheduler = scheduler
data = progress_quads(
dict(all={}, memory={}, erred={}, released={}, processing={})
)
self.source = ColumnDataSource(data=data)
x_range = DataRange1d(range_padding=0)
y_range = Range1d(-8, 0)
self.root = figure(
id="bk-task-progress-plot",
title="Progress",
name="task_progress",
x_range=x_range,
y_range=y_range,
toolbar_location=None,
tools="",
**kwargs,
)
self.root.line( # just to define early ranges
x=[0, 0.9], y=[-1, 0], line_color="#FFFFFF", alpha=0.0
)
self.root.quad(
source=self.source,
top="top",
bottom="bottom",
left="left",
right="right",
fill_color="#aaaaaa",
line_color="#aaaaaa",
fill_alpha=0.1,
line_alpha=0.3,
)
self.root.quad(
source=self.source,
top="top",
bottom="bottom",
left="left",
right="released-loc",
fill_color="color",
line_color="color",
fill_alpha=0.6,
)
self.root.quad(
source=self.source,
top="top",
bottom="bottom",
left="released-loc",
right="memory-loc",
fill_color="color",
line_color="color",
fill_alpha=1.0,
)
self.root.quad(
source=self.source,
top="top",
bottom="bottom",
left="memory-loc",
right="erred-loc",
fill_color="black",
fill_alpha=0.5,
line_alpha=0,
)
self.root.quad(
source=self.source,
top="top",
bottom="bottom",
left="erred-loc",
right="processing-loc",
fill_color="gray",
fill_alpha=0.35,
line_alpha=0,
)
self.root.text(
source=self.source,
text="show-name",
y="bottom",
x="left",
x_offset=5,
text_font_size=value("10pt"),
)
self.root.text(
source=self.source,
text="done",
y="bottom",
x="right",
x_offset=-5,
text_align="right",
text_font_size=value("10pt"),
)
self.root.ygrid.visible = False
self.root.yaxis.minor_tick_line_alpha = 0
self.root.yaxis.visible = False
self.root.xgrid.visible = False
self.root.xaxis.minor_tick_line_alpha = 0
self.root.xaxis.visible = False
hover = HoverTool(
point_policy="follow_mouse",
tooltips="""
<div>
<span style="font-size: 14px; font-weight: bold;">Name:</span>
<span style="font-size: 10px; font-family: Monaco, monospace;">@name</span>
</div>
<div>
<span style="font-size: 14px; font-weight: bold;">All:</span>
<span style="font-size: 10px; font-family: Monaco, monospace;">@all</span>
</div>
<div>
<span style="font-size: 14px; font-weight: bold;">Memory:</span>
<span style="font-size: 10px; font-family: Monaco, monospace;">@memory</span>
</div>
<div>
<span style="font-size: 14px; font-weight: bold;">Erred:</span>
<span style="font-size: 10px; font-family: Monaco, monospace;">@erred</span>
</div>
<div>
<span style="font-size: 14px; font-weight: bold;">Ready:</span>
<span style="font-size: 10px; font-family: Monaco, monospace;">@processing</span>
</div>
""",
)
self.root.add_tools(hover)
@without_property_validation
def update(self):
with log_errors():
state = {
"memory": {},
"erred": {},
"released": {},
"processing": {},
"waiting": {},
}
for tp in self.scheduler.task_prefixes.values():
active_states = tp.active_states
if any(active_states.get(s) for s in state.keys()):
state["memory"][tp.name] = active_states["memory"]
state["erred"][tp.name] = active_states["erred"]
state["released"][tp.name] = active_states["released"]
state["processing"][tp.name] = active_states["processing"]
state["waiting"][tp.name] = active_states["waiting"]
state["all"] = {
k: sum(v[k] for v in state.values()) for k in state["memory"]
}
if not state["all"] and not len(self.source.data["all"]):
return
d = progress_quads(state)
update(self.source, d)
totals = {
k: sum(state[k].values())
for k in ["all", "memory", "erred", "released", "waiting"]
}
totals["processing"] = totals["all"] - sum(
v for k, v in totals.items() if k != "all"
)
self.root.title.text = (
"Progress -- total: %(all)s, "
"in-memory: %(memory)s, processing: %(processing)s, "
"waiting: %(waiting)s, "
"erred: %(erred)s" % totals
)
class WorkerTable(DashboardComponent):
""" Status of the current workers
This is two plots, a text-based table for each host and a thin horizontal
plot laying out hosts by their current memory use.
"""
excluded_names = {"executing", "in_flight", "in_memory", "ready", "time"}
def __init__(self, scheduler, width=800, **kwargs):
self.scheduler = scheduler
self.names = [
"name",
"address",
"nthreads",
"cpu",
"memory",
"memory_limit",
"memory_percent",
"num_fds",
"read_bytes",
"write_bytes",
"cpu_fraction",
]
workers = self.scheduler.workers.values()
self.extra_names = sorted(
{
m
for ws in workers
for m, v in ws.metrics.items()
if m not in self.names and isinstance(v, (str, int, float))
}
- self.excluded_names
)
table_names = [
"name",
"address",
"nthreads",
"cpu",
"memory",
"memory_limit",
"memory_percent",
"num_fds",
"read_bytes",
"write_bytes",
]
self.source = ColumnDataSource({k: [] for k in self.names})
columns = {
name: TableColumn(field=name, title=name.replace("_percent", " %"))
for name in table_names
}
formatters = {
"cpu": NumberFormatter(format="0.0 %"),
"memory_percent": NumberFormatter(format="0.0 %"),
"memory": NumberFormatter(format="0 b"),
"memory_limit": NumberFormatter(format="0 b"),
"read_bytes": NumberFormatter(format="0 b"),
"write_bytes": NumberFormatter(format="0 b"),
"num_fds": NumberFormatter(format="0"),
"nthreads": NumberFormatter(format="0"),
}
if BOKEH_VERSION < "0.12.15":
dt_kwargs = {"row_headers": False}
else:
dt_kwargs = {"index_position": None}
table = DataTable(
source=self.source,
columns=[columns[n] for n in table_names],
reorderable=True,
sortable=True,
width=width,
**dt_kwargs,
)
for name in table_names:
if name in formatters:
table.columns[table_names.index(name)].formatter = formatters[name]
extra_names = ["name", "address"] + self.extra_names
extra_columns = {
name: TableColumn(field=name, title=name.replace("_percent", "%"))
for name in extra_names
}
extra_table = DataTable(
source=self.source,
columns=[extra_columns[n] for n in extra_names],
reorderable=True,
sortable=True,
width=width,
**dt_kwargs,
)
hover = HoverTool(
point_policy="follow_mouse",
tooltips="""
<div>
<span style="font-size: 10px; font-family: Monaco, monospace;">Worker (@name): </span>
<span style="font-size: 10px; font-family: Monaco, monospace;">@memory_percent</span>
</div>
""",
)
mem_plot = figure(
title="Memory Use (%)",
toolbar_location=None,
x_range=(0, 1),
y_range=(-0.1, 0.1),
height=60,
width=width,
tools="",
**kwargs,
)
mem_plot.circle(
source=self.source, x="memory_percent", y=0, size=10, fill_alpha=0.5
)
mem_plot.ygrid.visible = False
mem_plot.yaxis.minor_tick_line_alpha = 0
mem_plot.xaxis.visible = False
mem_plot.yaxis.visible = False
mem_plot.add_tools(hover, BoxSelectTool())
hover = HoverTool(
point_policy="follow_mouse",
tooltips="""
<div>
<span style="font-size: 10px; font-family: Monaco, monospace;">Worker (@name): </span>
<span style="font-size: 10px; font-family: Monaco, monospace;">@cpu</span>
</div>
""",
)
cpu_plot = figure(
title="CPU Use (%)",
toolbar_location=None,
x_range=(0, 1),
y_range=(-0.1, 0.1),
height=60,
width=width,
tools="",
**kwargs,
)
cpu_plot.circle(
source=self.source, x="cpu_fraction", y=0, size=10, fill_alpha=0.5
)
cpu_plot.ygrid.visible = False
cpu_plot.yaxis.minor_tick_line_alpha = 0
cpu_plot.xaxis.visible = False
cpu_plot.yaxis.visible = False
cpu_plot.add_tools(hover, BoxSelectTool())
self.cpu_plot = cpu_plot
if "sizing_mode" in kwargs:
sizing_mode = {"sizing_mode": kwargs["sizing_mode"]}
else:
sizing_mode = {}
components = [cpu_plot, mem_plot, table]
if self.extra_names:
components.append(extra_table)
self.root = column(*components, id="bk-worker-table", **sizing_mode)
@without_property_validation
def update(self):
data = {name: [] for name in self.names + self.extra_names}
for i, (addr, ws) in enumerate(
sorted(self.scheduler.workers.items(), key=lambda kv: str(kv[1].name))
):
for name in self.names + self.extra_names:
data[name].append(ws.metrics.get(name, None))
data["name"][-1] = ws.name if ws.name is not None else i
data["address"][-1] = ws.address
if ws.memory_limit:
data["memory_percent"][-1] = ws.metrics["memory"] / ws.memory_limit
else:
data["memory_percent"][-1] = ""
data["memory_limit"][-1] = ws.memory_limit
data["cpu"][-1] = ws.metrics["cpu"] / 100.0
data["cpu_fraction"][-1] = ws.metrics["cpu"] / 100.0 / ws.nthreads
data["nthreads"][-1] = ws.nthreads
for name in self.names + self.extra_names:
if name == "name":
data[name].insert(
0, "Total ({nworkers})".format(nworkers=len(data[name]))
)
continue
try:
data[name].insert(0, sum(data[name]))
except TypeError:
data[name].insert(0, None)
self.source.data.update(data)
def systemmonitor_doc(scheduler, extra, doc):
with log_errors():
sysmon = SystemMonitor(scheduler, sizing_mode="stretch_both")
doc.title = "Dask: Scheduler System Monitor"
add_periodic_callback(doc, sysmon, 500)
for subdoc in sysmon.root.children:
doc.add_root(subdoc)
doc.template = env.get_template("system.html")
doc.template_variables.update(extra)
doc.theme = BOKEH_THEME
def stealing_doc(scheduler, extra, doc):
with log_errors():
occupancy = Occupancy(scheduler, height=200, sizing_mode="scale_width")
stealing_ts = StealingTimeSeries(scheduler, sizing_mode="scale_width")
stealing_events = StealingEvents(scheduler, sizing_mode="scale_width")
stealing_events.root.x_range = stealing_ts.root.x_range
doc.title = "Dask: Work Stealing"
add_periodic_callback(doc, occupancy, 500)
add_periodic_callback(doc, stealing_ts, 500)
add_periodic_callback(doc, stealing_events, 500)
doc.add_root(
column(
occupancy.root,
stealing_ts.root,
stealing_events.root,
sizing_mode="scale_width",
)
)
doc.template = env.get_template("simple.html")
doc.template_variables.update(extra)
doc.theme = BOKEH_THEME
def events_doc(scheduler, extra, doc):
with log_errors():
events = Events(scheduler, "all", height=250)
events.update()
add_periodic_callback(doc, events, 500)
doc.title = "Dask: Scheduler Events"
doc.add_root(column(events.root, sizing_mode="scale_width"))
doc.template = env.get_template("simple.html")
doc.template_variables.update(extra)
doc.theme = BOKEH_THEME
def workers_doc(scheduler, extra, doc):
with log_errors():
table = WorkerTable(scheduler)
table.update()
add_periodic_callback(doc, table, 500)
doc.title = "Dask: Workers"
doc.add_root(table.root)
doc.template = env.get_template("simple.html")
doc.template_variables.update(extra)
doc.theme = BOKEH_THEME
def tasks_doc(scheduler, extra, doc):
with log_errors():
ts = TaskStream(
scheduler,
n_rectangles=dask.config.get(
"distributed.scheduler.dashboard.tasks.task-stream-length"
),
clear_interval="60s",
sizing_mode="stretch_both",
)
ts.update()
add_periodic_callback(doc, ts, 5000)
doc.title = "Dask: Task Stream"
doc.add_root(ts.root)
doc.template = env.get_template("simple.html")
doc.template_variables.update(extra)
doc.theme = BOKEH_THEME
def graph_doc(scheduler, extra, doc):
with log_errors():
graph = TaskGraph(scheduler, sizing_mode="stretch_both")
doc.title = "Dask: Task Graph"
graph.update()
add_periodic_callback(doc, graph, 200)
doc.add_root(graph.root)
doc.template = env.get_template("simple.html")
doc.template_variables.update(extra)
doc.theme = BOKEH_THEME
def status_doc(scheduler, extra, doc):
with log_errors():
task_stream = TaskStream(
scheduler,
n_rectangles=dask.config.get(
"distributed.scheduler.dashboard.status.task-stream-length"
),
clear_interval="5s",
sizing_mode="stretch_both",
)
task_stream.update()
add_periodic_callback(doc, task_stream, 100)
task_progress = TaskProgress(scheduler, sizing_mode="stretch_both")
task_progress.update()
add_periodic_callback(doc, task_progress, 100)
if len(scheduler.workers) < 50:
current_load = CurrentLoad(scheduler, sizing_mode="stretch_both")
current_load.update()
add_periodic_callback(doc, current_load, 100)
doc.add_root(current_load.nbytes_figure)
doc.add_root(current_load.processing_figure)
else:
nbytes_hist = NBytesHistogram(scheduler, sizing_mode="stretch_both")
nbytes_hist.update()
processing_hist = ProcessingHistogram(scheduler, sizing_mode="stretch_both")
processing_hist.update()
add_periodic_callback(doc, nbytes_hist, 100)
add_periodic_callback(doc, processing_hist, 100)
current_load_fig = row(
nbytes_hist.root, processing_hist.root, sizing_mode="stretch_both"
)
doc.add_root(nbytes_hist.root)
doc.add_root(processing_hist.root)
doc.title = "Dask: Status"
doc.add_root(task_progress.root)
doc.add_root(task_stream.root)
doc.theme = BOKEH_THEME
doc.template = env.get_template("status.html")
doc.template_variables.update(extra)
doc.theme = BOKEH_THEME
def individual_task_stream_doc(scheduler, extra, doc):
task_stream = TaskStream(
scheduler, n_rectangles=1000, clear_interval="10s", sizing_mode="stretch_both"
)
task_stream.update()
add_periodic_callback(doc, task_stream, 100)
doc.add_root(task_stream.root)
doc.theme = BOKEH_THEME
def individual_nbytes_doc(scheduler, extra, doc):
current_load = CurrentLoad(scheduler, sizing_mode="stretch_both")
current_load.update()
add_periodic_callback(doc, current_load, 100)
doc.add_root(current_load.nbytes_figure)
doc.theme = BOKEH_THEME
def individual_cpu_doc(scheduler, extra, doc):
current_load = CurrentLoad(scheduler, sizing_mode="stretch_both")
current_load.update()
add_periodic_callback(doc, current_load, 100)
doc.add_root(current_load.cpu_figure)
doc.theme = BOKEH_THEME
def individual_nprocessing_doc(scheduler, extra, doc):
current_load = CurrentLoad(scheduler, sizing_mode="stretch_both")
current_load.update()
add_periodic_callback(doc, current_load, 100)
doc.add_root(current_load.processing_figure)
doc.theme = BOKEH_THEME
def individual_progress_doc(scheduler, extra, doc):
task_progress = TaskProgress(scheduler, height=160, sizing_mode="stretch_both")
task_progress.update()
add_periodic_callback(doc, task_progress, 100)
doc.add_root(task_progress.root)
doc.theme = BOKEH_THEME
def individual_graph_doc(scheduler, extra, doc):
with log_errors():
graph = TaskGraph(scheduler, sizing_mode="stretch_both")
graph.update()
add_periodic_callback(doc, graph, 200)
doc.add_root(graph.root)
doc.theme = BOKEH_THEME
def individual_profile_doc(scheduler, extra, doc):
with log_errors():
prof = ProfileTimePlot(scheduler, sizing_mode="scale_width", doc=doc)
doc.add_root(prof.root)
prof.trigger_update()
doc.theme = BOKEH_THEME
def individual_profile_server_doc(scheduler, extra, doc):
with log_errors():
prof = ProfileServer(scheduler, sizing_mode="scale_width", doc=doc)
doc.add_root(prof.root)
prof.trigger_update()
doc.theme = BOKEH_THEME
def individual_workers_doc(scheduler, extra, doc):
with log_errors():
table = WorkerTable(scheduler)
table.update()
add_periodic_callback(doc, table, 500)
doc.add_root(table.root)
doc.theme = BOKEH_THEME
def individual_bandwidth_types_doc(scheduler, extra, doc):
with log_errors():
bw = BandwidthTypes(scheduler, sizing_mode="stretch_both")
bw.update()
add_periodic_callback(doc, bw, 500)
doc.add_root(bw.fig)
doc.theme = BOKEH_THEME
def individual_bandwidth_workers_doc(scheduler, extra, doc):
with log_errors():
bw = BandwidthWorkers(scheduler, sizing_mode="stretch_both")
bw.update()
add_periodic_callback(doc, bw, 500)
doc.add_root(bw.fig)
doc.theme = BOKEH_THEME
def individual_memory_by_key_doc(scheduler, extra, doc):
with log_errors():
component = MemoryByKey(scheduler, sizing_mode="stretch_both")
component.update()
add_periodic_callback(doc, component, 500)
doc.add_root(component.fig)
doc.theme = BOKEH_THEME
def profile_doc(scheduler, extra, doc):
with log_errors():
doc.title = "Dask: Profile"
prof = ProfileTimePlot(scheduler, sizing_mode="stretch_both", doc=doc)
doc.add_root(prof.root)
doc.template = env.get_template("simple.html")
doc.template_variables.update(extra)
doc.theme = BOKEH_THEME
prof.trigger_update()
def profile_server_doc(scheduler, extra, doc):
with log_errors():
doc.title = "Dask: Profile of Event Loop"
prof = ProfileServer(scheduler, sizing_mode="stretch_both", doc=doc)
doc.add_root(prof.root)
doc.template = env.get_template("simple.html")
doc.template_variables.update(extra)
doc.theme = BOKEH_THEME
prof.trigger_update()
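# Illustrative wiring of the *_doc functions above into Bokeh applications (a
# sketch only; the real routing lives elsewhere in distributed.dashboard, and
# `scheduler`/`extra` are assumed to be provided by the caller):
#
#   from functools import partial
#   from bokeh.application import Application
#   from bokeh.application.handlers.function import FunctionHandler
#
#   applications = {
#       "/status": Application(FunctionHandler(partial(status_doc, scheduler, extra))),
#       "/workers": Application(FunctionHandler(partial(workers_doc, scheduler, extra))),
#   }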
| 32.417626
| 104
| 0.512651
|
07694b597b9191a7951ccd0b714e74ca0c95319c
| 18,349
|
py
|
Python
|
student_tools/tools.py
|
mahart-studio/UniCloud
|
ab137c82680f23f2fea6fc226867b32967019ede
|
[
"MIT"
] | 3
|
2019-12-06T15:31:14.000Z
|
2021-01-09T05:45:52.000Z
|
student_tools/tools.py
|
avour/UniCloud
|
ab137c82680f23f2fea6fc226867b32967019ede
|
[
"MIT"
] | null | null | null |
student_tools/tools.py
|
avour/UniCloud
|
ab137c82680f23f2fea6fc226867b32967019ede
|
[
"MIT"
] | null | null | null |
# my kivy side import
import kivy
from kivy.app import App
from kivy.uix.boxlayout import BoxLayout
from kivy.uix.screenmanager import ScreenManager, Screen
from kivy.uix.button import Button
from kivy.uix.label import Label
from kivy.uix.spinner import Spinner, SpinnerOption
from kivy.uix.dropdown import DropDown
from kivy.clock import Clock
from kivy.garden.androidtabs import AndroidTabsBase
from kivy.lang import Builder
from kivy.uix.modalview import ModalView
from kivy.garden.navigationdrawer import NavigationDrawer
from kivy.properties import StringProperty, ObjectProperty
from kivy.storage.jsonstore import JsonStore
from kivy.factory import Factory
# python side imports
import time
from functools import partial
from io import open
import os, os.path
from mahartstudios.widgets.buttons import DropButton
from screenbase import ScreenBase
student_store = JsonStore('student_store.json')
gp_store = JsonStore('gp_store.json')
# default password
global password
password = '0000'
if student_store.exists('password_on'):
password_on = [student_store.get('password_on')]
else:
password_on = [True]
Builder.load_file('student_tools.kv')
class MyTab1(BoxLayout, AndroidTabsBase):
pass
class CalculatorPage(Screen):
def __init__(self, **kwargs):
super(CalculatorPage, self).__init__(**kwargs)
manager = ScreenManager()
self.add_widget(manager)
manager.add_widget(CalculatorHome())
manager.add_widget(Result_list())
manager.add_widget(Result_view())
manager.add_widget(GpCalculator())
class CalculatorHome(ScreenBase):
def __init__(self, **kwargs):
super(CalculatorHome, self).__init__(**kwargs)
self.result_btn = self.ids.result_btn
self.password_pop = PasswordPop() #password popup modalview
self.password_pop.screen_guy = self
self.result_btn.bind(on_release=self.password_dicide)
def password_dicide(self, value):
print([password_on[0]])
if password_on[0]:
self.password_pop.open(self)
else:
            self.manager.current = 'result_list_page'
class GpCalculator(ScreenBase):
def __init__(self, **kwargs):
super(GpCalculator, self).__init__(**kwargs)
self.grid_guy = self.ids.grid_guy
self.calc_option = self.ids.calc_option
self.course_num = 6
self.Gp_values1=[]
#my drop down custom
self.options = Factory.MyDropDown(auto_width=False, width='150dp')
self.resest_btn = Button(text='Reset', size_hint=(1, None),
height='50dp', background_normal='', background_color=(1, 1, 1, 1), color=(0, 0, 0, 1))
self.resest_btn.bind(on_release = lambda btn: self.reset_values())
self.delete_btn = Button(text='Delete', size_hint=(1, None),
height='50dp', background_normal='', background_color=(1, 1, 1, 1), color=(0, 0, 0, 1))
self.delete_btn.bind(on_release= lambda btn: self.delete_course())
        # add the reset/delete options to the dropdown
self.options.add_widget(self.resest_btn)
self.options.add_widget(self.delete_btn)
self.calc_option.bind(on_release=self.options.open)
Clock.schedule_once(lambda dt: self.set_values())
def set_values(self):
self.grid_guy.clear_widgets()
for i in range(5): #add 5 courses to the gridlayout
self.course_text = 'Course {}'
self.grid_guy.add_widget(Label(text=self.course_text.format(i+1), size_hint_y=None, height='38dp',
color=(.1, .1, .1, 1)))
self.grid_guy.add_widget(Factory.Spinner(text='2', values=('1', '2', '3', '4', '5')))
self.grid_guy.add_widget(Factory.Spinner(text='B', values=('A', 'B', 'C', 'D', 'E', 'F')))
    def delete_course(self): # remove 3 widgets (one course row) from the grid
self.grid_list = self.grid_guy.children
for j in range(3):
if len(self.grid_list) <= 3:
self.course_num = 2
else:
self.grid_guy.remove_widget(self.grid_list[0])
if len(self.grid_list) != 3:
self.course_num -= 1
def reset_values(self):
self.set_values()
    def add_btn(self): # add a new course row (label + unit/grade spinners) to the gridlayout
self.course_text = 'Course {}'
self.grid_guy.add_widget(Label(text=self.course_text.format(self.course_num), size_hint_y=None, height='38dp', color=(.1, .1,.1, 1)))
self.grid_guy.add_widget(Factory.Spinner(text='2', values=('1', '2', '3', '4', '5')))
self.grid_guy.add_widget(Factory.Spinner(text='B', values=('A', 'B', 'C', 'D', 'E', 'F')))
self.course_num += 1
def open_drop(self):
self.options.open(self.calc_option)
def calculate_gp(self, grades, units):
grade_values = []
cal_grade = []
sum_of_unit = 0
sum_of_grade = 0
for i in grades:
if i == 'E':
grade_values.append('1')
if i == 'D':
grade_values.append('2')
if i == 'C':
grade_values.append('3')
if i == 'B':
grade_values.append('4')
if i == 'A':
grade_values.append('5')
if i == 'F':
grade_values.append('0')
for i in range(len(grade_values)):
multi = int(units[i]) * int(grade_values[i])
cal_grade.append(multi)
print((units[i], grades[i]))
for i in units:
sum_of_unit += int(i)
for i in cal_grade:
sum_of_grade += i
gp = sum_of_grade/sum_of_unit
gp = str(gp)
return [gp, sum_of_unit]
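    # The method above implements the usual weighted GPA formula
    #     GP = sum(unit_i * grade_point_i) / sum(unit_i)
    # e.g. grades ['A', 'C'] with units ['3', '2'] give (3*5 + 2*3) / (3 + 2) = 4.2.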
def get_values(self):
list_value = self.grid_guy.children
course_list = []
grade_list = []
unit_list = []
j = 2 # get the course list
for i in range((len(list_value)//3)):
course = list_value[j]
course = course.text
course_list.append(course)
j += 3
courses = len(course_list)
j = 1 #get the unit list
for i in range((len(list_value)//3)):
unit = list_value[j]
unit = unit.text
unit_list.append(unit)
j += 3
j = 0# get the grade list
for grade in range((len(list_value)//3)):
grade = list_value[j].text
grade_list.append(grade)
j += 3
#then we send values to the calculator func
gp, sum_of_unit = self.calculate_gp(grade_list, unit_list)
print(gp)
# if screens.previous() == 'result_view_page':
# pass
course_values = [course_list, unit_list, grade_list]
# then we store
gp_store.put(time.ctime(), gp=gp, sum_of_unit=sum_of_unit, course_values=course_values)
self.parent.current = 'result_view_page'
self.parent.get_screen('result_view_page').set_values(gp, sum_of_unit, course_values)
class Result_view(ScreenBase):
def __init__(self, **kwargs):
super(Result_view, self).__init__(**kwargs)
self.gp_view = self.ids.gp_view
self.total_unit = self.ids.total_unit
self.grid_lay = self.ids.grid_lay
def set_values(self, gp, sum_of_unit, course_values):
        'Populate the result view widgets from a stored GP calculation'
self.grid_lay.clear_widgets()
for course, unit, grade in zip(course_values[0], course_values[1], course_values[2]):
course_label = Label(text='ssg4', color=(.1, .1,.1, 1), size_hint_y=None, height=40)
unit_spin = Factory.MySpinner(text='2', values=('1', '2', '3', '4', '5'))
grade_spin = Factory.MySpinner(text='B', values=('A', 'B', 'C', 'D', 'E', 'F'))
course_label.text = course
unit_spin.text = unit
grade_spin.text = grade
#then add to the gridlayout
self.grid_lay.add_widget(course_label)
self.grid_lay.add_widget(unit_spin)
self.grid_lay.add_widget(grade_spin)
self.gp_view.text = 'Grade Point: {}'.format(gp)
self.total_unit.text = 'Sum of Unit: {}'.format(sum_of_unit)
self.ids.num_of_course.text = 'Total Number Of Courses: {}'.format(str(len(course_values[0])))
class Result_list(ScreenBase):
def __init__(self, **kwargs):
super(Result_list, self).__init__(**kwargs)
self.result_scroll = self.ids.result_scroll
self.result_scroll.bind(minimum_height=self.result_scroll.setter('height'))
def on_enter(self):
self.set_values()
def set_values(self):
self.result_scroll.clear_widgets()
if len(gp_store.keys()) < 1:
            self.error_msg = Label(text='Sorry, no results were found', color=(.3, .3, .3, 1))
self.result_scroll.add_widget(self.error_msg)
return
for gp in gp_store.keys():
result_btn = Result_Button()
result_btn.time = '{}'.format(gp)
result_btn.gp = 'Gp: {} Sum of unit: {}'.format(gp_store.get(gp)['gp'], gp_store.get(gp)['sum_of_unit'])
result_btn.gp_values = gp_store.get(gp)['course_values']
self.result_scroll.add_widget(result_btn)
result_btn.load=partial(self.load_data, result_btn.time)
result_btn.delete=partial(self.delete_data, result_btn.time)
def load_data(self, time):
self.manager.current = 'result_view_page'
gp = gp_store.get(time)['gp']
sum_of_unit = gp_store.get(time)['sum_of_unit']
course_values = gp_store.get(time)['course_values']
self.manager.get_screen('result_view_page').set_values(gp, sum_of_unit, course_values)
def delete_data(self, time):
child = list(filter(lambda child: child.time == time, self.result_scroll.children))[0]
self.result_scroll.remove_widget(child)
gp_store.delete(child.time)
def delete_all(self):
pass
def refresh_list(self):
self.set_values()
def on_result_press(self, pos):
Gp_values2 = Gp_values_list[pos]
self.manager.current = 'result_view_page'
class PasswordPop(ModalView):
screen_guy = ObjectProperty(None)
def __init__(self, **kwargs):
super(PasswordPop, self).__init__(**kwargs)
self.password_state = self.ids.password_state
self.pass_btn = self.ids.pass_btn
self.password_value = self.ids.password_value
def on_open(self):
        if password == '0000':  # pre-fill the field if the user hasn't changed the default password
self.password_value.text = '0000'
def comfirm_pass(self):
if self.password_value.text == password:
self.dismiss()
self.screen_guy.manager.current = 'result_list_page'
else:
self.password_value.text = ''
self.password_state.color = (1, .2, .2, 1)
self.password_state.text = 'Incorrect Password'
class Settings_password(Screen):
def __init__(self, **kwargs):
super(Settings_password, self).__init__(**kwargs)
self.warning = self.ids.warning
self.old_password = self.ids.old_password
self.new_password = self.ids.new_password
self.comfirm_pass = self.ids.comfirm_password
self.password_switch = self.ids.password_switch
        if password == '0000':  # pre-fill the field if the user hasn't changed the default password
self.old_password.text = '0000'
    def set_warning(self):  # called from the on_text event of the password fields
if self.new_password.text != self.comfirm_pass.text:
if len(self.comfirm_pass.text) >= len(self.new_password.text):
                self.warning.text = "Password Does Not Match"
else:
self.warning.text = ''
    def set_password(self):
        global password  # 'password' is the module-level password value
        if self.old_password.text == password:
            password = self.new_password.text
            self.old_password.text = self.new_password.text
        print(password)
    def on_leave(self):
        # turn password protection on/off
        password_on[0] = self.password_switch.active
        print(password_on[0])
class Settings(Screen):
def __init__(self, **kwargs):
super(Settings, self).__init__(**kwargs)
    def go_back(self):  # go back to the previous page, since this screen can be reached from any page
previous = self.manager.previous()
self.manager.current = previous
class Home_page(Screen):
def __init__(self, **kwargs):
super(Home_page, self).__init__(**kwargs)
class TimeTableButton(DropButton):
pass
class Lecture_time_table(Screen):
def display(self):
        days = ['Monday', 'Tuesday', 'Wednesday', 'Thursday', 'Friday']
        codes = ['GEG 101', 'Phy 111', 'SSG 201', 'EEG 301']
        times = ['8:00-10:00AM', '12:00-2:00PM', '2:00-4:00PM', '4:00-6:00PM']
venues = ['Elt', 'Room 201', 'Room 211', 'Room 101']
for day in days:
drop_btn = TimeTableButton(day=day)
for time, code, venue in zip(times, codes, venues):
box = BoxLayout(size_hint_y=None, height='60dp')
box.add_widget(Label(color=(0,0,0,1), text=time))
box.add_widget(Label(color=(0,0,0,1), text=code))
box.add_widget(Label(color=(0,0,0,1), text=venue))
drop_btn.drop_container.add_widget(box)
self.ids.grid.add_widget(drop_btn)
def on_enter(self):
self.ids.grid.clear_widgets()
self.display()
class Exam_time_table(Screen):
def __init__(self, **kwargs):
super(Exam_time_table, self).__init__(**kwargs)
class Box_guy(BoxLayout):
pass
class Matrix_detector(ScreenBase): #detector side of app
def __init__(self, **kwargs):
super(Matrix_detector, self).__init__(**kwargs)
        # read the faculty/course list; a context manager ensures the file is closed
        with open('data/unilag_courses.txt', 'r') as file:
            file_lines = file.readlines()
        self.courses = {}
temp = []
temp_fal = ''
first = True
# print(file_lines)
for line in file_lines:
line = line.strip('\n')
if line == '':
pass
else:
if line[0] == '*':
if first:
temp_fal = line
else:
self.courses[temp_fal[1:]] = temp
temp = []
temp_fal = line
# print('-----------------------')
# print('FACULTY OF ', line[1:])
# print('-----------------------')
first = False
else:
first = False
temp.append(line)
                    # print(line)
        # store the final faculty parsed above; the loop only flushes a faculty's
        # course list when the next '*' header is encountered
        if temp_fal:
            self.courses[temp_fal[1:]] = temp
    def go_back(self):  # go back to the previous page, since this screen can be reached from any page
previous = self.manager.previous()
        self.manager.current = previous
def get_year(self, matrix):
self.year = str(matrix)[0:2]
self.current_year = time.ctime()
self.year = int('20' + self.year)
self.current_year = int(self.current_year[20:24])
self.year_dif = self.current_year - self.year
self.level = str(self.year_dif) + '00'
self.year_of_entry = (str(self.year) + '/' + str(self.year + 1))
return [self.level, self.year_of_entry]
def get_falculty(self, matrix):
self.list_falculty = list(self.courses)
self.list_falculty.sort()
matrix = str(matrix)
if matrix[2] == '0':
self.falc_no = int(matrix[3])-1
self.falculty = self.list_falculty[self.falc_no]
else:
self.falc_no = int(matrix[2:4])-1
self.falculty = self.list_falculty[self.falc_no]
self.return_val = self.falculty
return self.return_val
def get_department(self, matrix):
self.falculty = self.get_falculty(matrix)
matrix = str(matrix)
if matrix[4] == '0':
dept_no = int(matrix[5])-1
department = self.courses[self.falculty][dept_no]
else:
            dept_no = int(matrix[4:6]) - 1
department = self.courses[self.falculty][dept_no]
return department
def get_position(self, matrix):
matrix = str(matrix)
if matrix[7] == '0' and matrix[6] == '0':
self.position = matrix[8]
elif matrix[6] == '0':
self.position = matrix[7:9]
else:
self.position = matrix[6:9]
return self.position
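    # Worked example of the matric-number layout assumed by get_year/get_falculty/
    # get_department/get_position above (the concrete number is made up):
    #     matric '180305042'
    #         [0:2] '18'  -> entry year 2018; level = (current year - 2018) * 100
    #         [2:4] '03'  -> faculty index 3 - 1 = 2 in the sorted faculty list
    #         [4:6] '05'  -> department index within that faculty's course list
    #         [6:9] '042' -> position 42 within the department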
def set_info(self, matrix):
box_guy = Box_guy()
year_display = self.ids.year_display
level_display = self.ids.level_display
department_display = self.ids.department_display
falculty_display = self.ids.falculty_display
position_display = self.ids.position_display
try:
matrix = int(matrix)
year_level = self.get_year(matrix)
year = str(year_level[1])
level = str(year_level[0])
department = str(self.get_department(matrix))
falculty = str(self.get_falculty(matrix))
position = str(self.get_position(matrix))
except Exception as e:
import traceback
traceback.print_exc()
year_display.text = ''
level_display.text = ''
department_display.text = ''
falculty_display.text = ''
position_display.text = ''
from mahartstudios.android.notification import fast_toast
fast_toast('Not found in database.')
else:
year_display.text = year
level_display.text = level
department_display.text = department
falculty_display.text = falculty
position_display.text = position
class Result_Button(DropButton):
pass
| 32.361552
| 141
| 0.588915
|
ff1bc782d52f33bcf94fbdbc4a24c493b2b7240d
| 69,456
|
py
|
Python
|
packages/python/plotly/plotly/graph_objs/parcats/line/__init__.py
|
thatneat/plotly.py
|
2e7c0ff2565f703394d872b0d8ea13f45aba3498
|
[
"MIT"
] | 1
|
2020-02-29T09:46:51.000Z
|
2020-02-29T09:46:51.000Z
|
packages/python/plotly/plotly/graph_objs/parcats/line/__init__.py
|
hermitspirit/plotly.py
|
2e7c0ff2565f703394d872b0d8ea13f45aba3498
|
[
"MIT"
] | null | null | null |
packages/python/plotly/plotly/graph_objs/parcats/line/__init__.py
|
hermitspirit/plotly.py
|
2e7c0ff2565f703394d872b0d8ea13f45aba3498
|
[
"MIT"
] | null | null | null |
from plotly.basedatatypes import BaseTraceHierarchyType as _BaseTraceHierarchyType
import copy as _copy
class ColorBar(_BaseTraceHierarchyType):
# bgcolor
# -------
@property
def bgcolor(self):
"""
Sets the color of padded area.
The 'bgcolor' property is a color and may be specified as:
- A hex string (e.g. '#ff0000')
- An rgb/rgba string (e.g. 'rgb(255,0,0)')
- An hsl/hsla string (e.g. 'hsl(0,100%,50%)')
- An hsv/hsva string (e.g. 'hsv(0,100%,100%)')
- A named CSS color:
aliceblue, antiquewhite, aqua, aquamarine, azure,
beige, bisque, black, blanchedalmond, blue,
blueviolet, brown, burlywood, cadetblue,
chartreuse, chocolate, coral, cornflowerblue,
cornsilk, crimson, cyan, darkblue, darkcyan,
darkgoldenrod, darkgray, darkgrey, darkgreen,
darkkhaki, darkmagenta, darkolivegreen, darkorange,
darkorchid, darkred, darksalmon, darkseagreen,
darkslateblue, darkslategray, darkslategrey,
darkturquoise, darkviolet, deeppink, deepskyblue,
dimgray, dimgrey, dodgerblue, firebrick,
floralwhite, forestgreen, fuchsia, gainsboro,
ghostwhite, gold, goldenrod, gray, grey, green,
greenyellow, honeydew, hotpink, indianred, indigo,
ivory, khaki, lavender, lavenderblush, lawngreen,
lemonchiffon, lightblue, lightcoral, lightcyan,
lightgoldenrodyellow, lightgray, lightgrey,
lightgreen, lightpink, lightsalmon, lightseagreen,
lightskyblue, lightslategray, lightslategrey,
lightsteelblue, lightyellow, lime, limegreen,
linen, magenta, maroon, mediumaquamarine,
mediumblue, mediumorchid, mediumpurple,
mediumseagreen, mediumslateblue, mediumspringgreen,
mediumturquoise, mediumvioletred, midnightblue,
mintcream, mistyrose, moccasin, navajowhite, navy,
oldlace, olive, olivedrab, orange, orangered,
orchid, palegoldenrod, palegreen, paleturquoise,
palevioletred, papayawhip, peachpuff, peru, pink,
plum, powderblue, purple, red, rosybrown,
royalblue, rebeccapurple, saddlebrown, salmon,
sandybrown, seagreen, seashell, sienna, silver,
skyblue, slateblue, slategray, slategrey, snow,
springgreen, steelblue, tan, teal, thistle, tomato,
turquoise, violet, wheat, white, whitesmoke,
yellow, yellowgreen
Returns
-------
str
"""
return self["bgcolor"]
@bgcolor.setter
def bgcolor(self, val):
self["bgcolor"] = val
# bordercolor
# -----------
@property
def bordercolor(self):
"""
Sets the axis line color.
The 'bordercolor' property is a color and may be specified as:
- A hex string (e.g. '#ff0000')
- An rgb/rgba string (e.g. 'rgb(255,0,0)')
- An hsl/hsla string (e.g. 'hsl(0,100%,50%)')
- An hsv/hsva string (e.g. 'hsv(0,100%,100%)')
- A named CSS color:
aliceblue, antiquewhite, aqua, aquamarine, azure,
beige, bisque, black, blanchedalmond, blue,
blueviolet, brown, burlywood, cadetblue,
chartreuse, chocolate, coral, cornflowerblue,
cornsilk, crimson, cyan, darkblue, darkcyan,
darkgoldenrod, darkgray, darkgrey, darkgreen,
darkkhaki, darkmagenta, darkolivegreen, darkorange,
darkorchid, darkred, darksalmon, darkseagreen,
darkslateblue, darkslategray, darkslategrey,
darkturquoise, darkviolet, deeppink, deepskyblue,
dimgray, dimgrey, dodgerblue, firebrick,
floralwhite, forestgreen, fuchsia, gainsboro,
ghostwhite, gold, goldenrod, gray, grey, green,
greenyellow, honeydew, hotpink, indianred, indigo,
ivory, khaki, lavender, lavenderblush, lawngreen,
lemonchiffon, lightblue, lightcoral, lightcyan,
lightgoldenrodyellow, lightgray, lightgrey,
lightgreen, lightpink, lightsalmon, lightseagreen,
lightskyblue, lightslategray, lightslategrey,
lightsteelblue, lightyellow, lime, limegreen,
linen, magenta, maroon, mediumaquamarine,
mediumblue, mediumorchid, mediumpurple,
mediumseagreen, mediumslateblue, mediumspringgreen,
mediumturquoise, mediumvioletred, midnightblue,
mintcream, mistyrose, moccasin, navajowhite, navy,
oldlace, olive, olivedrab, orange, orangered,
orchid, palegoldenrod, palegreen, paleturquoise,
palevioletred, papayawhip, peachpuff, peru, pink,
plum, powderblue, purple, red, rosybrown,
royalblue, rebeccapurple, saddlebrown, salmon,
sandybrown, seagreen, seashell, sienna, silver,
skyblue, slateblue, slategray, slategrey, snow,
springgreen, steelblue, tan, teal, thistle, tomato,
turquoise, violet, wheat, white, whitesmoke,
yellow, yellowgreen
Returns
-------
str
"""
return self["bordercolor"]
@bordercolor.setter
def bordercolor(self, val):
self["bordercolor"] = val
# borderwidth
# -----------
@property
def borderwidth(self):
"""
        Sets the width (in px) of the border enclosing this color bar.
The 'borderwidth' property is a number and may be specified as:
- An int or float in the interval [0, inf]
Returns
-------
int|float
"""
return self["borderwidth"]
@borderwidth.setter
def borderwidth(self, val):
self["borderwidth"] = val
# dtick
# -----
@property
def dtick(self):
"""
Sets the step in-between ticks on this axis. Use with `tick0`.
Must be a positive number, or special strings available to
"log" and "date" axes. If the axis `type` is "log", then ticks
are set every 10^(n*dtick) where n is the tick number. For
example, to set a tick mark at 1, 10, 100, 1000, ... set dtick
to 1. To set tick marks at 1, 100, 10000, ... set dtick to 2.
To set tick marks at 1, 5, 25, 125, 625, 3125, ... set dtick to
log_10(5), or 0.69897000433. "log" has several special values;
"L<f>", where `f` is a positive number, gives ticks linearly
spaced in value (but not position). For example `tick0` = 0.1,
`dtick` = "L0.5" will put ticks at 0.1, 0.6, 1.1, 1.6 etc. To
show powers of 10 plus small digits between, use "D1" (all
digits) or "D2" (only 2 and 5). `tick0` is ignored for "D1" and
"D2". If the axis `type` is "date", then you must convert the
time to milliseconds. For example, to set the interval between
ticks to one day, set `dtick` to 86400000.0. "date" also has
special values "M<n>" gives ticks spaced by a number of months.
`n` must be a positive integer. To set ticks on the 15th of
every third month, set `tick0` to "2000-01-15" and `dtick` to
"M3". To set ticks every 4 years, set `dtick` to "M48"
The 'dtick' property accepts values of any type
Returns
-------
Any
"""
return self["dtick"]
@dtick.setter
def dtick(self, val):
self["dtick"] = val
# exponentformat
# --------------
@property
def exponentformat(self):
"""
Determines a formatting rule for the tick exponents. For
example, consider the number 1,000,000,000. If "none", it
appears as 1,000,000,000. If "e", 1e+9. If "E", 1E+9. If
"power", 1x10^9 (with 9 in a super script). If "SI", 1G. If
"B", 1B.
The 'exponentformat' property is an enumeration that may be specified as:
- One of the following enumeration values:
['none', 'e', 'E', 'power', 'SI', 'B']
Returns
-------
Any
"""
return self["exponentformat"]
@exponentformat.setter
def exponentformat(self, val):
self["exponentformat"] = val
# len
# ---
@property
def len(self):
"""
        Sets the length of the color bar. This measure excludes the
padding of both ends. That is, the color bar length is this
length minus the padding on both ends.
The 'len' property is a number and may be specified as:
- An int or float in the interval [0, inf]
Returns
-------
int|float
"""
return self["len"]
@len.setter
def len(self, val):
self["len"] = val
# lenmode
# -------
@property
def lenmode(self):
"""
Determines whether this color bar's length (i.e. the measure in
the color variation direction) is set in units of plot
"fraction" or in *pixels. Use `len` to set the value.
The 'lenmode' property is an enumeration that may be specified as:
- One of the following enumeration values:
['fraction', 'pixels']
Returns
-------
Any
"""
return self["lenmode"]
@lenmode.setter
def lenmode(self, val):
self["lenmode"] = val
# nticks
# ------
@property
def nticks(self):
"""
Specifies the maximum number of ticks for the particular axis.
The actual number of ticks will be chosen automatically to be
less than or equal to `nticks`. Has an effect only if
`tickmode` is set to "auto".
The 'nticks' property is a integer and may be specified as:
- An int (or float that will be cast to an int)
in the interval [0, 9223372036854775807]
Returns
-------
int
"""
return self["nticks"]
@nticks.setter
def nticks(self, val):
self["nticks"] = val
# outlinecolor
# ------------
@property
def outlinecolor(self):
"""
Sets the axis line color.
The 'outlinecolor' property is a color and may be specified as:
- A hex string (e.g. '#ff0000')
- An rgb/rgba string (e.g. 'rgb(255,0,0)')
- An hsl/hsla string (e.g. 'hsl(0,100%,50%)')
- An hsv/hsva string (e.g. 'hsv(0,100%,100%)')
- A named CSS color:
aliceblue, antiquewhite, aqua, aquamarine, azure,
beige, bisque, black, blanchedalmond, blue,
blueviolet, brown, burlywood, cadetblue,
chartreuse, chocolate, coral, cornflowerblue,
cornsilk, crimson, cyan, darkblue, darkcyan,
darkgoldenrod, darkgray, darkgrey, darkgreen,
darkkhaki, darkmagenta, darkolivegreen, darkorange,
darkorchid, darkred, darksalmon, darkseagreen,
darkslateblue, darkslategray, darkslategrey,
darkturquoise, darkviolet, deeppink, deepskyblue,
dimgray, dimgrey, dodgerblue, firebrick,
floralwhite, forestgreen, fuchsia, gainsboro,
ghostwhite, gold, goldenrod, gray, grey, green,
greenyellow, honeydew, hotpink, indianred, indigo,
ivory, khaki, lavender, lavenderblush, lawngreen,
lemonchiffon, lightblue, lightcoral, lightcyan,
lightgoldenrodyellow, lightgray, lightgrey,
lightgreen, lightpink, lightsalmon, lightseagreen,
lightskyblue, lightslategray, lightslategrey,
lightsteelblue, lightyellow, lime, limegreen,
linen, magenta, maroon, mediumaquamarine,
mediumblue, mediumorchid, mediumpurple,
mediumseagreen, mediumslateblue, mediumspringgreen,
mediumturquoise, mediumvioletred, midnightblue,
mintcream, mistyrose, moccasin, navajowhite, navy,
oldlace, olive, olivedrab, orange, orangered,
orchid, palegoldenrod, palegreen, paleturquoise,
palevioletred, papayawhip, peachpuff, peru, pink,
plum, powderblue, purple, red, rosybrown,
royalblue, rebeccapurple, saddlebrown, salmon,
sandybrown, seagreen, seashell, sienna, silver,
skyblue, slateblue, slategray, slategrey, snow,
springgreen, steelblue, tan, teal, thistle, tomato,
turquoise, violet, wheat, white, whitesmoke,
yellow, yellowgreen
Returns
-------
str
"""
return self["outlinecolor"]
@outlinecolor.setter
def outlinecolor(self, val):
self["outlinecolor"] = val
# outlinewidth
# ------------
@property
def outlinewidth(self):
"""
Sets the width (in px) of the axis line.
The 'outlinewidth' property is a number and may be specified as:
- An int or float in the interval [0, inf]
Returns
-------
int|float
"""
return self["outlinewidth"]
@outlinewidth.setter
def outlinewidth(self, val):
self["outlinewidth"] = val
# separatethousands
# -----------------
@property
def separatethousands(self):
"""
If "true", even 4-digit integers are separated
The 'separatethousands' property must be specified as a bool
(either True, or False)
Returns
-------
bool
"""
return self["separatethousands"]
@separatethousands.setter
def separatethousands(self, val):
self["separatethousands"] = val
# showexponent
# ------------
@property
def showexponent(self):
"""
If "all", all exponents are shown besides their significands.
If "first", only the exponent of the first tick is shown. If
"last", only the exponent of the last tick is shown. If "none",
no exponents appear.
The 'showexponent' property is an enumeration that may be specified as:
- One of the following enumeration values:
['all', 'first', 'last', 'none']
Returns
-------
Any
"""
return self["showexponent"]
@showexponent.setter
def showexponent(self, val):
self["showexponent"] = val
# showticklabels
# --------------
@property
def showticklabels(self):
"""
Determines whether or not the tick labels are drawn.
The 'showticklabels' property must be specified as a bool
(either True, or False)
Returns
-------
bool
"""
return self["showticklabels"]
@showticklabels.setter
def showticklabels(self, val):
self["showticklabels"] = val
# showtickprefix
# --------------
@property
def showtickprefix(self):
"""
If "all", all tick labels are displayed with a prefix. If
"first", only the first tick is displayed with a prefix. If
"last", only the last tick is displayed with a suffix. If
"none", tick prefixes are hidden.
The 'showtickprefix' property is an enumeration that may be specified as:
- One of the following enumeration values:
['all', 'first', 'last', 'none']
Returns
-------
Any
"""
return self["showtickprefix"]
@showtickprefix.setter
def showtickprefix(self, val):
self["showtickprefix"] = val
# showticksuffix
# --------------
@property
def showticksuffix(self):
"""
Same as `showtickprefix` but for tick suffixes.
The 'showticksuffix' property is an enumeration that may be specified as:
- One of the following enumeration values:
['all', 'first', 'last', 'none']
Returns
-------
Any
"""
return self["showticksuffix"]
@showticksuffix.setter
def showticksuffix(self, val):
self["showticksuffix"] = val
# thickness
# ---------
@property
def thickness(self):
"""
        Sets the thickness of the color bar. This measure excludes the
size of the padding, ticks and labels.
The 'thickness' property is a number and may be specified as:
- An int or float in the interval [0, inf]
Returns
-------
int|float
"""
return self["thickness"]
@thickness.setter
def thickness(self, val):
self["thickness"] = val
# thicknessmode
# -------------
@property
def thicknessmode(self):
"""
Determines whether this color bar's thickness (i.e. the measure
in the constant color direction) is set in units of plot
"fraction" or in "pixels". Use `thickness` to set the value.
The 'thicknessmode' property is an enumeration that may be specified as:
- One of the following enumeration values:
['fraction', 'pixels']
Returns
-------
Any
"""
return self["thicknessmode"]
@thicknessmode.setter
def thicknessmode(self, val):
self["thicknessmode"] = val
# tick0
# -----
@property
def tick0(self):
"""
Sets the placement of the first tick on this axis. Use with
`dtick`. If the axis `type` is "log", then you must take the
log of your starting tick (e.g. to set the starting tick to
100, set the `tick0` to 2) except when `dtick`=*L<f>* (see
`dtick` for more info). If the axis `type` is "date", it should
be a date string, like date data. If the axis `type` is
"category", it should be a number, using the scale where each
category is assigned a serial number from zero in the order it
appears.
The 'tick0' property accepts values of any type
Returns
-------
Any
"""
return self["tick0"]
@tick0.setter
def tick0(self, val):
self["tick0"] = val
# tickangle
# ---------
@property
def tickangle(self):
"""
Sets the angle of the tick labels with respect to the
horizontal. For example, a `tickangle` of -90 draws the tick
labels vertically.
The 'tickangle' property is a angle (in degrees) that may be
specified as a number between -180 and 180. Numeric values outside this
range are converted to the equivalent value
(e.g. 270 is converted to -90).
Returns
-------
int|float
"""
return self["tickangle"]
@tickangle.setter
def tickangle(self, val):
self["tickangle"] = val
# tickcolor
# ---------
@property
def tickcolor(self):
"""
Sets the tick color.
The 'tickcolor' property is a color and may be specified as:
- A hex string (e.g. '#ff0000')
- An rgb/rgba string (e.g. 'rgb(255,0,0)')
- An hsl/hsla string (e.g. 'hsl(0,100%,50%)')
- An hsv/hsva string (e.g. 'hsv(0,100%,100%)')
- A named CSS color:
aliceblue, antiquewhite, aqua, aquamarine, azure,
beige, bisque, black, blanchedalmond, blue,
blueviolet, brown, burlywood, cadetblue,
chartreuse, chocolate, coral, cornflowerblue,
cornsilk, crimson, cyan, darkblue, darkcyan,
darkgoldenrod, darkgray, darkgrey, darkgreen,
darkkhaki, darkmagenta, darkolivegreen, darkorange,
darkorchid, darkred, darksalmon, darkseagreen,
darkslateblue, darkslategray, darkslategrey,
darkturquoise, darkviolet, deeppink, deepskyblue,
dimgray, dimgrey, dodgerblue, firebrick,
floralwhite, forestgreen, fuchsia, gainsboro,
ghostwhite, gold, goldenrod, gray, grey, green,
greenyellow, honeydew, hotpink, indianred, indigo,
ivory, khaki, lavender, lavenderblush, lawngreen,
lemonchiffon, lightblue, lightcoral, lightcyan,
lightgoldenrodyellow, lightgray, lightgrey,
lightgreen, lightpink, lightsalmon, lightseagreen,
lightskyblue, lightslategray, lightslategrey,
lightsteelblue, lightyellow, lime, limegreen,
linen, magenta, maroon, mediumaquamarine,
mediumblue, mediumorchid, mediumpurple,
mediumseagreen, mediumslateblue, mediumspringgreen,
mediumturquoise, mediumvioletred, midnightblue,
mintcream, mistyrose, moccasin, navajowhite, navy,
oldlace, olive, olivedrab, orange, orangered,
orchid, palegoldenrod, palegreen, paleturquoise,
palevioletred, papayawhip, peachpuff, peru, pink,
plum, powderblue, purple, red, rosybrown,
royalblue, rebeccapurple, saddlebrown, salmon,
sandybrown, seagreen, seashell, sienna, silver,
skyblue, slateblue, slategray, slategrey, snow,
springgreen, steelblue, tan, teal, thistle, tomato,
turquoise, violet, wheat, white, whitesmoke,
yellow, yellowgreen
Returns
-------
str
"""
return self["tickcolor"]
@tickcolor.setter
def tickcolor(self, val):
self["tickcolor"] = val
# tickfont
# --------
@property
def tickfont(self):
"""
Sets the color bar's tick label font
The 'tickfont' property is an instance of Tickfont
that may be specified as:
- An instance of plotly.graph_objs.parcats.line.colorbar.Tickfont
- A dict of string/value properties that will be passed
to the Tickfont constructor
Supported dict properties:
color
family
HTML font family - the typeface that will be
applied by the web browser. The web browser
will only be able to apply a font if it is
available on the system which it operates.
Provide multiple font families, separated by
commas, to indicate the preference in which to
apply fonts if they aren't available on the
system. The plotly service (at https://plot.ly
or on-premise) generates images on a server,
where only a select number of fonts are
installed and supported. These include "Arial",
"Balto", "Courier New", "Droid Sans",, "Droid
Serif", "Droid Sans Mono", "Gravitas One", "Old
Standard TT", "Open Sans", "Overpass", "PT Sans
Narrow", "Raleway", "Times New Roman".
size
Returns
-------
plotly.graph_objs.parcats.line.colorbar.Tickfont
"""
return self["tickfont"]
@tickfont.setter
def tickfont(self, val):
self["tickfont"] = val
# tickformat
# ----------
@property
def tickformat(self):
"""
Sets the tick label formatting rule using d3 formatting mini-
languages which are very similar to those in Python. For
numbers, see: https://github.com/d3/d3-format/blob/master/READM
E.md#locale_format And for dates see:
https://github.com/d3/d3-time-
format/blob/master/README.md#locale_format We add one item to
d3's date formatter: "%{n}f" for fractional seconds with n
digits. For example, *2016-10-13 09:15:23.456* with tickformat
"%H~%M~%S.%2f" would display "09~15~23.46"
The 'tickformat' property is a string and must be specified as:
- A string
- A number that will be converted to a string
Returns
-------
str
"""
return self["tickformat"]
@tickformat.setter
def tickformat(self, val):
self["tickformat"] = val
# tickformatstops
# ---------------
@property
def tickformatstops(self):
"""
The 'tickformatstops' property is a tuple of instances of
Tickformatstop that may be specified as:
- A list or tuple of instances of plotly.graph_objs.parcats.line.colorbar.Tickformatstop
- A list or tuple of dicts of string/value properties that
will be passed to the Tickformatstop constructor
Supported dict properties:
dtickrange
range [*min*, *max*], where "min", "max" -
dtick values which describe some zoom level, it
is possible to omit "min" or "max" value by
passing "null"
enabled
Determines whether or not this stop is used. If
`false`, this stop is ignored even within its
`dtickrange`.
name
When used in a template, named items are
created in the output figure in addition to any
items the figure already has in this array. You
can modify these items in the output figure by
making your own item with `templateitemname`
matching this `name` alongside your
modifications (including `visible: false` or
`enabled: false` to hide it). Has no effect
outside of a template.
templateitemname
Used to refer to a named item in this array in
the template. Named items from the template
will be created even without a matching item in
the input figure, but you can modify one by
making an item with `templateitemname` matching
its `name`, alongside your modifications
(including `visible: false` or `enabled: false`
to hide it). If there is no template or no
matching item, this item will be hidden unless
you explicitly show it with `visible: true`.
value
string - dtickformat for described zoom level,
the same as "tickformat"
Returns
-------
tuple[plotly.graph_objs.parcats.line.colorbar.Tickformatstop]
"""
return self["tickformatstops"]
@tickformatstops.setter
def tickformatstops(self, val):
self["tickformatstops"] = val
# tickformatstopdefaults
# ----------------------
@property
def tickformatstopdefaults(self):
"""
When used in a template (as layout.template.data.parcats.line.c
olorbar.tickformatstopdefaults), sets the default property
values to use for elements of
parcats.line.colorbar.tickformatstops
The 'tickformatstopdefaults' property is an instance of Tickformatstop
that may be specified as:
- An instance of plotly.graph_objs.parcats.line.colorbar.Tickformatstop
- A dict of string/value properties that will be passed
to the Tickformatstop constructor
Supported dict properties:
Returns
-------
plotly.graph_objs.parcats.line.colorbar.Tickformatstop
"""
return self["tickformatstopdefaults"]
@tickformatstopdefaults.setter
def tickformatstopdefaults(self, val):
self["tickformatstopdefaults"] = val
# ticklen
# -------
@property
def ticklen(self):
"""
Sets the tick length (in px).
The 'ticklen' property is a number and may be specified as:
- An int or float in the interval [0, inf]
Returns
-------
int|float
"""
return self["ticklen"]
@ticklen.setter
def ticklen(self, val):
self["ticklen"] = val
# tickmode
# --------
@property
def tickmode(self):
"""
Sets the tick mode for this axis. If "auto", the number of
ticks is set via `nticks`. If "linear", the placement of the
ticks is determined by a starting position `tick0` and a tick
step `dtick` ("linear" is the default value if `tick0` and
`dtick` are provided). If "array", the placement of the ticks
is set via `tickvals` and the tick text is `ticktext`. ("array"
is the default value if `tickvals` is provided).
The 'tickmode' property is an enumeration that may be specified as:
- One of the following enumeration values:
['auto', 'linear', 'array']
Returns
-------
Any
"""
return self["tickmode"]
@tickmode.setter
def tickmode(self, val):
self["tickmode"] = val
# tickprefix
# ----------
@property
def tickprefix(self):
"""
Sets a tick label prefix.
The 'tickprefix' property is a string and must be specified as:
- A string
- A number that will be converted to a string
Returns
-------
str
"""
return self["tickprefix"]
@tickprefix.setter
def tickprefix(self, val):
self["tickprefix"] = val
# ticks
# -----
@property
def ticks(self):
"""
Determines whether ticks are drawn or not. If "", this axis'
        ticks are not drawn. If "outside" ("inside"), this axis' ticks
        are drawn outside (inside) the axis lines.
The 'ticks' property is an enumeration that may be specified as:
- One of the following enumeration values:
['outside', 'inside', '']
Returns
-------
Any
"""
return self["ticks"]
@ticks.setter
def ticks(self, val):
self["ticks"] = val
# ticksuffix
# ----------
@property
def ticksuffix(self):
"""
Sets a tick label suffix.
The 'ticksuffix' property is a string and must be specified as:
- A string
- A number that will be converted to a string
Returns
-------
str
"""
return self["ticksuffix"]
@ticksuffix.setter
def ticksuffix(self, val):
self["ticksuffix"] = val
# ticktext
# --------
@property
def ticktext(self):
"""
Sets the text displayed at the ticks position via `tickvals`.
Only has an effect if `tickmode` is set to "array". Used with
`tickvals`.
The 'ticktext' property is an array that may be specified as a tuple,
list, numpy array, or pandas Series
Returns
-------
numpy.ndarray
"""
return self["ticktext"]
@ticktext.setter
def ticktext(self, val):
self["ticktext"] = val
# ticktextsrc
# -----------
@property
def ticktextsrc(self):
"""
Sets the source reference on plot.ly for ticktext .
The 'ticktextsrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["ticktextsrc"]
@ticktextsrc.setter
def ticktextsrc(self, val):
self["ticktextsrc"] = val
# tickvals
# --------
@property
def tickvals(self):
"""
Sets the values at which ticks on this axis appear. Only has an
effect if `tickmode` is set to "array". Used with `ticktext`.
The 'tickvals' property is an array that may be specified as a tuple,
list, numpy array, or pandas Series
Returns
-------
numpy.ndarray
"""
return self["tickvals"]
@tickvals.setter
def tickvals(self, val):
self["tickvals"] = val
# tickvalssrc
# -----------
@property
def tickvalssrc(self):
"""
Sets the source reference on plot.ly for tickvals .
The 'tickvalssrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["tickvalssrc"]
@tickvalssrc.setter
def tickvalssrc(self, val):
self["tickvalssrc"] = val
# tickwidth
# ---------
@property
def tickwidth(self):
"""
Sets the tick width (in px).
The 'tickwidth' property is a number and may be specified as:
- An int or float in the interval [0, inf]
Returns
-------
int|float
"""
return self["tickwidth"]
@tickwidth.setter
def tickwidth(self, val):
self["tickwidth"] = val
# title
# -----
@property
def title(self):
"""
The 'title' property is an instance of Title
that may be specified as:
- An instance of plotly.graph_objs.parcats.line.colorbar.Title
- A dict of string/value properties that will be passed
to the Title constructor
Supported dict properties:
font
Sets this color bar's title font. Note that the
title's font used to be set by the now
deprecated `titlefont` attribute.
side
Determines the location of color bar's title
with respect to the color bar. Note that the
title's location used to be set by the now
deprecated `titleside` attribute.
text
Sets the title of the color bar. Note that
before the existence of `title.text`, the
title's contents used to be defined as the
`title` attribute itself. This behavior has
been deprecated.
Returns
-------
plotly.graph_objs.parcats.line.colorbar.Title
"""
return self["title"]
@title.setter
def title(self, val):
self["title"] = val
# titlefont
# ---------
@property
def titlefont(self):
"""
Deprecated: Please use parcats.line.colorbar.title.font
instead. Sets this color bar's title font. Note that the
title's font used to be set by the now deprecated `titlefont`
attribute.
The 'font' property is an instance of Font
that may be specified as:
- An instance of plotly.graph_objs.parcats.line.colorbar.title.Font
- A dict of string/value properties that will be passed
to the Font constructor
Supported dict properties:
color
family
HTML font family - the typeface that will be
applied by the web browser. The web browser
will only be able to apply a font if it is
available on the system which it operates.
Provide multiple font families, separated by
commas, to indicate the preference in which to
apply fonts if they aren't available on the
system. The plotly service (at https://plot.ly
or on-premise) generates images on a server,
where only a select number of fonts are
installed and supported. These include "Arial",
"Balto", "Courier New", "Droid Sans",, "Droid
Serif", "Droid Sans Mono", "Gravitas One", "Old
Standard TT", "Open Sans", "Overpass", "PT Sans
Narrow", "Raleway", "Times New Roman".
size
Returns
-------
"""
return self["titlefont"]
@titlefont.setter
def titlefont(self, val):
self["titlefont"] = val
# titleside
# ---------
@property
def titleside(self):
"""
Deprecated: Please use parcats.line.colorbar.title.side
instead. Determines the location of color bar's title with
respect to the color bar. Note that the title's location used
to be set by the now deprecated `titleside` attribute.
The 'side' property is an enumeration that may be specified as:
- One of the following enumeration values:
['right', 'top', 'bottom']
Returns
-------
"""
return self["titleside"]
@titleside.setter
def titleside(self, val):
self["titleside"] = val
# x
# -
@property
def x(self):
"""
Sets the x position of the color bar (in plot fraction).
The 'x' property is a number and may be specified as:
- An int or float in the interval [-2, 3]
Returns
-------
int|float
"""
return self["x"]
@x.setter
def x(self, val):
self["x"] = val
# xanchor
# -------
@property
def xanchor(self):
"""
Sets this color bar's horizontal position anchor. This anchor
binds the `x` position to the "left", "center" or "right" of
the color bar.
The 'xanchor' property is an enumeration that may be specified as:
- One of the following enumeration values:
['left', 'center', 'right']
Returns
-------
Any
"""
return self["xanchor"]
@xanchor.setter
def xanchor(self, val):
self["xanchor"] = val
# xpad
# ----
@property
def xpad(self):
"""
Sets the amount of padding (in px) along the x direction.
The 'xpad' property is a number and may be specified as:
- An int or float in the interval [0, inf]
Returns
-------
int|float
"""
return self["xpad"]
@xpad.setter
def xpad(self, val):
self["xpad"] = val
# y
# -
@property
def y(self):
"""
Sets the y position of the color bar (in plot fraction).
The 'y' property is a number and may be specified as:
- An int or float in the interval [-2, 3]
Returns
-------
int|float
"""
return self["y"]
@y.setter
def y(self, val):
self["y"] = val
# yanchor
# -------
@property
def yanchor(self):
"""
        Sets this color bar's vertical position anchor. This anchor
binds the `y` position to the "top", "middle" or "bottom" of
the color bar.
The 'yanchor' property is an enumeration that may be specified as:
- One of the following enumeration values:
['top', 'middle', 'bottom']
Returns
-------
Any
"""
return self["yanchor"]
@yanchor.setter
def yanchor(self, val):
self["yanchor"] = val
# ypad
# ----
@property
def ypad(self):
"""
Sets the amount of padding (in px) along the y direction.
The 'ypad' property is a number and may be specified as:
- An int or float in the interval [0, inf]
Returns
-------
int|float
"""
return self["ypad"]
@ypad.setter
def ypad(self, val):
self["ypad"] = val
# property parent name
# --------------------
@property
def _parent_path_str(self):
return "parcats.line"
# Self properties description
# ---------------------------
@property
def _prop_descriptions(self):
return """\
bgcolor
Sets the color of padded area.
bordercolor
Sets the axis line color.
borderwidth
            Sets the width (in px) of the border enclosing this
color bar.
dtick
Sets the step in-between ticks on this axis. Use with
`tick0`. Must be a positive number, or special strings
available to "log" and "date" axes. If the axis `type`
is "log", then ticks are set every 10^(n*dtick) where n
is the tick number. For example, to set a tick mark at
1, 10, 100, 1000, ... set dtick to 1. To set tick marks
at 1, 100, 10000, ... set dtick to 2. To set tick marks
at 1, 5, 25, 125, 625, 3125, ... set dtick to
log_10(5), or 0.69897000433. "log" has several special
values; "L<f>", where `f` is a positive number, gives
ticks linearly spaced in value (but not position). For
example `tick0` = 0.1, `dtick` = "L0.5" will put ticks
at 0.1, 0.6, 1.1, 1.6 etc. To show powers of 10 plus
small digits between, use "D1" (all digits) or "D2"
(only 2 and 5). `tick0` is ignored for "D1" and "D2".
If the axis `type` is "date", then you must convert the
time to milliseconds. For example, to set the interval
between ticks to one day, set `dtick` to 86400000.0.
"date" also has special values "M<n>" gives ticks
spaced by a number of months. `n` must be a positive
integer. To set ticks on the 15th of every third month,
set `tick0` to "2000-01-15" and `dtick` to "M3". To set
ticks every 4 years, set `dtick` to "M48"
exponentformat
Determines a formatting rule for the tick exponents.
For example, consider the number 1,000,000,000. If
"none", it appears as 1,000,000,000. If "e", 1e+9. If
"E", 1E+9. If "power", 1x10^9 (with 9 in a super
script). If "SI", 1G. If "B", 1B.
len
            Sets the length of the color bar. This measure excludes
the padding of both ends. That is, the color bar length
is this length minus the padding on both ends.
lenmode
Determines whether this color bar's length (i.e. the
measure in the color variation direction) is set in
            units of plot "fraction" or in "pixels". Use `len` to
set the value.
nticks
Specifies the maximum number of ticks for the
particular axis. The actual number of ticks will be
chosen automatically to be less than or equal to
`nticks`. Has an effect only if `tickmode` is set to
"auto".
outlinecolor
Sets the axis line color.
outlinewidth
Sets the width (in px) of the axis line.
separatethousands
If "true", even 4-digit integers are separated
showexponent
If "all", all exponents are shown besides their
significands. If "first", only the exponent of the
first tick is shown. If "last", only the exponent of
the last tick is shown. If "none", no exponents appear.
showticklabels
Determines whether or not the tick labels are drawn.
showtickprefix
If "all", all tick labels are displayed with a prefix.
If "first", only the first tick is displayed with a
prefix. If "last", only the last tick is displayed with
a suffix. If "none", tick prefixes are hidden.
showticksuffix
Same as `showtickprefix` but for tick suffixes.
thickness
            Sets the thickness of the color bar. This measure
excludes the size of the padding, ticks and labels.
thicknessmode
Determines whether this color bar's thickness (i.e. the
measure in the constant color direction) is set in
units of plot "fraction" or in "pixels". Use
`thickness` to set the value.
tick0
Sets the placement of the first tick on this axis. Use
with `dtick`. If the axis `type` is "log", then you
must take the log of your starting tick (e.g. to set
the starting tick to 100, set the `tick0` to 2) except
when `dtick`=*L<f>* (see `dtick` for more info). If the
axis `type` is "date", it should be a date string, like
date data. If the axis `type` is "category", it should
be a number, using the scale where each category is
assigned a serial number from zero in the order it
appears.
tickangle
Sets the angle of the tick labels with respect to the
horizontal. For example, a `tickangle` of -90 draws the
tick labels vertically.
tickcolor
Sets the tick color.
tickfont
Sets the color bar's tick label font
tickformat
Sets the tick label formatting rule using d3 formatting
mini-languages which are very similar to those in
Python. For numbers, see: https://github.com/d3/d3-form
at/blob/master/README.md#locale_format And for dates
see: https://github.com/d3/d3-time-
format/blob/master/README.md#locale_format We add one
item to d3's date formatter: "%{n}f" for fractional
seconds with n digits. For example, *2016-10-13
09:15:23.456* with tickformat "%H~%M~%S.%2f" would
display "09~15~23.46"
tickformatstops
A tuple of plotly.graph_objects.parcats.line.colorbar.T
ickformatstop instances or dicts with compatible
properties
tickformatstopdefaults
When used in a template (as layout.template.data.parcat
s.line.colorbar.tickformatstopdefaults), sets the
default property values to use for elements of
parcats.line.colorbar.tickformatstops
ticklen
Sets the tick length (in px).
tickmode
Sets the tick mode for this axis. If "auto", the number
of ticks is set via `nticks`. If "linear", the
placement of the ticks is determined by a starting
position `tick0` and a tick step `dtick` ("linear" is
the default value if `tick0` and `dtick` are provided).
If "array", the placement of the ticks is set via
`tickvals` and the tick text is `ticktext`. ("array" is
the default value if `tickvals` is provided).
tickprefix
Sets a tick label prefix.
ticks
Determines whether ticks are drawn or not. If "", this
axis' ticks are not drawn. If "outside" ("inside"),
            this axis' ticks are drawn outside (inside) the axis lines.
ticksuffix
Sets a tick label suffix.
ticktext
Sets the text displayed at the ticks position via
`tickvals`. Only has an effect if `tickmode` is set to
"array". Used with `tickvals`.
ticktextsrc
Sets the source reference on plot.ly for ticktext .
tickvals
Sets the values at which ticks on this axis appear.
Only has an effect if `tickmode` is set to "array".
Used with `ticktext`.
tickvalssrc
Sets the source reference on plot.ly for tickvals .
tickwidth
Sets the tick width (in px).
title
plotly.graph_objects.parcats.line.colorbar.Title
instance or dict with compatible properties
titlefont
Deprecated: Please use parcats.line.colorbar.title.font
instead. Sets this color bar's title font. Note that
the title's font used to be set by the now deprecated
`titlefont` attribute.
titleside
Deprecated: Please use parcats.line.colorbar.title.side
instead. Determines the location of color bar's title
with respect to the color bar. Note that the title's
location used to be set by the now deprecated
`titleside` attribute.
x
Sets the x position of the color bar (in plot
fraction).
xanchor
Sets this color bar's horizontal position anchor. This
anchor binds the `x` position to the "left", "center"
or "right" of the color bar.
xpad
Sets the amount of padding (in px) along the x
direction.
y
Sets the y position of the color bar (in plot
fraction).
yanchor
            Sets this color bar's vertical position anchor. This
anchor binds the `y` position to the "top", "middle" or
"bottom" of the color bar.
ypad
Sets the amount of padding (in px) along the y
direction.
"""
_mapped_properties = {
"titlefont": ("title", "font"),
"titleside": ("title", "side"),
}
def __init__(
self,
arg=None,
bgcolor=None,
bordercolor=None,
borderwidth=None,
dtick=None,
exponentformat=None,
len=None,
lenmode=None,
nticks=None,
outlinecolor=None,
outlinewidth=None,
separatethousands=None,
showexponent=None,
showticklabels=None,
showtickprefix=None,
showticksuffix=None,
thickness=None,
thicknessmode=None,
tick0=None,
tickangle=None,
tickcolor=None,
tickfont=None,
tickformat=None,
tickformatstops=None,
tickformatstopdefaults=None,
ticklen=None,
tickmode=None,
tickprefix=None,
ticks=None,
ticksuffix=None,
ticktext=None,
ticktextsrc=None,
tickvals=None,
tickvalssrc=None,
tickwidth=None,
title=None,
titlefont=None,
titleside=None,
x=None,
xanchor=None,
xpad=None,
y=None,
yanchor=None,
ypad=None,
**kwargs
):
"""
Construct a new ColorBar object
Parameters
----------
arg
dict of properties compatible with this constructor or
an instance of plotly.graph_objs.parcats.line.ColorBar
bgcolor
Sets the color of padded area.
bordercolor
Sets the axis line color.
borderwidth
            Sets the width (in px) of the border enclosing this
color bar.
dtick
Sets the step in-between ticks on this axis. Use with
`tick0`. Must be a positive number, or special strings
available to "log" and "date" axes. If the axis `type`
is "log", then ticks are set every 10^(n*dtick) where n
is the tick number. For example, to set a tick mark at
1, 10, 100, 1000, ... set dtick to 1. To set tick marks
at 1, 100, 10000, ... set dtick to 2. To set tick marks
at 1, 5, 25, 125, 625, 3125, ... set dtick to
log_10(5), or 0.69897000433. "log" has several special
values; "L<f>", where `f` is a positive number, gives
ticks linearly spaced in value (but not position). For
example `tick0` = 0.1, `dtick` = "L0.5" will put ticks
at 0.1, 0.6, 1.1, 1.6 etc. To show powers of 10 plus
small digits between, use "D1" (all digits) or "D2"
(only 2 and 5). `tick0` is ignored for "D1" and "D2".
If the axis `type` is "date", then you must convert the
time to milliseconds. For example, to set the interval
between ticks to one day, set `dtick` to 86400000.0.
"date" also has special values "M<n>" gives ticks
spaced by a number of months. `n` must be a positive
integer. To set ticks on the 15th of every third month,
set `tick0` to "2000-01-15" and `dtick` to "M3". To set
ticks every 4 years, set `dtick` to "M48"
exponentformat
Determines a formatting rule for the tick exponents.
For example, consider the number 1,000,000,000. If
"none", it appears as 1,000,000,000. If "e", 1e+9. If
"E", 1E+9. If "power", 1x10^9 (with 9 in a super
script). If "SI", 1G. If "B", 1B.
len
            Sets the length of the color bar. This measure excludes
the padding of both ends. That is, the color bar length
is this length minus the padding on both ends.
lenmode
Determines whether this color bar's length (i.e. the
measure in the color variation direction) is set in
            units of plot "fraction" or in "pixels". Use `len` to
set the value.
nticks
Specifies the maximum number of ticks for the
particular axis. The actual number of ticks will be
chosen automatically to be less than or equal to
`nticks`. Has an effect only if `tickmode` is set to
"auto".
outlinecolor
Sets the axis line color.
outlinewidth
Sets the width (in px) of the axis line.
separatethousands
If "true", even 4-digit integers are separated
showexponent
If "all", all exponents are shown besides their
significands. If "first", only the exponent of the
first tick is shown. If "last", only the exponent of
the last tick is shown. If "none", no exponents appear.
showticklabels
Determines whether or not the tick labels are drawn.
showtickprefix
If "all", all tick labels are displayed with a prefix.
If "first", only the first tick is displayed with a
prefix. If "last", only the last tick is displayed with
a suffix. If "none", tick prefixes are hidden.
showticksuffix
Same as `showtickprefix` but for tick suffixes.
thickness
            Sets the thickness of the color bar. This measure
excludes the size of the padding, ticks and labels.
thicknessmode
Determines whether this color bar's thickness (i.e. the
measure in the constant color direction) is set in
units of plot "fraction" or in "pixels". Use
`thickness` to set the value.
tick0
Sets the placement of the first tick on this axis. Use
with `dtick`. If the axis `type` is "log", then you
must take the log of your starting tick (e.g. to set
the starting tick to 100, set the `tick0` to 2) except
when `dtick`=*L<f>* (see `dtick` for more info). If the
axis `type` is "date", it should be a date string, like
date data. If the axis `type` is "category", it should
be a number, using the scale where each category is
assigned a serial number from zero in the order it
appears.
tickangle
Sets the angle of the tick labels with respect to the
horizontal. For example, a `tickangle` of -90 draws the
tick labels vertically.
tickcolor
Sets the tick color.
tickfont
Sets the color bar's tick label font
tickformat
Sets the tick label formatting rule using d3 formatting
mini-languages which are very similar to those in
Python. For numbers, see: https://github.com/d3/d3-form
at/blob/master/README.md#locale_format And for dates
see: https://github.com/d3/d3-time-
format/blob/master/README.md#locale_format We add one
item to d3's date formatter: "%{n}f" for fractional
seconds with n digits. For example, *2016-10-13
09:15:23.456* with tickformat "%H~%M~%S.%2f" would
display "09~15~23.46"
tickformatstops
A tuple of plotly.graph_objects.parcats.line.colorbar.T
ickformatstop instances or dicts with compatible
properties
tickformatstopdefaults
When used in a template (as layout.template.data.parcat
s.line.colorbar.tickformatstopdefaults), sets the
default property values to use for elements of
parcats.line.colorbar.tickformatstops
ticklen
Sets the tick length (in px).
tickmode
Sets the tick mode for this axis. If "auto", the number
of ticks is set via `nticks`. If "linear", the
placement of the ticks is determined by a starting
position `tick0` and a tick step `dtick` ("linear" is
the default value if `tick0` and `dtick` are provided).
If "array", the placement of the ticks is set via
`tickvals` and the tick text is `ticktext`. ("array" is
the default value if `tickvals` is provided).
tickprefix
Sets a tick label prefix.
ticks
Determines whether ticks are drawn or not. If "", this
axis' ticks are not drawn. If "outside" ("inside"),
            this axis' ticks are drawn outside (inside) the axis lines.
ticksuffix
Sets a tick label suffix.
ticktext
Sets the text displayed at the ticks position via
`tickvals`. Only has an effect if `tickmode` is set to
"array". Used with `tickvals`.
ticktextsrc
Sets the source reference on plot.ly for ticktext .
tickvals
Sets the values at which ticks on this axis appear.
Only has an effect if `tickmode` is set to "array".
Used with `ticktext`.
tickvalssrc
Sets the source reference on plot.ly for tickvals .
tickwidth
Sets the tick width (in px).
title
plotly.graph_objects.parcats.line.colorbar.Title
instance or dict with compatible properties
titlefont
Deprecated: Please use parcats.line.colorbar.title.font
instead. Sets this color bar's title font. Note that
the title's font used to be set by the now deprecated
`titlefont` attribute.
titleside
Deprecated: Please use parcats.line.colorbar.title.side
instead. Determines the location of color bar's title
with respect to the color bar. Note that the title's
location used to be set by the now deprecated
`titleside` attribute.
x
Sets the x position of the color bar (in plot
fraction).
xanchor
Sets this color bar's horizontal position anchor. This
anchor binds the `x` position to the "left", "center"
or "right" of the color bar.
xpad
Sets the amount of padding (in px) along the x
direction.
y
Sets the y position of the color bar (in plot
fraction).
yanchor
            Sets this color bar's vertical position anchor. This
anchor binds the `y` position to the "top", "middle" or
"bottom" of the color bar.
ypad
Sets the amount of padding (in px) along the y
direction.
Returns
-------
ColorBar
"""
super(ColorBar, self).__init__("colorbar")
# Validate arg
# ------------
if arg is None:
arg = {}
elif isinstance(arg, self.__class__):
arg = arg.to_plotly_json()
elif isinstance(arg, dict):
arg = _copy.copy(arg)
else:
raise ValueError(
"""\
The first argument to the plotly.graph_objs.parcats.line.ColorBar
constructor must be a dict or
an instance of plotly.graph_objs.parcats.line.ColorBar"""
)
# Handle skip_invalid
# -------------------
self._skip_invalid = kwargs.pop("skip_invalid", False)
# Import validators
# -----------------
from plotly.validators.parcats.line import colorbar as v_colorbar
# Initialize validators
# ---------------------
self._validators["bgcolor"] = v_colorbar.BgcolorValidator()
self._validators["bordercolor"] = v_colorbar.BordercolorValidator()
self._validators["borderwidth"] = v_colorbar.BorderwidthValidator()
self._validators["dtick"] = v_colorbar.DtickValidator()
self._validators["exponentformat"] = v_colorbar.ExponentformatValidator()
self._validators["len"] = v_colorbar.LenValidator()
self._validators["lenmode"] = v_colorbar.LenmodeValidator()
self._validators["nticks"] = v_colorbar.NticksValidator()
self._validators["outlinecolor"] = v_colorbar.OutlinecolorValidator()
self._validators["outlinewidth"] = v_colorbar.OutlinewidthValidator()
self._validators["separatethousands"] = v_colorbar.SeparatethousandsValidator()
self._validators["showexponent"] = v_colorbar.ShowexponentValidator()
self._validators["showticklabels"] = v_colorbar.ShowticklabelsValidator()
self._validators["showtickprefix"] = v_colorbar.ShowtickprefixValidator()
self._validators["showticksuffix"] = v_colorbar.ShowticksuffixValidator()
self._validators["thickness"] = v_colorbar.ThicknessValidator()
self._validators["thicknessmode"] = v_colorbar.ThicknessmodeValidator()
self._validators["tick0"] = v_colorbar.Tick0Validator()
self._validators["tickangle"] = v_colorbar.TickangleValidator()
self._validators["tickcolor"] = v_colorbar.TickcolorValidator()
self._validators["tickfont"] = v_colorbar.TickfontValidator()
self._validators["tickformat"] = v_colorbar.TickformatValidator()
self._validators["tickformatstops"] = v_colorbar.TickformatstopsValidator()
self._validators[
"tickformatstopdefaults"
] = v_colorbar.TickformatstopValidator()
self._validators["ticklen"] = v_colorbar.TicklenValidator()
self._validators["tickmode"] = v_colorbar.TickmodeValidator()
self._validators["tickprefix"] = v_colorbar.TickprefixValidator()
self._validators["ticks"] = v_colorbar.TicksValidator()
self._validators["ticksuffix"] = v_colorbar.TicksuffixValidator()
self._validators["ticktext"] = v_colorbar.TicktextValidator()
self._validators["ticktextsrc"] = v_colorbar.TicktextsrcValidator()
self._validators["tickvals"] = v_colorbar.TickvalsValidator()
self._validators["tickvalssrc"] = v_colorbar.TickvalssrcValidator()
self._validators["tickwidth"] = v_colorbar.TickwidthValidator()
self._validators["title"] = v_colorbar.TitleValidator()
self._validators["x"] = v_colorbar.XValidator()
self._validators["xanchor"] = v_colorbar.XanchorValidator()
self._validators["xpad"] = v_colorbar.XpadValidator()
self._validators["y"] = v_colorbar.YValidator()
self._validators["yanchor"] = v_colorbar.YanchorValidator()
self._validators["ypad"] = v_colorbar.YpadValidator()
# Populate data dict with properties
# ----------------------------------
_v = arg.pop("bgcolor", None)
self["bgcolor"] = bgcolor if bgcolor is not None else _v
_v = arg.pop("bordercolor", None)
self["bordercolor"] = bordercolor if bordercolor is not None else _v
_v = arg.pop("borderwidth", None)
self["borderwidth"] = borderwidth if borderwidth is not None else _v
_v = arg.pop("dtick", None)
self["dtick"] = dtick if dtick is not None else _v
_v = arg.pop("exponentformat", None)
self["exponentformat"] = exponentformat if exponentformat is not None else _v
_v = arg.pop("len", None)
self["len"] = len if len is not None else _v
_v = arg.pop("lenmode", None)
self["lenmode"] = lenmode if lenmode is not None else _v
_v = arg.pop("nticks", None)
self["nticks"] = nticks if nticks is not None else _v
_v = arg.pop("outlinecolor", None)
self["outlinecolor"] = outlinecolor if outlinecolor is not None else _v
_v = arg.pop("outlinewidth", None)
self["outlinewidth"] = outlinewidth if outlinewidth is not None else _v
_v = arg.pop("separatethousands", None)
self["separatethousands"] = (
separatethousands if separatethousands is not None else _v
)
_v = arg.pop("showexponent", None)
self["showexponent"] = showexponent if showexponent is not None else _v
_v = arg.pop("showticklabels", None)
self["showticklabels"] = showticklabels if showticklabels is not None else _v
_v = arg.pop("showtickprefix", None)
self["showtickprefix"] = showtickprefix if showtickprefix is not None else _v
_v = arg.pop("showticksuffix", None)
self["showticksuffix"] = showticksuffix if showticksuffix is not None else _v
_v = arg.pop("thickness", None)
self["thickness"] = thickness if thickness is not None else _v
_v = arg.pop("thicknessmode", None)
self["thicknessmode"] = thicknessmode if thicknessmode is not None else _v
_v = arg.pop("tick0", None)
self["tick0"] = tick0 if tick0 is not None else _v
_v = arg.pop("tickangle", None)
self["tickangle"] = tickangle if tickangle is not None else _v
_v = arg.pop("tickcolor", None)
self["tickcolor"] = tickcolor if tickcolor is not None else _v
_v = arg.pop("tickfont", None)
self["tickfont"] = tickfont if tickfont is not None else _v
_v = arg.pop("tickformat", None)
self["tickformat"] = tickformat if tickformat is not None else _v
_v = arg.pop("tickformatstops", None)
self["tickformatstops"] = tickformatstops if tickformatstops is not None else _v
_v = arg.pop("tickformatstopdefaults", None)
self["tickformatstopdefaults"] = (
tickformatstopdefaults if tickformatstopdefaults is not None else _v
)
_v = arg.pop("ticklen", None)
self["ticklen"] = ticklen if ticklen is not None else _v
_v = arg.pop("tickmode", None)
self["tickmode"] = tickmode if tickmode is not None else _v
_v = arg.pop("tickprefix", None)
self["tickprefix"] = tickprefix if tickprefix is not None else _v
_v = arg.pop("ticks", None)
self["ticks"] = ticks if ticks is not None else _v
_v = arg.pop("ticksuffix", None)
self["ticksuffix"] = ticksuffix if ticksuffix is not None else _v
_v = arg.pop("ticktext", None)
self["ticktext"] = ticktext if ticktext is not None else _v
_v = arg.pop("ticktextsrc", None)
self["ticktextsrc"] = ticktextsrc if ticktextsrc is not None else _v
_v = arg.pop("tickvals", None)
self["tickvals"] = tickvals if tickvals is not None else _v
_v = arg.pop("tickvalssrc", None)
self["tickvalssrc"] = tickvalssrc if tickvalssrc is not None else _v
_v = arg.pop("tickwidth", None)
self["tickwidth"] = tickwidth if tickwidth is not None else _v
_v = arg.pop("title", None)
self["title"] = title if title is not None else _v
_v = arg.pop("titlefont", None)
_v = titlefont if titlefont is not None else _v
if _v is not None:
self["titlefont"] = _v
_v = arg.pop("titleside", None)
_v = titleside if titleside is not None else _v
if _v is not None:
self["titleside"] = _v
_v = arg.pop("x", None)
self["x"] = x if x is not None else _v
_v = arg.pop("xanchor", None)
self["xanchor"] = xanchor if xanchor is not None else _v
_v = arg.pop("xpad", None)
self["xpad"] = xpad if xpad is not None else _v
_v = arg.pop("y", None)
self["y"] = y if y is not None else _v
_v = arg.pop("yanchor", None)
self["yanchor"] = yanchor if yanchor is not None else _v
_v = arg.pop("ypad", None)
self["ypad"] = ypad if ypad is not None else _v
# Process unknown kwargs
# ----------------------
self._process_kwargs(**dict(arg, **kwargs))
# Reset skip_invalid
# ------------------
self._skip_invalid = False
from plotly.graph_objs.parcats.line import colorbar
| 37.341935
| 98
| 0.571427
|
aad9ebc919506cdb9632b02701488ee4ee2b0a42
| 907
|
py
|
Python
|
tests/test_grid.py
|
roure/aima-drivers
|
45ed0153b614ca28979f58591956b59bc4ffcedf
|
[
"MIT"
] | null | null | null |
tests/test_grid.py
|
roure/aima-drivers
|
45ed0153b614ca28979f58591956b59bc4ffcedf
|
[
"MIT"
] | 2
|
2019-03-12T12:59:50.000Z
|
2019-03-12T13:09:06.000Z
|
tests/test_grid.py
|
roure/aima-drivers
|
45ed0153b614ca28979f58591956b59bc4ffcedf
|
[
"MIT"
] | null | null | null |
import pytest
from grid import * # noqa
def compare_list(x, y):
return all([elm_x == y[i] for i, elm_x in enumerate(x)])
def test_distance():
assert distance((1, 2), (5, 5)) == 5.0
def test_distance_squared():
assert distance_squared((1, 2), (5, 5)) == 25.0
def test_vector_clip():
assert vector_clip((-1, 10), (0, 0), (9, 9)) == (0, 9)
def test_turn_heading():
assert turn_heading((0, 1), 1) == (-1, 0)
assert turn_heading((0, 1), -1) == (1, 0)
assert turn_heading((1, 0), 1) == (0, 1)
assert turn_heading((1, 0), -1) == (0, -1)
assert turn_heading((0, -1), 1) == (1, 0)
assert turn_heading((0, -1), -1) == (-1, 0)
assert turn_heading((-1, 0), 1) == (0, -1)
assert turn_heading((-1, 0), -1) == (0, 1)
def test_turn_left():
assert turn_left((0, 1)) == (-1, 0)
def test_turn_right():
assert turn_right((0, 1)) == (1, 0)
if __name__ == '__main__':
pytest.main()
| 21.595238
| 60
| 0.577729
|
259ff7f5dee426d2c362f1800bb1be98a804421c
| 5,184
|
py
|
Python
|
extensionGadget.py
|
GarnetSunset/chromeExtensionsFetcher
|
dd2f620f9cfe8897235b8bf98a5b614d5669c6e2
|
[
"MIT"
] | null | null | null |
extensionGadget.py
|
GarnetSunset/chromeExtensionsFetcher
|
dd2f620f9cfe8897235b8bf98a5b614d5669c6e2
|
[
"MIT"
] | null | null | null |
extensionGadget.py
|
GarnetSunset/chromeExtensionsFetcher
|
dd2f620f9cfe8897235b8bf98a5b614d5669c6e2
|
[
"MIT"
] | null | null | null |
from bs4 import BeautifulSoup
from selenium import webdriver
from six.moves.urllib.request import urlretrieve
import ctypes, os, re, json, socket, subprocess, sys, time, zipfile
localappdata = os.getenv('LocalAPPDATA')
dir = localappdata + r"\Google\Chrome\User Data\Default\Extensions"
dragNDrop = ''.join(sys.argv[1:])
local = False
names = None
owd = os.getcwd()
returnMan = []
searchURL = "https://chrome.google.com/webstore/search/"
if not os.path.exists("Machines"):
os.makedirs("Machines")
def admin():
try:
return ctypes.windll.shell32.IsUserAnAdmin()
except:
return False
if os.path.isfile('chrome.ini'):
ini = open('chrome.ini', 'r')
locationString = ini.read()
elif os.path.isfile('chromedriver.exe'):
locationString = 'chromedriver.exe'
else:
response = urlretrieve('https://chromedriver.storage.googleapis.com/2.38/chromedriver_win32.zip','chromedriver.zip')
zip_ref = zipfile.ZipFile("chromedriver.zip", 'r')
zip_ref.extractall(owd)
    zip_ref.close()
locationString = 'chromedriver.exe'
if dragNDrop == "":
pass
else:
directory_list = []
with open(dragNDrop) as f:
for line in f:
line = line.replace('\n', '')
if len(line) == 32:
directory_list.append(line)
extension = dragNDrop.index(".")
fileName = dragNDrop[:extension]
open("Machines/" + fileName + "-extensions.txt", 'w').close()
choice = raw_input("1. Your machine or 2. Someone else's?\n>")
if choice == "1" and dragNDrop == "":
directory_list = list()
hostnameIP = socket.gethostname()
open("Machines/" + hostnameIP + "-extensions.txt", 'w').close()
for root, dirs, files in os.walk(dir, topdown=False):
for name in dirs:
if len(name) == 32:
directory_list.append(name)
if choice == "2" and dragNDrop == "":
hostnameIP = raw_input("Input hostname or IP\n>")
username = raw_input("Input username\n>")
open("Machines/" + hostnameIP + "-extensions.txt", 'w').close()
if admin():
batcmd = r"dir \"\\\\"+hostnameIP+r"\c$\Users\\"+username+r"\AppData\Local\Google\Chrome\User Data\Default\Extensions\""
result = subprocess.check_output(batcmd, shell=True)
directory_list = list()
while("<DIR>" in result):
dirLoc = result.find("<DIR>")
result = result[dirLoc+15:]
newLine = result.find("\n")
name = result[:newLine]
name.rstrip()
name = name[:-1]
if len(name) == 32:
directory_list.append(name)
else:
ctypes.windll.shell32.ShellExecuteW(None, "runas", sys.executable, __file__, None, 1)
if choice == "2":
driver = webdriver.Chrome(executable_path=(locationString))
driver.set_window_position(4000, 651)
driver.set_page_load_timeout(600)
for x in directory_list:
driver.get(searchURL+x)
time.sleep(2)
requestRec = driver.page_source
soup = BeautifulSoup(requestRec, 'lxml')
soup.prettify()
for tagStuff in soup.find_all('div', {'class': 'a-na-d-w'}):
names = tagStuff.text
print(tagStuff.text.encode('utf-8'))
        if names is None:
names = "Unknown ID: " + x
if dragNDrop == "":
text_file = open("Machines/" + hostnameIP + "-extensions.txt", "a")
else:
text_file = open("Machines/" + fileName + "-extensions.txt", "a")
text_file.write(names.encode('utf-8')+"\n")
text_file.close()
names = ""
driver.close()
else:
local = True
for dirpath, subdirs, files in os.walk(dir):
for x in files:
if x == "manifest.json":
                with open(os.path.join(dirpath, "manifest.json")) as f:
herewego = json.load(f)
if("__MSG_appName__" != herewego["name"]):
if("__MSG_APP_NAME__" != herewego["name"]):
if("__MSG_extName__" != herewego["name"]):
returnMan.append(herewego["name"])
if os.path.isfile("debug.log"):
try:
os.remove("debug.log")
except:
print("")
if os.path.isfile("chromedriver.zip"):
try:
os.remove("chromedriver.zip")
except:
print("")
if dragNDrop == "":
with open("Machines/" + hostnameIP + "-extensions.txt", 'r') as myfile:
data=myfile.read().replace('\n\n', '\n')
data=data.replace('\n\n', '\n')
myfile.close()
with open("Machines/" + hostnameIP + "-extensions.txt", 'w') as newfile:
if(local == True):
for x in returnMan:
newfile.write(x+"\n")
else:
newfile.write(data)
newfile.close()
else:
with open("Machines/" + fileName + "-extensions.txt", 'r') as myfile:
data=myfile.read().replace('\n\n', '\n')
data=data.replace('\n\n', '\n')
myfile.close()
with open("Machines/" + fileName + "-extensions.txt", 'w') as newfile:
newfile.write(data)
newfile.close()
print("All Done")
| 34.105263
| 128
| 0.575424
|
c54a7f8aaa93fe7d262b1a26ae1192dcbf79e7f3
| 1,672
|
py
|
Python
|
leetcode/tree/binary-tree-inorder-traversal.py
|
jaimeulloa61/data-structure-and-algorithms
|
76140bb36b62ebc7c60914c48a323aae4956fb0a
|
[
"MIT"
] | 81
|
2020-05-22T14:22:04.000Z
|
2021-12-18T10:11:23.000Z
|
leetcode/tree/binary-tree-inorder-traversal.py
|
jaimeulloa61/data-structure-and-algorithms
|
76140bb36b62ebc7c60914c48a323aae4956fb0a
|
[
"MIT"
] | 4
|
2020-08-06T21:08:00.000Z
|
2021-03-31T16:07:50.000Z
|
leetcode/tree/binary-tree-inorder-traversal.py
|
jaimeulloa61/data-structure-and-algorithms
|
76140bb36b62ebc7c60914c48a323aae4956fb0a
|
[
"MIT"
] | 37
|
2020-05-22T14:25:21.000Z
|
2021-12-30T03:13:13.000Z
|
"""
Given a binary tree, return the inorder traversal of its nodes' values.
Example:
Input: [1,null,2,3]
1
\
2
/
3
Output: [1,3,2]
Follow up: Recursive solution is trivial, could you do it iteratively?
"""
# Solutions
# Recursive Solution
class TreeNode:
def __init__(self, x):
self.val = x
self.left = None
self.right = None
class Solution:
"""
Recursive Solution
"""
def __init__(self):
self.list = []
def inorderTraversal(self, root: TreeNode) -> List[int]:
        if root:
            self.inorderTraversal(root.left)
            self.list.append(root.val)
            self.inorderTraversal(root.right)
return self.list
# Runtime: 40 ms
# Memory Usage: 13.8 MB
# Iterative Solution
# Definition for a binary tree node.
# class TreeNode:
# def __init__(self, x):
# self.val = x
# self.left = None
# self.right = None
class Solution:
"""
Iterative Solution
Time Complexity: O( n )
Space Complexity: O( n )
"""
def inorderTraversal(self, root: TreeNode) -> List[int]:
# Base Case
if not root:
            return []
stack = []
res = []
while root or stack:
# Traverse to the extreme left
while root:
stack.append(root)
root = root.left
root = stack.pop()
res.append(root.val)
root = root.right
return res
# Runtime: 20 ms, faster than 96.86% of Python3 online submissions
# Memory Usage: 12.6 MB, less than 100.00% of Python3 online submissions
| 18.577778
| 72
| 0.560407
|
17523b234697d2b2d456c0d5207e7e64e93cef44
| 6,964
|
py
|
Python
|
testing/conformance/conformance/applications/topology.py
|
vishalbelsare/wallaroo
|
2b985a3e756786139316c72ebcca342346546ba7
|
[
"Apache-2.0"
] | 1,459
|
2017-09-16T13:13:15.000Z
|
2020-10-05T06:19:50.000Z
|
testing/conformance/conformance/applications/topology.py
|
vishalbelsare/wallaroo
|
2b985a3e756786139316c72ebcca342346546ba7
|
[
"Apache-2.0"
] | 1,413
|
2017-09-14T18:18:14.000Z
|
2020-09-28T08:10:30.000Z
|
testing/conformance/conformance/applications/topology.py
|
vishalbelsare/wallaroo
|
2b985a3e756786139316c72ebcca342346546ba7
|
[
"Apache-2.0"
] | 80
|
2017-09-27T23:16:23.000Z
|
2020-06-02T09:18:53.000Z
|
# Copyright 2019 The Wallaroo Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied. See the License for the specific language governing
# permissions and limitations under the License.
import logging
import os
import time
from ..__init__ import root # absolute path for Wallaroo repo
from ..base import (Application,
update_dict)
from ..completes_when import data_in_sink_count_is
from integration.cluster import runner_data_format
from integration.end_points import (Reader,
Sender,
sequence_generator)
from integration.errors import TimeoutError
from integration.external import run_shell_cmd
########################
# Topology Testing App #
########################
base_topology_policy = {
'command_parameters': {},
'sender_interval': 0.01,
'batch_size': 10,
'workers': 1,
'topology': [],}
SINK_EXPECT_MODIFIER = {'filter': 0.5, 'multi': 2}
def get_expect_modifier(topology):
last = topology[0]
expect_mod = SINK_EXPECT_MODIFIER.get(last, 1)
for s in topology[1:]:
# successive filters don't reduce because the second filter only
# receives input that already passed the first filter
        if s not in SINK_EXPECT_MODIFIER:
continue
if s == 'filter' and s == last:
continue
expect_mod *= SINK_EXPECT_MODIFIER[s]
last = s
return expect_mod
def find_send_and_expect_values(expect_mod, min_expect=20):
send = 10
while True:
if ((int(send * expect_mod) == send * expect_mod) and
send * expect_mod >= min_expect):
break
else:
send += 1
return (send, int(send * expect_mod))
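# Example: for topology ['filter', 'multi', 'filter'] the expect modifier is
# 0.5 * 2 * 0.5 = 0.5, and find_send_and_expect_values(0.5) returns (40, 20):
# 40 is the smallest send count whose expected sink total is an integer >= 20.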
class TopologyTestError(Exception):
pass
class TopologyTesterBaseApp(Application):
name = 'TopologyTestingBaseApp'
command = None
validation_command = None
sources = ['topology_tester']
split_streams = False
def __init__(self, config={}):
super().__init__()
self.config = base_topology_policy.copy()
update_dict(self.config, config)
self.workers = self.config['workers']
self.topology = self.config['topology']
# Add steps from topology to command and validation_command
steps_val = ' '.join('--{}'.format(s) for s in self.topology)
self.command = ("{cmd} {steps}".format(
cmd=self.command,
steps=steps_val))
self.validation_command = (
"{validation_cmd} {steps} "
"--output {{output}}".format(
validation_cmd=self.validation_command,
steps=steps_val))
expect_mod = get_expect_modifier(self.topology)
logging.info("Expect mod is {} for topology {!r}".format(expect_mod,
self.topology))
send, expect = find_send_and_expect_values(expect_mod)
self.expect = expect
logging.info("Sending {} messages per key".format(send))
logging.info("Expecting {} final messages per key".format(expect))
logging.info("Running {!r} with config {!r}".format(
self.__class__.name, self.config))
self.source_gens = [(sequence_generator(send, 0, '>I', 'key_0'),
'topology_tester'),
(sequence_generator(send, 0, '>I', 'key_1'),
'topology_tester')]
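        # Two independent keys are sent, so sink_await() later waits for
        # len(self.source_gens) * self.expect == 2 * expect messages in the sink.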
def start_senders(self):
logging.info("Starting senders")
src_name = self.sources[0]
for gen, src_name in self.source_gens:
sender = Sender(address = self.cluster.source_addrs[0][src_name],
reader = Reader(gen))
self.cluster.add_sender(sender, start=True)
logging.debug("end of send_tcp")
def join_senders(self):
logging.info("Joining senders")
for sender in self.cluster.senders:
sender.join(self.sender_join_timeout)
if sender.error:
raise sender.error
if sender.is_alive():
raise TimeoutError("Sender failed to join in {} seconds"
.format(self.sender_join_timeout))
def sink_await(self):
logging.info("Awaiting on sink")
self.completes_when(data_in_sink_count_is(
expected=(len(self.source_gens) * self.expect),
timeout=90,
sink=-1,
allow_more=False))
def validate(self):
logging.info("Validating output")
sink_files = []
for i, s in enumerate(self.cluster.sinks):
sink_files.extend(s.save(
os.path.join(self.cluster.log_dir, "sink.{}.dat".format(i))))
with open(os.path.join(self.cluster.log_dir, "ops.log"), "wt") as f:
for op in self.cluster.ops:
f.write("{}\n".format(op))
command = self.validation_command.format(output = ",".join(sink_files))
res = run_shell_cmd(command)
if res.success:
if res.output:
logging.info("Validation command '%s' completed successfully "
"with the output:\n--\n%s", ' '.join(res.command),
res.output)
else:
logging.info("Validation command '%s' completed successfully",
' '.join(res.command))
else:
outputs = runner_data_format(self.persistent_data.get('runner_data',
[]))
if outputs:
logging.error("Application outputs:\n{}".format(outputs))
error = ("Validation command '%s' failed with the output:\n"
"--\n%s" % (' '.join(res.command), res.output))
# Save logs to file in case of error
raise TopologyTestError(error)
class TopologyTesterPython2(TopologyTesterBaseApp):
name = 'python2_TopologyTester'
command = 'machida --application-module topology_tester'
validation_command = ('python2 '
'{root}/testing/correctness/apps/'
'topology_tester/topology_tester.py').format(root=root)
class TopologyTesterPython3(TopologyTesterBaseApp):
name = 'python3_TopologyTester'
command = 'machida3 --application-module topology_tester'
validation_command = ('python3 '
'{root}/testing/correctness/apps/'
'topology_tester/topology_tester.py').format(root=root)
| 37.240642
| 80
| 0.596927
|
0f8a0012b869a206f29d6c7aa045fc77e3095113
| 209
|
py
|
Python
|
BoardGame/connectfour/ConnectFour.py
|
kitman0804/BoardGame
|
be8c08adf10f5d236d3904ca7673f2210f925979
|
[
"MIT"
] | null | null | null |
BoardGame/connectfour/ConnectFour.py
|
kitman0804/BoardGame
|
be8c08adf10f5d236d3904ca7673f2210f925979
|
[
"MIT"
] | null | null | null |
BoardGame/connectfour/ConnectFour.py
|
kitman0804/BoardGame
|
be8c08adf10f5d236d3904ca7673f2210f925979
|
[
"MIT"
] | null | null | null |
from ..prototype.mnkgame.Game import Game
from .GameBoard import GameBoard
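# Connect Four modelled as an (m, n, k) game: a 6 x 7 board where 4 in a row wins.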
class ConnectFour(Game):
def __init__(self):
super().__init__(m=6, n=7, k=4)
self._core.gameboard = GameBoard()
| 23.222222
| 42
| 0.684211
|
6c631fd16496abf26de9e86bf061d6fb44cd8b38
| 2,210
|
py
|
Python
|
harnessed_jobs/BOT_EO_analysis/v0/producer_BOT_EO_analysis.py
|
duncanwood/EO-analysis-jobs
|
26d22e49c0d2e32fbf2759f504048754f66ecc45
|
[
"BSD-3-Clause-LBNL"
] | 2
|
2018-07-26T09:32:46.000Z
|
2019-05-28T20:57:43.000Z
|
harnessed_jobs/BOT_EO_analysis/v0/producer_BOT_EO_analysis.py
|
duncanwood/EO-analysis-jobs
|
26d22e49c0d2e32fbf2759f504048754f66ecc45
|
[
"BSD-3-Clause-LBNL"
] | 3
|
2018-03-18T21:55:07.000Z
|
2019-04-18T18:26:06.000Z
|
harnessed_jobs/BOT_EO_analysis/v0/producer_BOT_EO_analysis.py
|
duncanwood/EO-analysis-jobs
|
26d22e49c0d2e32fbf2759f504048754f66ecc45
|
[
"BSD-3-Clause-LBNL"
] | 2
|
2020-11-12T19:47:42.000Z
|
2022-02-25T21:43:03.000Z
|
#!/usr/bin/env python
"""
Producer script for BOT analyses.
"""
from __future__ import print_function
from multiprocessor_execution import run_device_analysis_pool
from camera_components import camera_info
from bot_eo_analyses import fe55_jh_task, bias_frame_jh_task, \
read_noise_jh_task, \
dark_current_jh_task, bright_defects_jh_task, dark_defects_jh_task, \
ptc_jh_task, flat_pairs_jh_task, bf_jh_task, \
cte_jh_task, tearing_jh_task, \
get_analysis_types, raft_jh_noise_correlations, raft_results_task
# Use all of the available cores for processing.
processes = None
task_mapping = {'gain': (fe55_jh_task,),
'bias': (bias_frame_jh_task,),
'biasnoise': (read_noise_jh_task,),
'dark': (dark_current_jh_task,),
'badpixel': (bright_defects_jh_task, dark_defects_jh_task),
'ptc': (ptc_jh_task,),
'brighterfatter': (bf_jh_task,),
'linearity': (flat_pairs_jh_task,),
'cti': (cte_jh_task,),
'tearing': (tearing_jh_task,)}
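# Analysis types returned by get_analysis_types() that have no entry in
# task_mapping are reported and skipped in the detector-level loop below.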
analysis_types = get_analysis_types()
# Detector-level analyses
det_names = camera_info.get_det_names()
for analysis_type in analysis_types:
print("**************************************")
print("Running analysis type %s" % analysis_type)
print("**************************************")
if analysis_type not in task_mapping:
print(" not in task_mapping. skipping")
continue
tasks = task_mapping[analysis_type]
for task in tasks:
run_device_analysis_pool(task, det_names, processes=processes)
# Raft-level analyses
raft_names = camera_info.get_raft_names()
if 'biasnoise' in analysis_types:
print("**************************************")
print("Running analysis type biasnoise")
print("**************************************")
run_device_analysis_pool(raft_jh_noise_correlations, raft_names,
processes=processes)
print("**************************************")
print("Running raft_results_task")
print("**************************************")
run_device_analysis_pool(raft_results_task, raft_names, processes=processes)
| 38.77193
| 76
| 0.625792
|
0403b4cbfe7710d35f2e8dedf3d8f4cd81c9329d
| 2,454
|
py
|
Python
|
tests/games/test_uttt_monte_carlo.py
|
DoveDoof/QLUT
|
8d48a5430a267b6b1018312d8267236d4df899d9
|
[
"MIT"
] | null | null | null |
tests/games/test_uttt_monte_carlo.py
|
DoveDoof/QLUT
|
8d48a5430a267b6b1018312d8267236d4df899d9
|
[
"MIT"
] | 13
|
2018-07-19T09:42:28.000Z
|
2018-09-25T15:08:05.000Z
|
tests/games/test_uttt_monte_carlo.py
|
DoveDoof/QLUT
|
8d48a5430a267b6b1018312d8267236d4df899d9
|
[
"MIT"
] | null | null | null |
import unittest as test
import time
import games.uttt as ut
import techniques.monte_carlo as mc
class TestUltimateTicTacToe(test.TestCase):
@test.skip
def test_move(self):
# Test single move of monte carlo tree search algorithm
game_spec = ut.UltimateTicTacToeGameSpec()
# generate board with 10 random moves
random_func = game_spec.get_random_player_func()
board_state = ut._new_board()
side = 1
for _ in range(10):
move = random_func(board_state, side)
board_state = game_spec.apply_move(board_state, move, side)
side = -1*side
print("")
ut.print_board_state(board_state, side)
result, move = mc._monte_carlo_sample(game_spec, board_state, side)
print("result: ", result)
print("move: ", move)
mc_func = game_spec.get_monte_carlo_player_func()
result, move = mc.monte_carlo_tree_search(game_spec, board_state, side, 100)
print(result)
print(move)
@test.skip
def test_game(self):
# test game against monte carlo tree search algorithm
game_spec = ut.UltimateTicTacToeGameSpec()
player_func = game_spec.get_monte_carlo_player_func()
opponent_func = game_spec.get_random_player_func()
# opponent_func = game_spec.get_manual_player_func()
game_spec.play_game(player_func, opponent_func, log = 1)
def test_performance(self):
# test performance of mcts against random bot
game_spec = ut.UltimateTicTacToeGameSpec()
n = 10
number_of_samples = 27
mcts_func = game_spec.get_monte_carlo_player_func(number_of_samples)
rand_func = game_spec.get_random_player_func()
t = time.perf_counter()
ut.play_game(mcts_func, rand_func)
elapsed_time = time.perf_counter() - t
print('One game takes %s seconds, so %s will take %s seconds' % (elapsed_time, n, n*elapsed_time))
t = time.perf_counter()
resultsX = [ut.play_game(mcts_func, rand_func) for i in range(n)]
resultsO = [ut.play_game(rand_func, mcts_func) for i in range(n)]
elapsed_time = time.perf_counter() - t
print('Elapsed time:', elapsed_time)
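        # Results are encoded as +1 when the first player wins, -1 when the
        # second player wins and 0 for a draw, so the winrates below count a
        # draw as half a win: 0.5 + sum/(2n) when MCTS plays X, 0.5 - sum/(2n)
        # when it plays O.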
print('mcts as X:')
print('mcts wins: ', resultsX.count(1))
print('random wins: ', resultsX.count(-1))
print('Draws : ', resultsX.count(0))
print('Winrate : ', 0.5 + 1.*sum(resultsX)/2/n)
print('mcts as O:')
print('mcts wins: ', resultsO.count(-1))
print('random wins: ', resultsO.count(1))
print('Draws : ', resultsO.count(0))
print('Winrate : ', 0.5 - 1.*sum(resultsO)/2/n)
if __name__ == '__main__':
test.main()
| 29.926829
| 100
| 0.717604
|
c8eac20fdd8e250bad5331b3b5c52ede01e4b5e9
| 924
|
py
|
Python
|
davidgram/users/migrations/0005_auto_20180418_1957.py
|
lopun/davidgram
|
74abe2598d3bfd15b6ff88243696e71f451ba250
|
[
"MIT"
] | 2
|
2018-12-24T23:00:59.000Z
|
2019-05-18T23:52:02.000Z
|
davidgram/users/migrations/0005_auto_20180418_1957.py
|
ujin43255252/davidgram
|
74abe2598d3bfd15b6ff88243696e71f451ba250
|
[
"MIT"
] | 11
|
2020-06-05T18:23:02.000Z
|
2022-03-08T22:01:34.000Z
|
davidgram/users/migrations/0005_auto_20180418_1957.py
|
ujin43255252/davidgram
|
74abe2598d3bfd15b6ff88243696e71f451ba250
|
[
"MIT"
] | 1
|
2018-10-17T12:43:40.000Z
|
2018-10-17T12:43:40.000Z
|
# Generated by Django 2.0.4 on 2018-04-18 10:57
from django.conf import settings
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('users', '0004_auto_20180418_1952'),
]
operations = [
migrations.AddField(
model_name='user',
name='followers',
field=models.ManyToManyField(related_name='_user_followers_+', to=settings.AUTH_USER_MODEL),
),
migrations.AddField(
model_name='user',
name='following',
field=models.ManyToManyField(related_name='_user_following_+', to=settings.AUTH_USER_MODEL),
),
migrations.AlterField(
model_name='user',
name='gender',
field=models.CharField(choices=[('not-specified', 'Not specified'), ('male', 'Male'), ('female', 'Female')], max_length=80, null=True),
),
]
| 30.8
| 147
| 0.608225
|
2a12d69e35e3d4db9e0a5d907ab51a8f8d523306
| 2,062
|
py
|
Python
|
profiles_api/models.py
|
eklonsousa/profiles-rest-api
|
fbe9d2c917a597657fcfa97c4d3d47373f3cdff0
|
[
"MIT"
] | null | null | null |
profiles_api/models.py
|
eklonsousa/profiles-rest-api
|
fbe9d2c917a597657fcfa97c4d3d47373f3cdff0
|
[
"MIT"
] | null | null | null |
profiles_api/models.py
|
eklonsousa/profiles-rest-api
|
fbe9d2c917a597657fcfa97c4d3d47373f3cdff0
|
[
"MIT"
] | null | null | null |
from django.db import models
from django.contrib.auth.models import AbstractBaseUser
from django.contrib.auth.models import PermissionsMixin
from django.contrib.auth.models import BaseUserManager
from django.conf import settings
class UserProfileManager(BaseUserManager):
"""Manager for user profiles"""
def create_user(self, email, name, password=None):
"""Create a new user profile"""
if not email:
raise ValueError('Email cannot be blank')
email = self.normalize_email(email)
user = self.model(email=email, name=name)
user.set_password(password)
user.save(using=self._db)
return user
def create_superuser(self, email, name, password):
"""Create and save a new speruser with given details"""
user = self.create_user(email, name, password)
user.is_superuser = True
user.is_staff = True
user.save(using=self._db)
return user
class UserProfile(AbstractBaseUser, PermissionsMixin):
"""Database model for users in the system"""
email = models.EmailField(max_length=255, unique=True)
name = models.CharField(max_length=255)
is_active = models.BooleanField(default=True)
is_staff = models.BooleanField(default=False)
objects = UserProfileManager()
USERNAME_FIELD = 'email'
REQUIRED_FIELDS = ['name']
def get_full_name(self):
"""Retrieve full name for user"""
return self.name
def get_short_name(self):
"""Retrieve short name of user"""
return self.name
def __str__(self):
"""Return string representation of user"""
return self.email
class ProfileFeedItem(models.Model):
"""Profile status updte"""
user_profile = models.ForeignKey(
settings.AUTH_USER_MODEL,
on_delete=models.CASCADE
)
    status_text = models.CharField(max_length=255)
created_on = models.DateTimeField(auto_now_add=True)
def __str__(self):
"""Return the model as a string"""
return self.status_text
| 26.779221
| 63
| 0.675558
|
e88d64287de85e2970296dd42d7e7ee7c5b686f2
| 23,173
|
py
|
Python
|
src/practice_problem3.py
|
tnafiu/13-Exam2Practice
|
2c9f91a79eb7358a8376b3554abcc1dc082f6f04
|
[
"MIT"
] | null | null | null |
src/practice_problem3.py
|
tnafiu/13-Exam2Practice
|
2c9f91a79eb7358a8376b3554abcc1dc082f6f04
|
[
"MIT"
] | null | null | null |
src/practice_problem3.py
|
tnafiu/13-Exam2Practice
|
2c9f91a79eb7358a8376b3554abcc1dc082f6f04
|
[
"MIT"
] | null | null | null |
"""
PRACTICE Test 2, practice_problem 3.
Authors: David Mutchler, Dave Fisher, Valerie Galluzzi, Amanda Stouder,
their colleagues and Toluwa Nafiu.
""" # DONE: 1. PUT YOUR NAME IN THE ABOVE LINE.
########################################################################
# Students:
#
# These problems have DIFFICULTY and TIME ratings:
# DIFFICULTY rating: 1 to 10, where:
# 1 is very easy
# 3 is an "easy" Test 2 question.
# 5 is a "typical" Test 2 question.
# 7 is a "hard" Test 2 question.
# 10 is an EXTREMELY hard problem (too hard for a Test 2 question)
#
# TIME ratings: A ROUGH estimate of the number of minutes that we
# would expect a well-prepared student to take on the problem.
#
# IMPORTANT: For ALL the problems in this module,
# if you reach the time estimate and are NOT close to a solution,
# STOP working on that problem and ASK YOUR INSTRUCTOR FOR HELP
# on it, in class or via Piazza.
########################################################################
import simple_testing as st
import math
import rosegraphics as rg
def main():
""" Calls the TEST functions in this module. """
run_test_practice_problem3a()
run_test_practice_problem3b()
run_test_practice_problem3c()
run_test_practice_problem3d()
run_test_practice_problem3e()
def is_prime(n):
"""
What comes in: An integer.
What goes out: Returns True if the given integer is prime.
Returns False if the given integer is NOT prime.
Side effects: None.
Examples:
This function returns True or False, depending on whether
the given integer is prime or not. Since the smallest prime is 2,
this function returns False on all integers < 2.
It returns True on 2, 3, 5, 7, and other primes.
Note: The algorithm used here is simple and clear but slow.
Type hints:
:type n: int
"""
if n < 2:
return False
for k in range(2, int(math.sqrt(n) + 0.1) + 1):
if n % k == 0:
return False
return True
# ------------------------------------------------------------------
# Students:
# Do NOT touch the above is_prime function - it has no TODO.
# Do NOT copy code from this function.
#
# Instead, ** CALL ** this function as needed in the problems below.
# ------------------------------------------------------------------
# ----------------------------------------------------------------------
# Students: Some of the testing code below uses SimpleTestCase objects,
# from the imported simple_testing (st) module.
# See details in the test code below.
# ----------------------------------------------------------------------
def run_test_practice_problem3a():
""" Tests the practice_problem3a function. """
# ------------------------------------------------------------------
# 6 tests.
# They use the imported simple_testing (st) module.
# Each test is a SimpleTestCase with 3 arguments:
# -- the function to test,
# -- a list containing the argument(s) to send to the function,
# -- the correct returned value.
# For example, the first test below will call
# practice_problem3a((rg.Circle(rg.Point(5, 10), 20),
# rg.Circle(rg.Point(2, 20), 20),
# rg.Circle(rg.Point(7, 30), 10),
# rg.Circle(rg.Point(10, 40), 20),
# rg.Circle(rg.Point(2, 50), 10)))
# and compare the returned value against 1400 (the correct answer).
# ------------------------------------------------------------------
tests = [st.SimpleTestCase(practice_problem3a,
[(rg.Circle(rg.Point(5, 10), 20),
rg.Circle(rg.Point(2, 20), 20),
rg.Circle(rg.Point(7, 30), 10),
rg.Circle(rg.Point(10, 40), 20),
rg.Circle(rg.Point(2, 50), 10))],
5 * 2 * 7 * 10 * 2),
st.SimpleTestCase(practice_problem3a,
[(rg.Circle(rg.Point(58, 10), 20),)],
58),
st.SimpleTestCase(practice_problem3a,
[(rg.Circle(rg.Point(84, 100), 200),
rg.Circle(rg.Point(28, 200), 200),
rg.Circle(rg.Point(10005, 300), 100))],
84 * 28 * 10005),
st.SimpleTestCase(practice_problem3a,
[()],
1),
st.SimpleTestCase(practice_problem3a,
[(rg.Circle(rg.Point(5, 10), 20),
rg.Circle(rg.Point(0, 20), 20),
rg.Circle(rg.Point(7, 30), 10),
rg.Circle(rg.Point(10, 40), 20),
rg.Circle(rg.Point(2, 50), 10))],
5 * 0 * 7 * 10 * 2),
]
circles = []
for k in range(1, 101):
circles.append(rg.Circle(rg.Point(k, k + 20), 5 * k))
answer = math.factorial(100)
tests.append(st.SimpleTestCase(practice_problem3a,
[circles],
answer))
# ------------------------------------------------------------------
# Run the 6 tests in the tests list constructed above.
# ------------------------------------------------------------------
st.SimpleTestCase.run_tests('practice_problem3a', tests)
def practice_problem3a(circles):
"""
What comes in: A sequence of rg.Circles.
What goes out: Returns the product of the x-coordinates
of the centers of the rg.Circles.
Returns 1 if the given sequence is empty.
Side effects: None.
Examples:
If the sequence is a list containing these 5 rg.Circles:
rg.Circle(rg.Point(5, 10), 20)
rg.Circle(rg.Point(2, 20), 20)
rg.Circle(rg.Point(7, 30), 10)
rg.Circle(rg.Point(10, 40), 20)
rg.Circle(rg.Point(2, 50), 10)
then this function returns:
5 x 2 x 7 x 10 x 2, which is 1400.
Type hints:
      :type circles: [rg.Circle]
"""
####################################################################
# TODO: 2. Implement and test this function.
# The testing code is already written for you (above).
####################################################################
# DIFFICULTY AND TIME RATINGS (see top of this file for explanation)
# DIFFICULTY: 7
# TIME ESTIMATE: 10 minutes.
####################################################################
def run_test_practice_problem3b():
""" Tests the practice_problem3b function. """
# ------------------------------------------------------------------
# 13 tests. They use the imported simple_testing (st) module.
# Each test is a SimpleTestCase with 3 arguments:
# -- the function to test,
# -- a list containing the argument(s) to send to the function,
# -- the correct returned value.
# For example, the first test below will call
# practice_problem3b([12, 33, 18, 'hello', 9, 13, 3, 9])
# and compare the returned value against True (the correct answer).
# ------------------------------------------------------------------
tests = [st.SimpleTestCase(practice_problem3b,
[[12, 33, 18, 'hello', 9, 13, 3, 9]],
True),
st.SimpleTestCase(practice_problem3b,
[[12, 12, 33, 'hello', 5, 33, 5, 9]],
False),
st.SimpleTestCase(practice_problem3b,
[(77, 112, 33, 'hello', 0, 43, 5, 77)],
True),
st.SimpleTestCase(practice_problem3b,
[[1, 1, 1, 1, 1, 1, 2]],
False),
st.SimpleTestCase(practice_problem3b,
[['aa', 'a']],
False),
st.SimpleTestCase(practice_problem3b,
['aaa'],
True),
st.SimpleTestCase(practice_problem3b,
[['aa', 'a', 'b', 'a', 'b', 'a']],
True),
st.SimpleTestCase(practice_problem3b,
[[9]],
False),
st.SimpleTestCase(practice_problem3b,
[(12, 33, 8, 'hello', 99, 'hello')],
True),
st.SimpleTestCase(practice_problem3b,
[['hello there', 'he', 'lo', 'hello']],
False),
st.SimpleTestCase(practice_problem3b,
[((8,), '8', (4 + 4, 4 + 4), [8, 8], 7, 8)],
False),
st.SimpleTestCase(practice_problem3b,
[[(8,), '8', [4 + 4, 4 + 4],
(8, 8), 7, [8, 8]]],
True),
st.SimpleTestCase(practice_problem3b,
[[(8,), '8', [4 + 4, 4 + 4],
[8, 8], 7, (8, 8)]],
False),
]
# Run the 13 tests in the tests list constructed above.
st.SimpleTestCase.run_tests('practice_problem3b', tests)
def practice_problem3b(sequence):
"""
What comes in: A non-empty sequence.
What goes out: Returns True if the last item of the sequence
appears again somewhere else in the sequence. Returns False
if the last item of the sequence does NOT appear somewhere
else in the sequence.
Side effects: None.
Examples:
If the sequence is [12, 33, 18, 'hello', 9, 13, 3, 9],
this function returns True because the last item (9)
DOES appear elsewhere in the sequence (namely, at index 4).
If the sequence is [12, 12, 33, 'hello', 5, 33, 5, 9],
this function returns False because the last item (9)
does NOT appear elsewhere in the sequence.
If the sequence is (77, 112, 33, 'hello', 0, 43, 5, 77),
this function returns True because the last item (77)
DOES appear elsewhere in the sequence (namely, at index 0).
If the sequence is [9], this function returns False
because the last item (9) does NOT appear elsewhere
in the sequence.
If the sequence is [12, 33, 8, 'hello', 99, 'hello'],
this function returns True since the last item ('hello')
DOES appear elsewhere in the sequence
(namely, at indices 3 and 5).
If the sequence is ['hello there', 'he', 'lo', 'hello'],
this function returns False because the last item ('hello')
does NOT appear elsewhere in the sequence.
If the sequence is 'hello there',
this function returns True since the last item ('e') DOES
appear elsewhere in the sequence (namely, at indices 1 and 8).
Type hints:
      :type sequence: list or tuple or string
"""
####################################################################
# TODO: 3. Implement and test this function.
# The testing code is already written for you (above).
#
# IMPLEMENTATION REQUIREMENT: You are NOT allowed to use the
# 'count' or 'index' methods for sequences in this problem
# (because here we want you to demonstrate your ability
# to use explicit looping).
####################################################################
# DIFFICULTY AND TIME RATINGS (see top of this file for explanation)
# DIFFICULTY: 5
# TIME ESTIMATE: 8 minutes.
####################################################################
def run_test_practice_problem3c():
""" Tests the practice_problem3c function. """
# ------------------------------------------------------------------
    # 9 tests. They use the imported simple_testing (st) module.
# Each test is a SimpleTestCase with 3 arguments:
# -- the function to test,
# -- a list containing the argument(s) to send to the function,
# -- the correct returned value.
# For example, the first test below will call
# practice_problem3c((9, 0, 8, 0, 0, 4, 4, 0))
# and compare the returned value against [1, 3, 4, 7]
# (the correct answer).
# ------------------------------------------------------------------
tests = [st.SimpleTestCase(practice_problem3c,
[(9, 0, 8, 0, 0, 4, 4, 0)],
[1, 3, 4, 7]),
st.SimpleTestCase(practice_problem3c,
[(9, 9, 9, 9, 0, 9, 9, 9)],
[4]),
st.SimpleTestCase(practice_problem3c,
[(4, 5, 4, 5, 4, 5, 4)],
[]),
st.SimpleTestCase(practice_problem3c,
[[0, 0, 0]],
[0, 1, 2]),
st.SimpleTestCase(practice_problem3c,
[[0, 0]],
[0, 1]),
st.SimpleTestCase(practice_problem3c,
[[0, 77]],
[0]),
st.SimpleTestCase(practice_problem3c,
[[-40, 0]],
[1]),
st.SimpleTestCase(practice_problem3c,
[[-40, 67]],
[]),
st.SimpleTestCase(practice_problem3c,
[(1, 0, 2, 0, 0, 0, 0, 6, 9, 0, 0, 12)],
[1, 3, 4, 5, 6, 9, 10]),
]
# Run the tests in the tests list constructed above.
st.SimpleTestCase.run_tests('practice_problem3c', tests)
def practice_problem3c(sequence):
"""
What comes in: A non-empty sequence of integers.
What goes out: Returns a list of integers,
where the integers are the places (indices)
for which the item at that place equals 0.
Side effects: None.
Examples:
Given sequence (9, 0, 8, 0, 0, 4, 4, 0)
-- this function returns [1, 3, 4, 7]
since 0 appears at indices 1, 3, 4, and 7.
Given sequence [9, 9, 9, 9, 0, 9, 9, 9]
-- this function returns [4]
since 0 appears only at index 4.
Given sequence (4, 5, 4, 5, 4, 5, 4)
-- this function returns []
since none of the items are 0.
Given sequence [0, 0, 0]
-- this function returns [0, 1, 2]
since 0 appears at indices 0, 1 and 2.
Type hints:
      :type sequence: list or tuple or string
"""
####################################################################
# TODO: 4. Implement and test this function.
# The testing code is already written for you (above).
####################################################################
# DIFFICULTY AND TIME RATINGS (see top of this file for explanation)
# DIFFICULTY: 5
# TIME ESTIMATE: 8 minutes.
####################################################################
def run_test_practice_problem3d():
""" Tests the practice_problem3d function. """
# ------------------------------------------------------------------
    # 9 tests. They use the imported simple_testing (st) module.
# Each test is a SimpleTestCase with 3 arguments:
# -- the function to test,
# -- a list containing the argument(s) to send to the function,
# -- the correct returned value.
# For example, the first test below will call
# practice_problem3d((9, 0, 8, 0, 0, 4, 4, 0))
# and compare the returned value against 1 (the correct answer).
# ------------------------------------------------------------------
tests = [st.SimpleTestCase(practice_problem3d,
[(9, 0, 8, 0, 0, 4, 4, 0)],
1),
st.SimpleTestCase(practice_problem3d,
[(9, 9, 9, 9, 0, 9, 9, 9)],
4),
st.SimpleTestCase(practice_problem3d,
[(4, 5, 4, 5, 4, 5, 4)],
- 1),
st.SimpleTestCase(practice_problem3d,
[[0, 0, 0]],
0),
st.SimpleTestCase(practice_problem3d,
[[0, 0]],
0),
st.SimpleTestCase(practice_problem3d,
[[0, 77]],
0),
st.SimpleTestCase(practice_problem3d,
[[-40, 0]],
1),
st.SimpleTestCase(practice_problem3d,
[[-40, 67]],
- 1),
st.SimpleTestCase(practice_problem3d,
[(1, 0, 2, 0, 0, 0, 0, 6, 9, 0, 0, 12)],
1),
]
# Run the tests in the tests list constructed above.
st.SimpleTestCase.run_tests('practice_problem3d', tests)
def practice_problem3d(sequence):
"""
What comes in: A sequence of integers.
What goes out: Returns the first (leftmost) place (index)
for which the item at that place equals 0.
Returns -1 if the sequence contains no items equal to 0.
Side effects: None.
Examples:
Given sequence (9, 0, 8, 0, 0, 4, 4, 0)
-- this function returns 1
since 0 first appears at index 1
Given sequence [9, 9, 9, 9, 0, 9, 9, 9]
-- this function returns 4
since 0 first appears at index 4
Given sequence (4, 5, 4, 5, 4, 5, 4)
-- this function returns -1
since none of the items are 0.
Given sequence [0, 0, 0]
-- this function returns 0
since 0 first appears at index 0
Type hints:
      :type sequence: list or tuple or string
"""
####################################################################
# TODO: 5. Implement and test this function.
# The testing code is already written for you (above).
####################################################################
# DIFFICULTY AND TIME RATINGS (see top of this file for explanation)
# DIFFICULTY: 5
# TIME ESTIMATE: 8 minutes for each part of this problem.
####################################################################
####################################################################
# TODO: 6. Just ABOVE this TODO, you should have implemented
# a solution for the practice_problem3d function.
# Here, put ANOTHER solution, as follows:
#
# -- Your FIRST solution (ABOVE this TODO)
# should be a solution that IGNORES
# practice_problem3c (the previous problem).
#
# -- Your SECOND solution (BELOW this TODO)
# should be a solution that USES (calls)
# practice_problem3c.
#
# This solution should *** HAVE NO LOOP (no FOR). ***
####################################################################
def run_test_practice_problem3e():
""" Tests the practice_problem3e function. """
# ------------------------------------------------------------------
    # 12 tests. They use the imported simple_testing (st) module.
# Each test is a SimpleTestCase with 3 arguments:
# -- the function to test,
# -- a list containing the argument(s) to send to the function,
# -- the correct returned value.
# For example, the first test below will call
    #      practice_problem3e((12, 33, 18, 9, 13, 3, 99, 20, 19, 20))
# and compare the returned value against 161 (the correct answer).
# ------------------------------------------------------------------
tests = [st.SimpleTestCase(practice_problem3e,
[(12, 33, 18, 9, 13, 3, 99, 20, 19, 20)], 161),
st.SimpleTestCase(practice_problem3e,
[(3, 12, 10, 8, 8, 9, 8, 11)], 29),
st.SimpleTestCase(practice_problem3e,
[(-9999999999, 8888888888)], -9999999999),
st.SimpleTestCase(practice_problem3e,
[(8888888888, -9999999999)], 8888888888),
st.SimpleTestCase(practice_problem3e,
[(-77, 20000, -33, 40000, -55, 60000, -11)], -176),
st.SimpleTestCase(practice_problem3e,
[()], 0),
st.SimpleTestCase(practice_problem3e,
[[]], 0),
st.SimpleTestCase(practice_problem3e,
[[8]], 8),
st.SimpleTestCase(practice_problem3e,
[(-77, 8)], -77),
st.SimpleTestCase(practice_problem3e,
[(-77, 8, 77)], 0),
st.SimpleTestCase(practice_problem3e,
[(-77, 8, 78)], 1),
st.SimpleTestCase(practice_problem3e,
[(-77, 8, 78, 100)], 1),
]
# ------------------------------------------------------------------
    # Run the 12 tests in the tests list constructed above.
# ------------------------------------------------------------------
st.SimpleTestCase.run_tests('practice_problem3e', tests)
def practice_problem3e(sequence):
"""
What comes in:
A sequence of numbers.
What goes out:
Returns the sum of the numbers at EVEN INDICES of the sequence.
Side effects: None.
Examples:
If the sequence is:
(12, 33, 18, 9, 13, 3, 99, 20, 19, 20)
then this function returns
12 + 18 + 13 + 99 + 19, which is 161.
Type hints:
:type sequence: list(float) or tuple(float)
"""
# ------------------------------------------------------------------
# TODO: 7. Implement and test this function.
# The testing code is already written for you (above).
####################################################################
# DIFFICULTY AND TIME RATINGS (see top of this file for explanation)
# DIFFICULTY: 5
# TIME ESTIMATE: 8 minutes.
####################################################################
# ----------------------------------------------------------------------
# Calls main to start the ball rolling.
# ----------------------------------------------------------------------
main()
| 43.072491
| 82
| 0.453933
|
9120d8b84eea6c8bcc4685d92ee2b7afdd5b86ea
| 3,163
|
py
|
Python
|
Utilitarios/adivinhador_cesar.py
|
GregorioFornetti/CriPython
|
12b5bc64188de3a7c81d8f9d0a392f6edc7ac827
|
[
"MIT"
] | 1
|
2020-05-17T03:00:18.000Z
|
2020-05-17T03:00:18.000Z
|
Utilitarios/adivinhador_cesar.py
|
GregorioFornetti/Cripythongrafia
|
12b5bc64188de3a7c81d8f9d0a392f6edc7ac827
|
[
"MIT"
] | 1
|
2020-05-17T15:59:26.000Z
|
2020-05-17T15:59:26.000Z
|
Utilitarios/adivinhador_cesar.py
|
GregorioFornetti/Cripythongrafia
|
12b5bc64188de3a7c81d8f9d0a392f6edc7ac827
|
[
"MIT"
] | null | null | null |
import Cifras.cifra_de_cesar as cifra_de_cesar
import Cifras.utilidades_cifras as utilidades
import dicionarios
def imprimir_melhor_mensagem_adivinhada_apenas_letras(mensagem):
lista_melhor_msg_e_indice = adivinhar_cesar_apenas_letras(mensagem)
if lista_melhor_msg_e_indice:
print(dicionarios.retorna_mensagens_adivinhador_cesar(lista_melhor_msg_e_indice[0], lista_melhor_msg_e_indice[1]))
else:
print(dicionarios.retorna_mensagem_com_bordas(dicionarios.retorna_erro_mensagem(), 127))
def imprimir_melhor_mensagem_adivinhada_varios_caracteres(mensagem):
lista_melhor_msg_e_indice = adivinhar_cesar_varios_caracteres(mensagem)
if lista_melhor_msg_e_indice:
print(dicionarios.retorna_mensagens_adivinhador_cesar(lista_melhor_msg_e_indice[0], lista_melhor_msg_e_indice[1]))
else:
print(dicionarios.retorna_mensagem_com_bordas(dicionarios.retorna_erro_mensagem(), 127))
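# The adivinhar_* functions brute-force every candidate Caesar key (1-26 in the
# letters-only mode, 1-667 in the extended-character mode), decrypt the message
# with each key and keep the candidate whose frequency score is lowest.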
def adivinhar_cesar_apenas_letras(mensagem, idioma_teste=''):
if not mensagem:
return False
lista_mensagens = []
lista_pontuacoes = []
for chave in range(1, 27):
nova_mensagem = cifra_de_cesar.traduzir_modo_apenas_letras([str(chave)], mensagem)
lista_mensagens.append(nova_mensagem)
lista_pontuacoes.append(calcula_pontuacao(nova_mensagem.lower(), idioma_a_testar=idioma_teste))
index_melhor_possibilidade = lista_pontuacoes.index(min(lista_pontuacoes))
return [lista_mensagens[index_melhor_possibilidade], index_melhor_possibilidade + 1]
def adivinhar_cesar_varios_caracteres(mensagem, idioma_teste=''):
if not mensagem:
return False
lista_mensagens = []
lista_pontuacoes = []
for chave in range(1, 668):
nova_mensagem = cifra_de_cesar.traduzir_modo_varios_caracteres([str(chave)], mensagem)
lista_mensagens.append(nova_mensagem)
lista_pontuacoes.append(calcula_pontuacao(nova_mensagem, idioma_a_testar=idioma_teste))
index_melhor_possibilidade = lista_pontuacoes.index(min(lista_pontuacoes))
return [lista_mensagens[index_melhor_possibilidade], index_melhor_possibilidade + 1]
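# calcula_pontuacao scores a candidate plaintext by summing, for every letter,
# the absolute difference (in percentage points) between its observed frequency
# and the reference frequency for the chosen language: lower scores mean the
# text looks more like natural language.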
def calcula_pontuacao(mensagem, idioma_a_testar=''):
freq_perc_geral = dicionarios.retorna_frequencia_letras(idioma_teste=idioma_a_testar)
dicionario_pontuacao_letras = {'a':0, 'b':0, 'c':0, 'd':0, 'e':0, 'f':0, 'g':0, 'h':0, 'i':0, 'j':0, 'k':0,
'l':0, 'm':0, 'n':0, 'o':0, 'p':0, 'q':0, 'r':0, 's':0, 't':0, 'u':0, 'v':0,
'w':0, 'x':0, 'y':0, 'z':0}
total_letras_validas = 1
pontuacao_mensagem = 0
for caractere in mensagem:
        if ord(caractere) > utilidades.MIN_MINUSCULA and ord(caractere) < utilidades.MAX_MINUSCULA:  # Check whether the current character is an unaccented letter.
dicionario_pontuacao_letras[caractere] += 1
total_letras_validas += 1
for letra, frequencia in dicionario_pontuacao_letras.items():
frequencia_perc_atual = dicionario_pontuacao_letras[letra] / total_letras_validas * 100
pontuacao_mensagem += abs(frequencia_perc_atual - freq_perc_geral[letra])
return pontuacao_mensagem
| 52.716667
| 157
| 0.739488
|
0ad3de628147dff99fe766c197042c4cda7f923b
| 943
|
py
|
Python
|
tuyaha/devices/factory.py
|
xremix/tuyaha
|
5ce4e352131bbf3b1e1a5fabf9bd6af8102c83a8
|
[
"MIT"
] | 153
|
2019-07-19T14:32:44.000Z
|
2022-02-11T20:44:22.000Z
|
tuyaha/devices/factory.py
|
xremix/tuyaha
|
5ce4e352131bbf3b1e1a5fabf9bd6af8102c83a8
|
[
"MIT"
] | 97
|
2019-07-11T03:13:04.000Z
|
2021-10-04T02:02:11.000Z
|
tuyaha/devices/factory.py
|
xremix/tuyaha
|
5ce4e352131bbf3b1e1a5fabf9bd6af8102c83a8
|
[
"MIT"
] | 96
|
2019-08-12T16:04:13.000Z
|
2021-12-30T16:16:21.000Z
|
from tuyaha.devices.climate import TuyaClimate
from tuyaha.devices.cover import TuyaCover
from tuyaha.devices.fan import TuyaFanDevice
from tuyaha.devices.light import TuyaLight
from tuyaha.devices.lock import TuyaLock
from tuyaha.devices.scene import TuyaScene
from tuyaha.devices.switch import TuyaSwitch
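# Maps the Tuya "dev_type" field to the matching wrapper class; unsupported
# types result in an empty list.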
def get_tuya_device(data, api):
dev_type = data.get("dev_type")
devices = []
if dev_type == "light":
devices.append(TuyaLight(data, api))
elif dev_type == "climate":
devices.append(TuyaClimate(data, api))
elif dev_type == "scene":
devices.append(TuyaScene(data, api))
elif dev_type == "fan":
devices.append(TuyaFanDevice(data, api))
elif dev_type == "cover":
devices.append(TuyaCover(data, api))
elif dev_type == "lock":
devices.append(TuyaLock(data, api))
elif dev_type == "switch":
devices.append(TuyaSwitch(data, api))
return devices
| 32.517241
| 48
| 0.698834
|
e5edfa5325c6c2bd0c21090104d440e84f9838a1
| 499
|
py
|
Python
|
alipay/aop/api/response/AlipayEcoMycarDialogonlineAnswererUpdateResponse.py
|
articuly/alipay-sdk-python-all
|
0259cd28eca0f219b97dac7f41c2458441d5e7a6
|
[
"Apache-2.0"
] | null | null | null |
alipay/aop/api/response/AlipayEcoMycarDialogonlineAnswererUpdateResponse.py
|
articuly/alipay-sdk-python-all
|
0259cd28eca0f219b97dac7f41c2458441d5e7a6
|
[
"Apache-2.0"
] | null | null | null |
alipay/aop/api/response/AlipayEcoMycarDialogonlineAnswererUpdateResponse.py
|
articuly/alipay-sdk-python-all
|
0259cd28eca0f219b97dac7f41c2458441d5e7a6
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import simplejson as json
from alipay.aop.api.response.AlipayResponse import AlipayResponse
class AlipayEcoMycarDialogonlineAnswererUpdateResponse(AlipayResponse):
def __init__(self):
super(AlipayEcoMycarDialogonlineAnswererUpdateResponse, self).__init__()
def parse_response_content(self, response_content):
response = super(AlipayEcoMycarDialogonlineAnswererUpdateResponse, self).parse_response_content(response_content)
| 31.1875
| 121
| 0.801603
|
25d82d38fb40d0d16451f5a9f183f995446d8552
| 7,003
|
py
|
Python
|
app/dao/provider_details_dao.py
|
alphagov/notify-notifications-api
|
e604385e0cf4c2ab8c6451b7120ceb196cce21b5
|
[
"MIT"
] | null | null | null |
app/dao/provider_details_dao.py
|
alphagov/notify-notifications-api
|
e604385e0cf4c2ab8c6451b7120ceb196cce21b5
|
[
"MIT"
] | null | null | null |
app/dao/provider_details_dao.py
|
alphagov/notify-notifications-api
|
e604385e0cf4c2ab8c6451b7120ceb196cce21b5
|
[
"MIT"
] | null | null | null |
from datetime import datetime, timedelta
from flask import current_app
from notifications_utils.timezones import convert_utc_to_bst
from sqlalchemy import asc, desc, func
from app import db
from app.dao.dao_utils import autocommit
from app.models import (
SMS_TYPE,
FactBilling,
ProviderDetails,
ProviderDetailsHistory,
User,
)
def get_provider_details_by_id(provider_details_id):
return ProviderDetails.query.get(provider_details_id)
def get_provider_details_by_identifier(identifier):
return ProviderDetails.query.filter_by(identifier=identifier).one()
def get_alternative_sms_provider(identifier):
if identifier == 'firetext':
return 'mmg'
elif identifier == 'mmg':
return 'firetext'
raise ValueError('Unrecognised sms provider {}'.format(identifier))
def dao_get_provider_versions(provider_id):
return ProviderDetailsHistory.query.filter_by(
id=provider_id
).order_by(
desc(ProviderDetailsHistory.version)
).limit(
100 # limit results instead of adding pagination
).all()
def _adjust_provider_priority(provider, new_priority):
current_app.logger.info(
f'Adjusting provider priority - {provider.identifier} going from {provider.priority} to {new_priority}'
)
provider.priority = new_priority
# Automatic update so set as notify user
provider.created_by_id = current_app.config['NOTIFY_USER_ID']
# update without commit so that both rows can be changed without ending the transaction
# and releasing the for_update lock
_update_provider_details_without_commit(provider)
def _get_sms_providers_for_update(time_threshold):
"""
Returns a list of providers, while holding a for_update lock on the provider details table, guaranteeing that those
providers won't change (but can still be read) until you've committed/rolled back your current transaction.
    If any of the providers have been changed recently, it returns an empty list - it's still your responsibility to
release the transaction in that case
"""
# get current priority of both providers
q = ProviderDetails.query.filter(
ProviderDetails.notification_type == 'sms',
ProviderDetails.active
).with_for_update().all()
# if something updated recently, don't update again. If the updated_at is null, treat it as min time
if any((provider.updated_at or datetime.min) > datetime.utcnow() - time_threshold for provider in q):
current_app.logger.info(f"Not adjusting providers, providers updated less than {time_threshold} ago.")
return []
return q
@autocommit
def dao_reduce_sms_provider_priority(identifier, *, time_threshold):
"""
Will reduce a chosen sms provider's priority, and increase the other provider's priority by 10 points each.
If either provider has been updated in the last `time_threshold`, then it won't take any action.
"""
amount_to_reduce_by = 10
providers_list = _get_sms_providers_for_update(time_threshold)
if len(providers_list) < 2:
current_app.logger.info("Not adjusting providers, number of active providers is less than 2.")
return
providers = {provider.identifier: provider for provider in providers_list}
other_identifier = get_alternative_sms_provider(identifier)
reduced_provider = providers[identifier]
increased_provider = providers[other_identifier]
# always keep values between 0 and 100
reduced_provider_priority = max(0, reduced_provider.priority - amount_to_reduce_by)
increased_provider_priority = min(100, increased_provider.priority + amount_to_reduce_by)
_adjust_provider_priority(reduced_provider, reduced_provider_priority)
_adjust_provider_priority(increased_provider, increased_provider_priority)
@autocommit
def dao_adjust_provider_priority_back_to_resting_points():
"""
Provided that neither SMS provider has been modified in the last hour, move both providers by 10 percentage points
each towards their defined resting points (set in SMS_PROVIDER_RESTING_POINTS in config.py).
"""
amount_to_reduce_by = 10
time_threshold = timedelta(hours=1)
providers = _get_sms_providers_for_update(time_threshold)
for provider in providers:
target = current_app.config['SMS_PROVIDER_RESTING_POINTS'][provider.identifier]
current = provider.priority
if current != target:
if current > target:
new_priority = max(target, provider.priority - amount_to_reduce_by)
else:
new_priority = min(target, provider.priority + amount_to_reduce_by)
_adjust_provider_priority(provider, new_priority)
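# Example: with a resting point of 0, a provider currently at priority 25 moves
# to 15 on this run, then 5, then 0 on later runs; the max()/min() clamps above
# keep it from overshooting the target.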
def get_provider_details_by_notification_type(notification_type, supports_international=False):
filters = [ProviderDetails.notification_type == notification_type]
if supports_international:
filters.append(ProviderDetails.supports_international == supports_international)
return ProviderDetails.query.filter(*filters).order_by(asc(ProviderDetails.priority)).all()
@autocommit
def dao_update_provider_details(provider_details):
_update_provider_details_without_commit(provider_details)
def _update_provider_details_without_commit(provider_details):
"""
Doesn't commit, for when you need to control the database transaction manually
"""
provider_details.version += 1
provider_details.updated_at = datetime.utcnow()
history = ProviderDetailsHistory.from_original(provider_details)
db.session.add(provider_details)
db.session.add(history)
def dao_get_provider_stats():
# this query does not include the current day since the task to populate ft_billing runs overnight
current_bst_datetime = convert_utc_to_bst(datetime.utcnow())
first_day_of_the_month = current_bst_datetime.date().replace(day=1)
subquery = db.session.query(
FactBilling.provider,
func.sum(FactBilling.billable_units * FactBilling.rate_multiplier).label('current_month_billable_sms')
).filter(
FactBilling.notification_type == SMS_TYPE,
FactBilling.bst_date >= first_day_of_the_month
).group_by(
FactBilling.provider
).subquery()
result = db.session.query(
ProviderDetails.id,
ProviderDetails.display_name,
ProviderDetails.identifier,
ProviderDetails.priority,
ProviderDetails.notification_type,
ProviderDetails.active,
ProviderDetails.updated_at,
ProviderDetails.supports_international,
User.name.label('created_by_name'),
func.coalesce(subquery.c.current_month_billable_sms, 0).label('current_month_billable_sms')
).outerjoin(
subquery, ProviderDetails.identifier == subquery.c.provider
).outerjoin(
User, ProviderDetails.created_by_id == User.id
).order_by(
ProviderDetails.notification_type,
ProviderDetails.priority,
).all()
return result
| 36.097938
| 119
| 0.74668
|