hexsha stringlengths 40 40 | size int64 5 2.06M | ext stringclasses 10 values | lang stringclasses 1 value | max_stars_repo_path stringlengths 3 248 | max_stars_repo_name stringlengths 5 125 | max_stars_repo_head_hexsha stringlengths 40 78 | max_stars_repo_licenses listlengths 1 10 | max_stars_count int64 1 191k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24 24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24 24 ⌀ | max_issues_repo_path stringlengths 3 248 | max_issues_repo_name stringlengths 5 125 | max_issues_repo_head_hexsha stringlengths 40 78 | max_issues_repo_licenses listlengths 1 10 | max_issues_count int64 1 67k ⌀ | max_issues_repo_issues_event_min_datetime stringlengths 24 24 ⌀ | max_issues_repo_issues_event_max_datetime stringlengths 24 24 ⌀ | max_forks_repo_path stringlengths 3 248 | max_forks_repo_name stringlengths 5 125 | max_forks_repo_head_hexsha stringlengths 40 78 | max_forks_repo_licenses listlengths 1 10 | max_forks_count int64 1 105k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24 24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24 24 ⌀ | content stringlengths 5 2.06M | avg_line_length float64 1 1.02M | max_line_length int64 3 1.03M | alphanum_fraction float64 0 1 | count_classes int64 0 1.6M | score_classes float64 0 1 | count_generators int64 0 651k | score_generators float64 0 1 | count_decorators int64 0 990k | score_decorators float64 0 1 | count_async_functions int64 0 235k | score_async_functions float64 0 1 | count_documentation int64 0 1.04M | score_documentation float64 0 1 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
b36beaae20e75b8473f902d6834581e7dcae6799 | 61,158 | py | Python | python/vtool/maya_lib/core.py | louisVottero/vtool | 4e2592df5841829e790251dc6923e45c8d013091 | [
"MIT"
] | 3 | 2022-02-22T01:00:59.000Z | 2022-03-07T16:19:27.000Z | python/vtool/maya_lib/core.py | louisVottero/vtool | 4e2592df5841829e790251dc6923e45c8d013091 | [
"MIT"
] | 4 | 2022-03-04T05:25:44.000Z | 2022-03-11T04:51:35.000Z | python/vtool/maya_lib/core.py | louisVottero/vtool | 4e2592df5841829e790251dc6923e45c8d013091 | [
"MIT"
] | 1 | 2022-03-31T23:07:09.000Z | 2022-03-31T23:07:09.000Z | # Copyright (C) 2022 Louis Vottero louis.vot@gmail.com All rights reserved.
from __future__ import absolute_import
import os
import string
import traceback
from functools import wraps
from .. import util, util_file
from . import api
from vtool.util import get_log_tabs
# True when running inside a Maya session; gates the Maya-only imports below
in_maya = util.is_in_maya()

if in_maya:
    import maya.cmds as cmds
    import maya.mel as mel
    import maya.OpenMaya as OpenMaya
    import maya.OpenMayaUI as OpenMayaUI

# Module-level state shared by the undo decorators and ProgressBar.
undo_chunk_active = False
current_progress_bar = None

# Maya scene file type identifiers, as passed to cmds.file(type = ...).
MAYA_BINARY = 'mayaBinary'
MAYA_ASCII = 'mayaAscii'

# Maps a Maya attribute type name to the cmds.addAttr keyword used to
# declare it: 'attributeType' for simple/compound numeric types,
# 'dataType' for typed data (strings, arrays, geometry, etc.).
maya_data_mappings = {
    'bool' : 'attributeType',
    'long' : 'attributeType',
    'long2' : 'attributeType',
    'long3' : 'attributeType',
    'short': 'attributeType',
    'short2' : 'attributeType',
    'short3' : 'attributeType',
    'byte' : 'attributeType',
    'char' : 'attributeType',
    'enum' : 'attributeType',
    'float' : 'attributeType',
    'float2' : 'attributeType',
    'float3' : 'attributeType',
    'double' : 'attributeType',
    'double2' : 'attributeType',
    'double3' : 'attributeType',
    'doubleAngle' : 'attributeType',
    'doubleLinear' : 'attributeType',
    'doubleArray' : 'dataType',
    'string' : 'dataType',
    'stringArray' : 'dataType',
    'compound' : 'attributeType',
    'message' : 'attributeType',
    'time' : 'attributeType',
    'matrix' : 'dataType',
    'fltMatrix' : 'attributeType',
    'reflectanceRGB' : 'dataType',
    'reflectance' : 'attributeType',
    'spectrumRGB' : 'dataType',
    'spectrum' : 'attributeType',
    'Int32Array' : 'dataType',
    'vectorArray' : 'dataType',
    'nurbsCurve' : 'dataType',
    'nurbsSurface' : 'dataType',
    'mesh' : 'dataType',
    'lattice' : 'dataType',
    'pointArray' : 'dataType'
    }
class FindUniqueName(util.FindUniqueString):
    """
    Find a name that doesn't clash with other names in the Maya scene.
    It will increment the last number in the name.
    If no number is found it will append a 1 to the end of the name.
    """

    def __init__(self, test_string):
        super(FindUniqueName, self).__init__(test_string)

        # when True, increment the last number in the name; otherwise the first
        self.work_on_last_number = True

    def _get_scope_list(self):
        # The candidate clashes when it exists as a node or as a namespace;
        # collapsed from the original's redundant exists/not-exists branches.
        if cmds.objExists(self.increment_string) or cmds.namespace(exists=self.increment_string):
            return [self.increment_string]
        return []

    def _format_string(self, number):
        if number == 0:
            # first pass: append '_1' to start the numbering
            number = 1
            self.increment_string = '%s_%s' % (self.test_string, number)

        if number > 1:
            # later passes: bump the number already embedded in the string
            if self.work_on_last_number:
                self.increment_string = util.increment_last_number(self.increment_string)
            else:
                self.increment_string = util.increment_first_number(self.increment_string)

    def _get_number(self):
        if self.work_on_last_number:
            number = util.get_last_number(self.test_string)
        else:
            number = util.get_first_number(self.test_string)

        # fix: identity comparison instead of `== None`
        if number is None:
            return 0

        return number

    def get_last_number(self, bool_value):
        # toggle whether the last (True) or first (False) number is incremented
        self.work_on_last_number = bool_value
class TrackNodes(object):
    """
    Track new nodes that get added to the scene after a function runs.

    Usage:
        track_nodes = TrackNodes()
        track_nodes.load()
        my_function()
        new_nodes = track_nodes.get_delta()
    """

    def __init__(self):
        self.nodes = None
        self.node_type = None
        self.delta = None

    def load(self, node_type = None):
        """
        Snapshot the current scene nodes, optionally restricted by type.

        Args:
            node_type (str): Maya node type, e.g. animCurve, transform, joint.
                When given, get_delta() only reports nodes of that type.

        Returns:
            None
        """
        self.node_type = node_type

        if node_type:
            self.nodes = cmds.ls(type = node_type, l = True)
        else:
            self.nodes = cmds.ls(l = True)

    def get_delta(self):
        """
        Get the nodes created after load() was executed, honoring the
        node_type that load() stored.

        Returns:
            list: list of new nodes.
        """
        if self.node_type:
            current = cmds.ls(type = self.node_type, l = True)
        else:
            current = cmds.ls(l = True)

        return list(set(current).difference(self.nodes))
class ProgressBar(object):
    """
    Manipulate the maya progress bar.

    Args:
        title (str): The name of the progress bar.
        count (int): The number of items to iterate in the progress bar.
    """

    # class-level running increment so nested/successive bars share a counter
    inc_value = 0

    def __init__(self, title = '', count = None, begin = True):

        self.progress_ui = None
        self._orig_tool_context = None

        if is_batch():
            # no UI in batch mode: just record values and log the start
            self.title = title
            self.count = count
            message = '%s count: %s' % (title, count)
            self.status_string = ''
            util.show(message)
            return

        if not is_batch():
            self.progress_ui = get_progress_bar()
            if begin:
                #check if not cancelled completely because of bug
                self.__class__.inc_value = 0
                self.end()
            if not title:
                # reuse whatever title/count the bar already has
                title = cmds.progressBar(self.progress_ui, q = True, status = True)
            if not count:
                count = cmds.progressBar( self.progress_ui, q = True, maxValue = True)

            cmds.progressBar( self.progress_ui,
                              edit=True,
                              beginProgress=begin,
                              isInterruptable=True,
                              status = title,
                              maxValue= count )

            # remember the active tool so end() can restore it
            self._orig_tool_context = get_tool()
            set_tool_to_select()

        global current_progress_bar
        current_progress_bar = self

    def set_count(self, int_value):
        """Set the maximum value of the bar (stored on self in batch mode)."""
        if self.progress_ui:
            cmds.progressBar( self.progress_ui, edit = True, maxValue = int_value, beginProgress = True, isInterruptable = True)
        else:
            self.count = int_value

    def get_count(self):
        """Return the bar's maximum value."""
        if self.progress_ui:
            return cmds.progressBar( self.progress_ui, q = True, maxValue = True)
        else:
            return self.count

    def get_current_inc(self):
        """Return the current increment (tracked class-side, not queried from the UI)."""
        return self.__class__.inc_value
        #return cmds.progressBar( self.progress_ui, q = True, step = True)

    def inc(self, inc = 1):
        """
        Set the current increment.
        """
        if is_batch():
            return

        self.__class__.inc_value += inc
        cmds.progressBar(self.progress_ui, edit=True, step=inc)

    def next(self):
        # NOTE: manual step helper, not the iterator protocol
        if is_batch():
            return

        self.__class__.inc_value += 1
        cmds.progressBar(self.progress_ui, edit=True, step=1)

    def end(self):
        """
        End the progress bar.
        """
        if is_batch():
            return

        # a cancelled bar must be re-begun before it can be properly ended
        if cmds.progressBar(self.progress_ui, query = True, isCancelled = True):
            cmds.progressBar( self.progress_ui,
                              edit=True,
                              beginProgress=True)

        cmds.progressBar(self.progress_ui, edit=True, ep = True)

        if self._orig_tool_context:
            set_tool(self._orig_tool_context)

    def status(self, status_string):
        """
        Set that status string of the progress bar.
        """
        if is_batch():
            self.status_string = status_string
            # util.show(status_string)
            return

        cmds.progressBar(self.progress_ui, edit=True, status = status_string)

    def break_signaled(self):
        """
        break the progress bar loop so that it stops and disappears.
        """
        # NOTE(review): eval of env-var strings ('True'/'False' expected);
        # these are internal vetala variables, not untrusted input.
        run = eval( util.get_env('VETALA_RUN'))
        stop = eval( util.get_env('VETALA_STOP'))

        if is_batch():
            return False

        if run == True:
            if stop == True:
                util.show('VETALA_STOP is True')
                self.end()
                return True

        break_progress = cmds.progressBar(self.progress_ui, query=True, isCancelled=True )

        if break_progress:
            self.end()
            # when the run flag is on, cancelling also sets the stop flag
            if run == True:
                util.set_env('VETALA_STOP', True)
            return True

        return False
def get_current_camera():
    """Return the camera of the active view (delegates to the api module)."""
    camera = api.get_current_camera()
    return camera


def set_current_camera(camera_name):
    """Make camera_name the camera of the active view (delegates to api)."""
    api.set_current_camera(camera_name)


class StoreDisplaySettings(object):
    """Store and restore the display settings of the active 3d view."""

    def __init__(self):
        self.style = None
        self.setting_id = None
        self.view = OpenMayaUI.M3dView.active3dView()

    def store(self):
        # capture the view's current object display mask and display style
        self.setting_id = self.view.objectDisplay()
        self.style = self.view.displayStyle()

    def restore(self):
        # re-apply whatever store() captured
        self.view.setObjectDisplay(self.setting_id)
        self.view.setDisplayStyle(self.style)
class ManageNodeEditors():
    """
    Snapshot the 'add new nodes' state of every open node editor so it can
    be switched off during heavy node creation and restored afterwards.
    """

    def __init__(self):
        self.node_editors = get_node_editors()
        self._additive_state_dict = {}

        for editor in self.node_editors:
            self._additive_state_dict[editor] = cmds.nodeEditor(editor, q = True, ann = True)

    def turn_off_add_new_nodes(self):
        # stop the editors from automatically displaying newly created nodes
        for editor in self.node_editors:
            cmds.nodeEditor(editor, e = True, ann = False)

    def restore_add_new_nodes(self):
        # put back the state captured at construction time
        for editor, state in self._additive_state_dict.items():
            cmds.nodeEditor(editor, e = True, ann = state)
def undo_off(function):
    """
    Decorator that runs the function with Maya's undo queue disabled,
    restoring the previous undo state afterwards (also on error).
    No-op outside of Maya.
    """
    @wraps(function)
    def wrapper(*args, **kwargs):

        global current_progress_bar

        if not util.is_in_maya():
            return

        return_value = None

        # remember current state so we only re-enable what we disabled
        undo_state = cmds.undoInfo(state = True, q = True)

        if undo_state:
            cmds.undoInfo(state = False)

        try:
            return_value = function(*args, **kwargs)
        except:
            if undo_state:
                cmds.undoInfo( state = True )

            # do not remove
            util.error( traceback.format_exc() )

            # make sure a dangling progress bar gets closed on failure
            if current_progress_bar:
                current_progress_bar.end()
                current_progress_bar = None

            # NOTE(review): this raises a bare RuntimeError and discards the
            # original exception type; callers appear to rely on RuntimeError.
            raise(RuntimeError)

        if undo_state:
            cmds.undoInfo( state = True )

        return return_value

    return wrapper
def undo_chunk(function):
    """
    Decorator that wraps the function in a single Maya undo chunk, so the
    whole call can be undone in one step. Nested decorated calls share the
    outermost chunk (tracked via the module global undo_chunk_active).
    No-op outside of Maya.
    """
    @wraps(function)
    def wrapper(*args, **kwargs):

        global undo_chunk_active
        global current_progress_bar

        if not in_maya:
            return

        undo_state = cmds.undoInfo(state = True, q = True)

        return_value = None

        # closed stays True when an outer chunk is already open (nested call)
        closed = True

        if not undo_chunk_active and undo_state:
            cmds.undoInfo(openChunk = True)
            undo_chunk_active = True
            closed = False

        try:
            return_value = function(*args, **kwargs)
        except:
            if undo_chunk_active:
                cmds.undoInfo(closeChunk = True)
                closed = True
                undo_chunk_active = False

            # do not remove
            util.error( traceback.format_exc() )

            # close any dangling progress bar on failure
            if current_progress_bar:
                current_progress_bar.end()
                current_progress_bar = None

            # NOTE(review): discards the original exception type for RuntimeError
            raise(RuntimeError)

        if not closed:
            if undo_chunk_active:
                cmds.undoInfo(closeChunk = True)
                undo_chunk_active = False

        return return_value

    return wrapper
def viewport_off( function ):
    """
    Decorator that pauses the viewport (cmds.ogs) while the function runs
    and unpauses it afterwards. No-op outside of Maya.

    NOTE(review): cmds.ogs(pause=True) TOGGLES the pause state; the guards
    query the state first so the net effect is pause-then-unpause. If the
    viewport was already paused before the call, it ends up unpaused.
    """
    @wraps(function)
    def wrap( *args, **kwargs ):

        if not in_maya:
            return

        # pause only if not already paused
        if not cmds.ogs(q = True, pause = True):
            cmds.ogs(pause = True)

        try:
            return function( *args, **kwargs )
        except Exception:
            # unpause before re-raising; finally's guard then sees unpaused
            if cmds.ogs(q = True, pause = True):
                cmds.ogs(pause = True)
            # NOTE(review): discards the original exception type
            raise(RuntimeError)
        finally:
            if cmds.ogs(q = True, pause = True):
                cmds.ogs(pause = True)

    return wrap
def refresh():
    """Force a viewport refresh, temporarily unpausing the renderer if paused."""
    was_paused = cmds.ogs(q = True, pause = True)

    if was_paused:
        # ogs(pause=True) toggles: this unpauses so refresh can draw
        cmds.ogs(pause = True)

    cmds.refresh()

    if was_paused:
        # restore the paused state
        cmds.ogs(pause = True)
def is_batch():
    """
    Returns:
        bool: True if Maya is in batch mode.
    """
    return cmds.about(batch = True)
def is_transform(node):
    """
    Is the node a transform.

    Args:
        node (str): The name of the node to test.

    Returns:
        bool
    """
    if not cmds.objExists(node):
        return False

    return bool(cmds.objectType(node, isAType = 'transform'))
def is_a_shape(node):
    """
    Test whether the node is a shape.

    Args:
        node (str): The name of a node.

    Returns:
        bool
    """
    return bool(cmds.objectType(node, isAType = 'shape'))
def is_referenced(node):
    """
    Check whether a node comes from a file reference.

    Args:
        node (str): Name of a node in maya.

    Returns:
        bool
    """
    if not node or not cmds.objExists(node):
        return False

    return cmds.referenceQuery(node, isNodeReferenced = True)
def is_empty(node):
    """
    Heuristic check that a node is safe to consider empty/unused:
    not referenced, no children (for transforms), no user-defined keyable
    attributes, not a known default node, and no connections other than
    defaultRenderGlobals.
    """
    if is_referenced(node):
        return False

    if is_transform(node):
        relatives = cmds.listRelatives(node)
        if relatives:
            return False

    # user-defined keyable attributes imply the node carries data
    attrs = cmds.listAttr(node, ud = True, k = True)
    if attrs:
        return False

    default_nodes = ['defaultLightSet', 'defaultObjectSet', 'initialShadingGroup', 'uiConfigurationScriptNode', 'sceneConfigurationScriptNode']
    if node in default_nodes:
        return False

    # a lone connection to defaultRenderGlobals still counts as empty
    connections = cmds.listConnections(node)
    if connections != ['defaultRenderGlobals']:
        if connections:
            return False

    return True
def is_undeletable(node):
    """Return True when the node is in Maya's undeletable node list."""
    try: #might fail in earlier versions of maya
        return node in cmds.ls(undeletable = True)
    except:
        return False
def is_unique(name):
    """Return False only when more than one node matches the name."""
    return len(cmds.ls(name)) <= 1
def is_namespace(namespace):
    """Return True when the namespace exists in the scene."""
    return bool(cmds.namespace(exists = namespace))
def inc_name(name, inc_last_number = True):
    """
    Finds a unique name by adding a number to the end.

    Args:
        name (str): Name to start from.
        inc_last_number (bool): When True increment the last number in the
            name, otherwise the first.

    Returns:
        str: Modified name, number added if not unique.
    """
    # fast path: already unique as node name and namespace
    if not cmds.objExists(name) and not cmds.namespace(exists = name):
        return name

    unique = FindUniqueName(name)
    unique.get_last_number(inc_last_number)

    return unique.get()
def prefix_name(node, prefix, name, separator = '_'):
    """
    Convenience to quickly rename a Maya node.

    Args:
        node (str): Name of a node in maya to rename.
        prefix (str)
        name (str)
        separator (str)

    Returns:
        str: prefix + separator + name
    """
    return cmds.rename(node, '{0}{1}{2}'.format(prefix, separator, name))
def get_node_name(node_type, description):
    """Return a unique '<node_type>_<description>' style name."""
    return inc_name('%s_%s' % (node_type, description))
def create_node(node_type, description):
    """Create a node of node_type named '<node_type>_<description>' (made unique)."""
    return cmds.createNode(node_type, n = get_node_name(node_type, description))
def rename_node(node, description):
    """Rename a node to '<its node type>_<description>' (made unique)."""
    node_type = cmds.nodeType(node)
    return cmds.rename(node, get_node_name(node_type, description))
def prefix_hierarchy(top_group, prefix):
    """
    Prefix all the names in a hierarchy.

    Args:
        top_group (str): Name of the top node of a hierarchy.
        prefix (str): Prefix to add in front of top_group and all children.

    Returns:
        list: The renamed hierarchy including top_group.
    """
    relatives = cmds.listRelatives(top_group, ad = True, f = True)
    relatives.append(top_group)

    prefix = prefix.strip()

    # rename deepest-first (ad listing order) so parent paths stay valid
    renamed = [cmds.rename(relative, '%s_%s' % (prefix, get_basename(relative)))
               for relative in relatives]

    renamed.reverse()

    return renamed
def pad_number(name):
    """
    Zero-pad the last number found in a node name, e.g. 'joint1' -> 'joint01'.
    Numbers of 10 or more are left as-is. Renames the node and returns the
    new name.
    """
    number = util.get_last_number(name)

    if number is None:
        number = 0

    number_string = str(number)
    index = name.rfind(number_string)
    digit_count = len(number_string)

    if number < 10:
        number_string = number_string.zfill(2)

    if index == -1:
        # no number found in the name; append the padded number
        # (presumably the intended behavior — the original corrupted the name here)
        new_name = name + number_string
    else:
        # bug fix: replace the FULL original number; the original sliced
        # name[index+1:], which duplicated digits for multi-digit numbers
        new_name = name[0:index] + number_string + name[index + digit_count:]

    renamed = cmds.rename(name, new_name)

    return renamed
def get_outliner_sets(include_nested = False):
    """
    Get the sets found in the outliner.

    Args:
        include_nested (bool): When False, sets contained in another set
            are skipped.

    Returns:
        list: List of sets in the outliner.
    """
    sets = cmds.ls(type = 'objectSet')

    top_sets = []

    for object_set in sets:
        if object_set == 'defaultObjectSet' or object_set == 'defaultLightSet':
            continue

        # skip restricted/special sets (renderable, vertices, facets,
        # edges, edit points) — these do not show as plain outliner sets
        if cmds.sets(object_set, q = True, r = True):
            continue
        if cmds.sets(object_set, q = True, v = True):
            continue
        if cmds.sets(object_set, q = True, fc = True):
            continue
        if cmds.sets(object_set, q = True, eg = True):
            continue
        if cmds.sets(object_set, q = True, ep = True):
            continue

        if not include_nested:
            # listSets(o=...) returns the sets this set belongs to
            if cmds.listSets(o = object_set):
                continue

        top_sets.append(object_set)

    return top_sets
def delete_outliner_sets():
    """
    Delete objectSets that usually appear in the outliner
    """
    for object_set in get_outliner_sets():
        delete_set(object_set)
def get_top_dag_nodes(exclude_cameras = True, namespace = None):
    """
    Get transforms that sit at the very top of the hierarchy.

    Args:
        exclude_cameras (bool): Filter out the default cameras.
        namespace (str): When given, only return transforms in that namespace.

    Returns:
        list
    """
    top_transforms = cmds.ls(assemblies = True)

    if exclude_cameras:
        for camera in ('persp', 'top', 'front', 'side'):
            if camera in top_transforms:
                top_transforms.remove(camera)

    if namespace:
        prefix = namespace + ':'
        top_transforms = [transform for transform in top_transforms
                          if transform.startswith(prefix)]

    return top_transforms
def get_top_dag_nodes_in_list(list_of_transforms):
    """
    Given a list of transforms, return only the ones at the top of the hierarchy
    """
    found = []

    for transform in list_of_transforms:
        long_names = cmds.ls(transform, l = True)
        # a single '|' in the full path means the node is parented to the world
        if long_names and long_names[0].count('|') == 1:
            found.append(transform)

    return found
def get_first_shape(transform):
    """
    Return the first non-intermediate shape under the transform, or None.
    """
    shapes = get_shapes(transform) or []

    for shape in shapes:
        # bug fix: the original queried '%s.intermediateObject' without
        # substituting the shape name into the format string
        if not cmds.getAttr('%s.intermediateObject' % shape):
            return shape
def get_shapes(transform, shape_type = None, no_intermediate = False):
    """
    Get all the shapes under a transform.

    Args:
        transform (str or list): The name of a transform (or several).
        shape_type (str): Restrict the result to this shape node type.
        no_intermediate (bool): When True, skip intermediate shapes.

    Returns:
        list: The names of shapes under the transform, or None when none found.
    """
    transforms = util.convert_to_sequence(transform)

    found = []

    for transform in transforms:

        if is_a_shape(transform):
            # a shape was passed in: collect its sibling shapes via the parent
            parent = cmds.listRelatives(transform, p = True, f = True)
            shapes_list = cmds.listRelatives(parent, s = True, f = True, ni = no_intermediate)
            if shapes_list:
                found += shapes_list
            if found:
                continue

        if shape_type:
            shape_type_list = cmds.listRelatives(transform, s = True, f = True, type = shape_type, ni = no_intermediate)
            if shape_type_list:
                found += shape_type_list

        if not shape_type:
            none_shape_type_list = cmds.listRelatives(transform, s = True, f = True, ni = no_intermediate)
            if none_shape_type_list:
                found += none_shape_type_list

    # NOTE: returns None (not []) when nothing was found
    if found:
        return found
def get_shape_node_type(node):
    """Return the node type of the first shape under node, or None."""
    shapes = get_shapes(node)

    if not shapes:
        return None

    return cmds.nodeType(shapes[0])
def get_node_types(nodes, return_shape_type = True):
    """
    Get the maya node types for the nodes supplied.

    Args:
        nodes (list): Node names.
        return_shape_type (bool): For transforms, use the type of their
            first shape instead of 'transform'.

    Returns:
        dict: dict[node_type_name] node dict of matching nodes
    """
    found_type = {}

    for node in nodes:
        node_type = cmds.nodeType(node)

        if node_type == 'transform' and return_shape_type:
            shapes = get_shapes(node)
            if shapes:
                node_type = cmds.nodeType(shapes[0])

        found_type.setdefault(node_type, []).append(node)

    return found_type
def get_transforms_with_shape_of_type(shape_type):
    """Return the (deduplicated) parent transforms of all shapes of shape_type."""
    # a dict keeps insertion order while removing duplicate parents
    parents = {}

    for shape in cmds.ls(type = shape_type, l = True):
        parent = cmds.listRelatives(shape, p = True, f = True)
        if parent:
            parents[parent[0]] = None

    return list(parents.keys())
def get_basename(name, remove_namespace = True, remove_attribute = False):
    """
    Get the basename in a hierarchy name.
    If top|model|face is supplied, face will be returned.

    Args:
        name (str): Node name, optionally a full path.
        remove_namespace (bool): Strip any 'namespace:' prefix.
        remove_attribute (bool): Strip a trailing '.attribute'; note this
            short-circuits before namespace removal.
    """
    basename = name.split('|')[-1]

    if remove_attribute:
        return basename.split('.')[0]

    if remove_namespace:
        return basename.split(':')[-1]

    return basename
def get_namespace(name):
    """Return everything before the last ':' ('' when there is no namespace)."""
    return name.rpartition(':')[0]
def get_dg_nodes():
    """Return all dependency graph nodes in the scene."""
    nodes = cmds.ls(dep = True)
    return nodes
def remove_namespace_from_string(name):
    """Return the part of the name after the last ':' (unchanged when none)."""
    parts = name.split(':')
    return parts[-1] if parts else ''
def get_characters():
    """
    Return the scene namespaces that look like character rigs — namespaces
    containing one of the well-known rig group names.
    """
    found = []
    rig_groups = ['controls', 'model', 'geo', 'setup', 'DO_NOT_TOUCH', 'rig']

    for namespace in cmds.namespaceInfo(lon = True):
        for group in rig_groups:
            if cmds.objExists(namespace + ':' + group) and namespace not in found:
                found.append(namespace)

    return found
def delete_unknown_nodes():
    """
    This will find all unknown nodes. Unlock and delete them.
    """
    deleted = []

    for node in cmds.ls(type = 'unknown'):
        if not cmds.objExists(node):
            continue
        cmds.lockNode(node, lock = False)
        cmds.delete(node)
        deleted.append(node)

    util.show('Deleted unknowns: %s' % deleted)
def rename_shapes(transform):
    """
    Rename all the shapes under a transform.
    Renames them to match the name of the transform.

    Args:
        transform (str): The name of a transform.
    """
    shapes = get_shapes(transform)

    if not shapes:
        return

    cmds.rename(shapes[0], '%sShape' % transform)

    if len(shapes) == 1:
        return

    # number the remaining shapes: Shape1, Shape2, ...
    for inc, shape in enumerate(shapes[1:], start = 1):
        cmds.rename(shape, '%sShape%s' % (transform, inc))
def get_shapes_in_hierarchy(transform, shape_type = '', return_parent = False, skip_first_relative = False):
    """
    Get all the shapes in the child hierarchy excluding intermediates.
    This is good when calculating bounding box of a group.

    Args:
        transform (str): The name of a transform.
        shape_type (str): Restrict to this shape type.
        return_parent (bool): Return the owning transforms instead of the shapes.
        skip_first_relative (bool): Skip the transform itself.

    Returns:
        list: The list of shape nodes (or None when transform doesn't exist).
    """
    if not cmds.objExists(transform):
        util.warning('%s does not exist. Could not get hierarchy' % transform)
        return

    hierarchy = [transform]

    relatives = cmds.listRelatives(transform, ad = True, type = 'transform', f = True)

    if relatives:
        hierarchy += relatives

    shapes = []

    if skip_first_relative:
        hierarchy = hierarchy[1:]

    for child in hierarchy:

        found_shapes = get_shapes(child, shape_type)
        sifted_shapes = []

        if not found_shapes:
            continue

        for found_shape in found_shapes:

            # skip intermediate (orig) shapes
            if cmds.getAttr('%s.intermediateObject' % found_shape):
                continue

            if return_parent:
                found_shape = child

            sifted_shapes.append( found_shape )

        if sifted_shapes:
            shapes += sifted_shapes

    return shapes
def has_shape_of_type(node, maya_type):
    """
    Test whether the node has a shape of the supplied type.

    Args:
        node (str): The name of a node.
        maya_type (str): Can be a mesh, nurbsCurve, or any maya shape type.

    Returns:
        bool: True on a match; False when the node doesn't exist;
            None otherwise (falsy, matching the original behavior).
    """
    if not cmds.objExists(node):
        return False

    if cmds.objectType(node, isAType = 'shape'):
        test = node
    else:
        test = None
        shapes = get_shapes(node)
        if shapes:
            test = shapes[0]

    if test and maya_type == cmds.nodeType(test):
        return True
def get_orig_nodes(parent = None):
    """
    Get all the orig (intermediate) nodes in a scene, or just the ones
    under the parent. Returns None when no shapes are found.
    """
    if parent:
        shapes = cmds.listRelatives(parent, shapes = True, f = True)
    else:
        shapes = cmds.ls(type = 'shape', l = True)

    if not shapes:
        return

    found = []

    for shape in shapes:
        if is_referenced(shape):
            continue
        if cmds.getAttr('%s.intermediateObject' % shape):
            found.append(shape)

    return found
def get_active_orig_node(transform):
    """
    Return the first orig (intermediate) shape under the transform that
    still has connections, or None.
    """
    origs = get_orig_nodes(transform)

    # bug fix: get_orig_nodes returns None when nothing is found, which
    # previously crashed the for-loop
    if not origs:
        return None

    for orig in origs:
        if cmds.listConnections(orig):
            return orig
def get_component_count(transform):
    """
    Get the number of components under a transform.
    This does not include hierarchy.

    Args:
        transform (str): The name of a transform.

    Returns:
        int: The number of components under transform, eg. verts, cvs, etc.
    """
    components = get_components(transform)

    # robustness: no shapes means no components (previously an IndexError)
    if not components:
        return 0

    return len( cmds.ls(components[0], flatten = True) )
def get_components(transform):
    """
    Get the name of the components under a transform.
    This does not include hierarchy.

    Args:
        transform (str): The name of a transform.

    Returns:
        list: The name of all components under transform, eg verts, cvs, etc.
    """
    shapes = get_shapes(transform)

    return get_components_from_shapes(shapes)


def get_components_in_hierarchy(transform):
    """
    Get the components in the hierarchy.
    This includes all transforms with shapes parented under the transform.

    Args:
        transform (str): The name of a transform.

    Returns:
        list: The name of all components under transform, eg verts, cvs, etc.
    """
    shapes = get_shapes_in_hierarchy(transform)

    return get_components_from_shapes(shapes)
def get_components_from_shapes(shapes = None):
    """
    Get the components from a list of shapes. Currently supports cv and
    vtx components.

    Args:
        shapes (list): List of shape names.

    Returns:
        list: The components of the supplied shapes.
    """
    # node type -> component wildcard pattern
    patterns = {
        'nurbsSurface': '%s.cv[*]',
        'nurbsCurve': '%s.cv[*]',
        'mesh': '%s.vtx[*]',
    }

    components = []

    if not shapes:
        return components

    for shape in shapes:
        pattern = patterns.get(cmds.nodeType(shape))
        if pattern:
            components.append(pattern % shape)

    return components
def create_group(name, parent = None):
    """
    Create (or reuse) one or more empty groups, optionally parenting them.

    Args:
        name (str or list): Group name(s); existing nodes are reused.
        parent (str or list): Parent node; only the first entry is used.

    Returns:
        list: The group names, or None when name is empty.
    """
    if not name:
        return

    sequence = util.convert_to_sequence(name)
    parent = util.convert_to_sequence(parent)

    if parent:
        parent = parent[0]

    found = []

    for sub_name in sequence:
        if not cmds.objExists(sub_name):
            sub_name = cmds.group(em = True, n = sub_name)

        if parent and cmds.objExists(parent):
            actual_parent = None
            actual_parent = cmds.listRelatives(sub_name, p = True)

            if actual_parent:
                actual_parent = actual_parent[0]

            # only reparent when not already under the requested parent
            if not parent == actual_parent:
                cmds.parent(sub_name, parent)

        found.append(sub_name)

    return found
def create_display_layer(name, nodes, display_type = 2, recursive_add = False):
    """
    Create a display layer containing the supplied nodes.

    Args:
        name (str): The name to give the display layer.
        nodes (str): The nodes that should be in the display layer.
        display_type (int): The layer displayType attribute value.
        recursive_add (bool): Also add the nodes' hierarchies.
    """
    layer = cmds.createDisplayLayer( name = name )
    cmds.editDisplayLayerMembers( layer, nodes, noRecurse = not recursive_add)
    cmds.setAttr( '%s.displayType' % layer, display_type )
    return layer
def delete_display_layers():
    """
    Deletes all display layers.
    """
    for layer in cmds.ls(type = 'displayLayer'):
        if layer != 'defaultLayer':
            cmds.delete(layer)
def print_help(string_value):
    """Show an info message in Maya's output, prefixed 'V:', and log it."""
    log_tab_str = util.get_log_tabs()
    # continue multi-line messages with the same 'V:' prefix
    string_value = string_value.replace('\n', '\nV:\t\t')
    OpenMaya.MGlobal.displayInfo('V:\t\t' + string_value)
    util.record_temp_log('\n%s%s' % (log_tab_str,string_value))


def print_warning(string_value):
    """Show a warning in Maya's output, prefixed 'V:', and log it."""
    string_value = string_value.replace('\n', '\nV:\t\t')
    OpenMaya.MGlobal.displayWarning('V:\t\t' + string_value)
    util.record_temp_log('\nWarning!: %s' % string_value)


def print_error(string_value):
    """Show an error in Maya's output, prefixed 'V:', and log it."""
    string_value = string_value.replace('\n', '\nV:\t\t')
    OpenMaya.MGlobal.displayError('V:\t\t' + string_value)
    util.record_temp_log('\nError!: %s' % string_value)
def delete_set_contents(set_name):
    """
    Delete everything contained in the set (recursing into nested sets)
    without deleting the set itself. DAG members are deleted after DG
    members.
    """
    children = cmds.sets(set_name, no = True, q = True)

    if not children:
        return

    found_dag = []
    found_dg = []

    for child in children:
        if cmds.nodeType(child) == 'objectSet':
            # bug fix: the original recursed on set_name (infinite recursion)
            delete_set_contents(child)
        else:
            if cmds.objectType(child, isAType='transform'):
                found_dag.append(child)
            else:
                found_dg.append(child)

    found = found_dag + found_dg
    cmds.sets(found, remove = set_name)
    cmds.delete(found_dg)
    cmds.delete(found_dag)
def delete_set(set_name):
    """Delete the set and, recursively, any sets nested inside it."""
    children = cmds.sets(set_name, no = True, q = True)

    for child in children or []:
        if cmds.nodeType(child) == 'objectSet':
            delete_set(child)

    if cmds.objExists(set_name):
        cmds.delete(set_name)
def add_to_set(nodes, set_name):
    """
    Add the nodes to the object set named set_name, creating the set if
    it does not exist. Warns and does nothing when set_name exists but is
    not an objectSet.
    """
    nodes = util.convert_to_sequence(nodes)

    if not cmds.objExists(set_name):
        object_set = cmds.createNode('objectSet')
        cmds.rename(object_set, set_name)

    if not cmds.nodeType(set_name) == 'objectSet':
        print_warning('%s is not an object set. Could not add to it.' % set_name)
        # bug fix: previously fell through and attempted the add anyway,
        # contradicting the warning
        return

    cmds.sets(nodes, add = set_name)
def get_set_children(set_name):
    """
    Recursively collect the non-set members of a set (nested sets are
    descended into, not returned). Returns None when the set is empty.
    """
    children = cmds.sets(set_name, no = True, q = True)

    if not children:
        return

    found = []

    for child in children:
        if cmds.nodeType(child) == 'objectSet':
            found += get_set_children(child) or []
        else:
            found.append(child)

    return found
def load_plugin(plugin_name):
    """Load the plugin if it is not already loaded."""
    if cmds.pluginInfo(plugin_name, query = True, loaded = True):
        return

    util.show('Loading plugin: %s' % plugin_name)
    cmds.loadPlugin(plugin_name)
def remove_non_existent(list_value):
    """Filter the value(s) down to entries that exist in the scene."""
    list_value = util.convert_to_sequence(list_value)
    return [thing for thing in list_value if thing and cmds.objExists(thing)]
def remove_referenced_in_list(list_value):
    """Filter the list down to nodes that are not file-referenced."""
    return [thing for thing in list_value
            if not cmds.referenceQuery(thing, isNodeReferenced = True)]
def get_hierarchy_by_depth(transforms):
    """
    Sort full-path transform names by hierarchy depth, least deep first.

    Args:
        transforms (list): Full path names; depth is the count of '|'.

    Returns:
        list: Names grouped by increasing depth; within one depth group
            the incoming order is reversed.
    """
    depth_map = {}

    for transform in transforms:
        depth_map.setdefault(transform.count('|'), []).append(transform)

    ordered = []

    for depth in sorted(depth_map):
        group = depth_map[depth]
        # bug fix: the original wrote `rel_list.reverse` without calling it,
        # making the intended per-depth reversal a no-op
        group.reverse()
        ordered += group

    return ordered
def get_hierarchy(transform):
    """Return all descendant transforms (full paths), in reversed listing order."""
    relatives = cmds.listRelatives(transform, ad = True, type = 'transform', f = True)

    if not relatives:
        return []

    return list(reversed(relatives))
#--- file
def get_scene_file(directory = False):
    """Return the current scene path ('' if unsaved); directory=True returns its folder."""
    path = cmds.file(q=True, sn=True)

    if directory and path:
        path = util_file.get_dirname(path)

    return path


def start_new_scene():
    """Force a new empty scene and flush pending idle events."""
    cmds.file(new = True, f = True)
    cmds.flushIdleQueue()


def open_file(filepath):
    """Open a Maya file without prompting, then focus the view."""
    cmds.file(filepath, f = True, o = True, prompt = False)
    auto_focus_view()


def import_file(filepath):
    """
    Import a maya file in a generic vtool way.
    """
    cmds.file(filepath, f = True, i = True, iv = True, prompt = False, pr = True)# rpr = "vetala_clash")#, mergeNamespacesOnClash = True, renameAll = False)
    auto_focus_view()


def import_usd_file(filepath):
    """Import a USD file (requires the 'USD Import' translator plugin)."""
    cmds.file(filepath, type = 'USD Import',f = True, i = True, iv = True, prompt = False, pr = True)# rpr = "vetala_clash")#, mergeNamespacesOnClash = True, renameAll = False)
    auto_focus_view()
def save(filepath):
    """
    Save the current scene to filepath (mayaBinary for .mb, otherwise
    mayaAscii). On failure, warns the user (dialog outside batch mode)
    and checks write permission as a diagnostic.

    Returns:
        bool: True when the save succeeded, False otherwise.
    """
    saved = False

    util.show('Saving: %s' % filepath)

    file_type = 'mayaAscii'

    if filepath:
        if filepath.endswith('.mb'):
            file_type = 'mayaBinary'

        try:
            cmds.file(rename = filepath)
            cmds.file(save = True, type = file_type)
            saved = True
        except:
            status = traceback.format_exc()
            util.error(status)
            saved = False

    if not filepath:
        saved = False

    #if saved:
    #    util.show('Scene Saved')

    if not saved:
        if not is_batch():
            cmds.confirmDialog(message = 'Warning:\n\n Vetala was unable to save!', button = 'Confirm')

        print_error('Scene not saved. Filepath: %s' % filepath)

        if filepath:
            util.show('This is a Maya save bug, not necessarily an issue with Vetala. Try saving "Save As" to the filepath with Maya and you should get a similar error.')

            # diagnose: the most common cause is a read-only destination
            permission = util_file.get_permission(filepath)
            if not permission:
                print_error('Could not get write permission.')

        return False

    return saved
#--- reference
def get_reference_namespace(filepath):
    """
    Derive a reference namespace from a file name: the extension is
    dropped and any remaining dots become underscores,
    e.g. 'char.v001.ma' -> 'char_v001'.
    (A file name without an extension yields an empty string.)
    """
    namespace = os.path.basename(filepath)
    parts = namespace.split('.')

    if parts:
        namespace = '_'.join(parts[:-1])

    return namespace
def reference_file(filepath, namespace = None):
    """
    Reference a maya file in a generic vtool way.

    Args:
        filepath (str): The full path and filename.
        namespace (str): The namespace to add to the nodes in maya.
            Default (None) is the name of the file; False means the
            root namespace ':'.

    Returns:
        The value returned by cmds.file (the referenced file path).
    """
    if namespace == None:
        namespace = get_reference_namespace(filepath)

    if namespace == False:
        namespace = ':'

    reference = cmds.file( filepath,
                           reference = True,
                           gl = True,
                           mergeNamespacesOnClash = False,
                           namespace = namespace,
                           options = "v=0;")

    return reference
def reference_usd_file(filepath, namespace = None):
    """
    Reference a USD file (requires the 'USD Import' translator).

    NOTE(review): the namespace parameter is currently unused — the
    reference is created without a namespace flag.
    """
    reference = cmds.file( filepath,
                           reference = True,
                           type = 'USD Import',
                           gl = True
                           )

    return reference
def replace_reference(reference_node, new_path):
    """
    Swap the file a reference node points at for new_path.

    Not tested
    """
    # resolve the actual RN node name before loading the new file onto it
    rn_node = cmds.referenceQuery(reference_node, rfn = True)
    cmds.file(new_path,loadReference = rn_node)
#file -loadReference "TyrannosaurusRexRN" -type "mayaAscii" -options "v=0;"  "N:/projects/dinodana/assets/Character/TyrannosaurusRex/SURF/publish/maya/TyrannosaurusRex.v024.ma";
def reload_reference(reference_node):
    """Reload a reference node from its current file on disk."""
    rn_node = cmds.referenceQuery(reference_node, rfn = True)
    filepath = cmds.referenceQuery(rn_node, filename = True)
    # loading the same file back onto the RN node forces a reload
    cmds.file(filepath, loadReference = rn_node)
def get_reference_filepath(reference_node):
    """Return the file path a reference node points at, with slashes fixed.

    Args:
        reference_node (str): Name of a reference node. Falsy values return None.

    Returns:
        str: The file path, without Maya's copy-number suffix.
    """
    if not reference_node:
        return
    filepath = cmds.referenceQuery(reference_node, filename = True)
    # Maya appends a copy number such as "{1}" (or "{12}") when the same file
    # is referenced more than once. The old check only handled the
    # single-digit form "{d}"; strip any "{number}" suffix instead.
    import re
    filepath = re.sub(r'\{\d+\}$', '', filepath)
    filepath = util_file.fix_slashes(filepath)
    return filepath
def get_reference_node_from_namespace(namespace):
    """Find the reference node whose namespace matches the given name.

    Args:
        namespace (str): Namespace to look for, without a leading ":".

    Returns:
        str: The matching reference node, or None when no match is found.
    """
    ref_nodes = cmds.ls(type = 'reference')
    for ref_node in ref_nodes:
        test_namespace = None
        try:
            test_namespace = cmds.referenceQuery(ref_node, namespace = True)
        except:
            # some reference nodes cannot be queried (e.g. broken ones); skip
            pass
        if not test_namespace:
            continue
        # referenceQuery returns an absolute namespace such as ":foo"
        if test_namespace.startswith(':'):
            test_namespace = test_namespace[1:]
        if namespace == test_namespace:
            return ref_node
def remove_reference(reference_node):
    """Remove a reference node, falling back to a plain delete when broken.

    The namespace is cleaned up afterwards when possible.

    Args:
        reference_node (str): Name of the reference node to remove.
    """
    namespace = None
    if not cmds.objExists(reference_node):
        return
    #try getting the namespace
    try:
        namespace = cmds.referenceQuery(reference_node, ns = True)
    except:
        #if you can't get the namespace then something is wrong with the reference node, try deleting.
        cmds.lockNode(reference_node, l = False)
        try:
            cmds.delete(reference_node)
            return
        except:
            util.warning('Could not remove %s' % reference_node)
            return
    #try removing the good way after finding namespace
    try:
        cmds.file( removeReference = True, referenceNode = reference_node)
    except:
        #if it can't be removed the good way with a namespace then something is wrong, try deleting.
        cmds.lockNode(reference_node, l = False)
        try:
            cmds.delete(reference_node)
            return
        except:
            util.warning('Could not remove %s' % reference_node)
            return
    #try to remove the namespace incase it gets left behind.
    try:
        if namespace:
            cmds.namespace(dnc = True, rm = namespace)
    except:
        pass
    return
#--- ui
def get_tool():
    """Return the name of Maya's current tool context."""
    return cmds.currentCtx()
def set_tool_to_select():
    """Activate Maya's global select tool."""
    # the select tool's context name lives in the MEL global $gSelect
    g_select = mel.eval('$tmp = $gSelect;')
    cmds.setToolTo(g_select)
def set_tool(context):
    """Activate the given tool context, warning instead of raising on failure.

    Args:
        context (str): The tool context name to activate.
    """
    try:
        cmds.setToolTo(context)
    except:
        util.warning('Was unable to set context to %s' % context)
def get_progress_bar():
    """Return the name of Maya's main progress bar UI element."""
    gMainProgressBar = mel.eval('$tmp = $gMainProgressBar');
    return gMainProgressBar
def get_node_editors():
    """Return the node editor control names of all open node editor panels.

    Returns:
        list: Node editor names; empty in batch mode or when none are open.
    """
    if is_batch():
        return []
    scripted_panels = cmds.getPanel(type='scriptedPanel')
    return [
        panel + "NodeEditorEd"
        for panel in scripted_panels
        if cmds.scriptedPanel(panel, query=True, type=True) == "nodeEditorPanel"
    ]
def get_under_cursor(use_qt = True):
    """
    Get what is currently under the cursor using qt or not.
    When not using qt it is more of a hack.

    Args:
        use_qt (bool): When True, resolve via Qt widget hit testing;
            otherwise use a temporary dag popup menu.

    Returns:
        str: The name of the object under the cursor, or None.
    """
    if not use_qt:
        try:
            # build a throwaway popup menu populated by dagObjectHit
            menu = cmds.popupMenu()
            cmds.dagObjectHit(mn = menu)
            items = cmds.popupMenu(menu, q = True, ia = True)
            if not items:
                return
            selected_item = cmds.menuItem(items[0], q = True, l = True)
            cmds.deleteUI(menu)
            # strip the trailing 3 characters of the menu label
            # (menu decoration -- presumably "..."; TODO confirm)
            selected_item = selected_item[:-3]
            return selected_item
        except:
            return
    if use_qt:
        from vtool import qt
        pos = qt.QCursor.pos()
        widget = qt.qApp.widgetAt(pos)
        if not widget:
            return
        relpos = widget.mapFromGlobal(pos)
        # only model panels (3D viewports) can be hit tested
        panel = cmds.getPanel(underPointer=True) or ""
        if not "modelPanel" in panel:
            return
        return (cmds.hitTest(panel, relpos.x(), relpos.y()) or [None])[0]
def get_visible_hud_displays():
    """
    Get viewport hud displays.

    Returns:
        list: List of names of heads up displays that are currently visible.
    """
    displays = cmds.headsUpDisplay(q = True, lh = True)
    return [
        display
        for display in displays
        if cmds.headsUpDisplay(display, q = True, vis = True)
    ]
def set_hud_visibility(bool_value, displays = None):
    """
    Set the viewport hud display visibility.

    Args:
        bool_value (bool): True turns visibility on, False turns it off.
        displays (list): List of heads up displays by name. When None,
            every listed heads up display is affected.
    """
    if not displays:
        displays = cmds.headsUpDisplay(q = True, lh = True)
    for display in displays:
        cmds.headsUpDisplay(display, e = True, vis = bool_value)
def set_hud_lines(lines, name):
    """
    Set the viewport hud text for the named hud.

    Args:
        lines (list): Each entry in the list is a new text line in the display.
        name (str): The name of the heads up display to work on.
    """
    for line_number, line_text in enumerate(lines):
        hud_name = '%s%s' % (name, line_number)
        # rebuild the hud block from scratch so stale text does not linger
        if cmds.headsUpDisplay(hud_name, ex = True):
            cmds.headsUpDisplay(hud_name, remove = True)
        cmds.headsUpDisplay( hud_name, section = 1, block = line_number, blockSize = 'large', labelFontSize = "large", dataFontSize = 'large')
        cmds.headsUpDisplay( hud_name, edit = True, label = line_text)
def show_channel_box():
    """
    Makes the channel box visible.

    The dock is toggled off and back on to force it to the front. Maya
    renamed the dock label and switched from dockControl to workspaceControl
    in 2017, hence the two version branches.
    """
    docks = mel.eval('global string $gUIComponentDockControlArray[]; string $goo[] = $gUIComponentDockControlArray;')
    if util.get_maya_version() < 2017:
        if 'Channel Box / Layer Editor' in docks:
            index = docks.index('Channel Box / Layer Editor')
            # the dock control name follows its label in the array
            dock = docks[index + 1]
            if cmds.dockControl(dock, q = True, visible = True):
                cmds.dockControl(dock, edit = True, visible = False)
            cmds.dockControl(dock, edit = True, visible = True)
        index = docks.index('Channel Box')
        dock = docks[index + 1]
        if cmds.dockControl(dock, q = True, visible = True):
            cmds.dockControl(dock, edit = True, visible = False)
        cmds.dockControl(dock, edit = True, visible = True)
    if util.get_maya_version() > 2016:
        if 'Channel Box / Layer Editor' in docks:
            index = docks.index('Channel Box / Layer Editor')
            dock = docks[index + 1]
            if cmds.workspaceControl(dock, q = True, visible = True):
                cmds.workspaceControl(dock, edit = True, visible = False)
            cmds.workspaceControl(dock, edit = True, visible = True)
        index = docks.index('Channel Box')
        dock = docks[index + 1]
        if cmds.workspaceControl( dock, q = True, visible = True):
            cmds.workspaceControl(dock, edit = True, visible = False)
        cmds.workspaceControl(dock, edit = True, visible = True)
def add_to_isolate_select(nodes):
    """
    Add the specified nodes into every viewport's isolate select.
    This will only work on viewports that have isolate select turned on.
    Use when nodes are not being evaluated because isolate select causes them to be invisible.

    Args:
        nodes (list): The nodes to add to isolate select.
    """
    if is_batch():
        return
    nodes = util.convert_to_sequence(nodes)
    model_panels = get_model_panels()
    for panel in model_panels:
        # only touch panels that actually have isolate select enabled
        if cmds.isolateSelect(panel, q = True, state = True):
            for node in nodes:
                cmds.isolateSelect(panel, addDagObject = node)
            #cmds.isolateSelect(panel, update = True)
def get_model_panels():
    """
    Good to use when editing viewports.

    Returns:
        list: Names of all model panels (3D viewports).
    """
    return cmds.getPanel(type = 'modelPanel')
def get_current_audio_node():
    """
    Get the current audio node. Important when getting sound in a playblast.

    Returns:
        str: Name of the audio node.
    """
    # the audio node is attached to the playback slider (timeline) UI
    play_slider = mel.eval('global string $gPlayBackSlider; string $goo = $gPlayBackSlider')
    return cmds.timeControl(play_slider, q = True, s = True)
def xray_joints(bool_value = True):
    """Turn joint x-ray display on or off in every model panel.

    Args:
        bool_value (bool): True shows joints through geometry.
    """
    # Generalized from the hard-coded modelPanel1-4: panels can be missing
    # or renamed, which made the old fixed calls error.
    for model_panel in cmds.getPanel(type = 'modelPanel'):
        cmds.modelEditor(model_panel, e = True, jointXray = bool_value)
def display_textures(bool_value = True):
    """Turn texture display on or off in every model panel.

    Args:
        bool_value (bool): True displays textures in the viewport.
    """
    # Generalized from the hard-coded modelPanel1-4: panels can be missing
    # or renamed, which made the old fixed calls error.
    for model_panel in cmds.getPanel(type = 'modelPanel'):
        cmds.modelEditor(model_panel, e = True, displayTextures = bool_value)
def auto_focus_view(selection = False):
    """Fit the view on the scene (or the selection) if enabled in settings.

    Does nothing in batch mode or when the 'auto_focus_scene' setting is off.

    Args:
        selection (bool): When True fit only the current selection,
            otherwise fit everything.
    """
    if is_batch():
        return
    settings_path = util.get_env('VETALA_SETTINGS')
    settings = util_file.SettingsFile()
    settings.set_directory(settings_path)
    auto_focus = settings.get('auto_focus_scene')
    if not auto_focus:
        util.show('Auto focus turned off in settings')
        return
    try:
        if selection:
            cmds.viewFit(an = True, fitFactor = 1)
        else:
            cmds.viewFit(an = True, fitFactor = 1, all = True)
    except:
        util.show('Could not center view')
    util.show('Auto focus')
    # adjust clip planes so the fitted content does not get clipped
    fix_camera()
def fix_camera():
    """Scale the persp camera clip planes to its distance from the origin.

    The far clip plane is pushed out to 10x the camera distance and the
    near clip plane is scaled up for very distant cameras, to reduce
    depth-buffer artifacts.
    """
    camera_pos = cmds.xform('persp', q = True, ws = True, t = True)
    distance = util.get_distance([0,0,0], camera_pos)
    distance = (distance*10)
    try:
        cmds.setAttr('persp.farClipPlane', distance)
    except:
        # attribute may be locked or connected; leave it as is
        pass
    near = 0.1
    if distance > 10000:
        near = (distance/10000) * near
    try:
        cmds.setAttr('persp.nearClipPlane', near)
    except:
        pass
#--- garbage
def remove_unused_plugins():
    """Remove unknown-plugin entries from the scene when it is safe.

    Skips entirely when unknown nodes exist in the scene (they may still
    depend on those plugins), or when this Maya version has no
    unknownPlugin command.
    """
    list_cmds = dir(cmds)
    # older Maya versions do not have the unknownPlugin command
    if not 'unknownPlugin' in list_cmds:
        return
    unknown = cmds.ls(type = 'unknown')
    # unknown nodes present: removing their plugins could lose data
    if unknown:
        return
    unused = []
    unknown_plugins = cmds.unknownPlugin(query = True, list = True)
    if unknown_plugins:
        for unknown_plugin in unknown_plugins:
            try:
                cmds.unknownPlugin(unknown_plugin, remove = True)
            except:
                continue
            unused.append(unknown_plugin)
    if unused:
        util.show('Removed unused plugins: %s' % unused)
def delete_turtle_nodes():
    """Delete the nodes created by the Turtle renderer plugin, if in use."""
    plugin_list = cmds.pluginInfo(query = True, pluginsInUse = True)
    nodes = []
    if plugin_list:
        for plugin in plugin_list:
            # Entries may be plain plugin names or (name, version) pairs
            # depending on Maya version. The old test only checked
            # plugin[0] == 'Turtle', which is never true for a plain
            # string entry (plugin[0] is then a single character).
            if plugin == 'Turtle' or plugin[0] == 'Turtle':
                turtle_types = ['ilrBakeLayer',
                                'ilrBakeLayerManager',
                                'ilrOptionsNode',
                                'ilrUIOptionsNode']
                nodes = delete_nodes_of_type(turtle_types)
                break
    if nodes:
        util.show('Removed Turtle nodes: %s' % nodes )
def delete_nodes_of_type(node_type):
    """
    Delete all the nodes of type.
    Good for cleaning up scenes.

    Args:
        node_type (str or list): One or more node type names.
            Eg. hyperView, ilrBakeLayerManager, etc

    Returns:
        list: The names of the nodes that were deleted.
    """
    node_type = util.convert_to_sequence(node_type)
    deleted = []
    for node_type_name in node_type:
        nodes = cmds.ls(type = node_type_name)
        for node in nodes:
            # Maya's default hyperGraphLayout must never be deleted
            if node == 'hyperGraphLayout':
                continue
            if not cmds.objExists(node):
                continue
            cmds.lockNode(node, lock = False)
            cmds.delete(node)
            deleted.append(node)
    return deleted
def delete_garbage():
    """Delete leftover nodes that commonly pollute a scene.

    hyperLayout/hyperView nodes are deleted outright (Maya > 2014 only),
    then empty shadingEngine/partition/objectSet nodes are removed when
    they have no connections.
    """
    straight_delete_types = []
    if util.get_maya_version() > 2014:
        #maya 2014 crashes when trying to delete hyperView or hyperLayout nodes in some files.
        straight_delete_types += ['hyperLayout','hyperView']
    deleted_nodes = delete_nodes_of_type(straight_delete_types)
    check_connection_node_type = ['shadingEngine', 'partition','objectSet']
    check_connection_nodes = []
    for check_type in check_connection_node_type:
        nodes = cmds.ls(type = check_type)
        check_connection_nodes += nodes
    garbage_nodes = []
    if deleted_nodes:
        garbage_nodes = deleted_nodes
    # nodes Maya flags via ls(ud=True) are skipped -- presumably
    # undeletable defaults; confirm against the ls command reference
    immortals = cmds.ls(ud = True)
    for node in check_connection_nodes:
        if node in immortals:
            continue
        if not node or not cmds.objExists(node):
            continue
        if is_empty(node):
            cmds.lockNode(node, lock = False)
            try:
                cmds.delete(node)
            except:
                pass
            if not cmds.objExists(node):
                garbage_nodes.append(node)
    if garbage_nodes:
        util.show('Deleted Garbage nodes: %s' % garbage_nodes)
def delete_empty_orig_nodes():
    """Delete intermediate (orig) shape nodes that have no connections."""
    origs = get_empty_orig_nodes()
    for orig in origs:
        cmds.delete(orig)
    if origs:
        print_help('Deleted Unused Intermediate Object or Orig nodes: %s' % origs)
def delete_empty_nodes():
    """Delete dependency-graph nodes that have no connections."""
    nodes = get_empty_nodes()
    # Guard the empty case: calling cmds.delete with nothing to delete
    # errors or falls back to the current selection.
    if not nodes:
        return
    cmds.delete(nodes)
    print_help('Deleted Empty (Unconnected) nodes: %s' % nodes)
#--- empty
def get_empty_groups():
    """Return all empty transform nodes, excluding joints.

    Returns:
        list: Names of the empty transforms.
    """
    return [
        transform
        for transform in cmds.ls(type = 'transform')
        if cmds.nodeType(transform) != 'joint' and is_empty(transform)
    ]
def get_empty_nodes():
    """Return dependency-graph nodes that are empty (unconnected).

    Undeletable nodes (ls undeletable=True) are excluded, since deleting
    them would fail anyway.

    Returns:
        list: Names of the empty nodes.
    """
    dg_nodes = get_dg_nodes()
    found = []
    undel_nodes = []
    try:
        undel_nodes = cmds.ls(undeletable = True)
    except:
        # older Maya versions may not support the undeletable flag
        pass
    if undel_nodes:
        node_set = set(dg_nodes)
        undel_set = set(undel_nodes)
        dg_nodes = list(node_set - undel_set)
    for node in dg_nodes:
        if is_empty(node):
            found.append(node)
    return found
def get_empty_orig_nodes():
    """Return orig/intermediate nodes that have no connections at all.

    Returns:
        list: Names of the unconnected orig nodes.
    """
    return [
        orig_node
        for orig_node in get_orig_nodes()
        if not cmds.listConnections(orig_node)
    ]
def get_empty_reference_nodes():
    """Return reference nodes whose file can no longer be queried.

    Returns:
        list: Names of the broken/empty reference nodes.
    """
    references = cmds.ls(type = 'reference')
    found = []
    for reference in references:
        try:
            cmds.referenceQuery(reference, filename = True)
        except:
            # Bug fix: the old code appended the list to itself
            # (found.append(found)) and then *called* the list on return
            # (return found()), so this function could never work.
            found.append(reference)
    return found
def get_non_unique_names():
    """Return dag nodes whose short name is not unique in the scene.

    Maya returns such nodes with a "|" separated path, which is what
    this checks for.

    Returns:
        list: The non-uniquely named dag nodes.
    """
    return [dag_node for dag_node in cmds.ls(type = 'dagNode') if '|' in dag_node]
def is_hidden(transform, skip_connected = True, shape = True):
    """Return True when the transform (or all of its shapes) is hidden.

    Args:
        transform (str): The node to test.
        skip_connected (bool): When True, a visibility of 0 only counts
            when the visibility attribute is NOT driven by a connection.
        shape (bool): When True, also report hidden if every child shape
            is hidden.

    Returns:
        bool: True when hidden.
    """
    vis_attr = '%s.visibility' % transform
    if cmds.getAttr(vis_attr) == 0:
        if skip_connected and not cmds.listConnections(vis_attr, s = True, d = False, p = True):
            return True
        if not skip_connected:
            return True
    if shape:
        shapes = cmds.listRelatives(transform, shapes = True)
        if shapes:
            shape_hidden_count = 0
            # recurse without the shape check to avoid infinite descent
            for sub_shape in shapes:
                if is_hidden(sub_shape, skip_connected, shape = False):
                    shape_hidden_count += 1
            if len(shapes) == shape_hidden_count:
                return True
    return False
def is_parent_hidden(transform, skip_connected = True):
    """
    Searches the parent hierarchy to find one parent that is hidden.

    Args:
        transform (str): The node whose ancestors get checked.
        skip_connected (bool): Passed through to is_hidden.

    Returns:
        bool: True if any ancestor is hidden.
    """
    parent = cmds.listRelatives(transform, p = True, f = True)
    if parent:
        parent = parent[0]
    parent_invisible = False
    while parent:
        # Bug fix: test the current ancestor, not the original transform.
        # Previously is_hidden(transform, ...) was called every iteration,
        # so the ancestors were walked but never actually inspected.
        hidden = is_hidden(parent, skip_connected)
        if hidden:
            parent_invisible = True
            break
        parent = cmds.listRelatives(parent, p = True, f = True)
        if parent:
            parent = parent[0]
    return parent_invisible
| 27.073041 | 181 | 0.531672 | 9,465 | 0.154763 | 0 | 0 | 2,800 | 0.045783 | 0 | 0 | 12,754 | 0.208542 |
b36d5099ac12b98b41d1f637481fe1af850d8737 | 5,586 | py | Python | soap/flopoco/common.py | gitter-badger/soap | 4f5eb7848e4dc516a6ff972db5c8c46ec9037c47 | [
"MIT"
] | 22 | 2016-02-08T16:57:30.000Z | 2021-03-12T20:32:06.000Z | soap/flopoco/common.py | gitter-badger/soap | 4f5eb7848e4dc516a6ff972db5c8c46ec9037c47 | [
"MIT"
] | 1 | 2018-07-11T21:21:27.000Z | 2018-07-17T19:53:19.000Z | soap/flopoco/common.py | gitter-badger/soap | 4f5eb7848e4dc516a6ff972db5c8c46ec9037c47 | [
"MIT"
] | 6 | 2016-02-01T13:30:56.000Z | 2018-11-28T04:35:27.000Z | import math
import os
import tempfile
from contextlib import contextmanager
from soap import logger
from soap.common.cache import cached
from soap.expression import operators, OutputVariableTuple
from soap.semantics.error import IntegerInterval, ErrorSemantics
flopoco_command_map = {
'IntAdder': ('{wi}', ),
'IntMultiplier': ('{wi}', '{wi}', '{wi}', '1', '1', '0'),
'FPAdder': ('{we}', '{wf}'),
'FPMultiplier': ('{we}', '{wf}', '{wf}'),
'FPSquarer': ('{we}', '{wf}', '{wf}'),
'FPDiv': ('{we}', '{wf}'),
'FPPow': ('{we}', '{wf}'),
'FPExp': ('{we}', '{wf}'),
'FPLog': ('{we}', '{wf}', '0'),
}
flopoco_operators = tuple(flopoco_command_map)
operators_map = {
operators.ADD_OP: ['FPAdder', 'IntAdder'],
operators.SUBTRACT_OP: ['FPAdder', 'IntAdder'],
operators.MULTIPLY_OP: ['FPMultiplier', 'IntMultiplier'],
operators.DIVIDE_OP: 'FPDiv',
operators.LESS_OP: ['FPAdder', 'IntAdder'],
operators.LESS_EQUAL_OP: ['FPAdder', 'IntAdder'],
operators.GREATER_OP: ['FPAdder', 'IntAdder'],
operators.GREATER_EQUAL_OP: ['FPAdder', 'IntAdder'],
operators.EQUAL_OP: ['FPAdder', 'IntAdder'],
operators.NOT_EQUAL_OP: ['FPAdder', 'IntAdder'],
operators.TERNARY_SELECT_OP: 'Multiplexer',
operators.FIXPOINT_OP: 'Null',
operators.UNARY_SUBTRACT_OP: 'OneLUT',
}
we_min, we_max = 5, 15
we_range = list(range(we_min, we_max + 1))
wf_min, wf_max = 10, 112
wf_range = list(range(wf_min, wf_max + 1))
wi_min, wi_max = 1, 100
wi_range = list(range(wi_min, wi_max + 1))
directory = os.path.dirname(__file__)
default_file = os.path.join(directory, 'luts.pkl')
template_file = os.path.join(directory, 'template.vhdl')
device_name = 'Virtex6'
device_model = 'xc6vlx760'
@contextmanager
def cd(d):
    """Context manager: run the body inside directory ``d``.

    The directory is created when missing and the previous working
    directory is always restored on exit, even when the body raises.

    Args:
        d: Directory to change into. Falsy values leave the cwd unchanged.
    """
    import sh
    p = os.path.abspath(os.curdir)
    if d:
        sh.mkdir('-p', d)
        sh.cd(d)
    # Simplified from try/except-raise/finally: the bare re-raise added
    # nothing, try/finally alone restores the cwd on both paths.
    try:
        yield
    finally:
        sh.cd(p)
def flopoco_key(fop, we=-1, wf=-1, wi=-1):
    """Build the flopoco command-line argument tuple for operator ``fop``.

    Raises:
        ValueError: When ``fop`` is not a known flopoco operator.
    """
    try:
        arg_templates = flopoco_command_map[fop]
    except KeyError:
        raise ValueError('Unrecognised operator {}'.format(fop))
    formatted = [t.format(we=we, wf=wf, wi=wi) for t in arg_templates]
    return tuple([fop] + formatted)
def flopoco(key, file_name=None, dir_name=None):
    """Run the flopoco generator for ``key`` and return (file_name, dir_name).

    Args:
        key: Argument tuple as produced by flopoco_key.
        file_name: Output VHDL file name; a temporary name when None.
        dir_name: Working directory; a temporary directory when None.

    Raises:
        IOError: When flopoco produced no (or an empty) output file.
    """
    import sh
    file_name = file_name or tempfile.mktemp(suffix='.vhdl', dir='')
    cmd = ('-target=' + device_name, '-outputfile=' + file_name) + key
    logger.debug('flopoco: {!r}'.format(cmd))
    dir_name = dir_name or tempfile.mktemp(suffix='/')
    with cd(dir_name):
        sh.flopoco(*cmd)
        try:
            # flopoco exits 0 even on failure; verify output is non-empty
            with open(file_name) as fh:
                if not fh.read():
                    raise IOError()
        except (IOError, FileNotFoundError):
            logger.error('Flopoco failed to generate file ' + file_name)
            raise
    return file_name, dir_name
def get_luts(file_name):
    """Parse an XST XML report and return the number of slice LUTs used.

    Returns 0 (with a warning) when the report lists no LUT entry.
    """
    from bs4 import BeautifulSoup
    with open(file_name, 'r') as f:
        f = BeautifulSoup(f.read())
        app = f.document.application
        util = app.find('section', stringid='XST_DEVICE_UTILIZATION_SUMMARY')
        luts = util.find('item', stringid='XST_NUMBER_OF_SLICE_LUTS')
        if luts:
            return int(luts.get('value'))
        logger.warning('{} requires no LUTs'.format(file_name))
        return 0
def xilinx(file_name, dir_name=None):
    """Synthesize a VHDL file with Xilinx XST and return its LUT count.

    Args:
        file_name: The VHDL file to synthesize.
        dir_name: Working directory; a temporary directory when None.

    Returns:
        int: Slice LUTs reported by the synthesis run.
    """
    import sh
    file_base = os.path.split(file_name)[1]
    file_base = os.path.splitext(file_base)[0]
    synth_name = file_base + '.ngc'
    cmd = ['run', '-p', device_model]
    cmd += ['-ifn', file_name, '-ifmt', 'VHDL']
    cmd += ['-ofn', synth_name, '-ofmt', 'NGC']
    logger.debug('xst: {!r}'.format(cmd))
    dir_name = dir_name or tempfile.mktemp(suffix='/')
    with cd(dir_name):
        out_file_name = file_base + '.out.log'
        err_file_name = file_base + '.err.log'
        # xst reads its commands from stdin, hence the echo pipe
        sh.xst(sh.echo(*cmd), _out=out_file_name, _err=err_file_name)
        return get_luts(file_base + '.ngc_xst.xrpt')
_FILTER_OPERATORS = operators.TRADITIONAL_OPERATORS + [
operators.TERNARY_SELECT_OP
]
@cached
def _datatype_exponent(op, label):
    """Return (datatype, exponent/width) required to represent label's bound.

    For OutputVariableTuple labels the exponents of the members are summed.
    Returns (None, 0) for fixpoint ops and (None, None) for ops outside
    _FILTER_OPERATORS.
    """
    if isinstance(label, OutputVariableTuple):
        exponent = 0
        for l in label:
            label_datatype, label_exponent = _datatype_exponent(op, l)
            exponent += label_exponent
        return None, exponent
    if op == operators.FIXPOINT_OP:
        return None, 0
    if op not in _FILTER_OPERATORS:
        return None, None
    bound = label.bound
    datatype = type(bound)
    # Bug fix: the width limits below are module-level constants; they were
    # previously accessed as attributes of the local `flopoco` *function*
    # (e.g. flopoco.wi_max), which raises AttributeError at runtime.
    if datatype is IntegerInterval:
        if bound.is_top():
            return datatype, wi_max
        if bound.is_bottom():
            return datatype, wi_min
        bound_max = max(abs(bound.min), abs(bound.max), 1)
        # bits needed for the magnitude plus a sign bit
        width_max = int(math.ceil(math.log(bound_max + 1, 2)) + 1)
        return datatype, width_max
    if datatype is ErrorSemantics:
        bound = bound.v
        if bound.is_top():
            return datatype, we_max
        if bound.is_bottom():
            return datatype, we_min
        bound_max = max(abs(bound.min), abs(bound.max), 1)
        try:
            exp_max = math.floor(math.log(bound_max, 2))
        except OverflowError:
            return datatype, we_max
        try:
            exponent = int(math.ceil(math.log(exp_max + 1, 2) + 1))
            return datatype, max(exponent, we_min)
        except ValueError:
            # log of a non-positive exp_max: smallest exponent width suffices
            return datatype, we_min
    raise TypeError('Unrecognized type of bound {!r}'.format(bound))
| 29.712766 | 77 | 0.621196 | 0 | 0 | 201 | 0.035983 | 1,697 | 0.303795 | 0 | 0 | 832 | 0.148944 |
b37189a6c01fc79a353e9a5cc40d97e3e7ee592d | 5,651 | py | Python | coralquant/taskmanage.py | dabuc/CoralQuant | 26ba2e0b39a897d8947166796c6a4e9f5ab202fa | [
"MIT"
] | null | null | null | coralquant/taskmanage.py | dabuc/CoralQuant | 26ba2e0b39a897d8947166796c6a4e9f5ab202fa | [
"MIT"
] | null | null | null | coralquant/taskmanage.py | dabuc/CoralQuant | 26ba2e0b39a897d8947166796c6a4e9f5ab202fa | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
from coralquant.models.odl_model import BS_Stock_Basic, BS_SZ50_Stocks, TS_Stock_Basic, TS_TradeCal
from coralquant.spider.bs_stock_basic import get_stock_basic
from coralquant import logger
from datetime import date, datetime, timedelta
from sqlalchemy import MetaData
from coralquant.database import session_scope
from coralquant.settings import CQ_Config
from coralquant.models.orm_model import TaskTable
from coralquant.stringhelper import TaskEnum
from sqlalchemy import func, distinct
_logger = logger.Logger(__name__).get_log()
meta = MetaData()
def create_task(
    task: TaskEnum,
    begin_date: date,
    end_date: date,
    codes: list = None,
    type: str = None,
    status: str = None,
    market: str = None,
    isdel=False,
):
    """Create task rows for the given task type.

    :param task: Task type.
    :type task: TaskEnum
    :param begin_date: When None, the stock's IPO date is used instead.
    :type begin_date: date
    :param end_date: End date.
    :type end_date: date
    :param codes: List of stock codes; when empty/None, codes are queried
        from the database. Defaults to None (previously a mutable ``[]``
        default -- behavior is unchanged).
    :type codes: list, optional
    :param type: Security type: 1 stock, 2 index, 3 other, defaults to None
    :type type: str, optional
    :param status: Listing status: 1 listed, 0 delisted, defaults to None
    :type status: str, optional
    :param market: Market type (main / SME / ChiNext / STAR / CDR), defaults to None
    :type market: str, optional
    :param isdel: Whether to delete existing rows of the same task first,
        defaults to False
    :type isdel: bool, optional
    """
    with session_scope() as sm:
        if not codes:
            query = sm.query(BS_Stock_Basic.code, BS_Stock_Basic.ipoDate)
            if market:
                query = query.join(TS_Stock_Basic, BS_Stock_Basic.code == TS_Stock_Basic.bs_code).filter(
                    TS_Stock_Basic.market == market
                )
            if CQ_Config.IDB_DEBUG == "1":  # debug environment: restrict to SZ50 constituents
                query = query.join(BS_SZ50_Stocks, BS_Stock_Basic.code == BS_SZ50_Stocks.code)
            if status:
                query = query.filter(BS_Stock_Basic.status == status)
            if type:
                query = query.filter(BS_Stock_Basic.type == type)
            codes = query.all()
        if isdel:
            # remove historical rows of the same task before regenerating
            query = sm.query(TaskTable).filter(TaskTable.task == task.value)
            query.delete()
            sm.commit()
            _logger.info("任务:{}-历史任务已删除".format(task.name))
        tasklist = []
        for c in codes:
            # NOTE(review): when begin_date is None, c.ipoDate is read --
            # externally supplied `codes` entries must expose .code/.ipoDate
            tasktable = TaskTable(
                task=task.value,
                task_name=task.name,
                ts_code=c.code,
                begin_date=begin_date if begin_date is not None else c.ipoDate,
                end_date=end_date,
            )
            tasklist.append(tasktable)
        sm.bulk_save_objects(tasklist)
        _logger.info("生成{}条任务记录".format(len(codes)))
def create_bs_task(task: TaskEnum, tmpcodes=None):
    """
    Create the BaoStock task list.

    :param task: Task type whose rows get (re)generated.
    :param tmpcodes: Optional code subset used in the debug environment.
    """
    # remove historical rows of the same task
    TaskTable.del_with_task(task)
    with session_scope() as sm:
        query = sm.query(BS_Stock_Basic.code, BS_Stock_Basic.ipoDate, BS_Stock_Basic.outDate, BS_Stock_Basic.ts_code)
        if CQ_Config.IDB_DEBUG == "1":  # debug environment
            if tmpcodes:
                query = query.filter(BS_Stock_Basic.code.in_(tmpcodes))
            else:
                query = query.join(BS_SZ50_Stocks, BS_Stock_Basic.code == BS_SZ50_Stocks.code)
        # query = query.filter(BS_Stock_Basic.status == True)  # only listed stocks
        codes = query.all()
        tasklist = []
        for c in codes:
            # delisted stocks end at their out date, others at today
            tasktable = TaskTable(
                task=task.value,
                task_name=task.name,
                ts_code=c.ts_code,
                bs_code=c.code,
                begin_date=c.ipoDate,
                end_date=c.outDate if c.outDate is not None else datetime.now().date(),
            )
            tasklist.append(tasktable)
        sm.bulk_save_objects(tasklist)
        _logger.info("生成{}条任务记录".format(len(codes)))
def create_ts_task(task: TaskEnum):
    """
    Create the TuShare task list, one row per listed stock.
    """
    # remove historical rows of the same task
    TaskTable.del_with_task(task)
    with session_scope() as sm:
        codes = (
            sm.query(
                TS_Stock_Basic.ts_code, TS_Stock_Basic.bs_code, TS_Stock_Basic.list_date, TS_Stock_Basic.delist_date
            )
            .filter(TS_Stock_Basic.list_status == "L")
            .all()
        )
        tasklist = []
        for c in codes:
            # delisted stocks end at their delist date, others at today
            tasktable = TaskTable(
                task=task.value,
                task_name=task.name,
                ts_code=c.ts_code,
                bs_code=c.bs_code,
                begin_date=c.list_date,
                end_date=c.delist_date if c.delist_date is not None else datetime.now().date(),
            )
            tasklist.append(tasktable)
        sm.bulk_save_objects(tasklist)
        _logger.info("生成{}条任务记录".format(len(codes)))
def create_ts_cal_task(task: TaskEnum):
    """
    Create a task list based on the trading calendar:
    one row per distinct open trading day up to today.
    """
    # remove historical rows of the same task
    TaskTable.del_with_task(task)
    with session_scope() as sm:
        rp = sm.query(distinct(TS_TradeCal.date).label("t_date")).filter(
            TS_TradeCal.is_open == True, TS_TradeCal.date <= datetime.now().date()  # noqa
        )
        codes = rp.all()
        tasklist = []
        for c in codes:
            # these rows are updated by date, not by stock code
            tasktable = TaskTable(
                task=task.value,
                task_name=task.name,
                ts_code="按日期更新",
                bs_code="按日期更新",
                begin_date=c.t_date,
                end_date=c.t_date,
            )
            tasklist.append(tasktable)
        sm.bulk_save_objects(tasklist)
        _logger.info("生成{}条任务记录".format(len(codes)))
if __name__ == "__main__":
pass
| 31.569832 | 117 | 0.595116 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,482 | 0.240936 |
b3736fa542dd14804246448905f3d93d659686aa | 733 | py | Python | Scripts13/Script104.py | jonfisik/ScriptsPython | 1d15221b3a41a06a189e3e04a5241fa63df9cf3f | [
"MIT"
] | 1 | 2020-09-05T22:25:36.000Z | 2020-09-05T22:25:36.000Z | Scripts13/Script104.py | jonfisik/ScriptsPython | 1d15221b3a41a06a189e3e04a5241fa63df9cf3f | [
"MIT"
] | null | null | null | Scripts13/Script104.py | jonfisik/ScriptsPython | 1d15221b3a41a06a189e3e04a5241fa63df9cf3f | [
"MIT"
] | null | null | null | '''Exercício Python 104: Crie um programa que tenha a função leiaInt(), que vai funcionar de forma semelhante 'a função input() do Python, só que fazendo a validação para aceitar apenas um valor numérico.
Ex: n = leiaInt('Digite um n: ')'''
def leiaInt(msg):
    """Prompt with ``msg`` until the user types digits only; return the int.

    Works like Python's built-in input(), but only accepts integer values,
    re-prompting with an error message otherwise.
    """
    while True:
        n = str(input(msg))
        if n.isnumeric():
            # valid answer: keep the blank line the original flow printed
            print()
            return int(n)
        print(f'''\033[0;31m
    ERRO! Você digitou "{n}".
    Digite um número inteiro válido.\033[m''')
        print()
# main program: small demo that exercises leiaInt()
print('---'*10)
n = leiaInt('Digite um número: ')
print(f'Você digitou o número {n}.')
print('---'*10)
b37394b50acd1c191fcfdfaab1bed649bfdb75c6 | 1,055 | py | Python | pgctl/timestamp.py | struys/pgctl | 7e64765c01d8f9798d659dfbe409a3e5cb05bf7a | [
"MIT"
] | null | null | null | pgctl/timestamp.py | struys/pgctl | 7e64765c01d8f9798d659dfbe409a3e5cb05bf7a | [
"MIT"
] | null | null | null | pgctl/timestamp.py | struys/pgctl | 7e64765c01d8f9798d659dfbe409a3e5cb05bf7a | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
"""
Format stolen from daemontools' tai64nlocal:
2015-10-19 17:43:37.772152500
We'd usually use the s6 tool, but there's a problem:
http://www.mail-archive.com/skaware@list.skarnet.org/msg00575.html
"""
from __future__ import absolute_import
from __future__ import print_function
from __future__ import unicode_literals
def timestamp():
    """Current local time in tai64nlocal style, with a trailing space.

    Example: '2015-10-19 17:43:37.772152 '
    """
    from datetime import datetime
    now = datetime.now()
    # %Y-%m-%d equals %F and %H:%M:%S equals %T, spelled out explicitly
    return now.strftime('%Y-%m-%d %H:%M:%S.%f ')
def prepend_timestamps(infile, outfile):
    """Copy bytes from infile to outfile, stamping the start of every line.

    Both arguments are binary streams. Input is read one byte at a time so
    each line's timestamp reflects when that line actually began.
    """
    needstamp = True
    while True:
        c = infile.read(1)
        if c == b'': # EOF
            break
        elif needstamp:
            outfile.write(timestamp().encode('UTF-8'))
            needstamp = False
        outfile.write(c)
        if c == b'\n':
            # the next byte (if any) starts a new line
            needstamp = True
def main():
    """Wire unbuffered binary stdin/stdout through prepend_timestamps."""
    import sys
    import io
    # buffering=0 keeps stamps accurate: bytes pass through as they arrive
    infile = io.open(sys.stdin.fileno(), buffering=0, mode='rb')
    outfile = io.open(sys.stdout.fileno(), buffering=0, mode='wb')
    prepend_timestamps(infile, outfile)
exit(main())
| 23.977273 | 70 | 0.636019 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 284 | 0.269194 |
b37435ce932427a215af1a8179eed317d2590e74 | 1,752 | py | Python | scripts/Figures and Videos/Figure 2D/produce_fig_2D.py | jfaccioni/clovars | 64e24286a2dc185490384aeb08027d88eb9462c4 | [
"MIT"
] | null | null | null | scripts/Figures and Videos/Figure 2D/produce_fig_2D.py | jfaccioni/clovars | 64e24286a2dc185490384aeb08027d88eb9462c4 | [
"MIT"
] | null | null | null | scripts/Figures and Videos/Figure 2D/produce_fig_2D.py | jfaccioni/clovars | 64e24286a2dc185490384aeb08027d88eb9462c4 | [
"MIT"
] | null | null | null | from pathlib import Path
import random
import sys
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
from clovars.main import main as clovars_main
sns.set()
RANDOMNESS_SEED = 42
def main():
    """Main function of this script.

    Runs the CloVarS simulation for the control and TMZ treatments,
    collects the colony-size outputs, and plots per-colony growth traces
    with the treatment means on top.
    """
    random.seed(RANDOMNESS_SEED)
    np.random.seed(RANDOMNESS_SEED)
    dfs = []
    for treatment_name in ['control', 'tmz']:
        # clovars_main reads its arguments from sys.argv
        sys.argv = ['', 'run', f'Fig_2D_run_{treatment_name}.toml', f'Fig_2D_colonies_{treatment_name}.toml']
        clovars_main()
        path = Path('output')
        data = pd.read_csv(path / f'colony_output_{treatment_name}.csv', index_col=None)
        data['run_name'] = treatment_name
        dfs.append(data)
        # clean the output folder before the next run overwrites it
        remove_tree(path=path)
    df = pd.concat(dfs, ignore_index=True)
    fig, ax = plt.subplots()
    palette = ['#50993e', '#993e50']
    # dashed mean line per treatment, drawn on top (zorder=2)
    sns.lineplot(
        data=df,
        ax=ax,
        x='simulation_days',
        y='size',
        hue='run_name',
        palette=palette,
        linestyle='dashed',
        linewidth=5,
        zorder=2,
    )
    # thin per-colony traces underneath (units='name' disables aggregation)
    sns.lineplot(
        data=df,
        ax=ax,
        x='simulation_days',
        y='size',
        hue='run_name',
        palette=palette,
        linestyle='solid',
        linewidth=2,
        zorder=1,
        alpha=0.7,
        units='name',
        estimator=None,
        legend=False,
    )
    plt.show()
def remove_tree(path: Path):
    """Recursively deletes files and folders starting from path."""
    # Source: https://stackoverflow.com/a/57892171/11161432
    for entry in path.iterdir():
        if entry.is_dir():
            remove_tree(entry)
        else:
            entry.unlink()
    # all children are gone, the directory itself can be removed
    path.rmdir()
if __name__ == '__main__':
main()
| 23.675676 | 109 | 0.591895 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 419 | 0.239155 |
b374c23ca0c9952e52528d5ab3d944cd2eb56f6c | 8,156 | py | Python | numba_celltree/geometry_utils.py | Huite/numba_celltree | f6562153fb09468c64c6594b5ed4c48ce827997a | [
"MIT"
] | 2 | 2021-08-30T08:44:45.000Z | 2021-08-30T11:55:25.000Z | numba_celltree/geometry_utils.py | Deltares/numba_celltree | 898c54a18731e56407d03636f7f18e67d84a1f00 | [
"MIT"
] | null | null | null | numba_celltree/geometry_utils.py | Deltares/numba_celltree | 898c54a18731e56407d03636f7f18e67d84a1f00 | [
"MIT"
] | null | null | null | from typing import Sequence, Tuple
import numba as nb
import numpy as np
from .constants import (
FILL_VALUE,
NDIM,
PARALLEL,
TOLERANCE_ON_EDGE,
Box,
FloatArray,
FloatDType,
IntArray,
Point,
Vector,
)
from .utils import allocate_box_polygon, allocate_polygon
@nb.njit(inline="always")
def to_vector(a: Point, b: Point) -> Vector:
    # Vector pointing from a to b.
    return Vector(b.x - a.x, b.y - a.y)
@nb.njit(inline="always")
def as_point(a: FloatArray) -> Point:
    # Interpret the first two array entries as an (x, y) point.
    return Point(a[0], a[1])
@nb.njit(inline="always")
def as_box(arr: FloatArray) -> Box:
    # Interpret a length-4 array as a Box. Entries are assumed to be in
    # constructor order; bounding_box/build_bboxes emit
    # (xmin, xmax, ymin, ymax) rows.
    return Box(
        arr[0],
        arr[1],
        arr[2],
        arr[3],
    )
@nb.njit(inline="always")
def to_point(t: float, a: Point, V: Vector) -> Point:
    # Point at parametric position t along the ray a + t * V.
    return Point(a.x + t * V.x, a.y + t * V.y)
@nb.njit(inline="always")
def cross_product(u: Vector, v: Vector) -> float:
    # 2D cross product (z-component of the 3D cross product).
    return u.x * v.y - u.y * v.x
@nb.njit(inline="always")
def dot_product(u: Vector, v: Vector) -> float:
    # Standard 2D dot product.
    return u.x * v.x + u.y * v.y
@nb.njit(inline="always")
def polygon_length(face: IntArray) -> int:
    # Number of vertices actually used in a FILL_VALUE-padded face row.
    # A minimal polygon is a triangle
    n = len(face)
    for i in range(3, n):
        if face[i] == FILL_VALUE:
            return i
    return n
@nb.njit(inline="always")
def polygon_area(polygon: Sequence) -> float:
    # Fan triangulation from the first vertex, summing abs() per triangle.
    # NOTE(review): taking abs() per fan triangle is only exact for convex
    # polygons; a concave polygon would be over-counted -- confirm callers
    # only pass convex cells.
    length = len(polygon)
    area = 0.0
    a = Point(polygon[0][0], polygon[0][1])
    b = Point(polygon[1][0], polygon[1][1])
    U = to_vector(a, b)
    for i in range(2, length):
        c = Point(polygon[i][0], polygon[i][1])
        V = to_vector(c, a)
        area += abs(cross_product(U, V))
        b = c
        U = V
    return 0.5 * area
@nb.njit(inline="always")
def point_in_polygon(p: Point, poly: Sequence) -> bool:
    # Ray-casting (crossing number) test: returns True when p is inside poly.
    #
    # Refer to: https://wrf.ecse.rpi.edu/Research/Short_Notes/pnpoly.html
    # Copyright (c) 1970-2003, Wm. Randolph Franklin
    # MIT license.
    #
    # Quote:
    # > I run a semi-infinite ray horizontally (increasing x, fixed y) out from
    # > the test point, and count how many edges it crosses. At each crossing,
    # > the ray switches between inside and outside. This is called the Jordan
    # > curve theorem.
    # >
    # > The case of the ray going thru a vertex is handled correctly via a
    # > careful selection of inequalities. Don't mess with this code unless
    # > you're familiar with the idea of Simulation of Simplicity. This pretends
    # > to shift the ray infinitesimally down so that it either clearly
    # > intersects, or clearly doesn't touch. Since this is merely a conceptual,
    # > infinitesimal, shift, it never creates an intersection that didn't exist
    # > before, and never destroys an intersection that clearly existed before.
    # >
    # > The ray is tested against each edge thus:
    # > 1. Is the point in the half-plane to the left of the extended edge? and
    # > 2. Is the point's Y coordinate within the edge's Y-range?
    # >
    # > Handling endpoints here is tricky.
    #
    # For the Simulation of Simplicity concept, see:
    # Edelsbrunner, H., & Mücke, E. P. (1990). Simulation of simplicity: a
    # technique to cope with degenerate cases in geometric algorithms. ACM
    # Transactions on Graphics (tog), 9(1), 66-104.
    #
    # In this case, this guarantees there will be no "on-edge" answers, which
    # are degenerative. For another application of simulation of simplicity,
    # see:
    # Rappoport, A. (1991). An efficient algorithm for line and polygon
    # clipping. The Visual Computer, 7(1), 19-28.
    length = len(poly)
    v0 = as_point(poly[-1])
    c = False
    for i in range(length):
        v1 = as_point(poly[i])
        # Do not split this in two conditionals: if the first conditional fails,
        # the second will not be executed in Python's (and C's) execution model.
        # This matters because the second can result in division by zero.
        if (v0.y > p.y) != (v1.y > p.y) and p.x < (
            (v1.x - v0.x) * (p.y - v0.y) / (v1.y - v0.y) + v0.x
        ):
            c = not c
        v0 = v1
    return c
@nb.njit(inline="always")
def point_in_polygon_or_on_edge(p: Point, poly: FloatArray) -> bool:
    """
    Jordan-curve ray-crossing point-in-polygon test (same parity rule as
    the plain point_in_polygon variant above) that additionally returns
    True when ``p`` lies on a polygon edge, within the module-level
    TOLERANCE_ON_EDGE tolerance.
    """
    length = len(poly)
    v0 = as_point(poly[-1])
    U = to_vector(p, v0)
    c = False
    for i in range(length):
        v1 = as_point(poly[i])
        V = to_vector(p, v1)
        # |U x V| is twice the area of the triangle (p, v0, v1); a (near)
        # zero value means p is collinear with the edge v0 -> v1.
        twice_area = abs(cross_product(U, V))
        if twice_area < TOLERANCE_ON_EDGE:
            W = to_vector(v0, v1)
            # Parametrize p along the edge as p = v0 + t * W, using
            # whichever component of W is non-zero to avoid dividing by
            # zero; a degenerate zero-length edge is skipped entirely.
            if W.x != 0.0:
                t = (p.x - v0.x) / W.x
            elif W.y != 0.0:
                t = (p.y - v0.y) / W.y
            else:
                continue
            # t in [0, 1] places the collinear point between the endpoints.
            if 0 <= t <= 1:
                return True
        # Standard ray-crossing parity update (identical expression to the
        # point_in_polygon implementation above).
        if (v0.y > p.y) != (v1.y > p.y) and p.x < (
            (v1.x - v0.x) * (p.y - v0.y) / (v1.y - v0.y) + v0.x
        ):
            c = not c
        v0 = v1
        U = V
    return c
@nb.njit(inline="always")
def boxes_intersect(a: Box, b: Box) -> bool:
    """
    Return True when the two axis-aligned boxes strictly overlap
    (boxes that merely touch along an edge do not intersect).

    Parameters
    ----------
    a: (xmin, xmax, ymin, ymax)
    b: (xmin, xmax, ymin, ymax)
    """
    overlap_x = a.xmin < b.xmax and b.xmin < a.xmax
    overlap_y = a.ymin < b.ymax and b.ymin < a.ymax
    return overlap_x and overlap_y
@nb.njit(inline="always")
def bounding_box(
    polygon: IntArray, vertices: FloatArray
) -> Tuple[float, float, float, float]:
    """
    Compute the axis-aligned bounding box of one polygon.

    ``polygon`` holds vertex indices and may be padded with FILL_VALUE
    entries; scanning stops at the first padding entry.
    """
    capacity = len(polygon)
    start = vertices[polygon[0]]
    xmin = start[0]
    xmax = start[0]
    ymin = start[1]
    ymax = start[1]
    for k in range(1, capacity):
        vertex_index = polygon[k]
        if vertex_index == FILL_VALUE:
            break
        vertex = vertices[vertex_index]
        vx = vertex[0]
        vy = vertex[1]
        xmin = min(xmin, vx)
        xmax = max(xmax, vx)
        ymin = min(ymin, vy)
        ymax = max(ymax, vy)
    return (xmin, xmax, ymin, ymax)
@nb.njit
def build_bboxes(
    faces: IntArray,
    vertices: FloatArray,
) -> FloatArray:
    """
    Compute the bounding box of every polygon in ``faces``.

    Parameters
    ----------
    faces: integer array of per-polygon vertex indices (FILL_VALUE padded).
    vertices: coordinate array indexed by those vertex indices.

    Returns
    -------
    (n_polys, NDIM * 2) float array of (xmin, xmax, ymin, ymax) rows.
    (Bug fix: the annotation previously claimed Tuple[FloatArray, IntArray]
    although a single float array is returned.)
    """
    # Make room for the bounding box of every polygon.
    n_polys = len(faces)
    bbox_coords = np.empty((n_polys, NDIM * 2), FloatDType)
    # NOTE(review): nb.prange only parallelises under @nb.njit(parallel=True);
    # with the plain @nb.njit above it runs as a serial range -- confirm
    # whether parallel execution was intended here.
    for i in nb.prange(n_polys):  # pylint: disable=not-an-iterable
        polygon = faces[i]
        bbox_coords[i] = bounding_box(polygon, vertices)
    return bbox_coords
@nb.njit(inline="always")
def copy_vertices(vertices: FloatArray, face: IntArray) -> FloatArray:
    """
    Gather the coordinates of a face's vertices into a freshly allocated
    polygon buffer and return only the filled prefix.
    """
    n = polygon_length(face)
    buffer = allocate_polygon()
    for k in range(n):
        vertex = vertices[face[k]]
        buffer[k, 0] = vertex[0]
        buffer[k, 1] = vertex[1]
    return buffer[:n]
@nb.njit(inline="always")
def copy_vertices_into(
    vertices: FloatArray, face: IntArray, out: FloatArray
) -> FloatArray:
    """
    Gather the coordinates of a face's vertices into the caller-provided
    ``out`` buffer; returns the filled prefix (a view, no allocation).
    """
    n = polygon_length(face)
    for k in range(n):
        vertex = vertices[face[k]]
        out[k, 0] = vertex[0]
        out[k, 1] = vertex[1]
    return out[:n]
@nb.njit(inline="always")
def copy_box_vertices(box: Box) -> FloatArray:
    """
    Return the four corners of ``box`` as a quad polygon, ordered
    counter-clockwise starting at (xmin, ymin).
    """
    quad = allocate_box_polygon()
    quad[0, 0] = box.xmin
    quad[0, 1] = box.ymin
    quad[1, 0] = box.xmax
    quad[1, 1] = box.ymin
    quad[2, 0] = box.xmax
    quad[2, 1] = box.ymax
    quad[3, 0] = box.xmin
    quad[3, 1] = box.ymax
    return quad
@nb.njit(inline="always")
def point_inside_box(a: Point, box: Box):
    """Return True when point ``a`` lies strictly inside ``box``."""
    inside_x = box.xmin < a.x and a.x < box.xmax
    inside_y = box.ymin < a.y and a.y < box.ymax
    return inside_x and inside_y
@nb.njit(inline="always")
def flip(face: IntArray, length: int) -> None:
    """Reverse the first ``length`` entries of ``face`` in place."""
    last = length - 1
    for k in range(length // 2):
        face[k], face[last - k] = face[last - k], face[k]
    return
@nb.njit(parallel=PARALLEL, cache=True)
def counter_clockwise(vertices: FloatArray, faces: IntArray) -> None:
    """
    Reorder every face in ``faces`` in place so its vertices run
    counter-clockwise.

    For each face, consecutive vertex triples (a, b, c) are scanned
    (starting from the last two vertices); the sign of the cross product
    of (b - a) and (c - a) gives the turn direction of the triple.
    """
    n_face = len(faces)
    for i_face in nb.prange(n_face):
        face = faces[i_face]
        length = polygon_length(face)
        a = as_point(vertices[face[length - 2]])
        b = as_point(vertices[face[length - 1]])
        for i in range(length):
            c = as_point(vertices[face[i]])
            u = to_vector(a, b)
            v = to_vector(a, c)
            product = cross_product(u, v)
            if product == 0:
                # Collinear triple: slide the window forward and keep looking.
                a = b
                b = c
            elif product < 0:
                # Clockwise turn: reverse this face's vertex order.
                # NOTE(review): a and b are not re-read after the flip, so
                # scanning continues with pre-flip points -- presumably the
                # next non-collinear triple then reads as CCW and breaks;
                # confirm this is intentional.
                flip(face, length)
            else:
                # Counter-clockwise turn: orientation is correct, stop.
                break
    return
| 28.418118 | 86 | 0.581045 | 0 | 0 | 0 | 0 | 7,799 | 0.956111 | 0 | 0 | 2,187 | 0.268113 |
2fa1d4df373d09a209451df383587b67198a18fa | 438 | py | Python | Study/src/com/company/test/favorite_languages.py | guptasuresh2004/python_handson | 8f47784b09bddabd5ab28d38641cfca671869623 | [
"MIT"
] | null | null | null | Study/src/com/company/test/favorite_languages.py | guptasuresh2004/python_handson | 8f47784b09bddabd5ab28d38641cfca671869623 | [
"MIT"
] | null | null | null | Study/src/com/company/test/favorite_languages.py | guptasuresh2004/python_handson | 8f47784b09bddabd5ab28d38641cfca671869623 | [
"MIT"
] | null | null | null | from collections import OrderedDict
# Insertion-ordered mapping of person -> favourite programming language.
favorite_languages = OrderedDict()
favorite_languages["Suresh"]="python"
favorite_languages["Kumar"]="Java"
favorite_languages["Gupta"]="c"
# Print each person's favourite language, title-cased for display.
for key in favorite_languages.keys():
    print(key+" like to use :"+favorite_languages[key].title())
# Same exercise with a plain dict (insertion-ordered on Python 3.7+).
lang = {}
lang["Suresh"]="python"
lang["Kumar"]="Java"
lang["Gupta"]="c"
lang["1"]="2"
for key in lang.keys():
    print(key+" like to use :"+lang[key])
| 24.333333 | 63 | 0.694064 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 116 | 0.26484 |
2fa1d59d738f6050541565b7625bf68ab9c9b3ab | 43 | py | Python | demo_zinnia_wordpress/__init__.py | django-blog-zinnia/wordpress2zinnia | 656df6d431418a660f0e590d2226af5e6dd7a3e6 | [
"BSD-3-Clause"
] | 7 | 2015-08-16T18:50:52.000Z | 2021-05-23T11:28:22.000Z | demo_zinnia_wordpress/__init__.py | django-blog-zinnia/wordpress2zinnia | 656df6d431418a660f0e590d2226af5e6dd7a3e6 | [
"BSD-3-Clause"
] | 5 | 2015-06-20T07:04:01.000Z | 2018-08-02T14:12:41.000Z | demo_zinnia_wordpress/__init__.py | django-blog-zinnia/wordpress2zinnia | 656df6d431418a660f0e590d2226af5e6dd7a3e6 | [
"BSD-3-Clause"
] | 7 | 2015-04-17T14:57:37.000Z | 2020-10-17T04:32:02.000Z | """Demo of Zinnia with wordpress import"""
| 21.5 | 42 | 0.72093 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 42 | 0.976744 |
2fa20b3896ff72ff33be98125f29809dab379a74 | 260 | py | Python | helper.py | vanashimko/discrete-fourier-transform | 9430ba2b0b7b994c495d6ea2a9beb1099c512e0c | [
"MIT"
] | 1 | 2018-10-06T10:17:42.000Z | 2018-10-06T10:17:42.000Z | helper.py | vanashimko/discrete-fourier-transform | 9430ba2b0b7b994c495d6ea2a9beb1099c512e0c | [
"MIT"
] | null | null | null | helper.py | vanashimko/discrete-fourier-transform | 9430ba2b0b7b994c495d6ea2a9beb1099c512e0c | [
"MIT"
] | null | null | null | from itertools import repeat
from random import randrange
def randoms_from(values, length=None):
    """
    Yield elements picked uniformly at random (with replacement) from
    ``values``.

    Yields exactly ``length`` items, or yields forever when ``length``
    is None.  ``len(values)`` is evaluated once, on first iteration.
    """
    n_values = len(values)
    produced = 0
    while length is None or produced < length:
        yield values[randrange(0, n_values)]
        produced += 1
| 26 | 63 | 0.723077 | 0 | 0 | 199 | 0.765385 | 0 | 0 | 0 | 0 | 0 | 0 |
2fa21b6782bbe1bd275b38f48a0d897b2c47128c | 3,317 | py | Python | azure/cart_svc_serverless/getCartTotal/__init__.py | ishrivatsa/lambda-samples | 17acaa5014401cd9947445140de7ee638aff0b61 | [
"MIT"
] | null | null | null | azure/cart_svc_serverless/getCartTotal/__init__.py | ishrivatsa/lambda-samples | 17acaa5014401cd9947445140de7ee638aff0b61 | [
"MIT"
] | 1 | 2021-06-02T01:00:22.000Z | 2021-06-02T01:00:22.000Z | azure/cart_svc_serverless/getCartTotal/__init__.py | ishrivatsa/serverless-examples | 17acaa5014401cd9947445140de7ee638aff0b61 | [
"MIT"
] | null | null | null | import logging
import os
from os import environ
import redis
import json
import azure.functions as func
redisHost= ""
redisPort = 6379
redisPassword = ""
def connectRedis(host, port, password):
try:
logging.info("Connecting to Redis ")
redisConnection = redis.StrictRedis(host=host, port=port, password=password, db=0)
except Exception as e:
logging.error("Error connecting to REDIS %s", e)
return func.HttpResponse('Could not connect to REDIS', status_code=500)
try:
logging.info(redisConnection.ping())
except Exception as e:
logging.error("Could not Ping Redis server %s", e)
return func.HttpResponse('Could not Ping REDIS', status_code=500)
logging.info("Successfully Connected to Redis")
return redisConnection
## Get data from the redis db
def getItems(id, r):
if r.exists(id):
data = json.loads(r.get(id))
logging.info("Received data")
logging.info(data)
else:
data = 0
return data
## Request POST /cart/item/add/{userid}
def main(req: func.HttpRequest) -> func.HttpResponse:
logging.info('getCartTotal was triggered')
if environ.get("REDIS_HOST") is not None:
if os.environ["REDIS_HOST"] != "":
redisHost = os.environ["REDIS_HOST"]
else:
logging.info("REDIS_HOST is empty")
return func.HttpResponse(status_code=500)
else:
logging.error("REDIS_HOST is not Set")
return func.HttpResponse(status_code=500)
if environ.get("REDIS_PORT") is not None:
if os.environ["REDIS_PORT"] != "":
redisPort = os.environ["REDIS_PORT"]
else:
redisPort = 6379
else:
logging.error("Could not find REDIS_PORT")
return func.HttpResponse(status_code=500)
if environ.get("REDIS_PASSWORD") is not None:
if os.environ["REDIS_PASSWORD"] != "":
redisPassword = os.environ["REDIS_PASSWORD"]
else:
logging.info("REDIS_PASSWORD is empty")
return func.HttpResponse(status_code=500)
else:
logging.error("REDIS_PASSWORD is not Set")
return func.HttpResponse(status_code=500)
## Connect to REDIS
r = connectRedis(redisHost, redisPort,redisPassword)
logging.info(req.route_params["userid"])
userID = req.route_params["userid"]
if userID != "":
existing_data = getItems(userID, r)
else:
logging.error("Missing User ID from path parameter")
return func.HttpResponse('Missing UserID from the request', status_code=400)
total = 0
if (existing_data):
for items in existing_data:
quantity = items['quantity']
price = items['price']
total += (float(quantity)*float(price))
response = {}
response['userid'] = userID
response['carttotal']=total
response = json.dumps(response)
logging.info("The total for user %s is %f", userID, total)
else:
logging.info('No items found in cart')
return func.HttpResponse('No items found in cart', status_code=204)
return func.HttpResponse(response, status_code=200)
| 30.431193 | 91 | 0.614109 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 754 | 0.227314 |
2fa406a1d9616f7c6d29469b8cff1b0491bb7fd2 | 122 | py | Python | resources/files/__init__.py | vishu221b/bookme-flask-REST-API-Collection | 9ee923e13d786af9b11421370edac718743855af | [
"MIT"
] | null | null | null | resources/files/__init__.py | vishu221b/bookme-flask-REST-API-Collection | 9ee923e13d786af9b11421370edac718743855af | [
"MIT"
] | null | null | null | resources/files/__init__.py | vishu221b/bookme-flask-REST-API-Collection | 9ee923e13d786af9b11421370edac718743855af | [
"MIT"
] | null | null | null | from .documentFileUpload import DocumentFileUploadResource
from .documentFileDownload import DocumentFileDownloadResource
| 40.666667 | 62 | 0.918033 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
2fa4bb5e606b31eeef997386be13e4aa1bf047f9 | 1,239 | py | Python | src/waldur_core/core/api_groups_mapping.py | ahti87/waldur-mastermind | 772268e62dfd8eadb387b2ec3789785817a6e621 | [
"MIT"
] | null | null | null | src/waldur_core/core/api_groups_mapping.py | ahti87/waldur-mastermind | 772268e62dfd8eadb387b2ec3789785817a6e621 | [
"MIT"
] | null | null | null | src/waldur_core/core/api_groups_mapping.py | ahti87/waldur-mastermind | 772268e62dfd8eadb387b2ec3789785817a6e621 | [
"MIT"
] | null | null | null | API_GROUPS = {
'authentication': ['/api-auth/', '/api/auth-valimo/',],
'user': ['/api/users/', '/api/user-invitations/', '/api/user-counters/',],
'organization': [
'/api/customers/',
'/api/customer-permissions-log/',
'/api/customer-permissions-reviews/',
'/api/customer-permissions/',
],
'marketplace': [
'/api/marketplace-bookings/',
'/api/marketplace-cart-items/',
'/api/marketplace-categories/',
'/api/marketplace-category-component-usages/',
'/api/marketplace-checklists-categories/',
'/api/marketplace-checklists/',
'/api/marketplace-component-usages/',
'/api/marketplace-offering-files/',
'/api/marketplace-offerings/',
'/api/marketplace-order-items/',
'/api/marketplace-orders/',
'/api/marketplace-plans/',
'/api/marketplace-plugins/',
'/api/marketplace-public-api/',
'/api/marketplace-resource-offerings/',
'/api/marketplace-resources/',
'/api/marketplace-screenshots/',
'/api/marketplace-service-providers/',
],
'reporting': [
'/api/support-feedback-average-report/',
'/api/support-feedback-report/',
],
}
| 35.4 | 78 | 0.583535 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 909 | 0.733656 |
2fa4e15f7417a6448e8ac7c6f3304b5c54934d36 | 1,758 | py | Python | python/345.py | kylekanos/project-euler-1 | af7089356a4cea90f8ef331cfdc65e696def6140 | [
"BSD-2-Clause-FreeBSD"
] | null | null | null | python/345.py | kylekanos/project-euler-1 | af7089356a4cea90f8ef331cfdc65e696def6140 | [
"BSD-2-Clause-FreeBSD"
] | null | null | null | python/345.py | kylekanos/project-euler-1 | af7089356a4cea90f8ef331cfdc65e696def6140 | [
"BSD-2-Clause-FreeBSD"
] | 1 | 2019-09-17T00:55:58.000Z | 2019-09-17T00:55:58.000Z | #!/usr/bin/env python
# Project Euler 345 "Matrix Sum": choose one entry from each row and each
# column of the 15x15 matrix below so that the sum of the picks is maximal.
# (Python 2 source: uses xrange and the print statement.)
s="""
7 53 183 439 863 497 383 563 79 973 287 63 343 169 583
627 343 773 959 943 767 473 103 699 303 957 703 583 639 913
447 283 463 29 23 487 463 993 119 883 327 493 423 159 743
217 623 3 399 853 407 103 983 89 463 290 516 212 462 350
960 376 682 962 300 780 486 502 912 800 250 346 172 812 350
870 456 192 162 593 473 915 45 989 873 823 965 425 329 803
973 965 905 919 133 673 665 235 509 613 673 815 165 992 326
322 148 972 962 286 255 941 541 265 323 925 281 601 95 973
445 721 11 525 473 65 511 164 138 672 18 428 154 448 848
414 456 310 312 798 104 566 520 302 248 694 976 430 392 198
184 829 373 181 631 101 969 613 840 740 778 458 284 760 390
821 461 843 513 17 901 711 993 293 157 274 94 192 156 574
34 124 4 878 450 476 712 914 838 669 875 299 823 329 699
815 559 813 459 522 788 168 586 966 232 308 833 251 631 107
813 883 451 509 615 77 281 613 459 205 380 274 302 35 805
"""
# Parse the matrix into arr[row][col]; s starts with a newline, hence the
# slice from index 1 of the split lines.
arr = []
sarr = s.split('\n')
for i in xrange(1,16):
    arr.append([])
    sarr2 = sarr[i].split(' ')
    for j in xrange(15):
        arr[i-1].append(int(sarr2[j]))
# maxsum[j] = sum over columns j..14 of each column's largest entry: an
# optimistic upper bound on what any partial solution can still gain,
# used below to prune the search.  maxsum[15] = 0 is the sentinel for the
# final column.
maxsum = [0]*15
prev = 0
for j in xrange(14,-1,-1):
    big = 0
    for i in xrange(15):
        big = max(big,arr[i][j])
    maxsum[j] = prev+big
    prev=maxsum[j]
maxsum.append(0)
#print maxsum
#def calcmins(marr, iarr):
#    prev=0
#    for j in xrange(14,-1,-1):
#        iarr[j] = prev+arr[j][marr[j]]
#        prev = iarr[j]
best=0
# Depth-first branch and bound over columns j.  The parameter ``s``
# (shadowing the data string above) is a bitmask of the rows already used;
# a branch is pruned whenever even the optimistic bound maxsum[j+1] cannot
# beat the best total found so far.
# NOTE(review): the j == 15 base case relies on the full bitmask making the
# loop's `continue` condition always true (so arr[i][15] is never touched);
# an explicit `return` after updating best would be clearer -- confirm.
def recurse(j, tot, s, row):
    if j==15:
        global best
        best = max(best, tot)
    for i in xrange(15):
        if (s>>i)&1 or tot+arr[i][j]+maxsum[j+1] < best: continue
        row[j]=i
        recurse(j+1,tot+arr[i][j],s|(1<<i),row)
        #if j==1 or j==2: print j,i
rowuse = [0]*15
recurse(0,0,0,rowuse)
print best
| 28.354839 | 65 | 0.632537 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,083 | 0.616041 |
2fa52a2c878f2d09417143bd09b47087769bc67b | 3,944 | py | Python | utils/logger.py | awesome-archive/deep-q-learning | bd959029adb733754966115fbb0a08f66f6dbad0 | [
"MIT"
] | 1 | 2019-10-12T09:40:30.000Z | 2019-10-12T09:40:30.000Z | utils/logger.py | awesome-archive/deep-q-learning | bd959029adb733754966115fbb0a08f66f6dbad0 | [
"MIT"
] | null | null | null | utils/logger.py | awesome-archive/deep-q-learning | bd959029adb733754966115fbb0a08f66f6dbad0 | [
"MIT"
] | null | null | null | from tensorboardX import SummaryWriter
class Logger:
    """
    Collects training metrics (episode rewards, Q values, losses, epsilon)
    and writes them to TensorBoard via tensorboardX.

    Every logged name is prefixed with ``<env_name>/``.
    """

    def __init__(self, log_dir, env_name='Pong-v0'):
        """
        log_dir: directory for the TensorBoard event files.
        env_name: metric namespace prefix (generalises the previously
            hard-coded 'Pong-v0'; the default keeps existing callers
            unchanged).
        """
        self.env_name = env_name
        # TensorBoard
        self.writer = SummaryWriter(log_dir=log_dir)
        # Episode Values
        self.ep = 0
        self.ep_rewards = []
        self.ep_max_reward = 0.0
        self.ep_min_reward = 0.0
        # Updates Values
        self.grad_count = int(0)
        self.total_q = 0.0
        self.total_loss = 0.0
        self.mb_loss = 0.0
        self.mb_q = 0.0
        # Counters
        self.epsilon_val = 0.0
        self.update_count = 0.0
        self.step = 0.0

    def network(self, net):
        """Log a histogram of every parameter tensor in ``net``."""
        for name, param in net.named_parameters():
            self._log(name, param.clone().cpu().data.numpy(),
                      self.step, type='histogram')

    def epsilon(self, eps, step):
        """Record the exploration rate ``eps`` at training ``step``."""
        self.step = step
        self.epsilon_val = eps
        self._log('epsilon', self.epsilon_val, self.step)

    def q_loss(self, q, loss, step):
        """
        Record per-minibatch Q value and loss plus their running averages.

        q: tensor of per-sample Q values for the minibatch.
        loss: loss tensor for the minibatch.
        """
        self.step = step
        self.update_count += 1
        self.mb_loss = loss.data.cpu().sum()
        # Mean Q over the minibatch.
        self.mb_q = q.sum().data.cpu().sum() / int(q.size()[0])
        self.total_q += self.mb_q
        self.total_loss += self.mb_loss
        avg_loss = self.total_loss / self.update_count
        avg_q = self.total_q / self.update_count
        self._log('update.average_q', avg_q, self.step)
        self._log('update.average_loss', avg_loss, self.step)
        self._log('update.minibatch_loss', self.mb_loss, self.step)
        self._log('update.minibatch_q', self.mb_q, self.step)

    def episode(self, reward):
        """Accumulate one reward event for the current episode."""
        self.ep_rewards.append(reward)
        self.ep_max_reward = max(self.ep_max_reward, reward)
        self.ep_min_reward = min(self.ep_min_reward, reward)

    def display(self):
        """Print a console summary of the current episode / training state."""
        avg_loss = None if self.update_count == 0 else self.total_loss / self.update_count
        avg_q = None if self.update_count == 0 else self.total_q / self.update_count
        # Bug fix: guard the episode average as well, so display() no longer
        # raises ZeroDivisionError before the first reward arrives.
        avg_ep_reward = None if len(self.ep_rewards) == 0 else sum(
            self.ep_rewards) / float(len(self.ep_rewards))
        nonzero_reward_list = [
            reward for reward in self.ep_rewards if reward != 0]
        avg_ep_nonzero_reward = None if len(nonzero_reward_list) == 0 else sum(
            nonzero_reward_list) / float(len(nonzero_reward_list))
        values = {
            'Episode': self.ep,
            'Step': self.step,
            'Avg. Loss': avg_loss,
            'Avg. Q': avg_q,
            'Episode Avg. Reward': avg_ep_reward,
            'Episode Avg. Reward Non-0': avg_ep_nonzero_reward,
            'Episode Min. Reward': self.ep_min_reward,
            'Episode Max. Reward': self.ep_max_reward,
            'Minibatch Loss': self.mb_loss,
            'Minibatch Q': self.mb_q,
            'Epsilon': self.epsilon_val
        }
        print('-------')
        for key in values:
            print('{}: {}'.format(key, values[key]))

    def reset_episode(self):
        """Flush per-episode metrics to TensorBoard and start a new episode."""
        # Bug fix: guard the averages so an episode with no rewards (or no
        # non-zero rewards) no longer raises ZeroDivisionError; display()
        # already guarded the same computations.  Unloggable averages are
        # simply skipped.
        if self.ep_rewards:
            nonzero_reward_list = [
                reward for reward in self.ep_rewards if reward != 0]
            if nonzero_reward_list:
                avg_ep_nonzero_reward = sum(
                    nonzero_reward_list) / float(len(nonzero_reward_list))
                self._log('ep.average_reward_nonzero',
                          avg_ep_nonzero_reward, self.ep)
            avg_ep_reward = sum(self.ep_rewards) / float(len(self.ep_rewards))
            self._log('ep.average_reward', avg_ep_reward, self.ep)
        self._log('ep.min_reward', self.ep_min_reward, self.ep)
        self._log('ep.max_reward', self.ep_max_reward, self.ep)
        self.ep += 1
        self.ep_rewards = []
        self.ep_max_reward = 0.0
        self.ep_min_reward = 0.0

    def _log(self, name, value, step, type='scalar'):
        """Write one value to TensorBoard as '<env_name>/<name>'."""
        # Add Env.Name to name
        name = '{}/{}'.format(self.env_name, name)
        # Log in Tensorboard
        if type == 'scalar':
            self.writer.add_scalar(name, value, step)
            # Stop tensorboardX from accumulating every scalar in memory.
            self.writer.scalar_dict = {}
        elif type == 'histogram':
            self.writer.add_histogram(name, value, step)
| 35.214286 | 90 | 0.592546 | 3,902 | 0.989351 | 0 | 0 | 0 | 0 | 0 | 0 | 497 | 0.126014 |
2fa634a1aa1d4eb31ea04f48918ed21eb9719789 | 1,241 | py | Python | docs/html_docs/manual/notebook_to_markdown.py | jtran10/pyNastran | 4aed8e05b91576c2b50ee835f0497a9aad1d2cb0 | [
"BSD-3-Clause"
] | null | null | null | docs/html_docs/manual/notebook_to_markdown.py | jtran10/pyNastran | 4aed8e05b91576c2b50ee835f0497a9aad1d2cb0 | [
"BSD-3-Clause"
] | null | null | null | docs/html_docs/manual/notebook_to_markdown.py | jtran10/pyNastran | 4aed8e05b91576c2b50ee835f0497a9aad1d2cb0 | [
"BSD-3-Clause"
] | null | null | null | from __future__ import print_function
import os
import shutil
import pyNastran
from pyNastran.utils import print_bad_path
pkg_path = pyNastran.__path__[0]
def create_rst_from_ipython_notebooks():
#curdir = os.getcwd()
notebook_dir = os.path.join(pkg_path, '..', 'docs', 'quick_start', 'demo')
pydocs_dir = os.path.join(pkg_path, '..', 'docs', 'html_docs', 'quick_start')
assert os.path.exists(pydocs_dir), print_bad_path(quick_start_pydocs_dir)
assert os.path.exists(notebook_dir), print_bad_path(notebook_dir)
os.chdir(notebook_dir)
for fname in os.listdir(notebook_dir):
fnamei = os.path.basename(fname)
base = os.path.splitext(fnamei)[0]
fname2 = base + '.rst'
if fnamei.startswith('.'):
continue
if not fnamei.endswith('.ipynb'):
continue
os.system('ipython nbconvert --to rst %s' % fname)
if not os.path.exists(fname2):
print('%s was not made...' % fname2)
continue
moved_fname2 = os.path.join(pydocs_dir, fname2)
try:
if os.path.exists(moved_fname2):
os.remove(moved_fname2)
os.rename(fname2, moved_fname2)
except:
pass
| 31.025 | 81 | 0.63336 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 152 | 0.122482 |
2fa6768e2256b3fe0e7edd9f950ccec3c27501ae | 2,852 | py | Python | src/generator_ui.py | WillMorrison/JouleQuest | 68212e4be8afb85d42e45b83b074cc028588318f | [
"BSD-3-Clause"
] | null | null | null | src/generator_ui.py | WillMorrison/JouleQuest | 68212e4be8afb85d42e45b83b074cc028588318f | [
"BSD-3-Clause"
] | null | null | null | src/generator_ui.py | WillMorrison/JouleQuest | 68212e4be8afb85d42e45b83b074cc028588318f | [
"BSD-3-Clause"
] | null | null | null | import pygame
import pygame_gui
from rules_python.python.runfiles import runfiles
class GeneratorWindow(pygame_gui.elements.ui_window.UIWindow):
    """
    320x120 UI window representing one generator: a toggle image, a
    connect/disconnect button and a live output label.
    """

    def __init__(self, position, ui_manager, generator):
        """
        position: top-left screen coordinate of the window.
        ui_manager: the pygame_gui UIManager that owns this window.
        generator: backing model object; must expose ``name``,
            ``current_output``, ``output_connected`` and
            ``toggle_output_connected()`` (used below).
        """
        super().__init__(pygame.Rect(position, (320, 120)), ui_manager,
                         window_display_title=generator.name,
                         object_id='#generator_window')

        self._generator = generator

        # Toggle button; its text is kept in sync by update().
        self.button = pygame_gui.elements.UIButton(pygame.Rect((64, 0), (150, 30)),
                                                   'Unknown',
                                                   ui_manager,
                                                   container=self,
                                                   object_id='#toggle_button')
        self.output_label = pygame_gui.elements.UILabel(pygame.Rect((64, 30), (160, 25)),
                                                        f'Current Output: {self._generator.current_output}',
                                                        ui_manager,
                                                        container=self,
                                                        object_id='#output_label')
        self._load_images()
        # 64x64 light-switch image reflecting the connected state.
        self._image = pygame_gui.elements.UIImage(pygame.Rect((0,0), (64, 64)),
                                                  self._toggle_off_image,
                                                  ui_manager,
                                                  container=self,
                                                  object_id='#toggle_image')

    def _load_images(self):
        """Load and scale the on/off switch images via bazel runfiles."""
        r = runfiles.Create()
        # NOTE(review): the PNGs are opened in text mode ('r'); pygame may
        # fail to load binary data from a text-mode handle on some
        # platforms -- confirm whether 'rb' is needed here.
        with open(r.Rlocation('joule_quest/assets/images/light_switch_off_256x256.png'), 'r') as f:
            self._toggle_off_image = pygame.transform.scale(pygame.image.load(f), (64,64)).convert_alpha()
        with open(r.Rlocation('joule_quest/assets/images/light_switch_on_256x256.png'), 'r') as f:
            self._toggle_on_image = pygame.transform.scale(pygame.image.load(f), (64,64)).convert_alpha()

    def process_event(self, event):
        """Toggle the generator's output when our button is pressed."""
        handled = super().process_event(event)
        if (event.type == pygame.USEREVENT and
                event.user_type == pygame_gui.UI_BUTTON_PRESSED and
                event.ui_object_id == "#generator_window.#toggle_button" and
                event.ui_element == self.button):
            handled = True
            self._generator.toggle_output_connected()
        return handled

    def update(self, time_delta):
        """Refresh label text, button caption and switch image each frame."""
        super().update(time_delta)
        self.output_label.set_text(f'Current Output: {self._generator.current_output}')
        self.output_label.update(time_delta)
        self.button.set_text('Connected' if self._generator.output_connected else 'Disconnected')
        self.button.update(time_delta)
        self._image.set_image(self._toggle_on_image if self._generator.output_connected else self._toggle_off_image)
        self._image.set_dimensions((64, 64))
        self._image.update(time_delta)
| 42.567164 | 116 | 0.582398 | 2,766 | 0.969846 | 0 | 0 | 0 | 0 | 0 | 0 | 352 | 0.123422 |
2faa39feebe9d6a660e11520ad56bb2a92a90312 | 1,325 | py | Python | compliance_suite/functions/update_server_settings.py | alextsaihi/rnaget-compliance-suite | a3accae431b9e4f7791dfa5ae867e70da2dd6278 | [
"Apache-2.0"
] | 1 | 2019-09-18T14:38:55.000Z | 2019-09-18T14:38:55.000Z | compliance_suite/functions/update_server_settings.py | alextsaihi/rnaget-compliance-suite | a3accae431b9e4f7791dfa5ae867e70da2dd6278 | [
"Apache-2.0"
] | 14 | 2019-05-24T18:55:23.000Z | 2022-02-25T16:56:28.000Z | compliance_suite/functions/update_server_settings.py | alextsaihi/rnaget-compliance-suite | a3accae431b9e4f7791dfa5ae867e70da2dd6278 | [
"Apache-2.0"
] | 8 | 2019-04-08T14:48:35.000Z | 2022-02-04T16:59:59.000Z | # -*- coding: utf-8 -*-
"""Module compliance_suite.functions.update_server_settings.py
Functions to update server config/settings based on the response of a previous
API request. Each function should accept a Runner object and modify its
"retrieved_server_settings" attribute
"""
def update_supported_filters(runner, resource, response_obj):
    """Update server settings with the supported filters for a resource

    Appends each response entry's "filter" value to the resource's
    ``supp_filters`` list on the runner.

    Arguments:
        runner (Runner): reference to Runner object
        resource (str): identifies project, study, expression, continuous
        response_obj (Response): response object to parse
    """
    supported = runner.retrieved_server_settings[resource]["supp_filters"]
    for entry in response_obj:
        supported.append(entry["filter"])
def update_expected_format(runner, resource, response_obj):
    """Update server settings with the expected file format for a resource

    Stores the response's "fileType" as the expected format for both the
    expressions and continuous resources (``resource`` itself is unused,
    matching the original signature).

    Arguments:
        runner (Runner): reference to Runner object
        resource (str): identifies project, study, expression, continuous
        response_obj (Response): response object to parse
    """
    settings = runner.retrieved_server_settings
    file_type = response_obj["fileType"]
    for target in ("expressions", "continuous"):
        settings[target]["exp_format"] = file_type
2fab631d5340c51e5036835293e80b75871b2ef7 | 1,272 | py | Python | rnacentral_pipeline/rnacentral/ftp_export/fasta.py | RNAcentral/rnacentral-import-pipeline | 238e573440c72581a051b16c15f56fcd25bece74 | [
"Apache-2.0"
] | 1 | 2018-08-09T14:41:16.000Z | 2018-08-09T14:41:16.000Z | rnacentral_pipeline/rnacentral/ftp_export/fasta.py | RNAcentral/rnacentral-import-pipeline | 238e573440c72581a051b16c15f56fcd25bece74 | [
"Apache-2.0"
] | 60 | 2015-02-04T16:43:53.000Z | 2022-01-27T10:28:43.000Z | rnacentral_pipeline/rnacentral/ftp_export/fasta.py | RNAcentral/rnacentral-import-pipeline | 238e573440c72581a051b16c15f56fcd25bece74 | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
"""
Copyright [2009-2017] EMBL-European Bioinformatics Institute
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import re
import itertools as it
from Bio import SeqIO
# Residue codes accepted by nhmmer (case-insensitive); the whole sequence
# must consist of these characters.
NHMMER_PATTERN = re.compile("^[ABCDGHKMNRSTVWXYU]+$", re.IGNORECASE)


def is_valid_nhmmer_record(record):
    """
    Return True when the record's sequence contains only characters that
    nhmmer accepts (empty sequences are invalid).
    """
    sequence = str(record.seq)
    return NHMMER_PATTERN.match(sequence) is not None
def valid_nhmmer(handle, output):
    """Stream the nhmmer-compatible FASTA records from handle to output."""
    records = SeqIO.parse(handle, "fasta")
    kept = (record for record in records if is_valid_nhmmer_record(record))
    SeqIO.write(kept, output, "fasta")
def invalid_nhmmer(handle, output):
    """Stream the FASTA records *rejected* by the nhmmer check to output."""
    records = SeqIO.parse(handle, "fasta")
    dropped = (record for record in records if not is_valid_nhmmer_record(record))
    SeqIO.write(dropped, output, "fasta")
2fb2669f969726a9b2f0b2041c3aff48c8729aac | 1,746 | py | Python | setup.py | RicoViking9000/profanityfilter | 1c1c1dbc0e895b85ee1e685ab5ef8b4c37c5db00 | [
"BSD-3-Clause"
] | 63 | 2016-09-21T17:33:43.000Z | 2021-12-04T18:36:05.000Z | setup.py | RicoViking9000/profanityfilter | 1c1c1dbc0e895b85ee1e685ab5ef8b4c37c5db00 | [
"BSD-3-Clause"
] | 12 | 2017-07-14T04:47:37.000Z | 2022-02-15T09:50:21.000Z | setup.py | RicoViking9000/profanityfilter | 1c1c1dbc0e895b85ee1e685ab5ef8b4c37c5db00 | [
"BSD-3-Clause"
] | 28 | 2017-07-07T21:52:42.000Z | 2022-02-05T09:49:22.000Z | from setuptools import setup, find_packages
from codecs import open
from os import path
# Directory containing this setup script (kept for parity with the standard
# setup.py template; not otherwise used below).
here = path.abspath(path.dirname(__file__))

setup(
    name='profanityfilter',
    # Bug fix: '2.0.6.' (trailing dot) is not a valid PEP 440 version
    # identifier and is rejected by modern setuptools/pip.
    version='2.0.6',
    description='A universal Python library for detecting and/or filtering profane words.',
    long_description='For more details visit https://github.com/areebbeigh/profanityfilter',
    url='https://github.com/areebbeigh/profanityfilter',
    author='Areeb Beigh',
    author_email='areebbeigh@gmail.com',
    license='BSD',
    classifiers=[
        # 3 - Alpha
        # 4 - Beta
        # 5 - Production/Stable
        'Development Status :: 3 - Alpha',

        'Intended Audience :: Developers',
        'Operating System :: OS Independent',
        'Topic :: Text Processing :: Linguistic',
        'Topic :: Software Development :: Libraries :: Python Modules',
        'Topic :: Software Development :: Build Tools',
        'License :: OSI Approved :: BSD License',

        'Programming Language :: Python :: 2',
        'Programming Language :: Python :: 2.6',
        'Programming Language :: Python :: 2.7',
        'Programming Language :: Python :: 3',
        'Programming Language :: Python :: 3.3',
        'Programming Language :: Python :: 3.4',
        'Programming Language :: Python :: 3.5',
        'Programming Language :: Python :: 3.6',
        'Programming Language :: Python :: 3.7',
    ],
    keywords='profanity filter clean content',
    packages=find_packages(exclude=['tests']),
    install_requires=['inflection'],
    # Ship the bundled bad-words list with the package.
    package_data={
        'profanityfilter': ['data/badwords.txt'],
    },
    entry_points={
        'console_scripts': [
            'profanityfilter=profanityfilter:main',
        ],
    },
)
| 34.92 | 92 | 0.616838 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,085 | 0.62142 |
2fb4415b648fd4703b55126d3179e70a5cb6c8e0 | 12,657 | py | Python | src/boids_core/boids.py | v-hill/advanced-computational-physics | 0eec966c5e10859e50913536ddf0e555807d38ef | [
"MIT"
] | null | null | null | src/boids_core/boids.py | v-hill/advanced-computational-physics | 0eec966c5e10859e50913536ddf0e555807d38ef | [
"MIT"
] | null | null | null | src/boids_core/boids.py | v-hill/advanced-computational-physics | 0eec966c5e10859e50913536ddf0e555807d38ef | [
"MIT"
] | null | null | null | """
This script contains the code implementing my version of the Boids artificial
life programme.
"""
# ---------------------------------- Imports ----------------------------------
# Allow imports from parent folder
import sys, os
sys.path.insert(0, os.path.abspath('..'))
# Standard library imports
import timeit
import time
import numpy as np
from math import atan2, sqrt
# Repo module imports
import boids_core.generate_values as generate_values
# Code from delauney triangulation module
from delauney_triangulation.triangulation_core.triangulation import triangulate
from delauney_triangulation.triangulation_core.linear_algebra import (vector_add,
vector_sub,
list_divide,
perpendicular,
normalise)
# ----------------------------- Class definitions -----------------------------
class World():
    """
    Axis-aligned rectangular 2D world that the boids move around in.
    """
    def __init__(self, world_size):
        """
        world_size : sequence
            Bounds ordered as (x_min, x_max, y_min, y_max); any extra
            entries are ignored.
        """
        bounds = world_size
        self.x_min = bounds[0]
        self.x_max = bounds[1]
        self.y_min = bounds[2]
        self.y_max = bounds[3]
class Object():
    """
    Base class for anything that occupies a position in the world.
    """
    def __init__(self, idx, position, stationary=False):
        """
        idx: identifier for this object; position: [x, y] coordinate;
        stationary: True for fixed objects such as obstacles.
        """
        self.index = idx
        self.pos = position
        self.stationary = stationary
class Obstacle(Object):
    """A fixed object in the world: an Object that is always stationary."""
    def __init__(self, idx, position):
        super().__init__(idx, position, stationary=True)
class Boid(Object):
"""
Class to represent a single Boid.
"""
    def __init__(self, idx, position, velocity, options):
        """
        idx and position are forwarded to Object; velocity is the initial
        [vx, vy]; ``options`` is a dict of behaviour constants (keys read
        below).
        """
        super().__init__(idx, position)
        self.vel = velocity
        # Neighbour entries; populated elsewhere (presumably from the
        # triangulation) -- only index 1 of each entry is read by the rules.
        self.neighbours = []
        self.max_speed = options['max_speed']
        self.field_of_view = options['field_of_view']
        self.vision_distance = options['vision_distance']
        self.safety_zone = options['safety_zone']
        self.alignment_perception = options['alignment_perception']
        self.cohesion_perception = options['cohesion_perception']
        # The options key really is spelt 'seperation_perception'; do not
        # "correct" it without also updating every options producer.
        self.separation_perception = options['seperation_perception']
def __repr__(self):
return f"{self.index}, {self.pos}, {self.vel}"
def magnitude(self):
return sqrt(self.vel[0]**2 + self.vel[1]**2)
def direction(self):
return atan2(self.vel[1], self.vel[0])
    def make_tri(self, height, width):
        """
        Generate the co-ordinates of the three points of a triangle used to
        plot the boid.

        The triangle points along the velocity: the tip sits half a height
        ahead of the centre and the two base corners half a height behind,
        offset half a width to either side.

        Parameters
        ----------
        height : int
            The height of the boid in pixels.
        width : int
            The width of the boid in pixels.

        Returns
        -------
        numpy.array
            Numpy array with the triangle coordinates.
        """
        # Velocity rescaled to length ``height`` / ``width`` (list_divide
        # presumably divides component-wise; helpers come from the
        # delauney_triangulation package -- confirm there).
        offset_h = list_divide(self.vel, self.magnitude()/height)
        offset_w = list_divide(self.vel, self.magnitude()/width)
        offset_w = perpendicular(offset_w)
        p1 = vector_add(self.pos, list_divide(offset_h, 2))
        p2 = p3 = vector_sub(self.pos, list_divide(offset_h, 2))
        p2 = vector_add(p2, list_divide(offset_w, 2))
        p3 = vector_sub(p3, list_divide(offset_w, 2))
        return (np.asarray([p1, p2, p3]).astype(int))
    def restrict_fov(self, positions):
        """
        Function to limit the field of view of the boid. Neighbours beyond
        the self.field_of_view/2 angle are removed from the set of
        neighbours, as are neighbours further away than
        self.vision_distance.

        Parameters
        ----------
        positions : list
            List of all coordinates of the boids.
        """
        new_neighbours = []
        # NOTE: atan2 is called as atan2(x, y) here (angle measured from
        # the +y axis), unlike direction() which uses atan2(y, x).  Both
        # angles in this method use the same convention, so the comparison
        # is self-consistent.
        boid_dir = atan2(self.vel[0], self.vel[1])
        for neighbour in self.neighbours:
            n_pos = positions[neighbour[1]]
            # Find the angle between boid direction and neighbour
            angle = atan2(n_pos[0]-self.pos[0], n_pos[1]-self.pos[1])
            # print(f"{boid_dir},{boid_dir - self.field_of_view/2},{angle},{boid_dir + self.field_of_view/2}")
            # NOTE(review): the interval test does not wrap at +/-pi, so
            # neighbours straddling the atan2 discontinuity may be
            # misclassified -- confirm whether that is acceptable here.
            if ((boid_dir - self.field_of_view/2) < angle and
                angle < (boid_dir + self.field_of_view/2)):
                diff_x = n_pos[0] - self.pos[0]
                diff_y = n_pos[1] - self.pos[1]
                distance = sqrt(diff_x**2 + diff_y**2)
                if distance < self.vision_distance:
                    new_neighbours.append(neighbour)
        self.neighbours = new_neighbours
def separation(self, positions):
"""
Function to implemen the boids seperation rule.
"""
resultant_x = 0
resultant_y = 0
counter = 0
for neighbour in self.neighbours:
n_pos = positions[neighbour[1]]
diff_x = n_pos[0] - self.pos[0]
diff_y = n_pos[1] - self.pos[1]
distance = sqrt(diff_x**2 + diff_y**2)
if distance < self.safety_zone:
counter += 1
resultant_x -= diff_x / distance
resultant_y -= diff_y / distance
if counter != 0:
resultant_x /= counter
resultant_y /= counter
vs_x = self.separation_perception * resultant_x
vs_y = self.separation_perception * resultant_y
# print(f"separation,{vs_x:0.4f},{vs_y:0.4f}")
return [vs_x, vs_y]
def cohesion(self, positions):
"""
Function to implemen the boids cohesion rule.
"""
num_neighbours = len(self.neighbours)
resultant_x = 0
resultant_y = 0
for neighbour in self.neighbours:
n_pos = positions[neighbour[1]]
resultant_x += n_pos[0]
resultant_y += n_pos[1]
resultant_x /= num_neighbours
resultant_y /= num_neighbours
vc_x = self.cohesion_perception * (resultant_x - self.pos[0])
vc_y = self.cohesion_perception * (resultant_y - self.pos[1])
# print(f"cohesion,{vc_x:0.4f},{vc_y:0.4f}")
return [vc_x, vc_y]
def alignment(self, velocities):
"""
Function to implemen the boids alignment rule.
"""
num_neighbours = len(self.neighbours)
resultant_vx = 0
resultant_vy = 0
for neighbour in self.neighbours:
n_velo = velocities[neighbour[1]]
resultant_vx += n_velo[0]
resultant_vy += n_velo[1]
resultant_vx /= num_neighbours
resultant_vy /= num_neighbours
va_x = self.alignment_perception * resultant_vx
va_y = self.alignment_perception * resultant_vy
# print(f"alignment,{va_x:0.4f},{va_y:0.4f}")
return [va_x, va_y]
def wrap_world(self, world):
"""
Apply period boundary conditions, so if the boid goes off the edge
of the world it reappears on the opposite edge.
"""
if self.pos[0] < 0:
self.pos[0] = world.x_max + self.pos[0]
if self.pos[0] > world.x_max:
self.pos[0] = self.pos[0] - world.x_max
if self.pos[1] < 0:
self.pos[1] = world.y_max + self.pos[1]
if self.pos[1] > world.y_max:
self.pos[1] = self.pos[1] - world.y_max
def update_boid(self, positions, velocities, world):
"""
Function to apply all the boid rules to update the position and
velocity of a boid for a single time-step.
"""
self.restrict_fov(positions)
# print(f"current pos: {self.pos[0]:0.4f}, {self.pos[1]:0.4f}")
# print(f"current vel: {self.vel[0]:0.4f}, {self.vel[1]:0.4f}")
if len(self.neighbours) >= 1:
ali = self.alignment(velocities)
coh = self.cohesion(positions)
sep = self.separation(positions)
self.vel[0] += (coh[0] + ali[0] + sep[0])
self.vel[1] += (coh[1] + ali[1] + sep[1])
# curl = perpendicular(self.vel)
# self.vel = vector_add(self.vel, list_divide(curl, 20))
if sqrt(self.vel[0]**2 + self.vel[1]**2) > self.max_speed:
new_v = normalise(self.vel, self.max_speed)
self.vel = new_v
self.pos[0] += self.vel[0]
self.pos[1] += self.vel[1]
self.wrap_world(world)
# print(f"new pos: {self.pos[0]:0.4f}, {self.pos[1]:0.4f}")
# print(f"new vel: {self.vel[0]:0.4f}, {self.vel[1]:0.4f}")
# print("-"*32)
class Boids():
    """
    A Class to store the full set of Boid Class objects, along with associated
    functions on all boids.
    """
    def __init__(self, number, world, options):
        self.num = number
        self.world = world
        self.members = []
        self.positions = []
        self.velocities = []
        self.triangulation = None
        self.max_speed = options['max_speed']

    def add_boid(self, new_boid):
        """Append a single Boid to the flock."""
        self.members.append(new_boid)

    def generate_boids(self, options, distribution='random'):
        """
        Setup the initial positions and velocities of the boids.

        Parameters
        ----------
        options : dict
            Dictionary of setup options.
        distribution : str, optional
            Choose how the boids are initially distributed.
            The default is 'random'. 'lattice' and 'lattice_with_noise' are
            alternative options.

        Raises
        ------
        ValueError
            If ``distribution`` is not one of the supported values.
        """
        if distribution == 'random':
            positions = generate_values.random(self.num, self.world)
        elif distribution == 'lattice':
            positions = generate_values.lattice(self.num, self.world)
        elif distribution == 'lattice_with_noise':
            positions = generate_values.noisy_lattice(self.num, self.world)
        else:
            # Bug fix: an unknown distribution previously fell through and
            # raised an obscure NameError on 'positions'; fail fast instead.
            raise ValueError(
                "unknown distribution: {!r}".format(distribution))
        velocities = generate_values.random_velocities(self.num, self.max_speed)
        for i in range(self.num):
            new_boid = Boid(i, positions[i], velocities[i], options)
            self.add_boid(new_boid)

    def get_pos_vel(self):
        """Cache the members' current positions and velocities on self."""
        self.positions = [boid.pos for boid in self.members]
        self.velocities = [boid.vel for boid in self.members]

    def sort_boids(self):
        """
        Perform a lexicographic sort on the boids by position.
        """
        self.members = sorted(self.members,
                              key=lambda b: [b.pos[0], b.pos[1]])

    def triangulate_boids(self):
        """
        Use the delauney_triangulation module to triangulate the set of boids.
        """
        self.sort_boids()
        self.get_pos_vel()
        self.triangulation = triangulate(self.positions)

    def setup_triangulate_boids(self):
        """
        Set up for triangulation without actually performing the Delaunay
        triangulation algorithm. This is used for the MPI implementation
        (in 'run_boids_mpi_cli.py') where there is a custom MPI triangulate
        function.
        """
        self.sort_boids()
        self.get_pos_vel()

    def make_neighbourhoods(self):
        """
        Make neighbourhoods using the Delaunay triangulation module.
        """
        # NOTE(review): points_seen is never appended to, so the membership
        # test is always True; confirm whether per-origin deduplication was
        # intended here.
        points_seen = []
        for edge in self.triangulation.edges:
            if edge.org not in points_seen and not edge.deactivate:
                connections = edge.find_connections(self.triangulation.edges)
                self.members[edge.org].neighbours = connections

    def make_neighbourhoods_basic(self, max_dist=5):
        """
        Make neighbourhoods using the linear search algorithm.
        """
        for member in self.members:
            member.neighbours = []
            for i, pos in enumerate(self.positions):
                diff_x = pos[0] - member.pos[0]
                diff_y = pos[1] - member.pos[1]
                distance = sqrt(diff_x**2 + diff_y**2)
                if 0 < distance < max_dist:
                    member.neighbours.append([member.index, i])
| 37.117302 | 111 | 0.544758 | 11,524 | 0.910484 | 0 | 0 | 0 | 0 | 0 | 0 | 3,736 | 0.295173 |
2fb45227a52d47d6e4bff6091b0b18ceef4fe544 | 2,904 | py | Python | chateau/session/session.py | softwerks/chateau | 6218dd623b21298a29f255e9c0d88d69651796e0 | [
"Apache-2.0"
] | null | null | null | chateau/session/session.py | softwerks/chateau | 6218dd623b21298a29f255e9c0d88d69651796e0 | [
"Apache-2.0"
] | null | null | null | chateau/session/session.py | softwerks/chateau | 6218dd623b21298a29f255e9c0d88d69651796e0 | [
"Apache-2.0"
] | null | null | null | # Copyright 2021 Softwerks LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import dataclasses
import time
from typing import List, Optional
import flask
import redis.client
@dataclasses.dataclass
class Session:
    """Session data persisted between requests.

    Backed by a redis hash at key ``session:<token>``; authenticated users
    additionally have their tokens tracked in the set ``user:sessions:<user_id>``.
    """

    address: str  # client address recorded for this session
    # Derived in __post_init__: True when user_id is set.
    authenticated: bool = dataclasses.field(init=False)
    created: str  # creation timestamp (string, as stored in redis)
    # Derived in __post_init__: user_id when authenticated, else token.
    id_: str = dataclasses.field(init=False)
    last_seen: str  # unix timestamp string; refreshed on every request
    token: str  # opaque session token; redis key is "session:<token>"
    user_agent: str
    game_id: Optional[str] = None  # loaded from the "games" hash when authenticated
    feedback: Optional[str] = None  # unix timestamp of last feedback submission
    time_zone: Optional[str] = None
    user_id: Optional[str] = None  # set only for authenticated sessions

    def __post_init__(self) -> None:
        """Derive authenticated/id_, load game_id, and bump last_seen in redis."""
        if self.user_id is not None:
            self.authenticated = True
            self.id_ = self.user_id
        else:
            self.authenticated = False
            self.id_ = self.token
        if self.authenticated:
            self.game_id = flask.g.redis.hget("games", self.id_)
        # Touch the session on every construction (i.e. every request).
        self.last_seen = str(time.time())
        flask.g.redis.hset(f"session:{self.token}", "last_seen", self.last_seen)

    def all(self) -> List["Session"]:
        """Return a list of the user's sessions."""
        if self.authenticated:
            # One Session per token tracked for this user in redis.
            return [
                Session(token=token, **flask.g.redis.hgetall(f"session:{token}"))
                for token in flask.g.redis.smembers(f"user:sessions:{self.user_id}")
            ]
        else:
            return [self]

    def update_feedback_timestamp(self) -> str:
        """Record 'now' as the feedback timestamp in redis and return it."""
        self.feedback = str(time.time())
        flask.g.redis.hset(f"session:{self.token}", "feedback", self.feedback)
        return self.feedback

    def delete_all(self) -> None:
        """Delete all of the user's sessions."""
        if self.authenticated:
            # Batch the deletions in a single pipeline round-trip.
            pipeline: redis.client.Pipeline = flask.g.redis.pipeline()
            for token in flask.g.redis.smembers(f"user:sessions:{self.user_id}"):
                pipeline.delete(f"session:{token}")
            pipeline.delete(f"user:sessions:{self.user_id}")
            pipeline.execute()
            flask.session.clear()
        else:
            self.delete()

    def delete(self) -> None:
        """Delete the session (log out)."""
        if self.authenticated and self.user_id is not None:
            flask.g.redis.srem(f"user:sessions:{self.user_id}", self.token)
        flask.g.redis.delete(f"session:{self.token}")
        flask.session.clear()
| 31.225806 | 84 | 0.635331 | 2,201 | 0.75792 | 0 | 0 | 2,224 | 0.76584 | 0 | 0 | 994 | 0.342287 |
2fb4bdf20e4a003ba2d5d20ee0a6939756b1f837 | 19,736 | py | Python | RDS/Service.py | Sciebo-RDS/py-research-data-services-common | a699d4241055fac226cf60e6146e4cbf96936a5c | [
"MIT"
] | null | null | null | RDS/Service.py | Sciebo-RDS/py-research-data-services-common | a699d4241055fac226cf60e6146e4cbf96936a5c | [
"MIT"
] | null | null | null | RDS/Service.py | Sciebo-RDS/py-research-data-services-common | a699d4241055fac226cf60e6146e4cbf96936a5c | [
"MIT"
] | null | null | null | from .Token import Token, OAuth2Token
from .User import User
from .Informations import LoginMode, FileTransferMode, FileTransferArchive
from urllib.parse import urlparse, urlunparse
import requests
import json
from datetime import datetime, timedelta
from typing import Union
import logging
import mimetypes
import base64
import os.path
from io import IOBase
from urllib import parse
logger = logging.getLogger()
def initService(obj: Union[str, dict]):
    """
    Return a service object for the given JSON string or dict.

    Already-constructed LoginService / OAuth2Service instances are passed
    through unchanged. Otherwise each known deserializer is tried in turn,
    most specific service type first.

    Raises:
        ValueError: if ``obj`` is neither a str nor a dict (nor a service).
    """
    if isinstance(obj, (LoginService, OAuth2Service)):
        return obj

    if not isinstance(obj, (str, dict)):
        raise ValueError("Given object not from type str or dict.")

    from RDS.Util import try_function_on_dict

    loaders = [
        OAuth2Service.from_json,
        OAuth2Service.from_dict,
        LoginService.from_json,
        LoginService.from_dict,
        BaseService.from_json,
        BaseService.from_dict,
    ]
    return try_function_on_dict(loaders)(obj)
class BaseService:
    """
    Represents a service, which can be used in RDS.
    """

    _servicename = None
    _implements = None
    _fileTransferMode = None
    _fileTransferArchive = None
    _description = None
    _icon = None
    _infoUrl = None
    _helpUrl = None
    _displayName = None

    def __init__(
        self,
        servicename: str,
        implements: list = None,
        fileTransferMode: FileTransferMode = FileTransferMode.active,
        fileTransferArchive: FileTransferArchive = FileTransferArchive.none,
        description: dict = None,
        icon: str = "",
        infoUrl: str = "",
        helpUrl: str = "",
        displayName: str = None
    ):
        """Initialize Service without any authentication.

        Args:
            servicename (str): The name of the service, which will be registered. Must be unique.
            implements (list, optional): Specified the implemented port endpoints. Defaults to empty list.
            fileTransferMode (int, optional): Set the mode for transfering files. Defaults to 0=active. Alternative is 1=passive.
            fileTransferArchive (str, optional): Set the archive, which is needed for transfering files. Defaults to "". Other value is "zip"
            description (dict, optional): Set a short description for this service with corresponding language. Defaults to {"en":""}.
            icon: (str, optional): Takes a filepath, so the mimetype and base64 can be calculated for later usage. Defaults to "".
            infoUrl: (str, optional): Set the infoUrl for this service, so the user can be redirected to it to find more information about the service. Defaults to "".
            helpUrl: (str, optional): Set the helpUrl for this service, so the user can be redirected to a helpdesk page about this service. Defaults to "".
            displayName: (str, optional): Set the displayName for this service, which can be different as the servicename. Servicename will be used for identifiers. Defaults to "".

        Raises:
            ValueError: if servicename is empty or ``implements`` holds invalid values.
            FileNotFoundError: if ``icon`` is neither a data URI nor an existing file.
        """
        self.check_string(servicename, "servicename")
        self._servicename = servicename.lower()

        if description is None:
            self._description = {"en": ""}
        else:
            self._description = description

        # URLs are stored percent-encoded; input that is already encoded
        # (unquoting changes it) is kept as-is.
        if infoUrl is not None:
            if parse.unquote_plus(infoUrl) == infoUrl:
                self._infoUrl = parse.quote_plus(infoUrl)
            else:
                self._infoUrl = infoUrl
        else:
            self._infoUrl = ""

        if helpUrl is not None:
            if parse.unquote_plus(helpUrl) == helpUrl:
                self._helpUrl = parse.quote_plus(helpUrl)
            else:
                self._helpUrl = helpUrl
        else:
            self._helpUrl = ""

        if displayName is not None:
            self._displayName = displayName
        else:
            self._displayName = ""

        # The icon is stored as a data URI; plain file paths are inlined
        # as base64 with the guessed mimetype.
        if icon is not None and icon != "":
            if isinstance(icon, (str)) and str(icon).startswith("data:"):
                self._icon = icon
            elif os.path.isfile(icon):
                mime = mimetypes.guess_type(icon)[0]
                with open(icon, "rb") as f:
                    b64 = base64.b64encode(f.read()).decode("utf-8")
                self._icon = f"data:{mime};base64,{b64}"
            else:
                # Bug fix: include the offending path in the exception so
                # callers can see which icon was missing.
                raise FileNotFoundError(icon)

        self._implements = implements
        if implements is None:
            self._implements = []

        valid_implements = ["fileStorage", "metadata"]
        if len(self._implements) == 0 or len(self._implements) > 2:
            raise ValueError(
                "implements is empty or over 2 elements. Value: {}, Only valid: {}".format(
                    len(self._implements), valid_implements)
            )

        for impl in self._implements:
            if impl not in valid_implements:
                raise ValueError("implements holds an invalid value: {}. Only valid: {}".format(
                    impl, valid_implements))

        self._fileTransferMode = fileTransferMode
        self._fileTransferArchive = fileTransferArchive

    @property
    def servicename(self):
        return self._servicename

    @property
    def fileTransferMode(self):
        return self._fileTransferMode

    @property
    def fileTransferArchive(self):
        return self._fileTransferArchive

    @property
    def description(self):
        return self._description

    @property
    def icon(self):
        return self._icon

    @property
    def infoUrl(self):
        return self._infoUrl

    @property
    def helpUrl(self):
        return self._helpUrl

    @property
    def displayName(self):
        return self._displayName

    @property
    def implements(self):
        return self._implements

    def check_string(self, obj: str, string: str):
        """Raise ValueError if ``obj`` is empty; ``string`` names the field."""
        if not obj:
            raise ValueError(f"{string} cannot be an empty string.")

    def is_valid(self, token: Token, user: User):
        """Subclass hook: validate a token for a user. Base does nothing."""
        pass

    def __eq__(self, obj):
        # Services are identified solely by their (lowercased) servicename.
        try:
            return self.servicename == obj.servicename
        except:
            return False

    def __str__(self):
        # Bug fix: json.dumps(self) raised TypeError because a BaseService
        # is not JSON serializable; serialize the dict representation.
        return json.dumps(self.to_dict())

    def __repr__(self):
        return json.dumps(self.to_dict())

    def to_json(self):
        """
        Returns this object as a json string.
        """
        data = {"type": self.__class__.__name__, "data": self.to_dict()}
        return data

    def to_dict(self):
        """
        Returns this object as a dict.
        """
        data = {
            "servicename": self._servicename,
            "implements": self._implements,
            "fileTransferMode": self.fileTransferMode.value,
            "fileTransferArchive": self.fileTransferArchive.value,
            "description": self._description,
            "icon": self._icon,
            "infoUrl": self._infoUrl,
            "helpUrl": self._helpUrl,
            "displayName": self._displayName
        }
        return data

    @classmethod
    def from_json(cls, serviceStr: str):
        """
        Returns an service object from a json string.
        """
        data = serviceStr
        while (
            type(data) is not dict
        ):  # FIX for bug: JSON.loads sometimes returns a string
            data = json.loads(data)

        if "type" in data and str(data["type"]).endswith("Service") and "data" in data:
            data = data["data"]
            return BaseService(
                servicename=data["servicename"],
                implements=data.get("implements"),
                fileTransferMode=FileTransferMode(
                    data.get("fileTransferMode", 0)),
                fileTransferArchive=FileTransferArchive(
                    data.get("fileTransferArchive", 0)),
                description=data.get("description"),
                icon=data.get("icon"),
                infoUrl=data.get("infoUrl"),
                helpUrl=data.get("helpUrl"),
                displayName=data.get("displayName")
            )

        raise ValueError("not a valid service json string.")

    @classmethod
    def from_dict(cls, serviceDict: dict):
        """
        Returns an service object from a dict string.
        """
        try:
            return BaseService(
                servicename=serviceDict["servicename"],
                implements=serviceDict.get("implements", ["metadata"]),
                fileTransferMode=FileTransferMode(
                    serviceDict.get("fileTransferMode", 0)),
                fileTransferArchive=FileTransferArchive(
                    serviceDict.get("fileTransferArchive", 0)),
                description=serviceDict.get("description"),
                icon=serviceDict.get("icon"),
                infoUrl=serviceDict.get("infoUrl"),
                helpUrl=serviceDict.get("helpUrl"),
                displayName=serviceDict.get("displayName")
            )
        except Exception as e:
            logger.error(e, exc_info=True)
            # Bug fix: the message previously formatted cls.__class__ (the
            # metaclass) instead of the class itself.
            raise ValueError("not a valid service dict for class {}".format(
                cls.__name__))
class LoginService(BaseService):
    """
    Represents a service authenticated with username/password credentials.

    ``userId`` and ``password`` are flags describing which credential
    fields the service requires from the user.
    """

    _userId = None
    _password = None

    def __init__(
        self,
        userId: bool = True,
        password: bool = True,
        *args, **kwargs
    ):
        """Initialize Service with username:password authentication.

        Args:
            userId (bool, optional): Set True, if username is needed to work. Defaults to True.
            password (bool, optional): Set True, if password is needed to work. Defaults to True.
        """
        super().__init__(*args, **kwargs)

        self._userId = userId
        self._password = password

    @property
    def userId(self):
        return self._userId

    @property
    def password(self):
        return self._password

    def to_json(self):
        """
        Returns this object as a json string.
        """
        data = super().to_json()

        data["type"] = self.__class__.__name__
        data["data"].update(self.to_dict())

        return data

    def to_dict(self):
        """
        Returns this object as a dict.
        """
        data = super().to_dict()
        data["credentials"] = {
            "userId": self.userId, "password": self.password
        }

        return data

    @classmethod
    def from_json(cls, serviceStr: str):
        """
        Returns a loginservice object from a json string.
        """
        data = serviceStr
        while (
            type(data) is not dict
        ):  # FIX for bug: JSON.loads sometimes returns a string
            data = json.loads(data)

        service = super().from_json(serviceStr)

        try:
            data = data["data"]
            cred = data.get("credentials", {})
            return cls.from_service(
                service,
                cred.get("userId", True),
                cred.get("password", True)
            )
        except Exception:
            # Bug fix: the message previously said "oauthservice" (copy-paste
            # from OAuth2Service); a bare except also swallowed SystemExit
            # and KeyboardInterrupt.
            raise ValueError("not a valid loginservice json string.")

    @classmethod
    def from_dict(cls, serviceDict: dict):
        """
        Returns a loginservice object from a dict.
        """
        service = super().from_dict(serviceDict)

        try:
            cred = serviceDict.get("credentials", {})
            return cls.from_service(
                service,
                cred.get("userId", True),
                cred.get("password", True)
            )
        except Exception:
            raise ValueError("not a valid loginservice dict.")

    @classmethod
    def from_service(
        cls,
        service: BaseService,
        userId: bool,
        password: bool
    ):
        """Build a LoginService by copying all base fields from ``service``."""
        return cls(
            userId=userId,
            password=password,
            servicename=service.servicename,
            implements=service.implements,
            fileTransferMode=service.fileTransferMode,
            fileTransferArchive=service.fileTransferArchive,
            description=service.description,
            icon=service.icon,
            infoUrl=service.infoUrl,
            helpUrl=service.helpUrl,
            displayName=service.displayName,
        )
class OAuth2Service(BaseService):
    """
    Represents an OAuth2 service, which can be used in RDS.
    This service enables the oauth2 workflow.
    """

    _authorize_url = None
    _refresh_url = None
    _client_id = None
    _client_secret = None

    def __init__(
        self,
        authorize_url: str = "",
        refresh_url: str = "",
        client_id: str = "",
        client_secret: str = "",
        *args, **kwargs
    ):
        """Initialize a service for oauth2.

        Args:
            authorize_url (str, optional): The authorize url from oauth2 workflow. Defaults to "".
            refresh_url (str, optional): The refresh url from oauth2 workflow. Defaults to "".
            client_id (str, optional): The client id from oauth2 workflow. Defaults to "".
            client_secret (str, optional): The client secret from oauth2 workflow. Defaults to "".

        Raises:
            ValueError: if any argument is empty or a url lacks a protocol.
        """
        super().__init__(*args, **kwargs)

        self.check_string(authorize_url, "authorize_url")
        self.check_string(refresh_url, "refresh_url")
        self.check_string(client_id, "client_id")
        self.check_string(client_secret, "client_secret")

        # URLs are stored as parsed 6-tuples (urlparse results), not strings.
        self._authorize_url = self.parse_url(authorize_url)
        self._refresh_url = self.parse_url(refresh_url)

        self._client_id = client_id
        self._client_secret = client_secret

    def parse_url(self, url: str):
        """Parse ``url``, require a netloc, and strip one trailing slash."""
        u = urlparse(url)
        if not u.netloc:
            raise ValueError("URL needs a protocoll")

        # check for trailing slash for url
        if u.path and u.path[-1] == "/":
            u = u._replace(path=u.path[:-1])

        return u

    def refresh(self, token: OAuth2Token):
        """
        Refresh the given oauth2 token for specified user.

        Performs the OAuth2 refresh_token grant against ``refresh_url`` and
        maps RFC 6749 error codes to RDS-specific exceptions.
        """
        if not isinstance(token, OAuth2Token):
            logger.debug("call refresh on non oauth token.")
            raise ValueError("parameter token is not an oauthtoken.")

        import os
        data = {
            "grant_type": "refresh_token",
            "refresh_token": token.refresh_token,
            "redirect_uri": "{}".format(
                os.getenv("RDS_OAUTH_REDIRECT_URI",
                          "http://localhost:8080/redirect")
            ),
            "client_id": self.client_id,
            "client_secret": self.client_secret,
        }

        logger.debug(f"send data {data}")

        req = requests.post(
            self.refresh_url,
            data=data,
            auth=(self.client_id, self.client_secret),
            verify=(os.environ.get("VERIFY_SSL", "True") == "True"),
        )

        logger.debug(f"status code: {req.status_code}")

        if req.status_code >= 400:
            data = json.loads(req.text)

            if "error" in data:
                error_type = data["error"]

                # Map the RFC 6749 token-endpoint error codes onto the
                # RDS.ServiceException hierarchy.
                if error_type == "invalid_request":
                    from RDS.ServiceException import OAuth2InvalidRequestError

                    raise OAuth2InvalidRequestError()
                elif error_type == "invalid_client":
                    from RDS.ServiceException import OAuth2InvalidClientError

                    raise OAuth2InvalidClientError()
                elif error_type == "invalid_grant":
                    from RDS.ServiceException import OAuth2InvalidGrantError

                    raise OAuth2InvalidGrantError()
                elif error_type == "unauthorized_client":
                    from RDS.ServiceException import OAuth2UnauthorizedClient

                    raise OAuth2UnauthorizedClient()
                elif error_type == "unsupported_grant_type":
                    from RDS.ServiceException import OAuth2UnsupportedGrantType

                    raise OAuth2UnsupportedGrantType()

            from RDS.ServiceException import OAuth2UnsuccessfulResponseError

            raise OAuth2UnsuccessfulResponseError()

        data = json.loads(req.text)

        logger.debug(f"response data {data}")

        # Cap the token lifetime at one hour even if the provider grants more.
        exp_date = data["expires_in"]
        if exp_date > 3600:
            exp_date = 3600
        date = datetime.now() + timedelta(seconds=exp_date)

        # Providers may omit refresh_token; keep the old one in that case.
        new_token = OAuth2Token(
            token.user,
            self,
            data["access_token"],
            data.get("refresh_token", token.refresh_token),
            date,
        )
        logger.debug(f"new token {new_token}")

        return new_token

    @property
    def refresh_url(self):
        # Reassemble the stored parse result back into a string.
        return urlunparse(self._refresh_url)

    @property
    def authorize_url(self):
        return urlunparse(self._authorize_url)

    @property
    def client_id(self):
        return self._client_id

    @property
    def client_secret(self):
        return self._client_secret

    @classmethod
    def from_service(
        cls,
        service: BaseService,
        authorize_url: str,
        refresh_url: str,
        client_id: str,
        client_secret: str,
    ):
        """
        Converts the given Service to an oauth2service.
        """
        return cls(
            authorize_url=authorize_url,
            refresh_url=refresh_url,
            client_id=client_id,
            client_secret=client_secret,
            servicename=service.servicename,
            implements=service.implements,
            fileTransferMode=service.fileTransferMode,
            fileTransferArchive=service.fileTransferArchive,
            description=service.description,
            icon=service.icon,
            infoUrl=service.infoUrl,
            helpUrl=service.helpUrl,
            displayName=service.displayName,
        )

    def __eq__(self, obj):
        # Equality is inherited: services compare by servicename only.
        return super().__eq__(obj)

    def to_json(self):
        """
        Returns this object as a json string.
        """
        data = super().to_json()

        data["type"] = self.__class__.__name__
        data["data"].update(self.to_dict())

        return data

    def to_dict(self):
        """
        Returns this object as a dict.
        """
        data = super().to_dict()
        data["authorize_url"] = self.authorize_url
        data["refresh_url"] = self.refresh_url
        data["client_id"] = self._client_id
        data["client_secret"] = self._client_secret

        return data

    @classmethod
    def from_json(cls, serviceStr: str):
        """
        Returns an oauthservice object from a json string.
        """
        data = serviceStr
        while (
            type(data) is not dict
        ):  # FIX for bug: JSON.loads sometimes returns a string
            data = json.loads(data)

        service = super().from_json(serviceStr)

        try:
            data = data["data"]
            return cls.from_service(
                service,
                data["authorize_url"],
                data["refresh_url"],
                data["client_id"],
                data.get("client_secret", ""),
            )
        except:
            raise ValueError("not a valid oauthservice json string.")

    @classmethod
    def from_dict(cls, serviceDict: dict):
        """
        Returns an oauthservice object from a dict.
        """
        service = super().from_dict(serviceDict)

        try:
            return cls.from_service(
                service,
                serviceDict["authorize_url"],
                serviceDict["refresh_url"],
                serviceDict["client_id"],
                serviceDict.get("client_secret", ""),
            )
        except:
            raise ValueError("not a valid oauthservice dict.")
| 30.223583 | 180 | 0.576054 | 18,658 | 0.945379 | 0 | 0 | 7,067 | 0.358077 | 0 | 0 | 5,196 | 0.263275 |
2fb59f7dc9cf3f57bdfd77504fb5c1199e6cd587 | 20,166 | py | Python | mBusi/serializers.py | hellohufan/beautyServer | 8a5109881b7065fd742f0a330b142248a7cdcef6 | [
"BSD-2-Clause"
] | null | null | null | mBusi/serializers.py | hellohufan/beautyServer | 8a5109881b7065fd742f0a330b142248a7cdcef6 | [
"BSD-2-Clause"
] | 5 | 2020-06-06T01:11:25.000Z | 2021-09-08T02:02:34.000Z | mBusi/serializers.py | hellohufan/beautyServer | 8a5109881b7065fd742f0a330b142248a7cdcef6 | [
"BSD-2-Clause"
] | null | null | null | from rest_framework import serializers
from ic_shop import models as model
from django.contrib.auth import get_user_model
from mBusi import settings as setting
from django.http import request
User = get_user_model()
class LoginSerializer(serializers.ModelSerializer):
    """Serializer for login payloads (username/password) on the User model."""
    # NOTE(review): password is not declared write_only here; confirm this
    # serializer is never used to render responses.
    username = serializers.CharField(required=False, max_length=1024)
    password = serializers.CharField(required=False, max_length=1024)

    class Meta:
        model = User
        fields = ('id', 'username', 'password', 'nickname')
class ProfileTypeSerializer(serializers.ModelSerializer):
    """Plain ModelSerializer exposing every field of ProfileType."""
    class Meta:
        model = model.ProfileType
        fields = '__all__'
class ProfileSerializer(serializers.ModelSerializer):
    """Serializer for Profile accounts; hashes the password on create/update."""
    username = serializers.CharField(help_text='手机账户名', default='')
    password = serializers.CharField(help_text='密码', write_only=True, default='')
    sex = serializers.CharField(help_text='性别', allow_null=True, allow_blank=True, required=False)
    birthDate = serializers.CharField(help_text='出生日期', allow_null=True, allow_blank=True, required=False)
    avatar = serializers.FileField(help_text='头像', allow_null=True, required=False)
    # NOTE(review): help_text '头像' (avatar) on nickName looks copy-pasted;
    # confirm the intended label.
    nickName = serializers.CharField(help_text='头像', allow_null=True, allow_blank=True, required=False)
    mobile = serializers.CharField(help_text='手机号', allow_null=False, allow_blank=False, required=True,
                                   error_messages={
                                       'required': '需填写手机号'})
    company_id = serializers.CharField(help_text='公司名称', allow_null=True, allow_blank=True, required=False)

    class Meta:
        model = model.Profile
        fields = 'id', 'username', 'password', 'sex', 'birthDate', 'is_active', 'first_name', 'last_name', \
                 'email', 'nickName', 'mobile', 'avatar', 'last_login', 'company_id'

    def create(self, validated_data):
        """Create a Profile, storing the password hashed via set_password."""
        password = validated_data.pop('password', None)
        instance = self.Meta.model(**validated_data)
        if password is not None:
            instance.set_password(password)
        instance.save()
        return instance

    def update(self, instance, validated_data):
        """Update fields in place; 'password' is routed through set_password."""
        for attr, value in validated_data.items():
            if attr == 'password':
                instance.set_password(value)
            else:
                setattr(instance, attr, value)
        instance.save()
        return instance
class TagSerializer(serializers.ModelSerializer):
    """Plain ModelSerializer exposing every field of Tag."""
    class Meta:
        model = model.Tag
        fields = '__all__'
class ProfileAvatarSerializer(serializers.ModelSerializer):
    """Plain ModelSerializer exposing every field of ProfileAvatar."""
    class Meta:
        model = model.ProfileAvatar
        fields = '__all__'
class DeviceTypeSerializer(serializers.ModelSerializer):
    """Plain ModelSerializer exposing every field of DeviceType."""
    class Meta:
        model = model.DeviceType
        fields = '__all__'
class BrandSerializer(serializers.ModelSerializer):
    """Serializer exposing the public Brand fields only."""
    class Meta:
        model = model.Brand
        fields = 'id', 'title', 'status', 'description'
class ShopItemSerializer(serializers.ModelSerializer):
    """Serializer for ShopItem rows, enriched with the brand name and the
    absolute host URL prefix that clients use to build media URLs."""
    brand_id = serializers.CharField(help_text='brand_id', required=False)
    brandName = serializers.SerializerMethodField('get_shop_item_brand', read_only=True)
    file_prefix = serializers.SerializerMethodField('get_host_url', read_only=True)

    class Meta:
        model = model.ShopItem
        fields = '__all__'

    def get_host_url(self, obj):
        """Return scheme://host of the current request ('' without a request)."""
        request = self.context.get('request', None)
        if request is not None:
            return request.build_absolute_uri('/').strip('/')
        return ''

    def get_shop_item_brand(self, obj):
        """Return the title of the item's brand, or '' when falsy.

        Bug fix: this method was decorated @staticmethod while still
        declaring ``self``. DRF's SerializerMethodField invokes the method
        as ``method(obj)``, so the staticmethod variant received the model
        instance in ``self`` and crashed with a TypeError for the missing
        ``obj`` argument. It is now a regular instance method; the debug
        ``print`` was removed as well.
        """
        # NOTE(review): Brand.objects.get raises DoesNotExist when the id is
        # unknown; the falsy check below can then never return '' -- confirm
        # whether a try/except was intended.
        shop_item_brand = model.Brand.objects.get(id=obj.brand_id)
        if shop_item_brand:
            return shop_item_brand.title
        return ''
# class DeviceSlotSerializer(serializers.ModelSerializer):
# # shopItem = ShopItemSerializer(read_only=True, many=True)
# device_id = serializers.CharField(help_text='device_id', required=True)
# max_capacity = serializers.CharField(help_text='max_capacity', required=True)
# status = serializers.CharField(help_text='-0- 未激活, 1 - 激活, 2 - 过期, 3 - 已经被占用, 4, - 故障', required=False)
# faultCode = serializers.CharField(help_text='1:正常 , 2:送料电机故障,3:顶出的电机故障,4、电动门电磁阀故障', required=False)
#
# currentStorage = serializers.SerializerMethodField('Get_currentStorage', read_only=True)
# shopItem = serializers.SerializerMethodField('Get_shopItem', read_only=True)
# updateVersion = serializers.SerializerMethodField('Get_updateVersion', read_only=True)
#
# # deviceSlot = serializers.SerializerMethodField('Get_deviceSlot', read_only=True)
# # shopItem = ShopItemSerializer(read_only=True, many=True)
#
# class Meta:
# model = model.DeviceSlot
# fields = 'id', 'device_id', 'max_capacity', 'slotNum', 'status', 'faultCode', 'shopItem', 'currentStorage', 'updateVersion'
#
# # # # 获取many to many 相关联对象model的单一field,
# def Get_shopItem(self, obj):
# # shopItem = list(model.ShopItem.objects.filter(deviceSlot=obj.id).values())
# shopItem = list(model.ShopItem.objects.filter(deviceSlot=obj.id).values())
#
# if len(shopItem) > 0:
# # category = model.ShopItemCategory.objects.get(id=shopItem[0]['category_id'])
# brand = model.Brand.objects.get(id=shopItem[0]['brand_id'])
# # shopItem[0]['categoryName'] = category.name
# shopItem[0]['brandName'] = brand.title
#
# url = shopItem[0]['img']
# request = self.context.get('request', None)
# if request is not None:
# shopItem[0]['img'] = request.build_absolute_uri('/').strip('/') + setting.MEDIA_URL + url
# return shopItem
#
# return {}
#
# # 获取many to many 相关联对象model的单一field,
# def Get_currentStorage(self, obj):
# shopItemStorage = list(model.ShopItemStorage.objects.filter(deviceSlot=obj.id).values())
# # print(shopItemStorage)
# if len(shopItemStorage) > 0:
# return shopItemStorage[0]['currentStorage']
#
# return 0
#
# def Get_updateVersion(self, obj):
# try:
# device = model.Device.objects.get(id=obj.device_id)
# return device.updateVersion
# except model.Device.DoesNotExist:
# return -1
# class ShopItemStorageSerializer(serializers.ModelSerializer):
# currentStorage = serializers.CharField(help_text='当前存货', required=True)
# deviceSlot_id = serializers.CharField(help_text='deviceSlot_id', required=True)
# shopItem_id = serializers.CharField(help_text='shopItem_id', required=True)
# new_shopItem_id = serializers.CharField(help_text='如果需要更改卡槽的商品,添加此字段', required=False, write_only=True)
#
# class Meta:
# # depth = 3
# model = model.ShopItemStorage
# fields = 'currentStorage', 'deviceSlot_id', 'shopItem_id', 'new_shopItem_id'
# class ShopItemStorageHistorySerializer(serializers.ModelSerializer):
# shopItem = serializers.SerializerMethodField('Get_shopItem', read_only=True)
# pre_shopItem = serializers.SerializerMethodField('Get_preShopItem', read_only=True)
#
# class Meta:
# model = model.ShopItemStorageHistory
# fields = '__all__'
#
# def Get_shopItem(self, obj):
# shopItem = list(model.ShopItem.objects.filter(id=obj.shopItem_id).values())
# if len(shopItem) > 0:
# url = shopItem[0]['img']
# request = self.context.get('request', None)
# if request is not None:
# shopItem[0]['img'] = request.build_absolute_uri('/').strip('/') + setting.MEDIA_URL + url
# return shopItem[0]
#
# return {}
#
# def Get_preShopItem(self, obj):
# shopItem = list(model.ShopItem.objects.filter(id=obj.pre_shopItem_id).values())
# if len(shopItem) > 0:
# url = shopItem[0]['img']
# request = self.context.get('request', None)
# if request is not None:
# shopItem[0]['img'] = request.build_absolute_uri('/').strip('/') + setting.MEDIA_URL + url
# return shopItem[0]
#
# return {}
# class DeviceAdsTypeSerializer(serializers.ModelSerializer):
# class Meta:
# model = model.DeviceAdsType
# fields = '__all__'
class DeviceMacStatusSerializer(serializers.ModelSerializer):
class Meta:
model = model.DeviceMacStatus
fields = '__all__'
class DeviceSerializer(serializers.ModelSerializer):
class Meta:
# depth = 1
model = model.Device
fields = '__all__'
name = serializers.CharField(help_text='name', required=True)
description = serializers.CharField(help_text='description', required=False)
updateVersion = serializers.FloatField(help_text='补货员更新存货-版本号', required=False, default='1.0')
status = serializers.CharField(help_text='0:未激活状态,1:正常运作,2:离线状态,3:报废', required=True, write_only=True)
# republishStatus = serializers.CharField(help_text='设备补货状态码,0:无需补货,1:待补货,2:亟待补货', required=True, write_only=True)
deviceType_id = serializers.CharField(help_text='deviceType_id', required=True, write_only=True)
company_id = serializers.CharField(help_text='company_id', required=True, write_only=True)
deviceLocation = serializers.SerializerMethodField('Get_deviceLocation', read_only=True)
deviceTypeName = serializers.SerializerMethodField('Get_deviceTypeName', read_only=True)
deviceCompany = serializers.SerializerMethodField('Get_deviceCompany', read_only=True)
# slotCapacity = serializers.SerializerMethodField('Get_totalSlotCapacity', read_only=True)
# totalShopItemCapacity = serializers.SerializerMethodField('Get_totalShopItemCapacity',read_only=True)
deviceStatus = serializers.SerializerMethodField('Get_deviceStatus', read_only=True)
# deviceSlotNum = serializers.SerializerMethodField('Get_totalSlotNumber', read_only=True)
def create(self, validated_data):
# print(validated_data.get('deviceType_id'))
device = model.Device.objects.create(name=validated_data.get('name'), updateVersion=validated_data.get('updateVersion'),
deviceType_id=validated_data.get('deviceType_id'),
appVersion=validated_data.get('appVersion'), androidVersion=validated_data.get('androidVersion'),
deviceVersion=validated_data.get('deviceVersion'),
deviceSn=validated_data.get('deviceSn'), company_id=validated_data.get('company_id'),
settingTemperature=validated_data.get('settingTemperature'),
temperature=validated_data.get('temperature'),
)
deviceMacStatus = model.DeviceMacStatus.objects.create(device_id=device.id, status=validated_data.get('status'),
republishStatus=validated_data.get('republishStatus'))
return device
@staticmethod
def Get_deviceStatus(obj):
try:
deviceStatus = model.DeviceMacStatus.objects.get(device=obj.id)
# print(shopItem)
if deviceStatus:
status = {}
status['status'] = deviceStatus.status
status['republishStatus'] = deviceStatus.republishStatus
status['temperatureStatus'] = deviceStatus.temperatureStatus
status['faultStatus'] = deviceStatus.faultStatus
return status
except model.DeviceMacStatus.DoesNotExist:
return {}
@staticmethod
def Get_deviceLocation(self, obj):
try:
deviceLocation = list(model.DeviceLocation.objects.filter(device=obj.id).values())
# print(shopItem)
if len(deviceLocation) > 0:
deviceLocation[0]['fullAddress'] = deviceLocation[0]['provinceName'] + deviceLocation[0]['cityName'] \
+ deviceLocation[0]['regionName'] + deviceLocation[0]['addressDetail']
return deviceLocation[0]
except model.DeviceLocation.DoesNotExist:
return ''
@staticmethod
def Get_deviceTypeName(self, obj):
try:
deviceType = model.DeviceType.objects.get(id=obj.deviceType_id)
# print(shopItem)
if deviceType:
return deviceType.name
except model.DeviceType.DoesNotExist:
return ''
def Get_deviceCompany(self, obj):
try:
deviceCompany = model.Company.objects.get(id=obj.company_id)
# print(shopItem)
if deviceCompany:
return deviceCompany.name
except model.Company.DoesNotExist:
return ''
# def Get_totalSlotNumber(self, obj):
# try:
# deviceSlot = model.DeviceSlot.objects.filter(device_id=obj.id)
# # print(shopItem)
# if len(deviceSlot) > 0:
# return deviceSlot.count()
# except model.DeviceSlot.DoesNotExist:
# return 0
def Get_totalSlotCapacity(self, obj):
try:
arr_slots = list(model.DeviceSlot.objects.filter(device=obj.id).values())
# print(shopItem)
maxCapacity = 0
currentShopItemStorage = 0
if len(arr_slots) > 0 and isinstance(arr_slots, list):
for item in arr_slots:
# storage = model.ShopItemStorage.objects.filter(deviceSlot_id=item['id'])
# print('storage: ' + str(len(storage)))
maxCapacity += int(item['max_capacity'])
# print(maxCapacity)
shopItemStorage = list(model.ShopItemStorage.objects.filter(deviceSlot_id=item['id']).values())
if len(shopItemStorage) > 0:
currentShopItemStorage += int(shopItemStorage[0]['currentStorage'])
return {
'maxSlotCapacity': maxCapacity,
'currentItemStorage': currentShopItemStorage,
}
except model.DeviceSlot.DoesNotExist:
return {}
class DeviceOperationCodeSerializer(serializers.ModelSerializer):
class Meta:
model = model.DeviceOperationCode
fields = '__all__'
class UserDeviceSerializer(serializers.ModelSerializer):
class Meta:
model = model.UserDevice
fields = '__all__'
class DeviceLocationSerializer(serializers.ModelSerializer):
device = DeviceSerializer(read_only=True, required=False)
fullAddress = serializers.SerializerMethodField('Get_deviceFullAddress',required=False, read_only=True)
class Meta:
# depth = 1
model = model.DeviceLocation
fields = '__all__'
def Get_deviceFullAddress(self, obj):
return obj.provinceName + obj.cityName + obj.regionName + obj.addressDetail
class DeviceAdsSerializer(serializers.ModelSerializer):
# device_id = serializers.CharField(help_text='device_id', required=True)
class Meta:
model = model.DeviceAds
fields = '__all__'
class DeviceLocationHistorySerializer(serializers.ModelSerializer):
# device_id = serializers.CharField(help_text='device_id', required=True)
class Meta:
model = model.DeviceLocationHistory
fields = '__all__'
class ImageSerializer(serializers.ModelSerializer):
class Meta:
model = model.ImageUploader
fields = '__all__'
class ItemOrderSerializer(serializers.ModelSerializer):
shopItem_id = serializers.CharField(help_text='shopItem_id', required=True)
deviceSlot_id = serializers.CharField(help_text='deviceSlot_id', required=True)
device_id = serializers.CharField(help_text='device_id', required=True)
company_id = serializers.CharField(help_text='company_id', required=True)
orderTitle = serializers.CharField(help_text='商品名称', required=True)
orderNum = serializers.CharField(help_text='商品订单号 - 无需填写,后台生成', required=False)
update_timestamp = serializers.CharField(help_text='订单生成当前时间戳 - 无需填写,后台生成', required=False)
orderStatus = serializers.CharField(help_text='订单状态', required=False, read_only=True)
totalCount = serializers.CharField(help_text='商品总数', required=True)
actualTotalCount = serializers.CharField(help_text='实际出货商品总数', required=False)
itemOrderDetail = serializers.SerializerMethodField('Get_itemOrderStatus', read_only=True)
class Meta:
model = model.ItemOrder
fields = 'orderTitle', 'totalCount', 'actualTotalCount', 'totalPrize', 'shopItem_id', 'deviceSlot_id', 'device_id', 'company_id', \
'orderNum', 'update_timestamp', 'orderStatus', 'itemOrderDetail'
def Get_itemOrderStatus(self, obj):
try:
orderStatus = model.ItemOrderStatus.objects.get(orderNum_id=obj.orderNum)
return{
'status': orderStatus.orderStatus,
'buyer_user_id': orderStatus.buyer_user_id,
'buyer_logon_id': orderStatus.buyer_logon_id,
'orderCompleteStatus': orderStatus.orderCompleteStatus,
}
except model.ItemOrderStatus.DoesNotExist:
return {}
class ItemOrderStatusSerializer(serializers.ModelSerializer):
class Meta:
model = model.ItemOrderStatus
fields = '__all__'
class CommentSerializer(serializers.ModelSerializer):
user_id = serializers.CharField(help_text='user_id', required=True)
shopItem_id = serializers.CharField(help_text='shopItem_id', required=True)
class Meta:
model = model.Comment
fields = 'title', 'description', 'user_id', 'shopItem_id'
class CompanyTypeSerializer(serializers.ModelSerializer):
class Meta:
model = model.CompanyType
fields = '__all__'
class CompanySerializer(serializers.ModelSerializer):
type_id = serializers.CharField(help_text='type_id, 公司类型ID', required=True)
leader_id = serializers.CharField(help_text='leader_id, 公司负责人ID', required=True)
file_prefix = serializers.SerializerMethodField('Get_hostUrl', read_only=True)
class Meta:
model = model.Company
fields = 'id', 'name', 'describe', 'address', 'leader_id', 'mobile', 'type_id', 'logo', 'file_prefix',\
'offic_acc_url', 'offic_acc_content'
def Get_hostUrl(self, obj):
request = self.context.get('request', None)
if request is not None:
return request.build_absolute_uri('/').strip('/')
return ''
class DeviceUpdatePackageSerializer(serializers.ModelSerializer):
class Meta:
model = model.CompanyDeviceUpdatePackage
fields = '__all__'
class AreaInfoRegionSerializer(serializers.ModelSerializer):
class Meta:
model = model.AreasInfo
fields = '__all__'
| 41.071283 | 142 | 0.656451 | 15,619 | 0.757248 | 0 | 0 | 1,807 | 0.087608 | 0 | 0 | 8,703 | 0.421943 |
2fb6a56a00127a671a07053f33f3506270e44752 | 2,024 | py | Python | Python/alexandrian_integers.py | chathu1996/hacktoberfest2020 | eaf64ac051709984cde916259e90cb24213b5c2f | [
"MIT"
] | 71 | 2020-10-06T05:53:59.000Z | 2021-11-27T03:14:42.000Z | Python/alexandrian_integers.py | chathu1996/hacktoberfest2020 | eaf64ac051709984cde916259e90cb24213b5c2f | [
"MIT"
] | 92 | 2020-10-05T19:18:14.000Z | 2021-10-09T04:35:16.000Z | Python/alexandrian_integers.py | chathu1996/hacktoberfest2020 | eaf64ac051709984cde916259e90cb24213b5c2f | [
"MIT"
] | 572 | 2020-10-05T20:11:28.000Z | 2021-10-10T16:28:29.000Z | import itertools
import sys
# The numbers are of the form p(p+d)(p+(p^2+1)/d), where d runs over divisors
# of p^2+1 and p runs over all positive integers.
class Problem():
def __init__(self):
self.bound = 100000
self.primes = []
self._sieve()
def _sieve(self):
visited = [False] * (self.bound + 1)
visited[0] = visited[1] = True
for i in range(2, self.bound + 1):
if not visited[i]:
self.primes.append(i)
for j in range(i + i, self.bound + 1, i):
visited[j] = True
print('Prime count:', len(self.primes))
def solve(self):
alexandrian_integers = set()
for p in range(1, self.bound + 1):
if p % 100 == 0:
print('Current', p)
divisors = self._get_all_divisors(p**2 + 1)
for d in divisors:
if d > p:
break
alexandrian_integers.add(p*(p+d)*(p+(p**2+1)//d))
alexandrian_integers = list(alexandrian_integers)
alexandrian_integers.sort()
print(alexandrian_integers[150000-1])
def _get_all_divisors(self, n):
rv = []
factors = self._factorize(n)
for x in itertools.product(*map(lambda c: [c ** i for i in range(factors[c] + 1)], factors)):
z = 1
for y in x:
z *= y
rv.append(z)
rv.sort()
return rv
def _factorize(self, n):
d = n
rv = {}
for i in range(len(self.primes)):
p = self.primes[i]
if d == 1 or p > d:
break
count = 0
while d % p == 0:
d = d//p
count += 1
if count > 0:
rv[p] = count
if d > 1:
rv[d] = 1
return rv
def main():
problem = Problem()
problem.solve()
if __name__ == '__main__':
sys.exit(main())
| 28.914286 | 101 | 0.464427 | 1,729 | 0.854249 | 0 | 0 | 0 | 0 | 0 | 0 | 162 | 0.08004 |
2fb7fc495d1ad6e7249191b7a040562727f395c7 | 10,737 | py | Python | src/myastro/orb_deter.py | benitocm/practical-astronomy | 4bfea9d5b2bb49997f35e8c7b1ada2708ee6c978 | [
"Apache-2.0"
] | null | null | null | src/myastro/orb_deter.py | benitocm/practical-astronomy | 4bfea9d5b2bb49997f35e8c7b1ada2708ee6c978 | [
"Apache-2.0"
] | null | null | null | src/myastro/orb_deter.py | benitocm/practical-astronomy | 4bfea9d5b2bb49997f35e8c7b1ada2708ee6c978 | [
"Apache-2.0"
] | null | null | null | """
This module contains functions related to orbit calculations
"""
# Standard library imports
from typing import Any,Dict,List,Tuple,Sequence
#https://mypy.readthedocs.io/en/stable/cheat_sheet_py3.html
from functools import partial
from math import isclose
import sys
from io import StringIO
# Third party imports
import pandas as pd
import numpy as np
from numpy import sin, cos, tan, arcsin, arccos, arctan2, arctan, sqrt,cosh,sinh,deg2rad,rad2deg, sqrt, arcsinh
from toolz import pipe, compose, first, valmap
from myastro.util import pow, k_gauss, GM, c_light
import myastro.coord as co
import myastro.timeutil as tc
from math import isclose, fmod
from myastro.coord import polarFcartesian, make_ra, make_lat, mtx_eclip_prec, mtx_eclipFequat, cartesianFpolar
from myastro.timeutil import PI, reduce_rad, TWOPI, PI_HALF, epochformat2jd, jd2mjd, MDJ_J2000, CENTURY, T_J2000, mjd2jd
from myastro.planets import g_xyz_equat_sun_j2000, g_rlb_eclip_sun_eqxdate
from myastro.gauss_lagrange_eq import solve_equation
from myastro.orbit import calc_orb_elms_from_2rs, ElemsData, find_eta, show_orbital_elements
from numpy.linalg import multi_dot, norm
from myastro.cluegen import Datum
from itertools import count
from myastro.log import get_logger
logger = get_logger(__file__.split('/')[-1])
def select (rho_a, rho_b, rho_c, n_sol, n ):
"""
Selection of a single solution rho of the Gauss-Lagrangian equation
Args:
rho_a : First solution
rho_b : Second solution
rho_c : Third solution
n_sol : Total number of solutions
n : Number of selected solution
Returns:
The selected soluton rho
"""
if (n < 1) or (n_sol < n):
logger.error(f"Error in select n={n} and n_sol = {n_sol}")
return
else :
if n == 1 :
return rho_a
elif n == 2 :
return rho_b
elif n == 3 :
return rho_c
else :
return 0.0
def retard (t0_mjds, rhos) :
"""
Light time correction and computation of time differences
Args:
t0_mjds : List of three time of observations (t1', t2', t3') in Modified Julian Date
rhos : np.array with 3 elements where each element is the geocentric distance in [AU]
Returns:
t_mjds : List of times of light emittance (t1, t2, t3) in Modified Julian Date
taus : np.array with 3 elements where each element is the scales time differences.
"""
t_mjds = [0.0, 0.0, 0.0]
for i in range(0,3) :
t_mjds[i] = t0_mjds[i] - rhos[i]/c_light
tau = np.zeros(3)
tau[0] = k_gauss * (t_mjds[2] - t_mjds[1])
tau[1] = k_gauss * (t_mjds[2] - t_mjds[0])
tau[2] = k_gauss * (t_mjds[1] - t_mjds[0])
return t_mjds, tau
def gauss_method(R_suns, es, t0_mjds, n_run, max_iter=100, epsilon=1e-10):
"""
Orbit determination using Gauss's method
Args:
R_suns : List of three sun positions vectors in ecliptic-cartesian coordinates
It is a numpy 3x3 where each row is a r vector, e.g. for ith row, R_sun[i,:]
e : List of three observation direction unit vectors. It is a numpy 3x3 where each row
a e vector , e.g. for ith row, e[i,:]
t0_mjds : List of three observation times (Modified Julian Day), It is a numpy of 3 where
each value is a time reference
Returns:
A tuple with:
n_sol : Number of solutions found so that the caller method can iterate.
ElemsData : A data class with the Orbital elements.
"""
# d vectors (pg 232)
ds = [np.cross(es[1],es[2]),
np.cross(es[2],es[0]),
np.cross(es[0],es[1])]
# D matrix
D = np.zeros((3,3))
for i in range(0,3) :
for j in range (0,3) :
D[i,j] = np.dot(ds[i], R_suns[j])
# Det
det = np.dot(es[2], ds[2])
# Direction cosine of observation unit vector with respect to the Sun
# direction at time of second observation
gamma = np.dot(es[1],R_suns[1])/norm(R_suns[1])
# Time differences tau[i] and initial approximations of mu[0] and mu[2]
tau = np.zeros(3)
tau[0] = k_gauss * (t0_mjds[2] - t0_mjds[1])
tau[1] = k_gauss * (t0_mjds[2] - t0_mjds[0])
tau[2] = k_gauss * (t0_mjds[1] - t0_mjds[0])
mu = np.zeros(3)
mu[0] = (1.0/6.0) * tau[0]*tau[2] * (1.0+tau[0]/tau[1])
mu[2] = (1.0/6.0) * tau[0]*tau[2] * (1.0+tau[2]/tau[1])
rho = np.zeros(3)
n0 = np.zeros(3)
n = np.zeros(3)
eta = np.zeros(3)
r = np.zeros((3,3))
for i in count(0) :
rho_old = rho[1]
#Determine geocentric distance rho at time of second observation
# from the Gauss-Lagrangian equation
n0[0] = tau[0]/tau[1]
n0[2] = tau[2]/tau[1]
L = - ( n0[0]*D[1,0]-D[1,1]+n0[2]*D[1,2] ) / det
l = ( mu[0]*D[1,0] + mu[2]*D[1,2] ) / det
rho_min, rho_mean, rho_max, n_sol = solve_equation(gamma, norm(R_suns[1]), L, l )
print(f" Iter:{i}, n_sol: {n_sol}, {rho_min} {rho_mean} {rho_max}")
rho[1] = select(rho_min, rho_mean, rho_max, n_sol, n_run)
r[1,:] = rho[1]*es[1] - R_suns[1]
# Compute n1 and n3
n[0] = n0[0] + mu[0]/pow(norm(r[1,:]),3)
n[2] = n0[2] + mu[2]/pow(norm(r[1,:]),3)
# Geocentric distances rho_1 and rho_3 from n_1 and n_3
rho[0] = ( n[0]*D[0,0] - D[0,1] + n[2]*D[0,2] ) / (n[0]*det)
rho[2] = ( n[0]*D[2,0] - D[2,1] + n[2]*D[2,2] ) / (n[2]*det)
# Apply light time corrections and compute scaled time differences
# Retard
t_mjds, tau = retard(t0_mjds, rho)
# Heliocentric coordinate vector
for j in range(3) :
r[j,:] = rho[j]*es[j] - R_suns[j]
# Sector/triangle ratios eta_i
eta[0] = find_eta ( r[1,:], r[2,:], tau[0] )
eta[1] = find_eta ( r[0,:], r[2,:], tau[1] )
eta[2] = find_eta ( r[0,:], r[1,:], tau[2] )
# Improved values of mu_1, mu_3
mu[0] = ( eta[1]/eta[0] - 1.0 ) * (tau[0]/tau[1]) * pow(norm(r[1,:]),3)
mu[2] = ( eta[1]/eta[2] - 1.0 ) * (tau[2]/tau[1]) * pow(norm(r[1,:]),3)
if isclose(rho[1], rho_old, abs_tol=epsilon) :
break
if i == max_iter :
logger.error(f"Not converged after {max_iter} iterations and epsilon {epsilon}")
return
# Because the distances has been calculated, they are printed
rows=[]
for j in range(3) :
row = {}
row['Geocentric rho [AU]'] = f"{rho[j]}"
row['Heliocentric r [AU]'] = f"{norm(r[j,:])}"
rows.append(row)
df = pd.DataFrame(rows)
df = df.T
df.columns=['1 Obs.','2 Obs.','3 Obs.']
print (df)
print ()
return n_sol, calc_orb_elms_from_2rs(GM, r[0,:], t_mjds[0], r[2,:], t_mjds[2])
EXP_DT = {
# "year" : object,
# "month" : object,
# "day" : object,
# "fh" : object,
"ra_h" : object,
"ra_m" : object,
"ra_s" : object,
"dec_dg" : object,
"dec_min" : object,
"dec_sec" : object }
def read_body_points(body_observs):
df = pd.read_csv(body_observs, sep="\s+", dtype= EXP_DT)
df['jd'] = df.apply(lambda row: tc.datetime2jd(row['year'], row['month'], row['day'], hour=row['fh']), axis=1)
df['mjd'] = df['jd'].map(tc.jd2mjd)
df['ra'] = df['ra_h']+'h'+df['ra_m']+'m'+df['ra_s']+'s'
df['ra'] = df['ra'].map(co.make_ra)
df['dec'] = df['dec_dg']+'°'+df['dec_min']+'m'+df['dec_sec']+'s'
df['dec'] = df['dec'].map(co.make_lat)
cols =['jd','mjd','ra','dec']
return df[cols].copy()
CERES_OBSERVS = StringIO("""year month day fh ra_h ra_m ra_s dec_dg dec_min dec_sec
1805 09 05 24.165 6 23 57.54 22 21 27.08
1806 01 17 22.095 6 45 14.69 30 21 24.20
1806 05 23 20.399 8 07 44.60 28 02 47.04""")
ORKISZ_OBSERVS = StringIO("""year month day fh ra_h ra_m ra_s dec_dg dec_min dec_sec
1925 04 05 2.786 22 26 43.51 16 37 16.00
1925 04 08 2.731 22 29 42.90 19 46 25.10
1925 04 11 2.614 22 32 55.00 23 04 52.30""")
def main (observs, T_eqx0=0, T_eqx=0):
"""
Print the obtital elements obtained from the observations
Args:
observs : Datafrane with the 3 observations
T_eqx0 : Equinox in centuries of the observations
T_eqx : Equinox in centuries when we need the prediction
Returns:
None
"""
MTX_Teqx0_Teqx = mtx_eclip_prec(T_eqx0,T_eqx)
g_rlb_eclip_bodys = []
t_jds = []
g_xyz_eclip_suns = []
df = read_body_points(observs)
for row in df.itertuples(index=False):
g_rlb_eclip_bodys.append(pipe(np.array([1,row.ra,row.dec]),
cartesianFpolar,
MTX_Teqx0_Teqx.dot(mtx_eclipFequat(T_eqx)).dot,
polarFcartesian))
t_jds.append(row.jd)
T = (row.mjd - MDJ_J2000)/CENTURY
g_xyz_eclip_suns.append( pipe (g_rlb_eclip_sun_eqxdate(mjd2jd(row.mjd), tofk5=True) ,
cartesianFpolar,
mtx_eclip_prec(T,T_eqx0).dot))
# Print observations
print (f"Julian Day Solar Longitud [deg] Body Longitude [deg] Body Latitude [deg] ")
for i in range(0,3):
print (f"{t_jds[i]} {rad2deg(polarFcartesian(g_xyz_eclip_suns[i])[1]):03.6f} {rad2deg(g_rlb_eclip_bodys[i][1])} {rad2deg(g_rlb_eclip_bodys[i][2])}")
t_mjds = [jd2mjd(t) for t in t_jds]
g_xyx_eclip_bodys = [cartesianFpolar(g_rlb) for g_rlb in g_rlb_eclip_bodys]
for n_run in count(1):
# In the run 1, the solution 1 will be used
# In the run 2, the solution 2 will be used (in case it exist)
# In the run 3, the solution 3 will be used (in case it exist)
n_sol, elems = gauss_method(g_xyz_eclip_suns, g_xyx_eclip_bodys, t_mjds, n_run)
show_orbital_elements(elems)
if n_run >= n_sol :
break
def ceres():
T_eqx0 = (1806 - 2000)/100.0
T_eqx = (1806 - 2000)/100.0
main (CERES_OBSERVS, T_eqx0, T_eqx)
def orkisz():
T_eqx0 = (1925 - 2000)/100.0
T_eqx = (1925 - 2000)/100.0
main (ORKISZ_OBSERVS, T_eqx0, T_eqx)
if __name__ == "__main__" :
#orkisz()
ceres()
| 35.203279 | 177 | 0.566173 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 4,111 | 0.382846 |
2fb9281e34c7da3db73175b83cad1e4923b159c2 | 1,950 | py | Python | process.py | SuperMaxine/Gaze_Tracking_Exp | fbecc09bf084faa881d63d2f1bc196104941ffb5 | [
"MIT"
] | null | null | null | process.py | SuperMaxine/Gaze_Tracking_Exp | fbecc09bf084faa881d63d2f1bc196104941ffb5 | [
"MIT"
] | null | null | null | process.py | SuperMaxine/Gaze_Tracking_Exp | fbecc09bf084faa881d63d2f1bc196104941ffb5 | [
"MIT"
] | null | null | null | # -*- coding: UTF-8 -*-
"""
opencv实现人脸识别
参考:
1、https://github.com/opencv/opencv/tree/master/data/haarcascades
2、http://www.cnblogs.com/hanson1/p/7105265.html
"""
import cv2
def detect_face(image):
gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
'''
# 获取人脸识别训练数据
对于人脸特征的一些描述,opencv在读取完数据后很据训练中的样品数据,
就可以感知读取到的图片上的特征,进而对图片进行人脸识别。
xml数据下载,
参考:https://github.com/opencv/opencv/tree/master/data/haarcascades
'''
face_cascade = cv2.CascadeClassifier(r'./haarcascade_frontalface_default.xml')
# 探测人脸
# 根据训练的数据来对新图片进行识别的过程。
faces = face_cascade.detectMultiScale(
gray,
scaleFactor=1.15,
minNeighbors=5,
minSize=(5, 5),
# flags = cv2.HAAR_SCALE_IMAGE
)
# 我们可以随意的指定里面参数的值,来达到不同精度下的识别。返回值就是opencv对图片的探测结果的体现。
# 处理人脸探测的结果
print("发现{0}个人脸!".format(len(faces)))
for (x, y, w, h) in faces:
cv2.rectangle(image, (x, y), (x + w, y + w), (0, 255, 0), 2)
# cv2.circle(image,((x+x+w)/2,(y+y+h)/2),w/2,(0,255,0),2)
return image
# # 待检测的图片路径
# imagepath="nba.jpg"
#
# image = cv2.imread(imagepath)
# gray = cv2.cvtColor(image,cv2.COLOR_BGR2GRAY)
#
#
# '''
# # 获取人脸识别训练数据
#
# 对于人脸特征的一些描述,opencv在读取完数据后很据训练中的样品数据,
# 就可以感知读取到的图片上的特征,进而对图片进行人脸识别。
# xml数据下载,
# 参考:https://github.com/opencv/opencv/tree/master/data/haarcascades
# '''
# face_cascade = cv2.CascadeClassifier(r'./haarcascade_frontalface_default.xml')
#
# # 探测人脸
# # 根据训练的数据来对新图片进行识别的过程。
# faces = face_cascade.detectMultiScale(
# gray,
# scaleFactor = 1.15,
# minNeighbors = 5,
# minSize = (5,5),
# #flags = cv2.HAAR_SCALE_IMAGE
# )
#
# # 我们可以随意的指定里面参数的值,来达到不同精度下的识别。返回值就是opencv对图片的探测结果的体现。
#
# # 处理人脸探测的结果
# print ("发现{0}个人脸!".format(len(faces)))
# for(x,y,w,h) in faces:
# cv2.rectangle(image,(x,y),(x+w,y+w),(0,255,0),2)
# # cv2.circle(image,((x+x+w)/2,(y+y+h)/2),w/2,(0,255,0),2)
#
# cv2.imshow("image",image)
# cv2.waitKey(0)
# cv2.destroyAllWindows()
| 22.941176 | 82 | 0.650769 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 2,117 | 0.806784 |
2fb9ac82ed503a345c0e22abf61b359a4bddd5eb | 10,806 | py | Python | userbot/modules/warn.py | akborana/Devil | 30ef9c5ac910d6344e206921e343a0932ffd6460 | [
"MIT"
] | 1 | 2021-05-06T18:30:50.000Z | 2021-05-06T18:30:50.000Z | userbot/modules/warn.py | hellboi-atul/javes-3.0 | 8777d482bd1ee877a96332a2cd84d880c151fa43 | [
"MIT"
] | null | null | null | userbot/modules/warn.py | hellboi-atul/javes-3.0 | 8777d482bd1ee877a96332a2cd84d880c151fa43 | [
"MIT"
] | null | null | null | import re
import hashlib
import asyncio
import datetime
import logging
from userbot import CMD_HELP
import os
import math
import html
import os.path
import sys
import time
from typing import Tuple, Union
from userbot import bot
from telethon import errors
from telethon import events
from telethon.tl import types
from telethon.tl.functions.channels import (EditAdminRequest,
EditBannedRequest,
EditPhotoRequest)
from telethon.utils import get_display_name
from telethon.tl.functions.messages import GetPeerDialogsRequest
from telethon.tl.functions.channels import GetParticipantRequest
from telethon.tl.types import ChannelParticipantAdmin, ChannelParticipantCreator, ChatBannedRights
import userbot.modules.sql_helper.warns_sql as sql
from userbot.events import javes05
async def is_admin(chat_id, user_id):
req_jo = await bot(GetParticipantRequest(channel=chat_id,user_id=user_id))
chat_participant = req_jo.participant
if isinstance(chat_participant, ChannelParticipantCreator) or isinstance(chat_participant, ChannelParticipantAdmin):
return True
return False
MUTE_RIGHTS = ChatBannedRights(until_date=None, send_messages=True)
javes = bot
from userbot.events import rekcah05
@javes05(outgoing=True, pattern="^!warn(?: |$)(.*)")
async def _(event):
if event.fwd_from:
return
try:
chat = await event.get_chat()
admin = chat.admin_rights
creator = chat.creator
warn_reason = event.pattern_match.group(1)
reply_message = await event.get_reply_message()
except:
return await event.edit("`Sorry canot warn users here`")
if not admin and not creator:
return await event.edit("`I have to be admin to warn people.`")
if await is_admin(event.chat_id, reply_message.sender.id):
return await event.edit("`I'm not going to warn an admin!`")
limit, soft_warn = sql.get_warn_setting(event.chat_id)
num_warns, reasons = sql.warn_user(reply_message.sender.id, event.chat_id, warn_reason)
if num_warns >= limit:
if soft_warn:
reply = "{} warnings, <u><a href='tg://user?id={}'>user</a></u> has been muted!".format(limit, reply_message.sender.id)
await event.client.edit_permissions(chat, reply_message.sender.id, until_date=None, send_messages=False)
else:
await event.client.edit_permissions(chat, reply_message.sender.id, until_date=None, view_messages=False)
reply = "{} warnings, <u><a href='tg://user?id={}'>user</a></u> has been banned!".format(limit, reply_message.sender.id)
else:
reply = "<u><a href='tg://user?id={}'>user</a></u> has {}/{} warnings... watch out!".format(reply_message.sender.id, num_warns, limit)
if warn_reason:
reply += "\nReason for last warn:\n{}".format(html.escape(warn_reason))
#
await event.edit(reply, parse_mode="html")
@javes.on(rekcah05(pattern=f"warn(?: |$)(.*)", allow_sudo=True))
async def _(event):
if event.fwd_from:
return
try:
chat = await event.get_chat()
admin = chat.admin_rights
creator = chat.creator
warn_reason = event.pattern_match.group(1)
reply_message = await event.get_reply_message()
except:
return await event.reply("`Sorry canot warn users here`")
if not admin and not creator:
return await event.reply("`I have to be admin to warn people.`")
if await is_admin(event.chat_id, reply_message.sender.id):
return await event.reply("`I'm not going to warn an admin!`")
limit, soft_warn = sql.get_warn_setting(event.chat_id)
num_warns, reasons = sql.warn_user(reply_message.sender.id, event.chat_id, warn_reason)
if num_warns >= limit:
if soft_warn:
reply = "{} warnings, <u><a href='tg://user?id={}'>user</a></u> has been muted!".format(limit, reply_message.sender.id)
await event.client.edit_permissions(chat, reply_message.sender.id, until_date=None, send_messages=False)
else:
await event.client.edit_permissions(chat, reply_message.sender.id, until_date=None, view_messages=False)
reply = "{} warnings, <u><a href='tg://user?id={}'>user</a></u> has been banned!".format(limit, reply_message.sender.id)
else:
reply = "<u><a href='tg://user?id={}'>user</a></u> has {}/{} warnings... watch out!".format(reply_message.sender.id, num_warns, limit)
if warn_reason:
reply += "\nReason for last warn:\n{}".format(html.escape(warn_reason))
#
await event.reply(reply, parse_mode="html")
@javes05(outgoing=True, pattern="^!warns(?: |$)(.*)")
async def _(event):
if event.fwd_from:
return
reply_message = await event.get_reply_message()
result = sql.get_warns(reply_message.sender.id, event.chat_id)
if result and result[0] != 0:
num_warns, reasons = result
limit, soft_warn = sql.get_warn_setting(event.chat_id)
if reasons:
text = "This user has {}/{} warnings, for the following reasons:".format(num_warns, limit)
text += "\r\n"
text += reasons
await event.edit(text)
else:
await event.edit("This user has {} / {} warning, but no reasons for any of them.".format(num_warns, limit))
else:
await event.edit("This user hasn't got any warnings!")
@javes.on(rekcah05(pattern=f"warns(?: |$)(.*)", allow_sudo=True))
async def _(event):
if event.fwd_from:
return
reply_message = await event.get_reply_message()
result = sql.get_warns(reply_message.sender.id, event.chat_id)
if result and result[0] != 0:
num_warns, reasons = result
limit, soft_warn = sql.get_warn_setting(event.chat_id)
if reasons:
text = "This user has {}/{} warnings, for the following reasons:".format(num_warns, limit)
text += "\r\n"
text += reasons
await event.reply(text)
else:
await event.reply("This user has {} / {} warning, but no reasons for any of them.".format(num_warns, limit))
else:
await event.reply("This user hasn't got any warnings!")
@javes05(outgoing=True, pattern="^!setwarnmode(?: |$)(.*)")
async def set_warn_strength(event):
try:
chat = await event.get_chat()
admin = chat.admin_rights
creator = chat.creator
args = event.pattern_match.group(1)
except:
return await event.edit("`Error`")
if args:
if args in ("ban"):
sql.set_warn_strength(event.chat_id, False)
await event.edit("Warn mode Set To Ban User.")
return
elif args in ("mute"):
sql.set_warn_strength(event.chat_id, True)
await event.edit("Warn mode Set To Kick User.")
return
else:
await event.edit("`Error usage !setwarnmode kick or mute`")
else:
limit, soft_warn = sql.get_warn_setting(event.chat_id)
if soft_warn:
await event.edit("I Am **muting** User's For Now.")
else:
await event.edit("I Am **Baning** User's For Now.")
return ""
@javes.on(rekcah05(pattern=f"setwarnmode(?: |$)(.*)", allow_sudo=True))
async def set_warn_strength(event):
try:
chat = await event.get_chat()
admin = chat.admin_rights
creator = chat.creator
args = event.pattern_match.group(1)
except:
return await event.reply("`Error`")
if args:
if args in ("ban"):
sql.set_warn_strength(event.chat_id, False)
await event.reply("Warn mode Set To Ban User.")
return
elif args in ("mute"):
sql.set_warn_strength(event.chat_id, True)
await event.reply("warn mode Set To Kick User.")
return
else:
await event.reply("`Error usage !setwarnmode kick or mute`")
else:
limit, soft_warn = sql.get_warn_setting(event.chat_id)
if soft_warn:
await event.reply("I Am **muting** User's For Now.")
else:
await event.reply("I Am **Baning** User's For Now.")
return ""
@javes05(outgoing=True, pattern="^!setwarnlimit(?: |$)(.*)")
async def set_warn_limit(event):
try:
chat = await event.get_chat()
admin = chat.admin_rights
creator = chat.creator
input_str = event.pattern_match.group(1)
except:
return await event.edit("`Error`")
if input_str:
if int(input_str) < 3:
await event.edit("`The minimum warn limit is 3!`")
else:
sql.set_warn_limit(event.chat_id, int(input_str))
await event.edit("`Updated the warn limit to` {}".format(input_str))
return
else:
limit, soft_warn = sql.get_warn_setting(event.chat_id)
await event.edit("`The current warn limit is {}`".format(limit))
return ""
@javes.on(rekcah05(pattern=f"setwarnlimit(?: |$)(.*)", allow_sudo=True))
async def set_warn_limit(event):
try:
chat = await event.get_chat()
admin = chat.admin_rights
creator = chat.creator
input_str = event.pattern_match.group(1)
except:
return await event.reply("`Error`")
if input_str:
if int(input_str) < 3:
await event.reply("`The minimum warn limit is 3!`")
else:
sql.set_warn_limit(event.chat_id, int(input_str))
await event.reply("`Updated the warn limit to` {}".format(input_str))
return
else:
limit, soft_warn = sql.get_warn_setting(event.chat_id)
await event.reply("`The current warn limit is {}`".format(limit))
return ""
@javes05(outgoing=True, pattern="^!resetwarns(?: |$)(.*)")
async def _(event):
if event.fwd_from:
return
reply_message = await event.get_reply_message()
sql.reset_warns(reply_message.sender.id, event.chat_id)
await event.edit("Warnings have been reset!")
@javes.on(rekcah05(pattern=f"resetwarns(?: |$)(.*)", allow_sudo=True))
async def _(event):
if event.fwd_from:
return
reply_message = await event.get_reply_message()
sql.reset_warns(reply_message.sender.id, event.chat_id)
await event.reply("Warnings have been reset!")
CMD_HELP.update({
"warn":
"!warn\
\nUsage: Warn a user.\
\n\n!warns \
\nUsage: See a user's warnings.\
\n\n!setwarnmode <ban/mute>\
\nUsage: Set the chat's warn mode. \
\n\n!setwarnlimit <number>\
\nUsage: Set the number of warnings before users are punished. \
\n\n!resetwarns \
\nUsage: Reset all of a user's warnings to 0. \
\n\nAll commands support Sudo ( type !help sudo for more info)\
"
})
| 35.781457 | 142 | 0.638627 | 0 | 0 | 0 | 0 | 9,048 | 0.837313 | 8,730 | 0.807885 | 2,292 | 0.212104 |
2fbc2bb1fd50ab3ce270b80f068eb92420a9525e | 17,242 | py | Python | eis_pointing/eis_aia_registration.py | gpelouze/eis_pointing | 2ee714a2295bafae3492ab956792535336dd2a81 | [
"MIT"
] | 3 | 2019-04-01T09:35:01.000Z | 2021-12-14T15:39:40.000Z | eis_pointing/eis_aia_registration.py | gpelouze/eis_pointing | 2ee714a2295bafae3492ab956792535336dd2a81 | [
"MIT"
] | null | null | null | eis_pointing/eis_aia_registration.py | gpelouze/eis_pointing | 2ee714a2295bafae3492ab956792535336dd2a81 | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
import datetime
import os
import warnings
import numpy as np
import scipy.interpolate as si
import matplotlib as mpl
from matplotlib.backends import backend_pdf
import matplotlib.pyplot as plt
from .utils import aia_raster
from .utils import cli
from .utils import eis
from .utils import num
from .utils import plots
from . import coregister as cr
class OptPointingVerif(object):
def __init__(self,
verif_dir, eis_name, aia_band,
pointings,
raster_builder, eis_int,
titles, ranges, offsets, cross_correlations,
start_time, stop_time,
):
''' Build and save pointing verification data
Parameters
==========
verif_dir : str
eis_name : str
aia_band : int
pointings : list of eis.EISPointing
raster_builder : aia_raster.SyntheticRasterBuilder
eis_int : 2D array
titles : list of str
ranges : list
Items can be either 3-tuples of cr.tools.OffsetSet, or None.
offsets : list
Items can be either 3-tuples of floats, or arrays of shape (n, 3).
cross_correlations : list of arrays
start_time : datetime.datetime
stop_time : datetime.datetime
'''
self.verif_dir = verif_dir
self.eis_name = eis_name
self.aia_band = aia_band
self.pointings = pointings
self.raster_builder = raster_builder
self.eis_int = eis_int
self.titles = titles
self.ranges = ranges
self.offsets = offsets
self.cross_correlations = cross_correlations
self.start_time = start_time
self.stop_time = stop_time
self.rms = []
if not os.path.exists(self.verif_dir):
os.makedirs(self.verif_dir)
def save_all(self):
self.save_npz()
self.save_figures()
self.save_summary()
def save_npz(self):
''' Save cc, offset, and new coordinates '''
np.savez(
os.path.join(self.verif_dir, 'offsets.npz'),
offset=np.array(self.offsets, dtype=object),
cc=np.array(self.cross_correlations, dtype=object),
x=self.pointings[-1].x, y=self.pointings[-1].y,
)
def save_summary(self):
''' Print and save yaml summary '''
if not self.rms:
self.rms = [None] * (len(titles) + 1)
run_specs = [
('verif_dir', self.verif_dir),
('initial_rms', self.rms[0]),
('steps', self._repr_steps(
self.titles,
self.ranges,
self.offsets,
self.cross_correlations,
self.rms[1:],
indent=2)),
('exec_time', self.stop_time - self.start_time),
]
summary = ''
for spec in run_specs:
summary += self._repr_kv(*spec, indent=0)
print('\n---\n', summary, '...', sep='')
with open(os.path.join(self.verif_dir, 'summary.yml'), 'w') as f:
f.write(summary)
def _repr_offset(self, offset):
offset = list(offset)
offset[0], offset[1] = offset[1], offset[0]
return offset
def _repr_kv(self, name, value, indent=0, sep=': ', end='\n'):
form = '{:#.6g}'
if isinstance(value, (list, tuple)):
value = [form.format(v)
if np.issubdtype(type(v), (float, np.inexact))
else str(v)
for v in value]
value = '[' + ', '.join(value) + ']'
if value is None:
value = 'null'
elif np.issubdtype(type(value), (float, np.inexact)):
value = form.format(value)
else:
value = str(value)
string = ''.join([indent * ' ', name, sep, str(value), end])
return string
def _repr_steps(self, titles, all_ranges, offsets, ccs, rmss, indent=0):
indent += 2
ret = '\n'
for name, ranges, offset, cc, rms in \
zip(titles, all_ranges, offsets, ccs, rmss):
ret += ' '*(indent-2) + '- '
ret += self._repr_kv('name', name, indent=0)
if ranges:
ry, rx, ra = ranges
ret += self._repr_kv('range_x', rx, indent=indent)
ret += self._repr_kv('range_y', ry, indent=indent)
ret += self._repr_kv('range_a', ra, indent=indent)
if len(offset) <= 3:
ret += self._repr_kv('offset', self._repr_offset(offset), indent=indent)
ret += self._repr_kv('cc_max', np.nanmax(cc), indent=indent)
if rms is not None:
ret += self._repr_kv('rms', rms, indent=indent)
if ret[-1] == '\n':
ret = ret[:-1]
return ret
def save_figures(self):
''' plot alignment results '''
diff_norm = mpl.colors.Normalize(vmin=-3, vmax=+3)
n_pointings = len(self.pointings)
for i, pointing in enumerate(self.pointings):
name = 'step_{}'.format(i)
if i == 0:
name += '_original'
elif i == n_pointings - 1:
name += '_optimal'
self.plot_intensity(pointing, name=name, diff_norm=diff_norm)
self.plot_slit_align()
def _get_interpolated_maps(self, pointing, save_to=None):
''' get maps and interpolate them on an evenly-spaced grid '''
x, y = pointing.x, pointing.y
aia_int = self.raster_builder.get_raster(
x, y, pointing.t / 3600,
extrapolate_t=True)
y_interp = np.linspace(y.min(), y.max(), y.shape[0])
x_interp = np.linspace(x.min(), x.max(), x.shape[1])
xi_interp = np.moveaxis(np.array(np.meshgrid(x_interp, y_interp)), 0, -1)
points = (x.flatten(), y.flatten())
eis_int_interp = si.LinearNDInterpolator(points, self.eis_int.flatten())
eis_int_interp = eis_int_interp(xi_interp)
aia_int_interp = si.LinearNDInterpolator(points, aia_int.flatten())
aia_int_interp = aia_int_interp(xi_interp)
if save_to:
np.savez(
save_to,
x=x,
y=y,
eis_int=self.eis_int,
aia_int=aia_int,
x_interp=x_interp,
y_interp=y_interp,
eis_int_interp=eis_int_interp,
aia_int_interp=aia_int_interp,
)
return x_interp, y_interp, eis_int_interp, aia_int_interp
def _normalize_intensity(self, a, b, norm=mpl.colors.Normalize):
def normalize(arr):
arr_stat = arr[~(arr == 0)] # exclude possibly missing AIA data
arr = (arr - np.nanmean(arr_stat)) / np.nanstd(arr_stat)
return arr
a = normalize(a)
b = normalize(b)
offset = - np.nanmin((a, b))
offset += .1
a += offset
b += offset
norm = norm(vmin=np.nanmin((a, b)), vmax=np.nanmax((a, b)))
return a, b, norm
def plot_intensity(self, pointing, name='', diff_norm=None):
''' plot intensity maps of EIS and AIA rasters '''
if name:
name = '_' + name
filenames = {
'npy': 'intensity_data{}.npz',
'intensity': 'intensity_maps{}.pdf',
'diff': 'intensity_diff{}.pdf',
}
filenames = {k: os.path.join(self.verif_dir, v.format(name))
for k, v in filenames.items()}
# build and save normalized intensity maps
x, y, eis_int, aia_int = self._get_interpolated_maps(
pointing, save_to=filenames['npy'])
eis_int, aia_int, norm = self._normalize_intensity(
eis_int, aia_int, mpl.colors.LogNorm)
# plot maps
pp = backend_pdf.PdfPages(filenames['intensity'])
intensity_plots = (
(eis_int, 'EIS'),
(aia_int, 'synthetic raster from AIA {}'.format(self.aia_band)),
)
for int_map, title in intensity_plots:
plt.clf()
plots.plot_map(
plt.gca(),
int_map, coordinates=[x, y],
cmap='gray', norm=norm)
plt.title(title)
plt.xlabel('X [arcsec]')
plt.ylabel('Y [arcsec]')
plt.savefig(pp)
pp.close()
# plot difference
diff = eis_int - aia_int
rms = np.sqrt(np.nanmean(diff**2))
self.rms.append(rms)
if not diff_norm:
vlim = np.nanmax(np.abs(diff))
diff_norm = mpl.colors.Normalize(vmin=-vlim, vmax=+vlim)
plt.clf()
im = plots.plot_map(
plt.gca(),
diff, coordinates=[x, y],
cmap='gray', norm=diff_norm)
cb = plt.colorbar(im)
cb.set_label('normalised EIS − AIA')
plt.title('RMS = {:.2g}'.format(rms))
plt.xlabel('X [arcsec]')
plt.ylabel('Y [arcsec]')
plt.savefig(filenames['diff'])
def _get_slit_offset(self):
slit_offsets = []
for offset in self.offsets:
if np.array(offset).ndim > 1:
slit_offsets.append(offset)
if len(slit_offsets) == 0:
return None
elif len(slit_offsets) > 1:
warnings.warn('Multiple slitshift steps. Plotting the first one')
return slit_offsets[0]
def plot_slit_align(self):
''' plot offsets and slit coordinates '''
slit_offset = self._get_slit_offset()
if slit_offset is None:
return
pp = backend_pdf.PdfPages(os.path.join(self.verif_dir, 'slit_align.pdf'))
x_color = '#2ca02c'
y_color = '#1f77b4'
old_color = '#d62728'
new_color = '#000000'
# offset
plt.clf()
plt.plot(slit_offset.T[1], '.', label='X', color=x_color)
plt.plot(slit_offset.T[0], '.', label='Y', color=y_color)
plt.title(self.eis_name)
plt.xlabel('slit position')
plt.ylabel('offset [arcsec]')
plt.legend()
plt.savefig(pp)
# new coordinates
plots = [
('X', self.pointings[-1].x, self.pointings[0].x),
('Y', self.pointings[-1].y, self.pointings[0].y),
]
for name, aligned, original in plots:
plt.clf()
plt.plot(original[0], ',', label='original ' + name, color=old_color)
plt.plot(aligned[0], ',', label='aligned ' + name, color=new_color)
plt.legend()
plt.title(self.eis_name)
plt.xlabel('slit position')
plt.ylabel(name + ' [arcsec]')
plt.savefig(pp)
pp.close()
def shift_step(x, y, eis_int, aia_int, cores=None, **kwargs):
    """Run the pure-translation registration step.

    Returns ``(title, offset_set, offset, cc, x, y)``: the step name,
    no explored grids (None), the best ``[dy, dx, 0]`` offset, the
    cross-correlation array, and the corrected coordinates.
    """
    cli.print_now('> correct translation')
    new_x, new_y, (dy, dx, cc) = cr.images.align(
        eis_int, x, y,
        aia_int, x, y,
        cores=cores, **kwargs)
    # translation-only step: angular component is always zero
    return 'shift', None, [dy, dx, 0], cc, new_x, new_y
def rotshift_step(x, y, dates_rel_hours, eis_int, raster_builder,
        cores=None, **kwargs):
    """Run the rotation + translation registration step.

    Returns ``(title, offset_set, offset, cc, x, y)``: the step name,
    the explored (y, x, a) OffsetSets, the best ``[dy, dx, da]`` offset,
    the cross-correlation array, and the corrected coordinates.
    """
    cli.print_now('> align rasters')
    new_x, new_y, (dy, dx, da, cc) = cr.rasters.align(
        eis_int, x, y, dates_rel_hours, raster_builder,
        cores=cores, **kwargs)
    explored = (kwargs['y_set'], kwargs['x_set'], kwargs['a_set'])
    return 'rotshift', explored, [dy, dx, da], cc, new_x, new_y
def slitshift_step(x, y, dates_rel_hours, eis_int, raster_builder,
        cores=None, **kwargs):
    """Run the per-slit-position registration step.

    Returns ``(title, offset_set, offset, cc, x, y)``: the step name,
    the explored (y, x, a) OffsetSets, an array of per-slit offsets,
    the cross-correlation array, and the corrected coordinates.
    """
    cli.print_now('> align slit positions')
    new_x, new_y, (per_slit_offsets, cc) = cr.slits.align(
        eis_int, x, y, dates_rel_hours, raster_builder,
        cores=cores, **kwargs)
    explored = (kwargs['y_set'], kwargs['x_set'], kwargs['a_set'])
    return 'slitshift', explored, per_slit_offsets, cc, new_x, new_y
def optimal_pointing(eis_data, cores=None, aia_band=None,
        verif_dir=None, aia_cache=None, eis_name=None, steps_file=None):
    ''' Determine the EIS pointing using AIA data as a reference.

    Parameters
    ==========
    eis_data : eis.EISData
        Object containing the EIS intensity and pointing.
    cores : int or None
        Number of cores to use for multiprocessing, if any.
    aia_band : int
        The reference AIA channel. Eg. 193.
    verif_dir : str
        Path to the directory where to save verification plots.
    aia_cache : str
        Path to the synthetic AIA raster builder cache file.
    eis_name : str
        Name of the l0 EIS file eg. eis_l0_20140810_010438
    steps_file : str
        Path to a yaml file containing the registration steps.

    Returns
    =======
    pointing : eis.EISPointing
        Optimal EIS pointing.
    '''
    # Load the sequence of registration steps, or use a sensible default
    # of shift -> rotshift -> slitshift.
    if steps_file:
        registration_steps = cli.load_corr_steps(steps_file)
    else:
        warnings.warn('No steps file provided, falling back to default.')
        registration_steps = {'steps': [
            {'type': 'shift',
             'cc_function': 'explicit',
             'cc_boundary': 'drop',
             'sub_px': True,
             },
            {'type': 'rotshift',
             'x_set': cr.tools.OffsetSet((-10.0, 10.0), number=11),
             'y_set': cr.tools.OffsetSet((-5.0, 5.0), number=11),
             'a_set': cr.tools.OffsetSet((-3.0, 3.0), step=0.2),
             },
            {'type': 'slitshift',
             'x_set': cr.tools.OffsetSet((-20.0, 20.0), number=21),
             'y_set': cr.tools.OffsetSet((-20.0, 20.0), number=21),
             'a_set': cr.tools.OffsetSet((0.0, 0.0), number=1),
             'mp_mode': 'track'
             },
            ]}
    cli.print_now('> build relative and absolute date arrays') # ----------------------
    dates_rel = num.seconds_to_timedelta(eis_data.pointing.t)
    # pointing.t is in seconds; the raster builder expects hours
    dates_rel_hours = eis_data.pointing.t / 3600
    date_ref = eis_data.pointing.t_ref
    dates_abs = date_ref + dates_rel
    cli.print_now('> get EIS grid info and add margin') # -----------------------------
    x, y = eis_data.pointing.x, eis_data.pointing.y
    # margin: half the field-of-view extent in each direction
    x_margin = (np.max(x) - np.min(x)) / 2
    y_margin = (np.max(y) - np.min(y)) / 2
    # NOTE(review): np.max on a scalar is a no-op here -- possibly a
    # leftover from a per-axis margin computation; confirm intent.
    x_margin = np.max(x_margin)
    y_margin = np.max(y_margin)
    ny, y_slice = cr.tools.create_margin(y, y_margin, 0)
    nx, x_slice = cr.tools.create_margin(x, x_margin, 1)
    # NOTE(review): new_shape / new_slice appear unused below -- confirm.
    new_shape = 1, ny, nx
    new_slice = slice(None), y_slice, x_slice
    eis_int = eis_data.data
    cli.print_now('> get AIA data') # -------------------------------------------------
    # optionally use a single AIA frame (at the mid-time of the raster)
    # instead of a time series; the file cache is bypassed in that case
    single_aia_frame = registration_steps.get('single_aia_frame', False)
    if single_aia_frame:
        single_aia_frame = num.dt_average(np.min(dates_abs), np.max(dates_abs))
        aia_cache = None
    raster_builder = aia_raster.SyntheticRasterBuilder(
        dates=[np.min(dates_abs), np.max(dates_abs)],
        date_ref=date_ref,
        channel=aia_band,
        file_cache=aia_cache,
        single_frame=single_aia_frame,
        )
    raster_builder.get_data()
    # degrade raster_builder resolution to 3 arcsec (see DelZanna+2011)
    raster_builder.degrade_resolution(3, cores=cores)
    # crop raster_builder cached data to fix multiprocessing
    # (keep a disk of radius r around the raster centre so any rotated /
    # shifted grid explored by the steps stays inside the cached data)
    x_min, x_max = x.min(), x.max()
    y_min, y_max = y.min(), y.max()
    x_cen = (x_min + x_max) / 2
    y_cen = (y_min + y_max) / 2
    r = np.sqrt((x_max - x_cen)**2 + (y_max - y_cen)**2)
    raster_builder.crop_data(x_cen - r, x_cen + r, y_cen - r, y_cen + r)
    # compute alignment -------------------------------------------------------
    titles = []
    offset_sets = []
    offsets = []
    pointings = [eis_data.pointing]
    cross_correlations = []
    start_time = datetime.datetime.now()
    # apply each step in turn; x and y are updated in place for the next step
    for step in registration_steps['steps']:
        registration_type = step.pop('type')
        if registration_type == 'shift':
            aia_int = raster_builder.get_raster(
                x, y, dates_rel_hours,
                extrapolate_t=True)
            result = shift_step(x, y, eis_int, aia_int, cores=cores, **step)
        elif registration_type == 'rotshift':
            result = rotshift_step(x, y, dates_rel_hours,
                eis_int, raster_builder,
                cores=cores, **step)
        elif registration_type == 'slitshift':
            result = slitshift_step(x, y, dates_rel_hours,
                eis_int, raster_builder,
                cores=cores, **step)
        else:
            raise ValueError('unknown registration step')
        title, offset_set, offset, cc, x, y = result
        titles.append(title)
        offset_sets.append(offset_set)
        offsets.append(offset)
        pointings.append(eis.EISPointing(x, y, eis_data.pointing.t, date_ref))
        cross_correlations.append(cc)
    stop_time = datetime.datetime.now()
    # optionally save verification data (plots, npz arrays, yaml summary)
    if verif_dir:
        verif = OptPointingVerif(
            verif_dir, eis_name, aia_band,
            pointings,
            raster_builder, eis_int,
            titles, offset_sets, offsets, cross_correlations,
            start_time, stop_time,
            )
        verif.save_all()
    return pointings[-1]
| 35.331967 | 88 | 0.563334 | 10,343 | 0.599559 | 0 | 0 | 0 | 0 | 0 | 0 | 3,318 | 0.192337 |
2fbc8aea51a6ab86c4dd3b376ee10300c6209509 | 1,179 | py | Python | src/olympia/accounts/urls.py | mstriemer/olympia | 2e700c20e0a8ed3f0dd389d1521c3798bf7ed7f7 | [
"BSD-3-Clause"
] | null | null | null | src/olympia/accounts/urls.py | mstriemer/olympia | 2e700c20e0a8ed3f0dd389d1521c3798bf7ed7f7 | [
"BSD-3-Clause"
] | null | null | null | src/olympia/accounts/urls.py | mstriemer/olympia | 2e700c20e0a8ed3f0dd389d1521c3798bf7ed7f7 | [
"BSD-3-Clause"
] | null | null | null | from django.conf.urls import include, url
from rest_framework.routers import SimpleRouter
from rest_framework_nested.routers import NestedSimpleRouter
from olympia.reviews.views import ReviewViewSet
from . import views
# Root router exposing /account/ endpoints through AccountViewSet.
accounts = SimpleRouter()
accounts.register(r'account', views.AccountViewSet, base_name='account')

# Router for children of /accounts/account/{account_pk}/.
sub_accounts = NestedSimpleRouter(accounts, r'account', lookup='account')
sub_accounts.register('reviews', ReviewViewSet, base_name='account-review')

# Router-generated endpoints first, then the explicit auth/profile views.
urlpatterns = [
    url(r'', include(accounts.urls)),
    url(r'', include(sub_accounts.urls)),
    url(r'^authenticate/$', views.AuthenticateView.as_view(),
        name='accounts.authenticate'),
    url(r'^login/$', views.LoginView.as_view(), name='accounts.login'),
    url(r'^login/start/$',
        views.LoginStartView.as_view(),
        name='accounts.login_start'),
    url(r'^profile/$', views.ProfileView.as_view(), name='accounts.profile'),
    url(r'^register/$', views.RegisterView.as_view(),
        name='accounts.register'),
    url(r'^super-create/$', views.AccountSuperCreate.as_view(),
        name='accounts.super-create'),
]
| 35.727273 | 77 | 0.72095 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 338 | 0.286684 |
2fbc9bbca722cf63185383229fc9699a6409f21e | 2,565 | py | Python | plotting/plotly.py | TPei/jawbone_visualizer | dadefbddb47450c3c43d474abaf9ae8d1317e03b | [
"MIT"
] | null | null | null | plotting/plotly.py | TPei/jawbone_visualizer | dadefbddb47450c3c43d474abaf9ae8d1317e03b | [
"MIT"
] | 1 | 2015-05-11T07:21:21.000Z | 2015-05-11T07:21:21.000Z | plotting/plotly.py | TPei/jawbone_visualizer | dadefbddb47450c3c43d474abaf9ae8d1317e03b | [
"MIT"
] | null | null | null | __author__ = 'TPei'
from plotting.plotly.graph_objs import *
from data.DataHandler import get_all_the_data
"""
Working with the plotly api to create more interactive diagrams
"""
def sleep():
    """Plot every sleep category plus a running average of total sleep.

    NOTE(review): `py` is never imported in this module, so `py.plot`
    raises NameError at runtime -- confirm the missing
    `import plotly.plotly as py` (or chart_studio equivalent) upstream.
    """
    data = get_all_the_data()
    traces = []
    categories = ['bed', 'sleep', 'sound', 'light', 'awake', 'averages']
    sleep_data = [[], [], [], [], [], []]
    # collect one series per real category; the last slot ('averages')
    # is filled with the running average below
    for date in data:
        entry = data[date]
        for i in range(0, len(categories)-1):
            sleep_data[i].append(entry[categories[i]])
    # running average of total sleep ('sleep' is index 1)
    total_sleep = 0
    averages = []
    for i in range(0, len(sleep_data[1])):
        total_sleep += sleep_data[1][i]
        averages.append(total_sleep / float(i+1))
    sleep_data[5] = averages
    for i in range(0, len(sleep_data)):
        traces.append(Scatter(y=sleep_data[i], name=categories[i]))
    data = Data(traces)
    # FIX: removed the dead commented-out example block that trailed
    # this function as a no-op string literal.
    unique_url = py.plot(data, filename='sleep')
def coffee_vs_sleep():
    """Plot average sleep time per coffee-consumption bucket as a bar chart.

    NOTE(review): `py` is never imported in this module, so `py.plot`
    raises NameError at runtime -- confirm the missing plotly import.
    """
    data = get_all_the_data()
    categories = ['0 cups', '1 or 2 cups', '3 or 4 cups', '5+ cups']
    count = [0, 0, 0, 0]
    average_counter = [0, 0, 0, 0]
    average = [0, 0, 0, 0]

    import collections
    od = collections.OrderedDict(sorted(data.items()))
    print(od)

    category = 'sleep'
    for day in od:
        if 'coffee' in od[day] and category in od[day]:
            #coffee.append(od[day]['coffee'])
            if od[day]['coffee'] == 0:
                count[0] += od[day][category]
                average_counter[0] += 1
            elif od[day]['coffee'] < 3:
                count[1] += od[day][category]
                average_counter[1] += 1
            elif od[day]['coffee'] < 5:
                count[2] += od[day][category]
                average_counter[2] += 1
            else:
                count[3] += od[day][category]
                average_counter[3] += 1
        elif category in od[day]:
            # FIX: days without a 'coffee' entry were counted unconditionally,
            # raising KeyError when the sleep value was missing as well; only
            # count them in the '0 cups' bucket when sleep data exists.
            count[0] += od[day][category]
            average_counter[0] += 1

    # average per bucket, guarding against empty buckets
    for i in range(0, len(count)):
        if average_counter[i] == 0:
            average[i] = 0
        else:
            average[i] = (count[i] / float(average_counter[i]))

    trace = Bar(y=average, x=categories)
    data = Data([trace])
    unique_url = py.plot(data, filename='coffee_vs_sleep')

if __name__ == '__main__':
    #sleep()
    coffee_vs_sleep()
2fbcd6e97434f0c483e0b9f7376b9e31fd73e295 | 569 | py | Python | faddr/database.py | kido5217/faddr | 116c789ac3985cc3f461203e249d8043dcb73428 | [
"MIT"
] | 1 | 2022-03-10T17:52:13.000Z | 2022-03-10T17:52:13.000Z | faddr/database.py | kido5217/faddr | 116c789ac3985cc3f461203e249d8043dcb73428 | [
"MIT"
] | 45 | 2021-08-08T15:23:42.000Z | 2022-03-28T20:23:57.000Z | faddr/database.py | kido5217/faddr | 116c789ac3985cc3f461203e249d8043dcb73428 | [
"MIT"
] | 1 | 2021-10-22T00:46:35.000Z | 2021-10-22T00:46:35.000Z | from tinydb import TinyDB
# TODO: redo this, maybe use SQL
class Database:
    """Thin TinyDB-backed store; the file is wiped on every construction."""
    def __init__(self, db_file):
        """Create database file, erase previous one if exists."""
        self.db = TinyDB(db_file)
        # TODO: implement db files rotation, for now just replace all data
        self.db.truncate()
        # self.db_path = pathlib.Path(db_file)
        # self.db_file_name = self.db_path.name
        # self.db_dir = self.db_path.parent
    def __db_path_is_writable(self, db_file):
        # Placeholder: writability check not implemented yet.
        pass
    def insert(self, data):
        # Append one record (a dict) to the database.
        self.db.insert(data)
| 28.45 | 74 | 0.643234 | 507 | 0.891037 | 0 | 0 | 0 | 0 | 0 | 0 | 267 | 0.469244 |
2fbd171f1eaf438bc81a6f1722cdfea67b114a0f | 691 | py | Python | passing/HelloWorld/test_hello.py | RubenRubens/cq-testing | 558631a56b5751ef529f33d0aea4fe62ed8d5549 | [
"WTFPL"
] | null | null | null | passing/HelloWorld/test_hello.py | RubenRubens/cq-testing | 558631a56b5751ef529f33d0aea4fe62ed8d5549 | [
"WTFPL"
] | null | null | null | passing/HelloWorld/test_hello.py | RubenRubens/cq-testing | 558631a56b5751ef529f33d0aea4fe62ed8d5549 | [
"WTFPL"
] | null | null | null | import unittest
class TestHelloWorld(unittest.TestCase):
    """Validate the HW1/HW2 CadQuery results and check they are equal.

    NOTE(review): results are shared between tests through module-level
    globals R1/R2, so test_equal relies on unittest's default alphabetical
    execution order (test_HW1 and test_HW2 run first) -- confirm intended.
    """
    # Import and validate HW1
    def test_HW1(self):
        import HelloWorld.HW1
        global R1
        R1 = HelloWorld.HW1.result
        # isValid() checks the underlying shape is geometrically sound
        self.assertTrue(R1.val().isValid())
    # Import and validate HW2
    def test_HW2(self):
        import HelloWorld.HW2
        global R2
        R2 = HelloWorld.HW2.result
        self.assertTrue(R2.val().isValid())
    # Tests if the objects are equal
    def test_equal(self):
        # Equal solids: empty difference, and union/intersection both
        # preserve R1's volume.
        self.assertEqual(R1.cut(R2).val().Volume(), 0)
        self.assertEqual(R1.union(R2).val().Volume(), R1.val().Volume())
        self.assertEqual(R1.intersect(R2).val().Volume(), R1.val().Volume())
| 28.791667 | 76 | 0.626628 | 673 | 0.973951 | 0 | 0 | 0 | 0 | 0 | 0 | 82 | 0.118669 |
2fbd42af72d663b2e85a1906a00c7fcaf0c287e8 | 2,014 | py | Python | examples/cifar10.py | Fragile-azalea/homura | 900d1d63affb9c8af3accd9b196b5276cb2e14b6 | [
"Apache-2.0"
] | 1 | 2020-06-30T01:55:41.000Z | 2020-06-30T01:55:41.000Z | examples/cifar10.py | Fragile-azalea/homura | 900d1d63affb9c8af3accd9b196b5276cb2e14b6 | [
"Apache-2.0"
] | null | null | null | examples/cifar10.py | Fragile-azalea/homura | 900d1d63affb9c8af3accd9b196b5276cb2e14b6 | [
"Apache-2.0"
] | null | null | null | import hydra
import torch
import torch.nn.functional as F
from homura import optim, lr_scheduler, callbacks, reporters, trainers
from homura.vision.data import vision_loaders
from homura.vision.models.classification import resnet20, wrn28_10
@hydra.main('config/cifar10.yaml')
def main(cfg):
    """Train a CIFAR-10 classifier (resnet20 or wrn28_10) with homura."""
    # Architecture and its matching hyper-parameters, selected by cfg.model.
    model = {"resnet20": resnet20,
             "wrn28_10": wrn28_10}[cfg.model](num_classes=10)
    weight_decay = {"resnet20": 1e-4,
                    "wrn28_10": 5e-4}[cfg.model]
    lr_decay = {"resnet20": 0.1,
                "wrn28_10": 0.2}[cfg.model]
    train_loader, test_loader = vision_loaders("cifar10", cfg.batch_size)
    # With bn_no_wd, the optimizer is built later in set_optimizer so BN
    # parameters can be excluded from weight decay; None defers creation.
    optimizer = None if cfg.bn_no_wd else optim.SGD(lr=1e-1, momentum=0.9, weight_decay=weight_decay)
    scheduler = lr_scheduler.MultiStepLR([100, 150], gamma=lr_decay)
    tq = reporters.TQDMReporter(range(cfg.epochs), verb=True)
    c = [callbacks.AccuracyCallback(),
         callbacks.LossCallback(),
         reporters.IOReporter("."),
         reporters.TensorboardReporter("."),
         callbacks.WeightSave("."),
         tq]
    if cfg.bn_no_wd:
        def set_optimizer(trainer):
            # Split parameters by name: BN parameters get zero weight decay.
            bn_params = []
            non_bn_parameters = []
            for name, p in trainer.model.named_parameters():
                if "bn" in name:
                    bn_params.append(p)
                else:
                    non_bn_parameters.append(p)
            optim_params = [
                {"params": bn_params, "weight_decay": 0},
                {"params": non_bn_parameters, "weight_decay": weight_decay},
            ]
            trainer.optimizer = torch.optim.SGD(optim_params, lr=1e-1, momentum=0.9)
        # NOTE(review): patches the class attribute, so this affects every
        # SupervisedTrainer instance in the process -- confirm intended.
        trainers.SupervisedTrainer.set_optimizer = set_optimizer
    with trainers.SupervisedTrainer(model, optimizer, F.cross_entropy, callbacks=c,
                                    scheduler=scheduler) as trainer:
        for _ in tq:
            trainer.train(train_loader)
            trainer.test(test_loader)
if __name__ == '__main__':
    main()
| 35.964286 | 101 | 0.61569 | 0 | 0 | 0 | 0 | 1,728 | 0.857994 | 0 | 0 | 157 | 0.077954 |
2fc09613b242d7e2c9c0beca77e4a8f3159ea82b | 1,591 | py | Python | payments/domain/tests/test_paypal_payment.py | anandrgit/snet-marketplace-service | 22dd66e9e34a65580eaffa70928bbdb1f67061e8 | [
"MIT"
] | null | null | null | payments/domain/tests/test_paypal_payment.py | anandrgit/snet-marketplace-service | 22dd66e9e34a65580eaffa70928bbdb1f67061e8 | [
"MIT"
] | null | null | null | payments/domain/tests/test_paypal_payment.py | anandrgit/snet-marketplace-service | 22dd66e9e34a65580eaffa70928bbdb1f67061e8 | [
"MIT"
] | null | null | null | import unittest
from unittest.mock import patch, Mock
from payments.domain.paypal_payment import PaypalPayment
class TestPaypal(unittest.TestCase):
    """Unit tests for PaypalPayment with the paypalrestsdk API mocked out."""
    # Mock paypalrestsdk.Payment so create() succeeds and the object exposes
    # an approval_url link plus a payment id.
    @patch("paypalrestsdk.Payment", return_value=Mock(links=[Mock(rel="approval_url", href="url")],
                                                      id="PAYID-123", create=Mock(return_value=True)))
    def test_initiate_payment(self, mock_object):
        payment_id = "123"
        order_id = "order-123"
        amount = 123
        payment_status = ""
        created_at = "2000-01-01 00:00:00"
        payment_details = {}
        currency = "USD"
        response = PaypalPayment(payment_id, amount, currency,
                                 payment_status, created_at, payment_details).initiate_payment(order_id)
        # initiate_payment should surface the mocked id and approval URL
        expected_response = {'payment': {'id': 'PAYID-123', 'payment_url': 'url'}}
        self.assertDictEqual(response, expected_response)
    # Mock Payment.find so execute() on the looked-up payment succeeds.
    @patch("paypalrestsdk.Payment.find", return_value=Mock(links=[Mock(rel="approval_url", href="url")],
                                                           id="PAYID-123", execute=Mock(return_value=True)))
    def test_execute_payment(self, mock_object):
        payment_id = "123"
        amount = 123
        payment_status = ""
        created_at = "2000-01-01 00:00:00"
        currency = "USD"
        payment_details = {"payment_id": "PAYID-123"}
        assert PaypalPayment(payment_id, amount, currency, payment_status, created_at, payment_details) \
            .execute_transaction({"payer_id": "PAYER-123"})
if __name__ == "__main__":
    # FIX: instantiating the TestCase (`TestPaypal()`) does not execute any
    # tests; unittest.main() discovers and runs them.
    unittest.main()
| 41.868421 | 108 | 0.613451 | 1,431 | 0.899434 | 0 | 0 | 1,383 | 0.869265 | 0 | 0 | 284 | 0.178504 |
2fc18eb9e174b4ec483d343f9eaf11394db8c812 | 1,234 | py | Python | software-defined-networks/Topo2.py | hasithsen/CSNE | 30d0386186a6684207bc5cf9f75b3b13f8a5bbc8 | [
"MIT"
] | null | null | null | software-defined-networks/Topo2.py | hasithsen/CSNE | 30d0386186a6684207bc5cf9f75b3b13f8a5bbc8 | [
"MIT"
] | null | null | null | software-defined-networks/Topo2.py | hasithsen/CSNE | 30d0386186a6684207bc5cf9f75b3b13f8a5bbc8 | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Create below topology in Mininet.
[c1]
/\
/ \
/ \
/ \
[s1]--------[s2]
/\ /\
[h1] [h2] [h3] [h4]
[cX] - controller
[sX] - switch
[hX] - host
"""
from mininet.net import Mininet
from mininet.node import Controller, OVSKernelSwitch, RemoteController
from mininet.cli import CLI
from mininet.log import setLogLevel, info
__email__ = "i@hsen.tech"
__date__ = "2020/4/1"
def alphaNet():
    """ Create topology and start Mininet CLI"""
    # Remote SDN controller expected to be listening on localhost:6633.
    net = Mininet(controller=RemoteController, switch=OVSKernelSwitch)
    c1 = net.addController('c1', controller=RemoteController, ip='127.0.0.1', port=6633)
    # Four hosts, two attached to each switch (see module docstring diagram).
    h1 = net.addHost( 'h1', ip='10.0.0.1' )
    h2 = net.addHost( 'h2', ip='10.0.0.2' )
    h3 = net.addHost( 'h3', ip='10.0.0.3' )
    h4 = net.addHost( 'h4', ip='10.0.0.4' )
    s1 = net.addSwitch( 's1' )
    s2 = net.addSwitch( 's2' )
    # Wire hosts to their switch and link the two switches together.
    s1.linkTo( h1 )
    s1.linkTo( h2 )
    s2.linkTo( h3 )
    s2.linkTo( h4 )
    s1.linkTo( s2 )
    net.build()
    # Pre-populate ARP tables so hosts don't depend on controller flooding.
    net.staticArp()
    c1.start()
    s1.start([c1])
    s2.start([c1])
    # Drop into the interactive Mininet CLI; tear everything down on exit.
    CLI( net )
    net.stop()
if __name__ == '__main__':
    setLogLevel( 'info' )
    alphaNet()
| 19.903226 | 86 | 0.567261 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 451 | 0.365478 |
2fc259b569e3c130b039ed738e6f79be4ca77223 | 6,768 | py | Python | pymap/bytes/__init__.py | BoniLindsley/pymap | b3190d20799a6d342888e51bfc55cdfcbfe3ed26 | [
"MIT"
] | null | null | null | pymap/bytes/__init__.py | BoniLindsley/pymap | b3190d20799a6d342888e51bfc55cdfcbfe3ed26 | [
"MIT"
] | null | null | null | pymap/bytes/__init__.py | BoniLindsley/pymap | b3190d20799a6d342888e51bfc55cdfcbfe3ed26 | [
"MIT"
] | null | null | null | """Defines useful types and utilities for working with bytestrings."""
from __future__ import annotations
import zlib
from abc import abstractmethod, ABCMeta
from collections.abc import Iterable, Sequence
from io import BytesIO
from itertools import chain
from typing import cast, final, Any, Final, TypeVar, SupportsBytes, Union, \
Protocol
__all__ = ['MaybeBytes', 'MaybeBytesT', 'WriteStream', 'Writeable',
'BytesFormat']
#: A bytes object, memoryview, or an object with a ``__bytes__`` method.
MaybeBytes = Union[bytes, bytearray, memoryview, SupportsBytes]
#: A type variable bound to :class:`MaybeBytes`.
MaybeBytesT = TypeVar('MaybeBytesT', bound=MaybeBytes)
_FormatArg = Union[MaybeBytes, int]
class WriteStream(Protocol):
"""Typing protocol indicating the object implements the :meth:`.write`
method.
See Also:
:class:`~asyncio.StreamWriter`, :class:`~typing.BinaryIO`
"""
@abstractmethod
def write(self, data: bytes) -> Any:
"""Defines an abstract method where ``data`` is written to a stream or
buffer.
Args:
data: The data to write.
"""
...
class HashStream(WriteStream):
"""A stream that a :class:`Writeable` can use to generate a
non-cryptographic hash using :func:`zlib.adler32`.
"""
__slots__ = ['_digest']
def __init__(self) -> None:
super().__init__()
self._digest = zlib.adler32(b'')
def write(self, data: bytes) -> None:
self._digest = zlib.adler32(data, self._digest)
def digest(self, data: Writeable = None) -> bytes:
"""Return the digest of the data written to the hash stream.
Args:
data: The data to write before computing the digest.
"""
if data is not None:
data.write(self)
return self._digest.to_bytes(4, 'big')
class Writeable(metaclass=ABCMeta):
"""Base class for types that can be written to a stream."""
__slots__: Sequence[str] = []
@final
def tobytes(self) -> bytes:
"""Convert the writeable object back into a bytestring using the
:meth:`.write` method.
"""
writer = BytesIO()
self.write(writer)
return writer.getvalue()
@classmethod
def empty(cls) -> Writeable:
"""Return a :class:`Writeable` for an empty string."""
return _EmptyWriteable()
@classmethod
def wrap(cls, data: MaybeBytes) -> Writeable:
"""Wrap the bytes in a :class:`Writeable`.
Args:
data: The object to wrap.
"""
return _WrappedWriteable(data)
@classmethod
def concat(cls, data: Iterable[MaybeBytes]) -> Writeable:
"""Wrap the iterable in a :class:`Writeable` that will write each item.
Args:
data: The iterable to wrap.
"""
return _ConcatWriteable(data)
def write(self, writer: WriteStream) -> None:
"""Write the object to the stream, with one or more calls to
:meth:`~WriteStream.write`.
Args:
writer: The output stream.
"""
writer.write(bytes(self))
def __bool__(self) -> bool:
return True
def __len__(self) -> int:
return len(bytes(self))
@abstractmethod
def __bytes__(self) -> bytes:
...
class _EmptyWriteable(Writeable):
__slots__: Sequence[str] = []
def write(self, writer: WriteStream) -> None:
pass
def __bytes__(self) -> bytes:
return b''
def __repr__(self) -> str:
return '<Writeable empty>'
class _WrappedWriteable(Writeable):
__slots__ = ['data']
def __init__(self, data: MaybeBytes) -> None:
if isinstance(data, bytes):
self.data = data
else:
self.data = bytes(data)
def __bytes__(self) -> bytes:
return self.data
def __repr__(self) -> str:
return f'<Writeable {repr(self.data)}>'
class _ConcatWriteable(Writeable):
__slots__ = ['data']
def __init__(self, data: Iterable[MaybeBytes]) -> None:
self.data = list(data)
def write(self, writer: WriteStream) -> None:
for item in self.data:
if isinstance(item, Writeable):
item.write(writer)
else:
writer.write(bytes(item))
def __bytes__(self) -> bytes:
return BytesFormat(b'').join(self.data)
def __repr__(self) -> str:
return f'<Writeable {repr(self.data)}>'
class BytesFormat:
    """Typed helper for %-interpolation and joins that produce bytestrings.

    Similar to the builtin formatting and join operations, but with
    cleaner typing for bytes-producing pipelines.

    Args:
        how: The formatting string or join delimiter to use.
    """

    __slots__ = ['how']

    def __init__(self, how: bytes) -> None:
        super().__init__()
        self.how: Final = how

    def __mod__(self, other: Union[_FormatArg, Iterable[_FormatArg]]) -> bytes:
        """String interpolation, shortcut for :meth:`.format`.

        Args:
            other: The data interpolated into the format string.
        """
        if isinstance(other, bytes):
            return self.format([other])
        if hasattr(other, '__bytes__'):
            return self.format([bytes(cast(SupportsBytes, other))])
        if hasattr(other, '__iter__'):
            return self.format(cast(Iterable[_FormatArg], other))
        return NotImplemented

    @classmethod
    def _fix_format_arg(cls, data: _FormatArg) -> Any:
        # %-style codes accept ints directly; everything else is coerced
        # to bytes first.
        return data if isinstance(data, int) else bytes(data)

    def format(self, data: Iterable[_FormatArg]) -> bytes:
        """String interpolation into the format string.

        Args:
            data: The data interpolated into the format string.

        Examples:
            ::

                BytesFormat(b'Hello, %b!') % b'World'
                BytesFormat(b'%b, %b!') % (b'Hello', b'World')
        """
        return self.how % tuple(map(self._fix_format_arg, data))

    @classmethod
    def _fix_join_arg(cls, data: _FormatArg) -> Any:
        # join() concatenates bytes, so ints are rendered in decimal.
        return b'%d' % data if isinstance(data, int) else bytes(data)

    def join(self, *data: Iterable[_FormatArg]) -> bytes:
        """Iterable join on a delimiter.

        Args:
            data: Iterable of items to join.

        Examples:
            ::

                BytesFormat(b' ').join([b'one', b'two', b'three'])
        """
        return self.how.join(map(self._fix_join_arg, chain(*data)))
| 25.73384 | 79 | 0.599143 | 6,023 | 0.889923 | 0 | 0 | 1,496 | 0.22104 | 0 | 0 | 2,566 | 0.379137 |
2fc47242aa3943850747abcba7e7e472c1fce0f5 | 113 | py | Python | _solutions/pandas/dataframe/pandas_df_sample_a.py | sages-pl/2022-01-pythonsqlalchemy-aptiv | 1d6d856608e9dbe25b139e8968c48b7f46753b84 | [
"MIT"
] | null | null | null | _solutions/pandas/dataframe/pandas_df_sample_a.py | sages-pl/2022-01-pythonsqlalchemy-aptiv | 1d6d856608e9dbe25b139e8968c48b7f46753b84 | [
"MIT"
] | null | null | null | _solutions/pandas/dataframe/pandas_df_sample_a.py | sages-pl/2022-01-pythonsqlalchemy-aptiv | 1d6d856608e9dbe25b139e8968c48b7f46753b84 | [
"MIT"
] | null | null | null |
# Shuffle the whole dataframe (frac=1.0 samples 100% of rows in random
# order), renumber the index, and keep the last 10 rows as the result.
# NOTE(review): relies on `pd` and `DATA` being defined by surrounding
# code/cells -- confirm before running standalone.
df = pd.read_csv(DATA)
df = df.sample(frac=1.0)
df.reset_index(drop=True, inplace=True)
result = df.tail(n=10)
| 16.142857 | 39 | 0.699115 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
2fc4bf739d973fcce88100f4ac6687879d1a4115 | 466 | py | Python | Informatik1/Midterms Prep/midterms hs19/to_binary.py | Queentaker/uzh | 35cccaf910b95d15db21be80c8567eb427202591 | [
"MIT"
] | 8 | 2021-11-21T10:02:08.000Z | 2022-03-15T21:02:02.000Z | Informatik1/Midterms Prep/midterms hs19/to_binary.py | Queentaker/uzh | 35cccaf910b95d15db21be80c8567eb427202591 | [
"MIT"
] | null | null | null | Informatik1/Midterms Prep/midterms hs19/to_binary.py | Queentaker/uzh | 35cccaf910b95d15db21be80c8567eb427202591 | [
"MIT"
def to_binary(number):
    """Return the binary representation of a non-negative integer.

    Args:
        number: Non-negative integer to convert.

    Returns:
        The binary digits of ``number`` as a string without any ``0b``
        prefix, e.g. ``to_binary(35) == '100011'``.
    """
    if number == 0:
        return "0"
    digits = []
    # Peel off the least-significant bit each iteration.  Integer // and %
    # keep arbitrarily large ints exact; the original float division
    # (number/2, (number-1)/2) lost precision beyond 2**53.
    while number >= 1:
        digits.append("1" if number % 2 else "0")
        number //= 2
    return "".join(reversed(digits))


if __name__ == '__main__':
    print(to_binary(35))
    print("\n")
    print(bin(35))
2fc5eb9ac5bb61a25a0a92e01993111c6aae7c68 | 967 | py | Python | coursework1/task6/reducer.py | foundnet/UOE_EP_coursework1 | 25ab8ff2beaa4fbde5a0d4519abb84d8e43cf9d8 | [
"Apache-2.0"
] | null | null | null | coursework1/task6/reducer.py | foundnet/UOE_EP_coursework1 | 25ab8ff2beaa4fbde5a0d4519abb84d8e43cf9d8 | [
"Apache-2.0"
] | null | null | null | coursework1/task6/reducer.py | foundnet/UOE_EP_coursework1 | 25ab8ff2beaa4fbde5a0d4519abb84d8e43cf9d8 | [
"Apache-2.0"
#!/usr/bin/python
# Hadoop-streaming reducer.  Input lines are "<key>\t<counts>" where
# <counts> is one or more comma-separated integers; consecutive lines with
# the same key are merged.  For each key this prints
# "<key>\t<Shannon entropy>" of the count distribution (entropy in bits).
import sys
import math


def _emit_entropy(key, strinfo):
    # Compute and print the Shannon entropy of the comma-separated counts
    # accumulated for `key`.  (list() keeps this correct on Python 3 too,
    # where a bare map() iterator would be exhausted by sum().)
    counts = [int(c) for c in strinfo.split(",")]
    total = sum(counts)
    entropy = 0.0
    for v in counts:
        p = float(v) / total
        entropy -= p * math.log(p, 2)
    print("%s\t%f" % (key, entropy))


prev_key = ""
strinfo = ""
for line in sys.stdin:
    key, info = line.strip().split("\t", 1)
    if key != prev_key:
        # Key changed: flush the previous key's accumulated counts.
        if prev_key != "":
            _emit_entropy(prev_key, strinfo)
        prev_key = key
        strinfo = info.strip()
    else:
        strinfo = strinfo + "," + info.strip()

# Flush the final key (the loop only flushes on a key change).
if prev_key != "":
    _emit_entropy(prev_key, strinfo)
| 25.447368 | 64 | 0.511892 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 52 | 0.053775 |
2fc7dff3416e231ccfed10d4c3e8b24fde9aebea | 200 | py | Python | opmd_viewer/__init__.py | soerenjalas/openPMD-viewer | 1754ade96040920016a79ecc5b7b40597a5a6bf5 | [
"BSD-3-Clause-LBNL"
] | null | null | null | opmd_viewer/__init__.py | soerenjalas/openPMD-viewer | 1754ade96040920016a79ecc5b7b40597a5a6bf5 | [
"BSD-3-Clause-LBNL"
] | 1 | 2016-01-04T18:09:31.000Z | 2016-01-04T18:09:31.000Z | opmd_viewer/__init__.py | soerenjalas/openPMD-viewer | 1754ade96040920016a79ecc5b7b40597a5a6bf5 | [
"BSD-3-Clause-LBNL"
] | null | null | null | # Make the OpenPMDTimeSeries object accessible from outside the package
from .openpmd_timeseries import OpenPMDTimeSeries, FieldMetaInformation

# Public API of the package: the time-series reader and its field metadata.
__all__ = ['OpenPMDTimeSeries', 'FieldMetaInformation']
| 50 | 71 | 0.85 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 112 | 0.56 |
2fc81c5c29c2d5dd633814fb31be3a026447928b | 1,032 | py | Python | utilities/send_custom_mail.py | stefanos90/Utilities | e28c2bcb7ee4480b589d5ca3be64c14cc2a4a8e9 | [
"MIT"
] | 3 | 2019-06-05T04:36:03.000Z | 2020-10-30T00:16:16.000Z | utilities/send_custom_mail.py | stefanos90/Utilities | e28c2bcb7ee4480b589d5ca3be64c14cc2a4a8e9 | [
"MIT"
] | null | null | null | utilities/send_custom_mail.py | stefanos90/Utilities | e28c2bcb7ee4480b589d5ca3be64c14cc2a4a8e9 | [
"MIT"
] | null | null | null | # In order to use this function you have to create a Django project
from django.conf import settings
from django.template import Context
from django.template.loader import render_to_string, get_template
from django.core.mail import EmailMultiAlternatives
def send_custom_mail(ctx, subject_path, template_path, recipients):
    """Render an HTML e-mail from Django templates and send it.

    ``ctx`` is the template context dict, ``subject_path`` and
    ``template_path`` are template names resolved by the Django engine,
    and ``recipients`` is a list of address strings, e.g.
    ``['info@example.com']``.  The sender address comes from
    ``settings.FROM``.
    """
    plain_body = u''
    # A literal 'False' entry sometimes ends up in the recipient list
    # (e.g. from a falsy form value rendered to a string); drop it.
    if 'False' in recipients:
        recipients.remove('False')
    html_template = get_template(template_path, using='django')
    rendered_subject = render_to_string(subject_path, ctx, using='django')
    # Header-injection guard: an e-mail subject must not contain newlines.
    rendered_subject = ''.join(rendered_subject.splitlines())
    html_body = html_template.render(Context(ctx))
    message = EmailMultiAlternatives(
        rendered_subject, plain_body, settings.FROM, recipients)
    message.attach_alternative(html_body, 'text/html')
    message.content_subtype = "html"
    message.send()
| 34.4 | 68 | 0.73062 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 271 | 0.262597 |
2fc89444d96a05f12276b73f9c9d8e359e37e447 | 2,405 | py | Python | farmrisk_needs.py | chum8/farmrisk.v2 | 29d5314db9809bdfb8a12dcdd7d3940fbbe50c6b | [
"MIT"
] | null | null | null | farmrisk_needs.py | chum8/farmrisk.v2 | 29d5314db9809bdfb8a12dcdd7d3940fbbe50c6b | [
"MIT"
] | null | null | null | farmrisk_needs.py | chum8/farmrisk.v2 | 29d5314db9809bdfb8a12dcdd7d3940fbbe50c6b | [
"MIT"
] | null | null | null | # this class exists to extract data from a csv file
# and return it formatted for persistence into a MySql database
# import needed libraries
import csv
from farmrisk_lib import *
# define Product class
class Need():
    """Extracts field-need data from a CSV file and formats it as SQL
    ``INSERT`` statements for the caller to persist into a MySQL database."""

    # ---- data functions ---- #
    def read_needs(self, filename1):
        """Read need rows from ``filename1`` and build one SQL INSERT per row.

        Each row holds a name/grain pair in columns 0/1 followed by the
        n/p/s/z amounts at the column indices ``dft_col_*`` (from
        ``farmrisk_lib``).  A composition bitmask (n=0b1000, p=0b0100,
        s=0b0010, z=0b0001) and its human-readable form are derived from
        the non-zero amounts; row ``i`` gets id ``bin(2**i)``.

        Returns:
            A list of SQL statement strings, or ``None`` if reading fails
            (a best-effort error message is printed in that case).
        """
        try:
            with open(filename1) as f1:
                reader = csv.reader(f1)
                # set True if there is a header row
                header_row = False
                i = 0
                sql = []
                for row in reader:
                    if header_row:
                        header_row = False
                        continue
                    # extract field information from file
                    temp_n = float(row[dft_col_n])
                    temp_p = float(row[dft_col_p])
                    temp_s = float(row[dft_col_s])
                    temp_z = float(row[dft_col_z])
                    composition = 0b0000  # bit version for programming purposes
                    if temp_n > 0:
                        composition |= 0b1000
                    if temp_p > 0:
                        composition |= 0b0100
                    if temp_s > 0:
                        composition |= 0b0010
                    if temp_z > 0:
                        composition |= 0b0001
                    composition_hr = get_composition_hr(composition)
                    # NOTE: this builds a list of SQL commands for the caller
                    # to execute -- nothing is persisted here.
                    # SECURITY: values are concatenated straight into the SQL
                    # text.  Acceptable for trusted CSVs; switch to
                    # parameterized queries if the input can be untrusted.
                    temp = ("insert into needs(id_need, name, grain, n, p, s,"
                            " z, composition, composition_hr) values("
                            + str(bin(2 ** i)) + ",'" + str(row[0]) + "','"
                            + str(row[1]) + "'," + str(temp_n) + ","
                            + str(temp_p) + "," + str(temp_s) + ","
                            + str(temp_z) + "," + str(bin(composition)) + ",'"
                            + composition_hr + "')")
                    sql.append(temp)
                    i += 1
                return sql
        except Exception:
            # Narrowed from a bare `except:` so KeyboardInterrupt/SystemExit
            # still propagate; keep the original best-effort message.
            print("Attempted to load field need information from " + filename1 + " and could not. Did you specify a valid filename?")
| 47.156863 | 288 | 0.495634 | 2,190 | 0.910603 | 0 | 0 | 0 | 0 | 0 | 0 | 794 | 0.330146 |
2fc8e9cafddd4c2893cba84f77225d601ce8cb1e | 3,734 | py | Python | torchrl/agents/ddpg_agent.py | srikarym/torchrl | fee98e78ac1657a2c9a4063dd8d63ba207a121e2 | [
"Apache-2.0"
] | 3 | 2019-02-27T19:00:32.000Z | 2020-07-19T03:18:28.000Z | torchrl/agents/ddpg_agent.py | srikarym/torchrl | fee98e78ac1657a2c9a4063dd8d63ba207a121e2 | [
"Apache-2.0"
] | null | null | null | torchrl/agents/ddpg_agent.py | srikarym/torchrl | fee98e78ac1657a2c9a4063dd8d63ba207a121e2 | [
"Apache-2.0"
] | null | null | null | from copy import deepcopy
import numpy as np
import torch.nn.functional as F
from torch import nn
from torch.optim import Adam
from .base_agent import BaseAgent
from ..policies import OUNoise
from ..models import DDPGActorNet, DDPGCriticNet
def polyak_average_(source, target, tau=1e-3):
    """Blend ``source`` parameters into ``target`` in place.

    Each target parameter becomes ``tau * source + (1 - tau) * target``,
    the soft-update rule used for target networks.

    :param source: Source Module whose parameters are read.
    :param target: Target Module whose parameters are updated in place.
    :param tau: Polyak averaging coefficient.
    :return: None
    """
    assert isinstance(source, nn.Module), \
        '"source" should be of type nn.Module, found "{}"'.format(type(source))
    assert isinstance(target, nn.Module), \
        '"target" should be of type nn.Module, found "{}"'.format(type(target))
    for src_param, tgt_param in zip(source.parameters(), target.parameters()):
        blended = tau * src_param.data + (1.0 - tau) * tgt_param.data
        tgt_param.data.copy_(blended)
class BaseDDPGAgent(BaseAgent):
    """DDPG agent: actor-critic networks with Polyak-averaged target
    copies and Ornstein-Uhlenbeck exploration noise."""

    def __init__(self, observation_space, action_space,
                 actor_lr=1e-4,
                 critic_lr=1e-3,
                 gamma=0.99,
                 tau=1e-2):
        # gamma: discount factor; tau: Polyak coefficient for the soft
        # target-network updates performed in learn().
        super(BaseDDPGAgent, self).__init__(observation_space, action_space)
        self.actor = DDPGActorNet(observation_space.shape[0],
                                  action_space.shape[0], 256)
        self.target_actor = deepcopy(self.actor)
        self.actor_optim = Adam(self.actor.parameters(), lr=actor_lr)
        self.critic = DDPGCriticNet(observation_space.shape[0],
                                    action_space.shape[0], 256)
        self.target_critic = deepcopy(self.critic)
        self.critic_optim = Adam(self.critic.parameters(), lr=critic_lr)
        self.gamma = gamma
        self.tau = tau
        self.noise = OUNoise(self.action_space)
        # Internal vars
        self._step = 0  # number of act() calls since the last reset()

    @property
    def models(self):
        # All four networks, e.g. for moving to a device or toggling modes.
        return [
            self.actor, self.target_actor,
            self.critic, self.target_critic
        ]

    @property
    def checkpoint(self):
        # Only the online networks are saved; target copies are rebuilt
        # from them when the checkpoint is loaded (see setter below).
        return {
            'actor': self.actor.state_dict(),
            'critic': self.critic.state_dict(),
        }

    @checkpoint.setter
    def checkpoint(self, cp):
        self.actor.load_state_dict(cp['actor'])
        self.critic.load_state_dict(cp['critic'])
        self.target_actor = deepcopy(self.actor)
        self.target_critic = deepcopy(self.critic)

    def act(self, obs, **kwargs):
        """Return a noisy, clipped action for ``obs`` as a column vector."""
        obs_tensor = self.obs_to_tensor(obs)
        action = self.actor(obs_tensor)
        action = action.cpu().detach().numpy()
        # Time-dependent OU noise for exploration.
        action = self.noise.get_action(action, self._step)
        action = self.clip_action(action)
        self._step += 1
        return np.expand_dims(action, axis=1)

    def clip_action(self, action: np.ndarray):
        """Map an action from [-1, 1] into the env's bounds and clip."""
        low_bound = self.action_space.low
        upper_bound = self.action_space.high
        action = low_bound + (action + 1.0) * 0.5 * (upper_bound - low_bound)
        action = np.clip(action, low_bound, upper_bound)
        return action

    def learn(self, obs, action, reward, next_obs, done, **kwargs):
        """One DDPG update; returns (actor_loss, critic_loss) as floats."""
        # Deterministic policy gradient: maximize Q(s, pi(s)).
        actor_loss = - self.critic(obs, self.actor(obs)).mean()
        next_action = self.target_actor(next_obs).detach()
        current_q = self.critic(obs, action)
        # Bellman target from the target networks; zeroed at terminal states.
        target_q = reward + (1.0 - done.float()) * self.gamma * self.target_critic(next_obs, next_action)  # pylint: disable=line-too-long
        critic_loss = F.mse_loss(current_q, target_q.detach())
        self.actor_optim.zero_grad()
        actor_loss.backward()
        self.actor_optim.step()
        self.critic_optim.zero_grad()
        critic_loss.backward()
        self.critic_optim.step()
        # Soft-update the target networks toward the online ones.
        polyak_average_(self.actor, self.target_actor, self.tau)
        polyak_average_(self.critic, self.target_critic, self.tau)
        return actor_loss.detach().cpu().item(), \
            critic_loss.detach().cpu().item()

    def reset(self):
        """Reset exploration noise and the internal step counter."""
        self.noise.reset()
        self._step = 0
| 30.357724 | 134 | 0.670327 | 2,825 | 0.756561 | 0 | 0 | 493 | 0.13203 | 0 | 0 | 355 | 0.095072 |
2fc9fb68210db21c95b92026433909a2cce6b7a9 | 934 | py | Python | model.py | Pontohi/GoIndex | 6727da99960c540512496e5647665535317b6a83 | [
"MIT"
] | null | null | null | model.py | Pontohi/GoIndex | 6727da99960c540512496e5647665535317b6a83 | [
"MIT"
] | null | null | null | model.py | Pontohi/GoIndex | 6727da99960c540512496e5647665535317b6a83 | [
"MIT"
] | null | null | null | #A template for when we actually build the model.
import numpy as np
from sklearn.model_selection import train_test_split
from tensorflow.keras.layers import Dense, LSTM, Dropout
from tensorflow.keras import Sequential

categories = [] #List out category string names here
reproducibility = 7 #Constant seed for reproducibility
np.random.seed(reproducibility)
#Load the data here; probably pickle or something like that
# NOTE(review): `x` and `y` are placeholders -- this template does not run
# until the data-loading step above defines them.
train_x,test_x,train_y,test_y = train_test_split(x,y,
    test_size=0.2,random_state=reproducibility)

model = Sequential()
#Layers will go here
#Compiled given that we're working with categorization.
# Final softmax layer is sized to the number of categories.
model.add(Dense(len(categories),activation="softmax"))
model.compile(optimizer="adam",
    loss="categorical_crossentropy",metrics=['accuracy'])
#--
bsize=64
epochs=1000
#--
model.fit(x=train_x,y=train_y, verbose=1,
    validation_data=(test_x,test_y),batch_size=bsize,epochs=epochs)
2fca64024836c385ff3a0a64305e1d0f1eaa2ba8 | 6,539 | py | Python | tf_coder/value_search/search_space_from_weight.py | hstrohm/PyTorch-Coder-cheat | de201c28849549c53010603a0c87482973729cfd | [
"Apache-2.0"
] | 245 | 2020-08-24T22:50:18.000Z | 2022-03-17T18:25:39.000Z | tf_coder/value_search/search_space_from_weight.py | hstrohm/PyTorch-Coder | 2159a5561d8b71d5539e299cf0c4b77eb305df8a | [
"Apache-2.0"
] | 4 | 2020-09-09T09:41:05.000Z | 2021-12-23T02:55:33.000Z | tf_coder/value_search/search_space_from_weight.py | hstrohm/PyTorch-Coder | 2159a5561d8b71d5539e299cf0c4b77eb305df8a | [
"Apache-2.0"
] | 31 | 2020-08-26T20:53:36.000Z | 2022-01-24T22:04:42.000Z | # Copyright 2021 The TF-Coder Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
"""Computes the size of value search's search space."""
import collections
import functools
import operator
import os
import sys
from absl import app
from absl import flags
from tf_coder import tf_coder_utils
from tf_coder import tf_functions
from tf_coder.benchmarks import all_benchmarks
from tf_coder.natural_language import description_handler_factory
from tf_coder.value_search import value as value_module
from tf_coder.value_search import value_search
from tf_coder.value_search import value_search_settings as settings_module
FLAGS = flags.FLAGS
flags.DEFINE_string('benchmark_name', 'google_02',
'The name of a benchmark to analyze.')
flags.DEFINE_multi_string('settings',
[],
'Settings to override the defaults.')
# Inspired by https://stackoverflow.com/a/45669280/9589593.
class SuppressPrint(object):
"""A context manager for suppressing print() calls temporarily."""
def __enter__(self):
self._old_stdout = sys.stdout
sys.stdout = open(os.devnull, 'w')
def __exit__(self, exc_type, exc_val, exc_tb):
del exc_type, exc_val, exc_tb
sys.stdout.close()
sys.stdout = self._old_stdout
def compute_search_space_size(benchmark, settings, description_handler):
"""Computes and prints the size of the search space.
This counts the total number of expressions with weight at most max_weight.
The weights come from the benchmark (for constants and inputs) and the
description handler (for determining the op weights). Distinct expressions
will be counted separately even if they evaluate to the same value, unlike in
TF-Coder's value_search algorithm which does value-based pruning.
Args:
benchmark: The Benchmark object defining the problem to analyze.
settings: A Settings object containing settings for value search.
description_handler: The DescriptionHandler used, which can modify weights
of operations.
Returns:
Nothing. All output is printed to stdout.
"""
max_weight = settings.max_weight
print('Computing search space.\n'
'Benchmark name: {}\n'
'Description handler: {}\n'
'Max weight: {}'.format(
benchmark.name, description_handler, max_weight))
# TODO(kshi): Update to load the tensor features model/config.
operations = value_search.get_reweighted_operations(benchmark,
settings,
description_handler,
tensor_model=None,
tensor_config=None)
# These loops are not the most efficient, but it doesn't really matter.
print('\nFound {} operations.'.format(len(operations)))
print()
for weight in range(1, max(op.weight for op in operations) + 1):
print('# operations with weight {}: {}'.format(
weight, sum(1 for op in operations if op.weight == weight)))
print()
for arity in range(1, max(op.num_args for op in operations) + 1):
print('# operations with arity {}: {}'.format(
arity, sum(1 for op in operations if op.num_args == arity)))
output_value = value_module.OutputValue(benchmark.examples[0].output)
values_by_weight = [collections.OrderedDict()
for _ in range(max_weight + 1)]
constant_operation = None
for operation in operations:
if operation.name == tf_functions.CONSTANT_OPERATION_NAME:
constant_operation = operation
break
with SuppressPrint():
value_search._add_constants_and_inputs_and_print( # pylint: disable=protected-access
values_by_weight, benchmark, output_value, constant_operation, settings)
num_expressions_with_weight = [len(values_with_weight)
for values_with_weight in values_by_weight]
print()
max_weight_with_initial_value = max(w for w in range(max_weight + 1)
if num_expressions_with_weight[w])
for weight in range(1, max_weight_with_initial_value + 1):
print('# initial values with weight {}: {}'.format(
weight, num_expressions_with_weight[weight]))
for total_weight in range(2, max_weight + 1):
for operation in operations:
# All operations should have strictly positive weight and num_args.
op_weight = operation.weight
op_arity = operation.num_args
if total_weight - op_weight < op_arity:
continue
# Partition `total_weight - op_weight` into `op_arity` positive pieces.
# Equivalently, partition `total_weight - op_weight - op_arity` into
# `op_arity` nonnegative pieces.
for partition in tf_coder_utils.generate_partitions(
total_weight - op_weight - op_arity, op_arity):
arg_weights = [part + 1 for part in partition]
num_expressions_with_weight[total_weight] += functools.reduce(
operator.mul,
(num_expressions_with_weight[w] for w in arg_weights))
print()
for weight in range(1, max_weight + 1):
print('# expressions with weight exactly {}: {}'.format(
weight, num_expressions_with_weight[weight]))
print()
for weight in range(1, max_weight + 1):
print('# expressions with weight up to {}: {}'.format(
weight, sum(num_expressions_with_weight[:weight + 1])))
def main(unused_argv):
settings = settings_module.from_list(FLAGS.settings)
description_handler = description_handler_factory.create_handler(
settings.description_handler_name)
benchmark = all_benchmarks.find_benchmark_with_name(FLAGS.benchmark_name)
if not benchmark:
raise ValueError('Unknown benchmark: {}'.format(FLAGS.benchmark_name))
compute_search_space_size(benchmark=benchmark,
settings=settings,
description_handler=description_handler)
if __name__ == '__main__':
app.run(main)
| 38.922619 | 89 | 0.694296 | 335 | 0.051231 | 0 | 0 | 0 | 0 | 0 | 0 | 2,362 | 0.361217 |
2fcb561133a4b45129e3d2727e197182d5c62b3e | 227 | py | Python | train.py | tamasandacian/twitter-sentiment-analysis | 154083feae49d4ff159aee6cfae0df6807a662b3 | [
"MIT"
] | 1 | 2019-08-08T05:08:42.000Z | 2019-08-08T05:08:42.000Z | train.py | tamasandacian/twitter-sentiment-analysis | 154083feae49d4ff159aee6cfae0df6807a662b3 | [
"MIT"
] | null | null | null | train.py | tamasandacian/twitter-sentiment-analysis | 154083feae49d4ff159aee6cfae0df6807a662b3 | [
"MIT"
] | null | null | null | # Create FastText model using from raw train data
import fastText

# Path to the labeled training tweets (fastText supervised format).
TRAIN_FILE = './datasets/raw_data/tweets.train'
# Train a supervised classifier using up-to-trigram word features,
# then save the model for later sentiment inference.
su_model = fastText.train_supervised(input=TRAIN_FILE, wordNgrams=3)
su_model.save_model('model_sentiment.bin')
2fcc935d3a1ea6c475bcf83feda813ece95347aa | 3,794 | py | Python | test.py | Carmen0645/stylegan-jt | 846e54d1befd6446ee23621aa25e8b0f09deb106 | [
"MIT"
] | 1 | 2022-01-19T12:26:04.000Z | 2022-01-19T12:26:04.000Z | test.py | Carmen0645/stylegan-jt | 846e54d1befd6446ee23621aa25e8b0f09deb106 | [
"MIT"
] | null | null | null | test.py | Carmen0645/stylegan-jt | 846e54d1befd6446ee23621aa25e8b0f09deb106 | [
"MIT"
] | null | null | null | import numpy as np
import math
import jittor as jt
from model.model import StyledGenerator
jt.flags.use_cuda = True
import argparse
import pathlib
import os
@jt.no_grad()
def style_mixing(generator, step, mean_style, n_source, n_target):
    """Build a style-mixing grid: top row = source images, left column =
    target images, interior = each target re-rendered with the source
    styles mixed in over layer range (0, 1).  Returns all tiles
    concatenated along the batch axis; the first tile is a blank
    placeholder for the grid's top-left corner."""
    source_code = jt.randn(n_source, 512)
    target_code = jt.randn(n_target, 512)
    shape = 4 * 2 ** step  # output resolution at this progressive step
    alpha = 1
    # Blank (all -1, i.e. black after denormalization) corner tile.
    images = [jt.ones((1, 3, shape, shape)) * -1]
    source_image = generator(
        source_code, step=step, alpha=alpha, mean_style=mean_style, style_weight=0.7
    )
    target_image = generator(
        target_code, step=step, alpha=alpha, mean_style=mean_style, style_weight=0.7
    )
    images.append(source_image)
    # print(source_code.shape)
    for i in range(n_target):
        # Render one row: target i's code broadcast over all source codes.
        image = generator(
            [target_code[i].unsqueeze(0).repeat(n_source, 1), source_code],
            step=step,
            alpha=alpha,
            mean_style=mean_style,
            style_weight=0.7,
            mixing_range=(0, 1),
        )
        images.append(target_image[i].unsqueeze(0))
        images.append(image)
    images = jt.concat(images, 0)
    # print(images.shape)
    return images
def inference(args):
    """Load a StyledGenerator checkpoint, then save (a) a latent-space
    interpolation image sequence into args.output_dir and (b) sample /
    style-mixing grids into args.stylemixing_dir."""
    #init
    generator = StyledGenerator(512)
    ckpt = jt.load(args.ckpt)
    generator.load_state_dict(ckpt)
    generator.eval()
    # Average the style vector over 10 batches of 1024 random latents.
    mean_style = None
    for i in range(10):
        with jt.no_grad():
            style = generator.mean_style(jt.randn(1024,512))
        if mean_style is None:
            mean_style = style
        else:
            mean_style += style
    mean_style /= 10
    # Progressive-growing step for the requested resolution (4 * 2**step).
    step = int(math.log(args.resolution, 2)) - 2
    first_code = jt.randn(50, 512)
    last_code = jt.randn(50, 512)
    inter_times = 2000
    with jt.no_grad():
        first_img = generator(
            first_code,
            step=step,
            alpha=1,
            mean_style=mean_style,
            style_weight=0.7,
        )
    # NOTE(review): this renders from first_code again -- last_code was
    # probably intended; also first_img/last_img are never used below.
    with jt.no_grad():
        last_img = generator(
            first_code,
            step=step,
            alpha=1,
            mean_style=mean_style,
            style_weight=0.7,
        )
    # Per-frame latent increment for the linear interpolation.
    delta_code = (last_code - first_code)/inter_times
    pathlib.Path(args.output_dir).mkdir(parents=True,exist_ok=True)
    pathlib.Path(args.stylemixing_dir).mkdir(parents=True,exist_ok=True)
    for i in range(inter_times):
        image = generator(
            first_code + delta_code*i,
            step=step,
            alpha=1,
            mean_style=mean_style,
            style_weight=0.7,
            # mixing_range=(0, 1),
        )
        jt.save_image(
            image, os.path.join(args.output_dir,f'sample_{i}.png'), nrow=10, normalize=True, range=(-1, 1)
        )
    # A standalone 5x5 random-sample sheet.
    with jt.no_grad():
        img = generator(
            jt.randn(25, 512),
            step=step,
            alpha=1,
            mean_style=mean_style,
            style_weight=0.7,
        )
    jt.save_image(img, os.path.join(args.stylemixing_dir,f'sample.jpg'), nrow=5, normalize=True, range=(-1, 1))
    print(img[0,:,0,0])
    # 20 style-mixing grids (5 sources x 10 targets each).
    for j in range(20):
        img = style_mixing(generator, step, mean_style, 5, 10)
        jt.save_image(
            img, os.path.join(args.stylemixing_dir,f'sample_mixing_{j}.jpg'), nrow=5 + 1, normalize=True, range=(-1, 1)
        )
if __name__ == '__main__':
    # Command-line entry point: checkpoint to load, output resolution,
    # and the two destination directories for generated images.
    parser = argparse.ArgumentParser()
    parser.add_argument('--ckpt', default='./checkpoints/symbol_80w_ckpt/800000.model',type=str,help='checkpoint path')
    parser.add_argument('--resolution',default=128,type=int)
    parser.add_argument('--output_dir',default='./output/interpolation_80_80w',type=str)
    parser.add_argument('--stylemixing_dir',default='./output/style_mixing_80_80w',type=str)
    args = parser.parse_args()
    inference(args)
| 30.111111 | 119 | 0.603321 | 0 | 0 | 0 | 0 | 1,016 | 0.267791 | 0 | 0 | 315 | 0.083026 |
2fcd9f0a4668f30c4930f01c38dd7a96779a0ba3 | 195 | py | Python | snippets/urls.py | rudra012/django_rest | bbfc0535cefcf20d1b788aab0336c090d58c506d | [
"MIT"
] | null | null | null | snippets/urls.py | rudra012/django_rest | bbfc0535cefcf20d1b788aab0336c090d58c506d | [
"MIT"
] | null | null | null | snippets/urls.py | rudra012/django_rest | bbfc0535cefcf20d1b788aab0336c090d58c506d | [
"MIT"
] | null | null | null | from django.conf.urls import url
from api.snippets import snippets_api
urlpatterns = [
url(r'^$', snippets_api.snippet_list),
url(r'^(?P<pk>[0-9]+)/$', snippets_api.snippet_detail),
]
| 19.5 | 59 | 0.687179 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 25 | 0.128205 |
2fcf3382e080f3b77010b56e004724a5a0e7a4f8 | 1,806 | py | Python | models/peak.py | anva-kn/ramanflow | 0a8852a0a8d57d97e5ccd011bc6bc8659ecd666c | [
"MIT"
] | null | null | null | models/peak.py | anva-kn/ramanflow | 0a8852a0a8d57d97e5ccd011bc6bc8659ecd666c | [
"MIT"
] | 1 | 2021-12-05T06:40:27.000Z | 2021-12-07T07:18:18.000Z | models/peak.py | anva-kn/ramanflow | 0a8852a0a8d57d97e5ccd011bc6bc8659ecd666c | [
"MIT"
] | null | null | null | import numpy as np
from dataclasses import dataclass, field
from spectrum_component import SpectrumComponent
from scipy.optimize import minimize
from typing import List, Optional
@dataclass
class Peak(SpectrumComponent):
    """A single spectral peak whose shape parameters (beta) are fitted to
    data with Nelder-Mead minimization of the component's loss."""

    # Fitted coefficients / fitted curve; populated by fit(), excluded from
    # the generated __init__.  (Name-mangled to _Peak__beta_fit etc.)
    __beta_fit: np.ndarray = field(init=False)
    __yhat_fit: np.ndarray = field(init=False)

    def fit(self, x_data, y_data, range_of_indices=None):
        """Fit the peak's coefficients to the data.

        Parameters
        ----------
        x_data
            Abscissa values (presumably a 1-D array -- TODO confirm).
        y_data
            Ordinate values matching ``x_data``.
        range_of_indices
            Optional ``(start, stop)`` index pair; when given, only that
            slice of the data is used for the fit.

        Returns
        -------
        The fitted curve evaluated over all of ``x_data``.
        """
        # find the coefficients beta that best fit the function
        # TODO: add verbose option
        if range_of_indices is not None:
            peak_pos = list(range(range_of_indices[0], range_of_indices[1], 1))
            result = minimize(self.loss_fit_beta, self.get_beta_init(), args=(x_data[peak_pos], y_data[peak_pos]),
                              method='Nelder-Mead',
                              tol=1e-12)
        else:
            result = minimize(self.loss_fit_beta, self.get_beta_init(), args=(x_data, y_data),
                              method='Nelder-Mead',
                              tol=1e-12)
        # params, params_covariance = curve_fit(self.fit_fun, x_data, y_rec, p0=self.__beta_init)
        self.__beta_fit = result.x
        self.__loss_fit = result.fun
        self.__yhat_fit = self.fit_fun(x_data, self.__beta_fit)
        return self.__yhat_fit

    def get_beta_init(self):
        # Initial guess is provided by the parent SpectrumComponent.
        return super().get_beta_init()

    def get_beta_size(self):
        return super().get_beta_size()

    def get_beta_fit(self):
        # Coefficients from the most recent fit() call.
        return self.__beta_fit

    def get_loss_fit(self):
        # Final loss value from the most recent fit() call.
        return self.__loss_fit

    def get_yhat_fit(self):
        # Fitted curve from the most recent fit() call.
        return self.__yhat_fit

    def reset(self):
        # Discard previous fit results.
        self.__beta_fit = np.zeros(1)
        self.__yhat_fit = np.zeros(1)
2fcfdfc34dd0b46d25701e6315fbd7285e1e2ee4 | 555 | py | Python | backend/media/migrations/0010_auto_20210613_0444.py | LakesideMiners/rt911 | ce2eb82efa6eedbaabf16f567b3db1d8fb808809 | [
"Unlicense"
] | 8 | 2020-07-25T04:54:44.000Z | 2022-01-31T16:08:53.000Z | backend/media/migrations/0010_auto_20210613_0444.py | LakesideMiners/rt911 | ce2eb82efa6eedbaabf16f567b3db1d8fb808809 | [
"Unlicense"
] | 18 | 2020-07-20T00:53:19.000Z | 2022-03-03T21:48:17.000Z | backend/media/migrations/0010_auto_20210613_0444.py | LakesideMiners/rt911 | ce2eb82efa6eedbaabf16f567b3db1d8fb808809 | [
"Unlicense"
] | 2 | 2021-10-29T23:19:02.000Z | 2021-11-06T06:44:54.000Z | # Generated by Django 3.1.12 on 2021-06-13 04:44
from django.db import migrations, models
class Migration(migrations.Migration):
    """Replace Collection.media_item with a many-to-many ``media`` field
    whose choices are limited to approved Media objects."""

    dependencies = [
        ('media', '0009_auto_20210611_1441'),
    ]

    operations = [
        migrations.RemoveField(
            model_name='collection',
            name='media_item',
        ),
        migrations.AddField(
            model_name='collection',
            name='media',
            field=models.ManyToManyField(blank=True, limit_choices_to={'approved': True}, to='media.Media'),
        ),
    ]
| 24.130435 | 108 | 0.592793 | 461 | 0.830631 | 0 | 0 | 0 | 0 | 0 | 0 | 146 | 0.263063 |
2fd104b4ffadfde3fd53dbf7ef3b1e8c8f59cfc2 | 547 | py | Python | aioinflux/compat.py | slazarov/aioinflux | ba3fa980e528f4edad6e31544942b55fe2a9d23e | [
"MIT"
] | null | null | null | aioinflux/compat.py | slazarov/aioinflux | ba3fa980e528f4edad6e31544942b55fe2a9d23e | [
"MIT"
] | null | null | null | aioinflux/compat.py | slazarov/aioinflux | ba3fa980e528f4edad6e31544942b55fe2a9d23e | [
"MIT"
] | null | null | null | import warnings
no_pandas_warning = "Pandas/Numpy is not available. Support for 'dataframe' mode is disabled."
no_redis_warning = "Redis dependencies not available. Support for caching is disabled."
try:
import pandas as pd
import numpy as np
except ModuleNotFoundError:
pd = None
np = None
warnings.warn(no_pandas_warning)
try:
import aioredis
import lz4.block as lz4
except ModuleNotFoundError:
aioredis = None
lz4 = None
__all__ = ['no_pandas_warning', 'no_redis_warning', 'pd', 'np', 'aioredis', 'lz4']
| 24.863636 | 94 | 0.723949 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 202 | 0.369287 |
2fd243b0141adccef1b512ff16e5f007093b92d7 | 1,691 | py | Python | station/controller/handlers/read_letters_handler.py | GLO3013-E4/COViRondelle2021 | f8d23903d0a906e93a7698a555d90ebecdf83969 | [
"MIT"
] | null | null | null | station/controller/handlers/read_letters_handler.py | GLO3013-E4/COViRondelle2021 | f8d23903d0a906e93a7698a555d90ebecdf83969 | [
"MIT"
] | null | null | null | station/controller/handlers/read_letters_handler.py | GLO3013-E4/COViRondelle2021 | f8d23903d0a906e93a7698a555d90ebecdf83969 | [
"MIT"
] | null | null | null | import json
import rospy
from std_msgs.msg import String
from handlers.handler import Handler
from mapping.command_panel import CommandPanel
from mapping.resistance import Resistance
class ReadLettersHandler(Handler):
    """ROS handler that requests the command-panel letters to be read,
    waits until all 9 are known, then derives the panel corner sequence."""

    def initialize(self):
        # Subscribe to the letters feed; read_letters() is the callback.
        self.sub = rospy.Subscriber('letters', String, self.read_letters) # TODO: check the topic name
        self.is_finished = False

    def handle(self, handled_data=None):
        """Block until all 9 letters are read; return ``handled_data``
        with 'letters' and the three panel 'corners' filled in."""
        self.initialize()
        command_panel = CommandPanel()
        command_panel.set_resistance(handled_data['resistance'])
        # handled_data["calculate_pucks_pub"].publish(True)
        self.handled_data = handled_data
        # Trigger the first read request.
        handled_data["read_letters_pub"].publish(True)
        # NOTE(review): busy-wait spins the CPU until the subscriber
        # callback flips is_finished; a short sleep inside the loop would
        # be gentler -- confirm before changing timing behavior.
        while not self.is_finished:
            pass
        rounded_resistance, _ = Resistance(handled_data["resistance"]).get_resistance_and_colors()
        handled_data["letters"] = self.letters
        command_panel.set_mapped_letters(self.letters)
        command_panel.set_resistance(rounded_resistance)
        # Walk the corner chain starting from the first corner letter.
        first_corner = command_panel.find_first_corner_letter()
        second_corner = first_corner.get_next_letter()
        third_corner = second_corner.get_next_letter()
        handled_data["corners"] = [first_corner.value, second_corner.value, third_corner.value]
        return handled_data

    def read_letters(self, data):
        """Subscriber callback: parse the JSON letter map; finished once
        9 letters are known, otherwise publish another read request."""
        letters = json.loads(data.data)
        self.letters = letters
        rospy.logerr("READ LETTERS " + str(self.letters))
        self.is_finished = len(letters) == 9
        if not self.is_finished:
            self.handled_data["read_letters_pub"].publish(True)

    def unregister(self):
        # Tear down the ROS subscription created in initialize().
        self.sub.unregister()
2fd2bfab59ea98625232ec8c5527f3a9ee521861 | 28 | py | Python | python/testData/editing/enterAfterColonOfCaseClauseWithoutBody.py | 06needhamt/intellij-community | 63d7b8030e4fdefeb4760e511e289f7e6b3a5c5b | [
"Apache-2.0"
] | null | null | null | python/testData/editing/enterAfterColonOfCaseClauseWithoutBody.py | 06needhamt/intellij-community | 63d7b8030e4fdefeb4760e511e289f7e6b3a5c5b | [
"Apache-2.0"
] | null | null | null | python/testData/editing/enterAfterColonOfCaseClauseWithoutBody.py | 06needhamt/intellij-community | 63d7b8030e4fdefeb4760e511e289f7e6b3a5c5b | [
"Apache-2.0"
] | null | null | null | match x:
case 42:<caret> | 14 | 19 | 0.607143 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
2fd2f559dbfd281655f2be8fc7d8f814570f9fcc | 76 | py | Python | Fundamentos/variables.py | ijchavez/python | bccd94a9bee90125e2be27b0355bdaedb0ae9d19 | [
"Unlicense"
] | null | null | null | Fundamentos/variables.py | ijchavez/python | bccd94a9bee90125e2be27b0355bdaedb0ae9d19 | [
"Unlicense"
] | null | null | null | Fundamentos/variables.py | ijchavez/python | bccd94a9bee90125e2be27b0355bdaedb0ae9d19 | [
"Unlicense"
] | null | null | null | x = 5
y = 3
# Sum of the two values defined above.
z = x + y
print(x)
print(y)
print(x + y)
print(z)
# w aliases the value of z.
w = z
print(w)
2fd317fc787755e3ef71bee2cc7d489378eb6f71 | 4,047 | py | Python | gisimulation/interferometer/gratings.py | mariabuechner/gi_simulation | 5dafd4652836a3867fff219ab6b247310e9a501f | [
"MIT"
] | null | null | null | gisimulation/interferometer/gratings.py | mariabuechner/gi_simulation | 5dafd4652836a3867fff219ab6b247310e9a501f | [
"MIT"
] | 16 | 2017-10-18T14:51:45.000Z | 2018-03-13T10:14:19.000Z | gisimulation/interferometer/gratings.py | mariabuechner/gisimulation | 5dafd4652836a3867fff219ab6b247310e9a501f | [
"MIT"
] | null | null | null | """
@author: buechner_m <maria.buechner@gmail.com>
"""
import sys
sys.path.append('..') # To allow importing from neighbouring folder
import simulation.materials as materials
import logging
logger = logging.getLogger(__name__)
class Grating(object):
    """Geometric/physical description of a single interferometer grating.

    Parameters
    ==========
    pitch: grating pitch in [um], in x-direction
    material: grating material
    design_energy: design x-ray energy [keV]
    height: grating height in [um], in z-direction; default=0 means
        "not specified"
    duty_cycle: ratio of structure width to pitch; default=0.5
    shape: shape of grating, choices = ['flat','circular'], default='flat'
    """
    def __init__(self, pitch, material, design_energy, height=0,
                 duty_cycle=0.5, shape='flat'):
        # Store all geometry/material parameters as-is (units noted above).
        (self.pitch,
         self.material,
         self.design_energy,
         self.height,
         self.duty_cycle,
         self.shape) = (pitch, material, design_energy,
                        height, duty_cycle, shape)
class PhaseGrating(Grating):
    """
    Child class from Grating class, adds phase properties.

    Either a grating height or a required phase shift must be given; the
    missing one is computed from the other via the material properties at
    the design energy. If both are given, the height wins and the phase
    shift is recomputed from it.

    Parameters
    ==========
    phase_shift: required phase shift at given design energy; default=0 (no
    shift specified)

    Raises
    ======
    ValueError: if neither height nor phase shift is specified. (Was a bare
    Exception; ValueError is more precise and remains catchable by existing
    ``except Exception`` handlers.)
    """
    def __init__(self, pitch, material, design_energy, height=0,
                 duty_cycle=0.5, shape='flat', phase_shift=0):
        # call init from parent class
        super(PhaseGrating, self).__init__(pitch, material, design_energy,
                                           height, duty_cycle, shape)
        # Calculate height or phase shift respectively
        if self.height:
            # Height given: derive the resulting phase shift at design energy.
            self.phase_shift = materials.height_to_shift(self.height,
                                                         self.material,
                                                         self.design_energy)
        elif phase_shift:
            # Phase shift given: derive the required structure height [um].
            self.height = materials.shift_to_height(phase_shift, self.material,
                                                    self.design_energy)  # [um]
            self.phase_shift = phase_shift
        else:
            raise ValueError('Neither height of grating nor phase shift are '
                             'defined.')
class AbsorptionGrating(Grating):
    """
    Child class from Grating class, adds absorption properties.

    Either a grating height or a required absorption must be given; the
    missing one is computed from the other via the material properties at
    the design energy. If both are given, the height wins and the
    absorption is recomputed from it.

    Parameters
    ==========
    absorption: required percentage of absorbed x-rays at design energy;
    default=0 (no absorption specified)

    Raises
    ======
    ValueError: if neither height nor absorption is specified. (Was a bare
    Exception; ValueError is more precise and remains catchable by existing
    ``except Exception`` handlers.)
    """
    def __init__(self, pitch, material, design_energy, height=0,
                 duty_cycle=0.5, shape='flat', absorption=0):
        # call init from parent class
        super(AbsorptionGrating, self).__init__(pitch, material,
                                                design_energy, height,
                                                duty_cycle, shape)
        # Calculate height or absorption respectively
        if self.height:
            # Height given: derive absorbed percentage at design energy [%].
            self.absorption = materials.height_to_absorption(
                self.height, self.material, self.design_energy)  # [%]
        elif absorption:
            # Absorption given: derive required structure height [um].
            self.height = materials.absorption_to_height(
                absorption, self.material, self.design_energy)  # [um]
            self.absorption = absorption  # [%]
        else:
            raise ValueError('Neither height of grating nor absorption are '
                             'defined.')
| 32.376 | 79 | 0.531999 | 3,809 | 0.941191 | 0 | 0 | 0 | 0 | 0 | 0 | 1,591 | 0.393131 |
2fd399ef21d1427fd4d75eb210dc4fb1e1e0b5ec | 152 | py | Python | Exercicios/Todos/ex049.py | Edson921/exerciciosResolvidos | 72a3089f4848650c62ac0dd876abf5695a64525a | [
"MIT"
] | null | null | null | Exercicios/Todos/ex049.py | Edson921/exerciciosResolvidos | 72a3089f4848650c62ac0dd876abf5695a64525a | [
"MIT"
] | null | null | null | Exercicios/Todos/ex049.py | Edson921/exerciciosResolvidos | 72a3089f4848650c62ac0dd876abf5695a64525a | [
"MIT"
] | null | null | null | #Calculadora de tabuada
# Read the number whose multiplication table (tabuada) should be printed.
n = int(input('Deseja ver tabuada de que numero?'))
# Rows 1 through 12 (range end is exclusive).
for c in range(1, 13):
print('{} X {:2}= {} '. format(n, c, n * c))
| 30.4 | 51 | 0.572368 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 74 | 0.486842 |
2fd4591d238347442d7ceffec0998337ad85bf70 | 362 | py | Python | rede/eevc/models/RegistroTotalizadorMatriz.py | marcioinfo/edi_reader | 9d19cb3fa462c4eb91a0a2f983e12e3de1b596eb | [
"MIT"
] | null | null | null | rede/eevc/models/RegistroTotalizadorMatriz.py | marcioinfo/edi_reader | 9d19cb3fa462c4eb91a0a2f983e12e3de1b596eb | [
"MIT"
] | null | null | null | rede/eevc/models/RegistroTotalizadorMatriz.py | marcioinfo/edi_reader | 9d19cb3fa462c4eb91a0a2f983e12e3de1b596eb | [
"MIT"
] | null | null | null | COLUMNS = [
'tipo_registro',
'nro_pv_matriz',
'vl_total_bruto',
'qtde_cvnsu',
'vl_total_rejeitado',
'vl_total_rotativo',
'vl_total_parcelado_sem_juros',
'vl_total_parcelado_iata',
'vl_total_dolar',
'vl_total_desconto',
'vl_total_liquido',
'vl_total_gorjeta',
'vl_total_tx_embarque',
'qtde_cvnsu_acatados'
] | 22.625 | 35 | 0.676796 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 266 | 0.734807 |
2fd57a70c9f8b7c42925422d2f16850567876bbd | 5,607 | py | Python | openslides_backend/action/action_handler.py | r-peschke/openslides-backend | 83d0dab68bb914f06a0f50cffe23fc10ca45376f | [
"MIT"
] | null | null | null | openslides_backend/action/action_handler.py | r-peschke/openslides-backend | 83d0dab68bb914f06a0f50cffe23fc10ca45376f | [
"MIT"
] | null | null | null | openslides_backend/action/action_handler.py | r-peschke/openslides-backend | 83d0dab68bb914f06a0f50cffe23fc10ca45376f | [
"MIT"
] | null | null | null | from copy import deepcopy
from typing import Dict, Iterable, List, Optional, Tuple, Union
import fastjsonschema
from ..shared.exceptions import ActionException, DatastoreException, EventStoreException
from ..shared.handlers.base_handler import BaseHandler
from ..shared.interfaces.write_request_element import WriteRequestElement
from ..shared.schema import schema_version
from .action import merge_write_request_elements
from .relations.relation_manager import RelationManager
from .util.actions_map import actions_map
from .util.typing import (
ActionResponse,
ActionResponseResults,
ActionResponseResultsElement,
Payload,
)
# JSON schema for the actions request body: an array of
# {"action": <non-empty string>, "data": [<object>, ...]} items with no
# additional keys allowed. Compiled once at import time via fastjsonschema.
payload_schema = fastjsonschema.compile(
    {
        "$schema": schema_version,
        "title": "Schema for action API",
        "description": "An array of actions.",
        "type": "array",
        "items": {
            "type": "object",
            "properties": {
                "action": {
                    "description": "Name of the action to be performed on the server",
                    "type": "string",
                    "minLength": 1,
                },
                "data": {
                    "description": "Data for the action (array)",
                    "type": "array",
                    "items": {"type": "object"},
                },
            },
            "required": ["action", "data"],
            "additionalProperties": False,
        },
    }
)
class ActionHandler(BaseHandler):
    """
    Action handler. It is the concrete implementation of the Action
    interface: validates the incoming payload, executes every requested
    action, collects the produced write-request elements and persists them
    in the datastore (retrying on write conflicts).
    """
    # Maximum number of datastore write retries on a DatastoreException
    # before the request fails.
    MAX_RETRY = 3
    @classmethod
    def get_actions_dev_status(cls) -> Iterable[Tuple[str, Union[str, Dict]]]:
        """
        Returns name and development status of all actions
        """
        for name, action in actions_map.items():
            if getattr(action, "is_dummy", False):
                yield name, "Not implemented"
            else:
                # Implemented actions report their payload schema instead.
                yield name, action.schema
    def handle_request(self, payload: Payload, user_id: int) -> ActionResponse:
        """
        Takes payload and user id and handles this request by validating and
        parsing all actions. In the end it sends everything to the event store.
        """
        self.user_id = user_id
        # Validate payload of request
        try:
            self.validate(payload)
        except fastjsonschema.JsonSchemaException as exception:
            raise ActionException(exception.message)
        retried = 0
        # Keep a pristine copy so a failed write can be retried with the
        # original input (presumably parsing may mutate the payload —
        # TODO confirm).
        payload_copy = deepcopy(payload)
        while True:
            # Parse actions and creates events
            write_request_element, results = self.parse_actions(payload)
            # Send events to datastore
            if write_request_element:
                try:
                    self.datastore.write(write_request_element)
                except DatastoreException as exception:
                    # Write conflict: restore the payload and retry up to
                    # MAX_RETRY times before giving up.
                    retried += 1
                    payload = deepcopy(payload_copy)
                    if retried > self.MAX_RETRY:
                        raise ActionException(exception.message)
                    continue
                except EventStoreException as exception:
                    raise ActionException(exception.message)
            break
        # Return action result
        # TODO: This is a fake result because in this place all actions were
        # always successful.
        self.logger.debug("Request was successful. Send response now.")
        return ActionResponse(
            success=True, message="Actions handled successfully", results=results
        )
    def validate(self, payload: Payload) -> None:
        """
        Validates actions requests sent by client. Raises JsonSchemaException if
        input is invalid.
        """
        self.logger.debug("Validate actions request.")
        payload_schema(payload)
    def parse_actions(
        self, payload: Payload
    ) -> Tuple[Optional[WriteRequestElement], ActionResponseResults]:
        """
        Parses actions request send by client. Raises ActionException or
        PermissionDenied if something went wrong.

        Returns the merged write request (or None when no action produced
        one) and, per payload element, either a list of response elements
        or None.
        """
        all_write_request_elements: List[WriteRequestElement] = []
        all_action_response_results: ActionResponseResults = []
        # One relation manager is shared by all actions of this request.
        relation_manager = RelationManager(self.datastore)
        for element in payload:
            action_name = element["action"]
            ActionClass = actions_map.get(action_name)
            # Internal actions are hidden from the public API on purpose.
            if ActionClass is None or ActionClass.internal:
                raise ActionException(f"Action {action_name} does not exist.")
            self.logger.debug(f"Perform action {action_name}.")
            action = ActionClass(self.services, relation_manager)
            action_results = action.perform(element["data"], self.user_id)
            response_elements: List[Optional[ActionResponseResultsElement]] = []
            for item in action_results:
                # An action may yield write requests (collected globally)
                # and/or response elements (collected per payload element).
                if isinstance(item, WriteRequestElement):
                    self.logger.debug(f"Prepared write request element {item}.")
                    all_write_request_elements.append(item)
                else:
                    # item = cast(ActionResponseResultsElement, item)
                    self.logger.debug(f"Got action response element {item}.")
                    response_elements.append(item)
            all_action_response_results.append(response_elements or None)
        self.logger.debug("Write request is ready.")
        return (
            merge_write_request_elements(all_write_request_elements),
            all_action_response_results,
        )
| 37.38 | 88 | 0.608525 | 4,159 | 0.741751 | 363 | 0.064741 | 380 | 0.067772 | 0 | 0 | 1,500 | 0.267523 |
2fd57e4ea7c49631ccd30b28d1e5572f8c08ea1c | 537 | py | Python | 2020/examples-in-class-2020-10-22/list_vs_dictionary1.py | ati-ozgur/course-python | 38237d120043c07230658b56dc3aeb01c3364933 | [
"Apache-2.0"
] | 1 | 2021-02-04T16:59:11.000Z | 2021-02-04T16:59:11.000Z | 2020/examples-in-class-2020-10-22/list_vs_dictionary1.py | ati-ozgur/course-python | 38237d120043c07230658b56dc3aeb01c3364933 | [
"Apache-2.0"
] | null | null | null | 2020/examples-in-class-2020-10-22/list_vs_dictionary1.py | ati-ozgur/course-python | 38237d120043c07230658b56dc3aeb01c3364933 | [
"Apache-2.0"
] | 1 | 2019-10-30T14:37:48.000Z | 2019-10-30T14:37:48.000Z | # Ask user to enter their country name.
# print out their phone code.
l_country_phone_code = ["China","86","Germany","49","Turkey","90"]
def country_name_to_phone_code_list(country_name):
for index in range(len(l_country_phone_code)):
if l_country_phone_code[index] == country_name:
return l_country_phone_code[index+1]
def country_name_to_phone_code_dict(country_name):
pass
print(country_name_to_phone_code_list("Turkey"))
print(country_name_to_phone_code_list("Germany"))
print(country_name_to_phone_code_list("China")) | 26.85 | 66 | 0.795158 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 128 | 0.238361 |
2fd65f43e5f02b710a34a13d8f580c8410046d5d | 5,173 | py | Python | defects_dlmbl/disc_loss.py | bbarad/defects_DLMBL | 66afe939bf90996738cc6bbd323024591bec0cdc | [
"MIT"
] | null | null | null | defects_dlmbl/disc_loss.py | bbarad/defects_DLMBL | 66afe939bf90996738cc6bbd323024591bec0cdc | [
"MIT"
] | null | null | null | defects_dlmbl/disc_loss.py | bbarad/defects_DLMBL | 66afe939bf90996738cc6bbd323024591bec0cdc | [
"MIT"
] | null | null | null | import torch
from torch import nn
import torch.nn.functional as F
class DiscriminativeLoss(nn.Module):
    """Discriminative (instance-embedding) loss.

    Given a pixel-embedding map and per-instance binary masks it computes
        l_var  - pull term: pixel embeddings towards their instance mean,
        l_dist - push term: instance means apart, hinged at 2*delta_dist,
        l_reg  - small L2 regularization on the instance means (param_reg),
    and sums them.

    NOTE(review): the original docstring (Hungarian matching of boxes) was
    copied from an unrelated criterion and did not describe this class.
    """
    def __init__(self, device, delta_dist=3.0):
        """ Create the criterion.
        Parameters:
            device: torch device; stored but not used directly here.
            delta_dist: hinge margin for the inter-instance push term.
        """
        super().__init__()
        self.delta_dist = delta_dist
        # NOTE(review): param_var and param_dist are stored but never applied
        # to l_var / l_dist below; only param_reg is used.
        self.param_var = 1.0
        self.param_dist = 1.0
        self.param_reg = 0.0001
        self.device = device
        # NOTE(review): unused — the fg/bg BCE branch in forward() is
        # commented out.
        self.bce_loss = torch.nn.BCEWithLogitsLoss()
    def instance_loss(self, outputs, targets):
        # outputs: (1, feature_dim, H, W) embedding map. The view() below
        # assumes a single sample; forward() calls this once per sample.
        # targets: (1, num_instances, H, W) binary instance masks.
        embedding = outputs
        correct_label = targets
        # flatten
        _,feature_dim,_,_ = embedding.shape
        embedding = embedding.view(feature_dim,-1).transpose(0,1) # pixels x features
        _,num_instances,_,_ = correct_label.shape
        correct_label = correct_label.view(num_instances,-1) # instances x pixels
        # add bg instance
        # Background mask = 1 - (union of all instance masks).
        bg = torch.unsqueeze((torch.clip(torch.sum(correct_label,dim=0), 0., 1.)-1)*(-1),0) # 1 x pixels
        label_list = list(torch.split(correct_label, dim=0, split_size_or_sections=1)) + [bg]
        correct_label = torch.cat(label_list, dim=0) # instances+1 x pixels
        num_instances += 1
        # calculate mean embedding
        counts = torch.sum(correct_label, dim=1) # instances
        mu = torch.matmul(correct_label, embedding)/torch.unsqueeze((counts+1e-8),1) # instances x feature_dim
        # calculates losses
        l_var = self._lvar(correct_label, mu, num_instances, embedding)
        # _ldist throws errors and makes no sense for <2 instances, therefore it's set to zero in this case
        if num_instances>1:
            l_dist = self._ldist(mu, num_instances, feature_dim)
        else:
            l_dist = 0.0
        l_reg = torch.mean(torch.norm(mu, dim=1))*self.param_reg
        disc_loss = l_var + l_dist + l_reg
        loss_dict = {
            'l_var': l_var,
            'l_dist': l_dist,
            'l_reg': l_reg,
            'disc_loss': disc_loss
        }
        return disc_loss, loss_dict
    def _ldist(self, mu, num_instances, feature_dim):
        # Hinged push term: penalizes pairs of instance means closer than
        # 2*delta_dist (squared hinge, averaged over pairs).
        # Get L1-,distance for each pair of clusters like this:
        # mu_1 - mu_1
        # mu_1 - mu_2
        # mu_1 - mu_3
        mu = mu.transpose(0,1).unsqueeze(2).expand(feature_dim,num_instances,num_instances) # feature_dim x instances x instances
        mu_band_rep = mu.reshape(feature_dim, num_instances*num_instances) # feature_dim x instances*instances
        mu_interleaved_rep = mu.permute(0, 2, 1).reshape(feature_dim, num_instances*num_instances) # feature_dim x instances*instances
        mu_diff = mu_band_rep - mu_interleaved_rep # feature_dim x instances*instances
        mu_dist = torch.norm(mu_diff, dim=0)
        # Drop self-distances (exact zeros). NOTE(review): this also drops
        # distinct instances whose means coincide exactly.
        mask = torch.logical_not(mu_dist.eq(0.0))
        mu_dist = torch.masked_select(mu_dist, mask)
        mu_dist = 2*self.delta_dist - mu_dist # apply hinge
        mu_dist = F.relu(mu_dist) # remove the ones below the hinge
        mu_dist = torch.square(mu_dist)
        l_dist = torch.mean(mu_dist)
        return l_dist
    def _lvar(self, correct_label, mu, num_instances, embedding):
        # Pull term: squared distance of each pixel embedding to its
        # instance mean, normalized per instance, averaged over instances.
        # l_var
        mu_expand = torch.matmul(correct_label.transpose(0,1),mu) # pixels x feature_dim
        counts = torch.sum(correct_label, dim=1) # instances+1
        distance = torch.norm(mu_expand - embedding, dim=1, keepdim=True) # 1 x pixels
        distance = torch.square(distance) # 1 x pixels
        l_var = torch.squeeze(torch.matmul(correct_label, distance)) # instance + 1
        l_var = l_var/(counts+1e-8)
        l_var = torch.sum(l_var)
        l_var = l_var/(num_instances+1e-8)
        return l_var
    def forward(self, input, target):
        # input: (B, feature_dim, H, W) embeddings.
        # target: integer label map, indexed per-sample below — presumably
        # (B, 1, H, W) with 0 = background; TODO confirm with callers.
        # split
        # don't incorporate fg/bg bce loss here
        # pred_fgbg = input[:,0:1,:,:]
        pred_emb = input#[:,1:,:,:]
        #
        acc_loss = []
        b = input.shape[0]
        if b>1:
            # Per-sample loss, averaged over the batch.
            for idx in range(b):
                # NOTE(review): dead assignment — immediately overwritten on
                # the next line.
                tmp_target = F.one_hot(target.long())
                tmp_target = target[idx] # 1 x H x W
                tmp_target = torch.permute(F.one_hot(tmp_target.long()), (0,3,1,2))
                _, tmp_target = torch.unique(tmp_target, return_inverse=True) # make labels consecutive numbers
                inst_loss, loss_dict = self.instance_loss(pred_emb[idx].unsqueeze(0), tmp_target.float())
                acc_loss.append(inst_loss)
            inst_loss = torch.mean(torch.stack(acc_loss))
        else:
            _, tmp_target = torch.unique(target, return_inverse=True)
            tmp_target = torch.permute(F.one_hot(tmp_target.squeeze(0).long()), (0,3,1,2))
            inst_loss, loss_dict = self.instance_loss(pred_emb, tmp_target.float())
        # prepare fgbg target
        # target_fgbg = (target>0).float()
        # fg_bg_loss = self.bce_loss(input=pred_fgbg, target=target_fgbg)
return inst_loss# + fg_bg_loss | 47.027273 | 134 | 0.628842 | 5,106 | 0.987048 | 0 | 0 | 0 | 0 | 0 | 0 | 1,279 | 0.247245 |
2fd8b1114b40364b1013c8b3a880cb6bb7d459b7 | 2,670 | py | Python | tests/test_cache.py | cdusold/DriveLink | b48ff68f7913e33fc17a07e91ad185b1222fdeb8 | [
"MIT"
] | null | null | null | tests/test_cache.py | cdusold/DriveLink | b48ff68f7913e33fc17a07e91ad185b1222fdeb8 | [
"MIT"
] | 7 | 2017-09-05T01:07:48.000Z | 2020-10-12T16:40:12.000Z | tests/test_cache.py | cdusold/DriveLink | b48ff68f7913e33fc17a07e91ad185b1222fdeb8 | [
"MIT"
] | null | null | null | import unittest as ut
from tests._utils._timer import Timer
from drivelink import cached
#from Process import freeze_support
def uncachedFib(a):
    """Naive recursive Fibonacci of ``a`` (deliberately slow: it is used as
    the un-cached timing baseline in the tests below)."""
    if a == 0 or a == 1:
        return a
    if a < 0:
        raise Exception("Reverse fibonacci sequence not implemented.")
    return uncachedFib(a - 2) + uncachedFib(a - 1)
def test_fib():
    """Sanity-check the first six Fibonacci numbers from the reference
    (uncached) implementation."""
    expected = {0: 0, 1: 1, 2: 1, 3: 2, 4: 3, 5: 5}
    for n, fib_n in expected.items():
        assert uncachedFib(n) == fib_n
class cachedTest(ut.TestCase):
    """Tests for the drivelink ``cached`` decorator, using a recursive
    Fibonacci function as the cached workload."""
    # Holds the decorated fib function; (re)created per test in setUp.
    c = None
    def setUp(self):
        # The cache is keyed on the test id so every test gets its own store.
        @cached(self.id(), 1, 1)
        def fib(a):
            if a in [0, 1]:
                return a
            if a < 0:
                raise Exception("Reverse fibonacci sequence not implemented.")
            return fib(a - 1) + fib(a - 2)
        self.c = fib
    def test_fib(self):
        """Cached fib returns the correct Fibonacci values."""
        self.assertEqual(
            self.c(0), 0, "The zeroth element of the Fibonnaci sequence is 0, not {0}.".format(str(self.c(0))))
        self.assertEqual(
            self.c(1), 1, "The first element of the Fibonnaci sequence is 1, not {0}.".format(str(self.c(1))))
        self.assertEqual(
            self.c(2), 1, "The second element of the Fibonnaci sequence is 1, not {0}.".format(str(self.c(2))))
        self.assertEqual(
            self.c(3), 2, "The third element of the Fibonnaci sequence is 2, not {0}.".format(str(self.c(3))))
        self.assertEqual(
            self.c(4), 3, "The fourth element of the Fibonnaci sequence is 3, not {0}.".format(str(self.c(4))))
        self.assertEqual(
            self.c(5), 5, "The fifth element of the Fibonnaci sequence is 5, not {0}.".format(str(self.c(5))))
    def test_init(self):
        """A fresh cache starts empty and wraps the original function
        (``c.c`` is the cache mapping, ``c.f`` the raw function —
        presumably; TODO confirm against drivelink's API)."""
        self.assertEqual(len(self.c.c), 0, "The cache was malformed.")
        self.assertEqual(self.c.f(0), uncachedFib(0), "The function was not entered correctly.")
    def test_cache(self):
        """The first call stores exactly one entry; repeat calls hit it."""
        i = self.c(0)
        self.assertEqual(len(self.c.c), 1, "The value was not cached properly.")
        self.assertEqual(self.c(0), i, "The cached answer was incorrect.")
    def test_speed(self):
        """Cached evaluation must beat the naive recursive baseline, and a
        repeat (fully cached) call must be faster still."""
        with Timer() as t1:
            _ = uncachedFib(32)
        # NOTE(review): meaning of setting ``n = -1`` on the cache is not
        # visible here — presumably disables a size limit; TODO confirm.
        self.c.n = -1
        with Timer() as t2:
            _ = self.c(32)
        self.assertTrue(t2.interval < t1.interval,
                        "There isn't a speed up... This is useless then, I suppose.")
        with Timer() as t1:
            _ = self.c(32)
        self.assertTrue(t2.interval > t1.interval,
                        "There isn't a speed up... This is useless then, I suppose.")
if __name__ == '__main__':
    # Bug fix: `freeze_support` was called without ever being imported (the
    # `from Process import freeze_support` line at the top is commented out),
    # so running this module as a script raised NameError. It lives in the
    # stdlib `multiprocessing` package; calling it is a no-op everywhere
    # except in frozen Windows executables.
    from multiprocessing import freeze_support
    freeze_support()
    ut.main()
| 34.230769 | 111 | 0.579026 | 2,078 | 0.778277 | 0 | 0 | 241 | 0.090262 | 0 | 0 | 755 | 0.282772 |
2fd9550a294f6b02e8dabd77fe3e2f1337833926 | 1,813 | py | Python | openstates/openstates-master/openstates/ca/actions.py | Jgorsick/Advocacy_Angular | 8906af3ba729b2303880f319d52bce0d6595764c | [
"CC-BY-4.0"
] | null | null | null | openstates/openstates-master/openstates/ca/actions.py | Jgorsick/Advocacy_Angular | 8906af3ba729b2303880f319d52bce0d6595764c | [
"CC-BY-4.0"
] | null | null | null | openstates/openstates-master/openstates/ca/actions.py | Jgorsick/Advocacy_Angular | 8906af3ba729b2303880f319d52bce0d6595764c | [
"CC-BY-4.0"
] | null | null | null | from billy.scrape.actions import Rule, BaseCategorizer
# These are regex patterns that map to action categories.
_categorizer_rules = (
    # Vote tallies: captures ayes/noes counts without assigning a category.
    Rule((r'\(Ayes (?P<yes_votes>\d+)\.\s+Noes\s+'
          r'(?P<no_votes>\d+)\.( Page \S+\.)?\)')),
    Rule(r'^Introduced', 'bill:introduced'),
    # Committee referrals (the committee names are captured).
    Rule(r'(?i)Referred to (?P<committees>.+)', 'committee:referred'),
    Rule(r'(?i)Referred to (?P<committees>.+?)(\.\s+suspense)',
         'committee:referred'),
    Rule(r're-refer to Standing (?P<committees>[^.]+)\.',
         'committee:referred'),
    # Bill readings and passage.
    Rule(r'Read first time\.', 'bill:reading:1'),
    Rule(r'Read second time and amended',
         ['bill:reading:2']),
    Rule(r'Read third time', 'bill:reading:3'),
    Rule(r'Read third time. Refused passage\.',
         'bill:failed'),
    Rule([r'(?i)read third time.{,5}passed',
          r'(?i)Read third time.+?Passed'],
         ['bill:passed', 'bill:reading:3']),
    # Governor actions.
    Rule(r'Approved by the Governor', 'governor:signed'),
    Rule(r'Approved by the Governor with item veto',
         'governor:vetoed:line-item'),
    Rule('Vetoed by Governor', 'governor:vetoed'),
    Rule(r'To Governor', 'governor:received'),
    # Amendments and committee outcomes.
    Rule(r'amendments concurred in', 'amendment:passed'),
    Rule(r'refused to concur in Assembly amendments', 'amendment:failed'),
    Rule(r'Failed passage in committee', 'committee:failed'),
    Rule(r'(?i)From committee', 'committee:passed'),
    Rule(r'(?i)From committee: Do pass', 'committee:passed:favorable'),
    Rule(r'From committee with author\'s amendments', 'committee:passed'),
    # Resolutions
    Rule(r'Adopted', 'bill:passed'),
    Rule(r'Read', 'bill:reading:1'),
    Rule(r'^From committee: Be adopted', 'committee:passed:favorable'),
)
class CACategorizer(BaseCategorizer):
    """California bill-action categorizer: applies the regex-to-category
    rules defined in ``_categorizer_rules`` above."""
    rules = _categorizer_rules
| 35.54902 | 74 | 0.630998 | 68 | 0.037507 | 0 | 0 | 0 | 0 | 0 | 0 | 1,234 | 0.68064 |
2fda6bc66c1b95398d9470c936e328c867ca38b6 | 1,241 | py | Python | tests/unit_tests/test_managers/test_resource.py | radical-project/radical.dreamer | 74bb2a9a705fc90b0dc773963f2bfd48af6e1b84 | [
"MIT"
] | 4 | 2021-04-30T04:25:12.000Z | 2021-12-16T19:53:37.000Z | tests/unit_tests/test_managers/test_resource.py | radical-project/radical.dreamer | 74bb2a9a705fc90b0dc773963f2bfd48af6e1b84 | [
"MIT"
] | 1 | 2021-04-20T22:08:24.000Z | 2021-04-20T22:08:24.000Z | tests/unit_tests/test_managers/test_resource.py | radical-project/radical.dreamer | 74bb2a9a705fc90b0dc773963f2bfd48af6e1b84 | [
"MIT"
] | 1 | 2021-01-10T20:09:19.000Z | 2021-01-10T20:09:19.000Z | # pylint: disable=unused-argument
__copyright__ = 'Copyright 2021, The RADICAL-Cybertools Team'
__license__ = 'MIT'
import pika.exceptions
from radical.dreamer.configs import cfg_default
from radical.dreamer.managers import ResourceManager
from radical.dreamer.managers.ext import Schedule
from radical.dreamer.units import Resource, Workload
from unittest import TestCase, mock
class ResourceManagerTestClass(TestCase):
    """Unit tests for ResourceManager construction and input validation."""
    @mock.patch('radical.dreamer.managers._base.generate_id', return_value='R0')
    @mock.patch('radical.dreamer.managers._base.Logger')
    def test_init(self, *args, **kwargs):
        """Constructing without a config must fail; constructing with the
        default config must fail to reach RabbitMQ (no broker runs here)."""
        with self.assertRaises(Exception):
            # no configuration set
            ResourceManager()
        with self.assertRaises(pika.exceptions.AMQPConnectionError):
            # no local RMQ running or no correct RMQ URL set
            ResourceManager(cfg=cfg_default)
    def test_processing(self):
        """processing() must raise ValueError when any of its three
        positional arguments (Resource, Workload, Schedule) has the wrong
        type."""
        input_data = [Resource(), Workload(), Schedule()]
        for idx in range(len(input_data)):
            # Replace exactly one argument with a bogus object per iteration.
            updated_input_data = list(input_data)
            updated_input_data[idx] = 'wrong_obj'
            with self.assertRaises(ValueError):
                ResourceManager.processing(*updated_input_data)
| 32.657895 | 80 | 0.710717 | 853 | 0.687349 | 0 | 0 | 460 | 0.370669 | 0 | 0 | 251 | 0.202256 |
2fdb8c001da8514aa3f21ad18a492ee9f190de86 | 5,199 | py | Python | aniconforanilist.py | EnArvy/anicon | 28ecfbe7b42a4c78a575b92a20554a72ef7904de | [
"MIT"
] | 5 | 2021-03-23T18:21:29.000Z | 2021-06-21T22:30:31.000Z | aniconforanilist.py | EnArvy/anicon | 28ecfbe7b42a4c78a575b92a20554a72ef7904de | [
"MIT"
] | null | null | null | aniconforanilist.py | EnArvy/anicon | 28ecfbe7b42a4c78a575b92a20554a72ef7904de | [
"MIT"
] | 2 | 2021-03-22T12:53:45.000Z | 2021-03-22T21:38:04.000Z | from warnings import filterwarnings
from PIL import Image, ImageOps
import requests
from requests import get
import re
import os
import json
print('''Run this in your anime folder
For help and info, check out
https://github.com/EnArvy/anicon
''')
# Suppress all Python warnings for the rest of the run.
filterwarnings("ignore")
# Immediate subdirectories of the working directory (one per anime).
folderlist = next(os.walk('.'))[1]
if folderlist is None or len(folderlist) == 0:
    # In case the file is placed inside an inner most directory which contains only files and no other folders, this list will be empty.
    # Thus adding the current directory path as an element of the list.
    folderlist = [str(os.getcwd())]
# AutoMode: accept the first AniList search hit without prompting per folder.
automode = True if input('Use AutoMode? Y/N : ').upper() == 'Y' else False
def getname(name: str) -> str:
    """Derive a clean, lower-case search title from a release folder name.

    Lower-cases the name, strips release-group noise words, blanks out
    bracketed/parenthesised tags, and truncates at the first
    resolution/source marker.
    """
    # Everything after the first of these markers is dropped.
    cutoff_markers = ['bd', 's0', '480p', '720p', '1080p']
    # Noise words stripped outright. Order matters: longer variants such as
    # 'dubbed' must be removed before their substrings like 'dub'.
    noise_words = ['bluray', 'x265', 'x264', 'hevc', 'hi10p', 'avc', '10bit', 'dual', 'audio', 'eng', 'english', 'subbed', ' sub ', 'dubbed', 'dub']
    cleaned = name.lower().replace('_', ' ').replace('.', ' ')
    for noise in noise_words:
        cleaned = cleaned.replace(noise, '')
    # Blank out the contents of [...] and (...) (group tags, years), then
    # delete the now-empty bracket pairs themselves.
    cleaned = re.sub(r"(?<=\[)(.*?)(?=\])", '', cleaned)
    cleaned = re.sub(r"(?<=\()(.*?)(?=\))", '', cleaned)
    cleaned = cleaned.replace('()', '').replace('[]', '')
    for marker in cutoff_markers:
        # Remove everything after the marker, then the marker itself.
        cleaned = re.sub("(?<=" + marker + ")(?s)(.*$)", '', cleaned).replace(marker, '')
    return cleaned.strip()
def getartwork(name: str) -> tuple:
    """Search AniList's GraphQL API for *name* and return a tuple of
    (cover image URL, media type).

    In automode the first hit is taken; otherwise the user picks from the
    listed romaji titles (empty input defaults to the first entry).
    """
    url="https://graphql.anilist.co"
    # GraphQL search excluding print/music formats.
    query = '''
    query($name:String) {
        Page{
            media(search:$name,format_not_in:[MANGA,ONE_SHOT,NOVEL,MUSIC]) {
                id
                type
                title {
                    romaji
                    english
                }
                coverImage {
                    extraLarge
                }
            }
        }
    }
    '''
    variables = {
        'name':name
    }
    # Echo the title being searched so the user can follow progress.
    print(name)
    results = requests.post(url,json={'query':query,'variables':variables})
    jsonobj = json.loads(results.content)
    if automode:
        return(jsonobj['data']['Page']['media'][0]['coverImage']['extraLarge'],jsonobj['data']['Page']['media'][0]['type'])
    else:
        # Interactive mode: list the candidates 1..N and let the user choose.
        counter = 1
        for id in jsonobj['data']['Page']['media']:
            print(str(counter)+' - '+id['title']['romaji'])
            counter = counter + 1
        ch = input('\n>')
        if ch == '':
            ch = 1
        return(jsonobj['data']['Page']['media'][int(ch)-1]['coverImage']['extraLarge'] , jsonobj['data']['Page']['media'][int(ch)-1]['type'])
def createicon(folder: str, link: str):
    """Download the cover art at *link*, pad it to a square with black
    borders made transparent, and save it as a 500x500 ``.ico``.

    NOTE(review): despite taking *folder*, this reads the module-level
    ``jpgfile``/``icofile`` paths set by the main loop — consider passing
    them as parameters.
    """
    art = get(link)
    open(jpgfile, 'wb').write(art.content)
    img = Image.open(jpgfile)
    # Pad 69px on left/right with black, then fit-crop to 500x500 RGBA.
    img = ImageOps.expand(img, (69, 0, 69, 0), fill=0)
    img = ImageOps.fit(img, (500,500)).convert("RGBA")
    datas = img.getdata()
    newData = []
    for item in datas:
        # Turn pure-black pixels (the padding) fully transparent.
        if item[0] == 0 and item[1] == 0 and item[2] == 0:
            newData.append((0, 0, 0, 0))
        else:
            newData.append(item)
    img.putdata(newData)
    # The downloaded .jpg is only an intermediate; keep just the .ico.
    os.remove(jpgfile)
    img.save(icofile)
    img.close()
    return(icofile)
# For each subfolder: search AniList, build an .ico, and write a hidden
# desktop.ini so Windows Explorer shows the cover art as the folder icon.
for folder in folderlist:
    name = getname(folder)
    # Extracting the name of the folder without the path and then performing search for the same. This will be the name of the anime
    # episode, thus instead of performing a search for the directory path, now performing a search for the directory name.
    name = name.rpartition('\\')[2].strip()
    iconname = name.replace(' ', '_')
    # Temporary .jpg download and final .ico, both inside the target folder.
    jpgfile = folder + '\\' + iconname + '.jpg'
    icofile = folder + '\\' + iconname + '.ico'
    if os.path.isfile(icofile):
        print('An icon is already present. Delete the older icon and `desktop.ini` file before applying a new icon')
        continue
    try:
        link, Type = getartwork(name)
        icon = createicon(folder, link)
    except:
        # NOTE(review): bare except swallows everything, including
        # KeyboardInterrupt — `except Exception` would be safer.
        print('Ran into an error. Blame the dev :(')
        continue
    f = open(folder + "\\desktop.ini","w+")
    f.write("[.ShellClassInfo]\nConfirmFileOp=0\n")
    # Icon paths are written relative to the folder itself.
    f.write("IconResource={},0".format(icofile.replace(folder, "").strip("\\")))
    f.write("\nIconFile={}\nIconIndex=0".format(icofile.replace(folder, "").strip("\\")))
    f.write("\n[ViewState]\nMode=\nVid=\nFolderType=Videos")
    if Type is not None and len(Type) > 0:
        # If the result has a type, then using this as the infotip for the desktop icon.
        f.write("\nInfoTip={}".format(Type))
    # Closing the output stream. All the text will be written into `desktop.ini` file only when the output is being closed.
    f.close()
    # Not marking the `desktop.ini` file as a system file. This will make sure that the file can be seen if display hidden items is enabled.
    os.system('attrib +r +s \"{}\\{}\"'.format(os.getcwd(), folder))
    os.system('attrib +h \"{}\\desktop.ini\"'.format(folder))
    os.system('attrib +h \"{}\"'.format(icon))
| 34.892617 | 151 | 0.543758 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 2,326 | 0.447394 |
2fdc9a6bca1bf7a1a5438725748d103fc7be8779 | 4,300 | py | Python | C_util/functions.py | catnlp/MultiNER | 76da417ea2ef0a0fd3f1f1f912009d159ad8652a | [
"MIT"
] | null | null | null | C_util/functions.py | catnlp/MultiNER | 76da417ea2ef0a0fd3f1f1f912009d159ad8652a | [
"MIT"
] | null | null | null | C_util/functions.py | catnlp/MultiNER | 76da417ea2ef0a0fd3f1f1f912009d159ad8652a | [
"MIT"
] | null | null | null | # encoding:utf-8
'''
@Author: catnlp
@Email: wk_nlp@163.com
@Time: 2018/5/2 21:04
'''
import numpy as np
def normalize_word(word):
    """Return *word* with every digit character replaced by '0'.

    Used to collapse all numbers onto a single token form before alphabet
    lookup.
    """
    return ''.join('0' if ch.isdigit() else ch for ch in word)
def read_instance(input_file, word_alphabet, label_alphabet, number_normalized, max_sent_length):
    """Read a CoNLL-style file (one "word ... label" line per token, blank
    line between sentences) into parallel text and index lists.

    Args:
        input_file: path to the data file.
        word_alphabet: object mapping words to ids via ``get_index``.
        label_alphabet: object mapping labels to ids via ``get_index``.
        number_normalized: if True, digits in words are replaced by '0'.
        max_sent_length: sentences with this many tokens or more are
            dropped; a negative value keeps every sentence.

    Returns:
        (instance_texts, instance_ids): per sentence, ``[words, labels]``
        and ``[word_ids, label_ids]`` respectively.

    Fixes vs. the original: the file handle is closed via ``with`` (it was
    leaked), a sentence at EOF is no longer silently dropped when the file
    does not end with a blank line, and consecutive blank lines no longer
    emit empty instances.
    """
    instance_texts = []
    instance_ids = []
    words, labels = [], []
    word_ids, label_ids = [], []

    def _flush():
        # Emit the current sentence if non-empty and within the length limit.
        if words and ((max_sent_length < 0) or (len(words) < max_sent_length)):
            instance_texts.append([words, labels])
            instance_ids.append([word_ids, label_ids])

    with open(input_file, 'r') as in_file:
        for line in in_file:
            if len(line) > 2:  # token line ("w l\n" is at least 4 chars)
                pairs = line.strip().split()
                word = pairs[0]
                if number_normalized:
                    word = normalize_word(word)
                label = pairs[-1]
                words.append(word)
                labels.append(label)
                word_ids.append(word_alphabet.get_index(word))
                label_ids.append(label_alphabet.get_index(label))
            else:  # blank line: sentence boundary
                _flush()
                words, labels = [], []
                word_ids, label_ids = [], []
    # Also emit a trailing sentence when the file lacks a final blank line.
    _flush()
    return instance_texts, instance_ids
def build_pretrain_embedding(embedding_path, word_alphabet, embed_dim=100, norm=True):
    """Build an embedding matrix covering every word in *word_alphabet*.

    Words found in the pretrained file (exact or lower-cased match) get
    the pretrained vector, optionally L2-normalized; out-of-vocabulary
    words get a random uniform vector in [-scale, scale] with
    scale = sqrt(3 / embed_dim).

    Args:
        embedding_path: path to a pretrained embedding file, or None to
            initialize everything randomly at size *embed_dim*.
        word_alphabet: object exposing ``size()`` and ``iteritems()``
            yielding (word, index) pairs.
        embed_dim: embedding size; overridden by the file's dimension when
            *embedding_path* is given.
        norm: if True, normalize each pretrained vector to unit length.

    Returns:
        (pretrain_emb, embed_dim): a (alphabet_size, embed_dim) numpy array
        and the effective embedding dimension.
    """
    embed_dict = dict()
    if embedding_path is not None:  # idiom fix: was "!= None"
        embed_dict, embed_dim = load_pretrain_emb(embedding_path)
    alphabet_size = word_alphabet.size()
    scale = np.sqrt(3.0 / embed_dim)
    pretrain_emb = np.empty([alphabet_size, embed_dim])
    perfect_match = 0
    case_match = 0
    not_match = 0
    for word, index in word_alphabet.iteritems():
        if word in embed_dict:
            vec = embed_dict[word]
            pretrain_emb[index, :] = norm2one(vec) if norm else vec
            perfect_match += 1
        elif word.lower() in embed_dict:
            # Case-insensitive fallback match.
            vec = embed_dict[word.lower()]
            pretrain_emb[index, :] = norm2one(vec) if norm else vec
            case_match += 1
        else:
            # Out-of-vocabulary word: random initialization.
            pretrain_emb[index, :] = np.random.uniform(-scale, scale, [1, embed_dim])
            not_match += 1
    pretrain_size = len(embed_dict)
    print('Embedding:\n\tpretrain word:%s, perfect match:%s, case_match:%s, oov:%s, oov%%:%s'
          % (pretrain_size, perfect_match, case_match, not_match, (not_match+0.)/alphabet_size))
    return pretrain_emb, embed_dim
def norm2one(vec):
    """Scale *vec* to unit Euclidean (Frobenius) norm."""
    return vec / np.linalg.norm(vec)
def load_pretrain_emb(embedding_path):
    """Load a whitespace-separated pretrained embedding file.

    The embedding dimension is inferred from the first non-empty line.
    Token names may themselves contain spaces (e.g. "new york"): only the
    last ``embed_dim`` fields are the vector, everything before them is
    joined into the name.

    Returns:
        (embed_dict, embed_dim) where each value is a (1, embed_dim) numpy
        array; embed_dim is -1 when the file has no non-empty lines.
    """
    embed_dict = dict()
    embed_dim = -1
    with open(embedding_path, 'r') as emb_file:
        for raw_line in emb_file:
            tokens = raw_line.strip().split()
            if not tokens:
                continue
            if embed_dim < 0:
                # First data line fixes the vector size for the whole file.
                embed_dim = len(tokens) - 1
            vector = np.empty([1, embed_dim])
            vector[:] = tokens[len(tokens) - embed_dim:]
            name = ' '.join(tokens[:len(tokens) - embed_dim])
            embed_dict[name] = vector
    return embed_dict, embed_dim
| 34.95935 | 103 | 0.55186 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 808 | 0.187907 |
2fdcb0a723367a5527e63bd7e6909f29ebc28c3f | 38,912 | py | Python | ziggurat_foundations/tests/test_permissions.py | ergo/ziggurat_foundations | 613adf1b6022e9b5401ef7de9f5a066c88cfb6e8 | [
"BSD-3-Clause"
] | 59 | 2015-02-18T10:58:57.000Z | 2021-06-15T19:52:29.000Z | ziggurat_foundations/tests/test_permissions.py | ergo/ziggurat_foundations | 613adf1b6022e9b5401ef7de9f5a066c88cfb6e8 | [
"BSD-3-Clause"
] | 50 | 2015-02-18T10:12:17.000Z | 2021-09-09T20:13:17.000Z | ziggurat_foundations/tests/test_permissions.py | ergo/ziggurat_foundations | 613adf1b6022e9b5401ef7de9f5a066c88cfb6e8 | [
"BSD-3-Clause"
] | 24 | 2015-02-18T10:29:47.000Z | 2020-03-28T20:28:56.000Z | # -*- coding: utf-8 -*-
from __future__ import with_statement, unicode_literals
import pytest
from ziggurat_foundations.models.services.group_permission import GroupPermissionService
from ziggurat_foundations.models.services.group_resource_permission import (
GroupResourcePermissionService,
)
from ziggurat_foundations.models.services.user_permission import UserPermissionService
from ziggurat_foundations.models.services.user_resource_permission import (
UserResourcePermissionService,
)
from ziggurat_foundations.models.services.resource import ResourceService
from ziggurat_foundations.permissions import PermissionTuple, ALL_PERMISSIONS
from ziggurat_foundations.tests import (
add_user,
check_one_in_other,
add_resource,
add_resource_b,
add_group,
BaseTestCase,
)
from ziggurat_foundations.tests.conftest import (
User,
UserPermission,
GroupPermission,
UserResourcePermission,
GroupResourcePermission,
ResourceTestobjB,
)
from ziggurat_foundations.models.services.group import GroupService
from ziggurat_foundations.models.services.user import UserService
class TestUserPermissions(BaseTestCase):
def test_user_permissions(self, db_session):
created_user = add_user(db_session)
permissions = UserService.permissions(created_user, db_session=db_session)
expected = [
PermissionTuple(
created_user, "alter_users", "user", None, None, False, True
),
PermissionTuple(created_user, "root", "user", None, None, False, True),
]
check_one_in_other(permissions, expected)
def test_owned_permissions(self, db_session):
created_user = add_user(db_session)
resource = add_resource(db_session, 1, "test_resource")
created_user.resources.append(resource)
db_session.flush()
resources = UserService.resources_with_perms(
created_user, ["test_perm"], db_session=db_session
).all()
assert resources[0] == resource
permission = ResourceService.direct_perms_for_user(resource, created_user)[0]
assert permission.owner is True
assert permission.allowed is True
assert permission.user.id == created_user.id
def test_resources_with_perm(self, db_session):
created_user = add_user(db_session)
resource = add_resource(db_session, 1, "test_resource")
permission = UserResourcePermission(
perm_name="test_perm",
user_id=created_user.id,
resource_id=resource.resource_id,
)
resource.user_permissions.append(permission)
db_session.flush()
resources = UserService.resources_with_perms(
created_user, ["test_perm"], db_session=db_session
).all()
assert resources[0] == resource
def test_mixed_perms(self, db_session):
created_user = add_user(db_session)
resource = add_resource(db_session, 1, "test_resource")
permission = UserResourcePermission(
perm_name="test_perm",
user_id=created_user.id,
resource_id=resource.resource_id,
)
resource.user_permissions.append(permission)
resource2 = add_resource(db_session, 2, "test_resource")
created_user.resources.append(resource2)
add_resource(db_session, 3, "test_resource")
add_resource_b(db_session, 4, "test_resource")
db_session.flush()
resources = UserService.resources_with_perms(
created_user, ["test_perm"], db_session=db_session
).all()
found_ids = [r.resource_id for r in resources]
assert sorted(found_ids) == [1, 2]
def test_resources_with_perm_type_found(self, db_session):
created_user = add_user(db_session)
resource = add_resource(db_session, 1, "test_resource")
permission = UserResourcePermission(
perm_name="test_perm",
user_id=created_user.id,
resource_id=resource.resource_id,
)
resource.user_permissions.append(permission)
db_session.flush()
resources = UserService.resources_with_perms(
created_user,
["test_perm"],
resource_types=["test_resource"],
db_session=db_session,
).all()
assert resources[0] == resource
def test_resources_with_perm_type_not_found(self, db_session):
created_user = add_user(db_session)
resource = add_resource(db_session, 1, "test_resource")
permission = UserResourcePermission(
perm_name="test_perm",
user_id=created_user.id,
resource_id=resource.resource_id,
)
resource.user_permissions.append(permission)
db_session.flush()
resources = UserService.resources_with_perms(
created_user,
["test_perm"],
resource_types=["test_resource_b"],
db_session=db_session,
).all()
assert resources == []
def test_resources_with_perm_type_other_found(self, db_session):
created_user = add_user(db_session)
resource = add_resource(db_session, 1, "test_resource")
resource2 = add_resource_b(db_session, 2, "test_resource")
resource3 = add_resource(db_session, 3, "test_resource")
resource4 = add_resource_b(db_session, 4, "test_resource")
db_session.flush()
permission = UserResourcePermission(
perm_name="test_perm",
user_id=created_user.id,
resource_id=resource.resource_id,
)
resource.user_permissions.append(permission)
permission2 = UserResourcePermission(
perm_name="test_perm",
user_id=created_user.id,
resource_id=resource2.resource_id,
)
resource2.user_permissions.append(permission2)
permission3 = UserResourcePermission(
perm_name="test_perm",
user_id=created_user.id,
resource_id=resource3.resource_id,
)
resource3.user_permissions.append(permission3)
permission4 = UserResourcePermission(
perm_name="test_perm",
user_id=created_user.id,
resource_id=resource4.resource_id,
)
resource4.user_permissions.append(permission4)
db_session.flush()
resources = UserService.resources_with_perms(
created_user,
["test_perm"],
resource_types=["test_resource_b"],
db_session=db_session,
).all()
assert len(resources) == 2
def test_resources_with_wrong_perm(self, db_session):
created_user = add_user(db_session)
resource = add_resource(db_session, 1, "test_resource")
permission = UserResourcePermission(
perm_name="test_perm_bad",
user_id=created_user.id,
resource_id=resource.resource_id,
)
with pytest.raises(AssertionError):
resource.user_permissions.append(permission)
def test_multiple_resources_with_perm(self, db_session):
created_user = add_user(db_session)
resource = add_resource(db_session, 1, "test_resource")
permission = UserResourcePermission(
perm_name="test_perm",
user_id=created_user.id,
resource_id=resource.resource_id,
)
resource.user_permissions.append(permission)
resource2 = add_resource(db_session, 2, "test_resource2")
permission2 = UserResourcePermission(
perm_name="test_perm",
user_id=created_user.id,
resource_id=resource2.resource_id,
)
resource2.user_permissions.append(permission2)
resources = UserService.resources_with_perms(
created_user, ["test_perm"], db_session=db_session
).all()
assert resources == [resource, resource2]
def test_resources_ids_with_perm(self, db_session):
created_user = add_user(db_session)
resource1 = add_resource(db_session, 1, "test_resource1")
resource2 = add_resource(db_session, 2, "test_resource2")
resource3 = add_resource(db_session, 3, "test_resource3")
permission1 = UserResourcePermission(
perm_name="test_perm",
user_id=created_user.id,
resource_id=resource1.resource_id,
)
permission2 = UserResourcePermission(
perm_name="test_perm",
user_id=created_user.id,
resource_id=resource2.resource_id,
)
permission3 = UserResourcePermission(
perm_name="test_perm",
user_id=created_user.id,
resource_id=resource3.resource_id,
)
resource1.user_permissions.append(permission1)
resource2.user_permissions.append(permission2)
resource3.user_permissions.append(permission3)
db_session.flush()
resources = UserService.resources_with_perms(
created_user, ["test_perm"], resource_ids=[1, 3], db_session=db_session
).all()
assert resources == [resource1, resource3]
def test_resources_with_wrong_group_permission(self, db_session):
created_user = add_user(db_session)
resource = add_resource(db_session, 1, "test_resource")
group = add_group(db_session)
group.users.append(created_user)
group_permission = GroupResourcePermission(
perm_name="test_perm_bad",
group_id=group.id,
resource_id=resource.resource_id,
)
with pytest.raises(AssertionError):
resource.group_permissions.append(group_permission)
def test_resources_with_group_permission(self, db_session):
created_user = add_user(db_session)
resource = add_resource(db_session, 1, "test_resource")
resource2 = add_resource(db_session, 2, "test_resource2")
add_resource(db_session, 3, "test_resource3")
group = add_group(db_session)
group.users.append(created_user)
group_permission = GroupResourcePermission(
perm_name="test_perm", group_id=1, resource_id=resource.resource_id
)
group_permission2 = GroupResourcePermission(
perm_name="foo_perm", group_id=1, resource_id=resource2.resource_id
)
resource.group_permissions.append(group_permission)
resource2.group_permissions.append(group_permission2)
db_session.flush()
resources = UserService.resources_with_perms(
created_user, ["foo_perm"], db_session=db_session
).all()
assert resources[0] == resource2
def test_resources_with_direct_user_perms(self, db_session):
self.set_up_user_group_and_perms(db_session)
# test_perm1 from group perms should be ignored
perms = ResourceService.direct_perms_for_user(
self.resource, self.user, db_session=db_session
)
second = [
PermissionTuple(
self.user, "foo_perm", "user", None, self.resource, False, True
),
PermissionTuple(
self.user, "test_perm2", "user", None, self.resource, False, True
),
]
check_one_in_other(perms, second)
def test_resources_with_direct_group_perms(self, db_session):
self.set_up_user_group_and_perms(db_session)
# test_perm1 from group perms should be ignored
perms = ResourceService.group_perms_for_user(
self.resource, self.user, db_session=db_session
)
second = [
PermissionTuple(
self.user, "group_perm", "group", self.group, self.resource, False, True
)
]
check_one_in_other(perms, second)
def test_resources_with_user_perms(self, db_session):
self.maxDiff = 9999
self.set_up_user_group_and_perms(db_session)
perms = ResourceService.perms_for_user(
self.resource, self.user, db_session=db_session
)
second = [
PermissionTuple(
self.user, "foo_perm", "user", None, self.resource, False, True
),
PermissionTuple(
self.user, "group_perm", "group", self.group, self.resource, False, True
),
PermissionTuple(
self.user, "test_perm2", "user", None, self.resource, False, True
),
]
check_one_in_other(perms, second)
def test_resource_users_for_perm(self, db_session):
self.set_up_user_group_and_perms(db_session)
perms = ResourceService.users_for_perm(
self.resource, "foo_perm", db_session=db_session
)
second = [
PermissionTuple(
self.user, "foo_perm", "user", None, self.resource, False, True
)
]
check_one_in_other(perms, second)
def test_resource_users_for_any_perm(self, db_session):
self.maxDiff = 99999
self.set_up_user_group_and_perms(db_session)
perms = ResourceService.users_for_perm(
self.resource, "__any_permission__", db_session=db_session
)
second = [
PermissionTuple(
self.user, "group_perm", "group", self.group, self.resource, False, True
),
PermissionTuple(
self.user, "test_perm2", "user", None, self.resource, False, True
),
PermissionTuple(
self.user, "foo_perm", "user", None, self.resource, False, True
),
PermissionTuple(
self.user4,
"group_perm",
"group",
self.group2,
self.resource,
False,
True,
),
]
check_one_in_other(perms, second)
def test_resource_users_for_any_perm_resource_2(self, db_session):
self.set_up_user_group_and_perms(db_session)
perms = ResourceService.users_for_perm(
self.resource2, "__any_permission__", db_session=db_session
)
second = [
PermissionTuple(
self.user2, "foo_perm", "user", None, self.resource2, False, True
),
PermissionTuple(
self.user3, "test_perm", "user", None, self.resource2, False, True
),
]
check_one_in_other(perms, second)
def test_resource_users_limited_users(self, db_session):
self.maxDiff = 9999
self.set_up_user_group_and_perms(db_session)
perms = ResourceService.users_for_perm(
self.resource,
"__any_permission__",
user_ids=[self.user.id],
db_session=db_session,
)
second = [
PermissionTuple(
self.user, "group_perm", "group", self.group, self.resource, False, True
),
PermissionTuple(
self.user, "test_perm2", "user", None, self.resource, False, True
),
PermissionTuple(
self.user, "foo_perm", "user", None, self.resource, False, True
),
]
check_one_in_other(perms, second)
def test_resource_users_limited_group(self, db_session):
self.maxDiff = 9999
self.set_up_user_group_and_perms(db_session)
perms = ResourceService.users_for_perm(
self.resource,
"__any_permission__",
user_ids=[self.user.id],
group_ids=[self.group2.id],
db_session=db_session,
)
second = [
PermissionTuple(
self.user, "test_perm2", "user", None, self.resource, False, True
),
PermissionTuple(
self.user, "foo_perm", "user", None, self.resource, False, True
),
]
check_one_in_other(perms, second)
def test_resource_users_limited_group_other_user_3(self, db_session):
self.maxDiff = 9999
self.set_up_user_group_and_perms(db_session)
perms = ResourceService.users_for_perm(
self.resource2,
"__any_permission__",
user_ids=[self.user3.id],
db_session=db_session,
)
second = [
PermissionTuple(
self.user3, "test_perm", "user", None, self.resource2, False, True
)
]
check_one_in_other(perms, second)
def test_resource_users_limited_group_other_user_4(self, db_session):
self.maxDiff = 9999
self.set_up_user_group_and_perms(db_session)
perms = ResourceService.users_for_perm(
self.resource,
"__any_permission__",
user_ids=[self.user4.id],
group_ids=[self.group2.id],
db_session=db_session,
)
second = [
PermissionTuple(
self.user4,
"group_perm",
"group",
self.group2,
self.resource,
False,
True,
)
]
check_one_in_other(perms, second)
def test_resource_users_limited_group_ownage(self, db_session):
self.maxDiff = 9999
self.set_up_user_group_and_perms(db_session)
resource = ResourceTestobjB(
resource_id=99, resource_name="other", owner_user_id=self.user2.id
)
group3 = add_group(db_session, "group 3")
user2_permission = UserResourcePermission(
perm_name="foo_perm", user_id=self.user2.id
)
group3_permission = GroupResourcePermission(
perm_name="group_perm", group_id=group3.id
)
resource.group_permissions.append(group3_permission)
resource.user_permissions.append(user2_permission)
group3.users.append(self.user3)
self.user.resources.append(resource)
self.group2.resources.append(resource)
db_session.flush()
perms = ResourceService.users_for_perm(
resource, "__any_permission__", db_session=db_session
)
second = [
PermissionTuple(
self.user2, "foo_perm", "user", None, resource, False, True
),
PermissionTuple(
self.user, ALL_PERMISSIONS, "user", None, resource, True, True
),
PermissionTuple(
self.user4, ALL_PERMISSIONS, "group", self.group2, resource, True, True
),
PermissionTuple(
self.user3, "group_perm", "group", group3, resource, False, True
),
]
check_one_in_other(perms, second)
def test_users_for_perms(self, db_session):
user = User(user_name="aaa", email="aaa", status=0)
UserService.set_password(user, "password")
aaa_perm = UserPermission(perm_name="aaa")
bbb_perm = UserPermission(perm_name="bbb")
bbb2_perm = UserPermission(perm_name="bbb")
user.user_permissions.append(aaa_perm)
user.user_permissions.append(bbb_perm)
user2 = User(user_name="bbb", email="bbb", status=0)
UserService.set_password(user2, "password")
user2.user_permissions.append(bbb2_perm)
user3 = User(user_name="ccc", email="ccc", status=0)
UserService.set_password(user3, "password")
group = add_group(db_session)
group.users.append(user3)
db_session.add(user)
db_session.add(user2)
db_session.flush()
users = UserService.users_for_perms(["aaa"], db_session=db_session)
assert len(users.all()) == 1
assert users[0].user_name == "aaa"
users = UserService.users_for_perms(["bbb"], db_session=db_session).all()
assert len(users) == 2
assert ["aaa", "bbb"] == sorted([u.user_name for u in users])
users = UserService.users_for_perms(
["aaa", "bbb", "manage_apps"], db_session=db_session
)
assert ["aaa", "bbb", "ccc"] == sorted([u.user_name for u in users])
def test_resources_with_possible_perms(self, db_session):
self.set_up_user_group_and_perms(db_session)
resource = ResourceTestobjB(
resource_id=3, resource_name="other", owner_user_id=self.user.id
)
self.user.resources.append(resource)
resource_g = ResourceTestobjB(resource_id=4, resource_name="group owned")
self.group.resources.append(resource_g)
db_session.flush()
perms = UserService.resources_with_possible_perms(
self.user, db_session=db_session
)
second = [
PermissionTuple(
self.user, "foo_perm", "user", None, self.resource, False, True
),
PermissionTuple(
self.user, "group_perm", "group", self.group, self.resource, False, True
),
PermissionTuple(
self.user, "test_perm2", "user", None, self.resource, False, True
),
PermissionTuple(
self.user, ALL_PERMISSIONS, "user", None, resource, True, True
),
PermissionTuple(
self.user, ALL_PERMISSIONS, "group", self.group, resource_g, True, True
),
]
check_one_in_other(perms, second)
def test_resource_users_for_any_perm_additional_users(self, db_session):
self.maxDiff = 99999
self.set_up_user_group_and_perms(db_session)
user6 = add_user(db_session, 6, "user 6")
user7 = add_user(db_session, 7, "user 7")
perm2 = GroupResourcePermission(
perm_name="group_perm2", resource_id=self.resource.resource_id
)
self.group.resource_permissions.append(perm2)
self.group.users.append(user6)
self.group.users.append(user7)
perms = ResourceService.users_for_perm(
self.resource, "__any_permission__", db_session=db_session
)
second = [
PermissionTuple(
self.user, "group_perm", "group", self.group, self.resource, False, True
),
PermissionTuple(
user6, "group_perm", "group", self.group, self.resource, False, True
),
PermissionTuple(
user7, "group_perm", "group", self.group, self.resource, False, True
),
PermissionTuple(
self.user,
"group_perm2",
"group",
self.group,
self.resource,
False,
True,
),
PermissionTuple(
user6, "group_perm2", "group", self.group, self.resource, False, True
),
PermissionTuple(
user7, "group_perm2", "group", self.group, self.resource, False, True
),
PermissionTuple(
self.user, "test_perm2", "user", None, self.resource, False, True
),
PermissionTuple(
self.user, "foo_perm", "user", None, self.resource, False, True
),
PermissionTuple(
self.user4,
"group_perm",
"group",
self.group2,
self.resource,
False,
True,
),
]
check_one_in_other(perms, second)
def test_resource_users_for_any_perm_limited_group_perms(self, db_session):
self.maxDiff = 99999
self.set_up_user_group_and_perms(db_session)
user6 = add_user(db_session, 6, "user 6")
user7 = add_user(db_session, 7, "user 7")
perm2 = GroupResourcePermission(
perm_name="group_perm2", resource_id=self.resource.resource_id
)
self.group.resource_permissions.append(perm2)
self.group.users.append(user6)
self.group.users.append(user7)
perms = ResourceService.users_for_perm(
self.resource,
"__any_permission__",
limit_group_permissions=True,
db_session=db_session,
)
second = [
PermissionTuple(
None, "group_perm", "group", self.group, self.resource, False, True
),
PermissionTuple(
None, "group_perm2", "group", self.group, self.resource, False, True
),
PermissionTuple(
self.user, "test_perm2", "user", None, self.resource, False, True
),
PermissionTuple(
self.user, "foo_perm", "user", None, self.resource, False, True
),
PermissionTuple(
None, "group_perm", "group", self.group2, self.resource, False, True
),
]
check_one_in_other(perms, second)
def test_resource_groups_for_any_perm_additional_users(self, db_session):
self.maxDiff = 99999
self.set_up_user_group_and_perms(db_session)
user6 = add_user(db_session, 6, "user 6")
user7 = add_user(db_session, 7, "user 7")
perm2 = GroupResourcePermission(
perm_name="group_perm2", resource_id=self.resource.resource_id
)
self.group.resource_permissions.append(perm2)
self.group.users.append(user6)
self.group.users.append(user7)
perms = ResourceService.groups_for_perm(
self.resource, "__any_permission__", db_session=db_session
)
second = [
PermissionTuple(
self.user, "group_perm", "group", self.group, self.resource, False, True
),
PermissionTuple(
user6, "group_perm", "group", self.group, self.resource, False, True
),
PermissionTuple(
user7, "group_perm", "group", self.group, self.resource, False, True
),
PermissionTuple(
self.user,
"group_perm2",
"group",
self.group,
self.resource,
False,
True,
),
PermissionTuple(
user6, "group_perm2", "group", self.group, self.resource, False, True
),
PermissionTuple(
user7, "group_perm2", "group", self.group, self.resource, False, True
),
PermissionTuple(
self.user4,
"group_perm",
"group",
self.group2,
self.resource,
False,
True,
),
]
check_one_in_other(perms, second)
def test_resource_groups_for_any_perm_just_group_perms_limited(self, db_session):
self.maxDiff = 99999
self.set_up_user_group_and_perms(db_session)
user6 = add_user(db_session, 6, "user 6")
user7 = add_user(db_session, 7, "user 7")
perm2 = GroupResourcePermission(
perm_name="group_perm2", resource_id=self.resource.resource_id
)
self.group.resource_permissions.append(perm2)
self.group.users.append(user6)
self.group.users.append(user7)
perms = ResourceService.groups_for_perm(
self.resource,
"__any_permission__",
limit_group_permissions=True,
db_session=db_session,
)
second = [
PermissionTuple(
None, "group_perm", "group", self.group, self.resource, False, True
),
PermissionTuple(
None, "group_perm2", "group", self.group, self.resource, False, True
),
PermissionTuple(
None, "group_perm", "group", self.group2, self.resource, False, True
),
]
check_one_in_other(perms, second)
def test_resource_users_for_any_perm_excluding_group_perms(self, db_session):
self.maxDiff = 99999
self.set_up_user_group_and_perms(db_session)
user6 = add_user(db_session, 6, "user 6")
user7 = add_user(db_session, 7, "user 7")
perm2 = GroupResourcePermission(
perm_name="group_perm2", resource_id=self.resource.resource_id
)
self.group.resource_permissions.append(perm2)
self.group.users.append(user6)
self.group.users.append(user7)
perms = ResourceService.users_for_perm(
self.resource,
"__any_permission__",
limit_group_permissions=True,
skip_group_perms=True,
db_session=db_session,
)
second = [
PermissionTuple(
self.user, "test_perm2", "user", None, self.resource, False, True
),
PermissionTuple(
self.user, "foo_perm", "user", None, self.resource, False, True
),
]
check_one_in_other(perms, second)
def test_resource_groups_for_any_perm_just_group_perms_limited_empty_group(
self, db_session
):
self.maxDiff = 99999
self.set_up_user_group_and_perms(db_session)
user6 = add_user(db_session, 6, "user 6")
user7 = add_user(db_session, 7, "user 7")
perm2 = GroupResourcePermission(
perm_name="group_perm2", resource_id=self.resource.resource_id
)
self.group.resource_permissions.append(perm2)
self.group.users.append(user6)
self.group.users.append(user7)
group3 = add_group(db_session, "Empty group")
perm3 = GroupResourcePermission(
perm_name="group_permx", resource_id=self.resource.resource_id
)
group3.resource_permissions.append(perm3)
perms = ResourceService.groups_for_perm(
self.resource,
"__any_permission__",
limit_group_permissions=True,
db_session=db_session,
)
second = [
PermissionTuple(
None, "group_perm", "group", self.group, self.resource, False, True
),
PermissionTuple(
None, "group_perm2", "group", self.group, self.resource, False, True
),
PermissionTuple(
None, "group_perm", "group", self.group2, self.resource, False, True
),
PermissionTuple(
None, "group_permx", "group", group3, self.resource, False, True
),
]
check_one_in_other(perms, second)
def test_resource_users_for_any_perm_limited_group_perms_empty_group(
self, db_session
):
self.maxDiff = 99999
self.set_up_user_group_and_perms(db_session)
user6 = add_user(db_session, 6, "user 6")
user7 = add_user(db_session, 7, "user 7")
perm2 = GroupResourcePermission(
perm_name="group_perm2", resource_id=self.resource.resource_id
)
self.group.resource_permissions.append(perm2)
self.group.users.append(user6)
self.group.users.append(user7)
group3 = add_group(db_session, "Empty group")
perm3 = GroupResourcePermission(
perm_name="group_permx", resource_id=self.resource.resource_id
)
group3.resource_permissions.append(perm3)
perms = ResourceService.users_for_perm(
self.resource,
"__any_permission__",
limit_group_permissions=True,
db_session=db_session,
)
second = [
PermissionTuple(
None, "group_perm", "group", self.group, self.resource, False, True
),
PermissionTuple(
None, "group_perm2", "group", self.group, self.resource, False, True
),
PermissionTuple(
self.user, "test_perm2", "user", None, self.resource, False, True
),
PermissionTuple(
self.user, "foo_perm", "user", None, self.resource, False, True
),
PermissionTuple(
None, "group_perm", "group", self.group2, self.resource, False, True
),
PermissionTuple(
None, "group_permx", "group", group3, self.resource, False, True
),
]
check_one_in_other(perms, second)
def test_get_resource_permission(self, db_session):
created_user = add_user(db_session)
resource = add_resource(db_session, 1, "test_resource")
permission = UserResourcePermission(
perm_name="test_perm",
user_id=created_user.id,
resource_id=resource.resource_id,
)
resource.user_permissions.append(permission)
db_session.flush()
perm = UserResourcePermissionService.get(
user_id=created_user.id,
resource_id=resource.resource_id,
perm_name="test_perm",
db_session=db_session,
)
assert perm.perm_name == "test_perm"
assert perm.resource_id == resource.resource_id
assert perm.user_id == created_user.id
class TestGroupPermission(BaseTestCase):
def test_repr(self, db_session):
group_permission = GroupPermission(group_id=1, perm_name="perm")
assert repr(group_permission) == "<GroupPermission: perm>"
def test_get(self, db_session):
org_group = add_group(db_session, "group1")
group = GroupPermissionService.get(
group_id=org_group.id, perm_name="manage_apps", db_session=db_session
)
assert group.group_id == 1
assert group.perm_name == "manage_apps"
def test_by_group_and_perm(self, db_session):
add_group(db_session)
queried = GroupPermissionService.by_group_and_perm(
1, "manage_apps", db_session=db_session
)
assert queried.group_id == 1
assert queried.perm_name == "manage_apps"
def test_by_group_and_perm_wrong_group(self, db_session):
add_group(db_session)
queried = GroupPermissionService.by_group_and_perm(
2, "manage_apps", db_session=db_session
)
assert queried is None
def test_by_group_and_perm_wrong_perm(self, db_session):
add_group(db_session)
queried = GroupPermissionService.by_group_and_perm(
1, "wrong_perm", db_session=db_session
)
assert queried is None
def test_resources_with_possible_perms(self, db_session):
self.set_up_user_group_and_perms(db_session)
perms = GroupService.resources_with_possible_perms(self.group)
second = [
PermissionTuple(
None, "group_perm", "group", self.group, self.resource, False, True
)
]
check_one_in_other(perms, second)
def test_resources_with_possible_perms_group2(self, db_session):
self.set_up_user_group_and_perms(db_session)
resource3 = add_resource_b(db_session, 3, "other resource")
self.group2.resources.append(resource3)
group_permission2 = GroupResourcePermission(
perm_name="group_perm2", group_id=self.group2.id
)
self.resource2.group_permissions.append(group_permission2)
perms = GroupService.resources_with_possible_perms(self.group2)
second = [
PermissionTuple(
None, "group_perm", "group", self.group2, self.resource, False, True
),
PermissionTuple(
None, "group_perm2", "group", self.group2, self.resource2, False, True
),
PermissionTuple(
None, ALL_PERMISSIONS, "group", self.group2, resource3, True, True
),
]
check_one_in_other(perms, second)
def test_group_resource_permission(self, db_session):
self.set_up_user_group_and_perms(db_session)
add_resource_b(db_session, 3, "other resource")
db_session.flush()
group_permission2 = GroupResourcePermission(
perm_name="group_perm2", group_id=self.group2.id
)
row = GroupResourcePermissionService.get(
group_id=self.group2.id,
resource_id=self.resource2.resource_id,
perm_name="group_perm2",
db_session=db_session,
)
assert row is None
self.resource2.group_permissions.append(group_permission2)
row = GroupResourcePermissionService.get(
group_id=self.group2.id,
resource_id=self.resource2.resource_id,
perm_name="group_perm2",
db_session=db_session,
)
assert row is not None
def test_group_resource_permission_wrong(self, db_session):
self.set_up_user_group_and_perms(db_session)
perm_name = "group_permX"
perm = ResourceService.perm_by_group_and_perm_name(
resource_id=self.resource.resource_id,
group_id=self.group.id,
perm_name=perm_name,
db_session=db_session,
)
assert perm is None
def test_group_resource_permission2(self, db_session):
self.set_up_user_group_and_perms(db_session)
perm_name = "group_perm"
perm = ResourceService.perm_by_group_and_perm_name(
resource_id=self.resource.resource_id,
group_id=self.group.id,
perm_name=perm_name,
db_session=db_session,
)
assert perm.group_id == self.group.id
assert perm.resource_id == self.resource.resource_id
assert perm.perm_name == perm_name
class TestUserPermission(BaseTestCase):
    """Tests for the UserPermission model and UserPermissionService lookups."""
    def test_repr(self, db_session):
        """__repr__ embeds the permission name."""
        user_permission = UserPermission(user_id=1, perm_name="perm")
        assert repr(user_permission) == "<UserPermission: perm>"
    def test_get(self, db_session):
        """get() fetches the (user_id, perm_name) row created by add_user."""
        user = add_user(db_session)
        perm = UserPermissionService.get(
            user_id=user.id, perm_name="root", db_session=db_session
        )
        assert perm.user_id == user.id
        assert perm.perm_name == "root"
    def test_by_user_and_perm(self, db_session):
        """by_user_and_perm finds an existing permission by user id and name."""
        add_user(db_session)
        user_permission = UserPermissionService.by_user_and_perm(
            1, "root", db_session=db_session
        )
        assert user_permission.user_id == 1
        assert user_permission.perm_name == "root"
    def test_by_user_and_perm_wrong_username(self, db_session):
        """An unknown user id yields None rather than raising."""
        add_user(db_session)
        user_permission = UserPermissionService.by_user_and_perm(
            999, "root", db_session=db_session
        )
        assert user_permission is None
    def test_by_user_and_perm_wrong_permname(self, db_session):
        """An unknown permission name yields None rather than raising."""
        add_user(db_session)
        user_permission = UserPermissionService.by_user_and_perm(
            1, "wrong", db_session=db_session
        )
        assert user_permission is None
| 37.34357 | 88 | 0.612022 | 37,781 | 0.970934 | 0 | 0 | 0 | 0 | 0 | 0 | 3,115 | 0.080052 |
2fddc51ceee9b1fd3f73ffe1e2e6a10a1fa87650 | 2,333 | py | Python | waveshare_eink/server/start.py | dhvie/waveshare-eink | f826c2b46318bdefb3893dd62f522c317898ae32 | [
"MIT"
] | null | null | null | waveshare_eink/server/start.py | dhvie/waveshare-eink | f826c2b46318bdefb3893dd62f522c317898ae32 | [
"MIT"
] | null | null | null | waveshare_eink/server/start.py | dhvie/waveshare-eink | f826c2b46318bdefb3893dd62f522c317898ae32 | [
"MIT"
] | null | null | null | from flask import Flask
from flask import render_template
import datetime as dt
import json
import jinja2 as j2
import requests
import argparse
import os
import feedparser
from pathlib import Path
class OpenWeatherAPI():
    """Thin client for the OpenWeatherMap "One Call" endpoint."""

    # Jinja template for the request URL; rendered per call with the key,
    # coordinates and unit system.
    url = j2.Template('https://api.openweathermap.org/data/2.5/onecall?lat={{lat}}&lon={{lon}}&appid={{api_key}}&units={{units}}')

    def __init__(self, api_key):
        # Name-mangled to discourage external access to the credential.
        self.__api_key = api_key

    def call(self, lon, lat, units='imperial'):
        """Fetch weather data for (lat, lon) and return the parsed JSON dict."""
        endpoint = OpenWeatherAPI.url.render(
            api_key=self.__api_key, lon=lon, lat=lat, units=units
        )
        response = requests.get(endpoint)
        return json.loads(response.text)
# Required configuration, read once at import time from the environment:
# OWN_API (OpenWeatherMap key) and the LON/LAT coordinates to report on.
# NOTE(review): a missing variable raises KeyError here and aborts startup —
# confirm that fail-fast behaviour is intended.
args = {
    'own': os.environ['OWN_API'],
    'lon': os.environ['LON'],
    'lat': os.environ['LAT']
}
app = Flask("ws-eink")
# Single shared OpenWeatherMap client reused by every request.
own_api = OpenWeatherAPI(args['own'])
def get_weather():
    """Fetch weather JSON for the coordinates configured in the environment."""
    return own_api.call(lon=args['lon'], lat=args['lat'])
def get_news():
    """Download and parse the BBC UK news RSS feed."""
    feed_url = "http://feeds.bbci.co.uk/news/rss.xml?edition=uk"
    return feedparser.parse(feed_url)
# Maps the two-digit prefix of an OpenWeatherMap icon code to HTML markup:
# either a Material Icons ligature <span> or a locally served SVG under
# /icons/ (see get_icons below).
icon_map = {
    '01': '<span class="material-icons">wb_sunny</span>',
    '02': '<span class="material-icons">wb_cloudy</span>',
    '03': '<span class="material-icons">wb_cloudy</span>',
    '04': '<span class="material-icons">wb_cloudy</span>',
    '09': '<img src="/icons/rain" />',
    '10': '<img src="/icons/rain" />',
    '11': '<span class="material-icons">flash_on</span>',
    '13': '<span class="material-icons">ac_unit</span>',
    '50': '<img src="/icons/fog" />'
}
@app.template_filter()
def icon(value):
    """Jinja filter: map an OWM icon code (e.g. '10d') to HTML markup.

    Unknown codes render as an empty string.
    """
    return icon_map.get(value[:2], "")
@app.template_filter()
def format_datetime(value, format='full'):
    """Jinja filter: render a Unix timestamp as a date/time string.

    *format* selects 'date' (DD/MM/YYYY), 'time' (HH:MM), or anything else
    for the full date-plus-time form. (The parameter name shadows the
    builtin but is part of the template-facing interface, so it is kept.)
    """
    patterns = {'date': "%d/%m/%Y", 'time': '%H:%M'}
    dt_format = patterns.get(format, "%d/%m/%Y %H:%M")
    return dt.datetime.fromtimestamp(value).strftime(dt_format)
@app.route('/icons/<name>', methods=['GET'])
def get_icons(name):
    """Serve the raw SVG markup for the named icon bundled next to this module."""
    icons_dir = Path(__file__).parent / 'icons'
    with open(f'{icons_dir}/{name}.svg', 'r') as svg_file:
        return svg_file.read()
@app.route('/', methods=['GET'])
@app.route('/<page>', methods=['GET'])
def pages(page=None):
    """Render *page*.html (defaulting to main.html) with fresh weather and news."""
    template_name = 'main' if page is None else page
    return render_template(
        f'{template_name}.html', weather=get_weather(), news=get_news()
    )
| 26.511364 | 130 | 0.635662 | 403 | 0.172739 | 0 | 0 | 864 | 0.370339 | 0 | 0 | 777 | 0.333048 |
2fe058dd084d71d3320c034887d2a6bdbc3b3c7b | 322 | py | Python | basic_ml/notebooks/numpy/performance_test.py | jmetzz/ml-laboratory | 26b1e87bd0d80efa4f15280f7f32ad46d59efc1f | [
"MIT"
] | 1 | 2021-09-10T16:55:35.000Z | 2021-09-10T16:55:35.000Z | basic_ml/notebooks/numpy/performance_test.py | jmetzz/ml-laboratory | 26b1e87bd0d80efa4f15280f7f32ad46d59efc1f | [
"MIT"
] | 14 | 2022-03-12T01:06:08.000Z | 2022-03-30T14:30:22.000Z | basic_ml/notebooks/numpy/performance_test.py | jmetzz/ml-laboratory | 26b1e87bd0d80efa4f15280f7f32ad46d59efc1f | [
"MIT"
] | null | null | null | import numpy as np
# Create an ndarray with 10^7 elements (np.arange accepts a float endpoint).
arr = np.arange(1e7)
# Convert the ndarray to a plain Python list for the timing comparison below.
larr = arr.tolist()
def list_times(alist, scalar):
return [val * scalar for val in alist]
# Using IPython's magic
# timeit command timeit arr * 1.1
# timeit list_times(larr, 1.1)
# box(x, y){line=snake}
| 15.333333 | 42 | 0.692547 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 173 | 0.537267 |
2fe0955e36b279f30967dd3090b31e43c2bf286b | 7,582 | py | Python | tests/file_formats/variables/egf_vars.py | HFM3/strix | 94bbc568f614bbb0f525d8ce17de4c64ef3b46d2 | [
"MIT"
] | null | null | null | tests/file_formats/variables/egf_vars.py | HFM3/strix | 94bbc568f614bbb0f525d8ce17de4c64ef3b46d2 | [
"MIT"
] | null | null | null | tests/file_formats/variables/egf_vars.py | HFM3/strix | 94bbc568f614bbb0f525d8ce17de4c64ef3b46d2 | [
"MIT"
] | null | null | null | """
EGF string variables for testing.
"""
# POINT
valid_pt = """PT
Park Name, City, Pond, Fountain
Post office Square, Boston, FALSE, TRUE
42.356243, -71.055631, 2.0
Boston Common, Boston, TRUE, TRUE
42.355465, -71.066412, 10.0
"""
invalid_pt_geom = """PTs
Park Name, City, Pond, Fountain
Post office Square, Boston, FALSE, TRUE
42.356243, -71.055631, 2.0
Boston Common, Boston, TRUE, TRUE
42.355465, -71.066412, 10.0
"""
invalid_pt_last_line_1 = """PT
Park Name, City, Pond, Fountain
Post office Square, Boston, FALSE, TRUE
42.356243, -71.055631, 2.0
Boston Common, Boston, TRUE, TRUE
42.355465, -71.066412, 10.0
"""
invalid_pt_last_line_2 = """PT
Park Name, City, Pond, Fountain
Post office Square, Boston, FALSE, TRUE
42.356243, -71.055631, 2.0
Boston Common, Boston, TRUE, TRUE
42.355465, -71.066412, 10.0
a
"""
invalid_pt_coord_sets = """PT
Park Name, City, Pond, Fountain
Post office Square, Boston, FALSE, TRUE
42.356243, -71.055631, 2.0
42.355465, -71.066412, 10.0
Boston Common, Boston, TRUE, TRUE
42.355465, -71.066412, 10.0
"""
invalid_pt_headers = """PT
Park Name, City, Pond, Fountain
Park Name, City, Pond, Fountain
Post office Square, Boston, FALSE, TRUE
42.356243, -71.055631, 2.0
Boston Common, Boston, TRUE, TRUE
42.355465, -71.066412, 10.0
"""
invalid_pt_sections = """PT
Park Name, City, Pond, Fountain
"""
invalid_pt_section_separators = """PT
Park Name, City, Pond, Fountain
Post office Square, Boston, FALSE, TRUE
42.356243, -71.055631, 2.0
Boston Common, Boston, TRUE, TRUE
42.355465, -71.066412, 10.0
"""
# LINESTRING
valid_ls = """LS
Park Name, Feature Description
Post Office Square, A walk by the fountain
42.356716, -71.055685, 0.0
42.356587, -71.055769, 0.0
42.356566, -71.055754, 0.0
42.356539, -71.055746, 0.0
42.356511, -71.055757, 0.0
42.356495, -71.05579, 0.0
42.356485, -71.05583, 0.0
42.356389, -71.055842, 0.0
42.356252, -71.055796, 0.0
42.356046, -71.055642, 0.0
42.355876, -71.055697, 0.0
42.355828, -71.055758, 0.0
Boston Common, A walk by the fountain
42.356251, -71.062737, 0.0
42.35621, -71.063012, 0.0
42.356153, -71.06305, 0.0
42.356144, -71.063115, 0.0
42.356136, -71.063261, 0.0
42.355825, -71.064018, 0.0
"""
invalid_ls_coord_sets_1 = """LS
Park Name, Feature Description
Post Office Square, A walk by the fountain
42.356716, -71.055685, 0.0
42.356587, -71.055769, 0.0
42.356566, -71.055754, 0.0
42.356539, -71.055746, 0.0
42.356511, -71.055757, 0.0
42.356495, -71.05579, 0.0
42.356485, -71.05583, 0.0
42.356389, -71.055842, 0.0
42.356252, -71.055796, 0.0
42.356046, -71.055642, 0.0
42.355876, -71.055697, 0.0
42.355828, -71.055758, 0.0
Boston Common, A walk by the fountain
42.356251, -71.062737, 0.0
"""
invalid_ls_coord_sets_2 = """LS
Park Name, Feature Description
Post Office Square, A walk by the fountain
42.356716, -71.055685, 0.0
42.356587, -71.055769, 0.0
42.356566, -71.055754, 0.0
42.356539, -71.055746, 0.0
42.356511, -71.055757, 0.0
42.356495, -71.05579, 0.0
42.356485, -71.05583, 0.0
42.356389, -71.055842, 0.0
42.356252, -71.055796, 0.0
42.356046, -71.055642, 0.0
42.355876, -71.055697, 0.0
42.355828, -71.055758, 0.0
Boston Common, A walk by the fountain
42.356251, -71.062737, 0.0
42.35621, -71.063012, 0.0
42.356153, -71.06305, 0.0
42.356144, -71.063115, 0.0
42.356136, -71.063261, 0.0
42.355825, -71.064018, 0.0
"""
invalid_ls_sections = """LS
Park Name, Feature Description
Post Office Square, A walk by the fountain
42.356716, -71.055685, 0.0
42.356587, -71.055769, 0.0
42.356566, -71.055754, 0.0
42.356539, -71.055746, 0.0
42.356511, -71.055757, 0.0
42.356495, -71.05579, 0.0
42.356485, -71.05583, 0.0
42.356389, -71.055842, 0.0
42.356252, -71.055796, 0.0
42.356046, -71.055642, 0.0
42.355876, -71.055697, 0.0
42.355828, -71.055758, 0.0
Boston Common, A walk by the fountain
42.356251, -71.062737, 0.0
42.35621, -71.063012, 0.0
42.356153, -71.06305, 0.0
42.356144, -71.063115, 0.0
42.356136, -71.063261, 0.0
42.355825, -71.064018, 0.0
"""
# POLYGON
valid_poly = """POLY
Park Name, Feature Description
Post Office Square, Boundary of Post Office Square with holes for buildings
42.356856, -71.055757, 0.0
42.35608, -71.054976, 0.0
42.355697, -71.055636, 0.0
42.356003, -71.055941, 0.0
42.356767, -71.05622, 0.0
42.355955, -71.055522, 0.0
42.355894, -71.055458, 0.0
42.355846, -71.055546, 0.0
42.355908, -71.055615, 0.0
42.356089, -71.055312, 0.0
42.356005, -71.055226, 0.0
42.355969, -71.055288, 0.0
42.356058, -71.055373, 0.0
Boston Common, Boundary of Boston Common with a hole for the Frog Pond
42.356514, -71.062157, 0.0
42.355222, -71.063337, 0.0
42.352457, -71.064638, 0.0
42.352639, -71.067238, 0.0
42.356132, -71.06915, 0.0
42.357591, -71.06326, 0.0
42.356047, -71.065045, 0.0
42.355953, -71.065107, 0.0
42.355911, -71.065249, 0.0
42.356018, -71.065909, 0.0
42.35601, -71.066016, 0.0
42.355918, -71.066198, 0.0
42.355854, -71.066417, 0.0
42.355876, -71.066521, 0.0
42.355938, -71.066564, 0.0
42.355985, -71.066547, 0.0
42.356221, -71.066, 0.0
42.356296, -71.065647, 0.0
42.35627, -71.065341, 0.0
42.356186, -71.065127, 0.0
42.356123, -71.065061, 0.0
"""
invalid_poly_coord_sets_1 = """POLY
Park Name, Feature Description
Post Office Square, Boundary of Post Office Square with holes for buildings
42.356856, -71.055757, 0.0
42.35608, -71.054976, 0.0
42.355697, -71.055636, 0.0
42.356003, -71.055941, 0.0
42.356767, -71.05622, 0.0
42.356856, -71.055757, 0.0
42.355955, -71.055522, 0.0
42.355894, -71.055458, 0.0
42.355846, -71.055546, 0.0
42.355908, -71.055615, 0.0
42.355955, -71.055522, 0.0
42.356089, -71.055312, 0.0
42.356005, -71.055226, 0.0
42.355969, -71.055288, 0.0
42.356058, -71.055373, 0.0
42.356089, -71.055312, 0.0
Boston Common, Boundary of Boston Common with a hole for the Frog Pond
42.356514, -71.062157, 0.0
42.355222, -71.063337, 0.0
42.356047, -71.065045, 0.0
42.355953, -71.065107, 0.0
42.355911, -71.065249, 0.0
42.356018, -71.065909, 0.0
42.35601, -71.066016, 0.0
42.355918, -71.066198, 0.0
42.355854, -71.066417, 0.0
42.355876, -71.066521, 0.0
42.355938, -71.066564, 0.0
42.355985, -71.066547, 0.0
42.356221, -71.066, 0.0
42.356296, -71.065647, 0.0
42.35627, -71.065341, 0.0
42.356186, -71.065127, 0.0
42.356123, -71.065061, 0.0
42.356047, -71.065045, 0.0
"""
invalid_poly_coord_sets_2 = """POLY
Park Name, Feature Description
Post Office Square, Boundary of Post Office Square with holes for buildings
42.356856, -71.055757, 0.0
42.35608, -71.054976, 0.0
42.355697, -71.055636, 0.0
42.356003, -71.055941, 0.0
42.356767, -71.05622, 0.0
42.356856, -71.055757, 0.0
42.355955, -71.055522, 0.0
42.355894, -71.055458, 0.0
42.355846, -71.055546, 0.0
42.355908, -71.055615, 0.0
42.355955, -71.055522, 0.0
42.356089, -71.055312, 0.0
42.356005, -71.055226, 0.0
42.355969, -71.055288, 0.0
42.356058, -71.055373, 0.0
42.356089, -71.055312, 0.0
Boston Common, Boundary of Boston Common with a hole for the Frog Pond
42.356514, -71.062157, 0.0
42.355222, -71.063337, 0.0
42.352457, -71.064638, 0.0
42.352639, -71.067238, 0.0
42.356132, -71.06915, 0.0
42.357591, -71.06326, 0.0
42.356514, -71.062157, 0.0
42.356047, -71.065045, 0.0
42.355953, -71.065107, 0.0
42.355911, -71.065249, 0.0
42.356018, -71.065909, 0.0
42.35601, -71.066016, 0.0
42.355918, -71.066198, 0.0
42.355854, -71.066417, 0.0
42.355876, -71.066521, 0.0
42.355938, -71.066564, 0.0
42.355985, -71.066547, 0.0
42.356221, -71.066, 0.0
42.356296, -71.065647, 0.0
42.35627, -71.065341, 0.0
42.356186, -71.065127, 0.0
42.356123, -71.065061, 0.0
42.356047, -71.065045, 0.0
""" | 17.67366 | 75 | 0.680427 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 7,215 | 0.951596 |
2fe1655ebd31d04947a0a2208508011d402682bf | 204 | py | Python | app/__init__.py | jesiqueira/work | 4baf5277af4ca0537fca9ba8eeec45d4281ef76d | [
"MIT"
] | null | null | null | app/__init__.py | jesiqueira/work | 4baf5277af4ca0537fca9ba8eeec45d4281ef76d | [
"MIT"
] | null | null | null | app/__init__.py | jesiqueira/work | 4baf5277af4ca0537fca9ba8eeec45d4281ef76d | [
"MIT"
] | null | null | null | from flask import Flask
def create_app():
app = Flask(__name__)
#Rotas
from app.controllers.main.rotas import main
#Registrar Blueprint
app.register_blueprint(main)
return app
| 15.692308 | 47 | 0.70098 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 26 | 0.127451 |
2fe1e63f271fbc465668c88a0d44f3a5739d9e29 | 10,050 | py | Python | risk_register/rules.py | justin441/risk_management | 2f5f0f62aae34998db7cf4155297ce4f6a8d774e | [
"MIT"
] | null | null | null | risk_register/rules.py | justin441/risk_management | 2f5f0f62aae34998db7cf4155297ce4f6a8d774e | [
"MIT"
] | null | null | null | risk_register/rules.py | justin441/risk_management | 2f5f0f62aae34998db7cf4155297ce4f6a8d774e | [
"MIT"
] | null | null | null | import rules
# ------------predicates------------
# Processus
@rules.predicate
def is_process_manager(user, processus):
return processus.proc_manager == user
@rules.predicate
def is_process_upper_mgt(user, processus):
bu = processus.business_unit
return bu.bu_manager == user
# Activités
@rules.predicate
def is_activity_owner(user, activite):
return activite.responsable == user
@rules.predicate
def is_activity_supervisor(user, activite):
processus = activite.processus
return processus.proc_manager == user
@rules.predicate
def is_activity_upper_mgt(user, activite):
processus = activite.processus
bu = processus.business_unit
return bu.bu_manager == user
# Risques des activités
@rules.predicate
def is_activity_risk_reporter(user, activiterisque):
return activiterisque.soumis_par == user
@rules.predicate
def is_activity_risk_owner(user, activiterisque):
return activiterisque.proprietaire == user
@rules.predicate
def is_activity_risk_monitor(user, activiterisque):
activite = activiterisque.activite
return activite.responsable == user
@rules.predicate
def is_activity_risk_supervisor(user, activiterisque):
processus = activiterisque.activite.processus
return processus.proc_manager == user
@rules.predicate
def is_activity_risk_upper_mgt(user, activiterisque):
processus = activiterisque.activite.processus
bu = processus.business_unit
return bu.bu_manager == user
# Risques des processus
@rules.predicate
def is_process_risk_reporter(user, processusrisque):
if processusrisque.soumis_par:
return processusrisque.soumis_par == user
return False
@rules.predicate
def is_process_risk_owner(user, processusrisque):
return processusrisque.proprietaire == user
@rules.predicate
def is_process_risk_monitor(user, processusrisque):
processus = processusrisque.processus
return processus.proc_manager == user
@rules.predicate
def is_process_risk_upper_mgt(user, processusrisque):
bu = processusrisque.processus.business_unit
return bu.bu_manager == user
# Estimations
@rules.predicate
def is_estimation_monitor(user, estimation):
try:
return estimation.content_object.activite.processus.proc_manager == user
except AttributeError:
return estimation.content_object.processus.proc_manager == user
# Contrôles
@rules.predicate
def is_controle_creator(user, controle):
return controle.cree_par == user
@rules.predicate
def is_controle_owner(user, controle):
if controle.assigne_a:
return controle.assigne_a == user
return False
@rules.predicate
def is_controle_reviewer(user, controle):
try:
return controle.content_object.activite.processus.proc_manager == user
except AttributeError:
return controle.content_object.processus.proc_manager == user
# Risques
@rules.predicate
def is_risk_creator(user, risque):
return risque.cree_par == user
# Identification Risques
@rules.predicate
def is_risk_verifier(user, identificationrisque):
if identificationrisque.get_class == 'ProcessusRisque':
return is_process_risk_monitor(user, identificationrisque) \
or is_process_risk_upper_mgt(user, identificationrisque)
elif identificationrisque.get_class == 'ActiviteRisque':
return is_activity_risk_monitor(user, identificationrisque) \
or is_activity_risk_supervisor(user, identificationrisque) \
or is_activity_risk_upper_mgt(user, identificationrisque)
# ------------rules------------
# Risques
rules.add_rule('change_risque', is_risk_creator)
# Identification Risques
rules.add_rule('verify_risk', is_risk_verifier)
# Processus
rules.add_rule('change_processus', is_process_upper_mgt)
rules.add_rule('delete_processus', is_process_upper_mgt)
rules.add_rule('add_activity_to_process', is_process_manager | is_process_upper_mgt)
rules.add_rule('add_process_data', is_process_manager | is_process_upper_mgt)
rules.add_rule('add_process_risk', rules.is_authenticated)
# Activités
rules.add_rule('change_activite', is_activity_supervisor | is_activity_upper_mgt)
rules.add_rule('delete_activite', is_activity_supervisor | is_activity_upper_mgt)
rules.add_rule('add_activity_risk', rules.is_authenticated)
rules.add_rule('complete_activity', is_activity_owner)
# Risques des activités
rules.add_rule('set_seuil_activity_risk', is_activity_risk_supervisor | is_activity_risk_upper_mgt)
rules.add_rule('set_review_date_activity_risk', is_activity_risk_supervisor | is_activity_risk_upper_mgt)
rules.add_rule('add_control_activity_risk', is_activity_risk_reporter | is_activity_risk_monitor |
is_activity_risk_owner | is_activity_risk_supervisor | is_activity_risk_upper_mgt)
rules.add_rule('assign_activity_risk', is_activity_risk_supervisor | is_activity_risk_upper_mgt)
rules.add_rule('estimate_activity_risk', is_activity_risk_supervisor | is_activity_risk_upper_mgt)
rules.add_rule('change_activiterisque', is_activity_risk_supervisor |
is_activity_risk_upper_mgt | is_activity_risk_reporter)
rules.add_rule('delete_activiterisque', is_activity_risk_supervisor | is_activity_risk_upper_mgt)
# Risques des processus
rules.add_rule('set_seuil_process_risk', is_process_risk_monitor | is_process_risk_upper_mgt)
rules.add_rule('set_review_date_process_risk', is_process_risk_upper_mgt | is_process_risk_monitor)
rules.add_rule('add_control_process_risk', is_process_risk_owner | is_process_risk_monitor | is_process_risk_upper_mgt
| is_activity_risk_reporter)
rules.add_rule('assign_process_risk', is_process_risk_monitor | is_process_risk_upper_mgt)
rules.add_rule('estimate_process_risk', is_process_risk_upper_mgt | is_process_risk_monitor)
rules.add_rule('change_processusrisque', is_process_risk_upper_mgt |
is_process_risk_monitor | is_process_risk_reporter)
rules.add_rule('delete_processusrisque', is_process_risk_monitor | is_process_risk_upper_mgt)
# Estimations
rules.add_rule('set_estimation_review_date', is_estimation_monitor)
# Contrôles
rules.add_rule('assign_control', is_controle_reviewer)
rules.add_rule('complete_control', is_controle_owner)
rules.add_rule('change_controle', is_controle_reviewer | is_controle_creator)
rules.add_rule('delete_controle', is_controle_creator | is_controle_reviewer)
rules.add_rule('approve_controle', is_controle_reviewer)
rules.add_rule('validate_controle_completion', is_controle_reviewer | is_controle_creator)
# ------------permissions------------
# Risques
rules.add_perm('risk_register.change_risque', is_risk_creator)
# Identification Risques
rules.add_perm('risk_register.verify_risque', is_risk_verifier)
# Processus
rules.add_perm('risk_register.change_processus', is_process_upper_mgt)
rules.add_perm('risk_register.delete_processus', is_process_upper_mgt)
rules.add_perm('risk_register.add_activity_to_process', is_process_manager | is_process_upper_mgt)
rules.add_perm('risk_register.add_process_data', is_process_manager | is_process_upper_mgt)
rules.add_perm('risk_register.add_process_risk', rules.is_authenticated)
# Activités
rules.add_perm('risk_register.change_activite', is_activity_supervisor | is_activity_upper_mgt)
rules.add_perm('risk_register.delete_activite', is_activity_supervisor | is_activity_upper_mgt)
rules.add_perm('risk_register.add_activity_risk', rules.is_authenticated)
rules.add_perm('risk_register_complete_activity', is_activity_owner)
# Risques des activités
rules.add_perm('risk_register.set_seuil_activity_risk', is_activity_risk_supervisor | is_activity_risk_upper_mgt)
rules.add_perm('risk_register.set_review_date_activity_risk', is_activity_risk_supervisor | is_activity_risk_upper_mgt)
rules.add_perm('risk_register.add_control_activity_risk', is_activity_risk_reporter | is_activity_risk_monitor |
is_activity_risk_owner | is_activity_risk_supervisor | is_activity_risk_upper_mgt)
rules.add_perm('risk_register.assign_activity_risk', is_activity_risk_supervisor | is_activity_risk_upper_mgt)
rules.add_perm('risk_register.estimate_activity_risk',
is_activity_risk_supervisor | is_activity_risk_upper_mgt | is_activity_risk_reporter)
rules.add_perm('risk_register.change_activiterisque', is_activity_risk_supervisor |
is_activity_risk_upper_mgt | is_activity_risk_reporter)
rules.add_perm('risk_register.delete_activiterisque', is_activity_risk_supervisor | is_activity_risk_upper_mgt)
# Risques des processus
rules.add_perm('risk_register.set_seuil_process_risk', is_process_risk_monitor | is_process_risk_upper_mgt)
rules.add_perm('risk_register.set_review_date_process_risk', is_process_risk_upper_mgt | is_process_risk_monitor)
rules.add_perm('risk_register.add_control_process_risk', is_process_risk_owner | is_process_risk_monitor |
is_process_risk_upper_mgt | is_process_risk_reporter)
rules.add_perm('risk_register.assign_process_risk', is_process_risk_monitor | is_process_risk_upper_mgt)
rules.add_perm('risk_register.estimate_process_risk', is_process_risk_upper_mgt | is_process_risk_monitor |
is_process_risk_owner | is_process_risk_reporter)
rules.add_perm('risk_register.change_processusrisque', is_process_risk_upper_mgt |
is_process_risk_monitor | is_process_risk_reporter)
rules.add_perm('risk_register.delete_processusrisque', is_process_risk_monitor | is_process_risk_upper_mgt)
# Estimations
rules.add_perm('risk_register.set_estimation_review_date', is_estimation_monitor)
# Contrôles
rules.add_perm('risk_register.assign_control', is_controle_reviewer)
rules.add_perm('risk_register.complete_control', is_controle_owner)
rules.add_perm('risk_register.change_controle', is_controle_reviewer | is_controle_creator)
rules.add_perm('risk_register.delete_controle', is_controle_creator | is_controle_reviewer)
rules.add_perm('risk_register.approve_controle', is_controle_reviewer)
rules.add_perm('risk_register.validate_controle_completion', is_controle_reviewer | is_controle_creator)
| 40.361446 | 119 | 0.816816 | 0 | 0 | 0 | 0 | 3,289 | 0.326971 | 0 | 0 | 2,347 | 0.233323 |
2fe2c1e144f5662d680175f25d92e5d935c15f8b | 1,546 | py | Python | tests/integration/test_ims.py | acidjunk/python-zeep | 05fe1903d14de4d079113915a6b654200677eb88 | [
"MIT"
] | null | null | null | tests/integration/test_ims.py | acidjunk/python-zeep | 05fe1903d14de4d079113915a6b654200677eb88 | [
"MIT"
] | null | null | null | tests/integration/test_ims.py | acidjunk/python-zeep | 05fe1903d14de4d079113915a6b654200677eb88 | [
"MIT"
] | null | null | null | import os
import uuid
import requests_mock
import zeep
def read_file(file_name, folder="wsdl_ims"):
file = os.path.join(os.path.dirname(os.path.realpath(__file__)), folder, file_name)
with open(file) as f:
return f.read()
def test_find_customer():
with requests_mock.mock() as m:
m.get("http://example.com/inventory?wsdl", text=read_file("inventory.wsdl"))
m.post(
"http://example.com/Inventory/inventoryhttps",
text=read_file("find_customer_by_name_response.xml", "mock_ims"),
)
# set strict to True -> then data will be available in _raw_elements
client = zeep.Client(
"http://example.com/inventory?wsdl",
settings=zeep.settings.Settings(strict=False),
)
filter_fields = [
{
"FilterField": {
"Name": "Name",
"SelectedOperator": "OperationEquals",
"Value": "SURFNET",
}
}
]
ims_filter = {"Filters": filter_fields}
pager = {
"StartElement": 0,
"Descending": False,
"NumberOfElements": 10,
"OrderByProperty": None,
}
result = client.service.GetAllCustomersFiltered(
pager=pager, filter=ims_filter, sessionToken=str(uuid.uuid4())
)
assert result.GetAllCustomersFilteredResult.Customer[0].Id == 2644557
assert result.GetAllCustomersFilteredResult.Customer[0].Name == "SURFNET"
| 30.92 | 87 | 0.576973 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 410 | 0.265201 |
2fe2e978cb293b53e1d50c670400eb304e6da2dc | 982 | py | Python | EMPIRIC_POSTPROCESS/SPalignResults/run_pdb.py | yvehchan/TIM_EMPIRIC | 273091a4a257ed51dfd529709138396a18fa49ac | [
"MIT"
] | null | null | null | EMPIRIC_POSTPROCESS/SPalignResults/run_pdb.py | yvehchan/TIM_EMPIRIC | 273091a4a257ed51dfd529709138396a18fa49ac | [
"MIT"
] | null | null | null | EMPIRIC_POSTPROCESS/SPalignResults/run_pdb.py | yvehchan/TIM_EMPIRIC | 273091a4a257ed51dfd529709138396a18fa49ac | [
"MIT"
] | null | null | null | import subprocess as sub
import sys
# tm_pdb = ('Tm',"pdb1i4n_A.ent")
# tt_pdb = ('Tt',"pdb1vc4_A.ent")
# ss_pdb = ('Ss',"pdb2c3z_A.ent")
if sys.argv[1] == 'Tm':
template_name, current_template = 'Tm',"pdb1i4n_A.ent"
elif sys.argv[1] == 'Tt':
template_name, current_template = 'Tt',"pdb1vc4_A.ent"
elif sys.argv[1] == 'Ss':
template_name, current_template = 'Ss',"pdb2c3z_A.ent"
else:
print "blah! wrong input: Tm Tt or Ss are acceptable only!"
print "nohup python run_pdb.py Tm(Tt,Ss) &"
sys.exit(1)
our_pds = [s.strip() for s in sub.check_output('ls ./pdbA/ | grep .ent',shell=True).strip().split('\n')]
def get_cmd(pdb1,pdb2):
return "./SPalignNS -pair ./pdbA/%s ./pdbA/%s"%(pdb1,pdb2)
for pdb in our_pds:
result = sub.check_output(get_cmd(current_template,pdb),shell=True)
fp = open(pdb.replace('.','_')+'_%s.aln'%template_name,'w')
fp.write(result)
fp.close()
# with open(pdb+'.aln','w') as fp:
# fp.write(result)
| 29.757576 | 104 | 0.638493 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 397 | 0.404277 |
2fe31a354876b84e075d42b8513e52335b95d67e | 407 | py | Python | search/migrations/0006_auto_20180516_0457.py | kimyou7/ParkGoGreen | a0b21210823d711af56d76226919950aa01a2b92 | [
"MIT"
] | null | null | null | search/migrations/0006_auto_20180516_0457.py | kimyou7/ParkGoGreen | a0b21210823d711af56d76226919950aa01a2b92 | [
"MIT"
] | null | null | null | search/migrations/0006_auto_20180516_0457.py | kimyou7/ParkGoGreen | a0b21210823d711af56d76226919950aa01a2b92 | [
"MIT"
] | null | null | null | # Generated by Django 2.0.2 on 2018-05-16 11:57
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('search', '0005_auto_20180516_0455'),
]
operations = [
migrations.AlterField(
model_name='park',
name='zip_code',
field=models.CharField(blank=True, max_length=5, null=True),
),
]
| 21.421053 | 72 | 0.604423 | 314 | 0.771499 | 0 | 0 | 0 | 0 | 0 | 0 | 96 | 0.235872 |
2fe5fa8c0f690ef86c1d2f99a56c03f36914d199 | 338 | py | Python | tests/conftest.py | davidkyburz/gtfs-lite | cc3a5df7a9e582264130771a688b12eb2ea0c08c | [
"MIT"
] | 4 | 2020-06-03T14:44:27.000Z | 2022-03-24T01:11:04.000Z | tests/conftest.py | davidkyburz/gtfs-lite | cc3a5df7a9e582264130771a688b12eb2ea0c08c | [
"MIT"
] | 3 | 2020-06-18T15:48:35.000Z | 2021-03-31T14:45:13.000Z | tests/conftest.py | davidkyburz/gtfs-lite | cc3a5df7a9e582264130771a688b12eb2ea0c08c | [
"MIT"
] | 2 | 2021-03-13T00:15:21.000Z | 2021-04-13T21:38:23.000Z | from datetime import date, time
import pytest
@pytest.fixture
def feed_zipfile():
return r"data/metra_2020-02-23.zip"
@pytest.fixture
def test_date():
return date(2020, 2, 24)
@pytest.fixture
def test_timerange():
return [time(0, 0), time(23, 59)]
@pytest.fixture
def test_stop_ids():
return [time(0, 0), time(23, 59)] | 18.777778 | 39 | 0.695266 | 0 | 0 | 0 | 0 | 285 | 0.843195 | 0 | 0 | 28 | 0.08284 |
2fe6856451079b7092c8013f6dea797816021d81 | 263 | py | Python | dotplug/__init__.py | arubertoson/dotplug | c95e06add33864a43e4934e1c5e54f90eb5e262d | [
"MIT"
] | null | null | null | dotplug/__init__.py | arubertoson/dotplug | c95e06add33864a43e4934e1c5e54f90eb5e262d | [
"MIT"
] | 1 | 2018-07-24T07:47:08.000Z | 2018-07-24T07:47:08.000Z | dotplug/__init__.py | arubertoson/dotplug | c95e06add33864a43e4934e1c5e54f90eb5e262d | [
"MIT"
] | null | null | null | """
Entry Point
"""
import asyncio
from dotplug.main import main
from dotplug.console import ncurses
def _main():
import uvloop
asyncio.set_event_loop_policy(uvloop.EventLoopPolicy())
with ncurses():
asyncio.run(main())
input("")
| 14.611111 | 59 | 0.676806 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 21 | 0.079848 |
2fe822a53dc8ca2f474f7ee2540a5f1ad8fb13ec | 1,903 | py | Python | signalr_async/core/messages.py | sam-mosleh/signalr-async | 40bf79d5482051f76522987cb348e7632bcc0c07 | [
"MIT"
] | 4 | 2021-01-20T18:11:52.000Z | 2022-01-12T16:24:39.000Z | signalr_async/core/messages.py | sam-mosleh/signalr-async | 40bf79d5482051f76522987cb348e7632bcc0c07 | [
"MIT"
] | 2 | 2021-10-15T15:21:44.000Z | 2021-12-08T22:26:39.000Z | signalr_async/core/messages.py | sam-mosleh/signalr-async | 40bf79d5482051f76522987cb348e7632bcc0c07 | [
"MIT"
] | null | null | null | from dataclasses import dataclass, field
from enum import IntEnum
from typing import Any, Dict, List, Optional, Union
class MessageTypes(IntEnum):
INVOCATION = 1
STREAM_ITEM = 2
COMPLETION = 3
STREAM_INVOCATION = 4
CANCEL_INVOCATION = 5
PING = 6
CLOSE = 7
class HubMessageBase:
type_: MessageTypes
@dataclass
class InvocationMessage(HubMessageBase):
invocation_id: str
target: str
arguments: List[Any]
headers: Optional[Dict[str, Any]] = None
type_: MessageTypes = MessageTypes.INVOCATION
stream_ids: List[str] = field(default_factory=list)
@dataclass
class StreamItemMessage(HubMessageBase):
invocation_id: str
item: Dict[str, Any]
headers: Optional[Dict[str, Any]] = None
type_: MessageTypes = MessageTypes.STREAM_ITEM
@dataclass
class CompletionMessage(HubMessageBase):
invocation_id: str
headers: Optional[Dict[str, Any]] = None
type_: MessageTypes = MessageTypes.COMPLETION
error: Optional[str] = None
result: Optional[Dict[str, Any]] = None
@dataclass
class StreamInvocationMessage(HubMessageBase):
invocation_id: str
target: str
arguments: list
stream_ids: list
headers: Optional[Dict[str, Any]] = None
type_: MessageTypes = MessageTypes.STREAM_INVOCATION
@dataclass
class CancelInvocationMessage(HubMessageBase):
invocation_id: str
headers: Optional[Dict[str, Any]] = None
type_: MessageTypes = MessageTypes.CANCEL_INVOCATION
@dataclass
class PingMessage(HubMessageBase):
type_: MessageTypes = MessageTypes.PING
@dataclass
class CloseMessage(HubMessageBase):
type_: MessageTypes = MessageTypes.CLOSE
error: str = None
allow_reconnect: bool = None
HubMessage = Union[
InvocationMessage,
StreamItemMessage,
CompletionMessage,
StreamInvocationMessage,
CancelInvocationMessage,
PingMessage,
CloseMessage,
]
| 22.388235 | 56 | 0.7299 | 1,495 | 0.785602 | 0 | 0 | 1,362 | 0.715712 | 0 | 0 | 0 | 0 |
2fea604bea2a0fd4db7527c2e44b6035cbadde44 | 1,613 | py | Python | setuper web app/handlers/admin/adminhandler.py | dragondjf/CloudSetuper | 31aefe629f7f2d59d287981eda3e4e618ace9e9f | [
"MIT"
] | 22 | 2015-01-08T12:54:20.000Z | 2021-05-16T04:15:45.000Z | setuper web app/handlers/admin/adminhandler.py | dragondjf/CloudSetuper | 31aefe629f7f2d59d287981eda3e4e618ace9e9f | [
"MIT"
] | null | null | null | setuper web app/handlers/admin/adminhandler.py | dragondjf/CloudSetuper | 31aefe629f7f2d59d287981eda3e4e618ace9e9f | [
"MIT"
] | 11 | 2015-01-25T01:26:45.000Z | 2021-08-18T01:40:40.000Z | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from tornado.web import authenticated, removeslash
from handlers.basehandlers import BaseHandler
adminusers = [
{
'username': 'admin',
'password': 'admin'
}
]
class AdminLoginHandler(BaseHandler):
role = "admin"
@removeslash
def get(self):
if self.current_user:
self.redirect('/admin')
self.render("admin/login.html", title="Cloud Setuper", username=self.current_user)
def post(self):
username = self.get_argument("username", "")
password = self.get_argument("password", "")
user = {
'username': username,
'password': password
}
checkstatus = self.checkUser(user)
if checkstatus['status']:
self.set_secure_cookie(self.role, username)
response = {
'status': "success",
'info': checkstatus['info']
}
else:
response = {
'status': "fail",
'info': checkstatus['info']
}
self.write(response)
def checkUser(self, user):
if user in adminusers:
return {
'status': True,
'info': "login success"
}
else:
return {
'status': False,
'info': "please check username or password."
}
class AdminLogoutHandler(BaseHandler):
role = "admin"
@authenticated
@removeslash
def post(self):
self.clear_cookie(self.get_current_user())
| 24.439394 | 90 | 0.520769 | 1,376 | 0.853069 | 0 | 0 | 290 | 0.179789 | 0 | 0 | 319 | 0.197768 |
2fece6a8f98a445b6d9a8e7fac1daf4aac526e8f | 3,792 | py | Python | testbyxcj/datasender.py | AlsikeE/Ez | 2f84ac1896a5b6d8f467c14d3618274bdcfd2cad | [
"Apache-2.0"
] | null | null | null | testbyxcj/datasender.py | AlsikeE/Ez | 2f84ac1896a5b6d8f467c14d3618274bdcfd2cad | [
"Apache-2.0"
] | null | null | null | testbyxcj/datasender.py | AlsikeE/Ez | 2f84ac1896a5b6d8f467c14d3618274bdcfd2cad | [
"Apache-2.0"
] | 1 | 2021-05-08T02:23:00.000Z | 2021-05-08T02:23:00.000Z | #coding:utf-8
from mininet.net import Mininet
from mininet.topo import LinearTopo
from mininet.cli import CLI
# from eventlet import greenthread
import argparse
import threading
import re
from time import sleep
import logging
logger = logging.getLogger(__name__)
logger.setLevel(level = logging.INFO)
handler = logging.FileHandler("hahalog.txt")
handler.setLevel(logging.INFO)
formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
handler.setFormatter(formatter)
logger.addHandler(handler)
# import eventlet
# 四个交换机每个下边挂载一个主机
# import ctypes
IPERF_SERVER_LOG_DIR = '/root/ez-segway/logs/iperflogs/server/'
IPERF_CLIENT_LOG_DIR = '/root/ez-segway/logs/iperflogs/client/'
class DataSender(object):
def __init__(self,net, filepath, wait_time):
self.net = net
self.filepath = filepath
self.wait_time = wait_time
self.conf = []#all flows' confs
self.srv_ports = [] #(server,port)
def read_conf(self,filepath):
f = open(filepath, 'r')
line = f.readline()
while line:
des = line.strip('\n').split("\t")
nodes_index = des[1].strip('(').strip(')').split(',')
hosts = (net.hosts[int(nodes_index[0])], net.hosts[int(nodes_index[1])])
obj = dict()
obj['uuid'] = des[0]
obj['hosts'] = hosts
obj['port'] = int(des[2])
obj['vol'] = (des[3] + 'M')
obj['seconds'] = float(des[4])
obj['goal'] = float(des[5])
self.conf.append(obj)
line = f.readline()
# return self.conf
def _iperf(self, hosts, l4Type="UDP", udpBw='10M', fmt=None, seconds=10, port=5001,uuid=None):
server, client = hosts
if((server,port) not in self.srv_ports):
self.srv_ports.append((server,port))
server.cmd('iperf3 -s -p %d -i 1 > '%port +IPERF_SERVER_LOG_DIR+'server%s.txt&'% uuid)
# server.cmd('iperf3 -s -u -p %d -i 1 > '%port +IPERF_SERVER_LOG_DIR+'server%s.txt&'% uuid)
logger.info('operf -s -p %d' %port)
iperfArgs = 'iperf3 -p %d ' % port
bwArgs = ''
if l4Type == 'UDP':
iperfArgs += '-u '
bwArgs = '-b ' + udpBw + ' '
elif l4Type != 'TCP':
raise Exception( 'Unexpected l4 type: %s' % l4Type )
if fmt:
iperfArgs += '-f %s ' % fmt
if l4Type == 'TCP':
if not waitListening( client, server.IP(), port ):
raise Exception( 'Could not connect to iperf on port %d'
% port )
client.cmd( iperfArgs + '-t %d -i 1 -c ' % seconds +
server.IP() + ' ' + bwArgs +' > ' + IPERF_CLIENT_LOG_DIR +'client%s.txt &'%uuid)
logger.info(iperfArgs + '-t %d -c ' % seconds +
server.IP() + ' ' + bwArgs)
def send_iperfs(self):
for c in self.conf:
self._iperf(hosts=c['hosts'], l4Type="UDP", udpBw=c['vol'], seconds=c['seconds'], port=c['port'],uuid=c['uuid'])
if __name__ == "__main__":
parser = argparse.ArgumentParser(description='ctrl')
parser.add_argument('--iperf', nargs='?',
type=int, default=0)
parser.add_argument('--filepath', nargs='?',
type=str, default=None)
args = parser.parse_args()
iperf = args.iperf
filepath = args.filepath
print(filepath)
wait_time = 10
Linear4 = LinearTopo(k=4)
net = Mininet(topo=Linear4)
net.start()
if(filepath):
ds = DataSender(net, filepath, wait_time)
ds.read_conf(ds.filepath)
logger.info(ds.conf)
if(iperf):
ds.send_iperfs()
CLI(net)
# net.pingAll()
net.stop()
| 33.857143 | 124 | 0.563819 | 2,382 | 0.623234 | 0 | 0 | 0 | 0 | 0 | 0 | 807 | 0.211146 |
2fedeac01f028413cd75d7f9ca26c2452c3cbbfd | 7,678 | py | Python | pullcord-export.py | tsudoko/pullcord-export | 6c6b296cb3437dbbf9896573ecb3f7d00e33b411 | [
"Unlicense"
] | null | null | null | pullcord-export.py | tsudoko/pullcord-export | 6c6b296cb3437dbbf9896573ecb3f7d00e33b411 | [
"Unlicense"
] | null | null | null | pullcord-export.py | tsudoko/pullcord-export | 6c6b296cb3437dbbf9896573ecb3f7d00e33b411 | [
"Unlicense"
] | null | null | null | #!/usr/bin/env python3
import collections
import datetime
import glob
import html
import re
import sys
# this is a mess right now, feel free to make it less bad if you feel like it
try:
# python 3.7+
datetime.datetime.fromisoformat
except AttributeError:
# not fully correct, but good enough for this use case
adjtz_re = re.compile(r"([-+][0-9]+):([0-9]+)")
rmmil_re = re.compile(r"\..[0-9]*")
class ___datetime(datetime.datetime):
@staticmethod
def fromisoformat(f):
return datetime.datetime.strptime(adjtz_re.sub(r"\1\2", rmmil_re.sub("", f)), "%Y-%m-%dT%H:%M:%S%z")
datetime.datetime = ___datetime
del ___datetime
Entry = collections.namedtuple("Entry", ["timestamp", "dead", "type", "fields"])
Member = collections.namedtuple("Member", ["name", "discriminator", "avatar", "nick", "roles"])
Role = collections.namedtuple("Role", ["name", "color", "pos", "perms", "hoist"])
def mkrole(name, color, pos, perms, hoist=''):
return Role(name, int(color), int(pos), int(perms), bool(hoist))
def read_guild(f):
g = {
"guild": {},
"channel": {},
"member": {},
"role": {},
"emoji": {},
}
for l in f.readlines():
ts, _, op, type, id, *rest = l.strip().split("\t")
if type not in g:
continue
if id not in g[type]:
g[type][id] = []
if type == "member":
name, discriminator, *rest = rest
if rest:
avatar, *rest = rest
else:
avatar = None
if rest:
nick, *rest = rest
else:
nick = None
if rest:
roles, *rest = rest
else:
roles = ''
rest = Member(name, int(discriminator), avatar, nick, roles)
elif type == "role":
rest = mkrole(*rest)
g[type][id].append(Entry(datetime.datetime.fromisoformat(ts), op != "add", type, rest))
return g
class Message:
def __init__(self, id, author):
self.id = id
self.author = author
self.content = None
self.editedtime = None
self.deletedtime = None
self.attachments = []
self.embeds = []
def __str__(self):
return f"<Message {self.id} by {self.author} " + str([self.content, self.editedtime, self.deletedtime, self.attachments, self.embeds])
def timestamp(self):
return ((int(self.id) >> 22) + 1420070400000)/1000
member_re = re.compile("<@!?([0-9]+)>")
role_re = re.compile("<@&([0-9]+)>")
channel_re = re.compile("<#([0-9]+)>") # TODO
def mention(guild, date, msg, wrap=lambda a: a):
def member_name(m):
id, *_ = m.groups()
o = guild["member"].get(id)
if o:
member = close_to(o, date).fields
return wrap("@" + (member.nick or member.name))
else:
return m.group(0)
def role_name(m):
id, *_ = m.groups()
o = guild["role"].get(id)
if o:
role = close_to(o, date).fields
return wrap("@" + role[0])
else:
return m.group(0)
msg = member_re.sub(member_name, msg)
msg = role_re.sub(role_name, msg)
return msg
def unescape_msg(msg):
return msg.replace("\\n", "\n").replace("\\t", "\t").replace("\\\\", "\\")
def read_channel(f):
msgs = collections.OrderedDict()
attachbuf = (None, [])
reactbuf = (None, [])
for l in f.readlines():
ts, _, op, type, id, *rest = l.strip().split("\t")
if type == "message":
if op == "del":
if id in msgs:
del msgs[id] # TODO: show deletions
continue
authorid, *rest = rest
msgs[id] = Message(id, authorid)
if attachbuf[0] == id:
msgs[id].attachments = attachbuf[1]
attachbuf = (None, [])
if reactbuf[0] == id:
msgs[id].reactions = reactbuf[1]
reactbuf = (None, [])
if rest:
editedtime, *rest = rest
msgs[id].editedtime = editedtime if editedtime else None
# FIXME: handle variable number of fields properly instead of doing these kinds of hacks
if rest:
tts, content, *_ = rest
msgs[id].content = unescape_msg(content)
elif type == "attachment":
msgid, *_ = rest
if msgid in msgs:
msgs[msgid].attachments.append(id)
else:
if attachbuf[0] is None:
attachbuf = (msgid, [])
elif attachbuf[0] != msgid:
raise Exception(f"attachbuf id mismatch ({attachbuf[0]} != {msgid})")
attachbuf[1].append(id)
elif type == "reaction":
...
return msgs
def close_to(versions, dt):
ret = versions[0]
for v in versions[1:]:
if v.timestamp >= dt:
break
ret = v
return ret
def print_text(guild, cid, msgs):
for _, m in msgs.items():
date = datetime.datetime.fromtimestamp(m.timestamp(), datetime.timezone.utc)
author = close_to(guild["member"][m.author], date).fields
print(f"[{date.strftime('%Y-%m-%d %H:%M:%S')}] {author.nick or author.name}: ", end="")
if m.content:
print(mention(guild, date, m.content), end=" ")
if m.attachments:
for a in m.attachments:
path = f"attachments/{cid}/{a}/"
path = glob.glob(f"{path}/*")[0]
url = "https://cdn.discordapp.com/" + path
# TODO: use attachment name from the log if present
print(f"{url} ", end="")
print()
# TODO: animated emoji
emoji_re = re.compile("<:([^:]+):([0-9]+)>")
def emoji_img(m):
name, id = m.groups()
return f'<img class="emoji" title=":{html.escape(name)}:" src="emojis/{id}.png">'
def print_html(guild, cid, msgs):
import markdown
md = markdown.Markdown(
extensions=[
"nl2br",
"discord_mdext.fenced_code",
"discord_mdext.strikethrough",
"discord_mdext.standard_subset",
"mdx_urlize",
]
)
first = True
lastauthor = None
for _, m in msgs.items():
date = datetime.datetime.fromtimestamp(m.timestamp(), datetime.timezone.utc)
author = close_to(guild["member"][m.author], date).fields
roles = sorted(((r, close_to(guild["role"][r], date).fields) for r in author.roles.split(',')), key=lambda r: r[1].pos)
if lastauthor != m.author:
if not first:
print("</div></div>")
first = False
lastauthor = m.author
print('<div class="msg">')
print(' <div class="msg-left">')
av = glob.glob(f"avatars/{m.author}/{author.avatar}.*")
av = av[0] if av else f"embed/avatars/{author.discriminator%5}.png"
print(f' <img class="msg-avatar" src="{html.escape(av)}">')
print(" </div>")
print(' <div class="msg-right">')
print(f' <span class="msg-user"', end="")
if roles[-1][1].color:
print(f" style=\"color: #{'%x' % roles[-1][1].color}\"", end="")
print(f' title="{html.escape(author.name)}#{author.discriminator:04d}">{html.escape(author.nick or author.name)}</span>')
print(' <span class="msg-date">', end="")
print(f"{date.strftime('%Y-%m-%d %H:%M:%S')}</span>")
if m.content:
print(" ", end="")
print('<div class="msg-content">', end="")
msg = mention(guild, date, m.content, lambda c: '<span class="mention">' + c + '</span>')
msg = emoji_re.sub(emoji_img, msg)
msg = md.convert(msg)
# annyoing hack, we can't pass <div class="msg-content"> to prevent
# adding <p>s since markdown doesn't process the text inside the div
if msg.startswith("<p>"):
msg = msg[len("<p>"):]
if msg.endswith("</p>"):
msg = msg[:-len("</p>")]
msg = re.sub("</p>\n<p>", "<br /><br />", msg, flags=re.MULTILINE)
msg = re.sub("</?p>", "", msg)
print(msg, end="")
print("</div>")
if m.attachments:
for a in m.attachments:
path = f"attachments/{cid}/{a}/"
path = glob.glob(f"{path}/*")[0]
# TODO: use attachment name from the log if present
print(' <div class="msg-attachment">')
print(f' <a href="{html.escape(path)}">')
# TODO: handle other file types
print(f' <img class="msg-attachment" src="{html.escape(path)}">')
print(" </a>\n </div>")
if __name__ == "__main__":
_, gid, cid, *_ = sys.argv
with open(f"channels/{gid}/guild.tsv", newline="\n") as f:
guild = read_guild(f)
with open(f"channels/{gid}/{cid}.tsv", newline="\n") as f:
msgs = read_channel(f)
print_html(guild, cid, msgs)
| 29.644788 | 136 | 0.617479 | 617 | 0.080359 | 0 | 0 | 141 | 0.018364 | 0 | 0 | 2,317 | 0.301771 |
2fee36e9dc29dc70fa3759df2bfd752c12817031 | 9,432 | py | Python | sem/gui/misc.py | YoannDupont/SEM | ff21c5dc9a8e99eda81dc266e67cfa97dec7c243 | [
"MIT"
] | 22 | 2016-11-13T21:08:58.000Z | 2021-04-26T07:04:54.000Z | sem/gui/misc.py | Raphencoder/SEM | ff21c5dc9a8e99eda81dc266e67cfa97dec7c243 | [
"MIT"
] | 15 | 2016-11-15T10:21:07.000Z | 2021-11-08T10:08:05.000Z | sem/gui/misc.py | Raphencoder/SEM | ff21c5dc9a8e99eda81dc266e67cfa97dec7c243 | [
"MIT"
] | 8 | 2016-11-15T10:21:41.000Z | 2022-03-04T21:28:05.000Z | """
file: misc.py
author: Yoann Dupont
MIT License
Copyright (c) 2018 Yoann Dupont
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
import re
from sem.storage.annotation import Tag
from sem.storage import Trie
from sem.constants import NUL
try:
import Tkinter as tkinter
except ImportError:
import tkinter
def fill_with(t, value):
def fill_rec(t1):
keys = [key for key in t1 if key != NUL]
for key in keys:
fill_rec(t1[key])
t1[NUL] = value[:]
fill_rec(t.data)
def find_potential_separator(target):
regex = re.compile(u"(\W+)", re.U)
found = list(set(regex.findall(target)))
if len(found) == 1:
return found[0]
return None
def find_occurrences(target, content, whole_word=True):
#1. regex = re.compile(u'((?<=\W)|(?<=^))' + target + u"(?=\W|$)", re.U + re.M)
#2. regex = re.compile(u'\b' + target + u"\b", re.U + re.M)
target = target.strip()
if whole_word:
pattern = (u"\\b" if target[0].isalnum() else u"((?<=\\s)|(?<=^))") + re.escape(target) + (u"\\b" if target[-1].isalnum() else u"(?=\\s|$)")
else:
pattern = re.escape(target)
regex = re.compile(pattern, re.U + re.M)
for match in regex.finditer(content):
yield match
def random_color():
import random, colorsys
red = (random.randrange(0, 256) + 256) / 2.0;
green = (random.randrange(0, 256) + 256) / 2.0;
blue = (random.randrange(0, 256) + 256) / 2.0;
def to_hex(i):
hx = hex(int(i))[2:]
if len(hx) == 1:
hx = "0" + hx
return hx
def to_color(r,g,b):
cs = [to_hex(c) for c in [r,g,b]]
return "#{cs[0]}{cs[1]}{cs[2]}".format(cs=cs)
def darker(r,g,b):
h,l,s = colorsys.rgb_to_hls(r/256.0, g/256.0, b/256.0)
cs = [to_hex(256.0*c) for c in colorsys.hls_to_rgb(h,l/2.5,s)]
return "#{cs[0]}{cs[1]}{cs[2]}".format(cs=cs)
return {"background":to_color(red,green,blue),"foreground":darker(red,green,blue)}
class Adder():
l2t = {}
t2l = {}
@classmethod
def label2type(cls, label, level, default=None):
return cls.l2t.get(level, {}).get(label, default)
@classmethod
def type2label(cls, type, level, default=None):
return cls.t2l.get(level, {}).get(type, default)
@classmethod
def clear(cls):
cls.l2t.clear()
cls.t2l.clear()
def __init__(self, frame, the_type, available, level=0):
if len(available[level]) == 0:
raise ValueError("No more available shortcuts!")
self.frame = frame
self.type = the_type[:]
self.level = level
self.label = the_type.lower()
self.shortcut = None
found = False
if self.level == 0:
if len(self.frame.spare_colors) > 0:
self.color = self.frame.spare_colors.pop()
else:
self.color = random_color()
self.frame.text.tag_configure(self.type, **self.color)
for i in range(len(self.label)):
target = self.label[i].lower()
found = target in available[level]
if found:
available[level].remove(target)
self.shortcut = target
self.label = self.label + u" [{0} or Shift+{0}]".format(self.label[i])
break
if not found and len(available[level]) > 0:
char = available[level][0]
available[level].remove(char)
self.shortcut = char
self.label += " [{0} or Shift+{0}]".format(char)
if self.level not in Adder.l2t:
Adder.l2t[self.level] = {}
Adder.t2l[self.level] = {}
Adder.l2t[self.level][self.label] = self.type
Adder.t2l[self.level][self.type] = self.label
def add(self, event, remove_focus=False):
if self.frame.current_selection is not None:
f_cs = self.frame.current_selection
tag = Tag(self.type, f_cs.lb, f_cs.ub)
first = self.frame.charindex2position(f_cs.lb)
last = self.frame.charindex2position(f_cs.ub)
if tag in self.frame.current_annotations and self.frame.current_type_hierarchy_level == 0:
return
else:
first = "sel.first"
last = "sel.last"
self.frame.wish_to_add = [self.type, first, last]
self.frame.add_annotation(None, remove_focus)
def add_all(self, event):
if self.frame.current_selection is not None:
start = self.frame.charindex2position(self.frame.current_selection.lb)
end = self.frame.charindex2position(self.frame.current_selection.ub)
else:
start, end = ("sel.first", "sel.last")
try:
target = re.escape(self.frame.text.get(start, end).strip())
pattern = (u"\\b" if target[0].isalnum() else u"((?<=\\s)|(?<=^))") + target + (u"\\b" if target[-1].isalnum() else u"(?=\\s|$)")
regex = re.compile(pattern, re.U + re.M)
for match in regex.finditer(self.frame.doc.content):
cur_start, cur_end = self.frame.charindex2position(match.start()), self.frame.charindex2position(match.end())
if Tag(self.type, match.start(), match.end()) not in self.frame.current_annotations:
self.frame.wish_to_add = [self.type, cur_start, cur_end]
self.frame.add_annotation(None, remove_focus=False)
except tkinter.TclError:
raise
self.frame.type_combos[self.level].current(0)
self.frame.wish_to_add = None
self.frame.current_selection = None
self.frame.current_type_hierarchy_level = 0
self.frame.update_level()
self.frame.text.tag_remove("BOLD", "1.0", 'end')
class Adder2(object):
def __init__(self, tagset, levels, shortcut_trie):
self.tagset = tagset
self.levels = levels
self.shortcut_trie = shortcut_trie
self.current_annotation = None
self.current_hierarchy_level = 0
@classmethod
def from_tagset(cls, tagset):
levels = [tag.split(u".") for tag in tagset]
chars = list(u"abcdefghijklmnopqrstuvwxyz") + [u'F1', u'F2', u'F3', u'F4', u'F5', u'F6', u'F7', u'F8', u'F9', u'F10', u'F11', u'F12', u'*']
trie = Trie()
for level in levels:
trie.add(level)
fill_with(trie, chars)
shortcut_trie = Trie()
for level in levels:
hierarchy = []
for depth, sublevel in enumerate(level):
ident = u".".join(hierarchy + [sublevel])
if hierarchy + [sublevel] in shortcut_trie:
hierarchy.append(sublevel)
continue
sub = trie.goto(hierarchy)
available = sub[NUL]
for c in sublevel.lower():
found = c in available
if found:
available.remove(c)
break
if not found:
c = available[0]
available.remove(c)
hierarchy.append(sublevel)
shortcut_trie.add_with_value(hierarchy, c)
return Adder2(tagset, levels, shortcut_trie)
def max_depth(self):
return max([len(l) for l in self.levels])
def up_one_level(self):
if self.current_annotation is None:
self.current_hierarchy_level = 0
else:
self.current_hierarchy_level += 1
if self.current_hierarchy_level >= self.max_depth():
self.current_hierarchy_level = 0
def down_one_level(self):
if self.current_annotation is None:
self.current_hierarchy_level = 0
else:
self.current_hierarchy_level -= 1
if self.current_hierarchy_level < 0:
self.current_hierarchy_level = self.max_depth()-1
def type_from_letter(self, letter):
if self.current_annotation is not None and len(self.current_annotation.levels) < self.current_hierarchy_level:
return None
path = (self.current_annotation.levels[ : self.current_hierarchy_level] if self.current_annotation else [])
sub = self.shortcut_trie.goto(path)
for key,val in sub.items():
if key != NUL and val[NUL] == letter:
return key
| 38.655738 | 148 | 0.593723 | 6,433 | 0.68204 | 552 | 0.058524 | 1,548 | 0.164122 | 0 | 0 | 1,657 | 0.175679 |
2feed576dc500b8d42c7907dda91a0c63a0e4230 | 4,573 | py | Python | character-identifier/embedding_loader.py | vtt-project/vtt-char-identify | 8d991bc5b3bec1415cea2e5ea4dcc37b6c6b36b1 | [
"Apache-2.0"
] | 12 | 2018-05-20T22:01:05.000Z | 2020-05-08T06:38:38.000Z | character-identifier/embedding_loader.py | vtt-project/vtt-char-identify | 8d991bc5b3bec1415cea2e5ea4dcc37b6c6b36b1 | [
"Apache-2.0"
] | null | null | null | character-identifier/embedding_loader.py | vtt-project/vtt-char-identify | 8d991bc5b3bec1415cea2e5ea4dcc37b6c6b36b1 | [
"Apache-2.0"
] | 5 | 2018-11-18T21:16:27.000Z | 2021-09-23T13:46:52.000Z | import os
import sys
import numpy as np
import gensim
from gensim.models import word2vec
import data_utils
from config_utils import data_paths
DECREASE_FACTOR=1e-4 # TODO @Future: there should be a smarter way
def load_word2vec_embeddings(filename):
binary_file = ".bin" in filename
return gensim.models.KeyedVectors.load_word2vec_format(filename, binary=binary_file)
def filter_embeddings(word_vectors, vocabulary_idx_to_word, normalise_word=False, rnd_init=False):
"""
:param normalise_word: set words to lowercase and replace whitespace by '_'
:param rnd_init: If True, set initial weights to random numbers drawn from uniform distribution, otherwise set weights to zero.
"""
unknown_inds = []
found_inds = []
if rnd_init:
rel_vectors = np.random.rand(len(vocabulary_idx_to_word), word_vectors.vector_size)*DECREASE_FACTOR
else:
rel_vectors = np.zeros(shape=(len(vocabulary_idx_to_word), word_vectors.vector_size))
for (idx, word) in enumerate(vocabulary_idx_to_word):
if normalise_word:
word = word.replace(" ", "_").lower()
if word in word_vectors:
rel_vectors[idx,:] = word_vectors.wv[word]
found_inds.append(idx)
else:
unknown_inds.append(idx)
return rel_vectors, unknown_inds, found_inds
def fill_missing_embeddings(word_embeddings, unk_inds, found_inds):
"""
For unknown entities: add average emb vector of found entities to their random initialisation
TODO @Future: Is it better to initialize these as zeros instead of averages?
"""
avg_entity_vecs = np.mean(word_embeddings[found_inds],0)
word_embeddings[unk_inds] += avg_entity_vecs*1e-2
def load_word_embeddings(embeddings_fname, training_datapath, training_data, logger=None):
"""
:param embeddings_fname: The name of the file containing pre-trained embeddings.
E.g., the Google-news w2v embeddings
:param training_datapath: The name of the file containing the training data for
a model which uses word embeddings (loaded from embeddings_fname).
"""
# vocab_fname: The name of the file containing the relevant vocabulary.
# Each line contains the word idx and the word, separated by tabs ("\t").
vocab_fname = training_datapath.replace(".conll", ".vocab")
word_emb_fname = data_utils.get_embeddings_path_for_vocab(embeddings_fname, vocab_fname)
if os.path.exists(word_emb_fname):
if logger:
logger.whisper("Loading token embedding from {0}".format(word_emb_fname))
word_embeddings = np.load(word_emb_fname)
else:
vocabulary_idx_to_word,_ = data_utils.get_vocabulary(vocab_fname, extract_from=training_data, logger=logger)
all_word_vectors = load_word2vec_embeddings(embeddings_fname)
word_embeddings,_,_ = filter_embeddings(all_word_vectors, vocabulary_idx_to_word)
save_word_embeddings(word_embeddings, word_emb_fname)
return word_embeddings
def load_entity_embeddings(embeddings_fname, vocab_fname, logger=None):
"""
:param embeddings_fname: The name of the file containing pre-trained embeddings.
E.g., the Google-news w2v embeddings
:param vocab_fname: The name of the file containing the relevant vocabulary (entity names).
Each line contains the word idx and the word, separated by tabs ("\t").
"""
if not embeddings_fname.endswith(".npy"):
embeddings_fname = data_utils.get_embeddings_path_for_vocab(embeddings_fname, vocab_fname)
if os.path.exists(embeddings_fname):
if not logger is None:
logger.whisper("Loading entity embedding from {0}".format(embeddings_fname))
word_embeddings = np.load(embeddings_fname)
"""
# The model does not use embeddings (yet) which were extracted from some other source
else:
vocabulary_idx_to_word,_ = data_utils.load_vocabulary(vocab_fname)
all_entity_vectors = load_word2vec_embeddings(embeddings_fname)
word_embeddings, unk_inds, found_inds = filter_embeddings(all_entity_vectors, vocabulary_idx_to_word, normalise_word=True, rnd_init=True)
fill_missing_embeddings(word_embeddings, unk_inds, found_inds)
save_word_embeddings(word_embeddings, embeddings_fname)
"""
return word_embeddings
def save_word_embeddings(word_embeddings, outfname, logger=None):
np.save(outfname, word_embeddings)
if not logger is None:
logger.whisper("Embeddings saved in \n\t{0}".format(outfname))
| 44.833333 | 145 | 0.730374 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,920 | 0.419856 |
2ff0c47570b54645f3e78c495d57b04e387a120a | 1,980 | py | Python | goNord.py | MiraculousMoon/ImageGoNord-pip | bc4dd4ee72e0aa569112877e081cb397b2637300 | [
"MIT"
] | null | null | null | goNord.py | MiraculousMoon/ImageGoNord-pip | bc4dd4ee72e0aa569112877e081cb397b2637300 | [
"MIT"
] | null | null | null | goNord.py | MiraculousMoon/ImageGoNord-pip | bc4dd4ee72e0aa569112877e081cb397b2637300 | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
from ImageGoNord import NordPaletteFile, GoNord
import sys, os
def main():
dirOld = "/home/mir/Pictures/wallhaven/"
dirNew = "/home/mir/Pictures/newWall/"
for root, dirs, files in os.walk(dirOld):
for file in files:
imagePath = dirOld + file
name = file.split(".", 1)
oName = name[0] + "-Nord." + name[1]
savePath = dirNew + oName
# E.g. Replace pixel by pixel
go_nord = GoNord()
image = go_nord.open_image(imagePath)
go_nord.convert_image(image, save_path=savePath)
# E.g. Avg algorithm and less colors
go_nord.enable_avg_algorithm()
go_nord.reset_palette()
go_nord.add_file_to_palette(NordPaletteFile.POLAR_NIGHT)
go_nord.add_file_to_palette(NordPaletteFile.SNOW_STORM)
# You can add color also by their hex code
go_nord.add_color_to_palette("#FF0000")
image = go_nord.open_image(imagePath)
go_nord.convert_image(image, save_path=savePath)
# E.g. Resized img no Avg algorithm and less colors
go_nord.disable_avg_algorithm()
go_nord.reset_palette()
go_nord.add_file_to_palette(NordPaletteFile.POLAR_NIGHT)
go_nord.add_file_to_palette(NordPaletteFile.SNOW_STORM)
image = go_nord.open_image(imagePath)
resized_img = go_nord.resize_image(image)
go_nord.convert_image(resized_img, save_path=savePath)
# E.g. Quantize
image = go_nord.open_image(imagePath)
go_nord.reset_palette()
go_nord.set_default_nord_palette()
quantize_image = go_nord.quantize_image(image, save_path=savePath)
# To base64
go_nord.image_to_base64(quantize_image, "jpeg")
if __name__ == "__main__":
main()
| 35.357143 | 78 | 0.60303 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 302 | 0.152525 |
2ff3d7677d6e99fe3507a5c337bd079019cdb6be | 3,091 | py | Python | graphic_convergence_topology.py | win7/parallel_social_spider_optimization | 9dbad144e4242fef2ff6aacc8e72376e14b03a61 | [
"MIT"
] | 1 | 2020-10-02T15:49:18.000Z | 2020-10-02T15:49:18.000Z | graphic_convergence_topology.py | win7/parallel_social_spider_optimization | 9dbad144e4242fef2ff6aacc8e72376e14b03a61 | [
"MIT"
] | null | null | null | graphic_convergence_topology.py | win7/parallel_social_spider_optimization | 9dbad144e4242fef2ff6aacc8e72376e14b03a61 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
"""
============================================================================
Authors:
Edwin Alvarez-Mamani and Jose Luis Soncco-Alvarez*
*Department of Informatics
Universidad Nacional de San Antonio Abad del Cusco (UNSAAC) - Perú
============================================================================
"""
# Python: 3.8.x
"""
Script for evaluate best topology (static and dinamic) about convergence
"""
import matplotlib.pyplot as plt
from matplotlib.ticker import NullFormatter # useful for `logit` scale
import numpy as np
from utils import topology, dataset
print("******* START *******")
dataset_topology = [
[9, 5, 2, 3, 7, 8, 4, 1, 6], # d 20
[], # d 21
[5, 7, 2, 6, 8, 9, 3, 1, 4], # d 22
[9, 5, 1, 2, 8, 3, 4, 6, 7], # d 23
[8, 7, 1, 5, 2, 4, 3, 6, 9], # d 24
[7, 8, 1, 5, 6, 4, 2, 3, 9], # d 25
[9, 8, 4, 5, 1, 2, 6, 7, 3], # d 26
[7, 4, 1, 2, 8, 9, 3, 5, 6], # d 27
[8, 6, 3, 4, 5, 7, 1, 2, 9], # d 28
[] # d 29
]
rankig_low = [] # ranking metric for low dataset
rankig_high = [] # ranking metric for high dataset
rankig_all = [] # ranking metric low and high dataset
for index, index_topology in enumerate([0, 1, 2, 3, 4, 5, 6, 7, 8]): # change [0, 1, 2, 3, 4, 5, 6, 7, 8]
# load data for plot
rankig_l = []
rankig_h = []
rankig_a = []
for index_dataset in [20, 22, 23, 24, 25, 26, 27, 28]: # change [0, ..., 29]
if index_dataset >= 26:
rankig_h.append(dataset_topology[index_dataset - 20][index])
else:
rankig_l.append(dataset_topology[index_dataset - 20][index])
rankig_a.append(dataset_topology[index_dataset - 20][index])
rankig_low.append(np.sum(rankig_l))
rankig_high.append(np.sum(rankig_h))
rankig_all.append(np.sum(rankig_a))
labels = topology
# rankig_low = [20, 34, 30, 35, 27]
# rankig_high = [25, 32, 34, 20, 25]
x = np.arange(len(labels)) # the label locations
width = 0.25 # the width of the bars
fig, ax = plt.subplots()
rects1 = ax.bar(x - width / 2, rankig_low, width, label='Low size')
rects2 = ax.bar(x + width / 2, rankig_high, width, label='High size')
"""rects1 = ax.bar(x - width, rankig_low, width, label='Low size')
rects2 = ax.bar(x, rankig_high, width, label='High size')
rects3 = ax.bar(x + width, rankig_all, width, label='All') """
# Add some text for labels, title and custom x-axis tick labels, etc.
ax.set_ylabel("Scores")
ax.set_xlabel("Topology")
ax.set_title("Best Topology")
ax.set_xticks(x)
ax.set_xticklabels(labels)
ax.legend()
def autolabel(rects):
"""Attach a text label above each bar in *rects*, displaying its height."""
for rect in rects:
height = rect.get_height()
ax.annotate('{}'.format(height),
xy=(rect.get_x() + rect.get_width() / 2, height),
xytext=(0, 3), # 3 points vertical offset
textcoords="offset points",
ha='center', va='bottom')
autolabel(rects1)
autolabel(rects2)
# autolabel(rects3)
fig.tight_layout()
plt.grid()
plt.show()
print("******* END *******")
# Run:
# python graphic_convergence_topology.py | 30.60396 | 107 | 0.590747 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,373 | 0.444049 |
2ff404338aa621080a3a500a159897b5f6c9013e | 5,821 | py | Python | TestInterfaceResiduePrediction.py | sebastiandaberdaku/AntibodyInterfacePrediction | 4d31f57f7cdac1fe68cfb4f3448f6e3129ae2838 | [
"BSD-3-Clause"
] | 10 | 2017-10-11T16:05:35.000Z | 2021-10-01T14:43:10.000Z | TestInterfaceResiduePrediction.py | sebastiandaberdaku/AntibodyInterfacePrediction | 4d31f57f7cdac1fe68cfb4f3448f6e3129ae2838 | [
"BSD-3-Clause"
] | null | null | null | TestInterfaceResiduePrediction.py | sebastiandaberdaku/AntibodyInterfacePrediction | 4d31f57f7cdac1fe68cfb4f3448f6e3129ae2838 | [
"BSD-3-Clause"
] | 2 | 2018-09-13T17:04:13.000Z | 2019-01-22T08:55:22.000Z | # This script runs the IF algorithm for outlier detection to remove false positive patches and maps the predicted LSPs on the underlying residues.
# The results are compared to other predictor software packages.
# Please remember to set the path variable to the current location of the test set.
import numpy as np
from sklearn.neighbors.kd_tree import KDTree
from glob import glob
from math import copysign
from sklearn.ensemble import IsolationForest
from os.path import basename
from os import path, makedirs
from Bio.PDB.PDBParser import PDBParser
p = PDBParser(QUIET=True, PERMISSIVE=True)
from Bio.PDB.Polypeptide import three_to_one
def convert3to1(s):
try :
return three_to_one(s)
except KeyError :
return "X"
import re
_hydrogen = re.compile("[123 ]*H.*")
def isHydrogen(atm):
return _hydrogen.match(atm.get_id())
def isHETATM(atm):
return atm.get_parent().get_id()[0] != " "
#######################
# import pickle
#######################
outlier_fraction = 0.18
threshold = 0.6232013
n_iterations = 100
mapping_distance = 6.0
def compute_average_scores(testing_set_path, prediction_path):
files = glob("%s/*_ab.pdb" % (testing_set_path))
for pdb_filename in sorted(files) :
file_id = basename(pdb_filename)[:-7]
pdb_patch_coord = ("%s/%s_ab_patch_centers.txt" % (testing_set_path, file_id))
pdb_patch_score = ("%s/%s_ab_patch_score.txt" % (testing_set_path, file_id))
with open(pdb_patch_coord) as coord, open(pdb_patch_score) as score:
patch_coord = [[float(x) for x in a.strip().split()] for a in coord.readlines()]
patch_score = [float(x) - threshold for x in score.readlines()]
min_v = min(patch_score)
max_v = max(patch_score)
patch_score_scaled = [(lambda x: -(x / min_v) if x < 0 else (x / max_v))(x) for x in patch_score]
X = np.array([a[0] for a in zip(patch_coord, patch_score_scaled) if a[1] >= 0])
X_weights = np.array([x for x in patch_score_scaled if x >= 0])
pdb_structure = p.get_structure(file_id, pdb_filename)
atoms = np.array([atm.get_coord() for atm in pdb_structure.get_atoms() if not isHydrogen(atm)])
atoms_tree = KDTree(atoms)
residues_coord = {}
for residue in pdb_structure.get_residues() :
for atm in residue :
residues_coord[tuple(atm.get_coord())] = residue
average_residues_scores = {residue : 0 for residue in pdb_structure.get_residues()}
# since the isollation forest algorithm is random, we run it several times to assess the average performance of the method
for iteration in xrange(n_iterations) :
print "Running iteration %d of %d" % (iteration + 1, n_iterations)
forest = IsolationForest(contamination=outlier_fraction, n_jobs=-1)
forest.fit(X, sample_weight=X_weights)
prediction_isolation_forest = forest.predict(patch_coord)
patch_pred_no_outliers = [copysign(1, x) for x in prediction_isolation_forest]
# here we map the patch predictions on the underlying residues
for i in xrange(len(patch_coord)) : # for each patch
# if it was predicted as non-interface continue to the next
if patch_pred_no_outliers[i] < 0 : continue
# multiple residues can be underneath a given patch, we do not want to consider the same residue more than once
marked_residues = set()
# get all atoms within mapping_distance from the given patch center
indexes = atoms_tree.query_radius([patch_coord[i]], r=mapping_distance, count_only = False, return_distance=True, sort_results = True)
for ind in zip(indexes[0][0], indexes[1][0]) :
# which residue does the current atom belong to?
current_res = residues_coord[tuple(atoms[ind[0]])]
# if already considered continue to the next
if current_res in marked_residues : continue
# increase the score of the current residue
average_residues_scores[current_res] += 1 / (1.0 + ind[1]) # patch_pred_no_outliers[i] / (1.0 + ind[1])
# mark as seen for the current patch
marked_residues.add(current_res)
average_residues_scores.update((x, y / n_iterations) for x, y in average_residues_scores.items())
residues_with_scores = [(lambda x, y, z : (convert3to1(z), x[2], x[3][1], x[3][2], y))(residue.get_full_id(), score, residue.get_resname()) for residue, score in average_residues_scores.items()]
residues_with_scores.sort(key=lambda x : x[2])
residues_with_scores.sort(key=lambda x : x[1])
if not path.exists(prediction_path) : makedirs(prediction_path)
print file_id
with open("%s/%s_ab_residue_prediction.txt" % (prediction_path, file_id), "wb") as output_residue_scores :
for r in residues_with_scores :
output_residue_scores.write("%s;%s;%d;%s;%s\n" %(r[0], r[1], r[2], r[3], str(r[4])))
compute_average_scores("./our_dataset/testing_set/LH_NonProtein/structures/", "./method_comparison/our_method/predictions/LH_NonProtein/")
compute_average_scores("./our_dataset/testing_set/LH_Protein/structures/", "./method_comparison/our_method/predictions/LH_Protein/")
compute_average_scores("./our_dataset/homology/LH_NonProtein/structures/", "./method_comparison/our_method_homology/predictions/LH_NonProtein/")
compute_average_scores("./our_dataset/homology_90/LH_Protein/structures/", "./method_comparison/our_method_homology/predictions/LH_Protein/")
| 47.325203 | 202 | 0.662429 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,625 | 0.279162 |
2ff6da846eaf58d5ba7cf9c6f7b0be471d5b5563 | 9,139 | py | Python | solrcl/document.py | zaccheob/solrcl | de731c0e1f12361770121e6f2aad7e41c7a40f68 | [
"MIT"
] | null | null | null | solrcl/document.py | zaccheob/solrcl | de731c0e1f12361770121e6f2aad7e41c7a40f68 | [
"MIT"
] | null | null | null | solrcl/document.py | zaccheob/solrcl | de731c0e1f12361770121e6f2aad7e41c7a40f68 | [
"MIT"
] | null | null | null | # -*- coding: utf8 -*-
import warnings
import xml.etree.cElementTree as ET
import re
import logging
logger = logging.getLogger("solrcl")
logger.setLevel(logging.DEBUG)
import exceptions
class SOLRDocumentError(exceptions.SOLRError): pass  # fatal problem building or validating a document/field
class SOLRDocumentWarning(UserWarning): pass  # per-document parse error downgraded to a warning by fromXML()
class SOLRDocument(object):
 """Class that stores data for a SOLR document. To instantiate SOLRDocument from xml use SOLRDocumentFactory"""
 def __init__(self, solrid, solrcore):
  """Create a document bound to *solrcore* with unique id *solrid*.

  Field values are stored internally as lists, keyed by field name,
  and validated against the core's schema on every set/append."""
  self._fields = {}
  self._child_docs = []
  self.solr = solrcore
  self.setField(self.solr.id_field, solrid)
 def __getattr__(self, name):
  #Shortcut to id field
  if name == "id":
   return self.getField(self.solr.id_field)
  else:
   raise AttributeError
 def __eq__(self, other):
  # Equal iff same type, same field names, same value sets per field
  # (order-insensitive), and same child documents (order-insensitive).
  if type(other) is type(self):
   return set(self._fields.keys()) == set(other._fields.keys()) and all(set(self._fields[x]) == set(other._fields[x]) for x in self._fields.keys()) and sorted(self._child_docs) == sorted(other._child_docs)
  return False
 def __ne__(self, other):
  return not self.__eq__(other)
 def __lt__(self, other):
  #Need it to make the object sortable (sorting is used in __eq__ method to test child documents)
  return self.id < other.id
 def _serializeValue(self, v, encoding='utf8'):
  # NOTE(review): not referenced inside this class (_toXML uses
  # field.type.serialize instead) -- possibly a legacy helper; confirm
  # before removing.
  if isinstance(v, unicode):
   return v
  elif isinstance(v, str):
   return v.decode(encoding)
  else:
   return unicode(v)
 def setField(self, fieldname, fieldvalue):
  """Replace any existing value(s) of *fieldname* with *fieldvalue*.

  A list value sets each element in turn; schema validation is done
  by appendFieldValue."""
  if self._fields.has_key(fieldname):
   del self._fields[fieldname]
  if isinstance(fieldvalue, list):
   for x in fieldvalue:
    self.appendFieldValue(fieldname, x)
  else:
   self.appendFieldValue(fieldname, fieldvalue)
 def appendFieldValue(self, fieldname, fieldvalue):
  """Append one value to *fieldname*, validating against the schema.

  Raises SOLRDocumentError when the field is unknown, the value fails
  the field type's check(), or a second value is appended to a
  non-multivalued field. None values are stored without type checks
  (they mark explicit nulls for _toXML)."""
  logger.debug("appendFieldValue %s %s" % (repr(fieldname), repr(fieldvalue)))
  if self.solr.fields.has_key(fieldname):
   if fieldvalue is None:
    self._fields.setdefault(fieldname, [])
    self._fields[fieldname].append(fieldvalue)
   else:
    try:
     self.solr.fields[fieldname].type.check(fieldvalue)
    except AssertionError, e:
     raise SOLRDocumentError, "Invalid value %s for field %s (type %s)" % (repr(fieldvalue), fieldname, self.solr.fields[fieldname].type.name)
    self._fields.setdefault(fieldname, [])
    if len(self._fields[fieldname]) > 0 and not self.solr.fields[fieldname].multi:
     raise SOLRDocumentError, "Multiple values for not multivalued field %s" % fieldname
    self._fields[fieldname].append(fieldvalue)
  else:
   raise SOLRDocumentError, "Field %s not in schema" % fieldname
  logger.debug("SET: '%s' '%s'" % (fieldname, repr(self._fields[fieldname])))
 def getField(self, fieldname):
  """Return the field value: the list for multivalued fields, the single
  value otherwise. Raises KeyError if the field is unset."""
  ret = self._fields[fieldname]
  if self.solr.fields[fieldname].multi:
   return ret
  else:
   return ret[0]
 def removeField(self, fieldname):
  # Silently ignores fields that are not set.
  self._fields.pop(fieldname, None)
 def getFieldDefault(self, fieldname, default=None):
  """Like getField but returns *default* instead of raising KeyError."""
  try:
   return self.getField(fieldname)
  except KeyError:
   return default
 def addChild(self, doc):
  self._child_docs.append(doc)
 def getChildDocs(self):
  return self._child_docs
 def hasChildDocs(self):
  return bool(self._child_docs)
 def _toXML(self, update=True):
  """Build the <doc> Element tree; with update=True non-id fields get
  update="set" attributes (SOLR atomic update semantics)."""
  doc = ET.Element('doc')
  for field, value in self._fields.iteritems():
   if value[0] is None:
    # Explicit null: emitted as an empty field tagged null='true'.
    f = ET.SubElement(doc, 'field', null='true', name=field)
    if field != self.solr.id_field and update:
     f.set('update', 'set')
    f.text = ''
   else:
    for v in value:
     f = ET.SubElement(doc, 'field', null='false', name=field)
     if field != self.solr.id_field and update:
      f.set('update', 'set')
     f.text = self.solr.fields[field].type.serialize(v)
  for child in self._child_docs:
   doc.append(child._toXML(update=update))
  return doc
 def toXML(self, update=True):
  """Serializes SOLRDocument into an XML string suitable for SOLR update request handler"""
  #Unfortunately it seems there's no way to avoid xml declaration... so I've to remove it with a regexp
  return re.sub(r"^<\?xml version='1.0' encoding='[^']*'\?>\s*", '', ET.tostring(self._toXML(update=update), encoding='utf8'))
 def clone(self):
  """Deep-copy fields and children into a new document sharing self.solr."""
  #Don't use copy.deepcopy because i don't want to clone also self.solr object
  anotherme = SOLRDocument(self.id, self.solr)
  for fieldname in self._fields.iterkeys():
   anotherme.setField(fieldname, self.getField(fieldname))
  for child in self.getChildDocs():
   anotherme.addChild(child.clone())
  return anotherme
 def update(self, otherdoc, merge_child_docs=True):
  """Overwrite own fields with *otherdoc*'s; merge (or replace, when
  merge_child_docs=False) child documents matched by id."""
  for fieldname in otherdoc._fields.iterkeys():
   self.setField(fieldname, otherdoc.getField(fieldname))
  if not merge_child_docs:
   #Shortcut! A removeChild method would be better, but I'm lazy :)
   self._child_docs = []
  actual_child_docs = dict(((d.id, d) for d in self.getChildDocs()))
  for child_doc in otherdoc.getChildDocs():
   if actual_child_docs.has_key(child_doc.id):
    #update child
    actual_child_docs[child_doc.id].update(child_doc)
   else:
    #new child
    self.addChild(child_doc.clone())
class SOLRDocumentFactory(object):
 """Class with methods to create SOLRDocument instances that fits on solr core"""
 def __init__(self, solr):
  """Initializes the instance with solr core"""
  self.solr = solr
 def _fromXMLDoc(self, xmldoc):
  """Build one SOLRDocument from a parsed <doc> Element.

  Recurses into nested <doc> elements (child documents). Raises
  SOLRDocumentError on unknown fields, unexpected tags, values that
  fail deserialization, or a missing unique id field."""
  id_in_record = False
  #Create a new document with a fake id, unfortunately id field is not necessarly in the first position so I should iterate all fields to find it before reading other fields. In this way I can set it later. The counterpart is that I have to enforce that id field exists in another way.
  doc = SOLRDocument(u'changeme', self.solr)
  for field in xmldoc:
   if field.tag == 'field':
    fieldname = field.get('name')
    if not self.solr.fields.has_key(fieldname):
     raise SOLRDocumentError, "Field %s does not exist in schema" % fieldname
    if fieldname == self.solr.id_field:
     id_in_record = True
    if field.get('null') == 'true':
     doc.setField(fieldname, None)
    else:
     value = field.text
     # Note that when there is no text field.text returns None, not ''
     # Let's transform it in '' because Nulls are already managed separately
     value = u'' if value is None else value
     try:
      if fieldname == self.solr.id_field:
       # id replaces the placeholder set in the constructor
       doc.setField(fieldname, self.solr.fields[fieldname].type.deserialize(unicode(value)))
      else:
       doc.appendFieldValue(fieldname, self.solr.fields[fieldname].type.deserialize(unicode(value)))
     except ValueError as e:
      raise SOLRDocumentError("%s" % e)
   elif field.tag == 'doc':
    doc.addChild(self._fromXMLDoc(field))
   else:
    raise SOLRDocumentError, "Invalid tag {0} in doc".format(field.tag)
  if not id_in_record:
   raise SOLRDocumentError, "Missing unique id field in doc"
  return doc
 def fromXML(self, fh):
  """Returns a generator over SOLRDocument instances from an xml document read from the file like object fh. Fields are checked against solr schema and if not valid a SOLRDocumentXMLError exception is raised."""
  # doc_depth tracks <doc> nesting so only top-level docs are yielded;
  # nested ones are consumed recursively by _fromXMLDoc.
  doc_depth = 0
  for (event, element) in ET.iterparse(fh, events=('start', 'end')):
   if event == 'start':
    if element.tag == 'doc':
     doc_depth += 1
   elif event == 'end':
    if element.tag == 'doc':
     doc_depth -= 1
    elif element.tag in ('field', 'add'):
     pass
    else:
     raise SOLRDocumentError, "Invalid tag {0}".format(element.tag)
   if element.tag == 'doc' and event == 'end' and doc_depth == 0:
    try:
     yield self._fromXMLDoc(element)
    except SOLRDocumentError, e:
     #Transform document errors in warnings to continue to next
     warnings.warn("%s" % e, SOLRDocumentWarning)
| 40.083333 | 291 | 0.589561 | 8,943 | 0.978553 | 1,078 | 0.117956 | 0 | 0 | 0 | 0 | 1,912 | 0.209213 |
2ff8bf398acab99da30d643bdc618ebcc561b20a | 14,054 | py | Python | testscripts/RDKB/component/WIFIAgent/TS_WIFIAGENT_CheckTelemetryMarkerCHUTIL_2_Logging.py | rdkcmf/rdkb-tools-tdkb | 9f9c3600cd701d5fc90ac86a6394ebd28d49267e | [
"Apache-2.0"
] | null | null | null | testscripts/RDKB/component/WIFIAgent/TS_WIFIAGENT_CheckTelemetryMarkerCHUTIL_2_Logging.py | rdkcmf/rdkb-tools-tdkb | 9f9c3600cd701d5fc90ac86a6394ebd28d49267e | [
"Apache-2.0"
] | null | null | null | testscripts/RDKB/component/WIFIAgent/TS_WIFIAGENT_CheckTelemetryMarkerCHUTIL_2_Logging.py | rdkcmf/rdkb-tools-tdkb | 9f9c3600cd701d5fc90ac86a6394ebd28d49267e | [
"Apache-2.0"
] | null | null | null | ##########################################################################
# If not stated otherwise in this file or this component's Licenses.txt
# file the following copyright and licenses apply:
#
# Copyright 2021 RDK Management
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
##########################################################################
'''
<?xml version="1.0" encoding="UTF-8"?><xml>
<id/>
<version>2</version>
<name>TS_WIFIAGENT_CheckTelemetryMarkerCHUTIL_2_Logging</name>
<primitive_test_id/>
<primitive_test_name>WIFIAgent_Get</primitive_test_name>
<primitive_test_version>1</primitive_test_version>
<status>FREE</status>
<synopsis>To check if the channel utilization marker CHUTIL_2 logging is happening according to the log interval set with Device.DeviceInfo.X_RDKCENTRAL-COM_WIFI_TELEMETRY.ChUtilityLogInterval.</synopsis>
<groups_id/>
<execution_time>20</execution_time>
<long_duration>false</long_duration>
<advanced_script>false</advanced_script>
<remarks/>
<skip>false</skip>
<box_types>
<box_type>Broadband</box_type>
<box_type>RPI</box_type>
</box_types>
<rdk_versions>
<rdk_version>RDKB</rdk_version>
</rdk_versions>
<test_cases>
<test_case_id>TC_WIFIAGENT_148</test_case_id>
<test_objective>To check if the channel utilization marker CHUTIL_2 logging is happening according to the log interval set with Device.DeviceInfo.X_RDKCENTRAL-COM_WIFI_TELEMETRY.ChUtilityLogInterval.</test_objective>
<test_type>Positive</test_type>
<test_setup>Broadband, RPI</test_setup>
<pre_requisite>1.Ccsp Components should be in a running state else invoke cosa_start.sh manually that includes all the ccsp components and TDK Component
2.TDK Agent should be in running state or invoke it through StartTdk.sh script</pre_requisite>
<api_or_interface_used>None</api_or_interface_used>
<input_parameters>ParamName : Device.DeviceInfo.X_RDKCENTRAL-COM_WIFI_TELEMETRY.ChUtilityLogInterval
ParamValue : 30 or 60
Type : int
</input_parameters>
<automation_approch>1. Load the modules
2. Get the initial value of Device.DeviceInfo.X_RDKCENTRAL-COM_WIFI_TELEMETRY.ChUtilityLogInterval.
3. If the initial value is 30, set it to 60 else set the value of Device.DeviceInfo.X_RDKCENTRAL-COM_WIFI_TELEMETRY.ChUtilityLogInterval to 30.
4. Check if the log file wifihealth.txt is present under /rdklogs/logs.
5. Get the initial count of the telemetry marker CHUTIL_2 and store it.
6. Sleep for a wait time of sum of initial value of Device.DeviceInfo.X_RDKCENTRAL-COM_WIFI_TELEMETRY.ChUtilityLogInterval and the new value set.
7. After the wait time, check the final count of the telemetry marker CHUTIL_2 and compute the difference with the initial value.
8. The difference should be greater than or equal to 2.
9. Revert Device.DeviceInfo.X_RDKCENTRAL-COM_WIFI_TELEMETRY.ChUtilityLogInterval to initial value.
10. Unload the modules.</automation_approch>
<expected_output>The channel utilization marker CHUTIL_2 logging should happen according to the log interval set with Device.DeviceInfo.X_RDKCENTRAL-COM_WIFI_TELEMETRY.ChUtilityLogInterval.</expected_output>
<priority>High</priority>
<test_stub_interface>wifiagent</test_stub_interface>
<test_script>TS_WIFIAGENT_CheckTelemetryMarkerCHUTIL_2_Logging</test_script>
<skipped>No</skipped>
<release_version>M93</release_version>
<remarks/>
</test_cases>
<script_tags/>
</xml>
'''
def getLogFileTotalLinesCount(tdkTestObj, string, step):
cmd = "grep -ire " + "\"" + string + "\" " + "/rdklogs/logs/wifihealth.txt | wc -l";
expectedresult="SUCCESS";
tdkTestObj.addParameter("command",cmd);
tdkTestObj.executeTestCase(expectedresult);
actualresult = tdkTestObj.getResult();
details = tdkTestObj.getResultDetails();
print "\n*********************************************";
print "TEST STEP %d : Get the number of log lines currently present" %step;
print "EXPECTED RESULT %d : Should get the number of log lines currently present" %step;
print "Query : %s" %cmd;
count = 0;
if expectedresult in actualresult:
count = int(tdkTestObj.getResultDetails().strip().replace("\\n", ""));
tdkTestObj.setResultStatus("SUCCESS");
print "ACTUAL RESULT %d: Successfully captured the number of log lines present : %d" %(step, count);
print "[TEST EXECUTION RESULT] : SUCCESS";
else:
tdkTestObj.setResultStatus("FAILURE");
print "ACTUAL RESULT %d: Failed to capture the number of log lines present : %s" %(step, details);
print "[TEST EXECUTION RESULT] : FAILURE";
print "*********************************************\n";
return count,step;
# use tdklib library,which provides a wrapper for tdk testcase script
import tdklib;
from time import sleep;
#Test component to be tested
obj = tdklib.TDKScriptingLibrary("pam","RDKB");
sysObj = tdklib.TDKScriptingLibrary("sysutil","RDKB");
#IP and Port of box, No need to change,
#This will be replaced with corresponding DUT Ip and port while executing script
ip = <ipaddress>
port = <port>
#Bind both scripting libraries to this test case on the DUT
obj.configureTestCase(ip,port,'TS_WIFIAGENT_CheckTelemetryMarkerCHUTIL_2_Logging');
sysObj.configureTestCase(ip,port,'TS_WIFIAGENT_CheckTelemetryMarkerCHUTIL_2_Logging');
#Get the result of connection with test component and DUT
loadmodulestatus=obj.getLoadModuleResult();
sysutilloadmodulestatus=sysObj.getLoadModuleResult();
if "SUCCESS" in loadmodulestatus.upper() and "SUCCESS" in sysutilloadmodulestatus.upper():
#Set the result status of execution
obj.setLoadModuleStatus("SUCCESS");
sysObj.setLoadModuleStatus("SUCCESS");
expectedresult="SUCCESS";
step = 1;
#Check whether the wifihealth.txt file is present or not
tdkTestObj = sysObj.createTestStep('ExecuteCmd');
cmd = "[ -f /rdklogs/logs/wifihealth.txt ] && echo \"File exist\" || echo \"File does not exist\"";
tdkTestObj.addParameter("command",cmd);
expectedresult="SUCCESS";
tdkTestObj.executeTestCase(expectedresult);
actualresult = tdkTestObj.getResult();
details = tdkTestObj.getResultDetails().strip().replace("\\n", "");
print "\nTEST STEP %d: Check for wifihealth log file presence" %step;
print "EXPECTED RESULT %d:wifihealth log file should be present" %step;
if details == "File exist":
tdkTestObj.setResultStatus("SUCCESS");
print "ACTUAL RESULT %d:wifihealth log file is present" %step;
#Get the result of execution
print "[TEST EXECUTION RESULT] : SUCCESS";
step = step + 1;
#Get the value of Device.DeviceInfo.X_RDKCENTRAL-COM_WIFI_TELEMETRY.ChUtilityLogInterval
tdkTestObj = obj.createTestStep('pam_GetParameterValues');
tdkTestObj.addParameter("ParamName","Device.DeviceInfo.X_RDKCENTRAL-COM_WIFI_TELEMETRY.ChUtilityLogInterval");
#Execute the test case in DUT
tdkTestObj.executeTestCase(expectedresult);
actualresult = tdkTestObj.getResult();
initial_value = tdkTestObj.getResultDetails();
print "\nTEST STEP %d: Get the TELEMETRY Channel Utility LogInterval from Device.DeviceInfo.X_RDKCENTRAL-COM_WIFI_TELEMETRY.ChUtilityLogInterval" %step;
print "EXPECTED RESULT %d: Should get the TELEMETRY Channel Utility LogInterval from Device.DeviceInfo.X_RDKCENTRAL-COM_WIFI_TELEMETRY.ChUtilityLogInterval" %step;
if expectedresult in actualresult and initial_value != "":
DeflogInt = int(initial_value);
tdkTestObj.setResultStatus("SUCCESS");
print "ACTUAL RESULT %d: TELEMETRY Channel Utility LogInterval: %d" %(step,DeflogInt);
#Get the result of execution
print "[TEST EXECUTION RESULT] : SUCCESS";
if DeflogInt == 30:
newlogInt = "60";
else:
newlogInt = "30";
#Set the LogInterval to newlogInt, the set is cross checked with get
step = step + 1;
tdkTestObj = obj.createTestStep('pam_SetParameterValues');
tdkTestObj.addParameter("ParamName","Device.DeviceInfo.X_RDKCENTRAL-COM_WIFI_TELEMETRY.ChUtilityLogInterval");
tdkTestObj.addParameter("ParamValue",newlogInt);
tdkTestObj.addParameter("Type","int");
expectedresult="SUCCESS";
tdkTestObj.executeTestCase(expectedresult);
actualresult = tdkTestObj.getResult();
details = tdkTestObj.getResultDetails();
print "\nTEST STEP %d: Set the TELEMETRY Channel Utility LogInterval to %ss" %(step, newlogInt);
print "EXPECTED RESULT %d: Should set the TELEMETRY Channel Utility LogInterval to %ss" %(step, newlogInt);
if expectedresult in actualresult:
tdkTestObj.setResultStatus("SUCCESS");
print "ACTUAL RESULT %d: TELEMETRY Channel Utility LogInterval: %s" %(step,details);
#Get the result of execution
print "[TEST EXECUTION RESULT] : SUCCESS";
print "\nGet the number of log lines \"CHUTIL_2\" in /rdklogs/logs/wifihealth.txt";
step = step + 1;
tdkTestObj1 = sysObj.createTestStep('ExecuteCmd');
log = "CHUTIL_2";
no_of_lines_initial,step = getLogFileTotalLinesCount(tdkTestObj1, log, step);
print "The initial number of log lines \"CHUTIL_2\" in wifihealth.txt is : %d" %no_of_lines_initial;
#Sleeping for initial telemetry interval DeflogInt + newlogInt
sleep_time = DeflogInt + int(newlogInt);
print "\nSleeping for duration : %d to check if the logging is happening according to the new log interval set" %sleep_time;
sleep(sleep_time);
print "\nGet the final number of log lines \"CHUTIL_2\" in /rdklogs/logs/wifihealth.txt";
step = step + 1;
tdkTestObj1 = sysObj.createTestStep('ExecuteCmd');
log = "CHUTIL_2";
no_of_lines_final,step = getLogFileTotalLinesCount(tdkTestObj1, log, step);
print "The initial number of log lines \"CHUTIL_2\" in wifihealth.txt is : %d" %no_of_lines_final;
step = step + 1;
difference = no_of_lines_final - no_of_lines_initial;
print "\nThe CHUTIL_2 log lines can be >= 2, after accounting for the initial log interval and the new log interval set";
print "TEST STEP %d: Should get CHUTIL_2 markers count greater than or equal to 2" %step;
print "EXPECTED RESULT %d: The CHUTIL_2 markers count should be greater than or equal to 2" %step;
if difference >= 2:
tdkTestObj.setResultStatus("SUCCESS");
print "ACTUAL RESULT %d: Number of new CHUTIL_2 markers are : %d" %(step, difference);
#Get the result of execution
print "[TEST EXECUTION RESULT] : SUCCESS";
else:
tdkTestObj.setResultStatus("FAILURE");
print "ACTUAL RESULT %d: Number of new CHUTIL_2 markers are : %d" %(step, difference);
#Get the result of execution
print "[TEST EXECUTION RESULT] : FAILURE";
else:
tdkTestObj.setResultStatus("FAILURE");
print "ACTUAL RESULT %d: Set operation failed" %(step);
#Get the result of execution
print "[TEST EXECUTION RESULT] :FAILURE";
#Revert the Value
step = step + 1;
tdkTestObj = obj.createTestStep('pam_SetParameterValues');
tdkTestObj.addParameter("ParamName","Device.DeviceInfo.X_RDKCENTRAL-COM_WIFI_TELEMETRY.ChUtilityLogInterval");
tdkTestObj.addParameter("ParamValue",initial_value);
tdkTestObj.addParameter("Type","int");
expectedresult="SUCCESS";
tdkTestObj.executeTestCase(expectedresult);
actualresult = tdkTestObj.getResult();
details = tdkTestObj.getResultDetails();
print "\nTEST STEP %d: Revert the TELEMETRY Channel Utility LogInterval to initial value" %step;
print "EXPECTED RESULT %d: Should revert the TELEMETRY Channel Utility LogInterval to initial value" %step;
if expectedresult in actualresult:
tdkTestObj.setResultStatus("SUCCESS");
print "ACTUAL RESULT %d: Revert successful" %step;
#Get the result of execution
print "[TEST EXECUTION RESULT] : SUCCESS";
else:
tdkTestObj.setResultStatus("FAILURE");
print "ACTUAL RESULT %d: Revertion failed" %step;
#Get the result of execution
print "[TEST EXECUTION RESULT] : FAILURE";
else:
tdkTestObj.setResultStatus("FAILURE");
print "ACTUAL RESULT %d: TELEMETRY Channel Utility LogInterval: %s" %(step,details);
#Get the result of execution
print "[TEST EXECUTION RESULT] : FAILURE";
else:
tdkTestObj.setResultStatus("FAILURE");
print "ACTUAL RESULT %d:wifihealth log file is not present" %step;
#Get the result of execution
print "[TEST EXECUTION RESULT] :FAILURE";
obj.unloadModule("pam")
sysObj.unloadModule("sysutil");
else:
print "Failed to load module";
obj.setLoadModuleStatus("FAILURE");
sysObj.setLoadModuleStatus("FAILURE");
print "Module loading failed";
| 51.291971 | 221 | 0.674897 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 8,606 | 0.612352 |
2ffa77f1e40ba05e4e11e3418487de31ab281199 | 679 | py | Python | stubs.min/System/Runtime/InteropServices/__init___parts/GuidAttribute.py | denfromufa/ironpython-stubs | 4d2b405eda3ceed186e8adca55dd97c332c6f49d | [
"MIT"
] | 1 | 2017-07-07T11:15:45.000Z | 2017-07-07T11:15:45.000Z | stubs.min/System/Runtime/InteropServices/__init___parts/GuidAttribute.py | hdm-dt-fb/ironpython-stubs | 4d2b405eda3ceed186e8adca55dd97c332c6f49d | [
"MIT"
] | null | null | null | stubs.min/System/Runtime/InteropServices/__init___parts/GuidAttribute.py | hdm-dt-fb/ironpython-stubs | 4d2b405eda3ceed186e8adca55dd97c332c6f49d | [
"MIT"
] | null | null | null | class GuidAttribute(Attribute,_Attribute):
"""
Supplies an explicit System.Guid when an automatic GUID is undesirable.
GuidAttribute(guid: str)
"""
def __init__(self,*args):
""" x.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signature """
pass
@staticmethod
def __new__(self,guid):
""" __new__(cls: type,guid: str) """
pass
Value=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""Gets the System.Guid of the class.
Get: Value(self: GuidAttribute) -> str
"""
| 30.863636 | 215 | 0.687776 | 673 | 0.991163 | 0 | 0 | 87 | 0.12813 | 0 | 0 | 453 | 0.667158 |
2ffb1e0056539f8edf4a7ae850e5442548db09df | 1,833 | py | Python | fern/models.py | edilio/dental | 3fa6b453939c7536883d1036fd414b3fae8977d7 | [
"MIT"
] | 1 | 2016-03-14T18:56:06.000Z | 2016-03-14T18:56:06.000Z | fern/models.py | edilio/dental | 3fa6b453939c7536883d1036fd414b3fae8977d7 | [
"MIT"
] | null | null | null | fern/models.py | edilio/dental | 3fa6b453939c7536883d1036fd414b3fae8977d7 | [
"MIT"
] | null | null | null | import datetime
from django.db import models
from django.utils import timezone
class Source(models.Model):
    # Referral channel a patient came from (name-based inference -- confirm).
    name = models.CharField(max_length=50)
    def __unicode__(self):
        # Display label (Python 2 / Django __unicode__ protocol).
        return self.name
# One-letter codes stored in Patient.gender; second element is the label
# shown in forms/admin.
GENDER_CHOICES = (
    ('F', 'Female'),
    ('M', 'Male'),
)
class Patient(models.Model):
    # Demographic record for a dental-office patient.
    fullname = models.CharField(max_length=50)
    # editable=False hides the phone from auto-generated forms/admin edit.
    phone = models.CharField(max_length=15, null=True, blank=True, editable=False)
    # Referral channel (see Source model above).
    source = models.ForeignKey(Source)
    birth_date = models.DateField(null=True, blank=True)
    gender = models.CharField(max_length=1, null=True, blank=True, choices=GENDER_CHOICES)
    # Defaults to the current date via timezone.now at creation time.
    tour_date = models.DateField(default=timezone.now)
    new_patient = models.BooleanField(default=True)
    @property
    def age(self):
        # Age in completed years: the int(...) term subtracts one when this
        # year's birthday has not happened yet. None if birth_date is unset.
        if self.birth_date:
            bday = self.birth_date
            d = datetime.date.today()
            return (d.year - bday.year) - int((d.month, d.day) < (bday.month, bday.day))
        else:
            return None
    def __unicode__(self):
        # Display label (Python 2 / Django __unicode__ protocol).
        return self.fullname
# Integer-coded treatment choices for HappyBirthdayPatient.treatment;
# 0 (Implants) is the model's default.
TREATMENT_OPTIONS = (
    (0, 'Implants'),
    (1, 'Crowns'),
    (2, 'Laser'),
    (3, 'Surgery from July-2015')
)
class HappyBirthdayPatient(models.Model):
    # Patient entry presumably used for birthday mailings (name-based
    # inference -- confirm with callers).
    fullname = models.CharField(max_length=50)
    birth_date = models.DateField()
    address = models.CharField(max_length=120)
    treatment = models.PositiveSmallIntegerField(default=0, choices=TREATMENT_OPTIONS)
    @property
    def birth_date_month(self):
        # Full month name of the birthday (strftime '%B', locale-dependent).
        return self.birth_date.strftime('%B')
    @property
    def age(self):
        # Same completed-years computation as Patient.age; birth_date is
        # required on this model but the guard is kept for safety.
        if self.birth_date:
            bday = self.birth_date
            d = datetime.date.today()
            return (d.year - bday.year) - int((d.month, d.day) < (bday.month, bday.day))
        else:
            return None
    def __unicode__(self):
        return self.fullname
2ffba1d7176d2ef136ac49f09e73731254afa317 | 1,900 | py | Python | plot-salience-parsing-results.py | zetnim/saliency-semantic-parsing-reid | 5ae58c5beca260d1bc1ff3e6222e08c99dd93bfe | [
"MIT"
] | 12 | 2019-08-27T08:58:46.000Z | 2021-08-05T12:44:48.000Z | plot-salience-parsing-results.py | zetnim/saliency-semantic-parsing-reid | 5ae58c5beca260d1bc1ff3e6222e08c99dd93bfe | [
"MIT"
] | 3 | 2019-11-13T00:52:08.000Z | 2020-07-18T05:05:54.000Z | plot-salience-parsing-results.py | zetnim/saliency-semantic-parsing-reid | 5ae58c5beca260d1bc1ff3e6222e08c99dd93bfe | [
"MIT"
] | 8 | 2019-08-27T08:58:49.000Z | 2022-02-14T14:42:20.000Z | import os.path as osp
import os
import pylab as plt
import gc
import argparse
from utils import read_image
parser = argparse.ArgumentParser(description='Plot rank-5 results of S-ReID, SP-ReID and SSP-ReID')
# Dataset key used to build the result folder names under log/.
parser.add_argument('-d', '--dataset', type=str, default='market1501')
# Architecture
parser.add_argument('-a', '--arch', type=str, default='resnet50')
# Root folder where the combined comparison images are written.
parser.add_argument('--save-dir', type=str, default='log/tmp')
# NOTE: parsed at import time -- importing this module requires valid CLI args.
args = parser.parse_args()
def plot(images, save_name):
    """Stack *images* vertically as axis-free subplots in one figure and
    save it to *save_name*.

    The figure is cleared and closed after saving so repeated calls do not
    accumulate open matplotlib figures.
    """
    num_figs = len(images)
    fig = plt.figure(figsize = (30, 20))
    for i, img in enumerate(images):
        # One subplot row per image; the axes handle itself is not needed.
        fig.add_subplot(num_figs, 1, i + 1)
        plt.imshow(img)
        plt.axis('off')
    fig.savefig(save_name, bbox_inches='tight')
    # BUGFIX: the old `del a` raised NameError when `images` was empty
    # (the loop variable was never bound); the handle was unused anyway.
    fig.clf()
    plt.close(fig)  # close this figure explicitly, not just the "current" one
    gc.collect()
def combine_fig(file_name, salience_dir, parsing_dir, salience_parsing_dir, save_dir):
    """Load the salience, parsing and salience+parsing renderings of one
    image and save them stacked into a single comparison figure."""
    source_dirs = (salience_dir, parsing_dir, salience_parsing_dir)
    images = [read_image(osp.join(directory, file_name)) for directory in source_dirs]
    plot(images, osp.join(save_dir, file_name))
def main():
    """Combine each image's three result renderings into save_dir."""
    arch = args.arch
    dataset = args.dataset

    def variant_dir(tag):
        # Result folders follow the pattern log/<arch>-<tag>-<dataset>/-1
        return osp.join('log/', '{}-{}-{}/-1'.format(arch, tag, dataset))

    salience_dir = variant_dir('salience')
    parsing_dir = variant_dir('parsing')
    salience_parsing_dir = variant_dir('salience-parsing')
    save_dir = osp.join(args.save_dir, '{}-improvement-{}'.format(arch, dataset))
    if not os.path.exists(save_dir):
        os.makedirs(save_dir)
    for img_name in os.listdir(salience_dir):
        combine_fig(img_name, salience_dir, parsing_dir, salience_parsing_dir, save_dir)
# Script entry point: run only when executed directly, not on import.
if __name__ == '__main__':
    main()
2ffe9071010ecb101be8a813c44cbb919e9b3376 | 267 | py | Python | quoters/check_connection.py | suman-kr/random-quotes | 5d3241dc9647a7b16a93dece12e9f214072e64c3 | [
"MIT"
] | 22 | 2020-01-24T08:59:18.000Z | 2022-02-09T02:35:20.000Z | quoters/check_connection.py | suman-kr/random-quotes | 5d3241dc9647a7b16a93dece12e9f214072e64c3 | [
"MIT"
] | 9 | 2021-04-07T00:57:09.000Z | 2022-03-31T10:18:06.000Z | quoters/check_connection.py | suman-kr/random-quotes | 5d3241dc9647a7b16a93dece12e9f214072e64c3 | [
"MIT"
] | 1 | 2021-06-06T19:00:55.000Z | 2021-06-06T19:00:55.000Z | import socket
from quoters.constants import CONN_URL
def is_connected():
    """Return True if a TCP connection to ``CONN_URL`` on port 80 succeeds.

    Used as a cheap internet-reachability probe; any ``OSError`` (DNS
    failure, connection refused/unreachable, timeout) means "offline".
    """
    try:
        # create_connection() either returns a connected socket or raises
        # OSError; the 5s cap keeps the probe from blocking indefinitely.
        sock_conn = socket.create_connection((CONN_URL, 80), timeout=5)
    except OSError:
        return False
    # The old `if(sock_conn)` truthiness check was dead code --
    # create_connection never returns a falsy value. Close promptly.
    sock_conn.close()
    return True
| 20.538462 | 60 | 0.632959 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
2fff77d99a6ba3eacfb663befedf8f439580d40e | 7,917 | py | Python | tests.py | mkolar/maya-capture | 01ef86333694cd528293c855ac39c18e84bbffcf | [
"MIT"
] | 118 | 2016-02-28T18:20:09.000Z | 2022-03-05T03:28:44.000Z | tests.py | mkolar/maya-capture | 01ef86333694cd528293c855ac39c18e84bbffcf | [
"MIT"
] | 46 | 2016-02-26T22:00:41.000Z | 2018-11-19T12:56:43.000Z | tests.py | mkolar/maya-capture | 01ef86333694cd528293c855ac39c18e84bbffcf | [
"MIT"
] | 32 | 2016-02-26T21:59:20.000Z | 2021-08-09T07:33:37.000Z | """Tests for capture.
Within Maya, setup a scene of moderate range (e.g. 10 frames)
and run the following.
Example:
>>> nose.run(argv=[sys.argv[0], "tests", "-v"])
"""
import capture
from maya import cmds
def test_capture():
    """Plain capture works"""
    # Smoke test: must not raise inside an open Maya session.
    capture.capture()


def test_camera_options():
    """(Optional) camera options works"""
    capture.capture(camera_options={"displayGateMask": False})


def test_display_options():
    """(Optional) display options works"""
    capture.capture(display_options={"displayGradient": False})


def test_viewport_options():
    """(Optional) viewport options works"""
    capture.capture(viewport_options={"wireframeOnShaded": True})


def test_viewport2_options():
    """(Optional) viewport2 options works"""
    capture.capture(viewport2_options={"ssaoEnable": True})
def test_parse_active_view():
    """Parse active view works"""

    # Set focus to modelPanel1 (assume it exists)
    # Otherwise the panel with focus (temporary panel from capture)
    # got deleted and there's no "active panel"
    import maya.cmds as cmds
    cmds.setFocus("modelPanel1")

    # Round-trip: parse the focused panel, then capture with those options.
    options = capture.parse_active_view()
    capture.capture(**options)
def test_parse_view():
    """Parse view works"""
    # Parse an explicit panel and feed the result straight back to capture.
    options = capture.parse_view("modelPanel1")
    capture.capture(**options)


def test_apply_view():
    """Apply view works"""
    # Applying a single camera option must not raise.
    capture.apply_view("modelPanel1", camera_options={"overscan": 2})


def test_apply_parsed_view():
    """Apply parsed view works"""
    # parse_view output must be directly consumable by apply_view.
    options = capture.parse_view("modelPanel1")
    capture.apply_view("modelPanel1", **options)
def test_apply_parsed_view_exact():
    """Apply parsed view sanity check works"""

    import maya.cmds as cmds
    panel = "modelPanel1"

    # Force a known state, then confirm parse_view reflects it exactly.
    cmds.modelEditor(panel, edit=True, displayAppearance="wireframe")
    parsed = capture.parse_view(panel)
    display = parsed["viewport_options"]["displayAppearance"]
    assert display == "wireframe"

    # important to test both, just in case wireframe was already
    # set when making the first query, and to make sure this
    # actually does something.
    cmds.modelEditor(panel, edit=True, displayAppearance="smoothShaded")
    parsed = capture.parse_view(panel)
    display = parsed["viewport_options"]["displayAppearance"]
    assert display == "smoothShaded"

    # apply_view must push the option back onto the panel itself.
    capture.apply_view(panel,
                       viewport_options={"displayAppearance": "wireframe"})

    assert cmds.modelEditor(panel,
                            query=True,
                            displayAppearance=True) == "wireframe"
def test_apply_parsed_view_all():
    """Apply parsed view all options works"""

    # A set of options all trying to be different from the default
    # settings (in `capture.py`) so we can test "changing states".
    # BUGFIX: the flipped values were previously computed and then
    # discarded, leaving these dicts empty and the "others" half of the
    # test vacuous; each loop now stores its modified value.
    camera_options = {}
    display_options = {}
    viewport_options = {}
    viewport2_options = {}

    for key, value in capture.CameraOptions.items():
        if isinstance(value, bool):
            value = not value
        elif isinstance(value, (int, float)):
            value = value + 1
        else:
            raise Exception("Unexpected value in CameraOptions: %s=%s"
                            % (key, value))
        camera_options[key] = value

    for key, value in capture.DisplayOptions.items():
        if isinstance(value, bool):
            value = not value
        elif isinstance(value, tuple):
            value = (1, 0, 1)
        else:
            raise Exception("Unexpected value in DisplayOptions: %s=%s"
                            % (key, value))
        display_options[key] = value

    for key, value in capture.ViewportOptions.items():
        if isinstance(value, bool):
            value = not value
        elif isinstance(value, (int, float)):
            value = value + 1
        elif isinstance(value, tuple):
            value = (1, 0, 1)
        elif isinstance(value, basestring):
            continue  # Don't bother with string options, for now
        else:
            raise Exception("Unexpected value in ViewportOptions: %s=%s"
                            % (key, value))
        viewport_options[key] = value

    for key, value in capture.Viewport2Options.items():
        if isinstance(value, bool):
            value = not value
        elif isinstance(value, (int, float)):
            value = value + 1
        elif isinstance(value, tuple):
            value = (1, 0, 1)
        elif isinstance(value, basestring):
            continue  # Don't bother with string options, for now
        else:
            raise Exception("Unexpected value in Viewport2Options: %s=%s"
                            % (key, value))
        viewport2_options[key] = value

    defaults = {
        "camera_options": capture.CameraOptions.copy(),
        "display_options": capture.DisplayOptions.copy(),
        "viewport_options": capture.ViewportOptions.copy(),
        "viewport2_options": capture.Viewport2Options.copy(),
    }

    others = {
        "camera_options": camera_options,
        "display_options": display_options,
        "viewport_options": viewport_options,
        "viewport2_options": viewport2_options,
    }

    panel = "modelPanel1"

    def compare(this, other):
        """Compare options for only settings available in `this`

        Some color values will be returned with possible floating
        point precision errors as such result in a slightly
        different number. We'd need to compare whilst keeping
        such imprecisions in mind.
        """
        precision = 1e-4

        for opt in this:
            this_option = this[opt]
            other_option = other[opt]

            for key, value in this_option.iteritems():
                other_value = other_option[key]

                if isinstance(value, float) or isinstance(other_value, float):
                    if abs(value - other_value) > precision:
                        return False
                elif isinstance(value, (tuple, list)):
                    # Assuming for now that any tuple or list contains floats
                    if not all((abs(a-b) < precision)
                               for a, b in zip(value, other_value)):
                        return False
                else:
                    if value != other_value:
                        return False

        return True

    # Apply defaults and check
    capture.apply_view(panel, **defaults)
    parsed_defaults = capture.parse_view(panel)
    assert compare(defaults, parsed_defaults)

    # Apply others and check
    capture.apply_view(panel, **others)
    parsed_others = capture.parse_view(panel)
    assert compare(others, parsed_others)
def test_preset():
    """Creating and applying presets works"""
    preset = dict(
        width=320,
        height=240,
        camera_options={"displayGateMask": False},
        viewport_options={"wireframeOnShaded": True},
        display_options={"displayGateMask": False},
    )
    capture.capture(**preset)
def test_parse_active_scene():
    """parse_active_scene() works"""
    parsed = capture.parse_active_scene()

    # Expected values queried straight from the current Maya session.
    expected = {
        "start_frame": cmds.playbackOptions(minTime=True, query=True),
        "end_frame": cmds.playbackOptions(maxTime=True, query=True),
        "width": cmds.getAttr("defaultResolution.width"),
        "height": cmds.getAttr("defaultResolution.height"),
        "compression": cmds.optionVar(query="playblastCompression"),
        "filename": (cmds.optionVar(query="playblastFile")
                     if cmds.optionVar(query="playblastSaveToFile") else None),
        "format": cmds.optionVar(query="playblastFormat"),
        "off_screen": bool(cmds.optionVar(query="playblastOffscreen")),
        "show_ornaments": bool(cmds.optionVar(query="playblastShowOrnaments")),
        "quality": cmds.optionVar(query="playblastQuality")
    }

    for key, expected_value in expected.items():
        assert parsed[key] == expected_value
| 31.416667 | 81 | 0.611848 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 2,531 | 0.319692 |
2fff8b0aec7151100a55548ed8c95e2c72aeeace | 3,400 | py | Python | examples/ubqc/client.py | cgmcintyr/SimulaQron | d80f0e43f2d993dfde5f8bfc253ca4987c19a12f | [
"BSD-3-Clause"
] | null | null | null | examples/ubqc/client.py | cgmcintyr/SimulaQron | d80f0e43f2d993dfde5f8bfc253ca4987c19a12f | [
"BSD-3-Clause"
] | 6 | 2018-10-18T09:06:23.000Z | 2018-10-24T16:08:20.000Z | examples/ubqc/client.py | cgmcintyr/SimulaQron | d80f0e43f2d993dfde5f8bfc253ca4987c19a12f | [
"BSD-3-Clause"
] | 5 | 2018-10-13T12:11:29.000Z | 2020-01-24T23:05:12.000Z | import random
import struct
import sys
import time
from pathlib import Path
import numpy as np
from SimulaQron.general.hostConfig import *
from SimulaQron.cqc.backend.cqcHeader import *
from SimulaQron.cqc.pythonLib.cqc import *
from flow import circuit_file_to_flow, count_qubits_in_sequence
from angle import measure_angle
# Randomly select circuit from circuits directory
circuits_path = Path(".") / "circuits"
circuit_file_paths = list(circuits_path.glob("*.json"))
circuit = random.choice(circuit_file_paths)

# Load the randomly selected circuit as an MBQC flow.
# (Previously this hard-coded "./circuits/circuit1.json", which ignored the
# random choice made above and contradicted the "Loading" message below.)
print("Client Loading {}".format(circuit))
seq_out = circuit_file_to_flow(str(circuit))

# Determine number of qubits our circuit needs
nQubits = count_qubits_in_sequence(seq_out)

# Initialize measurements count and entanglement lists
nMeasurement = 0
E1 = []
E2 = []

# We use the flow sequence to build entanglement lists and count measurements
for s in seq_out:
    s.printinfo()
    if s.type == "E":
        E1.append(s.qubits[0])
        E2.append(s.qubits[1])
    if s.type == "M":
        nMeasurement += 1

# Outcome of each qubit will be stored in this outcome list (-1 == unmeasured)
outcome = nQubits * [-1]

server_name = "Charlie"
with CQCConnection("Bob") as client:
    print("Client Sending (classical): Create {} qubits".format(nQubits))
    client.sendClassical(server_name, nQubits)

    # Prepare each qubit in |+> rotated by a secret random angle and send it.
    angles = []
    for i in range(0, nQubits):
        rand_angle = int(256 * random.random())
        angles.append(rand_angle)
        q = qubit(client)
        q.rot_Y(64)  # |+> state
        q.rot_Z(rand_angle)
        print("Client Sending (quantum): qubit {}".format(i + 1))
        client.sendQubit(q, server_name)
        time.sleep(1)

    print("Client Sending (classical): Ask to perform {} measurements".format(nQubits))
    client.sendClassical(server_name, nMeasurement)
    time.sleep(1)
    print("Client Sending (classical): List of 1st Qubits to Entangle".format(nQubits))
    client.sendClassical(server_name, E1)
    time.sleep(1)
    print("Client Sending (classical): List of 2nd Qubits to Entangle".format(nQubits))
    client.sendClassical(server_name, E2)

    for s in seq_out:
        if s.type == "M":
            # Which qubit are we measuring?
            qubit_n = s.qubit
            # What is the angle we wish to measure
            computation_angle = s.angle
            # NOTE(review): qubit numbers look 1-based (see outcome[qubit_n - 1])
            # while `angles` is 0-based; confirm angles[qubit_n] is intended.
            input_angle = angles[qubit_n]
            # Calculate the angle to send with randomisation applied
            r = np.round(random.random())
            angle_to_send = measure_angle(
                qubit_n, seq_out, outcome, input_angle, computation_angle
            ) + r * (np.pi)
            print("Client Sending (classical): ask to measure qubit {}".format(qubit_n))
            time.sleep(1)
            client.sendClassical(server_name, qubit_n)
            print(
                "Client Sending (classical): measurement angle {}".format(angle_to_send)
            )
            time.sleep(1)
            client.sendClassical(server_name, angle_to_send)
            m = int.from_bytes(client.recvClassical(), "little")
            print("Client Received: result {}".format(m))
            # We adjust for the randomness only we know we added
            if r == 1:
                outcome[qubit_n - 1] = 1 - m
            else:
                outcome[qubit_n - 1] = m

    print("Client Output: {}".format(outcome))
    sys.exit(0)
| 32.075472 | 88 | 0.649118 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,007 | 0.296176 |
2ffffeae1ee94afdecc472f7586e718b4d8018e7 | 4,733 | py | Python | save_combine.py | firebrettbrown/bbgm | 31d41cef2175be452793866e1119150518936120 | [
"MIT"
] | 11 | 2019-06-25T17:20:48.000Z | 2020-07-04T03:09:17.000Z | save_combine.py | firebrettbrown/bbgm | 31d41cef2175be452793866e1119150518936120 | [
"MIT"
] | null | null | null | save_combine.py | firebrettbrown/bbgm | 31d41cef2175be452793866e1119150518936120 | [
"MIT"
] | 10 | 2019-06-28T06:26:28.000Z | 2022-01-17T18:12:36.000Z | import argparse
import os
import sys
import shutil
import subprocess
import re
from bs4 import BeautifulSoup
import pandas as pd
import numpy as np
import pickle
from selenium import webdriver
tables = {}

parser = argparse.ArgumentParser(
    formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument('--folder', type=str, default='combine',
                    help='folder to save combine stats')
args = parser.parse_args()

# Create the output directory if it does not already exist.
try:
    os.mkdir(args.folder)
    print("Directory {} created".format(args.folder))
except FileExistsError:
    pass

driver = webdriver.Firefox()

# Scrape one NBA draft-combine page per season and cache it to disk;
# pages already saved are skipped.
for year in range(20):
    target = os.path.join(args.folder, '{}.html'.format(year))
    if not os.path.exists(target):
        driver.get('https://stats.nba.com/draft/combine-anthro/#!?SeasonYear=20{:02d}-{:02d}'.format(year, year + 1))
        with open(target, 'w') as page_file:
            page_file.write(driver.page_source)
        # Navigate away so the next .get() forces a fresh page load.
        driver.get('http://google.com')
# # load the data
# with open(target,'rt') as fp:
# data = fp.read()
# # collect all the tables
# m = re.findall(r'<!--[ \n]*(<div[\s\S\r]+?</div>)[ \n]*-->',data)
# m2 = re.findall(r'(<div class="table_outer_container">[ \n]*<div class="overthrow table_container" id="div_roster">[\s\S\r]+?</table>[ \n]*</div>[ \n]*</div>)',data)
# m3 = re.findall(r'(<div class="table_outer_container">[ \n]*<div class="overthrow table_container" id="div_contracts">[\s\S\r]+?</table>[ \n]*</div>[ \n]*</div>)',datac)
# m = m2 + m + m3
# print(target,len(m))
# tables[team] = {}
# for test_table in m:
# try:
# soup = BeautifulSoup(test_table,features="lxml")
# table_id = str(soup.find('table').get('id'))
# if table_id == ['team_and_opponent']:
# continue
# soup.findAll('tr')
# table_size = {'shooting':2,'pbp':1,'playoffs_shooting':2,'playoffs_pbp':1,'contracts':1}
# # use getText()to extract the text we need into a list
# headers = [th.getText() for th in soup.findAll('tr')[table_size.get(table_id,0)].findAll('th')]
# # exclude the first column as we will not need the ranking order from Basketball Reference for the analysis
# start_col = 1
# if table_id in ['contracts','injury']:
# start_col = 0
# headers = headers[start_col:]
# rows = soup.findAll('tr')[start_col:]
# player_stats = [[td.getText() for td in rows[i].findAll('td')]
# for i in range(len(rows))]
# if table_id in ['contracts']:
# player_status = [[td.get('class') for td in rows[i].findAll('td')]
# for i in range(len(rows))]
# status_array = []
# for status in player_status:
# if len(status) > 0:
# s2 = [False] + [s[-1] in ['salary-pl','salary-et','salary-tm'] for s in status[1:]]
# else:
# s2 = np.array([])
# status_array.append(s2)
# status_array = np.array(status_array)
# player_stats_new = []
# for a,b in zip(status_array,player_stats):
# b_new = []
# for c,d in zip(a,b):
# b_new.append(d if not c else '')
# player_stats_new.append(b_new)
# player_stats = player_stats_new
# if table_id in ['contracts','injury']:
# player_names = [[td.getText() for td in rows[i].findAll('th')]
# for i in range(len(rows))]
# player_stats = [a + b for a,b in zip(player_names[1:],player_stats[1:])]
# headers[0] = 'Name'
# stats = pd.DataFrame(player_stats, columns = headers).set_index('Name')
# if table_id in ['contracts']:
# stats = stats.drop(['Player'])
# stats = stats.iloc[:stats.index.get_loc('')]
# # drop nan
# stats = stats[~ stats.index.isin([None])]
# # convert to float
# obj_cols = stats.loc[:, stats.dtypes == object]
# conv_cols = obj_cols.apply(pd.to_numeric, errors = 'ignore')
# stats.loc[:, stats.dtypes == object] = conv_cols
# #print(table_id,stats.index)
# tables[team][table_id]= stats.fillna('')
# except:
# pass
# #print('FAILED TO PARSE ' +str(soup.find('table').get('id') ))
# with open('combine_{}.pkl'.format(args.year),'wb') as fp:
# pickle.dump(tables,fp) | 41.156522 | 175 | 0.539193 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 3,853 | 0.814071 |
64014d8fee54d1b69d480a688afff4e7b8216ffa | 958 | py | Python | osgar/test_node.py | m3d/osgar_archive_2020 | 556b534e59f8aa9b6c8055e2785c8ae75a1a0a0e | [
"MIT"
] | 12 | 2017-02-16T10:22:59.000Z | 2022-03-20T05:48:06.000Z | osgar/test_node.py | m3d/osgar_archive_2020 | 556b534e59f8aa9b6c8055e2785c8ae75a1a0a0e | [
"MIT"
] | 618 | 2016-08-30T04:46:12.000Z | 2022-03-25T16:03:10.000Z | osgar/test_node.py | robotika/osgar | 6f4f584d5553ab62c08a1c7bb493fefdc9033173 | [
"MIT"
] | 11 | 2016-08-27T20:02:55.000Z | 2022-03-07T08:53:53.000Z | import unittest
from unittest.mock import MagicMock
from datetime import timedelta
from osgar.bus import Bus
from osgar.node import Node
class NodeTest(unittest.TestCase):

    def test_usage(self):
        """A Node can be started and stopped cleanly."""
        bus = Bus(logger=MagicMock())
        node = Node(config={}, bus=bus.handle('mynode'))
        node.start()
        node.request_stop()
        node.join()

    def test_update(self):
        """update() consumes one message and mirrors it as an attribute."""
        cfg = {}
        bus = Bus(logger=MagicMock())
        node = Node(config=cfg, bus=bus.handle('mynode'))

        tester = bus.handle('tester')
        tester.register('vel')
        bus.connect('tester.vel', 'mynode.vel')
        timestamp = tester.publish('vel', 3)

        node.update()
        self.assertEqual(node.time, timestamp)
        self.assertEqual(node.vel, 3)

        # A second node that never received 'vel' must not expose it.
        other = Node(config=cfg, bus=bus.handle('mynode2'))
        self.assertNotIn('vel', dir(other))
# vim: expandtab sw=4 ts=4
| 27.371429 | 68 | 0.626305 | 789 | 0.823591 | 0 | 0 | 0 | 0 | 0 | 0 | 98 | 0.102296 |
6402443744b3bd54e4d4bbd65fa2dea681ba9bdd | 12,025 | py | Python | Programacao I/main.py | HiggsWRX/uevora | 7d7b3c082631628ffe4d0ecce707b532f3f7afe8 | [
"MIT"
] | 1 | 2017-05-08T13:48:49.000Z | 2017-05-08T13:48:49.000Z | Programacao I/main.py | HiggsWRX/uevora | 7d7b3c082631628ffe4d0ecce707b532f3f7afe8 | [
"MIT"
] | null | null | null | Programacao I/main.py | HiggsWRX/uevora | 7d7b3c082631628ffe4d0ecce707b532f3f7afe8 | [
"MIT"
] | 2 | 2018-05-07T10:51:34.000Z | 2019-07-14T21:21:06.000Z | '''Disciplina: Programação I
Trabalho prático ano lectivo 2013/2014
Realizado por Hiago Oliveira (29248) e Rui Oliveira (31511)
'''
class Village:
    """A size x size board on which each villager occupies two cells
    (potion and house), both marked with the villager's 1-based number.

    Relies on the module-level globals io_method / f_input / bad_file set
    by the __main__ block -- TODO confirm before reusing elsewhere.
    """

    # Constructor method
    # Used to create a new instance of Village, taking in arguments like
    # its size and population, then builds the board used throughout the
    # program
    # inputs: size, population
    # outputs: none
    def __init__(self, size, population):
        self.size = size
        self.population = population
        self.board = []          # size x size grid; 0 marks an empty cell
        self.locations = dict()  # villager -> [potion_x, potion_y, house_x, house_y]
        for y in range(self.size):
            self.board.append([])
            for x in range(self.size):
                self.board[y].append(0)

    # populate() method
    # Iterates over the number of population to fill the board with each
    # villager's coordinates
    # inputs: none
    # outputs: none
    def populate(self):
        # Coordinates come either from interactive input (io_method == 1)
        # or from the already-loaded file lines (io_method == 2, where the
        # first two f_input lines hold board size and population).
        global bad_file
        for i in range(self.population):
            while True:
                try:
                    if io_method == 1:
                        coords = input('{:d} Coords: '.format(i + 1)).split()
                    elif io_method == 2:
                        coords = f_input[i + 2].split()
                    for j in range(len(coords)):
                        coords[j] = int(coords[j])
                    # Storing in the dictionary the given coordinates. for eg:
                    # {1: [0, 0, 3, 2]}
                    self.locations[i + 1] = coords
                    # Checking if the given coordinates are already filled
                    if self.board[coords[1]][coords[0]] == 0:
                        if self.board[coords[3]][coords[2]] == 0:
                            self.board[coords[1]][coords[0]] = i + 1
                            self.board[coords[3]][coords[2]] = i + 1
                            break
                        else:
                            # File input cannot be retried: flag and abort.
                            if io_method == 2:
                                print('Bad input file, please review it.')
                                bad_file = True
                                return
                            else:
                                raise ValueError
                    else:
                        if io_method == 2:
                            print('Bad input file, please review it.')
                            bad_file = True
                            return
                        else:
                            raise ValueError
                except ValueError:
                    print('Coord already occupied... Try again.')
                except IndexError:
                    print('Invalid coordinate format...')
# solve(b, p, l)
# The solve function has all the necessary arguments to start solving the
# puzzle, it will first create a pool of all possible paths from point to point
# then finding the correct combination of paths through other functions
# inputs: board, population, locations
# "locations" is a dictionary of villagers, each one with its corresponding
# coordinates for their potion and house
# outputs: none
def solve(board, population, locations):
    """Solve the puzzle: pick one non-crossing path per villager that
    together cover the whole board, then print or write the result.

    Args:
        board: size x size grid; 0 is free, a number marks an endpoint.
        population: number of villagers.
        locations: dict villager -> [potion_x, potion_y, house_x, house_y].

    Output goes to stdout (io_method == 1) or to a "<prefix>.out" file
    (io_method == 2); uses the module-level globals io_method / fin_name.
    """
    # NOTE(review): board.copy() is a shallow copy, so writing the solution
    # into solved_board below also mutates the caller's board rows --
    # harmless here since the program restarts/exits afterwards, but verify
    # before reuse.
    solved_board = board.copy()
    paths = {}
    solve_key = []
    vil_keys = []
    # Retrieves from the dicionary the coordinates for the villager's potion
    # then finds all possible paths from the potion to the house using the
    # path_finder() function
    for i in range(1, population + 1):
        x = locations[i][0]
        y = locations[i][1]
        paths[i] = path_finder(solved_board, i, y, x)
    # Creates a list with the villagers, for eg if this village has 3
    # inhabitants, vil_keys will be [1, 2, 3]
    for i in paths.keys():
        vil_keys.append(i)
    # sort_out() fills solve_key with, for each villager, the index of the
    # chosen path inside paths[villager]; empty solve_key means no solution.
    sort_out(paths, solved_board, vil_keys, 0, [], solve_key)
    # io_method dictates whether the user chose to input values manually (1)
    # or by providing a file (2). The only difference is if io_method == 1
    # the solved puzzle will be printed directly in the screen instead of
    # being written to a file
    if io_method == 1:
        if not solve_key:
            print('\nAlesia')
        else:
            print('\nToutatis\n')
            for i in range(len(vil_keys)):
                for j in paths[vil_keys[i]][solve_key[i]]:
                    solved_board[j[0]][j[1]] = vil_keys[i]
            draw_board(solved_board)
    elif io_method == 2:
        # NOTE(review): fin_name[:3] keeps only the first three characters
        # of the input file name, so this only round-trips names like
        # "abc.in" -- confirm the intended naming scheme.
        fout_name = fin_name[:3] + '.out'
        temp = open(fout_name, 'w+')
        if not solve_key:
            temp.write('Alesia')
        else:
            temp.write('Toutatis\n')
            for i in range(len(vil_keys)):
                for j in paths[vil_keys[i]][solve_key[i]]:
                    solved_board[j[0]][j[1]] = vil_keys[i]
            write_board(solved_board, temp)
        temp.close()
        print('%s has been created at your CWD.' % fout_name)
    return
# path_finder()
# Used as an intermediary function to create the list of all possible paths,
# lather filled by path_crawler()
# inputs: board, i, y, x
# "i" is the current villager which possible paths are being generated.
# x and y are the coords of that villager's potion, or the starting point.
# outputs: pos_path
# "pos_path" is a list fileld with all possible paths from 'a' to 'b'.
def path_finder(board, i, y, x):
    """Return every path from villager *i*'s endpoint at (y, x) to the
    matching endpoint, each as a list of (y, x) cells."""
    found_paths = []
    path_crawler(board, i, y, x, found_paths, [], y, x)
    return found_paths
# Directions used to travel the board
dirs = [(0, 1), (1, 0), (0, -1), (-1, 0)]  # (dx, dy) offsets: d[0] is added to x, d[1] to y
# path_crawler()
# Generates all possible paths from 'a' to 'b', recursively
# inputs: board, i, y, x, path_list, stack, startY, startX
# outputs: none, but modifies path_list (possible paths)
def path_crawler(board, i, y, x, path_list, stack, startY, startX):
    """Depth-first search that appends to path_list every path of free
    cells from (startY, startX) to villager i's other endpoint.

    stack holds the (y, x) cells of the path built so far; cells on the
    current path are temporarily marked '*' and restored on backtrack.
    """
    # Previously visited/tested paths are marked with a *
    if board[y][x] == '*':
        return
    stack.append((y, x))
    if board[y][x] == 0 or (y == startY and x == startX):
        lastVal = board[y][x]
        board[y][x] = '*'  # mark as being on the current path
        for d in dirs:
            if not valid(x + d[0], y + d[1], len(board)):
                continue
            path_crawler(board, i, y + d[1], x + d[0], path_list, stack,
                         startY, startX)
        board[y][x] = lastVal  # restore the cell when backtracking
    elif board[y][x] == i:
        # Reached this villager's other endpoint: record a copy of the path.
        path_list.append(list(stack))
    stack.pop()
# valid()
# Checks if the given coordinate is valid, i.e., if it's within the board
# inputs: x, y, size
# outputs: boolean
# True being a valide coordinate, False otherwise
def valid(x, y, size):
    """Return True when (x, y) lies inside a size x size board."""
    return 0 <= x < size and 0 <= y < size
# sort_out()
# Receives a dictionary (paths) with all villagers and their possible solutions
# which then sorts out and finds the correct solution key, by excluding the
# paths that intersect with eachother and combining a set of paths that can be
# used to solve the puzzle
# inputs: paths, board, vil, index, stack, final_sol
# "vil" is a list of villagers, for eg if 3 villagers exist vil = [1, 2, 3]
# "final_sol" is a list of the correct key to solve the puzzle, for further
# explanation check the report.
# "stack" list used to store possible paths, pops the wrong ones.
def sort_out(paths, board, vil, index, stack, final_sol):
    """Recursively choose one path per villager such that no two chosen
    paths share a cell, appending the chosen path indices to final_sol.

    Args:
        paths: dict villager -> list of candidate paths ((y, x) tuples).
        board: the board; only its dimensions are used here.
        vil: list of villager numbers, e.g. [1, 2, 3].
        index: position in vil currently being assigned.
        stack: all cells used by the paths chosen so far.
        final_sol: output list of chosen path indices, parallel to vil.

    Returns:
        True when a complete non-overlapping combination was found.
    """
    if len(vil) == 1 and index == 1:
        return True
    if index == len(vil):
        # A combination only counts as a solution when the chosen paths
        # cover every cell of the board.
        return len(stack) == len(board) * len(board[index - 1])
    for current in range(0, len(paths[vil[index]])):
        path = paths[vil[index]][current]
        if intersects(path, stack):
            continue
        final_sol.append(current)
        # stack + path builds a fresh list, so backtracking needs no undo
        # of stack; only final_sol is popped on failure.
        if sort_out(paths, board, vil, index + 1, stack + path, final_sol):
            return True
        final_sol.pop()
    return False
# intersects()
# Returns whether an item is present in both lists
# inputs: listA, listB
# outputs: boolean
def intersects(listA, listB):
    """Return True when the two lists share at least one element."""
    return any(item in listB for item in listA)
# draw_board()
# Used to properly print "board", or a list of lists
# inputs: board
# outputs: none
def draw_board(board):
    """Print the board to stdout, one row per line, cells space-separated."""
    for row in board:
        print("".join("{} ".format(cell) for cell in row))
# write_board()
# Same as draw_board(), but instead of printing directly to the screen,
# writes it in a file
# inputs: board, file
# outputs: none
def write_board(board, file):
    """Write the board to *file*, one row per line, cells space-separated."""
    for row in board:
        file.write("".join(str(cell) + ' ' for cell in row))
        file.write('\n')
if __name__ == '__main__':
    # While True, the game runs. If the user later chooses not to play again,
    # a 'break' statement will break out of it.
    while True:
        bad_file = False
        print('Input values manually(1) or import them from a file(2)? ',
              end='')
        # Keep asking until a valid I/O method (1 or 2) is entered.
        while True:
            try:
                io_method = int(input())
                if io_method != 1 and io_method != 2:
                    raise ValueError
                else:
                    break
            except ValueError:
                print('Not a valid choice...')
        if io_method == 2:
            f_input = []
            # Read the whole input file into f_input, one stripped line each.
            while True:
                try:
                    fin_name = input('Please enter the file name: ')
                    temp = open(fin_name, 'r')
                    # Fills a list f_input with the contents of a file
                    for line in temp:
                        f_input.append(line.rstrip())
                    temp.close()
                    break
                except FileNotFoundError:
                    print('Error: No such file or directory.')
        # Board size: prompted interactively, or taken from file line 1.
        while True:
            try:
                if io_method == 1:
                    board_size = int(input('Village size: '))
                elif io_method == 2:
                    board_size = int(f_input[0])
                if board_size < 2 or board_size > 7:
                    if io_method == 2:
                        print('Bad input file, please review it.')
                        bad_file = True
                        break
                    else:
                        raise ValueError
                else:
                    break
            except ValueError:
                print('Village size must be AN INTEGER between 2 and 7.')
        # These "bad_file checks" are used to know if the file provided
        # contains a bad variable or coordinate, eg a larger village than
        # permitted, or a coordinate already occupied..
        # These checks occur twice more down below.
        if bad_file:
            input('Press Enter to exit...')
            break
        # Population: prompted interactively, or taken from file line 2.
        while True:
            try:
                if io_method == 1:
                    vil_population = int(input('Village population: '))
                elif io_method == 2:
                    vil_population = int(f_input[1])
                if vil_population < 1 or vil_population > board_size:
                    if io_method == 2:
                        print('Bad input file, please review it.')
                        bad_file = True
                        break
                    else:
                        raise ValueError
                else:
                    break
            except ValueError:
                print('Population must be between 1 and village size.')
        if bad_file:
            input('Press Enter to exit...')
            break
        # Creates a new instance of Village and then populates it
        new_game = Village(board_size, vil_population)
        new_game.populate()
        if bad_file:
            input('Press Enter to exit...')
            break
        # start solving
        solve(new_game.board, vil_population, new_game.locations)
        replay = input('\nReplay? Y/N ... ')
        if replay == 'n' or replay == 'N':
            break
        elif replay == 'y' or replay == 'Y':
            continue
        else:
            print('Interpreting vague answer as... no.')
            input('Press Enter to exit...')
            break
| 32.325269 | 79 | 0.538462 | 2,536 | 0.210841 | 0 | 0 | 0 | 0 | 0 | 0 | 4,549 | 0.378201 |
640327734c43cd0a30210d3a9d569373531e582d | 7,824 | py | Python | backend/src/services/post_translation/post_translation.py | didi/MeetDot | a57009d30c1347a9b85950c2e02b77685ce63952 | [
"Apache-2.0"
] | 6 | 2021-09-23T14:53:58.000Z | 2022-02-18T10:14:17.000Z | backend/src/services/post_translation/post_translation.py | didi/MeetDot | a57009d30c1347a9b85950c2e02b77685ce63952 | [
"Apache-2.0"
] | null | null | null | backend/src/services/post_translation/post_translation.py | didi/MeetDot | a57009d30c1347a9b85950c2e02b77685ce63952 | [
"Apache-2.0"
] | 1 | 2021-09-24T02:48:50.000Z | 2021-09-24T02:48:50.000Z | """
The main responsibilities of the PostTranslation class are:
1) initialize given strategy
2) take API request
3) remove profanity
4) return translation text after these processes
Post-translation API input:
{
"session_id": unique session ID,
"strategies": list of strategies want to be applied (e.g. ["translate-k", "mask-k"])
"params": None or list of parameter sets for applied strategies, (e.g. [ {"k": 4}, {"k":5}])
"translation": translation text,
"is_final": True/False (True if current translation text will not change
anymore, else False if the utterance is final, no
strategies will be applied)
}
Post-translation API output:
{
"session_id": unique session ID,
"translation": translation text after applying all enabled passes,
"translation_update_status": True if current translation response is different
with last timestamp response, else False
}
"""
import re
import os
from languages import languages
from ..tokenizer import get_tokenizer
from .interface import PostTranslationRequest, PostTranslationResponse
from utils import ThreadSafeDict
import requests
class PostTranslationService:
    """Applies post-processing passes to streaming translation output:
    translate-k throttling, mask-k anti-flicker, profanity masking and
    (optionally) punctuation/capitalization via an external server.
    """

    def __init__(self, config, logger):
        self.config = config
        self.logger = logger
        # Flags derived from config: a pass runs only when its parameter
        # is set and positive.
        self.do_translate_k = self.config.translate_k and self.config.translate_k > 0
        self.do_mask_k = self.config.mask_k and self.config.mask_k > 0

        # TODO(scotfang) make translate_k dictionaries garbage collect like an LRU cache
        self.translate_k_count = ThreadSafeDict()  # (session_id, language) as keys

        # We only do atomic reads/writes to this dict, so no need to make it a ThreadSafeDict.
        # TODO(scotfang) make translate_k dictionaries garbage collect like an LRU cache
        self.translate_k_cached_translations = {}

        # use punctuation server if it is specified in the env
        self.punctuation_server_url = os.getenv("PUNCTUATION_SERVER_URL")
        self.punctuation_server_enabled = (
            bool(self.punctuation_server_url) and self.config.add_punctuation
        )
        if self.punctuation_server_enabled:
            # punctuation_server_active is only defined when the server is
            # enabled; post_translation checks enabled before active.
            self.punctuation_server_active = self.test_punctuation_server()

    def test_punctuation_server(self):
        """Probe the punctuation server once; return True when reachable."""
        try:
            requests.post(
                f"{self.punctuation_server_url}/punctuate_and_capitalize",
                json={"text": "test", "language": "en-US"},
                timeout=1,
            )
            return True
        except requests.exceptions.ConnectionError:
            print(
                f"Could not connect to Punctuation server at "
                f"{self.punctuation_server_url} - is it running?"
            )
            return False

    def __call__(self, request: PostTranslationRequest) -> PostTranslationResponse:
        """Run all enabled post-translation passes for one request."""
        translation = self.post_translation(
            request.session_id,
            request.translation,
            request.original_language,
            request.language,
            request.is_final,
        )
        return PostTranslationResponse(
            translation=translation,
        )

    def post_translation(
        self, session_id, translation, original_language, language, asr_is_final
    ):
        """
        Note: all strategies only apply on current input utterance. PostTranslation service won't
        apply across previous utterances.

        Args:
            translation:
            asr_is_final: boolean, True if ASR service decides the current ASR text is finalized
                          and won't be changed anymore
        Return:
            translation: translated string after all passes
            mt_update_flag: True (in default) if the mt translation after anti-flicker methods
                            changes compared to previous time step translation
        """
        update = True
        key = f"{session_id}-{language}"
        # translate-k and mask-k only make sense for actual translations,
        # i.e. when source and target languages differ.
        if original_language != language and self.do_translate_k:
            update = self._update_translate_k(key, asr_is_final)
            if not update:
                # Skip this update: reuse the last emitted translation.
                translation = self.translate_k_cached_translations[key]
        if update:
            if original_language != language and self.do_mask_k:
                translation = self._mask_k(translation, language, asr_is_final)
            if self.config.remove_profanity:
                translation = self._remove_profanity(translation, language)
        if original_language != language and self.do_translate_k:
            if asr_is_final:
                # TODO(scotfang): If a stale final request gets skipped in translator.py,
                #                 we won't clear these cached translations like we're supposed to.
                self.translate_k_cached_translations[key] = ""
            elif update:
                self.translate_k_cached_translations[key] = translation
        self.logger.debug(
            f"session id: {session_id}, translation str after post-translation: {translation}"
        )

        # call punctuation and capitalization server to insert punctuations
        # and revise capitalization
        if (
            asr_is_final
            and self.punctuation_server_enabled
            and self.punctuation_server_active
        ):
            resp = requests.post(
                f"{self.punctuation_server_url}/punctuate_and_capitalize",
                json={"text": translation, "language": language},
            )
            translation = resp.json()["text"]

        return translation

    def _update_translate_k(self, key, asr_is_final):
        """
        Return:
            update_flag: bool, indicating if the translation should be updated.
        """
        # Atomically bump this session/language's request counter; only
        # every k-th request (or a final one) triggers an update.
        with self.translate_k_count as counts:
            if key not in counts:
                counts[key] = 0
            ct = counts[key]
            counts[key] += 1
        if asr_is_final or ct % self.config.translate_k == 0:
            return True
        else:
            return False

    def _mask_k(self, translation, language, asr_is_final):
        """
        implement the mask k strategy mentioned in google's paper
        https://arxiv.org/pdf/1912.03393.pdf

        definition: mask the last k tokens of the predicted target sentence;
        - The masking is only applied if the current source are prefixes and not yet
          completed sentences.

        Args:
        Return:
            translation string
        """
        if not asr_is_final:
            # if asr text is not finalized, mask the last k tokens of the predicted target
            tokenizer = get_tokenizer(language)
            translation_tokens = tokenizer.tokenize(translation)
            translation_tokens_masked = translation_tokens[: -self.config.mask_k]
            if not translation_tokens_masked and self.config.disable_masking_before_k:
                # Masking would empty the output entirely; optionally keep
                # the tokens instead of showing nothing.
                translation_tokens_masked = translation_tokens
            translation = tokenizer.detokenize(translation_tokens_masked)
        return translation

    def _remove_profanity(self, translation, language):
        """
        Remove profane words using a simple per-language word list.

        This is not perfect, but probably good enough, since all words
        must have come through the ASR anyways.
        """
        lang = languages[language]
        for word in lang.profane_words:
            if lang.has_spaces:
                # The lambda receives a re.Match; word[0] is the matched
                # text, which is replaced by its first char plus asterisks.
                translation = re.sub(
                    fr"\b{word}\b",
                    lambda word: word[0][0] + "*" * (len(word[0]) - 1),
                    translation,
                    flags=re.IGNORECASE,
                )
            else:
                # No word boundaries in this language: plain substring mask.
                translation = translation.replace(word, "*" * len(word))
        return translation
| 37.257143 | 98 | 0.624361 | 6,618 | 0.845859 | 0 | 0 | 0 | 0 | 0 | 0 | 3,351 | 0.428298 |
64045b106becea133b4f1a358490427556743f7a | 427 | py | Python | src/3-2-7.py | Nikxxx007/sturyPython | 09fbcb3e77425a810deea93e06e14134dc154f23 | [
"MIT"
] | null | null | null | src/3-2-7.py | Nikxxx007/sturyPython | 09fbcb3e77425a810deea93e06e14134dc154f23 | [
"MIT"
] | null | null | null | src/3-2-7.py | Nikxxx007/sturyPython | 09fbcb3e77425a810deea93e06e14134dc154f23 | [
"MIT"
] | null | null | null | from math import *
def main():
    """Play ten rounds: read a radius once, then classify each (x, y) shot."""
    rad = int(input('rad = '))
    for shot in range(1, 11):
        print(f'Shot number: {shot}')
        x = float(input('x = '))
        y = float(input('y = '))
        # Region 1: outside the circle centred at (-sqrt(rad), sqrt(rad)),
        # restricted to the second quadrant.
        hit_region_one = ((x + sqrt(rad)) ** 2 + (y - sqrt(rad)) ** 2 > rad
                          and x < 0 and y > 0)
        # Region 2: open rectangle 0 < x < 2*rad, -rad < y < 0.
        hit_region_two = 0 < x < 2 * rad and -rad < y < 0
        if hit_region_one or hit_region_two:
            print('yes')
        else:
            print('no')
# Run the game only when executed as a script (not on import).
if __name__ == '__main__':
    main()
64052ebf346968245c36b8f3711569edc3bcb093 | 454 | py | Python | navigationCommand.py | islam-shamiul/Selenium_python | ee4cea5e58ab9afa88b3ba3e70aef52ec4808d4a | [
"MIT"
] | null | null | null | navigationCommand.py | islam-shamiul/Selenium_python | ee4cea5e58ab9afa88b3ba3e70aef52ec4808d4a | [
"MIT"
] | null | null | null | navigationCommand.py | islam-shamiul/Selenium_python | ee4cea5e58ab9afa88b3ba3e70aef52ec4808d4a | [
"MIT"
] | 1 | 2020-07-21T08:43:25.000Z | 2020-07-21T08:43:25.000Z | from selenium import webdriver
from selenium.webdriver.common.keys import Keys
import time
# Launch Chrome via a machine-specific ChromeDriver path (Windows layout).
driver = webdriver.Chrome(executable_path="E:/SQA/chromedriver_win32/chromedriver.exe")
driver.get("http://newtours.demoaut.com/")
time.sleep(5)
print(driver.title)
# Navigate to a second page, then exercise browser history navigation.
driver.get("https://www.google.com/")
time.sleep(5)
print(driver.title)
driver.back()  # history: back to the first page
time.sleep(5)
print(driver.title)
driver.forward()  # history: forward to Google again
time.sleep(5)
print(driver.title)
driver.close()  # close the current browser window