hexsha stringlengths 40 40 | size int64 7 1.04M | ext stringclasses 10 values | lang stringclasses 1 value | max_stars_repo_path stringlengths 4 247 | max_stars_repo_name stringlengths 4 125 | max_stars_repo_head_hexsha stringlengths 40 78 | max_stars_repo_licenses listlengths 1 10 | max_stars_count int64 1 368k โ | max_stars_repo_stars_event_min_datetime stringlengths 24 24 โ | max_stars_repo_stars_event_max_datetime stringlengths 24 24 โ | max_issues_repo_path stringlengths 4 247 | max_issues_repo_name stringlengths 4 125 | max_issues_repo_head_hexsha stringlengths 40 78 | max_issues_repo_licenses listlengths 1 10 | max_issues_count int64 1 116k โ | max_issues_repo_issues_event_min_datetime stringlengths 24 24 โ | max_issues_repo_issues_event_max_datetime stringlengths 24 24 โ | max_forks_repo_path stringlengths 4 247 | max_forks_repo_name stringlengths 4 125 | max_forks_repo_head_hexsha stringlengths 40 78 | max_forks_repo_licenses listlengths 1 10 | max_forks_count int64 1 105k โ | max_forks_repo_forks_event_min_datetime stringlengths 24 24 โ | max_forks_repo_forks_event_max_datetime stringlengths 24 24 โ | content stringlengths 1 1.04M | avg_line_length float64 1.77 618k | max_line_length int64 1 1.02M | alphanum_fraction float64 0 1 | original_content stringlengths 7 1.04M | filtered:remove_function_no_docstring int64 -102 942k | filtered:remove_class_no_docstring int64 -354 977k | filtered:remove_delete_markers int64 0 60.1k |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
195b8629b6c5b96f8b0420012e11e13bd47a5abc | 26,645 | py | Python | z_laser_viz/src/z_laser_viz/zlp_viz.py | fada-catec/z_laser_projector | 79be087febbdc721734031fdf0b3289817be92ce | [
"Apache-2.0"
] | null | null | null | z_laser_viz/src/z_laser_viz/zlp_viz.py | fada-catec/z_laser_projector | 79be087febbdc721734031fdf0b3289817be92ce | [
"Apache-2.0"
] | null | null | null | z_laser_viz/src/z_laser_viz/zlp_viz.py | fada-catec/z_laser_projector | 79be087febbdc721734031fdf0b3289817be92ce | [
"Apache-2.0"
] | null | null | null | # Copyright (c) 2020, FADA-CATEC
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This module contains utility classes and methods to run a projections visualizer."""
import rospy
import math
import numpy as np
from math import sin, cos, pi, radians
from scipy.spatial.transform import Rotation
from z_laser_zlp1.zlp_keyboard import KeyboardParameters
from z_laser_zlp1.zlp_utils import CoordinateSystemParameters, ProjectionElementParameters
from geometry_msgs.msg import Point, Quaternion, Vector3, Pose, Quaternion
from visualization_msgs.msg import Marker, MarkerArray
from std_srvs.srv import Trigger, TriggerResponse
from z_laser_msgs.msg import Figure
from z_laser_msgs.srv import CoordinateSystem, CoordinateSystemResponse
from z_laser_msgs.srv import CoordinateSystemName, CoordinateSystemNameResponse
from z_laser_msgs.srv import CoordinateSystemShow, CoordinateSystemShowResponse
from z_laser_msgs.srv import CoordinateSystemList, CoordinateSystemListResponse
from z_laser_msgs.srv import ProjectionElement, ProjectionElementResponse
class ZLPVisualizer(object):
    """This class implements the functions related with projection elements.

    Attributes:
        cs_marker_array (object): MarkerArray with the coordinate systems' markers (origin axes and frame of each system)
        pe_marker_array (object): MarkerArray with the markers of the projection elements
        active_cs (str): name of active reference system
        cs_reference (str): auxiliary variable to differentiate and find the origin axes and frames markers
        STD_WAIT_TIME (int): predefined number of projection seconds in reference system definition
        figures_list (list): list with the figures' identifier names
    """
    def __init__(self):
        """Initialize the ZLPVisualizer object."""
        self.cs_marker_array = MarkerArray()  # origin-axes/frame markers of every coordinate system
        self.pe_marker_array = MarkerArray()  # markers of the projection elements
        self.active_cs = ""                   # name of the active reference system
        self.cs_reference = ""                # "_origin"/"_frame" suffix currently being projected
        self.STD_WAIT_TIME = CoordinateSystemParameters().DEFAULT_SHOW_TIME  # default projection seconds
        self.figures_list = ProjectionElementParameters().figures_list       # figure identifier names
        self.scale_factor = 1                 # cumulative scale applied by scale()
    def open_services(self):
        """Advertise the visualizer ROS services and subscribe to the figure topics."""
        # projection and coordinate-system control services
        self.start_proj = rospy.Service('projection_start', Trigger, self.projection_start_cb)
        self.stop_proj = rospy.Service('projection_stop', Trigger, self.projection_stop_cb)
        self.manual_cs = rospy.Service('define_coordinate_system', CoordinateSystem, self.manual_define_coord_sys_cb)
        self.set_cs = rospy.Service('set_coordinate_system', CoordinateSystemName, self.set_coord_sys_cb)
        self.rem_cs = rospy.Service('remove_coordinate_system', CoordinateSystemName, self.remove_coord_sys_cb)
        self.show_cs = rospy.Service('show_active_coordinate_system', CoordinateSystemShow, self.show_coord_sys_cb)
        # projection-element visibility/lifecycle services
        self.hide_proj_elem = rospy.Service('hide_projection_element', ProjectionElement, self.hide_proj_elem_cb)
        self.unhide_proj_elem = rospy.Service('unhide_projection_element', ProjectionElement, self.unhide_proj_elem_cb)
        self.remove_proj_elem = rospy.Service('remove_projection_element', ProjectionElement, self.remove_proj_elem_cb)
        # topics that create and interactively monitor projection elements
        self.add_proj_elem = rospy.Subscriber("add_projection_element", Figure, self.add_fig_cb)
        self.monit_proj_elem = rospy.Subscriber("monitor_projection_element", Figure, self.init_keyboard_listener_cb)
def projection_start_cb(self, req):
"""Callback of ROS service to start projection of elements related to the active reference system on the surface.
Args:
req (object): trigger request ROS service object
Returns:
tuple[bool, str]: the first value in the returned tuple is a bool success value and the second value in the tuple
is an information message string
"""
if not self.active_cs:
return TriggerResponse(False, "No Coordinate System set as active.")
for i, marker in enumerate(self.pe_marker_array.markers):
if marker.ns.find(self.active_cs)>-1:
self.pe_marker_array.markers[i].action = Marker.ADD
return TriggerResponse(True, "Projection started.")
def projection_stop_cb(self, req):
"""Callback of ROS service to stop projection of all elements.
Args:
req (object): trigger request ROS service object
Returns:
tuple[bool, str]: the first value in the returned tuple is a bool success value and the second value in the tuple
is an information message string
"""
for i in range(len(self.pe_marker_array.markers)):
self.pe_marker_array.markers[i].action = Marker.DELETE
return TriggerResponse(True, "Projection stopped.")
def manual_define_coord_sys_cb(self, req):
"""Callback of ROS service to define a new reference system, stating the points coordinates manually by the user.
Args:
req (object): object with the necessary info to define a new coordinate system
Returns:
tuple[list, bool, str]: the first value in the returned tuple is a list of the user reference points T0, T1, T2, T3,
the second is a bool success value and the third s an information message string
"""
for marker in self.cs_marker_array.markers:
if req.name in marker.ns:
return CoordinateSystemResponse([], False, "Coordinate System already exists.")
self.active_cs = req.name
axis_x_marker, axis_y_marker = self.coord_sys_axes(req)
self.cs_marker_array.markers.append(axis_x_marker)
self.cs_marker_array.markers.append(axis_y_marker)
self.cs_marker_array.markers.append(self.coord_sys_frame(req))
self.cs_reference = "_origin"
self.timer_secs = self.STD_WAIT_TIME
self.update_cs_markers()
return CoordinateSystemResponse([], True, "Coordinate System added manually.")
def timer_cb(self, timer):
"""Timer for controlling the projection pause between the reference systems's different markers."""
for i in range(len(self.cs_marker_array.markers)):
self.cs_marker_array.markers[i].action = Marker.DELETE
self.update_cs_markers()
    def update_cs_markers(self):
        """Change projection between origin axes and frame markers.

        Shows the markers whose namespace matches the current reference suffix,
        then chains a one-shot timer that hides them and advances the sequence:
        "_origin" -> "_frame" -> "empty" (which matches nothing and ends it).
        """
        for marker in self.cs_marker_array.markers:
            if (self.active_cs + self.cs_reference) in marker.ns:
                marker.action = Marker.ADD
        if self.cs_reference in ["_origin","_frame"]:
            rospy.Timer(rospy.Duration(self.timer_secs), self.timer_cb, oneshot=True)
        # switch to the frame view after the origin view, otherwise finish the cycle
        self.cs_reference = "_frame" if self.cs_reference == "_origin" else "empty"
def base_marker(self, cs_name):
"""Initialize the common and basic parameters of a marker.
Args:
cs_name (object): name of the reference system with which the marker is associated
Returns:
object: marker initialized
"""
# define marker common fields
marker = Marker()
marker.type = Marker.LINE_STRIP
marker.action = Marker.DELETE
marker.scale.x = 0.01 # Vector3(0.01, 0.01, 0)
marker.color.g = 1.0
marker.color.a = 1.0
marker.header.frame_id = cs_name
marker.pose.orientation = Quaternion(0,0,0,1)
return marker
def coord_sys_axes(self, cs_points):
"""Create the origin axes markers.
Args:
cs_points (object): object with the x,y,z position of the reference points from the reference system
Returns:
tuple[object, object]: the first value in the returned tuple is the x-axis marker and
the second is the y-axis marker
"""
# read axes points
orig = Point() # axes origin point
orig.x = cs_points.P[0].x * 0.001
orig.y = cs_points.P[0].y * 0.001
axis_x = Point() # axis x line end point
axis_x.x = cs_points.P[1].x * 0.001
axis_x.y = orig.y
axis_y = Point() # axis y line end point
axis_y.x = orig.x
axis_y.y = cs_points.P[2].y * 0.001
# create one marker for each axis line
# and append the correspondent points
axis_x_marker = self.base_marker("[P]")
axis_y_marker = self.base_marker("[P]")
axis_x_marker.points.append(orig)
axis_x_marker.points.append(axis_x)
axis_y_marker.points.append(orig)
axis_y_marker.points.append(axis_y)
# update frame and namespace
axis_x_marker.ns = cs_points.name + "_origin/polyline/axis_x"
axis_y_marker.ns = cs_points.name + "_origin/polyline/axis_y"
return axis_x_marker, axis_y_marker
def coord_sys_frame(self, cs_points):
"""Create the frame marker.
Args:
cs_points (object): object with the x,y,z position of the reference points from the reference system
Returns:
object: frame marker
"""
frame = self.base_marker("[P]")
# read frame points
for i in [0,1,2,3,0]:
point = Point()
point.x = cs_points.P[i].x * 0.001
point.y = cs_points.P[i].y * 0.001
frame.points.append(point)
frame.ns = cs_points.name + "_frame/polyline/T1_T2_T3_T4"
return frame
    def set_coord_sys_cb(self, req):
        """Callback of ROS service to set the indicated reference system as 'active reference system'.

        Args:
            req (object): object with the necessary parameters to identify a coordinate system

        Returns:
            object: CoordinateSystemNameResponse with a success flag and an info message
        """
        # no existence check is performed here: the name is accepted as-is
        self.active_cs = req.name
        return CoordinateSystemNameResponse(True, "Coordinate System set as active.")
def show_coord_sys_cb(self, req):
"""Callback of ROS service to project reference points, origin axes and frame of the active reference system.
Args:
req (object): object with the necessary parameters to identify a reference system
Returns:
tuple[bool, str]: the first value in the returned tuple is a bool success value and the second value in the tuple
is an information message string
"""
if not self.active_cs:
return CoordinateSystemShowResponse(False, "None Coordinate System is set.")
if not req.secs > 0:
return CoordinateSystemShowResponse(False, "Seconds projection is set to 0.")
self.timer_secs = req.secs
self.cs_reference = "_origin"
self.update_cs_markers()
return CoordinateSystemShowResponse(True, "Active Coordinate System showed correctly.")
def remove_coord_sys_cb(self, req):
"""Callback of ROS service to remove a reference system.
Args:
req (object): object with the necessary parameters to identify a reference system
Returns:
tuple[bool, str]: the first value in the returned tuple is a bool success value and the second value in the tuple
is an information message string
"""
if any(req.name in cs.ns for cs in self.cs_marker_array.markers):
self.cs_marker_array.markers = [cs for cs in self.cs_marker_array.markers if cs.ns.find(req.name)==-1]
self.pe_marker_array.markers = [pe for pe in self.pe_marker_array.markers if pe.ns.find(req.name)==-1]
if req.name == self.active_cs:
self.active_cs = ""
return CoordinateSystemNameResponse(True, "Coordinate System removed.")
else:
return CoordinateSystemNameResponse(False, "Coordinate System does not exist.")
def add_fig_cb(self, msg):
"""Callback of ROS topic to define a new projection element.
Args:
msg (object): object with the necessary parameters to define a new projection element
"""
# define marker common fields
marker = self.base_marker(self.active_cs)
step = self.compute_step()
marker.pose.position.x = msg.position.x * step
marker.pose.position.y = msg.position.y * step
if msg.figure_type == Figure.POLYLINE:
length = msg.size[0] * step
angle = radians(msg.angle[0])
# middle point for polyline ()
marker.pose.position.x += length/2*cos(angle)
marker.pose.position.y += length/2*sin(angle)
figure = self.line_eq(length, angle)
elif msg.figure_type == Figure.CIRCLE:
radius = msg.size[0] * step
figure = self.circle_eq(radius, 0.0, 2*pi)
elif msg.figure_type == Figure.ARC:
radius = msg.size[0] * step
start_angle = radians(msg.angle[0])
end_angle = radians(msg.angle[1])
figure = self.circle_eq(radius, start_angle, end_angle)
elif msg.figure_type == Figure.OVAL:
wide_size = msg.size[0] * step
height_size = msg.size[1] * step
angle = radians(msg.angle[0])
figure = self.oval_eq(wide_size, height_size, angle)
elif msg.figure_type == Figure.TEXT:
angle = radians(msg.angle[0])
marker.type = Marker.TEXT_VIEW_FACING
marker.scale.z = msg.size[0] * step
# overwrite some marker fields for text
rotation = Rotation.from_euler('xyz', [0, 0, angle], degrees=False)
marker.pose.orientation = Quaternion(*rotation.as_quat())
marker.text = msg.text
if msg.figure_type != Figure.TEXT:
marker.points = figure
marker.ns = self.active_cs+ "/" + msg.projection_group + self.figures_list[msg.figure_type] + msg.figure_name
self.pe_marker_array.markers.append(marker)
def line_eq(self, length, ang):
"""Calculate points array of a new line from its parametrics equation.
Args:
length (float): line length
ang (float): line angle slope
Returns:
list: list of calculated points
"""
line_points = []
delta_th = 0.01
for th in np.arange(-length/2, length/2, delta_th):
point = Point()
point.x = th * cos(ang)
point.y = th * sin(ang)
line_points.append(point)
return line_points
def circle_eq(self, radius, start_ang, end_ang):
"""Calculate points array of a new circle or arc from its parametrics equation.
Args:
radius (float): circle or arc radius
start_ang (float): arc start angle
end_ang (float): arc end angle
Returns:
list: list of calculated points
"""
circle_points = []
delta_th = 0.01
for th in np.arange(start_ang, end_ang, delta_th):
point = Point()
point.x = radius * sin(th)
point.y = radius * cos(th)
circle_points.append(point)
return circle_points
def oval_eq(self, a, b, angle):
"""Calculate points array of a new ellipse from its parametrics equation.
Args:
a (float): ellipse width
b (float): ellipse height
angle (float): rotation angle
Returns:
list: list of calculated points
"""
oval_points = []
delta_th = 0.01
for th in np.arange(0.0, 2*pi+delta_th, delta_th):
point = Point()
point.x = a * cos(th)*cos(angle) - b * sin(th)*sin(angle)
point.y = a * cos(th)*sin(angle) + b * sin(th)*cos(angle)
oval_points.append(point)
return oval_points
def hide_proj_elem_cb(self, req):
"""Callback of ROS service to hide specific projection element from active reference system.
Args:
req (object): object with the necessary parameters to identify a projection element
Returns:
tuple[bool, str]: the first value in the returned tuple is a bool success value and the second value in the tuple
is an information message string
"""
for i, marker in enumerate(self.pe_marker_array.markers):
if marker.ns.find(req.projection_group)>-1 and marker.ns.find(req.figure_name)>-1:
self.pe_marker_array.markers[i].color.a = 0
return ProjectionElementResponse(True, "Figure hidden correctly.")
return ProjectionElementResponse(False, "Figure not found.")
def unhide_proj_elem_cb(self, req):
"""Callback of ROS service to unhide specific projection element from active reference system.
Args:
req (object): object with the necessary parameters to identify a projection element
Returns:
tuple[bool, str]: the first value in the returned tuple is a bool success value and the second value in the tuple
is an information message string
"""
for i, marker in enumerate(self.pe_marker_array.markers):
if marker.ns.find(req.projection_group)>-1 and marker.ns.find(req.figure_name)>-1:
self.pe_marker_array.markers[i].color.a = 1
return ProjectionElementResponse(True, "Figure unhidden correctly.")
return ProjectionElementResponse(False, "Figure not found.")
def remove_proj_elem_cb(self, req):
"""Callback of ROS service to remove specific figure from active reference system.
Args:
req (object): object with the necessary parameters to identify a projection element
Returns:
tuple[bool, str]: the first value in the returned tuple is a bool success value and the second value in the tuple
is an information message string
"""
for i, marker in enumerate(self.pe_marker_array.markers):
if marker.ns.find(req.projection_group)>-1 and marker.ns.find(req.figure_name)>-1:
self.pe_marker_array.markers.pop(i)
return ProjectionElementResponse(True, "Figure removed correctly.")
return ProjectionElementResponse(False, "Figure not found.")
    def translate(self, marker, dx=0, dy=0, dz=0):
        """Translate marker from one position to another.

        Args:
            marker (object): marker object to translate
            dx (float): offset in x direction
            dy (float): offset in y direction
            dz (float): offset in z direction
        """
        # hide while moving, then show again at the new position
        marker.action = Marker.DELETE
        marker.pose.position.x += dx
        marker.pose.position.y += dy
        marker.pose.position.z += dz
        marker.action = Marker.ADD
    def compute_step(self):
        """Calculate the resolution step of the active reference system.

        Returns:
            float: meters per user unit — the real x-extent between projector
            points P0 and P1 (published in mm on the parameter server, hence
            the 0.001 factor) divided by the coordinate system resolution
        """
        res = rospy.get_param('/zlaser/coordinate_system_resolution', 1000)
        P0_x = rospy.get_param('/zlaser/P0/x', 1000) * 0.001
        P1_x = rospy.get_param('/zlaser/P1/x', 1000) * 0.001
        step = (P1_x - P0_x)/res
        return step
    def rotate(self, marker, angle):
        """Rotate marker an angle around the z axis.

        Args:
            marker (object): marker object to rotate
            angle (float): rotation angle [degrees]
        """
        # hide while rotating, then show again with the new orientation
        marker.action = Marker.DELETE
        q = marker.pose.orientation
        rotation = Rotation.from_euler('xyz', [0, 0, angle], degrees=True)
        q_rot = Quaternion(*rotation.as_quat())
        # compose: the new rotation is applied on top of the current orientation
        # (order matters — quaternion multiplication is not commutative)
        marker.pose.orientation = self.quat_multiply(q_rot, q)
        marker.action = Marker.ADD
    def quat_multiply(self, q1, q0):
        """Calculate the Hamilton product q1 * q0 of two quaternions.

        Args:
            q1 (object): left-hand quaternion with x, y, z, w fields
            q0 (object): right-hand quaternion with x, y, z, w fields

        Returns:
            object: object with the x,y,z,w values of the result quaternion
        """
        # rows are the x, y, z, w components of the Hamilton product
        return Quaternion( q1.x*q0.w + q1.y*q0.z - q1.z*q0.y + q1.w*q0.x,
                          -q1.x*q0.z + q1.y*q0.w + q1.z*q0.x + q1.w*q0.y,
                           q1.x*q0.y - q1.y*q0.x + q1.z*q0.w + q1.w*q0.z,
                          -q1.x*q0.x - q1.y*q0.y - q1.z*q0.z + q1.w*q0.w)
    def scale(self, marker, factor, proj_elem_params):
        """Scale size of marker by redefining figure equation.

        Args:
            marker (object): marker object to scale
            factor (float): scale factor (compounds with previous calls)
            proj_elem_params (object): object with the parameters of the projection element to transform
        """
        marker.action = Marker.DELETE
        self.scale_factor *= factor  # cumulative factor across successive calls
        # NOTE(review): sizes are converted with a fixed 0.001 (mm -> m) here,
        # while add_fig_cb() used compute_step() at creation time — confirm intended.
        size = proj_elem_params.size[0]*0.001 * self.scale_factor
        angle = radians(proj_elem_params.angle[0])
        if proj_elem_params.figure_type == Figure.POLYLINE:
            figure = self.line_eq(size, angle) # size is line length
        elif proj_elem_params.figure_type == Figure.CIRCLE:
            figure = self.circle_eq(size, 0.0, 2*pi) # size is circle radius
        elif proj_elem_params.figure_type == Figure.ARC:
            end_ang = radians(proj_elem_params.angle[1])
            figure = self.circle_eq(size, angle, end_ang) # size is arc radius
        elif proj_elem_params.figure_type == Figure.OVAL:
            height_size = proj_elem_params.size[1]*0.001 * self.scale_factor
            figure = self.oval_eq(size, height_size, angle) # size is oval width
        elif proj_elem_params.figure_type == Figure.TEXT:
            # NOTE(review): this compounds on the CURRENT scale.z and multiplies by
            # 0.001 on every call, unlike the other branches which recompute from
            # proj_elem_params — looks like a bug (text shrinks 1000x per call); verify.
            marker.scale.z = marker.scale.z*0.001 * self.scale_factor
            figure = []
        marker.points = figure
        marker.action = Marker.ADD
    def on_press(self, key, marker, proj_elem_params):
        """Check if the key pressed is one of the list and execute the respective tasks.

        Arrows translate the marker one resolution step, +/- scale it by a factor
        of 2, ctrl+arrows rotate it 1 degree, and ESC hides it.

        Args:
            key (enum): key pressed
            marker (object): monitored marker object
            proj_elem_params (object): object with the parameters of the projection element to monitor
        """
        # track only keys that belong to one of the known combinations
        if any([key in COMBO for COMBO in self.keyboard_params.COMBINATIONS]):
            self.current.add(key)
        # dispatch on the full set of currently held keys
        if self.current == self.keyboard_params.KEY_UP:
            rospy.loginfo("VIZ_KEY_UP")
            self.translate(marker, dy=self.compute_step())
        elif self.current == self.keyboard_params.KEY_DOWN:
            rospy.loginfo("VIZ_KEY_DOWN")
            self.translate(marker, dy=-self.compute_step())
        elif self.current == self.keyboard_params.KEY_LEFT:
            rospy.loginfo("VIZ_KEY_LEFT")
            self.translate(marker, dx=-self.compute_step())
        elif self.current == self.keyboard_params.KEY_RIGHT:
            rospy.loginfo("VIZ_KEY_RIGHT")
            self.translate(marker, dx=self.compute_step())
        elif self.current == self.keyboard_params.KEY_PLUS:
            rospy.loginfo("VIZ_KEY_PLUS")
            self.scale(marker, 2, proj_elem_params)
        elif self.current == self.keyboard_params.KEY_MINUS:
            rospy.loginfo("VIZ_KEY_MINUS")
            self.scale(marker, 0.5, proj_elem_params)
        elif self.current == self.keyboard_params.CTRL_LEFT:
            rospy.loginfo("VIZ_CTRL_LEFT")
            self.rotate(marker, 1)
        elif self.current == self.keyboard_params.CTRL_RIGHT:
            rospy.loginfo("VIZ_CTRL_RIGHT")
            self.rotate(marker, -1)
        elif self.current == self.keyboard_params.ESC:
            rospy.loginfo("VIZ_ESC")
            marker.action = Marker.DELETE
def on_release(self, key):
"""Remove current stored key, on release.
Args:
key (enum): key pressed
"""
if any([key in COMBO for COMBO in self.keyboard_params.COMBINATIONS]):
if self.current == self.keyboard_params.ESC:
return False # stop listener
self.current.remove(key)
def marker_from_name(self, name):
"""Find marker object in the markers array with the name.
Args:
name (str): name of the marker
Returns:
object: marker found
"""
for marker in self.pe_marker_array.markers:
if name in marker.ns:
marker.action = Marker.ADD
return marker
return []
    def init_keyboard_listener_cb(self, msg):
        """Start keyboard listener for monitoring key presses.

        Args:
            msg (object): object with the necessary parameters to identify a projection element

        Returns:
            tuple[bool, str]: the first value in the returned tuple is a bool success value and the second value in the tuple
            is an information message string.
            NOTE(review): this is wired as a topic callback (see open_services),
            so ROS ignores the return value — kept for symmetry with the services.
        """
        # local import: pynput is only needed when monitoring is requested
        from pynput import keyboard
        self.keyboard_params = KeyboardParameters()
        self.current = set()  # keys currently held down, consumed by on_press/on_release
        # rebuild the marker namespace the same way add_fig_cb composed it
        name = self.active_cs + "/" + msg.projection_group + self.figures_list[msg.figure_type] + msg.figure_name
        marker = self.marker_from_name(name)
        if not marker:
            return ProjectionElementResponse(False, "Marker not found.")
        try:
            # bind the monitored marker and its parameters into the key handler
            on_press_handler = lambda event: self.on_press(event, marker=marker, proj_elem_params=msg)
            listener = keyboard.Listener(on_press = on_press_handler,
                                         on_release = self.on_release)
            listener.start()
            return ProjectionElementResponse(True, "Viz monitor.")
        except Exception as e:
            rospy.logerr(e)
            return ProjectionElementResponse(False, "Error viz monitor.")
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This module contains utility classes and methods to run a projections visualizer."""
import rospy
import math
import numpy as np
from math import sin, cos, pi, radians
from scipy.spatial.transform import Rotation
from z_laser_zlp1.zlp_keyboard import KeyboardParameters
from z_laser_zlp1.zlp_utils import CoordinateSystemParameters, ProjectionElementParameters
from geometry_msgs.msg import Point, Quaternion, Vector3, Pose, Quaternion
from visualization_msgs.msg import Marker, MarkerArray
from std_srvs.srv import Trigger, TriggerResponse
from z_laser_msgs.msg import Figure
from z_laser_msgs.srv import CoordinateSystem, CoordinateSystemResponse
from z_laser_msgs.srv import CoordinateSystemName, CoordinateSystemNameResponse
from z_laser_msgs.srv import CoordinateSystemShow, CoordinateSystemShowResponse
from z_laser_msgs.srv import CoordinateSystemList, CoordinateSystemListResponse
from z_laser_msgs.srv import ProjectionElement, ProjectionElementResponse
class ZLPVisualizer(object):
"""This class implement the functions related with projection elements.
Attributes:
cs_marker_array (list): coordinate systems' markers list (origin axes and frame of each system)
pe_marker_array (list): markers list of projection elements
active_cs (str): name of active reference system
cs_reference (str): auxiliar variable to differentiate and find the origin axes and frames markers
STD_WAIT_TIME (int): predefined number of projection seconds in reference system definition
figures_list (list): list with the figures' identificator names
"""
def __init__(self):
"""Initialize the ZLPVisualizer object."""
self.cs_marker_array = MarkerArray()
self.pe_marker_array = MarkerArray()
self.active_cs = ""
self.cs_reference = ""
self.STD_WAIT_TIME = CoordinateSystemParameters().DEFAULT_SHOW_TIME
self.figures_list = ProjectionElementParameters().figures_list
self.scale_factor = 1
def open_services(self):
"""Open ROS services for visualizer."""
self.start_proj = rospy.Service('projection_start', Trigger, self.projection_start_cb)
self.stop_proj = rospy.Service('projection_stop', Trigger, self.projection_stop_cb)
self.manual_cs = rospy.Service('define_coordinate_system', CoordinateSystem, self.manual_define_coord_sys_cb)
self.set_cs = rospy.Service('set_coordinate_system', CoordinateSystemName, self.set_coord_sys_cb)
self.rem_cs = rospy.Service('remove_coordinate_system', CoordinateSystemName, self.remove_coord_sys_cb)
self.show_cs = rospy.Service('show_active_coordinate_system', CoordinateSystemShow, self.show_coord_sys_cb)
self.hide_proj_elem = rospy.Service('hide_projection_element', ProjectionElement, self.hide_proj_elem_cb)
self.unhide_proj_elem = rospy.Service('unhide_projection_element', ProjectionElement, self.unhide_proj_elem_cb)
self.remove_proj_elem = rospy.Service('remove_projection_element', ProjectionElement, self.remove_proj_elem_cb)
self.add_proj_elem = rospy.Subscriber("add_projection_element", Figure, self.add_fig_cb)
self.monit_proj_elem = rospy.Subscriber("monitor_projection_element", Figure, self.init_keyboard_listener_cb)
def projection_start_cb(self, req):
"""Callback of ROS service to start projection of elements related to the active reference system on the surface.
Args:
req (object): trigger request ROS service object
Returns:
tuple[bool, str]: the first value in the returned tuple is a bool success value and the second value in the tuple
is an information message string
"""
if not self.active_cs:
return TriggerResponse(False, "No Coordinate System set as active.")
for i, marker in enumerate(self.pe_marker_array.markers):
if marker.ns.find(self.active_cs)>-1:
self.pe_marker_array.markers[i].action = Marker.ADD
return TriggerResponse(True, "Projection started.")
def projection_stop_cb(self, req):
"""Callback of ROS service to stop projection of all elements.
Args:
req (object): trigger request ROS service object
Returns:
tuple[bool, str]: the first value in the returned tuple is a bool success value and the second value in the tuple
is an information message string
"""
for i in range(len(self.pe_marker_array.markers)):
self.pe_marker_array.markers[i].action = Marker.DELETE
return TriggerResponse(True, "Projection stopped.")
def manual_define_coord_sys_cb(self, req):
"""Callback of ROS service to define a new reference system, stating the points coordinates manually by the user.
Args:
req (object): object with the necessary info to define a new coordinate system
Returns:
tuple[list, bool, str]: the first value in the returned tuple is a list of the user reference points T0, T1, T2, T3,
the second is a bool success value and the third s an information message string
"""
for marker in self.cs_marker_array.markers:
if req.name in marker.ns:
return CoordinateSystemResponse([], False, "Coordinate System already exists.")
self.active_cs = req.name
axis_x_marker, axis_y_marker = self.coord_sys_axes(req)
self.cs_marker_array.markers.append(axis_x_marker)
self.cs_marker_array.markers.append(axis_y_marker)
self.cs_marker_array.markers.append(self.coord_sys_frame(req))
self.cs_reference = "_origin"
self.timer_secs = self.STD_WAIT_TIME
self.update_cs_markers()
return CoordinateSystemResponse([], True, "Coordinate System added manually.")
def timer_cb(self, timer):
"""Timer for controlling the projection pause between the reference systems's different markers."""
for i in range(len(self.cs_marker_array.markers)):
self.cs_marker_array.markers[i].action = Marker.DELETE
self.update_cs_markers()
def update_cs_markers(self):
"""Change projection between origin axes and frame markers."""
for marker in self.cs_marker_array.markers:
if (self.active_cs + self.cs_reference) in marker.ns:
marker.action = Marker.ADD
if self.cs_reference in ["_origin","_frame"]:
rospy.Timer(rospy.Duration(self.timer_secs), self.timer_cb, oneshot=True)
self.cs_reference = "_frame" if self.cs_reference == "_origin" else "empty"
def base_marker(self, cs_name):
"""Initialize the common and basic parameters of a marker.
Args:
cs_name (object): name of the reference system with which the marker is associated
Returns:
object: marker initialized
"""
# define marker common fields
marker = Marker()
marker.type = Marker.LINE_STRIP
marker.action = Marker.DELETE
marker.scale.x = 0.01 # Vector3(0.01, 0.01, 0)
marker.color.g = 1.0
marker.color.a = 1.0
marker.header.frame_id = cs_name
marker.pose.orientation = Quaternion(0,0,0,1)
return marker
def coord_sys_axes(self, cs_points):
"""Create the origin axes markers.
Args:
cs_points (object): object with the x,y,z position of the reference points from the reference system
Returns:
tuple[object, object]: the first value in the returned tuple is the x-axis marker and
the second is the y-axis marker
"""
# read axes points
orig = Point() # axes origin point
orig.x = cs_points.P[0].x * 0.001
orig.y = cs_points.P[0].y * 0.001
axis_x = Point() # axis x line end point
axis_x.x = cs_points.P[1].x * 0.001
axis_x.y = orig.y
axis_y = Point() # axis y line end point
axis_y.x = orig.x
axis_y.y = cs_points.P[2].y * 0.001
# create one marker for each axis line
# and append the correspondent points
axis_x_marker = self.base_marker("[P]")
axis_y_marker = self.base_marker("[P]")
axis_x_marker.points.append(orig)
axis_x_marker.points.append(axis_x)
axis_y_marker.points.append(orig)
axis_y_marker.points.append(axis_y)
# update frame and namespace
axis_x_marker.ns = cs_points.name + "_origin/polyline/axis_x"
axis_y_marker.ns = cs_points.name + "_origin/polyline/axis_y"
return axis_x_marker, axis_y_marker
def coord_sys_frame(self, cs_points):
"""Create the frame marker.
Args:
cs_points (object): object with the x,y,z position of the reference points from the reference system
Returns:
object: frame marker
"""
frame = self.base_marker("[P]")
# read frame points
for i in [0,1,2,3,0]:
point = Point()
point.x = cs_points.P[i].x * 0.001
point.y = cs_points.P[i].y * 0.001
frame.points.append(point)
frame.ns = cs_points.name + "_frame/polyline/T1_T2_T3_T4"
return frame
def set_coord_sys_cb(self, req):
"""Callback of ROS service to set the indicated reference system as 'active reference system'.
Args:
req (object): object with the necessary parameters to identify a coordinate system
Returns:
tuple[bool, str]: the first value in the returned tuple is a bool success value and the second value in the tuple
is an information message string
"""
self.active_cs = req.name
return CoordinateSystemNameResponse(True, "Coordinate System set as active.")
def show_coord_sys_cb(self, req):
"""Callback of ROS service to project reference points, origin axes and frame of the active reference system.
Args:
req (object): object with the necessary parameters to identify a reference system
Returns:
tuple[bool, str]: the first value in the returned tuple is a bool success value and the second value in the tuple
is an information message string
"""
if not self.active_cs:
return CoordinateSystemShowResponse(False, "None Coordinate System is set.")
if not req.secs > 0:
return CoordinateSystemShowResponse(False, "Seconds projection is set to 0.")
self.timer_secs = req.secs
self.cs_reference = "_origin"
self.update_cs_markers()
return CoordinateSystemShowResponse(True, "Active Coordinate System showed correctly.")
def remove_coord_sys_cb(self, req):
"""Callback of ROS service to remove a reference system.
Args:
req (object): object with the necessary parameters to identify a reference system
Returns:
tuple[bool, str]: the first value in the returned tuple is a bool success value and the second value in the tuple
is an information message string
"""
if any(req.name in cs.ns for cs in self.cs_marker_array.markers):
self.cs_marker_array.markers = [cs for cs in self.cs_marker_array.markers if cs.ns.find(req.name)==-1]
self.pe_marker_array.markers = [pe for pe in self.pe_marker_array.markers if pe.ns.find(req.name)==-1]
if req.name == self.active_cs:
self.active_cs = ""
return CoordinateSystemNameResponse(True, "Coordinate System removed.")
else:
return CoordinateSystemNameResponse(False, "Coordinate System does not exist.")
    def add_fig_cb(self, msg):
        """Callback of ROS topic to define a new projection element.

        Builds an RViz marker in the active coordinate system from the figure
        description (type, position, size, angle) and appends it to the
        projection-element marker array.

        Args:
            msg (object): object with the necessary parameters to define a new projection element
        """
        # define marker common fields
        marker = self.base_marker(self.active_cs)
        step = self.compute_step()
        # position arrives in user units; convert to metres via the system resolution
        marker.pose.position.x = msg.position.x * step
        marker.pose.position.y = msg.position.y * step
        if msg.figure_type == Figure.POLYLINE:
            length = msg.size[0] * step
            angle = radians(msg.angle[0])
            # middle point for polyline ()
            marker.pose.position.x += length/2*cos(angle)
            marker.pose.position.y += length/2*sin(angle)
            figure = self.line_eq(length, angle)
        elif msg.figure_type == Figure.CIRCLE:
            radius = msg.size[0] * step
            figure = self.circle_eq(radius, 0.0, 2*pi)
        elif msg.figure_type == Figure.ARC:
            radius = msg.size[0] * step
            start_angle = radians(msg.angle[0])
            end_angle = radians(msg.angle[1])
            figure = self.circle_eq(radius, start_angle, end_angle)
        elif msg.figure_type == Figure.OVAL:
            wide_size = msg.size[0] * step
            height_size = msg.size[1] * step
            angle = radians(msg.angle[0])
            figure = self.oval_eq(wide_size, height_size, angle)
        elif msg.figure_type == Figure.TEXT:
            angle = radians(msg.angle[0])
            marker.type = Marker.TEXT_VIEW_FACING
            marker.scale.z = msg.size[0] * step
            # overwrite some marker fields for text
            rotation = Rotation.from_euler('xyz', [0, 0, angle], degrees=False)
            marker.pose.orientation = Quaternion(*rotation.as_quat())
            marker.text = msg.text
        # NOTE(review): if msg.figure_type is none of the five handled values,
        # `figure` is unbound here and the next line raises NameError — confirm
        # upstream only publishes known figure types.
        if msg.figure_type != Figure.TEXT:
            marker.points = figure
        marker.ns = self.active_cs+ "/" + msg.projection_group + self.figures_list[msg.figure_type] + msg.figure_name
        self.pe_marker_array.markers.append(marker)
def line_eq(self, length, ang):
"""Calculate points array of a new line from its parametrics equation.
Args:
length (float): line length
ang (float): line angle slope
Returns:
list: list of calculated points
"""
line_points = []
delta_th = 0.01
for th in np.arange(-length/2, length/2, delta_th):
point = Point()
point.x = th * cos(ang)
point.y = th * sin(ang)
line_points.append(point)
return line_points
def circle_eq(self, radius, start_ang, end_ang):
"""Calculate points array of a new circle or arc from its parametrics equation.
Args:
radius (float): circle or arc radius
start_ang (float): arc start angle
end_ang (float): arc end angle
Returns:
list: list of calculated points
"""
circle_points = []
delta_th = 0.01
for th in np.arange(start_ang, end_ang, delta_th):
point = Point()
point.x = radius * sin(th)
point.y = radius * cos(th)
circle_points.append(point)
return circle_points
def oval_eq(self, a, b, angle):
"""Calculate points array of a new ellipse from its parametrics equation.
Args:
a (float): ellipse width
b (float): ellipse height
angle (float): rotation angle
Returns:
list: list of calculated points
"""
oval_points = []
delta_th = 0.01
for th in np.arange(0.0, 2*pi+delta_th, delta_th):
point = Point()
point.x = a * cos(th)*cos(angle) - b * sin(th)*sin(angle)
point.y = a * cos(th)*sin(angle) + b * sin(th)*cos(angle)
oval_points.append(point)
return oval_points
def hide_proj_elem_cb(self, req):
"""Callback of ROS service to hide specific projection element from active reference system.
Args:
req (object): object with the necessary parameters to identify a projection element
Returns:
tuple[bool, str]: the first value in the returned tuple is a bool success value and the second value in the tuple
is an information message string
"""
for i, marker in enumerate(self.pe_marker_array.markers):
if marker.ns.find(req.projection_group)>-1 and marker.ns.find(req.figure_name)>-1:
self.pe_marker_array.markers[i].color.a = 0
return ProjectionElementResponse(True, "Figure hidden correctly.")
return ProjectionElementResponse(False, "Figure not found.")
def unhide_proj_elem_cb(self, req):
"""Callback of ROS service to unhide specific projection element from active reference system.
Args:
req (object): object with the necessary parameters to identify a projection element
Returns:
tuple[bool, str]: the first value in the returned tuple is a bool success value and the second value in the tuple
is an information message string
"""
for i, marker in enumerate(self.pe_marker_array.markers):
if marker.ns.find(req.projection_group)>-1 and marker.ns.find(req.figure_name)>-1:
self.pe_marker_array.markers[i].color.a = 1
return ProjectionElementResponse(True, "Figure unhidden correctly.")
return ProjectionElementResponse(False, "Figure not found.")
def remove_proj_elem_cb(self, req):
"""Callback of ROS service to remove specific figure from active reference system.
Args:
req (object): object with the necessary parameters to identify a projection element
Returns:
tuple[bool, str]: the first value in the returned tuple is a bool success value and the second value in the tuple
is an information message string
"""
for i, marker in enumerate(self.pe_marker_array.markers):
if marker.ns.find(req.projection_group)>-1 and marker.ns.find(req.figure_name)>-1:
self.pe_marker_array.markers.pop(i)
return ProjectionElementResponse(True, "Figure removed correctly.")
return ProjectionElementResponse(False, "Figure not found.")
def translate(self, marker, dx=0, dy=0, dz=0):
"""Translate marker from one position to another.
Args:
marker (object): marker object to translate
dx (float): offset in x direction
dy (float): offset in y direction
dz (float): offset in z direction
"""
marker.action = Marker.DELETE
marker.pose.position.x += dx
marker.pose.position.y += dy
marker.pose.position.z += dz
marker.action = Marker.ADD
def compute_step(self):
"""Calculate the resolution step of the active reference system.
Returns:
float: resolution step (real dimension system {P} in mm / user dimension system {T})
"""
res = rospy.get_param('/zlaser/coordinate_system_resolution', 1000)
P0_x = rospy.get_param('/zlaser/P0/x', 1000) * 0.001
P1_x = rospy.get_param('/zlaser/P1/x', 1000) * 0.001
step = (P1_x - P0_x)/res
return step
def rotate(self, marker, angle):
"""Rotate marker an angle.
Args:
marker (object): marker object to rotate
angle (float): rotation angle [degrees]
"""
marker.action = Marker.DELETE
q = marker.pose.orientation
rotation = Rotation.from_euler('xyz', [0, 0, angle], degrees=True)
q_rot = Quaternion(*rotation.as_quat())
marker.pose.orientation = self.quat_multiply(q_rot, q)
marker.action = Marker.ADD
def quat_multiply(self, q1, q0):
"""Calculate the product of two quaternions.
Returns:
object: object with the x,y,z,w values of the result quaternion
"""
return Quaternion( q1.x*q0.w + q1.y*q0.z - q1.z*q0.y + q1.w*q0.x,
-q1.x*q0.z + q1.y*q0.w + q1.z*q0.x + q1.w*q0.y,
q1.x*q0.y - q1.y*q0.x + q1.z*q0.w + q1.w*q0.z,
-q1.x*q0.x - q1.y*q0.y - q1.z*q0.z + q1.w*q0.w)
def scale(self, marker, factor, proj_elem_params):
"""Scale size of marker by redefining figure equation.
Args:
marker (object): marker object to scale
factor (float): scale factor
proj_elem_params (object): object with the parameters of the projection element to transform
"""
marker.action = Marker.DELETE
self.scale_factor *= factor # update factor
size = proj_elem_params.size[0]*0.001 * self.scale_factor
angle = radians(proj_elem_params.angle[0])
if proj_elem_params.figure_type == Figure.POLYLINE:
figure = self.line_eq(size, angle) # size is line length
elif proj_elem_params.figure_type == Figure.CIRCLE:
figure = self.circle_eq(size, 0.0, 2*pi) # size is circle radius
elif proj_elem_params.figure_type == Figure.ARC:
end_ang = radians(proj_elem_params.angle[1])
figure = self.circle_eq(size, angle, end_ang) # size is arc radius
elif proj_elem_params.figure_type == Figure.OVAL:
height_size = proj_elem_params.size[1]*0.001 * self.scale_factor
figure = self.oval_eq(size, height_size, angle) # size is oval width
elif proj_elem_params.figure_type == Figure.TEXT:
marker.scale.z = marker.scale.z*0.001 * self.scale_factor
figure = []
marker.points = figure
marker.action = Marker.ADD
    def on_press(self, key, marker, proj_elem_params):
        """Apply the monitor action bound to the pressed key combination.

        Arrow keys translate the marker by one resolution step, +/- scale it by
        2 / 0.5, Ctrl-Left/Right rotate it by +/-1 degree, and ESC hides it.

        Args:
            key (enum): key pressed
            marker (object): monitored marker object
            proj_elem_params (object): object with the parameters of the projection element to monitor
        """
        # track only keys that belong to some known combination
        if any([key in COMBO for COMBO in self.keyboard_params.COMBINATIONS]):
            self.current.add(key)
            # compare the set of currently held keys against each binding
            if self.current == self.keyboard_params.KEY_UP:
                rospy.loginfo("VIZ_KEY_UP")
                self.translate(marker, dy=self.compute_step())
            elif self.current == self.keyboard_params.KEY_DOWN:
                rospy.loginfo("VIZ_KEY_DOWN")
                self.translate(marker, dy=-self.compute_step())
            elif self.current == self.keyboard_params.KEY_LEFT:
                rospy.loginfo("VIZ_KEY_LEFT")
                self.translate(marker, dx=-self.compute_step())
            elif self.current == self.keyboard_params.KEY_RIGHT:
                rospy.loginfo("VIZ_KEY_RIGHT")
                self.translate(marker, dx=self.compute_step())
            elif self.current == self.keyboard_params.KEY_PLUS:
                rospy.loginfo("VIZ_KEY_PLUS")
                self.scale(marker, 2, proj_elem_params)
            elif self.current == self.keyboard_params.KEY_MINUS:
                rospy.loginfo("VIZ_KEY_MINUS")
                self.scale(marker, 0.5, proj_elem_params)
            elif self.current == self.keyboard_params.CTRL_LEFT:
                rospy.loginfo("VIZ_CTRL_LEFT")
                self.rotate(marker, 1)
            elif self.current == self.keyboard_params.CTRL_RIGHT:
                rospy.loginfo("VIZ_CTRL_RIGHT")
                self.rotate(marker, -1)
            elif self.current == self.keyboard_params.ESC:
                rospy.loginfo("VIZ_ESC")
                # DELETE hides the marker; the listener itself stops in on_release
                marker.action = Marker.DELETE
def on_release(self, key):
"""Remove current stored key, on release.
Args:
key (enum): key pressed
"""
if any([key in COMBO for COMBO in self.keyboard_params.COMBINATIONS]):
if self.current == self.keyboard_params.ESC:
return False # stop listener
self.current.remove(key)
def marker_from_name(self, name):
"""Find marker object in the markers array with the name.
Args:
name (str): name of the marker
Returns:
object: marker found
"""
for marker in self.pe_marker_array.markers:
if name in marker.ns:
marker.action = Marker.ADD
return marker
return []
    def init_keyboard_listener_cb(self, msg):
        """Start a keyboard listener to interactively move/scale/rotate a marker.

        Args:
            msg (object): object with the necessary parameters to identify a projection element

        Returns:
            tuple[bool, str]: the first value in the returned tuple is a bool success value and the second value in the tuple
            is an information message string
        """
        # imported lazily so the rest of the node works without pynput installed
        from pynput import keyboard
        self.keyboard_params = KeyboardParameters()
        self.current = set()  # keys currently held down
        name = self.active_cs + "/" + msg.projection_group + self.figures_list[msg.figure_type] + msg.figure_name
        marker = self.marker_from_name(name)
        if not marker:
            return ProjectionElementResponse(False, "Marker not found.")
        try:
            # bind the target marker and its parameters into the key-press handler
            on_press_handler = lambda event: self.on_press(event, marker=marker, proj_elem_params=msg)
            # the listener runs in its own thread until on_release returns False (ESC)
            listener = keyboard.Listener(on_press = on_press_handler,
                                        on_release = self.on_release)
            listener.start()
            return ProjectionElementResponse(True, "Viz monitor.")
        except Exception as e:
            rospy.logerr(e)
            return ProjectionElementResponse(False, "Error viz monitor.")
60cc0c50324debf1726abc76ee4503a50c63c45b | 822 | py | Python | pyladies2006/app/storage/quantify.py | Alisa-lisa/conferences | d93014747dc9d18493295dbc33fa51c8fb9467dc | [
"MIT"
] | 5 | 2019-07-06T07:22:57.000Z | 2020-12-19T22:49:35.000Z | pyladies2006/app/storage/quantify.py | pindash/conferences | 87fcb9f595a244408c015c66283c337d124b358d | [
"MIT"
] | null | null | null | pyladies2006/app/storage/quantify.py | pindash/conferences | 87fcb9f595a244408c015c66283c337d124b358d | [
"MIT"
] | 3 | 2020-06-07T14:58:24.000Z | 2020-11-24T22:51:14.000Z | from storage import db
from storage.user import User
| 43.263158 | 94 | 0.710462 | from storage import db
from storage.user import User
class Quantify(db.Model):
    """One work/life tracking entry: a timestamped activity with mood scores."""
    __tablename__ = "worklife"
    id = db.Column(db.Integer, primary_key=True, unique=True)
    usr_id = db.Column(db.Integer, db.ForeignKey('users.id'), nullable=False)  # owner of the entry
    timestamp = db.Column(db.TIMESTAMP, nullable=False)  # when the activity took place
    activity = db.Column(db.String, nullable=False)  # free-form activity label
    category = db.Column(db.String, nullable=False) # mastery/fun/chores
    stress = db.Column(db.Integer, nullable=True) # on the scale 1 to 10 how stressed are you
    happiness = db.Column(db.Integer, nullable=True) # on the scale 1 to 10 how happy are you
    def to_list(self):
        """ representation of one object in a list format for easier csv write """
        # note: id and usr_id are intentionally left out of the CSV row
        return [self.timestamp, self.activity, self.category, self.stress, self.happiness]
| 0 | 745 | 23 |
93784d658a8932e10e2b9b65c44f56cbe38aa0e9 | 6,302 | py | Python | models/backbones/wideresnet.py | wutong16/Adversarial_Long-Tail | ab2f3a792aede2bd2a3e57657c787ec542165be1 | [
"FSFAP"
] | 78 | 2021-04-05T15:58:03.000Z | 2022-03-30T02:42:42.000Z | models/backbones/wideresnet.py | wutong16/Adversarial_Long-Tail | ab2f3a792aede2bd2a3e57657c787ec542165be1 | [
"FSFAP"
] | 1 | 2022-03-08T04:11:02.000Z | 2022-03-18T04:11:17.000Z | models/backbones/wideresnet.py | wutong16/Adversarial_Long-Tail | ab2f3a792aede2bd2a3e57657c787ec542165be1 | [
"FSFAP"
] | 9 | 2021-04-13T09:51:51.000Z | 2022-03-09T02:45:20.000Z | import math
import torch
import torch.nn as nn
import torch.nn.functional as F
from .non_local import NonLocal_Direct
from .custom_activations import build_custom_activation
from .custom_norm import select_norm
| 37.963855 | 131 | 0.608854 | import math
import torch
import torch.nn as nn
import torch.nn.functional as F
from .non_local import NonLocal_Direct
from .custom_activations import build_custom_activation
from .custom_norm import select_norm
class BasicBlock(nn.Module):
    """Pre-activation wide-resnet basic block: BN -> act -> conv, twice, plus shortcut."""
    def __init__(self, in_planes, out_planes, stride, dropRate=0.0, activation_name='relu'):
        super(BasicBlock, self).__init__()
        self.bn1 = nn.BatchNorm2d(in_planes)
        # self.relu1 = nn.ReLU(inplace=True)
        self.relu1 = build_custom_activation(activation_name)
        self.conv1 = nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride,
                               padding=1, bias=False)
        self.bn2 = nn.BatchNorm2d(out_planes)
        # self.relu2 = nn.ReLU(inplace=True)
        self.relu2 = build_custom_activation(activation_name)
        self.conv2 = nn.Conv2d(out_planes, out_planes, kernel_size=3, stride=1,
                               padding=1, bias=False)
        self.droprate = dropRate
        self.equalInOut = (in_planes == out_planes)
        # 1x1 conv projects the shortcut only when channel count changes (with the same stride)
        self.convShortcut = (not self.equalInOut) and nn.Conv2d(in_planes, out_planes, kernel_size=1, stride=stride,
                                                                padding=0, bias=False) or None
    def forward(self, x):
        # when channels change, the pre-activated tensor is rebound to `x` so it
        # feeds both the residual branch and the projected shortcut below
        if not self.equalInOut:
            x = self.relu1(self.bn1(x))
        else:
            out = self.relu1(self.bn1(x))
        out = self.relu2(self.bn2(self.conv1(out if self.equalInOut else x)))
        if self.droprate > 0:
            out = F.dropout(out, p=self.droprate, training=self.training)
        out = self.conv2(out)
        # residual add: identity shortcut when shapes match, 1x1 projection otherwise
        return torch.add(x if self.equalInOut else self.convShortcut(x), out)
class NetworkBlock(nn.Module):
    """Stack of `nb_layers` residual blocks; only the first block applies the stride."""
    def __init__(self, nb_layers, in_planes, out_planes, block, stride, dropRate=0.0, activation_name='relu'):
        super(NetworkBlock, self).__init__()
        self.layer = self._make_layer(block, in_planes, out_planes, nb_layers, stride, dropRate, activation_name)
    def _make_layer(self, block, in_planes, out_planes, nb_layers, stride, dropRate, activation_name):
        # first block maps in_planes -> out_planes with the requested stride,
        # the remaining blocks keep out_planes -> out_planes with stride 1
        blocks = [
            block(in_planes if i == 0 else out_planes,
                  out_planes,
                  stride if i == 0 else 1,
                  dropRate,
                  activation_name)
            for i in range(int(nb_layers))
        ]
        return nn.Sequential(*blocks)
    def forward(self, x):
        return self.layer(x)
class WideResNet(nn.Module):
    """WideResNet (WRN-depth-widen_factor) with optional non-local denoising
    blocks and configurable activation for the residual blocks.

    The head (final BN+ReLU, average pooling, fc) can be disabled piecewise
    via use_relu / use_pool / use_fc so the network can act as a feature
    extractor instead of a classifier.
    """
    def __init__(self, depth=34, num_classes=10, widen_factor=10, dropRate=0.0, use_relu=True, use_fc=True,
                 denoise=(), activation_name='relu', norm_type='BN', norm_power=0.2, use_pool=True):
        super(WideResNet, self).__init__()
        nChannels = [16, 16 * widen_factor, 32 * widen_factor, 64 * widen_factor]
        assert ((depth - 4) % 6 == 0)
        # blocks per group; NOTE(review): true division gives a float under py3,
        # NetworkBlock compensates with range(int(nb_layers))
        n = (depth - 4) / 6
        block = BasicBlock
        # 1st conv before any network block
        self.conv1 = nn.Conv2d(3, nChannels[0], kernel_size=3, stride=1,
                               padding=1, bias=False)
        # 1st block
        self.block1 = NetworkBlock(n, nChannels[0], nChannels[1], block, 1, dropRate, activation_name)
        # 1st sub-block
        # self.sub_block1 = NetworkBlock(n, nChannels[0], nChannels[1], block, 1, dropRate)
        # 2nd block
        self.block2 = NetworkBlock(n, nChannels[1], nChannels[2], block, 2, dropRate, activation_name)
        # 3rd block
        self.block3 = NetworkBlock(n, nChannels[2], nChannels[3], block, 2, dropRate, activation_name)
        # select norm to be used
        self.normlayer = select_norm(norm_type, norm_power=norm_power)
        # global average pooling and classifier
        # NOTE(review): plain BatchNorm2d is used here; self.normlayer is built
        # but not applied to bn1 (the alternative is commented out below)
        self.bn1 = nn.BatchNorm2d(nChannels[3])
        # self.bn1 = self.normlayer(nChannels[3])
        self.relu = nn.ReLU(inplace=True)
        # self.relu = build_custom_activation(activation_name)
        if use_fc:
            self.fc = nn.Linear(nChannels[3], num_classes)
        self.nChannels = nChannels[3]
        self.use_relu = use_relu
        self.use_fc = use_fc
        self.use_pool = use_pool
        self.denoise = denoise  # iterable of stage tags ('1','2','3') after which to denoise
        if self.denoise:
            self.denoise1 = NonLocal_Direct(in_channels=nChannels[1])
            self.denoise2 = NonLocal_Direct(in_channels=nChannels[2])
            self.denoise3 = NonLocal_Direct(in_channels=nChannels[3])
        self.use_aux_bn = False  # hard-coded switch for an auxiliary final BN
        if self.use_aux_bn:
            self.bn_aux = nn.BatchNorm2d(nChannels[3])
        # He-style init for convs, unit weight / zero bias for BN, zero bias for fc
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
                m.weight.data.normal_(0, math.sqrt(2. / n))
            elif isinstance(m, nn.BatchNorm2d):
                m.weight.data.fill_(1)
                m.bias.data.zero_()
            elif isinstance(m, nn.Linear):
                m.bias.data.zero_()
    def forward(self, x):
        out = self.conv1(x)
        out = self.block1(out)
        if '1' in self.denoise:
            out, _ = self.denoise1(out)
        out = self.block2(out)
        if '2' in self.denoise:
            out, _ = self.denoise2(out)
        out = self.block3(out)
        if '3' in self.denoise:
            out, _ = self.denoise3(out)
        if self.use_relu:
            if self.use_aux_bn:
                out = self.relu(self.bn_aux(out))
            else:
                out = self.relu(self.bn1(out))
        if not self.use_pool:
            # feature-map output: skip pooling, flattening and the classifier
            return out
        out = F.avg_pool2d(out, 8)
        out = out.view(-1, self.nChannels)
        if self.use_fc:
            out = self.fc(out)
        return out
    def free_bn(self):
        # re-enable training mode on all BatchNorm layers
        self.apply(set_bn_train)
    def freeze_bn(self):
        # force eval mode on all BatchNorm layers
        self.apply(set_bn_eval)
    def reset_bn(self):
        # clear all BatchNorm running statistics
        self.apply(reset_bn)
def set_bn_train(m):
    """Put `m` into training mode if it is any BatchNorm variant (for Module.apply)."""
    if 'BatchNorm' in m.__class__.__name__:
        m.train()
def set_bn_eval(m):
    """Put `m` into eval mode if it is any BatchNorm variant (for Module.apply)."""
    if 'BatchNorm' in m.__class__.__name__:
        m.eval()
def reset_bn(m):
    """Clear the running statistics of a BatchNorm module and re-enable tracking."""
    if 'BatchNorm' in m.__class__.__name__:
        m.reset_running_stats()
        m.track_running_stats = True
| 5,657 | 23 | 405 |
4d75a406dd259cb5ae2939364c8f7e00ee590ceb | 17,358 | py | Python | hdf_compass/hdf5rest_model/hdf5dtype.py | HDFGroup/hdf-compass | 050e05aeb491d9d8f79b36529c7c8e9eebf4fd71 | [
"IJG"
] | 124 | 2015-06-12T02:01:12.000Z | 2022-03-26T20:10:01.000Z | hdf_compass/hdf5rest_model/hdf5dtype.py | giumas/hdf-compass | 945d9acd6d4d676db8bf81e0af694b6eefb7dc25 | [
"IJG"
] | 152 | 2015-04-17T04:38:08.000Z | 2022-03-27T16:19:05.000Z | hdf_compass/hdf5rest_model/hdf5dtype.py | giumas/hdf-compass | 945d9acd6d4d676db8bf81e0af694b6eefb7dc25 | [
"IJG"
] | 42 | 2015-04-24T19:28:30.000Z | 2021-11-25T08:01:32.000Z | ##############################################################################
# Copyright by The HDF Group. #
# All rights reserved. #
# #
# This file is part of H5Serv (HDF5 REST Server) Service, Libraries and #
# Utilities. The full HDF5 REST Server copyright notice, including #
# terms governing use, modification, and redistribution, is contained in #
# the file COPYING, which can be found at the root of the source code #
# distribution tree. If you do not have access to this file, you may #
# request a copy from help@hdfgroup.org. #
##############################################################################
"""
This class is used to map between HDF5 type representations and numpy types
"""
import numpy as np
from h5py.h5t import special_dtype
from h5py.h5t import check_dtype
from h5py.h5r import Reference
from h5py.h5r import RegionReference
"""
Convert the given type item to a predefined type string for
predefined integer and floating point types ("H5T_STD_I64LE", et. al).
For compound types, recursively iterate through the typeItem and do same
conversion for fields of the compound type.
"""
"""
Return type info.
For primitive types, return string with typename
For compound types return array of dictionary items
"""
"""
Get element type info - either a complete type or element of a compound type
Returns dictionary
Note: only getTypeItem should call this!
"""
"""
Get Base type info for given type element.
"""
| 37.816993 | 89 | 0.549833 | ##############################################################################
# Copyright by The HDF Group. #
# All rights reserved. #
# #
# This file is part of H5Serv (HDF5 REST Server) Service, Libraries and #
# Utilities. The full HDF5 REST Server copyright notice, including #
# terms governing use, modification, and redistribution, is contained in #
# the file COPYING, which can be found at the root of the source code #
# distribution tree. If you do not have access to this file, you may #
# request a copy from help@hdfgroup.org. #
##############################################################################
"""
This class is used to map between HDF5 type representations and numpy types
"""
import numpy as np
from h5py.h5t import special_dtype
from h5py.h5t import check_dtype
from h5py.h5r import Reference
from h5py.h5r import RegionReference
"""
Convert the given type item to a predefined type string for
predefined integer and floating point types ("H5T_STD_I64LE", et. al).
For compound types, recursively iterate through the typeItem and do same
conversion for fields of the compound type.
"""
def getTypeResponse(typeItem):
    """Convert a full type item into the form returned to REST clients.

    Committed types collapse to a "datatypes/<uuid>" reference; predefined
    integer/float/opaque/reference types keep only their essential keys;
    compound types are rebuilt field by field (recursively); any other class
    is copied minus internal size bookkeeping.
    """
    if 'uuid' in typeItem:
        # committed datatype: reply with a link instead of the full definition
        return 'datatypes/' + typeItem['uuid']
    type_class = typeItem['class']
    if type_class in ('H5T_INTEGER', 'H5T_FLOAT'):
        return {'class': type_class, 'base': typeItem['base']}
    if type_class == 'H5T_OPAQUE':
        return {'class': 'H5T_OPAQUE', 'size': typeItem['size']}
    if type_class == 'H5T_REFERENCE':
        return {'class': 'H5T_REFERENCE', 'base': typeItem['base']}
    if type_class == 'H5T_COMPOUND':
        fields = [{'name': field['name'], 'type': getTypeResponse(field['type'])}
                  for field in typeItem['fields']]
        return {'class': 'H5T_COMPOUND', 'fields': fields}
    # fall-through: copy everything except 'size'/'base_size', recursing into
    # dict-valued 'base' entries (e.g. vlen or array element types)
    response = {}
    for key in typeItem.keys():
        if key == 'base':
            value = typeItem[key]
            response[key] = getTypeResponse(value) if type(value) == dict else value
        elif key not in ('size', 'base_size'):
            response[key] = typeItem[key]
    return response
"""
Return type info.
For primitive types, return string with typename
For compound types return array of dictionary items
"""
def getTypeItem(dt):
    """Build the JSON-style type item for a numpy dtype.

    Scalar dtypes map straight through getTypeElement; structured dtypes
    become an H5T_COMPOUND item with one entry per named field.
    """
    if len(dt) <= 1:
        return getTypeElement(dt)
    fields = [{'name': name, 'type': getTypeElement(dt[name])} for name in dt.names]
    return {'class': 'H5T_COMPOUND', 'fields': fields}
"""
Get element type info - either a complete type or element of a compound type
Returns dictionary
Note: only getTypeItem should call this!
"""
def getTypeElement(dt):
    """Return the type-info dictionary for a scalar (non-compound) numpy dtype.

    Handles h5py special dtypes (variable-length strings/data, references,
    enums) as well as arrays, opaque, fixed strings, integer and float types.

    NOTE(review): the `unicode` comparison below makes this module
    Python-2-only as written.
    """
    if len(dt) > 1:
        raise Exception("unexpected numpy type passed to getTypeElement")
    type_info = {}
    if dt.kind == 'O':
        # numpy object type - assume this is a h5py variable length extension
        h5t_check = check_dtype(vlen=dt)
        if h5t_check is not None:
            if h5t_check == str:
                # variable-length ASCII string
                type_info['class'] = 'H5T_STRING'
                type_info['length'] = 'H5T_VARIABLE'
                type_info['charSet'] = 'H5T_CSET_ASCII'
                type_info['strPad'] = 'H5T_STR_NULLTERM'
            elif h5t_check == unicode:
                # variable-length UTF-8 string
                type_info['class'] = 'H5T_STRING'
                type_info['length'] = 'H5T_VARIABLE'
                type_info['charSet'] = 'H5T_CSET_UTF8'
                type_info['strPad'] = 'H5T_STR_NULLTERM'
            elif type(h5t_check) == np.dtype:
                # vlen data
                type_info['class'] = 'H5T_VLEN'
                type_info['size'] = 'H5T_VARIABLE'
                type_info['base'] = getBaseType(h5t_check)
            else:
                #unknown vlen type
                raise TypeError("Unknown h5py vlen type: " + h5t_check)
        else:
            # check for reference type
            h5t_check = check_dtype(ref=dt)
            if h5t_check is not None:
                type_info['class'] = 'H5T_REFERENCE'
                if h5t_check is Reference:
                    type_info['base'] = 'H5T_STD_REF_OBJ' # objref
                elif h5t_check is RegionReference:
                    type_info['base'] = 'H5T_STD_REF_DSETREG' # region ref
                else:
                    raise TypeError("unexpected reference type")
            else:
                raise TypeError("unknown object type")
    elif dt.kind == 'V':
        baseType = getBaseType(dt)
        if dt.shape:
            # array type
            type_info['dims'] = dt.shape
            type_info['class'] = 'H5T_ARRAY'
            type_info['base'] = baseType
        elif baseType['class'] == 'H5T_OPAQUE':
            # expecting this to be an opaque type
            type_info = baseType # just promote the base type
        else:
            raise TypeError("unexpected Void type")
    elif dt.kind == 'S':
        # String type
        baseType = getBaseType(dt)
        type_info = baseType # just use base type
    elif dt.kind == 'U':
        # Unicode String type
        baseType = getBaseType(dt)
        type_info = baseType # just use base type
    elif dt.kind == 'i' or dt.kind == 'u':
        # integer type
        baseType = getBaseType(dt)
        # numpy integer type - but check to see if this is the hypy
        # enum extension
        mapping = check_dtype(enum=dt)
        if mapping:
            # yes, this is an enum!
            type_info['class'] = 'H5T_ENUM'
            type_info['mapping'] = mapping
            type_info['base'] = baseType
        else:
            type_info = baseType # just use base type
    elif dt.kind == 'f':
        # floating point type
        baseType = getBaseType(dt)
        type_info = baseType # just use base type
    else:
        # unexpected kind
        raise TypeError("unexpected dtype kind: " + dt.kind)
    return type_info
"""
Get Base type info for given type element.
"""
def getBaseType(dt):
    """Map a scalar numpy dtype (or the base of an array/enum dtype) to HDF5 type info.

    Returns a dictionary describing the matching HDF5 predefined type.
    Raises TypeError for structured dtypes and for kinds with no HDF5 mapping.
    """
    if len(dt) > 1:
        raise TypeError("unexpected numpy type passed to getTypeElement")
    int_map = {
        'int8': 'H5T_STD_I8', 'uint8': 'H5T_STD_U8',
        'int16': 'H5T_STD_I16', 'uint16': 'H5T_STD_U16',
        'int32': 'H5T_STD_I32', 'uint32': 'H5T_STD_U32',
        'int64': 'H5T_STD_I64', 'uint64': 'H5T_STD_U64'}
    float_map = {'float32': 'H5T_IEEE_F32', 'float64': 'H5T_IEEE_F64'}
    kind = dt.base.kind
    # explicit big-endian gets a BE suffix; native/little-endian maps to LE
    suffix = 'BE' if dt.base.byteorder == '>' else 'LE'
    info = {}
    if kind == 'S':
        # fixed-length ASCII string, null padded
        info['class'] = 'H5T_STRING'
        info['charSet'] = 'H5T_CSET_ASCII'
        info['length'] = dt.base.itemsize
        info['strPad'] = 'H5T_STR_NULLPAD'
    elif kind == 'V':
        info['class'] = 'H5T_OPAQUE'
        info['size'] = dt.itemsize
        info['tag'] = ''  # todo - determine tag
    elif kind == 'i' or kind == 'u':
        info['class'] = 'H5T_INTEGER'
        if dt.base.name in int_map:
            # maps to one of the HDF5 predefined types
            info['base'] = int_map[dt.base.name] + suffix
    elif kind == 'f':
        info['class'] = 'H5T_FLOAT'
        if dt.base.name in float_map:
            # maps to one of the HDF5 predefined types
            info['base'] = float_map[dt.base.name] + suffix
    elif kind == 'O':
        # object dtype: only h5py object/region references are supported here
        ref_check = check_dtype(ref=dt)
        if ref_check is None:
            raise TypeError("unknown object type")
        info['class'] = 'H5T_REFERENCE'
        if ref_check is Reference:
            info['base'] = 'H5T_STD_REF_OBJ'
        elif ref_check is RegionReference:
            info['base'] = 'H5T_STD_REF_DSETREG'
        else:
            raise TypeError("unexpected reference type")
    else:
        raise TypeError("unexpected dtype base kind: " + dt.base.kind)
    return info
def getNumpyTypename(hdf5TypeName, typeClass=None):
    """Translate an HDF5 predefined type name into a numpy typestring.

    Args:
        hdf5TypeName: name such as "H5T_STD_I32LE" or "H5T_IEEE_F64BE"
        typeClass: optional class filter ('H5T_INTEGER' or 'H5T_FLOAT')

    Returns:
        numpy typestring with an endianness prefix, e.g. "<i4" or ">f8"

    Raises:
        Exception: when the name is too short to be a valid typename
        TypeError: when the name is unknown or does not match typeClass
    """
    int_names = {
        'H5T_STD_I8': 'i1', 'H5T_STD_U8': 'u1',
        'H5T_STD_I16': 'i2', 'H5T_STD_U16': 'u2',
        'H5T_STD_I32': 'i4', 'H5T_STD_U32': 'u4',
        'H5T_STD_I64': 'i8', 'H5T_STD_U64': 'u8'}
    float_names = {'H5T_IEEE_F32': 'f4', 'H5T_IEEE_F64': 'f8'}
    if len(hdf5TypeName) < 3:
        raise Exception("Type Error: invalid typename: ")
    # strip an explicit LE/BE suffix; default byte order is little-endian
    endian = '<'
    key = hdf5TypeName
    if hdf5TypeName.endswith('BE'):
        key = hdf5TypeName[:-2]
        endian = '>'
    elif hdf5TypeName.endswith('LE'):
        key = hdf5TypeName[:-2]
    if key in int_names and typeClass in (None, 'H5T_INTEGER'):
        return endian + int_names[key]
    if key in float_names and typeClass in (None, 'H5T_FLOAT'):
        return endian + float_names[key]
    raise TypeError("Type Error: invalid type")
def createBaseDataType(typeItem):
    """Build a numpy dtype for a non-compound HDF5 type description.

    ``typeItem`` is either a predefined HDF5 type name (str) or a dict with
    a 'class' key plus class-specific keys ('base', 'length', 'charSet',
    'size', 'dims').  Raises TypeError/KeyError on malformed input.

    NOTE(review): Python-2-era code — references the ``unicode`` builtin and
    h5py's ``special_dtype``/``Reference``/``RegionReference``; under
    Python 3 the ``unicode`` comparisons raise NameError for dict input.
    """
    dtRet = None
    if type(typeItem) == str or type(typeItem) == unicode:
        # should be one of the predefined types
        dtName = getNumpyTypename(typeItem)
        dtRet = np.dtype(dtName)
        return dtRet  # return predefined type
    if type(typeItem) != dict:
        raise TypeError("Type Error: invalid type")
    if 'class' not in typeItem:
        raise KeyError("'class' not provided")
    typeClass = typeItem['class']
    # 'dims' (optional) becomes a shape prefix string like "(2, 3)" for
    # numpy's shape-prefixed dtype syntax, e.g. "(2, 3)<i4".
    dims = ''
    if 'dims' in typeItem:
        dims = None
        if type(typeItem['dims']) == int:
            # NOTE(review): (x) is NOT a one-tuple — tuple(dims) below will
            # raise TypeError for an int; (x,) was presumably intended.
            dims = (typeItem['dims'])
        elif type(typeItem['dims']) not in (list, tuple):
            raise TypeError("expected list or integer for dims")
        else:
            dims = typeItem['dims']
        dims = str(tuple(dims))
    if typeClass == 'H5T_INTEGER':
        if 'base' not in typeItem:
            raise KeyError("'base' not provided")
        baseType = getNumpyTypename(typeItem['base'], typeClass='H5T_INTEGER')
        dtRet = np.dtype(dims + baseType)
    elif typeClass == 'H5T_FLOAT':
        if 'base' not in typeItem:
            raise KeyError("'base' not provided")
        baseType = getNumpyTypename(typeItem['base'], typeClass='H5T_FLOAT')
        dtRet = np.dtype(dims + baseType)
    elif typeClass == 'H5T_STRING':
        if 'length' not in typeItem:
            raise KeyError("'length' not provided")
        if 'charSet' not in typeItem:
            raise KeyError("'charSet' not provided")
        if typeItem['length'] == 'H5T_VARIABLE':
            # Variable-length strings map to h5py special vlen dtypes.
            if dims:
                raise TypeError("ArrayType is not supported for variable len types")
            if typeItem['charSet'] == 'H5T_CSET_ASCII':
                dtRet = special_dtype(vlen=str)
            elif typeItem['charSet'] == 'H5T_CSET_UTF8':
                dtRet = special_dtype(vlen=unicode)
            else:
                raise TypeError("unexpected 'charSet' value")
        else:
            nStrSize = typeItem['length']
            if type(nStrSize) != int:
                raise TypeError("expecting integer value for 'length'")
            type_code = None
            if typeItem['charSet'] == 'H5T_CSET_ASCII':
                type_code = 'S'
            elif typeItem['charSet'] == 'H5T_CSET_UTF8':
                raise TypeError("fixed-width unicode strings are not supported")
            else:
                raise TypeError("unexpected 'charSet' value")
            dtRet = np.dtype(dims + type_code + str(nStrSize))  # fixed size string
    elif typeClass == 'H5T_VLEN':
        if dims:
            raise TypeError("ArrayType is not supported for variable len types")
        if 'base' not in typeItem:
            raise KeyError("'base' not provided")
        baseType = createBaseDataType(typeItem['base'])  # recursive on the element type
        dtRet = special_dtype(vlen=np.dtype(baseType))
    elif typeClass == 'H5T_OPAQUE':
        if dims:
            raise TypeError("Opaque Type is not supported for variable len types")
        if 'size' not in typeItem:
            raise KeyError("'size' not provided")
        nSize = int(typeItem['size'])
        if nSize <= 0:
            # NOTE(review): message says "non-negative" but the check
            # actually requires a strictly positive size.
            raise TypeError("'size' must be non-negative")
        dtRet = np.dtype('V' + str(nSize))  # raw bytes ("void") type
    elif typeClass == 'H5T_ARRAY':
        if not dims:
            raise KeyError("'dims' must be provided for array types")
        if 'base' not in typeItem:
            raise KeyError("'base' not provided")
        arrayBaseType = typeItem['base']
        if type(arrayBaseType) is dict:
            if "class" not in arrayBaseType:
                raise KeyError("'class' not provided for array base type")
            if arrayBaseType["class"] not in ('H5T_INTEGER', 'H5T_FLOAT', 'H5T_STRING'):
                raise TypeError("Array Type base type must be integer, float, or string")
        baseType = createDataType(arrayBaseType)
        # Shape-prefixed dtype string, e.g. "(2, 3)<i4".
        dtRet = np.dtype(dims + baseType.str)
        return dtRet  # return predefined type
    elif typeClass == 'H5T_REFERENCE':
        if 'base' not in typeItem:
            raise KeyError("'base' not provided")
        if typeItem['base'] == 'H5T_STD_REF_OBJ':
            dtRet = special_dtype(ref=Reference)
        elif typeItem['base'] == 'H5T_STD_REF_DSETREG':
            dtRet = special_dtype(ref=RegionReference)
        else:
            raise TypeError("Invalid base type for reference type")
    else:
        raise TypeError("Invalid type class")
    return dtRet
def createDataType(typeItem):
    """Build a numpy dtype from an HDF5 type description (entry point).

    Handles 'H5T_COMPOUND' here (recursing per field) and delegates every
    other class to ``createBaseDataType``.  ``typeItem`` may also be a
    predefined type name string.

    NOTE(review): Python-2-era code — the ``unicode`` checks raise NameError
    under Python 3 for non-str input.
    """
    dtRet = None
    if type(typeItem) == str or type(typeItem) == unicode:
        # should be one of the predefined types
        dtName = getNumpyTypename(typeItem)
        dtRet = np.dtype(dtName)
        return dtRet  # return predefined type
    if type(typeItem) != dict:
        raise TypeError("invalid type")
    if 'class' not in typeItem:
        raise KeyError("'class' not provided")
    typeClass = typeItem['class']
    if typeClass == 'H5T_COMPOUND':
        if 'fields' not in typeItem:
            raise KeyError("'fields' not provided for compound type")
        fields = typeItem['fields']
        if type(fields) is not list:
            raise TypeError("Type Error: expected list type for 'fields'")
        if not fields:
            raise KeyError("no 'field' elements provided")
        subtypes = []
        for field in fields:
            if type(field) != dict:
                raise TypeError("Expected dictionary type for field")
            if 'name' not in field:
                raise KeyError("'name' missing from field")
            if 'type' not in field:
                raise KeyError("'type' missing from field")
            field_name = field['name']
            if type(field_name) == unicode:
                # convert to ascii; numpy compound field names must be ascii here
                ascii_name = field_name.encode('ascii')
                if ascii_name != field_name:
                    raise TypeError("non-ascii field name not allowed")
                field['name'] = ascii_name  # NOTE(review): mutates the caller's dict in place
            dt = createDataType(field['type'])  # recursive call
            if dt is None:
                raise Exception("unexpected error")
            subtypes.append((field['name'], dt))  # append (name, dtype) tuple
        dtRet = np.dtype(subtypes)
    else:
        dtRet = createBaseDataType(typeItem)  # create non-compound dt
    return dtRet
| 15,331 | 0 | 180 |
4a59bff9ab2c447e1f0b633f0189f010a153ef26 | 1,645 | py | Python | raydockop/large_csv_to_parquet.py | abazabaaa/dockop | 6985d1296ef6f076e702db3403bd52f045c7ad1d | [
"MIT"
] | null | null | null | raydockop/large_csv_to_parquet.py | abazabaaa/dockop | 6985d1296ef6f076e702db3403bd52f045c7ad1d | [
"MIT"
] | null | null | null | raydockop/large_csv_to_parquet.py | abazabaaa/dockop | 6985d1296ef6f076e702db3403bd52f045c7ad1d | [
"MIT"
] | null | null | null | from pyarrow import Table
from pyarrow.parquet import ParquetWriter
import pyarrow as pa
import pandas as pd
from pyarrow import csv
# Streaming conversion of the large docking-score CSV into per-chunk
# parquet files, reading ~1 GB blocks at a time via InputStreamReader.
include_columns = ['zincid', 'smiles', 'dockscore']
delimiter = ','
chunksize = 1048576 * 1000  # ~1 GB CSV read block
file_stream = '/data/dockop_data/AmpC_screen_table.csv'

input_stream_reader = InputStreamReader(file_stream)
for i, batch in input_stream_reader.batches():
    df = batch.to_pandas()
    table = pa.Table.from_pandas(df)
    schema = table.schema
    # Fixed quoting: the original nested single quotes inside a single-quoted
    # f-string, which is a SyntaxError on Python < 3.12.
    print(f"Writing a total of {len(df['smiles'])} to disk.")
    ParquetWriter(f'/data/newdockop/dockop/code/mod_code_base/parquet/AmpC_screen_table_part_{i}.parquet', schema).write_table(table)
from pyarrow.parquet import ParquetWriter
import pyarrow as pa
import pandas as pd
from pyarrow import csv
class InputStreamReader:
    """Lazily opens a pyarrow streaming CSV reader and yields record batches.

    The underlying reader is created on first access using the module-level
    ``chunksize``, ``delimiter`` and ``include_columns`` settings.
    """

    def __init__(self, file_stream):
        self.file_stream = file_stream
        self._stream = None

    def batches(self):
        """Yield ``(ordinal, RecordBatch)`` pairs, ordinals starting at 1."""
        ordinal = 0
        while True:
            try:
                chunk = self.__next_batch()
            except StopIteration:
                # Reader exhausted — end the generator.
                return
            ordinal += 1
            yield ordinal, chunk

    def __next_batch(self):
        return self.stream.read_next_batch()

    @property
    def stream(self):
        # Build the streaming CSV reader on first use, then cache it.
        if not self._stream:
            self._stream = pa.csv.open_csv(
                self.file_stream,
                read_options=pa.csv.ReadOptions(block_size=chunksize),
                parse_options=pa.csv.ParseOptions(delimiter=delimiter),
                convert_options=pa.csv.ConvertOptions(include_columns=include_columns),
            )
        return self._stream
# Streaming conversion of the large docking-score CSV into per-chunk
# parquet files, reading ~1 GB blocks at a time via InputStreamReader.
include_columns = ['zincid', 'smiles', 'dockscore']
delimiter = ','
chunksize = 1048576 * 1000  # ~1 GB CSV read block
file_stream = '/data/dockop_data/AmpC_screen_table.csv'

input_stream_reader = InputStreamReader(file_stream)
for i, batch in input_stream_reader.batches():
    df = batch.to_pandas()
    table = pa.Table.from_pandas(df)
    schema = table.schema
    # Fixed quoting: the original nested single quotes inside a single-quoted
    # f-string, which is a SyntaxError on Python < 3.12.
    print(f"Writing a total of {len(df['smiles'])} to disk.")
    ParquetWriter(f'/data/newdockop/dockop/code/mod_code_base/parquet/AmpC_screen_table_part_{i}.parquet', schema).write_table(table)
a1eb1c3ce20dc24e214298373cbc3c3153fbb4c6 | 2,933 | py | Python | hathor/conf/testnet.py | luislhl/hathor-core | 9fe53b5a6ecb6efd3f4e8ce2ba21d591f0402cc6 | [
"Apache-2.0"
] | null | null | null | hathor/conf/testnet.py | luislhl/hathor-core | 9fe53b5a6ecb6efd3f4e8ce2ba21d591f0402cc6 | [
"Apache-2.0"
] | null | null | null | hathor/conf/testnet.py | luislhl/hathor-core | 9fe53b5a6ecb6efd3f4e8ce2ba21d591f0402cc6 | [
"Apache-2.0"
] | null | null | null | # Copyright 2021 Hathor Labs
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from hathor.checkpoint import Checkpoint as cp
from hathor.conf.settings import HathorSettings
SETTINGS = HathorSettings(
P2PKH_VERSION_BYTE=b'\x49',
MULTISIG_VERSION_BYTE=b'\x87',
NETWORK_NAME='testnet-golf',
BOOTSTRAP_DNS=['golf.testnet.hathor.network'],
# Genesis stuff
GENESIS_OUTPUT_SCRIPT=bytes.fromhex('76a914a584cf48b161e4a49223ed220df30037ab740e0088ac'),
GENESIS_TIMESTAMP=1577836800,
GENESIS_BLOCK_NONCE=826272,
GENESIS_BLOCK_HASH=bytes.fromhex('0000033139d08176d1051fb3a272c3610457f0c7f686afbe0afe3d37f966db85'),
GENESIS_TX1_NONCE=190,
GENESIS_TX1_HASH=bytes.fromhex('00e161a6b0bee1781ea9300680913fb76fd0fac4acab527cd9626cc1514abdc9'),
GENESIS_TX2_NONCE=115,
GENESIS_TX2_HASH=bytes.fromhex('00975897028ceb037307327c953f5e7ad4d3f42402d71bd3d11ecb63ac39f01a'),
# tx weight parameters. With these settings, tx weight is always 8
MIN_TX_WEIGHT_K=0,
MIN_TX_WEIGHT_COEFFICIENT=0,
MIN_TX_WEIGHT=8,
CHECKPOINTS=[
cp(100_000, bytes.fromhex('0000007ece4c7830169f360ed11c51b776e1b72bf0060e6e5b325ca8be474ac5')),
cp(200_000, bytes.fromhex('00000113ecd4b666116abf3d3f05ad509d903d6b456a1e8c35e46a9e426af11a')),
cp(300_000, bytes.fromhex('000000e42df13e4e7490cee98f303cb3b0ca33f362af180c5f7df740c98699d9')),
cp(400_000, bytes.fromhex('000000e9a748b34fc4d662d88bb36ef2a033ba129960924208be14eccdac1a65')),
cp(500_000, bytes.fromhex('000000b5c4572d7b85e585849540ece44b73948c5cdbc6f17a9a3a77fbd0f29a')),
cp(600_000, bytes.fromhex('000000f6743ba3d67e51d7adc21821b8263726ce3bc86010d5e1a905bf2531dc')),
cp(700_000, bytes.fromhex('0000008fda01c9e5fd6f99a5461e6dbf1039cba38cc8d0fc738a097d71caa968')),
cp(800_000, bytes.fromhex('000000397af32fcc4eeb6985d96326c1ff4644792631872a00394688b1782af5')),
cp(900_000, bytes.fromhex('00000097ae405036614f4335ad0e631df8fc5f7434e82c3421627e2fea4e1830')),
cp(1_000_000, bytes.fromhex('000000145ba662cdee0d72034658f93a0a3a4568d5ba5083ff09013ca1e6556c')),
cp(1_100_000, bytes.fromhex('000000404e6ff6a23695a6ffe712ce1c4efc02e75bbc11c3129f4c2377b07743')),
cp(1_200_000, bytes.fromhex('0000003be5fae5bb2c9ceaed589d172bcd9e74ca6c8d7d2ca06567f65cea7c9b')),
cp(1_300_000, bytes.fromhex('0000000000007d39de6e781c377bc202213b0b5b60db14c13d0b16e06d6fd5ac')),
],
)
| 56.403846 | 105 | 0.800205 | # Copyright 2021 Hathor Labs
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from hathor.checkpoint import Checkpoint as cp
from hathor.conf.settings import HathorSettings
SETTINGS = HathorSettings(
P2PKH_VERSION_BYTE=b'\x49',
MULTISIG_VERSION_BYTE=b'\x87',
NETWORK_NAME='testnet-golf',
BOOTSTRAP_DNS=['golf.testnet.hathor.network'],
# Genesis stuff
GENESIS_OUTPUT_SCRIPT=bytes.fromhex('76a914a584cf48b161e4a49223ed220df30037ab740e0088ac'),
GENESIS_TIMESTAMP=1577836800,
GENESIS_BLOCK_NONCE=826272,
GENESIS_BLOCK_HASH=bytes.fromhex('0000033139d08176d1051fb3a272c3610457f0c7f686afbe0afe3d37f966db85'),
GENESIS_TX1_NONCE=190,
GENESIS_TX1_HASH=bytes.fromhex('00e161a6b0bee1781ea9300680913fb76fd0fac4acab527cd9626cc1514abdc9'),
GENESIS_TX2_NONCE=115,
GENESIS_TX2_HASH=bytes.fromhex('00975897028ceb037307327c953f5e7ad4d3f42402d71bd3d11ecb63ac39f01a'),
# tx weight parameters. With these settings, tx weight is always 8
MIN_TX_WEIGHT_K=0,
MIN_TX_WEIGHT_COEFFICIENT=0,
MIN_TX_WEIGHT=8,
CHECKPOINTS=[
cp(100_000, bytes.fromhex('0000007ece4c7830169f360ed11c51b776e1b72bf0060e6e5b325ca8be474ac5')),
cp(200_000, bytes.fromhex('00000113ecd4b666116abf3d3f05ad509d903d6b456a1e8c35e46a9e426af11a')),
cp(300_000, bytes.fromhex('000000e42df13e4e7490cee98f303cb3b0ca33f362af180c5f7df740c98699d9')),
cp(400_000, bytes.fromhex('000000e9a748b34fc4d662d88bb36ef2a033ba129960924208be14eccdac1a65')),
cp(500_000, bytes.fromhex('000000b5c4572d7b85e585849540ece44b73948c5cdbc6f17a9a3a77fbd0f29a')),
cp(600_000, bytes.fromhex('000000f6743ba3d67e51d7adc21821b8263726ce3bc86010d5e1a905bf2531dc')),
cp(700_000, bytes.fromhex('0000008fda01c9e5fd6f99a5461e6dbf1039cba38cc8d0fc738a097d71caa968')),
cp(800_000, bytes.fromhex('000000397af32fcc4eeb6985d96326c1ff4644792631872a00394688b1782af5')),
cp(900_000, bytes.fromhex('00000097ae405036614f4335ad0e631df8fc5f7434e82c3421627e2fea4e1830')),
cp(1_000_000, bytes.fromhex('000000145ba662cdee0d72034658f93a0a3a4568d5ba5083ff09013ca1e6556c')),
cp(1_100_000, bytes.fromhex('000000404e6ff6a23695a6ffe712ce1c4efc02e75bbc11c3129f4c2377b07743')),
cp(1_200_000, bytes.fromhex('0000003be5fae5bb2c9ceaed589d172bcd9e74ca6c8d7d2ca06567f65cea7c9b')),
cp(1_300_000, bytes.fromhex('0000000000007d39de6e781c377bc202213b0b5b60db14c13d0b16e06d6fd5ac')),
],
)
| 0 | 0 | 0 |
d3aaba5da0c90d09dc983a2b34ef38ae2721d5fd | 1,812 | py | Python | iPadAir.py | hagridblack/appium-sample-code-for-ios | 022e2013841ba29cdf77c04ff03652f33cc5abfc | [
"MIT"
] | null | null | null | iPadAir.py | hagridblack/appium-sample-code-for-ios | 022e2013841ba29cdf77c04ff03652f33cc5abfc | [
"MIT"
] | null | null | null | iPadAir.py | hagridblack/appium-sample-code-for-ios | 022e2013841ba29cdf77c04ff03652f33cc5abfc | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
import unittest
import time
from appium import webdriver
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.common.by import By
from selenium.webdriver.support import expected_conditions
if __name__ == '__main__':
unittest.main()
| 31.789474 | 90 | 0.629691 | # -*- coding: utf-8 -*-
import unittest
import time
from appium import webdriver
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.common.by import By
from selenium.webdriver.support import expected_conditions
class Untitled(unittest.TestCase):
    """Appium UI test driving Chrome on a physical iPad via a local Appium server."""

    # Reporting settings copied into the desired-capabilities dict in setUp.
    reportDirectory = 'reports'
    reportFormat = 'xml'
    dc = {}  # NOTE(review): class-level dict — shared/mutated across instances
    testName = 'Untitled'
    driver = None

    def setUp(self):
        # Assemble Appium desired capabilities (device is a real iPad on
        # iOS 12.4.8; udid/org id are masked in this copy) and connect to
        # the local Appium server.
        self.dc['reportDirectory'] = self.reportDirectory
        self.dc['reportFormat'] = self.reportFormat
        self.dc['testName'] = self.testName
        self.dc['udid'] = 'dxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxb'
        self.dc['platformName'] = 'ios'
        #self.dc['bundleId'] = 'com.google.chrome.ios'
        self.dc['automationName'] = 'XCUITest'
        self.dc['platformVersion'] = '12.4.8'
        self.dc['xcodeOrgId'] = 'xxxxxxxxxx'
        self.dc['xcodeSigningId'] = 'iPhone Developer'
        #self.dc['browserName'] = 'Safari'
        self.dc['deviceName'] = 'iPad4,1'
        self.dc['noReset'] = 'true'
        self.dc['autoAcceptAlerts'] = 'true'
        self.driver = webdriver.Remote('http://127.0.0.1:4723/wd/hub',self.dc)
        #self.driver.implicitly_wait(60)
        #self.driver.set_page_load_timeout(60000)

    def testUntitled(self):
        # Demo1: home to chrome
        # Swipe across the home screen, then tap the Chrome icon.
        # NOTE(review): the locators embed an "xpath=" prefix inside the
        # XPath string itself — presumably find_element_by_xpath expects the
        # bare expression; confirm against the Appium client in use.
        self.driver.swipe(1109, 1526, 138, 1526, 100)
        time.sleep(3)
        self.driver.find_element_by_xpath("xpath=//*[@text='Chrome']").click()
        # Demo2: launch chrome
        time.sleep(3)
        # NOTE(review): the literal below is mojibake and contains a raw line
        # break (corrupted non-ASCII omnibox label); as written this line is
        # a SyntaxError — restore the original label before running.
        self.driver.find_element_by_xpath("xpath=//*[@text='ๆๅฐๆ่ผธๅ
ฅ็ถฒๅ']").send_keys('yahoo')
        self.driver.find_element_by_xpath("xpath=//*[@text='Go']").click()

    def tearDown(self):
        # Always release the Appium session.
        self.driver.quit()
if __name__ == '__main__':
unittest.main()
| 1,296 | 208 | 23 |
bc4fb5bfa4d5d205864e0760e8235900969cf64b | 1,738 | py | Python | loader/integrated_xbrl/downloader.py | icoxfog417/laser-search | 9b2d3b4a1a40d45152effaab3b2b5ea9c5ed8cd3 | [
"MIT"
] | null | null | null | loader/integrated_xbrl/downloader.py | icoxfog417/laser-search | 9b2d3b4a1a40d45152effaab3b2b5ea9c5ed8cd3 | [
"MIT"
] | 4 | 2021-03-10T01:39:02.000Z | 2021-12-13T20:29:07.000Z | loader/integrated_xbrl/downloader.py | chakki-works/CoARiJ-search | 9b2d3b4a1a40d45152effaab3b2b5ea9c5ed8cd3 | [
"MIT"
] | null | null | null | import os
from pathlib import Path
from zipfile import ZipFile
import boto3
| 34.078431 | 72 | 0.566168 | import os
from pathlib import Path
from zipfile import ZipFile
import boto3
class Downloader:
    """Fetch and unpack ESG/CSR report archives from the project S3 bucket.

    ``root`` is the directory the archives are unpacked under.  When omitted,
    a ``data`` directory next to this module is created (if needed) and used.
    """

    def __init__(self, root=""):
        self.bucket = "chakki.esg.csr.jp"
        self.taxonomy_file = "env-report/jpers.zip"
        self.data_file = "env-report/envreports.zip"
        if root:
            self.root = Path(root)
            if not self.root.exists():
                raise Exception("Specified root does not exist.")
        else:
            # Bug fix: the original computed this fallback path into a local
            # variable but never assigned self.root (and `not Path("")` is
            # always False, so the branch could never run).
            default_root = Path(os.path.join(os.path.dirname(__file__), "data"))
            if not default_root.exists():
                default_root.mkdir()
            self.root = default_root

    def download_data(self, folder_name="reports"):
        """Download and extract the reports archive; return the folder path."""
        return self._download(self.data_file, folder_name)

    def download_taxonomy(self, folder_name="taxonomy"):
        """Download and extract the taxonomy archive; return the folder path."""
        return self._download(self.taxonomy_file, folder_name)

    def _download(self, file_name, folder_name):
        """Download ``file_name`` from S3 and unzip it into ``folder_name``.

        The archive's top-level directory is stripped during extraction.
        The whole step is skipped when the target folder already exists, and
        the temporary zip file is deleted once extraction succeeds.
        """
        zip_file = self.root.joinpath(os.path.basename(file_name))
        zip_dir = self.root.joinpath(folder_name)
        if not zip_dir.exists():
            s3 = boto3.client("s3")
            s3.download_file(self.bucket, file_name, str(zip_file))
            with ZipFile(zip_file, "r") as z:
                for member in z.namelist():
                    # Drop the leading path component so the contents land
                    # directly inside zip_dir.
                    parts = Path(member).parts[1:]
                    target = zip_dir.joinpath("/".join(parts))
                    if z.getinfo(member).is_dir():
                        target.mkdir(parents=True, exist_ok=True)
                    else:
                        target.parent.mkdir(parents=True, exist_ok=True)
                        with target.open("wb") as out:
                            out.write(z.read(member))
            # Only unlink inside this branch: when zip_dir already existed no
            # archive was downloaded (the original unconditional unlink would
            # raise FileNotFoundError on a second call).
            zip_file.unlink()
        return zip_dir
72fc23d17d0df2ff2741845d0adba79435790ef9 | 13,691 | py | Python | rapid/workflow/data/dal/pipeline_dal.py | m2bright/rapid | fd66515105ca9773c5da8562a878c6b0bfa4487a | [
"Apache-2.0"
] | null | null | null | rapid/workflow/data/dal/pipeline_dal.py | m2bright/rapid | fd66515105ca9773c5da8562a878c6b0bfa4487a | [
"Apache-2.0"
] | null | null | null | rapid/workflow/data/dal/pipeline_dal.py | m2bright/rapid | fd66515105ca9773c5da8562a878c6b0bfa4487a | [
"Apache-2.0"
] | null | null | null | """
Copyright (c) 2015 Michael Bright and Bamboo HR LLC
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
# pylint: disable=broad-except
import logging
import datetime
try:
import simplejson as out_json
except ImportError:
import json as out_json
from flask import request
from flask.wrappers import Response
from sqlalchemy.ext.declarative import DeclarativeMeta
from sqlalchemy.sql.expression import asc
from rapid.lib.exceptions import InvalidObjectException
from rapid.lib.store_service import StoreService
from rapid.workflow.data.models import PipelineEvent
from rapid.lib import api_key_required, get_db_session
from rapid.lib.constants import StatusConstants, ModuleConstants
from rapid.lib.exceptions import VcsNotFoundException
from rapid.lib.framework.injectable import Injectable
from rapid.lib.modules import CiModule
from rapid.master.data.database.dal.general_dal import GeneralDal
from rapid.workflow.data.models import Action, Pipeline, Stage, Workflow, PipelineInstance, PipelineParameters
logger = logging.getLogger("rapid")
| 44.307443 | 165 | 0.62815 | """
Copyright (c) 2015 Michael Bright and Bamboo HR LLC
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
# pylint: disable=broad-except
import logging
import datetime
try:
import simplejson as out_json
except ImportError:
import json as out_json
from flask import request
from flask.wrappers import Response
from sqlalchemy.ext.declarative import DeclarativeMeta
from sqlalchemy.sql.expression import asc
from rapid.lib.exceptions import InvalidObjectException
from rapid.lib.store_service import StoreService
from rapid.workflow.data.models import PipelineEvent
from rapid.lib import api_key_required, get_db_session
from rapid.lib.constants import StatusConstants, ModuleConstants
from rapid.lib.exceptions import VcsNotFoundException
from rapid.lib.framework.injectable import Injectable
from rapid.lib.modules import CiModule
from rapid.master.data.database.dal.general_dal import GeneralDal
from rapid.workflow.data.models import Action, Pipeline, Stage, Workflow, PipelineInstance, PipelineParameters
logger = logging.getLogger("rapid")
class PipelineDal(GeneralDal, Injectable):
    """Data-access layer for pipelines: CRUD plus instance creation,
    cancellation, and the Flask endpoints that expose them."""

    # Dependency-injection declarations consumed by the Injectable framework.
    __injectables__ = {ModuleConstants.CI_MODULE: CiModule, 'flask_app': None, 'queue_constants': None}

    def __init__(self, ci_module, queue_constants, flask_app=None):
        """
        :type ci_module: :class:`rapid.lib.modules.modules.CiModule`
        :type queue_constants: QueueHandlerConstants
        :param flask_app: Flask
        :return:
        """
        super(PipelineDal, self).__init__()
        self.app = flask_app
        self.ci_module = ci_module
        self.queue_constants = queue_constants

    def is_serviceable(self, model):
        # This DAL only services the Pipeline model.
        return model == Pipeline

    def register_url_rules(self, flask_app):
        """Register the pipeline REST endpoints on the given Flask app."""
        self.app = flask_app
        flask_app.add_url_rule('/api/pipelines/create', 'create_pipeline', api_key_required(self.create_pipeline), methods=['POST'])
        flask_app.add_url_rule('/api/pipelines/<int:pipeline_id>/start', 'start_pipeline_instance', api_key_required(self.start_pipeline_instance), methods=['POST'])

    def create_pipeline(self):
        # Flask view: build a pipeline from the request JSON and return it.
        json = request.get_json()
        return Response(self._get_pipeline(json), content_type='application/json')

    def start_pipeline_instance_via_reponame(self, repo, json_data=None):
        """
        Start a pipeline_instance identified by the the repo name
        :param repo: VCS repo name or Vcs.repo
        :param json_data: dict for the Data in the PipelineInstance
        :return:
        :rtype PipelineInstance
        """
        vcs = self.ci_module.get_vcs_by_repo_name(repo)
        if vcs is not None:
            if vcs.pipeline_id is not None:
                return self.create_pipeline_instance(vcs.pipeline_id, json_data, vcs_id=vcs.id)
            if vcs.active:
                raise VcsNotFoundException("The repo[{}] did not have a default pipeline defined.".format(repo))
        # Unknown repo, or known-but-inactive repo without a pipeline.
        raise VcsNotFoundException("The repo [{}] is not found in the system".format(repo))

    def start_pipeline_instances_via_pipeline_id(self, pipeline_id, json_data=None):
        # Open a session and create the instance within it (returns on first session).
        for session in get_db_session():
            return self.create_pipeline_instance(pipeline_id, json_data, in_session=session)

    def start_pipeline_instance(self, pipeline_id):
        # Flask view for POST /api/pipelines/<id>/start.
        # NOTE(review): create_pipeline_instance returns a dict in both its
        # success and failure paths, so this appears to always report
        # "It worked!" — confirm the intended failure signal.
        data = request.get_json()
        return "It worked!" if self.create_pipeline_instance(pipeline_id, data) else "It Failed!"

    def get_pipeline_by_id(self, pipeline_id, session=None):
        # With no session: open one and return the serialized dict.
        # With a session: return the ORM object itself.
        if session is None:
            for db_session in get_db_session():
                return db_session.query(Pipeline).get(pipeline_id).serialize()
        else:
            return session.query(Pipeline).get(pipeline_id)

    def get_pipeline_events_by_pipeline_id(self, pipeline_id, session=None):
        # Same session convention as get_pipeline_by_id: serialized dicts
        # without a session, ORM objects with one.
        if session is None:
            for db_session in get_db_session():
                return [event.serialize() for event in db_session.query(PipelineEvent).filter(PipelineEvent.pipeline_id == pipeline_id).all()]
        else:
            return session.query(PipelineEvent).filter(PipelineEvent.pipeline_id == pipeline_id).all()

    def get_pipeline_instance_by_id(self, pipeline_instance_id, session=None):
        # Same session convention as get_pipeline_by_id.
        if session is None:
            for db_session in get_db_session():
                return db_session.query(PipelineInstance).get(pipeline_instance_id).serialize()
        else:
            return session.query(PipelineInstance).get(pipeline_instance_id)

    def _get_pipeline(self, json):
        """Create a Pipeline (with stages/workflows/actions) from a JSON dict
        and return its serialized JSON string."""
        if json:
            for session in get_db_session():
                pipeline = Pipeline(**{"name": json['name'], 'active': json['active']})
                session.add(pipeline)
                session.flush()

                pipeline.stages.extend(self.get_stages(json, pipeline, session))
                # Serialize before commit so the response is built while the
                # objects are still attached to the session.
                response = out_json.dumps(pipeline.serialize())
                try:
                    session.commit()
                finally:
                    if session:
                        session.expunge_all()
                        session.close()
                        session = None
                return response
        # NOTE(review): BaseException is unusually broad for a failure signal.
        raise BaseException("No pipeline was created.")

    def get_actions(self, json, pipeline, workflow, session):
        """Build Action rows for a workflow from its JSON; order follows list position."""
        actions = []
        if 'actions' in json:
            for tmp_action in json['actions']:
                action = Action(**tmp_action)
                action.order = len(actions)
                action.pipeline_id = pipeline.id
                action.workflow_id = workflow.id
                session.add(action)
                actions.append(action)
        return actions

    def get_workflows(self, json, pipeline, stage, session):
        """Build Workflow rows (and their actions) for a stage from its JSON."""
        workflows = []
        if 'workflows' in json:
            for tmp_workflow in json['workflows']:
                workflow = Workflow(
                    **{"name": tmp_workflow['name'], 'active': tmp_workflow['active'], "order": len(workflows),
                       "stage_id": stage.id})
                session.add(workflow)
                session.flush()

                workflow.actions.extend(self.get_actions(tmp_workflow, pipeline, workflow, session))
                workflows.append(workflow)
        return workflows

    def get_stages(self, json, pipeline, session):
        """Build Stage rows (and their workflows) for a pipeline from its JSON."""
        stages = []
        if 'stages' in json:
            for tmp_stage in json['stages']:
                stage = Stage(**{"name": tmp_stage['name'], "active": tmp_stage['active'], "order": len(stages),
                                 "pipeline_id": pipeline.id})
                session.add(stage)
                session.flush()

                stage.workflows.extend(self.get_workflows(tmp_stage, pipeline, stage, session))
                stages.append(stage)
        return stages

    def new_alchemy_encoder(self):
        """Return a JSONEncoder class that serializes SQLAlchemy models by
        dumping their public attributes, visiting each object at most once."""
        _visited_objs = []

        class AlchemyEncoder(out_json.JSONEncoder):
            def default(self, obj):  # pylint: disable=arguments-differ,method-hidden
                if isinstance(obj.__class__, DeclarativeMeta):
                    # don't re-visit self
                    if obj in _visited_objs:
                        return None
                    _visited_objs.append(obj)
                    # an SQLAlchemy class
                    fields = {}
                    for field in [x for x in dir(obj) if
                                  not x.startswith('_') and x != 'metadata' and x != 'query' and x != 'query_class']:
                        fields[field] = obj.__getattribute__(field)
                    # a json-encodable dict
                    return fields
                return out_json.JSONEncoder.default(self, obj)
        return AlchemyEncoder

    def get_actions_query(self, session, pipeline_id):
        """Query (Stage, Workflow, Action) triples for a pipeline, ordered by
        stage order, workflow id, then action order."""
        return session.query(Stage, Workflow, Action) \
            .filter(Stage.pipeline_id == pipeline_id) \
            .filter(Stage.id == Workflow.stage_id) \
            .filter(Workflow.id == Action.workflow_id) \
            .order_by(asc(Stage.order)) \
            .order_by(asc(Workflow.id)) \
            .order_by(asc(Action.order))

    def create_pipeline_instance(self, pipeline_id, json_data=None, vcs_id=None, in_session=None):
        # Reuse the caller's session when given; otherwise open a fresh one.
        if in_session:
            return self._process_pipeline(pipeline_id, vcs_id, json_data, in_session)
        for session in get_db_session():
            return self._process_pipeline(pipeline_id, vcs_id, json_data, session)

    def _process_pipeline(self, pipeline_id, vcs_id, json_data, session):
        """Create a PipelineInstance (plus stage/workflow/action instances and
        parameters) for an active pipeline; return its serialized dict, or an
        error-message dict for an unknown/inactive pipeline."""
        pipeline = session.query(Pipeline).get(pipeline_id)
        if pipeline is not None and pipeline.active:
            pipeline_instance = PipelineInstance(pipeline_id=pipeline_id, status_id=StatusConstants.INPROGRESS,
                                                 created_date=datetime.datetime.utcnow(),
                                                 start_date=datetime.datetime.utcnow())
            session.add(pipeline_instance)
            session.flush()

            self._setup_pipeline(session, pipeline_id, pipeline_instance.id)
            try:
                if json_data and 'parameters' in json_data:
                    for parameter, value in json_data['parameters'].items():
                        tmp = PipelineParameters(parameter=parameter, value=value)
                        tmp.pipeline_instance_id = pipeline_instance.id
                        session.add(tmp)

                        if parameter == "commit":
                            # Look for vcs and get the ID
                            # NOTE(review): when vcs_id is looked up here the
                            # commit record is NOT created; it is only created
                            # when vcs_id was passed in — confirm this asymmetry
                            # is intentional.
                            if vcs_id is None:
                                vcs = self.ci_module.get_vcs_by_pipeline_id(pipeline_id, session=session)
                                vcs_id = vcs.id if vcs is not None else None
                            else:
                                self.ci_module.create_git_commit(value, vcs_id, pipeline_instance_id=pipeline_instance.id, session=session)
            except Exception as exception:
                # Parameter/commit failures are logged but do not abort instance creation.
                logger.error("Creating Pipeline Instance failed.")
                logger.error(exception)
            session.commit()
            create_pipeline_instance = pipeline_instance.serialize()
            return create_pipeline_instance
        try:
            # pipeline may be None here, hence the AttributeError guard.
            logger.info("Inactive pipeline: {}".format(pipeline.name))
        except AttributeError:
            pass
        return {"message": "Invalid Pipeline, or inactive pipeline"}

    def cancel_pipeline_instance(self, pipeline_instance_id):
        """Cancel a pipeline instance: tell the assigned workers to stop any
        not-yet-failed action instances, then mark the instance CANCELED."""
        for session in get_db_session():
            pipeline_instance = self.get_pipeline_instance_by_id(pipeline_instance_id, session)
            if pipeline_instance:
                for action_instance in pipeline_instance.action_instances:
                    for client in StoreService.get_clients(self.app).values():
                        if action_instance.status_id <= StatusConstants.SUCCESS and client.get_uri() == action_instance.assigned_to:
                            self.queue_constants.cancel_worker(action_instance.serialize())
                pipeline_instance.status_id = StatusConstants.CANCELED
                pipeline_instance.end_date = datetime.datetime.utcnow()
                session.commit()
            else:
                raise InvalidObjectException("Pipeline Instance not found", 404)
        return {"message": "Running clients have been canceled and pipeline canceled."}

    def _setup_pipeline(self, session, pipeline_id, pipeline_instance_id):
        """Materialize instance rows for the FIRST stage of a pipeline.

        Walks the ordered (stage, workflow, action) triples, creating
        stage/workflow/action instances until the stage changes; the first
        action of each workflow is marked READY.  Returns the created rows.
        """
        current_stage = None
        current_workflow = None
        first_action_instance = True
        added_objects = []
        for (stage, workflow, action) in self.get_actions_query(session, pipeline_id):
            if current_stage is None:
                current_stage = stage.convert_to_instance()
                current_stage.pipeline_instance_id = pipeline_instance_id
                session.add(current_stage)
                session.flush()
                added_objects.append(current_stage)
            elif current_stage.stage_id != stage.id:
                # Only the first stage is instantiated up front.
                break

            if current_workflow is None or current_workflow.workflow_id != workflow.id:
                first_action_instance = True
                current_workflow = workflow.convert_to_instance()
                current_workflow.stage_instance_id = current_stage.id
                session.add(current_workflow)
                session.flush()
                added_objects.append(current_workflow)

            # Fan an action out into one instance per slice (at least one).
            slices = max(1, action.slices)
            for slice_num in range(slices):
                action_instance = action.convert_to_instance()
                action_instance.workflow_instance_id = current_workflow.id
                action_instance.pipeline_instance_id = pipeline_instance_id
                action_instance.slice = "{}/{}".format(slice_num + 1, slices)

                if first_action_instance:
                    action_instance.status_id = StatusConstants.READY
                session.add(action_instance)
                added_objects.append(action_instance)
                session.flush()
            if first_action_instance:
                first_action_instance = False
        return added_objects
| 10,308 | 1,816 | 23 |
ebea6c73a67575342cc2093c7f3b40435861445d | 1,028 | py | Python | DemPipe/executor/mixin/config_mixin.py | hmiladhia/DemPipe | 48d48150969fa047e2f20b35ee1c61991e7b44ad | [
"MIT"
] | null | null | null | DemPipe/executor/mixin/config_mixin.py | hmiladhia/DemPipe | 48d48150969fa047e2f20b35ee1c61991e7b44ad | [
"MIT"
] | null | null | null | DemPipe/executor/mixin/config_mixin.py | hmiladhia/DemPipe | 48d48150969fa047e2f20b35ee1c61991e7b44ad | [
"MIT"
] | null | null | null | from configDmanager import import_config, Config
from DemPipe.executor import SimplePipeExecutor
| 26.358974 | 50 | 0.639105 | from configDmanager import import_config, Config
from DemPipe.executor import SimplePipeExecutor
class ConfigMixin(SimplePipeExecutor):
    """Executor mixin that loads its settings from a configDmanager config.

    Subclasses override ``load_config`` / ``set_default_config`` to read or
    default additional values; this base only tracks ``pipe_name``.
    """
    pipe_name: str

    def __init__(self, config_file=None):
        self.__load_config(config_file)
        super().__init__()

    def load_config(self, config):
        # Copy the values this mixin cares about off the Config object.
        self.pipe_name = config.pipe_name

    def set_default_config(self, config):
        # Defaults applied when no config file is supplied.
        config.pipe_name = None

    def __load_config(self, config_file):
        loaded = self.__import_config(config_file)
        self.load_config(loaded)
        return loaded

    def __import_config(self, config_file):
        # With an explicit file, delegate to configDmanager; otherwise build
        # an empty Config and populate it with defaults.
        if config_file:
            return import_config(config_file)
        fallback = Config(dict())
        self.set_default_config(fallback)
        return fallback

    def get_title(self, title=None):
        """Return '[pipe_name] - title', omitting whichever part is unset."""
        pieces = []
        if self.pipe_name:
            pieces.append(f'[{self.pipe_name}]')
        if title:
            pieces.append(title)
        return ' - '.join(pieces)
| 708 | 198 | 23 |
d8e66d1f432b5d05594739faf75414a48bafafa6 | 168 | py | Python | bardhub/artist/admin.py | migdotcom/music-library | 4648ea02e4b071c4a287eba09202045963992873 | [
"MIT"
] | null | null | null | bardhub/artist/admin.py | migdotcom/music-library | 4648ea02e4b071c4a287eba09202045963992873 | [
"MIT"
] | null | null | null | bardhub/artist/admin.py | migdotcom/music-library | 4648ea02e4b071c4a287eba09202045963992873 | [
"MIT"
] | null | null | null | from django.contrib import admin
from .models import Artist
# Register your models here.
admin.site.register(Artist)
admin.site.site_header = 'Bardhub Administration'
| 24 | 49 | 0.809524 | from django.contrib import admin
from .models import Artist
# Register your models here.
admin.site.register(Artist)
admin.site.site_header = 'Bardhub Administration'
| 0 | 0 | 0 |
34a885fd5c4c664b0eb7e713dcb670d1dd910411 | 823 | py | Python | sortByMTime.py | nightjuggler/pig | 24284ce9fec67b28445bf0d15754928ecc888fc6 | [
"MIT"
] | null | null | null | sortByMTime.py | nightjuggler/pig | 24284ce9fec67b28445bf0d15754928ecc888fc6 | [
"MIT"
] | null | null | null | sortByMTime.py | nightjuggler/pig | 24284ce9fec67b28445bf0d15754928ecc888fc6 | [
"MIT"
] | null | null | null | import os
import os.path
import sys
if __name__ == '__main__':
main()
| 27.433333 | 78 | 0.671932 | import os
import os.path
import sys
def main():
    """Renumber all IMG_*.JPG files in the current directory to
    IMG_0001.JPG, IMG_0002.JPG, ... ordered by modification time
    (oldest first, name as tie-breaker, matching the original sort).

    Renaming happens in two passes via tmp_ names so an old name can
    never clobber a file that has not been renamed yet.
    """
    photos = [name for name in os.listdir('.')
              if name.startswith('IMG_') and name.endswith('.JPG')]
    photos.sort(key=lambda name: (os.stat(name).st_mtime, name))
    renames = []
    for i, oldname in enumerate(photos, start=1):
        newname = 'IMG_{:04d}.JPG'.format(i)
        tmpname = 'tmp_' + newname
        if os.path.exists(tmpname):
            sys.exit(f'"{tmpname}" already exists!')
        renames.append((oldname, tmpname, newname))
    # Pass 1: move everything out of the way.
    for oldname, tmpname, _ in renames:
        print('Renaming', oldname, 'to', tmpname)
        os.rename(oldname, tmpname)
    # Pass 2: settle every file under its final name.
    for _, tmpname, newname in renames:
        if os.path.exists(newname):
            sys.exit(f'"{newname}" already exists!')
        print('Renaming', tmpname, 'to', newname)
        os.rename(tmpname, newname)
if __name__ == '__main__':
    main()
| 728 | 0 | 23 |
428b8c0635f5c971145cee47479b955994e649a4 | 1,608 | py | Python | main.py/lab07.py | sofilaulia/Lab-python | 114cc30acd324212b7b4b6708ee65aefcc68baa0 | [
"MIT"
] | null | null | null | main.py/lab07.py | sofilaulia/Lab-python | 114cc30acd324212b7b4b6708ee65aefcc68baa0 | [
"MIT"
] | null | null | null | main.py/lab07.py | sofilaulia/Lab-python | 114cc30acd324212b7b4b6708ee65aefcc68baa0 | [
"MIT"
] | null | null | null | # Nama: Sofil Muna Aulia
# NIM: 0110120115
# Kelas: Sistem Informasi 05
# Mulai baris ini hingga baris paling bawah
# digunakan untuk mengetes fungsi yang telah dibuat.
# Tidak perlu mengubah bagian ini.
# Ketika dijalankan, program akan menampilkan contoh
# pemanggilan fungsi dan solusi yang seharusnya.
# Cocokkan hasil pemanggilan fungsi dengan solusi
# yang seharusnya.
if __name__ == '__main__':
test() | 28.714286 | 84 | 0.651741 | # Nama: Sofil Muna Aulia
# NIM: 0110120115
# Kelas: Sistem Informasi 05
def convert_list(multilist):
    """Flatten a list of lists into a single list.

    Replaces ``sum(multilist, [])``, which rebuilds the accumulator list
    on every addition (quadratic), with a linear nested comprehension.

    Args:
        multilist: list (or other iterable) of lists.

    Returns:
        list: all elements in their original order.
    """
    return [item for sublist in multilist for item in sublist]
def get_nilai(filename, nama):
    """Look up *nama* in a score file and return the rounded score.

    Each line of the file is "<name> <score>". Matching is a
    case-insensitive substring test against the whole line, as before.
    The original opened the file with ``open`` and returned from inside
    the loop, leaking the handle; ``with`` guarantees it is closed.

    Args:
        filename (str): path to the score file.
        nama (str): name to search for.

    Returns:
        int: score of the first matching line, rounded to the nearest
        integer, or None when no line matches.
    """
    with open(filename, 'r') as f:
        for baris in f:
            if nama.lower() in baris.lower():
                data = baris.split()
                return round(float(data[1]))
    return None
def nilai_max(filename):
    """Return (name, score) for the highest score in the file.

    Implements the previously-missing body (it was ``pass``). Each line
    of the file is "<name> <score>"; the expected result shape is taken
    from test() below, e.g. ``('Zack', 88.05)`` for nilai1.txt.

    Args:
        filename (str): path to the score file.

    Returns:
        tuple: (name, score-as-float) of the best line, or None when the
        file has no parseable lines.
    """
    best = None
    with open(filename, 'r') as f:
        for baris in f:
            data = baris.split()
            if len(data) < 2:
                continue  # skip blank/malformed lines
            nilai = float(data[1])
            if best is None or nilai > best[1]:
                best = (data[0], nilai)
    return best
# Mulai baris ini hingga baris paling bawah
# digunakan untuk mengetes fungsi yang telah dibuat.
# Tidak perlu mengubah bagian ini.
# Ketika dijalankan, program akan menampilkan contoh
# pemanggilan fungsi dan solusi yang seharusnya.
# Cocokkan hasil pemanggilan fungsi dengan solusi
# yang seharusnya.
def test():
    """Smoke-test harness: prints each function call next to the expected
    answer ("solusi") so results can be checked by eye; no assertions."""
    r = convert_list([[1,2], [3,4], [5,6]])
    print(f"convert_list([[1,2], [3,4], [5,6]]) = {r} \n(solusi: [1, 2, 3, 4, 5, 6])")
    print()
    r = get_nilai('nilai1.txt','joni')
    print(f"get_nilai('nilai1.txt','joni') = {r} \n(solusi: 76)")
    print()
    r = get_nilai('nilai2.txt','joni')
    print(f"get_nilai('nilai2.txt','joni') = {r} \n(solusi: None)")
    print()
    r = nilai_max('nilai1.txt')
    print(f"nilai_max('nilai1.txt') = {r} \n(solusi: ('Zack', 88.05)")
    print()
    r = nilai_max('nilai2.txt')
    print(f"nilai_max('nilai2.txt') = {r} \n(solusi: ('Arya', 90.00)")
    print()
if __name__ == '__main__':
    test()
1427e4f5aadf989968cb0c8ef88292b0fc3d4ad2 | 10,129 | py | Python | data/unified_emotion.py | IvoOVerhoeven/meta-learning-emotion-detection | fb076d7644173b13eb62d6301544af9e98352512 | [
"Apache-2.0"
] | 3 | 2021-04-21T15:44:50.000Z | 2021-05-28T15:53:23.000Z | data/unified_emotion.py | IvoOVerhoeven/meta-learning-emotion-detection | fb076d7644173b13eb62d6301544af9e98352512 | [
"Apache-2.0"
] | null | null | null | data/unified_emotion.py | IvoOVerhoeven/meta-learning-emotion-detection | fb076d7644173b13eb62d6301544af9e98352512 | [
"Apache-2.0"
] | 1 | 2021-09-10T18:13:28.000Z | 2021-09-10T18:13:28.000Z | from collections import defaultdict
import torch
import jsonlines
class unified_emotion():
"""Class for the 'Unified Emotion Dataset'. Data from https://github.com/sarnthil/unify-emotion-datasets.
"""
def __init__(self, file_path, include=['grounded_emotions'], split_ratio=0.7, verbose=False, first_label_only=False):
"""
Class for the 'Unified Emotion Dataset'.
Data from https://github.com/sarnthil/unify-emotion-datasets.
Args:
file_path (str): path to the 'unified-dataset.jsonl' file
include (list, optional): if not None, will only use the datasets in the include list. Defaults to None
exclude (list, optional): tasks to exclude. Defaults to ['fb-valence-arousal-anon', 'emobank', 'affectivetext', 'emotion-cause', 'electoraltweets'].
split_ratio (float, optional): amount of data reserved for test sets. Defaults to 0.8.
"""
self.file_path = file_path
self.include = include
self.split_ratio = split_ratio
self.verbose = verbose
self.first_label_only = first_label_only
self.info = [row for row in unified_emotion_info() if row['source'] in self.include]
def prep(self, text_tokenizer=lambda x: x, text_tokenizer_kwargs=dict()):
"""Generates dataset from unified file.
Args:
text_tokenizer (callable, optional): function that processes a line of text. Defaults to identity (raw text).
"""
datasets = defaultdict(lambda: defaultdict(lambda: defaultdict(list)))
source_lengths = dict()
label_map = defaultdict()
with jsonlines.open(self.file_path) as file:
for i, line in enumerate(file.iter()):
# Skip line if not in include list
source = line['source']
if not source in self.include:
continue
# Give 'all' split if data doesn't have its own train/test split
split = 'all' if line.get('split', None) == None else line['split']
# Give line a data specific id
id = source_lengths.get(source, 0)
# Convert the labels
# Saves the mapping if this is the first line of a dataset
labels = {k: v for k, v in sorted(line['emotions'].items())
if v != None}
if id == 0:
label_map[source] = {k: i for i,
(k, _) in enumerate(labels.items())}
# All present emotions (labels > 1)
present_emotions = [emotion for emotion,
present in labels.items() if present > 0]
#text = text_tokenizer(line['text'], **text_tokenizer_kwargs)
text = line['text']
# Ensure proper encoding
try:
text = text.encode('latin-1').decode('utf8')
except (UnicodeEncodeError, UnicodeDecodeError):
if self.verbose:
print("Removed sentence for bad encoding")
continue
text = text_tokenizer(text, **text_tokenizer_kwargs)
# If the tokenizer removes the text, carry on
if text == None:
continue
if isinstance(text, list):
text = ' '.join(text)
# Ignore all remaining utf8 encodings and bring to 'plain' text
text = text.encode('ascii', 'ignore').decode('ascii')
# If more than 1 emotion is present, multiple examples are created
if (not self.first_label_only):
for i, emotion in enumerate(present_emotions):
label = label_map[source][emotion]
datasets[source][split][label].append(
{'idx': id, 'labels': label, 'text': text})
source_lengths[source] = id + i + 1
else:
label = label_map[source][present_emotions[0]]
datasets[source][split][label].append(
{'idx': id, 'labels': label, 'text': text})
source_lengths[source] = id + 1
for source in datasets.keys():
if len(datasets[source].keys()) == 1 and 'all' in datasets[source].keys():
class_lengths = {k: len(datasets[source]['all'][k])
for k in datasets[source]['all'].keys()}
for c, l in class_lengths.items():
train_l = int(self.split_ratio * l)
datasets[source]['train'][c] = datasets[source]['all'][c][:train_l]
val_l = train_l + int((1 - self.split_ratio) * l * 0.5)
datasets[source]['validation'][c] = datasets[source]['all'][c][train_l:val_l]
datasets[source]['test'][c] = datasets[source]['all'][c][val_l:]
del datasets[source]['all']
self.datasets = datasets
self.source_lengths = source_lengths
self.label_map = label_map
self.inv_label_map = {source: {val: key for key,
val in label_map[source].items()} for source in label_map.keys()}
# Remove classes with limited data
total_removed, total_data_removed = 0, 0
removing = []
for source in datasets.keys():
n_classes = len(datasets[source]['train'].keys())
for c in datasets[source]['train'].keys():
train_size = len(datasets[source]['train'][c])
val_size = len(datasets[source]['validation'][c])
test_size = len(datasets[source]['test'][c])
keep = (train_size >= 96 and val_size >= 64 and test_size >= 64)
if (not keep):
if self.verbose:
print("Removed {:}/{:} for too little data |train|={}, |test|={}".
format(source, self.inv_label_map[source][c], train_size, test_size))
total_removed += 1
total_data_removed += train_size + test_size
self.source_lengths[source] -= train_size + test_size
removing.append((source, c))
for source, c in removing:
del datasets[source]['train'][c]
del datasets[source]['validation'][c]
del datasets[source]['test'][c]
if self.verbose:
print("Removed a total of {:} classes and {:} examples.".format(
total_removed, total_data_removed))
for source in datasets.keys():
assert len(datasets[source]['train'].keys()) >= 2, print(
f"{source} has too few classes remaining.")
@property
def lens(self):
"""Lengths of the individual datasets
"""
return self.source_lengths
"""
def get_dataloader(self, source_name, device, k=4, tokenizer=None, shuffle=True):
Generates a dataloader from a specified dataset.
See MetaStratifiedLoader for more.
Args:
source_name(str): a dataset from one of the processed ones.
k(int, optional): the k-shot. Defaults to 4.
tokenizer(callable, optional): function that processes list of strings to PyTorch tensor. Defaults to None.
shuffle(boolean, optional): whether or not to shuffle the train data. Defaults to True.
Returns:
dataloaders: iterable of data_loaders. First is train, last is test.
data_loaders = []
for split in self.datasets[source_name].keys():
source_dict = self.datasets[source_name]
dataloader = MetaStratifiedLoader(source_dict=source_dict,
split=split,
class_to_int=self.label_map[source_name],
k=k,
tokenizer=tokenizer,
shuffle=shuffle,
device=device
)
if split == 'train':
data_loaders.insert(0, dataloader)
else:
data_loaders.append(dataloader)
return data_loaders
"""
| 46.677419 | 160 | 0.541317 | from collections import defaultdict
import torch
import jsonlines
def unified_emotion_info():
    """Return one metadata record per dataset in the Unified Emotion corpus.

    Each record has keys: source, size, domain, classes, special.
    """
    keys = ('source', 'size', 'domain', 'classes', 'special')
    rows = (
        ('affectivetext', 250, 'headlines', 6, 'non-discrete, multiple labels'),
        ('crowdflower', 40000, 'tweets', 14, 'includes no-emotions class'),
        ('dailydialog', 13000, 'conversations', 6, 'includes no-emotions class'),
        ('electoraltweets', 4058, 'tweets', 8, 'includes no-emotions class'),
        ('emobank', 10000, 'headlines', 3, 'VAD regression'),
        ('emoint', 7097, 'tweets', 6, 'annotated by experts'),
        ('emotion-cause', 2414, 'artificial', 6, 'N/A'),
        ('fb-valence-arousal-anon', 2800, 'facebook', 3, 'VA regression'),
        ('grounded_emotions', 2500, 'tweets', 2, 'N/A'),
        ('ssec', 4868, 'tweets', 8, 'multiple labels per sentence'),
        ('tales-emotion', 15302, 'fairytales', 6, 'includes no-emotions class'),
        ('tec', 21051, 'tweets', 7, 'annotated by experts'),
    )
    return [dict(zip(keys, row)) for row in rows]
class unified_emotion():
    """Class for the 'Unified Emotion Dataset'. Data from https://github.com/sarnthil/unify-emotion-datasets.
    """
    def __init__(self, file_path, include=['grounded_emotions'], split_ratio=0.7, verbose=False, first_label_only=False):
        """
        Class for the 'Unified Emotion Dataset'.
        Data from https://github.com/sarnthil/unify-emotion-datasets.
        Args:
            file_path (str): path to the 'unified-dataset.jsonl' file
            include (list, optional): only rows whose 'source' is in this list are loaded. Defaults to ['grounded_emotions'].
            split_ratio (float, optional): fraction of data used for training when a dataset has no split of its own; the remainder is halved between validation and test. Defaults to 0.7.
            verbose (bool, optional): print messages for dropped sentences/classes during prep(). Defaults to False.
            first_label_only (bool, optional): keep only the first present emotion per row instead of one example per present emotion. Defaults to False.
        """
        # NOTE(review): `include` is a mutable default argument; it is only
        # read here, but callers sharing the default share the same list.
        self.file_path = file_path
        self.include = include
        self.split_ratio = split_ratio
        self.verbose = verbose
        self.first_label_only = first_label_only
        # Metadata rows (size/domain/classes) for the selected sources only.
        self.info = [row for row in unified_emotion_info() if row['source'] in self.include]
    def prep(self, text_tokenizer=lambda x: x, text_tokenizer_kwargs=dict()):
        """Generates dataset from unified file.
        Populates self.datasets (source -> split -> label -> list of examples),
        self.source_lengths, self.label_map and self.inv_label_map as side
        effects; returns nothing.
        Args:
            text_tokenizer (callable, optional): function that processes a line of text. Defaults to identity (raw text).
            text_tokenizer_kwargs (dict, optional): extra keyword arguments forwarded to text_tokenizer.
        """
        datasets = defaultdict(lambda: defaultdict(lambda: defaultdict(list)))
        source_lengths = dict()
        label_map = defaultdict()
        with jsonlines.open(self.file_path) as file:
            for i, line in enumerate(file.iter()):
                # Skip line if not in include list
                source = line['source']
                if not source in self.include:
                    continue
                # Give 'all' split if data doesn't have its own train/test split
                split = 'all' if line.get('split', None) == None else line['split']
                # Give line a data specific id
                id = source_lengths.get(source, 0)
                # Convert the labels
                # Saves the mapping if this is the first line of a dataset
                labels = {k: v for k, v in sorted(line['emotions'].items())
                          if v != None}
                if id == 0:
                    label_map[source] = {k: i for i,
                                         (k, _) in enumerate(labels.items())}
                # All present emotions (labels > 1)
                present_emotions = [emotion for emotion,
                                    present in labels.items() if present > 0]
                #text = text_tokenizer(line['text'], **text_tokenizer_kwargs)
                text = line['text']
                # Ensure proper encoding
                try:
                    text = text.encode('latin-1').decode('utf8')
                except (UnicodeEncodeError, UnicodeDecodeError):
                    if self.verbose:
                        print("Removed sentence for bad encoding")
                    continue
                text = text_tokenizer(text, **text_tokenizer_kwargs)
                # If the tokenizer removes the text, carry on
                if text == None:
                    continue
                if isinstance(text, list):
                    text = ' '.join(text)
                # Ignore all remaining utf8 encodings and bring to 'plain' text
                text = text.encode('ascii', 'ignore').decode('ascii')
                # If more than 1 emotion is present, multiple examples are created
                if (not self.first_label_only):
                    for i, emotion in enumerate(present_emotions):
                        label = label_map[source][emotion]
                        datasets[source][split][label].append(
                            {'idx': id, 'labels': label, 'text': text})
                    source_lengths[source] = id + i + 1
                else:
                    label = label_map[source][present_emotions[0]]
                    datasets[source][split][label].append(
                        {'idx': id, 'labels': label, 'text': text})
                    source_lengths[source] = id + 1
        # Sources that only have an 'all' split get a per-class
        # train/validation/test split derived from self.split_ratio.
        for source in datasets.keys():
            if len(datasets[source].keys()) == 1 and 'all' in datasets[source].keys():
                class_lengths = {k: len(datasets[source]['all'][k])
                                 for k in datasets[source]['all'].keys()}
                for c, l in class_lengths.items():
                    train_l = int(self.split_ratio * l)
                    datasets[source]['train'][c] = datasets[source]['all'][c][:train_l]
                    val_l = train_l + int((1 - self.split_ratio) * l * 0.5)
                    datasets[source]['validation'][c] = datasets[source]['all'][c][train_l:val_l]
                    datasets[source]['test'][c] = datasets[source]['all'][c][val_l:]
                del datasets[source]['all']
        self.datasets = datasets
        self.source_lengths = source_lengths
        self.label_map = label_map
        self.inv_label_map = {source: {val: key for key,
                                       val in label_map[source].items()} for source in label_map.keys()}
        # Remove classes with limited data
        # (fewer than 96 train / 64 validation / 64 test examples).
        total_removed, total_data_removed = 0, 0
        removing = []
        for source in datasets.keys():
            n_classes = len(datasets[source]['train'].keys())
            for c in datasets[source]['train'].keys():
                train_size = len(datasets[source]['train'][c])
                val_size = len(datasets[source]['validation'][c])
                test_size = len(datasets[source]['test'][c])
                keep = (train_size >= 96 and val_size >= 64 and test_size >= 64)
                if (not keep):
                    if self.verbose:
                        print("Removed {:}/{:} for too little data |train|={}, |test|={}".
                              format(source, self.inv_label_map[source][c], train_size, test_size))
                    total_removed += 1
                    total_data_removed += train_size + test_size
                    self.source_lengths[source] -= train_size + test_size
                    removing.append((source, c))
        # Deletion is deferred to here so the dicts are not mutated while
        # being iterated above.
        for source, c in removing:
            del datasets[source]['train'][c]
            del datasets[source]['validation'][c]
            del datasets[source]['test'][c]
        if self.verbose:
            print("Removed a total of {:} classes and {:} examples.".format(
                total_removed, total_data_removed))
        for source in datasets.keys():
            assert len(datasets[source]['train'].keys()) >= 2, print(
                f"{source} has too few classes remaining.")
    @property
    def lens(self):
        """Lengths of the individual datasets
        """
        return self.source_lengths
    def __getitem__(self, i):
        """Return the per-split data dict for source *i*, or None if unknown."""
        return self.datasets.get(i, None)
    # Dead code below: retired dataloader factory kept as a bare string literal.
    """
    def get_dataloader(self, source_name, device, k=4, tokenizer=None, shuffle=True):
        Generates a dataloader from a specified dataset.
        See MetaStratifiedLoader for more.
        Args:
            source_name(str): a dataset from one of the processed ones.
            k(int, optional): the k-shot. Defaults to 4.
            tokenizer(callable, optional): function that processes list of strings to PyTorch tensor. Defaults to None.
            shuffle(boolean, optional): whether or not to shuffle the train data. Defaults to True.
        Returns:
            dataloaders: iterable of data_loaders. First is train, last is test.
        data_loaders = []
        for split in self.datasets[source_name].keys():
            source_dict = self.datasets[source_name]
            dataloader = MetaStratifiedLoader(source_dict=source_dict,
                                              split=split,
                                              class_to_int=self.label_map[source_name],
                                              k=k,
                                              tokenizer=tokenizer,
                                              shuffle=shuffle,
                                              device=device
                                              )
            if split == 'train':
                data_loaders.insert(0, dataloader)
            else:
                data_loaders.append(dataloader)
        return data_loaders
    """
| 1,555 | 0 | 50 |
7e69166a97cfa5210984f7711d7e1be3be68117e | 1,864 | py | Python | main.py | heyitswither/Discord-Matrix | b566c6413f787849ea6e805c80e31612b2bad6cd | [
"MIT"
] | null | null | null | main.py | heyitswither/Discord-Matrix | b566c6413f787849ea6e805c80e31612b2bad6cd | [
"MIT"
] | null | null | null | main.py | heyitswither/Discord-Matrix | b566c6413f787849ea6e805c80e31612b2bad6cd | [
"MIT"
] | null | null | null | from matrix_bot_api.matrix_bot_api import MatrixBotAPI
from matrix_bot_api.mhandler import MHandler
import discord
import yaml
config = yaml.safe_load(open('config.yml'))
matrix_client = MatrixBotAPI(config.get('matrix').get('username'), config.get('matrix').get('password'), config.get('matrix').get('homeserver'))
discord_client = DiscordClient()
discord_client.run(config.get('discord').get('token'))
| 43.348837 | 144 | 0.690451 | from matrix_bot_api.matrix_bot_api import MatrixBotAPI
from matrix_bot_api.mhandler import MHandler
import discord
import yaml
config = yaml.safe_load(open('config.yml'))
class DiscordClient(discord.Client):
    """Discord side of the bridge: relays messages from the watched Discord
    channel into the watched Matrix room, and boots the Matrix client."""
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # Run the Matrix client setup on the same event loop Discord uses.
        self.bg_task = self.loop.create_task(self.run_matrix())
    async def on_ready(self):
        # Resolve the bridged channel once the gateway connection is up.
        self.watching = self.get_channel(config.get('discord').get('channel_id'))
        print("~~~~~~~~~~~~~~~~~~~~~~~~~~~~\nDiscord client ready!")
        print(f"{self.user.name}#{self.user.discriminator}")
        print(f"Watching #{self.watching.name} in {self.watching.guild.name}")
    async def on_message(self, message):
        # Only bridge the watched channel, and never echo our own messages.
        if not message.channel == self.watching: return
        if message.author == discord_client.user: return
        matrix_client.watching.send_text(f"{message.author.name}: {message.content}")
    async def run_matrix(self):
        """Register the Matrix->Discord relay handler and start polling."""
        matrix_client.add_handler(MHandler(return_true, matrix_on_message))
        matrix_client.start_polling()
        matrix_client.watching = matrix_client.client.rooms.get(config.get('matrix').get('room_id'))
        print("~~~~~~~~~~~~~~~~~~~~~~~~~~~~\nMatrix client ready!")
        print(matrix_client.client.user_id)
        print(f"Watching {matrix_client.watching.name} in {matrix_client.client.hs}")
def return_true(room, event):
    # Catch-all match predicate for MHandler: accept every Matrix event.
    return True
def matrix_on_message(room, event):
    # Skip our own messages to avoid an echo loop, then relay the event's
    # sender and body into the watched Discord channel.
    if event['sender'] == matrix_client.client.user_id: return
    discord_client.loop.create_task(discord_client.watching.send(f"{event['sender']}: {event['content']['body']}"))
# Wire the two clients together; discord_client.run() owns the event loop
# and (via DiscordClient.__init__) also starts the Matrix side.
matrix_client = MatrixBotAPI(config.get('matrix').get('username'), config.get('matrix').get('password'), config.get('matrix').get('homeserver'))
discord_client = DiscordClient()
discord_client.run(config.get('discord').get('token'))
| 1,266 | 15 | 176 |
fbe8a1f8f0a1776d58881439c433f53f8f7188c9 | 3,667 | py | Python | face_one_shot_learing/dataset_multiplefaces.py | kornellewy/face_one_shot_learing | 4cd8c8b1807717f921853043858a6f7ad5259917 | [
"MIT"
] | null | null | null | face_one_shot_learing/dataset_multiplefaces.py | kornellewy/face_one_shot_learing | 4cd8c8b1807717f921853043858a6f7ad5259917 | [
"MIT"
] | null | null | null | face_one_shot_learing/dataset_multiplefaces.py | kornellewy/face_one_shot_learing | 4cd8c8b1807717f921853043858a6f7ad5259917 | [
"MIT"
] | null | null | null | import os
import cv2
from PIL import Image
import numpy as np
from random import randint, choice, sample
import albumentations as A
from albumentations.pytorch import ToTensorV2
import torch
from torch import nn
from torch import optim
from torchvision import datasets,transforms
from torchvision.utils import save_image
import torch.nn.functional as F
from torch.utils.data import Dataset, DataLoader
from utils import load_files_with_given_extension, random_idx_with_exclude
if __name__=='__main__':
img_transform = A.Compose(
[
A.Resize(100, 100),
A.RGBShift(),
A.HorizontalFlip(p=0.5),
A.VerticalFlip(p=0.5),
A.ShiftScaleRotate(scale_limit=0.1, rotate_limit=0, shift_limit=0.1, p=1, border_mode=0),
A.PadIfNeeded(min_height=100, min_width=100, always_apply=True, border_mode=0),
A.IAAAdditiveGaussianNoise(p=0.1),
A.IAAPerspective(p=0.1),
A.RandomBrightnessContrast(p=0.1),
A.Normalize(mean=(0.485, 0.456, 0.406), std=(0.229, 0.224, 0.225)),
ToTensorV2(),
])
dataset_path = 'dataset/'
dataset = DatasetMultipleFaces(dataset_path=dataset_path,
img_transform=img_transform)
image1, image2, class_idx = dataset[0]
print('image1.shape: ', image1.shape)
save_image(image1, 'image1.jpg')
print('image2.shape: ', image2.shape)
save_image(image2, 'image2.jpg')
print('class_idx: ', class_idx) | 45.271605 | 121 | 0.612762 | import os
import cv2
from PIL import Image
import numpy as np
from random import randint, choice, sample
import albumentations as A
from albumentations.pytorch import ToTensorV2
import torch
from torch import nn
from torch import optim
from torchvision import datasets,transforms
from torchvision.utils import save_image
import torch.nn.functional as F
from torch.utils.data import Dataset, DataLoader
from utils import load_files_with_given_extension, random_idx_with_exclude
class DatasetMultipleFaces(Dataset):
    """Siamese pair dataset over a folder-per-identity face directory.

    Each sample is (image1, image2, label) where label is 1.0 when both
    images were drawn from the same identity folder and 0.0 otherwise.
    """
    def __init__(self, dataset_path, img_transform=None):
        """
        Args:
            dataset_path (str): root dir with one subdirectory per identity.
            img_transform (callable, optional): albumentations-style
                transform, called as transform(image=...)['image'].
        """
        self.dataset_path = dataset_path
        self.all_images_paths = load_files_with_given_extension(dataset_path)
        self.img_transform = img_transform
        self.classes = [ f.path for f in os.scandir(dataset_path) if f.is_dir() ]
        self.images_by_classes = {class_path: load_files_with_given_extension(class_path) for class_path in self.classes}
    def __len__(self):
        return len(self.all_images_paths)
    def _load_random_image(self, class_path):
        """Pick a random file of this identity, read it as RGB, apply transform."""
        image_path = choice(self.images_by_classes[class_path])
        image = cv2.imread(image_path)
        image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
        if self.img_transform:
            image = self.img_transform(image=image)['image']
        return image
    def __getitem__(self, idx):
        # same = 1, different = 0. The pair is drawn at random, so `idx`
        # only drives the epoch length, not the sample content.
        class_idx = randint(0, 1)
        if class_idx == 1:
            class_path1 = class_path2 = choice(self.classes)
        else:
            class_path1, class_path2 = sample(population=self.classes, k=2)
        # The two branches previously duplicated the load/transform code;
        # _load_random_image keeps them in sync.
        image1 = self._load_random_image(class_path1)
        image2 = self._load_random_image(class_path2)
        return image1, image2, torch.Tensor([class_idx])
if __name__=='__main__':
    # Manual smoke test: build an augmentation pipeline and inspect one sample.
    img_transform = A.Compose(
        [
            A.Resize(100, 100),
            A.RGBShift(),
            A.HorizontalFlip(p=0.5),
            A.VerticalFlip(p=0.5),
            A.ShiftScaleRotate(scale_limit=0.1, rotate_limit=0, shift_limit=0.1, p=1, border_mode=0),
            A.PadIfNeeded(min_height=100, min_width=100, always_apply=True, border_mode=0),
            A.IAAAdditiveGaussianNoise(p=0.1),
            A.IAAPerspective(p=0.1),
            A.RandomBrightnessContrast(p=0.1),
            # Mean/std values match the usual ImageNet normalization constants.
            A.Normalize(mean=(0.485, 0.456, 0.406), std=(0.229, 0.224, 0.225)),
            ToTensorV2(),
        ])
    dataset_path = 'dataset/'
    dataset = DatasetMultipleFaces(dataset_path=dataset_path,
                                   img_transform=img_transform)
    image1, image2, class_idx = dataset[0]
    print('image1.shape: ', image1.shape)
    save_image(image1, 'image1.jpg')
    print('image2.shape: ', image2.shape)
    save_image(image2, 'image2.jpg')
    print('class_idx: ', class_idx)
a6bad8bbe3f547b619f58cb0a3c1a513a492db92 | 8,515 | py | Python | selvpcclient/util.py | selectel/python-selvpcclient | 99955064215c2be18b568e5e9b34f17087ec304f | [
"Apache-2.0"
] | 7 | 2017-07-15T12:44:23.000Z | 2020-03-24T09:45:11.000Z | selvpcclient/util.py | selectel/python-selvpcclient | 99955064215c2be18b568e5e9b34f17087ec304f | [
"Apache-2.0"
] | 13 | 2017-07-05T09:34:09.000Z | 2021-04-20T08:18:46.000Z | selvpcclient/util.py | selectel/python-selvpcclient | 99955064215c2be18b568e5e9b34f17087ec304f | [
"Apache-2.0"
] | 9 | 2017-06-29T13:51:35.000Z | 2021-06-26T21:00:49.000Z | import base64
import hashlib
import json
import logging
import requests
import os
import sys
import six
log = logging.getLogger(__name__)
SENSITIVE_HEADERS = ['X-Token']
FILES_EXTENSIONS = ("png", "jpg", "svg", "txt")
def resource_filter(func):
"""This decorator allows to you filter answer from RESOURCE.list() by
project_id and region.
Both params are optional and may be used separately.
Example:
selvpc --debug floatingip list
selvpc --debug floatingip list --project=UUID
selvpc --debug floatingip list --region=REGION
selvpc --debug floatingip list --project=UUID --region=REGION
client.subnets.list(project=UUID)
client.subnets.list(region=REGION)
client.subnets.list(project=UUID, region=REGION)
"""
return wrap
def confirm_action(action):
"""Func must be a take_action func."""
return wrap
def get_item_properties(item, fields, mixed_case_fields=(), formatters=None):
"""Return a tuple containing the item properties.
:param item: a single item resource (e.g. Server, Tenant, etc)
:param fields: tuple of strings with the desired field names
:param mixed_case_fields: tuple of field names to preserve case
:param formatters: dictionary mapping field names to callables
to format the values
"""
if formatters is None:
formatters = {}
row = []
for field in fields:
if field in formatters:
row.append(formatters[field](item))
else:
if field in mixed_case_fields:
field_name = field.replace(' ', '_')
else:
field_name = field.lower().replace(' ', '_')
if not hasattr(item, field_name) and isinstance(item, dict):
data = item[field_name]
else:
data = getattr(item, field_name, '')
if data is None:
data = ''
row.append(data)
return tuple(row)
def sort_list_of_dicts(list_, dict_key):
"""Sort list of dicts by dict key
:param list list_: List of dicts,
:param string dict_key: Dict key for sorting.
:rtype: list
"""
# NOTE: Python 3 introduced new rules for ordering comparisons:
# See detailed here (chapter ordering-comparisons)
# https://docs.python.org/release/3.0.1/whatsnew/3.0.html
items = []
for item in list_:
if item[dict_key] is None:
item[dict_key] = str()
items.append(item)
return sorted(items, key=lambda item: item[dict_key])
def build_url(*args):
"""Build URL by provided parts of url.
Also this method strip all right slashes.
:param args: Parts of url.
:rtype: str
"""
return "/".join([part.rstrip('/') for part in args])
def update_json_error_message(content):
"""Converts and capitalize JSON error to normal message.
:param str content: JSON-answer from server.
:rtype: str
"""
if 'error' in content:
try:
message = json.loads(content)['error']
return message.capitalize().replace('_', ' ')
except Exception:
return content
def try_parse_json(json_):
    """Converts the string representation of JSON to JSON.
    :param str json_: JSON in str representation.
    :rtype: :class:`dict` if converted successfully, otherwise False.
    """
    if not json_:
        return False
    try:
        parsed = json.loads(json_)
    except ValueError:
        return False
    return parsed
def is_url(data):
    """Checks if getting value is valid url and path exists."""
    # A HEAD request avoids downloading the body; any failure (bad URL,
    # DNS error, connection refused, ...) counts as "not a URL".
    try:
        r = requests.head(data)
    except Exception:
        return False
    return r.status_code == requests.codes.ok
def process_logo_by_url(url):
    """Download and encode image by url."""
    # Returns the raw response body base64-encoded (bytes); no
    # content-type or status validation is performed here.
    res = requests.get(url)
    encoded_logo = base64.b64encode(res.content)
    return encoded_logo
def process_theme_params(func):
"""This decorator allows to enter path to logo/url to logo
and adds hash to color value."""
return inner
def process_pair_params(func):
"""This decorator allows to enter path to ~/.ssh/id_rsa.pub or provide
id_rsa.pub as plain-text.
"""
return inner
| 28.289037 | 79 | 0.57569 | import base64
import functools
import hashlib
import json
import logging
import os
import sys

import requests
import six
log = logging.getLogger(__name__)
SENSITIVE_HEADERS = ['X-Token']
FILES_EXTENSIONS = ("png", "jpg", "svg", "txt")
def parse_headers(headers):
    """Parse ``"Name: value"`` strings into a dict.

    The previous implementation split on *every* colon and stripped all
    spaces, so values containing ':' (URLs, IPv6 literals) were truncated.
    Splitting on the first colon only keeps the whole value intact.

    :param list headers: strings such as ``"X-Token: abc"``; entries
        without a colon are ignored.
    :rtype: dict
    """
    result = {}
    for header in headers:
        if ':' not in header:
            continue
        name, _, value = header.partition(':')
        result[name.strip()] = value.strip()
    return result
def handle_http_error(func):
    """Decorator for CLI entry points: log any exception and exit with code 2.

    ``functools.wraps`` preserves the wrapped function's name/docstring,
    which the bare wrapper previously clobbered.
    """
    @functools.wraps(func)
    def wrap(*args, **kwargs):
        try:
            return func(*args, **kwargs)
        except Exception as err:
            log.error(err)
            sys.exit(2)
    return wrap
def resource_filter(func):
    """This decorator allows to you filter answer from RESOURCE.list() by
    project_id and region.
    Both params are optional and may be used separately.
    Example:
        selvpc --debug floatingip list
        selvpc --debug floatingip list --project=UUID
        selvpc --debug floatingip list --region=REGION
        selvpc --debug floatingip list --project=UUID --region=REGION
        client.subnets.list(project=UUID)
        client.subnets.list(region=REGION)
        client.subnets.list(project=UUID, region=REGION)
    """
    @functools.wraps(func)  # keep the wrapped function's name/docstring
    def wrap(*args, **kwargs):
        # The filter kwargs are consumed here so the wrapped API call
        # never sees them.
        project_id = kwargs.pop("project_id", None)
        region = kwargs.pop("region", None)
        resources = func(*args, **kwargs)
        if project_id:
            resources = [r for r in resources if r["project_id"] == project_id]
        if region:
            resources = [r for r in resources if r["region"] == region]
        return resources
    return wrap
def add_resource_filter_arguments(parser, add_region=True, add_project=True):
    """Attach the optional -p/--project and -r/--region filter options
    to an argparse parser; both default to None (no filtering)."""
    common = dict(required=False, default=None, type=str)
    if add_project:
        parser.add_argument('-p', '--project', dest="project_id", **common)
    if add_region:
        parser.add_argument('-r', '--region', **common)
def confirm_action(action):
    """Decorator factory guarding destructive take_action methods.

    Func must be a take_action func: ``args[1]`` is the parsed-argument
    namespace, which must carry a ``yes_i_really_want_to_<action>``
    attribute (added by the parser). The process exits unless it is set.
    ``functools.wraps`` preserves the wrapped function's metadata.
    """
    def wrap(func):
        @functools.wraps(func)
        def wrap(*args, **kwargs):
            if not hasattr(args[1], "yes_i_really_want_to_" + action):
                log.error("Please add confirm argument into parser.")
                sys.exit(-1)
            accept = getattr(args[1], "yes_i_really_want_to_" + action)
            if not accept:
                log.warning("Confirm action by --yes-i-really-want-to-%s",
                            action)
                sys.exit(-1)
            return func(*args, **kwargs)
        return wrap
    return wrap
def get_item_properties(item, fields, mixed_case_fields=(), formatters=None):
    """Return a tuple containing the item properties.

    :param item: a single item resource (e.g. Server, Tenant, etc)
    :param fields: tuple of strings with the desired field names
    :param mixed_case_fields: tuple of field names to preserve case
    :param formatters: dictionary mapping field names to callables
       to format the values
    """
    if formatters is None:
        formatters = {}
    row = []
    for field in fields:
        if field in formatters:
            row.append(formatters[field](item))
            continue
        # "Some Field" -> "some_field" unless case must be preserved.
        if field in mixed_case_fields:
            field_name = field.replace(' ', '_')
        else:
            field_name = field.lower().replace(' ', '_')
        if not hasattr(item, field_name) and isinstance(item, dict):
            # Mirror the getattr() fallback below: a missing dict key
            # yields '' instead of raising KeyError.
            data = item.get(field_name, '')
        else:
            data = getattr(item, field_name, '')
        if data is None:
            data = ''
        row.append(data)
    return tuple(row)
def sort_list_of_dicts(list_, dict_key):
    """Sort a list of dicts by *dict_key*, treating None values as ''.

    :param list list_: list of dicts (mutated: None values become '')
    :param string dict_key: key used for sorting
    :rtype: list
    """
    # Python 3 forbids ordering comparisons between str and None
    # (see the "ordering comparisons" chapter of the 3.0 whatsnew),
    # so normalize missing values to '' before sorting.
    for entry in list_:
        if entry[dict_key] is None:
            entry[dict_key] = ''
    return sorted(list_, key=lambda entry: entry[dict_key])
def build_url(*args):
    """Join URL fragments with '/', trimming trailing slashes from each part.

    :param args: parts of the URL
    :rtype: str
    """
    trimmed = (fragment.rstrip('/') for fragment in args)
    return '/'.join(trimmed)
def update_json_error_message(content):
    """Convert and capitalize a JSON error into a readable message.

    :param str content: JSON answer from the server.
    :rtype: str
    """
    if 'error' in content:
        try:
            message = json.loads(content)['error']
            return message.capitalize().replace('_', ' ')
        except Exception:
            return content
    # No error marker: hand the payload back unchanged instead of
    # implicitly returning None.
    return content
def try_parse_json(json_):
    """Parse a JSON string without raising.

    :param str json_: JSON in its string representation.
    :rtype: the parsed value on success, otherwise False.
    """
    if json_:
        try:
            return json.loads(json_)
        except ValueError:
            pass
    return False
def make_curl(url, method, data):
    """Build an equivalent ``curl`` command line for a request.

    Values of SENSITIVE_HEADERS are replaced with "{SHA1}<digest>" so
    the command can be logged without leaking secrets.
    """
    string_parts = ['curl -i', ' -X{} "{}"'.format(method, url)]
    # dict.items() works on both Python 2 and 3; six.iteritems is unneeded.
    for key, value in data.get('headers', {}).items():
        if key in SENSITIVE_HEADERS:
            # Start from b'' so hashlib accepts empty/None values too:
            # on Python 3, hashlib.sha1('') raises TypeError (str input).
            v = b''
            if value:
                v = value.encode('utf-8')
            value = "{SHA1}%s" % hashlib.sha1(v).hexdigest()
        string_parts.append(' -H "%s: %s"' % (key, value))
    if data.get('json', None):
        string_parts.append(" -d '%s'" % (json.dumps(data['json'])))
    return "".join(string_parts)
def process_partial_quotas(resp_ok):
    """Group per-region quota entries by their "resource" field.

    Each entry keeps all its keys except "resource", which becomes the
    grouping key under the returned {"quotas": {...}} mapping.
    """
    quotas = {}
    for entry in resp_ok:
        stripped = {key: entry[key] for key in entry if key != "resource"}
        quotas.setdefault(entry["resource"], []).append(stripped)
    return {"quotas": quotas}
def is_url(data):
    """Return True when *data* is a reachable URL (HEAD answers 200)."""
    try:
        response = requests.head(data)
    except Exception:
        # Invalid URL / network failure -> not a usable URL.
        return False
    return response.status_code == requests.codes.ok
def process_logo_by_url(url):
    """Download the image at *url* and return it base64-encoded."""
    response = requests.get(url)
    return base64.b64encode(response.content)
def process_theme_params(func):
    """Decorator normalizing theme kwargs before calling *func*.

    Prefixes ``color``/``brand_color`` with '#' when missing, and turns
    the ``logo`` kwarg (local file path or URL) into base64-encoded
    data (raw contents for .txt files).
    """
    def inner(*args, **kwargs):
        for key in ("color", "brand_color"):
            value = kwargs.get(key, None)
            if value and not value.startswith("#"):
                kwargs[key] = "#" + value
        path = kwargs.get("logo", None)
        if path:
            if os.path.isfile(path) and path.endswith(FILES_EXTENSIONS):
                with open(path, "rb") as image_file:
                    raw = image_file.read()
                # .txt files already hold encoded data; pass them through.
                kwargs["logo"] = raw if path.endswith("txt") else base64.b64encode(raw)
            elif is_url(path):
                kwargs["logo"] = process_logo_by_url(path)
            else:
                raise Exception("Invalid path/url or file")
        return func(*args, **kwargs)
    return inner
def process_pair_params(func):
    """Decorator accepting a public key as a file path or plain text.

    When ``kwargs["keypair"]["keypair"]["public_key"]`` names an
    existing file (e.g. ~/.ssh/id_rsa.pub), it is replaced by the
    file's contents with trailing whitespace stripped.
    """
    def inner(*args, **kwargs):
        key_info = kwargs["keypair"]["keypair"]
        candidate = key_info["public_key"]
        if os.path.isfile(candidate):
            with open(candidate, "r") as stream:
                key_info["public_key"] = stream.read().rstrip()
        return func(*args, **kwargs)
    return inner
def convert_to_short(logo_b64):
    """Abbreviate long base64 payloads to "<head> ... <tail>" for display."""
    if len(logo_b64) < 50:
        return logo_b64
    return logo_b64[:15] + ' ... ' + logo_b64[-15:]
| 4,045 | 0 | 246 |
e01203521ab7224d47fd68eef13a0101657fdec3 | 3,551 | py | Python | lib/galaxy/util/script.py | rikeshi/galaxy | c536a877e4a9b3d12aa0d00fd4d5e705109a0d0a | [
"CC-BY-3.0"
] | 4 | 2015-05-12T20:36:41.000Z | 2017-06-26T15:34:02.000Z | lib/galaxy/util/script.py | rikeshi/galaxy | c536a877e4a9b3d12aa0d00fd4d5e705109a0d0a | [
"CC-BY-3.0"
] | 52 | 2015-03-16T14:02:14.000Z | 2021-12-24T09:50:23.000Z | lib/galaxy/util/script.py | rikeshi/galaxy | c536a877e4a9b3d12aa0d00fd4d5e705109a0d0a | [
"CC-BY-3.0"
] | 7 | 2016-11-03T19:11:01.000Z | 2020-05-11T14:23:52.000Z | """Utilities for Galaxy scripts
"""
import argparse
import os
import sys
from galaxy.util.properties import find_config_file, load_app_properties
DESCRIPTION = None
ACTIONS = None
ARGUMENTS = None
DEFAULT_ACTION = None
ARG_HELP_CONFIG_FILE = """
Galaxy config file (defaults to $GALAXY_ROOT/config/galaxy.yml if that file exists
or else to ./config/galaxy.ini if that exists). If this isn't set on the
command line it can be set with the environment variable GALAXY_CONFIG_FILE.
"""
# ARG_HELP_CONFIG_SECTION = """
# Section containing application configuration in the target config file specified with
# -c/--config-file. This defaults to 'galaxy' for YAML/JSON configuration files and 'main'
# with 'app:' prepended for INI. If this isn't set on the command line it can be set with
# the environment variable GALAXY_CONFIG_SECTION.
# """
def main(argv=None):
    """Entry point for conversion process."""
    if argv is None:
        argv = sys.argv[1:]
    args = _arg_parser().parse_args(argv)
    # Resolve the Galaxy config file/section from args/env and load it.
    kwargs = app_properties_from_args(args)
    action = args.action
    # Dispatch to the handler registered for the requested action.
    action_func = ACTIONS[action]
    action_func(args, kwargs)
| 39.455556 | 118 | 0.713039 | """Utilities for Galaxy scripts
"""
import argparse
import os
import sys
from galaxy.util.properties import find_config_file, load_app_properties
DESCRIPTION = None
ACTIONS = None
ARGUMENTS = None
DEFAULT_ACTION = None
ARG_HELP_CONFIG_FILE = """
Galaxy config file (defaults to $GALAXY_ROOT/config/galaxy.yml if that file exists
or else to ./config/galaxy.ini if that exists). If this isn't set on the
command line it can be set with the environment variable GALAXY_CONFIG_FILE.
"""
# ARG_HELP_CONFIG_SECTION = """
# Section containing application configuration in the target config file specified with
# -c/--config-file. This defaults to 'galaxy' for YAML/JSON configuration files and 'main'
# with 'app:' prepended for INI. If this isn't set on the command line it can be set with
# the environment variable GALAXY_CONFIG_SECTION.
# """
def main_factory(description=None, actions=None, arguments=None, default_action=None):
    """Configure the module-level CLI state and return the shared ``main``.

    :param actions: mapping of action name -> callable(args, app_kwargs)
    :param arguments: list of (args, kwargs) tuples forwarded to
        parser.add_argument for script-specific options
    :param default_action: action used when none is given on the CLI
    """
    global DESCRIPTION, ACTIONS, ARGUMENTS, DEFAULT_ACTION
    DESCRIPTION = description
    ACTIONS = actions or {}
    ARGUMENTS = arguments or []
    DEFAULT_ACTION = default_action
    return main
def main(argv=None):
    """Entry point for conversion process."""
    argv = sys.argv[1:] if argv is None else argv
    parsed = _arg_parser().parse_args(argv)
    # Resolve and load the application properties from args/environment.
    app_kwargs = app_properties_from_args(parsed)
    # Dispatch to the registered handler for the chosen action.
    handler = ACTIONS[parsed.action]
    handler(parsed, app_kwargs)
def app_properties_from_args(args, legacy_config_override=None, app=None):
    """Locate the config file for *args* and load the app's properties."""
    config_file = config_file_from_args(
        args, legacy_config_override=legacy_config_override, app=app)
    section = getattr(args, "config_section", None)
    return load_app_properties(config_file=config_file, config_section=section)
def config_file_from_args(args, legacy_config_override=None, app=None):
    """Pick the config file: explicit override, then CLI arg, then discovery."""
    target_app = app or getattr(args, "app", "galaxy")
    # find_config_file() only runs when neither explicit source is set.
    return legacy_config_override or args.config_file or find_config_file(target_app)
def populate_config_args(parser):
    """Add the -c/--config-file and --config-section options to *parser*.

    Both "--config" and "--config-file" are accepted because different
    scripts historically used different spellings. Options not found in
    the chosen file can have defaults overridden via
    GALAXY_CONFIG_OPTION_NAME, and file-specified options overridden via
    GALAXY_CONFIG_OVERRIDE_OPTION_NAME.
    """
    parser.add_argument(
        "-c", "--config-file", "--config",
        default=os.environ.get('GALAXY_CONFIG_FILE', None),
        help=ARG_HELP_CONFIG_FILE)
    parser.add_argument(
        "--config-section",
        default=os.environ.get('GALAXY_CONFIG_SECTION', None),
        # Hidden from --help; see the ARG_HELP_CONFIG_SECTION comment above.
        help=argparse.SUPPRESS)
def _arg_parser():
    # Build the shared ArgumentParser: one positional ACTION plus the
    # config-file/app options and any script-specific extras.
    parser = argparse.ArgumentParser(description=DESCRIPTION)
    # ACTION is optional (nargs='?') only when a default action exists.
    parser.add_argument('action', metavar='ACTION', type=str,
                        choices=list(ACTIONS.keys()),
                        default=DEFAULT_ACTION,
                        nargs='?' if DEFAULT_ACTION is not None else None,
                        help='action to perform')
    populate_config_args(parser)
    parser.add_argument("--app",
                        default=os.environ.get('GALAXY_APP', 'galaxy'))
    # Extra arguments registered through main_factory(arguments=...).
    for argument in ARGUMENTS:
        parser.add_argument(*argument[0], **argument[1])
    return parser
| 2,294 | 0 | 115 |
7c9d732fb03b774eb87f41e39cc63dcae1a824dc | 10,106 | py | Python | ic3po/utils.py | will62794/ic3po | 56d355fd69e15f99cd778d1d5c8f63295fd1baba | [
"Apache-2.0"
] | null | null | null | ic3po/utils.py | will62794/ic3po | 56d355fd69e15f99cd778d1d5c8f63295fd1baba | [
"Apache-2.0"
] | null | null | null | ic3po/utils.py | will62794/ic3po | 56d355fd69e15f99cd778d1d5c8f63295fd1baba | [
"Apache-2.0"
] | null | null | null | # ------------------------------------------
# IC3PO: IC3 for Proving Protocol Properties
# ------------------------------------------
# Copyright (c) 2021 Aman Goel and Karem Sakallah, University of Michigan.
# All rights reserved.
#
# Author: Aman Goel (amangoel@umich.edu), University of Michigan
# ------------------------------------------
from __future__ import print_function
import sys
import time
import common
import math
from pysmt.pretty_printer import pretty_serialize
from pysmt.shortcuts import *
times = []
start_time = 0
SORT_SUFFIX = ":e"
# def print_smt2(self, cl):
# solver = Solver(name="z3")
# solver.add_assertion(cl)
# solver.solve()
# cl_smt2 = solver.to_smt2()
# print(cl_smt2)
#
# # print(cl)
#
# def print_smt2_set(self, inv_set):
# print("Proof certificate (SMT-LIB): #%d" % len(inv_set))
# print("-------------------------------------------------")
# count = 0
# for cl in inv_set:
# count += 1
# print("invariant [ic3po_%d]\t" % count, end='')
# self.print_smt2(cl)
# print("-------------------------------------------------")
| 29.899408 | 76 | 0.552246 | # ------------------------------------------
# IC3PO: IC3 for Proving Protocol Properties
# ------------------------------------------
# Copyright (c) 2021 Aman Goel and Karem Sakallah, University of Michigan.
# All rights reserved.
#
# Author: Aman Goel (amangoel@umich.edu), University of Michigan
# ------------------------------------------
from __future__ import print_function
import sys
import time
import common
import math
from pysmt.pretty_printer import pretty_serialize
from pysmt.shortcuts import *
times = []
start_time = 0
SORT_SUFFIX = ":e"
def eprint(*args, **kwargs):
    """print() that writes to stderr instead of stdout (same arguments)."""
    print(*args, file=sys.stderr, **kwargs)
def print_stat(key, val):
    """Append a "key:<TAB>val" line to the shared stats file (common.gopts.statsF)."""
    common.gopts.statsF.write("%s:\t%s\n" % (key, val))
def print_stat_stdout(key, val, prefix="\t"):
    """Echo a "key:<TAB>val" statistic line to stdout, prefixed by *prefix*."""
    print("{}{}:\t{}".format(prefix, key, val))
def push_time():
    """Push the current wall-clock time onto the module-level stack."""
    global times
    times.append(time.time())
def pop_time():
    """Pop the most recent push_time() mark; return seconds elapsed since it."""
    global times
    assert(len(times) != 0)
    return time.time() - times.pop()
def elapsed_time():
    """Seconds elapsed since the module-level start_time (set elsewhere)."""
    global start_time
    # Local name deliberately shadows the function; only used for the return.
    elapsed_time = time.time() - start_time
    return elapsed_time
def time_str():
    """Format elapsed_time() as an "@ <seconds>s " log prefix."""
    return "@ %5.0fs " % elapsed_time()
def pretty_print_str(cl, mode=1):
    """Serialize formula *cl* with cosmetic variable renaming.

    Quantified variables whose names end in digits are renamed to
    "<SortInitial><digits>" (uniquified with a counter on collision).
    Free variables lose leading underscores and a trailing SORT_SUFFIX
    before printing.  ``mode`` is accepted but currently not forwarded
    (see the commented-out call at the bottom).
    """
    subs = {}
    qvars = cl.get_quantifier_variables()
    nameset = set()
    count = 0
    for v in qvars:
        name = str(v)
        # rstrip leaves the non-digit prefix; keep only the digit tail.
        suffix = name.rstrip('1234567890')
        name = name[len(suffix):]
        if len(name) != 0:
            vs = v.symbol_type()
            # e.g. sort "node" + var "...3" -> "N3".
            n = str(vs)[0].upper() + name
            if n in nameset:
                count += 1
                n = n + "_" + str(count)
            nameset.add(n)
            subs[v] = n
    fvars = cl.get_free_variables()
    for v in fvars:
        name = str(v)
        name = name.lstrip('_')
        suffix = name.rstrip('1234567890')
        if len(suffix) != 0:
            tmpName = name[:len(suffix)]
            # NOTE: [:-2] hard-codes len(SORT_SUFFIX) == 2 (":e").
            if tmpName.endswith(SORT_SUFFIX):
                name = tmpName[:-2]
            subs[v] = name
    # return pretty_serialize(cl, mode=mode, subs=subs)
    return pretty_serialize(cl, subs=subs)
def pretty_print(cl, mode=1):
    """Print the prettified serialization of formula *cl* to stdout."""
    print(pretty_print_str(cl, mode))
def pretty_print_inv_set(inv_set, comment=""):
    """Print every clause in *inv_set* as a numbered "[<n>_ic3po]" invariant."""
    print("### %s: #%d" % (comment, len(inv_set)))
    for index, clause in enumerate(inv_set, start=1):
        print("invariant [%d_ic3po]\t" % index, end='')
        pretty_print(clause)
    print("###\n")
    sys.stdout.flush()
def pretty_print_inv(inv_list, comment, suffix=""):
    """Print (label, clause) pairs as "invariant [<label><suffix>]" lines."""
    print("### %s: #%d" % (comment, len(inv_list)))
    for label, cl in inv_list:
        print("invariant [%s%s]\t" % (label, suffix), end='')
        pretty_print(cl)
    print("###\n")
    sys.stdout.flush()
def pretty_print_inv_file(invF, inv_list, comment="Proof certificate"):
    """Write (label, clause) pairs to file object *invF* with "ic3po_" labels."""
    print("### %s: #%d" % (comment, len(inv_list)), file=invF)
    for label, cl in inv_list:
        print("invariant [ic3po_%s]\t" % label, end='', file=invF)
        print(pretty_print_str(cl, 1), file=invF)
    print("###", file=invF)
# def print_smt2(self, cl):
# solver = Solver(name="z3")
# solver.add_assertion(cl)
# solver.solve()
# cl_smt2 = solver.to_smt2()
# print(cl_smt2)
#
# # print(cl)
#
# def print_smt2_set(self, inv_set):
# print("Proof certificate (SMT-LIB): #%d" % len(inv_set))
# print("-------------------------------------------------")
# count = 0
# for cl in inv_set:
# count += 1
# print("invariant [ic3po_%d]\t" % count, end='')
# self.print_smt2(cl)
# print("-------------------------------------------------")
def pretty_print_set(s, mode=1):
    """Render an iterable of formulas as "[ f1, f2, ]" (mode is unused)."""
    parts = ["[ "]
    for entry in s:
        parts.append(pretty_serialize(entry))
        parts.append(", ")
    parts.append("]")
    return "".join(parts)
def substitute_sort(f, subs, suffix, f2i=False):
if f2i:
name = f.symbol_name()
if name.endswith(suffix):
name = name[:len(name) - len(suffix)]
else:
name = f.symbol_name() + suffix
s_type = f.symbol_type()
rett = s_type
args = []
if s_type.is_function_type():
rett = s_type.return_type
if rett in subs:
rett = subs[rett]
i = 0
for paramt in s_type.param_types:
i += 1
if paramt in subs:
args.append(subs[paramt])
else:
args.append(paramt)
ft = FunctionType(rett, tuple(args))
else:
if rett in subs:
rett = subs[rett]
ft = rett
res = Symbol(name, ft)
return res
def flatten_and(formula):
    """Return the set of leaf conjuncts of a (possibly nested) AND."""
    if not formula.is_and():
        return {formula}
    leaves = set()
    for operand in formula.args():
        leaves |= flatten_and(operand)
    return leaves
def flatten_cube(cube):
    """Return the set of literals of a cube, stripping an Exists wrapper."""
    body = cube
    if body.is_exists():
        body = body.args()[0]
    if not body.is_and():
        return {body}
    literals = set()
    for operand in body.args():
        literals |= flatten_cube(operand)
    return literals
def flatten_or(cube):
    """Return the set of disjuncts of a (possibly nested) OR."""
    if not cube.is_or():
        return {cube}
    disjuncts = set()
    for operand in cube.args():
        disjuncts |= flatten_or(operand)
    return disjuncts
def assert_permanent(solver, formulae):
    """Assert And(formulae) and push, so later pop() calls keep the assertion."""
    solver.add_assertion(And(formulae))
    solver.push()
def count_quantifiers(formula, pol=True, inF=0, inE=0):
    """Count effectively-universal and effectively-existential variables.

    Recursively walks *formula* tracking polarity: a forall in positive
    (or exists in negative) position counts toward outF; an exists in
    positive (or forall in negative) position counts toward outE.

    :param pol: current polarity (True = positive occurrence)
    :param inF: running universal total carried through the recursion
    :param inE: running existential total carried through the recursion
    :returns: (outF, outE) updated totals
    """
    outF = inF
    outE = inE
    # print("formula: %s %s %d %d" % (formula, pol, outF, outE))
    if formula.is_not():
        # Negation flips polarity.
        outF, outE = count_quantifiers(formula.arg(0), not pol, outF, outE)
        return (outF, outE)
    if formula.is_implies():
        # Antecedent occurs negatively, consequent positively.
        outF, outE = count_quantifiers(formula.arg(0), not pol, outF, outE)
        outF, outE = count_quantifiers(formula.arg(1), pol, outF, outE)
        return (outF, outE)
    is_e = formula.is_exists()
    is_a = formula.is_forall()
    if (is_e and pol) or (is_a and not pol):
        qvars = formula.quantifier_vars()
        outE += len(qvars)
    if (is_e and not pol) or (is_a and pol):
        qvars = formula.quantifier_vars()
        outF += len(qvars)
    for arg in formula.args():
        outF, outE = count_quantifiers(arg, pol, outF, outE)
    # ITE conditions and IFF/EQUALS operands were already visited with the
    # current polarity above; visit them again with the opposite polarity
    # so quantifiers in these non-monotone positions count for both.
    if formula.is_ite():
        outF, outE = count_quantifiers(formula.arg(0), not pol, outF, outE)
    if formula.is_iff() or formula.is_equals():
        outF, outE = count_quantifiers(formula.arg(0), not pol, outF, outE)
        outF, outE = count_quantifiers(formula.arg(1), not pol, outF, outE)
    return (outF, outE)
def count_and(formula):
    """Number of conjuncts after normalizing and flattening nested ANDs."""
    f = And(formula)
    flat = flatten_and(f)
    return len(flat)
def formula_cost(formula, pol=True, inC=0):
    """Heuristic complexity score of *formula*.

    Counts quantified variables with polarity-aware weights: an
    effectively-existential quantifier appearing after earlier
    quantifiers (inC > 0) is weighted 100x, an effectively-universal
    one after substantial cost (inC > 100) is weighted 10x.

    :param pol: current polarity of the traversal
    :param inC: running cost accumulator
    :returns: updated integer cost
    """
    factor = 1
    outC = inC
    # print("formula: %s %s %d %d" % (formula, pol, outF, outE))
    if formula.is_not():
        # Negation flips polarity.
        outC = formula_cost(formula.arg(0), not pol, outC)
        return outC
    if formula.is_implies():
        # Antecedent is a negative position, consequent positive.
        outC = formula_cost(formula.arg(0), not pol, outC)
        outC = formula_cost(formula.arg(1), pol, outC)
        return outC
    is_e = formula.is_exists()
    is_a = formula.is_forall()
    if (is_e and pol) or (is_a and not pol):
        # Effectively existential quantifier.
        qvars = formula.quantifier_vars()
        if inC > 0:
            factor = 100
        outC += factor*len(qvars)
    if (is_e and not pol) or (is_a and pol):
        # Effectively universal quantifier.
        qvars = formula.quantifier_vars()
        if inC > 100:
            factor = 10
        outC += factor*len(qvars)
    for arg in formula.args():
        outC = formula_cost(arg, pol, outC)
    # ITE conditions and IFF/EQUALS operands are non-monotone positions:
    # add their cost under the flipped polarity as well.
    if formula.is_ite():
        outC = formula_cost(formula.arg(0), not pol, outC)
    if formula.is_iff() or formula.is_equals():
        outC = formula_cost(formula.arg(0), not pol, outC)
        outC = formula_cost(formula.arg(1), not pol, outC)
    return outC
def binom(n, k):
    """Binomial coefficient C(n, k) via exact integer factorials."""
    return math.factorial(n) // (math.factorial(k) * math.factorial(n - k))
def num_majority(n):
    """C(n, ceil((n+1)/2)): number of subsets of minimal majority size."""
    return binom(n, math.ceil((n+1.0)/2.0))
def substituteDefinitions(formula, defMap, mode=0):
    """Rebuild *formula* bottom-up, inlining definitions from *defMap*.

    defMap maps a definition name (str) to a 4-entry tuple where
    entry[1] holds the formal arguments and entry[-1] the right-hand
    side.  Function applications found in defMap are replaced by the
    RHS with actuals substituted for formals; bare symbols are replaced
    only when mode == 0.  Note the recursive calls drop *mode*, so
    children are always processed with the default mode 0.
    """
    # print("in: %s" % formula)
    args = []
    # First rewrite every child, then rebuild this node from the results.
    for ch in formula.args():
        chNew = substituteDefinitions(ch, defMap)
        args.append(chNew)
    # print("curr: %s" % formula)
    # print("args: %s" % args)
    if formula.is_exists():
        qvars = formula.quantifier_vars()
        return Exists(qvars, args[0])
    if formula.is_forall():
        qvars = formula.quantifier_vars()
        return ForAll(qvars, args[0])
    if formula.is_not():
        return Not(args[0])
    if formula.is_implies():
        return Implies(args[0], args[1])
    if formula.is_ite():
        return Ite(args[0], args[1], args[2])
    if formula.is_iff():
        return Iff(args[0], args[1])
    if formula.is_equals():
        return Equals(args[0], args[1])
    if formula.is_and():
        return And(args)
    if formula.is_or():
        return Or(args)
    if formula.is_function_application():
        formulaType = formula.function_name()
        if str(formulaType) in defMap:
            entry = defMap[str(formulaType)]
            assert(len(entry) == 4)
            largs = entry[1]
            rhs = entry[-1]
            subs = {}
            # Map each formal parameter to the rewritten actual argument.
            for idx, v in enumerate(largs):
                subs[v] = args[idx]
                # print("%s becomes %s" % (v, subs[v]))
            # print("rhs: %s" % rhs)
            rhs = rhs.simple_substitute(subs)
            # print("rhsNew: %s" % rhs)
            return rhs
        # Unknown function: rebuild it over the rewritten arguments.
        if len(args) != 0:
            return Function(formulaType, args)
    if (mode == 0) and formula.is_symbol():
        formulaType = formula
        if str(formulaType) in defMap:
            entry = defMap[str(formulaType)]
            assert(len(entry) == 4)
            rhs = entry[-1]
            # print("rhsNew: %s" % rhs)
            return rhs
        return formula
    return formula
| 8,310 | 0 | 588 |
12162cb891dd942d662731a380e3c5db9aedddcc | 1,989 | py | Python | src/models/metrics.py | AC297R-Wayfair-NLP/wayfair_nlp_public | 9d3628f14f0e3e75f386a2c2f63aed93681ef686 | [
"MIT"
] | 2 | 2021-01-11T21:16:54.000Z | 2021-12-16T16:39:14.000Z | src/models/metrics.py | AC297R-Wayfair-NLP/wayfair_nlp_public | 9d3628f14f0e3e75f386a2c2f63aed93681ef686 | [
"MIT"
] | null | null | null | src/models/metrics.py | AC297R-Wayfair-NLP/wayfair_nlp_public | 9d3628f14f0e3e75f386a2c2f63aed93681ef686 | [
"MIT"
] | null | null | null | import tensorflow as tf
import numpy as np
# Asymmetric mean squared error
def bootstrap(full_model, non_nlp_model, X_full, y, score_func, n_boot=100, nlp_cols=None):
"""Resamples X to calculate `n_boot` pairs of full and non-nlp model scores
Args:
full_model (model): must have .predict method
non_nlp_model (model): must have .predict method
X_full (pd.DataFrame): full X dataframe including NLP columns
y (array-like): target variables
score_func (function): must have argument `score_func(y_true, y_pred)`
n_boot (int): number of bootstrap iterations
nlp_cols (list): list of NLP columns. See code for default value
"""
if nlp_cols is None:
nlp_cols = ['compound', 'emb1', 'emb10', 'emb11', 'emb12', 'emb13', 'emb14',
'emb15', 'emb16', 'emb2', 'emb3', 'emb4', 'emb5', 'emb6', 'emb7',
'emb8', 'emb9', 'neg', 'neu', 'pos', 'subjectivity', 'topic_18',
'topic_6']
# get predictions
X_non_nlp = X_full.drop(nlp_cols, axis=1)
y_pred_full = full_model.predict(X_full)
y_pred_non_nlp = non_nlp_model.predict(X_non_nlp)
X_non_nlp = np.array(X_non_nlp)
# resample test set
full_scores = []
non_nlp_scores = []
for i in range(n_boot):
boot_idxs = np.random.choice(X_full.shape[0], size=X_full.shape[0], replace=True)
X_boot = X_full.iloc[boot_idxs]
y_true_boot = y.iloc[boot_idxs]
y_pred_full_boot = y_pred_full[boot_idxs]
y_pred_non_nlp_boot = y_pred_non_nlp[boot_idxs]
full_scores.append(score_func(y_true_boot, y_pred_full_boot))
non_nlp_scores.append(score_func(y_true_boot, y_pred_non_nlp_boot))
return np.array(full_scores), np.array(non_nlp_scores) | 42.319149 | 91 | 0.668175 | import tensorflow as tf
import numpy as np
# Asymmetric mean squared error
def asymmetric_mse(y_actual, y_pred):
    """Asymmetric mean squared error loss.

    Squared error scaled by (sign(diff) + 0.3)^2, so over-predictions
    (y_pred > y_true, factor 1.69) are penalized more heavily than
    under-predictions (factor 0.49).
    """
    # Cast once and use the cast tensor: the original computed y_true but
    # then mixed dtypes by reusing y_actual in the arithmetic.
    y_true = tf.cast(y_actual, y_pred.dtype)
    diff = y_pred - y_true
    return tf.square(diff) * tf.square(tf.sign(diff) + 0.3)
def bootstrap(full_model, non_nlp_model, X_full, y, score_func, n_boot=100, nlp_cols=None):
    """Bootstrap-resample the evaluation set to compare two models.

    Args:
        full_model (model): must have a .predict method
        non_nlp_model (model): must have a .predict method
        X_full (pd.DataFrame): full feature frame including NLP columns
        y (array-like): target variable (pandas indexable)
        score_func (function): called as score_func(y_true, y_pred)
        n_boot (int): number of bootstrap resamples
        nlp_cols (list): NLP column names to drop for the non-NLP model
            (defaults to the standard embedding/sentiment/topic set)

    Returns:
        (np.ndarray, np.ndarray): n_boot scores for the full and
        non-NLP models respectively.
    """
    if nlp_cols is None:
        nlp_cols = ['compound', 'emb1', 'emb10', 'emb11', 'emb12', 'emb13', 'emb14',
                    'emb15', 'emb16', 'emb2', 'emb3', 'emb4', 'emb5', 'emb6', 'emb7',
                    'emb8', 'emb9', 'neg', 'neu', 'pos', 'subjectivity', 'topic_18',
                    'topic_6']
    # Predictions are computed once; only the scoring is resampled.
    X_non_nlp = X_full.drop(nlp_cols, axis=1)
    preds_full = full_model.predict(X_full)
    preds_non_nlp = non_nlp_model.predict(X_non_nlp)
    n_rows = X_full.shape[0]
    scores_full = []
    scores_non_nlp = []
    for _ in range(n_boot):
        sample = np.random.choice(n_rows, size=n_rows, replace=True)
        y_true_sample = y.iloc[sample]
        scores_full.append(score_func(y_true_sample, preds_full[sample]))
        scores_non_nlp.append(score_func(y_true_sample, preds_non_nlp[sample]))
    return np.array(scores_full), np.array(scores_non_nlp)
141c4481eeacdbde53d983fcf75ec8624dc36750 | 29 | py | Python | raven/config.py | fossabot/raven | b5ed6258a4c09ac4d132873d6b8b4a1d82d2131b | [
"MIT"
] | 29 | 2018-08-13T20:16:41.000Z | 2022-03-17T02:31:38.000Z | raven/config.py | fossabot/raven | b5ed6258a4c09ac4d132873d6b8b4a1d82d2131b | [
"MIT"
] | 359 | 2018-05-31T00:37:53.000Z | 2022-03-26T04:35:43.000Z | raven/config.py | fossabot/raven | b5ed6258a4c09ac4d132873d6b8b4a1d82d2131b | [
"MIT"
] | 10 | 2019-06-17T18:07:46.000Z | 2022-02-15T02:01:32.000Z | max_parallel_processes = 100
| 14.5 | 28 | 0.862069 | max_parallel_processes = 100
| 0 | 0 | 0 |
adb8e2db68eb4d9a88043c7a3f96e022d2ba9993 | 4,934 | py | Python | pycps.py | matt-saenz/PyCPS | 510ac1764d472299ec87756b34b97b7b6b737a23 | [
"MIT"
] | null | null | null | pycps.py | matt-saenz/PyCPS | 510ac1764d472299ec87756b34b97b7b6b737a23 | [
"MIT"
] | null | null | null | pycps.py | matt-saenz/PyCPS | 510ac1764d472299ec87756b34b97b7b6b737a23 | [
"MIT"
] | null | null | null | import os
import re
import pandas as pd # type: ignore
import requests
BASE_URL = "http://api.census.gov/data/"
# Core functions
def get_asec(year: int, vars: list[str], show_url: bool = False) -> pd.DataFrame:
    """Get CPS ASEC microdata using the Census API.

    :param year: survey year (validated against the supported range)
    :param vars: Census variable names (validated and upper-cased)
    :param show_url: if True, print the request URL (key suppressed)
    :returns: one row per record, numeric columns, lower-cased names
    """
    key = _get_key()
    _check_year(year, dataset="asec")
    formatted_vars = _format_vars(vars)
    url = f"{BASE_URL}{year}/cps/asec/mar?get={formatted_vars}&key={key}"
    print(f"Getting CPS ASEC microdata for {year}")
    df = _get_data(url, show_url)
    return df
def get_basic(
    year: int, month: int, vars: list[str], show_url: bool = False
) -> pd.DataFrame:
    """Get basic monthly CPS microdata using the Census API.

    :param year: survey year (validated against the supported range)
    :param month: survey month, 1-12
    :param vars: Census variable names (validated and upper-cased)
    :param show_url: if True, print the request URL (key suppressed)
    :returns: one row per record, numeric columns, lower-cased names
    """
    key = _get_key()
    _check_year(year, dataset="basic")
    month_name, month_abb = _get_month_info(month)
    formatted_vars = _format_vars(vars)
    url = f"{BASE_URL}{year}/cps/basic/{month_abb}?get={formatted_vars}&key={key}"
    print(f"Getting basic monthly CPS microdata for {month_name} {year}")
    df = _get_data(url, show_url)
    return df
# Helpers
class CensusAPIRequestError(Exception):
    """Raise if Census API request fails (non-200 status or non-JSON body)."""
# Create custom exception since clear built-in does not exist
class EnvVarNotFoundError(Exception):
    """Raise if environment variable is not found (e.g. CENSUS_API_KEY)."""
if __name__ == "__main__":
    # Interactive demo: compute the employment-to-population (EPOP) ratio
    # for a user-supplied month/year from basic monthly CPS microdata.
    # Get inputs
    print(
        "Hello! This if-name-main code calculates the employment-to-population",
        "(EPOP) ratio for a given month and year.",
    )
    month_year = input(
        "Please provide a month and year in MM/YYYY format (e.g., 09/2021): "
    )
    month, year = [int(x) for x in month_year.split("/")]
    month_name, month_abb = _get_month_info(month)
    # Get data
    print()  # For empty line
    cps = get_basic(year, month, ["prpertyp", "prtage", "pemlr", "pwcmpwgt"], True)
    print("\nRaw data:", cps, sep="\n")
    # Clean data: keep prpertyp == 2 records aged 16+ (presumably the
    # civilian adult population -- confirm against the CPS codebook).
    cps = cps.loc[(cps.prpertyp == 2) & (cps.prtage >= 16)]
    cps["pop16plus"] = True  # Given above filter
    # pemlr codes 1/2 treated as employed -- assumption from usage; verify.
    cps["employed"] = cps.pemlr.isin([1, 2])
    # Analyze data: pwcmpwgt-weighted sums of both indicator columns.
    results = (
        cps[["pop16plus", "employed"]]
        .apply(lambda x, wt: x.dot(wt), wt=cps.pwcmpwgt)  # Weighted sum
        .astype(int)
    )
    print("\nWeighted sums:", results, sep="\n")
    # Calculate EPOP ratio
    print(
        f"\nThe EPOP ratio for {month_name} {year} was",
        f"{results['employed'] / results['pop16plus']:.1%}.",
    )
| 25.832461 | 85 | 0.622213 | import os
import re
import pandas as pd # type: ignore
import requests
BASE_URL = "http://api.census.gov/data/"
# Core functions
def get_asec(year: int, vars: list[str], show_url: bool = False) -> pd.DataFrame:
    """Fetch CPS ASEC microdata for *year* via the Census API.

    Variables are validated/upper-cased; the API key comes from the
    CENSUS_API_KEY environment variable.
    """
    api_key = _get_key()
    _check_year(year, dataset="asec")
    var_string = _format_vars(vars)
    request_url = f"{BASE_URL}{year}/cps/asec/mar?get={var_string}&key={api_key}"
    print(f"Getting CPS ASEC microdata for {year}")
    return _get_data(request_url, show_url)
def get_basic(
    year: int, month: int, vars: list[str], show_url: bool = False
) -> pd.DataFrame:
    """Fetch basic monthly CPS microdata for *year*/*month* via the Census API.

    Month is validated (1-12); variables are validated/upper-cased; the
    API key comes from the CENSUS_API_KEY environment variable.
    """
    api_key = _get_key()
    _check_year(year, dataset="basic")
    month_name, month_abb = _get_month_info(month)
    var_string = _format_vars(vars)
    request_url = f"{BASE_URL}{year}/cps/basic/{month_abb}?get={var_string}&key={api_key}"
    print(f"Getting basic monthly CPS microdata for {month_name} {year}")
    return _get_data(request_url, show_url)
# Helpers
class CensusAPIRequestError(Exception):
    """Raise if Census API request fails (non-200 status or non-JSON body)."""
def _get_data(url: str, show_url: bool) -> pd.DataFrame:
    # Fetch *url* from the Census API and return the JSON payload as a
    # numeric DataFrame. Raises CensusAPIRequestError on HTTP failure or
    # a non-JSON response.
    if show_url:
        # Suppress key!
        print("URL:", re.sub("&key=.*", "", url))
    resp = requests.get(url)
    if resp.status_code != 200:
        raise CensusAPIRequestError(
            f"Census API request failed [{resp.status_code}]: {resp.reason}"
        )
    if resp.headers["content-type"] != "application/json;charset=utf-8":
        raise CensusAPIRequestError("Census API did not return JSON")
    df = pd.DataFrame(resp.json())
    # First JSON row is the header: use it (lower-cased) as column names.
    df.columns = df.iloc[0].str.lower().to_list()
    df = df.iloc[1:].reset_index(drop=True)
    # Coerce every column to numeric (values are parsed from JSON text).
    df = df.apply(pd.to_numeric)
    return df
def _check_year(year: int, dataset: str) -> None:
years_lookup = {"asec": (2014, 2021), "basic": (1994, 2022)}
start_year, end_year = years_lookup[dataset]
if year not in range(start_year, end_year + 1):
raise ValueError(f"Years {start_year} to {end_year} are currently supported")
def _format_vars(vars: list[str]) -> str:
if not isinstance(vars, list):
raise TypeError("vars must be a list")
not_string = [not isinstance(var, str) for var in vars]
if any(not_string):
raise TypeError("vars must only contain strings")
invalid_char = [bool(re.search("[^A-Za-z0-9_]", var)) for var in vars]
if any(invalid_char):
raise ValueError(
"Elements of vars must only contain letters, digits, and underscores"
)
if len(vars) != len(set(vars)):
raise ValueError("vars must not contain any duplicate elements")
formatted_vars = ",".join(vars).upper()
return formatted_vars
# Create custom exception since clear built-in does not exist
class EnvVarNotFoundError(Exception):
    """Raise if environment variable is not found (e.g. CENSUS_API_KEY)."""
def _get_key() -> str:
    """Read the Census API key from the CENSUS_API_KEY environment variable."""
    api_key = os.getenv("CENSUS_API_KEY")
    if api_key is None:
        raise EnvVarNotFoundError(
            "You must provide a Census API key by setting env var CENSUS_API_KEY"
        )
    return api_key
def _get_month_info(month: int) -> tuple[str, str]:
if month not in range(1, 13):
raise ValueError("month must be a number ranging from 1 to 12")
month_names = [
"", # Empty string at 0 so that month number matches month name
"January",
"February",
"March",
"April",
"May",
"June",
"July",
"August",
"September",
"October",
"November",
"December",
]
month_name = month_names[month]
month_abb = month_name[:3].lower()
return month_name, month_abb
if __name__ == "__main__":
    # Interactive demo: compute the employment-to-population (EPOP) ratio
    # for a user-supplied month/year from basic monthly CPS microdata.
    # Get inputs
    print(
        "Hello! This if-name-main code calculates the employment-to-population",
        "(EPOP) ratio for a given month and year.",
    )
    month_year = input(
        "Please provide a month and year in MM/YYYY format (e.g., 09/2021): "
    )
    month, year = [int(x) for x in month_year.split("/")]
    month_name, month_abb = _get_month_info(month)
    # Get data
    print()  # For empty line
    cps = get_basic(year, month, ["prpertyp", "prtage", "pemlr", "pwcmpwgt"], True)
    print("\nRaw data:", cps, sep="\n")
    # Clean data: keep prpertyp == 2 records aged 16+ (presumably the
    # civilian adult population -- confirm against the CPS codebook).
    cps = cps.loc[(cps.prpertyp == 2) & (cps.prtage >= 16)]
    cps["pop16plus"] = True  # Given above filter
    # pemlr codes 1/2 treated as employed -- assumption from usage; verify.
    cps["employed"] = cps.pemlr.isin([1, 2])
    # Analyze data: pwcmpwgt-weighted sums of both indicator columns.
    results = (
        cps[["pop16plus", "employed"]]
        .apply(lambda x, wt: x.dot(wt), wt=cps.pwcmpwgt)  # Weighted sum
        .astype(int)
    )
    print("\nWeighted sums:", results, sep="\n")
    # Calculate EPOP ratio
    print(
        f"\nThe EPOP ratio for {month_name} {year} was",
        f"{results['employed'] / results['pop16plus']:.1%}.",
    )
| 2,334 | 0 | 115 |
d5e7ea9e23dee658abe41f2297d8e47b50c79219 | 11,451 | py | Python | cmframework/src/cmframework/server/cmprocessor.py | akraino-edge-stack/ta-config-manager | 8a3f88d0dbf6afdb0130b9d35e563f8a54d15d44 | [
"Apache-2.0"
] | null | null | null | cmframework/src/cmframework/server/cmprocessor.py | akraino-edge-stack/ta-config-manager | 8a3f88d0dbf6afdb0130b9d35e563f8a54d15d44 | [
"Apache-2.0"
] | null | null | null | cmframework/src/cmframework/server/cmprocessor.py | akraino-edge-stack/ta-config-manager | 8a3f88d0dbf6afdb0130b9d35e563f8a54d15d44 | [
"Apache-2.0"
] | null | null | null | # Copyright 2019 Nokia
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
from cmframework.utils import cmactivationwork
from cmframework.server import cmeventletrwlock
from cmframework.server import cmcsn
from cmframework.server import cmsnapshot
from cmframework.utils.cmflagfile import CMFlagFile
from cmframework.utils import cmalarm
| 36.123028 | 99 | 0.643088 | # Copyright 2019 Nokia
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
from cmframework.utils import cmactivationwork
from cmframework.server import cmeventletrwlock
from cmframework.server import cmcsn
from cmframework.server import cmsnapshot
from cmframework.utils.cmflagfile import CMFlagFile
from cmframework.utils import cmalarm
class CMProcessor(object):
    """Core request processor of the config-manager server.

    Serializes access to the configuration backend through an eventlet
    reader/writer lock, keeps the change sequence number (CSN) current,
    queues activation work items whenever configuration data changes, and
    manages snapshots, per-node reboot requests and the related alarms.
    """
    # Service-group name used when raising/cancelling group-wide alarms.
    SERVICE_GROUP_NAME = 'config-manager'
    def __init__(self,
                 backend_handler,
                 validator,
                 activator,
                 changemonitor,
                 activationstate_handler,
                 snapshot_handler):
        """Store collaborators and build the lock, CSN and snapshot helpers.

        backend_handler -- storage backend for configuration properties
        validator -- validates set/delete requests before they are applied
        activator -- worker queue executing CMActivationWork items
        changemonitor -- issues uuids used to track individual changes
        activationstate_handler -- records activators that failed a full run
        snapshot_handler -- persistence backend handed to CMSnapshot
        """
        logging.debug('CMProcessor constructed')
        self.backend_handler = backend_handler
        self.lock = cmeventletrwlock.CMEventletRWLock()
        self.csn = cmcsn.CMCSN(self.backend_handler)
        self.validator = validator
        self.activator = activator
        self.reboot_requests = set()
        self.automatic_activation_disabled = CMFlagFile('automatic_activation_disabled')
        self.changemonitor = changemonitor
        self.activationstate_handler = activationstate_handler
        self.snapshot = cmsnapshot.CMSnapshot(snapshot_handler)
    def reboot_request(self, node_name):
        """Record that *node_name* requires a reboot after activation."""
        logging.debug('reboot_request called for %s', node_name)
        self.reboot_requests.add(node_name)
    def _clear_reboot_requests(self):
        """Forget all pending reboot requests."""
        logging.debug('_clear_reboot_requests called')
        self.reboot_requests.clear()
    def _raise_reboot_alarms(self):
        """Raise a reboot-request alarm for every node queued for reboot."""
        logging.debug('_raise_reboot_alarms called')
        reboot_request_alarm = cmalarm.CMRebootRequestAlarm()
        for node_name in self.reboot_requests:
            reboot_request_alarm.raise_alarm_for_node(node_name)
    def get_property(self, prop_name, snapshot_name=None):
        """Return one property value, from a named snapshot if given."""
        logging.debug('get_property called for %s', prop_name)
        with self.lock.reader():
            if snapshot_name:
                self.snapshot.load(snapshot_name)
                return self.snapshot.get_property(prop_name)
            return self.backend_handler.get_property(prop_name)
    def get_properties(self, prop_filter, snapshot_name=None):
        """Return the properties matching *prop_filter*.

        The filter is forwarded to the backend/snapshot handler; the
        '.*' usage elsewhere in this class suggests it is a regexp.
        """
        logging.debug('get_properties called with filter %s', prop_filter)
        with self.lock.reader():
            if snapshot_name:
                self.snapshot.load(snapshot_name)
                return self.snapshot.get_properties(prop_filter)
            return self.backend_handler.get_properties(prop_filter)
    def set_property(self, prop_name, prop_value):
        """Convenience wrapper: set one property via set_properties()."""
        logging.debug('set_property called %s=%s', prop_name, prop_value)
        props = {}
        props[prop_name] = prop_value
        return self.set_properties(props)
    def set_properties(self, props, overwrite=False):
        """Validate and store *props*, bump the CSN and trigger activation.

        With overwrite=True every existing property is deleted first.
        Returns the change uuid, or "0" when automatic activation is
        disabled.
        """
        logging.debug('set_properties called for %s', str(props))
        with self.lock.writer():
            self._validate_set(props)
            if overwrite:
                logging.debug('Deleting old configuration data as requested')
                orig_props = self.backend_handler.get_properties('.*')
                self.backend_handler.delete_properties(orig_props.keys())
            self.backend_handler.set_properties(props)
            self.csn.increment()
            # NOTE(review): _activate_set() re-acquires the reader lock while
            # the writer lock is still held here; assumes CMEventletRWLock
            # tolerates reader-inside-writer -- confirm.
            if not self.automatic_activation_disabled:
                return self._activate_set(props)
        return "0"
    def delete_property(self, prop_name):
        """Delete a single property; returns the change uuid or "0"."""
        logging.debug('delete_property called for %s', prop_name)
        props = []
        props.append(prop_name)
        return self._delete_properties(props, None)
    def delete_properties(self, arg):
        """Delete properties by regexp filter (str) or explicit key list."""
        logging.debug('delete_properties called with arg %r', arg)
        keys = []
        prop_filter = None
        if isinstance(arg, str):
            prop_filter = arg
            props = self.get_properties(prop_filter)
            keys = props.keys()
        else:
            keys = arg
        return self._delete_properties(keys, prop_filter)
    def _delete_properties(self, props, props_filter):
        """Validate and delete *props*, bump the CSN, trigger activation.

        When *props_filter* is set the backend is given the filter string,
        otherwise the explicit key list (single-key deletes use the
        dedicated backend call).
        """
        logging.debug('_delete_properties called with props %s filter %s', props, props_filter)
        with self.lock.writer():
            self._validate_delete(props)
            if props_filter:
                self.backend_handler.delete_properties(props_filter)
            else:
                if len(props) == 1:
                    self.backend_handler.delete_property(props[0])
                else:
                    self.backend_handler.delete_properties(props)
            self.csn.increment()
            # NOTE(review): same reader-inside-writer pattern as
            # set_properties() -- confirm lock reentrancy.
            if not self.automatic_activation_disabled:
                return self._activate_delete(props)
        return "0"
    def _validate_set(self, props):
        """Run the configured validator over a set request."""
        logging.debug('_validate_set called for %s', str(props))
        self.validator.validate_set(props)
    def _activate_set_no_lock(self, props):
        """Queue an OPER_SET work item; caller is expected to hold the lock.

        Returns the uuid issued by the change monitor for this change.
        """
        logging.debug('_activate_set_no_lock called for %s', str(props))
        uuid_value = self.changemonitor.start_change()
        work = cmactivationwork.CMActivationWork(cmactivationwork.CMActivationWork.OPER_SET,
                                                 self.csn.get(), props)
        work.uuid_value = uuid_value
        self.activator.add_work(work)
        return uuid_value
    def _activate_set(self, props):
        """Queue an OPER_SET work item under the reader lock."""
        logging.debug('_activate_set called')
        with self.lock.reader():
            return self._activate_set_no_lock(props)
    def _validate_delete(self, props):
        """Run the configured validator over a delete request."""
        logging.debug('_validate_delete called for %s', str(props))
        self.validator.validate_delete(props)
    def _activate_delete(self, props):
        """Queue an OPER_DELETE work item; returns the change uuid."""
        logging.debug('_activate_delete called for %s', str(props))
        with self.lock.reader():
            uuid_value = self.changemonitor.start_change()
            work = cmactivationwork.CMActivationWork(cmactivationwork.CMActivationWork.OPER_DELETE,
                                                     self.csn.get(), props)
            work.uuid_value = uuid_value
            self.activator.add_work(work)
            return uuid_value
    def create_snapshot(self, snapshot_name):
        """Persist the current configuration under *snapshot_name*."""
        logging.debug('create_snapshot called, snapshot name is %s', snapshot_name)
        with self.lock.writer():
            self.snapshot.create(snapshot_name, self.backend_handler)
    def restore_snapshot(self, snapshot_name):
        """Replace the live configuration with a snapshot and activate it.

        The CSN is rebuilt from the backend after the restore.
        """
        logging.debug('restore_snapshot called, snapshot name is %s', snapshot_name)
        with self.lock.writer():
            self.snapshot.load(snapshot_name)
            self._validate_set(self.snapshot.get_properties())
            self.snapshot.restore(self.backend_handler)
            self.csn = cmcsn.CMCSN(self.backend_handler)
            self._activate_set_no_lock(self.snapshot.get_properties())
    def list_snapshots(self):
        """Return the list of stored snapshots."""
        logging.debug('list_snapshots called')
        snapshots = []
        with self.lock.writer():
            snapshots = self.snapshot.list()
        return snapshots
    def delete_snapshot(self, snapshot_name):
        """Delete the named snapshot."""
        logging.debug('delete_snapshot called, snapshot name is %s', snapshot_name)
        with self.lock.writer():
            self.snapshot.delete(snapshot_name)
    def activate(self, node_name=None, startup_activation=False):
        """Run a full activation, cluster-wide or for one node.

        Cancels any previous activation-failed alarm up front, queues an
        OPER_FULL work item, blocks on its result, and raises
        reboot/failure alarms as needed.  Returns the change uuid.
        """
        logging.debug('activate called, node is %s', node_name)
        activation_alarm = cmalarm.CMActivationFailedAlarm()
        if node_name:
            activation_alarm.cancel_alarm_for_node(node_name)
        else:
            activation_alarm.cancel_alarm_for_sg(CMProcessor.SERVICE_GROUP_NAME)
        with self.lock.reader():
            uuid_value = self.changemonitor.start_change()
            if not node_name:
                work = cmactivationwork.CMActivationWork(
                    cmactivationwork.CMActivationWork.OPER_FULL,
                    self.csn.get(), {}, None, startup_activation)
            else:
                work = cmactivationwork.CMActivationWork(
                    cmactivationwork.CMActivationWork.OPER_FULL,
                    self.csn.get(), {}, node_name)
            work.uuid_value = uuid_value
            self.activator.add_work(work)
            logging.debug('activation work added, going to wait for result')
            failures = work.get_result()
            logging.debug('got activation result')
            if self.reboot_requests:
                self._raise_reboot_alarms()
            if not node_name:
                self.activationstate_handler.clear_full_failed()
            if failures:
                logging.warning('Activation failed: %s', failures)
                # Flatten {handler: [activator, ...]} into one list of names.
                failed_activators = [activator for handler in failures.keys()
                                     for activator in failures[handler]]
                supplementary_info = {'failed activators': failed_activators}
                if node_name:
                    activation_alarm.raise_alarm_for_node(node_name, supplementary_info)
                else:
                    self.activationstate_handler.set_full_failed(failed_activators)
                    activation_alarm.raise_alarm_for_sg(CMProcessor.SERVICE_GROUP_NAME,
                                                        supplementary_info)
            return uuid_value
    def activate_node(self, node_name):
        """Bring *node_name* up to date with the current configuration.

        No-op (returns False) when automatic activation is disabled or
        the node already has the latest CSN.  On success the node's CSN
        is synced; on failure an alarm is raised.  Returns True when the
        node has a pending reboot request.
        """
        logging.debug('activate_node called, node name is %s', node_name)
        if self.automatic_activation_disabled:
            return False
        with self.lock.reader():
            node_csn = self.csn.get_node_csn(node_name)
            if self.csn.get() == node_csn:
                logging.info('No change in data since last translation, last csn %d',
                             self.csn.get())
                return False
            self._clear_reboot_requests()
            work = cmactivationwork.CMActivationWork(cmactivationwork.CMActivationWork.OPER_NODE,
                                                     self.csn.get(), {}, node_name)
            self.activator.add_work(work)
            activation_alarm = cmalarm.CMActivationFailedAlarm()
            activation_alarm.cancel_alarm_for_node(node_name)
            failures = work.get_result()
            if failures:
                logging.warning('Activation failed: %s', failures)
                failed_activators = [activator for handler in failures.keys()
                                     for activator in failures[handler]]
                supplementary_info = {'failed activators': failed_activators}
                activation_alarm.raise_alarm_for_node(node_name, supplementary_info)
            else:
                # NOTE(review): writer lock acquired while the reader lock is
                # held -- assumes CMEventletRWLock supports upgrade; confirm.
                with self.lock.writer():
                    self.csn.sync_node_csn(node_name)
        return node_name in self.reboot_requests
    def set_automatic_activation_state(self, state):
        """Enable (state=True) or disable automatic activation via flag file."""
        logging.debug('set_automatic_activation_state called, state is %s', state)
        with self.lock.writer():
            if state:
                self.automatic_activation_disabled.unset()
            else:
                self.automatic_activation_disabled.set()
| 9,911 | 668 | 23 |
f56f9f644274341a7a889c358a94b2c1672c6109 | 99 | py | Python | src/interpreter/functions/time_func.py | UnseenSevet/b-star | 422371f6093e255dc04613b3006050027a3f065b | [
"MIT"
] | null | null | null | src/interpreter/functions/time_func.py | UnseenSevet/b-star | 422371f6093e255dc04613b3006050027a3f065b | [
"MIT"
] | null | null | null | src/interpreter/functions/time_func.py | UnseenSevet/b-star | 422371f6093e255dc04613b3006050027a3f065b | [
"MIT"
] | null | null | null | from typing import List
import time
| 14.142857 | 37 | 0.737374 | from typing import List
import time
def time_func(block: List, codebase):
    """Built-in `time` function: return the current Unix timestamp.

    Both arguments (*block* and *codebase*) are part of the common
    built-in-function signature and are deliberately unused.
    """
    now = time.time()
    return now
| 39 | 0 | 23 |
7c748b5474de8d8145eba39fbd9a309eb5402848 | 26,358 | py | Python | astute-dashboard/astutedashboard/dashboards/billing/type_mappings/workflows.py | sreenathmenon/astute-project | 00590f769742c44460ef892a803c601a5403e165 | [
"Apache-2.0"
] | null | null | null | astute-dashboard/astutedashboard/dashboards/billing/type_mappings/workflows.py | sreenathmenon/astute-project | 00590f769742c44460ef892a803c601a5403e165 | [
"Apache-2.0"
] | null | null | null | astute-dashboard/astutedashboard/dashboards/billing/type_mappings/workflows.py | sreenathmenon/astute-project | 00590f769742c44460ef892a803c601a5403e165 | [
"Apache-2.0"
] | 1 | 2018-02-24T10:32:41.000Z | 2018-02-24T10:32:41.000Z | # -*- coding: utf-8 -*-
from django.conf import settings
from django.core.urlresolvers import reverse
from django.template.loader import render_to_string
from django.utils.translation import ugettext_lazy as _
from email.mime.multipart import MIMEMultipart
from email.mime.text import MIMEText
from horizon import exceptions
from horizon import forms
from horizon import messages
from horizon.utils import memoized
from horizon.utils import validators
from horizon import workflows
from openstack_dashboard import api
from openstack_dashboard.api import base
from openstack_dashboard.api import cinder
from openstack_dashboard.api import keystone
#from openstack_dashboard.api import neutron
from openstack_dashboard.api import nova
from openstack_dashboard.usage import quotas
from openstack_dashboard.local import local_settings
import smtplib
import re, requests
try:
import simplejson as json
except ImportError:
import json
except:
raise
from astutedashboard.common import get_admin_ksclient, \
get_billing_types, \
create_billing_type_mapping, \
modify_billing_type_mapping, \
create_user_letter, \
get_projects, \
get_project, \
create_project, \
create_user, \
get_tenants, \
get_users, \
get_neutron_client, \
create_network, \
create_subnet, \
list_network, \
list_subnet, \
create_router, \
add_interface_to_router
from astutedashboard.dashboards.billing.cipher import encrypt
from openstack_dashboard.local.local_settings import CIPHER_KEY
ACCOUNT_MAPPING_FIELDS = (
"domain_id",
"domain_name",
"project_mapping",
"project_name",
"description",
"username",
"password",
"confirm_password",
"project_id",
"billing_type"
)
ACCOUNT_EXTRA_FIELDS = (
"crm_account_num",
"service_id",
"customer_name",
"business_reg_num",
"registered_address",
"authorized_officer_name",
"authorized_officer_nric",
"authorized_officer_phone",
"authorized_officer_email",
"account_manager"
)
ACCOUNT_QUOTA_FIELDS = (
"quota_instances",
"quota_cores",
"quota_ram",
"quota_floating_ips",
"quota_fixed_ips",
"quota_gigabytes"
)
COMMON_HORIZONTAL_TEMPLATE = "billing/type_mappings/_common_form.html"
WELCOME_EMAIL_TEMPLATE = "billing/type_mappings/welcome_email.html"
# send multipart email
password_requirement_str = 'must be at least 8 chars long and contain of mixed case and digit chars'
| 37.07173 | 163 | 0.601298 | # -*- coding: utf-8 -*-
from django.conf import settings
from django.core.urlresolvers import reverse
from django.template.loader import render_to_string
from django.utils.translation import ugettext_lazy as _
from email.mime.multipart import MIMEMultipart
from email.mime.text import MIMEText
from horizon import exceptions
from horizon import forms
from horizon import messages
from horizon.utils import memoized
from horizon.utils import validators
from horizon import workflows
from openstack_dashboard import api
from openstack_dashboard.api import base
from openstack_dashboard.api import cinder
from openstack_dashboard.api import keystone
#from openstack_dashboard.api import neutron
from openstack_dashboard.api import nova
from openstack_dashboard.usage import quotas
from openstack_dashboard.local import local_settings
import smtplib
import re, requests
try:
import simplejson as json
except ImportError:
import json
except:
raise
from astutedashboard.common import get_admin_ksclient, \
get_billing_types, \
create_billing_type_mapping, \
modify_billing_type_mapping, \
create_user_letter, \
get_projects, \
get_project, \
create_project, \
create_user, \
get_tenants, \
get_users, \
get_neutron_client, \
create_network, \
create_subnet, \
list_network, \
list_subnet, \
create_router, \
add_interface_to_router
from astutedashboard.dashboards.billing.cipher import encrypt
from openstack_dashboard.local.local_settings import CIPHER_KEY
# Form-field names contributed by the generic (project-mapping) step.
ACCOUNT_MAPPING_FIELDS = (
    "domain_id",
    "domain_name",
    "project_mapping",
    "project_name",
    "description",
    "username",
    "password",
    "confirm_password",
    "project_id",
    "billing_type"
)
# CRM/contract detail fields, serialized to JSON as the billing-type
# mapping's "extra_fields" payload.
ACCOUNT_EXTRA_FIELDS = (
    "crm_account_num",
    "service_id",
    "customer_name",
    "business_reg_num",
    "registered_address",
    "authorized_officer_name",
    "authorized_officer_nric",
    "authorized_officer_phone",
    "authorized_officer_email",
    "account_manager"
)
# Quota form-field names; each is "quota_" + the nova/cinder quota name.
ACCOUNT_QUOTA_FIELDS = (
    "quota_instances",
    "quota_cores",
    "quota_ram",
    "quota_floating_ips",
    "quota_fixed_ips",
    "quota_gigabytes"
)
# Template used to render every workflow step form.
COMMON_HORIZONTAL_TEMPLATE = "billing/type_mappings/_common_form.html"
# Template rendered into the HTML body of the account welcome email.
WELCOME_EMAIL_TEMPLATE = "billing/type_mappings/welcome_email.html"
# send multipart email
def send_mail(subject=None, sender=None, to=None, body=None, html=None, smtp_host='localhost', smtp_port=25, username=None, password=None):
    """Send a multipart (plain-text and/or HTML) email over SMTP.

    subject/sender/to -- standard message headers; *to* may be a single
        address string or a list of addresses.
    body -- optional text/plain part.
    html -- optional text/html part.
    smtp_host, smtp_port -- SMTP server to relay through.
    username, password -- optional SMTP AUTH credentials; login is only
        attempted when both are given.

    Raises whatever smtplib raises on connection/auth/delivery failure.
    """
    msg = MIMEMultipart('alternative')
    msg['Subject'] = subject
    msg['From'] = sender
    msg['To'] = ','.join(to) if isinstance(to, list) else to
    # According to RFC 2046, the last part of a multipart/alternative
    # message is the preferred one, so the HTML part is attached after
    # the plain-text part.
    if body:
        body_part = MIMEText(body, 'plain')
        msg.attach(body_part)
    if html:
        html_part = MIMEText(html, 'html')
        msg.attach(html_part)
    smtp = smtplib.SMTP(smtp_host, smtp_port)
    # Bug fix: previously the connection was never closed when login or
    # sendmail raised, leaking the socket.  Always quit the session.
    try:
        smtp.ehlo()
        if username and password:
            smtp.login(username, password)
        smtp.sendmail(sender, to, msg.as_string())
    finally:
        smtp.quit()
class CommonAccountGenericAction(workflows.Action):
    """Base action for the account workflows.

    Carries the hidden domain id/name fields and pre-fills them with the
    default keystone domain of the current request.
    """
    class Meta(object):
        name = _("Generic")
        slug = 'account_generic'
    # Hide the domain_id and domain_name by default
    domain_id = forms.CharField(
        label=_("Domain ID"),
        required=False,
        widget=forms.HiddenInput()
    )
    domain_name = forms.CharField(
        label=_("Domain Name"),
        required=False,
        widget=forms.HiddenInput()
    )
    def __init__(self, request, *args, **kwargs):
        """Populate the hidden domain fields from the default domain."""
        super(CommonAccountGenericAction, self).__init__(request, *args, **kwargs)
        # set domain values
        #FIXME: Following line need to be checked for keystone V3 case (M1 specific roles)
        domain = keystone.get_default_domain(self.request)
        self.fields['domain_id'].widget.attrs['value'] = domain.id or ''
        self.fields['domain_name'].widget.attrs['value'] = domain.name or ''
# Password-policy hint shown as the placeholder of the password fields.
password_requirement_str = 'must be at least 8 chars long and contain of mixed case and digit chars'
class CreateAccountGenericAction(CommonAccountGenericAction):
    """Generic step of the "Create Account" workflow.

    Lets the operator either create a new project (with an initial user)
    or map an existing project, and pick the billing type.  The
    'project_mapping' choice ('0' = new project, '1' = existing) drives
    which of the other fields are shown via horizon's switchable/switched
    widget attributes.
    """
    class Meta(object):
        name = _("Generic")
        slug = 'account_generic'
    project_mapping = forms.ChoiceField(
        label = _('Mapping'),
        choices=[
            ('0', 'Create New Project'),
            ('1', 'Use Existing Project')
        ],
        required=False,
        widget=forms.Select(attrs={
            'class': 'switchable',
            'data-slug': 'project_mapping'
        })
    )
    project_name = forms.CharField(
        label=_('Project Name'),
        required=False,
        widget=forms.TextInput(attrs={
            'placeholder': 'must be a valid project name',
            'class': 'switched',
            'data-switch-on': 'project_mapping',
            'data-project_mapping-0': _('Project Name')
        })
    )
    description = forms.CharField(
        label=_("Description"),
        required=False,
        widget=forms.widgets.Textarea(
            attrs={
                'rows': 4,
                'class': 'switched',
                'data-switch-on': 'project_mapping',
                'data-project_mapping-0': _('Description')
            }
        )
    )
    username = forms.CharField(
        label=_('Project User'),
        required=False,
        widget=forms.TextInput(attrs={
            'placeholder': 'leave blank to use value of project name',
            'class': 'switched',
            'data-switch-on': 'project_mapping',
            'data-project_mapping-0': _('Project User')
        })
    )
    # password = forms.CharField(
    #     label=_('Password'),
    #     required=False,
    #     widget=forms.TextInput(attrs={
    #         'placeholder': password_requirement_str,
    #         'type': 'password',
    #         'class': 'switched',
    #         'data-switch-on': 'project_mapping',
    #         'data-project_mapping-0': _('Password')
    #     })
    # )
    # confirm_password = forms.CharField(
    #     label=_('Confirm Password'),
    #     required=False,
    #     widget=forms.TextInput(attrs={
    #         'placeholder': 'must match password value above',
    #         'type': 'password',
    #         'class': 'switched',
    #         'data-switch-on': 'project_mapping',
    #         'data-project_mapping-0': _('Confirm Password')
    #     })
    # )
    # Password strength is enforced by horizon's configured validator regex.
    password = forms.RegexField(
        label=_("Password"),
        widget=forms.TextInput(attrs={
            'placeholder': password_requirement_str,
            'type': 'password',
            'class': 'switched',
            'data-switch-on': 'project_mapping',
            'data-project_mapping-0': _('Password')
        }),
        required = False,
        regex=validators.password_validator(),
        error_messages={'invalid': validators.password_validator_msg()}
    )
    confirm_password = forms.CharField(
        label=_("Confirm Password"),
        required = False,
        widget=forms.TextInput(attrs={
            'placeholder': 'must match password value above',
            'type': 'password',
            'class': 'switched',
            'data-switch-on': 'project_mapping',
            'data-project_mapping-0': _('Confirm Password')
        })
    )
    project_id = forms.ChoiceField(
        label=_('Project'),
        choices=[],
        required=False,
        widget=forms.Select(attrs={
            'class': 'switched',
            'data-switch-on': 'project_mapping',
            'data-project_mapping-1': _('Project')
        })
    )
    billing_type = forms.ChoiceField(label=_('Billing Type'), choices=[], required=False)
    def __init__(self, request, *args, **kwargs):
        """Fill the project and billing-type choice lists at render time."""
        super(CreateAccountGenericAction, self).__init__(request, *args, **kwargs)
        # populate existing projects
        #Keystone connection
        #(_projects, _) = keystone.tenant_list(self.request)
        (_projects, _) = get_tenants(self.request)
        projects = [(project.id, project.name) for project in _projects]
        self.fields['project_id'].choices = projects
        # populate billing types
        # TODO (div): switch on astudeclient lib [get_billing_types()]
        billing_types = [(billing_type['id'], billing_type['name']) for billing_type in get_billing_types(request)]
        self.fields['billing_type'].choices = billing_types
    # data clean up and validation
    def clean(self):
        """Validate the new-project fields when 'Create New Project' is chosen.

        Checks that project name/password/confirmation are present, that
        the project and user names are not already taken, and that the
        password confirmation matches.  Username defaults to the project
        name when left blank.
        """
        cleaned_data = super(CreateAccountGenericAction, self).clean()
        if str(cleaned_data.get('project_mapping')) == '0':
            # validate new project fields
            #Password and confirm password field values are required
            self.fields['password'].required = True
            self.fields['confirm_password'].required = True
            msg_field_is_required = 'This field is required.'
            cleaned_data['project_name'] = (cleaned_data.get('project_name') or '').strip()
            project_name = cleaned_data['project_name']
            if project_name == '':
                self.add_error('project_name', msg_field_is_required)
            else:
                # check if specified project is already exists
                #if len([p for p in keystone.tenant_list(self.request)[0] if p.name == project_name]) > 0:
                if len([p for p in get_projects(self.request) if p.name == project_name]) > 0:
                    self.add_error('project_name', 'Project `%s` already exists.' % project_name)
            cleaned_data['username'] = (cleaned_data.get('username') or cleaned_data.get('project_name') or '').strip()
            username = cleaned_data['username']
            if username != '':
                # check if specified user is already exists
                #if len([u for u in keystone.user_list(self.request) if u.name == username]) > 0:
                ks = get_admin_ksclient()
                if len([u for u in get_users(self.request) if u.name == username]) > 0:
                    self.add_error('username', 'User `%s` already exists.' % username)
            password = cleaned_data.get('password')
            if not password:
                self.add_error('password', msg_field_is_required)
            # else:
            #     # check password strength
            #     if not (
            #         any(c.isupper() for c in password) and \
            #         any(c.islower() for c in password) and \
            #         any(c.isdigit() for c in password) and \
            #         len(password) >= 8
            #     ):
            #         self.add_error('password', 'Password is too weak: %s.' % password_requirement_str)
            confirm = cleaned_data.get('confirm_password')
            if not confirm:
                self.add_error('confirm_password', msg_field_is_required)
            if password and confirm and password != confirm:
                self.add_error('confirm_password', 'Confirmation does not match password.')
        return cleaned_data
class UpdateAccountGenericAction(CommonAccountGenericAction):
    """Generic step of the "Modify Account" workflow.

    The account (project) and billing type are shown read-only; the
    mapping id, project id and billing type travel as hidden fields.
    """
    class Meta(object):
        name = _("Generic")
        slug = 'account_generic'
    id = forms.CharField(
        label=_("id"),
        required=True,
        widget=forms.HiddenInput()
    )
    project_id = forms.CharField(
        label=_("project_id"),
        required=True,
        widget=forms.HiddenInput()
    )
    project_name = forms.CharField(
        label=_("Account"),
        required=False,
        widget=forms.TextInput(attrs = {'readonly': 'readonly'})
    )
    billing_type = forms.CharField(
        label=_("billing_type"),
        required=False,
        widget=forms.HiddenInput()
    )
    billing_type_name = forms.CharField(
        label=_('Billing Type'),
        required=False,
        widget=forms.TextInput(attrs = {'readonly': 'readonly'})
    )
    def __init__(self, request, *args, **kwargs):
        """Resolve the mapping's billing-type id to its display name."""
        super(UpdateAccountGenericAction, self).__init__(request, *args, **kwargs)
        # populate billing types
        # TODO (div): switch on astudeclient lib [get_billing_types()]
        billing_types = dict([(str(billing_type['id']), billing_type['name']) for billing_type in get_billing_types(request)])
        self.fields['billing_type_name'].widget.attrs['value'] = billing_types[str(self.initial['billing_type'])]
class CommonAccountDetailsAction(workflows.Action):
    """Details step: mandatory CRM/contract information for the account.

    These values are stored as the billing-type mapping's JSON
    "extra_fields" payload (see ACCOUNT_EXTRA_FIELDS).
    """
    class Meta(object):
        name = _("Details")
        slug = 'account_details'
    crm_account_num = forms.CharField(label=_("CRM Account #"), required=True)
    service_id = forms.CharField(label=_("Service ID"), required=True)
    customer_name = forms.CharField(label=_("Customer Name"), required=True)
    business_reg_num = forms.CharField(label=_("Business Reg. #"), required=True)
    registered_address = forms.CharField(label=_("Registered Address"), required=True)
    authorized_officer_name = forms.CharField(label=_("Authorized Officer"), required=True)
    authorized_officer_nric = forms.CharField(label=_(" - NRIC"), required=True)
    authorized_officer_phone = forms.CharField(label=_(" - Phone"), required=True)
    authorized_officer_email = forms.CharField(label=_("Service Administer's Email"), required=True)
    account_manager = forms.CharField(label=_("Account Manager"), required=True)
class CommonAccountQuotaAction(workflows.Action):
    """Quota step: per-project compute and volume quota fields.

    Adds one extra gigabytes field per cinder volume type and shows the
    deployment default limits as field placeholders.
    """
    class Meta(object):
        name = _("Quota")
        slug = 'account_quota'
    quota_instances = forms.IntegerField(label=_("Instances"), required=False)
    quota_cores = forms.IntegerField(label=_("vCPUs"), required=False)
    quota_ram = forms.IntegerField(label=_("RAM (MB)"), required=False)
    quota_floating_ips = forms.IntegerField(label=_("Floating IPs"), required=False)
    quota_fixed_ips = forms.IntegerField(label=_("Fixed IPs"), required=False)
    quota_gigabytes = forms.IntegerField(label=_("Total Size of Volumes and Snapshots (GB)"), required=False)
    def __init__(self, request, *args, **kwargs):
        """Add per-volume-type fields and default-limit placeholders."""
        super(CommonAccountQuotaAction, self).__init__(request, *args, **kwargs)
        # set fields min value to -1 (-1 conventionally means "unlimited")
        for field in self.fields:
            self.fields[field].widget.attrs.update({'min': '-1'})
        # populate volume type quotas
        for volume_type in cinder.volume_type_list(self.request):
            self.fields['quota_gigabytes_' + volume_type.name] = forms.IntegerField(
                label=_("%s (GB)" % volume_type.name),
                required=False
            )
        # display default quota values
        # NOTE(review): 'itenant_quota_get' looks like a typo for the horizon
        # api's 'tenant_quota_get' -- confirm against the nova api wrapper.
        for quota in nova.itenant_quota_get(self.request, None):
            field = 'quota_' + quota.name
            if self.fields.get(field):
                self.fields[field].widget.attrs.update({'placeholder': str(quota.limit)})
        for quota in cinder.default_quota_get(self.request, None):
            field = 'quota_' + quota.name
            if self.fields.get(field):
                self.fields[field].widget.attrs.update({'placeholder': str(quota.limit)})
class CreateAccountQuotaAction(CommonAccountQuotaAction):
    """Quota step for the "Create Account" workflow (no extra behavior)."""
    class Meta(object):
        name = _("Quota")
        slug = 'account_quota'
class UpdateAccountQuotaAction(CommonAccountQuotaAction):
    """Quota step for the "Modify Account" workflow.

    Rejects quota values that are lower than the project's current usage.
    """
    class Meta(object):
        name = _("Quota")
        slug = 'account_quota'
    def clean(self):
        """Validate that no quota is set below current usage.

        Raises forms.ValidationError listing every offending quota;
        returns the cleaned data dict otherwise.
        """
        # Bug fix: 'cleaned_data' was referenced without ever being
        # assigned, raising NameError as soon as the form validated.
        cleaned_data = super(UpdateAccountQuotaAction, self).clean()
        usages = quotas.tenant_quota_usages(self.request, tenant_id=self.initial['project_id'])
        # Validate the quota values before updating quotas.
        bad_values = []
        for data_key, value in cleaned_data.items():
            key = data_key[6:]  # strip the 'quota_' prefix
            used = usages[key].get('used', 0)
            # -1 means "unlimited", so only non-negative limits below the
            # current usage are rejected.
            if value is not None and value >= 0 and used > value:
                bad_values.append(_('%(used)s %(key)s used') %
                                  {'used': used,
                                   'key': quotas.QUOTA_NAMES.get(key, key)})
        if bad_values:
            value_str = ", ".join(bad_values)
            msg = (_('Quota value(s) cannot be less than the current usage '
                     'value(s): %s.') %
                   value_str)
            raise forms.ValidationError(msg)
        # Django expects clean() to return the cleaned data.
        return cleaned_data
class CreateAccountGenericStep(workflows.Step):
    """Binds CreateAccountGenericAction into the create workflow."""
    action_class = CreateAccountGenericAction
    template_name = COMMON_HORIZONTAL_TEMPLATE
    contributes = ACCOUNT_MAPPING_FIELDS
class UpdateAccountGenericStep(workflows.Step):
    """Binds UpdateAccountGenericAction into the modify workflow."""
    action_class = UpdateAccountGenericAction
    template_name = COMMON_HORIZONTAL_TEMPLATE
    contributes = ("id", "project_id", "project_name", "description", "billing_type")
class CommonAccountDetailsStep(workflows.Step):
    """Binds CommonAccountDetailsAction; shared by both workflows."""
    action_class = CommonAccountDetailsAction
    template_name = COMMON_HORIZONTAL_TEMPLATE
    contributes = ACCOUNT_EXTRA_FIELDS
class CommonAccountWorkflow(workflows.Workflow):
    """Shared base for the create/modify account workflows."""
    pass
class CreateAccountWorkflow(CommonAccountWorkflow):
    """Workflow that provisions a billing account.

    Creates (or reuses) a keystone project, maps it to a billing type,
    creates the initial project user, optionally deploys default
    networking, and sends/stores a welcome email.
    """
    slug = "billing_create_account"
    name = "Create Account"
    finalize_button_name = _("Submit")
    success_message = _('Created new account "%s".')
    failure_message = _('Unable to create account "%s".')
    success_url = "horizon:billing:billing_type_mappings:index"
    default_steps = (
        CreateAccountGenericStep,
        CommonAccountDetailsStep
    )
    def handle(self, request, data):
        """Execute the provisioning sequence for the submitted form data.

        A newly-created project is deleted again if the billing mapping
        or the user creation fails; networking and email failures are
        logged and otherwise ignored (best-effort).
        """
        # project_mapping == '0' means "create a new project".
        is_new_project = str(data['project_mapping']) == '0'
        project = None
        #Keystone connection
        ks = get_admin_ksclient()
        # handle project mapping
        if is_new_project:
            # create new project first
            '''
            project = keystone.tenant_create(
                request,
                data.get('project_name'),
                description=data.get('description'),
                enabled=True,
                domain=data.get('domain_id')
            )
            '''
            project = create_project(request,
                                     data.get('project_name'),
                                     description=data.get('description'),
                                     enabled=True,
                                     domain=data.get('domain_id'))
        else:
            # fetch project
            #project = keystone.tenant_get(request, data.get('project_id'))
            project = get_project(request, data.get('project_id'))
        # map project to billing account
        extra_fields = dict([(f, data[f]) for f in ACCOUNT_EXTRA_FIELDS])
        try:
            success = bool(create_billing_type_mapping(request, {
                "user": project.id,
                "billing_type": data.get('billing_type') or 1,
                "extra_fields": json.dumps(extra_fields)
            }))
            if not success:
                raise exceptions.HorizonException('Unable to create billing type mapping.')
        except:
            # clean up created project in case of error
            if is_new_project:
                #FIXME: Need to check V3 related issues
                #keystone.tenant_delete(request, project.id)
                ks.tenants.delete(project.id)
            raise
        # create user (in case of new project)
        user = None
        try:
            if is_new_project:
                '''
                user = keystone.user_create(
                    request,
                    name=data.get('username'),
                    password=data.get('password'),
                    email=data.get('authorized_officer_email'),
                    enabled=True,
                    description='General user of project `%s`' % project.name,
                    project=project.id,
                    domain=data.get('domain_id')
                )
                '''
                user = create_user(request,
                                   name=data.get('username'),
                                   password=data.get('password'),
                                   email=data.get('authorized_officer_email'),
                                   enabled=True,
                                   description='General user of project `%s`' % project.name,
                                   project=project.id,
                                   domain=data.get('domain_id'))
        except:
            # clean up created project in case of error
            if is_new_project:
                #keystone.tenant_delete(request, project.id)
                #FIXME: Need to check V3 related issues
                ks.tenants.delete(project.id)
            raise
        # do networking deployment (best-effort: failures only printed)
        if is_new_project and getattr(local_settings, 'ASTUDE_CONFIGURE_ACCOUNT_NETWORKING', True):
            try:
                self._configure_networking(request, project)
            except Exception as e:
                print "Exception while adding network"
                print e
                pass
        self.name = project.name
        # send welcome email
        if not getattr(local_settings, 'ASTUTE_ENABLE_WELCOME_EMAIL', True):
            return True
        # send welcome email
        subj = getattr(settings, 'ASTUTE_WELCOME_EMAIL_SUBJ', 'Your new M1 Cloud Application Service')
        sender = getattr(settings, 'ASTUTE_WELCOME_EMAIL_FROM', 'donotreplyCAS@m1.com.sg')
        host = getattr(settings, 'ASTUTE_SMTP_HOST', 'localhost')
        port = getattr(settings, 'ASTUTE_SMTP_PORT', 25)
        # NOTE(review): this rebinding discards the keystone user created
        # above -- 'user' now holds the SMTP username instead.
        user = getattr(settings, 'ASTUTE_SMTP_USER', None)
        pswd = getattr(settings, 'ASTUTE_SMTP_PASS', None)
        html = render_to_string(WELCOME_EMAIL_TEMPLATE, data)
        # save the email content for the project user (encrypted at rest)
        try:
            success = bool(create_user_letter(request, {
                "user": project.id,
                "content": encrypt(CIPHER_KEY, html),
            }))
        except Exception as e:
            print '*******mail*****'
            print e
            pass
        try:
            send_mail(
                subject=subj,
                sender=sender,
                to=data.get('authorized_officer_email'),
                body=None,
                html=html,
                smtp_host=host,
                smtp_port=port,
                username=user,
                password=pswd
            )
        except Exception as e:
            print e
            #raise exceptions.RecoverableError("Account has been created but error ocured on sending welcome email")
            pass
        return True
    def _configure_networking(self, request, project):
        """Deploy default networking for a new account.

        Creates an internal network/subnet, a router with a gateway on
        the configured external network, and attaches the subnet to the
        router.  The '{{ account }}' placeholder in the configured names
        is replaced by the project name.
        """
        # configuration
        external_network_name = getattr(local_settings, 'ASTUDE_ACCOUNT_EXTERNAL_NETWORK_NAME', 'public')
        internal_network_name = getattr(local_settings, 'ASTUDE_ACCOUNT_INTERNAL_NETWORK_NAME', '{{ account }}-nw')
        internal_network_cidr = getattr(local_settings, 'ASTUDE_ACCOUNT_INTERNAL_NETWORK_CIDR', '10.0.0.0/24')
        account_router_name = getattr(local_settings, 'ASTUDE_ACCOUNT_ROUTER_NAME', '{{ account }}-gw')
        rexp = r'\{\{\s*account\s*\}\}'
        external_network_name = re.sub(rexp, project.name, external_network_name)
        internal_network_name = re.sub(rexp, project.name, internal_network_name)
        account_router_name = re.sub(rexp, project.name, account_router_name)
        # create network
        #network = neutron.network_create(request, tenant_id=project.id, name=internal_network_name)
        network = create_network(request, tenant_id=project.id, name=internal_network_name)
        #subnet = neutron.subnet_create(request, network_id=network.id, tenant_id=project.id, name=internal_network_cidr, cidr=internal_network_cidr, ip_version=4)
        subnet = create_subnet(request, network_id=network.id, tenant_id=project.id, name=internal_network_cidr, cidr=internal_network_cidr, ip_version=4)
        # find external network
        #external_network = [n for n in neutron.network_list(request) if n.name == external_network_name]
        external_network = [n for n in list_network(request) if n.name == external_network_name]
        if len(external_network) < 1:
            raise exceptions.HorizonException('Public network `%s` not found.' % external_network_name)
        external_network = external_network[0]
        # create router
        params = {
            "tenant_id": project.id,
            "name": account_router_name,
            "external_gateway_info": {
                "network_id": external_network.id
            }
        }
        #router = neutron.router_create(request, **params)
        router = create_router(request, **params)
        # apply internal network into account router
        #neutron.router_add_interface(request, router.id, subnet_id=subnet.id)
        add_interface_to_router(request, router.id, subnet_id=subnet.id)
        return True
class UpdateAccountWorkflow(CommonAccountWorkflow):
    """Horizon workflow modifying the billing settings of an existing account.

    Collects the generic account step plus the common details step and, on
    submit, persists the selected billing type together with the extra account
    fields (serialized to JSON) through the billing API.
    """
    slug = "billing_update_account"
    name = "Modify Account"
    finalize_button_name = _("Submit")
    success_message = _('Updated account "%s".')
    failure_message = _('Unable to update account "%s".')
    success_url = "horizon:billing:billing_type_mappings:index"
    default_steps = (UpdateAccountGenericStep,
                     CommonAccountDetailsStep)
    def handle(self, request, data):
        """Persist the updated billing type mapping; returns True on success."""
        extra_fields = {field: data[field] for field in ACCOUNT_EXTRA_FIELDS}
        payload = {
            "user": data['project_id'],
            "billing_type": data['billing_type'],
            "extra_fields": json.dumps(extra_fields)
        }
        modify_billing_type_mapping(request, data['id'], payload)
        # expose the project name so the success/failure message can format it
        self.name = data.get('project_name')
        return True
| 14,759 | 8,264 | 321 |
41b84f06ae65c97f8376b64688e0ea2b2d1f0ac7 | 1,399 | py | Python | budgethelper/clients/databaseabc.py | Preocts/budgethelper | 160fc575e969a3cd009874bf295c50421cdcd999 | [
"MIT"
] | null | null | null | budgethelper/clients/databaseabc.py | Preocts/budgethelper | 160fc575e969a3cd009874bf295c50421cdcd999 | [
"MIT"
] | null | null | null | budgethelper/clients/databaseabc.py | Preocts/budgethelper | 160fc575e969a3cd009874bf295c50421cdcd999 | [
"MIT"
] | null | null | null | """
Abstract Base Class
"""
import sqlite3
from abc import ABC
from abc import abstractmethod
from typing import Any
from typing import List
from budgethelper.models.database import Database
| 22.206349 | 78 | 0.604003 | """
Abstract Base Class
"""
import sqlite3
from abc import ABC
from abc import abstractmethod
from typing import Any
from typing import List
from budgethelper.models.database import Database
class DatabaseABC(ABC):
    """Abstract base class for CRUD-style clients over one SQLite table."""
    def __init__(self, database: Database) -> None:
        """Open a SQLite connection to the database described by `database`."""
        super().__init__()
        self.database = database
        self.conn = sqlite3.connect(database=database.name)
    @property
    def changes(self) -> int:
        """Return the # of changes pending"""
        return self.conn.total_changes
    def close(self) -> None:
        """Close connection, must reinitialize to open again"""
        self.conn.close()
    @staticmethod
    def listtables(conn: sqlite3.Connection) -> List[str]:
        """return a list of tables in the database"""
        cursor = conn.cursor()
        try:
            cursor.execute("SELECT * FROM sqlite_master WHERE type = 'table'")
            results = cursor.fetchall()
        finally:
            # always release the cursor, even if the query raised
            cursor.close()
        # column 1 of sqlite_master rows holds the table name
        return [t[1] for t in results]
    @abstractmethod
    def listcolumns(self) -> List[str]:
        """Return the column names of the table handled by this client."""
        ...
    @abstractmethod
    def create(self, row_object: Any) -> None:
        """Insert `row_object` as a new row."""
        ...
    @abstractmethod
    def read(self, uid: int) -> Any:
        """Return the row identified by `uid`."""
        ...
    @abstractmethod
    def update(self, row_object: Any) -> None:
        """Persist changes to an existing `row_object`."""
        ...
    @abstractmethod
    def delete(self, uid: int) -> None:
        """Remove the row identified by `uid`."""
        ...
33d4990b1e1849b6a3e28cc44e92397a72436f5c | 7,250 | py | Python | contrib/MedicalSeg/medicalseg/core/infer_window.py | sun222/PaddleSeg | 6019dd164d873e455255500fa3d7ff197f04e95e | [
"Apache-2.0"
] | null | null | null | contrib/MedicalSeg/medicalseg/core/infer_window.py | sun222/PaddleSeg | 6019dd164d873e455255500fa3d7ff197f04e95e | [
"Apache-2.0"
] | null | null | null | contrib/MedicalSeg/medicalseg/core/infer_window.py | sun222/PaddleSeg | 6019dd164d873e455255500fa3d7ff197f04e95e | [
"Apache-2.0"
] | null | null | null | import paddle as torch
import paddle
import paddle.nn.functional as F
import math
def dense_patch_slices(image_size, patch_size, scan_interval):
    """
    Enumerate all slices defining 2D/3D patches of size `patch_size` from an `image_size` input image.
    Args:
        image_size (tuple of int): dimensions of image to iterate over
        patch_size (tuple of int): size of patches to generate slices
        scan_interval (tuple of int): dense patch sampling interval
    Returns:
        a list of tuples of slice objects (one slice per spatial axis) defining each patch
    Raises:
        ValueError: if `image_size` is not 2D or 3D
    """
    num_spatial_dims = len(image_size)
    if num_spatial_dims not in (2, 3):
        raise ValueError('image_size should has 2 or 3 elements')
    # number of scan positions along each axis; a zero interval collapses that axis
    # to a single position
    scan_num = [int(math.ceil(float(image_size[i]) / scan_interval[i])) if scan_interval[i] != 0 else 1
                for i in range(num_spatial_dims)]
    # Compute the per-axis slices once instead of duplicating the start-offset
    # computation in separate 2D/3D branches (the original also contained no-op
    # self-assignments of `patch_size`/`scan_interval`, removed here).
    axis_slices = []
    for dim in range(num_spatial_dims):
        slices_for_dim = []
        for idx in range(scan_num[dim]):
            start = idx * scan_interval[dim]
            # clamp the start so the patch never runs past the image border
            start -= max(start + patch_size[dim] - image_size[dim], 0)
            slices_for_dim.append(slice(start, start + patch_size[dim]))
        axis_slices.append(slices_for_dim)
    # cartesian product of the per-axis slices, last axis varying fastest
    # (same ordering as the original nested loops)
    if num_spatial_dims == 3:
        return [(si, sj, sk)
                for si in axis_slices[0]
                for sj in axis_slices[1]
                for sk in axis_slices[2]]
    return [(si, sj) for si in axis_slices[0] for sj in axis_slices[1]]
def sliding_window_inference(inputs, roi_size, sw_batch_size, predictor):
    """Use SlidingWindow method to execute inference.
    Args:
        inputs (torch Tensor): input image to be processed (assuming NCHW[D])
        roi_size (list, tuple): the window size to execute SlidingWindow inference.
        sw_batch_size (int): the batch size to run window slices.
        predictor (Callable): given input tensor `patch_data` in shape NCHW[D], `predictor(patch_data)`
            should return a prediction with the same spatial shape and batch_size, i.e. NMHW[D];
            where HW[D] represents the patch spatial size, M is the number of output channels, N is `sw_batch_size`.
    Note:
        must be channel first, support both 2D and 3D.
        input data must have batch dim.
        execute on 1 image/per inference, run a batch of window slices of 1 input image.
    """
    num_spatial_dims = len(inputs.shape) - 2
    assert len(roi_size) == num_spatial_dims, 'roi_size {} does not match input dims.'.format(roi_size)
    # determine image spatial size and batch size
    # Note: all input images must have the same image size and batch size
    image_size = list(inputs.shape[2:])
    batch_size = inputs.shape[0]
    # TODO: Enable batch sizes > 1 in future
    if batch_size > 1:
        raise NotImplementedError
    original_image_size = [image_size[i] for i in range(num_spatial_dims)]
    # in case that image size is smaller than roi size
    image_size = tuple(max(image_size[i], roi_size[i]) for i in range(num_spatial_dims))
    # (before, after) padding amounts per spatial axis, last axis first, so that
    # images smaller than the ROI are zero-padded up to the ROI size
    pad_size = [i for k in range(len(inputs.shape) - 1, 1, -1) for i in (0, max(roi_size[k - 2] - inputs.shape[k], 0))]
    # NOTE(review): the docstring says channel-first input (NCHW[D]) but this pad call
    # uses data_format="NDHWC" — confirm against paddle.nn.functional.pad semantics
    inputs = F.pad(inputs, pad=pad_size, mode='constant', value=0,data_format="NDHWC")
    # TODO: interval from user's specification
    scan_interval = _get_scan_interval(image_size, roi_size, num_spatial_dims)
    # Store all slices in list
    slices = dense_patch_slices(image_size, roi_size, scan_interval)
    # group the window crops into batches of (at most) sw_batch_size patches
    slice_batches = []
    for slice_index in range(0, len(slices), sw_batch_size):
        slice_index_range = range(slice_index, min(slice_index + sw_batch_size, len(slices)))
        input_slices = []
        for curr_index in slice_index_range:
            if num_spatial_dims == 3:
                slice_i, slice_j, slice_k = slices[curr_index]
                input_slices.append(inputs[0, :, slice_i, slice_j, slice_k])
            else:
                slice_i, slice_j = slices[curr_index]
                input_slices.append(inputs[0, :, slice_i, slice_j])
        slice_batches.append(torch.stack(input_slices))
    # Perform predictions
    output_rois = list()
    for data in slice_batches:
        seg_prob = predictor(data) # batched patch segmentation
        output_rois.append(seg_prob[0].numpy())
    # stitching output image
    output_classes = output_rois[0].shape[1]
    output_shape = [batch_size, output_classes] + list(image_size)
    # allocate memory to store the full output and the count for overlapping parts
    output_image = torch.zeros(output_shape, dtype=torch.float32).numpy()
    count_map = torch.zeros(output_shape, dtype=torch.float32).numpy()
    for window_id, slice_index in enumerate(range(0, len(slices), sw_batch_size)):
        slice_index_range = range(slice_index, min(slice_index + sw_batch_size, len(slices)))
        # store the result in the proper location of the full output
        for curr_index in slice_index_range:
            if num_spatial_dims == 3:
                slice_i, slice_j, slice_k = slices[curr_index]
                ors=output_rois[window_id][curr_index - slice_index, :]
                output_image[0, :, slice_i, slice_j, slice_k] += ors
                count_map[0, :, slice_i, slice_j, slice_k] += 1.
            else:
                slice_i, slice_j = slices[curr_index]
                output_image[0, :, slice_i, slice_j] += output_rois[window_id][curr_index - slice_index, :]
                count_map[0, :, slice_i, slice_j] += 1.
    # account for any overlapping sections: each voxel is averaged over the
    # number of windows that covered it
    output_image /= count_map
    output_image=paddle.to_tensor(output_image)
    # crop the (possibly padded) canvas back to the original spatial size
    if num_spatial_dims == 3:
        return (output_image[..., :original_image_size[0], :original_image_size[1], :original_image_size[2]],)
    return (output_image[..., :original_image_size[0], :original_image_size[1]] ,) # 2D
| 42.647059 | 119 | 0.646759 | import paddle as torch
import paddle
import paddle.nn.functional as F
import math
def dense_patch_slices(image_size, patch_size, scan_interval):
    """
    Enumerate all slices defining 2D/3D patches of size `patch_size` from an `image_size` input image.
    Args:
        image_size (tuple of int): dimensions of image to iterate over
        patch_size (tuple of int): size of patches to generate slices
        scan_interval (tuple of int): dense patch sampling interval
    Returns:
        a list of tuples of slice objects (one slice per spatial axis) defining each patch
    Raises:
        ValueError: if `image_size` is not 2D or 3D
    """
    num_spatial_dims = len(image_size)
    if num_spatial_dims not in (2, 3):
        raise ValueError('image_size should has 2 or 3 elements')
    # number of scan positions along each axis; a zero interval collapses that axis
    # to a single position
    scan_num = [int(math.ceil(float(image_size[i]) / scan_interval[i])) if scan_interval[i] != 0 else 1
                for i in range(num_spatial_dims)]
    # Compute the per-axis slices once instead of duplicating the start-offset
    # computation in separate 2D/3D branches (the original also contained no-op
    # self-assignments of `patch_size`/`scan_interval`, removed here).
    axis_slices = []
    for dim in range(num_spatial_dims):
        slices_for_dim = []
        for idx in range(scan_num[dim]):
            start = idx * scan_interval[dim]
            # clamp the start so the patch never runs past the image border
            start -= max(start + patch_size[dim] - image_size[dim], 0)
            slices_for_dim.append(slice(start, start + patch_size[dim]))
        axis_slices.append(slices_for_dim)
    # cartesian product of the per-axis slices, last axis varying fastest
    # (same ordering as the original nested loops)
    if num_spatial_dims == 3:
        return [(si, sj, sk)
                for si in axis_slices[0]
                for sj in axis_slices[1]
                for sk in axis_slices[2]]
    return [(si, sj) for si in axis_slices[0] for sj in axis_slices[1]]
def sliding_window_inference(inputs, roi_size, sw_batch_size, predictor):
    """Use SlidingWindow method to execute inference.
    Args:
        inputs (torch Tensor): input image to be processed (assuming NCHW[D])
        roi_size (list, tuple): the window size to execute SlidingWindow inference.
        sw_batch_size (int): the batch size to run window slices.
        predictor (Callable): given input tensor `patch_data` in shape NCHW[D], `predictor(patch_data)`
            should return a prediction with the same spatial shape and batch_size, i.e. NMHW[D];
            where HW[D] represents the patch spatial size, M is the number of output channels, N is `sw_batch_size`.
    Note:
        must be channel first, support both 2D and 3D.
        input data must have batch dim.
        execute on 1 image/per inference, run a batch of window slices of 1 input image.
    """
    num_spatial_dims = len(inputs.shape) - 2
    assert len(roi_size) == num_spatial_dims, 'roi_size {} does not match input dims.'.format(roi_size)
    # determine image spatial size and batch size
    # Note: all input images must have the same image size and batch size
    image_size = list(inputs.shape[2:])
    batch_size = inputs.shape[0]
    # TODO: Enable batch sizes > 1 in future
    if batch_size > 1:
        raise NotImplementedError
    original_image_size = [image_size[i] for i in range(num_spatial_dims)]
    # in case that image size is smaller than roi size
    image_size = tuple(max(image_size[i], roi_size[i]) for i in range(num_spatial_dims))
    # (before, after) padding amounts per spatial axis, last axis first, so that
    # images smaller than the ROI are zero-padded up to the ROI size
    pad_size = [i for k in range(len(inputs.shape) - 1, 1, -1) for i in (0, max(roi_size[k - 2] - inputs.shape[k], 0))]
    # NOTE(review): the docstring says channel-first input (NCHW[D]) but this pad call
    # uses data_format="NDHWC" — confirm against paddle.nn.functional.pad semantics
    inputs = F.pad(inputs, pad=pad_size, mode='constant', value=0,data_format="NDHWC")
    # TODO: interval from user's specification
    scan_interval = _get_scan_interval(image_size, roi_size, num_spatial_dims)
    # Store all slices in list
    slices = dense_patch_slices(image_size, roi_size, scan_interval)
    # group the window crops into batches of (at most) sw_batch_size patches
    slice_batches = []
    for slice_index in range(0, len(slices), sw_batch_size):
        slice_index_range = range(slice_index, min(slice_index + sw_batch_size, len(slices)))
        input_slices = []
        for curr_index in slice_index_range:
            if num_spatial_dims == 3:
                slice_i, slice_j, slice_k = slices[curr_index]
                input_slices.append(inputs[0, :, slice_i, slice_j, slice_k])
            else:
                slice_i, slice_j = slices[curr_index]
                input_slices.append(inputs[0, :, slice_i, slice_j])
        slice_batches.append(torch.stack(input_slices))
    # Perform predictions
    output_rois = list()
    for data in slice_batches:
        seg_prob = predictor(data) # batched patch segmentation
        output_rois.append(seg_prob[0].numpy())
    # stitching output image
    output_classes = output_rois[0].shape[1]
    output_shape = [batch_size, output_classes] + list(image_size)
    # allocate memory to store the full output and the count for overlapping parts
    output_image = torch.zeros(output_shape, dtype=torch.float32).numpy()
    count_map = torch.zeros(output_shape, dtype=torch.float32).numpy()
    for window_id, slice_index in enumerate(range(0, len(slices), sw_batch_size)):
        slice_index_range = range(slice_index, min(slice_index + sw_batch_size, len(slices)))
        # store the result in the proper location of the full output
        for curr_index in slice_index_range:
            if num_spatial_dims == 3:
                slice_i, slice_j, slice_k = slices[curr_index]
                ors=output_rois[window_id][curr_index - slice_index, :]
                output_image[0, :, slice_i, slice_j, slice_k] += ors
                count_map[0, :, slice_i, slice_j, slice_k] += 1.
            else:
                slice_i, slice_j = slices[curr_index]
                output_image[0, :, slice_i, slice_j] += output_rois[window_id][curr_index - slice_index, :]
                count_map[0, :, slice_i, slice_j] += 1.
    # account for any overlapping sections: each voxel is averaged over the
    # number of windows that covered it
    output_image /= count_map
    output_image=paddle.to_tensor(output_image)
    # crop the (possibly padded) canvas back to the original spatial size
    if num_spatial_dims == 3:
        return (output_image[..., :original_image_size[0], :original_image_size[1], :original_image_size[2]],)
    return (output_image[..., :original_image_size[0], :original_image_size[1]] ,) # 2D
def _get_scan_interval(image_size, roi_size, num_spatial_dims):
assert (len(image_size) == num_spatial_dims), 'image coord different from spatial dims.'
assert (len(roi_size) == num_spatial_dims), 'roi coord different from spatial dims.'
scan_interval = [1 for _ in range(num_spatial_dims)]
for i in range(num_spatial_dims):
if roi_size[i] == image_size[i]:
scan_interval[i] = int(roi_size[i])
else:
# this means that it's r-16 (if r>=64) and r*0.75 (if r<=64)
scan_interval[i] = int(max(roi_size[i] - 16, roi_size[i] * 0.75))
return tuple(scan_interval) | 606 | 0 | 23 |
4db6da118a90fca9dc471472d096300c298fbc86 | 3,469 | py | Python | examples/adspygoogle/adwords/v201306/campaign_management/add_location_extension_override.py | cherry-wb/googleads-python-lib | 24a1ecb7c1cca5af3624a3b03ebaa7f5147b4a04 | [
"Apache-2.0"
] | null | null | null | examples/adspygoogle/adwords/v201306/campaign_management/add_location_extension_override.py | cherry-wb/googleads-python-lib | 24a1ecb7c1cca5af3624a3b03ebaa7f5147b4a04 | [
"Apache-2.0"
] | null | null | null | examples/adspygoogle/adwords/v201306/campaign_management/add_location_extension_override.py | cherry-wb/googleads-python-lib | 24a1ecb7c1cca5af3624a3b03ebaa7f5147b4a04 | [
"Apache-2.0"
] | 2 | 2020-04-02T19:00:31.000Z | 2020-08-06T03:28:38.000Z | #!/usr/bin/python
# -*- coding: UTF-8 -*-
#
# Copyright 2012 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This example adds an ad extension override to a given campaign. To get
campaigns, run get_campaigns.py.
Tags: GeoLocationService.get, AdExtensionOverrideService.mutate
Api: AdWordsOnly
"""
__author__ = 'api.kwinter@gmail.com (Kevin Winter)'
import os
import sys
sys.path.insert(0, os.path.join('..', '..', '..', '..', '..'))
# Import appropriate classes from the client library.
from adspygoogle import AdWordsClient
ad_id = 'INSERT_AD_GROUP_AD_ID_HERE'
ad_extension_id = 'INSERT_AD_EXTENSION_ID_HERE'
if __name__ == '__main__':
# Initialize client object.
client = AdWordsClient(path=os.path.join('..', '..', '..', '..', '..'))
main(client, ad_id, ad_extension_id)
| 32.726415 | 80 | 0.595272 | #!/usr/bin/python
# -*- coding: UTF-8 -*-
#
# Copyright 2012 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This example adds an ad extension override to a given campaign. To get
campaigns, run get_campaigns.py.
Tags: GeoLocationService.get, AdExtensionOverrideService.mutate
Api: AdWordsOnly
"""
__author__ = 'api.kwinter@gmail.com (Kevin Winter)'
import os
import sys
sys.path.insert(0, os.path.join('..', '..', '..', '..', '..'))
# Import appropriate classes from the client library.
from adspygoogle import AdWordsClient
ad_id = 'INSERT_AD_GROUP_AD_ID_HERE'
ad_extension_id = 'INSERT_AD_EXTENSION_ID_HERE'
def main(client, ad_id, ad_extension_id):
# Initialize appropriate service.
geo_location_service = client.GetGeoLocationService(version='v201306')
ad_extension_override_service = client.GetAdExtensionOverrideService(
version='v201306')
# Construct selector and get geo location info for a given address.
selector = {
'addresses': [
{
'streetAddress': '1600 Amphitheatre Parkway',
'cityName': 'Mountain View',
'provinceCode': 'US-CA',
'provinceName': 'California',
'postalCode': '94043',
'countryCode': 'US'
}
]
}
geo_location = geo_location_service.Get(selector)[0]
# Construct operations and add ad extension override.
operations = [
{
'operator': 'ADD',
'operand': {
'adId': ad_id,
'adExtension': {
'xsi_type': 'LocationExtension',
'id': ad_extension_id,
'address': geo_location['address'],
'geoPoint': geo_location['geoPoint'],
'encodedLocation': geo_location['encodedLocation'],
'source': 'ADWORDS_FRONTEND',
# Optional fields.
'companyName': 'ACME Inc.',
'phoneNumber': '(650) 253-0000'
# 'iconMediaId': '...',
# 'imageMediaId': '...'
},
# Optional fields.
'overrideInfo': {
'LocationOverrideInfo': {
'radius': '5',
'radiusUnits': 'MILES'
}
}
}
}
]
ad_extensions = ad_extension_override_service.Mutate(operations)[0]
# Display results.
for ad_extension in ad_extensions['value']:
print ('Ad extension override with id \'%s\' for ad with id \'%s\' was '
'added.' % (ad_extension['adExtension']['id'], ad_extension['adId']))
print
print ('Usage: %s units, %s operations' % (client.GetUnits(),
client.GetOperations()))
if __name__ == '__main__':
# Initialize client object.
client = AdWordsClient(path=os.path.join('..', '..', '..', '..', '..'))
main(client, ad_id, ad_extension_id)
| 2,119 | 0 | 23 |
ccfc7c0b942763e165b178211e1cc550da2d0c4b | 10,401 | py | Python | escnn/group/irrep.py | QUVA-Lab/escnn | 59ed6b96f61f8616f87b3f25aa2f8abdb6f1a882 | [
"BSD-3-Clause"
] | 4 | 2022-03-16T22:51:39.000Z | 2022-03-18T18:45:49.000Z | escnn/group/irrep.py | QUVA-Lab/escnn | 59ed6b96f61f8616f87b3f25aa2f8abdb6f1a882 | [
"BSD-3-Clause"
] | null | null | null | escnn/group/irrep.py | QUVA-Lab/escnn | 59ed6b96f61f8616f87b3f25aa2f8abdb6f1a882 | [
"BSD-3-Clause"
] | null | null | null | from __future__ import annotations
import escnn.group
from escnn.group import Representation, GroupElement, Group
from escnn.group._numerical import decompose_representation_finitegroup
from escnn.group._numerical import decompose_representation_general
from typing import Callable, Any, List, Union, Dict, Tuple, Type
import numpy as np
__all__ = [
"IrreducibleRepresentation",
"build_irrep_from_generators",
"generate_irrep_matrices_from_generators",
"restrict_irrep"
]
from joblib import Memory
# import os
# cache = Memory(os.path.join(os.path.dirname(__file__), '_jl_restricted_irreps'), verbose=2)
from escnn.group import __cache_path__
cache = Memory(__cache_path__, verbose=0)
@cache.cache
def restrict_irrep(irrep: IrreducibleRepresentation, id) -> Tuple[np.matrix, List[Tuple[str, int]]]:
    r"""
    Restrict the input `irrep` to the subgroup identified by `id`.

    Args:
        irrep (IrreducibleRepresentation): the irrep to restrict
        id: the subgroup identifier, in the format accepted by the group's
            ``subgroup()`` method

    Returns:
        a tuple with the change-of-basis matrix and the list of ids of the
        subgroup's irreps appearing in the decomposition

    The subgroup id is first converted to a pickleable form so the result of
    the (cached) helper can be memoized on disk.
    """
    group_keys = irrep.group._keys
    id = irrep.group._encode_subgroup_id_pickleable(id)
    return _restrict_irrep(irrep.id, id, irrep.group.__class__.__name__, **group_keys)
| 36.114583 | 199 | 0.52418 | from __future__ import annotations
import escnn.group
from escnn.group import Representation, GroupElement, Group
from escnn.group._numerical import decompose_representation_finitegroup
from escnn.group._numerical import decompose_representation_general
from typing import Callable, Any, List, Union, Dict, Tuple, Type
import numpy as np
__all__ = [
"IrreducibleRepresentation",
"build_irrep_from_generators",
"generate_irrep_matrices_from_generators",
"restrict_irrep"
]
class IrreducibleRepresentation(Representation):
    def __init__(self,
                 group: escnn.group.Group,
                 id: Tuple,
                 name: str,
                 representation: Union[Dict[escnn.group.GroupElement, np.ndarray], Callable[[Any], np.ndarray]],
                 size: int,
                 type: str,
                 supported_nonlinearities: List[str],
                 character: Union[Dict[escnn.group.GroupElement, float], Callable[[Any], float]] = None,
                 **kwargs
                 ):
        """
        Describes an "*irreducible representation*" (*irrep*).
        It is a subclass of a :class:`~escnn.group.Representation`.
        Irreducible representations are the building blocks into which any other representation decomposes under a
        change of basis.
        Indeed, any :class:`~escnn.group.Representation` is internally decomposed into a direct sum of irreps.
        Args:
            group (Group): the group which is being represented
            id (tuple): args to generate this irrep using ``group.irrep(*id)``
            name (str): an identification name for this representation
            representation (dict or callable): a callable implementing this representation or a dict mapping
                    each group element to its representation.
            size (int): the size of the vector space where this representation is defined (i.e. the size of the matrices)
            type (str): type of the irrep. It needs to be one of `R`, `C` or `H`, which represent respectively
                    real, complex and quaternionic types.
                    NOTE: this parameter substitutes the old `sum_of_squares_constituents` from *e2cnn*.
            supported_nonlinearities (list): list of nonlinearitiy types supported by this representation.
            character (callable or dict, optional): a callable returning the character of this representation for an
                    input element or a dict mapping each group element to its character.
            **kwargs: custom attributes the user can set and, then, access from the dictionary
                    in :attr:`escnn.group.Representation.attributes`
        Attributes:
            sum_of_squares_constituents (int): the sum of the squares of the multiplicities of pairwise distinct
                    irreducible constituents of the character of this representation over a non-splitting field (see
                    `Character Orthogonality Theorem <https://groupprops.subwiki.org/wiki/Character_orthogonality_theorem#Statement_over_general_fields_in_terms_of_inner_product_of_class_functions>`_
                    over general fields).
                    This attribute is fully determined by the irrep's `type` as:
                    +----------+---------------------------------+
                    |  `type`  |  `sum_of_squares_constituents`  |
                    +==========+=================================+
                    |  'R'     |  `1`                            |
                    +----------+---------------------------------+
                    |  'C'     |  `2`                            |
                    +----------+---------------------------------+
                    |  'H'     |  `4`                            |
                    +----------+---------------------------------+
        """
        assert type in {'R', 'C', 'H'}
        # complex (quaternionic) irreps act on real spaces of even size
        # (size divisible by 4, respectively)
        if type == 'C':
            assert size % 2 == 0
        elif type == 'H':
            assert size % 4 == 0
        super(IrreducibleRepresentation, self).__init__(group,
                                                        name,
                                                        [id],
                                                        np.eye(size),
                                                        supported_nonlinearities,
                                                        representation=representation,
                                                        character=character,
                                                        **kwargs)
        assert isinstance(id, tuple)
        # `id` regenerates this irrep through ``group.irrep(*id)``
        self.id = id
        self.irreducible = True
        self.type = type
        # see the table in the docstring above: the value only depends on `type`
        if self.type == 'R':
            self.sum_of_squares_constituents = 1
        elif self.type == 'C':
            self.sum_of_squares_constituents = 2
        elif self.type == 'H':
            self.sum_of_squares_constituents = 4
        else:
            raise ValueError()
    def endomorphism_basis(self) -> np.ndarray:
        """
        Return a basis of the space of endomorphisms of this irrep, stacked in
        an array of shape ``(k, size, size)``, with
        ``k = sum_of_squares_constituents`` (1 for 'R', 2 for 'C', 4 for 'H').
        """
        if self.type == 'R':
            return np.eye(self.size).reshape(1, self.size, self.size)
        elif self.type == 'C':
            basis = np.stack([
                np.eye(2),
                np.diag([1., -1.])[::-1]
            ], axis=0)
            # expand each 2x2 generator to the full irrep size
            return np.kron(basis, np.eye(self.size // 2))
        elif self.type == 'H':
            basis = np.stack([
                np.eye(4),
                np.diag([1., -1., 1., -1.])[::-1],
                np.array([
                    [ 0., 0., -1., 0.],
                    [ 0., 0., 0., -1.],
                    [ 1., 0., 0., 0.],
                    [ 0., 1., 0., 0.],
                ]),
                np.array([
                    [0., -1., 0., 0.],
                    [1., 0., 0., 0.],
                    [0., 0., 0., 1.],
                    [0., 0., -1., 0.],
                ]),
            ], axis=0)
            # expand each 4x4 generator to the full irrep size
            return np.kron(basis, np.eye(self.size // 4))
        else:
            raise ValueError()
def build_irrep_from_generators(
        group: escnn.group.Group,
        generators: List[Tuple[escnn.group.GroupElement, np.ndarray]],
        id: Tuple,
        name: str,
        type: str,
        supported_nonlinearities: List[str],
        **kwargs
) -> IrreducibleRepresentation:
    r"""
    Build an :class:`IrreducibleRepresentation` of the finite `group` whose
    matrices are generated (and verified) from the matrices assigned to a set
    of group generators; all extra keyword arguments are forwarded to the
    irrep's constructor.
    """
    matrices = generate_irrep_matrices_from_generators(group, generators)
    # the irrep dimensionality is read off the first generator's matrix
    size = generators[0][1].shape[0]
    return IrreducibleRepresentation(
        group, id, name, matrices, size, type, supported_nonlinearities, **kwargs
    )
def generate_irrep_matrices_from_generators(
        group: escnn.group.Group,
        generators: List[Tuple[escnn.group.GroupElement, np.ndarray]],
) -> Dict[escnn.group.GroupElement, np.ndarray]:
    """
    Generate the representation matrix of every element of the finite `group`
    from the matrices assigned to a set of generators, by closing the set of
    generators under composition and inversion.

    The construction uses ``rho(~g) = rho(g).T``, i.e. it assumes the
    representation is orthogonal; this is verified by the consistency checks
    performed at the end.

    Returns:
        a dict mapping each group element to its ``d x d`` matrix
    """
    assert group.order() > 0
    d = generators[0][1].shape[0]
    # all generator matrices must belong to this group and share the same size
    for g, rho_g in generators:
        assert group == g.group
        assert rho_g.shape == (d, d)
    elements = set()
    added = set()
    identity = group.identity
    added.add(identity)
    elements.add(identity)
    rho = {
        g: rho_g
        for g, rho_g in generators
    }
    rho[identity] = np.eye(d)
    generators = [g for g, _ in generators]
    # breadth-first closure: repeatedly compose the generators (and their
    # inverses) with the newly reached elements until no new element appears
    while len(added) > 0:
        new = set()
        for g in generators:
            for e in added:
                if g @ e not in rho:
                    rho[g @ e] = rho[g] @ rho[e]
                if ~g @ e not in rho:
                    # inverse via transpose: assumes an orthogonal representation
                    rho[~g @ e] = rho[g].T @ rho[e]
                new |= {g @ e, ~g @ e}
        added = new - elements
        elements |= added
    assert len(elements) == group.order(), 'Error! The set of generators passed does not generate the whole group'
    # verify the generated map is a genuine orthogonal representation:
    # rho(a^-1) = rho(a)^T and rho(a) rho(b) = rho(a b) for all pairs
    for a in elements:
        assert ~a in elements
        assert np.allclose(
            rho[~a],
            rho[a].T
        )
        for b in elements:
            assert a @ b in elements
            assert np.allclose(
                rho[a] @ rho[b],
                rho[a @ b]
            )
    return rho
from joblib import Memory
# import os
# cache = Memory(os.path.join(os.path.dirname(__file__), '_jl_restricted_irreps'), verbose=2)
from escnn.group import __cache_path__
cache = Memory(__cache_path__, verbose=0)
@cache.cache
def _restrict_irrep(irrep_id: Tuple, id, group_class: str, **group_keys) -> Tuple[np.matrix, List[Tuple[Tuple, int]]]:
    """
    Disk-cached worker for :func:`restrict_irrep`.

    All arguments are pickleable (the group is rebuilt from its class name and
    keys, the irrep from its id) so joblib can memoize the result.

    Returns:
        the change-of-basis matrix and the list of subgroup irrep ids
        appearing (with repetition) in the decomposition
    """
    group = escnn.group.groups_dict[group_class]._generator(**group_keys)
    irrep = group.irrep(*irrep_id)
    id = irrep.group._decode_subgroup_id_pickleable(id)
    subgroup, parent, child = group.subgroup(id)
    if subgroup.order() == 1:
        # if the subgroup is the trivial group, just return the identity cob and the list of trivial reprs
        return np.eye(irrep.size), [subgroup.trivial_representation.id]*irrep.size
    if subgroup.order() > 1:
        # if it is a finite group, we can sample all the element and use the precise method based on Character theory
        representation = {
            g: irrep(parent(g)) for g in subgroup.elements
        }
        # to solve the Sylvester equation and find the change of basis matrix, it is sufficient to sample
        # the generators of the subgroup
        change_of_basis, multiplicities = decompose_representation_finitegroup(
            representation,
            subgroup,
        )
    else:
        # if the group is not finite, we rely on the numerical method which is based on some samples of the group
        representation = lambda g: irrep(parent(g))
        change_of_basis, multiplicities = decompose_representation_general(
            representation,
            subgroup,
        )
    # flatten (irrep, multiplicity) pairs into a list with repetitions
    irreps = []
    for irr, m in multiplicities:
        irreps += [irr]*m
    return change_of_basis, irreps
def restrict_irrep(irrep: IrreducibleRepresentation, id) -> Tuple[np.matrix, List[Tuple[str, int]]]:
    r"""
    Restrict the input `irrep` to the subgroup identified by `id`.

    The subgroup id is first encoded into a pickleable form and the group is
    identified by its class name and keys, so the actual decomposition can be
    delegated to the disk-cached helper :func:`_restrict_irrep`.
    """
    parent_group = irrep.group
    pickleable_id = parent_group._encode_subgroup_id_pickleable(id)
    return _restrict_irrep(irrep.id, pickleable_id, type(parent_group).__name__, **parent_group._keys)
| 4,593 | 4,620 | 91 |
60545ecac254eb41fc5a27c74d5339601d92c65d | 6,018 | py | Python | src/pyams_file_views/widget/__init__.py | Py-AMS/pyams-file-views | f5ed508ca62d1b39f8387da44f601fca1bf9120a | [
"ZPL-2.1"
] | null | null | null | src/pyams_file_views/widget/__init__.py | Py-AMS/pyams-file-views | f5ed508ca62d1b39f8387da44f601fca1bf9120a | [
"ZPL-2.1"
] | null | null | null | src/pyams_file_views/widget/__init__.py | Py-AMS/pyams-file-views | f5ed508ca62d1b39f8387da44f601fca1bf9120a | [
"ZPL-2.1"
] | null | null | null | #
# Copyright (c) 2015-2020 Thierry Florac <tflorac AT ulthar.net>
# All Rights Reserved.
#
# This software is subject to the provisions of the Zope Public License,
# Version 2.1 (ZPL). A copy of the ZPL should accompany this distribution.
# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
# FOR A PARTICULAR PURPOSE.
#
"""PyAMS_file_views.widget module
This module defines data converters and and form widgets which are required to manage files
and images.
"""
import os.path
from cgi import FieldStorage
from datetime import datetime
from pyramid.interfaces import IView
from zope.component import queryMultiAdapter
from zope.dublincore.interfaces import IZopeDublinCore
from zope.interface import implementer_only
from pyams_file.file import EXTENSIONS_THUMBNAILS
from pyams_file.interfaces.thumbnail import IThumbnails
from pyams_file.schema import IFileField, IMediaField
from pyams_form.browser.file import FileWidget as FileWidgetBase
from pyams_form.converter import BaseDataConverter
from pyams_form.interfaces import DISPLAY_MODE, IDataConverter, INPUT_MODE
from pyams_form.interfaces.widget import IFieldWidget, IFileWidget, IMediaFileWidget
from pyams_form.template import widget_template_config
from pyams_form.util import to_bytes
from pyams_form.widget import FieldWidget
from pyams_layer.interfaces import IPyAMSLayer
from pyams_utils.adapter import adapter_config
from pyams_utils.interfaces.form import NOT_CHANGED, TO_BE_DELETED
from pyams_utils.size import get_human_size
from pyams_utils.url import absolute_url
__docformat__ = 'restructuredtext'
@adapter_config(required=(IFileField, IFileWidget), provides=IDataConverter)
class FileUploadDataConverter(BaseDataConverter):
    """File upload data converter

    Adapter registered for ``IFileField`` fields rendered with an
    ``IFileWidget``; conversion logic is inherited from ``BaseDataConverter``.
    """
@widget_template_config(mode=INPUT_MODE,
template='templates/file-input.pt', layer=IPyAMSLayer)
@widget_template_config(mode=DISPLAY_MODE,
template='templates/file-display.pt', layer=IPyAMSLayer)
@implementer_only(IFileWidget)
class FileWidget(FileWidgetBase):
"""File widget"""
@property
def timestamp(self):
"""Image timestamp getter"""
dc = IZopeDublinCore(self.current_value, None) # pylint: disable=invalid-name
if dc is None:
return datetime.utcnow().timestamp()
return dc.modified.timestamp() # pylint: disable=no-member
@property
def current_value(self):
"""Widget value getter"""
if self.form.ignore_context:
return None
value = self.field.get(self.context)
if isinstance(value, dict):
lang = getattr(self, 'lang', None)
if lang is not None:
value = value.get(lang)
return value
@property
def deletable(self):
"""Widget deletable flag getter"""
if self.required:
return False
if not self.ignore_context:
value = self.current_value
else:
value = self.value
return bool(value)
def get_human_size(self):
"""File human size getter"""
return get_human_size(self.current_value.get_size(), self.request)
def get_thumbnail(self, geometry='128x128'):
"""File thumbnail getter"""
thumbnails = IThumbnails(self.current_value, None)
if thumbnails is not None:
display = thumbnails.get_thumbnail(geometry) # pylint: disable=assignment-from-no-return
if display is not None:
dc = IZopeDublinCore(display, None) # pylint: disable=invalid-name
if dc is None:
timestamp = self.timestamp
else:
timestamp = dc.modified.timestamp() # pylint: disable=no-member
return '{}?_={}'.format(absolute_url(display, self.request),
timestamp)
_name, ext = os.path.splitext(self.current_value.filename)
return '/--static--/pyams_file/img/{}'.format(
EXTENSIONS_THUMBNAILS.get(ext, 'unknown.png'))
def get_thumbnail_target(self):
"""Widget thumbnail target getter"""
value = self.current_value
if value is not None:
view = queryMultiAdapter((value, self.request), IView, name='preview.html')
if view is not None:
return absolute_url(value, self.request, 'preview.html')
return None
@adapter_config(required=(IFileField, IPyAMSLayer), provides=IFieldWidget)
def FileFieldWidget(field, request): # pylint: disable=invalid-name
"""File field widget factory"""
return FieldWidget(field, FileWidget(request))
#
# Medias files widget
#
@widget_template_config(mode=INPUT_MODE,
template='templates/media-input.pt', layer=IPyAMSLayer)
@widget_template_config(mode=DISPLAY_MODE,
template='templates/media-display.pt', layer=IPyAMSLayer)
@implementer_only(IMediaFileWidget)
class MediaFileWidget(FileWidget):
"""Media file widget"""
@adapter_config(required=(IMediaField, IPyAMSLayer), provides=IFieldWidget)
def MediaFileFieldWidget(field, request): # pylint: disable=invalid-name
"""Media file field widget factory"""
return FieldWidget(field, MediaFileWidget(request))
| 37.148148 | 101 | 0.694084 | #
# Copyright (c) 2015-2020 Thierry Florac <tflorac AT ulthar.net>
# All Rights Reserved.
#
# This software is subject to the provisions of the Zope Public License,
# Version 2.1 (ZPL). A copy of the ZPL should accompany this distribution.
# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
# FOR A PARTICULAR PURPOSE.
#
"""PyAMS_file_views.widget module
This module defines data converters and and form widgets which are required to manage files
and images.
"""
import os.path
from cgi import FieldStorage
from datetime import datetime
from pyramid.interfaces import IView
from zope.component import queryMultiAdapter
from zope.dublincore.interfaces import IZopeDublinCore
from zope.interface import implementer_only
from pyams_file.file import EXTENSIONS_THUMBNAILS
from pyams_file.interfaces.thumbnail import IThumbnails
from pyams_file.schema import IFileField, IMediaField
from pyams_form.browser.file import FileWidget as FileWidgetBase
from pyams_form.converter import BaseDataConverter
from pyams_form.interfaces import DISPLAY_MODE, IDataConverter, INPUT_MODE
from pyams_form.interfaces.widget import IFieldWidget, IFileWidget, IMediaFileWidget
from pyams_form.template import widget_template_config
from pyams_form.util import to_bytes
from pyams_form.widget import FieldWidget
from pyams_layer.interfaces import IPyAMSLayer
from pyams_utils.adapter import adapter_config
from pyams_utils.interfaces.form import NOT_CHANGED, TO_BE_DELETED
from pyams_utils.size import get_human_size
from pyams_utils.url import absolute_url
__docformat__ = 'restructuredtext'
@adapter_config(required=(IFileField, IFileWidget), provides=IDataConverter)
class FileUploadDataConverter(BaseDataConverter):
    """File upload data converter"""

    def to_widget_value(self, value):
        """Return *value* unchanged; the widget renders the stored file as-is."""
        return value

    def to_field_value(self, value):
        """Convert a submitted form value into a field value.

        Returns:
            - ``TO_BE_DELETED`` when the companion "<name>__deleted" request
              parameter was set (explicit "remove this file" request),
            - ``NOT_CHANGED`` when nothing was submitted,
            - a ``(filename, file)`` tuple for a browser ``FieldStorage`` upload,
            - the tuple itself when one is already provided,
            - raw bytes otherwise.
        """
        # A hidden "<widget-name>__deleted" form field signals deletion.
        deleted_field_name = '{}__deleted'.format(self.widget.name)
        deleted = self.widget.request.params.get(deleted_field_name)
        if deleted:
            return TO_BE_DELETED
        # Empty submission: keep the previously stored value untouched.
        if (value is None) or (value is NOT_CHANGED) or (value == ''):
            return NOT_CHANGED
        if isinstance(value, FieldStorage):
            # Browser upload: filename together with the open file object.
            return value.filename, value.file
        if isinstance(value, tuple):
            return value
        # Anything else is treated as raw content and coerced to bytes.
        return to_bytes(value)
@widget_template_config(mode=INPUT_MODE,
                        template='templates/file-input.pt', layer=IPyAMSLayer)
@widget_template_config(mode=DISPLAY_MODE,
                        template='templates/file-display.pt', layer=IPyAMSLayer)
@implementer_only(IFileWidget)
class FileWidget(FileWidgetBase):
    """File widget"""

    @property
    def timestamp(self):
        """Image timestamp getter

        Uses the Dublin Core modification time of the current value when
        available, otherwise falls back to the current UTC time.
        """
        dc = IZopeDublinCore(self.current_value, None)  # pylint: disable=invalid-name
        if dc is None:
            return datetime.utcnow().timestamp()
        return dc.modified.timestamp()  # pylint: disable=no-member

    @property
    def current_value(self):
        """Widget value getter

        Returns None when the form ignores its context; when the stored
        field value is a per-language mapping, the entry matching the
        widget's ``lang`` attribute (if any) is returned.
        """
        if self.form.ignore_context:
            return None
        value = self.field.get(self.context)
        if isinstance(value, dict):
            lang = getattr(self, 'lang', None)
            if lang is not None:
                value = value.get(lang)
        return value

    @property
    def deletable(self):
        """Widget deletable flag getter

        A required field never offers deletion; otherwise the flag is true
        when a value is currently present.
        """
        if self.required:
            return False
        if not self.ignore_context:
            value = self.current_value
        else:
            value = self.value
        return bool(value)

    def get_human_size(self):
        """File human size getter"""
        return get_human_size(self.current_value.get_size(), self.request)

    def get_thumbnail(self, geometry='128x128'):
        """File thumbnail getter

        Returns the URL of a generated thumbnail (with the modification
        timestamp appended as a cache-busting query string); when no
        thumbnail can be produced, returns a static placeholder image
        selected from the file extension.
        """
        thumbnails = IThumbnails(self.current_value, None)
        if thumbnails is not None:
            display = thumbnails.get_thumbnail(geometry)  # pylint: disable=assignment-from-no-return
            if display is not None:
                dc = IZopeDublinCore(display, None)  # pylint: disable=invalid-name
                if dc is None:
                    timestamp = self.timestamp
                else:
                    timestamp = dc.modified.timestamp()  # pylint: disable=no-member
                return '{}?_={}'.format(absolute_url(display, self.request),
                                        timestamp)
        # Fallback: static placeholder keyed on the file extension.
        _name, ext = os.path.splitext(self.current_value.filename)
        return '/--static--/pyams_file/img/{}'.format(
            EXTENSIONS_THUMBNAILS.get(ext, 'unknown.png'))

    def get_thumbnail_target(self):
        """Widget thumbnail target getter

        Returns the 'preview.html' URL of the current value when such a
        view is registered for it, None otherwise.
        """
        value = self.current_value
        if value is not None:
            view = queryMultiAdapter((value, self.request), IView, name='preview.html')
            if view is not None:
                return absolute_url(value, self.request, 'preview.html')
        return None
@adapter_config(required=(IFileField, IPyAMSLayer), provides=IFieldWidget)
def FileFieldWidget(field, request):  # pylint: disable=invalid-name
    """File field widget factory

    Binds a new :class:`FileWidget` instance to the given field.
    """
    widget = FileWidget(request)
    return FieldWidget(field, widget)
#
# Medias files widget
#
@widget_template_config(mode=INPUT_MODE,
                        template='templates/media-input.pt', layer=IPyAMSLayer)
@widget_template_config(mode=DISPLAY_MODE,
                        template='templates/media-display.pt', layer=IPyAMSLayer)
@implementer_only(IMediaFileWidget)
class MediaFileWidget(FileWidget):
    """Media file widget

    Behaves exactly like :class:`FileWidget` but is rendered with the
    media-specific input/display templates registered above.
    """
@adapter_config(required=(IMediaField, IPyAMSLayer), provides=IFieldWidget)
def MediaFileFieldWidget(field, request):  # pylint: disable=invalid-name
    """Media file field widget factory

    Binds a new :class:`MediaFileWidget` instance to the given field.
    """
    widget = MediaFileWidget(request)
    return FieldWidget(field, widget)
| 519 | 0 | 54 |
f72e7997ae61489068cbd806a984a116eb6dbe1f | 147 | py | Python | chap12.py | maciejkos/ModSimPy | fe80a994689dafd282c3d479b19c90c34c590eb5 | [
"MIT"
] | 1 | 2022-01-04T12:54:18.000Z | 2022-01-04T12:54:18.000Z | chap12.py | Dicaromonroy/ModSimPy | fe80a994689dafd282c3d479b19c90c34c590eb5 | [
"MIT"
] | null | null | null | chap12.py | Dicaromonroy/ModSimPy | fe80a994689dafd282c3d479b19c90c34c590eb5 | [
"MIT"
] | null | null | null | from modsim import *
| 18.375 | 41 | 0.687075 | from modsim import *
def calc_total_infected(results, system):
    """Return the cumulative number of infections over a simulation run.

    The drop in the susceptible population between time 0 and
    ``system.t_end`` equals how many individuals got infected in total.
    """
    initial_susceptible = results.S[0]
    final_susceptible = results.S[system.t_end]
    return initial_susceptible - final_susceptible
| 102 | 0 | 23 |
d70140e749c3602a3fd0881b30ab9d600d5da846 | 7,892 | py | Python | python/comp_coreextractor-1.74/comp_coreextractor/webentity.py | dataesr/scanr-backend | 39681be69b9a96b4a07b9410754c897cd5b65c24 | [
"MIT"
] | null | null | null | python/comp_coreextractor-1.74/comp_coreextractor/webentity.py | dataesr/scanr-backend | 39681be69b9a96b4a07b9410754c897cd5b65c24 | [
"MIT"
] | 3 | 2020-06-18T15:06:10.000Z | 2021-05-07T16:29:50.000Z | plugins/comp_coreextractor/comp_coreextractor/webentity.py | reseachalps/Search-Engine | 1cd1e83902119938ffd412394b09dce92d082500 | [
"MIT"
] | null | null | null | import collections
from textmining import normalizer
from statistics import mean, variance
from math import floor
class WebEntity(collections.MutableMapping):
"""
Represents a web entity and all its potential
attributes.
Attributes are accessible as in a dictionary
"""
def __init__(self):
"""
Initializes attributes
"""
self.set_attributes = {"localId", "summary", "email", "url", "phone", "fax", "domain", "contact", "contactform",
"legal", "useterms", "rss", "mobile", "responsive", "capital", "outlinks", "delivery_options", "payment_options"}
self.set_attributes.update(["monitoring", "seo"])
self.list_attributes = {"cms", "ecommerce", "addresses", "basket", "prices", "prices_per_page"}
self.str_attributes = {"description", "metadescription", "country"}
self.social_attributes = {"twitter", "facebook", "linkedin", "viadeo", "googleplus", "instagram", "youtube",
"dailymotion", "vimeo"}
self.dict_attributes = {"ecommerce_meta"}
self.attr = dict()
self.normzer = normalizer.Normalizer()
for a in self.set_attributes:
self.attr[a] = set()
for a in self.list_attributes:
self.attr[a] = list()
for a in self.str_attributes:
self.attr[a] = None
for a in self.social_attributes:
self.attr[a] = {}
for a in self.dict_attributes:
self.attr[a] = {}
self.attributes = self.set_attributes | self.str_attributes | self.list_attributes | self.social_attributes | self.dict_attributes
def export(self):
"""
Export all attributes in a dictionary
which can be rendered in json.
"""
attr = self.attr.copy()
# Json needs different social structure for JBM
for a in self.social_attributes:
social = []
for account in attr[a].values():
social.append(
{"account": account.account, "score": account.score, "profilePictureUrl": account.profile_picture})
attr[a] = social
# Json loader can't manage set objects
for a in self.set_attributes:
if a in ["responsive", "legal", "useterms", "seo", "mobile"]:
if True in attr[a]:
attr[a] = True
else:
attr[a] = False
elif a == "contact":
cts = []
for c in attr[a]:
cts.append(c.to_dict())
attr[a] = cts
elif a == "email":
emails = []
for e in attr[a]:
emails.append({"email": e[0], "generic": not e[1]})
attr[a] = emails
elif a == "rss":
rss = []
for r in attr[a]:
if r[0] is not None and r[1] is not None:
rss.append({"url": r[0], "frequency": r[1]})
attr[a] = rss
elif a == "summary":
attr[a] = attr[a].get_best_words(20, dic=True)
elif type(attr[a]) == set:
attr[a] = list(attr[a])
# Managing addresses
la = []
venues = set()
for addr in attr["addresses"]:
if addr.address not in venues:
a = {"address": addr.address, "zipcode": addr.zipcode, "city": addr.city}
venues.add(addr.address)
la.append(a)
attr["addresses"] = la
return attr
def normalize(self, pages_count):
"""
Normalizes attributes
"""
# Normalizes phone numbers
np = set()
for phone in self.attr["phone"]:
n = self.normzer.normalize_phone_number(phone)
if n is not None:
np.add(n)
# If normalization failed, we do not record phone
else:
pass
self.attr["phone"] = np
# Normalizes fax numbers
nf = set()
for fax in self.attr["fax"]:
f = self.normzer.normalize_phone_number(fax)
if f is not None:
nf.add(f)
# If normalization failed, we do not record fax
else:
pass
self.attr["fax"] = nf
# Normalize CMS found
cms = set()
res = []
for c in self.attr["cms"]:
if c["type"] not in cms:
res.append(c)
cms.add(c["type"])
self.attr["cms"] = res
# Normalize shopping platform found
shop = set()
res = []
for c in self.attr["ecommerce"]:
if c["type"] not in shop:
res.append(c)
shop.add(c["type"])
self.attr["ecommerce"] = res
if pages_count > 0:
baskets = len([x for x in self.attr["basket"] if x is True])
self.attr["ecommerce_meta"]["perc_pages_with_prices"] = self.attr["ecommerce_meta"]["pages_with_prices"] / pages_count
self.attr["ecommerce_meta"]["pages_with_basket"] = baskets
self.attr["ecommerce_meta"]["perc_pages_with_basket"] = baskets / pages_count
self.attr["ecommerce_meta"]["avg_price"] = mean(self.attr["prices"]) if len(self.attr["prices"]) > 0 else None
self.attr["ecommerce_meta"]["variance"] = variance(self.attr["prices"]) if len(self.attr["prices"]) > 1 else None
self.attr["ecommerce_meta"]["avg_prices_per_page"] = mean(self.attr["prices_per_page"]) if len(self.attr["prices"]) > 0 else None
# Computing quartiles
if len(self.attr["prices"]) > 0:
prices = sorted(self.attr["prices"])
tot = len(prices)
median = prices[floor(tot / 2)]
quart1 = prices[floor(tot / 4)]
quart3 = prices[floor(tot / 4 * 3)]
else:
median = quart1 = quart3 = None
self.attr["ecommerce_meta"]["median_price"] = median
self.attr["ecommerce_meta"]["first_quart_price"] = quart1
self.attr["ecommerce_meta"]["third_quart_price"] = quart3
# No pages crawled, values representing volumes must be initialized at 0
else:
for bkey in ["perc_pages_with_prices", "pages_with_basket", "perc_pages_with_basket", "pages_with_prices"]:
self.attr["ecommerce_meta"][bkey] = 0
self.attr["ecommerce_meta"]["payment_options"] = list(self.attr["ecommerce_meta"]["payment_options"])
self.attr["ecommerce_meta"]["delivery_options"] = list(self.attr["ecommerce_meta"]["delivery_options"])
# Remove potentially big fields unnecessary for JBM
del self.attr["prices"]
del self.attr["basket"]
del self.attr["prices_per_page"]
def __getitem__(self, key):
"""
Overrides dict class method
"""
return self.attr[key]
def __setitem__(self, key, value):
"""
Overrides dict class method.
Our dict is read only, no set possible.
"""
if key not in self.attributes:
raise ReadOnlyDictionaryException
else:
self.attr[key] = value
import collections
import collections.abc
from math import floor
from statistics import mean, variance

from textmining import normalizer
class ReadOnlyDictionaryException(Exception):
    """Raised when assigning a key that is not a declared attribute."""

    def __init__(self):
        message = "Impossible to set a value in this read-only dictionary"
        Exception.__init__(self, message)
class WebEntity(collections.abc.MutableMapping):
    """
    Represents a web entity and all its potential
    attributes.
    Attributes are accessible as in a dictionary

    Attribute names are partitioned by container type (sets, lists,
    plain strings, per-network social dicts and plain dicts); only names
    declared in one of those groups may be assigned through item access.

    Note: the base class is ``collections.abc.MutableMapping`` — the bare
    ``collections.MutableMapping`` alias was deprecated since Python 3.3
    and removed in Python 3.10.
    """

    def __init__(self):
        """
        Initializes attributes

        Every declared attribute is pre-created with an empty container of
        the appropriate type (None for plain string attributes).
        """
        self.set_attributes = {"localId", "summary", "email", "url", "phone", "fax", "domain", "contact",
                               "contactform", "legal", "useterms", "rss", "mobile", "responsive", "capital",
                               "outlinks", "delivery_options", "payment_options"}
        self.set_attributes.update(["monitoring", "seo"])
        self.list_attributes = {"cms", "ecommerce", "addresses", "basket", "prices", "prices_per_page"}
        self.str_attributes = {"description", "metadescription", "country"}
        self.social_attributes = {"twitter", "facebook", "linkedin", "viadeo", "googleplus", "instagram",
                                  "youtube", "dailymotion", "vimeo"}
        self.dict_attributes = {"ecommerce_meta"}
        self.attr = dict()
        self.normzer = normalizer.Normalizer()
        for a in self.set_attributes:
            self.attr[a] = set()
        for a in self.list_attributes:
            self.attr[a] = list()
        for a in self.str_attributes:
            self.attr[a] = None
        for a in self.social_attributes:
            self.attr[a] = {}
        for a in self.dict_attributes:
            self.attr[a] = {}
        self.attributes = self.set_attributes | self.str_attributes | self.list_attributes | self.social_attributes | self.dict_attributes

    def export(self):
        """
        Export all attributes in a dictionary
        which can be rendered in json.
        """
        attr = self.attr.copy()
        # Json needs different social structure for JBM
        for a in self.social_attributes:
            social = []
            for account in attr[a].values():
                social.append(
                    {"account": account.account, "score": account.score, "profilePictureUrl": account.profile_picture})
            attr[a] = social
        # Json loader can't manage set objects
        for a in self.set_attributes:
            if a in ["responsive", "legal", "useterms", "seo", "mobile"]:
                # collapse boolean-flag sets to a single boolean
                if True in attr[a]:
                    attr[a] = True
                else:
                    attr[a] = False
            elif a == "contact":
                cts = []
                for c in attr[a]:
                    cts.append(c.to_dict())
                attr[a] = cts
            elif a == "email":
                emails = []
                for e in attr[a]:
                    # e is a (address, is_personal) pair
                    emails.append({"email": e[0], "generic": not e[1]})
                attr[a] = emails
            elif a == "rss":
                rss = []
                for r in attr[a]:
                    if r[0] is not None and r[1] is not None:
                        rss.append({"url": r[0], "frequency": r[1]})
                attr[a] = rss
            elif a == "summary":
                attr[a] = attr[a].get_best_words(20, dic=True)
            elif type(attr[a]) == set:
                attr[a] = list(attr[a])
        # Managing addresses: deduplicate by street address
        la = []
        venues = set()
        for addr in attr["addresses"]:
            if addr.address not in venues:
                a = {"address": addr.address, "zipcode": addr.zipcode, "city": addr.city}
                venues.add(addr.address)
                la.append(a)
        attr["addresses"] = la
        return attr

    def normalize(self, pages_count):
        """
        Normalizes attributes

        :param pages_count: number of crawled pages, used to compute
            per-page e-commerce ratios (0 means nothing was crawled).
        """
        # Normalizes phone numbers; numbers that fail normalization are dropped
        np = set()
        for phone in self.attr["phone"]:
            n = self.normzer.normalize_phone_number(phone)
            if n is not None:
                np.add(n)
        self.attr["phone"] = np
        # Normalizes fax numbers; numbers that fail normalization are dropped
        nf = set()
        for fax in self.attr["fax"]:
            f = self.normzer.normalize_phone_number(fax)
            if f is not None:
                nf.add(f)
        self.attr["fax"] = nf
        # Normalize CMS found: keep the first entry of each type
        cms = set()
        res = []
        for c in self.attr["cms"]:
            if c["type"] not in cms:
                res.append(c)
                cms.add(c["type"])
        self.attr["cms"] = res
        # Normalize shopping platform found: keep the first entry of each type
        shop = set()
        res = []
        for c in self.attr["ecommerce"]:
            if c["type"] not in shop:
                res.append(c)
                shop.add(c["type"])
        self.attr["ecommerce"] = res
        if pages_count > 0:
            baskets = len([x for x in self.attr["basket"] if x is True])
            self.attr["ecommerce_meta"]["perc_pages_with_prices"] = self.attr["ecommerce_meta"]["pages_with_prices"] / pages_count
            self.attr["ecommerce_meta"]["pages_with_basket"] = baskets
            self.attr["ecommerce_meta"]["perc_pages_with_basket"] = baskets / pages_count
            self.attr["ecommerce_meta"]["avg_price"] = mean(self.attr["prices"]) if len(self.attr["prices"]) > 0 else None
            self.attr["ecommerce_meta"]["variance"] = variance(self.attr["prices"]) if len(self.attr["prices"]) > 1 else None
            # NOTE(review): the guard below checks "prices", not
            # "prices_per_page" -- looks like a copy/paste slip; confirm
            # intent before changing.
            self.attr["ecommerce_meta"]["avg_prices_per_page"] = mean(self.attr["prices_per_page"]) if len(self.attr["prices"]) > 0 else None
            # Computing quartiles
            if len(self.attr["prices"]) > 0:
                prices = sorted(self.attr["prices"])
                tot = len(prices)
                median = prices[floor(tot / 2)]
                quart1 = prices[floor(tot / 4)]
                quart3 = prices[floor(tot / 4 * 3)]
            else:
                median = quart1 = quart3 = None
            self.attr["ecommerce_meta"]["median_price"] = median
            self.attr["ecommerce_meta"]["first_quart_price"] = quart1
            self.attr["ecommerce_meta"]["third_quart_price"] = quart3
        # No pages crawled, values representing volumes must be initialized at 0
        else:
            for bkey in ["perc_pages_with_prices", "pages_with_basket", "perc_pages_with_basket", "pages_with_prices"]:
                self.attr["ecommerce_meta"][bkey] = 0
        self.attr["ecommerce_meta"]["payment_options"] = list(self.attr["ecommerce_meta"]["payment_options"])
        self.attr["ecommerce_meta"]["delivery_options"] = list(self.attr["ecommerce_meta"]["delivery_options"])
        # Remove potentially big fields unnecessary for JBM
        del self.attr["prices"]
        del self.attr["basket"]
        del self.attr["prices_per_page"]

    def __getitem__(self, key):
        """
        Overrides dict class method
        """
        return self.attr[key]

    def __setitem__(self, key, value):
        """
        Overrides dict class method.
        Only declared attribute names may be assigned; any other key
        raises ReadOnlyDictionaryException.
        """
        if key not in self.attributes:
            raise ReadOnlyDictionaryException
        else:
            self.attr[key] = value

    def __delitem__(self, key):
        del self.attr[key]

    def __iter__(self):
        return iter(self.attr)

    def __len__(self):
        return len(self.attr)

    def __str__(self):
        # one "[name] value" line per non-empty declared attribute
        s = ""
        for a in self.attributes:
            if a in self.attr and self.attr[a] is not None:
                s += "[%s] %s\n" % (a, str(self.attr[a]))
        return s
| 360 | 24 | 157 |
dc4bc736dbb873816bd20c425a93adc0c3bdee0f | 298 | py | Python | Day2/python_network_programming/remote_machine_info.py | nuSapb/basic-python | df6b15e922db7a27a8931143f8dc8ca4fb198b6b | [
"MIT"
] | null | null | null | Day2/python_network_programming/remote_machine_info.py | nuSapb/basic-python | df6b15e922db7a27a8931143f8dc8ca4fb198b6b | [
"MIT"
] | null | null | null | Day2/python_network_programming/remote_machine_info.py | nuSapb/basic-python | df6b15e922db7a27a8931143f8dc8ca4fb198b6b | [
"MIT"
] | null | null | null | import socket
if __name__ == '__main__':
get_remote_machine_info() | 27.090909 | 66 | 0.674497 | import socket
def get_remote_machine_info(remote_host='www.python.org'):
    """Resolve *remote_host* to an IP address and print the result.

    :param remote_host: hostname to resolve (generalized from the
        previously hard-coded 'www.python.org'; the default keeps the
        original behavior for existing callers).

    DNS failures are caught and reported instead of raised
    (``socket.error`` is an alias of ``OSError`` on Python 3).
    """
    try:
        print('IP address: %s' % socket.gethostbyname(remote_host))
    except socket.error as err_msg:
        print("%s: %s" % (remote_host, err_msg))
if __name__ == '__main__':
get_remote_machine_info() | 204 | 0 | 23 |
dcf8949b095d33434aeec30bd6f9cc8d28dd2587 | 10,493 | py | Python | SassifiParser/old/parse_sassifi_fault_injection.py | UFRGS-CAROL/radiation-benchmarks-parsers | a39844ed3ed511f4f2672bc2e0c7e6f920dc7f2b | [
"Apache-2.0"
] | null | null | null | SassifiParser/old/parse_sassifi_fault_injection.py | UFRGS-CAROL/radiation-benchmarks-parsers | a39844ed3ed511f4f2672bc2e0c7e6f920dc7f2b | [
"Apache-2.0"
] | null | null | null | SassifiParser/old/parse_sassifi_fault_injection.py | UFRGS-CAROL/radiation-benchmarks-parsers | a39844ed3ed511f4f2672bc2e0c7e6f920dc7f2b | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/python
import include
import os
import csv
import sys
import re
from collections import Counter
ERROR_MODEL_SIZE = len(include.EM_STR)
INSTRUCTION_SIZE = len(include.ASID_STR)
inst_type="rf"
MAX_LOGS_SIZE=9999999999
# except:
# e = sys.exc_info()[0]
# #write_to_page( "<p>Error: %s</p>" % e )
# print e
if __name__ == "__main__":
parameter = sys.argv[1:]
#()
if len(parameter) < 3:
usage()
else:
print parameter[3]
if parameter[3] != 'caio':
inst_type = (parameter[3] if parameter[3] == 'rf' else 'inst')
parse_csv(parameter[0], parameter[1], (True if parameter[2] == 'cp' else False))
#():
else:
process_daniels_and_caios_log(parameter[0], parameter[1], parameter[2])
| 35.449324 | 141 | 0.62089 | #!/usr/bin/python
import include
import os
import csv
import sys
import re
from collections import Counter
ERROR_MODEL_SIZE = len(include.EM_STR)
INSTRUCTION_SIZE = len(include.ASID_STR)
inst_type="rf"
MAX_LOGS_SIZE=9999999999
def check_crash(log_filename):
    """Scan one SASSIFI log file for completion markers.

    :param log_filename: path of the log file to inspect.
    :return: ``(crash, abort, kernel_time)`` tuple where *crash* is 1 when
        no 'END' marker was found, *abort* is 1 when an 'ABORT' marker was
        found, and *kernel_time* is the last 'KerTime' value reported
        (0 when absent).

    Fix: the file is now opened with a ``with`` statement so the handle is
    closed even if reading raises (the old explicit ``close()`` leaked the
    descriptor on errors).
    """
    regexp = re.compile(r'.*KerTime.*?([0-9.-]+)')
    there_is_end = 0
    there_is_abort = 0
    kernel_time = 0
    with open(log_filename) as log_file:
        for line in log_file:
            match = regexp.match(line)
            if match:
                # keep the last kernel time reported in the file
                kernel_time = float(match.group(1))
            if 'END' in line:
                there_is_end = 1
            if 'ABORT' in line:
                there_is_abort = 1
    crash = 0
    if there_is_end == 0:
        crash = 1
    return (crash, there_is_abort, kernel_time)
def count_frequency(row_to_count):
    """Return ``[value, occurrences]`` pairs for each distinct value.

    :param row_to_count: iterable of hashable values to tally.
    :return: list of two-element lists, one per distinct value.

    Fix: ``Counter.iteritems()`` only exists on Python 2; ``items()``
    behaves identically and works on both Python 2 and 3.
    """
    return [[value, occurrences]
            for (value, occurrences) in Counter(row_to_count).items()]
def parse_csv(csv_input, logs_dir, cp):
    """Parse a SASSIFI injection-result CSV and write 'parse_<csv_input>'.

    Tallies SDCs, crashes and aborts per instruction category and per
    error model (globally and per kernel), then dumps several histogram
    tables into the output CSV.

    :param csv_input: injection summary CSV (columns: log_file, has_sdc,
        inj_kname, inj_igid, inj_fault_model, ...).
    :param logs_dir: directory holding the per-injection log files.
    :param cp: when True, copy every referenced log into ./good_logs.

    NOTE(review): this function uses Python 2 ``print`` statements and is
    not Python-3 compatible as written. Indentation was reconstructed
    from semantics -- confirm against the original repository.
    """
    global INSTRUCTION_SIZE, ERROR_MODEL_SIZE
    csvfile = open(csv_input)
    reader = csv.DictReader(csvfile)
    #count sdcs for each instruction
    sdc_inst_count = []
    #count sdcs for each error model
    sdc_em_count = []
    sdcs = 0
    #count total faults
    total_faults = 0
    #total faults per instruction
    total_inst_count = []
    #per error model
    total_em_count = []
    #count crashes and abort per instruction
    crashes_inst_count = []
    abort_inst_count = []
    #count crashes and abort per error model
    crashes_em_count = []
    abort_em_count = []
    #count for each injection type the kernel occurrence
    sdc_em_count_per_kernel = {}
    sdc_inst_count_per_kernel = {}
    inj_em_count_per_kernel = {}
    inj_inst_count_per_kernel = {}
    # kernel_array = count_frequency(reader,"inj_kname")
    #kernel time
    kern_time = []
    total_crashes = 0
    total_aborts = 0
    print "Parsing " + csv_input
    #separate the good data
    if cp: os.system("mkdir -p ./good_logs")
    # zero-initialize the per-category counters
    for i in range(0,INSTRUCTION_SIZE):
        sdc_inst_count.append(0)
        total_inst_count.append(0)
        crashes_inst_count.append(0)
        abort_inst_count.append(0)
    for i in range(0,ERROR_MODEL_SIZE):
        sdc_em_count.append(0)
        total_em_count.append(0)
        crashes_em_count.append(0)
        abort_em_count.append(0)
    # for i in kernel_array:
    #     kernel = str(i[0])
    #     sdc_em_count_per_kernel[kernel] = sdc_em_count
    max_logs_count = 0
    #log_file,has_sdc,inj_kname,inj_kcount, inj_igid, inj_fault_model, inj_inst_id, inj_destination_id, inj_bit_location, finished
    for row in reader:
        if MAX_LOGS_SIZE == max_logs_count:
            break
        max_logs_count += 1
        #print row['log_file']
        # cp all good data to new folder
        if cp: os.system("cp "+ logs_dir + "/" + row['log_file'] +" good_logs/")
        # in 'rf' mode all injections land in a single pseudo-category (8)
        it_inst_count = 8
        if 'inst' in inst_type:
            it_inst_count = int(row['inj_igid'])
        it_em_count = int(row['inj_fault_model'])
        #increase each instrction/error model count to have the final results
        if '1' in row['has_sdc']:
            sdc_inst_count[it_inst_count] += 1
            sdc_em_count[it_em_count] += 1
            sdcs += 1
            #count em per kernel (lazily create the per-kernel counters)
            if row["inj_kname"] not in sdc_em_count_per_kernel:
                sdc_em_count_per_kernel[row["inj_kname"]] = []
                for x in range(0, ERROR_MODEL_SIZE):
                    sdc_em_count_per_kernel[row["inj_kname"]].append(0)
            if row["inj_kname"] not in sdc_inst_count_per_kernel:
                sdc_inst_count_per_kernel[row["inj_kname"]] = []
                for x in range(0, INSTRUCTION_SIZE):
                    sdc_inst_count_per_kernel[row["inj_kname"]].append(0)
            sdc_em_count_per_kernel[row["inj_kname"]][it_em_count] += 1
            sdc_inst_count_per_kernel[row["inj_kname"]][it_inst_count] += 1
        # per-kernel injection totals are kept for every row, not only SDC
        # rows (the report below checks "kernel in sdc_..." accordingly)
        if row["inj_kname"] not in inj_em_count_per_kernel:
            inj_em_count_per_kernel[row["inj_kname"]] = 0
        if row["inj_kname"] not in inj_inst_count_per_kernel:
            inj_inst_count_per_kernel[row["inj_kname"]] = 0
        inj_em_count_per_kernel[row["inj_kname"]] += 1
        inj_inst_count_per_kernel[row["inj_kname"]] += 1
        #check crash info for each file
        (crash, abort, kertime) = check_crash(logs_dir + "/" + row['log_file'])
        if crash > 1 or abort > 1:
            print 'Some error in the log files'
            exit(1)
        crashes_inst_count[it_inst_count] += crash
        abort_inst_count[it_inst_count] += abort
        crashes_em_count[it_em_count] += crash
        abort_em_count[it_em_count] += abort
        total_crashes += crash
        kern_time.append(kertime)
        total_faults += 1
        #print row['inj_asid'] + " " + row['inj_fault_model']
        total_inst_count[it_inst_count] += 1
        total_em_count[it_em_count] += 1
    csvfile.close();
    #---------------------------------------------------------------
    #print instruction histogram
    csvfile = open('parse_'+csv_input, 'w')
    fieldnames = ['instruction', 'sdc_num', 'total_inst_count', 'crashes', 'abort']
    writer = csv.DictWriter(csvfile, fieldnames=fieldnames)
    writer.writeheader()
    for i in range(0, INSTRUCTION_SIZE):
        writer.writerow({'instruction': include.ASID_STR[i], 'sdc_num': str(sdc_inst_count[i]), 'total_inst_count': str(total_inst_count[i]),
                         'crashes': str(crashes_inst_count[i]), 'abort': str(abort_inst_count[i])})
    # blank separator line, then the per-error-model table
    writer.writerow({'instruction': '', 'sdc_num': '', 'total_inst_count':''})
    writer.writerow({'instruction': 'error_model', 'sdc_num': 'sdc_num', 'total_inst_count':'total_em_count'})
    for i in range(0, ERROR_MODEL_SIZE):
        writer.writerow({'instruction': include.EM_STR[i], 'sdc_num': str(sdc_em_count[i]), 'total_inst_count': str(total_em_count[i]),
                         'crashes':str(crashes_em_count[i]), 'abort':str(abort_em_count[i])})
    writer.writerow({'instruction': '', 'sdc_num': ''})
    writer.writerow({'instruction': 'Total sdcs', 'sdc_num': str(sdcs)})
    writer.writerow({'instruction': 'Injected faults', 'sdc_num': str(total_faults)})
    # print kern_time
    runtime_average = sum(kern_time) / len(kern_time)
    writer.writerow({'instruction': 'Average kernel runtime', 'sdc_num': str(runtime_average)})
    # per-kernel SDC breakdown by error model
    writer.writerow({'instruction': 'error model', 'sdc_num': 'SDCs', 'total_inst_count':''})
    for kernel in inj_em_count_per_kernel:
        # kernel = str(i[0])
        writer.writerow({'instruction': kernel})
        if kernel in sdc_em_count_per_kernel:
            err_list = sdc_em_count_per_kernel[kernel]
            for j in range(ERROR_MODEL_SIZE):
                writer.writerow({'instruction': include.EM_STR[j], 'sdc_num': str(err_list[j])})
    # per-kernel SDC breakdown by instruction category
    writer.writerow({'instruction': '', 'sdc_num': '', 'total_inst_count':''})
    writer.writerow({'instruction': 'Instructions', 'sdc_num': 'SDCs', 'total_inst_count':''})
    for kernel in inj_inst_count_per_kernel:
        writer.writerow({'instruction': kernel})
        if kernel in sdc_inst_count_per_kernel:
            err_list = sdc_inst_count_per_kernel[kernel]
            for j in range(INSTRUCTION_SIZE):
                writer.writerow({'instruction': include.ASID_STR[j], 'sdc_num': str(err_list[j])})
    # per-kernel injection totals
    writer.writerow({'instruction': '', 'total_inst_count':''})
    writer.writerow({'instruction': 'kernel', 'sdc_num': 'injected faults', 'total_inst_count':''})
    for kernel in inj_inst_count_per_kernel:
        writer.writerow({'instruction': kernel, 'sdc_num': inj_em_count_per_kernel[kernel]})
    csvfile.close()
    print csv_input + " parsed"
def process_daniels_and_caios_log(csv_input, daniel_csv, is_daniel):
    """Merge SDC rows of the injection CSV with a second (external) CSV.

    For every SDC row of *csv_input*, finds the first row of *daniel_csv*
    whose log-name column is a substring of the injection's log file and
    writes the merged row into 'parsed_dc_<csv_input>'.

    :param csv_input: injection summary CSV ('log_file', 'has_sdc' columns).
    :param daniel_csv: second CSV whose log-name column depends on mode.
    :param is_daniel: 'd', 'c' or 'l' selects which column of *daniel_csv*
        holds the log name; any other value skips the merge entirely.

    NOTE(review): uses Python 2 ``print`` statements; indentation was
    reconstructed from semantics -- confirm against the original repo.
    """
    print is_daniel
    csvfile = open(csv_input)
    reader = csv.DictReader(csvfile)
    daniel_input = open(daniel_csv)
    # if is_daniel:
    #     reader_daniel = csv.DictReader(daniel_input, delimiter=';', quoting=csv.QUOTE_NONE)
    #     # reader_daniel = csv.DictReader(daniel_input)
    # else:
    reader_daniel = csv.DictReader(daniel_input)
    # merged header: columns of the first CSV followed by the second's
    fieldnames = []
    for i in reader.fieldnames:
        fieldnames.append(i)
    for i in reader_daniel.fieldnames:
        fieldnames.append(i)
    my_lines = 0
    daniel_lines = 0
    output_csv = open("parsed_dc_" + csv_input, 'w')
    writer = csv.DictWriter(output_csv, fieldnames=fieldnames)
    writer.writeheader()
    first = True
    # materialize both inputs so they can be cross-joined
    daniel_rows = []
    logs_rows = []
    for row in reader_daniel:
        daniel_rows.append(row)
    for row in reader:
        logs_rows.append(row)
    # print writer.fieldnames
    count_logs = 0
    d = {}
    index_str_log = 'log_file'
    has_sdc = 'has_sdc'
    # select the log-name column of the second CSV depending on its origin
    if is_daniel == 'd':
        log_file_name = 'logFileName'
    elif is_daniel == 'c':
        log_file_name = 'Logname'
    elif is_daniel == 'l':
        log_file_name = '_File_name'
    if is_daniel != 'none':
        for i in logs_rows:
            log_file = i[index_str_log]
            if '1' in i[has_sdc]:
                for j in daniel_rows:
                    logname = j[log_file_name]
                    # substring match; skip log files already merged
                    if logname in log_file and log_file not in d.values():
                        z = j.copy()
                        z.update(i)
                        d = z.copy()
                        writer.writerow(z)
                        count_logs += 1
                        # print d
                        break
    print "Parsed " + str(count_logs)
    csvfile.close();
    daniel_input.close()
    output_csv.close()
# except:
# e = sys.exc_info()[0]
# #write_to_page( "<p>Error: %s</p>" % e )
# print e
def usage():
    """Print command-line usage for the two parsing modes.

    Fix: the original Python 2 ``print`` statements are a SyntaxError on
    Python 3; the parenthesized single-argument calls below print the
    exact same text on both Python 2 and 3.
    """
    print("For parse raw data <csv_input> <logs_dir> <cp | none> <caio | none>")
    print("For merge and parse Daniel's log <csv_input> <daniel_csv> <is_daniel> <c or d or l | none>")
if __name__ == "__main__":
parameter = sys.argv[1:]
#()
if len(parameter) < 3:
usage()
else:
print parameter[3]
if parameter[3] != 'caio':
inst_type = (parameter[3] if parameter[3] == 'rf' else 'inst')
parse_csv(parameter[0], parameter[1], (True if parameter[2] == 'cp' else False))
#():
else:
process_daniels_and_caios_log(parameter[0], parameter[1], parameter[2])
| 9,582 | 0 | 114 |
9e26b5dfa815bc12723da707b2340b11e565861e | 39,683 | py | Python | tests/conftest.py | neuro-inc/neuro-admin-client | b67702912c92f9caf579e5c3b3869fe567064164 | [
"Apache-2.0"
] | null | null | null | tests/conftest.py | neuro-inc/neuro-admin-client | b67702912c92f9caf579e5c3b3869fe567064164 | [
"Apache-2.0"
] | 41 | 2021-09-02T12:36:24.000Z | 2022-02-21T16:44:18.000Z | tests/conftest.py | neuro-inc/neuro-admin-client | b67702912c92f9caf579e5c3b3869fe567064164 | [
"Apache-2.0"
] | null | null | null | from __future__ import annotations
import asyncio
import datetime
from collections.abc import AsyncIterator
from contextlib import asynccontextmanager
from dataclasses import dataclass, field, replace
from decimal import Decimal
from typing import Any
import aiohttp
import aiohttp.web
import pytest
from yarl import URL
from neuro_admin_client import (
Balance,
Cluster,
ClusterUser,
ClusterUserRoleType,
Org,
OrgCluster,
OrgUser,
OrgUserRoleType,
Quota,
User,
)
@dataclass
@dataclass(frozen=True)
@dataclass()
@pytest.fixture
@pytest.fixture
@asynccontextmanager
| 37.901624 | 88 | 0.560265 | from __future__ import annotations
import asyncio
import datetime
from collections.abc import AsyncIterator
from contextlib import asynccontextmanager
from dataclasses import dataclass, field, replace
from decimal import Decimal
from typing import Any
import aiohttp
import aiohttp.web
import pytest
from yarl import URL
from neuro_admin_client import (
Balance,
Cluster,
ClusterUser,
ClusterUserRoleType,
Org,
OrgCluster,
OrgUser,
OrgUserRoleType,
Quota,
User,
)
@dataclass
class ApiAddress:
host: str
port: int
@dataclass(frozen=True)
class Debt:
cluster_name: str
user_name: str
credits: Decimal
def _parse_bool(value: str) -> bool:
value = value.lower()
return value in ("1", "true", "yes")
@dataclass()
class AdminServer:
address: ApiAddress | None = None
users: list[User] = field(default_factory=list)
clusters: list[Cluster] = field(default_factory=list)
orgs: list[Org] = field(default_factory=list)
cluster_users: list[ClusterUser] = field(default_factory=list)
org_clusters: list[OrgCluster] = field(default_factory=list)
org_users: list[OrgUser] = field(default_factory=list)
debts: list[Debt] = field(default_factory=list)
@property
def url(self) -> URL:
assert self.address
return URL(f"http://{self.address.host}:{self.address.port}/api/v1/")
def _serialize_user(self, user: User) -> dict[str, Any]:
return {
"name": user.name,
"email": user.email,
"first_name": user.first_name,
"last_name": user.last_name,
"created_at": user.created_at.isoformat() if user.created_at else None,
}
def _serialize_user_cluster(self, user: ClusterUser) -> dict[str, Any]:
res = self._serialize_cluster_user(user, False)
res.pop("user_name")
res["cluster_name"] = user.cluster_name
return res
async def handle_user_post(
self, request: aiohttp.web.Request
) -> aiohttp.web.Response:
payload = await request.json()
new_user = User(
name=payload["name"],
email=payload["email"],
first_name=payload["first_name"],
last_name=payload["last_name"],
created_at=datetime.datetime.now(datetime.timezone.utc),
)
self.users.append(new_user)
return aiohttp.web.json_response(self._serialize_user(new_user))
async def handle_user_get(
self, request: aiohttp.web.Request
) -> aiohttp.web.Response:
user_name = request.match_info["uname"]
for user in self.users:
if user.name == user_name:
payload = self._serialize_user(user)
if "clusters" in request.query.getall("include", []):
payload["clusters"] = [
self._serialize_user_cluster(cluster_user)
for cluster_user in self.cluster_users
if cluster_user.user_name == user_name
]
return aiohttp.web.json_response(payload)
raise aiohttp.web.HTTPNotFound
async def handle_user_list(
self, request: aiohttp.web.Request
) -> aiohttp.web.Response:
resp = [self._serialize_user(user) for user in self.users]
return aiohttp.web.json_response(resp)
def _serialize_org(self, org: Org) -> dict[str, Any]:
return {
"name": org.name,
}
async def handle_org_post(
self, request: aiohttp.web.Request
) -> aiohttp.web.Response:
payload = await request.json()
new_org = Org(
name=payload["name"],
)
self.orgs.append(new_org)
return aiohttp.web.json_response(self._serialize_org(new_org))
async def handle_org_get(
self, request: aiohttp.web.Request
) -> aiohttp.web.Response:
org_name = request.match_info["oname"]
for org in self.orgs:
if org.name == org_name:
return aiohttp.web.json_response(self._serialize_org(org))
raise aiohttp.web.HTTPNotFound
async def handle_org_delete(
self, request: aiohttp.web.Request
) -> aiohttp.web.Response:
org_name = request.match_info["oname"]
for idx, org in enumerate(self.orgs):
if org.name == org_name:
del self.orgs[idx]
return aiohttp.web.json_response(self._serialize_org(org))
raise aiohttp.web.HTTPNotFound
async def handle_org_list(
self, request: aiohttp.web.Request
) -> aiohttp.web.Response:
resp = [self._serialize_org(org) for org in self.orgs]
return aiohttp.web.json_response(resp)
def _serialize_cluster(self, cluster: Cluster) -> dict[str, Any]:
resp: dict[str, Any] = {
"name": cluster.name,
"default_quota": {},
}
if cluster.default_credits:
resp["default_credits"] = str(cluster.default_credits)
if cluster.default_quota.total_running_jobs:
resp["default_quota"][
"total_running_jobs"
] = cluster.default_quota.total_running_jobs
return resp
def _int_or_none(self, value: str | None) -> int | None:
if value:
return int(value)
return None
async def handle_cluster_post(
self, request: aiohttp.web.Request
) -> aiohttp.web.Response:
payload = await request.json()
default_credits_raw = payload.get("default_credits")
default_quota_raw = payload.get("default_quota", {})
new_cluster = Cluster(
name=payload["name"],
default_credits=Decimal(default_credits_raw)
if default_credits_raw
else None,
default_quota=Quota(
total_running_jobs=self._int_or_none(
default_quota_raw.get("total_running_jobs")
)
),
)
self.clusters.append(new_cluster)
return aiohttp.web.json_response(self._serialize_cluster(new_cluster))
async def handle_cluster_put(
self, request: aiohttp.web.Request
) -> aiohttp.web.Response:
cluster_name = request.match_info["cname"]
payload = await request.json()
assert cluster_name == payload["name"]
default_credits_raw = payload.get("default_credits")
default_quota_raw = payload.get("default_quota", {})
changed_cluster = Cluster(
name=payload["name"],
default_credits=Decimal(default_credits_raw)
if default_credits_raw
else None,
default_quota=Quota(
total_running_jobs=self._int_or_none(
default_quota_raw.get("total_running_jobs")
)
),
)
self.clusters = [
cluster for cluster in self.clusters if cluster.name != changed_cluster.name
]
self.clusters.append(changed_cluster)
return aiohttp.web.json_response(self._serialize_cluster(changed_cluster))
async def handle_cluster_get(
self, request: aiohttp.web.Request
) -> aiohttp.web.Response:
cluster_name = request.match_info["cname"]
for cluster in self.clusters:
if cluster.name == cluster_name:
return aiohttp.web.json_response(self._serialize_cluster(cluster))
raise aiohttp.web.HTTPNotFound
async def handle_cluster_list(
self, request: aiohttp.web.Request
) -> aiohttp.web.Response:
resp = [self._serialize_cluster(cluster) for cluster in self.clusters]
return aiohttp.web.json_response(resp)
async def handle_cluster_delete(
self, request: aiohttp.web.Request
) -> aiohttp.web.Response:
cluster_name = request.match_info["cname"]
for idx, cluster in enumerate(self.clusters):
if cluster.name == cluster_name:
del self.clusters[idx]
return aiohttp.web.json_response(self._serialize_cluster(cluster))
raise aiohttp.web.HTTPNotFound
def _serialize_cluster_user(
self, cluster_user: ClusterUser, with_info: bool
) -> dict[str, Any]:
res: dict[str, Any] = {
"user_name": cluster_user.user_name,
"role": cluster_user.role.value,
"org_name": cluster_user.org_name,
"quota": {},
"balance": {
"spent_credits": str(cluster_user.balance.spent_credits),
},
}
if cluster_user.quota.total_running_jobs is not None:
res["quota"]["total_running_jobs"] = cluster_user.quota.total_running_jobs
if cluster_user.balance.credits is not None:
res["balance"]["credits"] = str(cluster_user.balance.credits)
if with_info:
user = next(
user for user in self.users if user.name == cluster_user.user_name
)
res["user_info"] = self._serialize_user(user)
res["user_info"].pop("name")
return res
async def handle_cluster_user_post(
self, request: aiohttp.web.Request
) -> aiohttp.web.Response:
cluster_name = request.match_info["cname"]
payload = await request.json()
credits_raw = payload["balance"].get("credits")
spend_credits_raw = payload["balance"].get("spend_credits_raw")
new_cluster_user = ClusterUser(
cluster_name=cluster_name,
user_name=payload["user_name"],
role=ClusterUserRoleType(payload["role"]),
org_name=payload.get("org_name"),
quota=Quota(total_running_jobs=payload["quota"].get("total_running_jobs")),
balance=Balance(
credits=Decimal(credits_raw) if credits_raw else None,
spent_credits=Decimal(spend_credits_raw)
if spend_credits_raw
else Decimal(0),
),
)
self.cluster_users.append(new_cluster_user)
return aiohttp.web.json_response(
self._serialize_cluster_user(
new_cluster_user,
_parse_bool(request.query.get("with_user_info", "false")),
)
)
async def handle_cluster_user_put(
self, request: aiohttp.web.Request
) -> aiohttp.web.Response:
cluster_name = request.match_info["cname"]
user_name = request.match_info["uname"]
org_name = request.match_info.get("oname")
payload = await request.json()
credits_raw = payload["balance"].get("credits")
spend_credits_raw = payload["balance"].get("spend_credits_raw")
assert user_name == payload["user_name"]
assert org_name == payload.get("org_name")
new_cluster_user = ClusterUser(
cluster_name=cluster_name,
user_name=payload["user_name"],
role=ClusterUserRoleType(payload["role"]),
org_name=payload.get("org_name"),
quota=Quota(total_running_jobs=payload["quota"].get("total_running_jobs")),
balance=Balance(
credits=Decimal(credits_raw) if credits_raw else None,
spent_credits=Decimal(spend_credits_raw)
if spend_credits_raw
else Decimal(0),
),
)
assert new_cluster_user.user_name == user_name
self.cluster_users = [
user
for user in self.cluster_users
if user.cluster_name != cluster_name
or user.user_name != user_name
or user.org_name != org_name
]
self.cluster_users.append(new_cluster_user)
return aiohttp.web.json_response(
self._serialize_cluster_user(
new_cluster_user,
_parse_bool(request.query.get("with_user_info", "false")),
)
)
async def handle_cluster_user_patch_quota(
self, request: aiohttp.web.Request
) -> aiohttp.web.Response:
cluster_name = request.match_info["cname"]
user_name = request.match_info["uname"]
org_name = request.match_info.get("oname")
payload = await request.json()
for index, user in enumerate(self.cluster_users):
if (
user.cluster_name == cluster_name
and user.user_name == user_name
and user.org_name == org_name
):
quota = user.quota
if "quota" in payload:
quota = replace(
quota,
total_running_jobs=payload["quota"].get("total_running_jobs"),
)
if (
"additional_quota" in payload
and quota.total_running_jobs is not None
):
quota = replace(
quota,
total_running_jobs=quota.total_running_jobs
+ payload["additional_quota"].get("total_running_jobs"),
)
user = replace(user, quota=quota)
self.cluster_users[index] = user
return aiohttp.web.json_response(
self._serialize_cluster_user(
user,
_parse_bool(request.query.get("with_user_info", "false")),
)
)
raise aiohttp.web.HTTPNotFound
async def handle_cluster_user_patch_balance(
self, request: aiohttp.web.Request
) -> aiohttp.web.Response:
cluster_name = request.match_info["cname"]
user_name = request.match_info["uname"]
org_name = request.match_info.get("oname")
payload = await request.json()
for index, user in enumerate(self.cluster_users):
if (
user.cluster_name == cluster_name
and user.user_name == user_name
and user.org_name == org_name
):
balance = user.balance
if "credits" in payload:
credits = (
Decimal(payload["credits"]) if payload["credits"] else None
)
balance = replace(balance, credits=credits)
if payload.get("additional_credits") and balance.credits is not None:
additional_credits = Decimal(payload["additional_credits"])
balance = replace(
balance, credits=balance.credits + additional_credits
)
user = replace(user, balance=balance)
self.cluster_users[index] = user
return aiohttp.web.json_response(
self._serialize_cluster_user(
user,
_parse_bool(request.query.get("with_user_info", "false")),
)
)
raise aiohttp.web.HTTPNotFound
async def handle_cluster_user_add_spending(
self, request: aiohttp.web.Request
) -> aiohttp.web.Response:
cluster_name = request.match_info["cname"]
user_name = request.match_info["uname"]
org_name = request.match_info.get("oname")
payload = await request.json()
for index, user in enumerate(self.cluster_users):
if (
user.cluster_name == cluster_name
and user.user_name == user_name
and user.org_name == org_name
):
balance = user.balance
spending = Decimal(payload["spending"])
balance = replace(
balance, spent_credits=balance.spent_credits + spending
)
if balance.credits:
balance = replace(balance, credits=balance.credits - spending)
user = replace(user, balance=balance)
self.cluster_users[index] = user
return aiohttp.web.json_response(
self._serialize_cluster_user(
user,
_parse_bool(request.query.get("with_user_info", "false")),
)
)
raise aiohttp.web.HTTPNotFound
async def handle_cluster_user_add_debt(
self, request: aiohttp.web.Request
) -> aiohttp.web.Response:
cluster_name = request.match_info["cname"]
payload = await request.json()
self.debts.append(
Debt(
cluster_name=cluster_name,
user_name=payload["user_name"],
credits=Decimal(payload["credits"]),
)
)
raise aiohttp.web.HTTPNoContent
async def handle_cluster_user_get(
self, request: aiohttp.web.Request
) -> aiohttp.web.Response:
cluster_name = request.match_info["cname"]
user_name = request.match_info["uname"]
org_name = request.match_info.get("oname")
for cluster_user in self.cluster_users:
if (
cluster_user.cluster_name == cluster_name
and cluster_user.user_name == user_name
and cluster_user.org_name == org_name
):
return aiohttp.web.json_response(
self._serialize_cluster_user(
cluster_user,
_parse_bool(request.query.get("with_user_info", "false")),
)
)
raise aiohttp.web.HTTPNotFound
async def handle_cluster_user_delete(
self, request: aiohttp.web.Request
) -> aiohttp.web.Response:
cluster_name = request.match_info["cname"]
user_name = request.match_info["uname"]
org_name = request.match_info.get("oname")
for idx, cluster_user in enumerate(self.cluster_users):
if (
cluster_user.cluster_name == cluster_name
and cluster_user.user_name == user_name
and cluster_user.org_name == org_name
):
del self.cluster_users[idx]
raise aiohttp.web.HTTPNoContent
raise aiohttp.web.HTTPNotFound
async def handle_cluster_user_list(
self, request: aiohttp.web.Request
) -> aiohttp.web.Response:
cluster_name = request.match_info["cname"]
org_name = request.match_info.get("oname")
resp = [
self._serialize_cluster_user(
cluster_user, _parse_bool(request.query.get("with_user_info", "false"))
)
for cluster_user in self.cluster_users
if cluster_user.cluster_name == cluster_name
and (org_name is None or cluster_user.org_name == org_name)
]
return aiohttp.web.json_response(resp)
def _serialize_org_user(self, org_user: OrgUser, with_info: bool) -> dict[str, Any]:
res: dict[str, Any] = {
"user_name": org_user.user_name,
"role": org_user.role.value,
"org_name": org_user.org_name,
}
if with_info:
user = next(user for user in self.users if user.name == org_user.user_name)
res["user_info"] = self._serialize_user(user)
res["user_info"].pop("name")
return res
async def handle_org_user_post(
self, request: aiohttp.web.Request
) -> aiohttp.web.Response:
org_name = request.match_info["oname"]
payload = await request.json()
new_org_user = OrgUser(
org_name=org_name,
user_name=payload["user_name"],
role=OrgUserRoleType(payload["role"]),
)
self.org_users.append(new_org_user)
return aiohttp.web.json_response(
self._serialize_org_user(
new_org_user,
_parse_bool(request.query.get("with_user_info", "false")),
)
)
async def handle_org_user_put(
self, request: aiohttp.web.Request
) -> aiohttp.web.Response:
org_name = request.match_info["oname"]
user_name = request.match_info["uname"]
payload = await request.json()
new_org_user = OrgUser(
org_name=org_name,
user_name=payload["user_name"],
role=OrgUserRoleType(payload["role"]),
)
assert new_org_user.user_name == user_name
self.org_users = [
user
for user in self.org_users
if user.org_name != org_name or user.user_name != user_name
]
self.org_users.append(new_org_user)
return aiohttp.web.json_response(
self._serialize_org_user(
new_org_user,
_parse_bool(request.query.get("with_user_info", "false")),
)
)
async def handle_org_user_get(
self, request: aiohttp.web.Request
) -> aiohttp.web.Response:
org_name = request.match_info["oname"]
user_name = request.match_info["uname"]
for org_user in self.org_users:
if org_user.org_name == org_name and org_user.user_name == user_name:
return aiohttp.web.json_response(
self._serialize_org_user(
org_user,
_parse_bool(request.query.get("with_user_info", "false")),
)
)
raise aiohttp.web.HTTPNotFound
async def handle_org_user_delete(
self, request: aiohttp.web.Request
) -> aiohttp.web.Response:
org_name = request.match_info["oname"]
user_name = request.match_info["uname"]
for idx, org_user in enumerate(self.org_users):
if org_user.org_name == org_name and org_user.user_name == user_name:
del self.org_users[idx]
raise aiohttp.web.HTTPNoContent
raise aiohttp.web.HTTPNotFound
async def handle_org_user_list(
self, request: aiohttp.web.Request
) -> aiohttp.web.Response:
org_name = request.match_info["oname"]
resp = [
self._serialize_org_user(
org_user, _parse_bool(request.query.get("with_user_info", "false"))
)
for org_user in self.org_users
if org_user.org_name == org_name
]
return aiohttp.web.json_response(resp)
def _serialize_org_cluster(self, org_cluster: OrgCluster) -> dict[str, Any]:
res: dict[str, Any] = {
"org_name": org_cluster.org_name,
"quota": {},
"balance": {
"spent_credits": str(org_cluster.balance.spent_credits),
},
"default_quota": {},
}
if org_cluster.quota.total_running_jobs is not None:
res["quota"]["total_running_jobs"] = org_cluster.quota.total_running_jobs
if org_cluster.balance.credits is not None:
res["balance"]["credits"] = str(org_cluster.balance.credits)
if org_cluster.default_credits:
res["default_credits"] = str(org_cluster.default_credits)
if org_cluster.default_quota.total_running_jobs:
res["default_quota"][
"total_running_jobs"
] = org_cluster.default_quota.total_running_jobs
return res
async def handle_org_cluster_post(
self, request: aiohttp.web.Request
) -> aiohttp.web.Response:
cluster_name = request.match_info["cname"]
payload = await request.json()
credits_raw = payload.get("balance", {}).get("credits")
default_credits_raw = payload.get("default_credits")
spend_credits_raw = payload.get("balance", {}).get("spend_credits_raw")
new_org_cluster = OrgCluster(
cluster_name=cluster_name,
org_name=payload["org_name"],
quota=Quota(
total_running_jobs=payload.get("quota", {}).get("total_running_jobs")
),
balance=Balance(
credits=Decimal(credits_raw) if credits_raw else None,
spent_credits=Decimal(spend_credits_raw)
if spend_credits_raw
else Decimal(0),
),
default_quota=Quota(
total_running_jobs=payload.get("default_quota", {}).get(
"total_running_jobs"
)
),
default_credits=Decimal(default_credits_raw)
if default_credits_raw
else None,
)
self.org_clusters.append(new_org_cluster)
return aiohttp.web.json_response(
self._serialize_org_cluster(
new_org_cluster,
)
)
async def handle_org_cluster_put(
self, request: aiohttp.web.Request
) -> aiohttp.web.Response:
cluster_name = request.match_info["cname"]
org_name = request.match_info["oname"]
payload = await request.json()
credits_raw = payload.get("balance", {}).get("credits")
default_credits_raw = payload.get("default_credits")
spend_credits_raw = payload.get("balance", {}).get("spend_credits_raw")
new_org_cluster = OrgCluster(
cluster_name=cluster_name,
org_name=payload["org_name"],
quota=Quota(
total_running_jobs=payload.get("quota", {}).get("total_running_jobs")
),
balance=Balance(
credits=Decimal(credits_raw) if credits_raw else None,
spent_credits=Decimal(spend_credits_raw)
if spend_credits_raw
else Decimal(0),
),
default_quota=Quota(
total_running_jobs=payload.get("default_quota", {}).get(
"total_running_jobs"
)
),
default_credits=Decimal(default_credits_raw)
if default_credits_raw
else None,
)
assert new_org_cluster.org_name == org_name
self.org_clusters = [
user
for user in self.org_clusters
if user.cluster_name != cluster_name or user.org_name != org_name
]
self.org_clusters.append(new_org_cluster)
return aiohttp.web.json_response(
self._serialize_org_cluster(
new_org_cluster,
)
)
async def handle_org_cluster_get(
self, request: aiohttp.web.Request
) -> aiohttp.web.Response:
cluster_name = request.match_info["cname"]
org_name = request.match_info["oname"]
for org_cluster in self.org_clusters:
if (
org_cluster.cluster_name == cluster_name
and org_cluster.org_name == org_name
):
return aiohttp.web.json_response(
self._serialize_org_cluster(org_cluster)
)
raise aiohttp.web.HTTPNotFound
async def handle_org_cluster_delete(
self, request: aiohttp.web.Request
) -> aiohttp.web.Response:
cluster_name = request.match_info["cname"]
org_name = request.match_info["oname"]
for idx, org_cluster in enumerate(self.org_clusters):
if (
org_cluster.cluster_name == cluster_name
and org_cluster.org_name == org_name
):
del self.org_clusters[idx]
raise aiohttp.web.HTTPNoContent
raise aiohttp.web.HTTPNotFound
async def handle_org_cluster_list(
self, request: aiohttp.web.Request
) -> aiohttp.web.Response:
cluster_name = request.match_info["cname"]
resp = [
self._serialize_org_cluster(org_cluster)
for org_cluster in self.org_clusters
if org_cluster.cluster_name == cluster_name
]
return aiohttp.web.json_response(resp)
async def handle_org_cluster_patch_quota(
self, request: aiohttp.web.Request
) -> aiohttp.web.Response:
cluster_name = request.match_info["cname"]
org_name = request.match_info["oname"]
payload = await request.json()
for index, org_cluster in enumerate(self.org_clusters):
if (
org_cluster.cluster_name == cluster_name
and org_cluster.org_name == org_name
):
quota = org_cluster.quota
if "quota" in payload:
quota = replace(
quota,
total_running_jobs=payload["quota"].get("total_running_jobs"),
)
if (
"additional_quota" in payload
and quota.total_running_jobs is not None
):
quota = replace(
quota,
total_running_jobs=quota.total_running_jobs
+ payload["additional_quota"].get("total_running_jobs"),
)
org_cluster = replace(org_cluster, quota=quota)
self.org_clusters[index] = org_cluster
return aiohttp.web.json_response(
self._serialize_org_cluster(org_cluster)
)
raise aiohttp.web.HTTPNotFound
async def handle_org_cluster_patch_balance(
self, request: aiohttp.web.Request
) -> aiohttp.web.Response:
cluster_name = request.match_info["cname"]
org_name = request.match_info["oname"]
payload = await request.json()
for index, org_cluster in enumerate(self.org_clusters):
if (
org_cluster.cluster_name == cluster_name
and org_cluster.org_name == org_name
):
balance = org_cluster.balance
if "credits" in payload:
credits = (
Decimal(payload["credits"]) if payload["credits"] else None
)
balance = replace(balance, credits=credits)
if payload.get("additional_credits") and balance.credits is not None:
additional_credits = Decimal(payload["additional_credits"])
balance = replace(
balance, credits=balance.credits + additional_credits
)
org_cluster = replace(org_cluster, balance=balance)
self.org_clusters[index] = org_cluster
return aiohttp.web.json_response(
self._serialize_org_cluster(
org_cluster,
)
)
raise aiohttp.web.HTTPNotFound
@pytest.fixture
async def mock_admin_server(
loop: asyncio.AbstractEventLoop,
) -> AsyncIterator[AdminServer]:
admin_server = AdminServer()
def _create_app() -> aiohttp.web.Application:
app = aiohttp.web.Application()
app.router.add_routes(
(
aiohttp.web.get(
"/api/v1/users",
admin_server.handle_user_list,
),
aiohttp.web.post(
"/api/v1/users",
admin_server.handle_user_post,
),
aiohttp.web.get(
"/api/v1/users/{uname}",
admin_server.handle_user_get,
),
aiohttp.web.get(
"/api/v1/orgs",
admin_server.handle_org_list,
),
aiohttp.web.post(
"/api/v1/orgs",
admin_server.handle_org_post,
),
aiohttp.web.get(
"/api/v1/orgs/{oname}",
admin_server.handle_org_get,
),
aiohttp.web.delete(
"/api/v1/orgs/{oname}",
admin_server.handle_org_delete,
),
aiohttp.web.get(
"/api/v1/clusters",
admin_server.handle_cluster_list,
),
aiohttp.web.post(
"/api/v1/clusters",
admin_server.handle_cluster_post,
),
aiohttp.web.get(
"/api/v1/clusters/{cname}",
admin_server.handle_cluster_get,
),
aiohttp.web.put(
"/api/v1/clusters/{cname}",
admin_server.handle_cluster_put,
),
aiohttp.web.delete(
"/api/v1/clusters/{cname}",
admin_server.handle_cluster_delete,
),
aiohttp.web.post(
"/api/v1/clusters/{cname}/users",
admin_server.handle_cluster_user_post,
),
aiohttp.web.get(
"/api/v1/clusters/{cname}/users",
admin_server.handle_cluster_user_list,
),
aiohttp.web.get(
"/api/v1/clusters/{cname}/users/{uname}",
admin_server.handle_cluster_user_get,
),
aiohttp.web.put(
"/api/v1/clusters/{cname}/users/{uname}",
admin_server.handle_cluster_user_put,
),
aiohttp.web.delete(
"/api/v1/clusters/{cname}/users/{uname}",
admin_server.handle_cluster_user_delete,
),
aiohttp.web.patch(
"/api/v1/clusters/{cname}/users/{uname}/balance",
admin_server.handle_cluster_user_patch_balance,
),
aiohttp.web.patch(
"/api/v1/clusters/{cname}/users/{uname}/quota",
admin_server.handle_cluster_user_patch_quota,
),
aiohttp.web.post(
"/api/v1/clusters/{cname}/users/{uname}/spending",
admin_server.handle_cluster_user_add_spending,
),
aiohttp.web.post(
"/api/v1/clusters/{cname}/debts",
admin_server.handle_cluster_user_add_debt,
),
aiohttp.web.post(
"/api/v1/orgs/{oname}/users",
admin_server.handle_org_user_post,
),
aiohttp.web.get(
"/api/v1/orgs/{oname}/users",
admin_server.handle_org_user_list,
),
aiohttp.web.get(
"/api/v1/orgs/{oname}/users/{uname}",
admin_server.handle_org_user_get,
),
aiohttp.web.put(
"/api/v1/orgs/{oname}/users/{uname}",
admin_server.handle_org_user_put,
),
aiohttp.web.delete(
"/api/v1/orgs/{oname}/users/{uname}",
admin_server.handle_org_user_delete,
),
aiohttp.web.post(
"/api/v1/clusters/{cname}/orgs",
admin_server.handle_org_cluster_post,
),
aiohttp.web.get(
"/api/v1/clusters/{cname}/orgs",
admin_server.handle_org_cluster_list,
),
aiohttp.web.get(
"/api/v1/clusters/{cname}/orgs/{oname}",
admin_server.handle_org_cluster_get,
),
aiohttp.web.put(
"/api/v1/clusters/{cname}/orgs/{oname}",
admin_server.handle_org_cluster_put,
),
aiohttp.web.delete(
"/api/v1/clusters/{cname}/orgs/{oname}",
admin_server.handle_org_cluster_delete,
),
# org user endpoints:
aiohttp.web.get(
"/api/v1/clusters/{cname}/orgs/{oname}/users",
admin_server.handle_cluster_user_list,
),
aiohttp.web.get(
"/api/v1/clusters/{cname}/orgs/{oname}/users/{uname}",
admin_server.handle_cluster_user_get,
),
aiohttp.web.put(
"/api/v1/clusters/{cname}/orgs/{oname}/users/{uname}",
admin_server.handle_cluster_user_put,
),
aiohttp.web.delete(
"/api/v1/clusters/{cname}/orgs/{oname}/users/{uname}",
admin_server.handle_cluster_user_delete,
),
aiohttp.web.patch(
"/api/v1/clusters/{cname}/orgs/{oname}/users/{uname}/balance",
admin_server.handle_cluster_user_patch_balance,
),
aiohttp.web.patch(
"/api/v1/clusters/{cname}/orgs/{oname}/users/{uname}/quota",
admin_server.handle_cluster_user_patch_quota,
),
aiohttp.web.post(
"/api/v1/clusters/{cname}/orgs/{oname}/users/{uname}/spending",
admin_server.handle_cluster_user_add_spending,
),
# patch org quota endpoints:
aiohttp.web.patch(
"/api/v1/clusters/{cname}/orgs/{oname}/balance",
admin_server.handle_org_cluster_patch_balance,
),
aiohttp.web.patch(
"/api/v1/clusters/{cname}/orgs/{oname}/quota",
admin_server.handle_org_cluster_patch_quota,
),
)
)
return app
app = _create_app()
runner = ApiRunner(app, port=8085)
api_address = await runner.run()
admin_server.address = api_address
yield admin_server
await runner.close()
@pytest.fixture
def admin_url(
mock_admin_server: AdminServer,
) -> URL:
return mock_admin_server.url
@asynccontextmanager
async def create_local_app_server(
app: aiohttp.web.Application, port: int = 8080
) -> AsyncIterator[ApiAddress]:
runner = aiohttp.web.AppRunner(app)
try:
await runner.setup()
api_address = ApiAddress("0.0.0.0", port)
site = aiohttp.web.TCPSite(runner, api_address.host, api_address.port)
await site.start()
yield api_address
finally:
await runner.shutdown()
await runner.cleanup()
class ApiRunner:
def __init__(self, app: aiohttp.web.Application, port: int) -> None:
self._app = app
self._port = port
self._api_address_future: asyncio.Future[ApiAddress] = asyncio.Future()
self._cleanup_future: asyncio.Future[None] = asyncio.Future()
self._task: asyncio.Task[None] | None = None
async def _run(self) -> None:
async with create_local_app_server(self._app, port=self._port) as api_address:
self._api_address_future.set_result(api_address)
await self._cleanup_future
async def run(self) -> ApiAddress:
loop = asyncio.get_event_loop()
self._task = loop.create_task(self._run())
return await self._api_address_future
async def close(self) -> None:
if self._task:
task = self._task
self._task = None
self._cleanup_future.set_result(None)
await task
@property
def closed(self) -> bool:
return not bool(self._task)
| 37,077 | 1,806 | 178 |
21f660305f9eb4a0474e78b1bbb3a65006f2c75f | 30,257 | py | Python | src/huntsman/pocs/observatory.py | Physarah/huntsman-pocs | df013f71f99a7da17d6b44915ebf6f301aba10fc | [
"MIT"
] | null | null | null | src/huntsman/pocs/observatory.py | Physarah/huntsman-pocs | df013f71f99a7da17d6b44915ebf6f301aba10fc | [
"MIT"
] | null | null | null | src/huntsman/pocs/observatory.py | Physarah/huntsman-pocs | df013f71f99a7da17d6b44915ebf6f301aba10fc | [
"MIT"
] | null | null | null | import time
from contextlib import suppress, contextmanager
from astropy import units as u
from panoptes.utils import error
from panoptes.utils.utils import get_quantity_value
from panoptes.utils.time import current_time, wait_for_events, CountdownTimer
from panoptes.pocs.observatory import Observatory
from panoptes.pocs.scheduler.observation.bias import BiasObservation
from huntsman.pocs.utils.logger import get_logger
from huntsman.pocs.guide.bisque import Guide
from huntsman.pocs.archive.utils import remove_empty_directories
from huntsman.pocs.scheduler.observation.dark import DarkObservation
from huntsman.pocs.utils.flats import make_flat_field_sequences, make_flat_field_observation
from huntsman.pocs.utils.flats import get_cameras_with_filter
from huntsman.pocs.utils.safety import get_solar_altaz
from huntsman.pocs.camera.group import CameraGroup, dispatch_parallel
from huntsman.pocs.error import NotTwilightError
| 44.300146 | 100 | 0.623228 | import time
from contextlib import suppress, contextmanager
from astropy import units as u
from panoptes.utils import error
from panoptes.utils.utils import get_quantity_value
from panoptes.utils.time import current_time, wait_for_events, CountdownTimer
from panoptes.pocs.observatory import Observatory
from panoptes.pocs.scheduler.observation.bias import BiasObservation
from huntsman.pocs.utils.logger import get_logger
from huntsman.pocs.guide.bisque import Guide
from huntsman.pocs.archive.utils import remove_empty_directories
from huntsman.pocs.scheduler.observation.dark import DarkObservation
from huntsman.pocs.utils.flats import make_flat_field_sequences, make_flat_field_observation
from huntsman.pocs.utils.flats import get_cameras_with_filter
from huntsman.pocs.utils.safety import get_solar_altaz
from huntsman.pocs.camera.group import CameraGroup, dispatch_parallel
from huntsman.pocs.error import NotTwilightError
class HuntsmanObservatory(Observatory):
    def __init__(self, with_autoguider=True, hdr_mode=False, take_flats=True, logger=None,
                 *args, **kwargs):
        """Huntsman POCS Observatory
        Args:
            with_autoguider (bool, optional): If autoguider is attached, defaults to True.
            hdr_mode (bool, optional): If pics should be taken in HDR mode, defaults to False.
            take_flats (bool, optional): If flat field images should be taken, defaults to True.
            logger (logger, optional): The logger instance. If not provided, use default Huntsman
                logger.
            *args: Parsed to Observatory init function.
            **kwargs: Parsed to Observatory init function.
        """
        if not logger:
            logger = get_logger()
        super().__init__(logger=logger, *args, **kwargs)
        # Make a camera group
        self.camera_group = CameraGroup(self.cameras)
        self._has_hdr_mode = hdr_mode
        self._has_autoguider = with_autoguider
        self.flat_fields_required = take_flats
        # Focusing
        # Bookkeeping for when / at what ambient temperature each focus type last ran;
        # consumed by _focus_required to decide whether a new focus run is due.
        self.last_coarse_focus_time = None
        self.last_coarse_focus_temp = None
        self._coarse_focus_interval = self.get_config('focusing.coarse.interval_hours', 1) * u.hour
        self._coarse_focus_filter = self.get_config('focusing.coarse.filter_name')
        self._coarse_focus_temptol = self.get_config('focusing.coarse.temp_tol_deg', 5) * u.Celsius
        self.last_fine_focus_time = None
        self.last_fine_focus_temp = None
        self._fine_focus_interval = self.get_config('focusing.fine.interval_hours', 1) * u.hour
        self._fine_focus_temptol = self.get_config('focusing.fine.temp_tol_deg', 5) * u.Celsius
        if self.has_autoguider:
            self.logger.info("Setting up autoguider")
            try:
                self._create_autoguider()
            except Exception as e:
                # Autoguiding is optional: degrade gracefully instead of failing construction.
                self._has_autoguider = False
                self.logger.warning(f"Problem setting autoguider, continuing without: {e!r}")
        # Hack solution to the observatory not knowing whether it is safe or not
        # This can be overridden when creating the HuntsmanPOCS instance
        self._is_safe = None
# Properties
@property
def has_hdr_mode(self):
""" Does camera support HDR mode
Returns:
bool: HDR enabled, default False
"""
return self._has_hdr_mode
@property
def has_autoguider(self):
""" Does camera have attached autoguider
Returns:
bool: True if has autoguider
"""
return self._has_autoguider
@property
def coarse_focus_required(self):
""" Return True if we should do a coarse focus. """
return self._focus_required(coarse=True)
@property
def fine_focus_required(self):
""" Return True if we should do a fine focus. """
return self._focus_required()
@property
def is_past_midnight(self):
"""Check if it's morning, useful for going into either morning or evening flats."""
# Get the time of the nearest midnight to now
# If the nearest midnight is in the past, it's the morning
midnight = self.observer.midnight(current_time(), which='nearest')
return midnight < current_time()
@property
def is_twilight(self):
""" Return True if it is twilight, else False. """
return self.is_dark(horizon="twilight_max") and not self.is_dark(horizon="twilight_min")
@property
def temperature(self):
""" Return the ambient temperature. """
temp = None
try:
reading = self.db.get_current("weather")["data"]["ambient_temp_C"]
temp = get_quantity_value(reading, u.Celsius) * u.Celsius
except (KeyError, TypeError) as err:
self.logger.warning(f"Unable to determine temperature: {err!r}")
return temp
@property
def solar_altaz(self):
""" Return the current solar alt az. """
return get_solar_altaz(location=self.earth_location, time=current_time())
# Context managers
    @contextmanager
    def safety_checking(self, *args, **kwargs):
        """ Check safety before and after the code block.
        To be used with a "with" statement, e.g.:
            with self.safety_checking():
                print(x)
        Args:
            *args, **kwargs: Parsed to self._assert_safe
        Raises:
            RuntimeError: If not safe.
        """
        # Fail fast if conditions are already unsafe before running the body.
        self._assert_safe(*args, **kwargs)
        try:
            yield None
        finally:
            # Re-check on exit. NOTE: this runs even if the body raised, so an unsafe
            # condition here can mask the body's original exception.
            self._assert_safe(*args, **kwargs)
# Methods
def initialize(self):
"""Initialize the observatory and connected hardware """
super().initialize()
if self.has_autoguider:
self.logger.debug("Connecting to autoguider")
self.autoguider.connect()
def is_safe(self, park_if_not_safe=False, *args, **kwargs):
""" Return True if it is safe, else False.
Args:
*args, **kwargs: Parsed to self._is_safe. See panoptes.pocs.core.POCS.is_safe.
park_if_not_safe (bool): If True, park if safety fails. Default: False.
Returns:
bool: True if safe, else False.
"""
if self._is_safe is not None:
return self._is_safe(park_if_not_safe=park_if_not_safe, *args, **kwargs)
self.logger.warning("Safety function not set. Returning False")
return False
def remove_camera(self, cam_name):
""" Remove a camera from the observatory.
Args:
cam_name (str): The name of the camera to remove.
"""
super().remove_camera(cam_name)
with suppress(KeyError):
del self.camera_group.cameras[cam_name]
    def autofocus_cameras(self, coarse=False, filter_name=None, default_timeout=900,
                          blocking=True, **kwargs):
        """ Override autofocus_cameras to update the last focus time and move filterwheels.
        Args:
            coarse (bool, optional): Perform coarse focus? Default False.
            filter_name (str, optional): The filter name to focus with. If None (default), will
                attempt to get from config, by default using the coarse focus filter.
            default_timeout (float, optional): Fallback timeout (seconds) when no
                `focusing.<type>.timeout` config entry exists. Default 900.
            blocking (bool, optional): If True, wait for all focus events before
                returning. Default True.
            **kwargs: Parsed to `self.camera_group.autofocus`.
        Returns:
            dict: Dict of camera name to its autofocus threading.Event.
        """
        focus_type = "coarse" if coarse else "fine"
        # Choose the filter to focus with
        # TODO: Move this logic to the camera level
        if filter_name is None:
            if coarse:
                filter_name = self._coarse_focus_filter
            else:
                try:
                    filter_name = self.current_observation.filter_name
                except AttributeError:
                    # No current observation (or no filter on it): fall back to coarse filter.
                    filter_name = self._coarse_focus_filter
                    self.logger.warning("Unable to retrieve filter name from current observation."
                                        f" Defaulting to coarse focus filter ({filter_name}).")
        # Asyncronously dispatch autofocus calls
        with self.safety_checking(horizon="focus"):
            events = self.camera_group.autofocus(coarse=coarse, filter_name=filter_name, **kwargs)
        # Wait for sequences to finish
        if blocking:
            timeout = self.get_config(f"focusing.{focus_type}.timeout", default_timeout)
            if not wait_for_events(list(events.values()), timeout=timeout):
                raise error.Timeout(f"Timeout of {timeout} reached while waiting for fine focus.")
        # Update last focus time
        setattr(self, f"last_{focus_type}_focus_time", current_time())
        # Update last focus temperature
        setattr(self, f"last_{focus_type}_focus_temp", self.temperature)
        return events
def cleanup_observations(self, *args, **kwargs):
""" Override method to remove empty directories. Called in housekeeping state."""
super().cleanup_observations(*args, **kwargs)
self.logger.info("Removing empty directories in images directory.")
images_dir = self.get_config("directories.images")
remove_empty_directories(images_dir)
self.logger.info("Removing empty directories in archive directory.")
archive_dir = self.get_config("directories.archive")
remove_empty_directories(archive_dir)
    def take_flat_fields(self, cameras=None, **kwargs):
        """ Take flat fields for each camera in each filter, respecting filter order.
        Args:
            cameras (dict): Dict of cam_name: camera pairs. If None (default), use all cameras.
            **kwargs: Overrides config entries under `calibs.flat`.
        Raises:
            RuntimeError: If conditions become unsafe for twilight flats.
        """
        if cameras is None:
            cameras = self.cameras
        # Load the flat field config, allowing overrides from kwargs
        flat_config = self.get_config('calibs.flat', default=dict())
        flat_config.update(kwargs)
        # Specify filter order
        filter_order = flat_config['filter_order'].copy()
        if self.is_past_midnight:  # If it's the morning, order is reversed
            filter_order.reverse()
        # Take flat fields in each filter
        for filter_name in filter_order:
            # Twilight is a narrow window; re-check conditions before each filter.
            if not (self.is_safe(horizon="twilight_max") and self.is_twilight):
                raise RuntimeError("Not safe for twilight flats. Aborting.")
            # Get a dict of cameras that have this filter
            cameras_with_filter = get_cameras_with_filter(cameras, filter_name)
            # Go to next filter if there are no cameras with this one
            if not cameras_with_filter:
                self.logger.warning(f'No cameras found with {filter_name} filter.')
                continue
            # Get the flat field observation
            observation = make_flat_field_observation(self.earth_location, filter_name=filter_name)
            observation.seq_time = current_time(flatten=True)
            # Take the flats for each camera in this filter
            self.logger.info(f'Taking flat fields in {filter_name} filter.')
            autoflat_config = flat_config.get("autoflats", {})
            try:
                self._take_autoflats(cameras_with_filter, observation, **autoflat_config)
            # Break out of loop if no longer twilight
            # Catch the error so the state machine keeps running
            except NotTwilightError as err:
                self.logger.warning(f"{err!r}")
                break
        self.logger.info('Finished flat-fielding.')
def prepare_cameras(self, drop=True, *args, **kwargs):
""" Make sure cameras are all cooled and ready.
Args:
drop (bool): If True, drop cameras that do not become ready in time. Default: True.
*args, **kwargs: Parsed to self.camera_group.wait_until_ready.
"""
self.logger.info(f"Preparing {len(self.cameras)} cameras.")
failed_cameras = self.camera_group.wait_until_ready(*args, **kwargs)
# Remove cameras that didn't become ready in time
if drop:
for cam_name in failed_cameras:
self.logger.debug(f'Removing {cam_name} from {self} for not being ready.')
self.remove_camera(cam_name)
    def take_observation_block(self, observation, cameras=None, timeout=60 * u.second,
                               remove_on_error=False, do_focus=True, safety_kwargs=None,
                               do_slew=True):
        """ Macro function to take an observation block.
        This function will perform:
            - slewing (when necessary)
            - fine focusing (when necessary)
            - observation exposures
            - safety checking
        Args:
            observation (Observation): The observation object.
            cameras (dict, optional): Dict of cam_name: camera pairs. If None (default), use all
                cameras.
            timeout (float, optional): The timeout in addition to the exposure time. Default 60s.
            remove_on_error (bool, default False): If True, remove cameras that timeout. If False,
                raise a TimeoutError instead.
            do_focus (bool, optional): If True, fine focus before the first exposure and
                whenever a fine focus is due. Default True.
            do_slew (bool, optional): If True, slew the telescope to the observation field
                whenever the field changes. Default True.
            safety_kwargs (dict, optional): Extra kwargs to be parsed to safety function.
        Raises:
            RuntimeError: If safety check fails.
        """
        if cameras is None:
            cameras = self.cameras
        safety_kwargs = {} if safety_kwargs is None else safety_kwargs
        self._assert_safe(**safety_kwargs)
        # Set the sequence time of the observation
        if observation.seq_time is None:
            observation.seq_time = current_time(flatten=True)
        headers = self.get_standard_headers(observation=observation)
        # Take the observation block
        self.logger.info(f"Starting observation block for {observation}")
        # The start new set flag is True before we enter the loop and is set to False
        # immediately inside the loop. This allows the loop to start a new set in case
        # the set_is_finished condition is already satisfied.
        start_new_set = True
        current_field = None
        while (start_new_set or not observation.set_is_finished):
            start_new_set = False  # We don't want to start another set after this one
            # Perform the slew if necessary
            slew_required = (current_field != observation.field) and do_slew
            if slew_required:
                with self.safety_checking(**safety_kwargs):
                    self.slew_to_observation(observation)
                current_field = observation.field
            # Fine focus the cameras if necessary
            focus_required = self.fine_focus_required or observation.current_exp_num == 0
            if do_focus and focus_required:
                with self.safety_checking(**safety_kwargs):
                    self.autofocus_cameras(blocking=True, filter_name=observation.filter_name)
            # Set a common start time for this batch of exposures
            headers['start_time'] = current_time(flatten=True)
            # Start the exposures and get events
            with self.safety_checking(**safety_kwargs):
                events = self.camera_group.take_observation(observation, headers=headers)
            # Wait for the exposures (blocking)
            # TODO: Use same timeout as camera client
            try:
                self._wait_for_camera_events(events, duration=observation.exptime + timeout,
                                             remove_on_error=remove_on_error, **safety_kwargs)
            except error.Timeout as err:
                # A timeout aborts this batch but not the whole block.
                self.logger.error(f"{err!r}")
                self.logger.warning("Continuing with observation block after error.")
            # Explicitly mark the observation as complete
            with suppress(AttributeError):
                observation.mark_exposure_complete()
            self.logger.info(f"Observation status: {observation.status}")
    def take_dark_observation(self, bias=False, **kwargs):
        """ Take a bias observation block on each camera (blocking).
        Args:
            bias (bool, optional): If True, take Bias observation instead of dark observation.
                Default: False.
            **kwargs: Parsed to `self.take_observation_block`.
        """
        # Move telescope to park position
        if not self.mount.is_parked:
            self.logger.info("Moving telescope to park position for dark observation.")
            self.mount.park()
        # Create the observation
        # Keep the mount where it is since we are just taking darks
        position = self.mount.get_current_coordinates()
        ObsClass = BiasObservation if bias else DarkObservation
        observation = ObsClass(position=position)
        # Dark observations don't care if it's dark or not
        safety_kwargs = {"ignore": ["is_dark"]}
        # Can ignore weather safety if dome is closed
        # The suppress handles observatories that have no dome attribute at all.
        with suppress(AttributeError):
            if self.dome.is_closed:
                self.logger.warning(f"Ignoring weather safety for {observation}.")
                safety_kwargs["ignore"].append("good_weather")
        # Take the observation (blocking)
        self.take_observation_block(observation, do_focus=False, do_slew=False,
                                    safety_kwargs=safety_kwargs, **kwargs)
    def slew_to_observation(self, observation, min_solar_alt=10 * u.deg):
        """ Slew to the observation field coordinates.
        Args:
            observation (Observation): The observation object.
            min_solar_alt (astropy.Quantity, optional): The minimum solar altitude above which the
                FWs will be moved to their dark positions before slewing.
        Raises:
            RuntimeError: If the mount rejects the target coordinates.
        """
        self.logger.info(f"Slewing to target coordinates for {observation}.")
        if not self.mount.set_target_coordinates(observation.field.coord):
            raise RuntimeError(f"Unable to set target coordinates for {observation.field}.")
        # Move FWs to dark pos if Sun too high to minimise damage potential
        move_fws = self.solar_altaz.alt > get_quantity_value(min_solar_alt, u.deg) * u.deg
        if move_fws:
            self.logger.warning("Solar altitude above minimum for safe slew. Moving FWs to dark"
                                " positions.")
            # Record curent positions so we can put them back after slew
            # NOTE: These positions could include the dark position so can't use last_light_position
            current_fw_positions = {}
            for cam_name, cam in self.cameras.items():
                if cam.has_filterwheel:
                    current_fw_positions[cam_name] = cam.filterwheel.current_filter
            # NOTE(review): this moves the filterwheels to the positions they are already in
            # (apparently a no-op) rather than to dark positions as the log message says --
            # confirm how CameraGroup.filterwheel_move_to is meant to request dark positions.
            self.camera_group.filterwheel_move_to(current_fw_positions)
        self.mount.slew_to_target()
        if move_fws:
            self.logger.info("Moving FWs back to last positions.")
            self.camera_group.filterwheel_move_to(current_fw_positions)
# Private methods
    def _create_autoguider(self):
        """ Instantiate the autoguider from the 'guider' config entry and attach it. """
        guider_config = self.get_config('guider')
        guider = Guide(**guider_config)
        self.autoguider = guider
def _take_autoflats(
self, cameras, observation, target_scaling=0.17, scaling_tolerance=0.05, timeout=60,
bias=32, remove_on_error=False, sleep_time=300, evening_initial_flat_exptime=0.01,
morning_initial_flat_exptime=1, **kwargs):
""" Take flat fields using automatic updates for exposure times.
Args:
cameras (dict): Dict of camera name: Camera pairs.
observation: The flat field observation. TODO: Integrate with FlatFieldSequence.
target_scaling (float, optional): Required to be between [0, 1] so
target_adu is proportionally between 0 and digital saturation level.
Default: 0.17.
scaling_tolerance (float, optional): The minimum precision on the average counts
required to keep the exposure, expressed as a fraction of the dynamic range.
Default: 0.05.
timeout (float): The timeout on top of the exposure time, default 60s.
bias (int): The bias to subtract from the frames. TODO: Use a real bias image!
remove_on_error (bool, default False): If True, remove cameras that timeout. If False,
raise a TimeoutError instead.
**kwargs: Parsed to FlatFieldSequence.
"""
# set the initial exposure time
if self.is_past_midnight:
initial_exptime = morning_initial_flat_exptime
else:
initial_exptime = evening_initial_flat_exptime
# Create a flat field sequence for each camera
sequences = make_flat_field_sequences(cameras, target_scaling, scaling_tolerance,
bias, initial_exposure_time=initial_exptime, **kwargs)
# Loop until sequence has finished
self.logger.info(f"Starting flat field sequence for {len(self.cameras)} cameras.")
while True:
if not self.is_twilight:
raise NotTwilightError("No longer twilight. Aborting flat fields.")
# Slew to field
with self.safety_checking(horizon="twilight_max"):
self.slew_to_observation(observation)
# Get standard fits headers
headers = self.get_standard_headers(observation=observation)
events = {}
exptimes = {}
filenames = {}
start_times = {}
# Define function to start the exposures
def func(cam_name):
seq = sequences[cam_name]
camera = cameras[cam_name]
# Get exposure time, filename and current time
exptimes[cam_name] = seq.get_next_exptime(past_midnight=self.is_past_midnight)
filenames[cam_name] = observation.get_exposure_filename(camera)
start_times[cam_name] = current_time()
try:
events[cam_name] = camera.take_observation(
observation, headers=headers, filename=filenames[cam_name],
exptime=exptimes[cam_name])
except error.PanError as err:
self.logger.error(f"{err!r}")
self.logger.warning("Continuing with flat observation after error.")
# Start the exposures in parallel
dispatch_parallel(func, list(cameras.keys()))
# Wait for the exposures
self.logger.info('Waiting for flat field exposures to complete.')
duration = get_quantity_value(max(exptimes.values()), u.second) + timeout
try:
self._wait_for_camera_events(events, duration, remove_on_error=remove_on_error,
horizon="twilight_max")
except error.Timeout as err:
self.logger.error(f"{err!r}")
self.logger.warning("Continuing with flat observation after timeout error.")
# Mark the current exposure as complete
observation.mark_exposure_complete()
# Update the flat field sequences with new data
for cam_name in list(sequences.keys()):
# Remove sequence for any removed cameras
if cam_name not in self.cameras:
del sequences[cam_name]
continue
# Attempt to update the exposure sequence for this camera.
# If the exposure failed, use info from the last successful exposure.
try:
sequences[cam_name].update(filename=filenames[cam_name],
exptime=exptimes[cam_name],
time_start=start_times[cam_name])
except (KeyError, FileNotFoundError) as err:
self.logger.warning(f"Unable to update flat field sequence for {cam_name}:"
f" {err!r}")
# Log sequence status
status = sequences[cam_name].status
status["filter_name"] = observation.filter_name
self.logger.info(f"Flat field status for {cam_name}: {status}")
# Check if sequences are complete
if all([s.is_finished for s in sequences.values()]):
self.logger.info("All flat field sequences finished.")
break
# Check if counts are ok
if self.is_past_midnight:
# Terminate if Sun is coming up and all exposures are too bright
if all([s.min_exptime_reached for s in sequences.values()]):
self.logger.info(f"Terminating flat sequence for {observation.filter_name}"
f" filter because min exposure time reached.")
break
# Wait if Sun is coming up and all exposures are too faint
elif all([s.max_exptime_reached for s in sequences.values()]):
self.logger.info(f"All exposures are too faint. Waiting for {sleep_time}s")
self._safe_sleep(sleep_time, horizon="twilight_max")
else:
# Terminate if Sun is going down and all exposures are too faint
if all([s.max_exptime_reached for s in sequences.values()]):
self.logger.info(f"Terminating flat sequence for {observation.filter_name}"
f" filter because max exposure time reached.")
break
# Wait if Sun is going down and all exposures are too bright
elif all([s.max_exptime_reached for s in sequences.values()]):
self.logger.info(f"All exposures are too bright. Waiting for {sleep_time}s")
self._safe_sleep(sleep_time, horizon="twilight_max")
def _wait_for_camera_events(self, events, duration, remove_on_error=False, sleep=1, **kwargs):
""" Wait for camera events to be set.
Args:
events (dict of camera_name: threading.Event): The events to wait for.
duration (float): The total amount of time to wait for (should include exptime).
remove_on_error (bool, default False): If True, remove cameras that timeout. If False,
raise a TimeoutError instead.
sleep (float): Sleep this long between event checks. Default 1s.
**kwargs: Parsed to self._assert_safe.
"""
self.logger.debug(f'Waiting for {len(events)} events with timeout of {duration}.')
timer = CountdownTimer(duration)
while not timer.expired():
# Check safety here
self._assert_safe(**kwargs)
# Check if all cameras have finished
if all([e.is_set() for e in events.values()]):
break
time.sleep(sleep)
# Make sure events are set
for cam_name, event in events.items():
if not event.is_set():
if remove_on_error:
self.logger.warning(f"Timeout while waiting for camera event on {cam_name}. "
"Removing from observatory.")
self.remove_camera(cam_name)
else:
raise error.Timeout(f"Timeout while waiting for camera event on {cam_name}.")
def _focus_required(self, coarse=False):
""" Check if a focus is required based on current conditions.
Args:
coarse (bool): If True, check if we need to do a coarse focus. Default: False.
Returns:
bool: True if focus required, else False.
"""
focus_type = "coarse" if coarse else "fine"
# If a long time period has passed then focus again
last_focus_time = getattr(self, f"last_{focus_type}_focus_time")
interval = getattr(self, f"_{focus_type}_focus_interval")
if last_focus_time is None: # If we haven't focused yet
self.logger.info(f"{focus_type} focus required because we haven't focused yet.")
return True
if current_time() - last_focus_time > interval:
self.logger.info(f"{focus_type} focus required because of time difference.")
return True
# If there has been a large change in temperature then we need to focus again
last_focus_temp = getattr(self, f"last_{focus_type}_focus_temp")
temptol = getattr(self, f"_{focus_type}_focus_temptol")
if (last_focus_temp is not None) and (self.temperature is not None):
if abs(last_focus_temp - self.temperature) > temptol:
self.logger.info(f"{focus_type} focus required because of temperature change.")
return True
return False
def _assert_safe(self, *args, **kwargs):
""" Raise a RuntimeError if not safe to continue.
TODO: Raise a custom error type indicating lack of safety.
Args:
*args, **kwargs: Parsed to self.is_safe.
"""
if not self.is_safe(*args, **kwargs):
raise RuntimeError("Safety check failed!")
def _safe_sleep(self, duration, interval=1, *args, **kwargs):
""" Sleep for a specified amount of time while ensuring safety.
A RuntimeError is raised if safety fails while waiting.
Args:
duration (float or Quantity): The time to wait.
interval (float): The time in between safety checks.
*args, **kwargs: Parsed to is_safe.
Raises:
RuntimeError: If safety fails while waiting.
"""
self.logger.debug(f"Safe sleeping for {duration}")
timer = CountdownTimer(duration)
while not timer.expired():
self._assert_safe(*args, **kwargs)
time.sleep(interval)
| 915 | 28,384 | 23 |
83b201d9467d745f1f90c3d612f0fd43a4d2c97b | 225 | py | Python | talkademic-client/src/test.py | Nouran-Soliman/Talkademic | e81f1a109f25886d10354698bb6852ba9f5fd4f3 | [
"MIT"
] | null | null | null | talkademic-client/src/test.py | Nouran-Soliman/Talkademic | e81f1a109f25886d10354698bb6852ba9f5fd4f3 | [
"MIT"
] | null | null | null | talkademic-client/src/test.py | Nouran-Soliman/Talkademic | e81f1a109f25886d10354698bb6852ba9f5fd4f3 | [
"MIT"
] | null | null | null | import requests
URL = 'https://scholar.googleusercontent.com/citations?view_op=export_citations&user=JtSAIqgAAAAJ&citsig=AMD79ooAAAAAYEerXzdIALaAeL3goamu28BB1p8NLHDg&hl=en'
page = requests.get(URL)
print(type(page.content)) | 37.5 | 156 | 0.84 | import requests
URL = 'https://scholar.googleusercontent.com/citations?view_op=export_citations&user=JtSAIqgAAAAJ&citsig=AMD79ooAAAAAYEerXzdIALaAeL3goamu28BB1p8NLHDg&hl=en'
page = requests.get(URL)
print(type(page.content)) | 0 | 0 | 0 |
c4814b198ea4c54cc38584a5fc56a29721049e9f | 1,258 | py | Python | tools/metrics_analyzer/metrics_visualizer/assets/memory.py | hcindyl/renode | d8ccb719df28315773072a641b5c5e501de9e39b | [
"MIT"
] | 768 | 2017-07-26T06:03:38.000Z | 2022-03-30T12:48:59.000Z | tools/metrics_analyzer/metrics_visualizer/assets/memory.py | hcindyl/renode | d8ccb719df28315773072a641b5c5e501de9e39b | [
"MIT"
] | 301 | 2017-11-09T09:00:43.000Z | 2022-03-30T06:23:45.000Z | tools/metrics_analyzer/metrics_visualizer/assets/memory.py | hcindyl/renode | d8ccb719df28315773072a641b5c5e501de9e39b | [
"MIT"
] | 157 | 2017-09-09T14:00:39.000Z | 2022-03-31T05:19:14.000Z | import matplotlib.pyplot as plt
import pandas as pd
from .legend_picker import *
from .helpers import *
| 40.580645 | 96 | 0.717011 | import matplotlib.pyplot as plt
import pandas as pd
from .legend_picker import *
from .helpers import *
def show_memory_access(metricsParser, options, onePlotFigureSize, fontSize):
    """Plot cumulative memory read/write operations over time and save to 'memory.png'.

    Args:
        metricsParser: Parser exposing get_memory_entries(); entries carry
            (realTime, virtualTime, operation), where operation byte 2 marks a
            read and byte 3 marks a write.
        options: Parsed CLI options; options.real_time selects the time axis.
        onePlotFigureSize: Figure size forwarded to plt.subplots.
        fontSize: Font size used for the figure title.
    """
    memory_entries = metricsParser.get_memory_entries()
    data = pd.DataFrame(memory_entries, columns=['realTime', 'virtualTime', 'operation'])
    reads = data[data['operation'] == bytes([2])]
    writes = data[data['operation'] == bytes([3])]
    fig, ax = plt.subplots(figsize=onePlotFigureSize, constrained_layout=True)
    lines = _prepare_data(ax, reads, writes, 'realTime' if options.real_time else 'virtualTime')
    fig.suptitle('Memory access', fontsize=fontSize)
    handles, labels = ax.get_legend_handles_labels()
    legend = fig.legend(handles, labels, loc='upper left')
    set_legend_picker(fig, lines, legend)
    # BUGFIX: corrected the user-visible axis-label typo 'miliseconds' -> 'milliseconds'.
    ax.set_xlabel('{} time in milliseconds'.format('Real' if options.real_time else 'Virtual'))
    save_fig(fig, 'memory.png', options)
def _prepare_data(ax, reads, writes, columnName):
    """Plot cumulative write/read counts against `columnName`; return both line artists."""
    write_line, = ax.plot(writes[columnName], range(len(writes)), label='Writes')
    read_line, = ax.plot(reads[columnName], range(len(reads)), label='Reads')
    ax.set_ylabel('Memory access operations')
    return [write_line, read_line]
| 1,106 | 0 | 46 |
492648709453b7d1ab3f201690d87e7c3c581022 | 1,432 | py | Python | uploadfilehandler.py | DivinaThomas/dropBoxReplica_CloudComputing | 684f54881fd168d6293f06d76f21520915affe38 | [
"MIT"
] | null | null | null | uploadfilehandler.py | DivinaThomas/dropBoxReplica_CloudComputing | 684f54881fd168d6293f06d76f21520915affe38 | [
"MIT"
] | null | null | null | uploadfilehandler.py | DivinaThomas/dropBoxReplica_CloudComputing | 684f54881fd168d6293f06d76f21520915affe38 | [
"MIT"
] | null | null | null | from google.appengine.ext import blobstore
from google.appengine.ext import ndb
from google.appengine.ext.webapp import blobstore_handlers
from directory import Directory
import webapp2
import jinja2
import os
from google.appengine.api import users
JINJA_ENVIRONMENT = jinja2.Environment(
loader=jinja2.FileSystemLoader(os.path.dirname(__file__)),
extensions=['jinja2.ext.autoescape'],
autoescape=True
)
| 28.078431 | 67 | 0.75419 | from google.appengine.ext import blobstore
from google.appengine.ext import ndb
from google.appengine.ext.webapp import blobstore_handlers
from directory import Directory
import webapp2
import jinja2
import os
from google.appengine.api import users
JINJA_ENVIRONMENT = jinja2.Environment(
loader=jinja2.FileSystemLoader(os.path.dirname(__file__)),
extensions=['jinja2.ext.autoescape'],
autoescape=True
)
class UploadFileHandler(blobstore_handlers.BlobstoreUploadHandler):
    """Handles blobstore upload callbacks, registering the new file in its Directory."""
    def post(self):
        """Record the uploaded blob in the target directory, or render an error page
        if a file with the same name already exists there.
        """
        upload = self.get_uploads()[0]
        filename = blobstore.BlobInfo(upload.key()).filename
        directory = ndb.Key(Directory, self.request.get('directory_id')).get()
        if filename not in directory.list_of_files:
            # Register the new file and its blob key, then return home.
            directory.list_of_files.append(filename)
            directory.blobs.append(upload.key())
            directory.put()
            self.redirect('/')
        else:
            template_values = {
                'user': users.get_current_user(),
                'logout': users.create_logout_url('/'),
                'error_message': 'Sorry a file with this name exists',
            }
            template = JINJA_ENVIRONMENT.get_template('error.html')
            self.response.write(template.render(template_values))
| 931 | 46 | 46 |
e10a22e7135c076fab89283de23346148ddbc424 | 11,468 | py | Python | tutorial/test/visualize.py | alexpostnikov/waymo-open-dataset | efe74553dfab813450a1f9070cd14f72d83a40bc | [
"Apache-2.0"
] | null | null | null | tutorial/test/visualize.py | alexpostnikov/waymo-open-dataset | efe74553dfab813450a1f9070cd14f72d83a40bc | [
"Apache-2.0"
] | null | null | null | tutorial/test/visualize.py | alexpostnikov/waymo-open-dataset | efe74553dfab813450a1f9070cd14f72d83a40bc | [
"Apache-2.0"
] | null | null | null | import uuid
import numpy as np
from matplotlib import pyplot as plt, cm
def create_figure_and_axes(size_pixels):
  """Create a uniquely-numbered figure/axes pair styled for visualization.

  Args:
    size_pixels: Output image resolution (square, in pixels) at 100 dpi.

  Returns:
    Tuple of (matplotlib figure, axes).
  """
  fig, ax = plt.subplots(1, 1, num=uuid.uuid4())
  # Fix the dpi and pick the size in inches so the canvas renders at the
  # requested pixel resolution.
  dpi = 100
  fig.set_size_inches([size_pixels / dpi, size_pixels / dpi])
  fig.set_dpi(dpi)
  # White background with black labels and ticks on both axes.
  fig.set_facecolor('white')
  ax.set_facecolor('white')
  ax.xaxis.label.set_color('black')
  ax.yaxis.label.set_color('black')
  for axis_name in ('x', 'y'):
    ax.tick_params(axis=axis_name, colors='black')
  fig.set_tight_layout(True)
  ax.grid(False)
  return fig, ax
def fig_canvas_image(fig):
  """Returns a [H, W, 3] uint8 np.array image from fig.canvas.tostring_rgb()."""
  # Just enough margin in the figure to display xticks and yticks.
  fig.subplots_adjust(
      left=0.08, bottom=0.08, right=0.98, top=0.98, wspace=0.0, hspace=0.0)
  fig.canvas.draw()
  # NOTE(review): tostring_rgb is deprecated in newer matplotlib (3.8+) in favour of
  # buffer_rgba -- confirm the pinned matplotlib version before upgrading.
  data = np.frombuffer(fig.canvas.tostring_rgb(), dtype=np.uint8)
  # get_width_height() gives (width, height); reversed for the array's (H, W) shape.
  return data.reshape(fig.canvas.get_width_height()[::-1] + (3,))
def get_colormap(num_agents):
  """Return a shuffled RGBA color array of shape [num_agents, 4]."""
  cmap = cm.get_cmap('jet', num_agents)
  colors = cmap(range(num_agents))
  # Shuffle in place so neighbouring agent indices get visually distinct colors.
  np.random.shuffle(colors)
  return colors
def get_viewport(all_states, all_states_mask):
  """Compute a square viewport containing all valid agent states.

  Args:
    all_states: states of agents as an array of shape [num_agents, num_steps, 2].
    all_states_mask: binary mask of shape [num_agents, num_steps] for
      `all_states`.

  Returns:
    center_y: float. y coordinate for center of data.
    center_x: float. x coordinate for center of data.
    width: float. Width of data.
  """
  valid = all_states[all_states_mask]
  xs = valid[..., 0]
  ys = valid[..., 1]
  # Midpoint of the bounding box along each dimension.
  center_x = (np.max(xs) + np.min(xs)) / 2
  center_y = (np.max(ys) + np.min(ys)) / 2
  # Square viewport: take the larger of the two extents.
  width = max(np.ptp(ys), np.ptp(xs))
  return center_y, center_x, width
def visualize_one_step(states,
                       mask,
                       roadgraph,
                       title,
                       center_y,
                       center_x,
                       width,
                       color_map,
                       size_pixels=1000):
  """Generate visualization for a single step.

  Args:
    states: float array of shape [num_agents, 2] with (x, y) agent positions.
    mask: boolean array of shape [num_agents] selecting valid agents.
    roadgraph: float array of shape [num_points, >=2]; only x, y are used.
    title: Title string for the axes.
    center_y: y coordinate of the viewport center.
    center_x: x coordinate of the viewport center.
    width: Width of the viewport.
    color_map: RGBA color array indexed per agent (shape [num_agents, 4]).
    size_pixels: Output image resolution. Default 1000.

  Returns:
    [H, W, 3] uint8 image array of the rendered frame.
  """
  # Create figure and axes.
  fig, ax = create_figure_and_axes(size_pixels=size_pixels)
  # Plot roadgraph.
  rg_pts = roadgraph[:, :2].T
  ax.plot(rg_pts[0, :], rg_pts[1, :], 'k.', alpha=1, ms=2)
  # Keep only the valid agents for this step.
  masked_x = states[:, 0][mask]
  masked_y = states[:, 1][mask]
  colors = color_map[mask]
  # Plot agent current position.
  ax.scatter(
      masked_x,
      masked_y,
      marker='o',
      linewidths=3,
      color=colors,
  )
  # Title.
  ax.set_title(title)
  # Set axes. Should be at least 10m on a side and cover 160% of agents.
  # NOTE(review): `width * 1.0` does not apply the 160% margin mentioned above --
  # confirm whether the factor was meant to be 1.6.
  size = max(10, width * 1.0)
  ax.axis([
      -size / 2 + center_x, size / 2 + center_x, -size / 2 + center_y,
      size / 2 + center_y
  ])
  ax.set_aspect('equal')
  image = fig_canvas_image(fig)
  plt.close(fig)
  return image
def visualize_one_step_with_future(states, mask, future_states, future_states_mask, roadgraph, title,
                                   center_y, center_x, width, color_map, size_pixels=1000, predictions=None, confs=None):
    """Generate visualization for a single step, plus ground-truth futures
    and (optionally) multi-modal predicted trajectories.

    Args:
      states: [num_agents, 2] current agent positions.
      mask: [num_agents] validity mask for `states`.
      future_states: [num_agents, num_future_steps, 2] ground-truth futures.
      future_states_mask: [num_agents, num_future_steps] validity mask.
      roadgraph: [num_points, >=2] roadgraph sample coordinates.
      title: plot title string.
      center_y, center_x, width: viewport description (see get_viewport).
      color_map: [num_agents, 4] RGBA color per agent.
      size_pixels: output image side length in pixels.
      predictions: optional [num_agents, num_steps, num_modalities, 2] array
        of predicted absolute positions.
      confs: per-agent, per-modality confidence tensor (indexed as
        confs[ped, modality], torch-like). Required when `predictions` is
        given.

    Returns:
      [H, W, 3] uint8 image of the rendered figure.
    """
    fig, ax = create_figure_and_axes(size_pixels=size_pixels)
    # Plot roadgraph.
    rg_pts = roadgraph[:, :2].T
    ax.plot(rg_pts[0, :], rg_pts[1, :], 'k.', alpha=1, ms=2)
    masked_x = states[:, 0][mask]
    masked_y = states[:, 1][mask]
    colors = color_map[mask]
    # Plot agent current positions.
    ax.scatter(
        masked_x,
        masked_y,
        marker='o',
        linewidths=4,
        color=colors,
    )
    # Ground-truth futures: thick per-agent colored line with a thin yellow
    # overlay so GT is distinguishable from predictions.
    # (Generalized from a hard-coded `range(128)` to the actual agent count.)
    num_agents = future_states.shape[0]
    for ped in range(num_agents):
        xs, ys = [], []
        for step in range(future_states.shape[1]):
            if not future_states_mask[ped, step]:
                continue
            xs.append(future_states[ped, step, 0])
            ys.append(future_states[ped, step, 1])
        ax.plot(xs, ys, linewidth=4, color=color_map[ped])
        ax.plot(xs, ys, linewidth=2, color=np.array([181, 179, 92, 255]) / 255.)
    if predictions is not None:
        # BUG FIX: predictions.shape used to be unpacked *before* this None
        # check, raising AttributeError whenever predictions was omitted.
        nump, timestamps, modalities, datadim = predictions.shape
        for ped in range(nump):
            # Skip agents with no valid future at all.
            if future_states_mask[ped].sum() == 0:
                continue
            for modality in range(modalities):
                xs, ys = [], []
                for step in range(timestamps):
                    # (Removed an always-true `if [mask]:` list test that
                    # duplicated this continue.)
                    if not future_states_mask[ped, step]:
                        continue
                    xs.append(predictions[ped, step, modality, 0])
                    ys.append(predictions[ped, step, modality, 1])
                conf = confs[ped, modality].detach().cpu().item()
                # Fade the line by (1 - conf) alpha and scale width by conf.
                ax.plot(xs, ys,
                        linewidth=3 * conf,
                        color=color_map[ped] - np.array([0, 0, 0, 1 - conf]))
                ax.text(xs[-1], ys[-1], f"{conf:.2f}", fontsize="xx-small")
    # Title.
    ax.set_title(title)
    # Set axes. Should be at least 10m on a side.
    size = max(10, width * 1.0)
    ax.axis([
        -size / 2 + center_x, size / 2 + center_x, -size / 2 + center_y,
        size / 2 + center_y
    ])
    ax.set_aspect('equal')
    image = fig_canvas_image(fig)
    plt.close(fig)
    return image
def visualize_all_agents_smooth(
    decoded_example,
    size_pixels=1000,
):
    """Visualizes all agent predicted trajectories in a serie of images.
    Args:
      decoded_example: Dictionary containing agent info about all modeled agents.
      size_pixels: The size in pixels of the output image.
    Returns:
      T of [H, W, 3] uint8 np.arrays of the drawn matplotlib's figure canvas.
    """
    (center_x, center_y, color_map, current_states, current_states_mask,
     future_states, future_states_mask, num_future_steps, num_past_steps,
     past_states, past_states_mask, roadgraph_xyz,
     width) = prepare_data_for_vis(decoded_example)

    def _render(step_states, step_mask, step_title):
        # Render one [num_agents, 1, ...] time slice as a single image.
        return visualize_one_step(step_states[:, 0], step_mask[:, 0],
                                  roadgraph_xyz, step_title, center_y,
                                  center_x, width, color_map, size_pixels)

    images = []
    # Past time steps, counting down to the present.
    past_slices = zip(np.split(past_states, num_past_steps, 1),
                      np.split(past_states_mask, num_past_steps, 1))
    for i, (s, m) in enumerate(past_slices):
        images.append(_render(s, m, 'past: %d' % (num_past_steps - i)))
    # One image for the current time step.
    images.append(_render(current_states, current_states_mask, 'current'))
    # Future time steps, counting up from the present.
    future_slices = zip(np.split(future_states, num_future_steps, 1),
                        np.split(future_states_mask, num_future_steps, 1))
    for i, (s, m) in enumerate(future_slices):
        images.append(_render(s, m, 'future: %d' % (i + 1)))
    return images
| 35.504644 | 121 | 0.595396 | import uuid
import numpy as np
from matplotlib import pyplot as plt, cm
def create_figure_and_axes(size_pixels):
    """Initializes a unique figure and axes for plotting.

    Args:
      size_pixels: desired square output size in pixels (at dpi=100).

    Returns:
      (fig, ax) matplotlib figure and axes, styled white-on-white with
      black labels/ticks and no grid.
    """
    # A fresh uuid as the figure number guarantees a brand-new figure rather
    # than reusing an existing one.
    fig, ax = plt.subplots(1, 1, num=uuid.uuid4())
    # Sets output image to pixel resolution.
    dpi = 100
    size_inches = size_pixels / dpi
    fig.set_size_inches([size_inches, size_inches])
    fig.set_dpi(dpi)
    fig.set_facecolor('white')
    ax.set_facecolor('white')
    ax.xaxis.label.set_color('black')
    ax.tick_params(axis='x', colors='black')
    ax.yaxis.label.set_color('black')
    ax.tick_params(axis='y', colors='black')
    fig.set_tight_layout(True)
    ax.grid(False)
    return fig, ax
def fig_canvas_image(fig):
    """Returns a [H, W, 3] uint8 np.array image from fig.canvas.tostring_rgb()."""
    # Just enough margin in the figure to display xticks and yticks.
    fig.subplots_adjust(
        left=0.08, bottom=0.08, right=0.98, top=0.98, wspace=0.0, hspace=0.0)
    # Force a draw so the canvas buffer is populated before reading it.
    fig.canvas.draw()
    # NOTE(review): canvas.tostring_rgb() was deprecated/removed in recent
    # matplotlib releases — confirm the pinned matplotlib version supports it.
    data = np.frombuffer(fig.canvas.tostring_rgb(), dtype=np.uint8)
    # get_width_height() is (W, H); reverse to (H, W) for the array shape.
    return data.reshape(fig.canvas.get_width_height()[::-1] + (3,))
def get_colormap(num_agents):
    """Compute a color map array of shape [num_agents, 4].

    Samples `num_agents` RGBA colors from the 'jet' colormap and shuffles
    them so neighbouring agent ids do not get near-identical colors.
    """
    cmap = cm.get_cmap('jet', num_agents)
    agent_colors = cmap(range(num_agents))
    np.random.shuffle(agent_colors)
    return agent_colors
def get_viewport(all_states, all_states_mask):
    """Gets the region containing the data.
    Args:
      all_states: states of agents as an array of shape [num_agents, num_steps,
        2].
      all_states_mask: binary mask of shape [num_agents, num_steps] for
        `all_states`.
    Returns:
      center_y: float. y coordinate for center of data.
      center_x: float. x coordinate for center of data.
      width: float. Width of data.
    """
    # Only valid (masked-in) points contribute to the viewport.
    valid = all_states[all_states_mask]
    xs = valid[..., 0]
    ys = valid[..., 1]
    y_lo, y_hi = np.min(ys), np.max(ys)
    x_lo, x_hi = np.min(xs), np.max(xs)
    center_y = (y_hi + y_lo) / 2
    center_x = (x_hi + x_lo) / 2
    # Square viewport: side is the larger of the two coordinate ranges.
    width = max(y_hi - y_lo, x_hi - x_lo)
    return center_y, center_x, width
def visualize_one_step(states,
                       mask,
                       roadgraph,
                       title,
                       center_y,
                       center_x,
                       width,
                       color_map,
                       size_pixels=1000):
    """Generate visualization for a single step.

    Args:
      states: [num_agents, 2] agent x/y positions at this step.
      mask: [num_agents] boolean validity mask for `states`.
      roadgraph: [num_points, >=2] roadgraph sample coordinates; only the
        first two columns (x, y) are plotted.
      title: title string for the plot.
      center_y: viewport center y (see get_viewport).
      center_x: viewport center x (see get_viewport).
      width: viewport side length.
      color_map: [num_agents, 4] RGBA color per agent.
      size_pixels: output image side length in pixels.

    Returns:
      [H, W, 3] uint8 np.array of the rendered figure.
    """
    # Create figure and axes.
    fig, ax = create_figure_and_axes(size_pixels=size_pixels)
    # Plot roadgraph.
    rg_pts = roadgraph[:, :2].T
    ax.plot(rg_pts[0, :], rg_pts[1, :], 'k.', alpha=1, ms=2)
    # Keep only the agents that are valid at this step.
    masked_x = states[:, 0][mask]
    masked_y = states[:, 1][mask]
    colors = color_map[mask]
    # Plot agent current position.
    ax.scatter(
        masked_x,
        masked_y,
        marker='o',
        linewidths=3,
        color=colors,
    )
    # Title.
    ax.set_title(title)
    # Set axes. Should be at least 10m on a side and cover 160% of agents.
    size = max(10, width * 1.0)
    ax.axis([
        -size / 2 + center_x, size / 2 + center_x, -size / 2 + center_y,
        size / 2 + center_y
    ])
    ax.set_aspect('equal')
    # Rasterize to an image and release the figure.
    image = fig_canvas_image(fig)
    plt.close(fig)
    return image
def vis_cur_and_fut(decoded_example, predictions=None, size_pixels=1000, bn=0, confs=None):
    """Render current positions plus ground-truth futures (and, optionally,
    predictions) for one element of a batch.

    Args:
      decoded_example: dict of batched torch-like tensors; each value is
        indexed by `bn` and converted via .detach().cpu().numpy().
      predictions: optional batched tensor of per-agent displacement
        predictions; shifted by the current state to absolute positions.
      size_pixels: output image side length in pixels.
      bn: batch index to visualize.
      confs: optional batched per-agent/per-modality confidences.

    Returns:
      [H, W, 3] uint8 image.
    """
    data = {key: value[bn].detach().cpu().numpy()
            for key, value in decoded_example.items()}
    center_x, center_y, color_map, current_states, current_states_mask, future_states, future_states_mask, \
    num_future_steps, num_past_steps, past_states, past_states_mask, roadgraph_xyz, width = prepare_data_for_vis(
        data)
    prediction = None
    if predictions is not None:
        prediction = predictions[bn].detach().cpu().numpy()
        # Predictions are relative displacements; anchor at the current state.
        prediction = prediction + current_states[:, np.newaxis]
    # Only show futures of the agents flagged for prediction.
    future_states_mask *= np.repeat(data["state/tracks_to_predict"].reshape(128, 1), 80, axis=1) > 0
    # BUG FIX: `confs[bn]` used to be evaluated unconditionally, raising a
    # TypeError whenever confs was left as its default None.
    batch_confs = None if confs is None else confs[bn]
    im = visualize_one_step_with_future(current_states[:, 0], current_states_mask[:, 0],
                                        future_states, future_states_mask, roadgraph_xyz,
                                        'cur with fut', center_y, center_x, width, color_map,
                                        size_pixels, predictions=prediction, confs=batch_confs)
    return im
def visualize_one_step_with_future(states, mask, future_states, future_states_mask, roadgraph, title,
                                   center_y, center_x, width, color_map, size_pixels=1000, predictions=None, confs=None):
    """Generate visualization for a single step, plus ground-truth futures
    and (optionally) multi-modal predicted trajectories.

    Args:
      states: [num_agents, 2] current agent positions.
      mask: [num_agents] validity mask for `states`.
      future_states: [num_agents, num_future_steps, 2] ground-truth futures.
      future_states_mask: [num_agents, num_future_steps] validity mask.
      roadgraph: [num_points, >=2] roadgraph sample coordinates.
      title: plot title string.
      center_y, center_x, width: viewport description (see get_viewport).
      color_map: [num_agents, 4] RGBA color per agent.
      size_pixels: output image side length in pixels.
      predictions: optional [num_agents, num_steps, num_modalities, 2] array
        of predicted absolute positions.
      confs: per-agent, per-modality confidence tensor (indexed as
        confs[ped, modality], torch-like). Required when `predictions` is
        given.

    Returns:
      [H, W, 3] uint8 image of the rendered figure.
    """
    fig, ax = create_figure_and_axes(size_pixels=size_pixels)
    # Plot roadgraph.
    rg_pts = roadgraph[:, :2].T
    ax.plot(rg_pts[0, :], rg_pts[1, :], 'k.', alpha=1, ms=2)
    masked_x = states[:, 0][mask]
    masked_y = states[:, 1][mask]
    colors = color_map[mask]
    # Plot agent current positions.
    ax.scatter(
        masked_x,
        masked_y,
        marker='o',
        linewidths=4,
        color=colors,
    )
    # Ground-truth futures: thick per-agent colored line with a thin yellow
    # overlay so GT is distinguishable from predictions.
    # (Generalized from a hard-coded `range(128)` to the actual agent count.)
    num_agents = future_states.shape[0]
    for ped in range(num_agents):
        xs, ys = [], []
        for step in range(future_states.shape[1]):
            if not future_states_mask[ped, step]:
                continue
            xs.append(future_states[ped, step, 0])
            ys.append(future_states[ped, step, 1])
        ax.plot(xs, ys, linewidth=4, color=color_map[ped])
        ax.plot(xs, ys, linewidth=2, color=np.array([181, 179, 92, 255]) / 255.)
    if predictions is not None:
        # BUG FIX: predictions.shape used to be unpacked *before* this None
        # check, raising AttributeError whenever predictions was omitted.
        nump, timestamps, modalities, datadim = predictions.shape
        for ped in range(nump):
            # Skip agents with no valid future at all.
            if future_states_mask[ped].sum() == 0:
                continue
            for modality in range(modalities):
                xs, ys = [], []
                for step in range(timestamps):
                    # (Removed an always-true `if [mask]:` list test that
                    # duplicated this continue.)
                    if not future_states_mask[ped, step]:
                        continue
                    xs.append(predictions[ped, step, modality, 0])
                    ys.append(predictions[ped, step, modality, 1])
                conf = confs[ped, modality].detach().cpu().item()
                # Fade the line by (1 - conf) alpha and scale width by conf.
                ax.plot(xs, ys,
                        linewidth=3 * conf,
                        color=color_map[ped] - np.array([0, 0, 0, 1 - conf]))
                ax.text(xs[-1], ys[-1], f"{conf:.2f}", fontsize="xx-small")
    # Title.
    ax.set_title(title)
    # Set axes. Should be at least 10m on a side.
    size = max(10, width * 1.0)
    ax.axis([
        -size / 2 + center_x, size / 2 + center_x, -size / 2 + center_y,
        size / 2 + center_y
    ])
    ax.set_aspect('equal')
    image = fig_canvas_image(fig)
    plt.close(fig)
    return image
def visualize_all_agents_smooth(
    decoded_example,
    size_pixels=1000,
):
    """Visualizes all agent predicted trajectories in a serie of images.
    Args:
      decoded_example: Dictionary containing agent info about all modeled agents.
      size_pixels: The size in pixels of the output image.
    Returns:
      T of [H, W, 3] uint8 np.arrays of the drawn matplotlib's figure canvas.
    """
    (center_x, center_y, color_map, current_states, current_states_mask,
     future_states, future_states_mask, num_future_steps, num_past_steps,
     past_states, past_states_mask, roadgraph_xyz,
     width) = prepare_data_for_vis(decoded_example)

    def _render(step_states, step_mask, step_title):
        # Render one [num_agents, 1, ...] time slice as a single image.
        return visualize_one_step(step_states[:, 0], step_mask[:, 0],
                                  roadgraph_xyz, step_title, center_y,
                                  center_x, width, color_map, size_pixels)

    images = []
    # Past time steps, counting down to the present.
    past_slices = zip(np.split(past_states, num_past_steps, 1),
                      np.split(past_states_mask, num_past_steps, 1))
    for i, (s, m) in enumerate(past_slices):
        images.append(_render(s, m, 'past: %d' % (num_past_steps - i)))
    # One image for the current time step.
    images.append(_render(current_states, current_states_mask, 'current'))
    # Future time steps, counting up from the present.
    future_slices = zip(np.split(future_states, num_future_steps, 1),
                        np.split(future_states_mask, num_future_steps, 1))
    for i, (s, m) in enumerate(future_slices):
        images.append(_render(s, m, 'future: %d' % (i + 1)))
    return images
def prepare_data_for_vis(decoded_example):
    """Reshape a decoded example dict into the arrays the plot helpers use.

    Returns the tuple (center_x, center_y, color_map, current_states,
    current_states_mask, future_states, future_states_mask,
    num_future_steps, num_past_steps, past_states, past_states_mask,
    roadgraph_xyz, width).
    """
    def _xy(split, num_steps):
        # [num_agents, num_steps, 2] float positions for one time split.
        stacked = np.stack([decoded_example['state/%s/x' % split],
                            decoded_example['state/%s/y' % split]], -1)
        return stacked.reshape(-1, num_steps, 2)

    def _valid(split, num_steps):
        # [128, num_steps] boolean validity mask for one time split.
        return decoded_example['state/%s/valid' % split].reshape(128, num_steps) > 0.0

    past_states = _xy('past', 10)
    past_states_mask = _valid('past', 10)
    # [num_agents, 1, 2] float32.
    current_states = _xy('current', 1)
    current_states_mask = _valid('current', 1)
    # [num_agents, num_future_steps, 2] float32.
    future_states = _xy('future', 80)
    future_states_mask = _valid('future', 80)
    # [num_points, 3] float32.
    roadgraph_xyz = decoded_example['roadgraph_samples/xyz'].reshape(-1, 3)
    num_agents, num_past_steps, _ = past_states.shape
    num_future_steps = future_states.shape[1]
    color_map = get_colormap(num_agents)
    # Concatenate all splits to compute one shared viewport.
    all_states = np.concatenate([past_states, current_states, future_states], 1)
    all_states_mask = np.concatenate(
        [past_states_mask, current_states_mask, future_states_mask], 1)
    center_y, center_x, width = get_viewport(all_states, all_states_mask)
    return center_x, center_y, color_map, current_states, current_states_mask, future_states, future_states_mask, \
        num_future_steps, num_past_steps, past_states, past_states_mask, roadgraph_xyz, width
6b5b04deff99b965746d9b733f9164d871724d78 | 2,144 | py | Python | regui/config_ui.py | yevgenyr/regui | 97ab9fbbe12eabc753ba129529b563a14da3908b | [
"MIT"
] | null | null | null | regui/config_ui.py | yevgenyr/regui | 97ab9fbbe12eabc753ba129529b563a14da3908b | [
"MIT"
] | null | null | null | regui/config_ui.py | yevgenyr/regui | 97ab9fbbe12eabc753ba129529b563a14da3908b | [
"MIT"
] | null | null | null | import PySimpleGUI as sg
import os
| 28.586667 | 83 | 0.658582 | import PySimpleGUI as sg
import os
def _icon(fname):
return os.path.join(os.path.dirname(os.path.dirname(__file__)), 'icons', fname)
class UIConfig:
    """Shared PySimpleGUI look-and-feel constants for the regui app.

    NOTE(review): the tail of this class body calls sg.change_look_and_feel /
    sg.set_options and opens+closes a throwaway sg.Window — these side
    effects run once, at class-definition (i.e. import) time.
    """
    # fonts
    font_style = 'courier 10 pitch'
    font_6 = (font_style, 6)
    font_6b = (font_style, 6, 'bold')
    font_8 = (font_style, 8)
    font_8b = (font_style, 8, 'bold')
    font_9 = (font_style, 9)
    font_9b = (font_style, 9, 'bold')
    font_11 = (font_style, 11)
    font_11b = (font_style, 11, 'bold')
    # standard_sizes (PySimpleGUI element sizes, in (cols, rows))
    schema_box_size = (800, 500)
    selector_short_size = (20, 8)
    selector_long_size = (40, 8)
    selector_index_size = (3, 8)
    entry_size = (20, 1)
    types_size = (20, 1)
    input_size = (50, 1)
    integer_input_size = (5, 1)
    multyline_size = (49, 2)
    edit_list_len = 75
    ### globals
    # look and feel
    gui_theme_1 = 'LightBrown3'
    gui_theme_2 = 'LightBrown1'
    gui_theme_3 = 'DarkBlue13'
    gui_theme_4 = 'LightBlue'
    # selected colors
    PALE_BLUE_BUTTON_COLOR = '#a5cadd'
    YELLOW_BUTTON_COLOR = '#d8d584'
    WHITE_COLOR = '#ffffff'
    BLACK_COLOR = '#000000'
    RED_COLOR = '#f48c84'
    GREEN_COLOR = '#abecab'
    GREY_COLOR = "#d9d9d9"
    DARK_BLUE = "#293194"
    # icons (absolute paths resolved via the module-level _icon helper)
    LOGO_ICON = _icon('regolith-logo.png')
    ENTER_ICON = _icon('enter.png')
    ENTER_LIST_ICON = _icon('enter_list.png')
    DATE_ICON = _icon('date.png')
    FILTER_ICON = _icon('filter.png')
    EDIT_ICON = _icon('edit.png')
    INFO_ICON = _icon('info.png')
    # global setup
    sg.change_look_and_feel(gui_theme_4)
    sg.set_options(icon=LOGO_ICON)
    sg.set_options(input_elements_background_color=WHITE_COLOR)
    sg.set_options(button_color=(BLACK_COLOR, YELLOW_BUTTON_COLOR))
    sg.set_options(text_color=BLACK_COLOR)
    sg.set_options(font=font_11)
    sg.set_options(element_padding=(5, 2))
    sg.set_options(border_width=1)
    sg.set_options(use_ttk_buttons=True)
    sg.set_options(ttk_theme="clam") # 'clam', 'alt', 'default', 'classic'
    # set window position
    # NOTE(review): map() returns a lazy iterator in Python 3; if
    # DEFAULT_WINDOW_LOCATION is expected to be a (x, y) pair this may need
    # tuple(...) — confirm against PySimpleGUI usage.
    window = sg.Window('')
    sg.DEFAULT_WINDOW_LOCATION = map(lambda x: x / 3, window.get_screen_size())
    window.close()
| 80 | 1,983 | 46 |
21f462d39e36c18101dd3311b03b9725f2b56199 | 2,415 | py | Python | api.py | Gimu/chancl | f8b6e319f6b3f0f4e165bfa025b917c43fad0a83 | [
"MIT"
] | 33 | 2017-05-07T08:06:45.000Z | 2021-04-06T04:40:22.000Z | api.py | gimu/chancli | f8b6e319f6b3f0f4e165bfa025b917c43fad0a83 | [
"MIT"
] | null | null | null | api.py | gimu/chancli | f8b6e319f6b3f0f4e165bfa025b917c43fad0a83 | [
"MIT"
] | 1 | 2020-05-31T11:11:35.000Z | 2020-05-31T11:11:35.000Z | #!/usr/bin/env python3
import urllib.error
import urllib.request
| 37.153846 | 139 | 0.601242 | #!/usr/bin/env python3
import urllib.error
import urllib.request
class ApiError(object):
    """Formats API failures into the response-dict shape used by Api."""
    @staticmethod
    def get_error(target, error):
        """Return error message."""
        message = "\nCould not generate {}\nFull error code: {}".format(target, error)
        return {'content': message, 'status': "Error occured"}
class Api(object):
    """Thin client for the 4chan JSON API (https://a.4cdn.org).

    Each method returns {'error': False | error-dict, 'result': json-text or
    None}; network failures are reported via ApiError rather than raised.
    """

    @staticmethod
    def _fetch(url, target):
        """Fetch *url* and decode as UTF-8; *target* names the resource for
        error messages. Deduplicates the identical try/except blocks the
        four public methods used to repeat."""
        data = {'error': False, 'result': None}
        try:
            data['result'] = urllib.request.urlopen(url).read().decode('utf-8')
        # HTTPError is a subclass of URLError, so this single clause covers
        # both cases the original handled identically.
        except urllib.error.URLError as error:
            data['error'] = ApiError.get_error(target, error)
        return data

    def get_boards(self):
        """Return boards' information."""
        return self._fetch("https://a.4cdn.org/boards.json", "boards list")

    def get_threads(self, board, page=1):
        """Get threads by board and page."""
        return self._fetch(
            "https://a.4cdn.org/{}/{}.json".format(board, page), "threads list")

    def get_thread(self, board, thread_id):
        """Get particular thread by id."""
        return self._fetch(
            "https://a.4cdn.org/{}/thread/{}.json".format(board, thread_id),
            "thread list")

    def get_archive(self, board):
        """Get archive of board."""
        return self._fetch(
            "https://a.4cdn.org/{}/archive.json".format(board), "archive list")
| 0 | 2,304 | 46 |
65c17ed8d6a8695bfc33a24d52ad5cb4f445f2c0 | 4,289 | py | Python | airmozilla/manage/tests/views/test_participants.py | RAMilewski/airmozilla | 70d52295294fd319e60b046bf75baf971cd00f98 | [
"BSD-3-Clause"
] | 1 | 2019-08-17T21:22:08.000Z | 2019-08-17T21:22:08.000Z | airmozilla/manage/tests/views/test_participants.py | RAMilewski/airmozilla | 70d52295294fd319e60b046bf75baf971cd00f98 | [
"BSD-3-Clause"
] | 4 | 2021-03-19T15:38:56.000Z | 2021-09-08T02:47:16.000Z | airmozilla/manage/tests/views/test_participants.py | Acidburn0zzz/airmozilla | 7b03af6d6efe9af00a6070f5327e10fb755c3766 | [
"BSD-3-Clause"
] | null | null | null | from nose.tools import eq_, ok_
from funfactory.urlresolvers import reverse
from airmozilla.main.models import Participant
from .base import ManageTestCase
| 40.847619 | 79 | 0.594777 | from nose.tools import eq_, ok_
from funfactory.urlresolvers import reverse
from airmozilla.main.models import Participant
from .base import ManageTestCase
class TestParticipants(ManageTestCase):
    """View tests for the participant management pages (list, edit, email,
    new, remove) under /manage/."""
    def test_participant_pages(self):
        """Participants pagination always returns valid pages."""
        response = self.client.get(reverse('manage:participants'))
        eq_(response.status_code, 200)
        # An out-of-range page must still render (not 404).
        response = self.client.get(reverse('manage:participants'),
                                   {'page': 5000})
        eq_(response.status_code, 200)
    def test_participant_find(self):
        """Search filters participants; returns all for bad search."""
        response_ok = self.client.post(
            reverse('manage:participants'),
            {
                'name': 'Tim'
            }
        )
        eq_(response_ok.status_code, 200)
        ok_(response_ok.content.find('Tim') >= 0)
        # A non-matching search falls back to showing everyone.
        response_fail = self.client.post(
            reverse('manage:participants'),
            {
                'name': 'Lincoln'
            }
        )
        eq_(response_fail.status_code, 200)
        ok_(response_fail.content.find('Tim') >= 0)
    def test_participant_edit(self):
        """Participant edit page responds OK; bad form results in failure;
        submission induces a change.
        """
        participant = Participant.objects.get(name='Tim Mickel')
        response = self.client.get(reverse('manage:participant_edit',
                                           kwargs={'id': participant.id}))
        eq_(response.status_code, 200)
        response_ok = self.client.post(
            reverse('manage:participant_edit', kwargs={'id': participant.id}),
            {
                'name': 'George Washington',
                'email': 'george@whitehouse.gov',
                'role': Participant.ROLE_PRINCIPAL_PRESENTER,
                'cleared': Participant.CLEARED_YES
            }
        )
        self.assertRedirects(response_ok, reverse('manage:participants'))
        participant_george = Participant.objects.get(id=participant.id)
        eq_(participant_george.name, 'George Washington')
        # Invalid email: form re-renders (200) instead of redirecting.
        response_fail = self.client.post(
            reverse('manage:participant_edit', kwargs={'id': participant.id}),
            {
                'name': 'George Washington',
                'email': 'bademail'
            }
        )
        eq_(response_fail.status_code, 200)
    def test_participant_email(self):
        """Participant email page generates a token, redirects properly."""
        participant = Participant.objects.get(name='Tim Mickel')
        participant.clear_token = ''
        participant.save()
        url = reverse('manage:participant_email',
                      kwargs={'id': participant.id})
        response = self.client.get(url)
        eq_(response.status_code, 200)
        # GET alone must have populated the clear_token.
        participant = Participant.objects.get(name='Tim Mickel')
        ok_(participant.clear_token)
        response_redirect = self.client.post(url)
        self.assertRedirects(response_redirect, reverse('manage:participants'))
    def test_participant_new(self):
        """New participant page responds OK and form works as expected."""
        response = self.client.get(reverse('manage:participant_new'))
        eq_(response.status_code, 200)
        with open('airmozilla/manage/tests/firefox.png') as fp:
            response_ok = self.client.post(
                reverse('manage:participant_new'),
                {
                    'name': 'Mozilla Firefox',
                    'slug': 'mozilla-firefox',
                    'photo': fp,
                    'email': 'mozilla@mozilla.com',
                    'role': Participant.ROLE_PRINCIPAL_PRESENTER,
                    'cleared': Participant.CLEARED_NO
                }
            )
        self.assertRedirects(response_ok, reverse('manage:participants'))
        participant = Participant.objects.get(name='Mozilla Firefox')
        eq_(participant.email, 'mozilla@mozilla.com')
        # The logged-in manager is recorded as the creator.
        eq_(participant.creator, self.user)
    def test_participant_remove(self):
        """Removing a participant works via the shared _delete_test helper."""
        participant = Participant.objects.get(name='Tim Mickel')
        self._delete_test(participant, 'manage:participant_remove',
                          'manage:participants')
| 195 | 3,912 | 23 |
b24cf5ddcfa475be657b5972e647ee20784e2ced | 2,708 | py | Python | build/lib/ringity/plots.py | kiri93/ringity | 3fc7990ce42219236235e41b9eeec6cd2477e477 | [
"MIT"
] | 7 | 2021-03-02T17:51:40.000Z | 2022-01-31T00:31:06.000Z | build/lib/ringity/plots.py | kiri93/ringity | 3fc7990ce42219236235e41b9eeec6cd2477e477 | [
"MIT"
] | null | null | null | build/lib/ringity/plots.py | kiri93/ringity | 3fc7990ce42219236235e41b9eeec6cd2477e477 | [
"MIT"
] | 1 | 2021-03-02T17:52:00.000Z | 2021-03-02T17:52:00.000Z | import networkx as nx
import matplotlib.pyplot as plt
CEMM_COL1 = ( 0/255, 85/255, 100/255)
CEMM_COL2 = ( 0/255, 140/255, 160/255)
CEMM_COL3 = ( 64/255, 185/255, 212/255)
CEMM_COL4 = (212/255, 236/255, 242/255)
DARK_CEMM_COL1 = (0/255, 43/255, 50/255)
BAR_COL = (0.639, 0.639, 0.639)
# -------------------------------- ACTUAL PLOTS --------------------------------
| 27.917526 | 80 | 0.509232 | import networkx as nx
import matplotlib.pyplot as plt
CEMM_COL1 = ( 0/255, 85/255, 100/255)
CEMM_COL2 = ( 0/255, 140/255, 160/255)
CEMM_COL3 = ( 64/255, 185/255, 212/255)
CEMM_COL4 = (212/255, 236/255, 242/255)
DARK_CEMM_COL1 = (0/255, 43/255, 50/255)
BAR_COL = (0.639, 0.639, 0.639)
def set():
    """Apply ringity's default matplotlib rc settings (axis/tick font sizes).

    NOTE(review): this function shadows the builtin `set`; renaming it would
    change the module's public API, so it is only flagged here.
    """
    # sns.set() <--- customize rc!
    plt.rc('axes', labelsize=24, titlesize=28)
    plt.rc('xtick', labelsize=24)
    plt.rc('ytick', labelsize=24)
def ax_setup(ax):
    """Apply the ringity house style to a matplotlib axis, in place:
    large ticks, transparent patch, thick black left/bottom spines."""
    ax.tick_params(axis='both', which='major', labelsize=24)
    ax.patch.set_alpha(0)
    for side in ('left', 'bottom'):
        ax.spines[side].set_linewidth(2.5)
        ax.spines[side].set_color('k')
# -------------------------------- ACTUAL PLOTS --------------------------------
def plot_seq(dgm, crop=None, ax=None, **kwargs):
    """Bar plot of a diagram's `.sequence` values.

    Args:
        dgm: diagram-like object exposing .copy(), .crop(...) and
            .sequence (an iterable) — presumably a ringity diagram;
            TODO confirm against the diagram class.
        crop: optional argument forwarded to dgm.crop(); None plots all.
        ax: optional matplotlib axis; a fresh transparent figure is
            created when omitted.
        **kwargs: accepted but currently unused.
    """
    if ax is None:
        fig, ax = plt.subplots()
        fig.patch.set_alpha(0)
    if crop is None:
        dgm_plot = dgm.copy()
    else:
        dgm_plot = dgm.crop(crop)
    ax_setup(ax)
    bar = list(dgm_plot.sequence)
    ax.bar(range(len(bar)), bar, color=BAR_COL);
def plot_nx(G,
            pos = None,
            ax = None,
            node_colors = None,
            node_alpha = 0.3,
            edge_colors = None,
            edge_alpha = 0.2,
            **kwargs):
    """Draw a networkx graph in the ringity color scheme, axes hidden.

    Args:
        G: networkx graph.
        pos: node layout dict; computed with spring_layout when omitted.
        ax: optional matplotlib axis; a fresh transparent figure is
            created when omitted.
        node_colors / edge_colors: per-node / per-edge colors; default to
            the module CEMM palette.
        node_alpha / edge_alpha: transparency of nodes / edges.
        **kwargs: accepted but currently unused.
    """
    if pos is None:
        pos = nx.spring_layout(G)
    if ax is None:
        fig, ax = plt.subplots(figsize=(12,8));
        fig.patch.set_alpha(0)
    if node_colors is None:
        node_colors = [CEMM_COL1]*nx.number_of_nodes(G)
    if edge_colors is None:
        edge_colors = [CEMM_COL2]*nx.number_of_edges(G)
    nodes = nx.draw_networkx_nodes(G, pos=pos,
                           alpha=node_alpha,
                           ax=ax,
                           node_color=node_colors,
                           node_size=15,
                           linewidths=1)
    edges = nx.draw_networkx_edges(G, pos=pos,
                           alpha=edge_alpha,
                           ax=ax,
                           edge_color=edge_colors)
    ax.axis('off');
def plot_dgm(dgm, ax=None, **kwargs):
    """Scatter a persistence diagram (birth vs. death) with the diagonal.

    Args:
        dgm: iterable of points exposing .birth and .death attributes —
            presumably a ringity persistence diagram; TODO confirm.
        ax: optional matplotlib axis; a fresh transparent figure is
            created when omitted.
        **kwargs: accepted but currently unused.
    """
    x,y = zip(*[(k.birth,k.death) for k in dgm])
    # Axis limits scale with the largest death value.
    d = max(y)
    if ax is None:
        fig, ax = plt.subplots()
        fig.patch.set_alpha(0)
    ax_setup(ax)
    hw = 0.025 # head width of the arrow
    ax.set_xlim([-hw, d*1.1])
    ax.set_ylim([-hw, d*1.1])
    ax.plot(x, y, '*', markersize=5, color=CEMM_COL2);
    # Dashed birth == death diagonal for reference.
    ax.plot([0,d],[0,d], color=DARK_CEMM_COL1,
            linewidth=1,
            linestyle='dashed');
| 2,221 | 0 | 114 |
d94019710e3a00ceb6d7adda682f4c33170b887d | 324 | py | Python | {{cookiecutter.project_slug}}/src/users/tests/test_whoami.py | nvo87/django | fd07fb74ab59e868c73512cd0ca4952129b44cd8 | [
"MIT"
] | 98 | 2020-04-21T20:22:16.000Z | 2021-06-07T12:33:51.000Z | {{cookiecutter.project_slug}}/src/users/tests/test_whoami.py | nvo87/django | fd07fb74ab59e868c73512cd0ca4952129b44cd8 | [
"MIT"
] | 70 | 2020-04-21T21:59:49.000Z | 2021-06-13T13:35:01.000Z | {{cookiecutter.project_slug}}/src/users/tests/test_whoami.py | ginsenghillock/django | 65ab4f52897ca7efdfde347383153fca4f2d2c14 | [
"MIT"
] | 23 | 2020-04-23T06:03:13.000Z | 2021-06-09T06:59:34.000Z | import pytest
pytestmark = [pytest.mark.django_db]
| 19.058824 | 60 | 0.66358 | import pytest
pytestmark = [pytest.mark.django_db]
def test_ok(as_user, user):
    """An authenticated user gets their own id and username from /users/me/."""
    got = as_user.get('/api/v1/users/me/')
    assert got['id'] == user.pk
    assert got['username'] == user.username
def test_anon(as_anon):
    """Anonymous requests to /users/me/ are rejected with 401."""
    got = as_anon.get('/api/v1/users/me/', as_response=True)
    assert got.status_code == 401
| 224 | 0 | 46 |
c2b33a793ad163ec1d6be7cac26ed7a94c6f9d30 | 1,559 | py | Python | aiosmf/smf/rpc/dynamic_header.py | noahdesu/aiosmf | 72ff874a2750c253cf7c0154c2a676e521bea7ce | [
"Apache-2.0"
] | 1 | 2019-03-30T17:22:08.000Z | 2019-03-30T17:22:08.000Z | aiosmf/smf/rpc/dynamic_header.py | senior7515/aiosmf | 72ff874a2750c253cf7c0154c2a676e521bea7ce | [
"Apache-2.0"
] | 6 | 2019-03-30T17:58:52.000Z | 2019-04-09T13:09:49.000Z | aiosmf/smf/rpc/dynamic_header.py | noahdesu/aiosmf | 72ff874a2750c253cf7c0154c2a676e521bea7ce | [
"Apache-2.0"
] | 2 | 2019-03-31T14:09:40.000Z | 2022-03-22T16:51:18.000Z | # automatically generated by the FlatBuffers compiler, do not modify
# namespace: rpc
import flatbuffers
# /// \brief used for extra headers, ala HTTP
# /// The use case for the core is to support
# /// zipkin/google-Dapper style tracing
# /// alows for binary search lookup
# /// use with CreateVectorOfSortedTables<> instead of the CreateVector
# dynamic_header
# dynamic_header
| 35.431818 | 140 | 0.704298 | # automatically generated by the FlatBuffers compiler, do not modify
# namespace: rpc
import flatbuffers
# /// \brief used for extra headers, ala HTTP
# /// The use case for the core is to support
# /// zipkin/google-Dapper style tracing
class dynamic_header(object):
    """FlatBuffers read accessor for the smf.rpc.dynamic_header table
    (generated code — regenerate with flatc rather than editing)."""
    __slots__ = ['_tab']
    @classmethod
    def GetRootAsdynamic_header(cls, buf, offset):
        # Resolve the root table offset and wrap the buffer.
        n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset)
        x = dynamic_header()
        x.Init(buf, n + offset)
        return x
    # dynamic_header
    def Init(self, buf, pos):
        self._tab = flatbuffers.table.Table(buf, pos)
    # /// alows for binary search lookup
    # /// use with CreateVectorOfSortedTables<> instead of the CreateVector
    # dynamic_header
    def Key(self):
        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4))
        if o != 0:
            return self._tab.String(o + self._tab.Pos)
        return None
    # dynamic_header
    def Value(self):
        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(6))
        if o != 0:
            return self._tab.String(o + self._tab.Pos)
        return None
# Builder helpers for serializing a dynamic_header table (generated code).
def dynamic_headerStart(builder): builder.StartObject(2)
def dynamic_headerAddKey(builder, key): builder.PrependUOffsetTRelativeSlot(0, flatbuffers.number_types.UOffsetTFlags.py_type(key), 0)
def dynamic_headerAddValue(builder, value): builder.PrependUOffsetTRelativeSlot(1, flatbuffers.number_types.UOffsetTFlags.py_type(value), 0)
def dynamic_headerEnd(builder): return builder.EndObject()
| 877 | 125 | 163 |
f20094ac6ecfc85b1a237048817b89f64de8c4f5 | 4,898 | py | Python | tools/evaluate/track_proto_evaluate.py | myfavouritekk/TPN | b346774cddeb0b9e0c1d8ed1f91a69f00436456d | [
"MIT"
] | 74 | 2017-04-07T04:06:52.000Z | 2022-01-30T01:40:10.000Z | tools/evaluate/track_proto_evaluate.py | wulongyuan/TPN | b346774cddeb0b9e0c1d8ed1f91a69f00436456d | [
"MIT"
] | 8 | 2017-07-19T06:54:02.000Z | 2018-08-28T07:38:52.000Z | tools/evaluate/track_proto_evaluate.py | wulongyuan/TPN | b346774cddeb0b9e0c1d8ed1f91a69f00436456d | [
"MIT"
] | 28 | 2017-04-07T03:50:52.000Z | 2019-02-28T04:18:15.000Z | #!/usr/bin/env python
import argparse
import os
import os.path as osp
import glob
import numpy as np
import sys
import cPickle
from time import time
this_dir = osp.dirname(__file__)
sys.path.insert(0, osp.join(this_dir, '../../external/py-faster-rcnn/lib/'))
sys.path.insert(0, osp.join(this_dir, '../../src'))
sys.path.insert(0, osp.join(this_dir, '../../external/'))
from vdetlib.utils.protocol import proto_load
from fast_rcnn.nms_wrapper import nms
from tpn.evaluate import write_ilsvrc_results_file
# Python 2 script (print statements, xrange): aggregates per-track detection
# protos into per-image, per-class NMS'd detections, pickles them, and
# optionally writes an ILSVRC-format results file.
if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    parser.add_argument('track_dir',
        help='Directory that contains all track detection results.')
    parser.add_argument('vid_dir')
    parser.add_argument('image_list')
    parser.add_argument('score_key')
    parser.add_argument('box_key')
    parser.add_argument('output_dir')
    parser.add_argument('--results', type=str, default='',
        help='Result file.')
    parser.add_argument('--thres', type=float, default=0.01,
        help='Detection score threshold. [0.01]')
    parser.add_argument('--num_classes', type=int, default=31,
        help='Number of classes. [31]')
    parser.add_argument('--max_per_image', type=int, default=100,
        help='Maximum detection in each image. [100]')
    args = parser.parse_args()
    # read image_list
    # Maps "video/frame" names to their 1-based evaluation indices.
    with open(args.image_list, 'r') as f:
        image_list = dict([line.strip().split() for line in f])
    num_classes = args.num_classes
    # all_boxes[class][image] -> [N, 5] array of (x1, y1, x2, y2, score).
    all_boxes = [[[] for _ in xrange(len(image_list))]
                 for _ in xrange(num_classes)]
    # process vid detections
    tracks = sorted(glob.glob(osp.join(args.track_dir, '*')))
    for track_path in tracks:
        print track_path
        vid_name = osp.split(track_path)[-1].split('.')[0]
        vid_proto = proto_load(osp.join(args.vid_dir, vid_name + '.vid'))
        track_proto = proto_load(track_path)
        for frame in vid_proto['frames']:
            frame_name = osp.join(vid_name, osp.splitext(frame['path'])[0])
            # Skip frames that are not part of the evaluation set.
            if frame_name not in image_list.keys(): continue
            frame_idx = frame['frame']
            sub_idx = int(image_list[frame_name])
            global_idx = sub_idx - 1
            start_time = time()
            scores, boxes = _frame_dets(track_proto['tracks'], frame_idx,
                args.score_key, args.box_key)
            boxes = boxes.reshape((boxes.shape[0], -1))
            # Per-class thresholding + NMS (class 0 is background, skipped).
            for j in xrange(1, num_classes):
                inds = np.where(scores[:, j] > args.thres)[0]
                cls_scores = scores[inds, j]
                cls_boxes = boxes[inds, j*4:(j+1)*4]
                cls_dets = np.hstack((cls_boxes, cls_scores[:, np.newaxis])) \
                    .astype(np.float32, copy=False)
                keep = nms(cls_dets, 0.3, force_cpu=True)
                cls_dets = cls_dets[keep, :]
                all_boxes[j][global_idx] = cls_dets
            # Limit to max_per_image detections *over all classes*
            if args.max_per_image > 0:
                image_scores = np.hstack([all_boxes[j][global_idx][:, -1]
                                          for j in xrange(1, num_classes)])
                if len(image_scores) > args.max_per_image:
                    image_thresh = np.sort(image_scores)[-args.max_per_image]
                    for j in xrange(1, num_classes):
                        keep = np.where(all_boxes[j][global_idx][:, -1] >= image_thresh)[0]
                        all_boxes[j][global_idx] = all_boxes[j][global_idx][keep, :]
            end_time = time()
            print "{}/{}: {:.03f} s".format(sub_idx, len(image_list), end_time - start_time)
            sys.stdout.flush()
    # Persist raw detections, then (optionally) the ILSVRC results file.
    det_file = osp.join(args.output_dir, 'detections.pkl')
    if not osp.isdir(args.output_dir):
        os.makedirs(args.output_dir)
    with open(det_file, 'wb') as f:
        cPickle.dump(all_boxes, f, cPickle.HIGHEST_PROTOCOL)
    if args.results:
        with open(args.results, 'w') as f:
            write_ilsvrc_results_file(all_boxes, f, thres=args.thres)
| 41.508475 | 92 | 0.606778 | #!/usr/bin/env python
import argparse
import os
import os.path as osp
import glob
import numpy as np
import sys
import cPickle
from time import time
this_dir = osp.dirname(__file__)
sys.path.insert(0, osp.join(this_dir, '../../external/py-faster-rcnn/lib/'))
sys.path.insert(0, osp.join(this_dir, '../../src'))
sys.path.insert(0, osp.join(this_dir, '../../external/'))
from vdetlib.utils.protocol import proto_load
from fast_rcnn.nms_wrapper import nms
from tpn.evaluate import write_ilsvrc_results_file
def _frame_dets(tracks, frame_idx, score_key, box_key):
scores = []
boxes = []
for track in tracks:
for frame in track:
if frame_idx != frame['frame']: continue
assert score_key in frame
assert box_key in frame
cur_scores = np.asarray(frame[score_key]).flatten()[np.newaxis,:]
cur_boxes = np.asarray(frame[box_key]).flatten()[np.newaxis,:]
num_cls = cur_scores.shape[1]
assert cur_boxes.shape[1] in [4, 4 * num_cls]
# repeat boxes if not class specific
if cur_boxes.shape[1] == 4:
cur_boxes = np.tile(cur_boxes, num_cls)
scores.append(cur_scores.copy())
boxes.append(cur_boxes.copy())
scores = np.vstack(scores)
boxes = np.vstack(boxes)
return scores, boxes
if __name__ == '__main__':
    # Command-line driver (Python 2): convert per-track detections into
    # per-frame, per-class arrays, apply thresholding + NMS, and dump the
    # results as a pickle (and optionally an ILSVRC VID results file).
    parser = argparse.ArgumentParser()
    parser.add_argument('track_dir',
        help='Directory that contains all track detection results.')
    parser.add_argument('vid_dir')
    parser.add_argument('image_list')
    parser.add_argument('score_key')
    parser.add_argument('box_key')
    parser.add_argument('output_dir')
    parser.add_argument('--results', type=str, default='',
        help='Result file.')
    parser.add_argument('--thres', type=float, default=0.01,
        help='Detection score threshold. [0.01]')
    parser.add_argument('--num_classes', type=int, default=31,
        help='Number of classes. [31]')
    parser.add_argument('--max_per_image', type=int, default=100,
        help='Maximum detection in each image. [100]')
    args = parser.parse_args()
    # read image_list
    # Each line is "<video>/<frame-stem> <1-based global index>".
    with open(args.image_list, 'r') as f:
        image_list = dict([line.strip().split() for line in f])
    num_classes = args.num_classes
    # all_boxes[class][frame] -> (n, 5) array of [x1, y1, x2, y2, score].
    all_boxes = [[[] for _ in xrange(len(image_list))]
                 for _ in xrange(num_classes)]
    # process vid detections
    tracks = sorted(glob.glob(osp.join(args.track_dir, '*')))
    for track_path in tracks:
        print track_path
        vid_name = osp.split(track_path)[-1].split('.')[0]
        vid_proto = proto_load(osp.join(args.vid_dir, vid_name + '.vid'))
        track_proto = proto_load(track_path)
        for frame in vid_proto['frames']:
            frame_name = osp.join(vid_name, osp.splitext(frame['path'])[0])
            # Skip frames that are not part of the evaluation list.
            if frame_name not in image_list.keys(): continue
            frame_idx = frame['frame']
            sub_idx = int(image_list[frame_name])
            global_idx = sub_idx - 1
            start_time = time()
            scores, boxes = _frame_dets(track_proto['tracks'], frame_idx,
                args.score_key, args.box_key)
            boxes = boxes.reshape((boxes.shape[0], -1))
            # Per-class score threshold followed by NMS (class 0 = background).
            for j in xrange(1, num_classes):
                inds = np.where(scores[:, j] > args.thres)[0]
                cls_scores = scores[inds, j]
                cls_boxes = boxes[inds, j*4:(j+1)*4]
                cls_dets = np.hstack((cls_boxes, cls_scores[:, np.newaxis])) \
                    .astype(np.float32, copy=False)
                keep = nms(cls_dets, 0.3, force_cpu=True)
                cls_dets = cls_dets[keep, :]
                all_boxes[j][global_idx] = cls_dets
            # Limit to max_per_image detections *over all classes*
            if args.max_per_image > 0:
                image_scores = np.hstack([all_boxes[j][global_idx][:, -1]
                    for j in xrange(1, num_classes)])
                if len(image_scores) > args.max_per_image:
                    # Keep only the max_per_image highest scores overall.
                    image_thresh = np.sort(image_scores)[-args.max_per_image]
                    for j in xrange(1, num_classes):
                        keep = np.where(all_boxes[j][global_idx][:, -1] >= image_thresh)[0]
                        all_boxes[j][global_idx] = all_boxes[j][global_idx][keep, :]
            end_time = time()
            print "{}/{}: {:.03f} s".format(sub_idx, len(image_list), end_time - start_time)
            sys.stdout.flush()
    det_file = osp.join(args.output_dir, 'detections.pkl')
    if not osp.isdir(args.output_dir):
        os.makedirs(args.output_dir)
    # Raw per-class detections, reusable without re-running this script.
    with open(det_file, 'wb') as f:
        cPickle.dump(all_boxes, f, cPickle.HIGHEST_PROTOCOL)
    if args.results:
        # Optional ILSVRC-format results file for the official evaluation.
        with open(args.results, 'w') as f:
            write_ilsvrc_results_file(all_boxes, f, thres=args.thres)
| 816 | 0 | 23 |
1c60b18e5b2ea2aef181e331209ccde98b184675 | 451 | py | Python | mindhome_alpha/erpnext/patches/v13_0/add_po_to_global_search.py | Mindhome/field_service | 3aea428815147903eb9af1d0c1b4b9fc7faed057 | [
"MIT"
] | 1 | 2021-04-29T14:55:29.000Z | 2021-04-29T14:55:29.000Z | mindhome_alpha/erpnext/patches/v13_0/add_po_to_global_search.py | Mindhome/field_service | 3aea428815147903eb9af1d0c1b4b9fc7faed057 | [
"MIT"
] | null | null | null | mindhome_alpha/erpnext/patches/v13_0/add_po_to_global_search.py | Mindhome/field_service | 3aea428815147903eb9af1d0c1b4b9fc7faed057 | [
"MIT"
] | 1 | 2021-04-29T14:39:01.000Z | 2021-04-29T14:39:01.000Z | from __future__ import unicode_literals
import frappe
| 25.055556 | 82 | 0.738359 | from __future__ import unicode_literals
import frappe
def execute():
	"""Patch: register "Purchase Order" as a doctype allowed in Global Search."""
	settings = frappe.get_single("Global Search Settings")
	already_allowed = any(
		dt.document_type == "Purchase Order"
		for dt in settings.allowed_in_global_search
	)
	if already_allowed:
		# Nothing to do -- the doctype is already searchable.
		return
	settings.append(
		"allowed_in_global_search", {"document_type": "Purchase Order"}
	)
	settings.save(ignore_permissions=True)
| 373 | 0 | 23 |
d0c87b6684335749368b5ef0252dbf7af689cfa3 | 366 | py | Python | tools/is_prime.py | lucasayres/python-tools | 686b84986aae1b1714fa5645b1f2a3fd6ef8355d | [
"MIT"
] | 71 | 2018-06-28T17:38:15.000Z | 2022-02-08T17:42:42.000Z | tools/is_prime.py | DalavanCloud/python-tools | 686b84986aae1b1714fa5645b1f2a3fd6ef8355d | [
"MIT"
] | null | null | null | tools/is_prime.py | DalavanCloud/python-tools | 686b84986aae1b1714fa5645b1f2a3fd6ef8355d | [
"MIT"
] | 14 | 2018-07-08T03:29:29.000Z | 2022-03-22T21:04:39.000Z | # -*- coding: utf-8 -*-
def is_prime(number):
    """Check if a number is a prime number.

    Args:
        number (int): Number.

    Returns:
        bool: Return True if number is a prime number and False if not.
    """
    if number <= 1:
        return False
    # Trial division only needs to reach sqrt(number): every composite
    # number has at least one divisor no larger than its square root.
    for x in range(2, int(number ** 0.5) + 1):
        if number % x == 0:
            return False
    return True
| 20.333333 | 71 | 0.54918 | # -*- coding: utf-8 -*-
def is_prime(number):
    """Check if a number is a prime number.

    Args:
        number (int): Number.

    Returns:
        bool: Return True if number is a prime number and False if not.
    """
    # Values below 2 are not prime by definition.
    if number <= 1:
        return False
    # Prime iff no candidate in [2, number) divides it evenly.
    return all(number % candidate != 0 for candidate in range(2, number))
| 0 | 0 | 0 |
81d055c649c5fd50879a07d7ea549d30ab3fdf38 | 2,884 | py | Python | runfile/appstore.py | mhdahsan2000/AppScraper2.0 | 3bc53273ace6f2fe772d2c0c379a6a254cfa05b2 | [
"MIT"
] | 2 | 2021-11-12T08:28:21.000Z | 2021-11-27T02:17:07.000Z | runfile/appstore.py | mhdahsan2000/AppScraper2.0 | 3bc53273ace6f2fe772d2c0c379a6a254cfa05b2 | [
"MIT"
] | null | null | null | runfile/appstore.py | mhdahsan2000/AppScraper2.0 | 3bc53273ace6f2fe772d2c0c379a6a254cfa05b2 | [
"MIT"
] | null | null | null | """
Created on Thu Aug 2 19:42:10 2021
@author: Mohammed Ahsan
-------------------------------------------------------
APP REVIEW SCRAPER - ioS and google Store.
version : 1.0
Build name : RedSparrow -----
-------------------------------------------------------
"""
from app_store_scraper import AppStore
from pprint import pprint
import json
from bson import json_util
import datetime
import csv
# CSV
# Aggregated reviews for every app listed in testnames.csv.
updatedlist = []
# App Store identifiers (one per CSV row) used as the scraper's app ids.
results = []
# Read the app names from the first column of testnames.csv.
with open('testnames.csv', newline='') as inputfile:
    for row in csv.reader(inputfile):
        results.append(row[0])
# Fetch a batch of reviews (5 per app) for each app and aggregate them.
for i in results:
    output = AppStore(country="us", app_name=i)
    output.review(how_many=5)
    updatedlist.append(output.reviews)
# print the output.
print(updatedlist)
# write the reviews to a text file as output.
with open('OUTPUTFILEAPPS.txt', 'w', encoding='utf-8') as f:
    f.write(str(updatedlist))
# Convert the scraped data into JSON.
# BUG FIX: previously only the last app's reviews (output.reviews) were
# dumped; dump the aggregated list so the JSON matches the text output.
with open("OUTPUTJSON.json", 'w') as file:
    file.write(json.dumps(updatedlist, default=json_util.default, indent=0, sort_keys=False))
# TESTING.
"""
# Fetch the App using country name and app name
output = AppStore(country="us", app_name="Fidget Toys Trading 3D")
# Count of how many reviews
output.review(how_many=10000)
# updated list to store the reviews.
updatedlist = []
# Add the reviews to the list
updatedlist.append(output.reviews)
# Write the Output into a TEXT file.
with open('APPREVIEWS.txt', 'w', encoding='utf-8') as f:
f.write(str(updatedlist))
# Convert the list to JSON.
print(updatedlist)
print(type(updatedlist))
#pprint(monopoly.reviews)
#pprint(monopoly.reviews_count)
"""
# CSV_2
"""
# iterate through the list to fetch the reviews of all of the apps. in the appnames list.
or i in lists_from_csv :
output = AppStore(country="us", app_name=i)
output.review(how_many=20)f
updatedlist.append(output.reviews)
"""
| 26.458716 | 253 | 0.668169 | """
Created on Thu Aug 2 19:42:10 2021
@author: Mohammed Ahsan
-------------------------------------------------------
APP REVIEW SCRAPER - ioS and google Store.
version : 1.0
Build name : RedSparrow -----
-------------------------------------------------------
"""
from app_store_scraper import AppStore
from pprint import pprint
import json
from bson import json_util
import datetime
import csv
# CSV
# Updated list to store all of the game names.
updatedlist = []
# A new list to store all of the game names from the CSV File.
results = []
# CONVERT THE FIELDS IN THE CSV INTO A NEW LIST called as results.
# Add the app names as new rows into the testnames.csv file. The app names are the app id's for the scrapper.
# testnames.csv consist of all the app names as new rows .
with open('testnames.csv', newline='') as inputfile:
for row in csv.reader(inputfile):
results.append(row[0])
# USE list incase if the reading from csv is unsuccessfull.
# The list of app names that we would want the reviews of .
#appnames = ["monopoly"]
#appnames = ["monopoly","Fidget Toys Trading 3D","Flow Free","Two Dots","Blackout Bingo - Bingo Game","Pull the Pin","Parking Jam 3D","Adorable Home"," Match 3D"," Terraforming Mars","The Game of Life 2","Jigsaw Puzzle","Coin Pusher - Lucky Dozer Game"]
# List of app. names the reviews to.
# Iterate through the results list to fetch the reviews of all of the apps - list with field names from CSV.
# Fetch a batch of reviews (5 per app) for every app name and aggregate them.
for i in results :
    output = AppStore(country="us", app_name=i)
    output.review(how_many=5)
    updatedlist.append(output.reviews)
# print the output.
print(updatedlist)
# write the reviews to a text file as output.
with open('OUTPUTFILEAPPS.txt', 'w', encoding='utf-8') as f:
    f.write(str(updatedlist))
# Convert the Scraped data into JSON.
# NOTE(review): this dumps only the reviews of the LAST app in the loop
# (output.reviews), not the aggregated updatedlist -- confirm intent.
with open("OUTPUTJSON.json", 'w') as file :
    file.write((json.dumps(output.reviews,default=json_util.default, indent=0, sort_keys= False)))
# TESTING.
"""
# Fetch the App using country name and app name
output = AppStore(country="us", app_name="Fidget Toys Trading 3D")
# Count of how many reviews
output.review(how_many=10000)
# updated list to store the reviews.
updatedlist = []
# Add the reviews to the list
updatedlist.append(output.reviews)
# Write the Output into a TEXT file.
with open('APPREVIEWS.txt', 'w', encoding='utf-8') as f:
f.write(str(updatedlist))
# Convert the list to JSON.
print(updatedlist)
print(type(updatedlist))
#pprint(monopoly.reviews)
#pprint(monopoly.reviews_count)
"""
# CSV_2
"""
# iterate through the list to fetch the reviews of all of the apps. in the appnames list.
or i in lists_from_csv :
output = AppStore(country="us", app_name=i)
output.review(how_many=20)f
updatedlist.append(output.reviews)
"""
| 0 | 0 | 0 |
9128f46dfdc43f9451f8418dcdfd12e28a73cc9f | 1,691 | py | Python | setup.py | muon-spectroscopy-computational-project/muspinsim | d9e971edd840ab0c33b143f9b5694bc1b09011d2 | [
"MIT"
] | null | null | null | setup.py | muon-spectroscopy-computational-project/muspinsim | d9e971edd840ab0c33b143f9b5694bc1b09011d2 | [
"MIT"
] | null | null | null | setup.py | muon-spectroscopy-computational-project/muspinsim | d9e971edd840ab0c33b143f9b5694bc1b09011d2 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
#
import setuptools
# The PyPI long description is taken verbatim from the README.
with open("README.md", "r", encoding="utf-8") as fh:
    long_description = fh.read()
# Execute version.py in an isolated namespace so __version__ can be read
# without importing (and hence requiring the dependencies of) the package.
version = {}
with open("muspinsim/version.py") as fp:
    exec(fp.read(), version)
setuptools.setup(
    name="muspinsim",
    version=version["__version__"],
    author="Simone Sturniolo",
    author_email="simonesturniolo@gmail.com",
    description="Full quantum simulation of muon experiments",
    long_description=long_description,
    long_description_content_type="text/markdown",
    url="https://github.com/muon-spectroscopy-computational-project/muspinsim.git",
    packages=setuptools.find_packages(),
    classifiers=[
        "Programming Language :: Python :: 3",
        "License :: OSI Approved :: MIT License",
        "Operating System :: OS Independent",
        # How mature is this project? Common values are
        #   3 - Alpha
        #   4 - Beta
        #   5 - Production/Stable
        "Development Status :: 5 - Production/Stable",
        # Indicate who your project is intended for
        "Intended Audience :: Science/Research",
        "Topic :: Scientific/Engineering :: Chemistry",
        "Topic :: Scientific/Engineering :: Physics",
        "Topic :: Scientific/Engineering :: Information Analysis",
    ],
    install_requires=["numpy", "scipy", "soprano", "lark"],
    extras_require={
        "docs": ["mkdocs", "pymdown-extensions"],
        "dev": ["flake8", "black>=22.3.0", "pytest", "pre-commit"],
    },
    entry_points={
        "console_scripts": [
            "muspinsim = muspinsim.__main__:main",
            "muspinsim.mpi = muspinsim.__main__:main_mpi",
        ]
    },
    python_requires=">=3.6",
)
| 33.82 | 83 | 0.626848 | #!/usr/bin/env python
#
import setuptools
# The PyPI long description is taken verbatim from the README.
with open("README.md", "r", encoding="utf-8") as fh:
    long_description = fh.read()
# Execute version.py in an isolated namespace so __version__ can be read
# without importing (and hence requiring the dependencies of) the package.
version = {}
with open("muspinsim/version.py") as fp:
    exec(fp.read(), version)
setuptools.setup(
    name="muspinsim",
    version=version["__version__"],
    author="Simone Sturniolo",
    author_email="simonesturniolo@gmail.com",
    description="Full quantum simulation of muon experiments",
    long_description=long_description,
    long_description_content_type="text/markdown",
    url="https://github.com/muon-spectroscopy-computational-project/muspinsim.git",
    packages=setuptools.find_packages(),
    classifiers=[
        "Programming Language :: Python :: 3",
        "License :: OSI Approved :: MIT License",
        "Operating System :: OS Independent",
        # How mature is this project? Common values are
        #   3 - Alpha
        #   4 - Beta
        #   5 - Production/Stable
        "Development Status :: 5 - Production/Stable",
        # Indicate who your project is intended for
        "Intended Audience :: Science/Research",
        "Topic :: Scientific/Engineering :: Chemistry",
        "Topic :: Scientific/Engineering :: Physics",
        "Topic :: Scientific/Engineering :: Information Analysis",
    ],
    install_requires=["numpy", "scipy", "soprano", "lark"],
    extras_require={
        "docs": ["mkdocs", "pymdown-extensions"],
        "dev": ["flake8", "black>=22.3.0", "pytest", "pre-commit"],
    },
    entry_points={
        "console_scripts": [
            "muspinsim = muspinsim.__main__:main",
            "muspinsim.mpi = muspinsim.__main__:main_mpi",
        ]
    },
    python_requires=">=3.6",
)
| 0 | 0 | 0 |
69f5f911ce12976f518733c0e2fcb7d5be7964b3 | 826 | py | Python | PINNFramework/__init__.py | Jeyhun1/NeuralSolvers | 46f3214aed881eab04706842eb30ca791821a014 | [
"MIT"
] | 2 | 2021-11-02T20:36:25.000Z | 2022-02-22T12:16:15.000Z | PINNFramework/__init__.py | harsh1702/NeuralSolvers | 3adbe4009bacd18f8aca0a454efac7487aecf7de | [
"MIT"
] | null | null | null | PINNFramework/__init__.py | harsh1702/NeuralSolvers | 3adbe4009bacd18f8aca0a454efac7487aecf7de | [
"MIT"
] | null | null | null | from . import models
from .InitalCondition import InitialCondition
from .BoundaryCondition import PeriodicBC
from .BoundaryCondition import DirichletBC
from .BoundaryCondition import RobinBC
from .BoundaryCondition import TimeDerivativeBC
from .BoundaryCondition import NeumannBC
from .PDELoss import PDELoss
from .HPMLoss import HPMLoss
from .Logger_Interface import LoggerInterface
from .WandB_Logger import WandbLogger
from .TensorBoard_Logger import TensorBoardLogger
from .PINN import PINN
import PINNFramework.models
import PINNFramework.callbacks
# Names re-exported as the package's public API
# (controls `from PINNFramework import *`).
__all__ = [
    'InitialCondition',
    'PeriodicBC',
    'DirichletBC',
    'RobinBC',
    'NeumannBC',
    'TimeDerivativeBC',
    'PDELoss',
    'HPMLoss',
    'PINN',
    'models',
    'LoggerInterface',
    'WandbLogger',
    'TensorBoardLogger',
    'callbacks']
| 24.294118 | 49 | 0.768765 | from . import models
from .InitalCondition import InitialCondition
from .BoundaryCondition import PeriodicBC
from .BoundaryCondition import DirichletBC
from .BoundaryCondition import RobinBC
from .BoundaryCondition import TimeDerivativeBC
from .BoundaryCondition import NeumannBC
from .PDELoss import PDELoss
from .HPMLoss import HPMLoss
from .Logger_Interface import LoggerInterface
from .WandB_Logger import WandbLogger
from .TensorBoard_Logger import TensorBoardLogger
from .PINN import PINN
import PINNFramework.models
import PINNFramework.callbacks
# Names re-exported as the package's public API
# (controls `from PINNFramework import *`).
__all__ = [
    'InitialCondition',
    'PeriodicBC',
    'DirichletBC',
    'RobinBC',
    'NeumannBC',
    'TimeDerivativeBC',
    'PDELoss',
    'HPMLoss',
    'PINN',
    'models',
    'LoggerInterface',
    'WandbLogger',
    'TensorBoardLogger',
    'callbacks']
| 0 | 0 | 0 |
dbbf71d23f022fbd023d98c88ed3dc98d7be2b45 | 2,349 | py | Python | motion_primitives_py/motion_primitives_py/motion_primitive_types/euclidean_motion_primitive.py | ljarin/dispersion_motion_planning | 1c16c95b70915e58e407c1a45aa4065877fbb3de | [
"BSD-3-Clause"
] | 1 | 2022-03-04T12:03:26.000Z | 2022-03-04T12:03:26.000Z | motion_primitives_py/motion_primitives_py/motion_primitive_types/euclidean_motion_primitive.py | ljarin/dispersion_motion_planning | 1c16c95b70915e58e407c1a45aa4065877fbb3de | [
"BSD-3-Clause"
] | null | null | null | motion_primitives_py/motion_primitives_py/motion_primitive_types/euclidean_motion_primitive.py | ljarin/dispersion_motion_planning | 1c16c95b70915e58e407c1a45aa4065877fbb3de | [
"BSD-3-Clause"
] | null | null | null | from motion_primitives_py import MotionPrimitive
import numpy as np
import matplotlib.pyplot as plt
import sympy as sym
from scipy.linalg import expm
import scipy.integrate as integrate
class EuclideanMotionPrimitive(MotionPrimitive):
    """
    A motion primitive that is just a straight line, with the norm of the distance between start and goal as the cost.
    """
    @classmethod
    def from_dict(cls, dict, num_dims, max_state, subclass_specific_data={}):
        """
        Load an input representation of a motion primitive from a dictionary.
        """
        # NOTE(review): the parameter name `dict` shadows the builtin, and
        # subclass_specific_data is accepted but not forwarded to the base
        # implementation -- confirm both are intentional.
        return super().from_dict(dict, num_dims, max_state)
    def to_dict(self):
        """
        Write important attributes of motion primitive to a dictionary.
        """
        return super().to_dict()
if __name__ == "__main__":
    # Smoke test: build a straight-line primitive, round-trip it through a
    # dictionary, and plot the resulting positions.
    # problem parameters
    num_dims = 2
    control_space_q = 3
    # setup problem
    start_state = np.zeros((num_dims * control_space_q,))
    # end_state = np.random.rand(num_dims * control_space_q,)
    end_state = np.ones_like(start_state)
    end_state[0] = 2
    max_state = 1 * np.ones((control_space_q+1,))
    # polynomial
    mp = EuclideanMotionPrimitive(start_state, end_state, num_dims, max_state)
    # save
    assert(mp.is_valid)
    assert(np.array_equal(mp.end_state, end_state))
    print(mp.cost)
    dictionary = mp.to_dict()
    # reconstruct
    mp = EuclideanMotionPrimitive.from_dict(dictionary, num_dims, max_state)
    # plot
    mp.plot(position_only=True)
    plt.show()
| 32.625 | 125 | 0.680715 | from motion_primitives_py import MotionPrimitive
import numpy as np
import matplotlib.pyplot as plt
import sympy as sym
from scipy.linalg import expm
import scipy.integrate as integrate
class EuclideanMotionPrimitive(MotionPrimitive):
    """
    A motion primitive that is just a straight line, with the norm of the distance between start and goal as the cost.
    """
    def __init__(self, start_state, end_state, num_dims, max_state,
                 subclass_specific_data={}):
        # Initialize class
        # NOTE(review): mutable default argument ({}) is shared across calls;
        # safe only if it is never mutated downstream -- confirm.
        super().__init__(start_state, end_state, num_dims, max_state,
                         subclass_specific_data)
        # A straight segment is always considered feasible here.
        self.is_valid = True
        # Cost is the Euclidean distance between full start and end states.
        self.cost = np.linalg.norm(start_state-end_state)
    def get_sampled_position(self, step_size=0.1):
        # First row of the sampling array is the interpolation parameter;
        # the remaining rows are the position coordinates.
        sampling_array = self.get_sampled_states(step_size)
        return sampling_array[0, :], sampling_array[1:, :]
    def get_sampled_states(self, step_size=0.1):
        # Linear interpolation between start and end state.
        # NOTE(review): np.arange with a float step may or may not include
        # the 1.0 endpoint exactly; np.linspace would be more predictable --
        # confirm what callers expect of the sample count.
        sampling = self.start_state + (self.end_state - self.start_state)*np.arange(0, 1+step_size, step_size)[:, np.newaxis]
        sampling_array = np.vstack((np.arange(0, 1+step_size, step_size), sampling[:, :self.num_dims].T))
        return sampling_array
    @classmethod
    def from_dict(cls, dict, num_dims, max_state, subclass_specific_data={}):
        """
        Load an input representation of a motion primitive from a dictionary.
        """
        # NOTE(review): `dict` shadows the builtin and subclass_specific_data
        # is not forwarded to super().from_dict -- confirm both are intended.
        return super().from_dict(dict, num_dims, max_state)
    def to_dict(self):
        """
        Write important attributes of motion primitive to a dictionary.
        """
        return super().to_dict()
if __name__ == "__main__":
    # Smoke test: build a straight-line primitive, round-trip it through a
    # dictionary, and plot the resulting positions.
    # problem parameters
    num_dims = 2
    control_space_q = 3
    # setup problem
    start_state = np.zeros((num_dims * control_space_q,))
    # end_state = np.random.rand(num_dims * control_space_q,)
    end_state = np.ones_like(start_state)
    end_state[0] = 2
    max_state = 1 * np.ones((control_space_q+1,))
    # polynomial
    mp = EuclideanMotionPrimitive(start_state, end_state, num_dims, max_state)
    # save
    assert(mp.is_valid)
    assert(np.array_equal(mp.end_state, end_state))
    print(mp.cost)
    dictionary = mp.to_dict()
    # reconstruct
    mp = EuclideanMotionPrimitive.from_dict(dictionary, num_dims, max_state)
    # plot
    mp.plot(position_only=True)
    plt.show()
| 749 | 0 | 81 |
70717f2eca1ee1b86f087e4ddaa3247fc908c185 | 5,652 | py | Python | tests/test_plotting.py | Imperial-CMTH/koala | cd05b11be402295468be709db13a957530f66578 | [
"MIT"
] | null | null | null | tests/test_plotting.py | Imperial-CMTH/koala | cd05b11be402295468be709db13a957530f66578 | [
"MIT"
] | 9 | 2021-12-13T10:16:03.000Z | 2022-03-28T10:39:16.000Z | tests/test_plotting.py | Imperial-CMTH/koala | cd05b11be402295468be709db13a957530f66578 | [
"MIT"
] | null | null | null | import numpy as np
from matplotlib import pyplot as plt
from koala.pointsets import generate_bluenoise
from koala.voronization import generate_lattice
from koala.graph_color import edge_color
from koala.plotting import plot_lattice, plot_vertex_indices, plot_degeneracy_breaking, plot_plaquettes, line_intersection
from koala.voronization import Lattice
from koala import plotting, example_graphs
# Minimal hand-built lattice: one central vertex joined to three leaves.
h = Lattice(
    vertices = np.array([[0.5,0.5], [0.1,0.1], [0.5,0.9], [0.9,0.1]]),
    edge_indices = np.array([[0,1],[0,2],[0,3]]),
    edge_crossing = np.array([[0,0],[0,0],[0,0]]),
)
# Voronoi lattice built from a blue-noise point set.
n = 10
points = generate_bluenoise(30,n,n)
voronoi_lattice = generate_lattice(points)
# Fixtures shared by the tests below.
test_lattices = [voronoi_lattice,h]
def plotting_test(plotting_function, lattice, N):
    """
    A helper script to test plot_vertices, plot_edges and plot_plaquettes
    because they have identical interfaces.
    :param plotting_function: plotting function
    :type plotting_function: function
    :param lattice: lattice instance to plot
    :param N: number of plottable items (vertices/edges/plaquettes)
    """
    # Simplest call
    plotting_function(lattice)
    # Explicit ax
    # NOTE(review): ax is created but not passed to the plotting function --
    # confirm whether the call was meant to target the new axes.
    f, ax = plt.subplots()
    plotting_function(lattice)
    # Adding a single color
    plotting_function(lattice, color_scheme = 'green')
    # Adding a color_scheme
    plotting_function(lattice, color_scheme = ['green', 'black'], labels = np.random.randint(2, size = N))
    # Use a slice as a subset
    subset = slice(0, 10)
    plotting_function(lattice, color_scheme = ['green', 'black'], labels = np.random.randint(2, size = N), subset = subset)
    # Use a boolean subset
    subset = np.random.randint(2, size = N).astype(bool)
    plotting_function(lattice, color_scheme = ['green', 'black'], labels = np.random.randint(2, size = N), subset = subset)
    # Use a list of indices subset
    subset = np.random.randint(N, size = 5)
    plotting_function(lattice, color_scheme = ['green', 'black'], labels = np.random.randint(2, size = N), subset = subset)
| 38.189189 | 146 | 0.678698 | import numpy as np
from matplotlib import pyplot as plt
from koala.pointsets import generate_bluenoise
from koala.voronization import generate_lattice
from koala.graph_color import edge_color
from koala.plotting import plot_lattice, plot_vertex_indices, plot_degeneracy_breaking, plot_plaquettes, line_intersection
from koala.voronization import Lattice
from koala import plotting, example_graphs
# Minimal hand-built lattice: one central vertex joined to three leaves.
h = Lattice(
    vertices = np.array([[0.5,0.5], [0.1,0.1], [0.5,0.9], [0.9,0.1]]),
    edge_indices = np.array([[0,1],[0,2],[0,3]]),
    edge_crossing = np.array([[0,0],[0,0],[0,0]]),
)
# Voronoi lattice built from a blue-noise point set.
n = 10
points = generate_bluenoise(30,n,n)
voronoi_lattice = generate_lattice(points)
# Fixtures shared by the tests below.
test_lattices = [voronoi_lattice,h]
def test_plotting():
    """Smoke-test plot_lattice with various edge/vertex label combinations."""
    for g in test_lattices:
        # NOTE(review): `solvable` is unused -- confirm no assertion intended.
        solvable, solution = edge_color(g, n_colors = 3)
        point_coloring = np.random.randint(2,size=g.vertices.positions.shape[0])
        plot_lattice(g,edge_labels=solution)
        plot_lattice(g,edge_labels=solution,edge_color_scheme=['k','lightgrey','blue'])
        plot_lattice(g,vertex_labels=point_coloring)
        plot_lattice(g,edge_labels=solution,edge_color_scheme=['k','lightgrey','blue'],vertex_labels=point_coloring,vertex_color_scheme=['k','g'])
        plot_lattice(g,edge_labels=solution,edge_color_scheme=['k','lightgrey','blue'],scatter_args= {'c': 'r'} )
        plot_lattice(g,edge_labels=solution,edge_color_scheme=['k','lightgrey','blue'],scatter_args= {'c': 'r'}, edge_arrows=True)
def test_plot_vertex_indices():
    """Smoke-test vertex-index annotation and degeneracy-breaking plots."""
    for g in test_lattices:
        # NOTE(review): the loop variable g is unused -- the calls below all
        # use the fixed lattice h. Confirm whether g was intended instead.
        plot_lattice(h, edge_arrows = True, edge_index_labels = True, vertex_labels = 0)
        plot_vertex_indices(h)
        plot_degeneracy_breaking(0, h)
def test_plaquette_plotting():
    """Smoke-test plaquette rendering with a per-plaquette color scheme."""
    from matplotlib import pyplot as plt
    cmap = plt.get_cmap("tab10")
    for g in test_lattices:
        plaq_labels = np.arange(g.n_plaquettes)
        # Cycle through the 10 tab10 colors.
        color_scheme = cmap(plaq_labels % 10)
        plot_plaquettes(g, labels = plaq_labels, color_scheme = color_scheme, alpha = 0.3)
def plotting_test(plotting_function, lattice, N):
    """
    A helper script to test plot_vertices, plot_edges and plot_plaquettes
    because they have identical interfaces.
    :param plotting_function: plotting function
    :type plotting_function: function
    :param lattice: lattice instance to plot
    :param N: number of plottable items (vertices/edges/plaquettes)
    """
    # Simplest call
    plotting_function(lattice)
    # Explicit ax
    # NOTE(review): ax is created but not passed to the plotting function --
    # confirm whether the call was meant to target the new axes.
    f, ax = plt.subplots()
    plotting_function(lattice)
    # Adding a single color
    plotting_function(lattice, color_scheme = 'green')
    # Adding a color_scheme
    plotting_function(lattice, color_scheme = ['green', 'black'], labels = np.random.randint(2, size = N))
    # Use a slice as a subset
    subset = slice(0, 10)
    plotting_function(lattice, color_scheme = ['green', 'black'], labels = np.random.randint(2, size = N), subset = subset)
    # Use a boolean subset
    subset = np.random.randint(2, size = N).astype(bool)
    plotting_function(lattice, color_scheme = ['green', 'black'], labels = np.random.randint(2, size = N), subset = subset)
    # Use a list of indices subset
    subset = np.random.randint(N, size = 5)
    plotting_function(lattice, color_scheme = ['green', 'black'], labels = np.random.randint(2, size = N), subset = subset)
def test_plot_vertices():
    """Exercise plot_vertices through the shared plotting_test helper."""
    plotting_test(plotting.plot_vertices, voronoi_lattice, voronoi_lattice.n_vertices)
def test_plot_edges():
    """Exercise plot_edges, including arrow/direction options on subsets."""
    plotting_test(plotting.plot_edges, voronoi_lattice, voronoi_lattice.n_edges)
    # Test plotting lattice arrows
    subset_size = voronoi_lattice.n_edges // 2
    subset = np.random.randint(voronoi_lattice.n_edges, size = subset_size)
    # single direction
    plotting.plot_edges(voronoi_lattice, subset = subset, directions = 1)
    # directions for every edges
    plotting.plot_edges(voronoi_lattice, subset = subset,
                        directions = np.random.choice([1,-1], size = voronoi_lattice.n_edges))
    # direction only for the subset
    plotting.plot_edges(voronoi_lattice, subset = subset,
                        directions = np.random.choice([1,-1], size = subset_size))
    # arrow_head_length
    plotting.plot_edges(voronoi_lattice, subset = subset, directions = 1, arrow_head_length = 1)
def test_plot_plaquettes():
    """Exercise plot_plaquettes through the shared plotting_test helper."""
    plotting_test(plotting.plot_plaquettes, voronoi_lattice, voronoi_lattice.n_plaquettes)
def test_line_intersection():
    """Check crossing / parallel / colinear classification of segment pairs."""
    # NOTE(review): t is assigned but unused -- angles are passed directly.
    t = np.pi/6
    # Unit vector at angle t from the y-axis (sin, cos ordering).
    def angle(t): return np.array([np.sin(t), np.cos(t)])
    O = np.array([0, 0])
    # Two reference segments from the origin at 45 and 30 degrees.
    e1 = angle(np.pi/4) * 0.7
    e2 = angle(np.pi/6) * 0.7
    l1 = [O,e1]
    l2 = [O, e2]
    test_lines = np.array([l1,l2])
    # Probe segments: one crossing, parallel offsets, and (non-)overlapping
    # colinear extensions of each reference segment.
    crossing_line = np.array([[0.1,0.5], [0.3,0.4]]) + 0.05
    parallel_line = np.array([0.5*e1,e1*0.6]) + np.array([0,-0.1])
    parallel_line2 = np.array([0.5*e2,e2*0.6]) + np.array([0,+0.1])
    colinear_line_yes = np.array([0.9*e1,1.1*e1])
    colinear_line_no = np.array([1.2*e1,1.5*e1])
    colinear_line_yes2 = np.array([0.9*e2,1.1*e2])
    colinear_line_no2 = np.array([1.2*e2,1.5*e2])
    lines = np.array([parallel_line, parallel_line2, crossing_line,
                      colinear_line_yes, colinear_line_no,
                      colinear_line_yes2, colinear_line_no2,
                      ])
    cross, are_parallel, are_colinear = line_intersection(lines, test_lines, full_output = True)
    # Expected classification matrices: rows = probe lines, cols = test lines.
    cross_r = np.array([[0, 0],[0, 0],[0, 1],[1, 0],[0, 0],[0, 1],[0, 0]])
    are_parallel_r = np.array([[1, 0],[0, 1],[0, 0],[1, 0],[1, 0],[0, 1],[0, 1]])
    are_colinear_r = np.array([[0, 0],[0, 0],[0, 0],[1, 0],[1, 0],[0, 1],[0, 1]])
    assert(np.all(are_parallel == are_parallel_r))
    assert(np.all(are_colinear == are_colinear_r))
assert(np.all(cross == cross_r)) | 3,557 | 0 | 161 |
16c33973fbb2e47abab8065a2fa40e42c6b46e6d | 1,223 | py | Python | semana-02/lista-exercicio/lista-3/exercicio_3.py | larissajusten/ufsc-object-oriented-programming | 839e6abcc20580ea1a47479232c3ed3cb0153e4b | [
"MIT"
] | 6 | 2021-11-29T05:43:19.000Z | 2022-03-15T21:54:54.000Z | semana-02/lista-exercicio/lista-3/exercicio_3.py | larissajusten/ufsc-object-oriented-programming | 839e6abcc20580ea1a47479232c3ed3cb0153e4b | [
"MIT"
] | 3 | 2021-11-21T03:44:03.000Z | 2021-11-21T03:44:05.000Z | semana-02/lista-exercicio/lista-3/exercicio_3.py | larissajusten/ufsc-object-oriented-programming | 839e6abcc20580ea1a47479232c3ed3cb0153e4b | [
"MIT"
] | null | null | null | """
Exercรญcio 3. Escreva um programa que lรช duas notas de vรกrios alunos e armazena tais notas em um dicionรกrio, onde a chave รฉ o nome do aluno.
A entrada de dados deve terminar quando for lida uma string vazia como nome.
Escreva uma funรงรฃo que retorna a mรฉdia do aluno, dado seu nome.
"""
if __name__ == '__main__':
    # Read all students interactively, then report one student's average.
    dicionario = le_notas()
    nome_aluno = input('\nDigite o nome do aluno que deseja saber a nota: ')
    # Only report when at least one student was entered and the name exists.
    if dicionario and nome_aluno in dicionario.keys():
        media = retorna_nota_aluno(dicionario, nome_aluno)
        print(f'{nome_aluno}: {media}')
| 37.060606 | 144 | 0.686836 | """
Exercรญcio 3. Escreva um programa que lรช duas notas de vรกrios alunos e armazena tais notas em um dicionรกrio, onde a chave รฉ o nome do aluno.
A entrada de dados deve terminar quando for lida uma string vazia como nome.
Escreva uma funรงรฃo que retorna a mรฉdia do aluno, dado seu nome.
"""
def le_notas(dicionario=None):
    """Read (name, two grades) pairs interactively until an invalid name
    (e.g. the empty string) is entered.

    Args:
        dicionario (dict | None): Existing name -> [nota1, nota2] mapping to
            extend. A fresh dict is created when omitted.

    Returns:
        dict: Mapping of student name to their two grades.
    """
    # Avoid the mutable-default-argument pitfall: a shared default dict
    # would leak grades between independent calls to le_notas().
    if dicionario is None:
        dicionario = {}
    # Iterative loop replaces the previous unbounded recursion (one stack
    # frame per student); prompts and behaviour are unchanged.
    while True:
        nome_aluno = input('Digite o nome do aluno: ')
        if nome_aluno in dicionario:
            print('Aluno ja adicionado!')
        elif nome_aluno.isalpha():
            nota1 = float(input('Digite a primeira nota: (somente numeros) '))
            nota2 = float(input('Digite a segunda nota: (somente numeros) '))
            dicionario[nome_aluno] = [nota1, nota2]
        else:
            # Empty or non-alphabetic name ends data entry.
            return dicionario
def retorna_nota_aluno(dicionario, nome_aluno):
    """Return the average of the two stored grades for *nome_aluno*."""
    primeira = dicionario[nome_aluno][0]
    segunda = dicionario[nome_aluno][1]
    return (primeira + segunda) / 2
if __name__ == '__main__':
dicionario = le_notas()
nome_aluno = input('\nDigite o nome do aluno que deseja saber a nota: ')
if dicionario and nome_aluno in dicionario.keys():
media = retorna_nota_aluno(dicionario, nome_aluno)
print(f'{nome_aluno}: {media}')
| 584 | 0 | 46 |
0ee02dea0ae7aaa3a63717a631635f19d000de14 | 942 | py | Python | myconfig.py | AndrewKarelin/DaemonManager | 869726090bd012e7243d9a1c068881afa6d2b19c | [
"Apache-2.0"
] | null | null | null | myconfig.py | AndrewKarelin/DaemonManager | 869726090bd012e7243d9a1c068881afa6d2b19c | [
"Apache-2.0"
] | null | null | null | myconfig.py | AndrewKarelin/DaemonManager | 869726090bd012e7243d9a1c068881afa6d2b19c | [
"Apache-2.0"
] | null | null | null | import json
import os.path
import syslog
| 32.482759 | 97 | 0.619958 | import json
import os.path
import syslog
def load_config():
    """Load settings from ``<cwd>/config.json``.

    Falls back to the built-in defaults when the file is missing, unreadable
    or not valid JSON.  Either outcome is reported to syslog.  Returns the
    configuration dict.
    """
    def_config = {'flag_state': 'checked', 'host_name': 'localhost', 'port': 8080,
                  'daemon_name': r'/etc/init.d/cups', 'poll_interval': 5}
    config_file_name = os.getcwd() + '/config.json'
    try:
        with open(config_file_name, 'r') as f:
            config = json.load(f)
        syslog.syslog('Конфигурация загружена ' + config_file_name + ' ' + str(config))
    # Bug fix: the original bare ``except:`` also swallowed KeyboardInterrupt
    # and SystemExit.  Only file errors (OSError) and malformed JSON
    # (json.JSONDecodeError is a ValueError subclass) should trigger defaults.
    except (OSError, ValueError):
        config = def_config
        syslog.syslog('Конфигурация по умолчанию ' + str(config))
    return config
def save_config(config):
    """Persist *config* as JSON to ``<cwd>/config.json``.

    Success and failure are both reported to syslog; errors are not raised
    to the caller (best-effort, as in the original).
    """
    config_file_name = os.getcwd() + '/config.json'
    try:
        with open(config_file_name, 'w') as f:
            json.dump(config, f)
        syslog.syslog('Конфигурация сохранена ' + config_file_name + ' ' + str(config))
    # Bug fix: narrowed from a bare ``except:``.  Expected failure modes are
    # I/O errors (OSError), a non-serializable value (TypeError) and circular
    # references (ValueError).
    except (OSError, TypeError, ValueError):
        syslog.syslog('Ошибка сохранения конфигурации ' + config_file_name + ' ' + str(config))
| 946 | 0 | 46 |
3bb0101e77ff88be5f0f65f8e20796ab57e7b01b | 1,865 | py | Python | whitebox/grid_search.py | Jonksar/whitebox | b6824d0210e6becb7015c840201b7fb1e3a90f33 | [
"BSD-3-Clause"
] | 3 | 2018-05-28T20:29:01.000Z | 2019-08-17T11:01:20.000Z | whitebox/grid_search.py | Jonksar/whitebox | b6824d0210e6becb7015c840201b7fb1e3a90f33 | [
"BSD-3-Clause"
] | null | null | null | whitebox/grid_search.py | Jonksar/whitebox | b6824d0210e6becb7015c840201b7fb1e3a90f33 | [
"BSD-3-Clause"
] | null | null | null | """
--------------------------------------------------
File Name : grid_search.py
Creation Date : 2019-06-27 N 10:37
Last Modified : 2019-06-27 N 10:41
Created By : Joonatan Samuel
--------------------------------------------------
"""
from sklearn.model_selection import cross_validate
from pprint import pprint
# ---- Choose a bunch of models ----
import sklearn.ensemble
import sklearn.linear_model
import sklearn.neighbors
# Candidate models to benchmark; the keys are display names for the report.
classifiers = {
    'Random Forest': sklearn.ensemble.RandomForestClassifier(),
    'Logistic Regression': sklearn.linear_model.LogisticRegression(),
    'Nearest Neighbors': sklearn.neighbors.KNeighborsClassifier()
}
# Per-model hyper-parameter grids.  NOTE(review): this dict is never read in
# this file — presumably intended for the GridSearchCV rewrite mentioned in
# the TODO below; confirm before removing.
parameter_sets = {
    'Random Forest': [{'n_estimators': [1, 5, 10, 15, 25, 35],
                       'max_depth': [1, 2, 3, 5, 7, 10]}
                      ],
    'Logistic Regression': [{'penalty': ['l1', 'l2'],
                             'C': [0.1, 0.3, 1, 3, 10, 30, 100]}
                            ],
    # Very slow for some reason,
    # probably underlying implementation is slow
    #
    #'Support Vector Machine': [
    #    {'kernel': ['linear'],
    #     'C': [1, 10, 100, 1000]}
    #    ],
    'Nearest Neighbors': [{'n_neighbors': range(1, 25, 3)}]
}
# TODO: rewrite this for loop to use this:
# https://scikit-learn.org/stable/modules/generated/sklearn.model_selection.GridSearchCV.html
# Cross-validate every model and print per-fold train/test scores.
# NOTE(review): X and y are not defined anywhere in this file — as written this
# raises NameError unless they are injected externally (e.g. a notebook).
for name, model in classifiers.items():
    n_folds = 3
    scores = cross_validate(model, X, y, cv=n_folds, return_train_score=True)
    print("---- model {} ----".format(name))
    for fold in range(n_folds):
        print("Fold {} \t\t train score {:.2f}\t\t test score {:.2f}".format(
            fold,
            scores["train_score"][fold],
            scores["test_score"][fold]
        ))
    print()
| 31.610169 | 93 | 0.536729 | """
--------------------------------------------------
File Name : grid_search.py
Creation Date : 2019-06-27 N 10:37
Last Modified : 2019-06-27 N 10:41
Created By : Joonatan Samuel
--------------------------------------------------
"""
from sklearn.model_selection import cross_validate
from pprint import pprint
# ---- Choose a bunch of models ----
import sklearn.ensemble
import sklearn.linear_model
import sklearn.neighbors
# Candidate models to benchmark; the keys are display names for the report.
classifiers = {
    'Random Forest': sklearn.ensemble.RandomForestClassifier(),
    'Logistic Regression': sklearn.linear_model.LogisticRegression(),
    'Nearest Neighbors': sklearn.neighbors.KNeighborsClassifier()
}
# Per-model hyper-parameter grids.  NOTE(review): this dict is never read in
# this file — presumably intended for the GridSearchCV rewrite mentioned in
# the TODO below; confirm before removing.
parameter_sets = {
    'Random Forest': [{'n_estimators': [1, 5, 10, 15, 25, 35],
                       'max_depth': [1, 2, 3, 5, 7, 10]}
                      ],
    'Logistic Regression': [{'penalty': ['l1', 'l2'],
                             'C': [0.1, 0.3, 1, 3, 10, 30, 100]}
                            ],
    # Very slow for some reason,
    # probably underlying implementation is slow
    #
    #'Support Vector Machine': [
    #    {'kernel': ['linear'],
    #     'C': [1, 10, 100, 1000]}
    #    ],
    'Nearest Neighbors': [{'n_neighbors': range(1, 25, 3)}]
}
# TODO: rewrite this for loop to use this:
# https://scikit-learn.org/stable/modules/generated/sklearn.model_selection.GridSearchCV.html
# Cross-validate every model and print per-fold train/test scores.
# NOTE(review): X and y are not defined anywhere in this file — as written this
# raises NameError unless they are injected externally (e.g. a notebook).
for name, model in classifiers.items():
    n_folds = 3
    scores = cross_validate(model, X, y, cv=n_folds, return_train_score=True)
    print("---- model {} ----".format(name))
    for fold in range(n_folds):
        print("Fold {} \t\t train score {:.2f}\t\t test score {:.2f}".format(
            fold,
            scores["train_score"][fold],
            scores["test_score"][fold]
        ))
    print()
| 0 | 0 | 0 |
fbe665d53ab00f81085628189537e8ede858b943 | 6,887 | py | Python | syntext/inline.py | dmulholland/syntext | 69c766a201f36d2975d574df88ad6d550112b034 | [
"0BSD"
] | 53 | 2015-08-25T17:02:16.000Z | 2017-09-07T19:11:40.000Z | syntext/inline.py | dmulholland/monk | 69c766a201f36d2975d574df88ad6d550112b034 | [
"0BSD"
] | 1 | 2016-08-12T00:48:20.000Z | 2016-08-12T21:20:06.000Z | syntext/inline.py | dmulholland/monk | 69c766a201f36d2975d574df88ad6d550112b034 | [
"0BSD"
] | null | null | null | # ------------------------------------------------------------------------------
# Functions for parsing and rendering inline markup.
# ------------------------------------------------------------------------------
import html
import hashlib
import re
# ------------------------------------------------------------------------------
# Regular expressions for identifying inline markup.
# ------------------------------------------------------------------------------
# *x*
re_italic_sc = re.compile(r"\*(\S)\*")
# *foo bar*
re_italic_mc = re.compile(r"\*(\S.*?\S)\*", re.DOTALL)
# **x**
re_bold_sc = re.compile(r"\*{2}(\S)\*{2}")
# **foo bar**
re_bold_mc = re.compile(r"\*{2}(\S.*?\S)\*{2}", re.DOTALL)
# ***x***
re_bolditalic_sc = re.compile(r"\*{3}(\S)\*{3}")
# ***foo bar***
re_bolditalic_mc = re.compile(r"\*{3}(\S.*?\S)\*{3}", re.DOTALL)
# `foo bar`
re_backticks = re.compile(r"`(.+?)`", re.DOTALL)
# [link text](http://example.com)
re_link = re.compile(r"\[([^\]]+)\]\(([^\)]+)\)")
# [link text][ref]
re_ref_link = re.compile(r"\[([^\]]+)\]\[([^\]]*)\]")
# 
re_img = re.compile(r"!\[([^\]]*)\]\(([^\)]+)\)")
# ![alt text][ref]
re_ref_img = re.compile(r"!\[([^\]]*)\]\[([^\]]*)\]")
# [^ref] or [^]
re_footnote_super = re.compile(r"\[\^([^\]]*?)\]")
# [fn:ref] or [fn]
re_footnote_span = re.compile(r"\[fn:?([^\]]*?)\]")
# & '
re_entity = re.compile(r"&[#a-zA-Z0-9]+;")
# html tags: <span>, </span>, <!-- comment -->, etc.
re_html_tag = re.compile(r"<([a-zA-Z/][^>]*?|!--.*?--)>")
# <http://example.com>
re_bracketed_url = re.compile(r"<((?:https?|ftp)://[^>]+)>")
# http://example.com
re_bare_url = re.compile(r"""
(^|\s)
(https?|ftp)
(://[-A-Z0-9+&@#/%?=~_|\[\]\(\)!:,\.;]*[-A-Z0-9+&@#/%=~_|\[\]])
($|\W)
""", re.VERBOSE | re.MULTILINE | re.IGNORECASE)
# n-dash
re_ndash = re.compile(r"((?<=\s)|\b|^)--(?=[ ]|\b|$)", re.MULTILINE)
# m-dash
re_mdash = re.compile(r"((?<=\s)|\b|^)---(?=[ ]|\b|$)", re.MULTILINE)
# x^{2}
re_superscript = re.compile(r"\^\{(.+?)\}")
# H_{2}O
re_subscript = re.compile(r"_\{(.+?)\}")
# ``foo bar``
re_verbatim = re.compile(r"``(.+?)``", re.DOTALL)
# ------------------------------------------------------------------------------
# Renderers.
# ------------------------------------------------------------------------------
# Entry point.
# Hashes a string, stores it as a {digest: string} pair in 'hashes', and
# returns the digest.
| 27.882591 | 80 | 0.543923 | # ------------------------------------------------------------------------------
# Functions for parsing and rendering inline markup.
# ------------------------------------------------------------------------------
import html
import hashlib
import re
# ------------------------------------------------------------------------------
# Regular expressions for identifying inline markup.
# ------------------------------------------------------------------------------
# *x*
re_italic_sc = re.compile(r"\*(\S)\*")
# *foo bar*
re_italic_mc = re.compile(r"\*(\S.*?\S)\*", re.DOTALL)
# **x**
re_bold_sc = re.compile(r"\*{2}(\S)\*{2}")
# **foo bar**
re_bold_mc = re.compile(r"\*{2}(\S.*?\S)\*{2}", re.DOTALL)
# ***x***
re_bolditalic_sc = re.compile(r"\*{3}(\S)\*{3}")
# ***foo bar***
re_bolditalic_mc = re.compile(r"\*{3}(\S.*?\S)\*{3}", re.DOTALL)
# `foo bar`
re_backticks = re.compile(r"`(.+?)`", re.DOTALL)
# [link text](http://example.com)
re_link = re.compile(r"\[([^\]]+)\]\(([^\)]+)\)")
# [link text][ref]
re_ref_link = re.compile(r"\[([^\]]+)\]\[([^\]]*)\]")
# 
re_img = re.compile(r"!\[([^\]]*)\]\(([^\)]+)\)")
# ![alt text][ref]
re_ref_img = re.compile(r"!\[([^\]]*)\]\[([^\]]*)\]")
# [^ref] or [^]
re_footnote_super = re.compile(r"\[\^([^\]]*?)\]")
# [fn:ref] or [fn]
re_footnote_span = re.compile(r"\[fn:?([^\]]*?)\]")
# & '
re_entity = re.compile(r"&[#a-zA-Z0-9]+;")
# html tags: <span>, </span>, <!-- comment -->, etc.
re_html_tag = re.compile(r"<([a-zA-Z/][^>]*?|!--.*?--)>")
# <http://example.com>
re_bracketed_url = re.compile(r"<((?:https?|ftp)://[^>]+)>")
# http://example.com
re_bare_url = re.compile(r"""
(^|\s)
(https?|ftp)
(://[-A-Z0-9+&@#/%?=~_|\[\]\(\)!:,\.;]*[-A-Z0-9+&@#/%=~_|\[\]])
($|\W)
""", re.VERBOSE | re.MULTILINE | re.IGNORECASE)
# n-dash
re_ndash = re.compile(r"((?<=\s)|\b|^)--(?=[ ]|\b|$)", re.MULTILINE)
# m-dash
re_mdash = re.compile(r"((?<=\s)|\b|^)---(?=[ ]|\b|$)", re.MULTILINE)
# x^{2}
re_superscript = re.compile(r"\^\{(.+?)\}")
# H_{2}O
re_subscript = re.compile(r"_\{(.+?)\}")
# ``foo bar``
re_verbatim = re.compile(r"``(.+?)``", re.DOTALL)
# ------------------------------------------------------------------------------
# Renderers.
# ------------------------------------------------------------------------------
# Entry point.
def render(text, meta):
    """Render all inline markup in *text* and return the resulting HTML.

    *meta* supplies link references ('linkrefs'), footnote state and rendering
    flags ('context'); some phases mutate it.  The phase order below is
    significant and must not be rearranged.
    """
    # Placeholder table filled by the helpers: SHA1 digest -> protected span.
    hashes = {}
    # Phase 1: pull out spans that must escape HTML-escaping and any further
    # markup processing; each is replaced in the text by its digest.
    text = render_verbatim(text, hashes)
    text = render_backticks(text, hashes)
    text = render_bracketed_urls(text, hashes)
    text = render_inline_html(text, hashes)
    text = render_html_entities(text, hashes)
    text = render_dashes(text, hashes)
    # Phase 2: escape what remains (second arg False = leave quotes alone).
    text = html.escape(text, False)
    # Phase 3: expand the markup proper.  Bold+italic runs before bold and
    # italic so that ***x*** is not consumed by the ** / * passes.
    text = render_bolditalic(text)
    text = render_bold(text)
    text = render_italic(text)
    text = render_footnotes(text, meta)
    text = render_images(text)
    text = render_ref_images(text, meta)
    text = render_links(text)
    text = render_ref_links(text, meta)
    text = render_superscripts(text)
    text = render_subscripts(text)
    if 'nl2br' in meta.get('context', []):
        text = text.replace('\n', '<br>\n')
    # Phase 4: substitute the protected spans back in place of their digests.
    for key, value in hashes.items():
        text = text.replace(key, value)
    return text
# Hashes a string, stores it as a {digest: string} pair in 'hashes', and
# returns the digest.
def hashstr(text, hashes):
    """Record *text* in *hashes* under its SHA1 hex digest; return the digest."""
    key = hashlib.sha1(text.encode()).hexdigest()
    hashes[key] = text
    return key
def render_backticks(text, hashes):
    """Replace `...` spans with hashed, HTML-escaped <code> placeholders."""
    def replace(m):
        body = html.escape(m.group(1))
        return hashstr('<code>%s</code>' % body, hashes)
    return re_backticks.sub(replace, text)
def render_bracketed_urls(text, hashes):
    """Replace <http://...> URLs with hashed anchor-tag placeholders."""
    def replace(m):
        target = m.group(1)
        anchor = '<a href="%s">%s</a>' % (target, target)
        return hashstr(anchor, hashes)
    return re_bracketed_url.sub(replace, text)
def render_inline_html(text, hashes):
    """Protect raw HTML tags by swapping each for its hash placeholder."""
    def replace(m):
        return hashstr(m.group(), hashes)
    return re_html_tag.sub(replace, text)
def render_verbatim(text, hashes):
    """Protect ``...`` spans (keeping only the inner text) via placeholders."""
    def replace(m):
        return hashstr(m.group(1), hashes)
    return re_verbatim.sub(replace, text)
def render_html_entities(text, hashes):
    """Protect HTML entities (e.g. &amp;) from escaping via placeholders."""
    def replace(m):
        return hashstr(m.group(), hashes)
    return re_entity.sub(replace, text)
def render_dashes(text, hashes):
    """Replace -- and --- with hashed &ndash; / &mdash; placeholders."""
    for regex, entity in ((re_ndash, "&ndash;"), (re_mdash, "&mdash;")):
        text = regex.sub(hashstr(entity, hashes), text)
    return text
def render_bold(text):
    """Convert **...** spans (single- and multi-char forms) to <b> tags."""
    for regex in (re_bold_sc, re_bold_mc):
        text = regex.sub(r"<b>\1</b>", text)
    return text
def render_italic(text):
    """Convert *...* spans (single- and multi-char forms) to <i> tags."""
    for regex in (re_italic_sc, re_italic_mc):
        text = regex.sub(r"<i>\1</i>", text)
    return text
def render_bolditalic(text):
    """Convert ***...*** spans to nested <b><i> tags."""
    for regex in (re_bolditalic_sc, re_bolditalic_mc):
        text = regex.sub(r"<b><i>\1</i></b>", text)
    return text
def render_superscripts(text):
    """Convert x^{...} notation into <sup> markup."""
    replacement = r"<sup>\1</sup>"
    return re_superscript.sub(replacement, text)
def render_subscripts(text):
    """Convert x_{...} notation into <sub> markup."""
    replacement = r"<sub>\1</sub>"
    return re_subscript.sub(replacement, text)
def render_images(text):
    """Expand ![alt](url) into an <img> tag; the alt text is HTML-escaped."""
    def replace(m):
        alt, src = html.escape(m.group(1)), m.group(2)
        return '<img alt="{}" src="{}">'.format(alt, src)
    return re_img.sub(replace, text)
def render_ref_images(text, meta):
    """Expand reference-style images ![alt][ref] using meta['linkrefs'].

    An empty ref falls back to the lower-cased alt text; unknown refs yield
    an empty src, matching the original behaviour.
    """
    refs = meta.get('linkrefs', {})
    def replace(m):
        alt = html.escape(m.group(1))
        key = m.group(2).lower() if m.group(2) else alt.lower()
        url, title = refs.get(key, ('', ''))
        if not title:
            return '<img alt="%s" src="%s">' % (alt, url)
        return '<img alt="%s" src="%s" title="%s">' % (alt, url, html.escape(title))
    return re_ref_img.sub(replace, text)
def render_links(text):
    """Expand [text](url) into an anchor tag (no escaping applied here)."""
    def replace(m):
        label, url = m.group(1), m.group(2)
        return '<a href="{}">{}</a>'.format(url, label)
    return re_link.sub(replace, text)
def render_ref_links(text, meta):
    """Expand reference-style links [text][ref] using meta['linkrefs'].

    An empty ref falls back to the lower-cased link text; unknown refs yield
    an empty href, matching the original behaviour.
    """
    refs = meta.get('linkrefs', {})
    def replace(m):
        label = m.group(1)
        key = m.group(2).lower() if m.group(2) else label.lower()
        url, title = refs.get(key, ('', ''))
        if not title:
            return '<a href="%s">%s</a>' % (url, label)
        return '<a href="%s" title="%s">%s</a>' % (url, html.escape(title), label)
    return re_ref_link.sub(replace, text)
def render_footnotes(text, meta):
    """Render [^ref]/[fn:ref] footnote markers as linked anchors.

    Superscript markers become <sup> elements, span markers <span>.  Markers
    with no explicit ref are numbered from an auto-incrementing counter kept
    in meta['footnote-ref-index'].
    """
    def callback(match):
        if match.group(1):
            ref = match.group(1)
        else:
            # Anonymous marker: take the next auto index (starting at 1).
            ref = meta.setdefault('footnote-ref-index', 1)
            meta['footnote-ref-index'] += 1
        link = f'<a href="#fn:{ref}">{ref}</a>'
        # ``tag`` is a late-binding closure variable, rebound below before
        # each .sub() pass.
        return f'<{tag} class="footnote" id="fnref:{ref}">{link}</{tag}>'
    tag = "sup"
    text = re_footnote_super.sub(callback, text)
    tag = "span"
    text = re_footnote_span.sub(callback, text)
    return text
| 4,001 | 0 | 412 |
2b2e18b2e9fa5009f200fa275921bf0097ed6a5b | 698 | py | Python | bin/fresno.py | dmpayton/FresnoPython | d606b1d3604ba25ab0f3d178173ce9d6c05be1b2 | [
"MIT"
] | 2 | 2016-05-25T02:38:02.000Z | 2016-05-25T02:39:44.000Z | bin/fresno.py | FresnoPython/FresnoPython | d606b1d3604ba25ab0f3d178173ce9d6c05be1b2 | [
"MIT"
] | null | null | null | bin/fresno.py | FresnoPython/FresnoPython | d606b1d3604ba25ab0f3d178173ce9d6c05be1b2 | [
"MIT"
] | 1 | 2017-05-21T14:01:20.000Z | 2017-05-21T14:01:20.000Z | #!/usr/bin/env python
import argparse
import FresnoPython
# CLI entry point: parse the action flags and dispatch to main().
if __name__ == '__main__':
    # NOTE(review): this description is left over from the argparse docs
    # example and does not describe this tool.
    parser = argparse.ArgumentParser(description='Process some integers.')
    parser.add_argument('--website', action='store_true', help='Open the website.')
    parser.add_argument('--map', action='store_true', help='Open the location on Google Maps.')
    parser.add_argument('--twitter', action='store_true', help='Open the twitter account.')
    args = parser.parse_args()
    main(args)  # NOTE(review): main() is not defined in this copy of the file — confirm
| 26.846154 | 95 | 0.690544 | #!/usr/bin/env python
import argparse
import FresnoPython
def main(args):
    """Open each resource whose flag was set, then print the group's message."""
    for flag, opener in (('website', 'open_website'),
                         ('map', 'open_map'),
                         ('twitter', 'open_twitter')):
        if getattr(args, flag):
            getattr(FresnoPython, opener)()
    print(FresnoPython.message())
# CLI entry point: parse the action flags and dispatch to main().
if __name__ == '__main__':
    # Bug fix: the original description ('Process some integers.') was left
    # over from the argparse documentation example; describe the actual tool.
    parser = argparse.ArgumentParser(description='FresnoPython user-group helper.')
    parser.add_argument('--website', action='store_true', help='Open the website.')
    parser.add_argument('--map', action='store_true', help='Open the location on Google Maps.')
    parser.add_argument('--twitter', action='store_true', help='Open the twitter account.')
    args = parser.parse_args()
    main(args)
| 194 | 0 | 23 |
51e65d6a89f738957bf092dfde8598b3c7f0834b | 992 | py | Python | source/py/tests/test_call.py | shakfu/pymax | 67dca5990581d91ffcedf800e585e87646ab94d4 | [
"CC0-1.0"
] | 25 | 2020-08-06T12:38:07.000Z | 2022-03-23T17:35:09.000Z | source/py/tests/test_call.py | shakfu/pymax | 67dca5990581d91ffcedf800e585e87646ab94d4 | [
"CC0-1.0"
] | 7 | 2021-04-02T02:58:56.000Z | 2022-03-31T22:58:19.000Z | source/py/tests/test_call.py | shakfu/pymax | 67dca5990581d91ffcedf800e585e87646ab94d4 | [
"CC0-1.0"
] | 3 | 2021-04-04T05:47:07.000Z | 2021-06-26T03:30:02.000Z | # dispatch.py
"""
patterns to parse:
1. func arg1 arg2 key1=val1 key2=val2
2. func arg1 arg2 key1:val1 key2:val2
3. func arg1 arg2 dict(key1=val1, key2=val2)
4. obj.func arg1 arg2 key1=val1 key2=val2
"""
obj = Klass()
gdict = {'func': func, 'obj': obj}
s1 = 'func arg1 arg2 key1=val1 key2=val2'
head = lambda x: x[:1]
tail = lambda x: x[1:]
| 15.746032 | 44 | 0.542339 | # dispatch.py
"""
patterns to parse:
1. func arg1 arg2 key1=val1 key2=val2
2. func arg1 arg2 key1:val1 key2:val2
3. func arg1 arg2 dict(key1=val1, key2=val2)
4. obj.func arg1 arg2 key1=val1 key2=val2
"""
def func(*args, **kwargs):
    """Echo back the call arguments as an (args-tuple, kwargs-dict) pair."""
    captured = (args, kwargs)
    return captured
class Klass:
    """Target object used to exercise ``obj.func`` dispatch parsing."""

    def func(self, *args, **kwargs):
        """Echo back the call arguments as an (args-tuple, kwargs-dict) pair."""
        return (args, kwargs)
obj = Klass()
gdict = {'func': func, 'obj': obj}
s1 = 'func arg1 arg2 key1=val1 key2=val2'
head = lambda x: x[:1]
tail = lambda x: x[1:]
def process_s1(s, sep='='):
    """Tokenize a call string of the form ``func arg1 arg2 key1=val1``.

    Splits the string into the callable name, positional tokens and ``k=v``
    tokens (detected via *sep*), then tries to coerce dotted positional
    tokens to numbers.

    Bug fixes versus the original: it referenced an undefined name
    ``mapping_symbol`` (NameError on every call) instead of the *sep*
    parameter, and it caught TypeError where ``float()``/``int()`` raise
    ValueError on bad strings (so ``int('2.5')`` crashed the function).

    TODO: like the original, the parsed pieces are currently discarded and
    the function returns None — it looks unfinished.
    """
    args = []
    kwargs = []  # raw 'key=value' strings, not yet split
    elems = s.split()
    f = elems[0]            # callable name (head of the token list)
    for elem in elems[1:]:  # remaining tokens
        if sep in elem:     # was: undefined ``mapping_symbol``
            kwargs.append(elem)
        else:
            args.append(elem)
    targs = []
    for arg in args:
        if '.' in arg:
            try:
                targs.append(float(arg))
            except ValueError:  # was TypeError, which float() never raises here
                pass
            try:
                targs.append(int(arg))
            except ValueError:  # int('2.5') raises ValueError
                pass
| 538 | -9 | 95 |
ee2bd645634ae68d2c49bd15d04487f14d49ec22 | 1,680 | py | Python | search.py | Kratosc3/GittigidiyorEntegre | 17f011cfec49b3adb2b05f1b3ea559c7a7495c29 | [
"MIT"
] | null | null | null | search.py | Kratosc3/GittigidiyorEntegre | 17f011cfec49b3adb2b05f1b3ea559c7a7495c29 | [
"MIT"
] | null | null | null | search.py | Kratosc3/GittigidiyorEntegre | 17f011cfec49b3adb2b05f1b3ea559c7a7495c29 | [
"MIT"
] | null | null | null | from zeep import Client
from zeep.transports import Transport
from zeep import xsd
from zeep import helpers
import xmltodict
import json
| 45.405405 | 441 | 0.589286 | from zeep import Client
from zeep.transports import Transport
from zeep import xsd
from zeep import helpers
import xmltodict
import json
class search:
    """One-shot Gittigidiyor SOAP product search.

    The request is issued from ``__init__``; the parsed response payload
    (or None on any failure) is exposed as ``self.asJson``.
    """

    def __init__(self, keyword='', criteria=None, startOffSet=1, rowCount=1,
                 includeDescription=False, withData=False, orderBy='',
                 lang='tr', session=None):
        """Run the search immediately and store the result in ``self.asJson``."""
        # Bug fix: the original used a mutable dict as the default for
        # ``criteria``, shared between all instances; build it per call.
        if criteria is None:
            criteria = {'format': '', 'freeshipping': '', 'startFromOne': '',
                        'catalogOption': '', 'newProduct': '', 'minPrice': '',
                        'maxPrice': '', 'city': '', 'runOutItems': '',
                        'seller': '', 'categoryCode': '', 'catalogId': '',
                        'categorySpecs': {'categorySpec': [{'name': ''}, {'value': ''}]}}
        # Zeep SOAP client bound to the (dev) search endpoint.
        client = Client(wsdl="https://dev.gittigidiyor.com:8443/listingapi/ws/SearchService?wsdl",
                        transport=Transport(session=session))
        # Binding QName in the '{namespace}LocalName' form zeep expects.
        service = client.create_service(
            '{http://search.anonymous.ws.listingapi.gg.com}SearchServiceBinding',
            'http://dev.gittigidiyor.com:8080/listingapi/ws/SearchService')
        with client.settings(raw_response=True):
            try:
                response = helpers.serialize_object(
                    service.search(keyword, criteria, startOffSet, rowCount,
                                   includeDescription, withData, orderBy,
                                   lang).content.decode('utf-8'),
                    dict)
                # Parse the SOAP XML and normalise it to plain dicts/lists
                # via a json round-trip.
                jsondata = xmltodict.parse(response)
                jsonload = json.loads(json.dumps(jsondata))
                self.asJson = jsonload['env:Envelope']['env:Body']['ns0:searchResponse']['return']
            except Exception:
                # Bug fix: narrowed from a bare ``except`` (which also caught
                # KeyboardInterrupt/SystemExit); any transport or parse
                # failure still degrades to asJson = None, the original
                # best-effort contract.  The redundant ``pass`` was removed.
                self.asJson = None
| 1,417 | -8 | 52 |
d3dc30343b3b3ac3fb15b44ae10a2bd7020d8e60 | 23,363 | py | Python | phply/pythonast.py | alex4men/phply | bbc41656e56401ae754c3e0346ef6b6a1a56c322 | [
"BSD-3-Clause"
] | null | null | null | phply/pythonast.py | alex4men/phply | bbc41656e56401ae754c3e0346ef6b6a1a56c322 | [
"BSD-3-Clause"
] | null | null | null | phply/pythonast.py | alex4men/phply | bbc41656e56401ae754c3e0346ef6b6a1a56c322 | [
"BSD-3-Clause"
] | null | null | null | from . import phpast as php
import ast as py
unary_ops = {
'~': py.Invert,
'!': py.Not,
'+': py.UAdd,
'-': py.USub,
}
bool_ops = {
'&&': py.And,
'||': py.Or,
'and': py.And,
'or': py.Or,
}
cmp_ops = {
'!=': py.NotEq,
'!==': py.NotEq,
'<>': py.NotEq,
'<': py.Lt,
'<=': py.LtE,
'==': py.Eq,
'===': py.Eq,
'>': py.Gt,
'>=': py.GtE,
}
binary_ops = {
'+': py.Add,
'-': py.Sub,
'*': py.Mult,
'/': py.Div,
'%': py.Mod,
'<<': py.LShift,
'>>': py.RShift,
'|': py.BitOr,
'&': py.BitAnd,
'^': py.BitXor,
}
casts = {
'double': 'float',
'string': 'str',
'array': 'list',
}
| 41.423759 | 181 | 0.448102 | from . import phpast as php
import ast as py
unary_ops = {
'~': py.Invert,
'!': py.Not,
'+': py.UAdd,
'-': py.USub,
}
bool_ops = {
'&&': py.And,
'||': py.Or,
'and': py.And,
'or': py.Or,
}
cmp_ops = {
'!=': py.NotEq,
'!==': py.NotEq,
'<>': py.NotEq,
'<': py.Lt,
'<=': py.LtE,
'==': py.Eq,
'===': py.Eq,
'>': py.Gt,
'>=': py.GtE,
}
binary_ops = {
'+': py.Add,
'-': py.Sub,
'*': py.Mult,
'/': py.Div,
'%': py.Mod,
'<<': py.LShift,
'>>': py.RShift,
'|': py.BitOr,
'&': py.BitAnd,
'^': py.BitXor,
}
casts = {
'double': 'float',
'string': 'str',
'array': 'list',
}
def to_stmt(pynode):
    """Wrap an expression node in ast.Expr so it can stand as a statement.

    Statement nodes are returned unchanged; the wrapper copies the source
    location of the wrapped expression.
    """
    if isinstance(pynode, py.stmt):
        return pynode
    return py.Expr(pynode,
                   lineno=pynode.lineno,
                   col_offset=pynode.col_offset)
def from_phpast(node):
if node is None:
return py.Pass(**pos(node))
if isinstance(node, str):
return py.Str(node, **pos(node))
if isinstance(node, (int, float)):
return py.Num(node, **pos(node))
if isinstance(node, php.Array):
if node.nodes:
if node.nodes[0].key is not None:
keys = []
values = []
for elem in node.nodes:
keys.append(from_phpast(elem.key))
values.append(from_phpast(elem.value))
return py.Dict(keys, values, **pos(node))
else:
return py.List([from_phpast(x.value) for x in node.nodes],
py.Load(**pos(node)),
**pos(node))
else:
return py.List([], py.Load(**pos(node)), **pos(node))
if isinstance(node, php.InlineHTML):
args = [py.Str(node.data, **pos(node))]
return py.Call(py.Name('inline_html',
py.Load(**pos(node)),
**pos(node)),
args, [], None, None,
**pos(node))
if isinstance(node, php.Echo):
return py.Call(py.Name('print', py.Load(**pos(node)),
**pos(node)),
list(map(from_phpast, node.nodes)),
[], None, None,
**pos(node))
if isinstance(node, php.Print):
return py.Print(None, [from_phpast(node.node)], True, **pos(node))
if isinstance(node, php.Exit):
args = []
if node.expr is not None:
args.append(from_phpast(node.expr))
return py.Raise(py.Call(py.Name('Exit', py.Load(**pos(node)),
**pos(node)),
args, [], None, None, **pos(node)),
None, None, **pos(node))
if isinstance(node, php.Return):
if node.node is None:
return py.Return(None, **pos(node))
else:
return py.Return(from_phpast(node.node), **pos(node))
if isinstance(node, php.Break):
assert node.node is None, 'level on break not supported'
return py.Break(**pos(node))
if isinstance(node, php.Continue):
assert node.node is None, 'level on continue not supported'
return py.Continue(**pos(node))
if isinstance(node, php.Silence):
return from_phpast(node.expr)
if isinstance(node, php.Block):
return from_phpast(php.If(1, node, [], None, lineno=node.lineno))
if isinstance(node, php.Unset):
return py.Delete(list(map(from_phpast, node.nodes)), **pos(node))
if isinstance(node, php.IsSet) and len(node.nodes) == 1:
if isinstance(node.nodes[0], php.ArrayOffset):
return py.Compare(from_phpast(node.nodes[0].expr),
[py.In(**pos(node))],
[from_phpast(node.nodes[0].node)],
**pos(node))
if isinstance(node.nodes[0], php.ObjectProperty):
return py.Call(py.Name('hasattr', py.Load(**pos(node)),
**pos(node)),
[from_phpast(node.nodes[0].node),
from_phpast(node.nodes[0].name)],
[], None, None, **pos(node))
if isinstance(node.nodes[0], php.Variable):
return py.Compare(py.Str(node.nodes[0].name[1:], **pos(node)),
[py.In(**pos(node))],
[py.Call(py.Name('vars', py.Load(**pos(node)),
**pos(node)),
[], [], None, None, **pos(node))],
**pos(node))
return py.Compare(from_phpast(node.nodes[0]),
[py.IsNot(**pos(node))],
[py.Name('None', py.Load(**pos(node)), **pos(node))],
**pos(node))
if isinstance(node, php.Empty):
return from_phpast(php.UnaryOp('!',
php.BinaryOp('&&',
php.IsSet([node.expr],
lineno=node.lineno),
node.expr,
lineno=node.lineno),
lineno=node.lineno))
if isinstance(node, php.Assignment):
if (isinstance(node.node, php.ArrayOffset)
and node.node.expr is None):
return py.Call(py.Attribute(from_phpast(node.node.node),
'append', py.Load(**pos(node)),
**pos(node)),
[from_phpast(node.expr)],
[], None, None, **pos(node))
if (isinstance(node.node, php.ObjectProperty)
and isinstance(node.node.name, php.BinaryOp)):
return to_stmt(py.Call(py.Name('setattr', py.Load(**pos(node)),
**pos(node)),
[from_phpast(node.node.node),
from_phpast(node.node.name),
from_phpast(node.expr)],
[], None, None, **pos(node)))
return py.Assign([store(from_phpast(node.node))],
from_phpast(node.expr),
**pos(node))
if isinstance(node, php.ListAssignment):
return py.Assign([py.Tuple(list(map(store, list(map(from_phpast, node.nodes)))),
py.Store(**pos(node)),
**pos(node))],
from_phpast(node.expr),
**pos(node))
if isinstance(node, php.AssignOp):
return from_phpast(php.Assignment(node.left,
php.BinaryOp(node.op[:-1],
node.left,
node.right,
lineno=node.lineno),
False,
lineno=node.lineno))
if isinstance(node, (php.PreIncDecOp, php.PostIncDecOp)):
return from_phpast(php.Assignment(node.expr,
php.BinaryOp(node.op[0],
node.expr,
1,
lineno=node.lineno),
False,
lineno=node.lineno))
if isinstance(node, php.ArrayOffset):
return py.Subscript(from_phpast(node.node),
py.Index(from_phpast(node.expr), **pos(node)),
py.Load(**pos(node)),
**pos(node))
if isinstance(node, php.ObjectProperty):
if isinstance(node.name, (php.Variable, php.BinaryOp)):
return py.Call(py.Name('getattr', py.Load(**pos(node)),
**pos(node)),
[from_phpast(node.node),
from_phpast(node.name)],
[], None, None, **pos(node))
return py.Attribute(from_phpast(node.node),
node.name,
py.Load(**pos(node)),
**pos(node))
if isinstance(node, php.Constant):
name = node.name
if name.lower() == 'true': name = 'True'
if name.lower() == 'false': name = 'False'
if name.lower() == 'null': name = 'None'
return py.Name(name, py.Load(**pos(node)), **pos(node))
if isinstance(node, php.Variable):
name = node.name[1:]
if name == 'this': name = 'self'
return py.Name(name, py.Load(**pos(node)), **pos(node))
if isinstance(node, php.Global):
return py.Global([var.name[1:] for var in node.nodes], **pos(node))
if isinstance(node, php.Include):
once = py.Name('True' if node.once else 'False',
py.Load(**pos(node)),
**pos(node))
return py.Call(py.Name('include', py.Load(**pos(node)),
**pos(node)),
[from_phpast(node.expr), once],
[], None, None, **pos(node))
if isinstance(node, php.Require):
once = py.Name('True' if node.once else 'False',
py.Load(**pos(node)),
**pos(node))
return py.Call(py.Name('require', py.Load(**pos(node)),
**pos(node)),
[from_phpast(node.expr), once],
[], None, None, **pos(node))
if isinstance(node, php.UnaryOp):
op = unary_ops.get(node.op)
assert op is not None, "unknown unary operator: '%s'" % node.op
op = op(**pos(node))
return py.UnaryOp(op, from_phpast(node.expr), **pos(node))
if isinstance(node, php.BinaryOp):
if node.op == '.':
pattern, pieces = build_format(node.left, node.right)
if pieces:
return py.BinOp(py.Str(pattern, **pos(node)),
py.Mod(**pos(node)),
py.Tuple(list(map(from_phpast, pieces)),
py.Load(**pos(node)),
**pos(node)),
**pos(node))
else:
return py.Str(pattern % (), **pos(node))
if node.op in bool_ops:
op = bool_ops[node.op](**pos(node))
return py.BoolOp(op, [from_phpast(node.left),
from_phpast(node.right)], **pos(node))
if node.op in cmp_ops:
op = cmp_ops[node.op](**pos(node))
return py.Compare(from_phpast(node.left), [op],
[from_phpast(node.right)],
**pos(node))
op = binary_ops.get(node.op)
if node.op == 'instanceof':
return py.Call(func=py.Name(id='isinstance', ctx=py.Load(**pos(node))), args=[from_phpast(node.left), from_phpast(node.right)], keywords=[], starargs=None, kwargs=None )
assert op is not None, "unknown binary operator: '%s'" % node.op
op = op(**pos(node))
return py.BinOp(from_phpast(node.left),
op,
from_phpast(node.right),
**pos(node))
if isinstance(node, php.TernaryOp):
return py.IfExp(from_phpast(node.expr),
from_phpast(node.iftrue),
from_phpast(node.iffalse),
**pos(node))
if isinstance(node, php.Cast):
return py.Call(py.Name(casts.get(node.type, node.type),
py.Load(**pos(node)),
**pos(node)),
[from_phpast(node.expr)],
[], None, None, **pos(node))
if isinstance(node, php.If):
orelse = []
if node.else_:
for else_ in map(from_phpast, deblock(node.else_.node)):
orelse.append(to_stmt(else_))
for elseif in reversed(node.elseifs):
orelse = [py.If(from_phpast(elseif.expr),
list(map(to_stmt, list(map(from_phpast, deblock(elseif.node))))),
orelse, **pos(node))]
return py.If(from_phpast(node.expr),
list(map(to_stmt, list(map(from_phpast, deblock(node.node))))),
orelse, **pos(node))
if isinstance(node, php.Switch):
ifexpr = py.Compare(from_phpast(node.expr), [py.Eq(**pos(node))],
[from_phpast(node.nodes[0].expr)],
**pos(node))
def recurseIfs(conds, bodies, current):
assert (len(conds) == len(bodies)), \
'Length of conds %r and bodies %r mismatch' % (len(conds), len(bodies))
if current >= len(conds):
return []
elif current == len(conds)-1:
return bodies[current]
return py.If(conds[current],
bodies[current],
recurseIfs(conds, bodies, current+1),
**pos(node))
elseifs_conditions = [py.Compare(from_phpast(node.expr), [py.Eq(**pos(node))],
[from_phpast(subnode.expr)],
**pos(node))
for subnode in node.nodes[1:]]
elseifs_bodies = [list(map(to_stmt, list(map(from_phpast, subnode.nodes))))
for subnode in node.nodes[1:]]
elseifs = recurseIfs(elseifs_conditions, elseifs_bodies, 0)
return py.If(ifexpr,
list(map(to_stmt, list(map(from_phpast, node.nodes[0].nodes)))),
elseifs,
**pos(node))
if isinstance(node, php.For):
assert node.test is None or len(node.test) == 1, \
'only a single test is supported in for-loops'
return from_phpast(php.Block((node.start or [])
+ [php.While(node.test[0] if node.test else 1,
php.Block(deblock(node.node)
+ (node.count or []),
lineno=node.lineno),
lineno=node.lineno)],
lineno=node.lineno))
if isinstance(node, php.Foreach):
if node.keyvar is None:
target = py.Name(node.valvar.name[1:], py.Store(**pos(node)),
**pos(node))
else:
target = py.Tuple([py.Name(node.keyvar.name[1:],
py.Store(**pos(node))),
py.Name(node.valvar.name[1:],
py.Store(**pos(node)))],
py.Store(**pos(node)), **pos(node))
return py.For(target, from_phpast(node.expr),
list(map(to_stmt, list(map(from_phpast, deblock(node.node))))),
[], **pos(node))
if isinstance(node, php.While):
return py.While(from_phpast(node.expr),
list(map(to_stmt, list(map(from_phpast, deblock(node.node))))),
[], **pos(node))
if isinstance(node, php.DoWhile):
condition = php.If(php.UnaryOp('!', node.expr, lineno=node.lineno),
php.Break(None, lineno=node.lineno),
[], None, lineno=node.lineno)
return from_phpast(php.While(1,
php.Block(deblock(node.node)
+ [condition],
lineno=node.lineno),
lineno=node.lineno))
if isinstance(node, php.Try):
return py.TryExcept(list(map(to_stmt, list(map(from_phpast, node.nodes)))),
[py.ExceptHandler(py.Name(catch.class_,
py.Load(**pos(node)),
**pos(node)),
store(from_phpast(catch.var)),
list(map(to_stmt, list(map(from_phpast, catch.nodes)))),
**pos(node))
for catch in node.catches],
[],
**pos(node))
if isinstance(node, php.Throw):
return py.Raise(from_phpast(node.node), None, None, **pos(node))
if isinstance(node, php.Function):
args = []
defaults = []
for param in node.params:
args.append(py.Name(param.name[1:],
py.Param(**pos(node)),
**pos(node)))
if param.default is not None:
defaults.append(from_phpast(param.default))
body = list(map(to_stmt, list(map(from_phpast, node.nodes))))
if not body: body = [py.Pass(**pos(node))]
return py.FunctionDef(node.name,
py.arguments(args, None, None, defaults),
body, [], **pos(node))
if isinstance(node, php.Method):
args = []
defaults = []
decorator_list = []
if 'static' in node.modifiers:
decorator_list.append(py.Name('classmethod',
py.Load(**pos(node)),
**pos(node)))
args.append(py.Name('cls', py.Param(**pos(node)), **pos(node)))
else:
args.append(py.Name('self', py.Param(**pos(node)), **pos(node)))
for param in node.params:
args.append(py.Name(param.name[1:],
py.Param(**pos(node)),
**pos(node)))
if param.default is not None:
defaults.append(from_phpast(param.default))
body = list(map(to_stmt, list(map(from_phpast, node.nodes))))
if not body: body = [py.Pass(**pos(node))]
return py.FunctionDef(node.name,
py.arguments(args, None, None, defaults),
body, decorator_list, **pos(node))
if isinstance(node, php.Class):
name = node.name
bases = []
extends = node.extends or 'object'
bases.append(py.Name(extends, py.Load(**pos(node)), **pos(node)))
body = list(map(to_stmt, list(map(from_phpast, node.nodes))))
for stmt in body:
if (isinstance(stmt, py.FunctionDef)
and stmt.name in (name, '__construct')):
stmt.name = '__init__'
if not body: body = [py.Pass(**pos(node))]
return py.ClassDef(name, bases, body, [], **pos(node))
if isinstance(node, (php.ClassConstants, php.ClassVariables)):
assert len(node.nodes) == 1, \
'only one class-level assignment supported per line'
if isinstance(node.nodes[0], php.ClassConstant):
name = php.Constant(node.nodes[0].name, lineno=node.lineno)
else:
name = php.Variable(node.nodes[0].name, lineno=node.lineno)
initial = node.nodes[0].initial
if initial is None:
initial = php.Constant('None', lineno=node.lineno)
return py.Assign([store(from_phpast(name))],
from_phpast(initial),
**pos(node))
if isinstance(node, (php.FunctionCall, php.New)):
if isinstance(node.name, str):
name = py.Name(node.name, py.Load(**pos(node)), **pos(node))
else:
name = py.Subscript(py.Call(py.Name('vars', py.Load(**pos(node)),
**pos(node)),
[], [], None, None, **pos(node)),
py.Index(from_phpast(node.name), **pos(node)),
py.Load(**pos(node)),
**pos(node))
args, kwargs = build_args(node.params)
return py.Call(name, args, kwargs, None, None, **pos(node))
if isinstance(node, php.MethodCall):
args, kwargs = build_args(node.params)
return py.Call(py.Attribute(from_phpast(node.node),
node.name,
py.Load(**pos(node)),
**pos(node)),
args, kwargs, None, None, **pos(node))
if isinstance(node, php.StaticMethodCall):
class_ = node.class_
if class_ == 'self': class_ = 'cls'
args, kwargs = build_args(node.params)
return py.Call(py.Attribute(py.Name(class_, py.Load(**pos(node)),
**pos(node)),
node.name,
py.Load(**pos(node)),
**pos(node)),
args, kwargs, None, None, **pos(node))
if isinstance(node, php.StaticProperty):
class_ = node.node
name = node.name
if isinstance(name, php.Variable):
name = name.name[1:]
return py.Attribute(py.Name(class_, py.Load(**pos(node)),
**pos(node)),
name,
py.Load(**pos(node)),
**pos(node))
return py.Call(py.Name('XXX', py.Load(**pos(node)), **pos(node)),
[py.Str(str(node), **pos(node))],
[], None, None, **pos(node))
def pos(node):
    """Build the lineno/col_offset kwargs for a synthesized Python AST node.

    Falls back to line 0 when the source node carries no position info.
    """
    line = getattr(node, 'lineno', 0)
    return {'lineno': line, 'col_offset': 0}
def store(name):
    """Switch *name*'s AST context to Store (assignment target) and return it."""
    ctx = py.Store(**pos(name))
    name.ctx = ctx
    return name
def deblock(node):
    """Return *node*'s statements as a list, unwrapping a php.Block if present."""
    return node.nodes if isinstance(node, php.Block) else [node]
def build_args(params):
    """Split PHP call parameters into positional and keyword arguments.

    A parameter whose converted form is a ``py.Assign`` (``name=value``)
    becomes a keyword argument; every other parameter stays positional.
    Relative order within each category is preserved.
    """
    converted = [from_phpast(param.node) for param in params]
    kwargs = [py.keyword(node.targets[0].id, node.value)
              for node in converted if isinstance(node, py.Assign)]
    args = [node for node in converted if not isinstance(node, py.Assign)]
    return args, kwargs
def build_format(left, right):
    """Fold a PHP '.'-concatenation tree into a %-format string plus operands.

    String literals are inlined with '%' escaped as '%%'; non-string operands
    become '%s' placeholders, collected in order in the returned list.
    """
    if isinstance(left, str):
        pieces = []
        pattern = left.replace('%', '%%')
    elif isinstance(left, php.BinaryOp) and left.op == '.':
        pattern, pieces = build_format(left.left, left.right)
    else:
        pieces = [left]
        pattern = '%s'
    if isinstance(right, str):
        return pattern + right.replace('%', '%%'), pieces
    pieces.append(right)
    return pattern + '%s', pieces
| 22,518 | 0 | 161 |
91c50c426964b8ed46db51b3027fbd04fd11f484 | 14,890 | py | Python | matrixprofile/algorithms/mstomp.py | MORE-EU/matrixprofile | 7c598385f7723f337d7bf7d3f90cffb690c6b0df | [
"Apache-2.0"
] | 262 | 2020-02-28T20:42:27.000Z | 2022-03-30T14:02:28.000Z | matrixprofile/algorithms/mstomp.py | MORE-EU/matrixprofile | 7c598385f7723f337d7bf7d3f90cffb690c6b0df | [
"Apache-2.0"
] | 79 | 2020-03-01T01:42:14.000Z | 2022-03-30T07:15:48.000Z | matrixprofile/algorithms/mstomp.py | MORE-EU/matrixprofile | 7c598385f7723f337d7bf7d3f90cffb690c6b0df | [
"Apache-2.0"
] | 56 | 2020-03-03T14:56:27.000Z | 2022-03-22T07:18:42.000Z | # -*- coding: utf-8 -*-
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
range = getattr(__builtins__, 'xrange', range)
# end of py2 compatability boilerplate
import logging
import numpy as np
from matrixprofile import core
logger = logging.getLogger(__name__)
_EPS = 1e-14
def _batch_compute(args):
    """
    Internal function to compute one batch of the multidimensional matrix
    profile (mSTAMP, STOMP-ordered).

    Parameters
    ----------
    args : tuple
        (num_dim, batch_start, batch_end, ts, query, window_size,
         data_length, profile_length, exclusion_zone, data_mu, data_sig,
         first_product, skip_locs, profile_dimension, return_dimension)

        num_dim : number of dimensions (rows) of the time series.
        batch_start / batch_end : index range handled by this batch.
        ts / query : the time series arrays (identical for the self-join
            performed by ``mstomp``).
        window_size : subsequence length.
        data_length : number of columns of ``ts``.
        profile_length : number of entries in the finished profile.
        exclusion_zone : half-width used to suppress trivial matches.
        data_mu / data_sig : moving mean / std of ``ts`` per dimension.
        first_product : sliding dot product of the first window against ts.
        skip_locs : boolean mask of indices to skip (nan/inf windows).
        profile_dimension : per-k dimension-index buffers, filled only when
            ``return_dimension`` is True.
        return_dimension : whether to record which dimensions produced each
            profile value.

    Returns
    -------
    dict
        Keys 'mp', 'pi', 'pd', 'rmp', 'rpi', 'lmp', 'lpi' holding the matrix
        profile, its 1NN indices, the profile dimensions, and the right/left
        matrix profiles with their indices for this batch.
    """
    num_dim, batch_start, batch_end, ts, query, window_size, data_length, \
        profile_length, exclusion_zone, data_mu, data_sig, \
        first_product, skip_locs, profile_dimension, return_dimension = args

    # initialize output matrices; inf/0 are the identity values for the
    # element-wise min updates performed below
    matrix_profile = np.full((num_dim, profile_length), np.inf)
    profile_index = np.full((num_dim, profile_length), 0)
    left_matrix_profile = np.copy(matrix_profile)
    right_matrix_profile = np.copy(matrix_profile)
    left_profile_index = np.copy(profile_index)
    right_profile_index = np.copy(profile_index)

    # with batch 0 we do not need to recompute the dot product; for any other
    # batch we rebuild the previous iteration's sliding dot product so the
    # incremental (STOMP) update below starts from the right state
    last_product = np.copy(first_product)
    # NOTE: this previously read `batch_start is 0`; identity comparison on
    # ints is unreliable and a SyntaxWarning on Python >= 3.8
    if batch_start == 0:
        first_window = query[:, batch_start:batch_start + window_size]
    else:
        first_window = query[:, batch_start - 1:batch_start + window_size - 1]
    for i in range(num_dim):
        last_product[i, :] = core.fft_convolve(ts[i, :], first_window[i, :])

    query_sum = np.sum(first_window, axis=1)
    query_2sum = np.sum(first_window**2, axis=1)
    query_mu, query_sig = np.empty(num_dim), np.empty(num_dim)
    for i in range(num_dim):
        query_mu[i], query_sig[i] = core.moving_avg_std(first_window[i, :], window_size)

    drop_value = np.empty(num_dim)
    for i in range(num_dim):
        drop_value[i] = first_window[i, 0]

    distance_profile = np.empty((num_dim, profile_length))

    # make sure to compute inclusively from batch start to batch end
    # otherwise there are gaps in the profile
    if batch_end < profile_length:
        batch_end += 1

    # iteratively compute distance profile and update with element-wise mins
    for i in range(batch_start, batch_end):
        # check for nan or inf and skip
        if skip_locs[i]:
            continue

        for j in range(num_dim):
            if i == 0:
                query_window = query[j, i:i + window_size]
                distance_profile[j, :] = core.distance_profile(last_product[j, :], window_size, data_mu[j, :],
                                                               data_sig[j, :], query_mu[j], query_sig[j])
                # apply exclusion zone
                distance_profile[j, :] = core.apply_exclusion_zone(exclusion_zone, 0, window_size, data_length, 0,
                                                                   distance_profile[j, :])
            else:
                query_window = query[j, i:i + window_size]
                # O(1) update of the window sum / sum of squares statistics
                query_sum[j] = query_sum[j] - drop_value[j] + query_window[-1]
                query_2sum[j] = query_2sum[j] - drop_value[j]**2 + query_window[-1]**2
                query_mu[j] = query_sum[j] / window_size
                query_sig2 = query_2sum[j] / window_size - query_mu[j]**2
                if query_sig2 < _EPS:
                    query_sig2 = _EPS
                query_sig[j] = np.sqrt(query_sig2)
                # incremental sliding dot product (STOMP recurrence)
                last_product[j, 1:] = last_product[j, 0:data_length - window_size] \
                    - ts[j, 0:data_length - window_size] * drop_value[j] \
                    + ts[j, window_size:] * query_window[-1]
                last_product[j, 0] = first_product[j, i]
                distance_profile[j, :] = core.distance_profile(last_product[j, :], window_size, data_mu[j, :],
                                                               data_sig[j, :], query_mu[j], query_sig[j])
                # apply the exclusion zone
                distance_profile[j, :] = core.apply_exclusion_zone(exclusion_zone, 0, window_size, data_length, i,
                                                                   distance_profile[j, :])
            distance_profile[j, distance_profile[j, :] < _EPS] = 0
            drop_value[j] = query_window[0]

        if np.any(query_sig < _EPS):
            continue

        distance_profile[:, skip_locs] = np.inf
        distance_profile[data_sig < np.sqrt(_EPS)] = np.inf

        # aggregate over dimensions: for each k, mean of the k smallest
        # per-dimension distances
        distance_profile_dim = np.argsort(distance_profile, axis=0)
        distance_profile_sort = np.sort(distance_profile, axis=0)
        distance_profile_cumsum = np.zeros(profile_length)
        for j in range(num_dim):
            distance_profile_cumsum += distance_profile_sort[j, :]
            distance_profile_mean = distance_profile_cumsum / (j + 1)

            # update the matrix profile
            indices = (distance_profile_mean < matrix_profile[j, :])
            matrix_profile[j, indices] = distance_profile_mean[indices]
            profile_index[j, indices] = i

            if return_dimension:
                profile_dimension[j][:, indices] = distance_profile_dim[:j + 1, indices]

            # update the left and right matrix profiles
            # find differences, shift left and update
            indices = distance_profile_mean[i:] < left_matrix_profile[j, i:]
            falses = np.zeros(i).astype('bool')
            indices = np.append(falses, indices)
            left_matrix_profile[j, indices] = distance_profile_mean[indices]
            left_profile_index[j, np.argwhere(indices)] = i

            # find differences, shift right and update
            indices = distance_profile_mean[0:i] < right_matrix_profile[j, 0:i]
            falses = np.zeros(profile_length - i).astype('bool')
            indices = np.append(indices, falses)
            right_matrix_profile[j, indices] = distance_profile_mean[indices]
            right_profile_index[j, np.argwhere(indices)] = i

    return {
        'mp': matrix_profile,
        'pi': profile_index,
        'pd': profile_dimension,
        'rmp': right_matrix_profile,
        'rpi': right_profile_index,
        'lmp': left_matrix_profile,
        'lpi': left_profile_index,
    }
def mstomp(ts, window_size, return_dimension=False, n_jobs=1):
    """
    Computes the multidimensional matrix profile with mSTAMP (STOMP based),
    optionally distributing batches with Python's multiprocessing.

    Parameters
    ----------
    ts : array_like, shape (n_dim, seq_len)
        The multidimensional time series to compute the multidimensional matrix profile for.
    window_size : int
        The size of the window to compute the matrix profile over.
    return_dimension : bool
        if True, also return the matrix profile dimension. It takes O(d^2 n)
        to store and O(d^2 n^2) to compute. (default is False)
    n_jobs : int, Default = 1
        Number of cpu cores to use.

    Returns
    -------
    dict : profile
        A MatrixProfile data structure with keys 'mp', 'pi', 'pd', 'rmp',
        'rpi', 'lmp', 'lpi', 'metric', 'w', 'ez', 'sample_pct', 'data'
        (holding 'ts' and 'query'), 'class' and 'algorithm'.

    Raises
    ------
    ValueError
        If window_size < 4.
        If window_size > time series length / 2.
        If ts is not a list or np.array.
    """
    # self-join: the query is the series itself
    query = ts

    # data conversion to np.array
    ts = core.to_np_array(ts)
    query = core.to_np_array(query)

    if window_size < 4:
        error = "window size must be at least 4."
        raise ValueError(error)

    if ts.ndim == 1:
        ts = np.expand_dims(ts, axis=0)
        query = np.expand_dims(query, axis=0)

    if window_size > query.shape[1] / 2:
        error = "Time series is too short relative to desired window size"
        raise ValueError(error)

    # multiprocessing or single threaded approach
    # (was `if n_jobs == 1: pass / else: ...` -- dead branch removed)
    if n_jobs != 1:
        n_jobs = core.valid_n_jobs(n_jobs)

    # precompute some common values - profile length, query length etc.
    profile_length = core.get_profile_length(ts, query, window_size)
    data_length = ts.shape[1]
    query_length = query.shape[1]
    num_queries = query_length - window_size + 1
    exclusion_zone = int(np.ceil(window_size / 2.0))
    num_dim = ts.shape[0]

    # find skip locations, clean up nan and inf in the ts and query
    skip_locs = core.find_multid_skip_locations(ts, profile_length, window_size)
    ts = core.clean_nan_inf(ts)
    query = core.clean_nan_inf(query)

    # initialize matrices
    matrix_profile = np.full((num_dim, profile_length), np.inf)
    profile_index = np.full((num_dim, profile_length), 0)

    # compute left and right matrix profile when similarity join does not happen
    left_matrix_profile = np.copy(matrix_profile)
    right_matrix_profile = np.copy(matrix_profile)
    left_profile_index = np.copy(profile_index)
    right_profile_index = np.copy(profile_index)

    profile_dimension = []
    if return_dimension:
        # dimension tracking is only computed single-threaded (see below)
        n_jobs = 1
        for i in range(num_dim):
            profile_dimension.append(np.empty((i + 1, profile_length), dtype=int))

    # precompute some statistics on ts
    data_mu, data_sig, first_product = np.empty((num_dim, profile_length)), np.empty(
        (num_dim, profile_length)), np.empty((num_dim, profile_length))
    for i in range(num_dim):
        data_mu[i, :], data_sig[i, :] = core.moving_avg_std(ts[i, :], window_size)
        first_window = query[i, 0:window_size]
        first_product[i, :] = core.fft_convolve(ts[i, :], first_window)

    batch_windows = []
    results = []

    # batch compute with multiprocessing
    args = []
    for start, end in core.generate_batch_jobs(num_queries, n_jobs):
        args.append((num_dim, start, end, ts, query, window_size, data_length, profile_length, exclusion_zone, data_mu,
                     data_sig, first_product, skip_locs, profile_dimension, return_dimension))
        batch_windows.append((start, end))

    # we are running single threaded stomp - no need to initialize any
    # parallel environments.
    if n_jobs == 1 or len(args) == 1:
        results.append(_batch_compute(args[0]))
    else:
        # parallelize
        with core.mp_pool()(n_jobs) as pool:
            results = pool.map(_batch_compute, args)

    # now we combine the batch results
    if len(results) == 1:
        result = results[0]
        matrix_profile = result['mp']
        profile_index = result['pi']
        profile_dimension = result['pd']
        left_matrix_profile = result['lmp']
        left_profile_index = result['lpi']
        right_matrix_profile = result['rmp']
        right_profile_index = result['rpi']
    else:
        for index, result in enumerate(results):
            start = batch_windows[index][0]
            end = batch_windows[index][1]

            # update the matrix profile
            indices = result['mp'] < matrix_profile
            matrix_profile[indices] = result['mp'][indices]
            profile_index[indices] = result['pi'][indices]

            # update the left and right matrix profiles
            indices = result['lmp'] < left_matrix_profile
            left_matrix_profile[indices] = result['lmp'][indices]
            left_profile_index[indices] = result['lpi'][indices]

            indices = result['rmp'] < right_matrix_profile
            right_matrix_profile[indices] = result['rmp'][indices]
            right_profile_index[indices] = result['rpi'][indices]

    return {
        'mp': matrix_profile,
        'pi': profile_index,
        'pd': profile_dimension,
        'rmp': right_matrix_profile,
        'rpi': right_profile_index,
        'lmp': left_matrix_profile,
        'lpi': left_profile_index,
        'metric': 'euclidean',
        'w': window_size,
        'ez': exclusion_zone,
        'sample_pct': 1,
        'data': {
            'ts': ts,
            'query': query
        },
        'class': "MatrixProfile",
        'algorithm': "stomp_based_mstamp"
    }
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
range = getattr(__builtins__, 'xrange', range)
# end of py2 compatability boilerplate
import logging
import numpy as np
from matrixprofile import core
logger = logging.getLogger(__name__)
_EPS = 1e-14
def _batch_compute(args):
    """
    Internal function to compute one batch of the multidimensional matrix
    profile (mSTAMP, STOMP-ordered).

    Parameters
    ----------
    args : tuple
        (num_dim, batch_start, batch_end, ts, query, window_size,
         data_length, profile_length, exclusion_zone, data_mu, data_sig,
         first_product, skip_locs, profile_dimension, return_dimension)

        num_dim : number of dimensions (rows) of the time series.
        batch_start / batch_end : index range handled by this batch.
        ts / query : the time series arrays (identical for the self-join
            performed by ``mstomp``).
        window_size : subsequence length.
        data_length : number of columns of ``ts``.
        profile_length : number of entries in the finished profile.
        exclusion_zone : half-width used to suppress trivial matches.
        data_mu / data_sig : moving mean / std of ``ts`` per dimension.
        first_product : sliding dot product of the first window against ts.
        skip_locs : boolean mask of indices to skip (nan/inf windows).
        profile_dimension : per-k dimension-index buffers, filled only when
            ``return_dimension`` is True.
        return_dimension : whether to record which dimensions produced each
            profile value.

    Returns
    -------
    dict
        Keys 'mp', 'pi', 'pd', 'rmp', 'rpi', 'lmp', 'lpi' holding the matrix
        profile, its 1NN indices, the profile dimensions, and the right/left
        matrix profiles with their indices for this batch.
    """
    num_dim, batch_start, batch_end, ts, query, window_size, data_length, \
        profile_length, exclusion_zone, data_mu, data_sig, \
        first_product, skip_locs, profile_dimension, return_dimension = args

    # initialize output matrices; inf/0 are the identity values for the
    # element-wise min updates performed below
    matrix_profile = np.full((num_dim, profile_length), np.inf)
    profile_index = np.full((num_dim, profile_length), 0)
    left_matrix_profile = np.copy(matrix_profile)
    right_matrix_profile = np.copy(matrix_profile)
    left_profile_index = np.copy(profile_index)
    right_profile_index = np.copy(profile_index)

    # with batch 0 we do not need to recompute the dot product; for any other
    # batch we rebuild the previous iteration's sliding dot product so the
    # incremental (STOMP) update below starts from the right state
    last_product = np.copy(first_product)
    # NOTE: this previously read `batch_start is 0`; identity comparison on
    # ints is unreliable and a SyntaxWarning on Python >= 3.8
    if batch_start == 0:
        first_window = query[:, batch_start:batch_start + window_size]
    else:
        first_window = query[:, batch_start - 1:batch_start + window_size - 1]
    for i in range(num_dim):
        last_product[i, :] = core.fft_convolve(ts[i, :], first_window[i, :])

    query_sum = np.sum(first_window, axis=1)
    query_2sum = np.sum(first_window**2, axis=1)
    query_mu, query_sig = np.empty(num_dim), np.empty(num_dim)
    for i in range(num_dim):
        query_mu[i], query_sig[i] = core.moving_avg_std(first_window[i, :], window_size)

    drop_value = np.empty(num_dim)
    for i in range(num_dim):
        drop_value[i] = first_window[i, 0]

    distance_profile = np.empty((num_dim, profile_length))

    # make sure to compute inclusively from batch start to batch end
    # otherwise there are gaps in the profile
    if batch_end < profile_length:
        batch_end += 1

    # iteratively compute distance profile and update with element-wise mins
    for i in range(batch_start, batch_end):
        # check for nan or inf and skip
        if skip_locs[i]:
            continue

        for j in range(num_dim):
            if i == 0:
                query_window = query[j, i:i + window_size]
                distance_profile[j, :] = core.distance_profile(last_product[j, :], window_size, data_mu[j, :],
                                                               data_sig[j, :], query_mu[j], query_sig[j])
                # apply exclusion zone
                distance_profile[j, :] = core.apply_exclusion_zone(exclusion_zone, 0, window_size, data_length, 0,
                                                                   distance_profile[j, :])
            else:
                query_window = query[j, i:i + window_size]
                # O(1) update of the window sum / sum of squares statistics
                query_sum[j] = query_sum[j] - drop_value[j] + query_window[-1]
                query_2sum[j] = query_2sum[j] - drop_value[j]**2 + query_window[-1]**2
                query_mu[j] = query_sum[j] / window_size
                query_sig2 = query_2sum[j] / window_size - query_mu[j]**2
                if query_sig2 < _EPS:
                    query_sig2 = _EPS
                query_sig[j] = np.sqrt(query_sig2)
                # incremental sliding dot product (STOMP recurrence)
                last_product[j, 1:] = last_product[j, 0:data_length - window_size] \
                    - ts[j, 0:data_length - window_size] * drop_value[j] \
                    + ts[j, window_size:] * query_window[-1]
                last_product[j, 0] = first_product[j, i]
                distance_profile[j, :] = core.distance_profile(last_product[j, :], window_size, data_mu[j, :],
                                                               data_sig[j, :], query_mu[j], query_sig[j])
                # apply the exclusion zone
                distance_profile[j, :] = core.apply_exclusion_zone(exclusion_zone, 0, window_size, data_length, i,
                                                                   distance_profile[j, :])
            distance_profile[j, distance_profile[j, :] < _EPS] = 0
            drop_value[j] = query_window[0]

        if np.any(query_sig < _EPS):
            continue

        distance_profile[:, skip_locs] = np.inf
        distance_profile[data_sig < np.sqrt(_EPS)] = np.inf

        # aggregate over dimensions: for each k, mean of the k smallest
        # per-dimension distances
        distance_profile_dim = np.argsort(distance_profile, axis=0)
        distance_profile_sort = np.sort(distance_profile, axis=0)
        distance_profile_cumsum = np.zeros(profile_length)
        for j in range(num_dim):
            distance_profile_cumsum += distance_profile_sort[j, :]
            distance_profile_mean = distance_profile_cumsum / (j + 1)

            # update the matrix profile
            indices = (distance_profile_mean < matrix_profile[j, :])
            matrix_profile[j, indices] = distance_profile_mean[indices]
            profile_index[j, indices] = i

            if return_dimension:
                profile_dimension[j][:, indices] = distance_profile_dim[:j + 1, indices]

            # update the left and right matrix profiles
            # find differences, shift left and update
            indices = distance_profile_mean[i:] < left_matrix_profile[j, i:]
            falses = np.zeros(i).astype('bool')
            indices = np.append(falses, indices)
            left_matrix_profile[j, indices] = distance_profile_mean[indices]
            left_profile_index[j, np.argwhere(indices)] = i

            # find differences, shift right and update
            indices = distance_profile_mean[0:i] < right_matrix_profile[j, 0:i]
            falses = np.zeros(profile_length - i).astype('bool')
            indices = np.append(indices, falses)
            right_matrix_profile[j, indices] = distance_profile_mean[indices]
            right_profile_index[j, np.argwhere(indices)] = i

    return {
        'mp': matrix_profile,
        'pi': profile_index,
        'pd': profile_dimension,
        'rmp': right_matrix_profile,
        'rpi': right_profile_index,
        'lmp': left_matrix_profile,
        'lpi': left_profile_index,
    }
def mstomp(ts, window_size, return_dimension=False, n_jobs=1):
    """
    Computes the multidimensional matrix profile with mSTAMP (STOMP based),
    optionally distributing batches with Python's multiprocessing.

    Parameters
    ----------
    ts : array_like, shape (n_dim, seq_len)
        The multidimensional time series to compute the multidimensional matrix profile for.
    window_size : int
        The size of the window to compute the matrix profile over.
    return_dimension : bool
        if True, also return the matrix profile dimension. It takes O(d^2 n)
        to store and O(d^2 n^2) to compute. (default is False)
    n_jobs : int, Default = 1
        Number of cpu cores to use.

    Returns
    -------
    dict : profile
        A MatrixProfile data structure with keys 'mp', 'pi', 'pd', 'rmp',
        'rpi', 'lmp', 'lpi', 'metric', 'w', 'ez', 'sample_pct', 'data'
        (holding 'ts' and 'query'), 'class' and 'algorithm'.

    Raises
    ------
    ValueError
        If window_size < 4.
        If window_size > time series length / 2.
        If ts is not a list or np.array.
    """
    # self-join: the query is the series itself
    query = ts

    # data conversion to np.array
    ts = core.to_np_array(ts)
    query = core.to_np_array(query)

    if window_size < 4:
        error = "window size must be at least 4."
        raise ValueError(error)

    if ts.ndim == 1:
        ts = np.expand_dims(ts, axis=0)
        query = np.expand_dims(query, axis=0)

    if window_size > query.shape[1] / 2:
        error = "Time series is too short relative to desired window size"
        raise ValueError(error)

    # multiprocessing or single threaded approach
    # (was `if n_jobs == 1: pass / else: ...` -- dead branch removed)
    if n_jobs != 1:
        n_jobs = core.valid_n_jobs(n_jobs)

    # precompute some common values - profile length, query length etc.
    profile_length = core.get_profile_length(ts, query, window_size)
    data_length = ts.shape[1]
    query_length = query.shape[1]
    num_queries = query_length - window_size + 1
    exclusion_zone = int(np.ceil(window_size / 2.0))
    num_dim = ts.shape[0]

    # find skip locations, clean up nan and inf in the ts and query
    skip_locs = core.find_multid_skip_locations(ts, profile_length, window_size)
    ts = core.clean_nan_inf(ts)
    query = core.clean_nan_inf(query)

    # initialize matrices
    matrix_profile = np.full((num_dim, profile_length), np.inf)
    profile_index = np.full((num_dim, profile_length), 0)

    # compute left and right matrix profile when similarity join does not happen
    left_matrix_profile = np.copy(matrix_profile)
    right_matrix_profile = np.copy(matrix_profile)
    left_profile_index = np.copy(profile_index)
    right_profile_index = np.copy(profile_index)

    profile_dimension = []
    if return_dimension:
        # dimension tracking is only computed single-threaded (see below)
        n_jobs = 1
        for i in range(num_dim):
            profile_dimension.append(np.empty((i + 1, profile_length), dtype=int))

    # precompute some statistics on ts
    data_mu, data_sig, first_product = np.empty((num_dim, profile_length)), np.empty(
        (num_dim, profile_length)), np.empty((num_dim, profile_length))
    for i in range(num_dim):
        data_mu[i, :], data_sig[i, :] = core.moving_avg_std(ts[i, :], window_size)
        first_window = query[i, 0:window_size]
        first_product[i, :] = core.fft_convolve(ts[i, :], first_window)

    batch_windows = []
    results = []

    # batch compute with multiprocessing
    args = []
    for start, end in core.generate_batch_jobs(num_queries, n_jobs):
        args.append((num_dim, start, end, ts, query, window_size, data_length, profile_length, exclusion_zone, data_mu,
                     data_sig, first_product, skip_locs, profile_dimension, return_dimension))
        batch_windows.append((start, end))

    # we are running single threaded stomp - no need to initialize any
    # parallel environments.
    if n_jobs == 1 or len(args) == 1:
        results.append(_batch_compute(args[0]))
    else:
        # parallelize
        with core.mp_pool()(n_jobs) as pool:
            results = pool.map(_batch_compute, args)

    # now we combine the batch results
    if len(results) == 1:
        result = results[0]
        matrix_profile = result['mp']
        profile_index = result['pi']
        profile_dimension = result['pd']
        left_matrix_profile = result['lmp']
        left_profile_index = result['lpi']
        right_matrix_profile = result['rmp']
        right_profile_index = result['rpi']
    else:
        for index, result in enumerate(results):
            start = batch_windows[index][0]
            end = batch_windows[index][1]

            # update the matrix profile
            indices = result['mp'] < matrix_profile
            matrix_profile[indices] = result['mp'][indices]
            profile_index[indices] = result['pi'][indices]

            # update the left and right matrix profiles
            indices = result['lmp'] < left_matrix_profile
            left_matrix_profile[indices] = result['lmp'][indices]
            left_profile_index[indices] = result['lpi'][indices]

            indices = result['rmp'] < right_matrix_profile
            right_matrix_profile[indices] = result['rmp'][indices]
            right_profile_index[indices] = result['rpi'][indices]

    return {
        'mp': matrix_profile,
        'pi': profile_index,
        'pd': profile_dimension,
        'rmp': right_matrix_profile,
        'rpi': right_profile_index,
        'lmp': left_matrix_profile,
        'lpi': left_profile_index,
        'metric': 'euclidean',
        'w': window_size,
        'ez': exclusion_zone,
        'sample_pct': 1,
        'data': {
            'ts': ts,
            'query': query
        },
        'class': "MatrixProfile",
        'algorithm': "stomp_based_mstamp"
    }
f219abbdea61be901e01d4bb8905f5061d23a1d7 | 133 | py | Python | odoo/custom/src/private/advance_and_additional_discount/__init__.py | ecosoft-odoo/mh-doodba | 093f14850aaff337951b4829b24bf32eee6e6d40 | [
"BSL-1.0"
] | 1 | 2021-10-03T08:11:18.000Z | 2021-10-03T08:11:18.000Z | odoo/custom/src/private/advance_and_additional_discount/__init__.py | ecosoft-odoo/mh-doodba | 093f14850aaff337951b4829b24bf32eee6e6d40 | [
"BSL-1.0"
] | null | null | null | odoo/custom/src/private/advance_and_additional_discount/__init__.py | ecosoft-odoo/mh-doodba | 093f14850aaff337951b4829b24bf32eee6e6d40 | [
"BSL-1.0"
] | null | null | null | import sale
import purchase
import account_invoice
import account_voucher
import stock
import wizard
import partner
import res_config | 16.625 | 22 | 0.887218 | import sale
import purchase
import account_invoice
import account_voucher
import stock
import wizard
import partner
import res_config | 0 | 0 | 0 |
06fbd889a7ced5c23a442171b1fd8a0e210890a6 | 103 | py | Python | DataStructuresAlgorithms/ArrayBasedSequences/list_02.py | M1NH42/python-dsa | 297e70d68bb81aececad1279e5c16e42eb941975 | [
"MIT"
] | null | null | null | DataStructuresAlgorithms/ArrayBasedSequences/list_02.py | M1NH42/python-dsa | 297e70d68bb81aececad1279e5c16e42eb941975 | [
"MIT"
] | null | null | null | DataStructuresAlgorithms/ArrayBasedSequences/list_02.py | M1NH42/python-dsa | 297e70d68bb81aececad1279e5c16e42eb941975 | [
"MIT"
] | null | null | null | import array
numbers = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]
# `array` is the module, not the constructor; calling the module raised
# "TypeError: 'module' object is not callable".  The constructor is
# array.array(typecode, initializer); 'i' stores signed ints.
numbers_array = array.array('i', numbers)
| 20.6 | 44 | 0.572816 | import array
numbers = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]
# `array` is the module, not the constructor; calling the module raised
# "TypeError: 'module' object is not callable".  The constructor is
# array.array(typecode, initializer); 'i' stores signed ints.
numbers_array = array.array('i', numbers)
| 0 | 0 | 0 |
a3f36ef3ac086712c7885627548ecf611d881782 | 8,859 | py | Python | docqa/triviaqa/evidence_corpus.py | Willyoung2017/doc-qa | 7ee02218952b0b9db63bc82b3895f743cdbd8f22 | [
"Apache-2.0"
] | 422 | 2017-10-31T12:20:29.000Z | 2022-03-14T11:25:16.000Z | docqa/triviaqa/evidence_corpus.py | Willyoung2017/doc-qa | 7ee02218952b0b9db63bc82b3895f743cdbd8f22 | [
"Apache-2.0"
] | 54 | 2017-11-02T10:34:45.000Z | 2021-02-04T05:05:20.000Z | docqa/triviaqa/evidence_corpus.py | Willyoung2017/doc-qa | 7ee02218952b0b9db63bc82b3895f743cdbd8f22 | [
"Apache-2.0"
] | 138 | 2017-11-02T10:49:09.000Z | 2021-11-26T15:34:01.000Z | import argparse
import pickle
import re
from collections import Counter
from os import walk, mkdir, makedirs
from os.path import relpath, join, exists
from typing import Set
from tqdm import tqdm
from docqa import config
from docqa.config import CORPUS_DIR
from docqa.data_processing.text_utils import NltkAndPunctTokenizer
from docqa.triviaqa.read_data import normalize_wiki_filename
from docqa.utils import group, split, flatten_iterable
"""
Build and cache a tokenized version of the evidence corpus
"""
def build_tokenized_files(filenames, input_root, output_root, tokenizer, override=True) -> Set[str]:
    """
    For each file in `filenames` loads the text, tokenizes it with `tokenizer`, and
    saves the output to the same relative location in `output_root`.
    @:return a set of all the individual words seen
    """
    voc = set()
    for filename in filenames:
        out_file = normalize_wiki_filename(filename[:filename.rfind(".")]) + ".txt"
        out_file = join(output_root, out_file)
        if not override and exists(out_file):
            continue
        with open(join(input_root, filename), "r") as in_file:
            text = in_file.read().strip()
        paras = [x for x in text.split("\n") if len(x) > 0]
        paragraphs = [tokenizer.tokenize_paragraph(x) for x in paras]
        for para in paragraphs:
            for sent in para:
                voc.update(sent)
        # BUG FIX: out_file already contains output_root; the original joined
        # output_root a second time here, writing to output_root/output_root/...
        # while the `exists(out_file)` skip check above used the correct path.
        with open(out_file, "w") as out_fh:
            # one token-joined sentence per line, blank line between paragraphs
            out_fh.write("\n\n".join("\n".join(" ".join(sent) for sent in para) for para in paragraphs))
    return voc
class TriviaQaEvidenceCorpusTxt(object):
    """
    Corpus of the tokenized text from the given TriviaQa evidence documents.
    Allows the text to be retrieved by document id
    """
    # Splits cached text on newlines and spaces, i.e. into individual tokens.
    _split_all = re.compile("[\n ]")
    # Paragraph boundary: runs of two or more newlines (the cache writer joins
    # paragraphs with "\n\n").
    _split_para = re.compile("\n\n+")  # FIXME we should not have saved document w/extra spaces...
if __name__ == "__main__":
main() | 36.012195 | 111 | 0.589683 | import argparse
import pickle
import re
from collections import Counter
from os import walk, mkdir, makedirs
from os.path import relpath, join, exists
from typing import Set
from tqdm import tqdm
from docqa import config
from docqa.config import CORPUS_DIR
from docqa.data_processing.text_utils import NltkAndPunctTokenizer
from docqa.triviaqa.read_data import normalize_wiki_filename
from docqa.utils import group, split, flatten_iterable
"""
Build and cache a tokenized version of the evidence corpus
"""
def _gather_files(input_root, output_dir, skip_dirs, wiki_only):
    """Collect evidence file paths under *input_root* (relative to it),
    creating the mirrored, normalized output directories as a side effect.

    When *skip_dirs* is set, directories whose (un-normalized) mirror already
    exists in *output_dir* are skipped entirely; when *wiki_only* is set the
    result is filtered to wikipedia files.
    """
    if not exists(output_dir):
        mkdir(output_dir)

    collected = []
    for root, _dirs, names in walk(input_root):
        rel = relpath(root, input_root)
        if skip_dirs and exists(join(output_dir, rel)):
            continue
        normalized = normalize_wiki_filename(rel)
        target = join(output_dir, normalized)
        if not exists(target):
            mkdir(target)
        collected.extend(join(rel, name) for name in names)

    if wiki_only:
        collected = [f for f in collected if "wikipedia/" in f]
    return collected
def build_tokenized_files(filenames, input_root, output_root, tokenizer, override=True) -> Set[str]:
    """
    For each file in `filenames` loads the text, tokenizes it with `tokenizer`, and
    saves the output to the same relative location in `output_root`.
    @:return a set of all the individual words seen
    """
    voc = set()
    for filename in filenames:
        out_file = normalize_wiki_filename(filename[:filename.rfind(".")]) + ".txt"
        out_file = join(output_root, out_file)
        if not override and exists(out_file):
            continue
        with open(join(input_root, filename), "r") as in_file:
            text = in_file.read().strip()
        paras = [x for x in text.split("\n") if len(x) > 0]
        paragraphs = [tokenizer.tokenize_paragraph(x) for x in paras]
        for para in paragraphs:
            for sent in para:
                voc.update(sent)
        # BUG FIX: out_file already contains output_root; the original joined
        # output_root a second time here, writing to output_root/output_root/...
        # while the `exists(out_file)` skip check above used the correct path.
        with open(out_file, "w") as out_fh:
            # one token-joined sentence per line, blank line between paragraphs
            out_fh.write("\n\n".join("\n".join(" ".join(sent) for sent in para) for para in paragraphs))
    return voc
def build_tokenized_corpus(input_root, tokenizer, output_dir, skip_dirs=False,
                           n_processes=1, wiki_only=False):
    """Tokenize every evidence file under *input_root* and cache the results
    (plus a sorted ``vocab.txt``) under *output_dir*.

    With n_processes > 1 the files are distributed over a process pool.
    """
    if not exists(output_dir):
        makedirs(output_dir)
    all_files = _gather_files(input_root, output_dir, skip_dirs, wiki_only)
    if n_processes == 1:
        voc = build_tokenized_files(tqdm(all_files, ncols=80), input_root, output_dir, tokenizer)
    else:
        voc = set()
        from multiprocessing import Pool
        with Pool(n_processes) as pool:
            # split across workers, then re-chunk so each task is ~500 files
            chunks = split(all_files, n_processes)
            chunks = flatten_iterable(group(c, 500) for c in chunks)
            pbar = tqdm(total=len(chunks), ncols=80)
            for v in pool.imap_unordered(_build_tokenized_files_t,
                                         [[c, input_root, output_dir, tokenizer] for c in chunks]):
                # merge the vocabulary produced by each finished task
                voc.update(v)
                pbar.update(1)
            pbar.close()
    # persist the sorted vocabulary alongside the tokenized files
    voc_file = join(output_dir, "vocab.txt")
    with open(voc_file, "w") as f:
        for word in sorted(voc):
            f.write(word)
            f.write("\n")
def _build_tokenized_files_t(arg):
    # multiprocessing helper: unpack (filenames, input_root, output_root, tokenizer)
    filenames, input_root, output_root, tokenizer = arg
    return build_tokenized_files(filenames, input_root, output_root, tokenizer)
def extract_voc(corpus, doc_ids):
    """Count word frequencies across the given documents.

    Returns a Counter mapping word -> total occurrences over every
    document in `doc_ids`, read from `corpus` with `get_document(..., flat=True)`.
    """
    voc = Counter()
    # the original used `enumerate` but never used the index
    for doc in doc_ids:
        voc.update(corpus.get_document(doc, flat=True))
    return voc
def _extract_voc_tuple(x):
    # multiprocessing helper: x is a (corpus, doc_ids) pair
    corpus, doc_ids = x
    return extract_voc(corpus, doc_ids)
def get_evidence_voc(corpus, n_processes=1):
    """Count the occurrences of every word in the evidence corpus.

    Returns a Counter mapping word -> number of occurrences aggregated over
    all documents returned by `corpus.list_documents()`.
    """
    doc_ids = corpus.list_documents()
    voc = Counter()

    if n_processes == 1:
        for doc in tqdm(doc_ids):
            # BUG FIX: the original did `voc = corpus.get_document(...)`,
            # discarding the Counter each iteration and ultimately returning
            # the *token list* of the last document instead of the counts.
            voc.update(corpus.get_document(doc, flat=True))
    else:
        from multiprocessing import Pool
        chunks = split(doc_ids, n_processes)
        chunks = flatten_iterable(group(x, 10000) for x in chunks)
        pbar = tqdm(total=len(chunks), ncols=80)
        with Pool(n_processes) as pool:
            for v in pool.imap_unordered(_extract_voc_tuple, [[corpus, c] for c in chunks]):
                voc += v
                pbar.update(1)
        pbar.close()
    return voc
def build_evidence_voc(corpus, override, n_processes):
    """Compute the evidence-corpus vocabulary and save it to `corpus.directory`/vocab.txt.

    Raises ValueError if the vocab file already exists and `override` is falsy.
    """
    target_file = join(corpus.directory, "vocab.txt")
    if exists(target_file) and not override:
        raise ValueError("Vocab file already exists: " + target_file)
    # BUG FIX: the original ignored the `corpus` argument and built a fresh
    # TriviaQaEvidenceCorpusTxt(), defeating the purpose of passing a corpus
    # in (e.g. one constructed with a custom file_id_map).
    voc = get_evidence_voc(corpus, n_processes=n_processes).keys()
    with open(target_file, "w") as f:
        for word in sorted(voc):
            f.write(word)
            f.write("\n")
class TriviaQaEvidenceCorpusTxt(object):
    """
    Corpus of the tokenized text from the given TriviaQa evidence documents.
    Allows the text to be retrieved by document id
    """
    _split_all = re.compile("[\n ]")
    _split_para = re.compile("\n\n+")  # FIXME we should not have saved document w/extra spaces...

    def __init__(self, file_id_map=None):
        # `file_id_map`, if given, maps external document ids to on-disk file ids
        self.directory = join(CORPUS_DIR, "triviaqa/evidence")
        self.file_id_map = file_id_map

    def get_vocab(self):
        """Return the set of words stored in this corpus' vocab.txt file."""
        with open(join(self.directory, "vocab.txt"), "r") as f:
            return {x.strip() for x in f}

    def load_word_vectors(self, vec_name):
        """Load the pruned word vectors cached for this corpus under `vec_name`,
        or None if no cached file exists."""
        filename = join(self.directory, vec_name + "_pruned.pkl")
        if not exists(filename):
            return None
        # BUG FIX: the original opened the file without binding the handle and
        # then called pickle.load on the *path string*, which raises TypeError.
        with open(filename, "rb") as f:
            return pickle.load(f)

    def list_documents(self):
        """Return the ids (relative paths without the .txt extension) of all
        documents stored in the corpus."""
        if self.file_id_map is not None:
            return list(self.file_id_map.keys())
        f = []
        for dirpath, dirnames, filenames in walk(self.directory):
            if dirpath == self.directory:
                # Exclude files in the top level dir, like the vocab file
                continue
            # NOTE: the original had a dead `else` branch here — after the
            # `continue` above, `dirpath` can never equal `self.directory`
            rel_path = relpath(dirpath, self.directory)
            f += [join(rel_path, x[:-4]) for x in filenames]  # strip ".txt"
        return f

    def get_document(self, doc_id, n_tokens=None, flat=False):
        """Return the tokenized text of `doc_id`, or None if it is unknown.

        If `flat`, the result is a single list of tokens; otherwise a list of
        paragraphs, each a list of sentences (each a list of tokens). If
        `n_tokens` is given, at most that many tokens are read, streaming the
        file line by line instead of loading it whole.
        """
        if self.file_id_map is None:
            file_id = doc_id
        else:
            file_id = self.file_id_map.get(doc_id)

        if file_id is None:
            return None

        file_id = join(self.directory, file_id + ".txt")
        if not exists(file_id):
            return None

        with open(file_id, "r") as f:
            if n_tokens is None:
                text = f.read()
                if flat:
                    return [x for x in self._split_all.split(text) if len(x) > 0]
                else:
                    paragraphs = []
                    for para in self._split_para.split(text):
                        paragraphs.append([sent.split(" ") for sent in para.split("\n")])
                    return paragraphs
            else:
                # stream so we never tokenize past `n_tokens`
                paragraphs = []
                paragraph = []
                cur_tokens = 0
                for line in f:
                    if line == "\n":
                        # blank line = paragraph boundary (ignored when flat)
                        if not flat and len(paragraph) > 0:
                            paragraphs.append(paragraph)
                            paragraph = []
                    else:
                        sent = line.split(" ")
                        sent[-1] = sent[-1].rstrip()
                        if len(sent) + cur_tokens > n_tokens:
                            # keep only as many tokens as still fit
                            if n_tokens != cur_tokens:
                                paragraph.append(sent[:n_tokens - cur_tokens])
                            break
                        else:
                            paragraph.append(sent)
                            cur_tokens += len(sent)
                if flat:
                    return flatten_iterable(paragraph)
                else:
                    if len(paragraph) > 0:
                        paragraphs.append(paragraph)
                    return paragraphs
def main():
    """Command-line entry point: pre-tokenize the TriviaQA evidence corpus."""
    parser = argparse.ArgumentParser("Pre-tokenize the TriviaQA evidence corpus")
    parser.add_argument("-o", "--output_dir", type=str,
                        default=join(config.CORPUS_DIR, "triviaqa", "evidence"))
    parser.add_argument("-s", "--source", type=str,
                        default=join(config.TRIVIA_QA, "evidence"))
    # This is slow, using more processes is recommended
    parser.add_argument("-n", "--n_processes", type=int, default=1,
                        help="Number of processes to use")
    parser.add_argument("--wiki_only", action="store_true")
    opts = parser.parse_args()
    build_tokenized_corpus(opts.source, NltkAndPunctTokenizer(), opts.output_dir,
                           n_processes=opts.n_processes, wiki_only=opts.wiki_only)


if __name__ == "__main__":
    main()
84efe3bfb727ac976e976b8d9cfe388786b4f2a3 | 4,122 | py | Python | problems/cop/real/Fapp.py | xcsp3team/pycsp3 | a11bc370e34cd3fe37faeae9a5df935fcbd7770d | [
"MIT"
] | 28 | 2019-12-14T09:25:52.000Z | 2022-03-24T08:15:13.000Z | problems/cop/real/Fapp.py | xcsp3team/pycsp3 | a11bc370e34cd3fe37faeae9a5df935fcbd7770d | [
"MIT"
] | 7 | 2020-04-15T11:02:07.000Z | 2022-01-20T12:48:54.000Z | problems/cop/real/Fapp.py | xcsp3team/pycsp3 | a11bc370e34cd3fe37faeae9a5df935fcbd7770d | [
"MIT"
] | 3 | 2020-04-15T08:23:45.000Z | 2021-12-07T14:02:28.000Z | """
See Challenge ROADEF 2001 (FAPP: Problรจme d'affectation de frรฉquences avec polarization)
Examples of Execution:
python3 Fapp.py -data=Fapp_ex2.json
python3 Fapp.py -data=Fapp_ex2.json -variant=short
"""
from pycsp3 import *
domains, routes, hard_constraints, soft_constraints = data
domains = [domains[route.domain] for route in routes] # we skip the indirection
polarizations = [route.polarization for route in routes]
n, nSofts = len(routes), len(data.softs)
# f[i] is the frequency of the ith radio-link
f = VarArray(size=n, dom=lambda i: domains[i])
# p[i] is the polarization of the ith radio-link
p = VarArray(size=n, dom=lambda i: {0, 1} if polarizations[i] == 0 else {1} if polarizations[i] == 1 else {0})
# k is the relaxation level to be optimized
k = Var(dom=range(12))
# v1[q] is 1 iff the qth pair of radio-electric compatibility constraints is violated when relaxing another level
v1 = VarArray(size=nSofts, dom={0, 1})
# v2[q] is the number of times the qth pair of radio-electric compatibility constraints is violated when relaxing more than one level
v2 = VarArray(size=nSofts, dom=range(11))
satisfy(
# imperative constraints
dst == gap if eq else dst != gap for (dst, eq, gap) in [(abs(f[i] - f[j] if fq else p[i] - p[j]), eq, gap) for (i, j, fq, eq, gap) in hard_constraints]
)
if not variant():
satisfy(
# soft radio-electric compatibility constraints
(f[i], f[j], p[i], p[j], k, v1[l], v2[l]) in table_soft(i, j, tuple(eqr), tuple(ner), False) for l, (i, j, eqr, ner) in enumerate(soft_constraints)
)
elif variant("short"):
soft_links = [[False] * n for _ in range(n)]
for c in data.softs:
soft_links[c.route1][c.route2] = soft_links[c.route2][c.route1] = True
# d[i][j] is the distance between the ith and the jth frequencies (for i < j when a soft link exists)
d = VarArray(size=[n, n], dom=lambda i, j: {abs(f1 - f2) for f1 in domains[i] for f2 in domains[j]} if i < j and soft_links[i][j] else None)
satisfy(
# computing intermediary distances
[d[i][j] == abs(f[i] - f[j]) for i, j in combinations(range(n), 2) if d[i][j]],
# soft radio-electric compatibility constraints
[(d[min(i, j)][max(i, j)], p[i], p[j], k, v1[l], v2[l]) in table_soft(i, j, tuple(er), tuple(nr)) for l, (i, j, er, nr) in enumerate(soft_constraints)]
)
minimize(
k * (10 * nSofts ** 2) + Sum(v1) * (10 * nSofts) + Sum(v2)
)
""" Comments
1) we transform lists in tuples of relaxation arrays for speeding up calculations
2) when gap is 0, abs(x - y) == gap (resp., abs(x - y) != gap) is automatically simplified into x == y (resp., x != y)
"""
| 41.22 | 159 | 0.598011 | """
See Challenge ROADEF 2001 (FAPP: Problรจme d'affectation de frรฉquences avec polarization)
Examples of Execution:
python3 Fapp.py -data=Fapp_ex2.json
python3 Fapp.py -data=Fapp_ex2.json -variant=short
"""
from pycsp3 import *
domains, routes, hard_constraints, soft_constraints = data
domains = [domains[route.domain] for route in routes] # we skip the indirection
polarizations = [route.polarization for route in routes]
n, nSofts = len(routes), len(data.softs)
def table_soft(i, j, eq_relaxation, ne_relaxation, short_table=True):
def calculate_size():
for l in range(kl - 1):
if distance >= t[l]:
return l
return kl - 1
table = [] # we use a list instead of a set because is is quite faster to process
cache = {}
for f1 in domains[i]:
for f2 in domains[j]:
distance = abs(f1 - f2)
key = str(distance) + " " + str(polarizations[i]) + " " + str(polarizations[j])
if key not in cache:
suffixes = []
for pol in range(4):
p1 = 0 if pol < 2 else 1
p2 = 1 if pol in {1, 3} else 0
if (polarizations[i], p1) in [(1, 0), (-1, 1)] or (polarizations[j], p2) in [(1, 0), (-1, 1)]:
continue
t = eq_relaxation if p1 == p2 else ne_relaxation # eqRelaxations or neRelaxations
for kl in range(12):
if kl == 11 or distance >= t[kl]: # for kl=11, we suppose t[kl] = 0
suffixes.append((p1, p2, kl, 0 if kl == 0 or distance >= t[kl - 1] else 1, 0 if kl <= 1 else calculate_size()))
cache[key] = suffixes
elif short_table:
continue
for suffix in cache[key]:
table.append((distance, *suffix) if short_table else (f1, f2, *suffix))
return table
# f[i] is the frequency of the ith radio-link
f = VarArray(size=n, dom=lambda i: domains[i])
# p[i] is the polarization of the ith radio-link
p = VarArray(size=n, dom=lambda i: {0, 1} if polarizations[i] == 0 else {1} if polarizations[i] == 1 else {0})
# k is the relaxation level to be optimized
k = Var(dom=range(12))
# v1[q] is 1 iff the qth pair of radio-electric compatibility constraints is violated when relaxing another level
v1 = VarArray(size=nSofts, dom={0, 1})
# v2[q] is the number of times the qth pair of radio-electric compatibility constraints is violated when relaxing more than one level
v2 = VarArray(size=nSofts, dom=range(11))
satisfy(
# imperative constraints
dst == gap if eq else dst != gap for (dst, eq, gap) in [(abs(f[i] - f[j] if fq else p[i] - p[j]), eq, gap) for (i, j, fq, eq, gap) in hard_constraints]
)
if not variant():
satisfy(
# soft radio-electric compatibility constraints
(f[i], f[j], p[i], p[j], k, v1[l], v2[l]) in table_soft(i, j, tuple(eqr), tuple(ner), False) for l, (i, j, eqr, ner) in enumerate(soft_constraints)
)
elif variant("short"):
soft_links = [[False] * n for _ in range(n)]
for c in data.softs:
soft_links[c.route1][c.route2] = soft_links[c.route2][c.route1] = True
# d[i][j] is the distance between the ith and the jth frequencies (for i < j when a soft link exists)
d = VarArray(size=[n, n], dom=lambda i, j: {abs(f1 - f2) for f1 in domains[i] for f2 in domains[j]} if i < j and soft_links[i][j] else None)
satisfy(
# computing intermediary distances
[d[i][j] == abs(f[i] - f[j]) for i, j in combinations(range(n), 2) if d[i][j]],
# soft radio-electric compatibility constraints
[(d[min(i, j)][max(i, j)], p[i], p[j], k, v1[l], v2[l]) in table_soft(i, j, tuple(er), tuple(nr)) for l, (i, j, er, nr) in enumerate(soft_constraints)]
)
minimize(
k * (10 * nSofts ** 2) + Sum(v1) * (10 * nSofts) + Sum(v2)
)
""" Comments
1) we transform lists in tuples of relaxation arrays for speeding up calculations
2) when gap is 0, abs(x - y) == gap (resp., abs(x - y) != gap) is automatically simplified into x == y (resp., x != y)
"""
| 1,430 | 0 | 23 |
97bc762620822cae73e686437ed0f6274ebe8a23 | 19,334 | py | Python | tensorflow_checkpoint_reader/pb/tensorflow/lite/tools/evaluation/proto/preprocessing_steps_pb2.py | shawwn/tensorflow-checkpoint-reader | f0e65548411e3bd66a07e36bb1850907a05952d0 | [
"MIT"
] | 1 | 2021-12-02T15:06:09.000Z | 2021-12-02T15:06:09.000Z | tensorflow_checkpoint_reader/pb/tensorflow/lite/tools/evaluation/proto/preprocessing_steps_pb2.py | shawwn/tensorflow-checkpoint-reader | f0e65548411e3bd66a07e36bb1850907a05952d0 | [
"MIT"
] | null | null | null | tensorflow_checkpoint_reader/pb/tensorflow/lite/tools/evaluation/proto/preprocessing_steps_pb2.py | shawwn/tensorflow-checkpoint-reader | f0e65548411e3bd66a07e36bb1850907a05952d0 | [
"MIT"
] | null | null | null |
'Generated protocol buffer code.'
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
_sym_db = _symbol_database.Default()
DESCRIPTOR = _descriptor.FileDescriptor(name='tensorflow/lite/tools/evaluation/proto/preprocessing_steps.proto', package='tflite.evaluation', syntax='proto2', serialized_options=b'\n\x11tflite.evaluationP\x01\xf8\x01\x01', create_key=_descriptor._internal_create_key, serialized_pb=b'\n@tensorflow/lite/tools/evaluation/proto/preprocessing_steps.proto\x12\x11tflite.evaluation"\xa8\x02\n\x1cImagePreprocessingStepParams\x12<\n\x0fcropping_params\x18\x01 \x01(\x0b2!.tflite.evaluation.CroppingParamsH\x00\x12<\n\x0fresizing_params\x18\x02 \x01(\x0b2!.tflite.evaluation.ResizingParamsH\x00\x12:\n\x0epadding_params\x18\x03 \x01(\x0b2 .tflite.evaluation.PaddingParamsH\x00\x12F\n\x14normalization_params\x18\x04 \x01(\x0b2&.tflite.evaluation.NormalizationParamsH\x00B\x08\n\x06params"*\n\tImageSize\x12\r\n\x05width\x18\x01 \x02(\r\x12\x0e\n\x06height\x18\x02 \x02(\r"\x8c\x01\n\x0eCroppingParams\x12"\n\x11cropping_fraction\x18\x01 \x01(\x02:\x050.875H\x00\x123\n\x0btarget_size\x18\x02 \x01(\x0b2\x1c.tflite.evaluation.ImageSizeH\x00\x12\x17\n\x0fsquare_cropping\x18\x03 \x01(\x08B\x08\n\x06params"^\n\x0eResizingParams\x121\n\x0btarget_size\x18\x01 \x02(\x0b2\x1c.tflite.evaluation.ImageSize\x12\x19\n\x11aspect_preserving\x18\x02 \x02(\x08"\x7f\n\rPaddingParams\x123\n\x0btarget_size\x18\x01 \x01(\x0b2\x1c.tflite.evaluation.ImageSizeH\x00\x12\x18\n\x0esquare_padding\x18\x02 \x01(\x08H\x00\x12\x15\n\rpadding_value\x18\x03 \x02(\x05B\x08\n\x06params"\xe1\x01\n\x13NormalizationParams\x12\x1a\n\x10channelwise_mean\x18\x01 \x01(\x02H\x00\x12L\n\x05means\x18\x02 \x01(\x0b2;.tflite.evaluation.NormalizationParams.PerChannelMeanValuesH\x00\x12\x10\n\x05scale\x18\x03 \x02(\x02:\x011\x1aF\n\x14PerChannelMeanValues\x12\x0e\n\x06r_mean\x18\x01 \x02(\x02\x12\x0e\n\x06g_mean\x18\x02 \x02(\x02\x12\x0e\n\x06b_mean\x18\x03 \x02(\x02B\x06\n\x04meanB\x18\n\x11tflite.evaluationP\x01\xf8\x01\x01')
_IMAGEPREPROCESSINGSTEPPARAMS = _descriptor.Descriptor(name='ImagePreprocessingStepParams', full_name='tflite.evaluation.ImagePreprocessingStepParams', filename=None, file=DESCRIPTOR, containing_type=None, create_key=_descriptor._internal_create_key, fields=[_descriptor.FieldDescriptor(name='cropping_params', full_name='tflite.evaluation.ImagePreprocessingStepParams.cropping_params', index=0, number=1, type=11, cpp_type=10, label=1, has_default_value=False, default_value=None, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor(name='resizing_params', full_name='tflite.evaluation.ImagePreprocessingStepParams.resizing_params', index=1, number=2, type=11, cpp_type=10, label=1, has_default_value=False, default_value=None, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor(name='padding_params', full_name='tflite.evaluation.ImagePreprocessingStepParams.padding_params', index=2, number=3, type=11, cpp_type=10, label=1, has_default_value=False, default_value=None, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor(name='normalization_params', full_name='tflite.evaluation.ImagePreprocessingStepParams.normalization_params', index=3, number=4, type=11, cpp_type=10, label=1, has_default_value=False, default_value=None, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key)], extensions=[], nested_types=[], enum_types=[], serialized_options=None, 
is_extendable=False, syntax='proto2', extension_ranges=[], oneofs=[_descriptor.OneofDescriptor(name='params', full_name='tflite.evaluation.ImagePreprocessingStepParams.params', index=0, containing_type=None, create_key=_descriptor._internal_create_key, fields=[])], serialized_start=88, serialized_end=384)
_IMAGESIZE = _descriptor.Descriptor(name='ImageSize', full_name='tflite.evaluation.ImageSize', filename=None, file=DESCRIPTOR, containing_type=None, create_key=_descriptor._internal_create_key, fields=[_descriptor.FieldDescriptor(name='width', full_name='tflite.evaluation.ImageSize.width', index=0, number=1, type=13, cpp_type=3, label=2, has_default_value=False, default_value=0, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor(name='height', full_name='tflite.evaluation.ImageSize.height', index=1, number=2, type=13, cpp_type=3, label=2, has_default_value=False, default_value=0, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key)], extensions=[], nested_types=[], enum_types=[], serialized_options=None, is_extendable=False, syntax='proto2', extension_ranges=[], oneofs=[], serialized_start=386, serialized_end=428)
_CROPPINGPARAMS = _descriptor.Descriptor(name='CroppingParams', full_name='tflite.evaluation.CroppingParams', filename=None, file=DESCRIPTOR, containing_type=None, create_key=_descriptor._internal_create_key, fields=[_descriptor.FieldDescriptor(name='cropping_fraction', full_name='tflite.evaluation.CroppingParams.cropping_fraction', index=0, number=1, type=2, cpp_type=6, label=1, has_default_value=True, default_value=float(0.875), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor(name='target_size', full_name='tflite.evaluation.CroppingParams.target_size', index=1, number=2, type=11, cpp_type=10, label=1, has_default_value=False, default_value=None, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor(name='square_cropping', full_name='tflite.evaluation.CroppingParams.square_cropping', index=2, number=3, type=8, cpp_type=7, label=1, has_default_value=False, default_value=False, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key)], extensions=[], nested_types=[], enum_types=[], serialized_options=None, is_extendable=False, syntax='proto2', extension_ranges=[], oneofs=[_descriptor.OneofDescriptor(name='params', full_name='tflite.evaluation.CroppingParams.params', index=0, containing_type=None, create_key=_descriptor._internal_create_key, fields=[])], serialized_start=431, serialized_end=571)
_RESIZINGPARAMS = _descriptor.Descriptor(name='ResizingParams', full_name='tflite.evaluation.ResizingParams', filename=None, file=DESCRIPTOR, containing_type=None, create_key=_descriptor._internal_create_key, fields=[_descriptor.FieldDescriptor(name='target_size', full_name='tflite.evaluation.ResizingParams.target_size', index=0, number=1, type=11, cpp_type=10, label=2, has_default_value=False, default_value=None, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor(name='aspect_preserving', full_name='tflite.evaluation.ResizingParams.aspect_preserving', index=1, number=2, type=8, cpp_type=7, label=2, has_default_value=False, default_value=False, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key)], extensions=[], nested_types=[], enum_types=[], serialized_options=None, is_extendable=False, syntax='proto2', extension_ranges=[], oneofs=[], serialized_start=573, serialized_end=667)
_PADDINGPARAMS = _descriptor.Descriptor(name='PaddingParams', full_name='tflite.evaluation.PaddingParams', filename=None, file=DESCRIPTOR, containing_type=None, create_key=_descriptor._internal_create_key, fields=[_descriptor.FieldDescriptor(name='target_size', full_name='tflite.evaluation.PaddingParams.target_size', index=0, number=1, type=11, cpp_type=10, label=1, has_default_value=False, default_value=None, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor(name='square_padding', full_name='tflite.evaluation.PaddingParams.square_padding', index=1, number=2, type=8, cpp_type=7, label=1, has_default_value=False, default_value=False, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor(name='padding_value', full_name='tflite.evaluation.PaddingParams.padding_value', index=2, number=3, type=5, cpp_type=1, label=2, has_default_value=False, default_value=0, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key)], extensions=[], nested_types=[], enum_types=[], serialized_options=None, is_extendable=False, syntax='proto2', extension_ranges=[], oneofs=[_descriptor.OneofDescriptor(name='params', full_name='tflite.evaluation.PaddingParams.params', index=0, containing_type=None, create_key=_descriptor._internal_create_key, fields=[])], serialized_start=669, serialized_end=796)
_NORMALIZATIONPARAMS_PERCHANNELMEANVALUES = _descriptor.Descriptor(name='PerChannelMeanValues', full_name='tflite.evaluation.NormalizationParams.PerChannelMeanValues', filename=None, file=DESCRIPTOR, containing_type=None, create_key=_descriptor._internal_create_key, fields=[_descriptor.FieldDescriptor(name='r_mean', full_name='tflite.evaluation.NormalizationParams.PerChannelMeanValues.r_mean', index=0, number=1, type=2, cpp_type=6, label=2, has_default_value=False, default_value=float(0), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor(name='g_mean', full_name='tflite.evaluation.NormalizationParams.PerChannelMeanValues.g_mean', index=1, number=2, type=2, cpp_type=6, label=2, has_default_value=False, default_value=float(0), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor(name='b_mean', full_name='tflite.evaluation.NormalizationParams.PerChannelMeanValues.b_mean', index=2, number=3, type=2, cpp_type=6, label=2, has_default_value=False, default_value=float(0), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key)], extensions=[], nested_types=[], enum_types=[], serialized_options=None, is_extendable=False, syntax='proto2', extension_ranges=[], oneofs=[], serialized_start=946, serialized_end=1016)
_NORMALIZATIONPARAMS = _descriptor.Descriptor(name='NormalizationParams', full_name='tflite.evaluation.NormalizationParams', filename=None, file=DESCRIPTOR, containing_type=None, create_key=_descriptor._internal_create_key, fields=[_descriptor.FieldDescriptor(name='channelwise_mean', full_name='tflite.evaluation.NormalizationParams.channelwise_mean', index=0, number=1, type=2, cpp_type=6, label=1, has_default_value=False, default_value=float(0), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor(name='means', full_name='tflite.evaluation.NormalizationParams.means', index=1, number=2, type=11, cpp_type=10, label=1, has_default_value=False, default_value=None, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor(name='scale', full_name='tflite.evaluation.NormalizationParams.scale', index=2, number=3, type=2, cpp_type=6, label=2, has_default_value=True, default_value=float(1), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key)], extensions=[], nested_types=[_NORMALIZATIONPARAMS_PERCHANNELMEANVALUES], enum_types=[], serialized_options=None, is_extendable=False, syntax='proto2', extension_ranges=[], oneofs=[_descriptor.OneofDescriptor(name='mean', full_name='tflite.evaluation.NormalizationParams.mean', index=0, containing_type=None, create_key=_descriptor._internal_create_key, fields=[])], serialized_start=799, serialized_end=1024)
_IMAGEPREPROCESSINGSTEPPARAMS.fields_by_name['cropping_params'].message_type = _CROPPINGPARAMS
_IMAGEPREPROCESSINGSTEPPARAMS.fields_by_name['resizing_params'].message_type = _RESIZINGPARAMS
_IMAGEPREPROCESSINGSTEPPARAMS.fields_by_name['padding_params'].message_type = _PADDINGPARAMS
_IMAGEPREPROCESSINGSTEPPARAMS.fields_by_name['normalization_params'].message_type = _NORMALIZATIONPARAMS
_IMAGEPREPROCESSINGSTEPPARAMS.oneofs_by_name['params'].fields.append(_IMAGEPREPROCESSINGSTEPPARAMS.fields_by_name['cropping_params'])
_IMAGEPREPROCESSINGSTEPPARAMS.fields_by_name['cropping_params'].containing_oneof = _IMAGEPREPROCESSINGSTEPPARAMS.oneofs_by_name['params']
_IMAGEPREPROCESSINGSTEPPARAMS.oneofs_by_name['params'].fields.append(_IMAGEPREPROCESSINGSTEPPARAMS.fields_by_name['resizing_params'])
_IMAGEPREPROCESSINGSTEPPARAMS.fields_by_name['resizing_params'].containing_oneof = _IMAGEPREPROCESSINGSTEPPARAMS.oneofs_by_name['params']
_IMAGEPREPROCESSINGSTEPPARAMS.oneofs_by_name['params'].fields.append(_IMAGEPREPROCESSINGSTEPPARAMS.fields_by_name['padding_params'])
_IMAGEPREPROCESSINGSTEPPARAMS.fields_by_name['padding_params'].containing_oneof = _IMAGEPREPROCESSINGSTEPPARAMS.oneofs_by_name['params']
_IMAGEPREPROCESSINGSTEPPARAMS.oneofs_by_name['params'].fields.append(_IMAGEPREPROCESSINGSTEPPARAMS.fields_by_name['normalization_params'])
_IMAGEPREPROCESSINGSTEPPARAMS.fields_by_name['normalization_params'].containing_oneof = _IMAGEPREPROCESSINGSTEPPARAMS.oneofs_by_name['params']
_CROPPINGPARAMS.fields_by_name['target_size'].message_type = _IMAGESIZE
_CROPPINGPARAMS.oneofs_by_name['params'].fields.append(_CROPPINGPARAMS.fields_by_name['cropping_fraction'])
_CROPPINGPARAMS.fields_by_name['cropping_fraction'].containing_oneof = _CROPPINGPARAMS.oneofs_by_name['params']
_CROPPINGPARAMS.oneofs_by_name['params'].fields.append(_CROPPINGPARAMS.fields_by_name['target_size'])
_CROPPINGPARAMS.fields_by_name['target_size'].containing_oneof = _CROPPINGPARAMS.oneofs_by_name['params']
_RESIZINGPARAMS.fields_by_name['target_size'].message_type = _IMAGESIZE
_PADDINGPARAMS.fields_by_name['target_size'].message_type = _IMAGESIZE
_PADDINGPARAMS.oneofs_by_name['params'].fields.append(_PADDINGPARAMS.fields_by_name['target_size'])
_PADDINGPARAMS.fields_by_name['target_size'].containing_oneof = _PADDINGPARAMS.oneofs_by_name['params']
_PADDINGPARAMS.oneofs_by_name['params'].fields.append(_PADDINGPARAMS.fields_by_name['square_padding'])
_PADDINGPARAMS.fields_by_name['square_padding'].containing_oneof = _PADDINGPARAMS.oneofs_by_name['params']
_NORMALIZATIONPARAMS_PERCHANNELMEANVALUES.containing_type = _NORMALIZATIONPARAMS
_NORMALIZATIONPARAMS.fields_by_name['means'].message_type = _NORMALIZATIONPARAMS_PERCHANNELMEANVALUES
_NORMALIZATIONPARAMS.oneofs_by_name['mean'].fields.append(_NORMALIZATIONPARAMS.fields_by_name['channelwise_mean'])
_NORMALIZATIONPARAMS.fields_by_name['channelwise_mean'].containing_oneof = _NORMALIZATIONPARAMS.oneofs_by_name['mean']
_NORMALIZATIONPARAMS.oneofs_by_name['mean'].fields.append(_NORMALIZATIONPARAMS.fields_by_name['means'])
_NORMALIZATIONPARAMS.fields_by_name['means'].containing_oneof = _NORMALIZATIONPARAMS.oneofs_by_name['mean']
DESCRIPTOR.message_types_by_name['ImagePreprocessingStepParams'] = _IMAGEPREPROCESSINGSTEPPARAMS
DESCRIPTOR.message_types_by_name['ImageSize'] = _IMAGESIZE
DESCRIPTOR.message_types_by_name['CroppingParams'] = _CROPPINGPARAMS
DESCRIPTOR.message_types_by_name['ResizingParams'] = _RESIZINGPARAMS
DESCRIPTOR.message_types_by_name['PaddingParams'] = _PADDINGPARAMS
DESCRIPTOR.message_types_by_name['NormalizationParams'] = _NORMALIZATIONPARAMS
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
ImagePreprocessingStepParams = _reflection.GeneratedProtocolMessageType('ImagePreprocessingStepParams', (_message.Message,), {'DESCRIPTOR': _IMAGEPREPROCESSINGSTEPPARAMS, '__module__': 'tensorflow.lite.tools.evaluation.proto.preprocessing_steps_pb2'})
_sym_db.RegisterMessage(ImagePreprocessingStepParams)
ImageSize = _reflection.GeneratedProtocolMessageType('ImageSize', (_message.Message,), {'DESCRIPTOR': _IMAGESIZE, '__module__': 'tensorflow.lite.tools.evaluation.proto.preprocessing_steps_pb2'})
_sym_db.RegisterMessage(ImageSize)
CroppingParams = _reflection.GeneratedProtocolMessageType('CroppingParams', (_message.Message,), {'DESCRIPTOR': _CROPPINGPARAMS, '__module__': 'tensorflow.lite.tools.evaluation.proto.preprocessing_steps_pb2'})
_sym_db.RegisterMessage(CroppingParams)
ResizingParams = _reflection.GeneratedProtocolMessageType('ResizingParams', (_message.Message,), {'DESCRIPTOR': _RESIZINGPARAMS, '__module__': 'tensorflow.lite.tools.evaluation.proto.preprocessing_steps_pb2'})
_sym_db.RegisterMessage(ResizingParams)
PaddingParams = _reflection.GeneratedProtocolMessageType('PaddingParams', (_message.Message,), {'DESCRIPTOR': _PADDINGPARAMS, '__module__': 'tensorflow.lite.tools.evaluation.proto.preprocessing_steps_pb2'})
_sym_db.RegisterMessage(PaddingParams)
NormalizationParams = _reflection.GeneratedProtocolMessageType('NormalizationParams', (_message.Message,), {'PerChannelMeanValues': _reflection.GeneratedProtocolMessageType('PerChannelMeanValues', (_message.Message,), {'DESCRIPTOR': _NORMALIZATIONPARAMS_PERCHANNELMEANVALUES, '__module__': 'tensorflow.lite.tools.evaluation.proto.preprocessing_steps_pb2'}), 'DESCRIPTOR': _NORMALIZATIONPARAMS, '__module__': 'tensorflow.lite.tools.evaluation.proto.preprocessing_steps_pb2'})
_sym_db.RegisterMessage(NormalizationParams)
_sym_db.RegisterMessage(NormalizationParams.PerChannelMeanValues)
DESCRIPTOR._options = None
| 292.939394 | 2,286 | 0.843592 |
'Generated protocol buffer code.'
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
_sym_db = _symbol_database.Default()
DESCRIPTOR = _descriptor.FileDescriptor(name='tensorflow/lite/tools/evaluation/proto/preprocessing_steps.proto', package='tflite.evaluation', syntax='proto2', serialized_options=b'\n\x11tflite.evaluationP\x01\xf8\x01\x01', create_key=_descriptor._internal_create_key, serialized_pb=b'\n@tensorflow/lite/tools/evaluation/proto/preprocessing_steps.proto\x12\x11tflite.evaluation"\xa8\x02\n\x1cImagePreprocessingStepParams\x12<\n\x0fcropping_params\x18\x01 \x01(\x0b2!.tflite.evaluation.CroppingParamsH\x00\x12<\n\x0fresizing_params\x18\x02 \x01(\x0b2!.tflite.evaluation.ResizingParamsH\x00\x12:\n\x0epadding_params\x18\x03 \x01(\x0b2 .tflite.evaluation.PaddingParamsH\x00\x12F\n\x14normalization_params\x18\x04 \x01(\x0b2&.tflite.evaluation.NormalizationParamsH\x00B\x08\n\x06params"*\n\tImageSize\x12\r\n\x05width\x18\x01 \x02(\r\x12\x0e\n\x06height\x18\x02 \x02(\r"\x8c\x01\n\x0eCroppingParams\x12"\n\x11cropping_fraction\x18\x01 \x01(\x02:\x050.875H\x00\x123\n\x0btarget_size\x18\x02 \x01(\x0b2\x1c.tflite.evaluation.ImageSizeH\x00\x12\x17\n\x0fsquare_cropping\x18\x03 \x01(\x08B\x08\n\x06params"^\n\x0eResizingParams\x121\n\x0btarget_size\x18\x01 \x02(\x0b2\x1c.tflite.evaluation.ImageSize\x12\x19\n\x11aspect_preserving\x18\x02 \x02(\x08"\x7f\n\rPaddingParams\x123\n\x0btarget_size\x18\x01 \x01(\x0b2\x1c.tflite.evaluation.ImageSizeH\x00\x12\x18\n\x0esquare_padding\x18\x02 \x01(\x08H\x00\x12\x15\n\rpadding_value\x18\x03 \x02(\x05B\x08\n\x06params"\xe1\x01\n\x13NormalizationParams\x12\x1a\n\x10channelwise_mean\x18\x01 \x01(\x02H\x00\x12L\n\x05means\x18\x02 \x01(\x0b2;.tflite.evaluation.NormalizationParams.PerChannelMeanValuesH\x00\x12\x10\n\x05scale\x18\x03 \x02(\x02:\x011\x1aF\n\x14PerChannelMeanValues\x12\x0e\n\x06r_mean\x18\x01 \x02(\x02\x12\x0e\n\x06g_mean\x18\x02 \x02(\x02\x12\x0e\n\x06b_mean\x18\x03 \x02(\x02B\x06\n\x04meanB\x18\n\x11tflite.evaluationP\x01\xf8\x01\x01')
_IMAGEPREPROCESSINGSTEPPARAMS = _descriptor.Descriptor(name='ImagePreprocessingStepParams', full_name='tflite.evaluation.ImagePreprocessingStepParams', filename=None, file=DESCRIPTOR, containing_type=None, create_key=_descriptor._internal_create_key, fields=[_descriptor.FieldDescriptor(name='cropping_params', full_name='tflite.evaluation.ImagePreprocessingStepParams.cropping_params', index=0, number=1, type=11, cpp_type=10, label=1, has_default_value=False, default_value=None, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor(name='resizing_params', full_name='tflite.evaluation.ImagePreprocessingStepParams.resizing_params', index=1, number=2, type=11, cpp_type=10, label=1, has_default_value=False, default_value=None, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor(name='padding_params', full_name='tflite.evaluation.ImagePreprocessingStepParams.padding_params', index=2, number=3, type=11, cpp_type=10, label=1, has_default_value=False, default_value=None, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor(name='normalization_params', full_name='tflite.evaluation.ImagePreprocessingStepParams.normalization_params', index=3, number=4, type=11, cpp_type=10, label=1, has_default_value=False, default_value=None, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key)], extensions=[], nested_types=[], enum_types=[], serialized_options=None, 
is_extendable=False, syntax='proto2', extension_ranges=[], oneofs=[_descriptor.OneofDescriptor(name='params', full_name='tflite.evaluation.ImagePreprocessingStepParams.params', index=0, containing_type=None, create_key=_descriptor._internal_create_key, fields=[])], serialized_start=88, serialized_end=384)
_IMAGESIZE = _descriptor.Descriptor(name='ImageSize', full_name='tflite.evaluation.ImageSize', filename=None, file=DESCRIPTOR, containing_type=None, create_key=_descriptor._internal_create_key, fields=[_descriptor.FieldDescriptor(name='width', full_name='tflite.evaluation.ImageSize.width', index=0, number=1, type=13, cpp_type=3, label=2, has_default_value=False, default_value=0, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor(name='height', full_name='tflite.evaluation.ImageSize.height', index=1, number=2, type=13, cpp_type=3, label=2, has_default_value=False, default_value=0, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key)], extensions=[], nested_types=[], enum_types=[], serialized_options=None, is_extendable=False, syntax='proto2', extension_ranges=[], oneofs=[], serialized_start=386, serialized_end=428)
_CROPPINGPARAMS = _descriptor.Descriptor(name='CroppingParams', full_name='tflite.evaluation.CroppingParams', filename=None, file=DESCRIPTOR, containing_type=None, create_key=_descriptor._internal_create_key, fields=[_descriptor.FieldDescriptor(name='cropping_fraction', full_name='tflite.evaluation.CroppingParams.cropping_fraction', index=0, number=1, type=2, cpp_type=6, label=1, has_default_value=True, default_value=float(0.875), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor(name='target_size', full_name='tflite.evaluation.CroppingParams.target_size', index=1, number=2, type=11, cpp_type=10, label=1, has_default_value=False, default_value=None, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor(name='square_cropping', full_name='tflite.evaluation.CroppingParams.square_cropping', index=2, number=3, type=8, cpp_type=7, label=1, has_default_value=False, default_value=False, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key)], extensions=[], nested_types=[], enum_types=[], serialized_options=None, is_extendable=False, syntax='proto2', extension_ranges=[], oneofs=[_descriptor.OneofDescriptor(name='params', full_name='tflite.evaluation.CroppingParams.params', index=0, containing_type=None, create_key=_descriptor._internal_create_key, fields=[])], serialized_start=431, serialized_end=571)
_RESIZINGPARAMS = _descriptor.Descriptor(name='ResizingParams', full_name='tflite.evaluation.ResizingParams', filename=None, file=DESCRIPTOR, containing_type=None, create_key=_descriptor._internal_create_key, fields=[_descriptor.FieldDescriptor(name='target_size', full_name='tflite.evaluation.ResizingParams.target_size', index=0, number=1, type=11, cpp_type=10, label=2, has_default_value=False, default_value=None, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor(name='aspect_preserving', full_name='tflite.evaluation.ResizingParams.aspect_preserving', index=1, number=2, type=8, cpp_type=7, label=2, has_default_value=False, default_value=False, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key)], extensions=[], nested_types=[], enum_types=[], serialized_options=None, is_extendable=False, syntax='proto2', extension_ranges=[], oneofs=[], serialized_start=573, serialized_end=667)
_PADDINGPARAMS = _descriptor.Descriptor(name='PaddingParams', full_name='tflite.evaluation.PaddingParams', filename=None, file=DESCRIPTOR, containing_type=None, create_key=_descriptor._internal_create_key, fields=[_descriptor.FieldDescriptor(name='target_size', full_name='tflite.evaluation.PaddingParams.target_size', index=0, number=1, type=11, cpp_type=10, label=1, has_default_value=False, default_value=None, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor(name='square_padding', full_name='tflite.evaluation.PaddingParams.square_padding', index=1, number=2, type=8, cpp_type=7, label=1, has_default_value=False, default_value=False, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor(name='padding_value', full_name='tflite.evaluation.PaddingParams.padding_value', index=2, number=3, type=5, cpp_type=1, label=2, has_default_value=False, default_value=0, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key)], extensions=[], nested_types=[], enum_types=[], serialized_options=None, is_extendable=False, syntax='proto2', extension_ranges=[], oneofs=[_descriptor.OneofDescriptor(name='params', full_name='tflite.evaluation.PaddingParams.params', index=0, containing_type=None, create_key=_descriptor._internal_create_key, fields=[])], serialized_start=669, serialized_end=796)
_NORMALIZATIONPARAMS_PERCHANNELMEANVALUES = _descriptor.Descriptor(name='PerChannelMeanValues', full_name='tflite.evaluation.NormalizationParams.PerChannelMeanValues', filename=None, file=DESCRIPTOR, containing_type=None, create_key=_descriptor._internal_create_key, fields=[_descriptor.FieldDescriptor(name='r_mean', full_name='tflite.evaluation.NormalizationParams.PerChannelMeanValues.r_mean', index=0, number=1, type=2, cpp_type=6, label=2, has_default_value=False, default_value=float(0), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor(name='g_mean', full_name='tflite.evaluation.NormalizationParams.PerChannelMeanValues.g_mean', index=1, number=2, type=2, cpp_type=6, label=2, has_default_value=False, default_value=float(0), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor(name='b_mean', full_name='tflite.evaluation.NormalizationParams.PerChannelMeanValues.b_mean', index=2, number=3, type=2, cpp_type=6, label=2, has_default_value=False, default_value=float(0), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key)], extensions=[], nested_types=[], enum_types=[], serialized_options=None, is_extendable=False, syntax='proto2', extension_ranges=[], oneofs=[], serialized_start=946, serialized_end=1016)
_NORMALIZATIONPARAMS = _descriptor.Descriptor(name='NormalizationParams', full_name='tflite.evaluation.NormalizationParams', filename=None, file=DESCRIPTOR, containing_type=None, create_key=_descriptor._internal_create_key, fields=[_descriptor.FieldDescriptor(name='channelwise_mean', full_name='tflite.evaluation.NormalizationParams.channelwise_mean', index=0, number=1, type=2, cpp_type=6, label=1, has_default_value=False, default_value=float(0), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor(name='means', full_name='tflite.evaluation.NormalizationParams.means', index=1, number=2, type=11, cpp_type=10, label=1, has_default_value=False, default_value=None, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), _descriptor.FieldDescriptor(name='scale', full_name='tflite.evaluation.NormalizationParams.scale', index=2, number=3, type=2, cpp_type=6, label=2, has_default_value=True, default_value=float(1), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key)], extensions=[], nested_types=[_NORMALIZATIONPARAMS_PERCHANNELMEANVALUES], enum_types=[], serialized_options=None, is_extendable=False, syntax='proto2', extension_ranges=[], oneofs=[_descriptor.OneofDescriptor(name='mean', full_name='tflite.evaluation.NormalizationParams.mean', index=0, containing_type=None, create_key=_descriptor._internal_create_key, fields=[])], serialized_start=799, serialized_end=1024)
# --- Post-construction wiring of the descriptors defined above ---
# Resolve message-typed fields to their Descriptor objects and attach each
# oneof's member fields. Machine-generated: do not edit by hand.
_IMAGEPREPROCESSINGSTEPPARAMS.fields_by_name['cropping_params'].message_type = _CROPPINGPARAMS
_IMAGEPREPROCESSINGSTEPPARAMS.fields_by_name['resizing_params'].message_type = _RESIZINGPARAMS
_IMAGEPREPROCESSINGSTEPPARAMS.fields_by_name['padding_params'].message_type = _PADDINGPARAMS
_IMAGEPREPROCESSINGSTEPPARAMS.fields_by_name['normalization_params'].message_type = _NORMALIZATIONPARAMS
# ImagePreprocessingStepParams.params oneof membership.
_IMAGEPREPROCESSINGSTEPPARAMS.oneofs_by_name['params'].fields.append(_IMAGEPREPROCESSINGSTEPPARAMS.fields_by_name['cropping_params'])
_IMAGEPREPROCESSINGSTEPPARAMS.fields_by_name['cropping_params'].containing_oneof = _IMAGEPREPROCESSINGSTEPPARAMS.oneofs_by_name['params']
_IMAGEPREPROCESSINGSTEPPARAMS.oneofs_by_name['params'].fields.append(_IMAGEPREPROCESSINGSTEPPARAMS.fields_by_name['resizing_params'])
_IMAGEPREPROCESSINGSTEPPARAMS.fields_by_name['resizing_params'].containing_oneof = _IMAGEPREPROCESSINGSTEPPARAMS.oneofs_by_name['params']
_IMAGEPREPROCESSINGSTEPPARAMS.oneofs_by_name['params'].fields.append(_IMAGEPREPROCESSINGSTEPPARAMS.fields_by_name['padding_params'])
_IMAGEPREPROCESSINGSTEPPARAMS.fields_by_name['padding_params'].containing_oneof = _IMAGEPREPROCESSINGSTEPPARAMS.oneofs_by_name['params']
_IMAGEPREPROCESSINGSTEPPARAMS.oneofs_by_name['params'].fields.append(_IMAGEPREPROCESSINGSTEPPARAMS.fields_by_name['normalization_params'])
_IMAGEPREPROCESSINGSTEPPARAMS.fields_by_name['normalization_params'].containing_oneof = _IMAGEPREPROCESSINGSTEPPARAMS.oneofs_by_name['params']
# CroppingParams.params oneof membership.
_CROPPINGPARAMS.fields_by_name['target_size'].message_type = _IMAGESIZE
_CROPPINGPARAMS.oneofs_by_name['params'].fields.append(_CROPPINGPARAMS.fields_by_name['cropping_fraction'])
_CROPPINGPARAMS.fields_by_name['cropping_fraction'].containing_oneof = _CROPPINGPARAMS.oneofs_by_name['params']
_CROPPINGPARAMS.oneofs_by_name['params'].fields.append(_CROPPINGPARAMS.fields_by_name['target_size'])
_CROPPINGPARAMS.fields_by_name['target_size'].containing_oneof = _CROPPINGPARAMS.oneofs_by_name['params']
_RESIZINGPARAMS.fields_by_name['target_size'].message_type = _IMAGESIZE
# PaddingParams.params oneof membership.
_PADDINGPARAMS.fields_by_name['target_size'].message_type = _IMAGESIZE
_PADDINGPARAMS.oneofs_by_name['params'].fields.append(_PADDINGPARAMS.fields_by_name['target_size'])
_PADDINGPARAMS.fields_by_name['target_size'].containing_oneof = _PADDINGPARAMS.oneofs_by_name['params']
_PADDINGPARAMS.oneofs_by_name['params'].fields.append(_PADDINGPARAMS.fields_by_name['square_padding'])
_PADDINGPARAMS.fields_by_name['square_padding'].containing_oneof = _PADDINGPARAMS.oneofs_by_name['params']
# NormalizationParams: nested type containment and .mean oneof membership.
_NORMALIZATIONPARAMS_PERCHANNELMEANVALUES.containing_type = _NORMALIZATIONPARAMS
_NORMALIZATIONPARAMS.fields_by_name['means'].message_type = _NORMALIZATIONPARAMS_PERCHANNELMEANVALUES
_NORMALIZATIONPARAMS.oneofs_by_name['mean'].fields.append(_NORMALIZATIONPARAMS.fields_by_name['channelwise_mean'])
_NORMALIZATIONPARAMS.fields_by_name['channelwise_mean'].containing_oneof = _NORMALIZATIONPARAMS.oneofs_by_name['mean']
_NORMALIZATIONPARAMS.oneofs_by_name['mean'].fields.append(_NORMALIZATIONPARAMS.fields_by_name['means'])
_NORMALIZATIONPARAMS.fields_by_name['means'].containing_oneof = _NORMALIZATIONPARAMS.oneofs_by_name['mean']
# Register all top-level message types on the file descriptor.
DESCRIPTOR.message_types_by_name['ImagePreprocessingStepParams'] = _IMAGEPREPROCESSINGSTEPPARAMS
DESCRIPTOR.message_types_by_name['ImageSize'] = _IMAGESIZE
DESCRIPTOR.message_types_by_name['CroppingParams'] = _CROPPINGPARAMS
DESCRIPTOR.message_types_by_name['ResizingParams'] = _RESIZINGPARAMS
DESCRIPTOR.message_types_by_name['PaddingParams'] = _PADDINGPARAMS
DESCRIPTOR.message_types_by_name['NormalizationParams'] = _NORMALIZATIONPARAMS
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
# --- Concrete message classes ---
# One Message subclass per descriptor, registered with the default symbol
# database. Machine-generated: do not edit by hand.
ImagePreprocessingStepParams = _reflection.GeneratedProtocolMessageType('ImagePreprocessingStepParams', (_message.Message,), {'DESCRIPTOR': _IMAGEPREPROCESSINGSTEPPARAMS, '__module__': 'tensorflow.lite.tools.evaluation.proto.preprocessing_steps_pb2'})
_sym_db.RegisterMessage(ImagePreprocessingStepParams)
ImageSize = _reflection.GeneratedProtocolMessageType('ImageSize', (_message.Message,), {'DESCRIPTOR': _IMAGESIZE, '__module__': 'tensorflow.lite.tools.evaluation.proto.preprocessing_steps_pb2'})
_sym_db.RegisterMessage(ImageSize)
CroppingParams = _reflection.GeneratedProtocolMessageType('CroppingParams', (_message.Message,), {'DESCRIPTOR': _CROPPINGPARAMS, '__module__': 'tensorflow.lite.tools.evaluation.proto.preprocessing_steps_pb2'})
_sym_db.RegisterMessage(CroppingParams)
ResizingParams = _reflection.GeneratedProtocolMessageType('ResizingParams', (_message.Message,), {'DESCRIPTOR': _RESIZINGPARAMS, '__module__': 'tensorflow.lite.tools.evaluation.proto.preprocessing_steps_pb2'})
_sym_db.RegisterMessage(ResizingParams)
PaddingParams = _reflection.GeneratedProtocolMessageType('PaddingParams', (_message.Message,), {'DESCRIPTOR': _PADDINGPARAMS, '__module__': 'tensorflow.lite.tools.evaluation.proto.preprocessing_steps_pb2'})
_sym_db.RegisterMessage(PaddingParams)
# NormalizationParams carries a nested PerChannelMeanValues message type.
NormalizationParams = _reflection.GeneratedProtocolMessageType('NormalizationParams', (_message.Message,), {'PerChannelMeanValues': _reflection.GeneratedProtocolMessageType('PerChannelMeanValues', (_message.Message,), {'DESCRIPTOR': _NORMALIZATIONPARAMS_PERCHANNELMEANVALUES, '__module__': 'tensorflow.lite.tools.evaluation.proto.preprocessing_steps_pb2'}), 'DESCRIPTOR': _NORMALIZATIONPARAMS, '__module__': 'tensorflow.lite.tools.evaluation.proto.preprocessing_steps_pb2'})
_sym_db.RegisterMessage(NormalizationParams)
_sym_db.RegisterMessage(NormalizationParams.PerChannelMeanValues)
# Clear file-level options (the serialized options were consumed above).
DESCRIPTOR._options = None
| 0 | 0 | 0 |
5e367700e918548627037907e67e38bcd6b412d8 | 79 | py | Python | tests/test_app.py | troyan-dy/secret-transferring-service | c7bff7a9696c39fb8c31cecaa17cdbd6969d69ae | [
"MIT"
] | 1 | 2021-03-20T17:59:45.000Z | 2021-03-20T17:59:45.000Z | tests/test_app.py | troyan-dy/secret-transferring-service | c7bff7a9696c39fb8c31cecaa17cdbd6969d69ae | [
"MIT"
] | null | null | null | tests/test_app.py | troyan-dy/secret-transferring-service | c7bff7a9696c39fb8c31cecaa17cdbd6969d69ae | [
"MIT"
] | null | null | null | import pytest
@pytest.mark.asyncio
| 11.285714 | 25 | 0.746835 | import pytest
@pytest.mark.asyncio
async def test_request():
    """Smoke test: confirm the asyncio pytest plugin executes coroutine tests."""
    harness_is_wired = True
    assert harness_is_wired
| 20 | 0 | 22 |
2ffd154bcadc5a7cba4f2b152013f1040183cb56 | 908 | py | Python | scheduler/migrations/0001_initial.py | theju/smp | bee0849a5599bb9635f65f298889c0ea7ea6b46f | [
"MIT"
] | 16 | 2015-12-31T20:52:50.000Z | 2022-02-20T16:47:29.000Z | scheduler/migrations/0001_initial.py | theju/smp | bee0849a5599bb9635f65f298889c0ea7ea6b46f | [
"MIT"
] | null | null | null | scheduler/migrations/0001_initial.py | theju/smp | bee0849a5599bb9635f65f298889c0ea7ea6b46f | [
"MIT"
] | 6 | 2016-06-06T20:26:54.000Z | 2021-02-11T23:07:53.000Z | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
from django.conf import settings
| 33.62963 | 125 | 0.609031 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
from django.conf import settings
class Migration(migrations.Migration):
    """Initial migration: create the ScheduledPost table.

    Auto-generated by Django's makemigrations; edit with care, because the
    applied-migration history must stay consistent with this file.
    """

    # The configured user model (settings.AUTH_USER_MODEL) must exist before
    # this migration runs, so the ForeignKey below can reference it.
    dependencies = [
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
    ]

    operations = [
        migrations.CreateModel(
            name='ScheduledPost',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('status', models.TextField()),
                # Bytes choices (b'...') suggest Python-2-era Django -- TODO confirm.
                ('service', models.CharField(max_length=20, choices=[(b'facebook', b'Facebook'), (b'twitter', b'Twitter')])),
                ('scheduled_datetime', models.DateTimeField()),
                ('is_posted', models.BooleanField(default=False)),
                # NOTE(review): no on_delete argument -- valid only on Django < 2.0,
                # where it defaulted to CASCADE; confirm the targeted Django version.
                ('user', models.ForeignKey(to=settings.AUTH_USER_MODEL)),
            ],
        ),
    ]
| 0 | 745 | 23 |
8107e1c4df5175b20a4a3a50cb72501b8a93d9be | 3,101 | py | Python | 4.tweet_preprocessing/tweet_preprocess_part1.py | esh-b/Resources-for-Twitter-related-projects | 28f2b54b47b76fdb5d4804692c13bccfef1805a0 | [
"BSD-3-Clause"
] | 2 | 2019-12-10T21:54:48.000Z | 2020-06-30T20:37:57.000Z | 4.tweet_preprocessing/tweet_preprocess_part1.py | esh-b/Resources-for-Twitter-related-projects | 28f2b54b47b76fdb5d4804692c13bccfef1805a0 | [
"BSD-3-Clause"
] | null | null | null | 4.tweet_preprocessing/tweet_preprocess_part1.py | esh-b/Resources-for-Twitter-related-projects | 28f2b54b47b76fdb5d4804692c13bccfef1805a0 | [
"BSD-3-Clause"
] | null | null | null | """
Part 1 of the tweet preprocessing phase
Lang: py3
"""
import json
import re
import csv
import sys
# Directory where the part-1 output CSV is written.
# NOTE(review): `os` is used here but never imported in this module
# (`import os` is missing from the import block above) -- this line raises
# NameError at import time; confirm and add the import.
OUTPUT_DIR = os.path.join(os.getcwd(), 'part1_output')
# Unicode ranges covering common emoji/pictographs, stripped from tweet text.
EMOJI_PATTERN = re.compile("["
        u"\U0001F600-\U0001F64F" # emoticons
        u"\U0001F300-\U0001F5FF" # symbols & pictographs
        u"\U0001F680-\U0001F6FF" # transport & map symbols
        u"\U0001F1E0-\U0001F1FF" # flags (iOS)
        "]+", flags=re.UNICODE)
# Matches a URL: "http" followed by any run of non-space characters.
URL_PATTERN = re.compile('http\S+')
#Method to replace usermentions with actual username
# NOTE(review): the comment above refers to a replaceMentions() helper that is
# not present in this copy of the file -- confirm whether it should exist here.
if __name__ == "__main__":
    # `os` is needed for the path handling below but is missing from this
    # module's import block. NOTE(review): the module-level OUTPUT_DIR
    # assignment also uses os -- `import os` should be added at file top.
    import os

    # Exactly one argument is expected: the path to the tweet dump
    # (one JSON tweet object per line).
    if len(sys.argv) != 2:
        print("Usage: tweet_preprocess_part1.py <TWEET_DUMP_FILEPATH>")
        sys.exit()

    # Input filepath
    input_filepath = sys.argv[1]

    # Output CSV lives in OUTPUT_DIR, e.g. X/Y/input.jsonl -> input_part1_results.csv.
    # os.path.join fixes the original bare string concatenation, which omitted
    # the path separator between directory and file name; the directory is
    # created on demand so the open() below cannot fail on a missing folder.
    os.makedirs(OUTPUT_DIR, exist_ok=True)
    output_filepath = os.path.join(
        OUTPUT_DIR,
        input_filepath.split("/")[-1].split(".")[0] + "_part1_results.csv")

    try:
        # newline='' is the csv module's documented requirement for output files.
        g = open(output_filepath, "w", newline='')
    except IOError:
        print("Error while creating new file!!!")
        sys.exit()

    # `with` guarantees the output file is closed even if a malformed tweet in
    # the middle of the dump raises (the original leaked the handle then).
    with g:
        writer = csv.writer(g, delimiter=',', quotechar='"',
                            quoting=csv.QUOTE_ALL)
        writer.writerow(["tweet_id", "actual_text", "preprocess_part1_results"])

        with open(input_filepath) as f:
            count = 0
            for line in f:
                # Load the tweet info from the tweet dump.
                json_line = json.loads(line)

                # Clean the full text: flatten newlines, strip emojis and URLs.
                text = json_line['full_text']
                text = text.replace("\n", ' ')
                text = EMOJI_PATTERN.sub('', text)
                text = URL_PATTERN.sub('', text)

                # Split into words; filter(None, ...) drops empty strings.
                text = list(filter(None, text.split(" ")))

                # Mentions attached to this tweet (screen_name/name pairs).
                # NOTE(review): currently unused below -- confirm intended use.
                user_mentions = json_line['entities']['user_mentions']

                # The trailing-tag stripping below indexes the last word, so
                # guard against text that became empty after cleaning (the
                # original raised IndexError for such tweets).
                if text:
                    # lastPP: the tweet currently ends with a '#'/'@' token.
                    lastPP = text[-1].startswith(("#", "@"))

                    # Walk backwards, blanking '#'/'@' tokens belonging to an
                    # unbroken trailing run (same logic as the original loop;
                    # parentheses preserve the original or/and precedence).
                    for i in range(len(text) - 1, 0, -1):
                        if text[i].startswith("@") or (text[i].startswith("#") and lastPP):
                            if text[i - 1].startswith(("#", "@")):
                                text[i] = ""
                        else:
                            lastPP = False

                # Drop any tokens blanked above and re-join into one string.
                text = ' '.join(filter(None, text))

                # Write to file.
                writer.writerow([json_line["id_str"], json_line['full_text'], text])

                count += 1
                if count % 5000 == 0:
                    print("Part1: Processed", count, "tweets...")

    print("Part1 of preprocessing done....you can now run the part2 code to further preprocess your tweet text.")
| 29.533333 | 110 | 0.665592 | """
Part 1 of the tweet preprocessing phase
Lang: py3
"""
import csv
import json
import os
import re
import sys
# Directory where the part-1 output CSV is written (created relative to the
# current working directory at import time).
OUTPUT_DIR = os.path.join(os.getcwd(), 'part1_output')
# Unicode ranges covering common emoji/pictographs, stripped from tweet text.
EMOJI_PATTERN = re.compile("["
        u"\U0001F600-\U0001F64F" # emoticons
        u"\U0001F300-\U0001F5FF" # symbols & pictographs
        u"\U0001F680-\U0001F6FF" # transport & map symbols
        u"\U0001F1E0-\U0001F1FF" # flags (iOS)
        "]+", flags=re.UNICODE)
# Matches a URL: "http" followed by any run of non-space characters.
URL_PATTERN = re.compile('http\S+')
#Method to replace usermentions with actual username
def replaceMentions(token, user_mentions):
    """Return the display name for a mention token such as '@handle'.

    Looks up token[1:] (the handle with its leading character removed) among
    the tweet's user_mentions entries and returns the matching 'name' value,
    or None when no entry matches.
    """
    handle = token[1:]
    for mention in user_mentions:
        if mention['screen_name'] == handle:
            return mention['name']
    return None
if __name__ == "__main__":
    # Exactly one argument is expected: the path to the tweet dump
    # (one JSON tweet object per line).
    if len(sys.argv) != 2:
        print("Usage: tweet_preprocess_part1.py <TWEET_DUMP_FILEPATH>")
        sys.exit()

    # Input filepath
    input_filepath = sys.argv[1]

    # Output CSV lives in OUTPUT_DIR, e.g. X/Y/input.jsonl -> input_part1_results.csv.
    # os.path.join fixes the original bare string concatenation, which omitted
    # the path separator between directory and file name; the directory is
    # created on demand so the open() below cannot fail on a missing folder.
    os.makedirs(OUTPUT_DIR, exist_ok=True)
    output_filepath = os.path.join(
        OUTPUT_DIR,
        input_filepath.split("/")[-1].split(".")[0] + "_part1_results.csv")

    try:
        # newline='' is the csv module's documented requirement for output files.
        g = open(output_filepath, "w", newline='')
    except IOError:
        print("Error while creating new file!!!")
        sys.exit()

    # `with` guarantees the output file is closed even if a malformed tweet in
    # the middle of the dump raises (the original leaked the handle then).
    with g:
        writer = csv.writer(g, delimiter=',', quotechar='"',
                            quoting=csv.QUOTE_ALL)
        writer.writerow(["tweet_id", "actual_text", "preprocess_part1_results"])

        with open(input_filepath) as f:
            count = 0
            for line in f:
                # Load the tweet info from the tweet dump.
                json_line = json.loads(line)

                # Clean the full text: flatten newlines, strip emojis and URLs.
                text = json_line['full_text']
                text = text.replace("\n", ' ')
                text = EMOJI_PATTERN.sub('', text)
                text = URL_PATTERN.sub('', text)

                # Split into words; filter(None, ...) drops empty strings.
                text = list(filter(None, text.split(" ")))

                # Mentions attached to this tweet (screen_name/name pairs).
                # NOTE(review): unused below even though replaceMentions()
                # exists to resolve them -- confirm intended behaviour.
                user_mentions = json_line['entities']['user_mentions']

                # The trailing-tag stripping below indexes the last word, so
                # guard against text that became empty after cleaning (the
                # original raised IndexError for such tweets).
                if text:
                    # lastPP: the tweet currently ends with a '#'/'@' token.
                    lastPP = text[-1].startswith(("#", "@"))

                    # Walk backwards, blanking '#'/'@' tokens belonging to an
                    # unbroken trailing run (same logic as the original loop;
                    # parentheses preserve the original or/and precedence).
                    for i in range(len(text) - 1, 0, -1):
                        if text[i].startswith("@") or (text[i].startswith("#") and lastPP):
                            if text[i - 1].startswith(("#", "@")):
                                text[i] = ""
                        else:
                            lastPP = False

                # Drop any tokens blanked above and re-join into one string.
                text = ' '.join(filter(None, text))

                # Write to file.
                writer.writerow([json_line["id_str"], json_line['full_text'], text])

                count += 1
                if count % 5000 == 0:
                    print("Part1: Processed", count, "tweets...")

    print("Part1 of preprocessing done....you can now run the part2 code to further preprocess your tweet text.")
| 159 | 0 | 22 |
be363a277e03a916241827d0dad6a6e5bee94e45 | 4,531 | py | Python | src/plot_generation_script.py | NicholasJohnson2020/Sequential-Network-Structure-Optimization | c4e141062ecaec70cfd89e7fa5b06389d4e2c879 | [
"MIT"
] | null | null | null | src/plot_generation_script.py | NicholasJohnson2020/Sequential-Network-Structure-Optimization | c4e141062ecaec70cfd89e7fa5b06389d4e2c879 | [
"MIT"
] | null | null | null | src/plot_generation_script.py | NicholasJohnson2020/Sequential-Network-Structure-Optimization | c4e141062ecaec70cfd89e7fa5b06389d4e2c879 | [
"MIT"
] | null | null | null | import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import sys
import pickle
# Verify the number of command line arguments.
# An explicit check (rather than `assert`, which is stripped under `python -O`)
# gives the user a usage message instead of a traceback.
if len(sys.argv) != 5:
    sys.exit("Usage: plot_generation_script.py "
             "<num_agents_pickle> <T_pickle> <m_pickle> <output_path_root>")

num_agents_path = sys.argv[1]
T_path = sys.argv[2]
m_path = sys.argv[3]
output_path_root = sys.argv[4]

# Load the experiment results; each pickle holds the data for one swept
# parameter. NOTE(review): pickle.load must only be used on trusted files.
data = {}
for key, path in (('num_agents', num_agents_path),
                  ('T', T_path),
                  ('m', m_path)):
    with open(path, 'rb') as handle:
        data[key] = pickle.load(handle)
# Use a serif font for all figure text.
plt.rcParams["font.family"] = "Times New Roman"
# Policies to plot, in legend order.
# NOTE(review): 'Modifed Reconnect' (sic) matches the key used in the result
# pickles -- do not correct the spelling here without regenerating the data.
policies = ['Control',
            'Perpetual Random',
            'Initial Random',
            'Myopic',
            'One Step Lookahead',
            'Modifed Reconnect']
# Matplotlib marker style for each policy's line.
marker_dict = {'Control': 'o',
               'Perpetual Random': 'v',
               'Initial Random': '^',
               'Myopic': 's',
               'One Step Lookahead': 'd',
               'Modifed Reconnect': 'X'}
# (obj_mode, param, value) combinations whose mean objective is plotted
# divided by the swept parameter's value.
normalized_plots = [['Cumulative','num_agents','Objective'],
                    ['Cumulative','T','Objective'],
                    ['Terminal','num_agents','Objective']
                    ]
# X-axis labels keyed by swept parameter.
x_labels = {'num_agents': 'Number of Nodes |V|',
            'T': 'Time Horizon T',
            'm': 'Number of Edges Formed by Entering\nNodes during Network Construction'}
# Suffix appended to the y-axis label when the plot is normalized.
y_labels = {'num_agents': '\ndivided by |V|',
            'T': '\ndivided by T'}
# Sweep configuration: which parameters and modes to generate figures for.
params = ['num_agents', 'T', 'm']
obj_modes = ['Cumulative', 'Terminal']
exog_modes = ['Uniform', 'Weighted']
plot_modes = ['Objective', 'Time']
# Emit one figure per (objective mode, exogenous mode, metric) combination.
for obj_mode in obj_modes:
    for exog_mode in exog_modes:
        for plot_mode in plot_modes:
            figure_path = f"{output_path_root}{obj_mode}_{exog_mode}_{plot_mode}_plots"
            generate_plots_ijcai(params=params, data=data, obj_mode=obj_mode,
                                 exog_mode=exog_mode, policies=policies,
                                 figsize=(23, 8), mode=plot_mode,
                                 filename=figure_path)
| 33.316176 | 88 | 0.580004 | import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import sys
import pickle
# Verify the number of command line arguments.
# An explicit check (rather than `assert`, which is stripped under `python -O`)
# gives the user a usage message instead of a traceback.
if len(sys.argv) != 5:
    sys.exit("Usage: plot_generation_script.py "
             "<num_agents_pickle> <T_pickle> <m_pickle> <output_path_root>")

num_agents_path = sys.argv[1]
T_path = sys.argv[2]
m_path = sys.argv[3]
output_path_root = sys.argv[4]

# Load the experiment results; each pickle holds the data for one swept
# parameter. NOTE(review): pickle.load must only be used on trusted files.
data = {}
for key, path in (('num_agents', num_agents_path),
                  ('T', T_path),
                  ('m', m_path)):
    with open(path, 'rb') as handle:
        data[key] = pickle.load(handle)
# Use a serif font for all figure text.
plt.rcParams["font.family"] = "Times New Roman"
# Policies to plot, in legend order.
# NOTE(review): 'Modifed Reconnect' (sic) matches the key used in the result
# pickles -- do not correct the spelling here without regenerating the data.
policies = ['Control',
            'Perpetual Random',
            'Initial Random',
            'Myopic',
            'One Step Lookahead',
            'Modifed Reconnect']
# Matplotlib marker style for each policy's line.
marker_dict = {'Control': 'o',
               'Perpetual Random': 'v',
               'Initial Random': '^',
               'Myopic': 's',
               'One Step Lookahead': 'd',
               'Modifed Reconnect': 'X'}
# (obj_mode, param, value) combinations whose mean objective is plotted
# divided by the swept parameter's value.
normalized_plots = [['Cumulative','num_agents','Objective'],
                    ['Cumulative','T','Objective'],
                    ['Terminal','num_agents','Objective']
                    ]
# X-axis labels keyed by swept parameter.
x_labels = {'num_agents': 'Number of Nodes |V|',
            'T': 'Time Horizon T',
            'm': 'Number of Edges Formed by Entering\nNodes during Network Construction'}
# Suffix appended to the y-axis label when the plot is normalized.
y_labels = {'num_agents': '\ndivided by |V|',
            'T': '\ndivided by T'}
def generate_subplot(ax, value, param, data, obj_mode,
                     exog_mode, policies, include_title):
    """Plot the mean of `value` ('Objective' or 'Time') versus `param` on `ax`.

    One line is drawn per policy, styled via the module-level `marker_dict`,
    `x_labels`, `y_labels` and `normalized_plots` configuration.

    Args:
        ax: matplotlib Axes to draw on.
        value: metric to plot, 'Objective' or 'Time'.
        param: swept parameter name ('num_agents', 'T' or 'm').
        data: nested results dict, indexed data[param][obj_mode][exog_mode][policy].
        obj_mode: 'Cumulative' or 'Terminal'.
        exog_mode: 'Uniform' or 'Weighted'.
        policies: iterable of policy names to plot.
        include_title: whether to set a title on the axes.
    """
    # Some (obj_mode, param, value) combinations are plotted per-unit
    # (mean value divided by the swept parameter's value).
    normalized = [obj_mode, param, value] in normalized_plots
    mean_label = 'Mean ' + value
    variables = data[param]['Parameters'][param]
    # (The original also built an unused, deleted-from copy of the
    # hyper-parameter dict here; that dead code has been removed.)
    for policy in policies:
        policy_data = data[param][obj_mode][exog_mode][policy]
        mean_data = np.zeros(len(variables))
        for i, var in enumerate(variables):
            mean_data[i] = policy_data[var][mean_label]
            if normalized:
                mean_data[i] /= var
        # 'Modifed Reconnect' (sic, spelling kept to match the data keys)
        # is displayed as 'Gradient Based' in the legend.
        label = 'Gradient Based' if policy == 'Modifed Reconnect' else policy
        ax.plot(variables, mean_data, linewidth=2.5,
                label=label, marker=marker_dict[policy], markersize=16)
    ax.set_xlabel(x_labels[param], fontsize=24)
    if value == 'Objective':
        y_label = 'Average Objective Value'
    else:
        y_label = 'Average Execution Time'
    if normalized:
        y_label = y_label + y_labels[param]
    ax.set_ylabel(y_label, fontsize=24)
    ax.tick_params(labelsize=20)
    if include_title:
        if value == 'Objective':
            title = 'Average Objective Value'
        else:
            title = 'Average Execution Time (seconds)'
        ax.set_title(title, fontsize=24, pad=40)
    ax.grid()
def generate_plots_ijcai(params, data, obj_mode,
        exog_mode, policies, figsize,
        mode='Objective', filename=None):
    """Render one subplot per swept parameter with a shared figure legend.

    params: swept parameter names (one subplot each; assumes len(params) > 1
        so plt.subplots returns an array of axes — TODO confirm)
    mode: 'Objective' or 'Time' (validated below)
    filename: when given, the figure is also saved there at 300 dpi.
    """
    assert mode in ['Objective', 'Time']
    n = len(params)
    fig, ax = plt.subplots(1, n, figsize=figsize)
    for i in range(n):
        generate_subplot(ax=ax[i], value=mode, param=params[i],
            data=data, obj_mode=obj_mode, exog_mode=exog_mode,
            policies=policies, include_title=False)
    # All subplots share the same lines, so take handles from the last one.
    handles, labels = ax[n - 1].get_legend_handles_labels()
    fig.subplots_adjust(bottom=0.15, wspace=0.3)
    leg = fig.legend(handles, labels, loc='upper center',
        fancybox=True, shadow=True, ncol=6, fontsize=24)
    # NOTE(review): Legend.legendHandles was renamed legend_handles in
    # Matplotlib 3.7 and removed in 3.9; kept for the pinned version here.
    for legobj in leg.legendHandles:
        legobj.set_linewidth(5)
    if filename is not None:  # was `!= None`; identity test per PEP 8
        plt.savefig(filename, dpi=300)
# Sweep every (objective mode, exogenous mode, plot mode) combination and
# write one figure per combination.
params = ['num_agents', 'T', 'm']
obj_modes = ['Cumulative', 'Terminal']
exog_modes = ['Uniform', 'Weighted']
plot_modes = ['Objective', 'Time']
for obj_mode in obj_modes:
    for exog_mode in exog_modes:
        for plot_mode in plot_modes:
            # output_path_root is defined earlier in this script (out of
            # view here) — presumably a directory/file prefix; TODO confirm.
            output_path = output_path_root + obj_mode + '_' + exog_mode + \
                '_' + plot_mode + '_plots'
            generate_plots_ijcai(params=params, data=data, obj_mode=obj_mode,
                exog_mode=exog_mode, policies=policies,
                figsize=(23, 8), mode=plot_mode,
                filename=output_path)
| 2,429 | 0 | 46 |
16a964c561916f1a67430c581ee6f1a2b835feff | 2,667 | py | Python | sdk/python/kfp/v2/compiler_cli_tests/test_data/pipeline_with_various_io_types.py | ConverJens/pipelines | a1d453af214ec9eebad73fb05845dd3499d60d00 | [
"Apache-2.0"
] | 2 | 2021-03-11T14:27:12.000Z | 2021-03-11T14:27:24.000Z | sdk/python/kfp/v2/compiler_cli_tests/test_data/pipeline_with_various_io_types.py | ConverJens/pipelines | a1d453af214ec9eebad73fb05845dd3499d60d00 | [
"Apache-2.0"
] | 484 | 2021-01-21T06:49:17.000Z | 2022-03-23T01:21:24.000Z | sdk/python/kfp/v2/compiler_cli_tests/test_data/pipeline_with_various_io_types.py | ConverJens/pipelines | a1d453af214ec9eebad73fb05845dd3499d60d00 | [
"Apache-2.0"
] | 1 | 2021-03-19T14:31:00.000Z | 2021-03-19T14:31:00.000Z | # Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pathlib
from kfp import components
from kfp.v2 import dsl
import kfp.v2.compiler as compiler
component_op_1 = components.load_component_from_text("""
name: upstream
inputs:
- {name: input_1, type: String}
- {name: input_2, type: Float}
- {name: input_3, type: }
- {name: input_4}
- {name: input_5, type: Metrics}
- {name: input_6, type: Datasets}
- {name: input_7, type: Some arbitrary type}
- {name: input_8, type: {GcsPath: {data_type: TSV}}}
outputs:
- {name: output_1, type: Integer}
- {name: output_2, type: Model}
- {name: output_3}
implementation:
container:
image: gcr.io/image
args:
- {inputValue: input_1}
- {inputValue: input_2}
- {inputUri: input_3}
- {inputUri: input_4}
- {inputUri: input_5}
- {inputUri: input_6}
- {inputUri: input_7}
- {inputUri: input_8}
- {outputPath: output_1}
- {outputUri: output_2}
- {outputPath: output_3}
""")
component_op_2 = components.load_component_from_text("""
name: downstream
inputs:
- {name: input_a, type: Integer}
- {name: input_b, type: Model}
- {name: input_c}
implementation:
container:
image: gcr.io/image
args:
- {inputValue: input_a}
- {inputUri: input_b}
- {inputPath: input_c}
""")
@dsl.pipeline(name='pipeline-with-various-types')
if __name__ == '__main__':
compiler.Compiler().compile(
pipeline_func=my_pipeline,
pipeline_root='dummy_root',
output_path=__file__ + '.json')
| 27.494845 | 74 | 0.667042 | # Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pathlib
from kfp import components
from kfp.v2 import dsl
import kfp.v2.compiler as compiler
component_op_1 = components.load_component_from_text("""
name: upstream
inputs:
- {name: input_1, type: String}
- {name: input_2, type: Float}
- {name: input_3, type: }
- {name: input_4}
- {name: input_5, type: Metrics}
- {name: input_6, type: Datasets}
- {name: input_7, type: Some arbitrary type}
- {name: input_8, type: {GcsPath: {data_type: TSV}}}
outputs:
- {name: output_1, type: Integer}
- {name: output_2, type: Model}
- {name: output_3}
implementation:
container:
image: gcr.io/image
args:
- {inputValue: input_1}
- {inputValue: input_2}
- {inputUri: input_3}
- {inputUri: input_4}
- {inputUri: input_5}
- {inputUri: input_6}
- {inputUri: input_7}
- {inputUri: input_8}
- {outputPath: output_1}
- {outputUri: output_2}
- {outputPath: output_3}
""")
component_op_2 = components.load_component_from_text("""
name: downstream
inputs:
- {name: input_a, type: Integer}
- {name: input_b, type: Model}
- {name: input_c}
implementation:
container:
image: gcr.io/image
args:
- {inputValue: input_a}
- {inputUri: input_b}
- {inputPath: input_c}
""")
@dsl.pipeline(name='pipeline-with-various-types')
def my_pipeline(input1,
                input3,
                input4='',
                input5='gs://bucket/metrics',
                input6='gs://bucket/dataset',
                input7='arbitrary value',
                input8='gs://path2'):
    """Wire the two test components together.

    Exercises every input/output flavor declared in component_op_1
    (typed values, untyped values, artifact URIs) and feeds all three of
    its outputs into component_op_2. input2 of the upstream component is
    deliberately a hard-coded constant rather than a pipeline parameter.
    """
    component_1 = component_op_1(
        input_1=input1,
        input_2=3.1415926,
        input_3=input3,
        input_4=input4,
        input_5='gs://bucket/metrics',
        input_6=input6,
        input_7=input7,
        input_8=input8)
    # Downstream consumes all three upstream outputs (value, URI, path).
    component_2 = component_op_2(
        input_a=component_1.outputs['output_1'],
        input_b=component_1.outputs['output_2'],
        input_c=component_1.outputs['output_3'])
if __name__ == '__main__':
compiler.Compiler().compile(
pipeline_func=my_pipeline,
pipeline_root='dummy_root',
output_path=__file__ + '.json')
| 624 | 0 | 22 |
2dfb4c47df4398d5e9a573d66201c2f3446812bc | 3,107 | py | Python | msda_src/model_utils/tagger.py | zfjsail/transfer | 2cc2a73b579fb57714079f94a9f4ffe6b8a4acb4 | [
"MIT"
] | 51 | 2018-09-18T06:41:44.000Z | 2022-03-21T00:45:04.000Z | msda_src/model_utils/tagger.py | zfjsail/transfer | 2cc2a73b579fb57714079f94a9f4ffe6b8a4acb4 | [
"MIT"
] | 3 | 2018-11-26T11:36:26.000Z | 2020-08-30T03:02:32.000Z | msda_src/model_utils/tagger.py | zfjsail/transfer | 2cc2a73b579fb57714079f94a9f4ffe6b8a4acb4 | [
"MIT"
] | 20 | 2018-10-01T09:08:52.000Z | 2022-01-12T03:53:41.000Z | import random
import sys
import argparse
import numpy as np
import torch
from torch import nn
from torch import optim
from torch.autograd import Variable
from torch.nn import functional as F
from .basic import ModelBase
| 38.8375 | 130 | 0.60251 | import random
import sys
import argparse
import numpy as np
import torch
from torch import nn
from torch import optim
from torch.autograd import Variable
from torch.nn import functional as F
from .basic import ModelBase
class Tagger(ModelBase):
    """BiLSTM tagger encoder with character-level fallback for rare words.

    Words seen more than 5 times use their word embedding; rarer words get
    the UNK embedding plus a composition built from forward and backward
    character LSTMs over their characters.
    """

    @staticmethod
    def add_config(cfgparser):
        """Register this model's command-line options on *cfgparser*."""
        super(Tagger, Tagger).add_config(cfgparser)

    def get_var(self, x, volatile=False):
        """Wrap tensor *x* in a Variable, moving it to GPU when args.CUDA is set.

        NOTE(review): `volatile` is a legacy (pre-0.4) PyTorch flag; kept for
        compatibility with the rest of this codebase.
        """
        x = Variable(x, volatile=volatile)
        return x.cuda() if self.args.CUDA else x

    def __init__(self, args, vw, vc, vt, wc, UNK, CUNK, pad_char):
        """Build embedding tables and LSTMs.

        args: hyper-parameter namespace (embed/hidden sizes, vocab sizes,
            CUDA flag); vw/vc/vt: word/char/tag vocabularies; wc: word-count
            dict; UNK/CUNK: unknown word/char ids; pad_char: char pad id.
        """
        super(Tagger, self).__init__(args)
        WEMBED_SIZE = args.WEMBED_SIZE
        CEMBED_SIZE = args.CEMBED_SIZE
        HIDDEN_SIZE = args.HIDDEN_SIZE
        MLP_SIZE = args.MLP_SIZE
        self.args = args
        ntags = args.ntags
        nwords = args.nwords
        nchars = args.nchars
        self.vw = vw
        self.vc = vc
        self.vt = vt
        self.wc = wc
        self.UNK = UNK
        self.CUNK = CUNK
        self.pad_char = pad_char
        self.lookup_w = nn.Embedding(nwords, WEMBED_SIZE, padding_idx=self.UNK)
        self.lookup_c = nn.Embedding(nchars, CEMBED_SIZE, padding_idx=self.CUNK)
        self.lstm = nn.LSTM(WEMBED_SIZE, HIDDEN_SIZE, 1, bidirectional=True)
        # BUGFIX: floor division — on Python 3, `WEMBED_SIZE / 2` is a float
        # and nn.LSTM requires an int hidden size. `//` gives the same value
        # the old Python 2 integer division produced.
        self.lstm_c_f = nn.LSTM(CEMBED_SIZE, WEMBED_SIZE // 2, 1)
        self.lstm_c_r = nn.LSTM(CEMBED_SIZE, WEMBED_SIZE // 2, 1)
        #self.proj1 = nn.Linear(2 * HIDDEN_SIZE, MLP_SIZE)
        #self.proj2 = nn.Linear(MLP_SIZE, ntags)

    def forward(self, words, volatile=False):
        """Encode a sentence (list of word strings) into per-token states.

        Returns the BiLSTM output, one row of size 2*HIDDEN_SIZE per word.
        """
        word_ids = []
        needs_chars = []
        char_ids = []
        for i, w in enumerate(words):
            if self.wc[w] > 5:
                word_ids.append(self.vw.w2i[w])
            else:
                # Rare word: UNK embedding, to be augmented from characters.
                word_ids.append(self.UNK)
                needs_chars.append(i)
                # Characters framed by pad_char on both sides.
                char_ids.append([self.pad_char] + [self.vc.w2i.get(c, self.CUNK) for c in w] \
                        + [self.pad_char])
        embeddings = self.lookup_w(self.get_var(torch.LongTensor(word_ids), volatile=volatile))
        if needs_chars:
            max_len = max(len(x) for x in char_ids)
            # Pad forward and reversed character sequences to a common length.
            fwd_char_ids = [ids + [self.pad_char \
                    for _ in range(max_len - len(ids))] for ids in char_ids]
            rev_char_ids = [ids[::-1] + [self.pad_char \
                    for _ in range(max_len - len(ids))] for ids in char_ids]
            char_embeddings = torch.cat([
                self.lstm_c_f(self.lookup_c(self.get_var(torch.LongTensor(fwd_char_ids).t())))[0],
                self.lstm_c_r(self.lookup_c(self.get_var(torch.LongTensor(rev_char_ids).t())))[0]
            ], 2)
            # Take the char-LSTM outputs at each word's last real character.
            unk_embeddings = torch.cat([char_embeddings[len(words[j]) + 1, i].unsqueeze(0) for i, j in enumerate(needs_chars)], 0)
            # index_add (out-of-place) adds char compositions onto the UNK rows.
            embeddings = embeddings.index_add(0, self.get_var(torch.LongTensor(needs_chars)), unk_embeddings)
        return self.lstm(embeddings.unsqueeze(1))[0].squeeze(1)
        #return self.proj2(self.proj1(self.lstm(embeddings.unsqueeze(1))[0].squeeze(1)))
| 2,731 | 131 | 23 |
9751dbfc75b668ea92e2f8cf832d672fc5eb7064 | 594 | py | Python | dakara_server/internal/pagination.py | DakaraProject/dakara-server | b28fc1a8561e431d562102932f3d6ff3607e545b | [
"MIT"
] | 4 | 2018-07-24T18:22:16.000Z | 2020-01-24T16:30:54.000Z | dakara_server/internal/pagination.py | DakaraProject/dakara-server | b28fc1a8561e431d562102932f3d6ff3607e545b | [
"MIT"
] | 88 | 2017-11-04T08:58:02.000Z | 2022-03-30T11:39:08.000Z | dakara_server/internal/pagination.py | DakaraProject/dakara-server | b28fc1a8561e431d562102932f3d6ff3607e545b | [
"MIT"
] | 1 | 2018-05-05T15:37:20.000Z | 2018-05-05T15:37:20.000Z | from rest_framework.pagination import PageNumberPagination
from rest_framework.response import Response
class PageNumberPaginationCustom(PageNumberPagination):
"""Pagination.
Gives current page number and last page number.
"""
| 27 | 58 | 0.572391 | from rest_framework.pagination import PageNumberPagination
from rest_framework.response import Response
class PageNumberPaginationCustom(PageNumberPagination):
    """Pagination.

    Gives current page number and last page number.
    """

    def get_paginated_response(self, data):
        """Wrap *data* with pagination metadata (current/last page, count)."""
        paginator = self.page.paginator
        payload = {
            "pagination": {
                "current": self.page.number,
                "last": paginator.num_pages,
            },
            "count": paginator.count,
            "results": data,
        }
        return Response(payload)
| 325 | 0 | 27 |
6aa330ce51d5ebce9669acc43855c4f034ad294d | 458 | py | Python | voicetools/__init__.py | fossabot/JackCogs | b9651a87e8a1fe29e4d672a03a42b6b3f37bb852 | [
"MIT"
] | null | null | null | voicetools/__init__.py | fossabot/JackCogs | b9651a87e8a1fe29e4d672a03a42b6b3f37bb852 | [
"MIT"
] | null | null | null | voicetools/__init__.py | fossabot/JackCogs | b9651a87e8a1fe29e4d672a03a42b6b3f37bb852 | [
"MIT"
] | null | null | null | from redbot import version_info, VersionInfo
from redbot.core.bot import Red
from redbot.core.errors import CogLoadError
from .voicetools import VoiceTools
| 30.533333 | 88 | 0.69869 | from redbot import version_info, VersionInfo
from redbot.core.bot import Red
from redbot.core.errors import CogLoadError
from .voicetools import VoiceTools
async def setup(bot: Red) -> None:
    """Cog entry point: verify the Red version, then register VoiceTools."""
    minimum = VersionInfo.from_str("3.1.3")
    if version_info < minimum:
        message = (
            "This cog requires at least Red 3.1.3.\n"
            "Go update, it's a straight improvement from previously supported versions."
        )
        raise CogLoadError(message)
    bot.add_cog(VoiceTools())
1cc38f2d129f2a9a2fc3d2af053bb15247ffe26a | 764 | py | Python | 0701. Insert into a Binary Search Tree/solution.py | furutuki/LeetCodeSolution | db5e6573d0c907dfa3e6ad5e5b3b5ff9944a4f53 | [
"MIT"
] | null | null | null | 0701. Insert into a Binary Search Tree/solution.py | furutuki/LeetCodeSolution | db5e6573d0c907dfa3e6ad5e5b3b5ff9944a4f53 | [
"MIT"
] | null | null | null | 0701. Insert into a Binary Search Tree/solution.py | furutuki/LeetCodeSolution | db5e6573d0c907dfa3e6ad5e5b3b5ff9944a4f53 | [
"MIT"
] | null | null | null | # Definition for a binary tree node.
| 23.875 | 66 | 0.515707 | # Definition for a binary tree node.
class TreeNode:
    """Binary tree node: a value plus optional left/right children."""

    def __init__(self, val=0, left=None, right=None):
        # Children default to None (leaf node).
        self.val = val
        self.left = left
        self.right = right
class Solution:
    """LeetCode 701: insert a value into a binary search tree."""

    def solve(self, node: TreeNode, val: int):
        """Walk down from *node* and attach *val* as a new leaf (BST order).

        Values >= the current node go right, smaller values go left.
        No-op when *node* is None.
        """
        current = node
        while current is not None:
            if val < current.val:
                if current.left is None:
                    current.left = TreeNode(val)
                    return
                current = current.left
            else:
                if current.right is None:
                    current.right = TreeNode(val)
                    return
                current = current.right

    def insertIntoBST(self, root: TreeNode, val: int) -> TreeNode:
        """Insert *val* into the tree rooted at *root*; return the root.

        An empty tree yields a fresh single-node tree.
        """
        if root is None:
            return TreeNode(val)
        self.solve(root, val)
        return root
| 613 | -12 | 124 |
bb0c87e02ac0a359d88ce24de93cc2c12a5f3464 | 5,031 | py | Python | setup.py | simongormley/python-mqlight | 4aace74290d10c37e262cc296dfcbb92e66afb9e | [
"Apache-2.0"
] | 13 | 2015-05-11T21:40:37.000Z | 2022-01-18T02:20:23.000Z | setup.py | simongormley/python-mqlight | 4aace74290d10c37e262cc296dfcbb92e66afb9e | [
"Apache-2.0"
] | 27 | 2016-03-08T22:35:42.000Z | 2021-06-25T15:16:43.000Z | setup.py | simongormley/python-mqlight | 4aace74290d10c37e262cc296dfcbb92e66afb9e | [
"Apache-2.0"
] | 11 | 2016-02-21T11:11:55.000Z | 2021-11-10T02:43:24.000Z |
# python-mqlight - high-level API by which you can interact with MQ Light
#
# Copyright 2015-2017 IBM Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
from setuptools import setup, find_packages, Extension
from setuptools.command.test import test as TestCommand
from codecs import open as codecs_open
from os import path, environ
from platform import system, architecture
if not sys.version_info[:2] >= (2, 6):
print('ERROR: Python 2.6 or newer is required')
sys.exit(1)
if system() == 'Windows' and architecture()[0] == '32bit':
print('ERROR: Mqlight requires 64bit Python on Windows.')
sys.exit(1)
HERE = path.abspath(path.dirname(__file__))
with codecs_open(path.join(HERE, 'description.rst'), encoding='utf-8') as f:
LONG_DESCRIPTION = f.read()
if system() == 'Darwin':
environ['ARCHFLAGS'] = '-arch x86_64 -mmacosx-version-min=10.8'
def get_sources():
"""Return a list of source files to compile into the extension"""
if system() == 'Windows':
return [path.join('mqlight', 'cproton.cxx')]
else:
return [path.join('mqlight', 'cproton.c')]
def get_runtime_library_dirs():
"""Return a custom rpath to write into the extension"""
if system() == 'Linux':
return ['$ORIGIN']
else:
return []
def get_extra_compile_args():
"""Return a list of extra arguments to supply at extension compile time"""
if system() == 'Linux':
return ['-Wno-address', '-Wno-unused-function']
else:
return []
def get_extra_link_args():
"""Return a list of extra arguments to supply at extension link time"""
if system() == 'Darwin':
return ['-Wl,-rpath,@loader_path/']
else:
return []
# pylint: disable=R0904
class PyTest(TestCommand):
"""TestCommand to run suite using py.test"""
test_args = []
test_suite = True
pytest_args = []
setup(
name='mqlight',
version='9.9.9999999999',
description='IBM MQ Light Client Python Module',
long_description=LONG_DESCRIPTION,
url='https://developer.ibm.com/messaging/mq-light/',
author='IBM',
author_email='mqlight@uk.ibm.com',
license='proprietary',
classifiers=[
'Development Status :: 4 - Beta',
'Intended Audience :: Developers',
'Topic :: Software Development',
'Topic :: Communications',
'Topic :: System :: Networking',
'Topic :: Software Development :: Libraries :: Python Modules',
'License :: Other/Proprietary License',
'Operating System :: OS Independent',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.5',
],
keywords='ibm mqlight',
packages=find_packages(
exclude=['tests']),
package_data={
'mqlight': [
'*.dll',
'libqpid-proton*',
'samples/*.py',
'licenses/*',
'README']},
ext_package='mqlight',
ext_modules=[
Extension(
name='_cproton',
sources=get_sources(),
include_dirs=[
path.join(
HERE,
'include')],
library_dirs=['mqlight'],
libraries=['qpid-proton'],
runtime_library_dirs=get_runtime_library_dirs(),
extra_compile_args=get_extra_compile_args(),
extra_link_args=get_extra_link_args()),
],
install_requires=[
'argparse',
'backports.ssl_match_hostname>=3.4.0.2'
],
test_suite='tests',
tests_require=[
'pytest_cov',
'pytest_pep8',
'pytest_timeout',
'pytest',
'pbr==1.6.0'],
cmdclass={
'test': PyTest}
)
| 31.248447 | 78 | 0.622739 |
# python-mqlight - high-level API by which you can interact with MQ Light
#
# Copyright 2015-2017 IBM Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
from setuptools import setup, find_packages, Extension
from setuptools.command.test import test as TestCommand
from codecs import open as codecs_open
from os import path, environ
from platform import system, architecture
if not sys.version_info[:2] >= (2, 6):
print('ERROR: Python 2.6 or newer is required')
sys.exit(1)
if system() == 'Windows' and architecture()[0] == '32bit':
print('ERROR: Mqlight requires 64bit Python on Windows.')
sys.exit(1)
HERE = path.abspath(path.dirname(__file__))
with codecs_open(path.join(HERE, 'description.rst'), encoding='utf-8') as f:
LONG_DESCRIPTION = f.read()
if system() == 'Darwin':
environ['ARCHFLAGS'] = '-arch x86_64 -mmacosx-version-min=10.8'
def get_sources():
    """Return the list of source files to compile into the extension."""
    # Windows builds use the C++ SWIG output; everything else uses the C one.
    source_name = 'cproton.cxx' if system() == 'Windows' else 'cproton.c'
    return [path.join('mqlight', source_name)]
def get_runtime_library_dirs():
    """Return the custom rpath to write into the extension."""
    # Only Linux embeds $ORIGIN so the bundled shared library is found.
    return ['$ORIGIN'] if system() == 'Linux' else []
def get_extra_compile_args():
    """Return extra arguments to supply at extension compile time."""
    # Silence two noisy warnings when compiling on Linux.
    return ['-Wno-address', '-Wno-unused-function'] if system() == 'Linux' else []
def get_extra_link_args():
    """Return extra arguments to supply at extension link time."""
    # macOS embeds a loader-relative rpath for the bundled dylib.
    return ['-Wl,-rpath,@loader_path/'] if system() == 'Darwin' else []
# pylint: disable=R0904
class PyTest(TestCommand):
    """TestCommand to run suite using py.test"""
    # NOTE(review): mutable class-level lists are shared across instances;
    # run_tests mutates pytest_args in place.
    test_args = []
    test_suite = True
    pytest_args = []
    def initialize_options(self):
        # No custom options; defer to the base setuptools command.
        TestCommand.initialize_options(self)
    def finalize_options(self):
        TestCommand.finalize_options(self)
    def run_tests(self):
        """Run pytest (coverage + junit) and a pep8 pass, exiting with the sum."""
        # environ['MQLIGHT_PYTHON_LOG'] = 'ALL'
        # Imported here so setup.py can load without pytest installed.
        import pytest
        # self.pytest_args.insert(0, 'tests/test_unsubscribe.py')
        # Each insert(0, ...) prepends, so flags end up in reverse order.
        self.pytest_args.insert(0, '--junitxml=junit.xml')
        self.pytest_args.insert(0, '--timeout=10')
        self.pytest_args.insert(0, '--cov-report=term')
        self.pytest_args.insert(0, '--cov-report=html')
        self.pytest_args.insert(0, '--cov=mqlight')
        errno = pytest.main(self.pytest_args)
        errno += pytest.main(['--pep8', '-m pep8', 'mqlight/'])
        sys.exit(errno)
sys.exit(errno)
setup(
name='mqlight',
version='9.9.9999999999',
description='IBM MQ Light Client Python Module',
long_description=LONG_DESCRIPTION,
url='https://developer.ibm.com/messaging/mq-light/',
author='IBM',
author_email='mqlight@uk.ibm.com',
license='proprietary',
classifiers=[
'Development Status :: 4 - Beta',
'Intended Audience :: Developers',
'Topic :: Software Development',
'Topic :: Communications',
'Topic :: System :: Networking',
'Topic :: Software Development :: Libraries :: Python Modules',
'License :: Other/Proprietary License',
'Operating System :: OS Independent',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.5',
],
keywords='ibm mqlight',
packages=find_packages(
exclude=['tests']),
package_data={
'mqlight': [
'*.dll',
'libqpid-proton*',
'samples/*.py',
'licenses/*',
'README']},
ext_package='mqlight',
ext_modules=[
Extension(
name='_cproton',
sources=get_sources(),
include_dirs=[
path.join(
HERE,
'include')],
library_dirs=['mqlight'],
libraries=['qpid-proton'],
runtime_library_dirs=get_runtime_library_dirs(),
extra_compile_args=get_extra_compile_args(),
extra_link_args=get_extra_link_args()),
],
install_requires=[
'argparse',
'backports.ssl_match_hostname>=3.4.0.2'
],
test_suite='tests',
tests_require=[
'pytest_cov',
'pytest_pep8',
'pytest_timeout',
'pytest',
'pbr==1.6.0'],
cmdclass={
'test': PyTest}
)
| 645 | 0 | 81 |
3f840d5c29cd464b02e7bb916949d9f28abf657a | 10,268 | py | Python | app/views/plays/parser.py | julio-am/screenplay-editor | ae891e15f541ecfe40feb731c118cbe4cd79a130 | [
"MIT"
] | null | null | null | app/views/plays/parser.py | julio-am/screenplay-editor | ae891e15f541ecfe40feb731c118cbe4cd79a130 | [
"MIT"
] | 2 | 2021-09-28T02:34:20.000Z | 2022-02-26T07:47:38.000Z | app/views/plays/parser.py | julio-am/screenplay-editor | ae891e15f541ecfe40feb731c118cbe4cd79a130 | [
"MIT"
] | null | null | null | import xml.etree.ElementTree as ET
"""
elemToString
This takes in content, a node, and returns the inner text
"""
"""
cleanElemToString
This takes in content, a node, and returns the inner text with only one space between
words and no line breaks
"""
"""
stageDirInLine
This gets the stage directions in the middle of a line and writes them to our file.
This takes in content, a stage directions XML node, and a targetFile, the file object with write privileges.
"""
"""
printSingleLine
This writes a string to file after removing extra spaces and all line breaks
This takes in line, a string, and targetFile, a file object with write privileges.
"""
"""
speaker
This writes the speaker's name to file and returns it to use as the key for the dictionary.
This takes in content, a speaker node, and a targetFile, a file object with write privileges.
"""
"""
getLines
This will write all the lines that one character speaks and the in-line stage directions to a file.
It takes in content, a node with tag 'ab', and a targetFile, a file object with write privilege.
"""
"""
printOneScene
This will write a single scene as we want it formatted and update the character line dictionary.
It takes in a scene (div2) node, a file to write to, and a dicitionary that holds the lines characters.
"""
"""
visitAct
This is a visitor parser to create a custom navigation bar for any play we use.
It requires an xmlTree that has acts noted by div1 and scenes noted by div2, like the Folger
XML versions of the plays. It also requires a file to write to. Hopefully, this is the file
that we're writing to all along.
This will go through and find all the acts and scenes based on those assumptions. It will
write out the proper HTML to make a navbar based on those assumptions.
"""
dictionary = {}
header = open("header.html", "r")
lines = header.readlines()
target = open("index.html.erb", "w")
tree = ET.parse("data.xml").getroot()
formatting = open("../../assets/javascripts/application.js", "w")
formatHeader = open("../../assets/javascripts/applicationheader.txt", "r")
# Write the header to index file first, using the visitor parser at the appropriate place
for line in lines:
target.write(line)
if '<a class="navbar-brand" href="#">' in line:
title = tree.find(".//{http://www.tei-c.org/ns/1.0}title")
target.write(elemToString(title))
elif '<div class="row">' in line:
oldVisitAct(tree, target)
elif '<ul class="scroll-menu scroll-menu-2x">' in line:
visitAct(tree, target)
jsLines = formatHeader.readlines()
for line in jsLines:
formatting.write(line)
# Start by finding all the acts, noted with div1's
acts = tree.findall(".//{http://www.tei-c.org/ns/1.0}div1")
for act in acts:
target.write('\n<h1 id = %s>\nAct '% act.get('n'))
target.write('%s\n</h1>' % act.get('n'))
# Find all the scenes in the act. Each has the tag div2
scenes = act.findall(".//{http://www.tei-c.org/ns/1.0}div2")
for scene in scenes:
# idNumber is the id attribute so the navigation works.
# It reflects the ActNumber.SceneNumber numbering of Shakespeare plays
idNumber = act.get('n') + "." + scene.get('n')
target.write("\n<h2 id ="+idNumber+">\nScene %s\n</h2>" % scene.get('n'))
writeOneScene(scene, target, dictionary)
target.write("</div>\n</body>\n</html>")
target.close()
formatting.write("\n})")
chars = open("characters.html.erb", "w")
chars.write("<DOCTYPE! HTML>\n<html>")
chars.write('<center>\n<table style="width:50%">\n')
chars.write("<tr><th><b>Character Name</b></th><th><b>Modified Number of Lines</b></th>")
chars.write("<th><b>Original Number of Lines</b></th></tr>")
# In a table we output the name of the character from the dictionary
# and the number of lines they spoke
for key in dictionary:
chars.write('<tr><td>%s</td>' % key)
chars.write('<td>%d</td>' % dictionary[key])
chars.write('<td>%d</td></tr>' % dictionary[key])
chars.write("</table></center>")
chars.close()
| 42.605809 | 109 | 0.62836 | import xml.etree.ElementTree as ET
"""
elemToString
This takes in content, a node, and returns the inner text
"""
def elemToString(content):
    """Return the inner text of *content* (an Element), serialized by ET.

    With method='text' all text/tail nodes are concatenated; with a named
    encoding the result is a byte string on Python 3.
    """
    return ET.tostring(content, method='text', encoding='utf8')
"""
cleanElemToString
This takes in content, a node, and returns the inner text with only one space between
words and no line breaks
"""
def cleanElemToString(content):
    """Return the inner text of *content* with newlines and double spaces removed.

    NOTE(review): Python 2 idiom — filter() on a str returns a str there;
    on Python 3 it returns an iterator with no .replace and this crashes.
    """
    string = elemToString(content)
    return filter(lambda x: x != "\n", string).replace("  ", "")
def stageDirElem(content):
    """Build the HTML string for a stage-direction element.

    <lb/> children become <br> line breaks; other children contribute their
    cleaned inner text. NOTE(review): same Python-2-only filter().replace
    idiom as cleanElemToString.
    """
    string = ""
    for children in content.findall("./*"):
        if children.tag == "{http://www.tei-c.org/ns/1.0}lb":
            string += "\n<br>\n"
        else:
            toString = ET.tostring(children, encoding='utf8', method='text')
            string += filter(lambda x: x != "\n", toString).replace("  ", "")
    return string
def printSingleLine(line, targetFile):
    """Write *line* to *targetFile* with newlines and double spaces stripped.

    NOTE(review): this definition is shadowed by an identical duplicate
    `printSingleLine` later in the file; one of the two should be removed.
    Python-2-only filter().replace idiom.
    """
    targetFile.write(filter(lambda x: x != "\n", line).replace("  ", ""))
"""
stageDirInLine
This gets the stage directions in the middle of a line and writes them to our file.
This takes in content, a stage directions XML node, and a targetFile, the file object with write privileges.
"""
def stageDirInLine(content, targetFile):
    """Write a stage direction, italicized, to *targetFile*."""
    xmlstr = stageDirElem(content)
    targetFile.write("<i>%s</i>" % xmlstr)
"""
printSingleLine
This writes a string to file after removing extra spaces and all line breaks
This takes in line, a string, and targetFile, a file object with write privileges.
"""
def printSingleLine(line, targetFile):
    """Write *line* to *targetFile* with newlines and double spaces stripped.

    NOTE(review): byte-for-byte duplicate of the earlier `printSingleLine`
    definition; this one wins at import time. Python-2-only idiom.
    """
    targetFile.write(filter(lambda x: x != "\n", line).replace("  ", ""))
"""
speaker
This writes the speaker's name to file and returns it to use as the key for the dictionary.
This takes in content, a speaker node, and a targetFile, a file object with write privileges.
"""
def speaker(content, targetFile):
    """Write the speaker's name to *targetFile* and return it.

    The returned name is used as the key in the character-line dictionary.
    """
    xmlstr = cleanElemToString(content)
    targetFile.write('\n<br>\n<span class = "character">%s</span> '% xmlstr)
    return xmlstr
def writeFormatting(className):
    """Emit a jQuery click handler for one line's CSS class.

    Clicking an element of *className* toggles its strikethrough style.
    Writes to the module-level `formatting` file handle (application.js),
    which must already be open.
    """
    formatting.write("\n$('.%s').on('click', function(e){\n" % className)
    formatting.write("e.preventDefault();\n")
    formatting.write("$('.%s').toggleClass('strikethrough');\n});\n" % className)
"""
getLines
This will write all the lines that one character speaks and the in-line stage directions to a file.
It takes in content, a node with tag 'ab', and a targetFile, a file object with write privilege.
"""
def getLines(content, targetFile):
    """Write one speech ('ab' element) — lines plus inline stage directions.

    Returns (numLines, listOfSD): a count of milestone markers seen and the
    'n' attributes of stage directions already written inline (used by the
    caller to avoid writing them twice).
    """
    line = ""
    numLines = 0
    listOfSD = []
    for words in content.findall("./*"):
        # If the child is a milestone, it prints out the previous line, the next line number, and resets
        if ((words.tag == "{http://www.tei-c.org/ns/1.0}milestone") and (words.get('unit') == "ftln")):
            # NOTE(review): numLines is incremented both before and after the
            # write below, so the guard `numLines > 0` is always true here —
            # a </span> is emitted even before the first span is opened.
            numLines += 1
            printSingleLine(line, targetFile)
            if numLines > 0:
                targetFile.write("</span>")
            # 'n' looks like "ftln.X.Y.Z": strip the "ftln" prefix for display
            # and dot-to-dash it for a valid CSS class name.
            targetFile.write('\n<br>\n<span class="lineNum">%s</span>' % words.get('n')[4:])
            targetFile.write('<span class = "%s">' % words.get('n').replace(".", "-"))
            writeFormatting(words.get('n').replace(".", "-"))
            line = ""
            numLines += 1
        # If the child node is a q or seg, those are wrappers, so we need to go one level deeper
        elif((words.tag == "{http://www.tei-c.org/ns/1.0}seg")):
            # NOTE(review): the recursive call's (numLines, listOfSD) return
            # is discarded, so nested counts/stage directions are lost.
            getLines(words, targetFile)
        # If the child is a stage, we should print the line and then print the stage direction
        elif (words.tag == "{http://www.tei-c.org/ns/1.0}stage"):
            printSingleLine(line, targetFile)
            targetFile.write(" ")
            line = ""
            stageDirInLine(words, targetFile)
            listOfSD = listOfSD + [words.get('n')]
        # NOTE(review): unreachable — identical 'seg' condition already
        # handled by the earlier elif branch.
        elif(words.tag == "{http://www.tei-c.org/ns/1.0}seg"):
            getLines(words, targetFile)
        # Any other tag that is not fw is a word, space, or punctuation that should be added to the line
        elif (words.tag != "{http://www.tei-c.org/ns/1.0}fw"):
            line += ET.tostring(words, encoding='utf8', method='text')
    # Because we never hit a final milestone after reading in the last line, we need to print it out
    printSingleLine(line, targetFile)
    targetFile.write("</span>")
    targetFile.write("<br>")
    return (numLines, listOfSD)
"""
printOneScene
This will write a single scene as we want it formatted and update the character line dictionary.
It takes in a scene (div2) node, a file to write to, and a dicitionary that holds the lines characters.
"""
def writeOneScene(scene, targetFile, dictionary):
    """Write one scene's HTML and update the character line-count dictionary.

    scene: a div2 element; dictionary maps speaker name -> line count and is
    mutated in place. Stage directions already written inline by getLines
    (tracked via listOfSD) are skipped at this level.
    """
    curSpeaker = ""
    lines = 0
    listOfSD = []
    # This goes through every node in the scene, hence the need for outerLvlStageDir and stageDirInLine
    for content in scene.iter():
        # If we get a stage direction at this level, it should be an outer level one
        if (content.tag == "{http://www.tei-c.org/ns/1.0}stage"):
            if content.get('n') not in listOfSD:
                stageDirInLine(content, targetFile)
        # If we get a speaker, we need to update the current speaker
        elif (content.tag == "{http://www.tei-c.org/ns/1.0}speaker"):
            curSpeaker = speaker(content, targetFile)
        # If we get an 'ab' tag, this is the start of a line for curSpeaker
        elif(content.tag == "{http://www.tei-c.org/ns/1.0}ab"):
            numLinesAndSD = getLines(content, targetFile)
            lines = numLinesAndSD[0]
            listOfSD += numLinesAndSD[1]
            # Writes the line to the targetFile and updates the character dictionary
            if curSpeaker not in dictionary:
                dictionary[curSpeaker] = lines
            else:
                dictionary[curSpeaker] += lines
"""
visitAct
This is a visitor parser to create a custom navigation bar for any play we use.
It requires an xmlTree that has acts noted by div1 and scenes noted by div2, like the Folger
XML versions of the plays. It also requires a file to write to. Hopefully, this is the file
that we're writing to all along.
This will go through and find all the acts and scenes based on those assumptions. It will
write out the proper HTML to make a navbar based on those assumptions.
"""
def oldVisitAct(xmlTree, targetFile):
    """Emit the legacy multi-column dropdown navigation for every act/scene.

    Expects acts as TEI div1 elements and scenes as div2 elements, as in the
    Folger XML editions.  Writes one Bootstrap column per act with a list
    entry per scene; after act 3 the current row's div is closed.
    """
    ns = "{http://www.tei-c.org/ns/1.0}"
    pad = " " * 14
    padUl = pad + " "
    padLi = padUl + " "
    for act in xmlTree.findall(".//" + ns + "div1"):
        actNum = act.get('n')
        targetFile.write(pad + '<div class="col-sm-4">\n' + padUl + '<ul class="multi-column-dropdown">\n')
        targetFile.write(padLi + '<li><a href="#%s">Act %s</a></li>\n' % (actNum, actNum))
        targetFile.write(padLi + '<li class="divider"></li>\n')
        for scene in act.findall(".//" + ns + "div2"):
            sceneNum = scene.get('n')
            # Anchor ids follow the Act.Scene numbering convention.
            targetFile.write(padLi + '<li><a href="#%s.%s">Scene %s</a></li>\n' % (actNum, sceneNum, sceneNum))
        targetFile.write(padUl + '</ul>\n' + pad + '</div>\n')
        # After act 3, close the current Bootstrap row so a new one starts.
        if int(actNum) == 3:
            targetFile.write(padUl + "</div>")
def visitAct(content, targetFile):
    """Emit scroll-menu navigation entries for every act and scene.

    Acts are TEI div1 elements and scenes div2 elements (Folger XML
    layout).  Each entry is an <li> anchor; scene anchors use Act.Scene
    ids so in-page navigation matches the headings written later.
    """
    ns = "{http://www.tei-c.org/ns/1.0}"
    pad = " " * 4
    for act in content.findall(".//" + ns + "div1"):
        actNum = act.get('n')
        targetFile.write(pad + '\n<li><a href="#%s" class="act">Act %s</a></li>' % (actNum, actNum))
        targetFile.write('\n<li class="divider"></li>')
        for scene in act.findall(".//" + ns + "div2"):
            sceneNum = scene.get('n')
            # Scene anchors follow the Act.Scene numbering convention.
            targetFile.write(pad)
            targetFile.write('\n<li><a href="#%s.%s" class="scene">Scene %s</a></li>' % (actNum, sceneNum, sceneNum))
# ---- Script driver: read the TEI play (data.xml), emit index.html.erb,
# ---- copy the JS header, and write a character line-count table.
dictionary = {}
header = open("header.html", "r")
lines = header.readlines()
target = open("index.html.erb", "w")
tree = ET.parse("data.xml").getroot()
formatting = open("../../assets/javascripts/application.js", "w")
formatHeader = open("../../assets/javascripts/applicationheader.txt", "r")
# NOTE(review): header/formatting/formatHeader handles are never closed.
# Write the header to index file first, using the visitor parser at the appropriate place
for line in lines:
    target.write(line)
    if '<a class="navbar-brand" href="#">' in line:
        # Inject the play's title right after the navbar brand anchor.
        title = tree.find(".//{http://www.tei-c.org/ns/1.0}title")
        target.write(elemToString(title))
    elif '<div class="row">' in line:
        oldVisitAct(tree, target)
    elif '<ul class="scroll-menu scroll-menu-2x">' in line:
        visitAct(tree, target)
jsLines = formatHeader.readlines()
for line in jsLines:
    formatting.write(line)
# Start by finding all the acts, noted with div1's
acts = tree.findall(".//{http://www.tei-c.org/ns/1.0}div1")
for act in acts:
    target.write('\n<h1 id = %s>\nAct '% act.get('n'))
    target.write('%s\n</h1>' % act.get('n'))
    # Find all the scenes in the act. Each has the tag div2
    scenes = act.findall(".//{http://www.tei-c.org/ns/1.0}div2")
    for scene in scenes:
        # idNumber is the id attribute so the navigation works.
        # It reflects the ActNumber.SceneNumber numbering of Shakespeare plays
        idNumber = act.get('n') + "." + scene.get('n')
        target.write("\n<h2 id ="+idNumber+">\nScene %s\n</h2>" % scene.get('n'))
        writeOneScene(scene, target, dictionary)
target.write("</div>\n</body>\n</html>")
target.close()
# Presumably closes a wrapper opened in applicationheader.txt -- TODO confirm.
formatting.write("\n})")
chars = open("characters.html.erb", "w")
# NOTE(review): "<DOCTYPE! HTML>" is malformed; a correct doctype is "<!DOCTYPE html>".
chars.write("<DOCTYPE! HTML>\n<html>")
chars.write('<center>\n<table style="width:50%">\n')
chars.write("<tr><th><b>Character Name</b></th><th><b>Modified Number of Lines</b></th>")
chars.write("<th><b>Original Number of Lines</b></th></tr>")
# In a table we output the name of the character from the dictionary
# and the number of lines they spoke
# NOTE(review): the "Modified" and "Original" columns print the same value.
for key in dictionary:
    chars.write('<tr><td>%s</td>' % key)
    chars.write('<td>%d</td>' % dictionary[key])
    chars.write('<td>%d</td></tr>' % dictionary[key])
chars.write("</table></center>")
chars.close()
| 5,959 | 0 | 268 |
9873b4d12140f0e1fe8c471ca624a6311ca31727 | 208 | py | Python | Exercicios Loop/exercicio 01 - secao 06.py | cristinamais/exercicios_python | 8a09b0b68ffaa62d13afb952998e890a79667c7e | [
"MIT"
] | null | null | null | Exercicios Loop/exercicio 01 - secao 06.py | cristinamais/exercicios_python | 8a09b0b68ffaa62d13afb952998e890a79667c7e | [
"MIT"
] | null | null | null | Exercicios Loop/exercicio 01 - secao 06.py | cristinamais/exercicios_python | 8a09b0b68ffaa62d13afb952998e890a79667c7e | [
"MIT"
] | null | null | null | """
1 - Faรงa um programa que determine e mostre os cinco primeiros mรบltiplos de 3, considerando nรบmeros maiores que 0.
"""
# Print the first five positive multiples of 3 (exercise 1, section 6).
soma = 0  # NOTE(review): this counter is incremented but never read (dead code)
for num in range(1,6):
    numero = num * 3  # the num-th multiple of 3: 3, 6, 9, 12, 15
    soma += 1
    print(numero)
| 23.111111 | 114 | 0.663462 | """
1 - Faรงa um programa que determine e mostre os cinco primeiros mรบltiplos de 3, considerando nรบmeros maiores que 0.
"""
soma = 0
for num in range(1,6):
numero = num * 3
soma += 1
print(numero)
| 0 | 0 | 0 |
c8f6e15e18f3008d4305fe32517b64e216c567cf | 2,061 | py | Python | practice/demo.py | alfaro28/sqlalchemy-media | c70af4faf8b299212aa18247f9cb26fd5efac7bf | [
"MIT"
] | 78 | 2016-09-27T20:40:39.000Z | 2022-02-23T21:08:51.000Z | practice/demo.py | alfaro28/sqlalchemy-media | c70af4faf8b299212aa18247f9cb26fd5efac7bf | [
"MIT"
] | 126 | 2016-09-27T20:39:09.000Z | 2022-02-08T08:23:18.000Z | practice/demo.py | jpmn/sqlalchemy-media | 7dee4aa70fc8979b6fbb39d04c27d897dd51ae2f | [
"MIT"
] | 27 | 2016-10-04T00:27:24.000Z | 2022-03-15T16:52:20.000Z | import json
import functools
from pprint import pprint
from os.path import join, exists
from sqlalchemy import Column, Integer, create_engine, Unicode, TypeDecorator
from sqlalchemy.orm import sessionmaker
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy_media import Image, StoreManager, FileSystemStore, ImageProcessor
# Step 1
TEMP_PATH = '/tmp/sqlalchemy-media'
Base = declarative_base()
engine = create_engine('sqlite:///:memory:', echo=False)
session_factory = sessionmaker(bind=engine)
# Step 2
StoreManager.register('fs', functools.partial(FileSystemStore, TEMP_PATH, 'http://static.example.org/'), default=True)
# Sqlite is not supporting JSON type, so emulating it:
Base.metadata.create_all(engine, checkfirst=True)
if __name__ == '__main__':
session = session_factory()
with StoreManager(session):
person1 = Person()
# person1.image = Image.create_from('https://www.python.org/static/img/python-logo@2x.png')
person1.image = Avatar()
person1.image.attach('https://www.python.org/static/img/python-logo@2x.png')
session.add(person1)
session.commit()
print(person1.id)
pprint(person1.image)
path = join(TEMP_PATH, person1.image.path)
print(path)
print(person1.image.locate())
assert exists(path)
| 26.423077 | 118 | 0.667637 | import json
import functools
from pprint import pprint
from os.path import join, exists
from sqlalchemy import Column, Integer, create_engine, Unicode, TypeDecorator
from sqlalchemy.orm import sessionmaker
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy_media import Image, StoreManager, FileSystemStore, ImageProcessor
# Step 1
TEMP_PATH = '/tmp/sqlalchemy-media'
Base = declarative_base()
engine = create_engine('sqlite:///:memory:', echo=False)
session_factory = sessionmaker(bind=engine)
# Step 2
StoreManager.register('fs', functools.partial(FileSystemStore, TEMP_PATH, 'http://static.example.org/'), default=True)
# Sqlite is not supporting JSON type, so emulating it:
class Json(TypeDecorator):
    """Store JSON-serializable values as Unicode text.

    Emulates a JSON column for backends (such as SQLite here) that do not
    provide a native JSON type.
    """
    impl = Unicode
    def process_bind_param(self, value, engine):
        # Serialize the Python value to a JSON string before storing it.
        return json.dumps(value)
    def process_result_value(self, value, engine):
        # NULL columns stay None; anything else is parsed back from JSON.
        if value is None:
            return None
        return json.loads(value)
class Avatar(Image):
    """Image attachment converted to JPEG and cropped to 250x250 by its
    pre-processor before being stored."""
    __pre_processors__ = [
        ImageProcessor(
            fmt='jpeg',
            crop=dict(
                width=250,
                height=250,
            )
        )
    ]
class Person(Base):
    """Demo model whose avatar image is persisted in an emulated JSON column."""
    __tablename__ = 'person'
    id = Column(Integer, primary_key=True)
    name = Column(Unicode(100))
    # Mutable Avatar attachment stored via a JSON-as-text TypeDecorator.
    image = Column(Avatar.as_mutable(Json))
    def __repr__(self):
        return "<%s id=%s>" % (self.name, self.id)
Base.metadata.create_all(engine, checkfirst=True)
if __name__ == '__main__':
    session = session_factory()
    # Attachment operations must run inside a StoreManager context.
    with StoreManager(session):
        person1 = Person()
        # person1.image = Image.create_from('https://www.python.org/static/img/python-logo@2x.png')
        person1.image = Avatar()
        # attach() ingests the remote URL (pre-processors presumably run here).
        person1.image.attach('https://www.python.org/static/img/python-logo@2x.png')
        session.add(person1)
        session.commit()
        print(person1.id)
        pprint(person1.image)
        path = join(TEMP_PATH, person1.image.path)
        print(path)
        print(person1.image.locate())
        # The processed file must now exist in the filesystem store.
        assert exists(path)
| 214 | 433 | 68 |
4573313ea2349bd3cee43c50eeecd9bc30e577e3 | 84 | py | Python | category/urls.py | shaymk1/my-e-commerce-shop | f8d2dacd7c3eaec557ef5a158e4ba41b170008b2 | [
"MIT"
] | null | null | null | category/urls.py | shaymk1/my-e-commerce-shop | f8d2dacd7c3eaec557ef5a158e4ba41b170008b2 | [
"MIT"
] | null | null | null | category/urls.py | shaymk1/my-e-commerce-shop | f8d2dacd7c3eaec557ef5a158e4ba41b170008b2 | [
"MIT"
] | null | null | null |
from django.contrib import admin
from .models import *
# URL routes for the category app.
# NOTE(review): no routes are registered yet; the imports above are unused.
urlpatterns = [
]
| 6.461538 | 32 | 0.654762 |
from django.contrib import admin
from .models import *
urlpatterns = [
]
| 0 | 0 | 0 |
7fe78c9282f487a492760ae6859e3c6ba9ad4aef | 737 | py | Python | packs/salt/actions/local.py | meirwah/st2contrib | 0743c96abc04ccda983303c4bdb744929dc17fd2 | [
"Apache-2.0"
] | 1 | 2020-11-21T10:10:47.000Z | 2020-11-21T10:10:47.000Z | packs/salt/actions/local.py | meirwah/st2contrib | 0743c96abc04ccda983303c4bdb744929dc17fd2 | [
"Apache-2.0"
] | null | null | null | packs/salt/actions/local.py | meirwah/st2contrib | 0743c96abc04ccda983303c4bdb744929dc17fd2 | [
"Apache-2.0"
] | 2 | 2015-09-09T11:46:25.000Z | 2020-11-21T10:10:49.000Z | import json
from requests import Session
from lib.base import SaltAction
| 29.48 | 79 | 0.603799 | import json
from requests import Session
from lib.base import SaltAction
class SaltLocal(SaltAction):
def run(self, **kwargs):
'''
CLI Examples:
st2 run salt.runner matches='web*' module=test.ping
st2 run salt.client module=pkg.install \
kwargs='{"pkgs":["git","httpd"]}'
'''
# TODO: This is broken, fix it. I temporary disabled it to avoid pylint
# failure.
# Also "args" and "kwargs" action parameters are unused?
# self.generate_package(cmd=cmd)
request = self.generate_request()
request.prepare_body(json.dumps(self.data), None)
resp = Session().send(request, verify=True)
return resp.json()
| 0 | 639 | 23 |
4330771e8136c4a6df3587740060dc1e8e0a63cb | 876 | py | Python | Problemset/sum-lists-lcci/sum-lists-lcci.py | worldwonderer/algorithm | 083178b2d987de7f6020aceca869a353c0b4b1f3 | [
"MIT"
] | 1 | 2021-01-30T01:52:46.000Z | 2021-01-30T01:52:46.000Z | Problemset/sum-lists-lcci/sum-lists-lcci.py | worldwonderer/algorithm | 083178b2d987de7f6020aceca869a353c0b4b1f3 | [
"MIT"
] | 1 | 2021-12-15T14:54:06.000Z | 2021-12-15T14:54:06.000Z | Problemset/sum-lists-lcci/sum-lists-lcci.py | worldwonderer/algorithm | 083178b2d987de7f6020aceca869a353c0b4b1f3 | [
"MIT"
] | 2 | 2021-04-19T03:32:18.000Z | 2021-06-22T07:06:01.000Z |
# @Title: ้พ่กจๆฑๅ (Sum Lists LCCI)
# @Author: 18015528893
# @Date: 2021-02-12 21:23:09
# @Runtime: 60 ms
# @Memory: 14.8 MB
# Definition for singly-linked list.
# class ListNode:
# def __init__(self, x):
# self.val = x
# self.next = None
| 23.675676 | 68 | 0.44863 |
# @Title: ้พ่กจๆฑๅ (Sum Lists LCCI)
# @Author: 18015528893
# @Date: 2021-02-12 21:23:09
# @Runtime: 60 ms
# @Memory: 14.8 MB
# Definition for singly-linked list.
# class ListNode:
# def __init__(self, x):
# self.val = x
# self.next = None
class Solution:
    """Digit-by-digit addition of two numbers stored as linked lists."""

    def addTwoNumbers(self, l1: ListNode, l2: ListNode) -> ListNode:
        """Return the list-encoded sum of l1 and l2 (least-significant digit first)."""
        head = ListNode(0)  # sentinel node; head.next is the real result
        tail = head
        carry = 0
        while l1 is not None or l2 is not None or carry:
            digit_sum = carry
            if l1 is not None:
                digit_sum += l1.val
                l1 = l1.next
            if l2 is not None:
                digit_sum += l2.val
                l2 = l2.next
            # digit_sum is at most 19, so carry is always 0 or 1.
            carry, digit = divmod(digit_sum, 10)
            tail.next = ListNode(digit)
            tail = tail.next
        return head.next
| 575 | -6 | 49 |
b8f15f38159c642a1546aa6bb014008958980e05 | 3,338 | py | Python | setup.py | bepec/pydvbcss | 72bec02d42582416390ea0379dc6b79da8cd0721 | [
"Apache-2.0"
] | 22 | 2015-03-15T17:24:47.000Z | 2021-12-23T01:42:24.000Z | setup.py | bepec/pydvbcss | 72bec02d42582416390ea0379dc6b79da8cd0721 | [
"Apache-2.0"
] | 15 | 2016-02-21T20:05:03.000Z | 2021-01-11T12:19:18.000Z | setup.py | bepec/pydvbcss | 72bec02d42582416390ea0379dc6b79da8cd0721 | [
"Apache-2.0"
] | 6 | 2015-03-30T11:41:20.000Z | 2020-12-16T11:16:00.000Z | #!/usr/bin/env python
#
# Copyright 2015 British Broadcasting Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from setuptools import setup
import os
import sys
import re
def find_packages(path, base="" ):
""" Find all packages in path """
packages = {}
if "__init__.py" in os.listdir(path):
packages[base] = path
for item in os.listdir(path):
itempath = os.path.join(path,item)
if os.path.isdir(itempath):
newbase = "%s.%s" % (base, item)
packages.update(find_packages(itempath, newbase))
return packages
packages = find_packages("dvbcss","dvbcss")
package_names = packages.keys()
otherArgs = {}
# if registering or uploading to PyPI: convert markdown readme to ReStructuredText
# using pandoc
lcase_args = [arg.lower() for arg in sys.argv]
if "register" in lcase_args or "upload" in lcase_args:
retval = os.system("pandoc --from=markdown --to=rst --output=tmp.README.rst README.md")
if retval==0:
otherArgs["long_description"] = open("tmp.README.rst").read()
else:
raise RuntimeError("Unable to convert documentation from Markdown to ReStructuredText. Is 'pandoc' command line tool installed?")
try:
VERSION, _ = re.match("^([.0-9a-zA-Z]+)-(.+)$", open("VERSION").read().replace("\n","").replace("\r","")).groups()
setup(
name = "pydvbcss",
version = VERSION,
author = "Matt Hammond (British Broadcasting Corporation)",
author_email = "matt.hammond@bbc.co.uk",
description = ("pydvbcss is a library implementing DVB \"CSS\" protocols for Companion Screen Synchronisation."),
license = "Apache 2.0",
keywords = "dvb companion synchronisation synchronization second-screen protocol",
url = "http://github.com/BBC/pydvbcss",
packages = package_names,
package_dir = packages,
install_requires = filter(len, [req.strip() for req in open("requirements.txt","r").read().splitlines()]),
test_suite = "test.test_all.testSuite",
classifiers=[
"Intended Audience :: Developers",
"Intended Audience :: Telecommunications Industry",
"License :: OSI Approved :: Apache Software License",
"Natural Language :: English",
"Operating System :: MacOS :: MacOS X",
"Operating System :: Microsoft :: Windows",
"Operating System :: POSIX :: Linux",
"Programming Language :: Python :: 2.7",
"Topic :: Internet",
"Topic :: Software Development :: Libraries :: Python Modules",
"Topic :: System :: Networking :: Time Synchronization",
],
**otherArgs
)
finally:
if "long_description" in otherArgs:
os.remove("tmp.README.rst")
| 36.282609 | 137 | 0.641402 | #!/usr/bin/env python
#
# Copyright 2015 British Broadcasting Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from setuptools import setup
import os
import sys
import re
def find_packages(path, base="" ):
""" Find all packages in path """
packages = {}
if "__init__.py" in os.listdir(path):
packages[base] = path
for item in os.listdir(path):
itempath = os.path.join(path,item)
if os.path.isdir(itempath):
newbase = "%s.%s" % (base, item)
packages.update(find_packages(itempath, newbase))
return packages
packages = find_packages("dvbcss","dvbcss")
package_names = packages.keys()
otherArgs = {}
# if registering or uploading to PyPI: convert markdown readme to ReStructuredText
# using pandoc
lcase_args = [arg.lower() for arg in sys.argv]
if "register" in lcase_args or "upload" in lcase_args:
retval = os.system("pandoc --from=markdown --to=rst --output=tmp.README.rst README.md")
if retval==0:
otherArgs["long_description"] = open("tmp.README.rst").read()
else:
raise RuntimeError("Unable to convert documentation from Markdown to ReStructuredText. Is 'pandoc' command line tool installed?")
try:
VERSION, _ = re.match("^([.0-9a-zA-Z]+)-(.+)$", open("VERSION").read().replace("\n","").replace("\r","")).groups()
setup(
name = "pydvbcss",
version = VERSION,
author = "Matt Hammond (British Broadcasting Corporation)",
author_email = "matt.hammond@bbc.co.uk",
description = ("pydvbcss is a library implementing DVB \"CSS\" protocols for Companion Screen Synchronisation."),
license = "Apache 2.0",
keywords = "dvb companion synchronisation synchronization second-screen protocol",
url = "http://github.com/BBC/pydvbcss",
packages = package_names,
package_dir = packages,
install_requires = filter(len, [req.strip() for req in open("requirements.txt","r").read().splitlines()]),
test_suite = "test.test_all.testSuite",
classifiers=[
"Intended Audience :: Developers",
"Intended Audience :: Telecommunications Industry",
"License :: OSI Approved :: Apache Software License",
"Natural Language :: English",
"Operating System :: MacOS :: MacOS X",
"Operating System :: Microsoft :: Windows",
"Operating System :: POSIX :: Linux",
"Programming Language :: Python :: 2.7",
"Topic :: Internet",
"Topic :: Software Development :: Libraries :: Python Modules",
"Topic :: System :: Networking :: Time Synchronization",
],
**otherArgs
)
finally:
if "long_description" in otherArgs:
os.remove("tmp.README.rst")
| 0 | 0 | 0 |
f8aaed1a0b6042ff699508a3d1757a5327405bad | 765 | py | Python | ChemicalReactions/ad/functions.py | temmy222/ReactionModeling | d397b4cdc77c1415369298cc75a49be3798048c1 | [
"Unlicense"
] | null | null | null | ChemicalReactions/ad/functions.py | temmy222/ReactionModeling | d397b4cdc77c1415369298cc75a49be3798048c1 | [
"Unlicense"
] | null | null | null | ChemicalReactions/ad/functions.py | temmy222/ReactionModeling | d397b4cdc77c1415369298cc75a49be3798048c1 | [
"Unlicense"
] | null | null | null | import numpy as np
from ad.forward_mode import Ad_array
| 20.131579 | 50 | 0.603922 | import numpy as np
from ad.forward_mode import Ad_array
def exp(var):
    """Elementwise exponential that propagates forward-mode derivatives."""
    if not isinstance(var, Ad_array):
        return np.exp(var)
    value = np.exp(var.val)
    # d/dx exp(x) = exp(x), applied via the chain rule on the Jacobian.
    jacobian = var.diagvec_mul_jac(np.exp(var.val))
    return Ad_array(value, jacobian)
def log(var):
    """Natural logarithm that propagates forward-mode derivatives."""
    if isinstance(var, Ad_array):
        # d/dx log(x) = 1/x, applied via the chain rule on the Jacobian.
        return Ad_array(np.log(var.val), var.diagvec_mul_jac(1 / var.val))
    return np.log(var)
def sign(var):
    """Return np.sign of the underlying value; no derivative information."""
    if isinstance(var, Ad_array):
        return np.sign(var.val)
    return np.sign(var)
def abs(var):
    """Absolute value; for Ad_array input the Jacobian is scaled by sign(x).

    NOTE(review): intentionally shadows the ``abs`` builtin in this module.
    """
    if not isinstance(var, Ad_array):
        return np.abs(var)
    value = np.abs(var.val)
    jacobian = var.diagvec_mul_jac(sign(var))
    return Ad_array(value, jacobian)
| 612 | 0 | 92 |
803202c2b799656ec37b8d405d8275a911b9dcaa | 1,242 | py | Python | benchmarking/preprocess_data.py | radujica/data-analysis-pipelines | 64a6e5613cb1ab2ba2eb2f763c2aa1e3bc5e0d3b | [
"MIT"
] | 5 | 2018-03-05T13:19:35.000Z | 2020-11-17T15:59:41.000Z | benchmarking/preprocess_data.py | radujica/data-analysis-pipelines | 64a6e5613cb1ab2ba2eb2f763c2aa1e3bc5e0d3b | [
"MIT"
] | 1 | 2021-06-01T22:27:44.000Z | 2021-06-01T22:27:44.000Z | benchmarking/preprocess_data.py | radujica/data-analysis-pipelines | 64a6e5613cb1ab2ba2eb2f763c2aa1e3bc5e0d3b | [
"MIT"
] | null | null | null | import argparse
import xarray as xr
"""
Join and convert the NETCDF3_CLASSIC files into a large NETCDF4 file (with full HDF5 API)
"""
parser = argparse.ArgumentParser(description='Combine data')
parser.add_argument('-i', '--input', required=True, help='Path to folder containing input files; also output folder')
args = parser.parse_args()
combine(args.input, ['tg', 'tg_stderr', 'pp', 'pp_stderr', 'rr', 'rr_stderr'], 'data1')
combine(args.input, ['tn', 'tn_stderr', 'tx', 'tx_stderr'], 'data2')
| 32.684211 | 117 | 0.649758 | import argparse
import xarray as xr
"""
Join and convert the NETCDF3_CLASSIC files into a large NETCDF4 file (with full HDF5 API)
"""
parser = argparse.ArgumentParser(description='Combine data')
parser.add_argument('-i', '--input', required=True, help='Path to folder containing input files; also output folder')
args = parser.parse_args()
def combine(path, files, output):
    """Inner-join the given NETCDF3 datasets into one NETCDF4 file.

    ``files`` alternates (variable, variable_stderr) pairs, in order, with
    unique names; each odd-indexed entry is renamed so its variable does
    not clash with the preceding non-error variable.
    """
    file_extension = '.nc'
    # first dataset
    result_ds = xr.open_dataset(path + files[0] + file_extension)
    # merge all into result_ds
    for file_name in files[1:]:
        raw_ds = xr.open_dataset(path + file_name + file_extension)
        # the _stderr files have the same variable name as the non _stderr
        # files, so fix it, e.g. tg -> tg_stderr
        # NOTE(review): files.index() assumes file names are unique in ``files``;
        # rename(..., inplace=True) is deprecated in newer xarray -- confirm version.
        if files.index(file_name) % 2 == 1:
            raw_ds.rename({files[files.index(file_name) - 1]: file_name}, inplace=True)
        result_ds = xr.merge([result_ds, raw_ds], join='inner')
    result_ds.to_netcdf(path=path + output + file_extension,
                        format='NETCDF4',
                        engine='netcdf4')
combine(args.input, ['tg', 'tg_stderr', 'pp', 'pp_stderr', 'rr', 'rr_stderr'], 'data1')
combine(args.input, ['tn', 'tn_stderr', 'tx', 'tx_stderr'], 'data2')
| 716 | 0 | 23 |
a20a16e7dac1f8725c3102e3697a4c1e29dc5316 | 593 | py | Python | mediawebapp/middleware.py | jbrownrs/issue-376-GDS-link | e8cce1b79f46b98a7d24b2da5eca48430fd904a3 | [
"MIT"
] | 5 | 2019-01-07T17:22:34.000Z | 2020-10-08T15:03:12.000Z | mediawebapp/middleware.py | jbrownrs/issue-376-GDS-link | e8cce1b79f46b98a7d24b2da5eca48430fd904a3 | [
"MIT"
] | 203 | 2017-12-14T09:51:56.000Z | 2018-08-28T14:04:08.000Z | mediawebapp/middleware.py | jbrownrs/issue-376-GDS-link | e8cce1b79f46b98a7d24b2da5eca48430fd904a3 | [
"MIT"
] | 5 | 2018-10-22T11:36:01.000Z | 2020-07-20T05:47:49.000Z | from automationlookup.models import UserLookup
from django.conf import settings
| 25.782609 | 62 | 0.650927 | from automationlookup.models import UserLookup
from django.conf import settings
def user_lookup_middleware(get_response):
    """Django middleware factory: guarantee a UserLookup row for each
    authenticated user under the configured lookup scheme."""
    def middleware(request):
        """
        This middleware ensures that a UserLookup model exists
        to map an authenticated user to lookup.
        """
        if not request.user.is_anonymous:
            # Idempotent: get_or_create inserts the mapping only once.
            UserLookup.objects.get_or_create(
                user=request.user,
                scheme=settings.LOOKUP_PEOPLE_ID_SCHEME,
                identifier=request.user.username
            )
        return get_response(request)
    return middleware
| 489 | 0 | 23 |
51914992ce4c318882cbb1ecdbab16b2f838dfe3 | 5,063 | py | Python | tools/Test_ATL_HICO_wo_obj.py | abreza/HOI-CL | c5be517bb26eac73ef88a39d6ec9e564c3379714 | [
"MIT"
] | 40 | 2021-04-09T17:53:08.000Z | 2022-03-30T02:38:10.000Z | tools/Test_ATL_HICO_wo_obj.py | abreza/HOI-CL | c5be517bb26eac73ef88a39d6ec9e564c3379714 | [
"MIT"
] | 21 | 2021-04-09T19:05:47.000Z | 2022-01-31T23:17:16.000Z | tools/Test_ATL_HICO_wo_obj.py | abreza/HOI-CL | c5be517bb26eac73ef88a39d6ec9e564c3379714 | [
"MIT"
] | 8 | 2021-05-30T12:37:00.000Z | 2022-03-14T03:13:57.000Z | # --------------------------------------------------------
# Tensorflow iCAN
# Licensed under The MIT License [see LICENSE for details]
# --------------------------------------------------------
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
os.environ['DATASET'] = 'HICO'
os.environ["KMP_BLOCKTIME"] = str(0)
os.environ["KMP_SETTINGS"] = str(1)
os.environ["KMP_AFFINITY"] = "granularity=fine,verbose,compact,1,0"
os.environ["OMP_NUM_THREADS"] = str(8)
import tensorflow as tf
import argparse
from ult.config import cfg
from models.test_HICO import obtain_test_dataset_wo_obj, test_net_data_api_wo_obj
if __name__ == '__main__':
args = parse_args()
print(args)
# test detections result
weight = cfg.ROOT_DIR + '/Weights/' + args.model + '/HOI_iter_' + str(args.iteration) + '.ckpt'
import os
if not os.path.exists(weight + '.index'):
weight = cfg.LOCAL_DATA + '/Weights/' + args.model + '/HOI_iter_' + str(args.iteration) + '.ckpt'
print('weight:', weight)
print('Human thres = ' + str(args.human_thres) + ', Object thres = ' + str(args.object_thres) + ', iter = ' + str(
args.iteration) + ', path = ' + weight)
output_file = cfg.LOCAL_DATA + '/Results/' + str(args.iteration) + '_' + args.model + '_tin.pkl'
if os.path.exists(output_file):
os.remove(output_file)
# init session
HICO_dir = cfg.ROOT_DIR + '/Results/HICO/' + str(args.iteration) + '_' + args.model + '/'
tfconfig = tf.ConfigProto(device_count={"CPU": 12},
inter_op_parallelism_threads=8,
intra_op_parallelism_threads=8,
allow_soft_placement=True)
# init session
# tfconfig = tf.ConfigProto(allow_soft_placement=True)
# tfconfig.gpu_options.allow_growth = True
tfconfig.gpu_options.allow_growth = True
sess = tf.Session(config=tfconfig)
# net = ResNet50(model_name=args.model)
# net.create_architecture(False)
#
#
# saver = tf.train.Saver()
# saver.restore(sess, weight)
#
# print('Pre-trained weights loaded.')
#
# test_net(sess, net, Test_RCNN, output_file, args.object_thres, args.human_thres)
# sess.close()
# Generate_HICO_detection(output_file, HICO_dir)
if args.model.__contains__('res101'):
os.environ['DATASET'] = 'HICO_res101'
from networks.HOI import HOI
net = HOI(model_name=args.model)
else:
from networks.HOI import HOI
net = HOI(model_name=args.model)
stride = 200
image, blobs, image_id = obtain_test_dataset_wo_obj(args.object_thres, args.human_thres, test_type=args.test_type,
has_human_threhold=not args.not_h_threhold,
stride=stride)
image = image[0:1]
print(blobs, image)
tmp_labels = tf.one_hot(tf.reshape(tf.cast(blobs['O_cls'], tf.int32), shape=[-1, ]), 80, dtype=tf.float32)
tmp_ho_class_from_obj = tf.cast(tf.matmul(tmp_labels, net.obj_to_HO_matrix) > 0, tf.float32)
# action_ho = blobs['O_cls']
net.set_ph(image, image_id, num_pos=blobs['H_num'], Human_augmented=blobs['H_boxes'],
Object_augmented=blobs['O_boxes'],
action_HO=None, sp=blobs['sp'],)
# net.set_add_ph()
# net.init_verbs_objs_cls()
net.create_architecture(False)
saver = tf.train.Saver()
print(weight)
saver.restore(sess, weight)
print('Pre-trained weights loaded.')
test_net_data_api_wo_obj(sess, net, output_file, blobs['H_boxes'][:, 1:], blobs['O_boxes'][:, 1:],
blobs['O_cls'], blobs['H_score'], blobs['O_score'], None, image_id, args.debug)
sess.close()
| 38.067669 | 118 | 0.595892 | # --------------------------------------------------------
# Tensorflow iCAN
# Licensed under The MIT License [see LICENSE for details]
# --------------------------------------------------------
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
os.environ['DATASET'] = 'HICO'
os.environ["KMP_BLOCKTIME"] = str(0)
os.environ["KMP_SETTINGS"] = str(1)
os.environ["KMP_AFFINITY"] = "granularity=fine,verbose,compact,1,0"
os.environ["OMP_NUM_THREADS"] = str(8)
import tensorflow as tf
import argparse
from ult.config import cfg
from models.test_HICO import obtain_test_dataset_wo_obj, test_net_data_api_wo_obj
def parse_args():
    """Parse the command-line arguments for HICO-DET testing.

    Returns an argparse.Namespace with the checkpoint iteration, model
    name, detection thresholds and test configuration.  Fixes the
    copy-pasted 'Human threshold' help texts on --debug and --type and
    the unhelpful help on --not_h_threhold.
    """
    parser = argparse.ArgumentParser(description='Test an iCAN on HICO')
    parser.add_argument('--num_iteration', dest='iteration',
                        help='Specify which weight to load',
                        default=1800000, type=int)
    parser.add_argument('--model', dest='model',
                        help='Select model',
                        default='iCAN_ResNet50_HICO', type=str)
    parser.add_argument('--object_thres', dest='object_thres',
                        help='Object threshold',
                        default=0.1, type=float)
    parser.add_argument('--human_thres', dest='human_thres',
                        help='Human threshold',
                        default=0.3, type=float)
    parser.add_argument('--debug', dest='debug',
                        help='Debug flag (non-zero enables debug output)',
                        default=0, type=int)
    parser.add_argument('--type', dest='test_type',
                        help='Test dataset type',
                        default='vcl', type=str)
    parser.add_argument('--not_h_threhold', dest='not_h_threhold',
                        help='Disable the human score threshold',
                        action='store_true')
    args = parser.parse_args()
    return args
if __name__ == '__main__':
    args = parse_args()
    print(args)
    # test detections result
    # Resolve the checkpoint path; fall back to LOCAL_DATA when the
    # checkpoint is not found under ROOT_DIR.
    weight = cfg.ROOT_DIR + '/Weights/' + args.model + '/HOI_iter_' + str(args.iteration) + '.ckpt'
    import os
    if not os.path.exists(weight + '.index'):
        weight = cfg.LOCAL_DATA + '/Weights/' + args.model + '/HOI_iter_' + str(args.iteration) + '.ckpt'
    print('weight:', weight)
    print('Human thres = ' + str(args.human_thres) + ', Object thres = ' + str(args.object_thres) + ', iter = ' + str(
        args.iteration) + ', path = ' + weight)
    # Remove any stale detection pickle so results are written fresh.
    output_file = cfg.LOCAL_DATA + '/Results/' + str(args.iteration) + '_' + args.model + '_tin.pkl'
    if os.path.exists(output_file):
        os.remove(output_file)
    # init session
    HICO_dir = cfg.ROOT_DIR + '/Results/HICO/' + str(args.iteration) + '_' + args.model + '/'
    tfconfig = tf.ConfigProto(device_count={"CPU": 12},
                              inter_op_parallelism_threads=8,
                              intra_op_parallelism_threads=8,
                              allow_soft_placement=True)
    # init session
    # tfconfig = tf.ConfigProto(allow_soft_placement=True)
    # tfconfig.gpu_options.allow_growth = True
    tfconfig.gpu_options.allow_growth = True
    sess = tf.Session(config=tfconfig)
    # net = ResNet50(model_name=args.model)
    # net.create_architecture(False)
    #
    #
    # saver = tf.train.Saver()
    # saver.restore(sess, weight)
    #
    # print('Pre-trained weights loaded.')
    #
    # test_net(sess, net, Test_RCNN, output_file, args.object_thres, args.human_thres)
    # sess.close()
    # Generate_HICO_detection(output_file, HICO_dir)
    # Select the backbone via the DATASET env var before constructing the net.
    # NOTE(review): both branches build the same HOI net; only the env var
    # differs for res101 models.
    if args.model.__contains__('res101'):
        os.environ['DATASET'] = 'HICO_res101'
        from networks.HOI import HOI
        net = HOI(model_name=args.model)
    else:
        from networks.HOI import HOI
        net = HOI(model_name=args.model)
    stride = 200
    # Load the test detections without object labels (ATL setting).
    image, blobs, image_id = obtain_test_dataset_wo_obj(args.object_thres, args.human_thres, test_type=args.test_type,
                                                        has_human_threhold=not args.not_h_threhold,
                                                        stride=stride)
    image = image[0:1]
    print(blobs, image)
    # One-hot the 80 object classes and project them onto HOI classes via
    # the object-to-HO matrix.  NOTE(review): tmp_ho_class_from_obj is not
    # used further in this block.
    tmp_labels = tf.one_hot(tf.reshape(tf.cast(blobs['O_cls'], tf.int32), shape=[-1, ]), 80, dtype=tf.float32)
    tmp_ho_class_from_obj = tf.cast(tf.matmul(tmp_labels, net.obj_to_HO_matrix) > 0, tf.float32)
    # action_ho = blobs['O_cls']
    net.set_ph(image, image_id, num_pos=blobs['H_num'], Human_augmented=blobs['H_boxes'],
               Object_augmented=blobs['O_boxes'],
               action_HO=None, sp=blobs['sp'],)
    # net.set_add_ph()
    # net.init_verbs_objs_cls()
    net.create_architecture(False)
    saver = tf.train.Saver()
    print(weight)
    saver.restore(sess, weight)
    print('Pre-trained weights loaded.')
    # The leading column of H_boxes/O_boxes is stripped here -- presumably
    # a batch/image index; confirm against obtain_test_dataset_wo_obj.
    test_net_data_api_wo_obj(sess, net, output_file, blobs['H_boxes'][:, 1:], blobs['O_boxes'][:, 1:],
                             blobs['O_cls'], blobs['H_score'], blobs['O_score'], None, image_id, args.debug)
    sess.close()
| 1,216 | 0 | 23 |
3140f5e66df241402f57b679a55206a4ea912d16 | 1,414 | py | Python | migrations/0026_auto_20160928_1457.py | tobiasbartel/servicium-application_manager | f9dc26b3d7c0c28d42b347fdd26976a908bf95c0 | [
"MIT"
] | null | null | null | migrations/0026_auto_20160928_1457.py | tobiasbartel/servicium-application_manager | f9dc26b3d7c0c28d42b347fdd26976a908bf95c0 | [
"MIT"
] | null | null | null | migrations/0026_auto_20160928_1457.py | tobiasbartel/servicium-application_manager | f9dc26b3d7c0c28d42b347fdd26976a908bf95c0 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
# Generated by Django 1.10.1 on 2016-09-28 14:57
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
| 35.35 | 146 | 0.630127 | # -*- coding: utf-8 -*-
# Generated by Django 1.10.1 on 2016-09-28 14:57
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Auto-generated Django migration (1.10.1, 2016-09-28 14:57).

    Drops ``Module.connected_to_module`` and extends ``ModuleWritesToModule``
    with an ``access_direction`` field (r / w / rw, default rw), makes both
    module FKs CASCADE-delete with explicit related names, and enforces
    uniqueness over (from_module, to_module, access_direction).
    """
    dependencies = [
        ('servicecatalog', '0025_auto_20160928_1433'),
    ]
    operations = [
        migrations.RemoveField(
            model_name='module',
            name='connected_to_module',
        ),
        migrations.AddField(
            model_name='modulewritestomodule',
            name='access_direction',
            field=models.CharField(choices=[('r', 'Read'), ('w', 'Write'), ('rw', 'Read/Write')], default='rw', max_length=2),
        ),
        migrations.AlterField(
            model_name='modulewritestomodule',
            name='from_module',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='module_from_relation', to='servicecatalog.Module'),
        ),
        migrations.AlterField(
            model_name='modulewritestomodule',
            name='to_module',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='module_to_relation', to='servicecatalog.Module'),
        ),
        migrations.AlterUniqueTogether(
            name='modulewritestomodule',
            unique_together=set([('from_module', 'to_module', 'access_direction')]),
        ),
    ]
| 0 | 1,202 | 23 |
2e6ed07b3e7f699d3dcbb48a3aca40686c1c15f6 | 208 | py | Python | Libreria/constantes.py | Sannso/GameCG2 | 426e22541bf670e0767395c30f15d1b6ad6183f3 | [
"CC0-1.0"
] | null | null | null | Libreria/constantes.py | Sannso/GameCG2 | 426e22541bf670e0767395c30f15d1b6ad6183f3 | [
"CC0-1.0"
] | null | null | null | Libreria/constantes.py | Sannso/GameCG2 | 426e22541bf670e0767395c30f15d1b6ad6183f3 | [
"CC0-1.0"
] | null | null | null | #850ร480
# Window dimensions in pixels (ANCHO = width, ALTO = height; the inline
# notes record scaled-up variants of the 850x480 base size).
ANCHO=850 #x4 = 3400
ALTO=480 #x3 = 1440
# RGB colour constants ([R, G, B], each 0-255): VERDE=green, ROJO=red,
# AZUL=blue, AMARILLO=yellow, AZUL_2=cyan, NEGRO=black, BLANCO=white,
# GRIS=grey.
VERDE=[0,255,0]
ROJO=[255,0,0]
AZUL=[0,0,255]
AMARILLO=[255,255,0]
AZUL_2=[0,255,255]
NEGRO=[0,0,0]
BLANCO=[255,255,255]
GRIS=[180,180,180]
WIN = False | 14.857143 | 22 | 0.639423 | #850ร480
# Window dimensions in pixels (ANCHO = width, ALTO = height; the inline
# notes record scaled-up variants of the 850x480 base size).
ANCHO=850 #x4 = 3400
ALTO=480 #x3 = 1440
# RGB colour constants ([R, G, B], each 0-255): VERDE=green, ROJO=red,
# AZUL=blue, AMARILLO=yellow, AZUL_2=cyan, NEGRO=black, BLANCO=white,
# GRIS=grey.
VERDE=[0,255,0]
ROJO=[255,0,0]
AZUL=[0,0,255]
AMARILLO=[255,255,0]
AZUL_2=[0,255,255]
NEGRO=[0,0,0]
BLANCO=[255,255,255]
GRIS=[180,180,180]
WIN = False | 0 | 0 | 0 |
7f6ccce8a07d4cda108330f4696011e12061b900 | 1,938 | py | Python | tests/graph/test_edmonds_karp.py | niemmi/algolib | 81a013af5ae1ca1e8cf8d3f2e2f1b4a9bce6ead8 | [
"BSD-3-Clause"
] | null | null | null | tests/graph/test_edmonds_karp.py | niemmi/algolib | 81a013af5ae1ca1e8cf8d3f2e2f1b4a9bce6ead8 | [
"BSD-3-Clause"
] | null | null | null | tests/graph/test_edmonds_karp.py | niemmi/algolib | 81a013af5ae1ca1e8cf8d3f2e2f1b4a9bce6ead8 | [
"BSD-3-Clause"
] | null | null | null | from .context import Directed, Undirected, edmonds_karp
from unittest import TestCase
# Figure 6.13 from The Algorithm Design Manual
# All four cases share the same flow network; each entry is (x, y, capacity)
# and every case receives its own mutable copy of the edge list.
_NETWORK_EDGES = (
    (0, 1, 5),
    (0, 4, 12),
    (1, 2, 7),
    (1, 3, 9),
    (2, 3, 3),
    (2, 6, 5),
    (3, 5, 3),
    (3, 4, 4),
    (4, 5, 7),
    (5, 6, 2),
)

# (graph class, source vertex, sink vertex, expected max flow)
_SCENARIOS = (
    (Undirected, 0, 6, 7),
    (Undirected, 6, 0, 7),
    (Directed, 0, 6, 7),
    (Directed, 6, 0, 0),
)

CASES = [
    {
        'class': graph_cls,
        'edges': [list(edge) for edge in _NETWORK_EDGES],
        'from': source,
        'to': sink,
        'expected': expected_flow,
    }
    for graph_cls, source, sink, expected_flow in _SCENARIOS
]
| 21.775281 | 78 | 0.301858 | from .context import Directed, Undirected, edmonds_karp
from unittest import TestCase
# Figure 6.13 from The Algorithm Design Manual
# All four cases share the same flow network; each entry is (x, y, capacity)
# and every case receives its own mutable copy of the edge list.
_NETWORK_EDGES = (
    (0, 1, 5),
    (0, 4, 12),
    (1, 2, 7),
    (1, 3, 9),
    (2, 3, 3),
    (2, 6, 5),
    (3, 5, 3),
    (3, 4, 4),
    (4, 5, 7),
    (5, 6, 2),
)

# (graph class, source vertex, sink vertex, expected max flow)
_SCENARIOS = (
    (Undirected, 0, 6, 7),
    (Undirected, 6, 0, 7),
    (Directed, 0, 6, 7),
    (Directed, 6, 0, 0),
)

CASES = [
    {
        'class': graph_cls,
        'edges': [list(edge) for edge in _NETWORK_EDGES],
        'from': source,
        'to': sink,
        'expected': expected_flow,
    }
    for graph_cls, source, sink, expected_flow in _SCENARIOS
]
class TestEdmondsKarp(TestCase):
    """Exercise edmonds_karp against the known max-flow answers in CASES."""

    def test_edmonds_karp(self):
        for scenario in CASES:
            network = scenario['class']()
            for tail, head, capacity in scenario['edges']:
                network.insert_edge(tail, head, capacity=capacity)
            # The residual graph returned alongside the flow is not checked.
            _, max_flow = edmonds_karp(network, scenario['from'], scenario['to'])
            self.assertEqual(scenario['expected'], max_flow)
| 296 | 11 | 49 |
d12c4d0348bebfabc6afc769e18af8912ca70a17 | 537 | py | Python | l_04_list_and_dictionaries/dictionaries/ex_10_shellbound.py | VasAtanasov/SoftUni-Python-Fundamentals | 471d0537dd6e5c8b61ede92b7673c0d67e2964fd | [
"MIT"
] | 1 | 2019-06-05T11:16:08.000Z | 2019-06-05T11:16:08.000Z | l_04_list_and_dictionaries/dictionaries/ex_10_shellbound.py | VasAtanasov/SoftUni-Python-Fundamentals | 471d0537dd6e5c8b61ede92b7673c0d67e2964fd | [
"MIT"
] | null | null | null | l_04_list_and_dictionaries/dictionaries/ex_10_shellbound.py | VasAtanasov/SoftUni-Python-Fundamentals | 471d0537dd6e5c8b61ede92b7673c0d67e2964fd | [
"MIT"
] | null | null | null | regions = {}
while True:
in_line = input()
if 'Aggregate' == in_line:
break
[region, shell] = filter(None, in_line.split(" "))
if region not in regions:
regions[region] = []
if int(shell) not in regions[region]:
regions[region].append(int(shell))
print(("\n".join(
[f'{region} -> {", ".join(map(str, shells))} ({calculate_giant_shell(shells)})' for region, shells in
regions.items()])))
| 22.375 | 105 | 0.603352 | regions = {}
def calculate_giant_shell(shells):
    """Return the aggregate "giant shell" size: total minus the floored average."""
    total_size = sum(shells)
    return total_size - total_size // len(shells)
# Read "<region> <shell-size>" lines until the "Aggregate" sentinel, keeping
# only distinct shell sizes per region (insertion order preserved).
while True:
    in_line = input()
    if 'Aggregate' == in_line:
        break
    # filter(None, ...) drops empty tokens produced by repeated spaces; the
    # two-element unpack assumes exactly one region and one shell per line.
    [region, shell] = filter(None, in_line.split(" "))
    if region not in regions:
        regions[region] = []
    if int(shell) not in regions[region]:
        regions[region].append(int(shell))
# One line per region: its distinct shells followed by the aggregate value
# from calculate_giant_shell (sum minus floored average).
print(("\n".join(
    [f'{region} -> {", ".join(map(str, shells))} ({calculate_giant_shell(shells)})' for region, shells in
     regions.items()])))
| 67 | 0 | 23 |
8ff68975222c6c7991c82b9ee32f667d41fae4db | 5,624 | py | Python | manage_user.py | JacquesMironneau/Portfolio | 5d4c1c428c40b2885280ccdc05d7a7a3ce302e6a | [
"CNRI-Python"
] | null | null | null | manage_user.py | JacquesMironneau/Portfolio | 5d4c1c428c40b2885280ccdc05d7a7a3ce302e6a | [
"CNRI-Python"
] | null | null | null | manage_user.py | JacquesMironneau/Portfolio | 5d4c1c428c40b2885280ccdc05d7a7a3ce302e6a | [
"CNRI-Python"
] | null | null | null | from models import db, User
import bcrypt
import readline
# global variables that contains several types of commands
create_commands = [ "create", "create_user", "createuser", "adduser", "add" ]
update_commands = [ "update", "update_user", "updateuser", "passwd" ]
delete_commands = [ "delete", "delete_user", "deluser", "del" ]
list_commands = [ "list", "list_users", "listusers", "ls" ]
help_commands = [ "help", "?" ]
exit_commands = [ "quit", "exit", "bye" ]
commands = create_commands + update_commands + delete_commands + list_commands + help_commands + exit_commands
# the prompt or the ps1 variable (reference to the $PS1 variable of a shell in *NIX)
ps4 = "pef_db $ "
readline.set_completer(completer)
readline.parse_and_bind("tab: complete")
def process_command(command:str):
"""
Process the commands that the user enters
"""
try:
# we parse the command
command = command.split()
if command[0] in create_commands:
create_user(command[1], command[2])
elif command[0] in update_commands:
update_user(command[1])
elif command[0] in delete_commands:
delete_user(command[1])
elif command[0] in list_commands:
try:
if command[1] in ["-v","--verbose"]:
list_users(True)
else:
print("No valid argument passed going to default")
list_users()
except IndexError:
list_users()
elif command[0] in help_commands:
usage()
elif command[0] in exit_commands:
quit()
else:
print("No valid command entered type ? or help to find more informations")
except IndexError:
print("")
def create_user(name:str, password):
"""
Create a user with a given username and password in the database
:param str name: the username
:param str password: the password
"""
if User.query.get(name):
print(f"Sorry the user '{name}' already exists in database, please consider using another name")
else:
u = User(name,bcrypt.hashpw(password.encode('utf-8'),bcrypt.gensalt()))
print(f"\nNew User:\nName = {u.name}\nPassword = {password}\nIs that correct ? [Y/n]")
if input(">> ") in ['', 'Y', 'y', 'yes']:
db.session.add(u)
db.session.commit()
print(f"User {u.name} added")
print("")
def update_user(name:str):
"""
Change the password of a user, it updates the user password in the database
:param str name: the name of the user we want to change the password
"""
if User.query.get(name):
u = User.query.get(name)
new_pass = input(f"Enter a new password for the user '{name}': ")
new_pass_confirm = input("Confirm the new password: ")
if new_pass == new_pass_confirm:
u.password = bcrypt.hashpw(new_pass.encode('utf-8'), bcrypt.gensalt())
db.session.commit()
print(f"Password for user '{name}' have been changed successfully")
else:
print("Passwords don't match\nCancelling password update")
else:
print(f"Cannot find the user '{name}'")
print("")
def delete_user(name:str):
"""
Delete a user from database
:param str name: the name of teh user we want to delete
"""
if not User.query.get(name):
print(f"Sorry the user '{name}' cannot be found")
else:
u = User.query.get(name)
print(f"\nDeleting user:\nName = {u.name}\nAre you sure ? [Y/n]")
if input(">> ") in ['', 'Y', 'y', 'yes']:
db.session.delete(u)
db.session.commit()
print(f"User {u.name} deleted")
print("")
def list_users(complete=False):
"""
Give a list of all the users stored in the database
:param boolean complete: whether the output of the command should be verbose or not
"""
users = User.query.all()
if len(users) == 0:
print("No users in database yet")
else:
if not complete:
for user in users:
print(user.name)
else:
for user in users:
print(f"{user.name} : {user.password}")
print("")
def usage():
"""
Shows how to use the cli
"""
print("Here is a list of available commands:")
print(" create / createuser / create_user / adduser / add [username] [password] : Add a new user in the database")
print(" update / updateuser / update_user / passwd [username] : Change the password of the user $username")
print(" delete / deleteuser / delete_user / deluser / del [username] : Delete the user $username from the database")
print(" list / list_users / ls [-v, --verbose] : lists all the users in the database")
print(" help / ? : show this help screen")
print(" quit / bye / exit : Exits the program\n")
def quit():
"""
Quit the cli properly
"""
print("Bye!\n")
exit()
# main loop, keyboardInterrupt behaves like the quit() command
while True:
try:
command = input(ps4)
process_command(command)
except KeyboardInterrupt:
quit()
| 33.879518 | 135 | 0.57468 | from models import db, User
import bcrypt
import readline
# global variables that contains several types of commands
create_commands = [ "create", "create_user", "createuser", "adduser", "add" ]
update_commands = [ "update", "update_user", "updateuser", "passwd" ]
delete_commands = [ "delete", "delete_user", "deluser", "del" ]
list_commands = [ "list", "list_users", "listusers", "ls" ]
help_commands = [ "help", "?" ]
exit_commands = [ "quit", "exit", "bye" ]
commands = create_commands + update_commands + delete_commands + list_commands + help_commands + exit_commands
# the prompt or the ps1 variable (reference to the $PS1 variable of a shell in *NIX)
ps4 = "pef_db $ "
def completer(text, state):
    """readline completion hook: return the state-th known command starting with *text*."""
    candidates = [name for name in commands if name.startswith(text)]
    try:
        return candidates[state]
    except IndexError:
        # readline probes increasing states until None is returned.
        return None
readline.set_completer(completer)
readline.parse_and_bind("tab: complete")
def process_command(command:str):
    """
    Parse one line entered at the prompt and dispatch it to the matching
    user-management action.

    :param str command: the raw input line (a verb followed by its arguments)
    """
    try:
        # Split the raw line into verb + arguments (rebinds *command* from
        # str to list).
        command = command.split()
        if command[0] in create_commands:
            create_user(command[1], command[2])
        elif command[0] in update_commands:
            update_user(command[1])
        elif command[0] in delete_commands:
            delete_user(command[1])
        elif command[0] in list_commands:
            try:
                if command[1] in ["-v","--verbose"]:
                    list_users(True)
                else:
                    print("No valid argument passed going to default")
                    list_users()
            except IndexError:
                # No flag supplied at all: plain (non-verbose) listing.
                list_users()
        elif command[0] in help_commands:
            usage()
        elif command[0] in exit_commands:
            quit()
        else:
            print("No valid command entered type ? or help to find more informations")
    except IndexError:
        # Empty line or missing required argument (e.g. "add" with no
        # name/password): print a blank line and return to the prompt.
        print("")
def create_user(name: str, password):
    """
    Interactively create a new user in the database.

    :param str name: the username
    :param str password: the plaintext password (stored bcrypt-hashed)
    """
    if User.query.get(name):
        print(f"Sorry the user '{name}' already exists in database, please consider using another name")
        print("")
        return
    hashed_password = bcrypt.hashpw(password.encode('utf-8'), bcrypt.gensalt())
    new_user = User(name, hashed_password)
    print(f"\nNew User:\nName = {new_user.name}\nPassword = {password}\nIs that correct ? [Y/n]")
    # Empty input (plain Enter) counts as confirmation.
    if input(">> ") in ('', 'Y', 'y', 'yes'):
        db.session.add(new_user)
        db.session.commit()
        print(f"User {new_user.name} added")
    print("")
def update_user(name:str):
    """
    Change the password of a user, it updates the user password in the database

    :param str name: the name of the user we want to change the password
    """
    # NOTE(review): User.query.get(name) is issued twice (existence check +
    # fetch); a single lookup would serve both purposes.
    if User.query.get(name):
        u = User.query.get(name)
        new_pass = input(f"Enter a new password for the user '{name}': ")
        new_pass_confirm = input("Confirm the new password: ")
        if new_pass == new_pass_confirm:
            # Only the bcrypt hash is stored, never the plaintext.
            u.password = bcrypt.hashpw(new_pass.encode('utf-8'), bcrypt.gensalt())
            db.session.commit()
            print(f"Password for user '{name}' have been changed successfully")
        else:
            print("Passwords don't match\nCancelling password update")
    else:
        print(f"Cannot find the user '{name}'")
    print("")
def delete_user(name: str):
    """
    Delete a user from the database after asking for confirmation.

    :param str name: the name of the user we want to delete
    """
    target = User.query.get(name)
    if not target:
        print(f"Sorry the user '{name}' cannot be found")
        print("")
        return
    print(f"\nDeleting user:\nName = {target.name}\nAre you sure ? [Y/n]")
    # Empty input (plain Enter) counts as confirmation.
    if input(">> ") in ('', 'Y', 'y', 'yes'):
        db.session.delete(target)
        db.session.commit()
        print(f"User {target.name} deleted")
    print("")
def list_users(complete=False):
    """
    Print every user stored in the database, one per line.

    :param boolean complete: whether the output of the command should be verbose or not
    """
    stored_users = User.query.all()
    if not stored_users:
        print("No users in database yet")
    else:
        for stored_user in stored_users:
            if complete:
                print(f"{stored_user.name} : {stored_user.password}")
            else:
                print(stored_user.name)
    print("")
def usage():
    """
    Print the list of supported commands and what each one does.
    """
    help_lines = (
        "Here is a list of available commands:",
        " create / createuser / create_user / adduser / add [username] [password] : Add a new user in the database",
        " update / updateuser / update_user / passwd [username] : Change the password of the user $username",
        " delete / deleteuser / delete_user / deluser / del [username] : Delete the user $username from the database",
        " list / list_users / ls [-v, --verbose] : lists all the users in the database",
        " help / ? : show this help screen",
        # Trailing \n gives a blank line after the help text, as before.
        " quit / bye / exit : Exits the program\n",
    )
    for help_line in help_lines:
        print(help_line)
def quit():
    """
    Quit the cli properly

    Prints a farewell message, then terminates the interpreter via the
    ``exit()`` builtin (which raises ``SystemExit``).
    """
    print("Bye!\n")
    exit()
# Main REPL loop: read one line at the prompt and dispatch it.
# Ctrl-C (KeyboardInterrupt) behaves like the quit() command.
while True:
    try:
        command = input(ps4)
        process_command(command)
    except KeyboardInterrupt:
        quit()
| 139 | 0 | 23 |
f5f5752e18cd6726354f2512e52ca5789f0645e1 | 951 | py | Python | pulumi/aws/kic-pulumi-utils/kic_util/external_process.py | fossabot/kic-reference-architectures | 0a6ebe4a43f9b21965d1456159f281c9cf414102 | [
"Apache-2.0"
] | 72 | 2021-06-15T18:23:14.000Z | 2022-03-30T12:39:15.000Z | pulumi/aws/kic-pulumi-utils/kic_util/external_process.py | fossabot/kic-reference-architectures | 0a6ebe4a43f9b21965d1456159f281c9cf414102 | [
"Apache-2.0"
] | 71 | 2021-06-14T22:45:20.000Z | 2022-03-25T18:52:40.000Z | pulumi/aws/kic-pulumi-utils/kic_util/external_process.py | fossabot/kic-reference-architectures | 0a6ebe4a43f9b21965d1456159f281c9cf414102 | [
"Apache-2.0"
] | 32 | 2021-06-14T22:17:24.000Z | 2022-03-29T11:41:18.000Z | import subprocess
from typing import Optional, Dict
class ExternalProcessExecError(RuntimeError):
"""Error when an external process fails to run successfully"""
def run(cmd: str, suppress_error=False, env: Optional[Dict[str, str]] = None) -> (str, str):
    """Runs an external command and returns back its stdout and stderr

    :param str cmd: shell command line to execute
    :param bool suppress_error: when True, a non-zero exit status does not raise
    :param env: optional environment mapping for the child process
    :return: (stdout, stderr) decoded as UTF-8 (undecodable bytes dropped)
    :raises ExternalProcessExecError: on non-zero exit unless suppressed
    """
    # SECURITY: shell=True means *cmd* is interpreted by the shell — it must
    # not be assembled from untrusted input.
    proc = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True, env=env)
    (res, err) = proc.communicate()
    res = res.decode(encoding="utf-8", errors="ignore")
    err = err.decode(encoding="utf-8", errors="ignore")
    if proc.returncode != 0 and not suppress_error:
        msg = f"Failed to execute external process: {cmd}\n{res}\nError: {err}"
        raise ExternalProcessExecError(msg, cmd)
    return res, err
| 36.576923 | 101 | 0.679285 | import subprocess
from typing import Optional, Dict
class ExternalProcessExecError(RuntimeError):
    """Error when an external process fails to run successfully"""

    def __init__(self, cmd: str, message: str):
        """Record the failing command line and a description of the failure."""
        super().__init__(f"{message} when running: {cmd}")
        self.cmd = cmd
        self.message = message
def run(cmd: str, suppress_error=False, env: Optional[Dict[str, str]] = None) -> (str, str):
    """Runs an external command and returns back its stdout and stderr

    :param str cmd: shell command line to execute (SECURITY: shell=True —
        never build this string from untrusted input)
    :param bool suppress_error: when True, a non-zero exit status does not raise
    :param env: optional environment mapping for the child process
    :return: (stdout, stderr) decoded as UTF-8 (undecodable bytes dropped)
    :raises ExternalProcessExecError: on non-zero exit unless suppressed
    """
    proc = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True, env=env)
    (res, err) = proc.communicate()
    res = res.decode(encoding="utf-8", errors="ignore")
    err = err.decode(encoding="utf-8", errors="ignore")
    if proc.returncode != 0 and not suppress_error:
        msg = f"Failed to execute external process: {cmd}\n{res}\nError: {err}"
        # BUG FIX: ExternalProcessExecError.__init__ takes (cmd, message);
        # the arguments were previously passed as (msg, cmd), swapping the
        # .cmd/.message attributes and garbling the rendered error text.
        raise ExternalProcessExecError(cmd, msg)
    return res, err
| 135 | 0 | 26 |
16de070614d19372a80eaa0735c54655b9474bee | 526 | py | Python | sympy/core/interval.py | certik/sympy-oldcore | eb5bd061c309d88cdfb502bfd5df511b30368458 | [
"BSD-3-Clause"
] | 1 | 2016-05-08T17:54:57.000Z | 2016-05-08T17:54:57.000Z | sympy/core/interval.py | certik/sympy-oldcore | eb5bd061c309d88cdfb502bfd5df511b30368458 | [
"BSD-3-Clause"
] | null | null | null | sympy/core/interval.py | certik/sympy-oldcore | eb5bd061c309d88cdfb502bfd5df511b30368458 | [
"BSD-3-Clause"
] | null | null | null |
from basic import Basic
| 20.230769 | 60 | 0.547529 |
from basic import Basic
class Interval(Basic):
def __new__(cls, start, end, **assumptions):
start = Basic.sympify(start)
end = Basic.sympify(end)
return Basic.__new__(cls, start, end, **assumptions)
@property
def start(self):
return self._args[0]
@property
def end(self):
return self._args[1]
def tostr(self, level=0):
r = '[%s, %s]' % (self.start, self.end)
if self.precedence <= level:
r = '(%s)' % (r)
return r
| 335 | 137 | 23 |
4e652f29e8860d5a86bd8a22829c65a88c45f18c | 2,877 | py | Python | code/SelectPlots.py | CnrLwlss/fibreAnnotation | 1fcebcd7a4035e2996a04a0aa228c527ec49ccad | [
"MIT"
] | null | null | null | code/SelectPlots.py | CnrLwlss/fibreAnnotation | 1fcebcd7a4035e2996a04a0aa228c527ec49ccad | [
"MIT"
] | null | null | null | code/SelectPlots.py | CnrLwlss/fibreAnnotation | 1fcebcd7a4035e2996a04a0aa228c527ec49ccad | [
"MIT"
] | null | null | null | import pandas as pd
pd.set_option('display.max_columns', None)
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
import urllib.request
import os.path
from Selecting import *
# Download data (cached: the download is skipped when dat_py.txt exists)
url = "https://raw.githubusercontent.com/CnrLwlss/Warren_2019/master/shiny/dat.txt"
outfile = "dat_py.txt"
mitoprot = "VDAC1"  # reference channel plotted on every x-axis below
if not os.path.isfile(outfile):
    urllib.request.urlretrieve(url,outfile)
data = pd.read_csv(outfile,sep="\t")
# Drop unwanted channels (any whose name contains "LOG_" or "MED_")
chans = data.channel.unique()
chans = [c for c in chans if ("LOG_" not in c) and ("MED_" not in c)]
data = data[data["channel"].isin(chans)]
# Group subject ids: those containing "P" -> patids, "C" -> ctrlids
subjids = data.patient_id.unique()
subjids.sort()
patids = [id for id in subjids if "P" in id]  # NOTE: *id* shadows the builtin
ctrlids = [id for id in subjids if "C" in id]
# Long to wide: one row per fibre, one column per channel
wide = data.pivot_table(index=["cell_id","id","patient_id","subject_group"],values="value",columns="channel").reset_index()
cwide = wide[wide["patient_id"].isin(ctrlids)]  # control fibres only
# Plotting options (RGBA colours sharing one alpha)
alpha = 0.2
def_col = (1,0,0,alpha)   # used for the "below controls" selection (red)
norm_col = (0,1,0,alpha)  # green; not referenced later in this script
pos_col = (0,0,1,alpha)   # used for the "above controls" selection (blue)
# Manually classify fibres by each protein
prots = ['NDUFB8', 'GRIM19', 'SDHA', 'UqCRC2', 'COX4+4L2', 'MTCO1', 'OSCP']
for prot in prots:
    cols = [(0,0,0,alpha) for pt in wide[mitoprot]]
    # Pass 1: interactively select fibres BELOW the control contours
    # (both axes log-transformed).
    fig,ax = plt.subplots(num = "Select fibres below controls")
    manager = plt.get_current_fig_manager()
    manager.window.showMaximized()
    pts = plt.scatter(np.log(wide[mitoprot]),np.log(wide[prot]),color=cols,edgecolors="none")
    cnts = sns.kdeplot(x=np.log(cwide[mitoprot]),y=np.log(cwide[prot]),levels=[0.1,0.25,0.5,0.75,0.95],color="yellow")
    ax.set_xlabel("log("+mitoprot+")")
    ax.set_ylabel("log("+prot+")")
    sel_def = SelectFromCollection(ax,pts,colour_sel=def_col)
    plt.show()
    cols = [def_col if i in sel_def.ind else col for i,col in enumerate(cols)]
    # Pass 2: select fibres ABOVE the controls.
    # NOTE(review): this pass plots RAW values while the axis labels still say
    # log(...) — confirm whether np.log was intended here as in pass 1.
    fig,ax = plt.subplots(num = "Select fibres above controls")
    manager = plt.get_current_fig_manager()
    manager.window.showMaximized()
    pts = plt.scatter(wide[mitoprot],wide[prot],color=cols,edgecolors="none")
    cnts = sns.kdeplot(x=cwide[mitoprot],y=cwide[prot],levels=[0.1,0.25,0.5,0.75,0.95],color="yellow")
    ax.set_xlabel("log("+mitoprot+")")
    ax.set_ylabel("log("+prot+")")
    sel_pos = SelectFromCollection(ax,pts,colour_sel=pos_col)
    plt.show()
    # Boolean columns marking each fibre's membership in the two selections.
    wide[prot+"_down"] = [i in sel_def.ind for i,val in enumerate(wide[prot])]
    wide[prot+"_up"] = [i in sel_pos.ind for i,val in enumerate(wide[prot])]
wide.to_csv("ClassifiedWide.csv")
# Summarise classifications: mean of the boolean *_up/*_down columns gives
# the fraction of flagged fibres, per patient and per subject group.
clcols = ["patient_id","subject_group"]+[col for col in wide.columns if ("_up" in col) or ("_down" in col)]
cl = wide[clcols]
pid = cl.groupby("patient_id").mean()
sub = cl.groupby("subject_group").mean()
pid.to_csv("SummaryByPatient.csv", float_format='%.2f')
sub.to_csv("SummaryByType.csv", float_format='%.2f')
| 35.518519 | 123 | 0.697602 | import pandas as pd
pd.set_option('display.max_columns', None)
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
import urllib.request
import os.path
from Selecting import *
# Download data
url = "https://raw.githubusercontent.com/CnrLwlss/Warren_2019/master/shiny/dat.txt"
outfile = "dat_py.txt"
mitoprot = "VDAC1"
if not os.path.isfile(outfile):
urllib.request.urlretrieve(url,outfile)
data = pd.read_csv(outfile,sep="\t")
# Drop unwanted columns
chans = data.channel.unique()
chans = [c for c in chans if ("LOG_" not in c) and ("MED_" not in c)]
data = data[data["channel"].isin(chans)]
# Group data by subject type
subjids = data.patient_id.unique()
subjids.sort()
patids = [id for id in subjids if "P" in id]
ctrlids = [id for id in subjids if "C" in id]
# Long to wide
wide = data.pivot_table(index=["cell_id","id","patient_id","subject_group"],values="value",columns="channel").reset_index()
cwide = wide[wide["patient_id"].isin(ctrlids)]
# Plotting options
alpha = 0.2
def_col = (1,0,0,alpha)
norm_col = (0,1,0,alpha)
pos_col = (0,0,1,alpha)
# Manually classify fibres by each protein
prots = ['NDUFB8', 'GRIM19', 'SDHA', 'UqCRC2', 'COX4+4L2', 'MTCO1', 'OSCP']
for prot in prots:
cols = [(0,0,0,alpha) for pt in wide[mitoprot]]
fig,ax = plt.subplots(num = "Select fibres below controls")
manager = plt.get_current_fig_manager()
manager.window.showMaximized()
pts = plt.scatter(np.log(wide[mitoprot]),np.log(wide[prot]),color=cols,edgecolors="none")
cnts = sns.kdeplot(x=np.log(cwide[mitoprot]),y=np.log(cwide[prot]),levels=[0.1,0.25,0.5,0.75,0.95],color="yellow")
ax.set_xlabel("log("+mitoprot+")")
ax.set_ylabel("log("+prot+")")
sel_def = SelectFromCollection(ax,pts,colour_sel=def_col)
plt.show()
cols = [def_col if i in sel_def.ind else col for i,col in enumerate(cols)]
fig,ax = plt.subplots(num = "Select fibres above controls")
manager = plt.get_current_fig_manager()
manager.window.showMaximized()
pts = plt.scatter(wide[mitoprot],wide[prot],color=cols,edgecolors="none")
cnts = sns.kdeplot(x=cwide[mitoprot],y=cwide[prot],levels=[0.1,0.25,0.5,0.75,0.95],color="yellow")
ax.set_xlabel("log("+mitoprot+")")
ax.set_ylabel("log("+prot+")")
sel_pos = SelectFromCollection(ax,pts,colour_sel=pos_col)
plt.show()
wide[prot+"_down"] = [i in sel_def.ind for i,val in enumerate(wide[prot])]
wide[prot+"_up"] = [i in sel_pos.ind for i,val in enumerate(wide[prot])]
wide.to_csv("ClassifiedWide.csv")
# Summarise classifications
clcols = ["patient_id","subject_group"]+[col for col in wide.columns if ("_up" in col) or ("_down" in col)]
cl = wide[clcols]
pid = cl.groupby("patient_id").mean()
sub = cl.groupby("subject_group").mean()
pid.to_csv("SummaryByPatient.csv", float_format='%.2f')
sub.to_csv("SummaryByType.csv", float_format='%.2f')
| 0 | 0 | 0 |
38a00872e8591388575e8ad092f7061f22387e6a | 3,876 | py | Python | mirdata/datasets/gtzan_genre.py | chrisdonahue/mirdata | c9906e0948adcc75183f5246e7baa879022efcdb | [
"BSD-3-Clause"
] | null | null | null | mirdata/datasets/gtzan_genre.py | chrisdonahue/mirdata | c9906e0948adcc75183f5246e7baa879022efcdb | [
"BSD-3-Clause"
] | null | null | null | mirdata/datasets/gtzan_genre.py | chrisdonahue/mirdata | c9906e0948adcc75183f5246e7baa879022efcdb | [
"BSD-3-Clause"
] | null | null | null | """GTZAN-Genre Dataset Loader
.. admonition:: Dataset Info
:class: dropdown
This dataset was used for the well known genre classification paper:
.. code-block:: latex
"Musical genre classification of audio signals " by G. Tzanetakis and
P. Cook in IEEE Transactions on Audio and Speech Processing 2002.
The dataset consists of 1000 audio tracks each 30 seconds long. It
contains 10 genres, each represented by 100 tracks. The tracks are all
22050 Hz mono 16-bit audio files in .wav format.
"""
import os
from typing import BinaryIO, Optional, TextIO, Tuple
import librosa
import numpy as np
from mirdata import download_utils
from mirdata import jams_utils
from mirdata import core
from mirdata import io
BIBTEX = """@article{tzanetakis2002gtzan,
title={GTZAN genre collection},
author={Tzanetakis, George and Cook, P},
journal={Music Analysis, Retrieval and Synthesis for Audio Signals},
year={2002}
}"""
INDEXES = {
"default": "1.0",
"test": "1.0",
"1.0": core.Index(filename="gtzan_genre_index_1.0.json"),
}
REMOTES = {
"all": download_utils.RemoteFileMetadata(
filename="genres.tar.gz",
url="http://opihi.cs.uvic.ca/sound/genres.tar.gz",
checksum="5b3d6dddb579ab49814ab86dba69e7c7",
destination_dir="gtzan_genre",
)
}
LICENSE_INFO = "Unfortunately we couldn't find the license information for the GTZAN_genre dataset."
class Track(core.Track):
"""gtzan_genre Track class
Args:
track_id (str): track id of the track
Attributes:
audio_path (str): path to the audio file
genre (str): annotated genre
track_id (str): track id
"""
@property
def audio(self) -> Optional[Tuple[np.ndarray, float]]:
"""The track's audio
Returns:
* np.ndarray - audio signal
* float - sample rate
"""
return load_audio(self.audio_path)
def to_jams(self):
"""Get the track's data in jams format
Returns:
jams.JAMS: the track's data in jams format
"""
return jams_utils.jams_converter(
tags_gtzan_data=[(self.genre, "gtzan-genre")],
metadata={
"title": "Unknown track",
"artist": "Unknown artist",
"release": "Unknown album",
"duration": 30.0,
"curator": "George Tzanetakis",
},
)
@io.coerce_to_bytes_io
def load_audio(fhandle: BinaryIO) -> Tuple[np.ndarray, float]:
"""Load a GTZAN audio file.
Args:
fhandle (str or file-like): File-like object or path to audio file
Returns:
* np.ndarray - the mono audio signal
* float - The sample rate of the audio file
"""
audio, sr = librosa.load(fhandle, sr=22050, mono=True)
return audio, sr
@core.docstring_inherit(core.Dataset)
class Dataset(core.Dataset):
"""
The gtzan_genre dataset
"""
@core.copy_docs(load_audio)
| 24.531646 | 100 | 0.600877 | """GTZAN-Genre Dataset Loader
.. admonition:: Dataset Info
:class: dropdown
This dataset was used for the well known genre classification paper:
.. code-block:: latex
"Musical genre classification of audio signals " by G. Tzanetakis and
P. Cook in IEEE Transactions on Audio and Speech Processing 2002.
The dataset consists of 1000 audio tracks each 30 seconds long. It
contains 10 genres, each represented by 100 tracks. The tracks are all
22050 Hz mono 16-bit audio files in .wav format.
"""
import os
from typing import BinaryIO, Optional, TextIO, Tuple
import librosa
import numpy as np
from mirdata import download_utils
from mirdata import jams_utils
from mirdata import core
from mirdata import io
BIBTEX = """@article{tzanetakis2002gtzan,
title={GTZAN genre collection},
author={Tzanetakis, George and Cook, P},
journal={Music Analysis, Retrieval and Synthesis for Audio Signals},
year={2002}
}"""
INDEXES = {
"default": "1.0",
"test": "1.0",
"1.0": core.Index(filename="gtzan_genre_index_1.0.json"),
}
REMOTES = {
"all": download_utils.RemoteFileMetadata(
filename="genres.tar.gz",
url="http://opihi.cs.uvic.ca/sound/genres.tar.gz",
checksum="5b3d6dddb579ab49814ab86dba69e7c7",
destination_dir="gtzan_genre",
)
}
LICENSE_INFO = "Unfortunately we couldn't find the license information for the GTZAN_genre dataset."
class Track(core.Track):
"""gtzan_genre Track class
Args:
track_id (str): track id of the track
Attributes:
audio_path (str): path to the audio file
genre (str): annotated genre
track_id (str): track id
"""
def __init__(
self,
track_id,
data_home,
dataset_name,
index,
metadata,
):
super().__init__(
track_id,
data_home,
dataset_name,
index,
metadata,
)
self.genre = track_id.split(".")[0]
if self.genre == "hiphop":
self.genre = "hip-hop"
self.audio_path = self.get_path("audio")
@property
def audio(self) -> Optional[Tuple[np.ndarray, float]]:
"""The track's audio
Returns:
* np.ndarray - audio signal
* float - sample rate
"""
return load_audio(self.audio_path)
def to_jams(self):
"""Get the track's data in jams format
Returns:
jams.JAMS: the track's data in jams format
"""
return jams_utils.jams_converter(
tags_gtzan_data=[(self.genre, "gtzan-genre")],
metadata={
"title": "Unknown track",
"artist": "Unknown artist",
"release": "Unknown album",
"duration": 30.0,
"curator": "George Tzanetakis",
},
)
@io.coerce_to_bytes_io
def load_audio(fhandle: BinaryIO) -> Tuple[np.ndarray, float]:
"""Load a GTZAN audio file.
Args:
fhandle (str or file-like): File-like object or path to audio file
Returns:
* np.ndarray - the mono audio signal
* float - The sample rate of the audio file
"""
audio, sr = librosa.load(fhandle, sr=22050, mono=True)
return audio, sr
@core.docstring_inherit(core.Dataset)
class Dataset(core.Dataset):
"""
The gtzan_genre dataset
"""
def __init__(self, data_home=None, version="default"):
super().__init__(
data_home,
version,
name="gtzan_genre",
track_class=Track,
bibtex=BIBTEX,
indexes=INDEXES,
remotes=REMOTES,
license_info=LICENSE_INFO,
)
@core.copy_docs(load_audio)
def load_audio(self, *args, **kwargs):
return load_audio(*args, **kwargs)
| 778 | 0 | 80 |
715d5623737288c8bb08263b4b7b00de3210ba31 | 2,591 | py | Python | blueprints/boards.py | kusl/maniwani | 9c2634342ff3e9cb92dc86cd3a4e3a4225b13a23 | [
"MIT"
] | null | null | null | blueprints/boards.py | kusl/maniwani | 9c2634342ff3e9cb92dc86cd3a4e3a4225b13a23 | [
"MIT"
] | null | null | null | blueprints/boards.py | kusl/maniwani | 9c2634342ff3e9cb92dc86cd3a4e3a4225b13a23 | [
"MIT"
] | null | null | null | from flask import Blueprint, render_template, redirect, url_for, flash
from flask_restful import reqparse
from markdown import markdown
from model.Board import Board
from model.BoardList import BoardList
from model.BoardListCatalog import BoardCatalog
from model.Post import render_for_catalog
from model.Slip import get_slip
from model.Tag import Tag
from shared import db
boards_blueprint = Blueprint('boards', __name__, template_folder='template')
boards_blueprint.add_app_template_global(style_for_tag)
@boards_blueprint.route("/")
@boards_blueprint.route("/<int:board_id>")
@boards_blueprint.route("/rules", defaults={'board_id': None})
@boards_blueprint.route("/rules/<int:board_id>")
@boards_blueprint.route("/admin/<int:board_id>")
@boards_blueprint.route("/admin/<int:board_id>", methods=["POST"])
| 37.014286 | 101 | 0.73408 | from flask import Blueprint, render_template, redirect, url_for, flash
from flask_restful import reqparse
from markdown import markdown
from model.Board import Board
from model.BoardList import BoardList
from model.BoardListCatalog import BoardCatalog
from model.Post import render_for_catalog
from model.Slip import get_slip
from model.Tag import Tag
from shared import db
def style_for_tag(tag_name):
    """Return the background/text style strings configured for a tag.

    Raises if no tag with the given name exists (Query.one()).
    """
    query = db.session.query(Tag).filter(Tag.name == tag_name)
    tag = query.one()
    styles = {"bg_style": tag.bg_style, "text_style": tag.text_style}
    return styles
# Blueprint collecting every /boards route; templates resolve from the
# local "template" folder.
boards_blueprint = Blueprint('boards', __name__, template_folder='template')
# Make style_for_tag callable from any Jinja template rendered by the app.
boards_blueprint.add_app_template_global(style_for_tag)
@boards_blueprint.route("/")
def list():
    """Render the index page showing every board."""
    all_boards = BoardList().get()
    return render_template("board-index.html", boards=all_boards)
@boards_blueprint.route("/<int:board_id>")
def catalog(board_id):
    """Render the thread catalog for a single board."""
    board = db.session.query(Board).filter(Board.id == board_id).one()
    threads = BoardCatalog().retrieve(board_id)
    # Prepare post bodies for catalog display (mutates the thread objects).
    render_for_catalog(threads)
    return render_template(
        "catalog.html",
        threads=threads,
        board_id=board_id,
        board_name=board.name,
    )
@boards_blueprint.route("/rules", defaults={'board_id': None})
@boards_blueprint.route("/rules/<int:board_id>")
def rules(board_id):
    """Show a board's rules page, or redirect to the site-wide rules."""
    if board_id is not None:
        board = db.session.query(Board).filter(Board.id == board_id).one()
        # markdown is passed in so the template can render the rules text.
        return render_template("rules.html", board=board, markdown=markdown)
    return redirect(url_for('main.rules'))
@boards_blueprint.route("/admin/<int:board_id>")
def admin(board_id):
    """Render the board administration page; non-admins are bounced back
    to the board catalog with a flash message."""
    # Fetch the slip once instead of calling get_slip() twice per request.
    slip = get_slip()
    if slip and slip.is_admin:
        board = db.session.query(Board).filter(Board.id == board_id).one()
        return render_template("board-admin.html", board=board)
    flash("Only admins can access board administration!")
    return redirect(url_for("boards.catalog", board_id=board_id))
@boards_blueprint.route("/admin/<int:board_id>", methods=["POST"])
def admin_update(board_id):
    """Apply a POSTed name/rules change to a board (admins only).

    Redirects back to the board catalog in all cases; non-admins get a
    flash message and no change is made.
    """
    # Fetch the slip once; use the same truthiness check as admin() so a
    # falsy-but-not-False is_admin can no longer slip through.
    slip = get_slip()
    if slip is None or not slip.is_admin:
        flash("Only admins can access board administration!")
        return redirect(url_for("boards.catalog", board_id=board_id))
    # Both fields are mandatory; reqparse aborts with a 400 if one is missing.
    parser = reqparse.RequestParser()
    parser.add_argument("name", type=str, required=True)
    parser.add_argument("rules", type=str, required=True)
    args = parser.parse_args()
    board = db.session.query(Board).filter(Board.id == board_id).one()
    board.name = args["name"]
    board.rules = args["rules"]
    db.session.add(board)
    db.session.commit()
    return redirect(url_for("boards.catalog", board_id=board_id))
| 1,637 | 0 | 133 |
f96db272efe118cb0d2f81a136209b560bfe188e | 5,528 | py | Python | config/am.py | googleinterns/text-norm-for-low-resource-languages | 500c08f863539fc3aa6d000307c91b25848e1adc | [
"Apache-2.0"
] | 1 | 2020-09-03T18:33:13.000Z | 2020-09-03T18:33:13.000Z | config/am.py | googleinterns/text-norm-for-low-resource-languages | 500c08f863539fc3aa6d000307c91b25848e1adc | [
"Apache-2.0"
] | 3 | 2020-06-25T22:01:30.000Z | 2020-07-30T19:40:04.000Z | config/am.py | googleinterns/text-norm-for-low-resource-languages | 500c08f863539fc3aa6d000307c91b25848e1adc | [
"Apache-2.0"
] | null | null | null | "Amharic config with language-specific information."
from pynini import *
from pynini.lib import byte
from config import utils
GRAPHEMES = union("'", "-",
"แ", "แ", "แ", "แ", "แ", "แ
", "แ",
"แ", "แ", "แ", "แ", "แ", "แ", "แ", "แ",
"แ", "แ", "แ", "แ", "แ", "แ", "แ", "แ",
"แ", "แ", "แ", "แ", "แ", "แ", "แ", "แ",
"แ ", "แก", "แข", "แฃ", "แค", "แฅ", "แฆ", "แง",
"แจ", "แฉ", "แช", "แซ", "แฌ", "แญ", "แฎ", "แฏ",
"แฐ", "แฑ", "แฒ", "แณ", "แด", "แต", "แถ", "แท",
"แธ", "แน", "แบ", "แป", "แผ", "แฝ", "แพ", "แฟ",
"แ", "แ", "แ", "แ", "แ", "แ
",
"แ", "แ", "แ", "แ", "แ", "แ",
"แ ", "แก", "แข", "แฃ", "แค", "แฅ", "แฆ", "แง",
"แจ", "แฉ", "แช", "แซ", "แฌ", "แญ", "แฎ", "แฏ",
"แฐ", "แฑ", "แฒ", "แณ", "แด", "แต", "แถ", "แท",
"แธ", "แน", "แบ", "แป", "แผ", "แฝ", "แพ", "แฟ",
"แ", "แ", "แ", "แ", "แ", "แ
",
"แ", "แ", "แ", "แ", "แ", "แ",
"แ", "แ", "แ", "แ", "แ", "แ", "แ", "แ",
"แ", "แ", "แ", "แ", "แ", "แ", "แ", "แ",
"แ ", "แก", "แข", "แฃ", "แค", "แฅ", "แฆ", "แง",
"แจ", "แฉ", "แช", "แซ", "แฌ", "แญ",
"แฎ", "แฐ", "แฒ", "แณ", "แด", "แต",
"แธ", "แน", "แบ", "แป", "แผ", "แฝ",
"แพ", "แ", "แ", "แ", "แ", "แ
",
"แ", "แ", "แ", "แ", "แ", "แ", "แ",
"แ", "แ", "แ", "แ", "แ", "แ", "แ",
"แ", "แ", "แ", "แ", "แ", "แ", "แ", "แ",
"แ ", "แก", "แข", "แฃ", "แค", "แฅ", "แฆ", "แง",
"แจ", "แฉ", "แช", "แซ", "แฌ", "แญ", "แฎ",
"แฐ", "แฑ", "แฒ", "แณ", "แด", "แต", "แถ", "แท",
"แ", "แ", "แ", "แ", "แ", "แ
", "แ", "แ",
"แ", "แ", "แ", "แ", "แ", "แ",
"แ", "แ", "แ", "แ", "แ", "แ",
"แ ", "แก", "แข", "แฃ", "แค", "แฅ", "แฆ", "แง",
"แจ", "แฉ", "แช", "แซ", "แฌ", "แญ", "แฎ", "แฏ",
"แฐ", "แฑ", "แฒ", "แณ", "แด", "แต", "แถ", "แท",
"แธ", "แน", "แบ", "แป", "แผ", "แฝ", "แพ", "แฟ",
"แ", "แ", "แ", "แ", "แ", "แ
", "แ",
"แ", "แ", "แ", "แ", "แ", "แ", "แ", "แ",
"แ", "แ", "แ", "แ", "แ", "แ", "แ", "แ")
INITIAL_PUNCTUATION = utils.DEFAULT_INITIAL_PUNCTUATION
FINAL_PUNCTUATION = union(utils.DEFAULT_FINAL_PUNCTUATION,
utils.GEEZ_FINAL_PUNCTUATION)
NUMERALS = union(byte.DIGIT,
utils.GEEZ_NUMERALS)
# Amharic "over-differentiates" H graphemes, emphatic S graphemes, and glottal
# stop graphemes, which were all inherited from Ge'ez. Surveys suggest that
# Amharic speakers prefer one form over the others. These rules convert the
# dispreferred series graphemes to the one preferred series, when available.
# The surveys about grapheme preference come from the paper here:
# https://www.researchgate.net/profile/Fekede_Menuta/publication/312093656_OVER-DIFFERENTIATION_3_Over-differentiation_in_Amharic_Orthography_and_Attitude_towards_Reform/links/586f5d8408ae329d6215fb85/OVER-DIFFERENTIATION-3-Over-differentiation-in-Amharic-Orthography-and-Attitude-towards-Reform.pdf
REDUCE_H = string_map((("แ", "แ"),
("แ", "แ"),
("แ", "แ"),
("แ", "แ"),
("แ", "แ"),
("แ", "แ
"),
("แ", "แ"),
#("แ", "")
("แ", "แ"),
("แ", "แ"),
("แ", "แ"),
("แ", "แ"),
("แ", "แ"),
("แ
", "แ
"),
("แ", "แ"),
#("แ", ""),
#("แ", ""),
#("แ", ""),
#("แ", ""),
#("แ", ""),
("แธ", "แ"),
("แน", "แ"),
("แบ", "แ"),
("แป", "แ"),
("แผ", "แ"),
("แฝ", "แ
"),
("แพ", "แ")
#("แ", ""),
#("แ", ""),
#("แ", ""),
#("แ", ""),
#("แ
", "")
))
REDUCE_S = string_map((("แธ", "แ"),
("แน", "แ"),
("แบ", "แ"),
("แป", "แ"),
("แผ", "แ"),
("แฝ", "แ
"),
("แพ", "แ")
#("แฟ", "")
))
REDUCE_A = string_map((("แ", "แ "),
("แ", "แ "),
("แ", "แข"),
("แ", "แฃ"),
("แ", "แค"),
("แ", "แฅ"),
("แ", "แฆ")
))
REDUCE_OVERDIFFERENTIATION = cdrewrite(
union(REDUCE_H, REDUCE_S, REDUCE_A),
"",
"",
byte.BYTES.closure())
LANGUAGE_SPECIFIC_PREPROCESSING = REDUCE_OVERDIFFERENTIATION
# These files are not in the repo. You will need to change these paths to match
# where you place the data files.
UD = "language_data/am/UD_Amharic-ATT/am_att-ud-test.conllu"
UM = ""
AC = "language_data/am/ac/am-wordbigrams.txt"
OSCAR = "language_data/am/oscar/am.txt"
OSCAR_DEDUP = "language_data/am/oscar/am_dedup.txt"
LCC = "language_data/am/lcc/amh_wikipedia_2016_30K/amh_wikipedia_2016_30K-sentences.txt"
| 41.56391 | 299 | 0.31458 | "Amharic config with language-specific information."
from pynini import *
from pynini.lib import byte
from config import utils
GRAPHEMES = union("'", "-",
"แ", "แ", "แ", "แ", "แ", "แ
", "แ",
"แ", "แ", "แ", "แ", "แ", "แ", "แ", "แ",
"แ", "แ", "แ", "แ", "แ", "แ", "แ", "แ",
"แ", "แ", "แ", "แ", "แ", "แ", "แ", "แ",
"แ ", "แก", "แข", "แฃ", "แค", "แฅ", "แฆ", "แง",
"แจ", "แฉ", "แช", "แซ", "แฌ", "แญ", "แฎ", "แฏ",
"แฐ", "แฑ", "แฒ", "แณ", "แด", "แต", "แถ", "แท",
"แธ", "แน", "แบ", "แป", "แผ", "แฝ", "แพ", "แฟ",
"แ", "แ", "แ", "แ", "แ", "แ
",
"แ", "แ", "แ", "แ", "แ", "แ",
"แ ", "แก", "แข", "แฃ", "แค", "แฅ", "แฆ", "แง",
"แจ", "แฉ", "แช", "แซ", "แฌ", "แญ", "แฎ", "แฏ",
"แฐ", "แฑ", "แฒ", "แณ", "แด", "แต", "แถ", "แท",
"แธ", "แน", "แบ", "แป", "แผ", "แฝ", "แพ", "แฟ",
"แ", "แ", "แ", "แ", "แ", "แ
",
"แ", "แ", "แ", "แ", "แ", "แ",
"แ", "แ", "แ", "แ", "แ", "แ", "แ", "แ",
"แ", "แ", "แ", "แ", "แ", "แ", "แ", "แ",
"แ ", "แก", "แข", "แฃ", "แค", "แฅ", "แฆ", "แง",
"แจ", "แฉ", "แช", "แซ", "แฌ", "แญ",
"แฎ", "แฐ", "แฒ", "แณ", "แด", "แต",
"แธ", "แน", "แบ", "แป", "แผ", "แฝ",
"แพ", "แ", "แ", "แ", "แ", "แ
",
"แ", "แ", "แ", "แ", "แ", "แ", "แ",
"แ", "แ", "แ", "แ", "แ", "แ", "แ",
"แ", "แ", "แ", "แ", "แ", "แ", "แ", "แ",
"แ ", "แก", "แข", "แฃ", "แค", "แฅ", "แฆ", "แง",
"แจ", "แฉ", "แช", "แซ", "แฌ", "แญ", "แฎ",
"แฐ", "แฑ", "แฒ", "แณ", "แด", "แต", "แถ", "แท",
"แ", "แ", "แ", "แ", "แ", "แ
", "แ", "แ",
"แ", "แ", "แ", "แ", "แ", "แ",
"แ", "แ", "แ", "แ", "แ", "แ",
"แ ", "แก", "แข", "แฃ", "แค", "แฅ", "แฆ", "แง",
"แจ", "แฉ", "แช", "แซ", "แฌ", "แญ", "แฎ", "แฏ",
"แฐ", "แฑ", "แฒ", "แณ", "แด", "แต", "แถ", "แท",
"แธ", "แน", "แบ", "แป", "แผ", "แฝ", "แพ", "แฟ",
"แ", "แ", "แ", "แ", "แ", "แ
", "แ",
"แ", "แ", "แ", "แ", "แ", "แ", "แ", "แ",
"แ", "แ", "แ", "แ", "แ", "แ", "แ", "แ")
INITIAL_PUNCTUATION = utils.DEFAULT_INITIAL_PUNCTUATION
FINAL_PUNCTUATION = union(utils.DEFAULT_FINAL_PUNCTUATION,
utils.GEEZ_FINAL_PUNCTUATION)
NUMERALS = union(byte.DIGIT,
utils.GEEZ_NUMERALS)
# Amharic "over-differentiates" H graphemes, emphatic S graphemes, and glottal
# stop graphemes, which were all inherited from Ge'ez. Surveys suggest that
# Amharic speakers prefer one form over the others. These rules convert the
# dispreferred series graphemes to the one preferred series, when available.
# The surveys about grapheme preference come from the paper here:
# https://www.researchgate.net/profile/Fekede_Menuta/publication/312093656_OVER-DIFFERENTIATION_3_Over-differentiation_in_Amharic_Orthography_and_Attitude_towards_Reform/links/586f5d8408ae329d6215fb85/OVER-DIFFERENTIATION-3-Over-differentiation-in-Amharic-Orthography-and-Attitude-towards-Reform.pdf
REDUCE_H = string_map((("แ", "แ"),
("แ", "แ"),
("แ", "แ"),
("แ", "แ"),
("แ", "แ"),
("แ", "แ
"),
("แ", "แ"),
#("แ", "")
("แ", "แ"),
("แ", "แ"),
("แ", "แ"),
("แ", "แ"),
("แ", "แ"),
("แ
", "แ
"),
("แ", "แ"),
#("แ", ""),
#("แ", ""),
#("แ", ""),
#("แ", ""),
#("แ", ""),
("แธ", "แ"),
("แน", "แ"),
("แบ", "แ"),
("แป", "แ"),
("แผ", "แ"),
("แฝ", "แ
"),
("แพ", "แ")
#("แ", ""),
#("แ", ""),
#("แ", ""),
#("แ", ""),
#("แ
", "")
))
REDUCE_S = string_map((("แธ", "แ"),
("แน", "แ"),
("แบ", "แ"),
("แป", "แ"),
("แผ", "แ"),
("แฝ", "แ
"),
("แพ", "แ")
#("แฟ", "")
))
REDUCE_A = string_map((("แ", "แ "),
("แ", "แ "),
("แ", "แข"),
("แ", "แฃ"),
("แ", "แค"),
("แ", "แฅ"),
("แ", "แฆ")
))
REDUCE_OVERDIFFERENTIATION = cdrewrite(
union(REDUCE_H, REDUCE_S, REDUCE_A),
"",
"",
byte.BYTES.closure())
LANGUAGE_SPECIFIC_PREPROCESSING = REDUCE_OVERDIFFERENTIATION
# These files are not in the repo. You will need to change these paths to match
# where you place the data files.
UD = "language_data/am/UD_Amharic-ATT/am_att-ud-test.conllu"
UM = ""
AC = "language_data/am/ac/am-wordbigrams.txt"
OSCAR = "language_data/am/oscar/am.txt"
OSCAR_DEDUP = "language_data/am/oscar/am_dedup.txt"
LCC = "language_data/am/lcc/amh_wikipedia_2016_30K/amh_wikipedia_2016_30K-sentences.txt"
| 0 | 0 | 0 |