content
stringlengths
5
1.05M
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright SquirrelNetwork
import asyncio

from core import decorators
from core.utilities.message import message, messageWithId, messageWithAsyncById
from core.utilities.strings import Strings
from core.database.repository.community import CommunityRepository
from telegram.error import BadRequest

loop = asyncio.get_event_loop()


@decorators.owner.init
def init(update, context):
    """Broadcast the text that follows the command prefix to every
    registered community group.

    Args:
        update: Telegram update that triggered the command.
        context: Telegram callback context.
    """
    # Strip the 2-character command prefix to get the broadcast body.
    msg = update.message.text[2:].strip()
    # FIX: validate once, up front. The original performed this check inside
    # the per-group loop, so an empty broadcast emitted one error message per
    # registered group instead of a single error.
    if msg == "":
        message(update, context, "You cannot send an empty message!")
        return
    rows = CommunityRepository().getAll()
    for row in rows:
        group_id = row['tg_group_id']
        try:
            loop.run_until_complete(
                messageWithAsyncById(update, context, group_id, 2, msg))
        except BadRequest:
            # A group may have kicked the bot or been deleted; report and
            # continue with the remaining groups.
            message(update, context, Strings.ERROR_HANDLING)
import os
import io
import math
import copy
import random
import numpy as np
import tensorflow as tf
import cv2
import matplotlib.pyplot as plt
from PIL import Image, ImageDraw
from itertools import permutations


# Helper data generation functions.

def serialize_array(array):
    """Serialize an array/tensor into a TF byte string (for TFRecord storage)."""
    array = tf.io.serialize_tensor(array)
    return array


def regular_polygon(sides, radius=10, rotation=0, translation=None):
    """Calculates the vertices of a regular polygon by sweeping out a circle
    and putting n equally spaced points on it.

    Args:
        sides: number of vertices.
        radius: radius (pixels) of the enclosing circle.
        rotation: rotation offset in radians.
        translation: optional (x, y) offset applied to every point.

    Returns:
        (points, original_points): the translated points and the
        zero-centered points before translation.
    """
    # Angle (in radians) of each wedge from the center outwards: 2*pi / n.
    one_segment = math.pi * 2 / sides
    # Basic trig gives the points; scale by the desired radius and offset the
    # rotation by a fixed amount.
    points = [
        (int(math.sin(one_segment * i + rotation) * radius),
         int(math.cos(one_segment * i + rotation) * radius))
        for i in range(sides)]
    original_points = copy.copy(points)
    # Translate so the polygon lands where the caller wants it (e.g. the
    # center of the canvas, not the corner).
    if translation:
        points = [[sum(pair) for pair in zip(point, translation)]
                  for point in points]
    return points, original_points


def open_rectangle(radius=8, x_offset=None, rotation=None, translation=None):
    """Builds an "open rectangle": two parallel lines cut from a rotated square.

    Args:
        radius: radius of the base square's enclosing circle.
        x_offset: horizontal shrink applied to the second line (random if None).
        rotation: rotation in radians (random if None).
        translation: optional (x, y) offset.

    Returns:
        (lines, original_lines): translated line pairs and the
        zero-centered originals.
    """
    if rotation is None:
        rotation = 1 * math.pi * np.random.random_sample()
    if x_offset is None:
        x_offset = np.random.randint(8)
    sides = 4
    one_segment = math.pi * 2 / sides
    points = [
        (math.sin(one_segment * i + rotation) * radius,
         math.cos(one_segment * i + rotation) * radius)
        for i in range(sides)]
    line_1 = points[0:2]
    line_2 = points[2:4]
    # Shift one side horizontally so the two lines are no longer a rectangle.
    line_2 = [[p[0] - x_offset, p[1]] for p in line_2]
    original_lines = copy.copy([line_1, line_2])
    if translation:
        line_1 = [[sum(pair) for pair in zip(point, translation)] for point in line_1]
        line_2 = [[sum(pair) for pair in zip(point, translation)] for point in line_2]
    lines = [line_1, line_2]
    return lines, original_lines


def ccw_sort(polygon_points):
    """Sort the points counter clockwise around the mean of all points.

    The sorting can be imagined like a radar scanner: points are sorted by
    their angle to the x axis."""
    polygon_points = np.array(polygon_points)
    mean = np.mean(polygon_points, axis=0)
    d = polygon_points - mean
    s = np.arctan2(d[:, 0], d[:, 1])
    return polygon_points[np.argsort(s), :]


def iregular_polygon_from_regular(sides, radius=1, rotation=0, translation=None, max_dev=0):
    """Builds an irregular polygon by jittering the vertices of a regular one.

    Args:
        sides: number of vertices.
        radius: radius of the base regular polygon.
        rotation: rotation in radians.
        translation: optional (x, y) offset.
        max_dev: maximum absolute per-axis vertex jitter (pixels).

    Returns:
        (points, original_points): jittered translated points and jittered
        zero-centered points, both sorted counter clockwise.
    """
    # Get regular polygon.
    points, original_points = regular_polygon(
        sides=sides, radius=radius, rotation=rotation, translation=translation)
    # Add noise: an independent (dx, dy) pair per vertex.
    noise = [[np.random.randint(-max_dev, max_dev + 1),
              np.random.randint(-max_dev, max_dev + 1)]
             for _ in points]
    # FIX: the original added y[0] to BOTH coordinates (diagonal-only jitter)
    # and never used the sampled y[1]; apply each noise component to its axis.
    points = [[x[0] + y[0], x[1] + y[1]] for x, y in zip(points, noise)]
    original_points = [[x[0] + y[0], x[1] + y[1]]
                       for x, y in zip(original_points, noise)]
    # Return points and zero-centered points.
    return ccw_sort(points), ccw_sort(original_points)


def divide_polygon(points):
    """Divides polygon at the midsection of every side.

    Args:
        points: list of points.

    Returns:
        List of lists of points (one triplet per original vertex)."""
    mid_points = []
    for i in range(len(points)):
        if i == len(points) - 1:
            # Last side wraps around to the first vertex.
            midpoint = [(points[i][0] + points[0][0]) / 2,
                        (points[i][1] + points[0][1]) / 2]
        else:
            midpoint = [(points[i][0] + points[i + 1][0]) / 2,
                        (points[i][1] + points[i + 1][1]) / 2]
        mid_points.append(midpoint)
    new_points = []
    for i in range(len(mid_points)):
        if i == len(mid_points) - 1:
            new_points.append([mid_points[i], points[i], points[0]])
        else:
            new_points.append([mid_points[i], points[i], points[i + 1]])
    return new_points


def displace_line_around_origin(point_list, d):
    """Displace a line (list of points) away from the center (0, 0) by d units."""
    # The second point of the line decides which quadrant to push towards.
    point = point_list[1]
    x, y = point
    d_x = d if x >= 0 else -d
    d_y = d if y >= 0 else -d
    displacement = [d_x, d_y]
    displaced_point_list = [[sum(pair) for pair in zip(point, displacement)]
                            for point in point_list]
    return displaced_point_list


def displace_polygon_vertices(list_of_points, radius):
    """Displace polygon subsections randomly around the center.

    The displacement keeps the angles of the original polygon. This function
    assumes that points are the original polygon points around the
    coordinate (0, 0).

    Args:
        list_of_points: list of points.
        radius: radius of the base polygon; bounds the displacement amounts.

    Returns:
        List of lists of points."""
    mid_points = []
    for i in range(len(list_of_points)):
        if i == len(list_of_points) - 1:
            midpoint = [(list_of_points[i][0] + list_of_points[0][0]) / 2,
                        (list_of_points[i][1] + list_of_points[0][1]) / 2]
        else:
            midpoint = [(list_of_points[i][0] + list_of_points[i + 1][0]) / 2,
                        (list_of_points[i][1] + list_of_points[i + 1][1]) / 2]
        mid_points.append(midpoint)
    new_points = []
    for i in range(len(mid_points)):
        if i == len(mid_points) - 1:
            new_points.append([mid_points[i], list_of_points[0], mid_points[0]])
        else:
            new_points.append([mid_points[i], list_of_points[i + 1], mid_points[i + 1]])
    # All possible displacements to sample from.
    all_d = list(range(0, radius))
    random.shuffle(all_d)
    # Displace each subsection by the next shuffled amount.
    displaced_points = []
    counter = 0
    for point_list in new_points:
        d = all_d[counter]
        new_point_list = displace_line_around_origin(point_list, d)
        displaced_points.append(new_point_list)
        counter += 1
        # Reset the counter on reaching the end of all displacements.
        # NOTE(review): resets one element early (`>= len - 1`), so the last
        # shuffled value is never used — preserved as-is; confirm intent.
        if counter >= len(all_d) - 1:
            counter = 0
    return displaced_points


def scramble_poligon(img, midpoint, radius):
    """Scrambles the four quadrants of the patch of `img` centered at
    `midpoint` (side 2*radius), moving each quadrant outward by a random
    offset. Mutates and returns `img`."""
    # Augment the radius to cover all pixels in the target patch.
    radius += 1
    # Get start points and end points of the 4 quadrants.
    sp_1 = (midpoint[0] - radius, midpoint[1] - radius)
    ep_1 = midpoint
    sp_2 = (midpoint[0], midpoint[1] - radius)
    ep_2 = (midpoint[0] + radius, midpoint[1])
    sp_3 = (midpoint[0] - radius, midpoint[1])
    ep_3 = (midpoint[0], midpoint[1] + radius)
    sp_4 = midpoint
    ep_4 = (midpoint[0] + radius, midpoint[1] + radius)
    # Sample offsets.
    off_x = random.sample(list(range(0, int(radius / 2))), 4)
    off_y = random.sample(list(range(0, int(radius / 2))), 4)
    # Add offsets: each quadrant moves away from the midpoint.
    new_sp_1 = (sp_1[0] - off_x[0], sp_1[1] - off_y[0])
    new_ep_1 = (ep_1[0] - off_x[0], ep_1[1] - off_y[0])
    new_sp_2 = (sp_2[0] + off_x[1], sp_2[1] - off_y[1])
    new_ep_2 = (ep_2[0] + off_x[1], ep_2[1] - off_y[1])
    new_sp_3 = (sp_3[0] - off_x[2], sp_3[1] + off_y[2])
    new_ep_3 = (ep_3[0] - off_x[2], ep_3[1] + off_y[2])
    new_sp_4 = (sp_4[0] + off_x[3], sp_4[1] + off_y[3])
    new_ep_4 = (ep_4[0] + off_x[3], ep_4[1] + off_y[3])
    # Copy patches. Note: numpy indexing is img[y, x].
    patch_1 = np.copy(img[sp_1[1]:ep_1[1], sp_1[0]:ep_1[0]])
    patch_2 = np.copy(img[sp_2[1]:ep_2[1], sp_2[0]:ep_2[0]])
    patch_3 = np.copy(img[sp_3[1]:ep_3[1], sp_3[0]:ep_3[0]])
    patch_4 = np.copy(img[sp_4[1]:ep_4[1], sp_4[0]:ep_4[0]])
    # Wipe out patches in img (white background).
    img[sp_1[1]:ep_1[1], sp_1[0]:ep_1[0]] = (255, 255, 255)
    img[sp_2[1]:ep_2[1], sp_2[0]:ep_2[0]] = (255, 255, 255)
    img[sp_3[1]:ep_3[1], sp_3[0]:ep_3[0]] = (255, 255, 255)
    img[sp_4[1]:ep_4[1], sp_4[0]:ep_4[0]] = (255, 255, 255)
    # Paste patches in new locations.
    img[new_sp_1[1]:new_ep_1[1], new_sp_1[0]:new_ep_1[0]] = patch_1
    img[new_sp_2[1]:new_ep_2[1], new_sp_2[0]:new_ep_2[0]] = patch_2
    img[new_sp_3[1]:new_ep_3[1], new_sp_3[0]:new_ep_3[0]] = patch_3
    img[new_sp_4[1]:new_ep_4[1], new_sp_4[0]:new_ep_4[0]] = patch_4
    return img


def svrt_1_points_open_rectangle(category=1, radii=None, rotation=None):
    """Returns open-rectangle points for one instance of SVRT problem 1.

    Args:
        category: 0 (different widths) or 1 (same shape).
        radii: unused; kept for signature compatibility.
        rotation: rotation in radians (defaults to pi/4).

    Returns:
        Two lists of line pairs (points_a, points_b).
    """
    # Polygon parameters.
    if rotation is None:
        rotation = math.pi / 4
    # Positions.
    min_dev_1 = 16
    translation_a = [np.random.randint(min_dev_1, 128 - min_dev_1),
                     np.random.randint(min_dev_1, 128 - min_dev_1)]
    # Ensure that the second shape is at the other side of at least one dimension.
    choice_x = False if np.random.random_sample() < 0.5 else True
    if choice_x:
        t2_x = np.random.randint(22, 32) if translation_a[0] > 64 else np.random.randint(90, 102)
    else:
        t2_x = np.random.randint(22, 102)
    # Force the y split whenever x was not split.
    choice_y = True if choice_x is False else False if np.random.random_sample() < 0.5 else True
    if choice_y:
        t2_y = np.random.randint(22, 32) if translation_a[1] > 64 else np.random.randint(90, 102)
    else:
        t2_y = np.random.randint(22, 102)
    translation_b = [t2_x, t2_y]
    # Generate points.
    if category == 0:
        x_offset_1 = np.random.choice(11)
        # At least 4 points of difference in width.
        offset_diference = np.random.choice(range(4, 8))
        x_offset_2 = x_offset_1 + offset_diference if np.random.random_sample() > 0.5 else x_offset_1 - offset_diference
        points_a, _ = open_rectangle(translation=translation_a, x_offset=x_offset_1, rotation=rotation)
        points_b, _ = open_rectangle(translation=translation_b, x_offset=x_offset_2, rotation=rotation)
    elif category == 1:
        # Same shape: re-translate the zero-centered original.
        points_a, original_a = open_rectangle(translation=translation_a, rotation=rotation)
        line_1, line_2 = original_a
        line_1 = [[sum(pair) for pair in zip(point, translation_b)] for point in line_1]
        line_2 = [[sum(pair) for pair in zip(point, translation_b)] for point in line_2]
        points_b = [line_1, line_2]
    else:
        raise ValueError('category has to be 1 or 0!')
    return points_a, points_b


def svrt_1_img_open_rectangle(category=1, thickness=1, color=(0, 0, 0),
                              rotation=None, closed=False):
    """Renders an open-rectangle SVRT-1 instance.

    Returns:
        (img, img2): the stimulus image, and a copy with the line pairs
        connected (used to get bounding boxes later on). Both uint8 128x128x3.
    """
    # White 128x128 canvas.
    img = np.zeros(shape=(128, 128, 3), dtype=np.int16)
    img[:] = (255, 255, 255)
    # Second image with connected lines to get bounding boxes later on.
    img2 = np.zeros(shape=(128, 128, 3), dtype=np.int16)
    img2[:] = (255, 255, 255)
    # Get points.
    points_a, points_b = svrt_1_points_open_rectangle(category=category, rotation=rotation)
    # Draw each line of each shape on both canvases.
    for line in points_a:
        poly_a = np.array(line, dtype=np.int32)
        # Reshaping according to opencv format.
        poly_new_a = poly_a.reshape((-1, 1, 2))
        cv2.polylines(img, [poly_new_a], isClosed=closed, color=color, thickness=thickness)
        cv2.polylines(img2, [poly_new_a], isClosed=closed, color=color, thickness=thickness)
    for line in points_b:
        poly_b = np.array(line, dtype=np.int32)
        poly_new_b = poly_b.reshape((-1, 1, 2))
        cv2.polylines(img, [poly_new_b], isClosed=closed, color=color, thickness=thickness)
        cv2.polylines(img2, [poly_new_b], isClosed=closed, color=color, thickness=thickness)
    # Connect lines in img2 only.
    points_c = [[points_a[0][0], points_a[1][0]], [points_b[0][0], points_b[1][0]]]
    for line in points_c:
        poly_c = np.array(line, dtype=np.int32)
        poly_new_c = poly_c.reshape((-1, 1, 2))
        cv2.polylines(img2, [poly_new_c], isClosed=closed, color=color, thickness=thickness)
    return img.astype('uint8'), img2.astype('uint8')


def svrt_1_points(category=1, radii=None, sides=None, rotations=None, regular=False,
                  irregularity=0.25, displace_vertices=False):
    """Returns polygon points for a single instance of a SVRT problem 1.

    Args:
        category: 0 (no) or 1 (yes).
        radii: radii of the base regular polygons. 2-tuple, 8 to 14.
        sides: number of sides of the base regular polygons. 2-tuple, 4 to 8.
        rotations: rotations of the polygons in radians. 2-tuple, 0 to pi.
        regular: whether to build regular or irregular polygons.
        irregularity: maximum level of random point translation for
            irregular polygons (fraction of the radius).
        displace_vertices: if True the second polygon's subsections are
            scrambled later (see svrt_1_img) in the negative cases.

    Returns:
        (points_a, points_b, midpoint_b, radius_1)."""
    # Polygon parameters.
    if radii is None:
        if displace_vertices:
            radius_1 = np.random.randint(10, 18)
        else:
            radius_1 = np.random.randint(10, 40)
        radius_2 = radius_1
    else:
        radius_1, radius_2 = radii
    if sides is None:
        possible_sides = random.sample(list(range(3, 8)), 2)
        sides_1 = possible_sides[0]
        sides_2 = possible_sides[1]
    else:
        # FIX: the original never unpacked a caller-supplied `sides`,
        # raising NameError despite the documented 2-tuple parameter.
        sides_1, sides_2 = sides
    if rotations is None:
        rotation_1 = math.radians(random.randint(0, 360))
        rotation_2 = math.radians(random.randint(0, 360))
    else:
        # FIX: same problem for a caller-supplied `rotations`.
        rotation_1, rotation_2 = rotations
    # TODO: min_dev should be calculated from the actual points, not from the
    # maximum possible enclosing circle.
    if not regular and irregularity is None:
        max_dev_factor = np.random.choice([0.3, 0.4, 0.5, 0.6])
    else:
        max_dev_factor = irregularity
    max_dev_1 = int(radius_1 * max_dev_factor)
    min_dev_1 = radius_1 + max_dev_1
    max_dev_2 = int(radius_2 * max_dev_factor)
    min_dev_2 = radius_2 + max_dev_2
    # Positions. A sample-rejection step in the generators allows sampling
    # both positions randomly here.
    if displace_vertices:
        # Pick one of the four quadrants for the first shape.
        t_a_1 = [np.random.randint(min_dev_1, 64 - min_dev_1),
                 np.random.randint(min_dev_1, 64 - min_dev_1)]
        t_a_2 = [np.random.randint(64 + min_dev_1, 128 - min_dev_1),
                 np.random.randint(min_dev_1, 64 - min_dev_1)]
        t_a_3 = [np.random.randint(min_dev_1, 64 - min_dev_1),
                 np.random.randint(64 + min_dev_1, 128 - min_dev_1)]
        t_a_4 = [np.random.randint(64 + min_dev_1, 128 - min_dev_1),
                 np.random.randint(64 + min_dev_1, 128 - min_dev_1)]
        translation_a = random.choice([t_a_1, t_a_2, t_a_3, t_a_4])
    else:
        translation_a = [np.random.randint(min_dev_1, 128 - min_dev_1),
                         np.random.randint(min_dev_1, 128 - min_dev_1)]
    # Ensure that the second shape is at the other side of at least one dimension.
    if displace_vertices:
        # Choose the dimension along which to send the object to the other side.
        dim = random.choice(['x', 'y'])
        if dim == 'x':
            t2_x = np.random.randint(28, 36) if translation_a[0] > 64 else np.random.randint(92, 100)
        else:
            t2_x = np.random.randint(28, 100)
        if dim == 'y':
            t2_y = np.random.randint(28, 36) if translation_a[1] > 64 else np.random.randint(92, 100)
        else:
            t2_y = np.random.randint(28, 100)
        translation_b = [t2_x, t2_y]
    else:
        translation_b = [np.random.randint(min_dev_2, 128 - min_dev_2),
                         np.random.randint(min_dev_2, 128 - min_dev_2)]
    # Generate points.
    if category == 0 and regular and not displace_vertices:
        # A math.pi/4 (45 degrees) rotation gives the most stable polygons
        # in the "1" category.
        points_a, _ = regular_polygon(sides=sides_1, radius=radius_1,
                                      rotation=rotation_1, translation=translation_a)
        points_b, _ = regular_polygon(sides=sides_2, radius=radius_2,
                                      rotation=rotation_2, translation=translation_b)
    elif category == 1 and regular and not displace_vertices:
        points_a, original_a = regular_polygon(sides=sides_1, radius=radius_1,
                                               rotation=rotation_1, translation=translation_a)
        points_b = [[sum(pair) for pair in zip(point, translation_b)] for point in original_a]
    elif category == 0 and not regular and not displace_vertices:
        points_a, _ = iregular_polygon_from_regular(
            sides=sides_1, radius=radius_1, rotation=rotation_1,
            translation=translation_a, max_dev=max_dev_1)
        points_b, _ = iregular_polygon_from_regular(
            sides=sides_2, radius=radius_2, rotation=rotation_2,
            translation=translation_b, max_dev=max_dev_2)
    elif category == 1 and not regular and not displace_vertices:
        points_a, original_a = iregular_polygon_from_regular(
            sides=sides_1, radius=radius_1, rotation=rotation_1,
            translation=translation_a, max_dev=max_dev_1)
        points_b = [[sum(pair) for pair in zip(point, translation_b)] for point in original_a]
    elif category == 1 and regular and displace_vertices:
        # A math.pi/4 (45 degrees) rotation gives the most stable polygons
        # in the "1" category.
        points_a, original_a = regular_polygon(sides=sides_1, radius=radius_1,
                                               rotation=rotation_1, translation=translation_a)
        points_b = [[sum(pair) for pair in zip(point, translation_b)] for point in original_a]
    elif category == 0 and regular and displace_vertices:
        # The negative case is the same polygon; its parts get scrambled
        # later by scramble_poligon (see svrt_1_img).
        points_a, original_a = regular_polygon(sides=sides_1, radius=radius_1,
                                               rotation=rotation_1, translation=translation_a)
        points_b = [[sum(pair) for pair in zip(point, translation_b)] for point in original_a]
    else:
        raise ValueError('wrong category or regular args!')
    return points_a, points_b, tuple(translation_b), radius_1


def svrt_1_img(category=1, radii=None, sides=None, regular=False, rotations=None,
               irregularity=0.5, thickness=1, color_a=None, color_b=None,
               filled=False, closed=True, displace_vertices=False,
               separated_chanels=False):
    """Returns a picture of a single instance of a SVRT problem 1.

    Args:
        category: 0 (no) or 1 (yes).
        radii: radii of the base regular polygons. 2-tuple, 8 to 14.
        sides: number of sides of the base regular polygons. 2-tuple, 4 to 8.
        regular: whether to build regular or irregular polygons.
        rotations: rotations of the polygons in radians. 2-tuple, 0 to pi.
        irregularity: maximum level of random point translation for
            irregular polygons.
        thickness: line width of the shapes.
        color_a: line color of the first shape (black if None).
        color_b: line color of the second shape (same as color_a if None).
        filled: draw filled polygons instead of outlines.
        closed: close the polygon outlines.
        displace_vertices: scramble the second polygon's patch in category 0.
        separated_chanels: if True returns two images with one object per image.

    Returns:
        Numpy uint8 array (or a pair of them if separated_chanels)."""
    # White 128x128 canvas.
    img = np.zeros(shape=(128, 128, 3), dtype=np.int16)
    img[:] = (255, 255, 255)
    # Create second canvas for the second channel.
    if separated_chanels:
        img2 = np.zeros(shape=(128, 128, 3), dtype=np.int16)
        img2[:] = (255, 255, 255)
    # Set up line color.
    color_a = (0, 0, 0) if color_a is None else color_a
    # By default make the color of the second object the same as the first one.
    if color_b is None:
        color_b = color_a
    # Get points.
    points_a, points_b, midpoint_2, radius_2 = svrt_1_points(
        category=category, radii=radii, sides=sides, rotations=rotations,
        regular=regular, irregularity=irregularity,
        displace_vertices=displace_vertices)
    # Assigning sides to polygon.
    poly_a = np.array(points_a, dtype=np.int32)
    poly_b = np.array(points_b, dtype=np.int32)
    # Reshaping according to opencv format.
    poly_new_a = poly_a.reshape((-1, 1, 2))
    poly_new_b = poly_b.reshape((-1, 1, 2))
    # Draw.
    if not filled and not displace_vertices:
        cv2.polylines(img, [poly_new_a], isClosed=closed, color=color_a, thickness=thickness)
        if separated_chanels:
            cv2.polylines(img2, [poly_new_b], isClosed=closed, color=color_b, thickness=thickness)
        else:
            cv2.polylines(img, [poly_new_b], isClosed=closed, color=color_b, thickness=thickness)
    elif not filled and displace_vertices and category == 1:
        cv2.polylines(img, [poly_new_a], isClosed=closed, color=color_a, thickness=thickness)
        if separated_chanels:
            cv2.polylines(img2, [poly_new_b], isClosed=closed, color=color_b, thickness=thickness)
        else:
            cv2.polylines(img, [poly_new_b], isClosed=closed, color=color_b, thickness=thickness)
    elif not filled and displace_vertices and category == 0:
        # Negative case: draw the copy, then scramble its patch.
        cv2.polylines(img, [poly_new_a], isClosed=closed, color=color_a, thickness=thickness)
        if separated_chanels:
            cv2.polylines(img2, [poly_new_b], isClosed=closed, color=color_b, thickness=thickness)
            img2 = scramble_poligon(img2, midpoint=midpoint_2, radius=radius_2)
        else:
            cv2.polylines(img, [poly_new_b], isClosed=closed, color=color_b, thickness=thickness)
            img = scramble_poligon(img, midpoint=midpoint_2, radius=radius_2)
    else:
        cv2.fillPoly(img, [poly_new_a], color=color_a)
        if separated_chanels:
            cv2.fillPoly(img2, [poly_new_b], color=color_b)
        else:
            cv2.fillPoly(img, [poly_new_b], color=color_b)
    # Return image(s).
    if separated_chanels:
        return img.astype('uint8'), img2.astype('uint8')
    else:
        return img.astype('uint8')


def sample_midpoints():
    """Samples midpoints to insert two 28x28 images into a 128 canvas.

    Assumes OpenCV formatting for points. The two points land in two
    distinct quadrants."""
    # Sample quadrants.
    quadrants = random.sample([1, 2, 3, 4], 2)
    # Generate points.
    points = []
    for quadrant in quadrants:
        if quadrant == 1:
            x = random.randint(1, 35)
            y = random.randint(1, 35)
        elif quadrant == 2:
            x = random.randint(65, 99)
            y = random.randint(1, 35)
        elif quadrant == 3:
            x = random.randint(1, 35)
            y = random.randint(65, 99)
        elif quadrant == 4:
            x = random.randint(65, 99)
            y = random.randint(65, 99)
        else:
            raise ValueError("quadrant has to be an integer between 1 and 4!")
        points.append((x, y))
    return points[0], points[1]


# Arrows dataset functions.

def rotate(origin, point, angle):
    """Rotate a point counterclockwise by a given angle around a given origin.

    Because in OpenCV the y-axis is inverted this function swaps the x and
    y axis.

    Args:
        origin: (x, y) tuple.
        point: the point (x, y) to rotate.
        angle: in radians.
    """
    oy, ox = origin
    py, px = point
    qx = ox + int(math.cos(angle) * (px - ox)) - int(math.sin(angle) * (py - oy))
    qy = oy + int(math.sin(angle) * (px - ox)) + int(math.cos(angle) * (py - oy))
    return int(qy), int(qx)


def rotate_and_translate(origin, point_list, angle, translation):
    """Rotate polygon points counterclockwise by a given angle around a
    given origin and translate.

    Args:
        origin: (x, y) tuple.
        point_list: list of points (x, y) to rotate.
        angle: in degrees.
        translation: (x, y) offset added to every rotated point.

    Returns:
        New list of points rotated and translated.
    """
    # Convert the angle to radians.
    radiants = math.radians(angle)
    # Rotate all points.
    new_points = [rotate(origin=origin, point=p, angle=radiants) for p in point_list]
    # Translate all points.
    new_points = [[sum(pair) for pair in zip(point, translation)] for point in new_points]
    return new_points


def get_triangle_top_midpoint(point_list):
    """Returns the midpoint of the top of a triangle regardless of the
    orientation."""
    y = int(min([p[1] for p in point_list]))
    x = int((min([p[0] for p in point_list]) + max([p[0] for p in point_list])) / 2)
    return x, y


def get_triangle_bottom_midpoint(point_list):
    """Returns the midpoint of the bottom of a triangle regardless of the
    orientation."""
    y = int(max([p[1] for p in point_list]))
    x = int((min([p[0] for p in point_list]) + max([p[0] for p in point_list])) / 2)
    return x, y


def get_arrow_points(radius, center, rotation=0, shape_a='normal', shape_b='normal',
                     continuous=True):
    """Calculates the points for an arrow.

    Args:
        radius: of the base circle to build the triangles (heads).
            5, 7, 9 work well.
        center: center of the arrow.
        rotation: of the arrow in degrees.
        shape_a: shape of head a. "normal", "inverted".
        shape_b: shape of head b. "normal", "inverted".
        continuous: whether the line touches the available heads.

    Returns:
        3 lists of lists of points: the "top" head, the "bottom" head and
        the line.
    """
    # The base arrow is based on 4 circles: each head centre sits 2 radii
    # from the overall centre.
    origin_top = (center[0], int(center[1] - 2 * radius))
    origin_bottom = [center[0], int(center[1] + 2 * radius)]
    points_top, cero_centered_top = regular_polygon(
        sides=3, radius=radius, rotation=math.radians(180), translation=origin_top)
    # Use the same function to generate the bottom head.
    points_bottom, cero_centered_bottom = regular_polygon(
        sides=3, radius=radius, rotation=math.radians(0), translation=origin_bottom)
    # Get line points.
    top_mid_point = get_triangle_bottom_midpoint(points_top)
    bottom_mid_point = get_triangle_top_midpoint(points_bottom)
    # If the arrow isn't continuous shrink the line.
    if not continuous:
        separation = int(radius)
        top_mid_point = center[0], top_mid_point[1] + separation
        bottom_mid_point = center[0], bottom_mid_point[1] - separation
    points_line = [top_mid_point, bottom_mid_point]
    if shape_a == 'inverted':
        # Flip the top head and pull it up by radius/2.
        origin_top = [origin_top[0], int(origin_top[1] - radius / 2)]
        points_top, cero_centered_top = regular_polygon(
            sides=3, radius=radius, rotation=math.radians(0), translation=origin_top)
    if shape_b == 'inverted':
        # Flip the bottom head and push it down by radius/2.
        origin_bottom = [origin_bottom[0], int(origin_bottom[1] + radius / 2) + 1]
        points_bottom, cero_centered_bottom = regular_polygon(
            sides=3, radius=radius, rotation=math.radians(180), translation=origin_bottom)
    # Convert the rotation to radians and rotate all elements.
    radiants = math.radians(rotation)
    points_top = [rotate(origin=center, point=p, angle=radiants) for p in points_top]
    points_bottom = [rotate(origin=center, point=p, angle=radiants) for p in points_bottom]
    points_line = [rotate(origin=center, point=p, angle=radiants) for p in points_line]
    return points_top, points_bottom, points_line


def sample_midpoints_arrows(size):
    """Samples midpoints for two arrows of the given size in a 128x128 image."""
    xs = random.sample(list(range(size * 4, 127 - size * 4)), 2)
    ys = random.sample(list(range(size * 4, 127 - size * 4)), 2)
    point_1 = [xs[0], ys[0]]
    point_2 = [xs[1], ys[1]]
    return point_1, point_2


def make_arrows_sd(category, continuous=False, line_width=1, hard_test=True,
                   separated_chanels=False):
    """Makes a same/different pair of arrows.

    Args:
        category: 1 (same) or 0 (different).
        continuous: whether the line touches the available heads.
        line_width: line width in pixels.
        hard_test: in category 0, keep size/rotation equal and differ only
            in head shapes/orientations.
        separated_chanels: if True returns two images with one arrow each.

    Returns:
        image (array), or a pair of them if separated_chanels.
    """
    # White background image(s).
    img = np.zeros(shape=(128, 128, 3), dtype=np.uint8)
    img[:] = (255, 255, 255)
    if separated_chanels:
        img2 = np.zeros(shape=(128, 128, 3), dtype=np.uint8)
        img2[:] = (255, 255, 255)
    # Sample sizes.
    size_1 = random.sample([4, 5, 6, 7, 8, 9, 10], 1)[0]
    size_2 = random.sample([4, 5, 6, 7, 8, 9, 10], 1)[0]
    # Sample rotations.
    rotation_1 = random.randint(0, 135)
    rotation_2 = random.randint(0, 135)
    # Sample booleans for arrow heads (at least one head per arrow).
    head_a_1 = bool(random.sample([0, 1], 1)[0])
    head_b_1 = True if not head_a_1 else bool(random.sample([0, 1], 1)[0])
    head_a_2 = bool(random.sample([0, 1], 1)[0])
    head_b_2 = True if not head_a_2 else bool(random.sample([0, 1], 1)[0])
    # Sample shapes.
    shape_a_1 = random.sample(['normal', 'inverted'], 1)[0]
    shape_b_1 = random.sample(['normal', 'inverted'], 1)[0]
    shape_a_2 = random.sample(['normal', 'inverted'], 1)[0]
    shape_b_2 = random.sample(['normal', 'inverted'], 1)[0]
    # Assign the second arrow's properties based on category.
    if category == 1:
        size_2 = size_1
        rotation_2 = rotation_1
        head_a_2 = head_a_1
        head_b_2 = head_b_1
        shape_a_2 = shape_a_1
        shape_b_2 = shape_b_1
    if hard_test and category == 0:
        size_2 = size_1
        rotation_2 = rotation_1
        # NOTE(review): the original comment claimed this makes shape_a_1 and
        # shape_b_1 different, but the expression actually makes them EQUAL;
        # preserved as-is since the second arrow then gets the opposite head
        # shapes — confirm intent.
        shape_a_1 = 'inverted' if shape_b_1 == 'inverted' else 'normal'
        # Ensure that the second arrow has the opposite head orientations.
        shape_a_2 = 'inverted' if shape_b_1 == 'normal' else 'normal'
        shape_b_2 = 'inverted' if shape_a_1 == 'normal' else 'normal'
        if not head_a_1:
            head_b_2 = False
            head_a_2 = True
        if not head_b_1:
            head_a_2 = False
            head_b_2 = True
        if head_a_1 and head_b_1:
            head_a_2 = head_a_1
            head_b_2 = head_b_1
    # Get midpoints.
    midpoint_1, midpoint_2 = sample_midpoints_arrows(size=size_1)
    # Get arrow points.
    points_top_1, points_bottom_1, points_line_1 = get_arrow_points(
        radius=size_1, rotation=rotation_1, shape_a=shape_a_1, shape_b=shape_b_1,
        center=midpoint_1, continuous=continuous)
    points_top_2, points_bottom_2, points_line_2 = get_arrow_points(
        radius=size_2, rotation=rotation_2, shape_a=shape_a_2, shape_b=shape_b_2,
        center=midpoint_2, continuous=continuous)
    # Reshape arrow points according to opencv format.
    poly_top_1 = np.array(points_top_1, dtype=np.int32)
    poly_bottom_1 = np.array(points_bottom_1, dtype=np.int32)
    poly_new_bottom_1 = poly_bottom_1.reshape((-1, 1, 2))
    poly_new_top_1 = poly_top_1.reshape((-1, 1, 2))
    poly_top_2 = np.array(points_top_2, dtype=np.int32)
    poly_bottom_2 = np.array(points_bottom_2, dtype=np.int32)
    poly_new_bottom_2 = poly_bottom_2.reshape((-1, 1, 2))
    poly_new_top_2 = poly_top_2.reshape((-1, 1, 2))
    # Draw!
    if head_a_1:
        cv2.polylines(img, [poly_new_top_1], isClosed=True, color=(0, 0, 0), thickness=line_width)
    if head_b_1:
        cv2.polylines(img, [poly_new_bottom_1], isClosed=True, color=(0, 0, 0), thickness=line_width)
    cv2.line(img, points_line_1[0], points_line_1[1], (0, 0, 0), thickness=line_width)
    if separated_chanels:
        if head_a_2:
            cv2.polylines(img2, [poly_new_top_2], isClosed=True, color=(0, 0, 0), thickness=line_width)
        if head_b_2:
            cv2.polylines(img2, [poly_new_bottom_2], isClosed=True, color=(0, 0, 0), thickness=line_width)
        cv2.line(img2, points_line_2[0], points_line_2[1], (0, 0, 0), thickness=line_width)
    else:
        if head_a_2:
            cv2.polylines(img, [poly_new_top_2], isClosed=True, color=(0, 0, 0), thickness=line_width)
        if head_b_2:
            cv2.polylines(img, [poly_new_bottom_2], isClosed=True, color=(0, 0, 0), thickness=line_width)
        cv2.line(img, points_line_2[0], points_line_2[1], (0, 0, 0), thickness=line_width)
    if separated_chanels:
        return img.astype('uint8'), img2.astype('uint8')
    else:
        return img.astype('uint8')


# Simulation 5 datasets functions.
def sample_midpoints_lines(sizes):
    """Sample one midpoint per line so both lines fit in a 128x128 image.

    Args:
        sizes: tuple (size_1, size_2) with the pixel lengths of the two lines.

    Returns:
        Two (x, y) tuples, one midpoint per line, each kept at least half a
        line length plus a 2 px margin away from the image borders.
    """
    size_1, size_2 = sizes
    half_1 = int(size_1 / 2)
    half_2 = int(size_2 / 2)
    # randrange draws uniformly from the same ranges the original sampled.
    point_1 = (random.randrange(half_1 + 2, 127 - (half_1 + 2)),
               random.randrange(half_1 + 2, 127 - (half_1 + 2)))
    point_2 = (random.randrange(half_2 + 2, 127 - (half_2 + 2)),
               random.randrange(half_2 + 2, 127 - (half_2 + 2)))
    return point_1, point_2

def get_line_points(size, rotation, center):
    """Return the two endpoints of a straight line segment.

    Args:
        size: total line length in pixels.
        rotation: rotation angle in degrees.
        center: (x, y) midpoint of the line.

    Returns:
        List with the two (x, y) integer endpoints.
    """
    radius = size / 2
    points = []
    for degrees in (rotation, rotation + 180):
        angle = math.radians(degrees)
        # The inner int() truncates the offset before adding, exactly as the
        # original did, so endpoint coordinates are bit-identical.
        x = int(center[0] + int(radius * math.cos(angle)))
        y = int(center[1] + int(radius * math.sin(angle)))
        points.append((x, y))
    return points

def make_straingt_lines_sd(category, var_factor, line_thickness=1, separated_chanels=False):
    """Generate a same/different sample made of two straight lines.

    NOTE: the misspelled name ("straingt") is kept for backward compatibility.

    Args:
        category: 1 for "same" (equal size and rotation), 0 for "different".
        var_factor: factor that varies for negatives: 'size' or 'rotation'.
        line_thickness: thickness passed to cv2.line.
        separated_chanels: if True, draw each line on its own image.

    Returns:
        A 128x128x3 uint8 image, or a tuple of two when separated_chanels.
    """
    # White background image(s).
    img = np.full((128, 128, 3), 255, dtype=np.uint8)
    if separated_chanels:
        img2 = np.full((128, 128, 3), 255, dtype=np.uint8)
    # Sample two distinct sizes and two distinct rotations.
    size_1, size_2 = random.sample(list(range(16, 64, 4)), 2)
    rotation_1, rotation_2 = random.sample([0, 45, 90, 135], 2)
    # "Same" pairs share both factors; negatives differ only in var_factor.
    if category == 1:
        size_2 = size_1
        rotation_2 = rotation_1
    if category == 0:
        if var_factor == 'rotation':
            size_2 = size_1
        if var_factor == 'size':
            rotation_2 = rotation_1
    # Place the lines and compute their endpoints.
    midpoint_1, midpoint_2 = sample_midpoints_lines(sizes=(size_1, size_2))
    points_line_1 = get_line_points(size=size_1, rotation=rotation_1, center=midpoint_1)
    points_line_2 = get_line_points(size=size_2, rotation=rotation_2, center=midpoint_2)
    # Draw!
    cv2.line(img, points_line_1[0], points_line_1[1], (0, 0, 0), thickness=line_thickness)
    if separated_chanels:
        cv2.line(img2, points_line_2[0], points_line_2[1], (0, 0, 0), thickness=line_thickness)
        return img.astype('uint8'), img2.astype('uint8')
    cv2.line(img, points_line_2[0], points_line_2[1], (0, 0, 0), thickness=line_thickness)
    return img.astype('uint8')

def make_rectangles_sd(category, line_thickness=1, separated_chanels=False):
    """Generate a same/different sample made of two axis-aligned rectangles.

    One dimension (chosen at random) is shared by both rectangles; the other
    differs for negatives and is equal for positives.

    Args:
        category: 1 for "same", 0 for "different".
        line_thickness: thickness passed to cv2.rectangle.
        separated_chanels: if True, draw each rectangle on its own image.

    Returns:
        A 128x128x3 uint8 image, or a tuple of two when separated_chanels.
    """
    # White background image(s).
    img = np.full((128, 128, 3), 255, dtype=np.uint8)
    if separated_chanels:
        img2 = np.full((128, 128, 3), 255, dtype=np.uint8)
    # Choose which dimension stays constant across both rectangles.
    const_dim = 'x' if random.random() > 0.5 else 'y'
    if const_dim == 'y':
        size_x_1, size_x_2 = random.sample(list(range(16, 64, 4)), 2)
        size_y_1 = random.choice(range(16, 64, 4))
        size_y_2 = size_y_1
        # Positives share the varying dimension too.
        if category == 1:
            size_x_2 = size_x_1
    else:
        size_y_1, size_y_2 = random.sample(list(range(16, 64, 4)), 2)
        size_x_1 = random.choice(range(16, 64, 4))
        size_x_2 = size_x_1
        if category == 1:
            size_y_2 = size_y_1
    # Sample top-left corners, keeping a 2 px margin from the borders.
    x_1 = random.randrange(2, 127 - (size_x_1 + 2))
    y_1 = random.randrange(2, 127 - (size_y_1 + 2))
    x_2 = random.randrange(2, 127 - (size_x_2 + 2))
    y_2 = random.randrange(2, 127 - (size_y_2 + 2))
    start_point_1 = (x_1, y_1)
    start_point_2 = (x_2, y_2)
    end_point_1 = (x_1 + size_x_1, y_1 + size_y_1)
    end_point_2 = (x_2 + size_x_2, y_2 + size_y_2)
    # Draw squares. Fix: honor line_thickness (the original hard-coded 1,
    # silently ignoring the parameter; all existing callers pass 1).
    img = cv2.rectangle(img, start_point_1, end_point_1, (0, 0, 0), line_thickness)
    if separated_chanels:
        img2 = cv2.rectangle(img2, start_point_2, end_point_2, (0, 0, 0), line_thickness)
        return img.astype('uint8'), img2.astype('uint8')
    img = cv2.rectangle(img, start_point_2, end_point_2, (0, 0, 0), line_thickness)
    return img.astype('uint8')

def make_connected_open_squares(category, line_width=1, is_closed=False, separated_chanels=False):
    """Generate a same/different sample of two connected open squares.

    The two figures trace mirror-image "Z"/"S" paths; positives repeat the
    same path for both figures.

    Args:
        category: 1 for "same", 0 for "different".
        line_width: thickness passed to cv2.polylines.
        is_closed: whether the polylines are closed.
        separated_chanels: if True, draw each figure on its own image.

    Returns:
        A 128x128x3 uint8 image, or a tuple of two when separated_chanels.
    """
    # White background image(s).
    img = np.full((128, 128, 3), 255, dtype=np.uint8)
    if separated_chanels:
        img2 = np.full((128, 128, 3), 255, dtype=np.uint8)
    # Side length of each square segment.
    size = random.randint(10, 24)
    # Mirror-image figure points.
    points_a = [
        [0, size], [0, 0], [size, 0], [size, size],
        [size, 2 * size], [2 * size, 2 * size], [2 * size, size]
    ]
    points_b = [
        [0, size], [0, 2 * size], [size, 2 * size], [size, size],
        [size, 0], [2 * size, 0], [2 * size, size]
    ]
    # Positives use the same figure twice.
    if category == 1:
        points_b = points_a
    # Independent random translations that keep both figures inside the frame.
    translation_a = [np.random.randint(1, 127 - size * 2), np.random.randint(1, 127 - size * 2)]
    translation_b = [np.random.randint(1, 127 - size * 2), np.random.randint(1, 127 - size * 2)]
    points_a = [[c + d for c, d in zip(point, translation_a)] for point in points_a]
    points_b = [[c + d for c, d in zip(point, translation_b)] for point in points_b]
    # Reshape to the (N, 1, 2) layout expected by cv2.polylines.
    poly_new_a = np.array(points_a, dtype=np.int32).reshape((-1, 1, 2))
    poly_new_b = np.array(points_b, dtype=np.int32).reshape((-1, 1, 2))
    # Draw.
    cv2.polylines(img, [poly_new_a], isClosed=is_closed, color=(0, 0, 0), thickness=line_width)
    if separated_chanels:
        cv2.polylines(img2, [poly_new_b], isClosed=is_closed, color=(0, 0, 0), thickness=line_width)
        return img, img2
    cv2.polylines(img, [poly_new_b], isClosed=is_closed, color=(0, 0, 0), thickness=line_width)
    return img

def make_connected_circles(category, line_width=1, separated_chanels=False):
    """Generate a same/different sample of two tangent-circle figures.

    Each figure is a small and a big circle stacked vertically and touching.
    Negatives use opposite stacking orders (small-over-big vs big-over-small);
    positives repeat the first figure's radii.

    Args:
        category: 1 for "same", 0 for "different".
        line_width: thickness passed to cv2.circle.
        separated_chanels: if True, draw each figure on its own image.

    Returns:
        A 128x128x3 uint8 image, or a tuple of two when separated_chanels.
    """
    # White background image(s).
    img = np.full((128, 128, 3), 255, dtype=np.uint8)
    if separated_chanels:
        img2 = np.full((128, 128, 3), 255, dtype=np.uint8)
    # Two distinct radii, small and big.
    radius_small, radius_big = sorted(random.sample(list(range(6, 20, 4)), 2))
    # Sample ordering of circles: small-big vs big-small; figure 2 gets the
    # opposite order (i.e. swapped radii) unless the pair is "same".
    order_1 = 'sb' if random.random() < 0.5 else 'bs'
    radius_1_a = radius_small if order_1 == 'sb' else radius_big
    radius_1_b = radius_big if order_1 == 'sb' else radius_small
    radius_2_a, radius_2_b = radius_1_b, radius_1_a
    if category == 1:
        radius_2_a = radius_1_a
        radius_2_b = radius_1_b
    # Sample the midpoint of each figure, keeping it inside the frame.
    # NOTE(review): the 126/127 upper bounds are slightly asymmetric between
    # the two figures; kept exactly as in the original — confirm intent.
    mpdt_1 = (
        np.random.randint(radius_big + 2, 126 - radius_big),
        np.random.randint(radius_1_a + radius_1_b + 2, 126 - (radius_1_a + radius_1_b))
    )
    mpdt_2 = (
        np.random.randint(radius_big + 2, 127 - radius_big),
        np.random.randint(radius_2_a + radius_2_b + 2, 126 - (radius_2_a + radius_2_b))
    )
    mdpt_1_a = (mpdt_1[0], mpdt_1[1] - radius_1_b)
    mdpt_1_b = (mpdt_1[0], mpdt_1[1] + radius_1_a)
    mdpt_2_a = (mpdt_2[0], mpdt_2[1] - radius_2_b)
    mdpt_2_b = (mpdt_2[0], mpdt_2[1] + radius_2_a)
    # Draw circles. Fix: honor line_width (the original hard-coded thickness 1,
    # silently ignoring the parameter; all existing callers pass 1).
    img = cv2.circle(img, mdpt_1_a, radius_1_a, (0, 0, 0), line_width)
    img = cv2.circle(img, mdpt_1_b, radius_1_b, (0, 0, 0), line_width)
    if separated_chanels:
        img2 = cv2.circle(img2, mdpt_2_a, radius_2_a, (0, 0, 0), line_width)
        img2 = cv2.circle(img2, mdpt_2_b, radius_2_b, (0, 0, 0), line_width)
        return img, img2
    img = cv2.circle(img, mdpt_2_a, radius_2_a, (0, 0, 0), line_width)
    img = cv2.circle(img, mdpt_2_b, radius_2_b, (0, 0, 0), line_width)
    return img

# Relative position functions.
def compare_xy(point_1, point_2):
    """Return 1 if the lower of the two points is also the rightmost, else 0.

    Points are (x, y); larger y means lower on the image. Ties in y resolve
    point_1 as the lower object (>= comparison), matching original behavior.
    """
    lower_obj = point_1 if point_1[1] >= point_2[1] else point_2
    upper_obj = point_2 if lower_obj is point_1 else point_1
    return 1 if lower_obj[0] >= upper_obj[0] else 0

def get_shapes_info(img, scrambled_negative=False, draw_b_rects=False):
    """Returns info for multi-task learning.

    Obj1 and obj2 are determined by a biased distance from the origin
    (d = 1.1*x^2 + y^2).

    Args:
        img: image (np.array).
        scrambled_negative: whether img is a negative example from the
            scrambled condition (one intact shape plus scattered pieces).
        draw_b_rects: if True, also draw the bounding boxes and midpoints
            onto img (debug aid).

    Returns:
        coordinates: (4,) array with x, y centers of obj1 and obj2.
        relative_position: whether the lower object is to the right of the
            upper object.
        flag: True if the sample is bad (objects touching, so the number of
            detected boxes is not 2). Used to discard the sample."""
    ## Get coordinates.
    image = img.copy()
    # Threshold so shapes become white-on-black blobs for contour detection.
    gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
    thresh = cv2.threshold(gray, 0, 255, cv2.THRESH_BINARY_INV + cv2.THRESH_OTSU)[1]
    # Find contours, obtain bounding boxes.
    cnts = cv2.findContours(thresh, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
    # findContours returns 2 or 3 values depending on the OpenCV version.
    cnts = cnts[0] if len(cnts) == 2 else cnts[1]
    b_rects = [cv2.boundingRect(c) for c in cnts]
    if scrambled_negative:
        # Keep the biggest bounding box (the intact shape) as one object...
        bigest = max(b_rects, key=lambda r: r[2] * r[3])
        new_b_rects = [bigest]
        # ...and enclose all remaining (scrambled) boxes in a second box.
        smaller_b_rects = [r for r in b_rects if r not in new_b_rects]
        if len(smaller_b_rects) > 0:
            min_x = min(r[0] for r in smaller_b_rects)
            min_y = min(r[1] for r in smaller_b_rects)
            max_x = max(r[0] + r[2] for r in smaller_b_rects)
            max_y = max(r[1] + r[3] for r in smaller_b_rects)
            new_b_rects.append((min_x, min_y, max_x - min_x, max_y - min_y))
            b_rects = new_b_rects
        # If there are no smaller rects, set b_rects to a list of 3 bounding
        # rects so the bad-sample flag activates below.
        else:
            b_rects = [[0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0]]
    # Define obj1 and obj2 by the biased distance from the origin.
    b_rect_1 = min(b_rects, key=lambda r: 1.1 * r[0] ** 2 + r[1] ** 2)
    b_rect_2 = max(b_rects, key=lambda r: 1.1 * r[0] ** 2 + r[1] ** 2)
    mp_1 = np.array([b_rect_1[0] + b_rect_1[2] / 2, b_rect_1[1] + b_rect_1[3] / 2])
    mp_2 = np.array([b_rect_2[0] + b_rect_2[2] / 2, b_rect_2[1] + b_rect_2[3] / 2])
    # Get relations.
    relative_position = compare_xy(mp_1, mp_2)
    # Exactly two boxes means the two objects were cleanly separated.
    flag = len(b_rects) != 2
    if draw_b_rects:
        # Draw bounding rects.
        for x, y, w, h in b_rects:
            cv2.rectangle(img, (x, y), (x + w, y + h), (36, 255, 12), 1)
        # Draw midpoints.
        cv2.line(img, (int(mp_1[0]), int(mp_1[1])), (int(mp_1[0]), int(mp_1[1])), (255, 0, 0), 2)
        cv2.line(img, (int(mp_2[0]), int(mp_2[1])), (int(mp_2[0]), int(mp_2[1])), (255, 0, 0), 2)
    return np.concatenate((mp_1, mp_2), axis=0), relative_position, flag

# Define data generators.
def irregular_gen(batch_size=100, category_type='both'): if category_type == 'both': try: assert batch_size % 2 == 0 except: print("batch_size should be an even number!") else: half_batch = int(batch_size/2) while True: inputs = [] sd_labels = [] coordinates = [] rel_positions = [] if category_type == 'both': labels = [0] * half_batch + [1] * half_batch random.shuffle(labels) elif category_type == 'different': labels = [0] * batch_size elif category_type == 'same': labels = [1] * batch_size else: raise ValueError("category_type should be 'both', 'different', or 'same'!") i = 0 while True: y = labels[i] x = svrt_1_img(category=y, regular=False, color_a=(0,0,0), sides=None, thickness=1) coors, rel_pos, bad_sample = get_shapes_info(x, scrambled_negative=False, draw_b_rects=False) if bad_sample: pass else: inputs.append(x) sd_labels.append(y) coordinates.append(coors) rel_positions.append(rel_pos) i += 1 if i == batch_size: break yield np.array(inputs), [np.array(sd_labels), np.array(coordinates), np.array(rel_positions)] def regular_gen(batch_size=100, category_type='both'): if category_type == 'both': try: assert batch_size % 2 == 0 except: print("batch_size should be an even number!") else: half_batch = int(batch_size/2) while True: inputs = [] sd_labels = [] coordinates = [] rel_positions = [] if category_type == 'both': labels = [0] * half_batch + [1] * half_batch random.shuffle(labels) elif category_type == 'different': labels = [0] * batch_size elif category_type == 'same': labels = [1] * batch_size else: raise ValueError("category_type should be 'both', 'different', or 'same'!") i = 0 while True: y = labels[i] x = svrt_1_img(category=y, regular=True, color_a=(0,0,0), sides=None, thickness=1) coors, rel_pos, bad_sample = get_shapes_info(x, scrambled_negative=False, draw_b_rects=False) if bad_sample: pass else: inputs.append(x) sd_labels.append(y) coordinates.append(coors) rel_positions.append(rel_pos) i += 1 if i == batch_size: break yield np.array(inputs), 
[np.array(sd_labels), np.array(coordinates), np.array(rel_positions)] def open_gen(batch_size=100, category_type='both'): if category_type == 'both': try: assert batch_size % 2 == 0 except: print("batch_size should be an even number!") else: half_batch = int(batch_size/2) while True: inputs = [] sd_labels = [] coordinates = [] rel_positions = [] if category_type == 'both': labels = [0] * half_batch + [1] * half_batch random.shuffle(labels) elif category_type == 'different': labels = [0] * batch_size elif category_type == 'same': labels = [1] * batch_size else: raise ValueError("category_type should be 'both', 'different', or 'same'!") i = 0 while True: y = labels[i] x = svrt_1_img(category=y, regular=False, color_a=(0,0,0), sides=None, thickness=1, closed=False) coors, rel_pos, bad_sample = get_shapes_info(x, scrambled_negative=False, draw_b_rects=False) if bad_sample: pass else: inputs.append(x) sd_labels.append(y) coordinates.append(coors) rel_positions.append(rel_pos) i += 1 if i == batch_size: break yield np.array(inputs), [np.array(sd_labels), np.array(coordinates), np.array(rel_positions)] def wider_line_gen(batch_size=100, thickness=2, category_type='both'): if category_type == 'both': try: assert batch_size % 2 == 0 except: print("batch_size should be an even number!") else: half_batch = int(batch_size/2) while True: inputs = [] sd_labels = [] coordinates = [] rel_positions = [] if category_type == 'both': labels = [0] * half_batch + [1] * half_batch random.shuffle(labels) elif category_type == 'different': labels = [0] * batch_size elif category_type == 'same': labels = [1] * batch_size else: raise ValueError("category_type should be 'both', 'different', or 'same'!") i = 0 while True: y = labels[i] x = svrt_1_img(category=y, regular=False, color_a=(0,0,0), sides=None, thickness=thickness) coors, rel_pos, bad_sample = get_shapes_info(x, scrambled_negative=False, draw_b_rects=False) if bad_sample: pass else: inputs.append(x) sd_labels.append(y) 
coordinates.append(coors) rel_positions.append(rel_pos) i += 1 if i == batch_size: break yield np.array(inputs), [np.array(sd_labels), np.array(coordinates), np.array(rel_positions)] def scrambled_gen(batch_size=100, category_type='both'): if category_type == 'both': try: assert batch_size % 2 == 0 except: print("batch_size should be an even number!") else: half_batch = int(batch_size/2) while True: inputs = [] sd_labels = [] coordinates = [] rel_positions = [] if category_type == 'both': labels = [0] * half_batch + [1] * half_batch random.shuffle(labels) elif category_type == 'different': labels = [0] * batch_size elif category_type == 'same': labels = [1] * batch_size else: raise ValueError("category_type should be 'both', 'different', or 'same'!") i = 0 while True: y = labels[i] scrambled_negative = True if y == 0 else False x = svrt_1_img(category=y, regular=True, color_a=(0,0,0), sides=None, thickness=1, displace_vertices=True) coors, rel_pos, bad_sample = get_shapes_info(x, scrambled_negative=scrambled_negative, draw_b_rects=False) if bad_sample: pass else: inputs.append(x) sd_labels.append(y) coordinates.append(coors) rel_positions.append(rel_pos) i += 1 if i == batch_size: break yield np.array(inputs), [np.array(sd_labels), np.array(coordinates), np.array(rel_positions)] def random_color_gen(batch_size=100, category_type='both'): if category_type == 'both': try: assert batch_size % 2 == 0 except: print("batch_size should be an even number!") else: half_batch = int(batch_size/2) while True: inputs = [] sd_labels = [] coordinates = [] rel_positions = [] if category_type == 'both': labels = [0] * half_batch + [1] * half_batch random.shuffle(labels) elif category_type == 'different': labels = [0] * batch_size elif category_type == 'same': labels = [1] * batch_size else: raise ValueError("category_type should be 'both', 'different', or 'same'!") i = 0 while True: color = tuple(np.random.randint(256, size=3)) color = (int(color[0]), int(color[1]), int(color[2])) 
y = labels[i] x = svrt_1_img(category=y, regular=False, color_a=color, sides=None, thickness=1) coors, rel_pos, bad_sample = get_shapes_info(x, scrambled_negative=False, draw_b_rects=False) if bad_sample: pass else: inputs.append(x) sd_labels.append(y) coordinates.append(coors) rel_positions.append(rel_pos) i += 1 if i == batch_size: break yield np.array(inputs), [np.array(sd_labels), np.array(coordinates), np.array(rel_positions)] def filled_gen(batch_size=100, category_type='both'): if category_type == 'both': try: assert batch_size % 2 == 0 except: print("batch_size should be an even number!") else: half_batch = int(batch_size/2) while True: inputs = [] sd_labels = [] coordinates = [] rel_positions = [] if category_type == 'both': labels = [0] * half_batch + [1] * half_batch random.shuffle(labels) elif category_type == 'different': labels = [0] * batch_size elif category_type == 'same': labels = [1] * batch_size else: raise ValueError("category_type should be 'both', 'different', or 'same'!") i = 0 while True: y = labels[i] x = svrt_1_img(category=y, regular=False, color_a=(0,0,0), sides=None, thickness=1, filled=True) coors, rel_pos, bad_sample = get_shapes_info(x, scrambled_negative=False, draw_b_rects=False) if bad_sample: pass else: inputs.append(x) sd_labels.append(y) coordinates.append(coors) rel_positions.append(rel_pos) i += 1 if i == batch_size: break yield np.array(inputs), [np.array(sd_labels), np.array(coordinates), np.array(rel_positions)] def lines_gen(batch_size=100, category_type='both'): if category_type == 'both': try: assert batch_size % 2 == 0 except: print("batch_size should be an even number!") else: half_batch = int(batch_size/2) while True: inputs = [] sd_labels = [] coordinates = [] rel_positions = [] if category_type == 'both': labels = [0] * half_batch + [1] * half_batch random.shuffle(labels) elif category_type == 'different': labels = [0] * batch_size elif category_type == 'same': labels = [1] * batch_size else: raise 
ValueError("category_type should be 'both', 'different', or 'same'!") i = 0 while True: y = labels[i] x = make_connected_open_squares(category=y, line_width=1) coors, rel_pos, bad_sample = get_shapes_info(x, scrambled_negative=False, draw_b_rects=False) if bad_sample: pass else: inputs.append(x) sd_labels.append(y) coordinates.append(coors) rel_positions.append(rel_pos) i += 1 if i == batch_size: break yield np.array(inputs), [np.array(sd_labels), np.array(coordinates), np.array(rel_positions)] def arrows_gen(batch_size=100, category_type='both'): if category_type == 'both': try: assert batch_size % 2 == 0 except: print("batch_size should be an even number!") else: half_batch = int(batch_size/2) while True: inputs = [] sd_labels = [] coordinates = [] rel_positions = [] if category_type == 'both': labels = [0] * half_batch + [1] * half_batch random.shuffle(labels) elif category_type == 'different': labels = [0] * batch_size elif category_type == 'same': labels = [1] * batch_size else: raise ValueError("category_type should be 'both', 'different', or 'same'!") i = 0 while True: y = labels[i] x = make_arrows_sd(y, continuous=True, line_width=1, hard_test=True) coors, rel_pos, bad_sample = get_shapes_info(x, scrambled_negative=False, draw_b_rects=False) if bad_sample: pass else: inputs.append(x) sd_labels.append(y) coordinates.append(coors) rel_positions.append(rel_pos) i += 1 if i == batch_size: break yield np.array(inputs), [np.array(sd_labels), np.array(coordinates), np.array(rel_positions)] def rectangles_gen(batch_size=100, category_type='both'): if category_type == 'both': try: assert batch_size % 2 == 0 except: print("batch_size should be an even number!") else: half_batch = int(batch_size/2) while True: inputs = [] sd_labels = [] coordinates = [] rel_positions = [] if category_type == 'both': labels = [0] * half_batch + [1] * half_batch random.shuffle(labels) elif category_type == 'different': labels = [0] * batch_size elif category_type == 'same': labels = [1] 
* batch_size else: raise ValueError("category_type should be 'both', 'different', or 'same'!") i = 0 while True: y = labels[i] x = make_rectangles_sd(category=y, line_thickness=1) coors, rel_pos, bad_sample = get_shapes_info(x, scrambled_negative=False, draw_b_rects=False) if bad_sample: pass else: inputs.append(x) sd_labels.append(y) coordinates.append(coors) rel_positions.append(rel_pos) i += 1 if i == batch_size: break yield np.array(inputs), [np.array(sd_labels), np.array(coordinates), np.array(rel_positions)] def straight_lines_gen(batch_size=100, category_type='both'): if category_type == 'both': try: assert batch_size % 2 == 0 except: print("batch_size should be an even number!") else: half_batch = int(batch_size/2) while True: inputs = [] sd_labels = [] coordinates = [] rel_positions = [] if category_type == 'both': labels = [0] * half_batch + [1] * half_batch random.shuffle(labels) elif category_type == 'different': labels = [0] * batch_size elif category_type == 'same': labels = [1] * batch_size else: raise ValueError("category_type should be 'both', 'different', or 'same'!") i = 0 while True: y = labels[i] x = make_straingt_lines_sd(y, var_factor='size', line_thickness=1) coors, rel_pos, bad_sample = get_shapes_info(x, scrambled_negative=False, draw_b_rects=False) if bad_sample: pass else: inputs.append(x) sd_labels.append(y) coordinates.append(coors) rel_positions.append(rel_pos) i += 1 if i == batch_size: break yield np.array(inputs), [np.array(sd_labels), np.array(coordinates), np.array(rel_positions)] def connected_squares_gen(batch_size=100, category_type='both'): if category_type == 'both': try: assert batch_size % 2 == 0 except: print("batch_size should be an even number!") else: half_batch = int(batch_size/2) while True: inputs = [] sd_labels = [] coordinates = [] rel_positions = [] if category_type == 'both': labels = [0] * half_batch + [1] * half_batch random.shuffle(labels) elif category_type == 'different': labels = [0] * batch_size elif 
category_type == 'same': labels = [1] * batch_size else: raise ValueError("category_type should be 'both', 'different', or 'same'!") i = 0 while True: y = labels[i] x = make_connected_open_squares(category=y, line_width=1, is_closed=True) coors, rel_pos, bad_sample = get_shapes_info(x, scrambled_negative=False, draw_b_rects=False) if bad_sample: pass else: inputs.append(x) sd_labels.append(y) coordinates.append(coors) rel_positions.append(rel_pos) i += 1 if i == batch_size: break yield np.array(inputs), [np.array(sd_labels), np.array(coordinates), np.array(rel_positions)] def connected_circles_gen(batch_size=100, category_type='both'): if category_type == 'both': try: assert batch_size % 2 == 0 except: print("batch_size should be an even number!") else: half_batch = int(batch_size/2) while True: inputs = [] sd_labels = [] coordinates = [] rel_positions = [] if category_type == 'both': labels = [0] * half_batch + [1] * half_batch random.shuffle(labels) elif category_type == 'different': labels = [0] * batch_size elif category_type == 'same': labels = [1] * batch_size else: raise ValueError("category_type should be 'both', 'different', or 'same'!") i = 0 while True: y = labels[i] x = make_connected_circles(category=y, line_width=1) coors, rel_pos, bad_sample = get_shapes_info(x, scrambled_negative=False, draw_b_rects=False) if bad_sample: pass else: inputs.append(x) sd_labels.append(y) coordinates.append(coors) rel_positions.append(rel_pos) i += 1 if i == batch_size: break yield np.array(inputs), [np.array(sd_labels), np.array(coordinates), np.array(rel_positions)] # Define two-chanels generators. def irregular_two_chanels_gen(batch_size=64, category_type='both'): """Generatot for problem 1, test 1 (irregular polygon). 
Test 1: shape=irregular, color=black, thickness=1, inverted=not.""" if category_type == 'both': try: assert batch_size % 2 == 0 except: print("batch_size should be an even number!") else: half_batch = int(batch_size/2) while True: inputs1 = [] inputs2 = [] sd_labels = [] if category_type == 'both': labels = [0] * half_batch + [1] * half_batch random.shuffle(labels) elif category_type == '0': labels = [0] * batch_size elif category_type == '1': labels = [1] * batch_size else: raise ValueError("category_type should be 'both', '0', or '1'!") i = 0 while True: y = labels[i] x1, x2 = svrt_1_img(category=y, regular=False, color_a=(0,0,0), sides=None, thickness=1, separated_chanels=True) inputs1.append(x1) inputs2.append(x2) sd_labels.append(y) i += 1 if i == batch_size: break yield [np.array(inputs1), np.array(inputs2)], np.array(sd_labels) def regular_two_chanels_gen(batch_size=64, category_type='both'): """Generatot for problem 1, test 2 (regular polygon). Test 2: shape=regular, color=black, thickness=1, inverted=not.""" if category_type == 'both': try: assert batch_size % 2 == 0 except: print("batch_size should be an even number!") else: half_batch = int(batch_size/2) while True: inputs1 = [] inputs2 = [] sd_labels = [] if category_type == 'both': labels = [0] * half_batch + [1] * half_batch random.shuffle(labels) elif category_type == '0': labels = [0] * batch_size elif category_type == '1': labels = [1] * batch_size else: raise ValueError("category_type should be 'both', '0', or '1'!") i = 0 while True: y = labels[i] x1, x2 = svrt_1_img(category=y, regular=True, color_a=(0,0,0), sides=None, thickness=1, separated_chanels=True) inputs1.append(x1) inputs2.append(x2) sd_labels.append(y) i += 1 if i == batch_size: break yield [np.array(inputs1), np.array(inputs2)], np.array(sd_labels) def open_two_chanels_gen(batch_size=64, category_type='both'): """Generator for problem 1, test 8 (open). 
Test 1: shape=irregular, color=black, thickness=1, inverted=not, closed=not.""" if category_type == 'both': try: assert batch_size % 2 == 0 except: print("batch_size should be an even number!") else: half_batch = int(batch_size/2) while True: inputs1 = [] inputs2 = [] sd_labels = [] if category_type == 'both': labels = [0] * half_batch + [1] * half_batch random.shuffle(labels) elif category_type == '0': labels = [0] * batch_size elif category_type == '1': labels = [1] * batch_size else: raise ValueError("category_type should be 'both', '0', or '1'!") i = 0 while True: y = labels[i] x1, x2 = svrt_1_img( category=y, regular=False, thickness=1, closed=False, separated_chanels=True) inputs1.append(x1) inputs2.append(x2) sd_labels.append(y) i += 1 if i == batch_size: break yield [np.array(inputs1), np.array(inputs2)], np.array(sd_labels) def wider_two_chanels_gen(thickness=2, batch_size=64, category_type='both'): """Generatot for problem 1, test 4 (wider line). Test 4: shape=irregular, color=black, thickness=[2, 3, 4, 5], inverted=not.""" if category_type == 'both': try: assert batch_size % 2 == 0 except: print("batch_size should be an even number!") else: half_batch = int(batch_size/2) while True: inputs1 = [] inputs2 = [] sd_labels = [] if category_type == 'both': labels = [0] * half_batch + [1] * half_batch random.shuffle(labels) elif category_type == '0': labels = [0] * batch_size elif category_type == '1': labels = [1] * batch_size else: raise ValueError("category_type should be 'both', '0', or '1'!") i = 0 while True: y = labels[i] x1, x2 = svrt_1_img(category=y, regular=False, thickness=thickness, separated_chanels=True) inputs1.append(x1) inputs2.append(x2) sd_labels.append(y) i += 1 if i == batch_size: break yield [np.array(inputs1), np.array(inputs2)], np.array(sd_labels) def scrambled_two_chanels_gen(batch_size=64, category_type='both'): """Generator for problem 1, test 9 (Scrambled). 
Test 1: shape=rregular, color=black, thickness=1, inverted=not, displace_vertices=True.""" if category_type == 'both': try: assert batch_size % 2 == 0 except: print("batch_size should be an even number!") else: half_batch = int(batch_size/2) while True: inputs1 = [] inputs2 = [] sd_labels = [] if category_type == 'both': labels = [0] * half_batch + [1] * half_batch random.shuffle(labels) elif category_type == '0': labels = [0] * batch_size elif category_type == '1': labels = [1] * batch_size else: raise ValueError("category_type should be 'both', '0', or '1'!") i = 0 while True: y = labels[i] x1, x2 = svrt_1_img(category=y, regular=True, thickness=1, displace_vertices=True, separated_chanels=True) inputs1.append(x1) inputs2.append(x2) sd_labels.append(y) i += 1 if i == batch_size: break yield [np.array(inputs1), np.array(inputs2)], np.array(sd_labels) def random_two_chanels_gen(batch_size=64, category_type='both'): """Generatot for problem 1, test 3. Test 3: shape=irregular, color=random, thickness=1, inverted=not.""" if category_type == 'both': try: assert batch_size % 2 == 0 except: print("batch_size should be an even number!") else: half_batch = int(batch_size/2) while True: inputs1 = [] inputs2 = [] sd_labels = [] if category_type == 'both': labels = [0] * half_batch + [1] * half_batch random.shuffle(labels) elif category_type == '0': labels = [0] * batch_size elif category_type == '1': labels = [1] * batch_size else: raise ValueError("category_type should be 'both', '0', or '1'!") i = 0 while True: color = tuple(np.random.randint(256, size=3)) color = (int(color[0]), int(color[1]), int(color[2])) y = labels[i] x1, x2 = svrt_1_img(category=y, regular=False, color_a=color, thickness=1, separated_chanels=True) inputs1.append(x1) inputs2.append(x2) sd_labels.append(y) i += 1 if i == batch_size: break yield [np.array(inputs1), np.array(inputs2)], np.array(sd_labels) def filled_two_chanels_gen(batch_size=64, category_type='both'): """Generator for problem 1, test 6 
(filled). Test 1: shape=irregular, color=black, thickness=1, inverted=not, filled=True.""" if category_type == 'both': try: assert batch_size % 2 == 0 except: print("batch_size should be an even number!") else: half_batch = int(batch_size/2) while True: inputs1 = [] inputs2 = [] sd_labels = [] if category_type == 'both': labels = [0] * half_batch + [1] * half_batch random.shuffle(labels) elif category_type == '0': labels = [0] * batch_size elif category_type == '1': labels = [1] * batch_size else: raise ValueError("category_type should be 'both', '0', or '1'!") i = 0 while True: y = labels[i] x1, x2 = svrt_1_img(category=y, regular=False, thickness=1, filled=True, separated_chanels=True) inputs1.append(x1) inputs2.append(x2) sd_labels.append(y) i += 1 if i == batch_size: break yield [np.array(inputs1), np.array(inputs2)], np.array(sd_labels) def lines_two_chanels_gen(batch_size=64, category_type='both'): # Check that batch_size even if both categories are being generated. if category_type == 'both': try: assert batch_size % 2 == 0 except: print("batch_size should be an even number!") else: half_batch = int(batch_size/2) while True: inputs1 = [] inputs2 = [] sd_labels = [] if category_type == 'both': labels = [0] * half_batch + [1] * half_batch random.shuffle(labels) elif category_type == '0': labels = [0] * batch_size elif category_type == '1': labels = [1] * batch_size else: raise ValueError("category_type should be 'both', '0', or '1'!") i = 0 while True: y = labels[i] x1, x2 = make_connected_open_squares(category=y, line_width=1, separated_chanels=True) inputs1.append(x1) inputs2.append(x2) sd_labels.append(y) i += 1 if i == batch_size: break yield [np.array(inputs1), np.array(inputs2)], np.array(sd_labels) def arrows_two_chanels_gen(batch_size=64, category_type='both', continuous=True): # Check that batch_size even if both categories are being generated. 
if category_type == 'both': try: assert batch_size % 2 == 0 except: print("batch_size should be an even number!") else: half_batch = int(batch_size/2) while True: inputs1 = [] inputs2 = [] sd_labels = [] if category_type == 'both': labels = [0] * half_batch + [1] * half_batch random.shuffle(labels) elif category_type == '0': labels = [0] * batch_size elif category_type == '1': labels = [1] * batch_size else: raise ValueError("category_type should be 'both', '0', or '1'!") i = 0 while True: y = labels[i] x1, x2 = make_arrows_sd(y, continuous=continuous, line_width=1, hard_test=True, separated_chanels=True) inputs1.append(x1) inputs2.append(x2) sd_labels.append(y) i += 1 if i == batch_size: break yield [np.array(inputs1), np.array(inputs2)], np.array(sd_labels) def rectangles_two_chanels_gen(batch_size=64, category_type='both'): # Check that batch_size even if both categories are being generated. if category_type == 'both': try: assert batch_size % 2 == 0 except: print("batch_size should be an even number!") else: half_batch = int(batch_size/2) while True: inputs1 = [] inputs2 = [] sd_labels = [] if category_type == 'both': labels = [0] * half_batch + [1] * half_batch random.shuffle(labels) elif category_type == '0': labels = [0] * batch_size elif category_type == '1': labels = [1] * batch_size else: raise ValueError("category_type should be 'both', '0', or '1'!") i = 0 while True: y = labels[i] x1, x2 = make_rectangles_sd(category=y, line_thickness=1, separated_chanels=True) inputs1.append(x1) inputs2.append(x2) sd_labels.append(y) i += 1 if i == batch_size: break yield [np.array(inputs1), np.array(inputs2)], np.array(sd_labels) def straingt_lines_two_chanels_gen(batch_size=64, category_type='both', var_factor='size'): # Check that batch_size even if both categories are being generated. 
if category_type == 'both': try: assert batch_size % 2 == 0 except: print("batch_size should be an even number!") else: half_batch = int(batch_size/2) while True: inputs1 = [] inputs2 = [] sd_labels = [] if category_type == 'both': labels = [0] * half_batch + [1] * half_batch random.shuffle(labels) elif category_type == '0': labels = [0] * batch_size elif category_type == '1': labels = [1] * batch_size else: raise ValueError("category_type should be 'both', '0', or '1'!") i = 0 while True: y = labels[i] x1, x2 = make_straingt_lines_sd(y, var_factor=var_factor, line_thickness=1, separated_chanels=True) inputs1.append(x1) inputs2.append(x2) sd_labels.append(y) i += 1 if i == batch_size: break yield [np.array(inputs1), np.array(inputs2)], np.array(sd_labels) def connected_squares_two_chanels_gen(batch_size=64, category_type='both'): # Check that batch_size even if both categories are being generated. if category_type == 'both': try: assert batch_size % 2 == 0 except: print("batch_size should be an even number!") else: half_batch = int(batch_size/2) while True: inputs1 = [] inputs2 = [] sd_labels = [] if category_type == 'both': labels = [0] * half_batch + [1] * half_batch random.shuffle(labels) elif category_type == '0': labels = [0] * batch_size elif category_type == '1': labels = [1] * batch_size else: raise ValueError("category_type should be 'both', '0', or '1'!") i = 0 while True: y = labels[i] x1, x2 = make_connected_open_squares(category=y, line_width=1, is_closed=True, separated_chanels=True) inputs1.append(x1) inputs2.append(x2) sd_labels.append(y) i += 1 if i == batch_size: break yield [np.array(inputs1), np.array(inputs2)], np.array(sd_labels) def connected_circles_two_chanels_gen(batch_size=64, category_type='both'): # Check that batch_size even if both categories are being generated. 
if category_type == 'both': try: assert batch_size % 2 == 0 except: print("batch_size should be an even number!") else: half_batch = int(batch_size/2) while True: inputs1 = [] inputs2 = [] sd_labels = [] if category_type == 'both': labels = [0] * half_batch + [1] * half_batch random.shuffle(labels) elif category_type == '0': labels = [0] * batch_size elif category_type == '1': labels = [1] * batch_size else: raise ValueError("category_type should be 'both', '0', or '1'!") i = 0 while True: y = labels[i] x1, x2 = make_connected_circles(category=y, line_width=1, separated_chanels=True) inputs1.append(x1) inputs2.append(x2) sd_labels.append(y) i += 1 if i == batch_size: break yield [np.array(inputs1), np.array(inputs2)], np.array(sd_labels) # Helper tensorflow records functions. def _bytes_feature(value): """Returns a bytes_list from a string / byte.""" if isinstance(value, type(tf.constant(0))): value = value.numpy() # BytesList won't unpack a string from an EagerTensor. return tf.train.Feature(bytes_list=tf.train.BytesList(value=[value])) def _float_feature(value): """Returns a float_list from a float / double.""" return tf.train.Feature(float_list=tf.train.FloatList(value=[value])) def _int64_feature(value): """Returns an int64_list from a bool / enum / int / uint.""" return tf.train.Feature(int64_list=tf.train.Int64List(value=[value])) # Dataset saving functions. def save_dataset(data_generator, size, tfrecord_file_name): with tf.io.TFRecordWriter(tfrecord_file_name) as writer: for i in range(size): # print('i:', i) # Generate data. xs, ys = next(data_generator) for j in range(xs.shape[0]): # Get data. img = xs[j] label = ys[0][j] coors = ys[1][j] serialized_coors = serialize_array(coors) rel_pos = ys[2][j] # Save image to bytes. pil_image=Image.fromarray(img) buf = io.BytesIO() pil_image.save(buf, format='PNG') byte_im = buf.getvalue() # Parse example. 
feature = { 'image_raw': _bytes_feature(byte_im), 'label': _int64_feature(label), 'coordinates': _bytes_feature(serialized_coors), 'relative_position': _int64_feature(rel_pos)} example = tf.train.Example(features=tf.train.Features(feature=feature)) # Write example. writer.write(example.SerializeToString()) return def save_dataset_two_chanels(data_generator, size, tfrecord_file_name): with tf.io.TFRecordWriter(tfrecord_file_name) as writer: for i in range(size): # print('i:', i) # Generate data. xs, ys = next(data_generator) for j in range(xs[0].shape[0]): # Get data. img1 = xs[0][j] img2 = xs[1][j] label = ys[j] # Save image1 to bytes. pil_image1=Image.fromarray(img1) buf1 = io.BytesIO() pil_image1.save(buf1, format='PNG') byte_im1 = buf1.getvalue() # Save image2 to bytes. pil_image2=Image.fromarray(img2) buf2 = io.BytesIO() pil_image2.save(buf2, format='PNG') byte_im2 = buf2.getvalue() # Parse example. feature = { 'image1_raw': _bytes_feature(byte_im1), 'image2_raw': _bytes_feature(byte_im2), 'label': _int64_feature(label)} example = tf.train.Example(features=tf.train.Features(feature=feature)) # Write example. 
writer.write(example.SerializeToString()) def make_datasets(train_generators, train_data_names, test_generators, test_data_names, train_size, test_size): for gen, name in zip(train_generators, train_data_names): save_dataset(data_generator=gen, size=train_size, tfrecord_file_name=name) for gen, name in zip(test_generators, test_data_names): save_dataset(data_generator=gen, size=test_size, tfrecord_file_name=name) return def make_datasets_two_chanels(train_generators, train_data_names, test_generators, test_data_names, train_size, test_size): for gen, name in zip(train_generators, train_data_names): save_dataset_two_chanels(data_generator=gen, size=train_size, tfrecord_file_name=name) for gen, name in zip(test_generators, test_data_names): save_dataset_two_chanels(data_generator=gen, size=test_size, tfrecord_file_name=name) return if __name__ == '__main__': # Dataset parameters. BATCH_SIZE = 100 TRAIN_SIZE = 28000 // BATCH_SIZE VAL_SIZE = 5600 // BATCH_SIZE TEST_SIZE = 11200 // BATCH_SIZE # Instantiate train/test generators. irregular_gen = irregular_gen(batch_size=BATCH_SIZE) regular_gen = regular_gen(batch_size=BATCH_SIZE) open_gen = open_gen(batch_size=BATCH_SIZE) wider_line_gen = wider_line_gen(batch_size=BATCH_SIZE) scrambled_gen = scrambled_gen(batch_size=BATCH_SIZE) random_color_gen = random_color_gen(batch_size=BATCH_SIZE) filled_gen = filled_gen(batch_size=BATCH_SIZE) lines_gen = lines_gen(batch_size=BATCH_SIZE) arrows_gen = arrows_gen(batch_size=BATCH_SIZE) # Instantiate test only generators. rectangles_gen = rectangles_gen(batch_size=BATCH_SIZE) straight_lines_gen = straight_lines_gen(batch_size=BATCH_SIZE) connected_squares_gen = connected_squares_gen(batch_size=BATCH_SIZE) connected_circles_gen = connected_circles_gen(batch_size=BATCH_SIZE) # Instantiate double chanel train/test generators. 
irregular_two_chanels_gen = irregular_two_chanels_gen(batch_size=BATCH_SIZE) regular_two_chanels_gen = regular_two_chanels_gen(batch_size=BATCH_SIZE) open_two_chanels_gen = open_two_chanels_gen(batch_size=BATCH_SIZE) wider_line_two_chanels_gen = wider_two_chanels_gen(batch_size=BATCH_SIZE) scrambled_two_chanels_gen = scrambled_two_chanels_gen(batch_size=BATCH_SIZE) random_color_two_chanels_gen = random_two_chanels_gen(batch_size=BATCH_SIZE) filled_two_chanels_gen = filled_two_chanels_gen(batch_size=BATCH_SIZE) lines_two_chanels_gen = lines_two_chanels_gen(batch_size=BATCH_SIZE) arrows_two_chanels_gen = arrows_two_chanels_gen(batch_size=BATCH_SIZE) # Instantiate double chanel test only generators. rectangles_two_chanels_gen = rectangles_two_chanels_gen(batch_size=BATCH_SIZE) straight_lines_two_chanels_gen = straingt_lines_two_chanels_gen(batch_size=BATCH_SIZE) connected_squares_two_chanels_gen = connected_squares_two_chanels_gen(batch_size=BATCH_SIZE) connected_circles_two_chanels_gen = connected_circles_two_chanels_gen(batch_size=BATCH_SIZE) train_dataset_generators = [ irregular_gen, regular_gen, open_gen, wider_line_gen, scrambled_gen, random_color_gen, filled_gen, lines_gen, arrows_gen] train_dataset_names = [ 'data/irregular_train.tfrecords', 'data/regular_train.tfrecords', 'data/open_train.tfrecords', 'data/wider_line_train.tfrecords', 'data/scrambled_train.tfrecords', 'data/random_color_train.tfrecords', 'data/filled_train.tfrecords', 'data/lines_train.tfrecords', 'data/arrows_train.tfrecords'] test_dataset_generators = [ irregular_gen, regular_gen, open_gen, wider_line_gen, scrambled_gen, random_color_gen, filled_gen, lines_gen, arrows_gen, rectangles_gen, straight_lines_gen, connected_squares_gen, connected_circles_gen] test_dataset_names = [ 'data/irregular_test.tfrecords', 'data/regular_test.tfrecords', 'data/open_test.tfrecords', 'data/wider_line_test.tfrecords', 'data/scrambled_test.tfrecords', 'data/random_color_test.tfrecords', 
'data/filled_test.tfrecords', 'data/lines_test.tfrecords', 'data/arrows_test.tfrecords', 'data/rectangles_test.tfrecords', 'data/straight_lines_test.tfrecords', 'data/connected_squares_test.tfrecords', 'data/connected_circles_test.tfrecords'] # Save single chanel datasets. make_datasets( train_generators=train_dataset_generators, train_data_names=train_dataset_names, test_generators=test_dataset_generators, test_data_names=test_dataset_names, train_size=TRAIN_SIZE, test_size=TEST_SIZE) two_chanels_train_dataset_generators = [ irregular_two_chanels_gen, regular_two_chanels_gen, open_two_chanels_gen, wider_line_two_chanels_gen, scrambled_two_chanels_gen, random_color_two_chanels_gen, filled_two_chanels_gen, lines_two_chanels_gen, arrows_two_chanels_gen] two_chanels_train_dataset_names = [ 'data/irregular_two_chanels_train.tfrecords', 'data/regular_two_chanels_train.tfrecords', 'data/open_two_chanels_train.tfrecords', 'data/wider_line_two_chanels_train.tfrecords', 'data/scrambled_two_chanels_train.tfrecords', 'data/random_color_two_chanels_train.tfrecords', 'data/filled_two_chanels_train.tfrecords', 'data/lines_two_chanels_train.tfrecords', 'data/arrows_two_chanels_train.tfrecords'] two_chanels_test_dataset_generators = [ irregular_two_chanels_gen, regular_two_chanels_gen, open_two_chanels_gen, wider_line_two_chanels_gen, scrambled_two_chanels_gen, random_color_two_chanels_gen, filled_two_chanels_gen, lines_two_chanels_gen, arrows_two_chanels_gen, rectangles_two_chanels_gen, straight_lines_two_chanels_gen, connected_squares_two_chanels_gen, connected_circles_two_chanels_gen] two_chanels_test_dataset_names = [ 'data/irregular_two_chanels_test.tfrecords', 'data/regular_two_chanels_test.tfrecords', 'data/open_two_chanels_test.tfrecords', 'data/wider_line_two_chanels_test.tfrecords', 'data/scrambled_two_chanels_test.tfrecords', 'data/random_color_two_chanels_test.tfrecords', 'data/filled_two_chanels_test.tfrecords', 'data/lines_two_chanels_test.tfrecords', 
'data/arrows_two_chanels_test.tfrecords', 'data/rectangles_two_chanels_test.tfrecords', 'data/straight_lines_two_chanels_test.tfrecords', 'data/connected_squares_two_chanels_test.tfrecords', 'data/connected_circles_two_chanels_test.tfrecords'] # Save double chanel datasets. make_datasets_two_chanels( train_generators=two_chanels_train_dataset_generators, train_data_names=two_chanels_train_dataset_names, test_generators=two_chanels_test_dataset_generators, test_data_names=two_chanels_test_dataset_names, train_size=TRAIN_SIZE, test_size=TEST_SIZE) print('datasets saved to: ~/data')
token = '264862990:AAEODOt9LfE3qRBOpOpiawq5iECp8yPPkuU' # Add Your Token is_sudo = '90516804.172561830' # add Your ID
from django.http import HttpResponse
from rest_framework import generics
from django.contrib.auth.models import User
from rest_framework.response import Response
from api import serializers
from api.models import Post, Comment, Category
from rest_framework import permissions
from api.permissions import IsOwnerOrReadOnly
from api.serializers import PostSerializer, CommentSerializer

# https://blog.logrocket.com/use-django-rest-framework-to-build-a-blog/


class UserList(generics.ListAPIView):
    """Read-only list of all users."""
    queryset = User.objects.all()
    serializer_class = serializers.UserSerializer


class UserDetail(generics.RetrieveAPIView):
    """Read-only detail view for a single user."""
    queryset = User.objects.all()
    serializer_class = serializers.UserSerializer


class PostList(generics.ListCreateAPIView):
    """List all posts, or create a new post owned by the requesting user."""
    queryset = Post.objects.all()
    serializer_class = serializers.PostSerializer
    # permission_classes = [permissions.IsAuthenticatedOrReadOnly]

    def create(self, request, *args, **kwargs):
        """Create a post and attach its comma-separated category ids.

        Expects ``title`` and ``body`` in the payload; ``categories`` is an
        optional comma-separated list of Category ids (fix: previously a
        missing ``categories`` key raised KeyError -> HTTP 500).

        NOTE(review): assumes an authenticated user -- with permissions
        commented out, an anonymous request will fail on ``owner``; confirm
        intended.
        """
        post_data = request.data
        new_post = Post.objects.create(
            title=post_data['title'],
            body=post_data['body'],
            owner=self.request.user,
        )
        # Ignore blank entries such as "1,,2"; use a distinct loop name so
        # the raw id string is not shadowed by the Category instance.
        raw_categories = post_data.get('categories', '')
        for category_id in (c.strip() for c in raw_categories.split(',')):
            if category_id:
                category = Category.objects.get(id=category_id)
                new_post.categories.add(category.id)
        # No extra save() needed: create() persisted the row and M2M add()
        # writes the through table immediately.
        serializer = PostSerializer(new_post)
        return Response(serializer.data)


class PostDetail(generics.RetrieveUpdateDestroyAPIView):
    """Retrieve, update or delete a single post."""
    queryset = Post.objects.all()
    serializer_class = serializers.PostSerializer
    # permission_classes = [permissions.IsAuthenticatedOrReadOnly,
    #                       IsOwnerOrReadOnly]


class CommentList(generics.ListCreateAPIView):
    """List all comments, or create a comment on an existing post."""
    queryset = Comment.objects.all()
    serializer_class = serializers.CommentSerializer
    # permission_classes = [permissions.IsAuthenticatedOrReadOnly]

    def create(self, request, *args, **kwargs):
        """Create a comment; expects ``body`` and a ``post`` id."""
        comment_data = request.data
        new_comment = Comment.objects.create(
            body=comment_data['body'],
            post=Post.objects.get(id=comment_data['post']),
            owner=self.request.user,
        )
        # create() already saved the row; the previous explicit save() was
        # redundant.
        serializer = CommentSerializer(new_comment)
        return Response(serializer.data)


class CommentDetail(generics.RetrieveUpdateDestroyAPIView):
    """Retrieve, update or delete a single comment (owner-only writes)."""
    queryset = Comment.objects.all()
    serializer_class = serializers.CommentSerializer
    permission_classes = [permissions.IsAuthenticatedOrReadOnly,
                          IsOwnerOrReadOnly]


class CategoryList(generics.ListCreateAPIView):
    """List all categories, or create one owned by the requesting user."""
    queryset = Category.objects.all()
    serializer_class = serializers.CategorySerializer
    permission_classes = [permissions.IsAuthenticatedOrReadOnly]

    def perform_create(self, serializer):
        serializer.save(owner=self.request.user)


class CategoryDetail(generics.RetrieveUpdateDestroyAPIView):
    """Retrieve, update or delete a single category (owner-only writes)."""
    queryset = Category.objects.all()
    serializer_class = serializers.CategorySerializer
    permission_classes = [permissions.IsAuthenticatedOrReadOnly,
                          IsOwnerOrReadOnly]
# -*- coding: utf-8 -*- """ log_it.extensions ----------------- Flask Extensions used by Log-It :copyright: (c) 2020 by John te Bokkel :license: BSD, see LICENSE for more details """ from sqlalchemy import MetaData from flask_sqlalchemy import SQLAlchemy from flask_debugtoolbar import DebugToolbarExtension from flask_bootstrap import Bootstrap from flask_nav import Nav from flask_login import LoginManager from flask_principal import Principal from . import classful # noqa metadata = MetaData( naming_convention={ "ix": "idx_%(column_0_label)s", "uq": "uq_%(table_name)s_%(column_0_name)s", "fk": "fk_%(table_name)s_%(column_0_name)s_%(referred_table_name)s", "pk": "pk_%(table_name)s", } ) db = SQLAlchemy(metadata=metadata) debugtoolbar = DebugToolbarExtension() bootstrap = Bootstrap() nav = Nav() login_manager = LoginManager() principal = Principal()
from re import search

import requests
from bs4 import BeautifulSoup as bs


def _find_real_page(url):
    """Resolve eurostreaming's JS redirect page to the real series URL.

    Scans every <script> tag for a ``go_to":"<url>"};`` payload and returns
    the unescaped URL; falls back to the original *url* when no redirect
    payload is found.
    """
    soup = bs(requests.get(url).text, "html.parser")
    matches = [
        m
        for m in (
            search(r"(?:go\_to\"\:\")([^\s]+)(?:\"\}\;)", str(sc))
            for sc in soup.find_all("script")
        )
        if m
    ]
    if matches:
        real_url = matches[0].group(1).replace("\\", "")
        return real_url if real_url else url
    return url


def get_suggestion_list(eurostreaming_url, search):
    """Search the site and return [(title, real_page_url), ...] suggestions.

    Fix: the original used ``search.lower().replace(r"\s", "+")`` --
    ``str.replace`` is literal, not regex, so it only replaced the two-char
    sequence backslash-s and spaces were never turned into '+'. Now any
    whitespace run becomes a single '+'.
    """
    query = "+".join(search.lower().split())
    soup = bs(
        requests.get(eurostreaming_url + "/?s=" + query).text,
        "html.parser",
    )
    return [
        (h2.find("a").get("title"), _find_real_page(h2.find("a").get("href")))
        for h2 in soup.find_all("h2")
        if h2.find("a")
    ]
#   Copyright 2020 The PyMC Developers
#
#   Licensed under the Apache License, Version 2.0 (the "License");
#   you may not use this file except in compliance with the License.
#   You may obtain a copy of the License at
#
#       http://www.apache.org/licenses/LICENSE-2.0
#
#   Unless required by applicable law or agreed to in writing, software
#   distributed under the License is distributed on an "AS IS" BASIS,
#   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#   See the License for the specific language governing permissions and
#   limitations under the License.
import numpy as np
import pytest

import pymc as pm


# Happy-path cases: for each distribution, find parameters that put `mass`
# probability inside [lower, upper], starting the optimizer at `init_guess`
# while holding `fixed_params` constant.
@pytest.mark.parametrize(
    "distribution, lower, upper, init_guess, fixed_params",
    [
        (pm.Gamma, 0.1, 0.4, {"alpha": 1, "beta": 10}, {}),
        (pm.Normal, 155, 180, {"mu": 170, "sigma": 3}, {}),
        (pm.StudentT, 0.1, 0.4, {"mu": 10, "sigma": 3}, {"nu": 7}),
        (pm.StudentT, 0, 1, {"mu": 5, "sigma": 2, "nu": 7}, {}),
        # (pm.Exponential, 0, 1, {"lam": 1}, {}), PyMC Exponential gradient is
        # failing miserably, need to figure out why
        (pm.HalfNormal, 0, 1, {"sigma": 1}, {}),
        (pm.Binomial, 0, 8, {"p": 0.5}, {"n": 10}),
        (pm.Poisson, 1, 15, {"mu": 10}, {}),
        (pm.Poisson, 19, 41, {"mu": 30}, {}),
    ],
)
@pytest.mark.parametrize("mass", [0.5, 0.75, 0.95])
def test_find_constrained_prior(distribution, lower, upper, init_guess, fixed_params, mass):
    """The optimizer converges without warnings and hits the requested mass."""
    # NOTE(review): pytest.warns(None) is deprecated in pytest >= 7; consider
    # warnings.catch_warnings(record=True) instead -- confirm pytest version.
    with pytest.warns(None) as record:
        opt_params = pm.find_constrained_prior(
            distribution,
            lower=lower,
            upper=upper,
            mass=mass,
            init_guess=init_guess,
            fixed_params=fixed_params,
        )
    # No warnings should have been emitted for these well-posed cases.
    assert len(record) == 0

    opt_distribution = distribution.dist(**opt_params)
    # P(lower < X <= upper) computed via the CDF difference.
    mass_in_interval = (
        pm.math.exp(pm.logcdf(opt_distribution, upper))
        - pm.math.exp(pm.logcdf(opt_distribution, lower))
    ).eval()
    assert np.abs(mass_in_interval - mass) <= 1e-5


# Ill-posed cases: the requested 95% mass cannot fit in the interval, so the
# function should warn rather than fail silently.
@pytest.mark.parametrize(
    "distribution, lower, upper, init_guess, fixed_params",
    [
        (pm.Gamma, 0.1, 0.4, {"alpha": 1}, {"beta": 10}),
        (pm.Exponential, 0.1, 1, {"lam": 1}, {}),
        (pm.Binomial, 0, 2, {"p": 0.8}, {"n": 10}),
    ],
)
def test_find_constrained_prior_error_too_large(
    distribution, lower, upper, init_guess, fixed_params
):
    """A warning is raised when the target mass cannot be achieved."""
    with pytest.warns(UserWarning, match="instead of the requested 95%"):
        pm.find_constrained_prior(
            distribution,
            lower=lower,
            upper=upper,
            mass=0.95,
            init_guess=init_guess,
            fixed_params=fixed_params,
        )


def test_find_constrained_prior_input_errors():
    """Invalid arguments raise the documented exception types."""
    # missing param
    with pytest.raises(TypeError, match="required positional argument"):
        pm.find_constrained_prior(
            pm.StudentT,
            lower=0.1,
            upper=0.4,
            mass=0.95,
            init_guess={"mu": 170, "sigma": 3},
        )

    # mass too high
    with pytest.raises(AssertionError, match="has to be between 0.01 and 0.99"):
        pm.find_constrained_prior(
            pm.StudentT,
            lower=0.1,
            upper=0.4,
            mass=0.995,
            init_guess={"mu": 170, "sigma": 3},
            fixed_params={"nu": 7},
        )

    # mass too low
    with pytest.raises(AssertionError, match="has to be between 0.01 and 0.99"):
        pm.find_constrained_prior(
            pm.StudentT,
            lower=0.1,
            upper=0.4,
            mass=0.005,
            init_guess={"mu": 170, "sigma": 3},
            fixed_params={"nu": 7},
        )

    # non-scalar params
    with pytest.raises(NotImplementedError, match="does not work with non-scalar parameters yet"):
        pm.find_constrained_prior(
            pm.MvNormal,
            lower=0,
            upper=1,
            mass=0.95,
            init_guess={"mu": 5, "cov": np.asarray([[1, 0.2], [0.2, 1]])},
        )
import os
import math

import numpy as np
import matplotlib.pyplot as plt

from simuran.plot.figure import SimuranFigure
from neurochat.nc_utils import butter_filter
from lfp_atn_simuran.analysis.lfp_clean import LFPClean


def mne_plot(recording, base_dir, figures, **kwargs):
    """Clean the recording's LFP, store the diagnostic figure, return bad channels.

    :param recording: object exposing ``source_file`` (path) for naming.
    :param base_dir: root directory; the figure name is derived from the
        recording path relative to it.
    :param figures: list to which the resulting SimuranFigure is appended.
    :keyword clean_method: LFPClean method name (default "avg").
    :keyword clean_kwargs: extra kwargs forwarded to the cleaner (default {}).
    :keyword fmin / fmax: frequency band limits (defaults 1 / 100).
    :keyword image_format: figure file format (default "png").
    :return: the cleaner's detected bad channels.
    """
    clean_method = kwargs.get("clean_method", "avg")
    clean_kwargs = kwargs.get("clean_kwargs", {})
    fmin = kwargs.get("fmin", 1)
    fmax = kwargs.get("fmax", 100)
    image_format = kwargs.get("image_format", "png")

    cleaner = LFPClean(method=clean_method, visualise=True, show_vis=False)
    result = cleaner.clean(
        recording, min_f=fmin, max_f=fmax, method_kwargs=clean_kwargs
    )

    # Build a flat, unique figure name from the recording's directory path
    # relative to base_dir, e.g. "sub/dir" -> "sub--dir".
    source_no_ext = os.path.splitext(recording.source_file)[0]
    relative_dir = os.path.dirname(source_no_ext)[len(base_dir + os.sep):]
    figure_name = "--".join(relative_dir.split(os.sep))

    figures.append(
        SimuranFigure(
            result["fig"], figure_name, dpi=100, format=image_format, done=True
        )
    )
    return result["bad_channels"]
#!python3
import os
import sys
import logging
import json
import shutil

from util.validate import validate_init_params
from util.downloaders import DownloadGenomeToFNA, DownloadFASTQs, \
    GetGenomeOrganismName, download_mutantpool

"""
Module overview:
    1. Validate parameters and make sure they're all of the right type, etc.
    2. Download genbank file and fastq file(s).
    3. Create gene table file.
    4. Create config files for the function "RunFullProgram".
"""


def PrepareProgramInputs(params, cfg_d):
    """Validate params and download/build every input the pipeline needs.

    Args:
        params: (d) As imported by spec file.
        cfg_d: (d)
            gfu: GenomeFileUtil object
            tmp_dir: (s) path to tmp dir
            dfu: DataFileUtil object
            gt_obj: GeneTable object
            model_dir: (s) path to model's directory (should be scratch_dir)
            ws: Workspace object
        Adds the key:
            genome_fna_fp: file path to the genome FNA file.

    Returns:
        [genome_fna_fp, genes_table_fp, pool_fp, genome_scientific_name]
    """
    # Validated params.
    vp = validate_init_params(params)

    # Download genome in GBK format and convert it to FNA.
    genome_fna_fp = DownloadGenomeToFNA(cfg_d['gfu'], vp['genome_ref'],
                                        cfg_d['tmp_dir'])
    cfg_d['genome_fna_fp'] = genome_fna_fp
    genome_scientific_name = GetGenomeOrganismName(cfg_d['ws'],
                                                   vp['genome_ref'])

    # The gene table is created at tmp_dir/g2gt_results/genes.GC.
    g2gt_results = cfg_d['gt_obj'].genome_to_genetable(
        {'genome_ref': vp['genome_ref']})
    logging.info(g2gt_results)
    genes_table_fp = os.path.join(cfg_d['tmp_dir'], 'g2gt_results', 'genes.GC')

    pool_fp = os.path.join(cfg_d["tmp_dir"], 'pool.n10')
    download_mutantpool(vp["mutantpool_ref"], pool_fp, cfg_d["dfu"],
                        genome_ref=vp['genome_ref'])

    return [genome_fna_fp, genes_table_fp, pool_fp, genome_scientific_name]


def PrepareUserOutputs(workspace_name, dfu, op_HTML_dir):
    """Zip the HTML report directory to shock and build KBase report params.

    Args:
        workspace_name: (s) target workspace.
        dfu: DataFileUtil object.
        op_HTML_dir: (s) directory holding the generated HTML report.

    Returns:
        (d) report params ready for KBaseReport.create_extended_report.
    """
    # Preparing HTML output.
    HTML_report_shock_id = dfu.file_to_shock({
        "file_path": op_HTML_dir,
        "pack": "zip"
    })['shock_id']

    HTML_report_d_l = [{
        "shock_id": HTML_report_shock_id,
        "name": os.path.basename(
            os.path.join(op_HTML_dir, "FullDisplay_index.html")),
        "label": "MutantReport",
        "description": "HTML Summary Report for MapTnSeq and Design Random "
                       "Pool app"
    }]

    report_params = {
        'workspace_name': workspace_name,
        "html_links": HTML_report_d_l,
        "direct_html_link_index": 0,
        "html_window_height": 333,
        "message": "Finished Running MapTnSeq"
    }

    return report_params


# MTS - Map Tn Seq, DRP - Design Random Pool
def Create_MTS_DRP_config(cfg_d, vp):
    """Build the config dicts for MapTnSeq and DesignRandomPool.

    Args:
        cfg_d: (as above in PrepareProgramInputs) plus:
            fastq_fp_l: (list<s>) list of file paths
            genome_fna_fp: file path to the genome FNA file
        vp: (d) must contain all keys used below.

    Returns:
        [MTS_cfg, DRP_cfg]
    """
    map_tnseq_config_dict = {
        "values": {
            "debug": False,
            "keepblat8": True,
            "keepTMPfna": True,
            "flanking": 5,
            "wobbleAllowed": 2,
            "tmp_dir": cfg_d["tmp_dir"],
            "tileSize": 11,
            "stepSize": 11,
            "blatcmd": cfg_d["blat_cmd"],
            "model_fp": cfg_d["model_fp"],
            "maxReads": vp["maxReads"],
            "minQuality": vp["minQuality"],
            "minIdentity": vp["minIdentity"],
            "minScore": vp["minScore"],
            "delta": vp["delta"],
            "fastq_fp_list": cfg_d['fastq_fp_l'],
            "orig_fq_fns": cfg_d['orig_fq_fns'],
            "genome_fp": cfg_d['genome_fna_fp']
        }
    }

    design_random_pool_config_dict = {
        "values": {
            "minN": vp["minN"],
            "minFrac": vp["minFrac"],
            "minRatio": vp["minRatio"],
            "maxQBeg": vp["maxQBeg"],
            "tmp_dir": cfg_d["tmp_dir"],
            "R_fp": cfg_d["R_fp"],
            "R_op_fp": cfg_d["R_op_fp"],
            "genes_table_fp": cfg_d["gene_table_fp"]
        }
    }

    return [map_tnseq_config_dict, design_random_pool_config_dict]


# TnSeq model table: model name -> (model_str, past_end_str).
# model_str is the part of the transposon in which the barcode sits
# (N = barcode position); past_end_str is the vector sequence after the
# transposon (may be empty).
TNSEQ_MODELS = {
    "Sc_Tn5": (
        "nnnnnGATGTCCACGAGGTCTCTNNNNNNNNNNNNNNNNNNNNCGTACGCTGCAGGTCGACCAGCAGCTATGACATGAAGATGTGTATAAGAGACAG",
        "GGAAGGGCCCGACGTCGCATGCTCCCGGCCGCCATGGCGGCCGCGGGAATTCGATTGGGCCCAGGTACCAACTACGTCAGGTGGCACTTT"),
    "ezTn5_Tet_Bifido": (
        "nnnnnnGATGTCCACGAGGTCTCTNNNNNNNNNNNNNNNNNNNNCGTACGCTGCACCTCGACAGATGTGTATAAGAGACAG",
        ""),
    "ezTn5_kan1": (
        "nnnnnnCTAAGATGTCCACGAGGTCTCTNNNNNNNNNNNNNNNNNNNNCGTACGCTGCAGGTCGACAGATGTGTATAAGAGACAG",
        ""),
    "ezTn5_kanU": (
        "nnnnnnGATGTCCACGAGGTCTCTNNNNNNNNNNNNNNNNNNNNCGTACGCTGCAGGTCGACAGATGTGTATAAGAGACAG",
        ""),
    "magic_Tn5": (
        "nnnnnnGATGTCCACGAGGTCTCTNNNNNNNNNNNNNNNNNNNNCGTACGCTGCAGGTCGACCAGCGGCCGGCCGGTTGAGATGTGTATAAGAGACAG",
        "TCGACGGCTTGGTTTCATAAGCCATCCGCTTGCCCTCATCTGTTACGCCGGCGGTAGCCGGCCAGCCTCGCAGAGCAGGATTCCCGTTGA"),
    "magic_mariner": (
        "nnnnnnGATGTCCACGAGGTCTCTNNNNNNNNNNNNNNNNNNNNCGTACGCTGCAGGTCGACCAGCGGCCGGCCAGACCGGGGACTTATCAGCCAACCTGT",
        "TATGTGTTGGGTAACGCCAGGGTTTTCCCAGTCACGACGTTGTAAAACGACGGCCAGTGAATTAATTCTTGCTTATCGGCCAGCCTCGCAGAGCAGGATTCCCGTTGAGCACCGCCAGGTGCGAATAAGGGACAGTGAAGAAG"),
    "magic_mariner.2": (
        "nnnnnnnnnnnnnGATGTCCACGAGGTCTCTNNNNNNNNNNNNNNNNNNNNCGTACGCTGCAGGTCGACCAGCGGCCGGCCAGACCGGGGACTTATCAGCCAACCTGT",
        "TATGTGTTGGGTAACGCCAGGGTTTTCCCAGTCACGACGTTGTAAAACGACGGCCAGTGAATTAATTCTTGCTTATCGGCCAGCCTCGCAGAGCAGGATTCCCGTTGAGCACCGCCAGGTGCGAATAAGGGACAGTGAAGAAG"),
    "pHIMAR_kan": (
        "nnnnnnCGCCCTGCAGGGATGTCCACGAGGTCTCTNNNNNNNNNNNNNNNNNNNNCGTACGCTGCAGGTCGACGGCCGGCCAGACCGGGGACTTATCAGCCAACCTGT",
        "TATGTGTTGGGTAACGCCAGGGTTTTCCCAGTCACGACGTTGTAAAACGACGGCCAGTGAATTAATTCTTGAAGA"),
    "pKMW3": (
        "nnnnnnCGCCCTGCAGGGATGTCCACGAGGTCTCTNNNNNNNNNNNNNNNNNNNNCGTACGCTGCAGGTCGACGGCCGGCCAGACCGGGGACTTATCAGCCAACCTGT",
        "TATGTGTTGGGTAACGCCAGGGTTTTCCCAGTCACGACGTTGTAAAACGACGGCCAGTGAATTAATTCTTGAAGA"),
    "pKMW3_universal": (
        "nnnnnnGATGTCCACGAGGTCTCTNNNNNNNNNNNNNNNNNNNNCGTACGCTGCAGGTCGACGGCCGGCCAGACCGGGGACTTATCAGCCAACCTGT",
        "TATGTGTTGGGTAACGCCAGGGTTTTCCCAGTCACGACGTTGTAAAACGACGGCCAGTGAATTAATTCTTGAAGA"),
    "pKMW7": (
        "nnnnnnCGCCCTGCAGGGATGTCCACGAGGTCTCTNNNNNNNNNNNNNNNNNNNNCGTACGCTGCAGGTCGACGGCCGGCCGGTTGAGATGTGTATAAGAGACAG",
        "TCGACGGCTTGGTTTCATCAGCCATCCGCTTGCCCTCATCTGTTACGCCGGCGGTAGCCGGCCAGCCTCGCAGAGC"),
    "pKMW7_U": (
        "nnnnnnGATGTCCACGAGGTCTCTNNNNNNNNNNNNNNNNNNNNCGTACGCTGCAGGTCGACGGCCGGCCGGTTGAGATGTGTATAAGAGACAG",
        "TCGACGGCTTGGTTTCATCAGCCATCCGCTTGCCCTCATCTGTTACGCCGGCGGTAGCCGGCCAGCCTCGCAGAGC"),
}


def get_model_and_pastEnd_strs(standard_model_name):
    """Return the two parts of a TnSeq model.

    Args:
        standard_model_name (str): one of the keys of TNSEQ_MODELS.

    Description:
        The model string is the part of the transposon in which the barcode
        sits; the past-end string is what comes after the transposon.

    Returns:
        model_str, past_end_str

    Raises:
        Exception: when the model name is unknown.
    """
    # Table lookup replaces the former long if/elif chain; behavior and the
    # raised message are unchanged.
    if standard_model_name not in TNSEQ_MODELS:
        raise Exception(f"Model name {standard_model_name} not recognized.")
    model_str, past_end_str = TNSEQ_MODELS[standard_model_name]
    logging.info(f"Model String: '{model_str}'."
                 f" Past End String: '{past_end_str}'.")
    return model_str, past_end_str


def write_model_to_file(op_fp, model_str, past_end_str):
    """Write the two components of the model to *op_fp*.

    Args:
        op_fp (str): path to output model file.
        model_str (str): complete model string.
        past_end_str (str): complete past-end string (short vector sequence
            after the transposon).
    """
    with open(op_fp, "w") as g:
        g.write(model_str + "\n" + past_end_str)
    logging.info(f"Wrote TnSeq model to file {op_fp}.")


def clear_dir(dir_path):
    """Best-effort removal of every file/link/subdirectory inside *dir_path*."""
    for filename in os.listdir(dir_path):
        file_path = os.path.join(dir_path, filename)
        try:
            if os.path.isfile(file_path) or os.path.islink(file_path):
                os.unlink(file_path)
            elif os.path.isdir(file_path):
                shutil.rmtree(file_path)
        except Exception as e:
            # Deliberately best-effort: report and keep going.
            print('Failed to delete %s. Reason: %s' % (file_path, e))


def test():
    return None


def main():
    test()
    return None


if __name__ == "__main__":
    main()
from shutil import rmtree
from tarfile import open as tar_open
from os.path import join

from django.conf import settings
from django.contrib import admin, messages
from django.core.exceptions import ValidationError
from django import forms
from django.core.management import call_command
from django.urls import reverse_lazy
from django.views.generic import FormView, TemplateView


class UploadForm(forms.Form):
    """Accepts a gzipped tar archive containing the built frontend (dist/)."""

    file = forms.FileField()

    def process(self):
        """Replace the deployed frontend with the uploaded archive.

        Deletes the current ``dist`` directory, extracts the archive into
        VUE_ROOT, copies ``dist/index.html`` into the Django templates dir
        and refreshes static files.

        SECURITY NOTE(review): ``extractall`` on an uploaded tar can write
        outside VUE_ROOT via ``../`` members (tar path traversal). This form
        must stay admin-only; consider ``TarFile.extractall(..., filter="data")``
        (Python 3.12+) or explicit member validation.
        """
        # Delete directory if present, then extract new archive.
        rmtree(join(settings.VUE_ROOT, 'dist'), ignore_errors=True)
        with tar_open(fileobj=self.cleaned_data['file'].file,
                      mode='r:gz') as archive:
            archive.extractall(settings.VUE_ROOT)
            # Extract index.html to templates dir.
            archive.extract(archive.getmember('dist/index.html'),
                            settings.TEMPLATES[0]['DIRS'][0])
        # Fix: the explicit archive.close() was redundant -- the ``with``
        # block already closes the archive on exit.
        call_command('collectstatic', verbosity=0, interactive=False)

    def clean_file(self):
        """Reject uploads whose content type is not a gzip archive."""
        # Browsers variously report gzip uploads as application/gzip or
        # application/x-gzip; accept both (backward-compatible widening).
        if self.cleaned_data['file'].content_type not in (
                'application/gzip', 'application/x-gzip'):
            raise ValidationError("Not a valid file")
        return self.cleaned_data['file']


class FrontendUpdateView(FormView):
    """Admin-facing form view that deploys an uploaded frontend build."""

    form_class = UploadForm
    template_name = 'frontend_update.html'
    success_url = reverse_lazy('admin:index')

    def get_context_data(self, **kwargs):
        # Provide the admin-site chrome variables the admin base template
        # expects so this page renders inside the admin skin.
        context = super().get_context_data(**kwargs)
        context.update({
            'title': 'Frontend Upload',
            'site_title': admin.site.site_title,
            'site_header': admin.site.site_header,
            'site_url': admin.site.site_url,
            'has_permission': admin.site.has_permission(self.request),
            'is_popup': False,
        })
        return context

    def form_valid(self, form):
        form.process()
        messages.add_message(self.request, messages.INFO,
                             'The frontend code deployment has started')
        return super().form_valid(form)


class VueView(TemplateView):
    """Serves the built single-page app's index.html."""

    template_name = 'dist/index.html'
import os, stat
import subprocess
from odoo_env.__init__ import __version__
from odoo_env.messages import Msg
from odoo_env.odoo_conf import OdooConf

# Shared message/console helper used by every command.
msg = Msg()


class Command:
    def __init__(self, parent, command=False, usr_msg=False, args=False,
                 client_name=False):
        """
        :param parent: the OdooEnv object that owns this command and carries
                       the global options (e.g. ``verbose``)
        :param command: the shell command to execute
        :param usr_msg: the message to show the user
        :param args: arguments to check; they decide whether it runs or not
        :param client_name: name of the client this command acts on
        :return: the Command object to be executed later
        """
        self._parent = parent
        self._command = command
        self._usr_msg = usr_msg
        self._args = args
        self._client_name = client_name

    def check(self):
        # If there are no arguments to check, no check is required: let it
        # pass.
        if not self._args:
            return True
        # Delegate the check to the specific subclass.
        return self.check_args()

    def check_args(self):
        # Subclasses that take args must implement their own check.
        raise NotImplementedError

    def execute(self):
        cmd = self.command
        self.subrpocess_call(cmd)

    def subrpocess_call(self, params, shell=True):
        """Run command or command list with arguments.

        Wait for commands to complete.
        If args.verbose is true, prints command.
        If any errors, stop list execution and return the error.
        If shell=True go shell mode (only for --cron-jobs).

        :param params: command or command list
        :return: error return
        """
        # If not a list, convert to a one element list.
        params = params if isinstance(params, list) else [params]
        # Traverse the list executing shell commands.
        for _cmd in params:
            # If shell=True we do not split the command string.
            cmd = _cmd if shell else _cmd.split()
            if self._parent.verbose:
                msg.run(' ')
                if shell:
                    msg.run(cmd)
                else:
                    msg.run(' '.join(cmd))
                msg.run(' ')
            ret = subprocess.call(cmd, shell=shell)
            if ret:
                # Non-zero exit status: stop and report the failure.
                return msg.err('The command %s returned with %s' %
                               (cmd, str(ret)))

    @property
    def args(self):
        return self._args

    @property
    def usr_msg(self):
        return self._usr_msg

    @property
    def command(self):
        return self._command


class CreateGitignore(Command):
    def execute(self):
        # Create the .gitignore at the path carried in the command.
        values = ['.idea/\n', '*.pyc\n', '__pycache__\n']
        with open(self._command, 'w') as _f:
            for value in values:
                _f.write(value)

    @staticmethod
    def check_args():
        return True


class MakedirCommand(Command):
    def check_args(self):
        # If the directory already exists, do not create it.
        return not os.path.isdir(self._args)


class ExtractSourcesCommand(Command):
    @staticmethod
    def check_args():
        return True


class CloneRepo(Command):
    def check_args(self):
        # If the directory does not exist, allow cloning.
        return not os.path.isdir(self._args)


class PullRepo(Command):
    def check_args(self):
        # If the directory exists, allow pulling.
        return os.path.isdir(self._args)


class PullImage(Command):
    @staticmethod
    def check_args():
        return True


class CreateNginxTemplate(Command):
    def check_args(self):
        # If the file already exists, do not overwrite it.
        return not os.path.isfile(self._args)

    def execute(self):
        # Read the bundled nginx.conf template.
        with open('/usr/local/nginx.conf', 'r') as _f:
            conf = _f.read()
        # Substitute the client name into the config.
        conf = conf.replace('$client$', self._client_name)
        with open(self._command, 'w') as _f:
            _f.write(conf)


class WriteConfigFile(Command):
    def check_args(self):
        return True

    def execute(self):
        # Obtain the client from the args.
        arg = self._args
        client = arg['client']
        # Collect the repositories under sources: walk the tree and record
        # every directory that contains a .git entry.
        repos = list()
        sources = client.sources_dir
        for root, dirs, files in os.walk(sources):
            # A .git directory marks a repository.
            if '.git' in dirs:
                repos.append(root.replace(sources,''))
            # A .git *file* marks a repository too (e.g. submodule/worktree).
            if '.git' in files:
                repos.append(root.replace(sources, ''))
        repos = ['/opt/odoo/custom-addons/' + x for x in repos]
        repos = ','.join(repos)
        # Configuration defined in the manifest (may be empty).
        conf = client.config or []
        # Overlay the added/changed manifest entries on the existing config;
        # this preserves values already on disk, e.g. the password.
        odoo_conf = OdooConf(client.config_file)
        odoo_conf.read_config()
        odoo_conf.add_list_data(conf)
        # These three settings are always overwritten.
        odoo_conf.add_line('addons_path = %s' % repos)
        odoo_conf.add_line('unaccent = True')
        odoo_conf.add_line('data_dir = /opt/odoo/data')
        if client.debug:
            # Debug mode: single process, no time limits.
            odoo_conf.add_line('workers = 0')
            odoo_conf.add_line('max_cron_threads = 0')
            odoo_conf.add_line('limit_time_cpu = 0')
            odoo_conf.add_line('limit_time_real = 0')
        else:
            # You should use 2 worker threads + 1 cron thread per available CPU
            if 'workers' not in odoo_conf.config:
                odoo_conf.add_line('workers = %s' % (os.cpu_count() * 2))
            if 'max_cron_threads' not in odoo_conf.config:
                odoo_conf.add_line('max_cron_threads = %s' % os.cpu_count())
        odoo_conf.write_config()
        # Fix odoo.conf permissions (owner read/write, world read/write).
        os.chmod(client.config_file,
                 stat.S_IREAD + stat.S_IWRITE + stat.S_IWOTH + stat.S_IROTH)


class MessageOnly(Command):
    @staticmethod
    def check_args():
        """Always let it pass."""
        return True

    @staticmethod
    def execute():
        pass
#!/usr/bin/env python
# William Lam
# www.virtuallyghetto.com
# NOTE(review): Python 2 script (print statements).
"""
vSphere Python SDK program for listing all ESXi datastores and their
associated devices
"""
import argparse
import atexit
import json

from pyVim import connect
from pyVmomi import vmodl
from pyVmomi import vim

from tools import cli


def get_args():
    """
    Supports the command-line arguments listed below.
    """
    parser = argparse.ArgumentParser(
        description='Process args for retrieving all the Virtual Machines')
    parser.add_argument('-s', '--host',
                        required=True,
                        action='store',
                        help='Remote host to connect to')
    parser.add_argument('-o', '--port',
                        type=int,
                        default=443,
                        action='store',
                        help='Port to connect on')
    parser.add_argument('-u', '--user',
                        required=True,
                        action='store',
                        help='User name to use when connecting to host')
    parser.add_argument('-p', '--password',
                        required=True,
                        action='store',
                        help='Password to use when connecting to host')
    parser.add_argument('-j', '--json',
                        default=False,
                        action='store_true',
                        help='Output to JSON')
    args = parser.parse_args()
    return args


# http://stackoverflow.com/questions/1094841/
def sizeof_fmt(num):
    """
    Returns the human readable version of a file size

    :param num: size in bytes
    :return: formatted string, e.g. "3.1MB"
    """
    for item in ['bytes', 'KB', 'MB', 'GB']:
        if num < 1024.0:
            return "%3.1f%s" % (num, item)
        num /= 1024.0
    return "%3.1f%s" % (num, 'TB')


def print_fs(host_fs):
    """
    Prints the host file system volume info

    :param host_fs: a HostFileSystemMountInfo-like object (has .volume)
    :return: None
    """
    print "{}\t{}\t".format("Datastore: ", host_fs.volume.name)
    print "{}\t{}\t".format("UUID: ", host_fs.volume.uuid)
    print "{}\t{}\t".format("Capacity: ", sizeof_fmt(
        host_fs.volume.capacity))
    print "{}\t{}\t".format("VMFS Version: ", host_fs.volume.version)
    print "{}\t{}\t".format("Is Local VMFS: ", host_fs.volume.local)
    print "{}\t{}\t".format("SSD: ", host_fs.volume.ssd)


def main():
    """
    Simple command-line program for listing all ESXi datastores and their
    associated devices
    """
    args = get_args()
    cli.prompt_for_password(args)
    try:
        # NOTE(review): SmartConnectNoSSL disables certificate verification.
        service_instance = connect.SmartConnectNoSSL(host=args.host,
                                                     user=args.user,
                                                     pwd=args.password,
                                                     port=int(args.port))
        if not service_instance:
            print("Could not connect to the specified host using specified "
                  "username and password")
            return -1

        # Ensure the session is torn down on interpreter exit.
        atexit.register(connect.Disconnect, service_instance)
        content = service_instance.RetrieveContent()
        # Search for all ESXi hosts
        objview = content.viewManager.CreateContainerView(content.rootFolder,
                                                          [vim.HostSystem],
                                                          True)
        esxi_hosts = objview.view
        objview.Destroy()

        datastores = {}
        for esxi_host in esxi_hosts:
            if not args.json:
                print "{}\t{}\t\n".format("ESXi Host: ", esxi_host.name)

            # All Filesystems on ESXi host
            storage_system = esxi_host.configManager.storageSystem
            host_file_sys_vol_mount_info = \
                storage_system.fileSystemVolumeInfo.mountInfo

            datastore_dict = {}
            # Map all filesystems
            for host_mount_info in host_file_sys_vol_mount_info:
                # Extract only VMFS volumes
                if host_mount_info.volume.type == "VMFS":
                    extents = host_mount_info.volume.extent
                    if not args.json:
                        print_fs(host_mount_info)
                    else:
                        datastore_details = {
                            'uuid': host_mount_info.volume.uuid,
                            'capacity': host_mount_info.volume.capacity,
                            'vmfs_version': host_mount_info.volume.version,
                            'local': host_mount_info.volume.local,
                            'ssd': host_mount_info.volume.ssd
                        }
                    extent_arr = []
                    extent_count = 0
                    for extent in extents:
                        if not args.json:
                            print "{}\t{}\t".format(
                                "Extent[" + str(extent_count) + "]:",
                                extent.diskName)
                            extent_count += 1
                        else:
                            # create an array of the devices backing the given
                            # datastore
                            extent_arr.append(extent.diskName)
                            # add the extent array to the datastore info
                            datastore_details['extents'] = extent_arr
                            # associate datastore details with datastore name
                            datastore_dict[host_mount_info.volume.name] = \
                                datastore_details
                    if not args.json:
                        print

            # associate ESXi host with the datastore it sees
            datastores[esxi_host.name] = datastore_dict

        if args.json:
            print json.dumps(datastores)

    except vmodl.MethodFault as error:
        print "Caught vmodl fault : " + error.msg
        return -1

    return 0


# Start program
if __name__ == "__main__":
    main()
### example code of GIN using DGL
import torch
from torch.utils.data import DataLoader
import dgl.function as fn
import dgl
import torch.optim as optim
import torch.nn.functional as F
from tqdm import tqdm
import argparse
import time
import numpy as np

### importing OGB
### for loading dataset
from ogb.graphproppred.dataset_dgl import DglGraphPropPredDataset, collate_dgl
### for encoding raw molecule features
from ogb.graphproppred.mol_encoder import AtomEncoder, BondEncoder
### for evaluation
from ogb.graphproppred import Evaluator

# Multi-task binary classification loss; NaN targets (missing labels) are
# masked out in train() before this is applied.
criterion = torch.nn.BCEWithLogitsLoss()


class GINConv(torch.nn.Module):
    """
    - GIN architecture.
    - Assume both node_feat and edge_feat have the dimensionality of emb_dim.
    """
    def __init__(self, emb_dim):
        super(GINConv, self).__init__()
        # Two-layer MLP applied after neighbourhood aggregation.
        self.mlp = torch.nn.Sequential(torch.nn.Linear(emb_dim, 2*emb_dim),
                                       torch.nn.BatchNorm1d(2*emb_dim),
                                       torch.nn.ReLU(),
                                       torch.nn.Linear(2*emb_dim, emb_dim))
        # Learnable epsilon from the GIN formulation, initialised to 0.
        self.eps = torch.nn.Parameter(torch.Tensor([0]))

    def forward(self, graph, node_feat, edge_feat):
        # local_var() keeps the ndata/edata writes below from leaking into
        # the caller's graph.
        graph = graph.local_var()
        graph.ndata['h_n'] = node_feat
        graph.edata['h_e'] = edge_feat
        ### u, v, e represent source nodes, destination nodes and edges among them
        # Message = source-node feature + edge feature, summed over neighbours.
        graph.update_all(fn.u_add_e('h_n', 'h_e', 'm'), fn.sum('m', 'neigh'))
        rst = (1 + self.eps) * node_feat + graph.ndata['neigh']
        rst = self.mlp(rst)
        return rst


class GIN(torch.nn.Module):
    """Stack of GINConv layers with mean-pooling graph readout."""

    def __init__(self, num_layer = 5, emb_dim = 100, num_task = 2,
                 device = "cpu"):
        super(GIN, self).__init__()
        self.num_layer = num_layer
        self.gins = torch.nn.ModuleList()
        self.batch_norms = torch.nn.ModuleList()
        for layer in range(self.num_layer):
            self.gins.append(GINConv(emb_dim))
            self.batch_norms.append(torch.nn.BatchNorm1d(emb_dim))

        ### convenient module to encode/embed raw molecule node/edge features.
        ### (TODO) make it more efficient.
        self.atom_encoder = AtomEncoder(emb_dim)
        self.bond_encoder = BondEncoder(emb_dim)

        # Final linear head: one logit per task.
        self.graph_pred_linear = torch.nn.Linear(emb_dim, num_task)

        self.device = device

    def forward(self, g):
        # Raw features are moved to the model device here; the graph itself
        # stays where the loader produced it -- presumably CPU. TODO confirm
        # device handling when running on GPU.
        h_node = self.atom_encoder(g.ndata["feat"].to(self.device))
        h_edge = self.bond_encoder(g.edata["feat"].to(self.device))

        ### iterative message passing to obtain node embeddings
        for layer in range(self.num_layer):
            h_node = self.gins[layer](g, h_node, h_edge)
            h_node = self.batch_norms[layer](h_node)
            h_node = F.relu(h_node)

        ### pooling
        g.ndata['h_node'] = h_node
        h_graph = dgl.mean_nodes(g, 'h_node')

        return self.graph_pred_linear(h_graph)


def train(model, device, loader, optimizer):
    """One epoch of training; NaN labels (missing tasks) are masked out."""
    model.train()

    for step, (graphs, labels) in enumerate(tqdm(loader, desc="Iteration")):
        labels = labels.to(device)
        pred = model(graphs)
        optimizer.zero_grad()
        # NaN != NaN, so this mask is False exactly at missing labels.
        is_valid = labels == labels
        loss = criterion(pred.to(torch.float32)[is_valid],
                         labels.to(torch.float32)[is_valid])
        loss.backward()
        optimizer.step()


def eval(model, device, loader, evaluator):
    """Collect predictions over ``loader`` and score them with ``evaluator``."""
    model.eval()
    y_true = []
    y_pred = []

    for step, (graphs, labels) in enumerate(tqdm(loader, desc="Iteration")):
        with torch.no_grad():
            pred = model(graphs)
        y_true.append(labels.view(pred.shape).detach().cpu())
        y_pred.append(pred.detach().cpu())

    y_true = torch.cat(y_true, dim = 0).numpy()
    y_pred = torch.cat(y_pred, dim = 0).numpy()

    input_dict = {"y_true": y_true, "y_pred": y_pred}
    return evaluator.eval(input_dict)


def main():
    # Training settings
    parser = argparse.ArgumentParser(description='GIN with Pytorch Geometrics')
    parser.add_argument('--device', type=int, default=0,
                        help='which gpu to use if any (default: 0)')
    parser.add_argument('--batch_size', type=int, default=32,
                        help='input batch size for training (default: 32)')
    parser.add_argument('--epochs', type=int, default=100,
                        help='number of epochs to train (default: 100)')
    parser.add_argument('--num_workers', type=int, default=0,
                        help='number of workers (default: 0)')
    parser.add_argument('--dataset', type=str, default="ogbg-mol-tox21",
                        help='dataset name (default: ogbg-mol-tox21)')
    args = parser.parse_args()

    device = torch.device("cuda:" + str(args.device)) \
        if torch.cuda.is_available() else torch.device("cpu")

    ### automatic dataloading and splitting
    dataset = DglGraphPropPredDataset(name = args.dataset)
    splitted_idx = dataset.get_idx_split()

    ### automatic evaluator. takes dataset name as input
    evaluator = Evaluator(args.dataset)

    train_loader = DataLoader(dataset[splitted_idx["train"]],
                              batch_size=args.batch_size, shuffle=True,
                              collate_fn = collate_dgl,
                              num_workers = args.num_workers)
    valid_loader = DataLoader(dataset[splitted_idx["valid"]],
                              batch_size=args.batch_size, shuffle=False,
                              collate_fn = collate_dgl,
                              num_workers = args.num_workers)
    test_loader = DataLoader(dataset[splitted_idx["test"]],
                             batch_size=args.batch_size, shuffle=False,
                             collate_fn = collate_dgl,
                             num_workers = args.num_workers)

    model = GIN(num_task = dataset.num_tasks, device = device).to(device)
    optimizer = optim.Adam(model.parameters(), lr=0.001)

    for epoch in range(1, args.epochs + 1):
        train(model, device, train_loader, optimizer)

        #print("Evaluating training...")
        #print(eval(model, device, train_loader, evaluator))
        print("Evaluating validation:")
        print(eval(model, device, valid_loader, evaluator))


if __name__ == "__main__":
    main()
from lib import agent from lib import congaboard from lib import node # # end-of-game logging # def log (loser, movecount): winner = 1 if 0 == loser else 0 print 'GAME OVER. PLAYER ', winner, 'WINS.' for p in range(0,2): print 'player', p, ': moved', movecount[p], 'times' #human will play as player 1 player = agent.Agent(0, 0, 0) #AI plays as player 2 with maxDepth 3 opponent = agent.Agent(1, 0, 3) #setup new Conga board board = congaboard.CongaBoard() MoveCount = [ 0, 0 ] # # Continuously make moves until # one of the Agents can't move # while (True): board.draw() player.updateBoard(board) #get move from stdin while True: #prompt user a = raw_input('\nMove from where?\n') b = raw_input('\nTo where?\n') fromTile = [ int(num) for num in a.split(',') ] toTile = [ int(num) for num in b.split(',') ] try: board = player.makeMove(fromTile, toTile, player.player, board) #@player is still in the game MoveCount[player.player] += 1 break except: print 'NOT A VALID MOVE. Please try again with a valid move.' print '<< player', player.player, '>> has moved:' board.draw() opponent.updateBoard(board) bestMove = opponent.getBestMove() #end game if player cannot move if not bestMove: #GAME OVER board.gameOver = True log(opponent.player) break #@opponent is still in the game MoveCount[opponent.player] += 1 #apply the move - update @board board = opponent.makeMove(bestMove['from'], bestMove['to'], opponent.player, board) print '<< player ', opponent.player, '>> has moved:'
import unittest import os, glob from test_all import db, test_support, get_new_environment_path, \ get_new_database_path #---------------------------------------------------------------------- class DBEnv(unittest.TestCase): def setUp(self): self.homeDir = get_new_environment_path() self.env = db.DBEnv() def tearDown(self): self.env.close() del self.env test_support.rmtree(self.homeDir) class DBEnv_general(DBEnv) : def test_get_open_flags(self) : flags = db.DB_CREATE | db.DB_INIT_MPOOL self.env.open(self.homeDir, flags) self.assertEqual(flags, self.env.get_open_flags()) def test_get_open_flags2(self) : flags = db.DB_CREATE | db.DB_INIT_MPOOL | \ db.DB_INIT_LOCK | db.DB_THREAD self.env.open(self.homeDir, flags) self.assertEqual(flags, self.env.get_open_flags()) if db.version() >= (4, 7) : def test_lk_partitions(self) : for i in [10, 20, 40] : self.env.set_lk_partitions(i) self.assertEqual(i, self.env.get_lk_partitions()) def test_getset_intermediate_dir_mode(self) : self.assertEqual(None, self.env.get_intermediate_dir_mode()) for mode in ["rwx------", "rw-rw-rw-", "rw-r--r--"] : self.env.set_intermediate_dir_mode(mode) self.assertEqual(mode, self.env.get_intermediate_dir_mode()) self.assertRaises(db.DBInvalidArgError, self.env.set_intermediate_dir_mode, "abcde") if db.version() >= (4, 6) : def test_thread(self) : for i in [16, 100, 1000] : self.env.set_thread_count(i) self.assertEqual(i, self.env.get_thread_count()) def test_cache_max(self) : for size in [64, 128] : size = size*1024*1024 # Megabytes self.env.set_cache_max(0, size) size2 = self.env.get_cache_max() self.assertEqual(0, size2[0]) self.assertTrue(size <= size2[1]) self.assertTrue(2*size > size2[1]) if db.version() >= (4, 4) : def test_mutex_stat(self) : self.env.open(self.homeDir, db.DB_CREATE | db.DB_INIT_MPOOL | db.DB_INIT_LOCK) stat = self.env.mutex_stat() self.assertTrue("mutex_inuse_max" in stat) def test_lg_filemode(self) : for i in [0600, 0660, 0666] : self.env.set_lg_filemode(i) 
self.assertEqual(i, self.env.get_lg_filemode()) def test_mp_max_openfd(self) : for i in [17, 31, 42] : self.env.set_mp_max_openfd(i) self.assertEqual(i, self.env.get_mp_max_openfd()) def test_mp_max_write(self) : for i in [100, 200, 300] : for j in [1, 2, 3] : j *= 1000000 self.env.set_mp_max_write(i, j) v=self.env.get_mp_max_write() self.assertEqual((i, j), v) def test_invalid_txn(self) : # This environment doesn't support transactions self.assertRaises(db.DBInvalidArgError, self.env.txn_begin) def test_mp_mmapsize(self) : for i in [16, 32, 64] : i *= 1024*1024 self.env.set_mp_mmapsize(i) self.assertEqual(i, self.env.get_mp_mmapsize()) def test_tmp_dir(self) : for i in ["a", "bb", "ccc"] : self.env.set_tmp_dir(i) self.assertEqual(i, self.env.get_tmp_dir()) def test_flags(self) : self.env.set_flags(db.DB_AUTO_COMMIT, 1) self.assertEqual(db.DB_AUTO_COMMIT, self.env.get_flags()) self.env.set_flags(db.DB_TXN_NOSYNC, 1) self.assertEqual(db.DB_AUTO_COMMIT | db.DB_TXN_NOSYNC, self.env.get_flags()) self.env.set_flags(db.DB_AUTO_COMMIT, 0) self.assertEqual(db.DB_TXN_NOSYNC, self.env.get_flags()) self.env.set_flags(db.DB_TXN_NOSYNC, 0) self.assertEqual(0, self.env.get_flags()) def test_lk_max_objects(self) : for i in [1000, 2000, 3000] : self.env.set_lk_max_objects(i) self.assertEqual(i, self.env.get_lk_max_objects()) def test_lk_max_locks(self) : for i in [1000, 2000, 3000] : self.env.set_lk_max_locks(i) self.assertEqual(i, self.env.get_lk_max_locks()) def test_lk_max_lockers(self) : for i in [1000, 2000, 3000] : self.env.set_lk_max_lockers(i) self.assertEqual(i, self.env.get_lk_max_lockers()) def test_lg_regionmax(self) : for i in [128, 256, 1000] : i = i*1024*1024 self.env.set_lg_regionmax(i) j = self.env.get_lg_regionmax() self.assertTrue(i <= j) self.assertTrue(2*i > j) def test_lk_detect(self) : flags= [db.DB_LOCK_DEFAULT, db.DB_LOCK_EXPIRE, db.DB_LOCK_MAXLOCKS, db.DB_LOCK_MINLOCKS, db.DB_LOCK_MINWRITE, db.DB_LOCK_OLDEST, db.DB_LOCK_RANDOM, db.DB_LOCK_YOUNGEST] 
flags.append(db.DB_LOCK_MAXWRITE) for i in flags : self.env.set_lk_detect(i) self.assertEqual(i, self.env.get_lk_detect()) def test_lg_dir(self) : for i in ["a", "bb", "ccc", "dddd"] : self.env.set_lg_dir(i) self.assertEqual(i, self.env.get_lg_dir()) def test_lg_bsize(self) : log_size = 70*1024 self.env.set_lg_bsize(log_size) self.assertTrue(self.env.get_lg_bsize() >= log_size) self.assertTrue(self.env.get_lg_bsize() < 4*log_size) self.env.set_lg_bsize(4*log_size) self.assertTrue(self.env.get_lg_bsize() >= 4*log_size) def test_setget_data_dirs(self) : dirs = ("a", "b", "c", "d") for i in dirs : self.env.set_data_dir(i) self.assertEqual(dirs, self.env.get_data_dirs()) def test_setget_cachesize(self) : cachesize = (0, 512*1024*1024, 3) self.env.set_cachesize(*cachesize) self.assertEqual(cachesize, self.env.get_cachesize()) cachesize = (0, 1*1024*1024, 5) self.env.set_cachesize(*cachesize) cachesize2 = self.env.get_cachesize() self.assertEqual(cachesize[0], cachesize2[0]) self.assertEqual(cachesize[2], cachesize2[2]) # Berkeley DB expands the cache 25% accounting overhead, # if the cache is small. self.assertEqual(125, int(100.0*cachesize2[1]/cachesize[1])) # You can not change configuration after opening # the environment. self.env.open(self.homeDir, db.DB_CREATE | db.DB_INIT_MPOOL) cachesize = (0, 2*1024*1024, 1) self.assertRaises(db.DBInvalidArgError, self.env.set_cachesize, *cachesize) cachesize3 = self.env.get_cachesize() self.assertEqual(cachesize2[0], cachesize3[0]) self.assertEqual(cachesize2[2], cachesize3[2]) # In Berkeley DB 5.1, the cachesize can change when opening the Env self.assertTrue(cachesize2[1] <= cachesize3[1]) def test_set_cachesize_dbenv_db(self) : # You can not configure the cachesize using # the database handle, if you are using an environment. 
d = db.DB(self.env) self.assertRaises(db.DBInvalidArgError, d.set_cachesize, 0, 1024*1024, 1) def test_setget_shm_key(self) : shm_key=137 self.env.set_shm_key(shm_key) self.assertEqual(shm_key, self.env.get_shm_key()) self.env.set_shm_key(shm_key+1) self.assertEqual(shm_key+1, self.env.get_shm_key()) # You can not change configuration after opening # the environment. self.env.open(self.homeDir, db.DB_CREATE | db.DB_INIT_MPOOL) # If we try to reconfigure cache after opening the # environment, core dump. self.assertRaises(db.DBInvalidArgError, self.env.set_shm_key, shm_key) self.assertEqual(shm_key+1, self.env.get_shm_key()) if db.version() >= (4, 4) : def test_mutex_setget_max(self) : v = self.env.mutex_get_max() v2 = v*2+1 self.env.mutex_set_max(v2) self.assertEqual(v2, self.env.mutex_get_max()) self.env.mutex_set_max(v) self.assertEqual(v, self.env.mutex_get_max()) # You can not change configuration after opening # the environment. self.env.open(self.homeDir, db.DB_CREATE) self.assertRaises(db.DBInvalidArgError, self.env.mutex_set_max, v2) def test_mutex_setget_increment(self) : v = self.env.mutex_get_increment() v2 = 127 self.env.mutex_set_increment(v2) self.assertEqual(v2, self.env.mutex_get_increment()) self.env.mutex_set_increment(v) self.assertEqual(v, self.env.mutex_get_increment()) # You can not change configuration after opening # the environment. self.env.open(self.homeDir, db.DB_CREATE) self.assertRaises(db.DBInvalidArgError, self.env.mutex_set_increment, v2) def test_mutex_setget_tas_spins(self) : self.env.mutex_set_tas_spins(0) # Default = BDB decides v = self.env.mutex_get_tas_spins() v2 = v*2+1 self.env.mutex_set_tas_spins(v2) self.assertEqual(v2, self.env.mutex_get_tas_spins()) self.env.mutex_set_tas_spins(v) self.assertEqual(v, self.env.mutex_get_tas_spins()) # In this case, you can change configuration # after opening the environment. 
self.env.open(self.homeDir, db.DB_CREATE) self.env.mutex_set_tas_spins(v2) def test_mutex_setget_align(self) : v = self.env.mutex_get_align() v2 = 64 if v == 64 : v2 = 128 self.env.mutex_set_align(v2) self.assertEqual(v2, self.env.mutex_get_align()) # Requires a nonzero power of two self.assertRaises(db.DBInvalidArgError, self.env.mutex_set_align, 0) self.assertRaises(db.DBInvalidArgError, self.env.mutex_set_align, 17) self.env.mutex_set_align(2*v2) self.assertEqual(2*v2, self.env.mutex_get_align()) # You can not change configuration after opening # the environment. self.env.open(self.homeDir, db.DB_CREATE) self.assertRaises(db.DBInvalidArgError, self.env.mutex_set_align, v2) class DBEnv_log(DBEnv) : def setUp(self): DBEnv.setUp(self) self.env.open(self.homeDir, db.DB_CREATE | db.DB_INIT_MPOOL | db.DB_INIT_LOG) def test_log_file(self) : log_file = self.env.log_file((1, 1)) self.assertEqual("log.0000000001", log_file[-14:]) if db.version() >= (4, 4) : # The version with transactions is checked in other test object def test_log_printf(self) : msg = "This is a test..." 
self.env.log_printf(msg) logc = self.env.log_cursor() self.assertTrue(msg in (logc.last()[1])) if db.version() >= (4, 7) : def test_log_config(self) : self.env.log_set_config(db.DB_LOG_DSYNC | db.DB_LOG_ZERO, 1) self.assertTrue(self.env.log_get_config(db.DB_LOG_DSYNC)) self.assertTrue(self.env.log_get_config(db.DB_LOG_ZERO)) self.env.log_set_config(db.DB_LOG_ZERO, 0) self.assertTrue(self.env.log_get_config(db.DB_LOG_DSYNC)) self.assertFalse(self.env.log_get_config(db.DB_LOG_ZERO)) class DBEnv_log_txn(DBEnv) : def setUp(self): DBEnv.setUp(self) self.env.open(self.homeDir, db.DB_CREATE | db.DB_INIT_MPOOL | db.DB_INIT_LOG | db.DB_INIT_TXN) if (db.version() >= (4, 5)) and (db.version() < (5, 2)) : def test_tx_max(self) : txns=[] def tx() : for i in xrange(self.env.get_tx_max()) : txns.append(self.env.txn_begin()) tx() self.assertRaises(MemoryError, tx) # Abort the transactions before garbage collection, # to avoid "warnings". for i in txns : i.abort() if db.version() >= (4, 4) : # The version without transactions is checked in other test object def test_log_printf(self) : msg = "This is a test..." txn = self.env.txn_begin() self.env.log_printf(msg, txn=txn) txn.commit() logc = self.env.log_cursor() logc.last() # Skip the commit self.assertTrue(msg in (logc.prev()[1])) msg = "This is another test..." txn = self.env.txn_begin() self.env.log_printf(msg, txn=txn) txn.abort() # Do not store the new message logc.last() # Skip the abort self.assertTrue(msg not in (logc.prev()[1])) msg = "This is a third test..." 
txn = self.env.txn_begin() self.env.log_printf(msg, txn=txn) txn.commit() # Do not store the new message logc.last() # Skip the commit self.assertTrue(msg in (logc.prev()[1])) class DBEnv_memp(DBEnv): def setUp(self): DBEnv.setUp(self) self.env.open(self.homeDir, db.DB_CREATE | db.DB_INIT_MPOOL | db.DB_INIT_LOG) self.db = db.DB(self.env) self.db.open("test", db.DB_HASH, db.DB_CREATE, 0660) def tearDown(self): self.db.close() del self.db DBEnv.tearDown(self) def test_memp_1_trickle(self) : self.db.put("hi", "bye") self.assertTrue(self.env.memp_trickle(100) > 0) # Preserve the order, do "memp_trickle" test first def test_memp_2_sync(self) : self.db.put("hi", "bye") self.env.memp_sync() # Full flush # Nothing to do... self.assertTrue(self.env.memp_trickle(100) == 0) self.db.put("hi", "bye2") self.env.memp_sync((1, 0)) # NOP, probably # Something to do... or not self.assertTrue(self.env.memp_trickle(100) >= 0) self.db.put("hi", "bye3") self.env.memp_sync((123, 99)) # Full flush # Nothing to do... 
self.assertTrue(self.env.memp_trickle(100) == 0) def test_memp_stat_1(self) : stats = self.env.memp_stat() # No param self.assertTrue(len(stats)==2) self.assertTrue("cache_miss" in stats[0]) stats = self.env.memp_stat(db.DB_STAT_CLEAR) # Positional param self.assertTrue("cache_miss" in stats[0]) stats = self.env.memp_stat(flags=0) # Keyword param self.assertTrue("cache_miss" in stats[0]) def test_memp_stat_2(self) : stats=self.env.memp_stat()[1] self.assertTrue(len(stats))==1 self.assertTrue("test" in stats) self.assertTrue("page_in" in stats["test"]) class DBEnv_logcursor(DBEnv): def setUp(self): DBEnv.setUp(self) self.env.open(self.homeDir, db.DB_CREATE | db.DB_INIT_MPOOL | db.DB_INIT_LOG | db.DB_INIT_TXN) txn = self.env.txn_begin() self.db = db.DB(self.env) self.db.open("test", db.DB_HASH, db.DB_CREATE, 0660, txn=txn) txn.commit() for i in ["2", "8", "20"] : txn = self.env.txn_begin() self.db.put(key = i, data = i*int(i), txn=txn) txn.commit() def tearDown(self): self.db.close() del self.db DBEnv.tearDown(self) def _check_return(self, value) : self.assertTrue(isinstance(value, tuple)) self.assertEqual(len(value), 2) self.assertTrue(isinstance(value[0], tuple)) self.assertEqual(len(value[0]), 2) self.assertTrue(isinstance(value[0][0], int)) self.assertTrue(isinstance(value[0][1], int)) self.assertTrue(isinstance(value[1], str)) # Preserve test order def test_1_first(self) : logc = self.env.log_cursor() v = logc.first() self._check_return(v) self.assertTrue((1, 1) < v[0]) self.assertTrue(len(v[1])>0) def test_2_last(self) : logc = self.env.log_cursor() lsn_first = logc.first()[0] v = logc.last() self._check_return(v) self.assertTrue(lsn_first < v[0]) def test_3_next(self) : logc = self.env.log_cursor() lsn_last = logc.last()[0] self.assertEqual(logc.next(), None) lsn_first = logc.first()[0] v = logc.next() self._check_return(v) self.assertTrue(lsn_first < v[0]) self.assertTrue(lsn_last > v[0]) v2 = logc.next() self.assertTrue(v2[0] > v[0]) self.assertTrue(lsn_last 
> v2[0]) v3 = logc.next() self.assertTrue(v3[0] > v2[0]) self.assertTrue(lsn_last > v3[0]) def test_4_prev(self) : logc = self.env.log_cursor() lsn_first = logc.first()[0] self.assertEqual(logc.prev(), None) lsn_last = logc.last()[0] v = logc.prev() self._check_return(v) self.assertTrue(lsn_first < v[0]) self.assertTrue(lsn_last > v[0]) v2 = logc.prev() self.assertTrue(v2[0] < v[0]) self.assertTrue(lsn_first < v2[0]) v3 = logc.prev() self.assertTrue(v3[0] < v2[0]) self.assertTrue(lsn_first < v3[0]) def test_5_current(self) : logc = self.env.log_cursor() logc.first() v = logc.next() self.assertEqual(v, logc.current()) def test_6_set(self) : logc = self.env.log_cursor() logc.first() v = logc.next() self.assertNotEqual(v, logc.next()) self.assertNotEqual(v, logc.next()) self.assertEqual(v, logc.set(v[0])) def test_explicit_close(self) : logc = self.env.log_cursor() logc.close() self.assertRaises(db.DBCursorClosedError, logc.next) def test_implicit_close(self) : logc = [self.env.log_cursor() for i in xrange(10)] self.env.close() # This close should close too all its tree for i in logc : self.assertRaises(db.DBCursorClosedError, i.next) def test_suite(): suite = unittest.TestSuite() suite.addTest(unittest.makeSuite(DBEnv_general)) suite.addTest(unittest.makeSuite(DBEnv_memp)) suite.addTest(unittest.makeSuite(DBEnv_logcursor)) suite.addTest(unittest.makeSuite(DBEnv_log)) suite.addTest(unittest.makeSuite(DBEnv_log_txn)) return suite if __name__ == '__main__': unittest.main(defaultTest='test_suite')
#!/usr/bin/env python2
# -*- coding: utf-8 -*-

"""Regenerate ROS launch files for the "spirit" package from a YAML config.

Reads ``launch_params.yaml``, resolves any "auto" joystick device, verifies
that the configured past-image evaluation coefficients name real evaluator
callables, injects per-method <param> blocks into the past-image xacro
template, and recompiles every ``*.xacro`` file into ``launchers/``.
"""
from __future__ import print_function
import glob
import imp
import inspect
import logging
import os
import subprocess as sp
import sys

try:
    from lxml import etree as et
except ImportError:
    logging.critical("Cannot run the program!")
    logging.critical("Are you in the correct virtual environment? Try running "
                     "`workon spirit` and `toggleglobalsitepackages` in this "
                     "terminal.")
    sys.exit(1)
import tqdm
import yaml

import rospkg

CONFIG_FILE = "launch_params.yaml"
NAMESPACES = {"xacro": "{http://www.ros.org/wiki/xacro}"}
# Keys whose YAML value is given in degrees; the generated xacro expression
# multiplies by pi/180 so the node receives radians.
DEGREE_KEYS = ("thresh_yaw",)
# et.register_namespace("xacro", "http://www.ros.org/wiki/xacro")


def get_ros_dir(package, dirname):
    """Return the absolute path of ``dirname`` inside a ROS ``package``."""
    return os.path.join(rospkg.RosPack().get_path(package), dirname)


def get_file_root(path):
    """Return the filename of ``path`` without directory or extension.

    >>> get_file_root("/tmp/test.txt")
    'test'
    """
    return os.path.splitext(os.path.basename(path))[0]


def compile_xacro(inpath, outpath, stdout):
    """Compile one xacro file to ``outpath`` via ``rosrun xacro``.

    ``stdout`` is passed straight to ``subprocess.call`` (the caller uses
    ``os.devnull`` to silence the tool).
    """
    sp.call("rosrun xacro xacro {inpath} --inorder -o {outpath}"
            .format(inpath=inpath, outpath=outpath).split(),
            stdout=stdout)


def get_past_image_keys(launch_params):
    """Map each past-image eval method to the list of its parameter keys.

    The "general" section holds shared settings and is excluded.
    """
    keys = {}
    past_image_params = launch_params["past_image"]
    for eval_method in past_image_params:
        if eval_method != "general":
            keys[eval_method] = past_image_params[eval_method].keys()
    return keys


def load_xml(path):
    """Parse ``path`` with blank-text stripping so pretty-printing works."""
    parser = et.XMLParser(remove_blank_text=True)
    return et.parse(path, parser)


def remove_old_elements(node):
    """Delete previously generated <xacro:if> children of ``node``."""
    for element in node.findall("{}if".format(NAMESPACES["xacro"])):
        node.remove(element)


def add_new_keys(node, keys):
    """Append one <xacro:if> block per eval method under ``node``.

    Each block sets the method's parameters from the ``method_ns`` xacro
    dictionary; degree-valued keys are converted to radians inline.
    """
    for method_name, key_list in keys.items():
        element = et.Element("{}if".format(NAMESPACES["xacro"]),
                             attrib={"value": "${{method == '{}'}}"
                                     .format(method_name)})
        for key in key_list:
            et.SubElement(
                element, "param",
                attrib={"name": key,
                        "value": "${{{}method_ns['{}']}}".format(
                            "3.14159/180*" if key in DEGREE_KEYS else "",
                            key)})
        node.append(element)


def add_message(tree):
    """Prepend a "generated automatically" comment above the root element."""
    root = tree.getroot()
    root.addprevious(
        et.Comment("Generated automatically from launch config file.")
    )


def update_past_image_generator(keys, path="xacro/past_images.launch.xacro"):
    """Rewrite the past-image selector template in place with fresh keys."""
    tree = load_xml(path)
    for node in tree.findall("node[@name='past_image_selector']"):
        remove_old_elements(node)
        add_new_keys(node, keys)
    tree.write(path, encoding="utf-8", xml_declaration=True,
               pretty_print=True)


def verify_coeffs(method, past_image_keys):
    """Check that every ``coeff_*`` key of ``method`` names a real callable.

    Parameters
    ----------
    method : str
        Name of the evaluator class in ``evaluators.py``.
    past_image_keys : dict
        Mapping of eval method -> list of parameter keys.

    Raises
    ------
    AttributeError
        If a coefficient component does not exist on the evaluator.
    TypeError
        If a coefficient component exists but is not callable.
    """
    method_params = past_image_keys[method]

    cwd = os.getcwd()
    os.chdir(get_ros_dir("spirit", "src"))
    # Restore the working directory even if verification raises, so the
    # caller's subsequent relative paths stay valid.
    try:
        helpers = imp.load_source("helpers", "helpers.py")  # Needed for import
        evaluators = imp.load_source("evaluators", "evaluators.py")

        components = [param.split("coeff_", 1)[1] for param in method_params
                      if param.startswith("coeff_")]
        evaluator = getattr(evaluators, method)
        bad_keys = [component for component in components
                    if not (inspect.ismethod(getattr(evaluator, component)) or
                            inspect.isfunction(getattr(evaluator, component)))]
        if bad_keys:
            raise TypeError("The following components are not callable: {}"
                            .format(bad_keys))
    finally:
        os.chdir(cwd)


def update_auto_keys(params):
    """Resolve ``js_number: auto`` to the highest-numbered joystick device.

    Returns a shallow copy of ``params``; note the nested "control" dict is
    still shared with the input and is mutated in place.
    """
    def extract_js_number(s):
        return int(s[2:])  # e.g. "js3" -> 3

    params = params.copy()
    if (params["control"]["use_joystick"]
            and params["control"]["js_number"] == "auto"):
        joysticks = [os.path.basename(i)
                     for i in glob.glob("/dev/input/js*")]
        try:
            # max() raises ValueError on an empty sequence (no joysticks).
            js_number = extract_js_number(max(joysticks,
                                              key=extract_js_number))
        except ValueError:
            logging.warning(
                "No joysticks detected! Defaulting to /dev/input/js0")
            js_number = 0
        params["control"]["js_number"] = js_number
        print("js_number set to", js_number)
    return params


def main():
    os.chdir(get_ros_dir("spirit", "config"))
    with open(CONFIG_FILE) as fin:
        # safe_load instead of load: the config is plain YAML and
        # yaml.load without an explicit Loader is unsafe and deprecated.
        launch_params = yaml.safe_load(fin)
    updated_params = update_auto_keys(launch_params)

    method = launch_params["past_image"]["general"]["eval_method"]
    past_image_keys = get_past_image_keys(launch_params)
    verify_coeffs(method, past_image_keys)

    # Hidden copy of the resolved config, for the launch files to read.
    with open("." + CONFIG_FILE, "w") as fout:
        yaml.dump(updated_params, fout)

    os.chdir(get_ros_dir("spirit", "launch"))
    update_past_image_generator(past_image_keys)
    with open(os.devnull, "w") as DEVNULL:
        for path in tqdm.tqdm(glob.glob("xacro/*.xacro"),
                              desc="Regenerating launch files",
                              unit=" files",
                              leave=True):
            root = get_file_root(path)
            compile_xacro(path, os.path.join("launchers", root), DEVNULL)


if __name__ == "__main__":
    main()
'''This plots the results of the parameter sweep for the long-term bubbling
example.
'''
from os import mkdir
from os.path import isdir
from pickle import load

from numpy import arange, array, atleast_2d, hstack, sum, where, zeros
from matplotlib.pyplot import axes, close, colorbar, imshow, set_cmap, subplots
from mpl_toolkits.axes_grid1 import make_axes_locatable
from seaborn import heatmap

if isdir('plots/long_term_bubbles') is False:
    mkdir('plots/long_term_bubbles')

# Sweep results produced by the corresponding run script.  Each of the five
# arrays is a 2D surface over the two swept parameters (presumably bubble
# uptake x between-household reduction — confirm against the sweep script).
with open('outputs/long_term_bubbles/results.pkl', 'rb') as f:
    (growth_rate,
     peaks,
     R_end,
     hh_prop,
     attack_ratio,
     bubble_prob_range,
     external_mix_range) = load(f)

# Per-quantity colour limits so each panel spans its own data range.
r_min = growth_rate.min()
r_max = growth_rate.max()
peak_min = peaks.min()
peak_max = peaks.max()
R_end_min = R_end.min()
R_end_max = R_end.max()
hh_prop_min = hh_prop.min()
hh_prop_max = hh_prop.max()
attack_ratio_min = attack_ratio.min()
attack_ratio_max = attack_ratio.max()


def _plot_single_surface(data, vmin, vmax, cbar_label, filename):
    """Render one sweep surface as a standalone figure and save it.

    All five standalone plots are identical apart from the data, colour
    limits, colourbar label and output filename, so they share this helper.
    """
    fig, ax = subplots(1, 1, sharex=True)
    imshow(data, origin='lower', extent=(0, 1, 0, 1), vmin=vmin, vmax=vmax)
    ax.set_xlabel('% reduction in between-household transmission')
    ax.set_ylabel('% uptake of support bubbles')
    colorbar(label=cbar_label, fraction=0.046, pad=0.04)
    fig.savefig(filename, bbox_inches='tight', dpi=300)
    close()


_plot_single_surface(growth_rate, r_min, r_max,
                     "Growth rate",
                     'plots/long_term_bubbles/growth_rate.png')
_plot_single_surface(peaks, peak_min, peak_max,
                     "Peak % prevalence",
                     'plots/long_term_bubbles/peak.png')
_plot_single_surface(R_end, R_end_min, R_end_max,
                     "% immune at end of projection",
                     'plots/long_term_bubbles/immunity.png')
_plot_single_surface(hh_prop, hh_prop_min, hh_prop_max,
                     "% of households infected during projection",
                     'plots/long_term_bubbles/hh_prop.png')
_plot_single_surface(attack_ratio, attack_ratio_min, attack_ratio_max,
                     "% attack rate in infected households",
                     'plots/long_term_bubbles/attack_ratio.png')


def _add_colorbar(ax, image, label):
    """Attach a colourbar to ``ax`` without distorting the panel's aspect."""
    divider = make_axes_locatable(ax)
    cax = divider.append_axes("right", size="5%", pad=0.05)
    colorbar(image, label=label, cax=cax)


# Combined 2x2 summary figure of the four outcome measures.
fig, ((ax1, ax2), (ax3, ax4)) = subplots(2, 2)

axim = ax1.imshow(peaks, origin='lower', extent=(0, 1, 0, 1),
                  vmin=peak_min, vmax=peak_max)
# NOTE(review): this label says "within-household" while the standalone
# plots label the same axis as bubble uptake — confirm which is intended.
ax1.set_ylabel('% reduction in\n within-household\n transmission')
_add_colorbar(ax1, axim, "Peak % prevalence")

axim = ax2.imshow(R_end, origin='lower', extent=(0, 1, 0, 1),
                  vmin=R_end_min, vmax=R_end_max)
_add_colorbar(ax2, axim, "Cumulative % prevalence")

axim = ax3.imshow(hh_prop, origin='lower', extent=(0, 1, 0, 1),
                  vmin=hh_prop_min, vmax=hh_prop_max)
ax3.set_ylabel('% uptake of support bubbles')
ax3.set_xlabel('% reduction in\n between-household\n transmission')
_add_colorbar(ax3, axim, "% of households infected")

axim = ax4.imshow(attack_ratio, origin='lower', extent=(0, 1, 0, 1),
                  vmin=attack_ratio_min, vmax=attack_ratio_max)
ax4.set_xlabel('% reduction in\n between-household\n transmission')
_add_colorbar(ax4, axim, "% attack ratio")

fig.savefig('plots/long_term_bubbles/grid_plot.png',
            bbox_inches='tight',
            dpi=300)
close()
import logging

from progress.bar import IncrementalBar
import numpy as np

from counter_attack import utils

logger = logging.getLogger(__name__)


def get_iterator(name, logger, loader):
    """Wrap ``loader`` in a progress bar iterator at INFO verbosity.

    The ``logger`` parameter intentionally shadows the module-level logger
    so callers can supply their own; at any level other than INFO the
    loader is returned unwrapped.
    """
    if logger.getEffectiveLevel() == logging.INFO:
        return IncrementalBar(name).iter(loader)
    else:
        return loader


# https://en.wikipedia.org/wiki/N-sphere
def _l2_ball(radius, shape):
    """Sample a point uniformly from the L2 ball of the given radius.

    Draws a uniform random direction, scales it by u**(1/d) (so points are
    uniform in volume, not just on the sphere), then by ``radius``, and
    reshapes the flat vector to ``shape``.
    """
    count = np.prod(shape)
    random_direction = utils.random_unit_vector([count])
    u = np.random.random()
    random_point = random_direction * np.power(u, 1 / count)
    random_point = random_point * radius

    assert utils.lp_norm(random_point, 2) <= radius
    logger.debug('Generated L2 point with norm {} (radius: {})'.format(
        utils.lp_norm(random_point, 2), radius))
    return random_point.reshape(shape)


# Just an hypercube
def _linf_ball(radius, shape):
    """Sample a point uniformly from the LInf ball (a hypercube)."""
    random_point = (np.random.rand(*shape) * 2 - 1) * radius
    logger.debug('Generated LInf point with norm {} (radius: {})'.format(
        utils.lp_norm(random_point, np.inf), radius))
    return random_point


def sample_lp_ball(p, radius, shape):
    """Sample a point uniformly from the Lp ball of the given radius.

    Parameters
    ----------
    p : float
        Norm order; only 2 and +inf are supported.
    radius : float
        Radius of the ball.
    shape : tuple
        Shape of the returned array.

    Raises
    ------
    NotImplementedError
        If ``p`` is neither 2 nor +inf.
    """
    if p == 2:
        return _l2_ball(radius, shape)
    elif np.isposinf(p):
        return _linf_ball(radius, shape)
    else:
        # BUG FIX: this previously *returned* the exception object instead
        # of raising it, silently handing callers a NotImplementedError
        # instance in place of a sample.
        raise NotImplementedError(
            'Ball sampling is only supported for L2 and LInf norms.')
# Jupyter Notebook server configuration.
# ``get_config()`` is injected into this file's namespace by Jupyter when it
# loads the config; it returns the application's Config object.
c = get_config()

# Serve on port 8998 and do not launch a local browser — typical settings for
# a headless / remote deployment accessed over the network.
c.NotebookApp.port = 8998
c.NotebookApp.open_browser = False

# Declare a kernel dead after 6 seconds without a heartbeat (default is 3.0),
# giving slow-starting kernels more slack before a restart.
c.KernelRestarter.time_to_dead = 6.0

# Listen on all interfaces and accept requests from any origin.
# NOTE(review): "*" for both ip and allow_origin exposes the server to the
# whole network — confirm access is protected (token/password or firewall).
c.NotebookApp.ip = "*"
c.NotebookApp.allow_origin = "*"
import datetime

from sqlalchemy import Column, DateTime, Integer, String
from sqlalchemy.ext.declarative import declarative_base

# Shared declarative base for the ORM models defined in this module.
Base = declarative_base()


class Migration(Base):
    """ORM model recording a single applied database migration.

    Each row stores a migration's name and the time it was applied —
    presumably read by a migration runner to skip already-applied scripts
    (confirm against the runner's code).
    """

    __tablename__ = "migrations"

    # Surrogate integer primary key.
    id = Column(Integer, primary_key=True)
    # Migration identifier, up to 50 characters.
    name = Column(String(50))
    # When the migration ran.  The default is the *callable*
    # ``datetime.datetime.utcnow`` (not a fixed value), evaluated
    # client-side at INSERT time, so timestamps are naive UTC.
    migrated_at = Column(DateTime, default=datetime.datetime.utcnow)

    def __repr__(self):
        return "<Migration(name={0})>".format(self.name)
""" Computation engines for executing Pipelines. This module defines the core computation algorithms for executing Pipelines. The primary entrypoint of this file is SimplePipelineEngine.run_pipeline, which implements the following algorithm for executing pipelines: 1. Determine the domain of the pipeline. The domain determines the top-level set of dates and assets that serve as row- and column-labels for the computations performed by this pipeline. This logic lives in zipline.pipeline.domain.infer_domain. 2. Build a dependency graph of all terms in `pipeline`, with information about how many extra rows each term needs from its inputs. At this point we also **specialize** any generic LoadableTerms to the domain determined in (1). This logic lives in zipline.pipeline.graph.TermGraph and zipline.pipeline.graph.ExecutionPlan. 3. Combine the domain computed in (2) with our AssetFinder to produce a "lifetimes matrix". The lifetimes matrix is a DataFrame of booleans whose labels are dates x assets. Each entry corresponds to a (date, asset) pair and indicates whether the asset in question was tradable on the date in question. This logic primarily lives in AssetFinder.lifetimes. 4. Call self._populate_initial_workspace, which produces a "workspace" dictionary containing cached or otherwise pre-computed terms. By default, the initial workspace contains the lifetimes matrix and its date labels. 5. Topologically sort the graph constructed in (1) to produce an execution order for any terms that were not pre-populated. This logic lives in TermGraph. 6. Iterate over the terms in the order computed in (5). For each term: a. Fetch the term's inputs from the workspace, possibly removing unneeded leading rows from the input (see ExecutionPlan.offset for details on why we might have extra leading rows). b. Call ``term._compute`` with the inputs. Store the results into the workspace. c. 
Decrement "reference counts" on the term's inputs, and remove their results from the workspace if the refcount hits 0. This significantly reduces the maximum amount of memory that we consume during execution This logic lives in SimplePipelineEngine.compute_chunk. 7. Extract the pipeline's outputs from the workspace and convert them into "narrow" format, with output labels dictated by the Pipeline's screen. This logic lives in SimplePipelineEngine._to_narrow. """ from abc import ABCMeta, abstractmethod from six import iteritems, with_metaclass, viewkeys from numpy import array from pandas import DataFrame, MultiIndex from toolz import groupby from zipline.lib.adjusted_array import ensure_adjusted_array, ensure_ndarray from zipline.errors import NoFurtherDataError from zipline.utils.input_validation import expect_types from zipline.utils.numpy_utils import ( as_column, repeat_first_axis, repeat_last_axis, ) from zipline.utils.pandas_utils import explode from .domain import Domain, GENERIC from .graph import maybe_specialize from .term import AssetExists, InputDates, LoadableTerm from zipline.utils.date_utils import compute_date_range_chunks from zipline.utils.pandas_utils import categorical_df_concat from zipline.utils.sharedoc import copydoc class PipelineEngine(with_metaclass(ABCMeta)): @abstractmethod def run_pipeline(self, pipeline, start_date, end_date): """ Compute values for ``pipeline`` between ``start_date`` and ``end_date``. Returns a DataFrame with a MultiIndex of (date, asset) pairs. Parameters ---------- pipeline : zipline.pipeline.Pipeline The pipeline to run. start_date : pd.Timestamp Start date of the computed matrix. end_date : pd.Timestamp End date of the computed matrix. Returns ------- result : pd.DataFrame A frame of computed results. The ``result`` columns correspond to the entries of `pipeline.columns`, which should be a dictionary mapping strings to instances of :class:`zipline.pipeline.term.Term`. 
For each date between ``start_date`` and ``end_date``, ``result`` will contain a row for each asset that passed `pipeline.screen`. A screen of ``None`` indicates that a row should be returned for each asset that existed each day. """ raise NotImplementedError("run_pipeline") @abstractmethod def run_chunked_pipeline(self, pipeline, start_date, end_date, chunksize): """ Compute values for `pipeline` in number of days equal to `chunksize` and return stitched up result. Computing in chunks is useful for pipelines computed over a long period of time. Parameters ---------- pipeline : Pipeline The pipeline to run. start_date : pd.Timestamp The start date to run the pipeline for. end_date : pd.Timestamp The end date to run the pipeline for. chunksize : int The number of days to execute at a time. Returns ------- result : pd.DataFrame A frame of computed results. The ``result`` columns correspond to the entries of `pipeline.columns`, which should be a dictionary mapping strings to instances of :class:`zipline.pipeline.term.Term`. For each date between ``start_date`` and ``end_date``, ``result`` will contain a row for each asset that passed `pipeline.screen`. A screen of ``None`` indicates that a row should be returned for each asset that existed each day. See Also -------- :meth:`zipline.pipeline.engine.PipelineEngine.run_pipeline` """ raise NotImplementedError("run_chunked_pipeline") class NoEngineRegistered(Exception): """ Raised if a user tries to call pipeline_output in an algorithm that hasn't set up a pipeline engine. """ class ExplodingPipelineEngine(PipelineEngine): """ A PipelineEngine that doesn't do anything. """ def run_pipeline(self, pipeline, start_date, end_date): raise NoEngineRegistered( "Attempted to run a pipeline but no pipeline " "resources were registered." ) def run_chunked_pipeline(self, pipeline, start_date, end_date, chunksize): raise NoEngineRegistered( "Attempted to run a chunked pipeline but no pipeline " "resources were registered." 
) def default_populate_initial_workspace(initial_workspace, root_mask_term, execution_plan, dates, assets): """The default implementation for ``populate_initial_workspace``. This function returns the ``initial_workspace`` argument without making any modifications. Parameters ---------- initial_workspace : dict[array-like] The initial workspace before we have populated it with any cached terms. root_mask_term : Term The root mask term, normally ``AssetExists()``. This is needed to compute the dates for individual terms. execution_plan : ExecutionPlan The execution plan for the pipeline being run. dates : pd.DatetimeIndex All of the dates being requested in this pipeline run including the extra dates for look back windows. assets : pd.Int64Index All of the assets that exist for the window being computed. Returns ------- populated_initial_workspace : dict[term, array-like] The workspace to begin computations with. """ return initial_workspace class SimplePipelineEngine(PipelineEngine): """ PipelineEngine class that computes each term independently. Parameters ---------- get_loader : callable A function that is given a loadable term and returns a PipelineLoader to use to retrieve raw data for that term. asset_finder : zipline.assets.AssetFinder An AssetFinder instance. We depend on the AssetFinder to determine which assets are in the top-level universe at any point in time. populate_initial_workspace : callable, optional A function which will be used to populate the initial workspace when computing a pipeline. See :func:`zipline.pipeline.engine.default_populate_initial_workspace` for more info. 
See Also -------- :func:`zipline.pipeline.engine.default_populate_initial_workspace` """ __slots__ = ( '_get_loader', '_finder', '_root_mask_term', '_root_mask_dates_term', '_populate_initial_workspace', ) @expect_types( default_domain=Domain, __funcname='SimplePipelineEngine', ) def __init__(self, get_loader, asset_finder, default_domain=GENERIC, populate_initial_workspace=None): self._get_loader = get_loader self._finder = asset_finder self._root_mask_term = AssetExists() self._root_mask_dates_term = InputDates() self._populate_initial_workspace = ( populate_initial_workspace or default_populate_initial_workspace ) self._default_domain = default_domain def run_pipeline(self, pipeline, start_date, end_date): """ Compute a pipeline. Parameters ---------- pipeline : zipline.pipeline.Pipeline The pipeline to run. start_date : pd.Timestamp Start date of the computed matrix. end_date : pd.Timestamp End date of the computed matrix. Returns ------- result : pd.DataFrame A frame of computed results. The ``result`` columns correspond to the entries of `pipeline.columns`, which should be a dictionary mapping strings to instances of :class:`zipline.pipeline.term.Term`. For each date between ``start_date`` and ``end_date``, ``result`` will contain a row for each asset that passed `pipeline.screen`. A screen of ``None`` indicates that a row should be returned for each asset that existed each day. See Also -------- :meth:`zipline.pipeline.engine.PipelineEngine.run_pipeline` :meth:`zipline.pipeline.engine.PipelineEngine.run_chunked_pipeline` """ # See notes at the top of this module for a description of the # algorithm implemented here. 
if end_date < start_date: raise ValueError( "start_date must be before or equal to end_date \n" "start_date=%s, end_date=%s" % (start_date, end_date) ) domain = self._resolve_domain(pipeline) graph = pipeline.to_execution_plan( domain, self._root_mask_term, start_date, end_date, ) extra_rows = graph.extra_rows[self._root_mask_term] root_mask = self._compute_root_mask( domain, start_date, end_date, extra_rows, ) dates, assets, root_mask_values = explode(root_mask) initial_workspace = self._populate_initial_workspace( { self._root_mask_term: root_mask_values, self._root_mask_dates_term: as_column(dates.values) }, self._root_mask_term, graph, dates, assets, ) results = self.compute_chunk(graph, dates, assets, initial_workspace) return self._to_narrow( graph.outputs, results, results.pop(graph.screen_name), dates[extra_rows:], assets, ) @copydoc(PipelineEngine.run_chunked_pipeline) def run_chunked_pipeline(self, pipeline, start_date, end_date, chunksize): domain = self._resolve_domain(pipeline) ranges = compute_date_range_chunks( domain.all_sessions(), start_date, end_date, chunksize, ) chunks = [self.run_pipeline(pipeline, s, e) for s, e in ranges] if len(chunks) == 1: # OPTIMIZATION: Don't make an extra copy in `categorical_df_concat` # if we don't have to. return chunks[0] return categorical_df_concat(chunks, inplace=True) def _compute_root_mask(self, domain, start_date, end_date, extra_rows): """ Compute a lifetimes matrix from our AssetFinder, then drop columns that didn't exist at all during the query dates. Parameters ---------- domain : zipline.pipeline.domain.Domain Domain for which we're computing a pipeline. start_date : pd.Timestamp Base start date for the matrix. end_date : pd.Timestamp End date for the matrix. extra_rows : int Number of extra rows to compute before `start_date`. Extra rows are needed by terms like moving averages that require a trailing window of data. 
Returns ------- lifetimes : pd.DataFrame Frame of dtype `bool` containing dates from `extra_rows` days before `start_date`, continuing through to `end_date`. The returned frame contains as columns all assets in our AssetFinder that existed for at least one day between `start_date` and `end_date`. """ sessions = domain.all_sessions() if start_date not in sessions: raise ValueError( "Pipeline start date ({}) is not a trading session for " "domain {}.".format(start_date, domain) ) elif end_date not in sessions: raise ValueError( "Pipeline end date {} is not a trading session for " "domain {}.".format(end_date, domain) ) start_idx, end_idx = sessions.slice_locs(start_date, end_date) if start_idx < extra_rows: raise NoFurtherDataError.from_lookback_window( initial_message="Insufficient data to compute Pipeline:", first_date=sessions[0], lookback_start=start_date, lookback_length=extra_rows, ) # NOTE: This logic should probably be delegated to the domain once we # start adding more complex domains. # # Build lifetimes matrix reaching back to `extra_rows` days before # `start_date.` finder = self._finder lifetimes = finder.lifetimes( sessions[start_idx - extra_rows:end_idx], include_start_date=False, country_codes=(domain.country_code,), ) if not lifetimes.columns.unique: columns = lifetimes.columns duplicated = columns[columns.duplicated()].unique() raise AssertionError("Duplicated sids: %d" % duplicated) # Filter out columns that didn't exist from the farthest look back # window through the end of the requested dates. 
existed = lifetimes.any() ret = lifetimes.loc[:, existed] num_assets = ret.shape[1] if num_assets == 0: raise ValueError( "Failed to find any assets with country_code {!r} that traded " "between {} and {}.\n" "This probably means that your asset db is old or that it has " "incorrect country/exchange metadata.".format( domain.country_code, start_date, end_date, ) ) return ret @staticmethod def _inputs_for_term(term, workspace, graph, domain): """ Compute inputs for the given term. This is mostly complicated by the fact that for each input we store as many rows as will be necessary to serve **any** computation requiring that input. """ offsets = graph.offset out = [] # We need to specialize here because we don't change ComputableTerm # after resolving domains, so they can still contain generic terms as # inputs. specialized = [maybe_specialize(t, domain) for t in term.inputs] if term.windowed: # If term is windowed, then all input data should be instances of # AdjustedArray. for input_ in specialized: adjusted_array = ensure_adjusted_array( workspace[input_], input_.missing_value, ) out.append( adjusted_array.traverse( window_length=term.window_length, offset=offsets[term, input_], ) ) else: # If term is not windowed, input_data may be an AdjustedArray or # np.ndarray. Coerce the former to the latter. for input_ in specialized: input_data = ensure_ndarray(workspace[input_]) offset = offsets[term, input_] # OPTIMIZATION: Don't make a copy by doing input_data[0:] if # offset is zero. if offset: input_data = input_data[offset:] out.append(input_data) return out def compute_chunk(self, graph, dates, sids, initial_workspace): """ Compute the Pipeline terms in the graph for the requested start and end dates. This is where we do the actual work of running a pipeline. Parameters ---------- graph : zipline.pipeline.graph.ExecutionPlan Dependency graph of the terms to be executed. dates : pd.DatetimeIndex Row labels for our root mask. 
assets : pd.Int64Index Column labels for our root mask. initial_workspace : dict Map from term -> output. Must contain at least entry for `self._root_mask_term` whose shape is `(len(dates), len(assets))`, but may contain additional pre-computed terms for testing or optimization purposes. Returns ------- results : dict Dictionary mapping requested results to outputs. """ self._validate_compute_chunk_params( graph, dates, sids, initial_workspace, ) get_loader = self._get_loader # Copy the supplied initial workspace so we don't mutate it in place. workspace = initial_workspace.copy() refcounts = graph.initial_refcounts(workspace) execution_order = graph.execution_order(refcounts) domain = graph.domain # Many loaders can fetch data more efficiently if we ask them to # retrieve all their inputs at once. For example, a loader backed by a # SQL database can fetch multiple columns from the database in a single # query. # # To enable these loaders to fetch their data efficiently, we group # together requests for LoadableTerms if they are provided by the same # loader and they require the same number of extra rows. # # The extra rows condition is a simplification: we don't currently have # a mechanism for asking a loader to fetch different windows of data # for different terms, so we only batch requests together when they're # going to produce data for the same set of dates. That may change in # the future if we find a loader that can still benefit significantly # from batching unequal-length requests. def loader_group_key(term): loader = get_loader(term) extra_rows = graph.extra_rows[term] return loader, extra_rows # Only produce loader groups for the terms we expect to load. This # ensures that we can run pipelines for graphs where we don't have a # loader registered for an atomic term if all the dependencies of that # term were supplied in the initial workspace. 
will_be_loaded = graph.loadable_terms - viewkeys(workspace) loader_groups = groupby( loader_group_key, (t for t in execution_order if t in will_be_loaded), ) for term in graph.execution_order(refcounts): # `term` may have been supplied in `initial_workspace`, and in the # future we may pre-compute loadable terms coming from the same # dataset. In either case, we will already have an entry for this # term, which we shouldn't re-compute. if term in workspace: continue # Asset labels are always the same, but date labels vary by how # many extra rows are needed. mask, mask_dates = graph.mask_and_dates_for_term( term, self._root_mask_term, workspace, dates, ) if isinstance(term, LoadableTerm): loader = get_loader(term) to_load = sorted( loader_groups[loader_group_key(term)], key=lambda t: t.dataset ) loaded = loader.load_adjusted_array( domain, to_load, mask_dates, sids, mask, ) assert set(loaded) == set(to_load), ( 'loader did not return an AdjustedArray for each column\n' 'expected: %r\n' 'got: %r' % (sorted(to_load), sorted(loaded)) ) workspace.update(loaded) else: workspace[term] = term._compute( self._inputs_for_term(term, workspace, graph, domain), mask_dates, sids, mask, ) if term.ndim == 2: assert workspace[term].shape == mask.shape else: assert workspace[term].shape == (mask.shape[0], 1) # Decref dependencies of ``term``, and clear any terms whose # refcounts hit 0. for garbage_term in graph.decref_dependencies(term, refcounts): del workspace[garbage_term] # At this point, all the output terms are in the workspace. out = {} graph_extra_rows = graph.extra_rows for name, term in iteritems(graph.outputs): # Truncate off extra rows from outputs. out[name] = workspace[term][graph_extra_rows[term]:] return out def _to_narrow(self, terms, data, mask, dates, assets): """ Convert raw computed pipeline results into a DataFrame for public APIs. Parameters ---------- terms : dict[str -> Term] Dict mapping column names to terms. 
data : dict[str -> ndarray[ndim=2]] Dict mapping column names to computed results for those names. mask : ndarray[bool, ndim=2] Mask array of values to keep. dates : ndarray[datetime64, ndim=1] Row index for arrays `data` and `mask` assets : ndarray[int64, ndim=2] Column index for arrays `data` and `mask` Returns ------- results : pd.DataFrame The indices of `results` are as follows: index : two-tiered MultiIndex of (date, asset). Contains an entry for each (date, asset) pair corresponding to a `True` value in `mask`. columns : Index of str One column per entry in `data`. If mask[date, asset] is True, then result.loc[(date, asset), colname] will contain the value of data[colname][date, asset]. """ if not mask.any(): # Manually handle the empty DataFrame case. This is a workaround # to pandas failing to tz_localize an empty dataframe with a # MultiIndex. It also saves us the work of applying a known-empty # mask to each array. # # Slicing `dates` here to preserve pandas metadata. empty_dates = dates[:0] empty_assets = array([], dtype=object) return DataFrame( data={ name: array([], dtype=arr.dtype) for name, arr in iteritems(data) }, index=MultiIndex.from_arrays([empty_dates, empty_assets]), ) resolved_assets = array(self._finder.retrieve_all(assets)) dates_kept = repeat_last_axis(dates.values, len(assets))[mask] assets_kept = repeat_first_axis(resolved_assets, len(dates))[mask] final_columns = {} for name in data: # Each term that computed an output has its postprocess method # called on the filtered result. # # As of Mon May 2 15:38:47 2016, we only use this to convert # LabelArrays into categoricals. final_columns[name] = terms[name].postprocess(data[name][mask]) return DataFrame( data=final_columns, index=MultiIndex.from_arrays([dates_kept, assets_kept]), ).tz_localize('UTC', level=0) def _validate_compute_chunk_params(self, graph, dates, sids, initial_workspace): """ Verify that the values passed to compute_chunk are well-formed. 
""" root = self._root_mask_term clsname = type(self).__name__ # Writing this out explicitly so this errors in testing if we change # the name without updating this line. compute_chunk_name = self.compute_chunk.__name__ if root not in initial_workspace: raise AssertionError( "root_mask values not supplied to {cls}.{method}".format( cls=clsname, method=compute_chunk_name, ) ) shape = initial_workspace[root].shape implied_shape = len(dates), len(sids) if shape != implied_shape: raise AssertionError( "root_mask shape is {shape}, but received dates/assets " "imply that shape should be {implied}".format( shape=shape, implied=implied_shape, ) ) for term in initial_workspace: if self._is_special_root_term(term): continue if term.domain is GENERIC: # XXX: We really shouldn't allow **any** generic terms to be # populated in the initial workspace. A generic term, by # definition, can't correspond to concrete data until it's # paired with a domain, and populate_initial_workspace isn't # given the domain of execution, so it can't possibly know what # data to use when populating a generic term. # # In our current implementation, however, we don't have a good # way to represent specializations of ComputableTerms that take # only generic inputs, so there's no good way for the initial # workspace to provide data for such terms except by populating # the generic ComputableTerm. # # The right fix for the above is to implement "full # specialization", i.e., implementing ``specialize`` uniformly # across all terms, not just LoadableTerms. Having full # specialization will also remove the need for all of the # remaining ``maybe_specialize`` calls floating around in this # file. # # In the meantime, disallowing ComputableTerms in the initial # workspace would break almost every test in # `test_filter`/`test_factor`/`test_classifier`, and fixing # them would require updating all those tests to compute with # more specialized terms. 
Once we have full specialization, we # can fix all the tests without a large volume of edits by # simply specializing their workspaces, so for now I'm leaving # this in place as a somewhat sharp edge. if isinstance(term, LoadableTerm): raise ValueError( "Loadable workspace terms must be specialized to a " "domain, but got generic term {}".format(term) ) elif term.domain != graph.domain: raise ValueError( "Initial workspace term {} has domain {}. " "Does not match pipeline domain {}".format( term, term.domain, graph.domain, ) ) def _resolve_domain(self, pipeline): """Resolve a concrete domain for ``pipeline``. """ domain = pipeline.domain(default=self._default_domain) if domain is GENERIC: raise ValueError( "Unable to determine domain for Pipeline.\n" "Pass domain=<desired domain> to your Pipeline to set a " "domain." ) return domain def _is_special_root_term(self, term): return ( term is self._root_mask_term or term is self._root_mask_dates_term )
import scipy
import datetime
import calendar
import numpy as np
from dateutil.relativedelta import relativedelta
import math


# Takes in the start date for the Day Ahead and converts it into a datetime
# with daily resolution.
def da_date(start_date):
    """Return the Day-Ahead date: the calendar day after ``start_date``.

    BUG FIX: the previous implementation built
    ``date(year, month, day + 1)`` directly, which raised ``ValueError``
    whenever ``start_date`` was the last day of a month.  ``timedelta``
    arithmetic rolls month and year boundaries correctly.
    """
    next_day = start_date + datetime.timedelta(days=1)
    # Rebuild as a plain date so a datetime input still yields a date.
    return datetime.date(next_day.year, next_day.month, next_day.day)


# Takes in the start date for the Balance of Month and returns a list of all
# days remaining in the BOM.
def bom_dates(start_date):
    """Return the remaining days of ``start_date``'s month, from day+2 on.

    The window starts two days after ``start_date`` (the day-ahead slot is
    handled separately by :func:`da_date`).  Returns an empty list when
    fewer than two days remain in the month; the caller
    (``date_ranges``) is expected to drop the BOM range in that case.
    """
    start_year, start_month = start_date.year, start_date.month
    start_day = start_date.day + 2
    # range() is simply empty when start_day exceeds the month length, so
    # no invalid dates are ever constructed here.
    bom_days = calendar.monthrange(start_year, start_month)[1]
    return [datetime.date(start_year, start_month, day)
            for day in range(start_day, bom_days + 1)]


# Takes in the start date for BOM and returns a list of all days in the first
# full month following BOM.  If num_steps is larger than 1, the function will
# return a list of lists containing each full month for each step defined in
# num_steps.
def eom_dates(start_date, num_steps):
    """Return one list of days per full month after ``start_date``.

    Parameters
    ----------
    start_date : datetime.date
        Anchor date; the first returned month is the month after it.
    num_steps : int
        How many consecutive full months to generate.
    """
    # Monthly resolution only for now.
    eoms = []
    for step in range(1, num_steps + 1):
        anchor = start_date + relativedelta(months=+step)
        days_in_month = calendar.monthrange(anchor.year, anchor.month)[1]
        eoms.append([datetime.date(anchor.year, anchor.month, day)
                     for day in range(1, days_in_month + 1)])
    return eoms


# Return the DA, BOM and EOM for a given number of steps.
# TODO: Implement other date systems.
def date_ranges(start_date, num_eoms, date_system='monthly'):
    """Build the list of delivery-date ranges (DA, BOM, EOMs) for a curve.

    :param start_date: anchor date
    :param num_eoms: number of full-month (EOM) ranges to append
    :param date_system: reserved for future date systems; only 'monthly'
        is implemented
    :return: list of date lists: ``[[da]] (+ [bom]) + eoms``
    """
    # Day ahead
    da = da_date(start_date)
    # Balance of month
    bom = bom_dates(start_date)
    # Full months
    eom = eom_dates(start_date, num_eoms)
    # If the day after tomorrow already falls in the next month there is
    # no BOM stub left, so it is omitted.
    if (start_date + relativedelta(days=+2)).month != start_date.month:
        return [[da]] + eom
    return [[da]] + [bom] + eom


def price_ranges(date_ranges, forward_prices):
    """Expand one forward price per date range into a price per date.

    Ex: price_ranges([[1,2,3], [4,5,6]], [1,2]) -> [[1,1,1],[2,2,2]]

    :raises ValueError: if the two lists differ in length
    """
    if len(date_ranges) != len(forward_prices):
        raise ValueError('The number of date ranges and forwards prices need to be the same')
    return [[forward_prices[i]] * len(date_range)
            for i, date_range in enumerate(date_ranges)]


def flatten_ranges(ranges, no_overlap=False):
    """Flatten a 2D list of ranges into a 1D list of values.

    Ex: [[1,2],[3,4]] -> [1,2,3,4]
    If consecutive ranges share their boundary value, set no_overlap=True
    to rebuild a contiguous run instead:
    Ex: [[1,2,3],[3,4,5]] -> [1,2,3,4]
    """
    if no_overlap:
        return [i for i in range(ranges[0][0], ranges[-1][-1])]
    return [item for sublist in ranges for item in sublist]


def midpoint_values(ranges, include_last=False):
    """Return the value at the midpoint of each range.

    Ex: [[3, 5, 6], [3, 1, 8, 7]] -> [5, 8]
    With include_last=True the very last value is appended as well.
    """
    midpoints = [r[len(r) // 2] for r in ranges]
    if include_last:
        midpoints.append(ranges[-1][-1])
    return midpoints


def midpoint_relative_index(ranges):
    """Return each range's midpoint index relative to that range's start.

    Ex: [[3, 5, 6, 3], [3, 1, 8, 7, 8]] -> [2, 2]
    """
    return [len(r) // 2 for r in ranges]


def midpoint_absolute_index(ranges, include_last=False):
    """Return each range's midpoint index counted from the start of the
    first range.

    Ex: [[3, 5, 6], [3, 1, 8, 7]] -> [1, 5]
    With include_last=True the overall last index is appended as well.
    """
    absolute_index = []
    len_count = 0
    for r in ranges:
        absolute_index.append(len_count + len(r) // 2)
        len_count += len(r)
    if include_last:
        absolute_index.append(len_count - 1)
    return absolute_index


def start_end_absolute_index(ranges, overlap=0):
    """Return the absolute [start, end] index of each range.

    Ex: [[3, 5, 6], [3, 1, 8, 7]] -> [[0, 2], [3, 6]]
    With overlap=1: [[0, 3], [3, 7]]
    """
    se_index = []
    len_count = 0
    for r in ranges:
        se_index.append([len_count, len_count + len(r) - 1 + overlap])
        len_count += len(r)
    return se_index


def full_index(se_index):
    """Expand [start, end] pairs into the full inclusive index runs.

    Ex: [[0, 2], [3, 7]] -> [[0, 1, 2], [3, 4, 5, 6, 7]]
    """
    return [[j for j in range(r[0], r[1] + 1)] for r in se_index]


def knot_index(ranges):
    """Return the midpoint between each pair of adjacent range boundaries.

    Ex: [[1, 2], [2, 6], [6, 10]] -> [2, 6]

    :raises ValueError: if fewer than 2 ranges are given
    """
    if len(ranges) < 2:
        raise ValueError('There must be at least 2 ranges in the list')
    return [(ranges[i][-1] + ranges[i + 1][0]) // 2
            for i in range(len(ranges) - 1)]


def get_ranges(start_date, prices):
    """Build flattened x (dates) and y (prices) series plus the underlying
    date and price range structures for a forward curve.

    :raises ValueError: if fewer than 2 prices are given
    """
    if len(prices) < 2:
        raise ValueError('The price list must contain at least 2 values')
    dr = date_ranges(start_date, len(prices) - 2)
    x = flatten_ranges(dr)
    pr = price_ranges(dr, prices)
    y = flatten_ranges(pr)
    return x, y, dr, pr
from lib.imports.default import *
import lib.permissions.applications.get as get_application
import lib.permissions.users.add as add_user


def call(**kwargs):
    """Grant ``kwargs["permission"]`` to user ``kwargs["uid"]`` on
    ``kwargs["application"]`` and return the refreshed application
    permissions via ``get_application.call``.
    """
    app = kwargs["application"]
    uid = kwargs["uid"]
    selector = {
        "application": app,
        "uid": uid,
    }

    db = Manager().db("appinit")

    # Create the user's permission document first if it does not exist yet.
    if db.permissions.find_one(selector) is None:
        add_user.call(uid=uid, application=app)

    # Append the new permission to the user's permission list.
    db.permissions.update(
        selector,
        {
            "$push": {
                "permissions": kwargs["permission"]
            }
        }
    )
    return get_application.call(application=app)
# -*- coding: utf-8 -*-
# pylint: disable=C0103,R0201,W0401,W0614,W0621
#   C0103   Invalid name (setUp(), ...)
#   R0201   Method could be a function
#   W0401   Wildcard import
#   W0614   Unused import ... from wildcard import
#   W0621   Redefining name ... from outer scope

from __future__ import absolute_import
import pytest
from behave.formatter import ansi_escapes
from six.moves import range

# --------------------------------------------------------------------------
# TEST SUPPORT and TEST DATA
# --------------------------------------------------------------------------
# Plain texts used as fixtures; one contains a newline on purpose.
TEXTS = [
    u"lorem ipsum",
    u"Alice and Bob",
    u"Alice\nBob",
]
ALL_COLORS = list(ansi_escapes.colors.keys())
CURSOR_UPS = [ansi_escapes.up(count) for count in range(10)]


def colorize(text, color):
    """Wrap ``text`` in the ANSI escape for ``color`` plus a reset escape.

    A falsy ``color`` yields text with only the trailing reset escape.
    """
    color_escape = ""
    if color:
        color_escape = ansi_escapes.colors[color]
    return color_escape + text + ansi_escapes.escapes["reset"]


def colorize_text(text, colors=None):
    """Colorize ``text`` character by character, cycling through ``colors``.

    NOTE(review): with a non-empty ``text`` and empty/None ``colors`` the
    ``colors[color_index]`` lookup raises IndexError; every caller in this
    module passes ALL_COLORS, so the case never triggers here.
    """
    if not colors:
        colors = []
    colors_size = len(colors)
    color_index = 0
    colored_chars = []
    for char in text:
        color = colors[color_index]
        colored_chars.append(colorize(char, color))
        color_index += 1
        if color_index >= colors_size:
            # Wrap around and reuse the first color.
            color_index = 0
    return "".join(colored_chars)


# --------------------------------------------------------------------------
# TEST SUITE
# --------------------------------------------------------------------------
def test_module_setup():
    """Ensure that the module setup (aliases, escapes) occured."""
    # colors_count = len(ansi_escapes.colors)
    aliases_count = len(ansi_escapes.aliases)
    escapes_count = len(ansi_escapes.escapes)
    # Each alias contributes a color escape and an "up" escape in addition
    # to the two base entries.
    assert escapes_count >= (2 + aliases_count + aliases_count)


class TestStripEscapes(object):
    """Behavioural tests for ansi_escapes.strip_escapes()."""

    @pytest.mark.parametrize("text", TEXTS)
    def test_should_return_same_text_without_escapes(self, text):
        # Plain text must pass through unchanged.
        assert text == ansi_escapes.strip_escapes(text)

    @pytest.mark.parametrize("text", ansi_escapes.colors.values())
    def test_should_return_empty_string_for_any_ansi_escape_color(self, text):
        # A bare color escape strips down to nothing.
        assert "" == ansi_escapes.strip_escapes(text)

    @pytest.mark.parametrize("text", ansi_escapes.escapes.values())
    def test_should_return_empty_string_for_any_ansi_escape(self, text):
        # Any escape sequence on its own strips down to nothing.
        assert "" == ansi_escapes.strip_escapes(text)

    @pytest.mark.parametrize("text", TEXTS)
    def test_should_strip_color_escapes_from_all_colored_text(self, text):
        # Per-character coloring with every known color is fully stripped.
        colored_text = colorize_text(text, ALL_COLORS)
        assert text == ansi_escapes.strip_escapes(colored_text)
        assert text != colored_text

    @pytest.mark.parametrize("text", TEXTS)
    @pytest.mark.parametrize("color", ALL_COLORS)
    def test_should_strip_color_escapes_from_text(self, text, color):
        colored_text = colorize(text, color)
        assert text == ansi_escapes.strip_escapes(colored_text)
        assert text != colored_text
        # Mixed colored + plain text: only the escapes are removed.
        colored_text2 = colorize(text, color) + text
        text2 = text + text
        assert text2 == ansi_escapes.strip_escapes(colored_text2)
        assert text2 != colored_text2

    @pytest.mark.parametrize("text", TEXTS)
    @pytest.mark.parametrize("cursor_up", CURSOR_UPS)
    def test_should_strip_cursor_up_escapes_from_text(self, text, cursor_up):
        # Cursor-movement escapes are stripped just like color escapes.
        colored_text = cursor_up + text + ansi_escapes.escapes["reset"]
        assert text == ansi_escapes.strip_escapes(colored_text)
        assert text != colored_text
from vcf2fhir.converter import Converter
import numpy as np
import random
import matplotlib.pyplot as plt

# a. Build an array of ten random integers.
random_int_array = np.random.randint(1, 50, 10)

# b. Build an array of ten random floats (fractions); this array is the
#    basis for building the three data sets below.
random_fractions_float_array = np.random.uniform(1, 20, 10) / np.random.uniform(21, 100, 10)

# c. Build a vector of five random numbers that are multiples of three.
random_devidable_by_three_array = np.random.randint(1, 20, 5) * 3


# d. Challenge: pick at random a number from the first ten Fibonacci numbers.
def create_fibonacci_array(array_lenght):
    """Return the first ``array_lenght`` Fibonacci numbers (starting 1, 1)."""
    fibonacci_array = [1, 1]
    i = 1
    while i < array_lenght - 1:
        fibonacci_array.append(fibonacci_array[i] + fibonacci_array[i - 1])
        i += 1
    return fibonacci_array


random_fibonacci_value = random.choice(create_fibonacci_array(10))

# e. Choose a random slope for the straight line; the ten random points
#    from section (b/a) serve as the x values.
random_slope_power1 = np.random.randint(1, 20, 1)

# f. Compute the y values of the line y = a*x and store them in y_line_1.
y_line_1 = random_slope_power1 * random_int_array

# g. Add Gaussian noise to every y point using np.random.normal().
y_line_1 += np.int32(np.random.normal(loc=0.0, scale=10.0, size=10))

# h. y = a*x + b: multiply x by the slope, add a scalar intercept b,
#    store the result in y_line_2 and add Gaussian noise.
random_free_value = np.random.randint(1, 20, 1)
y_line_2 = random_slope_power1 * random_int_array + random_free_value
y_line_2 += np.int32(np.random.normal(loc=0.0, scale=10.0, size=10))

# i. y = a*x^2 + b*x + c: parabola with random coefficients evaluated at
#    the same x values, stored in y_line_3 with Gaussian noise added.
random_slope_power2 = np.random.randint(1, 20, 1)
y_line_3 = random_slope_power2 * random_int_array**2 + random_slope_power1 * random_int_array + random_free_value
y_line_3 += np.int32(np.random.normal(loc=0.0, scale=100.0, size=10))

# Linear regression via the normal equation:
#   h = ((X^T X)^-1) (X^T y)
# X: design matrix [n samples, m features]; y: observed values;
# h: fitted coefficients.

# y = a*x (line through the origin)
x_set1 = np.reshape(random_int_array, (len(random_int_array), 1))
var1 = np.linalg.inv(x_set1.T @ x_set1)  # (X^T X)^-1
var2 = x_set1.T @ y_line_1               # X^T y
h_1 = var1 * var2
print('The coefficient of a linear equation passing through (0,0):', h_1)

# y = a*x + b — add a column of ones so the intercept can be fitted.
x_once = np.ones((len(random_int_array), 1))
x_set2 = np.column_stack((x_set1, x_once))
var1 = np.linalg.inv(x_set2.T @ x_set2)  # (X^T X)^-1
var2 = x_set2.T @ y_line_2               # X^T y
h_2 = var1 @ var2
print('The coefficients of a linear equation passing through (0,{}): {}'
      .format(random_free_value, h_2))

# y = a*x^2 + b*x + c — three feature columns: x^2, x and ones
# (joined with np.column_stack).
x_set3 = np.column_stack((x_set1**2, x_set2))
var1 = np.linalg.inv(x_set3.T @ x_set3)  # (X^T X)^-1
var2 = x_set3.T @ y_line_3               # X^T y
h_3 = var1 @ var2
print('The coefficients of a parabola equation passing through (0,{}): {}'
      .format(random_free_value, h_3))

################
### Plotting ###
################
x_set_non_random = np.arange(0, 40, 1)

# Fit 1: line through the origin.
y_set_non_random = (h_1 * x_set_non_random).T
figure, axis = plt.subplots(1, 1)
plt.scatter(x_set1, y_line_1)
plt.plot(x_set_non_random, y_set_non_random, color='r')
plt.show()

# Fit 2: line with intercept. (The original code created an extra empty
# figure here; one subplots() call per plot is enough.)
y_set_non_random = (h_2[0] * x_set_non_random + h_2[1]).T
figure, axis = plt.subplots(1, 1)
plt.scatter(x_set1, y_line_2)
plt.plot(x_set_non_random, y_set_non_random, color='r')
plt.show()

# Fit 3: parabola.
y_set_non_random = (h_3[0] * x_set_non_random**2 + h_3[1] * x_set_non_random + h_3[2]).T
figure, axis = plt.subplots(1, 1)
plt.scatter(x_set1, y_line_3)
plt.plot(x_set_non_random, y_set_non_random, color='r')
plt.show()

#################
### Challange ###
#################
# Model: y = a * exp(b*x^2 + c*x)
# Taking logs linearizes it:  ln(y) = ln(a) + b*x^2 + c*x,
# which is linear in the feature columns [x, x^2, 1].
x = np.array([0.08750722, 0.01433097, 0.30701415, 0.35099786, 0.80772547, 0.16525226,
              0.46913072, 0.69021229, 0.84444625, 0.2393042, 0.37570761, 0.28601187, 0.26468939,
              0.54419358, 0.89099501, 0.9591165, 0.9496439, 0.82249202, 0.99367066, 0.50628823])
y = np.array([4.43317755, 4.05940367, 6.56546859, 7.26952699, 33.07774456, 4.98365345,
              9.93031648, 20.68259753, 38.74181668, 5.69809299, 7.72386118, 6.27084933,
              5.99607266, 12.46321171, 47.70487443, 65.70793999, 62.7767844,
              35.22558438, 77.84563303, 11.08106882])

# Design matrix built from the GIVEN x values.  (The original code fitted a
# freshly drawn random vector against y, which cannot recover the
# coefficients of this data set.)
x_lan_set_a = np.ones(20)
x_lan_set_ab = np.column_stack((x**2, x_lan_set_a))  # columns [x^2, 1]
x_lan_set_abc = np.column_stack((x, x_lan_set_ab))   # columns [x, x^2, 1]
y_lan = np.log(y)
var1 = np.linalg.inv(x_lan_set_abc.T @ x_lan_set_abc)  # (X^T X)^-1
var2 = x_lan_set_abc.T @ y_lan                         # X^T ln(y)
h_lan = var1 @ var2  # [c, b, ln(a)] matching the column order above

x_set_non_random = np.arange(0, 1, 0.1)
# Undo the log transform: a = exp(h_lan[2]); exponent uses b = h_lan[1]
# for x^2 and c = h_lan[0] for x.  (The original multiplied by ln(a)
# instead of exp(ln(a)) and used h_lan[1] for both terms.)
y_set_non_random = (np.exp(h_lan[2])
                    * np.exp(h_lan[1] * x_set_non_random**2 + h_lan[0] * x_set_non_random)).T
figure, axis = plt.subplots(1, 1)
plt.scatter(x, y)
plt.plot(x_set_non_random, y_set_non_random, color='r')
plt.show()
from .base import Field, Model


class TicketData(Model):
    """One ticket price record: issuing authority, fare level and price."""

    authority = Field(str)      # issuing authority name
    level = Field(str)          # fare level / ticket class
    price = Field(float)        # adult price
    price_child = Field(float)  # child price (optional)

    def __eq__(self, other):
        # NOTE(review): tri-state comparison — returns True/False when the
        # data allows a decision and None ("unknown") when authority/level
        # are missing or differ with matching prices.  Callers appear to
        # rely on the three-way result, so this is deliberately not
        # normalized to bool/NotImplemented — confirm before changing.
        # Also note: defining __eq__ without __hash__ makes instances
        # unhashable.
        if not isinstance(other, TicketData):
            return False
        if self.price != other.price:
            return False
        if self.price_child is not None and other.price_child is not None and self.price_child != other.price_child:
            return False
        if self.authority is None or self.level is None:
            return None
        if self.authority == other.authority and self.level == other.level:
            return True
        return None

    def __repr__(self):
        childprice = ''
        if self.price_child is not None:
            childprice = ' %.2f' % self.price_child
        return '<TicketData %s %s %.2f%s>' % (self.authority, self.level, self.price, childprice)


class TicketList(Model):
    """A collection of ticket prices (single, bike, ...) in one currency."""

    currency = Field(str)
    level_name = Field(str)
    single = Field(TicketData)
    bike = Field(TicketData)
    # other = Field(Mapping[str, TicketData])

    def __repr__(self):
        # NOTE(review): ``self.other`` is referenced although its Field
        # declaration above is commented out — unless the Model base class
        # supplies ``other`` dynamically, this raises AttributeError.
        return '<TicketList %s %s (+%d)>' % (self.currency, repr(self.single), len(self.other))
from imgaug import augmenters as iaa
from keras.preprocessing.image import *
import cv2
from tqdm import tqdm

from config import *


# rotate images
def rotate(image, angle, center=None, scale=1.0):
    """Rotate ``image`` by ``angle`` degrees about ``center`` (image
    center by default), keeping the original width/height."""
    (h, w) = image.shape[:2]
    if center is None:
        center = (w / 2, h / 2)
    M = cv2.getRotationMatrix2D(center, angle, scale)
    rotated = cv2.warpAffine(image, M, (w, h))
    return rotated


# Test-time augmentation table: name -> (horizontal flip?, rotation angle).
# Replaces the original 14 copy-pasted elif branches (which also contained
# a stray ``a`` token before ``del X_test``).
_TTA_TRANSFORMS = {
    'default': (False, 0),
    'flip': (True, 0),
    'rotate1': (False, 5),
    'rotate2': (False, -5),
    'rotate3': (True, 5),
    'rotate4': (True, -5),
    'rotate5': (False, 13),
    'rotate6': (False, -13),
    'rotate7': (True, 13),
    'rotate8': (True, -13),
    'rotate9': (False, 21),
    'rotate10': (False, -21),
    'rotate11': (True, 21),
    'rotate12': (True, -21),
}


# load and preprocess images
def process(aug, model, width, fnames_test, n_test):
    """Load the first ``n_test`` test images, apply the test-time
    augmentation named ``aug`` and return ``model``'s predictions.

    :param aug: key of _TTA_TRANSFORMS ('default', 'flip', 'rotate1'..'rotate12')
    :param model: Keras model with a ``predict`` method
    :param width: square size images are resized to
    :param fnames_test: list of image file paths
    :param n_test: number of images to process
    :raises ValueError: for an unknown ``aug`` name (the original silently
        predicted on an all-zero batch in that case)
    """
    try:
        flip, angle = _TTA_TRANSFORMS[aug]
    except KeyError:
        raise ValueError('unknown augmentation: %s' % aug)

    X_test = np.zeros((n_test, width, width, 3), dtype=np.uint8)
    for i in tqdm(range(n_test)):
        img = cv2.resize(cv2.imread(fnames_test[i]), (width, width))
        if flip:
            img = cv2.flip(img, 1)
        if angle:
            img = rotate(img, angle)
        X_test[i] = img[:, :, ::-1]  # BGR -> RGB
    y_pred = model.predict(X_test, batch_size=32, verbose=1)
    del X_test  # free the image batch before returning
    return y_pred


# data augmentation
def customizedImgAug(input_img):
    """Apply a randomized imgaug training-augmentation pipeline to a
    single image and return the augmented copy."""
    rarely = lambda aug: iaa.Sometimes(0.1, aug)
    sometimes = lambda aug: iaa.Sometimes(0.25, aug)
    often = lambda aug: iaa.Sometimes(0.5, aug)
    seq = iaa.Sequential([
        iaa.Fliplr(0.5),
        often(iaa.Affine(
            scale={"x": (0.9, 1.1), "y": (0.9, 1.1)},
            translate_percent={"x": (-0.1, 0.1), "y": (-0.12, 0)},
            rotate=(-10, 10),
            shear=(-8, 8),
            order=[0, 1],
            cval=(0, 255),
        )),
        iaa.SomeOf((0, 4), [
            rarely(
                iaa.Superpixels(
                    p_replace=(0, 0.3),
                    n_segments=(20, 200)
                )
            ),
            iaa.OneOf([
                iaa.GaussianBlur((0, 2.0)),
                iaa.AverageBlur(k=(2, 4)),
                iaa.MedianBlur(k=(3, 5)),
            ]),
            iaa.Sharpen(alpha=(0, 0.3), lightness=(0.75, 1.5)),
            iaa.Emboss(alpha=(0, 1.0), strength=(0, 0.5)),
            rarely(iaa.OneOf([
                iaa.EdgeDetect(alpha=(0, 0.3)),
                iaa.DirectedEdgeDetect(
                    alpha=(0, 0.7), direction=(0.0, 1.0)
                ),
            ])),
            iaa.AdditiveGaussianNoise(
                loc=0, scale=(0.0, 0.05 * 255), per_channel=0.5
            ),
            iaa.OneOf([
                iaa.Dropout((0.0, 0.05), per_channel=0.5),
                iaa.CoarseDropout(
                    (0.03, 0.05), size_percent=(0.01, 0.05),
                    per_channel=0.2
                ),
            ]),
            rarely(iaa.Invert(0.05, per_channel=True)),
            often(iaa.Add((-40, 40), per_channel=0.5)),
            iaa.Multiply((0.7, 1.3), per_channel=0.5),
            iaa.ContrastNormalization((0.5, 2.0), per_channel=0.5),
            iaa.Grayscale(alpha=(0.0, 1.0)),
            sometimes(iaa.PiecewiseAffine(scale=(0.01, 0.03))),
            sometimes(
                iaa.ElasticTransformation(alpha=(0.5, 1.5), sigma=0.25)
            ),
        ], random_order=True),
        iaa.Fliplr(0.5),
        iaa.AddToHueAndSaturation(value=(-10, 10), per_channel=True)
    ], random_order=True)  # apply augmenters in random order
    output_img = seq.augment_image(input_img)
    return output_img


# generate data
class Generator():
    """Infinite mini-batch generator over (X, y) with optional augmentation.

    ``y`` is a list of label arrays (one per model output); each batch
    yields matching slices of X and every label array.
    """

    def __init__(self, X, y, batch_size=8, aug=False):
        def generator():
            while True:
                for i in range(0, len(X), batch_size):
                    X_batch = X[i:i + batch_size].copy()
                    y_batch = [labels[i:i + batch_size] for labels in y]
                    if aug:
                        for j in range(len(X_batch)):
                            X_batch[j] = customizedImgAug(X_batch[j])
                    yield X_batch, y_batch

        self.generator = generator()
        # Batches per epoch, via ceiling division.  The original
        # ``len(X) // batch_size + 1`` over-counted by one whenever
        # len(X) was an exact multiple of batch_size.
        self.steps = -(-len(X) // batch_size)
#------------------------------------------------------------------------------
# Copyright (c) 2008, Riverbank Computing Limited
# All rights reserved.
#
# This software is provided without warranty under the terms of the BSD
# license included in enthought/LICENSE.txt and may be redistributed only
# under the conditions described in the aforementioned license.  The license
# is also available online at http://www.enthought.com/licenses/BSD.txt
# Thanks for using Enthought open source!
#
# Author: Riverbank Computing Limited
# Description: <Enthought application scripting package component>
#------------------------------------------------------------------------------


# Enthought library imports.
from pyface.action.api import Action
from traits.api import Bool, Unicode

# Local imports.
from apptools.appscripting.package_globals import get_script_manager


class StopRecordingAction(Action):
    """An action that stops the recording of changes to scriptable objects
    to a script."""

    #### 'Action' interface ###################################################

    # Disabled until the script manager reports that a recording is
    # actually in progress (see _on_recording below).
    enabled = Bool(False)

    name = Unicode("Stop recording")

    ###########################################################################
    # 'object' interface.
    ###########################################################################

    def __init__(self, **traits):
        """ Initialise the instance and subscribe to the script manager's
        ``recording`` trait so the action tracks its state. """

        super(StopRecordingAction, self).__init__(**traits)

        manager = get_script_manager()
        manager.on_trait_change(self._on_recording, 'recording')

    ###########################################################################
    # 'Action' interface.
    ###########################################################################

    def perform(self, event):
        """ Perform the action: stop the current recording. """

        get_script_manager().stop_recording()

    ###########################################################################
    # Private interface.
    ###########################################################################

    def _on_recording(self, new):
        """ Mirror the script manager's ``recording`` trait in ``enabled``. """

        self.enabled = new
import sys
import os
import tempfile
import subprocess

# Path to the bundled smina docking binary.
SMINA_PATH = os.path.join(os.getcwd(), 'nanome_docking', 'smina', 'smina_binary')


class DockingCalculations():
    """Runs smina docking jobs and reports progress to the plugin's
    loading bar."""

    def __init__(self, plugin):
        self.plugin = plugin
        self.requires_site = True        # smina needs a site to autobox around
        self.loading_bar_counter = 0     # asterisks seen so far across all ligands

    async def start_docking(self, receptor_pdb, ligand_pdbs, site_pdb, temp_dir, exhaustiveness=None, modes=None, autobox=None, deterministic=None, **kwargs):
        """Dock every ligand in ``ligand_pdbs`` against ``receptor_pdb``
        and return the list of smina output SDF temp files."""
        # Start docking process
        self.loading_bar_counter = 0
        log_file = tempfile.NamedTemporaryFile(delete=False, dir=temp_dir)
        smina_output_sdfs = []
        ligand_count = len(ligand_pdbs)
        for ligand_pdb in ligand_pdbs:
            output_sdf = tempfile.NamedTemporaryFile(delete=False, prefix="output", suffix=".sdf", dir=temp_dir)
            process = self.run_smina(ligand_pdb, receptor_pdb, site_pdb, output_sdf, log_file, exhaustiveness, modes, autobox, ligand_count, deterministic)
            # NOTE(review): run_smina() already calls handle_loading_bar(),
            # which consumes the process's stdout iterator; this second call
            # will find the stream exhausted.  Confirm which call is intended.
            self.handle_loading_bar(process, ligand_count)
            smina_output_sdfs.append(output_sdf)
        return smina_output_sdfs

    def run_smina(self, ligand_pdb, receptor_pdb, site_pdb, output_sdf, log_file, exhaustiveness=None, modes=None, autobox=None, ligand_count=1, deterministic=False, **kwargs):
        """Launch one smina subprocess for ``ligand_pdb`` and return the
        Popen handle after streaming its progress to the loading bar."""
        smina_args = [
            '-r', receptor_pdb.name,
            '-l', ligand_pdb.name,
            '--autobox_ligand', site_pdb.name,
            '--out', output_sdf.name,
            '--log', log_file.name,
            '--exhaustiveness', str(exhaustiveness),
            '--num_modes', str(modes),
            '--autobox_add', str(autobox),
            '--atom_term_data'
        ]
        # To make runs deterministic, we manually set the seed. Otherwise random seed is used.
        if deterministic:
            seed = '12345'
            smina_args.extend(['--seed', seed])
        cmd = [SMINA_PATH, *smina_args]
        process = subprocess.Popen(cmd, stdout=subprocess.PIPE)
        self.handle_loading_bar(process, ligand_count)
        return process

    def handle_loading_bar(self, process, ligand_count):
        """Render loading bar from stdout on the menu.

        stdout has a loading bar of asterisks. Every asterisk represents
        about 2% completed
        """
        # smina prints ~51 asterisks per docking run, so scale the total
        # by the number of ligands being docked.
        stars_per_complex = 51
        total_stars = stars_per_complex * ligand_count
        # Read stdout one byte at a time until EOF, counting asterisks.
        for c in iter(lambda: process.stdout.read(1), b''):
            if c.decode() == '*':
                self.loading_bar_counter += 1
                self.plugin.update_loading_bar(self.loading_bar_counter, total_stars)
            sys.stdout.buffer.write(c)  # echo smina's output to our stdout
# NOTE(review): Python 2 module — `print` statements, the `thread` module
# and integer division (e.g. len(vector)/factor) are all py2-only.
import sys, wx, numpy, wx.lib.plot, plot, thread, time
wx.lib.plot = plot   # force our local `plot` module in place of wx's
from __main__ import DEBUG, USE_THREADS
DEBUG = '--debug' in sys.argv


class PlotCanvas(plot.PlotCanvas):
    """Time-series plot canvas with downsampling; guarded by a mutex when
    updated from a worker thread (USE_THREADS)."""

    def __init__(self, parent, color='green'):
        plot.PlotCanvas.__init__(self, parent)
        self.mutex = thread.allocate_lock()
        self.color = color
        self.SetMinSize((200, 50))
        self.SetInitialSize((1280,420))

    def Downsample(self, vector, factor):
        """
        downsample(vector, factor):
            Downsample (by averaging) a vector by an integer factor.
        """
        # Trim the tail so the length divides evenly, then average each
        # group of `factor` samples (reshape relies on py2 integer /).
        rem = (len(vector) % factor)
        if rem:
            vector = vector[:-rem]
        vector.shape = (len(vector)/factor, factor)
        return numpy.mean(vector, axis=1)

    def Update(self, data, times, decimate=True, xLabel="Time (minutes)", yLabel="HEG ratio", title='', zero_x=True):
        """Redraw the canvas from parallel `data`/`times` sequences,
        truncating to the shorter of the two, optionally zeroing the time
        axis and decimating long series to ~2048 points."""
        t1 = time.time()
        #minpersample = .008192 / 60. * 4. # minutes per sample; ~= 30.5 hz
        #minpersample = 1. / 10.1 / 60.
        data = data[:]
        print data
        times = times[:]
        # Keep the two series the same length.
        if len(data) < len(times):
            times = times[:len(data)]
        elif len(times) < len(data):
            data = data[:len(times)]
        data1 = numpy.zeros((len(data), 2))
        data1[:, 1] = data
        data1[:, 0] = times
        if zero_x:
            # Shift so the first sample is at time zero.
            data1[:, 0] = (data1[:, 0] - data1[0, 0])
        if not 'seconds' in xLabel:
            # Times arrive in seconds; convert for the minutes axis label.
            data1[:, 0] /= 60.
        #data1[:,0] = numpy.arange(len(data)) * minpersample
        #decimate = False
        if decimate:
            # Reduce to at most ~2048 plotted points.
            decimate = 1 + len(data)/2048
            data2 = numpy.zeros((len(data)/decimate, 2))
            data2[:,0] = self.Downsample(data1[:,0], decimate)
            data2[:,1] = self.Downsample(data1[:,1], decimate)
        else:
            data2 = data1
        #data2 = data1[::decimate, :]
        t2 = time.time()
        #lines = wx.lib.plot.PolyMarker(data2, colour=self.color, fillstyle=wx.TRANSPARENT)
        #lines = wx.lib.plot.PolySpline(data2, colour=self.color)
        lines = wx.lib.plot.PolyLine(data2, colour=self.color)
        graph = wx.lib.plot.PlotGraphics([lines], xLabel=xLabel, yLabel=yLabel, title=title)
        t3 = time.time()
        if wx.version() < '4':
            # wx 3.x raises PyDeadObjectError if the window is being torn
            # down while we draw; wx 4 removed that exception type.
            try:
                self.Draw(graph)
            except wx.PyDeadObjectError:
                pass
        else:
            self.Draw(graph)
        t4 = time.time()
        #if DEBUG: print "%1.4f %1.4f %1.4f" % (t4-t3, t3-t2, t2-t1), decimate, data1.shape, data2.shape
        if USE_THREADS:
            try:
                self.mutex.release()
            except thread.error:
                # usually happens due to the initial
                # plot.Update([100., 100.]) call during init
                pass#if DEBUG:
                #    print "Plot mutex not locked when release attempted."

    #def GetXCurrentRange(self):
    #    xr = super(plot.PlotCanvas, self).GetXCurrentRange()


class BarGraphMeter(plot.PlotCanvas):
    """A single vertical bar rendered on a plot canvas, used as a level
    meter; the y-range can be pinned with SetYRange()."""

    def __init__(self, parent, color='red', width=40, *args, **kwargs):
        plot.PlotCanvas.__init__(self, parent, *args, **kwargs)
        self.color = color
        self.ymin, self.ymax = None, None
        self.width = width
        self.SetMinSize((40, 100))
        self.SetInitialSize((80,150))
        self.title = ''
        self.xlabel = ''
        self.ylabel = ''

    def SetYRange(self, ymin=None, ymax=None):
        # Pin the y axis; pass None, None to restore auto-ranging.
        self.ymin, self.ymax = ymin, ymax

    def SetColor(self, color):
        self.color = color

    # def _getCurrentRange(self):
    #     """Returns (minY, maxY) y-axis range for displayed graph"""
    #     if self.ymin != None and self.ymax != None:
    #         if DEBUG: print self._axisInterval(self._ySpec, self.ymin, self.ymax)
    #         return self._axisInterval(self._ySpec, self.ymin, self.ymax)
    #     else:
    #         return plot.PlotCanvas._getCurrentRange(self)

    # def _getYMaxRange(self):
    #     """Returns (minY, maxY) y-axis range for displayed graph"""
    #     if self.ymin != None and self.ymax != None:
    #         if DEBUG: print self._axisInterval(self._ySpec, self.ymin, self.ymax)
    #         return self._axisInterval(self._ySpec, self.ymin, self.ymax)
    #     graphics= self.last_draw[0]
    #     p1, p2 = graphics.boundingBox()     # min, max points of graphics
    #     yAxis = self._axisInterval(self._ySpec, p1[1], p2[1])
    #     return yAxis

    def SetValue(self, value):
        """Draw the bar at height ``value`` (a vertical line at x=1)."""
        points1=[(1,0), (1,value)]
        line1 = wx.lib.plot.PolyLine(points1, colour=self.color, width=self.width)
        graph = wx.lib.plot.PlotGraphics([line1], self.title, self.xlabel, self.ylabel)
        if self.ymin != None and self.ymax != None:
            yAxis = (self.ymin, self.ymax)
        else:
            yAxis = None
        self.Draw(graph, yAxis=yAxis)
import logging
import logging.config
import os
import shutil
import sys
from typing import List, Optional

import click
import pkg_resources
from plumbum import ProcessExecutionError
from pyfzf import FzfPrompt

from . import utils
from .config import config
from .parse import parse_pipeline_file
from .runner import PipelineRunner, PipelineRunRequest

logger = logging.getLogger(__name__)


def _init_logger():
    # Logging is configured lazily, once a command runs, so that the
    # --color/--no-color flags can be written into ``config`` first.
    logging.config.dictConfig(config.log_config)


def _get_pipelines_list(pipeline_file: str) -> List[str]:
    """Return the names of all pipelines defined in ``pipeline_file``."""
    pipelines_definition = parse_pipeline_file(pipeline_file)
    return pipelines_definition.get_available_pipelines()


def _prompt_for_pipeline(pipeline_file) -> Optional[str]:
    """Let the user pick a pipeline interactively with fzf.

    Returns None when fzf is unavailable or nothing was selected.
    """
    pipeline = None
    pipelines = _get_pipelines_list(pipeline_file)
    try:
        fzf = FzfPrompt()
        pipeline = next(iter(fzf.prompt(pipelines)))
    except SystemError:
        # Raised by pyfzf when the fzf binary is not on PATH.
        logger.warning("fzf executable not found, disabling interactive pipeline selection.")
    except ProcessExecutionError:
        # fzf exited without a selection (e.g. the user pressed Esc).
        logger.warning("No pipeline selected")
    return pipeline


@click.group("Pipeline Runner", invoke_without_command=True)
@click.option(
    "-V",
    "--version",
    "show_version",
    is_flag=True,
    help="Print project version and exit.",
)
@click.pass_context
def main(ctx, show_version):
    """Top-level command group; with no subcommand it prints help (or the
    version when -V is given) and exits."""
    if show_version:
        print(f"Pipeline Runner {pkg_resources.get_distribution('bitbucket_pipeline_runner').version}")
        ctx.exit()
    if not ctx.invoked_subcommand:
        print(ctx.get_help())
        ctx.exit(1)


@main.command()
@click.argument("pipeline", default="")
@click.option(
    "-r",
    "--repository-path",
    help="Path to the git repository. Defaults to current directory.",
)
@click.option(
    "-s",
    "--step",
    "steps",
    multiple=True,
    help="Steps to run. If none are specified, they will all be run. Can be specified multiple times.",
)
@click.option(
    "-e",
    "--env-file",
    "env_files",
    multiple=True,
    help="Read in a file of environment variables. Can be specified multiple times.",
)
@click.option(
    "-c",
    "--color/--no-color",
    default=True,
    help="Enable colored output. Default: True",
)
@click.option(
    "--cpu-limits/--no-cpu-limits",
    default=False,
    help="Enable to enforce cpu limits for the runner. Default: False",
)
def run(pipeline, repository_path, steps, env_files, color, cpu_limits):
    """
    Runs the pipeline PIPELINE.

    PIPELINE is the full path to the pipeline to run. Ex: branches.master
    """
    config.color = color
    config.cpu_limits = cpu_limits
    _init_logger()

    # No pipeline on the command line: fall back to interactive selection.
    if not pipeline:
        pipeline = _prompt_for_pipeline(os.path.join(repository_path or ".", "bitbucket-pipelines.yml"))

    if not pipeline:
        logger.error("pipeline not specified")
        sys.exit(2)

    req = PipelineRunRequest(pipeline, repository_path, steps, env_files)

    runner = PipelineRunner(req)
    try:
        runner.run()
    except Exception as e:
        logger.exception(str(e))
        sys.exit(1)


@main.command("list")
@click.option(
    "-r",
    "--repository-path",
    help="Path to the git repository. Defaults to current directory.",
)
@click.option(
    "-c",
    "--color/--no-color",
    default=True,
    help="Enable colored output",
)
def list_(repository_path, color):
    """
    List the available pipelines.
    """
    config.color = color
    _init_logger()

    pipelines = _get_pipelines_list(os.path.join(repository_path or ".", "bitbucket-pipelines.yml"))
    logger.info("Available pipelines:\n\t%s", "\n\t".join(sorted(pipelines)))


@main.command()
@click.argument("pipeline", default="")
@click.option(
    "-r",
    "--repository-path",
    help="Path to the git repository. Defaults to current directory.",
)
def parse(pipeline, repository_path):
    """
    Parse the pipeline file and print it as JSON; with a PIPELINE argument,
    print only that pipeline.
    """
    pipeline_file = os.path.join(repository_path or ".", "bitbucket-pipelines.yml")
    pipelines_definition = parse_pipeline_file(pipeline_file)

    if pipeline:
        parsed = pipelines_definition.get_pipeline(pipeline)
        if not parsed:
            raise ValueError(f"Invalid pipeline: {pipeline}")
    else:
        parsed = pipelines_definition

    print(parsed.json(indent=2))


@main.command()
@click.argument("action", type=click.Choice(["clear", "list"]))
def cache(action):
    """List or clear the per-project cache directories."""
    cache_dir = utils.get_cache_directory()
    projects = sorted(os.listdir(cache_dir))

    if action == "list":
        print("Caches:")
        print("\n".join(map(lambda i: f"\t{i}", projects)))
    elif action == "clear":
        for p in projects:
            shutil.rmtree(os.path.join(cache_dir, p))


if __name__ == "__main__":
    main()
#! /usr/bin/env python import sys for linha in sys.stdin: try: campos = linha.split(';') mercadoria = str(campos[3]) print '%s\t%s' % (mercadoria, 1) except: continue
import cv2
import numpy as np
from PIL import Image
import os

# Force CPU execution; must be set before TensorFlow initializes.
os.environ['CUDA_VISIBLE_DEVICES'] = '-1'
import tensorflow as tf
from tensorflow.python.platform import gfile


def image_normalized(img):
    """Resize *img* so its longer side is at most 600 px and both sides are
    multiples of 16, then scale pixel values into [0, 1].

    :param img: H x W x C image array (uint8).
    :return: ``(batch, original_size)`` where *batch* has shape
             (1, H', W', C) with float values in [0, 1], and
             *original_size* is ``(width, height)`` of the input
             (the (w, h) order cv2/PIL expect).
    """
    MAX_LEN = 600

    # NOTE(review): the original bound names h/w to the *opposite*
    # dimensions (h held width); renamed here for clarity — the resulting
    # arithmetic is unchanged.
    height, width = img.shape[0], img.shape[1]
    image_size = (width, height)

    # Shrink the longer side down to MAX_LEN, preserving aspect ratio.
    if height < width:
        if width > MAX_LEN:
            scale = 1.0 * MAX_LEN / width
            height = height * scale
            width = MAX_LEN
    else:
        if height > MAX_LEN:
            scale = 1.0 * MAX_LEN / height
            width = width * scale
            height = MAX_LEN

    # The network requires spatial dimensions divisible by 16.
    width = int(width // 16 * 16)
    height = int(height // 16 * 16)

    # cv2.resize takes dsize as (width, height).
    img_standard = cv2.resize(img, (width, height), interpolation=cv2.INTER_AREA)
    batch = np.asarray([img_standard / 255.])
    return batch, image_size


def saveResult(npyfile, size):
    """Convert the network's sigmoid output into a binary RGB mask.

    Pixels with activation < 0.2 become white (255, all three channels);
    the mask is resized back to *size* (``(width, height)``).  Only the
    mask for the last item of *npyfile* is returned, matching the original
    behaviour (in practice the batch holds a single image).

    :param npyfile: iterable of (H, W, 1) probability maps.
    :param size: target (width, height) for the output mask.
    :return: uint8 RGB mask, or None for an empty batch.
    """
    img_std = None
    for item in npyfile:
        prob = np.squeeze(item, axis=-1)
        img_std = np.zeros((prob.shape[0], prob.shape[1], 3), dtype=np.uint8)
        # Vectorized replacement of the former per-pixel Python loop
        # (which also shadowed the outer loop variable `i`).
        img_std[prob < 0.2] = 255
        img_std = cv2.resize(img_std, size, interpolation=cv2.INTER_CUBIC)
    return img_std


# Load the frozen line-extraction graph once at import time, pinned to CPU.
sess = tf.Session()
with tf.device('/cpu:0'):
    with gfile.FastGFile('line_extract/model.pb', 'rb') as f:
        graph_def = tf.GraphDef()
        graph_def.ParseFromString(f.read())
        sess.graph.as_default()
        # Import the frozen graph into the session's default graph.
        tf.import_graph_def(graph_def, name='')
    # Initialization step kept from the original (a frozen graph normally
    # has no variables, but this is harmless).
    sess.run(tf.global_variables_initializer())

    input_x = sess.graph.get_tensor_by_name("input_1:0")
    out_softmax = sess.graph.get_tensor_by_name("conv2d_18/Sigmoid:0")


def line_detect(img):
    """Run line extraction on *img* (H x W x 3 uint8).

    The image is placed on a white canvas with a 30-px border, fed through
    the frozen model, and the resulting binary mask (border removed) is
    returned.  Side effect kept from the original: the full mask is also
    written to 'line_extract/1.jpg'.
    """
    padded = np.zeros((img.shape[0] + 60, img.shape[1] + 60, 3), dtype=np.uint8)
    padded += 255  # white canvas
    padded[30:-30, 30:-30] = img

    batch, img_size = image_normalized(padded)
    probs = sess.run(out_softmax, feed_dict={input_x: batch})
    mask = saveResult(probs, img_size)

    Image.fromarray(mask).save('line_extract/1.jpg')
    return mask[30:-30, 30:-30]
# -*- coding: utf-8 -*- #-------------------------------------------------------------------------- # # Copyright (c) Microsoft Corporation. All rights reserved. # # The MIT License (MIT) # # Permission is hereby granted, free of charge, to any person obtaining a copy # of this software and associated documentation files (the ""Software""), to deal # in the Software without restriction, including without limitation the rights # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell # copies of the Software, and to permit persons to whom the Software is # furnished to do so, subject to the following conditions: # # The above copyright notice and this permission notice shall be included in # all copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED *AS IS*, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN # THE SOFTWARE. 
#
#--------------------------------------------------------------------------
# Tests for msrest's XML serialization and deserialization support.
import sys
import xml.etree.ElementTree as ET

import pytest

from msrest.serialization import Serializer, Deserializer, Model, xml_key_extractor


def assert_xml_equals(x1, x2):
    """Recursively assert two ElementTree nodes are equivalent.

    Compares tag, stripped text, attributes and children; `tail` is
    deliberately not compared (see inline note below).  Dumps both trees
    to stdout first so pytest shows them on failure.
    """
    print("--------X1--------")
    ET.dump(x1)
    print("--------X2--------")
    ET.dump(x2)

    assert x1.tag == x2.tag
    assert (x1.text or "").strip() == (x2.text or "").strip()
    # assert x1.tail == x2.tail # Swagger does not change tail
    assert x1.attrib == x2.attrib
    assert len(x1) == len(x2)
    for c1, c2 in zip(x1, x2):
        assert_xml_equals(c1, c2)


class TestXmlDeserialization:
    """XML -> Model deserialization scenarios (attributes, lists, wrapping,
    itemsName resolution, additional properties, namespaces)."""

    def test_basic(self):
        """Test an ultra basic XML."""
        basic_xml = """<?xml version="1.0"?>
            <Data country="france">
                <Long>12</Long>
                <EmptyLong/>
                <Age>37</Age>
                <EmptyAge/>
                <EmptyString/>
            </Data>"""

        class XmlModel(Model):
            _attribute_map = {
                'longnode': {'key': 'longnode', 'type': 'long', 'xml':{'name': 'Long'}},
                'empty_long': {'key': 'empty_long', 'type': 'long', 'xml':{'name': 'EmptyLong'}},
                'age': {'key': 'age', 'type': 'int', 'xml':{'name': 'Age'}},
                'empty_age': {'key': 'empty_age', 'type': 'int', 'xml':{'name': 'EmptyAge'}},
                'empty_string': {'key': 'empty_string', 'type': 'str', 'xml':{'name': 'EmptyString'}},
                'not_set': {'key': 'not_set', 'type': 'str', 'xml':{'name': 'NotSet'}},
                'country': {'key': 'country', 'type': 'str', 'xml':{'name': 'country', 'attr': True}},
            }
            _xml_map = {
                'name': 'Data'
            }

        s = Deserializer({"XmlModel": XmlModel})
        result = s(XmlModel, basic_xml, "application/xml")

        # Empty int-typed nodes deserialize to None; empty str nodes to "".
        assert result.longnode == 12
        assert result.empty_long is None
        assert result.age == 37
        assert result.empty_age is None
        assert result.country == "france"
        assert result.empty_string == ""
        assert result.not_set is None

    def test_basic_unicode(self):
        """Test a XML with unicode."""
        basic_xml = u"""<?xml version="1.0" encoding="utf-8"?>
            <Data language="français"/>"""

        class XmlModel(Model):
            _attribute_map = {
                'language': {'key': 'language', 'type': 'str', 'xml':{'name': 'language', 'attr': True}},
            }
            _xml_map = {
                'name': 'Data'
            }

        s = Deserializer({"XmlModel": XmlModel})
        result = s(XmlModel, basic_xml, "application/xml")

        assert result.language == u"français"

    def test_add_prop(self):
        """Test addProp as a dict.
        """
        basic_xml = """<?xml version="1.0"?>
            <Data>
                <Metadata>
                  <Key1>value1</Key1>
                  <Key2>value2</Key2>
                </Metadata>
            </Data>"""

        class XmlModel(Model):
            _attribute_map = {
                'metadata': {'key': 'Metadata', 'type': '{str}', 'xml': {'name': 'Metadata'}},
            }
            _xml_map = {
                'name': 'Data'
            }

        s = Deserializer({"XmlModel": XmlModel})
        result = s(XmlModel, basic_xml, "application/xml")

        # Children of <Metadata> become dict entries keyed by element tag.
        assert len(result.metadata) == 2
        assert result.metadata['Key1'] == "value1"
        assert result.metadata['Key2'] == "value2"

    def test_object(self):
        # 'object' type: the deserializer hands back the raw XML tree.
        basic_xml = """<?xml version="1.0"?>
            <Data country="france">
                <Age>37</Age>
            </Data>"""

        s = Deserializer()
        result = s('object', basic_xml, "application/xml")

        # Should be a XML tree
        assert result.tag == "Data"
        assert result.get("country") == "france"
        for child in result:
            assert child.tag == "Age"
            assert child.text == "37"

    def test_object_no_text(self):
        # Same as test_object but with no whitespace between elements.
        basic_xml = """<?xml version="1.0"?><Data country="france"><Age>37</Age></Data>"""

        s = Deserializer()
        result = s('object', basic_xml, "application/xml")

        # Should be a XML tree
        assert result.tag == "Data"
        assert result.get("country") == "france"
        for child in result:
            assert child.tag == "Age"
            assert child.text == "37"

    def test_basic_empty(self):
        """Test an basic XML with an empty node."""
        basic_xml = """<?xml version="1.0"?>
            <Data>
                <Age/>
            </Data>"""

        class XmlModel(Model):
            _attribute_map = {
                'age': {'key': 'age', 'type': 'str', 'xml':{'name': 'Age'}},
            }
            _xml_map = {
                'name': 'Data'
            }

        s = Deserializer({"XmlModel": XmlModel})
        result = s(XmlModel, basic_xml, "application/xml")

        # An empty str-typed element deserializes to "" (not None).
        assert result.age == ""

    def test_basic_empty_list(self):
        """Test an basic XML with an empty node."""
        basic_xml = """<?xml version="1.0"?>
            <Data/>"""

        class XmlModel(Model):
            _attribute_map = {
                'age': {'key': 'age', 'type': 'str', 'xml':{'name': 'Age'}},
            }
            _xml_map = {
                'name': 'Data'
            }

        s = Deserializer({"XmlModel": XmlModel})
        result = s('[XmlModel]', basic_xml, "application/xml")

        assert result == []

    def test_list_wrapped_items_name_basic_types(self):
        """Test XML list and wrap, items is basic type and there is itemsName.
        """
        basic_xml = """<?xml version="1.0"?>
            <AppleBarrel>
                <GoodApples>
                  <Apple>granny</Apple>
                  <Apple>fuji</Apple>
                </GoodApples>
            </AppleBarrel>"""

        class AppleBarrel(Model):
            _attribute_map = {
                'good_apples': {'key': 'GoodApples', 'type': '[str]', 'xml': {'name': 'GoodApples', 'wrapped': True, 'itemsName': 'Apple'}},
            }
            _xml_map = {
                'name': 'AppleBarrel'
            }

        s = Deserializer({"AppleBarrel": AppleBarrel})
        result = s(AppleBarrel, basic_xml, "application/xml")

        assert result.good_apples == ["granny", "fuji"]

    def test_list_not_wrapped_items_name_basic_types(self):
        """Test XML list and no wrap, items is basic type and there is itemsName.
        """
        basic_xml = """<?xml version="1.0"?>
            <AppleBarrel>
                <Apple>granny</Apple>
                <Apple>fuji</Apple>
            </AppleBarrel>"""

        class AppleBarrel(Model):
            _attribute_map = {
                'good_apples': {'key': 'GoodApples', 'type': '[str]', 'xml': {'name': 'GoodApples', 'itemsName': 'Apple'}},
            }
            _xml_map = {
                'name': 'AppleBarrel'
            }

        s = Deserializer({"AppleBarrel": AppleBarrel})
        result = s(AppleBarrel, basic_xml, "application/xml")

        assert result.good_apples == ["granny", "fuji"]

    def test_list_wrapped_basic_types(self):
        """Test XML list and wrap, items is basic type and there is no itemsName.
        """
        basic_xml = """<?xml version="1.0"?>
            <AppleBarrel>
                <GoodApples>
                  <GoodApples>granny</GoodApples>
                  <GoodApples>fuji</GoodApples>
                </GoodApples>
            </AppleBarrel>"""

        class AppleBarrel(Model):
            _attribute_map = {
                'good_apples': {'key': 'GoodApples', 'type': '[str]', 'xml': {'name': 'GoodApples', 'wrapped': True}},
            }
            _xml_map = {
                'name': 'AppleBarrel'
            }

        s = Deserializer({"AppleBarrel": AppleBarrel})
        result = s(AppleBarrel, basic_xml, "application/xml")

        assert result.good_apples == ["granny", "fuji"]

    def test_list_not_wrapped_basic_types(self):
        """Test XML list and no wrap, items is basic type and there is no itemsName.
        """
        basic_xml = """<?xml version="1.0"?>
            <AppleBarrel>
                <GoodApples>granny</GoodApples>
                <GoodApples>fuji</GoodApples>
            </AppleBarrel>"""

        class AppleBarrel(Model):
            _attribute_map = {
                'good_apples': {'key': 'GoodApples', 'type': '[str]', 'xml': {'name': 'GoodApples'}},
            }
            _xml_map = {
                'name': 'AppleBarrel'
            }

        s = Deserializer({"AppleBarrel": AppleBarrel})
        result = s(AppleBarrel, basic_xml, "application/xml")

        assert result.good_apples == ["granny", "fuji"]

    def test_list_wrapped_items_name_complex_types(self):
        """Test XML list and wrap, items is ref and there is itemsName.
        """
        basic_xml = """<?xml version="1.0"?>
            <AppleBarrel>
                <GoodApples>
                  <Apple name="granny"/>
                  <Apple name="fuji"/>
                </GoodApples>
            </AppleBarrel>"""

        class AppleBarrel(Model):
            _attribute_map = {
                'good_apples': {'key': 'GoodApples', 'type': '[Apple]', 'xml': {'name': 'GoodApples', 'wrapped': True, 'itemsName': 'Apple'}},
            }
            _xml_map = {
                'name': 'AppleBarrel'
            }

        class Apple(Model):
            _attribute_map = {
                'name': {'key': 'name', 'type': 'str', 'xml':{'name': 'name', 'attr': True}},
            }
            _xml_map = {
                'name': 'Pomme' # Should be ignored, since "itemsName" is defined
            }

        s = Deserializer({"AppleBarrel": AppleBarrel, "Apple": Apple})
        result = s('AppleBarrel', basic_xml, "application/xml")

        assert [apple.name for apple in result.good_apples] == ["granny", "fuji"]

    def test_list_not_wrapped_items_name_complex_types(self):
        """Test XML list and wrap, items is ref and there is itemsName.
        """
        basic_xml = """<?xml version="1.0"?>
            <AppleBarrel>
                <Apple name="granny"/>
                <Apple name="fuji"/>
            </AppleBarrel>"""

        class AppleBarrel(Model):
            _attribute_map = {
                # Pomme should be ignored, since it's invalid to define itemsName for a $ref type
                'good_apples': {'key': 'GoodApples', 'type': '[Apple]', 'xml': {'name': 'GoodApples', 'itemsName': 'Pomme'}},
            }
            _xml_map = {
                'name': 'AppleBarrel'
            }

        class Apple(Model):
            _attribute_map = {
                'name': {'key': 'name', 'type': 'str', 'xml':{'name': 'name', 'attr': True}},
            }
            _xml_map = {
                'name': 'Apple'
            }

        s = Deserializer({"AppleBarrel": AppleBarrel, "Apple": Apple})
        result = s(AppleBarrel, basic_xml, "application/xml")

        assert [apple.name for apple in result.good_apples] == ["granny", "fuji"]

    def test_list_wrapped_complex_types(self):
        """Test XML list and wrap, items is ref and there is no itemsName.
        """
        basic_xml = """<?xml version="1.0"?>
            <AppleBarrel>
                <GoodApples>
                  <Apple name="granny"/>
                  <Apple name="fuji"/>
                </GoodApples>
            </AppleBarrel>"""

        class AppleBarrel(Model):
            _attribute_map = {
                'good_apples': {'key': 'GoodApples', 'type': '[Apple]', 'xml': {'name': 'GoodApples', 'wrapped': True}},
            }
            _xml_map = {
                'name': 'AppleBarrel'
            }

        class Apple(Model):
            _attribute_map = {
                'name': {'key': 'name', 'type': 'str', 'xml':{'name': 'name', 'attr': True}},
            }
            _xml_map = {
                'name': 'Apple'
            }

        s = Deserializer({"AppleBarrel": AppleBarrel, "Apple": Apple})
        result = s(AppleBarrel, basic_xml, "application/xml")

        assert [apple.name for apple in result.good_apples] == ["granny", "fuji"]

    def test_list_not_wrapped_complex_types(self):
        """Test XML list and wrap, items is ref and there is no itemsName.
        """
        basic_xml = """<?xml version="1.0"?>
            <AppleBarrel>
                <Apple name="granny"/>
                <Apple name="fuji"/>
            </AppleBarrel>"""

        class AppleBarrel(Model):
            _attribute_map = {
                # Name is ignored if wrapped is False
                'good_apples': {'key': 'GoodApples', 'type': '[Apple]', 'xml': {'name': 'GoodApples'}},
            }
            _xml_map = {
                'name': 'AppleBarrel'
            }

        class Apple(Model):
            _attribute_map = {
                'name': {'key': 'name', 'type': 'str', 'xml':{'name': 'name', 'attr': True}},
            }
            _xml_map = {
                'name': 'Apple'
            }

        s = Deserializer({"AppleBarrel": AppleBarrel, "Apple": Apple})
        result = s(AppleBarrel, basic_xml, "application/xml")

        assert [apple.name for apple in result.good_apples] == ["granny", "fuji"]

    def test_basic_additional_properties(self):
        """Test an ultra basic XML."""
        basic_xml = """<?xml version="1.0"?>
            <Metadata>
              <number>1</number>
              <name>bob</name>
            </Metadata>"""

        class XmlModel(Model):
            _attribute_map = {
                'additional_properties': {'key': '', 'type': '{str}', 'xml': {'name': 'additional_properties'}},
                'encrypted': {'key': 'Encrypted', 'type': 'str', 'xml': {'name': 'Encrypted', 'attr': True}},
            }
            _xml_map = {
                'name': 'Metadata'
            }

            def __init__(self, **kwargs):
                super(XmlModel, self).__init__(**kwargs)
                self.additional_properties = kwargs.get('additional_properties', None)
                self.encrypted = kwargs.get('encrypted', None)

        s = Deserializer({"XmlModel": XmlModel})
        result = s(XmlModel, basic_xml, "application/xml")

        # Unmapped child elements are collected into additional_properties.
        assert result.additional_properties == {'name': 'bob', 'number': '1'}
        assert result.encrypted is None

    def test_basic_namespace(self):
        """Test an ultra basic XML."""
        basic_xml = """<?xml version="1.0"?>
            <Data xmlns:fictional="http://characters.example.com">
                <fictional:Age>37</fictional:Age>
            </Data>"""

        class XmlModel(Model):
            _attribute_map = {
                'age': {'key': 'age', 'type': 'int', 'xml':{'name': 'Age', 'prefix':'fictional','ns':'http://characters.example.com'}},
            }
            _xml_map = {
                'name': 'Data'
            }

        s = Deserializer({"XmlModel": XmlModel})
        result = s(XmlModel, basic_xml, "application/xml")

        assert result.age == 37


class TestXmlSerialization:
    """Model -> XML serialization scenarios, each also repeated with
    ``is_xml=True`` (the explicit XML code path of ``Serializer.body``)."""

    def test_basic(self):
        """Test an ultra basic XML."""
        basic_xml = ET.fromstring("""<?xml version="1.0"?>
            <Data country="france">
                <Age>37</Age>
            </Data>""")

        class XmlModel(Model):
            _attribute_map = {
                'age': {'key': 'age', 'type': 'int', 'xml':{'name': 'Age'}},
                'country': {'key': 'country', 'type': 'str', 'xml':{'name': 'country', 'attr': True}},
            }
            _xml_map = {
                'name': 'Data'
            }

        mymodel = XmlModel(
            age=37,
            country="france"
        )

        s = Serializer({"XmlModel": XmlModel})
        rawxml = s.body(mymodel, 'XmlModel')

        assert_xml_equals(rawxml, basic_xml)

    def test_basic_unicode(self):
        """Test a XML with unicode."""
        basic_xml = ET.fromstring(u"""<?xml version="1.0" encoding="utf-8"?>
            <Data language="français"/>""".encode("utf-8"))

        class XmlModel(Model):
            _attribute_map = {
                'language': {'key': 'language', 'type': 'str', 'xml':{'name': 'language', 'attr': True}},
            }
            _xml_map = {
                'name': 'Data'
            }

        mymodel = XmlModel(
            language=u"français"
        )

        s = Serializer({"XmlModel": XmlModel})
        rawxml = s.body(mymodel, 'XmlModel')

        assert_xml_equals(rawxml, basic_xml)

    def test_nested_unicode(self):
        # Byte-exact serialization check, with and without non-ASCII text.

        class XmlModel(Model):
            _attribute_map = {
                'message_text': {'key': 'MessageText', 'type': 'str', 'xml': {'name': 'MessageText'}},
            }
            _xml_map = {
                'name': 'Message'
            }

        mymodel_no_unicode = XmlModel(message_text=u'message1')
        s = Serializer({"XmlModel": XmlModel})
        body = s.body(mymodel_no_unicode, 'XmlModel')
        xml_content = ET.tostring(body, encoding="utf8")
        assert xml_content == b"<?xml version='1.0' encoding='utf8'?>\n<Message><MessageText>message1</MessageText></Message>"

        mymodel_with_unicode = XmlModel(message_text=u'message1㚈')
        s = Serializer({"XmlModel": XmlModel})
        body = s.body(mymodel_with_unicode, 'XmlModel')
        xml_content = ET.tostring(body, encoding="utf8")
        assert xml_content == b"<?xml version='1.0' encoding='utf8'?>\n<Message><MessageText>message1\xe3\x9a\x88</MessageText></Message>"

    @pytest.mark.skipif(sys.version_info < (3,6),
                        reason="Dict ordering not guaranted before 3.6, makes this complicated to test.")
    def test_add_prop(self):
        """Test addProp as a dict.
        """
        basic_xml = ET.fromstring("""<?xml version="1.0"?>
            <Data>
                <Metadata>
                  <Key1>value1</Key1>
                  <Key2>value2</Key2>
                </Metadata>
            </Data>""")

        class XmlModel(Model):
            _attribute_map = {
                'metadata': {'key': 'Metadata', 'type': '{str}', 'xml': {'name': 'Metadata'}},
            }
            _xml_map = {
                'name': 'Data'
            }

        mymodel = XmlModel(
            metadata={
                'Key1': 'value1',
                'Key2': 'value2',
            }
        )

        s = Serializer({"XmlModel": XmlModel})
        rawxml = s.body(mymodel, 'XmlModel')

        assert_xml_equals(rawxml, basic_xml)

    def test_object(self):
        """Test serialize object as is.
        """
        basic_xml = ET.fromstring("""<?xml version="1.0"?>
            <Data country="france">
                <Age>37</Age>
            </Data>""")

        s = Serializer()
        rawxml = s.body(basic_xml, 'object')

        # It should actually be the same object, should not even try to touch it
        assert rawxml is basic_xml

    @pytest.mark.skipif(sys.version_info < (3,6),
                        reason="Unstable before python3.6 for some reasons")
    def test_type_basic(self):
        """Test some types."""
        basic_xml = ET.fromstring("""<?xml version="1.0"?>
            <Data>
                <Age>37</Age>
                <Enabled>true</Enabled>
            </Data>""")

        class XmlModel(Model):
            _attribute_map = {
                'age': {'key': 'age', 'type': 'int', 'xml':{'name': 'Age'}},
                'enabled': {'key': 'enabled', 'type': 'bool', 'xml':{'name': 'Enabled'}},
            }
            _xml_map = {
                'name': 'Data'
            }

        mymodel = XmlModel(
            age=37,
            enabled=True
        )

        s = Serializer({"XmlModel": XmlModel})
        rawxml = s.body(mymodel, 'XmlModel')

        assert_xml_equals(rawxml, basic_xml)

    def test_direct_array(self):
        """Test an ultra basic XML."""
        basic_xml = ET.fromstring("""<?xml version="1.0"?>
            <bananas>
               <Data country="france"/>
            </bananas>
        """)

        class XmlModel(Model):
            _attribute_map = {
                'country': {'key': 'country', 'type': 'str', 'xml':{'name': 'country', 'attr': True}},
            }
            _xml_map = {
                'name': 'Data'
            }

        mymodel = XmlModel(
            country="france"
        )

        s = Serializer({"XmlModel": XmlModel})
        # serialization_ctxt supplies the wrapper element for a top-level list.
        rawxml = s.body(
            [mymodel],
            '[XmlModel]',
            serialization_ctxt={'xml': {'name': 'bananas', 'wrapped': True}}
        )

        assert_xml_equals(rawxml, basic_xml)

    def test_list_wrapped_basic_types(self):
        """Test XML list and wrap, items is basic type and there is no itemsName.
        """
        basic_xml = ET.fromstring("""<?xml version="1.0"?>
            <AppleBarrel>
                <GoodApples>
                  <GoodApples>granny</GoodApples>
                  <GoodApples>fuji</GoodApples>
                </GoodApples>
            </AppleBarrel>""")

        class AppleBarrel(Model):
            _attribute_map = {
                'good_apples': {'key': 'GoodApples', 'type': '[str]', 'xml': {'name': 'GoodApples', 'wrapped': True}},
            }
            _xml_map = {
                'name': 'AppleBarrel'
            }

        mymodel = AppleBarrel(
            good_apples=['granny', 'fuji']
        )

        s = Serializer({"AppleBarrel": AppleBarrel})
        rawxml = s.body(mymodel, 'AppleBarrel')

        assert_xml_equals(rawxml, basic_xml)

    def test_list_not_wrapped_basic_types(self):
        """Test XML list and no wrap, items is basic type and there is no itemsName.
        """
        basic_xml = ET.fromstring("""<?xml version="1.0"?>
            <AppleBarrel>
                <GoodApples>granny</GoodApples>
                <GoodApples>fuji</GoodApples>
            </AppleBarrel>""")

        class AppleBarrel(Model):
            _attribute_map = {
                'good_apples': {'key': 'GoodApples', 'type': '[str]', 'xml': {'name': 'GoodApples'}},
            }
            _xml_map = {
                'name': 'AppleBarrel'
            }

        mymodel = AppleBarrel(
            good_apples=['granny', 'fuji']
        )

        s = Serializer({"AppleBarrel": AppleBarrel})
        rawxml = s.body(mymodel, 'AppleBarrel')

        assert_xml_equals(rawxml, basic_xml)

    def test_list_wrapped_items_name_complex_types(self):
        """Test XML list and wrap, items is ref and there is itemsName.
        """
        basic_xml = ET.fromstring("""<?xml version="1.0"?>
            <AppleBarrel>
                <GoodApples>
                  <Apple name="granny"/>
                  <Apple name="fuji"/>
                </GoodApples>
            </AppleBarrel>""")

        class AppleBarrel(Model):
            _attribute_map = {
                # Pomme should be ignored, since it's invalid to define itemsName for a $ref type
                'good_apples': {'key': 'GoodApples', 'type': '[Apple]', 'xml': {'name': 'GoodApples', 'wrapped': True, 'itemsName': 'Pomme'}},
            }
            _xml_map = {
                'name': 'AppleBarrel'
            }

        class Apple(Model):
            _attribute_map = {
                'name': {'key': 'name', 'type': 'str', 'xml':{'name': 'name', 'attr': True}},
            }
            _xml_map = {
                'name': 'Apple'
            }

        mymodel = AppleBarrel(
            good_apples=[
                Apple(name='granny'),
                Apple(name='fuji')
            ]
        )

        s = Serializer({"AppleBarrel": AppleBarrel, "Apple": Apple})
        rawxml = s.body(mymodel, 'AppleBarrel')

        assert_xml_equals(rawxml, basic_xml)

    def test_list_not_wrapped_items_name_complex_types(self):
        """Test XML list and wrap, items is ref and there is itemsName.
        """
        basic_xml = ET.fromstring("""<?xml version="1.0"?>
            <AppleBarrel>
                <Apple name="granny"/>
                <Apple name="fuji"/>
            </AppleBarrel>""")

        class AppleBarrel(Model):
            _attribute_map = {
                # Pomme should be ignored, since it's invalid to define itemsName for a $ref type
                'good_apples': {'key': 'GoodApples', 'type': '[Apple]', 'xml': {'name': 'GoodApples', 'itemsName': 'Pomme'}},
            }
            _xml_map = {
                'name': 'AppleBarrel'
            }

        class Apple(Model):
            _attribute_map = {
                'name': {'key': 'name', 'type': 'str', 'xml':{'name': 'name', 'attr': True}},
            }
            _xml_map = {
                'name': 'Apple'
            }

        mymodel = AppleBarrel(
            good_apples=[
                Apple(name='granny'),
                Apple(name='fuji')
            ]
        )

        s = Serializer({"AppleBarrel": AppleBarrel, "Apple": Apple})
        rawxml = s.body(mymodel, 'AppleBarrel')

        assert_xml_equals(rawxml, basic_xml)

    def test_list_wrapped_complex_types(self):
        """Test XML list and wrap, items is ref and there is no itemsName.
        """
        basic_xml = ET.fromstring("""<?xml version="1.0"?>
            <AppleBarrel>
                <GoodApples>
                  <Apple name="granny"/>
                  <Apple name="fuji"/>
                </GoodApples>
            </AppleBarrel>""")

        class AppleBarrel(Model):
            _attribute_map = {
                'good_apples': {'key': 'GoodApples', 'type': '[Apple]', 'xml': {'name': 'GoodApples', 'wrapped': True}},
            }
            _xml_map = {
                'name': 'AppleBarrel'
            }

        class Apple(Model):
            _attribute_map = {
                'name': {'key': 'name', 'type': 'str', 'xml':{'name': 'name', 'attr': True}},
            }
            _xml_map = {
                'name': 'Apple'
            }

        mymodel = AppleBarrel(
            good_apples=[
                Apple(name='granny'),
                Apple(name='fuji')
            ]
        )

        s = Serializer({"AppleBarrel": AppleBarrel, "Apple": Apple})
        rawxml = s.body(mymodel, 'AppleBarrel')

        assert_xml_equals(rawxml, basic_xml)

    def test_list_not_wrapped_complex_types(self):
        """Test XML list and wrap, items is ref and there is no itemsName.
        """
        basic_xml = ET.fromstring("""<?xml version="1.0"?>
            <AppleBarrel>
                <Apple name="granny"/>
                <Apple name="fuji"/>
            </AppleBarrel>""")

        class AppleBarrel(Model):
            _attribute_map = {
                # Name is ignored if "wrapped" is False
                'good_apples': {'key': 'GoodApples', 'type': '[Apple]', 'xml': {'name': 'GoodApples'}},
            }
            _xml_map = {
                'name': 'AppleBarrel'
            }

        class Apple(Model):
            _attribute_map = {
                'name': {'key': 'name', 'type': 'str', 'xml':{'name': 'name', 'attr': True}},
            }
            _xml_map = {
                'name': 'Apple'
            }

        mymodel = AppleBarrel(
            good_apples=[
                Apple(name='granny'),
                Apple(name='fuji')
            ]
        )

        s = Serializer({"AppleBarrel": AppleBarrel, "Apple": Apple})
        rawxml = s.body(mymodel, 'AppleBarrel')

        assert_xml_equals(rawxml, basic_xml)

    @pytest.mark.skipif(sys.version_info < (3,6),
                        reason="Unstable before python3.6 for some reasons")
    def test_two_complex_same_type(self):
        """Two different attribute are same type
        """
        basic_xml = ET.fromstring("""<?xml version="1.0"?>
            <AppleBarrel>
                <EuropeanApple name="granny"/>
                <USAApple name="fuji"/>
            </AppleBarrel>""")

        class AppleBarrel(Model):
            _attribute_map = {
                'eu_apple': {'key': 'EuropeanApple', 'type': 'Apple', 'xml': {'name': 'EuropeanApple'}},
                'us_apple': {'key': 'USAApple', 'type': 'Apple', 'xml': {'name': 'USAApple'}},
            }
            _xml_map = {
                'name': 'AppleBarrel'
            }

        class Apple(Model):
            _attribute_map = {
                'name': {'key': 'name', 'type': 'str', 'xml':{'name': 'name', 'attr': True}},
            }
            _xml_map = {
            }

        mymodel = AppleBarrel(
            eu_apple=Apple(name='granny'),
            us_apple=Apple(name='fuji'),
        )

        s = Serializer({"AppleBarrel": AppleBarrel, "Apple": Apple})
        rawxml = s.body(mymodel, 'AppleBarrel')

        assert_xml_equals(rawxml, basic_xml)

    def test_basic_namespace(self):
        """Test an ultra basic XML."""
        basic_xml = ET.fromstring("""<?xml version="1.0"?>
            <Data xmlns:fictional="http://characters.example.com">
                <fictional:Age>37</fictional:Age>
            </Data>""")

        class XmlModel(Model):
            _attribute_map = {
                'age': {'key': 'age', 'type': 'int', 'xml':{'name': 'Age', 'prefix':'fictional','ns':'http://characters.example.com'}},
            }
            _xml_map = {
                'name': 'Data'
            }

        mymodel = XmlModel(
            age=37,
        )

        s = Serializer({"XmlModel": XmlModel})
        rawxml = s.body(mymodel, 'XmlModel')

        assert_xml_equals(rawxml, basic_xml)

    def test_basic_is_xml(self):
        """Test an ultra basic XML."""
        basic_xml = ET.fromstring("""<?xml version="1.0"?>
            <Data country="france">
                <Age>37</Age>
            </Data>""")

        class XmlModel(Model):
            _attribute_map = {
                'age': {'key': 'age', 'type': 'int', 'xml':{'name': 'Age'}},
                'country': {'key': 'country', 'type': 'str', 'xml':{'name': 'country', 'attr': True}},
            }
            _xml_map = {
                'name': 'Data'
            }

        mymodel = XmlModel(
            age=37,
            country="france",
        )

        s = Serializer({"XmlModel": XmlModel})
        rawxml = s.body(mymodel, 'XmlModel', is_xml=True)

        assert_xml_equals(rawxml, basic_xml)

    def test_basic_unicode_is_xml(self):
        """Test a XML with unicode."""
        basic_xml = ET.fromstring(u"""<?xml version="1.0" encoding="utf-8"?>
            <Data language="français"/>""".encode("utf-8"))

        class XmlModel(Model):
            _attribute_map = {
                'language': {'key': 'language', 'type': 'str', 'xml':{'name': 'language', 'attr': True}},
            }
            _xml_map = {
                'name': 'Data'
            }

        mymodel = XmlModel(
            language=u"français"
        )

        s = Serializer({"XmlModel": XmlModel})
        rawxml = s.body(mymodel, 'XmlModel', is_xml=True)

        assert_xml_equals(rawxml, basic_xml)

    @pytest.mark.skipif(sys.version_info < (3,6),
                        reason="Dict ordering not guaranted before 3.6, makes this complicated to test.")
    def test_add_prop_is_xml(self):
        """Test addProp as a dict.
        """
        basic_xml = ET.fromstring("""<?xml version="1.0"?>
            <Data>
                <Metadata>
                  <Key1>value1</Key1>
                  <Key2>value2</Key2>
                </Metadata>
            </Data>""")

        class XmlModel(Model):
            _attribute_map = {
                'metadata': {'key': 'Metadata', 'type': '{str}', 'xml': {'name': 'Metadata'}},
            }
            _xml_map = {
                'name': 'Data'
            }

        mymodel = XmlModel(
            metadata={
                'Key1': 'value1',
                'Key2': 'value2',
            }
        )

        s = Serializer({"XmlModel": XmlModel})
        rawxml = s.body(mymodel, 'XmlModel', is_xml=True)

        assert_xml_equals(rawxml, basic_xml)

    def test_object_is_xml(self):
        """Test serialize object as is.
        """
        basic_xml = ET.fromstring("""<?xml version="1.0"?>
            <Data country="france">
                <Age>37</Age>
            </Data>""")

        s = Serializer()
        rawxml = s.body(basic_xml, 'object', is_xml=True)

        # It should actually be the same object, should not even try to touch it
        assert rawxml is basic_xml

    @pytest.mark.skipif(sys.version_info < (3,6),
                        reason="Unstable before python3.6 for some reasons")
    def test_type_basic_is_xml(self):
        """Test some types."""
        basic_xml = ET.fromstring("""<?xml version="1.0"?>
            <Data>
                <Age>37</Age>
                <Enabled>true</Enabled>
            </Data>""")

        class XmlModel(Model):
            _attribute_map = {
                'age': {'key': 'age', 'type': 'int', 'xml':{'name': 'Age'}},
                'enabled': {'key': 'enabled', 'type': 'bool', 'xml':{'name': 'Enabled'}},
            }
            _xml_map = {
                'name': 'Data'
            }

        mymodel = XmlModel(
            age=37,
            enabled=True
        )

        s = Serializer({"XmlModel": XmlModel})
        rawxml = s.body(mymodel, 'XmlModel', is_xml=True)

        assert_xml_equals(rawxml, basic_xml)

    def test_direct_array_is_xml(self):
        """Test an ultra basic XML."""
        basic_xml = ET.fromstring("""<?xml version="1.0"?>
            <bananas>
               <Data country="france"/>
            </bananas>
        """)

        class XmlModel(Model):
            _attribute_map = {
                'country': {'key': 'country', 'type': 'str', 'xml':{'name': 'country', 'attr': True}},
            }
            _xml_map = {
                'name': 'Data'
            }

        mymodel = XmlModel(
            country="france"
        )

        s = Serializer({"XmlModel": XmlModel})
        rawxml = s.body(
            [mymodel],
            '[XmlModel]',
            serialization_ctxt={'xml': {'name': 'bananas', 'wrapped': True}},
            is_xml=True
        )

        assert_xml_equals(rawxml, basic_xml)

    def test_list_wrapped_basic_types_is_xml(self):
        """Test XML list and wrap, items is basic type and there is no itemsName.
        """
        basic_xml = ET.fromstring("""<?xml version="1.0"?>
            <AppleBarrel>
                <GoodApples>
                  <GoodApples>granny</GoodApples>
                  <GoodApples>fuji</GoodApples>
                </GoodApples>
            </AppleBarrel>""")

        class AppleBarrel(Model):
            _attribute_map = {
                'good_apples': {'key': 'GoodApples', 'type': '[str]', 'xml': {'name': 'GoodApples', 'wrapped': True}},
            }
            _xml_map = {
                'name': 'AppleBarrel'
            }

        mymodel = AppleBarrel(
            good_apples=['granny', 'fuji']
        )

        s = Serializer({"AppleBarrel": AppleBarrel})
        rawxml = s.body(mymodel, 'AppleBarrel', is_xml=True)

        assert_xml_equals(rawxml, basic_xml)

    def test_list_not_wrapped_basic_types_is_xml(self):
        """Test XML list and no wrap, items is basic type and there is no itemsName.
        """
        basic_xml = ET.fromstring("""<?xml version="1.0"?>
            <AppleBarrel>
                <GoodApples>granny</GoodApples>
                <GoodApples>fuji</GoodApples>
            </AppleBarrel>""")

        class AppleBarrel(Model):
            _attribute_map = {
                'good_apples': {'key': 'GoodApples', 'type': '[str]', 'xml': {'name': 'GoodApples'}},
            }
            _xml_map = {
                'name': 'AppleBarrel'
            }

        mymodel = AppleBarrel(
            good_apples=['granny', 'fuji']
        )

        s = Serializer({"AppleBarrel": AppleBarrel})
        rawxml = s.body(mymodel, 'AppleBarrel', is_xml=True)

        assert_xml_equals(rawxml, basic_xml)

    def test_list_wrapped_items_name_complex_types_is_xml(self):
        """Test XML list and wrap, items is ref and there is itemsName.
        """
        basic_xml = ET.fromstring("""<?xml version="1.0"?>
            <AppleBarrel>
                <GoodApples>
                  <Apple name="granny"/>
                  <Apple name="fuji"/>
                </GoodApples>
            </AppleBarrel>""")

        class AppleBarrel(Model):
            _attribute_map = {
                # Pomme should be ignored, since it's invalid to define itemsName for a $ref type
                'good_apples': {'key': 'GoodApples', 'type': '[Apple]', 'xml': {'name': 'GoodApples', 'wrapped': True, 'itemsName': 'Pomme'}},
            }
            _xml_map = {
                'name': 'AppleBarrel'
            }

        class Apple(Model):
            _attribute_map = {
                'name': {'key': 'name', 'type': 'str', 'xml':{'name': 'name', 'attr': True}},
            }
            _xml_map = {
                'name': 'Apple'
            }

        mymodel = AppleBarrel(
            good_apples=[
                Apple(name='granny'),
                Apple(name='fuji')
            ]
        )

        s = Serializer({"AppleBarrel": AppleBarrel, "Apple": Apple})
        rawxml = s.body(mymodel, 'AppleBarrel', is_xml=True)

        assert_xml_equals(rawxml, basic_xml)

    def test_list_not_wrapped_items_name_complex_types_is_xml(self):
        """Test XML list and wrap, items is ref and there is itemsName.
        """
        basic_xml = ET.fromstring("""<?xml version="1.0"?>
            <AppleBarrel>
                <Apple name="granny"/>
                <Apple name="fuji"/>
            </AppleBarrel>""")

        class AppleBarrel(Model):
            _attribute_map = {
                # Pomme should be ignored, since it's invalid to define itemsName for a $ref type
                'good_apples': {'key': 'GoodApples', 'type': '[Apple]', 'xml': {'name': 'GoodApples', 'itemsName': 'Pomme'}},
            }
            _xml_map = {
                'name': 'AppleBarrel'
            }

        class Apple(Model):
            _attribute_map = {
                'name': {'key': 'name', 'type': 'str', 'xml':{'name': 'name', 'attr': True}},
            }
            _xml_map = {
                'name': 'Apple'
            }

        mymodel = AppleBarrel(
            good_apples=[
                Apple(name='granny'),
                Apple(name='fuji')
            ]
        )

        s = Serializer({"AppleBarrel": AppleBarrel, "Apple": Apple})
        rawxml = s.body(mymodel, 'AppleBarrel', is_xml=True)

        assert_xml_equals(rawxml, basic_xml)

    def test_list_wrapped_complex_types_is_xml(self):
        """Test XML list and wrap, items is ref and there is no itemsName.
        """
        basic_xml = ET.fromstring("""<?xml version="1.0"?>
            <AppleBarrel>
                <GoodApples>
                  <Apple name="granny"/>
                  <Apple name="fuji"/>
                </GoodApples>
            </AppleBarrel>""")

        class AppleBarrel(Model):
            _attribute_map = {
                'good_apples': {'key': 'GoodApples', 'type': '[Apple]', 'xml': {'name': 'GoodApples', 'wrapped': True}},
            }
            _xml_map = {
                'name': 'AppleBarrel'
            }

        class Apple(Model):
            _attribute_map = {
                'name': {'key': 'name', 'type': 'str', 'xml':{'name': 'name', 'attr': True}},
            }
            _xml_map = {
                'name': 'Apple'
            }

        mymodel = AppleBarrel(
            good_apples=[
                Apple(name='granny'),
                Apple(name='fuji')
            ]
        )

        s = Serializer({"AppleBarrel": AppleBarrel, "Apple": Apple})
        rawxml = s.body(mymodel, 'AppleBarrel', is_xml=True)

        assert_xml_equals(rawxml, basic_xml)

    def test_list_not_wrapped_complex_types_is_xml(self):
        """Test XML list and wrap, items is ref and there is no itemsName.
        """
        basic_xml = ET.fromstring("""<?xml version="1.0"?>
            <AppleBarrel>
                <Apple name="granny"/>
                <Apple name="fuji"/>
            </AppleBarrel>""")

        class AppleBarrel(Model):
            _attribute_map = {
                # Name is ignored if "wrapped" is False
                'good_apples': {'key': 'GoodApples', 'type': '[Apple]', 'xml': {'name': 'GoodApples'}},
            }
            _xml_map = {
                'name': 'AppleBarrel'
            }

        class Apple(Model):
            _attribute_map = {
                'name': {'key': 'name', 'type': 'str', 'xml':{'name': 'name', 'attr': True}},
            }
            _xml_map = {
                'name': 'Apple'
            }

        mymodel = AppleBarrel(
            good_apples=[
                Apple(name='granny'),
                Apple(name='fuji')
            ]
        )

        s = Serializer({"AppleBarrel": AppleBarrel, "Apple": Apple})
        rawxml = s.body(mymodel, 'AppleBarrel', is_xml=True)

        assert_xml_equals(rawxml, basic_xml)

    @pytest.mark.skipif(sys.version_info < (3,6),
                        reason="Unstable before python3.6 for some reasons")
    def test_two_complex_same_type_is_xml(self):
        """Two different attribute are same type
        """
        basic_xml = ET.fromstring("""<?xml version="1.0"?>
            <AppleBarrel>
                <EuropeanApple name="granny"/>
                <USAApple name="fuji"/>
            </AppleBarrel>""")

        class AppleBarrel(Model):
            _attribute_map = {
                'eu_apple': {'key': 'EuropeanApple', 'type': 'Apple', 'xml': {'name': 'EuropeanApple'}},
                'us_apple': {'key': 'USAApple', 'type': 'Apple', 'xml': {'name': 'USAApple'}},
            }
            _xml_map = {
                'name': 'AppleBarrel'
            }

        class Apple(Model):
            _attribute_map = {
                'name': {'key': 'name', 'type': 'str', 'xml':{'name': 'name', 'attr': True}},
            }
            _xml_map = {
            }

        mymodel = AppleBarrel(
            eu_apple=Apple(name='granny'),
            us_apple=Apple(name='fuji'),
        )

        s = Serializer({"AppleBarrel": AppleBarrel, "Apple": Apple})
        rawxml = s.body(mymodel, 'AppleBarrel', is_xml=True)

        assert_xml_equals(rawxml, basic_xml)

    def test_basic_namespace_is_xml(self):
        """Test an ultra basic XML."""
        basic_xml = ET.fromstring("""<?xml version="1.0"?>
            <Data xmlns:fictional="http://characters.example.com">
                <fictional:Age>37</fictional:Age>
            </Data>""")

        class XmlModel(Model):
            _attribute_map = {
                'age': {'key': 'age', 'type': 'int', 'xml':{'name': 'Age', 'prefix':'fictional','ns':'http://characters.example.com'}},
            }
            _xml_map = {
                'name': 'Data'
            }

        mymodel = XmlModel(
            age=37,
        )

        s = Serializer({"XmlModel": XmlModel})
        rawxml = s.body(mymodel, 'XmlModel', is_xml=True)

        assert_xml_equals(rawxml, basic_xml)
"""Module in charge of generating mocks.""" __all__ = ["GeneratedMock", "FileGenerator"] import os import re from copy import deepcopy from jinja2 import Environment, PackageLoader from pycparser import c_ast as node from pycparser.c_generator import CGenerator from . import __version__ def decl(name, type): return node.Decl(name, [], [], [], type, None, None) def function_ptr_decl(name, return_type, parameters): return decl( name, node.PtrDecl([], node.FuncDecl(node.ParamList(parameters), return_type)) ) def void_params(): return [decl(None, node.TypeDecl(None, [], node.IdentifierType(["void"])))] def rename_return_type(return_type, name): return_type = deepcopy(return_type) type_decl = return_type while not isinstance(type_decl, node.TypeDecl): type_decl = type_decl.type type_decl.declname = name return return_type def get_guard_name(filename): slug = re.sub(r"[^a-zA-Z0-9]", "_", os.path.normpath(os.path.relpath(filename))) return re.sub(r"_+", "_", slug).upper().strip("_") def generate_includes(system_includes, local_includes, directory): includes = "\n".join( includes for includes in ( "".join(f"#include <{path}>\n" for path in sorted(system_includes)), "".join( f'#include "{os.path.relpath(path, directory)}"\n' for path in sorted(local_includes) ), ) if includes ) return includes and f"\n{includes}" class GeneratedMock: DECL_MARKER = "// NARMOCK_DECLARATION" IMPL_MARKER = "// NARMOCK_IMPLEMENTATION" FLAGS_MARKER = "// NARMOCK_LINKER_FLAGS" FLAGS_REGEX = re.compile(fr"^{FLAGS_MARKER}\s+(.+)$", re.MULTILINE) @classmethod def extract_flags(cls, mock_declarations): return cls.FLAGS_REGEX.findall(mock_declarations) def __init__(self, function): self.function = function self.func_name = function.name self.wrapped_func = f"__wrap_{self.func_name}" self.real_func = f"__real_{self.func_name}" self.linker_flags = f"-Wl,--wrap={self.func_name}" self.state_name = f"_narmock_state_for_{self.func_name}" self.state_type = f"_narmock_state_type_for_{self.func_name}" 
self.private_state_type = f"_narmock_private_state_type_for_{self.func_name}" self.params_type = f"_narmock_params_type_for_{self.func_name}" self.func_decl = self.function.declaration.type self.func_params = self.func_decl.args.params if self.func_decl.args else [] self.params_struct = [ decl(param.name, node.PtrDecl([], param.type.type)) if isinstance(param.type, node.ArrayDecl) else param for param in self.func_params if param.name ] self.forward_args = ", ".join(param.name for param in self.params_struct) return_type = self.func_decl.type self.return_value = ( None if isinstance(return_type, node.TypeDecl) and isinstance(return_type.type, node.IdentifierType) and return_type.type.names[0] == "void" else "return_value" ) self.save_args = ", ".join( filter(None, [self.forward_args, self.return_value, "errno"]) ) self.return_value_decl = decl( self.return_value, rename_return_type(return_type, self.return_value) ) self.implementation_decl = function_ptr_decl( "implementation", rename_return_type(return_type, "implementation"), self.func_params, ) self.mock_return_decl = self.state_function( "mock_return", [self.return_value_decl] ) self.mock_implementation_decl = self.state_function( "mock_implementation", [self.implementation_decl] ) self.mock_errno_decl = self.state_function( "mock_errno", [ decl( "errno_value", node.TypeDecl("errno_value", [], node.IdentifierType(["int"])), ) ], ) self.disable_mock_decl = self.state_function("disable_mock", void_params()) self.reset_decl = self.state_function("reset", void_params()) self.real_decl = self.rename_function(self.real_func) self.wrapped_decl = self.rename_function(self.wrapped_func) def state_function(self, name, parameters): return function_ptr_decl( name, node.PtrDecl( [], node.TypeDecl(name, ["const"], node.IdentifierType([self.state_type])), ), parameters, ) def rename_function(self, name): return decl( name, node.FuncDecl( self.func_decl.args, rename_return_type(self.func_decl.type, name) ), ) class 
FileGenerator: SOURCE_FILE = "__mocks__.c" HEADER_FILE = "__mocks__.h" def __init__(self): self.code_generator = CGenerator() self.jinja_env = Environment( loader=PackageLoader("narmock", "templates"), trim_blocks=True, lstrip_blocks=True, ) self.jinja_env.filters["render"] = self.code_generator.visit self.source_template = self.jinja_env.get_template(f"{self.SOURCE_FILE}.jinja2") self.header_template = self.jinja_env.get_template(f"{self.HEADER_FILE}.jinja2") self.mocks = [] self.system_includes = set() self.local_includes = set() def add_mock(self, mocked_function): self.mocks.append(GeneratedMock(mocked_function)) if mocked_function.include: if mocked_function.include.system: self.system_includes.add(mocked_function.include.path) else: self.local_includes.add(mocked_function.include.path) def write_to_directory(self, directory): source_filename = os.path.join(directory, self.SOURCE_FILE) header_filename = os.path.join(directory, self.HEADER_FILE) mocks = list(sorted(self.mocks, key=lambda m: m.func_name)) source_code = self.source_template.render( narmock_version=__version__, includes=generate_includes( {"stddef.h", "errno.h"}, {header_filename}, directory ), mocks=mocks, ) header_code = self.header_template.render( narmock_version=__version__, guard_name=get_guard_name(header_filename), includes=generate_includes( self.system_includes, self.local_includes, directory ), mocks=mocks, ) with open(source_filename, "w") as source_file: source_file.write(source_code.strip() + "\n") with open(header_filename, "w") as header_file: header_file.write(header_code.strip() + "\n") @classmethod def read_declarations(cls, directory): with open(os.path.join(directory, cls.HEADER_FILE)) as header_file: return header_file.read()
def _normalize_text(s): """Removing articles and punctuation, and standardizing whitespace are all typical text processing steps.""" import string, re def remove_articles(text): regex = re.compile(r"\b(a|an|the)\b", re.UNICODE) return re.sub(regex, " ", text) def white_space_fix(text): return " ".join(text.split()) def remove_punc(text): exclude = set(string.punctuation) return "".join(ch for ch in text if ch not in exclude) def lower(text): return text.lower() return white_space_fix(remove_articles(remove_punc(lower(s)))) def compute_exact_match(prediction, truth): return int(_normalize_text(prediction) == _normalize_text(truth)) def compute_F1(prediction, truth): pred_tokens = _normalize_text(prediction).split() truth_tokens = _normalize_text(truth).split() # if either the prediction or the truth is no-answer then f1 = 1 if they agree, 0 otherwise if len(pred_tokens) == 0 or len(truth_tokens) == 0: return int(pred_tokens == truth_tokens) common_tokens = set(pred_tokens) & set(truth_tokens) # if there are no common tokens then f1 = 0 if len(common_tokens) == 0: return 0 prec = len(common_tokens) / len(pred_tokens) rec = len(common_tokens) / len(truth_tokens) return 2 * (prec * rec) / (prec + rec) def get_gold_answers(example): """helper function that retrieves all possible true answers from a squad2.0 example""" gold_answers = [answer["text"] for answer in example.answers if answer["text"]] # if gold_answers doesn't exist it's because this is a negative example - # the only correct answer is an empty string if not gold_answers: gold_answers = [""] return gold_answers
import cv2
import numpy as np

# Digit-recognition demo: train a k-NN classifier from previously collected
# samples, then locate and label digit-like contours in a test image.

choice = -1
while choice != 1 and choice != 2:
    # NOTE(review): non-numeric input makes int() raise ValueError here
    # instead of re-prompting.
    choice = int(input("Which set would you like to use? Enter 1 or 2:"))

#      Training      #
if choice == 2:
    # Set 2 files are expected in the current working directory.
    samples = np.loadtxt('set2generalsamples.data', np.float32)
    responses = np.loadtxt('set2generalresponses.data', np.float32)
elif choice == 1:
    # Set 1 files live at absolute paths on the target (PYNQ/Xilinx) board.
    samples = np.loadtxt('/home/xilinx/project_files/set1generalsamples.data', np.float32)
    responses = np.loadtxt('/home/xilinx/project_files/set1generalresponses.data', np.float32)

# k-NN training expects labels as a column vector.
responses = responses.reshape((responses.size, 1))

model = cv2.ml.KNearest_create()
model.train(samples, cv2.ml.ROW_SAMPLE, responses)

#      Testing      #
if choice == 2:
    im = cv2.imread('set2test.png')
elif choice == 1:
    im = cv2.imread('/home/xilinx/project_files/set1test.png')

out = np.zeros(im.shape, np.uint8)
gray = cv2.cvtColor(im, cv2.COLOR_BGR2GRAY)
# Adaptive threshold (mean, inverted) to isolate dark glyphs on light paper.
thresh = cv2.adaptiveThreshold(gray, 255, 1, 1, 11, 2)

# NOTE(review): 3-value unpacking matches the OpenCV 3.x findContours API;
# OpenCV 4 returns only (contours, hierarchy) — confirm the cv2 version used.
images, contours, hierarchy = cv2.findContours(thresh, cv2.RETR_LIST, cv2.CHAIN_APPROX_SIMPLE)

for cnt in contours:
    # Area/height filters drop noise specks and keep digit-sized blobs.
    if cv2.contourArea(cnt) > 50:
        [x, y, w, h] = cv2.boundingRect(cnt)
        if h > 28:
            cv2.rectangle(im, (x, y), (x + w, y + h), (0, 255, 0), 2)
            # Resize each candidate to the 10x10 (100-feature) format the
            # model was trained on, then classify with k=1.
            roi = thresh[y:y + h, x:x + w]
            roismall = cv2.resize(roi, (10, 10))
            roismall = roismall.reshape((1, 100))
            roismall = np.float32(roismall)
            retval, results, neigh_resp, dists = model.findNearest(roismall, k=1)
            string = str(int((results[0][0])))
            cv2.putText(out, string, (x, y + h), 0, 1, (0, 255, 0))

# cv2.imshow('im', im)
cv2.imwrite("/home/xilinx/project_files/in.png", im)
cv2.imwrite("/home/xilinx/project_files/out.png", out)
# cv2.imshow('out', out)
import json

import requests

from .base import (
    AreaCollection, EventCollection, MembershipCollection, PersonCollection,
    OrganizationCollection, PostCollection)


class Popolo(object):
    """Accessor over a parsed Popolo JSON document.

    Each collection property wraps the corresponding top-level list of the
    document on every access; the raw parsed document stays in ``json_data``.
    """

    @classmethod
    def from_filename(cls, filename):
        """Load a Popolo document from a JSON file on disk."""
        with open(filename) as f:
            return cls(json.load(f))

    @classmethod
    def from_url(cls, url):
        """Fetch a Popolo document over HTTP and parse it as JSON.

        NOTE(review): the HTTP status is not checked — a non-2xx response
        surfaces as a JSON decoding error; confirm whether
        ``r.raise_for_status()`` is wanted.
        """
        r = requests.get(url)
        return cls(r.json())

    def __init__(self, json_data):
        # json_data: the already-parsed (dict-like) Popolo document.
        self.json_data = json_data

    @property
    def persons(self):
        return PersonCollection(self.json_data.get('persons', []), self)

    @property
    def organizations(self):
        return OrganizationCollection(
            self.json_data.get('organizations', []), self)

    @property
    def memberships(self):
        return MembershipCollection(
            self.json_data.get('memberships', []), self)

    @property
    def areas(self):
        return AreaCollection(self.json_data.get('areas', []), self)

    @property
    def posts(self):
        return PostCollection(self.json_data.get('posts', []), self)

    @property
    def events(self):
        return EventCollection(self.json_data.get('events', []), self)

    @property
    def elections(self):
        # Subset of events; filtering is delegated to EventCollection.
        return self.events.elections

    @property
    def legislative_periods(self):
        # Subset of events; filtering is delegated to EventCollection.
        return self.events.legislative_periods

    @property
    def terms(self):
        # Convenience alias for legislative_periods.
        return self.legislative_periods

    @property
    def latest_legislative_period(self):
        # "Latest" = the period whose start date midpoint is greatest.
        lps = self.legislative_periods
        return max(lps, key=lambda lp: lp.start_date.midpoint_date)

    @property
    def latest_term(self):
        # Convenience alias for latest_legislative_period.
        return self.latest_legislative_period
# URI/beecrowd 1014 - Fuel consumption.
# Reads the total distance travelled (km, integer) and the fuel spent
# (litres, float), then prints the average consumption with exactly
# three decimal places, e.g. "12.000 km/l".
distance_km = int(input())
fuel_litres = float(input())
average_consumption = distance_km / fuel_litres
print(f"{average_consumption:.3f} km/l")
import sys
import warnings
from inspect import isfunction

import numpy as np
import matplotlib.pyplot as plt

from pysprint.core.methods.fftmethod import FFTMethod
from pysprint.core.phase import Phase
from pysprint.core._fft_tools import find_roi
from pysprint.core._fft_tools import find_center
from pysprint.utils.decorators import inplacify
from pysprint.utils import NotCalculatedException
from pysprint.utils import PySprintWarning
from pysprint.utils.misc import find_nearest
from pysprint.core.window import GaussianWindow
from pysprint.core.window import WindowBase

# Dask is optional: when missing, parallel execution is disabled and
# ``delayed`` degrades to a no-op decorator.
try:
    from dask import delayed, compute
    from dask.diagnostics import ProgressBar

    CAN_PARALLELIZE = True
except ImportError:
    CAN_PARALLELIZE = False

    def delayed(func=None, *args, **kwargs):
        # Fallback stand-in for dask.delayed: return the function unchanged.
        if isfunction(func):
            return func


class WFTMethod(FFTMethod):
    """Basic interface for Windowed Fourier Transform Method.

    The `window_class` attribute can be set up for custom windowing.
    """

    def __init__(self, *args, **kwargs):
        self.window_class = kwargs.pop("window_class", GaussianWindow)
        assert issubclass(self.window_class, WindowBase), "window_class must subclass pysprint.core.window.WindowBase"
        super().__init__(*args, **kwargs)
        # center -> window object, in insertion order
        self.window_seq = {}
        # center -> detected ridge position (or None when detection failed)
        self.found_centers = {}
        self.GD = None
        # number of windows the cached GD was built from
        self.cachedlen = 0
        # heatmap buffers, only filled when fastmath=False
        self.X_cont = np.array([])
        self.Y_cont = np.array([])
        self.Z_cont = np.array([])
        self.fastmath = True
        self.errorcounter = 0

    @inplacify
    def add_window(self, center, **kwargs):
        """
        Add a Gaussian window to the interferogram.

        Parameters
        ----------
        center : float
            The center of the Gaussian window.
        kwargs : dict
            Keyword arguments to pass to the `window_class`.
        """
        window = self.window_class(self.x, center=center, **kwargs)
        self.window_seq[center] = window
        return self

    @property
    def windows(self):
        # The full center -> window mapping.
        return self.window_seq

    @property
    def centers(self):
        # The window centers only.
        return self.window_seq.keys()

    @inplacify
    def add_window_generic(self, array, **kwargs):
        """
        Build a window sequence of given parameters with centers
        specified with ``array`` argument.

        Parameters
        ----------
        array : list, np.ndarray
            The array containing the centers of windows.
        kwargs : dict
            Keyword arguments to pass to the `window_class`.
        """
        if not isinstance(array, (list, np.ndarray)):
            raise TypeError("Expected list-like as ``array``.")
        for center in array:
            self.add_window(center=center, **kwargs)
        return self

    @inplacify
    def add_window_arange(self, start, stop, step, **kwargs):
        """
        Build a window sequence of given parameters to apply on ifg.
        Works similar to numpy.arange.

        Parameters
        ----------
        start : float
            The start of the centers.
        stop : float
            The end value of the center
        step : float
            The step value to increment center.
        kwargs : dict
            Keyword arguments to pass to the `window_class`.
        """
        arr = np.arange(start, stop, step)
        for cent in arr:
            self.add_window(center=cent, **kwargs)
        return self

    @inplacify
    def add_window_linspace(self, start, stop, num, **kwargs):
        """
        Build a window sequence of given parameters to apply on ifg.
        Works similar to numpy.linspace.

        Parameters
        ----------
        start : float
            The start of the centers.
        stop : float
            The end value of the center
        num : float
            The number of Gaussian windows.
        kwargs : dict
            Keyword arguments to pass to the `window_class`.
        """
        arr = np.linspace(start, stop, num)
        for cent in arr:
            self.add_window(center=cent, **kwargs)
        return self

    @inplacify
    def add_window_geomspace(self, start, stop, num, **kwargs):
        """
        Build a window sequence of given parameters to apply on ifg.
        Works similar to numpy.geomspace.

        Parameters
        ----------
        start : float
            The start of the centers.
        stop : float
            The end value of the center
        num : float
            The number of Gaussian windows.
        kwargs : dict
            Keyword arguments to pass to the `window_class`.
        """
        arr = np.geomspace(start, stop, num)
        for cent in arr:
            self.add_window(center=cent, **kwargs)
        return self

    def view_windows(self, ax=None, maxsize=80, **kwargs):
        """
        Gives a rough view of the different windows along with the ifg.

        Parameters
        ----------
        ax : matplotlib.axes.Axes, optional
            An axis to draw the plot on. If not given, it will plot
            of the last used axis.
        maxsize : int, optional
            The maximum number of Gaussian windows to display on plot.
            Default is 80, but be aware that setting a high value can
            drastically reduce performance.
        kwargs : dict, optional
            Additional keyword arguments to pass to plot function.
        """
        # NOTE(review): the nesting below is reconstructed from a
        # whitespace-mangled source — verify against the repository.
        winlen = len(self.window_seq)
        if maxsize != 0:
            ratio = winlen // maxsize
        if winlen > maxsize:
            warnings.warn(
                "Image seems crowded, displaying only a subsample of the given windows.",
                PySprintWarning
            )
            # Plot only every ``ratio``-th window to keep the figure readable.
            for i, (_, val) in enumerate(self.window_seq.items()):
                if i % ratio == 0:
                    val.plot(ax=ax, scalefactor=np.max(self.y) * .75, **kwargs)
        else:
            for _, val in self.window_seq.items():
                val.plot(ax=ax, scalefactor=np.max(self.y) * .75, **kwargs)
        self.plot(ax=ax)

    @inplacify
    def remove_all_windows(self):
        """
        Remove all the Gaussian windows.
        """
        self.window_seq.clear()
        return self

    @inplacify
    def reset_state(self):
        """
        Reset the object's state fully: delete all the
        calculated GD, caches, heatmaps and window sequences.
        """
        self.remove_all_windows()
        self.found_centers.clear()
        self.X_cont = np.array([])
        self.Y_cont = np.array([])
        self.Z_cont = np.array([])
        self.GD = None
        self.cachedlen = 0
        self.fastmath = True
        return self

    @inplacify
    def remove_window_at(self, center):
        """
        Removes a window at center.

        Parameters
        ----------
        center : float
            The center of the window to remove. Raises ValueError
            if there is not such window.
        """
        if center not in self.window_seq.keys():
            # Suggest the nearest existing center in the error message.
            c = find_nearest(
                np.fromiter(self.window_seq.keys(), dtype=float), center
            )
            raise ValueError(
                f"There is no window with center {center}. "
                f"Did you mean {c[0]}?"
            )
        self.window_seq.pop(center, None)
        return self

    @inplacify
    def remove_window_interval(self, start, stop):
        """
        Remove window interval inclusively.

        Parameters
        ----------
        start : float
            The start value of the interval.
        stop : float
            The stop value of the interval.
        """
        wins = np.fromiter(self.window_seq.keys(), dtype=float)
        mask = wins[(wins <= stop) & (wins >= start)]
        for center in mask:
            self.window_seq.pop(center, None)
        return self

    @inplacify
    def cover(self, N, **kwargs):
        """
        Cover the whole domain with `N` number of windows
        uniformly built with the given parameters.

        Parameters
        ----------
        N : float
            The number of Gaussian windows.
        kwargs : dict
            Keyword arguments to pass to the `window_class`.
        """
        # NOTE(review): unlike the other @inplacify methods, this one does
        # not end with ``return self`` — confirm whether that is intended.
        self.add_window_linspace(np.min(self.x), np.max(self.x), N, **kwargs)

    def _calculate(
            self,
            reference_point,
            order,
            show_graph=False,
            silent=False,
            force_recalculate=False,
            fastmath=True,
            usenifft=False,
            parallel=False,
            ransac=False,
            errors="ignore",
            **kwds
    ):
        # Core dispersion-fitting pipeline; ``calculate`` is the public
        # wrapper carrying the documentation.
        if len(self.window_seq) == 0:
            raise ValueError("Before calculating a window sequence must be set.")

        # Invalidate the cached GD if the window set or fastmath flag changed.
        if self.cachedlen != len(self.window_seq) or fastmath != self.fastmath:
            force_recalculate = True

        self.fastmath = fastmath
        if force_recalculate:
            self.found_centers.clear()
            self.build_GD(
                silent=silent, fastmath=fastmath, usenifft=usenifft, parallel=parallel, errors=errors
            )
        if self.GD is None:
            self.build_GD(
                silent=silent, fastmath=fastmath, usenifft=usenifft, parallel=parallel, errors=errors
            )
        self.cachedlen = len(self.window_seq)

        # NOTE(review): order=0 or negative values are not rejected by this
        # guard, although the message promises [2, 6] — verify intent.
        if order == 1 or order > 6:
            raise ValueError("Order must be in [2, 6].")

        if ransac:
            print("Running RANSAC-filter..")
            self.GD.ransac_filter(order=order, plot=show_graph, **kwds)
            self.GD.apply_filter()

        d, ds, fr = self.GD._fit(
            reference_point=reference_point, order=order
        )
        if show_graph:
            self.GD.plot()
        return d, ds, fr

    def calculate(
            self,
            reference_point,
            order,
            show_graph=False,
            silent=False,
            force_recalculate=False,
            fastmath=True,
            usenifft=False,
            parallel=False,
            ransac=False,
            errors="ignore",
            **kwds
    ):
        """
        Calculates the dispersion.

        Parameters
        ----------
        reference_point : float
            The reference point.
        order : int
            The dispersion order to look for. Must be in [2, 6].
        show_graph : bool, optional
            Whether to show the GD graph on complete. Default is False.
        silent : bool, optional
            Whether to print progressbar. By default it will print.
        force_recalculate : bool, optional
            Force to recalculate the GD graph not only the curve fitting.
            Default is False.
        fastmath : bool, optional
            Whether to build additional arrays to display heatmap.
            Default is True.
        usenifft : bool, optional
            Whether to use Non-unifrom FFT when calculating GD.
            Default is False. **Not stable.**
        parallel : bool, optional
            Whether to use parallel computation. Only availabe if
            `Dask` is installed. The speedup is about 50-70%.
            Default is False.
        ransac : bool, optional
            Whether to use RANSAC filtering on the detected peaks.
            Default is False.
        errors : str, optional
            Whether to raise an error is the algorithm couldn't find
            the center of the peak. Default is "ignore".
        kwds : optional
            Other keyword arguments to pass to RANSAC filter.

        Raises
        ------
        ValueError, if no window sequence is added to the interferogram.
        ValueError, if order is 1.
        ModuleNotFoundError, if `Dask` is not available when using
        parallel=True.
        """
        return self._calculate(
            reference_point,
            order,
            show_graph,
            silent,
            force_recalculate,
            fastmath,
            usenifft,
            parallel,
            ransac,
            errors,
            **kwds
        )

    def build_GD(self, silent=False, fastmath=True, usenifft=False, parallel=False, errors="ignore"):
        """
        Build the GD.

        Parameters
        ----------
        silent : bool, optional
            Whether to print progressbar. By default it will print.
        fastmath : bool, optional
            Whether to build additional arrays to display heatmap.
            Default is True.
        usenifft : bool, optional
            Whether to use Non-unifrom FFT when calculating GD.
            Default is False. **Not stable.**
        parallel : bool, optional
            Whether to use parallel computation. Only availabe if
            `Dask` is installed. The speedup is about 50-70%.
            Default is False.
        errors : str, optional
            Whether to raise an error is the algorithm couldn't find
            the center of the peak.

        Returns
        -------
        GD : pysprint.core.phase.Phase
            The phase object with `GD_mode=True`. See its docstring
            for more info.
        """
        if parallel:
            if not CAN_PARALLELIZE:
                raise ModuleNotFoundError(
                    "Module `dask` not found. Please install it in order to use parallelism."
                )
            else:
                self.fastmath = fastmath
                self._apply_window_seq_parallel(fastmath=fastmath, usenifft=usenifft, errors=errors)
                # Trigger the delayed computations (optionally with a bar).
                if not silent:
                    with ProgressBar():
                        computed = compute(*self.found_centers.values())
                else:
                    computed = compute(*self.found_centers.values())
                # Drop the centers whose peak detection failed (None results).
                cleaned_delays = [
                    k for i, k in enumerate(self.found_centers.keys()) if computed[i] is not None
                ]
                delay = np.fromiter(cleaned_delays, dtype=float)
                omega = np.fromiter([c for c in computed if c is not None], dtype=float)
                if not silent:
                    print(f"Skipped: {len(self.window_seq) - sum(1 for _ in filter(None.__ne__, computed))}")
        else:
            self.fastmath = fastmath
            # NOTE(review): the ``errors`` argument is not forwarded to the
            # sequential path, unlike the parallel one — possible oversight.
            self._apply_window_sequence(silent=silent, fastmath=fastmath, usenifft=usenifft)
            self._clean_centers(silent=silent)
            delay = np.fromiter(self.found_centers.keys(), dtype=float)
            omega = np.fromiter(self.found_centers.values(), dtype=float)

        self.GD = Phase(delay, omega, GD_mode=True)
        return self.GD

    def build_phase(self):
        # WFT works on the group delay directly, not the phase.
        raise NotImplementedError("Use `build_GD` instead.")

    def _predict_ideal_window_fwhm(self):
        # Placeholder: not implemented.
        pass

    def _apply_window_sequence(
            self, silent=False, fastmath=True, usenifft=False, errors="ignore"
    ):
        # Sequentially IFFT the interferogram once per window and record the
        # detected ridge position for each window center.
        winlen = len(self.window_seq)
        self.errorcounter = 0
        if not fastmath:
            # here we setup the shape for the Z array because
            # it is much faster than using np.append in every iteration
            _x, _y, _, _ = self._safe_cast()
            _obj = FFTMethod(_x, _y)
            _obj.ifft(usenifft=usenifft)
            x, y = find_roi(_obj.x, _obj.y)
            self.Y_cont = np.array(x)
            yshape = y.size
            xshape = len(self.window_seq)
            self.Z_cont = np.empty(shape=(yshape, xshape))
        for idx, (_center, _window) in enumerate(self.window_seq.items()):
            _x, _y, _, _ = self._safe_cast()
            _obj = FFTMethod(_x, _y)
            # Apply the window in the frequency domain, then transform back.
            _obj.y *= _window.y
            _obj.ifft(usenifft=usenifft)
            x, y = find_roi(_obj.x, _obj.y)
            if not fastmath:
                self.Z_cont[:, idx] = y
            try:
                centx, _ = find_center(x, y)
                self.found_centers[_center] = centx
            except ValueError as err:
                self.errorcounter += 1
                if errors == "ignore":
                    self.found_centers[_center] = None
                else:
                    raise err
            if not silent:
                # This creates about 5-15% overhead.. maybe create a buffer
                sys.stdout.write('\r')
                j = (idx + 1) / winlen
                sys.stdout.write(
                    "Progress : [%-30s] %d%% (Skipped: %d)" % ('=' * int(30 * j), 100 * j, self.errorcounter)
                )
                sys.stdout.flush()

    def _apply_window_seq_parallel(
            self, fastmath=True, usenifft=False, errors="ignore"
    ):
        # Same as _apply_window_sequence, but each per-window evaluation is a
        # dask-delayed task; results materialize later via compute().
        self.errorcounter = 0
        if not fastmath:
            # here we setup the shape for the Z array and allocate Y, because
            # it is much faster than using np.append in every iteration
            _x, _y, _, _ = self._safe_cast()
            _obj = FFTMethod(_x, _y)
            _obj.ifft(usenifft=usenifft)
            x, y = find_roi(_obj.x, _obj.y)
            yshape = y.size
            self.Y_cont = np.array(x)
            xshape = len(self.window_seq)
            self.Z_cont = np.empty(shape=(yshape, xshape))
        for idx, (_center, _window) in enumerate(self.window_seq.items()):
            element = self._prepare_element(idx, _window, fastmath, usenifft, errors)
            if element is None:
                # This might be useless, since we lazy evaluate things..
                self.errorcounter += 1
            self.found_centers[_center] = element

    @delayed
    def _prepare_element(self, idx, window, fastmath=True, usenifft=False, errors="ignore"):
        # One windowed-IFFT + peak-detection task; delayed when dask exists.
        _x, _y, _, _ = self._safe_cast()
        _obj = FFTMethod(_x, _y)
        _obj.y *= window.y
        _obj.ifft(usenifft=usenifft)
        x, y = find_roi(_obj.x, _obj.y)
        if not fastmath:
            self.Z_cont[:, idx] = y
        try:
            centx, _ = find_center(x, y)
            return centx
        except ValueError as err:
            if errors == "ignore":
                return None
            else:
                raise err

    def _clean_centers(self, silent=False):
        # Drop windows whose peak detection returned None and report how
        # many were skipped.
        dct = {k: v for k, v in self.found_centers.items() if v is not None}
        self.found_centers = dct
        winlen = len(self.window_seq)
        usefullen = len(self.found_centers)
        if not silent:
            if winlen != usefullen:
                print(
                    f"\n{abs(winlen-usefullen)} points skipped "
                    f"due to ambiguous peak position."
                )

    def errorplot(self, *args, **kwargs):
        """
        Plot the errors of fitting.

        Parameters
        ----------
        ax : matplotlib.axes.Axes, optional
            An axis to draw the plot on. If not given, it will plot
            of the last used axis.
        percent : bool, optional
            Whether to plot percentage difference. Default is False.
        title : str, optional
            The title of the plot. Default is "Errors".
        kwargs : dict, optional
            Additional keyword arguments to pass to plot function.
        """
        try:
            # getattr returns None when GD is unset; calling None raises
            # TypeError, which is translated to a domain-specific error.
            getattr(self.GD, "errorplot", None)(*args, **kwargs)
        except TypeError:
            raise NotCalculatedException("Must calculate before plotting errors.")

    @property
    def get_GD(self):
        """
        Return the GD if it is already calculated.
        """
        if self.GD is not None:
            return self.GD
        raise NotCalculatedException("Must calculate GD first.")

    @property
    def errors(self):
        """
        Return the fitting errors as np.ndarray.
        """
        return getattr(self.GD, "errors", None)

    def _collect_failures(self):
        # Window centers for which no peak position was recorded.
        return [k for k in self.window_seq.keys() if k not in self.found_centers.keys()]

    def _construct_heatmap_data(self):
        # X axis of the heatmap: the window centers.
        self.X_cont = np.fromiter(self.window_seq.keys(), dtype=float)

    def heatmap(self, ax=None, levels=None, cmap="viridis", include_ridge=True):
        """
        Plot the heatmap.

        Parameters
        ----------
        ax : matplotlib.axes.Axes, optional
            An axis to draw the plot on. If not given, it will plot
            of the last used axis.
        levels : np.ndarray, optional
            The levels to use for plotting.
        cmap : str, optional
            The colormap to use.
        include_ridge : bool, optional
            Whether to mark the detected ridge of the plot.
            Default is True.
        """
        if self.GD is None:
            raise NotCalculatedException("Must calculate GD first.")
        if self.fastmath:
            raise ValueError(
                "You need to recalculate with `fastmath=False` to plot the heatmap."
            )
        # Only construct if we need to..
        if not (self.Y_cont.size, self.X_cont.size) == self.Z_cont.shape:
            self._construct_heatmap_data()
        if ax is None:
            plt.contourf(
                self.X_cont, self.Y_cont, self.Z_cont, levels=levels, cmap=cmap, extend="both"
            )
        else:
            ax.contourf(
                self.X_cont, self.Y_cont, self.Z_cont, levels=levels, cmap=cmap, extend="both"
            )
        if include_ridge:
            if ax is None:
                plt.plot(*self.GD.data, color='red', label='detected ridge')
            else:
                ax.plot(*self.GD.data, color='red', label='detected ridge')
            plt.legend()

        if ax is None:
            plt.xlabel('Window center [PHz]')
            plt.ylabel('Delay [fs]')
            try:
                # Clip the y range so the ridge stays in view.
                upper_bound = min(1.5 * np.max(self.GD.data[1]), np.max(self.Y_cont))
                plt.ylim(None, upper_bound)
            except ValueError:
                pass
                # ValueError: zero-size array to reduction operation maximum which has no identity
                # This means that the array is empty, we should pass that case.
        else:
            ax.set_autoscalex_on(False)
            try:
                upper_bound = min(1.5 * np.max(self.GD.data[1]), np.max(self.Y_cont))
                ax.set(
                    xlabel="Window center [PHz]",
                    ylabel="Delay [fs]",
                    ylim=(None, upper_bound)
                )
            except ValueError:
                pass

    def get_heatmap_data(self):
        """
        Return the data which was used to create the heatmap.

        Returns
        -------
        X_cont : np.ndarray
            The window centers with shape (n,).
        Y_cont : np.ndarray
            The time axis calculated from the IFFT of the dataset with
            shape (m,).
        Z_cont : np.ndarray
            2D array with shape (m, n) containing the depth information.
        """
        if all([self.Y_cont.size != 0, self.Z_cont.size != 0]):
            self._construct_heatmap_data()
        else:
            raise ValueError("Must calculate with `fastmath=False` before trying to access the heatmap data.")
        return self.X_cont, self.Y_cont, self.Z_cont
# Explicit public API: star-imports from this package expose only ``publisher``.
__all__ = ['publisher']
#!/usr/bin/env python
# -*- coding: utf-8 -*-

# This file was part of Flask-Bootstrap and was modified under the terms of
# its BSD License. Copyright (c) 2013, Marc Brinkmann. All rights reserved.
#
# This file was part of Bootstrap-Flask and was modified under the terms of
# its MIT License. Copyright (c) 2018 Grey Li. All rights reserved.
#
# This file is part of the
#   Flask-SemanticUI Project (
#   https://github.com/juniors90/Flask-SemanticUI/
# ).
# Copyright (c) 2021, Ferreira Juan David
# License: MIT
# Full Text:
#   https://github.com/juniors90/Flask-SemanticUI/blob/master/LICENSE

# =====================================================================
# TESTS
# =====================================================================

from flask import current_app

from flask_semanticui import cdn_base

import pytest as pt


@pt.mark.usefixtures("client")
class TestSemanticUI:
    """Tests for the Flask-SemanticUI extension's registration and CDN helpers."""

    def test_extension_init(self, app):
        # The extension must register itself under the key "semantic".
        with app.app_context():
            extensions = current_app.extensions
            assert "semantic" in extensions
            assert "semantic_ui" not in extensions

    def test_load_css_with_default_versions(self, semantic):
        # load_css() with defaults must emit the pinned CDN <link> tag
        # (version 2.4.2 with its SRI hash), built from cdn_base and the
        # extension's own version/integrity attributes.
        rv = semantic.load_css()
        cdn = (
            '<link rel="stylesheet" href="https://cdn.jsdelivr.net/npm'
            + '/semantic-ui@2.4.2/dist/semantic.min.css" integrity='
            + '"sha256-UXesixbeLkB/UYxVTzuj/gg3+LMzgwAmg3zD+C4ZASQ=" '
            + 'crossorigin="anonymous">'
        )
        semantic_css = (
            '<link rel="stylesheet" href="'
            + cdn_base
            + "/semantic-ui@"
            + semantic.semantic_version
            + "/dist/"
            + semantic.semantic_css_filename
            + '" integrity="'
            + semantic.semantic_css_integrity
            + '" crossorigin="anonymous">'
        )
        assert semantic_css == rv
        assert semantic_css == cdn
        assert rv == cdn


def test_semantic_get_sri(app, semantic):
    # NOTE(review): in the flattened original the indentation of this
    # function is ambiguous. It is reconstructed at module level because,
    # as a method of TestSemanticUI, pytest would bind the test instance to
    # ``app`` (there is no ``self`` parameter). Confirm against the repo.
    with app.app_context():
        app.config["SEMANTIC_SERVE_LOCAL"] = True
        # When an explicit SRI is supplied it must be returned unchanged...
        assert (
            semantic._get_sri(
                name="semantic_js",
                version="2.4.2",
                sri="sha256-CgSoWC9w5wNmI1aN8dIMK+6DPelUEtvDr+Bc2m/0Nx8=",
            )
            == "sha256-CgSoWC9w5wNmI1aN8dIMK+6DPelUEtvDr+Bc2m/0Nx8="
        )
        # ...and sri=None must stay None while serving locally.
        assert (
            semantic._get_sri(name="semantic_js", version="2.4.2", sri=None)
            is None
        )
# Demo: fit a noisy 1-D sine curve with a small neural network and redraw
# the current fit after every conjugate-gradient training step.
from numpy import arange, newaxis, sin
from pylab import randn, plot, scatter, hold

from monte.arch.neuralnet import NeuralnetIsl
from monte.gym import trainer

mynn = NeuralnetIsl(1, 10, 1)  # neural network with one input-, one output-,
                               # and one hidden layer with 10 sigmoid units
mytrainer = trainer.Conjugategradients(mynn, 10)

inputs = arange(-10.0, 10.0, 0.1)[newaxis, :]       # produce some inputs
outputs = sin(inputs) + randn(1, inputs.shape[1])   # produce some outputs
testinputs = arange(-10.5, 10.5, 0.05)[newaxis, :]  # produce some test-data
testoutputs = sin(testinputs)

for i in range(50):
    hold(False)
    scatter(inputs[0], outputs[0])
    hold(True)
    plot(testinputs[0], mynn.apply(testinputs)[0][0])
    mytrainer.step((inputs, outputs), 0.0001)
    # Fix: the original used the Python-2-only ``print`` statement, which is
    # a SyntaxError under Python 3. The single-argument call form below is
    # valid (and behaves identically) under both Python 2 and 3.
    print(mynn.cost((inputs, outputs), 0.0001))
# *****************************************************************************
#
# Copyright (c) 2019, the Perspective Authors.
#
# This file is part of the Perspective library, distributed under the terms of
# the Apache License 2.0. The full license can be found in the LICENSE file.
#


def line(dat, col, index='index', key='key'):
    """Render ``dat`` as an x/y line chart in a perspective widget.

    ``index`` provides the x axis (also used as the row pivot), ``col``
    the plotted y values, and ``key`` the series split (column pivot).
    Both pivoted columns are aggregated with ``'last'``.
    """
    from .psp import psp

    aggregates = {index: 'last', col: 'last'}
    return psp(
        dat,
        view='xy_line',
        columns=[index, col],
        rowpivots=[index],
        columnpivots=[key],
        aggregates=aggregates,
    )
# Implementar la funcion crear_tabla_peso, que cree una tabla PersonaPeso con: # - IdPersona: Int() (Clave Foranea Persona) # - Fecha: Date() # - Peso: Int() # Implementar la funcion borrar_tabla, que borra la tabla creada anteriormente. from frro_soporte_2019_01.practico_03a.ejercicio_01 import crear_tabla, borrar_tabla from frro_soporte_2019_01.practico_03a.ejercicio_01 import Base, Persona from sqlalchemy import Column, Date, Integer, ForeignKey from sqlalchemy.orm import relationship from frro_soporte_2019_01.practico_03a.ejercicio_01 import session, engine class PersonaPeso(Base): __tablename__ = 'personaPeso' idPeso = Column(Integer, primary_key=True) peso = Column(Integer) fecha = Column(Date, nullable=False) id_persona = Column(Integer, ForeignKey('persona.idPersona')) persona = relationship(Persona) def crear_tabla_peso(): Base.metadata.create_all(engine) def borrar_tabla_peso(): PersonaPeso.__table__.drop(engine) # no modificar def reset_tabla(func): def func_wrapper(): crear_tabla() crear_tabla_peso() func() borrar_tabla_peso() borrar_tabla() return func_wrapper
import smbus
import time

# Register map (NAU7802 datasheet, section 10).
Scale_Registers = {
    'NAU7802_PU_CTRL': 0x00, 'NAU7802_CTRL1': 1, 'NAU7802_CTRL2': 2,
    'NAU7802_OCAL1_B2': 3, 'NAU7802_OCAL1_B1': 4, 'NAU7802_OCAL1_B0': 5,
    'NAU7802_GCAL1_B3': 6, 'NAU7802_GCAL1_B2': 7, 'NAU7802_GCAL1_B1': 8,
    'NAU7802_GCAL1_B0': 9, 'NAU7802_OCAL2_B2': 10, 'NAU7802_OCAL2_B1': 11,
    'NAU7802_OCAL2_B0': 12, 'NAU7802_GCAL2_B3': 13, 'NAU7802_GCAL2_B2': 14,
    'NAU7802_GCAL2_B1': 15, 'NAU7802_GCAL2_B0': 16, 'NAU7802_I2C_CONTROL': 17,
    'NAU7802_ADCO_B2': 18, 'NAU7802_ADCO_B1': 19, 'NAU7802_ADCO_B0': 20,
    'NAU7802_ADC': 0x15,   # Shared ADC and OTP 32:24
    'NAU7802_OTP_B1': 22,  # OTP 23:16 or 7:0?
    'NAU7802_OTP_B0': 23,  # OTP 15:8
    'NAU7802_PGA': 0x1B, 'NAU7802_PGA_PWR': 0x1C, 'NAU7802_DEVICE_REV': 0x1F}

# Bits within the PU_CTRL register
PU_CTRL_Bits = {'NAU7802_PU_CTRL_RR': 0, 'NAU7802_PU_CTRL_PUD': 1,
                'NAU7802_PU_CTRL_PUA': 2, 'NAU7802_PU_CTRL_PUR': 3,
                'NAU7802_PU_CTRL_CS': 4, 'NAU7802_PU_CTRL_CR': 5,
                'NAU7802_PU_CTRL_OSCS': 6, 'NAU7802_PU_CTRL_AVDDS': 7}

# Bits within the CTRL1 register
CTRL1_Bits = {'NAU7802_CTRL1_GAIN': 2, 'NAU7802_CTRL1_VLDO': 5,
              'NAU7802_CTRL1_DRDY_SEL': 6, 'NAU7802_CTRL1_CRP': 7}

# Bits within the CTRL2 register
CTRL2_Bits = {'NAU7802_CTRL2_CALMOD': 0, 'NAU7802_CTRL2_CALS': 2,
              'NAU7802_CTRL2_CAL_ERROR': 3, 'NAU7802_CTRL2_CRS': 4,
              'NAU7802_CTRL2_CHS': 7}

# Bits within the PGA register
PGA_Bits = {'NAU7802_PGA_CHP_DIS': 0, 'NAU7802_PGA_INV': 3,
            'NAU7802_PGA_BYPASS_EN': 4, 'NAU7802_PGA_OUT_EN': 5,
            'NAU7802_PGA_LDOMODE': 6, 'NAU7802_PGA_RD_OTP_SEL': 7}

# Bits within the PGA PWR register
PGA_PWR_Bits = {'NAU7802_PGA_PWR_PGA_CURR': 0, 'NAU7802_PGA_PWR_ADC_CURR': 2,
                'NAU7802_PGA_PWR_MSTR_BIAS_CURR': 4,
                'NAU7802_PGA_PWR_PGA_CAP_EN': 7}

# Allowed low-drop-out regulator voltages
NAU7802_LDO_Values = {'NAU7802_LDO_2V4': 0b111, 'NAU7802_LDO_2V7': 0b110,
                      'NAU7802_LDO_3V0': 0b101, 'NAU7802_LDO_3V3': 0b100,
                      'NAU7802_LDO_3V6': 0b011, 'NAU7802_LDO_3V9': 0b010,
                      'NAU7802_LDO_4V2': 0b001, 'NAU7802_LDO_4V5': 0b000}

# Allowed gains
NAU7802_Gain_Values = {'NAU7802_GAIN_128': 0b111, 'NAU7802_GAIN_64': 0b110,
                       'NAU7802_GAIN_32': 0b101, 'NAU7802_GAIN_16': 0b100,
                       'NAU7802_GAIN_8': 0b011, 'NAU7802_GAIN_4': 0b010,
                       'NAU7802_GAIN_2': 0b001, 'NAU7802_GAIN_1': 0b000}

# Allowed samples per second
NAU7802_SPS_Values = {'NAU7802_SPS_320': 0b111, 'NAU7802_SPS_80': 0b011,
                      'NAU7802_SPS_40': 0b010, 'NAU7802_SPS_20': 0b001,
                      'NAU7802_SPS_10': 0b000}

# Select between channel values
NAU7802_Channels = {'NAU7802_CHANNEL_1': 0, 'NAU7802_CHANNEL_2': 1}

# Calibration state
NAU7802_Cal_Status = {'NAU7802_CAL_SUCCESS': 0, 'NAU7802_CAL_IN_PROGRESS': 1,
                      'NAU7802_CAL_FAILURE': 2}


class NAU7802():
    """I2C driver for the NAU7802 24-bit load-cell ADC.

    Python port of the SparkFun Qwiic Scale library. Weight is computed as
    y = m*x + b where x is the (averaged) raw ADC reading, b is
    ``zeroOffset`` and m is ``calibrationFactor``.
    """

    def __init__(self, i2cPort=1, deviceAddress=0x2A, zeroOffset=False,
                 calibrationFactor=False):
        """Open the I2C bus and store calibration state.

        :param i2cPort: Linux i2c bus number to open.
        :param deviceAddress: Unshifted 7-bit I2C address of the NAU7802.
        :param zeroOffset: Previously stored tare offset (b), or False.
        :param calibrationFactor: Previously stored scale factor (m), or False.
        """
        self.bus = smbus.SMBus(i2cPort)
        self.deviceAddress = deviceAddress
        # y = mx + b
        self.zeroOffset = zeroOffset              # this is b
        self.calibrationFactor = calibrationFactor  # this is m

    def available(self):
        """Return True if the Cycle Ready bit is set (conversion complete)."""
        return self.getBit(PU_CTRL_Bits['NAU7802_PU_CTRL_CR'],
                           Scale_Registers['NAU7802_PU_CTRL'])

    def calAFEStatus(self):
        """Return the calibration state as a NAU7802_Cal_Status value."""
        if self.getBit(CTRL2_Bits['NAU7802_CTRL2_CALS'],
                       Scale_Registers['NAU7802_CTRL2']):
            return NAU7802_Cal_Status['NAU7802_CAL_IN_PROGRESS']
        if self.getBit(CTRL2_Bits['NAU7802_CTRL2_CAL_ERROR'],
                       Scale_Registers['NAU7802_CTRL2']):
            return NAU7802_Cal_Status['NAU7802_CAL_FAILURE']
        # Calibration passed
        return NAU7802_Cal_Status['NAU7802_CAL_SUCCESS']

    def calculateZeroOffset(self, averageAmount):
        """Tare: call with nothing on the scale; stores the averaged reading."""
        self.setZeroOffset(self.getAverage(averageAmount))

    def calibrateAFE(self):
        """Synchronously calibrate the analog front end.

        Takes approximately 344ms; waits up to 1s. Recommended after any
        change of gain, sample rate or channel. Returns True on success.
        """
        self.beginCalibrateAFE()
        return self.waitForCalibrateAFE(1)

    def begin(self, initialized=True):
        """Check communication and (optionally) run default init + calibration.

        :param initialized: When True (default) reset, power up and calibrate;
            when False the caller is responsible for init and calibration.
        :return: True upon successful completion.
        """
        # There are rare times when the sensor is occupied and doesn't ack.
        # A 2nd try resolves this.
        if not self.isConnected():
            if not self.isConnected():
                return False
        result = True  # accumulate a result as we do the setup
        if initialized:
            result &= self.reset()    # reset all registers
            result &= self.powerUp()  # power on analog and digital sections
            result &= self.setLDO(NAU7802_LDO_Values['NAU7802_LDO_3V3'])       # LDO to 3.3V
            result &= self.setGain(NAU7802_Gain_Values['NAU7802_GAIN_128'])    # gain 128
            result &= self.setSampleRate(NAU7802_SPS_Values['NAU7802_SPS_80']) # 80 samples/s
            # Turn off CLK_CHP. From 9.1 power on sequencing.
            result &= self.setRegister(Scale_Registers['NAU7802_ADC'], 0x30)
            # Enable 330pF decoupling cap on chan 2. From 9.14 application circuit note.
            result &= self.setBit(PGA_PWR_Bits['NAU7802_PGA_PWR_PGA_CAP_EN'],
                                  Scale_Registers['NAU7802_PGA_PWR'])
            # Re-cal analog front end when we change gain, sample rate, or channel
            result &= self.calibrateAFE()
        return result

    def beginCalibrateAFE(self):
        """Start asynchronous AFE calibration.

        Poll for completion with calAFEStatus() or wait with
        waitForCalibrateAFE().
        """
        self.setBit(CTRL2_Bits['NAU7802_CTRL2_CALS'],
                    Scale_Registers['NAU7802_CTRL2'])

    def calculateCalibrationFactor(self, weightOnScale, averageAmount):
        """Derive and store m from a known weight sitting on the scale.

        Call after zeroing. Units of weightOnScale do not matter.
        """
        # NOTE(review): getAverage() may return False on timeout; assumes a
        # working sensor here, as the original did.
        onScale = self.getAverage(averageAmount)
        newCalFactor = (onScale - self.zeroOffset) / weightOnScale
        self.setCalibrationFactor(newCalFactor)

    def clearBit(self, bitNumber, registerAddress):
        """Mask & clear a given bit within a register."""
        value = self.getRegister(registerAddress)
        value &= ~(1 << bitNumber)  # clear this bit
        return self.setRegister(registerAddress, value)

    def getAverage(self, averageAmount):
        """Return the average of averageAmount readings, or False on timeout.

        Gives up after 1000ms, so don't use this to average slow-rate samples
        (e.g. 8 samples at 1Hz needs 8s).

        BUG FIX: the original loop incremented the sample counter after the
        break check, so it summed averageAmount+1 readings but divided by
        averageAmount, skewing every average.
        """
        total = 0
        samplesAcquired = 0
        startTime = time.time()
        while samplesAcquired < averageAmount:
            try:
                total += self.getReading()
            except Exception:
                return False
            samplesAcquired += 1
            if time.time() - startTime > 1:
                return False  # timeout - bail with error
            time.sleep(0.001)
        return total / averageAmount

    def getBit(self, bitNumber, registerAddress):
        """Return a given bit within a register as bool."""
        value = self.getRegister(registerAddress)
        value = value >> bitNumber & 1
        return bool(value)

    def getCalibrationFactor(self):
        """Return m. Useful for persisting the value into NVM."""
        return self.calibrationFactor

    def getReading(self):
        """Return the signed 24-bit ADC reading.

        Blocks until the Cycle Ready bit reports a finished conversion.

        BUG FIX: the original mimicked the C sign-recovery trick
        ``(raw << 8) >> 8``, which relies on a fixed-width signed container;
        on Python's arbitrary-precision ints it is a no-op, so negative
        readings came back as large positive numbers. Sign-extend the 24-bit
        two's-complement value explicitly instead.
        """
        while not self.available():
            pass
        block = self.bus.read_i2c_block_data(
            self.deviceAddress, Scale_Registers['NAU7802_ADCO_B2'], 3)
        valueRaw = (block[0] << 16) | (block[1] << 8) | block[2]  # MSB..LSB
        if valueRaw & 0x800000:       # sign bit of the 24-bit value set?
            valueRaw -= 1 << 24       # two's-complement sign extension
        return valueRaw

    def getRegister(self, registerAddress):
        """Return the byte content of a register, or False on bus error."""
        try:
            return self.bus.read_i2c_block_data(
                self.deviceAddress, registerAddress, 1)[0]
        except Exception:
            return False  # error

    def getRevisionCode(self):
        """Return the revision code of this IC. Always 0x0F."""
        revisionCode = self.getRegister(Scale_Registers['NAU7802_DEVICE_REV'])
        return revisionCode & 0x0F

    def getWeight(self, allowNegativeWeights=False, samplesToTake=10):
        """Return y of y = mx + b for the current averaged reading.

        Requires zero offset and calibration factor to be set; prints a
        message and returns False when not calibrated.
        """
        onScale = self.getAverage(samplesToTake)
        # Prevent the current reading from being less than the zero offset.
        # This happens when the scale is zero'd, unloaded, and the load cell
        # reports slightly less than the zero value, making the weight
        # negative or jump to millions of pounds.
        if not allowNegativeWeights:
            if onScale < self.zeroOffset:
                onScale = self.zeroOffset  # force reading to zero
        try:
            return (onScale - self.zeroOffset) / self.calibrationFactor
        except (TypeError, ZeroDivisionError):
            # calibrationFactor is still False (or zero)
            print('Needs calibrating')
            return False

    def getZeroOffset(self):
        """Return b. Useful for persisting the value into NVM."""
        return self.zeroOffset

    def isConnected(self):
        """Return True if the device acks at the I2C address."""
        try:
            self.bus.read_byte(self.deviceAddress)
            return True
        except Exception:
            return False

    def powerDown(self):
        """Put the scale into low-power 200nA mode."""
        self.clearBit(PU_CTRL_Bits['NAU7802_PU_CTRL_PUD'],
                      Scale_Registers['NAU7802_PU_CTRL'])
        return self.clearBit(PU_CTRL_Bits['NAU7802_PU_CTRL_PUA'],
                             Scale_Registers['NAU7802_PU_CTRL'])

    def powerUp(self):
        """Power up digital and analog sections of the scale (~2mA)."""
        self.setBit(PU_CTRL_Bits['NAU7802_PU_CTRL_PUD'],
                    Scale_Registers['NAU7802_PU_CTRL'])
        self.setBit(PU_CTRL_Bits['NAU7802_PU_CTRL_PUA'],
                    Scale_Registers['NAU7802_PU_CTRL'])
        # Wait for Power Up bit to be set - takes approximately 200us
        counter = 0
        while not self.getBit(PU_CTRL_Bits['NAU7802_PU_CTRL_PUR'],
                              Scale_Registers['NAU7802_PU_CTRL']):
            time.sleep(0.001)
            if counter > 100:
                return False  # error
            counter += 1
        return True

    def reset(self):
        """Reset all registers to power-on defaults."""
        self.setBit(PU_CTRL_Bits['NAU7802_PU_CTRL_RR'],
                    Scale_Registers['NAU7802_PU_CTRL'])  # set RR
        time.sleep(0.001)
        # Clear RR to leave reset state
        return self.clearBit(PU_CTRL_Bits['NAU7802_PU_CTRL_RR'],
                             Scale_Registers['NAU7802_PU_CTRL'])

    def setBit(self, bitNumber, registerAddress):
        """Mask & set a given bit within a register."""
        value = self.getRegister(registerAddress)
        value |= (1 << bitNumber)  # set this bit
        return self.setRegister(registerAddress, value)

    def setCalibrationFactor(self, newCalFactor):
        """Store a known calibration factor (e.g. loaded from NVM)."""
        self.calibrationFactor = newCalFactor

    def setChannel(self, channelNumber):
        """Select input channel 1 (default) or 2."""
        if channelNumber == NAU7802_Channels['NAU7802_CHANNEL_1']:
            return self.clearBit(CTRL2_Bits['NAU7802_CTRL2_CHS'],
                                 Scale_Registers['NAU7802_CTRL2'])
        return self.setBit(CTRL2_Bits['NAU7802_CTRL2_CHS'],
                           Scale_Registers['NAU7802_CTRL2'])

    def setGain(self, gainValue):
        """Set the gain. x1, 2, 4, 8, 16, 32, 64, 128 are available."""
        if gainValue > 0b111:
            gainValue = 0b111  # error check
        value = self.getRegister(Scale_Registers['NAU7802_CTRL1'])
        value &= 0b11111000  # clear gain bits
        value |= gainValue   # mask in new bits
        return self.setRegister(Scale_Registers['NAU7802_CTRL1'], value)

    def setIntPolarityHigh(self):
        """Set Int pin to be high when data is ready (default)."""
        # 0 = CRDY pin is high active (ready when 1)
        return self.clearBit(CTRL1_Bits['NAU7802_CTRL1_CRP'],
                             Scale_Registers['NAU7802_CTRL1'])

    def setIntPolarityLow(self):
        """Set Int pin to be low when data is ready."""
        # 1 = CRDY pin is low active (ready when 0)
        return self.setBit(CTRL1_Bits['NAU7802_CTRL1_CRP'],
                           Scale_Registers['NAU7802_CTRL1'])

    def setLDO(self, ldoValue):
        """Set the onboard LDO regulator.

        2.4, 2.7, 3.0, 3.3, 3.6, 3.9, 4.2, 4.5V are available.
        """
        if ldoValue > 0b111:
            ldoValue = 0b111  # error check
        value = self.getRegister(Scale_Registers['NAU7802_CTRL1'])
        value &= 0b11000111      # clear LDO bits
        value |= ldoValue << 3   # mask in new LDO bits
        self.setRegister(Scale_Registers['NAU7802_CTRL1'], value)
        # Enable the internal LDO
        return self.setBit(PU_CTRL_Bits['NAU7802_PU_CTRL_AVDDS'],
                           Scale_Registers['NAU7802_PU_CTRL'])

    def setRegister(self, registerAddress, value):
        """Write a single byte to a register. Return True on ACK.

        BUG FIX: the original used write_word_data, which sends a 16-bit
        little-endian word; the NAU7802 registers are 8 bits wide, so the
        extra byte spilled into the following register.
        """
        try:
            self.bus.write_byte_data(self.deviceAddress, registerAddress, value)
        except Exception:
            return False  # sensor did not ACK
        return True

    def setSampleRate(self, rate):
        """Set readings per second: 10, 20, 40, 80 or 320."""
        if rate > 0b111:
            rate = 0b111  # error check
        value = self.getRegister(Scale_Registers['NAU7802_CTRL2'])
        value &= 0b10001111  # clear CRS bits
        value |= rate << 4   # mask in new CRS bits
        return self.setRegister(Scale_Registers['NAU7802_CTRL2'], value)

    def setZeroOffset(self, newZeroOffset):
        """Store a known zero offset (e.g. loaded from NVM)."""
        self.zeroOffset = newZeroOffset

    def waitForCalibrateAFE(self, timeout=0):
        """Wait for async AFE calibration; timeout in seconds (0 = forever).

        BUG FIX: cal_ready was initialised to 0, which equals
        NAU7802_CAL_SUCCESS, so the polling loop never executed and the
        method returned True immediately without waiting. Start from the
        actual hardware status instead.
        """
        begin = time.time()
        cal_ready = self.calAFEStatus()
        while cal_ready == NAU7802_Cal_Status['NAU7802_CAL_IN_PROGRESS']:
            if (timeout > 0) and ((time.time() - begin) > timeout):
                break
            time.sleep(0.001)
            cal_ready = self.calAFEStatus()
        return cal_ready == NAU7802_Cal_Status['NAU7802_CAL_SUCCESS']
"""Tests for the Datalog -> named relational algebra translation.

Each test builds a conjunctive-query expression tree and asserts that
TranslateToNamedRA().walk() produces the expected relational-algebra
expression (Projection/NameColumns/Selection/NaturalJoin/Difference).
"""
from operator import eq
from typing import AbstractSet, Tuple
import pytest
from ...exceptions import NeuroLangException
from ...expressions import Constant, FunctionApplication, Symbol
from ...relational_algebra import (ColumnInt, ColumnStr, Difference,
                                   NameColumns, NaturalJoin, Projection,
                                   RenameColumn, Selection)
from ...utils import NamedRelationalAlgebraFrozenSet
from ..expressions import Conjunction, Negation
from ..translate_to_named_ra import TranslateToNamedRA

# Short aliases used throughout the tests.
C_ = Constant
S_ = Symbol
F_ = FunctionApplication


def test_translate_set():
    # A plain predicate R1(x, y) becomes a positional projection whose
    # integer columns are then named after the predicate's arguments.
    x = S_('x')
    y = S_('y')
    R1 = S_('R1')
    fa = R1(x, y)
    tr = TranslateToNamedRA()
    res = tr.walk(fa)
    assert res == NameColumns(
        Projection(R1, (C_(ColumnInt(0)), C_(ColumnInt(1)))),
        (x, y)
    )
    # A constant argument turns into a selection on that positional column,
    # which is then projected away.
    fa = R1(C_(1), y)
    tr = TranslateToNamedRA()
    res = tr.walk(fa)
    assert res == NameColumns(
        Projection(
            Selection(R1, C_(eq)(C_(ColumnInt(0)), C_(1))),
            (C_(ColumnInt(1)),)
        ),
        (y,)
    )


def test_equality_constant_symbol():
    # x == 'a' is translated to a singleton named relation {x: 'a'},
    # regardless of the order of the equality's arguments.
    x = S_('x')
    a = C_('a')
    R1 = S_('R1')
    expected_result = \
        C_[AbstractSet[Tuple[str]]](
            NamedRelationalAlgebraFrozenSet(('x',), {'a'})
        )
    fa = C_(eq)(x, a)
    tr = TranslateToNamedRA()
    res = tr.walk(fa)
    assert res == expected_result
    fa = C_(eq)(a, x)
    tr = TranslateToNamedRA()
    res = tr.walk(fa)
    assert res == expected_result
    # Conjoined with a predicate, the singleton relation is natural-joined in.
    y = S_('y')
    fb = R1(x, y)
    exp = Conjunction((fb, fa))
    fb_trans = NameColumns(
        Projection(R1, (C_(ColumnInt(0)), C_(ColumnInt(1)))),
        (x, y)
    )
    res = tr.walk(exp)
    assert res == NaturalJoin(fb_trans, expected_result)


def test_equality_symbols():
    x = S_('x')
    y = S_('y')
    z = S_('z')
    w = S_('w')
    R1 = S_('R1')
    y = S_('y')
    fb = R1(x, y)
    fb_trans = NameColumns(
        Projection(R1, (C_(ColumnInt(0)), C_(ColumnInt(1)))),
        (x, y)
    )
    # Equality between two columns already present becomes a plain selection.
    exp = Conjunction((fb, C_(eq)(x, y)))
    expected_result = Selection(
        fb_trans,
        C_(eq)(C_(ColumnStr('x')), C_(ColumnStr('y')))
    )
    tr = TranslateToNamedRA()
    res = tr.walk(exp)
    assert res == expected_result
    # Equality with a fresh symbol (z) extends the relation via a renamed
    # copy, joined in before selecting; argument order is preserved in the
    # selection predicate.
    exp = Conjunction((fb, C_(eq)(x, z)))
    expected_result = Selection(
        NaturalJoin(fb_trans, RenameColumn(fb_trans, x, z)),
        C_(eq)(C_(ColumnStr('x')), C_(ColumnStr('z')))
    )
    res = tr.walk(exp)
    assert res == expected_result
    exp = Conjunction((fb, C_(eq)(z, x)))
    expected_result = Selection(
        NaturalJoin(fb_trans, RenameColumn(fb_trans, x, z)),
        C_(eq)(C_(ColumnStr('z')), C_(ColumnStr('x')))
    )
    res = tr.walk(exp)
    assert res == expected_result
    # Equality between two symbols neither of which is bound by a predicate
    # cannot be translated and must raise.
    exp = Conjunction((fb, C_(eq)(z, w)))
    with pytest.raises(NeuroLangException, match="At least.*"):
        res = tr.walk(exp)


def test_joins():
    x = S_('x')
    y = S_('y')
    z = S_('z')
    R1 = S_('R1')
    fa = R1(x, y)
    fb = R1(y, z)
    # Two positive atoms sharing y translate to a natural join.
    exp = Conjunction((fa, fb))
    fa_trans = NameColumns(
        Projection(R1, (C_(ColumnInt(0)), C_(ColumnInt(1)))),
        (x, y)
    )
    fb_trans = NameColumns(
        Projection(R1, (C_(ColumnInt(0)), C_(ColumnInt(1)))),
        (y, z)
    )
    tr = TranslateToNamedRA()
    res = tr.walk(exp)
    assert res == NaturalJoin(fa_trans, fb_trans)
    # A negated atom over the same variables translates to a set difference.
    R2 = S_('R2')
    fb = R2(x, y)
    fb_trans = NameColumns(
        Projection(R2, (C_(ColumnInt(0)), C_(ColumnInt(1)))),
        (x, y)
    )
    exp = Conjunction((fa, Negation(fb)))
    tr = TranslateToNamedRA()
    res = tr.walk(exp)
    assert res == Difference(fa_trans, fb_trans)
    # A negated atom covering only part of the variables is first extended
    # by joining with the positive side, then subtracted.
    fa = R1(x, y)
    fb = R2(y, C_(0))
    fb_trans = NameColumns(
        Projection(
            Selection(R2, C_(eq)(C_(ColumnInt(1)), C_(0))),
            (C_(ColumnInt(0)),)
        ),
        (y,)
    )
    exp = Conjunction((fa, Negation(fb)))
    tr = TranslateToNamedRA()
    res = tr.walk(exp)
    assert res == Difference(fa_trans, NaturalJoin(fa_trans, fb_trans))
#!/usr/bin/env python
# -*- coding: utf-8 -*-

"""
test_get_user_config
--------------------

Tests formerly known from a unittest residing in test_config.py named
TestGetUserConfig.test_get_user_config_valid
TestGetUserConfig.test_get_user_config_invalid
TestGetUserConfig.test_get_user_config_nonexistent
"""

import os
import shutil

import pytest

from cookiecutter import config
from cookiecutter.exceptions import InvalidConfiguration

# Fixture config file checked into the repo, and the dict it must parse to.
VALID_CONFIG_PATH = 'tests/test-config/valid-config.yaml'

VALID_CONFIG = {
    'cookiecutters_dir': '/home/example/some-path-to-templates',
    'default_context': {
        'full_name': 'Firstname Lastname',
        'email': 'firstname.lastname@gmail.com',
        'github_username': 'example'
    }
}


@pytest.fixture(scope='module')
def user_config_path():
    # Canonical location cookiecutter reads its user config from.
    return os.path.expanduser('~/.cookiecutterrc')


@pytest.fixture(scope='function')
def back_up_rc(request, user_config_path):
    """
    Back up an existing cookiecutter rc and restore it after the test.
    If ~/.cookiecutterrc is pre-existing, move it to a temp location
    """
    user_config_path_backup = os.path.expanduser(
        '~/.cookiecutterrc.backup'
    )
    # Move (copy + remove) any real user rc out of the way before the test.
    if os.path.exists(user_config_path):
        shutil.copy(user_config_path, user_config_path_backup)
        os.remove(user_config_path)

    def remove_test_rc():
        """
        Remove the ~/.cookiecutterrc that has been created in the test.
        """
        if os.path.exists(user_config_path):
            os.remove(user_config_path)

    def restore_original_rc():
        """
        If it existed, restore the original ~/.cookiecutterrc
        """
        if os.path.exists(user_config_path_backup):
            shutil.copy(user_config_path_backup, user_config_path)
            os.remove(user_config_path_backup)

    # According to the py.test source code finalizers are popped from an
    # internal list that we populated via 'addfinalizer'. As a result the
    # last-added finalizer function is executed first.
    request.addfinalizer(restore_original_rc)
    request.addfinalizer(remove_test_rc)


@pytest.mark.usefixtures('back_up_rc')
def test_get_user_config_valid(user_config_path):
    """
    Get config from a valid ~/.cookiecutterrc file
    """
    shutil.copy(VALID_CONFIG_PATH, user_config_path)
    conf = config.get_user_config()
    assert conf == VALID_CONFIG


def test_get_user_config_from_path():
    """
    Get config from a valid ~/.cookiecutterrc file directly
    """
    # Passing an explicit path bypasses ~/.cookiecutterrc, so no backup needed.
    conf = config.get_user_config(VALID_CONFIG_PATH)
    assert conf == VALID_CONFIG


@pytest.mark.usefixtures('back_up_rc')
def test_get_user_config_no_rc(user_config_path):
    """
    Do NOT get config from a valid ~/.cookiecutterrc file
    """
    # Even with a valid rc in place, a missing/empty/nonexistent explicit
    # path must fall back to the defaults.
    shutil.copy(VALID_CONFIG_PATH, user_config_path)
    for rc_file in (None, '', 'this-will-not-ever-exist'):
        conf = config.get_user_config(rc_file)
        assert conf == config.DEFAULT_CONFIG


@pytest.mark.usefixtures('back_up_rc')
def test_get_user_config_invalid(user_config_path):
    """
    Get config from an invalid ~/.cookiecutterrc file
    """
    shutil.copy('tests/test-config/invalid-config.yaml', user_config_path)
    with pytest.raises(InvalidConfiguration):
        config.get_user_config()


@pytest.mark.usefixtures('back_up_rc')
def test_get_user_config_nonexistent():
    """
    Get config from a nonexistent ~/.cookiecutterrc file
    """
    # back_up_rc has moved any real rc away, so the defaults must be returned.
    assert config.get_user_config() == config.DEFAULT_CONFIG
class SettingStateException(Exception):
    """Raised when the state matrix contains duplicate values within a row."""

    def __str__(self):
        return "invalid values for state massive: values in each row have to be distinct"


class SettingWinningCombinationsException(Exception):
    """Base class for errors raised while configuring winning combinations."""

    def __str__(self):
        return "invalid values for dict combinations:"


class CombinationLengthException(SettingWinningCombinationsException):
    """Raised when a combination key's length differs from the state's column count."""

    def __str__(self):
        return super().__str__() + (" key length of dict combinations have to equal number columns "
                                    "of massive state in class OneHandBandit")


class CombinationValuesException(SettingWinningCombinationsException):
    """Raised when a combination key's values fall outside the valid picture range."""

    def __str__(self):
        # BUG FIX: the original string concatenation was missing a space,
        # yielding "...class OneHandBanditand greater than 0".
        return super().__str__() + (" key values of dict combinations have to be "
                                    "lower than (picturesNumb - 1) in class OneHandBandit "
                                    "and greater than 0")


class WinningMoneyException(SettingWinningCombinationsException):
    """Raised when a combination's payout is not a positive amount."""

    def __str__(self):
        return super().__str__() + " winning money have to be greater than 0"


class SettingProbabilities2PicturesException(Exception):
    """Base class for errors raised while assigning probabilities to pictures."""

    def __str__(self):
        return "error when set the probabilities for pictures;"


class ProbabilitiesArrayLengthException(SettingProbabilities2PicturesException):
    """Raised when the probabilities array length differs from the pictures array length."""

    def __str__(self):
        return super().__str__() + " probabilities array must be the same length as pictures array length"


class ProbabilitiesSumException(SettingProbabilities2PicturesException):
    """Raised when the probabilities do not sum to 1."""

    def __str__(self):
        return super().__str__() + " probabilities array sum must equal 1"
import numpy as np


class LineElement:
    """LineElement represents a finite arc in the discretization of the domain boundary."""

    def __init__(self, a, b, n, is_fault):
        """Constructor.

        :param a: Start point
        :param b: End point
        :param n: Outward-pointing normal
        :param is_fault: Flag this element as fault
        """
        self.a = np.array(a)
        self.h = np.array(b) - self.a          # chord vector a -> b
        self.h_norm = np.linalg.norm(self.h)   # element length
        self.n = np.array(n)
        self.n = self.n / np.linalg.norm(self.n)
        self.is_fault = is_fault

    def xi(self, theta):
        """Map from interval [-1, 1] to line a-b.

        :param theta: Scalar in [-1, 1].
        """
        # Affine map: theta=-1 -> a, theta=1 -> b.
        return self.h * (theta + 1.0) / 2.0 + self.a

    def basis(self, theta):
        """Basis function evaluated at theta (constant element).

        :param theta: Scalar in [-1, 1]
        """
        return 1.0

    def factor(self, theta):
        """Integration factor. Must return basis(theta) * |xi'(theta)|

        :param theta: Scalar in [-1, 1]
        """
        # |xi'(theta)| = |h| / 2, basis == 1.
        return self.h_norm / 2.0

    def collocation_point(self):
        """Returns midpoint of line."""
        return self.xi(0)

    def __repr__(self):
        return 'LineElement({}, {})'.format(self.a, self.a + self.h)


class InfiniteLineElement:
    """InfiniteLineElement represents an infinite arc in the discretization of the domain boundary."""

    def __init__(self, a, n):
        """Constructor.

        :param a: Start point (line direction is also a)
        :param n: Outward-pointing normal
        """
        self.a = np.array(a)
        self.a_norm = np.linalg.norm(self.a)
        self.n = np.array(n)
        self.n = self.n / np.linalg.norm(self.n)
        self.is_fault = False  # infinite elements are never faults

    def xi(self, theta):
        """Map from interval [-1, 1] to line starting at "a" with direction
        "a" extending to infinity.

        :param theta: Scalar in [-1, 1].
        """
        # theta=-1 -> a; theta -> 1 diverges to infinity along a.
        return self.a * (theta + 3.0) / (1.0 - theta)

    def basis(self, theta):
        """Basis function evaluated at theta (decays towards infinity).

        :param theta: Scalar in [-1, 1]
        """
        return ((1.0 - theta) / (theta + 3.0)) ** 2

    def factor(self, theta):
        """Integration factor. Must return basis(theta) * |xi'(theta)|

        :param theta: Scalar in [-1, 1]
        """
        # |xi'| = 4|a| / (1-theta)^2; multiplied by basis this simplifies to:
        return 4.0 * self.a_norm / (theta + 3.0) ** 2

    def collocation_point(self):
        """Returns start point of line."""
        return self.xi(-1)

    def __repr__(self):
        return 'InfiniteLineElement({})'.format(self.a)


def tessellate_line(a, b, resolution, normal, is_fault=False):
    """Tessellate the line from a to b into small arcs, such that the arc
    length is smaller than resolution.

    :param a: Start point
    :param b: End point
    :param resolution: Target arc length
    :param normal: Outward-pointing normal
    :param is_fault: Flag all line elements as fault
    """
    origin = np.array(a)
    h = np.array(b) - origin
    N = int(np.ceil(np.linalg.norm(h) / resolution))
    return [
        LineElement(origin + n / N * h, origin + (n + 1) / N * h, normal, is_fault)
        for n in range(N)
    ]


def num_fault_elements(mesh):
    """Counts number of fault elements in mesh.

    :param mesh: List of LineElements.
    """
    return sum(1 for m in mesh if m.is_fault)


def line_normal(a, b, star_centre):
    """Computes the outward-pointing normal of a line from a to b.

    The parameter star_centre defines the inside, i.e.
    dot(star_centre - a, normal) < 0.

    :param a: Start point
    :param b: End point
    :param star_centre: Star centre of a star-shaped domain
    """
    # BUG FIX: cast to float up front; with integer endpoints the original
    # in-place division (normal /= ...) raised a TypeError because NumPy
    # refuses to true-divide an integer array in place.
    a = np.asarray(a, dtype=float)
    c = np.asarray(star_centre, dtype=float) - a
    normal = np.asarray(b, dtype=float) - a
    # Rotate the chord by 90 degrees to get a normal candidate.
    normal[0], normal[1] = -normal[1], normal[0]
    normal /= np.linalg.norm(normal)
    # Flip so the normal points away from the star centre (the inside).
    return normal if np.inner(c, normal) < 0 else -normal
from ftfy import fix_text
from config import normalization
from date_detector import Parser as dParser
import re

# regexp to collapse runs of whitespace into a single space
# https://stackoverflow.com/a/2077906/1169798
_RE_COMBINE_WHITESPACE = re.compile(r"\s+")

# characters treated as noise by remove_special_chars
_ODD_CHARS = frozenset('[]{}()*^/\\')

# init common objects
dDetector = dParser()


def decode(text):
    """Fix mojibake / broken Unicode in *text* using ftfy.

    https://github.com/LuminosoInsight/python-ftfy/tree/v5.5.1
    """
    return fix_text(text, normalization=normalization)


def remove_special_chars(word):
    """Collapse whitespace in *word* and strip bracket/operator characters."""
    # remove (collapse) whitespace
    word = _RE_COMBINE_WHITESPACE.sub(" ", word).strip()
    # remove parenthesis and similar noise characters
    return ''.join(ch for ch in word if ch not in _ODD_CHARS)


def clean_data(lst):
    """
    Removes artificial NaNs and noisy cells
    - Noisy cell contains only special chars.
    """
    empty = ('nan', 'NaN')
    res = []
    for cell in lst:
        text = str(cell)
        if text in empty:
            res.append("")
        elif not any(ch.isalnum() for ch in text):
            # no alphanumeric content at all -> treat as noise
            res.append("")
        else:
            res.append(cell)
    return res


def find_date(txt):
    """
    Find first date match in a given string; a string containing multiple
    dates returns only the first one, normalized to YYYY-MM-DD.
    """
    for m in dDetector.parse(txt):
        # m.date always renders as YYYY-MM-DD
        return str(m.date)
    # no date found: return the input unchanged
    return txt


def find_num(txt):
    """
    Find first match to any number in a given txt; returns it with thousands
    separators (commas) removed, or the original string when nothing matches.
    """
    p = r'[\d]+[.,\d]+|[\d]*[.][\d]+|[\d]+'
    lst_nums = re.findall(p, txt)
    if not lst_nums:
        return txt
    # pick the first match and remove the most common mask for numbers
    return lst_nums[0].replace(',', '')
from tkinter import *
import random
import subprocess

# --- Main window setup ---
startWindow = Tk()
startWindow.title('Enigma')
startWindow.geometry('1920x1080')
startWindow.resizable(0, 0)
# startWindow.overrideredirect(True)
# startWindow.geometry("{0}x{1}+0+0".format(startWindow.winfo_screenwidth(), startWindow.winfo_screenheight()))
startWindow.wm_iconbitmap('Filters\\Icons\\chat.ico')
backgroundImage = PhotoImage(file='Filters\\BackGround\\9.png')
slicer = PhotoImage(file='Filters\\BackGround\\t.gif')
startWindow.config(bg='#60616b')

# Global Variables
colorList = ['white', 'red', '#0dff7e', 'blue', 'pink', 'yellow', '#ff8d87',
             '#ffaa54', '#e0ff54', '#87ff54', '#54ff90', '#54ffe0', '#54aaff',
             '#9354ff', '#eda5fa', '#fcb6df', '#a6a1a2']
# 1-based index into colorList of the colour currently shown on the header.
startHeaderColor = 1


# Functions
def aboutUs():
    """Open the 'About Us' popup showing the team members."""
    teamInfoPanel = Toplevel(startWindow)
    teamInfoPanel.title('About Us')
    teamInfoPanel.geometry('320x230')
    teamInfoPanel.resizable(0, 0)
    teamInfoPanel.wm_iconbitmap('Filters\\Icons\\among_us_player_light_blue_icon.ico')
    teamInfoPanel.config(bg='white')

    def on_enter_aboutUs(e):
        # Invert the popup colours while the pointer is over it.
        teamInfoPanel.config(bg='#7c008f')
        teamNameHeader.config(bg='#7c008f', fg='white')
        teamMembersLabel.config(bg='#7c008f', fg='white')

    def on_leaving_aboutUs(e):
        teamInfoPanel.config(bg='white')
        teamNameHeader.config(bg='white', fg='black')
        teamMembersLabel.config(bg='white', fg='black')

    teamNameHeader = Label(
        teamInfoPanel,
        text='Enigma',
        bg='white',
        fg='black'
    )
    teamNameHeader.place(x=15, y=20)
    teamNameHeader.config(font=("Courier New Bold Italic", 30))

    teamMembersLabel = Label(
        teamInfoPanel,
        justify=LEFT,
        text='Manthan Raj Rajoria\n'
             'Vedansh Chasta\n'
             'Mohit Agarwal\n'
             'Naveen Kumar Jangir',
        bg='white',
        fg='black'
    )
    teamMembersLabel.place(x=15, y=75)
    teamMembersLabel.config(font=("Courier New Bold Italic", 18))

    teamInfoPanel.bind('<Enter>', on_enter_aboutUs)
    teamInfoPanel.bind('<Leave>', on_leaving_aboutUs)


def on_enter_startHeader(e):
    """Recolour the main header with a random colour on mouse-over."""
    global startHeaderColor
    startHeaderColor = randCorrection()
    if startHeaderColor is None:
        # Defensive fallback; randCorrection() always returns an index now.
        startHeader.config(bg='#60616b', fg='#00ffaa')
    else:
        startHeader.config(bg='#60616b', fg=colorList[startHeaderColor - 1])


# def on_leaving_startHeader(e):
#     startHeader.config(bg='#60616b', fg='white')


def on_enter_teamNameLabel(e):
    teamNameLabel.config(bg='#60616b', fg="#ffb152")


def on_leaving_teamNameLabel(e):
    teamNameLabel.config(bg='#60616b', fg='white')


def on_enter_aboutButton(e):
    aboutButton.config(bg='#0dff7e', fg="white")


def on_leaving_aboutButton(e):
    aboutButton.config(bg='white', fg='black')


def on_enter_enterButton(e):
    enterButton.config(bg='#52dcf7', fg="white")


def on_leaving_enterButton(e):
    enterButton.config(bg='white', fg='black')


def on_enter_quit(e):
    quitButton.config(bg='black', fg='white')


def on_leaving_quit(e):
    quitButton.config(bg='white', fg='black')


def on_enter_description(e):
    descriptionHead.config(fg='#ff541c')
    descriptionLabel.config(fg='yellow')


def on_leaving_description(e):
    descriptionHead.config(fg='white')
    descriptionLabel.config(fg='white')


def on_enter_contact(e):
    contactHead.config(fg='#ff541c')
    contactLabel.config(fg='yellow')


def on_leaving_contact(e):
    contactHead.config(fg='white')
    contactLabel.config(fg='white')


def quitStartFrame():
    """Destroy the start window, ending the application."""
    startWindow.destroy()


def generateRandInt():
    """Return a random 1-based index into colorList."""
    randomColor = random.randint(1, len(colorList))
    return randomColor


def randCorrection():
    """Return a random colour index guaranteed to differ from the current one."""
    randomNumber = generateRandInt()
    if randomNumber != startHeaderColor:
        return randomNumber
    # BUGFIX: the recursive call's result must be returned -- previously the
    # function fell through and returned None whenever the first pick
    # collided with the current colour.
    return randCorrection()


def openLogForm():
    """Close this window and launch the login form as a subprocess."""
    startWindow.destroy()
    subprocess.call(["python", "Log.py"])


# Start Window Background
# mainFrameBackground = Label(
#     startWindow,
#     image=backgroundImage
# )
# mainFrameBackground.place(x=-30, y=-70)

startHeader = Label(
    startWindow,
    text='AeroTrials',
    bg='#60616b',
    fg='white',
    font=('Gothic', 90, 'bold')
)
startHeader.pack(side=TOP, pady=50)

teamNameLabel = Label(
    startWindow,
    text='by Enigma',
    bg='#60616b',
    fg='white',
    font=('Gothic', 20, 'bold')
)
teamNameLabel.place(x=1123, y=180)

descriptionHead = Label(
    startWindow,
    justify=LEFT,
    text='What we do...',
    bg='#60616b',
    fg='white',
    font=('Gothic', 40, 'bold')
)
descriptionHead.place(x=30, y=270)

descriptionLabel = Label(
    startWindow,
    justify=LEFT,
    text='AeroTrials is a groundbreaking \n\napp built by the developers of \n\nEnigma. '
         'It allows a user to try \n\non products using in a virtual \n\nworld making it '
         'easier to make \n\ndecisions. We here at Enigma \n\ntry our best to make your lives '
         '\n\neasier and better',
    bg='#60616b',
    fg='white',
    font=('Gothic', 20)
)
descriptionLabel.place(x=30, y=360)

contactHead = Label(
    startWindow,
    justify=LEFT,
    text='Contact Us',
    bg='#60616b',
    fg='white',
    font=('Gothic', 40, 'bold')
)
contactHead.place(x=30, y=830)

contactLabel = Label(
    startWindow,
    justify=LEFT,
    text='Email : manthanrajrajoria@gmail.com\n\n'
         'Ph.No : 7689904767',
    bg='#60616b',
    fg='white',
    font=('Gothic', 20)
)
contactLabel.place(x=30, y=910)

enterButton = Button(
    startWindow,
    text="Start the journey",
    justify=LEFT,
    height='4',
    width='20',
    bg='white',
    fg='black',
    bd='7',
    relief=RAISED,
    font=('Gothic', 30, 'bold'),
    command=openLogForm
)
enterButton.pack(side=TOP, pady=300)

errorLabel = Label(
    startWindow,
    text='',
    width='29',
    font=('Gothic', 20, 'bold'),
    bg='#60616b',
    fg='#ff7a7a',
)
errorLabel.place(x=740, y=740)

aboutButton = Button(
    startWindow,
    text="About Us",
    height='4',
    width='20',
    bg='white',
    fg='black',
    bd='7',
    relief=RAISED,
    font=('Gothic', 20),
    command=aboutUs
)
aboutButton.place(x=1583, y=740)

quitButton = Button(
    startWindow,
    height='4',
    width='20',
    text='Quit',
    bg='white',
    fg='black',
    bd='7',
    relief=RAISED,
    command=quitStartFrame
)
quitButton.config(font=("Gothic", 20))
quitButton.place(x=1583, y=882)

# Code — wire up the hover effects and start the event loop.
startHeader.bind('<Enter>', on_enter_startHeader)
# startHeader.bind('<Leave>', on_leaving_startHeader)
teamNameLabel.bind('<Enter>', on_enter_teamNameLabel)
teamNameLabel.bind('<Leave>', on_leaving_teamNameLabel)
aboutButton.bind('<Enter>', on_enter_aboutButton)
aboutButton.bind('<Leave>', on_leaving_aboutButton)
descriptionHead.bind('<Enter>', on_enter_description)
descriptionHead.bind('<Leave>', on_leaving_description)
descriptionLabel.bind('<Enter>', on_enter_description)
descriptionLabel.bind('<Leave>', on_leaving_description)
contactHead.bind('<Enter>', on_enter_contact)
contactHead.bind('<Leave>', on_leaving_contact)
contactLabel.bind('<Enter>', on_enter_contact)
contactLabel.bind('<Leave>', on_leaving_contact)
enterButton.bind('<Enter>', on_enter_enterButton)
enterButton.bind('<Leave>', on_leaving_enterButton)
quitButton.bind('<Enter>', on_enter_quit)
quitButton.bind('<Leave>', on_leaving_quit)

try:
    startWindow.mainloop()
# BUGFIX: the original caught the undefined name EXCEPTION, which would
# itself raise NameError when any exception occurred.
except Exception:
    errorLabel.config(text='We encountered an error.')
from math import sin, cos, sqrt, atan2, radians
from pprint import pprint
from random import random, shuffle
import matplotlib.pyplot as plt
import gspread
from oauth2client.service_account import ServiceAccountCredentials
import ast

# Google Sheets API scopes needed to read the client spreadsheet.
scope = ['https://spreadsheets.google.com/feeds', 'https://www.googleapis.com/auth/drive']
try:
    credentials = ServiceAccountCredentials.from_json_keyfile_name("Legislator-b96aaa67134d.json", scope)
    gc = gspread.authorize(credentials)
except:
    # NOTE(review): silently ignores missing credentials; get_data() will
    # then fail with NameError on `gc`. Consider narrowing this except.
    pass


def get_data(start_point):
    """Load client coordinates from the spreadsheet.

    Returns a dict {index: (lat, lon)} where index 0 is the depot
    (start_point) and 1..n are the clients, in sheet order.
    """
    wks = gc.open('CARTEIRA DE CLIENTES AREA 18 JULHO 18').get_worksheet(0)
    data = wks.get_all_records()
    # 'Latitude/Longitude' cells are string tuples, e.g. "(-3.8, -38.2)".
    coords = {i + 1: ast.literal_eval(coord['Latitude/Longitude']) for i, coord in enumerate(data)}
    coords[0] = start_point
    return coords


def distance_set(coords):
    """Build the full pairwise distance matrix (km) via the haversine formula.

    Returns a dict keyed by (i, j) node-index pairs.
    """
    R = 6373.0  # approximate Earth radius in km
    d = dict()
    n = len(coords)
    for i in range(n):
        for j in range(n):
            lat1 = radians(coords[i][0])
            lon1 = radians(coords[i][1])
            lat2 = radians(coords[j][0])
            lon2 = radians(coords[j][1])
            dlon = lon2 - lon1
            dlat = lat2 - lat1
            # haversine great-circle distance
            a = sin(dlat / 2) ** 2 + cos(lat1) * cos(lat2) * sin(dlon / 2) ** 2
            c = 2 * atan2(sqrt(a), sqrt(1 - a))
            d[i, j] = R * c
    return d


def fitness(solution, entrada):
    """Total distance (km) to serve `solution` under the daily-hours limit.

    Simulates multi-day tours: each day starts at the depot (node 0), visits
    clients in order, and returns to the depot once the remaining working
    hours would not allow another visit plus the trip home.

    NOTE(review): relies on the module-global distance matrix `d` built in
    the __main__ block, not on entrada['coords'] -- keep them in sync.
    """
    i = 0
    total_distance = 0
    while i < len(solution):
        need_to_return = False
        # Start a new day: depot -> first client of the day.
        total_day_time = d[0, solution[i]] * entrada['tempo_km'] + entrada['tempo_atendimento']
        total_distance += d[0, solution[i]]
        if total_day_time + d[solution[i], 0] * entrada['tempo_km'] >= entrada['horas_dia']:
            # Not enough time for another stop; return to depot immediately.
            total_day_time += d[solution[i], 0] * entrada['tempo_km']
            total_distance += d[solution[i], 0]
            i += 1
            need_to_return = True
            if i == len(solution):
                return total_distance
        else:
            i += 1
            if i == len(solution) - 1:
                # Only the last client remains: visit it and go home.
                total_day_time += d[solution[i - 1], solution[i]] * entrada['tempo_km'] + entrada['tempo_atendimento']
                total_distance += d[solution[i - 1], solution[i]]
                total_day_time += d[solution[i], 0] * entrada['tempo_km']
                total_distance += d[solution[i], 0]
                return total_distance
        while not need_to_return:
            # Continue the current day's route client-to-client.
            total_day_time += d[solution[i - 1], solution[i]] * entrada['tempo_km'] + entrada['tempo_atendimento']
            total_distance += d[solution[i - 1], solution[i]]
            if total_day_time + d[solution[i], 0] * entrada['tempo_km'] >= entrada['horas_dia']:
                # Day limit reached: return to the depot and end the day.
                total_day_time += d[solution[i], 0] * entrada['tempo_km']
                total_distance += d[solution[i], 0]
                need_to_return = True
            else:
                need_to_return = False
            i += 1
            if i == len(solution) - 1:
                total_day_time = d[solution[i - 1], solution[i]] * entrada['tempo_km'] + entrada['tempo_atendimento']
                total_distance += d[solution[i - 1], solution[i]]
                total_day_time += d[solution[i], 0] * entrada['tempo_km']
                total_distance += d[solution[i], 0]
                return total_distance


def result(solution, entrada):
    """Expand `solution` into the visited path, inserting depot returns (0).

    Mirrors the day-splitting logic of fitness(); the returned list starts
    at the depot and contains a 0 wherever a day ends.
    """
    i = 0
    path = [0]
    while i < len(solution):
        need_to_return = False
        total_day_time = d[0, solution[i]] * entrada['tempo_km'] + entrada['tempo_atendimento']
        path.append(solution[i])
        if total_day_time + d[solution[i], 0] * entrada['tempo_km'] >= entrada['horas_dia']:
            total_day_time += d[solution[i], 0] * entrada['tempo_km']
            path.append(0)
            i += 1
            need_to_return = True
            if i == len(solution):
                return path
        else:
            i += 1
            if i == len(solution) - 1:
                total_day_time += d[solution[i - 1], solution[i]] * entrada['tempo_km'] + entrada['tempo_atendimento']
                path.append(solution[i])
                total_day_time += d[solution[i], 0] * entrada['tempo_km']
                # NOTE(review): the last client is appended twice here
                # (before and after the depot-return time update) -- confirm
                # whether the duplicate is intentional.
                path.append(solution[i])
                path.append(0)
                return path
        while not need_to_return:
            total_day_time += d[solution[i - 1], solution[i]] * entrada['tempo_km'] + entrada['tempo_atendimento']
            path.append(solution[i])
            if total_day_time + d[solution[i], 0] * entrada['tempo_km'] >= entrada['horas_dia']:
                total_day_time += d[solution[i], 0] * entrada['tempo_km']
                path.append(0)
                need_to_return = True
            else:
                need_to_return = False
            i += 1
            if i == len(solution) - 1:
                total_day_time += d[solution[i], 0] * entrada['tempo_km']
                path.append(solution[i])
                path.append(0)
                return path


def local_search(solution, entrada, opt=2):
    """2-opt-style local search: swap random position pairs, keep improvements.

    Stops early after two improvements (first-improvement flavour).
    NOTE(review): `opt` is currently unused; `solution` is mutated in place.
    """
    best_cost = fitness(solution, entrada)
    best_solution = list(solution)
    count = 0
    # Random permutation of positions so swaps are explored in random order.
    r = list(range(len(solution)))
    shuffle(r)
    for i in range(len(solution) - 1):
        for j in range(i + 1, len(solution)):
            solution[r[i]], solution[r[j]] = solution[r[j]], solution[r[i]]
            cur_cost = fitness(solution, entrada)
            if cur_cost < best_cost:
                best_cost = cur_cost
                best_solution = list(solution)
                count += 1
                if count == 2:
                    return best_cost, best_solution
    return best_cost, best_solution


def greedy_adaptative(entrada):
    """Build a randomized greedy solution for the GRASP construction phase.

    Each client is picked with probability proportional to 1/distance.
    NOTE(review): `i` stays 0, so all picks are weighted by distance from
    the depot rather than from the last chosen client -- confirm intent.
    """
    nodes = list(range(1, len(entrada['coords'])))
    d = distance_set(entrada['coords'])  # local matrix; shadows the global d
    i = 0
    solution = []
    while len(nodes) > 0:
        inverted = [1 / d[i, j] for j in nodes]
        total = sum(inverted)
        prob = [inv / total for inv in inverted]
        solution.append(random_pick(nodes, prob))
        nodes.remove(solution[-1])
    return solution


def random_pick(some_list, probabilities):
    """Roulette-wheel selection: pick one item according to `probabilities`."""
    x = random()
    cumulative_probability = 0.0
    for item, item_probability in zip(some_list, probabilities):
        cumulative_probability += item_probability
        if x < cumulative_probability:
            break
    return item


def GRASP(entrada, n=1000):
    """Run the GRASP metaheuristic for `n` iterations.

    Each iteration builds a randomized greedy solution and improves it with
    local search; the best (cost, solution) pair seen is returned.
    """
    best_solution = list(range(1, len(entrada['coords'])))
    best_cost = fitness(best_solution, entrada)
    for i in range(n):
        if i % 100 == 0:
            print('ite {}'.format(i))
        solution = greedy_adaptative(entrada)
        cur_cost, cur_solution = local_search(solution, entrada)
        if cur_cost < best_cost:
            best_cost = cur_cost
            best_solution = list(cur_solution)
            print(best_cost, '{}%'.format(i / n * 100))
            print(result(best_solution, entrada))
    return best_cost, best_solution


def report(path):
    """Split a depot-delimited path into one list of clients per day."""
    output = []
    for i in range(len(path) - 1):
        if path[i] == 0:
            output.append([])
        else:
            output[-1].append(path[i])
    return output


def plot_solution(entrada, ouput):
    """Plot the depot, the clients and one coloured polyline per day."""
    from itertools import cycle
    n = len(entrada['coords'])
    cycol = cycle('bgrcmk')  # cycle through matplotlib colour codes per day
    x = [entrada['coords'][i][0] for i in range(1, n)]
    y = [entrada['coords'][i][1] for i in range(1, n)]
    fig, ax = plt.subplots()
    ax.plot(entrada['coords'][0][0], entrada['coords'][0][1], 'o')
    ax.plot(x, y, 'o')
    # Rebuild including the depot at index 0 for segment drawing.
    x = [entrada['coords'][i][0] for i in range(n)]
    y = [entrada['coords'][i][1] for i in range(n)]
    for nodes in ouput:
        color = next(cycol)
        temp = [0] + nodes + [0]
        for k in range(len(temp) - 1):
            connectpoints(x, y, temp[k], temp[k + 1], color)
    plt.show()


def connectpoints(x, y, p1, p2, color):
    """Draw a straight segment between points p1 and p2 in the given colour."""
    x1, x2 = x[p1], x[p2]
    y1, y2 = y[p1], y[p2]
    plt.plot([x1, x2], [y1, y2], c=color)


# Problem instance: coords (index 0 = depot), service time per client (h),
# travel time per km (h), and working hours per day.
entrada = {
    'coords': {
        0: (-3.8412925646756273, -38.19748083389053),
        1: (-3.8076093143793464, -38.31531507188827),
        2: (-3.807076473247719, -38.534964161996015),
        3: (-3.8937784109629603, -38.276942506062476),
        4: (-3.4984004291098874, -38.20459829781128),
        5: (-3.624210043291873, -39.228378700101416),
        6: (-3.547778842055511, -38.40887494561352),
        7: (-3.660961612846057, -38.64362570444013),
        8: (-3.8977471504396077, -38.429476020171556),
        9: (-3.67754845892978, -39.014612378325545),
        10: (-3.730734725846105, -38.61605187627415),
        11: (-3.843487789860545, -38.74566546750679),
        12: (-3.8627966544258188, -38.587290555785295),
        13: (-3.553873034380957, -38.819609163825476)},
    'tempo_atendimento': 0.4,
    'tempo_km': 1 / 40,
    'horas_dia': 8
}
# Overwrite the hard-coded instance with live spreadsheet data.
entrada['coords'] = get_data((-3.7897703, -38.6155416))
print('ok')

if __name__ == '__main__':
    # Global distance matrix used by fitness()/result().
    d = distance_set(entrada['coords'])
    print('distance_set = ok')
    best_cost, best_solution = GRASP(entrada, n=30000)
    path = result(best_solution, entrada)
    ouput = report(path)
    plot_solution(entrada, ouput)
from Clientes import Cliente


class Mercado:
    """Model of a small grocery-store ("mercadinho") system.

    Wraps a Cliente (the current shopper) and an in-memory product catalogue
    mapping product name -> [unit price, available quantity].
    """

    def __init__(self, usuario):
        self._usuario = usuario
        # product name -> [price (R$), quantity in stock]
        self._produtos = {'arroz': [2.50, 1000], 'feijao': [4.50, 1500]}
        self._status = False   # True while the store is open
        self._extrato = []     # receipt lines accumulated by _conta()

    @property
    def usuario(self):
        return self._usuario.nome

    @property
    def produtos(self):
        return self._produtos

    @property
    def status(self):
        return self._status

    def abrir_mercadinho(self):
        """Open the store system."""
        self._status = True

    def fechar_mercadinho(self):
        """Close the store system."""
        self._status = False

    def _verificar_status(self):
        """Return True when the store is open.

        Simplified from the redundant ``if ...: return True else: return
        False`` form; bool() keeps the return type stable.
        """
        return bool(self._status)

    def adicionar_produto(self, produto: str, valor: float, quantidade: int):
        """Add (or replace) a product in the store's catalogue."""
        if self._verificar_status():
            self._produtos[produto.lower()] = [valor, quantidade]
        else:
            print('Operação indisponível!')

    def produtos_disponiveis(self):
        """Print the products currently available."""
        if self._verificar_status():
            for produto in self._produtos:
                print(f'{produto.capitalize():<10} -------- Preço: {self._produtos[produto][0]:.2f} R$ | '
                      f'Quantidade disponível: {self._produtos[produto][1]} unidades')
        else:
            print('Operação indisponível!')

    def _conta(self):
        """Build the receipt (extrato) from the client's purchases.

        Returns True on success so imprimir_extrato() can chain on it.
        """
        if self._verificar_status():
            soma = 0
            compra_cliente = self._usuario.compras
            for produto in self._produtos:
                if produto in compra_cliente:
                    extrato_produto = f'{produto.capitalize()} | {self._produtos[produto][0]}x' \
                                      f'{compra_cliente[produto]} = ' \
                                      f'{self._produtos[produto][0] * compra_cliente[produto]}'
                    self._extrato.append(extrato_produto)
                    soma += self._produtos[produto][0] * compra_cliente[produto]
                    # NOTE(review): stock is not decremented on purchase;
                    # the original commented-out line hinted at it:
                    #self._produtos[produto] -= 1
            total_conta = f'Total: {soma} R$'
            self._extrato.append(total_conta)
            return True
        else:
            print('Operação inválida!')

    def imprimir_extrato(self):
        """Print the receipt for the current client's purchases."""
        if self._verificar_status() and self._conta():
            print()
            for item in self._extrato:
                print(item)
        else:
            print('Operação inválida!')

    def pagamento(self, tipo):
        """Handle payment of the given type.

        TODO(review): implementation is incomplete -- the cash amount is
        read but never validated, and no change/total logic exists yet.
        """
        if self._verificar_status():
            if tipo == 'debito':
                valor = input('Valor em dinheiro: ')


if __name__ == '__main__':
    compras = {'feijao': 3, 'arroz': 5}
    pedro = Cliente('pedro', 'wkitawarairon@gmail.com', '55533311122', '9988774455', compras=compras)
    print(pedro.compras)
    python_mercado = Mercado(pedro)
    python_mercado.abrir_mercadinho()
    python_mercado.imprimir_extrato()
'''
Functions for loading the zBUS, PA5 and RPcoX drivers and connecting to the
specified device.  In addition to loading the appropriate ActiveX driver, some
minimal configuration is done.

Network-aware proxies of the zBUS and RPcoX drivers have been written for
TDTPy.  To connect to TDT hardware that is running on a remote computer, both
the :func:`connect_zbus` and :func:`connect_rpcox` functions take the address
of the server via a tuple (hostname, port)::

    connect_rpcox('RZ6', address=(tdt_server.cns.nyu.edu, 3333))

.. autofunction:: connect_zbus
.. autofunction:: connect_rpcox
.. autofunction:: connect_pa5

.. note::
    The network-aware proxy code should be considered alpha stage.  Although
    it appears to work in our tests, we have not deployed this in our data
    aqcuisition experiments.
'''
import os

import numpy as np
import ctypes

# Initialize
import pythoncom
import pywintypes

from .dsp_error import DSPError
from . import dsp_server, actxobjects

import logging
log = logging.getLogger(__name__)


def connect_pa5(interface='GB', device_id=1, address=None):
    '''
    Connect to the PA5
    '''
    debug_string = '%d via %s interface' % (device_id, interface)
    log.debug(debug_string)
    try:
        pythoncom.CoInitialize()
        if address is None:
            driver = actxobjects.PA5x()
        else:
            driver = dsp_server.PA5NET(address)
        if not driver.ConnectPA5(interface, device_id):
            raise DSPError("PA5", "Connection failed")
        log.debug("Connected to PA5")
        return driver
    except pywintypes.com_error:
        raise ImportError('ActiveX drivers from TDT not installed')


def connect_zbus(interface='GB', address=None):
    '''
    Connect to the zBUS interface and set the zBUS A and zBUS B triggers to low

    Parameters
    ----------
    interface : {'GB', 'USB'}
        Type of interface (depends on the card that you have from TDT).  See
        the TDT ActiveX documentation for clarification on which interface
        you would be using if you are still unsure.
    address : {None, (hostname, port)}
        If None, loads the ActiveX drivers directly, otherwise connects to the
        remote server specified by the hostname, port tuple.
    '''
    try:
        pythoncom.CoInitialize()
        if address is not None:
            driver = dsp_server.zBUSNET(address)
        else:
            driver = actxobjects.ZBUSx()
        if not driver.ConnectZBUS(interface):
            raise DSPError("zBUS", "Connection failed")
        log.debug("Connected to zBUS")

        # zBUS trigger is set to high for record mode, so ensure that both
        # triggers are initialized to low.
        driver.zBusTrigA(0, 2, 10)
        driver.zBusTrigB(0, 2, 10)
        log.debug("Set zBusTrigA to low")
        log.debug("Set zBusTrigB to low")
        return driver
    except pywintypes.com_error:
        raise ImportError('ActiveX drivers from TDT not installed')


def connect_rpcox(name, interface='GB', device_id=1, address=None):
    '''
    Connect to the specifed device using the RPcoX driver

    Note that the appropriate RPcoX.Connect method is called so you do not
    need to perform that step in your code.

    Parameters
    ----------
    name : {'RZ6', 'RZ5', 'RP2', ... (any valid device string) }
        Name of device (as defined by the corresponding RPcoX.Connect*
        method).
    interface : {'GB', 'USB'}
        Type of interface (depends on the card that you have from TDT).  See
        the TDT ActiveX documentation for clarification on which interface
        you would be using if you are still unsure.
    device_id : int (default 1)
        Id of device in the rack.  Only applicable if you have more than one
        of the same device (e.g. two RX6 devices).
    address : {None, (hostname, port)}
        If None, loads the ActiveX drivers directly, otherwise connects to the
        remote server specified by the hostname, port tuple.
    '''
    pythoncom.CoInitialize()
    debug_string = '%s %d via %s interface' % (name, device_id, interface)
    log.debug(debug_string)
    if address is None:
        driver = actxobjects.RPcoX()
    else:
        driver = dsp_server.RPcoXNET(address)
    if not getattr(driver, 'Connect%s' % name)(interface, device_id):
        raise DSPError(name, "Connection failed")
    log.debug("Connected to %s", name)
    return driver


def get_cof_path(circuit_name):
    '''
    Given relative path, returns absolute path to circuit file.  The *.rcx
    extension may be omitted.
    '''
    search_dirs = [os.path.join(os.path.dirname(__file__), 'components'),
                   os.getcwd(),
                   ]
    log.debug("Searching %r", search_dirs)
    success = False
    if not circuit_name.endswith('.rcx'):
        circuit_name += '.rcx'
    log.debug("Attempting to locate circuit %s", circuit_name)
    # renamed from `dir` so the builtin is not shadowed
    for directory in search_dirs:
        circuit_path = os.path.join(directory, circuit_name)
        log.debug('Checking %s', circuit_path)
        if os.path.exists(circuit_path):
            success = True
            break
    if not success:
        raise IOError("Could not find circuit %s" % circuit_name)
    return circuit_path


def dtype_to_type_str(data_type):
    '''
    Convert Numpy dtype to the type string required by TDT's libraries

    TDT's ActiveX ReadTagVEX and WriteTagVEX functions require the type string
    to be one of I8, I16, I32 or F32.  Any valid format for specify Numpy
    dtype is supported.

    >>> dtype_to_type_str(np.int32)
    'I32'
    >>> dtype_to_type_str(np.float32)
    'F32'
    >>> dtype_to_type_str('float32')
    'F32'
    >>> dtype_to_type_str('int8')
    'I8'

    If a certain type is not supported by TDT, a Value error is raised:

    >>> dtype_to_type_str(np.float16)
    Traceback (most recent call last):
        ...
    ValueError: Unsupported dtype
    '''
    if np.issubdtype(data_type, np.integer):
        type_code = 'I'
    elif np.issubdtype(data_type, np.floating):
        type_code = 'F'
    else:
        raise ValueError("Unsupported Numpy dtype")

    # Since dtype.itemsize is the number of bytes, and the number in the TDT
    # type string reflects bit number, we can translate it by multiplying by
    # 8.  Likewise, dtype.char is 'i' for integer and 'f' for floating point
    # datatypes.
    type_str = "{0}{1}".format(type_code, data_type.itemsize*8)
    log.debug("%r TDT type string is %s", data_type, type_str)
    if type_str not in ['F32', 'I32', 'I16', 'I8']:
        raise ValueError("Unsupported dtype")
    return type_str


def best_sf(data_type, range):
    '''
    Computes the optimal scaling factor for data compression

    Parameters
    ----------
    data_type
        Data type that values are being compressed to
    range : scalar or tuple
        Expected data range.  If scalar, assumes the value falls in the range
        (-range, range)
    '''
    data_type = np.dtype(data_type)
    try:
        info = np.iinfo(data_type)
    # np.iinfo raises ValueError for non-integer dtypes; the previous bare
    # except also swallowed typos and keyboard interrupts.
    except ValueError:
        info = np.finfo(data_type)
    return info.max/np.abs(range).max()


def resolution(data_type, scaling_factor):
    '''
    Computes resolution for data type given scaling factor

    Parameters
    ----------
    data_type : dtype
        Numpy data type (or string)
    scaling_factor : float
        Scaling factor applied to data
    '''
    data_type = np.dtype(data_type)
    if np.issubdtype(data_type, np.integer):
        return 1/float(scaling_factor)
    else:
        raise ValueError("Float data types not supported")


# NOTE(review): c_uint/c_ulong intentionally map to the *signed* np.int32
# (TDT's I32 wire format); confirm before "fixing" to uint32.
CTYPES_TO_NP = {
    ctypes.c_char: np.int8,
    ctypes.c_wchar: np.int16,
    ctypes.c_byte: np.int8,
    ctypes.c_ubyte: np.uint8,
    ctypes.c_short: np.int16,
    ctypes.c_ushort: np.uint16,
    ctypes.c_int: np.int32,
    ctypes.c_uint: np.int32,
    ctypes.c_long: np.int32,
    ctypes.c_ulong: np.int32,
    ctypes.c_float: np.float32,
    ctypes.c_double: np.float64,
}

# Reverse lookup
NP_TO_CTYPES = dict((np.dtype(v), k) for k, v in CTYPES_TO_NP.items())


def shmem_as_ndarray(raw_array):
    '''
    Create a ndarray wrapper around shared memory space
    '''
    address = raw_array._wrapper.get_address()
    size = raw_array._wrapper.get_size()
    dtype = CTYPES_TO_NP[raw_array._type_]

    class NDArrayView(object):
        pass

    d = NDArrayView()
    # Expose the raw buffer through the array-interface protocol so numpy
    # wraps it without copying.
    d.__array_interface__ = {
        'data': (address, False),
        'typestr': np.dtype('uint8').str,
        'descr': np.dtype('uint8').descr,
        'shape': (size,),
        'strides': None,
        'version': 3,
    }
    return np.asarray(d).view(dtype=dtype)


if __name__ == '__main__':
    import doctest
    doctest.testmod()
from __future__ import absolute_import
from PIL import Image
from torchvision import models, transforms
from torch.autograd import Variable
from torch.nn import functional as F
from matplotlib import pyplot as plt
import numpy as np
from pathlib import Path
import cv2

__all__ = ['CAM']

# Module-level buffer filled by the forward hook registered in CAM.__init__;
# features[-1] is always the most recent final-conv activation.
features = []


class CAM(object):
    """Class Activation Mapping: visualise which image regions drove a
    classification, by weighting the final conv feature maps with the
    classifier weights (Zhou et al., CVPR 2016 style)."""

    def __init__(self,model,final_conv,dim=(256,256)):
        # model: a trained network wrapped in nn.DataParallel (layers are
        # accessed through model._modules['module']).
        # final_conv: name of the last convolutional layer to hook.
        # dim: (H, W) the input image is resized to before inference.
        self.model = model
        self.final_conv = final_conv
        self.model.eval()
        # print(self.model)
        # print(list(self.model._modules['module']._modules.keys()))
        final_layer = self.model._modules['module']._modules.get(self.final_conv)
        if not final_layer == None:
            # Capture the final conv activations on every forward pass.
            final_layer.register_forward_hook(self.append_final_conv)
        else:
            raise ValueError('Final conv layer is None.')
        # TODO: use GAP or FC as weights in CAM?
        params = dict(self.model.named_parameters())
        # NOTE: fc.0 is NOT the fc layer but actually the GAP layer
        self.weight_softmax = np.squeeze(params['module.fc.0.weight'].data.cpu().numpy())
        # ImageNet normalisation constants.
        normalize = transforms.Normalize(
            mean=[0.485, 0.456, 0.406],
            std=[0.229, 0.224, 0.225]
        )
        # FIXME: Do we want to change normalize to fit our dataset?
        self.preprocess = transforms.Compose([
            transforms.Resize(dim),
            transforms.ToTensor(),
            normalize
        ])
        # TODO: Also, did we use 224 by 224?

    def append_final_conv(self, module, input, output):
        # Forward hook: stash the conv output for later CAM computation.
        features.append(output.data.cpu().numpy())

    def returnCAM(self, feature_conv, weight_softmax, class_idx):
        # generate the class activation maps upsample to 256x256
        size_upsample = (256, 256)
        bz, nc, h, w = feature_conv.shape
        output_cam = []
        for idx in class_idx:
            # Weighted sum of the nc feature maps for this class.
            cam = weight_softmax[idx].dot(feature_conv.reshape((nc, h*w)))
            cam = cam.reshape(h, w)
            # Normalise to [0, 1] then invert so high activation maps to
            # low intensity before the 8-bit conversion.
            cam = cam - np.min(cam)
            cam_img = cam / np.max(cam)
            cam_img = 1.0 - cam_img
            cam_img = np.uint8(255 * cam_img)
            output_cam.append(cv2.resize(cam_img, size_upsample))
        return output_cam

    def process_name(self, fname):
        # 'dir/name.jpg' -> 'name_CAM.jpg' (basename with _CAM inserted
        # before the extension).
        fname = fname.split('/')[-1]
        fname = fname.split('.')
        extention = fname[-1]
        return '.'.join(fname[:-1]) + '_CAM.' + extention

    def computeCAM(self, image_fname, cam_location='.', display=False):
        """Compute the CAM for the top-1 predicted class of an image and
        save a side-by-side original/CAM figure under cam_location."""
        img = cv2.imread(image_fname)
        height, width, _ = img.shape
        img_tensor = self.preprocess(Image.open(image_fname))
        img_variable = Variable(img_tensor.unsqueeze(0))
        logit = self.model(img_variable)
        h_x = F.softmax(logit, dim=1).data.squeeze()
        # Sort class probabilities descending; idx[0] is the top prediction.
        probs, idx = h_x.sort(0, True)
        probs = probs.cpu().numpy()
        idx = idx.cpu().numpy()
        # features[-1] was filled by the forward hook during self.model(...).
        CAMs = self.returnCAM(features[-1], self.weight_softmax, [idx[0]])
        heatmap = cv2.applyColorMap(cv2.resize(CAMs[0], (width, height)), cv2.COLORMAP_JET)
        # Blend heatmap over the original image.
        result = heatmap * 0.3 + img * 0.5
        output_path = str(Path(cam_location)/self.process_name(image_fname))
        # cv2.imwrite(output_path, result)
        plt.figure()
        plt.title('CAM of {}'.format(image_fname))
        plt.subplot(1,2,1)
        plt.imshow(img)
        plt.grid(False)
        plt.xticks([])
        plt.yticks([])
        plt.xlabel('original')
        plt.subplot(1,2,2)
        plt.imshow(result.astype(np.int32))
        plt.grid(False)
        plt.xticks([])
        plt.yticks([])
        plt.xlabel('CAM')
        plt.savefig(output_path)
        plt.close()
        # print('img wrote to {}'.format(output_path))
        if display:
            img = plt.imread(image_fname)
            heatmap = cv2.cvtColor(heatmap, cv2.COLOR_BGR2RGB)
            result = heatmap * 0.3 + img * 0.5
            plt.imshow(result.astype(np.int32))
            plt.show()


if __name__ == "__main__":
    model_id = 1
    if model_id == 1:
        net = models.squeezenet1_1(pretrained=True)
        finalconv_name = 'features'
    elif model_id == 2:
        net = models.resnet18(pretrained=True)
        finalconv_name = 'layer4'
    elif model_id == 3:
        net = models.densenet161(pretrained=True)
        finalconv_name = 'features'
    cam = CAM(net,finalconv_name)
    cam.computeCAM('test.jpg')
    # save(net, finalconv_name, 'test.jpg', '.', True)
from handler import PingpongHandler as Handler

# NOTE(review): assigning OMP_NUM_THREADS as a plain module attribute does
# NOT set the environment variable -- OpenMP-backed libraries read
# os.environ['OMP_NUM_THREADS'] before import. Presumably some loader
# consumes this constant by name; confirm, otherwise it has no effect.
OMP_NUM_THREADS = 2
'''Junos implementation of ShowRun class'''


class ConfigureSnapshotCommand(object):
    """ConfigureSnapshotCommands class

    `ConfigureSnapshotCommands` class provides the functionality to
    retrieve the platform specific configurations command.
    """

    @property
    def command(self):
        """Method to return the configurations fetch command.

        Args:
            None

        Returns:
            `str`: a `str` of the command

        Examples:
            # Creating an instnace of ConfigureSnapshotCommand
            >>> configure_snapshot_command = Lookup.from_device(device).sdk.\
                libs.abstracted_libs.configure_snapshot_command.ConfigureSnapshotCommand()

            # Calling get_commands method
            >>> configure_snapshot_command = configure_snapshot_command.command
            'show configuration | display set'
        """
        # Junos default command; '| display set' renders the configuration
        # as one set-command per line.
        # (Docstring example fixed: it previously claimed the bare
        # 'show configuration' string, contradicting this return value.)
        return 'show configuration | display set'
"""Pure-Python scanner and parser, used if _speedups is not available.""" from __future__ import absolute_import from __future__ import print_function from __future__ import unicode_literals from collections import deque import re DEBUG = False # TODO copied from __init__ _blocks_re = re.compile(r'[{},;()\'"\n]') def locate_blocks(codestr): """ For processing CSS like strings. Either returns all selectors (that can be "smart" multi-lined, as long as it's joined by `,`, or enclosed in `(` and `)`) with its code block (the one between `{` and `}`, which can be nested), or the "lose" code (properties) that doesn't have any blocks. """ lineno = 1 par = 0 instr = None depth = 0 skip = False i = init = lose = 0 start = end = None lineno_stack = deque() for m in _blocks_re.finditer(codestr): i = m.start(0) c = codestr[i] if c == '\n': lineno += 1 if instr is not None: if c == instr: instr = None # A string ends (FIXME: needs to accept escaped characters) elif c in ('"', "'"): instr = c # A string starts elif c == '(': # parenthesis begins: par += 1 elif c == ')': # parenthesis ends: par -= 1 elif not par and not instr: if c == '{': # block begins: if depth == 0: if i > 0 and codestr[i - 1] == '#': # Do not process #{...} as blocks! 
skip = True else: lineno_stack.append(lineno) start = i if lose < init: _property = codestr[lose:init].strip() if _property: yield lineno, _property, None lose = init depth += 1 elif c == '}': # block ends: if depth <= 0: raise SyntaxError("Unexpected closing brace on line {0}".format(lineno)) else: depth -= 1 if depth == 0: if not skip: end = i _selectors = codestr[init:start].strip() _codestr = codestr[start + 1:end].strip() if _selectors: yield lineno_stack.pop(), _selectors, _codestr init = lose = end + 1 skip = False elif depth == 0: if c == ';': # End of property (or block): init = i if lose < init: _property = codestr[lose:init].strip() if _property: yield lineno, _property, None init = lose = i + 1 if depth > 0: if not skip: _selectors = codestr[init:start].strip() _codestr = codestr[start + 1:].strip() if _selectors: yield lineno, _selectors, _codestr if par: raise Exception("Missing closing parenthesis somewhere in block: '%s'" % _selectors) elif instr: raise Exception("Missing closing string somewhere in block: '%s'" % _selectors) else: raise Exception("Block never closed: '%s'" % _selectors) losestr = codestr[lose:] for _property in losestr.split(';'): _property = _property.strip() lineno += _property.count('\n') if _property: yield lineno, _property, None ################################################################################ # Parser class NoMoreTokens(Exception): """ Another exception object, for when we run out of tokens """ pass class Scanner(object): def __init__(self, patterns, ignore, input=None): """ Patterns is [(terminal,regex)...] Ignore is [terminal,...]; Input is a string """ self.reset(input) self.ignore = ignore # The stored patterns are a pair (compiled regex,source # regex). 
If the patterns variable passed in to the # constructor is None, we assume that the class already has a # proper .patterns list constructed if patterns is not None: self.patterns = [] for k, r in patterns: self.patterns.append((k, re.compile(r))) def reset(self, input): self.tokens = [] self.restrictions = [] self.input = input self.pos = 0 def __repr__(self): """ Print the last 10 tokens that have been scanned in """ output = '' for t in self.tokens[-10:]: output = "%s\n (@%s) %s = %s" % (output, t[0], t[2], repr(t[3])) return output def _scan(self, restrict): """ Should scan another token and add it to the list, self.tokens, and add the restriction to self.restrictions """ # Keep looking for a token, ignoring any in self.ignore token = None while True: best_pat = None # Search the patterns for a match, with earlier # tokens in the list having preference best_pat_len = 0 for tok, regex in self.patterns: if DEBUG: print("\tTrying %s: %s at pos %d -> %s" % (repr(tok), repr(regex.pattern), self.pos, repr(self.input))) # First check to see if we're restricting to this token if restrict and tok not in restrict and tok not in self.ignore: if DEBUG: print("\tSkipping %r!" % (tok,)) continue m = regex.match(self.input, self.pos) if m: # We got a match best_pat = tok best_pat_len = len(m.group(0)) if DEBUG: print("Match OK! %s: %s at pos %d" % (repr(tok), repr(regex.pattern), self.pos)) break # If we didn't find anything, raise an error if best_pat is None: msg = "Bad token found" if restrict: msg = "Bad token found while trying to find one of the restricted tokens: %s" % (", ".join(repr(r) for r in restrict)) raise SyntaxError("SyntaxError[@ char %s: %s]" % (repr(self.pos), msg)) # If we found something that isn't to be ignored, return it if best_pat in self.ignore: # This token should be ignored... 
self.pos += best_pat_len else: end_pos = self.pos + best_pat_len # Create a token with this data token = ( self.pos, end_pos, best_pat, self.input[self.pos:end_pos] ) break if token is not None: self.pos = token[1] # Only add this token if it's not in the list # (to prevent looping) if not self.tokens or token != self.tokens[-1]: self.tokens.append(token) self.restrictions.append(restrict) return 1 return 0 def token(self, i, restrict=None): """ Get the i'th token, and if i is one past the end, then scan for another token; restrict is a list of tokens that are allowed, or 0 for any token. """ tokens_len = len(self.tokens) if i == tokens_len: # We are at the end, get the next... tokens_len += self._scan(restrict) if i < tokens_len: if restrict and self.restrictions[i] and restrict > self.restrictions[i]: raise NotImplementedError("Unimplemented: restriction set changed") return self.tokens[i] raise NoMoreTokens def rewind(self, i): tokens_len = len(self.tokens) if i <= tokens_len: token = self.tokens[i] self.tokens = self.tokens[:i] self.restrictions = self.restrictions[:i] self.pos = token[0]
import os
import threading
import logging
import platform

from pocketsphinx import get_model_path
from pocketsphinx.pocketsphinx import Decoder

import alexapi.triggers as triggers
from .basetrigger import BaseTrigger

logger = logging.getLogger(__name__)


class PocketsphinxTrigger(BaseTrigger):
    """Voice trigger backed by PocketSphinx key-phrase spotting.

    Reads raw audio from the shared capture object and invokes the
    trigger callback whenever the configured phrase is detected.
    """

    type = triggers.TYPES.VOICE

    # Frames read from the capture device per decoder step.
    AUDIO_CHUNK_SIZE = 1024
    # Sample rate (Hz) fed to the capture device / decoder.
    AUDIO_RATE = 16000

    _capture = None

    def __init__(self, config, trigger_callback, capture):
        super(PocketsphinxTrigger, self).__init__(config, trigger_callback, 'pocketsphinx')

        self._capture = capture

        # Set -> listening is enabled; cleared -> worker thread blocks in wait().
        self._enabled_lock = threading.Event()
        # self._disabled_sync_lock = threading.Event()
        self._decoder = None

    def setup(self):
        """Create and configure the PocketSphinx decoder."""
        # PocketSphinx configuration
        ps_config = Decoder.default_config()

        # Set recognition model to US
        ps_config.set_string('-hmm', os.path.join(get_model_path(), 'en-us'))
        ps_config.set_string('-dict', os.path.join(get_model_path(), 'cmudict-en-us.dict'))

        # Specify recognition key phrase
        ps_config.set_string('-keyphrase', self._tconfig['phrase'])
        ps_config.set_float('-kws_threshold', float(self._tconfig['threshold']))

        # Hide the VERY verbose logging information when not in debug
        if logging.getLogger('alexapi').getEffectiveLevel() != logging.DEBUG:
            null_path = '/dev/null'
            if platform.system() == 'Windows':
                null_path = 'nul'
            ps_config.set_string('-logfn', null_path)

        # Process audio chunk by chunk. On keyword detected perform action and restart search
        self._decoder = Decoder(ps_config)

    def run(self):
        """Start the detection loop on a background daemon thread."""
        thread = threading.Thread(target=self.thread, args=())
        # FIX: Thread.setDaemon() is a deprecated alias (removed in recent
        # Python releases); assign the attribute instead.
        thread.daemon = True
        thread.start()

    def thread(self):
        """Worker loop: wait until enabled, then listen for the key phrase."""
        while True:
            self._enabled_lock.wait()

            self._capture.handle_init(self.AUDIO_RATE, self.AUDIO_CHUNK_SIZE)

            self._decoder.start_utt()

            triggered = False
            while not triggered:
                # FIX: Event.isSet() is a deprecated camelCase alias
                # (removed in Python 3.12); use is_set().
                if not self._enabled_lock.is_set():
                    break

                # Read from microphone
                data = self._capture.handle_read()

                # Detect if keyword/trigger word was said
                self._decoder.process_raw(data, False, False)
                triggered = self._decoder.hyp() is not None

            self._capture.handle_release()
            self._decoder.end_utt()

            if triggered:
                self._trigger_callback(self)

    def enable(self):
        """Allow the worker thread to start listening."""
        self._enabled_lock.set()

    def disable(self):
        """Pause listening; the inner loop exits on its next iteration."""
        self._enabled_lock.clear()
import hashlib
import math
import multiprocessing as mp
import time
from math import pi, sqrt

import cython
import numpy as np
import numpy.typing as npt
from scipy.special import erf, expi

from .mesh import Element
from .quadrature import (DuffyScheme2D, ProductScheme2D,
                         gauss_quadrature_scheme, log_quadrature_scheme)
from .single_layer_exact import (spacetime_evaluated_1,
                                 spacetime_integrated_kernel)

# 1/(4*pi), the normalization constant of the 2D heat kernel.
FPI_INV = cython.declare(cython.double)
FPI_INV = (4 * pi)**-1
PI_SQRT = cython.declare(cython.double)
PI_SQRT = math.sqrt(pi)


def kernel(t, x):
    """Evaluates the 2D heat kernel G(t, x) for scalar t and x;
    zero for non-positive times."""
    assert isinstance(t, float) and isinstance(x, float)
    if (t <= 0):
        return 0
    else:
        return FPI_INV * 1. / t * np.exp(-x**2 / (4 * t))


def alpha(z):
    """ Returns lambda a_z(x) = |x|^2 / (4z), summing over axis 0. """
    return lambda x: np.sum(x**2, axis=0) / (4 * z)


def noop(x):
    # Used as the zero function when a time difference is non-positive.
    return 0


def g(a, b):
    """ Returns g_z for z = a - b. """
    if a <= b:
        return noop
    z = a - b
    return lambda x: FPI_INV * expi(-np.sum(x**2, axis=0) / (4 * z))


def f(a, b):
    """ Returns f_z for z = a - b. Takes |x|^2 rather than x. """
    if a <= b:
        return noop
    z = a - b

    def f_z(x_sqr):
        a_z = x_sqr / (4 * z)
        return FPI_INV * (z * np.exp(-a_z) + z * (1 + a_z) * expi(-a_z))

    return f_z


def time_integrated_kernel(t, a, b):
    """ Returns heat kernel G(t-s,x) integrated over s in [a,b]. """
    assert a < b
    g_ta = g(t, a)
    g_tb = g(t, b)
    return lambda x: g_tb(x) - g_ta(x)


def double_time_integrated_kernel(a, b, c, d):
    """ Returns kernel integrated in time over [a,b] x [c, d],
    as a function of the spatial difference x. """
    assert a < b and c < d

    def G(x):
        # NOTE: x_sqr here is |x|^2 / 4, so exp(-x_sqr / z) below matches
        # exp(-|x|^2 / (4z)) used in f/g above.
        x_sqr = np.sum(x**2, axis=0) / 4
        result = 0
        # The four corner contributions of the double time integral;
        # only corners with a positive time difference contribute.
        if b > d:
            z = b - d
            result += FPI_INV * (z * np.exp(-x_sqr / z) +
                                 (x_sqr + z) * expi(-x_sqr / z))
        if b > c:
            z = b - c
            result -= FPI_INV * (z * np.exp(-x_sqr / z) +
                                 (x_sqr + z) * expi(-x_sqr / z))
        if a > c:
            z = a - c
            result += FPI_INV * (z * np.exp(-x_sqr / z) +
                                 (x_sqr + z) * expi(-x_sqr / z))
        if a > d:
            z = a - d
            result -= FPI_INV * (z * np.exp(-x_sqr / z) +
                                 (x_sqr + z) * expi(-x_sqr / z))
        return result

    return G


def MP_SL_matrix_col(j: int) -> npt.ArrayLike:
    """ Function to evaluate SL in parallel using the multiprocessing library.

    Reads __SL/__elems_test/__elems_trial, which bilform_matrix stores in
    globals() so that fork-based worker processes inherit them.
    """
    global __SL, __elems_test, __elems_trial
    elem_trial = __elems_trial[j]
    col = np.zeros(len(__elems_test))
    for i, elem_test in enumerate(__elems_test):
        # Causality: test elements entirely before the trial element vanish.
        if elem_test.time_interval[1] <= elem_trial.time_interval[0]:
            continue
        col[i] = __SL.bilform(elem_trial, elem_test)
    return col


class SingleLayerOperator:
    """Single layer operator for the heat equation, discretized with
    piecewise-constant space-time boundary elements."""

    def __init__(self, mesh, quad_order=12, pw_exact=False, cache_dir=None):
        self.pw_exact = pw_exact
        self.gauss_scheme = gauss_quadrature_scheme(23)
        self.gauss_2d = ProductScheme2D(self.gauss_scheme)
        self.log_scheme = log_quadrature_scheme(quad_order, quad_order)
        self.log_scheme_m = self.log_scheme.mirror()
        self.log_log = ProductScheme2D(self.log_scheme, self.log_scheme)
        self.duff_log_log = DuffyScheme2D(self.log_log, symmetric=False)
        self.mesh = mesh
        self.gamma_len = self.mesh.gamma_space.gamma_length
        self.glue_space = self.mesh.glue_space
        self.cache_dir = cache_dir
        self._init_elems(self.mesh.leaf_elements)

    def _init_elems(self, elems):
        # For all elements in the mesh, register the log scheme.
        # The attribute names are mangled to _SingleLayerOperator__...;
        # evaluate() below reads them back under the same mangling.
        for elem in elems:
            a, b = elem.space_interval
            elem.__log_scheme_y = elem.gamma_space(a + (b - a) *
                                                   self.log_scheme.points)
            elem.__log_scheme_m_y = elem.gamma_space(a + (b - a) *
                                                     self.log_scheme_m.points)

    @cython.locals(h_x=cython.double, h_y=cython.double)
    def __integrate(self, f: object, a: float, b: float, c: float,
                    d: float) -> float:
        """ Integrates a symmetric singular f over the square [a,b]x[c,d].

        Recursively splits the square so that each piece is handled by the
        quadrature scheme matching its singularity location.
        """
        h_x = b - a
        h_y = d - c
        assert h_x > 1e-8 and h_y > 1e-8
        assert (a < b and c < d)
        # Convention: the first panel does not start after the second.
        assert (a, b) <= (c, d)

        # If are the same panel.
        if a == c and b == d:
            return self.duff_log_log.integrate(f, a, b, c, d)

        # If the panels touch in the middle, split into even parts.
        if b == c:
            if abs(h_x - h_y) < 1e-10:
                return self.duff_log_log.mirror_x().integrate(f, a, b, c, d)
            elif h_x > h_y:
                return self.duff_log_log.mirror_x().integrate(
                    f, b - h_y, b, c, d) + self.__integrate(
                        f, a, b - h_y, c, d)
            else:
                return self.duff_log_log.mirror_x().integrate(
                    f, a, b, c, c + h_x) + self.__integrate(
                        f, a, b, c + h_x, d)

        assert not math.isclose(b, c)

        # If the panels touch through in the glued boundary, split into even parts.
        if a == 0 and d == self.gamma_len and self.glue_space:
            assert b < c
            if abs(h_x - h_y) < 1e-10:
                return self.duff_log_log.mirror_y().integrate(f, a, b, c, d)
            elif h_x > h_y:
                return self.duff_log_log.mirror_y().integrate(
                    f, a, a + h_y, c, d) + self.__integrate(
                        f, a + h_y, b, c, d)
            else:
                return self.__integrate(
                    f, a, b, c,
                    d - h_x) + self.duff_log_log.mirror_y().integrate(
                        f, a, b, d - h_x, d)

        # If we are disjoint. TODO: Do more singular stuff if close?
        # TODO: Gauss 2d for disjoint..
        if b < c:
            #return self.gauss_2d.integrate(f, a, b, c, d)
            # Pick the orientation whose singularity sits nearest the
            # clustered quadrature points (accounting for the glued boundary).
            if c - b < self.gamma_len - d + a or not self.glue_space:
                return self.log_log.mirror_x().integrate(f, a, b, c, d)
            else:
                return self.log_log.mirror_y().integrate(f, a, b, c, d)

        # If the first panel is longer than the second panel.
        if d < b:
            # TODO: Is this correct?
            return self.__integrate(
                f, a, d, c, d) + self.duff_log_log.mirror_y().integrate(
                    f, d, b, c, d)

        # First panel is contained in second one.
        if a == c:
            assert b < d
            return self.__integrate(f, a, b, c, b) + self.__integrate(
                f, a, b, b, d)

        assert not math.isclose(a, c)

        # We have overlap, split this in two parts.
        assert a < c
        return self.__integrate(f, a, c, c, d) + self.__integrate(
            f, c, b, c, d)

    @cython.locals(a=cython.double,
                   b=cython.double,
                   c=cython.double,
                   d=cython.double)
    def bilform(self, elem_trial: Element, elem_test: Element) -> float:
        """ Evaluates <V 1_trial, 1_test>. """
        # If the test element lies below the trial element, we are done.
        if elem_test.time_interval[1] <= elem_trial.time_interval[0]:
            return 0

        # Optional fully-exact evaluation when both elements live on the
        # same parametrization.
        if self.pw_exact and elem_test.gamma_space is elem_trial.gamma_space:
            return spacetime_integrated_kernel(*elem_test.time_interval,
                                               *elem_trial.time_interval,
                                               *elem_test.space_interval,
                                               *elem_trial.space_interval)

        a, b = elem_test.time_interval
        c, d = elem_trial.time_interval

        # Calculate the time integrated kernel.
        G_time = double_time_integrated_kernel(a, b, c, d)

        gamma_test = elem_test.gamma_space
        gamma_trial = elem_trial.gamma_space
        # __integrate expects its first interval to start no later than the
        # second, so swap the roles of x and y when necessary.
        if elem_test.space_interval <= elem_trial.space_interval:
            G_time_parametrized = lambda x: G_time(
                gamma_test(x[0]) - gamma_trial(x[1]))

            return self.__integrate(G_time_parametrized,
                                    *elem_test.space_interval,
                                    *elem_trial.space_interval)
        else:
            # Swap x,y coordinates.
            G_time_parametrized = lambda x: G_time(
                gamma_test(x[1]) - gamma_trial(x[0]))

            return self.__integrate(G_time_parametrized,
                                    *elem_trial.space_interval,
                                    *elem_test.space_interval)

    def bilform_matrix(self, elems_test=None, elems_trial=None, use_mp=False):
        """ Returns the dense matrix <V 1_trial, 1_test>.

        With use_mp=True the columns are computed in parallel; results may
        also be cached on disk when cache_dir was given.
        """
        if elems_test is None:
            elems_test = list(self.mesh.leaf_elements)
        if elems_trial is None:
            elems_trial = elems_test

        N = len(elems_test)
        M = len(elems_trial)

        # For small N, M, simply construct matrix inline and return.
        if N * M < 100:
            mat = np.zeros((N, M))
            for i, elem_test in enumerate(elems_test):
                for j, elem_trial in enumerate(elems_trial):
                    mat[i, j] = self.bilform(elem_trial, elem_test)
            return mat

        if self.cache_dir is not None:
            # Cache key depends on the parametrization and both element lists.
            md5 = hashlib.md5((str(self.mesh.gamma_space) + str(elems_test) +
                               str(elems_trial)).encode()).hexdigest()
            cache_fn = "{}/SL_{}_{}x{}_{}.npy".format(self.cache_dir,
                                                      self.mesh.gamma_space,
                                                      N, M, md5)
            try:
                mat = np.load(cache_fn)
                print("Loaded Single Layer from file {}".format(cache_fn))
                return mat
            except:
                # Cache miss or unreadable file: fall through and recompute.
                pass

        time_mat_begin = time.time()
        mat = np.zeros((N, M))
        if not use_mp:
            for i, elem_test in enumerate(elems_test):
                for j, elem_trial in enumerate(elems_trial):
                    mat[i, j] = self.bilform(elem_trial, elem_test)
        else:
            # Set up global variables for parallelizing.
            globals()['__elems_test'] = elems_test
            globals()['__elems_trial'] = elems_trial
            globals()['__SL'] = self

            cpu = mp.cpu_count()
            for j, col in enumerate(
                    mp.Pool(mp.cpu_count()).imap(MP_SL_matrix_col, range(M),
                                                 M // (16 * cpu) + 1)):
                mat[:, j] = col

        if self.cache_dir is not None:
            try:
                np.save(cache_fn, mat)
                print("Stored Single Layer to {}".format(cache_fn))
            except:
                # Best-effort cache write; failure is non-fatal.
                pass

        print('Calculating SL matrix took {}s'.format(time.time() -
                                                      time_mat_begin))
        return mat

    def potential(self, elem_trial, t, x):
        """ Evaluates (V 1_trial)(t,x) for t,x not on the bdr. """
        assert x.shape == (2, 1)
        if t <= elem_trial.time_interval[0]:
            return 0

        # Calculate the time integrated kernel.
        G_time = time_integrated_kernel(t, *elem_trial.time_interval)
        G_time_parametrized = lambda y: G_time(x - elem_trial.gamma_space(y))
        return self.gauss_scheme.integrate(G_time_parametrized,
                                           *elem_trial.space_interval)

    def potential_vector(self, t, x):
        """ Returns the vector (V 1_elem)(t, x) for all elements in mesh. """
        elems = list(self.mesh.leaf_elements)
        N = len(elems)
        vec = np.zeros(shape=N)
        for j, elem_trial in enumerate(elems):
            vec[j] = self.potential(elem_trial, t, x)
        return vec

    @cython.locals(x_a=cython.double,
                   x_b=cython.double,
                   d_a=cython.double,
                   d_b=cython.double,
                   t_a=cython.double,
                   t_b=cython.double)
    def evaluate(self, elem_trial: Element, t: float, x_hat: float,
                 x: npt.ArrayLike) -> float:
        """ Evaluates (V 1_trial)(t, gamma(x_hat)) for t, x_hat in the
        param domain; x must be the precomputed point gamma(x_hat). """
        if t <= elem_trial.time_interval[0]:
            return 0
        #if x is None: x = self.mesh.gamma_space.eval(x_hat)
        x_a = elem_trial.space_interval[0]
        x_b = elem_trial.space_interval[1]
        t_a = elem_trial.time_interval[0]
        t_b = elem_trial.time_interval[1]

        # Check if singularity lies in this element.
        if x_a * (1 + 1e-10) <= x_hat <= x_b * (1 - 1e-10):
            # Calculate the time integrated kernel.
            def G_time_parametrized(y_hat: npt.ArrayLike):
                xy = (x - elem_trial.gamma_space(y_hat))**2
                xy = xy[0] + xy[1]
                a, b = elem_trial.time_interval
                if t <= b:
                    return -FPI_INV * expi(-xy / (4 * (t - a)))
                else:
                    return FPI_INV * (expi(-xy / (4 * (t - b))) -
                                      expi(-xy / (4 * (t - a))))

            # Split at the singularity x_hat; use the mirrored scheme left
            # of it so quadrature points cluster towards x_hat on both sides.
            return self.log_scheme_m.integrate(
                G_time_parametrized, x_a, x_hat) + self.log_scheme.integrate(
                    G_time_parametrized, x_hat, x_b)

        # Calculate distance of x_hat to both endpoints.
        if self.glue_space:
            d_a = min(abs(x_hat - x_a), abs(self.gamma_len - x_hat + x_a))
            d_b = min(abs(x_hat - x_b), abs(self.gamma_len - x_b + x_hat))
        else:
            d_a = abs(x_hat - x_a)
            d_b = abs(x_hat - x_b)

        # Calculate |x - gamma(yhat)|^2 for the quadrature rule, choosing
        # the point set whose clustering faces the nearer endpoint
        # (precomputed in _init_elems under the mangled attribute names).
        if d_a <= d_b:
            xy_sqr = (x - elem_trial.__log_scheme_y)**2
        else:
            xy_sqr = (x - elem_trial.__log_scheme_m_y)**2
        xy = xy_sqr[0] + xy_sqr[1]

        # Evaluate the time integrated kernel for the above points.
        if t <= t_b:
            vec = -FPI_INV * expi(-xy / (4 * (t - t_a)))
        else:
            vec = FPI_INV * (expi(-xy / (4 * (t - t_b))) -
                             expi(-xy / (4 * (t - t_a))))

        # Return the quadrature result.
        return (x_b - x_a) * np.dot(self.log_scheme.weights, vec)

    def evaluate_exact(self, elem_trial: Element, t: float,
                       x: float) -> float:
        """ Evaluates (V 1_trial)(t, x) for elem_trial lying on the same
        pane as x, using closed-form antiderivatives. """
        if t <= elem_trial.time_interval[0]:
            return 0
        a, b = elem_trial.space_interval
        if x < a or x > b:
            # x lies outside the element: h and k are the distances to the
            # near and far endpoints, respectively.
            h = min(abs(a - x), abs(b - x))
            k = max(abs(a - x), abs(b - x))
            a, b = elem_trial.time_interval
            if t <= b:
                return -FPI_INV * (PI_SQRT * (2 * sqrt(
                    (t - a))) * (erf(h / (2 * sqrt(
                        (t - a)))) - erf(k / (2 * sqrt(
                            (t - a))))) - h * expi(-(h**2 / (4 * (t - a)))) +
                                   k * expi(-(k**2 / (4 * (t - a)))))
            else:
                return FPI_INV * (
                    2 * PI_SQRT *
                    (sqrt(t - a) *
                     (-erf(h / (2 * sqrt(t - a))) + erf(k / (2 * sqrt(t - a))))
                     + sqrt(t - b) *
                     (erf(h / (2 * sqrt(t - b))) - erf(k / (2 * sqrt(t - b)))))
                    + h * expi(h**2 / (4 * (a - t))) -
                    k * expi(k**2 / (4 * (a - t))) -
                    h * expi(h**2 / (4 * (b - t))) +
                    k * expi(k**2 / (4 * (b - t))))
        elif a < x < b:
            # x lies strictly inside: add the two one-sided contributions.
            return spacetime_evaluated_1(
                t, *elem_trial.time_interval, x - a) + spacetime_evaluated_1(
                    t, *elem_trial.time_interval, b - x)
        elif x == a or x == b:
            return spacetime_evaluated_1(t, *elem_trial.time_interval, b - a)

    def evaluate_vector(self, t, x_hat):
        """ Returns the vector (V 1_elem)(t, gamma(x_hat)) for all elements
        in mesh. """
        elems = list(self.mesh.leaf_elements)
        N = len(elems)
        vec = np.zeros(shape=N)
        x = self.mesh.gamma_space.eval(x_hat)
        for j, elem_trial in enumerate(elems):
            vec[j] = self.evaluate(elem_trial, t, x_hat, x)
        return vec

    def rhs_vector(self, f, gauss_order=23):
        """ Returns the vector f(1_elem) for all elements in the mesh. """
        gauss_scheme = gauss_quadrature_scheme(gauss_order)
        gauss_2d = ProductScheme2D(gauss_scheme, gauss_scheme)
        elems = list(self.mesh.leaf_elements)
        N = len(elems)
        vec = np.zeros(shape=N)
        for i, elem_test in enumerate(elems):
            f_param = lambda tx: f(tx[0], elem_test.gamma_space(tx[1]))
            vec[i] = gauss_2d.integrate(f_param, *elem_test.time_interval,
                                        *elem_test.space_interval)
        return vec
import sys import os import torch import torch.nn as nn from models.gen_components import Filtration, PositionwiseFeedForward, ResUnet import unittest class TestFilteringCNN(unittest.TestCase): def test_shape(self): sino = torch.randn(1, 1, 180, 512) _ , _ , n_angles, detect_len = sino.shape conv_filter = Filtration(detect_len, n_angles, 3, 8) filtered_sino = conv_filter(sino) self.assertEqual(sino.shape, filtered_sino.shape) class TestFBPNet(unittest.TestCase): def test_shape(self): sino = torch.randn(1, 1, 180, 512) tomo_slice = torch.randn(1, 1, 512, 512) _ , _, n_angles, detect_len = sino.shape fbp = PositionwiseFeedForward(detect_len, n_angles, 64) img = fbp(sino) self.assertEqual(img.shape, tomo_slice.shape) class TestRefinement(unittest.TestCase): def test_shape(self): tomo_slice = torch.randn(1, 1, 512, 512) refine = ResUnet(1) tomo_slice_enhanced = refine(tomo_slice) self.assertEqual(tomo_slice_enhanced.shape, tomo_slice.shape) class TestFilterFBPCombination(unittest.TestCase): def test_shape(self): sino = torch.randn(1, 1, 180, 512) _, _, n_angles, detect_len = sino.shape conv_filter = Filtration(detect_len, n_angles, 3, 8) filtered_sino = conv_filter(sino) tomo_slice = torch.randn(1, 1, 512, 512) fbp = PositionwiseFeedForward(detect_len, n_angles, 64) img = fbp(filtered_sino) self.assertEqual(img.shape, tomo_slice.shape) if __name__ == '__main__': unittest.main()
# coding: utf-8

# # Machine learning - Protein Chain Classification
#
# In this demo we try to classify a protein chain as either an all alpha or all beta protein based on protein sequence. We use n-grams and a Word2Vec representation of the protein sequence as a feature vector.
#
# [Word2Vec model](https://spark.apache.org/docs/latest/mllib-feature-extraction.html#word2vec)
#
# [Word2Vec example](https://spark.apache.org/docs/latest/ml-features.html#word2vec)
#
# ## Imports

# In[17]:

from pyspark import SparkConf, SparkContext, SQLContext
from pyspark.sql import SparkSession
from pyspark.sql.functions import *
from pyspark.sql.types import *
from mmtfPyspark.io import mmtfReader
from mmtfPyspark.webfilters import Pisces
from mmtfPyspark.filters import ContainsLProteinChain
from mmtfPyspark.mappers import StructureToPolymerChains
from mmtfPyspark.datasets import secondaryStructureExtractor
from mmtfPyspark.ml import ProteinSequenceEncoder, SparkMultiClassClassifier, datasetBalancer
from pyspark.sql.functions import *
from pyspark.ml.classification import DecisionTreeClassifier, LogisticRegression, MultilayerPerceptronClassifier, RandomForestClassifier


# ## Configure Spark Context

# In[18]:

conf = SparkConf() \
    .setMaster("local[*]") \
    .setAppName("MachineLearningDemo")

sc = SparkContext(conf=conf)


# ## Read MMTF File and create a non-redundant set (<=40% seq. identity) of L-protein clains

# In[19]:

pdb = mmtfReader.read_sequence_file('../../resources/mmtf_reduced_sample/', sc) \
    .flatMap(StructureToPolymerChains()) \
    .filter(Pisces(sequenceIdentity=40, resolution=3.0))


# ## Get secondary structure content

# In[20]:

data = secondaryStructureExtractor.get_dataset(pdb)


# ## Define addProteinFoldType function

# In[21]:

def add_protein_fold_type(data, minThreshold, maxThreshold):
    '''Adds a column "foldType" with three major secondary structure
    classes: "alpha", "beta", "alpha+beta", and "other" based upon the
    fraction of alpha/beta content.

    The simplified syntax used in this method relies on two imports:
        from pyspark.sql.functions import when
        from pyspark.sql.functions import col

    Attributes:
        data (Dataset<Row>): input dataset with alpha, beta composition
        minThreshold (float): below this threshold, the secondary structure is ignored
        maxThreshold (float): above this threshold, the secondary structure is ignored
    '''
    return data.withColumn(
        "foldType",
        when((col("alpha") > maxThreshold) & (col("beta") < minThreshold), "alpha").
        when((col("beta") > maxThreshold) & (col("alpha") < minThreshold), "beta").
        when((col("alpha") > maxThreshold) & (col("beta") > maxThreshold), "alpha+beta").
        otherwise("other")
    )


# ## Classify chains by secondary structure type

# In[22]:

data = add_protein_fold_type(data, minThreshold=0.05, maxThreshold=0.15)


# ## Create a Word2Vec representation of the protein sequences
#
# **n = 2**     # create 2-grams
#
# **windowSize = 25**    # 25-amino residue window size for Word2Vector
#
# **vectorSize = 50**    # dimension of feature vector

# In[23]:

encoder = ProteinSequenceEncoder(data)
data = encoder.overlapping_ngram_word2vec_encode(n=2, windowSize=25, vectorSize=50).cache()

data.toPandas().head(5)


# ## Keep only a subset of relevant fields for further processing

# In[24]:

data = data.select(['structureChainId', 'alpha', 'beta', 'coil', 'foldType', 'features'])


# ## Select only alpha and beta foldType to parquet file

# In[25]:

# The "other" class is deliberately excluded to keep a two-class problem.
data = data.where((data.foldType == 'alpha') | (data.foldType == 'beta'))  #| (data.foldType == 'other'))
print(f"Total number of data: {data.count()}")
data.toPandas().head()


# ## Basic dataset information and setting

# In[26]:

label = 'foldType'
testFraction = 0.1
seed = 123

vector = data.first()["features"]
featureCount = len(vector)
print(f"Feature count : {featureCount}")

classCount = int(data.select(label).distinct().count())
print(f"Class count : {classCount}")

print(f"Dataset size (unbalanced) : {data.count()}")
data.groupby(label).count().show(classCount)
# Downsample the majority class so both fold types occur equally often.
data = datasetBalancer.downsample(data, label, 1)
print(f"Dataset size (balanced) : {data.count()}")
data.groupby(label).count().show(classCount)


# ## Decision Tree Classifier

# In[27]:

dtc = DecisionTreeClassifier()
mcc = SparkMultiClassClassifier(dtc, label, testFraction, seed)
matrics = mcc.fit(data)
for k, v in matrics.items():
    print(f"{k}\t{v}")


# ## Random Forest Classifier

# In[28]:

rfc = RandomForestClassifier()
mcc = SparkMultiClassClassifier(rfc, label, testFraction, seed)
matrics = mcc.fit(data)
for k, v in matrics.items():
    print(f"{k}\t{v}")


# ## Logistic Regression Classifier

# In[29]:

lr = LogisticRegression()
mcc = SparkMultiClassClassifier(lr, label, testFraction, seed)
matrics = mcc.fit(data)
for k, v in matrics.items():
    print(f"{k}\t{v}")


# ## Simple Multilayer Perception Classifier

# In[30]:

# Network topology: input features -> two hidden layers of 64 -> classes.
layers = [featureCount, 64, 64, classCount]
mpc = MultilayerPerceptronClassifier() \
    .setLayers(layers) \
    .setBlockSize(128) \
    .setSeed(1234) \
    .setMaxIter(100)
mcc = SparkMultiClassClassifier(mpc, label, testFraction, seed)
matrics = mcc.fit(data)
for k, v in matrics.items():
    print(f"{k}\t{v}")


# ## Terminate Spark

# In[31]:

sc.stop()
from django.db import migrations, transaction class Migration(migrations.Migration): dependencies = [ ('auth', '__latest__'), ('AcadeMeData', '0001_initial'), ] def generate_data(apps, schema_editor): from AcadeMeData.models import Degree test_data = [ (1, 'Computer Science', 'Recihman University, Ben Gurion University, Tel Aviv University', 'A degree in Computer Science offers the tools to succeed in todays technology driven world'), (0, 'History', 'Hebrew University, Afeka College', 'A degree in History offers students a broad understanding of historical events'), ] with transaction.atomic(): for id, name, universities, description in test_data: Degree(degree_id=id, name=name, universities=universities, description=description).save() operations = [ migrations.RunPython(generate_data), ]
from pyfluidsynth3 import fluidaudiodriver, fluidhandle, fluidplayer, fluidsettings, fluidsynth import sys ''' Based on the examples from pyfluidsynth by MostAwesomeDude. ''' if len( sys.argv ) < 4: print( "Usage: {0} library soundfont.sf2 song.mid".format(sys.argv[0]) ) sys.exit() handle = fluidhandle.FluidHandle( sys.argv[1] ) settings = fluidsettings.FluidSettings( handle ) synth = fluidsynth.FluidSynth( handle, settings ) driver = fluidaudiodriver.FluidAudioDriver( handle, synth, settings ) player = fluidplayer.FluidPlayer( handle, synth ) synth.load_soundfont( sys.argv[2] ) player.play( sys.argv[3] ) player.join()
import argparse
import warnings

import numpy as np

from MultinomialNBClassifier import multinomial_nb_classifier
from NBClassifier import nb_classifier
from LogisticRegressionClassifer import lr_classifer
from SGDClassifier import sgd_classifier
from Parser import get_vocabulary, bag_of_words, bernoulli


def main():
    """Parse the command-line flags and run the selected classifiers."""
    parser = argparse.ArgumentParser(description="Instructions:")
    parser.add_argument(
        "-nb", dest="nb", help="Discrete Naive Bayes Classifier", action="store_true"
    )
    parser.add_argument(
        "-mnb",
        dest="mnb",
        help="Multinomial Naive Bayes Classifier",
        action="store_true",
    )
    parser.add_argument(
        "-lr", dest="lr", help="Logistic Regression Classifier", action="store_true"
    )
    parser.add_argument(
        "-sgd",
        dest="sgd",
        help="Stochastic Gradient Descent Classifier",
        action="store_true",
    )
    parser.add_argument(
        "-train", dest="train_data_path", help="train_data_path", required=True
    )
    parser.add_argument(
        "-test", dest="test_data_path", help="test_data_path", required=True
    )

    parse(parser.parse_args())


def print_result(arr):
    """Pretty-print an (accuracy, precision, recall, f1) 4-tuple."""
    accuracy, precision, recall, f1 = arr
    print(f"{accuracy=}, {precision=}, {recall=}, {f1=}")


def parse(args):
    """Build both feature representations and run every selected classifier."""
    vocabulary = get_vocabulary(args.train_data_path)
    bow_train_data, bow_train_classes = bag_of_words(args.train_data_path, vocabulary)
    bow_test_data, bow_test_classes = bag_of_words(args.test_data_path, vocabulary)
    bnl_train_data, bnl_train_classes = bernoulli(args.train_data_path, vocabulary)
    bnl_test_data, bnl_test_classes = bernoulli(args.test_data_path, vocabulary)

    if args.nb:
        nb = nb_classifier()
        nb.train(bow_train_data, bow_train_classes)
        print("Discrete Naive Bayes Classifier:")
        print_result(nb.test(bow_test_data, bow_test_classes))
    if args.mnb:
        mnb = multinomial_nb_classifier()
        mnb.train(bnl_train_data, bnl_train_classes)
        print("Multinomial Naive Bayes Classifier:")
        print_result(mnb.test(bnl_test_data, bnl_test_classes))
    if args.lr:
        # FIX: np.warnings was a deprecated alias for the stdlib module and
        # was removed in NumPy 1.25; call warnings.filterwarnings directly.
        warnings.filterwarnings("ignore", "overflow")
        lr = lr_classifer()
        print("Logistic Regression Classifier:")
        print("bag_of_words:")
        print("lambda:", lr.train(bow_train_data, bow_train_classes))
        print_result(lr.test(bow_test_data, bow_test_classes))
        print("bernoulli:")
        print("lambda:", lr.train(bnl_train_data, bnl_train_classes))
        print_result(lr.test(bnl_test_data, bnl_test_classes))
    if args.sgd:
        sgd = sgd_classifier()
        print("Stochastic Gradient Descent Classifier:")
        print("bag_of_words:")
        print(sgd.train(bow_train_data, bow_train_classes))
        print_result(sgd.test(bow_test_data, bow_test_classes))
        print("bernoulli:")
        print(sgd.train(bnl_train_data, bnl_train_classes))
        print_result(sgd.test(bnl_test_data, bnl_test_classes))


if __name__ == "__main__":
    main()
# -*- encoding: utf-8 -*-
import time

import allure


# Simultaneous change of all of the client's contact details
@allure.title("Одновременное изменение всех контактных данных Клиента (позитивный тест)")
def test_changing_client_info(app):
    """End-to-end positive test: change the client's name, email and phone
    in one pass, then verify each value is shown back in the UI.

    ``app`` is the page-object fixture providing navigation and assertion
    helpers (defined elsewhere in the test suite).
    """
    print("test_changing_client_info.py is running")
    # Unique test data derived from a date-based checksum.
    client_name = "Autotest#" + str(app.calc_check_sum_from_date())
    print("client_name: ", client_name)
    locator1 = "//span[contains(text(),'" + client_name + "')]"
    email = str(app.calc_check_sum_from_date()) + "@autotest.ru"
    print("email: ", email)
    locator2 = "//span[contains(text(),'" + email + "')]"
    phone, phone_mask = app.get_phone_as_random_set()
    locator3 = "//input[@value='" + phone_mask + "']"
    # Navigate to the client-info form and submit all three changes.
    app.go_to_online_version()
    app.login_client()
    app.go_to_customer_support_service()
    time.sleep(7)
    app.go_to_client_info()
    time.sleep(2)
    app.changing_client_name(client_name)
    app.changing_client_email(email)
    app.changing_client_phone(phone)
    app.save_client_info()
    time.sleep(2)
    # Check 1: the new client name is displayed.
    if (app.is_element_present_main(locator1) == True):
        print("В ОД имя Клиента совпадает с новым значением - ТЕСТ УСПЕШНЫЙ")
        allure.dynamic.description(
            'В ОД имя Клиента совпадает с новым значением - ТЕСТ УСПЕШНЫЙ')
    else:
        print("ОШИБКА: В ОД имя Клиента не совпадает с новым значением - ТЕСТ НЕ УСПЕШНЫЙ!!!")
        allure.dynamic.description(
            'ОШИБКА: В ОД имя Клиента не совпадает с новым значением - ТЕСТ НЕ УСПЕШНЫЙ!!!')
    assert (app.is_element_present_main(locator1) == True)
    # Check 2: the new email is displayed.
    if (app.is_element_present_main(locator2) == True):
        print("В ОД email Клиента совпадает с новым значением - ТЕСТ УСПЕШНЫЙ")
        allure.dynamic.description(
            'В ОД email Клиента совпадает с новым значением - ТЕСТ УСПЕШНЫЙ')
    else:
        print("ОШИБКА: В ОД email Клиента не совпадает с новым значением - ТЕСТ НЕ УСПЕШНЫЙ!!!")
        allure.dynamic.description(
            'ОШИБКА: В ОД email Клиента не совпадает с новым значением - ТЕСТ НЕ УСПЕШНЫЙ!!!')
    assert (app.is_element_present_main(locator2) == True)
    # Check 3: re-open the form and verify the new phone number is shown.
    app.go_to_client_info()
    if (app.is_element_present_main(locator3) == True):
        print("В окне 'Изменить контактные данные' после перевхода номер телефона совпадает с новым значением "
              "- ТЕСТ УСПЕШНЫЙ")
        allure.dynamic.description('Номер телефона совпадает с новым значением - ТЕСТ УСПЕШНЫЙ')
    else:
        print(
            "ОШИБКА: В окне 'Изменить контактные данные' после перевхода номер телефона не совпадает с новым значением "
            "- ТЕСТ НЕ УСПЕШНЫЙ!!!")
        allure.dynamic.description('ОШИБКА: номер телефона не совпадает с новым значением - ТЕСТ НЕ УСПЕШНЫЙ!!!')
    assert (app.is_element_present_main(locator3) == True)
    allure.dynamic.description('Имя, email и телефон Клиента изменены - ТЕСТ УСПЕШНЫЙ')
    app.logout_client()
    print("test_changing_client_info.py is done successfully")
import tldextract

try:
    from django.utils.deprecation import MiddlewareMixin
except ImportError:
    # Django < 1.10 has no MiddlewareMixin; fall back to a plain class so the
    # middleware works under both old- and new-style configurations.
    MiddlewareMixin = object


class SubDomainMiddleware(MiddlewareMixin):
    """
    Append subdomain ie. UUID to the request object to identify projects.
    """

    def process_request(self, request):
        """Set ``request.subdomain`` from the request's host, best-effort.

        On any failure the request proceeds without the attribute, matching
        the original behavior.
        """
        try:
            # NOTE(review): ``HttpRequest.get_raw_uri`` was removed in
            # Django 4.0 — confirm the project's Django version, or switch
            # to ``request.build_absolute_uri()``.
            raw_url = request.get_raw_uri()
            subdomain = tldextract.extract(raw_url).subdomain
            request.subdomain = subdomain
        except Exception:
            # BUGFIX: narrowed from a bare ``except:`` so SystemExit /
            # KeyboardInterrupt are no longer swallowed. Parsing failures
            # are deliberately ignored (best-effort enrichment).
            pass
# -*- encoding: utf-8 -*-
'''
@Func : transfer file to bert-readable style
@Time : 2021/03/04 17:36:47
@Author : Yixiao Ma
@Contact : mayx20@mails.tsinghua.edu.cn
'''
import os
import re
import numpy as np
import json
import argparse
from tqdm import tqdm
import sys
sys.path.append('/work/mayixiao/www22/')
from pre_ajjbqk import process_ajjbqk

parser = argparse.ArgumentParser(description="Help info.")
parser.add_argument('--short', type=bool, default=False, help='if pre ajjbqk.')
parser.add_argument('--w', type=str, default='/work/mayixiao/www22/', help='Write file path.')
parser.add_argument('--mode', type=str, choices=['train_2', 'test_2'], help='mode.')
parser.add_argument('--l', type=str, default='/work/mayixiao/similar_case/202006/data/label/label_top30_dict_2.json', help='Label file path.')
parser.add_argument('--q', type=str, default='/work/mayixiao/similar_case/202006/data/query/', help='Query file path.')
parser.add_argument('--d', type=str, default='/work/mayixiao/similar_case/202006/data/candidates_2', help='Document dir path.')

# Crime-name rationales extracted from the judgment analysis section.
cpfxgc_dic = json.load(open('/work/mayixiao/www22/extracted_cpfxgc_2.json', 'r'))

args = parser.parse_args()

w_list = []

# Select query lines. NOTE: with choices=['train_2', 'test_2'] this first
# branch is unreachable; it is kept for compatibility with the older
# 'train'/'test' configuration.
if args.mode == 'train' or args.mode == 'test':
    with open(args.q, 'r') as f:
        lines = f.readlines()
    # BUGFIX: the original used ``lines.index(line)`` inside the
    # comprehension, which is O(n) per lookup and always returns the FIRST
    # occurrence, so duplicate query lines were all filtered by the first
    # duplicate's position. ``enumerate`` yields each line's true index.
    if args.mode == 'train':
        lines = [line for i, line in enumerate(lines) if (i % 5 != 0 or i >= 100)]
    elif args.mode == 'test':
        lines = [line for i, line in enumerate(lines) if (i % 5 == 0 and i < 100)]
    else:
        raise NotImplementedError
else:
    name_map = {'train_2': 'query2_final.json', 'test_2': 'query2_big.json'}
    lines = open(args.q + name_map[args.mode], 'r').readlines()

with open(args.l, 'r') as f:
    labels = json.load(f)

# Criminal charge vocabulary; strip trailing newlines except on the last line.
raw_c_list = open('/work/mayixiao/similar_case/LeCaRD/LeCaRD_github/data/others/criminal charges.txt', 'r').readlines()
c_list = [c[:-1] for c in raw_c_list[:-1]]
c_list.append(raw_c_list[-1])


def make_data(dic, qid, cid, cls=2):
    """Build one BERT example for query ``qid`` and candidate ``cid``.

    ``cls`` selects 2-class (relevance >= 2 -> 1) or 4-class (raw label)
    labeling. Returns a dict with guid, text fields, charge lists and label.
    """
    tem = {}
    tem['guid'] = qid + '_' + cid
    tem['text_a'] = dic['q']
    c_dic = json.load(open(os.path.join(args.d, qid, cid + '.json'), 'r'))
    # Prefer the judgment result section for charge detection, falling back
    # to the basic facts of the case.
    if 'pjjg' in c_dic:
        c_doc = c_dic['pjjg']
    else:
        c_doc = c_dic['ajjbqk']
    if args.short:
        tem['text_b'] = process_ajjbqk(c_dic['ajjbqk'])
    else:
        tem['text_b'] = c_dic['ajjbqk']
    if cid in cpfxgc_dic:
        tem['text_c'] = cpfxgc_dic[cid]
    else:
        tem['text_c'] = []
    tem['c_a'] = dic['crime']
    tem['c_b'] = []
    # Detect charges named in the candidate document; the look-behind slices
    # reject longer charge names that merely contain a shorter one.
    for crime in c_list:
        if crime in c_doc:
            idx = c_doc.index(crime)
            if crime == '侵占罪' and c_doc[idx-2: idx] == '职务':
                continue
            if crime == '受贿罪' and (
                    c_doc[idx-2: idx] == '单位' or
                    c_doc[idx-5: idx] == '利用影响力' or
                    c_doc[idx-7: idx] == '非国家工作人员'):
                continue
            if crime == '行贿罪' and (
                    c_doc[idx-2: idx] == '单位' or
                    c_doc[idx-3: idx] == '对单位' or
                    c_doc[idx-7: idx] == '对有影响力的人' or
                    c_doc[idx-8: idx] == '对非国家工作人员' or
                    c_doc[idx-16: idx] == '对外国公职人员、国际公共组织官员'):
                continue
            tem['c_b'].append(crime)
    # Map any drug-offense sub-charge onto the umbrella charge.
    if '走私、贩卖、运输、制造毒品罪' not in tem['c_b']:
        tem_c_list = ['走私毒品罪', '贩卖毒品罪', '运输毒品罪', '制造毒品罪', '走私、贩卖毒品罪', '走私、运输毒品罪', '走私、制造毒品罪', '贩卖、运输毒品罪', '贩卖、制造毒品罪', '运输、制造毒品罪', '贩卖、运输、制造毒品罪', '走私、运输、制造毒品罪', '走私、贩卖、制造毒品罪', '走私、贩卖、运输毒品罪']
        for crime in tem_c_list:
            if crime in c_doc and '走私、贩卖、运输、制造毒品罪' not in tem['c_b']:
                tem['c_b'].append('走私、贩卖、运输、制造毒品罪')
    # Same umbrella mapping for firearms/ammunition possession charges.
    if '非法持有、私藏枪支、弹药罪' not in tem['c_b']:
        tem_c_list = ['非法持有枪支、弹药罪', '非法持有枪支罪', '非法持有弹药罪', '非法私藏枪支、弹药罪', '非法私藏枪支罪', '非法私藏弹药罪', '非法持有、私藏枪支罪', '非法持有、私藏弹药罪']
        for crime in tem_c_list:
            if crime in c_doc and '非法持有、私藏枪支、弹药罪' not in tem['c_b']:
                tem['c_b'].append('非法持有、私藏枪支、弹药罪')
    if cls == 2:
        if cid in labels[qid]:
            if labels[qid][cid] >= 2:
                tem['label'] = 1
            else:
                tem['label'] = 0
        else:
            tem['label'] = 0
    elif cls == 4:
        if cid in labels[qid]:
            tem['label'] = labels[qid][cid]
        else:
            tem['label'] = 0
    return tem


max_len = 0
for line in tqdm(lines):
    # SECURITY(review): ``eval`` executes arbitrary code from the query file.
    # If the file is valid JSON-lines, prefer ``json.loads(line)`` — left
    # unchanged here because the files may use Python (single-quote) syntax.
    dic = eval(line)
    qid = str(dic['ridx'])
    pos_num = 0
    for cid in labels[qid]:
        tem = make_data(dic, qid, cid, 2)
        w_list.append(tem)
        if tem['label'] == 1:
            pos_num += 1
        max_len = max(max_len, len(tem['text_a']) + len(tem['text_b']))
    # For training queries with too many positives, pad with unlabeled
    # candidates (treated as negatives) to rebalance toward 15/15.
    if args.mode[:4] != 'test' and pos_num > 15:
        delta = 2*pos_num - 30
        files = os.listdir(os.path.join(args.d, qid))
        cids = [file_.split('.')[0] for file_ in files if file_.split('.')[0] not in labels[qid]][:delta]
        for cid in cids:
            tem = make_data(dic, qid, cid, 2)
            w_list.append(tem)
            max_len = max(max_len, len(tem['text_a']) + len(tem['text_b']))

if args.short:
    tail = '_short.json'
else:
    tail = '.json'

# Write JSON-lines output (no trailing newline after the final record).
with open(args.w + args.mode + tail, 'w') as f:
    for line in w_list[:-1]:
        json.dump(line, f, ensure_ascii=False)
        f.write('\n')
    json.dump(w_list[-1], f, ensure_ascii=False)

print(max_len)
""" The module of tools for parallelization (MPI) """ import numpy as np try: from mpi4py import MPI def get_id_within_node(comm=MPI.COMM_WORLD): rank = comm.rank nodename = MPI.Get_processor_name() nodelist = comm.allgather(nodename) return len([i for i in nodelist[:rank] if i==nodename]) numpy_to_MPI_typemap = { np.dtype(np.float64) : MPI.DOUBLE, np.dtype(np.float32) : MPI.FLOAT, np.dtype(np.int) : MPI.INT, np.dtype(np.int8) : MPI.CHAR, np.dtype(np.uint8) : MPI.UNSIGNED_CHAR, np.dtype(np.int32) : MPI.INT, np.dtype(np.uint32) : MPI.UNSIGNED_INT, } except: pass def divide_data(datanum, rank, size): assert rank<size and datanum>0 residue = (datanum)%size datanum_list = np.empty((size),dtype=np.int32) for i in range(size): if i<residue: datanum_list[i] = int(datanum/size)+1 else: datanum_list[i] = int(datanum/size) if rank<residue: size = datanum/size+1 offset = size*rank else: size = datanum/size offset = size*rank+residue return offset, offset+size, datanum_list def optimize_parallel(model, optimizer=None, messages=True, max_iters=1000, outpath='.', interval=100, name=None): from math import ceil from datetime import datetime import os if name is None: name = model.name stop = 0 for iter in range(int(ceil(float(max_iters)/interval))): model.optimize(optimizer=optimizer, messages= True if messages and model.mpi_comm.rank==model.mpi_root else False, max_iters=interval) if model.mpi_comm.rank==model.mpi_root: timenow = datetime.now() timestr = timenow.strftime('%Y:%m:%d_%H:%M:%S') model.save(os.path.join(outpath, name+'_'+timestr+'.h5')) opt = model.optimization_runs[-1] if opt.funct_eval<opt.max_f_eval: stop = 1 stop = model.mpi_comm.bcast(stop, root=model.mpi_root) if stop: break
#!/usr/bin/python3
import os, requests, sys, json
import asyncio
import argparse
import random

# Static inventory of the Raspberry Pi flashing controllers, one per strip
# (column). Indexed by ``strip_id - 1`` below.
RPi_IPs = [
    {"column_num": 1, "ip_addr": "129.217.152.74", "mac_id": "b8:27:eb:41:99:a0", "hostname": "raspberrypi"},
    {"column_num": 2, "ip_addr": "129.217.152.111", "mac_id": "b8:27:eb:c0:fd:6a", "hostname": "raspberrypi"},
    {"column_num": 3, "ip_addr": "129.217.152.79", "mac_id": "b8:27:eb:18:92:c7", "hostname": "raspberrypi"},
    {"column_num": 4, "ip_addr": "129.217.152.54", "mac_id": "b8:27:eb:53:f2:33", "hostname": "raspberrypi"},
    {"column_num": 5, "ip_addr": "129.217.152.86", "mac_id": "b8:27:eb:e7:6f:dc", "hostname": "raspberrypi"},
    {"column_num": 6, "ip_addr": "129.217.152.89", "mac_id": "b8:27:eb:38:4b:07", "hostname": "raspberrypi"},
    {"column_num": 7, "ip_addr": "129.217.152.84", "mac_id": "b8:27:eb:1b:cf:26", "hostname": "raspberrypi"},
    {"column_num": 8, "ip_addr": "129.217.152.119", "mac_id": "b8:27:eb:6d:0e:53", "hostname": "raspberrypi"},
    {"column_num": 9, "ip_addr": "129.217.152.77", "mac_id": "b8:27:eb:b7:a3:b7", "hostname": "raspberrypi"},
    {"column_num": 10, "ip_addr": "129.217.152.118", "mac_id": "b8:27:eb:be:dc:32", "hostname": "raspberrypi"},
    {"column_num": 11, "ip_addr": "129.217.152.69", "mac_id": "b8:27:eb:ff:a4:48", "hostname": "raspberrypi"},
    {"column_num": 12, "ip_addr": "129.217.152.59", "mac_id": "b8:27:eb:a9:7d:4d", "hostname": "raspberrypi"},
    {"column_num": 13, "ip_addr": "129.217.152.85", "mac_id": "b8:27:eb:c4:f8:c7", "hostname": "raspberrypi"},
    {"column_num": 14, "ip_addr": "129.217.152.48", "mac_id": "b8:27:eb:e4:43:6d", "hostname": "raspberrypi"},
    {"column_num": 15, "ip_addr": "129.217.152.63", "mac_id": "b8:27:eb:98:69:6e", "hostname": "raspberrypi"},
    {"column_num": 16, "ip_addr": "129.217.152.50", "mac_id": "b8:27:eb:75:c7:a2", "hostname": "raspberrypi"},
    {"column_num": 17, "ip_addr": "129.217.152.37", "mac_id": "b8:27:eb:09:3d:77", "hostname": "raspberrypi"},
    {"column_num": 18, "ip_addr": "129.217.152.60", "mac_id": "b8:27:eb:05:d8:4d", "hostname": "raspberrypi"},
    {"column_num": 19, "ip_addr": "129.217.152.64", "mac_id": "b8:27:eb:36:da:22", "hostname": "raspberrypi"},
    {"column_num": 20, "ip_addr": "129.217.152.62", "mac_id": "b8:27:eb:f5:5d:04", "hostname": "raspberrypi"},
    {"column_num": 21, "ip_addr": "129.217.152.51", "mac_id": "b8:27:eb:88:8d:56", "hostname": "raspberrypi"},
    {"column_num": 22, "ip_addr": "129.217.152.87", "mac_id": "b8:27:eb:00:be:93", "hostname": "raspberrypi"},
    {"column_num": 23, "ip_addr": "129.217.152.33", "mac_id": "b8:27:eb:c0:10:ae", "hostname": "raspberrypi"},
]


def send_flash_req(ip_addr, filename, devices):
    """POST firmware ``filename`` to the flashing service on ``ip_addr``.

    ``devices`` is forwarded as the ``device`` form field. Returns a
    two-element list: a status tag and the IP (or an error message).
    """
    if not filename:
        return (['error', 'Please provide absolute path file name as arg'])
    url = "http://" + ip_addr
    payload = {'device': devices}
    headers = {}
    # BUGFIX: the original leaked the open file handle; the context manager
    # closes it deterministically even if the request raises.
    with open(filename, 'rb') as firmware:
        files = [('file', firmware)]
        response = requests.request("POST", url, headers=headers, data=payload, files=files)
    if response.status_code == 200:
        return (['success: ', ip_addr])
    else:
        return (['failed: ', ip_addr])


if __name__ == '__main__':
    # Enumerate all (strip, node) positions, skipping known-bad hardware
    # and the two reserved roles (initiator / tester).
    nodes = list()
    for i in range(1, 24):
        for j in range(1, 16):
            if i == 6:
                continue  # broken strip
            elif i == 7 and j == 12:
                continue  # broken node
            elif i == 23 and j == 10:
                continue  # broken node
            elif i == 12 and j == 8:
                continue  # initiator
            elif i == 12 and j == 9:
                continue  # tester
            nodes.append((i, j))
    random.shuffle(nodes)

    # flash initiator
    firmware_path = os.path.dirname(os.path.realpath(__file__)) + '/test_firmware/initiator.bin'
    strip_id, node_id = (12, 8)
    print('Flashing initiator node strip_id:', strip_id, 'ip:', RPi_IPs[strip_id-1]['ip_addr'], 'node_id:', node_id)
    send_flash_req(RPi_IPs[strip_id-1]['ip_addr'], firmware_path, devices=node_id)

    # flash tester node
    firmware_path = os.path.dirname(os.path.realpath(__file__)) + '/test_firmware/tester.bin'
    strip_id, node_id = (12, 9)
    print('Flashing tester node strip_id:', strip_id, 'ip:', RPi_IPs[strip_id-1]['ip_addr'], 'node_id:', node_id)
    send_flash_req(RPi_IPs[strip_id-1]['ip_addr'], firmware_path, devices=node_id)

    # flash 100 test nodes: 10 firmware images x 10 randomly chosen nodes each
    for i in range(10):
        print('group:', i)
        firmware_path = os.path.dirname(os.path.realpath(__file__)) + '/test_firmware/node_' + str(i) + '.bin'
        for j in range(10):
            strip_id, node_id = nodes.pop()
            print('Flashing node strip_id:', strip_id, 'ip:', RPi_IPs[strip_id-1]['ip_addr'], 'node_id:', node_id)
            send_flash_req(RPi_IPs[strip_id-1]['ip_addr'], firmware_path, devices=node_id)
# ---------------------------------------------------------------------
# Parse OSM XML and return address to coodinates bindings
# ---------------------------------------------------------------------
# Copyright (C) 2007-2020 The NOC Project
# See LICENSE for details
# ---------------------------------------------------------------------

# Python modules
from xml.parsers.expat import ParserCreate

# NOC modules
from .base import GeocodingParser


class OSMXMLParser(GeocodingParser):
    """Expat-based parser extracting buildings (ways tagged ``building``)
    from OSM XML and feeding (id, address, centroid) to the base class."""

    ID_ADDR = "OSM_ID"

    def __init__(self):
        super().__init__()
        self.xml_parser = ParserCreate()
        self.xml_parser.StartElementHandler = self.xml_start_element
        self.xml_parser.EndElementHandler = self.xml_stop_element
        self.current = None  # node/way element currently being parsed
        self.nodes = {}  # id -> (lon, lat, tags)
        self.buildings = []  # {points: ...}

    def xml_start_element(self, name, attrs):
        """Open a node/way context, or collect tag/nd children into it."""
        if name in ("node", "way"):
            self.current = {"name": name, "attrs": attrs, "tags": {}, "nodes": []}
        elif name == "tag":
            # BUGFIX: <tag>/<nd> children of untracked elements (e.g.
            # <relation>) previously crashed on ``self.current is None`` or
            # polluted the last finished node/way; guard against both.
            if self.current is not None:
                self.current["tags"][attrs["k"]] = attrs["v"]
        elif name == "nd":
            if self.current is not None:
                self.current["nodes"] += [attrs["ref"]]

    def xml_stop_element(self, name):
        """Finalize a node (store coordinates) or way (emit building)."""
        if name == "node":
            a = self.current["attrs"]
            self.nodes[a["id"]] = (a["lon"], a["lat"], self.current["tags"])
            self.current = None  # BUGFIX: close the context (see above)
        elif name == "way":
            t = self.current["tags"]
            if t.get("building"):
                # Get address
                fp = [
                    t.get("addr:country"),
                    t.get("addr:city"),
                    t.get("addr:street"),
                    t.get("addr:housenumber"),
                ]
                addr = ", ".join(x for x in fp if x)
                # Get points
                points = [
                    (float(self.nodes[n][0]), float(self.nodes[n][1]))
                    for n in self.current["nodes"]
                ]
                self.feed_building(self.current["attrs"]["id"], addr, self.get_centroid(points))
            self.current = None  # BUGFIX: close the context (see above)

    def parse(self, f):
        """Parse an open binary file object containing OSM XML."""
        self.xml_parser.ParseFile(f)
#strings a = """Lorem ipsum dolor sit amet, consectetur adipiscing elit, sed do eiusmod tempor incididunt ut labore et dolore magna aliqua.""" print(a) #string as array a = "Hello, World!" print(len(a)) print(a[1]) print(a[2:5]) print(a[-5:-2]) print(" sss s sss s ".strip()) print(a.lower()) print(a.upper()) print(a.replace("H", "J")) print(a.split(",")) print("ell" in a) print("jgjhgjhg" in a) print(a.count("l")) txt = "We are the so-called \"Vikings\" from the north." # format price = 49 #txt = "The price is {} dollars" txt = "The price is {:.2f} dollars" print(txt.format(price)) quantity = 3 itemno = 567 price = 49 myorder = "I want {} pieces of item number {} for {:.2f} dollars." print(myorder.format(quantity, itemno, price)) quantity = 3 itemno = 567 price = 49 myorder = "I want {0} pieces of item number {1} for {2:.2f} dollars." print(myorder.format(quantity, itemno, price)) age = 36 name = "John" txt = "His name is {1}. {1} is {0} years old." print(txt.format(age, name)) myorder = "I have a {carname}, it is a {model}." print(myorder.format(carname = "Ford", model = "Mustang"))
# Plot torque felt by the driven gear against the difference in angular
# displacements, for the trial data provided by process_data.
import numpy as np
from scipy.optimize import leastsq
import matplotlib
matplotlib.use('TkAgg') # Actually lets the graph be displayed
import pylab as plt
from process_data import *

fig = plt.figure(1, figsize=(9, 5), dpi=150)
#fig.suptitle('\\textbf{Torque Felt by Driven Gear vs. Difference in Displacements}', fontweight='bold')
fig.subplots_adjust(left=0.08, top=0.97, right=0.98, bottom=0.1)
plt.plot(x1, y1, '.', c='black', label='Trial 1')
plt.plot(x2, y2, '*', c='black', label='Trial 2')
# NOTE(review): this re-plots the Trial 2 data under the 'Trial 3' label —
# likely a copy-paste bug; confirm whether process_data exports x3/y3 and
# whether this line should read plt.plot(x3, y3, ...).
plt.plot(x2, y2, 'x', c='black', label='Trial 3')
plt.ylabel('\\textbf{Torque Felt by Driven Gear (Nm)}')
plt.xlabel('\\textbf{Difference in Displacements (rad)}')
plt.legend(numpoints=1, handletextpad=0.1)
plt.show()
"""Tests for ``GS1ElementString.extract`` parsing of single GS1 element strings."""

from datetime import date
from decimal import Decimal

import pytest

from biip import ParseError
from biip.gln import Gln
from biip.gs1 import GS1ApplicationIdentifier, GS1ElementString, GS1Prefix
from biip.gtin import Gtin, GtinFormat
from biip.sscc import Sscc


# Happy-path extraction for a representative AI of each kind:
# SSCC (00), GTIN (01), lot number (10), GLN (410), multi-group AI (8003).
@pytest.mark.parametrize(
    "value, expected",
    [
        (
            "00373400306809981733",
            GS1ElementString(
                ai=GS1ApplicationIdentifier.extract("00"),
                value="373400306809981733",
                pattern_groups=["373400306809981733"],
                sscc=Sscc(
                    value="373400306809981733",
                    prefix=GS1Prefix(value="734", usage="GS1 Sweden"),
                    extension_digit=3,
                    payload="37340030680998173",
                    check_digit=3,
                ),
            ),
        ),
        (
            "0107032069804988",
            GS1ElementString(
                ai=GS1ApplicationIdentifier.extract("01"),
                value="07032069804988",
                pattern_groups=["07032069804988"],
                gtin=Gtin(
                    value="07032069804988",
                    format=GtinFormat.GTIN_13,
                    prefix=GS1Prefix(value="703", usage="GS1 Norway"),
                    payload="703206980498",
                    check_digit=8,
                ),
            ),
        ),
        (
            "100329",
            GS1ElementString(
                ai=GS1ApplicationIdentifier.extract("10"),
                value="0329",
                pattern_groups=["0329"],
            ),
        ),
        (
            "4101234567890128",
            GS1ElementString(
                ai=GS1ApplicationIdentifier.extract("410"),
                value="1234567890128",
                pattern_groups=["1234567890128"],
                gln=Gln(
                    value="1234567890128",
                    prefix=GS1Prefix(value="123", usage="GS1 US"),
                    payload="123456789012",
                    check_digit=8,
                ),
            ),
        ),
        (
            "800370713240010220085952",
            GS1ElementString(
                ai=GS1ApplicationIdentifier.extract("8003"),
                value="70713240010220085952",
                pattern_groups=["70713240010220", "085952"],
            ),
        ),
    ],
)
def test_extract(value: str, expected: GS1ElementString) -> None:
    assert GS1ElementString.extract(value) == expected


# Values whose payload does not match the AI's regex must raise ParseError.
@pytest.mark.parametrize(
    "ai_code, bad_value",
    [
        # Too short product number
        ("01", "01123"),
        # Too short weight
        ("3100", "3100123"),
    ],
)
def test_extract_fails_when_not_matching_pattern(ai_code: str, bad_value: str) -> None:
    ai = GS1ApplicationIdentifier.extract(ai_code)

    with pytest.raises(ParseError) as exc_info:
        GS1ElementString.extract(bad_value)

    assert (
        str(exc_info.value)
        == f"Failed to match {bad_value!r} with GS1 AI {ai} pattern '{ai.pattern}'."
    )


# Syntactically valid but calendar-impossible dates must raise ParseError.
@pytest.mark.parametrize(
    "ai_code, bad_value",
    [
        # Bad production date
        ("11", "131313"),
        # Bad best before date
        ("15", "999999"),
    ],
)
def test_extract_fails_with_invalid_date(ai_code: str, bad_value: str) -> None:
    ai = GS1ApplicationIdentifier.extract(ai_code)

    with pytest.raises(ParseError) as exc_info:
        GS1ElementString.extract(f"{ai_code}{bad_value}")

    assert (
        str(exc_info.value) == f"Failed to parse GS1 AI {ai} date from {bad_value!r}."
    )


# GS1 two-digit years resolve into a sliding century window of the current
# year -49 .. +50; these constants pin that window for the tests below.
THIS_YEAR = date.today().year
THIS_YEAR_SHORT = str(THIS_YEAR)[2:]
MIN_YEAR = THIS_YEAR - 49
MIN_YEAR_SHORT = str(MIN_YEAR)[2:]
MAX_YEAR = THIS_YEAR + 50
MAX_YEAR_SHORT = str(MAX_YEAR)[2:]


@pytest.mark.parametrize(
    "value, expected",
    [
        (
            # Best before date, around the current date
            f"15{THIS_YEAR_SHORT}0526",
            GS1ElementString(
                ai=GS1ApplicationIdentifier.extract("15"),
                value=f"{THIS_YEAR_SHORT}0526",
                pattern_groups=[f"{THIS_YEAR_SHORT}0526"],
                date=date(THIS_YEAR, 5, 26),
            ),
        ),
        (
            # Best before date, 49 years into the past
            f"15{MIN_YEAR_SHORT}0526",
            GS1ElementString(
                ai=GS1ApplicationIdentifier.extract("15"),
                value=f"{MIN_YEAR_SHORT}0526",
                pattern_groups=[f"{MIN_YEAR_SHORT}0526"],
                date=date(MIN_YEAR, 5, 26),
            ),
        ),
        (
            # Best before date, 50 years into the future
            f"15{MAX_YEAR_SHORT}0526",
            GS1ElementString(
                ai=GS1ApplicationIdentifier.extract("15"),
                value=f"{MAX_YEAR_SHORT}0526",
                pattern_groups=[f"{MAX_YEAR_SHORT}0526"],
                date=date(MAX_YEAR, 5, 26),
            ),
        ),
    ],
)
def test_extract_handles_min_and_max_year_correctly(
    value: str, expected: GS1ElementString
) -> None:
    assert GS1ElementString.extract(value) == expected


# A day of "00" means "last day of the given month" per the GS1 spec.
@pytest.mark.parametrize(
    "value, expected",
    [
        ("15200200", date(2020, 2, 29)),
        ("15210200", date(2021, 2, 28)),
        ("17211200", date(2021, 12, 31)),
    ],
)
def test_extract_handles_zero_day_as_last_day_of_month(
    value: str, expected: date
) -> None:
    assert GS1ElementString.extract(value).date == expected


# The fourth AI digit encodes the number of implied decimal places.
@pytest.mark.parametrize(
    "value, expected",
    [
        # Trade measures (GS1 General Specifications, section 3.6.2)
        ("3105123456", Decimal("1.23456")),  # Net weight (kg)
        ("3114123456", Decimal("12.3456")),  # First dimension (m)
        ("3123123456", Decimal("123.456")),  # Second dimension (m)
        ("3132123456", Decimal("1234.56")),  # Third dimension (m)
        ("3141123456", Decimal("12345.6")),  # Area (m^2)
        ("3150123456", Decimal("123456")),  # Net volume (l)
        ("3161123456", Decimal("12345.6")),  # Net volume (m^3)
        # ... plus equivalent for imperial units
        ("3661123456", Decimal("12345.6")),  # Net volume (cubic yards)
        #
        # Logistic measures (GS1 General Specifications, section 3.6.3)
        ("3302023456", Decimal("234.56")),  # Logistic weight (kg)
        ("3313023456", Decimal("23.456")),  # First dimension (m)
        ("3324023456", Decimal("2.3456")),  # Second dimension (m)
        ("3335023456", Decimal("0.23456")),  # Third dimension (m)
        ("3344023456", Decimal("2.3456")),  # Area (m^2)
        ("3353023456", Decimal("23.456")),  # Logistic volume (l)
        ("3362023456", Decimal("234.56")),  # Logistic volume (m^3)
        # ... plus equivalent for imperial units
        ("3691123456", Decimal("12345.6")),  # Logistic volume (cubic yards)
        #
        # Kilograms per square meter (GS1 General Specifications, section 3.6.4)
        ("3372123456", Decimal("1234.56")),
    ],
)
def test_extract_variable_measures(value: str, expected: Decimal) -> None:
    assert GS1ElementString.extract(value).decimal == expected


@pytest.mark.parametrize(
    "value, expected",
    [
        # Amount payable or coupon value (GS1 General Specifications, section 3.6.6)
        ("3901123", Decimal("12.3")),
        ("3901123456", Decimal("12345.6")),
        ("3903123456789012345", Decimal("123456789012.345")),
        ("3909123456789012345", Decimal("123456.789012345")),
        # Amount payable for variable measure trade item (section 3.6.8)
        ("3921123", Decimal("12.3")),
        ("3921123456", Decimal("12345.6")),
        ("3923123456789012345", Decimal("123456789012.345")),
        ("3929123456789012345", Decimal("123456.789012345")),
    ],
)
def test_extract_amount_payable(value: str, expected: Decimal) -> None:
    assert GS1ElementString.extract(value).decimal == expected


# AIs 391n/393n carry a 3-digit ISO 4217 numeric currency code before the amount.
@pytest.mark.parametrize(
    "value, expected_currency, expected_decimal",
    [
        # Amount payable and ISO currency code (section 3.6.7)
        ("39127101230", "ZAR", Decimal("12.30")),
        ("39117101230", "ZAR", Decimal("123.0")),
        ("391097812301", "EUR", Decimal("12301")),
        #
        # Amount payable for variable measure trade item and currency (section 3.6.9)
        ("39327101230", "ZAR", Decimal("12.30")),
        ("39317101230", "ZAR", Decimal("123.0")),
        ("393097812301", "EUR", Decimal("12301")),
    ],
)
def test_extract_amount_payable_and_currency(
    value: str, expected_currency: str, expected_decimal: Decimal
) -> None:
    element_string = GS1ElementString.extract(value)

    assert element_string.decimal == expected_decimal

    # Optional: If py-moneyed is installed, create Money instances
    assert element_string.money is not None
    assert element_string.money.amount == expected_decimal
    assert element_string.money.currency.code == expected_currency


@pytest.mark.parametrize(
    "value, expected",
    [("39400010", Decimal("10")), ("39410055", Decimal("5.5"))],
)
def test_extract_percentage_discount(value: str, expected: Decimal) -> None:
    assert GS1ElementString.extract(value).decimal == expected


# Human Readable Interpretation: AI rendered in parentheses before the value.
@pytest.mark.parametrize(
    "value, expected", [("0107032069804988", "(01)07032069804988")]
)
def test_as_hri(value: str, expected: str) -> None:
    assert GS1ElementString.extract(value).as_hri() == expected
from django.urls import path
from . import views

# Route table for the users app. All per-user routes key on the captured
# <username> path parameter (no converter, so any non-slash string matches).
urlpatterns = [
    path('', views.index, name='users-index'),
    path('<username>/profile', views.profile, name='user-profile'),
    path('<username>/update', views.update, name='user-update'),
    path('<username>/delete', views.delete, name='user-delete'),
    # Aggregated-statistics endpoints (RSO/college and RSO/event distributions).
    path('<username>/user_rso_college_dist', views.get_college_dist, name='user-rso-college-dist'),
    path('<username>/user_event_rso_dist', views.get_rso_event_dist, name='user-event-rso-dist')
]
__author__ = "Kyle Beyer"
__email__ = "beykyle@umich.edu"

import glob
import sys
import os
import argparse
import numpy as np
from numpy.linalg import multi_dot
import xml.etree.ElementTree as et
from pathlib import Path
from enum import Enum

# plotting
import matplotlib
from matplotlib import pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
from matplotlib.ticker import MaxNLocator
import matplotlib.pylab as pylab
from matplotlib import rc
from matplotlib.colors import LogNorm
import matplotlib.font_manager

"""
This module performs numerical neutron slowing down calculations
for homogenous mixtures
"""

import process_data
from nuclide import Nuclide
from process_data import Reactions as RXN
#from skernel import solver_eq_leth as ksolver


class BoundaryCondition(Enum):
    # Source above the top of the grid is treated as the asymptotic
    # elastic-scatter source.
    asymptotic_scatter_source = 1


class Grid:
    """Simple energy-grid description: max/min energy [eV] and point count."""
    def __init__(self, gmax, gmin, sz):
        self.max = gmax
        self.min = gmin
        self.size = sz


def validate_xml_path(input_path):
    """Exit with an error message unless ``input_path`` is an existing file."""
    if not input_path.is_file():
        print("Input path must point to a valid xml file, completely specifying a material")
        exit(1)


def build_nuclide_data(xml_root, input_path, grid):
    """Build a Nuclide for every <nuclide> under <nuclear_data>.

    Cross-section files are resolved relative to the input file's
    grandparent directory.
    """
    nuclear_data_node = xml_root.find("nuclear_data")
    nuclides = []
    base_path = input_path.parents[1]
    print("Base data path: " + str(base_path))
    for nuclide_node in nuclear_data_node.findall('nuclide'):
        nuclides.append(Nuclide(nuclide_node, base_path, grid))
    return nuclides


def skernel_const_gsize(in_scatter_source, sig_p_red, denom, alpha, du, phi):
    """Solve the slowing-down equation group-by-group (constant group width).

    ``phi`` is filled in place and returned. For each group the in-scatter
    from lower-lethargy groups plus an asymptotic scatter source term is
    divided by the precomputed ``denom``.
    """
    # calculate lowest group that can scatter into current group for each
    # TODO for now, we are assuming constant groupwidth for speed
    max_group_dist = np.array(
        [int(round(np.log(1/a) / du)) for a in alpha[:] ])
    (num_nuclides, num_groups) = in_scatter_source.shape
    for i in np.arange(0,num_groups):
        phi[i] = 0
        for nuc in np.arange(0,num_nuclides):
            min_g = i - max_group_dist[nuc]
            leftover = 0
            if min_g < 0:
                # Part of the scatter range lies above the grid; it is
                # represented by the asymptotic source term below.
                leftover = -min_g
                min_g = 0
            back_idx = np.array([i,i+leftover])
            asym_scat_src = sig_p_red[nuc] * np.sum(
                (np.exp(-(back_idx-1)*du)*(1-np.exp(-du))**2) )
            phi[i] = phi[i] + np.dot( in_scatter_source[nuc][min_g:i] , phi[min_g:i] ) + asym_scat_src
        # NOTE(review): placement of this normalization (once per group,
        # after summing all nuclides) reconstructed from physics — confirm
        # against the original indentation.
        phi[i] = phi[i] / denom[i]
    return phi


def slow_down(nuclides, ratios, display=False, out=False, outpath=None):
    """Run the slowing-down solve for each number-density ratio vector.

    ``ratios`` is a sequence of per-nuclide number-density ratios; one flux
    solution is produced per entry, optionally plotted and written out.
    """
    # get microscopic xs
    alpha = np.array([nuc.alpha for nuc in nuclides])
    sig_s = np.array( [nuc.xs[RXN.elastic_sc].xs for nuc in nuclides ])
    sig_a = np.array( [nuc.xs[RXN.rad_cap].xs for nuc in nuclides ])

    # get lethargy grid
    u = nuclides[0].xs[RXN.elastic_sc].leth_boundaries
    du = u[1:] - u[:-1]
    egrid = nuclides[0].xs[RXN.elastic_sc].E
    num_groups = len(u)-1

    # precompute exponential factors and bin widths
    exp_der = np.exp(-1*u[:-2]) + np.exp(-1*u[2:]) - 2 * np.exp(-1*u[1:-1])

    flx = {}
    sig_t = []
    save = True
    for i in range(len(ratios)):
        print("Running problem " + str(i))
        # new flux
        p = np.zeros(num_groups)
        sig_t_all = np.zeros(num_groups)
        # compute xs in form needed by solver
        sig_p = np.array( [n*nuc.pot_scatterxs_b/(1-a) for nuc,a,n in zip(nuclides,alpha,ratios[i])] )
        sig_s_red = np.vstack([n*s/(1-a) for s,a,n in zip(sig_s,alpha,ratios[i])])
        sig_p_red = np.vstack([n*s/(1-a) for s,a,n in zip(sig_p,alpha,ratios[i])])
        sig_s_red_all = np.sum(sig_s_red, axis=0)
        for j in range(0,len(alpha)):
            sig_t_all = sig_t_all + ratios[i][j] * (sig_s[j] + sig_a[j])
        sig_t.append(sig_t_all)
        # precompute denominator
        denom = np.multiply(sig_t_all,du) - np.multiply(sig_s_red_all, du - 1 + np.exp(-1* du))
        # precompute scattering sources
        in_scatter_source = sig_s_red[:,:-1] * exp_der
        # run solver kernel
        flux = skernel_const_gsize(in_scatter_source, sig_p_red, denom, alpha, du[0], p)
        # display and output
        if display:
            name = "problem_" + str(i)
            plot_flux(egrid , flux, sig_t_all, name)
        if out:
            name = "problem_" + str(i)
            write_problem_data(outpath, egrid, flux, name, ratios[i], nuclides)
        if save:
            # Keyed by the first nuclide's ratio (the hydrogen/U238 ratio in
            # the default problem set).
            flx[ratios[i][0]] = flux
    plot_all(egrid , flx, sig_t, "all")


def plot_flux(energy, flux, sig_t_all, name):
    """Save a log-log plot of one flux solution plus the scaled total xs."""
    f,a = process_data.fig_setup()
    plt.loglog(energy[:-1], flux[:-1], label="$\Phi$")
    plt.loglog(energy, sig_t_all * max(flux)/max(sig_t_all), label=r"$\Sigma_t$ - scaled")
    plt.xlabel("Energy [eV]", fontsize=20)
    plt.ylabel("Scalar Flux [a.u.]", fontsize=20)
    plt.legend(fontsize=18)
    a.tick_params(size=10, labelsize=20)
    plt.savefig(name + ".png")


def plot_all(energy, fluxes, sig_t_all, name):
    """Save a log-log plot overlaying all flux solutions, one per ratio."""
    f,a = process_data.fig_setup()
    for lbl, flx in fluxes.items():
        label = r"$\frac{N_H}{N_{U238}} = $" + str(lbl)
        plt.loglog(energy[:-1], flx[:-1], label=label)
    plt.xlabel("Energy [eV]", fontsize=20)
    plt.ylabel("Scalar Flux [a.u.]", fontsize=20)
    plt.legend(fontsize=18)
    a.tick_params(size=10, labelsize=20)
    plt.savefig(name + ".png")


def write_problem_data(path, en, flux, name, ratios, nuclides):
    """Write one problem's energy/flux table (with metadata header) as CSV."""
    if path != None:
        path = path + (name + ".csv")
        print("Writing output to " + path)
    with process_data.smart_open(path) as fh:
        print(name , file=fh)
        # iterate through the table and print in csv format
        print("Nuclides:", file=fh)
        print([n.name for n in nuclides], file=fh)
        print("Ratios:", file=fh)
        print(ratios, file=fh)
        print("{}, {}".format("Energy [eV]", "Flux [a.u.]"), file=fh)
        for i in range(len(en)-1):
            print("{:1.8e}, {:1.8e}".format( en[i], flux[i]), file=fh)


def parse_args_and_run(argv: list):
    """Parse CLI arguments, build nuclide data, and run the default ratios."""
    # default args
    current_path = Path(os.getcwd())
    def_out_path = None
    def_max_energy_eV = 2.0E4
    def_min_energy_eV = 1.0
    def_gridsize = 6E5

    # argument parsing
    parser = argparse.ArgumentParser(
        description='Interpolate pointwise microscopic cross sections to equal lethargy groups')
    parser.add_argument('-i', '--input',
                        help='Path to xml file describing maerial composition of system',
                        dest='input_path', required=True)
    # NOTE(review): '--ouput' is a typo for '--output', but renaming it would
    # change the CLI interface — flagged only.
    parser.add_argument('-o', '--ouput',
                        help='Path to write output file to',
                        dest='output_path', required=False)
    parser.add_argument('--max-energy', type=float,
                        help='maximum energy in [eV] for slowing down equations - for defining lethargy. Defalut: 2E4',
                        dest='max_energy_eV', default=def_max_energy_eV)
    parser.add_argument('--min-energy', type=float,
                        help='minimum energy in [eV] for slowing down equations - for defining lethargy. Default: 1.0',
                        dest='min_energy_eV', default=def_min_energy_eV)
    parser.add_argument('-n', '--num-gridpoints', type=int,
                        help='desired number of points on lethargy grid: Default: 6E5',
                        dest='gridsize', default=def_gridsize)
    parser.add_argument('-d', '--display', action='store_true',
                        help='if flag present, generates flux plots',
                        dest='display')

    args = parser.parse_args()

    input_path = Path(args.input_path)
    validate_xml_path(input_path)
    grid = Grid(args.max_energy_eV , args.min_energy_eV, args.gridsize)

    output_path = args.output_path
    if output_path != None:
        output_path = Path(output_path)

    tree = et.parse(str(input_path))
    root = tree.getroot()
    nuclides = build_nuclide_data(root, input_path, grid)

    # Default problem set: hydrogen/U238 number-density ratios 1, 2.5, 5.
    r = np.array([[1. , 1.] ,[2.5 , 1.] ,[5. , 1.]])
    slow_down(nuclides, r, display=args.display, out=True, outpath=args.output_path)


if __name__ == "__main__":
    parse_args_and_run(sys.argv)
#/usr/bin/env python # Matthieu Brucher # Last Change : 2007-08-22 14:01 from __future__ import absolute_import import unittest import numpy from PyDSTool.Toolbox.optimizers.line_search import GoldsteinRule class Function(object): def __call__(self, x): return (x[0] - 2) ** 3 + (2 * x[1] + 4) ** 2 def gradient(self, x): return numpy.array((3 * (x[0] - 2) ** 2, 4 * (2 * x[1] + 4))) class test_GoldsteinRuleSearch(unittest.TestCase): def test_call_gradient_direction(self): lineSearch = GoldsteinRule() state = {'gradient' : numpy.array((12., 16.)), 'direction' : numpy.array((4., -8.))} function = Function() x = lineSearch(origin = numpy.zeros((2)), state = state, function = function) assert(function(x) <= function(numpy.zeros((2))) + 0.1 * state['alpha_step'] * numpy.dot(numpy.array((12., 16.)), numpy.array((4., -8.)))) assert(function(x) >= function(numpy.zeros((2))) + 0.9 * state['alpha_step'] * numpy.dot(numpy.array((12., 16.)), numpy.array((4., -8.)))) assert(state['alpha_step'] > 0) if __name__ == "__main__": unittest.main()
# NOTE(review): ``ugettext_lazy`` was removed in Django 4.0; confirm the
# Django version or migrate to ``gettext_lazy``.
from django.utils.translation import ugettext_lazy as _

# Human-readable app label, marked lazily for translation.
label = _('expenses')

# Package version string.
VERSION = '0.3.0'
from pathlib import Path
import argparse
from base64 import b85encode
from zlib import compress
from hashlib import sha256

parser = argparse.ArgumentParser()
parser.add_argument("target", help="build target")
args = parser.parse_args()

target: str = args.target
stem: str = Path(target).stem

# NOTE(review): the embedded payload is read from ``stem`` (the compiled
# executable next to the source), while the hash is taken over ``target``
# (the source file itself) — confirm this asymmetry is intended.
binary: str = str(
    b85encode(compress(Path(stem).read_bytes())), encoding="utf-8")
file_hash: str = sha256(Path(target).read_bytes()).hexdigest()

# Emit a self-extracting Python wrapper: it decodes the embedded payload,
# writes it as an executable .bin and runs it; the original source is
# appended as a string literal for reference.
out_path = Path(f"{stem}.py")
out_path.write_text("# This code is generated by [Atcoder_base64](https://github.com/kyomukyomupurin/AtCoder_base64)\n\n"
                    "from base64 import b85decode\n"
                    "import subprocess\n"
                    "from pathlib import Path\n"
                    "from zlib import decompress\n\n\n"
                    f"binary = \"{binary}\"\n"
                    f"Path(\"{file_hash}.bin\").write_bytes(decompress(b85decode(binary)))\n"
                    f"Path(\"{file_hash}.bin\").chmod(0o755)\n"
                    f"subprocess.run(\"./{file_hash}.bin\")\n\n"
                    "# Original source code:\n"
                    "\"\"\"\n"
                    f"{Path(target).read_text()}\n"
                    "\"\"\""
                    )

# BUGFIX: ``sys.getsizeof`` measures the in-memory str object (including
# ~49+ bytes of interpreter overhead), not the generated file; report the
# actual on-disk size instead. 5120 = 1% of the 512000-byte submission limit.
sz: int = out_path.stat().st_size
print(
    f"The size of {stem}.py is {sz / 1000:.1f}KB, {sz / 5120:.1f}% of limit.")
import so3g
from spt3g import core
import numpy as np
from so3g import hk


class HKScanner:
    """Module that scans and reports on HK archive contents and compliance.

    Attributes:
      stats (dict): A nested dictionary of statistics that are updated as
        frames are processed by the module.  Elements:

        - ``n_hk`` (int): The number of HK frames encountered.
        - ``n_other`` (int): The number of non-HK frames encountered.
        - ``n_session`` (int): The number of distinct HK sessions processed.
        - ``concerns`` (dict): The number of warning (key ``n_warning``)
          and error (key ``n_error``) events encountered.  The detail for
          such events is logged to ``spt3g.core.log_warning`` /
          ``log_error``.
        - ``versions`` (dict): The number of frames (value) encountered
          that have a given hk_agg_version (key).
    """

    def __init__(self):
        self.session_id = None
        self.providers = {}
        self.stats = {
            'n_hk': 0,
            'n_other': 0,
            'n_session': 0,
            'concerns': {
                'n_error': 0,
                'n_warning': 0,
            },
            'versions': {},
        }

    def report_and_reset(self):
        """Log a summary of the current session's stats and clear session id."""
        core.log_info('Report for session_id %i:\n' % self.session_id
                      + str(self.stats) + '\n'
                      + str(self.providers) + '\nEnd report.',
                      unit='HKScanner')
        self.session_id = None

    def __call__(self, f):
        """Processes a frame.  Only Housekeeping frames will be examined;
        other frames will simply be counted.  All frames are passed
        through unmodified.
        """
        if f.type == core.G3FrameType.EndProcessing:
            self.report_and_reset()
            return [f]

        if f.type != core.G3FrameType.Housekeeping:
            self.stats['n_other'] += 1
            return f

        self.stats['n_hk'] += 1
        vers = f.get('hkagg_version', 0)
        self.stats['versions'][vers] = self.stats['versions'].get(vers, 0) + 1

        if f['hkagg_type'] == so3g.HKFrameType.session:
            session_id = f['session_id']
            if self.session_id is not None:
                if self.session_id != session_id:
                    self.report_and_reset()  # note this does clear self.session_id.
            if self.session_id is None:
                core.log_info('New HK Session id = %i, timestamp = %i' %
                              (session_id, f['start_time']), unit='HKScanner')
                self.session_id = session_id
                self.stats['n_session'] += 1

        elif f['hkagg_type'] == so3g.HKFrameType.status:
            # Have any providers disappeared?
            now_prov_id = [p['prov_id'].value for p in f['providers']]
            for p, info in self.providers.items():
                if p not in now_prov_id:
                    info['active'] = False

            # New providers?
            for p in now_prov_id:
                info = self.providers.get(p)
                if info is not None:
                    if not info['active']:
                        core.log_warn('prov_id %i came back to life.' % p,
                                      unit='HKScanner')
                        self.stats['concerns']['n_warning'] += 1
                        info['n_active'] += 1
                        info['active'] = True
                else:
                    self.providers[p] = {
                        'active': True,  # Currently active (during processing).
                        'n_active': 1,   # Number of times this provider id became active.
                        'n_frames': 0,   # Number of data frames.
                        'timestamp_init': f['timestamp'],  # Timestamp of provider appearance
                        'timestamp_data': None,  # Timestamp of most recent data frame.
                        'ticks': 0,      # Total number of timestamps in all blocks.
                        'span': None,    # (earliest_time, latest_time)
                        'block_streams_map': {},  # Map from field name to block name.
                    }

        elif f['hkagg_type'] == so3g.HKFrameType.data:
            info = self.providers[f['prov_id']]
            vers = f.get('hkagg_version', 0)
            info['n_frames'] += 1
            t_this = f['timestamp']
            if info['timestamp_data'] is None:
                t_ref = info['timestamp_init']
                if t_this < t_ref:
                    # Bug fix: the delta was reported as (t_this - t_ref),
                    # a negative number; "precedes by" implies positive.
                    core.log_warn('data timestamp (%.1f) precedes provider '
                                  'timestamp by %f seconds.'
                                  % (t_this, t_ref - t_this), unit='HKScanner')
                    self.stats['concerns']['n_warning'] += 1
            elif t_this <= info['timestamp_data']:
                core.log_warn('data frame timestamps are not strictly ordered.',
                              unit='HKScanner')
                self.stats['concerns']['n_warning'] += 1
            info['timestamp_data'] = t_this  # update

            t_check = []
            blocks = f['blocks']
            # Per-version accessors for block timestamps and (field, data) items.
            if vers == 0:
                block_timef = lambda block: block.t
                block_itemf = lambda block: [(k, block.data[k])
                                             for k in block.data.keys()]
            elif vers >= 1:
                # Bug fix: the lambda previously read the loop variable `b`
                # instead of its own parameter, working only by accident.
                block_timef = lambda block: np.array(
                    [t.time / core.G3Units.seconds for t in block.times])
                block_itemf = lambda block: [(k, block[k]) for k in block.keys()]

            # Per-version lookup of a block's display name.
            if vers == 0:
                block_name = lambda block_idx: list(
                    sorted(blocks[block_idx].data.keys()))[0]
            elif vers == 1:
                block_name = lambda block_idx: list(
                    sorted(blocks[block_idx].keys()))[0]
            elif vers >= 2:
                block_names = f.get('block_names', [])
                if len(block_names) != len(blocks):
                    # This is a schema error in its own right.
                    core.log_error('Frame does not have "block_names" entry, '
                                   'or it is not the same length as "blocks".',
                                   unit='HKScanner')
                    self.stats['concerns']['n_error'] += 1
                    # Fall back on v1 strategy.
                    block_name = lambda block_idx: list(
                        sorted(blocks[block_idx].keys()))[0]
                else:
                    block_name = lambda block_idx: f['block_names'][block_idx]

            for block_idx, b in enumerate(blocks):
                times = block_timef(b)
                if len(times):
                    if info['span'] is None:
                        info['span'] = times[0], times[-1]
                    else:
                        t0, t1 = info['span']
                        info['span'] = min(times[0], t0), max(times[-1], t1)
                    t_check.append(times[0])
                info['ticks'] += len(times)
                bname = block_name(block_idx)
                for k, v in block_itemf(b):
                    if len(v) != len(times):
                        core.log_error('Field "%s" has %i samples but .t has %i samples.'
                                       % (k, len(v), len(times)))
                        self.stats['concerns']['n_error'] += 1
                    # Make sure field has a block_stream registered.
                    if k not in info['block_streams_map']:
                        info['block_streams_map'][k] = bname
                    if info['block_streams_map'][k] != bname:
                        core.log_error('Field "%s" appeared in block_name %s '
                                       'and later in block_name %s.' %
                                       (k, info['block_streams_map'][k], bname))
                        self.stats['concerns']['n_error'] += 1

            if len(t_check) and abs(min(t_check) - t_this) > 60:
                core.log_warn('data frame timestamp (%.1f) does not correspond to '
                              'data timestamp vectors (%s) .' % (t_this, t_check),
                              unit='HKScanner')
                self.stats['concerns']['n_warning'] += 1

        else:
            core.log_warn('Weird hkagg_type: %i' % f['hkagg_type'],
                          unit='HKScanner')
            self.stats['concerns']['n_warning'] += 1
        return [f]


if __name__ == '__main__':
    import argparse
    parser = argparse.ArgumentParser()
    parser.add_argument('--translate', action='store_true')
    parser.add_argument('--target-version', type=int, default=2)
    parser.add_argument('files', nargs='+')
    args = parser.parse_args()

    # The report is displayed at level LOG_INFO.
    core.set_log_level(core.G3LogLevel.LOG_INFO)

    # Run me on a G3File containing a Housekeeping stream.
    for f in args.files:
        p = core.G3Pipeline()
        p.Add(core.G3Reader(f))
        if args.translate:
            p.Add(hk.HKTranslator(target_version=args.target_version))
        p.Add(HKScanner())
        p.Run()
# backend/urls.py
#
# URL configuration: a nested-router hierarchy of
# tenants -> (users, aws-environments, notification-*, logs)
#   aws-environments -> (resources, regions)
#     regions -> (services, documents)
#       services -> resources -> (monitors, schedules, backups)
#
# NOTE(review): `base_name` was renamed to `basename` in DRF 3.9+ and removed
# in 3.11 — confirm the pinned djangorestframework version before upgrading.
from django.urls import re_path
from backend.views import views, tenant_model_view_set, user_model_view_set, aws_model_view_set, \
    resource_view_set, monitor_view_set, notification_destination_model_view_set, \
    notification_group_view_set,\
    schedule_view_set, backup_view_set, operation_log_model_view_set, document_model_view_set
from rest_framework_nested import routers

# Top-level router: /tenants/
router = routers.SimpleRouter()
router.register(r'tenants', tenant_model_view_set.TenantModelViewSet, base_name='tenants')

# Nested under a tenant: /tenants/{tenant}/...
tenant_router = routers.NestedSimpleRouter(router, r'tenants', lookup='tenant')
tenant_router.register(r'users', user_model_view_set.UserModeViewSet)
tenant_router.register(r'aws-environments', aws_model_view_set.AwsEnvironmentModelViewSet,
                       base_name='aws-environments')
tenant_router.register(r'notification-destinations',
                       notification_destination_model_view_set.NotificationDestinationViewSet,
                       base_name='notification-destinations')
tenant_router.register(r'notification-groups',
                       notification_group_view_set.NotificationGroupViewSet,
                       base_name='notification-groups')
tenant_router.register(r'logs', operation_log_model_view_set.OperationLogModelViewSet,
                       base_name='logs')

# Nested under an AWS environment: /tenants/{tenant}/aws-environments/{aws_env}/...
aws_router = routers.NestedSimpleRouter(tenant_router, r'aws-environments', lookup='aws_env')
aws_router.register(r'resources', resource_view_set.ResourceViewSet, base_name='resources')
aws_router.register(r'regions', resource_view_set.RegionViewSet, base_name='regions')

# Nested under a region: .../regions/{region}/...
region_router = routers.NestedSimpleRouter(aws_router, r'regions', lookup='region')
region_router.register(r'services', resource_view_set.ServiceViewSet, base_name=r'services')
region_router.register(r'documents', document_model_view_set.DocumentViewSet, base_name=r'documents')

# Nested under a service: .../services/{service}/resources/...
service_router = routers.NestedSimpleRouter(region_router, r'services', lookup='service')
service_router.register(r'resources', resource_view_set.ResourceViewSet, base_name=r'resources')

# Nested under a resource: .../resources/{resource}/(monitors|schedules|backups)
resource_router = routers.NestedSimpleRouter(service_router,
                                             r'resources', lookup='resource')
resource_router.register(r'monitors', monitor_view_set.MonitorViewSet, base_name='monitors')
resource_router.register(r'schedules', schedule_view_set.ScheduleViewSet, base_name='schedules')
resource_router.register(r'backups', backup_view_set.BackupViewSet, base_name='backups')

# Catch-all: every path falls through to the SPA home page view.
urlpatterns = [
    re_path('^.*$', views.HomePageView.as_view()),
]
from Constants import constant
from WatchYourBack.Board import Board
from Agents.NegamaxTranspositionTable import Negamax
from ActionBook.ActionBook import ActionBook
from random import randint

'''
THIS IS THE FINAL SUBMISSION: THIS PLAYER IMPLEMENTS THE FOLLOWING TO INFORM ITSELF ON WHAT MOVE TO MAKE NEXT:
    - NEGAMAX WITH TRANSPOSITION TABLE AS ITS MAIN SEARCH STRATEGY
    - GREEDY MOVE ORDERING USING A LIGHT EVALUATION FUNCTION AND SELECTION OF THE BEST MOVES TO COMPLETE
      THE SEARCH ON
    - MOVE ORDERING USING THE TRANSPOSITION TABLE IN NEGAMAX -- WE TRY THE BEST MOVE FOUND SO FAR AT EARLIER DEPTH
      ITERATIONS FIRST, BECAUSE CHANCES ARE, THIS MOVE MAY BE THE BEST MOVE FOR THE NEXT DEPTH AS WELL
    - AN OPENING BOOK OF MOVES TO CUT DOWN SEARCH TIME AT THE START OF THE GAME WHERE THERE ARE POSITIONS THAT WE
      SHOULDN'T NEED TO SEARCH ON.
'''


class Player:
    """Game-playing agent: keeps an internal board, searches with negamax
    (transposition table), and consults an opening action book."""

    def __init__(self, colour):
        # set the colour of the player
        if colour == 'white':
            self.colour = constant.WHITE_PIECE
        elif colour == 'black':
            self.colour = constant.BLACK_PIECE

        # each players internal board representation
        self.board = Board()

        # set up the minimax search strategy -- NEGAMAX
        self.minimax = Negamax(self.board, self.colour, "/eval_weights")

        # set the colour of the opponent
        self.opponent = self.board.get_opp_piece_type(self.colour)

        # set up the mini-max return values
        self.depth_eval = 0
        self.minimax_val = 0
        self.policy_vector = 0

        # initialise the action book
        self.action_book = ActionBook(self.colour)

    def update(self, action):
        """Apply the opponent's action to our internal board representation."""
        # update the board based on the action of the opponent
        if self.board.phase == constant.PLACEMENT_PHASE:
            # update board also returns the pieces of the board that will be eliminated
            self.board.update_board(action, self.opponent)
            self.minimax.update_board(self.board)
        elif self.board.phase == constant.MOVING_PHASE:
            # moving-phase actions are ((from), (to)) coordinate pairs
            if isinstance(action[0], tuple) is False:
                print("ERROR: action is not a tuple")
                return
            # get the "to" square direction using the provided positions
            move_type = self.board.convert_coord_to_direction(action[0], action[1])
            # update the player board representation with the action
            self.board.update_board((action[0], move_type), self.opponent)

    def action(self, turns):
        """Choose our next action: consult the opening book during placement,
        otherwise run an iterative-deepening negamax search.

        Returns a placement coordinate, a ((from), (to)) pair in moving phase,
        or None for a forfeited move.
        """
        # update the negamax/minimax board representation
        self.minimax.update_board(self.board)

        # reset the move counter of the board at the placement->moving switch
        if turns == 0 and self.board.phase == constant.MOVING_PHASE:
            self.board.move_counter = 0
            self.board.phase = constant.MOVING_PHASE

        # check the action book to see if there is a state
        board_state = self.board.board_state
        if self.board.phase == constant.PLACEMENT_PHASE:
            action = self.action_book.check_state(board_state)
            # check if the action is legal
            if action is not None and self.board.check_free_square(action) is True:
                # return the action found and update the board representations
                self.board.update_board(action, self.colour)
                self.minimax.update_board(self.board)
                return action

        # if there is no found state in the action book, therefore we just do a negamax search
        best_move = self.minimax.itr_negamax()
        self.depth_eval = self.minimax.eval_depth
        self.minimax_val = self.minimax.minimax_val
        # do an alpha beta search on this node

        # once we have found the best move we must apply it to the board representation
        if self.board.phase == constant.PLACEMENT_PHASE:
            self.board.update_board(best_move, self.colour)
            self.minimax.update_board(self.board)
            return best_move
        else:
            # if we are in moving phase, return the correctly formatted positions
            if best_move is None:
                # NOTE(review): updating the board with a None action —
                # presumably Board treats this as a forfeited turn; confirm.
                self.board.update_board(best_move, self.colour)
                self.minimax.update_board(self.board)
                return None
            new_pos = Board.convert_direction_to_coord(best_move[0], best_move[1])
            self.board.update_board(best_move, self.colour)
            self.minimax.update_board(self.board)
            return best_move[0], new_pos
#!/usr/bin/env python
#
# Copyright (C) 2016 Inkton <thebird@nest.yt>
#
# This file is part of nester
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.

#from distutils.core import setup
from setuptools import setup

# import nester specifics
from nester.variables import __version__

if __name__ == '__main__':
    setup(name='nester',
          version='%s' % __version__,
          description='Nester Shell',
          long_description="""Nester <long-description>.""",
          author='rajitha wijayaratne',
          author_email='thebird@nest.yt',
          maintainer='rajitha wijayaratne',
          maintainer_email='thebird@nest.yt',
          keywords=['nest', 'api', 'cli', 'python'],
          url='https://nester.yt',
          license='GPL',
          platforms='UNIX',
          scripts=['bin/nester'],
          package_dir={'nester': 'nester'},
          packages=['nester', 'nester.api'],
          data_files=[('/etc', ['etc/nester.conf']),
                      ('share/doc/nester', ['README.md']),
                      ('share/man/man1/', ['man/nester.1'])],
          classifiers=[
              'Development Status :: 5 - Production/Stable',
              # Bug fix: a missing comma after 'Environment :: Console' made
              # Python concatenate it with the next literal into one bogus
              # classifier string.
              'Environment :: Console',
              # NOTE(review): not a standard trove classifier — confirm intent.
              'Intended Audience :: Advanced End Users',
              'Intended Audience :: System Administrators',
              'License :: OSI Approved :: GNU General Public License v3',
              'Operating System :: POSIX',
              'Programming Language :: Python',
              'Topic :: Security',
              'Topic :: System Shells',
              'Topic :: Terminals'
          ],
          )
"""Constants for the Threshold integration.""" DOMAIN = "threshold" CONF_HYSTERESIS = "hysteresis" CONF_LOWER = "lower" CONF_UPPER = "upper" DEFAULT_HYSTERESIS = 0.0
import re
import urllib.request

import pytest
import requests
import requests_mock

from jaraco import services


class TestHTTPStatus:
    """Exercises the failure message produced by HTTPStatus.wait_for_http."""

    def http_error(self, url):
        # Stand-in for urllib.request.urlopen that always fails with 400.
        raise urllib.error.HTTPError(
            url, code=400, msg='Bad Request', hdrs=dict(), fp=None
        )

    def test_HTTPError(self, monkeypatch):
        monkeypatch.setattr(urllib.request, 'urlopen', self.http_error)
        # Pretend the port check succeeds so the HTTP probe is reached.
        monkeypatch.setattr('portend.occupied', lambda *a, **kw: None)

        status = services.HTTPStatus()
        status.port = 80

        with pytest.raises(services.ServiceNotRunningError) as excinfo:
            status.wait_for_http(timeout=0)

        message = str(excinfo.value)
        assert "Received status 400 from " in message
        assert re.search('<jaraco.services.HTTPStatus object .*>', message)
        assert ' on localhost:80' in message
# Third Party from delta import DeltaTable, configure_spark_with_delta_pip # noqa from pyspark.sql import SparkSession from pytest import fixture # About pytest fixtures # https://docs.pytest.org/en/latest/how-to/fixtures.html#how-to-fixtures @fixture(scope="session") def spark(): """Start a local pyspark instance to test against.""" # Setup builder = ( SparkSession.builder.appName("MyApp") .config("spark.sql.extensions", "io.delta.sql.DeltaSparkSessionExtension") .config("spark.sql.catalog.spark_catalog", "org.apache.spark.sql.delta.catalog.DeltaCatalog") ) spark = configure_spark_with_delta_pip(builder).getOrCreate() yield spark # Tear down spark.stop() @fixture def spark_logger(spark): # Hook into underlying log4j logger log4jLogger = spark._jvm.org.apache.log4j logger = log4jLogger.LogManager.getLogger(__name__) logger.setLevel(log4jLogger.Level.WARN) yield logger
"""Dispatch downloaded COVID data files to the matching ingestion script."""
import os
from argparse import ArgumentParser

if __name__ == "__main__":
    # Argument parsing setup
    parser = ArgumentParser()
    parser.add_argument('--path', type=str, default='~/Downloads',
                        help='The path to the directory where data files are stored. Defaults to ~/Downloads.')
    args = parser.parse_args()
    data_path = args.path

    # Expected data-file names (each already starts with a "/").
    __expected_doses_file__ = "/Total_Doses_by_County_data.csv"
    __expected_initiated_file__ = "/Initiated_Vaccinations_by_Sex_data.csv"
    __expected_completed_file__ = "/Completed_Vaccinations_by_Sex_data.csv"
    __expected_testing_file__ = "/Metrics_by_Test_Date_by_County_data.csv"
    __expected_filenames__ = [__expected_doses_file__, __expected_initiated_file__,
                              __expected_completed_file__, __expected_testing_file__]

    # If the path includes ~ for the user's home directory, expand it
    if '~' in data_path:
        data_path = os.path.expanduser(data_path)

    for filename in __expected_filenames__:
        # Bug fix: the path was built from a literal placeholder instead of
        # the current filename, so no expected file was ever located.
        path = f"{data_path}/{filename}".replace("//", "/")
        # Choose the ingestion script from the filename; every expected name
        # matches exactly one branch.
        if "doses" in filename.lower() or "vaccinations" in filename.lower():
            data_type = "vaccine"
        elif "metrics" in filename.lower():
            data_type = "test"
        if os.path.exists(path):
            os.system(f"python add-{data_type}-data.py {path} --quiet")
        else:
            print(f"{path} does not exist. Skipping.")
import os

import pytest
import requests
import requests_mock

from valor import Schema


def _schema_path():
    """Absolute path of the schema.json bundled next to this test module."""
    return os.path.join(os.path.dirname(__file__), 'schema.json')


@pytest.fixture
def schema_fname():
    """Filename of the test schema."""
    return _schema_path()


@pytest.fixture
def schema():
    """Schema instance loaded from the bundled schema.json."""
    return Schema.from_file(_schema_path())


@pytest.fixture
def session():
    """requests Session whose http(s) traffic is routed to a mock adapter."""
    session = requests.Session()
    adapter = requests_mock.Adapter()
    session.requests_mock = adapter
    for prefix in ('http://', 'https://'):
        session.mount(prefix, adapter)
    return session
#from .asyncdistribute import AsyncDistribute from .workerresource import * from .workerprocess import * from .workerpool import * from .messaging import * from .exceptions import *
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
from matplotlib.animation import FuncAnimation
# NOTE(review): cv2 and np are assumed to come from this star import — confirm.
from c3d.utils.video_util import *


def visualize_clip(clip, convert_bgr=False, save_gif=False, file_path=None):
    """Animate a clip (sequence of frames) and optionally save it as a GIF.

    Args:
        clip: sequence of image frames.
        convert_bgr: if True, convert each frame from BGR to RGB first.
        save_gif: if True, write the animation to file_path via imagemagick.
        file_path: output path used when save_gif is True.
    """
    num_frames = len(clip)
    fig, ax = plt.subplots()
    fig.set_tight_layout(True)

    def update(i):
        # Render frame i; BGR inputs (e.g. from OpenCV) are converted for display.
        if convert_bgr:
            frame = cv2.cvtColor(clip[i], cv2.COLOR_BGR2RGB)
        else:
            frame = clip[i]
        plt.imshow(frame)
        return plt

    # FuncAnimation will call the 'update' function for each frame; here
    # animating over 10 frames, with an interval of 20ms between frames.
    anim = FuncAnimation(fig, update, frames=np.arange(0, num_frames), interval=1)
    if save_gif:
        anim.save(file_path, dpi=80, writer='imagemagick')
    else:
        # plt.show() will just loop the animation forever.
        plt.show()


def visualize_predictions(video_path, predictions, save_path):
    """Animate a video alongside a growing per-frame prediction curve.

    Args:
        video_path: path to the source video (read via get_video_frames).
        predictions: one prediction value per frame; must match frame count.
        save_path: if truthy, save the animation there; otherwise display it.
    """
    frames = get_video_frames(video_path)
    assert len(frames) == len(predictions)

    fig, ax = plt.subplots(figsize=(5, 5))
    fig.set_tight_layout(True)
    # Top panel: current frame; bottom panel: prediction trace so far.
    fig_frame = plt.subplot(2, 1, 1)
    fig_prediction = plt.subplot(2, 1, 2)
    fig_prediction.set_xlim(0, len(frames))
    fig_prediction.set_ylim(0, 1.15)

    def update(i):
        frame = frames[i]
        x = range(0, i)
        y = predictions[0:i]
        fig_prediction.plot(x, y, '-')
        fig_frame.imshow(frame)
        return plt

    # FuncAnimation will call the 'update' function for each frame; here
    # animating over 10 frames, with an interval of 20ms between frames.
    anim = FuncAnimation(fig, update, frames=np.arange(0, len(frames), 10),
                         interval=1, repeat=False)

    if save_path:
        anim.save(save_path, dpi=200, writer='imagemagick')
    else:
        plt.show()

    return
#Introduction to Sets def average(array): # your code goes here return sum(set(array))/len(set(array)) #_____________________________________________________________ #Symmetric Difference # Enter your code here. Read input from STDIN. Print output to STDOUT a,b=(int(input()),input().split()) #a it is size of b c,d=(int(input()),input().split()) #c is size of d x=set(b) y=set(d) p=y.difference(x) # as in example finding their difference q=x.difference(y) r=p.union(q) #at the end union differences in order to print out and do it as sorted print ('\n'.join(sorted(r, key=int))) #__________________________________________________________________ #Set .add() # Enter your code here. Read input from STDIN. Print output to STDOUT num = int(raw_input()) countries = set() for i in range(num): countries.add(raw_input()) print len(countries) #______________________________________________________________
__author__ = "Flora" __version__ = "2020.10.28" """ This componment gets the x,y,z list and creates seperate x,y,z, lists per AA atom. Inputs: AA: Amino Acid id number (int list) AA_name: Amino Acid name (string list) xyz: coordinates in 3Dspace per atom (3d point rhino item) atoms: atom names (string list) Output: a: xyz coordinates for debug start: list of coordinates of all atoms per AA (for starting points of lines) end: list of coordinates of all atoms per AA (for ending poins of lines) """ import rhinoscriptsyntax as rs x_list,y_list,z_list=[],[],[] def create_xyz_list(): for i in range (len(xyz)): x1,y1,z1=[],[],[] for j in range (len(xyz[i])): if (xyz[i][j]!=None): xyz_str= str(xyz[i][j]) x,y,z= xyz_str.split(",") x1.append(x) y1.append(y) z1.append(z) else: x1.append("Ter") y1.append("Ter") z1.append("Ter") x_list.append(x1) y_list.append(y1) z_list.append(z1) create_xyz_list()
# Generated re-export module for the zone205 schema bindings.
from output.models.saxon_data.zone.zone205_xsd.zone205 import (
    Doc,
    DocValue,
)

# Public API of this module.
__all__ = [
    "Doc",
    "DocValue",
]
# Run the generic ozone transformation test with no transformation applied.
import pyaf.tests.transformations.test_ozone_transf_generic as gen

gen.test_transformation('None');
""" admin.py """ from django.contrib import admin from django.contrib import messages from .models import Element class ComposerElementAdmin(admin.ModelAdmin): """ Admin for the composer elements. """ fields = ('content', 'context_example') readonly_fields = ( 'template_name', 'name', 'is_dynamic', 'has_changed', 'last_changed', 'changed_by') list_display = ('template_name', 'name', 'has_changed', 'last_changed', 'changed_by') list_filter = ('is_dynamic', 'has_changed') def save_model(self, request, obj, form, change): """ Attempt to save the model. """ try: obj.attempt_update(form.cleaned_data['content'], request.user) except Exception as e: messages.error(request, 'Could not update element, %s' % str(e)) admin.site.register(Element, ComposerElementAdmin)
import pathlib
import tempfile
import unittest
from unittest.mock import patch

import yaml
from click.testing import CliRunner

from data_pipelines_cli.cli import _cli
from data_pipelines_cli.cli_commands.prepare_env import prepare_env
from data_pipelines_cli.errors import JinjaVarKeyError


class GenHomeProfilesCommandTestCase(unittest.TestCase):
    """Tests for `prepare-env` profiles.yml generation.

    Each test redirects BUILD_DIR, cwd and home to temporary directories so
    the command reads fixtures from `goldens/` and writes under a sandbox.
    """

    # Directory holding fixture configs and expected outputs.
    goldens_dir_path = pathlib.Path(__file__).parent.parent.joinpath("goldens")

    # Expected profiles.yml content when dbt vars and env vars are rendered.
    rendered_from_vars_profile = {
        "bigquery": {
            "target": "env_execution",
            "outputs": {
                "env_execution": {
                    "method": "service-account",
                    "project": "example-project",
                    "dataset": "var21-dataset",
                    "keyfile": "/tmp/a/b/c/d.json",
                    "timeout_seconds": 150,
                    "priority": "interactive",
                    "location": "us-west1",
                    "threads": 1337,
                    "retries": 1,
                    "type": "bigquery",
                }
            },
        }
    }

    def setUp(self) -> None:
        # Show full diffs on dict comparison failures.
        self.maxDiff = None

    def test_no_var_profiles_generation(self):
        """CLI `prepare-env` reproduces the golden snowflake profile."""
        runner = CliRunner()
        with tempfile.TemporaryDirectory() as tmp_dir, patch(
            "data_pipelines_cli.cli_constants.BUILD_DIR", pathlib.Path(tmp_dir)
        ), patch("data_pipelines_cli.config_generation.BUILD_DIR", pathlib.Path(tmp_dir),), patch(
            "data_pipelines_cli.dbt_utils.BUILD_DIR",
            pathlib.Path(tmp_dir),
        ), patch(
            "pathlib.Path.cwd", lambda: self.goldens_dir_path
        ), tempfile.TemporaryDirectory() as tmp_dir2, patch(
            "pathlib.Path.home", lambda: pathlib.Path(tmp_dir2)
        ), patch(
            "data_pipelines_cli.dbt_utils.subprocess_run", lambda _args: None
        ):
            runner.invoke(_cli, ["prepare-env"])
            with open(
                pathlib.Path(tmp_dir2).joinpath(".dbt", "profiles.yml"), "r"
            ) as generated, open(
                self.goldens_dir_path.joinpath("example_profiles", "local_snowflake.yml"),
                "r",
            ) as prepared:
                self.assertDictEqual(yaml.safe_load(prepared), yaml.safe_load(generated))

    def test_vars_profiles_generation(self):
        """`prepare_env` renders dbt vars and env vars into profiles.yml."""
        with tempfile.TemporaryDirectory() as tmp_dir, patch(
            "data_pipelines_cli.cli_constants.BUILD_DIR", pathlib.Path(tmp_dir)
        ), patch("data_pipelines_cli.config_generation.BUILD_DIR", pathlib.Path(tmp_dir),), patch(
            "data_pipelines_cli.dbt_utils.BUILD_DIR",
            pathlib.Path(tmp_dir),
        ), patch.dict(
            "os.environ", BIGQUERY_KEYFILE="/tmp/a/b/c/d.json"
        ), patch(
            "pathlib.Path.cwd", lambda: self.goldens_dir_path
        ), tempfile.TemporaryDirectory() as tmp_dir2, patch(
            "pathlib.Path.home", lambda: pathlib.Path(tmp_dir2)
        ), patch(
            "data_pipelines_cli.dbt_utils.subprocess_run", lambda _args, **_kwargs: None
        ):
            prepare_env("staging")
            with open(pathlib.Path(tmp_dir2).joinpath(".dbt", "profiles.yml"), "r") as generated:
                self.assertDictEqual(self.rendered_from_vars_profile, yaml.safe_load(generated))

    def test_raise_missing_variable(self):
        """Missing dbt vars (stubbed to empty) raise JinjaVarKeyError."""
        with tempfile.TemporaryDirectory() as tmp_dir, patch(
            "data_pipelines_cli.cli_constants.BUILD_DIR", pathlib.Path(tmp_dir)
        ), patch("data_pipelines_cli.config_generation.BUILD_DIR", pathlib.Path(tmp_dir),), patch(
            "data_pipelines_cli.cli_commands.prepare_env.read_dbt_vars_from_configs",
            lambda _env: {},
        ), patch.dict(
            "os.environ", BIGQUERY_KEYFILE="/tmp/a/b/c/d.json"
        ), patch(
            "pathlib.Path.cwd", lambda: self.goldens_dir_path
        ), tempfile.TemporaryDirectory() as tmp_dir2, patch(
            "pathlib.Path.home", lambda: pathlib.Path(tmp_dir2)
        ), patch(
            "data_pipelines_cli.dbt_utils.subprocess_run", lambda _args: None
        ):
            with self.assertRaises(JinjaVarKeyError):
                prepare_env("staging")

    def test_raise_missing_environment_variable(self):
        """A template env var absent from os.environ raises JinjaVarKeyError."""
        with tempfile.TemporaryDirectory() as tmp_dir, patch(
            "data_pipelines_cli.cli_constants.BUILD_DIR", pathlib.Path(tmp_dir)
        ), patch("data_pipelines_cli.config_generation.BUILD_DIR", pathlib.Path(tmp_dir),), patch(
            "data_pipelines_cli.dbt_utils.BUILD_DIR",
            pathlib.Path(tmp_dir),
        ), patch.dict(
            "os.environ", {}
        ), patch(
            "pathlib.Path.cwd", lambda: self.goldens_dir_path
        ), tempfile.TemporaryDirectory() as tmp_dir2, patch(
            "pathlib.Path.home", lambda: pathlib.Path(tmp_dir2)
        ), patch(
            "data_pipelines_cli.dbt_utils.subprocess_run", lambda _args: None
        ):
            with self.assertRaises(JinjaVarKeyError):
                prepare_env("staging")
BASE_DIR = ".\\" BASE_PREPROCESSEDIMAGES_DIR = BASE_DIR + "data_preprocessed_images\\" BASE_TRAIN_SEGMENT_DIR = BASE_DIR + "data_segmenter_trainset\\" PATIENT_PRED_DIR = BASE_DIR + "data_patient_predictions\\" # Quick mode does away with training in different folds. # It does overfit a little in the calibration and submission step. # However it still scores ~0.010552 on the private LB which is enough for the 3rd place # The advantages is that it takes only 4-5 hours to train and 1 hour to predict. QUICK_MODE = True MODEL_NAME = "model_quick" if QUICK_MODE else "model_full" TRAIN_EPOCHS = 20 if QUICK_MODE else 30 FOLD_COUNT = 6 TARGET_SIZE = 256 CROP_INDENT_X = 32 TARGET_CROP = 184 CROP_INDENT_Y = 32 - ((TARGET_CROP - 160) / 2) CROP_SIZE = 16
from os import path
from pathlib import Path
from shutil import make_archive
from time import sleep

from requests import get, post, put


class AuthorizationError(Exception):
    """Raised when OAuth2 device authorization fails or is rejected."""
    pass


class RequestError(Exception):
    """Raised when a RoboEpics API request returns an unexpected status."""
    pass


def needs_authorization(func):
    """Decorator: require `authenticate` to have been called first."""
    def inner(self, *args, **kwargs):
        if self._access_token is None:
            raise RequestError("You should call `authenticate` method before using the client!")
        return func(self, *args, **kwargs)
    return inner


class RoboEpicsClient:
    """Client for the RoboEpics competition API.

    Handles OAuth2 device-flow authentication against FusionAuth, dataset
    download, notebook sync, code commit and result submission.
    """

    fusionauth_base_url = 'https://fusion.roboepics.com'
    roboepics_api_base_url = 'https://api.roboepics.com'
    client_id = '7126a051-baea-4fe1-bdf8-fde2fdb31f97'
    problem_id = None
    problem_enter_id = None

    def __init__(self, problem_id: int, problem_enter_id: int, roboepics_api_base_url: str = None,
                 fusionauth_base_url: str = None, client_id: str = None, auto_authenticate: bool = True):
        self.problem_id = problem_id
        self.problem_enter_id = problem_enter_id

        if roboepics_api_base_url is not None:
            self.roboepics_api_base_url = roboepics_api_base_url
        if fusionauth_base_url is not None:
            self.fusionauth_base_url = fusionauth_base_url
        if client_id is not None:
            self.client_id = client_id

        self._device_code = None
        self._access_token = None

        if auto_authenticate:
            self.authenticate()

    @property
    def header(self):
        """Authorization header for authenticated API calls."""
        return {'Authorization': "Bearer " + self._access_token}

    def authenticate(self):
        """Run the OAuth2 device flow: prompt the user, then poll for a token.

        Raises:
            AuthorizationError: if the device authorization request fails or
                the token poll reports an invalid request.
        """
        response = post(self.fusionauth_base_url + '/oauth2/device_authorize',
                        data={'client_id': self.client_id, 'scope': 'offline_access'})
        if response.status_code != 200:
            raise AuthorizationError

        body = response.json()
        self._device_code = body['device_code']
        interval = body['interval']
        print(f"Open this URL and confirm your login: {self.fusionauth_base_url}/oauth2/device?client_id={self.client_id}&user_code={body['user_code']}")

        # Poll the token endpoint until the user confirms the login.
        while True:
            sleep(interval)
            response = post(self.fusionauth_base_url + '/oauth2/token',
                            data={'client_id': self.client_id, 'device_code': self._device_code,
                                  'grant_type': 'urn:ietf:params:oauth:grant-type:device_code'})
            body = response.json()
            if response.status_code == 400 and body['error'] == 'invalid_request':
                raise AuthorizationError
            if 'access_token' in body:
                self._access_token = body['access_token']
                break
        print("Login successful.")

    @needs_authorization
    def download_dataset(self, download_destination_path: str = '/data'):
        """Download all dataset files for the problem.

        Returns:
            dict mapping each dataset path to the list of downloaded file paths.
        """
        response = get(f"{self.roboepics_api_base_url}/problem/{self.problem_id}", headers=self.header)
        if response.status_code != 200:
            raise RequestError(response.text)
        datas = response.json()['datasets']

        result = {}
        for data in datas:
            paths = []
            data_directory = '/'.join((download_destination_path, data['dataset_path'], str(data['version'])))
            Path(data_directory).mkdir(parents=True, exist_ok=True)
            for file in data['file_set']:
                # Stream each file to disk in chunks.
                response = get(file['url'], stream=True)
                p = '/'.join((data_directory, file['file_name']))
                with open(p, 'wb') as fd:
                    for chunk in response.iter_content(chunk_size=128):
                        fd.write(chunk)
                paths.append(p)
            result[data['dataset_path']] = paths
        print("Datasets are downloaded successfully.")
        return result

    @needs_authorization
    def sync(self) -> str:
        """Sync the current notebook state; returns the server's reference id."""
        response = post(f"{self.roboepics_api_base_url}/problem/enter/{str(self.problem_enter_id)}/sync-notebook",
                        headers=self.header)
        if response.status_code != 201:
            raise RequestError(response.text)
        return response.json()['reference']

    @needs_authorization
    def commit(self, directory: str, message: str = None, clean_repo: bool = False, submit: bool = False) -> str:
        """Archive `directory` and upload it as a commit; returns its reference.

        Raises:
            ValueError: if `directory` is not a directory.
            RequestError: if the upload is rejected.
        """
        if not path.isdir(directory):
            raise ValueError("Invalid directory path!")

        # Create an archive from the code directory
        make_archive('code', 'zip', directory)

        # Upload the zip file.
        # Bug fix: the archive handle was opened inline and never closed
        # (a file-descriptor leak); use a context manager instead.
        with open('code.zip', 'rb') as archive:
            response = post(self.roboepics_api_base_url + f"/problem/enter/{self.problem_enter_id}/upload", data={
                "message": message,
                "clean_repo": clean_repo,
                "submit": submit
            }, files={
                "file": archive
            }, headers=self.header)
        if response.status_code != 201:
            raise RequestError(response.text)

        print("Directory successfully committed.")
        return response.json()['reference']

    @needs_authorization
    def upload_result_file(self, file_path: str):
        """Upload a result file to S3 via a pre-signed URL from the API."""
        # Request an S3 pre-signed url to upload result file
        response = post(f"{self.roboepics_api_base_url}/problem/enter/{str(self.problem_enter_id)}/upload-result",
                        data={'filename': file_path.split('/')[-1]}, headers=self.header)
        if response.status_code != 201:
            raise RequestError(response.text)
        body = response.json()

        # Upload the result file to S3
        s3_url = body['url']
        with open(file_path, 'rb') as f:
            s3_response = put(s3_url, data=f)
            if s3_response.status_code != 200:
                raise RequestError(s3_response.text)

    @needs_authorization
    def submit(self, result_file_path: str, reference: str = None) -> int:
        """Submit a result file (syncing first if no reference is given).

        Returns:
            The new submission's id.
        """
        if reference is None:
            reference = self.sync()

        self.upload_result_file(result_file_path)

        # Create a new submission
        response = post(f"{self.roboepics_api_base_url}/problem/enter/{self.problem_enter_id}/submissions", data={
            "reference": reference
        }, headers=self.header)
        if response.status_code != 201:
            raise RequestError(response.text)

        print("Submission was successfully sent.")
        return response.json()['id']