import os
import torch
import torchvision

# Export pretrained torchvision / torch.hub models to TorchScript so they can
# be loaded later without the Python model definitions.
os.makedirs("./models", exist_ok=True)

if not os.path.isfile("./models/maskrcnn_resnet50_fpn.pt"):
    model = torchvision.models.detection.maskrcnn_resnet50_fpn(pretrained=True)
    model.eval()
    script_model = torch.jit.script(model)
    script_model.save("./models/maskrcnn_resnet50_fpn.pt")

if not os.path.isfile("./models/deeplabv3_resnet50.pt"):
    model = torchvision.models.segmentation.deeplabv3_resnet50(pretrained=True)
    model.eval()
    script_model = torch.jit.script(model)
    script_model.save("./models/deeplabv3_resnet50.pt")

if not os.path.isfile("./models/deeplabv3_resnet101.pt"):
    model = torchvision.models.segmentation.deeplabv3_resnet101(pretrained=True)
    model.eval()
    script_model = torch.jit.script(model)
    script_model.save("./models/deeplabv3_resnet101.pt")

if not os.path.isfile("./models/keypointrcnn_resnet50_fpn.pt"):
    model = torchvision.models.detection.keypointrcnn_resnet50_fpn(pretrained=True)
    model.eval()
    script_model = torch.jit.script(model)
    script_model.save("./models/keypointrcnn_resnet50_fpn.pt")

if not os.path.isfile("./models/MiDaS.pt"):
    midas = torch.hub.load("intel-isl/MiDaS", "MiDaS")
    midas.eval()
    script_model = torch.jit.script(midas)
    script_model.save("./models/MiDaS.pt")
|
# Copyright 2017-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import csv
import cv2
import itertools
import json
import numpy as np
import pickle
import time
__all__ = ['House']
######################################
# Util Functions
######################################
# allowed target room types
# NOTE: consider "toilet" and "bathroom" the same thing
ALLOWED_TARGET_ROOM_TYPES = ['kitchen', 'dining_room', 'living_room', 'bathroom', 'bedroom'] # 'office'
# allowed room types for auxiliary prediction task
ALLOWED_PREDICTION_ROOM_TYPES = dict(
outdoor=0, indoor=1, kitchen=2, dining_room=3, living_room=4, bathroom=5, bedroom=6, office=7, storage=8)
def _equal_room_tp(room, target):
"""
NOTE: Ensure <target> is always from <ALLOWED_TARGET_ROOM_TYPES>!!!!
DO NOT swap the order of arguments
"""
room = room.lower()
target = target.lower()
return (room == target) or \
((target == 'bathroom') and (room == 'toilet')) or \
((target == 'bedroom') and (room == 'guest_room'))
def _get_pred_room_tp_id(room):
room = room.lower()
if room == 'toilet':
room = 'bathroom'
elif room == 'guest_room':
room = 'bedroom'
if room not in ALLOWED_PREDICTION_ROOM_TYPES:
return ALLOWED_PREDICTION_ROOM_TYPES['indoor']
return ALLOWED_PREDICTION_ROOM_TYPES[room]
def parse_walls(objFile, lower_bound = 1.0):
def create_box(vers):
if len(vers) == 0:
return None
v_max = [-1e20, -1e20, -1e20]
v_min = [1e20, 1e20, 1e20]
for v in vers:
for i in range(3):
if v[i] < v_min[i]: v_min[i] = v[i]
if v[i] > v_max[i]: v_max[i] = v[i]
obj = {}
obj['bbox'] = {}
obj['bbox']['min']=v_min
obj['bbox']['max']=v_max
if v_min[1] < lower_bound:
return obj
return None
walls = []
with open(objFile, 'r') as file:
vers = []
for line in file.readlines():
if len(line) < 2: continue
if line[0] == 'g':
if (vers is not None) and (len(vers) > 0): walls.append(create_box(vers))
if ('Wall' in line):
vers = []
else:
vers = None
if (vers is not None) and (line[0] == 'v') and (line[1] == ' '):
vals = line[2:]
coor =[float(v) for v in vals.split(' ') if len(v)>0]
if len(coor) != 3:
print('line = {}'.format(line))
print('coor = {}'.format(coor))
assert(False)
vers.append(coor)
if (vers is not None) and (len(vers) > 0): walls.append(create_box(vers))
ret_walls = [w for w in walls if w is not None]
return ret_walls
def fill_region(proj, x1, y1, x2, y2, c):
proj[x1:(x2 + 1), y1:(y2 + 1)] = c
def fill_obj_mask(house, dest, obj, c=1):
n_row = dest.shape[0]
_x1, _, _y1 = obj['bbox']['min']
_x2, _, _y2 = obj['bbox']['max']
x1,y1,x2,y2 = house.rescale(_x1,_y1,_x2,_y2,n_row)
fill_region(dest, x1, y1, x2, y2, c)
class House(object):
"""core class for loading and processing a house from SUNCG dataset
"""
def __init__(self, JsonFile, ObjFile, MetaDataFile,
CachedFile=None,
StorageFile=None,
GenRoomTypeMap=False,
EagleViewRes=100,
DebugInfoOn=False,
ColideRes=1000,
RobotRadius=0.1,
RobotHeight=1.0,
CarpetHeight=0.15,
SetTarget=False,
ApproximateMovableMap=False,
_IgnoreSmallHouse=False, # should be only set true when called by "cache_houses.py"
DebugMessages=False
):
"""Initialization and Robot Parameters
Note:
Generally only the first 4 arguments are required to set up a house
Ensure you run the script to generate cached data for all the houses
Args:
JsonFile (str): file name of the house json file (house.json)
ObjFile (str): file name of the house object file (house.obj)
MetaDataFile (str): file name of the meta data (ModelCategoryMapping.csv)
CachedFile (str, recommended): file name of the pickled cached data for this house, None if no such cache (cachedmap1k.pkl)
StorageFile (str, optional): if CachedFile is None, pickle all the data and store in this file
GenRoomTypeMap (bool, optional): if turned on, generate the room type map for each location
EagleViewRes (int, optional): resolution of the topdown 2d map
DebugInfoOn (bool, optional): store additional debugging information when this option is on
        ColideRes (int, optional): resolution of the 2d map for collision check (generally should not be changed)
RobotRadius (double, optional): radius of the robot/agent (generally should not be changed)
RobotHeight (double, optional): height of the robot/agent (generally should not be changed)
        CarpetHeight (double, optional): maximum height of the obstacles that the agent can directly pass over (generally should not be changed)
SetTarget (bool, optional): whether or not to choose a default target room and pre-compute the valid locations
ApproximateMovableMap (bool, optional): Fast initialization of valid locations which are not as accurate or fine-grained. Requires OpenCV if true
        DebugMessages (bool, optional): whether or not to show debug messages
"""
if DebugMessages == True:
ts = time.time()
print('Data Loading ...')
self.metaDataFile = MetaDataFile
self.objFile = ObjFile
self.robotHei = RobotHeight
self.carpetHei = CarpetHeight
self.robotRad = RobotRadius
self._debugMap = None if not DebugInfoOn else True
with open(JsonFile) as jfile:
self.house = house = json.load(jfile)
self.all_walls = parse_walls(ObjFile, RobotHeight)
# validity check
if abs(house['scaleToMeters'] - 1.0) > 1e-8:
print('[Error] Currently <scaleToMeters> must be 1.0!')
assert(False)
if len(house['levels']) > 1 and DebugMessages == True:
print('[Warning] Currently only support ground floor! <total floors = %d>' % (len(house['levels'])))
self.level = level = house['levels'][0] # only support ground floor now
self.L_min_coor = _L_lo = np.array(level['bbox']['min']).astype('float32')
self.L_lo = min(_L_lo[0], _L_lo[2])
self.L_max_coor = _L_hi = np.array(level['bbox']['max']).astype('float32')
self.L_hi = max(_L_hi[0], _L_hi[2])
self.L_det = self.L_hi - self.L_lo
self.n_row = ColideRes
self.eagle_n_row = EagleViewRes
self.grid_det = self.L_det / np.float32(self.n_row)
self.all_obj = [node for node in level['nodes'] if node['type'].lower() == 'object']
self.all_rooms = [node for node in level['nodes'] if (node['type'].lower() == 'room') and ('roomTypes' in node)]
self.all_roomTypes = [room['roomTypes'] for room in self.all_rooms]
self.all_desired_roomTypes = []
self.default_roomTp = None
for roomTp in ALLOWED_TARGET_ROOM_TYPES:
if any([any([_equal_room_tp(tp, roomTp) for tp in tps]) for tps in self.all_roomTypes]):
self.all_desired_roomTypes.append(roomTp)
if self.default_roomTp is None: self.default_roomTp = roomTp
assert self.default_roomTp is not None, 'Cannot Find Any Desired Rooms!'
if DebugMessages == True:
print('>> Default Target Room Type Selected = {}'.format(self.default_roomTp))
print(' --> Done! Elapsed = %.2fs' % (time.time()-ts))
if _IgnoreSmallHouse and ((len(self.all_desired_roomTypes) < 2) or ('kitchen' not in self.all_desired_roomTypes)):
self.all_desired_roomTypes=[]
return
if DebugMessages == True:
print('Generating Low Resolution Obstacle Map ...')
ts = time.time()
# generate a low-resolution obstacle map
self.tinyObsMap = np.ones((self.eagle_n_row, self.eagle_n_row), dtype=np.uint8)
self.genObstacleMap(MetaDataFile, gen_debug_map=False, dest=self.tinyObsMap, n_row=self.eagle_n_row-1)
self.eagleMap = np.zeros((4, self.eagle_n_row, self.eagle_n_row), dtype=np.uint8)
self.eagleMap[0, ...] = self.tinyObsMap
if DebugMessages == True:
print(' --> Done! Elapsed = %.2fs' % (time.time()-ts))
# load from cache
if CachedFile is not None:
            assert not DebugInfoOn, 'Please set DebugInfoOn=False when loading data from cached file!'
if DebugMessages == True:
print('Loading Obstacle Map and Movability Map From Cache File ...')
ts = time.time()
with open(CachedFile, 'rb') as f:
self.obsMap, self.moveMap = pickle.load(f)
if DebugMessages == True:
print(' --> Done! Elapsed = %.2fs' % (time.time()-ts))
else:
# generate obstacle map
if DebugMessages == True:
print('Generate High Resolution Obstacle Map (For Collision Check) ...')
ts = time.time()
            # obsMap is indexed by (x, y), not (y, x)
            self.obsMap = np.ones((self.n_row+1, self.n_row+1), dtype=np.uint8)  # a small int is enough
            if self._debugMap is not None:
                self._debugMap = np.ones((self.n_row+1, self.n_row+1), dtype=float)
self.genObstacleMap(MetaDataFile)
if DebugMessages == True:
print(' --> Done! Elapsed = %.2fs' % (time.time()-ts))
# generate movability map for robots considering the radius
if DebugMessages == True:
print('Generate Movability Map ...')
ts = time.time()
self.moveMap = np.zeros((self.n_row+1, self.n_row+1), dtype=np.int8) # initially not movable
self.genMovableMap(ApproximateMovableMap)
if DebugMessages == True:
print(' --> Done! Elapsed = %.2fs' % (time.time()-ts))
if StorageFile is not None:
if DebugMessages == True:
print('Storing Obstacle Map and Movability Map to Cache File ...')
ts = time.time()
with open(StorageFile, 'wb') as f:
pickle.dump([self.obsMap, self.moveMap], f)
if DebugMessages == True:
print(' --> Done! Elapsed = %.2fs' % (time.time()-ts))
# set target room connectivity
if DebugMessages == True:
ts = time.time()
self.connMapDict = {}
self.roomTypeLocMap = {} # roomType -> feasible locations
self.targetRoomTp = None
self.targetRooms = []
self.connMap = None
self.inroomDist = None
if SetTarget:
if DebugMessages == True:
print('Generate Target connectivity Map (Default <{}>) ...'.format(self.default_roomTp))
self.setTargetRoom(self.default_roomTp, _setEagleMap=True)
if DebugMessages == True:
print(' --> Done! Elapsed = %.2fs' % (time.time()-ts))
self.roomTypeMap = None
if GenRoomTypeMap:
if DebugMessages == True:
ts = time.time()
print('Generate Room Type Map ...')
self.roomTypeMap = np.zeros((self.n_row+1, self.n_row+1), dtype=np.uint16)
self._generate_room_type_map()
if DebugMessages == True:
print(' --> Done! Elapsed = %.2fs' % (time.time() - ts))
def _generate_room_type_map(self):
rtMap = self.roomTypeMap
# fill all the mask of rooms
for room in self.all_rooms:
msk = 1 << _get_pred_room_tp_id('indoor')
for tp in room['roomTypes']: msk = msk | (1 << _get_pred_room_tp_id(tp))
_x1, _, _y1 = room['bbox']['min']
_x2, _, _y2 = room['bbox']['max']
x1, y1, x2, y2 = self.rescale(_x1, _y1, _x2, _y2)
for x in range(x1, x2+1):
for y in range(y1, y2+1):
if self.moveMap[x, y] > 0:
rtMap[x, y] = rtMap[x, y] | msk
for x in range(self.n_row+1):
for y in range(self.n_row+1):
if (self.moveMap[x, y] > 0) and (rtMap[x, y] == 0):
rtMap[x, y] = 1 << _get_pred_room_tp_id('outdoor')
def _find_components(self, x1, y1, x2, y2, dirs=None, return_largest=False, return_open=False):
"""
return a list of components (coors), which are grid locations connencted and canMove
@:param return_largest: return_largest == True, return only a single list of coors, the largest components
@:param return_open: return_open == True, return only those components connected to outside of the room
@:param dirs: connected directions, by default 4-connected (L,R,U,D)
"""
if dirs is None:
dirs = [[0, 1], [1, 0], [-1, 0], [0, -1]]
comps = []
open_comps = set()
visit = {}
n = 0
for x in range(x1, x2+1):
for y in range(y1, y2+1):
pos = (x, y)
if self.canMove(x, y) and (pos not in visit):
que = [pos]
visit[pos] = n
ptr = 0
is_open = False
while ptr < len(que):
cx, cy = que[ptr]
ptr += 1
for det in dirs:
tx, ty = cx + det[0], cy + det[1]
if self.canMove(tx, ty):
if (tx < x1) or (tx > x2) or (ty < y1) or (ty > y2):
is_open=True
continue
tp = (tx, ty)
if tp not in visit:
visit[tp] = n
que.append(tp)
if is_open: open_comps.add(n)
n += 1
comps.append(que)
if n == 0: return [] # no components found!
ret_comps = comps
if return_open:
if len(open_comps) == 0:
print('WARNING!!!! [House] <find components in Target Room [%s]> No Open Components Found!!!! Return Largest Instead!!!!' % self.targetRoomTp)
return_largest = True
else:
ids = sorted(list(open_comps))
ret_comps = [comps[i] for i in ids]
if return_largest:
max_c = np.argmax([len(c) for c in ret_comps])
ret_comps = ret_comps[max_c]
# del visit
return ret_comps
"""
set the distance to a particular room type
"""
def setTargetRoom(self, targetRoomTp = 'kitchen', _setEagleMap = False):
targetRoomTp = targetRoomTp.lower()
assert targetRoomTp in ALLOWED_TARGET_ROOM_TYPES, '[House] room type <{}> not supported!'.format(targetRoomTp)
if targetRoomTp == self.targetRoomTp:
return False # room not changed!
else:
self.targetRoomTp = targetRoomTp
###########
# Caching
if targetRoomTp in self.connMapDict:
self.connMap, self.connectedCoors, self.inroomDist, self.maxConnDist = self.connMapDict[targetRoomTp]
return True # room Changed!
self.targetRooms = targetRooms = \
[room for room in self.all_rooms if any([ _equal_room_tp(tp, targetRoomTp) for tp in room['roomTypes']])]
assert (len(targetRooms) > 0), '[House] no room of type <{}> in the current house!'.format(targetRoomTp)
##########
# generate destination mask map
        if _setEagleMap:  # TODO: currently a hack to speed up multi-target learning, so eagleMap becomes *WRONG*!
self.eagleMap[1, ...] = 0
for room in self.targetRooms:
_x1, _, _y1 = room['bbox']['min']
_x2, _, _y2 = room['bbox']['max']
x1,y1,x2,y2 = self.rescale(_x1,_y1,_x2,_y2,self.eagleMap.shape[1]-1)
self.eagleMap[1, x1:(x2+1), y1:(y2+1)]=1
print('[House] Caching New ConnMap for Target <{}>! (total {} rooms involved)'.format(targetRoomTp,len(targetRooms)))
self.connMap = connMap = np.ones((self.n_row+1, self.n_row+1), dtype=np.int32) * -1
self.inroomDist = inroomDist = np.ones((self.n_row+1, self.n_row+1), dtype=np.float32) * -1
dirs = [[0, 1], [1, 0], [-1, 0], [0, -1]]
que = []
for flag_find_open_components in [True, False]:
if not flag_find_open_components:
                print('WARNING!!!! [House] No Space Found for Room Type {}! Now searching even closed regions!'.format(targetRoomTp))
for room in targetRooms:
_x1, _, _y1 = room['bbox']['min']
_x2, _, _y2 = room['bbox']['max']
cx, cy = (_x1 + _x2) / 2, (_y1 + _y2) / 2
x1,y1,x2,y2 = self.rescale(_x1,_y1,_x2,_y2)
curr_components = self._find_components(x1, y1, x2, y2, dirs=dirs, return_open=flag_find_open_components) # find all the open components
if len(curr_components) == 0:
                    print('WARNING!!!! [House] No Space Found in TargetRoom <tp=%s, bbox=[%.2f, %.2f] x [%.2f, %.2f]>' %
                          (targetRoomTp, _x1, _x2, _y1, _y2))
continue
if isinstance(curr_components[0], list): # join all the coors in the open components
curr_major_coors = list(itertools.chain(*curr_components))
else:
curr_major_coors = curr_components
min_dist_to_center = 1e50
for x, y in curr_major_coors:
connMap[x, y] = 0
que.append((x, y))
tx, ty = self.to_coor(x, y)
tdist = np.sqrt((tx - cx) ** 2 + (ty - cy) ** 2)
if tdist < min_dist_to_center:
min_dist_to_center = tdist
inroomDist[x, y] = tdist
for x, y in curr_major_coors:
inroomDist[x, y] -= min_dist_to_center
if len(que) > 0:
break
assert len(que) > 0, "Error!! [House] No space found for room type {}. House ID = {}"\
.format(targetRoomTp, (self._id if hasattr(self, '_id') else 'NA'))
ptr = 0
self.maxConnDist = 1
while ptr < len(que):
x,y = que[ptr]
cur_dist = connMap[x, y]
ptr += 1
for dx,dy in dirs:
tx,ty = x+dx,y+dy
if self.inside(tx,ty) and self.canMove(tx,ty) and not self.isConnect(tx, ty):
que.append((tx,ty))
connMap[tx,ty] = cur_dist + 1
if cur_dist + 1 > self.maxConnDist:
self.maxConnDist = cur_dist + 1
self.connMapDict[targetRoomTp] = (connMap, que, inroomDist, self.maxConnDist)
self.connectedCoors = que
print(' >>>> ConnMap Cached!')
return True # room changed!
def _getRoomBounds(self, room):
_x1, _, _y1 = room['bbox']['min']
_x2, _, _y2 = room['bbox']['max']
return self.rescale(_x1, _y1, _x2, _y2)
"""
returns a random location of a given room type
"""
def getRandomLocation(self, roomTp):
roomTp = roomTp.lower()
assert roomTp in ALLOWED_TARGET_ROOM_TYPES, '[House] room type <{}> not supported!'.format(roomTp)
# get list of valid locations within the room bounds
locations = []
rooms = self._getRooms(roomTp)
for room in rooms:
room_locs = self._getValidRoomLocations(room)
if room_locs and len(room_locs) > 0:
locations.extend(room_locs)
# choose random location
result = None
if len(locations) > 0:
idx = np.random.choice(len(locations))
result = self.to_coor(locations[idx][0], locations[idx][1], True)
return result
def getRandomLocationForRoom(self, room_node):
'''Given a room node from the SUNCG house.json, returns a randomly
sampled valid location from that room. Returns None if no valid
locations are found.
'''
room_locs = self._getValidRoomLocations(room_node)
if len(room_locs) == 0:
return None
idx = np.random.choice(len(room_locs))
return self.to_coor(room_locs[idx][0], room_locs[idx][1], True)
def _getValidRoomLocations(self, room_node):
room_locs = None
if room_node['id'] in self.roomTypeLocMap:
room_locs = self.roomTypeLocMap[room_node['id']]
else:
room_bounds = self._getRoomBounds(room_node)
room_locs = self._find_components(*room_bounds, return_largest=True)
self.roomTypeLocMap[room_node['id']] = room_locs
return room_locs
"""
cache the shortest distance to all the possible room types
"""
def cache_all_target(self):
for t in self.all_desired_roomTypes:
self.setTargetRoom(t)
self.setTargetRoom(self.default_roomTp)
def genObstacleMap(self, MetaDataFile, gen_debug_map=True, dest=None, n_row=None):
# load all the doors
target_match_class = 'nyuv2_40class'
target_door_labels = ['door', 'fence', 'arch']
door_ids = set()
fine_grained_class = 'fine_grained_class'
ignored_labels = ['person', 'umbrella', 'curtain']
person_ids = set()
window_ids = set()
with open(MetaDataFile) as csvfile:
reader = csv.DictReader(csvfile)
for row in reader:
if row[target_match_class] in target_door_labels:
door_ids.add(row['model_id'])
if row[target_match_class] == 'window':
window_ids.add(row['model_id'])
if row[fine_grained_class] in ignored_labels:
person_ids.add(row['model_id'])
def is_door(obj):
if obj['modelId'] in door_ids:
return True
if (obj['modelId'] in window_ids) and (obj['bbox']['min'][1] < self.carpetHei):
return True
return False
solid_obj = [obj for obj in self.all_obj if (not is_door(obj)) and (obj['modelId'] not in person_ids)] # ignore person
door_obj = [obj for obj in self.all_obj if is_door(obj)]
colide_obj = [obj for obj in solid_obj if obj['bbox']['min'][1] < self.robotHei and obj['bbox']['max'][1] > self.carpetHei]
# generate the map for all the obstacles
obsMap = dest if dest is not None else self.obsMap
if n_row is None:
n_row = obsMap.shape[0] - 1
x1,y1,x2,y2 = self.rescale(self.L_min_coor[0],self.L_min_coor[2],self.L_max_coor[0],self.L_max_coor[2],n_row) # fill the space of the level
fill_region(obsMap,x1,y1,x2,y2,0)
if gen_debug_map and (self._debugMap is not None):
fill_region(self._debugMap, x1, y1, x2, y2, 0)
# fill boundary of rooms
maskRoom = np.zeros_like(obsMap,dtype=np.int8)
for wall in self.all_walls:
_x1, _, _y1 = wall['bbox']['min']
_x2, _, _y2 = wall['bbox']['max']
x1,y1,x2,y2 = self.rescale(_x1,_y1,_x2,_y2,n_row)
fill_region(obsMap, x1, y1, x2, y2, 1)
if gen_debug_map and (self._debugMap is not None):
fill_region(self._debugMap, x1, y1, x2, y2, 1)
fill_region(maskRoom, x1, y1, x2, y2, 1)
# remove all the doors
for obj in door_obj:
_x1, _, _y1 = obj['bbox']['min']
_x2, _, _y2 = obj['bbox']['max']
x1,y1,x2,y2 = self.rescale(_x1,_y1,_x2,_y2,n_row)
cx = (x1 + x2) // 2
cy = (y1 + y2) // 2
# expand region
if x2 - x1 < y2 - y1:
while (x1 - 1 >= 0) and (maskRoom[x1-1,cy] > 0):
x1 -= 1
while (x2 + 1 < maskRoom.shape[0]) and (maskRoom[x2+1,cy] > 0):
x2 += 1
else:
while (y1 - 1 >= 0) and (maskRoom[cx,y1-1] > 0):
y1 -= 1
while (y2+1 < maskRoom.shape[1]) and (maskRoom[cx,y2+1] > 0):
y2 += 1
fill_region(obsMap,x1,y1,x2,y2,0)
if gen_debug_map and (self._debugMap is not None):
fill_region(self._debugMap, x1, y1, x2, y2, 0.5)
# mark all the objects obstacle
for obj in colide_obj:
_x1, _, _y1 = obj['bbox']['min']
_x2, _, _y2 = obj['bbox']['max']
x1,y1,x2,y2 = self.rescale(_x1,_y1,_x2,_y2,n_row)
fill_region(obsMap,x1,y1,x2,y2,1)
if gen_debug_map and (self._debugMap is not None):
fill_region(self._debugMap, x1, y1, x2, y2, 0.8)
def genMovableMap(self, approximate=False):
roi_bounds = self._getRegionsOfInterest()
for roi in roi_bounds:
if approximate:
self._updateMovableMapApproximate(*roi)
else:
self._updateMovableMap(*roi)
if approximate:
self._adjustApproximateRobotMoveMap()
def _adjustApproximateRobotMoveMap(self):
# Here we haven't yet accounted for the robot radius, so do some
# approximate accommodation
robotGridSize = int(np.rint(self.robotRad * 2 * self.n_row / self.L_det))
if robotGridSize > 1:
robotGridRadius = robotGridSize // 2
kernel = np.zeros((robotGridSize, robotGridSize), np.uint8)
cv2.circle(kernel, (robotGridRadius + 1, robotGridRadius + 1), robotGridRadius, color=1, thickness=-1)
filtered_obstacles = (self.moveMap == 0).astype(np.uint8)
dilated_obstacles = cv2.dilate(filtered_obstacles, kernel, iterations=1)
self.moveMap = (dilated_obstacles == 0).astype(np.uint8)
def _updateMovableMap(self, x1, y1, x2, y2):
for i in range(x1, x2):
for j in range(y1, y2):
if self.obsMap[i,j] == 0:
cx, cy = self.to_coor(i, j, True)
if self.check_occupy(cx,cy):
self.moveMap[i,j] = 1
def _updateMovableMapApproximate(self, x1, y1, x2, y2):
self.moveMap[x1:x2, y1:y2] = (self.obsMap[x1:x2, y1:y2] == 0).astype(self.moveMap.dtype)
def _getRegionsOfInterest(self):
"""Override this function for customizing the areas of the map to
consider when marking valid movable locations
Returns a list of (x1, y1, x2, y2) tuples representing bounding boxes
of valid areas. Coordinates are normalized grid coordinates.
"""
return [(0, 0, self.n_row+1, self.n_row+1)]
"""
check whether the *grid* coordinate (x,y) is inside the house
"""
def inside(self,x,y):
return x >= 0 and y >=0 and x <= self.n_row and y <= self.n_row
"""
get the corresponding grid coordinate of (x, y) in the topdown 2d map
"""
def get_eagle_view_grid(self, x, y, input_grid=False):
if input_grid:
x, y = self.to_coor(x, y, shft=True)
return self.to_grid(x, y, n_row=self.eagle_n_row-1)
"""
convert the continuous rectangle region in the SUNCG dataset to the grid region in the house
"""
def rescale(self,x1,y1,x2,y2,n_row=None):
if n_row is None: n_row = self.n_row
tiny = 1e-9
tx1 = np.floor((x1 - self.L_lo) / self.L_det * n_row+tiny)
ty1 = np.floor((y1 - self.L_lo) / self.L_det * n_row+tiny)
tx2 = np.floor((x2 - self.L_lo) / self.L_det * n_row+tiny)
ty2 = np.floor((y2 - self.L_lo) / self.L_det * n_row+tiny)
return int(tx1),int(ty1),int(tx2),int(ty2)
def to_grid(self, x, y, n_row=None):
"""
Convert the true-scale coordinate in SUNCG dataset to grid location
"""
if n_row is None: n_row = self.n_row
tiny = 1e-9
x, y = np.float32(x), np.float32(y)
n_row, tiny = np.float32(n_row), np.float32(tiny)
tx = np.floor((x - self.L_lo) / self.L_det * n_row + tiny)
ty = np.floor((y - self.L_lo) / self.L_det * n_row + tiny)
return int(tx), int(ty)
def to_coor(self, x, y, shft=False):
"""
Convert grid location to SUNCG dataset continuous coordinate (the grid center will be returned when shft is True)
"""
x, y = np.float32(x), np.float32(y)
tx, ty = x * self.grid_det + self.L_lo, y * self.grid_det + self.L_lo
if shft:
tx += np.float32(0.5) * self.grid_det
ty += np.float32(0.5) * self.grid_det
return tx, ty
def _check_grid_occupy(self,cx,cy,gx,gy):
for x in range(gx,gx+2):
for y in range(gy,gy+2):
rx, ry = x * self.grid_det + self.L_lo, y * self.grid_det + self.L_lo
if (rx-cx)**2+(ry-cy)**2<=self.robotRad*self.robotRad:
return True
return False
"""
suppose the robot stands at continuous coordinate (cx, cy), check whether it will touch any obstacles
"""
def check_occupy(self, cx, cy): # cx, cy are real coordinates
radius = self.robotRad
x1,y1,x2,y2=self.rescale(cx-radius,cy-radius,cx+radius,cy+radius)
for xx in range(x1,x2+1):
for yy in range(y1,y2+1):
if (not self.inside(xx,yy) or self.obsMap[xx,yy] == 1) \
and self._check_grid_occupy(cx,cy,xx,yy):
return False
return True
"""
check if an agent can reach grid location (gx, gy)
"""
def canMove(self, gx, gy):
return (self.inside(gx, gy)) and (self.moveMap[gx, gy] > 0)
"""
check if grid location (gx, gy) is connected to the target room
"""
def isConnect(self, gx, gy):
return (self.inside(gx, gy)) and (self.connMap[gx, gy] != -1)
"""
get the raw shortest distance from grid location (gx, gy) to the target room
"""
def getDist(self, gx, gy):
return self.connMap[gx, gy]
"""
return a scaled shortest distance, which ranges from 0 to 1
"""
def getScaledDist(self, gx, gy):
ret = self.connMap[gx, gy]
if ret < 0:
return ret
return ret / self.maxConnDist
"""
returns all rooms of a given type
"""
def _getRooms(self, roomTp):
rooms = [
r for r in self.all_rooms
if any([_equal_room_tp(tp, roomTp) for tp in r['roomTypes']])
]
return rooms
"""
return whether or not a given room type exists in the house
"""
def hasRoomType(self, roomTp):
rooms = self._getRooms(roomTp)
return len(rooms) > 0
#######################
# DEBUG functionality #
#######################
def _showDebugMap(self, filename=None):
if self._debugMap is None:
print('[Warning] <showDebugMap>: Please set DebugInfoOn=True before calling this method!')
else:
import matplotlib.pyplot as plt
import seaborn as sns
ax = sns.heatmap(self._debugMap[:,::-1])
if filename is None:
plt.show()
else:
ax.get_figure().savefig(filename)
def _showObsMap(self):
import matplotlib.pyplot as plt
import seaborn as sns
plt.clf()
sns.heatmap(self.obsMap[:,::-1])
plt.show()
def _showMoveMap(self, visualize=True):
import matplotlib.pyplot as plt
import seaborn as sns
proj = np.array(self.obsMap, dtype=np.float32)
for x in range(self.n_row+1):
for y in range(self.n_row+1):
if self.canMove(x, y):
proj[x,y] = 0.5
if visualize:
plt.clf()
ax = sns.heatmap(proj[:,::-1])
if visualize:
plt.show()
return proj
def _showConnMap(self):
import matplotlib.pyplot as plt
import seaborn as sns
proj = self._showMoveMap(False)
for x in range(self.n_row+1):
for y in range(self.n_row+1):
if self.isConnect(x,y):
proj[x,y] = 0.25
plt.clf()
sns.heatmap(proj[:,::-1])
plt.show()
return proj
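
if __name__ == '__main__':
    # A minimal usage sketch (the SUNCG asset paths below are hypothetical and
    # must point at a real house; generally only the first arguments are needed).
    house = House('house.json', 'house.obj', 'ModelCategoryMapping.csv',
                  SetTarget=True, DebugMessages=True)
    print('desired room types found:', house.all_desired_roomTypes)
    loc = house.getRandomLocation(house.default_roomTp)  # continuous (x, y) or None
    print('random location in default target room:', loc)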
|
import torch
from Transfromer import Transformer
from config import ngpu, device, save_dir, num_layers, d_model, num_heads, dff, dropout_rate, MAX_LENGTH, BATCH_SIZE
from utils import create_mask, mask_accuracy_func, mask_loss_func
from sklearn.model_selection import train_test_split
import torchtext
from dataset import pairs, get_dataset, DataLoader, normalizeString
def validate_step(model, inp, targ):
targ_inp = targ[:, :-1]
targ_real = targ[:, 1:]
enc_padding_mask, combined_mask, dec_padding_mask = create_mask(inp, targ_inp)
inp = inp.to(device)
targ_inp = targ_inp.to(device)
targ_real = targ_real.to(device)
enc_padding_mask = enc_padding_mask.to(device)
combined_mask = combined_mask.to(device)
dec_padding_mask = dec_padding_mask.to(device)
    model.eval()  # set eval mode
with torch.no_grad():
# forward
prediction, _ = model(inp, targ_inp, enc_padding_mask, combined_mask, dec_padding_mask)
val_loss = mask_loss_func(targ_real, prediction)
val_metric = mask_accuracy_func(targ_real, prediction)
return val_loss.item(), val_metric.item()
# split the dataset into training and validation sets
train_pairs, val_pairs = train_test_split(pairs, test_size=0.2, random_state=1234)
tokenizer = lambda x: x.split()  # whitespace tokenizer
SRC_TEXT = torchtext.legacy.data.Field(sequential=True,
tokenize=tokenizer,
fix_length=MAX_LENGTH + 2,
preprocessing=lambda x: ['<start>'] + x + ['<end>'],
)
TARG_TEXT = torchtext.legacy.data.Field(sequential=True,
tokenize=tokenizer,
fix_length=MAX_LENGTH + 2,
preprocessing=lambda x: ['<start>'] + x + ['<end>'],
)
ds_train = torchtext.legacy.data.Dataset(*get_dataset(train_pairs, SRC_TEXT, TARG_TEXT))
ds_val = torchtext.legacy.data.Dataset(*get_dataset(val_pairs, SRC_TEXT, TARG_TEXT))
# build the vocabulary and establish the token <-> ID mapping from the training set
SRC_TEXT.build_vocab(ds_train)
TARG_TEXT.build_vocab(ds_train)
# build the data pipeline iterators
train_iter, val_iter = torchtext.legacy.data.Iterator.splits(
(ds_train, ds_val),
sort_within_batch=True,
sort_key=lambda x: len(x.src),
batch_sizes=(BATCH_SIZE, BATCH_SIZE)
)
train_dataloader = DataLoader(train_iter)
val_dataloader = DataLoader(val_iter)
input_vocab_size = len(SRC_TEXT.vocab) # 3901
target_vocab_size = len(TARG_TEXT.vocab) # 2591
# load the model checkpoint
checkpoint = save_dir + '012_0.60_ckpt.tar'
print('checkpoint:', checkpoint)
# ckpt = torch.load(checkpoint, map_location=device)  # dict saved on CPU, loaded onto the GPU
ckpt = torch.load(checkpoint)  # dict saved on GPU, loaded onto the GPU
transformer_sd = ckpt['net']
reload_model = Transformer(num_layers,
d_model,
num_heads,
dff,
input_vocab_size,
target_vocab_size,
pe_input=input_vocab_size,
pe_target=target_vocab_size,
rate=dropout_rate)
reload_model = reload_model.to(device)
if ngpu > 1:
    reload_model = torch.nn.DataParallel(reload_model, device_ids=list(range(ngpu)))  # run in parallel, e.g. device_ids=[0, 1]
print('Loading model ...')
if device.type == 'cuda' and ngpu > 1:
reload_model.module.load_state_dict(transformer_sd)
else:
reload_model.load_state_dict(transformer_sd)
print('Model loaded ...')
def test(model, dataloader):
model.eval()
test_loss_sum = 0.
test_metric_sum = 0.
for test_step, (inp, targ) in enumerate(dataloader, start=1):
# inp [64, 10] , targ [64, 10]
loss, metric = validate_step(model, inp, targ)
# print('*'*8, loss, metric)
test_loss_sum += loss
test_metric_sum += metric
        # print the running averages
print('*' * 8, 'Test: loss: {:.3f}, {}: {:.3f}'.format(test_loss_sum / test_step, 'test_acc', test_metric_sum / test_step))
test(reload_model, val_dataloader)
def tokenizer_encode(tokenize, sentence, vocab):
sentence = normalizeString(sentence)
sentence = tokenize(sentence) # list
sentence = ['<start>'] + sentence + ['<end>']
sentence_ids = [vocab.stoi[token] for token in sentence]
return sentence_ids
def tokenizer_decode(sentence_ids, vocab):
    sentence = [vocab.itos[idx] for idx in sentence_ids if idx < len(vocab)]
    return " ".join(sentence)
# a single sentence, so no padding is needed
s = 'je pars en vacances pour quelques jours .'
print(tokenizer_encode(tokenizer, s, SRC_TEXT.vocab))
def evaluate(model, inp_sentence):
    model.eval()  # set eval mode
    inp_sentence_ids = tokenizer_encode(tokenizer, inp_sentence, SRC_TEXT.vocab)  # convert to token indices
encoder_input = torch.tensor(inp_sentence_ids).unsqueeze(dim=0) # =>[b=1, inp_seq_len=10]
decoder_input = [TARG_TEXT.vocab.stoi['<start>']]
decoder_input = torch.tensor(decoder_input).unsqueeze(0) # =>[b=1,seq_len=1]
with torch.no_grad():
for i in range(MAX_LENGTH + 2):
enc_padding_mask, combined_mask, dec_padding_mask = create_mask(encoder_input.cpu(), decoder_input.cpu())
# [b,1,1,inp_seq_len], [b,1,targ_seq_len,inp_seq_len], [b,1,1,inp_seq_len]
encoder_input = encoder_input.to(device)
decoder_input = decoder_input.to(device)
enc_padding_mask = enc_padding_mask.to(device)
combined_mask = combined_mask.to(device)
dec_padding_mask = dec_padding_mask.to(device)
# forward
predictions, attention_weights = model(encoder_input,
decoder_input,
enc_padding_mask,
combined_mask,
dec_padding_mask)
            # take the last predicted token and compute its argmax
prediction = predictions[:, -1:, :] # =>[b=1, 1, target_vocab_size]
prediction_id = torch.argmax(prediction, dim=-1) # => [b=1, 1]
if prediction_id.squeeze().item() == TARG_TEXT.vocab.stoi['<end>']:
return decoder_input.squeeze(dim=0), attention_weights
decoder_input = torch.cat([decoder_input, prediction_id],
dim=-1) # [b=1,targ_seq_len=1]=>[b=1,targ_seq_len=2]
return decoder_input.squeeze(dim=0), attention_weights
s = 'je pars en vacances pour quelques jours .'
s_targ = 'i m taking a couple of days off .'
pred_result, attention_weights = evaluate(reload_model, s)
pred_sentence = tokenizer_decode(pred_result, TARG_TEXT.vocab)
print('real target:', s_targ)
print('pred_sentence:', pred_sentence)
# batch translation
sentence_pairs = [
['je pars en vacances pour quelques jours .', 'i m taking a couple of days off .'],
['je ne me panique pas .', 'i m not panicking .'],
['je recherche un assistant .', 'i am looking for an assistant .'],
['je suis loin de chez moi .', 'i m a long way from home .'],
['vous etes en retard .', 'you re very late .'],
['j ai soif .', 'i am thirsty .'],
['je suis fou de vous .', 'i m crazy about you .'],
['vous etes vilain .', 'you are naughty .'],
['il est vieux et laid .', 'he s old and ugly .'],
['je suis terrifiee .', 'i m terrified .'],
]
def batch_translate(sentence_pairs):
for pair in sentence_pairs:
print('input:', pair[0])
print('target:', pair[1])
pred_result, _ = evaluate(reload_model, pair[0])
        pred_sentence = tokenizer_decode(pred_result, TARG_TEXT.vocab)
print('pred:', pred_sentence)
print('')
batch_translate(sentence_pairs)
|
# -*- coding: utf-8 -*-
"""
@author: MAQ
"""
import random


def seq_search_unord(l, el):
    """Sequential search in an unordered list: returns the 1-based position of el, or False."""
    itr = 0
    for i in l:
        itr = itr + 1
        if i == el:
            return itr
    return False


def seq_search_ord(l, el):
    """Sequential search in a sorted list: stops early once the elements exceed el."""
    itr = 0
    for i in l:
        itr = itr + 1
        if i > el:
            return False
        elif i == el:
            return itr
    return False


# build a random list, then search it unsorted and sorted
l = []
for i in range(10):
    n = random.randint(1, 3000)
    l.append(n)
seq_search_unord(l, 462)
l.sort()
seq_search_ord(l, 462)
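
# For comparison, a binary-search sketch using the standard-library bisect
# module (requires a sorted list, like seq_search_ord; returns a 1-based
# position to match the functions above, or False when absent).
import bisect

def bin_search_ord(l, el):
    i = bisect.bisect_left(l, el)
    if i < len(l) and l[i] == el:
        return i + 1
    return False

bin_search_ord(l, 462)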
|
# 144. Binary Tree Preorder Traversal
# Runtime: 28 ms, faster than 85.09% of Python3 online submissions for Binary Tree Preorder Traversal.
# Memory Usage: 14.3 MB, less than 12.95% of Python3 online submissions for Binary Tree Preorder Traversal.
# Definition for a binary tree node.
# class TreeNode:
# def __init__(self, val=0, left=None, right=None):
# self.val = val
# self.left = left
# self.right = right
class Solution:
    # Iterative preorder: emit each node on the way down the left spine,
    # saving nodes on a stack so their right subtrees can be resumed later.
    def preorderTraversal(self, root: TreeNode) -> list[int]:
        if not root:
            return []
        stack, output = [], []
        node = root
        while node or stack:
            while node:
                output.append(node.val)
                stack.append(node)
                node = node.left
            node = stack.pop(-1).right
        return output
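
# A recursive variant for comparison (a minimal sketch; the same TreeNode
# definition shown in the comment above is assumed):
class SolutionRecursive:
    def preorderTraversal(self, root: TreeNode) -> list[int]:
        if not root:
            return []
        return ([root.val]
                + self.preorderTraversal(root.left)
                + self.preorderTraversal(root.right))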
|
import pytest
from app import app as flask_app
@pytest.fixture
def set_up_client():
flask_app.testing = True
client = flask_app.test_client()
yield client
@pytest.fixture()
def set_up_url():
url = 'http://0.0.0.0:5000/test'
yield url
def test_get_request(set_up_client, set_up_url):
get_response = set_up_client.get(set_up_url)
assert get_response.status_code == 200
def test_post_request(set_up_client, set_up_url):
post_response = set_up_client.post(set_up_url)
assert post_response.status_code == 200
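
# The two tests above could also be expressed as one parametrized test (a
# sketch, assuming the endpoint should return 200 for both methods):
@pytest.mark.parametrize('method', ['get', 'post'])
def test_request_methods(set_up_client, set_up_url, method):
    response = getattr(set_up_client, method)(set_up_url)
    assert response.status_code == 200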
|
#
# Copyright (c) 2021 Airbyte, Inc., all rights reserved.
#
import argparse
import os
from typing import Any, Dict
import yaml
from normalization.destination_type import DestinationType
from normalization.transform_catalog.catalog_processor import CatalogProcessor
class TransformCatalog:
"""
To run this transformation:
```
python3 main_dev_transform_catalog.py \
--integration-type <postgres|bigquery|redshift|snowflake>
--profile-config-dir . \
--catalog integration_tests/catalog.json \
--out dir \
--json-column json_blob
```
"""
config: dict = {}
DBT_PROJECT = "dbt_project.yml"
def __init__(self):
self.config = {}
def run(self, args) -> None:
self.parse(args)
self.process_catalog()
def parse(self, args) -> None:
parser = argparse.ArgumentParser(add_help=False)
parser.add_argument("--integration-type", type=str, required=True, help="type of integration dialect to use")
parser.add_argument("--profile-config-dir", type=str, required=True, help="path to directory containing DBT profiles.yml")
parser.add_argument("--catalog", nargs="+", type=str, required=True, help="path to Catalog (JSON Schema) file")
parser.add_argument("--out", type=str, required=True, help="path to output generated DBT Models to")
parser.add_argument("--json-column", type=str, required=False, help="name of the column containing the json blob")
parsed_args = parser.parse_args(args)
profiles_yml = read_profiles_yml(parsed_args.profile_config_dir)
self.config = {
"integration_type": parsed_args.integration_type,
"schema": extract_schema(profiles_yml),
"catalog": parsed_args.catalog,
"output_path": parsed_args.out,
"json_column": parsed_args.json_column,
"profile_config_dir": parsed_args.profile_config_dir,
}
def process_catalog(self) -> None:
destination_type = DestinationType.from_string(self.config["integration_type"])
schema = self.config["schema"]
output = self.config["output_path"]
json_col = self.config["json_column"]
processor = CatalogProcessor(output_directory=output, destination_type=destination_type)
for catalog_file in self.config["catalog"]:
print(f"Processing {catalog_file}...")
processor.process(catalog_file=catalog_file, json_column_name=json_col, default_schema=schema)
self.update_dbt_project_vars(json_column=self.config["json_column"], models_to_source=processor.models_to_source)
def update_dbt_project_vars(self, **vars_config: Dict[str, Any]):
filename = os.path.join(self.config["profile_config_dir"], self.DBT_PROJECT)
config = read_yaml_config(filename)
config["vars"] = {**config.get("vars", {}), **vars_config}
write_yaml_config(config, filename)
def read_profiles_yml(profile_dir: str) -> Any:
with open(os.path.join(profile_dir, "profiles.yml"), "r") as file:
config = yaml.load(file, Loader=yaml.FullLoader)
obj = config["normalize"]["outputs"]["prod"]
return obj
def read_yaml_config(filename: str) -> Dict[str, Any]:
with open(filename, "r") as fp:
config = yaml.safe_load(fp)
if not isinstance(config, dict):
raise RuntimeError("{} does not parse to a dictionary".format(os.path.basename(filename)))
return config
def write_yaml_config(config: Dict[str, Any], filename: str):
with open(filename, "w") as fp:
fp.write(yaml.dump(config, sort_keys=False))
def extract_schema(profiles_yml: Dict) -> str:
if "dataset" in profiles_yml:
return str(profiles_yml["dataset"])
elif "schema" in profiles_yml:
return str(profiles_yml["schema"])
else:
raise KeyError("No Dataset/Schema defined in profiles.yml")
def main(args=None):
TransformCatalog().run(args)
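
if __name__ == "__main__":
    # Conventional entry point, assuming this module is meant to be invoked
    # directly (main() already defaults args to None, so argparse reads sys.argv).
    main()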
|
# Status codes
OK = 200
ACCEPTED = 202
NO_CONTENT = 204
FORBIDDEN = 403
NOT_FOUND = 404
INTERNAL_SERVER_ERROR = 500
|
import json
import numpy as np
from opentrons import robot, instruments
from opentrons import deck_calibration as dc
from opentrons.deck_calibration import endpoints
from opentrons.config import robot_configs
from opentrons import types
# Note that several tests in this file have target/expected values that do not
# accurately reflect robot operation, because of differences between return
# values from the driver during simulating vs. non-simulating modes. In
# particular, during simulating mode the driver's `position` method returns
# the xyz position of the tip of the pipette, but during non-simulating mode
# it returns a position that corresponds roughly to the gantry (e.g.: where the
# Smoothie board sees the position of itself--after a fashion). Simulating mode
# should be replaced with something that accurately reflects actual robot
# operation, and then these tests should be revised to match expected reality.
# ------------ Function tests (unit) ----------------------
async def test_add_and_remove_tip(async_server, dc_session):
hardware = dc_session.adapter
mount = 'left'
version = async_server['api_version']
if version == 1:
hardware.reset()
pip = instruments.P10_Single(mount=mount)
dc_session.current_mount = mount
else:
hardware.reset()
hardware.cache_instruments({
types.Mount.LEFT: 'p10_single_v1',
types.Mount.RIGHT: None})
pip = hardware.attached_instruments[types.Mount.LEFT]
dc_session.current_mount = types.Mount.LEFT
mount = dc_session.current_mount
dc_session.pipettes = {mount: pip}
# Check malformed packet
res0 = await endpoints.attach_tip({})
assert res0.status == 400
assert dc_session.tip_length is None
if version == 1:
assert not pip.tip_attached
else:
assert hardware.attached_instruments[mount]['has_tip'] is False
# Check correct attach command
tip_length = 50
res1 = await endpoints.attach_tip({'tipLength': tip_length})
assert res1.status == 200
assert dc_session.tip_length == tip_length
if version == 1:
assert pip.tip_attached
else:
assert hardware.attached_instruments[mount]['has_tip'] is True
# Check command with tip already attached
res2 = await endpoints.attach_tip({'tipLength': tip_length + 5})
assert res2.status == 200
assert dc_session.tip_length == tip_length + 5
if version == 1:
assert pip.tip_attached
else:
assert hardware.attached_instruments[mount]['has_tip'] is True
# Check correct detach command
res3 = await endpoints.detach_tip({})
assert res3.status == 200
assert dc_session.tip_length is None
if version == 1:
assert not pip.tip_attached
else:
assert hardware.attached_instruments[mount]['has_tip'] is False
# Check command with no tip
res4 = await endpoints.detach_tip({})
assert res4.status == 200
assert dc_session.tip_length is None
if version == 1:
assert not pip.tip_attached
else:
assert hardware.attached_instruments[mount]['has_tip'] is False
async def test_save_xy(async_server, dc_session):
hardware = dc_session.adapter
version = async_server['api_version']
if version == 1:
mount = 'left'
hardware.reset()
pip = instruments.P10_Single(mount=mount)
else:
mount = types.Mount.LEFT
hardware.reset()
hardware.cache_instruments({
mount: 'p10_single_v1',
types.Mount.RIGHT: None})
pip = hardware.attached_instruments[mount]
dc_session.pipettes = {mount: pip}
dc_session.current_mount = mount
dc_session.tip_length = 25
if version == 1:
dc_session.pipettes.get(mount)._add_tip(dc_session.tip_length)
hardware.home()
else:
dc_session.pipettes.get(mount)['has_tip'] = True
dc_session.pipettes.get(mount)['tip_length'] = dc_session.tip_length
hardware.add_tip(types.Mount.LEFT, dc_session.tip_length)
hardware.home()
x = 100
y = 101
if version == 1:
dc_session.pipettes.get(mount).move_to((hardware.deck, (x, y, 102)))
else:
# relative move or not?
hardware.move_to(types.Mount.LEFT, types.Point(x=x, y=y, z=102))
point = '1'
data = {
'point': point
}
await endpoints.save_xy(data)
actual = dc_session.points[point]
if version == 1:
expected = (
robot._driver.position['X'] + hardware.config.mount_offset[0],
robot._driver.position['Y']
)
else:
coordinates = hardware.gantry_position(types.Mount.LEFT)
expected = (
coordinates.x,
coordinates.y)
assert actual == expected
async def test_save_z(async_server, dc_session, monkeypatch):
hardware = dc_session.adapter
model = 'p10_single_v1'
# Z values were bleeding in from other tests, mock robot configs
# to encapsulate this test
fake_config = robot_configs.load()
monkeypatch.setattr(hardware, 'config', fake_config)
if async_server['api_version'] == 1:
mount = 'left'
hardware.reset()
pip = instruments.P10_Single(mount=mount)
else:
mount = types.Mount.LEFT
hardware.reset()
hardware.cache_instruments({
mount: 'p10_single_v1',
types.Mount.RIGHT: None})
pip = hardware.attached_instruments[mount]
dc_session.pipettes = {mount: pip}
dc_session.current_mount = mount
dc_session.current_model = model
dc_session.tip_length = 25
if async_server['api_version'] == 1:
dc_session.pipettes.get(mount)._add_tip(dc_session.tip_length)
else:
dc_session.pipettes.get(mount)['has_tip'] = True
dc_session.pipettes.get(mount)['tip_length'] = dc_session.tip_length
z_target = 80.0
if async_server['api_version'] == 1:
hardware.home()
dc_session.pipettes.get(mount).move_to(
(hardware.deck, (0, 0, z_target)))
else:
hardware.home()
# Unsure whether to use move_to or move_rel
hardware.move_to(
types.Mount.LEFT, types.Point(x=0, y=0, z=z_target))
await endpoints.save_z({})
new_z = dc_session.z_value
expected_z = z_target
assert new_z == expected_z
async def test_save_calibration_file(dc_session, monkeypatch):
hardware = dc_session.adapter
hardware.reset()
expected_pos = endpoints.expected_points()
dc_session.points = {
k: (v[0], v[1] + 0.3)
for k, v in expected_pos.items()}
dc_session.z_value = 0.2
persisted_data = []
def dummy_save(config, filename=None, tag=None):
nonlocal persisted_data
persisted_data.append((config, filename, tag))
monkeypatch.setattr(robot_configs, 'save_deck_calibration', dummy_save)
await endpoints.save_transform({})
in_memory = hardware.config.gantry_calibration
assert len(persisted_data) == 2
assert persisted_data[0][0].gantry_calibration == in_memory
assert persisted_data[1][0].gantry_calibration == in_memory
assert persisted_data[1][-1] is not None
expected = [[1.0, 0.0, 0.0, 0.0],
[0.0, 1.0, 0.0, 0.3],
[0.0, 0.0, 1.0, 0.2],
[0.0, 0.0, 0.0, 1.0]]
assert np.allclose(in_memory, expected)
async def test_transform_calculation(dc_session, monkeypatch):
# This transform represents a 5 degree rotation, with a shift in x, y, & z.
# Values for the points and expected transform come from a hand-crafted
# transformation matrix and the points that would generate that matrix.
hardware = dc_session.adapter
cos_5deg_p = 0.9962
sin_5deg_p = 0.0872
sin_5deg_n = -sin_5deg_p
const_zero = 0.0
const_one_ = 1.0
delta_x___ = 0.3
delta_y___ = 0.4
delta_z___ = 0.5
expected_transform = [
[cos_5deg_p, sin_5deg_p, const_zero, delta_x___],
[sin_5deg_n, cos_5deg_p, const_zero, delta_y___],
[const_zero, const_zero, const_one_, delta_z___],
[const_zero, const_zero, const_zero, const_one_]]
dc_session.z_value = 0.5
dc_session.points = {
'1': [13.16824337, 8.30855312],
'2': [380.50507635, -23.82925545],
'3': [34.87002331, 256.36103295]
}
await endpoints.save_transform({})
assert np.allclose(hardware.config.gantry_calibration, expected_transform)
# ------------ Session and token tests ----------------------
async def test_create_session(async_client, async_server, monkeypatch):
"""
Tests that the POST request to initiate a session manager for factory
calibration returns a good token, along with the correct preferred pipette
"""
dummy_token = 'Test Token'
def uuid_mock():
return dummy_token
monkeypatch.setattr(endpoints, '_get_uuid', uuid_mock)
# each tuple in this list is (left-mount, right-mount, correct-choice)
pipette_combinations = [
('p300_multi_v1', 'p10_single_v1', 'p10_single_v1'),
('p300_single_v1', 'p10_single_v1', 'p10_single_v1'),
('p10_multi_v1', 'p300_multi_v1', 'p300_multi_v1'),
(None, 'p10_single_v1', 'p10_single_v1'),
('p300_multi_v1', None, 'p300_multi_v1'),
('p10_single_v1', 'p300_multi_v1', 'p10_single_v1')]
hardware = async_server['com.opentrons.hardware']
for left_model, right_model, preferred in pipette_combinations:
def dummy_read_model(mount):
if mount == 'left':
res = left_model
else:
res = right_model
return res
if async_server['api_version'] == 1:
monkeypatch.setattr(
hardware._driver, 'read_pipette_model', dummy_read_model)
hardware.reset()
else:
await hardware.cache_instruments(
{types.Mount.LEFT: left_model, types.Mount.RIGHT: right_model})
resp = await async_client.post('/calibration/deck/start')
start_result = await resp.json()
endpoints.session = None
assert start_result.get('token') == dummy_token
assert start_result.get('pipette', {}).get('model') == preferred
assert resp.status == 201
async def test_create_session_fail(async_client, monkeypatch):
"""
    Tests that the POST request to initiate a session manager for factory
    calibration fails when no pipette model can be read.
"""
from opentrons.legacy_api.robot import Robot
dummy_token = 'Test Token'
def uuid_mock():
return dummy_token
monkeypatch.setattr(endpoints, '_get_uuid', uuid_mock)
def dummy_get_pipettes(self):
return {
'left': {
'mount_axis': 'z',
'plunger_axis': 'b',
'model': None
},
'right': {
'mount_axis': 'a',
'plunger_axis': 'c',
'model': None
}
}
monkeypatch.setattr(Robot, 'get_attached_pipettes', dummy_get_pipettes)
resp = await async_client.post('/calibration/deck/start')
text = await resp.text()
assert json.loads(text) == {'message': 'Error, pipette not recognized'}
assert resp.status == 403
assert endpoints.session is None
async def test_release(async_client, async_server, monkeypatch, dc_session):
"""
    Tests that the POST request to initiate a session manager for factory
    calibration returns an error if a session is in progress, and that the
    session can be released.
"""
if async_server['api_version'] == 1:
test_model = 'p300_multi_v1'
def dummy_read_model(mount):
return test_model
monkeypatch.setattr(
robot._driver, 'read_pipette_model', dummy_read_model)
robot.reset()
resp1 = await async_client.post('/calibration/deck/start')
assert resp1.status == 409
# Release
resp2 = await async_client.post(
'/calibration/deck',
json={
'token': dc_session.id,
'command': 'release'
})
assert resp2.status == 200
assert endpoints.session is None
if async_server['api_version'] == 2:
await async_server['com.opentrons.hardware'].cache_instruments({
types.Mount.LEFT: None,
types.Mount.RIGHT: 'p300_multi_v1'
})
resp3 = await async_client.post('/calibration/deck/start')
assert resp3.status == 201
async def test_forcing_new_session(
async_server, async_client, monkeypatch, dc_session):
"""
    Tests that the POST request to initiate a session manager for factory
    calibration returns an error if a session is in progress, and can be
    overridden by forcing a new session.
"""
test_model = 'p300_multi_v1'
if async_server['api_version'] == 1:
def dummy_read_model(mount):
return test_model
monkeypatch.setattr(
robot._driver, 'read_pipette_model', dummy_read_model)
robot.reset()
dummy_token = 'fake token'
def uuid_mock():
return dummy_token
async def mock_release(data):
return data
monkeypatch.setattr(endpoints, '_get_uuid', uuid_mock)
resp1 = await async_client.post('/calibration/deck/start')
assert resp1.status == 409
if async_server['api_version'] == 2:
monkeypatch.setattr(endpoints, 'release', mock_release)
resp2 = await async_client.post(
'/calibration/deck/start', json={'force': 'true'})
text2 = await resp2.json()
assert resp2.status == 201
expected2 = {
'token': dummy_token,
'pipette': {
'mount': 'right',
'model': test_model
}
}
assert text2 == expected2
async def test_incorrect_token(async_client, dc_session):
"""
Test that putting in an incorrect token for a POST request does not work
after a session was already created with a different token.
"""
resp = await async_client.post(
'/calibration/deck',
json={
'token': 'FAKE TOKEN',
'command': 'init pipette',
'mount': 'left',
'model': 'p10_single_v1'
})
assert resp.status == 403
# ------------ Router tests (integration) ----------------------
# TODO(mc, 2018-05-02): this does not adequately test z to smoothie axis logic
async def test_set_and_jog_integration(
async_client, async_server, monkeypatch):
"""
Test that the jog function works.
Note that in order for the jog function to work, the following must
be done:
1. Create a session manager
Then jog requests will work as expected.
"""
test_model = 'p300_multi_v1'
hardware = async_server['com.opentrons.hardware']
if async_server['api_version'] == 1:
def dummy_read_model(mount):
return test_model
monkeypatch.setattr(
hardware._driver, 'read_pipette_model', dummy_read_model)
hardware.reset()
else:
        # Why does this need to be awaited for a sync adapter?
await hardware.cache_instruments(
{types.Mount.LEFT: None, types.Mount.RIGHT: test_model})
dummy_token = 'Test Token'
def uuid_mock():
return dummy_token
monkeypatch.setattr(endpoints, '_get_uuid', uuid_mock)
token_res = await async_client.post('/calibration/deck/start')
assert token_res.status == 201, token_res
token_text = await token_res.json()
token = token_text['token']
axis = 'z'
direction = 1
step = 3
# left pipette z carriage motor is smoothie axis "Z", right is "A"
sess = dc.endpoints.session
smoothie_axis = 'Z' if sess.current_mount == 'left' else 'A'
if async_server['api_version'] == 1:
hardware.reset()
prior_x, prior_y, prior_z = dc.position(smoothie_axis, sess.adapter)
else:
sess.adapter.home()
prior_x, prior_y, prior_z = dc.position(
sess.current_mount, sess.adapter, sess.cp)
resp = await async_client.post(
'/calibration/deck',
json={
'token': token,
'command': 'jog',
'axis': axis,
'direction': direction,
'step': step
})
body = await resp.json()
msg = body.get('message')
assert '{}'.format((prior_x, prior_y, prior_z + step)) in msg
|
from tkinter import *

# create a new GUI window
window = Tk()
window.title("A Window")

# a label
lbl = Label(window, text="A Label")
lbl.pack()

# an 'entry' textbox
txt = Entry(window)
txt.pack()

# a button
btn = Button(window, text="A Button")
btn.pack()
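
# A small extension sketch: give the button an action that copies the entry
# text into the label (uses only the widgets defined above).
def on_click():
    lbl.config(text=txt.get())

btn.config(command=on_click)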
window.mainloop()
|
import comet_ml


def log_extra(config, f1_score):
    # reopen the experiment that just ended so extra metadata can be appended
    current_experiment = comet_ml.get_global_experiment()
    afterlog_experiment = comet_ml.ExistingExperiment(
        previous_experiment=current_experiment.get_key())
    exp_name = (f"{config['dataset_name']}:{config['kmer_len']}:{config['stride']}"
                f":freeze={config['freeze']}:LR={config['learning_rate']}"
                f":WD={config['weight_decay']}:BS={config['batch_size']}"
                f":rand_weights={config['random_weights']}:")
    afterlog_experiment.set_name(exp_name)
    afterlog_experiment.log_parameters(config)
    afterlog_experiment.log_metric("test F1 score", f1_score)
    afterlog_experiment.end()
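
# A usage sketch with a hypothetical config (assumes a Comet experiment is
# already active in this process, since get_global_experiment() is used above):
# config = {'dataset_name': 'demo', 'kmer_len': 6, 'stride': 3, 'freeze': True,
#           'learning_rate': 3e-5, 'weight_decay': 0.01, 'batch_size': 32,
#           'random_weights': False}
# log_extra(config, f1_score=0.87)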
|
# Copyright 2021 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import tensorflow as tf
tf.disable_eager_execution()
import numpy as np
import os
from absl import app, flags, logging
from absl.flags import FLAGS
from tensorflow.python.platform import gfile
import cv2
import sys
SCORETHRES = 0.2
INPUTSIZE = 416
IOU = 0.45
SCORE = 0.25
#from PIL import Image
from script.model_darknet19 import darknet
from script.decode import decode
from script.utils import preprocess_image, postprocess, draw_detection
from script.config import anchors, class_names
labels_to_names = {0: 'person', 1: 'bicycle', 2: 'car', 3: 'motorbike', 4: 'aeroplane', 5: 'bus',
6: 'train', 7: 'truck', 8: 'boat', 9: 'trafficlight', 10: 'firehydrant',
11: 'stopsign', 12: 'parkingmeter', 13: 'bench', 14: 'bird', 15: 'cat',
16: 'dog', 17: 'horse', 18: 'sheep', 19: 'cow', 20: 'elephant',
21: 'bear', 22: 'zebra', 23: 'giraffe', 24: 'backpack', 25: 'umbrella',
26: 'handbag', 27: 'tie', 28: 'suitcase', 29: 'frisbee', 30: 'skis',
31: 'snowboard', 32: 'sportsball', 33: 'kite', 34: 'baseballbat',
35: 'baseballglove', 36: 'skateboard', 37: 'surfboard', 38: 'tennisracket', 39: 'bottle',
40: 'wineglass', 41: 'cup', 42: 'fork', 43: 'knife', 44: 'spoon', 45: 'bowl',
46: 'banana', 47: 'apple', 48: 'sandwich', 49: 'orange', 50: 'broccoli',
51: 'carrot', 52: 'hotdog', 53: 'pizza', 54: 'donut', 55: 'cake',
56: 'chair', 57: 'couch', 58: 'pottedplant', 59: 'bed', 60: 'diningtable',
61: 'toilet', 62: 'tv', 63: 'laptop', 64: 'mouse', 65: 'remote',
66: 'keyboard', 67: 'cellphone', 68: 'microwave', 69: 'oven',
70: 'toaster', 71: 'sink', 72: 'refrigerator', 73: 'book', 74: 'clock',
75: 'vase', 76: 'scissors', 77: 'teddybear', 78: 'hairdrier', 79: 'toothbrush'}
npu_PATH = '../result_Files/'
class yolov2_npu:
def run(self, npu_output):
bboxes = np.fromfile(npu_PATH+npu_output[:-5]+"2.bin", dtype="float32").reshape(1, 169, 5, 4)
obj_probs = np.fromfile(npu_PATH+npu_output, dtype="float32").reshape(1, 169, 5)
class_probs = np.fromfile(npu_PATH+npu_output[:-5]+"1.bin", dtype="float32").reshape(1, 169, 5, 80)
image = cv2.imread("./JPEGImages/" + npu_output.split("_output")[0] + ".jpg")
image_shape = image.shape[:2]
boxes, scores, classes = postprocess(bboxes,obj_probs,class_probs,image_shape=image_shape)
path = './detections_npu/'
        with open(os.path.join(path, npu_output.split("_output")[0] + ".txt"), 'a+') as f:
            for i in range(len(scores)):
                # strip internal spaces so each label is written as a single token,
                # one detection per line
                label = labels_to_names[classes[i]].replace(' ', '')
                f.write(label + " " + str(scores[i])
                        + " " + str(boxes[i][0]) + " " + str(boxes[i][1])
                        + " " + str(boxes[i][2]) + " " + str(boxes[i][3]) + "\n")
if __name__ == "__main__":
all_result_NAME = os.listdir(npu_PATH)
all_result_NAME.sort()
all_image_NAME = [fn for fn in all_result_NAME if fn[-5]=="0"]
yolov2_Npu = yolov2_npu()
for npu_output in all_image_NAME:
yolov2_Npu.run(npu_output)
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from enum import Enum, auto
from typing import Any, Callable, Iterable, Optional, Sequence, Tuple, Union
from typing_extensions import final
from libcst._parser.parso.pgen2.generator import ReservedString
from libcst._parser.parso.python.token import PythonTokenTypes, TokenType
from libcst._parser.types.token import Token
from libcst._tabs import expand_tabs
_EOF_STR: str = "end of file (EOF)"
_INDENT_STR: str = "an indent"
_DEDENT_STR: str = "a dedent"
_NEWLINE_CHARS: str = "\r\n"
class EOFSentinel(Enum):
EOF = auto()
def get_expected_str(
encountered: Union[Token, EOFSentinel],
expected: Union[Iterable[Union[TokenType, ReservedString]], EOFSentinel],
) -> str:
if (
isinstance(encountered, EOFSentinel)
or encountered.type is PythonTokenTypes.ENDMARKER
):
encountered_str = _EOF_STR
elif encountered.type is PythonTokenTypes.INDENT:
encountered_str = _INDENT_STR
elif encountered.type is PythonTokenTypes.DEDENT:
encountered_str = _DEDENT_STR
else:
encountered_str = repr(encountered.string)
if isinstance(expected, EOFSentinel):
expected_names = [_EOF_STR]
else:
expected_names = sorted(
[
repr(el.name) if isinstance(el, TokenType) else repr(el.value)
for el in expected
]
)
if len(expected_names) > 10:
# There's too many possibilities, so it's probably not useful to list them.
# Instead, let's just abbreviate the message.
return f"Unexpectedly encountered {encountered_str}."
else:
if len(expected_names) == 1:
expected_str = expected_names[0]
else:
expected_str = f"{', '.join(expected_names[:-1])}, or {expected_names[-1]}"
return f"Encountered {encountered_str}, but expected {expected_str}."
# pyre-fixme[2]: 'Any' type isn't pyre-strict.
def _parser_syntax_error_unpickle(kwargs: Any) -> "ParserSyntaxError":
return ParserSyntaxError(**kwargs)
@final
class PartialParserSyntaxError(Exception):
"""
An internal exception that represents a partially-constructed
:class:`ParserSyntaxError`. It's raised by our internal parser conversion functions,
which don't always know the current line and column information.
This partial object only contains a message, with the expectation that the line and
column information will be filled in by :class:`libcst._base_parser.BaseParser`.
This should never be visible to the end-user.
"""
message: str
def __init__(self, message: str) -> None:
self.message = message
@final
class ParserSyntaxError(Exception):
"""
Contains an error encountered while trying to parse a piece of source code. This
exception shouldn't be constructed directly by the user, but instead may be raised
by calls to :func:`parse_module`, :func:`parse_expression`, or
:func:`parse_statement`.
    This does not inherit from :class:`SyntaxError` because Python may raise a
    :class:`SyntaxError` for any number of reasons, potentially leading to unintended
    behavior.
"""
#: A human-readable explanation of the syntax error without information about where
#: the error occurred.
#:
#: For a human-readable explanation of the error alongside information about where
#: it occurred, use :meth:`__str__` (via ``str(ex)``) instead.
message: str
# An internal value used to compute `editor_column` and to pretty-print where the
# syntax error occurred in the code.
_lines: Sequence[str]
    #: The one-indexed line where the error occurred.
    raw_line: int
    #: The zero-indexed column as a number of characters from the start of the line
    #: where the error occurred.
raw_column: int
def __init__(
self, message: str, *, lines: Sequence[str], raw_line: int, raw_column: int
) -> None:
super(ParserSyntaxError, self).__init__(message)
self.message = message
self._lines = lines
self.raw_line = raw_line
self.raw_column = raw_column
def __reduce__(
self,
) -> Tuple[Callable[..., "ParserSyntaxError"], Tuple[object, ...]]:
return (
_parser_syntax_error_unpickle,
(
{
"message": self.message,
"lines": self._lines,
"raw_line": self.raw_line,
"raw_column": self.raw_column,
},
),
)
def __str__(self) -> str:
"""
        A multi-line, human-readable error message showing where the syntax error is
        in the user's code. For example::
Syntax Error @ 2:1.
Incomplete input. Encountered end of file (EOF), but expected 'except', or 'finally'.
try: pass
^
"""
context = self.context
return (
f"Syntax Error @ {self.editor_line}:{self.editor_column}.\n"
+ f"{self.message}"
+ (f"\n\n{context}" if context is not None else "")
)
def __repr__(self) -> str:
return (
"ParserSyntaxError("
+ f"{self.message!r}, lines=[...], raw_line={self.raw_line!r}, "
+ f"raw_column={self.raw_column!r})"
)
@property
def context(self) -> Optional[str]:
"""
A formatted string containing the line of code with the syntax error (or a
non-empty line above it) along with a caret indicating the exact column where
the error occurred.
        Returns ``None`` if there's no relevant non-empty line to show (e.g. the file
        consists of only blank lines).
"""
displayed_line = self.editor_line
displayed_column = self.editor_column
# we want to avoid displaying a blank line for context. If we're on a blank line
# find the nearest line above us that isn't blank.
while displayed_line >= 1 and not len(self._lines[displayed_line - 1].strip()):
displayed_line -= 1
displayed_column = len(self._lines[displayed_line - 1])
# only show context if we managed to find a non-empty line
if len(self._lines[displayed_line - 1].strip()):
formatted_source_line = expand_tabs(self._lines[displayed_line - 1]).rstrip(
_NEWLINE_CHARS
)
# fmt: off
return (
f"{formatted_source_line}\n"
+ f"{' ' * (displayed_column - 1)}^"
)
# fmt: on
else:
return None
@property
def editor_line(self) -> int:
"""
The expected one-indexed line in the user's editor. This is the same as
:attr:`raw_line`.
"""
return self.raw_line # raw_line is already one-indexed.
@property
def editor_column(self) -> int:
"""
The expected one-indexed column that's likely to match the behavior of the
user's editor, assuming tabs expand to 1-8 spaces. This is the column number
shown when the syntax error is printed out with `str`.
This assumes single-width characters. However, because python doesn't ship with
a wcwidth function, it's hard to handle this properly without a third-party
dependency.
For a raw zero-indexed character offset without tab expansion, see
:attr:`raw_column`.
"""
prefix_str = self._lines[self.raw_line - 1][: self.raw_column]
tab_adjusted_column = len(expand_tabs(prefix_str))
# Text editors use a one-indexed column, so we need to add one to our
# zero-indexed column to get a human-readable result.
return tab_adjusted_column + 1
class MetadataException(Exception):
pass
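# A minimal sketch (not part of the library) reproducing the example from
# ParserSyntaxError.__str__: the blank second line makes `context` walk back
# up to the "try: pass" line and point the caret just past its end.
#
#   err = ParserSyntaxError(
#       "Incomplete input. Encountered end of file (EOF), "
#       "but expected 'except', or 'finally'.",
#       lines=["try: pass\n", ""],
#       raw_line=2,
#       raw_column=0,
#   )
#   print(str(err))  # "Syntax Error @ 2:1." followed by the caret display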
|
#!/usr/bin/env python3
import pandas as pd # Read CSV
import requests # Make web requests
import os
from tqdm.auto import tqdm # Progress bar
import glob # Matching filenames
files = glob.glob("*.csv")
for file in files:
df = pd.read_csv(file)
    for i, row in tqdm(df.iterrows(), total=df.shape[0]):
        url = f"https://files.interpret.co.nz/Retrolens/Imagery/SN{row.Survey}/{row.Released_F}/High.jpg"
        folder = f"{row.Region_1}/{row.Site}/{row.Date[-4:]}"
        os.makedirs(folder, exist_ok=True)
        filename = f"{folder}/{row.Released_F}.jpg"
        if os.path.exists(filename):
            print(f"{filename} exists, skipping")
            continue
        print(f"Fetching {url}")
        r = requests.get(url)
        r.raise_for_status()  # fail loudly rather than saving an error page as a .jpg
        print(f"Writing to {filename}")
        with open(filename, "wb") as f:
            f.write(r.content)
|
from io import StringIO  # cStringIO is Python 2 only
SEPARATOR = None
def print_table(fields, rows):
widths = {}
for field in fields:
widths[field] = len(field)
    for row in rows:
        if row is SEPARATOR:
            continue
        # measure every field of every row, not just the last field of the header loop
        for field in fields:
            try:
                widths[field] = max(widths[field],
                                    len(str(row[field])))
            except KeyError:
                pass
    io = StringIO()
    io.write(' '.join(['%*s' % (widths[field], field) for field in fields]))
    io.write('\n')
for row in rows:
if row is SEPARATOR:
io.write(' '.join(['%*s' % (widths[field], '-' * widths[field]) for field in fields]))
else:
for field in fields:
try:
io.write('%*s ' % (widths[field], row[field]))
except KeyError:
io.write('%*s ' % (widths[field], ' ' * widths[field]))
io.write('\n')
return io.getvalue()
def by_language(rslt):
categories = rslt.categories()
types = rslt.types()
rows = [SEPARATOR]
sum_row = {}
for filetype in types:
fields = rslt.counts_by_type(filetype)
fields['type'] = filetype
rows.append(fields)
for cat,count in fields.items():
try:
sum_row[cat] += count
except KeyError:
sum_row[cat] = count
sum_row['type'] = 'SUM'
rows.append(SEPARATOR)
rows.append(sum_row)
categories = ['type'] + categories
return print_table(categories, rows)
|
import pprint
data = {'created_at': '2021-12-15T01:14:28Z',
'description': 'Measured temperature by the Si7021 sensor connected to '
'Adafruit Feather Huzzah32.',
'enabled': True,
'feed_status_changes': [],
'feed_webhook_receivers': [],
'group': {'id': 55706,
'key': 'feather-iot',
'name': 'FeatherIOT',
'user_id': 63146},
'groups': [{'id': 55706,
'key': 'feather-iot',
'name': 'FeatherIOT',
'user_id': 63146}],
'history': True,
'id': 1767913,
'key': 'temperatureesp32',
'last_value': '67.13',
'license': None,
'name': 'Temperature ESP32',
'owner': {'id': 63146, 'username': 'FlynntKnapp'},
'status': 'online',
'status_notify': False,
'status_timeout': 4320,
'unit_symbol': None,
'unit_type': None,
'updated_at': '2022-02-06T00:20:59Z',
'username': 'FlynntKnapp',
'visibility': 'public',
'wipper_pin_info': None,
'writable': True}
pprint.pprint(data)
|
import pytest
from unittest import TestCase, mock
import core.config
import core.widget
import modules.contrib.uptime
def build_module():
config = core.config.Config([])
return modules.contrib.uptime.Module(config=config, theme=None)
def widget(module):
return module.widgets()[0]
class UptimeTest(TestCase):
def test_load_module(self):
__import__("modules.contrib.uptime")
@mock.patch('builtins.open', new_callable=mock.mock_open, read_data='300000 10.45')
def test_uptime(self, uptime_mock):
module = build_module()
module.update()
uptime_mock.assert_called_with('/proc/uptime', 'r')
assert widget(module).full_text() == '3 days, 11:20:00'
|
from __future__ import print_function
import roslib
roslib.load_manifest('mct_camera_tools')
import rospy
import sys
from mct_camera_tools import camera_master
camera_master.set_camera_launch_param('calibration', False)
print(camera_master.get_camera_launch_param())
#camera_master.set_camera_launch_param('tracking', True)
#print(camera_master.get_camera_launch_param())
#camera_master.set_camera_launch_param('default', False)
#print(camera_master.get_camera_launch_param())
|
'''
Hello! Thank you for downloading a CORGIS library. However, you do not
need to open this file. Instead you should make your own Python file and
add the following line:
import police_shootings
Then just place the files you downloaded alongside it.
'''
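# A short usage sketch, assuming the downloaded files sit next to your script
# ("Person.Age" is one of the record keys listed in _tifa_definitions below):
#
#   import police_shootings
#   shootings = police_shootings.get_shootings()
#   print(shootings[0]["Person.Age"])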
import os as _os
import pickle as _pickle
__all__ = ['get_shootings']
def _tifa_definitions():
return {"type": "ModuleType",
"fields": {
'get': {
"type": "FunctionType",
"name": 'get',
"returns": {
"type": "ListType",
"empty": False,
"subtype": {"type": "NumType"}
}
},
'get_shootings': {
"type": "FunctionType",
"name": 'get_shootings',
"returns": {
"type": "ListType",
"empty": False,
"subtype": {
"type": "DictType",
"literals": [
{"type": "LiteralStr", "value": "Person.Name"},
{"type": "LiteralStr", "value": "Person.Age"},
{"type": "LiteralStr", "value": "Person.Gender"},
{"type": "LiteralStr", "value": "Person.Race"},
{"type": "LiteralStr", "value": "Incident.Date.Month"},
{"type": "LiteralStr", "value": "Incident.Date.Day"},
{"type": "LiteralStr", "value": "Incident.Date.Year"},
{"type": "LiteralStr", "value": "Incident.Date.Full"},
{"type": "LiteralStr", "value": "Incident.Location.City"},
{"type": "LiteralStr", "value": "Incident.Location.State"},
{"type": "LiteralStr", "value": "Factors.Armed"},
{"type": "LiteralStr", "value": "Factors.Mental-Illness"},
{"type": "LiteralStr", "value": "Factors.Threat-Level"},
{"type": "LiteralStr", "value": "Factors.Fleeing"},
{"type": "LiteralStr", "value": "Shooting.Manner"},
{"type": "LiteralStr", "value": "Shooting.Body-Camera"},
],
"values": [
{"type": "StrType"},
{"type": "NumType"},
{"type": "StrType"},
{"type": "StrType"},
{"type": "NumType"},
{"type": "NumType"},
{"type": "NumType"},
{"type": "StrType"},
{"type": "StrType"},
{"type": "StrType"},
{"type": "StrType"},
{"type": "BoolType"},
{"type": "StrType"},
{"type": "StrType"},
{"type": "StrType"},
{"type": "BoolType"},
]
}
}
},
}
}
|
from __future__ import print_function
"""
read_dsv.py
Author: Vasudev Ram
Web site: https://vasudevram.github.io
Blog: https://jugad2.blogspot.com
Product store: https://gumroad.com/vasudevram
Purpose: Shows how to read DSV data, i.e.
https://en.wikipedia.org/wiki/Delimiter-separated_values
from either files or standard input, split the fields of each
line on the delimiter, and process the fields in some way.
The delimiter character is configurable by the user and can
be specified as either a character or its ASCII code.
Reference:
TAOUP (The Art Of Unix Programming): Data File Metaformats:
http://www.catb.org/esr/writings/taoup/html/ch05s02.html
ASCII table: http://www.asciitable.com/
"""
import sys
import string
def err_write(message):
sys.stderr.write(message)
def error_exit(message):
err_write(message)
sys.exit(1)
def usage(argv, verbose=False):
usage1 = \
"{}: read and process DSV (Delimiter-Separated-Values) data.\n".format(argv[0])
usage2 = "Usage: python" + \
" {} [ -c delim_char | -n delim_code ] [ dsv_file ] ...\n".format(argv[0])
usage3 = [
"where one of either the -c or -n option must be given,\n",
"delim_char is a single ASCII delimiter character, and\n",
"delim_code is a delimiter character's ASCII code.\n",
"Text lines will be read from specified DSV file(s) or\n",
"from standard input, split on the specified delimiter\n",
"specified by either the -c or -n option, processed, and\n",
"written to standard output.\n",
]
err_write(usage1)
err_write(usage2)
if verbose:
for line in usage3:
err_write(line)
def str_to_int(s):
try:
return int(s)
except ValueError as ve:
error_exit(repr(ve))
def valid_delimiter(delim_code):
return not invalid_delimiter(delim_code)
def invalid_delimiter(delim_code):
# Non-ASCII codes not allowed, i.e. codes outside
# the range 0 to 255.
if delim_code < 0 or delim_code > 255:
return True
# Also, don't allow some specific ASCII codes;
# add more, if it turns out they are needed.
if delim_code in (10, 13):
return True
return False
def read_dsv(dsv_fil, delim_char):
for idx, lin in enumerate(dsv_fil):
fields = lin.split(delim_char)
assert len(fields) > 0
# Knock off the newline at the end of the last field,
# since it is the line terminator, not part of the field.
        if fields[-1].endswith('\n'):
            fields[-1] = fields[-1][:-1]
# Treat a blank line as a line with one field,
# an empty string (that is what split returns).
print("Line", idx, "fields:")
for idx2, field in enumerate(fields):
print(str(idx2) + ":", "|" + field + "|")
def main():
# Get and check validity of arguments.
sa = sys.argv
lsa = len(sa)
if lsa == 1:
usage(sa)
sys.exit(0)
if lsa == 2:
# Allow the help option with any letter case.
if sa[1].lower() in ("-h", "--help"):
usage(sa, verbose=True)
sys.exit(0)
else:
usage(sa)
sys.exit(0)
# If we reach here, lsa is >= 3.
# Check for valid mandatory options (sic).
if not sa[1] in ("-c", "-n"):
usage(sa, verbose=True)
sys.exit(0)
# If -c option given ...
if sa[1] == "-c":
# If next token is not a single character ...
if len(sa[2]) != 1:
error_exit(
"{}: Error: -c option needs a single character after it.".format(sa[0]))
if not sa[2] in string.printable:
error_exit(
"{}: Error: -c option needs a printable ASCII character after it.".format(\
sa[0]))
delim_char = sa[2]
# else if -n option given ...
elif sa[1] == "-n":
delim_code = str_to_int(sa[2])
if invalid_delimiter(delim_code):
error_exit(
"{}: Error: invalid delimiter code {} given for -n option.".format(\
sa[0], delim_code))
delim_char = chr(delim_code)
else:
# Checking for what should not happen ... a bit of defensive programming here.
error_exit("{}: Program error: neither -c nor -n option given.".format(sa[0]))
try:
# If no filenames given, read sys.stdin ...
if lsa == 3:
print("processing sys.stdin")
dsv_fil = sys.stdin
read_dsv(dsv_fil, delim_char)
dsv_fil.close()
# else (filenames given), read them ...
else:
for dsv_filename in sa[3:]:
print("processing file:", dsv_filename)
dsv_fil = open(dsv_filename, 'r')
read_dsv(dsv_fil, delim_char)
dsv_fil.close()
except IOError as ioe:
error_exit("{}: Error: {}".format(sa[0], repr(ioe)))
if __name__ == '__main__':
main()
|
# -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'ui_window.ui'
#
# Created by: PyQt5 UI code generator 5.11.3
#
# WARNING! All changes made in this file will be lost!
from PyQt5 import QtCore, QtGui, QtWidgets
class Ui_MainWindow(object):
def setupUi(self, MainWindow):
MainWindow.setObjectName("MainWindow")
MainWindow.resize(1000, 684)
self.centralwidget = QtWidgets.QWidget(MainWindow)
self.centralwidget.setObjectName("centralwidget")
self.tabWidget = QtWidgets.QTabWidget(self.centralwidget)
self.tabWidget.setGeometry(QtCore.QRect(130, 30, 861, 591))
self.tabWidget.setObjectName("tabWidget")
self.tab_timer = QtWidgets.QWidget()
self.tab_timer.setObjectName("tab_timer")
self.groupBox = QtWidgets.QGroupBox(self.tab_timer)
self.groupBox.setGeometry(QtCore.QRect(30, 30, 291, 241))
self.groupBox.setObjectName("groupBox")
self.gridLayoutWidget = QtWidgets.QWidget(self.groupBox)
self.gridLayoutWidget.setGeometry(QtCore.QRect(20, 40, 226, 181))
self.gridLayoutWidget.setObjectName("gridLayoutWidget")
self.Grid_micro_break = QtWidgets.QGridLayout(self.gridLayoutWidget)
self.Grid_micro_break.setContentsMargins(0, 0, 0, 0)
self.Grid_micro_break.setObjectName("Grid_micro_break")
self.work_s_mic = QtWidgets.QSpinBox(self.gridLayoutWidget)
self.work_s_mic.setMaximum(59)
self.work_s_mic.setObjectName("work_s_mic")
self.Grid_micro_break.addWidget(self.work_s_mic, 0, 3, 1, 1)
self.label_2 = QtWidgets.QLabel(self.gridLayoutWidget)
self.label_2.setObjectName("label_2")
self.Grid_micro_break.addWidget(self.label_2, 1, 0, 1, 1)
self.post_m_mic = QtWidgets.QSpinBox(self.gridLayoutWidget)
self.post_m_mic.setMaximum(59)
self.post_m_mic.setObjectName("post_m_mic")
self.Grid_micro_break.addWidget(self.post_m_mic, 2, 2, 1, 1)
self.break_s_mic = QtWidgets.QSpinBox(self.gridLayoutWidget)
self.break_s_mic.setMaximum(59)
self.break_s_mic.setProperty("value", 16)
self.break_s_mic.setObjectName("break_s_mic")
self.Grid_micro_break.addWidget(self.break_s_mic, 1, 3, 1, 1)
self.post_h_mic = QtWidgets.QSpinBox(self.gridLayoutWidget)
self.post_h_mic.setObjectName("post_h_mic")
self.Grid_micro_break.addWidget(self.post_h_mic, 2, 1, 1, 1)
self.work_m_mic = QtWidgets.QSpinBox(self.gridLayoutWidget)
self.work_m_mic.setMaximum(59)
self.work_m_mic.setObjectName("work_m_mic")
self.Grid_micro_break.addWidget(self.work_m_mic, 0, 2, 1, 1)
self.label = QtWidgets.QLabel(self.gridLayoutWidget)
self.label.setObjectName("label")
self.Grid_micro_break.addWidget(self.label, 0, 0, 1, 1)
self.post_s_mic = QtWidgets.QSpinBox(self.gridLayoutWidget)
self.post_s_mic.setMaximum(59)
self.post_s_mic.setObjectName("post_s_mic")
self.Grid_micro_break.addWidget(self.post_s_mic, 2, 3, 1, 1)
self.break_m_mic = QtWidgets.QSpinBox(self.gridLayoutWidget)
self.break_m_mic.setMaximum(59)
self.break_m_mic.setObjectName("break_m_mic")
self.Grid_micro_break.addWidget(self.break_m_mic, 1, 2, 1, 1)
self.work_h_mic = QtWidgets.QSpinBox(self.gridLayoutWidget)
self.work_h_mic.setObjectName("work_h_mic")
self.Grid_micro_break.addWidget(self.work_h_mic, 0, 1, 1, 1)
self.label_3 = QtWidgets.QLabel(self.gridLayoutWidget)
self.label_3.setObjectName("label_3")
self.Grid_micro_break.addWidget(self.label_3, 2, 0, 1, 1)
self.break_h_mic = QtWidgets.QSpinBox(self.gridLayoutWidget)
self.break_h_mic.setObjectName("break_h_mic")
self.Grid_micro_break.addWidget(self.break_h_mic, 1, 1, 1, 1)
self.label_4 = QtWidgets.QLabel(self.gridLayoutWidget)
self.label_4.setObjectName("label_4")
self.Grid_micro_break.addWidget(self.label_4, 3, 0, 1, 1)
self.idle_h_mic = QtWidgets.QSpinBox(self.gridLayoutWidget)
self.idle_h_mic.setObjectName("idle_h_mic")
self.Grid_micro_break.addWidget(self.idle_h_mic, 3, 1, 1, 1)
self.idle_m_mic = QtWidgets.QSpinBox(self.gridLayoutWidget)
self.idle_m_mic.setMaximum(59)
self.idle_m_mic.setObjectName("idle_m_mic")
self.Grid_micro_break.addWidget(self.idle_m_mic, 3, 2, 1, 1)
self.idle_s_mic = QtWidgets.QSpinBox(self.gridLayoutWidget)
self.idle_s_mic.setMaximum(59)
self.idle_s_mic.setObjectName("idle_s_mic")
self.Grid_micro_break.addWidget(self.idle_s_mic, 3, 3, 1, 1)
self.groupBox_2 = QtWidgets.QGroupBox(self.tab_timer)
self.groupBox_2.setGeometry(QtCore.QRect(350, 30, 291, 241))
self.groupBox_2.setObjectName("groupBox_2")
self.gridLayoutWidget_2 = QtWidgets.QWidget(self.groupBox_2)
self.gridLayoutWidget_2.setGeometry(QtCore.QRect(20, 40, 226, 181))
self.gridLayoutWidget_2.setObjectName("gridLayoutWidget_2")
self.Grid_micro_break_3 = QtWidgets.QGridLayout(self.gridLayoutWidget_2)
self.Grid_micro_break_3.setContentsMargins(0, 0, 0, 0)
self.Grid_micro_break_3.setObjectName("Grid_micro_break_3")
self.label_15 = QtWidgets.QLabel(self.gridLayoutWidget_2)
self.label_15.setObjectName("label_15")
self.Grid_micro_break_3.addWidget(self.label_15, 3, 0, 1, 1)
self.idle_m_mac = QtWidgets.QSpinBox(self.gridLayoutWidget_2)
self.idle_m_mac.setMaximum(59)
self.idle_m_mac.setObjectName("idle_m_mac")
self.Grid_micro_break_3.addWidget(self.idle_m_mac, 3, 2, 1, 1)
self.idle_h_mac = QtWidgets.QSpinBox(self.gridLayoutWidget_2)
self.idle_h_mac.setObjectName("idle_h_mac")
self.Grid_micro_break_3.addWidget(self.idle_h_mac, 3, 1, 1, 1)
self.work_m_mac = QtWidgets.QSpinBox(self.gridLayoutWidget_2)
self.work_m_mac.setMaximum(59)
self.work_m_mac.setObjectName("work_m_mac")
self.Grid_micro_break_3.addWidget(self.work_m_mac, 0, 2, 1, 1)
self.work_s_mac = QtWidgets.QSpinBox(self.gridLayoutWidget_2)
self.work_s_mac.setMaximum(59)
self.work_s_mac.setObjectName("work_s_mac")
self.Grid_micro_break_3.addWidget(self.work_s_mac, 0, 3, 1, 1)
self.label_8 = QtWidgets.QLabel(self.gridLayoutWidget_2)
self.label_8.setObjectName("label_8")
self.Grid_micro_break_3.addWidget(self.label_8, 0, 0, 1, 1)
self.break_h_mac = QtWidgets.QSpinBox(self.gridLayoutWidget_2)
self.break_h_mac.setObjectName("break_h_mac")
self.Grid_micro_break_3.addWidget(self.break_h_mac, 1, 1, 1, 1)
self.label_7 = QtWidgets.QLabel(self.gridLayoutWidget_2)
self.label_7.setObjectName("label_7")
self.Grid_micro_break_3.addWidget(self.label_7, 1, 0, 1, 1)
self.label_9 = QtWidgets.QLabel(self.gridLayoutWidget_2)
self.label_9.setObjectName("label_9")
self.Grid_micro_break_3.addWidget(self.label_9, 2, 0, 1, 1)
self.work_h_mac = QtWidgets.QSpinBox(self.gridLayoutWidget_2)
self.work_h_mac.setObjectName("work_h_mac")
self.Grid_micro_break_3.addWidget(self.work_h_mac, 0, 1, 1, 1)
self.post_s_mac = QtWidgets.QSpinBox(self.gridLayoutWidget_2)
self.post_s_mac.setMaximum(59)
self.post_s_mac.setObjectName("post_s_mac")
self.Grid_micro_break_3.addWidget(self.post_s_mac, 2, 3, 1, 1)
self.break_s_mac = QtWidgets.QSpinBox(self.gridLayoutWidget_2)
self.break_s_mac.setMaximum(59)
self.break_s_mac.setObjectName("break_s_mac")
self.Grid_micro_break_3.addWidget(self.break_s_mac, 1, 3, 1, 1)
self.post_m_mac = QtWidgets.QSpinBox(self.gridLayoutWidget_2)
self.post_m_mac.setMaximum(59)
self.post_m_mac.setObjectName("post_m_mac")
self.Grid_micro_break_3.addWidget(self.post_m_mac, 2, 2, 1, 1)
self.post_h_mac = QtWidgets.QSpinBox(self.gridLayoutWidget_2)
self.post_h_mac.setObjectName("post_h_mac")
self.Grid_micro_break_3.addWidget(self.post_h_mac, 2, 1, 1, 1)
self.break_m_mac = QtWidgets.QSpinBox(self.gridLayoutWidget_2)
self.break_m_mac.setMaximum(59)
self.break_m_mac.setObjectName("break_m_mac")
self.Grid_micro_break_3.addWidget(self.break_m_mac, 1, 2, 1, 1)
self.idle_s_mac = QtWidgets.QSpinBox(self.gridLayoutWidget_2)
self.idle_s_mac.setMaximum(59)
self.idle_s_mac.setObjectName("idle_s_mac")
self.Grid_micro_break_3.addWidget(self.idle_s_mac, 3, 3, 1, 1)
self.groupBox_3 = QtWidgets.QGroupBox(self.tab_timer)
self.groupBox_3.setGeometry(QtCore.QRect(50, 320, 171, 141))
self.groupBox_3.setObjectName("groupBox_3")
self.widget = QtWidgets.QWidget(self.groupBox_3)
self.widget.setGeometry(QtCore.QRect(20, 30, 115, 76))
self.widget.setObjectName("widget")
self.verticalLayout = QtWidgets.QVBoxLayout(self.widget)
self.verticalLayout.setContentsMargins(0, 0, 0, 0)
self.verticalLayout.setObjectName("verticalLayout")
self.show_timer_cbox = QtWidgets.QCheckBox(self.widget)
self.show_timer_cbox.setObjectName("show_timer_cbox")
self.verticalLayout.addWidget(self.show_timer_cbox)
self.show_skip_cbox = QtWidgets.QCheckBox(self.widget)
self.show_skip_cbox.setObjectName("show_skip_cbox")
self.verticalLayout.addWidget(self.show_skip_cbox)
self.show_post_cbox = QtWidgets.QCheckBox(self.widget)
self.show_post_cbox.setObjectName("show_post_cbox")
self.verticalLayout.addWidget(self.show_post_cbox)
self.start_push = QtWidgets.QPushButton(self.tab_timer)
self.start_push.setGeometry(QtCore.QRect(270, 340, 93, 28))
self.start_push.setObjectName("start_push")
self.get_value_push = QtWidgets.QPushButton(self.tab_timer)
self.get_value_push.setGeometry(QtCore.QRect(400, 340, 93, 28))
self.get_value_push.setObjectName("get_value_push")
self.save_push = QtWidgets.QPushButton(self.tab_timer)
self.save_push.setGeometry(QtCore.QRect(400, 390, 93, 28))
self.save_push.setObjectName("save_push")
self.groupBox_7 = QtWidgets.QGroupBox(self.tab_timer)
self.groupBox_7.setGeometry(QtCore.QRect(660, 30, 161, 91))
self.groupBox_7.setObjectName("groupBox_7")
self.total_time = QtWidgets.QTimeEdit(self.groupBox_7)
self.total_time.setGeometry(QtCore.QRect(10, 30, 118, 22))
self.total_time.setTime(QtCore.QTime(10, 0, 0))
self.total_time.setObjectName("total_time")
self.tabWidget.addTab(self.tab_timer, "")
self.tab_camera = QtWidgets.QWidget()
self.tab_camera.setObjectName("tab_camera")
self.groupBox_4 = QtWidgets.QGroupBox(self.tab_camera)
self.groupBox_4.setGeometry(QtCore.QRect(70, 70, 291, 241))
self.groupBox_4.setObjectName("groupBox_4")
self.gridLayoutWidget_3 = QtWidgets.QWidget(self.groupBox_4)
self.gridLayoutWidget_3.setGeometry(QtCore.QRect(20, 40, 235, 181))
self.gridLayoutWidget_3.setObjectName("gridLayoutWidget_3")
self.Grid_micro_break_2 = QtWidgets.QGridLayout(self.gridLayoutWidget_3)
self.Grid_micro_break_2.setContentsMargins(0, 0, 0, 0)
self.Grid_micro_break_2.setObjectName("Grid_micro_break_2")
self.sample_period_m = QtWidgets.QSpinBox(self.gridLayoutWidget_3)
self.sample_period_m.setMaximum(59)
self.sample_period_m.setObjectName("sample_period_m")
self.Grid_micro_break_2.addWidget(self.sample_period_m, 0, 2, 1, 1)
self.label_5 = QtWidgets.QLabel(self.gridLayoutWidget_3)
self.label_5.setObjectName("label_5")
self.Grid_micro_break_2.addWidget(self.label_5, 0, 0, 1, 1)
self.label_6 = QtWidgets.QLabel(self.gridLayoutWidget_3)
self.label_6.setObjectName("label_6")
self.Grid_micro_break_2.addWidget(self.label_6, 1, 0, 1, 1)
self.sample_period_h = QtWidgets.QSpinBox(self.gridLayoutWidget_3)
self.sample_period_h.setObjectName("sample_period_h")
self.Grid_micro_break_2.addWidget(self.sample_period_h, 0, 1, 1, 1)
self.sample_duration_m = QtWidgets.QSpinBox(self.gridLayoutWidget_3)
self.sample_duration_m.setMaximum(59)
self.sample_duration_m.setObjectName("sample_duration_m")
self.Grid_micro_break_2.addWidget(self.sample_duration_m, 1, 2, 1, 1)
self.sample_period_s = QtWidgets.QSpinBox(self.gridLayoutWidget_3)
self.sample_period_s.setMaximum(59)
self.sample_period_s.setObjectName("sample_period_s")
self.Grid_micro_break_2.addWidget(self.sample_period_s, 0, 3, 1, 1)
self.sample_duration_s = QtWidgets.QSpinBox(self.gridLayoutWidget_3)
self.sample_duration_s.setMaximum(59)
self.sample_duration_s.setObjectName("sample_duration_s")
self.Grid_micro_break_2.addWidget(self.sample_duration_s, 1, 3, 1, 1)
self.sample_duration_h = QtWidgets.QSpinBox(self.gridLayoutWidget_3)
self.sample_duration_h.setObjectName("sample_duration_h")
self.Grid_micro_break_2.addWidget(self.sample_duration_h, 1, 1, 1, 1)
self.groupBox_5 = QtWidgets.QGroupBox(self.tab_camera)
self.groupBox_5.setGeometry(QtCore.QRect(390, 70, 371, 241))
self.groupBox_5.setObjectName("groupBox_5")
self.gridLayoutWidget_4 = QtWidgets.QWidget(self.groupBox_5)
self.gridLayoutWidget_4.setGeometry(QtCore.QRect(20, 30, 345, 181))
self.gridLayoutWidget_4.setObjectName("gridLayoutWidget_4")
self.Grid_micro_break_4 = QtWidgets.QGridLayout(self.gridLayoutWidget_4)
self.Grid_micro_break_4.setContentsMargins(0, 0, 0, 0)
self.Grid_micro_break_4.setObjectName("Grid_micro_break_4")
self.rbut_cf = QtWidgets.QRadioButton(self.gridLayoutWidget_4)
self.rbut_cf.setObjectName("rbut_cf")
self.Grid_micro_break_4.addWidget(self.rbut_cf, 0, 2, 1, 1)
self.param_box_tf = QtWidgets.QDoubleSpinBox(self.gridLayoutWidget_4)
self.param_box_tf.setMaximum(1.0)
self.param_box_tf.setSingleStep(0.1)
self.param_box_tf.setObjectName("param_box_tf")
self.Grid_micro_break_4.addWidget(self.param_box_tf, 1, 1, 1, 1)
self.post_m_mic_3 = QtWidgets.QSpinBox(self.gridLayoutWidget_4)
self.post_m_mic_3.setMaximum(1)
self.post_m_mic_3.setObjectName("post_m_mic_3")
self.Grid_micro_break_4.addWidget(self.post_m_mic_3, 1, 2, 1, 1)
self.post_s_mic_3 = QtWidgets.QSpinBox(self.gridLayoutWidget_4)
self.post_s_mic_3.setObjectName("post_s_mic_3")
self.Grid_micro_break_4.addWidget(self.post_s_mic_3, 1, 3, 1, 1)
self.label_11 = QtWidgets.QLabel(self.gridLayoutWidget_4)
self.label_11.setObjectName("label_11")
self.Grid_micro_break_4.addWidget(self.label_11, 1, 0, 1, 1)
self.rbut_tf = QtWidgets.QRadioButton(self.gridLayoutWidget_4)
self.rbut_tf.setObjectName("rbut_tf")
self.Grid_micro_break_4.addWidget(self.rbut_tf, 0, 1, 1, 1)
self.label_10 = QtWidgets.QLabel(self.gridLayoutWidget_4)
self.label_10.setObjectName("label_10")
self.Grid_micro_break_4.addWidget(self.label_10, 0, 0, 1, 1)
self.rbut_haar = QtWidgets.QRadioButton(self.gridLayoutWidget_4)
self.rbut_haar.setObjectName("rbut_haar")
self.Grid_micro_break_4.addWidget(self.rbut_haar, 0, 3, 1, 1)
self.label_12 = QtWidgets.QLabel(self.gridLayoutWidget_4)
self.label_12.setObjectName("label_12")
self.Grid_micro_break_4.addWidget(self.label_12, 2, 0, 1, 1)
self.img_resize_box = QtWidgets.QDoubleSpinBox(self.gridLayoutWidget_4)
self.img_resize_box.setMaximum(1.0)
self.img_resize_box.setSingleStep(0.1)
self.img_resize_box.setObjectName("img_resize_box")
self.Grid_micro_break_4.addWidget(self.img_resize_box, 2, 1, 1, 1)
self.take_pic_push = QtWidgets.QPushButton(self.tab_camera)
self.take_pic_push.setEnabled(False)
self.take_pic_push.setGeometry(QtCore.QRect(80, 340, 93, 28))
self.take_pic_push.setObjectName("take_pic_push")
self.save_pic_push = QtWidgets.QPushButton(self.tab_camera)
self.save_pic_push.setEnabled(False)
self.save_pic_push.setGeometry(QtCore.QRect(80, 410, 93, 28))
self.save_pic_push.setCheckable(False)
self.save_pic_push.setObjectName("save_pic_push")
self.take_vid_push = QtWidgets.QPushButton(self.tab_camera)
self.take_vid_push.setEnabled(False)
self.take_vid_push.setGeometry(QtCore.QRect(190, 340, 93, 28))
self.take_vid_push.setObjectName("take_vid_push")
self.save_vid_push = QtWidgets.QPushButton(self.tab_camera)
self.save_vid_push.setEnabled(False)
self.save_vid_push.setGeometry(QtCore.QRect(190, 410, 93, 28))
self.save_vid_push.setObjectName("save_vid_push")
self.vid_view = QtWidgets.QLabel(self.tab_camera)
self.vid_view.setGeometry(QtCore.QRect(410, 330, 291, 181))
self.vid_view.setFrameShape(QtWidgets.QFrame.StyledPanel)
self.vid_view.setFrameShadow(QtWidgets.QFrame.Sunken)
self.vid_view.setText("")
self.vid_view.setObjectName("vid_view")
self.det_check = QtWidgets.QCheckBox(self.tab_camera)
self.det_check.setGeometry(QtCore.QRect(300, 400, 81, 20))
self.det_check.setChecked(True)
self.det_check.setObjectName("det_check")
self.test_cam_check = QtWidgets.QCheckBox(self.tab_camera)
self.test_cam_check.setEnabled(True)
self.test_cam_check.setGeometry(QtCore.QRect(300, 350, 81, 20))
self.test_cam_check.setChecked(False)
self.test_cam_check.setObjectName("test_cam_check")
self.tabWidget.addTab(self.tab_camera, "")
self.tab_misc = QtWidgets.QWidget()
self.tab_misc.setObjectName("tab_misc")
self.groupBox_6 = QtWidgets.QGroupBox(self.tab_misc)
self.groupBox_6.setGeometry(QtCore.QRect(170, 90, 291, 241))
self.groupBox_6.setObjectName("groupBox_6")
self.gridLayoutWidget_5 = QtWidgets.QWidget(self.groupBox_6)
self.gridLayoutWidget_5.setGeometry(QtCore.QRect(20, 40, 235, 181))
self.gridLayoutWidget_5.setObjectName("gridLayoutWidget_5")
self.Grid_micro_break_5 = QtWidgets.QGridLayout(self.gridLayoutWidget_5)
self.Grid_micro_break_5.setContentsMargins(0, 0, 0, 0)
self.Grid_micro_break_5.setObjectName("Grid_micro_break_5")
self.work_m_mic_3 = QtWidgets.QSpinBox(self.gridLayoutWidget_5)
self.work_m_mic_3.setObjectName("work_m_mic_3")
self.Grid_micro_break_5.addWidget(self.work_m_mic_3, 0, 2, 1, 1)
self.label_13 = QtWidgets.QLabel(self.gridLayoutWidget_5)
self.label_13.setObjectName("label_13")
self.Grid_micro_break_5.addWidget(self.label_13, 0, 0, 1, 1)
self.label_14 = QtWidgets.QLabel(self.gridLayoutWidget_5)
self.label_14.setObjectName("label_14")
self.Grid_micro_break_5.addWidget(self.label_14, 1, 0, 1, 1)
self.work_h_mic_3 = QtWidgets.QSpinBox(self.gridLayoutWidget_5)
self.work_h_mic_3.setObjectName("work_h_mic_3")
self.Grid_micro_break_5.addWidget(self.work_h_mic_3, 0, 1, 1, 1)
self.post_m_mic_4 = QtWidgets.QSpinBox(self.gridLayoutWidget_5)
self.post_m_mic_4.setObjectName("post_m_mic_4")
self.Grid_micro_break_5.addWidget(self.post_m_mic_4, 1, 2, 1, 1)
self.work_s_mic_3 = QtWidgets.QSpinBox(self.gridLayoutWidget_5)
self.work_s_mic_3.setObjectName("work_s_mic_3")
self.Grid_micro_break_5.addWidget(self.work_s_mic_3, 0, 3, 1, 1)
self.post_s_mic_4 = QtWidgets.QSpinBox(self.gridLayoutWidget_5)
self.post_s_mic_4.setObjectName("post_s_mic_4")
self.Grid_micro_break_5.addWidget(self.post_s_mic_4, 1, 3, 1, 1)
self.post_h_mic_3 = QtWidgets.QSpinBox(self.gridLayoutWidget_5)
self.post_h_mic_3.setObjectName("post_h_mic_3")
self.Grid_micro_break_5.addWidget(self.post_h_mic_3, 1, 1, 1, 1)
self.lcdNumber = QtWidgets.QLCDNumber(self.tab_misc)
self.lcdNumber.setGeometry(QtCore.QRect(150, 340, 64, 23))
self.lcdNumber.setSmallDecimalPoint(True)
self.lcdNumber.setObjectName("lcdNumber")
self.lcdNumber_2 = QtWidgets.QLCDNumber(self.tab_misc)
self.lcdNumber_2.setGeometry(QtCore.QRect(230, 340, 64, 23))
self.lcdNumber_2.setObjectName("lcdNumber_2")
self.lcdNumber_3 = QtWidgets.QLCDNumber(self.tab_misc)
self.lcdNumber_3.setGeometry(QtCore.QRect(310, 340, 64, 23))
self.lcdNumber_3.setObjectName("lcdNumber_3")
self.progressBar = QtWidgets.QProgressBar(self.tab_misc)
self.progressBar.setEnabled(True)
self.progressBar.setGeometry(QtCore.QRect(230, 400, 118, 23))
self.progressBar.setProperty("value", 24)
self.progressBar.setInvertedAppearance(False)
self.progressBar.setObjectName("progressBar")
self.tabWidget.addTab(self.tab_misc, "")
MainWindow.setCentralWidget(self.centralwidget)
self.menubar = QtWidgets.QMenuBar(MainWindow)
self.menubar.setGeometry(QtCore.QRect(0, 0, 1000, 26))
self.menubar.setObjectName("menubar")
MainWindow.setMenuBar(self.menubar)
self.statusbar = QtWidgets.QStatusBar(MainWindow)
self.statusbar.setObjectName("statusbar")
MainWindow.setStatusBar(self.statusbar)
self.retranslateUi(MainWindow)
self.tabWidget.setCurrentIndex(1)
QtCore.QMetaObject.connectSlotsByName(MainWindow)
def retranslateUi(self, MainWindow):
_translate = QtCore.QCoreApplication.translate
MainWindow.setWindowTitle(_translate("MainWindow", "MainWindow"))
self.tab_timer.setWhatsThis(_translate("MainWindow", "<html><head/><body><p><br/></p></body></html>"))
self.groupBox.setTitle(_translate("MainWindow", "Micro Break"))
self.label_2.setText(_translate("MainWindow", "break duration"))
self.label.setText(_translate("MainWindow", "work duration"))
self.label_3.setText(_translate("MainWindow", "postpone time"))
self.label_4.setText(_translate("MainWindow", "idle time"))
self.groupBox_2.setTitle(_translate("MainWindow", "Macro Break"))
self.label_15.setText(_translate("MainWindow", "idle time"))
self.label_8.setText(_translate("MainWindow", "work duration"))
self.label_7.setText(_translate("MainWindow", "break duration"))
self.label_9.setText(_translate("MainWindow", "postpone time"))
self.groupBox_3.setTitle(_translate("MainWindow", "Options"))
self.show_timer_cbox.setText(_translate("MainWindow", "show timer"))
self.show_skip_cbox.setText(_translate("MainWindow", "show skip"))
self.show_post_cbox.setText(_translate("MainWindow", "show postpone"))
self.start_push.setText(_translate("MainWindow", "start"))
self.get_value_push.setText(_translate("MainWindow", "get values"))
self.save_push.setText(_translate("MainWindow", "save values"))
self.groupBox_7.setTitle(_translate("MainWindow", "Total daily"))
self.total_time.setDisplayFormat(_translate("MainWindow", "h:mm "))
self.tabWidget.setTabText(self.tabWidget.indexOf(self.tab_timer), _translate("MainWindow", "timer"))
self.groupBox_4.setTitle(_translate("MainWindow", "Sampling"))
self.label_5.setText(_translate("MainWindow", "sample period"))
self.label_6.setText(_translate("MainWindow", "sample duration"))
self.groupBox_5.setTitle(_translate("MainWindow", "Algorithm"))
self.rbut_cf.setText(_translate("MainWindow", "caffe"))
self.label_11.setText(_translate("MainWindow", "parameter"))
self.rbut_tf.setText(_translate("MainWindow", "tensorflow"))
self.label_10.setText(_translate("MainWindow", "Algorithm"))
        self.rbut_haar.setText(_translate("MainWindow", "haar"))
self.label_12.setText(_translate("MainWindow", "image"))
self.take_pic_push.setText(_translate("MainWindow", "take a picture"))
self.save_pic_push.setText(_translate("MainWindow", "save picture"))
self.take_vid_push.setText(_translate("MainWindow", "take a vid"))
self.save_vid_push.setText(_translate("MainWindow", "save vid"))
self.det_check.setText(_translate("MainWindow", "detect"))
self.test_cam_check.setText(_translate("MainWindow", "test cam"))
self.tabWidget.setTabText(self.tabWidget.indexOf(self.tab_camera), _translate("MainWindow", "camera"))
self.groupBox_6.setTitle(_translate("MainWindow", "Sampling"))
self.label_13.setText(_translate("MainWindow", "sample period"))
self.label_14.setText(_translate("MainWindow", "sample duration"))
self.progressBar.setFormat(_translate("MainWindow", "%p%"))
self.tabWidget.setTabText(self.tabWidget.indexOf(self.tab_misc), _translate("MainWindow", "misc"))
|
#!/usr/bin/env python3
"""
.. codeauthor:: Tsuyoshi Hombashi <tsuyoshi.hombashi@gmail.com>
"""
import sys
from readmemaker import ReadmeMaker
PROJECT_NAME = "typepy"
OUTPUT_DIR = ".."
def write_examples(maker):
maker.set_indent_level(0)
maker.write_chapter("Usage")
maker.write_introduction_file("usage.txt")
def main():
maker = ReadmeMaker(
PROJECT_NAME,
OUTPUT_DIR,
is_make_toc=True,
project_url=f"https://github.com/thombashi/{PROJECT_NAME}",
)
maker.write_chapter("Summary")
maker.write_introduction_file("summary.txt")
maker.write_introduction_file("badges.txt")
maker.write_chapter("Features")
maker.write_introduction_file("features.txt")
maker.write_introduction_file("installation.rst")
write_examples(maker)
maker.set_indent_level(0)
maker.write_chapter("Documentation")
maker.write_lines([f"https://{PROJECT_NAME:s}.rtfd.io/"])
return 0
if __name__ == "__main__":
sys.exit(main())
|
"""
authenticators: the server instance accepts an authenticator object,
which is basically any callable (i.e., a function) that takes the newly
connected socket and "authenticates" it.
the authenticator should return a socket-like object with its associated
credentials (a tuple), or raise AuthenticationError if it fails.
a very trivial authenticator might be
def magic_word_authenticator(sock):
        if sock.recv(5) != b"Ma6ik":
raise AuthenticationError("wrong magic word")
return sock, None
s = ThreadedServer(...., authenticator = magic_word_authenticator)
your authenticator can return any socket-like object. for instance, it may
authenticate the client and return a TLS/SSL-wrapped socket object that
encrypts the transport.
the credentials returned alongside the new socket can be any object.
it will be stored in the rpyc connection configuration under the key
"credentials", and may be used later by the service logic. if no credentials
are applicable, just return None as in the example above.
rpyc includes integration with tlslite, a TLS/SSL library:
the VdbAuthenticator class authenticates clients based on username-password
pairs.
"""
import os
import sys
try:
    import anydbm
except ImportError:  # Python 3 renamed anydbm to dbm
    import dbm as anydbm
from rpyc.lib import safe_import
tlsapi = safe_import("tlslite.api")
ssl = safe_import("ssl")
class AuthenticationError(Exception):
pass
class SSLAuthenticator(object):
def __init__(self, keyfile, certfile, ca_certs = None, ssl_version = None):
self.keyfile = keyfile
self.certfile = certfile
self.ca_certs = ca_certs
if ca_certs:
self.cert_reqs = ssl.CERT_REQUIRED
else:
self.cert_reqs = ssl.CERT_NONE
if ssl_version:
self.ssl_version = ssl_version
else:
self.ssl_version = ssl.PROTOCOL_TLSv1
def __call__(self, sock):
try:
sock2 = ssl.wrap_socket(sock, keyfile = self.keyfile, certfile = self.certfile,
server_side = True, ssl_version = self.ssl_version, ca_certs = self.ca_certs,
cert_reqs = self.cert_reqs)
except ssl.SSLError:
ex = sys.exc_info()[1]
raise AuthenticationError(str(ex))
return sock2, sock2.getpeercert()
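# A minimal wiring sketch (service name and key/cert paths are assumptions):
#   from rpyc.utils.server import ThreadedServer
#   auth = SSLAuthenticator("server.key", "server.crt")
#   ThreadedServer(MyService, port=18861, authenticator=auth).start()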
class TlsliteVdbAuthenticator(object):
__slots__ = ["vdb"]
BITS = 2048
def __init__(self, vdb):
self.vdb = vdb
@classmethod
def from_dict(cls, users):
inst = cls(tlsapi.VerifierDB())
        for username, password in users.items():
inst.set_user(username, password)
return inst
@classmethod
def _load_vdb_with_mode(cls, vdb, mode):
"""taken from tlslite/BaseDB.py -- patched for file mode"""
# {{
db = anydbm.open(vdb.filename, mode)
try:
if db["--Reserved--type"] != vdb.type:
raise ValueError("Not a %s database" % (vdb.type,))
except KeyError:
raise ValueError("Not a recognized database")
vdb.db = db
# }}
@classmethod
def from_file(cls, filename, mode = "w"):
vdb = tlsapi.VerifierDB(filename)
if os.path.exists(filename):
cls._load_vdb_with_mode(vdb, mode)
else:
if mode not in "ncw":
raise ValueError("%s does not exist but mode does not allow "
"writing (%r)" % (filename, mode))
vdb.create()
return cls(vdb)
def sync(self):
self.vdb.db.sync()
def set_user(self, username, password):
self.vdb[username] = self.vdb.makeVerifier(username, password, self.BITS)
def del_user(self, username):
del self.vdb[username]
def list_users(self):
return self.vdb.keys()
def __call__(self, sock):
sock2 = tlsapi.TLSConnection(sock)
sock2.fileno = lambda fd = sock.fileno(): fd # tlslite omitted fileno
try:
sock2.handshakeServer(verifierDB = self.vdb)
except Exception:
ex = sys.exc_info()[1]
raise AuthenticationError(str(ex))
return sock2, sock2.allegedSrpUsername
|
# coding: utf8
from __future__ import unicode_literals
from ...symbols import ORTH, LEMMA, NORM, PRON_LEMMA
_exc = {}
for exc_data in [
{ORTH: "ት/ቤት", LEMMA: "ትምህርት ቤት"},
{ORTH: "ወ/ሮ", LEMMA: PRON_LEMMA, NORM: "ወይዘሮ"},
]:
_exc[exc_data[ORTH]] = [exc_data]
for orth in [
"ዓ.ም.",
"ኪ.ሜ.",
]:
_exc[orth] = [{ORTH: orth}]
TOKENIZER_EXCEPTIONS = _exc
|
"""
Common meta-analytic workflows
"""
from .ale import ale_sleuth_workflow
from .conperm import conperm_workflow
from .macm import macm_workflow
from .peaks2maps import peaks2maps_workflow
__all__ = ['ale_sleuth_workflow', 'conperm_workflow', 'macm_workflow',
'peaks2maps_workflow']
|
import os
from typing import Literal
CWD = os.getcwd()
MODEL_NAME = "lstm"
DATA_PATH = "/data/kindle_reviews.csv"
GLOVE_PATH = "/data/glove.6B.50d.txt"
MODEL_PATH = f"/data/{MODEL_NAME}.h5"
SEED: int = 42
TRAIN_SIZE: float = 0.8
MAX_NB_WORDS: Literal[100000] = 100000
MAX_SEQUENCE_LENGTH: Literal[30] = 30
EMBEDDING_DIM: int = 300
LR: float = 0.003
BATCH_SIZE: int = 1024
EPOCHS: int = 10
|
print('Calculating a 15% raise on an employee salary')
n = float(input('What is the employee\'s current salary? '))
p = n * 0.15
a = n + p
print('The employee salary after the raise is {:.2f}!'.format(a))
|
def zmozi_sez(sez):
    # multiply together all numbers in the list
    stevilo = 1
    for x in sez:
        stevilo *= x
    return stevilo
def najmajši_zmonzek(n=20):
    # smallest positive number evenly divisible by every integer up to n:
    # each new x contributes only the prime factors not already collected
    stevila = [2]
    for x in range(3, n + 1):
        stevilo = x
        for y in stevila:
            if stevilo % y == 0:
                stevilo = stevilo // y
        stevila.append(stevilo)
    return zmozi_sez(stevila)
print(najmajši_zmonzek())
|
from scipy import misc
import imageio  # scipy.misc.imsave was removed in SciPy 1.2; imageio.imwrite replaces it
f = misc.face()
imageio.imwrite('face.jpg', f)  # formerly misc.imsave, which used the Image module (PIL)
import matplotlib.pyplot as plt
plt.imshow(f)
plt.show()
|
import factory
from django.conf import settings
from django.contrib.auth.hashers import make_password
from django.utils import timezone
from ..models import Tag, TaggedItem
from .models import TagTestArticle0, TagTestArticle1
class UserFactory(factory.DjangoModelFactory):
class Meta:
model = settings.AUTH_USER_MODEL
django_get_or_create = ('username',)
last_name = 'John'
first_name = 'Doe'
username = factory.sequence(lambda n: 'username{0}'.format(n))
email = 'webmaster@example.com'
password = make_password('password')
last_login = timezone.now()
class TagTestArticle0Factory(factory.DjangoModelFactory):
class Meta:
model = TagTestArticle0
title = 'Test article'
class TagTestArticle1Factory(factory.DjangoModelFactory):
class Meta:
model = TagTestArticle1
title = 'Test article'
class TagFactory(factory.DjangoModelFactory):
class Meta:
model = Tag
django_get_or_create = ('label',)
label = factory.Sequence(lambda n: 'Tag {}'.format(n))
class TaggedItemFactory(factory.DjangoModelFactory):
class Meta:
model = TaggedItem
tag = factory.SubFactory(TagFactory)
author = factory.SubFactory(UserFactory)
content_object = factory.SubFactory(TagTestArticle0Factory)
|
#!/usr/bin/env python
# encoding: utf-8
# John O'Meara, 2006
# Thomas Nagy 2009
"Bison processing"
from waflib import Task, TaskGen
import os
bison = '${BISON} ${BISONFLAGS} ${SRC[0].abspath()} -o ${TGT[0].name}'
cls = Task.task_factory(
'bison', bison, color='CYAN', ext_in=['.yc', '.y', '.yy'], ext_out='.cxx .h', before='c cxx flex'
)
def post_run_bison(task):
source = task.outputs[0]
header = task.outputs[1]
env = task.env
try:
os.stat(header.abspath())
except OSError:
try:
oldheader = source.change_ext(source.suffix() + '.h')
os.rename(oldheader.abspath(), header.abspath())
except OSError:
pass
task.post_run_orig()
cls.post_run_orig = cls.post_run
cls.post_run = post_run_bison
@TaskGen.extension('.y', '.yc', '.yy')
def big_bison(self, node):
"""when it becomes complicated (unlike flex), the old recipes work better (cwd)"""
has_h = '-d' in self.env['BISONFLAGS']
outs = []
if node.name.endswith('.yc') or node.name.endswith('.yy'):
out_node = self.make_bld_node('src', node.parent, node.name[:-2] + 'cc')
outs.append(out_node)
if has_h:
outs.append(out_node.change_ext('.hh'))
else:
out_node = self.make_bld_node('src', node.parent, node.name[:-1] + 'c')
outs.append(out_node)
if has_h:
outs.append(out_node.change_ext('.h'))
tsk = self.create_task('bison', node, outs)
tsk.cwd = out_node.parent.abspath()
# and the c/cxx file must be compiled too
    try:
        self.out_sources.append(outs[0])
    except AttributeError:  # first .y file seen: the list doesn't exist yet
        self.out_sources = [outs[0]]
def configure(conf):
    conf.find_program('bison', var='BISON', mandatory=True)
    v = conf.env
    v['BISONFLAGS'] = '-d'
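# Illustrative wscript usage under stated assumptions (target and source
# names here are hypothetical):
#   def configure(conf):
#       conf.load('compiler_cxx')
#       conf.load('bison')
#   def build(bld):
#       bld.program(source='parser.yy main.cpp', target='calc')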
|
# ========================
# Information
# ========================
# Direct Link: https://www.hackerrank.com/challenges/text-wrap/problem
# Difficulty: Easy
# Max Score: 10
# Language: Python
# ========================
# Solution
# ========================
import textwrap # Solution made without using the required library
def wrap(string, max_width):
    '''Wrap the string into a paragraph of width max_width.'''
    # Slice the string into max_width-sized chunks and join with newlines;
    # this avoids the stray trailing newline the insert-based approach
    # produced when len(string) was an exact multiple of max_width.
    return '\n'.join(string[i:i + max_width] for i in range(0, len(string), max_width))
if __name__ == '__main__':
string, max_width = input(), int(input())
result = wrap(string, max_width)
print(result)
|
import argparse
import json
import sys
parser = argparse.ArgumentParser(description="Python script updates the channels_EP.json file")
parser._action_groups.pop()  # drop the default "optional arguments" group so required args are listed first
#setting up required arguments
requiredArgs = parser.add_argument_group('required arguments')
requiredArgs.add_argument("-nc", "--numberOfChannels", type=int, metavar='', required=True, help="Number of channels to be created")
requiredArgs.add_argument("-pc", "--peersPerChannel", type=int, metavar='', default=[1], nargs='+', help="Number of peers per channel")
#setting up optional arguments
optionalArgs = parser.add_argument_group('optional arguments')
optionalArgs.add_argument("-sc", "--specialChannels", type=int, metavar='', nargs='+', help="Special Channels")
optionalArgs.add_argument("-sp", "--specialPeersPerChannel", type=int, metavar='', nargs='+', help="Special Peers per channel")
args = parser.parse_args()
class UpdateChannels:
def __init__(self):
self.number_channels = args.numberOfChannels
self.peers_per_channel = args.peersPerChannel
self.special_channels, self.special_peer_members = self.verify_args()
self.channel_structure = self.generate_channels()
self.output = "../../conf/channels_EP.json"
self.writeToOutput()
def verify_args(self):
# this function ensures we are passing special channels cases with the peer members for that case together
if args.specialChannels:
if args.specialPeersPerChannel:
return args.specialChannels, args.specialPeersPerChannel
else:
print("You must pass in the --specialPeersPerChannel or -sp argument")
parser.parse_args(['-h'])
else:
return None, None
def generate_channels(self):
structure = {}
structure["channels"] = []
for number in range(1, self.number_channels + 1):
temp = {}
temp["name"] = "channel{}".format(number)
temp["members"] = []
temp["batchSize"] = {}
temp["batchSize"]["messageCount"] = 100
temp["batchSize"]["absoluteMaxBytes"] = 103809024
temp["batchSize"]["preferredMaxBytes"] = 103809024
temp["batchTimeout"] = "10s"
temp["channelRestrictionMaxCount"] = "150"
# for special channel configuration
if self.special_channels:
if number in self.special_channels:
for special_peer in self.special_peer_members:
temp["members"].append("PeerOrg{}".format(special_peer))
# Add the channel structure to the list
structure["channels"].append(temp)
continue
# setting up the peerOrg members
for peer in self.peers_per_channel:
temp["members"].append("PeerOrg{}".format(peer))
# Add the channel structure to the list
structure["channels"].append(temp)
# deleting temp for next iteration
del temp
return structure
    def writeToOutput(self):
        # write the generated channel structure to the channels_EP.json config file
        with open(self.output, "w") as f:
json.dump(self.channel_structure, f, indent=4, sort_keys=True)
def main():
    UpdateChannels()
if __name__ == "__main__":
main()
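# Example invocation (the script name is hypothetical): five channels joined
# by PeerOrg1 and PeerOrg2, with channel 3 instead joined only by PeerOrg3:
#   python update_channels.py -nc 5 -pc 1 2 -sc 3 -sp 3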
|
import threading
import logging
import time
logging.basicConfig(level=logging.DEBUG, format='%(asctime)s %(levelname)s [%(threadName)s] %(message)s')
def worker(lock):
with lock:
logging.debug("ha ha ha ")
if __name__ == '__main__':
lock = threading.Lock()
t1 = threading.Thread(target=worker, name='t1', args=(lock, ))
t1.start()
t2 = threading.Thread(target=worker, name='t2', args=(lock, ))
t2.start()
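# Both workers contend for the same Lock, so the two DEBUG lines are emitted
# strictly one after the other; %(threadName)s in the format shows which
# thread logged each line.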
|
# Copyright (c) 2019-2022, NVIDIA CORPORATION.
import glob
import math
import os
import numpy as np
import pandas as pd
import pytest
from packaging.version import parse as parse_version
import dask
from dask import dataframe as dd
from dask.utils import natural_sort_key
import cudf
import dask_cudf
# Check if create_metadata_file is supported by
# the current dask.dataframe version
need_create_meta = pytest.mark.skipif(
dask_cudf.io.parquet.create_metadata_file is None,
reason="Need create_metadata_file support in dask.dataframe.",
)
nrows = 40
npartitions = 15
df = pd.DataFrame(
{
"x": [i * 7 % 5 for i in range(nrows)], # Not sorted
"y": [i * 2.5 for i in range(nrows)],
},
index=pd.Index(range(nrows), name="index"),
) # Sorted
ddf = dd.from_pandas(df, npartitions=npartitions)
@pytest.mark.parametrize("stats", [True, False])
def test_roundtrip_from_dask(tmpdir, stats):
tmpdir = str(tmpdir)
ddf.to_parquet(tmpdir, engine="pyarrow")
files = sorted(
(os.path.join(tmpdir, f) for f in os.listdir(tmpdir)),
key=natural_sort_key,
)
# Read list of parquet files
ddf2 = dask_cudf.read_parquet(files, gather_statistics=stats)
dd.assert_eq(ddf, ddf2, check_divisions=stats)
# Specify columns=['x']
ddf2 = dask_cudf.read_parquet(
files, columns=["x"], gather_statistics=stats
)
dd.assert_eq(ddf[["x"]], ddf2, check_divisions=stats)
# Specify columns='y'
ddf2 = dask_cudf.read_parquet(files, columns="y", gather_statistics=stats)
dd.assert_eq(ddf[["y"]], ddf2, check_divisions=stats)
# Now include metadata
ddf2 = dask_cudf.read_parquet(tmpdir, gather_statistics=stats)
dd.assert_eq(ddf, ddf2, check_divisions=stats)
# Specify columns=['x'] (with metadata)
ddf2 = dask_cudf.read_parquet(
tmpdir, columns=["x"], gather_statistics=stats
)
dd.assert_eq(ddf[["x"]], ddf2, check_divisions=stats)
# Specify columns='y' (with metadata)
ddf2 = dask_cudf.read_parquet(tmpdir, columns="y", gather_statistics=stats)
dd.assert_eq(ddf[["y"]], ddf2, check_divisions=stats)
def test_roundtrip_from_dask_index_false(tmpdir):
tmpdir = str(tmpdir)
ddf.to_parquet(tmpdir, engine="pyarrow")
ddf2 = dask_cudf.read_parquet(tmpdir, index=False)
dd.assert_eq(ddf.reset_index(drop=False), ddf2)
def test_roundtrip_from_dask_none_index_false(tmpdir):
tmpdir = str(tmpdir)
path = os.path.join(tmpdir, "test.parquet")
df2 = ddf.reset_index(drop=True).compute()
df2.to_parquet(path, engine="pyarrow")
ddf3 = dask_cudf.read_parquet(path, index=False)
dd.assert_eq(df2, ddf3)
@pytest.mark.parametrize("write_meta", [True, False])
def test_roundtrip_from_dask_cudf(tmpdir, write_meta):
tmpdir = str(tmpdir)
gddf = dask_cudf.from_dask_dataframe(ddf)
gddf.to_parquet(tmpdir, write_metadata_file=write_meta)
gddf2 = dask_cudf.read_parquet(tmpdir)
dd.assert_eq(gddf, gddf2, check_divisions=write_meta)
def test_roundtrip_none_rangeindex(tmpdir):
fn = str(tmpdir.join("test.parquet"))
gdf = cudf.DataFrame(
{"id": [0, 1, 2, 3], "val": [None, None, 0, 1]},
index=pd.RangeIndex(start=5, stop=9),
)
dask_cudf.from_cudf(gdf, npartitions=2).to_parquet(fn)
ddf2 = dask_cudf.read_parquet(fn)
dd.assert_eq(gdf, ddf2, check_index=True)
def test_roundtrip_from_pandas(tmpdir):
fn = str(tmpdir.join("test.parquet"))
# First without specifying an index
dfp = df.copy()
dfp.to_parquet(fn, engine="pyarrow", index=False)
dfp = dfp.reset_index(drop=True)
ddf2 = dask_cudf.read_parquet(fn)
dd.assert_eq(dfp, ddf2, check_index=True)
# Now, specifying an index
dfp = df.copy()
dfp.to_parquet(fn, engine="pyarrow", index=True)
ddf2 = dask_cudf.read_parquet(fn, index=["index"])
dd.assert_eq(dfp, ddf2, check_index=True)
def test_strings(tmpdir):
fn = str(tmpdir)
dfp = pd.DataFrame(
{"a": ["aa", "bbb", "cccc"], "b": ["hello", "dog", "man"]}
)
dfp.set_index("a", inplace=True, drop=True)
ddf2 = dd.from_pandas(dfp, npartitions=2)
ddf2.to_parquet(fn, engine="pyarrow")
read_df = dask_cudf.read_parquet(fn, index=["a"])
dd.assert_eq(ddf2, read_df.compute().to_pandas())
read_df_cats = dask_cudf.read_parquet(
fn, index=["a"], strings_to_categorical=True
)
dd.assert_eq(read_df_cats.dtypes, read_df_cats.compute().dtypes)
dd.assert_eq(read_df_cats.dtypes[0], "int32")
def test_dask_timeseries_from_pandas(tmpdir):
fn = str(tmpdir.join("test.parquet"))
ddf2 = dask.datasets.timeseries(freq="D")
pdf = ddf2.compute()
pdf.to_parquet(fn, engine="pyarrow")
read_df = dask_cudf.read_parquet(fn)
dd.assert_eq(ddf2, read_df.compute())
@pytest.mark.parametrize("index", [False, None])
@pytest.mark.parametrize("stats", [False, True])
def test_dask_timeseries_from_dask(tmpdir, index, stats):
fn = str(tmpdir)
ddf2 = dask.datasets.timeseries(freq="D")
ddf2.to_parquet(fn, engine="pyarrow", write_index=index)
read_df = dask_cudf.read_parquet(fn, index=index, gather_statistics=stats)
dd.assert_eq(
ddf2, read_df, check_divisions=(stats and index), check_index=index
)
@pytest.mark.parametrize("index", [False, None])
@pytest.mark.parametrize("stats", [False, True])
def test_dask_timeseries_from_daskcudf(tmpdir, index, stats):
fn = str(tmpdir)
ddf2 = dask_cudf.from_cudf(
cudf.datasets.timeseries(freq="D"), npartitions=4
)
ddf2.name = ddf2.name.astype("object")
ddf2.to_parquet(fn, write_index=index)
read_df = dask_cudf.read_parquet(fn, index=index, gather_statistics=stats)
dd.assert_eq(
ddf2, read_df, check_divisions=(stats and index), check_index=index
)
@pytest.mark.parametrize("index", [False, True])
def test_empty(tmpdir, index):
fn = str(tmpdir)
dfp = pd.DataFrame({"a": [11.0, 12.0, 12.0], "b": [4, 5, 6]})[:0]
if index:
dfp.set_index("a", inplace=True, drop=True)
ddf2 = dd.from_pandas(dfp, npartitions=2)
ddf2.to_parquet(fn, write_index=index, engine="pyarrow")
read_df = dask_cudf.read_parquet(fn)
dd.assert_eq(ddf2, read_df.compute())
def test_filters(tmpdir):
tmp_path = str(tmpdir)
df = pd.DataFrame({"x": range(10), "y": list("aabbccddee")})
ddf = dd.from_pandas(df, npartitions=5)
assert ddf.npartitions == 5
ddf.to_parquet(tmp_path, engine="pyarrow")
a = dask_cudf.read_parquet(tmp_path, filters=[("x", ">", 4)])
assert a.npartitions == 3
assert (a.x > 3).all().compute()
b = dask_cudf.read_parquet(tmp_path, filters=[("y", "==", "c")])
assert b.npartitions == 1
b = b.compute().to_pandas()
assert (b.y == "c").all()
c = dask_cudf.read_parquet(
tmp_path, filters=[("y", "==", "c"), ("x", ">", 6)]
)
assert c.npartitions <= 1
assert not len(c)
def test_filters_at_row_group_level(tmpdir):
tmp_path = str(tmpdir)
df = pd.DataFrame({"x": range(10), "y": list("aabbccddee")})
ddf = dd.from_pandas(df, npartitions=5)
assert ddf.npartitions == 5
    ddf.to_parquet(tmp_path, engine="pyarrow", row_group_size=10 // 5)
a = dask_cudf.read_parquet(tmp_path, filters=[("x", "==", 1)])
assert a.npartitions == 1
assert (a.shape[0] == 2).compute()
ddf.to_parquet(tmp_path, engine="pyarrow", row_group_size=1)
b = dask_cudf.read_parquet(tmp_path, filters=[("x", "==", 1)])
assert b.npartitions == 1
assert (b.shape[0] == 1).compute()
@pytest.mark.parametrize("metadata", [True, False])
@pytest.mark.parametrize("daskcudf", [True, False])
@pytest.mark.parametrize(
"parts", [["year", "month", "day"], ["year", "month"], ["year"]]
)
def test_roundtrip_from_dask_partitioned(tmpdir, parts, daskcudf, metadata):
tmpdir = str(tmpdir)
df = pd.DataFrame()
df["year"] = [2018, 2019, 2019, 2019, 2020, 2021]
df["month"] = [1, 2, 3, 3, 3, 2]
df["day"] = [1, 1, 1, 2, 2, 1]
df["data"] = [0, 0, 0, 0, 0, 0]
df.index.name = "index"
if daskcudf:
ddf2 = dask_cudf.from_cudf(cudf.from_pandas(df), npartitions=2)
ddf2.to_parquet(
tmpdir, write_metadata_file=metadata, partition_on=parts
)
else:
ddf2 = dd.from_pandas(df, npartitions=2)
ddf2.to_parquet(
tmpdir,
engine="pyarrow",
write_metadata_file=metadata,
partition_on=parts,
)
df_read = dd.read_parquet(tmpdir, engine="pyarrow")
gdf_read = dask_cudf.read_parquet(tmpdir)
# TODO: Avoid column selection after `CudfEngine`
# can be aligned with dask/dask#6534
columns = list(df_read.columns)
assert set(df_read.columns) == set(gdf_read.columns)
dd.assert_eq(
df_read.compute(scheduler=dask.get)[columns],
gdf_read.compute(scheduler=dask.get)[columns],
)
assert gdf_read.index.name == "index"
# Check that we don't have uuid4 file names
for _, _, files in os.walk(tmpdir):
for fn in files:
if not fn.startswith("_"):
assert "part" in fn
if parse_version(dask.__version__) > parse_version("2021.07.0"):
# This version of Dask supports `aggregate_files=True`.
# Check that we can aggregate by a partition name.
df_read = dd.read_parquet(
tmpdir, engine="pyarrow", aggregate_files="year"
)
gdf_read = dask_cudf.read_parquet(tmpdir, aggregate_files="year")
dd.assert_eq(df_read, gdf_read)
@pytest.mark.parametrize("metadata", [True, False])
@pytest.mark.parametrize("chunksize", [None, 1024, 4096, "1MiB"])
def test_chunksize(tmpdir, chunksize, metadata):
nparts = 2
df_size = 100
row_group_size = 5
df = pd.DataFrame(
{
"a": np.random.choice(["apple", "banana", "carrot"], size=df_size),
"b": np.random.random(size=df_size),
"c": np.random.randint(1, 5, size=df_size),
"index": np.arange(0, df_size),
}
).set_index("index")
ddf1 = dd.from_pandas(df, npartitions=nparts)
ddf1.to_parquet(
str(tmpdir),
engine="pyarrow",
row_group_size=row_group_size,
write_metadata_file=metadata,
)
if metadata:
path = str(tmpdir)
else:
dirname = str(tmpdir)
files = os.listdir(dirname)
assert "_metadata" not in files
path = os.path.join(dirname, "*.parquet")
ddf2 = dask_cudf.read_parquet(
path,
chunksize=chunksize,
split_row_groups=True,
gather_statistics=True,
)
ddf2.compute(scheduler="synchronous")
dd.assert_eq(ddf1, ddf2, check_divisions=False)
num_row_groups = df_size // row_group_size
if not chunksize:
assert ddf2.npartitions == num_row_groups
else:
assert ddf2.npartitions < num_row_groups
if parse_version(dask.__version__) > parse_version("2021.07.0"):
# This version of Dask supports `aggregate_files=True`.
# Test that it works as expected.
ddf3 = dask_cudf.read_parquet(
path,
chunksize=chunksize,
split_row_groups=True,
gather_statistics=True,
aggregate_files=True,
)
dd.assert_eq(ddf1, ddf3, check_divisions=False)
if not chunksize:
# Files should not be aggregated
assert ddf3.npartitions == num_row_groups
elif chunksize == "1MiB":
# All files should be aggregated into
# one output partition
assert ddf3.npartitions == 1
else:
# Files can be aggregated together, but
# chunksize is not large enough to produce
# a single output partition
assert ddf3.npartitions < num_row_groups
@pytest.mark.parametrize("row_groups", [1, 3, 10, 12])
@pytest.mark.parametrize("index", [False, True])
def test_row_groups_per_part(tmpdir, row_groups, index):
nparts = 2
df_size = 100
row_group_size = 5
    file_row_groups = 10  # Known a priori
npartitions_expected = math.ceil(file_row_groups / row_groups) * 2
df = pd.DataFrame(
{
"a": np.random.choice(["apple", "banana", "carrot"], size=df_size),
"b": np.random.random(size=df_size),
"c": np.random.randint(1, 5, size=df_size),
"index": np.arange(0, df_size),
}
)
if index:
df = df.set_index("index")
ddf1 = dd.from_pandas(df, npartitions=nparts)
ddf1.to_parquet(
str(tmpdir),
engine="pyarrow",
row_group_size=row_group_size,
write_metadata_file=True,
)
ddf2 = dask_cudf.read_parquet(
str(tmpdir),
row_groups_per_part=row_groups,
)
dd.assert_eq(ddf1, ddf2, check_divisions=False)
assert ddf2.npartitions == npartitions_expected
@need_create_meta
@pytest.mark.parametrize("partition_on", [None, "a"])
def test_create_metadata_file(tmpdir, partition_on):
tmpdir = str(tmpdir)
# Write ddf without a _metadata file
df1 = cudf.DataFrame({"b": range(100), "a": ["A", "B", "C", "D"] * 25})
df1.index.name = "myindex"
ddf1 = dask_cudf.from_cudf(df1, npartitions=10)
ddf1.to_parquet(
tmpdir,
write_metadata_file=False,
partition_on=partition_on,
)
# Add global _metadata file
if partition_on:
fns = glob.glob(os.path.join(tmpdir, partition_on + "=*/*.parquet"))
else:
fns = glob.glob(os.path.join(tmpdir, "*.parquet"))
dask_cudf.io.parquet.create_metadata_file(
fns,
split_every=3, # Force tree reduction
)
# Check that we can now read the ddf
# with the _metadata file present
ddf2 = dask_cudf.read_parquet(
tmpdir,
gather_statistics=True,
split_row_groups=False,
index="myindex",
)
if partition_on:
ddf1 = df1.sort_values("b")
ddf2 = ddf2.compute().sort_values("b")
ddf2.a = ddf2.a.astype("object")
dd.assert_eq(ddf1, ddf2)
@need_create_meta
def test_create_metadata_file_inconsistent_schema(tmpdir):
# NOTE: This test demonstrates that the CudfEngine
# can be used to generate a global `_metadata` file
# even if there are inconsistent schemas in the dataset.
# Write file 0
df0 = pd.DataFrame({"a": [None] * 10, "b": range(10)})
p0 = os.path.join(tmpdir, "part.0.parquet")
df0.to_parquet(p0, engine="pyarrow")
# Write file 1
b = list(range(10))
b[1] = None
df1 = pd.DataFrame({"a": range(10), "b": b})
p1 = os.path.join(tmpdir, "part.1.parquet")
df1.to_parquet(p1, engine="pyarrow")
# New pyarrow-dataset base can handle an inconsistent
# schema (even without a _metadata file), but computing
# and dtype validation may fail
ddf1 = dask_cudf.read_parquet(str(tmpdir), gather_statistics=True)
# Add global metadata file.
# Dask-CuDF can do this without requiring schema
# consistency.
dask_cudf.io.parquet.create_metadata_file([p0, p1])
# Check that we can still read the ddf
# with the _metadata file present
ddf2 = dask_cudf.read_parquet(str(tmpdir), gather_statistics=True)
# Check that the result is the same with and
# without the _metadata file. Note that we must
# call `compute` on `ddf1`, because the dtype of
# the inconsistent column ("a") may be "object"
# before computing, and "int" after
dd.assert_eq(ddf1.compute(), ddf2)
dd.assert_eq(ddf1.compute(), ddf2.compute())
@pytest.mark.parametrize(
"data",
[
["dog", "cat", "fish"],
[[0], [1, 2], [3]],
[None, [1, 2], [3]],
[{"f1": 1}, {"f1": 0, "f2": "dog"}, {"f2": "cat"}],
[None, {"f1": 0, "f2": "dog"}, {"f2": "cat"}],
],
)
def test_cudf_dtypes_from_pandas(tmpdir, data):
# Simple test that we can read in list and struct types
fn = str(tmpdir.join("test.parquet"))
dfp = pd.DataFrame({"data": data})
dfp.to_parquet(fn, engine="pyarrow", index=True)
    # Use `split_row_groups=True` to avoid the "fast path", where the
    # schema is not passed through in older Dask versions
ddf2 = dask_cudf.read_parquet(fn, split_row_groups=True)
dd.assert_eq(cudf.from_pandas(dfp), ddf2)
def test_cudf_list_struct_write(tmpdir):
df = cudf.DataFrame(
{
"a": [1, 2, 3],
"b": [[[1, 2]], [[2, 3]], None],
"c": [[[["a", "z"]]], [[["b", "d", "e"]]], None],
}
)
df["d"] = df.to_struct()
ddf = dask_cudf.from_cudf(df, 3)
temp_file = str(tmpdir.join("list_struct.parquet"))
ddf.to_parquet(temp_file)
new_ddf = dask_cudf.read_parquet(temp_file)
dd.assert_eq(df, new_ddf)
|
# Generated by Django 2.2.7 on 2019-11-27 16:01
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Hotel',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('dailyPrice', models.DecimalField(decimal_places=2, max_digits=6)),
('address', models.CharField(max_length=30)),
('location', models.CharField(max_length=30)),
('companyName', models.CharField(default='hotel', max_length=30)),
],
),
migrations.CreateModel(
name='Payment',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('amount', models.DecimalField(decimal_places=2, max_digits=6)),
('paymentType', models.CharField(choices=[('credit', 'Credit'), ('debit', 'Debit')], max_length=6)),
('cardNo', models.CharField(max_length=16)),
],
),
migrations.CreateModel(
name='Train',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('companyName', models.CharField(max_length=30)),
('srcLocation', models.CharField(max_length=30)),
('destLocation', models.CharField(max_length=30)),
('departureDate', models.DateField()),
('departureTime', models.TimeField()),
('priceEconomy', models.DecimalField(decimal_places=2, max_digits=6)),
('priceBusiness', models.DecimalField(decimal_places=2, max_digits=6)),
('numSeatsRemainingEconomy', models.IntegerField()),
('numSeatsRemainingBusiness', models.IntegerField()),
],
),
migrations.CreateModel(
name='User',
fields=[
('username', models.CharField(max_length=40, primary_key=True, serialize=False, unique=True)),
('email', models.CharField(max_length=35, unique=True)),
('password', models.CharField(max_length=20)),
],
),
]
|
from vyper.exceptions import InvalidLiteralException
def test_function_with_units(get_contract_with_gas_estimation):
code = """
units: {
N: "Newton",
m: "Meter",
s: "Second",
}
@public
def foo(f: uint256(N), d: uint256(m), t: uint256(s)) -> uint256(N*m/s**2):
return f * d / (t * t)
@public
def bar(a: uint256(m), b: uint256(m), c: uint256(m)) -> uint256(m**3):
return (a * b * c)
"""
c = get_contract_with_gas_estimation(code)
assert c._classic_contract.abi[0]["outputs"] == [
{"type": "uint256", "name": "out", "unit": "Newton-Meter per Second squared"}
]
assert c._classic_contract.abi[0]["inputs"] == [
{"type": "uint256", "name": "f", "unit": "Newton"},
{"type": "uint256", "name": "d", "unit": "Meter"},
{"type": "uint256", "name": "t", "unit": "Second"},
]
assert c._classic_contract.abi[1]["outputs"] == [
{"type": "uint256", "name": "out", "unit": "Meter**3"}
]
assert c._classic_contract.abi[1]["inputs"] == [
{"type": "uint256", "name": "a", "unit": "Meter"},
{"type": "uint256", "name": "b", "unit": "Meter"},
{"type": "uint256", "name": "c", "unit": "Meter"},
]
def test_event_with_units(get_contract_with_gas_estimation):
code = """
units: {
m: "Meter",
s: "Second",
}
Speed: event({value: uint256(m/s)})
"""
c = get_contract_with_gas_estimation(code)
assert c._classic_contract.abi[0]["inputs"] == [
{
"type": "uint256",
"name": "value",
"indexed": False,
"unit": "Meter per Second",
}
]
def test_function_with_tuple_output(get_contract_with_gas_estimation):
code = """
units: {
m: "Meter",
s: "Second",
}
@public
def foo(t: uint256(s), d: uint256(m)) -> (uint256(m), uint256(s)):
return (d, t)
"""
c = get_contract_with_gas_estimation(code)
assert c._classic_contract.abi[0]["outputs"] == [
{"type": "uint256", "name": "out", "unit": "Meter"},
{"type": "uint256", "name": "out", "unit": "Second"},
]
assert c._classic_contract.abi[0]["inputs"] == [
{"type": "uint256", "name": "t", "unit": "Second"},
{"type": "uint256", "name": "d", "unit": "Meter"},
]
def test_function_without_units(get_contract_with_gas_estimation):
code = """
@public
def foo(a: uint256, b: uint256, c: uint256) -> uint256:
return (a * b / c)
"""
c = get_contract_with_gas_estimation(code)
assert c._classic_contract.abi[0]["outputs"] == [{"type": "uint256", "name": "out"}]
assert c._classic_contract.abi[0]["inputs"] == [
{"type": "uint256", "name": "a"},
{"type": "uint256", "name": "b"},
{"type": "uint256", "name": "c"},
]
def test_function_call_explicit_unit_literal(get_contract, assert_compile_failed):
code = """
@public
def unit_func(a: uint256(wei)) -> uint256(wei):
return a + 1
@public
def foo() -> uint256(wei):
c: uint256(wei) = self.unit_func(111)
return c
"""
assert_compile_failed(lambda: get_contract(code), InvalidLiteralException)
|
from .nexe import Nexe
from .pkg import Pkg
|
#!/usr/bin/env python
from unittest import TestCase
from boutiques import bosh
from boutiques import __file__ as bfile
from jsonschema.exceptions import ValidationError
import json
import os
import os.path as op
from os.path import join as opj
import pytest
from boutiques.importer import ImportError
import boutiques
import tarfile
from contextlib import closing
class TestImport(TestCase):
def test_import_bids_good(self):
bids_app = opj(op.split(bfile)[0],
"schema/examples/bids-apps/example_good")
outfile = "test-import.json"
ref_name = "test-import-ref.json"
if op.isfile(outfile):
os.remove(outfile)
self.assertFalse(bosh(["import", "bids", outfile, bids_app]))
assert(open(outfile, "U").read().strip() == open(opj(bids_app,
ref_name),
"U").read().strip())
def test_import_bids_bad(self):
bids_app = opj(op.split(bfile)[0],
"schema/examples/bids-apps/example_bad")
self.assertRaises(ValidationError, bosh, ["import", "bids",
"test-import.json",
bids_app])
def test_import_bids_valid(self):
self.assertFalse(bosh(["validate", "test-import.json", "-b"]))
os.remove("test-import.json")
def test_upgrade_04(self):
fin = opj(op.split(bfile)[0], "schema/examples/upgrade04.json")
fout = opj(op.split(bfile)[0], "schema/examples/upgraded05.json")
ref_name = "test-import-04-ref.json"
ref_file = opj(op.split(bfile)[0], "schema/examples", ref_name)
ref_name_p2 = "test-import-04-ref-python2.json"
ref_file_p2 = opj(op.split(bfile)[0], "schema/examples",
ref_name_p2)
if op.isfile(fout):
os.remove(fout)
self.assertFalse(bosh(["import", "0.4", fout, fin]))
result = open(fout, "U").read().strip()
assert(result == open(ref_file, "U").read().strip() or
result == open(ref_file_p2, "U").read().strip())
os.remove(fout)
def test_upgrade_04_json_obj(self):
fin = open(opj(op.split(bfile)[0],
"schema/examples/upgrade04.json")).read()
fout = opj(op.split(bfile)[0], "schema/examples/upgraded05.json")
ref_name = "test-import-04-ref.json"
ref_file = opj(op.split(bfile)[0], "schema/examples", ref_name)
ref_name_p2 = "test-import-04-ref-python2.json"
ref_file_p2 = opj(op.split(bfile)[0], "schema/examples",
ref_name_p2)
if op.isfile(fout):
os.remove(fout)
self.assertFalse(bosh(["import", "0.4", fout, fin]))
result = open(fout, "U").read().strip()
assert(result == open(ref_file, "U").read().strip() or
result == open(ref_file_p2, "U").read().strip())
os.remove(fout)
def test_import_cwl_valid(self):
ex_dir = opj(op.split(bfile)[0], "tests/cwl")
# These ones are supposed to crash
bad_dirs = ["1st-workflow", # workflow
"record", # complex type
"array-inputs", # input bindings specific to array element
"expression", # Javascript expression
"nestedworkflows" # workflow
]
for d in os.listdir(ex_dir):
if d == "README.md":
continue
files = os.listdir(opj(ex_dir, d))
cwl_descriptor = None
cwl_invocation = None
for f in files:
if op.basename(f).endswith(".cwl"):
cwl_descriptor = op.abspath(opj(ex_dir, d, f))
if op.basename(f).endswith(".yml"):
cwl_invocation = op.abspath(opj(ex_dir, d, f))
assert(cwl_descriptor is not None)
out_desc = "./cwl_out.json"
out_inv = "./cwl_inv_out.json"
run = False
if cwl_invocation is not None:
args = ["import",
"cwl",
out_desc,
cwl_descriptor,
"-i", cwl_invocation,
"-o", out_inv]
run = True
else:
args = ["import",
"cwl",
out_desc,
cwl_descriptor]
if d in bad_dirs:
with pytest.raises(ImportError):
bosh(args)
else:
self.assertFalse(bosh(args), cwl_descriptor)
if run:
# write files required by cwl tools
with open('hello.js', 'w') as f:
f.write("'hello'")
with open('goodbye.txt', 'w') as f:
f.write("goodbye")
                    # closing() keeps this working on Python 2.6,
                    # where TarFile objects are not context managers
                    with closing(tarfile.open('hello.tar', 'w')) as tar:
tar.add('goodbye.txt')
ret = boutiques.execute(
"launch",
out_desc,
out_inv
)
self.assertFalse(ret.exit_code,
cwl_descriptor)
|
import torch
from .base import Flow
# Flow layers to reshape the latent features
class Split(Flow):
"""
Split features into two sets
"""
def __init__(self, mode='channel'):
"""
Constructor
:param mode: Splitting mode, can be
            channel: Splits first feature dimension, usually channels, into two halves
channel_inv: Same as channel, but with z1 and z2 flipped
checkerboard: Splits features using a checkerboard pattern (last feature dimension must be even)
checkerboard_inv: Same as checkerboard, but with inverted coloring
"""
super().__init__()
self.mode = mode
def forward(self, z):
if self.mode == 'channel':
z1, z2 = z.chunk(2, dim=1)
elif self.mode == 'channel_inv':
z2, z1 = z.chunk(2, dim=1)
elif 'checkerboard' in self.mode:
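            # Build a 0/1 checkerboard mask by alternating colors along each
            # feature dimension (from last to first), then route the two
            # color sets to z1 and z2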
n_dims = z.dim()
cb0 = 0
cb1 = 1
for i in range(1, n_dims):
cb0_ = cb0
cb1_ = cb1
cb0 = [cb0_ if j % 2 == 0 else cb1_ for j in range(z.size(n_dims - i))]
cb1 = [cb1_ if j % 2 == 0 else cb0_ for j in range(z.size(n_dims - i))]
cb = cb1 if 'inv' in self.mode else cb0
cb = torch.tensor(cb)[None].repeat(len(z), *((n_dims - 1) * [1]))
cb = cb.to(z.device)
z_size = z.size()
z1 = z.reshape(-1)[torch.nonzero(cb.view(-1), as_tuple=False)].view(*z_size[:-1], -1)
z2 = z.reshape(-1)[torch.nonzero((1 - cb).view(-1), as_tuple=False)].view(*z_size[:-1], -1)
else:
raise NotImplementedError('Mode ' + self.mode + ' is not implemented.')
log_det = 0
return [z1, z2], log_det
def inverse(self, z):
z1, z2 = z
if self.mode == 'channel':
z = torch.cat([z1, z2], 1)
elif self.mode == 'channel_inv':
z = torch.cat([z2, z1], 1)
elif 'checkerboard' in self.mode:
n_dims = z1.dim()
z_size = list(z1.size())
z_size[-1] *= 2
cb0 = 0
cb1 = 1
for i in range(1, n_dims):
cb0_ = cb0
cb1_ = cb1
cb0 = [cb0_ if j % 2 == 0 else cb1_ for j in range(z_size[n_dims - i])]
cb1 = [cb1_ if j % 2 == 0 else cb0_ for j in range(z_size[n_dims - i])]
cb = cb1 if 'inv' in self.mode else cb0
cb = torch.tensor(cb)[None].repeat(z_size[0], *((n_dims - 1) * [1]))
cb = cb.to(z1.device)
z1 = z1[..., None].repeat(*(n_dims * [1]), 2).view(*z_size[:-1], -1)
z2 = z2[..., None].repeat(*(n_dims * [1]), 2).view(*z_size[:-1], -1)
z = cb * z1 + (1 - cb) * z2
else:
raise NotImplementedError('Mode ' + self.mode + ' is not implemented.')
log_det = 0
return z, log_det
class Merge(Split):
"""
Same as Split but with forward and backward pass interchanged
"""
def __init__(self, mode='channel'):
super().__init__(mode)
def forward(self, z):
return super().inverse(z)
def inverse(self, z):
return super().forward(z)
class Squeeze(Flow):
"""
Squeeze operation of multi-scale architecture, RealNVP or Glow paper
"""
def __init__(self):
"""
Constructor
"""
super().__init__()
def forward(self, z):
log_det = 0
s = z.size()
z = z.view(s[0], s[1] // 4, 2, 2, s[2], s[3])
z = z.permute(0, 1, 4, 2, 5, 3).contiguous()
z = z.view(s[0], s[1] // 4, 2 * s[2], 2 * s[3])
return z, log_det
def inverse(self, z):
log_det = 0
s = z.size()
z = z.view(*s[:2], s[2] // 2, 2, s[3] // 2, 2)
z = z.permute(0, 1, 3, 5, 2, 4).contiguous()
z = z.view(s[0], 4 * s[1], s[2] // 2, s[3] // 2)
return z, log_det
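
# A minimal round-trip sketch (not part of the original module): Split and
# Squeeze are bijective reshapes, so forward followed by inverse (or vice
# versa) must reproduce the input exactly. Runs only when the module is
# executed directly, in a context where the relative import above resolves.
if __name__ == '__main__':
    x = torch.arange(2 * 4 * 4 * 4, dtype=torch.float32).view(2, 4, 4, 4)

    split = Split(mode='checkerboard')
    (z1, z2), _ = split.forward(x)
    x_rec, _ = split.inverse([z1, z2])
    assert torch.allclose(x, x_rec)

    squeeze = Squeeze()
    z, _ = squeeze.inverse(x)      # (2, 4, 4, 4) -> (2, 16, 2, 2)
    x_rec, _ = squeeze.forward(z)  # back to (2, 4, 4, 4)
    assert torch.allclose(x, x_rec)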
|
#!/usr/bin/env python2
# -*- coding: utf-8 -*-
import sys
from flask import (Flask, render_template, jsonify)
from .markov import Markov
with open(sys.argv[1], "rb") as f:
drjc_facts = map(str, f)
app = Flask(__name__)
drjc = Markov(drjc_facts)
@app.route("/")
def index():
return render_template("index.html", fact=drjc.markov_gen())
@app.route("/drjc.json")
def drjc_json():
return jsonify({"fact": drjc.markov_gen()})
|
import json
from flask import Flask
app = Flask(__name__)
@app.route("/")
def patientpage():
with open("patient.json") as patientfile:
data = json.load(patientfile)
return data
if __name__ == "__main__":
app.run(debug=True, use_reloader=True)
|
from stg.api import PulseFile, STG4000
# we initialize the STG and print information about it
stg = STG4000()
print(stg, stg.version)
# create a pulsefile with default parameters
p = PulseFile()
# compile the pulsefile, expand the tuple to positional arguments,
# and download it to channel 1
# (note that channel indexing starts at zero)
stg.download(0, *p())
# start stimulation at channel 1
stg.start_stimulation([0])
# sleep for 500ms
stg.sleep(500)
# create a new pulsefile consisting of 600 repetitive pulses
p = PulseFile(intensity_in_mA=1, burstcount=600)
stg.download(0, *p())
# start and immediately stop it again
# this shows that an ongoing stimulation can be aborted
stg.start_stimulation([0])
stg.stop_stimulation()
# create a biphasic pulse with 1mA amplitude and a pulse-width of 2ms
# and trigger it every 250 ms
# timing here is determined by Python and is therefore not as exact
p = PulseFile(intensity_in_mA=1, pulsewidth_in_ms=2)
stg.download(0, *p())
while True:
stg.start_stimulation([0, 1])
stg.sleep(duration_in_ms=250)
|
from __future__ import print_function
from builtins import str
from builtins import object
from collections import defaultdict
class GapMasker(object):
def __init__(self, template):
self.template = template
self.gap_positions = self.get_gap_positions()
def get_gap_positions(self):
gap_positions = defaultdict(list)
names = self.template.headers
for name in names:
seq = self.template.mapping[name]
for pos, char in enumerate(seq):
if char == '-':
gap_positions[name].append(pos)
return gap_positions
def mask(self, target):
try:
self.check_seqs(target)
return self.write_gap_positions(target)
except Exception as e:
print(e)
return
def check_seqs(self, target):
if len(self.template) != len(target):
raise Exception('Alignments have different numbers of sequences')
if set(self.template.headers) != set(target.headers):
raise Exception('Sequence names don\'t match')
if self.template.seqlength != target.seqlength:
raise Exception('Alignments are different lengths')
def write_gap_positions(self, target):
for name in self.gap_positions:
            if name not in target.headers:
                raise Exception('Trying to write gaps to non-existent sequence: '
                                + name)
listseq = list(target.mapping[name])
for pos in self.gap_positions[name]:
listseq[pos] = '-'
seq = ''.join(str(x) for x in listseq)
target.mapping[name] = seq
seqs = []
for name in target.headers:
seqs.append(target.mapping[name])
target.sequences = seqs
target.update()
return target
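
# Illustrative usage sketch (not from the original module). `_Alignment` is a
# hypothetical stand-in for the alignment objects GapMasker expects; it only
# provides the attributes the class actually touches: `headers`, `mapping`,
# `seqlength`, `sequences`, `__len__` and `update()`.
if __name__ == '__main__':
    class _Alignment(object):
        def __init__(self, mapping):
            self.mapping = dict(mapping)
            self.headers = list(self.mapping)
            self.update()

        def __len__(self):
            return len(self.headers)

        @property
        def seqlength(self):
            return len(self.sequences[0])

        def update(self):
            self.sequences = [self.mapping[name] for name in self.headers]

    template = _Alignment({'seq1': 'AC-T', 'seq2': 'A--T'})
    target = _Alignment({'seq1': 'ACGT', 'seq2': 'AGGT'})
    masked = GapMasker(template).mask(target)
    print(masked.mapping)  # {'seq1': 'AC-T', 'seq2': 'A--T'}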
|
from finbot.core.environment import get_snapwsrv_endpoint
from finbot.clients import SnapClient
import pytest
@pytest.fixture
def api() -> SnapClient:
return SnapClient(get_snapwsrv_endpoint())
def test_healthy(api: SnapClient):
assert api.healthy
|
import json
from django.http import response
from django.shortcuts import render
import base64
from django.http.response import HttpResponse, JsonResponse
from rest_framework import status, decorators
from rest_framework.decorators import api_view, parser_classes
from myapi.models import RegdevData, RegtseData, RequestData, RequestDataTest, RegdevDataTest, RegtseDataTest
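# Illustrative request payloads, inferred from the parsing logic below (the
# field names come from the code; the values are made up):
#
#   REGTSE (merchant record):
#     {"data": [{"TABLE": "MERCHANTS_ALL", "MERCHANT": "123456", "PARENT": "",
#                "ABRV_NAME": "...", "FULL_NAME": "...", "CNTRY": "...",
#                "CITY": "...", "STREET": "...", "REG_NR": "...", "PHONE": "...",
#                "MCC": "...", "POST_IND": "...", "MRC_PHONE": "..."}]}
#
#   REGDEV (terminal record; ACCEPTOR_ID must match an existing merchant):
#     {"data": [{"TABLE": "SET0_ACC_TR_ALL", "TERMINAL_ID": "T0001",
#                "ACCEPTOR_ID": "123456", "TERM_TYPE": "...", "POINT_CODE": "...",
#                "SERIAL_NR": "...", "INV_NR": "..."}]}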
@api_view(['POST'])
def ReceivingData(request):
#if request.content_type != 'application/xml' or request.content_type != 'text/plain':
# return HttpResponse('<xml><result>Failed. The request content_type is not "application/xml" or xml-file.</result></xml>', status=status.HTTP_405_METHOD_NOT_ALLOWED, content_type='application/xml')
#file = request.data['file']
#request.body.decode('utf-8')
answer = {
'request_id': 0,
'count': 0,
'success': 0,
'fail': 0,
'records': [
{
'status': 'ok',
'index': 0,
'record_id': 0,
'errors': {}
},
]
}
try:
RequestObj = RequestData.objects.create(request=request.body.decode('utf-8'), endpoint='ReceivingData', method=request.method, scheme=request.scheme, headers=request.headers)
RequestObj.save()
answer['request_id']=RequestObj.pk
answer['records'][0]['record_id']=RequestObj.pk
data = json.loads(request.body.decode('utf-8'))
table = ""
file_type = ""
if 'TABLE' in data["data"][0]:
table = data["data"][0]["TABLE"]
else:
file_type = 'Not field'
if table == 'MERCHANTS_ALL':
file_type = 'REGTSE'
elif table == 'SET0_ACC_TR_ALL':
file_type = 'REGDEV'
else:
file_type = 'Unknown'
if 'MERCHANT' in data["data"][0]:
if data["data"][0]["MERCHANT"]!="":
if file_type == 'REGTSE':
regtse = RegtseData.objects.create(MERCHANT=data["data"][0]["MERCHANT"], PARENT=data["data"][0]["PARENT"], ABRV_NAME=data["data"][0]["ABRV_NAME"], FULL_NAME=data["data"][0]["FULL_NAME"], CNTRY=data["data"][0]["CNTRY"], CITY=data["data"][0]["CITY"], STREET=data["data"][0]["STREET"], REG_NR=data["data"][0]["REG_NR"], PHONE=data["data"][0]["PHONE"], MCC=data["data"][0]["MCC"], POST_IND=data["data"][0]["POST_IND"], MRC_PHONE=data["data"][0]["MRC_PHONE"], req=request.body.decode('utf-8'), dt=RequestObj.createdAt)
regtse.save()
answer['records'][0]['record_id']=regtse.pk
answer['success'] = 1
return JsonResponse(answer, status=status.HTTP_202_ACCEPTED)
elif ('TERMINAL_ID' in data["data"][0] and 'ACCEPTOR_ID' in data["data"][0]):
if data["data"][0]["TERMINAL_ID"]!="" and data["data"][0]["ACCEPTOR_ID"]!="":
if file_type == 'REGDEV':
regtse_obj = RegtseData.objects.filter(MERCHANT=data["data"][0]["ACCEPTOR_ID"])
print(regtse_obj.first())
if regtse_obj.first():
regdev = RegdevData.objects.create(TERMINAL_ID=data["data"][0]["TERMINAL_ID"], ACCEPTOR_ID=data["data"][0]["ACCEPTOR_ID"], TERM_TYPE=data["data"][0]["TERM_TYPE"], POINT_CODE=data["data"][0]["POINT_CODE"], SERIAL_NR=data["data"][0]["SERIAL_NR"], INV_NR=data["data"][0]["INV_NR"], CURRENCY="", regtseId=regtse_obj.first(), req=request.body.decode('utf-8'), dt=RequestObj.createdAt)
regdev.save()
answer['records'][0]['record_id']=regdev.pk
answer['success'] = 1
return JsonResponse(answer, status=status.HTTP_202_ACCEPTED)
print("Not analyzed data. ", table)
answer['records'][0]['status']='error'
answer['records'][0]['errors']={ 'REQ_ERROR': ['Not analyzed data.'] }
answer['count'] = 1
answer['fail'] = 1
return JsonResponse(answer, status=status.HTTP_406_NOT_ACCEPTABLE)
except Exception as e:
print("ReceivingData: ", e)
print(request)
answer['records'][0]['status']='error'
answer['records'][0]['errors']={ 'REQ_ERROR': [str(e)] }
        answer['count'] = 1
        answer['fail'] = 1
return JsonResponse(answer, status=status.HTTP_406_NOT_ACCEPTABLE)
@api_view(['POST'])
def ReceivingDataTest(request):
answer = {
'request_id': 0,
'count': 0,
'success': 0,
'fail': 0,
'records': [
{
'status': 'ok',
'index': 0,
'record_id': 0,
'errors': {}
},
]
}
try:
RequestObj = RequestDataTest.objects.create(request=request.body.decode('utf-8'), endpoint='ReceivingDataTest', method=request.method, scheme=request.scheme, headers=request.headers)
RequestObj.save()
answer['request_id']=RequestObj.pk
answer['records'][0]['record_id']=RequestObj.pk
data = json.loads(request.body.decode('utf-8'))
table = ""
file_type = ""
if 'TABLE' in data["data"][0]:
table = data["data"][0]["TABLE"]
else:
file_type = 'Not field'
if table == 'MERCHANTS_ALL':
file_type = 'REGTSE'
elif table == 'SET0_ACC_TR_ALL':
file_type = 'REGDEV'
else:
file_type = 'Unknown'
if 'MERCHANT' in data["data"][0]:
if data["data"][0]["MERCHANT"]!="":
if file_type == 'REGTSE':
regtse = RegtseDataTest.objects.create(MERCHANT=data["data"][0]["MERCHANT"], PARENT=data["data"][0]["PARENT"], ABRV_NAME=data["data"][0]["ABRV_NAME"], FULL_NAME=data["data"][0]["FULL_NAME"], CNTRY=data["data"][0]["CNTRY"], CITY=data["data"][0]["CITY"], STREET=data["data"][0]["STREET"], REG_NR=data["data"][0]["REG_NR"], PHONE=data["data"][0]["PHONE"], MCC=data["data"][0]["MCC"], POST_IND=data["data"][0]["POST_IND"], MRC_PHONE=data["data"][0]["MRC_PHONE"], req=request.body.decode('utf-8'), dt=RequestObj.createdAt)
regtse.save()
answer['records'][0]['record_id']=regtse.pk
answer['success'] = 1
return JsonResponse(answer, status=status.HTTP_202_ACCEPTED)
elif ('TERMINAL_ID' in data["data"][0] and 'ACCEPTOR_ID' in data["data"][0]):
if data["data"][0]["TERMINAL_ID"]!="" and data["data"][0]["ACCEPTOR_ID"]!="":
if file_type == 'REGDEV':
regtse_obj = RegtseDataTest.objects.filter(MERCHANT=data["data"][0]["ACCEPTOR_ID"])
print(regtse_obj.first())
if regtse_obj.first():
regdev = RegdevDataTest.objects.create(TERMINAL_ID=data["data"][0]["TERMINAL_ID"], ACCEPTOR_ID=data["data"][0]["ACCEPTOR_ID"], TERM_TYPE=data["data"][0]["TERM_TYPE"], POINT_CODE=data["data"][0]["POINT_CODE"], SERIAL_NR=data["data"][0]["SERIAL_NR"], INV_NR=data["data"][0]["INV_NR"], CURRENCY="", regtseId=regtse_obj.first(), req=request.body.decode('utf-8'), dt=RequestObj.createdAt)
regdev.save()
answer['records'][0]['record_id']=regdev.pk
answer['success'] = 1
return JsonResponse(answer, status=status.HTTP_202_ACCEPTED)
print("Not analyzed data. ", table)
answer['records'][0]['status']='error'
answer['records'][0]['errors']={ 'REQ_ERROR': ['Not analyzed data.'] }
answer['count'] = 1
answer['fail'] = 1
return JsonResponse(answer, status=status.HTTP_406_NOT_ACCEPTABLE)
except Exception as e:
print("ReceivingData: ", e)
print(request)
answer['records'][0]['status']='error'
answer['records'][0]['errors']={ 'REQ_ERROR': [str(e)] }
        answer['count'] = 1
        answer['fail'] = 1
return JsonResponse(answer, status=status.HTTP_406_NOT_ACCEPTABLE)
|
n = input("Digite seu nome: ")
print('Olá, {}! É um prazer te conhecer.'.format(n))
|
#!/usr/bin/python3
# MIT License
#
# Copyright (c) 2017 Marcel de Vries
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import os
import os.path
import re
import shutil
import time
from datetime import datetime
from urllib import request
#region Configuration
STEAM_CMD = "/home/steam/arma3/steam/steamcmd.sh"
STEAM_USER = ""
STEAM_PASS = ""
A3_SERVER_ID = "233780"
A3_SERVER_DIR = "/home/steam/arma3/install"
A3_WORKSHOP_ID = "107410"
A3_WORKSHOP_DIR = "{}/steamapps/workshop/content/{}".format(A3_SERVER_DIR, A3_WORKSHOP_ID)
A3_MODS_DIR = "/home/steam/arma3/mods"
MODS = {
"@cba_a3": "450814997",
"@ace3": "463939057",
"@alive": "620260972",
"@cup_terrains_core": "583496184",
"@cup_terrains_maps": "583544987",
"@cup_weapons": "497660133",
"@cup_units": "497661914",
"@cup_vehicles": "541888371"
}
PATTERN = re.compile(r"workshopAnnouncement.*?<p id=\"(\d+)\">", re.DOTALL)
WORKSHOP_CHANGELOG_URL = "https://steamcommunity.com/sharedfiles/filedetails/changelog"
#endregion
#region Functions
def log(msg):
print("")
print("{{0:=<{}}}".format(len(msg)).format(""))
    print(msg)
print("{{0:=<{}}}".format(len(msg)).format(""))
def call_steamcmd(params):
os.system("{} {}".format(STEAM_CMD, params))
print("")
def update_server():
steam_cmd_params = " +login {} {}".format(STEAM_USER, STEAM_PASS)
steam_cmd_params += " +force_install_dir {}".format(A3_SERVER_DIR)
steam_cmd_params += " +app_update {} validate".format(A3_SERVER_ID)
steam_cmd_params += " +quit"
call_steamcmd(steam_cmd_params)
def mod_needs_update(mod_id, path):
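    # Scrape the mod's Workshop changelog page and treat the mod as outdated
    # when the latest changelog entry is newer than the local download
    # (the folder's creation time)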
if os.path.isdir(path):
response = request.urlopen("{}/{}".format(WORKSHOP_CHANGELOG_URL, mod_id)).read()
response = response.decode("utf-8")
match = PATTERN.search(response)
if match:
updated_at = datetime.fromtimestamp(int(match.group(1)))
created_at = datetime.fromtimestamp(os.path.getctime(path))
return (updated_at >= created_at)
return False
def update_mods():
for mod_name, mod_id in MODS.items():
path = "{}/{}".format(A3_WORKSHOP_DIR, mod_id)
# Check if mod needs to be updated
if os.path.isdir(path):
if mod_needs_update(mod_id, path):
# Delete existing folder so that we can verify whether the
# download succeeded
shutil.rmtree(path)
else:
print("No update required for \"{}\" ({})... SKIPPING".format(mod_name, mod_id))
continue
# Keep trying until the download actually succeeded
tries = 0
    while not os.path.isdir(path) and tries < 10:
log("Updating \"{}\" ({}) | {}".format(mod_name, mod_id, tries + 1))
steam_cmd_params = " +login {} {}".format(STEAM_USER, STEAM_PASS)
steam_cmd_params += " +force_install_dir {}".format(A3_SERVER_DIR)
steam_cmd_params += " +workshop_download_item {} {} validate".format(
A3_WORKSHOP_ID,
mod_id
)
steam_cmd_params += " +quit"
call_steamcmd(steam_cmd_params)
# Sleep for a bit so that we can kill the script if needed
time.sleep(5)
        tries += 1
if tries >= 10:
log("!! Updating {} failed after {} tries !!".format(mod_name, tries))
def lowercase_workshop_dir():
os.system("(cd {} && find . -depth -exec rename -v 's/(.*)\/([^\/]*)/$1\/\L$2/' {{}} \;)".format(A3_WORKSHOP_DIR))
def create_mod_symlinks():
for mod_name, mod_id in MODS.items():
link_path = "{}/{}".format(A3_MODS_DIR, mod_name)
real_path = "{}/{}".format(A3_WORKSHOP_DIR, mod_id)
if os.path.isdir(real_path):
if not os.path.islink(link_path):
os.symlink(real_path, link_path)
print("Creating symlink '{}'...".format(link_path))
else:
print("Mod '{}' does not exist! ({})".format(mod_name, real_path))
#endregion
log("Updating A3 server ({})".format(A3_SERVER_ID))
update_server()
log("Updating mods")
update_mods()
log("Converting uppercase files/folders to lowercase...")
lowercase_workshop_dir()
log("Creating symlinks...")
create_mod_symlinks()
|
#!/usr/bin/env python
import sys
import os
import getopt
import time
if sys.version_info < (3,0):
import ConfigParser
else:
import configparser as ConfigParser
import aliyun
import logging
import logging.handlers
# Set the global configuration
CONFIG_FILENAME = 'config.ini'
configFilepath = os.path.split(os.path.realpath(__file__))[0] + os.path.sep + CONFIG_FILENAME
config = ConfigParser.ConfigParser()
config.read(configFilepath)
logger = logging.getLogger("logger")
formatter = logging.Formatter("%(asctime)s %(name)s %(levelname)s %(message)s")
consoleHandler = logging.StreamHandler(stream=sys.stdout)
logger.setLevel(logging.DEBUG)
consoleHandler.setLevel(logging.DEBUG)
consoleHandler.setFormatter(formatter)
logger.addHandler(consoleHandler)
fileLogFlag = config.get('log', 'enable').lower() == 'true'
if fileLogFlag:
logfile = config.get('log','logfile')
fileHandler = logging.FileHandler(filename=logfile)
fileHandler.setLevel(logging.DEBUG)
fileHandler.setFormatter(formatter)
logger.addHandler(fileHandler)
def getAliyunDnsInstance():
appid = config.get('aliyun', 'appid')
appsecret = config.get('aliyun', 'appsecret')
return aliyun.AliyunDns(appid, appsecret)
def auth(tld_length):
try:
if 'CERTBOT_DOMAIN' not in os.environ:
raise Exception('Environment variable CERTBOT_DOMAIN is empty.')
if 'CERTBOT_VALIDATION' not in os.environ:
raise Exception(
'Environment variable CERTBOT_VALIDATION is empty.')
domain = os.environ['CERTBOT_DOMAIN']
value = os.environ['CERTBOT_VALIDATION']
logger.info('Start setting DNS')
logger.info('Domain:' + domain)
logger.info('Domain length:' + tld_length)
logger.info('Value:' + value)
aliyunDns = getAliyunDnsInstance()
# aliyunDns.toString()
# add letsencrypt domain record
aliyunDns.addLetsencryptDomainRecord(domain, int(tld_length), value)
# wait for completion
logger.info('sleep 10 secs')
time.sleep(10)
logger.info('Success.')
logger.info('DNS setting end!')
except Exception as e:
#logger.error('Error: ' + str(e.msg) + '\n')
logger.error('Error: ' + str(e) + '\n')
sys.exit()
def cleanup():
try:
if 'CERTBOT_DOMAIN' not in os.environ:
raise Exception('Environment variable CERTBOT_DOMAIN is empty.')
domain = os.environ['CERTBOT_DOMAIN']
logger.info('Start to clean up')
logger.info('Domain:' + domain)
aliyunDns = getAliyunDnsInstance()
# aliyunDns.toString()
# delete letsencrypt domain record
aliyunDns.deleteLetsencryptDomainRecord(domain)
# wait for completion
time.sleep(10)
logger.info('Success.')
logger.info('Clean up end!')
except Exception as e:
        logger.error('Error: ' + str(e) + '\n')
sys.exit()
def usage():
def printOpt(opt, desc):
firstPartMaxLen = 30
firstPart = ' ' + ', '.join(opt)
secondPart = desc.replace('\n', '\n' + ' ' * firstPartMaxLen)
delim = ''
firstPartLen = len(firstPart)
if firstPartLen >= firstPartMaxLen:
spaceLen = firstPartMaxLen
delim = '\n'
else:
spaceLen = firstPartMaxLen - firstPartLen
delim = delim + ' ' * spaceLen
print(firstPart + delim + secondPart)
print('Usage: python %s [option] [arg] ...' % os.path.basename(__file__))
print('Options:')
printOpt(['-h', '--help'],
'Display help information.')
printOpt(['-v', '--version'],
'Display version information.')
printOpt(['--auth'],
             'auth hook, with length of the domain.')
printOpt(['--cleanup'],
'auth hook.')
def version():
    # __version__, __copyright__, __license__ and __author__ are assumed to be
    # defined at module level; they are not set anywhere in this file
    print('dmlkdevtool.py ' + __version__)
print(__copyright__)
print('License ' + __license__ + '.')
print('Written by ' + __author__ + '.')
def main(argc, argv):
try:
        if argc == 1:
usage()
raise Exception('')
opts, args = getopt.getopt(
argv[1:],
'hv',
[
'help',
'version',
'auth=',
'cleanup',
]
)
for opt, arg in opts:
if opt in ('-h', '--help'):
usage()
elif opt in ('-v', '--version'):
version()
            elif opt in ('--auth',):
                auth(arg)
            elif opt in ('--cleanup',):
                cleanup()
else:
logger.error('Invalid option: ' + opt)
except getopt.GetoptError as e:
logger.error('Error: ' + str(e) + '\n')
except AttributeError as e:
logger.error(e.args)
except Exception as e:
        if str(e) != '':
            logger.error('Error: ' + str(e) + '\n')
sys.exit()
if __name__ == '__main__':
main(len(sys.argv), sys.argv)
|
import cv2
from cv_object_detector import CVTFObjectDetector
IMAGE = "/home/nj/Desktop/CV/Dataset/DS/images/CV19_image_106.png"
FROZEN_GRAPH = "/home/nj/Desktop/CV/Trained_Models/FRCNN_Tray.pb"
PBTEXT = "/home/nj/Desktop/CV/Dataset/Trained/FRCNN_TRAY/opencv_frcnn_tray.pbtxt"
LABELS = {0: "Walnut",
1: "Peanut",
2: "Hazelnut",
3: "Tray"}
image = cv2.imread(IMAGE)
obj_detector = CVTFObjectDetector()
obj_detector.set_parameters(FROZEN_GRAPH, PBTEXT)
obj_detector.set_labels(LABELS)
obj_detector.set_input_image(image, (640, 480))
obj_detector.run_detection()
output = obj_detector.get_inference_image()
cv2.imwrite("result_false.jpg", output)
print(obj_detector.get_results())
|
# pylint: disable=missing-module-docstring
# pylint: disable=missing-function-docstring
from unittest import mock
import pytest
from sql_judge.parse_configuration import build_configuration
# ConfigurationBuilder.is_valid
def test_empty_builder_is_invalid():
assert build_configuration.default().is_valid() is False
def test_valid_builder(configuration_builder):
assert configuration_builder.is_valid() is True
def test_builder_is_invalid_when_there_is_no_adapter_module(build_configuration_builder):
assert build_configuration_builder(adapter_module=None).is_valid() is False
def test_builder_is_invalid_when_there_is_no_adapter_class(build_configuration_builder):
assert build_configuration_builder(adapter_class=None).is_valid() is False
def test_builder_is_invalid_when_there_is_no_validations_module(build_configuration_builder):
assert build_configuration_builder(validations_module=None).is_valid() is False
def test_builder_is_invalid_when_there_is_no_export_format(build_configuration_builder):
assert build_configuration_builder(export_format=None).is_valid() is False
# ConfigurationBuilder.merge
def test_merging_a_builder_to_an_empty_build_does_not_affect_it():
configuration_builder = build_configuration.default()
assert configuration_builder == \
configuration_builder.merge(build_configuration.default())
def test_merging_a_builder_preserves_adapter_module_if_not_present(build_configuration_builder):
assert build_configuration_builder() \
.merge(build_configuration_builder(adapter_module=None)) \
.adapter.module == build_configuration_builder().adapter.module
def test_merging_a_builder_overwrites_adapter_module_if_present(build_configuration_builder):
assert build_configuration_builder() \
.merge(build_configuration_builder(adapter_module='overwritten_module')) \
.adapter.module == 'overwritten_module'
def test_merging_a_builder_preserves_adapter_class_if_not_present(build_configuration_builder):
assert build_configuration_builder() \
.merge(build_configuration_builder(adapter_class=None)) \
.adapter.klass == build_configuration_builder().adapter.klass
def test_merging_a_builder_overwrites_adapter_class_if_present(build_configuration_builder):
assert build_configuration_builder(adapter_class='original_class') \
.merge(build_configuration_builder(adapter_class='overwritten_class')) \
.adapter.klass == 'overwritten_class'
def test_merging_a_builder_preserves_original_adapter_class_if_not_present(build_configuration_builder):
assert build_configuration_builder(adapter_class='original_class') \
.merge(build_configuration_builder(adapter_class=None)) \
.adapter.klass == 'original_class'
def test_merging_a_build_overwrites_adapter_params_if_present(build_configuration_builder):
assert build_configuration_builder(adapter_params=['foo']) \
.merge(build_configuration_builder(adapter_params=['bar'])) \
.adapter.params == ['bar']
def test_merging_a_build_preserves_adapter_params_if_not_present(build_configuration_builder):
assert build_configuration_builder(adapter_params=['foo']) \
.merge(build_configuration_builder()) \
.adapter.params == ['foo']
def test_merging_two_builders_concatenate_their_ignored_tables(build_configuration_builder):
builder_one = build_configuration_builder(ignore_tables=['foo'])
builder_two = build_configuration_builder(ignore_tables=['bar'])
assert builder_one.merge(builder_two).ignore_tables == ['foo', 'bar']
def test_merging_a_builder_preserves_validations_module_if_not_present(build_configuration_builder):
assert build_configuration_builder() \
.merge(build_configuration_builder(validations_module=None)) \
.validations_module == build_configuration_builder().validations_module
def test_merging_a_builder_overwrites_validations_module_if_present(build_configuration_builder):
assert build_configuration_builder() \
.merge(build_configuration_builder(validations_module='overwritten_module')) \
.validations_module == 'overwritten_module'
# ConfigurationBuilder.build
def test_trying_to_build_an_invalid_configuration_raises_runtime_error(build_configuration_builder):
with pytest.raises(RuntimeError):
build_configuration_builder(adapter_module=None).build()
@mock.patch('sql_judge.parse_configuration.build_configuration.importlib')
def test_build_a_configuration(mock_importlib, build_configuration_builder):
assert build_configuration_builder().build()
# from_json
def test_from_json_uses_nested_dict_and_generates_a_configuration_builder(build_configuration_builder):
options = {
'adapter': {
'module': 'adapter', 'class': 'Adapter',
'params': ['foo'], 'named_params': {'foo': 'bar'}},
'ignore_tables':['metainfo'], 'validations': {'module': 'validations'}
}
assert build_configuration.load(options) == \
build_configuration_builder(
adapter_module='adapter',
adapter_class='Adapter',
adapter_params=['foo'],
adapter_named_params={'foo':'bar'},
ignore_tables=['metainfo'],
validations_module='validations',
export_format=None
)
def test_from_json_succeeds_even_with_an_empty_config():
assert build_configuration.load({}) == build_configuration.default()
|
from zion.gateways.docker.protocol import Protocol
from zion.gateways.docker.function import Function
from zion.gateways.docker.worker import Worker
import time
class DockerGateway():
def __init__(self, be):
self.be = be
self.req = be.req
self.conf = be.conf
self.logger = be.logger
self.account = be.account
self.redis = be.redis
self.method = self.req.method.lower()
self.scope = self.account[5:18]
self.functions_container = self.conf["functions_container"]
self.execution_server = self.conf["execution_server"]
def _get_object_stream(self):
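        # GET: stream the backend response body; PUT: stream the request body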
if self.method == 'get':
return self.be.response.app_iter
if self.method == 'put':
return self.req.environ['wsgi.input']
def _get_object_metadata(self):
headers = dict()
if self.method == "get":
headers = self.be.response.headers
elif self.method == "put":
if 'Content-Length' in self.req.headers:
headers['Content-Length'] = self.req.headers['Content-Length']
if 'Content-Type' in self.req.headers:
headers['Content-Type'] = self.req.headers['Content-Type']
for header in self.req.headers:
if header.startswith('X-Object'):
headers[header] = self.req.headers[header]
return headers
def execute_function(self, function_info):
"""
Executes the function.
:param function_info: function information
:returns: response from the function
"""
object_stream = self._get_object_stream()
object_metadata = self._get_object_metadata()
request_headers = dict(self.req.headers)
        f_name = list(function_info.keys())[0]
if function_info[f_name]:
function_parameters = eval(function_info[f_name])
else:
function_parameters = dict()
time1 = time.time()
function = Function(self.be, self.scope, f_name)
time2 = time.time()
fc = time2-time1
# print '------ FUNCTION took %0.6f s' % ((time2-time1))
time1 = time.time()
worker = Worker(self.be, self.scope, self.redis, function)
time2 = time.time()
wkr = time2-time1
# print '------ WORKER took %0.6f s' % ((time2-time1))
time1 = time.time()
protocol = Protocol(worker, object_stream, object_metadata,
request_headers, function_parameters, self.be)
resp = protocol.comunicate()
time2 = time.time()
ptc = time2-time1
# print '----- PROTOCOL took %0.6f s' % ((time2-time1))
total = fc + wkr + ptc
        with open("/tmp/zion.times", "a") as fl:
            fl.write("%0.6f\t%0.6f\t%0.6f : \t%0.6f\n" % (fc, wkr, ptc, total))
# return {"command": "RC"}
return resp
|
# Copyright (C) 2021 Intel Corporation
# SPDX-License-Identifier: BSD-3-Clause
import sys
import unittest
import numpy as np
import torch
import torch.nn.functional as F
import lava.lib.dl.slayer as slayer
verbose = ('-v' in sys.argv) or ('--verbose' in sys.argv)
seed = np.random.randint(1000)
# seed = 902
np.random.seed(seed)
if verbose:
print(f'{seed=}')
if torch.cuda.is_available():
device = torch.device('cuda')
else:
if verbose:
print(
'CUDA is not available in the system. '
'Testing for CPU version only.'
)
device = torch.device('cpu')
# neuron parameters
threshold = 1
scale = (1 << 12)
decay = int(np.random.random() * scale)
decay = torch.FloatTensor([decay]).to(device)
th_decay = int(np.random.random() * scale)
th_decay = torch.FloatTensor([th_decay]).to(device)
ref_decay = int(np.random.random() * scale)
ref_decay = torch.FloatTensor([ref_decay]).to(device)
state = torch.FloatTensor([0]).to(device)
# create input
time = torch.FloatTensor(np.arange(200)).to(device)
# expand to (batch, neuron, time) tensor
spike_input = torch.autograd.Variable(
torch.zeros([5, 4, len(time)]),
requires_grad=True
).to(device)
spike_input.data[..., np.random.randint(spike_input.shape[-1], size=5)] = 1
weight = torch.FloatTensor(
5 * np.random.random(size=spike_input.shape[-1]) - 0.5
).reshape([1, 1, spike_input.shape[-1]]).to(device)
w_input = slayer.utils.quantize(weight) * spike_input
# get the dynamics response
voltage = slayer.neuron.dynamics.leaky_integrator.dynamics(
w_input, decay=decay, state=state, w_scale=scale,
)
th, ref = slayer.neuron.dynamics.adaptive_threshold.dynamics(
voltage, # dynamics state
ref_state=state, # previous refractory state
ref_decay=ref_decay, # refractory decay
th_state=state + threshold, # previous threshold state
th_decay=th_decay, # threshold decay
th_scale=0.5 * threshold, # threshold step
th0=threshold, # threshold stable state
w_scale=scale, # fixed precision scaling
debug=True
)
spike = (voltage >= (th + ref)).to(voltage.dtype)
class TestAdTh(unittest.TestCase):
def test_input_voltage_range(self):
if verbose:
print(spike_input.sum(), spike_input.flatten())
if verbose:
print(spike.sum(), spike.flatten())
self.assertTrue(
spike_input.sum().item() > 0,
'There was zero input spike. Check the test setting.'
)
self.assertTrue(
spike.sum().item() > 0,
            'There was zero output spike. Check the test setting.'
)
def test_th_leak(self):
th_leak_num = th[..., 1:] - threshold
th_leak_den = th[..., :-1] - threshold
th_valid = (torch.abs(th_leak_den) > 100 / scale) \
& (spike[..., :-1] == 0)
if torch.sum(th_valid) > 0:
est_th_decay = torch.mean(
1 - th_leak_num[th_valid] / th_leak_den[th_valid]
) * scale
th_error = np.abs(
(est_th_decay.item() - th_decay.item()) / th_decay.item()
)
if verbose:
print(f'{th_error=}')
print(f'{est_th_decay=}')
print(f'{th_decay=}')
self.assertTrue(
th_error < 1e-1, # the estimate is crude
f'Expected estimated decay to match. '
f'Found {est_th_decay=} and {th_decay=}'
)
def test_ref_leak(self):
ref_leak_num = ref[..., 1:]
ref_leak_den = ref[..., :-1]
ref_valid = (torch.abs(ref_leak_den) > 100 / scale) \
& (spike[..., :-1] == 0)
if torch.sum(ref_valid) > 0:
est_ref_decay = torch.mean(
1 - ref_leak_num[ref_valid] / ref_leak_den[ref_valid]
) * scale
ref_error = np.abs(
(est_ref_decay.item() - ref_decay.item())
/ max(ref_decay.item(), 512)
)
if verbose:
print(f'{ref_error=}')
print(f'{est_ref_decay=}')
print(f'{ref_decay=}')
self.assertTrue(
ref_error < 1e-1, # the estimate is crude
f'Expected estimated decay to match. '
f'Found {est_ref_decay=} and {ref_decay=}'
)
def test_integer_states(self):
# there should be no quantization error
# when states are scaled with s_scale
voltage_error = torch.norm(
torch.floor(voltage * scale) - voltage * scale
)
self.assertTrue(
voltage_error < 1e-5,
f'Voltage calculation has issues with scaling. '
f'De-Scaling must result in integer states. '
f'Error was {voltage_error}'
)
def test_backward(self):
spike_target = spike.clone().detach()
voltage_target = voltage.clone().detach()
spike_target[
..., np.random.randint(spike_input.shape[-1], size=5)
] = 1
voltage_target[
..., np.random.randint(spike_input.shape[-1], size=5)
] -= 1
loss = F.mse_loss(spike, spike_target) \
+ F.mse_loss(voltage, voltage_target)
loss.backward()
# just looking for errors
# self.assertTrue(True, 'Encountered errors.')
|
import os
import random
import numpy as np
import json
import doctest
from decitala import fragment
from decitala.fragment import (
Decitala,
DecitalaException,
GreekFoot,
Breve,
Macron,
ProsodicMeter,
GeneralFragment,
FragmentEncoder,
FragmentDecoder,
get_all_prosodic_meters,
prosodic_meter_query
)
from decitala.utils import flatten
here = os.path.abspath(os.path.dirname(__file__))
decitala_path = os.path.dirname(here) + "/corpora/Decitalas"
greek_path = os.path.dirname(here) + "/corpora/Greek_Metrics"
def test_doctests():
assert doctest.testmod(fragment, raise_on_error=True)
def test_general_fragment_encoder():
g1 = GeneralFragment(data=[1.0, 2.0, 3.0, 4.0, 5.0], name="longerrrr")
dumped_g1 = json.dumps(g1, cls=FragmentEncoder)
expected_g1 = """{"frag_type": "general_fragment", "data": [1.0, 2.0, 3.0, 4.0, 5.0], "name": "longerrrr"}""" # noqa
assert dumped_g1 == expected_g1
def test_decitala_fragment_encoder():
d = Decitala("Anlarakrida")
dumped = json.dumps(d, cls=FragmentEncoder)
expected = """{"frag_type": "decitala", "name": "95_Anlarakrida"}"""
assert dumped == expected
# Possible manipulations for Decitalas and GreekFoot objects.
full_name = lambda x: x
without_extension = lambda x: x[:-4]
without_id_num_pre = lambda x: "".join([i for i in x if not(i.isdigit())])
without_id_num = lambda x: without_id_num_pre(x)[1:]
without_id_num_without_extension = lambda x: without_id_num(x)[:-4]
def test_all_decitala_names():
# 4 possible inputs are allowed for the names.
funcs = [full_name, without_extension, without_id_num, without_id_num_without_extension]
for this_file in os.listdir(decitala_path):
new_name = random.choice(funcs)(this_file)
this_decitala = Decitala(new_name)
assert this_decitala.full_path == decitala_path + "/" + this_file
# I also test a few problematic examples below, just for safety.
def test_gaja_gajajhampa_gajalila():
gaja = Decitala("Gaja")
gajajhampa = Decitala("77_Gajajhampa")
gajalila = Decitala("18_Gajalila.xml")
assert gaja.name == "99_Gaja"
assert gajajhampa.name == "77_Gajajhampa"
assert gajalila.name == "18_Gajalila"
def test_sama_kankala_sama():
sama = Decitala("Sama.xml")
kankala_sama = Decitala("Kankala_Sama")
assert sama.name == "53_Sama"
assert kankala_sama.name == "65_C_Kankala_Sama"
def test_jaya_jayacri_jayamangala():
jaya = Decitala("Jaya")
jaya2 = Decitala("Jaya.xml")
jayacri = Decitala("46_Jayacri")
jayamangala = Decitala("Jayamangala.xml")
assert jaya.name == "28_Jaya"
assert jaya2.name == "28_Jaya"
assert jayacri.name == "46_Jayacri"
assert jayamangala.name == "42_Jayamangala"
def test_all_greek_foot_names():
    # 2 possible inputs are allowed for the names.
funcs = [full_name, without_extension]
for this_file in os.listdir(greek_path):
new_name = random.choice(funcs)(this_file)
this_greek_foot = GreekFoot(new_name)
assert this_greek_foot.full_path == greek_path + "/" + this_file
def test_get_by_id():
random_nums = [str(x) for x in [71, 23, 14, 91, 108, 44]]
for this_id in random_nums:
assert Decitala.get_by_id(this_id) is not None
def test_id_num():
for i in range(0, 121, 20):
if i == 0:
assert Decitala("Aditala").id_num == str(i + 1)
else:
assert Decitala.get_by_id(i).id_num == str(i)
def test_decitala_carnatic_string():
rajacudamani = Decitala("Rajacudamani")
predicted = "o o | | | o o | S"
assert rajacudamani.carnatic_string == predicted
def test_dseg():
GeneralFragment.ql_array.cache_clear()
frag = GeneralFragment([1.0, 1.0, 2.0, 2.0, 3.0, 0.125, 1.0, 0.5, 4.0])
predicted = np.array([2, 2, 3, 3, 4, 0, 2, 1, 5])
assert np.array_equal(predicted, frag.dseg())
predicted_reduced = np.array([2, 3, 4, 0, 2, 1, 5])
assert np.array_equal(predicted_reduced, frag.dseg(reduced=True))
def test_fragment_augment():
f1 = GeneralFragment([4.0, 1.0, 2.0], name="myfragment")
f1a = f1.augment(factor=2, difference=0.25)
assert f1a.name == "myfragment/r:2/d:0.25"
def test_decitala_repr():
name_in = "Gajalila"
frag_id = Decitala(name_in).id_num
assert Decitala(name_in).__repr__() == "<fragment.Decitala {0}_{1}>".format(frag_id, name_in)
def test_decitala_num_matras():
frag = Decitala("Rajatala") # [1.0, 1.5, 0.25, 0.25, 1.0, 0.5, 1.5]
assert frag.num_matras == 12
def test_greek_split():
diiamb = GreekFoot("Diiamb")
split = diiamb.split(GreekFoot("Iamb"), GreekFoot("Iamb"))
expected = [GreekFoot("Iamb"), GreekFoot("Iamb")]
assert split == expected
class TestMorrisSymmetryClass():
def test_class_one(self): # X
aditala = Decitala("Aditala")
assert aditala.morris_symmetry_class() == 1
def test_class_two(self): # XY
iamb = GreekFoot("Iamb")
assert iamb.morris_symmetry_class() == 2
def test_macron():
m = Macron()
assert m.name == "Macron"
assert list(m.ql_array()) == [2.0]
assert m.frag_type == "macron"
def test_breve():
m = Breve()
assert m.name == "Breve"
assert list(m.ql_array()) == [1.0]
assert m.frag_type == "breve"
def test_latin_cretic_tetrameter():
"""
Cretic_Tetrameter_2,"[1.0, 1.0, 1.0, 2.0, 2.0, 1.0, 2.0, 2.0, 1.0, 2.0, 2.0, 1.0, 2.0]","[Peon_IV, Amphimacer, Amphimacer, Amphimacer]",latin
"""
ct2 = ProsodicMeter(name="Cretic_Tetrameter_2", origin="latin")
assert list(ct2.ql_array()) == [1.0, 1.0, 1.0, 2.0, 2.0, 1.0, 2.0, 2.0, 1.0, 2.0, 2.0, 1.0, 2.0]
assert len(ct2.components) == 4
assert ct2.origin == "latin"
def test_prosodic_meter_components():
all_prosodic_meters = get_all_prosodic_meters()
for f in all_prosodic_meters:
gen_ql = flatten([list(c.ql_array()) for c in f.components])
assert list(f.ql_array()) == gen_ql
def test_prosodic_meter_query_unordered():
    collection = {GreekFoot("Ionic_Major"), GreekFoot("Amphimacer")}
assert prosodic_meter_query(collection=collection) == [
ProsodicMeter("Cretic_Tetrameter", origin="latin"),
ProsodicMeter("Cretic_Tetrameter_3", origin="latin"),
ProsodicMeter("Cretic_Tetrameter_5", origin="latin"),
ProsodicMeter("Cretic_Tetrameter_6", origin="latin"),
]
|
# Fizzzzz.......Buzzzzzzzzzz
# Author: @BreathlessVictor on GitHub
# email: sombit0503@gmail.com
for i in range(1, 101):
    if i % 3 == 0 and i % 5 == 0:
        print("FizzBuzz")
    elif i % 3 == 0:
        print("Fizz")
    elif i % 5 == 0:
        print("Buzz")
    else:
        print(i)
|
# -*- coding: utf-8 -*-
# type: ignore
"""Store simple data between multiple executions of a script.
A simple class to store small amounts data between
multiple executions of a script (for one-off scripts).
Copyright: Manuel Barkhau 2020 Dedicated to the public domain.
CC0: https://creativecommons.org/publicdomain/zero/1.0/
Usage:
with SimpleCache(name="mycache") as cache:
cache['hello'] = u"wörld"
# later (in another process)
with SimpleCache(name="mycache", mode="r") as cache:
assert cache['hello'] == u"wörld"
"""
import os
import time
import pickle
import shutil
import hashlib
import inspect
import logging
import tempfile
import functools
from pathlib import Path
logger = logging.getLogger("litprog.simple_cache")
# TODO (mb 2021-07-25):
# redis backing
# compression
def _digest(parts) -> str:
dig = hashlib.sha1()
for part in parts:
dig.update(str(part).encode('utf-8'))
return dig.hexdigest()
class SimpleCache:
def __init__(
self,
*,
name="default",
mode="w",
dumps=pickle.dumps,
loads=pickle.loads,
cache_id=None,
) -> None:
if mode not in ("r", "w"):
msg = "Invalid value mode='{0}'. Valid modes are: 'r' and 'w'".format(mode)
raise ValueError(msg)
self._mode = mode
self._dumps = dumps
self._loads = loads
if cache_id is None:
            frame = inspect.stack()[1]
            parts = []
            # digest sibling source files so the cache invalidates when code changes
            for path in Path(frame.filename).parent.glob("*.py"):
parts.append(path)
parts.append(path.stat().st_mtime)
parts.append(frame.lineno)
cache_id = _digest(parts)
tmp = tempfile.gettempdir()
fname = cache_id + "_" + name
self.path = os.path.join(tmp, "_simple_file_cache_" + fname)
self._in_data = None
self._obj = None
self._modified = False
def _load_obj(self) -> None:
if os.path.exists(self.path):
with open(self.path, mode="rb") as fobj:
self._in_data = fobj.read()
try:
self._obj = self._loads(self._in_data)
except UnicodeError:
self._obj = self._loads(self._in_data.decode("utf-8"))
except Exception as ex:
logger.warning(f"invalid cache data (maybe code changed), error: {ex}")
self._in_data = None
self._obj = {}
else:
self._obj = {}
def decorate(self, make_key: callable = None) -> callable:
def decorator(func: callable) -> callable:
@functools.wraps(func)
def wrapper(*args, **kwargs):
if make_key is None:
key = str(args + tuple(kwargs.items()))
else:
key = make_key(*args, **kwargs)
if key in self:
return self[key]
else:
result = func(*args, **kwargs)
self[key] = result
return result
return wrapper
return decorator
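    # Decorator usage sketch (illustrative names): cached results persist
    # across runs only when the cache is later used as a context manager,
    # since __exit__ performs the actual write to disk.
    #
    #   cache = SimpleCache(name="memo")
    #   @cache.decorate()
    #   def slow_square(x):
    #       return x * x
    #
    #   with cache:
    #       print(slow_square(4))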
def __contains__(self, key: str) -> bool:
if self._obj is None:
self._load_obj()
key_data = key.encode("utf-8")
cache_key = hashlib.sha1(key_data).hexdigest()
return cache_key in self._obj
def __getitem__(self, key: str):
if self._obj is None:
self._load_obj()
cache_key = hashlib.sha1(key.encode("utf-8")).hexdigest()
return self._obj[cache_key]
def __setitem__(self, key: str, val: any):
if self._obj is None:
self._load_obj()
self._modified = True
cache_key = hashlib.sha1(key.encode("utf-8")).hexdigest()
self._obj[cache_key] = val
def __enter__(self):
return self
def __exit__(self, typ, val, tb):
if typ or val or tb:
return
is_modified = self._obj and self._modified and "w" in self._mode
if not is_modified:
return
output = self._dumps(self._obj)
if isinstance(output, str):
out_data = output.encode("utf-8")
else:
out_data = output
assert isinstance(out_data, bytes)
if self._in_data == out_data:
return
nonce = str(time.time())
tmp_file = self.path + nonce
try:
with open(tmp_file, mode="wb") as fobj:
fobj.write(out_data)
shutil.move(tmp_file, self.path)
finally:
if os.path.exists(tmp_file):
os.unlink(tmp_file)
def main():
with SimpleCache() as cache:
        assert isinstance(cache, SimpleCache)
assert isinstance(cache._obj, dict)
cache._obj['hello'] = u"wörld"
print("cache_path:", cache.path)
    with open(cache.path, mode="rb") as fobj:
print(fobj.read())
# later
with SimpleCache(mode="r") as cache:
assert isinstance(cache._obj, dict)
assert cache._obj['hello'] == u"wörld"
if __name__ == '__main__':
main()
|
# ignore alignability for the moment, we'll assume everything within 100bp of a read is alignable
import numpy
import argparse
import pysam
from string import maketrans
SAMPLE_MARGIN = 25 # on either side
CHROMS = None
COMPL = maketrans("ACGT", "TGCA")
def readWig(wig_file):
fin = open(wig_file, 'r')
masks = {}
mask = []
chrom = None
start = 1
count = 0
for line in fin:
if line[0] == 'f': # header line ex. "fixedStep chrom=chr1 step=1 ..."
params = {param.split('=')[0]:param.split('=')[1] for param in line.strip().split(' ') if '=' in param}
if params["chrom"] != chrom:
if len(mask) > 0:
masks[chrom] = numpy.array(mask, dtype='b') # boolean?
mask = []
chrom = params["chrom"]
print chrom
'''
if chrom == "chr10":
break
'''
elif start + count != int(params["start"]):
print "GAP!"
start = int(params["start"])
count = 0
else:
mask.append(int(float(line.strip())))
count += 1
if len(mask) > 0:
masks[chrom] = numpy.array(mask, dtype='b') # boolean?
mask = []
fin.close()
# fill out the empty chroms for testing...
for c in xrange(len(CHROMS)):
if not masks.has_key(CHROMS[c]):
print "missing", CHROMS[c]
masks[c] = numpy.zeros(0, dtype='b')
else:
masks[c] = masks[CHROMS[c]]
del masks[CHROMS[c]]
return masks
def getRandom(chrom, first, last, cnt, mask=None):
if mask is not None and numpy.sum(mask[chrom][first:last]) > 0:
nums = numpy.random.choice([i for i in xrange(first, last) if mask[chrom][i] == 1], cnt, replace=True)
else:
# for reads very near the end, this range can be 0, skip it
if last <= first:
nums = []
else:
nums = numpy.random.randint(first, last, cnt)
if not hasattr(nums, '__iter__'):
nums = [nums]
return nums
def main(bam_npy_file, fasta_file, chrom_file, k, output_file, exp=1, limit=None, window_max=SAMPLE_MARGIN*2, mask=None):
global CHROMS
CHROMS = [c.split()[0] for c in open(chrom_file).read().strip().split('\n')]
if mask is not None:
print "Reading alignability mask..."
mask = readWig(mask)
baseline_kmer_counts = {}
trailing = 0
leading = 1
density_count = 1
print "Reading FASTA seqs..."
fa = pysam.Fastafile(fasta_file)
refs = [fa.fetch(c).upper() for c in CHROMS]
ref_lens = [len(r) for r in refs]
print "Reading BAM..."
bam = numpy.load(bam_npy_file)
tot_reads = len(bam)
print "%i reads" % tot_reads
num_reads = 0
b = 0
while b < tot_reads:
read = bam[b]
if b % 10**6 == 0:
print "%i (%.2f%%)" % (b, float(b)/tot_reads*100)
while leading < tot_reads and (bam[leading][0] < read[0] or (bam[leading][0] == read[0] and bam[leading][1] <= read[1] + SAMPLE_MARGIN)):
if bam[leading][1] != bam[leading-1][1]: # count only 1 read at each unique position
density_count += 1
leading += 1
while trailing < b and (bam[trailing][0] < read[0] or bam[trailing][1] < read[1] - SAMPLE_MARGIN):
if bam[trailing][1] != bam[trailing-1][1]:
density_count -= 1
trailing += 1
#print "read %i (%s), leading %i (%s)" % (b, str(read), leading, str(bam[leading]))
if density_count < 0:
print density_count, "too low"
first = max(0, read[1] - SAMPLE_MARGIN) # inclusive
last = min(ref_lens[read[0]] - k, read[1] + SAMPLE_MARGIN + 1 - k) # exclusive
for pos in getRandom(read[0], first, last, min(int(density_count ** exp), window_max), mask): # get (density ** exp) random positions
# get sequence
if read[2]:
seq = refs[read[0]][pos : pos + k].translate(COMPL)[::-1]
else:
seq = refs[read[0]][pos : pos + k]
# count k-mers
#for i in xrange(len(seq) - k + 1):
baseline_kmer_counts[seq] = baseline_kmer_counts.get(seq, 0) + 1
if b == leading + 1: # DON'T KNOW WHY THIS HAPPENS
break
b = leading + 1
fout = open(output_file, 'w')
kmers = baseline_kmer_counts.keys()
total_kmers = sum(v for k,v in baseline_kmer_counts.iteritems())
fout.write(','.join(kmers) + "\n")
fout.write(','.join(['%.4f' % (float(baseline_kmer_counts[k])/total_kmers) for k in kmers]))
fout.close()
if __name__ == "__main__":
parser = argparse.ArgumentParser(description = "Compute baseline k-mer frequencies")
parser.add_argument("bam", help="BAM.npy file")
parser.add_argument("ref", help="Fasta file")
parser.add_argument("chroms", help="Chromosome file")
parser.add_argument("k", help="k-mer size", type=int)
parser.add_argument("out", help="Output (csv) file")
parser.add_argument("--exp", help="Density exponent", type=float, default=1)
parser.add_argument("--limit", help="Reads to look at, tiled across entire BAM", type=int)
parser.add_argument("--window_max", help="Maximum # reads to count in a single window", type=int)
parser.add_argument("--mask", help="WIG formatted alignability mask")
args = parser.parse_args()
main(args.bam, args.ref, args.chroms, args.k, args.out, args.exp, args.limit, args.window_max, args.mask)
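# Example invocation (script and file names are illustrative):
#   python baseline_kmers.py reads.npy hg19.fa chroms.txt 6 kmers.csv --mask align.wig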
|
#!/usr/bin/env python
from __future__ import absolute_import
from __future__ import print_function
from __future__ import division
import math
import torch
from torch import nn
from torch.autograd import Function
from torch.nn.modules.utils import _pair
from torch.autograd.function import once_differentiable
import LAU
class LAUFunction(Function):
@staticmethod
def forward(ctx, input, offset_x, offset_y, k_h, k_w):
ctx.k_h = k_h
ctx.k_w = k_w
output = LAU.location_aware_upsampling_forward(input,
offset_x,
offset_y,
ctx.k_h,
ctx.k_w)
ctx.save_for_backward(input, offset_x, offset_y)
return output
@staticmethod
@once_differentiable
def backward(ctx, grad_output):
input, offset_x, offset_y = ctx.saved_tensors
grad_input, grad_offset_x, grad_offset_y = \
LAU.location_aware_upsampling_backward(input,
offset_x,
offset_y,
grad_output)
return grad_input, grad_offset_x, grad_offset_y, None, None
class LDUFunction(Function):
@staticmethod
def forward(ctx, input, k_h, k_w):
ctx.k_h = k_h
ctx.k_w = k_w
output = LAU.location_determined_upsampling_forward(input,
ctx.k_h,
ctx.k_w)
ctx.save_for_backward(input)
return output
@staticmethod
@once_differentiable
def backward(ctx, grad_output):
        input, = ctx.saved_tensors  # saved_tensors returns a tuple
grad_input = LAU.location_determined_upsampling_backward(input,
grad_output)
return grad_input, None, None
class LDUMultiOutputFunction(Function):
@staticmethod
def forward(ctx, input, k_h, k_w):
ctx.k_h = k_h
ctx.k_w = k_w
output, output_lt, output_lb, output_rt, output_rb = LAU.location_determined_upsampling_multi_output_forward(input,
ctx.k_h,
ctx.k_w)
ctx.save_for_backward(input)
return output, output_lt, output_lb, output_rt, output_rb
@staticmethod
@once_differentiable
def backward(ctx, grad_output):
        input, = ctx.saved_tensors  # saved_tensors returns a tuple
grad_input = LAU.location_determined_upsampling_multi_output_backward(input,
grad_output)
return grad_input, None, None
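# A minimal convenience wrapper (a sketch, not part of the original extension):
# it exposes LAUFunction as an nn.Module so the upsampling can be composed
# like any other layer. The k_h / k_w defaults are illustrative.
class LocationAwareUpsample(nn.Module):
    def __init__(self, k_h=2, k_w=2):
        super(LocationAwareUpsample, self).__init__()
        self.k_h = k_h
        self.k_w = k_w
    def forward(self, x, offset_x, offset_y):
        # LAUFunction.forward expects (input, offset_x, offset_y, k_h, k_w)
        return LAUFunction.apply(x, offset_x, offset_y, self.k_h, self.k_w)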
|
import os
import csv
import tensorflow as tf
BUFFER_SIZE=10000
def load_dataset(datapath, vocab, dimension=0, splits=["split_1", "split_2", "split_4", "split_8", "split_16"]):
dataset = []
data = csv.DictReader(open(datapath, "r"))
midi_paths = []
for row in data:
filepath, valence, arousal = row["filepath"], int(row["valence"]), int(row["arousal"])
for split in splits:
if split + "/" in filepath:
# Form midi filepath
piece_path = os.path.join(os.path.dirname(datapath), filepath)
# Form txt filepath
txt_file = os.path.splitext(piece_path)[0] + ".txt"
if os.path.exists(txt_file):
# Read txt file
tokens = []
with open(txt_file) as fp:
tokens = [vocab[w] for w in fp.read().split(" ")]
                    if dimension == 0:
                        label = [valence]
                    elif dimension == 1:
                        label = [arousal]
                    elif dimension == 2:
                        label = [valence, arousal]
dataset.append((tokens, label))
return dataset
def build_dataset(dataset, batch_size):
tf_dataset = tf.data.Dataset.from_generator(lambda: dataset, (tf.int32, tf.int32))
tf_dataset = tf_dataset.shuffle(BUFFER_SIZE)
tf_dataset = tf_dataset.padded_batch(batch_size, padded_shapes=([None], [1]), padding_values=(1, 1))
return tf_dataset
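# Usage sketch (hypothetical paths and vocabulary; the CSV must provide
# "filepath", "valence" and "arousal" columns, as load_dataset expects):
#
#   vocab = {"<pad>": 1, "C4": 2, "D4": 3}
#   data = load_dataset("data/annotations.csv", vocab, dimension=0)
#   for tokens, labels in build_dataset(data, batch_size=32).take(1):
#       print(tokens.shape, labels.shape)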
|
from sqlalchemy import Table, Column, MetaData, select, testing
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.dialects.postgresql import JSONB, JSON
from sqlalchemy.orm import sessionmaker
from sqlalchemy.testing import fixtures
from sqlalchemy.types import Integer
from sqlalchemy import JSON as BaseJSON
meta = MetaData()
# Plain table object for the core test.
# Try all three JSON type objects.
json_table = Table('json_model', meta,
Column('id', Integer, primary_key=True, autoincrement=False),
Column('jsonb_data', JSONB),
Column('json_data', JSON),
Column('base_json_data', BaseJSON))
# ORM class for the session test.
class JSONModel(declarative_base()):
__table__ = json_table
class JSONTest(fixtures.TestBase):
def setup_method(self, method):
meta.create_all(testing.db)
testing.db.execute(
json_table.insert(),
[dict(id=1,
jsonb_data={'a': 1},
json_data={'b': 2},
base_json_data={'c': 3}),
dict(id=2,
jsonb_data={'d': 4},
json_data={'e': 5},
base_json_data={'f': 6})])
def teardown_method(self, method):
meta.drop_all(testing.db)
def test_json(self):
result = []
query = select([json_table.c.jsonb_data,
json_table.c.json_data,
json_table.c.base_json_data,
])
for row in testing.db.execute(query):
result.append((row.jsonb_data, row.json_data, row.base_json_data))
assert result == [({'a': 1}, {'b': 2}, {'c': 3}),
({'d': 4}, {'e': 5}, {'f': 6})]
class JSONSessionTest(fixtures.TestBase):
def setup_method(self, method):
meta.create_all(testing.db)
self.sessionmaker = sessionmaker(testing.db)
session = self.sessionmaker()
session.add(JSONModel(id=1,
jsonb_data={'a': 1},
json_data={'b': 2},
base_json_data={'c': 3}))
session.add(JSONModel(id=2,
jsonb_data={'d': 4},
json_data={'e': 5},
base_json_data={'f': 6}))
session.commit()
def teardown_method(self, method):
meta.drop_all(testing.db)
def test_json(self):
session = self.sessionmaker()
result = []
for row in session.query(JSONModel).all():
result.append((row.jsonb_data, row.json_data, row.base_json_data))
assert result == [({'a': 1}, {'b': 2}, {'c': 3}),
({'d': 4}, {'e': 5}, {'f': 6})]
|
# -*- coding: utf-8 -*-
class UmaClasse(object):
def metodo_1(self):
pass
def metodo_2(self, param, param2=None):
pass
def funcao_1():
if True:
pass
a = 1
b = 2
return a * b
|
from django.urls import path
from . import views
app_name = 'servers'
urlpatterns = [
path('', views.index, name='index'),
path('add/', views.add, name='add'),
path('edit/<int:id>', views.edit, name='edit'),
path('delete/<int:id>', views.delete, name='delete'),
path('reset/', views.reset, name='reset'),
]
|
from django.test import TestCase
from django.core.management import call_command
from ..models import Report, Cluster
class CustomCommandTest(TestCase):
def setUp(self):
# Building dataset for test
keys = ('latitude', 'longitude', 'category')
data = (
(-22.0003, -43.0002, 'F'),
(-22.0004, -43.0001, 'C'),
(-22.0003, -43.0002, 'S'),
(-26.0003, -60.0001, 'F'),
(-26.0004, -60.0001, 'C'),
(-26.0005, -60.0002, 'S'),
(-12.0003, -23.0001, 'F'),
(-12.0004, -23.0001, 'C'),
(-12.0005, -23.0002, 'S'),
)
# Creating database rows for dataset
for point in data:
args = dict(zip(keys, point))
Report.objects.create(**args)
def test_update_clusters(self):
'''Compute cluster command should create clusters on database.'''
call_command('compute_clusters')
self.assertEqual(3, Cluster.objects.count())
|
import sqlite3
from contextlib import closing
dados = [
["São Paulo", 43663672], ["Minas Gerais", 21292666], ["Rio de Janeiro", 17366189],["Bahia", 14930634],
["Paraná", 11516840], ["Rio Grande do Sul", 11422973], ["Pernambuco", 9616621], ["Ceará", 9187103],
["Pará", 8690745], ["Santa Catarina", 7252502], ["Maranhão", 7114598], ["Goiáis", 7113540],
["Amazonas", 4207714], ["Espirito Santo", 4064052], ["Paraíba", 4039277],
["Rio Grande do Norte", 3534165], ["Mato Grosso", 3526220], ["Alagoas", 3351543],
["Piauí", 3281480], ["Distrito Federal", 3055149], ["Mato Grosso do Sul", 2809394],
["Sergipe", 2318822], ["Rondônia", 1796460], ["Tocantins", 1590248], ["Acre", 894470],
["Amapá", 861773], ["Roraima", 631181]
]
with sqlite3.connect('Pais.db') as conexao:
conexao.row_factory = sqlite3.Row
print(f"{'id':3s} {'Estado':20s} {'População':12s}")
print("=" * 37)
for estado in conexao.execute("select * from estados order by id"):
print(f"{estado['id']:3d} {estado['nome']:20s} {estado['populacao']:12d}")
busca = input("Digite um nome de um Estado para buscar: ")
with sqlite3.connect('Pais.db') as conexao:
with closing(conexao.cursor()) as cursor:
cursor.execute("select * from estados where nome = ?", (busca,))
while True:
resultado = cursor.fetchone()
if resultado is None:
break
print(f"Nome: {resultado[1]}\nid: {resultado[0]}\npopulação: {resultado[2]}")
busca2 = input("Digite o valor da populaçao do Estado escolhido: ")
with sqlite3.connect("Pais.db") as conexao:
with closing(conexao.cursor()) as cursor:
cursor.execute(
"""
update estados
set populacao = ?
where nome = ?
""",(busca2,busca))
conexao.commit()
with sqlite3.connect('Pais.db') as conexao:
conexao.row_factory = sqlite3.Row
print(f"{'id':3s} {'Estado':20s} {'População':12s}")
print("=" * 37)
for estado in conexao.execute("select * from estados order by id"):
print(f"{estado['id']:3d} {estado['nome']:20s} {estado['populacao']:12d}")
|
#!/usr/bin/env python
import getopt
import json
import os
import shutil
import subprocess
import sys
import urllib2
# colors for pretty printing
COLOR_FAIL = '\033[91m'
COLOR_ENDC = '\033[0m'
# Platform build paths
BUILD_PATH_IOS = "build-ios"
BUILD_PATH_MAC = "build-mac"
BUILD_PATH_LINUX = "build-linux"
BUILD_PATH_WINDOWS = "build-windows"
# global variables
app_name = ""
config_file_path = ""
#
# Helpers
#
# print wrapper
def terminal_output(text, warning = None):
if warning:
print(COLOR_FAIL + text + COLOR_ENDC)
else :
print text
# reset path
def reset_root():
path = sys.path[0]
os.chdir(path)
# create directory
def create_directory(path):
try:
os.mkdir(path)
except OSError:
terminal_output ("Creation of the directory %s failed" % path)
else:
terminal_output ("Successfully created the directory %s " % path)
# replace old_value with new_value in filepath
def replace_in_file(filepath, new_value, old_value):
with open (filepath, 'r') as file:
filedata = file.read().decode("utf8")
filedata = filedata.replace(old_value, new_value)
with open (filepath, 'w') as file:
file.write(filedata.encode("utf8"))
# copy single file
def copy_file(src, dest):
if os.path.exists(src):
terminal_output('copy file:' + src)
shutil.copyfile(src, dest)
else :
terminal_output('source file does not exist:' + src, True)
# copy folder
def copy_folder(src, dest):
try:
shutil.copytree(src, dest)
# Directories are the same
except shutil.Error as e:
terminal_output('Directory not copied. Error: %s' % e)
# Any error saying that the directory doesn't exist
except OSError as e:
terminal_output('Directory not copied. Error: %s' % e)
terminal_output('Files copied to: %s' % dest)
#
# platform setups
#
def setup_android(config):
android_bundle_id = config['android_bundle_id']
version_name = config['version_name']
copy_android_files(android_bundle_id, version_name)
def setup_ios(config):
ios_bundle_id = config['ios_bundle_id']
version_name = config['version_name']
copy_ios_files(ios_bundle_id, version_name)
run_ios_cmake()
def setup_mac(config):
ios_bundle_id = config['ios_bundle_id']
copy_mac_files(ios_bundle_id)
run_mac_cmake()
def setup_linux(config):
copy_linux_files()
run_linux_cmake()
def setup_windows(config):
copy_windows_files()
run_windows_cmake()
#
# platform copy helpers
#
def copy_ios_files(bundle_id, version_name):
# iOS
dest = 'proj.ios_mac/'
if not os.path.isdir(dest):
create_directory(dest)
src = 'templates/proj.ios_mac/ios'
dest = 'proj.ios_mac/ios'
copy_folder(src, dest)
infoplistpath = dest + '/Info.plist'
replace_in_file(infoplistpath, bundle_id, 'org.cocos2dx.hellocpp')
terminal_output('ios App bundleid set to: %s in folder: %s ' % (bundle_id, infoplistpath))
#ios specific resources
src = config_file_path + '/ios/Images.xcassets' #todo
dest = dest + '/Images.xcassets'
copy_folder(src, dest)
def copy_android_files(bundle_id, version_name):
# Android
src = 'templates/proj.android'
dest = 'proj.android'
if os.path.isdir(dest):
terminal_output('Workspace not clean', True)
sys.exit(2)
copy_folder(src, dest)
src = 'templates/proj.android'
dest = 'proj.android'
set_android_values(dest, bundle_id, version_name)
# rename android folders
short_app_name = app_name.replace(" ", "")
src = 'proj.android/app/jni/hellocpp'
dest = 'proj.android/app/jni/' + short_app_name.lower()
os.rename(src, dest)
#android specific ressources
src = config_file_path + 'android/res/mipmap-hdpi' #todo
dest = 'proj.android/app/res/mipmap-hdpi'
copy_folder(src, dest)
src = config_file_path + 'android/res/mipmap-mdpi' #todo
dest = 'proj.android/app/res/mipmap-mdpi'
copy_folder(src, dest)
src = config_file_path + 'android/res/mipmap-xhdpi' #todo
dest = 'proj.android/app/res/mipmap-xhdpi'
copy_folder(src, dest)
src = config_file_path + 'android/res/mipmap-xxhdpi' #todo
dest = 'proj.android/app/res/mipmap-xxhdpi'
copy_folder(src, dest)
src = config_file_path + 'android/res/mipmap-xxxhdpi' #todo
dest = 'proj.android/app/res/mipmap-xxxhdpi'
copy_folder(src, dest)
def copy_linux_files():
# Linux
src = 'templates/proj.linux'
dest = 'proj.linux'
if os.path.isdir(dest):
terminal_output('Workspace not clean', True)
sys.exit(2)
copy_folder(src, dest)
def copy_mac_files(bundle_id):
dest = 'proj.ios_mac/'
if not os.path.isdir(dest):
create_directory(dest)
src = 'templates/proj.ios_mac/mac'
dest = 'proj.ios_mac/mac'
copy_folder(src, dest)
infoplistpath = dest + '/Info.plist'
replace_in_file(infoplistpath, bundle_id, 'org.cocos2dx.hellocpp')
terminal_output('mac App bundleid set to: %s in folder: %s ' % (bundle_id, infoplistpath))
#mac specific resources
src = config_file_path + '/mac/Images.xcassets' #todo
dest = dest + '/Images.xcassets'
copy_folder(src, dest)
def copy_windows_files():
src = 'templates/proj.win32'
dest = 'proj.win32'
if os.path.isdir(dest):
        terminal_output('Workspace not clean', True)
sys.exit(2)
copy_folder(src, dest)
#
# platform cmake runs
#
def run_ios_cmake():
dest = BUILD_PATH_IOS
create_directory(dest)
os.chdir(dest)
terminal_output('create ios project file')
subprocess.call(["cmake", "..", "-GXcode", "-DCMAKE_SYSTEM_NAME=iOS", "-DCMAKE_OSX_SYSROOT=iphoneos"])
reset_root()
def run_mac_cmake():
dest = BUILD_PATH_MAC
create_directory(dest)
os.chdir(dest)
terminal_output('create mac project file')
subprocess.call(["cmake", "..", "-GXcode"])
reset_root()
def run_linux_cmake():
dest = BUILD_PATH_LINUX
create_directory(dest)
os.chdir(dest)
terminal_output('create linux project file')
subprocess.call(["cmake", ".."])
reset_root()
def run_windows_cmake():
dest = BUILD_PATH_WINDOWS
create_directory(dest)
# os.chdir(dest)
# terminal_output('create windows project file')
#subprocess.call(["cmake", "..", "-GVisual Studio 15 2017 "]) #Todo did not work for CI
# reset_root()
#
# resource copy helpers
#
def copy_color_plugin():
# TODO this is only for LRA - refactor
src = config_file_path + '/code'
if os.path.isdir(src):
terminal_output('copy color plugin')
shutil.copyfile(src + '/ColorPlugin.h', 'Classes/Helpers/ColorPlugin.h')
shutil.copyfile(src + '/ColorPlugin.cpp', 'Classes/Helpers/ColorPlugin.cpp')
def copy_resources():
src = config_file_path + '/Resources' #todo
dest = 'Resources'
if os.path.isdir(dest):
terminal_output("Warning Resource Folder already exists", True)
else :
copy_folder(src, dest)
def copy_cmake():
# cmake
src = 'templates/CMakeLists.txt'
dest = 'CMakeLists.txt'
if os.path.exists(dest):
terminal_output("Warning cmake file already exists", True)
else :
shutil.copyfile(src, dest)
short_app_name = app_name.replace(" ", "")
replace_in_file(dest, short_app_name.lower(), 'hellocpp') #TODO remove lower breaks android ci build
#
# android value helper
#
def set_android_values(path, bundle_id, version_name):
manifestpath = path + '/app/AndroidManifest.xml'
replace_in_file(manifestpath, bundle_id, 'org.cocos2dx.hellocpp')
terminal_output('App bundleid set to: %s in folder: %s ' % (bundle_id, manifestpath))
stringvaluepath = path + '/app/res/values/strings.xml'
replace_in_file(stringvaluepath, app_name, 'HelloCpp')
terminal_output('App name set to: %s in folder: %s ' % (app_name, stringvaluepath))
gradlepath = path + '/settings.gradle'
replace_in_file(gradlepath, app_name, 'HelloCpp')
gradlepath = path + '/app/build.gradle'
replace_in_file(gradlepath, bundle_id, 'org.cocos2dx.hellocpp')
code = 'majorVersion = ' + str(version_name)
replace_in_file(gradlepath, code , 'majorVersion = replace')
name = 'versionName \"' + version_name + '\"'
replace_in_file(gradlepath, name, 'versionName \"1.0\"')
#
# workspace cleanup
#
def clean_workspace():
dest = 'CMakeLists.txt'
if os.path.exists(dest):
os.remove(dest)
terminal_output('Removed %s' % dest)
dest = 'proj.ios_mac'
if os.path.isdir(dest):
shutil.rmtree(dest)
terminal_output('Removed %s' % dest)
dest = 'proj.android'
if os.path.isdir(dest):
shutil.rmtree(dest)
terminal_output('Removed %s' % dest)
dest = 'proj.linux'
if os.path.isdir(dest):
shutil.rmtree(dest)
terminal_output('Removed %s' % dest)
dest = 'Resources'
if os.path.isdir(dest):
shutil.rmtree(dest)
terminal_output('Removed %s' % dest)
dest = BUILD_PATH_IOS
if os.path.isdir(dest):
shutil.rmtree(dest)
terminal_output('Removed %s' % dest)
dest = BUILD_PATH_MAC
if os.path.isdir(dest):
shutil.rmtree(dest)
terminal_output('Removed %s' % dest)
dest = BUILD_PATH_LINUX
if os.path.isdir(dest):
shutil.rmtree(dest)
terminal_output('Removed %s' % dest)
#
# ci
#
def ci_build(platforms):
terminal_output('Starting Ci Build')
global config_file_path
config_file_path = "examples/little-ninja/" # if not on tag use this as fallback CI build
if os.environ.get('TRAVIS_TAG'):
tagname = os.environ["TRAVIS_TAG"]
if "little-ninja" in tagname:
config_file_path = "examples/little-ninja/"
elif "little-robot-adventure" in tagname:
config_file_path = "examples/little-robot-adventure/"
elif "the-dragon-kid" in tagname:
config_file_path = "examples/the-dragon-kid/"
elif "4friends" in tagname:
config_file_path = "examples/4friends/"
prepare_project_files(platforms)
#
# ci - create linux appimage
#
def ci_appimage():
if os.environ.get('TRAVIS_TAG'):
tagname = os.environ["TRAVIS_TAG"]
project_name = "little-ninja" # if not on tag use this as fallback CI build
if "little-ninja" in tagname:
project_name = "little-ninja"
elif "little-robot-adventure" in tagname:
project_name = "little-robot-adventure"
elif "the-dragon-kid" in tagname:
project_name = "the-dragon-kid"
elif "4friends" in tagname:
project_name = "4friends"
else :
sys.exit(0)
short_app_name = project_name.replace("-", "").lower()
# create directories
dest = project_name + '.AppDir'
create_directory(dest)
path = dest + '/usr'
create_directory(path)
src_path = 'examples/'+ project_name + '/android/web_hi_res_512.png'
dest_path = dest + '/' + project_name + '.png'
copy_file(src_path, dest_path)
src_path = 'examples/'+ project_name + '/'+ project_name + '.desktop'
dest_path = dest + '/' + project_name + '.desktop'
copy_file(src_path, dest_path)
# bin files
src_path = BUILD_PATH_LINUX + '/bin/' + short_app_name
dest_path = dest + '/bin'
copy_folder(src_path, dest_path)
# lib files
src_path = BUILD_PATH_LINUX + '/lib'
dest_path = dest + '/usr/lib'
copy_folder(src_path, dest_path)
src_path = 'cocos2d/external/linux-specific/fmod/prebuilt/64-bit/libfmod.so'
dest_path = dest + '/usr/lib/libfmod.so'
copy_file(src_path, dest_path)
src_path = 'cocos2d/external/linux-specific/fmod/prebuilt/64-bit/libfmod.so.6'
dest_path = dest + '/usr/lib/libfmod.so.6'
copy_file(src_path, dest_path)
src_path = 'cocos2d/external/linux-specific/fmod/prebuilt/64-bit/libfmodL.so'
dest_path = dest + '/usr/lib/libfmodL.so.6'
copy_file(src_path, dest_path)
src_path = '/usr/lib/x86_64-linux-gnu/libpng12.so.0'
dest_path = dest + '/usr/lib/libpng12.so.0'
copy_file(src_path, dest_path)
src_path = '/usr/lib/x86_64-linux-gnu/libcurl-gnutls.so.4'
dest_path = dest + '/usr/lib/libcurl-gnutls.so.4'
copy_file(src_path, dest_path)
src_path = '/usr/lib/x86_64-linux-gnu/libGLEW.so.1.13'
dest_path = dest + '/usr/lib/libGLEW.so.1.13'
copy_file(src_path, dest_path)
# get apprun file
url = 'https://raw.githubusercontent.com/AppImage/AppImageKit/master/resources/AppRun'
filedata = urllib2.urlopen(url)
datatowrite = filedata.read()
with open(dest + '/AppRun', 'wb') as f:
f.write(datatowrite)
os.chmod(dest + '/AppRun', 0o755)
# create appimage
url = 'https://github.com/AppImage/AppImageKit/releases/download/continuous/appimagetool-x86_64.AppImage'
filedata = urllib2.urlopen(url)
datatowrite = filedata.read()
with open('appimagetool-x86_64.AppImage', 'wb') as f:
f.write(datatowrite)
os.chmod('appimagetool-x86_64.AppImage', 0o755)
os.environ["ARCH"] = "x86_64 "
subprocess.call('./appimagetool-x86_64.AppImage ' + dest, shell = True)
# rename appimage file
os.rename(project_name + '-x86_64.AppImage', tagname + '-linux.AppImage')
#
# ci - create mac app
#
def ci_macimage():
if os.environ.get('TRAVIS_TAG'):
tagname = os.environ["TRAVIS_TAG"]
project_name = "little-ninja" # if not on tag use this as fallback CI build
if "little-ninja" in tagname:
project_name = "little-ninja"
elif "little-robot-adventure" in tagname:
project_name = "little-robot-adventure"
elif "the-dragon-kid" in tagname:
project_name = "the-dragon-kid"
elif "4friends" in tagname:
project_name = "4friends"
else :
sys.exit(0)
short_app_name = project_name.replace("-", "").lower()
appname = short_app_name + '.app'
src_path = BUILD_PATH_MAC + '/bin/' + short_app_name + '/Release/' + appname
dest_path = appname
copy_folder(src_path, dest_path)
# rename app file
os.rename(appname, tagname + '.app')
#
# ci - create windows
#
#fresh-engine\build-windows\bin\littleninja\Debug\littleninja.exe
def ci_windows():
if os.environ.get('TRAVIS_TAG'):
tagname = os.environ["TRAVIS_TAG"]
project_name = "little-ninja" # if not on tag use this as fallback CI build
if "little-ninja" in tagname:
project_name = "little-ninja"
elif "little-robot-adventure" in tagname:
project_name = "little-robot-adventure"
elif "the-dragon-kid" in tagname:
project_name = "the-dragon-kid"
elif "4friends" in tagname:
project_name = "4friends"
else :
sys.exit(0)
short_app_name = project_name.replace("-", "").lower()
appname = short_app_name + '.exe'
src_path = BUILD_PATH_WINDOWS + '/bin/' + short_app_name + '/Release/' + appname
dest_path = appname
copy_file(src_path, dest_path)
# rename app file
os.rename(appname, tagname + '-windows.exe')
#
# copy project files
#
def prepare_project_files(platforms):
global app_name
config = json.loads(open(config_file_path + "/config.json").read())
app_name = config['app_name']
copy_resources()
copy_cmake()
copy_color_plugin()
for platform in platforms:
terminal_output('Copy files for platform: ' + platform)
if platform is "android":
setup_android(config)
elif platform is "ios":
setup_ios(config)
elif platform is "mac":
setup_mac(config)
elif platform is "linux":
setup_linux(config)
elif platform is "windows":
setup_windows(config)
#
# main
#
def main(argv):
global config_file_path
platform = []
build_ci = None
try:
opts, args = getopt.getopt(argv,"n:cr",["config_file_path=", "clean", "travis", "appimage", "macapp", "windowsexe", "android", "ios", "linux", "mac", "windows"])
except getopt.GetoptError:
terminal_output("Wrong argument specified", True)
sys.exit(2)
for opt, arg in opts:
terminal_output("build argument: " + opt)
if opt in ("-r", "--travis"):
build_ci = True
elif opt in ("--android"):
platform.append("android")
elif opt in ("--ios"):
platform.append("ios")
elif opt in ("--linux"):
platform.append("linux")
elif opt in ("--mac"):
platform.append("mac")
elif opt in ("--windows"):
platform.append("windows")
elif opt in ("-n", "--config_file_path"):
config_file_path = arg
elif opt in ("--appimage"):
ci_appimage()
sys.exit(0)
elif opt in ("--macapp"):
ci_macimage()
sys.exit(0)
elif opt in ("--windowsexe"):
ci_windows()
sys.exit(0)
elif opt in ("-c", "--clean"):
clean_workspace()
sys.exit(0)
if build_ci:
ci_build(platform)
elif config_file_path != "":
prepare_project_files(platform)
else :
terminal_output('Missing Arguments: config_file_path %s' % (config_file_path))
if __name__ == "__main__":
if len(sys.argv) < 2:
terminal_output("please run with arguments")
terminal_output("for cleaning: build.py -c")
terminal_output("for creating project files: build.py -n <folder path> -p")
terminal_output("and platform: -android -ios -linux -mac")
sys.exit(0)
main(sys.argv[1:])
|
from Adafruit_LED_Backpack import AlphaNum4
class DisplayManager(object):
def __init__(self, **displays):
self.displays = {}
for name, addr in displays.items():
self.register_display(name, addr)
def register_display(self, name, address=0x70, **kwargs):
for check_name, check_disp in self.displays.items():
if check_name == name:
raise ValueError("Display name '{}' already taken".format(name))
if check_disp._device.address == address:
raise ValueError("Address ({}) already bound with '{}' display".format(address, check_name))
disp = self.displays[name] = AlphaNum4.AlphaNum4(address=address, **kwargs)
disp.started = False
return disp
def start_display(self, name):
disp = self.displays[name]
disp.begin()
disp.clear()
disp.started = True
return disp
def clear_all(self):
        for disp in self.displays.values():
            disp.clear()
def write_all(self):
for name, disp in self.displays.items():
try:
if not disp.started:
self.start_display(name)
disp.write_display()
except IOError:
disp.started = False
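# Usage sketch (hypothetical display names and I2C addresses; requires the
# Adafruit LED backpack hardware to be attached):
#
#   manager = DisplayManager(clock=0x70, timer=0x71)
#   manager.displays['clock'].print_str('HOLA')
#   manager.write_all()  # lazily begins each display and tolerates I/O errors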
|
from __future__ import division
import collections
import itertools
from typing import (List, Dict, Callable, Tuple, Iterable, Set, Counter, Union,
Optional)
NGramsType = Counter[Tuple[str, ...]]
ScoreType = Dict[str, float]
RougeType = Dict[str, Dict[str, float]]
try:
from math import isclose
except ImportError:
def isclose(a, b, rel_tol=1e-09, abs_tol=0.0):
# type: (float, float, float, float) -> bool
return abs(a - b) <= max(rel_tol * max(abs(a), abs(b)), abs_tol)
"""Precision Recall & F-score"""
def _format_score(fscore, precision, recall):
# type: (float, float, float) -> Dict[str, float]
return {'r': recall, 'p': precision, 'f': fscore}
def _f_score(precision, recall, alpha):
# type: (float, float, float) -> float
if not 0 <= alpha <= 1:
raise ValueError(
'Invalid alpha {}: expected between [0, 1]'.format(alpha))
if isclose(precision, 0) or isclose(recall, 0):
return 0.0
return recall * precision / (alpha * recall + (1 - alpha) * precision)
def _div_or_zero(dividend, divisor):
# type: (float, float) -> float
if isclose(divisor, 0):
return 0.0
else:
return dividend / divisor
def _f_p_r_score(match_score, hyp_score, ref_score, alpha):
# type: (float, float, float, float) -> Dict[str, float]
precision = _div_or_zero(match_score, hyp_score)
recall = _div_or_zero(match_score, ref_score)
fscore = _f_score(precision, recall, alpha)
return _format_score(fscore, precision, recall)
def _flatten(sentences):
# type: (List[List[str]]) -> List[str]
return list(itertools.chain.from_iterable(sentences))
"""Match statistics"""
class _Match(collections.namedtuple('BaseMatch', 'matches hyp_size ref_size')):
def __add__(self, other):
# type: (Union[_Match, int]) -> _Match
if isinstance(other, int) and other == 0:
return self
elif isinstance(other, _Match):
return _Match(self.matches + other.matches,
self.hyp_size + other.hyp_size,
self.ref_size + other.ref_size)
else:
raise ValueError('Unexpected addend {}'.format(other))
def __radd__(self, other):
# type: (Union[_Match, int]) -> _Match
return self.__add__(other)
def to_score(self, alpha):
# type: (float) -> Dict[str, float]
return _f_p_r_score(self.matches, self.hyp_size, self.ref_size, alpha)
def to_weighted_score(self, alpha, weight):
# type: (float, float) -> Dict[str, float]
inv_weight_func = _get_weight_func(weight, inverse=True)
precision = inv_weight_func(_div_or_zero(self.matches, self.hyp_size))
recall = inv_weight_func(_div_or_zero(self.matches, self.ref_size))
fscore = _f_score(precision, recall, alpha)
return _format_score(fscore, precision, recall)
class _MatchAggregator(object):
def aggregate(self, matches):
# type: (Iterable[_Match]) -> _Match
raise NotImplementedError
class _AverageMatchAggregator(_MatchAggregator):
def aggregate(self, matches):
# type: (Iterable[_Match]) -> _Match
result = sum(matches)
if result == 0:
raise ValueError('Average on empty sequence')
return result
class _BestMatchAggregator(_MatchAggregator):
def aggregate(self, matches):
# type: (Iterable[_Match]) -> _Match
return max(matches, key=lambda x: _div_or_zero(x.matches, x.ref_size))
def _build_match_aggregator(multi_ref_mode):
# type: (str) -> _MatchAggregator
if multi_ref_mode == 'average':
return _AverageMatchAggregator()
elif multi_ref_mode == 'best':
return _BestMatchAggregator()
else:
raise ValueError(
'Invalid multi_ref_mode {}: expected (average, best)'.format(
multi_ref_mode))
"""ROUGE-N scores"""
def _build_ngrams(sent, n):
# type: (List[str], int) -> NGramsType
ngrams = collections.Counter()
for i in range(len(sent) - n + 1):
ngrams[tuple(sent[i:i + n])] += 1
return ngrams
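# For example, _build_ngrams(['a', 'b', 'a', 'b'], 2) yields
# Counter({('a', 'b'): 2, ('b', 'a'): 1}).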
def _count_ngrams(ngrams):
# type: (NGramsType) -> int
return sum(ngrams.values())
def _intersect_ngrams(hyp_ngrams, ref_ngrams):
# type: (NGramsType, NGramsType) -> NGramsType
return hyp_ngrams & ref_ngrams
def _union_ngrams(ngrams, other):
# type: (NGramsType, NGramsType) -> NGramsType
return ngrams | other
def _rouge_n_sentence_level(hyp, ref, n):
# type: (List[str], List[str], int) -> _Match
hyp_ngrams = _build_ngrams(hyp, n)
ref_ngrams = _build_ngrams(ref, n)
match_ngrams = _intersect_ngrams(hyp_ngrams, ref_ngrams)
return _Match(_count_ngrams(match_ngrams), _count_ngrams(hyp_ngrams),
_count_ngrams(ref_ngrams))
def _rouge_n_summary_level(hyps, refs, n):
# type: (List[List[str]], List[List[str]], int) -> _Match
return _rouge_n_sentence_level(_flatten(hyps), _flatten(refs), n)
def _rouge_n_multi_ref(hyps, multi_refs, n, multi_ref_mode, alpha):
# type: (List[List[str]], List[List[List[str]]], int, str, float) -> ScoreType
agg = _build_match_aggregator(multi_ref_mode)
match = agg.aggregate(
_rouge_n_summary_level(hyps, refs, n) for refs in multi_refs)
return match.to_score(alpha)
"""ROUGE-L scores"""
def _lcs_table(a, b):
# type: (List[str], List[str]) -> List[List[int]]
m = len(a)
n = len(b)
table = [[0 for _ in range(n + 1)] for _ in range(m + 1)]
for i in range(1, m + 1):
for j in range(1, n + 1):
if a[i - 1] == b[j - 1]:
table[i][j] = table[i - 1][j - 1] + 1
else:
table[i][j] = max(table[i - 1][j], table[i][j - 1])
return table
def _lcs_length(a, b):
# type: (List[str], List[str]) -> int
table = _lcs_table(a, b)
return table[-1][-1]
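# For example, _lcs_length(['a', 'b', 'c', 'd'], ['a', 'c', 'd']) == 3,
# since the longest common subsequence is ['a', 'c', 'd'].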
def _lcs_elements(a, b, table):
# type: (List[str], List[str], List[List[float]]) -> List[Tuple[int, int]]
s = []
i = len(a)
j = len(b)
while i > 0 and j > 0:
if a[i - 1] == b[j - 1]:
i -= 1
j -= 1
s.append((i, j))
elif table[i][j] == table[i][j - 1]:
j -= 1
else:
i -= 1
s.reverse()
return s
def _lcs_union(hyps, ref):
# type: (List[List[str]], List[str]) -> Set[int]
lcs_union = set()
for hyp in hyps:
lcs_elem = _lcs_elements(hyp, ref, _lcs_table(hyp, ref))
lcs_union = lcs_union.union(ref_idx for _, ref_idx in lcs_elem)
return lcs_union
def _rouge_l_sentence_level(hyp, ref):
# type: (List[str], List[str]) -> _Match
return _Match(_lcs_length(hyp, ref), len(hyp), len(ref))
def _rouge_l_summary_level(hyps, refs):
# type: (List[List[str]], List[List[str]]) -> _Match
hyp_unigram = _build_ngrams(_flatten(hyps), 1)
match_size = 0
for ref in refs:
lcs_union = _lcs_union(hyps, ref)
for ref_idx in lcs_union:
unigram = (ref[ref_idx],)
if hyp_unigram.get(unigram, 0) > 0:
hyp_unigram[unigram] -= 1
match_size += 1
ref_len = sum(len(ref) for ref in refs)
hyp_len = sum(len(hyp) for hyp in hyps)
return _Match(match_size, hyp_len, ref_len)
def _rouge_l_multi_ref(hyps, multi_refs, multi_ref_mode, alpha):
# type: (List[List[str]], List[List[List[str]]], str, float) -> ScoreType
agg = _build_match_aggregator(multi_ref_mode)
match = agg.aggregate(
_rouge_l_summary_level(hyps, refs) for refs in multi_refs)
return match.to_score(alpha)
"""ROUGE-W scores"""
def _wlcs_table(a, b, weight):
# type: (List[str], List[str], Callable[[float], float]) -> List[List[float]]
m = len(a)
n = len(b)
wlen = [[0.0 for _ in range(n + 1)] for _ in range(m + 1)]
continuous_matches = [[0 for _ in range(n + 1)] for _ in range(m + 1)]
for i in range(1, m + 1):
for j in range(1, n + 1):
if a[i - 1] == b[j - 1]:
k = continuous_matches[i - 1][j - 1]
wlen[i][j] = wlen[i - 1][j - 1] + weight(k + 1) - weight(k)
continuous_matches[i][j] = k + 1
else:
wlen[i][j] = max(wlen[i - 1][j], wlen[i][j - 1])
continuous_matches[i][j] = 0
return wlen
def _wlcs_union(hyps, ref, weight_func):
# type: (List[List[str]], List[str], Callable[[float], float]) -> List[int]
wlcs_union = set()
for hyp in hyps:
wlcs_elem = _lcs_elements(hyp, ref, _wlcs_table(hyp, ref, weight_func))
wlcs_union = wlcs_union.union(ref_idx for _, ref_idx in wlcs_elem)
return sorted(wlcs_union)
def _rouge_w_sentence_level(hyp, ref, weight):
# type: (List[str], List[str], float) -> _Match
return _rouge_w_summary_level([hyp], [ref], weight)
def _get_weight_func(weight, inverse):
# type: (float, bool) -> Callable[[float], float]
if weight < 1:
raise ValueError('Invalid weight {}: expected >= 1'.format(weight))
if inverse:
weight = 1 / weight
return lambda x: x ** weight
def _rouge_w_summary_level(hyps, refs, weight):
# type: (List[List[str]], List[List[str]], float) -> _Match
weight_func = _get_weight_func(weight, inverse=False)
hyp_flat = _flatten(hyps)
hyp_unigrams = _build_ngrams(hyp_flat, 1)
ref_score = weight_func(sum(weight_func(len(ref)) for ref in refs))
hyp_score = weight_func(sum(len(hyp) for hyp in hyps))
match_score = 0
for ref in refs:
wlcs_union = _wlcs_union(hyps, ref, weight_func)
consecutive_matches = 0
for ref_idx in wlcs_union:
token = (ref[ref_idx],)
if hyp_unigrams[token] > 0:
hyp_unigrams[token] -= 1
consecutive_matches += 1
if ref_idx == len(ref) - 1 or ref_idx + 1 not in wlcs_union:
match_score += weight_func(consecutive_matches)
consecutive_matches = 0
return _Match(match_score, hyp_score, ref_score)
def _rouge_w_multi_ref(hyps, multi_refs, weight, multi_ref_mode, alpha):
# type: (List[List[str]], List[List[List[str]]], float, str, float) -> ScoreType
agg = _build_match_aggregator(multi_ref_mode)
match = agg.aggregate(
_rouge_w_summary_level(hyps, refs, weight) for refs in multi_refs)
return match.to_weighted_score(alpha, weight)
"""ROUGE-S scores"""
def _skip_bigrams(sent, skip_gap):
# type: (List[str], Optional[int]) -> NGramsType
bigrams = collections.Counter()
if skip_gap is None or skip_gap < 0:
skip_gap = len(sent)
for lo in range(len(sent)):
for hi in range(lo + 1, min(len(sent), lo + skip_gap + 2)):
bigrams[(sent[lo], sent[hi])] += 1
return bigrams
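# For example, _skip_bigrams(['a', 'b', 'c'], None) yields
# Counter({('a', 'b'): 1, ('a', 'c'): 1, ('b', 'c'): 1}).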
def _rouge_s_or_su(hyp, ref, skip_gap, include_unigram):
# type: (List[str], List[str], Optional[int], bool) -> _Match
hyp_skip = _skip_bigrams(hyp, skip_gap)
ref_skip = _skip_bigrams(ref, skip_gap)
if include_unigram:
hyp_skip = _union_ngrams(hyp_skip, _build_ngrams(hyp[:-1], 1))
ref_skip = _union_ngrams(ref_skip, _build_ngrams(ref[:-1], 1))
match_skip = _intersect_ngrams(hyp_skip, ref_skip)
return _Match(_count_ngrams(match_skip), _count_ngrams(hyp_skip),
_count_ngrams(ref_skip))
def _rouge_s_sentence_level(hyp, ref, skip_gap):
# type: (List[str], List[str], Optional[int]) -> _Match
return _rouge_s_or_su(hyp, ref, skip_gap, False)
def _rouge_s_summary_level(hyps, refs, skip_gap):
# type: (List[List[str]], List[List[str]], Optional[int]) -> _Match
return _rouge_s_sentence_level(_flatten(hyps), _flatten(refs), skip_gap)
def _rouge_s_multi_ref(hyps, multi_refs, skip_gap, multi_ref_mode, alpha):
# type: (List[List[str]], List[List[List[str]]], Optional[int], str, float) -> ScoreType
agg = _build_match_aggregator(multi_ref_mode)
match = agg.aggregate(
_rouge_s_summary_level(hyps, refs, skip_gap) for refs in multi_refs)
return match.to_score(alpha)
"""ROUGE-SU scores"""
def _rouge_su_sentence_level(hyp, ref, skip_gap):
# type: (List[str], List[str], Optional[int]) -> _Match
return _rouge_s_or_su(hyp, ref, skip_gap, True)
def _rouge_su_summary_level(hyps, refs, skip_gap):
# type: (List[List[str]], List[List[str]], Optional[int]) -> _Match
return _rouge_su_sentence_level(_flatten(hyps), _flatten(refs), skip_gap)
def _rouge_su_multi_ref(hyps, multi_refs, skip_gap, multi_ref_mode, alpha):
# type: (List[List[str]], List[List[List[str]]], Optional[int], str, float) -> ScoreType
agg = _build_match_aggregator(multi_ref_mode)
match = agg.aggregate(
_rouge_su_summary_level(hyps, refs, skip_gap) for refs in multi_refs)
return match.to_score(alpha)
"""All ROUGE scores"""
def _rouge_scores_multi_ref(
hyp, # type: List[List[str]]
multi_ref, # type: List[List[List[str]]]
rouge_n, # type: Union[int, Iterable[int]]
rouge_l, # type: bool
rouge_w, # type: bool
rouge_w_weight, # type: float
rouge_s, # type: bool
rouge_su, # type: bool
skip_gap, # type: Optional[int]
multi_ref_mode, # type: str
alpha, # type: float
): # type: (...) -> Dict[str, Dict[str, float]]
if isinstance(rouge_n, int):
rouge_n = range(1, 1 + rouge_n)
    skip_suffix = str(skip_gap) if skip_gap is not None and skip_gap >= 0 else '*'
result = {}
for n in rouge_n:
result['rouge-{}'.format(n)] = _rouge_n_multi_ref(
hyp, multi_ref, n, multi_ref_mode, alpha)
if rouge_l:
result['rouge-l'] = _rouge_l_multi_ref(
hyp, multi_ref, multi_ref_mode, alpha)
if rouge_w:
result['rouge-w-{}'.format(rouge_w_weight)] = _rouge_w_multi_ref(
hyp, multi_ref, rouge_w_weight, multi_ref_mode, alpha)
if rouge_s:
result['rouge-s{}'.format(skip_suffix)] = _rouge_s_multi_ref(
hyp, multi_ref, skip_gap, multi_ref_mode, alpha)
if rouge_su:
result['rouge-su{}'.format(skip_suffix)] = _rouge_su_multi_ref(
hyp, multi_ref, skip_gap, multi_ref_mode, alpha)
return result
class _RougeAggregator(object):
def aggregate(self, scores):
# type: (Iterable[RougeType]) -> Union[List[RougeType], RougeType]
raise NotImplementedError
class _IndividualRougeAggregator(_RougeAggregator):
def aggregate(self, scores):
# type: (Iterable[RougeType]) -> List[RougeType]
return list(scores)
class _AverageRougeAggregator(_RougeAggregator):
def __init__(self, alpha):
self.alpha = alpha
def aggregate(self, scores):
# type: (Iterable[RougeType]) -> RougeType
scores = list(scores)
if len(scores) == 0:
return {}
results = {}
for key in scores[0].keys():
results[key] = self.average_score(score[key] for score in scores)
return results
def average_score(self, scores):
# type: (Iterable[ScoreType]) -> ScoreType
total_p = 0
total_r = 0
count = 0
for score in scores:
total_p += score['p']
total_r += score['r']
count += 1
precision = _div_or_zero(total_p, count)
recall = _div_or_zero(total_r, count)
fscore = _f_score(precision, recall, self.alpha)
return _format_score(fscore, precision, recall)
def _build_rouge_aggregator(mode, alpha):
# type: (str, float) -> _RougeAggregator
if mode == 'individual':
return _IndividualRougeAggregator()
if mode == 'average':
return _AverageRougeAggregator(alpha)
raise ValueError(
'Invalid mode {}: expected (individual, average)'.format(mode))
class PyRouge(object):
"""Compute ROUGE scores between multiple hypothesis and reference summaries.
:param rouge_n: Compute N-gram co-occurrence (ROUGE-N). Given an integer N,
compute ROUGE-1 to ROUGE-N. Given a list of integers, compute ROUGE-N if
N is on the list.
:param rouge_l: If true, compute longest common subsequence (LCS)
co-occurrence (ROUGE-L).
:param rouge_w: If true, compute Weighted-LCS (WLCS) co-occurrence
(ROUGE-W).
:param rouge_w_weight: The weight w of the weighting function
:math:`f(x) = x^w` to emphasize consecutive matches in ROUGE-W.
:param rouge_s: If true, compute skip-bigram co-occurrence (ROUGE-S).
:param rouge_su: If true, compute skip-bigram with unigram co-occurrence
(ROUGE-SU).
:param skip_gap: The maximum gap between two words in skip-bigram.
:param multi_ref_mode: The method to combine the scores between a
hypothesis and its multiple references. Choose from {average, best}.
:param alpha: The balance factor between recall and precision. Favors recall
if close to 1, precision if close to 0.
:param mode: The method to combine the scores on multiple documents.
Choose from {average, individual}.
Example:
::
>>> from rouge_metric import PyRouge
>>> hypotheses = ['Police killed the gunman'.lower()]
>>> references = [['The gunman killed the policeman'.lower()]]
>>> PyRouge().evaluate(hypotheses, references)
{
'rouge-1': {'r': 0.6, 'p': 0.75, 'f': 0.666666667},
'rouge-2': {'r': 0.5, 'p': 0.666666667, 'f': 0.571428571},
'rouge-l': {'r': 0.4, 'p': 0.5, 'f': 0.444444444}
}
>>> hypotheses = [['Police killed the gunman'.lower().split()]]
>>> references = [[['The gunman killed the policeman'.lower().split()]]]
>>> PyRouge().evaluate_tokenized(hypotheses, references)
{
'rouge-1': {'r': 0.6, 'p': 0.75, 'f': 0.666666667},
'rouge-2': {'r': 0.5, 'p': 0.666666667, 'f': 0.571428571},
'rouge-l': {'r': 0.4, 'p': 0.5, 'f': 0.444444444}
}
"""
def __init__(self,
rouge_n=(1, 2), # type: Union[int, Iterable[int]]
rouge_l=True, # type: bool
rouge_w=False, # type: bool
rouge_w_weight=1.2, # type: float
rouge_s=False, # type: bool
rouge_su=False, # type: bool
skip_gap=None, # type: Optional[int]
multi_ref_mode='average', # type: str
alpha=0.5, # type: float
mode='average', # type: str
):
self.rouge_n = rouge_n
self.rouge_l = rouge_l
self.rouge_w = rouge_w
self.rouge_w_weight = rouge_w_weight
self.rouge_s = rouge_s
self.rouge_su = rouge_su
self.skip_gap = skip_gap
self.multi_ref_mode = multi_ref_mode
self.alpha = alpha
self.mode = mode
@staticmethod
def _default_sentencizer(text):
# type: (str) -> List[str]
return text.split('\n')
@staticmethod
def _default_tokenizer(sent):
# type: (str) -> List[str]
return sent.split()
def evaluate_tokenized(
self,
hypotheses, # type: List[List[List[str]]]
multi_references, # type: List[List[List[List[str]]]]
):
# type: (...) -> Union[RougeType, List[RougeType]]
"""Compute ROUGE scores between tokenized hypotheses and references.
Multiple reference summaries can be specified for a hypothesis summary.
The input should follow the below format so that we know how to match a
hypothesis with its references.
::
hypotheses = [
doc1_hyp_summary, # Hypothesis summary for document 1
doc2_hyp_summary, # Hypothesis summary for document 2
...
]
multi_references = [
[
doc1_ref1_summary, # Reference summary 1 for document 1
doc1_ref2_summary, # Reference summary 2 for document 1
...
],
[
doc2_ref1_summary, # Reference summary 1 for document 2
doc2_ref2_summary, # Reference summary 2 for document 2
...
],
]
Note that a summary is represented by a list of sentences, and a
sentence is represented by a list of tokens. A token is a basic element
here, represented by a ``str``. i.e.,
::
summary = [
[sent1_token1, sent1_token2, ...], # sentence 1
[sent2_token1, sent2_token2, ...], # sentence 2
]
:param hypotheses: A list of predicted summaries for multiple documents.
Each summary contains multiple sentences, and each sentence contains
multiple tokens.
:param multi_references: A list of gold standard summaries for multiple
documents. Each document corresponds to multiple reference
summaries. Each summary contains multiple sentences, and each
sentence contains multiple tokens.
:return: All computed ROUGE scores.
"""
if len(hypotheses) != len(multi_references):
raise ValueError('Hypotheses and references must be the same size')
aggregator = _build_rouge_aggregator(self.mode, self.alpha)
result = aggregator.aggregate(
_rouge_scores_multi_ref(
hyp, multi_ref, self.rouge_n, self.rouge_l, self.rouge_w,
self.rouge_w_weight, self.rouge_s, self.rouge_su, self.skip_gap,
self.multi_ref_mode, self.alpha
) for hyp, multi_ref in zip(hypotheses, multi_references)
)
return result
def evaluate(self,
hypotheses, # type: List[str]
multi_references, # type: List[List[str]]
sentencizer=None, # type: Optional[Callable[[str], List[str]]]
tokenizer=None # type: Optional[Callable[[str], List[str]]]
):
# type: (...) -> Union[RougeType, List[RougeType]]
"""Compute ROUGE scores between hypothesis and reference summaries.
The hypotheses and multi_references should follow the below format.
::
hypotheses = [summary1, summary2, ...]
multi_references = [
[summary1_ref1, summary1_ref2, ...],
[summary2_ref1, summary2_ref2, ...],
...
]
A summary here is a ``str`` with multiple lines, separated by ``\\n``.
Each line represents a sentence.
:param hypotheses: A list of hypothesis summaries.
:param multi_references: A double list of reference summaries.
:param sentencizer: A function to split a paragraph into sentences.
:param tokenizer: A function to split a sentence into tokens.
:return: All computed ROUGE scores.
"""
if sentencizer is None:
sentencizer = self._default_sentencizer
if tokenizer is None:
tokenizer = self._default_tokenizer
tokenized_hyp = [[tokenizer(sent) for sent in sentencizer(hyp)]
for hyp in hypotheses]
tokenized_multi_ref = [[[tokenizer(sent) for sent in sentencizer(ref)]
for ref in multi_ref]
for multi_ref in multi_references]
return self.evaluate_tokenized(tokenized_hyp, tokenized_multi_ref)
|
from rest_framework.viewsets import ModelViewSet
from .models import Supermarket, Product, ProductType, GroceryList
from .serializers import SupermarketSerializer, SupermarketProductSerializer, \
    SupermarketProductTypeSerializer, SupermarketGroceryListSerializer
class SupermarketViewSet(ModelViewSet):
serializer_class = SupermarketSerializer
queryset = Supermarket.objects.all()
class ProductViewSet(ModelViewSet):
serializer_class = SupermarketProductSerializer
queryset = Product.objects.all()
class ProductTypeViewSet(ModelViewSet):
serializer_class = SupermarketProductTypeSerializer
queryset = ProductType.objects.all()
class GroceryListViewSet(ModelViewSet):
serializer_class = SupermarketGroceryListSerializer
queryset = GroceryList.objects.all()
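# Hypothetical router wiring (normally placed in the app's urls.py; the
# route prefixes below are examples):
#
#   from rest_framework.routers import DefaultRouter
#   router = DefaultRouter()
#   router.register(r'supermarkets', SupermarketViewSet)
#   router.register(r'products', ProductViewSet)
#   router.register(r'product-types', ProductTypeViewSet)
#   router.register(r'grocery-lists', GroceryListViewSet)
#   urlpatterns = router.urls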
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Fri Jan 21 12:33:00 2022
Author: Gianluca Bianco
"""
#################################################
# Libraries
#################################################
import doctest
from termcolor import colored
import numpy as np
import matplotlib.pyplot as plt
import scipy.interpolate as sci
import arsenalgear.mathematics as mt
#################################################
# "integral" function
#################################################
def integral( function, a, b ):
"""
1-dimensional integral solution for finite and infinite bound conditions, using the Simpson rule.
    Args:
        function (any): integrand function.
        a (any): lower integration extreme.
        b (any): higher integration extreme.
Returns:
any: integral of the given function.
Testing:
>>> def test_integral( x ):
... return x * np.exp( -pow( x, 2 ) )
>>> def test_integral_2( x ):
... return np.exp( -2 * pow( x, 2 ) )
>>> def test_integral_3( x ):
... return pow( x, 2 )
>>> def test_integral_4( x ):
... return np.exp( -x + 4 )
>>> mt.IsInBounds( integral( test_integral, -np.Infinity, np.Infinity ), -0.001, 0.001 )
True
>>> mt.IsInBounds( integral( test_integral_2, -np.Infinity, np.Infinity ), 1.23, 1.25 )
True
>>> integral( test_integral_3, -np.Infinity, np.Infinity )
Traceback (most recent call last):
...
RuntimeError: \033[31mThe wave function integral is divergent!\033[0m
>>> mt.IsInBounds( integral( test_integral_3, 1, 4 ), 19.5, 21.5 )
True
>>> mt.IsInBounds( integral( test_integral, 0, np.Infinity ), 0.49, 0.51 )
True
>>> mt.IsInBounds( integral( test_integral_2, -np.Infinity, 0 ), 0.61, 0.63 )
True
>>> mt.IsInBounds( integral( test_integral_4, 0, np.Infinity ), 54.1, 54.9 )
True
"""
if a == -np.Infinity or b == np.Infinity:
inf = 0
sup = np.pi
if a != -np.Infinity and b == np.Infinity:
inf = 0
sup = np.pi / 2
elif a == -np.Infinity and b != np.Infinity:
inf = -np.pi / 2
sup = 0
var = lambda x: function( np.tan( x ) ) / pow( np.cos( x ), 2 )
var_inf = function( np.tan( inf ) ) / pow( np.cos( inf ), 2 )
var_sup = function( np.tan( sup ) ) / pow( np.cos( sup ), 2 )
elif a != -np.Infinity and b != np.Infinity:
inf = a
sup = b
var = lambda x: function( x )
var_inf = function( inf )
var_sup = function( sup )
else:
raise RuntimeError( colored( "Invalid integral bounds!", "red" ) )
first = abs( sup - inf ) / 1000
val = 1000 / 2
result = 0
for i in range( 1, int( val - 1 ) ):
x = inf + 2 * i * first
result = result + 2 * var( x )
for i in range( 1, int( val ) ):
x = inf + ( 2 * i - 1 ) * first
result = result + 4 * var( x )
result = first * ( result + var_inf + var_sup ) / 3
if result < -1e10 or result > 1e10:
raise RuntimeError( colored( "The wave function integral is divergent!", "red" ) )
else:
return result
#################################################
# "prod_integral" function
#################################################
def prod_integral( real_part, imaginary_part, m, n, a, b ):
"""
    Function used to compute the product integral between a wave function and its complex conjugate.
    Args:
        real_part (any): real part of the given function.
        imaginary_part (any): imaginary part of the given function.
        m (int): wave-function index.
        n (int): wave-function index.
        a (any): lower integration bound.
        b (any): upper integration bound.
    Returns:
        any: the product integral between the complex conjugate of the wave function with index m and the wave function with index n.
Testing:
Not necessary, since it is performed in the "orthogonality", "orthonormality" and "coefficients" functions.
"""
function_product_real = lambda x: ( mt.e_parser( real_part, imaginary_part, m, x ).conjugate() * mt.e_parser( real_part, imaginary_part, n, x ) ).real
function_product_imag = lambda x: ( mt.e_parser( real_part, imaginary_part, m, x ).conjugate() * mt.e_parser( real_part, imaginary_part, n, x ) ).imag
product_integral = complex( integral( function_product_real, a, b ), integral( function_product_imag, a, b ) )
return product_integral.real
#################################################
# "orthogonality" function
#################################################
def orthogonality( real_part, imaginary_part, a, b ):
"""
    Function used to check whether a given wave function is orthogonal.
    Args:
        real_part (any): real part of the given function.
        imaginary_part (any): imaginary part of the given function.
        a (any): lower integration bound.
        b (any): upper integration bound.
    Returns:
        bool: True if the wave functions with different indices are mutually orthogonal.
Testing:
>>> orthogonality( "Hermite( x, n ) * np.exp( - pow( x , 2 ) / 2 )", "0", -np.Infinity, np.Infinity )
True
>>> orthogonality( "n * np.exp( -pow( x , 2 ) / 2 )", "0", -np.Infinity, np.Infinity )
False
>>> orthogonality( "np.exp( -2 * abs( x ) ) * pow( np.sin( n ), 2 ) * x", "0", -np.Infinity, np.Infinity )
True
>>> orthogonality( "np.sin( n * np.pi * x )", "0", 0, 1 )
True
>>> orthogonality( "np.sin( n * np.pi * x )", "0", 0, 1 )
True
>>> orthogonality( "np.cos( n * np.pi * x )", "0", 0, 1 )
True
"""
arr = np.array([])
m_ = 5
n_ = 5
for m in range( m_ ):
for n in range( n_ ):
if m == 0 and n == 0:
continue
res = round( prod_integral( real_part, imaginary_part, m, n, a, b ) )
if m != n:
if res == 0:
arr = np.append( arr, True )
else:
arr = np.append( arr, False )
if False in arr:
return False
else:
return True
#################################################
# "orthonormality" function
#################################################
def orthonormality( real_part, imaginary_part, a, b ):
"""
    Function used to check whether a given wave function is orthonormal.
    Args:
        real_part (any): real part of the given function.
        imaginary_part (any): imaginary part of the given function.
        a (any): lower integration bound.
        b (any): upper integration bound.
    Returns:
        bool: True if the wave functions form an orthonormal set.
Testing:
>>> orthonormality( "np.sqrt( 2 ) * np.sin( n * np.pi * x )", "0", 0, 1 )
True
>>> orthonormality( "np.sqrt( n ) * np.exp( -n * abs( x ) )", "0", 0, np.Infinity )
True
>>> orthonormality( "Hermite( x, n ) * np.exp( - pow( x , 2 ) / 2 )", "0", -np.Infinity, np.Infinity )
False
>>> orthonormality( "np.sin( n * np.pi * x )", "0", 0, 1 )
False
"""
arr = np.array([])
m_ = 5
n_ = 5
for m in range( m_ ):
for n in range( n_ ):
if m == 0 and n == 0:
continue
res = round( prod_integral( real_part, imaginary_part, m, n, a, b ) )
if res == mt.kronecker( m, n ):
arr = np.append( arr, True )
else:
arr = np.append( arr, False )
if False in arr:
return False
else:
return True
#################################################
# "coefficients" function
#################################################
def coefficients( real_part, imaginary_part, a, b, n ):
"""
Function used to compute the normalization coefficients.
Args:
        real_part (any): real part of the given function.
        imaginary_part (any): imaginary part of the given function.
        a (any): lower integration bound.
        b (any): upper integration bound.
        n (int): wave-function index.
    Returns:
        any: the value of the normalization coefficient.
Testing:
>>> round( coefficients( "Hermite( x, n ) * np.exp( - pow( x , 2 ) / 2 )", "0", -np.Infinity, np.Infinity, 1 ), 2 )
0.53
>>> round( coefficients( "Hermite( x, n ) * np.exp( - pow( x , 2 ) / 2 )", "0", -np.Infinity, np.Infinity, 3 ), 2 )
0.11
>>> round( coefficients( "np.sin( n * np.pi * x )", "0", 0, 1, 2 ), 2 )
1.41
>>> round( coefficients( "np.cos( x )", "np.sin( x )", -1, 1, 0 ), 2 )
0.71
"""
res = prod_integral( real_part, imaginary_part, n, n, a, b )
denominator = np.sqrt( res.real )
if denominator == 0:
return colored( "Error, division by 0!", "red" )
else:
return 1 / denominator.real
#################################################
# "plotter_complex" function
#################################################
def plotter_complex( real_part, imaginary_part, a, b, n, coefficient ):
"""
Function used to plot a given wave-function for an index n.
Args:
        real_part (string): real part of the mathematical expression.
        imaginary_part (string): imaginary part of the mathematical expression.
        a (any): lower integration bound.
        b (any): upper integration bound.
n (int): wave function index.
coefficient (any): value of the normalization coefficient.
Returns:
plot: the wave-function plot for the index n is returned.
"""
if coefficient != colored( "Error, division by 0!", "red" ):
if a == -np.inf and b != np.inf:
x = np.arange( -10, b, ( ( b+10 ) / 10 ) )
elif a != -np.inf and b == np.inf:
x = np.arange( a, 10, ( ( 10-a ) / 10 ) )
elif a == -np.inf and b == np.inf:
x = np.arange( -10, 10, ( ( 20 ) / 10 ) )
else:
x = np.arange( 10*a, 10*b, ( ( 10*( b-a ) ) / 10 ) )
def func( x ):
return coefficient * mt.e_parser( real_part, imaginary_part, n, x )
my_label = "Normalized wave-function f(x) for n = " + str( n )
plt.figure( figsize = ( 8, 6 ), dpi = 80 )
plt.xlabel( "Re: f(x)" )
plt.ylabel( "Im: f(x)" )
plt.title( my_label )
if real_part == "0" and imaginary_part != "0":
X_Y_Spline = sci.make_interp_spline( x, np.imag( func( x ) ) )
X = np.linspace( x.min(), x.max(), 500 )
Y = X_Y_Spline( X )
plt.xlabel( "x" )
plt.ylabel( "Im: f(x)" )
plt.plot( X, Y, color = "green" )
elif real_part != "0" and imaginary_part == "0":
X_Y_Spline = sci.make_interp_spline( x, np.real( func( x ) ) )
X = np.linspace( x.min(), x.max(), 500 )
Y = X_Y_Spline( X )
plt.xlabel( "x" )
plt.ylabel( "Re: f(x)" )
plt.plot( X, Y, color = "green" )
else:
X = np.real( func( x ) )
Y = np.imag( func( x ) )
tck, u = sci.splprep( [ X, Y ], s = 0 )
unew = np.arange( 0, 1.01, 0.01 )
out = sci.splev( unew, tck )
plt.plot( X, Y, 'x', out[0], out[1], color = "green" )
plt.show()
#################################################
# Doing tests
#################################################
if __name__ == "__main__":
doctest.testmod()
|
from yaetos.etl_utils import ETL_Base, Commandliner
from pyspark.sql.types import StructType
class Job(ETL_Base):
def transform(self):
return self.sc_sql.createDataFrame([], StructType([]))
if __name__ == "__main__":
args = {'job_param_file': 'conf/jobs_metadata.yml'}
Commandliner(Job, **args)
|
import os
import environ
from pathlib import Path
from django.utils.translation import gettext_lazy as _
BASE_DIR = Path(__file__).resolve().parent.parent
ENV_PATH = os.path.join(BASE_DIR, '.env')
env = environ.Env()
if os.path.isfile(ENV_PATH):
environ.Env.read_env(env_file=ENV_PATH)
SECRET_KEY = env('DJ_SECRET_KEY')
DEBUG = env.bool('DJ_DEBUG')
ALLOWED_HOSTS = env.list('DJ_ALLOWED_HOSTS')
THREAD_SERVER_HOST = '127.0.0.1'
THREAD_SERVER_PORT = 8290
INSTALLED_APPS = [
'django.contrib.auth',
'django.contrib.admin',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'django_summernote',
'webpack_loader',
'mainsite',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.locale.LocaleMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'amokryshev.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': ['templates',],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.contrib.auth.context_processors.auth',
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.template.context_processors.static',
'django.template.context_processors.i18n',
#'django.template.context_processors.media',
'django.template.context_processors.tz',
'django.contrib.messages.context_processors.messages',
'mainsite.context_processors.load_settings',
],
},
},
]
WSGI_APPLICATION = 'amokryshev.wsgi.application'
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.postgresql',
'NAME': env('DJ_DEFAULT_DB_NAME'),
'USER': env('DJ_DEFAULT_DB_USER'),
'PASSWORD': env('DJ_DEFAULT_DB_PASS'),
'HOST': env('DJ_DEFAULT_DB_HOST'),
'PORT': env('DJ_DEFAULT_DB_PORT'),
},
'adm': {
'ENGINE': 'django.db.backends.postgresql',
'NAME': env('DJ_ADM_DB_NAME'),
'USER': env('DJ_ADM_DB_USER'),
'PASSWORD': env('DJ_ADM_DB_PASS'),
'HOST': env('DJ_ADM_DB_HOST'),
'PORT': env('DJ_ADM_DB_PORT'),
}
}
CACHES = {
"default": {
"BACKEND": "django_redis.cache.RedisCache",
"LOCATION": env('DJ_CACHE_URL'),
"OPTIONS": {
"CLIENT_CLASS": "django_redis.client.DefaultClient"
},
"KEY_PREFIX": env('DJ_CACHE_PREFIX')
}
}
CACHE_TIMEOUT = env.int('DJ_CACHE_TIMEOUT')
LOGGING = {
'version': 1,
'filters': {
'require_debug_true': {
'()': 'django.utils.log.RequireDebugTrue',
}
},
'handlers': {
'console': {
'level': 'DEBUG',
'filters': ['require_debug_true'],
'class': 'logging.StreamHandler',
}
},
'loggers': {
'django.db.backends': {
'level': 'DEBUG',
'handlers': ['console'],
}
}
}
# Password validation
# https://docs.djangoproject.com/en/3.1/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',},
{'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',},
{'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',},
{'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',},
]
LANGUAGE_CODE = 'ru'
LANGUAGES = [
('ru', _('Russian')),
('en', _('English')),
]
TIME_ZONE = 'Europe/Moscow'
USE_I18N = True
USE_L10N = True
USE_TZ = True
STATIC_URL = '/static/'
STATICFILES_DIRS = [
os.path.join(BASE_DIR, 'static'),
os.path.join(BASE_DIR, 'node_modules/bootstrap/'),
os.path.join(BASE_DIR, 'node_modules/jquery/'),
os.path.join(BASE_DIR, 'node_modules/jquery-ui/'),
os.path.join(BASE_DIR, 'node_modules/popper.js/'),
]
MEDIA_ROOT = os.path.join(BASE_DIR, 'uploads')
MEDIA_URL = '/uploads/'
WEBPACK_LOADER = {
'DEFAULT': {
'CACHE': not DEBUG,
'BUNDLE_DIR_NAME': 'static/bundles/', # must end with slash
'STATS_FILE': os.path.join(BASE_DIR, 'webpack-stats.json'),
'POLL_INTERVAL': 0.1,
'TIMEOUT': None,
'IGNORE': [r'.+\.hot-update.js', r'.+\.map'],
'LOADER_CLASS': 'webpack_loader.loader.WebpackLoader',
}
}
LOCALE_PATHS = [
os.path.join(BASE_DIR, 'locale'),
]
SUMMERNOTE_CONFIG = { 'iframe': False, }
|
import dataclasses
import typing
import pytest
from dataclasses_avroschema import AvroModel, types
@pytest.fixture
def user_dataclass():
@dataclasses.dataclass(repr=False)
class User(AvroModel):
name: str
age: int
has_pets: bool
money: float
encoded: bytes
class Meta:
schema_doc = False
return User
@pytest.fixture
def user_dataclass_with_doc():
@dataclasses.dataclass(repr=False)
class User(AvroModel):
name: str
age: int
has_pets: bool
money: float
encoded: bytes
return User
@pytest.fixture
def user_dataclass_with_field_metadata():
@dataclasses.dataclass(repr=False)
class User(AvroModel):
name: str = dataclasses.field(metadata={"classification": "test"})
age: int = dataclasses.field(metadata={"classification": "test"})
has_pets: bool = dataclasses.field(metadata={"classification": "test"})
money: float = dataclasses.field(metadata={"classification": "test"})
encoded: bytes = dataclasses.field(metadata={"classification": "test"})
class Meta:
schema_doc = False
return User
@pytest.fixture
def user_v2_dataclass():
@dataclasses.dataclass(repr=False)
class UserV2(AvroModel):
"A User V2"
name: str
age: int
return UserV2
@pytest.fixture
def user_extra_avro_atributes_dataclass():
@dataclasses.dataclass(repr=False)
class UserAliasesNamespace(AvroModel):
name: str
age: int
        @staticmethod
        def extra_avro_attributes() -> typing.Dict[str, typing.Any]:
return {
"namespace": "test.com.ar/user/v1",
"aliases": ["User", "My favorite User"],
}
return UserAliasesNamespace
@pytest.fixture
def user_advance_dataclass():
class UserAdvance(AvroModel):
name: str
age: int
pets: typing.List[str]
accounts: typing.Dict[str, int]
has_car: bool = False
favorite_colors: types.Enum = types.Enum(["BLUE", "YELLOW", "GREEN"])
country: str = "Argentina"
address: str = None
md5: types.Fixed = types.Fixed(16)
class Meta:
schema_doc = False
return UserAdvance
@pytest.fixture
def user_advance_with_defaults_dataclass():
class UserAdvance(AvroModel):
name: str
age: int
pets: typing.List[str] = dataclasses.field(default_factory=lambda: ["dog", "cat"])
accounts: typing.Dict[str, int] = dataclasses.field(default_factory=lambda: {"key": 1})
has_car: bool = False
favorite_colors: types.Enum = types.Enum(["BLUE", "YELLOW", "GREEN"], default="BLUE")
country: str = "Argentina"
address: str = None
class Meta:
schema_doc = False
return UserAdvance
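# Usage sketch: each fixture returns a model class whose Avro schema can be
# rendered with dataclasses_avroschema, e.g.
#   def test_schema(user_dataclass):
#       assert user_dataclass.avro_schema()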
|
import torch
D_TYPE = torch.float32
DEVICE = torch.device(f"cuda:{torch.cuda.device_count() - 1}" if torch.cuda.is_available() else "cpu")
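# Minimal usage sketch: import these constants so every module allocates on
# the same device, e.g. x = torch.zeros(4, 4, dtype=D_TYPE, device=DEVICE)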
|
import numpy as np
import torch
def np_softmax(x):
    # Subtract the row-wise max for numerical stability; keepdims keeps the
    # reductions broadcastable so this works for both 1-D and batched inputs.
    e_x = np.exp(x - np.max(x, axis=-1, keepdims=True))
    return e_x / e_x.sum(axis=-1, keepdims=True)
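# Sanity sketch: with the keepdims handling above, np_softmax accepts both
# vectors and batches, e.g. np_softmax(np.zeros((2, 3))) is 1/3 everywhere.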
def get_network_weights(network, exclude_norm=False):
"""
Args:
network: torch.nn.Module, neural network, e.g. actor or critic
exclude_norm: True if layers corresponding to norm will be excluded
Returns:
state_dict: dictionary which contains neural network parameters
"""
state_dict = network.state_dict()
if exclude_norm:
state_dict = {
key: value
for key, value in state_dict.items()
if all(x not in key for x in ["norm", "lstm"])
}
state_dict = {key: value.clone() for key, value in state_dict.items()}
return state_dict
def set_network_weights(network, weights, strict=True):
network.load_state_dict(weights, strict=strict)
def set_params_noise(actor, states, noise_delta=0.2, tol=1e-3, max_steps=1000):
"""
Perturbs parameters of the policy represented by the actor network.
Binary search is employed to find the appropriate magnitude of the noise
corresponding to the desired distance measure (noise_delta) between
non-perturbed and perturbed policy.
Args:
actor: torch.nn.Module, neural network which represents actor
states: batch of states to estimate the distance measure between the
non-perturbed and perturbed policy
noise_delta: float, parameter noise threshold value
tol: float, controls the tolerance of binary search
max_steps: maximum number of steps in binary search
"""
if states is None:
return noise_delta
exclude_norm = True
orig_weights = get_network_weights(actor, exclude_norm=exclude_norm)
orig_actions = actor(states)
sigma_min = 0.
sigma_max = 100.
sigma = sigma_max
for step in range(max_steps):
dist = torch.distributions.normal.Normal(0, sigma)
weights = {
key: w.clone() + dist.sample(w.shape)
for key, w in orig_weights.items()
}
set_network_weights(actor, weights, strict=not exclude_norm)
new_actions = actor(states)
dist = (new_actions - orig_actions).pow(2).sum(1).sqrt().mean().item()
dist_mismatch = dist - noise_delta
# the difference between current dist and desired dist is too small
if np.abs(dist_mismatch) < tol:
break
# too big sigma
if dist_mismatch > 0:
sigma_max = sigma
# too small sigma
else:
sigma_min = sigma
sigma = sigma_min + (sigma_max - sigma_min) / 2
return dist
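# Usage sketch (hypothetical actor and state batch): perturb a copy of the
# actor's weights so the induced change in its actions is ~noise_delta, e.g.
#   achieved_dist = set_params_noise(actor, states, noise_delta=0.2)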
|
from mind_mapper.models import Model
class Annotation(Model):
def __init__(self, **kwargs):
if kwargs:
self.text = kwargs["text"]
else:
self.text = ""
def __str__(self):
return "<annotation>\n" +\
self.serialize_text(self.text) +\
"\n</annotation>\n"
def deserialize(self, xml):
if xml.attrib.keys():
raise AttributeError("Bad XML format!")
self.text = xml.text
__repr__ = __str__
|
# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT
from nipype.testing import assert_equal
from nipype.interfaces.fsl.preprocess import FAST
def test_FAST_inputs():
input_map = dict(args=dict(argstr='%s',
),
bias_iters=dict(argstr='-I %d',
),
bias_lowpass=dict(argstr='-l %d',
units='mm',
),
environ=dict(nohash=True,
usedefault=True,
),
hyper=dict(argstr='-H %.2f',
),
ignore_exception=dict(nohash=True,
usedefault=True,
),
img_type=dict(argstr='-t %d',
),
in_files=dict(argstr='%s',
copyfile=False,
mandatory=True,
position=-1,
),
init_seg_smooth=dict(argstr='-f %.3f',
),
init_transform=dict(argstr='-a %s',
),
iters_afterbias=dict(argstr='-O %d',
),
manual_seg=dict(argstr='-s %s',
),
mixel_smooth=dict(argstr='-R %.2f',
),
no_bias=dict(argstr='-N',
),
no_pve=dict(argstr='--nopve',
),
number_classes=dict(argstr='-n %d',
),
other_priors=dict(argstr='-A %s',
),
out_basename=dict(argstr='-o %s',
),
output_biascorrected=dict(argstr='-B',
),
output_biasfield=dict(argstr='-b',
),
output_type=dict(),
probability_maps=dict(argstr='-p',
),
segment_iters=dict(argstr='-W %d',
),
segments=dict(argstr='-g',
),
terminal_output=dict(nohash=True,
),
use_priors=dict(argstr='-P',
),
verbose=dict(argstr='-v',
),
)
inputs = FAST.input_spec()
for key, metadata in input_map.items():
for metakey, value in metadata.items():
yield assert_equal, getattr(inputs.traits()[key], metakey), value
def test_FAST_outputs():
output_map = dict(bias_field=dict(),
mixeltype=dict(),
partial_volume_files=dict(),
partial_volume_map=dict(),
probability_maps=dict(),
restored_image=dict(),
tissue_class_files=dict(),
tissue_class_map=dict(),
)
outputs = FAST.output_spec()
for key, metadata in output_map.items():
for metakey, value in metadata.items():
yield assert_equal, getattr(outputs.traits()[key], metakey), value
|
import numpy as np
import logging
import torch
import torch.nn.functional as F
from v1_metrics import compute_eer
import data_reader.adv_kaldi_io as ako
"""
Validation without stochastic search for the decision threshold.
Important: EER does not require a threshold.
"""
## Get the same logger from main
logger = logging.getLogger("anti-spoofing")
def validation(args, model, device, train_loader, train_scp, train_utt2label, val_loader, val_scp, val_utt2label):
logger.info("Starting Validation")
train_loss, train_scores = compute_loss(model, device, train_loader)
val_loss, val_scores = compute_loss(model, device, val_loader)
train_preds, train_labels = utt_scores(train_scores, train_scp, train_utt2label)
val_preds, val_labels = utt_scores(val_scores, val_scp, val_utt2label)
train_eer = compute_eer(train_labels, train_preds)
val_eer = compute_eer(val_labels, val_preds)
logger.info('===> Training set: Average loss: {:.4f}\tEER: {:.4f}\n'.format(
train_loss, train_eer))
logger.info('===> Validation set: Average loss: {:.4f}\tEER: {:.4f}\n'.format(
val_loss, val_eer))
return val_loss, val_eer
def utt_scores(scores, scp, utt2label):
"""return predictions and labels per utterance
"""
utt2len = ako.read_key_len(scp)
utt2label = ako.read_key_label(utt2label)
key_list = ako.read_all_key(scp)
preds, labels = [], []
idx = 0
for key in key_list:
frames_per_utt = utt2len[key]
avg_scores = np.average(scores[idx:idx+frames_per_utt])
idx = idx + frames_per_utt
preds.append(avg_scores)
labels.append(utt2label[key])
return np.array(preds), np.array(labels)
def compute_loss(model, device, data_loader):
model.eval()
loss = 0
correct = 0
scores = []
with torch.no_grad():
for data, target in data_loader:
data, target = data.to(device), target.to(device)
target = target.view(-1,1).float()
#output, hidden = model(data, None)
output = model(data)
loss += F.binary_cross_entropy(output, target, size_average=False)
scores.append(output.data.cpu().numpy())
loss /= len(data_loader.dataset) # average loss
scores = np.vstack(scores) # scores per frame
return loss, scores
|
from litepcie.core.endpoint import LitePCIeEndpoint
from litepcie.core.msi import LitePCIeMSI
|
from enum import Enum
from singleton import Singleton
class ActiveLayerIdentifier(Enum):
A = 0
B = 1
class ActiveLayer(metaclass=Singleton):
def __init__(self):
self._active_layer = ActiveLayerIdentifier.A
self._layer_change_events = []
self._activity_events = []
@property
def active_layer(self):
return self._active_layer
@active_layer.setter
def active_layer(self, value: ActiveLayerIdentifier):
for x in self._activity_events:
x(value)
if value != self._active_layer:
print("Layer Change to", value)
for x in self._layer_change_events:
x(value)
self._active_layer = value
def subscribe_to_layer_change(self, callback):
self._layer_change_events.append(callback)
def subscribe_to_activity(self, callback):
self._activity_events.append(callback)
def clear_all_subscriptions(self):
self._layer_change_events.clear()
self._activity_events.clear()
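# Usage sketch: ActiveLayer is a singleton, so every call site shares state.
#   ActiveLayer().subscribe_to_layer_change(lambda layer: print("now on", layer))
#   ActiveLayer().active_layer = ActiveLayerIdentifier.B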
|
#!/usr/bin/env python
# BinBashCord, Discord quote bot based on an IRC quote bot.
# BinBashCord is Copyright 2017-2018 Dylan Morrison based on code
# Copyright 2010 Dylan Morrison
# See LICENSE file for licensing.
# Users: Edit these
TOKEN="" # Discord authentication token for the bot.
CHANNELIDS=[] # List of channel IDs to listen on
RESTRICTADD=False # Restrict adding quotes to certain roles
ROLEIDS=[] # List of role IDs allowed to add quotes.
ALLOWPMS=True # Allow responding to PMs
MAINTAINER="Someone" # Your name/handle/whatever here.
# Users: Do not edit below here
import discord
import asyncio
import os
import sys
import re
from random import choice, randint
# Removed due to discord flood protection. Probably not needed anymore anyway.
#
#def slicestring(input):
# if len(input) < 1994:
# return [input]
# else:
# return list(filter(lambda x: x != '', re.split("(.{1,1994} )", input+" ")))
#
def slicestring(input):
if len(input) < 1994:
return [input]
else:
return [input[0:1994]]
client = discord.Client()
@client.event
async def on_ready():
print('BinBash logged in as')
print(client.user.name)
print(client.user.id)
print('------')
@client.event
async def on_message(message):
origin = message.author
dest = message.channel
if ((dest.is_private == False) and (dest.id not in CHANNELIDS)) or ((dest.is_private == True) and (ALLOWPMS == False)):
return
msg = message.content
splitmsg = msg.split(' ')
command = splitmsg[0]
recom = re.match("^!([a-zA-Z0-9]+)bash$", command)
if recom != None:
try:
with open("bashes/" + recom.group(1) + ".txt") as bashfile:
for i, l in enumerate(bashfile):
pass
numlines = i + 1
bashfile.seek(0)
lines = bashfile.readlines()
                if (len(splitmsg) == 2) and (re.match("^[0-9]+$", splitmsg[1]) != None):
                    linenum = int(splitmsg[1])
                    if (linenum < 1) or (linenum > len(lines)):
                        # Line numbers are 1-based, so len(lines) itself is valid.
                        await client.send_message(dest, 'Malformed command. (Invalid line number *' + splitmsg[1] + '*)')
                        return
                else:
                    linenum = randint(1, numlines)
myline = lines[linenum - 1]
slicelist = slicestring(myline.rstrip())
await client.send_message(dest, str(linenum) + ". " + slicelist[0])
del slicelist[0]
if slicelist != []:
for tmpline in slicelist:
await client.send_message(dest, tmpline)
except IOError as e:
await client.send_message(dest, 'Sorry, *' + recom.group(1) + 'bash* is not a valid bash file, or another error occurred: IOError #' + str(e.errno) + ' ' + str(e))
if (command == "!addquote") and (len(splitmsg) >= 3) and (re.match("^[a-zA-Z0-9]+$", splitmsg[1]) != None):
if (dest.is_private == True) and (RESTRICTADD == True): # Can't check roles on a private message.
await client.send_message(dest, 'Sorry, you are not authorized to add quotes over PM.')
return
if RESTRICTADD == True:
allowed = False
for role in origin.roles:
if role.id in ROLEIDS:
allowed = True
if allowed == False:
await client.send_message(dest, 'Sorry, you are not authorized to add quotes to the database.')
return
try:
output = open("bashes/" + splitmsg[1] + ".txt", "a")
output.write(" ".join(splitmsg[2:]) + "\n")
output.close()
await client.send_message(dest, 'Quote successfully added to ' + splitmsg[1] + "bash.")
except IOError as e:
await client.send_message(dest, 'IOError adding quote. Is *' + splitmsg[1] + 'bash* a valid bash file? Ask ' + MAINTAINER + '. IOError #' + str(e.errno) + ' ' + str(e))
elif command == "!bashes":
liststring = " ".join(os.listdir("bashes/"))
liststring = re.sub(".txt", "", liststring)
await client.send_message(dest, 'Bashes currently in my list: ' + liststring)
client.run(TOKEN)
|
from keras.optimizers import Optimizer
from keras.legacy import interfaces
from keras import backend as K
class QHAdam(Optimizer):
def __init__(self, lr=0.001, beta_1=0.999, beta_2=0.999, v1=0.7, v2=1., epsilon=1e-3, **kwargs):
super(QHAdam, self).__init__(**kwargs)
with K.name_scope(self.__class__.__name__):
self.iterations = K.variable(0, dtype='int64', name='iterations')
self.lr = K.variable(lr, name='lr')
self.beta_1 = K.variable(beta_1, name='beta_1')
self.beta_2 = K.variable(beta_2, name='beta_2')
self.v1 = K.variable(v1, name='v1')
self.v2 = K.variable(v2, name='v2')
self.epsilon = epsilon
@interfaces.legacy_get_updates_support
def get_updates(self, loss, params):
grads = self.get_gradients(loss, params)
self.updates = [K.update_add(self.iterations, 1)]
lr = self.lr
t = K.cast(self.iterations, K.floatx()) + 1
ms = [K.zeros(K.int_shape(p), dtype=K.dtype(p)) for p in params]
vs = [K.zeros(K.int_shape(p), dtype=K.dtype(p)) for p in params]
vhats = [K.zeros(1) for _ in params]
self.weights = [self.iterations] + ms + vs + vhats
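        # QHAdam update: blend the raw gradient with the bias-corrected first
        # moment (weight v1) and the squared gradient with the bias-corrected
        # second moment (weight v2):
        #   p <- p - lr * [(1 - v1) g + v1 m_hat]
        #            / (sqrt((1 - v2) g^2 + v2 v_hat) + eps)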
for p, g, m, v, vhat in zip(params, grads, ms, vs, vhats):
m_t = (self.beta_1 * m) + (1. - self.beta_1) * g
v_t = (self.beta_2 * v) + (1. - self.beta_2) * K.square(g)
m_t_adj = m_t / (1. - K.pow(self.beta_1, t))
v_t_adj = v_t / (1. - K.pow(self.beta_2, t))
a = (1. - self.v1) * g + self.v1 * (m_t_adj)
b = K.sqrt((1. - self.v2) * K.square(g) + self.v2 * v_t_adj) + self.epsilon
p_t = p - lr * a / b
self.updates.append(K.update(m, m_t))
self.updates.append(K.update(v, v_t))
            new_p = p_t
            # Apply constraints (standard Keras optimizer pattern).
            if getattr(p, 'constraint', None) is not None:
                new_p = p.constraint(new_p)
self.updates.append(K.update(p, new_p))
return self.updates
def get_config(self):
config = {'lr': float(K.get_value(self.lr)),
'beta_1': float(K.get_value(self.beta_1)),
'beta_2': float(K.get_value(self.beta_2)),
'v1': float(K.get_value(self.v1)),
'v2': float(K.get_value(self.v2)),
'epsilon': self.epsilon}
base_config = super(QHAdam, self).get_config()
return dict(list(base_config.items()) + list(config.items()))
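# Usage sketch (hypothetical Keras model): pass the optimizer to compile(),
# e.g. model.compile(optimizer=QHAdam(lr=1e-3, v1=0.7, v2=1.0), loss='mse')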
|
# Generated by Django 3.2.2 on 2021-06-03 09:17
import django.db.models.deletion
import i18nfield.fields
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
('pretixbase', '0191_event_last_modified'),
]
operations = [
migrations.CreateModel(
name='Rule',
fields=[
('id', models.BigAutoField(primary_key=True, serialize=False)),
('subject', i18nfield.fields.I18nCharField(max_length=255)),
('template', i18nfield.fields.I18nTextField()),
('all_products', models.BooleanField(default=True)),
('include_pending', models.BooleanField(default=False)),
('send_date', models.DateTimeField(blank=True, null=True)),
('send_offset_days', models.IntegerField(null=True)),
('send_offset_time', models.TimeField(blank=True, null=True)),
('date_is_absolute', models.BooleanField(default=True)),
('offset_to_event_end', models.BooleanField(default=False)),
('offset_is_after', models.BooleanField(default=False)),
('send_to', models.CharField(default='orders', max_length=10)),
('event', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='sendmail_rules', to='pretixbase.event')),
('limit_products', models.ManyToManyField(to='pretixbase.Item')),
],
),
migrations.CreateModel(
name='ScheduledMail',
fields=[
('id', models.BigAutoField(primary_key=True, serialize=False)),
('last_computed', models.DateTimeField(auto_now_add=True)),
('computed_datetime', models.DateTimeField(db_index=True)),
('state', models.CharField(default='scheduled', max_length=100)),
('last_successful_order_id', models.BigIntegerField(null=True)),
('event', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='pretixbase.event')),
('rule', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='sendmail.rule')),
('subevent', models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, to='pretixbase.subevent')),
],
options={
'unique_together': {('rule', 'subevent')},
},
),
]
|
"""
Define a Pet class with their attributes
"""
class Pet(object):
def __init__(self, name, speaks):
self.name = name
self.speaks = speaks
def full_name(self):
return f"{self.name}"
def speaks(self):
print({self.name} + ' speaks')
if __name__ == "__main__":
pet = Pet(name="Dog", speaks="Woof Woof")
print(pet.full_name)
|
a = [10, 45, 67, 89, 34, 91, 15, 18, 34]
n = len(a)
print(a)
max_val = a[0]  # "max_val" so the built-in max() is not shadowed
for i in range(1, n):
    if (max_val < a[i]): max_val = a[i]
print('Max = ', max_val)
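# Equivalent one-liner using the built-in: print('Max = ', max(a))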
|
from collections import Counter
from itertools import count
from .common import dict_coords_as_str
from .day12 import cmp
from .day8 import iter_chunks
from .day9 import Interpreter
from .intcode import load_program_from_file
from .plane import Coord
EMPTY = 0
WALL = 1
BLOCK = 2
PADDLE = 3
BALL = 4
debug_colors = {
EMPTY: " ",
WALL: "#",
BLOCK: "X",
PADDLE: "_",
BALL: "o",
}
def find_num_blocks(intcode):
computer = Interpreter.run_program(intcode)
tiles = Counter(tile_id for _, _, tile_id in iter_chunks(3, computer))
return tiles[BLOCK]
def play_game(intcode, debug=False):
framebuffer = {}
score = 0
ball = Coord(0, 0)
paddle = Coord(0, 0)
def joystick():
for i in count():
move = cmp(0, paddle.x - ball.x)
if debug:
print(dict_coords_as_str(framebuffer, debug_colors))
print("Iteration:", i)
print("Paddle:", paddle)
print("Ball:", ball)
print("Move:", move)
print("Score:", score)
yield move
# Put the game in free to play mode
intcode = list(intcode)
intcode[0] = 2
computer = Interpreter.run_program(intcode, joystick())
for x, y, tile_id in iter_chunks(3, computer):
if x == -1 and y == 0:
score = tile_id
continue
framebuffer[(x, y)] = tile_id
if tile_id == PADDLE:
paddle = Coord(x, y)
elif tile_id == BALL:
ball = Coord(x, y)
return score
def solve(path):
intcode = load_program_from_file(path)
return (find_num_blocks(intcode), play_game(intcode))
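# Usage sketch (hypothetical puzzle-input path):
#   num_blocks, final_score = solve("day13_input.txt")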
|
"""The hint for this problem directs us to the page source, which contains
a very large comment with a note saying "find rare characters". Send a get
request to the page and construct a BeautifulSoup parser to find the second
comment (the one with mess of characters) then use a dictionary to keep count
of all the characters. Then find the keys with the smallest value associated
with them.
Once we have a dictionary of the characters and the number of times they appear,
we can sort them based on their values and view the results. We'll notice that
the first eight characters in acending order of appearence will spell
'equality'. We'll use this as the name of the next riddle's page.
"""
import requests
import webbrowser
from bs4 import BeautifulSoup, Comment
# Send the request and build the parse tree.
webpage = "http://www.pythonchallenge.com/pc/def/ocr.html"
r = requests.get(webpage)
# The lxml parser wouldn't find all comments, so use html.parser instead.
soup = BeautifulSoup(r.content, "html.parser")
# Find the second comment.
chars = soup.find_all(string=lambda text:isinstance(text, Comment))[1]
# Map each character to a count of how many times it appears.
counts = {}
for ch in chars:
if ch not in counts:
counts[ch] = 0
counts[ch] += 1
# Print the characters in ascending order of occurrence.
# We'll notice that the first eight letters spell "equality".
ordered = sorted(counts, key=lambda x: counts[x])
msg = "".join(ordered[:8])
print(msg)
# Let's open a new page whose name is the contents of the secret message.
split_page = webpage.split("ocr")
webbrowser.open(split_page[0] + msg + split_page[1])
|
coordinates_E0E1E1 = ((123, 112),
(123, 114), (123, 115), (123, 116), (123, 117), (123, 118), (123, 119), (123, 120), (123, 121), (123, 122), (123, 123), (123, 124), (123, 126), (124, 112), (124, 127), (125, 112), (125, 114), (125, 115), (125, 116), (125, 117), (125, 118), (125, 119), (125, 120), (125, 121), (125, 122), (125, 123), (125, 124), (125, 125), (125, 127), (126, 80), (126, 82), (126, 112), (126, 114), (126, 115), (126, 116), (126, 117), (126, 118), (126, 119), (126, 120), (126, 121), (126, 122), (126, 123), (126, 124), (126, 125), (126, 126), (126, 128), (127, 80), (127, 82), (127, 112), (127, 114), (127, 115), (127, 116), (127, 117), (127, 118), (127, 119), (127, 120), (127, 121), (127, 122), (127, 123), (127, 124), (127, 125), (127, 126), (127, 128), (128, 79), (128, 81), (128, 83), (128, 106), (128, 107), (128, 112), (128, 114), (128, 115), (128, 116), (128, 117),
(128, 118), (128, 119), (128, 120), (128, 121), (128, 122), (128, 123), (128, 124), (128, 125), (128, 126), (128, 127), (128, 128), (128, 132), (128, 133), (128, 135), (129, 79), (129, 81), (129, 82), (129, 84), (129, 97), (129, 106), (129, 108), (129, 111), (129, 112), (129, 113), (129, 114), (129, 115), (129, 116), (129, 117), (129, 118), (129, 119), (129, 120), (129, 121), (129, 122), (129, 123), (129, 124), (129, 125), (129, 126), (129, 127), (129, 128), (129, 131), (129, 136), (129, 137), (129, 139), (130, 79), (130, 81), (130, 82), (130, 83), (130, 85), (130, 97), (130, 106), (130, 110), (130, 112), (130, 113), (130, 114), (130, 115), (130, 116), (130, 117), (130, 118), (130, 119), (130, 120), (130, 121), (130, 122), (130, 123), (130, 124), (130, 125), (130, 126), (130, 127), (130, 128), (130, 129), (130, 130), (130, 132), (130, 133),
(130, 134), (130, 135), (130, 138), (131, 75), (131, 78), (131, 79), (131, 80), (131, 81), (131, 82), (131, 83), (131, 84), (131, 86), (131, 96), (131, 98), (131, 106), (131, 108), (131, 109), (131, 111), (131, 112), (131, 113), (131, 114), (131, 115), (131, 116), (131, 117), (131, 118), (131, 119), (131, 120), (131, 121), (131, 122), (131, 123), (131, 124), (131, 125), (131, 126), (131, 127), (131, 128), (131, 129), (131, 130), (131, 131), (131, 132), (131, 133), (131, 134), (131, 135), (131, 137), (132, 75), (132, 79), (132, 80), (132, 81), (132, 82), (132, 83), (132, 84), (132, 85), (132, 87), (132, 95), (132, 98), (132, 106), (132, 108), (132, 109), (132, 110), (132, 111), (132, 112), (132, 113), (132, 114), (132, 115), (132, 116), (132, 117), (132, 118), (132, 119), (132, 120), (132, 121), (132, 122), (132, 123), (132, 124),
(132, 125), (132, 126), (132, 127), (132, 128), (132, 129), (132, 130), (132, 131), (132, 132), (132, 133), (132, 134), (132, 136), (133, 76), (133, 78), (133, 79), (133, 80), (133, 81), (133, 82), (133, 83), (133, 84), (133, 85), (133, 86), (133, 88), (133, 93), (133, 96), (133, 97), (133, 99), (133, 106), (133, 108), (133, 109), (133, 110), (133, 111), (133, 112), (133, 113), (133, 114), (133, 115), (133, 116), (133, 117), (133, 118), (133, 119), (133, 120), (133, 121), (133, 122), (133, 123), (133, 124), (133, 125), (133, 126), (133, 127), (133, 128), (133, 129), (133, 130), (133, 131), (133, 132), (133, 133), (133, 134), (133, 136), (134, 77), (134, 79), (134, 80), (134, 81), (134, 82), (134, 83), (134, 84), (134, 85), (134, 86), (134, 87), (134, 89), (134, 91), (134, 92), (134, 95), (134, 96), (134, 97), (134, 98),
(134, 100), (134, 105), (134, 107), (134, 108), (134, 109), (134, 110), (134, 111), (134, 112), (134, 113), (134, 114), (134, 115), (134, 116), (134, 117), (134, 118), (134, 119), (134, 120), (134, 121), (134, 122), (134, 123), (134, 124), (134, 125), (134, 126), (134, 127), (134, 128), (134, 129), (134, 130), (134, 131), (134, 132), (134, 133), (134, 134), (134, 136), (135, 78), (135, 80), (135, 81), (135, 82), (135, 83), (135, 84), (135, 85), (135, 86), (135, 87), (135, 88), (135, 90), (135, 93), (135, 94), (135, 95), (135, 96), (135, 97), (135, 98), (135, 99), (135, 101), (135, 105), (135, 106), (135, 107), (135, 108), (135, 109), (135, 110), (135, 111), (135, 112), (135, 113), (135, 114), (135, 115), (135, 116), (135, 117), (135, 118), (135, 119), (135, 120), (135, 121), (135, 122), (135, 123), (135, 124), (135, 125), (135, 126),
(135, 127), (135, 128), (135, 129), (135, 130), (135, 131), (135, 132), (135, 133), (135, 134), (135, 136), (135, 145), (136, 78), (136, 82), (136, 88), (136, 89), (136, 91), (136, 92), (136, 93), (136, 94), (136, 95), (136, 96), (136, 97), (136, 98), (136, 99), (136, 100), (136, 103), (136, 105), (136, 106), (136, 107), (136, 108), (136, 109), (136, 110), (136, 111), (136, 112), (136, 113), (136, 114), (136, 115), (136, 116), (136, 117), (136, 118), (136, 119), (136, 120), (136, 121), (136, 122), (136, 123), (136, 124), (136, 125), (136, 126), (136, 127), (136, 128), (136, 129), (136, 130), (136, 131), (136, 132), (136, 133), (136, 134), (136, 135), (136, 137), (136, 144), (137, 78), (137, 80), (137, 81), (137, 82), (137, 83), (137, 84), (137, 85), (137, 86), (137, 89), (137, 90), (137, 91), (137, 92), (137, 93), (137, 94),
(137, 95), (137, 96), (137, 97), (137, 98), (137, 99), (137, 100), (137, 101), (137, 102), (137, 104), (137, 105), (137, 106), (137, 107), (137, 108), (137, 109), (137, 110), (137, 111), (137, 112), (137, 113), (137, 114), (137, 115), (137, 116), (137, 117), (137, 118), (137, 119), (137, 120), (137, 121), (137, 122), (137, 123), (137, 124), (137, 125), (137, 126), (137, 127), (137, 128), (137, 129), (137, 130), (137, 131), (137, 132), (137, 133), (137, 134), (137, 135), (137, 136), (137, 139), (137, 140), (137, 142), (138, 77), (138, 78), (138, 88), (138, 90), (138, 91), (138, 92), (138, 93), (138, 94), (138, 95), (138, 96), (138, 97), (138, 98), (138, 99), (138, 100), (138, 101), (138, 102), (138, 103), (138, 104), (138, 105), (138, 106), (138, 107), (138, 108), (138, 109), (138, 110), (138, 111), (138, 112), (138, 113), (138, 114),
(138, 115), (138, 116), (138, 117), (138, 118), (138, 119), (138, 120), (138, 121), (138, 122), (138, 123), (138, 124), (138, 125), (138, 126), (138, 127), (138, 128), (138, 129), (138, 130), (138, 131), (138, 132), (138, 133), (138, 134), (138, 135), (138, 136), (138, 137), (138, 141), (139, 77), (139, 90), (139, 91), (139, 92), (139, 93), (139, 94), (139, 95), (139, 96), (139, 97), (139, 98), (139, 99), (139, 100), (139, 101), (139, 102), (139, 103), (139, 104), (139, 105), (139, 106), (139, 107), (139, 108), (139, 109), (139, 110), (139, 111), (139, 112), (139, 113), (139, 114), (139, 115), (139, 116), (139, 117), (139, 118), (139, 119), (139, 120), (139, 121), (139, 122), (139, 123), (139, 124), (139, 125), (139, 126), (139, 127), (139, 128), (139, 129), (139, 130), (139, 131), (139, 132), (139, 133), (139, 134), (139, 135), (139, 136),
(139, 137), (139, 138), (139, 140), (140, 90), (140, 92), (140, 93), (140, 94), (140, 95), (140, 96), (140, 97), (140, 98), (140, 99), (140, 100), (140, 101), (140, 102), (140, 103), (140, 104), (140, 105), (140, 106), (140, 107), (140, 108), (140, 109), (140, 110), (140, 111), (140, 112), (140, 113), (140, 114), (140, 115), (140, 116), (140, 117), (140, 118), (140, 119), (140, 120), (140, 121), (140, 122), (140, 123), (140, 124), (140, 125), (140, 126), (140, 127), (140, 128), (140, 129), (140, 130), (140, 131), (140, 132), (140, 133), (140, 134), (140, 135), (140, 136), (140, 137), (140, 138), (140, 140), (141, 91), (141, 93), (141, 94), (141, 95), (141, 96), (141, 97), (141, 98), (141, 99), (141, 100), (141, 101), (141, 102), (141, 103), (141, 104), (141, 105), (141, 106), (141, 107), (141, 108), (141, 109), (141, 110), (141, 111),
(141, 112), (141, 113), (141, 114), (141, 115), (141, 116), (141, 117), (141, 118), (141, 119), (141, 120), (141, 121), (141, 122), (141, 123), (141, 124), (141, 125), (141, 126), (141, 127), (141, 128), (141, 129), (141, 130), (141, 131), (141, 132), (141, 133), (141, 134), (141, 135), (141, 136), (141, 137), (141, 138), (141, 140), (142, 91), (142, 93), (142, 94), (142, 95), (142, 96), (142, 97), (142, 98), (142, 99), (142, 100), (142, 101), (142, 102), (142, 103), (142, 104), (142, 105), (142, 106), (142, 107), (142, 108), (142, 109), (142, 110), (142, 111), (142, 112), (142, 113), (142, 114), (142, 115), (142, 116), (142, 117), (142, 118), (142, 119), (142, 120), (142, 121), (142, 122), (142, 123), (142, 124), (142, 125), (142, 126), (142, 127), (142, 128), (142, 129), (142, 130), (142, 131), (142, 132), (142, 133), (142, 134), (142, 140),
(143, 90), (143, 92), (143, 93), (143, 94), (143, 95), (143, 96), (143, 97), (143, 98), (143, 99), (143, 100), (143, 101), (143, 102), (143, 103), (143, 104), (143, 105), (143, 106), (143, 107), (143, 108), (143, 109), (143, 110), (143, 111), (143, 112), (143, 113), (143, 114), (143, 115), (143, 116), (143, 117), (143, 118), (143, 119), (143, 120), (143, 121), (143, 122), (143, 123), (143, 124), (143, 125), (143, 126), (143, 127), (143, 128), (143, 129), (143, 130), (143, 131), (143, 135), (143, 136), (143, 137), (143, 138), (143, 140), (144, 90), (144, 91), (144, 92), (144, 93), (144, 94), (144, 95), (144, 96), (144, 97), (144, 98), (144, 99), (144, 100), (144, 101), (144, 102), (144, 103), (144, 104), (144, 105), (144, 106), (144, 107), (144, 108), (144, 109), (144, 110), (144, 111), (144, 112), (144, 113), (144, 114), (144, 115),
(144, 116), (144, 117), (144, 118), (144, 119), (144, 120), (144, 121), (144, 122), (144, 123), (144, 124), (144, 125), (144, 126), (144, 127), (144, 128), (144, 129), (144, 130), (144, 133), (145, 88), (145, 90), (145, 91), (145, 92), (145, 93), (145, 94), (145, 95), (145, 96), (145, 97), (145, 98), (145, 99), (145, 100), (145, 101), (145, 102), (145, 103), (145, 104), (145, 105), (145, 106), (145, 107), (145, 108), (145, 109), (145, 110), (145, 111), (145, 112), (145, 113), (145, 114), (145, 115), (145, 116), (145, 117), (145, 118), (145, 119), (145, 120), (145, 121), (145, 122), (145, 123), (145, 124), (145, 125), (145, 126), (145, 127), (145, 128), (145, 129), (145, 131), (146, 86), (146, 87), (146, 90), (146, 91), (146, 92), (146, 93), (146, 94), (146, 95), (146, 96), (146, 97), (146, 98), (146, 99), (146, 100), (146, 101),
(146, 102), (146, 103), (146, 104), (146, 105), (146, 106), (146, 107), (146, 108), (146, 109), (146, 110), (146, 111), (146, 112), (146, 113), (146, 114), (146, 115), (146, 116), (146, 117), (146, 118), (146, 119), (146, 120), (146, 121), (146, 122), (146, 124), (146, 125), (146, 126), (146, 127), (146, 128), (146, 130), (147, 83), (147, 85), (147, 88), (147, 89), (147, 90), (147, 91), (147, 92), (147, 93), (147, 94), (147, 95), (147, 96), (147, 97), (147, 98), (147, 99), (147, 100), (147, 101), (147, 102), (147, 103), (147, 104), (147, 105), (147, 106), (147, 107), (147, 108), (147, 109), (147, 110), (147, 111), (147, 112), (147, 113), (147, 114), (147, 115), (147, 116), (147, 117), (147, 118), (147, 119), (147, 120), (147, 123), (147, 126), (147, 127), (147, 129), (148, 82), (148, 86), (148, 87), (148, 88), (148, 89), (148, 90),
(148, 91), (148, 92), (148, 93), (148, 94), (148, 95), (148, 96), (148, 97), (148, 98), (148, 99), (148, 100), (148, 101), (148, 102), (148, 103), (148, 104), (148, 105), (148, 106), (148, 107), (148, 108), (148, 109), (148, 110), (148, 111), (148, 112), (148, 113), (148, 114), (148, 115), (148, 116), (148, 117), (148, 118), (148, 119), (148, 124), (148, 125), (148, 128), (149, 82), (149, 84), (149, 85), (149, 86), (149, 87), (149, 88), (149, 89), (149, 90), (149, 91), (149, 92), (149, 93), (149, 94), (149, 95), (149, 96), (149, 97), (149, 98), (149, 99), (149, 100), (149, 101), (149, 102), (149, 103), (149, 104), (149, 105), (149, 106), (149, 107), (149, 108), (149, 109), (149, 110), (149, 111), (149, 112), (149, 113), (149, 114), (149, 115), (149, 116), (149, 117), (149, 118), (149, 120), (149, 128), (150, 82), (150, 84),
(150, 85), (150, 86), (150, 87), (150, 88), (150, 89), (150, 90), (150, 91), (150, 92), (150, 93), (150, 94), (150, 95), (150, 96), (150, 97), (150, 98), (150, 99), (150, 100), (150, 101), (150, 102), (150, 103), (150, 104), (150, 105), (150, 106), (150, 107), (150, 108), (150, 109), (150, 110), (150, 111), (150, 112), (150, 113), (150, 114), (150, 115), (150, 116), (150, 117), (150, 119), (151, 82), (151, 84), (151, 85), (151, 86), (151, 87), (151, 94), (151, 95), (151, 96), (151, 97), (151, 98), (151, 99), (151, 100), (151, 101), (151, 102), (151, 103), (151, 104), (151, 105), (151, 106), (151, 107), (151, 108), (151, 109), (151, 110), (151, 111), (151, 112), (151, 113), (151, 114), (151, 115), (151, 116), (151, 117), (151, 119), (152, 82), (152, 87), (152, 88), (152, 89), (152, 90), (152, 91), (152, 92), (152, 93),
(152, 95), (152, 96), (152, 97), (152, 98), (152, 99), (152, 100), (152, 101), (152, 102), (152, 103), (152, 104), (152, 105), (152, 106), (152, 107), (152, 113), (152, 114), (152, 115), (152, 116), (152, 118), (152, 119), (153, 83), (153, 85), (153, 86), (153, 94), (153, 96), (153, 97), (153, 98), (153, 99), (153, 100), (153, 101), (153, 102), (153, 103), (153, 104), (153, 108), (153, 109), (153, 110), (153, 111), (153, 112), (153, 114), (153, 115), (153, 116), (153, 118), (154, 95), (154, 97), (154, 98), (154, 101), (154, 102), (154, 103), (154, 106), (154, 107), (154, 113), (154, 115), (154, 116), (154, 117), (154, 119), (155, 96), (155, 100), (155, 102), (155, 103), (155, 104), (155, 114), (155, 116), (155, 117), (155, 119), (156, 96), (156, 98), (156, 102), (156, 103), (156, 115), (156, 117), (156, 118), (156, 120), (157, 96),
(157, 97), (157, 102), (157, 103), (157, 115), (157, 117), (157, 118), (157, 119), (157, 121), (157, 152), (157, 153), (157, 155), (158, 96), (158, 103), (158, 115), (158, 117), (158, 118), (158, 119), (158, 122), (158, 155), (159, 95), (159, 102), (159, 115), (159, 117), (159, 123), (159, 155), (160, 95), (160, 102), (160, 115), (160, 118), (160, 119), (160, 120), (160, 124), (161, 94), (161, 102), (161, 115), (161, 117), (161, 121), (161, 122), (161, 125), (161, 156), (162, 94), (162, 102), (162, 115), (162, 123), (162, 126), (162, 146), (162, 156), (163, 93), (163, 101), (163, 115), (163, 124), (163, 127), (163, 156), (164, 93), (164, 101), (164, 125), (164, 128), (164, 155), (165, 128), (166, 128), )
coordinates_E1E1E1 = ((75, 106),
(76, 107), (77, 108), (78, 109), (78, 144), (79, 96), (79, 110), (79, 113), (79, 144), (80, 96), (80, 97), (80, 110), (80, 114), (81, 96), (81, 98), (81, 111), (81, 115), (82, 96), (82, 98), (82, 111), (82, 113), (82, 146), (83, 96), (83, 98), (83, 111), (83, 113), (83, 114), (83, 117), (83, 147), (83, 148), (84, 97), (84, 99), (84, 111), (84, 112), (84, 114), (84, 115), (84, 119), (84, 148), (85, 97), (85, 99), (85, 112), (85, 114), (85, 115), (85, 116), (85, 119), (85, 149), (86, 97), (86, 99), (86, 112), (86, 114), (86, 115), (86, 116), (86, 118), (87, 97), (87, 99), (87, 112), (87, 114), (87, 115), (87, 117), (88, 97), (88, 99), (88, 112), (88, 114), (88, 116), (89, 97), (89, 100), (89, 112), (89, 115), (90, 96), (90, 98), (90, 100), (90, 112), (90, 114),
(90, 115), (91, 96), (91, 98), (91, 99), (91, 101), (91, 111), (91, 114), (92, 95), (92, 97), (92, 98), (92, 99), (92, 101), (92, 110), (92, 112), (92, 114), (93, 93), (93, 96), (93, 97), (93, 98), (93, 99), (93, 100), (93, 103), (93, 106), (93, 107), (93, 108), (93, 109), (93, 111), (93, 112), (93, 113), (93, 115), (94, 83), (94, 84), (94, 85), (94, 86), (94, 87), (94, 88), (94, 89), (94, 90), (94, 91), (94, 92), (94, 95), (94, 96), (94, 97), (94, 98), (94, 99), (94, 100), (94, 101), (94, 104), (94, 105), (94, 110), (94, 111), (94, 112), (94, 113), (94, 114), (94, 116), (95, 81), (95, 93), (95, 94), (95, 95), (95, 96), (95, 97), (95, 98), (95, 99), (95, 100), (95, 101), (95, 102), (95, 103), (95, 106), (95, 107), (95, 108), (95, 109), (95, 110),
(95, 111), (95, 112), (95, 113), (95, 114), (95, 116), (96, 81), (96, 84), (96, 85), (96, 86), (96, 87), (96, 88), (96, 89), (96, 90), (96, 91), (96, 92), (96, 93), (96, 94), (96, 95), (96, 96), (96, 97), (96, 98), (96, 99), (96, 100), (96, 101), (96, 102), (96, 103), (96, 104), (96, 105), (96, 106), (96, 107), (96, 108), (96, 109), (96, 110), (96, 111), (96, 112), (96, 113), (96, 114), (96, 115), (96, 117), (97, 82), (97, 87), (97, 88), (97, 89), (97, 90), (97, 91), (97, 92), (97, 93), (97, 94), (97, 95), (97, 96), (97, 97), (97, 98), (97, 99), (97, 100), (97, 101), (97, 102), (97, 103), (97, 104), (97, 105), (97, 106), (97, 107), (97, 108), (97, 109), (97, 110), (97, 111), (97, 112), (97, 113), (97, 114), (97, 115), (97, 116), (97, 119), (97, 120),
(97, 121), (97, 122), (97, 123), (97, 124), (97, 126), (98, 84), (98, 85), (98, 86), (98, 90), (98, 91), (98, 92), (98, 93), (98, 94), (98, 95), (98, 96), (98, 97), (98, 98), (98, 99), (98, 100), (98, 101), (98, 102), (98, 103), (98, 104), (98, 105), (98, 106), (98, 107), (98, 108), (98, 109), (98, 110), (98, 111), (98, 112), (98, 113), (98, 114), (98, 115), (98, 116), (98, 117), (98, 127), (99, 87), (99, 88), (99, 89), (99, 91), (99, 92), (99, 93), (99, 94), (99, 95), (99, 96), (99, 97), (99, 98), (99, 99), (99, 100), (99, 101), (99, 102), (99, 103), (99, 104), (99, 105), (99, 106), (99, 107), (99, 108), (99, 109), (99, 110), (99, 111), (99, 112), (99, 113), (99, 114), (99, 115), (99, 116), (99, 117), (99, 118), (99, 119), (99, 120), (99, 121), (99, 122),
(99, 123), (99, 124), (99, 125), (99, 127), (100, 90), (100, 92), (100, 93), (100, 94), (100, 95), (100, 96), (100, 97), (100, 98), (100, 99), (100, 100), (100, 101), (100, 102), (100, 103), (100, 104), (100, 105), (100, 106), (100, 107), (100, 108), (100, 109), (100, 110), (100, 111), (100, 112), (100, 113), (100, 114), (100, 115), (100, 116), (100, 117), (100, 118), (100, 119), (100, 120), (100, 121), (100, 122), (100, 123), (100, 124), (100, 125), (100, 126), (100, 127), (100, 129), (100, 130), (100, 131), (100, 132), (100, 133), (100, 134), (100, 136), (101, 91), (101, 93), (101, 94), (101, 95), (101, 96), (101, 97), (101, 98), (101, 99), (101, 100), (101, 101), (101, 102), (101, 103), (101, 104), (101, 105), (101, 106), (101, 107), (101, 108), (101, 109), (101, 110), (101, 111), (101, 112), (101, 113), (101, 114), (101, 115),
(101, 116), (101, 117), (101, 126), (101, 127), (101, 136), (102, 91), (102, 93), (102, 94), (102, 95), (102, 96), (102, 97), (102, 98), (102, 99), (102, 100), (102, 101), (102, 102), (102, 103), (102, 104), (102, 105), (102, 106), (102, 107), (102, 108), (102, 109), (102, 110), (102, 111), (102, 112), (102, 113), (102, 114), (102, 115), (102, 116), (102, 118), (102, 119), (102, 120), (102, 121), (102, 122), (102, 123), (102, 124), (102, 127), (102, 128), (102, 129), (102, 130), (102, 131), (102, 132), (102, 133), (102, 134), (102, 136), (103, 92), (103, 94), (103, 95), (103, 96), (103, 97), (103, 98), (103, 99), (103, 100), (103, 101), (103, 102), (103, 103), (103, 104), (103, 105), (103, 106), (103, 107), (103, 108), (103, 109), (103, 110), (103, 111), (103, 112), (103, 113), (103, 114), (103, 115), (103, 116), (103, 117), (103, 126),
(103, 133), (103, 134), (103, 136), (104, 91), (104, 93), (104, 94), (104, 95), (104, 96), (104, 97), (104, 98), (104, 99), (104, 100), (104, 101), (104, 102), (104, 103), (104, 104), (104, 105), (104, 106), (104, 107), (104, 108), (104, 109), (104, 110), (104, 111), (104, 112), (104, 113), (104, 114), (104, 116), (104, 127), (104, 129), (104, 132), (104, 134), (104, 135), (104, 137), (105, 90), (105, 91), (105, 92), (105, 93), (105, 94), (105, 95), (105, 96), (105, 97), (105, 98), (105, 101), (105, 102), (105, 103), (105, 104), (105, 105), (105, 106), (105, 107), (105, 108), (105, 109), (105, 110), (105, 111), (105, 112), (105, 113), (105, 114), (105, 116), (105, 133), (105, 134), (105, 135), (105, 136), (105, 138), (106, 90), (106, 92), (106, 93), (106, 94), (106, 95), (106, 96), (106, 99), (106, 100), (106, 102), (106, 103),
(106, 104), (106, 105), (106, 106), (106, 107), (106, 108), (106, 109), (106, 110), (106, 111), (106, 112), (106, 113), (106, 114), (106, 116), (106, 134), (106, 136), (106, 139), (107, 89), (107, 91), (107, 92), (107, 93), (107, 94), (107, 95), (107, 98), (107, 102), (107, 103), (107, 104), (107, 105), (107, 106), (107, 107), (107, 108), (107, 109), (107, 110), (107, 111), (107, 112), (107, 113), (107, 114), (107, 116), (107, 134), (107, 136), (107, 138), (108, 88), (108, 90), (108, 91), (108, 92), (108, 93), (108, 94), (108, 96), (108, 102), (108, 104), (108, 105), (108, 106), (108, 107), (108, 108), (108, 109), (108, 110), (108, 111), (108, 112), (108, 113), (108, 114), (108, 116), (108, 134), (108, 137), (109, 87), (109, 89), (109, 90), (109, 91), (109, 92), (109, 93), (109, 95), (109, 103), (109, 105), (109, 106), (109, 107),
(109, 108), (109, 109), (109, 110), (109, 111), (109, 112), (109, 113), (109, 114), (109, 115), (109, 117), (109, 134), (109, 136), (110, 74), (110, 75), (110, 85), (110, 88), (110, 89), (110, 90), (110, 91), (110, 92), (110, 94), (110, 103), (110, 105), (110, 106), (110, 107), (110, 108), (110, 109), (110, 110), (110, 111), (110, 112), (110, 113), (110, 114), (110, 115), (110, 116), (110, 119), (110, 134), (110, 136), (111, 75), (111, 77), (111, 78), (111, 79), (111, 80), (111, 81), (111, 82), (111, 83), (111, 87), (111, 88), (111, 89), (111, 90), (111, 91), (111, 92), (111, 94), (111, 103), (111, 105), (111, 106), (111, 107), (111, 108), (111, 109), (111, 110), (111, 111), (111, 112), (111, 113), (111, 114), (111, 115), (111, 116), (111, 117), (111, 121), (111, 122), (111, 123), (111, 124), (111, 129), (111, 130), (111, 131),
(111, 132), (111, 133), (111, 134), (111, 136), (112, 75), (112, 79), (112, 84), (112, 85), (112, 86), (112, 87), (112, 88), (112, 89), (112, 90), (112, 91), (112, 93), (112, 103), (112, 105), (112, 106), (112, 107), (112, 108), (112, 109), (112, 110), (112, 111), (112, 112), (112, 113), (112, 114), (112, 115), (112, 116), (112, 117), (112, 118), (112, 119), (112, 125), (112, 126), (112, 127), (112, 128), (112, 134), (112, 136), (113, 75), (113, 77), (113, 78), (113, 79), (113, 80), (113, 81), (113, 82), (113, 83), (113, 84), (113, 89), (113, 90), (113, 91), (113, 93), (113, 103), (113, 112), (113, 113), (113, 114), (113, 115), (113, 116), (113, 117), (113, 118), (113, 119), (113, 120), (113, 121), (113, 122), (113, 123), (113, 124), (113, 129), (113, 130), (113, 131), (113, 136), (114, 75), (114, 77), (114, 78), (114, 79),
(114, 80), (114, 81), (114, 82), (114, 83), (114, 86), (114, 87), (114, 88), (114, 90), (114, 92), (114, 103), (114, 105), (114, 106), (114, 107), (114, 108), (114, 109), (114, 110), (114, 111), (114, 112), (114, 113), (114, 114), (114, 115), (114, 116), (114, 117), (114, 118), (114, 119), (114, 120), (114, 121), (114, 122), (114, 123), (114, 124), (114, 125), (114, 126), (114, 127), (114, 128), (114, 129), (114, 133), (114, 134), (114, 137), (115, 76), (115, 78), (115, 79), (115, 80), (115, 81), (115, 82), (115, 84), (115, 89), (115, 92), (115, 112), (115, 114), (115, 115), (115, 116), (115, 117), (115, 118), (115, 119), (115, 120), (115, 121), (115, 122), (115, 123), (115, 124), (115, 125), (115, 126), (115, 127), (115, 128), (115, 131), (115, 138), (116, 76), (116, 78), (116, 79), (116, 83), (116, 90), (116, 91), (116, 112),
(116, 114), (116, 115), (116, 116), (116, 117), (116, 118), (116, 119), (116, 120), (116, 121), (116, 122), (116, 123), (116, 124), (116, 125), (116, 126), (116, 127), (116, 129), (117, 76), (117, 78), (117, 79), (117, 80), (117, 82), (117, 90), (117, 112), (117, 114), (117, 115), (117, 116), (117, 117), (117, 118), (117, 119), (117, 120), (117, 121), (117, 122), (117, 123), (117, 124), (117, 125), (117, 126), (117, 128), (118, 112), (118, 114), (118, 115), (118, 116), (118, 117), (118, 118), (118, 119), (118, 120), (118, 121), (118, 122), (118, 123), (118, 124), (118, 125), (118, 127), (119, 113), (119, 126), (120, 112), (120, 114), (120, 115), (120, 116), (120, 117), (120, 118), (120, 119), (120, 120), (120, 121), (120, 122), (120, 123), (120, 124), (120, 126), (121, 112), (121, 126), )
coordinates_FEDAB9 = ((128, 69),
(129, 69), (129, 70), (130, 71), (131, 70), (131, 72), (132, 71), (132, 73), (133, 72), (133, 74), (134, 72), (135, 73), (135, 75), (136, 73), (136, 76), (137, 73), (137, 75), (138, 73), (138, 75), (139, 73), (139, 75), (139, 80), (139, 81), (139, 82), (139, 83), (139, 84), (139, 86), (140, 72), (140, 75), (140, 78), (140, 86), (140, 88), (141, 72), (141, 74), (141, 75), (141, 76), (141, 77), (141, 79), (141, 83), (141, 84), (142, 72), (142, 74), (142, 75), (142, 81), (142, 82), (143, 72), (143, 77), (143, 78), (143, 79), (144, 74), (144, 76), )
coordinates_D970D6 = ((124, 74),
(124, 76), (124, 77), (124, 78), (124, 79), (124, 80), (124, 81), (124, 82), (124, 83), (124, 84), (124, 85), (124, 86), (124, 87), (125, 73), (125, 79), (125, 83), (125, 87), (126, 69), (126, 71), (126, 72), (126, 74), (126, 75), (126, 76), (126, 78), (126, 84), (126, 87), (127, 74), (127, 75), (127, 77), (127, 85), (127, 88), (128, 72), (128, 77), (128, 86), (128, 88), (129, 74), (129, 77), (129, 87), (129, 89), (130, 90), (131, 88), (131, 91), (132, 89), (132, 91), )
coordinates_01CED1 = ((142, 88),
(143, 84), (143, 85), (143, 88), (144, 81), (144, 82), (144, 87), (145, 79), (145, 80), (145, 83), (145, 84), (146, 76), (146, 82), (147, 76), (147, 78), (147, 79), (147, 80), (148, 76), (148, 78), (148, 80), (149, 75), (149, 77), (149, 78), (149, 80), (150, 76), (150, 78), (150, 80), (151, 76), (151, 78), (151, 80), (152, 77), (152, 80), (153, 77), (153, 80), (154, 77), (154, 79), (154, 80), (154, 89), (154, 90), (154, 91), (154, 93), (155, 78), (155, 80), (155, 83), (155, 84), (155, 85), (155, 86), (155, 87), (155, 88), (155, 94), (156, 79), (156, 82), (156, 89), (156, 90), (156, 91), (156, 92), (156, 94), (156, 106), (156, 108), (156, 110), (157, 80), (157, 84), (157, 85), (157, 86), (157, 87), (157, 88), (157, 89), (157, 90), (157, 91), (157, 92), (157, 94), (157, 100), (157, 105),
(157, 110), (158, 81), (158, 83), (158, 84), (158, 85), (158, 86), (158, 87), (158, 88), (158, 89), (158, 90), (158, 91), (158, 93), (158, 100), (158, 105), (158, 107), (158, 109), (159, 84), (159, 86), (159, 87), (159, 88), (159, 89), (159, 90), (159, 91), (159, 93), (159, 98), (159, 100), (159, 105), (159, 109), (160, 84), (160, 86), (160, 87), (160, 88), (160, 89), (160, 90), (160, 92), (160, 97), (160, 100), (160, 104), (161, 84), (161, 86), (161, 87), (161, 88), (161, 89), (161, 90), (161, 92), (161, 97), (161, 100), (161, 104), (161, 106), (162, 85), (162, 87), (162, 88), (162, 89), (162, 91), (162, 96), (162, 99), (162, 104), (163, 85), (163, 87), (163, 88), (163, 89), (163, 91), (163, 96), (163, 97), (163, 99), (163, 105), (164, 86), (164, 88), (164, 90), (164, 95), (164, 97), (164, 99),
(164, 103), (165, 87), (165, 89), (165, 91), (165, 94), (165, 96), (165, 97), (165, 99), (165, 104), (166, 88), (166, 90), (166, 91), (166, 92), (166, 95), (166, 96), (166, 97), (166, 98), (166, 99), (166, 100), (166, 101), (166, 104), (167, 89), (167, 91), (167, 97), (167, 98), (167, 99), (167, 100), (167, 103), (168, 90), (168, 95), (168, 98), (168, 99), (168, 100), (168, 103), (169, 91), (169, 93), (169, 97), (169, 102), (170, 100), (171, 98), )
coordinates_FE3E96 = ((123, 91),
(123, 93), (123, 94), (123, 95), (123, 96), (123, 97), (123, 98), (123, 99), (123, 100), (123, 101), (123, 102), (123, 103), (123, 104), (123, 105), (123, 106), (123, 107), (123, 108), (123, 110), (123, 128), (123, 130), (123, 131), (123, 132), (123, 133), (123, 134), (123, 135), (123, 137), (124, 89), (124, 110), (124, 129), (124, 135), (124, 137), (125, 89), (125, 91), (125, 92), (125, 93), (125, 94), (125, 95), (125, 96), (125, 99), (125, 100), (125, 101), (125, 102), (125, 103), (125, 104), (125, 110), (125, 129), (125, 137), (126, 89), (126, 91), (126, 92), (126, 93), (126, 94), (126, 95), (126, 98), (126, 99), (126, 100), (126, 101), (126, 102), (126, 103), (126, 105), (126, 108), (126, 110), (126, 130), (126, 132), (126, 133), (126, 134), (126, 135), (126, 137), (127, 90), (127, 92), (127, 93), (127, 94), (127, 96),
(127, 99), (127, 101), (127, 102), (127, 104), (127, 109), (127, 110), (127, 137), (128, 91), (128, 93), (128, 95), (128, 99), (128, 101), (128, 102), (128, 104), (129, 92), (129, 95), (129, 99), (129, 101), (129, 102), (129, 104), (130, 94), (130, 100), (130, 102), (130, 104), (131, 93), (131, 100), (131, 102), (131, 104), (132, 101), (132, 104), (133, 101), (133, 103), (134, 103), )
coordinates_AF3060 = ((123, 147),
(123, 150), (124, 146), (124, 151), (125, 146), (125, 148), (126, 145), (126, 149), (126, 151), (127, 144), (127, 148), (128, 143), (128, 146), (129, 141), (129, 145), (130, 140), (130, 142), (130, 144), (131, 139), (131, 141), )
coordinates_ACFF2F = ((128, 150),
(129, 148), (129, 150), (130, 147), (130, 150), (131, 145), (131, 148), (131, 150), (132, 143), (132, 144), (132, 147), (132, 148), (132, 150), (133, 139), (133, 141), (133, 142), (133, 145), (133, 146), (133, 147), (133, 148), (133, 150), (134, 138), (134, 142), (134, 143), (134, 147), (134, 149), (135, 139), (135, 141), (135, 147), (135, 149), (136, 147), (136, 148), (137, 146), (137, 148), (138, 144), (139, 142), (139, 147), (140, 146), )
coordinates_FFDAB9 = ((104, 72),
(104, 73), (105, 71), (105, 75), (105, 84), (105, 86), (105, 88), (106, 71), (106, 73), (106, 77), (106, 84), (106, 88), (107, 70), (107, 72), (107, 78), (107, 83), (107, 87), (108, 69), (108, 71), (108, 72), (108, 73), (108, 74), (108, 75), (108, 80), (108, 81), (108, 85), (109, 69), (109, 72), (109, 77), (109, 79), (109, 80), (109, 81), (109, 83), (110, 69), (110, 72), (111, 69), (111, 72), (112, 69), (112, 71), (112, 73), (113, 70), (113, 73), (114, 70), (114, 73), (115, 71), (115, 73), (116, 71), (116, 74), (117, 71), (117, 74), (118, 71), (118, 72), (119, 70), )
coordinates_DA70D6 = ((116, 86),
(117, 85), (118, 85), (119, 74), (119, 76), (119, 77), (119, 78), (119, 79), (119, 80), (119, 83), (119, 85), (120, 71), (120, 85), (121, 72), (121, 76), (121, 77), (121, 78), (121, 79), (121, 80), (121, 81), (121, 82), (121, 83), (121, 85), (122, 73), )
coordinates_00CED1 = ((73, 97),
(74, 95), (74, 99), (75, 92), (75, 94), (75, 97), (75, 99), (76, 91), (76, 94), (76, 98), (76, 100), (77, 91), (77, 93), (77, 94), (77, 95), (77, 96), (77, 100), (78, 91), (78, 94), (78, 98), (78, 100), (79, 87), (79, 91), (79, 92), (79, 94), (79, 100), (80, 86), (80, 89), (80, 91), (80, 92), (80, 94), (80, 99), (80, 101), (81, 85), (81, 87), (81, 88), (81, 90), (81, 91), (81, 92), (81, 94), (81, 100), (82, 84), (82, 86), (82, 87), (82, 88), (82, 89), (82, 90), (82, 91), (82, 92), (82, 94), (82, 100), (82, 103), (83, 83), (83, 85), (83, 86), (83, 87), (83, 88), (83, 89), (83, 90), (83, 91), (83, 92), (83, 94), (83, 101), (83, 104), (84, 82), (84, 84), (84, 85), (84, 86), (84, 87), (84, 88), (84, 89), (84, 90), (84, 91),
(84, 92), (84, 94), (84, 101), (84, 103), (84, 105), (85, 84), (85, 85), (85, 86), (85, 87), (85, 88), (85, 89), (85, 90), (85, 91), (85, 92), (85, 94), (85, 101), (85, 103), (85, 105), (86, 81), (86, 83), (86, 84), (86, 85), (86, 86), (86, 87), (86, 88), (86, 89), (86, 90), (86, 91), (86, 92), (86, 94), (86, 101), (86, 103), (86, 104), (86, 106), (87, 80), (87, 82), (87, 83), (87, 84), (87, 85), (87, 86), (87, 87), (87, 88), (87, 89), (87, 90), (87, 91), (87, 92), (87, 94), (87, 101), (87, 103), (87, 104), (87, 106), (88, 79), (88, 81), (88, 82), (88, 83), (88, 84), (88, 85), (88, 86), (88, 87), (88, 88), (88, 89), (88, 90), (88, 91), (88, 92), (88, 94), (88, 102), (88, 104), (88, 105), (88, 107), (89, 78), (89, 80), (89, 81),
(89, 82), (89, 83), (89, 84), (89, 85), (89, 86), (89, 87), (89, 88), (89, 89), (89, 90), (89, 91), (89, 92), (89, 94), (89, 102), (89, 104), (89, 105), (89, 107), (90, 77), (90, 79), (90, 80), (90, 81), (90, 82), (90, 83), (90, 84), (90, 85), (90, 86), (90, 87), (90, 88), (90, 89), (90, 90), (90, 91), (90, 94), (90, 102), (91, 77), (91, 79), (91, 80), (91, 81), (91, 93), (91, 103), (91, 105), (91, 106), (91, 108), (92, 76), (92, 78), (92, 79), (92, 82), (92, 83), (92, 84), (92, 85), (92, 86), (92, 87), (92, 88), (92, 89), (92, 90), (92, 91), (93, 76), (93, 78), (93, 79), (93, 81), (94, 75), (94, 77), (94, 79), (95, 75), (95, 77), (95, 79), (96, 75), (96, 78), (96, 79), (97, 76), (97, 79), (98, 76), (98, 78), (98, 80),
(99, 76), (99, 78), (99, 79), (99, 82), (100, 77), (100, 79), (100, 80), (100, 83), (100, 84), (100, 85), (101, 77), (101, 79), (101, 80), (101, 81), (101, 82), (101, 86), (101, 87), (101, 88), (101, 89), (102, 78), (102, 83), (102, 89), (103, 79), (103, 81), (103, 84), (103, 85), (103, 86), (103, 87), (103, 89), (104, 83), )
coordinates_A120F0 = ((123, 139),
(123, 141), (123, 142), (123, 144), (124, 139), (124, 144), (125, 139), (125, 141), (125, 144), (126, 139), (126, 143), (127, 139), (127, 141), )
coordinates_ADFF2F = ((101, 146),
(102, 144), (102, 147), (103, 138), (103, 140), (103, 141), (103, 142), (103, 143), (103, 147), (104, 139), (104, 144), (104, 145), (104, 146), (104, 148), (105, 140), (105, 142), (105, 143), (105, 144), (105, 145), (105, 146), (105, 148), (106, 141), (106, 144), (106, 146), (107, 141), (107, 144), (107, 145), (108, 140), (108, 142), (108, 143), (108, 144), (108, 146), (109, 139), (109, 141), (109, 144), (109, 146), (110, 140), (110, 143), (110, 147), (111, 142), (111, 145), (111, 147), (112, 143), )
coordinates_A020F0 = ((118, 143),
(119, 143), (119, 145), (120, 144), (120, 145), )
coordinates_B03060 = ((111, 138),
(112, 138), (112, 140), (113, 139), (113, 141), (114, 140), (114, 143), (114, 144), (114, 145), (115, 142), (115, 148), (116, 143), (116, 150), (117, 146), (117, 148), (117, 151), (118, 147), (118, 149), (118, 151), (119, 151), (120, 148), (120, 151), )
coordinates_ACD8E6 = ((86, 137),
(86, 138), (87, 137), (87, 138), (88, 137), (88, 139), (89, 136), (89, 139), (90, 136), (90, 138), (90, 140), (91, 136), (91, 138), (91, 139), (92, 136), (92, 138), (92, 141), (93, 136), (93, 140), (94, 136), (94, 138), )
coordinates_FF3E96 = ((108, 100),
(109, 98), (109, 100), (109, 101), (110, 97), (110, 101), (111, 96), (111, 98), (111, 99), (111, 101), (112, 95), (112, 97), (112, 98), (112, 99), (112, 101), (113, 95), (113, 97), (113, 98), (113, 99), (113, 101), (114, 94), (114, 96), (114, 97), (114, 98), (114, 99), (114, 101), (115, 94), (115, 96), (115, 97), (115, 98), (115, 99), (115, 100), (115, 101), (116, 88), (116, 93), (116, 95), (116, 96), (116, 97), (116, 98), (116, 99), (116, 100), (116, 101), (116, 102), (116, 103), (116, 104), (116, 107), (116, 108), (116, 110), (116, 133), (116, 136), (117, 88), (117, 93), (117, 95), (117, 96), (117, 97), (117, 98), (117, 99), (117, 100), (117, 101), (117, 102), (117, 105), (117, 106), (117, 110), (117, 131), (117, 137), (118, 87), (118, 89), (118, 92), (118, 94), (118, 95), (118, 96), (118, 97), (118, 98),
(118, 99), (118, 100), (118, 101), (118, 102), (118, 103), (118, 107), (118, 108), (118, 110), (118, 130), (118, 133), (118, 134), (118, 135), (118, 136), (118, 140), (119, 87), (119, 90), (119, 104), (119, 111), (119, 129), (119, 131), (119, 132), (119, 133), (119, 134), (119, 135), (119, 136), (119, 137), (119, 139), (119, 141), (120, 88), (120, 92), (120, 93), (120, 94), (120, 95), (120, 96), (120, 97), (120, 98), (120, 99), (120, 100), (120, 101), (120, 102), (120, 103), (120, 106), (120, 107), (120, 108), (120, 110), (120, 128), (121, 89), (121, 90), (121, 102), (121, 106), (121, 110), (121, 130), (121, 131), (121, 132), (121, 133), (121, 134), (121, 135), (121, 136), (121, 137), (121, 138), (121, 139), (121, 140), (121, 142), )
coordinates_7FFFD4 = ((69, 113),
(70, 113), (70, 116), (70, 117), (70, 118), (70, 119), (70, 120), (70, 122), (71, 114), (71, 116), (71, 124), (72, 118), (72, 120), (72, 121), (72, 122), (72, 126), (73, 120), (73, 122), (73, 123), (73, 124), (73, 127), (73, 128), (74, 120), (74, 122), (74, 123), (74, 124), (74, 125), (74, 126), (74, 129), (75, 121), (75, 123), (75, 124), (75, 125), (75, 126), (75, 127), (75, 128), (76, 121), (76, 123), (76, 124), (76, 125), (76, 126), (76, 128), (77, 122), (77, 125), (77, 126), (77, 128), (78, 123), (78, 127), (79, 126), )
coordinates_499B3C = ((141, 142),
(142, 142), (142, 144), (142, 145), (143, 142), (143, 145), (144, 142), (144, 145), (145, 139), (145, 141), (145, 142), (145, 143), (145, 145), (146, 138), (146, 142), (146, 143), (146, 145), (147, 138), (147, 140), (147, 141), (147, 142), (147, 143), (147, 145), (148, 137), (148, 139), (148, 140), (148, 141), (148, 142), (148, 144), (149, 137), (149, 139), (149, 140), (149, 141), (149, 142), (149, 144), (150, 136), (150, 138), (150, 139), (150, 140), (150, 141), (150, 142), (150, 144), (151, 136), (151, 139), (151, 140), (151, 141), (151, 143), (152, 136), (152, 138), (152, 143), (153, 137), (153, 139), (153, 140), (153, 142), )
coordinates_CC3E4E = ()
coordinates_633263 = ((157, 112),
(157, 113), (158, 112), (158, 113), (159, 111), (159, 113), (160, 111), (160, 113), (161, 111), (161, 113), (162, 111), (162, 113), (162, 118), (163, 111), (163, 113), (163, 117), (163, 119), (163, 121), (164, 111), (164, 113), (164, 116), (164, 119), (164, 123), (165, 111), (165, 113), (165, 115), (165, 117), (165, 118), (165, 119), (165, 120), (165, 121), (165, 124), (166, 111), (166, 113), (166, 116), (166, 117), (166, 120), (166, 121), (166, 122), (166, 123), (166, 125), (167, 112), (167, 114), (167, 115), (167, 118), (167, 119), (167, 122), (167, 123), (167, 124), (167, 126), (167, 130), (168, 112), (168, 116), (168, 121), (168, 124), (168, 125), (168, 128), (168, 131), (168, 132), (168, 134), (169, 113), (169, 115), (169, 122), (169, 126), (169, 130), (169, 133), (170, 124), (170, 132), (171, 127), (171, 128), (171, 130), )
coordinates_4A9B3C = ((93, 143),
(94, 141), (94, 143), (95, 139), (95, 140), (95, 144), (96, 137), (96, 141), (96, 142), (96, 144), (97, 140), (97, 141), (97, 142), (97, 143), (97, 145), (98, 138), (98, 140), (98, 141), (98, 142), (98, 143), (98, 145), (99, 138), (99, 140), (99, 141), (99, 142), (99, 143), (99, 145), (100, 138), (100, 144), (101, 138), (101, 140), (101, 141), (101, 142), )
coordinates_218B22 = ((147, 155),
(147, 156), (148, 154), (148, 158), (148, 159), (148, 160), (148, 161), (148, 162), (148, 164), (149, 154), (149, 156), (149, 164), (150, 154), (150, 156), (150, 157), (150, 158), (150, 159), (150, 160), (150, 161), (150, 162), (150, 164), (151, 154), (151, 156), (151, 157), (151, 158), (151, 159), (151, 160), (151, 161), (151, 163), (152, 154), (152, 156), (152, 157), (152, 158), (152, 159), (152, 160), (152, 162), (153, 153), (153, 154), (153, 155), (153, 156), (153, 157), (153, 158), (153, 159), (153, 161), (154, 148), (154, 150), (154, 151), (154, 154), (154, 155), (154, 156), (154, 157), (154, 158), (154, 159), (154, 161), (155, 147), (155, 153), (155, 154), (155, 155), (155, 156), (155, 157), (155, 158), (155, 159), (155, 161), (156, 146), (156, 148), (156, 149), (156, 150), (156, 151), (156, 152), (156, 153), (156, 154), (156, 155), (156, 156),
(156, 157), (156, 158), (156, 159), (156, 161), (157, 145), (157, 147), (157, 148), (157, 149), (157, 150), (157, 151), (157, 152), (157, 153), (157, 154), (157, 155), (157, 156), (157, 157), (157, 158), (157, 159), (157, 161), (158, 144), (158, 146), (158, 147), (158, 148), (158, 149), (158, 150), (158, 151), (158, 152), (158, 153), (158, 154), (158, 155), (158, 156), (158, 157), (158, 158), (158, 159), (158, 160), (158, 161), (158, 162), (159, 144), (159, 146), (159, 147), (159, 148), (159, 149), (159, 150), (159, 151), (159, 152), (159, 153), (159, 154), (159, 155), (159, 156), (159, 157), (159, 158), (159, 159), (159, 160), (159, 162), (160, 143), (160, 145), (160, 146), (160, 147), (160, 148), (160, 149), (160, 150), (160, 151), (160, 152), (160, 153), (160, 154), (160, 155), (160, 156), (160, 157), (160, 158), (160, 159), (160, 160), (160, 162),
(161, 142), (161, 144), (161, 145), (161, 146), (161, 147), (161, 148), (161, 149), (161, 150), (161, 151), (161, 152), (161, 153), (161, 154), (161, 155), (161, 156), (161, 157), (161, 158), (161, 159), (161, 161), (161, 162), (162, 143), (162, 144), (162, 145), (162, 146), (162, 147), (162, 148), (162, 149), (162, 150), (162, 151), (162, 152), (162, 153), (162, 154), (162, 155), (162, 156), (162, 157), (162, 158), (162, 159), (162, 161), (163, 141), (163, 143), (163, 144), (163, 145), (163, 146), (163, 147), (163, 148), (163, 149), (163, 150), (163, 151), (163, 152), (163, 153), (163, 154), (163, 155), (163, 156), (163, 157), (163, 158), (163, 159), (163, 161), (164, 141), (164, 142), (164, 143), (164, 144), (164, 145), (164, 146), (164, 147), (164, 148), (164, 149), (164, 150), (164, 151), (164, 152), (164, 153), (164, 154), (164, 155), (164, 156),
(164, 157), (164, 158), (164, 159), (164, 161), (165, 140), (165, 142), (165, 143), (165, 144), (165, 145), (165, 146), (165, 147), (165, 148), (165, 149), (165, 150), (165, 151), (165, 152), (165, 153), (165, 154), (165, 155), (165, 156), (165, 157), (165, 158), (165, 160), (166, 139), (166, 141), (166, 142), (166, 143), (166, 144), (166, 145), (166, 146), (166, 147), (166, 148), (166, 149), (166, 150), (166, 151), (166, 152), (166, 153), (166, 154), (166, 155), (166, 156), (166, 157), (166, 158), (166, 160), (167, 139), (167, 141), (167, 142), (167, 143), (167, 144), (167, 145), (167, 146), (167, 149), (167, 150), (167, 151), (167, 152), (167, 153), (167, 154), (167, 157), (167, 159), (168, 138), (168, 140), (168, 141), (168, 142), (168, 143), (168, 144), (168, 145), (168, 148), (168, 150), (168, 151), (168, 152), (168, 153), (168, 155), (168, 156),
(168, 159), (169, 138), (169, 140), (169, 141), (169, 142), (169, 143), (169, 144), (169, 146), (169, 149), (169, 151), (169, 152), (169, 153), (169, 157), (169, 158), (170, 138), (170, 150), (170, 153), (171, 139), (171, 141), (171, 142), (171, 143), (171, 145), (171, 153), (172, 151), (172, 152), )
coordinates_228B22 = ((71, 140),
(72, 139), (72, 141), (72, 142), (72, 143), (72, 144), (72, 146), (73, 138), (73, 140), (74, 137), (74, 139), (74, 140), (74, 141), (74, 142), (74, 143), (74, 144), (74, 145), (74, 147), (75, 137), (75, 139), (75, 140), (75, 141), (75, 142), (75, 143), (75, 144), (75, 145), (75, 146), (75, 148), (76, 137), (76, 139), (76, 140), (76, 141), (76, 142), (76, 143), (76, 144), (76, 145), (76, 146), (76, 147), (76, 152), (76, 153), (76, 154), (76, 155), (77, 138), (77, 140), (77, 141), (77, 142), (77, 143), (77, 144), (77, 145), (77, 146), (77, 147), (77, 148), (77, 150), (77, 155), (77, 156), (77, 158), (78, 139), (78, 141), (78, 142), (78, 143), (78, 144), (78, 145), (78, 146), (78, 147), (78, 148), (78, 149), (78, 150), (78, 151), (78, 152), (78, 153), (78, 154), (78, 155), (78, 158),
(79, 140), (79, 142), (79, 143), (79, 144), (79, 145), (79, 146), (79, 147), (79, 148), (79, 149), (79, 150), (79, 151), (79, 152), (79, 153), (79, 154), (79, 155), (79, 156), (79, 157), (79, 159), (80, 140), (80, 142), (80, 143), (80, 144), (80, 145), (80, 146), (80, 147), (80, 148), (80, 149), (80, 150), (80, 151), (80, 152), (80, 153), (80, 154), (80, 155), (80, 156), (80, 157), (80, 159), (81, 141), (81, 143), (81, 144), (81, 145), (81, 146), (81, 147), (81, 148), (81, 149), (81, 150), (81, 151), (81, 152), (81, 153), (81, 154), (81, 155), (81, 156), (81, 157), (81, 158), (81, 159), (82, 141), (82, 143), (82, 144), (82, 145), (82, 146), (82, 147), (82, 148), (82, 149), (82, 150), (82, 151), (82, 152), (82, 153), (82, 154), (82, 155), (82, 156), (82, 157), (82, 158), (82, 159),
(83, 142), (83, 144), (83, 145), (83, 146), (83, 147), (83, 148), (83, 149), (83, 150), (83, 151), (83, 152), (83, 153), (83, 154), (83, 155), (83, 156), (83, 157), (83, 158), (83, 159), (83, 162), (84, 142), (84, 144), (84, 145), (84, 146), (84, 147), (84, 148), (84, 149), (84, 150), (84, 151), (84, 152), (84, 153), (84, 154), (84, 155), (84, 156), (84, 157), (84, 158), (84, 159), (84, 160), (84, 163), (85, 143), (85, 145), (85, 146), (85, 147), (85, 148), (85, 149), (85, 150), (85, 151), (85, 152), (85, 153), (85, 154), (85, 155), (85, 156), (85, 157), (85, 158), (85, 159), (85, 160), (85, 161), (85, 162), (85, 164), (86, 144), (86, 146), (86, 147), (86, 148), (86, 149), (86, 150), (86, 151), (86, 152), (86, 153), (86, 154), (86, 155), (86, 156), (86, 157), (86, 158), (86, 159),
(86, 160), (86, 161), (86, 162), (86, 164), (87, 145), (87, 149), (87, 150), (87, 151), (87, 152), (87, 153), (87, 154), (87, 155), (87, 156), (87, 157), (87, 158), (87, 159), (87, 160), (87, 161), (87, 162), (87, 164), (88, 146), (88, 148), (88, 149), (88, 150), (88, 151), (88, 152), (88, 153), (88, 154), (88, 155), (88, 156), (88, 157), (88, 158), (88, 159), (88, 160), (88, 161), (88, 163), (89, 149), (89, 151), (89, 152), (89, 153), (89, 154), (89, 155), (89, 156), (89, 157), (89, 158), (89, 159), (89, 160), (89, 161), (89, 163), (90, 150), (90, 152), (90, 153), (90, 154), (90, 155), (90, 156), (90, 157), (90, 158), (90, 159), (90, 160), (90, 161), (90, 162), (90, 163), (91, 150), (91, 152), (91, 153), (91, 154), (91, 155), (91, 156), (91, 157), (91, 158), (91, 159), (91, 160),
(91, 162), (92, 150), (92, 152), (92, 153), (92, 154), (92, 155), (92, 156), (92, 157), (92, 158), (92, 159), (92, 160), (92, 162), (93, 150), (93, 152), (93, 153), (93, 154), (93, 155), (93, 156), (93, 157), (93, 158), (93, 159), (93, 160), (93, 162), (94, 151), (94, 153), (94, 154), (94, 155), (94, 156), (94, 157), (94, 158), (94, 159), (94, 160), (94, 162), (95, 151), (95, 153), (95, 154), (95, 155), (95, 156), (95, 157), (95, 158), (95, 159), (95, 160), (95, 162), (96, 152), (96, 154), (96, 155), (96, 156), (96, 157), (96, 158), (96, 159), (96, 162), (97, 152), (97, 154), (97, 155), (97, 156), (97, 157), (97, 158), (97, 159), (97, 161), (98, 152), (98, 154), (98, 155), (98, 156), (98, 157), (98, 158), (98, 160), (99, 153), (99, 156), (99, 157), (99, 159), (100, 154), (100, 159),
(101, 156), (101, 159), )
coordinates_771286 = ((131, 120),
(131, 121), (132, 119), (132, 120), (133, 118), (133, 119), (134, 117), (135, 116), )
coordinates_DCA0DD = ((154, 121),
(155, 121), (155, 124), (156, 122), (156, 125), (157, 123), (157, 127), (158, 124), (158, 128), (159, 125), (159, 129), (160, 130), (161, 128), (161, 131), (162, 129), (162, 130), (162, 133), (162, 136), (163, 129), (163, 131), (163, 134), (163, 136), (164, 130), (164, 133), (164, 136), (165, 130), (165, 132), (165, 135), (166, 133), (166, 135), )
coordinates_9F522D = ((145, 137),
(146, 133), (146, 136), (147, 131), (147, 132), (147, 136), (148, 133), (148, 135), (149, 130), (149, 132), (149, 133), (149, 135), (150, 122), (150, 124), (150, 125), (150, 130), (150, 132), (150, 134), (151, 121), (151, 126), (151, 127), (151, 128), (151, 130), (151, 131), (151, 132), (151, 134), (152, 121), (152, 123), (152, 130), (152, 131), (152, 132), (152, 134), (153, 124), (153, 127), (153, 128), (153, 129), (153, 130), (153, 131), (153, 132), (153, 134), (154, 126), (154, 128), (154, 129), (154, 130), (154, 131), (154, 132), (154, 133), (154, 134), (154, 135), (155, 127), (155, 130), (155, 131), (155, 132), (155, 133), (155, 134), (155, 137), (155, 138), (155, 140), (156, 128), (156, 131), (156, 132), (156, 133), (156, 134), (156, 135), (156, 140), (157, 129), (157, 132), (157, 133), (157, 134), (157, 135), (157, 136), (157, 137), (157, 138),
(157, 140), (158, 130), (158, 133), (158, 134), (158, 135), (158, 136), (158, 137), (158, 140), (159, 132), (159, 139), (160, 133), (160, 135), (160, 136), (160, 138), (161, 137), )
coordinates_DDA0DD = ((76, 130),
(76, 131), (77, 130), (77, 132), (78, 129), (78, 131), (78, 133), (79, 129), (79, 131), (79, 133), (80, 128), (80, 130), (80, 131), (80, 133), (81, 127), (81, 129), (81, 130), (81, 131), (81, 133), (82, 126), (82, 128), (82, 129), (82, 130), (83, 123), (83, 124), (83, 127), (83, 128), (83, 129), (83, 132), (84, 121), (84, 126), (84, 127), (84, 128), (84, 130), (85, 121), (85, 123), (85, 124), (85, 125), (85, 126), (85, 127), (85, 129), (86, 120), (86, 122), (86, 123), (86, 124), (86, 125), (86, 128), (87, 119), (87, 121), (87, 122), (87, 123), (87, 124), (87, 127), (88, 118), (88, 125), (89, 121), (89, 122), (89, 124), (90, 117), (90, 119), (91, 118), (92, 117), )
coordinates_A0522D = ((85, 132),
(85, 135), (86, 131), (86, 135), (87, 130), (87, 132), (87, 134), (88, 129), (88, 131), (88, 132), (88, 134), (89, 129), (89, 131), (89, 132), (89, 134), (90, 128), (90, 130), (90, 131), (90, 132), (90, 134), (91, 127), (91, 129), (91, 130), (91, 131), (91, 132), (91, 134), (92, 120), (92, 122), (92, 123), (92, 124), (92, 125), (92, 128), (92, 129), (92, 130), (92, 131), (92, 132), (92, 134), (93, 118), (93, 127), (93, 128), (93, 129), (93, 130), (93, 131), (93, 132), (93, 134), (94, 118), (94, 128), (94, 129), (94, 130), (94, 131), (94, 132), (94, 134), (95, 119), (95, 121), (95, 122), (95, 123), (95, 124), (95, 125), (95, 126), (95, 129), (95, 130), (95, 131), (95, 132), (95, 134), (96, 128), (96, 130), (96, 131), (96, 132), (96, 133), (96, 135), (97, 128), (97, 135), (98, 129),
(98, 131), (98, 132), (98, 133), (98, 134), (98, 136), )
coordinates_781286 = ()
coordinates_79BADC = ((132, 123),
(132, 125), (132, 126), (132, 127), (132, 129), (133, 122), (133, 131), (134, 121), (134, 123), (134, 124), (134, 125), (134, 126), (134, 127), (134, 128), (134, 129), (134, 132), (135, 119), (135, 122), (135, 123), (135, 124), (135, 125), (135, 126), (135, 127), (135, 128), (135, 129), (135, 130), (135, 131), (135, 133), (136, 118), (136, 121), (136, 122), (136, 123), (136, 124), (136, 125), (136, 126), (136, 127), (136, 128), (136, 129), (136, 130), (136, 131), (136, 133), (137, 117), (137, 119), (137, 120), (137, 121), (137, 122), (137, 123), (137, 124), (137, 125), (137, 129), (137, 130), (137, 131), (137, 133), (138, 117), (138, 119), (138, 120), (138, 121), (138, 122), (138, 127), (138, 128), (138, 133), (139, 118), (139, 123), (139, 125), (139, 129), (139, 131), (139, 133), (140, 119), (140, 121), (140, 122), )
coordinates_ED0000 = ((161, 109),
(162, 108), (162, 109), (163, 107), (163, 109), (164, 107), (164, 109), (165, 106), (165, 109), (166, 106), (166, 109), (167, 106), (167, 109), (167, 110), (168, 105), (168, 107), (168, 108), (168, 110), (169, 105), (169, 107), (169, 108), (169, 109), (169, 111), (169, 118), (169, 119), (170, 104), (170, 106), (170, 107), (170, 108), (170, 109), (170, 110), (170, 117), (170, 120), (170, 121), (171, 103), (171, 105), (171, 106), (171, 107), (171, 108), (171, 109), (171, 110), (171, 113), (171, 114), (171, 115), (171, 118), (171, 119), (171, 122), (172, 103), (172, 105), (172, 106), (172, 107), (172, 108), (172, 109), (172, 110), (172, 111), (172, 117), (172, 118), (172, 119), (172, 120), (172, 121), (172, 124), (172, 125), (173, 104), (173, 105), (173, 106), (173, 107), (173, 108), (173, 109), (173, 110), (173, 111), (173, 112), (173, 113), (173, 114),
(173, 115), (173, 116), (173, 117), (173, 118), (173, 119), (173, 120), (173, 121), (173, 122), (173, 123), (173, 127), (174, 104), (174, 106), (174, 107), (174, 108), (174, 109), (174, 110), (174, 111), (174, 112), (174, 113), (174, 114), (174, 115), (174, 116), (174, 117), (174, 118), (174, 119), (174, 120), (174, 121), (174, 122), (174, 126), (175, 104), (175, 110), (175, 111), (175, 112), (175, 113), (175, 114), (175, 115), (175, 116), (175, 117), (175, 118), (175, 119), (175, 120), (175, 124), (176, 106), (176, 108), (176, 109), (176, 122), (177, 111), (177, 112), (177, 113), (177, 114), (177, 115), (177, 116), (177, 117), (177, 118), (177, 120), )
coordinates_7ABADC = ((104, 118),
(104, 120), (104, 121), (104, 122), (104, 124), (105, 118), (105, 125), (106, 118), (106, 120), (106, 121), (106, 122), (106, 123), (106, 124), (106, 127), (106, 129), (106, 132), (107, 118), (107, 121), (107, 122), (107, 123), (107, 124), (107, 125), (107, 132), (108, 119), (108, 126), (108, 127), (108, 132), (109, 121), (109, 123), (109, 124), (109, 125), (109, 126), (109, 127), (109, 128), (109, 129), (109, 130), (109, 132), (110, 126), (110, 127), )
coordinates_633264 = ((70, 103),
(70, 105), (70, 107), (71, 101), (71, 102), (71, 107), (72, 101), (72, 103), (72, 108), (73, 100), (73, 102), (73, 103), (73, 104), (73, 105), (73, 106), (73, 109), (73, 113), (73, 115), (74, 101), (74, 103), (74, 108), (74, 110), (74, 111), (74, 112), (74, 116), (74, 118), (75, 102), (75, 103), (75, 109), (75, 113), (75, 114), (75, 115), (76, 102), (76, 104), (76, 110), (76, 114), (76, 115), (76, 116), (76, 117), (76, 119), (77, 103), (77, 105), (77, 111), (77, 113), (77, 115), (77, 116), (77, 117), (77, 119), (78, 103), (78, 106), (78, 114), (78, 116), (78, 117), (78, 118), (78, 120), (79, 104), (79, 107), (79, 115), (79, 117), (79, 118), (79, 119), (79, 121), (80, 104), (80, 106), (80, 108), (80, 116), (80, 120), (80, 123), (81, 105), (81, 108), (81, 117), (81, 119), (81, 121),
(81, 122), (81, 124), (82, 106), (82, 109), (82, 120), (83, 106), (83, 109), (84, 107), (84, 109), (85, 108), (85, 109), (86, 108), (86, 110), (87, 110), (88, 109), (88, 110), (89, 109), (89, 110), (90, 110), )
|
# coding: utf-8
from fabric.api import task, env, require
from fabric.operations import sudo
from jetpack.helpers import RunAsAdmin
@task(task_class=RunAsAdmin, user=env.local_user, default=True)
def logs(lines=50):
    require('PROJECT')
    sudo('tail --lines=%s /var/log/%s.log' % (lines, env.PROJECT.appname))
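# Usage sketch (assuming Fabric 1.x task syntax, not part of the original):
#   fab logs              # last 50 lines of the app log
#   fab logs:lines=200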
|
import os.path
from visual_mpc.policy.cem_controllers import NCECostController
from visual_mpc.agent.benchmarking_agent import BenchmarkAgent
from visual_mpc.envs.mujoco_env.cartgripper_env.cartgripper_xz_grasp import CartgripperXZGrasp
BASE_DIR = '/'.join(str.split(__file__, '/')[:-1])
current_dir = os.path.dirname(os.path.realpath(__file__))
env_params = {
    # resolution sufficient for 16x anti-aliasing
    'viewer_image_height': 96,
    'viewer_image_width': 128,
    'cube_objects': True
}
agent = {
    'type': BenchmarkAgent,
    'env': (CartgripperXZGrasp, env_params),
    'data_save_dir': BASE_DIR,
    'T': 45,
    'image_height': 48,
    'image_width': 64,
    'num_load_steps': 16,
    'make_final_recording': '',
    'start_goal_confs': os.environ['VMPC_DATA_DIR'] + '/cartgripper_xz_grasp/expert_lifting_tasks',
    'current_dir': current_dir
}
policy = {
    'type': NCECostController,
    'action_order': ['x', 'z', 'grasp'],
    'initial_std_lift': 0.5,  # std dev. in xy
    'rejection_sampling': False,
    'selection_frac': 0.05,
    'verbose_frac_display': 0.05,
    'compare_to_expert': True,
    'replan_interval': 5,
    'num_samples': 800,
    'nce_conf_path': os.path.expanduser('~/Documents/control_embedding/experiments/catrgripper_xz_grasp/nce_experiment/exp.json'),
    'nce_restore_path': os.path.expanduser('~/Documents/control_embedding/experiments/catrgripper_xz_grasp/nce_experiment/base_model/model-20000')
}
config = {
    'traj_per_file': 128,
    'current_dir': current_dir,
    'save_data': True,
    'seperate_good': False,
    'save_raw_images': True,
    'start_index': 0,
    'end_index': 20,
    'agent': agent,
    'policy': policy,
    'ngroup': 1000
}
|
# Generated by Django 3.1 on 2021-11-15 21:48
from django.db import migrations
class Migration(migrations.Migration):
    dependencies = [
        ('orders', '0002_auto_20211018_2216'),
    ]

    operations = [
        migrations.AlterModelOptions(
            name='orders',
            options={'ordering': ('-created',)},
        ),
    ]
|
import pickle
import numpy as np
import cmfg
from Parser import Parser
from math import pi
from astropy import units as u
import itertools
from sklearn.neighbors import NearestNeighbors
from matplotlib import pyplot as plt
from random import random
from matplotlib import colors, ticker, rc
class MidpointNormalize(colors.Normalize):
    def __init__(self, vmin=None, vmax=None, midpoint=None, clip=False):
        self.midpoint = midpoint
        colors.Normalize.__init__(self, vmin, vmax, clip)

    def __call__(self, value, clip=None):
        # I'm ignoring masked values and all kinds of edge cases to make a
        # simple example...
        x, y = [self.vmin, self.midpoint, self.vmax], [0, 0.5, 1]
        return np.ma.masked_array(np.interp(value, x, y))
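# Piecewise-linear normalization: [vmin, midpoint, vmax] -> [0, 0.5, 1], so
# the neutral color of a diverging colormap (RdBu_r, bwr) sits at `midpoint`.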
with open('../out/POL05/data_POL05.pk', 'rb') as arch:
    results = pickle.load(arch)
N = results[0][0].shape[0]
Zt = np.zeros((N,N))
Zq = np.zeros((N,N))
Zu = np.zeros((N,N))
Zqr = np.zeros((N,N))
Zur = np.zeros((N,N))
Ncen = 0
for r in results:
    Ncen += 1
    Zt = Zt + r[0]
    Zq = Zq + r[1]
    Zu = Zu + r[2]
    Zqr = Zqr + r[3]
    Zur = Zur + r[4]
del results
Zt = Zt / Ncen * 1.e6
Zq = Zq / Ncen * 1.e6
Zu = Zu / Ncen * 1.e6
Zqr = Zqr / Ncen * 1.e6
Zur = Zur / Ncen * 1.e6
P = np.sqrt(Zq**2 + Zu**2)
alpha = np.arctan2(Zu, Zq) / 2
Pr = np.sqrt(Zqr**2 + Zur**2)
alphar = np.arctan2(Zur, Zqr) / 2
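# P is the polarization amplitude and alpha the polarization angle derived
# from the stacked Stokes maps: P = sqrt(Q**2 + U**2), alpha = arctan2(U, Q)/2
# (the factor 1/2 because Q and U describe an orientation, not a direction).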
# ADDITIONAL DATA »»»»»»»»»»»»»»»»»»»»»»»»»»»»»»»»»»»»»»
config = Parser('../set/POL05.ini')
X = cmfg.profile2d(config)
X.load_centers()
X.select_subsample_centers()
rmax = config.p.r_stop # rad
rmax_deg = rmax.to(u.deg).value
print('rmax_deg ------> ', rmax_deg)
# COMPUTE RADIAL PROFILE
N = 250
xr = np.linspace(-rmax_deg, rmax_deg, N)
yr = np.linspace(-rmax_deg, rmax_deg, N)
idxs = itertools.product(range(N), range(N))
idxs = np.array(list(idxs))
G = itertools.product(xr, yr)
G = np.array(list(G))
neigh = NearestNeighbors(n_neighbors=6, radius=0.01)
neigh.fit(G)
# --------
rr = np.linspace(0.05, 34, 300)
xpolar, ypolar = [], []
for k, r in enumerate(rr):
    nn = 4 + k
    tt = np.linspace(0, 2*pi, nn, endpoint=False)
    tt = tt + random()*2*pi
    x = r*np.cos(tt)
    y = r*np.sin(tt)
    xpolar.append(x)
    ypolar.append(y)
val_avg = []
for xp, yp in zip(xpolar, ypolar):
    vals = []
    for xx, yy in zip(xp, yp):
        dist, ind = neigh.kneighbors([[xx, yy]], 3, return_distance=True)
        dd = np.exp(-dist*25)
        dsts = dd.sum()
        zz = Zt[idxs[ind][0][:, 0], idxs[ind][0][:, 1]]
        vals.append(np.dot(dd, zz)/dsts)
    val_avg.append(np.mean(vals))
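# val_avg is an azimuthal average of Zt: each radius r is sampled at nn
# points on a randomly rotated ring, and Zt is interpolated at each point
# with exponential inverse-distance weights over the 3 nearest grid nodes.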
# PLOTS »»»»»»»»»»»»»»»»»»»»»»»»»»»»»»»»»»»»»»
plt.close('all')
fig = plt.figure(figsize=(7, 5))
ax = fig.add_subplot()
nr = MidpointNormalize(vmin=-15, vmax=15, midpoint=0.)
sc = ax.imshow(Zt, cmap='RdBu_r',
extent=[-rmax_deg, rmax_deg, -rmax_deg, rmax_deg],
norm=nr)
circle1 = plt.Circle((0, 0), rmax_deg, fc='None', linewidth=6,
#color=(0.0196, 0.188, 0.38, 0.5))
color='white')
ax.add_patch(circle1)
cb = plt.colorbar(sc, ax=ax, shrink=0.8, aspect=60)
cb.set_label(r'averaged temperature [$\mu$K]')
ax.set_xlabel('x [deg]')
ax.set_ylabel('y [deg]')
ax.xaxis.set_major_formatter(ticker.StrMethodFormatter("{x:.1f}"))
#cb.formatter.set_powerlimits((1, 1))
cb.update_ticks()
plt.tight_layout()
fig.savefig('Zt_POL05.png')
# PLOTS »»»»»»»»»»»»»»»»»»»»»»»»»»»»»»»»»»»»»»
plt.close('all')
fig = plt.figure(figsize=(7, 5))
ax = fig.add_subplot()
nr = MidpointNormalize(vmin=Zq.min(), vmax=Zq.max(), midpoint=0.)
sc = ax.imshow(Zq, cmap='bwr',
extent=[-rmax_deg, rmax_deg, -rmax_deg, rmax_deg],
norm=nr)
circle1 = plt.Circle((0, 0), rmax_deg, fc='None', linewidth=6,
color='white')
ax.add_patch(circle1)
cb = plt.colorbar(sc, ax=ax, shrink=0.8, aspect=60)
cb.set_label(r'$\times\; 10^{-6}\quad$ Q Stokes parameter')
ax.set_xlabel('x [deg]')
ax.set_ylabel('y [deg]')
#ax.xaxis.set_major_formatter(ticker.StrMethodFormatter("{x:.1f}"))
#cb.formatter.set_powerlimits((-6, -6))
#cb.update_ticks()
plt.tight_layout()
fig.savefig('Zq_POL05.png')
# PLOTS »»»»»»»»»»»»»»»»»»»»»»»»»»»»»»»»»»»»»»
plt.close('all')
fig = plt.figure(figsize=(7, 5))
ax = fig.add_subplot()
nr = MidpointNormalize(vmin=Zq.min(), vmax=Zq.max(), midpoint=0.)
sc = ax.imshow(Zu, cmap='bwr',
extent=[-rmax_deg, rmax_deg, -rmax_deg, rmax_deg],
norm=nr)
circle1 = plt.Circle((0, 0), rmax_deg, fc='None', linewidth=6,
color='white')
ax.add_patch(circle1)
cb = plt.colorbar(sc, ax=ax, shrink=0.8, aspect=60)
cb.set_label(r'$\times\; 10^{-6}\quad$ U Stokes parameter')
ax.set_xlabel('x [deg]')
ax.set_ylabel('y [deg]')
#ax.xaxis.set_major_formatter(ticker.StrMethodFormatter("{x:.1f}"))
#cb.formatter.set_powerlimits((-6, -6))
#cb.update_ticks()
plt.tight_layout()
fig.savefig('Zu_POL05.png')
# PLOTS »»»»»»»»»»»»»»»»»»»»»»»»»»»»»»»»»»»»»»
plt.close('all')
fig = plt.figure(figsize=(7, 5))
ax = fig.add_subplot()
nr = MidpointNormalize(vmin=Zqr.min(), vmax=Zqr.max(), midpoint=0.)
sc = ax.imshow(Zqr, cmap='bwr',
extent=[-rmax_deg, rmax_deg, -rmax_deg, rmax_deg],
norm=nr)
circle1 = plt.Circle((0, 0), rmax_deg, fc='None', linewidth=6,
color='white')
ax.add_patch(circle1)
cb = plt.colorbar(sc, ax=ax, shrink=0.8, aspect=60)
cb.set_label(r'$\times\; 10^{-6}\quad$ Q$_r$ Stokes parameter')
ax.set_xlabel('x [deg]')
ax.set_ylabel('y [deg]')
#ax.xaxis.set_major_formatter(ticker.StrMethodFormatter("{x:.1f}"))
#cb.formatter.set_powerlimits((-6, -6))
#cb.update_ticks()
plt.tight_layout()
fig.savefig('Zqr_POL05.png')
# PLOTS »»»»»»»»»»»»»»»»»»»»»»»»»»»»»»»»»»»»»»
plt.close('all')
fig = plt.figure(figsize=(7, 5))
ax = fig.add_subplot()
nr = MidpointNormalize(vmin=Zq.min(), vmax=Zq.max(), midpoint=0.)
sc = ax.imshow(Zur, cmap='bwr',
extent=[-rmax_deg, rmax_deg, -rmax_deg, rmax_deg],
norm=nr)
circle1 = plt.Circle((0, 0), rmax_deg, fc='None', linewidth=6,
color='white')
ax.add_patch(circle1)
cb = plt.colorbar(sc, ax=ax, shrink=0.8, aspect=60)
cb.set_label(r'$\times\; 10^{-6}\quad$ U$_r$ Stokes parameter')
ax.set_xlabel('x [deg]')
ax.set_ylabel('y [deg]')
#ax.xaxis.set_major_formatter(ticker.StrMethodFormatter("{x:.1f}"))
#cb.formatter.set_powerlimits((-6, -6))
#cb.update_ticks()
plt.tight_layout()
fig.savefig('Zur_POL05.png')
plt.close('all')
fig = plt.figure(figsize=(7, 5))
ax = fig.add_subplot()
ax.plot(rr, val_avg)
ax.axhline(0, linestyle='--', color='silver')
ax.set_xlabel('radial distance [deg]')
ax.set_ylabel(r'averaged temperature [$\times 10^6\,\mu$K]')
plt.tight_layout()
fig.savefig('Zt_POL05_radial.png')
# PLOTS »»»»»»»»»»»»»»»»»»»»»»»»»»»»»»»»»»»»»»
plt.close('all')
fig = plt.figure(figsize=(7, 5))
ax = fig.add_subplot()
nr = MidpointNormalize(vmin=Zq.min(), vmax=Zq.max(), midpoint=0.)
sc = ax.imshow(Zur, cmap='bwr',
extent=[-rmax_deg, rmax_deg, -rmax_deg, rmax_deg],
norm=nr)
circle1 = plt.Circle((0, 0), rmax_deg, fc='None', linewidth=6,
color='white')
ax.add_patch(circle1)
cb = plt.colorbar(sc, ax=ax, shrink=0.8, aspect=60)
cb.set_label(r'$\times\; 10^{-6}\quad$ U$_r$ Stokes parameter')
ax.set_xlabel('x [deg]')
ax.set_ylabel('y [deg]')
#ax.xaxis.set_major_formatter(ticker.StrMethodFormatter("{x:.1f}"))
#cb.formatter.set_powerlimits((-6, -6))
#cb.update_ticks()
plt.tight_layout()
fig.savefig('Zur_b_POL05.png')
# PLOTS »»»»»»»»»»»»»»»»»»»»»»»»»»»»»»»»»»»»»»
# P and angle -----------------------------
plt.close('all')
fig = plt.figure(figsize=(7, 5))
ax = fig.add_subplot()
sc = ax.imshow(P, cmap='pink_r',
extent=[-rmax_deg, rmax_deg, -rmax_deg, rmax_deg])
cb = plt.colorbar(sc, ax=ax, shrink=0.8, aspect=60)
cb.set_label(r'$\times\; 10^{-6}\quad$ P')
ax.set_xlabel('x [deg]')
ax.set_ylabel('y [deg]')
plt.tight_layout()
fig.savefig('P_POL05.png')
# PLOTS »»»»»»»»»»»»»»»»»»»»»»»»»»»»»»»»»»»»»»
plt.close('all')
fig = plt.figure(figsize=(7, 5))
ax = fig.add_subplot()
sc = ax.imshow(alpha, cmap='bwr',
extent=[-rmax_deg, rmax_deg, -rmax_deg, rmax_deg])
cb = plt.colorbar(sc, ax=ax, shrink=0.8, aspect=60)
cb.set_label(r'$\alpha$ [rad]')
ax.set_xlabel('x [deg]')
ax.set_ylabel('y [deg]')
plt.tight_layout()
fig.savefig('alpha_POL05.png')
# PLOTS »»»»»»»»»»»»»»»»»»»»»»»»»»»»»»»»»»»»»»
plt.close('all')
fig = plt.figure(figsize=(7, 5))
ax = fig.add_subplot()
ax.hist(alpha)
ax.set_xlabel('alpha [rad]')
ax.set_ylabel('dN/d(alpha)')
plt.tight_layout()
fig.savefig('alpha_hist_POL05.png')
# PLOTS »»»»»»»»»»»»»»»»»»»»»»»»»»»»»»»»»»»»»»
# P and angle -----------------------------
plt.close('all')
fig = plt.figure(figsize=(7, 5))
ax = fig.add_subplot()
sc = ax.imshow(Pr, cmap='pink_r',
extent=[-rmax_deg, rmax_deg, -rmax_deg, rmax_deg])
cb = plt.colorbar(sc, ax=ax, shrink=0.8, aspect=60)
cb.set_label(r'$\times\; 10^{-6}\quad$ P')
ax.set_xlabel('x [deg]')
ax.set_ylabel('y [deg]')
plt.tight_layout()
fig.savefig('P_r_POL05.png')
# PLOTS »»»»»»»»»»»»»»»»»»»»»»»»»»»»»»»»»»»»»»
plt.close('all')
fig = plt.figure(figsize=(7, 5))
ax = fig.add_subplot()
sc = ax.imshow(alphar, cmap='bwr',
extent=[-rmax_deg, rmax_deg, -rmax_deg, rmax_deg])
cb = plt.colorbar(sc, ax=ax, shrink=0.8, aspect=60)
cb.set_label(r'$\alpha$ [rad]')
ax.set_xlabel('x [deg]')
ax.set_ylabel('y [deg]')
plt.tight_layout()
fig.savefig('alpha_r_POL05.png')
# tails
tx = G[:,0]
ty = G[:,1]
# heads
dx = (P*np.cos(alpha)).reshape(N*N)*10000
dy = (P*np.sin(alpha)).reshape(N*N)*10000
hx = tx + dx
hy = ty + dy
filt = dx > 1.e-4
for i in range(N*N):
    if filt[i]:
        print(tx[i], hx[i], ty[i], hy[i])
# PLOTS »»»»»»»»»»»»»»»»»»»»»»»»»»»»»»»»»»»»»»
plt.close('all')
fig = plt.figure(figsize=(7, 5))
ax1 = fig.add_subplot(2, 1, 1)
zz = Zq.reshape(N*N)
ax1.hist(zz, bins=50, density=True)
ax1.set_xlim(-1.5, 1.5)
ax1.set_xlabel('Q')
ax1.set_ylabel(r'dN/dQ')
ax2 = fig.add_subplot(2, 1, 2)
zz = Zu.reshape(N*N)
ax2.hist(zz, bins=50, density=True)
ax2.set_xlim(-1.5, 1.5)
ax2.set_xlabel('U')
ax2.set_ylabel(r'dN/dU')
plt.tight_layout()
fig.savefig('hists_POL05_radial.png')
# PLOTS »»»»»»»»»»»»»»»»»»»»»»»»»»»»»»»»»»»»»»
ql = []
ul = []
pl = []
al = []
for i, j in idxs:
    r = np.sqrt(xr[i]**2 + yr[j]**2)
    if r < rmax_deg:
        if abs(Zq[i, j]) > 1.e-6 and abs(Zu[i, j]) > 1.e-6:
            ql.append(Zq[i, j])
            ul.append(Zu[i, j])
            P_tmp = np.sqrt(Zq[i, j]**2 + Zu[i, j]**2)
            alpha_tmp = np.arctan2(Zu[i, j], Zq[i, j]) / 2
            pl.append(P_tmp)
            al.append(alpha_tmp)
ql = np.array(ql)
ul = np.array(ul)
font = {'family' : 'normal',
'weight' : 'medium',
'size' : 14}
rc('font', **font)
plt.close('all')
fig = plt.figure(figsize=(7, 5))
ax = fig.add_subplot()
ax.plot(ql, ul-ql, marker='o', markersize=12, color=(0, 0.7, 1, 0.01), linestyle='None')
ax.set_xlim(-0.7, 0.7)
ax.set_ylim(-0.25, 0.25)
ax.grid(color='silver')
ax.set_xlabel(r'Q [$\times 10^6 \, \mu$K]', fontsize=16)
ax.set_ylabel(r'U - Q [$\times 10^6 \, \mu$K]', fontsize=16)
plt.tight_layout()
fig.savefig('qu_POL05.png')
|
#!/usr/bin/env python
# coding: utf-8
# In[1]:
import pandas as pd
import numpy as np
# In[2]:
txd = pd.read_csv("luceneDocVector.csv", header=None, sep='\t')
print(txd)
# In[3]:
txd = txd.to_numpy()
# In[4]:
U, S, V = np.linalg.svd(txd)
S = np.diag(S)
k=50
Uk = U[:, :k]
Sk = S[:k, :k]
Vk = V[:k, :]
Ak = Uk.dot(Sk).dot(Vk)
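# Rank-k truncated SVD: keeping the k largest singular values gives the
# closest rank-k approximation to the (assumed) document-term matrix in the
# Frobenius norm (Eckart-Young), the core step of LSA/LSI.
# Optional sanity check of the relative reconstruction error:
# rel_err = np.linalg.norm(txd - Ak) / np.linalg.norm(txd)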
# In[5]:
print("Rank %d approximation of: " %k)
print(Ak)
print()
print(txd)
|
import toml
import importlib
pytom = toml.load("pyproject.toml")
package_name = pytom["project"]["name"]
author_name = " - ".join(pytom["project"]["authors"])
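# NOTE: this join assumes "authors" is a list of plain strings; with PEP 621
# metadata each author is a table, so the names would be joined instead,
# e.g. " - ".join(a.get("name", "") for a in pytom["project"]["authors"]).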
doc_dir_name = "docs"
doctest_notebooks_glob = "notebooks/doc-*.ipynb"
mymodule = importlib.import_module(package_name)
boilerplate_branch = "django-app"
|
from rest_framework.response import Response
from rest_framework.views import APIView
from Harvest.utils import CORSBrowserExtensionView
from plugins.redacted_uploader.create_project import create_transcode_project
from upload_studio.serializers import ProjectDeepSerializer
class TranscodeTorrent(CORSBrowserExtensionView, APIView):
    def post(self, request):
        tracker_id = int(request.data['tracker_id'])
        transcode_type = request.data['transcode_type']
        project = create_transcode_project(tracker_id, transcode_type)
        return Response(ProjectDeepSerializer(project).data)
|
from distance_sensor_118x import DistanceSensor
sensor = DistanceSensor('/dev/USB_Distance')
sensor.open()
sensor.printSettings()
sensor.close()
|
from django.contrib import admin
# this import does not work, I don't know why:
# from customer.models import customer
from . import models as customer_model
# for using permalink only...
from django.urls import reverse
from django.utils.html import format_html
# for formfield_overrides only...
from django.forms import Textarea
from django.db import models
class CustomerAdmin(admin.ModelAdmin):
    fields = [('name', 'slug'),
              ('age', 'phone_number', 'picture'),
              ('sex', 'email_address', 'is_vip'),
              'address',
              ('creation_date', 'made_vip_by'),
              'description']
    # must be read-only, otherwise the admin raises an error
    readonly_fields = ['slug', 'creation_date']
    list_display = ('name', 'permalink', 'sex', 'is_vip', 'made_vip_by', 'creation_date')
    list_filter = ('is_vip', 'sex', 'made_vip_by', 'creation_date')

    def permalink(self, obj):
        url = reverse("customer:show_one_customer",
                      kwargs={"slug": obj.slug})
        # Unicode hex b6 is the Pilcrow sign
        return format_html('<a href="{}">{}</a>'.format(url, '\xb6'))

    formfield_overrides = {
        models.TextField: {'widget': Textarea(attrs={'rows': 3.5, 'cols': 100})},
    }
#admin.site.unregister(PerPiece)
admin.site.register(customer_model.Customer, CustomerAdmin)
|
import unittest
from math import sqrt, ceil
def count_steps_to_center_from(x):
    if x == 1:
        return 0
    # Get ring info
    ring = int(ceil((sqrt(x) - 1) / 2))  # 0-based ring around the center.
    side_length = 2*ring + 1             # Length of each of the 4 sides of the ring.
    perimeter = ring * 8                 # Count of numbers in this ring.
    largest = side_length * side_length  # Largest number in this ring.
    smallest = largest - perimeter       # Largest number in the previous ring.
    # Find distance from center of closest side.
    ring_pos = x - smallest                             # Position within the current ring; [1, perimeter]
    eighth = ring_pos / ring                            # Position within the current ring, in eighths; [0, 8]
    side_center_pct = (eighth % 2) - 1                  # Percentage from side center; [-1, 1]
    side_center_offset = round(side_center_pct * ring)  # Offset of x from side center; [-ring, ring]
    side_center_distance = abs(side_center_offset)      # Distance from side center; [0, ring]
    # Find distance to center of grid via Manhattan distance.
    steps_to_center = ring + side_center_distance
    return steps_to_center
# Unit Tests ########################################################
class TestFindSteps(unittest.TestCase):
    def test_1(self):
        self.assertEqual(count_steps_to_center_from(1), 0)

    def test_12(self):
        self.assertEqual(count_steps_to_center_from(12), 3)

    def test_23(self):
        self.assertEqual(count_steps_to_center_from(23), 2)

    def test_1024(self):
        self.assertEqual(count_steps_to_center_from(1024), 31)
# Entry Point #######################################################
if __name__ == "__main__":
    code = 361527
    print('Distance to center:', count_steps_to_center_from(code))  # = 326
    unittest.main()
|
from solana.publickey import PublicKey
VOTE_PROGRAM_ID = PublicKey("Vote111111111111111111111111111111111111111")
"""Program id for the native vote program."""
VOTE_STATE_LEN: int = 3731
"""Size of vote account."""
|
#!/usr/bin/env python
"""
Collect BigBOSS throughput pieces from bbspecsim into
Specter throughput format.
TODO: Factor out the parameters at the top into a configuration file.
Stephen Bailey, LBL
January 2013
"""
import sys
import os
import numpy as N
import fitsio
from scipy.interpolate import InterpolatedUnivariateSpline as SplineFit
#--- Input Parameters ---
#- TODO: Move these to a configuration file
#- Wavelength range and resolution
wmin, wmax, dw = 3500, 9900, 0.1
#- Fiber parameters
fiber_size_um = 120.0 #- diameter in microns
fiber_frd_loss = 0.970 #- Focal Ratio Degradation
fiber_connection_loss = 0.975 #- Loss from fiber connections
fiberlen = 40.0 #- meters
#- Default exposure time
exptime = 15*60.0 #- 15 minutes -> seconds
#- Input directories, corrector and spectrograph versions
bbdir = os.environ['BBSPECSIM_DIR']
bbcorr = 'BB_CORR_20120402a'
bbspec = 'BB_SPEC_20120428difdet'
extinction_file = bbdir + '/sky/ZenExtinct-KPNO-FTS.fits'
corrector_file = '%s/designs/%s/%s.fits' % (bbdir, bbcorr, bbcorr)
#- use "design_file % n" to get file for spectrograph n=1,2,3
design_file = '%s/designs/%s/%s_%%d.fits.gz' % (bbdir, bbspec, bbspec)
#--- Data to fill ---
thru = dict()
ww = N.arange(wmin, wmax+dw/2, dw)
nw = len(ww)
thru['wavelength'] = ww
#- Vectors of throughputs common to all spectrographs
thru['extinction'] = None #- Atmosphere
thru['optics'] = None #- Telescope + corrector but not spectrographs
thru['fiberinput'] = None #- Fiber input geometric losses
thru['fiber'] = None #- Fiber optics
#- Individual spectrographs
thru['B'] = N.zeros(nw)
thru['R'] = N.zeros(nw)
thru['I'] = N.zeros(nw)
#- Scalars
thru['effarea'] = None
thru['fiberdia'] = None
thru['exptime'] = exptime
#--- Load throughputs from bbspecsim files ---
#- Atmospheric extinction: magnitudes per airmass
#- throughput = 10**(-0.4*airmass*extinction)
atm = fitsio.read(extinction_file, 1, lower=True)
s = SplineFit(atm['wavelength'], atm['extinction'])
thru['extinction'] = s(ww)
#- Telescope + Corrector model
tele = fitsio.read(corrector_file, upper=True)
tw = tele['WAVE'][0]
tx = tele['MIRROREF'][0] * tele['CORRTRANS'][0] * tele['ARTRANS'][0]**tele['NSURF'][0]
s = SplineFit(tw, tx)
thru['optics'] = s(ww)
thru['effarea'] = N.pi * (tele['MIRRORDIAM'][0]*100)**2 / 4 * tele['VIGN'][0] #- cm^2
thru['fiberdia'] = fiber_size_um / tele['FPSCALE'][0] #- microns -> arcsec
#- Fiber input model
#- WARNING: this is seeing and object shape dependent, and uses a
#- file leftover from a previous bbspecsim run.
fin = N.loadtxt(bbdir+'/throughput/fiberee.txt').T
s = SplineFit(fin[0], fin[1])
thru['fiberinput'] = s(ww)
#- Fiber attenuation (dB/km) = 10*log10(in/out)
fatt = N.loadtxt(bbdir+'/throughput/FiberAttenuation.txt').T
fw = fatt[0]*10
fa = fatt[1]
fx = 10**(-fa/10.0 * (fiberlen/1000.0) )
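#- e.g. an attenuation of 3 dB/km over a 40 m fiber gives
#- 10**(-3/10 * 0.040) ~ 0.973 transmission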
thru['fiber'] = N.interp(ww, fw, fx) #- linear interp instead of ringy spline
thru['fiber'] *= fiber_frd_loss
thru['fiber'] *= fiber_connection_loss
#- Spectrograph efficiencies
specname = {1:'B', 2:'R', 3:'I'}
for ispec in (1,2,3):
    spec = fitsio.read(design_file % ispec, 2, lower=True)
    spw = spec['lambda'][0] * 10  #- nm -> A
    s = SplineFit(spw, spec['spectotal'][0]*spec['detector'][0])
    #- Don't extrapolate spline
    ii = (spw[0] <= ww) & (ww <= spw[-1])
    thru[specname[ispec]][ii] = s(ww[ii])
#--- Write one fits file per spectrograph ---
hdr = list()
hdr.append(dict(name='EXTNAME', value='THROUGHPUT'))
hdr.append(dict(name='EXPTIME', value=thru['exptime'],
comment='Default exposure time [seconds]'))
hdr.append(dict(name='EFFAREA', value=thru['effarea'],
comment='effective mirror area [cm^2]'))
hdr.append(dict(name='FIBERDIA', value=thru['fiberdia'],
comment='Fiber diameter [arcsec]'))
wavelength = thru['wavelength']
extinction = thru['extinction']
fiberinput = thru['fiberinput']
for camera in specname.values():
    throughput = thru['optics'] * thru['fiber'] * thru[camera]
    thru[camera+'tot'] = throughput * thru['fiberinput']
    ii = N.where(throughput > 0)
    data = N.rec.fromarrays((wavelength[ii], extinction[ii], throughput[ii], fiberinput[ii]),
                            names='wavelength,extinction,throughput,fiberinput')
    outfile = 'bbthru-%s.fits' % camera
    fitsio.write(outfile, data, header=hdr, clobber=True)
#--- DEBUG ---
import pylab as P
P.ion()
P.rcParams['legend.fontsize'] = 12
P.plot(ww, thru['optics'], 'k-', lw=1, alpha=0.5, label='Tele+Corr Optics')
P.plot(ww, thru['fiberinput'], 'g--', lw=2, label='Fiber Input (%.2f arcsec)' % thru['fiberdia'])
P.plot(ww, thru['fiber'], 'm:', lw=3, label='Fibers (%.1f m)' % fiberlen)
P.fill_between(ww, thru['Btot'], color='b', alpha=0.5)
P.plot(ww, thru['B'], color='b', label='B spectro total')
P.fill_between(ww, thru['Rtot'], color='r', alpha=0.5)
P.plot(ww, thru['R'], color='r', label='R spectro total')
P.fill_between(ww, thru['Itot'], color='k', alpha=0.5)
P.plot(ww, thru['I'], color='k', label='I spectro total')
P.legend(loc='upper left', frameon=False)
P.title('BigBOSS Throughputs (no atmosphere)')
P.xlabel('Wavelength [A]')
P.ylabel('Throughput')
P.savefig('bbthru.png', dpi=80)
import IPython
IPython.embed()
#--- DEBUG ---
|
"""
Using monotonic decreasing stack.
Time complexity: O(N)
Space complexity: O(1) not considering result.
"""
from typing import List
class Solution:
    def findBuildings(self, heights: List[int]) -> List[int]:
        mono_stack = []
        for i in range(len(heights)):
            while mono_stack and heights[i] >= heights[mono_stack[-1]]:
                mono_stack.pop()
            mono_stack.append(i)
        return mono_stack
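# Minimal usage sketch (example input assumed, not part of the original):
# indices kept on the stack are strictly taller than every building to
# their right, i.e. they have an unobstructed ocean view.
if __name__ == "__main__":
    assert Solution().findBuildings([4, 2, 3, 1]) == [0, 2, 3]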
|