content stringlengths 5 1.05M |
|---|
# SVG parser in Python
# Copyright (C) 2013 -- CJlano < cjlano @ free.fr >
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
from __future__ import absolute_import
import traceback
import sys
import os
import copy
import re
import xml.etree.ElementTree as etree
import itertools
import operator
import json
from .geometry import *
# XML namespace prefix ElementTree prepends to every SVG tag name
svg_ns = '{http://www.w3.org/2000/svg}'
# Regex commonly used
# Matches integers, decimals and scientific notation (optional sign)
number_re = r'[-+]?(?:\d+(?:\.\d*)?|\.\d+)(?:[eE][-+]?\d+)?'
# Recognized SVG length units
unit_re = r'em|ex|px|in|cm|mm|pt|pc|%'

# Unit converter
# Multiplicative factor converting each unit to pixels (assumes 96 dpi)
unit_convert = {
    None: 1,           # Default unit (same as pixel)
    'px': 1,           # px: pixel. Default SVG unit
    'em': 10,          # 1 em = 10 px FIXME
    'ex': 5,           # 1 ex = 5 px FIXME
    'in': 96,          # 1 in = 96 px
    'cm': 96 / 2.54,   # 1 cm = 1/2.54 in
    'mm': 96 / 25.4,   # 1 mm = 1/25.4 in
    'pt': 96 / 72.0,   # 1 pt = 1/72 in
    'pc': 96 / 6.0,    # 1 pc = 1/6 in
    '%' : 1 / 100.0    # 1 percent
}
class Transformable:
    '''Abstract base for objects that can be geometrically drawn & transformed.

    A Transformable holds a list of child Transformable items plus a local
    affine transform (self.matrix), an accumulated rotation angle and the
    current viewport size used to resolve percentage lengths.
    '''
    def __init__(self, elt=None, verbose=True):
        # a 'Transformable' is represented as a list of Transformable items
        self.items = []
        self.verbose = verbose
        # Default id: the object address; overridden by the XML 'id' attribute
        self.id = hex(id(self))
        # Unit transformation matrix on init
        self.matrix = Matrix()
        self.rotation = 0
        self.viewport = Point(800, 600)  # default viewport is 800x600
        if elt is not None:
            self.id = elt.get('id', self.id)
            # Parse transform attribute to update self.matrix
            self.getTransformations(elt)

    def bbox(self):
        '''Bounding box as a (Point(xmin, ymin), Point(xmax, ymax)) pair.'''
        bboxes = [x.bbox() for x in self.items]
        if len(bboxes) < 1:
            # No children: degenerate zero-size box at the origin
            return (Point(0, 0), Point(0, 0))
        xmin = min([b[0].x for b in bboxes])
        xmax = max([b[1].x for b in bboxes])
        ymin = min([b[0].y for b in bboxes])
        ymax = max([b[1].y for b in bboxes])
        return (Point(xmin, ymin), Point(xmax, ymax))

    # Parse transform field
    def getTransformations(self, elt):
        '''Accumulate every SVG 'transform' operation of elt into self.matrix.'''
        t = elt.get('transform')
        if t is None:
            return
        svg_transforms = [
            'matrix', 'translate', 'scale', 'rotate', 'skewX', 'skewY']
        # match any SVG transformation with its parameter (until final parenthese)
        # [^)]* == anything but a closing parenthese
        # '|'.join == OR-list of SVG transformations
        # (raw string: '\)' in a plain literal is an invalid escape sequence)
        transforms = re.findall(
            '|'.join([x + r'[^)]*\)' for x in svg_transforms]), t)
        for t in transforms:
            op, arg = t.split('(')
            op = op.strip()
            # Keep only numbers
            arg = [float(x) for x in re.findall(number_re, arg)]
            if self.verbose:
                print('transform: ' + op + ' ' + str(arg))
            if op == 'matrix':
                self.matrix *= Matrix(arg)
            if op == 'translate':
                tx = arg[0]
                # ty defaults to 0 when omitted (per the SVG spec)
                if len(arg) == 1:
                    ty = 0
                else:
                    ty = arg[1]
                self.matrix *= Matrix([1, 0, 0, 1, tx, ty])
            if op == 'scale':
                sx = arg[0]
                # Uniform scaling when sy is omitted
                if len(arg) == 1:
                    sy = sx
                else:
                    sy = arg[1]
                self.matrix *= Matrix([sx, 0, 0, sy, 0, 0])
            if op == 'rotate':
                self.rotation += arg[0]
                cosa = math.cos(math.radians(arg[0]))
                sina = math.sin(math.radians(arg[0]))
                if len(arg) != 1:
                    # Rotation about (tx, ty): translate, rotate, translate back
                    tx, ty = arg[1:3]
                    self.matrix *= Matrix([1, 0, 0, 1, tx, ty])
                self.matrix *= Matrix([cosa, sina, -sina, cosa, 0, 0])
                if len(arg) != 1:
                    self.matrix *= Matrix([1, 0, 0, 1, -tx, -ty])
            if op == 'skewX':
                tana = math.tan(math.radians(arg[0]))
                self.matrix *= Matrix([1, 0, tana, 1, 0, 0])
            if op == 'skewY':
                tana = math.tan(math.radians(arg[0]))
                self.matrix *= Matrix([1, tana, 0, 1, 0, 0])

    def transform(self, matrix=None):
        '''Recursively propagate self.matrix (composed with matrix) to children.'''
        if matrix is None:
            matrix = self.matrix
        else:
            matrix *= self.matrix
        for x in self.items:
            x.transform(matrix)

    def length(self, v, mode='xy'):
        '''Convert an SVG length string (e.g. "10cm", "50%") to pixels.

        mode ('x', 'y' or 'xy') selects which viewport dimension a
        percentage refers to. Returns 0 for a missing (None) attribute.
        Raises TypeError when no numeric value can be found in v.
        '''
        # Handle empty (non-existing) length element
        if v is None:
            return 0
        # Get length value
        m = re.search(number_re, v)
        if m:
            value = m.group(0)
        else:
            # (fixed: original message was missing the leading space)
            raise TypeError(v + ' is not a valid length')
        # Get length unit
        m = re.search(unit_re, v)
        if m:
            unit = m.group(0)
        else:
            unit = None
        if unit == '%':
            # Percentages are relative to the viewport size
            if mode == 'x':
                return float(value) * unit_convert[unit] * self.viewport.x
            if mode == 'y':
                return float(value) * unit_convert[unit] * self.viewport.y
            if mode == 'xy':
                return float(value) * unit_convert[unit] * self.viewport.x  # FIXME

        return float(value) * unit_convert[unit]

    def xlength(self, x):
        return self.length(x, 'x')

    def ylength(self, y):
        return self.length(y, 'y')

    def flatten(self):
        '''Flatten the SVG objects nested list into a flat (1-D) list,
        removing Groups'''
        # http://rightfootin.blogspot.fr/2006/09/more-on-python-flatten.html
        # Assigning a slice a[i:i+1] with a list actually replaces the a[i]
        # element with the content of the assigned list
        i = 0
        flat = copy.deepcopy(self.items)
        while i < len(flat):
            # Bounds check added: an empty trailing Group used to shrink the
            # list and make flat[i] raise IndexError
            while i < len(flat) and isinstance(flat[i], Group):
                flat[i:i+1] = flat[i].items
            i += 1
        return flat

    def scale(self, ratio):
        for x in self.items:
            x.scale(ratio)
        return self

    def translate(self, offset):
        for x in self.items:
            x.translate(offset)
        return self

    def rotate(self, angle):
        for x in self.items:
            x.rotate(angle)
        return self
class Svg(Transformable):
    '''SVG class: use parse to parse a file'''
    # class Svg handles the <svg> tag
    # tag = 'svg'

    def __init__(self, filename=None, verbose=True):
        Transformable.__init__(self, verbose=verbose)
        # Ratio between the viewBox width and the document width; updated by
        # parse(). (Fixed: the original assigned a never-used local variable,
        # so instances without a viewBox had no viewport_scale attribute.)
        self.viewport_scale = 1
        if filename:
            self.parse(filename)

    def parse(self, filename):
        '''Parse an SVG file: build the item tree and apply all transforms.'''
        self.filename = filename
        tree = etree.parse(filename)
        self.root = tree.getroot()
        if self.root.tag != svg_ns + 'svg':
            # %-format the message (the original passed filename as a spare
            # exception argument and left '%s' unexpanded)
            raise TypeError('file %s does not seem to be a valid SVG file' % filename)
        # Create a top Group to group all other items (useful for viewBox elt)
        top_group = Group(verbose=self.verbose)
        self.items.append(top_group)
        # SVG dimension
        width = self.xlength(self.root.get('width'))
        height = self.ylength(self.root.get('height'))
        # update viewport
        top_group.viewport = Point(width, height)
        # viewBox
        if self.root.get('viewBox') is not None:
            viewBox = re.findall(number_re, self.root.get('viewBox'))
            # If the document somehow doesn't have dimentions get if from viewBox
            if self.root.get('width') is None or self.root.get('height') is None:
                width = float(viewBox[2])
                height = float(viewBox[3])
                if self.verbose:
                    print("\033[91mUnable to find width of height properties. Falling back to viewBox.\033[0m", file=sys.stderr)
            # Map viewBox coordinates onto the document dimensions
            sx = width / float(viewBox[2])
            sy = height / float(viewBox[3])
            tx = -float(viewBox[0])
            ty = -float(viewBox[1])
            self.viewport_scale = round(float(viewBox[2]) / width, 6)
            top_group.matrix = Matrix([sx, 0, 0, sy, tx, ty])
        if (self.root.get("width") is None or self.root.get("height") is None) \
                and self.root.get("viewBox") is None:
            print("\033[91mFatal Error: Unable to find SVG dimensions. Exiting.\033[0m", file=sys.stderr)
            # sys.exit is the explicit form; bare exit() relies on the
            # site-provided helper, which is not always available
            sys.exit(1)
        # Parse XML elements hierarchically with groups <g>
        top_group.append(self.root)
        self.transform()

    def title(self):
        '''Return the document <title> element, or the file basename if absent.'''
        t = self.root.find(svg_ns + 'title')
        if t is not None:
            return t
        else:
            return os.path.splitext(os.path.basename(self.filename))[0]

    def json(self):
        # Serialized as the list of top-level items (see JSONEncoder below)
        return self.items
class Group(Transformable):
    '''Handle svg <g> elements'''
    # class Group handles the <g> tag
    tag = 'g'

    def __init__(self, elt=None, verbose=True):
        Transformable.__init__(self, elt, verbose)
        self.name = ""
        self.hidden = False
        if elt is not None:
            # Scan attributes for an Inkscape-style label and for a style
            # that hides the whole group. (Local renamed from `id`, which
            # shadowed the builtin.)
            for attr, value in elt.attrib.items():
                parsed = self.parse_name(attr)
                if parsed["name"] == "label":
                    self.name = value
                if parsed["name"] == "style":
                    # raw string: '\s' is an invalid escape in a plain literal
                    if re.search(r"display\s*:\s*none", value):
                        self.hidden = True

    @staticmethod
    def parse_name(tag):
        '''Split an ElementTree '{namespace}name' tag into its two parts.

        Returns a dict with keys 'namespace' (None when absent) and 'name'.
        '''
        m = re.match(r'({(.+)})?(.+)', tag)
        return {
            'namespace': m.group(2),
            'name': m.group(3),
        }

    def append(self, element):
        '''Instantiate a handler object for each child element, recursing into <g>.'''
        for elt in element:
            elt_class = svgClass.get(elt.tag, None)
            if elt_class is None:
                if self.verbose:
                    print('No handler for element %s' % elt.tag)
                continue
            # instanciate elt associated class (e.g. <path>: item = Path(elt)
            item = elt_class(elt, verbose=self.verbose)
            # The group matrix is deliberately NOT applied here:
            # Svg.__init__() already applies it through self.transform(), so
            # applying it here too would transform items twice.
            item.viewport = self.viewport
            self.items.append(item)
            # Recursively append if elt is a <g> (group)
            if elt.tag == svg_ns + 'g':
                item.append(elt)

    def __repr__(self):
        return '<Group ' + self.id + " ({})".format(self.name) + '>: ' + repr(self.items)

    def json(self):
        return {'Group ' + self.id + " ({})".format(self.name): self.items}
class Matrix:
    ''' SVG transformation matrix and its operations

    a SVG matrix is represented as a list of 6 values [a, b, c, d, e, f]
    (named vect hereafter) which represent the 3x3 matrix
    ((a, c, e)
     (b, d, f)
     (0, 0, 1))
    see http://www.w3.org/TR/SVG/coords.html#EstablishingANewUserSpace '''

    def __init__(self, vect=None):
        '''Build a matrix from a 6-element sequence; identity by default.

        Raises ValueError when vect does not have exactly 6 elements.
        '''
        # None sentinel instead of a mutable list default (shared-default pitfall)
        if vect is None:
            vect = [1, 0, 0, 1, 0, 0]
        if len(vect) != 6:
            raise ValueError("Bad vect size %d" % len(vect))
        self.vect = list(vect)

    def __mul__(self, other):
        '''Matrix x Matrix composition, or Matrix x Point application.'''
        if isinstance(other, Matrix):
            a = self.vect[0] * other.vect[0] + self.vect[2] * other.vect[1]
            b = self.vect[1] * other.vect[0] + self.vect[3] * other.vect[1]
            c = self.vect[0] * other.vect[2] + self.vect[2] * other.vect[3]
            d = self.vect[1] * other.vect[2] + self.vect[3] * other.vect[3]
            e = self.vect[0] * other.vect[4] + self.vect[2] * other.vect[5] \
                + self.vect[4]
            f = self.vect[1] * other.vect[4] + self.vect[3] * other.vect[5] \
                + self.vect[5]
            return Matrix([a, b, c, d, e, f])
        elif isinstance(other, Point):
            x = other.x * self.vect[0] + other.y * self.vect[2] + self.vect[4]
            y = other.x * self.vect[1] + other.y * self.vect[3] + self.vect[5]
            return Point(x, y)
        else:
            return NotImplemented

    def __str__(self):
        return str(self.vect)

    def xlength(self, x):
        # Scale a length along the x axis (translation does not apply to lengths)
        return x * self.vect[0]

    def ylength(self, y):
        # Scale a length along the y axis (translation does not apply to lengths)
        return y * self.vect[3]
COMMANDS = 'MmZzLlHhVvCcSsQqTtAa'
class Path(Transformable):
    '''SVG <path>'''
    # class Path handles the <path> tag
    tag = 'path'

    def __init__(self, elt=None, verbose=True):
        Transformable.__init__(self, elt, verbose)
        if elt is not None:
            self.style = elt.get('style')
            self.parse(elt.get('d'))

    def parse(self, pathstr):
        """Parse path string and build elements list"""
        # Tokenize into numbers and single-letter commands. Raw string keeps
        # the escaped-space '\ ' regex atoms intact without escape warnings.
        pathlst = re.findall(number_re + r"|\ *[%s]\ *" % COMMANDS, pathstr)
        # Reversed so tokens can be consumed cheaply with pop() from the end
        pathlst.reverse()
        command = None
        current_pt = Point(0, 0)
        start_pt = None
        while pathlst:
            if pathlst[-1].strip() in COMMANDS:
                last_command = command
                command = pathlst.pop().strip()
                absolute = (command == command.upper())
                command = command.upper()
            else:
                if command is None:
                    raise ValueError("No command found at %d" % len(pathlst))
            if command == 'M':
                # MoveTo
                x = pathlst.pop()
                y = pathlst.pop()
                pt = Point(x, y)
                if absolute:
                    current_pt = pt
                else:
                    current_pt += pt
                start_pt = current_pt
                self.items.append(MoveTo(current_pt))
                # MoveTo with multiple coordinates means LineTo
                command = 'L'
            elif command == 'Z':
                # Close Path
                l = Segment(current_pt, start_pt)
                self.items.append(l)
                current_pt = start_pt
            elif command in 'LHV':
                # LineTo, Horizontal & Vertical line
                # extra coord for H,V
                if absolute:
                    x, y = current_pt.coord()
                else:
                    x, y = (0, 0)
                if command in 'LH':
                    x = pathlst.pop()
                if command in 'LV':
                    y = pathlst.pop()
                pt = Point(x, y)
                if not absolute:
                    pt += current_pt
                self.items.append(Segment(current_pt, pt))
                current_pt = pt
            elif command in 'CQ':
                # Cubic / quadratic Bezier: read the remaining control points
                dimension = {'Q': 3, 'C': 4}
                bezier_pts = []
                bezier_pts.append(current_pt)
                for i in range(1, dimension[command]):
                    x = pathlst.pop()
                    y = pathlst.pop()
                    pt = Point(x, y)
                    if not absolute:
                        pt += current_pt
                    bezier_pts.append(pt)
                self.items.append(Bezier(bezier_pts))
                current_pt = pt
            elif command in 'TS':
                # number of points to read
                nbpts = {'T': 1, 'S': 2}
                # the control point, from previous Bezier to mirror
                ctrlpt = {'T': 1, 'S': 2}
                # last command control
                last = {'T': 'QT', 'S': 'CS'}
                bezier_pts = []
                bezier_pts.append(current_pt)
                # Guard added: a path starting with T/S left last_command as
                # None and `None in 'QT'` raises TypeError
                if last_command and last_command in last[command]:
                    pt0 = self.items[-1].control_point(ctrlpt[command])
                else:
                    pt0 = current_pt
                pt1 = current_pt
                # Symetrical of pt1 against pt0
                bezier_pts.append(pt1 + pt1 - pt0)
                for i in range(0, nbpts[command]):
                    x = pathlst.pop()
                    y = pathlst.pop()
                    pt = Point(x, y)
                    if not absolute:
                        pt += current_pt
                    bezier_pts.append(pt)
                self.items.append(Bezier(bezier_pts))
                current_pt = pt
            elif command == 'A':
                rx = pathlst.pop()
                ry = pathlst.pop()
                xrot = pathlst.pop()
                # Arc flags are not necesarily sepatated numbers
                flags = pathlst.pop().strip()
                large_arc_flag = flags[0]
                if large_arc_flag not in '01':
                    # Fixed: the original wrote to the nonexistent sys.error
                    print('\033[91mArc parsing failure\033[0m', file=sys.stderr)
                    break
                if len(flags) > 1:
                    flags = flags[1:].strip()
                else:
                    flags = pathlst.pop().strip()
                sweep_flag = flags[0]
                if sweep_flag not in '01':
                    # Fixed: sys.error -> sys.stderr
                    print('\033[91mArc parsing failure\033[0m', file=sys.stderr)
                    break
                if len(flags) > 1:
                    x = flags[1:]
                else:
                    x = pathlst.pop()
                y = pathlst.pop()
                # TODO: elliptical arcs are parsed but not rendered
                if self.verbose:
                    print('\033[91mUnsupported ARC: ' +
                          ', '.join([rx, ry, xrot, large_arc_flag, sweep_flag, x, y]) + "\033[0m",
                          file=sys.stderr
                          )
            else:
                # Unknown/unsupported token: discard it and continue
                pathlst.pop()

    def __str__(self):
        return '\n'.join(str(x) for x in self.items)

    def __repr__(self):
        return '<Path ' + self.id + '>'

    def segments(self, precision=0):
        '''Return a list of segments, each segment is ended by a MoveTo.
        A segment is a list of Points'''
        ret = []
        # group items separated by MoveTo
        for moveTo, group in itertools.groupby(self.items,
                                               lambda x: isinstance(x, MoveTo)):
            # Use only non MoveTo item
            if not moveTo:
                # Generate segments for each relevant item
                seg = [x.segments(precision) for x in group]
                # Merge all segments into one
                ret.append(list(itertools.chain.from_iterable(seg)))
        return ret

    def simplify(self, precision):
        '''Simplify segment with precision:
        Remove any point which are ~aligned'''
        ret = []
        for seg in self.segments(precision):
            ret.append(simplify_segment(seg, precision))
        return ret
class Ellipse(Transformable):
    '''SVG <ellipse>'''
    # class Ellipse handles the <ellipse> tag
    tag = 'ellipse'

    def __init__(self, elt=None, verbose=True):
        Transformable.__init__(self, elt, verbose)
        if elt is not None:
            # Geometry read straight from the XML attributes (cx/cy/rx/ry)
            self.center = Point(self.xlength(elt.get('cx')),
                                self.ylength(elt.get('cy')))
            self.rx = self.length(elt.get('rx'))
            self.ry = self.length(elt.get('ry'))
            self.style = elt.get('style')

    def __repr__(self):
        return '<Ellipse ' + self.id + '>'

    def bbox(self):
        '''Bounding box'''
        # Axis-aligned box around the (unrotated) ellipse
        pmin = self.center - Point(self.rx, self.ry)
        pmax = self.center + Point(self.rx, self.ry)
        return (pmin, pmax)

    def transform(self, matrix=None):
        # Apply self.matrix (optionally composed with a parent matrix) to the
        # center; radii are scaled by the matrix diagonal only, so shear is
        # not reflected in rx/ry.
        if matrix is None:
            matrix = self.matrix
        else:
            matrix = self.matrix * matrix
        self.center = matrix * self.center
        self.rx = matrix.xlength(self.rx)
        self.ry = matrix.ylength(self.ry)

    def scale(self, ratio):
        self.center *= ratio
        self.rx *= ratio
        self.ry *= ratio

    def translate(self, offset):
        self.center += offset

    def rotate(self, angle):
        # NOTE(review): only the center is rotated; the axes stay
        # axis-aligned, so rotations that are not multiples of 180 degrees
        # are approximate (see the warning in segments()).
        self.center = self.center.rot(angle)

    def P(self, t):
        '''Return a Point on the Ellipse for t in [0..1]'''
        x = self.center.x + self.rx * math.cos(2 * math.pi * t)
        y = self.center.y + self.ry * math.sin(2 * math.pi * t)
        return Point(x, y)

    def segments(self, precision=0):
        '''Approximate the ellipse by a polyline whose chords deviate from
        the true curve by less than `precision`.'''
        if self.verbose and self.rotation % 180 != 0:
            print(
                "\033[91mUnsupported rotation for {} primitive\033[0m".format(
                    self.__class__.__name__
                ),
                file=sys.stderr
            )
        if max(self.rx, self.ry) < precision:
            # Degenerate case: the whole ellipse fits inside the precision
            return [[self.center]]
        # Start from the two parameter endpoints and refine by bisection:
        # each pass inserts the midpoint of every chord, until the last
        # measured chord-to-curve distance drops below precision.
        p = [(0, self.P(0)), (1, self.P(1))]
        d = 2 * max(self.rx, self.ry)
        while d > precision:
            for (t1, p1), (t2, p2) in zip(p[:-1], p[1:]):
                t = t1 + (t2 - t1) / 2.
                d = Segment(p1, p2).pdistance(self.P(t))
                p.append((t, self.P(t)))
            # Keep the sample points ordered by parameter value
            p.sort(key=operator.itemgetter(0))
        ret = [x for t, x in p]
        return [ret]

    def simplify(self, precision):
        # Ellipses are already minimal; nothing to simplify
        return self
# A circle is a special type of ellipse where rx = ry = radius
class Circle(Ellipse):
    '''SVG <circle>'''
    # class Circle handles the <circle> tag
    tag = 'circle'

    def __init__(self, elt=None, verbose=True):
        # Reuse the Ellipse implementation by rewriting the element's 'r'
        # attribute into 'rx'/'ry'. NOTE: this mutates `elt` in place.
        if elt is not None:
            elt.set('rx', elt.get('r'))
            elt.set('ry', elt.get('r'))
        Ellipse.__init__(self, elt, verbose=verbose)

    def __repr__(self):
        return '<Circle ' + self.id + '>'
class Rect(Transformable):
    '''SVG <rect>'''
    # class Rect handles the <rect> tag
    tag = 'rect'

    def __init__(self, elt=None, verbose=True):
        Transformable.__init__(self, elt, verbose)
        if elt is not None:
            # Two opposite corners: P1 = (x, y), P2 = P1 + (width, height)
            self.P1 = Point(self.xlength(elt.get('x')),
                            self.ylength(elt.get('y')))
            self.P2 = Point(self.P1.x + self.xlength(elt.get('width')),
                            self.P1.y + self.ylength(elt.get('height')))
            self.style = elt.get('style')
            if self.verbose and (elt.get('rx') or elt.get('ry')):
                # Rounded corners are accepted but rendered square
                print("\033[91mUnsupported corner radius on rect.\033[0m", file=sys.stderr)

    def __repr__(self):
        return '<Rect ' + self.id + '>'

    def bbox(self):
        '''Bounding box'''
        xmin = min([p.x for p in (self.P1, self.P2)])
        xmax = max([p.x for p in (self.P1, self.P2)])
        ymin = min([p.y for p in (self.P1, self.P2)])
        ymax = max([p.y for p in (self.P1, self.P2)])
        return (Point(xmin, ymin), Point(xmax, ymax))

    def transform(self, matrix=None):
        # Transform both stored corners; accumulated rotation is tracked
        # separately in self.rotation and used by segments() below
        if matrix is None:
            matrix = self.matrix
        else:
            matrix = self.matrix * matrix
        self.P1 = matrix * self.P1
        self.P2 = matrix * self.P2

    def segments(self, precision=0):
        # A rectangle is built with a segment going thru 4 points
        ret = []
        Pa, Pb = Point(0, 0), Point(0, 0)
        if self.rotation % 90 == 0:
            # Axis-aligned: the two missing corners combine the known
            # corners' coordinates directly
            Pa = Point(self.P1.x, self.P2.y)
            Pb = Point(self.P2.x, self.P1.y)
        else:
            # Rotated: recover the missing corners by intersecting the side
            # lines through P1/P2 with slope tan(rotation) (sa) and the
            # perpendicular slope (sb); b* are the line intercepts.
            sa = math.sin(math.radians(self.rotation)) / math.cos(math.radians(self.rotation))
            sb = -1 / sa
            ba = -sa * self.P1.x + self.P1.y
            bb = -sb * self.P2.x + self.P2.y
            x = (ba - bb) / (sb - sa)
            Pa = Point(x, sa * x + ba)
            bb = -sb * self.P1.x + self.P1.y
            ba = -sa * self.P2.x + self.P2.y
            x = (ba - bb) / (sb - sa)
            Pb = Point(x, sa * x + ba)
        # Closed polyline: back to P1 at the end
        ret.append([self.P1, Pa, self.P2, Pb, self.P1])
        return ret

    def simplify(self, precision):
        return self.segments(precision)
class Line(Transformable):
    '''SVG <line>'''
    # class Line handles the <line> tag
    tag = 'line'

    def __init__(self, elt=None, verbose=True):
        Transformable.__init__(self, elt, verbose)
        if elt is not None:
            self.P1 = Point(self.xlength(elt.get('x1')),
                            self.ylength(elt.get('y1')))
            self.P2 = Point(self.xlength(elt.get('x2')),
                            self.ylength(elt.get('y2')))
            # Cached Segment, rebuilt whenever the endpoints change
            self.segment = Segment(self.P1, self.P2)

    def __repr__(self):
        return '<Line ' + self.id + '>'

    def bbox(self):
        '''Bounding box'''
        xmin = min([p.x for p in (self.P1, self.P2)])
        xmax = max([p.x for p in (self.P1, self.P2)])
        ymin = min([p.y for p in (self.P1, self.P2)])
        ymax = max([p.y for p in (self.P1, self.P2)])
        return (Point(xmin, ymin), Point(xmax, ymax))

    def transform(self, matrix):
        # NOTE(review): unlike the other primitives, matrix is required here
        # (no default of self.matrix) — confirm callers always pass one.
        self.P1 = matrix * self.P1
        self.P2 = matrix * self.P2
        self.segment = Segment(self.P1, self.P2)

    def segments(self, precision=0):
        return [self.segment.segments()]

    def simplify(self, precision):
        return self.segments(precision)
# overwrite JSONEncoder for svg classes which have defined a .json() method
class JSONEncoder(json.JSONEncoder):
    '''JSON encoder that delegates to an object's .json() method when the
    object is one of the registered SVG handler classes (or Svg itself).'''

    def default(self, obj):
        # dict.values() is a view in Python 3: it must be materialized before
        # concatenation (the original `svgClass.values() + [Svg]` raised
        # TypeError under Python 3).
        if not isinstance(obj, tuple(list(svgClass.values()) + [Svg])):
            # Not one of ours: let the base class raise the usual TypeError
            return json.JSONEncoder.default(self, obj)
        if not hasattr(obj, 'json'):
            return repr(obj)
        return obj.json()
## Code executed on module load ##
# SVG tag handler classes are initialized here
# (classes must be defined before)
import inspect

# Maps '{svg namespace}tag' -> handler class; built by introspecting this
# module for classes that declare a 'tag' attribute.
svgClass = {}
# Register all classes with attribute 'tag' in svgClass dict
for name, cls in inspect.getmembers(sys.modules[__name__], inspect.isclass):
    tag = getattr(cls, 'tag', None)
    if tag:
        svgClass[svg_ns + tag] = cls
|
import os
import json
import requests
import config
from tools import log
from tools import DDingWarn
from tools.reply_template import *
import pyttsx3
# Module-level cache of the Baidu OAuth response (dict holding 'access_token');
# populated by get_token()/read_token() below.
auth = None
logger = log.logger
# Local pyttsx3 speech engine (initialized at import; usage not shown here)
pyttsx3_engine = pyttsx3.init()
# Fetch the Baidu API OAuth token; the parsed response is stored in `auth`.
def get_token():
    """Request a fresh Baidu OAuth token, cache it to disk and in `auth`.

    Returns template(success=True, data=auth) on success; network errors
    propagate to the caller.
    """
    global auth
    # NOTE(review): credentials are hard-coded; consider moving them to
    # config or environment variables.
    APIKey = 'bpLlUme0C61GisOY9Ce2QYzu'
    SecretKey = 'btQoThXKmmVXGH2hHmBe64goEFQ0kihy'
    AppId = '16991715'
    url = 'https://openapi.baidu.com/oauth/2.0/token'
    params = {'grant_type': "client_credentials",
              'client_id': APIKey,
              'client_secret': SecretKey}
    a = requests.get(url, params=params)
    # Parse the JSON body instead of eval()-ing untrusted response text
    # (eval would execute arbitrary expressions from the network).
    auth = a.json()
    with open(config.BaiduYunTokenFileLocation, 'w+', encoding='utf-8') as fo:
        json.dump(auth, fo, indent=' ', ensure_ascii=False)
    return template(success=True, data=auth)
def read_token():
    """Load the cached Baidu token from disk into the module-level `auth`.

    No-op when `auth` is already populated; always returns None.
    """
    global auth
    if auth:
        return
    with open(config.BaiduYunTokenFileLocation, 'r+', encoding='utf-8') as fo:
        auth = json.load(fp=fo)
# NOTE: aue=4 or 6 is the format required by speech recognition, but this
# audio is synthetic rather than natural human speech, so recognition
# quality on it would suffer.
def text2speech(text, file_location=config.tts_location, file_name=None):
    """Synthesize `text` into an mp3 via the Baidu TTS HTTP API.

    file_location -- output directory (created if missing)
    file_name     -- base name without extension; defaults to text[:5]
    Returns template(success=True, data=<abs mp3 path>) on success, or an
    error template when the request fails / the token is rejected.
    """
    global auth
    read_token()
    print(auth)
    tok = auth['access_token']
    cuid = 'abc'  # unique user identifier
    ctp = '1'     # client type
    lan = 'zh'    # Chinese, fixed value
    spd = 2       # speech speed
    pit = 6       # pitch
    vol = 10      # volume
    per = 106     # voice persona (106 = "Du Bowen"; see Baidu TTS docs)
    aue = 3       # 3 = mp3 (default); 4 = pcm-16k; 5 = pcm-8k; 6 = wav (pcm-16k)
    # makedirs(exist_ok=True) creates missing parents and tolerates a
    # concurrent create (the original exists()/mkdir pair raced and failed
    # when intermediate directories were missing).
    os.makedirs(file_location, exist_ok=True)
    speech_url = 'http://tsn.baidu.com/text2audio'
    logger.info(f'The Text Is:{text}')
    try:
        res = requests.post(url=speech_url, params={'tex': text, 'tok': tok, 'cuid': cuid, 'lan': lan, 'ctp': ctp,
                                                    'spd': spd, 'pit': pit, 'vol': vol, 'per': per, 'aue': aue})
    except Exception as e:
        DDingWarn.request_ding(result=[f'请求百度失败!请检查网络连接~ {e}'])
        return template(message=f'请求百度失败!请检查网络连接~ {e}')
    # Baidu signals token errors inside the body rather than via HTTP status
    if res.content[2:12] == b'err_detail':
        DDingWarn.request_ding(['请求百度成功,但秘钥错误!现在尝试获取新秘钥!'])
        get_token()
        return template(message='请求百度成功,但秘钥错误!已经尝试获取新秘钥!等待下一次请求结果!')
    if file_name is None:
        # Default file name: first 5 characters of the text
        file_name = text[:5]
    file_name = os.path.abspath(os.path.join(file_location, file_name) + '.mp3')
    with open(file_name, 'wb') as fo:
        fo.write(res.content)
    return template(success=True, data=file_name)
|
# -------------------------------------------------------------------#
# Released under the MIT license (https://opensource.org/licenses/MIT)
# Contact: mrinal.haloi11@gmail.com
# Enhancement Copyright 2016, Mrinal Haloi
# -------------------------------------------------------------------#
import random
import os
import tensorflow as tf
from core.player import Player
from env.environment import GymEnvironment, SimpleGymEnvironment
from config.config import cfg
# Set random seed
# Fixed seeds make TF graph ops and Python-level RNG reproducible across runs
tf.set_random_seed(123)
random.seed(12345)
def main(_):
    """Build the Gym environment and run the Player in a GPU-capped session.

    `_` is the leftover argv supplied by tf.app.run().
    """
    # Cap per-process GPU memory so other jobs can share the device
    gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=0.4)
    with tf.Session(config=tf.ConfigProto(gpu_options=gpu_options)) as sess:
        # Pick the environment wrapper according to the config
        if cfg.env_type == 'simple':
            env = SimpleGymEnvironment(cfg)
        else:
            env = GymEnvironment(cfg)
        # makedirs(exist_ok=True) avoids the check-then-create race of the
        # original exists()/mkdir pair
        os.makedirs('/tmp/model_dir', exist_ok=True)
        player = Player(cfg, env, sess, '/tmp/model_dir')
        player.play(load_model=False)


if __name__ == '__main__':
    tf.app.run()
|
# get the splines
import numpy as np
import scipy.interpolate as si
# TODO - BSpline.predict() -> allow x to be of any shape. return.shape = in.shape + (n_bases)
# MAYBE TODO - implement si.splev using keras.backend.
# - That way you don't have to hash the X_spline in memory.
class BSpline():
    """Class for computing the B-spline funcions b_i(x) and
    constructing the penality matrix S.
    # Arguments
        start: float or int; start of the region
        end: float or int; end of the region
        n_bases: int; number of spline bases
        spline_order: int; spline order
    # Methods
        - **getS(add_intercept=False)** - Get the penalty matrix S
              - Arguments
                    - **add_intercept**: bool. If true, intercept column is added to the returned matrix.
              - Returns
                    - `np.array`, of shape `(n_bases + add_intercept, n_bases + add_intercept)`
        - **predict(x, add_intercept=False)** - For some x, predict the bn(x) for each base
              - Arguments
                    - **x**: np.array; Vector of dimension 1
                    - **add_intercept**: bool; If True, intercept column is added to the to the final array
              - Returns
                    - `np.array`, of shape `(len(x), n_bases + (add_intercept))`
    """

    def __init__(self, start=0, end=1, n_bases=10, spline_order=3):
        self.start = start
        self.end = end
        self.n_bases = n_bases
        self.spline_order = spline_order
        # Knot positions (mgcv-style placement) and the raw penalty matrix
        # are precomputed once at construction.
        self.knots = get_knots(self.start, self.end, self.n_bases, self.spline_order)
        self.S = get_S(self.n_bases, self.spline_order, add_intercept=False)

    def __repr__(self):
        return "BSpline(start={0}, end={1}, n_bases={2}, spline_order={3})".\
            format(self.start, self.end, self.n_bases, self.spline_order)

    def getS(self, add_intercept=False):
        """Get the penalty matrix S
        Returns
            np.array, of shape (n_bases + add_intercept, n_bases + add_intercept)
        """
        S = self.S
        if add_intercept is True:
            # S <- cbind(0, rbind(0, S)) # in R
            # i.e. prepend a zero row and a zero column (the intercept is
            # unpenalized)
            zeros = np.zeros_like(S[:1, :])
            S = np.vstack([zeros, S])
            zeros = np.zeros_like(S[:, :1])
            S = np.hstack([zeros, S])
        return S

    def predict(self, x, add_intercept=False):
        """For some x, predict the bn(x) for each base
        Arguments:
            x: np.array; Vector of dimension 1
            add_intercept: bool; should we add the intercept to the final array
        Returns:
            np.array, of shape (len(x), n_bases + (add_intercept))
        """
        # sanity check
        # NOTE(review): `raise Warning(...)` aborts with an exception rather
        # than emitting a warnings.warn() message — confirm this hard-failure
        # behaviour is intended for out-of-range x.
        if x.min() < self.start:
            raise Warning("x.min() < self.start")
        if x.max() > self.end:
            raise Warning("x.max() > self.end")
        return get_X_spline(x=x,
                            knots=self.knots,
                            n_bases=self.n_bases,
                            spline_order=self.spline_order,
                            add_intercept=add_intercept)

    def get_config(self):
        # Serializable constructor arguments (mirrors from_config)
        return {"start": self.start,
                "end": self.end,
                "n_bases": self.n_bases,
                "spline_order": self.spline_order
                }

    @classmethod
    def from_config(cls, config):
        # Inverse of get_config: rebuild an equivalent instance
        return cls(**config)
############################################
# core functions
def get_gam_splines(start=0, end=100, n_bases=10, spline_order=3, add_intercept=True):
    """Main function required by (TF)Concise class.

    Evaluates the spline basis on the integer grid [start, end] and returns
    (X_splines, S, knots):
      X_splines -- basis matrix, shape (end - start + 1, n_bases (+1))
      S         -- penalty matrix
      knots     -- knot positions (same placement as R's mgcv)
    """
    # Validate with an explicit exception: `assert` is stripped under -O.
    if not isinstance(n_bases, int):
        raise TypeError("n_bases has to be an int")
    x = np.arange(start, end + 1)
    knots = get_knots(start, end, n_bases, spline_order)
    X_splines = get_X_spline(x, knots, n_bases, spline_order, add_intercept)
    S = get_S(n_bases, spline_order, add_intercept)
    # Get the same knot positions as with mgcv
    # https://github.com/cran/mgcv/blob/master/R/smooth.r#L1560
    return X_splines, S, knots
############################################
# helper functions
# main resource:
# https://github.com/cran/mgcv/blob/master/R/smooth.r#L1560
def get_knots(start, end, n_bases=10, spline_order=3):
    """Compute B-spline knot positions over [start, end].

    Mirrors mgcv's knot placement: the region is padded by 0.1% on each
    side, interior knots are spaced evenly, and (m + 1) boundary knots are
    added beyond each end.

    Returns a float32 np.array of length (n_bases - spline_order + 1) + 2*spline_order.
    """
    # Pad the region slightly so boundary samples fall strictly inside
    span = end - start
    lo = start - span * 0.001
    hi = end + span * 0.001
    # mgcv annotation: m = order - 1, nk = number of interior knots
    m = spline_order - 1
    n_interior = n_bases - m
    step = (hi - lo) / (n_interior - 1)
    knots = np.linspace(start=lo - step * (m + 1),
                        stop=hi + step * (m + 1),
                        num=n_interior + 2 * m + 2)
    return knots.astype(np.float32)
# - get knots as arguments
def get_X_spline(x, knots, n_bases=10, spline_order=3, add_intercept=True):
    """Evaluate every B-spline basis function at the 1-D points `x`.

    Returns:
        np.array of shape [len(x), n_bases + (add_intercept)]
    # BSpline formula
    https://docs.scipy.org/doc/scipy/reference/generated/scipy.interpolate.BSpline.html#scipy.interpolate.BSpline
    Fortran code:
    https://github.com/scipy/scipy/blob/v0.19.0/scipy/interpolate/fitpack/splev.f
    """
    # Fixed: `is not 1` compared ints by identity (CPython small-int cache
    # made it appear to work); value comparison is what is meant here.
    if len(x.shape) != 1:
        raise ValueError("x has to be 1 dimentional")
    # tck = (knots, coefficients, order): evaluate one basis at a time by
    # setting a single unit coefficient.
    tck = [knots, np.zeros(n_bases), spline_order]
    X = np.zeros([len(x), n_bases])
    for i in range(n_bases):
        vec = np.zeros(n_bases)
        vec[i] = 1.0
        tck[1] = vec
        X[:, i] = si.splev(x, tck, der=0)
    if add_intercept is True:
        # Prepend a constant column of ones
        ones = np.ones_like(X[:, :1])
        X = np.hstack([ones, X])
    return X.astype(np.float32)
def get_S(n_bases=10, spline_order=3, add_intercept=True):
    """Build the B-spline smoothness penalty matrix S = D'D (float32).

    Mirrors the mgcv R construction:
        S <- diag(object$bs.dim)
        if (m[2]) for (i in 1:m[2]) S <- diff(S)
        object$S <- list(t(S) %*% S)                       # get penalty
        object$S[[1]] <- (object$S[[1]] + t(object$S[[1]])) / 2  # exact symmetry
    """
    diff_order = spline_order - 1  # m[2] equals m[1] by default in mgcv
    # Repeated row-differencing of the identity gives the difference operator D
    D = np.identity(n_bases)
    for _ in range(diff_order):
        D = np.diff(D, axis=0)
    S = D.T @ D
    # Symmetrize exactly (guards against floating-point asymmetry)
    S = (S + S.T) / 2
    if add_intercept is True:
        # S <- cbind(0, rbind(0, S)) in R: unpenalized intercept row/column
        S = np.vstack([np.zeros_like(S[:1, :]), S])
        S = np.hstack([np.zeros_like(S[:, :1]), S])
    return S.astype(np.float32)
|
# Python Standard Library Imports
from itertools import combinations
from utils import ingest
# Puzzle parameters (Advent of Code 2015, day 17: eggnog containers)
TARGET = 150
INPUT_FILE = '17.in'
EXPECTED_ANSWERS = (654, 57, )

# Sample-input parameters, kept for quick verification against the example:
# TARGET = 25
# INPUT_FILE = '17.test.in'
# EXPECTED_ANSWERS = (4, 3, )
def main():
    """Solve both puzzle parts and verify them against the known answers."""
    sol = Solution()
    answers = tuple(solve() for solve in (sol.solve1, sol.solve2))
    print(answers)
    assert answers == EXPECTED_ANSWERS
class Solution:
    """Driver: parses the container list once and queries the Kitchen."""

    def __init__(self):
        raw = ingest(INPUT_FILE)
        # Container sizes, largest first
        sizes = sorted((int(line) for line in raw), reverse=True)
        self.kitchen = Kitchen(sizes)
        self.kitchen.store(TARGET)

    def solve1(self):
        """Part 1: number of container combinations holding exactly TARGET."""
        self.answer1 = self.kitchen.ways
        return self.answer1

    def solve2(self):
        """Part 2: number of combinations using the minimum container count."""
        self.answer2 = len(self.kitchen.optimal_combos)
        return self.answer2
class Kitchen:
    """Counts how the available containers can be combined to hold an
    exact amount of eggnog."""

    def __init__(self, containers):
        # Container capacities (list of ints; duplicates are distinct containers)
        self.containers = containers

    def store(self, amount):
        """Enumerate every container combination summing exactly to `amount`.

        Sets:
          self.ways           -- total number of exact-fit combinations
          self.optimal_combos -- list of the combinations that use the fewest
                                 containers (None when nothing fits)
        NOTE: brute force over all subsets, O(2^n) in the container count.
        """
        ways = 0
        optimal_combos = None
        min_containers = None
        for size in range(1, len(self.containers) + 1):
            for combo in combinations(self.containers, size):
                # Guard clause replaces the original's dead `else: pass`
                if sum(combo) != amount:
                    continue
                ways += 1
                if optimal_combos is None or len(combo) < min_containers:
                    # New minimum container count: restart the optimal list
                    optimal_combos = [combo]
                    min_containers = len(combo)
                elif len(combo) == min_containers:
                    optimal_combos.append(combo)
        self.ways = ways
        self.optimal_combos = optimal_combos
# Script entry point
if __name__ == '__main__':
    main()
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
    """Update help_text on the twitter User count fields.

    AlterField with only a help_text change: no database schema change,
    the column definitions stay identical.
    """

    dependencies = [
        ("twitter", "0003_auto_20150730_1112"),
    ]

    operations = [
        migrations.AlterField(
            model_name="user",
            name="favorites_count",
            field=models.PositiveIntegerField(
                default=0,
                help_text=b"The number of tweets this user has favorited in the account\xe2\x80\x99s lifetime",  # noqa: E501
            ),
        ),
        migrations.AlterField(
            model_name="user",
            name="followers_count",
            field=models.PositiveIntegerField(
                default=0, help_text=b"The number of followers this account has"
            ),
        ),
        migrations.AlterField(
            model_name="user",
            name="friends_count",
            field=models.PositiveIntegerField(
                default=0, help_text=b"Tne number of users this account is following."
            ),
        ),
        migrations.AlterField(
            model_name="user",
            name="listed_count",
            field=models.PositiveIntegerField(
                default=0,
                help_text=b"The number of public lists this user is a member of",
            ),
        ),
        migrations.AlterField(
            model_name="user",
            name="statuses_count",
            field=models.PositiveIntegerField(
                default=0,
                help_text=b"The number of tweets, including retweets, by this user",
            ),
        ),
    ]
|
from django.contrib.auth.models import AbstractUser
from django.conf import settings
from rest_framework import permissions
from ..utils import raise_context
class IsAuthenticatedOpenApiRequest(permissions.IsAuthenticated):
    """IsAuthenticated variant that always lets OpenAPI schema requests through."""
    __slots__ = ()

    def is_openapi(self, request):
        # Schema endpoints: two prefix routes plus the versioned exact path.
        schema_prefixes = (
            f'/{settings.API_URL}/openapi/',
            f'/{settings.API_URL}/endpoint/',
        )
        if request.path.startswith(schema_prefixes):
            return True
        return request.path == f'/{settings.API_URL}/{request.version}/_openapi/'

    def has_permission(self, request, view):
        if self.is_openapi(request):
            return True
        return super().has_permission(request, view)
class SuperUserPermission(IsAuthenticatedOpenApiRequest):
    """Staff get full access; other users are limited to safe methods and
    to operating on their own user object."""
    __slots__ = ()

    def has_permission(self, request, view):
        if request.user.is_staff or request.method in permissions.SAFE_METHODS:
            # Deliberately skip IsAuthenticatedOpenApiRequest in the MRO and
            # run the plain IsAuthenticated check.
            # pylint: disable=bad-super-call
            return super(IsAuthenticatedOpenApiRequest, self).has_permission(request, view)
        # Non-staff write access: allowed only on the caller's own user row.
        # NOTE(review): raise_context() presumably suppresses exceptions from
        # this check (see ..utils.raise_context) so we fall through to the
        # OpenAPI test below on failure — confirm against its implementation.
        with raise_context():
            return issubclass(view.get_queryset().model, AbstractUser)\
                and str(view.kwargs['pk']) == str(request.user.pk)
        return self.is_openapi(request)

    def has_object_permission(self, request, view, obj):
        # Superusers may touch any object; everyone else only their own user.
        if request.user.is_superuser:
            return True
        elif isinstance(obj, AbstractUser) and obj == request.user:
            return True
        return False
class StaffPermission(permissions.IsAdminUser):
    """Restricts access to staff users; behaviour inherited unchanged from
    DRF's IsAdminUser."""
    __slots__ = ()
|
import gc
import os
import struct
import zlib
from collections import defaultdict
from contextlib import closing
import lmdb
import msgpack
import numpy as np
from pyroaring import BitMap
from tqdm import tqdm
from lz4 import frame
import config as cf
from core import io_worker as iw
def is_byte_obj(obj):
    """Return True if `obj` is already a bytes-like object (bytes or bytearray)."""
    # Single isinstance call with a type tuple replaces the manual
    # if/return-True/return-False chain; result is the same bool.
    return isinstance(obj, (bytes, bytearray))
def set_default(obj):
    """msgpack `default` hook: encode sets as sorted lists, reject anything else."""
    if not isinstance(obj, set):
        raise TypeError
    return sorted(obj)
def deserialize_key(key, integerkey=False, is_64bit=False):
    """Decode an LMDB key.

    Non-integer keys are decoded as text via cf.ENCODING; integer keys are
    struct-unpacked as uint64 ("Q") when is_64bit else uint32 ("I").
    Malformed integer keys are logged, then the original error is re-raised.
    """
    if not integerkey:
        return key.decode(cf.ENCODING)
    try:
        fmt = "Q" if is_64bit else "I"
        return struct.unpack(fmt, key)[0]
    except Exception:
        iw.print_status(key)
        # Re-raise the original error instead of `raise Exception`, which
        # discarded the exception type, message and traceback.
        raise
def deserialize_value(value, bytes_value=cf.ToBytesType.OBJ, compress_value=False):
    """Decode a stored LMDB value according to its serialization mode.

    INT_NUMPY -> list of uint32; INT_BITMAP -> pyroaring BitMap;
    anything else -> msgpack (optionally lz4-frame compressed).
    """
    if bytes_value == cf.ToBytesType.INT_NUMPY:
        return np.frombuffer(value, dtype=np.uint32).tolist()
    if bytes_value == cf.ToBytesType.INT_BITMAP:
        raw = value if isinstance(value, bytes) else bytes(value)
        return BitMap.deserialize(raw)
    # msgpack mode
    if compress_value:
        try:
            value = frame.decompress(value)
        except RuntimeError:
            # Value was stored uncompressed; unpack it as-is.
            pass
    return msgpack.unpackb(value, strict_map_key=False)
def deserialize(
    key,
    value,
    integerkey=False,
    is_64bit=False,
    bytes_value=cf.ToBytesType.OBJ,
    compress_value=False,
):
    """Decode a raw (key, value) pair fetched from LMDB and return the tuple."""
    return (
        deserialize_key(key, integerkey, is_64bit),
        deserialize_value(value, bytes_value, compress_value),
    )
def serialize_key(key, integerkey=False, is_64bit=False):
    """Encode a key for LMDB: packed unsigned int, or encoded/truncated text."""
    if integerkey:
        fmt = "Q" if is_64bit else "I"
        return struct.pack(fmt, key)
    text = key if isinstance(key, str) else str(key)
    # LMDB caps key length; truncate to the configured maximum.
    return text.encode(cf.ENCODING)[: cf.LMDB_MAX_KEY]
def serialize_value(
    value, bytes_value=cf.ToBytesType.OBJ, compress_value=False, sort_values=True
):
    """Encode a value for LMDB storage.

    INT_NUMPY -> raw uint32 bytes (optionally sorted first);
    INT_BITMAP -> serialized roaring bitmap;
    anything else -> msgpack (sets via set_default), optionally lz4-compressed.
    """
    if bytes_value == cf.ToBytesType.INT_NUMPY:
        if sort_values:
            value = sorted(value)
        arr = value if isinstance(value, np.ndarray) else np.array(value, dtype=np.uint32)
        return arr.tobytes()
    if bytes_value == cf.ToBytesType.INT_BITMAP:
        return BitMap(value).serialize()
    # msgpack mode
    packed = msgpack.packb(value, default=set_default)
    if compress_value:
        packed = frame.compress(packed)
    return packed
def serialize(
    key,
    value,
    integerkey=False,
    is_64bit=False,
    bytes_value=cf.ToBytesType.OBJ,
    compress_value=False,
):
    """Encode a (key, value) pair for LMDB and return the encoded tuple."""
    return (
        serialize_key(key, integerkey, is_64bit),
        serialize_value(value, bytes_value, compress_value),
    )
def preprocess_data_before_dump(
    data,
    integerkey=False,
    bytes_value=cf.ToBytesType.OBJ,
    compress_value=False,
    sort_key=True,
):
    """Normalize a dict or list of (key, value) pairs into serialized LMDB records.

    Pairs that are already bytes-like pass through untouched; otherwise each
    pair is serialized with `serialize`. Pairs with a None key are dropped.
    Returns a list sorted by key when sort_key is set.
    """
    if isinstance(data, dict):
        data = list(data.items())
    if not data:
        # Nothing to dump; avoids IndexError on data[0] below.
        return data
    if sort_key and integerkey:
        data.sort(key=lambda x: x[0])
    first_key = data[0][0]
    # BUG FIX: the original read data[0][0] here as well, so the value-side
    # byte check silently duplicated the key check and a byte key paired with
    # a non-byte value skipped serialization entirely.
    first_value = data[0][1]
    if not is_byte_obj(first_key) and not is_byte_obj(first_value):
        data = [
            serialize(
                k,
                v,
                integerkey=integerkey,
                bytes_value=bytes_value,
                compress_value=compress_value,
            )
            for k, v in data
            if k is not None
        ]
    if sort_key and not integerkey:
        data.sort(key=lambda x: x[0])
    return data
class DBCore:
    """Key-value store built on LMDB with pluggable (de)serialization.

    Keys are encoded strings or packed unsigned ints; values are msgpack
    blobs (optionally lz4-compressed), numpy uint32 arrays, or roaring
    bitmaps, chosen via cf.ToBytesType. Provides range/prefix iteration,
    buffered bulk writes, and maintenance (compaction, re-encoding, copy).
    """
    def __init__(self, db_file, max_db, map_size=cf.LMDB_MAP_SIZE):
        # subdir=False: db_file is the data file itself, not a directory.
        # lock=False disables LMDB locking -- callers must avoid concurrent
        # writers themselves.
        self._db_file = db_file
        iw.create_dir(self._db_file)
        self._max_db = max_db
        self._env = lmdb.open(
            self._db_file,
            map_async=True,
            map_size=map_size,
            subdir=False,
            lock=False,
            max_dbs=max_db,
        )
        self._env.set_mapsize(map_size)
    @property
    def env(self):
        # Underlying lmdb.Environment; exposed for the static bulk writers.
        return self._env
    def get_map_size(self):
        """Return the configured map size as a human-readable GB string."""
        tmp = self._env.info().get("map_size")
        if not tmp:
            return "Unknown"
        return f"{tmp / cf.SIZE_1GB:.0f}GB"
    def close(self):
        """Close the LMDB environment."""
        self._env.close()
    def copy_lmdb(self):
        """
        Copy current env to new one (reduce file size)
        :return:
        :rtype:
        """
        iw.print_status(self._env.stat())
        # NOTE(review): lmdb stat() does not report "map_size" (info() does),
        # so this branch likely never runs; `%` also looks like it should
        # be `/` for a GB figure -- confirm.
        if self._env.stat().get("map_size"):
            iw.print_status("%.2fGB" % (self._env.stat()["map_size"] % cf.SIZE_1GB))
        new_dir = self._db_file + ".copy"
        # compact=True rewrites the data file without free pages.
        self._env.copy(path=new_dir, compact=True)
        try:
            if os.path.exists(self._db_file):
                os.remove(self._db_file)
        except Exception as message:
            iw.print_status(message)
        os.rename(new_dir, self._db_file)
    def get_iter_integerkey(
        self,
        db,
        from_i=0,
        to_i=-1,
        get_values=True,
        bytes_value=cf.ToBytesType.OBJ,
        compress_value=False,
    ):
        """Yield keys (or (key, value) pairs) for integer keys in [from_i, to_i].

        to_i == -1 means "up to the number of entries in the db".
        """
        with self._env.begin(db=db, write=False) as txn:
            if to_i == -1:
                to_i = self.get_db_size(db)
            cur = txn.cursor()
            cur.set_range(serialize_key(from_i, integerkey=True))
            for item in cur.iternext(values=get_values):
                if get_values:
                    key, value = item
                else:
                    key = item
                key = deserialize_key(key, integerkey=True)
                if key > to_i:
                    break
                if get_values:
                    value = deserialize_value(
                        value, bytes_value=bytes_value, compress_value=compress_value,
                    )
                    yield key, value
                else:
                    yield key
                # NOTE(review): iternext() already advances the cursor; this
                # extra next() looks redundant and may skip records -- verify.
                cur.next()
    def get_iter_with_prefix(
        self,
        db,
        prefix,
        integerkey=False,
        get_values=True,
        bytes_value=cf.ToBytesType.OBJ,
        compress_value=False,
    ):
        """Yield keys (or (key, value) pairs) whose serialized key starts with `prefix`."""
        with self._env.begin(db=db, write=False) as txn:
            cur = txn.cursor()
            prefix = serialize_key(prefix, integerkey=integerkey)
            cur.set_range(prefix)
            while cur.key().startswith(prefix) is True:
                try:
                    # NOTE(review): unreachable -- the while condition already
                    # guarantees cur.key() starts with prefix.
                    if cur.key() and not cur.key().startswith(prefix):
                        continue
                    key = deserialize_key(cur.key(), integerkey=integerkey)
                    if get_values:
                        value = deserialize_value(
                            cur.value(),
                            bytes_value=bytes_value,
                            compress_value=compress_value,
                        )
                        yield key, value
                    else:
                        yield key
                except Exception as message:
                    iw.print_status(message)
                cur.next()
    def is_available(self, db, key_obj, integerkey=False):
        """Return True when `key_obj` exists in `db` with a non-empty value."""
        with self._env.begin(db=db) as txn:
            key_obj = serialize_key(key_obj, integerkey=integerkey)
            if key_obj:
                try:
                    value_obj = txn.get(key_obj)
                    if value_obj:
                        return True
                except Exception as message:
                    iw.print_status(message)
            return False
    def get_memory_size(self, db, key_obj, integerkey=False, is_64bit=False):
        """Return the stored value's size in bytes, or None when missing."""
        with self._env.begin(db=db, buffers=True) as txn:
            key_obj = serialize_key(key_obj, integerkey=integerkey, is_64bit=is_64bit)
            responds = None
            if key_obj:
                try:
                    value_obj = txn.get(key_obj)
                    if value_obj:
                        return len(value_obj)
                except Exception as message:
                    iw.print_status(message)
            return responds
    def get_value(
        self,
        db,
        key_obj,
        integerkey=False,
        is_64bit=False,
        get_deserialize=True,
        bytes_value=cf.ToBytesType.OBJ,
        compress_value=False,
    ):
        """Fetch one value for a scalar key, or a {key: value} dict for a
        list/set/tuple of keys.

        Missing scalar keys yield None; missing multi-keys are simply absent
        from the returned dict. get_deserialize=False returns raw value bytes.
        """
        with self._env.begin(db=db, buffers=True) as txn:
            if isinstance(key_obj, np.ndarray):
                key_obj = key_obj.tolist()
            if (
                isinstance(key_obj, list)
                or isinstance(key_obj, set)
                or isinstance(key_obj, tuple)
            ):
                # Batch lookup via getmulti; per-record decode errors are
                # logged and that record skipped.
                key_obj = [serialize_key(k, integerkey=integerkey) for k in key_obj]
                responds = dict()
                for k, v in txn.cursor(db).getmulti(key_obj):
                    if v:
                        if get_deserialize:
                            try:
                                k, v = deserialize(
                                    k,
                                    v,
                                    integerkey=integerkey,
                                    is_64bit=is_64bit,
                                    bytes_value=bytes_value,
                                    compress_value=compress_value,
                                )
                                responds[k] = v
                            except Exception as message:
                                iw.print_status(message)
                        else:
                            k = deserialize_key(
                                k, integerkey=integerkey, is_64bit=is_64bit
                            )
                            responds[k] = v
            else:
                key_obj = serialize_key(
                    key_obj, integerkey=integerkey, is_64bit=is_64bit
                )
                responds = None
                if key_obj:
                    try:
                        value_obj = txn.get(key_obj)
                        if value_obj:
                            if get_deserialize:
                                responds = deserialize_value(
                                    value_obj,
                                    bytes_value=bytes_value,
                                    compress_value=compress_value,
                                )
                            else:
                                responds = value_obj
                    except Exception as message:
                        iw.print_status(message)
        return responds
    def head(
        self,
        db,
        n,
        bytes_value=cf.ToBytesType.OBJ,
        from_i=0,
        integerkey=False,
        compress_value=False,
    ):
        """Return the first `n` records of `db` (starting at from_i) as a dict."""
        # defaultdict() without a factory behaves like a plain dict here.
        respond = defaultdict()
        for i, (k, v) in enumerate(
            self.get_db_iter(
                db,
                bytes_value=bytes_value,
                from_i=from_i,
                integerkey=integerkey,
                compress_value=compress_value,
            )
        ):
            respond[k] = v
            if i == n - 1:
                break
        return respond
    def get_db_iter(
        self,
        db,
        get_values=True,
        deserialize_obj=True,
        from_i=0,
        to_i=-1,
        integerkey=False,
        bytes_value=cf.ToBytesType.OBJ,
        compress_value=False,
    ):
        """Iterate `db` by record position, yielding keys or (key, value)
        pairs for positions in [from_i, to_i); to_i == -1 means the whole db."""
        if to_i == -1:
            to_i = self.get_db_size(db)
        with self._env.begin(db=db) as txn:
            cur = txn.cursor()
            for i, db_obj in enumerate(cur.iternext(values=get_values)):
                if i < from_i:
                    continue
                if i >= to_i:
                    break
                if get_values:
                    key, value = db_obj
                else:
                    key = db_obj
                try:
                    if deserialize_obj:
                        key = deserialize_key(key, integerkey=integerkey)
                        if get_values:
                            value = deserialize_value(
                                value,
                                bytes_value=bytes_value,
                                compress_value=compress_value,
                            )
                    if get_values:
                        return_obj = (key, value)
                        yield return_obj
                    else:
                        yield key
                # Todo: handlers
                except UnicodeDecodeError:
                    iw.print_status(f"UnicodeDecodeError: {i}")
                except Exception:
                    # NOTE(review): raising a fresh bare Exception discards the
                    # original error and traceback -- a bare `raise` would keep it.
                    iw.print_status(i)
                    raise Exception
    def get_db_size(self, db):
        """Return the number of entries stored in `db`."""
        with self._env.begin(db=db) as txn:
            return txn.stat()["entries"]
    def delete(self, db, key, integerkey=False, with_prefix=False):
        """Delete one or many keys; with_prefix expands each key to every key
        sharing that prefix. Returns the number of records actually deleted."""
        if not (
            isinstance(key, list) or isinstance(key, set) or isinstance(key, tuple)
        ):
            key = [key]
        if with_prefix:
            true_key = set()
            for k in key:
                for tmp_k in self.get_iter_with_prefix(
                    db, k, integerkey=integerkey, get_values=False
                ):
                    true_key.add(tmp_k)
            if true_key:
                key = list(true_key)
        deleted_items = 0
        with self.env.begin(db=db, write=True, buffers=True) as txn:
            for k in key:
                try:
                    status = txn.delete(serialize_key(k, integerkey))
                    if status:
                        deleted_items += 1
                except Exception as message:
                    iw.print_status(message)
        return deleted_items
    @staticmethod
    def write_bulk(
        env,
        db,
        data,
        sort_key=True,
        integerkey=False,
        bytes_value=cf.ToBytesType.OBJ,
        compress_value=False,
        one_sample_write=False,
    ):
        """Serialize and write `data` into `db`; returns the count of added records.

        On MapFullError the map is grown by 5GB and the batch retried; the
        serialization kwargs may be dropped on retry because the data is
        already in bytes and passes through preprocessing unchanged.
        one_sample_write uses per-record put() instead of putmulti().
        """
        data = preprocess_data_before_dump(
            data,
            bytes_value=bytes_value,
            integerkey=integerkey,
            compress_value=compress_value,
            sort_key=sort_key,
        )
        added_items = 0
        try:
            with env.begin(db=db, write=True, buffers=True) as txn:
                if not one_sample_write:
                    _, added_items = txn.cursor().putmulti(data)
                else:
                    for k, v in data:
                        txn.put(k, v)
                        added_items += 1
        except lmdb.MapFullError:
            curr_limit = env.info()["map_size"]
            new_limit = curr_limit + cf.SIZE_1GB * 5
            env.set_mapsize(new_limit)
            return DBCore.write_bulk(env, db, data, sort_key=False)
        except lmdb.BadValsizeError:
            iw.print_status(lmdb.BadValsizeError)
        except lmdb.BadTxnError:
            if one_sample_write:
                return DBCore.write_bulk(
                    env, db, data, sort_key=False, one_sample_write=True,
                )
        except Exception:
            # NOTE(review): re-raising a fresh Exception loses the original
            # error; a bare `raise` would preserve it.
            raise Exception
        return added_items
    @staticmethod
    def write_bulk_with_buffer(
        env,
        db,
        data,
        sort_key=True,
        integerkey=False,
        bytes_value=cf.ToBytesType.OBJ,
        compress_value=False,
        show_progress=True,
        step=10000,
        message="DB Write",
    ):
        """Like write_bulk, but flushes in chunks bounded by
        cf.LMDB_BUFF_BYTES_SIZE, with an optional tqdm progress bar."""
        data = preprocess_data_before_dump(
            data,
            bytes_value=bytes_value,
            integerkey=integerkey,
            compress_value=compress_value,
            sort_key=sort_key,
        )
        def update_desc():
            # Progress-bar label showing buffer fill percentage.
            return f"{message} buffer: {buff_size / cf.LMDB_BUFF_BYTES_SIZE * 100:.0f}%"
        p_bar = None
        buff_size = 0
        i_pre = 0
        if show_progress:
            p_bar = tqdm(total=len(data))
        for i, (k, v) in enumerate(data):
            if show_progress and i and i % step == 0:
                p_bar.update(step)
                p_bar.set_description(desc=update_desc())
            buff_size += len(k) + len(v)
            if buff_size >= cf.LMDB_BUFF_BYTES_SIZE:
                # Flush the slice [i_pre, i); data is already serialized.
                c = DBCore.write_bulk(env, db, data[i_pre:i], sort_key=False)
                if c != len(data[i_pre:i]):
                    iw.print_status(
                        f"WriteError: Missing data. Expected: {len(data[i_pre:i])} - Actual: {c}"
                    )
                i_pre = i
                buff_size = 0
        if buff_size:
            # Flush the remaining tail.
            DBCore.write_bulk(env, db, data[i_pre:], sort_key=False)
        if show_progress:
            p_bar.update(len(data) % step)
            p_bar.set_description(desc=update_desc())
            p_bar.close()
    def update_bulk_with_buffer(
        self,
        env,
        db,
        data,
        update_type=cf.DBUpdateType.SET,
        integerkey=False,
        bytes_value=cf.ToBytesType.INT_NUMPY,
        compress_value=False,
        show_progress=True,
        step=10000,
        message="",
        buff_limit=cf.LMDB_BUFF_BYTES_SIZE,
    ):
        """Merge the dict `data` into `db` record by record.

        SET mode unions new values with the stored set, skipping keys whose
        stored set already covers the new values; other modes concatenate.
        Writes are buffered until `buff_limit` bytes accumulate.
        """
        buff = []
        p_bar = None
        c_skip, c_update, c_new, c_buff = 0, 0, 0, 0
        def update_desc():
            # Progress label: skip/new/update counters plus buffer fill.
            return (
                f"{message}"
                f"|Skip:{c_skip:,}"
                f"|New:{c_new:,}"
                f"|Update:{c_update:,}"
                f"|Buff:{c_buff / buff_limit * 100:.0f}%"
            )
        if show_progress:
            p_bar = tqdm(total=len(data), desc=update_desc())
        for i, (k, v) in enumerate(data.items()):
            if show_progress and i and i % step == 0:
                p_bar.update(step)
                p_bar.set_description(update_desc())
            db_obj = self.get_value(
                db,
                k,
                integerkey=integerkey,
                bytes_value=bytes_value,
                compress_value=compress_value,
            )
            if update_type == cf.DBUpdateType.SET:
                if db_obj:
                    db_obj = set(db_obj)
                    v = set(v)
                    # Skip when the stored set already contains every new value.
                    if db_obj and len(v) <= len(db_obj) and db_obj.issuperset(v):
                        c_skip += 1
                        continue
                    if db_obj:
                        v.update(db_obj)
                        c_update += 1
                    else:
                        c_new += 1
                else:
                    c_new += 1
            else:
                # Non-SET mode: append stored values to the new ones.
                if db_obj:
                    v += db_obj
                    c_update += 1
                else:
                    c_new += 1
            k, v = serialize(
                k,
                v,
                integerkey=integerkey,
                bytes_value=bytes_value,
                compress_value=compress_value,
            )
            c_buff += len(k) + len(v)
            buff.append((k, v))
            if c_buff >= buff_limit:
                DBCore.write_bulk(env, db, buff)
                buff = []
                c_buff = 0
        if buff:
            DBCore.write_bulk(env, db, buff)
        if show_progress:
            p_bar.set_description(desc=update_desc())
            p_bar.close()
    def modify_db_compress_value(
        self,
        c_db,
        c_integerkey=False,
        c_bytes_value=cf.ToBytesType.OBJ,
        c_compress_value=False,
        n_integerkey=False,
        n_bytes_value=cf.ToBytesType.OBJ,
        n_compress_value=False,
        step=1000,
    ):
        """Re-encode every record of `c_db` in place from the current (c_*)
        format to the new (n_*) format, writing in buffered batches."""
        buff = []
        buff_size = 0
        def update_desc():
            return f"buff:{buff_size / cf.LMDB_BUFF_BYTES_SIZE * 100:.0f}%"
        p_bar = tqdm(total=self.get_db_size(c_db))
        for i, (k, v) in enumerate(
            self.get_db_iter(
                c_db,
                integerkey=c_integerkey,
                bytes_value=c_bytes_value,
                compress_value=c_compress_value,
            )
        ):
            k, v = serialize(
                k,
                v,
                integerkey=n_integerkey,
                bytes_value=n_bytes_value,
                compress_value=n_compress_value,
            )
            buff_size += len(k) + len(v)
            buff.append((k, v))
            if buff_size >= cf.LMDB_BUFF_BYTES_SIZE:
                self.write_bulk(self.env, c_db, buff)
                buff = []
                buff_size = 0
            if i and i % step == 0:
                p_bar.update(step)
                p_bar.set_description(desc=update_desc())
        if buff:
            self.write_bulk(self.env, c_db, buff)
    def drop_db(self, db):
        """Remove all entries of `db` (and the named db itself) in one transaction."""
        with self._env.begin(write=True) as in_txn:
            in_txn.drop(db)
            print(in_txn.stat())
    def copy_new_file(
        self, db_names, map_size, buff_size=cf.SIZE_512MB, compress=True, message=False,
    ):
        """Copy selected databases into a fresh LMDB file at `db_file + ".copy"`.

        db_names maps source db name -> {"name": target_name,
        "integerkey": bool (optional)}. When `compress` is set, values are
        zlib-compressed on the way over.
        NOTE(review): readers of the new file must zlib-decompress values
        themselves -- confirm callers expect that.
        """
        new_dir = self._db_file + ".copy"
        print(self._env.info())
        iw.print_status("%.2fGB" % (self._env.info()["map_size"] / cf.SIZE_1GB))
        save_drive = 0
        with closing(
            lmdb.open(
                new_dir,
                subdir=False,
                map_async=True,
                lock=False,
                map_size=map_size,
                max_dbs=len(db_names),
            )
        ) as env:
            print(env.info())
            for db_name_src, copy_args in db_names.items():
                db_name_tar = copy_args["name"]
                org_db = self._env.open_db(db_name_src)
                is_integerkey = False
                if copy_args.get("integerkey"):
                    is_integerkey = copy_args["integerkey"]
                tar_db = env.open_db(db_name_tar, integerkey=is_integerkey)
                org_db_n = self.get_db_size(org_db)
                iw.print_status(
                    f"\nCopy: {self._db_file} - {str(db_name_src)} --> {str(db_name_tar)}"
                )
                def update_desc():
                    # Progress label: bytes saved by compression and buffer size.
                    if compress:
                        return f"Save: {save_drive / cf.SIZE_1GB:.2f}GB|buff:{len_buff/cf.SIZE_1MB}MB"
                    else:
                        return f"buff:{len_buff/cf.SIZE_1MB:.2f}MB"
                with self._env.begin(db=org_db) as txn:
                    cur = txn.cursor()
                    buff = []
                    len_buff = 0
                    if message:
                        p_bar = tqdm(desc=update_desc(), total=org_db_n)
                    for i, (key, value) in enumerate(iter(cur)):
                        if message:
                            p_bar.update()
                        if message and i and i % 100000 == 0:
                            p_bar.set_description(desc=update_desc())
                        if compress:
                            old_size = len(value)
                            value = zlib.compress(value)
                            save_drive += old_size - len(value)
                        buff.append((key, value))
                        len_buff += len(value) + len(key)
                        if len_buff > buff_size:
                            if message:
                                p_bar.set_description(desc=update_desc())
                            DBCore.write_bulk(env, tar_db, buff)
                            buff.clear()
                            len_buff = 0
                            gc.collect()
                    if buff:
                        if message:
                            p_bar.set_description(desc=update_desc())
                        DBCore.write_bulk(env, tar_db, buff)
                        buff.clear()
                        gc.collect()
                    if message:
                        p_bar.close()
            iw.print_status(env.info())
            iw.print_status("%.2fGB" % (env.info()["map_size"] / cf.SIZE_1GB))
|
# Definition for a binary tree node.
class TreeNode:
    """Binary tree node holding a value and optional left/right children."""
    def __init__(self, x):
        self.val = x
        self.left = self.right = None
class Solution:
    """LeetCode 114: flatten a binary tree into a right-linked list in place."""
    def flatten(self, root: TreeNode) -> None:
        """Rearrange the tree so the right pointers form the preorder sequence."""
        if root is None:
            return
        # Flatten both subtrees first (post-order), then splice the flattened
        # left list between the root and the flattened right list.
        self.flatten(root.left)
        self.flatten(root.right)
        if root.left is not None:
            tail = root.left
            while tail.right is not None:
                tail = tail.right
            tail.right = root.right
            root.right = root.left
            root.left = None
# Build a small sample tree and flatten it in place:
#       1
#      / \
#     2   5
#    / \
#   3   4
t1 = TreeNode(1)
t1.left = TreeNode(2)
t1.left.left = TreeNode(3)
t1.right = TreeNode(5)
t1.left.right = TreeNode(4)
slu = Solution()
slu.flatten(t1)  # t1 now reads 1->2->3->4->5 via right pointers
|
from rc.test.util import Timer
from rc import gcloud
from rc.util import go, pmap, as_completed
def create_or_get(*, name, **kwargs):
    """Return the gcloud machine called `name`, creating it when absent."""
    machine = gcloud.get(name)
    if not machine:
        machine = gcloud.create(name=name, **kwargs)
    return machine
def test_create_delete_5_instance():
    """Integration test: concurrently create 5 gcloud instances, then delete
    them in parallel and verify they no longer exist."""
    futures = []
    machines = []
    with Timer('create 5 instances'):
        for i in range(5):
            futures.append(go(create_or_get, name="test-rc-node-"+str(i), machine_type="n1-standard-1", disk_size="20G", image_project='ubuntu-os-cloud', image_family='ubuntu-1804-lts',
                zone='us-west2-a', preemptible=False, firewall_allows=['tcp:8080']))
        for f in as_completed(futures):
            m = f.result()
            print("Created machine:", m.name)
            machines.append(m)
    # NOTE(review): dead reassignment (also switches list -> dict); `futures`
    # is never used again below.
    futures = {}
    def delete_print(m):
        # Delete one machine and log it; run in parallel via pmap below.
        m.delete()
        print("Deleted machine:", m.name)
    with Timer('delete 5 instances'):
        pmap(delete_print, machines)
    for m in machines:
        assert gcloud.get(m.name) is None
|
import zmq
import time
import subprocess
# REP server: receives "<action>: <ip>" requests and runs the matching command.
context = zmq.Context()
socket = context.socket(zmq.REP)
socket.bind('tcp://localhost:5555')

while True:
    message = socket.recv()
    # BUG FIX: decode the raw bytes before parsing. str(message) produced
    # "b'ban: 1.2.3.4'", so the action token was "b'ban" and never matched
    # 'ban'/'unban' -- every request answered 'Wrong action'.
    action, ip = message.decode('utf-8').split(': ')
    if action not in ['ban', 'unban']:
        socket.send(b'Wrong action')
    else:
        # Placeholder commands; both branches currently run 'ls -l'.
        if action == 'ban':
            command = 'ls -l'
        if action == 'unban':
            command = 'ls -l'
        subprocess.run(command.split(' '))
        # Send reply back to client
        socket.send(b'Done')
|
import ast
import heisenberg.library.orbit_plot
import heisenberg.util
import numpy as np
import optparse
import sys
class OptionParser:
    """optparse-based command line front end shared by the heisenberg subprograms."""
    def __init__ (self, *, module):
        """Build the parser for `module`, registering the options common to all
        subprograms; `module` supplies the description and package name."""
        help_prolog = heisenberg.util.wrap_and_indent(text=module.subprogram_description, indent_count=1)
        self.__op = optparse.OptionParser(usage='%prog [options]\n\n{0}'.format(help_prolog))
        self.__op.prog = module.__package__
        # NOTE(review): optparse does not type-convert string defaults, so when
        # --dt / --max-time are omitted their values stay as the strings
        # '0.001' / '50.0' rather than floats -- confirm downstream handling.
        self.__op.add_option(
            '--dt',
            dest='dt',
            default='0.001',
            type='float',
            help='Specifies the timestep for the curve integration.'
        )
        self.__op.add_option(
            '--max-time',
            dest='max_time',
            default='50.0',
            type='float',
            help='Specifies the max time to integrate the curve to.'
        )
        # NOTE(review): heisenberg.library.heisenberg_dynamics_context is used
        # here (and in parse_argv_and_validate) but no import of it is visible
        # in this file -- confirm it is imported transitively.
        self.__op.add_option(
            '--embedding-dimension',
            dest='embedding_dimension',
            default=1,
            type='int',
            help='Specifies the dimension of the embedding to use; the embedding solves for p_z in terms of the specified coordinates, with zero or more coordinates held constant depending on the dimension. Valid choices for this option are {0}. Default value is 1. See also --embedding-solution-sheet-index. If value is 1, then embedding is [p_y] |-> [[1,0,0],[0,p_y,p_z]]. If value is 2, then embedding is [p_x,p_y] |-> [[1,0,0],[p_x,p_y,p_z]]. If value is 3, then embedding is [x,p_x,p_y] |-> [[x,0,0],[p_x,p_y,p_z]]. If value is 5, then embedding is [x,y,z,p_x,p_y] |-> [[x,y,z],[p_x,p_y,p_z]].'.format(', '.join(str(d) for d in heisenberg.library.heisenberg_dynamics_context.Symbolic.valid_embedding_dimensions()))
        )
        self.__op.add_option(
            '--embedding-solution-sheet-index',
            dest='embedding_solution_sheet_index',
            default=0,
            type='int',
            help='Specifies which sheet of the solution for the embedding to use. There are two sheets, 0 and 1. Default value is 0. See also --embedding-dimension.'
        )
        self.__op.add_option(
            '--seed',
            dest='seed',
            default=666,
            type='int',
            help='Specifies the seed to use for pseudorandom number generation. Using the same seed should produce the same sequence of random numbers, and therefore provide reproducible program execution.'
        )
        self.__op.add_option(
            '--optimization-annulus-bounds',
            dest='optimization_annulus_bounds',
            type='string',
            default='[1.0e-12,1.0e-1]',
            help='Specifies the interval over which to randomly draw radii (uniform on log(r)) for the optimization procedure. Should have the form [low,high], where low and high are floating point literals and low <= high. If it is desired for the optimization to not leave the local minimum\'s neighborhood, than a suitably small upper bound must be chosen. Default is [1.0e-12,1.0e-1].'
        )
        self.__op.add_option(
            '--cut-off-initial-curve-tail',
            dest='cut_off_initial_curve_tail',
            action='store_true',
            default=False,
            help='Specifies that the initial curve data should be plotted trimmed exactly to the approximate period t_min (i.e. cut off the curve\'s "tail"). This presents a cleaner looking approximately periodic curve without any overlap. The default is to not cut off the initial curve tail.'
        )
        self.__op.add_option(
            '--dont-cut-off-optimized-curve-tail',
            dest='cut_off_optimized_curve_tail',
            action='store_false',
            default=True,
            help='Specifies that the optimized curve data should extend past the the approximate period t_min (i.e. don\'t cut off the curve\'s "tail"). This presents a somewhat messier looking approximately periodic curve because of the overlap, except that it carries more information. The default is to cut off the optimized curve tail.'
        )
        self.__op.add_option(
            '--quantities-to-plot',
            dest='quantities_to_plot',
            type='str',
            default=heisenberg.library.orbit_plot.default_quantities_to_plot,
            help='Specifies which quantities to include in the plot. Should be a semicolon-separated string, without spaces, with tokens selected from the following options: {0}. Note that because the value for this option may contain semicolons, which are a special character for some shells, you should quote the value, e.g. --quantities-to-plot="x,y;t,z;sqd". Default value is {1}'.format(';'.join(heisenberg.library.orbit_plot.valid_quantity_to_plot_v), heisenberg.library.orbit_plot.default_quantities_to_plot)
        )
        self.__op.add_option(
            '--disable-plot-decoration',
            dest='disable_plot_decoration',
            action='store_true',
            default=False,
            help='Disables plotting certain non-essential labels and decoration. Default behavior is to plot those things.'
        )
        self.__op.add_option(
            '--use-terse-plot-titles',
            dest='use_terse_plot_titles',
            action='store_true',
            default=False,
            help='Shortens the titles on plots; best used with --plot-size=3 to make the plot labels appear big relative to the plots themselves.'
        )
        self.__op.add_option(
            '--plot-size',
            dest='plot_size',
            default=10,
            type='int',
            help='Specifies the size of the plot, where the plot labels are assumed to have a fixed size. Thus a smaller value means the labels will appear bigger. The default value is 10.'
        )
        # Plot file type choices are derived from the matplotlib backend's
        # supported output formats.
        supported_plot_type_d = heisenberg.util.get_supported_plot_type_d()
        ext_v = sorted(list(supported_plot_type_d.keys()))
        self.__op.epilog = 'Available plot-types are:\n\n{0}'.format('\n'.join('    {0:4} : {1}'.format(ext, supported_plot_type_d[ext]) for ext in ext_v))
        help_string = 'Specifies the file type to use for plotting. Filetypes depend on the particular backend in use by matplotlib.pyplot. Available plot types are: {0}'.format(', '.join(ext_v))
        default_plot_type = 'png'
        assert default_plot_type in ext_v, '"png" not supported by the matplotlib.pyplot backend'
        self.__op.add_option(
            '--plot-type',
            dest='plot_type',
            choices=ext_v,
            default=default_plot_type,
            help=help_string
        )
    def parse_argv_and_validate (self):
        """Parse sys.argv and validate option values.

        Returns (options, args) on success; prints help and returns
        (None, None) on any validation failure.
        """
        options,args = self.__op.parse_args()
        if options.dt is None:
            print('required option --dt was not specified.')
            self.__op.print_help()
            return None,None
        if options.max_time is None:
            print('required option --max-time was not specified.')
            self.__op.print_help()
            return None,None
        if options.embedding_dimension not in heisenberg.library.heisenberg_dynamics_context.Symbolic.valid_embedding_dimensions():
            print('specified invalid value for --embedding-dimension.')
            self.__op.print_help()
            return None,None
        if options.embedding_solution_sheet_index not in [0,1]:
            print('specified invalid value for --embedding-solution-sheet-index.')
            self.__op.print_help()
            return None,None
        assert options.quantities_to_plot is not None
        print('options.quantities_to_plot =', options.quantities_to_plot)
        quantity_to_plot_v = options.quantities_to_plot.split(';')
        if not frozenset(quantity_to_plot_v).issubset(frozenset(heisenberg.library.orbit_plot.valid_quantity_to_plot_v)):
            print('specified invalid elements in --quantities-to-plot: {0}'.format(','.join(frozenset(quantity_to_plot_v).difference(frozenset(heisenberg.library.orbit_plot.valid_quantity_to_plot_v)))))
            self.__op.print_help()
            return None,None
        options.quantity_to_plot_v = quantity_to_plot_v
        # Parse options.optimization_annulus_bounds
        try:
            # literal_eval safely parses the "[low,high]" list literal.
            options.optimization_annulus_bound_v = ast.literal_eval(options.optimization_annulus_bounds)
            assert type(options.optimization_annulus_bound_v) == list, 'expected bracketed pair of floating point literals'
            assert len(options.optimization_annulus_bound_v) == 2, 'expected pair of floating point literals (but got {0} of them)'.format(len(options.optimization_annulus_bound_v))
            options.optimization_annulus_bound_v = np.array(options.optimization_annulus_bound_v)
            assert options.optimization_annulus_bound_v[0] <= options.optimization_annulus_bound_v[1], 'expected low <= high (but low = {0} and high = {1})'.format(options.optimization_annulus_bound_v[0], options.optimization_annulus_bound_v[1])
        except Exception as e:
            print('error {0} parsing --optimization-annulus-bounds value'.format(e))
            self.__op.print_help()
            return None,None
        # Retrieve and add the git commit (if available)
        options.heisenberg_git_commit = heisenberg.util.get_git_commit()
        # Add the commandline used in this OptionParser
        options.argv = sys.argv
        return options,args
    def add_option (self, *args, **kwargs):
        """Forward extra option definitions to the wrapped optparse parser."""
        return self.__op.add_option(*args, **kwargs)
    def print_help (self):
        """Print the wrapped parser's help text."""
        self.__op.print_help()
|
import sys
def read_input_definitions():
    """Read chipster-inputs.tsv into a dict {input_name: dataset_name}.

    Lines starting with '#' are treated as comments and skipped.
    """
    input_names = {}
    with open('chipster-inputs.tsv') as inputs:
        for line in inputs:
            # Strip only a trailing newline. The original line[:-1] chopped
            # the final character of the last line when the file did not end
            # with a newline.
            line = line.rstrip('\n')
            if line.startswith('#'):
                continue
            columns = line.split('\t')
            input_name = columns[0]
            dataset_name = columns[1]
            input_names[input_name] = dataset_name
    return input_names
def write_output_definitions(output_names):
    """Write {output_name: dataset_name} mappings to chipster-outputs.tsv."""
    with open('chipster-outputs.tsv', 'w') as outputs:
        for output_name, dataset_name in output_names.items():
            outputs.write(f'{output_name}\t{dataset_name}\n')
def remove_postfix(string, postfix):
    """Return `string` with a trailing `postfix` removed, if present."""
    # Guard against an empty postfix: string[:-len('')] is string[:-0],
    # which returns '' instead of the unchanged string.
    if postfix and string.endswith(postfix):
        return string[:-len(postfix)]
    return string
# Prints out version information.
#
def document_version(application, version_string):
    """Print a '## VERSION:' header for `application` plus each version line."""
    print(f'## VERSION: {application}')
    for line in version_string.splitlines():
        print(f'## {line}')


def document_python_version():
    """Document the version of the running Python interpreter."""
    info = sys.version_info
    version = '.'.join(str(part) for part in (info.major, info.minor, info.micro))
    document_version("python", version)
#!/usr/bin/env python
import elasticapm
from assemblyline_core.server_base import ServerBase
from assemblyline.common import forge
from assemblyline.common.metrics import MetricsFactory
from assemblyline.remote.datatypes import get_client
from assemblyline.remote.datatypes.queues.named import NamedQueue
from assemblyline.odm.messages.alerter_heartbeat import Metrics
ALERT_QUEUE_NAME = 'm-alert'
MAX_RETRIES = 10
class Alerter(ServerBase):
    """Service that drains the persistent alert queue and processes each message.

    Pops messages from a Redis-backed NamedQueue and hands them to the
    process_alert_message callable obtained from forge; failed messages are
    pushed back for retry up to MAX_RETRIES attempts.
    """
    def __init__(self):
        super().__init__('assemblyline.alerter')
        # Publish counters to the metrics sink.
        self.counter = MetricsFactory('alerter', Metrics)
        self.datastore = forge.get_datastore(self.config)
        self.persistent_redis = get_client(
            host=self.config.core.redis.persistent.host,
            port=self.config.core.redis.persistent.port,
            private=False,
        )
        self.process_alert_message = forge.get_process_alert_message()
        self.running = False
        self.alert_queue = NamedQueue(ALERT_QUEUE_NAME, self.persistent_redis)
        # Optional Elastic APM instrumentation, enabled when a server URL is
        # configured; apm_client stays None otherwise.
        if self.config.core.metrics.apm_server.server_url is not None:
            self.log.info(f"Exporting application metrics to: {self.config.core.metrics.apm_server.server_url}")
            elasticapm.instrument()
            self.apm_client = elasticapm.Client(server_url=self.config.core.metrics.apm_server.server_url,
                                                service_name="alerter")
        else:
            self.apm_client = None
    def close(self):
        """Stop metrics publishing and APM instrumentation on shutdown."""
        if self.counter:
            self.counter.stop()
        if self.apm_client:
            elasticapm.uninstrument()
    def run_once(self):
        """Pop one alert message (1s timeout) and process it.

        Returns the alert type on success; returns None when the queue was
        empty or processing raised.
        """
        alert = self.alert_queue.pop(timeout=1)
        if not alert:
            return
        # Start of process alert transaction
        if self.apm_client:
            self.apm_client.begin_transaction('Process alert message')
        self.counter.increment('received')
        try:
            alert_type = self.process_alert_message(self.counter, self.datastore, self.log, alert)
            # End of process alert transaction (success)
            if self.apm_client:
                self.apm_client.end_transaction(alert_type, 'success')
            return alert_type
        except Exception as ex: # pylint: disable=W0703
            # Track the attempt count on the message itself and re-queue it;
            # after MAX_RETRIES attempts the alert is dropped (logged only).
            retries = alert['alert_retries'] = alert.get('alert_retries', 0) + 1
            if retries > MAX_RETRIES:
                self.log.exception(f'Max retries exceeded for: {alert}')
            else:
                self.alert_queue.push(alert)
                # "Submission not finalized" is an expected transient state;
                # only unexpected errors are logged with a traceback.
                if 'Submission not finalized' not in str(ex):
                    self.log.exception(f'Unhandled exception processing: {alert}')
            # End of process alert transaction (failure)
            if self.apm_client:
                self.apm_client.end_transaction('unknown', 'exception')
    def try_run(self):
        # Main service loop: heartbeat, then process at most one alert per pass.
        while self.running:
            self.heartbeat()
            self.run_once()
# Entry point: run the alerter service until terminated.
if __name__ == "__main__":
    with Alerter() as alerter:
        alerter.serve_forever()
|
from .users import User
from .profiles import Profile
from .phone_codes import PhoneCode
|
from django.shortcuts import render
# Create your views here.
from django.http import HttpResponse
from django.template import loader
from django.db.models import Sum
import django
from .models import TypyBiletow,Transakcje,NosnikiKartonikowe,Nieimienne, MiejscaTransakcji, TypyUlgi, MetodyPlatnosci, NosnikiElektroniczne, Imienne,Ulgi, TypyNosnikow, Pasazerowie
import datetime
import json
import random
from ztm_app.forms import ConcessionForm, CardTypeForm, DeleteCardTypeForm, DeleteConcessionForm, UpdateConcessionForm, UpdateCardTypeForm
def index(request):
    """Entry page: choose the place where the transaction happens."""
    all_places = MiejscaTransakcji.objects.all()
    return render(
        request,
        template_name="landingPage/selectType.html",
        context={'places': all_places},
    )
def selectCard(request):
    """Card top-up start page."""
    return render(
        request,
        template_name="landingPage/cardTicket.html",
        context={'name': "Doladowanie karty"},
    )
def selectTicket(request):
    """Ticket activation start page."""
    return render(
        request,
        template_name="landingPage/activationTicket.html",
        context={'name': "Aktywuj bilet"},
    )
def zonesCarton(request):
    """Zone picker for carton tickets (distinct `strefa` values)."""
    distinct_zones = TypyBiletow.objects.order_by().values('strefa').distinct()
    context = {'name': "Bilet kartonikowy", 'zones': list(distinct_zones)}
    return render(request, template_name="landingPage/selectZone.html", context=context)
def timeCarton(request, zone):
    """Validity-time picker for short-term (< 4 days) carton tickets in `zone`."""
    short_term = TypyBiletow.objects.filter(
        czas_waznosci__lt=datetime.timedelta(days=4), strefa=zone
    )
    context = {
        'name': "Bilet kartonikowy",
        'time': list(short_term),
        'next': "{% url 'ztm_app:reductionCarton' %}",
    }
    return render(request, template_name="landingPage/selectTime.html", context=context)
def selectTicketCarton(request):
    """Validity-time picker over all short-term (< 4 days) carton tickets."""
    short_term = TypyBiletow.objects.filter(czas_waznosci__lt=datetime.timedelta(days=4))
    context = {
        'name': "Bilet kartonikowy",
        'time': list(short_term),
        'next': "{% url 'ztm_app:reductionCarton' %}",
    }
    return render(request, template_name="landingPage/selectTime.html", context=context)
def reductionCarton(request):
    """Concession (discount type) picker for carton tickets."""
    context = {'name': "Bilet kartonikowy", 'reduction': TypyUlgi.objects.all()}
    return render(request, template_name="landingPage/selectReduction.html", context=context)
def confirmCarton(request):
    """Payment method picker for a carton ticket purchase."""
    methods = MetodyPlatnosci.objects.all()
    return render(
        request,
        template_name="landingPage/selectPayment.html",
        context={'payment': methods},
    )
def confirmCard(request):
    """Payment method picker for a card top-up."""
    methods = MetodyPlatnosci.objects.all()
    return render(
        request,
        template_name="landingPage/selectPaymentCard.html",
        context={'payment': methods},
    )
def findTicket(request):
    """Locate a ticket by id (personal `Imienne` first, then anonymous
    `Nieimienne`) and show the activation page, or an error page when the
    ticket is unknown or already activated."""
    id_b = request.POST.get('id_b')
    # Leftover debug print(id_b) removed.
    user_ticket = list(Imienne.objects.filter(id_biletu = id_b))
    if len(user_ticket) == 0:
        user_ticket = list(Nieimienne.objects.filter(id_biletu = id_b))
        if len(user_ticket) == 0:
            return render(request, "landingPage/invalidTicketId.html")
    if user_ticket[0].data_aktywacji is not None:
        # The ticket was activated before; refuse a second activation.
        return render(request, "landingPage/ticketActivated.html")
    user_type = TypyBiletow.objects.get(id_typu_biletu = user_ticket[0].id_typu)
    contex = {
        'name' : "Aktywuj bilet",
        'ticket' : user_ticket[0],
        'type' : user_type,
    }
    return render(request, template_name = "landingPage/activate.html", context = contex)
def selectZoneTicket(request):
    """Start a card top-up: validate the card id, check existing ticket and
    concession validity, then show the zone/duration selection page.
    """
    id_t = request.POST.get('id_t')
    print(id_t)
    try:
        user_card = NosnikiElektroniczne.objects.get(id_nosnika=id_t)
    except:
        # NOTE(review): bare except also hides programming errors; a targeted
        # NosnikiElektroniczne.DoesNotExist would be safer — confirm intent.
        return render(request, template_name = "landingPage/invalidId.html")
    # Most recent ticket on this card first.
    user_ticket = list(Imienne.objects.filter(id_nosnika = id_t).order_by('-data_waznosci'))
    user_ulga = Ulgi.objects.get(id_ulgi = user_card.id_ulgi)
    # NOTE(review): raises IndexError when the card has no tickets — verify
    # that every card always owns at least one Imienne row.
    if user_ticket[0].data_waznosci is None or user_ticket[0].data_waznosci.date() > datetime.date.today(): # TODO: needs rework (original note: "DO ZMIANY")
        # The newest ticket is still valid (or open-ended) — block the top-up.
        contex = {
            'name': "Doladowanie karty",
            'cardId' : user_card,
            'ticket' : user_ticket[0],
            'ulga' : user_ulga,
        }
        return render(request, "landingPage/ticketExist.html", context = contex)
    if user_ulga.data_waznosci < datetime.date.today():
        # The concession attached to the card has expired.
        contex = {
            'name': "Doladowanie karty",
            'cardId' : user_card,
            'ticket' : user_ticket[0],
            'ulga' : user_ulga,
        }
        return render(request, "landingPage/reductionInvalid.html", context = contex)
    # Long-duration (>= 4 days) ticket types, longest first.
    time = TypyBiletow.objects.filter(czas_waznosci__gte=datetime.timedelta(days=4)).order_by('-czas_waznosci')
    contex = {
        'name': "Doladowanie karty",
        'cardId' : user_card,
        'ticket' : user_ticket[0],
        'ulga' : user_ulga,
        'time': list(time),
    }
    return render(request, template_name = "landingPage/selectCardZone.html", context = contex)
def transactionCarton(request):
    """Create a paper-ticket sale from a JSON POST body and render a thank-you page.

    Expected JSON body: ``{'place': id, 'payment': id,
    'items': [{'reduction': id, 'type': id, 'amount': n}, ...]}``.
    PUT/GET/DELETE are accepted but do nothing.
    """
    if request.method == 'POST':
        body_unicode = request.body.decode('utf-8')
        body = json.loads(body_unicode)
        print(int(body['place']))
        place = MiejscaTransakcji.objects.get(id_miejsca_transakcji = int(body['place']))
        payment = MetodyPlatnosci.objects.get(id_metody_platnosci = body['payment'])
        transaction = Transakcje.objects.create(id_miejsca_transakcji=place, id_metody_platnosci = payment)
        for item in body['items']:
            reduction = TypyUlgi.objects.get(id_typu_ulgi = item['reduction'])
            typeTicket = TypyBiletow.objects.get(id_typu_biletu= item['type'])
            # One Nieimienne row per requested ticket.
            for i in range(1, int(item['amount'])+1):
                # NOTE(review): random.randint codes can collide — confirm the
                # `kod` column is not required to be unique.
                ticket = NosnikiKartonikowe.objects.create(kod = random.randint(1,1000000))
                # NOTE(review): some FK fields receive raw ids, others model
                # instances (id_nosnika=ticket) — verify against the models.
                ticketCarton = Nieimienne.objects.create(id_transakcji= transaction.id_transakcji, id_nosnika=ticket,id_typu=typeTicket.id_typu_biletu,id_typu_ulgi= reduction)
        #print('transakcja', transaction)
        # context = {
        #     'transaction': transaction,
        # }
        print(place, payment, body)
    elif request.method == 'PUT':
        pass
    elif request.method == 'GET':
        pass
    elif request.method == 'DELETE':
        pass
    return render(request, template_name='landingPage/thankYou.html')
def transactionCard(request):
    """Create a personal (card) ticket sale from a JSON POST body.

    Expected JSON body: ``{'place': id, 'payment': id,
    'items': [{'reduction': id, 'type': id, 'client': id, 'card': id}, ...]}``.
    PUT/GET/DELETE are accepted but do nothing.
    """
    if request.method == 'POST':
        body_unicode = request.body.decode('utf-8')
        body = json.loads(body_unicode)
        print(int(body['place']))
        place = MiejscaTransakcji.objects.get(id_miejsca_transakcji = int(body['place']))
        payment = MetodyPlatnosci.objects.get(id_metody_platnosci = body['payment'])
        transaction = Transakcje.objects.create(id_miejsca_transakcji=place, id_metody_platnosci = payment)
        for item in body['items']:
            reduction = Ulgi.objects.get(id_ulgi = item['reduction'])
            typeTicket = TypyBiletow.objects.get(id_typu_biletu= item['type'])
            # NOTE(review): `client` is fetched but never used below, while
            # id_pasazera is assigned the *card* object — this looks like a
            # bug; verify whether id_pasazera should receive `client`.
            client = Pasazerowie.objects.get(id_pasazera = item['client'])
            card = NosnikiElektroniczne.objects.get(id_nosnika = item['card'])
            ticket = Imienne.objects.create(id_transakcji= transaction.id_transakcji, id_nosnika=card.id_nosnika, id_pasazera = card,
                                            id_typu=typeTicket.id_typu_biletu,id_ulgi= reduction.id_ulgi)
        #print('transakcja', transaction)
        # context = {
        #     'transaction': transaction,
        # }
        print(place, payment, body)
        pass
    elif request.method == 'PUT':
        pass
    elif request.method == 'GET':
        pass
    elif request.method == 'DELETE':
        pass
    return render(request, template_name='landingPage/index.html')
def transactionActivation(request):
    """Activate a ticket: stamp its activation time from a JSON POST body.

    Expected JSON body: ``{'ticket': id}``. Looks in personal (Imienne)
    tickets first, then anonymous (Nieimienne) ones.
    """
    if request.method == 'POST':
        body_unicode = request.body.decode('utf-8')
        body = json.loads(body_unicode)
        user_ticket = list(Imienne.objects.filter(id_biletu = body['ticket']))
        if len(user_ticket) == 0 :
            user_ticket = list(Nieimienne.objects.filter(id_biletu = body['ticket']))
        # NOTE(review): raises IndexError if the id matches neither table —
        # callers presumably validate first via findTicket; confirm.
        user_ticket[0].data_aktywacji = django.utils.timezone.now()
        print(user_ticket[0].data_aktywacji)
        print(datetime.date.today())
        user_ticket[0].save()
        print(body)
        pass
    elif request.method == 'PUT':
        pass
    elif request.method == 'GET':
        pass
    elif request.method == 'DELETE':
        pass
    return render(request, template_name='landingPage/index.html')
def addTicket(request):
    """Stub endpoint for adding a ticket from a JSON POST body.

    BUG FIX: the original ended with ``return null`` — ``null`` is not a
    Python name, so every POST raised NameError. Until the creation logic
    is implemented, render the index page like the sibling transaction
    views do.
    """
    if request.method == 'POST':
        body_unicode = request.body.decode('utf-8')
        body = json.loads(body_unicode)
        # TODO: create the ticket from `body` — not implemented yet.
        print(body)
    return render(request, template_name='landingPage/index.html')
def amount(request):
    """Render the ticket-amount selection page."""
    return render(request, template_name ="landingPage/selectAmount.html")
def continueCarton(request):
    """Render the 'continue buying paper tickets?' page."""
    return render(request, template_name="landingPage/continueCarton.html")
def thankYou(request):
    """Render the post-purchase thank-you page."""
    return render(request, template_name="landingPage/thankYou.html")
def end(request):
    """Render the session-end page."""
    return render(request, template_name="landingPage/end.html")
def reportPage(request):
    """Render the reporting landing page."""
    return render(request, template_name="reportPage/reportPage.html")
def editPage(request):
    """Render the data-edit page."""
    return render(request, template_name="reportPage/editPage.html")
def statsPage(request):
    """Render the statistics page.

    NOTE(review): this definition is shadowed by the later statsPage()
    further down in this module (which adds a context) — this one is
    dead code and can be removed.
    """
    return render(request, template_name="reportPage/statsPage.html")
def addConcession(request):
    """Show (GET) or process (POST) the add-concession-type form.

    BUG FIX: the original returned None for a POST with an invalid form
    (Django then raises "view didn't return an HttpResponse"); now the
    bound form is re-rendered so validation errors are shown.
    """
    if request.method == 'POST':
        form = ConcessionForm(request.POST)
        if form.is_valid():
            TypyUlgi.objects.create(kod_podstawowy=form.cleaned_data['code'], wielkosc_ulgi=form.cleaned_data['discount'], nazwa=form.cleaned_data['name'])
            return render(request, template_name="reportPage/addingSuccess.html", context={'name': 'Typ Ulgi'})
    else:
        form = ConcessionForm()
    # Reached on GET and on invalid POST (form carries the errors).
    return render(request, template_name="reportPage/addConcession.html", context={'form': form})
def addCardType(request):
    """Show (GET) or process (POST) the add-card-type form.

    BUG FIX: the original returned None for a POST with an invalid form;
    now the bound form is re-rendered so validation errors are shown.
    """
    if request.method == 'POST':
        form = CardTypeForm(request.POST)
        if form.is_valid():
            TypyNosnikow.objects.create(nazwa=form.cleaned_data['name'])
            return render(request, template_name="reportPage/addingSuccess.html", context={'name': 'Typ Nośnika'})
    else:
        form = CardTypeForm()
    # Reached on GET and on invalid POST (form carries the errors).
    return render(request, template_name="reportPage/addCardType.html", context={'form': form})
def deleteConcession(request):
    """Show (GET) or process (POST) the delete-concession-type form.

    BUG FIX: the original returned None for a POST with an invalid form;
    now the form page is re-rendered. The local ``id`` was also renamed
    to stop shadowing the builtin.
    """
    if request.method == 'POST':
        form = DeleteConcessionForm(request.POST)
        if form.is_valid():
            concession_id = form.cleaned_data['id']
            TypyUlgi.objects.filter(id_typu_ulgi=concession_id).delete()
            return render(request, template_name="reportPage/deletingSuccess.html", context={'name': 'Typ Ulgi'})
    else:
        form = DeleteConcessionForm()
    # Reached on GET and on invalid POST (form carries the errors).
    types = TypyUlgi.objects.all().values()
    return render(request, template_name="reportPage/deleteConcession.html", context={'form': form, 'types': types})
def deleteCardType(request):
    """Show (GET) or process (POST) the delete-card-type form.

    BUG FIX: the original returned None for a POST with an invalid form;
    now the form page is re-rendered. The local ``id`` was also renamed
    to stop shadowing the builtin.
    """
    if request.method == 'POST':
        form = DeleteCardTypeForm(request.POST)
        if form.is_valid():
            card_type_id = form.cleaned_data['id']
            TypyNosnikow.objects.filter(id_typu_nosnika=card_type_id).delete()
            return render(request, template_name="reportPage/deletingSuccess.html", context={'name': 'Typ Nośnika'})
    else:
        form = DeleteCardTypeForm()
    # Reached on GET and on invalid POST (form carries the errors).
    types = TypyNosnikow.objects.all().values()
    return render(request, template_name="reportPage/deleteCardType.html", context={'form': form, 'types': types})
def updateConcession(request):
    """Show (GET) or process (POST) the update-concession-type form.

    Only the submitted, non-empty fields overwrite the stored values.
    BUG FIX: the original returned None for a POST with an invalid form;
    now the form page is re-rendered with the validation errors.
    """
    if request.method == 'POST':
        form = UpdateConcessionForm(request.POST)
        if form.is_valid():
            concession = TypyUlgi.objects.get(id_typu_ulgi=form.cleaned_data['id'])
            if form.cleaned_data['code'] is not None:
                concession.kod_podstawowy = form.cleaned_data['code']
            if form.cleaned_data['discount'] is not None:
                concession.wielkosc_ulgi = form.cleaned_data['discount']
            if form.cleaned_data['name']:
                concession.nazwa = form.cleaned_data['name']
            concession.save()
            return render(request, template_name="reportPage/updateSuccess.html", context={'name': 'Typ Ulgi'})
    else:
        form = UpdateConcessionForm()
    # Reached on GET and on invalid POST (form carries the errors).
    types = TypyUlgi.objects.all().order_by('id_typu_ulgi').values()
    return render(request, template_name="reportPage/updateConcession.html", context={'form': form, 'types': types})
def updateCardType(request):
    """Show (GET) or process (POST) the update-card-type form.

    BUG FIX: the original returned None for a POST with an invalid form;
    now the form page is re-rendered with the validation errors.
    """
    if request.method == 'POST':
        form = UpdateCardTypeForm(request.POST)
        if form.is_valid():
            card_type = TypyNosnikow.objects.get(id_typu_nosnika=form.cleaned_data['id'])
            if form.cleaned_data['name']:
                card_type.nazwa = form.cleaned_data['name']
            card_type.save()
            return render(request, template_name="reportPage/updateSuccess.html", context={'name': 'Typ Nośnika'})
    else:
        form = UpdateCardTypeForm()
    # Reached on GET and on invalid POST (form carries the errors).
    types = TypyNosnikow.objects.all().order_by('id_typu_nosnika').values()
    return render(request, template_name="reportPage/updateCardType.html", context={'form': form, 'types': types})
def statsPage(request):
    """Render the statistics menu listing the available report categories."""
    statistics = ['brak', 'Bilety', 'MiejscaTransakcji', 'Transakcje']
    return render(request, template_name="reportPage/statsPage.html",
                  context={'statistics': statistics})
def ticketStats(request):
    """Compute ticket statistics (counts, most popular types/concessions,
    percentage shares) and render the ticket-stats report page.

    NOTE(review): every percentage below divides by ticket counts — with an
    empty database this raises ZeroDivisionError; confirm that is acceptable.
    """
    numberOfElectronicTickets = Imienne.objects.filter().count()
    numberOfPaperTickets = Nieimienne.objects.filter().count()
    tickets = TypyBiletow.objects.all()
    # Find the paper (Nieimienne) ticket type with the most sold tickets.
    tmp_k=0;
    liczba_biletow_k=0;
    for ticket in tickets:
        tmp_k = Nieimienne.objects.filter(id_typu = ticket.id_typu_biletu).count()
        if tmp_k > liczba_biletow_k:
            liczba_biletow_k = tmp_k
            mostPopularPaperTicketId = ticket.id_typu_biletu
    e_procent = (numberOfElectronicTickets/(numberOfElectronicTickets+numberOfPaperTickets))*100
    e_procent = round(e_procent, 2)
    p_procent = (numberOfPaperTickets/(numberOfElectronicTickets+numberOfPaperTickets))*100
    p_procent = round(p_procent, 2)
    mostPopularPaperTicketProcent = (liczba_biletow_k/numberOfPaperTickets)*100
    mostPopularPaperTicketProcent = round(mostPopularPaperTicketProcent, 2)
    # NOTE(review): mostPopularPaperTicketId is unbound when no paper tickets
    # exist — NameError on the next line in that case.
    mostPopularPaperTicket = TypyBiletow.objects.filter(id_typu_biletu = mostPopularPaperTicketId)[0].czas_waznosci
    # Same scan for electronic (Imienne) tickets.
    tmp_e=0;
    liczba_biletow_e=0;
    for ticket in tickets:
        tmp_e = Imienne.objects.filter(id_typu = ticket.id_typu_biletu).count()
        if tmp_e > liczba_biletow_e:
            liczba_biletow_e = tmp_e
            mostPopularElectronicTicketId = ticket.id_typu_biletu
    mostPopularElectronicTicketProcent = (liczba_biletow_e/numberOfElectronicTickets)*100
    mostPopularElectronicTicketProcent = round(mostPopularElectronicTicketProcent, 2)
    mostPopularElectronicTicket = TypyBiletow.objects.filter(id_typu_biletu = mostPopularElectronicTicketId)[0].czas_waznosci
    # Most popular concession across both ticket kinds.
    conncessions = TypyUlgi.objects.all()
    tmp_u_n=0
    tmp_u_i=0
    tmp_u=0
    liczba_biletow_u=0;
    for concession in conncessions:
        if concession.id_typu_ulgi==7:
            # NOTE(review): `break` stops the whole loop at id 7, skipping all
            # later concessions too — if only id 7 should be excluded this
            # should be `continue`; confirm intent.
            break;
        tmp_u_n = Nieimienne.objects.filter(id_typu_ulgi = concession.id_typu_ulgi).count()
        tmp_u_i = Ulgi.objects.filter(id_typu_ulgi = concession.id_typu_ulgi).count()
        tmp_u = tmp_u_i+tmp_u_n
        if tmp_u > liczba_biletow_u:
            liczba_biletow_u = tmp_u
            mostPopularConcessionId = concession.id_typu_ulgi
    mostPopularConcession = TypyUlgi.objects.filter(id_typu_ulgi = mostPopularConcessionId)[0].nazwa
    u_procent = (liczba_biletow_u/(numberOfElectronicTickets+numberOfPaperTickets))*100
    u_procent = round(u_procent, 2)
    return render(request, template_name = "reportPage/ticketStats.html", context =
    {'numberOfElectronicTickets': numberOfElectronicTickets, 'numberOfPaperTickets': numberOfPaperTickets,
    'mostPopularPaperTicket': mostPopularPaperTicket, 'liczba_biletow_k': liczba_biletow_k, 'mostPopularPaperTicketProcent': mostPopularPaperTicketProcent,
    'mostPopularConcession': mostPopularConcession, 'liczba_biletow_u': liczba_biletow_u,
    'p_procent': p_procent, 'e_procent': e_procent, 'u_procent': u_procent,
    'mostPopularElectronicTicket': mostPopularElectronicTicket, 'liczba_biletow_e': liczba_biletow_e,
    'mostPopularElectronicTicketProcent': mostPopularElectronicTicketProcent})
def transactionPlaceStats(request):
    """Compute per-sale-point statistics (busiest point, highest income)
    and render the transaction-place report page.

    NOTE(review): when there are no transactions, total_income is None
    (aggregate over an empty set) and the percentage divisions below fail;
    the *Id variables are also unbound in that case — confirm acceptable.
    """
    total_income_dictionary = Transakcje.objects.aggregate(Sum('kwota'))
    total_income = total_income_dictionary['kwota__sum']
    places = MiejscaTransakcji.objects.all()
    tmp=0;
    tmp_k=0
    max_income=0
    number_of_transactions=0;
    most_popular_place_income=0
    for place in places:
        transactions = Transakcje.objects.filter(id_miejsca_transakcji = place.id_miejsca_transakcji)
        tmp = transactions.count()
        # Per-place income, accumulated transaction by transaction.
        tmp_k=0
        for transaction in transactions:
            tmp_k += transaction.kwota
        if tmp_k > max_income:
            max_income = tmp_k
            maxIncomePlaceId = place.id_miejsca_transakcji
        if tmp > number_of_transactions:
            number_of_transactions = tmp
            most_popular_place_income=tmp_k
            mostPopularTransactionPlaceId = place.id_miejsca_transakcji
    max_income_procent = (max_income/(total_income))*100
    max_income_procent = round(max_income_procent, 2)
    most_popular_place_income_procent = (most_popular_place_income/(total_income))*100
    most_popular_place_income_procent = round(most_popular_place_income_procent, 2)
    mostPopularTransactionPlace = MiejscaTransakcji.objects.filter(id_miejsca_transakcji = mostPopularTransactionPlaceId)[0].nazwa
    maxIncomePlace = MiejscaTransakcji.objects.filter(id_miejsca_transakcji = maxIncomePlaceId)[0].nazwa
    most_popular_place_income = round(most_popular_place_income, 2)
    return render(request, template_name = "reportPage/transactionPlaceStats.html", context =
    {'number_of_transactions': number_of_transactions, 'max_income': max_income, 'most_popular_place_income': most_popular_place_income,
    'mostPopularTransactionPlace': mostPopularTransactionPlace, 'maxIncomePlace': maxIncomePlace,
    'total_income': total_income, 'max_income_procent': max_income_procent,
    'most_popular_place_income_procent': most_popular_place_income_procent})
def transactionStats(request):
    """Compute overall transaction statistics (total/average income, most
    popular payment method) and render the transaction report page.

    NOTE(review): with zero transactions, total_income is None and the
    average divides by zero — confirm that case cannot occur in practice.
    """
    total_income_dictionary = Transakcje.objects.aggregate(Sum('kwota'))
    total_income = total_income_dictionary['kwota__sum']
    number_of_transactions = Transakcje.objects.all().count()
    average_income_of_transaction = total_income/number_of_transactions
    average_income_of_transaction = round(average_income_of_transaction, 2)
    # `transactions` is fetched but not used below — kept as-is.
    transactions = Transakcje.objects.all()
    payments = MetodyPlatnosci.objects.all()
    # Find the payment method used by the most transactions.
    tmp_t=0
    liczba_transakcji_m=0
    for payment in payments:
        tmp_t = Transakcje.objects.filter(id_metody_platnosci = payment.id_metody_platnosci).count()
        if tmp_t > liczba_transakcji_m:
            liczba_transakcji_m = tmp_t
            mostPopularPaymentMethodId = payment.id_metody_platnosci
    mostPopularPaymentMethod = MetodyPlatnosci.objects.filter(id_metody_platnosci = mostPopularPaymentMethodId)[0].nazwa
    return render(request, template_name = "reportPage/transactionStats.html", context =
    {'total_income': total_income, 'number_of_transactions': number_of_transactions,
    'average_income_of_transaction': average_income_of_transaction,
    'mostPopularPaymentMethod': mostPopularPaymentMethod})
|
from bottleneck.slow.func import *
from bottleneck.slow.move import *
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.13.7
# kernelspec:
# display_name: 'Python 3.7.9 64-bit (''workflow-calcium-imaging'': conda)'
# name: python379jvsc74a57bd01a512f474e195e32ad84236879d3bb44800a92b431919ef0b10d543f5012a23c
# ---
# # Download example dataset
#
# + This workflow will need two-photon calcium imaging data collected from either ScanImage or Scanbox and the processed with Suite2p or CaImAn. We provide an example dataset to be downloaded to run through the workflow. This notebook walks you through the process to download the dataset.
#
# ## Install `djarchive-client`
#
# + The example dataset is hosted on `djarchive`, an AWS storage.
#
# + We provide a client package, [djarchive-client](https://github.com/datajoint/djarchive-client), to download the data which can be installed with pip:
# NOTE: this is a jupytext "light"-format notebook; the next line is a shell
# command cell (`pip install ...`) and is NOT valid plain Python — run it in
# a notebook or terminal, not via `python file.py`.
pip install git+https://github.com/datajoint/djarchive-client.git
# ## Download calcium imaging example datasets using `djarchive-client`
import djarchive_client
client = djarchive_client.client()
# Browse the datasets that are available on `djarchive`:
list(client.datasets())
# Each of the datasets have different versions associated with the version of the `workflow-calcium-imaging` package. Browse the revisions:
list(client.revisions())
# To download the dataset, let's prepare a root directory, for example in `/tmp`:
# mkdir /tmp/example_data
# Get the dataset revision with the current version of `workflow-calcium-imaging`:
from workflow_calcium_imaging import version
# Dataset revisions use underscores where the package version uses dots.
revision = version.__version__.replace('.', '_')
revision
# Run download for a given dataset and revision:
client.download('workflow-calcium-imaging-test-set', target_directory='/tmp/example_data', revision=revision)
# ## Directory structure
#
# + After downloading, the directory will be organized as follows:
#
# ```
# /tmp/example_data/
# - subject3/
# - 210107_run00_orientation_8dir/
# - run00_orientation_8dir_000_000.sbx
# - run00_orientation_8dir_000_000.mat
# - suite2p/
# - combined
# - plane0
# - plane1
# - plane2
# - plane3
# - subject7/
# - session1
# - suite2p
# - plane0
# ```
#
# + subject 3 data is recorded with Scanbox and processed with Suite2p.
#
# + subject 7 data is recorded with ScanImage and processed with Suite2p.
#
# + `element-calcium-imaging` and `workflow-calcium-imaging` also support ingestion of data processed with CaImAn.
#
# + We will use the dataset for subject 3 as an example for the rest of the notebooks. If you use your own dataset for the workflow, change the path accordingly.
#
# ## Next step
#
# + In the next notebook ([01-configure](01-configure.ipynb)) we will set up the configuration file for the workflow.
|
import re
from flatland import String
from flatland.validation import (
IsEmail,
HTTPURLValidator,
URLCanonicalizer,
URLValidator,
)
from flatland.validation.network import _url_parts
from tests._util import eq_, unicode_coercion_allowed
def email(value):
    """Build a non-stripping String element named 'email' for validator tests."""
    element = String(value, name=u'email', strip=False)
    return element
def assert_email_not_valid(value, kw=None):
    """Assert that *value* fails IsEmail validation.

    :param kw: optional keyword arguments for the IsEmail validator.

    FIX: the mutable default argument ``kw={}`` was replaced by the None
    sentinel — a shared default dict is a classic Python pitfall.
    """
    validator = IsEmail(**(kw or {}))
    el = email(value)
    assert not validator.validate(el, None)
    assert el.errors
def assert_email_valid(value, kw=None):
    """Assert that *value* passes IsEmail validation.

    :param kw: optional keyword arguments for the IsEmail validator.

    FIX: mutable default ``kw={}`` replaced by the None sentinel.
    """
    validator = IsEmail(**(kw or {}))
    el = email(value)
    assert validator.validate(el, None)
    assert not el.errors
def test_email():
    """Valid addresses, including unusual-but-legal local parts.

    NOTE(review): yield-based parametrized tests are nose-style; modern
    pytest no longer collects them — confirm the test runner in use.
    """
    for addr in (u'bob@noob.com', u'bob@noob.frizbit', u'#"$!+,,@noob.c',
                 u'bob@bob-bob.bob'):
        yield assert_email_valid, addr
def test_email_idna():
    """Non-ASCII domains are accepted via IDNA encoding."""
    with unicode_coercion_allowed():
        assert_email_valid(u'bob@snow\u2603man.com')
def test_email_non_local():
    """Dotless hosts are rejected by default (non_local=True)."""
    assert_email_not_valid(u'root@localhost')
def test_email_non_local_ok():
    """Dotless hosts pass when non_local is disabled."""
    assert_email_valid(u'root@localhost', {'non_local': False})
def test_email_altlocal():
    """A custom local_part_pattern restricts the accepted local parts."""
    override = dict(local_part_pattern=re.compile(u'^bob$'))
    assert_email_valid(u'bob@bob.com', override)
    assert_email_not_valid(u'foo@bar.com', override)
def test_email_bogus():
    """Malformed addresses: bad hosts, whitespace, over-long labels, etc."""
    c64 = u'x' * 64
    c63 = u'x' * 63
    for addr in (u'bob@zig..', u'bob@', u'@bob.com', u'@', u'snork',
                 u'bob@zig:zag.com', u'bob@zig zag.com', u'bob@zig/zag.com',
                 u' @zig.com', u'\t\t@zag.com',
                 u'bob@%s.com' % c64,
                 u'bob@%s.%s.%s.%s.com' % (c63, c63, c63, c63),
                 u'foo.com', u'bob@bob_bob.com', u''):
        yield assert_email_not_valid, addr
def scalar(value):
    """Build a String element named 'test' for the URL-validator tests."""
    element = String(value, name=u'test')
    return element
def test_url_validator_default():
    """A well-formed URL passes with the default validator settings."""
    v = URLValidator()
    el = scalar(u'http://me:you@there/path#fragment')
    assert v.validate(el, None)
    assert not el.errors
def test_url_validator_schemes():
    """URLs with a disallowed scheme fail with the blocked_scheme message."""
    v = URLValidator(allowed_schemes=(), blocked_scheme='X')
    el = scalar(u'http://me:you@there/path#fragment')
    assert not v.validate(el, None)
    eq_(el.errors, [u'X'])
    v = URLValidator(allowed_schemes=('https',), blocked_scheme='X')
    el = scalar(u'http://me:you@there/path#fragment')
    assert not v.validate(el, None)
    eq_(el.errors, [u'X'])
def test_url_validator_parts():
    """URLs containing a disallowed part fail with the blocked_part message."""
    v = URLValidator(allowed_parts=(), blocked_part='X')
    el = scalar(u'http://me:you@there/path#fragment')
    assert not v.validate(el, None)
    eq_(el.errors, [u'X'])
    # Allowing every part accepts the full URL.
    v = URLValidator(allowed_parts=_url_parts)
    el = scalar(u'http://me:you@there/path#fragment')
    assert v.validate(el, None)
    assert not el.errors
    # scheme+netloc only: passes without a path...
    v = URLValidator(allowed_parts=('scheme', 'netloc'))
    el = scalar(u'http://blarg')
    assert v.validate(el, None)
    assert not el.errors
    # ...but fails once a path ('/') appears.
    v = URLValidator(allowed_parts=('scheme', 'netloc'), blocked_part='X')
    el = scalar(u'http://blarg/')
    assert not v.validate(el, None)
    eq_(el.errors, [u'X'])
def test_http_validator_default():
    """HTTPURLValidator accepts plain http URLs and flags userinfo /
    schemeless input.

    BUG FIX: two checks were written as the bare expression
    ``not v.validate(el, None)`` with no ``assert``, so they never
    verified anything — the missing asserts are restored.
    """
    v = HTTPURLValidator(forbidden_part='X')
    el = scalar(u'http://there/path#fragment')
    assert v.validate(el, None)
    assert not el.errors
    # URLs embedding user:password are rejected with the forbidden_part message.
    el = scalar(u'http://phis:ing@there/path#fragment')
    assert not v.validate(el, None)
    eq_(el.errors, [u'X'])
    # A bare hostname without a scheme is not a valid URL.
    el = scalar('www.example.com')
    assert not v.validate(el, None)
    eq_(el.errors, ['test is not a valid URL.'])
def test_http_validator_schemes():
    """required_parts controls which schemes (including empty) are accepted."""
    v = HTTPURLValidator()
    el = scalar(u'http://there/path')
    assert v.validate(el, None)
    assert not el.errors
    # Scheme-relative URL fails under the defaults.
    el = scalar(u'//there/path')
    assert not v.validate(el, None)
    eq_(el.errors, ['test is not a valid URL.'])
    # Only https or the empty scheme allowed:
    v = HTTPURLValidator(required_parts=dict(scheme=(u'https', u''),
                                             hostname=True))
    el = scalar(u'http://there/path')
    assert not v.validate(el, None)
    eq_(el.errors, [u'test is not a valid URL.'])
    el = scalar(u'https://there/path')
    assert v.validate(el, None)
    assert not el.errors
    el = scalar(u'//there/path')
    assert v.validate(el, None)
    assert not el.errors
def test_url_canonicalizer_default():
    """By default only the fragment is discarded during canonicalization."""
    v = URLCanonicalizer()
    el = scalar(u'http://localhost/#foo')
    eq_(el.value, u'http://localhost/#foo')
    assert v.validate(el, None)
    eq_(el.value, u'http://localhost/')
    assert not el.errors
def test_url_canonicalizer_want_none():
    """Discarding every part leaves an empty value."""
    v = URLCanonicalizer(discard_parts=_url_parts)
    el = scalar(u'http://me:you@there/path#fragment')
    eq_(el.value, u'http://me:you@there/path#fragment')
    assert v.validate(el, None)
    eq_(el.value, u'')
    assert not el.errors
def test_url_canonicalizer_want_one():
    """Keeping only the first part (the scheme) yields 'http://'."""
    v = URLCanonicalizer(discard_parts=_url_parts[1:])
    el = scalar(u'http://me:you@there/path#fragment')
    eq_(el.value, u'http://me:you@there/path#fragment')
    assert v.validate(el, None)
    eq_(el.value, u'http://')
    assert not el.errors
def test_url_canonicalizer_want_all():
    """Discarding nothing leaves the URL untouched."""
    v = URLCanonicalizer(discard_parts=())
    el = scalar(u'http://me:you@there/path#fragment')
    eq_(el.value, u'http://me:you@there/path#fragment')
    assert v.validate(el, None)
    eq_(el.value, u'http://me:you@there/path#fragment')
    assert not el.errors
|
import random
class Book:
    """Simple book record with a pseudo-randomly priced valuation."""

    def __init__(self, description, title, author, page, images, publish):
        self.description = description  # description / genre
        self.title = title              # book title
        self.author = author            # author
        self.page = page                # number of pages
        self.images = images            # number of illustrations
        self.publish = publish          # publisher

    def Price(self, title):
        """Return a pseudo-random price from the page and image counts.

        BUG FIX: the original ended with ``return print``, returning the
        builtin print function instead of the computed price — every
        caller doing arithmetic with the result crashed with TypeError.
        (The unused *title* parameter is kept for interface compatibility.)
        """
        page_b = random.randrange(100)
        images_b = random.randrange(100)
        price = page_b * self.page + images_b * self.images
        return price

    def Info(self):
        """Print all attributes of the book."""
        print("Book description:", self.description)
        print("Book title:", self.title)
        print("Book author:", self.author)
        print("Book page:", self.page)
        print("Book images:", self.images)
        print("Book publish:", self.publish, "\n")
def MeanPriceByAuthor(bookList, author):
    """Return the average Price() of books by *author*, or 0 if none match."""
    total = 0
    count = 0
    for item in bookList:
        if item.author != author:
            continue
        total += item.Price(item.title)
        count += 1
    return total / count if count else 0
def MeanPriceByPublisher(bookList, publish):
    """Return the average Price() of books from *publish*, or 0 if none match."""
    total = 0
    count = 0
    for item in bookList:
        if item.publish != publish:
            continue
        total += item.Price(item.title)
        count += 1
    return total / count if count else 0
# Demo script: build a few books, print them, and report average prices.
listOfBooks = []
b1 = Book("роман", "Ідіот","Достоєвський",1,12, "Ranok")
b2 = Book("повість", "Ідіот","Достоєвський",1,1232,"abbb ")
b3 = Book("драма", "Ідіот","Достоєвський",1,1243,"lalala")
# NOTE(review): b4 is created but never appended to listOfBooks — intentional?
b4 = Book("драма", "Злочин і кара","Достоєвський",1,2,"lalala")
listOfBooks.append(b1)
listOfBooks.append(b2)
listOfBooks.append(b3)
b1.Info()
b2.Info()
b3.Info()
# Return values discarded; these calls only exercise Price().
b1.Price("Ідіот")
b3.Price("Ідіот")
# NOTE(review): as originally written, Book.Price returns the builtin print
# (it ends with `return print`), so the += inside MeanPriceByAuthor raises
# TypeError here — see the fix proposed on Book.Price.
mean1 = MeanPriceByAuthor(listOfBooks, "Достоєвський")
print("Середня ціна автора = ", mean1)
mean2 = MeanPriceByPublisher(listOfBooks, "lalala")
print("Середня ціна видавництва = ", mean2)
|
import os
import math
import pandas as pd
import sklearn.metrics as metrics
from sklearn.ensemble import RandomForestClassifier
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import classification_report, confusion_matrix, roc_auc_score, accuracy_score
from sklearn.metrics import average_precision_score
from numpyencoder import NumpyEncoder
from get_data import read_params
import argparse
import joblib
import json
def train_and_evaluate(config_path):
    """
    This function trains and evaluates a machine learning model on the dataset.

    Reads paths, model choice and report targets from a YAML config, trains
    either a logistic-regression or random-forest classifier, writes PRC/ROC
    point files and a scores JSON, and dumps the fitted model with joblib.

    :param config_path: the path of the config file to use
    """
    # Read configuration options
    config = read_params(config_path)
    model_type = config["train_and_evaluate"]["model"]
    test_data_path = config["split_data"]["test_path"]
    train_data_path = config["split_data"]["train_path"]
    random_state = config["base"]["random_state"]
    model_dir = config["model_dir"]
    target = [config["base"]["target_col"]]
    scores_file = config["reports"]["scores"]
    prc_file = config["reports"]["prc"]
    roc_file = config["reports"]["roc"]
    # Load training and validation datasets
    train = pd.read_csv(train_data_path, sep=",")
    test = pd.read_csv(test_data_path, sep=",")
    # Separate features (x) from label (y)
    train_y = train[target]
    test_y = test[target]
    train_x = train.drop(target, axis=1)
    test_x = test.drop(target, axis=1)
    if model_type == "logistic_regression":
        # Build logistic regression model
        # NOTE(review): .fit() here is redundant — the model is fitted again
        # below; the second fit simply retrains from scratch.
        model = LogisticRegression(solver='sag', random_state=random_state).fit(train_x, train_y)
    elif model_type == "random_forest":
        # Build random forest model
        model = RandomForestClassifier(n_estimators=50)
    else:
        # Unknown model type: silently do nothing (no reports, no model file).
        return
    # Fit the model to the training data
    model.fit(train_x, train_y)
    # Report training set score
    train_score = model.score(train_x, train_y) * 100
    print(train_score)
    # Report test set score
    test_score = model.score(test_x, test_y) * 100
    print(test_score)
    # Predict output for observations in validation set
    predicted_val = model.predict(test_x)
    # Calculate performance metrics
    # NOTE(review): curves computed from hard 0/1 predictions give only a few
    # distinct thresholds; predict_proba scores would yield real curves —
    # confirm this is intended.
    precision, recall, prc_thresholds = metrics.precision_recall_curve(test_y, predicted_val)
    fpr, tpr, roc_thresholds = metrics.roc_curve(test_y, predicted_val)
    # Subsample the PRC to at most ~1000 points for the report file.
    nth_point = math.ceil(len(prc_thresholds) / 1000)
    prc_points = list(zip(precision, recall, prc_thresholds))[::nth_point]
    with open(prc_file, "w") as fd:
        prcs = {
            "prc": [
                {"precision": p, "recall": r, "threshold": t}
                for p, r, t in prc_points
            ]
        }
        json.dump(prcs, fd, indent=4, cls=NumpyEncoder)
    with open(roc_file, "w") as fd:
        rocs = {
            "roc": [
                {"fpr": fp, "tpr": tp, "threshold": t}
                for fp, tp, t in zip(fpr, tpr, roc_thresholds)
            ]
        }
        json.dump(rocs, fd, indent=4, cls=NumpyEncoder)
    # Print classification report
    print(classification_report(test_y, predicted_val))
    # Confusion Matrix and plot
    cm = confusion_matrix(test_y, predicted_val)
    print(cm)
    # Side-by-side actual vs. predicted labels, saved for inspection.
    df1 = pd.DataFrame(predicted_val, columns=['Predicted'])
    df_cm = pd.concat([test_y, df1], axis=1)
    print(df_cm)
    df_cm.to_csv('cm.csv', index=False)
    # Receiver operating characteristic - area under curve
    roc_auc = roc_auc_score(test_y, model.predict_proba(test_x)[:, 1])
    print('ROC_AUC:{0:0.2f}'.format(roc_auc))
    # Model accuracy
    model_accuracy = accuracy_score(test_y, predicted_val)
    print('Model Accuracy:{0:0.2f}'.format(model_accuracy))
    # Average precision score
    average_precision = average_precision_score(test_y, predicted_val)
    print('Average precision-recall score: {0:0.2f}'.format(average_precision))
    with open(scores_file, "w") as f:
        scores = {
            "train_score": train_score,
            "test_score": test_score,
            "roc_auc": roc_auc,
            "Precision": list(precision),
            "Recall": list(recall),
            "Average precision": average_precision,
            "Model Accuracy": model_accuracy
        }
        json.dump(scores, f, indent=4)
    # Output model
    os.makedirs(model_dir, exist_ok=True)
    model_path = os.path.join(model_dir, "model.joblib")
    joblib.dump(model, model_path)
if __name__ == "__main__":
    # Parse the config-file path from the command line, then run training.
    parser = argparse.ArgumentParser()
    parser.add_argument("--config", default="params.yaml")
    cli_args = parser.parse_args()
    # Train and evaluate the model
    train_and_evaluate(config_path=cli_args.config)
|
#!/usr/bin/env python
import os.path
import tornado.httpserver
import tornado.ioloop
import tornado.web
from tornado.options import options
from settings import settings
from urls import url_patterns
class MakeApp(tornado.web.Application):
    """Tornado application wired with the project's url_patterns and settings."""
    def __init__(self):
        # url_patterns and settings come from the local `urls` / `settings` modules.
        tornado.web.Application.__init__(self, url_patterns, **settings)
def main():
    """Create the app, bind it to $PORT (default 5000) and serve forever."""
    application = MakeApp()
    server = tornado.httpserver.HTTPServer(application)
    # Heroku-style port configuration via the environment.
    server.listen(int(os.environ.get("PORT", 5000)))
    print('connected...')
    tornado.ioloop.IOLoop.instance().start()
if __name__ == "__main__":
    main()
|
# Copyright 2014-2018 ARM Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# pylint: disable=attribute-defined-outside-init
import logging
import re
from collections import namedtuple
from devlib.module import Module
from devlib.exception import TargetStableError
from devlib.utils.misc import list_to_ranges, isiterable
from devlib.utils.types import boolean
class Controller(object):
    def __init__(self, kind, hid, clist):
        """
        Initialize a controller given the hierarchy it belongs to.
        :param kind: the name of the controller
        :type kind: str
        :param hid: the Hierarchy ID this controller is mounted on
        :type hid: int
        :param clist: the list of controller mounted in the same hierarchy
        :type clist: list(str)
        """
        self.mount_name = 'devlib_cgh{}'.format(hid)
        self.kind = kind
        self.hid = hid
        self.clist = clist
        self.target = None          # set by mount(); None means "not mounted yet"
        self._noprefix = False      # True when mounted with the "noprefix" option
        self.logger = logging.getLogger('CGroup.'+self.kind)
        self.logger.debug('Initialized [%s, %d, %s]',
                          self.kind, self.hid, self.clist)
        self.mount_point = None     # filled in by mount()
        self._cgroups = {}          # name -> CGroup cache used by cgroup()/exists()
    def mount(self, target, mount_root):
        """Mount this controller's hierarchy on *target* (or reuse an existing
        mount), detect the "noprefix" option, and create the root cgroup.

        :param target: the devlib target to mount on
        :param mount_root: directory under which new mounts are created
        """
        mounted = target.list_file_systems()
        if self.mount_name in [e.device for e in mounted]:
            # Identify mount point if controller is already in use
            self.mount_point = [
                fs.mount_point
                for fs in mounted
                if fs.device == self.mount_name
            ][0]
        else:
            # Mount the controller if not already in use
            self.mount_point = target.path.join(mount_root, self.mount_name)
            target.execute('mkdir -p {} 2>/dev/null'\
                    .format(self.mount_point), as_root=True)
            target.execute('mount -t cgroup -o {} {} {}'\
                    .format(','.join(self.clist),
                            self.mount_name,
                            self.mount_point),
                    as_root=True)
        # Check if this controller uses "noprefix" option
        output = target.execute('mount | grep "{} "'.format(self.mount_name))
        if 'noprefix' in output:
            self._noprefix = True
            # self.logger.debug('Controller %s using "noprefix" option',
            #                   self.kind)
        self.logger.debug('Controller %s mounted under: %s (noprefix=%s)',
            self.kind, self.mount_point, self._noprefix)
        # Mark this contoller as available
        self.target = target
        # Create root control group
        self.cgroup('/')
def cgroup(self, name):
if not self.target:
raise RuntimeError('CGroup creation failed: {} controller not mounted'\
.format(self.kind))
if name not in self._cgroups:
self._cgroups[name] = CGroup(self, name)
return self._cgroups[name]
def exists(self, name):
if not self.target:
raise RuntimeError('CGroup creation failed: {} controller not mounted'\
.format(self.kind))
if name not in self._cgroups:
self._cgroups[name] = CGroup(self, name, create=False)
return self._cgroups[name].exists()
def list_all(self):
self.logger.debug('Listing groups for %s controller', self.kind)
output = self.target.execute('{} find {} -type d'\
.format(self.target.busybox, self.mount_point),
as_root=True)
cgroups = []
for cg in output.splitlines():
cg = cg.replace(self.mount_point + '/', '/')
cg = cg.replace(self.mount_point, '/')
cg = cg.strip()
if cg == '':
continue
self.logger.debug('Populate %s cgroup: %s', self.kind, cg)
cgroups.append(cg)
return cgroups
    def move_tasks(self, source, dest, exclude=None):
        """Move all tasks from the *source* cgroup to the *dest* cgroup.

        :param source: name of an already-known cgroup (must be in the cache)
        :param dest: name of an already-known cgroup (must be in the cache)
        :param exclude: grep filter(s) for tasks to leave behind; formatted
            verbatim into the shell helper call
        :raises ValueError: if either cgroup name is unknown
        """
        if exclude is None:
            exclude = []
        try:
            srcg = self._cgroups[source]
            dstg = self._cgroups[dest]
        except KeyError as e:
            raise ValueError('Unknown group: {}'.format(e))
        self.target._execute_util(  # pylint: disable=protected-access
                'cgroups_tasks_move {} {} \'{}\''.format(
                    srcg.directory, dstg.directory, exclude),
                as_root=True)
def move_all_tasks_to(self, dest, exclude=None):
    """
    Move all the tasks to the specified CGroup.

    Tasks are moved from all their original CGroups into the specified one.
    Tasks whose name matches one of the strings in *exclude* are moved to
    the root CGroup of the controller instead.

    The name of a task to exclude must be a substring of the task name as
    reported by the "ps" command. Indeed, this list will be translated into
    a: "ps | grep -e name1 -e name2..." in order to obtain the PID of these
    tasks.

    :param dest: name of the destination CGroup
    :type dest: str
    :param exclude: list of commands to keep in the root CGroup
    :type exclude: list(str)

    :raises ValueError: if *exclude* is neither a str nor a list
    """
    if exclude is None:
        exclude = []
    if isinstance(exclude, str):
        exclude = [exclude]
    if not isinstance(exclude, list):
        raise ValueError('wrong type for "exclude" parameter, '
                         'it must be a str or a list')

    # CONSISTENCY FIX: every other method of this class logs through
    # self.logger; the original called the root logger via logging.debug.
    self.logger.debug('Moving all tasks into %s', dest)

    # Build list of tasks to exclude
    grep_filters = ''
    for comm in exclude:
        grep_filters += '-e {} '.format(comm)
    self.logger.debug('   using grep filter: %s', grep_filters)
    if grep_filters != '':
        self.logger.debug('   excluding tasks which name matches:')
        self.logger.debug('   %s', ', '.join(exclude))

    for cgroup in self._cgroups:
        if cgroup != dest:
            self.move_tasks(cgroup, dest, grep_filters)
# pylint: disable=too-many-locals
def tasks(self, cgroup,
          filter_tid='',
          filter_tname='',
          filter_tcmdline=''):
    """
    Report the tasks that are included in a cgroup. The tasks can be
    filtered by their tid, tname or tcmdline if filter_tid, filter_tname or
    filter_tcmdline are defined respectively. In this case, the reported
    tasks are the ones in the cgroup that match these patterns.

    Example of tasks format:
    TID,tname,tcmdline
    903,cameraserver,/system/bin/cameraserver

    :params filter_tid: regexp pattern to filter by TID
    :type filter_tid: str
    :params filter_tname: regexp pattern to filter by tname
    :type filter_tname: str
    :params filter_tcmdline: regexp pattern to filter by tcmdline
    :type filter_tcmdline: str

    :returns: a dictionary in the form: {tid:(tname, tcmdline)}
    """
    for label, value in (('filter_tid', filter_tid),
                         ('filter_tname', filter_tname),
                         ('filter_tcmdline', filter_tcmdline)):
        if not isinstance(value, str):
            raise TypeError('{} should be a str'.format(label))
    try:
        cg = self._cgroups[cgroup]
    except KeyError as e:
        raise ValueError('Unknown group: {}'.format(e))
    output = self.target._execute_util(  # pylint: disable=protected-access
        'cgroups_tasks_in {}'.format(cg.directory),
        as_root=True)
    matched = {}
    for line in output.splitlines():
        parts = line.split(',', 2)
        if len(parts) < 2:
            continue
        tid_str, tname = parts[0], parts[1]
        tcmdline = parts[2] if len(parts) == 3 else ''
        # An empty filter pattern matches every entry.
        if (re.search(filter_tid, tid_str)
                and re.search(filter_tname, tname)
                and re.search(filter_tcmdline, tcmdline)):
            matched[int(tid_str)] = (tname, tcmdline)
    return matched
def tasks_count(self, cgroup):
    """Count the tasks attached to *cgroup* by line-counting its tasks file.

    :raises ValueError: if the group name is unknown.
    """
    try:
        directory = self._cgroups[cgroup].directory
    except KeyError as e:
        raise ValueError('Unknown group: {}'.format(e))
    # 'wc -l <file>' prints "<count> <file>"; keep only the count.
    output = self.target.execute(
        '{} wc -l {}/tasks'.format(self.target.busybox, directory),
        as_root=True)
    return int(output.split()[0])
def tasks_per_group(self):
    """Map every cgroup of this controller to its current task count."""
    return {cg: self.tasks_count(cg) for cg in self.list_all()}
class CGroup(object):
    """Handle to a single control-group directory of one mounted controller."""

    def __init__(self, controller, name, create=True):
        """
        :param controller: the (mounted) Controller this group belongs to
        :param name: group name, '/' for the controller's root group
        :param create: when True, create the directory on the target
        """
        self.logger = logging.getLogger('cgroups.' + controller.kind)
        self.target = controller.target
        self.controller = controller
        self.name = name

        # Control cgroup path
        self.directory = controller.mount_point
        if name != '/':
            self.directory = self.target.path.join(
                controller.mount_point, name.strip('/'))

        # Setup paths for tasks and procs files
        self.tasks_file = self.target.path.join(self.directory, 'tasks')
        self.procs_file = self.target.path.join(self.directory, 'cgroup.procs')

        if not create:
            return

        self.logger.debug('Creating cgroup %s', self.directory)
        self.target.execute('[ -d {0} ] || mkdir -p {0}'\
                .format(self.directory), as_root=True)

    def exists(self):
        """Return True iff this group's directory exists on the target."""
        try:
            self.target.execute('[ -d {0} ]'\
                    .format(self.directory), as_root=True)
            return True
        except TargetStableError:
            return False

    def get(self):
        """Read all of this controller's attributes for this group.

        :returns: dict mapping attribute name to its (string) value.
        """
        conf = {}
        # CONSISTENCY FIX: log through self.logger (set up in __init__)
        # rather than the root logger used by the original.
        self.logger.debug('Reading %s attributes from:', self.controller.kind)
        self.logger.debug('   %s', self.directory)
        output = self.target._execute_util(  # pylint: disable=protected-access
            'cgroups_get_attributes {} {}'.format(
                self.directory, self.controller.kind),
            as_root=True)
        for res in output.splitlines():
            # BUG FIX: split only on the FIRST ':' so attribute values that
            # themselves contain colons are not truncated (the original
            # split(':')[1] dropped everything after the second colon).
            parts = res.split(':', 1)
            conf[parts[0]] = parts[1]
        return conf

    def set(self, **attrs):
        """Write the given controller attributes for this group.

        :raises ValueError: if the controller does not provide an attribute.
        """
        for name in attrs:
            if isiterable(attrs[name]):
                attrs[name] = list_to_ranges(attrs[name])
            # Build attribute path ("noprefix" mounts drop the kind prefix)
            if self.controller._noprefix:  # pylint: disable=protected-access
                attr_name = '{}'.format(name)
            else:
                attr_name = '{}.{}'.format(self.controller.kind, name)
            path = self.target.path.join(self.directory, attr_name)

            # BUG FIX: removed the stray trailing double-quote the original
            # log message carried ('... to: %s"').
            self.logger.debug('Set attribute [%s] to: %s', path, attrs[name])

            # Set the attribute value
            try:
                self.target.write_value(path, attrs[name])
            except TargetStableError:
                # Check if the error is due to a non-existing attribute
                available = self.get()
                if name not in available:
                    raise ValueError(
                        'Controller [{}] does not provide attribute [{}]'\
                        .format(self.controller.kind, attr_name))
                raise

    def get_tasks(self):
        """Return the list of TIDs currently attached to this group."""
        task_ids = self.target.read_value(self.tasks_file).split()
        self.logger.debug('Tasks: %s', task_ids)
        return list(map(int, task_ids))

    def add_task(self, tid):
        """Attach a single task (by TID) to this group."""
        self.target.write_value(self.tasks_file, tid, verify=False)

    def add_tasks(self, tasks):
        """Attach every TID in *tasks* to this group."""
        for tid in tasks:
            self.add_task(tid)

    def add_proc(self, pid):
        """Attach a whole process (all its threads) to this group."""
        self.target.write_value(self.procs_file, pid, verify=False)
# One parsed row of /proc/cgroups: controller name, hierarchy ID, number of
# cgroups in that hierarchy, and whether the controller is enabled.
CgroupSubsystemEntry = namedtuple('CgroupSubsystemEntry', 'name hierarchy num_cgroups enabled')
class CgroupsModule(Module):
    """Devlib module that discovers, mounts and wraps the target's cgroup
    (v1) controllers and provides task-placement helpers on top of them."""

    name = 'cgroups'
    stage = 'setup'

    @staticmethod
    def probe(target):
        """Return True when cgroups are usable on *target*.

        Requires root; beyond that, either the kernel advertises cgroups via
        /proc/cgroups or the target configuration declares them."""
        if not target.is_rooted:
            return False
        if target.file_exists('/proc/cgroups'):
            return True
        # NOTE(review): assumes the target object exposes a .config with a
        # .has() method -- confirm against the Target implementation in use.
        return target.config.has('cgroups')

    def __init__(self, target):
        """Mount every controller reported by /proc/cgroups under a devlib
        private mount root and build the kind -> Controller map."""
        super(CgroupsModule, self).__init__(target)
        self.logger = logging.getLogger('CGroups')

        # Set Devlib's CGroups mount point
        self.cgroup_root = target.path.join(
            target.working_directory, 'cgroups')

        # Get the list of the available controllers
        subsys = self.list_subsystems()
        if not subsys:
            self.logger.warning('No CGroups controller available')
            return

        # Map hierarchy IDs into a list of controllers, since co-mounted
        # controllers share one hierarchy ID.
        hierarchy = {}
        for ss in subsys:
            try:
                hierarchy[ss.hierarchy].append(ss.name)
            except KeyError:
                hierarchy[ss.hierarchy] = [ss.name]
        self.logger.debug('Available hierarchies: %s', hierarchy)

        # Initialize controllers
        self.logger.info('Available controllers:')
        self.controllers = {}
        for ss in subsys:
            hid = ss.hierarchy
            controller = Controller(ss.name, hid, hierarchy[hid])
            try:
                controller.mount(self.target, self.cgroup_root)
            except TargetStableError:
                message = 'Failed to mount "{}" controller'
                raise TargetStableError(message.format(controller.kind))
            self.logger.info(' %-12s : %s', controller.kind,
                             controller.mount_point)
            self.controllers[ss.name] = controller

    def list_subsystems(self):
        """Parse /proc/cgroups (skipping its header line) into a list of
        CgroupSubsystemEntry tuples."""
        subsystems = []
        for line in self.target.execute('{} cat /proc/cgroups'\
                .format(self.target.busybox), as_root=self.target.is_rooted).splitlines()[1:]:
            line = line.strip()
            if not line or line.startswith('#'):
                continue
            name, hierarchy, num_cgroups, enabled = line.split()
            subsystems.append(CgroupSubsystemEntry(name,
                                                   int(hierarchy),
                                                   int(num_cgroups),
                                                   boolean(enabled)))
        return subsystems

    def controller(self, kind):
        """Return the Controller of the given *kind*, or None (with a
        warning) when that controller is not available on the target."""
        if kind not in self.controllers:
            self.logger.warning('Controller %s not available', kind)
            return None
        return self.controllers[kind]

    def run_into_cmd(self, cgroup, cmdline):
        """
        Get the command to run a command into a given cgroup

        :param cmdline: Commdand to be run into cgroup
        :param cgroup: Name of cgroup to run command into
        :returns: A command to run `cmdline` into `cgroup`
        """
        if not cgroup.startswith('/'):
            message = 'cgroup name "{}" must start with "/"'.format(cgroup)
            raise ValueError(message)
        # CGMOUNT tells the shutils helper where devlib mounted the cgroups.
        return 'CGMOUNT={} {} cgroups_run_into {} {}'\
            .format(self.cgroup_root, self.target.shutils,
                    cgroup, cmdline)

    def run_into(self, cgroup, cmdline, as_root=None):
        """
        Run the specified command into the specified CGroup

        :param cmdline: Command to be run into cgroup
        :param cgroup: Name of cgroup to run command into
        :param as_root: Specify whether to run the command as root, if not
                        specified will default to whether the target is rooted.
        :returns: Output of command.
        """
        if as_root is None:
            as_root = self.target.is_rooted
        cmd = self.run_into_cmd(cgroup, cmdline)
        raw_output = self.target.execute(cmd, as_root=as_root)

        # First line of output comes from shutils; strip it out.
        return raw_output.split('\n', 1)[1]

    def cgroups_tasks_move(self, srcg, dstg, exclude=''):
        """
        Move all the tasks from the srcg CGroup to the dstg one.
        A regexps of tasks names can be used to defined tasks which should not
        be moved.
        """
        return self.target._execute_util(  # pylint: disable=protected-access
            'cgroups_tasks_move {} {} {}'.format(srcg, dstg, exclude),
            as_root=True)

    def isolate(self, cpus, exclude=None):
        """
        Remove all userspace tasks from specified CPUs.

        A list of CPUs can be specified where we do not want userspace tasks
        running. This functions creates a sandbox cpuset CGroup where all
        user-space tasks and not-pinned kernel-space tasks are moved into.
        This should allows to isolate the specified CPUs which will not get
        tasks running unless explicitely moved into the isolated group.

        :param cpus: the list of CPUs to isolate
        :type cpus: list(int)

        :return: the (sandbox, isolated) tuple, where:
                 sandbox is the CGroup of sandboxed CPUs
                 isolated is the CGroup of isolated CPUs
        """
        if exclude is None:
            exclude = []
        all_cpus = set(range(self.target.number_of_cpus))
        sbox_cpus = list(all_cpus - set(cpus))
        isol_cpus = list(all_cpus - set(sbox_cpus))

        # Create Sandbox and Isolated cpuset CGroups
        cpuset = self.controller('cpuset')
        sbox_cg = cpuset.cgroup('/DEVLIB_SBOX')
        isol_cg = cpuset.cgroup('/DEVLIB_ISOL')

        # Set CPUs for Sandbox and Isolated CGroups
        sbox_cg.set(cpus=sbox_cpus, mems=0)
        isol_cg.set(cpus=isol_cpus, mems=0)

        # Move all currently running tasks to the Sandbox CGroup
        cpuset.move_all_tasks_to('/DEVLIB_SBOX', exclude)

        return sbox_cg, isol_cg

    def freeze(self, exclude=None, thaw=False):
        """
        Freeze all user-space tasks but the specified ones

        A freezer cgroup is used to stop all the tasks in the target system but
        the ones which name match one of the path specified by the exclude
        paramater. The name of a tasks to exclude must be a substring of the
        task named as reported by the "ps" command. Indeed, this list will be
        translated into a: "ps | grep -e name1 -e name2..." in order to obtain
        the PID of these tasks.

        :param exclude: list of commands paths to exclude from freezer
        :type exclude: list(str)

        :param thaw: if true thaw tasks instead
        :type thaw: bool
        """
        if exclude is None:
            exclude = []

        # Create Freezer CGroup
        freezer = self.controller('freezer')
        if freezer is None:
            raise RuntimeError('freezer cgroup controller not present')
        freezer_cg = freezer.cgroup('/DEVLIB_FREEZER')
        cmd = 'cgroups_freezer_set_state {{}} {}'.format(freezer_cg.directory)

        if thaw:
            # Restart frozen tasks
            # pylint: disable=protected-access
            freezer.target._execute_util(cmd.format('THAWED'), as_root=True)
            # Remove all tasks from freezer
            freezer.move_all_tasks_to('/')
            return

        # Move all tasks into the freezer group
        freezer.move_all_tasks_to('/DEVLIB_FREEZER', exclude)

        # Get list of not frozen tasks, which is reported as output
        tasks = freezer.tasks('/')

        # Freeze all tasks
        # pylint: disable=protected-access
        freezer.target._execute_util(cmd.format('FROZEN'), as_root=True)

        return tasks
|
import os
from mediawiki import MediaWiki
import Algorithmia as algorithmia
import requests
from robots.state import loadApikey
# Read the Algorithmia API key from the local credentials file at import time.
# NOTE(review): assumes ./credential/algorithmia.json exists and has an
# "apiKey" field -- importing this module fails otherwise; confirm deployment.
algorithmiaApiKey = loadApikey(
    path='./credential/algorithmia.json')['apiKey']
def apiAlgorithmia():
    """Fetch a Wikipedia article through the Algorithmia WikipediaParser
    service and return its (content, url) pair.

    NOTE(review): 'content' is not defined in this function nor anywhere in
    the visible module scope -- as written this raises NameError unless a
    global named 'content' (a dict with 'searchTerm' and 'language' keys) is
    set elsewhere; confirm, or pass it in as a parameter.
    """
    algorithmiaAutheticated = algorithmia.client(algorithmiaApiKey)
    wikipediaAlgorithm = algorithmiaAutheticated.algo(
        'web/WikipediaParser/0.1.2?timeout=30')
    # .pipe() runs the remote algorithm; .result holds the parsed article.
    wikipediaResponde = wikipediaAlgorithm.pipe({
        'articleName': content['searchTerm'],
        'lang': content['language']
    }).result
    wikipediaContent = wikipediaResponde["content"]
    wikipediaUrl = wikipediaResponde['url']
    return wikipediaContent, wikipediaUrl
def apiWikipedia(search, language):
    """Look up *search* on Wikipedia in *language*; return (summary, url).

    :raises Exception: when no article matches the search term.
    """
    print(language, search)
    if language == 'pt':
        language = 'pt-br'
    # BUG FIX: the computed language was never used -- MediaWiki() was built
    # with defaults, so the original always queried English Wikipedia.
    wikipedia = MediaWiki(lang=language)
    if len(wikipedia.search(search)) < 1:
        raise Exception('apiWikipedia: Content not found')
    page = wikipedia.page(search)
    return page.summary, page.url
def getEnv(name=''):
    """Return the value of environment variable *name*.

    Raises KeyError when the variable is unset and Exception when it is set
    but empty, mirroring the original contract.
    """
    value = os.environ[name]
    if not value:
        raise Exception(f"{name} is empty")
    return value
def sendRequestConcluded(videoId):
    """Notify the Telegram-bot backend that *videoId* finished rendering.

    Best-effort: any failure is printed and swallowed so the render pipeline
    never aborts on a notification error.
    """
    try:
        print(f"sendRequestConcl: {videoId}")
        # BUG FIX: requests.post without a timeout can block forever if the
        # backend hangs; bound the wait. (Redundant trailing 'pass' removed.)
        requests.post(
            'https://bot-telegram-video-maker.herokuapp.com/readyVideo',
            data={'videoId': videoId},
            timeout=30)
        print(f"sendRequestConcluded: {videoId}")
    except Exception as e:
        # Deliberate best-effort: log and continue.
        print(e)
# *****************************************************************************
# *****************************************************************************
#
# Name : wordindex.py
# Purpose : Allocate each keyword a specific, final identifier ID.
# Author : Paul Robson (paul@robsons.org.uk)
# Date : 15th January 2020
#
# *****************************************************************************
# *****************************************************************************
import re
# *****************************************************************************
#
# Create a hash mapping word to ID.
#
# *****************************************************************************
class WordIndex(object):
    """Map each (upper-cased) keyword to its final, stable identifier ID.

    The mapping is parsed from raw() once, on first instantiation, and cached
    in the WordIndex.INDEX class attribute shared by all instances.
    """
    def __init__(self):
        if WordIndex.INDEX is None:
            x = {}
            elements = self.raw().split("\n")
            for e in elements:
                # FIX: use a raw string for the pattern -- the original mixed
                # "\\s" with bare "\d"/"\:" escapes, which are invalid string
                # escapes (SyntaxWarning on modern Python) even though the
                # resulting regex happened to behave the same.
                m = re.match(r"^\s*(\d+)\s*:::\s*(.*)$", e)
                assert m is not None, "Bad line " + e
                assert m.group(2).strip() not in x, "Duplicate " + e
                x[m.group(2).strip()] = int(m.group(1))
            WordIndex.INDEX = x

    def get(self):
        """Return the shared word -> ID dictionary."""
        return WordIndex.INDEX
    #
    # *****************************************************************************
    #
    #   RPL-C's word index. This is manually maintained and does not need
    #   to be ordered. It does need to be consistent.
    #
    # *****************************************************************************
    #
    def raw(self):
        """Return the raw '<id> ::: <word>' table, upper-cased."""
        return """
        0 ::: !
        1 ::: $$!handler
        2 ::: $$&handler
        3 ::: $$@handler
        4 ::: $$call
        5 ::: $$comment
        6 ::: $$define
        7 ::: $$literal
        8 ::: $$nextline
        9 ::: $$string
        10 ::: *
        11 ::: +
        12 ::: +!
        13 ::: -
        14 ::: -1
        15 ::: ..
        16 ::: /
        17 ::: 0
        18 ::: 0<
        19 ::: 0=
        20 ::: 1
        21 ::: 1+
        22 ::: 1-
        23 ::: 10
        24 ::: 100
        25 ::: 1024
        26 ::: 127
        27 ::: 128
        28 ::: 15
        29 ::: 16
        30 ::: 16*
        31 ::: 16/
        32 ::: 2
        33 ::: 2*
        34 ::: 2+
        35 ::: 2-
        36 ::: 2/
        37 ::: 24
        38 ::: 255
        39 ::: 256
        40 ::: 256*
        41 ::: 256/
        42 ::: 3
        43 ::: 32
        44 ::: 32767
        45 ::: 32768
        46 ::: 4
        47 ::: 4*
        48 ::: 4/
        49 ::: 4096
        50 ::: 5
        51 ::: 512
        52 ::: 63
        53 ::: 64
        54 ::: 8
        55 ::: 8*
        56 ::: 8/
        57 ::: ;
        58 ::: <
        59 ::: <=
        60 ::: <>
        61 ::: =
        62 ::: >
        63 ::: >=
        64 ::: ?dup
        65 ::: @
        66 ::: abs
        67 ::: alloc
        68 ::: and
        69 ::: assert
        70 ::: bswap
        71 ::: c!
        72 ::: c@
        73 ::: clr
        74 ::: drop
        75 ::: dup
        76 ::: else
        77 ::: end
        78 ::: endif
        79 ::: for
        80 ::: if
        81 ::: index
        82 ::: list
        83 ::: max
        84 ::: min
        85 ::: mod
        86 ::: negate
        87 ::: new
        88 ::: next
        89 ::: nip
        90 ::: not
        91 ::: or
        92 ::: over
        93 ::: repeat
        94 ::: rnd
        95 ::: rot
        96 ::: run
        97 ::: sgn
        98 ::: stop
        99 ::: swap
        100 ::: sys
        101 ::: to.string
        102 ::: until
        103 ::: vlist
        104 ::: xbreak
        105 ::: xdump
        106 ::: xor
        107 ::: save
        108 ::: load
        109 ::: $$index
        110 ::: old
        111 ::: $$hexliteral
        112 ::: fast
        113 ::: slow
        """.strip().upper()

# Lazily-built cache shared by all WordIndex instances.
WordIndex.INDEX = None
if __name__ == "__main__":
    # Manual smoke test: dump the full word -> ID mapping.
    index = WordIndex().get()
    print(index)
from typing import List
from engine.gameController import GameController
from engine.gameModel import GameModel
class ChessGameController(GameController):
    """Controller translating raw UI events into chess game signals."""

    def __init__(self, gameModel: GameModel):
        super().__init__(gameModel)
        # Route incoming signals to handler methods.
        # NOTE(review): onPlayerJoinRequested is not defined in this class;
        # presumably inherited from GameController -- confirm.
        self.signalHandlers["playerJoinRequested"] = self.onPlayerJoinRequested
        self.signalHandlers["cellSelected"] = self.onCellSelected
        self.signalHandlers["textCommandIssued"] = self.onTextCommandIssued
        self.attach(gameModel, "cellSelected")
        self.attach(gameModel, "quitRequested")

    def onCellSelected(self, cellIndex: int) -> None:
        """Forward a board-cell selection to attached observers."""
        self.notify("cellSelected", cellIndex)

    def onTextCommandIssued(self, textCommand: str) -> None:
        """Parse a textual command and emit the corresponding signal.

        Supported commands:
          quit                         -> emits "quitRequested"
          player_type <index> <value>  -> emits "commandIssued"
        """
        textCommand = str(textCommand)
        if textCommand == "quit":
            self.notify("quitRequested")
            return
        commandParts = textCommand.split(' ')
        # BUG FIX (dead code removed): str.split always returns at least one
        # element, so the old "len(commandParts) <= 0" early return could
        # never trigger.
        commandName = commandParts[0].lower()
        if commandName == "player_type" and len(commandParts) == 3:
            # BUG FIX: a malformed index ("player_type x human") used to
            # raise ValueError out of the handler; ignore it instead.
            try:
                playerIndex = int(commandParts[1])
            except ValueError:
                return
            command = {
                "name": commandName,
                "index": playerIndex,
                "value": commandParts[2]
            }
            self.notify("commandIssued", command)
|
'''User management
User admin page, user edit page, adding/removing users, logging in/out
TODO(Connor): Improve remove user double check.'''
from flask import Flask, Blueprint, render_template, \
abort, session, request, redirect, url_for
from flask_bcrypt import Bcrypt
import chores.blueprints.database as DATABASE
# Blueprint carrying all user-management routes; registered by the app factory.
USER = Blueprint("user", __name__, template_folder="templates")
# NOTE(review): a throwaway Flask app is created here only so Bcrypt can be
# initialised; the blueprint itself is presumably registered on the real app.
APP = Flask(__name__)
BCRYPT = Bcrypt(APP)
@USER.route("/admin")
def show_admin():
    '''Render the admin panel (admin.html) with the full user list.

    Requires an authenticated admin session; otherwise responds 401.'''
    # De Morgan of the original "not logged_in or not admin" guard.
    if not (session.get("logged_in") and session.get("admin")):
        abort(401)
    rows = DATABASE.get_db().execute(
        "SELECT username, admin, root FROM users ORDER BY id").fetchall()
    return render_template("admin.html", title="Admin Panel", users=rows)
@USER.route("/adduser", methods=["POST"])
def add_user():
    '''Add user function

    Args (over POST):
        str: password
        str: confirm
        bool: admin ("check")
        str: username
    Redirects to the show_admin page.'''
    # BUG FIX: the original used "and", so the request was rejected only when
    # BOTH flags were missing -- any logged-in non-admin user could add
    # accounts. Require login AND admin, matching show_admin.
    if not session.get("logged_in") or not session.get("admin"):
        abort(401)
    if request.form["password"] != request.form["confirm"]:
        return redirect(url_for("user.show_admin"))
    data = DATABASE.get_db()
    hashed = BCRYPT.generate_password_hash(request.form["password"])
    admin = bool(request.form.getlist("check"))
    insert = [request.form["username"].lower(), admin, hashed, False]
    data.execute("INSERT INTO users (username, admin, hash, root) VALUES (?, ?, ?, ?)", insert)
    data.commit()
    return redirect(url_for("user.show_admin"))
@USER.route("/login", methods=["GET", "POST"])
def login():
    '''User login page and function

    Args (over POST):
        str: username
        str: password
    If successful POST, redirect to the index page.
    If failure POST, redisplay the login page with an error.
    If over GET, display login page.'''
    error = None
    if request.method == "POST":
        data = DATABASE.get_db()
        # SECURITY FIX: the original interpolated the POSTed username straight
        # into the SQL string ("... = '%s'" % ...), a classic SQL injection;
        # bind it as a query parameter instead.
        cur = data.execute(
            "SELECT username, hash, admin, root FROM users WHERE username = ?",
            [request.form["username"].lower()])
        users = cur.fetchall()
        if not users:
            error = "Invalid username or password!"
            return render_template("login.html", error=error)
        user_list = users[0]
        if not BCRYPT.check_password_hash(user_list[1], request.form["password"]):
            error = "Invalid username or password!"
        else:
            session["logged_in"] = True
            session["username"] = request.form["username"].lower()
            session["admin"] = bool(user_list[2] == 1)
            session["root"] = bool(user_list[3] == 1)
            return redirect(url_for("main_pages.index"))
    return render_template("login.html", error=error, title="Login")
@USER.route("/logout")
def logout():
    '''Logs out the user.

    Clears all session keys set by login() and redirects to the index page.'''
    session.pop("logged_in", None)
    session.pop("admin", None)
    # BUG FIX: login() also sets "username" and "root"; the original left
    # them behind, keeping a stale root/username in the session after logout.
    session.pop("username", None)
    session.pop("root", None)
    return redirect(url_for("main_pages.index"))
@USER.route("/user/<username>")
def user_page(username):
    '''Displays user edit page.

    Non-admin users may only view their own page; admins may view any.
    Passes the user's admin flag to the user.html template.'''
    # Robustness: .get() avoids a KeyError (HTTP 500) on a fresh session.
    if not session.get("logged_in"):
        abort(401)
    if not session.get("admin") and username != session.get("username"):
        abort(401)
    data = DATABASE.get_db()
    # SECURITY FIX: parameterized query -- the original built the SQL with
    # "'%s'" % username (URL-controlled), which is SQL-injectable.
    rows = data.execute("SELECT admin FROM users WHERE username = ?",
                        [username]).fetchall()
    if not rows:
        # BUG FIX: an unknown username previously raised IndexError (500).
        abort(404)
    return render_template("user.html", title="Edit " + username,
                           user=username, admin=rows[0][0])
@USER.route("/user/<username>/edit", methods=["POST"])
def edit_user(username):
    '''Edit user information from user_page

    Args (over POST):
        str: password
        str: confirm
        bool: admin ("check")
    Sets password and/or the admin flag on the user. Root users always keep
    their admin flag. Only admins may change other users.'''
    if not session.get("logged_in"):
        abort(401)
    if not session.get("admin") and username != session.get("username"):
        abort(401)
    if request.form["password"] != request.form["confirm"]:
        return redirect(url_for("user.user_page", username=username))
    data = DATABASE.get_db()
    # SECURITY FIX: every query below was built with "'%s'" % username
    # (URL-controlled) -- SQL injection; all are now parameterized.
    root = data.execute("SELECT root FROM users WHERE username = ?",
                        [username]).fetchall()[0][0]
    # Passwords are already known equal here (checked above), so the
    # original's redundant inner equality test was dropped.
    if request.form["password"] and request.form["confirm"]:
        hashed = BCRYPT.generate_password_hash(request.form["password"])
        data.execute("UPDATE users SET hash = ? WHERE username = ?",
                     [hashed, username])
    if session.get("admin") or root == 1:
        if request.form.getlist("check") or root == 1:
            data.execute("UPDATE users SET admin = 1 WHERE username = ?", [username])
        else:
            data.execute("UPDATE users SET admin = 0 WHERE username = ?", [username])
    data.commit()
    return redirect(url_for("user.user_page", username=username))
@USER.route("/user/<username>/delete")
def delete_user(username):
    '''Asks for confirmation then deletes user

    Args (over GET):
        bool: confirm
    Shows a confirmation page unless confirm=True; root users are never
    deleted. Redirects to the admin page.'''
    # BUG FIX: the original used "and", rejecting the request only when BOTH
    # session flags were missing; require login AND admin like show_admin.
    if not session.get("logged_in") or not session.get("admin"):
        abort(401)
    if request.args.get("confirm") == "True":
        data = DATABASE.get_db()
        # SECURITY FIX: parameterized queries -- the original interpolated
        # the URL-controlled username with "'%s'" (SQL injection).
        root = data.execute("SELECT root FROM users WHERE username = ?",
                            [username]).fetchall()
        if root and root[0][0] == 1:
            # Root accounts are protected from deletion.
            data.commit()
            return redirect(url_for("user.show_admin"))
        data.execute("DELETE FROM users WHERE username = ?", [username])
        data.commit()
        return redirect(url_for("user.show_admin"))
    # BUG FIX: any value other than "True" (including a missing confirm
    # parameter, which previously returned None -> HTTP 500) now shows the
    # confirmation page.
    return ("Are you sure you want to delete user " + username + "? <a href=" +
            url_for("user.delete_user", username=username, confirm=True) +
            ">Yes</a>, <a href=" + url_for("user.show_admin") + ">No</a>")
|
"""Web Routes."""
from masonite.routes import Get, RouteGroup
# Route table consumed by Masonite's router.
ROUTES = [
    # Social (OAuth-style) login endpoints; @provider is a URL parameter.
    RouteGroup([
        Get('/social/@provider/login', 'WelcomeController@auth'),
        Get('/social/@provider/callback', 'WelcomeController@callback'),
    ]),
    Get('/', 'WelcomeController@show').name('welcome'),
]
|
import json
import threading
import psutil
import minidiagram
import payload
# Handle to the currently armed threading.Timer (re-armed by update()).
timer = None
# Rolling window of the last 80 CPU-utilisation samples (percent).
cpu_percent = [0] * 80
def record_data():
    """Sample current CPU utilisation into the rolling history buffer."""
    # Append the newest sample and drop the oldest, keeping the window at 80
    # entries. (In-place mutation; no 'global' rebinding needed.)
    cpu_percent.append(psutil.cpu_percent())
    del cpu_percent[0]
def get_config():
    """Build the indicator/menu payload describing the current CPU load."""
    chart = minidiagram.MiniDiagram(bgcolor=(0.6, 0.6, 0.6))
    chart.add_data(cpu_percent, (1, 0, 0))
    latest = cpu_percent[-1]
    return {
        "ttl": 1,
        "indicators": [
            {
                # Chart rendered as an inline data: URI icon.
                "icon": chart.get_data_uri(),
                "title": "{cpu: =5.1f}%".format(cpu=latest)
            }
        ],
        "menu": [
            {
                "title": "Throttle!",
                "active": False,
                "open": "https://google.com"
            }
        ],
    }
def update():
    """Periodic tick: sample CPU, publish the config, re-arm in one second."""
    global timer
    record_data()
    snapshot = json.dumps(get_config()).encode()
    payload.set(snapshot)
    # Re-arming a one-shot Timer each tick yields a 1 Hz loop.
    timer = threading.Timer(1.0, update)
    timer.start()
def run():
    """Start the 1 Hz sampling loop after an initial one-second delay."""
    global timer
    first_tick = threading.Timer(1.0, update)
    timer = first_tick
    first_tick.start()
|
# Authors: Alexandre Gramfort <alexandre.gramfort@telecom-paristech.fr>
# Matti Hamalainen <msh@nmr.mgh.harvard.edu>
# Martin Luessi <mluessi@nmr.mgh.harvard.edu>
#
# License: BSD (3-clause)
from os import path
from .io.meas_info import Info
from . import pick_types
from .utils import logger, verbose
@verbose
def read_selection(name, fname=None, info=None, verbose=None):
    """Read channel selection from file

    By default, the selections used in ``mne_browse_raw`` are supported.
    Additional selections can be added by specifying a selection file (e.g.
    produced using ``mne_browse_raw``) using the ``fname`` parameter.

    The ``name`` parameter can be a string or a list of string. The returned
    selection will be the combination of all selections in the file where
    (at least) one element in name is a substring of the selection name in
    the file. For example, ``name=['temporal', 'Right-frontal']`` will produce
    a combination of ``'Left-temporal'``, ``'Right-temporal'``, and
    ``'Right-frontal'``.

    The included selections are:

        * ``'Vertex'``
        * ``'Left-temporal'``
        * ``'Right-temporal'``
        * ``'Left-parietal'``
        * ``'Right-parietal'``
        * ``'Left-occipital'``
        * ``'Right-occipital'``
        * ``'Left-frontal'``
        * ``'Right-frontal'``

    Parameters
    ----------
    name : str or list of str
        Name of the selection. If is a list, the selections are combined.
    fname : str
        Filename of the selection file (if None, built-in selections are used).
    info : instance of Info
        Measurement info file, which will be used to determine the spacing
        of channel names to return, e.g. ``'MEG 0111'`` for old Neuromag
        systems and ``'MEG0111'`` for new ones.
    verbose : bool, str, int, or None
        If not None, override default verbose level (see mne.verbose).

    Returns
    -------
    sel : list of string
        List with channel names in the selection.
    """
    # convert name to list of string
    if not isinstance(name, (list, tuple)):
        name = [name]

    # Old Neuromag channel names contain a space ('MEG 0111'); new ones do
    # not ('MEG0111'). Pick the spacing from the first MEG channel, if any.
    if isinstance(info, Info):
        picks = pick_types(info, meg=True, exclude=())
        if len(picks) > 0 and ' ' not in info['ch_names'][picks[0]]:
            spacing = 'new'
        else:
            spacing = 'old'
    elif info is not None:
        raise TypeError('info must be an instance of Info or None, not %s'
                        % (type(info),))
    else:  # info is None
        spacing = 'old'

    # use built-in selections by default
    if fname is None:
        fname = path.join(path.dirname(__file__), 'data', 'mne_analyze.sel')

    if not path.isfile(fname):
        raise ValueError('The file %s does not exist.' % fname)

    # use this to make sure we find at least one match for each name
    name_found = {n: False for n in name}
    with open(fname, 'r') as fid:
        sel = []
        for line in fid:
            line = line.strip()
            # skip blank lines and comments
            if not line or line.startswith('#'):
                continue
            # get the name of the selection in the file
            pos = line.find(':')
            if pos < 0:
                logger.info('":" delimiter not found in selections file, '
                            'skipping line')
                continue
            sel_name_file = line[:pos]
            # search for substring match with name provided
            for n in name:
                if n in sel_name_file:
                    sel.extend(line[pos + 1:].split('|'))
                    name_found[n] = True
                    break

    # make sure we found at least one match for each name
    for n, found in name_found.items():
        if not found:
            raise ValueError('No match for selection name "%s" found' % n)

    # make the selection a sorted list with unique elements
    sel = sorted(set(sel))
    if spacing == 'new':  # "new" or "old" by now, "old" is default
        sel = [s.replace('MEG ', 'MEG') for s in sel]

    return sel
|
import pickle
from tkinter import *
from PIL import ImageTk, Image
import tkinter.font as fonts
from tkinter import messagebox
import os
# initialise
root = Tk()
root.title("Space Defence")
root.iconbitmap("game_icon.ico")
root.wm_geometry("1280x720")
root.resizable(False, False)
# functions for buttons
def MainMenu():
    """Close the game-over screen and launch the main-menu script.

    NOTE(review): os.system("mainMenu.py") relies on the OS .py file
    association -- confirm this works on the target platform."""
    root.destroy()
    os.system("mainMenu.py")
def again():
    """Close the game-over screen and restart the game script."""
    root.destroy()
    os.system('SpaceDefence.py')
def special():
    """Show the one-time congratulation popup for a full victory."""
    messagebox.showinfo("Congratulations!", "YOU HAVE DONE IT \nYOU HAVE SAVED YOUR SPACESHIP AND PLANET FROM "
                                            "ABSOLUTE DEVASTATION WHILE MANAGING TO DESTROY THE WHOLE ENEMY FORCE")
# Guard so the analysis window can only be opened once per game-over screen.
check = True
def showAnalysis():
    """Open a small window summarising the last game's score breakdown.

    Uses the module-level *check* flag so only one analysis window can be
    opened per game-over screen."""
    global check
    if check:
        new = Toplevel()
        new.title("Space Defence")
        new.iconbitmap("game_icon.ico")
        new.wm_geometry("200x200")
        new.resizable(False, False)
        check = False
        # score.dat holds [score, coins_collected, bombs_left].
        with open("score.dat", "rb") as f:
            lister = pickle.load(f)
        # BUG FIX: the original assigned the result of pack() (always None)
        # to variables; just create and pack the labels.
        Label(new, text="GAME ANALYSIS").pack()
        Label(new, text="Coins collected: " + str(lister[1])).pack()
        Label(new, text="Bombs left: " + str(lister[2])).pack()
        Label(new, text="Score=coins collected-bombs left= " + str(lister[0]), padx=10).pack()
# creating buttons, images etc
testImage = Image.open("void.jpg")
testImage = testImage.resize((1280, 720))
analysisImg = ImageTk.PhotoImage(Image.open("analysis.png").resize((24, 24)))
bgImage = ImageTk.PhotoImage(testImage)
bgLabel = Label(root, image=bgImage)
font = fonts.Font(family="ComicSansMS", size=40)
# Load the last score; recreate score.dat with zeros if missing or corrupt.
try:
    with open("score.dat", "rb") as t:
        try:
            displayScore = pickle.load(t)[0]
            # Validates that the stored score is numeric; the value itself is
            # unused. NOTE(review): a non-numeric score raises an uncaught
            # ValueError here -- confirm that is intended.
            CHECK = float(displayScore)
        except pickle.UnpicklingError:
            with open("score.dat", "wb") as u:
                displayScore = '0'
                pickle.dump([0, 0, 0], u)
except FileNotFoundError:
    displayScore = "0"
# Player name recorded by the game itself.
with open("name.txt", "r")as f:
    name = f.read()
scoreLabel = Label(root, text=name+" Scored: " + str(displayScore), font="ComicSansMS", bg="purple1", fg="black")
title = Label(root, text="GAME OVER", font=font, bg="red", fg="black")
restart = Button(root, text="RESTART", bg="deepskyblue", fg="white", activebackground="steelblue", bd=5,
                 font="ComicSansMS",
                 relief=RAISED, activeforeground="white", height=1, width=10, command=again)
Quit = Button(root, text="QUIT", bg="thistle3", fg="black", activebackground="thistle4", bd=5, font="ComicSansMS",
              relief=RAISED, activeforeground="black", command=root.destroy, height=1, width=10)
mainMenu = Button(root, text="MAIN MENU", bg="cyan2", fg="black", activebackground="cyan3", bd=5,
                  font="ComicSansMS",
                  relief=RAISED, activeforeground="black", height=1, width=15, command=MainMenu)
analysisButton = Button(root, image=analysisImg, relief=RAISED, bd=5, command=showAnalysis)
# displaying created elements on screen
bgLabel.grid(row=0, column=0, rowspan=10, columnspan=15)
scoreLabel.grid(row=2, column=7)
title.grid(row=0, column=7)
restart.grid(row=4, column=7)
Quit.grid(row=6, column=7)
mainMenu.grid(row=5, column=7)
analysisButton.grid(row=2, column=8)
# One-shot victory flag written by the game: show the win popup once, reset it.
with open("check.txt", "r+") as p:
    if p.read() == "True":
        special()
        p.seek(0, 0)
        p.write("False")
root.mainloop()
# Reset the score file for the next run once the window is closed.
with open("score.dat", "wb") as f:
    pickle.dump([0, 0, 0], f)
|
#
# PySNMP MIB module CISCO-TRUSTSEC-TC-MIB (http://snmplabs.com/pysmi)
# ASN.1 source file:///Users/davwang4/Dev/mibs.snmplabs.com/asn1/CISCO-TRUSTSEC-TC-MIB
# Produced by pysmi-0.3.4 at Mon Apr 29 17:57:57 2019
# On host DAVWANG4-M-1475 platform Darwin version 18.5.0 by user davwang4
# Using Python version 3.7.3 (default, Mar 27 2019, 09:23:15)
#
# Generated pysmi module-level setup: pull the ASN.1/SMI base types out of the
# MIB builder. NOTE(review): 'mibBuilder' is not defined in this file -- pysmi
# executes these modules with mibBuilder injected into the namespace; confirm
# this file is only ever loaded through the pysnmp MIB loader.
Integer, ObjectIdentifier, OctetString = mibBuilder.importSymbols("ASN1", "Integer", "ObjectIdentifier", "OctetString")
NamedValues, = mibBuilder.importSymbols("ASN1-ENUMERATION", "NamedValues")
ConstraintsUnion, ValueSizeConstraint, ValueRangeConstraint, ConstraintsIntersection, SingleValueConstraint = mibBuilder.importSymbols("ASN1-REFINEMENT", "ConstraintsUnion", "ValueSizeConstraint", "ValueRangeConstraint", "ConstraintsIntersection", "SingleValueConstraint")
ciscoMgmt, = mibBuilder.importSymbols("CISCO-SMI", "ciscoMgmt")
ModuleCompliance, NotificationGroup = mibBuilder.importSymbols("SNMPv2-CONF", "ModuleCompliance", "NotificationGroup")
Gauge32, MibIdentifier, Integer32, IpAddress, iso, ModuleIdentity, MibScalar, MibTable, MibTableRow, MibTableColumn, ObjectIdentity, Counter64, Unsigned32, Counter32, NotificationType, TimeTicks, Bits = mibBuilder.importSymbols("SNMPv2-SMI", "Gauge32", "MibIdentifier", "Integer32", "IpAddress", "iso", "ModuleIdentity", "MibScalar", "MibTable", "MibTableRow", "MibTableColumn", "ObjectIdentity", "Counter64", "Unsigned32", "Counter32", "NotificationType", "TimeTicks", "Bits")
TextualConvention, DisplayString = mibBuilder.importSymbols("SNMPv2-TC", "TextualConvention", "DisplayString")
# Module identity for CISCO-TRUSTSEC-TC-MIB (OID 1.3.6.1.4.1.9.9.694).
ciscoCtsTcMIB = ModuleIdentity((1, 3, 6, 1, 4, 1, 9, 9, 694))
ciscoCtsTcMIB.setRevisions(('2013-06-06 00:00', '2012-01-30 00:00', '2009-05-14 00:00',))
if mibBuilder.loadTexts: ciscoCtsTcMIB.setLastUpdated('201306060000Z')
if mibBuilder.loadTexts: ciscoCtsTcMIB.setOrganization('Cisco Systems, Inc.')
# ---------------------------------------------------------------------------
# Generated TEXTUAL-CONVENTION classes (pysmi output; do not hand-edit).
# Each class refines a base SMI type with the size/range/value constraints
# declared in CISCO-TRUSTSEC-TC-MIB.
# ---------------------------------------------------------------------------

# TrustSec Security Group Tag: 16-bit value (0..65535).
class CtsSecurityGroupTag(TextualConvention, Unsigned32):
    status = 'current'
    subtypeSpec = Unsigned32.subtypeSpec + ValueRangeConstraint(0, 65535)

# ACL name: 1..255 displayable octets.
class CtsAclName(TextualConvention, OctetString):
    status = 'current'
    displayHint = '255a'
    subtypeSpec = OctetString.subtypeSpec + ValueSizeConstraint(1, 255)

# ACL name that may also be empty (0..255 octets).
class CtsAclNameOrEmpty(TextualConvention, OctetString):
    status = 'current'
    displayHint = '255a'
    subtypeSpec = OctetString.subtypeSpec + ValueSizeConstraint(0, 255)

# List of ACL names: 1..255 octets.
class CtsAclList(TextualConvention, OctetString):
    status = 'current'
    displayHint = '255a'
    subtypeSpec = OctetString.subtypeSpec + ValueSizeConstraint(1, 255)

# List of ACL names that may be empty (0..255 octets).
class CtsAclListOrEmpty(TextualConvention, OctetString):
    status = 'current'
    displayHint = '255a'
    subtypeSpec = OctetString.subtypeSpec + ValueSizeConstraint(0, 255)

# TrustSec policy name (0..255 octets).
class CtsPolicyName(TextualConvention, OctetString):
    status = 'current'
    displayHint = '255a'
    subtypeSpec = OctetString.subtypeSpec + ValueSizeConstraint(0, 255)

# Password encryption scheme enumeration.
class CtsPasswordEncryptionType(TextualConvention, Integer32):
    status = 'current'
    subtypeSpec = Integer32.subtypeSpec + ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4, 5))
    namedValues = NamedValues(("other", 1), ("none", 2), ("clearText", 3), ("typeSix", 4), ("typeSeven", 5))

# Password octets (0..256).
class CtsPassword(TextualConvention, OctetString):
    status = 'current'
    subtypeSpec = OctetString.subtypeSpec + ValueSizeConstraint(0, 256)

# Generation identifier string (0..128 displayable octets).
class CtsGenerationId(TextualConvention, OctetString):
    status = 'current'
    displayHint = '128a'
    subtypeSpec = OctetString.subtypeSpec + ValueSizeConstraint(0, 128)

# ACS authority identity, displayed in hex (0..64 octets).
class CtsAcsAuthorityIdentity(TextualConvention, OctetString):
    status = 'current'
    displayHint = '1x'
    subtypeSpec = OctetString.subtypeSpec + ValueSizeConstraint(0, 64)

# Credential record type: simple secret or PAC.
class CtsCredentialRecordType(TextualConvention, Integer32):
    status = 'current'
    subtypeSpec = Integer32.subtypeSpec + ConstraintsUnion(SingleValueConstraint(1, 2))
    namedValues = NamedValues(("simpleSecret", 1), ("pac", 2))

# SGACL monitor mode on/off switch.
class CtsSgaclMonitorMode(TextualConvention, Integer32):
    status = 'current'
    subtypeSpec = Integer32.subtypeSpec + ConstraintsUnion(SingleValueConstraint(1, 2))
    namedValues = NamedValues(("on", 1), ("off", 2))
class CtsSxpConnectionStatus(TextualConvention, Integer32):
status = 'current'
subtypeSpec = Integer32.subtypeSpec + ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4, 5))
namedValues = NamedValues(("other", 1), ("off", 2), ("on", 3), ("pendingOn", 4), ("deleteHoldDown", 5))
mibBuilder.exportSymbols("CISCO-TRUSTSEC-TC-MIB", PYSNMP_MODULE_ID=ciscoCtsTcMIB, CtsAclListOrEmpty=CtsAclListOrEmpty, CtsPolicyName=CtsPolicyName, CtsSxpConnectionStatus=CtsSxpConnectionStatus, CtsPassword=CtsPassword, CtsAclList=CtsAclList, CtsAclNameOrEmpty=CtsAclNameOrEmpty, ciscoCtsTcMIB=ciscoCtsTcMIB, CtsPasswordEncryptionType=CtsPasswordEncryptionType, CtsAclName=CtsAclName, CtsGenerationId=CtsGenerationId, CtsAcsAuthorityIdentity=CtsAcsAuthorityIdentity, CtsCredentialRecordType=CtsCredentialRecordType, CtsSgaclMonitorMode=CtsSgaclMonitorMode, CtsSecurityGroupTag=CtsSecurityGroupTag)
|
import torch
from mmhuman3d.models.builder import build_body_model
# Directory holding the SMPL body-model files used by the tests below.
body_model_load_dir = 'data/body_models/smpl'
# Regressor mapping SMPL vertices to the extra (non-standard) joints.
extra_joints_regressor = 'data/body_models/J_regressor_extra.npy'
def test_smpl():
    """Check that smpl_49 joints are a re-indexed view of smpl_54 joints.

    Builds two SMPL body models that differ only in keypoint destination
    convention, runs both on the same random body pose, and asserts the
    smpl_49 output equals the smpl_54 output gathered through the known
    54->49 joint mapping.
    """
    pose = torch.rand((1, 69))
    # Shared configuration; only the keypoint destination differs per model.
    base_cfg = dict(
        type='SMPL',
        keypoint_src='smpl_54',
        model_path=body_model_load_dir,
        extra_joints_regressor=extra_joints_regressor)
    model_54 = build_body_model(dict(base_cfg, keypoint_dst='smpl_54'))
    joints_54 = model_54(body_pose=pose)['joints']
    model_49 = build_body_model(
        dict(base_cfg, keypoint_dst='smpl_49', keypoint_approximate=True))
    joints_49 = model_49(body_pose=pose)['joints']
    # Index of each smpl_49 joint within the smpl_54 joint set.
    mapping = [
        24, 12, 17, 19, 21, 16, 18, 20, 0, 2, 5, 8, 1, 4, 7, 25, 26, 27, 28,
        29, 30, 31, 32, 33, 34, 8, 5, 45, 46, 4, 7, 21, 19, 17, 16, 18, 20, 47,
        48, 49, 50, 51, 52, 53, 24, 26, 25, 28, 27
    ]
    assert torch.allclose(joints_54[:, mapping, :], joints_49)
# def test_gendered_smpl():
# random_betas_neutral = torch.rand((1, 10))
# random_betas_male = torch.rand((1, 10))
# random_betas_female = torch.rand((1, 10))
# gender = torch.Tensor([-1, 0, 1])
#
# smpl_neutral = build_body_model(
# dict(
# type='SMPL',
# gender='neutral',
# keypoint_src='smpl_45',
# keypoint_dst='smpl_45',
# model_path=body_model_load_dir,
# ))
#
# smpl_male = build_body_model(
# dict(
# type='SMPL',
# gender='male',
# keypoint_src='smpl_45',
# keypoint_dst='smpl_45',
# model_path=body_model_load_dir,
# ))
#
# smpl_female = build_body_model(
# dict(
# type='SMPL',
# gender='female',
# keypoint_src='smpl_45',
# keypoint_dst='smpl_45',
# model_path=body_model_load_dir,
# ))
#
# gendered_smpl = build_body_model(
# dict(
# type='GenderedSMPL',
# keypoint_src='smpl_45',
# keypoint_dst='smpl_45',
# model_path=body_model_load_dir))
#
# smpl_neutral_output = smpl_neutral(betas=random_betas_neutral)
# smpl_male_output = smpl_male(betas=random_betas_male)
# smpl_female_output = smpl_female(betas=random_betas_female)
#
# betas_concat = torch.cat(
# [random_betas_neutral, random_betas_male, random_betas_female])
# joint_concat = torch.cat([
# smpl_neutral_output['joints'], smpl_male_output['joints'],
# smpl_female_output['joints']
# ])
#
# gendered_smpl_output = gendered_smpl(betas=betas_concat, gender=gender)
#
# assert torch.allclose(joint_concat, gendered_smpl_output['joints'])
|
"""
# ---------------------------
# GLOBAL packages imports
# ---------------------------
"""
from tests.utils.FileExtenTests import *
|
"""Wrappers building transformation from configuration"""
import logging
from PIL import Image
from utils.config import Configuration
def _build_param_dict(conf, required_params, optional_params=[],
key_renames={}, kwargs={}):
# Filter out params which are passed in kwargs
required_params = [p for p in required_params if p not in kwargs]
param_dict = conf.to_param_dict(required_params,
optional_params.copy(),
key_renames)
param_dict.update(kwargs)
return param_dict
def get_rec_transform(conf, mode, **kwargs):
    """Build the reconstruction transform for the given mode.

    ``train`` uses the training transform (with optional augmentation);
    ``test`` and ``inference`` share the test transform. Extra kwargs
    override values taken from the configuration.
    """
    assert mode in ('train', 'test', 'inference')
    required_params = ['undersampling', 'image_size']
    key_renames = {'undersampling': 'cs_params'}
    if mode == 'train':
        from data.reconstruction.rec_transforms import train_transform
        factory = train_transform
        optional = {'downscale': 1, 'augmentation': None}
    else:
        from data.reconstruction.rec_transforms import test_transform
        factory = test_transform
        optional = {'downscale': 1}
    params = _build_param_dict(conf,
                               required_params=required_params,
                               optional_params=optional,
                               key_renames=key_renames,
                               kwargs=kwargs)
    return factory(**params)
def get_rec_seg_transform(conf, mode, **kwargs):
    """Build the joint reconstruction+segmentation transform for a mode.

    ``train`` uses the training transform (with optional augmentation);
    ``test`` and ``inference`` share the test transform. Extra kwargs
    override values taken from the configuration.
    """
    assert mode in ('train', 'test', 'inference')
    required_params = ['undersampling', 'image_size']
    key_renames = {'undersampling': 'cs_params'}
    if mode == 'train':
        from data.reconstruction.rec_seg_transforms import train_transform
        factory = train_transform
        optional = {'downscale': 1, 'augmentation': None}
    else:
        from data.reconstruction.rec_seg_transforms import test_transform
        factory = test_transform
        optional = {'downscale': 1}
    params = _build_param_dict(conf,
                               required_params=required_params,
                               optional_params=optional,
                               key_renames=key_renames,
                               kwargs=kwargs)
    return factory(**params)
def get_rec_output_transform(conf, mode, **kwargs):
    """Return the output transform for reconstruction results.

    ``conf``, ``mode`` and extra kwargs are accepted for interface
    uniformity with the other factories but are not used here.
    """
    from data.reconstruction.rec_transforms import output_transform
    return output_transform()
def get_seg_output_transform(conf, mode, **kwargs):
    """Return the output transform for segmentation results.

    ``conf``, ``mode`` and extra kwargs are accepted for interface
    uniformity with the other factories but are not used here.
    """
    from data.reconstruction.seg_transforms import output_transform
    return output_transform()
def get_output_transform(conf, application, mode, **kwargs):
    """Dispatch to the output-transform factory for an application.

    Args:
        conf: configuration object forwarded to the factory.
        application: one of 'reconstruction', 'segmentation', 'none'.
        mode: forwarded to the factory.

    Returns:
        The built output transform, or None when the application has no
        output transform ('none').

    Raises:
        AssertionError: if ``application`` is not a known key.
    """
    applications = {
        'reconstruction': get_rec_output_transform,
        'segmentation': get_seg_output_transform,
        'none': None
    }
    assert application in applications
    factory = applications[application]
    if factory is None:
        # Fix: this branch is only reachable for a *known* application
        # ('none'); the old message wrongly claimed it was unknown.
        logging.debug('No output transform configured for application '
                      '{}'.format(application))
        return None
    return factory(conf, mode, **kwargs)
def get_rec_input_batch_transform(conf, mode, **kwargs):
    """Reconstruction currently defines no input batch transform.

    The signature mirrors the other factories; only ``mode`` is
    validated, and None is always returned.
    """
    assert mode in ('train', 'test')
    return None
def get_input_batch_transform(conf, application, mode, **kwargs):
    """Dispatch to the input-batch-transform factory for an application.

    Args:
        conf: configuration object forwarded to the factory.
        application: one of 'reconstruction', 'segmentation', 'none'.
        mode: forwarded to the factory.

    Returns:
        The built input batch transform, or None when the application
        has none configured ('segmentation', 'none').

    Raises:
        AssertionError: if ``application`` is not a known key.
    """
    applications = {
        'reconstruction': get_rec_input_batch_transform,
        'segmentation': None,
        'none': None,
    }
    assert application in applications
    factory = applications[application]
    if factory is None:
        # Fix: this branch is only reachable for a *known* application;
        # the old message wrongly claimed it was unknown.
        logging.debug('No input batch transform configured for application '
                      '{}'.format(application))
        return None
    return factory(conf, mode, **kwargs)
|
# Read five integers and keep the list sorted as each value is inserted.
lista_numeros = []
for contador in range(0, 5):
    numero = int(input('Digite um valor: '))
    # First value, or larger than the current maximum: goes at the end.
    if contador == 0 or numero > lista_numeros[-1]:
        lista_numeros.append(numero)
        print('Adicionado ao final da lista...')
    else:
        # Otherwise insert before the first element that is >= numero,
        # preserving ascending order.
        posicao = 0
        while posicao < len(lista_numeros):
            if numero <= lista_numeros[posicao]:
                # Bug fix: list.insert(), not the non-existent 'inserto'
                # (the original raised AttributeError on this branch).
                lista_numeros.insert(posicao, numero)
                break
            posicao += 1
print('-=' * 30)
print(f'A sua lista ordenada é {lista_numeros}')
|
import os
import time
import logging
import tempfile
import shutil
from django.conf import settings
from operator import itemgetter
from django.utils import module_loading
from papermerge.core.import_pipeline import LOCAL
logger = logging.getLogger(__name__)
def import_documents(directory):
    """Scan *directory* and feed each unmodified file through the import
    pipelines configured in ``settings.PAPERMERGE_PIPELINES``.

    Files are processed oldest-first (by mtime). A file is only consumed
    if its mtime has not changed after waiting
    ``PAPERMERGE_FILES_MIN_UNMODIFIED_DURATION`` seconds, i.e. it is
    presumed no longer being written to. Each consumed file is moved into
    a temporary directory before the pipelines run on it.

    Raises:
        ValueError: if *directory* is falsy.
    """
    files = []
    pipelines = settings.PAPERMERGE_PIPELINES
    if not directory:
        raise ValueError("Import directory value is None")
    # Collect (path, mtime) pairs; non-files (dirs, symlinked dirs) are skipped.
    for entry in os.scandir(directory):
        if entry.is_file():
            file = (entry.path, entry.stat().st_mtime)
            files.append(file)
        else:
            logger.warning(
                "Skipping %s as it is not a file",
                entry.path
            )
    if not files:
        return
    # Oldest first, so earlier drops are imported before later ones.
    files_old_to_new = sorted(files, key=itemgetter(1))
    # Grace period: give in-flight writes time to finish before comparing
    # mtimes below. int() truncates a fractional setting value.
    time.sleep(int(settings.PAPERMERGE_FILES_MIN_UNMODIFIED_DURATION))
    for file, mtime in files_old_to_new:
        if mtime == os.path.getmtime(file):
            # File has not been modified and can be consumed
            logger.info(f"Importing file {file}...")
            basename = os.path.basename(file)
            with tempfile.TemporaryDirectory() as tempdirname:
                # Move (not copy) out of the watched directory so the file
                # is not picked up again by a later scan.
                shutil.move(file, tempdirname)
                temp_file_name = os.path.join(
                    tempdirname, basename
                )
                logger.info(f"Same as temp_file_name={temp_file_name}...")
                init_kwargs = {'payload': temp_file_name, 'processor': LOCAL}
                apply_kwargs = {'user': None, 'name': basename,
                                'delete_after_import': True}
                # TODO: 100% as imap.py and views/document.py
                # Please, refactor
                for pipeline in pipelines:
                    # Pipelines are dotted-path strings resolved at runtime.
                    pipeline_class = module_loading.import_string(pipeline)
                    try:
                        importer = pipeline_class(**init_kwargs)
                    except Exception as e:
                        # Construction failure disables this pipeline for
                        # this file; the next pipeline still runs.
                        logger.debug("{} importer: {}".format("LOCAL", e))
                        importer = None
                    if importer is not None:
                        try:
                            # PEP8 warning
                            # result_dict is not used
                            # Is importer.apply supposed to
                            # return something ?
                            # Please document apply function
                            result_dict = importer.apply(**apply_kwargs)
                            # undocumented
                            init_kwargs_temp = importer.get_init_kwargs()
                            # not documented
                            apply_kwargs_temp = importer.get_apply_kwargs()
                            # Pipelines may feed extra kwargs forward to the
                            # pipelines that run after them.
                            if init_kwargs_temp:
                                init_kwargs = {
                                    **init_kwargs, **init_kwargs_temp}
                            if apply_kwargs_temp:
                                apply_kwargs = {
                                    **apply_kwargs, **apply_kwargs_temp}
                        except Exception as e:
                            # please use fstrings
                            logger.error("{} importer: {}".format("LOCAL", e))
                    continue
|
import numpy as np
from matplotlib import pyplot as plt
runtime_data = np.array([8.1, 7.0, 7.3, 7.2, 6.2, 6.1, 8.3, 6.4, 7.1, 7.0, 7.5, 7.8, 7.9, 7.7, 6.4, 6.6, 8.2, 6.7, 8.1, 8.0, 6.7, 7.9, 6.7, 6.5, 5.3, 6.8, 8.3, 4.7, 6.2, 5.9, 6.3, 7.5, 7.1, 8.0, 5.6, 7.9, 8.6, 7.6, 6.9, 7.1, 6.3, 7.5, 2.7, 7.2, 6.3, 6.7, 7.3, 5.6, 7.1, 3.7, 8.1, 5.8, 5.6, 7.2, 9.0, 7.3, 7.2, 7.4, 7.0, 7.5, 6.7, 6.8, 6.5, 4.1, 8.5, 7.7, 7.4, 8.1, 7.5, 7.2, 5.9, 7.1, 7.5, 6.8, 8.1, 7.1, 8.1, 8.3, 7.3, 5.3, 8.8, 7.9, 8.2, 8.1, 7.2, 7.0, 6.4, 7.8, 7.8, 7.4, 8.1, 7.0, 8.1, 7.1, 7.4, 7.4, 8.6, 5.8, 6.3, 8.5, 7.0, 7.0, 8.0, 7.9, 7.3, 7.7, 5.4, 6.3, 5.8, 7.7, 6.3, 8.1, 6.1, 7.7, 8.1, 5.8, 6.2, 8.8, 7.2, 7.4, 6.7, 6.7, 6.0, 7.4, 8.5, 7.5, 5.7, 6.6, 6.4, 8.0, 7.3, 6.0, 6.4, 8.5, 7.1, 7.3, 8.1, 7.3, 8.1, 7.1, 8.0, 6.2, 7.8, 8.2, 8.4, 8.1, 7.4, 7.6, 7.6, 6.2, 6.4, 7.2, 5.8, 7.6, 8.1, 4.7, 7.0, 7.4, 7.5, 7.9, 6.0, 7.0, 8.0, 6.1, 8.0, 5.2, 6.5, 7.3, 7.3, 6.8, 7.9, 7.9, 5.2, 8.0, 7.5, 6.5, 7.6, 7.0, 7.4, 7.3, 6.7, 6.8, 7.0, 5.9, 8.0, 6.0, 6.3, 6.6, 7.8, 6.3, 7.2, 5.6, 8.1, 5.8, 8.2, 6.9, 6.3, 8.1, 8.1, 6.3, 7.9, 6.5, 7.3, 7.9, 5.7, 7.8, 7.5, 7.5, 6.8, 6.7, 6.1, 5.3, 7.1, 5.8, 7.0, 5.5, 7.8, 5.7, 6.1, 7.7, 6.7, 7.1, 6.9, 7.8, 7.0, 7.0, 7.1, 6.4, 7.0, 4.8, 8.2, 5.2, 7.8, 7.4, 6.1, 8.0, 6.8, 3.9, 8.1, 5.9, 7.6, 8.2, 5.8, 6.5, 5.9, 7.6, 7.9, 7.4, 7.1, 8.6, 4.9, 7.3, 7.9, 6.7, 7.5, 7.8, 5.8, 7.6, 6.4, 7.1, 7.8, 8.0, 6.2, 7.0, 6.0, 4.9, 6.0, 7.5, 6.7, 3.7, 7.8, 7.9, 7.2, 8.0, 6.8, 7.0, 7.1, 7.7, 7.0, 7.2, 7.3, 7.6, 7.1, 7.0, 6.0, 6.1, 5.8, 5.3, 5.8, 6.1, 7.5, 7.2, 5.7, 7.7, 7.1, 6.6, 5.7, 6.8, 7.1, 8.1, 7.2, 7.5, 7.0, 5.5, 6.4, 6.7, 6.2, 5.5, 6.0, 6.1, 7.7, 7.8, 6.8, 7.4, 7.5, 7.0, 5.2, 5.3, 6.2, 7.3, 6.5, 6.4, 7.3, 6.7, 7.7, 6.0, 6.0, 7.4, 7.0, 5.4, 6.9, 7.3, 8.0, 7.4, 8.1, 6.1, 7.8, 5.9, 7.8, 6.5, 6.6, 7.4, 6.4, 6.8, 6.2, 5.8, 7.7, 7.3, 5.1, 7.7, 7.3, 6.6, 7.1, 6.7, 6.3, 5.5, 7.4, 7.7, 6.6, 7.8, 6.9, 5.7, 7.8, 7.7, 6.3, 8.0, 5.5, 6.9, 7.0, 5.7, 6.0, 6.8, 6.3, 6.7, 6.9, 5.7, 6.9, 7.6, 7.1, 6.1, 7.6, 7.4, 6.6, 7.6, 7.8, 7.1, 5.6, 6.7, 6.7, 6.6, 6.3, 5.8, 7.2, 5.0, 5.4, 
7.2, 6.8, 5.5, 6.0, 6.1, 6.4, 3.9, 7.1, 7.7, 6.7, 6.7, 7.4, 7.8, 6.6, 6.1, 7.8, 6.5, 7.3, 7.2, 5.6, 5.4, 6.9, 7.8, 7.7, 7.2, 6.8, 5.7, 5.8, 6.2, 5.9, 7.8, 6.5, 8.1, 5.2, 6.0, 8.4, 4.7, 7.0, 7.4, 6.4, 7.1, 7.1, 7.6, 6.6, 5.6, 6.3, 7.5, 7.7, 7.4, 6.0, 6.6, 7.1, 7.9, 7.8, 5.9, 7.0, 7.0, 6.8, 6.5, 6.1, 8.3, 6.7, 6.0, 6.4, 7.3, 7.6, 6.0, 6.6, 7.5, 6.3, 7.5, 6.4, 6.9, 8.0, 6.7, 7.8, 6.4, 5.8, 7.5, 7.7, 7.4, 8.5, 5.7, 8.3, 6.7, 7.2, 6.5, 6.3, 7.7, 6.3, 7.8, 6.7, 6.7, 6.6, 8.0, 6.5, 6.9, 7.0, 5.3, 6.3, 7.2, 6.8, 7.1, 7.4, 8.3, 6.3, 7.2, 6.5, 7.3, 7.9, 5.7, 6.5, 7.7, 4.3, 7.8, 7.8, 7.2, 5.0, 7.1, 5.7, 7.1, 6.0, 6.9, 7.9, 6.2, 7.2, 5.3, 4.7, 6.6, 7.0, 3.9, 6.6, 5.4, 6.4, 6.7, 6.9, 5.4, 7.0, 6.4, 7.2, 6.5, 7.0, 5.7, 7.3, 6.1, 7.2, 7.4, 6.3, 7.1, 5.7, 6.7, 6.8, 6.5, 6.8, 7.9, 5.8, 7.1, 4.3, 6.3, 7.1, 4.6, 7.1, 6.3, 6.9, 6.6, 6.5, 6.5, 6.8, 7.8, 6.1, 5.8, 6.3, 7.5, 6.1, 6.5, 6.0, 7.1, 7.1, 7.8, 6.8, 5.8, 6.8, 6.8, 7.6, 6.3, 4.9, 4.2, 5.1, 5.7, 7.6, 5.2, 7.2, 6.0, 7.3, 7.2, 7.8, 6.2, 7.1, 6.4, 6.1, 7.2, 6.6, 6.2, 7.9, 7.3, 6.7, 6.4, 6.4, 7.2, 5.1, 7.4, 7.2, 6.9, 8.1, 7.0, 6.2, 7.6, 6.7, 7.5, 6.6, 6.3, 4.0, 6.9, 6.3, 7.3, 7.3, 6.4, 6.6, 5.6, 6.0, 6.3, 6.7, 6.0, 6.1, 6.2, 6.7, 6.6, 7.0, 4.9, 8.4, 7.0, 7.5, 7.3, 5.6, 6.7, 8.0, 8.1, 4.8, 7.5, 5.5, 8.2, 6.6, 3.2, 5.3, 5.6, 7.4, 6.4, 6.8, 6.7, 6.4, 7.0, 7.9, 5.9, 7.7, 6.7, 7.0, 6.9, 7.7, 6.6, 7.1, 6.6, 5.7, 6.3, 6.5, 8.0, 6.1, 6.5, 7.6, 5.6, 5.9, 7.2, 6.7, 7.2, 6.5, 7.2, 6.7, 7.5, 6.5, 5.9, 7.7, 8.0, 7.6, 6.1, 8.3, 7.1, 5.4, 7.8, 6.5, 5.5, 7.9, 8.1, 6.1, 7.3, 7.2, 5.5, 6.5, 7.0, 7.1, 6.6, 6.5, 5.8, 7.1, 6.5, 7.4, 6.2, 6.0, 7.6, 7.3, 8.2, 5.8, 6.5, 6.6, 6.2, 5.8, 6.4, 6.7, 7.1, 6.0, 5.1, 6.2, 6.2, 6.6, 7.6, 6.8, 6.7, 6.3, 7.0, 6.9, 6.6, 7.7, 7.5, 5.6, 7.1, 5.7, 5.2, 5.4, 6.6, 8.2, 7.6, 6.2, 6.1, 4.6, 5.7, 6.1, 5.9, 7.2, 6.5, 7.9, 6.3, 5.0, 7.3, 5.2, 6.6, 5.2, 7.8, 7.5, 7.3, 7.3, 6.6, 5.7, 8.2, 6.7, 6.2, 6.3, 5.7, 6.6, 4.5, 8.1, 5.6, 7.3, 6.2, 5.1, 4.7, 4.8, 7.2, 6.9, 6.5, 7.3, 6.5, 6.9, 7.8, 6.8, 4.6, 6.7, 6.4, 6.0, 6.3, 6.6, 7.8, 6.6, 
6.2, 7.3, 7.4, 6.5, 7.0, 4.3, 7.2, 6.2, 6.2, 6.8, 6.0, 6.6, 7.1, 6.8, 5.2, 6.7, 6.2, 7.0, 6.3, 7.8, 7.6, 5.4, 7.6, 5.4, 4.6, 6.9, 6.8, 5.8, 7.0, 5.8, 5.3, 4.6, 5.3, 7.6, 1.9, 7.2, 6.4, 7.4, 5.7, 6.4, 6.3, 7.5, 5.5, 4.2, 7.8, 6.3, 6.4, 7.1, 7.1, 6.8, 7.3, 6.7, 7.8, 6.3, 7.5, 6.8, 7.4, 6.8, 7.1, 7.6, 5.9, 6.6, 7.5, 6.4, 7.8, 7.2, 8.4, 6.2, 7.1, 6.3, 6.5, 6.9, 6.9, 6.6, 6.9, 7.7, 2.7, 5.4, 7.0, 6.6, 7.0, 6.9, 7.3, 5.8, 5.8, 6.9, 7.5, 6.3, 6.9, 6.1, 7.5, 6.8, 6.5, 5.5, 7.7, 3.5, 6.2, 7.1, 5.5, 7.1, 7.1, 7.1, 7.9, 6.5, 5.5, 6.5, 5.6, 6.8, 7.9, 6.2, 6.2, 6.7, 6.9, 6.5, 6.6, 6.4, 4.7, 7.2, 7.2, 6.7, 7.5, 6.6, 6.7, 7.5, 6.1, 6.4, 6.3, 6.4, 6.8, 6.1, 4.9, 7.3, 5.9, 6.1, 7.1, 5.9, 6.8, 5.4, 6.3, 6.2, 6.6, 4.4, 6.8, 7.3, 7.4, 6.1, 4.9, 5.8, 6.1, 6.4, 6.9, 7.2, 5.6, 4.9, 6.1, 7.8, 7.3, 4.3, 7.2, 6.4, 6.2, 5.2, 7.7, 6.2, 7.8, 7.0, 5.9, 6.7, 6.3, 6.9, 7.0, 6.7, 7.3, 3.5, 6.5, 4.8, 6.9, 5.9, 6.2, 7.4, 6.0, 6.2, 5.0, 7.0, 7.6, 7.0, 5.3, 7.4, 6.5, 6.8, 5.6, 5.9, 6.3, 7.1, 7.5, 6.6, 8.5, 6.3, 5.9, 6.7, 6.2, 5.5, 6.2, 5.6, 5.3])
# Plot a histogram of the runtime samples with unequal-width bins.
max_runtime = runtime_data.max()
min_runtime = runtime_data.min()
print(min_runtime,max_runtime)
# Build bins of unequal width; plt.hist treats each bin as a half-open
# interval, so the first bin is [1.9, 3.5).
num_bin_list = [1.9,3.5]
i=3.5
# Append 0.5-wide bin edges until the maximum runtime is covered.
while i<=max_runtime:
    i += 0.5
    num_bin_list.append(i)
print(num_bin_list)
# Set the figure size.
plt.figure(figsize=(20,8),dpi=80)
plt.hist(runtime_data,num_bin_list)
# Put the x-axis ticks exactly on the bin edges so they line up.
plt.xticks(num_bin_list)
plt.show()
|
'''
Copyright 2019 Secure Shed Project Dev Team
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
'''
# pylint: disable=too-few-public-methods
import enum
class EvtType(enum.Enum):
    """Event-type identifiers passed between the alarm system components.

    Values are grouped by subsystem in blocks of 1000 (1xxx keypad,
    2xxx devices, 3xxx siren, 4xxx alarm state, 5xxx keypad API).
    """
    #------------------------
    #- Keypad entry events
    KeypadKeyCodeEntered = 1001
    #------------------------
    #- Device state change events
    SensorDeviceStateChange = 2001
    #------------------------
    #- Siren related events
    ActivateSiren = 3001
    DeactivateSiren = 3002
    #------------------------
    #- Alarm state change events
    AlarmActivated = 4001
    AlarmDeactivated = 4002
    #------------------------
    #- Keypad Api events
    KeypadApiSendAlivePing = 5001
    KeypadApiSendKeypadLock = 5002
class SensorDeviceBodyItem:
    """Field-name constants for a sensor device message body."""
    # NOTE(review): values look like JSON body keys (camelCase) -- confirm
    # against the messages produced/consumed elsewhere in the project.
    DeviceType = 'deviceType'
    DeviceName = 'deviceName'
    State = 'state'
|
# -*- coding: utf-8 -*-
"""
Process that handles the Community Statistics from Finn
"""
__author__ = 'Samir Adrik'
__email__ = 'samir.adrik@gmail.com'
from source.util import Assertor, Profiling, Tracking, Debugger
from .engine import Process, InputOperation, Signal, Extract, Separate, Multiplex, \
OutputOperation
from .finn_transportation_sub_model import FinnTransportationSubModel
from .finn_environment_sub_model import FinnEnvironmentSubModel
from .finn_shopping_sub_model import FinnShoppingSubModel
from .finn_leisure_sub_model import FinnLeisureSubModel
from .finn_people_sub_model import FinnPeopleSubModel
from .finn_family_sub_model import FinnFamilySubModel
class FinnCommunityProcess(Process):
"""
Implementation of processing of community statistics
"""
    @Tracking
    def __init__(self, community_json: dict):
        """
        Constructor / Instantiate the class and run the whole process.

        The run_parallel batches are ordered by data dependency: the first
        batch extracts the raw sections from the JSON, the second separates
        them (extract_8 runs here because it needs the general_signal
        produced by extract_2), and the third feeds each separated section
        into its sub model. multiplex then merges all processed outputs.

        Parameters
        ----------
        community_json : dict
            JSON object as dict with community statistics
        """
        self.start_process()
        super().__init__(name=self.__class__.__name__)
        Assertor.assert_data_types([community_json], [dict])
        self.community_json = community_json
        self.input_operation(self.community_json)
        # Batch 1: extract each top-level section from the input JSON.
        self.run_parallel([self.extract_1, self.extract_2, self.extract_3, self.extract_4,
                           self.extract_5, self.extract_6, self.extract_7])
        # Batch 2: separate sections; extract_8 consumes extract_2's output.
        self.run_parallel([self.separate_1, self.separate_2, self.extract_8, self.separate_3,
                           self.separate_4, self.separate_5, self.separate_6])
        # Batch 3: run one sub model per separated section.
        self.run_parallel([self.people_data_processing, self.family_data_processing,
                           self.environmental_process, self.transportation_process,
                           self.leisure_processing, self.shopping_process])
        self.multiplex()
        self.finn_community_statistics = self.output_operation()
        self.end_process()
@Profiling
@Tracking
def input_operation(self, data: object):
"""
initial operation of the process
"""
input_operation = InputOperation("Finn Community Statistics")
self.add_node(input_operation)
input_signal = Signal(data, desc="Finn Community Statistics")
self.add_signal(input_signal, "input_signal")
self.add_transition(input_operation, input_signal)
@Profiling
@Debugger
def extract_1(self):
"""
method for extracting community information / statistics from community JSON
"""
input_signal = self.get_signal("input_signal")
extract_people_operation = Extract(input_signal.data["nabolag"], "people")
self.add_node(extract_people_operation)
self.add_transition(input_signal, extract_people_operation, label="thread")
extract_people = extract_people_operation.run()
extract_people_signal = Signal(extract_people, desc="Extracted People Information")
self.add_signal(extract_people_signal, "people_signal")
self.add_transition(extract_people_operation, extract_people_signal, label="thread")
@Profiling
@Debugger
def extract_2(self):
"""
method for extracting general information
"""
input_signal = self.get_signal("input_signal")
extract_general_operation = Extract(input_signal.data["nabolag"], "general")
self.add_node(extract_general_operation)
self.add_transition(input_signal, extract_general_operation, label="thread")
extract_general = extract_general_operation.run()
extract_general_signal = Signal(extract_general, "Extract General Information")
self.add_signal(extract_general_signal, "general_signal")
self.add_transition(extract_general_operation, extract_general_signal, label="thread")
@Profiling
@Debugger
def extract_3(self):
"""
method for extracting family information
"""
input_signal = self.get_signal("input_signal")
extract_family_operation = Extract(input_signal.data["nabolag"], "family")
self.add_node(extract_family_operation)
self.add_transition(input_signal, extract_family_operation, label="thread")
extract_family = extract_family_operation.run()
extract_family_signal = Signal(extract_family, "Extract Family Information")
self.add_signal(extract_family_signal, "family_signal")
self.add_transition(extract_family_operation, extract_family_signal, label="thread")
@Profiling
@Debugger
def extract_4(self):
"""
method for extracting environment information
"""
input_signal = self.get_signal("input_signal")
extract_environment_operation = Extract(input_signal.data["nabolag"], "environment")
self.add_node(extract_environment_operation)
self.add_transition(input_signal, extract_environment_operation, label="thread")
extract_environment = extract_environment_operation.run()
extract_environment_signal = Signal(extract_environment, "Extract Environment Information")
self.add_signal(extract_environment_signal, "environment_signal")
self.add_transition(extract_environment_operation, extract_environment_signal,
label="thread")
@Profiling
@Debugger
def extract_5(self):
"""
method for extracting transportation info
"""
input_signal = self.get_signal("input_signal")
extract_transportation_operation = Extract(input_signal.data["nabolag"], "transport")
self.add_node(extract_transportation_operation)
self.add_transition(input_signal, extract_transportation_operation, label="thread")
extract_transportation = extract_transportation_operation.run()
extract_transportation_signal = Signal(extract_transportation,
"Extract Transportation Information")
self.add_signal(extract_transportation_signal, "transportation_signal")
self.add_transition(extract_transportation_operation, extract_transportation_signal,
label="thread")
@Profiling
@Debugger
def extract_6(self):
"""
method for extracting leisure information
"""
input_signal = self.get_signal("input_signal")
extract_leisure_operation = Extract(input_signal.data["nabolag"], "leisure")
self.add_node(extract_leisure_operation)
self.add_transition(input_signal, extract_leisure_operation, label="thread")
extract_leisure = extract_leisure_operation.run()
extract_leisure_signal = Signal(extract_leisure, "Extract Leisure Information")
self.add_signal(extract_leisure_signal, "leisure_signal")
self.add_transition(extract_leisure_operation, extract_leisure_signal, label="thread")
@Profiling
@Debugger
def extract_7(self):
"""
method for extracting shopping information
"""
input_signal = self.get_signal("input_signal")
extract_shopping_operation = Extract(input_signal.data["nabolag"], "shopping")
self.add_node(extract_shopping_operation)
self.add_transition(input_signal, extract_shopping_operation, label="thread")
extract_shopping = extract_shopping_operation.run()
extract_shopping_signal = Signal(extract_shopping, "Extract Shopping Information")
self.add_signal(extract_shopping_signal, "shopping_signal")
self.add_transition(extract_shopping_operation, extract_shopping_signal, label="thread")
@Profiling
@Debugger
def extract_8(self):
"""
method for extracting info from general information
"""
general_signal = self.get_signal("general_signal")
extract_info_operation = Extract(general_signal.data["general"], "info")
self.add_node(extract_info_operation)
self.add_transition(general_signal, extract_info_operation, label="thread")
extract_info = extract_info_operation.run()
extract_info_signal = Signal(extract_info, "Extract Community Specific Information")
self.add_signal(extract_info_signal, "info_signal")
self.add_transition(extract_info_operation, extract_info_signal, label="thread")
@Profiling
@Debugger
def separate_1(self):
"""
method for separating list of dict with people information to dict of dict
"""
people_signal = self.get_signal("people_signal")
separate_operation = Separate(people_signal.data["people"],
"Separate Out People Statistics")
self.add_node(separate_operation)
self.add_transition(people_signal, separate_operation, label="thread")
separate = separate_operation.run()
separate_signal = Signal(separate, "Separated People Statistics")
self.add_signal(separate_signal, "separate_people_signal")
self.add_transition(separate_operation, separate_signal, label="thread")
@Profiling
@Debugger
def separate_2(self):
"""
method for separating list of dict with family information to dict of dict
"""
people_signal = self.get_signal("family_signal")
separate_operation = Separate(people_signal.data["family"],
"Separate Out Family Statistics")
self.add_node(separate_operation)
self.add_transition(people_signal, separate_operation, label="thread")
separate = separate_operation.run()
separate_signal = Signal(separate, "Separated Family Statistics", prettify_keys=True,
length=5)
self.add_signal(separate_signal, "separate_family_signal")
self.add_transition(separate_operation, separate_signal, label="thread")
@Profiling
@Debugger
def separate_3(self):
"""
method for separating list of dict with environment information to dict of dict
"""
environment_signal = self.get_signal("environment_signal")
separate_operation = Separate(environment_signal.data["environment"],
"Separate Out Environment Statistics")
self.add_node(separate_operation)
self.add_transition(environment_signal, separate_operation, label="thread")
separate = separate_operation.run()
separate_signal = Signal(separate, "Separated Environment Statistics", prettify_keys=True,
length=5)
self.add_signal(separate_signal, "separated_environment_signal")
self.add_transition(separate_operation, separate_signal, label="thread")
@Profiling
@Debugger
def separate_4(self):
"""
method for separating list of dict with transportation information to dict of dict
"""
transportation_signal = self.get_signal("transportation_signal")
separate_operation = Separate(transportation_signal.data["transport"],
"Separate Out Transportation Statistics")
self.add_node(separate_operation)
self.add_transition(transportation_signal, separate_operation, label="thread")
separate = separate_operation.run()
separate_signal = Signal(separate, "Separated Transportation Statistics",
prettify_keys=True,
length=4)
self.add_signal(separate_signal, "separated_transportation_signal")
self.add_transition(separate_operation, separate_signal, label="thread")
@Profiling
@Debugger
def separate_5(self):
"""
method for separating list of dict with leisure information to dict of dict
"""
leisure_signal = self.get_signal("leisure_signal")
separate_operation = Separate(leisure_signal.data["leisure"],
"Separate Out Leisure Statistics")
self.add_node(separate_operation)
self.add_transition(leisure_signal, separate_operation, label="thread")
separate = separate_operation.run()
separate_signal = Signal(separate, "Separated Leisure Statistics", prettify_keys=True,
length=5)
self.add_signal(separate_signal, "separated_leisure_signal")
self.add_transition(separate_operation, separate_signal, label="thread")
@Profiling
@Debugger
def separate_6(self):
"""
method for separating list of dict with shopping information to dict of dict
"""
shopping_signal = self.get_signal("shopping_signal")
separate_operation = Separate(shopping_signal.data["shopping"],
"Separate Out Shopping Statistics")
self.add_node(separate_operation)
self.add_transition(shopping_signal, separate_operation, label="thread")
separate = separate_operation.run()
separate_signal = Signal(separate, "Separated Shopping Statistics", prettify_keys=True,
length=5)
self.add_signal(separate_signal, "separated_shopping_signal")
self.add_transition(separate_operation, separate_signal, label="thread")
@Profiling
@Tracking
def people_data_processing(self):
"""
sub model for processing finn people data
"""
people_signal = self.get_signal("separate_people_signal")
people_processing_operation = FinnPeopleSubModel(people_signal.data)
self.add_node(people_processing_operation)
self.add_transition(people_signal, people_processing_operation, label="thread")
people_processing = people_processing_operation.run()
people_processing_signal = Signal(people_processing, "Processed People Statistics")
self.add_signal(people_processing_signal, "people_statistics_signal")
self.add_transition(people_processing_operation, people_processing_signal, label="thread")
@Profiling
@Tracking
def family_data_processing(self):
"""
sub model for processing finn family data
"""
family_signal = self.get_signal("separate_family_signal")
family_processing_operation = FinnFamilySubModel(family_signal.data)
self.add_node(family_processing_operation)
self.add_transition(family_signal, family_processing_operation, label="thread")
family_processing = family_processing_operation.run()
family_processing_signal = Signal(family_processing, "Processed Family Statistics",
prettify_keys=True, length=4)
self.add_signal(family_processing_signal, "family_statistics_signal")
self.add_transition(family_processing_operation, family_processing_signal, label="thread")
@Profiling
@Debugger
def environmental_process(self):
"""
sub model for processing finn environmental data
"""
environmental_signal = self.get_signal("separated_environment_signal")
environmental_processing_operation = FinnEnvironmentSubModel(environmental_signal.data)
self.add_node(environmental_processing_operation)
self.add_transition(environmental_signal, environmental_processing_operation,
label="thread")
environmental_processing = environmental_processing_operation.run()
environmental_processing_signal = Signal(environmental_processing,
"Processed Environmental Statistics",
prettify_keys=True, length=5)
self.add_signal(environmental_processing_signal, "environmental_statistics_signal")
self.add_transition(environmental_processing_operation, environmental_processing_signal,
label="thread")
@Profiling
@Debugger
def transportation_process(self):
"""
sub model for processing finn transportation data
"""
transportation_signal = self.get_signal("separated_transportation_signal")
transportation_signal_operation = FinnTransportationSubModel(transportation_signal.data)
self.add_node(transportation_signal_operation)
self.add_transition(transportation_signal, transportation_signal_operation, label="thread")
transportation_processing = transportation_signal_operation.run()
transportation_processing_signal = Signal(transportation_processing,
"Processed Transportation Statistics",
prettify_keys=True, length=5)
self.add_signal(transportation_processing_signal, "transportation_statistics_signal")
self.add_transition(transportation_signal_operation, transportation_processing_signal,
label="thread")
@Profiling
@Debugger
def leisure_processing(self):
"""
sub model for processing finn leisure data
"""
leisure_signal = self.get_signal("separated_leisure_signal")
leisure_signal_operation = FinnLeisureSubModel(leisure_signal.data)
self.add_node(leisure_signal_operation)
self.add_transition(leisure_signal, leisure_signal_operation, label="thread")
leisure_processing = leisure_signal_operation.run()
leisure_processing_signal = Signal(leisure_processing, "Processed Leisure Statistics",
prettify_keys=True, length=5)
self.add_signal(leisure_processing_signal, "leisure_statistics_signal")
self.add_transition(leisure_signal_operation, leisure_processing_signal, label="thread")
@Profiling
@Debugger
def shopping_process(self):
"""
sub model for processing finn shopping data
"""
shopping_signal = self.get_signal("separated_shopping_signal")
shopping_signal_operation = FinnShoppingSubModel(shopping_signal.data)
self.add_node(shopping_signal_operation)
self.add_transition(shopping_signal, shopping_signal_operation, label="thread")
shopping_processing = shopping_signal_operation.run()
shopping_processing_signal = Signal(shopping_processing, "Processed Shopping Statistics",
prettify_keys=True, length=5)
self.add_signal(shopping_processing_signal, "shopping_statistics_signal")
self.add_transition(shopping_signal_operation, shopping_processing_signal, label="thread")
@Profiling
@Debugger
def multiplex(self):
"""
multiplex all processed data
"""
info_signal = self.get_signal("info_signal")
people_statistics = self.get_signal("people_statistics_signal")
family_statistics = self.get_signal("family_statistics_signal")
environmental_statistics = self.get_signal("environmental_statistics_signal")
transportation_statistics = self.get_signal("transportation_statistics_signal")
leisure_statistics = self.get_signal("leisure_statistics_signal")
shopping_statistics = self.get_signal("shopping_statistics_signal")
multiplex_operation = Multiplex(
[info_signal.data, people_statistics.data, family_statistics.data,
environmental_statistics.data, transportation_statistics.data,
leisure_statistics.data, shopping_statistics.data],
desc="Multiplex Finn Community Statistics")
self.add_node(multiplex_operation)
self.add_transition(info_signal, multiplex_operation)
self.add_transition(people_statistics, multiplex_operation)
self.add_transition(family_statistics, multiplex_operation)
self.add_transition(environmental_statistics, multiplex_operation)
self.add_transition(transportation_statistics, multiplex_operation)
self.add_transition(leisure_statistics, multiplex_operation)
self.add_transition(shopping_statistics, multiplex_operation)
multiplex = multiplex_operation.run()
multiplex_signal = Signal(multiplex, "Multiplexed Finn Community Statistics",
prettify_keys=True, length=7)
self.add_signal(multiplex_signal, "multiplexed_finn_community_statistics")
self.add_transition(multiplex_operation, multiplex_signal)
@Profiling
@Debugger
def output_operation(self):
"""
final operation of the process
"""
multiplexed_community_statistics = self.get_signal("multiplexed_finn_community_statistics")
output_operation = OutputOperation("Processed Finn Community Statistics")
self.add_node(output_operation)
self.add_transition(multiplexed_community_statistics, output_operation)
self.print_pdf()
return multiplexed_community_statistics.data
|
"""Test the server version helper."""
from unittest.mock import AsyncMock, call
from zwave_js_server.version import get_server_version
async def test_get_server_version(client_session, ws_client):
    """Test the get server version helper."""
    expected = {
        "driverVersion": "test_driver_version",
        "serverVersion": "test_server_version",
        "homeId": "test_home_id",
    }
    # Make ws_connect hand back the mocked websocket client, which in turn
    # returns the canned version payload.
    client_session.ws_connect.side_effect = AsyncMock(return_value=ws_client)
    ws_client.receive_json.return_value = expected
    url = "ws://test.org:3000"
    version_info = await get_server_version(url, client_session)
    # The helper must connect to the given url, map the payload fields onto the
    # returned version object, and close the websocket afterwards.
    assert client_session.ws_connect.called
    assert client_session.ws_connect.call_args == call(url)
    assert version_info.driver_version == expected["driverVersion"]
    assert version_info.server_version == expected["serverVersion"]
    assert version_info.home_id == expected["homeId"]
    assert ws_client.close.called
|
# ------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License (MIT). See LICENSE in the repo root for license information.
# ------------------------------------------------------------------------------------------
import logging
from typing import List
from unittest import mock
import pytest
from azureml.core import Run
from InnerEye.Common.common_util import logging_to_stdout, namespace_to_path
from InnerEye.Common.output_directories import OutputFolderForTests
from InnerEye.ML.lightning_container import LightningContainer
from InnerEye.ML.utils.config_loader import ModelConfigLoader
from InnerEye.ML.utils.model_util import create_model_with_temperature_scaling, generate_and_print_model_summary
from Tests.ML.configs.DummyModel import DummyModel
from Tests.ML.configs.lightning_test_containers import DummyContainerWithInvalidTrainerArguments, \
DummyContainerWithParameters
from Tests.ML.util import default_runner, get_model_loader, model_loader_including_tests, model_train_unittest
def find_models() -> List[str]:
    """
    Lists all Python files in the configs folder. Each of them is assumed to contain one model config.

    :return: list of models
    """
    configs_root = namespace_to_path(ModelConfigLoader.get_default_search_module())
    search_folders = [configs_root / "segmentation", configs_root / "classification", configs_root / "regression"]
    stems = [str(file.stem)
             for folder in search_folders
             for file in folder.glob("*.py")
             if folder.exists()]
    # Base classes and __init__/__pycache__ style names are not models.
    return [stem for stem in stems if not stem.endswith("Base") and not stem.startswith("__")]
def test_any_models_found() -> None:
    """
    Test that the basic setup for finding all model configs works: At least one of
    the models that are in the main branch must be found.
    """
    model_names = find_models()
    assert len(model_names) > 0
    assert "Lung" in model_names
    # Test that all configs in the classification folder are picked up as well
    assert "DummyClassification" in model_names
@pytest.mark.parametrize("model_name", find_models())
@pytest.mark.gpu
def test_load_all_configs(model_name: str) -> None:
    """
    Loads all model configurations that are present in the ML/src/configs folder,
    and carries out basic validations of the configuration.
    """
    logger = logging.getLogger()
    logger.setLevel(logging.INFO)
    config = ModelConfigLoader().create_model_config_from_name(model_name)
    assert config.model_name == model_name, "Mismatch between definition .py file and model name"
    if config.is_segmentation_model:
        # Reduce the feature channels to a minimum, to make tests run fast on CPU.
        minimal_feature_channels = 1
        config.feature_channels = [minimal_feature_channels] * len(config.feature_channels)
        # Bug fix: the message previously hard-coded "2 feature channels" although
        # only minimal_feature_channels (= 1) are used.
        print(f"Model architecture after restricting to {minimal_feature_channels} feature channels only:")
        model = create_model_with_temperature_scaling(config)
        generate_and_print_model_summary(config, model)  # type: ignore
    else:
        # For classification models, we can't always print a model summary: The model could require arbitrary
        # numbers of input tensors, and we'd only know once we load the training data.
        # Hence, only try to create the model, but don't attempt to print the summary.
        create_model_with_temperature_scaling(config)
def test_cross_validation_config() -> None:
    """Validate cross-validation argument checking in CrossValidationDummyModel.

    Constructing with a split index that is within range (or the -1 sentinel)
    must succeed; an index >= the number of splits must raise ValueError.
    """
    CrossValidationDummyModel(0, -1)
    CrossValidationDummyModel(10, 1)
    CrossValidationDummyModel(10, -1)
    with pytest.raises(ValueError):
        CrossValidationDummyModel(10, 11)
    with pytest.raises(ValueError):
        CrossValidationDummyModel(10, 10)
class CrossValidationDummyModel(DummyModel):
    """DummyModel variant that lets tests inject cross-validation settings."""

    def __init__(self, number_of_cross_validation_splits: int, cross_validation_split_index: int):
        # NOTE(review): the attributes are deliberately assigned BEFORE calling
        # super().__init__(); presumably the base initializer validates them and
        # raises ValueError for invalid combinations - confirm against DummyModel.
        self.number_of_cross_validation_splits = number_of_cross_validation_splits
        self.cross_validation_split_index = cross_validation_split_index
        super().__init__()
def test_model_config_loader() -> None:
    """Check that model lookup respects the configured search namespaces."""
    logging_to_stdout(log_level=logging.DEBUG)
    # The default loader only searches the built-in namespace: it resolves the
    # standard model but not the one living in the test namespace.
    default_loader = get_model_loader()
    assert default_loader.create_model_config_from_name("BasicModel2Epochs") is not None
    with pytest.raises(ValueError):
        default_loader.create_model_config_from_name("DummyModel")
    # A loader that additionally searches Tests.ML.configs resolves both names.
    extended_loader = get_model_loader(namespace="Tests.ML.configs")
    assert extended_loader.create_model_config_from_name("BasicModel2Epochs") is not None
    assert extended_loader.create_model_config_from_name("DummyModel") is not None
def test_config_loader_as_in_registration() -> None:
    """
    During model registration, the model config namespace is read out from the present model. Ensure that we
    can create a config loader that has that value as an input.
    """
    model_name = "BasicModel2Epochs"
    first_loader = ModelConfigLoader()
    config = first_loader.create_model_config_from_name(model_name)
    assert config is not None
    # Re-create a loader from the namespace stored on the model, exactly as the
    # registration code path does.
    second_loader = ModelConfigLoader(model_configs_namespace=config.__module__)
    assert len(second_loader.module_search_specs) == 2
    assert second_loader.create_model_config_from_name(model_name) is not None
def test_config_loader_on_lightning_container() -> None:
    """
    Test if the config loader can load a model that is neither classification nor segmentation.
    """
    # First check that the container can be instantiated at all (this is tricky
    # to get right when inheritance changes).
    DummyContainerWithParameters()
    logging_to_stdout(log_level=logging.DEBUG)
    loaded = model_loader_including_tests.create_model_config_from_name("DummyContainerWithParameters")
    assert loaded is not None
class MockDatasetConsumption:
    """Minimal test double for a dataset consumption object, exposing only `name`."""
    name = "dummy"
@pytest.mark.parametrize("container_name", ["DummyContainerWithAzureDataset",
                                            "DummyContainerWithoutDataset",
                                            "DummyContainerWithLocalDataset",
                                            "DummyContainerWithAzureAndLocalDataset"])
def test_submit_container_to_azureml(container_name: str) -> None:
    """
    Test if we can get the config loader to load a Lightning container model, and get it through the AzureML
    submission process.
    """
    runner = default_runner()
    mock_run = Run.get_context()
    args = ["", f"--model={container_name}", "--azureml=True", "--model_configs_namespace=Tests.ML.configs"]
    # Patch argv, dataset resolution and the actual AzureML submission call so
    # the whole submission path runs without touching Azure.
    with mock.patch("sys.argv", args), \
            mock.patch("InnerEye.Azure.azure_config.AzureConfig.get_dataset_consumption",
                       return_value=MockDatasetConsumption), \
            mock.patch("azureml.core.Experiment.submit", return_value=mock_run):
        loaded_config, actual_run = runner.run()
    assert actual_run == mock_run
    assert isinstance(runner.lightning_container, LightningContainer)
def test_load_container_with_arguments() -> None:
    """
    Test if we can load a container and override a value in it via the commandline. Parameters can only be set at
    container level, not at model level.
    """
    DummyContainerWithParameters()
    runner = default_runner()
    # Overriding a container-level parameter must work.
    good_args = ["", "--model=DummyContainerWithParameters", "--container_param=param1",
                 "--model_configs_namespace=Tests.ML.configs"]
    with mock.patch("sys.argv", good_args):
        runner.parse_and_load_model()
    assert isinstance(runner.lightning_container, DummyContainerWithParameters)
    assert runner.lightning_container.container_param == "param1"
    # Overriding model parameters should not work
    bad_args = ["", "--model=DummyContainerWithParameters", "--model_param=param2",
                "--model_configs_namespace=Tests.ML.configs"]
    with pytest.raises(ValueError) as ex:
        with mock.patch("sys.argv", bad_args):
            runner.parse_and_load_model()
    assert "model_param" in str(ex)
def test_load_invalid_container() -> None:
    """
    Test that loading a container fails if one of the parameters is not valid.
    """
    DummyContainerWithParameters()
    runner = default_runner()
    invalid_args = ["", "--model=DummyContainerWithParameters", "--number_of_cross_validation_splits=1",
                    "--model_configs_namespace=Tests.ML.configs"]
    with pytest.raises(ValueError) as ex:
        with mock.patch("sys.argv", invalid_args):
            runner.parse_and_load_model()
    assert "At least two splits required to perform cross validation, but got 1" in str(ex)
def test_run_model_with_invalid_trainer_arguments(test_output_dirs: OutputFolderForTests) -> None:
    """
    Test if the trainer_arguments in a LightningContainer are passed to the trainer.
    """
    invalid_container = DummyContainerWithInvalidTrainerArguments()
    # Training must fail because the container passes an unknown keyword to the trainer.
    with pytest.raises(Exception) as ex:
        model_train_unittest(config=None, dirs=test_output_dirs,
                             lightning_container=invalid_container)
    assert "no_such_argument" in str(ex)
|
## NOTE: web-app to receive commands from Fiat phone
## Author: Matteo Varvello (matteo.varvello@nokia.com)
## Date: 10/06/2021
## TEST
## curl -H "Content-Type: application/json" --data '{"data":"testing data"}' http://localhost:8080/command
#!/usr/bin/python
#import random
import string
import json
import cherrypy
import os
from threading import Thread
import threading
import signal
import sys
import time
import argparse
import simplejson
import subprocess
# simple function to read json from a POST message
def read_json(req):
    """Parse and return the JSON body of an incoming POST request.

    :param req: request object exposing ``headers`` (with 'Content-Length')
        and a file-like ``body``.
    :return: the decoded JSON payload (typically a dict).
    """
    # Read exactly Content-Length bytes from the request body.
    content_length = int(req.headers['Content-Length'])
    raw_body = req.body.read(content_length)
    # Use the stdlib json module (already imported in this file) instead of the
    # third-party simplejson - behavior is identical for this payload.
    return json.loads(raw_body)
# global parameters
port = 8082 # default listening port
THREADS = [] # list of threads
ACL = False # control whether application ACL rules should be used
allowedips = { # ACL rules: ip -> timestamp (ms) when the rule was added; presumably '-1' means permanent - confirm
    '127.0.0.1':'-1',
}
session_id = "" # current session identifier (not modified in this file)
session_data = {} # per-session data store (not modified in this file)
# function to run a bash command
def run_bash(bashCommand, verbose = True):
    """Run a command (without a shell) and return its decoded stdout.

    :param bashCommand: command string; split on whitespace, so arguments
        containing spaces are not supported.
    :param verbose: when True, print the command with its output and error.
    :return: stdout of the command, decoded as utf-8.
    """
    process = subprocess.Popen(bashCommand.split(), stdout = subprocess.PIPE, stdin = subprocess.PIPE, shell = False)
    output, error = process.communicate()
    # Bug fix: `verbose` was previously ignored because the guard was commented out.
    if verbose:
        print("Command: " + bashCommand + " Output: " + str(output) + " Error: " + str(error))
    # all good (add a check?)
    return str(output.decode('utf-8'))
# FIXME -- can this go?
def CORS():
    """Attach a permissive Access-Control-Allow-Origin header to the current response."""
    cherrypy.response.headers["Access-Control-Allow-Origin"] = "*"
def cors():
    """CherryPy handler tool applying permissive CORS headers.

    For OPTIONS preflight requests, answers the preflight directly and returns
    True so CherryPy skips the normal handler; otherwise only adds the
    Allow-Origin header and lets the request proceed.
    """
    # logging
    if cherrypy.request.method == 'OPTIONS':
        # logging
        #print "received a pre-flight request"
        # preflight request
        # see http://www.w3.org/TR/cors/#cross-origin-request-with-preflight-0
        cherrypy.response.headers['Access-Control-Allow-Methods'] = 'POST'
        cherrypy.response.headers['Access-Control-Allow-Headers'] = 'content-type'
        cherrypy.response.headers['Access-Control-Allow-Origin'] = '*'
        # tell CherryPy to avoid the normal handler
        return True
    else:
        cherrypy.response.headers['Access-Control-Allow-Origin'] = '*'
# thread to control client-server communication
def th_web_app():
    """Configure and run the CherryPy web application (blocks until engine exit).

    Mounts the same StringGeneratorWebService handler on the two
    ACL-management endpoints and on the Fiat data-reporting endpoint,
    then starts the CherryPy engine and blocks.
    """
    # configuration
    conf = {
        '/': {
            'request.dispatch': cherrypy.dispatch.MethodDispatcher(),
            'tools.sessions.on': True,
            'tools.response_headers.on': True,
        }
    }
    cherrypy.tools.cors = cherrypy._cptools.HandlerTool(cors)
    # NOTE(review): ssl_module/ssl_certificate are configured but no private
    # key entry is visible here - confirm TLS actually works as intended.
    server_config={
        'server.socket_host': '0.0.0.0',
        'server.socket_port': port,
        'server.ssl_module':'builtin',
        'server.ssl_certificate':'certificate.pem',
    }
    cherrypy.config.update(server_config)
    # GET - ADD/REMOVE-ACL-RULE (localhost only)
    cherrypy.tree.mount(StringGeneratorWebService(), '/addACLRule', conf)
    cherrypy.tree.mount(StringGeneratorWebService(), '/removeACLRule', conf)
    # POST/REPORT-MEASUREMENTS
    cherrypy.tree.mount(StringGeneratorWebService(), '/fiatData', conf)
    # start cherrypy engine
    cherrypy.engine.start()
    cherrypy.engine.block()
# catch ctrl-c
def signal_handler(signal, frame):
    """Handle Ctrl+C: stop the worker thread, shut down CherryPy and exit."""
    # logging
    print('You pressed Ctrl+C!')
    # kill throughput thread
    print("stopping main thread")
    THREADS[0].do_run = False
    THREADS[0].join()
    # kill cherrypy
    print("stopping cherrypy webapp")
    cherrypy.engine.exit()
    # exiting from main process
    sys.exit(0)
@cherrypy.expose
class StringGeneratorWebService(object):
    """CherryPy REST resource: ACL management via GET, Fiat sensor reports via POST."""

    @cherrypy.tools.accept(media='text/plain')
    def GET(self, var=None, **params):
        """Handle /addACLRule?ip=... and /removeACLRule?ip=... requests."""
        # log last IP that contacted the server
        src_ip = cherrypy.request.headers['Remote-Addr']
        # ACL control
        if ACL:
            if not src_ip in allowedips:
                cherrypy.response.status = 403
                print("Requesting ip address (%s) is not allowed" %(src_ip))
                return "Error: Forbidden"
        # add ACL rule
        if 'addACLRule' in cherrypy.url():
            if 'ip' in cherrypy.request.params:
                ip_to_add = cherrypy.request.params['ip']
                # timestamp in milliseconds used as the rule's value
                currentTime = int(time.time()) * 1000
                if ip_to_add in allowedips:
                    print("Updating ip %s in allowedips" %(ip_to_add))
                    msg = "Rule correctly updated"
                else:
                    print("Adding new ip %s to allowedips" %(ip_to_add))
                    msg = "Rule correctly added"
                # update or add the rule
                allowedips[ip_to_add] = currentTime
                # respond all good
                cherrypy.response.status = 200
                return msg
        # remove ACL rule
        elif 'removeACLRule' in cherrypy.url():
            if 'ip' in cherrypy.request.params:
                ip_to_remove = cherrypy.request.params['ip']
                if ip_to_remove in allowedips:
                    del allowedips[ip_to_remove]
                    print("Remove ip %s from allowedips" %(ip_to_remove))
                    # respond all good
                    cherrypy.response.status = 200
                    return "Rule correctly removed"
                else:
                    # respond nothing was done
                    cherrypy.response.status = 202
                    return "Rule could not be removed since not existing"
    # handle POST requests
    def POST(self, name="test"):
        """Handle /fiatData reports: JSON body with a 'data' CSV payload.

        Expected payload line format (comma separated):
        timestamp, app, sensor, value1..value6 (for GYR/ACC sensors).
        """
        # parameters
        ret_code = 202 # default return code
        result = [] # result to be returned when needed
        ans = '' # placeholder for response
        # extract incoming IP address
        src_ip = cherrypy.request.headers['Remote-Addr']
        # ACL control
        if ACL:
            if not src_ip in allowedips:
                cherrypy.response.status = 403
                print("Requesting ip address (%s) is not allowed" %(src_ip))
                return "Error: Forbidden"
        # command to be executed
        if 'fiatData' in cherrypy.url():
            data = read_json(cherrypy.request)
            data = data['data'].split('\n')
            print(data)
            if len(data) > 2:
                print('ignore')
            else:
                data = data[0].split(',')
                ts = int(data[0])
                app = data[1]
                sensor = data[2]
                # only gyroscope and accelerometer samples are parsed
                if sensor == 'GYR' or sensor == 'ACC':
                    sensor_values = [float(d) for d in data[3:9]]
                    print('ts: %d, app: %s, sensor: %s, values: %s' % (ts, app, sensor, str(sensor_values)))
                else:
                    print('sensor is %s, ignore' % (sensor))
        # respond all good
        cherrypy.response.headers['Content-Type'] = 'application/json'
        #cherrypy.response.headers['Content-Type'] = 'string'
        cherrypy.response.headers['Access-Control-Allow-Origin'] = '*'
        cherrypy.response.status = ret_code
        if ans == '':
            ans = 'OK\n'
        # all good, send response back
        return ans.encode('utf8')
    def OPTIONS(self, name="test"):
        """Answer CORS preflight requests."""
        # preflight request
        # see http://www.w3.org/TR/cors/#cross-origin-request-with-preflight-0
        cherrypy.response.headers['Access-Control-Allow-Methods'] = 'POST'
        cherrypy.response.headers['Access-Control-Allow-Headers'] = 'content-type'
        cherrypy.response.headers['Access-Control-Allow-Origin'] = '*'
    def PUT(self, another_string):
        """Store a string in the current session."""
        cherrypy.session['mystring'] = another_string
    def DELETE(self):
        """Remove the stored string from the current session, if any."""
        cherrypy.session.pop('mystring', None)
# main goes here
if __name__ == '__main__':
    # listen to Ctrl+C (registered before starting the worker so the handler
    # is in place from the start)
    signal.signal(signal.SIGINT, signal_handler)
    # start a thread which handles client-server communication.
    # Bug fix: previously the code passed `th_web_app()` (the CALL) as target,
    # which ran the blocking web app in the main thread and never created a
    # working thread; the function object itself must be passed instead.
    THREADS.append(Thread(target = th_web_app))
    THREADS[-1].start()
|
"""
* Francis Bui
* SDEV 300
* Professor Chris Howard
* Lab 5 - Data Analysis Application (with File I-O and Exceptions)
* Sept 18, 2020
* The purpose of this program is to import various csv files with try and except
* function. After the file has been imported, it will be converted into an array
* with Pandas. Matplotlib will then create a histogram based on the user's menu
* selection. A report will also be displayed on the console with the respective
* columns count, mean, standard deviation, min, and max.
"""
import sys
import statistics as stat
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
def main_menu():
    """
    Main Menu function
    Executes first and after every function except when the user exits
    :return:
    """
    print('Select the file you want to analyze: \n')
    print('\t1. Population Data')
    print('\t2. Housing Data')
    print('\t3. Exit the Program')
    choice = input()
    # dispatch table instead of an if/elif chain
    actions = {'1': pop_data, '2': house_data, '3': exit_program}
    if choice in actions:
        actions[choice]()
    else:
        print('Please enter a valid letter that corresponds to the menu item\n')
        main_menu()
def pop_data():
    """Menu for the population data set: pick a column and analyze it."""
    print('\nYou have entered Population Data')
    print('Select the column you want to analyze: \n')
    print('\ta. Pop Apr 1')
    print('\tb. Pop Jul 1')
    print('\tc. Change Pop')
    print('\td. Exit Column')
    # menu letter -> csv column name (also the label shown to the user)
    columns = {'a': 'Pop Apr 1', 'b': 'Pop Jul 1', 'c': 'Change Pop'}
    try:
        pop_change_import = pd.read_csv('PopChange.csv', skiprows=0)
        user_input = input()
        if user_input in columns:
            selected = columns[user_input]
            print('You selected ' + selected)
            data_analysis(pop_change_import[selected])
            pop_data()
        elif user_input == 'd':
            main_menu()
        else:
            print('Please enter a valid letter that corresponds to the menu item\n')
            pop_data()
    except FileNotFoundError:
        print('File could not be found\n')
def house_data():
    """Menu for the housing data set: pick a column and analyze it."""
    print('\nYou have entered Housing Data')
    print('Select the column you want to analyze: \n')
    print('\ta. Age')
    print('\tb. Bedrooms')
    print('\tc. Built')
    print('\td. Rooms')
    print('\te. Utility')
    print('\tf. Exit Column')
    # menu letter -> (label shown to the user, csv column name)
    options = {'a': ('Age', 'AGE'), 'b': ('Bedrooms', 'BEDRMS'),
               'c': ('Built', 'BUILT'), 'd': ('Rooms', 'ROOMS'),
               'e': ('Utility', 'UTILITY')}
    try:
        housing_import = pd.read_csv('Housing.csv', skiprows=0)
        user_input = input()
        if user_input in options:
            label, column_name = options[user_input]
            print('You selected ' + label)
            data_analysis(housing_import[column_name])
            house_data()
        elif user_input == 'f':
            main_menu()
        else:
            print('Please enter a valid letter that corresponds to the menu item\n')
            house_data()
    except FileNotFoundError:
        print('File could not be found\n')
def exit_program():
    """
    Exit Function
    Thanks the user and exits the system
    :return:
    """
    farewell_lines = [
        '{:*^80}'.format(''),
        '{:^80}'.format(' Thank you for using the Python Data Analysis Application '),
        '{:^80}'.format(' We hope you try it again very soon '),
        '{:*^80}'.format(''),
    ]
    print('')
    for line in farewell_lines:
        print(line)
    print('')
    sys.exit()
def data_analysis(column):
    """Print summary statistics for a column and show its histogram.

    :param column: pandas Series selected from the imported csv file.
    Blocks for up to 60 seconds while the histogram window is displayed.
    """
    array_column = np.array(column)
    print('The statistics for this column are: ')
    print('Count = ' + str(column.count()))
    print('Mean = ' + str(column.mean()))
    print('Standard Deviation = ' + str(stat.stdev(column)))
    print('Min = ' + str(column.min()))
    print('Max = ' + str(column.max()))
    print('The Histogram of this column is now displayed')
    plt.hist(array_column, density=False, bins=100)
    # plt.xlabel('Distribution')
    # plt.ylabel('Number Of')
    plt.grid(True)
    plt.draw()
    # keep the plot window open for up to one minute, then close it
    plt.pause(60)
    plt.close()
# Application begins here: print the welcome banner and enter the main menu loop
print('')
print('{:*^80}'.format(''))
print('{:^80}'.format(' Welcome to the '))
print('{:^80}'.format(' Python Data Analysis Application '))
print('{:*^80}'.format(''))
print('')
main_menu()
|
import numpy as np
# pythran export _cplxreal(complex[])
def _cplxreal(z):
    """Split `z` into complex-conjugate pairs and (approximately) real values.

    Returns ``(zc, zr)``: ``zc`` holds one member (positive imaginary part) of
    each conjugate pair, ``zr`` the real elements.  Raises ValueError when a
    complex value has no conjugate partner within tolerance.
    """
    # Relative tolerance below which an imaginary part counts as zero.
    tol = 100 * np.finfo((1.0 * z).dtype).eps
    # Sort by real part, then by |imag|, so conjugate partners become adjacent.
    z = z[np.lexsort((abs(z.imag), z.real))]
    real_indices = abs(z.imag) <= tol * abs(z)
    zr = z[real_indices].real
    if len(zr) == len(z):
        # Input is entirely real: no conjugate pairs to report.
        return np.array([]), zr
    # Keep only the strictly complex values, split by sign of the imaginary part.
    z = z[~real_indices]
    zp = z[z.imag > 0]
    zn = z[z.imag < 0]
    if len(zp) != len(zn):
        raise ValueError("Array contains complex value without conjugate")
    # Within runs of (nearly) equal real parts, re-sort each half by |imag|
    # in place so that zp[i] and zn[i] are actual conjugates of each other.
    same_real = np.diff(zp.real) <= tol * abs(zp[:-1])
    diffs = np.diff(np.concatenate(([0], same_real, [0])))
    run_starts = np.nonzero(diffs > 0)[0]
    run_stops = np.nonzero(diffs < 0)[0]
    for i in range(len(run_starts)):
        start = run_starts[i]
        stop = run_stops[i] + 1
        for chunk in (zp[start:stop], zn[start:stop]):
            # chunk is a view; assigning through [:] reorders zp/zn in place
            chunk[:] = chunk[np.lexsort([abs(chunk.imag)])]
    # Average each pair with its conjugate to cancel numerical noise.
    zc = (zp + zn.conj()) / 2
    return zc, zr
def _nearest_real_complex_idx(fro, to, which):
order = np.argsort(np.abs(fro - to))
mask = np.isreal(fro[order])
if which == "complex":
mask = ~mask
return order[np.nonzero(mask)[0][0]]
def poly(zeros):
    """Return monic polynomial coefficients having the given roots."""
    dt = zeros.dtype
    # Multiply out (x - r) factors one root at a time via convolution.
    coeffs = np.ones((1,), dtype=dt)
    for root in zeros:
        coeffs = np.convolve(coeffs, np.array([1, -root], dtype=dt), mode="full")
    return coeffs
def zpk2tf(z, p, k):
    """Convert zeros/poles/gain to transfer-function numerator/denominator."""
    numerator = k * poly(z)
    denominator = poly(p)
    # this is a bit of an assumption to make...
    return numerator.real, denominator.real
def zpk2sos(z, p, k, n_sections):
    """Convert a zeros/poles/gain filter into `n_sections` second-order sections.

    Follows the pairing strategy of scipy.signal.zpk2sos: repeatedly take the
    pole closest to the unit circle, pair it with a compatible zero, and emit
    one biquad per iteration (sections are ordered worst-pole last, then
    reversed).  Returns an (n_sections, 6) array of [b0 b1 b2 a0 a1 a2] rows.
    """
    sos = np.zeros((n_sections, 6))
    if len(p) % 2 == 1:
        # odd number of poles: pad poles and zeros with one at the origin
        p = np.append(p, 0)
        z = np.append(z, 0)
    # canonical ordering: conjugate-pair representatives first, then reals
    z = np.concatenate(_cplxreal(z))
    p = np.concatenate(_cplxreal(p))
    p_sos = np.zeros((n_sections, 2), np.complex128)
    z_sos = np.zeros_like(p_sos)
    for si in range(n_sections):
        # select the next "worst" pole
        p1_idx = np.argmin(np.abs(1 - np.abs(p)))
        p1 = p[p1_idx]
        p = np.delete(p, p1_idx)
        # pair that pole with a zero
        if np.isreal(p1) and np.isreal(p).sum() == 0:
            # first order section
            z1_idx = _nearest_real_complex_idx(z, p1, "real")
            z1 = z[z1_idx]
            z = np.delete(z, z1_idx)
            p2 = z2 = 0
        else:
            if not np.isreal(p1) and np.isreal(z).sum() == 1:
                # choose a complex zero to pair with
                z1_idx = _nearest_real_complex_idx(z, p1, "complex")
                assert not np.isreal(z[z1_idx])
            else:
                z1_idx = np.argmin(np.abs(p1 - z))
            z1 = z[z1_idx]
            z = np.delete(z, z1_idx)
            # we have p1 and z1, figure out p2 and z2
            if not np.isreal(p1):
                if not np.isreal(z1): # complex pole, complex zero
                    p2 = p1.conj()
                    z2 = z1.conj()
                else: # complex pole, real zero
                    p2 = p1.conj()
                    z2_idx = _nearest_real_complex_idx(z, p1, "real")
                    z2 = z[z2_idx]
                    assert np.isreal(z2)
                    z = np.delete(z, z2_idx)
            else:
                if not np.isreal(z1): # real pole, complex zero
                    z2 = z1.conj()
                    p2_idx = _nearest_real_complex_idx(p, z1, "real")
                    p2 = p[p2_idx]
                    assert np.isreal(p2)
                else: # real pole, real zero
                    # next "worst" pole
                    idx = np.nonzero(np.isreal(p))[0]
                    assert len(idx) > 0
                    p2_idx = idx[np.argmin(np.abs(np.abs(p[idx]) - 1))]
                    p2 = p[p2_idx]
                    assert np.isreal(p2)
                    z2_idx = _nearest_real_complex_idx(z, p2, "real")
                    z2 = z[z2_idx]
                    assert np.isreal(z2)
                    z = np.delete(z, z2_idx)
                p = np.delete(p, p2_idx)
        p_sos[si] = [p1, p2]
        z_sos[si] = [z1, z2]
    # every pole and zero must have been consumed
    assert len(p) == len(z) == 0
    del p, z
    # reverse so the section with the worst pole comes last
    p_sos = p_sos[::-1]
    z_sos = z_sos[::-1]
    # put the overall gain on the first section only
    gains = np.ones(n_sections, np.array(k).dtype)
    gains[0] = k
    for si in range(n_sections):
        x = zpk2tf(z_sos[si], p_sos[si], gains[si])
        sos[si] = np.concatenate(x)
    return sos
# pythran export zpk2sos_multiple(complex[][], complex[][], float[])
def zpk2sos_multiple(z, p, k):
    """Convert a batch of zpk filters (one per column of z/p) to SOS form."""
    nfilt = len(k)
    assert z.shape[1] == p.shape[1] == nfilt
    # pad p and z to the same length
    pole_pad = max(z.shape[0] - p.shape[0], 0)
    zero_pad = max(p.shape[0] - z.shape[0], 0)
    p = np.concatenate((p, np.zeros((pole_pad, nfilt))), axis=0)
    z = np.concatenate((z, np.zeros((zero_pad, nfilt))), axis=0)
    n_sections = (max(p.shape[0], z.shape[0]) + 1) // 2
    sos = np.zeros((nfilt, n_sections, 6))
    for filt_idx in range(nfilt):
        sos[filt_idx, :, :] = zpk2sos(z[:, filt_idx], p[:, filt_idx], k[filt_idx], n_sections)
    return sos
|
# -*- coding: utf-8 -*-
#
# SelfTest/Random/Fortuna/test_SHAd256.py: Self-test for the SHAd256 hash function
#
# Written in 2008 by Dwayne C. Litzenberger <dlitz@dlitz.net>
#
# ===================================================================
# The contents of this file are dedicated to the public domain. To
# the extent that dedication to the public domain is not available,
# everyone is granted a worldwide, perpetual, royalty-free,
# non-exclusive license to exercise all rights associated with the
# contents of this file for any purpose whatsoever.
# No rights are reserved.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# ===================================================================
"""Self-test suite for Crypto.Random.Fortuna.SHAd256"""
__revision__ = "$Id$"
from Crypto.Util.py3compat import *
# This is a list of (expected_result, input[, description]) tuples.
# expected_result is the hex digest; the description defaults to the input itself.
test_data = [
    # I could not find any test vectors for SHAd256, so I made these vectors by
    # feeding some sample data into several plain SHA256 implementations
    # (including OpenSSL, the "sha256sum" tool, and this implementation).
    # This is a subset of the resulting test vectors. The complete list can be
    # found at: http://www.dlitz.net/crypto/shad256-test-vectors/
    ('5df6e0e2761359d30a8275058e299fcc0381534545f55cf43e41983f5d4c9456',
        '', "'' (empty string)"),
    ('4f8b42c22dd3729b519ba6f68d2da7cc5b2d606d05daed5ad5128cc03e6c6358',
        'abc'),
    ('0cffe17f68954dac3a84fb1458bd5ec99209449749b2b308b7cb55812f9563af',
        'abcdbcdecdefdefgefghfghighijhijkijkljklmklmnlmnomnopnopq')
]
def get_tests(config=None):
    """Return the SHAd256 self-test suite.

    :param config: optional test configuration (unused).  The previous default
        was a shared mutable ``{}``; ``None`` avoids the mutable-default pitfall
        while remaining call-compatible.
    """
    from Crypto.Random.Fortuna import SHAd256
    from Crypto.SelfTest.Hash.common import make_hash_tests
    return make_hash_tests(SHAd256, "SHAd256", test_data, 32)
if __name__ == '__main__':
    import unittest
    # run the self-test suite directly when executed as a script
    suite = lambda: unittest.TestSuite(get_tests())
    unittest.main(defaultTest='suite')
# vim:set ts=4 sw=4 sts=4 expandtab:
|
#!/usr/bin/env python
# -*- encoding: utf-8 -*-
"""
@File : live_like.py
@Contact : 379327479@qq.com
@License : MIT
@Modify Time @Author @Version @Description
------------ ------- -------- -----------
2021/10/25 17:43 zxd 1.0 None
"""
import uiautomator2 as u2
from uiautomator2 import Device
from app.com_ss_android_ugc_aweme import constant
def main(device: Device, params: str):
    """Entry point of the live-like action; currently a stub reporting success.

    :param device: connected uiautomator2 Device (unused).
    :param params: action parameter string (unused).
    :return: constant.SUCCESS status code.
    """
    return constant.SUCCESS
if __name__ == '__main__':
    # manual smoke test: connect to the default adb device and run with no params
    main(u2.connect(), None)
|
import psycopg2
import psycopg2.extras
import md5
def getOperator(conn,prefix=None,website=None):
    """Return operators keyed by private code (branch number).

    :param conn: open psycopg2 connection to the DINO database.
    :param prefix: operator-id prefix, defaults to 'DINO'.
    :param website: url stored for each operator, defaults to example.com.
    """
    if website is None:
        website = 'http://www.example.com'
    if prefix is None:
        prefix = 'DINO'
    cur = conn.cursor(cursor_factory=psycopg2.extras.RealDictCursor)
    cur.execute("""
SELECT DISTINCT ON (branch_nr)
branch_nr as privatecode,
%s||':'||branch_nr as operator_id,
branch_name as name,
%s as url,
'de' as language,
'Europe/Amsterdam' as timezone
FROM branch;""",[prefix,website])
    rows = cur.fetchall()
    cur.close()
    return {row['privatecode']: row for row in rows}
def getLines(conn,prefix=None):
    """Return lines keyed by operator_id, with a transport mode derived from
    the vehicle type text.

    NOTE: the 'AVV' operator prefix is hard-coded inside the query.
    """
    if prefix is None:
        prefix = 'DINO'
    cur = conn.cursor(cursor_factory=psycopg2.extras.RealDictCursor)
    cur.execute("""
SELECT DISTINCT ON (operator_id,transportmode)
rec_lin_ber.branch_nr as operatorref,
'AVV'||':'||version||':'||line_nr as operator_id,
line_nr as privatecode,
line_name as publiccode,
CASE WHEN (lower(veh_type_text) like '%bus%') THEN 'BUS'
     WHEN (lower(veh_type_text) like '%taxi%') THEN 'BUS'
     WHEN (lower(veh_type_text) like '%bahn%') THEN 'TRAIN'
     WHEN (lower(veh_type_text) like '%zug%') THEN 'TRAIN'
     WHEN (lower(veh_type_text) like '%chiff%') THEN 'BOAT'
     ELSE veh_type_text END as TransportMode,
null AS name,
false as monitored
FROM
rec_lin_ber LEFT JOIN rec_trip USING (version,line_nr)
            LEFT JOIN set_vehicle_type USING (version,veh_type_nr)
ORDER BY operator_id,transportmode
""")
    lines = {row['operator_id']: row for row in cur.fetchall()}
    cur.close()
    return lines
def getDestinationDisplays(conn,prefix=None):
    """Return destination displays keyed by operator_id.

    Same-place stops use the bare stop name; cross-place stops get a
    'place, stop name' display with a distinct ':P' operator-id namespace.
    """
    if prefix is None:
        prefix = 'DINO'
    cur = conn.cursor(cursor_factory=psycopg2.extras.RealDictCursor)
    cur.execute("""
SELECT DISTINCT ON (arr.stop_nr)
arr.stop_nr as privatecode,
%s||':'||arr.stop_nr as operator_id,
arr.stop_name as name,
arr.stop_name as shortname
FROM
rec_trip,rec_stop as arr,rec_stop as dep
WHERE
dep_stop_nr = dep.stop_nr AND
arr_stop_nr = arr.stop_nr AND
dep.place = arr.place
UNION
SELECT DISTINCT ON (arr.stop_nr)
arr.stop_nr as privatecode,
%s||':P'||arr.stop_nr as operator_id,
arr.place||', '||arr.stop_name,
arr.place as shortname
FROM
rec_trip,rec_stop as arr,rec_stop as dep
WHERE
dep_stop_nr = dep.stop_nr AND
arr_stop_nr = arr.stop_nr AND
dep.place <> arr.place
""",[prefix]*2)
    destinationdisplays = {row['operator_id']: row for row in cur.fetchall()}
    cur.close()
    return destinationdisplays
def getStopPoints(conn, prefix=None):
    """Fetch all scheduled stop points with WGS84 coordinates.

    Coordinates are stored in Gauss-Krueger zone 2 (EPSG:31466) and
    transformed to EPSG:4326 via PostGIS.

    :param conn: open psycopg2 connection
    :param prefix: id namespace prefix; defaults to 'DINO'
    :returns: dict mapping operator_id (prefix:stop_nr:stopping_point_nr)
        -> stop-point row
    """
    cur = conn.cursor(cursor_factory=psycopg2.extras.RealDictCursor)
    if prefix is None:
        prefix = 'DINO'
    stops = {}
    cur.execute("""
        SELECT
        %s||':'||stop_nr||':'||stopping_point_nr as operator_id,
        stop_nr||':'||stopping_point_nr as privatecode,
        NULL as publiccode,
        %s||':'||stop_nr as stoparearef,
        place||', '||stop_name AS name,
        place as town,
        true as isScheduled,
        CAST(CAST(ST_Y(the_geom) AS NUMERIC(9,6)) AS text) AS latitude,
        CAST(CAST(ST_X(the_geom) AS NUMERIC(8,6)) AS text) AS longitude,
        null as rd_x,
        null as rd_y
        FROM rec_stop JOIN
        (SELECT *, st_transform(ST_setsrid(st_makepoint(stopping_point_pos_x,stopping_point_pos_y),31466),4326) AS the_geom
        FROM rec_stopping_points) as stopping USING (stop_nr)""", [prefix]*2)
    for row in cur.fetchall():
        stops[row['operator_id']] = row
    cur.close()
    return stops
def getProductCategories(conn, prefix=None):
    """Fetch vehicle types as product categories.

    :param conn: open psycopg2 connection
    :param prefix: id namespace prefix; defaults to 'DINO'
    :returns: dict mapping operator_id (prefix:version:str_veh_type)
        -> product-category row
    """
    effective_prefix = 'DINO' if prefix is None else prefix
    cursor = conn.cursor(cursor_factory=psycopg2.extras.RealDictCursor)
    cursor.execute("""
        SELECT
        concat_ws(':',%s,version,str_veh_type) as operator_id,
        str_veh_type as privatecode,
        veh_type_text as name,
        str_veh_type as shortname
        FROM
        set_vehicle_type""", [effective_prefix])
    rows = cursor.fetchall()
    cursor.close()
    return {row['operator_id']: row for row in rows}
def getAvailabilityConditions(conn, prefix=None):
    """Fetch availability conditions (operating-day sets) per trip restriction.

    The bit calendar from service_restriction is expanded to individual dates
    and used to *exclude* days (WHERE restricted.day is null); remaining days
    are aggregated into a DAYS structure for export.

    :param conn: open psycopg2 connection
    :param prefix: id namespace prefix; defaults to 'DINO'
    :returns: dict mapping operator_id -> condition row with a 'DAYS' dict
        ({'validdates', 'isavailable', 'availabilityconditionref'})
    """
    cur = conn.cursor(cursor_factory=psycopg2.extras.RealDictCursor)
    if prefix is None:
        prefix = 'DINO'
    availabilityConditions = {}
    cur.execute("""
        SELECT
        concat_ws(':',%s,version,day_attribute_nr,restriction) as operator_id,
        concat_ws(':',%s,version,day_attribute_nr,restriction) as privatecode,
        %s as unitcode,
        %s||':'||version as versionref,
        NULL::text as name,
        min(day) as fromdate,
        max(day) as todate,
        array_agg(DISTINCT day) as days
        FROM
        (SELECT DISTINCT
        version,restriction,day_attribute_nr FROM rec_trip) as trips
        JOIN set_day_attribute USING (version,day_attribute_nr)
        JOIN day_type_2_day_attribute USING (version,day_attribute_nr)
        JOIN calendar_of_the_company as cotc USING (version,day_type_nr)
        LEFT JOIN (SELECT version,restriction,unnest(bitcalendar(date_from,('x' || restriction_days) :: bit varying(1024)))::date as day
        FROM service_restriction) as restricted USING (version,restriction,day)
        WHERE restricted.day is null
        GROUP BY operator_id,privatecode,unitcode,versionref,name;
        """, [prefix]*4)
    for row in cur.fetchall():
        # Repackage the aggregated dates into the exporter's DAYS structure.
        row['DAYS'] = {'validdates': row['days'], 'isavailable': True,
                       'availabilityconditionref': row['operator_id']}
        del row['days']  # idiom fix: `del(...)` call-style removed
        availabilityConditions[row['operator_id']] = row
    cur.close()  # BUG FIX: cursor was leaked (every sibling function closes it)
    return availabilityConditions
def getStopAreas(conn, prefix=None):
    """Fetch one stop area per stop_nr, located at the centroid (average
    lat/lon) of its stopping points.

    :param conn: open psycopg2 connection
    :param prefix: id namespace prefix; defaults to 'DINO'
    :returns: dict mapping operator_id (prefix:stop_nr) -> stop-area row
    """
    effective_prefix = 'DINO' if prefix is None else prefix
    cursor = conn.cursor(cursor_factory=psycopg2.extras.RealDictCursor)
    cursor.execute("""
        SELECT DISTINCT ON (operator_id)
        %s||':'||stop_nr as operator_id,
        stop_nr as privatecode,
        place||', '||stop_name AS name,
        place as town,
        (avg(ST_Y(the_geom)) OVER (PARTITION BY stop_nr))::NUMERIC(9,6)::text AS latitude,
        (avg(ST_X(the_geom)) OVER (PARTITION BY stop_nr))::NUMERIC(8,6)::text AS longitude
        FROM rec_stop JOIN
        (SELECT *, st_transform(ST_setsrid(st_makepoint(stopping_point_pos_x,stopping_point_pos_y),31466),4326) AS the_geom
        FROM rec_stopping_points) as stopping USING (stop_nr)
        """, [effective_prefix])
    rows = cursor.fetchall()
    cursor.close()
    return {row['operator_id']: row for row in rows}
def clusterPatternsIntoRoute(conn, prefix=None):
    """Build routes (one per line/direction/variant) and attach their ordered
    stop points with cumulative distance from the route start.

    Distances are summed from consecutive stopping-point geometries
    (Gauss-Krueger EPSG:31466) via a window over line_consec_nr.

    :param conn: open psycopg2 connection
    :param prefix: id namespace prefix; defaults to 'DINO'
    :returns: dict mapping route operator_id -> route row with a POINTS list
    """
    if prefix is None:
        prefix = 'DINO'
    cur = conn.cursor(cursor_factory=psycopg2.extras.RealDictCursor)
    routes = {}
    cur.execute("""
        SELECT DISTINCT ON (operator_id)
        concat_ws(':',%s,version,line_nr,line_dir_nr,str_line_var) as operator_id,
        %s||':'||version||':'||line_nr as lineref
        FROM lid_course
        """, [prefix]*2)
    for row in cur.fetchall():
        row['POINTS'] = []
        routes[row['operator_id']] = row
    cur.execute("""
        SELECT
        concat_ws(':',%s,l.version,l.line_nr,l.line_dir_nr,l.str_line_var) as routeref,
        %s||':'||l.stop_nr||':'||l.stopping_point_nr as privatecode,
        l.line_consec_nr as pointorder,
        ST_Y(p.the_geom)::NUMERIC(8,5)::text AS latitude,
        ST_X(p.the_geom)::NUMERIC(7,5)::text AS longitude,
        coalesce(SUM(st_distance(st_transform(p.the_geom,31466),lp.the_geom)::integer) OVER (PARTITION BY l.version,l.line_nr,l.line_dir_nr,l.str_line_var
        ORDER BY l.line_consec_nr),0) as distancefromstart
        FROM lid_course as l LEFT JOIN (SELECT *, st_transform(ST_setsrid(st_makepoint(stopping_point_pos_x,stopping_point_pos_y),31466),4326) AS the_geom FROM rec_stopping_points) as p USING (version,stop_nr,stop_type_nr,stopping_point_nr)
        LEFT JOIN (SELECT * FROM lid_course as lp LEFT JOIN (SELECT *, ST_setsrid(st_makepoint(stopping_point_pos_x,stopping_point_pos_y),31466) AS the_geom FROM rec_stopping_points) as po USING (version,stop_nr,stop_type_nr,stopping_point_nr)) as lp
        ON (l.version = lp.version AND l.line_nr = lp.line_nr AND l.line_dir_nr = lp.line_dir_nr AND l.str_line_var = lp.str_line_var AND lp.line_consec_nr = l.line_consec_nr-1)
        ORDER BY routeref,pointorder
        """, [prefix]*2)
    for row in cur.fetchall():
        route = routes[row['routeref']]
        route['POINTS'].append(row)
    cur.close()  # BUG FIX: cursor was leaked (every sibling function closes it)
    return routes
def getTimeDemandGroups(conn, prefix=None):
    """Extract time-demand groups (per-stop drive/wait time profiles) from
    lid_travel_time_type and deduplicate them by content hash.

    Rows are grouped by the concatenated id; each group's POINTS accumulate a
    running total drive time.  Afterwards every group's operator_id and
    privatecode are replaced by the md5 of its POINTS so that identical
    profiles share a single id.

    :param conn: open psycopg2 connection
    :param prefix: id namespace prefix; defaults to 'DINO'
    :returns: dict keyed by the original concatenated id, values carrying the
        hashed operator_id/privatecode and a POINTS list
    """
    import hashlib  # local import: replaces the Python-2-only `md5` module
    if prefix is None:
        prefix = 'DINO'
    cur = conn.cursor(cursor_factory=psycopg2.extras.RealDictCursor)
    timedemandgroups = {}
    cur.execute("""
        SELECT
        concat_ws(':',%s,version,line_nr,line_dir_nr,str_line_var,line_dir_nr,timing_group_nr) as operator_id,
        concat_ws(':',version,line_nr,line_dir_nr,str_line_var,timing_group_nr) as privatecode,
        cast(line_consec_nr as integer) as pointorder,
        tt_rel as drivingtime,
        stopping_time as stopwaittime
        FROM lid_travel_time_type
        ORDER BY operator_id,pointorder
        """, [prefix])
    totaldrivetime = 0
    for row in cur.fetchall():
        if row['operator_id'] not in timedemandgroups:
            # First point of a new group: the running total restarts here.
            # NOTE(review): the first point stores the raw drivingtime as its
            # total, matching the original behavior — confirm intended.
            timedemandgroups[row['operator_id']] = {
                'operator_id': row['operator_id'],
                'privatecode': row['privatecode'],
                'POINTS': [{'pointorder': row['pointorder'],
                            'totaldrivetime': row['drivingtime'],
                            'stopwaittime': row['stopwaittime']}]}
            totaldrivetime = row['drivingtime']
        else:
            points = timedemandgroups[row['operator_id']]['POINTS']
            totaldrivetime += row['drivingtime']
            points.append({'pointorder': row['pointorder'],
                           'totaldrivetime': totaldrivetime,
                           'stopwaittime': row['stopwaittime']})
        # Wait time at this stop counts towards the next point's total.
        totaldrivetime += row['stopwaittime']
    cur.close()  # BUG FIX: cursor was leaked
    for key, row in timedemandgroups.items():
        # BUG FIX: `md5.new()` is the long-removed Python 2 `md5` module and
        # `update()` needs bytes on Python 3; hashlib yields identical digests.
        digest = hashlib.md5(str(row['POINTS']).encode('utf-8')).hexdigest()
        row['operator_id'] = digest
        row['privatecode'] = digest
    return timedemandgroups
def getJourneyPatterns(conn, routes, prefix=None):
    """Fetch journey patterns (route sub-sections between a departure and an
    arrival stop) and their ordered points, annotated with the distance from
    the route start looked up in *routes*.

    :param conn: open psycopg2 connection
    :param routes: dict as returned by clusterPatternsIntoRoute(); each route
        must carry a POINTS list with 'privatecode' and 'distancefromstart'
    :param prefix: id namespace prefix; defaults to 'DINO'
        NOTE(review): the destinationdisplayref CASE hard-codes 'AVV' for the
        same-place branch while using %s for the other — confirm intended
        (getLines hard-codes 'AVV' similarly).
    :returns: dict mapping pattern operator_id -> pattern row with POINTS
    """
    if prefix is None:
        prefix = 'DINO'
    cur = conn.cursor(cursor_factory=psycopg2.extras.RealDictCursor)
    journeypatterns = {}
    cur.execute("""
        SELECT DISTINCT ON (rec_trip.version,line_nr,line_dir_nr,str_line_var,dep_stop_nr,arr_stop_nr,notice)
        concat_ws(':',%s,rec_trip.version,rec_trip.line_nr,line_dir_nr,str_line_var,dep_stop_nr,arr_stop_nr,notice) as operator_id,
        concat_ws(':',%s,rec_trip.version,rec_trip.line_nr,line_dir_nr,str_line_var) as routeref,
        line_dir_nr as directiontype,
        CASE WHEN (arr.place = dep.place) THEN 'AVV'||':'||arr.stop_nr
        ELSE %s||':P'||arr.stop_nr END as destinationdisplayref
        FROM
        rec_trip,rec_stop as arr,rec_stop as dep
        WHERE
        dep_stop_nr = dep.stop_nr AND
        arr_stop_nr = arr.stop_nr
        """, [prefix]*3)
    for row in cur.fetchall():
        journeypatterns[row['operator_id']] = row
        journeypatterns[row['operator_id']]['POINTS'] = []
    # Expand each pattern into its ordered points: join the course table once
    # for the departure stop, once for the arrival stop, once per pattern
    # point (jp) and once for the onward point (jpo = jp's successor).
    cur.execute("""
        SELECT DISTINCT ON (journeypatternref,pointorder)
        concat_ws(':',%s,t.version,jp.line_nr,jp.line_dir_nr,jp.str_line_var,dep_stop_nr,arr_stop_nr,notice) as journeypatternref,
        jp.line_consec_nr::integer as pointorder,
        null as privatecode,
        concat_ws(':',%s,jp.line_nr,jp.line_dir_nr,jp.str_line_var,dep_stop_nr,arr_stop_nr) as operator_id,
        %s||':'||jp.stop_nr||':'||jp.stopping_point_nr as pointref,
        %s||':'||jpo.stop_nr||':'||jpo.stopping_point_nr as onwardpointref,
        NULL as destinationdisplayref,
        NULL as noticeassignmentRef,
        %s as administrativezoneref,
        NULL as iswaitpoint,
        0 as waittime,
        NULL as requeststop,
        true as foralighting,
        true as forboarding,
        0 as distancefromstartroute,
        0 as fareunitspassed
        FROM
        rec_trip as t LEFT JOIN lid_course as dep USING (version,line_nr,line_dir_nr,str_line_var)
        LEFT JOIN lid_course as arr USING (version,line_nr,line_dir_nr,str_line_var)
        LEFT JOIN lid_course as jp USING (version,line_nr,line_dir_nr,str_line_var)
        LEFT JOIN lid_course as jpo ON (jp.version = jpo.version AND jp.line_nr = jpo.line_nr
        AND jp.line_dir_nr = jpo.line_dir_nr AND jp.str_line_var = jpo.str_line_var AND
        jp.line_consec_nr = jpo.line_consec_nr + 1)
        WHERE
        dep.stop_nr = dep_stop_nr AND
        dep.stop_type_nr = dep_stop_type_nr AND
        dep.stopping_point_nr = dep_stopping_point_nr AND
        arr.stop_nr = arr_stop_nr AND
        arr.stop_type_nr = arr_stop_type_nr AND
        arr.stopping_point_nr = arr_stopping_point_nr AND
        jp.line_consec_nr between dep.line_consec_nr AND arr.line_consec_nr
        ORDER BY journeypatternref,pointorder
        """, [prefix]*5)
    # Walk the points in order, carrying the last matched route distance so
    # monotonically increasing distances are assigned within a pattern.
    distance = 0
    patternref = None
    for row in cur.fetchall():
        if row['journeypatternref'] != patternref:
            # New pattern: reset the running distance.
            distance = 0
            patternref = row['journeypatternref']
        for point in routes[journeypatterns[row['journeypatternref']]['routeref']]['POINTS']:
            if point['distancefromstart'] >= distance and point['privatecode'] == row['pointref']:
                distance = point['distancefromstart']
                # Redundant with the assignment below, kept as-is.
                row['distancefromstartroute'] = distance
                break
        # If no route point matched, the previous distance is reused.
        row['distancefromstartroute'] = distance
        journeypatterns[row['journeypatternref']]['POINTS'].append(row)
    cur.close()
    return journeypatterns
def getJourneys(conn, prefix=None):
    """Fetch all vehicle journeys (trips) with references to their pattern,
    time-demand group, availability condition and product category.

    :param conn: open psycopg2 connection
    :param prefix: id namespace prefix; defaults to 'DINO'
    :returns: dict mapping journey operator_id -> journey row
    """
    effective_prefix = 'DINO' if prefix is None else prefix
    cursor = conn.cursor(cursor_factory=psycopg2.extras.RealDictCursor)
    # '%%' escapes literal '%' for the ILIKE patterns, since parameters are
    # passed to execute().
    cursor.execute("""
        SELECT
        concat_ws(':',%s,version,line_nr,trip_id) as privatecode,
        concat_ws(':',%s,version,trip_id,row_number() OVER (PARTITION BY version,trip_id ORDER BY trip_id)) as operator_id,
        concat_ws(':',%s,version,day_attribute_nr,restriction) as availabilityconditionRef,
        concat_ws(':',%s,version,line_nr,line_dir_nr,str_line_var,dep_stop_nr,arr_stop_nr,notice) as journeypatternref,
        concat_ws(':',%s,version,line_nr,line_dir_nr,str_line_var,line_dir_nr,timing_group_nr) as timedemandgroupref,
        concat_ws(':',%s,version,str_veh_type) as productCategoryRef,
        NULL as noticeassignmentRef,
        departuretime,
        NULL as blockref,
        coalesce(coalesce(trip_id_printing,train_nr),trip_id) as name,
        nullif(veh_type_text ilike '%%niederflurbus%%',false) as lowfloor,
        NULL as hasLiftOrRamp,
        NULL as haswifi,
        NULL as bicycleallowed,
        (veh_type_text ilike '%%taxi%%' OR veh_type_text ilike '%%rufbus%%')as onDemand
        FROM rec_trip LEFT JOIN set_vehicle_type USING (version,veh_type_nr)
        """, [effective_prefix] * 6)
    rows = cursor.fetchall()
    cursor.close()
    return {row['operator_id']: row for row in rows}
def getVersion(conn, prefix=None, filename=None):
    """Fetch dataset version records from set_version.

    :param conn: open psycopg2 connection
    :param prefix: id namespace prefix; defaults to 'DINO'
    :param filename: source file name embedded in privatecode; defaults to ''
    :returns: dict mapping operator_id (prefix:version) -> version row
    """
    if prefix is None:
        prefix = 'DINO'
    if filename is None:
        filename = ''
    cur = conn.cursor(cursor_factory=psycopg2.extras.RealDictCursor)
    cur.execute("""
        select '1' as datasourceref, %s||':'||version as operator_id,period_date_from as startdate,period_date_to as enddate,version||':'||%s as privatecode,
        version_text as description
        from set_version;""", [prefix, filename])
    version = {}
    for row in cur.fetchall():
        version[row['operator_id']] = row
    cur.close()  # BUG FIX: cursor was leaked (every sibling function closes it)
    return version
|
# ===============================================================================
# Copyright 2013 Jake Ross
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ===============================================================================
# ============= enthought library imports =======================
from __future__ import absolute_import
from chaco.abstract_overlay import AbstractOverlay
from chaco.data_label import draw_arrow
from chaco.label import Label
from chaco.plot_label import PlotLabel
from enable.colors import convert_from_pyqt_color
from enable.font_metrics_provider import font_metrics_provider
from enable.tools.drag_tool import DragTool
from kiva.trait_defs.kiva_font_trait import KivaFont
# ============= standard library imports ========================
from numpy import where, array
from six.moves import zip
from traits.api import Array, Int, Float, Str, Color, Bool, List
# ============= local library imports ==========================
from pychron.core.helpers.formatting import floatfmt
from pychron.graph.tools.info_inspector import InfoOverlay, InfoInspector
from pychron.pychron_constants import PLUSMINUS, SIGMA
class BasePlateauOverlay(AbstractOverlay):
    """Common base for age-spectrum plateau overlays: maps a screen point to
    the index of the spectrum step beneath it."""

    # Cumulative 39Ar values; data is stored as flattened (start, end) pairs,
    # so every even entry marks the start of a step.
    cumulative39s = Array

    def _get_section(self, pt):
        """Return the index of the step under screen point *pt* (0 if *pt*
        lies before the first step)."""
        d = self.component.map_data(pt)
        cs = self.cumulative39s[::2]
        # Index of the last step whose start is left of the mapped x value.
        t = where(cs < d)[0]
        if len(t):
            tt = t[-1]
        else:
            tt = 0
        return tt
class SpectrumTool(InfoInspector, BasePlateauOverlay):
    """Inspector for age-spectrum plots: hit-tests individual steps, toggles
    step selection on click, and assembles the hover info text."""

    # Error-envelope half-width in multiples of sigma used for hit testing
    nsigma = Int(2)
    # metadata_changed = Event
    # current_position = None
    # current_screen = None
    # Analysis records backing the plotted steps, parallel to step index
    analyses = List
    # Cached hover info lines; invalidated when the hovered step changes
    _cached_lines = None

    def hittest(self, screen_pt, ndx=None):
        """Return the index of the step whose +/- nsigma error envelope
        contains *screen_pt*, or None."""
        comp = self.component
        if ndx is None:
            ndx = self._get_section(screen_pt)
        ys = comp.value.get_data()[::2]
        if ndx < len(ys):
            yd = ys[ndx]
            e = comp.errors[ndx * 2] * self.nsigma
            yl, yu = comp.y_mapper.map_screen(array([yd - e, yd + e]))
            # Guarantee a minimal hit band for vanishingly small errors.
            if yu - yl < 1:
                yu += 1
                yl -= 1
            if yl < screen_pt[1] < yu:
                return ndx

    def normal_left_down(self, event):
        """Toggle selection state of the clicked step (symmetric difference
        with the current selection set)."""
        if event.handled:
            return
        pt = event.x, event.y
        ndx = self.hittest(pt)
        if ndx is not None:
            sels = self.component.index.metadata['selections']
            self.component.index.metadata['selections'] = list(set(sels) ^ {ndx})
            self.component.request_redraw()
            event.handled = True

    def assemble_lines(self):
        """Build (and cache) the info lines shown for the hovered step."""
        if self._cached_lines is None:
            idx = self.current_position
            comp = self.component
            # Data is flattened (start, end) pairs: idx2 addresses the start.
            idx2 = idx * 2
            e = comp.errors[idx2]
            ys = comp.value.get_data()[::2]
            v = ys[idx]
            low_c = self.cumulative39s[idx2]
            high_c = self.cumulative39s[idx2 + 1]
            an = self.analyses[idx]
            lines = ['RunID={}'.format(an.record_id),
                     'Tag={}'.format(an.tag),
                     'Status={}'.format(an.status_text),
                     u'{}={} {}{} (1{})'.format(comp.container.y_axis.title, floatfmt(v), PLUSMINUS,
                                                floatfmt(e), SIGMA),
                     'Cumulative. Ar39={}-{}'.format(floatfmt(low_c), floatfmt(high_c))]
            self._cached_lines = lines
        return self._cached_lines

    def normal_mouse_move(self, event):
        """Track the hovered step and invalidate the cached info lines when
        the hovered step changes."""
        pt = event.x, event.y
        hover = self._get_section(pt)
        if self.hittest(pt, hover) is not None:
            # print('setting cross')
            # event.window.set_pointer('cross')
            # self.component.index.metadata['hover'] = [hover]
            if self.current_position != hover:
                self._cached_lines = None
            self.current_position = hover
            self.current_screen = pt
        else:
            # print('settinasg arrow')
            # event.window.set_pointer('arrow')
            # self.component.index.metadata['hover'] = None
            self.current_position = None
            self.current_screen = None
        # Notify listeners (e.g. the paired info overlay) of the new state.
        self.metadata_changed = True
class SpectrumInspectorOverlay(InfoOverlay):
    """Info overlay paired with SpectrumTool; all behavior is inherited
    from InfoOverlay."""
    pass

    # @on_trait_change('tool:metadata_changed')
    # def _update_(self, new):
    #     print 'asdf', new
    # tool =Any
    # @on_trait_change('tool:current_section')
    # def handle(self, new):
    #     if new>=0:
    #         self.visible=True
    #     else:
    #         self.visible=False
    #
    # def overlay(self, other_component, gc, view_bounds=None, mode="normal"):
    #     print 'pasdasfd'
    #     with gc:
class SpectrumErrorOverlay(AbstractOverlay):
    """Draws each spectrum step as an error box (stroked or filled) plus the
    vertical connectors between consecutive steps."""

    # Box half-height in multiples of sigma
    nsigma = Int(1)
    # Fill alpha in percent; only used when use_fill is True
    alpha = Float
    use_fill = Bool(False)
    # Indices of user-excluded (selected) steps, drawn in the dim color
    selections = List
    use_user_color = Bool(False)
    user_color = Color
    # (start, end) step indices of the plateau, or None when absent
    platbounds = None
    # When True, steps outside the plateau are drawn dimmed
    dim_non_plateau = Bool(False)

    def overlay(self, component, gc, *args, **kw):
        """Render the error boxes and connectors onto *gc*, clipped to the
        component bounds."""
        comp = self.component
        with gc:
            gc.clip_to_rect(comp.x, comp.y, comp.width, comp.height)
            xs = comp.index.get_data()
            ys = comp.value.get_data()
            es = comp.errors
            # sels = comp.index.metadata['selections']
            sels = self.selections
            n = len(xs)
            # Flattened data becomes (n/2, 2) arrays of (start, end) pairs.
            xs = xs.reshape(n // 2, 2)
            ys = ys.reshape(n // 2, 2)
            es = es.reshape(n // 2, 2)
            if self.use_fill:
                alpha = self.alpha * 0.01
                func = gc.fill_path
            else:
                alpha = 1.0
                func = gc.stroke_path
            # NOTE(review): assumes user_color exposes PyQt-style
            # red()/green()/blue()/alpha() methods — confirm for the
            # active toolkit backend.
            color = self.user_color
            color = [x / 255. for x in (color.red(), color.green(), color.blue(), color.alpha())]
            color = color[0], color[1], color[2], alpha
            if abs(alpha - 0.3) < 0.1:
                selection_color = (0.75, 0, 0)
            else:
                selection_color = color[0], color[1], color[2], 0.3
            n = len(xs)
            step_a, step_b = None, None
            if self.platbounds:
                step_a, step_b = self.platbounds
            for i, ((xa, xb), (ya, yb), (ea, eb)) in enumerate(zip(xs, ys, es)):
                ea *= self.nsigma
                # eb *= self.nsigma
                # Corners of the error box for this step (data space).
                p1 = xa, ya - ea
                p2 = xa, ya + ea
                p3 = xb, ya - ea
                p4 = xb, ya + ea
                p1, p2, p3, p4 = comp.map_screen([p1, p2, p3, p4])
                x = p1[0]
                y = p1[1]
                w = p3[0] - p1[0]
                h = p2[1] - p1[1]
                if self.dim_non_plateau:
                    # Plateau steps keep the full color; others are dimmed.
                    if step_a is not None and step_a <= i <= step_b:
                        c = color
                    else:
                        c = selection_color
                    fc, sc = c, c
                    if i in sels:
                        fc = (0, 0, 0, 0)
                        sc = selection_color
                else:
                    sc = fc = selection_color if i in sels else color
                gc.set_fill_color(fc)
                gc.set_stroke_color(sc)
                gc.rect(x, y, w, h)
                gc.draw_path()
                func()
                if i > 0 and i <= n:
                    # draw verticals
                    # `pp` carries the previous step's (x_end, y, error).
                    px, y, e = pp
                    y1 = min((y - e), ya - ea)
                    y2 = max((y + e), ya + ea)
                    p1, p2 = comp.map_screen([(px, y1), (px, y2)])
                    with gc:
                        gc.begin_path()
                        gc.move_to(*p1)
                        gc.line_to(*p2)
                        gc.close_path()
                        gc.stroke_path()
                pp = (xb, ya, ea)
class PlateauTool(DragTool):
    """Drag tool that lets the user grab the plateau overlay and move it
    vertically on the plot."""

    def normal_mouse_move(self, event):
        """Show a hand cursor while hovering over the draggable line."""
        pointer = 'hand' if self.is_draggable(event.x, event.y) else 'arrow'
        event.window.set_pointer(pointer)

    def is_draggable(self, x, y):
        """Delegate draggability to the overlay's own hit test."""
        return self.component.hittest((x, y))

    def drag_start(self, event):
        """Record the drag anchor in data space."""
        anchor = self.component.component.map_data((event.x, event.y), all_values=True)
        self._prev_pt = anchor
        event.handled = True

    def dragging(self, event):
        """Shift the overlay by the vertical delta since the last event and
        trigger a redraw."""
        plot = self.component.component
        current = plot.map_data((event.x, event.y), all_values=True)
        delta_y = current[1] - self._prev_pt[1]
        self.component.y += delta_y
        self.component.dragged = True
        self._prev_pt = current
        plot.invalidate_and_redraw()
class PlateauOverlay(BasePlateauOverlay):
    """Overlay drawing the plateau line with end caps, optional arrows and an
    age label on an age spectrum plot."""

    # (start, end) step indices of the plateau; equal values mean no plateau
    plateau_bounds = Array
    # y = Float
    # Set True by PlateauTool once the user drags the overlay
    dragged = False
    id = Str
    plateau_label = PlotLabel
    # Text shown in the plateau label
    info_txt = Str
    label_visible = True
    label_offset = 0
    label_font = KivaFont
    # label_font_size = 10
    extend_end_caps = True
    # Per-step age errors (parallel to `ages`)
    ages_errors = Array
    ages = Array
    # Error-envelope width for the extended end caps, in sigma
    nsigma = Int(2)
    line_color = Color('red')
    line_width = Float(1.0)
    # Indices of user-excluded steps, skipped at the plateau ends
    selections = List
    arrow_visible = Bool

    def hittest(self, pt, threshold=7):
        """Return True if *pt* lies on the plateau line within *threshold*
        pixels vertically."""
        x, y = pt
        pts = self._get_line()
        if pts is not None:
            pt1, pt2, y1, y2 = pts
            if pt1[0] <= x <= pt2[0]:
                if abs(y - pt1[1]) <= threshold:
                    return True

    def _get_line(self):
        """
        returns screen values for start plat, end plat, error mag at start,
        error mag at end; None when no plateau is defined
        """
        cs = self.cumulative39s
        ps = self.plateau_bounds
        if ps[0] == ps[1]:
            # Degenerate bounds: no plateau to draw.
            return
        sidx = ps[0]
        eidx = ps[1]
        sels = self.selections
        # sels = self.component.index.metadata['selections']
        # Skip excluded steps at either end of the plateau.
        while sidx in sels:
            sidx += 1
        while eidx in sels:
            eidx -= 1
        eidx += 1
        cstart = cs[sidx]
        cend = cs[eidx]
        aes = self.ages
        # BUG FIX: was `self.age_errors`; the trait declared on this class is
        # `ages_errors`, so the old name raised on access.
        es = self.ages_errors
        eidx -= 1
        estart = es[sidx] * self.nsigma
        eend = es[eidx] * self.nsigma
        ystart = aes[sidx]
        yend = aes[eidx]
        y = self._get_plateau_y()
        # End-cap extents: toward the error envelope on the side facing y.
        a = ystart - estart if y < ystart else ystart + estart
        b = yend - eend if y < yend else yend + eend
        pt1, pt2, up1, up2 = self.component.map_screen([(cstart, y), (cend, y), (cstart, a), (cend, b)])
        # up1, up2 = self.component.map_screen([(cstart, a), (cend, b)])
        y1, y2 = up1[1], up2[1]
        return pt1, pt2, y1, y2

    def _get_plateau_y(self, screen_offset=100):
        """
        return y for plateau in data space
        if y value greater than bounds set y to ybounds - 50px
        :param screen_offset:
        :return:
        """
        comp = self.component
        # NOTE(review): `self.y` is set externally (the `y = Float` trait is
        # commented out above) — confirm the owner assigns it before drawing.
        y = self.y
        oy = comp.value_mapper.map_data(0)
        delta = comp.value_mapper.map_data(screen_offset) - oy
        y += delta
        by = comp.value_range.high
        if y > by:
            delta = comp.value_mapper.map_data(50) - oy
            y = by - delta
        return y

    def _draw_end_caps(self, gc, x1, x2, y):
        """Draw short vertical caps at both ends of the plateau line."""
        gc.lines([(x1, y - 10), (x1, y + 10)])
        gc.lines([(x2, y - 10), (x2, y + 10)])

    def _draw_extended_end_caps(self, gc, x1, x2, y, y1, y2):
        """Extend the end caps toward the error envelope edges y1/y2."""
        if y1 > y:
            gc.lines([(x1, y - 10), (x1, y1 - 5)])
        else:
            gc.lines([(x1, y + 10), (x1, y1 + 5)])

        if y2 > y:
            gc.lines([(x2, y - 10), (x2, y2 - 5)])
        else:
            gc.lines([(x2, y + 10), (x2, y2 + 5)])
        # if y1 < y and y2<y:
        #     gc.lines([(x1, y1+5), (x1, y + 10)])
        #     gc.lines([(x2, y2+5), (x2, y + 10)])
        # elif y1> y and y2>y:
        #     gc.lines([(x1, y - 10),(x1, y1 + 5)])
        #     gc.lines([(x2, y - 10),(x2, y2 + 5)])

    def overlay(self, component, gc, *args, **kw):
        """Render the plateau line, caps, arrows and label onto *gc*."""
        points = self._get_line()
        if points:
            pt1, pt2, y1, y2 = points
            with gc:
                comp = self.component
                gc.clip_to_rect(comp.x, comp.y, comp.width, comp.height)
                color = convert_from_pyqt_color(None, None, self.line_color)
                gc.set_stroke_color(color)
                gc.set_line_width(self.line_width)
                y = pt1[1]
                # Pull the line in by a pixel so the caps stay visible.
                x1 = pt1[0] + 1
                x2 = pt2[0] - 1
                gc.lines([(x1, y), (x2, y)])
                self._draw_end_caps(gc, x1, x2, y)
                gc.draw_path()
                if self.arrow_visible:
                    draw_arrow(gc, (x1 + 5, y), (x1, y), color)
                    draw_arrow(gc, (x2 - 5, y), (x2, y), color)
                # add end caps
                if self.extend_end_caps:
                    gc.set_line_width(1)
                    self._draw_extended_end_caps(gc, x1, x2, y, y1, y2)
                    gc.draw_path()
                if self.label_visible:
                    label = self._get_plateau_label(x1, x2, y)
                    label.overlay(component, gc)

    def _get_plateau_label(self, x1, x2, y):
        """Return (building and caching if needed) the PlotLabel centered over
        the plateau, nudged to stay inside the component bounds."""
        # NOTE(review): reusing the cached label when layout_needed is True
        # (rather than rebuilding) looks inverted — confirm intended; kept
        # as-is to preserve behavior.
        if self.layout_needed or not self.plateau_label:
            p = self.plateau_label
        else:
            comp = self.component
            x = x1 + (x2 - x1) * 0.5

            dummy_gc = font_metrics_provider()
            l = Label(text=self.info_txt)
            w, h = l.get_bounding_box(dummy_gc)
            xa = x + w / 2.
            hjustify = 'center'
            if xa > comp.x2:
                d = xa - comp.x2
                x -= d
            elif x - w / 2. < comp.x:
                x = comp.x + 5
                hjustify = 'left'
            x = max(comp.x, x)
            p = PlotLabel(text=self.info_txt,
                          font=self.label_font,
                          # font='modern {}'.format(self.label_font_size),
                          color=self.line_color,
                          hjustify=hjustify,
                          border_visible=True,
                          bgcolor='white',
                          x=x,
                          y=y + 10)
            self.plateau_label = p
        return p
# ============= EOF =============================================
|
"""
Inpainting using Generative Adversarial Networks.
The dataset can be downloaded from: https://www.dropbox.com/sh/8oqt9vytwxb3s4r/AADIKlz8PR9zr6Y20qbkunrba/Img/img_align_celeba.zip?dl=0
Instructions for running the script:
1. Download the dataset from the provided link
2. Save the folder 'img_align_celeba' to '../../data/'
3. Run the script using the command 'python3 context_encoder.py'
"""
import argparse
import os
import numpy as np
import math
import torchvision.transforms as transforms
from torchvision.utils import save_image
from PIL import Image
from torch.utils.data import DataLoader
from torchvision import datasets
from torch.autograd import Variable
from datasets import *
from models import *
import torch.nn as nn
import torch.nn.functional as F
import torch
# Output directory for periodically saved inpainting samples
os.makedirs('images', exist_ok=True)

parser = argparse.ArgumentParser()
parser.add_argument('--n_epochs', type=int, default=200, help='number of epochs of training')
parser.add_argument('--batch_size', type=int, default=8, help='size of the batches')
parser.add_argument('--dataset_name', type=str, default='img_align_celeba', help='name of the dataset')
parser.add_argument('--lr', type=float, default=0.0002, help='adam: learning rate')
parser.add_argument('--b1', type=float, default=0.5, help='adam: decay of first order momentum of gradient')
parser.add_argument('--b2', type=float, default=0.999, help='adam: decay of first order momentum of gradient')
parser.add_argument('--n_cpu', type=int, default=4, help='number of cpu threads to use during batch generation')
parser.add_argument('--latent_dim', type=int, default=100, help='dimensionality of the latent space')
parser.add_argument('--img_size', type=int, default=128, help='size of each image dimension')
parser.add_argument('--mask_size', type=int, default=64, help='size of random mask')
parser.add_argument('--channels', type=int, default=3, help='number of image channels')
parser.add_argument('--sample_interval', type=int, default=500, help='interval between image sampling')
opt = parser.parse_args()
print(opt)

cuda = True if torch.cuda.is_available() else False

# Calculate output of image discriminator (PatchGAN)
# The discriminator downsamples by a factor of 2**3, so each output patch
# covers mask_size / 8 pixels per side.
patch_h, patch_w = int(opt.mask_size / 2**3), int(opt.mask_size / 2**3)
patch = (1, patch_h, patch_w)
def weights_init_normal(m):
    """DCGAN-style initializer, applied via ``module.apply``.

    Conv* layers get N(0, 0.02) weights; BatchNorm2d layers get N(1, 0.02)
    weights and zero bias.  Other module types are left untouched.
    """
    layer_type = m.__class__.__name__
    if 'Conv' in layer_type:
        torch.nn.init.normal_(m.weight.data, 0.0, 0.02)
    elif 'BatchNorm2d' in layer_type:
        torch.nn.init.normal_(m.weight.data, 1.0, 0.02)
        torch.nn.init.constant_(m.bias.data, 0.0)
# Loss functions: LSGAN-style MSE for the adversarial term, L1 for the
# pixelwise reconstruction of the masked region
adversarial_loss = torch.nn.MSELoss()
pixelwise_loss = torch.nn.L1Loss()

# Initialize generator and discriminator
generator = Generator(channels=opt.channels)
discriminator = Discriminator(channels=opt.channels)

if cuda:
    generator.cuda()
    discriminator.cuda()
    adversarial_loss.cuda()
    pixelwise_loss.cuda()

# Initialize weights
generator.apply(weights_init_normal)
discriminator.apply(weights_init_normal)

# Dataset loader; images are normalized to [-1, 1] per channel
transforms_ = [ transforms.Resize((opt.img_size, opt.img_size), Image.BICUBIC),
                transforms.ToTensor(),
                transforms.Normalize((0.5,0.5,0.5), (0.5,0.5,0.5)) ]
dataloader = DataLoader(ImageDataset("../../data/%s" % opt.dataset_name, transforms_=transforms_),
                        batch_size=opt.batch_size, shuffle=True, num_workers=opt.n_cpu)
test_dataloader = DataLoader(ImageDataset("../../data/%s" % opt.dataset_name, transforms_=transforms_, mode='val'),
                             batch_size=12, shuffle=True, num_workers=1)

# Optimizers
optimizer_G = torch.optim.Adam(generator.parameters(), lr=opt.lr, betas=(opt.b1, opt.b2))
optimizer_D = torch.optim.Adam(discriminator.parameters(), lr=opt.lr, betas=(opt.b1, opt.b2))

# Shorthand for device-appropriate float tensor construction
Tensor = torch.cuda.FloatTensor if cuda else torch.FloatTensor
def save_sample(batches_done):
    """Inpaint one validation batch and save a comparison grid
    (masked / filled / original rows) to images/<batches_done>.png."""
    samples, masked_samples, i = next(iter(test_dataloader))
    samples = Variable(samples.type(Tensor))
    masked_samples = Variable(masked_samples.type(Tensor))
    # Upper-left coordinate of mask
    # NOTE(review): only i[0] is used, assuming the whole validation batch
    # shares one mask position — confirm against the dataset's 'val' mode.
    i = i[0].item()
    # Generate inpainted image
    gen_mask = generator(masked_samples)
    filled_samples = masked_samples.clone()
    filled_samples[:, :, i:i+opt.mask_size, i:i+opt.mask_size] = gen_mask
    # Save sample: concatenate the three variants vertically per image
    sample = torch.cat((masked_samples.data, filled_samples.data, samples.data), -2)
    save_image(sample,'images/%d.png' % batches_done, nrow=6, normalize=True)
# ----------
#  Training
# ----------

for epoch in range(opt.n_epochs):
    for i, (imgs, masked_imgs, masked_parts) in enumerate(dataloader):

        # Adversarial ground truths, shaped like the PatchGAN output
        valid = Variable(Tensor(imgs.shape[0], *patch).fill_(1.0), requires_grad=False)
        fake = Variable(Tensor(imgs.shape[0], *patch).fill_(0.0), requires_grad=False)

        # Configure input
        imgs = Variable(imgs.type(Tensor))
        masked_imgs = Variable(masked_imgs.type(Tensor))
        masked_parts = Variable(masked_parts.type(Tensor))

        # -----------------
        #  Train Generator
        # -----------------

        optimizer_G.zero_grad()

        # Generate a batch of images (predicted content for the masked region)
        gen_parts = generator(masked_imgs)

        # Adversarial and pixelwise loss
        g_adv = adversarial_loss(discriminator(gen_parts), valid)
        g_pixel = pixelwise_loss(gen_parts, masked_parts)
        # Total loss: reconstruction dominates, adversarial term sharpens
        g_loss = 0.001 * g_adv + 0.999 * g_pixel

        g_loss.backward()
        optimizer_G.step()

        # ---------------------
        #  Train Discriminator
        # ---------------------

        optimizer_D.zero_grad()

        # Measure discriminator's ability to classify real from generated samples
        # (gen_parts is detached so only the discriminator is updated here)
        real_loss = adversarial_loss(discriminator(masked_parts), valid)
        fake_loss = adversarial_loss(discriminator(gen_parts.detach()), fake)
        d_loss = 0.5 * (real_loss + fake_loss)

        d_loss.backward()
        optimizer_D.step()

        print ("[Epoch %d/%d] [Batch %d/%d] [D loss: %f] [G adv: %f, pixel: %f]" % (epoch, opt.n_epochs, i, len(dataloader),
                                                            d_loss.item(), g_adv.item(), g_pixel.item()))

        # Generate sample at sample interval
        batches_done = epoch * len(dataloader) + i
        if batches_done % opt.sample_interval == 0:
            save_sample(batches_done)
|
"""Herein lies the ability for ansible-runner to run the ansible-config command."""
from typing import Optional
from typing import Tuple
from ansible_runner import get_ansible_config
from .base import Base
class AnsibleConfig(Base):
    """Abstraction for ansible-config command-line."""

    def fetch_ansible_config(
        self,
        action: str,
        config_file: Optional[str] = None,
        only_changed: Optional[bool] = None,
    ) -> Tuple[str, str]:
        """Run ansible-config and return the requested configuration details.

        :param action: One of ``list`` (all options with descriptions),
            ``dump`` (all active configuration) or ``view`` (the active
            configuration file contents).
        :param config_file: Path to a configuration file; when ``None`` the
            first file found in precedence order is used.
        :param only_changed: When ``True``, restrict output to settings that
            differ from their defaults; meaningful only with ``dump``.
            Defaults to ``None``.
        :returns: A tuple of response and error string (if any)
        """
        response_and_error = get_ansible_config(
            action,
            config_file=config_file,
            only_changed=only_changed,
            **self._runner_args,
        )
        return response_and_error
|
import numpy as np
from random import choice
## Note: Non-smooth surfaces or bad triangulations may lead to non-spiral orderings of the vertices.
## A common issue in badly triangulated surfaces is that some edges belong to more than two triangles. In that
## case the mathematical definition of the spiral is insufficient, and in this version of the code we randomly
## choose two of those triangles in order to continue the inductive assignment of the order to the remaining vertices.
def get_adj_trigs(A, F, reference_mesh, meshpackage='mpi-mesh'):
    """Build per-vertex adjacency lists and incident-triangle lists for every
    hierarchy level.

    :param A: list of sparse adjacency matrices, one per hierarchy level
    :param F: list of face arrays for the downsampled levels (level 0 faces
        come from *reference_mesh*)
    :param reference_mesh: full-resolution mesh; faces are read from ``.faces``
        (trimesh) or ``.f`` (mpi-mesh) depending on *meshpackage*
    :param meshpackage: 'trimesh' or 'mpi-mesh'
    :returns: (Adj, Trigs) — Adj[l][v] is the array of neighbors of vertex v
        at level l; Trigs[l][v] is the list of (u, v, w) triangles touching v
    """
    Adj = []
    for adj_matrix in A:
        dense = adj_matrix.todense()
        Adj.append([dense[row_idx].nonzero()[1] for row_idx in range(adj_matrix.shape[0])])

    if meshpackage == 'trimesh':
        mesh_faces = reference_mesh.faces
    elif meshpackage == 'mpi-mesh':
        mesh_faces = reference_mesh.f

    def _incident_triangles(faces, n_vertices):
        # Map each vertex to the list of triangles it participates in.
        incident = [[] for _ in range(n_vertices)]
        for u, v, w in faces:
            tri = (u, v, w)
            incident[u].append(tri)
            incident[v].append(tri)
            incident[w].append(tri)
        return incident

    # Create Triangles List: full-resolution level, then each downsampled one.
    Trigs = [_incident_triangles(mesh_faces, len(Adj[0]))]
    for level, faces in enumerate(F):
        Trigs.append(_incident_triangles(faces, len(Adj[level + 1])))
    return Adj, Trigs
def generate_spirals(step_sizes, M, Adj, Trigs, reference_points, dilation=None, random=False, meshpackage = 'mpi-mesh', counter_clockwise = True, nb_stds = 2):
    """Generate fixed-size spiral vertex orderings for every hierarchy level.

    :param step_sizes: number of spiral rings to collect per level
    :param M: list of meshes, one per level (vertices read via ``.vertices``
        for trimesh or ``.v`` for mpi-mesh)
    :param Adj: per-level adjacency lists (see get_adj_trigs)
    :param Trigs: per-level incident-triangle lists (see get_adj_trigs)
    :param reference_points: per-level reference vertex indices used to seed
        the spiral orientation
    :param dilation: optional per-level dilation factors applied to spirals
    :param random: choose spiral starting points randomly instead of via
        shortest paths to the reference points
    :param meshpackage: 'trimesh' or 'mpi-mesh'
    :param counter_clockwise: spiral winding direction
    :param nb_stds: spirals are truncated/padded to mean + nb_stds * std of
        the observed lengths
    :returns: (spirals_np, spiral_sizes, Adj_spirals)
    """
    Adj_spirals = []
    for i in range(len(Adj)):
        if meshpackage =='trimesh':
            mesh_vertices = M[i].vertices
        elif meshpackage =='mpi-mesh':
            mesh_vertices = M[i].v
        sp = get_spirals(mesh_vertices, Adj[i],Trigs[i],reference_points[i], n_steps=step_sizes[i],\
                         padding='zero', counter_clockwise = counter_clockwise, random = random)
        Adj_spirals.append(sp)
        print('spiral generation for hierarchy %d (%d vertices) finished' %(i,len(Adj_spirals[-1])))
    ## Dilated convolution: keep the center vertex, then every dil-th entry.
    if dilation:
        for i in range(len(dilation)):
            dil = dilation[i]
            dil_spirals = []
            for j in range(len(Adj_spirals[i])):
                s = Adj_spirals[i][j][:1] + Adj_spirals[i][j][1::dil]
                dil_spirals.append(s)
            Adj_spirals[i] = dil_spirals
    # Calculate the lengths of spirals
    #   Use mean + 2 * std_dev, to capture 97% of data
    L = []
    for i in range(len(Adj_spirals)):
        L.append([])
        for j in range(len(Adj_spirals[i])):
            L[i].append(len(Adj_spirals[i][j]))
        L[i] = np.array(L[i])
    spiral_sizes = []
    for i in range(len(L)):
        sz = L[i].mean() + nb_stds*L[i].std()
        spiral_sizes.append(int(sz))
        print('spiral sizes for hierarchy %d: %d' %(i,spiral_sizes[-1]))
    # 1) fill with -1 (index to the dummy vertex, i.e the zero padding) the spirals with length smaller than the chosen one
    # 2) Truncate larger spirals
    spirals_np = []
    for i in range(len(spiral_sizes)): #len(Adj_spirals)):
        # One extra row for the dummy (padding) vertex at index -1.
        S = np.zeros((1,len(Adj_spirals[i])+1,spiral_sizes[i])) - 1
        for j in range(len(Adj_spirals[i])):
            S[0,j,:len(Adj_spirals[i][j])] = Adj_spirals[i][j][:spiral_sizes[i]]
        #spirals_np.append(np.repeat(S,args['batch_size'],axis=0))
        spirals_np.append(S)
    return spirals_np, spiral_sizes, Adj_spirals
def distance(v, w):
    """Euclidean (L2) distance between the arrays `v` and `w`."""
    return np.linalg.norm(v - w)
def single_source_shortest_path(V, E, source, dist=None, prev=None):
    """Dijkstra shortest paths from `source` over an adjacency-list graph.

    Parameters
    ----------
    V : indexable of vertex positions; edge weights are computed with the
        module-level `distance` helper on pairs of entries.
    E : adjacency lists, E[v] = iterable of neighbor vertex ids.
    source : start vertex id.
    dist, prev : optional pre-allocated lists of length len(V); when given
        they are updated in place (callers chain several calls to reuse them).

    Returns
    -------
    (prev, dist) : predecessor id (or None) and distance per vertex.
    """
    import heapq
    # Fix: use identity comparison (`== None` is unidiomatic and ambiguous for
    # array-likes), and allocate `prev` independently so passing only `dist`
    # no longer crashes on `prev[v] = p`.
    if dist is None:
        dist = [None for _ in range(len(V))]
    if prev is None:
        prev = [None for _ in range(len(V))]
    q = []
    seen = set()
    heapq.heappush(q, (0, source, None))
    # Stop early once every vertex has been finalized.
    while len(q) > 0 and len(seen) < len(V):
        d_, v, p = heapq.heappop(q)
        if v in seen:
            continue
        seen.add(v)
        prev[v] = p
        dist[v] = d_
        for w in E[v]:
            if w in seen:
                continue
            dw = d_ + distance(V[v], V[w])
            heapq.heappush(q, (dw, w, v))
    return prev, dist
def get_spirals(mesh, adj, trig, reference_points, n_steps=1, padding='zero', counter_clockwise = True, random = False):
    """Compute a spiral vertex ordering around every vertex of a mesh.

    For each vertex i, walks its 1-ring (and up to `n_steps`-1 further rings)
    by following shared triangles, producing a list of vertex ids; -1 marks a
    padding/dummy vertex inserted where the spiral hits a mesh boundary.

    Parameters
    ----------
    mesh : (n_vertices, 3) array of vertex positions.
    adj : per-vertex adjacency lists.
    trig : per-vertex lists of incident triangles (vertex-id triples).
    reference_points : vertex ids used to orient spirals deterministically
        (ignored when `random` is True).
    n_steps : number of rings per spiral.
    padding : only 'zero' behavior is implemented here (-1 sentinel).
    counter_clockwise : spiral winding direction.
    random : pick a random starting neighbor instead of the heat-path one.

    Returns
    -------
    list of per-vertex spirals (variable-length lists of vertex ids).

    Note: leftover `pdb.set_trace()` debugger traps and dead commented-out
    code were removed; errors now propagate instead of dropping into pdb.
    """
    spirals = []
    if not random:
        # Multi-call Dijkstra from each reference point; heat_path[i] is the
        # predecessor toward the (last) reference point.
        heat_path = None
        dist = None
        for reference_point in reference_points:
            heat_path, dist = single_source_shortest_path(mesh, adj, reference_point, dist, heat_path)
        heat_source = reference_points
    for i in range(mesh.shape[0]):
        seen = set(); seen.add(i)
        trig_central = list(trig[i]); A = adj[i]; spiral = [i]
        # 1) First degree of freedom - choose starting point:
        if not random:
            if i in heat_source:  # choose closest neighbor
                shortest_dist = np.inf
                init_vert = None
                for neighbor in A:
                    d = np.sum(np.square(mesh[i] - mesh[neighbor]))
                    if d < shortest_dist:
                        shortest_dist = d
                        init_vert = neighbor
            else:  # on the shortest path to the reference point
                init_vert = heat_path[i]
        else:
            # choose starting point: random for first ring
            init_vert = choice(A)
        # first ring
        if init_vert is not None:
            ring = [init_vert]; seen.add(init_vert)
        else:
            ring = []
        while len(trig_central) > 0 and init_vert is not None:
            cur_v = ring[-1]
            cur_t = [t for t in trig_central if t in trig[cur_v]]
            if len(ring) == 1:
                orientation_0 = (cur_t[0][0] == i and cur_t[0][1] == cur_v) \
                    or (cur_t[0][1] == i and cur_t[0][2] == cur_v) \
                    or (cur_t[0][2] == i and cur_t[0][0] == cur_v)
                if not counter_clockwise:
                    orientation_0 = not orientation_0
                # 2) Second degree of freedom - 2nd point/orientation ambiguity
                if len(cur_t) >= 2:
                    # Choose the triangle that will direct the spiral counter-clockwise
                    if orientation_0:
                        # Third point in the triangle - next vertex in the spiral
                        third = [p for p in cur_t[0] if p != i and p != cur_v][0]
                        trig_central.remove(cur_t[0])
                    else:
                        third = [p for p in cur_t[1] if p != i and p != cur_v][0]
                        trig_central.remove(cur_t[1])
                    ring.append(third)
                    seen.add(third)
                # 3) Stop if the spiral hits the boundary in the first point
                elif len(cur_t) == 1:
                    break
            else:
                # 4) Unique ordering for the rest of the points (3rd onwards)
                if len(cur_t) >= 1:
                    # Third point in the triangle - next vertex in the spiral
                    third = [p for p in cur_t[0] if p != cur_v and p != i][0]
                    # Don't append the spiral if the vertex has been visited already
                    # (happens when the first ring is completed and the spiral
                    # returns to the central vertex)
                    if third not in seen:
                        ring.append(third)
                        seen.add(third)
                    trig_central.remove(cur_t[0])
                # 4) Stop when the spiral hits the boundary (the already visited
                # triangle is no longer in the list): First half of the spiral
                elif len(cur_t) == 0:
                    break
        rev_i = len(ring)
        if init_vert is not None:
            v = init_vert
            if orientation_0 and len(ring) == 1:
                reverse_order = False
            else:
                reverse_order = True
        need_padding = False
        # 5) If on the boundary: restart from the initial vertex towards the other
        # direction, but put the vertices in reverse order: second half of the
        # spiral. One exception if the starting point is on the boundary + 2nd
        # point towards the desired direction.
        while len(trig_central) > 0 and init_vert is not None:
            cur_t = [t for t in trig_central if t in trig[v]]
            if len(cur_t) != 1:
                break
            else:
                need_padding = True
                third = [p for p in cur_t[0] if p != v and p != i][0]
                trig_central.remove(cur_t[0])
                if third not in seen:
                    ring.insert(rev_i, third)
                    seen.add(third)
                    if not reverse_order:
                        rev_i = len(ring)
                v = third
        # Add a dummy vertex between the two halves of the spiral - similar to
        # zero padding in a 2d grid.
        if need_padding:
            ring.insert(rev_i, -1)
        spiral += ring
        # Next rings:
        for step in range(n_steps - 1):
            next_ring = set([]); next_trigs = set([])
            if len(ring) == 0:
                break
            base_triangle = None
            init_vert = None
            # Find next hop neighbors
            for w in ring:
                if w != -1:
                    for u in adj[w]:
                        if u not in seen:
                            next_ring.add(u)
            # Find triangles that contain two outer ring nodes. That way one can
            # follow the spiral ordering in the same way as done in the first ring:
            # by simply discarding the already visited triangles+nodes.
            for u in next_ring:
                for tr in trig[u]:
                    if len([x for x in tr if x in seen]) == 1:
                        next_trigs.add(tr)
                    elif ring[0] in tr and ring[-1] in tr:
                        base_triangle = tr
            # Normal case: starting point in the second ring -> the 3rd point in the
            # triangle that connects the 1st and the last point of the previous ring.
            if base_triangle is not None:
                init_vert = [x for x in base_triangle if x != ring[0] and x != ring[-1]]
                # Make sure that the initial point is appropriate for starting the
                # spiral, i.e. it is connected to at least one candidate vertex.
                if len(list(next_trigs.intersection(set(trig[init_vert[0]])))) == 0:
                    init_vert = None
            # If no such triangle exists (one of the vertices is dummy, or a specific
            # boundary configuration), find the relative point in the triangle that
            # connects consecutive ring points. Note: slight abuse of spiral topology.
            if init_vert is None:
                for r in range(len(ring) - 1):
                    if ring[r] != -1 and ring[r + 1] != -1:
                        tr = [t for t in trig[ring[r]] if t in trig[ring[r + 1]]]
                        for t in tr:
                            init_vert = [v for v in t if v not in seen]
                            # make sure the next vertex is appropriate to start the
                            # spiral ordering in the next ring
                            if len(init_vert) > 0 and len(list(next_trigs.intersection(set(trig[init_vert[0]])))) > 0:
                                break
                            else:
                                init_vert = []
                        if len(init_vert) > 0 and len(list(next_trigs.intersection(set(trig[init_vert[0]])))) > 0:
                            break
                        else:
                            init_vert = []
            # The rest of the procedure is the same as the first ring
            if init_vert is None:
                init_vert = []
            if len(init_vert) > 0:
                init_vert = init_vert[0]
                ring = [init_vert]
                seen.add(init_vert)
            else:
                init_vert = None
                ring = []
            while len(next_trigs) > 0 and init_vert is not None:
                cur_v = ring[-1]
                cur_t = list(next_trigs.intersection(set(trig[cur_v])))
                if len(ring) == 1:
                    # Previously wrapped in try/except -> pdb; an empty cur_t now
                    # raises IndexError instead of trapping in the debugger.
                    orientation_0 = (cur_t[0][0] in seen and cur_t[0][1] == cur_v) \
                        or (cur_t[0][1] in seen and cur_t[0][2] == cur_v) \
                        or (cur_t[0][2] in seen and cur_t[0][0] == cur_v)
                    if not counter_clockwise:
                        orientation_0 = not orientation_0
                    # 1) orientation ambiguity for the next ring
                    if len(cur_t) >= 2:
                        # Choose the triangle that will direct the spiral counter-clockwise
                        if orientation_0:
                            third = [p for p in cur_t[0] if p not in seen and p != cur_v][0]
                            next_trigs.remove(cur_t[0])
                        else:
                            third = [p for p in cur_t[1] if p not in seen and p != cur_v][0]
                            next_trigs.remove(cur_t[1])
                        ring.append(third)
                        seen.add(third)
                    # 2) Stop if the spiral hits the boundary in the first point
                    elif len(cur_t) == 1:
                        break
                else:
                    # 3) Unique ordering for the rest of the points
                    if len(cur_t) >= 1:
                        # NOTE(review): filters on `v` (last vertex of the previous
                        # half-ring walk), not `cur_v`, unlike the first-ring code
                        # at the top of this function — confirm this is intended.
                        third = [p for p in cur_t[0] if p != v and p not in seen]
                        next_trigs.remove(cur_t[0])
                        if len(third) > 0:
                            third = third[0]
                            if third not in seen:
                                ring.append(third)
                                seen.add(third)
                        else:
                            break
                    # 4) Stop when the spiral hits the boundary
                    elif len(cur_t) == 0:
                        break
            rev_i = len(ring)
            if init_vert is not None:
                v = init_vert
                if orientation_0 and len(ring) == 1:
                    reverse_order = False
                else:
                    reverse_order = True
            need_padding = False
            while len(next_trigs) > 0 and init_vert is not None:
                cur_t = [t for t in next_trigs if t in trig[v]]
                if len(cur_t) != 1:
                    break
                else:
                    need_padding = True
                    third = [p for p in cur_t[0] if p != v and p not in seen]
                    next_trigs.remove(cur_t[0])
                    if len(third) > 0:
                        third = third[0]
                        if third not in seen:
                            ring.insert(rev_i, third)
                            seen.add(third)
                            if not reverse_order:
                                rev_i = len(ring)
                        v = third
            if need_padding:
                ring.insert(rev_i, -1)
            spiral += ring
        spirals.append(spiral)
    return spirals
|
from rpython.jit.tl.tinyframe.tinyframe import main
from rpython.jit.codewriter.policy import JitPolicy
def jitpolicy(driver):
    """Return the default RPython JIT policy for this translation target."""
    return JitPolicy()
def entry_point(argv):
    """Translated-program entry point: run tinyframe `main` on the file given
    as argv[1], forwarding the remaining arguments; always exits with 0."""
    main(argv[1], argv[2:])
    return 0
# _____ Define and setup target ___
def target(*args):
    """RPython translation hook: expose `entry_point` to the translator."""
    return entry_point, None
|
#==========================================
# Reward Predictor Model
# Author: Nasim Alamdari
# Date: Dec. 2020
#==========================================
import keras
from keras.models import Sequential, Model
from keras.layers import Input, Dense, TimeDistributed, LSTM, Dropout, Activation, Bidirectional
from keras.layers import Conv1D, MaxPooling1D, Flatten, Conv2D, BatchNormalization, Lambda, concatenate
from keras.layers.advanced_activations import ELU
from keras.callbacks import ModelCheckpoint, TensorBoard, ReduceLROnPlateau
from keras import backend
from keras.utils import np_utils
from keras.optimizers import Adam, RMSprop
from keras import regularizers
from tensorflow import keras
from keras import backend as K
from numba import jit, cuda
import tensorflow as tf
L2_regularization = 0.001
class RewardPredictorNetwork(object):
    """Siamese reward-predictor network.

    A shared conv + BiLSTM sub-network scores each of two input segments with
    a scalar reward; the two scores are concatenated and passed through a
    softmax to predict which segment is preferred.
    """

    def __init__(self):
        ## outputs — populated by conv_recurrent_model_build()
        self.r1 = []
        self.r2 = []
        self.rs = []
        self.pred = []

    def conv_recurrent_model_build(self, input_shape):
        """Build and compile the siamese model.

        Parameters
        ----------
        input_shape : shape of one input segment (excluding the batch axis).

        Returns
        -------
        (model, intermediate_layer_model)
            model: the compiled two-input preference classifier.
            intermediate_layer_model: maps the left input to the sub-network's
            sigmoid-activated raw reward.
        """
        print('Building model...')

        def get_sub_net(input_shape):
            # Shared scorer: 3 conv blocks -> 2 BiLSTMs -> dense -> scalar.
            i_input = Input(shape=input_shape, name='original_input')
            layer = Conv1D(filters=256, kernel_size=5, name='convolution_1')(i_input)
            layer = BatchNormalization(momentum=0.9)(layer)
            layer = Activation('relu')(layer)
            layer = Dropout(0.5)(layer)
            layer = Conv1D(filters=128, kernel_size=3, name='convolution_2')(layer)
            layer = BatchNormalization(momentum=0.9)(layer)
            layer = Activation('relu')(layer)
            layer = Dropout(0.5)(layer)
            layer = Conv1D(filters=128, kernel_size=2, name='convolution_3')(layer)
            layer = BatchNormalization(momentum=0.9)(layer)
            layer = Activation('relu')(layer)
            layer = MaxPooling1D(4)(layer)
            layer = Dropout(0.5)(layer)
            ## LSTM layers
            layer = Bidirectional(LSTM(128, return_sequences=True), merge_mode='concat')(layer)
            layer = Dropout(0.5)(layer)
            layer = Bidirectional(LSTM(128, return_sequences=False), merge_mode='concat')(layer)
            layer = Dropout(0.5)(layer)
            ## Dense layers
            layer = Dense(128, name='dense1')(layer)
            layer = Dropout(0.5)(layer)
            shared_output = Dense(1, name='shared_output')(layer)
            raw_reward = Activation('sigmoid')(shared_output)
            return Model(i_input, [shared_output, raw_reward])

        shared_model = get_sub_net(input_shape)
        input_left = Input(shape=input_shape)
        input_right = Input(shape=input_shape)
        ## Apply the shared scorer to both segments.
        self.r1, ll = shared_model(input_left)
        self.r2, rr = shared_model(input_right)
        rs = concatenate([self.r1, self.r2])
        ## Softmax over the two raw scores = preference probability.
        pred = Activation('softmax', name='output_realtime')(rs)
        model = Model([input_left, input_right], pred)

        # Alternative preference loss (unused; compile below uses
        # categorical_crossentropy — kept for experimentation).
        def my_loss(y_true, y_pred):
            batch_size = K.cast(tf.shape(y_pred)[0], 'float32')
            model_o0_sum = K.exp(K.sum(y_pred[:, 0]) / batch_size)
            model_o1_sum = K.exp(K.sum(y_pred[:, 1]) / batch_size)
            p_o0_o1 = model_o0_sum / (model_o0_sum + model_o1_sum)
            p_o1_o0 = model_o1_sum / (model_o1_sum + model_o0_sum)
            loss = -((y_true[:, 0] * K.log(p_o0_o1)) + (y_true[:, 1] * K.log(p_o1_o0)))
            return loss

        # Alternative accuracy metric (unused; kept for experimentation).
        def my_categorical_accuracy(y_true, y_pred):
            return K.cast(K.equal(K.argmax(y_true, axis=-1),
                                  K.argmax(y_pred, axis=-1)),
                          K.floatx())

        # Best-effort multi-GPU wrapping: `multi_gpu_model` is not imported
        # here, so this normally falls through to the single-device model.
        try:
            model = multi_gpu_model(model)
        except Exception:
            pass
        opt = Adam(lr=0.001)
        model.compile(optimizer=opt, loss='categorical_crossentropy', metrics=['accuracy'])
        # model.compile(optimizer=opt, loss=my_loss, metrics=['accuracy'])
        # Fix: the functional Model constructor takes `inputs`/`outputs`;
        # the old `input`/`output` keyword names are rejected by modern Keras.
        intermediate_layer_model = Model(inputs=input_left, outputs=ll)
        ## outputs
        self.rs = rs
        self.pred = pred
        print(shared_model.summary())
        print(model.summary())
        return model, intermediate_layer_model
|
# Copyright (C) 2019 SAMSUNG SDS <Team.SAIDA@gmail.com>
#
# This code is distribued under the terms and conditions from the MIT License (MIT).
#
# Authors : Uk Jo, Iljoo Yoon, Hyunjae Lee, Daehun Jun
from __future__ import absolute_import
from collections import deque, namedtuple
import warnings
import random
import numpy as np
Experience = namedtuple('Experience', 'state0, action, reward, state1, terminal1')
class RingBuffer(object):
    """Fixed-capacity FIFO buffer backed by collections.deque.

    Once `maxlen` elements are stored, every further append silently evicts
    the oldest element (the deque's own maxlen behavior).
    """

    def __init__(self, maxlen):
        self.maxlen = maxlen
        self.data = deque(maxlen=maxlen)

    def __len__(self):
        return self.length()

    def __getitem__(self, idx):
        """Return the element stored at `idx`.

        # Argument
            idx (int): Index wanted
        # Returns
            The element of the buffer at the given index
        # Raises
            KeyError: when `idx` is negative or beyond the stored elements
        """
        if 0 <= idx < self.length():
            return self.data[idx]
        raise KeyError()

    def append(self, v):
        """Append an element to the buffer.

        # Argument
            v (object): Element to append
        """
        self.data.append(v)

    def length(self):
        """Return the number of stored elements.

        # Returns
            The length of the underlying deque
        """
        return len(self.data)
class Memory(object):
    """Base class for replay memories.

    Keeps a rolling window of the most recent observations and terminal
    flags so that multi-frame states can be assembled on demand.
    """

    def __init__(self, window_length, ignore_episode_boundaries=False):
        self.window_length = window_length
        self.ignore_episode_boundaries = ignore_episode_boundaries
        self.recent_observations = deque(maxlen=window_length)
        self.recent_terminals = deque(maxlen=window_length)

    def sample(self, batch_size, batch_idxs=None):
        # Concrete memories implement the actual sampling strategy.
        raise NotImplementedError()

    def append(self, observation, action, reward, terminal, training=True):
        # Only observation and terminal flag are tracked at this level.
        self.recent_observations.append(observation)
        self.recent_terminals.append(terminal)

    def get_recent_state(self, current_observation):
        """Return the last `window_length` observations, newest last.

        # Argument
            current_observation (object): Last observation
        # Returns
            A list of the last observations
        """
        # Subsequent observations might come from different episodes; an
        # experience must never span multiple episodes, so we stop extending
        # the window at an episode boundary (unless explicitly ignored).
        state = [current_observation]
        newest = len(self.recent_observations) - 1
        for offset in range(self.window_length - 1):
            pos = newest - offset
            # Terminal flag of the step *before* `pos`; False when out of range.
            prev_terminal = self.recent_terminals[pos - 1] if pos - 1 >= 0 else False
            if pos < 0 or (prev_terminal and not self.ignore_episode_boundaries):
                # The previously handled observation was terminal — adding more
                # would leak into a different episode.
                break
            state.insert(0, self.recent_observations[pos])
        # Left-pad short windows with zeroed copies of the oldest observation.
        while len(state) < self.window_length:
            state.insert(0, zeroed_observation(state[0]))
        return state

    def get_config(self):
        """Return the memory configuration.

        # Return
            A dict with keys window_length and ignore_episode_boundaries
        """
        return {
            'window_length': self.window_length,
            'ignore_episode_boundaries': self.ignore_episode_boundaries,
        }
def zeroed_observation(observation):
    """Return a zero-filled structure with the same shape as `observation`.

    # Argument
        observation (object): array, iterable, or scalar
    # Return
        np.zeros of the same shape for array-likes with a `.shape`,
        a recursively zeroed list for other iterables, else 0.
    """
    if hasattr(observation, 'shape'):
        return np.zeros(observation.shape)
    if hasattr(observation, '__iter__'):
        return [zeroed_observation(item) for item in observation]
    return 0.
def sample_batch_indexes(low, high, size):
    """Return a sample of `size` indexes drawn from [low, high).

    # Argument
        low (int): The minimum value for our samples
        high (int): The maximum (exclusive) value for our samples
        size (int): The number of samples to pick
    # Returns
        A list/array of `size` samples with values in [low, high)
    """
    if high - low >= size:
        # Enough data: draw without replacement so each index is unique in the
        # batch. `np.random.choice` is avoided because it is very inefficient
        # as memory grows (https://github.com/numpy/numpy/issues/2764);
        # `random.sample` draws without replacement and is much faster.
        # (The old try/except around `range` was a Python-2 xrange leftover
        # with two identical branches — removed.)
        batch_idxs = random.sample(range(low, high), size)
    else:
        # Not enough data: sample with replacement. This should be avoided by
        # picking a large enough warm-up phase.
        warnings.warn('Not enough entries to sample without replacement. Consider increasing your warm-up phase to avoid oversampling!')
        # np.random.random_integers(low, high - 1) is deprecated/removed;
        # np.random.randint draws from the same [low, high) interval.
        batch_idxs = np.random.randint(low, high, size=size)
    assert len(batch_idxs) == size
    return batch_idxs
|
import os
import json
from tweepy.error import TweepError
from pandas import DataFrame
from dotenv import load_dotenv
from app import DATA_DIR, seek_confirmation
from app.decorators.datetime_decorators import logstamp
from app.bq_service import BigQueryService
from app.twitter_service import TwitterService
load_dotenv()
BATCH_SIZE = int(os.getenv("BATCH_SIZE", default=100)) # the max number of processed users to store in BQ at once (with a single insert API call). must be less than 10,000 to avoid error.

if __name__ == "__main__":
    # Resolve Twitter user ids for screen names that lack them, then store the
    # lookup results in BigQuery in batches of BATCH_SIZE.
    bq_service = BigQueryService()
    twitter_service = TwitterService()
    rows = list(bq_service.fetch_idless_screen_names())
    row_count = len(rows)
    print("-------------------------")
    print(f"FETCHED {row_count} SCREEN NAMES")
    print("BATCH SIZE:", BATCH_SIZE)
    print("-------------------------")
    # Interactive safety prompt before mutating the BigQuery table.
    seek_confirmation()
    bq_service.migrate_user_id_lookups_table()
    batch = []
    for index, row in enumerate(rows):
        counter = index + 1
        try:
            user_id = twitter_service.get_user_id(row.screen_name)
            message = None
        except TweepError as err:
            #print(err)
            #> [{'code': 50, 'message': 'User not found.'}]
            #> [{'code': 63, 'message': 'User has been suspended.'}]
            user_id = None
            # NOTE(review): makes err.reason JSON-decodable by swapping single
            # for double quotes; assumes the reason text itself contains no
            # apostrophes — confirm against real TweepError payloads.
            message = json.loads(err.reason.replace("'", '"'))[0]["message"]
        # Screen name is uppercased before storage — presumably for
        # case-insensitive joins downstream; verify against consuming queries.
        lookup = {"lookup_at": logstamp(), "counter": counter, "screen_name": row.screen_name.upper(), "user_id": user_id, "message": message}
        print(lookup)
        batch.append(lookup)
        if (len(batch) >= BATCH_SIZE) or (counter >= row_count): # if the batch is full or the row is last
            print("SAVING BATCH...", len(batch))
            bq_service.upload_user_id_lookups(batch)
            batch = [] # clear the batch
    print("-------------")
    print("LOOKUPS COMPLETE!")
    #print("WRITING TO CSV...")
    #df = DataFrame(lookups)
    #print(df.head())
    #csv_filepath = os.path.join(DATA_DIR, "user_id_lookups.csv")
    #df.to_csv(csv_filepath, index=False)
|
from __future__ import print_function
import matplotlib.pyplot as plt
import numpy as np
from importlib import reload
from rbvfit import rb_vfit as r
from rbvfit import rb_setline as rt
def set_one_absorber(N, b, lam_rest):
    """Equivalent width of a single Voigt absorber at rest (z = 0).

    N, b : column density and Doppler parameter for the line.
    lam_rest : rest-frame wavelength of the transition (Angstrom).
    Returns the equivalent width integrated over +/- 10 A around the line.
    """
    zabs = 0.
    line = r.model()
    line.addline(lam_rest, z=zabs)
    # Wavelength grid spanning 10 A either side of the line, 0.05 A steps.
    wave = np.arange(lam_rest - 10., lam_rest + 10., 0.05)
    # Parameter vector [N, b, v] with zero velocity offset.
    theta = np.array([np.array(N), np.array(b), np.array([0.])])
    flx, components = r.model_profile(theta, wave, line)
    # EW = integral of (1 - normalized flux) over wavelength.
    return np.trapz(1. - flx, x=wave)
def compute_ewlist_from_voigt(Nlist, b, lam_rest):
    """Equivalent widths for each column density in `Nlist` at fixed b.

    Returns a 1-D float array, one EW per entry of Nlist.
    """
    return np.array([set_one_absorber(N, b, lam_rest) for N in Nlist])
class compute_cog(object):
    """Curve-of-growth (COG) calculator for a single atomic transition."""

    def __init__(self, lam_guess, Nlist, blist):
        """
        This object will create a curve of growth for a given input set of parameters.

        Input:
            lam_guess : rest frame wavelength of one transition
            Nlist : array of column densities for which COG is to be computed
            blist : array of b values for which COG is to be computed
        Output:
            a COG object with all input parameters
            st: structure containing transition information
            Wlist: matrix containing EW for every logN and b value

        Working example:
            Look up COG Example.ipynb
        """
        # Pick the transition closest to the guessed wavelength.
        self.st = rt.rb_setline(lam_guess, 'closest')
        self.Nlist = Nlist
        self.blist = blist
        self.Wlist = np.zeros((len(Nlist), len(blist)))
        for i in range(0, len(blist)):
            print(self.st['wave'])
            self.Wlist[:, i] = compute_ewlist_from_voigt(Nlist, blist[i], self.st['wave'])

    def plot_cog(self):
        """Plot log10(N f lambda) vs log10(W / lambda), one curve per b value."""
        # Convert Angstrom to cm (the 1e-8 factor below)
        plt.title(self.st['name'])
        for i in range(0, len(self.blist)):
            # Fix: np.str was removed from NumPy (deprecated 1.20, removed
            # 1.24); the builtin str produces identical label text.
            plt.plot(np.log10((10**self.Nlist)*self.st['fval']*self.st['wave']*1e-8),
                     np.log10(self.Wlist[:, i]/self.st['wave']),
                     label='b = ' + str(self.blist[i]))
        plt.xlabel(r'$log_{10} [N f \lambda]$')
        plt.ylabel(r'$log_{10} [W/ \lambda]$')
        plt.legend()
        plt.show()
|
from unittest import TestCase
from datetime import datetime
from qcodes.data.location import FormatLocation
from qcodes.instrument.parameter import Parameter
from qcodes.measure import Measure
from .instrument_mocks import MultiGetter, MultiSetPointParam
import numpy as np
from numpy.testing import assert_array_equal
class TestMeasure(TestCase):
    """Integration tests for qcodes.measure.Measure with scalar and array parameters."""

    def setUp(self):
        # Purely software-backed parameter: no instrument I/O involved.
        self.p1 = Parameter('P1', initial_value=1, get_cmd=None, set_cmd=None)

    def test_simple_scalar(self):
        # Measuring one scalar parameter yields a dummy setpoint array plus the value.
        data = Measure(self.p1).run_temp()
        self.assertEqual(data.single_set.tolist(), [0])
        self.assertEqual(data.P1.tolist(), [1])
        self.assertEqual(len(data.arrays), 2, data.arrays)
        # Measure (unlike Loop) must not record 'loop' metadata.
        self.assertNotIn('loop', data.metadata)
        meta = data.metadata['measurement']
        self.assertEqual(meta['__class__'], 'qcodes.measure.Measure')
        self.assertEqual(len(meta['actions']), 1)
        self.assertFalse(meta['use_threads'])
        # Timestamps must parse at second resolution; end never precedes start.
        ts_start = datetime.strptime(meta['ts_start'], '%Y-%m-%d %H:%M:%S')
        ts_end = datetime.strptime(meta['ts_end'], '%Y-%m-%d %H:%M:%S')
        self.assertGreaterEqual(ts_end, ts_start)

    def test_simple_array(self):
        # Array-valued getter produces an index setpoint array plus the data array.
        data = Measure(MultiGetter(arr=(1.2, 3.4))).run_temp()
        self.assertEqual(data.index0_set.tolist(), [0, 1])
        self.assertEqual(data.arr.tolist(), [1.2, 3.4])
        self.assertEqual(len(data.arrays), 2, data.arrays)

    def test_array_and_scalar(self):
        # Mixing array and scalar actions yields both setpoint/data pairs: 4 arrays.
        self.p1.set(42)
        data = Measure(MultiGetter(arr=(5, 6)), self.p1).run_temp()
        self.assertEqual(data.single_set.tolist(), [0])
        self.assertEqual(data.P1.tolist(), [42])
        self.assertEqual(data.index0_set.tolist(), [0, 1])
        self.assertEqual(data.arr.tolist(), [5, 6])
        self.assertEqual(len(data.arrays), 4, data.arrays)
# NOTE(review): class name misspells "Multi" as "Mulit"; kept as-is since the
# name may be referenced by test selection elsewhere.
class TestMeasureMulitParameter(TestCase):
    """Checks the dataset metadata produced when measuring a MultiParameter."""

    def setUp(self):
        self.p1 = MultiSetPointParam()

    def test_metadata(self):
        # Custom location formatter so the dataset lands under a known name.
        loc_fmt = 'data/{date}/#{counter}_{name}_{date}_{time}'
        rcd = {'name': 'test_metadata'}
        # Array names derived from the MultiSetPointParam components.
        param_name_1 = "multi_setpoint_param_this"
        param_name_2 = "multi_setpoint_param_that"
        setpoint_name = "multi_setpoint_param_this_setpoint_set"
        loc_provider = FormatLocation(fmt=loc_fmt, record=rcd)
        c = Measure(self.p1).run(location=loc_provider)
        # First component: unit/name/label/shape metadata and zero-valued data.
        self.assertEqual(c.metadata['arrays'][param_name_1]['unit'], 'this unit')
        self.assertEqual(c.metadata['arrays'][param_name_1]['name'], param_name_1)
        self.assertEqual(c.metadata['arrays'][param_name_1]['label'], 'this label')
        self.assertEqual(c.metadata['arrays'][param_name_1]['is_setpoint'], False)
        self.assertEqual(c.metadata['arrays'][param_name_1]['shape'], (5,))
        assert_array_equal(getattr(c, param_name_1).ndarray, np.zeros(5))
        # Second component: same checks with one-valued data.
        self.assertEqual(c.metadata['arrays'][param_name_2]['unit'], 'that unit')
        self.assertEqual(c.metadata['arrays'][param_name_2]['name'], param_name_2)
        self.assertEqual(c.metadata['arrays'][param_name_2]['label'], 'that label')
        self.assertEqual(c.metadata['arrays'][param_name_2]['is_setpoint'], False)
        self.assertEqual(c.metadata['arrays'][param_name_2]['shape'], (5,))
        assert_array_equal(getattr(c, param_name_2).ndarray, np.ones(5))
        # Setpoint array: flagged as setpoint, linspace(5, 9, 5) values.
        self.assertEqual(c.metadata['arrays'][setpoint_name]['unit'],
                         'this setpointunit')
        self.assertEqual(c.metadata['arrays'][setpoint_name]['name'],
                         "multi_setpoint_param_this_setpoint")
        self.assertEqual(c.metadata['arrays'][setpoint_name]['label'],
                         'this setpoint')
        self.assertEqual(c.metadata['arrays'][setpoint_name]
                         ['is_setpoint'], True)
        self.assertEqual(c.metadata['arrays'][setpoint_name]['shape'],
                         (5,))
        assert_array_equal(getattr(c, setpoint_name).ndarray, np.linspace(5, 9, 5))
|
from qdrant_client.http.models.models import Distance
# Map human-readable metric names to qdrant Distance enum members.
DISTANCES = {
    'cosine': Distance.COSINE,
    'euclidean': Distance.EUCLID,
    'dot': Distance.DOT,
}
|
"""
Commonly used utils:
evaluation metrics e.g. similarity, AUC, AP@k, MAP@k
graph related operation
testing data generator
file I/O
visualization
etc...
by Chengbin Hou
"""
import time
import numpy as np
from scipy import sparse
import pickle
import networkx as nx
# -----------------------------------------------------------------------------
# --------------------------------- metrics -----------------------------------
# -----------------------------------------------------------------------------
def cosine_similarity(a, b):
    ''' cosine similarity; can be used as score function; vector by vector;
        If consider similarity for all pairs,
        pairwise_similarity() implementation may be more efficient

        Fix: the docstring used to follow the import statements, making it a
        dead string expression rather than an actual docstring.
    '''
    from numpy import dot
    from numpy.linalg import norm
    # Flatten both inputs so matrices/row-vectors are treated as vectors.
    a = np.reshape(a, -1)
    b = np.reshape(b, -1)
    denom = norm(a) * norm(b)
    if denom == 0:
        # Zero vector has no direction: define similarity as 0.
        return 0.0
    return dot(a, b) / denom
def pairwise_similarity(mat, type='cosine'):
    ''' pairwise similarity; can be used as score function;
        vectorized computation over all row pairs of `mat`.
        Returns a score matrix, or the string 'Not found!' for an unknown type.
    '''
    if type == 'cosine':  # supports both sparse and dense mat
        from sklearn.metrics.pairwise import cosine_similarity
        return cosine_similarity(mat, dense_output=True)
    if type == 'jaccard':
        from sklearn.metrics import jaccard_similarity_score
        from sklearn.metrics.pairwise import pairwise_distances
        # n_jobs=-1: use every CPU core for the pairwise computation
        return pairwise_distances(mat.todense(), metric=jaccard_similarity_score, n_jobs=-1)
    if type == 'euclidean':
        from sklearn.metrics.pairwise import euclidean_distances
        # similarity = - distance
        return -euclidean_distances(mat)
    if type == 'manhattan':
        from sklearn.metrics.pairwise import manhattan_distances
        # similarity = - distance
        return -manhattan_distances(mat)
    print('Please choose from: cosine, jaccard, euclidean or manhattan')
    return 'Not found!'
def auc_score(y_true, y_score):
    ''' ROC-AUC via the sklearn roc_auc_score API.
        y_true & y_score: array-like, shape = [n_samples]
        Scores below 0.5 are flipped: for a binary classifier one can simply
        predict the opposite class.
    '''
    from sklearn.metrics import roc_auc_score
    roc = roc_auc_score(y_true=y_true, y_score=y_score)
    return roc if roc >= 0.5 else 1.0 - roc
def ranking_precision_score(y_true, y_score, k=10):
    """Precision at rank k.

    y_true & y_score: array-like, shape = [n_samples]
    see https://gist.github.com/mblondel/7337391

    Divides by min(n_pos, k) so the best achievable score is always 1.0
    (when k > n_pos the fixed n_pos is used instead of k).
    """
    labels = np.unique(y_true)
    if len(labels) > 2:
        raise ValueError("Only supported for two relevance levels.")
    pos_label = labels[1]  # the larger label counts as "relevant"
    # Indices sorted by descending score; keep the top-k predictions.
    top_k = np.take(y_true, np.argsort(y_score)[::-1][:k])
    n_relevant = np.sum(top_k == pos_label)
    n_pos = np.sum(y_true == pos_label)
    return float(n_relevant) / min(n_pos, k)
def average_precision_score(y_true, y_score, k=10):
    """Average precision at rank k.

    y_true & y_score: array-like, shape = [n_samples]
    see https://gist.github.com/mblondel/7337391

    Only the top min(n_pos, k) ranked predictions are considered, so the best
    achievable score is always 1.0.
    """
    labels = np.unique(y_true)
    if len(labels) > 2:
        raise ValueError("Only supported for two relevance levels.")
    pos_label = labels[1]  # the larger label counts as "relevant"
    n_pos = np.sum(y_true == pos_label)
    # Ranked true labels for the top min(n_pos, k) scores.
    order = np.argsort(y_score)[::-1][:min(n_pos, k)]
    ranked = np.asarray(y_true)[order]
    score = 0.0
    hits = 0
    # Accumulate precision@rank at every rank that holds a relevant item.
    for rank, label in enumerate(ranked, start=1):
        if label == pos_label:
            hits += 1
            score += hits / float(rank)
    if n_pos == 0:
        return 0
    return score / n_pos  # micro-score; for macro-score use np.sum(score)/np.size(score)
# ----------------------------------------------------------------------------------
# ------------------------------- graph related operation --------------------------
# ----------------------------------------------------------------------------------
def edge_s1_minus_s0(s1, s0, is_directed=False):
    ''' Set difference of edge/node-pair sets: edges in s1 but not in s0.

        s1 and s0: iterables of (u, v) pairs.
        Undirected edges are canonicalized (smaller endpoint first) before the
        difference so (a, b) and (b, a) count as the same edge.

        Fix: the directed case used to print a message and silently return
        None; it now returns the plain ordered-pair set difference.
    '''
    if not is_directed:
        s1_reordered = set((a, b) if a < b else (b, a) for a, b in s1)
        s0_reordered = set((a, b) if a < b else (b, a) for a, b in s0)
        return s1_reordered - s0_reordered
    else:
        # Directed edges are already ordered pairs; plain set difference.
        return set(s1) - set(s0)
def unique_nodes_from_edge_set(edge_set):
    ''' take out unique nodes from edge set, preserving first-seen order

        Fix: membership testing used the result list itself (O(n) per test,
        O(n^2) overall); a companion set gives the same output in O(n).
    '''
    unique_nodes = []
    seen = set()
    for a, b in edge_set:
        if a not in seen:
            seen.add(a)
            unique_nodes.append(a)
        if b not in seen:
            seen.add(b)
            unique_nodes.append(b)
    return unique_nodes
def row_as_probdist(mat, dense_output=False, preserve_zeros=False):
    """Make each row of matrix sums up to 1.0, i.e., a probability distribution.
    Support both dense and sparse matrix.

    Attributes
    ----------
    mat : scipy sparse matrix or dense matrix or numpy array
        The matrix to be normalized
    dense_output : bool
        whether forced dense output
    preserve_zeros : bool
        If False, for row with all entries 0, we normalize it to a vector with all entries 1/n.
        Leave 0 otherwise

    Returns
    -------
    dense or sparse matrix:
        return dense matrix if input is dense matrix or numpy array
        return sparse matrix for sparse matrix input
        (note: np.array & np.matrix are diff; and may cause some dim issues...)
    """
    row_sum = np.array(mat.sum(axis=1)).ravel()  # type: np.array
    zero_rows = row_sum == 0
    # Avoid division by zero: all-zero rows divide by 1 (they stay zero here
    # and are handled below).
    row_sum[zero_rows] = 1
    # Left-multiplying by diag(1/row_sum) scales every row to unit sum.
    diag = sparse.dia_matrix((1 / row_sum, 0), (mat.shape[0], mat.shape[0]))
    mat = diag.dot(mat)
    if not preserve_zeros:
        # Replace each all-zero row with the uniform distribution 1/n via a
        # rank-1 outer product (indicator of zero rows x uniform row vector).
        mat += sparse.csr_matrix(zero_rows.astype(int)).T.dot(sparse.csr_matrix(np.repeat(1 / mat.shape[1], mat.shape[1])))

    if dense_output and sparse.issparse(mat):
        return mat.todense()
    return mat
# ----------------------------------------------------------------------------
# --------------------------------- files I/O --------------------------------
# ----------------------------------------------------------------------------
def load_any_obj_pkl(path):
    ''' load any object from a pickle file at `path`
    '''
    with open(path, 'rb') as f:
        return pickle.load(f)
def save_any_obj_pkl(obj, path):
    ''' save any object to a pickle file at `path` (highest protocol)
    '''
    with open(path, 'wb') as handle:
        pickle.dump(obj, handle, protocol=pickle.HIGHEST_PROTOCOL)
def save_emb(emb_dict, path):
    ''' save embeddings to a txt file
        nodeID emb_dim1 emb_dim2 ...

        Fix: removed the unused local `node_num`.
    '''
    with open(path, 'w') as f:
        for node, vec in emb_dict.items():
            f.write("{} {}\n".format(node, ' '.join(str(x) for x in vec)))
def load_emb(path):
    ''' load embeddings from a txt file
        nodeID emb_dim1 emb_dim2 ...
    '''
    emb_dict = {}
    with open(path, 'r') as f:
        for line in f:
            fields = line.split()
            emb_dict[fields[0]] = np.array([float(x) for x in fields[1:]])
    return emb_dict
def load_edge_label(filename):
    ''' load edge labels from a txt file
        nodeID1 nodeID2 edge_label(s)

        Returns (X, Y): X is a list of [node1, node2] pairs, Y the labels.
        Fix: replaced the manual while-1/readline/close loop with a context
        manager and direct file iteration.
    '''
    X = []
    Y = []
    with open(filename, 'r') as fin:
        for line in fin:
            vec = line.strip().split(' ')
            X.append(vec[:2])
            Y.append(vec[2])
    return X, Y
def load_node_label(filename):
    ''' load node label from a txt file
        one line per node: nodeID node_label(s)
    Returns (X, Y): X = [nodeID, ...], Y = [[label, ...], ...] (all strings).
    '''
    # use a context manager so the file is closed even on error; skip
    # blank lines, which used to append garbage ('' node with no labels)
    X = []
    Y = []
    with open(filename, 'r') as fin:
        for line in fin:
            if not line.strip():
                continue
            vec = line.strip().split(' ')
            X.append(vec[0])
            Y.append(vec[1:])
    return X, Y
# -----------------------------------------------------------------------
# ------------------------------- others --------------------------------
# -----------------------------------------------------------------------
def dim_reduction(mat, dim=128, method='pca'):
    ''' dimensionality reduction: PCA, SVD, etc...
        dim = # of output columns
    Raises ValueError for an unknown method (the original fell through and
    crashed later with UnboundLocalError on `mat_reduced`).
    '''
    print('START dimensionality reduction using ' + method + ' ......')
    t1 = time.time()
    if method == 'pca':
        from sklearn.decomposition import PCA
        pca = PCA(n_components=dim, svd_solver='auto', random_state=None)
        mat_reduced = pca.fit_transform(mat)  # sklearn pca auto remove mean, no need to preprocess
    elif method == 'svd':
        from sklearn.decomposition import TruncatedSVD
        svd = TruncatedSVD(n_components=dim, n_iter=5, random_state=None)
        mat_reduced = svd.fit_transform(mat)
    else:  # to do... more methods... e.g. random projection, ica, t-sne...
        # fail fast with a clear error instead of an UnboundLocalError below
        raise ValueError('dimensionality reduction method not found: ' + method)
    t2 = time.time()
    print('END dimensionality reduction: {:.2f}s'.format(t2-t1))
    return mat_reduced
# ------------------------------------------------------------------------
# --------------------------data generator -----------------------------
# ------------------------------------------------------------------------
def gen_test_edge_wrt_changes(graph_t0, graph_t1):
    ''' input: two networkx graphs
        generate **changed** testing edges for link prediction task
        currently, we only consider pos_neg_ratio = 1.0
        return: pos_edges_with_label [(node1, node2, 1), (), ...]
                neg_edges_with_label [(node3, node4, 0), (), ...]
    '''
    G0 = graph_t0.copy()
    G1 = graph_t1.copy() # use copy to avoid problem caused by G1.remove_node(node)
    # edges added/deleted between t0 and t1; edge_s1_minus_s0 is a project
    # helper defined elsewhere in this file -- presumably it treats (u, v)
    # and (v, u) as the same undirected edge (TODO confirm)
    edge_add = edge_s1_minus_s0(s1=set(G1.edges()), s0=set(G0.edges()))
    edge_del = edge_s1_minus_s0(s1=set(G0.edges()), s0=set(G1.edges()))
    unseen_nodes = set(G1.nodes()) - set(G0.nodes())
    for node in unseen_nodes: # to avoid unseen nodes while testing
        G1.remove_node(node)
    # collect (and then discard) added edges that touch a node absent at t0,
    # since an embedding trained at t0 cannot score those nodes
    edge_add_unseen_node = [] # to avoid unseen nodes while testing
    #print('len(edge_add)', len(edge_add))
    for node in unseen_nodes:
        for edge in edge_add:
            if node in edge:
                edge_add_unseen_node.append(edge)
    edge_add = edge_add - set(edge_add_unseen_node)
    #print('len(edge_add)', len(edge_add))
    # positives = newly added edges (label 1); negatives = deleted edges (label 0)
    pos_edges_with_label = [list(item+(1,)) for item in edge_add]
    neg_edges_with_label = [list(item+(0,)) for item in edge_del]
    # balance the classes to pos_neg_ratio = 1.0: top up the smaller side
    # with non-edges of G1 (as extra negatives) or existing G1 edges (as
    # extra positives)
    if len(edge_add) > len(edge_del):
        num = len(edge_add) - len(edge_del)
        i = 0
        for non_edge in nx.non_edges(G1):
            if non_edge not in edge_del:
                neg_edges_with_label.append(list(non_edge+(0,)))
                i += 1
            if i >= num:
                break
    elif len(edge_add) < len(edge_del):
        num = len(edge_del) - len(edge_add)
        i = 0
        for edge in nx.edges(G1):
            if edge not in edge_add:
                pos_edges_with_label.append(list(edge+(1,)))
                i += 1
            if i >= num:
                break
    else: # len(edge_add) == len(edge_del)
        pass
    return pos_edges_with_label, neg_edges_with_label
def gen_test_edge_wrt_remove(graph, edges_removed, balance_ratio=1.0):
    ''' given a networkx graph and edges_removed;
        generate non_edges not in [both graph and edges_removed];
        return all_test_samples including [edges_removed (pos samples), non_edges (neg samples)];
        return format X=[[1,2],[2,4],...] Y=[1,0,...] where Y tells where corresponding element has a edge
    '''
    g = graph
    num_edges_removed = len(edges_removed)
    num_non_edges = int(balance_ratio * num_edges_removed)
    # BUGFIX: the original tested `non_edge not in exist_edges` with
    # non_edge as a *list* and exist_edges containing *tuples*, so the
    # membership check never matched and real edges could be sampled as
    # "non-edges". Normalize to tuples and include both orientations,
    # treating the graph as undirected.
    exist_edges = set()
    for u, v in list(g.G.edges()) + list(edges_removed):
        exist_edges.add((u, v))
        exist_edges.add((v, u))
    num = 0
    non_edges = []
    # NOTE: like the original, this loops until enough non-edges are found;
    # it will not terminate on a (nearly) complete graph.
    while num < num_non_edges:
        u, v = np.random.choice(g.look_back_list, size=2, replace=False)
        if (u, v) not in exist_edges:
            num += 1
            non_edges.append([u, v])  # keep list format for callers, as before
    test_node_pairs = edges_removed + non_edges
    test_edge_labels = list(np.ones(num_edges_removed)) + list(np.zeros(num_non_edges))
    return test_node_pairs, test_edge_labels
import functools
import logging
import torch
import torch.nn as nn
from torch.nn import init
from options.options import opt_get
#import models.modules.sft_arch as sft_arch
logger = logging.getLogger('base')
####################
# initialize networks
####################
def weights_init_normal(m, bias_fill=0, mean=0.0, std=0.02):
    """Initialize a module's weights from N(mean, std); for use with net.apply().

    Conv2d/Linear weights ~ N(mean, std); BatchNorm weights ~ N(1.0, std)
    (BN scale should stay near 1). Biases are filled with `bias_fill`.
    """
    # BUGFIX: the original `hasattr(m, 'weight') and isinstance(m, nn.Conv2d) or isinstance(m, nn.Linear)`
    # grouped as `(hasattr and Conv2d) or Linear` due to precedence;
    # parenthesize so the hasattr guard applies to both module types.
    if hasattr(m, 'weight') and isinstance(m, (nn.Conv2d, nn.Linear)):
        init.normal_(m.weight, mean=mean, std=std)
        if m.bias is not None:
            m.bias.data.fill_(bias_fill)
    elif isinstance(m, nn.modules.batchnorm._BatchNorm):
        init.normal_(m.weight.data, mean=1.0, std=std)  # BN also uses normal init
        if hasattr(m, 'bias') and m.bias is not None:
            m.bias.data.fill_(bias_fill)
def weights_init_xavier(m, scale=1, bias_fill=0, **kwargs):
    """Xavier-normal initialization; for use with net.apply().

    Conv2d/Linear weights get xavier_normal_ (extra kwargs, e.g. `gain`,
    are forwarded) and are then scaled by `scale`; BatchNorm weights are
    set to 1. Biases are filled with `bias_fill`.
    """
    # BUGFIX: parenthesized the isinstance checks; the original grouped as
    # `(hasattr and Conv2d) or Linear` due to operator precedence.
    if hasattr(m, 'weight') and isinstance(m, (nn.Conv2d, nn.Linear)):
        init.xavier_normal_(m.weight, **kwargs)
        m.weight.data *= scale
        if m.bias is not None:
            m.bias.data.fill_(bias_fill)
    elif isinstance(m, nn.modules.batchnorm._BatchNorm):
        init.constant_(m.weight, 1)
        if hasattr(m, 'bias') and m.bias is not None:
            m.bias.data.fill_(bias_fill)
def weights_init_kaiming(m, scale=1, bias_fill=0, **kwargs):
    """Kaiming-normal initialization; for use with net.apply().

    Conv2d/Linear weights get kaiming_normal_ (extra kwargs, e.g. `a`,
    `mode`, are forwarded) and are then scaled by `scale`; BatchNorm
    weights are set to 1. Biases are filled with `bias_fill`.
    """
    # BUGFIX: parenthesized the isinstance checks; the original grouped as
    # `(hasattr and Conv2d) or Linear` due to operator precedence.
    if hasattr(m, 'weight') and isinstance(m, (nn.Conv2d, nn.Linear)):
        init.kaiming_normal_(m.weight, **kwargs)
        m.weight.data *= scale
        if hasattr(m, 'bias') and m.bias is not None:
            m.bias.data.fill_(bias_fill)
    elif isinstance(m, nn.modules.batchnorm._BatchNorm):
        init.constant_(m.weight, 1)
        # guard with hasattr for consistency with the sibling init helpers
        if hasattr(m, 'bias') and m.bias is not None:
            m.bias.data.fill_(bias_fill)
def weights_init_orthogonal(m, bias_fill=0, **kwargs):
    """Orthogonal initialization; for use with net.apply().

    Conv2d/Linear weights get orthogonal_ (extra kwargs, e.g. `gain`, are
    forwarded); BatchNorm weights are set to 1. Biases are filled with
    `bias_fill`.
    """
    # BUGFIX: parenthesized the isinstance checks; the original grouped as
    # `(hasattr and Conv2d) or Linear` due to operator precedence.
    if hasattr(m, 'weight') and isinstance(m, (nn.Conv2d, nn.Linear)):
        init.orthogonal_(m.weight.data, **kwargs)
        if m.bias is not None:
            m.bias.data.fill_(bias_fill)
    elif isinstance(m, nn.modules.batchnorm._BatchNorm):
        init.constant_(m.weight, 1)
        if hasattr(m, 'bias') and m.bias is not None:
            m.bias.data.fill_(bias_fill)
def init_weights(net, init_type='kaiming', scale=1, std=0.02, gain=0.02):
    '''Initialize network weights.
    To initialize a network:
    1. register CPU/GPU device (with multi-GPU support)
    2. initialize the network weights
    Parameters:
        net (network)    -- the network to be initialized
        init_type (str)  -- the name of an initialization method: normal | xavier | kaiming | orthogonal
        scale (float)    -- scaling factor for kaiming.
        gain (float)     -- scaling factor for xavier.
        std (float)      -- scaling factor for normal.
    'kaiming' is used in the ESRGAN paper, 'normal' in the original pix2pix and CycleGAN paper.
    kaiming and xavier might work better for some applications.
    Raises NotImplementedError for an unknown init_type.
    '''
    logger.info('Initialization method [{:s}]'.format(init_type))
    if init_type == 'normal':
        weights_init_normal_ = functools.partial(weights_init_normal, std=std)
        net.apply(weights_init_normal_)
    # BUGFIX: this branch was `if`, not `elif`, so init_type='normal'
    # applied its init and then fell through to the final else and raised
    # NotImplementedError.
    elif init_type == 'xavier':
        weights_init_xavier_ = functools.partial(weights_init_xavier, gain=gain)
        net.apply(weights_init_xavier_)
    elif init_type == 'kaiming':
        weights_init_kaiming_ = functools.partial(weights_init_kaiming, scale=scale)
        net.apply(weights_init_kaiming_)
    elif init_type == 'orthogonal':
        net.apply(weights_init_orthogonal)
    else:
        raise NotImplementedError('initialization method [{:s}] not implemented'.format(init_type))
####################
# define network
####################
# Generator
def define_G(opt, step=0):
    '''Create a generator network.

    Selects the architecture named by opt['network_G']['which_model_G'],
    builds it from the architecture-specific options in opt['network_G'],
    initializes its weights with <init_weights> (except MRRDB_net, which
    initializes its modules itself), and wraps it in nn.DataParallel when
    opt['gpu_ids'] is set.

    Parameters:
        opt (dict)  -- full options dictionary; reads 'network_G',
                       'gpu_ids', 'is_train' and (for some archs) 'scale'
        step (int)  -- current training step; only forwarded to SRFlow_net
    Returns the constructed generator (possibly DataParallel-wrapped).
    '''
    gpu_ids = opt['gpu_ids']
    opt_net = opt['network_G']
    which_model = opt_net['which_model_G']
    init_type = opt_net.get('init_type', 'kaiming')
    init_scale = opt_net.get('init_scale', 0.1)
    if opt_net['net_act']: # If set, use a different activation function
        act_type = opt_net['net_act']
    else: # Use networks defaults
        if which_model == 'sr_resnet':
            act_type = 'relu'
        elif which_model == 'RRDB_net':
            act_type = 'leakyrelu'
        elif which_model == 'ppon':
            act_type = 'leakyrelu'
        else:
            act_type = 'leakyrelu'
    # Architecture dispatch: imports are done lazily inside each branch so
    # only the selected architecture's module must be importable.
    if which_model == 'sr_resnet': # SRResNet
        from models.modules.architectures import SRResNet_arch
        netG = SRResNet_arch.SRResNet(in_nc=opt_net['in_nc'], out_nc=opt_net['out_nc'], nf=opt_net['nf'], \
            nb=opt_net['nb'], upscale=opt_net['scale'], norm_type=opt_net['norm_type'], \
            act_type=act_type, mode=opt_net['mode'], upsample_mode='pixelshuffle', \
            convtype=opt_net['convtype'], finalact=opt_net['finalact'])
    elif which_model == 'sft_arch': # SFT-GAN
        from models.modules.architectures import sft_arch
        netG = sft_arch.SFT_Net()
    elif which_model == 'RRDB_net': # RRDB
        from models.modules.architectures import RRDBNet_arch
        netG = RRDBNet_arch.RRDBNet(in_nc=opt_net['in_nc'], out_nc=opt_net['out_nc'], nf=opt_net['nf'], \
            nb=opt_net['nb'], gc=opt_net['gc'], upscale=opt_net['scale'], norm_type=opt_net['norm_type'], \
            act_type=act_type, mode=opt_net['mode'], upsample_mode='upconv', convtype=opt_net['convtype'], \
            finalact=opt_net['finalact'], gaussian_noise=opt_net['gaussian'], plus=opt_net['plus'],
            nr=opt_net.get('nr', 3))
    elif which_model == 'MRRDB_net': # Modified RRDB
        from models.modules.architectures import RRDBNet_arch
        netG = RRDBNet_arch.MRRDBNet(in_nc=opt_net['in_nc'], out_nc=opt_net['out_nc'], nf=opt_net['nf'], \
            nb=opt_net['nb'], gc=opt_net['gc'])
    elif which_model == 'ppon':
        from models.modules.architectures import PPON_arch
        netG = PPON_arch.PPON(in_nc=opt_net['in_nc'], nf=opt_net['nf'], nb=opt_net['nb'], out_nc=opt_net['out_nc'],
            upscale=opt_net['scale'], act_type=act_type) #(in_nc=3, nf=64, nb=24, out_nc=3)
    elif which_model == 'asr_cnn':
        from models.modules.architectures import ASRResNet_arch
        netG = ASRResNet_arch.ASRCNN(upscale_factor=opt_net['scale'], spectral_norm = True, self_attention = True, max_pool=True, poolsize = 4, finalact='tanh')
    elif which_model == 'asr_resnet':
        from models.modules.architectures import ASRResNet_arch
        netG = ASRResNet_arch.ASRResNet(scale_factor=opt_net['scale'], spectral_norm = True, self_attention = True, max_pool=True, poolsize = 4)
    elif which_model == 'abpn_net':
        from models.modules.architectures import ABPN_arch
        netG = ABPN_arch.ABPN_v5(input_dim=3, dim=32)
        # netG = ABPN_arch.ABPN_v5(input_dim=opt_net['in_nc'], dim=opt_net['out_nc'])
    elif which_model == 'pan_net': #PAN
        from models.modules.architectures import PAN_arch
        netG = PAN_arch.PAN(in_nc=opt_net['in_nc'], out_nc=opt_net['out_nc'],
            nf=opt_net['nf'], unf=opt_net['unf'], nb=opt_net['nb'], scale=opt_net['scale'],
            self_attention=opt_net.get('self_attention', False),
            double_scpa=opt_net.get('double_scpa', False),
            ups_inter_mode=opt_net.get('ups_inter_mode', 'nearest'))
    elif which_model == 'sofvsr_net':
        from models.modules.architectures import SOFVSR_arch
        netG = SOFVSR_arch.SOFVSR(scale=opt_net['scale'],n_frames=opt_net.get('n_frames', 3),
            channels=opt_net.get('channels', 320), img_ch=opt_net.get('img_ch', 1),
            SR_net=opt_net.get('SR_net', 'sofvsr'),
            sr_nf=opt_net.get('sr_nf', 64), sr_nb=opt_net.get('sr_nb', 23),
            sr_gc=opt_net.get('sr_gc', 32), sr_unf=opt_net.get('sr_unf', 24),
            sr_gaussian_noise=opt_net.get('sr_gaussian_noise', 64),
            sr_plus=opt_net.get('sr_plus', False), sr_sa=opt_net.get('sr_sa', True),
            sr_upinter_mode=opt_net.get('sr_upinter_mode', 'nearest'))
    elif which_model == 'sr3d_net':
        from models.modules.architectures import SR3DNet_arch
        netG = SR3DNet_arch.SR3DNet(scale=opt['scale'], in_nc=opt_net['in_nc'], out_nc=opt_net['out_nc'],
            nf=opt_net['nf'], nb=opt_net['nb'], n_frames=opt_net.get('n_frames', 5))
    elif which_model == 'rife_net':
        from models.modules.architectures import RIFE_arch
        netG = RIFE_arch.RIFE()
    elif which_model == 'SRFlow_net':
        from models.modules.architectures import SRFlowNet_arch
        netG = SRFlowNet_arch.SRFlowNet(in_nc=opt_net['in_nc'], out_nc=opt_net['out_nc'],
            nf=opt_net['nf'], nb=opt_net['nb'], scale=opt['scale'], K=opt_net['flow']['K'], opt=opt, step=step)
    elif which_model == 'unet_net':
        from models.modules.architectures import UNet_arch
        netG = UNet_arch.UnetGenerator(input_nc=opt_net['in_nc'], output_nc=opt_net['out_nc'],
            num_downs=opt_net['num_downs'], ngf=opt_net['ngf'],
            norm_type=opt_net['norm_type'], use_dropout=opt_net['use_dropout'],
            upsample_mode=opt_net['upsample_mode'])
    elif which_model == 'resnet_net':
        from models.modules.architectures import ResNet_arch
        netG = ResNet_arch.ResnetGenerator(input_nc=opt_net['in_nc'], output_nc=opt_net['out_nc'],
            n_blocks=opt_net['n_blocks'], ngf=opt_net['ngf'],
            norm_type=opt_net['norm_type'], use_dropout=opt_net['use_dropout'],
            upsample_mode=opt_net['upsample_mode'])
    elif which_model == 'DVD_net':
        from models.modules.architectures import DVDNet_arch
        netG = DVDNet_arch.DVDNet(in_nc=opt_net['in_nc'], out_nc=opt_net['out_nc'], nf=opt_net['nf'])
    elif which_model == 'EDVR_net':
        from models.modules.architectures import EDVR_arch
        netG = EDVR_arch.EDVR(num_in_ch=opt_net['in_nc'], num_out_ch=opt_net['out_nc'], num_feat=opt_net['nf'], num_frame=opt_net['n_frames'],
            deformable_groups=opt_net['deformable_groups'], num_extract_block=opt_net['n_extract_block'],
            num_reconstruct_block=opt_net['n_reconstruct_block'], center_frame_idx=None, with_predeblur=opt_net['predeblur'],
            with_tsa=opt_net['tsa'], upsample_mode=opt_net['upsample_mode'], upscale=opt_net['scale'],
            add_rrdb=opt_net['add_rrdb'], nb=opt_net['nb'])
    else:
        raise NotImplementedError('Generator model [{:s}] not recognized'.format(which_model))
    if opt['is_train'] and which_model != 'MRRDB_net':
        # Note: MRRDB_net initializes the modules during init, no need to initialize again here
        init_weights(netG, init_type=init_type, scale=init_scale)
    if gpu_ids:
        # multi-GPU wrapping; requires CUDA to actually be available
        assert torch.cuda.is_available()
        netG = nn.DataParallel(netG)
    return netG
# Discriminator
def define_D(opt):
    '''Create a discriminator
    Returns a discriminator
    Some of the available types of discriminators:
        vgg_*: discriminators based on a VGG-like network architecture.
            The ones with '_fea' in the name also allow to extract feature
            maps from the discriminator to use for feature losses.
        patchgan: PatchGAN classifier described in the original pix2pix paper.
            It can classify whether 70×70 overlapping patches are real or fake.
            Such a patch-level discriminator architecture has fewer parameters
            than a full-image discriminator and can work on arbitrarily-sized images
            in a fully convolutional fashion.
            [n_layers]: With this option, you can specify the number of conv layers
            in the discriminator with the parameter <n_layers_D>
            (default=3 as used in basic (PatchGAN).)
        multiscale: can create multiple patchgan discriminators that operate at
            different scales. Each one at half the scale of the previous. Must
            coordinate with the LR_size.
        pixelgan: 1x1 PixelGAN discriminator can classify whether a pixel is real or not.
            It encourages greater color diversity but has no effect on spatial statistics.
    The discriminator is usually initialized with <init_weights>.
    '''
    gpu_ids = opt['gpu_ids']
    opt_net = opt['network_D']
    which_model = opt_net['which_model_D']
    which_model_G = opt_net['which_model_G']
    init_type = opt_net.get('init_type', 'kaiming')
    init_scale = opt_net.get('init_scale', 1)
    # the VGG-style discriminators adjust their layout depending on which
    # generator family they are paired with
    if which_model_G == 'ppon':
        model_G = 'PPON'
    else:
        model_G = 'ESRGAN'
    # Architecture dispatch: imports are done lazily inside each branch so
    # only the selected architecture's module must be importable.
    if which_model == 'dis_acd': # sft-gan, Auxiliary Classifier Discriminator
        from models.modules.architectures import sft_arch
        netD = sft_arch.ACD_VGG_BN_96()
    elif which_model == 'discriminator_vgg_96':
        from models.modules.architectures import discriminators
        netD = discriminators.Discriminator_VGG_96(in_nc=opt_net['in_nc'], base_nf=opt_net['nf'], \
            norm_type=opt_net['norm_type'], mode=opt_net['mode'], act_type=opt_net['act_type'], convtype=opt_net['convtype'], arch=model_G)
    elif which_model == 'discriminator_vgg_128_SN':
        from models.modules.architectures import discriminators
        netD = discriminators.Discriminator_VGG_128_SN()
    elif which_model == 'discriminator_vgg_128':
        from models.modules.architectures import discriminators
        netD = discriminators.Discriminator_VGG_128(in_nc=opt_net['in_nc'], base_nf=opt_net['nf'], \
            norm_type=opt_net['norm_type'], mode=opt_net['mode'], act_type=opt_net['act_type'], convtype=opt_net['convtype'], arch=model_G)
    elif which_model == 'discriminator_vgg_192' or which_model == 'discriminator_192': #vic in PPON its called Discriminator_192, instead of BasicSR's Discriminator_VGG_192
        from models.modules.architectures import discriminators
        netD = discriminators.Discriminator_VGG_192(in_nc=opt_net['in_nc'], base_nf=opt_net['nf'], \
            norm_type=opt_net['norm_type'], mode=opt_net['mode'], act_type=opt_net['act_type'], convtype=opt_net['convtype'], arch=model_G)
    elif which_model == 'discriminator_vgg_256' or which_model == 'discriminator_256':
        from models.modules.architectures import discriminators
        netD = discriminators.Discriminator_VGG_256(in_nc=opt_net['in_nc'], base_nf=opt_net['nf'], \
            norm_type=opt_net['norm_type'], mode=opt_net['mode'], act_type=opt_net['act_type'], convtype=opt_net['convtype'], arch=model_G)
    elif which_model == 'discriminator_vgg': # General adaptative case
        from models.modules.architectures import discriminators
        try:
            # infer the discriminator input size from the training HR patch size
            size = int(opt['datasets']['train']['HR_size'])
            netD = discriminators.Discriminator_VGG(size=size, in_nc=opt_net['in_nc'], base_nf=opt_net['nf'], \
                norm_type=opt_net['norm_type'], mode=opt_net['mode'], act_type=opt_net['act_type'], convtype=opt_net['convtype'], arch=model_G)
        except ValueError:
            raise ValueError('VGG Discriminator size could not be parsed from the HR patch size. Check that the image patch size is either a power of 2 or 3 multiplied by a power of 2.')
    elif which_model == 'adiscriminator':
        from models.modules.architectures import ASRResNet_arch
        netD = ASRResNet_arch.ADiscriminator(spectral_norm=opt_net['spectral_norm'], self_attention=opt_net['self_attention'], \
            max_pool=opt_net['max_pool'], poolsize=opt_net['poolsize'])
    elif which_model == 'adiscriminator_s':
        from models.modules.architectures import ASRResNet_arch
        netD = ASRResNet_arch.ADiscriminator_S(spectral_norm=opt_net['spectral_norm'], self_attention=opt_net['self_attention'], \
            max_pool=opt_net['max_pool'], poolsize=opt_net['poolsize'] )
    elif which_model == 'discriminator_vgg_128_fea': #VGG-like discriminator with features extraction
        from models.modules.architectures import discriminators
        netD = discriminators.Discriminator_VGG_128_fea(in_nc=opt_net['in_nc'], base_nf=opt_net['nf'], \
            norm_type=opt_net['norm_type'], mode=opt_net['mode'], act_type=opt_net['act_type'], \
            convtype=opt_net['convtype'], arch=model_G, spectral_norm=opt_net['spectral_norm'], self_attention=opt_net['self_attention'], \
            max_pool=opt_net['max_pool'], poolsize=opt_net['poolsize'])
    elif which_model == 'discriminator_vgg_fea': #VGG-like discriminator with features extraction
        from models.modules.architectures import discriminators
        try:
            size = int(opt['datasets']['train']['HR_size'])
            netD = discriminators.Discriminator_VGG_fea(size=size, in_nc=opt_net['in_nc'], base_nf=opt_net['nf'], \
                norm_type=opt_net['norm_type'], mode=opt_net['mode'], act_type=opt_net['act_type'], \
                convtype=opt_net['convtype'], arch=model_G, spectral_norm=opt_net['spectral_norm'], self_attention=opt_net['self_attention'], \
                max_pool=opt_net['max_pool'], poolsize=opt_net['poolsize'])
        except ValueError:
            raise ValueError('VGG Discriminator size could not be parsed from the HR patch size. Check that the image patch size is either a power of 2 or 3 multiplied by a power of 2.')
    elif which_model == 'patchgan' or which_model == 'NLayerDiscriminator':
        from models.modules.architectures import discriminators
        netD = discriminators.NLayerDiscriminator(input_nc=opt_net['in_nc'], ndf=opt_net['nf'],
            n_layers=opt_net['nlayer'], patch=opt_net.get('patch_output', True),
            use_spectral_norm=opt_net.get('use_spectral_norm', False))
    elif which_model == 'pixelgan' or which_model == 'PixelDiscriminator':
        from models.modules.architectures import discriminators
        netD = discriminators.PixelDiscriminator(input_nc=opt_net['in_nc'], ndf=opt_net['nf'])
    elif which_model == 'multiscale':
        from models.modules.architectures import discriminators
        netD = discriminators.MultiscaleDiscriminator(input_nc=opt_net['in_nc'], ndf=opt_net['nf'], \
            n_layers=opt_net['nlayer'], num_D=opt_net['num_D'])
    else:
        raise NotImplementedError('Discriminator model [{:s}] not recognized'.format(which_model))
    # The string literal below is intentionally disabled dead code (a
    # candidate generic 'discriminator_vgg_<size>' branch), kept for reference.
    """
    elif which_model.startswith('discriminator_vgg_'): # User-defined case
        models.modules.architectures import discriminators
        vgg_size = which_model[18:]
        try:
            size = int(vgg_size)
            netD = discriminators.Discriminator_VGG(size=size, in_nc=opt_net['in_nc'], base_nf=opt_net['nf'], \
                norm_type=opt_net['norm_type'], mode=opt_net['mode'], act_type=opt_net['act_type'], convtype=opt_net['convtype'], arch=model_G)
        except ValueError:
            raise ValueError('VGG Discriminator size [{:s}] could not be parsed.'.format(vgg_size))
    #"""
    init_weights(netD, init_type=init_type, scale=init_scale)
    if gpu_ids:
        netD = nn.DataParallel(netD)
    return netD
def define_F(opt, use_bn=False):
    '''Create a feature extraction network for feature losses.

    Builds a (frozen, eval-mode) VGG19 feature extractor by default; the
    `feat_network` local can be switched to 'resnet' for a ResNet101
    extractor. Wraps in nn.DataParallel when opt['gpu_ids'] is set.

    Parameters:
        opt (dict)     -- full options dictionary; reads 'gpu_ids' and
                          opt['datasets']['train']['znorm']
        use_bn (bool)  -- use the batch-norm VGG variant (changes which
                          layer the features are taken from)
    '''
    from models.modules.architectures import perceptual
    feat_network = 'vgg' #opt['feat_network'] #can be configurable option
    gpu_ids = opt['gpu_ids']
    # NOTE(review): this if/else just defaults a falsy/missing-value znorm
    # to False; assumes opt['datasets']['train'] always has a 'znorm' entry
    # (or that opt tolerates missing keys) -- TODO confirm against the opt parser
    if opt['datasets']['train']['znorm']:
        z_norm = opt['datasets']['train']['znorm']
    else:
        z_norm = False
    device = torch.device('cuda' if gpu_ids else 'cpu')
    # pytorch pretrained VGG19-54, before ReLU.
    if use_bn:
        feature_layer = 49
    else:
        feature_layer = 34
    if feat_network == 'resnet': #ResNet
        netF = perceptual.ResNet101FeatureExtractor(use_input_norm=True, device=device)
    else: #VGG network (default)
        netF = perceptual.VGGFeatureExtractor(feature_layer=feature_layer, use_bn=use_bn, \
            use_input_norm=True, device=device, z_norm=z_norm)
    if gpu_ids:
        netF = nn.DataParallel(netF)
    netF.eval() # No need to train
    return netF
####################
# model coversions and validation for
# network loading
####################
def normal2mod(state_dict):
    """Convert an old-style RRDB state dict ('model.N...') to the modified
    RRDB naming ('conv_first', 'RRDB_trunk', 'upconv1', ...).

    A dict that does not look like an old-style RRDB checkpoint (no
    'model.0.weight' key) is returned unchanged.
    """
    if 'model.0.weight' in state_dict:
        try:
            logger.info('Converting and loading an RRDB model to modified RRDB')
        except Exception:
            # logger may be unavailable/unconfigured in standalone use
            print('Converting and loading an RRDB model to modified RRDB')
        crt_net = {}
        crt_net['conv_first.weight'] = state_dict['model.0.weight']
        crt_net['conv_first.bias'] = state_dict['model.0.bias']
        # remap the RRDB trunk: 'model.1.sub.X...' -> 'RRDB_trunk.X...'
        # and drop the '.0' Sequential index from conv weight/bias names
        for k in list(state_dict.keys()):
            if 'RDB' in k:
                ori_k = k.replace('model.1.sub.', 'RRDB_trunk.')
                if '.0.weight' in k:
                    ori_k = ori_k.replace('.0.weight', '.weight')
                elif '.0.bias' in k:
                    ori_k = ori_k.replace('.0.bias', '.bias')
                crt_net[ori_k] = state_dict[k]
        crt_net['trunk_conv.weight'] = state_dict['model.1.sub.23.weight']
        crt_net['trunk_conv.bias'] = state_dict['model.1.sub.23.bias']
        crt_net['upconv1.weight'] = state_dict['model.3.weight']
        crt_net['upconv1.bias'] = state_dict['model.3.bias']
        crt_net['upconv2.weight'] = state_dict['model.6.weight']
        crt_net['upconv2.bias'] = state_dict['model.6.bias']
        crt_net['HRconv.weight'] = state_dict['model.8.weight']
        crt_net['HRconv.bias'] = state_dict['model.8.bias']
        crt_net['conv_last.weight'] = state_dict['model.10.weight']
        crt_net['conv_last.bias'] = state_dict['model.10.bias']
        state_dict = crt_net
    return state_dict
def mod2normal(state_dict):
    """Convert a modified RRDB state dict ('conv_first', 'RRDB_trunk', ...)
    back to the old-style naming ('model.N...').

    A dict that does not look like a modified RRDB checkpoint (no
    'conv_first.weight' key) is returned unchanged.
    """
    if 'conv_first.weight' in state_dict:
        try:
            logger.info('Converting and loading a modified RRDB model to normal RRDB')
        except Exception:
            # logger may be unavailable/unconfigured in standalone use
            print('Converting and loading a modified RRDB model to normal RRDB')
        crt_net = {}
        crt_net['model.0.weight'] = state_dict['conv_first.weight']
        crt_net['model.0.bias'] = state_dict['conv_first.bias']
        # remap the RRDB trunk: 'RRDB_trunk.X...' -> 'model.1.sub.X...'
        # and re-insert the '.0' Sequential index into conv weight/bias names
        for k in list(state_dict.keys()):
            if 'RDB' in k:
                ori_k = k.replace('RRDB_trunk.', 'model.1.sub.')
                if '.weight' in k:
                    ori_k = ori_k.replace('.weight', '.0.weight')
                elif '.bias' in k:
                    ori_k = ori_k.replace('.bias', '.0.bias')
                crt_net[ori_k] = state_dict[k]
        crt_net['model.1.sub.23.weight'] = state_dict['trunk_conv.weight']
        crt_net['model.1.sub.23.bias'] = state_dict['trunk_conv.bias']
        crt_net['model.3.weight'] = state_dict['upconv1.weight']
        crt_net['model.3.bias'] = state_dict['upconv1.bias']
        crt_net['model.6.weight'] = state_dict['upconv2.weight']
        crt_net['model.6.bias'] = state_dict['upconv2.bias']
        crt_net['model.8.weight'] = state_dict['HRconv.weight']
        crt_net['model.8.bias'] = state_dict['HRconv.bias']
        crt_net['model.10.weight'] = state_dict['conv_last.weight']
        crt_net['model.10.bias'] = state_dict['conv_last.bias']
        state_dict = crt_net
    return state_dict
def model_val(opt_net=None, state_dict=None, model_type=None):
    """Validate/convert a state_dict before loading, based on model type.

    For generators ('G'), converts between normal and modified RRDB naming
    depending on the configured architecture. Discriminators ('D') and
    unknown model types pass through unchanged.
    """
    if model_type == 'G':
        model = opt_get(opt_net, ['network_G', 'which_model_G'])
        if model == 'RRDB_net':
            # RRDB_net loads old-style names
            return mod2normal(state_dict)
        if model in ('MRRDB_net', 'SRFlow_net'):
            # these architectures load modified names
            return normal2mod(state_dict)
        return state_dict
    # 'D' (no discriminator validation at the moment) or unspecified type:
    # return the dict unchanged
    return state_dict
def cem2normal(state_dict):
    """Unwrap a Generator state dict that was saved inside a CEM wrapper.

    If the first key starts with 'generated_image_model', strips the
    'generated_image_model.module.' prefix from matching keys (keys without
    the prefix are dropped, as before). Other dicts — including empty ones,
    which used to raise IndexError — are returned unchanged.
    """
    if state_dict and str(list(state_dict.keys())[0]).startswith('generated_image_model'):
        try:
            logger.info('Unwrapping the Generator model from CEM')
        except Exception:
            # logger may be unavailable/unconfigured in standalone use
            print('Unwrapping the Generator model from CEM')
        crt_net = {}
        for k in list(state_dict.keys()):
            if 'generated_image_model.module.' in k:
                crt_net[k.replace('generated_image_model.module.', '')] = state_dict[k]
        state_dict = crt_net
    return state_dict
|
#!/usr/bin/env python3
#py3.7
# Demonstrate the `with` statement for file I/O: the file is closed
# automatically when the block exits, even if an exception is raised.
with open('hello_foo.txt', 'w') as f:
    f.write('hello, world!')
# same as:
# f = open('hello_foo.txt', 'w')
# try:
#     f.write('hello, world')
# finally:
#     f.close()
with open('hello_foo.txt', 'r') as f:
    print(f.read())
print()  # blank separator line in the output
#more advanced:
from contextlib import contextmanager
import os
@contextmanager  # https://stackoverflow.com/a/3012921
def working_directory(path):
    """Run the body of the `with` block with *path* as the working
    directory, restoring the original working directory afterwards.

    A missing directory raises at `with` entry (os.chdir runs before the
    yield, same as the original code). Exceptions raised inside the body
    are printed and swallowed (best-effort), but the message now reports
    the actual error — the original bare `except:` printed a misleading
    "directory not found" for ANY error in the body.
    """
    current_dir = os.getcwd()
    os.chdir(path)
    try:
        yield
    except Exception as exc:
        print(f"error while working in {path}: {exc}")
    finally:
        os.chdir(current_dir)
# Pavel: wrapped the `with` block in try/except to suppress the error when the directory is not found
# TODO: consider handling this inside the `working_directory` context manager itself
# Guard the whole `with` block: entering working_directory() raises if
# "data/stuff" does not exist, since os.chdir happens at `with` entry.
try:
    with working_directory("data/stuff"):
        # do something within data/stuff
        print('Hi')
# NOTE(review): bare `except` catches everything, even KeyboardInterrupt;
# a narrower `except OSError` would be safer here.
except:
    print('problem with with of directory data/stuff')
finally:
    print('first try final')
# here I am back again in the original working directory
print()  # blank separator line in the output
#Another way: https://stackoverflow.com/a/5205878/5233335
# Open outside the `with` so a failure to open is handled separately from
# errors while reading; the `else` branch runs only when open() succeeded,
# and `with f:` still guarantees the file is closed.
try:
    f = open('foo.txt')
except IOError:
    print('error open "foo.txt"')
else:
    with f:
        print('foo opened')
        print(f.readlines())
#
# some_code
#
|
import numpy as np
import tensorflow as tf
from pyuvdata import UVData, UVCal, UVFlag
from . import utils
import copy
import argparse
import itertools
import datetime
from pyuvdata import utils as uvutils
from .utils import echo
from .utils import PBARS
from . import cal_utils
from . import modeling
import re
# Map of user-facing optimizer names to tf.optimizers constructors,
# used to select the optimizer from a string option.
OPTIMIZERS = {
    "Adadelta": tf.optimizers.Adadelta,
    "Adam": tf.optimizers.Adam,
    "Adamax": tf.optimizers.Adamax,
    "Ftrl": tf.optimizers.Ftrl,
    "Nadam": tf.optimizers.Nadam,
    "SGD": tf.optimizers.SGD,
    "RMSprop": tf.optimizers.RMSprop,
    "Adagrad": tf.optimizers.Adagrad
}
def chunk_fg_comp_dict_by_nbls(fg_model_comps_dict, use_redundancy=False, grp_size_threshold=5):
    """Group the fitting groups in fg_model_comps_dict by baseline count.

    Parameters
    ----------
    fg_model_comps_dict: dict
        keys are fitting groups: tuples of redundant groups, each redundant
        group being a tuple of 2-tuple antenna pairs; values are
        (nred_grps * nfreqs, nvecs) numpy.ndarray modeling components.
    use_redundancy: bool, optional
        when False (default), a fitting group whose redundant groups all
        have the same (small, < grp_size_threshold) length is split into
        per-baseline fitting groups with no redundancy, so that groups of
        varying redundant-group lengths do not end up in separate chunks.
    grp_size_threshold: int, optional
        only fitting groups with fewer than this many redundant groups are
        split (see above).

    Returns
    -------
    dict
        keys are (nbl, nvecs) 2-tuples — the number of baselines per group
        and the max number of component vectors among those groups; each
        value is a dict mapping the fitting groups with nbl baselines to
        their modeling-component arrays.
    """
    # deep-copy so splitting does not mutate the caller's dict
    fg_model_comps_dict = copy.deepcopy(fg_model_comps_dict)
    if not use_redundancy:
        # Split small fitting groups whose redundant groups share a common
        # length into per-baseline groups (one group per redundancy index).
        for fit_grp in list(fg_model_comps_dict.keys()):
            red_lens = np.asarray([len(red_grp) for red_grp in fit_grp])
            if np.allclose(red_lens, np.mean(red_lens)) and len(red_lens) < grp_size_threshold:
                vectors = fg_model_comps_dict.pop(fit_grp)
                for idx in range(int(red_lens[0])):
                    split_grp = tuple((red_grp[idx],) for red_grp in fit_grp)
                    fg_model_comps_dict[split_grp] = vectors
    # Bucket fitting groups by total baseline count, tracking the maximum
    # number of component vectors within each bucket.
    grouped_keys = {}
    max_nvecs = {}
    for fit_grp, comps in fg_model_comps_dict.items():
        nbl = sum(len(red_grp) for red_grp in fit_grp)
        if nbl not in grouped_keys:
            grouped_keys[nbl] = []
            max_nvecs[nbl] = 0
        grouped_keys[nbl].append(fit_grp)
        max_nvecs[nbl] = max(max_nvecs[nbl], comps.shape[1])
    return {
        (nbl, max_nvecs[nbl]): {grp: fg_model_comps_dict[grp] for grp in grps}
        for nbl, grps in grouped_keys.items()
    }
def tensorize_fg_model_comps_dict(
    fg_model_comps_dict,
    ants_map,
    nfreqs,
    use_redundancy=False,
    dtype=np.float32,
    notebook_progressbar=False,
    verbose=False,
    grp_size_threshold=5,
):
    """Convert per-baseline model components into a Ndata x Ncomponent tensor

    Parameters
    ----------
    fg_model_comps_dict: dict
        dictionary with keys that are tuples of tuples of 2-tuples where each key is a
        'fitting group' of 'redundant groups' of antenna-pair 2-tuples. Each key points
        to an (nred_grps * nfreqs x nvecs) numpy.ndarray of modeling components for that
        fitting group (the same format accepted by chunk_fg_comp_dict_by_nbls, which is
        applied internally).
    ants_map: dict mapping integers to integers
        map between each antenna number to a unique index between 0 and Nants_data
        (typically the index of each antenna in ants_map)
    nfreqs: int, optional
        number of frequency channels
    use_redundancy: bool, optional
        passed through to chunk_fg_comp_dict_by_nbls; if True, keep one set of
        foreground coefficients per redundant group rather than per baseline.
        default is False.
    dtype: numpy.dtype
        tensor data types
        default is np.float32
    notebook_progressbar: bool, optional
        currently unused; kept for interface compatibility.
    verbose: bool, optional
        lots of text output.
        default is False.
    grp_size_threshold: int, optional
        passed through to chunk_fg_comp_dict_by_nbls.
        default is 5.

    Returns
    -------
    fg_model_comps: list
        list of tf.Tensor objects where each tensor has shape (nvecs, ngrps, nbls, nfreqs)
        where nbls varies from tensor to tensor. Fitting groups with vectors that span nbls
        are lumped into the same modeling tensor along the ngrps axis. nvecs is chosen in
        chunk_fg_comp_dict_by_nbls to be the maximum number of vectors representing any of
        the ngrps baseline grps, which means that many rows along the vector dimension will
        be zero for groups needing fewer vectors. This is wasteful of memory but it allows
        us to take advantage of the fast dense matrix operations on a GPU.
    corr_inds: list
        list of list of lists of 2-tuples. Hierarchy of lists is
        chunk
          group
            baseline - (int 2-tuple)
    """
    echo(
        f"{datetime.datetime.now()} Computing foreground components matrices...\n",
        verbose=verbose,
    )
    # chunk foreground components by number of baselines so each chunk can be
    # represented by a single dense tensor.
    fg_model_comps_dict = chunk_fg_comp_dict_by_nbls(
        fg_model_comps_dict, use_redundancy=use_redundancy, grp_size_threshold=grp_size_threshold
    )
    fg_model_comps = []
    corr_inds = []
    for nbls, nvecs in fg_model_comps_dict:
        chunk_dict = fg_model_comps_dict[(nbls, nvecs)]
        ngrps = len(chunk_dict)
        # zero-initialized container; groups with fewer than nvecs vectors leave
        # their trailing vector rows at zero (see docstring).
        modeling_matrix = np.zeros((nvecs, ngrps, nbls, nfreqs))
        corr_inds_chunk = []
        for grpnum, modeling_grp in enumerate(chunk_dict):
            corr_inds_grp = []
            grp_comps = chunk_dict[modeling_grp]
            # loop-invariant: rows [0, nvecs_grp) hold this group's vectors.
            vecslice = slice(0, grp_comps.shape[1])
            nbl = 0
            for rgrpnum, red_grp in enumerate(modeling_grp):
                # all baselines in a redundant group share the same component rows.
                compslice = slice(rgrpnum * nfreqs, (rgrpnum + 1) * nfreqs)
                for ap in red_grp:
                    i, j = ants_map[ap[0]], ants_map[ap[1]]
                    corr_inds_grp.append((i, j))
                    modeling_matrix[vecslice, grpnum, nbl] = grp_comps[compslice].T
                    nbl += 1
            corr_inds_chunk.append(corr_inds_grp)
        fg_model_comps.append(tf.convert_to_tensor(modeling_matrix, dtype=dtype))
        corr_inds.append(corr_inds_chunk)
    return fg_model_comps, corr_inds
def tensorize_data(
    uvdata,
    corr_inds,
    ants_map,
    polarization,
    time,
    data_scale_factor=1.0,
    weights=None,
    nsamples_in_weights=False,
    dtype=np.float32,
):
    """Convert data in uvdata object to a tensor

    Parameters
    ----------
    uvdata: UVData object
        UVData object containing data, flags, and nsamples to tensorize.
    corr_inds: list
        list of list of lists of 2-tuples. Hierarchy of lists is
        chunk
          group
            baseline - (int 2-tuple)
    ants_map: dict mapping integers to integers
        map between each antenna number to a unique index between 0 and Nants_data
        (typically the index of each antenna in ants_map)
    polarization: str
        pol-str of gain to extract.
    time: float
        time of data to convert to tensor.
    data_scale_factor: float, optional
        overall scaling factor to divide tensorized data by.
        default is 1.0
    weights: UVFlag object, optional
        UVFlag weights object containing weights to use for data fitting.
        default is None -> use nsamples * ~flags if nsamples_in_weights
        or ~flags if not nsamples_in_weights
    nsamples_in_weights: bool, optional
        If True and weights is None, generate weights proportional to nsamples.
        default is False.
    dtype: numpy.dtype
        data-type to store in tensor.
        default is np.float32

    Returns
    -------
    data_r: list of tf.Tensor objects
        list of tf.Tensor objects. Each tensor has shape (ngrps, nbls, nfreqs)
        where ngrps, nbls are the dimensions of each sublist in corr_inds
        and contain the real components of the baselines specified by these 2-tuples.
    data_i: list of tf.Tensor objects
        list of tf.Tensor objects. Each tensor has shape (ngrps, nbls, nfreqs)
        where ngrps, nbls are the dimensions of each sublist in corr_inds
        and contain the imag components of the baselines specified by these 2-tuples.
    wgts: tf.Tensor object
        list of tf.Tensor objects. Each tensor has shape (ngrps, nbls, nfreqs)
        where ngrps, nbls are the dimensions of each sublist in corr_inds
        and contain the weights of the baselines specified by these 2-tuples.
    """
    ants_map_inv = {ants_map[i]: i for i in ants_map}
    dshape = (uvdata.Nants_data, uvdata.Nants_data, uvdata.Nfreqs)
    data_r = np.zeros(dshape, dtype=dtype)
    data_i = np.zeros_like(data_r)
    wgts = np.zeros_like(data_r)
    wgtsum = 0.0
    for chunk in corr_inds:
        for fitgrp in chunk:
            for (i, j) in fitgrp:
                # recover the antenna-pair key for this (i, j) index pair.
                ap = ants_map_inv[i], ants_map_inv[j]
                bl = ap + (polarization,)
                # the data may store this baseline in either orientation;
                # _key2inds tells us which, and whether to conjugate.
                dinds1, dinds2, pol_ind = uvdata._key2inds(bl)
                if len(dinds1) > 0:
                    dinds = dinds1
                    conjugate = False
                    pol_ind = pol_ind[0]
                else:
                    dinds = dinds2
                    conjugate = True
                    pol_ind = pol_ind[1]
                # pick the blt index matching the requested JD.
                dind = dinds[np.where(np.isclose(uvdata.time_array[dinds], time, rtol=0.0, atol=1e-7))[0][0]]
                data = uvdata.data_array[dind, 0, :, pol_ind].squeeze()
                iflags = ~uvdata.flag_array[dind, 0, :, pol_ind].squeeze()
                nsamples = uvdata.nsample_array[dind, 0, :, pol_ind].squeeze()
                # BUGFIX: `data` can be a *view* into uvdata.data_array (basic
                # indexing + squeeze), so the original in-place
                # `data /= data_scale_factor` mutated the caller's UVData.
                # Divide out-of-place instead; tensor contents are unchanged.
                data = data / data_scale_factor
                if conjugate:
                    data = np.conj(data)
                data_r[i, j] = data.real.astype(dtype)
                data_i[i, j] = data.imag.astype(dtype)
                if weights is None:
                    wgts[i, j] = iflags
                    if nsamples_in_weights:
                        wgts[i, j] *= nsamples
                else:
                    if ap in weights.get_antpairs():
                        dinds = weights.antpair2ind(*ap)
                    else:
                        dinds = weights.antpair2ind(*ap[::-1])
                    dind = dinds[np.where(np.isclose(weights.time_array[dinds], time, atol=1e-7, rtol=0.0))[0][0]]
                    polnum = np.where(
                        weights.polarization_array
                        == uvutils.polstr2num(polarization, x_orientation=weights.x_orientation)
                    )[0][0]
                    wgts[i, j] = weights.weights_array[dind, 0, :, polnum].astype(dtype) * iflags
                    if nsamples_in_weights:
                        wgts[i, j] *= nsamples
                wgtsum += np.sum(wgts[i, j])
    data_r = tf.convert_to_tensor(data_r, dtype=dtype)
    data_i = tf.convert_to_tensor(data_i, dtype=dtype)
    # normalize weights so they sum to unity over all fitted baselines.
    wgts = tf.convert_to_tensor(wgts / wgtsum, dtype=dtype)
    nchunks = len(corr_inds)
    # gather per-chunk (ngrps, nbls, nfreqs) tensors out of the dense arrays.
    data_r = [tf.gather_nd(data_r, corr_inds[cnum]) for cnum in range(nchunks)]
    data_i = [tf.gather_nd(data_i, corr_inds[cnum]) for cnum in range(nchunks)]
    wgts = [tf.gather_nd(wgts, corr_inds[cnum]) for cnum in range(nchunks)]
    return data_r, data_i, wgts
def renormalize(uvdata_reference_model, uvdata_deconv, gains, polarization, time, additional_flags=None):
    """Remove arbitrary phase and amplitude from deconvolved model and gains.

    Parameters
    ----------
    uvdata_reference_model: UVData object
        Reference model for "true" visibilities.
    uvdata_deconv: UVData object
        "Deconvolved" data solved for in self-cal loop.
    gains: UVCal object
        Gains solved for in self-cal loop.
    polarization: str
        Polarization string to compute phase and amplitude correction for.
    time: float
        JD of the time to renormalize.
    additional_flags: np.ndarray
        Any additional flags you wish to use for excluding data from normalization
        fed as an np.ndarray with same shape as uvdata_reference_model and uvdata_deconv.
        default is None -> Only exclude data in flags from reference model and deconv from
        determining normalization.

    Returns
    -------
    N/A: Modifies uvdata_deconv and gains in-place.
    """
    # locate the polarization column of the deconvolved data.
    pol_idx = np.where(
        uvdata_deconv.polarization_array == uvutils.polstr2num(polarization, x_orientation=uvdata_deconv.x_orientation)
    )[0][0]
    # baseline-times matching the requested JD.
    on_time = np.isclose(uvdata_deconv.time_array, time, atol=1e-7, rtol=0.0)
    # keep samples unflagged in both the deconvolved data and the reference model.
    keep = ~(
        uvdata_deconv.flag_array[on_time, :, :, pol_idx]
        | uvdata_reference_model.flag_array[on_time, :, :, pol_idx]
    )
    if additional_flags is not None:
        keep &= ~additional_flags[on_time, :, :, pol_idx]
    ref_vals = uvdata_reference_model.data_array[on_time, :, :, pol_idx][keep]
    deconv_vals = uvdata_deconv.data_array[on_time, :, :, pol_idx][keep]
    ratio = ref_vals / deconv_vals
    ratio[~np.isfinite(ratio)] = np.nan
    scale_factor_phase = np.angle(np.nanmean(ratio))
    scale_factor_abs = np.sqrt(np.nanmean(np.abs(ratio) ** 2.0))
    # phase factor (np.exp(1j * scale_factor_phase)) deliberately not applied yet;
    # amplitude-only rescaling for now.
    scale_factor = scale_factor_abs
    uvdata_deconv.data_array[on_time, :, :, pol_idx] *= scale_factor
    # corresponding Jones index in the gain solutions.
    jones_idx = np.where(
        gains.jones_array == uvutils.polstr2num(polarization, x_orientation=uvdata_deconv.x_orientation)
    )[0][0]
    time_idx = np.where(np.isclose(gains.time_array, time, atol=1e-7, rtol=0.0))[0][0]
    # gains enter the data quadratically, so each gain absorbs scale_factor**-0.5.
    gains.gain_array[:, :, :, time_idx, jones_idx] *= (scale_factor) ** -0.5
def tensorize_gains(uvcal, polarization, time, dtype=np.float32):
    """Extract gains for one time and polarization into fitting tensors.

    Parameters
    ----------
    uvcal: UVCal object
        UVCal object holding gain data to tensorize.
    polarization: str
        pol-str of gain to extract.
    time: float
        JD of time to convert to tensor.
    dtype: numpy.dtype
        dtype of tensors to output.

    Returns
    -------
    gains_re: tf.Tensor object.
        tensor object holding real component of gains
        for time_index and polarization
        shape is Nant x Nfreq
    gains_im: tf.Tensor object.
        tensor object holding imag component of gains
        for time_index and polarization
        shape is Nant x Nfreq
    """
    # locate the requested Jones polarization and time sample.
    jones_idx = np.where(uvcal.jones_array == uvutils.polstr2num(polarization, x_orientation=uvcal.x_orientation))[0][0]
    time_idx = np.where(np.isclose(uvcal.time_array, time, atol=1e-7, rtol=0.0))[0][0]
    # slice once, then split into real/imag tensors.
    gain_slice = uvcal.gain_array[:, 0, :, time_idx, jones_idx].squeeze()
    gains_re = tf.convert_to_tensor(gain_slice.real, dtype=dtype)
    gains_im = tf.convert_to_tensor(gain_slice.imag, dtype=dtype)
    return gains_re, gains_im
def yield_fg_model_array(
    nants,
    nfreqs,
    fg_model_comps,
    fg_coeffs,
    corr_inds,
):
    """Compute the foreground model as an antenna x antenna x frequency array.

    Parameters
    ----------
    nants: int
        number of antennas in data to model.
    nfreqs: int
        number of frequencies in data to model.
    fg_model_comps: list
        list of fg modeling tf.Tensor objects
        representing foreground modeling vectors.
        Each tensor is (nvecs, ngrps, nbls, nfreqs)
    fg_coeffs: list
        list of fg modeling tf.Tensor objects
        representing foreground modeling coefficients.
        Each tensor is (nvecs, ngrps, 1, 1)
    corr_inds: list
        list of list of lists of 2-tuples. Hierarchy of lists is
        chunk
          group
            baseline - (int 2-tuple)

    Returns
    -------
    model: np.ndarray
        nants x nants x nfreqs model of the visibility data
    """
    model = np.zeros((nants, nants, nfreqs))
    # walk chunks in lockstep: components, coefficients, and baseline indices.
    for comps, coeffs, chunk_inds in zip(fg_model_comps, fg_coeffs, corr_inds):
        # coefficient-weighted sum over modeling vectors -> (ngrps, nbls, nfreqs)
        chunk_model = tf.reduce_sum(coeffs * comps, axis=0).numpy()
        for gnum, grp_inds in enumerate(chunk_inds):
            for blnum, (i, j) in enumerate(grp_inds):
                model[i, j] = chunk_model[gnum, blnum]
    return model
def fit_gains_and_foregrounds(
    g_r,
    g_i,
    fg_r,
    fg_i,
    data_r,
    data_i,
    wgts,
    fg_comps,
    corr_inds,
    use_min=False,
    tol=1e-14,
    maxsteps=10000,
    optimizer="Adamax",
    freeze_model=False,
    verbose=False,
    notebook_progressbar=False,
    dtype=np.float32,
    graph_mode=False,
    n_profile_steps=0,
    profile_log_dir="./logdir",
    sky_model_r=None,
    sky_model_i=None,
    model_regularization=None,
    graph_args_dict=None,
    **opt_kwargs,
):
    """Run optimization loop to fit gains and foreground components.

    Parameters
    ----------
    g_r: tf.Tensor object.
        tf.Tensor object holding real parts of gains.
    g_i: tf.Tensor object.
        tf.Tensor object holding imag parts of gains.
    fg_r: list
        list of tf.Tensor objects. Each has shape (nvecs, ngrps, 1, 1)
        tf.Tensor object holding foreground coeffs.
    fg_i: list
        list of tf.Tensor objects. Each has shape (nvecs, ngrps, 1, 1)
        tf.Tensor object holding imag coeffs.
    data_r: list
        list of tf.Tensor objects. Each has shape (ngrps, nbls, nfreqs)
        real part of data to fit.
    data_i: list
        list of tf.Tensor objects. Each has shape (ngrps, nbls, nfreqs)
        imag part of data to fit.
    wgts: list
        list of tf.Tensor objects. Each has shape (ngrps, nbls, nfreqs)
    fg_comps: list:
        list of tf.Tensor objects. Each has shape (nvecs, ngrps, nbls, nfreqs)
        represents vectors to be used in modeling visibilities.
    corr_inds: list
        list of list of lists of 2-tuples. Hierarchy of lists is
        chunk
          group
            baseline - (int 2-tuple)
    use_min: bool, optional
        if True, use the value that minimizes the loss function
        regardless of where optimization loop ended up
        (prevents overshooting due to excess momentum)
    tol: float, optional
        halt optimization loop once the loss changes by less then this value.
        default is 1e-14
    maxsteps: int, optional
        maximum number of opt.minimize calls before halting.
        default is 10000
    optimizer: string
        Name of optimizer. See OPTIMIZERS dictionary which contains optimizers described in
        https://www.tensorflow.org/api_docs/python/tf/keras/optimizers
        default is 'Adamax'
    freeze_model: bool, optional
        Only optimize loss function wrt gain variables. This is effectively traditional model-based calibration
        with sky_model as the model (but projected onto the foreground basis vectors).
        default is False.
    verbose: bool, optional
        lots of text output
        default is False.
    notebook_progressbar: bool, optional
        use progress bar optimized for notebook output.
        default is False.
    dtype: numpy.dtype, optional
        float precision used in tensorflow calculations.
        default is np.float32.
    graph_mode: bool, optional
        if True, compile gradient update step in graph mode to speed up
        runtime by ~2-3x. I've found that this helps on CPUs but on GPUs
        it actually increases runtime by a similar factor.
    n_profile_steps: bool, optional
        number of steps to run profiling on
        default is 0.
    profile_log_dir: str, optional
        directory to save profile logs to
        default is './logdir'
    sky_model_r: list of tf.Tensor objects, optional
        chunked tensors containing model in same format as data_r
        (only used when model_regularization == "sum")
    sky_model_i: list of tf.Tensor objects, optional
        chunked tensors containing model in the same format as data_i
        (only used when model_regularization == "sum")
    model_regularization: str, optional
        type of model regularization to perform. Currently support "sum"
        where the sums of real and imaginary parts (across all bls and freqs)
        are constrained to be the same as the sum of real and imag parts
        of data.
    graph_args_dict: dict, optional
        kwargs passed to tf.function when graph_mode is True.
        default is None -> {}.
    opt_kwargs: kwarg dict
        additional kwargs for tf.opt.Optimizer(). See tensorflow docs.

    Returns
    -------
    g_r_opt: tf.Tensor object
        real part of optimized gains.
    g_i_opt: tf.Tensor object
        imag part of optimized gains.
    fg_r_opt: tf.Tensor object
        real part of foreground coeffs.
    fg_i_opt: tf.Tensor object.
        imag part of optimized foreground coeffs.
    fit_history: dict
        dictionary containing fit history for each time-step and polarization in the data with fields:
        'loss_history': list of values of the loss function in each minimization iteration.
    """
    if graph_args_dict is None:
        graph_args_dict = {}
    # initialize the optimizer.
    echo(f"Using {str(dtype)} precision.")
    echo(f"{datetime.datetime.now()} Provided the following opt_kwargs")
    for k in opt_kwargs:
        echo(f"{k}: {opt_kwargs[k]}")
    opt = OPTIMIZERS[optimizer](**opt_kwargs)
    # set up history recording
    fit_history = {"loss": []}
    min_loss = 9e99
    # NOTE(review): nants and nfreqs are not referenced later in this function.
    nants = g_r.shape[0]
    nfreqs = g_r.shape[1]
    ant0_inds = []
    ant1_inds = []
    nchunks = len(fg_comps)
    # build up list of lists of ant0 and ant1 for gather ops
    for cnum in range(nchunks):
        ant0_chunk = []
        ant1_chunk = []
        ngrps = len(corr_inds[cnum])
        for gnum in range(ngrps):
            ant0_grp = []
            ant1_grp = []
            for cpair in corr_inds[cnum][gnum]:
                ant0_grp.append(cpair[0])
                ant1_grp.append(cpair[1])
            ant0_chunk.append(ant0_grp)
            ant1_chunk.append(ant1_grp)
        ant0_inds.append(ant0_chunk)
        ant1_inds.append(ant1_chunk)
    # promote gains (and, unless frozen, foreground coeffs) to trainable variables.
    g_r = tf.Variable(g_r)
    g_i = tf.Variable(g_i)
    if not freeze_model:
        fg_r = [tf.Variable(fgr) for fgr in fg_r]
        fg_i = [tf.Variable(fgi) for fgi in fg_i]
        vars = [g_r, g_i] + fg_r + fg_i
    else:
        vars = [g_r, g_i]
    echo(
        f"{datetime.datetime.now()} Performing gradient descent on {np.prod(g_r.shape)} complex gain parameters...",
        verbose=verbose,
    )
    if not freeze_model:
        echo(
            f"Performing gradient descent on total of {int(np.sum([fgr.shape[0] * fgr.shape[1] for fgr in fg_r]))} complex foreground parameters",
            verbose=verbose,
        )
        echo(
            f"Foreground Parameters grouped into chunks of shape ((nvecs, ngrps): nbls) {[str(fgr.shape[:2]) + ':' + str(dc.shape[1]) for fgr, dc in zip(fg_r, data_r)]}",
            verbose=verbose,
        )
    if model_regularization == "sum":
        # weighted sums of the sky model; the regularized loss pins the fitted
        # model's sums to these values to break the overall degeneracy.
        prior_r_sum = tf.reduce_sum(
            tf.stack([tf.reduce_sum(sky_model_r[cnum] * wgts[cnum]) for cnum in range(nchunks)])
        )
        prior_i_sum = tf.reduce_sum(
            tf.stack([tf.reduce_sum(sky_model_i[cnum] * wgts[cnum]) for cnum in range(nchunks)])
        )

        def loss_function():
            return mse_chunked_sum_regularized(
                g_r=g_r,
                g_i=g_i,
                fg_r=fg_r,
                fg_i=fg_i,
                fg_comps=fg_comps,
                nchunks=nchunks,
                data_r=data_r,
                data_i=data_i,
                wgts=wgts,
                ant0_inds=ant0_inds,
                ant1_inds=ant1_inds,
                dtype=dtype,
                prior_r_sum=prior_r_sum,
                prior_i_sum=prior_i_sum,
            )

    else:

        def loss_function():
            return mse_chunked(
                g_r=g_r,
                g_i=g_i,
                fg_r=fg_r,
                fg_i=fg_i,
                fg_comps=fg_comps,
                nchunks=nchunks,
                data_r=data_r,
                data_i=data_i,
                wgts=wgts,
                ant0_inds=ant0_inds,
                ant1_inds=ant1_inds,
                dtype=dtype,
            )

    def train_step_code():
        # one gradient-descent update of all trainable variables.
        with tf.GradientTape() as tape:
            loss = loss_function()
        grads = tape.gradient(loss, vars)
        opt.apply_gradients(zip(grads, vars))
        return loss

    if graph_mode:

        @tf.function(**graph_args_dict)
        def train_step():
            return train_step_code()

    else:

        def train_step():
            return train_step_code()

    if n_profile_steps > 0:
        echo(f"{datetime.datetime.now()} Profiling with {n_profile_steps}. And writing output to {profile_log_dir}...")
        tf.profiler.experimental.start(profile_log_dir)
        for step in PBARS[notebook_progressbar](range(n_profile_steps)):
            with tf.profiler.experimental.Trace("train", step_num=step):
                train_step()
        tf.profiler.experimental.stop()
    echo(
        f"{datetime.datetime.now()} Building Computational Graph...\n",
        verbose=verbose,
    )
    # first call builds the graph (when graph_mode) and gives the initial loss.
    loss = train_step()
    echo(
        f"{datetime.datetime.now()} Performing Gradient Descent. Initial MSE of {loss:.2e}...\n",
        verbose=verbose,
    )
    for step in PBARS[notebook_progressbar](range(maxsteps)):
        loss = train_step()
        fit_history["loss"].append(loss.numpy())
        if use_min and fit_history["loss"][-1] < min_loss:
            # store the g_r, g_i, fg_r, fg_i values that minimize loss
            # in case of overshoot.
            # NOTE(review): with use_min=True and freeze_model=True, fg_r_opt /
            # fg_i_opt are never assigned here (nor in the `not use_min` branch
            # below) -> NameError at return. Confirm whether this combination
            # is supported.
            min_loss = fit_history["loss"][-1]
            g_r_opt = g_r.value()
            g_i_opt = g_i.value()
            if not freeze_model:
                fg_r_opt = [fgr.value() for fgr in fg_r]
                fg_i_opt = [fgi.value() for fgi in fg_i]
        # early exit once the loss has converged to within tol.
        if step >= 1 and np.abs(fit_history["loss"][-1] - fit_history["loss"][-2]) < tol:
            echo(
                f"Tolerance thresshold met with delta of {np.abs(fit_history['loss'][-1] - fit_history['loss'][-2]):.2e}. Terminating...\n ",
                verbose=verbose,
            )
            break
    # if we dont use use_min, then the last
    # visited set of parameters will be used
    # to set the ML params.
    if not use_min:
        min_loss = fit_history["loss"][-1]
        g_r_opt = g_r.value()
        g_i_opt = g_i.value()
        if not freeze_model:
            fg_r_opt = [fgr.value() for fgr in fg_r]
            fg_i_opt = [fgi.value() for fgi in fg_i]
        else:
            fg_r_opt = fg_r
            fg_i_opt = fg_i
    echo(
        f"{datetime.datetime.now()} Finished Gradient Descent. MSE of {min_loss:.2e}...\n",
        verbose=verbose,
    )
    return g_r_opt, g_i_opt, fg_r_opt, fg_i_opt, fit_history
def insert_model_into_uvdata_tensor(
    uvdata,
    time,
    polarization,
    ants_map,
    red_grps,
    model_r,
    model_i,
    scale_factor=1.0,
):
    """Insert fitted tensor values back into uvdata object for tensor mode.

    Parameters
    ----------
    uvdata: UVData object
        uvdata object to insert model data into.
    time: float
        JD of time to insert.
    polarization: str
        polarization to insert.
    ants_map: dict mapping integers to integers
        map between each antenna number to a unique index between 0 and Nants_data
        (typically the index of each antenna in ants_map)
    red_grps: list of lists of int 2-tuples
        a list of lists of 2-tuples where all antenna pairs within each sublist
        are redundant with eachother. Assumes that conjugates are correctly taken.
    model_r: np.ndarray
        an Nants_data x Nants_data x Nfreqs np.ndarray with real parts of data
    model_i: np.ndarray
        an Nants_data x Nants_data x Nfreqs np.ndarray with imag parts of model
    scale_factor: float, optional
        overall scaling factor to divide tensorized data by.
        default is 1.0

    Returns
    -------
    N/A: Modifies uvdata inplace.
    """
    data_antpairs = uvdata.get_antpairs()
    pol_idx = np.where(
        uvdata.polarization_array == uvutils.polstr2num(polarization, x_orientation=uvdata.x_orientation)
    )[0][0]
    for red_grp in red_grps:
        for ap in red_grp:
            i, j = ants_map[ap[0]], ants_map[ap[1]]
            # the data may hold this pair in either orientation; conjugate
            # the model when the stored orientation is flipped.
            if ap in data_antpairs:
                blt_inds = uvdata.antpair2ind(ap)
                model_vals = model_r[i, j] + 1j * model_i[i, j]
            else:
                blt_inds = uvdata.antpair2ind(ap[::-1])
                model_vals = model_r[i, j] - 1j * model_i[i, j]
            # select the single blt index matching the requested JD.
            blt_ind = blt_inds[np.where(np.isclose(time, uvdata.time_array[blt_inds], atol=1e-7, rtol=0.0))[0][0]]
            uvdata.data_array[blt_ind, 0, :, pol_idx] = model_vals * scale_factor
def insert_gains_into_uvcal(uvcal, time, polarization, gains_re, gains_im):
    """Insert tensorized gains back into uvcal object

    Parameters
    ----------
    uvcal: UVCal object
        UVCal object to insert gain solutions into.
    time: float
        JD of time to insert.
    polarization: str
        polarization to insert.
    gains_re: dict with int keys and tf.Tensor object values
        dictionary mapping i antenna numbers to Nfreq 1d tf.Tensor object
        representing the real component of the complex gain for antenna i.
    gains_im: dict with int keys and tf.Tensor object values
        dictionary mapping j antenna numbers to Nfreq 1d tf.Tensor object
        representing the imag component of the complex gain for antenna j.

    Returns
    -------
    N/A: Modifies uvcal inplace.
    """
    # locate the Jones polarization column and time sample to overwrite.
    jones_idx = np.where(uvcal.jones_array == uvutils.polstr2num(polarization, x_orientation=uvcal.x_orientation))[0][0]
    time_idx = np.where(np.isclose(uvcal.time_array, time, atol=1e-7, rtol=0.0))[0][0]
    for ant in range(uvcal.Nants_data):
        # recombine real/imag tensors into complex gains per antenna.
        complex_gain = gains_re[ant].numpy() + 1j * gains_im[ant].numpy()
        uvcal.gain_array[ant, 0, :, time_idx, jones_idx] = complex_gain
def tensorize_fg_coeffs(
    data,
    wgts,
    fg_model_comps,
    notebook_progressbar=False,
    verbose=False,
):
    """Initialize foreground coefficient tensors from uvdata and modeling component dictionaries.

    Parameters
    ----------
    data: list
        list of tf.Tensor objects, each with shape (ngrps, nbls, nfreqs)
        representing data
    wgts: list
        list of tf.Tensor objects, each with shape (ngrps, nbls, nfreqs)
        representing weights.
    fg_model_comps: list
        list of fg modeling tf.Tensor objects
        representing foreground modeling vectors.
        Each tensor is (nvecs, ngrps, nbls, nfreqs)
        see description in tensorize_fg_model_comps_dict
        docstring.
    notebook_progressbar: bool, optional
        use progress bar optimized for notebook output.
        default is False.
    verbose: bool, optional
        lots of text output
        default is False.

    Returns
    -------
    fg_coeffs_re: tf.Tensor object
        1d tensor containing real parts of coeffs for each modeling vector.
        ordering is over foreground modeling vector per redundant group and then
        redundant group in the order of groups appearing in red_grps
    fg_coeffs_im: tf.Tensor object
        1d tensor containing imag parts of coeffs for each modeling vector.
        ordering is over foreground modeling vector per redundant group and then
        redundant group in the order of groups appearing in red_grps
    """
    echo(
        f"{datetime.datetime.now()} Computing initial foreground coefficient guesses using linear-leastsq...\n",
        verbose=verbose,
    )
    fg_coeffs = []
    nchunks = len(data)
    # 0/1 masks: flagged (zero-weight) channels are zeroed out of the fit.
    binary_wgts = [
        tf.convert_to_tensor(~np.isclose(wgts[cnum].numpy(), 0.0), dtype=wgts[cnum].dtype) for cnum in range(nchunks)
    ]
    for cnum in PBARS[notebook_progressbar](range(nchunks)):
        # set up linear leastsq
        fg_coeff_chunk = []
        ngrps = data[cnum].shape[0]
        ndata = data[cnum].shape[1] * data[cnum].shape[2]
        nvecs = fg_model_comps[cnum].shape[0]
        # pad with zeros
        for gnum in range(ngrps):
            # NOTE: despite the name, `nonzero_rows` holds the indices of
            # all-ZERO vector rows (the zero padding added by
            # chunk_fg_comp_dict_by_nbls); the first such index marks where the
            # group's real modeling vectors end.
            nonzero_rows = np.where(
                np.all(np.isclose(fg_model_comps[cnum][:, gnum].numpy().reshape(nvecs, ndata), 0.0), axis=1)
            )[0]
            if len(nonzero_rows) > 0:
                nvecs_nonzero = np.min(nonzero_rows)
            else:
                nvecs_nonzero = nvecs
            # solve linear leastsq restricted to the non-padding vectors.
            fg_coeff_chunk.append(
                tf.reshape(
                    tf.linalg.lstsq(
                        tf.transpose(tf.reshape(fg_model_comps[cnum][:, gnum], (nvecs, ndata)))[:, :nvecs_nonzero],
                        tf.reshape(data[cnum][gnum] * binary_wgts[cnum][gnum], (ndata, 1)),
                    ),
                    (nvecs_nonzero,),
                )
            )
            # pad zeros at the end back up to nvecs.
            fg_coeff_chunk[-1] = tf.pad(fg_coeff_chunk[-1], [(0, nvecs - nvecs_nonzero)])
        # add two additional dummy indices to satify broadcasting rules
        # against the (nvecs, ngrps, nbls, nfreqs) component tensors.
        fg_coeff_chunk = tf.reshape(tf.transpose(tf.stack(fg_coeff_chunk)), (nvecs, ngrps, 1, 1))
        fg_coeffs.append(fg_coeff_chunk)
    echo(
        f"{datetime.datetime.now()} Finished initial foreground coefficient guesses...\n",
        verbose=verbose,
    )
    return fg_coeffs
def get_auto_weights(uvdata, delay_extent=25.0):
    """
    inverse variance weights from interpolated autocorrelation data

    Parameters
    ----------
    uvdata: UVData object
        UVData object containing autocorrelation data to use for computing inverse noise weights.
    delay_extent: float, optional
        Fit autocorrelation to delay components with this width.
        default is 25.0.

    Returns
    -------
    data_weights: UVFlag object
        UVFlag in flag-mode where flags contain original data flags and weights contain autocorr weights.
    """
    # DPSS basis for a zero-length baseline; `offset` sets the delay extent.
    # (units are whatever modeling.yield_dpss_model_comps_bl_grp expects --
    # presumably ns; confirm against that helper.)
    dpss_components = modeling.yield_dpss_model_comps_bl_grp(0.0, uvdata.freq_array[0], offset=delay_extent)
    data_weights = UVFlag(uvdata, mode="flag")
    data_weights.weights_array = np.zeros(uvdata.data_array.shape)
    # compute autocorrelation weights
    auto_fit_dict = {}
    bls = uvdata.get_antpairpols()
    for bl in bls:
        if bl[0] == bl[1]:
            d_wf = uvdata.get_data(bl)
            w_wf = ~uvdata.get_flags(bl)
            auto_fit_dict[bl] = []
            # fit each integration of the waterfall separately.
            for ds, fs in zip(d_wf, w_wf):
                # fit autocorr waterfall to DPSS modes.
                nunflagged = np.count_nonzero(fs)
                amat = tf.convert_to_tensor(dpss_components[fs])
                dvec = tf.reshape(tf.convert_to_tensor(ds[fs].real), (nunflagged, 1))
                # least-squares fit on unflagged channels, then evaluate the
                # smooth model over all channels.
                model = dpss_components @ tf.linalg.lstsq(amat, dvec).numpy().squeeze()
                auto_fit_dict[bl].append(model)
            auto_fit_dict[bl] = np.atleast_2d(np.asarray(auto_fit_dict[bl]))
    # from autocorrelation fits, weights
    for bl in bls:
        # noise variance on baseline (i, j) scales with auto_i * auto_j,
        # so the inverse product gives an inverse-variance weight.
        smooth_weights = 1.0 / (auto_fit_dict[bl[0], bl[0], bl[-1]] * auto_fit_dict[bl[1], bl[1], bl[-1]])
        smooth_weights *= ~uvdata.get_flags(bl)
        dinds = data_weights.antpair2ind(*bl[:2])
        polnum = np.where(
            data_weights.polarization_array == uvutils.polstr2num(bl[-1], x_orientation=data_weights.x_orientation)
        )[0][0]
        data_weights.weights_array[dinds, 0, :, polnum] = smooth_weights
    return data_weights
def calibrate_and_model_tensor(
uvdata,
fg_model_comps_dict,
gains=None,
freeze_model=False,
optimizer="Adamax",
tol=1e-14,
maxsteps=10000,
include_autos=False,
verbose=False,
sky_model=None,
dtype=np.float32,
use_min=False,
use_redundancy=False,
notebook_progressbar=False,
correct_resid=False,
correct_model=True,
weights=None,
nsamples_in_weights=True,
graph_mode=False,
grp_size_threshold=5,
n_profile_steps=0,
profile_log_dir="./logdir",
model_regularization="sum",
init_guesses_from_previous_time_step=False,
skip_threshold=0.5,
use_model_snr_weights=False,
**opt_kwargs,
):
"""Perform simultaneous calibration and foreground fitting using tensors.
Parameters
----------
uvdata: UVData object
uvdata object of data to be calibrated.
fg_model_comps_dict: dictionary
dictionary with keys that are tuples of tuples of 2-tuples (thats right, 3 levels)
in the first level, each tuple represents a 'modeling group' visibilities in each
modeling group are represented by a set of basis vectors that span all baselines in that
group with elements raveled by baseline and then frequency. Each tuple in the modeling group is a
'redundant group' representing visibilities that we will represent with identical component coefficients
each element of each 'redundant group' is a 2-tuple antenna pair. Our formalism easily accommodates modeling
visibilities as redundant or non redundant (one simply needs to make each redundant group length 1).
values are real numpy arrays with size (Ngrp * Nfreqs) * Ncomponents
gains: UVCal object
UVCal with initial gain estimates.
There many smart ways to obtain initial gain estimates
but this is beyond the scope of calamity (for example, firstcal, logcal, sky-based cal).
Users can determine initial gains with their favorite established cal algorithm.
default is None -> start with unity gains.
WARNING: At the present, the flags in gains are not propagated/used! Make sure flags in uvdata object!
freeze_model: bool, optional
Only optimize loss function wrt gain variables. This is effectively traditional model-based calibration
with sky_model as the model (but projected onto the foreground basis vectors).
default is False.
optimizer: string
Name of optimizer. See OPTIMIZERS dictionary which contains optimizers described in
https://www.tensorflow.org/api_docs/python/tf/keras/optimizers
default is 'Adamax'
tol: float, optional
halting condition for optimizer loop. Stop loop when the change in the cost function falls
below tol.
default is 1e-14
maxsteps: int, optional
maximum number of opt.minimize calls before halting.
default is 10000
include_autos: bool, optional
include autocorrelations in fitting.
default is False.
verbose: bool, optional
generate lots of text.
default is False.
sky_model: UVData object, optional
a sky-model to use for initial estimates of foreground coeffs and
to set overall flux scale and phases.
Note that this model is not used to obtain initial gain estimates.
These must be provided through the gains argument.
dtype: numpy dtype, optional
the float precision to be used in tensorflow gradient descent.
runtime scales roughly inversely linear with precision.
default is np.float32
use_min: bool, optional
If True, use the set of parameters that determine minimum as the ML params
If False, use the last set of parameters visited by the optimization loop.
use_redundancy: bool, optional
if true, solve for one set of foreground coeffs per redundant baseline group
instead of per baseline.
notebook_progressbar: bool, optional
use progress bar optimized for notebook output.
default is False.
red_tol: float, optional
tolerance for determining baselines redundant (meters)
default is 1.0
correct_resid: bool, optional
if True, gain correct residual.
default is False
correct_model: bool, optional
if True, gain correct model.
default is False
weights: UVFlag object, optional.
UVFlag weights object containing weights to use for data fitting.
default is None -> use nsamples * ~flags if nsamples_in_weights
or ~flags if not nsamples_in_weights
nsamples_in_weights: bool, optional
If True and weights is None, generate weights proportional to nsamples.
default is True.
graph_mode: bool, optional
if True, compile gradient update step in graph mode to speed up
runtime by ~2-3x. I've found that this helps on CPUs but on GPUs
it actually increases runtime by a similar factor.
n_profile_steps: bool, optional
number of steps to run profiling on
default is 0.
profile_log_dir: str, optional
directory to save profile logs to
default is './logdir'
model_regularization: str, optional
option to regularize model
supported 'post_hoc', 'sum'
default is 'post_hoc'
which sets sum of amps equal and sum of phases equal.
init_guesses_from_previous_time_step: bool, optional
if True, then use foreground coeffs and gains from previous time-step to
initialize gains for next time step.
skip_threshold: float, optional
if less then this fraction of data is unflagged on a particular poltime,
flag the entire poltime.
opt_kwargs: kwarg_dict
kwargs for tf.optimizers
Returns
-------
model: UVData object
uvdata object containing model of the foregrounds
resid: UVData object
uvdata object containing resids which are the data minus
the model with gains multiplied and then with the gains divided out.
gains: UVCal object
uvcal object containing estimates of the gain solutions. These solutions
are not referenced to any sky model and are likely orders of
fit_history:
dictionary containing fit history with fields:
'loss_history': list of values of the loss function in each minimization iteration.
"""
antpairs_data = uvdata.get_antpairs()
if not include_autos:
antpairs_data = set([ap for ap in antpairs_data if ap[0] != ap[1]])
uvdata = uvdata.select(inplace=False, bls=[ap for ap in antpairs_data])
resid = copy.deepcopy(uvdata)
model = copy.deepcopy(uvdata)
model.data_array[:] = 0.0
model.flag_array[:] = False
# get redundant groups
red_grps = []
for fit_grp in fg_model_comps_dict.keys():
for red_grp in fit_grp:
red_grps.append(red_grp)
if gains is None:
echo(
f"{datetime.datetime.now()} Gains are None. Initializing gains starting with unity...\n",
verbose=verbose,
)
gains = cal_utils.blank_uvcal_from_uvdata(uvdata)
if sky_model is None and model_regularization is not None:
echo(
f"{datetime.datetime.now()} Sky model is None. Initializing from data...\n",
verbose=verbose,
)
sky_model = cal_utils.apply_gains(uvdata, gains)
else:
sky_model = sky_model.select(inplace=False, bls=[ap for ap in antpairs_data])
fit_history = {}
ants_map = {ant: i for i, ant in enumerate(gains.ant_array)}
# generate tensors to hold foreground components.
fg_model_comps, corr_inds = tensorize_fg_model_comps_dict(
fg_model_comps_dict=fg_model_comps_dict,
ants_map=ants_map,
dtype=dtype,
nfreqs=sky_model.Nfreqs,
verbose=verbose,
notebook_progressbar=notebook_progressbar,
use_redundancy=use_redundancy,
grp_size_threshold=grp_size_threshold,
)
echo(
f"{datetime.datetime.now()}Finished Converting Foreground Modeling Components to Tensors...\n",
verbose=verbose,
)
# delete fg_model_comps_dict. It can take up a lot of memory.
del fg_model_comps_dict
# loop through polarization and times.
for polnum, pol in enumerate(uvdata.get_pols()):
echo(
f"{datetime.datetime.now()} Working on pol {pol}, {polnum + 1} of {uvdata.Npols}...\n",
verbose=verbose,
)
fit_history_p = {}
first_time = True
for time_index, time in enumerate(np.unique(uvdata.time_array)):
echo(
f"{datetime.datetime.now()} Working on time {time_index + 1} of {uvdata.Ntimes}...\n",
verbose=verbose,
)
bltsel = np.isclose(uvdata.time_array, time, atol=1e-7, rtol=0.0)
frac_unflagged = np.count_nonzero(~uvdata.flag_array[bltsel, 0, :, polnum]) / (
uvdata.Nbls * uvdata.Nfreqs
)
# check that fraction of unflagged data > skip_threshold.
if frac_unflagged >= skip_threshold:
rmsdata = np.sqrt(
np.mean(
np.abs(uvdata.data_array[bltsel, 0, :, polnum][~uvdata.flag_array[bltsel, 0, :, polnum]]) ** 2.0
)
)
echo(f"{datetime.datetime.now()} Tensorizing data...\n", verbose=verbose)
data_r, data_i, wgts = tensorize_data(
uvdata,
corr_inds=corr_inds,
ants_map=ants_map,
polarization=pol,
time=time,
data_scale_factor=rmsdata,
weights=weights,
nsamples_in_weights=nsamples_in_weights,
dtype=dtype,
)
if sky_model is not None:
echo(f"{datetime.datetime.now()} Tensorizing sky model...\n", verbose=verbose)
sky_model_r, sky_model_i, _ = tensorize_data(
sky_model,
corr_inds=corr_inds,
ants_map=ants_map,
polarization=pol,
time=time,
data_scale_factor=rmsdata,
weights=weights,
dtype=dtype,
)
else:
sky_model_r, sky_model_i = None, None
if first_time or not init_guesses_from_previous_time_step:
first_time = False
echo(f"{datetime.datetime.now()} Tensorizing Gains...\n", verbose=verbose)
g_r, g_i = tensorize_gains(gains, dtype=dtype, time=time, polarization=pol)
# generate initial guess for foreground coeffs.
echo(
f"{datetime.datetime.now()} Tensorizing Foreground coeffs...\n",
verbose=verbose,
)
fg_r = tensorize_fg_coeffs(
data=data_r,
wgts=wgts,
fg_model_comps=fg_model_comps,
verbose=verbose,
notebook_progressbar=notebook_progressbar,
)
fg_i = tensorize_fg_coeffs(
data=data_i,
wgts=wgts,
fg_model_comps=fg_model_comps,
verbose=verbose,
notebook_progressbar=notebook_progressbar,
)
if use_model_snr_weights:
wgts_model = [fg_model(fgr, fgi, fgc) for fgr, fgi, fgc in zip(fg_r, fg_i, fg_model_comps)]
wgts = [(tf.square(wm[0]) + tf.square(wm[1])) * w for wm, w in zip(wgts_model, wgts)]
del wgts_model
# renormalize
wgts_sum = np.sum([np.sum(w) for w in wgts])
wgts = [w / wgts_sum for w in wgts]
(g_r, g_i, fg_r, fg_i, fit_history_p[time_index],) = fit_gains_and_foregrounds(
g_r=g_r,
g_i=g_i,
fg_r=fg_r,
fg_i=fg_i,
data_r=data_r,
data_i=data_i,
wgts=wgts,
fg_comps=fg_model_comps,
corr_inds=corr_inds,
optimizer=optimizer,
use_min=use_min,
freeze_model=freeze_model,
notebook_progressbar=notebook_progressbar,
verbose=verbose,
tol=tol,
dtype=dtype,
maxsteps=maxsteps,
graph_mode=graph_mode,
n_profile_steps=n_profile_steps,
profile_log_dir=profile_log_dir,
sky_model_r=sky_model_r,
sky_model_i=sky_model_i,
model_regularization=model_regularization,
**opt_kwargs,
)
# insert into model uvdata.
insert_model_into_uvdata_tensor(
uvdata=model,
time=time,
polarization=pol,
ants_map=ants_map,
red_grps=red_grps,
model_r=yield_fg_model_array(
fg_model_comps=fg_model_comps,
fg_coeffs=fg_r,
corr_inds=corr_inds,
nants=uvdata.Nants_data,
nfreqs=uvdata.Nfreqs,
),
model_i=yield_fg_model_array(
fg_model_comps=fg_model_comps,
fg_coeffs=fg_i,
corr_inds=corr_inds,
nants=uvdata.Nants_data,
nfreqs=uvdata.Nfreqs,
),
scale_factor=rmsdata,
)
# insert gains into uvcal
insert_gains_into_uvcal(
uvcal=gains,
time=time,
polarization=pol,
gains_re=g_r,
gains_im=g_i,
)
else:
echo(
f"{datetime.datetime.now()}: Only {frac_unflagged * 100}-percent of data unflagged. Skipping...\n",
verbose=verbose,
)
flag_poltime(resid, time=time, polarization=pol)
flag_poltime(gains, time=time, polarization=pol)
flag_poltime(model, time=time, polarization=pol)
fit_history[polnum] = "skipped!"
# normalize on sky model if we use post-hoc regularization
if not freeze_model and model_regularization == "post_hoc" and np.any(~model.flag_array[bltsel]):
renormalize(
uvdata_reference_model=sky_model,
uvdata_deconv=model,
gains=gains,
polarization=pol,
time=time,
additional_flags=uvdata.flag_array,
)
fit_history[polnum] = fit_history_p
model_with_gains = cal_utils.apply_gains(model, gains, inverse=True)
if not correct_model:
model = model_with_gains
resid.data_array -= model_with_gains.data_array
resid.data_array[model_with_gains.flag_array] = 0.0 # set resid to zero where model is flagged.
resid.data_array[uvdata.flag_array] = 0.0 # also set resid to zero where data is flagged.
if correct_resid:
resid = cal_utils.apply_gains(resid, gains)
return model, resid, gains, fit_history
def flag_poltime(data_object, time, polarization):
    """Flag (and blank) all samples at one time / polarization, in place.

    For a UVData object: flags and zeros the data for every baseline-time
    matching `time` at the given polarization.
    For a UVCal object: sets gains to unity and flags all antennas at the
    matching time / jones index.

    Parameters
    ----------
    data_object: UVData or UVCal
        object modified in place.
    time: float
        time (same units as data_object.time_array) matched to within 1e-7.
    polarization: str
        polarization string, converted with uvutils.polstr2num.

    Raises
    ------
    ValueError
        if data_object is neither a UVData nor a UVCal.
    """
    if isinstance(data_object, UVData):
        time_mask = np.isclose(data_object.time_array, time, atol=1e-7, rtol=0.0)
        pol_number = uvutils.polstr2num(polarization, x_orientation=data_object.x_orientation)
        pol_index = np.where(data_object.polarization_array == pol_number)[0][0]
        data_object.flag_array[time_mask, :, :, pol_index] = True
        data_object.data_array[time_mask, :, :, pol_index] = 0.0
    elif isinstance(data_object, UVCal):
        pol_number = uvutils.polstr2num(polarization, x_orientation=data_object.x_orientation)
        pol_index = np.where(data_object.jones_array == pol_number)[0][0]
        time_index = np.where(np.isclose(data_object.time_array, time, atol=1e-7, rtol=0.0))[0][0]
        # unity gains + flags mark the poltime as unusable without NaNs.
        data_object.gain_array[:, 0, :, time_index, pol_index] = 1.0
        data_object.flag_array[:, 0, :, time_index, pol_index] = True
    else:
        raise ValueError("only supports data_object that is UVCal or UVData.")
def calibrate_and_model_mixed(
    uvdata,
    horizon=1.0,
    min_dly=0.0,
    offset=0.0,
    ant_dly=0.0,
    include_autos=False,
    verbose=False,
    red_tol=1.0,
    red_tol_freq=0.5,
    n_angle_bins=200,
    notebook_progressbar=False,
    use_redundancy=False,
    use_tensorflow_to_derive_modeling_comps=False,
    eigenval_cutoff=1e-10,
    dtype_matinv=np.float64,
    require_exact_angle_match=True,
    angle_match_tol=1e-3,
    grp_size_threshold=5,
    model_comps_dict=None,
    save_dict_to=None,
    **fitting_kwargs,
):
    """Simultaneously solve for gains and model foregrounds with a mix of DPSS vectors
    for baselines with no frequency redundancy and simple_cov components for
    groups of baselines that have some frequency redundancy.

    Parameters
    ----------
    uvdata: UVData object.
        dataset to calibrate and filter.
    horizon: float, optional
        fraction of baseline delay length to model with dpss modes
        unitless.
        default is 1.
    min_dly: float, optional
        minimum delay to model with dpss models.
        in units of ns.
        default is 0.
    offset: float optional
        offset off of horizon wedge to include in dpss delay range.
        in units of ns.
        default is 0.
    ant_dly: float, optional
        intrinsic chromaticity of each antenna element
        in units of ns.
        default is 0.
    include_autos: bool, optional
        if true, include autocorrelations in fitting.
        default is False.
    verbose: bool, optional
        lots of text output
        default is False.
    red_tol: float, optional
        tolerance for treating baselines as redundant (meters)
        default is 1.0
    red_tol_freq: float, optional
        tolerance for treating two baselines as having some
        frequency redundancy. When frequency redundancy exists, baselines
        will be modeled jointly.
        default is 0.5
    n_angle_bins: int, optional
        number of angular bins to use between -pi and pi to compare baselines
        default is 200
    notebook_progressbar: bool, optional
        if True, show graphical notebook progress bar that looks good in jupyter.
        default is False.
    use_redundancy: bool, optional
        If True, model all baselines within each redundant group with the same components
        If False, model each baseline within each redundant group with separate components.
        default is False.
    use_tensorflow_to_derive_modeling_comps: bool, optional
        Use tensorflow methods to derive multi-baseline modeling components.
        recommended if you have a GPU with enough memory to perform spectral decomposition
        of multi-baseline covariance matrices.
        default is False.
    eigenval_cutoff: float, optional
        threshold of eigenvectors to include in modeling components.
        default is 1e-10.
    dtype_matinv: numpy.dtype, optional
        data type to use for deriving modeling components.
        default is np.float64 (need higher precision for cov-mat like calculation)
    require_exact_angle_match: bool, optional
        passed to modeling.get_uv_overlapping_grps_conjugated; presumably requires
        baselines to match in angle to within angle_match_tol to be grouped — TODO confirm.
        default is True.
    angle_match_tol: float, optional
        angular tolerance used with require_exact_angle_match.
        default is 1e-3.
    grp_size_threshold: int, optional
        groups with number of elements less then this value are split up into single baselines.
        default is 5.
    model_comps_dict: dict, optional
        dictionary mapping fitting groups to numpy.ndarray see modeling.yield_mixed_comps
        for more specifics.
        default is None -> compute fitting groups automatically.
    save_dict_to: str, optional
        path to save the computed model_comps_dict to via np.save
        (a pickled .npy file; reload with np.load(..., allow_pickle=True)).
        default is None -> do not save.
    fitting_kwargs: kwarg dict
        additional kwargs for calibrate_and_model_tensor.
        see docstring of calibrate_and_model_tensor.

    Returns
    -------
    model: UVData object
        uvdata object containing DPSS model of intrinsic foregrounds.
    resid: UVData object
        uvdata object containing residuals after subtracting model times gains and applying gains.
    gains: UVCal object
        uvcal object containing fitted gains.
    fit_history:
        dictionary containing fit history for each time-step and polarization in the data with fields:
        'loss_history': list of values of the loss function in each minimization iteration.
    """
    # get fitting groups
    fitting_grps, blvecs, _, _ = modeling.get_uv_overlapping_grps_conjugated(
        uvdata,
        red_tol=red_tol,
        include_autos=include_autos,
        red_tol_freq=red_tol_freq,
        n_angle_bins=n_angle_bins,
        notebook_progressbar=notebook_progressbar,
        require_exact_angle_match=require_exact_angle_match,
        angle_match_tol=angle_match_tol,
    )
    # derive modeling components only when the caller did not supply them.
    if model_comps_dict is None:
        model_comps_dict = modeling.yield_mixed_comps(
            fitting_grps,
            blvecs,
            uvdata.freq_array[0],
            eigenval_cutoff=eigenval_cutoff,
            use_tensorflow=use_tensorflow_to_derive_modeling_comps,
            ant_dly=ant_dly,
            horizon=horizon,
            offset=offset,
            min_dly=min_dly,
            verbose=verbose,
            dtype=dtype_matinv,
            notebook_progressbar=notebook_progressbar,
            grp_size_threshold=grp_size_threshold,
        )
    # np.save pickles the dict into a single .npy file.
    if save_dict_to is not None:
        np.save(save_dict_to, model_comps_dict)
    (model, resid, gains, fitted_info,) = calibrate_and_model_tensor(
        uvdata=uvdata,
        fg_model_comps_dict=model_comps_dict,
        include_autos=include_autos,
        verbose=verbose,
        notebook_progressbar=notebook_progressbar,
        use_redundancy=use_redundancy,
        **fitting_kwargs,
    )
    return model, resid, gains, fitted_info
def calibrate_and_model_dpss(
    uvdata,
    horizon=1.0,
    min_dly=0.0,
    offset=0.0,
    include_autos=False,
    verbose=False,
    red_tol=1.0,
    notebook_progressbar=False,
    fg_model_comps_dict=None,
    **fitting_kwargs,
):
    """Simultaneously solve for gains and model foregrounds with DPSS vectors.

    Parameters
    ----------
    uvdata: UVData object.
        dataset to calibrate and filter.
    horizon: float, optional
        fraction of baseline delay length to model with dpss modes
        unitless.
        default is 1.
    min_dly: float, optional
        minimum delay to model with dpss models.
        in units of ns.
        default is 0.
    offset: float optional
        offset off of horizon wedge to include in dpss delay range.
        in units of ns.
        default is 0.
    include_autos: bool, optional
        if true, include autocorrelations in fitting.
        default is False.
    verbose: bool, optional
        lots of text output
        default is False.
    red_tol: float, optional
        tolerance for treating baselines as redundant (meters)
        default is 1.0
    notebook_progressbar: bool, optional
        use progress bar optimized for notebook output.
        default is False.
    fg_model_comps_dict: dict, optional
        dictionary containing precomputed foreground model components.
        Currently only supported if use_redundancy is False.
        default is None -> derive DPSS components from uvdata.
    fitting_kwargs: kwarg dict
        additional kwargs for calibrate_and_model_tensor.
        see docstring of calibrate_and_model_tensor.

    Returns
    -------
    model: UVData object
        uvdata object containing DPSS model of intrinsic foregrounds.
    resid: UVData object
        uvdata object containing residuals after subtracting model times gains and applying gains.
    gains: UVCal object
        uvcal object containing fitted gains.
    fit_history:
        dictionary containing fit history for each time-step and polarization in the data with fields:
        'loss_history': list of values of the loss function in each minimization iteration.
    """
    # BUG FIX: fg_model_comps_dict was previously accepted (and documented)
    # but silently ignored — components were always recomputed. Only derive
    # them when the caller did not provide a precomputed dict.
    if fg_model_comps_dict is None:
        fg_model_comps_dict = modeling.yield_pbl_dpss_model_comps(
            uvdata,
            horizon=horizon,
            min_dly=min_dly,
            offset=offset,
            include_autos=include_autos,
            red_tol=red_tol,
            notebook_progressbar=notebook_progressbar,
            verbose=verbose,
        )
    model, resid, gains, fitted_info = calibrate_and_model_tensor(
        uvdata=uvdata,
        fg_model_comps_dict=fg_model_comps_dict,
        include_autos=include_autos,
        verbose=verbose,
        notebook_progressbar=notebook_progressbar,
        **fitting_kwargs,
    )
    return model, resid, gains, fitted_info
def fg_model(fg_r, fg_i, fg_comps):
    """Project foreground coefficients onto their modeling components.

    Returns a (real, imaginary) pair of foreground visibility tensors,
    each the coefficient-weighted sum of fg_comps over axis 0.
    """
    return (
        tf.reduce_sum(fg_r * fg_comps, axis=0),
        tf.reduce_sum(fg_i * fg_comps, axis=0),
    )
def data_model(g_r, g_i, fg_r, fg_i, fg_comps, ant0_inds, ant1_inds):
    """Compute uncalibrated model visibilities: g0 * conj(g1) * v.

    Gathers per-baseline gain components for each correlation's two antennas,
    forms the foreground visibility v from (fg_r, fg_i, fg_comps), and returns
    the (real, imaginary) parts of the gain-multiplied model.
    """
    # per-baseline real/imag gain factors for antenna 0 and antenna 1.
    g_r0 = tf.gather(g_r, ant0_inds)
    g_r1 = tf.gather(g_r, ant1_inds)
    g_i0 = tf.gather(g_i, ant0_inds)
    g_i1 = tf.gather(g_i, ant1_inds)
    vis_r, vis_i = fg_model(fg_r, fg_i, fg_comps)
    # g0 * conj(g1) = (g_r0*g_r1 + g_i0*g_i1) + i*(g_i0*g_r1 - g_r0*g_i1);
    # multiply by v = vis_r + i*vis_i and split into real/imag parts.
    real_part = (g_r0 * g_r1 + g_i0 * g_i1) * vis_r + (g_r0 * g_i1 - g_i0 * g_r1) * vis_i
    imag_part = (g_i0 * g_r1 - g_r0 * g_i1) * vis_r + (g_r0 * g_r1 + g_i0 * g_i1) * vis_i
    return real_part, imag_part
def mse(model_r, model_i, data_r, data_i, wgts):
    """Weighted sum of squared residuals between model and data (real + imag)."""
    resid_r = data_r - model_r
    resid_i = data_i - model_i
    return tf.reduce_sum((tf.square(resid_r) + tf.square(resid_i)) * wgts)
def mse_chunked(g_r, g_i, fg_r, fg_i, fg_comps, nchunks, data_r, data_i, wgts, ant0_inds, ant1_inds, dtype=np.float32):
    """Total weighted MSE between data and the gain-multiplied foreground model,
    accumulated over all chunks.

    Parameters mirror data_model / mse, with fg_r, fg_i, fg_comps, data_r,
    data_i, wgts, ant0_inds, ant1_inds being per-chunk lists of length nchunks.
    dtype sets the precision of the loss accumulator. Returns a scalar tensor.
    """
    zero = tf.constant(0.0, dtype)
    chunk_losses = []
    for chunk in range(nchunks):
        chunk_model_r, chunk_model_i = data_model(
            g_r, g_i, fg_r[chunk], fg_i[chunk], fg_comps[chunk], ant0_inds[chunk], ant1_inds[chunk]
        )
        # seed each entry with a typed zero so the accumulator dtype is enforced.
        chunk_losses.append(zero + mse(chunk_model_r, chunk_model_i, data_r[chunk], data_i[chunk], wgts[chunk]))
    return tf.reduce_sum(tf.stack(chunk_losses))
def mse_chunked_sum_regularized(
    g_r,
    g_i,
    fg_r,
    fg_i,
    fg_comps,
    nchunks,
    data_r,
    data_i,
    wgts,
    ant0_inds,
    ant1_inds,
    prior_r_sum,
    prior_i_sum,
    dtype=np.float32,
):
    """Chunked weighted MSE plus a sum-regularization penalty.

    In addition to the per-chunk weighted MSE (see mse_chunked), penalizes the
    squared deviation of the weighted sums of the model's real and imaginary
    parts from prior_r_sum / prior_i_sum. Returns a scalar tensor.
    """
    zero = tf.constant(0.0, dtype)
    chunk_losses = []
    model_r_sums = []
    model_i_sums = []
    for chunk in range(nchunks):
        chunk_model_r, chunk_model_i = data_model(
            g_r, g_i, fg_r[chunk], fg_i[chunk], fg_comps[chunk], ant0_inds[chunk], ant1_inds[chunk]
        )
        # weighted sums of real / imag model parts feed the regularizer;
        # typed zeros enforce the accumulator dtype.
        model_r_sums.append(zero + tf.reduce_sum(chunk_model_r * wgts[chunk]))
        model_i_sums.append(zero + tf.reduce_sum(chunk_model_i * wgts[chunk]))
        chunk_losses.append(zero + mse(chunk_model_r, chunk_model_i, data_r[chunk], data_i[chunk], wgts[chunk]))
    return (
        tf.reduce_sum(tf.stack(chunk_losses))
        + tf.square(tf.reduce_sum(tf.stack(model_r_sums)) - prior_r_sum)
        + tf.square(tf.reduce_sum(tf.stack(model_i_sums)) - prior_i_sum)
    )
def read_calibrate_and_model_dpss(
    input_data_files,
    input_model_files=None,
    input_gain_files=None,
    resid_outfilename=None,
    gain_outfilename=None,
    model_outfilename=None,
    fitted_info_outfilename=None,
    x_orientation="east",
    clobber=False,
    bllen_min=0.0,
    bllen_max=np.inf,
    bl_ew_min=0.0,
    ex_ants=None,
    select_ants=None,
    gpu_index=None,
    gpu_memory_limit=None,
    precision=32,
    use_autocorrs_in_weights=False,
    **calibration_kwargs,
):
    """
    Driver function for using calamity with DPSS modeling.

    Parameters
    ----------
    input_data_files: list of strings or UVData object.
        list of paths to input files to read in and calibrate.
    input_model_files: list of strings or UVData object, optional
        list of paths to model files for overall phase/amp reference.
        Default is None -> use input files as model for overall
        phase and amplitude calibration.
    input_gain_files: list of strings or UVCal object, optional
        list of paths to gain files to use as initial guesses for calibration.
    resid_outfilename: str, optional
        path for file to write residuals.
        default is None -> don't write out residuals.
    gain_outfilename: str, optional
        path to gain calfits to write fitted gains.
        default is None -> don't write out gains.
    model_outfilename: str, optional
        path to file to write model output.
        default is None -> Don't write model.
    fitted_info_outfilename: str, optional
        path to write fitting info to.
        NOTE: currently unused — fitting info is never written (see body).
    x_orientation: str, optional
        x_orientation to stamp into the output gain object before writing.
        default is "east".
    clobber: bool, optional
        overwrite existing output files.
        default is False.
    bllen_min: float, optional
        select all baselines with length greater then this value [meters].
        default is 0.0
    bllen_max: float, optional
        select only baselines with length less then this value [meters].
        default is np.inf.
    bl_ew_min: float, optional
        select all baselines with EW projected length greater then this value [meters].
        default is 0.0
    ex_ants: list, optional
        antennas to exclude from the data selection.
    select_ants: list, optional
        antennas to exclusively keep in the data selection.
    gpu_index: int, optional
        limit visible GPUs to be the index of this GPU.
        default: None -> all GPUs are visible.
    gpu_memory_limit: float, optional
        GiB of memory on GPU that can be used.
        default None -> all memory available.
    precision: int, optional
        32 or 64; float precision used in gradient descent.
        default is 32.
    use_autocorrs_in_weights: bool, optional
        if True, use smooth fits to autocorrelations as
        inverse variance weights.
        default is False.
    calibration_kwargs: kwarg dict
        see kwargs for calibrate_and_model_dpss()

    Returns
    -------
    model_fit: UVData object
        uvdata object containing DPSS model of intrinsic foregrounds.
    resid_fit: UVData object
        uvdata object containing residuals after subtracting model times gains and applying gains.
    gains_fit: UVCal object
        uvcal object containing fitted gains.
    fit_info:
        dictionary containing fit history for each time-step and polarization in the data with fields:
        'loss_history': list of values of the loss function in each minimization iteration.
    """
    # Optionally pin tensorflow to a single GPU and cap its memory.
    gpus = tf.config.list_physical_devices("GPU")
    if gpu_index is not None:
        # See https://www.tensorflow.org/guide/gpu
        if gpus:
            if gpu_memory_limit is None:
                tf.config.set_visible_devices(gpus[gpu_index], "GPU")
            else:
                # memory_limit is specified in MiB; convert from GiB.
                tf.config.set_logical_device_configuration(
                    gpus[gpu_index], [tf.config.LogicalDeviceConfiguration(memory_limit=gpu_memory_limit * 1024)]
                )
            logical_gpus = tf.config.list_logical_devices("GPU")
            print(len(gpus), "Physical GPUs,", len(logical_gpus), "Logical GPU")
    # Accept a single path, a list of paths, or an in-memory UVData object.
    if isinstance(input_data_files, str):
        input_data_files = [input_data_files]
    if isinstance(input_data_files, list):
        uvd = UVData()
        uvd.read(input_data_files)
    else:
        uvd = input_data_files
    if use_autocorrs_in_weights:
        weights = get_auto_weights(uvd)
    else:
        weights = None
    utils.select_baselines(
        uvd, bllen_min=bllen_min, bllen_max=bllen_max, bl_ew_min=bl_ew_min, ex_ants=ex_ants, select_ants=select_ants
    )
    # Same path/list/object handling for the sky model.
    if isinstance(input_model_files, str):
        input_model_files = [input_model_files]
    if input_model_files is not None:
        if isinstance(input_model_files, list):
            uvd_model = UVData()
            uvd_model.read(input_model_files)
        else:
            uvd_model = input_model_files
    else:
        uvd_model = None
    if uvd_model is not None:
        # NOTE(review): this selects on `uvd` (already selected above), not on
        # `uvd_model` — looks like it was meant to subset the model; confirm.
        utils.select_baselines(uvd, bllen_min=bllen_min, bllen_max=bllen_max, bl_ew_min=bl_ew_min)
    # Same path/list/object handling for initial gains.
    if isinstance(input_gain_files, str):
        input_gain_files = [input_gain_files]
    if input_gain_files is not None:
        if isinstance(input_gain_files, list):
            uvc = UVCal()
            uvc.read_calfits(input_gain_files)
        else:
            uvc = input_gain_files
    else:
        uvc = None
    # run calibration with specified GPU device.
    dtype = {32: np.float32, 64: np.float64}[precision]
    if gpu_index is not None and gpus:
        with tf.device(f"/device:GPU:{gpus[gpu_index].name[-1]}"):
            model_fit, resid_fit, gains_fit, fit_info = calibrate_and_model_dpss(
                uvdata=uvd, sky_model=uvd_model, gains=uvc, dtype=dtype, weights=weights, **calibration_kwargs
            )
    else:
        model_fit, resid_fit, gains_fit, fit_info = calibrate_and_model_dpss(
            uvdata=uvd, sky_model=uvd_model, gains=uvc, dtype=dtype, weights=weights, **calibration_kwargs
        )
    if resid_outfilename is not None:
        resid_fit.write_uvh5(resid_outfilename, clobber=clobber)
    if gain_outfilename is not None:
        gains_fit.x_orientation = x_orientation
        gains_fit.write_calfits(gain_outfilename, clobber=clobber)
    if model_outfilename is not None:
        model_fit.write_uvh5(model_outfilename, clobber=clobber)
    # don't write fitting_info_outfilename for now.
    fit_info["calibration_kwargs"] = calibration_kwargs
    fit_info["calibration_kwargs"]["dtype"] = dtype
    # don't write fitting_info_outfilename for now.
    return model_fit, resid_fit, gains_fit, fit_info
def input_output_parser():
    """Build the base CLI parser with input/output and selection arguments.

    Returns
    -------
    ap: argparse.ArgumentParser
        parser containing the "Input and Output Arguments." group.
    """
    ap = argparse.ArgumentParser()
    sp = ap.add_argument_group("Input and Output Arguments.")
    sp.add_argument("--input_data_files", type=str, nargs="+", help="paths to data files to calibrate.", required=True)
    sp.add_argument(
        "--input_model_files", type=str, nargs="+", help="paths to model files to set overall amplitude and phase."
    )
    sp.add_argument("--input_gain_files", type=str, nargs="+", help="paths to gains to use as a starting point.")
    sp.add_argument("--resid_outfilename", type=str, default=None, help="postfix for resid output file.")
    sp.add_argument("--model_outfilename", type=str, default=None, help="postfix for foreground model file.")
    sp.add_argument("--gain_outfilename", type=str, default=None, help="path for writing fitted gains.")
    # BUG FIX: default was the string "False", which is truthy — any code
    # checking `if args.clobber:` would always overwrite. Use the bool False.
    sp.add_argument("--clobber", action="store_true", default=False, help="Overwrite existing outputs.")
    sp.add_argument("--x_orientation", default="east", type=str, help="x_orientation of feeds to set in output gains.")
    sp.add_argument(
        "--bllen_min", default=0.0, type=float, help="minimum baseline length to include in calibration and outputs."
    )
    sp.add_argument(
        "--bllen_max", default=np.inf, type=float, help="maximum baseline length to include in calibration and outputs."
    )
    sp.add_argument(
        "--bl_ew_min",
        default=0.0,
        type=float,
        help="minimum EW baseline component to include in calibration and outputs.",
    )
    sp.add_argument(
        "--ex_ants", default=None, type=int, nargs="+", help="Antennas to exclude from calibration and modeling."
    )
    sp.add_argument(
        "--select_ants",
        default=None,
        type=int,
        nargs="+",
        help="Antennas to select exclusively for calibration and modeling.",
    )
    sp.add_argument("--gpu_index", default=None, type=int, help="Index of GPU to run on (if on a multi-GPU machine).")
    sp.add_argument("--gpu_memory_limit", default=None, type=int, help="Limit GPU memory use to this many GBytes.")
    sp.add_argument("--precision", default=32, type=int, help="Number of bits to keep track of.")
    return ap
def fitting_argparser():
    """Build the general-fitting CLI parser.

    Extends input_output_parser() with a "General Fitting Arguments." group
    controlling the gradient-descent fit (optimizer, tolerance, weighting and
    regularization options).

    Returns
    -------
    ap: argparse.ArgumentParser
        parser with input/output plus general fitting arguments.
    """
    ap = input_output_parser()
    sp = ap.add_argument_group("General Fitting Arguments.")
    sp.add_argument(
        "--tol",
        type=float,
        default=1e-14,
        help="Stop gradient descent after cost function converges to within this value.",
    )
    sp.add_argument(
        "--optimizer", type=str, default="Adamax", help="First order optimizer to use for gradient descent."
    )
    sp.add_argument("--maxsteps", type=int, default=10000, help="Max number of steps to iterate during optimization.")
    sp.add_argument("--verbose", default=False, action="store_true", help="lots of text outputs.")
    sp.add_argument(
        "--use_min",
        default=False,
        action="store_true",
        help="Use params for minimum cost function derived. Otherwise, use the params last visited by the descent. Avoids momentum overshoot.",
    )
    sp.add_argument(
        "--use_redundancy",
        default=False,
        action="store_true",
        help="Model redundant visibilities with the same set of foreground parameters.",
    )
    # BUG FIX: this store_true flag had default=True, so it was permanently on
    # and the flag itself could never change anything. The fitting function's
    # documented default is correct_model=False; the flag now enables it.
    sp.add_argument(
        "--correct_model", default=False, action="store_true", help="Remove gain effects from foreground model."
    )
    sp.add_argument(
        "--correct_resid", default=False, action="store_true", help="Apply fitted gains to the fitted residuals."
    )
    sp.add_argument(
        "--graph_mode",
        default=False,
        action="store_true",
        help="Pre-compile computational graph before running gradient descent. Not recommended for GPUs.",
    )
    sp.add_argument(
        "--init_guesses_from_previous_time_step",
        default=False,
        action="store_true",
        help="initialize gain and foreground guesses from previous time step when calibrating multiple times.",
    )
    sp.add_argument("--learning_rate", type=float, default=1e-2, help="gradient descent learning rate.")
    sp.add_argument(
        "--red_tol", type=float, default=1.0, help="Tolerance for determining redundancy between baselines [meters]."
    )
    sp.add_argument(
        "--skip_threshold",
        type=float,
        default=0.5,
        help="Skip and flag time/polarization if more than this fraction of data is flagged.",
    )
    sp.add_argument("--model_regularization", type=str, default="post_hoc")
    sp.add_argument(
        "--nsamples_in_weights", default=False, action="store_true", help="Weight contributions to MSE by nsamples."
    )
    sp.add_argument(
        "--use_model_snr_weights",
        default=False,
        action="store_true",
        help="If True, weight contributions to MSE as proportional to SNR.",
    )
    sp.add_argument(
        "--use_autocorrs_in_weights",
        default=False,
        action="store_true",
        help="If True, use autocorrelations to derive relative SNR weights.",
    )
    return ap
def dpss_fit_argparser():
    """Build the full DPSS-fitting CLI parser.

    Extends fitting_argparser() with a "DPSS Specific Fitting Arguments."
    group (horizon, min_dly, offset).

    Returns
    -------
    parser: argparse.ArgumentParser
        parser with input/output, general fitting, and DPSS arguments.
    """
    parser = fitting_argparser()
    dpss_group = parser.add_argument_group("DPSS Specific Fitting Arguments.")
    dpss_group.add_argument(
        "--horizon", default=1.0, type=float, help="Fraction of horizon delay to model with DPSS modes."
    )
    dpss_group.add_argument("--min_dly", default=0.0, type=float, help="Minimum delay [ns] to model with DPSS modes.")
    dpss_group.add_argument(
        "--offset", default=0.0, type=float, help="Offset from horizon delay [ns] to model with DPSS modes."
    )
    return parser
|
import logging
import sys
from difflib import SequenceMatcher
from fetch_menu import fetch_menu_image
from read_menu import MenuReader
from chrome_proxy import ChromeProxy
import settings
def word_similarity(a, b):
    """Return a 0..1 similarity ratio between strings a and b (difflib)."""
    matcher = SequenceMatcher(None, a, b)
    return matcher.ratio()
class WordDictionary:
    """Word-translation dictionary loaded from a 'word,translation' CSV file."""

    def __init__(self, filename):
        """Load 'word,translation' lines from *filename* (UTF-8).

        Lines without at least two comma-separated fields, or with an empty
        first field, are skipped. Fields beyond the second are ignored.
        """
        entries = dict()
        with open(filename, encoding='utf-8') as file:
            for raw_line in file:
                parts = raw_line.split(',')
                # BUG FIX: the original filter only required len(parts) >= 1
                # but then indexed parts[1], raising IndexError for any line
                # without a comma. Require both key and translation fields.
                if len(parts) >= 2 and len(parts[0]) >= 1:
                    entries[parts[0]] = parts[1].strip()
        self.dictionary = entries

    def lookup(self, word, allow_similarity=1.0):
        """Return the translation for *word*, or None.

        Tries an exact key match first; otherwise returns the translation of
        the first key whose similarity to *word* is >= allow_similarity
        (not necessarily the best match).
        """
        if word in self.dictionary:
            return self.dictionary[word]
        for key in self.dictionary.keys():
            if word_similarity(key, word) >= allow_similarity:
                return self.dictionary[key]
        return None
def to_words(name):
    """Tokenize *name* into alphabetic runs and single non-alphabetic characters.

    E.g. "ab cd" -> ["ab", " ", "cd"]; every non-letter becomes its own token.
    """
    tokens = []
    pos = 0
    length = len(name)
    while pos < length:
        if name[pos].isalpha():
            # consume a maximal run of letters as one token.
            start = pos
            while pos < length and name[pos].isalpha():
                pos += 1
            tokens.append(name[start:pos])
        else:
            tokens.append(name[pos])
            pos += 1
    return tokens
def japanise_menu(name, word_dict):
    """Translate a menu line word-by-word using *word_dict*.

    Tokenizes *name*, drops single-space tokens, and replaces each token with
    its dictionary translation (fuzzy match at similarity >= 0.8, lowercase
    lookup) when one exists; untranslated tokens are kept as-is. Returns the
    concatenated result.
    """
    translated = []
    for token in to_words(name):
        if token == ' ':
            continue
        replacement = word_dict.lookup(token.lower(), allow_similarity=0.8)
        translated.append(token if replacement is None else replacement)
    return "".join(translated)
def getLogger():
    """Return the module logger, configured to log DEBUG to stdout and to 'nirvanam.log'.

    BUG FIX: handlers are now attached only once — previously every call
    added a fresh stream and file handler to the same logger, duplicating
    every log line on repeated calls.
    """
    logger = logging.getLogger(__name__)
    logger.setLevel(logging.DEBUG)
    if not logger.handlers:
        stream_handler = logging.StreamHandler(sys.stdout)
        stream_handler.setLevel(logging.DEBUG)
        logger.addHandler(stream_handler)
        file_handler = logging.FileHandler('nirvanam.log', 'a+')
        file_handler.setLevel(logging.DEBUG)
        logger.addHandler(file_handler)
    return logger
def main():
    """Fetch the menu image, OCR it into menu lines, translate each line, and print."""
    logger = getLogger()
    menu_reader = MenuReader(settings.GOOGLE_API_KEY, logger=logger)
    # Optional HTTP proxy for image fetching, built from settings.
    proxy = None
    if settings.PROXY_HOST is not None:
        proxy = ChromeProxy(settings.PROXY_HOST,
                            settings.PROXY_PORT,
                            settings.PROXY_USERNAME,
                            settings.PROXY_PASSWORD)
    # Try successive candidate images until one yields more than 5 menu lines.
    # NOTE(review): if the generator yields nothing, `menus` is unbound
    # (NameError below); if the last attempt returns None, the loops below
    # raise TypeError. Consider guarding — confirm intended behavior.
    for image in fetch_menu_image(settings.DATA_DIR,
                                  settings.MENU_IMAGE_NAME,
                                  proxy=proxy):
        logger.debug("Reading menu text...")
        menus = menu_reader.read_menu_image(image)
        logger.debug("Reading menu text...done")
        if menus is not None and len(menus) > 5: break
    logger.debug("menu is")
    for menu in menus: logger.debug("- " + menu)
    # Translate each menu line and append the original in brackets.
    en_jp = WordDictionary(settings.WORD_DICTIONARY)
    result = []
    for menu in menus:
        jp = japanise_menu(menu, en_jp)
        result.append(jp + ' [' + menu + ']')
    print("")
    print("== Today's Special Lunch Menu ==")
    for menu in result: print(menu)
# Script entry point: run the menu fetch/translate pipeline when executed directly.
if __name__ == '__main__':
    main()
|
from typing import TYPE_CHECKING, Callable, Optional
from hpcrocket.typesafety import get_or_raise
from hpcrocket.watcher.watcherthread import WatcherThread, WatcherThreadImpl
try:
from typing import Protocol
except ImportError: # pragma: no cover
from typing_extensions import Protocol # type: ignore
if TYPE_CHECKING:
from hpcrocket.core.slurmbatchjob import SlurmBatchJob, SlurmJobStatus
SlurmJobStatusCallback = Callable[['SlurmJobStatus'], None]
WatcherThreadFactory = Callable[
['SlurmBatchJob', SlurmJobStatusCallback, int],
WatcherThread
]
class NotWatchingError(RuntimeError):
    """Raised when a JobWatcher operation is used before watch() was called."""
    # The previous explicit __init__ only forwarded *args to super() — the
    # inherited RuntimeError.__init__ already does exactly that, so it was
    # removed as redundant boilerplate.
class JobWatcher(Protocol):
    """Structural interface for objects that monitor a running SlurmBatchJob."""

    def watch(self, callback: SlurmJobStatusCallback, poll_interval: int) -> None:
        """
        Starts watching the job in the background.

        Args:
            callback (SlurmJobStatusCallback): A callback that accepts a status update.
            poll_interval (int): The time between poll calls.
        """

    def wait_until_done(self) -> None:
        """
        Blocks until the job has been completed.
        """

    def stop(self) -> None:
        """
        Stops watching the job.
        """
JobWatcherFactory = Callable[['SlurmBatchJob'], JobWatcher]
class JobWatcherImpl:
    """Default JobWatcher backed by a WatcherThread.

    The thread is created lazily by watch(); every other method raises
    NotWatchingError (via get_or_raise) if watch() has not run yet.
    """

    def __init__(self, runner: 'SlurmBatchJob', thread_factory: WatcherThreadFactory = WatcherThreadImpl) -> None:
        self.runner = runner
        self.factory = thread_factory
        self.watching_thread: Optional[WatcherThread] = None

    def watch(self, callback: SlurmJobStatusCallback, poll_interval: int) -> None:
        """Spawn the watcher thread and start polling."""
        thread = self.factory(self.runner, callback, poll_interval)
        self.watching_thread = thread
        thread.start()

    def is_done(self) -> bool:
        """Return True when the watched job has finished."""
        return self._active_thread().is_done()

    def wait_until_done(self) -> None:
        """Block until the watcher thread terminates."""
        self._try_join(self._active_thread())

    def stop(self) -> None:
        """Ask the watcher thread to stop, then wait for it to exit."""
        thread = self._active_thread()
        thread.stop()
        self._try_join(thread)

    def _active_thread(self) -> WatcherThread:
        # Raises NotWatchingError when watch() has not been called yet.
        return get_or_raise(self.watching_thread, NotWatchingError)

    def _try_join(self, watching_thread: WatcherThread) -> None:
        # join() raises RuntimeError if the thread was never started or is the
        # current thread; that case is reported rather than propagated.
        try:
            watching_thread.join()
        except RuntimeError as err:
            print(err)
|
from django.contrib.auth.models import User
from django.http import HttpResponseRedirect
from django.shortcuts import render
from dashboard.forms import BrotherForm, BrotherEditForm
from dashboard.models import Position, Brother
from dashboard.utils import verify_position
from dashboard.views import DashboardUpdateView, DashboardDeleteView
@verify_position([Position.PositionChoices.SECRETARY, Position.PositionChoices.VICE_PRESIDENT, Position.PositionChoices.PRESIDENT, Position.PositionChoices.ADVISER])
def brother_add(request, position_slug):
    """ Renders the view to add a brother.

    On a valid POST, creates the auth User (username/email derived from the
    case ID) and the linked Brother record, then redirects back to the
    officer's position page.

    Args:
        request: Incoming HTTP request.
        position_slug: Slug of the acting officer's position; used for the
            post-save redirect target.
    """
    form = BrotherForm(request.POST or None)
    if request.method == 'POST':
        if form.is_valid():
            # Use the data already validated by is_valid() instead of calling
            # form.clean() again, which would re-run validation a second time.
            instance = form.cleaned_data
            user = User.objects.create_user(instance['case_ID'], instance['case_ID'] + "@case.edu",
                                            instance['password'])
            user.last_name = instance['last_name']
            user.save()
            # Attach the freshly created auth user before persisting.
            brother = form.save(commit=False)
            brother.user = user
            brother.save()
            return HttpResponseRedirect('/' + position_slug)
    context = {
        'title': 'Add New Brother',
        'form': form,
    }
    return render(request, 'model-add.html', context)
@verify_position([Position.PositionChoices.MARSHAL, Position.PositionChoices.VICE_PRESIDENT, Position.PositionChoices.PRESIDENT, Position.PositionChoices.ADVISER])
def ec_brother_view(request, position_slug, brother_id):
    """ Renders the marshal page to view candidate info """
    context = {
        'brother': Brother.objects.get(pk=brother_id),
        'position': position_slug,
    }
    return render(request, "brother-view.html", context)
class ECBrotherEdit(DashboardUpdateView):
    """Executive-only edit view for a Brother record."""

    model = Brother
    template_name = 'generic-forms/base-form.html'
    form_class = BrotherEditForm

    @verify_position([Position.PositionChoices.MARSHAL, Position.PositionChoices.SECRETARY, Position.PositionChoices.VICE_PRESIDENT, Position.PositionChoices.PRESIDENT, Position.PositionChoices.ADVISER])
    def get(self, request, *args, **kwargs):
        # Position check applies to GET; delegate to the standard update view.
        return super().get(request, *args, **kwargs)

    def get_success_url(self):
        # Return to the acting officer's position page after saving.
        return '/' + self.kwargs['position_slug']
class BrotherDelete(DashboardDeleteView):
    """Executive-only delete confirmation view for a Brother record."""

    model = Brother
    template_name = 'generic-forms/base-confirm-delete.html'

    @verify_position([Position.PositionChoices.MARSHAL, Position.PositionChoices.VICE_PRESIDENT, Position.PositionChoices.PRESIDENT, Position.PositionChoices.ADVISER])
    def get(self, request, *args, **kwargs):
        # Position check applies to GET; delegate to the standard delete view.
        return super().get(request, *args, **kwargs)

    def get_success_url(self):
        # Return to the acting officer's position page after deletion.
        return '/' + self.kwargs['position_slug']
|
'''
Exercise: store several words (no accents) in a tuple and, for each word,
print which vowels it contains.
'''
palavras = ('arroz', 'computador', 'piscina', 'copo', 'dentista',
            'lazer', 'mouse', 'telefone', 'vestido', 'bermuda', 'aspirador')
for palavra in palavras:
    # Header line for the word, vowels appended on the same line.
    print(f'\nNa palavra {palavra.upper()} temos ', end='')
    for caractere in palavra:
        if caractere.lower() in 'aeiou':
            print(caractere, end=' ')
|
from collections import defaultdict
import json
from django.http.response import HttpResponse, FileResponse
import pandas as pd
import tempfile
import mimetypes
from django.views.generic import TemplateView
from django.forms import formset_factory, BaseFormSet, modelformset_factory, inlineformset_factory
from django.shortcuts import render
from django.contrib import messages
from django.urls import reverse, reverse_lazy
from django.views.generic.detail import DetailView
from django.http import HttpResponseRedirect
from core.models.view_tables import (ExperimentTemplate,
ExperimentInstance, Edocument,
ReagentMaterialValue, ReagentMaterial,
InventoryMaterial, OutcomeInstance, OutcomeTemplate)
# from core.models.core_tables import RetUUIDField
from core.forms.custom_types import SingleValForm, InventoryMaterialForm, NominalActualForm, ReagentValueForm
from core.forms.custom_types import (ExperimentNameForm, ExperimentTemplateForm,
ReagentForm, BaseReagentFormSet,
PropertyForm, OutcomeInstanceForm, VesselForm,
UploadFileForm)
from core.utilities.utils import experiment_copy
from core.utilities.experiment_utils import (update_dispense_action_set,
get_action_parameter_querysets,
get_material_querysets,
supported_wfs, get_reagent_querysets,
prepare_reagents, generate_experiments_and_save, get_vessel_querysets)
import core.models
from core.models.view_tables import Note, TagAssign, Tag
from core.custom_types import Val
import core.experiment_templates
from core.models.view_tables import Parameter
from core.widgets import ValWidget
#from escalate.core.widgets import ValFormField
# from .crud_view_methods.model_view_generic import GenericModelList
# from .crud_views import LoginRequired
#SUPPORTED_CREATE_WFS = ['liquid_solid_extraction', 'resin_weighing']
#SUPPORTED_CREATE_WFS = supported_wfs()
# Names of the template functions exposed by core.experiment_templates;
# dunder attributes are filtered out so only the real template names remain.
SUPPORTED_CREATE_WFS = [mod for mod in dir(core.experiment_templates) if '__' not in mod]
class BaseUUIDFormSet(BaseFormSet):
    """
    This formset adds a UUID as the kwarg. When the form is rendered,
    the UUID is added as an attribute to the html field. Which when submitted
    can be used to identify where the data goes
    """
    def get_form_kwargs(self, index):
        """Inject the index-th UUID from 'object_uuids' into the form kwargs."""
        kwargs = super().get_form_kwargs(index)
        # 'object_uuids' must be supplied by the caller via form_kwargs; each
        # form receives the UUID matching its position in the formset.
        kwargs['uuid'] = kwargs['object_uuids'][index]
        return kwargs
class CreateExperimentView(TemplateView):
    """Wizard-style view for creating an experiment from an ExperimentTemplate.

    GET renders a template picker. POST either builds the parameter /
    material / reagent entry formsets for the selected template, or (on
    'create_exp') copies the template and saves the submitted values into
    the copy, for the manual or automated workflow respectively.
    """
    template_name = "core/create_experiment.html"
    form_class = ExperimentTemplateForm
    MaterialFormSet = formset_factory(InventoryMaterialForm, extra=0)
    NominalActualFormSet = formset_factory(NominalActualForm, extra=0)
    ReagentFormSet = formset_factory(ReagentForm, extra=0, formset=BaseReagentFormSet)
    def get_context_data(self, **kwargs):
        """Return the base template context (no extra data is added here)."""
        # Select templates that belong to the current lab
        context = super().get_context_data(**kwargs)
        #lab = Actor.objects.get(organization=org_id, person__isnull=True)
        #self.all_experiments = Experiment.objects.filter(parent__isnull=True, lab=lab)
        #context['all_experiments'] = self.all_experiments
        return context
    def get_action_parameter_forms(self, exp_uuid, context):
        """Add a nominal/actual formset for every action parameter of the
        template identified by exp_uuid to the context."""
        # workflow__experiment_workflow_workflow__experiment=exp_uuid
        #q1, q2, q3 = get_action_parameter_querysets(exp_uuid)
        q1 = get_action_parameter_querysets(exp_uuid)
        """
        This happens before copy, in the template. The only way to identify a parameter is
        through a combination of object_description and parameter_def_description.
        When the form is submitted, a copy is created of the template and we have to search
        for the correct parameters using descriptions because UUIDS are new!
        The reason for making a copy after editing parameters is because we cannot update
        a WorkflowActionSet as of Jan 2021. We can only create a new one
        """
        #create empty lists for initial q1-q3
        initial_q1 = []
        '''
        using for loop instead of list comprehension to account for arrays
        this will be basis for implementing new array ui
        '''
        #q1 initial
        for row in q1:
            data = {'value': row.parameter_value, \
                    'uuid': json.dumps([f'{row.object_description}', f'{row.parameter_def_description}'])}
            if not row.parameter_value.null:
                # Pre-fill the 'actual' side with zeros matching the nominal
                # value's shape (scalar vs array) and unit.
                if 'array' in row.parameter_value.val_type.description:
                    data['actual_value'] = Val.from_dict({'type':'array_num', \
                                                        'value':[0]*len(row.parameter_value.value), \
                                                        'unit':row.parameter_value.unit})
                else:
                    data['actual_value'] = Val.from_dict({'type':'num', \
                                                        'value':0, \
                                                        'unit':row.parameter_value.unit})
            initial_q1.append(data)
        q1_details = [f'{row.object_description} : {row.parameter_def_description}' for row in q1]
        context['q1_param_formset'] = self.NominalActualFormSet(initial=initial_q1,
                                                                prefix='q1_param',)
        context['q1_param_details'] = q1_details
        return context
    def get_material_forms(self, exp_uuid, context):
        """Add a formset for selecting inventory materials (one form per
        material slot of the template) to the context."""
        q1 = get_material_querysets(exp_uuid)
        initial_q1 = [{'value': row.inventory_material, 'uuid': json.dumps([f'{row.object_description}'])} for row in q1]
        q1_details = [f'{row.object_description}' for row in q1]
        form_kwargs = {'org_uuid':self.request.session['current_org_id']}
        context['q1_material_formset'] = self.MaterialFormSet(initial=initial_q1,
                                                              prefix='q1_material',
                                                              form_kwargs=form_kwargs)
        context['q1_material_details'] = q1_details
        return context
    def get_colors(self, number_of_colors, colors=['deeppink', 'blueviolet', 'blue', 'coral', 'lightseagreen', 'orange', 'crimson']):
        """Return number_of_colors color names by cycling through the palette.

        NOTE(review): the mutable default list is shared across calls; it is
        only read here, so this is safe as long as callers never mutate it.
        """
        factor = int(number_of_colors/len(colors))
        remainder = number_of_colors % len(colors)
        total_colors = colors*factor + colors[:remainder]
        return total_colors
    def get_reagent_forms(self, exp_template, context):
        """Build one ReagentFormSet per reagent template of exp_template plus
        the vessel and dead-volume forms, and add them all to the context."""
        if 'current_org_id' in self.request.session:
            org_id = self.request.session['current_org_id']
        else:
            org_id = None
        formsets = []
        reagent_template_names = []
        for index, reagent_template in enumerate(exp_template.reagent_templates.all().order_by('description')):
            reagent_template_names.append(reagent_template.description)
            mat_types_list = []
            initial = []
            #for material_type in reagent_template.material_type.all():
            for reagent_material_template in reagent_template.reagent_material_template_rt.all().order_by('description'):
                for reagent_material_value_template in reagent_material_template.reagent_material_value_template_rmt.filter(description='concentration'):
                    material_type = reagent_material_template.material_type
                    mat_types_list.append(material_type)
                    initial.append({'reagent_template_uuid': reagent_material_template.uuid,
                                    'material_type':material_type.uuid,
                                    'desired_concentration':reagent_material_value_template.default_value.nominal_value})
            if mat_types_list:
                fset = self.ReagentFormSet(prefix=f'reagent_{index}',
                                           initial=initial,
                                           form_kwargs={'lab_uuid': org_id,
                                                        'mat_types_list':mat_types_list,
                                                        'reagent_index':index})
                formsets.append(fset)
            #for form in formset:
            # form.fields[]
        context['reagent_formset_helper'] = ReagentForm.get_helper()
        context['reagent_formset_helper'].form_tag = False
        context['reagent_formset'] = formsets
        context['reagent_template_names'] = reagent_template_names
        #get vessel data for selection
        initial_vessel = VesselForm()
        context['vessel_form'] = initial_vessel
        # Dead volume form
        initial = {'value':Val.from_dict({'value':4000, 'unit': 'uL', 'type':'num'})}
        dead_volume_form = SingleValForm(prefix='dead_volume', initial=initial)
        context['dead_volume_form'] = dead_volume_form
        context['colors'] = self.get_colors(len(formsets))
        return context
    def get(self, request, *args, **kwargs):
        """Render the experiment-template selection form."""
        context = self.get_context_data(**kwargs)
        if 'current_org_id' in self.request.session:
            org_id = self.request.session['current_org_id']
        else:
            org_id = None
        context['experiment_template_select_form'] = ExperimentTemplateForm(org_id=org_id)
        return render(request, self.template_name, context)
    def post(self, request, *args, **kwargs):
        """Dispatch POSTs: template selection builds the entry formsets;
        'create_exp' saves a filled-in copy of the chosen template."""
        context = self.get_context_data(**kwargs)
        if 'select_experiment_template' in request.POST:
            exp_uuid = request.POST['select_experiment_template']
            if exp_uuid:
                request.session['experiment_template_uuid'] = exp_uuid
                context['selected_exp_template'] = ExperimentTemplate.objects.get(uuid=exp_uuid)
                context['manual'] = int(request.POST['manual'])
                context['automated'] = int(request.POST['automated'])
                context['experiment_name_form'] = ExperimentNameForm()
                context = self.get_action_parameter_forms(exp_uuid, context)
                if context['manual']:
                    context = self.get_material_forms(exp_uuid, context)
                if context['automated']:
                    context = self.get_reagent_forms(context['selected_exp_template'], context)
            else:
                request.session['experiment_template_uuid'] = None
        # begin: create experiment
        elif 'create_exp' in request.POST:
            if "automated" in request.POST:
                context = self.process_automated_formsets(request, context)
            else:
                context = self.process_formsets(request, context)
        # end: create experiment
        return render(request, self.template_name, context)
    # end: self.post()
    def save_forms_q1(self, queries, formset, fields):
        """Saves custom formset into queries
        Args:
            queries ([Queryset]): List of queries into which the forms values are saved
            formset ([Formset]): Formset
            fields (dict): Dictionary to map the column in queryset with field in formset
        """
        for form in formset:
            if form.has_changed():
                data = form.cleaned_data
                # 'uuid' carries a JSON list of descriptions identifying the
                # row (built in get_action_parameter_forms).
                desc = json.loads(data['uuid'])
                if len(desc) == 2:
                    object_desc, param_def_desc = desc
                    query = queries.get(object_description=object_desc, parameter_def_description=param_def_desc)
                else:
                    query = queries.get(object_description=desc[0])
                parameter = Parameter.objects.get(uuid=query.parameter_uuid)
                for db_field, form_field in fields.items():
                    setattr(parameter, db_field, data[form_field])
                parameter.save(update_fields=list(fields.keys()))
        #queries.save()
    def save_forms_q_material(self, queries, formset, fields):
        """
        Saves custom formset into queries
        Args:
            queries ([Queryset]): List of queries into which the forms values are saved
            formset ([Formset]): Formset
            fields (dict): Dictionary to map the column in queryset with field in formset
        """
        for form in formset:
            if form.has_changed():
                data = form.cleaned_data
                desc = json.loads(data['uuid'])
                if len(desc) == 2:
                    object_desc, param_def_desc = desc
                    query = queries.get(object_description=object_desc, parameter_def_description=param_def_desc)
                else:
                    query = queries.get(object_description=desc[0])
                # Unlike save_forms_q1, the values are written directly onto
                # the queryset row rather than a Parameter object.
                for db_field, form_field in fields.items():
                    setattr(query, db_field, data[form_field])
                query.save(update_fields=list(fields.keys()))
    def save_forms_reagent(self, formset, exp_uuid, exp_concentrations):
        '''
        need a way to query the db table rows. in material and q1 we query
        based on description however we only have the chemical uuid and
        desired concentration
        in the form. we can pass the copy experiment uuid and call that p
        otentially to get the reagentinstance/reagentinstancevalue uuid
        once this is finished test to make sure the data is saved correctly in the db.
        '''
        # NOTE(review): the exp_concentrations parameter is unused here --
        # confirm whether it can be dropped from the signature.
        # Fixed slot per material type in the returned concentration vector.
        positions = {
            'organic': 0,
            'solvent': 1,
            'acid': 2,
            'inorganic': 3
        }
        vector = [0,0,0,0]
        for form in formset:
            if form.has_changed():
                data = form.cleaned_data
                reagent_template_uuid = data['reagent_template_uuid']
                reagent_instance = ReagentMaterial.objects.get(template=reagent_template_uuid,
                                                               reagent__experiment=exp_uuid,
                                                               )
                reagent_instance.material = InventoryMaterial.objects.get(uuid=data['chemical']) if data['chemical'] else None
                reagent_instance.save()
                reagent_material_value = reagent_instance.reagent_material_value_rmi.get(template__description='concentration')
                reagent_material_value.nominal_value = data['desired_concentration']
                reagent_material_value.save()
                mat_type = reagent_instance.template.material_type
                vector[positions[mat_type.description]] = data['desired_concentration']
        return vector
    def process_formsets(self, request, context):
        """Creates formsets and gets data from the post request.
        Args:
            request ([Django Request]): Should be the POST request
            context (dict): Context dictionary
        Returns:
            context [dict]: Context dict, returned to the page
        """
        # get the experiment template uuid and name
        exp_template = ExperimentTemplate.objects.get(pk=request.session['experiment_template_uuid'])
        template_name = exp_template.description
        # ref_uid will be used to identify python functions specifically for this
        # ref_uid should follow function naming rules for Python
        template_ref_uid = exp_template.ref_uid
        # construct all formsets
        exp_name_form = ExperimentNameForm(request.POST)
        q1_formset = self.NominalActualFormSet(request.POST, prefix='q1_param')
        q1_material_formset = self.MaterialFormSet(request.POST,
                                                   prefix='q1_material',
                                                   form_kwargs={'org_uuid': self.request.session['current_org_id']})
        if all([exp_name_form.is_valid(),
                q1_formset.is_valid(),
                q1_material_formset.is_valid()]):
            exp_name = exp_name_form.cleaned_data['exp_name']
            # make the experiment copy: this will be our new experiment
            experiment_copy_uuid = experiment_copy(str(exp_template.uuid), exp_name)
            # get the elements of the new experiment that we need to update with the form values
            q1 = get_action_parameter_querysets(experiment_copy_uuid, template=False)
            q1_material = get_material_querysets(experiment_copy_uuid, template=False)
            self.save_forms_q1(q1, q1_formset, {'parameter_val_nominal': 'value', 'parameter_val_actual': 'actual_value'})
            self.save_forms_q_material(q1_material, q1_material_formset, {'inventory_material': 'value'})
            # begin: template-specific logic
            if template_ref_uid in SUPPORTED_CREATE_WFS:
                data = {} # Stick form data into this dict
                for i, form in enumerate(q1_formset):
                    if form.is_valid():
                        query = q1[i]
                        data[query.parameter_def_description] = form.cleaned_data['value'].value
                # Scans experiment_templates and picks up functions that have the same name as template_name
                template_function = getattr(core.experiment_templates, template_ref_uid)
                new_lsr_pk, lsr_msg = template_function(data, q1, experiment_copy_uuid, exp_name, exp_template)
                if new_lsr_pk is not None:
                    context['xls_download_link'] = reverse('edoc_download', args=[new_lsr_pk])
                # LSR files are only exposed to the TestCo organization.
                if str(self.request.session['current_org_name']) != "TestCo":
                    context['lsr_download_link'] = None
                elif new_lsr_pk is not None:
                    context['lsr_download_link'] = reverse('edoc_download', args=[new_lsr_pk])
                else:
                    messages.error(request, f'LSRGenerator failed with message: "{lsr_msg}"')
            context['experiment_link'] = reverse('experiment_instance_view', args=[experiment_copy_uuid])
            context['reagent_prep_link'] = reverse('reagent_prep', args=[experiment_copy_uuid])
            context['outcome_link'] = reverse('outcome', args=[experiment_copy_uuid])
            context['new_exp_name'] = exp_name
        return context
    '''
    this function should only save the data to the db tables. refactor all other logic
    '''
    def process_automated_formsets(self, request, context):
        """Validate the automated-workflow reagent formsets, copy the template,
        save concentrations, and generate the requested number of experiments.

        Returns:
            context [dict]: Context dict with result links, returned to the page
        """
        # get the experiment template uuid and name
        exp_template = ExperimentTemplate.objects.get(pk=request.session['experiment_template_uuid'])
        # template_name = exp_template.description
        # construct all formsets
        exp_name_form = ExperimentNameForm(request.POST)
        if 'current_org_id' in self.request.session:
            org_id = self.request.session['current_org_id']
        else:
            org_id = None
        formsets = []
        reagent_template_names = []
        # Rebuild the same formsets that get_reagent_forms rendered so the
        # POSTed data can be validated against them.
        for index, form in enumerate(exp_template.reagent_templates.all().order_by('description')):
            reagent_template_names.append(form.description)
            mat_types_list = []
            for reagent_material_template in form.reagent_material_template_rt.all().order_by('description'):
                for reagent_material_value_template in reagent_material_template.reagent_material_value_template_rmt.filter(description='concentration'):
                    mat_types_list.append(reagent_material_template.material_type)
            formsets.append(self.ReagentFormSet(request.POST, prefix=f'reagent_{index}',
                                                form_kwargs={'lab_uuid': org_id,
                                                             'mat_types_list':mat_types_list,
                                                             'reagent_index':index}))
        if exp_name_form.is_valid():
            #experiment name
            exp_name = exp_name_form.cleaned_data['exp_name']
            # make the experiment copy: this will be our new experiment
            experiment_copy_uuid = experiment_copy(str(exp_template.uuid), exp_name)
            exp_concentrations = {}
            for reagent_formset in formsets:
                if reagent_formset.is_valid():
                    vector = self.save_forms_reagent(reagent_formset, experiment_copy_uuid, exp_concentrations)
                    exp_concentrations = prepare_reagents(reagent_formset, exp_concentrations)
            '''
            this process of creating the data structure to pass into the
            random sampler needs to be less ad-hoc and more generalized moving forward
            need to remove static cleaned_data element calls. however,
            forms will always be process in the same order
            if elif statements for current_mat_list are not needed but
            add some clarity to the code
            '''
            # Save dead volumes should probably be in a separate function
            dead_volume_form = SingleValForm(request.POST, prefix='dead_volume')
            if dead_volume_form.is_valid():
                dead_volume = dead_volume_form.value
            else:
                dead_volume = None
            #retrieve # of experiments to be generated (# of vial locations)
            exp_number = int(request.POST['automated'])
            #generate desired volume for current reagent
            generate_experiments_and_save(experiment_copy_uuid, exp_concentrations, exp_number, dead_volume)
            q1 = get_action_parameter_querysets(experiment_copy_uuid, template=False)
            #robotfile generation
            if exp_template.ref_uid in SUPPORTED_CREATE_WFS:
                template_function = getattr(core.experiment_templates, exp_template.ref_uid)
                new_lsr_pk, lsr_msg = template_function(None, q1, experiment_copy_uuid, exp_name, exp_template)
                if new_lsr_pk is not None:
                    context['xls_download_link'] = reverse('edoc_download', args=[new_lsr_pk])
                # LSR files are only exposed to the TestCo organization.
                if str(self.request.session['current_org_name']) != "TestCo":
                    context['lsr_download_link'] = None
                elif new_lsr_pk is not None:
                    context['lsr_download_link'] = reverse('edoc_download', args=[new_lsr_pk])
                else:
                    messages.error(request, f'LSRGenerator failed with message: "{lsr_msg}"')
            context['experiment_link'] = reverse('experiment_instance_view', args=[experiment_copy_uuid])
            context['reagent_prep_link'] = reverse('reagent_prep', args=[experiment_copy_uuid])
            context['outcome_link'] = reverse('outcome', args=[experiment_copy_uuid])
            context['new_exp_name'] = exp_name
        return context
# end: class CreateExperimentView()
'''
Made experiment list view to be auto generated like the other models because it doesn't seem to have any
different functionality and the code for it below is old and doesn't work
Below is what gets autogenerated for reference
'''
# class ExperimentListView(LoginRequired, GenericModelList):
# model = core.models.view_tables.Experiment
# table_columns = ['Description']
# column_necessary_fields = {
# 'Description': ['description']
# }
# ordering = ['description']
# field_contains = ''
# org_related_path = 'lab__organization'
# default_filter_kwarg= {
# 'parent__isnull': False
# }
class ExperimentDetailView(DetailView):
    """Read-only detail page for a single ExperimentInstance, showing its
    materials, parameter values, notes, tags and document download links."""
    model = ExperimentInstance
    model_name = 'experiment' # lowercase, snake case. Ex:tag_type or inventory
    template_name = 'core/experiment/detail.html'
    detail_fields = None
    detail_fields_need_fields = None
    def get_context_data(self, **kwargs):
        """Assemble detail_data, notes, tags and file links for the template."""
        context = super().get_context_data(**kwargs)
        exp = context['object']
        # dict of detail field names to their value
        detail_data = {}
        #q1, q2, q3 = get_action_parameter_querysets(exp.uuid)
        q1 = get_action_parameter_querysets(exp.uuid)
        mat_q = get_material_querysets(exp.uuid)
        edocs = Edocument.objects.filter(ref_edocument_uuid=exp.uuid)
        detail_data = {row.object_description : row.inventory_material for row in mat_q}
        detail_data.update({f'{row.object_description} {row.parameter_def_description}': f'{row.parameter_value}' for row in q1})
        #detail_data.update({f'{row.object_description} {row.parameter_def_description}': f'{row.parameter_value}' for row in q2})
        #detail_data.update({f'{row.object_description} {row.parameter_def_description}': f'{row.parameter_value}' for row in q3})
        # One absolute download URL per attached document, keyed by title.
        link_data = {f'{lsr_edoc.title}' : self.request.build_absolute_uri(reverse('edoc_download', args=[lsr_edoc.pk])) for lsr_edoc in edocs}
        # get notes
        notes_raw = Note.objects.filter(note_x_note__ref_note=exp.pk)
        notes = []
        for note in notes_raw:
            notes.append('-' + note.notetext)
        context['Notes'] = notes
        # get tags
        tags_raw = Tag.objects.filter(pk__in=TagAssign.objects.filter(
            ref_tag=exp.pk).values_list('tag', flat=True))
        tags = []
        for tag in tags_raw:
            tags.append(tag.display_text.strip())
        context['tags'] = ', '.join(tags)
        context['title'] = self.model_name.replace('_', " ").capitalize()
        context['update_url'] = reverse_lazy(
            f'{self.model_name}_update', kwargs={'pk': exp.pk})
        context['detail_data'] = detail_data
        context['file_download_links'] = link_data
        return context
class ExperimentReagentPrepView(TemplateView):
    """Form page for recording measured reagent amounts and volumes against
    the calculated (nominal) values of an ExperimentInstance."""
    template_name = "core/experiment_reagent_prep.html"
    #form_class = ExperimentTemplateForm
    #ReagentFormSet = formset_factory(ReagentForm, extra=0, formset=BaseReagentFormSet)
    ReagentFormSet = formset_factory(ReagentValueForm, extra=0, formset=BaseReagentFormSet,)
    def get(self, request, *args, **kwargs):
        """Render the reagent formsets for the experiment given by pk."""
        context = self.get_context_data(**kwargs)
        pk = kwargs['pk']
        experiment = ExperimentInstance.objects.get(pk=pk)
        context = self.get_reagent_forms(experiment, context)
        return render(request, self.template_name, context)
    def get_colors(self, number_of_colors, colors=['deeppink', 'blueviolet', 'blue', 'coral', 'lightseagreen', 'orange', 'crimson']):
        """Return number_of_colors color names by cycling through the palette.

        NOTE(review): duplicated from CreateExperimentView.get_colors --
        consider extracting a shared helper.
        """
        factor = int(number_of_colors/len(colors))
        remainder = number_of_colors % len(colors)
        total_colors = colors*factor + colors[:remainder]
        return total_colors
    def get_reagent_forms(self, experiment, context):
        """Build read-mostly reagent formsets (material fields disabled) plus a
        total-volume form per reagent, and add them to the context."""
        formsets = []
        reagent_names = []
        reagent_total_volume_forms = []
        form_kwargs = {
            'disabled_fields': ['material', 'material_type', 'nominal_value'],
        }
        context['helper'] = ReagentValueForm.get_helper(readonly_fields=['material', 'material_type', 'nominal_value'])
        context['helper'].form_tag = False
        context['volume_form_helper'] = PropertyForm.get_helper()
        context['volume_form_helper'].form_tag = False
        #for index, reagent_template in enumerate(reagent_templates):
        for index, reagent in enumerate(experiment.reagent_ei.all()):
            reagent_materials = reagent.reagent_material_r.filter(reagent_material_value_rmi__description='amount')
            # template__reagent_template=)
            # NOTE(review): 'property' shadows the builtin of the same name.
            property = reagent.property_r.get(property_template__description__iexact='total volume')
            reagent_total_volume_forms.append(PropertyForm(instance=property,
                                                           nominal_value_label = 'Calculated Volume',
                                                           value_label = 'Measured Volume',
                                                           disabled_fields=['nominal_value']))
            initial = []
            for reagent_material in reagent_materials:
                reagent_names.append(reagent_material.description)
                rmvi = reagent_material.reagent_material_value_rmi.all().get(template__description='amount')
                initial.append({'material_type':reagent_material.template.material_type.description,
                                'material' : reagent_material.material,
                                'nominal_value' : rmvi.nominal_value,
                                'actual_value': rmvi.actual_value,
                                'uuid': rmvi.uuid})
            fset = self.ReagentFormSet(prefix=f'reagent_{index}', initial=initial, form_kwargs=form_kwargs)
            formsets.append(fset)
        context['reagent_formsets'] = zip(formsets, reagent_total_volume_forms)
        context['colors'] = self.get_colors(len(formsets))
        return context
    def post(self, request, *args, **kwargs):
        """Save measured values; redirect on success, re-render on any invalid
        form."""
        context = self.get_context_data(**kwargs)
        experiment_instance_uuid = request.resolver_match.kwargs['pk']
        experiment = ExperimentInstance.objects.get(uuid=experiment_instance_uuid)
        reagent_templates = experiment.parent.reagent_templates.all()
        formsets = []
        valid_forms = True
        for index in range(len(reagent_templates)):
            # NOTE(review): the same unprefixed PropertyForm is re-validated
            # and saved once per reagent template -- confirm this is intended.
            property_form = PropertyForm(request.POST)
            if property_form.is_valid():
                property_form.save()
            else:
                valid_forms = False
            fset = self.ReagentFormSet(request.POST, prefix=f'reagent_{index}')
            formsets.append(fset)
            if fset.is_valid():
                for form in fset:
                    rmvi = ReagentMaterialValue.objects.get(uuid=form.cleaned_data['uuid'])
                    rmvi.actual_value = form.cleaned_data['actual_value']
                    rmvi.save()
            else:
                valid_forms = False
        if valid_forms:
            return HttpResponseRedirect(reverse('experiment_instance_list'))
        else:
            return render(request, self.template_name, context)
class ExperimentOutcomeView(TemplateView):
    """Upload/download experiment outcomes as CSV and edit them via a model
    formset."""
    template_name = "core/experiment_outcome.html"
    OutcomeFormSet = modelformset_factory(OutcomeInstance,
                                          form=OutcomeInstanceForm,
                                          extra=0,
                                          widgets={'actual_value': ValWidget()})
    def get(self, request, *args, **kwargs):
        """Render the outcome-file upload form."""
        context = self.get_context_data(**kwargs)
        pk = kwargs['pk']
        # experiment = ExperimentInstance.objects.get(pk=pk)
        # context = self.get_outcome_forms(experiment, context)
        # context['outcome_file_url'] = reverse('outcome_file', kwargs={'pk':pk})
        context['outcome_file_upload_form'] = UploadFileForm()
        context['outcome_file_upload_form_helper'] = UploadFileForm.get_helper()
        return render(request, self.template_name, context)
    def get_outcome_forms(self, experiment, context):
        """Populate the context with a model formset over the experiment's
        outcome instances."""
        outcome_instances = experiment.outcome_instance_experiment_instance.all().order_by('description')
        outcome_formset = self.OutcomeFormSet(queryset=outcome_instances)
        context['outcome_formset'] = outcome_formset
        context['helper'] = OutcomeInstanceForm.get_helper()
        context['helper'].form_tag = False
        return context
    def post(self, request, *args, **kwargs):
        """Handle CSV download, CSV upload, or outcome-formset submission.

        NOTE(review): when the POST carries only 'outcome_upload' (without
        'outcome_formset'), this method falls through and returns None, which
        Django rejects -- confirm the upload flow always includes a formset
        or add an explicit response for that branch.
        """
        #context = self.get_context_data(**kwargs)
        #experiment_instance_uuid = request.resolver_match.kwargs['pk']
        if 'outcome_download' in request.POST:
            return self.download_outcome_file(kwargs['pk'])
        if 'outcome_upload' in request.POST:
            df = pd.read_csv(request.FILES['file'])
            self.process_outcome_csv(df, kwargs['pk'])
        if 'outcome_formset' in request.POST:
            outcome_formset = self.OutcomeFormSet(request.POST)
            if outcome_formset.is_valid():
                outcome_formset.save()
            return HttpResponseRedirect(reverse('experiment_instance_list'))
    def process_outcome_csv(self, df, exp_uuid):
        """Write values/units/notes from an uploaded outcomes CSV back onto
        the experiment's OutcomeInstance rows.

        Expects, per outcome template T, columns 'T location', 'T', 'T unit'
        and 'T notes' (matching the layout of download_outcome_file).
        """
        outcomes = OutcomeInstance.objects.filter(experiment_instance__uuid=exp_uuid)
        # outcome_templates = OutcomeTemplate.objects.filter(outcome_instance_ot__experiment_instance__uuid=exp_uuid).distinct()
        outcome_templates = ExperimentInstance.objects.get(uuid=exp_uuid).parent.outcome_templates.all()
        df.fillna('', inplace=True)
        for ot in outcome_templates:
            # Check if the outcome column exists in dataframe
            if ot.description in df.columns:
                # Loop through each location
                for i, row in df.iterrows():
                    o = outcomes.get(description=row[ot.description + ' location'])
                    o.actual_value.value = row[ot.description]
                    # o.actual_value.unit = 'test'
                    o.actual_value.unit = row[ot.description + ' unit']
                    # Placeholder for when all files are uploaded
                    # o.file = file in post data
                    Note.objects.create(notetext=row[ot.description + ' notes'], ref_note_uuid=o.uuid)
                    o.save()
    def download_outcome_file(self, exp_uuid):
        """Build a CSV of the experiment's outcomes and stream it back."""
        # Getting outcomes and its templates
        outcomes = OutcomeInstance.objects.filter(experiment_instance__uuid=exp_uuid)
        outcome_templates = OutcomeTemplate.objects.filter(outcome_instance_ot__experiment_instance__uuid=exp_uuid).distinct()
        # Constructing a dictionary for pandas table
        data = defaultdict(list)
        # Loop through each outcome type (for eg. Crystal scores, temperatures, etc)
        for ot in outcome_templates:
            # Loop through each outcome for that type (eg. Crystal score for A1, A2, ...)
            for o in outcomes.filter(outcome_template=ot):
                # Outcome description
                data[ot.description + ' location'].append(o.description)
                # Add value of outcome
                data[ot.description].append(o.actual_value.value)
                # Add unit of outcome
                data[ot.description + ' unit'].append(o.actual_value.unit)
                # Add filename of outcome (This can be used to associate any file uploaded with the well)
                data[ot.description + ' filename'].append('')
                # Extra notes the user may wish to add
                data[ot.description + ' notes'].append('')
        df = pd.DataFrame.from_dict(data)
        # Write the CSV to a temp file and stream it; FileResponse closes it.
        temp = tempfile.NamedTemporaryFile()
        df.to_csv(temp, index=False)
        temp.seek(0)
        response = FileResponse(temp, as_attachment=True,
                                filename=f'outcomes_{exp_uuid}.csv')
        return response
default_app_config = "world.magic.apps.MagicConfig"
|
#!/usr/local/bin/python3.7
# -*- coding: utf-8 -*-
def hammingWeight(n: int) -> int:
    """Count the 1-bits in the 32-bit two's-complement representation of n.

    Uses Brian Kernighan's trick: n & (n - 1) clears the lowest set bit each
    iteration, so the loop runs once per set bit.
    """
    # Python ints are arbitrary-precision, so a negative n would stay negative
    # forever and never reach 0; mask to 32 bits first (also fixes the
    # infinite loop the unmasked version had for negative input).
    n &= 0xFFFFFFFF
    m = 0
    while n:
        m += 1
        n = n & (n - 1)
    return m
def hammingWeight2(n: int) -> int:
    """Count the 1-bits in the 32-bit two's-complement representation of n.

    Probes each bit position with a flag shifted left until it exceeds n.
    """
    # Mask to 32 bits so negative inputs are counted in their two's-complement
    # form (the unmasked version returned 0 for any negative n, since the
    # `n >= flag` loop condition was false immediately).
    n &= 0xFFFFFFFF
    m = 0
    flag = 1
    while n >= flag:
        if n & flag:
            m += 1
        flag = flag << 1
    return m
def hammingWeight1(n: int) -> int:
    """Count the 1-bits in the 32-bit two's-complement representation of n.

    Tests the low bit and shifts right until n is exhausted.
    """
    # Mask to 32 bits first: in Python, right-shifting a negative int never
    # reaches 0 (e.g. -1 >> 1 == -1), which made the unmasked version loop
    # forever on negative input.
    n &= 0xFFFFFFFF
    m = 0
    while n:
        if n & 1:
            m += 1
        n = n >> 1
    return m
def main():
    """Demo driver for the three popcount variants.

    Approach 1: n >> 1 then AND with 1 -- negative numbers cause an
        infinite loop (arbitrary-precision right shift never hits 0).
    Approach 2: shift a probe bit left (1 << k) and AND it with n.
    Approach 3: n & (n - 1) clears the lowest set bit on each iteration.
    """
    # NOTE(review): hammingWeight(-11) may not terminate unless the
    # implementation masks negatives to 32 bits first -- verify.
    param = -11
    ret = hammingWeight(param)
    print(ret)
'''剑指 Offer 15. 二进制中1的个数
请实现一个函数,输入一个整数(以二进制串形式),输出该数二进制表示中 1 的个数。例如,把 9 表示成二进制是 1001,有 2 位是 1。因此,如果输入 9,则该函数输出 2。
示例 1:
输入:00000000000000000000000000001011
输出:3
解释:输入的二进制串 00000000000000000000000000001011 中,共有三位为 '1'。
示例 2:
输入:00000000000000000000000010000000
输出:1
解释:输入的二进制串 00000000000000000000000010000000 中,共有一位为 '1'。
示例 3:
输入:11111111111111111111111111111101
输出:31
解释:输入的二进制串 11111111111111111111111111111101 中,共有 31 位为 '1'。
提示:
输入必须是长度为 32 的 二进制串 。
来源:力扣(LeetCode)
链接:https://leetcode-cn.com/problems/er-jin-zhi-zhong-1de-ge-shu-lcof
著作权归领扣网络所有。商业转载请联系官方授权,非商业转载请注明出处。
'''
# Script entry point: run the demo only when executed directly.
if __name__ == '__main__':
    main()
|
from typing import List
from XivDbReader.collections import Weapon
from XivDbReader import Reader, ExportCsv

# Example of reading Paladin arms (left disabled).
#r: Reader = Reader(job='pld')
#pldArms: List[Weapon] = r.getArms(recordLimit=1)

# Fetch a single White Mage weapon record and export it to CSV.
whmReader: Reader = Reader(job='whm')
whmArms: List[Weapon] = whmReader.getArms(recordLimit=1)
ec = ExportCsv(recordType='weapon', recordJob='whm')
ec.write(whmArms)
import unittest
import checksieve
from . import util
class TestCommandsAST(util.DiffTestCase):
    """Golden-file diff tests for the basic sieve commands' AST output."""

    def test_require(self):
        produced = util.run_mock('commands/require_single.sieve')
        self.assertNoDiff(util.diff(produced, 'commands/require_single.out'))

    def test_require_list(self):
        produced = util.run_mock('commands/require_list.sieve')
        self.assertNoDiff(util.diff(produced, 'commands/require_list.out'))

    def test_stop(self):
        produced = util.run_mock('commands/stop.sieve')
        self.assertNoDiff(util.diff(produced, 'commands/stop.out'))
if __name__ == '__main__':
unittest.main()
|
from datetime import datetime
from decimal import Decimal
import json
import os
from corehq.apps.userreports.models import DataSourceConfiguration, ReportConfiguration
from corehq.util.dates import iso_string_to_date
def get_sample_report_config():
    """Load the bundled sample report config and wrap it as a ReportConfiguration."""
    config_path = os.path.join(
        os.path.dirname(__file__), 'data', 'configs', 'sample_report_config.json')
    with open(config_path) as config_file:
        spec = json.load(config_file)
    return ReportConfiguration.wrap(spec)
def get_sample_data_source():
    """Load the bundled sample data-source config and wrap it as a DataSourceConfiguration."""
    config_path = os.path.join(
        os.path.dirname(__file__), 'data', 'configs', 'sample_data_source.json')
    with open(config_path) as config_file:
        spec = json.load(config_file)
    return DataSourceConfiguration.wrap(spec)
def get_sample_doc_and_indicators(fake_time_now=None):
    """Build a sample case document plus the indicator row it should produce.

    :param fake_time_now: datetime used as 'inserted_at'; defaults to utcnow().
    :return: (sample_doc dict, expected_indicators dict) pair for tests.
    """
    if fake_time_now is None:
        fake_time_now = datetime.utcnow()
    date_opened = "2014-06-21"
    sample_doc = {
        '_id': 'some-doc-id',
        'opened_on': date_opened,
        'owner_id': 'some-user-id',
        'doc_type': "CommCareCase",
        'domain': 'user-reports',
        'type': 'ticket',
        'category': 'bug',
        'tags': 'easy-win public',
        'is_starred': 'yes',
        'estimate': 2.3,
        'priority': 4,
    }
    expected_indicators = {
        'doc_id': 'some-doc-id',
        'repeat_iteration': 0,
        'date': iso_string_to_date(date_opened),
        'owner': 'some-user-id',
        'count': 1,
        'category_bug': 1, 'category_feature': 0, 'category_app': 0, 'category_schedule': 0,
        'tags_easy-win': 1, 'tags_potential-dupe': 0, 'tags_roadmap': 0, 'tags_public': 1,
        'is_starred': 1,
        # NOTE(review): Decimal(2.3) carries float imprecision (2.2999...).
        # If an exact value were intended this would be Decimal('2.3'), but
        # the float-constructed value may deliberately mirror a float->Decimal
        # database conversion, so it is left unchanged -- verify.
        'estimate': Decimal(2.3),
        'priority': 4,
        'inserted_at': fake_time_now,
    }
    return sample_doc, expected_indicators
|
import itertools

# Project Euler 32 (pandigital products), Python 2.
# For every 1-9 pandigital split a*b = c, collect the distinct products c
# and print their sum.  's' is the digit count of a, 's2' marks the end of
# b; c takes the remaining 9 - s2 digits (integer division keeps c's digit
# count large enough for a product).
matches = set()
for i in itertools.permutations('123456789', 9):
    for s in xrange(1, 4):
        for s2 in xrange(s + 1, (14 - s) / 2):
            a = int(''.join(i[:s]))
            b = int(''.join(i[s:s2]))
            c = int(''.join(i[s2:]))
            if a * b == c:
                matches.add(c)
print sum(matches)
import re
class Game:
    """Console tic-tac-toe: two players alternate moves on a 3x3 board."""

    def __init__(self, player1, player2):
        # 9 cells indexed 0..8; display order is numpad-style (see __drawBoard)
        self.fields = [Field() for i in range(9)]
        self.player1 = player1
        self.player2 = player2

    # display fields the way your numpad is configured for the best
    # user experience (7 is top left, 3 is bottom right, etc.)
    def __drawBoard(self):
        fieldCount = 0
        for field in (self.fields[6:9] + self.fields[3:6] + self.fields[0:3]):
            # newline only after every third cell
            print(field.draw(), end = "" if fieldCount % 3 != 2 else "\n" )
            fieldCount += 1

    def __isFinished(self):
        """True when the board is full or either player has a winning line."""
        def boardIsFull():
            return all(field.isOccupied() for field in self.fields)
        def hasWinner():
            return self.player1.isWinner(self.fields) or self.player2.isWinner(self.fields)
        return boardIsFull() or hasWinner()

    def play(self):
        """Interactive main loop: prompt each player until the game ends.

        Assumes the caller linked player1.otherPlayer/player2.otherPlayer to
        each other -- verify; starting with player2 means the first swap
        below hands the first move to player1.
        """
        activePlayer = self.player2
        self.__drawBoard()
        while not self.__isFinished():
            activePlayer = activePlayer.otherPlayer
            occupyFieldNr = ""
            def isValidInput(tryingToOccupyFieldNr):
                # a single digit 1-9 naming a cell that is still free
                return re.compile("^[1-9]{1}$").match(tryingToOccupyFieldNr) and not self.fields[int(tryingToOccupyFieldNr)-1].isOccupied()
            while not isValidInput(occupyFieldNr):
                occupyFieldNr = input(f"{activePlayer.name} ({activePlayer.mark}), make your move (1-9)):")
            self.fields[int(occupyFieldNr)-1].occupy(activePlayer)
            self.__drawBoard()

    def getWinner(self):
        """Return the winning Player, or None if drawn/unfinished."""
        if (self.player1.isWinner(self.fields)):
            return self.player1
        elif (self.player2.isWinner(self.fields)):
            return self.player2
        return None
class Player:
    """A tic-tac-toe participant: a display name and a board mark."""

    def __init__(self, name, mark):
        self.name = name
        self.mark = mark
        # linked to the opponent by the caller once both players exist
        self.otherPlayer = None

    def isWinner(self, fields):
        """True if this player occupies all three cells of any winning line."""
        # Index triples for every winning line on the 3x3 board.
        winning_lines = (
            (0, 1, 2), (3, 4, 5), (6, 7, 8),   # rows
            (0, 4, 8), (2, 4, 6),              # diagonals
            (0, 3, 6), (1, 4, 7), (2, 5, 8),   # columns
        )
        return any(
            all(fields[i].isOccupiedByPlayer(self) for i in line)
            for line in winning_lines
        )
class Field:
    """One board cell; remembers which player (if any) has claimed it."""

    def __init__(self):
        self.player = None

    def isOccupied(self):
        """True once any player has claimed this cell."""
        return self.player is not None

    def isOccupiedByPlayer(self, player):
        """True if this cell is claimed by exactly *player*."""
        return self.player == player

    def occupy(self, player):
        """Claim the cell for *player* (no re-claim check here)."""
        self.player = player

    def draw(self):
        """Three-character rendering: the occupant's mark, or '_' when free."""
        if self.isOccupied():
            return " " + self.player.mark + " "
        return " _ "
#### Values from the SWMF source code, the Batsrus.jl code,
#### and also some extra named constants and indices

# Possible values for the status variable
Unset_ = -100 # index for unset values (that are otherwise larger)
Unused_ = -1 # unused block (not a leaf)
Refine_ = -2 # parent block to be refined
# NOTE(review): unlike its siblings this name lacks a trailing underscore;
# kept as-is since renaming could break existing importers -- verify.
DontCoarsen = -3 # block not to be coarsened
Coarsen_ = -4 # child block to be coarsened
Used_ = 1 # currently used block (leaf)
RefineNew_ = 2 # child block to be refined
Refined_ = 3 # refined child block
CoarsenNew_ = 4 # parent block to be coarsened
Coarsened_ = 5 # coarsened parent block

# Deepest AMR level relative to root nodes (limited by 32 bit integers)
MaxLevel = 30

'''from SWMF/GM/BATSRUS/srcBATL_/BATL_tree.f90
! The maximum integer coordinate for a given level below root nodes
! Implied do loop was not understooed by the pgf90 compiler, so list them
integer, parameter, public :: MaxCoord_I(0:MaxLevel) = &
[ 1, 2, 4, 8, 16, 32, 64, 128, 256, 512, 1024, 2048, 4096, 8192, &
16384, 32768, 65536, 131072, 262144, 524288, 1048576, 2097152, &
4194304, 8388608, 16777216, 33554432, 67108864, 134217728, &
268435456, 536870912, 1073741824 ]
'''#why are they indexing this one from 0?
# MaxCoord_I[level] == 2**level, the maximum integer coordinate at that level
# (0-indexed to mirror the Fortran declaration MaxCoord_I(0:MaxLevel)).
MaxCoord_I = [ 1, 2, 4, 8, 16, 32, 64, 128, 256, 512, 1024, 2048, 4096, 8192,
    16384, 32768, 65536, 131072, 262144, 524288, 1048576, 2097152,
    4194304, 8388608, 16777216, 33554432, 67108864, 134217728,
    268435456, 536870912, 1073741824 ]

# check the copied hardcoded things are what I think they are
assert(len(MaxCoord_I) == MaxLevel+1)
assert(MaxCoord_I == [2**i for i in range(len(MaxCoord_I))])

# Named indexes of iTree_IA (1-based, matching the Fortran layout)
Status_ = 1
Level_ = 2 # grid level
Proc_ = 3 # processor index
Block_ = 4 # block index
MinLevel_ = 5 # minimum level allowed
MaxLevel_ = 6 # maximum level allowed
Coord0_ = 6 # equal to Coord1_-1
Coord1_ = 7 # coordinate of node in 1st dimension
Coord2_ = 8 # coordinate of node in 2nd dimension
Coord3_ = 9 # coordinate of node in 3rd dimension
CoordLast_= 9 # Coord0_ + MaxDim (?)
Parent_ = 10 # Parent_ must be equal to Child0_
Child0_ = 10 #
Child1_ = Child0_ + 1
#ChildLast_= Child0_ + nChild

'''
the status of the node (used, unused, to be refined, to be coarsened, etc.);
the current, the maximum allowed and minimum allowed AMR levels for this node;
the three integer coordinates with respect to the whole grid;
the index of the parent node (if any);
the indexes of the children nodes (if any);
the processor index where the block is stored for active nodes;
the local block index for active nodes.
'''
|
#!/usr/bin/env python
# Example server used to test Simple-CSRF-script.py and Advanced-CSRF-script.py
# GET creates the token
# POST verifies itand creates a new one
from BaseHTTPServer import HTTPServer, BaseHTTPRequestHandler
from optparse import OptionParser
import string, random, re
html = """
<!DOCTYPE html>
<html>
<body>
<form action="/" method="POST">
Change address<br>
<input type="text" name="street" placeholder="Street">
<br>
<input type="text" name="city" placeholder="City">
<br>
<input type="text" name="zip" placeholder="ZIP">
<br>
<small>
<br>New Token<br>
<input type="text" name="CSRFtoken" value="$token">
<small>
<br><br>
<input type="submit" value="Submit">
<br>Message<br>
<textarea>$message</textarea>
</form>
</body>
</html>
"""
class RequestHandler(BaseHTTPRequestHandler):
    """Demo handler (Python 2): GET issues a CSRF token, POST verifies it.

    NOTE(review): BaseHTTPServer creates a fresh handler instance per
    request, so the instance attribute set in do_GET may not be the one a
    later do_POST reads; verification may effectively use the class-level
    default -- verify token persistence across requests.
    """
    # last issued token; class-level default is the empty string
    token=""

    def do_GET(self):
        # issue a fresh 32-char alphanumeric token and embed it in the form
        new_token = ''.join([random.choice(string.ascii_letters + string.digits) for n in xrange(32)])
        self.token = new_token
        print "token new "+self.token
        response = string.Template(html)
        response = response.substitute(token=new_token, message="")
        self.send_response(200)
        #self.send_header("Set-Cookie", "foo=bar")
        self.end_headers()
        self.wfile.write(response)

    def do_POST(self):
        """Check the submitted CSRF token, then respond with a new one."""
        request_path = self.path
        request_headers = self.headers
        content_length = request_headers.getheaders('content-length')
        length = int(content_length[0]) if content_length else 0
        post_body = self.rfile.read(length)
        print "token searched "+self.token
        # look for the current token, quoted, in the raw POST body
        search = re.compile("CSRFtoken=\"("+self.token+")\"")
        match = search.findall(post_body)
        self.send_response(200)
        if match:
            expired_token = match[0]
            print "token found "+expired_token
            message="Token was OK "+ expired_token
        else:
            message="No valid token found"
        # rotate the token regardless of whether verification succeeded
        new_token = ''.join([random.choice(string.ascii_letters + string.digits) for n in xrange(32)])
        self.token = new_token
        response = string.Template(html)
        response = response.substitute(token=new_token, message=message)
        print "token rereshed "+self.token
        self.end_headers()
        self.wfile.write(response)

    # PUT behaves like POST (verify), DELETE like GET (issue a token)
    do_PUT = do_POST
    do_DELETE = do_GET
def main():
    """Start the demo HTTP server on port 9090 and serve until interrupted."""
    port = 9090
    print('Listening on :%s' % port)
    server = HTTPServer(('', port), RequestHandler)
    server.serve_forever()
if __name__ == "__main__":
print("Main")
main() |
import os
import shutil
import subprocess
import tempfile
from base import Base
from strings import Strings
class MacSource(Base):
    """Extracts localizable strings from Mac sources via the `genstrings` tool."""

    def __init__(self):
        Base.__init__(self)

    def extract_strings_from_filename(self, filename):
        """Run genstrings on *filename* and parse every generated .strings file.

        Results come from Strings.extract_strings_from_files; the temporary
        output directory is removed before returning.
        """
        scratch_dir = self._get_output_directory()
        self._run_genstrings_command_for_file(filename, scratch_dir)
        generated = [os.path.join(scratch_dir, entry)
                     for entry in os.listdir(scratch_dir)]
        parsed = Strings().extract_strings_from_files(generated)
        shutil.rmtree(scratch_dir)
        return parsed

    def _run_genstrings_command_for_file(self, filename, output_dir):
        # Reading stdout blocks until genstrings has finished writing files.
        subprocess.Popen(
            ["genstrings", "-u", "-o", output_dir, filename],
            stdout=subprocess.PIPE,
            stderr=subprocess.STDOUT,
        ).stdout.read()

    def _get_output_directory(self):
        """Create and return a fresh temporary directory for genstrings output."""
        return tempfile.mkdtemp()
|
#! /usr/bin/env python
import scipy as s
import pylab as p
import scipy.integrate as si
from scipy import stats #I need this module for the linear fit
import smtplib
from email.MIMEMultipart import MIMEMultipart
from email.MIMEBase import MIMEBase
from email.MIMEText import MIMEText
from email import Encoders
import os
def mail(to, subject, text, attach):
    """Send *text* plus one file attachment via Gmail SMTP (Python 2 email API).

    Relies on the module-level gmail_user / gmail_pwd credentials.
    """
    message = MIMEMultipart()
    message['From'] = gmail_user
    message['To'] = to
    message['Subject'] = subject
    message.attach(MIMEText(text))
    attachment = MIMEBase('application', 'octet-stream')
    attachment.set_payload(open(attach, 'rb').read())
    Encoders.encode_base64(attachment)
    attachment.add_header('Content-Disposition',
                          'attachment; filename="%s"' % os.path.basename(attach))
    message.attach(attachment)
    server = smtplib.SMTP("smtp.gmail.com", 587)
    server.ehlo()
    server.starttls()  # upgrade to TLS before authenticating
    server.ehlo()
    server.login(gmail_user, gmail_pwd)
    server.sendmail(gmail_user, to, message.as_string())
    # Should be server.quit(), but that reportedly crashes here...
    server.close()
def Brow_ker_cont_optim(Vlist):
    """Brownian coagulation kernel matrix in the continuum regime.

    Builds K[i, j] for every pair of particle volumes in Vlist using the
    module-level constants k_B, T_0 and mu.  Volumes enter through their
    cube roots (i.e. particle radii up to a constant factor).
    """
    kern_mat=2.*k_B*T_0/(3.*mu)*(Vlist[:,s.newaxis]**(1./3.)+\
        Vlist[s.newaxis,:]**(1./3.))**2./ \
        (Vlist[:,s.newaxis]**(1./3.)*Vlist[s.newaxis,:]**(1./3.))
    return kern_mat
def coupling_optim_garrick(y,t):
    """RHS of the Smoluchowski equation on a non-uniform grid (Garrick splitting).

    Uses the module-level `kernel` matrix and the precomputed splitting
    operator `f_garrick`; signature (y, t) matches scipy.integrate.odeint.
    """
    creation=s.zeros(n_bin)
    destruction=s.zeros(n_bin)
    #now I try to rewrite this in a more optimized way
    destruction = -s.dot(s.transpose(kernel),y)*y #much more concise way to express\
    #the destruction of k-mers
    for k in xrange(n_bin):
        # contribution of all (i, j) collisions that land (partly) in bin k
        kyn = (kernel*f_garrick[:,:,k])*y[:,s.newaxis]*y[s.newaxis,:]
        creation[k] = s.sum(kyn)
    creation=0.5*creation  # halve to avoid double-counting (i,j)/(j,i) pairs
    out=creation+destruction
    return out
#Now I work with the function for espressing smoluchowski equation when a uniform grid is used
def coupling_optim(y,t):
    """RHS of the Smoluchowski equation on a uniform (linear) grid.

    Creation in bin k sums over pairs (i, j) with i + j = k; uses the
    module-level `kernel`.  Signature (y, t) matches scipy.integrate.odeint.
    """
    creation=s.zeros(n_bin)
    destruction=s.zeros(n_bin)
    #now I try to rewrite this in a more optimized way
    destruction = -s.dot(s.transpose(kernel),y)*y #much more concise way to express\
    #the destruction of k-mers
    kyn = kernel*y[:,s.newaxis]*y[s.newaxis,:]
    for k in xrange(n_bin):
        # anti-diagonal sum: all collisions whose sizes add up to bin k
        creation[k] = s.sum(kyn[s.arange(k),k-s.arange(k)-1])
    creation=0.5*creation  # halve to avoid double-counting pairs
    out=creation+destruction
    return out
#Now I go for the optimal optimization of the chi_{i,j,k} coefficients used by Garrick for
# dealing with a non-uniform grid.
def mycount_garrick(V):
    """Precompute Garrick's volume-splitting coefficients chi_{i,j,k}.

    For each destination bin k, f[i, j, k] is the fraction of the collision
    product V[i] + V[j] assigned to bin k by linear interpolation between
    the neighbouring bin volumes.  Boundary bins k = 0 and k = n_bin - 1
    are left at zero.
    """
    f=s.zeros((n_bin, n_bin, n_bin))
    Vsum=V[:,s.newaxis]+V[s.newaxis,:] # matrix with the sum of the volumes in the bins
    for k in xrange(1,(n_bin-1)):
        f[:,:,k]=s.where((Vsum<=V[k+1]) & (Vsum>=V[k]), (V[k+1]-Vsum)/(V[k+1]-V[k]),\
            f[:,:,k] )
        f[:,:,k]=s.where((Vsum<=V[k]) & (Vsum>=V[k-1]),(Vsum-V[k-1])/(V[k]-V[k-1]),\
            f[:,:,k])
    return f
def total_concentration(number_mat, box_volume):
    """Total particle count in the box at each time step.

    Sums the per-bin concentrations along axis 1 (one row per time step)
    and scales by the box volume.
    """
    per_step = s.sum(number_mat, axis=1)
    return per_step * box_volume
def total_mass_conservation(number_mat, vol_grid, box_volume):
    """Check mass conservation between the first and last time steps.

    Returns array([initial_mass, final_mass, relative_loss]) where
    relative_loss = (initial - final) / initial.
    """
    initial_mass = s.dot(number_mat[0, :], vol_grid) * box_volume
    final_mass = s.dot(number_mat[-1, :], vol_grid) * box_volume
    relative_loss = (initial_mass - final_mass) / initial_mass
    return s.array([initial_mass, final_mass, relative_loss])
def fitting_stat(x, y):
    """Least-squares linear fit of y on x with standard errors.

    Returns array([slope, intercept, r, sd_slope, sd_intercept]); the two
    standard deviations are filled in only when len(x) > 2 (otherwise they
    stay zero, since the residual estimate needs n - 2 degrees of freedom).
    """
    slope, intercept, r, prob2, see = stats.linregress(x, y)
    out = s.zeros(5)
    out[0] = slope
    out[1] = intercept
    out[2] = r
    if len(x) > 2:
        # rescale linregress' standard error to n - 2 degrees of freedom
        see = see * s.sqrt(len(x) / (len(x) - 2.))
        mx = x.mean()
        sx2 = ((x - mx) ** 2).sum()
        out[3] = see * s.sqrt(1. / sx2)                      # sd of the slope
        out[4] = see * s.sqrt(1. / len(x) + mx * mx / sx2)   # sd of the intercept
    return out
#Now a list of the physical parameters needed to carry out the calculation
n_mon=5000 #total number of monomers
initial_density=0.01 #monomer density in the box
box_vol=n_mon/initial_density #volume of the box containing the monomers
r_mon=0.5 #radius of each monomer
v_mono=4./3.*s.pi*r_mon**3. #volume of each monomer
beta=1. #cluster-monomer 1/tau
k_B=1. #in these units
T_0=0.5 #temperature of the system
m_mon=1. #monomer mass in these units
sigma=1. #monomer diameter
mu=(m_mon*beta)/(3.*s.pi*sigma) # fluid viscosity
t=s.linspace(0.,1000.,1001) # choose time grid for time evolution
#Specify the bin structure you want to use
linear =1 #linear ==1---> use a linear bin structure and solve smoluchowski equation in standard form
#linear !1---> use a non-linear (log-spaced) bin structure and solve smoluchowski equation
# using the splitting operator
k_max=1000 #maximum number of monomers I consider in a k_mer
n_bin=200 # to be used only if a non-linear bin structure is used
# NOTE(review): send_email is defined but never checked below; the mail()
# call at the end runs unconditionally -- verify intent.
send_email=1 #tells whether you want to send an email with an attached
# file
# variables for sending emails
gmail_user = "someaccount@gmail.com" #modify this using the account name and password of
#your own gmail account
gmail_pwd = "password"
mailto = "robert.h.schingler@nasa.gov"
mailtitle = "The job you submitted is done"
mailtext = "This is an automatically-generated email, please do not reply"
if (linear==1):
    mailattachment="evolution_number_of_monomers_linear_binning.pdf"
else:
    mailattachment="evolution_number_of_monomers_nonlinear_binning.pdf"
if (linear == 1):
    k_list=s.linspace(1., k_max, k_max) #list of number I use to label each bin, i.e. size of the
    #corresponding monomer
    vol_grid=k_list*v_mono #volume of the particle in the k-th bin
    n_bin=len(k_list) #overwrite the number of bins
elif (linear !=1):
    k_list=s.logspace(s.log10(1.), s.log10(k_max),n_bin)
    vol_grid=k_list*v_mono #volume of the particle in the k-th bin (this time the volume list is
    #nonlinear)
if (linear !=1): #calculate the splitting operator on the non-uniform grid
    f_garrick=mycount_garrick(vol_grid) #I calculate the splitting operator on the grid
#generate initial condition [monodisperse aerosol]
y0=s.zeros(n_bin)
y0[0]=initial_density #initial state (monodisperse aerosol)
#Generate the kernel matrix
kernel=Brow_ker_cont_optim(vol_grid)
# integrate the Smoluchowski ODE system with tight tolerances
if (linear==1):
    solution = si.odeint(coupling_optim, y0, \
        t,printmessg=1,rtol=1e-10,atol=1e-10)
elif (linear!=1):
    solution = si.odeint(coupling_optim_garrick, y0, \
        t,printmessg=1,rtol=1e-10,atol=1e-10)
total_monomers=total_concentration(solution, box_vol)
#now save the total number of monomers and the time in two separate files
# NOTE(review): p.save is the old pylab alias removed from modern
# matplotlib -- verify against the pinned matplotlib version.
if (linear==1):
    p.save("number_monomers_linear_binning.dat", total_monomers)
elif (linear !=1):
    p.save("number_monomers_nonlinear_binning.dat", total_monomers)
p.save("time.dat", t)
#check the quality of the simulation by testing mass conservation
mass_tests=total_mass_conservation(solution, vol_grid,box_vol)
print "initial and final total mass in the box are", mass_tests[0], mass_tests[1] ,"respectively"
print "mass is conserved up to ", mass_tests[2]*100., "percent"
#finally, perform some basic statistical analysis (fit decay of total number of clusters to a power-law)
sel_late=s.where(t>800.)
results=fitting_stat(s.log(t[sel_late]),s.log(total_monomers[sel_late]))
power_decay=results[0]
print "the exponent of the power-law decay [the theoretical value is -1] is, ", power_decay
#finally, plot the results
fig = p.figure()
axes = fig.gca()
axes.plot(t,total_monomers,"ro",linewidth=2.)
p.xlabel('Time')
p.ylabel('Total number of monomers')
p.title('Evolution of the total number of monomers')
p.grid(True)
if (linear ==1 ):
    fig_name="evolution_number_of_monomers_linear_binning.pdf"
elif (linear !=1):
    fig_name="evolution_number_of_monomers_nonlinear_binning.pdf"
p.savefig(fig_name)
p.clf()
#Now send one of the generated plots automatically by email
#NB: this will not work unless one really inputs its email login details
mail(mailto,mailtitle, mailtext,mailattachment)
print "Calculation ended."
|
# Backwards-compatibility shim: re-export the amaranth_boards icebreaker
# definitions under the legacy nmigen_boards name, warning importers that
# they should migrate to the new package.
from amaranth_boards.icebreaker import *
from amaranth_boards.icebreaker import __all__

import warnings
warnings.warn("instead of nmigen_boards.icebreaker, use amaranth_boards.icebreaker",
              DeprecationWarning, stacklevel=2)
|
# Package facade: re-export the public label-studio helpers so callers can
# import them directly from the package root.
from .converter import Converter
from .core import get_labelstudio_export_from_api
from .pseudolabels import compute_labelstudio_preds

__all__ = [
    "Converter",
    "compute_labelstudio_preds",
    "get_labelstudio_export_from_api",
]
|
from __future__ import print_function
from future import standard_library
standard_library.install_aliases()
from builtins import range
from builtins import object
import MalmoPython
import json
import logging
import os
import random
import sys
import time
if sys.version_info[0] == 2:
import Tkinter as tk
else:
import tkinter as tk
first_level_y = 56
# second_level_y = 56 + 5
class TabQAgent(object):
"""Tabular Q-learning agent for discrete state/action spaces."""
def __init__(self):
    """Set up hyper-parameters, logging, power-up locations and empty Q-tables."""
    self.health = 100
    # fixed (x, y, z) positions of power-up blocks on the first level
    self.powerUps = [(6,first_level_y,1),(8,first_level_y,5),(9,first_level_y,4),(10,first_level_y,1),(9,first_level_y,3),(6,first_level_y,2),(8,first_level_y,4),(9,first_level_y,1)]
    self.jumpH = 5
    self.epsilon = 0.05 # exploration rate
    self.alpha = 0.4 # learning rate
    self.gamma = 0.5 # reward discount factor
    self.logger = logging.getLogger(__name__)
    if False: # True if you want to see more information
        self.logger.setLevel(logging.DEBUG)
    else:
        self.logger.setLevel(logging.INFO)
    self.logger.handlers = []
    self.logger.addHandler(logging.StreamHandler(sys.stdout))
    self.actions = ["movenorth 1", "movesouth 1", "movewest 1", "moveeast 1", "teleport 1"]
    self.q_table = {}
    self.q_table_powerup = {}
    self.canvas = None
    self.root = None
    # Bug fix: drawQ() also tests self.canvas2 and self.root2, which were
    # never initialized and raised AttributeError on the first draw.
    self.canvas2 = None
    self.root2 = None
    self.count_height = 0
    self.food = list()
def generate_food_blocks(self, food_per_level, height_per_level, min_x, min_z, max_x , max_z, y_base_level, levels = 3):
max_x -= 2
max_z -= 2
min_z -= 2
min_x -= 2
start_per_level = []
start_per_level[0] = (min_x, max_x)
for level in range(1, levels):
start_per_level[level][0] = start_per_level[level-1][1]+1
start_per_level[level][1] = start_per_level[level-1][1] + (max_x-min_x)
possible_foods = []
for level in range(levels):
foods_on_level = []
for x in range(start_per_level[level][0], start_per_level[level][1]):
for z in range(min_z, max_z):
foods_on_level.append((x, z))
possible_foods.append(foods_on_level)
for level in range(levels):
while len(possible_foods) < food_per_level:
x_pos = random.choice(possible_foods)
z_pos = random.randint(min_z, max_z)
y_pos = (level * height_per_level[level]) + y_base_level
pos = (x_pos, y_pos, z_pos)
while pos in self.food:
x_pos = random.randint(min_x, max_x)
z_pos = random.randint(min_z, max_z)
y_pos = (level * height_per_level[level]) + y_base_level
self.food.append(pos)
### Change q_table to reflect what we have learnt.
# Inputs: reward - int, current_state - coordinate tuple, prev_state - coordinate tuple, prev_a - int
# Output: updated q_table
def updateQTable_general(self, reward, current_state, prev_state, prev_a, table):
a = self.alpha
g = self.gamma
r = reward
curr_Q = max(table[current_state])
prev_Q = table[prev_state][prev_a]
average_Q = a * (r + (g * curr_Q)) + (1-a) * prev_Q
table[prev_state][prev_a] = average_Q
return
def updateQTable(self, reward, current_state, prev_state, prev_a):
self.health -= 5
if reward == 49 :#and self.health <= 50:
self.updateQTable_general(reward, current_state, prev_state, prev_a, self.q_table_powerup)
self.health = 100
else:
self.updateQTable_general(reward, current_state, prev_state, prev_a, self.q_table)
'''
def updatePowerUpTable(self, reward, current_state, prev_state, prev_a):
self.updateQTable_general(reward, current_state, prev_state, prev_a, self.q_table_powerup)
'''
### Change q_table to reflect what we have learnt upon reaching the terminal state.
# Input: reward - int, prev_state - coordinate tuple, prev_a - int
# Output: updated q_table
def updateQTableFromTerminatingState(self, reward, prev_state, prev_a):
self.health = 100
if reward > 90:
self.q_table[prev_state][prev_a] = 100
if reward <= -100:
self.q_table[prev_state][prev_a] = -100
self.q_table_powerup[prev_state][prev_a] = -100
self.powerUps = [(6,first_level_y,1),(8,first_level_y,5),(9,first_level_y,4),(10,first_level_y,1),(9,first_level_y,3),(6,first_level_y,2),(8,first_level_y,4),(9,first_level_y,1)]
return
def act(self, world_state, agent_host, current_r):
    """Take one epsilon-greedy step: update Q values, pick and send an action.

    :param world_state: Malmo world state; the latest observation must
        contain XPos/ZPos (YPos is also read for power-up pickup).
    :param agent_host: Malmo agent host used to send movement commands.
    :param current_r: reward accumulated since the previous action.
    :return: current_r unchanged (0 if the observation was incomplete).
    """
    obs_text = world_state.observations[-1].text
    obs = json.loads(obs_text)
    if not u'XPos' in obs or not u'ZPos' in obs:
        self.logger.error("Incomplete observation received: %s" % obs_text)
        return 0
    # discretize the continuous position into a "x:z" state key
    current_s = "%d:%d" % (int(obs[u'XPos']), int(obs[u'ZPos']))
    self.logger.debug("State: %s (x = %.2f, z = %.2f)" % (current_s, float(obs[u'XPos']), float(obs[u'ZPos'])))
    if current_s not in self.q_table:
        self.q_table[current_s] = ([0] * len(self.actions))
        self.q_table_powerup[current_s] = ([0] * len(self.actions))
    # update Q values
    if self.prev_s is not None and self.prev_a is not None:
        self.updateQTable(current_r, current_s, self.prev_s, self.prev_a)
    if self.health <= 0:
        # out of health: teleport back to the start and refill
        agent_host.sendCommand("chat /tp Rayys %d %d %d"%(8.5,57.0,1.5))#if not at the pillar position but still wants to jump, teleport to ice to start next genetation
        self.health = 100
    self.drawQ(curr_x=int(obs[u'XPos']), curr_y=int(obs[u'ZPos']))
    # block under the agent's feet; consume a power-up if standing on one
    cur_pos = (int(obs[u'XPos']),int(obs[u'YPos'])-1,int(obs[u'ZPos']))
    if cur_pos in self.powerUps:
        self.powerUps.remove(cur_pos)
    # --- low-level command helpers (closures over self/time) ---
    def moveRight(ah):
        ah.sendCommand("strafe 1")
    def moveLeft(ah):
        ah.sendCommand("strafe -1")
    def moveStraight(ah):
        ah.sendCommand("move 1")
    def moveBack(ah):
        ah.sendCommand("move -1")
    def teleport(ah,x,y,z):
        # only the pillar position (8.5, 4.5) allows an upward jump;
        # anywhere else the teleport resets the agent onto the ice
        if x == 8.5 and z == 4.5:
            ah.sendCommand("chat /tp Rayys %d %d %d"%(x,y+self.jumpH,z+1))
            time.sleep(1)
        else:
            ah.sendCommand("chat /tp Rayys %d %d %d"%(10,57,6)) #if not at the pillar position but still wants to jump, teleport to ice to start next genetation
            time.sleep(1)
    def legal(x, z):
        # enumerate the moves allowed from grid position (x, z)
        LegalMoves = []
        if z < 6:
            LegalMoves.append("up")
        if x == 8.5 and z == 4.5:
            LegalMoves.append("teleport")
        if z > 1:
            LegalMoves.append("down")
        if x < 10:
            LegalMoves.append("left")
        if x > 6:
            LegalMoves.append("right")
        return LegalMoves
    p = current_s.split(":")
    xp = int(p[0])
    yp = int(p[1])
    legal = legal(xp, yp)
    self.logger.debug(legal)
    random_int = random.random()
    if random_int <= self.epsilon:
        # exploration: pick a uniformly random legal move
        num = random.randint(0,len(legal)-1)
        action = legal[num]
        if action == "up": # up
            moveStraight(agent_host)
            self.prev_a = 0
        if action == 'down': # down
            moveBack(agent_host)
            self.prev_a = 1
        if action == 'left':# left
            moveLeft(agent_host)
            self.prev_a = 2
        if action == 'right': #right
            moveRight(agent_host)
            self.prev_a = 3
        if action == 'teleport':
            teleport(agent_host,obs["XPos"],obs["YPos"],obs["ZPos"])
            self.prev_a = 4
        self.prev_s = current_s
    else:
        # exploitation: act greedily w.r.t. the appropriate Q-table
        # (low health switches the agent to the power-up-seeking table)
        table = dict()
        if self.health <= 75:
            table = self.q_table_powerup
        else:
            table = self.q_table
        sorted_spots = sorted(table[current_s])
        valC = sorted_spots[-1]
        # collect all actions tied for the best value, break ties randomly
        spots = []
        for i in range(len(table[current_s])):
            if (table[current_s][i]==valC):
                spots.append(i)
        randomMove = random.choice(spots)
        if randomMove == 0: # up
            moveStraight(agent_host)
            self.prev_a = 0
        if randomMove == 1: # down
            moveBack(agent_host)
            self.prev_a = 1
        if randomMove == 2:# left
            moveLeft(agent_host)
            self.prev_a = 2
        if randomMove == 3: #right
            moveRight(agent_host)
            self.prev_a = 3
        if randomMove == 4 :
            teleport(agent_host,obs["XPos"],obs["YPos"],obs["ZPos"])
            self.prev_a = 4
        self.prev_s = current_s
    return current_r
# do not change this function
def run(self, agent_host):
    """run the agent on the world

    Drives one full mission: waits for valid observations / non-zero
    rewards, calls act() for each step, and applies the terminal Q update
    when the mission ends.  Returns the total reward for the episode.
    """
    total_reward = 0
    self.prev_s = None
    self.prev_a = None
    is_first_action = True
    # TODO complete the main loop:
    world_state = agent_host.getWorldState()
    while world_state.is_mission_running:
        current_r = 0
        if is_first_action:
            # wait until have received a valid observation
            while True:
                time.sleep(0.1)
                world_state = agent_host.getWorldState()
                for error in world_state.errors:
                    self.logger.error("Error: %s" % error.text)
                for reward in world_state.rewards:
                    current_r += reward.getValue()
                if world_state.is_mission_running and len(world_state.observations) > 0 and not \
                        world_state.observations[-1].text == "{}":
                    total_reward += self.act(world_state, agent_host, current_r)
                    break
                if not world_state.is_mission_running:
                    break
            is_first_action = False
        else:
            # wait for non-zero reward
            while world_state.is_mission_running and current_r == 0:
                time.sleep(0.1)
                world_state = agent_host.getWorldState()
                for error in world_state.errors:
                    self.logger.error("Error: %s" % error.text)
                for reward in world_state.rewards:
                    current_r += reward.getValue()
            # allow time to stabilise after action
            while True:
                time.sleep(0.1)
                world_state = agent_host.getWorldState()
                for error in world_state.errors:
                    self.logger.error("Error: %s" % error.text)
                for reward in world_state.rewards:
                    current_r += reward.getValue()
                if world_state.is_mission_running and len(world_state.observations) > 0 and not \
                        world_state.observations[-1].text == "{}":
                    total_reward += self.act(world_state, agent_host, current_r)
                    break
                if not world_state.is_mission_running:
                    break
    # process final reward
    total_reward += current_r
    # update Q values
    if self.prev_s is not None and self.prev_a is not None:
        self.updateQTableFromTerminatingState(current_r, self.prev_s, self.prev_a)
    # used to dynamically draw the QTable in a separate window
    self.drawQ()
    return total_reward
# do not change this function
def drawQ(self, curr_x=None, curr_y=None):
    """Render both Q-tables (regular and power-up) in two Tk windows.

    Each cell shows four colored dots (N/S/W/E action values mapped from
    red to green); the agent's current cell, if given, is marked white.
    Windows are created lazily on the first call.
    """
    scale = 30
    world_x = 15
    world_y = 15
    if self.canvas is None or self.canvas2 is None or self.root is None:
        self.root = tk.Tk()
        self.root2 = tk.Tk()
        self.root2.wm_title("Q-table -- Power Up")
        self.root.wm_title('Q-table')
        self.canvas = tk.Canvas(self.root, width=world_x * scale, height=world_y * scale, borderwidth=0,
                                highlightthickness=0, bg="black")
        self.canvas2 = tk.Canvas(self.root2, width=world_x * scale, height=world_y * scale, borderwidth=0,
                                 highlightthickness=0, bg="black")
        self.canvas.grid()
        self.canvas2.grid()
        self.root.update()
        self.root2.update()
    self.canvas.delete("all")
    self.canvas2.delete("all")
    action_inset = 0.1
    action_radius = 0.1
    curr_radius = 0.2
    action_positions = [(0.5, action_inset), (0.5, 1 - action_inset), (action_inset, 0.5), (1 - action_inset, 0.5)]
    # (NSWE to match action order)
    min_value = -20
    max_value = 20
    for x in range(world_x):
        for y in range(world_y):
            s = "%d:%d" % (x, y)
            self.canvas.create_rectangle(x * scale, y * scale, (x + 1) * scale, (y + 1) * scale, outline="#fff",
                                         fill="#002")
            self.canvas2.create_rectangle(x * scale, y * scale, (x + 1) * scale, (y + 1) * scale, outline="#fff",
                                          fill="#002")
            for action in range(4):
                if not s in self.q_table:
                    continue
                value = self.q_table[s][action]
                value2 = self.q_table_powerup[s][action]
                color = int(255 * (value - min_value) / (max_value - min_value)) # map value to 0-255
                color2 = int(255 * (value2 - min_value) / (max_value - min_value)) # map value to 0-255
                color = max(min(color, 255), 0) # ensure within [0,255]
                color2 = max(min(color2, 255), 0) # ensure within [0,255]
                color_string = '#%02x%02x%02x' % (255 - color, color, 0)
                color_string2 = '#%02x%02x%02x' % (255 - color2, color2, 0)
                self.canvas.create_oval((x + action_positions[action][0] - action_radius) * scale,
                                        (y + action_positions[action][1] - action_radius) * scale,
                                        (x + action_positions[action][0] + action_radius) * scale,
                                        (y + action_positions[action][1] + action_radius) * scale,
                                        outline=color_string, fill=color_string)
                self.canvas2.create_oval((x + action_positions[action][0] - action_radius) * scale,
                                         (y + action_positions[action][1] - action_radius) * scale,
                                         (x + action_positions[action][0] + action_radius) * scale,
                                         (y + action_positions[action][1] + action_radius) * scale,
                                         outline=color_string2, fill=color_string2)
    if curr_x is not None and curr_y is not None:
        # white dot marking the agent's current cell in both windows
        self.canvas.create_oval((curr_x + 0.5 - curr_radius) * scale,
                                (curr_y + 0.5 - curr_radius) * scale,
                                (curr_x + 0.5 + curr_radius) * scale,
                                (curr_y + 0.5 + curr_radius) * scale,
                                outline="#fff", fill="#fff")
        self.canvas2.create_oval((curr_x + 0.5 - curr_radius) * scale,
                                 (curr_y + 0.5 - curr_radius) * scale,
                                 (curr_x + 0.5 + curr_radius) * scale,
                                 (curr_y + 0.5 + curr_radius) * scale,
                                 outline="#fff", fill="#fff")
    self.root.update()
    self.root2.update()
# --- script setup: unbuffered output, agent, and mission construction ---
if sys.version_info[0] == 2:
    sys.stdout = os.fdopen(sys.stdout.fileno(), 'w', 0) # flush print output immediately
else:
    import functools
    # on Python 3, rebind print so every call flushes immediately
    print = functools.partial(print, flush=True)
agent = TabQAgent()
agent_host = MalmoPython.AgentHost()
try:
    agent_host.parse(sys.argv)
except RuntimeError as e:
    print('ERROR:', e)
    print(agent_host.getUsage())
    exit(1)
# -- set up the mission -- #
mission_file = './final.xml'
with open(mission_file, 'r') as f:
    print("Loading mission from %s" % mission_file)
    mission_xml = f.read()
    my_mission = MalmoPython.MissionSpec(mission_xml, True)
# add some random holes in the ground to spice things up
for x in range(5, 12):
    my_mission.drawBlock(x,56,0,"ice")
    my_mission.drawBlock(x,56,6,"ice")
    my_mission.drawBlock(x, 61, 5, "ice")
    my_mission.drawBlock(x, 61, 11, "ice")
first_level_y = 56
second_level_y = 56 + 5
# landmark blocks: redstone markers, the quartz pillar, and an air gap
my_mission.drawBlock(6,first_level_y,3,"lit_redstone_ore")
my_mission.drawBlock(9,first_level_y,3,"lit_redstone_ore")
my_mission.drawBlock(6,second_level_y,6,"lit_redstone_ore")
my_mission.drawBlock(9,second_level_y,5+2,"quartz_block")
my_mission.drawBlock(8, 66, 9, "air")
my_mission.drawBlock(8, 66, 5, "air")
my_mission.drawBlock(8, 61, 5, "dirt")
# ice walls bounding both levels of the arena
for z in range(1, 6):
    my_mission.drawBlock(5, 56, z, "ice")
    my_mission.drawBlock(11, 56, z, "ice")
for z in range(5, 12):
    my_mission.drawBlock(5, 61, z, "ice")
    my_mission.drawBlock(11, 61, z, "ice")
max_retries = 3
num_repeats = 300
cumulative_rewards = []
'''
val = input("Input: ")
if (val == "Yes"):
    print("Hello")
'''
for i in range(num_repeats):
print()
print('Repeat %d of %d' % (i + 1, num_repeats))
my_mission_record = MalmoPython.MissionRecordSpec()
for retry in range(max_retries):
try:
agent_host.startMission(my_mission, my_mission_record)
break
except RuntimeError as e:
if retry == max_retries - 1:
print("Error starting mission:", e)
exit(1)
else:
time.sleep(2.5)
print("Waiting for the mission to start", end=' ')
world_state = agent_host.getWorldState()
while not world_state.has_mission_begun:
print(".", end="")
time.sleep(0.1)
world_state = agent_host.getWorldState()
for error in world_state.errors:
print("Error:", error.text)
print()
# -- run the agent in the world -- #
cumulative_reward = agent.run(agent_host)
print('Cumulative reward: %d' % cumulative_reward)
cumulative_rewards += [cumulative_reward]
# -- clean up -- #
time.sleep(0.5) # (let the Mod reset)
print("Done.")
print()
print("Cumulative rewards for all %d runs:" % num_repeats)
print(cumulative_rewards)
|
# -*- coding: utf8 -*-
# vim: ts=4 sts=4 sw=4 et:
from django.conf.urls import include, url
from django.contrib import admin
from burndown_for_what.views import BurndownTemplateView, SprintView, SprintDetailView, MilestoneView, IssueView
# URL routing for the burndown_for_what app: one HTML view plus a small
# JSON API for sprints, milestones and issues.
urlpatterns = [
    # HTML burndown chart page for a single sprint.
    url(
        r'^sprint/(?P<sprint_id>\d+)/$',
        BurndownTemplateView.as_view(),
        name='burndown_sprint'
    ),
    url(
        r'^api/sprint/$',
        SprintView.as_view(),
        name='api_sprint'
    ),
    # NOTE(review): this route reuses the name 'burndown_sprint' from the
    # first pattern, so reverse('burndown_sprint') resolves to only one of
    # them -- looks like a copy-paste slip (perhaps 'api_sprint_detail'
    # was intended). Confirm which name templates reverse() before renaming.
    url(
        r'^api/sprint/(?P<sprint_id>\d+)/$',
        SprintDetailView.as_view(),
        name='burndown_sprint'
    ),
    url(
        r'^api/milestones/$',
        MilestoneView.as_view(),
        name='api_milestones'
    ),
    # NOTE(review): duplicate name 'api_milestones' on the issues route --
    # probably 'api_issues' was intended; same caveat as above.
    url(
        r'^api/issues/(?P<milestone>\w+)/$',
        IssueView.as_view(),
        name='api_milestones'
    ),
]
|
from __future__ import print_function, division, absolute_import, unicode_literals
import io
from fontTools.misc.py23 import *
from ufo2ft import (
compileOTF,
compileTTF,
compileInterpolatableTTFs,
compileVariableTTF,
compileVariableCFF2,
)
import warnings
import difflib
import os
import sys
import pytest
def getpath(filename):
    """Return the path of *filename* inside this test module's data directory."""
    here = os.path.dirname(__file__)
    return os.path.join(here, "data", filename)
@pytest.fixture
def testufo(FontClass):
    """Fixture: the shared TestFont.ufo loaded with the parametrized FontClass."""
    return FontClass(getpath("TestFont.ufo"))
def readLines(f):
    """Return the lines of file-like *f*, normalized for diffing.

    Rewinds *f*, strips trailing whitespace from each line and re-appends
    the OS-native separator (so difflib can compare), and collapses any
    ``<ttFont ...>`` opening tag to a bare ``<ttFont>`` because the
    ttLibVersion attribute changes frequently and would cause spurious diffs.
    """
    f.seek(0)
    return [
        ("<ttFont>" if raw.startswith("<ttFont ") else raw.rstrip()) + os.linesep
        for raw in f.readlines()
    ]
def expectTTX(font, expectedTTX, tables=None):
    """Serialize *font* to TTX and fail the test when it differs from the
    expected file *expectedTTX* (looked up in the data directory).

    Optionally restricts the dump to *tables*. On mismatch, a unified diff
    is written to stderr before failing.
    """
    with open(getpath(expectedTTX), "r", encoding="utf-8") as expected_file:
        expected = readLines(expected_file)
    # Pin the build-dependent head fields so the dump is reproducible.
    font.recalcTimestamp = False
    font["head"].created, font["head"].modified = 3570196637, 3601822698
    font["head"].checkSumAdjustment = 0x12345678
    buf = UnicodeIO()
    font.saveXML(buf, tables=tables)
    actual = readLines(buf)
    if actual == expected:
        return
    for diff_line in difflib.unified_diff(
        expected, actual, fromfile=expectedTTX, tofile="<generated>"
    ):
        sys.stderr.write(diff_line)
    pytest.fail("TTX output is different from expected")
@pytest.fixture(params=[None, True, False])
def useProductionNames(request):
    """Fixture: run each test with useProductionNames unset, True and False."""
    return request.param
class IntegrationTest(object):
    """End-to-end compile tests: a UFO in, an expected TTX dump out."""

    # Layout tables compared when a test only cares about OpenType features.
    _layoutTables = ["GDEF", "GSUB", "GPOS", "BASE"]

    # We have specific unit tests for CFF vs TrueType output, but we run
    # an integration test here to make sure things work end-to-end.
    # No need to test both formats for every single test case.

    def test_TestFont_TTF(self, testufo):
        ttf = compileTTF(testufo)
        expectTTX(ttf, "TestFont.ttx")

    def test_TestFont_CFF(self, testufo):
        otf = compileOTF(testufo)
        expectTTX(otf, "TestFont-CFF.ttx")

    def test_included_features(self, FontClass):
        """Checks how the compiler handles include statements in features.fea.

        The compiler should detect which features are defined by the
        features.fea inside the compiled UFO, or by feature files that
        are included from there.
        https://github.com/googlei18n/ufo2ft/issues/108

        Relative paths should be resolved taking the UFO path as reference,
        not the embedded features.fea file.
        https://github.com/unified-font-object/ufo-spec/issues/55
        """
        ufo = FontClass(getpath("Bug108.ufo"))
        ttf = compileTTF(ufo)
        expectTTX(ttf, "Bug108.ttx", tables=self._layoutTables)

    def test_mti_features(self, FontClass):
        """Checks handling of UFOs with embedded MTI/Monotype feature files
        https://github.com/googlei18n/fontmake/issues/289
        """
        ufo = FontClass(getpath("MTIFeatures.ufo"))
        ttf = compileTTF(ufo)
        expectTTX(ttf, "MTIFeatures.ttx", tables=self._layoutTables)

    def test_removeOverlaps_CFF(self, testufo):
        otf = compileOTF(testufo, removeOverlaps=True)
        expectTTX(otf, "TestFont-NoOverlaps-CFF.ttx")

    def test_removeOverlaps_CFF_pathops(self, testufo):
        otf = compileOTF(testufo, removeOverlaps=True, overlapsBackend="pathops")
        expectTTX(otf, "TestFont-NoOverlaps-CFF-pathops.ttx")

    def test_removeOverlaps(self, testufo):
        ttf = compileTTF(testufo, removeOverlaps=True)
        expectTTX(ttf, "TestFont-NoOverlaps-TTF.ttx")

    def test_removeOverlaps_pathops(self, testufo):
        ttf = compileTTF(testufo, removeOverlaps=True, overlapsBackend="pathops")
        expectTTX(ttf, "TestFont-NoOverlaps-TTF-pathops.ttx")

    def test_interpolatableTTFs_lazy(self, FontClass):
        # two same UFOs **must** be interpolatable
        ufos = [FontClass(getpath("TestFont.ufo")) for _ in range(2)]
        ttfs = list(compileInterpolatableTTFs(ufos))
        expectTTX(ttfs[0], "TestFont.ttx")
        expectTTX(ttfs[1], "TestFont.ttx")

    # optimizeCFF levels: 0 = no optimization, 1 = specialize only,
    # 2 = specialize + subroutinize (the default output).
    def test_optimizeCFF_none(self, testufo):
        otf = compileOTF(testufo, optimizeCFF=0)
        expectTTX(otf, "TestFont-NoOptimize-CFF.ttx")

    def test_optimizeCFF_specialize(self, testufo):
        otf = compileOTF(testufo, optimizeCFF=1)
        expectTTX(otf, "TestFont-Specialized-CFF.ttx")

    def test_optimizeCFF_subroutinize(self, testufo):
        otf = compileOTF(testufo, optimizeCFF=2)
        expectTTX(otf, "TestFont-CFF.ttx")

    def test_compileVariableTTF(self, designspace, useProductionNames):
        varfont = compileVariableTTF(designspace, useProductionNames=useProductionNames)
        expectTTX(
            varfont,
            "TestVariableFont-TTF{}.ttx".format(
                "-useProductionNames" if useProductionNames else ""
            ),
        )

    def test_compileVariableCFF2(self, designspace, useProductionNames):
        varfont = compileVariableCFF2(
            designspace, useProductionNames=useProductionNames
        )
        expectTTX(
            varfont,
            "TestVariableFont-CFF2{}.ttx".format(
                "-useProductionNames" if useProductionNames else ""
            ),
        )

    def test_debugFeatureFile(self, designspace):
        # debugFeatureFile should receive the generated feature code for
        # every master in the designspace.
        tmp = io.StringIO()
        varfont = compileVariableTTF(designspace, debugFeatureFile=tmp)
        assert "### LayerFont-Regular ###" in tmp.getvalue()
        assert "### LayerFont-Bold ###" in tmp.getvalue()
if __name__ == "__main__":
    # Allow running this test module directly by delegating to pytest.
    sys.exit(pytest.main(sys.argv))
|
import unittest
from belleflopt.support import day_of_water_year, water_year
class TestWaterYearDates(unittest.TestCase):
    """Sanity checks for the water-year helpers.

    A water year runs Oct 1 through Sep 30 and is numbered by the calendar
    year it ends in (e.g. Oct 2018 belongs to water year 2019).
    """

    def test_day_of_water_year(self):
        # Oct 1 is always day 1 of the water year.
        self.assertEqual(1, day_of_water_year(2010, 10, 1))
        self.assertEqual(1, day_of_water_year(2020, 10, 1))
        self.assertEqual(366, day_of_water_year(2020, 9, 30))  # leap year water year will have day 366
        self.assertEqual(365, day_of_water_year(2019, 9, 30))  # normal water year ending on day 365
        self.assertEqual(92, day_of_water_year(2020, 12, 31))  # but dec 31 should always be the same
        self.assertEqual(92, day_of_water_year(2019, 12, 31))

        # Just confirm for our own sanity (and if someone changes it so we don't use arrow anymore) that leap years are correct
        self.assertRaises(ValueError, day_of_water_year, 2019, 2, 29)
        self.assertEqual(151, day_of_water_year(2019, 2, 28))
        self.assertEqual(151, day_of_water_year(2020, 2, 28))
        self.assertEqual(152, day_of_water_year(2020, 2, 29))
        self.assertEqual(153, day_of_water_year(2020, 3, 1))
        self.assertEqual(152, day_of_water_year(2019, 3, 1))

    def test_water_year(self):
        # Months Jan-Sep belong to the current calendar year's water year;
        # Oct-Dec roll forward into the next one.
        self.assertEqual(2019, water_year(2019, 2))
        self.assertEqual(2018, water_year(2018, 2))
        self.assertEqual(2019, water_year(2018, 12))
        self.assertEqual(2019, water_year(2018, 10))
        self.assertEqual(2018, water_year(2018, 9))
        self.assertEqual(2019, water_year(2019, 9))
        self.assertEqual(2020, water_year(2019, 10))

if __name__ == '__main__':
    unittest.main()
|
# ------------------------------------------------------------------------------
# Copyright (c) 2010-2013, EVEthing team
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without modification,
# are permitted provided that the following conditions are met:
#
# Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
# Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
# IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
# INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
# NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY
# OF SUCH DAMAGE.
# ------------------------------------------------------------------------------
from django.contrib.auth.models import User
from django.db import models
from thing.models.character import Character
from thing.models.corpwallet import CorpWallet
class Campaign(models.Model):
    """A user-defined campaign over a date range.

    Transactions belong to a campaign either through one of its corp
    wallets or, for wallet-less (personal) transactions, through one of
    its characters.
    """

    # NOTE(review): ForeignKey without on_delete is pre-Django-2.0 syntax,
    # and null=True on ManyToManyField has no effect (Django warns) --
    # kept as-is since changing fields would require a migration.
    user = models.ForeignKey(User)
    title = models.CharField(max_length=32)
    slug = models.SlugField(max_length=32)
    start_date = models.DateTimeField()
    end_date = models.DateTimeField()
    corp_wallets = models.ManyToManyField(CorpWallet, blank=True, null=True)
    characters = models.ManyToManyField(Character, blank=True, null=True)

    class Meta:
        app_label = 'thing'
        ordering = ('title',)

    def __unicode__(self):
        return self.title

    def get_transactions_filter(self, transactions):
        """Narrow a Transaction queryset to this campaign: within the
        campaign's date range AND (in one of its corp wallets OR, when the
        transaction has no corp wallet, made by one of its characters).
        """
        return transactions.filter(
            models.Q(corp_wallet__in=self.corp_wallets.all())
            |
            (
                models.Q(corp_wallet=None)
                &
                models.Q(character__in=self.characters.all())
            ),
            date__range=(self.start_date, self.end_date),
        )
|
# coding=utf8
# Copyright 2018 JDCLOUD.COM
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# NOTE: This class is auto generated by the jdcloud code generator program.
class CveRule(object):
    """Value object describing one CVE rule (JDCloud API model)."""

    def __init__(self, cveId=None, cveName=None, cveType=None, cveInfo=None):
        """
        :param cveId: (Optional) CVE identifier
        :param cveName: (Optional) CVE name, at most 32 bytes
        :param cveType: (Optional) CVE type
        :param cveInfo: (Optional) CVE description
        """
        # Store every field verbatim; all are optional and default to None.
        for attr, value in (
            ("cveId", cveId),
            ("cveName", cveName),
            ("cveType", cveType),
            ("cveInfo", cveInfo),
        ):
            setattr(self, attr, value)
|
# -*- coding: utf-8 -*-
import cv2
import numpy as np
import collections
def SeedFill(point, src, label):
    """Flood-fill the 4-connected region of `src` containing `point`.

    Every pixel reachable from `point` through chains of equal-valued
    4-neighbours is marked 255 in `label` (modified in place).

    Parameters
    ----------
    point : [row, col] seed coordinate.
    src : 2-D single-channel image.
    label : 2-D output mask, same shape as `src`; 0 means unvisited.

    Fix vs. original: the original scanned the whole queue with
    ``deque.count(...)`` for every neighbour (O(queue) per pixel, i.e.
    quadratic overall). Marking `label` at enqueue time guarantees each
    pixel is queued at most once and removes the scan; the resulting
    mask is identical.

    NOTE(review): like the original, neighbours are indexed without bounds
    checks, so index -1 wraps around numpy-style; callers appear to pick
    seeds away from the border -- confirm the region never touches it.
    """
    queue = collections.deque()
    queue.append(point)
    label[point[0], point[1]] = 255  # mark on enqueue: queued at most once
    while queue:
        r, c = queue.popleft()
        value = src[r, c]
        # 4-connected neighbourhood: up, left, down, right.
        for nr, nc in ((r - 1, c), (r, c - 1), (r + 1, c), (r, c + 1)):
            if src[nr, nc] == value and label[nr, nc] == 0:
                label[nr, nc] = 255
                queue.append([nr, nc])
if __name__ == "__main__":
    # Demo: locate the first interior black pixel that has white directly
    # above and to its right (an upper inner corner of a contour) and
    # flood-fill the black region starting there.
    img = cv2.imread('./bin_contour/sobel_mask_10.png')
    img_gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    size = img_gray.shape
    label = np.zeros((size[0],size[1]), dtype= np.uint8)
    endflag = 0  # set once a seed is found, to break out of both loops
    for i in range(1,size[0]-1):
        for j in range(1,size[1]-1):
            if img_gray[i,j] == 0:
                if img_gray[i-1,j] == 255 and img_gray[i,j+1] == 255:
                    start_point = [i,j]
                    endflag = 1
                    break
        if endflag == 1:
            break
    # NOTE(review): if no seed pixel matches, start_point is never bound
    # and the call below raises NameError.
    SeedFill(start_point, img_gray, label)
    cv2.imshow("gray", img_gray)
    cv2.waitKey(0)
    cv2.imshow("label", label)
    cv2.waitKey(0)
|
import numpy as np
import matplotlib.pyplot as plt
from constants import Constants as cts
import warnings; warnings.filterwarnings('ignore')
class ImageReader:
    """Callable that reads an image from disk according to a fixed mode."""

    def __init__(self, mode):
        # mode is compared below against the cts.ird_* constants
        # (color / gray / unchanged); only 0, 1 and 2 are accepted.
        assert mode in [0, 1, 2], 'Insert a valid mode. Print cts.__doc__ for help.'
        self.mode = mode

    def __call__(self, path: str) -> np.ndarray:
        """Read the image at *path* and return it as a float32 ndarray."""
        # cv2 is imported lazily so the module can be imported without
        # OpenCV installed until an image is actually read.
        from cv2 import imread, cvtColor
        if self.mode == cts.ird_color:
            image = imread(path)
            # 4 == cv2.COLOR_BGR2RGB: convert OpenCV's BGR order to RGB.
            image = cvtColor(image, 4)
        elif self.mode == cts.ird_gray:
            image = imread(path, 0)   # 0 == cv2.IMREAD_GRAYSCALE
        else:
            image = imread(path, -1)  # -1 == cv2.IMREAD_UNCHANGED
        return image.astype('float32')
class ColorConverter:
    """Convert images between colour representations.

    `mode` selects the conversion applied by __call__:
        0/1: invert channel order (RGB <-> BGR)
        2:   RGB  -> grayscale (BT.601 luma, ceil-rounded)
        3:   gray -> RGB (channel replicated; uses module-level merge())
        4:   RGB  -> RGBA (opaque alpha appended)
        5:   RGBA -> RGB (alpha dropped)
        6:   RGB  -> HSV (OpenCV-style ranges: H in [0,180], S/V in [0,255])
        7:   HSV  -> RGB

    NOTE(review): modes 8-11 pass the constructor assert but have no entry
    in the dispatch table and raise KeyError when called -- confirm whether
    the assert should be tightened.
    """

    def __init__(self, mode: int):
        assert mode in range(12), 'Please use a valid mode'
        self.mode = mode
        # Dispatch table mapping mode to the private conversion method.
        self.conversion_methods = {
            0: self.__invertOrder,
            1: self.__invertOrder,
            2: self.__rgb2Gray,
            3: self.__gray2Rgb,
            4: self.__rgb2Rgba,
            5: self.__rgba2Rgb,
            6: self.__rgb2Hsv,
            7: self.__hsv2Rgb
        }

    def __call__(self, image: np.array) -> np.array:
        """Apply the configured conversion and return a uint8 image."""
        result = self.conversion_methods[self.mode](image)
        return result.astype('uint8')

    def __invertOrder(self, image):
        # Reverse the channel axis (RGB <-> BGR).
        return image[..., ::-1]

    def __rgb2Gray(self, image):
        # BT.601 luma weights, rounded up to the next integer.
        return np.ceil(0.2989 * image[..., 0] + 0.587 * image[..., 1] + 0.114 * image[..., 2])

    def __gray2Rgb(self, image):
        # Replicate the single channel three times (module-level merge()).
        return merge([image] * 3)

    def __rgb2Rgba(self, image):
        # Append a fully-opaque (255) alpha channel per pixel.
        result = np.zeros((image.shape[0], image.shape[1], 4))
        for i in range(image.shape[0]):
            for j in range(image.shape[1]):
                result[i][j] = np.append(image[i][j], 255)
        return result

    def __rgba2Rgb(self, image):
        # Drop the alpha channel.
        return image[..., :3]

    def __rgb2Hsv(self, image):
        """Per-pixel RGB -> HSV with H in [0,180] and S, V in [0,255].

        Bug fix vs. original: the branch selection compared the argmax
        *index* against channel *values* (`M[1] == pixel[0]`), and the
        green-max branch used (b - g) instead of the standard (b - r),
        yielding wrong hues for most inputs.
        """
        def convertPixel(pixel):
            max_value, max_index = pixel.max(), np.argmax(pixel)
            c = max_value - pixel.min()  # chroma
            v = max_value
            s = 0 if max_value == 0 else c / max_value
            if c == 0:
                h = 0
            elif max_index == 0:  # red is the max channel
                # Wrap negative hues into [0, 6).
                a = 6 if pixel[1] < pixel[2] else 0
                h = ((pixel[1] - pixel[2]) / c) + a
            elif max_index == 1:  # green is the max channel
                h = ((pixel[2] - pixel[0]) / c) + 2
            else:                 # blue is the max channel
                h = ((pixel[0] - pixel[1]) / c) + 4
            # Scale hue from [0,6) to [0,180), saturation/value to [0,255].
            h = h / 6 * 180
            s *= 255
            v *= 255
            return np.uint8([h, s, v])

        # Accept images scaled either to [0,1] or to [0,255].
        division_factor = 1 if image.max() <= 1 else 255
        result = np.zeros_like(image).astype('uint8')
        for i in range(image.shape[0]):
            for j in range(image.shape[1]):
                result[i][j] = convertPixel(image[i][j] / division_factor)
        return result

    def __hsv2Rgb(self, image):
        """Per-pixel HSV (H in [0,180], S/V in [0,255]) -> RGB in [0,255]."""
        def convertPixel(pixel):
            if pixel[2] == 0:
                # V == 0 is pure black regardless of H and S.
                return np.zeros((3, ))
            h, s, v = pixel
            h /= 30   # [0,180] -> [0,6): which sextant of the hue wheel
            s /= 255
            v /= 255
            switch = int(h)
            frac = h - switch
            c = [v * (1 - s),
                 v * (1 - (s * frac)),
                 v * (1 - (s * (1 - frac)))]
            if switch == 0:
                r, g, b = v, c[2], c[0]
            elif switch == 1:
                r, g, b = c[1], v, c[0]
            elif switch == 2:
                r, g, b = c[0], v, c[2]
            elif switch == 3:
                r, g, b = c[0], c[1], v
            elif switch == 4:
                r, g, b = c[2], c[1], v
            else:
                r, g, b = v, c[0], c[1]
            r *= 255
            g *= 255
            b *= 255
            return np.uint8([r, g, b])

        result = np.zeros_like(image).astype('uint8')
        for i in range(image.shape[0]):
            for j in range(image.shape[1]):
                result[i][j] = convertPixel(image[i][j])
        return result
def mapToRange(image: np.ndarray, low: float, high: float) -> np.ndarray:
    """Linearly rescale *image* so its minimum maps to *low* and its maximum to *high*.

    NOTE(review): a constant image divides by zero here (as in the
    original) -- confirm callers never pass one.
    """
    lo, hi = image.min(), image.max()
    normalized = (image - lo) / (hi - lo)
    return normalized * (high - low) + low
def clipToRange(image: np.ndarray, low: float, high: float) -> np.ndarray:
    """Return a copy of *image* with all values forced into [low, high].

    Values are written back with the array's own dtype, so float bounds
    are truncated when the input array is integer-typed.
    """
    clipped = image.copy()
    below, above = image < low, image > high
    clipped[below] = low
    clipped[above] = high
    return clipped
def split(image: np.ndarray) -> list:
    """Split a multi-channel image into a list of single-channel arrays."""
    assert len(image.shape) > 2, 'Cannot use it on single channel images.'
    return [image[..., channel] for channel in range(image.shape[2])]
def merge(channels: list) -> np.ndarray:
    """Stack single-channel arrays depth-wise into one multi-channel image."""
    return np.dstack(channels)
def show(image: np.ndarray) -> None:
    """Display *image* with matplotlib, using a gray colormap for 2-D input."""
    plt.axis('off')
    if len(image.shape) == 2:
        plt.imshow(image.astype('uint8'), cmap='gray')
    else:
        plt.imshow(image.astype('uint8'))
    plt.show()
|
"""Routes then builds a get_state response object"""
import logging
from hive.server.bridge_api.objects import load_posts_keyed
from hive.server.common.helpers import (
return_error_info,
valid_account,
valid_permlink)
log = logging.getLogger(__name__)
@return_error_info
async def get_discussion(context, author, permlink):
    """Modified `get_state` thread implementation.

    Validates author/permlink, then returns the full discussion tree
    rooted at that post keyed by `author/permlink` refs, or {} when the
    root post does not exist (or is deleted).
    """
    db = context['db']
    author = valid_account(author)
    permlink = valid_permlink(permlink)
    root_id = await _get_post_id(db, author, permlink)
    if not root_id:
        return {}
    return await _load_discussion(db, root_id)
async def _get_post_id(db, author, permlink):
    """Given an author/permlink, retrieve the id from db.

    Returns None for missing or deleted posts. (author, permlink) is
    presumably unique; LIMIT 1 guards regardless.
    """
    sql = ("SELECT id FROM hive_posts WHERE author = :a "
           "AND permlink = :p AND is_deleted = '0' LIMIT 1")
    return await db.query_one(sql, a=author, p=permlink)
def _ref(post):
return post['author'] + '/' + post['permlink']
async def _child_ids(db, parent_ids):
    """Load child ids for multiple parent ids.

    Returns a list of [parent_id, [child_ids]] pairs; parents with no
    (non-deleted) children are simply absent from the result.
    """
    sql = """
        SELECT parent_id, array_agg(id)
        FROM hive_posts
        WHERE parent_id IN :ids
        AND is_deleted = '0'
        GROUP BY parent_id
    """
    rows = await db.query_all(sql, ids=tuple(parent_ids))
    return [[row[0], row[1]] for row in rows]
async def _load_discussion(db, root_id):
    """Load a full discussion thread.

    Returns a dict of post objects keyed by their `author/permlink` ref,
    each with a `replies` list of child refs. Posts flagged as hidden are
    removed together with their entire subtrees.
    """
    # build `ids` list and `tree` map
    # Breadth-first walk: each pass fetches the children of the previous
    # level until no new comment ids turn up.
    ids = []
    tree = {}
    todo = [root_id]
    while todo:
        ids.extend(todo)
        rows = await _child_ids(db, todo)
        todo = []
        for pid, cids in rows:
            tree[pid] = cids
            todo.extend(cids)

    # load all post objects, build ref-map
    posts = await load_posts_keyed(db, ids)

    # remove posts/comments from muted accounts
    # Extending rem_pids while iterating it prunes each hidden post's
    # whole subtree, not just the post itself.
    rem_pids = []
    for pid, post in posts.items():
        if post['stats']['hide']:
            rem_pids.append(pid)
    for pid in rem_pids:
        if pid in posts:
            del posts[pid]
        if pid in tree:
            rem_pids.extend(tree[pid])

    refs = {pid: _ref(post) for pid, post in posts.items()}

    # add child refs to parent posts
    for pid, post in posts.items():
        if pid in tree:
            post['replies'] = [refs[cid] for cid in tree[pid]
                               if cid in refs]

    # return all nodes keyed by ref
    return {refs[pid]: post for pid, post in posts.items()}
|
import numpy as np
class BackPropagationNetwork:
    """A feed-forward neural network trained by back-propagation.

    The topology is given by `layerSize`, a sequence of layer widths
    including the input layer, e.g. (2, 2, 1).

    Bug fix vs. original: `layerCount`, `shape` and in particular the
    mutable `weights` list were *class-level* attributes, so `__init__`
    appended each new instance's weight matrices onto one shared list --
    a second network ended up with double the matrices. All state is now
    per-instance.
    """

    def __init__(self, layerSize):
        """Initialize the network.

        :param layerSize: sequence of layer widths, input layer first.
        """
        # Layer info (per-instance; see class docstring).
        self.layerCount = len(layerSize) - 1
        self.shape = layerSize
        self.weights = []

        # Data from last run
        self._layerInput = []
        self._layerOutput = []
        self._previousWeightDelta = []

        # One weight matrix per layer transition; the extra column (+1)
        # holds the bias weights.
        for (l1, l2) in zip(layerSize[:-1], layerSize[1:]):
            self.weights.append(np.random.normal(scale=0.01, size=(l2, l1 + 1)))
            self._previousWeightDelta.append(np.random.normal(scale=0.01, size=(l2, l1 + 1)))

    #
    # Run method
    #
    def Run(self, input):
        """Run the network on `input` (cases in rows); returns output, cases in rows."""
        lnCases = input.shape[0]

        # Clear out the previous intermediate value lists
        self._layerInput = []
        self._layerOutput = []

        for index in range(self.layerCount):
            # Layer input: previous output (or the raw input, transposed to
            # cases-in-columns) augmented with a row of ones for the bias.
            if index == 0:
                layerInput = self.weights[0].dot(np.vstack([input.T, np.ones([1, lnCases])]))
            else:
                layerInput = self.weights[index].dot(
                    np.vstack([self._layerOutput[-1], np.ones([1, lnCases])]))
            self._layerInput.append(layerInput)
            self._layerOutput.append(self.sgm(layerInput))

        return self._layerOutput[-1].T

    #
    # TrainEpoch method
    #
    def TrainEpoch(self, input, target, trainingRate=0.0005, momentum=0.5):
        """Train the network for one epoch; returns the summed squared error."""
        delta = []
        lnCases = input.shape[0]

        # Forward pass populates self._layerInput / self._layerOutput.
        self.Run(input)

        # Backward pass: deltas from the output layer inward.
        for index in reversed(range(self.layerCount)):
            if index == self.layerCount - 1:
                # Output layer: compare to the target values.
                output_delta = self._layerOutput[index] - target.T
                error = np.sum(output_delta ** 2)
                delta.append(output_delta * self.sgm(self._layerInput[index], True))
            else:
                # Hidden layer: pull back the following layer's delta and
                # drop the bias row before applying the derivative.
                delta_pullback = self.weights[index + 1].T.dot(delta[-1])
                delta.append(delta_pullback[:-1, :] * self.sgm(self._layerInput[index], True))

        # Weight update with momentum. delta was built output-first, hence
        # the reversed indexing via delta_index.
        for index in range(self.layerCount):
            delta_index = self.layerCount - 1 - index
            if index == 0:
                layerOutput = np.vstack([input.T, np.ones([1, lnCases])])
            else:
                layerOutput = np.vstack([
                    self._layerOutput[index - 1],
                    np.ones([1, self._layerOutput[index - 1].shape[1]])])
            # Sum the per-case outer products of layer output and delta.
            curWeightDelta = np.sum(
                layerOutput[None, :, :].transpose(2, 0, 1)
                * delta[delta_index][None, :, :].transpose(2, 1, 0),
                axis=0)
            weightDelta = trainingRate * curWeightDelta + momentum * self._previousWeightDelta[index]
            self.weights[index] -= weightDelta
            self._previousWeightDelta[index] = weightDelta

        return error

    # Transfer function
    def sgm(self, x, Derivative=False):
        """Sigmoid transfer function, or its derivative when Derivative=True."""
        if not Derivative:
            return 1 / (1 + np.exp(-x))
        out = self.sgm(x)
        return out * (1 - out)
#
# if script run --create a test object
#
if __name__=="__main__":
    # NOTE: this block uses Python 2 print statements and will not parse
    # under Python 3.
    # (Commented-out XOR toy example retained for reference.)
    # bpn=BackPropagationNetwork((2,2,1))
    # print bpn.shape
    # print bpn.weights
    # lvInput=np.array([[0,0],[1,1],[0,1],[1,0]])
    # lvTarget=np.array([[0.05],[0.05],[0.95],[0.95]])
    # lnMax=100000
    # lnErr=1e-5
    # for i in range(lnMax+1):
    #     err=bpn.TrainEpoch(lvInput,lvTarget)
    #     if i%2500==0:
    #         print "Iteration {0}\tError: {1:0.6f}".format(i,err)
    #     if err<=lnErr:
    #         print "Minimum error reached at iteration {0}".format(i)
    #         break
    # # Display output
    # lvOutput=bpn.Run(lvInput)
    # print "Input: {0}\nOutput: {1}".format(lvInput,lvOutput)

    # Train a 768-6-6-3 network on vectors loaded from CSV until the error
    # drops below lnErr or lnMax epochs elapse.
    bpn=BackPropagationNetwork((768,6,6,3))
    lvInput=np.genfromtxt("input_vector1.csv", delimiter=" ")
    lvTarget=np.genfromtxt("output_vector1.csv", delimiter=" ")
    lnMax=10000
    lnErr=1
    for i in range(lnMax+1):
        err=bpn.TrainEpoch(lvInput,lvTarget)
        # if i%1==0:
        print "Iteration {0}\tError: {1:0.1f}".format(i,err)
        if err<=lnErr:
            print "Minimum error reached at iteration {0}".format(i)
            break
    # Show the first row of input-layer weights after training.
    print bpn.weights[0][0]
|
# Public package surface: re-export the dataset/reader classes so callers
# can import them directly from the package root.
from .trainset import TrainSet, MatrixTrainSet
from .testset import TestSet
from .reader import Reader

# Names exported by `from <package> import *`.
__all__ = ['TrainSet',
           'MatrixTrainSet',
           'TestSet',
           'Reader']
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""config for pytest."""
def pytest_configure(config):
    """Configure pytest."""
    # Pass --ignore-missing-imports to the pytest-mypy plugin so type
    # checking does not fail on untyped third-party dependencies.
    plugin = config.pluginmanager.getplugin("mypy")
    plugin.mypy_argv.append("--ignore-missing-imports")
|
from backend.handler.project import project
from backend.handler.project import project_group |
from random import randint
from retrying import retry
import apysc as ap
from apysc._display.rotation_around_center_interface import \
RotationAroundCenterInterface
from apysc._expression import expression_data_util
class _TestInterface(RotationAroundCenterInterface):

    def __init__(self) -> None:
        """
        The class for the testing of the RotationAroundCenterInterface.
        """
        # A fixed variable name so generated JS expressions are predictable
        # in the assertions below.
        self.variable_name = 'test_rotation_around_center_interface'
class TestRotationAroundCenterInterface:
    """Tests for RotationAroundCenterInterface."""

    @retry(stop_max_attempt_number=15, wait_fixed=randint(10, 3000))
    def test__initialize_rotation_around_center_if_not_initialized(
            self) -> None:
        interface: RotationAroundCenterInterface = \
            RotationAroundCenterInterface()
        interface._initialize_rotation_around_center_if_not_initialized()
        assert interface._rotation_around_center == 0

        # Re-initializing must not clobber an already-set value.
        interface._rotation_around_center._value = 10
        interface._initialize_rotation_around_center_if_not_initialized()
        assert interface._rotation_around_center == 10

    @retry(stop_max_attempt_number=15, wait_fixed=randint(10, 3000))
    def test_rotation_around_center(self) -> None:
        interface: _TestInterface = _TestInterface()
        assert interface.rotation_around_center == 0

        interface.rotation_around_center = ap.Int(10)
        assert interface.rotation_around_center == 10

        # Plain ints are accepted too (converted internally).
        interface.rotation_around_center = 20  # type: ignore
        assert interface.rotation_around_center == 20

    @retry(stop_max_attempt_number=15, wait_fixed=randint(10, 3000))
    def test__append_rotation_around_center_update_expression(self) -> None:
        expression_data_util.empty_expression()
        interface: _TestInterface = _TestInterface()
        int_1: ap.Int = ap.Int(10)
        int_2: ap.Int = ap.Int(20)
        interface.rotation_around_center = int_1
        interface.rotation_around_center = int_2
        expression: str = expression_data_util.get_current_expression()
        # Expect the previous rotation to be undone before the new one is
        # applied, and the backing variable to be reassigned.
        expected: str = (
            f'{interface.variable_name}.rotate(-{int_1.variable_name});'
            f'\n{interface.variable_name}.rotate({int_2.variable_name});'
            f'\n{int_1.variable_name} = {int_2.variable_name};'
        )
        assert expected in expression

    @retry(stop_max_attempt_number=15, wait_fixed=randint(10, 3000))
    def test__make_snapshot(self) -> None:
        interface: _TestInterface = _TestInterface()
        interface.rotation_around_center = ap.Int(10)
        snapshot_name: str = interface._get_next_snapshot_name()
        interface._run_all_make_snapshot_methods(snapshot_name=snapshot_name)
        # Bug fix: these two comparisons were bare expressions (the
        # `assert` keyword was missing), so the test could never fail here.
        assert interface._rotation_around_center_snapshots[snapshot_name] == 10

        interface.rotation_around_center = ap.Int(20)
        interface._run_all_make_snapshot_methods(snapshot_name=snapshot_name)
        # Snapshotting under an existing name must not overwrite it.
        assert interface._rotation_around_center_snapshots[snapshot_name] == 10

    @retry(stop_max_attempt_number=15, wait_fixed=randint(10, 3000))
    def test__revert(self) -> None:
        interface: _TestInterface = _TestInterface()
        interface.rotation_around_center = ap.Int(10)
        snapshot_name: str = interface._get_next_snapshot_name()
        interface._run_all_make_snapshot_methods(snapshot_name=snapshot_name)
        interface.rotation_around_center = ap.Int(20)
        interface._run_all_revert_methods(snapshot_name=snapshot_name)
        assert interface.rotation_around_center == 10

        # A second revert with a consumed snapshot leaves the value as-is.
        interface.rotation_around_center = ap.Int(20)
        interface._run_all_revert_methods(snapshot_name=snapshot_name)
        assert interface.rotation_around_center == 20

    @retry(stop_max_attempt_number=15, wait_fixed=randint(10, 3000))
    def test__append_rotation_around_center_attr_linking_setting(
            self) -> None:
        interface: _TestInterface = _TestInterface()
        interface._initialize_rotation_around_center_if_not_initialized()
        assert interface._attr_linking_stack['rotation_around_center'] == \
            [ap.Int(0)]
|
'''
When working with groupby, the data must be sorted first so that all
equal keys end up adjacent and can be gathered into a single group.
'''
from itertools import groupby

alunos = [
    {'nome': 'Adriana', 'nota': '10'
     },
    {'nome': 'Eragon', 'nota': '5'
     },
    {'nome': 'João', 'nota': '7'
     },
    {'nome': 'Maria', 'nota': '7'
     }
]
# Sort by grade: groupby only groups *adjacent* equal keys.
alunos.sort(key=lambda item:item['nota'])
alun_agrupado = groupby(alunos, lambda item:item['nota'])

# First pass: print each group key and how many students share it.
print('vendo a chave e o agrupamento antes de ser desempacotado')
for agrupamento, vl_agrupado in alun_agrupado:
    print(f'agrupamento {agrupamento}')
    qtd = len(list(vl_agrupado))
    print(f'{qtd} alunos tiraram {agrupamento}')

# groupby returns a one-shot iterator; it was exhausted by the loop above,
# so this second loop prints nothing.
print('é gerado um iterador, entao o valor ja foi "usado no primeiro FOR"')
for grupo in alun_agrupado:
    print(grupo)
|
from collections import namedtuple

# Read the number of students and the column headers from stdin (e.g.
# "ID MARKS NAME CLASS"), then average the MARKS column.
(n, m) = (int(input()), input().split())
Grade = namedtuple('Grade', m)
# Grade._make maps one whitespace-split input row onto the named columns;
# .MARKS selects the marks field regardless of column order.
marks = [int(Grade._make(input().split()).MARKS) for _ in range(n)]
# print the average
print((sum(marks) / len(marks)))
|
import pathlib
import textwrap
import jiml
import pytest
def test_imports():
    """Templates can expose imported modules/attributes via the `globals` option."""
    # Listing a top-level module makes it available by its own name.
    t = jiml.load_template(textwrap.dedent(
        '''
        # options
        globals:
          - urllib
        ---
        host: {{ urllib.parse.urlparse(url).hostname }}
        '''
    ))
    assert t({'url': 'https://example.com/hello'}) == {'host': 'example.com'}

    # Listing a dotted path binds the leaf name (`urlparse`) directly.
    t = jiml.load_template(textwrap.dedent(
        '''
        # options
        globals:
          - urllib.parse.urlparse
        ---
        host: {{ urlparse(url).hostname }}
        '''
    ))
    assert t({'url': 'https://example.com/hello'}) == {'host': 'example.com'}
|
"""Basic thermodynamic calculations for pickaxe."""
from typing import Union
import pint
from equilibrator_api import (
Q_,
ComponentContribution,
Reaction,
default_physiological_ionic_strength,
default_physiological_p_h,
default_physiological_p_mg,
default_physiological_temperature,
)
from equilibrator_api.phased_reaction import PhasedReaction
from equilibrator_assets.compounds import Compound
from equilibrator_assets.local_compound_cache import LocalCompoundCache
from equilibrator_cache.compound_cache import CompoundCache
from pymongo import MongoClient
from sqlalchemy import create_engine
from minedatabase.pickaxe import Pickaxe
class Thermodynamics:
"""Class to calculate thermodynamics of Pickaxe runs.
Thermodynamics allows for the calculation of:
1) Standard ∆G' of formation
2) Standard ∆G'o of reaction
3) Physiological ∆G'm of reaction
4) Adjusted ∆G' of reaction
eQuilibrator objects can also be obtained from r_ids and c_ids.
Parameters
----------
mongo_uri: str
URI of the mongo database.
client: MongoClient
Connection to Mongo.
CC: ComponentContribution
eQuilibrator Component Contribution object to calculate ∆G with.
lc: LocalCompoundCache
The local compound cache to generate eQuilibrator compounds from.
"""
    def __init__(
        self,
    ):
        """Initialize with no Mongo connection and a default ComponentContribution."""
        # Mongo params (populated by load_mongo)
        self.mongo_uri = None
        self.client = None
        self._core = None  # handle to the "core" database once connected

        # eQ params
        self.CC = ComponentContribution()
        self.lc = None      # LocalCompoundCache, set by load_thermo_from_*
        self._water = None  # cached eQuilibrator compound for water
    def load_mongo(self, mongo_uri: Union[str, None] = None):
        """Connect to MongoDB and cache a handle to the "core" database.

        Parameters
        ----------
        mongo_uri : Union[str, None]
            URI of the mongo database; defaults to a local connection.
        """
        if mongo_uri:
            self.mongo_uri = mongo_uri
            self.client = MongoClient(mongo_uri)
        else:
            self.mongo_uri = "localhost:27017"
            self.client = MongoClient()
        self._core = self.client["core"]
    def _all_dbs_loaded(self):
        """Return True when Mongo and the eQuilibrator cache are both loaded.

        Prints a reminder and returns False otherwise.
        """
        if self.client and self._core and self.lc:
            return True
        else:
            print("Load connection to Mongo and eQuilibrator local cache.")
            return False
    def _eq_loaded(self):
        """Return True when the eQuilibrator local cache is loaded.

        Prints a reminder and returns False otherwise.
        """
        if self.lc:
            return True
        else:
            print("Load eQulibrator local cache.")
            return False
    def _reset_CC(self):
        """reset CC back to defaults"""
        # Restore physiological pH, pMg, temperature and ionic strength so a
        # previous adjusted-∆G calculation does not leak into the next one.
        self.CC.p_h = default_physiological_p_h
        self.CC.p_mg = default_physiological_p_mg
        self.CC.temperature = default_physiological_temperature
        self.CC.ionic_strength = default_physiological_ionic_strength
    def load_thermo_from_postgres(
        self, postgres_uri: str = "postgresql:///eq_compounds"
    ) -> None:
        """Load a LocalCompoundCache from a postgres uri for equilibrator.

        Parameters
        ----------
        postgres_uri : str, optional
            uri of the postgres DB to use, by default "postgresql:///eq_compounds"
        """
        self.lc = LocalCompoundCache()
        self.lc.ccache = CompoundCache(create_engine(postgres_uri))
        # Pre-fetch the compound for water (SMILES "O") for later use.
        self._water = self.lc.get_compounds("O")
    def load_thermo_from_sqlite(
        self, sqlite_filename: str = "compounds.sqlite"
    ) -> None:
        """Load a LocalCompoundCache from a sqlite file for equilibrator.

        compounds.sqlite can be generated through LocalCompoundCache's method
        generate_local_cache_from_default_zenodo

        Parameters
        ----------
        sqlite_filename: str
            filename of the sqlite file to load.
        """
        self.lc = LocalCompoundCache()
        self.lc.load_cache(sqlite_filename)
        # Pre-fetch the compound for water (SMILES "O") for later use.
        self._water = self.lc.get_compounds("O")
def get_eQ_compound_from_cid(
    self, c_id: str, pickaxe: Pickaxe = None, db_name: str = None
) -> Union[Compound, None]:
    """Resolve a compound id to an eQuilibrator Compound.

    The SMILES is taken from the in-memory pickaxe object when one is
    given; otherwise it is looked up in db_name (if given) and then the
    core database.

    Parameters
    ----------
    c_id : str
        compound ID for MongoDB lookup of a compound.
    pickaxe : Pickaxe
        pickaxe object to look for the compound in, by default None.
    db_name : str
        Database to look for compound in before core database, by default None.

    Returns
    -------
    equilibrator_assets.compounds.Compound
        eQuilibrator Compound, or None when no usable SMILES is found.
    """
    smiles = None
    if pickaxe:
        # In-memory lookup only; a miss here is final.
        if c_id not in pickaxe.compounds:
            return None
        smiles = pickaxe.compounds[c_id]["SMILES"]
    elif self._all_dbs_loaded():
        if db_name:
            doc = self.client[db_name].compounds.find_one(
                {"_id": c_id}, {"SMILES": 1}
            )
            if doc:
                smiles = doc["SMILES"]
        if not smiles:
            # Nothing usable from db_name -- fall back to the core DB.
            doc = self._core.compounds.find_one({"_id": c_id}, {"SMILES": 1})
            if doc:
                smiles = doc["SMILES"]
    # Wildcard atoms ("*") cannot be turned into eQuilibrator compounds.
    if not smiles or "*" in smiles:
        return None
    return self.lc.get_compounds(
        smiles, bypass_chemaxon=True, save_empty_compounds=True
    )
def standard_dg_formation_from_cid(
    self, c_id: str, pickaxe: Pickaxe = None, db_name: str = None
) -> Union[float, None]:
    """Look up a compound and return its standard ∆Gf'o.

    Parameters
    ----------
    c_id : str
        Compound ID to get the ∆Gf for.
    pickaxe : Pickaxe
        pickaxe object to look for the compound in, by default None.
    db_name : str
        Database to look for compound in before core database, by default None.

    Returns
    -------
    Union[float, None]
        ∆Gf'o for a compound, or None if unavailable.
    """
    compound = self.get_eQ_compound_from_cid(c_id, pickaxe, db_name)
    if not compound:
        return None
    # standard_dg_formation returns a sequence; the value is the first item.
    return self.CC.standard_dg_formation(compound)[0]
def get_eQ_reaction_from_rid(
    self, r_id: str, pickaxe: Pickaxe = None, db_name: str = None
) -> Union[PhasedReaction, None]:
    """Get an eQuilibrator reaction object from an r_id.

    Looks the reaction up in the in-memory pickaxe object or a MINE
    database, renders it as an eQuilibrator formula string, and parses
    that into a PhasedReaction.

    Parameters
    ----------
    r_id : str
        Reaction id to get object for.
    pickaxe : Pickaxe
        pickaxe object to look for the compound in, by default None.
    db_name : str
        Database to look for reaction in.

    Returns
    -------
    PhasedReaction
        eQuilibrator reaction to calculate ∆Gr with, or None when the
        reaction or any of its compounds cannot be resolved.
    """
    # Resolve the raw reaction document: in-memory pickaxe first, then
    # the named MINE database; give up if neither is available.
    if pickaxe:
        if r_id in pickaxe.reactions:
            reaction_info = pickaxe.reactions[r_id]
        else:
            return None
    elif db_name:
        mine = self.client[db_name]
        reaction_info = mine.reactions.find_one({"_id": r_id})
        if not reaction_info:
            return None
    else:
        return None
    reactants = reaction_info["Reactants"]
    products = reaction_info["Products"]
    # Each entry is a pair whose second element is a compound id (it is
    # used for compound lookups below); the first element is presumably
    # the stoichiometric coefficient -- TODO confirm against the schema.
    lhs = " + ".join(f"{r[0]} {r[1]}" for r in reactants)
    rhs = " + ".join(f"{p[0]} {p[1]}" for p in products)
    reaction_string = " => ".join([lhs, rhs])
    # Every compound id appearing on either side of the reaction.
    compounds = set([r[1] for r in reactants])
    compounds.update(tuple(p[1] for p in products))
    # Map each compound id to an eQuilibrator compound; if any lookup
    # fails the whole reaction is unusable.
    eQ_compound_dict = {
        c_id: self.get_eQ_compound_from_cid(c_id, pickaxe, db_name)
        for c_id in compounds
    }
    if not all(eQ_compound_dict.values()):
        return None
    # NOTE(review): this hash looks like the compound id for water; when
    # the reaction does not already contain it, the cached eQuilibrator
    # water compound is registered under the key "water". Confirm how
    # parse_formula consumes that key, since the formula string built
    # above references c_ids only.
    if "X73bc8ef21db580aefe4dbc0af17d4013961d9d17" not in compounds:
        eQ_compound_dict["water"] = self._water
    # parse_formula resolves each formula token via the dict's .get.
    eq_reaction = Reaction.parse_formula(eQ_compound_dict.get, reaction_string)
    return eq_reaction
def physiological_dg_prime_from_rid(
    self, r_id: str, pickaxe: Pickaxe = None, db_name: str = None
) -> Union[pint.Measurement, None]:
    """Compute the ∆G'm of the reaction identified by r_id.

    Parameters
    ----------
    r_id : str
        ID of the reaction to calculate.
    pickaxe : Pickaxe
        pickaxe object to look for the compound in, by default None.
    db_name : str
        MINE the reaction is found in.

    Returns
    -------
    pint.Measurement
        The calculated ∆G'm, or None when the reaction cannot be built.
    """
    reaction = self.get_eQ_reaction_from_rid(r_id, pickaxe, db_name)
    if not reaction:
        return None
    return self.CC.physiological_dg_prime(reaction)
def standard_dg_prime_from_rid(
    self, r_id: str, pickaxe: Pickaxe = None, db_name: str = None
) -> Union[pint.Measurement, None]:
    """Compute the ∆G'o of the reaction identified by r_id.

    Parameters
    ----------
    r_id : str
        ID of the reaction to calculate.
    pickaxe : Pickaxe
        pickaxe object to look for the compound in, by default None.
    db_name : str
        MINE the reaction is found in.

    Returns
    -------
    pint.Measurement
        The calculated ∆G'o, or None when the reaction cannot be built.
    """
    reaction = self.get_eQ_reaction_from_rid(r_id, pickaxe, db_name)
    if not reaction:
        return None
    return self.CC.standard_dg_prime(reaction)
def dg_prime_from_rid(
    self,
    r_id: str,
    pickaxe: Pickaxe = None,
    db_name: str = None,
    p_h: Q_ = default_physiological_p_h,
    p_mg: Q_ = default_physiological_p_mg,
    ionic_strength: Q_ = default_physiological_ionic_strength,
) -> Union[pint.Measurement, None]:
    """Calculate the ∆G' of a reaction at the given conditions.

    Temporarily overrides the shared ComponentContribution state with
    the supplied conditions and restores the defaults afterwards.
    (Temperature is not adjustable through this method.)

    Parameters
    ----------
    r_id : str
        ID of the reaction to calculate.
    pickaxe : Pickaxe
        pickaxe object to look for the compound in, by default None.
    db_name : str
        MINE the reaction is found in.
    p_h : Q_
        pH of system.
    p_mg : Q_
        pMg of the system.
    ionic_strength : Q_
        ionic strength of the system.

    Returns
    -------
    pint.Measurement
        The calculated ∆G', or None when the reaction cannot be built.
    """
    eQ_reaction = self.get_eQ_reaction_from_rid(r_id, pickaxe, db_name)
    if not eQ_reaction:
        return None
    # Override the shared CC conditions for this one calculation.
    self.CC.p_h = p_h
    self.CC.p_mg = p_mg
    self.CC.ionic_strength = ionic_strength
    try:
        dG_prime = self.CC.dg_prime(eQ_reaction)
    finally:
        # BUG FIX: the original skipped the reset when dg_prime raised,
        # leaving self.CC polluted with caller-specific conditions.
        self._reset_CC()
    return dG_prime
|
'''Main functionality for artifact creation'''
import copy
import os
import logging
import random
import re
import shutil
import string
import tempfile
import zipfile
from os import path
from multiplexer.source import Github
import boto3
import yaml
LOG = logging.getLogger(__name__)
# Parses "s3://bucket[/key]" destinations: group 1 is the bucket name,
# group 2 the optional key/prefix.
# NOTE(review): the character classes allow only letters, digits, '_' and
# (for the bucket) '-'; bucket names containing '.' and keys containing
# '.' or '-' will not match -- confirm against expected inputs.
S3_REGEX = r'^s3:\/\/([a-zA-Z0-9\_\-]+)\/?([a-zA-Z0-9\_\/]+)?'
def random_string(length):
    """Return a random alphanumeric string of the given length."""
    alphabet = string.ascii_letters + string.digits
    return ''.join(random.choice(alphabet) for _ in range(length))
class Package(object):
    """Stages files in a temporary workspace and zips them into a
    deployable artifact.

    Arguments:
    name -- package name; a '.zip' suffix is appended when missing
    root -- directory in which the package and its temporary workspace
            are created
    """

    def __init__(self, name, root):
        # BUG FIX: the original used re.search(r'.zip$', name) where the
        # unescaped '.' matched any character, so names like 'myzip' were
        # treated as already having the extension.
        if name.endswith('.zip'):
            self.name = name
        else:
            self.name = name + '.zip'
        self.root = path.abspath(root)
        self.package_path = path.join(self.root, self.name)
        # Random suffix avoids collisions between concurrent builds.
        tmp_dir_name = '.aws_cd_multiplex-' + random_string(10)
        self._tmp_workspace = path.join(self.root, tmp_dir_name)
        os.makedirs(self._tmp_workspace)

    def add_file(self, name, source=None, body=None):
        """
        Add a single file to package, must either provide
        a source file to copy or a body of a new file to write.

        Arguments:
        name -- name of file in package

        Keyword arguments:
        source -- Source of new file to write into package.
                  Mutually exclusive with body arg.
        body -- Body of new file to write into package.
                Mutually exclusive with source arg.

        Raises:
        TypeError -- when neither source nor body is provided
        """
        dest = path.join(self._tmp_workspace, name)
        if not source and not body:
            raise TypeError('must set either source or body')
        if source:
            shutil.copyfile(source, dest)
        if body:
            with open(dest, 'w+') as fil:
                fil.write(body)

    def add_directory(self, name, source=None):
        """
        Add directory to package, if no source is provided
        an empty directory is created.

        Arguments:
        name -- directory name inside the package

        Keyword arguments:
        source -- path to source directory to copy recursively
        """
        dest = path.join(self._tmp_workspace, name)
        if source:
            shutil.copytree(source, dest)
        else:
            os.mkdir(dest)

    def create(self):
        """
        Create the package zipfile.

        Returns full path to package.
        """
        abs_src = self._tmp_workspace
        # Context manager guarantees the archive is closed (and its
        # central directory flushed) even if a write fails.
        with zipfile.ZipFile(self.package_path, 'w',
                             zipfile.ZIP_DEFLATED) as zf:
            for root, _, files in os.walk(abs_src):
                for filename in files:
                    absname = path.abspath(path.join(root, filename))
                    # Archive paths are relative to the workspace root.
                    arcname = absname[len(abs_src) + 1:]
                    LOG.debug('Zipping %s as %s',
                              path.join(root, filename), arcname)
                    zf.write(absname, arcname)
        LOG.info("Zipfile %s created", self.package_path)
        return self.package_path

    def clean_tmp(self):
        """Remove the temporary workspace directory."""
        shutil.rmtree(self._tmp_workspace)
class AppSpec(object):
    """Appspec file: tracks version, target OS, file mappings and
    lifecycle hooks, rewriting paths so they live under a
    package-specific prefix.
    """

    def __init__(self, package_name):
        self.package_name = package_name
        self.version = None
        self.os = None
        self.files = []
        self.hooks = {}
        # Parsed YAML document as loaded from disk (None until load()).
        self._raw = None

    def load(self, yml_str):
        """Load an AppSpec file from a YAML string."""
        # SECURITY FIX: yaml.load() without an explicit Loader can
        # construct arbitrary Python objects via YAML tags; appspec files
        # are plain data, so safe_load is sufficient (and no longer
        # triggers PyYAML's deprecation warning).
        self._raw = yaml.safe_load(yml_str)
        self.version = self._raw['version']
        self.os = self._raw['os']
        self._rewrite_paths()

    def _rewrite_paths(self):
        """Rewrite appspec file sources and hook locations so they are
        rooted under /<package_name>."""
        for fil in self._raw.get('files', []):
            prev_pth = fil['source']
            # path.join('name', '/') results in '/' so ensure
            # we have the correct source.
            if prev_pth == '/':
                fil['source'] = '/' + self.package_name + '/'
            else:
                fil['source'] = path.join(
                    '/',
                    self.package_name,
                    prev_pth)
            self.files.append(fil)
        for hook, items in self._raw.get('hooks', {}).items():
            self.hooks[hook] = []
            for item in items:
                prev_loc = item['location']
                item['location'] = path.join(
                    '/',
                    self.package_name,
                    prev_loc)
                self.hooks[hook].append(item)

    def merge(self, appspec):
        """
        Return self and appspec file provided as a new
        appspec file merged.

        Raises:
        Exception -- when versions or OSes are both set and disagree
        """
        # Validate version, if not set then take new appspec version
        if self.version and self.version != appspec.version:
            raise Exception('appspec version mismatch')
        else:
            self.version = appspec.version
        # Validate OS, if not set then take new appspec OS
        # BUG FIX: the original raised 'appspec version mismatch' here
        # too, masking which field actually disagreed.
        if self.os and self.os != appspec.os:
            raise Exception('appspec os mismatch')
        else:
            self.os = appspec.os
        # Start with copy of self
        new_spec = copy.deepcopy(self)
        new_spec.files.extend(appspec.files)
        for hook, items in appspec.hooks.items():
            if hook in new_spec.hooks:
                new_spec.hooks[hook].extend(items)
            else:
                new_spec.hooks[hook] = items
        return new_spec

    def serialize(self):
        """Return YAML string representation of object"""
        obj = {'version': self.version,
               'os': self.os,
               'files': self.files,
               'hooks': self.hooks}
        return yaml.dump(obj, default_flow_style=False)
def upload_to_s3(source, destination):
    """Given a source file, upload it to S3.

    Arguments:
    source -- local path of the file to upload
    destination -- s3://bucket[/prefix] URL naming the target
    """
    import posixpath  # S3 keys always use '/', never the OS separator

    s3_info = re.search(S3_REGEX, destination)
    bucket = s3_info.group(1)
    key = s3_info.group(2)
    s3 = boto3.resource('s3')
    full_key = path.basename(source)
    if key:
        # BUG FIX: os.path.join would emit backslashes on Windows,
        # producing an invalid S3 key.
        full_key = posixpath.join(key, path.basename(source))
    LOG.info("Uploading %s to bucket %s as %s",
             source, bucket, full_key)
    s3.meta.client.upload_file(source, bucket, full_key)
def build_artifact(name, config, destination, clean=True):
    """Given an artifact name and config build a complete artifact.

    Downloads every configured source from Github into the package,
    merges any per-source appspec.yml files into one, zips the result
    and, for s3:// destinations, uploads it.

    Arguments:
    name -- artifact name looked up in the config
    config -- configuration object exposing artifact() and github creds
    destination -- local directory or s3://bucket[/prefix] URL

    Keyword arguments:
    clean -- remove temporary files when done (default True)
    """
    workspace = tempfile.mkdtemp()
    artifact = config.artifact(name)
    pkg_destination = destination
    # If using S3 then build into the temporary workspace and upload after
    store_s3 = re.search(S3_REGEX, destination)
    if store_s3:
        # BUG FIX: the original emitted this debug line unconditionally,
        # claiming local destinations were S3.
        LOG.debug("Package destination %s is S3", destination)
        pkg_destination = workspace
    pkg = Package(name, pkg_destination)
    global_appspec = AppSpec('global')
    for src_name, src_info in artifact['sources'].items():
        LOG.info("Fetching source %s for %s", src_name, name)
        src = Github(config.github['token'], src_info['owner'],
                     src_info['repository'], src_info['revision'])
        src.download()
        LOG.info("Source %s downloaded", src_name)
        pth = src.extract(workspace)
        pkg.add_directory(src_info['repository'], source=pth)
        src.clean()
        # If source has an appspec file listed, then merge it to global
        src_appspec_file = path.join(pth, 'appspec.yml')
        if path.isfile(src_appspec_file):
            src_appspec = AppSpec(src_info['repository'])
            with open(src_appspec_file, 'r') as a_fil:
                src_appspec.load(a_fil.read())
            global_appspec = global_appspec.merge(src_appspec)
    pkg.add_file('appspec.yml', body=global_appspec.serialize())
    final_pkg = pkg.create()
    if store_s3:
        upload_to_s3(final_pkg, destination)
    if clean:
        LOG.debug("removing workspace files: %s", workspace)
        pkg.clean_tmp()
        shutil.rmtree(workspace)
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.