| hexsha (string, len 40) | size (int64, 4 to 1.02M) | ext (string, 8 classes) | lang (string, 1 class) | max_stars_repo_path (string, len 4 to 209) | max_stars_repo_name (string, len 5 to 121) | max_stars_repo_head_hexsha (string, len 40) | max_stars_repo_licenses (list, len 1 to 10) | max_stars_count (int64, 1 to 191k, nullable) | max_stars_repo_stars_event_min_datetime (string, len 24, nullable) | max_stars_repo_stars_event_max_datetime (string, len 24, nullable) | max_issues_repo_path (string, len 4 to 209) | max_issues_repo_name (string, len 5 to 121) | max_issues_repo_head_hexsha (string, len 40) | max_issues_repo_licenses (list, len 1 to 10) | max_issues_count (int64, 1 to 67k, nullable) | max_issues_repo_issues_event_min_datetime (string, len 24, nullable) | max_issues_repo_issues_event_max_datetime (string, len 24, nullable) | max_forks_repo_path (string, len 4 to 209) | max_forks_repo_name (string, len 5 to 121) | max_forks_repo_head_hexsha (string, len 40) | max_forks_repo_licenses (list, len 1 to 10) | max_forks_count (int64, 1 to 105k, nullable) | max_forks_repo_forks_event_min_datetime (string, len 24, nullable) | max_forks_repo_forks_event_max_datetime (string, len 24, nullable) | content (string, len 4 to 1.02M) | avg_line_length (float64, 1.07 to 66.1k) | max_line_length (int64, 4 to 266k) | alphanum_fraction (float64, 0.01 to 1) |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
| de9034004ba5236fceead2c3312b67e085347c69 | 3,618 | py | Python | endgame.py | Kunalmighty/BlueJaysGame | 76936cea8a57542bf7ea3d546f851f9f9771931f | ["MIT"] | null | null | null | endgame.py | Kunalmighty/BlueJaysGame | 76936cea8a57542bf7ea3d546f851f9f9771931f | ["MIT"] | null | null | null | endgame.py | Kunalmighty/BlueJaysGame | 76936cea8a57542bf7ea3d546f851f9f9771931f | ["MIT"] | null | null | null |
""" Add-a-new-high-score state modularization. """
import pygame
import globes
import state
import score as S
class EndGame(state.State):
""" #EmbraceTheS's interface for adding a new high score. """
BACKGROUND = None
LETTERS = None
UNDERSCORE = None
def __init__(self, score):
state.State.__init__(self)
self.font = pygame.font.Font(None, 50)
globes.Globals.SCREEN.fill(globes.Globals.WHITE)
globes.play_music("highscore.ogg")
# Font variables
self.blink = 0
self.y_lvl = 200
surf = self.font.render("WWWWWWWWWW", True, globes.Globals.BLACK)
self.x_start = globes.hcenter(surf) - 5
self.letter_width = int(surf.get_width() / 10)
if EndGame.UNDERSCORE is None:
EndGame.UNDERSCORE = self.font.render("_", True,
globes.Globals.BLACK)
if EndGame.BACKGROUND is None:
EndGame.BACKGROUND = pygame.image.load("bg/scroll.png").convert()
highscore = pygame.image.load("imgs/highscore.png").convert_alpha()
EndGame.BACKGROUND.blit(highscore, (0, 0))
if EndGame.LETTERS is None:
EndGame.LETTERS = [' ', 'A', 'B', 'C', 'D', 'E', 'F', 'G', 'H',
'I', 'J', 'K', 'L', 'M', 'N', 'O', 'P', 'Q',
'R', 'S', 'T', 'U', 'V', 'W', 'X', 'Y', 'Z']
self.score = int(score)
# indices into LETTERS, one per character of the name
self.name_list = [0, 0, 0, 0, 0, 0, 0, 0, 0, 0]
# index (0-9) of letter in name being edited
self.name_index = 0
def render(self):
""" Override State object's render method. """
globes.Globals.SCREEN.blit(self.BACKGROUND, (0, 0))
x_lvl = self.x_start
for index in self.name_list:
letter = self.font.render(EndGame.LETTERS[index], True,
globes.Globals.BLACK)
globes.Globals.SCREEN.blit(letter, (x_lvl, self.y_lvl))
x_lvl += self.letter_width
x_lvl = self.x_start + (self.name_index * self.letter_width)
if (self.blink < 8):
globes.Globals.SCREEN.blit(self.UNDERSCORE, (x_lvl, self.y_lvl))
def update(self, time):
""" Override State object's update method. """
self.blink = (self.blink + 1) % 10
def event(self, event):
""" Override State object's event handler. """
if event.type == pygame.KEYDOWN:
if (event.key == pygame.K_ESCAPE or event.key == pygame.K_RETURN or
event.key == pygame.K_SPACE): # save entry & change state
name = ''
for index in self.name_list:
if index == 9:
name += 'I'
else:
name += EndGame.LETTERS[index]
globes.Globals.HIGHSCORES.add_score((name, self.score))
globes.Globals.STATE = S.Score(True)
self.update_name(event.key)
def update_name(self, key):
""" Change the name characters based on the key pressed. """
if key == pygame.K_RIGHT:
self.name_index = (self.name_index + 1) % 10
elif key == pygame.K_LEFT:
self.name_index = (self.name_index - 1) % 10
elif key == pygame.K_UP:
self.name_list[self.name_index] -= 1
self.name_list[self.name_index] %= 27
elif key == pygame.K_DOWN:
self.name_list[self.name_index] += 1
self.name_list[self.name_index] %= 27
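# --- Illustrative sketch (hedged; not from the original file) ---
# The wrap-around arithmetic in update_name above cycles through the 27-entry
# LETTERS table; e.g. pressing "up" (K_UP) at index 0 wraps to 26 ('Z'):
#
#     letters = [' '] + [chr(c) for c in range(ord('A'), ord('Z') + 1)]  # 27 entries
#     idx = (0 - 1) % 27      # K_UP from the blank entry
#     assert letters[idx] == 'Z'
#     idx = (idx + 2) % 27    # two K_DOWN presses: 26 -> 0 -> 1
#     assert letters[idx] == 'A'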
| 38.489362 | 79 | 0.543947 |
| e6443b3eb68fbce816ca05ca608d83ced761f01a | 5,640 | py | Python | scripts/driver.py | ChongMingWei/lab0-c | d1f570e2d064a05fcb1cc0e27ef6c5eb77994f7c | ["BSD-2-Clause"] | null | null | null | scripts/driver.py | ChongMingWei/lab0-c | d1f570e2d064a05fcb1cc0e27ef6c5eb77994f7c | ["BSD-2-Clause"] | null | null | null | scripts/driver.py | ChongMingWei/lab0-c | d1f570e2d064a05fcb1cc0e27ef6c5eb77994f7c | ["BSD-2-Clause"] | null | null | null |
#!/usr/bin/env python3
from __future__ import print_function
import subprocess
import sys
import getopt
# Driver program for C programming exercise
class Tracer:
traceDirectory = "./traces"
qtest = "./qtest"
command = qtest
verbLevel = 0
autograde = False
useValgrind = False
colored = False
traceDict = {
1: "trace-01-ops",
2: "trace-02-ops",
3: "trace-03-ops",
4: "trace-04-ops",
5: "trace-05-ops",
6: "trace-06-string",
7: "trace-07-robust",
8: "trace-08-robust",
9: "trace-09-robust",
10: "trace-10-malloc",
11: "trace-11-malloc",
12: "trace-12-malloc",
13: "trace-13-perf",
14: "trace-14-perf",
15: "trace-15-perf",
16: "trace-16-perf",
17: "trace-17-complexity"
}
traceProbs = {
1: "Trace-01",
2: "Trace-02",
3: "Trace-03",
4: "Trace-04",
5: "Trace-05",
6: "Trace-06",
7: "Trace-07",
8: "Trace-08",
9: "Trace-09",
10: "Trace-10",
11: "Trace-11",
12: "Trace-12",
13: "Trace-13",
14: "Trace-14",
15: "Trace-15",
16: "Trace-16",
17: "Trace-17"
}
maxScores = [0, 6, 6, 6, 6, 5, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 5]
RED = '\033[91m'
GREEN = '\033[92m'
WHITE = '\033[0m'
def __init__(self,
qtest="",
verbLevel=0,
autograde=False,
useValgrind=False,
colored=False):
if qtest != "":
self.qtest = qtest
self.verbLevel = verbLevel
self.autograde = autograde
self.useValgrind = useValgrind
self.colored = colored
def printInColor(self, text, color):
if self.colored == False:
color = self.WHITE
print(color, text, self.WHITE, sep = '')
def runTrace(self, tid):
if not tid in self.traceDict:
self.printInColor("ERROR: No trace with id %d" % tid, self.RED)
return False
fname = "%s/%s.cmd" % (self.traceDirectory, self.traceDict[tid])
vname = "%d" % self.verbLevel
clist = self.command + ["-v", vname, "-f", fname]
try:
retcode = subprocess.call(clist)
except Exception as e:
self.printInColor("Call of '%s' failed: %s" % (" ".join(clist), e), self.RED)
return False
return retcode == 0
def run(self, tid=0):
scoreDict = {k: 0 for k in self.traceDict.keys()}
print("---\tTrace\t\tPoints")
if tid == 0:
tidList = self.traceDict.keys()
else:
if not tid in self.traceDict:
self.printInColor("ERROR: Invalid trace ID %d" % tid, self.RED)
return
tidList = [tid]
score = 0
maxscore = 0
if self.useValgrind:
self.command = ['valgrind', self.qtest]
else:
self.command = [self.qtest]
for t in tidList:
tname = self.traceDict[t]
if self.verbLevel > 0:
print("+++ TESTING trace %s:" % tname)
ok = self.runTrace(t)
maxval = self.maxScores[t]
tval = maxval if ok else 0
if tval < maxval:
self.printInColor("---\t%s\t%d/%d" % (tname, tval, maxval), self.RED)
else:
self.printInColor("---\t%s\t%d/%d" % (tname, tval, maxval), self.GREEN)
score += tval
maxscore += maxval
scoreDict[t] = tval
if score < maxscore:
self.printInColor("---\tTOTAL\t\t%d/%d" % (score, maxscore), self.RED)
else:
self.printInColor("---\tTOTAL\t\t%d/%d" % (score, maxscore), self.GREEN)
if self.autograde:
# Generate JSON string
jstring = '{"scores": {'
first = True
for k in scoreDict.keys():
if not first:
jstring += ', '
first = False
jstring += '"%s" : %d' % (self.traceProbs[k], scoreDict[k])
jstring += '}}'
print(jstring)
def usage(name):
print("Usage: %s [-h] [-p PROG] [-t TID] [-v VLEVEL] [--valgrind] [-c]" % name)
print(" -h Print this message")
print(" -p PROG Program to test")
print(" -t TID Trace ID to test")
print(" -v VLEVEL Set verbosity level (0-3)")
print(" -c Enable colored text")
sys.exit(0)
def run(name, args):
prog = ""
tid = 0
vlevel = 1
levelFixed = False
autograde = False
useValgrind = False
colored = False
optlist, args = getopt.getopt(args, 'hp:t:v:A:c', ['valgrind'])
for (opt, val) in optlist:
if opt == '-h':
usage(name)
elif opt == '-p':
prog = val
elif opt == '-t':
tid = int(val)
elif opt == '-v':
vlevel = int(val)
levelFixed = True
elif opt == '-A':
autograde = True
elif opt == '--valgrind':
useValgrind = True
elif opt == '-c':
colored = True
else:
print("Unrecognized option '%s'" % opt)
usage(name)
if not levelFixed and autograde:
vlevel = 0
t = Tracer(qtest=prog,
verbLevel=vlevel,
autograde=autograde,
useValgrind=useValgrind,
colored=colored)
t.run(tid)
if __name__ == "__main__":
run(sys.argv[0], sys.argv[1:])
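# --- Illustrative usage sketch (hedged; not from the original file) ---
# Typical invocations, assuming ./qtest and ./traces exist as the class-level
# defaults above expect:
#   ./scripts/driver.py                  # run every trace and print the score table
#   ./scripts/driver.py -t 5 -v 2 -c     # run only trace 5, more verbose, colored
#   ./scripts/driver.py -p ./qtest --valgrind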
| 28.77551 | 89 | 0.484574 |
| dd8ac6e2c97482748246c3f629fc02d81ca4269e | 8,306 | py | Python | tests/test_ts_states.py | gityoav/pyg-timeseries | 27859b912d6e9e9a74c172711907d5456ec3076b | ["Apache-2.0"] | null | null | null | tests/test_ts_states.py | gityoav/pyg-timeseries | 27859b912d6e9e9a74c172711907d5456ec3076b | ["Apache-2.0"] | null | null | null | tests/test_ts_states.py | gityoav/pyg-timeseries | 27859b912d6e9e9a74c172711907d5456ec3076b | ["Apache-2.0"] | null | null | null |
import pandas as pd; import numpy as np
from pyg_base import drange, eq
from pyg_timeseries import cumprod, cumprod_, cumsum, cumsum_, diff, diff_, ewma, ewma_, ewmcor, ewmcor_,\
ewmvar, ewmvar_, ewmrms, ewmrms_, ewmskew, ewmskew_, ewmstd, ewmstd_, \
expanding_max, expanding_max_, expanding_mean, expanding_mean_, expanding_min, expanding_min_, expanding_rms, expanding_rms_, expanding_skew, expanding_skew_, expanding_std, expanding_std_, expanding_sum, expanding_sum_, \
ratio, ratio_, rolling_max, rolling_max_, rolling_mean, rolling_mean_, rolling_median, rolling_median_, rolling_min, rolling_min_, \
rolling_rank, rolling_rank_, rolling_rms, rolling_rms_, rolling_skew, rolling_skew_, rolling_std, rolling_std_, rolling_sum, rolling_sum_, \
shift, shift_, rolling_quantile, rolling_quantile_, \
ts_count, ts_count_, ts_max, ts_max_, ts_mean, ts_mean_, ts_min, ts_min_, ts_rms, ts_rms_, ts_skew, ts_skew_, ts_std, ts_std_, ts_sum, ts_sum_
_data = 'data'
def _nona_data():
s = pd.Series(np.random.normal(0,1,1000), drange(-999))
df = pd.DataFrame(np.random.normal(0,1,(1000, 20)), drange(-999))
return [s,df]
s, df = _nona_data()
s1 = s.iloc[:500]; s2 = s.iloc[500:]
df1 = df.iloc[:500]; df2 = df.iloc[500:]
t, ef = _nona_data()
t1 = t.iloc[:500]; t2 = t.iloc[500:]
ef1 = ef.iloc[:500]; ef2 = ef.iloc[500:]
#### Here we test two things: (1) that f_().data == f(), and (2) that we can run additional data using the state we got from running f_ over history.
n2f = dict(cumprod = cumprod, cumsum = cumsum, diff = diff, ewma = ewma, ewmrms = ewmrms, ewmskew = ewmskew, ewmstd = ewmstd,
expanding_max = expanding_max, expanding_mean = expanding_mean, expanding_min = expanding_min, expanding_rms = expanding_rms, expanding_skew = expanding_skew, expanding_std = expanding_std, expanding_sum = expanding_sum, ratio = ratio,
rolling_max = rolling_max, rolling_mean = rolling_mean, rolling_median = rolling_median, rolling_min = rolling_min, rolling_rank = rolling_rank, rolling_rms = rolling_rms, rolling_skew = rolling_skew, rolling_std = rolling_std, rolling_sum = rolling_sum,
shift = shift, ts_count = ts_count, ts_max = ts_max, ts_mean = ts_mean, ts_min = ts_min, ts_rms = ts_rms, ts_skew = ts_skew, ts_std = ts_std, ts_sum = ts_sum)
n2f_ = dict(cumprod = cumprod_, cumsum = cumsum_, diff = diff_, ewma = ewma_, ewmrms = ewmrms_, ewmskew = ewmskew_, ewmstd = ewmstd_,
expanding_max = expanding_max_, expanding_mean = expanding_mean_, expanding_min = expanding_min_, expanding_rms = expanding_rms_, expanding_skew = expanding_skew_, expanding_std = expanding_std_, expanding_sum = expanding_sum_, ratio = ratio_,
rolling_max = rolling_max_, rolling_mean = rolling_mean_, rolling_median = rolling_median_, rolling_min = rolling_min_, rolling_rank = rolling_rank_, rolling_rms = rolling_rms_, rolling_skew = rolling_skew_, rolling_std = rolling_std_, rolling_sum = rolling_sum_,
shift = shift_, ts_count = ts_count_, ts_max = ts_max_, ts_mean = ts_mean_, ts_min = ts_min_, ts_rms = ts_rms_, ts_skew = ts_skew_, ts_std = ts_std_, ts_sum = ts_sum_)
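# --- Illustrative sketch (hedged; not from the original tests) ---
# The pattern exercised below: f_(x) returns an object with .data (equal to f(x))
# and .state, and passing instate= lets a later call continue from that history:
#
#     full = ewma(s, 10)
#     part1 = ewma_(s1, 10)
#     part2 = ewma_(s2, 10, instate = part1.state)
#     # part2.data is expected to equal full.iloc[500:]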
def test_ts_states():
n2f = dict(ts_count = ts_count, ts_max = ts_max, ts_mean = ts_mean, ts_min = ts_min, ts_rms = ts_rms, ts_skew = ts_skew, ts_std = ts_std, ts_sum = ts_sum)
n2f_ = dict(ts_count = ts_count_, ts_max = ts_max_, ts_mean = ts_mean_, ts_min = ts_min_, ts_rms = ts_rms_, ts_skew = ts_skew_, ts_std = ts_std_, ts_sum = ts_sum_)
for n in n2f:
f = n2f[n]
f_ = n2f_[n]
res = f(s)
res_ = f_(s)
assert eq(res, res_.data)
res1 = f_(s1)
res2 = f_(s2, instate = res1.state)
assert eq(res2 - _data, res_ - _data)
assert eq(res2.data, res)
for n in n2f:
f = n2f[n]
f_ = n2f_[n]
res = f(df)
res_ = f_(df)
assert eq(res, res_.data)
res1 = f_(df1)
res2 = f_(df2, instate = res1.state)
assert eq(res2 - _data, res_ - _data)
assert eq(res2.data, res)
def test_window_double_states():
n2f = dict(ewmcor = ewmcor)
n2f_ = dict(ewmcor = ewmcor_)
for i in (5,10):
for n in n2f:
f = n2f[n]
f_ = n2f_[n]
res = f(s, t, i)
res_ = f_(s , t, i)
assert eq(res, res_.data)
res1 = f_(s1, t1, i)
res2 = f_(s2, t2, i, instate = res1.state)
assert eq(res2 - _data, res_ - _data)
assert eq(res2.data, res.iloc[500:])
for i in (5, 10):
for n in n2f:
f = n2f[n]
f_ = n2f_[n]
res = f(df, ef, i)
res_ = f_(a = df , b = ef, n = i)
assert eq(res, res_.data)
res1 = f_(df1, ef1, i)
res2 = f_(df2, ef2, i, instate = res1.state)
assert eq(res2 - _data, res_ - _data)
assert eq(res2.data, res.iloc[500:])
def test_window_states():
n2f = dict(ewma = ewma, ewmrms = ewmrms, ewmskew = ewmskew, ewmvar = ewmvar, ewmstd = ewmstd, diff = diff, shift = shift, ratio = ratio,
rolling_max = rolling_max, rolling_mean = rolling_mean, rolling_median = rolling_median, rolling_min = rolling_min,
rolling_quantile = rolling_quantile, rolling_rank = rolling_rank, rolling_rms = rolling_rms, rolling_skew = rolling_skew, rolling_std = rolling_std, rolling_sum = rolling_sum)
n2f_ = dict(ewma = ewma_, ewmrms = ewmrms_, ewmvar = ewmvar_, ewmskew = ewmskew_, ewmstd = ewmstd_, diff = diff_, shift = shift_, ratio = ratio_,
rolling_max = rolling_max_, rolling_mean = rolling_mean_, rolling_median = rolling_median_, rolling_min = rolling_min_,
rolling_quantile = rolling_quantile_, rolling_rank = rolling_rank_, rolling_rms = rolling_rms_, rolling_skew = rolling_skew_, rolling_std = rolling_std_, rolling_sum = rolling_sum_)
for i in (5, 10):
for n in n2f:
f = n2f[n]
f_ = n2f_[n]
res = f(s, i)
res_ = f_(s , i)
assert eq(res, res_.data)
res1 = f_(s1, i)
res2 = f_(s2, i, instate = res1.state)
assert eq(res2 - _data, res_ - _data)
assert eq(res2.data, res.iloc[500:])
for i in (5, 10):
for n in n2f:
f = n2f[n]
f_ = n2f_[n]
res = f(df, i)
res_ = f_(df , i)
assert eq(res, res_.data)
res1 = f_(df1, i)
res2 = f_(df2, i, instate = res1.state)
assert eq(res2 - _data, res_ - _data)
assert eq(res2.data, res.iloc[500:])
def test_expanding_states():
n2f = dict(expanding_max = expanding_max, expanding_mean = expanding_mean, expanding_min = expanding_min, expanding_rms = expanding_rms, expanding_skew = expanding_skew, expanding_std = expanding_std, expanding_sum = expanding_sum, cumprod = cumprod, cumsum = cumsum)
n2f_ = dict(expanding_max = expanding_max_, expanding_mean = expanding_mean_, expanding_min = expanding_min_, expanding_rms = expanding_rms_, expanding_skew = expanding_skew_, expanding_std = expanding_std_, expanding_sum = expanding_sum_, cumprod = cumprod_, cumsum = cumsum_)
for n in n2f:
f = n2f[n]
f_ = n2f_[n]
res = f(s)
res_ = f_(s)
assert eq(res, res_.data)
res1 = f_(s1)
res2 = f_(s2, instate = res1.state)
assert eq(res2 - _data, res_ - _data)
assert eq(res2.data, res.iloc[500:])
for n in n2f:
f = n2f[n]
f_ = n2f_[n]
res = f(df)
res_ = f_(df)
assert eq(res, res_.data)
res1 = f_(df1)
res2 = f_(df2, instate = res1.state)
assert eq(res2 - _data, res_ - _data)
assert eq(res2.data, res.iloc[500:])
def test_min_max_no_data():
a1 = np.array([np.nan, np.nan])
a2 = np.array([np.nan, 1., 2.])
m1 = ts_max_(a1)
assert np.isnan(m1.data)
m2 = ts_max(a2, **m1)
assert m2 == 2
m1 = ts_min_(a1)
assert np.isnan(m1.data)
m2 = ts_min(a2, **m1)
assert m2 == 1
| 49.736527 | 281 | 0.62196 |
| b902205b419ed25158e00b113cebab58afbd8ff1 | 2,211 | py | Python | BasicOperations/01_01_PyQt4/QTreeWithTable.py | UpSea/midProjects | ed6086e74f68b1b89f725abe0b270e67cf8993a8 | ["MIT"] | 1 | 2018-07-02T13:54:49.000Z | 2018-07-02T13:54:49.000Z | BasicOperations/01_01_PyQt4/QTreeWithTable.py | UpSea/midProjects | ed6086e74f68b1b89f725abe0b270e67cf8993a8 | ["MIT"] | null | null | null | BasicOperations/01_01_PyQt4/QTreeWithTable.py | UpSea/midProjects | ed6086e74f68b1b89f725abe0b270e67cf8993a8 | ["MIT"] | 3 | 2016-05-28T15:13:02.000Z | 2021-04-10T06:04:25.000Z |
from PyQt4 import QtGui
import sys
class mainWindow(QtGui.QDialog):
""""""
def __init__(self):
"""Constructor"""
super(mainWindow,self).__init__()
self.treeWidget = QtGui.QTreeWidget()
self.treeWidget.setColumnCount(3)
self.treeWidget.setHeaderLabels(['Items','Name','Detail'])
treeItem01 = QtGui.QTreeWidgetItem()
treeItem02 = QtGui.QTreeWidgetItem()
treeItem01.setText(0,'aaa')
treeItem01.setText(1,'bbb')
treeItem02.setText(0,'aaa')
treeItem02.setText(1,'bbb')
self.treeWidget.addTopLevelItem(treeItem01)
self.treeWidget.addTopLevelItem(treeItem02)
treeSubItem01 = QtGui.QTreeWidgetItem()
treeSubItem02 = QtGui.QTreeWidgetItem()
treeItem01.addChild(treeSubItem01)
treeItem02.addChild(treeSubItem02)
self.treeWidget.setFirstItemColumnSpanned(treeSubItem01,True)
self.treeWidget.setFirstItemColumnSpanned(treeSubItem02,True)
table = self.getTable()
self.treeWidget.setItemWidget(treeSubItem01,0,table)
table = self.getTable()
self.treeWidget.setItemWidget(treeSubItem02,0,table)
layout = QtGui.QVBoxLayout()
self.setLayout(layout)
layout.addWidget(self.treeWidget)
#----------------------------------------------------------------------
def getTable(self):
""""""
tableWidget = QtGui.QTableWidget()
tableWidget.setColumnCount(5)
tableWidget.setRowCount(3)
for row in range(3):
tableWidget.setItem(row,0,QtGui.QTableWidgetItem('datetime'))
tableWidget.setCellWidget(row,1,QtGui.QLabel(self.tr('openPrice')))
tableWidget.setItem(row,2,QtGui.QTableWidgetItem('highPrice'))
tableWidget.setItem(row,3,QtGui.QTableWidgetItem('lowPrice'))
tableWidget.setItem(row,4,QtGui.QTableWidgetItem('closePrice'))
return tableWidget
if __name__ == "__main__":
qApp=QtGui.QApplication(sys.argv)
window = mainWindow()
window.show()
sys.exit(qApp.exec_())
| 37.474576 | 85 | 0.606965 |
| 980ed7326607e3f9914a0d7a42048908bcbd74f3 | 81 | py | Python | google-cloud-sdk/platform/bq/third_party/pyasn1/type/__init__.py | bopopescu/searchparty | afdc2805cb1b77bd5ac9fdd1a76217f4841f0ea6 | ["Apache-2.0"] | 2 | 2019-11-10T09:17:07.000Z | 2019-12-18T13:44:08.000Z | google-cloud-sdk/platform/bq/third_party/pyasn1/type/__init__.py | bopopescu/searchparty | afdc2805cb1b77bd5ac9fdd1a76217f4841f0ea6 | ["Apache-2.0"] | 11 | 2020-02-29T02:51:12.000Z | 2022-03-30T23:20:08.000Z | google-cloud-sdk/platform/bq/third_party/pyasn1/type/__init__.py | bopopescu/searchparty | afdc2805cb1b77bd5ac9fdd1a76217f4841f0ea6 | ["Apache-2.0"] | 3 | 2017-07-27T18:44:13.000Z | 2020-07-25T17:48:53.000Z |
#!/usr/bin/env python
# This file is necessary to make this directory a package.
| 27 | 58 | 0.753086 |
| 692d85a063447b579d13914d39974efb1729e22f | 251 | py | Python | Scripts/Bibliotecas/Mod 2.py | Vinicius-de-Souza-Reis-Lima/Python | 2009e9f5be10a1cf4e506a7f9f17c6b90a30c7c7 | ["MIT"] | null | null | null | Scripts/Bibliotecas/Mod 2.py | Vinicius-de-Souza-Reis-Lima/Python | 2009e9f5be10a1cf4e506a7f9f17c6b90a30c7c7 | ["MIT"] | null | null | null | Scripts/Bibliotecas/Mod 2.py | Vinicius-de-Souza-Reis-Lima/Python | 2009e9f5be10a1cf4e506a7f9f17c6b90a30c7c7 | ["MIT"] | null | null | null |
from math import hypot
co = float(input('Qual o valor do cateto oposto? '))
ca = float(input('Qual o valor do cateto adjacente? '))
hy = hypot(co, ca)
print('O triângulo com cateto oposto {} e adjacente {}, tem hipotenusa {:.2f}.'.format(co, ca, hy))
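# --- Worked example (hedged; not from the original script) ---
# For the classic 3-4-5 right triangle: entering co = 3 and ca = 4 gives
# hypot(3.0, 4.0) == 5.0, so the printed hypotenuse is 5.00.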
| 41.833333 | 99 | 0.685259 |
| 8d6130729a851af4251ebf1ea8a71e21a4adce5a | 193 | py | Python | ic_crawler/gsgj_phone/gsgj_phone/util/md5_handle.py | AisinoPythonTeam/PythonAiniso | 983a29962752679d8cc26a2c3cdb0ba8fcfa3f02 | ["Apache-2.0"] | null | null | null | ic_crawler/gsgj_phone/gsgj_phone/util/md5_handle.py | AisinoPythonTeam/PythonAiniso | 983a29962752679d8cc26a2c3cdb0ba8fcfa3f02 | ["Apache-2.0"] | null | null | null | ic_crawler/gsgj_phone/gsgj_phone/util/md5_handle.py | AisinoPythonTeam/PythonAiniso | 983a29962752679d8cc26a2c3cdb0ba8fcfa3f02 | ["Apache-2.0"] | null | null | null |
# -*- coding:utf-8 -*-
import hashlib
def get_md5_value(key):
value = None
try:
value = hashlib.md5(key).hexdigest()
except Exception, e:
print e
return value
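# --- Illustrative sketch (hedged; not from the original file) ---
# The module above is Python 2 ("except Exception, e" / "print e"). A rough
# Python 3 equivalent, assuming callers pass str keys, would be:
#
#     import hashlib
#
#     def get_md5_value(key):
#         try:
#             return hashlib.md5(key.encode("utf-8")).hexdigest()
#         except Exception as e:
#             print(e)
#             return None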
| 16.083333 | 44 | 0.590674 |
| 7c5b6e9703e7cd8da72515a82ad17bca2546ed45 | 9,425 | py | Python | gpv2/image_featurizer/image_featurizer.py | michalsr/gpv2 | 00a22b311dbaeefb04e1df676eb6ae3373d8d4b5 | ["Apache-2.0"] | 13 | 2022-02-03T00:25:35.000Z | 2022-03-20T02:36:05.000Z | gpv2/image_featurizer/image_featurizer.py | michalsr/gpv2 | 00a22b311dbaeefb04e1df676eb6ae3373d8d4b5 | ["Apache-2.0"] | null | null | null | gpv2/image_featurizer/image_featurizer.py | michalsr/gpv2 | 00a22b311dbaeefb04e1df676eb6ae3373d8d4b5 | ["Apache-2.0"] | 3 | 2022-01-14T19:15:38.000Z | 2022-02-11T14:12:52.000Z |
import logging
from contextlib import ExitStack
from os.path import join, exists
from typing import List, Dict, Any, Tuple, Optional, NewType, Union
import logging
from typing import List, Dict, Any, Tuple, Optional
import h5py
import numpy as np
import torch
import torchvision
from allennlp.common import Registrable, Params, FromParams
from dataclasses import dataclass, replace
from torch import nn
from torchvision.models.detection.transform import GeneralizedRCNNTransform
from torchvision.transforms import ColorJitter, RandomGrayscale, Normalize
from torchvision.transforms.functional import hflip, to_tensor
from torch.nn import functional as F
from gpv2 import file_paths
from gpv2.model.gpv_example import GPVExample
from gpv2.model.layers import Layer
from gpv2.utils import pytorch_utils, image_utils
@dataclass
class ImageRegionFeatures:
"""Object boxes, features, and objectness scores for objects in an image"""
@staticmethod
def build_from_lists(boxes, features, objectness):
n_boxes = [len(x) for x in boxes]
max_len = max(n_boxes)
n_boxes = torch.as_tensor(n_boxes, dtype=torch.long, device=boxes[0].device)
return ImageRegionFeatures(
pytorch_utils.stack_and_pad(boxes, max_len),
None if features is None else pytorch_utils.stack_and_pad(features, max_len),
# -10000 so the padding is a valid log-probability
None if objectness is None else pytorch_utils.stack_and_pad(objectness, max_len, -10000),
n_boxes,
)
"""[batch, n_regions, 4] boxes in [cx, cy, w, y] format normalized between 0 and 1"""
boxes: torch.Tensor
"""[batch, n_regions, n_features] region features"""
features: Optional[torch.Tensor]
"""[batch, n_regions] or [batch, n_regions, n_classes] objectness log-probability"""
objectness: Optional[torch.Tensor]
"""[batch] number of boxes for each batch if batches can have differing numbers of boxes"""
n_boxes: Optional[torch.Tensor] = None
def numpy(self):
return ImageRegionFeatures(
self.boxes.cpu().numpy(),
None if self.features is None else self.features.cpu().numpy(),
None if self.objectness is None else self.objectness.cpu().numpy(),
None if self.n_boxes is None else self.n_boxes.cpu().numpy()
)
def get_boxes(self, i: int):
if self.n_boxes is not None:
return self.boxes[i, :self.n_boxes[i]]
else:
return self.boxes[i]
def to(self, device):
return ImageRegionFeatures(
self.boxes.to(device),
None if self.features is None else self.features.to(device),
None if self.objectness is None else self.objectness.to(device),
None if self.n_boxes is None else self.n_boxes.to(device)
)
def get_n_boxes(self):
if self.n_boxes is None:
batch, n = self.boxes.size()[:2]
return torch.full((batch,), n,
device=self.boxes.device, dtype=torch.long)
else:
return self.n_boxes
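# --- Illustrative sketch (hedged; not from the original file) ---
# Constructing the container directly for a batch of two images that both have
# three boxes (so no padding is needed); 256 is an arbitrary feature size:
#
#     feats = ImageRegionFeatures(
#         boxes=torch.rand(2, 3, 4),          # cxcywh, normalized to [0, 1]
#         features=torch.rand(2, 3, 256),
#         objectness=torch.rand(2, 3).log(),  # log-probabilities
#         n_boxes=torch.tensor([3, 3]),
#     )
#     assert feats.get_n_boxes().tolist() == [3, 3]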
BoxTargets = NewType('BoxTargets', List[Optional[torch.Tensor]])
"""Batch of target boxes in cxcywh format, normalized between 0 and 1"""
class ImageFeatureExtractor(nn.Module, Registrable):
"""Extracts regions and region feature vectors for an image
This class does the visual feature extraction for our models. In order to support this,
this class provides a custom collate function to use on the images and then
a forward method that builds the features from the output of that collate function.
"""
def get_collate(self, is_train=False) -> 'ImageCollater':
raise NotImplementedError()
def forward(self, **kwargs) -> ImageRegionFeatures:
raise NotImplementedError()
class ImageCollater:
def collate(self, batch: List[GPVExample]) -> Dict[str, Any]:
"""
return:
image_inputs: Inputs to pass to `ImageFeatureExtractor.forward`
"""
raise NotImplementedError()
class ROIFeatureExtractor(Layer):
"""Extract image features for a given set of regions"""
def forward(self, x: torch.Tensor, boxes: torch.Tensor):
"""
x: Tensor of images
boxes: [batch, n, 4] boxes that NOT normalized and in xyxy format
"""
raise NotImplementedError()
@ROIFeatureExtractor.register("box-embed-feature-extractor")
class BoBoxEmbedFeatureExtractor(ROIFeatureExtractor):
"""Does ROI pooling to get features for image regions"""
def __init__(
self,
box_coordinate_embed: Optional[Layer] = None,
pre_rio: Layer = None,
post_rio: Layer = None,
return_objectness = True,
rio_processor: str = "mean",
box_coordinate_join: str = "concat",
rio_size=7,
):
super().__init__()
self.box_coordinate_embed = box_coordinate_embed
self.pre_rio = pre_rio
self.post_rio = post_rio
self.return_objectness = return_objectness
self.rio_processor = rio_processor
self.box_coordinate_join = box_coordinate_join
self.rio_size = rio_size
def extract_roi(self, features, boxes: torch.Tensor):
B, C, W, H = features.size()
N = boxes.size(1)
div = torch.as_tensor([W, H, W, H], device=boxes.device, dtype=boxes.dtype)
scaled_boxes = boxes * div
scaled_boxes = torchvision.ops.box_convert(scaled_boxes, "cxcywh", "xyxy")
scaled_boxes = torch.unbind(scaled_boxes)
roi_features = torchvision.ops.roi_align(
features, scaled_boxes, output_size=self.rio_size, aligned=True)
if self.rio_processor == "mean":
roi_features = roi_features.view(B, N, C, -1).mean(-1)
elif self.rio_processor == "max":
roi_features = roi_features.view(B, N, C, -1).max(-1).values
else:
raise NotImplementedError(self.rio_processor)
return roi_features
def forward(self, images, boxes) -> ImageRegionFeatures:
if self.pre_rio is not None:
images = self.pre_rio(images)
roi_features = self.extract_roi(images, boxes)
if self.post_rio is not None:
roi_features = self.post_rio(roi_features)
if self.box_coordinate_embed:
box_embed = self.box_coordinate_embed(boxes)
if self.box_coordinate_join == "concat":
roi_features = torch.cat([roi_features, box_embed], -1)
else:
raise NotImplementedError(self.box_coordinate_join)
return roi_features
def build_scaled_boxes(features, boxes):
B, C, H, W = features.size()
div = torch.as_tensor([W, H, W, H], device=boxes.device, dtype=boxes.dtype)
boxes = boxes * div
return torchvision.ops.box_convert(boxes, "cxcywh", "xyxy")
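# --- Illustrative example (hedged; not from the original file) ---
# build_scaled_boxes maps normalized cxcywh boxes onto the feature grid, e.g. a
# centered half-size box on a 10x10 map becomes xyxy (2.5, 2.5, 7.5, 7.5):
#
#     feats = torch.zeros(1, 3, 10, 10)
#     boxes = torch.tensor([[[0.5, 0.5, 0.5, 0.5]]])
#     build_scaled_boxes(feats, boxes)  # tensor([[[2.5, 2.5, 7.5, 7.5]]])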
def gather_qboxes_and_targets(batch, hflipped=None, qbox_format="cxcywh",
normalize_targets=True, target_format="cxcywh"):
"""Utility method that gather the query_boxes and targets of a batch"""
targets = []
query_boxes = []
if hflipped is None:
hflipped = [None for _ in batch]
for ex, is_flipped in zip(batch, hflipped):
if ex.target_boxes is None:
targets.append(None)
else:
if ex.crop:
raise ValueError("Box target on cropped images no supoorted")
# Normalize the target boxes to be between 0 and 1 and to be
# cxcywh format
# TODO it would be nice to do this in the pre-processing step
boxes = torch.tensor(ex.target_boxes, dtype=torch.float)
boxes = torchvision.ops.box_convert(boxes, "xywh", target_format)
if normalize_targets:
boxes = image_utils.normalize_boxes(boxes, ex.image_id)
if is_flipped:
boxes[:, 0] = 1.0 - boxes[:, 0]
targets.append(boxes)
if ex.query_boxes is None:
query_boxes.append(None)
else:
if isinstance(ex.query_boxes, dict):
qboxes = ex.query_boxes["qboxes"]
qboxes = torch.tensor(qboxes, dtype=torch.float)
if torch.any(qboxes > 1.0):
qboxes = image_utils.normalize_boxes(qboxes, ex.image_id)
qboxes = torchvision.ops.box_convert(qboxes, "xywh", qbox_format)
if is_flipped:
qboxes[:, 0] = 1.0 - qboxes[:, 0]
q = dict(ex.query_boxes)
q["qboxes"] = qboxes
query_boxes.append(q)
elif isinstance(ex.query_boxes, int) or ex.query_boxes.shape == ():
# Query box reference to a stored bounding box
query_boxes.append(ex.query_boxes)
else:
# Convert query boxes
qboxes = torch.tensor(ex.query_boxes, dtype=torch.float)
if torch.any(qboxes > 1.0):
qboxes = image_utils.normalize_boxes(qboxes, ex.image_id)
qboxes = torchvision.ops.box_convert(qboxes, "xywh", qbox_format)
if is_flipped:
qboxes[:, 0] = 1.0 - qboxes[:, 0]
query_boxes.append(qboxes)
return query_boxes, targets
@ImageFeatureExtractor.register("debug")
class DebugFeaturizer(ImageFeatureExtractor, ImageCollater):
def __init__(self, n_boxes=4, dim=32):
super().__init__()
self.n_boxes = n_boxes
self.dim = dim
def get_collate(self, is_train=False) -> 'ImageCollater':
return self
def collate(self, batch):
_, targets = gather_qboxes_and_targets(batch)
return dict(batch_size=torch.as_tensor(len(batch))), targets
def forward(self, batch_size) -> ImageRegionFeatures:
device = batch_size.device
return ImageRegionFeatures(
torch.empty(batch_size, self.n_boxes, 4, device=device).uniform_(0.00001, 0.5),
torch.empty(batch_size, self.n_boxes, self.dim, device=device).uniform_(0.00001, 0.5),
torch.log(torch.empty(batch_size, self.n_boxes, self.dim, device=device).uniform_(1e-6, 1.0 - 1e-6))
)
| 34.778598 | 106 | 0.697825 |
| a0bc937f75b0275a7b3b9499fad08de4ad38745b | 24,466 | py | Python | test/functional/interface_zmq.py | beloiual/bitcoin | 53b5d7d91925a54b17f444bb1c52310fe9e4e797 | ["MIT"] | 1 | 2020-10-09T12:14:36.000Z | 2020-10-09T12:14:36.000Z | test/functional/interface_zmq.py | beloiual/bitcoin | 53b5d7d91925a54b17f444bb1c52310fe9e4e797 | ["MIT"] | 18 | 2020-10-31T01:04:18.000Z | 2020-11-03T19:25:27.000Z | test/functional/interface_zmq.py | beloiual/bitcoin | 53b5d7d91925a54b17f444bb1c52310fe9e4e797 | ["MIT"] | null | null | null |
#!/usr/bin/env python3
# Copyright (c) 2015-2019 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test the ZMQ notification interface."""
import struct
from test_framework.address import ADDRESS_BCRT1_UNSPENDABLE, ADDRESS_BCRT1_P2WSH_OP_TRUE
from test_framework.blocktools import create_block, create_coinbase, add_witness_commitment
from test_framework.test_framework import BitcoinTestFramework
from test_framework.messages import CTransaction, hash256, FromHex
from test_framework.util import (
assert_equal,
connect_nodes,
assert_raises_rpc_error,
)
from io import BytesIO
from time import sleep
# Test may be skipped and not have zmq installed
try:
import zmq
except ImportError:
pass
def hash256_reversed(byte_str):
return hash256(byte_str)[::-1]
class ZMQSubscriber:
def __init__(self, socket, topic):
self.sequence = 0
self.socket = socket
self.topic = topic
self.socket.setsockopt(zmq.SUBSCRIBE, self.topic)
def receive(self):
topic, body, seq = self.socket.recv_multipart()
# Topic should match the subscriber topic.
assert_equal(topic, self.topic)
# Sequence should be incremental.
assert_equal(struct.unpack('<I', seq)[-1], self.sequence)
self.sequence += 1
return body
def receive_sequence(self):
topic, body, seq = self.socket.recv_multipart()
# Topic should match the subscriber topic.
assert_equal(topic, self.topic)
# Sequence should be incremental.
assert_equal(struct.unpack('<I', seq)[-1], self.sequence)
self.sequence += 1
hash = body[:32].hex()
label = chr(body[32])
mempool_sequence = None if len(body) != 32+1+8 else struct.unpack("<Q", body[32+1:])[0]
if mempool_sequence is not None:
assert label == "A" or label == "R"
else:
assert label == "D" or label == "C"
return (hash, label, mempool_sequence)
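# --- Illustrative sketch (hedged; not from the original test) ---
# Layout of the 'sequence' payload that receive_sequence() decodes: a 32-byte
# hash, one label byte, and, for mempool 'A'/'R' events, an 8-byte little-endian
# mempool sequence number:
#
#     body = bytes(32) + b"A" + struct.pack("<Q", 7)
#     assert chr(body[32]) == "A"
#     assert struct.unpack("<Q", body[32 + 1:])[0] == 7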
class ZMQTest (BitcoinTestFramework):
def set_test_params(self):
self.num_nodes = 2
def skip_test_if_missing_module(self):
self.skip_if_no_py3_zmq()
self.skip_if_no_bitcoind_zmq()
def run_test(self):
self.ctx = zmq.Context()
try:
self.test_basic()
self.test_sequence()
self.test_mempool_sync()
self.test_reorg()
self.test_multiple_interfaces()
finally:
# Destroy the ZMQ context.
self.log.debug("Destroying ZMQ context")
self.ctx.destroy(linger=None)
def test_basic(self):
# Invalid zmq arguments don't take down the node, see #17185.
self.restart_node(0, ["-zmqpubrawtx=foo", "-zmqpubhashtx=bar"])
address = 'tcp://127.0.0.1:28332'
sockets = []
subs = []
services = [b"hashblock", b"hashtx", b"rawblock", b"rawtx"]
for service in services:
sockets.append(self.ctx.socket(zmq.SUB))
sockets[-1].set(zmq.RCVTIMEO, 60000)
subs.append(ZMQSubscriber(sockets[-1], service))
# Subscribe to all available topics.
hashblock = subs[0]
hashtx = subs[1]
rawblock = subs[2]
rawtx = subs[3]
self.restart_node(0, ["-zmqpub%s=%s" % (sub.topic.decode(), address) for sub in [hashblock, hashtx, rawblock, rawtx]])
connect_nodes(self.nodes[0], 1)
for socket in sockets:
socket.connect(address)
# Relax so that the subscriber is ready before publishing zmq messages
sleep(0.2)
num_blocks = 5
self.log.info("Generate %(n)d blocks (and %(n)d coinbase txes)" % {"n": num_blocks})
genhashes = self.nodes[0].generatetoaddress(num_blocks, ADDRESS_BCRT1_UNSPENDABLE)
self.sync_all()
for x in range(num_blocks):
# Should receive the coinbase txid.
txid = hashtx.receive()
# Should receive the coinbase raw transaction.
hex = rawtx.receive()
tx = CTransaction()
tx.deserialize(BytesIO(hex))
tx.calc_sha256()
assert_equal(tx.hash, txid.hex())
# Should receive the generated raw block.
block = rawblock.receive()
assert_equal(genhashes[x], hash256_reversed(block[:80]).hex())
# Should receive the generated block hash.
hash = hashblock.receive().hex()
assert_equal(genhashes[x], hash)
# The block should only have the coinbase txid.
assert_equal([txid.hex()], self.nodes[1].getblock(hash)["tx"])
if self.is_wallet_compiled():
self.log.info("Wait for tx from second node")
payment_txid = self.nodes[1].sendtoaddress(self.nodes[0].getnewaddress(), 1.0)
self.sync_all()
# Should receive the broadcasted txid.
txid = hashtx.receive()
assert_equal(payment_txid, txid.hex())
# Should receive the broadcasted raw transaction.
hex = rawtx.receive()
assert_equal(payment_txid, hash256_reversed(hex).hex())
# Mining the block with this tx should result in second notification
# after coinbase tx notification
self.nodes[0].generatetoaddress(1, ADDRESS_BCRT1_UNSPENDABLE)
hashtx.receive()
txid = hashtx.receive()
assert_equal(payment_txid, txid.hex())
self.log.info("Test the getzmqnotifications RPC")
assert_equal(self.nodes[0].getzmqnotifications(), [
{"type": "pubhashblock", "address": address, "hwm": 1000},
{"type": "pubhashtx", "address": address, "hwm": 1000},
{"type": "pubrawblock", "address": address, "hwm": 1000},
{"type": "pubrawtx", "address": address, "hwm": 1000},
])
assert_equal(self.nodes[1].getzmqnotifications(), [])
def test_reorg(self):
if not self.is_wallet_compiled():
self.log.info("Skipping reorg test because wallet is disabled")
return
address = 'tcp://127.0.0.1:28333'
services = [b"hashblock", b"hashtx"]
sockets = []
subs = []
for service in services:
sockets.append(self.ctx.socket(zmq.SUB))
# 2 second timeout to check end of notifications
sockets[-1].set(zmq.RCVTIMEO, 2000)
subs.append(ZMQSubscriber(sockets[-1], service))
# Subscribe to all available topics.
hashblock = subs[0]
hashtx = subs[1]
# Should only notify the tip if a reorg occurs
self.restart_node(0, ["-zmqpub%s=%s" % (sub.topic.decode(), address) for sub in [hashblock, hashtx]])
for socket in sockets:
socket.connect(address)
# Relax so that the subscriber is ready before publishing zmq messages
sleep(0.2)
# Generate 1 block in nodes[0] with 1 mempool tx and receive all notifications
payment_txid = self.nodes[0].sendtoaddress(self.nodes[0].getnewaddress(), 1.0)
disconnect_block = self.nodes[0].generatetoaddress(1, ADDRESS_BCRT1_UNSPENDABLE)[0]
disconnect_cb = self.nodes[0].getblock(disconnect_block)["tx"][0]
assert_equal(self.nodes[0].getbestblockhash(), hashblock.receive().hex())
assert_equal(hashtx.receive().hex(), payment_txid)
assert_equal(hashtx.receive().hex(), disconnect_cb)
# Generate 2 blocks in nodes[1] to a different address to ensure split
connect_blocks = self.nodes[1].generatetoaddress(2, ADDRESS_BCRT1_P2WSH_OP_TRUE)
# nodes[0] will reorg chain after connecting back nodes[1]
connect_nodes(self.nodes[0], 1)
self.sync_blocks() # tx in mempool valid but not advertised
# Should receive nodes[1] tip
assert_equal(self.nodes[1].getbestblockhash(), hashblock.receive().hex())
# During reorg:
# Get old payment transaction notification from disconnect and disconnected cb
assert_equal(hashtx.receive().hex(), payment_txid)
assert_equal(hashtx.receive().hex(), disconnect_cb)
# And the payment transaction again due to mempool entry
assert_equal(hashtx.receive().hex(), payment_txid)
assert_equal(hashtx.receive().hex(), payment_txid)
# And the new connected coinbases
for i in [0, 1]:
assert_equal(hashtx.receive().hex(), self.nodes[1].getblock(connect_blocks[i])["tx"][0])
# If we do a simple invalidate we announce the disconnected coinbase
self.nodes[0].invalidateblock(connect_blocks[1])
assert_equal(hashtx.receive().hex(), self.nodes[1].getblock(connect_blocks[1])["tx"][0])
# And the current tip
assert_equal(hashtx.receive().hex(), self.nodes[1].getblock(connect_blocks[0])["tx"][0])
def test_sequence(self):
"""
Sequence zmq notifications give every blockhash and txhash in order
of processing, regardless of IBD, re-orgs, etc.
Format of messages:
<32-byte hash>C : Blockhash connected
<32-byte hash>D : Blockhash disconnected
<32-byte hash>R<8-byte LE uint> : Transactionhash removed from mempool for non-block inclusion reason
<32-byte hash>A<8-byte LE uint> : Transactionhash added mempool
"""
self.log.info("Testing 'sequence' publisher")
address = 'tcp://127.0.0.1:28333'
socket = self.ctx.socket(zmq.SUB)
socket.set(zmq.RCVTIMEO, 60000)
seq = ZMQSubscriber(socket, b'sequence')
self.restart_node(0, ['-zmqpub%s=%s' % (seq.topic.decode(), address)])
socket.connect(address)
# Relax so that the subscriber is ready before publishing zmq messages
sleep(0.2)
# Mempool sequence number starts at 1
seq_num = 1
# Generate 1 block in nodes[0] and receive all notifications
dc_block = self.nodes[0].generatetoaddress(1, ADDRESS_BCRT1_UNSPENDABLE)[0]
# Note: We are not notified of any block transactions, coinbase or mined
assert_equal((self.nodes[0].getbestblockhash(), "C", None), seq.receive_sequence())
# Generate 2 blocks in nodes[1] to a different address to ensure a chain split
self.nodes[1].generatetoaddress(2, ADDRESS_BCRT1_P2WSH_OP_TRUE)
# nodes[0] will reorg chain after connecting back nodes[1]
connect_nodes(self.nodes[0], 1)
# Then we receive all block (dis)connect notifications for the 2 block reorg
assert_equal((dc_block, "D", None), seq.receive_sequence())
block_count = self.nodes[1].getblockcount()
assert_equal((self.nodes[1].getblockhash(block_count-1), "C", None), seq.receive_sequence())
assert_equal((self.nodes[1].getblockhash(block_count), "C", None), seq.receive_sequence())
# Rest of test requires wallet functionality
if self.is_wallet_compiled():
self.log.info("Wait for tx from second node")
payment_txid = self.nodes[1].sendtoaddress(address=self.nodes[0].getnewaddress(), amount=5.0, replaceable=True)
self.sync_all()
self.log.info("Testing sequence notifications with mempool sequence values")
# Should receive the broadcasted txid.
assert_equal((payment_txid, "A", seq_num), seq.receive_sequence())
seq_num += 1
self.log.info("Testing RBF notification")
# Replace it to test eviction/addition notification
rbf_info = self.nodes[1].bumpfee(payment_txid)
self.sync_all()
assert_equal((payment_txid, "R", seq_num), seq.receive_sequence())
seq_num += 1
assert_equal((rbf_info["txid"], "A", seq_num), seq.receive_sequence())
seq_num += 1
# Doesn't get published when mined, make a block and tx to "flush" the possibility
# though the mempool sequence number does go up by the number of transactions
# removed from the mempool by the block mining it.
mempool_size = len(self.nodes[0].getrawmempool())
c_block = self.nodes[0].generatetoaddress(1, ADDRESS_BCRT1_UNSPENDABLE)[0]
self.sync_all()
# Make sure the number of mined transactions matches the number of txs out of mempool
mempool_size_delta = mempool_size - len(self.nodes[0].getrawmempool())
assert_equal(len(self.nodes[0].getblock(c_block)["tx"])-1, mempool_size_delta)
seq_num += mempool_size_delta
payment_txid_2 = self.nodes[1].sendtoaddress(self.nodes[0].getnewaddress(), 1.0)
self.sync_all()
assert_equal((c_block, "C", None), seq.receive_sequence())
assert_equal((payment_txid_2, "A", seq_num), seq.receive_sequence())
seq_num += 1
# Spot check getrawmempool results that they only show up when asked for
assert type(self.nodes[0].getrawmempool()) is list
assert type(self.nodes[0].getrawmempool(mempool_sequence=False)) is list
assert "mempool_sequence" not in self.nodes[0].getrawmempool(verbose=True)
assert_raises_rpc_error(-8, "Verbose results cannot contain mempool sequence values.", self.nodes[0].getrawmempool, True, True)
assert_equal(self.nodes[0].getrawmempool(mempool_sequence=True)["mempool_sequence"], seq_num)
self.log.info("Testing reorg notifications")
# Manually invalidate the last block to test mempool re-entry
# N.B. This part could be made more lenient in exact ordering
# since it greatly depends on inner-workings of blocks/mempool
# during "deep" re-orgs. Probably should "re-construct"
# blockchain/mempool state from notifications instead.
block_count = self.nodes[0].getblockcount()
best_hash = self.nodes[0].getbestblockhash()
self.nodes[0].invalidateblock(best_hash)
sleep(2) # Bit of room to make sure transaction things happened
# Make sure getrawmempool mempool_sequence results aren't "queued" but immediately reflective
# of the time they were gathered.
assert self.nodes[0].getrawmempool(mempool_sequence=True)["mempool_sequence"] > seq_num
assert_equal((best_hash, "D", None), seq.receive_sequence())
assert_equal((rbf_info["txid"], "A", seq_num), seq.receive_sequence())
seq_num += 1
# Other things may happen but aren't wallet-deterministic so we don't test for them currently
self.nodes[0].reconsiderblock(best_hash)
self.nodes[1].generatetoaddress(1, ADDRESS_BCRT1_UNSPENDABLE)
self.sync_all()
self.log.info("Evict mempool transaction by block conflict")
orig_txid = self.nodes[0].sendtoaddress(address=self.nodes[0].getnewaddress(), amount=1.0, replaceable=True)
# More to be simply mined
more_tx = []
for _ in range(5):
more_tx.append(self.nodes[0].sendtoaddress(self.nodes[0].getnewaddress(), 0.1))
raw_tx = self.nodes[0].getrawtransaction(orig_txid)
bump_info = self.nodes[0].bumpfee(orig_txid)
# Mine the pre-bump tx
block = create_block(int(self.nodes[0].getbestblockhash(), 16), create_coinbase(self.nodes[0].getblockcount()+1))
tx = FromHex(CTransaction(), raw_tx)
block.vtx.append(tx)
for txid in more_tx:
tx = FromHex(CTransaction(), self.nodes[0].getrawtransaction(txid))
block.vtx.append(tx)
add_witness_commitment(block)
block.solve()
assert_equal(self.nodes[0].submitblock(block.serialize().hex()), None)
tip = self.nodes[0].getbestblockhash()
assert_equal(int(tip, 16), block.sha256)
orig_txid_2 = self.nodes[0].sendtoaddress(address=self.nodes[0].getnewaddress(), amount=1.0, replaceable=True)
# Flush old notifications until evicted tx original entry
(hash_str, label, mempool_seq) = seq.receive_sequence()
while hash_str != orig_txid:
(hash_str, label, mempool_seq) = seq.receive_sequence()
mempool_seq += 1
# Added original tx
assert_equal(label, "A")
# More transactions to be simply mined
for i in range(len(more_tx)):
assert_equal((more_tx[i], "A", mempool_seq), seq.receive_sequence())
mempool_seq += 1
# Bumped by rbf
assert_equal((orig_txid, "R", mempool_seq), seq.receive_sequence())
mempool_seq += 1
assert_equal((bump_info["txid"], "A", mempool_seq), seq.receive_sequence())
mempool_seq += 1
# Conflict announced first, then block
assert_equal((bump_info["txid"], "R", mempool_seq), seq.receive_sequence())
mempool_seq += 1
assert_equal((tip, "C", None), seq.receive_sequence())
mempool_seq += len(more_tx)
# Last tx
assert_equal((orig_txid_2, "A", mempool_seq), seq.receive_sequence())
mempool_seq += 1
self.nodes[0].generatetoaddress(1, ADDRESS_BCRT1_UNSPENDABLE)
self.sync_all() # want to make sure we didn't break "consensus" for other tests
def test_mempool_sync(self):
"""
Use sequence notification plus getrawmempool sequence results to "sync mempool"
"""
if not self.is_wallet_compiled():
self.log.info("Skipping mempool sync test")
return
self.log.info("Testing 'mempool sync' usage of sequence notifier")
address = 'tcp://127.0.0.1:28333'
socket = self.ctx.socket(zmq.SUB)
socket.set(zmq.RCVTIMEO, 60000)
seq = ZMQSubscriber(socket, b'sequence')
self.restart_node(0, ['-zmqpub%s=%s' % (seq.topic.decode(), address)])
connect_nodes(self.nodes[0], 1)
socket.connect(address)
# Relax so that the subscriber is ready before publishing zmq messages
sleep(0.2)
# In-memory counter, should always start at 1
next_mempool_seq = self.nodes[0].getrawmempool(mempool_sequence=True)["mempool_sequence"]
assert_equal(next_mempool_seq, 1)
# Some transactions have been happening but we aren't consuming zmq notifications yet
# or we lost a ZMQ message somehow and want to start over
txids = []
num_txs = 5
for _ in range(num_txs):
txids.append(self.nodes[1].sendtoaddress(address=self.nodes[0].getnewaddress(), amount=1.0, replaceable=True))
self.sync_all()
# 1) Consume backlog until we get a mempool sequence number
(hash_str, label, zmq_mem_seq) = seq.receive_sequence()
while zmq_mem_seq is None:
(hash_str, label, zmq_mem_seq) = seq.receive_sequence()
assert label == "A" or label == "R"
assert hash_str is not None
# 2) We need to "seed" our view of the mempool
mempool_snapshot = self.nodes[0].getrawmempool(mempool_sequence=True)
mempool_view = set(mempool_snapshot["txids"])
get_raw_seq = mempool_snapshot["mempool_sequence"]
assert_equal(get_raw_seq, 6)
# Snapshot may be too old compared to zmq message we read off latest
while zmq_mem_seq >= get_raw_seq:
sleep(2)
mempool_snapshot = self.nodes[0].getrawmempool(mempool_sequence=True)
mempool_view = set(mempool_snapshot["txids"])
get_raw_seq = mempool_snapshot["mempool_sequence"]
# Things continue to happen in the "interim" while waiting for snapshot results
# We have node 0 do all these to avoid p2p races with RBF announcements
for _ in range(num_txs):
txids.append(self.nodes[0].sendtoaddress(address=self.nodes[0].getnewaddress(), amount=0.1, replaceable=True))
self.nodes[0].bumpfee(txids[-1])
self.sync_all()
self.nodes[0].generatetoaddress(1, ADDRESS_BCRT1_UNSPENDABLE)
final_txid = self.nodes[0].sendtoaddress(address=self.nodes[0].getnewaddress(), amount=0.1, replaceable=True)
# 3) Consume ZMQ backlog until we get to "now" for the mempool snapshot
while True:
if zmq_mem_seq == get_raw_seq - 1:
break
(hash_str, label, mempool_sequence) = seq.receive_sequence()
if mempool_sequence is not None:
zmq_mem_seq = mempool_sequence
if zmq_mem_seq > get_raw_seq:
raise Exception("We somehow jumped mempool sequence numbers! zmq_mem_seq: {} > get_raw_seq: {}".format(zmq_mem_seq, get_raw_seq))
# 4) Moving forward, we apply the delta to our local view
# remaining txs(5) + 1 rbf(A+R) + 1 block connect + 1 final tx
expected_sequence = get_raw_seq
r_gap = 0
for _ in range(num_txs + 2 + 1 + 1):
(hash_str, label, mempool_sequence) = seq.receive_sequence()
if mempool_sequence is not None:
if mempool_sequence != expected_sequence:
# Detected "R" gap, means this a conflict eviction, and mempool tx are being evicted before its
# position in the incoming block message "C"
if label == "R":
assert mempool_sequence > expected_sequence
r_gap += mempool_sequence - expected_sequence
else:
raise Exception("WARNING: txhash has unexpected mempool sequence value: {} vs expected {}".format(mempool_sequence, expected_sequence))
if label == "A":
assert hash_str not in mempool_view
mempool_view.add(hash_str)
expected_sequence = mempool_sequence + 1
elif label == "R":
assert hash_str in mempool_view
mempool_view.remove(hash_str)
expected_sequence = mempool_sequence + 1
elif label == "C":
# (Attempt to) remove all txids from known block connects
block_txids = self.nodes[0].getblock(hash_str)["tx"][1:]
for txid in block_txids:
if txid in mempool_view:
expected_sequence += 1
mempool_view.remove(txid)
expected_sequence -= r_gap
r_gap = 0
elif label == "D":
# Not useful for mempool tracking per se
continue
else:
raise Exception("Unexpected ZMQ sequence label!")
assert_equal(self.nodes[0].getrawmempool(), [final_txid])
assert_equal(self.nodes[0].getrawmempool(mempool_sequence=True)["mempool_sequence"], expected_sequence)
# 5) If you miss a zmq/mempool sequence number, go back to step (2)
self.nodes[0].generatetoaddress(1, ADDRESS_BCRT1_UNSPENDABLE)
def test_multiple_interfaces(self):
# Set up two subscribers with different addresses
subscribers = []
for i in range(2):
address = 'tcp://127.0.0.1:%d' % (28334 + i)
socket = self.ctx.socket(zmq.SUB)
socket.set(zmq.RCVTIMEO, 60000)
hashblock = ZMQSubscriber(socket, b"hashblock")
socket.connect(address)
subscribers.append({'address': address, 'hashblock': hashblock})
self.restart_node(0, ['-zmqpub%s=%s' % (subscriber['hashblock'].topic.decode(), subscriber['address']) for subscriber in subscribers])
# Relax so that the subscriber is ready before publishing zmq messages
sleep(0.2)
# Generate 1 block in nodes[0] and receive all notifications
self.nodes[0].generatetoaddress(1, ADDRESS_BCRT1_UNSPENDABLE)
# Should receive the same block hash on both subscribers
assert_equal(self.nodes[0].getbestblockhash(), subscribers[0]['hashblock'].receive().hex())
assert_equal(self.nodes[0].getbestblockhash(), subscribers[1]['hashblock'].receive().hex())
if __name__ == '__main__':
ZMQTest().main()
| 45.730841 | 159 | 0.626093 |
| 2b944b679a7a2ab8e14f86256b4a13a0495104f6 | 3,736 | py | Python | espresso/tools/asr_prep_json.py | ifnspaml/espresso | 5911b69b20a6aa3cff4f0e1ba3db94ee759b52c5 | ["MIT"] | null | null | null | espresso/tools/asr_prep_json.py | ifnspaml/espresso | 5911b69b20a6aa3cff4f0e1ba3db94ee759b52c5 | ["MIT"] | null | null | null | espresso/tools/asr_prep_json.py | ifnspaml/espresso | 5911b69b20a6aa3cff4f0e1ba3db94ee759b52c5 | ["MIT"] | null | null | null |
#!/usr/bin/env python3
# Copyright (c) Yiming Wang
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import argparse
import json
import logging
import sys
from collections import OrderedDict
logging.basicConfig(
format="%(asctime)s | %(levelname)s | %(name)s | %(message)s",
datefmt="%Y-%m-%d %H:%M:%S",
level=logging.INFO,
stream=sys.stdout,
)
logger = logging.getLogger("espresso.tools.asr_prep_json")
def read_file(ordered_dict, key, dtype, *paths):
for path in paths:
with open(path, "r", encoding="utf-8") as f:
for line in f:
utt_id, val = line.strip().split(None, 1)
if utt_id in ordered_dict:
assert key not in ordered_dict[utt_id], (
"Duplicate utterance id " + utt_id + " in " + key
)
ordered_dict[utt_id].update({key: dtype(val)})
else:
ordered_dict[utt_id] = {key: dtype(val)}
return ordered_dict
def main():
parser = argparse.ArgumentParser(
description="Wrap all related files of a dataset into a single json file"
)
# fmt: off
audio_group = parser.add_mutually_exclusive_group(required=True)
audio_group.add_argument("--feat-files", nargs="+", default=None,
help="path(s) to feats.scp feature file(s) from Kaldi")
audio_group.add_argument("--wave-files", nargs="+", default=None,
help="path(s) to raw waveform file(s), where each entry has the format '<utt-id> <file-path>'")
audio_group.add_argument("--command-files", nargs="+", default=None,
help="path(s) to wav.scp file(s) from Kaldi")
parser.add_argument("--token-text-files", nargs="+", default=None,
help="path(s) to token_text file(s)")
parser.add_argument("--text-files", nargs="+", default=None,
help="path(s) to text file(s)")
parser.add_argument("--numerator-fst-files", nargs="+", default=None,
help="path(s) to numerator fst file(s)")
parser.add_argument("--alignment-files", nargs="+", default=None,
help="path(s) to alignment file(s)")
parser.add_argument("--utt2num-frames-files", nargs="+", default=None,
help="path(s) to utt2num_frames file(s)")
parser.add_argument("--output", required=True, type=argparse.FileType("w"),
help="path to save json output")
args = parser.parse_args()
# fmt: on
obj = OrderedDict()
if args.feat_files is not None:
obj = read_file(obj, "feat", str, *(args.feat_files))
elif args.wave_files is not None:
obj = read_file(obj, "wave", str, *(args.wave_files))
else:
assert args.command_files is not None
obj = read_file(obj, "command", str, *(args.command_files))
if args.token_text_files is not None:
obj = read_file(obj, "token_text", str, *(args.token_text_files))
if args.text_files is not None:
obj = read_file(obj, "text", str, *(args.text_files))
if args.numerator_fst_files is not None:
obj = read_file(obj, "numerator_fst", str, *(args.numerator_fst_files))
if args.alignment_files is not None:
obj = read_file(obj, "alignment", str, *(args.alignment_files))
if args.utt2num_frames_files is not None:
obj = read_file(obj, "utt2num_frames", int, *(args.utt2num_frames_files))
json.dump(obj, args.output, indent=4)
logger.info("Dumped {} examples in {}".format(len(obj), args.output.name))
if __name__ == "__main__":
main()
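# --- Illustrative example (hedged; hypothetical paths, not from the original script) ---
# Each input file holds "<utt-id> <value>" per line; e.g. a feats.scp line
# "utt1 /data/feats.ark:12" plus a text line "utt1 hello world" would be merged
# into the output JSON as:
#
#     {"utt1": {"feat": "/data/feats.ark:12", "text": "hello world"}}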
| 41.977528 | 124 | 0.608137 |
| c94f92407f5b3afa3787bdd72e3ccc70a13ce1cd | 518 | py | Python | equipment/framework/tests/Helpers/PrintIfHelperTest.py | didacelgueta/equipment | 12cd86bfe4b70bce3e2578e3ec79fc4c0f76c322 | ["MIT"] | 1 | 2022-03-02T11:32:10.000Z | 2022-03-02T11:32:10.000Z | equipment/framework/tests/Helpers/PrintIfHelperTest.py | didacelgueta/equipment | 12cd86bfe4b70bce3e2578e3ec79fc4c0f76c322 | ["MIT"] | 35 | 2022-03-02T14:33:49.000Z | 2022-03-30T08:14:26.000Z | equipment/framework/tests/Helpers/PrintIfHelperTest.py | didacelgueta/equipment | 12cd86bfe4b70bce3e2578e3ec79fc4c0f76c322 | ["MIT"] | 1 | 2022-03-24T11:52:01.000Z | 2022-03-24T11:52:01.000Z |
import unittest
from unittest.mock import patch
from equipment.framework.tests.TestCase import TestCase
from equipment.framework.helpers import print_if
class PrintIfHelperTest(TestCase):
@patch('builtins.print')
def test_true(self, mock_print):
print_if(True, 'foo')
mock_print.assert_called_with('foo')
@patch('builtins.print')
def test_false(self, mock_print):
print_if(False, 'foo')
mock_print.assert_not_called()
if __name__ == '__main__':
unittest.main()
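# --- Illustrative sketch (hedged; not from the original tests) ---
# From the two cases above, print_if(cond, msg) is expected to forward msg to
# print() only when cond is truthy; a minimal stand-in would be:
#
#     def print_if(condition, *args, **kwargs):
#         if condition:
#             print(*args, **kwargs)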
| 24.666667 | 55 | 0.716216 |
| a4a4f2264ce244f745d20fc59ffae5c44272ac6b | 5,423 | py | Python | tests/test_replication.py | jorge-sanchez-2020/cognite-replicator | eb79ac00063c37996218bd8a8ca1a691fdceab7f | ["Apache-2.0"] | null | null | null | tests/test_replication.py | jorge-sanchez-2020/cognite-replicator | eb79ac00063c37996218bd8a8ca1a691fdceab7f | ["Apache-2.0"] | null | null | null | tests/test_replication.py | jorge-sanchez-2020/cognite-replicator | eb79ac00063c37996218bd8a8ca1a691fdceab7f | ["Apache-2.0"] | null | null | null |
from cognite.client.data_classes import Asset, Event, TimeSeries
from cognite.replicator.replication import (
existing_mapping,
filter_objects,
find_objects_to_delete_if_not_in_src,
find_objects_to_delete_not_replicated_in_dst,
make_id_object_map,
make_objects_batch,
remove_replication_metadata,
)
def test_make_id_object_map():
assets = [Asset(id=3, metadata={"_replicatedInternalId": 55}), Asset(id=2)]
mapping = make_id_object_map(assets)
assert 1 == len(mapping)
assert 3 == mapping[55].id
def test_existing_mapping():
assets = [
Asset(id=3, name="holy grenade", metadata={"_replicatedInternalId": 33}),
Asset(id=7, name="not holy grenade", parent_id=3, metadata={"_replicatedInternalId": 77}),
Asset(id=5, name="in-holy grenade", parent_id=7, metadata={"_replicatedInternalId": 55}),
]
ids = existing_mapping(*assets)
assert ids[assets[0].metadata["_replicatedInternalId"]] == assets[0].id
assert ids[assets[1].metadata["_replicatedInternalId"]] == assets[1].id
assert ids[assets[2].metadata["_replicatedInternalId"]] == assets[2].id
def test_find_objects_to_delete_not_replicated_in_dst():
assets = [
Asset(id=3, name="holy grenade", metadata={"_replicatedSource": "source_tenant", "_replicatedInternalId": 123}),
Asset(id=7, name="not holy grenade", metadata={}),
Asset(id=5, name="in-holy grenade", metadata={"source": "None"}),
]
to_delete = find_objects_to_delete_not_replicated_in_dst(assets)
assert len(to_delete) == 2
assert set(to_delete) == {5, 7}
assert find_objects_to_delete_not_replicated_in_dst([]) == []
def test_find_objects_to_delete_if_not_in_src():
assets_dst = [
Asset(id=3, name="holy grenade", metadata={"_replicatedSource": "source_tenant", "_replicatedInternalId": 3}),
Asset(id=13, name="unlucky holy grenade", metadata={"_replicatedInternalId": 123}),
Asset(id=7, name="not holy grenade", metadata={}),
Asset(id=5, name="in-holy grenade", metadata={"_replicatedInternalId": 5}),
]
assets_src = [Asset(id=3, name="holy grenade", metadata={}), Asset(id=5, name="in-holy grenade", metadata={})]
to_delete = find_objects_to_delete_if_not_in_src(assets_src, assets_dst)
assert len(to_delete) == 1
assert to_delete[0] == 13
assert find_objects_to_delete_if_not_in_src([], []) == []
def test_filter_objects():
time_series = [TimeSeries(id=1, asset_id=100), TimeSeries(id=2), TimeSeries(id=3, asset_id=101)]
events = [Event(id=10, asset_ids=[100, 101]), Event(id=11), Event(id=12, asset_ids=[101])]
src_dst_asset_id_map = {100: 1000}
dummy_filtered_events = filter_objects(events, src_dst_asset_id_map)
dummy_filtered_ts = filter_objects(time_series, src_dst_asset_id_map)
assert dummy_filtered_events == events
assert dummy_filtered_ts == time_series
asset_events = filter_objects(events, src_dst_asset_id_map, skip_nonasset=True)
asset_ts = filter_objects(time_series, src_dst_asset_id_map, skip_nonasset=True)
assert len(asset_events) == 2
assert len(asset_ts) == 2
for i in range(len(asset_ts)):
assert asset_ts[i].asset_id is not None
assert asset_events[i].asset_ids is not None
linkable_events = filter_objects(events, src_dst_asset_id_map, skip_nonasset=True, skip_unlinkable=True)
linkable_ts = filter_objects(time_series, src_dst_asset_id_map, skip_nonasset=True, skip_unlinkable=True)
assert len(linkable_events) == 1
assert len(linkable_ts) == 1
assert linkable_events[0] == events[0]
assert linkable_ts[0] == time_series[0]
odd_id_events = filter_objects(events, src_dst_asset_id_map, filter_fn=lambda x: x.id % 2 == 1)
assert len(odd_id_events) == 1
for event in odd_id_events:
assert event.id % 2 == 1
def test_make_objects_batch():
src_objects = [
Event(id=1, last_updated_time=1000),
Event(id=2, last_updated_time=1000),
Event(id=3, last_updated_time=1000),
]
src_id_to_dst = {
1: Event(id=11, metadata={"_replicatedTime": 100}),
2: Event(id=12, metadata={"_replicatedTime": 10000}),
}
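    # per the asserts below: id 1 (dst copy older than src) is updated, id 2 (dst copy newer) is unchanged, id 3 (no dst copy) is created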
def dummy_update(src_obj, dst_obj, src_dst_ids_assets, project_src, replicated_runtime):
return dst_obj
def dummy_create(src_obj, src_dst_ids_assets, project_src, replicated_runtime):
return src_obj
created, updated, unchanged = make_objects_batch(
src_objects,
src_id_to_dst,
{},
create=dummy_create,
update=dummy_update,
project_src="test project",
replicated_runtime=10000,
)
assert len(created) == len(updated) == len(unchanged) == 1
assert updated[0].id == 11
assert unchanged[0].id == 12
assert created[0].id == 3
def test_remove_replication_metadata():
events = [
Event(metadata={"_replicatedInternalId": 10, "_replicatedSource": "src_project", "_replicatedTime": 10000000}),
Event(metadata={}),
Event(id=3, metadata={"_replicatedInternalId": 10, "misc1": 16, "misc2": "text"}),
]
remove_replication_metadata(events)
for event in events:
assert "_replicatedInternalId" not in event.metadata
assert "_replicatedSource" not in event.metadata
assert "_replicatedTime" not in event.metadata
assert len(events[2].metadata.keys()) == 2
| 40.470149
| 120
| 0.696109
|
f638ea91d4f51c395c31c4ff58e6cfe965800a17
| 892
|
py
|
Python
|
var/spack/repos/builtin/packages/spherepack/package.py
|
adrianjhpc/spack
|
0a9e4fcee57911f2db586aa50c8873d9cca8de92
|
[
"ECL-2.0",
"Apache-2.0",
"MIT"
] | 2
|
2020-10-15T01:08:42.000Z
|
2021-10-18T01:28:18.000Z
|
var/spack/repos/builtin/packages/spherepack/package.py
|
adrianjhpc/spack
|
0a9e4fcee57911f2db586aa50c8873d9cca8de92
|
[
"ECL-2.0",
"Apache-2.0",
"MIT"
] | 2
|
2019-07-30T10:12:28.000Z
|
2019-12-17T09:02:27.000Z
|
var/spack/repos/builtin/packages/spherepack/package.py
|
adrianjhpc/spack
|
0a9e4fcee57911f2db586aa50c8873d9cca8de92
|
[
"ECL-2.0",
"Apache-2.0",
"MIT"
] | 5
|
2019-07-30T09:42:14.000Z
|
2021-01-25T05:39:20.000Z
|
# Copyright 2013-2019 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class Spherepack(Package):
"""SPHEREPACK - A Package for Modeling Geophysical Processes"""
homepage = "https://www2.cisl.ucar.edu/resources/legacy/spherepack"
url = "https://www2.cisl.ucar.edu/sites/default/files/spherepack3.2.tar"
version('3.2', sha256='d58ef8cbc45cf2ad24f73a9f73f5f9d4fbe03cd9e2e7722e526fffb68be581ba')
def install(self, spec, prefix):
if self.compiler.fc is None:
raise InstallError("SPHEREPACK requires a Fortran 90 compiler")
make("MAKE=make", "F90=f90 -O2", "AR=ar", "libspherepack")
make("MAKE=make", "F90=f90 -O2", "AR=ar", "testspherepack")
install_tree("lib", prefix.lib)
| 37.166667
| 93
| 0.700673
|
7b80010c8136519be29e883fe6cbb0afe1a352db
| 4,885
|
py
|
Python
|
cirq-core/cirq/work/collector_test.py
|
LLcat1217/Cirq
|
b88069f7b01457e592ad69d6b413642ef11a56b8
|
[
"Apache-2.0"
] | 3,326
|
2018-07-18T23:17:21.000Z
|
2022-03-29T22:28:24.000Z
|
cirq-core/cirq/work/collector_test.py
|
bradyb/Cirq
|
610b0d4ea3a7862169610797266734c844ddcc1f
|
[
"Apache-2.0"
] | 3,443
|
2018-07-18T21:07:28.000Z
|
2022-03-31T20:23:21.000Z
|
cirq-core/cirq/work/collector_test.py
|
bradyb/Cirq
|
610b0d4ea3a7862169610797266734c844ddcc1f
|
[
"Apache-2.0"
] | 865
|
2018-07-18T23:30:24.000Z
|
2022-03-30T11:43:23.000Z
|
# Copyright 2019 The Cirq Developers
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import duet
import pytest
import cirq
def test_circuit_sample_job_equality():
eq = cirq.testing.EqualsTester()
c1 = cirq.Circuit()
c2 = cirq.Circuit(cirq.measure(cirq.LineQubit(0)))
eq.add_equality_group(
cirq.CircuitSampleJob(c1, repetitions=10),
cirq.CircuitSampleJob(c1, repetitions=10, tag=None),
)
eq.add_equality_group(cirq.CircuitSampleJob(c2, repetitions=10))
eq.add_equality_group(cirq.CircuitSampleJob(c1, repetitions=100))
eq.add_equality_group(cirq.CircuitSampleJob(c1, repetitions=10, tag='test'))
def test_circuit_sample_job_repr():
cirq.testing.assert_equivalent_repr(
cirq.CircuitSampleJob(cirq.Circuit(cirq.H(cirq.LineQubit(0))), repetitions=10, tag='guess')
)
@duet.sync
async def test_async_collect():
received = []
class TestCollector(cirq.Collector):
def next_job(self):
q = cirq.LineQubit(0)
circuit = cirq.Circuit(cirq.H(q), cirq.measure(q))
return cirq.CircuitSampleJob(circuit=circuit, repetitions=10, tag='test')
def on_job_result(self, job, result):
received.append(job.tag)
result = await TestCollector().collect_async(
sampler=cirq.Simulator(), max_total_samples=100, concurrency=5
)
assert result is None
assert received == ['test'] * 10
def test_collect():
received = []
class TestCollector(cirq.Collector):
def next_job(self):
q = cirq.LineQubit(0)
circuit = cirq.Circuit(cirq.H(q), cirq.measure(q))
return cirq.CircuitSampleJob(circuit=circuit, repetitions=10, tag='test')
def on_job_result(self, job, result):
received.append(job.tag)
TestCollector().collect(sampler=cirq.Simulator(), max_total_samples=100, concurrency=5)
assert received == ['test'] * 10
def test_failed_job():
class FailingSampler:
async def run_async(self, circuit, repetitions):
await duet.completed_future(None)
raise Exception('job failed!')
class TestCollector(cirq.Collector):
def next_job(self):
q = cirq.LineQubit(0)
circuit = cirq.Circuit(cirq.H(q), cirq.measure(q))
return cirq.CircuitSampleJob(circuit=circuit, repetitions=10, tag='test')
def on_job_result(self, job, result):
pass
with pytest.raises(Exception, match='job failed!'):
TestCollector().collect(sampler=FailingSampler(), max_total_samples=100, concurrency=5)
def test_collect_with_reaction():
events = [0]
sent = 0
received = 0
class TestCollector(cirq.Collector):
def next_job(self):
nonlocal sent
if sent >= received + 3:
return None
sent += 1
events.append(sent)
q = cirq.LineQubit(0)
circuit = cirq.Circuit(cirq.H(q), cirq.measure(q))
return cirq.CircuitSampleJob(circuit=circuit, repetitions=10, tag=sent)
def on_job_result(self, job, result):
nonlocal received
received += 1
events.append(-job.tag)
TestCollector().collect(sampler=cirq.Simulator(), max_total_samples=100, concurrency=5)
# Expected sends and receives are present.
assert sorted(events) == list(range(-10, 1 + 10))
# Sends are in order.
assert [e for e in events if e > 0] == list(range(1, 11))
# Every receive comes after the corresponding send.
assert all(events.index(-k) > events.index(k) for k in range(1, 11))
def test_flatten_jobs_terminate_from_collector():
sent = False
received = []
class TestCollector(cirq.Collector):
def next_job(self):
nonlocal sent
if sent:
return
sent = True
q = cirq.LineQubit(0)
circuit = cirq.Circuit(cirq.H(q), cirq.measure(q))
a = cirq.CircuitSampleJob(circuit=circuit, repetitions=10, tag='test')
b = cirq.CircuitSampleJob(circuit=circuit, repetitions=10, tag='test')
return [[a, None], [[[None]]], [[[]]], b]
def on_job_result(self, job, result):
received.append(job.tag)
TestCollector().collect(sampler=cirq.Simulator(), concurrency=5)
assert received == ['test'] * 2
| 33.231293
| 99
| 0.649335
|
acbb9ec0706ee41019730827df9f4c4d4d48cc56
| 8,656
|
py
|
Python
|
python/gstgva/util.py
|
morkovka1337/gst-video-analytics
|
b06a6c58ecd1f0b63019d7d757ec471d224a8090
|
[
"MIT"
] | 1
|
2020-12-01T10:49:35.000Z
|
2020-12-01T10:49:35.000Z
|
python/gstgva/util.py
|
morkovka1337/gst-video-analytics
|
b06a6c58ecd1f0b63019d7d757ec471d224a8090
|
[
"MIT"
] | null | null | null |
python/gstgva/util.py
|
morkovka1337/gst-video-analytics
|
b06a6c58ecd1f0b63019d7d757ec471d224a8090
|
[
"MIT"
] | 1
|
2019-11-14T16:40:54.000Z
|
2019-11-14T16:40:54.000Z
|
# ==============================================================================
# Copyright (C) 2018-2020 Intel Corporation
#
# SPDX-License-Identifier: MIT
# ==============================================================================
import ctypes
from contextlib import contextmanager
# libgstreamer
libgst = ctypes.CDLL("libgstreamer-1.0.so.0")
GST_PADDING = 4
class GstMapInfo(ctypes.Structure):
_fields_ = [("memory", ctypes.c_void_p), # GstMemory *memory
("flags", ctypes.c_int), # GstMapFlags flags
("data", ctypes.POINTER(ctypes.c_byte)), # guint8 *data
("size", ctypes.c_size_t), # gsize size
("maxsize", ctypes.c_size_t), # gsize maxsize
("user_data", ctypes.c_void_p * 4), # gpointer user_data[4]
("_gst_reserved", ctypes.c_void_p * GST_PADDING)]
GST_MAP_INFO_POINTER = ctypes.POINTER(GstMapInfo)
class GUnion(ctypes.Union):
_fields_ = [('v_int', ctypes.c_int),
('v_uint', ctypes.c_uint),
('v_long', ctypes.c_long),
('v_ulong', ctypes.c_ulong),
('v_int64', ctypes.c_int64),
('v_uint64', ctypes.c_uint64),
('v_float', ctypes.c_float),
('v_double', ctypes.c_double),
('v_pointer', ctypes.c_void_p)]
class GValue(ctypes.Structure):
_fields_ = [('g_type', ctypes.c_size_t),
('data', GUnion)]
G_VALUE_POINTER = ctypes.POINTER(GValue)
class GValueArray(ctypes.Structure):
_fields_ = [("n_values", ctypes.c_uint32),
("values", ctypes.c_void_p),
("n_preallocated", ctypes.c_uint32)]
G_VALUE_ARRAY_POINTER = ctypes.POINTER(GValueArray)
# gst buffer
libgst.gst_buffer_map.argtypes = [
ctypes.c_void_p, GST_MAP_INFO_POINTER, ctypes.c_int]
libgst.gst_buffer_map.restype = ctypes.c_int
libgst.gst_buffer_unmap.argtypes = [ctypes.c_void_p, GST_MAP_INFO_POINTER]
libgst.gst_buffer_unmap.restype = None
libgst.gst_buffer_iterate_meta_filtered.argtypes = [
ctypes.c_void_p, ctypes.POINTER(ctypes.c_void_p), ctypes.c_void_p]
libgst.gst_buffer_iterate_meta_filtered.restype = ctypes.c_void_p
libgst.gst_buffer_remove_meta.argtypes = [ctypes.c_void_p, ctypes.c_void_p]
libgst.gst_buffer_remove_meta.restype = ctypes.c_bool
libgst.gst_buffer_add_meta.argtypes = [
ctypes.c_void_p, ctypes.c_void_p, ctypes.c_void_p]
libgst.gst_buffer_add_meta.restype = ctypes.c_void_p
# gst miniobject
libgst.gst_mini_object_make_writable.argtypes = [ctypes.c_void_p]
libgst.gst_mini_object_make_writable.restype = ctypes.c_void_p
libgst.gst_mini_object_is_writable.argtypes = [ctypes.c_void_p]
libgst.gst_mini_object_is_writable.restype = ctypes.c_int
libgst.gst_mini_object_ref.argtypes = [ctypes.c_void_p]
libgst.gst_mini_object_ref.restype = ctypes.c_void_p
libgst.gst_mini_object_unref.argtypes = [ctypes.c_void_p]
libgst.gst_mini_object_unref.restype = ctypes.c_void_p
# gst structure
libgst.gst_structure_get_name.argtypes = [ctypes.c_void_p]
libgst.gst_structure_get_name.restype = ctypes.c_char_p
libgst.gst_structure_has_name.argtypes = [ctypes.c_void_p, ctypes.c_char_p]
libgst.gst_structure_has_name.restype = ctypes.c_bool
libgst.gst_structure_set_name.argtypes = [ctypes.c_void_p, ctypes.c_char_p]
libgst.gst_structure_set_name.restype = None
libgst.gst_structure_set_value.argtypes = [
    ctypes.c_void_p, ctypes.c_char_p, ctypes.c_void_p]
libgst.gst_structure_set_value.restype = None
libgst.gst_structure_set_array.argtypes = [
    ctypes.c_void_p, ctypes.c_char_p, ctypes.c_void_p]
libgst.gst_structure_set_array.restype = None
libgst.gst_structure_remove_field.argtypes = [ctypes.c_void_p, ctypes.c_char_p]
libgst.gst_structure_remove_field.restype = None
libgst.gst_structure_get_field_type.argtypes = [
    ctypes.c_void_p, ctypes.c_char_p]
libgst.gst_structure_get_field_type.restype = ctypes.c_size_t
libgst.gst_structure_get_string.argtypes = [ctypes.c_void_p, ctypes.c_char_p]
libgst.gst_structure_get_string.restype = ctypes.c_char_p
libgst.gst_structure_get_value.argtypes = [ctypes.c_void_p, ctypes.c_char_p]
libgst.gst_structure_get_value.restype = ctypes.c_void_p
libgst.gst_structure_get_int.argtypes = [
ctypes.c_void_p, ctypes.c_char_p, ctypes.POINTER(ctypes.c_int)]
libgst.gst_structure_get_int.restype = ctypes.c_int
libgst.gst_structure_get_double.argtypes = [
ctypes.c_void_p, ctypes.c_char_p, ctypes.POINTER(ctypes.c_double)]
libgst.gst_structure_get_double.restype = ctypes.c_int
libgst.gst_structure_get_array.argtypes = [ctypes.c_void_p, ctypes.c_char_p, ctypes.POINTER(G_VALUE_ARRAY_POINTER)]
libgst.gst_structure_get_array.restype = ctypes.c_bool
libgst.gst_structure_n_fields.argtypes = [ctypes.c_void_p]
libgst.gst_structure_n_fields.restype = ctypes.c_int
libgst.gst_structure_nth_field_name.argtypes = [ctypes.c_void_p, ctypes.c_uint]
libgst.gst_structure_nth_field_name.restype = ctypes.c_char_p
libgst.gst_structure_new_empty.argtypes = [ctypes.c_char_p]
libgst.gst_structure_new_empty.restype = ctypes.c_void_p
libgst.gst_structure_copy.argtypes = [ctypes.c_void_p]
libgst.gst_structure_copy.restype = ctypes.c_void_p
# gst caps
libgst.gst_caps_get_structure.argtypes = [ctypes.c_void_p, ctypes.c_uint]
libgst.gst_caps_get_structure.restype = ctypes.c_void_p
libgst.gst_value_array_get_type.argtypes = None
libgst.gst_value_array_get_type.restype = ctypes.c_void_p
libgst.gst_value_array_append_value.argtypes = [ctypes.c_void_p, ctypes.c_void_p]
libgst.gst_value_array_append_value.restype = None
@contextmanager
def GST_PAD_PROBE_INFO_BUFFER(info):
_buffer = info.get_buffer()
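    # temporarily drop the extra reference held by the Python wrapper so the buffer can be
    # treated as writable while in use; it is restored in the finally block (this mirrors the
    # usual pygobject refcount workaround, stated here as an assumption about intent)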
_buffer.mini_object.refcount -= 1
try:
yield _buffer
finally:
_buffer.mini_object.refcount += 1
@contextmanager
def TRANSFORM_IP_BUFFER(_buffer):
_buffer.mini_object.refcount -= 1
try:
yield _buffer
finally:
_buffer.mini_object.refcount += 1
@contextmanager
def gst_buffer_data(_buffer, flags):
if _buffer is None:
raise TypeError("Cannot pass NULL to gst_buffer_map")
ptr = hash(_buffer)
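    # hash() on a pygobject-wrapped Gst.Buffer appears to yield the underlying C pointer value
    # (a common pygobject trick), which is what the raw libgst call below expects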
mapping = GstMapInfo()
success = libgst.gst_buffer_map(ptr, mapping, flags)
if not success:
raise RuntimeError("Couldn't map buffer")
try:
yield ctypes.cast(mapping.data, ctypes.POINTER(ctypes.c_byte * mapping.size)).contents
finally:
libgst.gst_buffer_unmap(ptr, mapping)
# libgobject
libgobject = ctypes.CDLL('libgobject-2.0.so')
class GList(ctypes.Structure):
pass
GLIST_POINTER = ctypes.POINTER(GList)
GList._fields_ = [
('data', ctypes.c_void_p),
('next', GLIST_POINTER),
('prev', GLIST_POINTER)
]
libgobject.g_type_from_name.argtypes = [ctypes.c_char_p]
libgobject.g_type_from_name.restype = ctypes.c_ulong
libgobject.g_value_get_variant.argtypes = [ctypes.c_void_p]
libgobject.g_value_get_variant.restype = ctypes.c_void_p
libgobject.g_value_get_int.argtypes = [ctypes.c_void_p]
libgobject.g_value_get_int.restype = ctypes.c_void_p
libgobject.g_value_set_variant.argtypes = [ctypes.c_void_p, ctypes.c_void_p]
libgobject.g_value_set_variant.restype = None
libgobject.g_variant_get_fixed_array.argtypes = [
ctypes.c_void_p, ctypes.POINTER(ctypes.c_size_t), ctypes.c_size_t]
libgobject.g_variant_get_fixed_array.restype = ctypes.c_void_p
libgobject.g_list_remove.argtypes = [GLIST_POINTER, ctypes.c_void_p]
libgobject.g_list_remove.restype = GLIST_POINTER
libgobject.g_variant_new_fixed_array.argtypes = [
ctypes.c_void_p, ctypes.c_void_p, ctypes.c_size_t, ctypes.c_size_t]
libgobject.g_variant_new_fixed_array.restype = ctypes.c_void_p
libgobject.g_value_array_new.argtypes = [ctypes.c_size_t]
libgobject.g_value_array_new.restype = G_VALUE_ARRAY_POINTER
libgobject.g_value_init.argtypes = [ctypes.c_void_p, ctypes.c_size_t]
libgobject.g_value_init.restype = ctypes.c_void_p
libgobject.g_value_set_uint.argtypes = [ctypes.c_void_p, ctypes.c_size_t]
libgobject.g_value_set_uint.restype = ctypes.c_void_p
libgobject.g_value_set_int.argtypes = [ctypes.c_void_p, ctypes.c_size_t]
libgobject.g_value_set_int.restype = ctypes.c_void_p
libgobject.g_value_array_append.argtypes = [G_VALUE_ARRAY_POINTER, ctypes.c_void_p]
libgobject.g_value_array_append.restype = G_VALUE_ARRAY_POINTER
libgobject.g_value_array_get_nth.argtypes = [G_VALUE_ARRAY_POINTER, ctypes.c_uint]
libgobject.g_value_array_get_nth.restype = G_VALUE_POINTER
libgobject.g_value_get_uint.argtypes = [G_VALUE_POINTER]
libgobject.g_value_get_uint.restype = ctypes.c_uint
# libgstvideo
libgstvideo = ctypes.CDLL("libgstvideo-1.0.so")
###
libgstva = ctypes.CDLL("libgstvideoanalyticsmeta.so")
| 39.167421
| 115
| 0.762823
|
ce9a6163b78d06893ae80f652ce9c09d864ff91e
| 659
|
py
|
Python
|
sympy/tensor/array/tests/test_ndim_array_conversions.py
|
msgoff/sympy
|
1e7daef7514902f5e89718fa957b7b36c6669a10
|
[
"BSD-3-Clause"
] | null | null | null |
sympy/tensor/array/tests/test_ndim_array_conversions.py
|
msgoff/sympy
|
1e7daef7514902f5e89718fa957b7b36c6669a10
|
[
"BSD-3-Clause"
] | null | null | null |
sympy/tensor/array/tests/test_ndim_array_conversions.py
|
msgoff/sympy
|
1e7daef7514902f5e89718fa957b7b36c6669a10
|
[
"BSD-3-Clause"
] | null | null | null |
from sympy.tensor.array import (
ImmutableDenseNDimArray,
ImmutableSparseNDimArray,
MutableDenseNDimArray,
MutableSparseNDimArray,
)
from sympy.abc import x, y, z
def test_NDim_array_conv():
MD = MutableDenseNDimArray([x, y, z])
MS = MutableSparseNDimArray([x, y, z])
ID = ImmutableDenseNDimArray([x, y, z])
IS = ImmutableSparseNDimArray([x, y, z])
assert MD.as_immutable() == ID
assert MD.as_mutable() == MD
assert MS.as_immutable() == IS
assert MS.as_mutable() == MS
assert ID.as_immutable() == ID
assert ID.as_mutable() == MD
assert IS.as_immutable() == IS
assert IS.as_mutable() == MS
| 24.407407
| 44
| 0.664643
|
09fab14de248d2051e636ef821dfc399ad4d7fa7
| 548
|
py
|
Python
|
app/request.py
|
derrokip34/Blog
|
ec6d6acb0c69ea42a8ea99c6836d943ad7417984
|
[
"MIT"
] | null | null | null |
app/request.py
|
derrokip34/Blog
|
ec6d6acb0c69ea42a8ea99c6836d943ad7417984
|
[
"MIT"
] | null | null | null |
app/request.py
|
derrokip34/Blog
|
ec6d6acb0c69ea42a8ea99c6836d943ad7417984
|
[
"MIT"
] | null | null | null |
import urllib.request,json
from .models import Quote
quotes_url = 'http://quotes.stormconsultancy.co.uk/random.json'
def get_quotes():
with urllib.request.urlopen(quotes_url) as url:
quotes_data = url.read()
quotes_response = json.loads(quotes_data)
quote_object = None
if quotes_response:
id = quotes_response.get('id')
author = quotes_response.get('author')
quote = quotes_response.get('quote')
quote_object = Quote(id,author,quote)
return quote_object
| 27.4
| 63
| 0.656934
|
813bd2241eaa039ddb26fa5855499d9de303e62a
| 2,321
|
py
|
Python
|
tbxforms/widgets.py
|
jams2/tbxforms
|
d88aabb428a5e74d67fe877eb1e74bf9d9550c9f
|
[
"BSD-2-Clause"
] | 1
|
2022-02-03T13:59:43.000Z
|
2022-02-03T13:59:43.000Z
|
tbxforms/widgets.py
|
jams2/tbxforms
|
d88aabb428a5e74d67fe877eb1e74bf9d9550c9f
|
[
"BSD-2-Clause"
] | 1
|
2021-09-07T14:41:03.000Z
|
2021-09-07T14:41:03.000Z
|
tbxforms/widgets.py
|
jams2/tbxforms
|
d88aabb428a5e74d67fe877eb1e74bf9d9550c9f
|
[
"BSD-2-Clause"
] | 1
|
2021-09-07T14:07:40.000Z
|
2021-09-07T14:07:40.000Z
|
from django import forms
from django.utils.translation import ugettext_lazy as _
class DateInputWidget(forms.MultiWidget):
"""
A DateInputWidget defines the styling of the set of fields for displaying
the value for a DateInputField.
    A custom widget was needed for two reasons. First, the CSS classes needed to
    style the fields and set their widths reduce the code needed to add a Date input
    component to a form. Second, the Design System requires labels for the individual
fields. That's not supported out of the box by a MultiValueField so the labels
are added as a custom attribute and rendered with the correct markup in the
template. The template also pops the label from the widget so it does not also
get added as an attribute.
"""
template_name = "tbx/widgets/date.html"
def __init__(self, *args, **kwargs):
widgets = [
forms.TextInput(
attrs={
"class": "tbxforms-input tbxforms-date-input__input tbxforms-input--width-2",
"label": _("Day"),
"pattern": "[0-9]*",
"inputmode": "numeric",
}
),
forms.TextInput(
attrs={
"class": "tbxforms-input tbxforms-date-input__input tbxforms-input--width-2",
"label": _("Month"),
"pattern": "[0-9]*",
"inputmode": "numeric",
}
),
forms.TextInput(
attrs={
"class": "tbxforms-input tbxforms-date-input__input tbxforms-input--width-4",
"label": _("Year"),
"pattern": "[0-9]*",
"inputmode": "numeric",
}
),
]
super().__init__(widgets, **kwargs)
def decompress(self, value):
"""
Convert a ``date`` into values for the day, month and year so it can be
displayed in the widget's fields.
Args:
value (date): the date to be displayed
Returns:
a 3-tuple containing the day, month and year components of the date.
"""
if value:
return value.day, value.month, value.year
return None, None, None
| 35.166667
| 97
| 0.551486
|
51da09e509654100516be8549e6da40c74072e39
| 1,310
|
py
|
Python
|
util/n64/find_code_length.py
|
paulsapps/splat
|
e312b86f36982dfddc4b00f082d7066f0b259938
|
[
"MIT"
] | 31
|
2021-01-23T01:21:40.000Z
|
2022-03-19T03:56:42.000Z
|
util/n64/find_code_length.py
|
paulsapps/splat
|
e312b86f36982dfddc4b00f082d7066f0b259938
|
[
"MIT"
] | 44
|
2021-02-03T15:10:37.000Z
|
2022-03-03T08:29:47.000Z
|
util/n64/find_code_length.py
|
paulsapps/splat
|
e312b86f36982dfddc4b00f082d7066f0b259938
|
[
"MIT"
] | 10
|
2021-03-16T19:37:24.000Z
|
2022-03-03T15:09:48.000Z
|
#! /usr/bin/python3
from capstone import *
from capstone import Cs, CS_ARCH_MIPS, CS_MODE_MIPS64, CS_MODE_BIG_ENDIAN
from capstone.mips import *
import argparse
md = Cs(CS_ARCH_MIPS, CS_MODE_MIPS64 + CS_MODE_BIG_ENDIAN)
parser = argparse.ArgumentParser(description="Given a rom and start offset, find where the code ends")
parser.add_argument("rom", help="path to a .z64 rom")
parser.add_argument("start", help="start offset")
parser.add_argument("--end", help="end offset", default=None)
parser.add_argument("--vram", help="vram address to start disassembly at", default="0x80000000")
def run(rom_bytes, start_offset, vram, end_offset=None):
rom_addr = start_offset
last_return = rom_addr
for insn in md.disasm(rom_bytes[start_offset:], vram):
if insn.mnemonic == "jr" and insn.op_str == "$ra":
last_return = rom_addr
rom_addr += 4
if end_offset and rom_addr >= end_offset:
break
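    # pad past the last "jr $ra" up to the next 0x10 boundary so the delay slot and any
    # alignment padding are included (a heuristic for where the code segment ends)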
return last_return + (0x10 - (last_return % 0x10))
def main():
args = parser.parse_args()
rom_bytes = open(args.rom, "rb").read()
start = int(args.start, 0)
end = None
vram = int(args.vram, 0)
if args.end:
end = int(args.end, 0)
print(f"{run(rom_bytes, start, vram, end):X}")
if __name__ == "__main__":
main()
| 27.291667
| 102
| 0.675573
|
f72ac2161ec154a6fbc2d4c0db4116346291b457
| 9,690
|
py
|
Python
|
homeassistant/components/zha/core/discovery.py
|
twrecked/core
|
d3ae8a938cdea9b6e0d443c91c37ac3dbbd459ab
|
[
"Apache-2.0"
] | 2
|
2021-09-13T21:44:02.000Z
|
2021-12-17T21:20:51.000Z
|
homeassistant/components/zha/core/discovery.py
|
twrecked/core
|
d3ae8a938cdea9b6e0d443c91c37ac3dbbd459ab
|
[
"Apache-2.0"
] | 5
|
2021-02-08T20:55:25.000Z
|
2022-03-12T00:51:18.000Z
|
homeassistant/components/zha/core/discovery.py
|
twrecked/core
|
d3ae8a938cdea9b6e0d443c91c37ac3dbbd459ab
|
[
"Apache-2.0"
] | 2
|
2020-11-04T07:40:01.000Z
|
2021-09-13T21:44:03.000Z
|
"""Device discovery functions for Zigbee Home Automation."""
from collections import Counter
import logging
from typing import Callable, List, Tuple
from homeassistant import const as ha_const
from homeassistant.core import callback
from homeassistant.helpers.dispatcher import (
async_dispatcher_connect,
async_dispatcher_send,
)
from homeassistant.helpers.entity_registry import async_entries_for_device
from homeassistant.helpers.typing import HomeAssistantType
from . import const as zha_const, registries as zha_regs, typing as zha_typing
from .. import ( # noqa: F401 pylint: disable=unused-import,
binary_sensor,
cover,
device_tracker,
fan,
light,
lock,
sensor,
switch,
)
from .channels import base
_LOGGER = logging.getLogger(__name__)
@callback
async def async_add_entities(
_async_add_entities: Callable,
entities: List[
Tuple[
zha_typing.ZhaEntityType,
Tuple[str, zha_typing.ZhaDeviceType, List[zha_typing.ChannelType]],
]
],
) -> None:
"""Add entities helper."""
if not entities:
return
to_add = [ent_cls(*args) for ent_cls, args in entities]
_async_add_entities(to_add, update_before_add=True)
entities.clear()
class ProbeEndpoint:
"""All discovered channels and entities of an endpoint."""
def __init__(self):
"""Initialize instance."""
self._device_configs = {}
@callback
def discover_entities(self, channel_pool: zha_typing.ChannelPoolType) -> None:
"""Process an endpoint on a zigpy device."""
self.discover_by_device_type(channel_pool)
self.discover_by_cluster_id(channel_pool)
@callback
def discover_by_device_type(self, channel_pool: zha_typing.ChannelPoolType) -> None:
"""Process an endpoint on a zigpy device."""
unique_id = channel_pool.unique_id
component = self._device_configs.get(unique_id, {}).get(ha_const.CONF_TYPE)
if component is None:
ep_profile_id = channel_pool.endpoint.profile_id
ep_device_type = channel_pool.endpoint.device_type
component = zha_regs.DEVICE_CLASS[ep_profile_id].get(ep_device_type)
if component and component in zha_const.COMPONENTS:
channels = channel_pool.unclaimed_channels()
entity_class, claimed = zha_regs.ZHA_ENTITIES.get_entity(
component, channel_pool.manufacturer, channel_pool.model, channels
)
if entity_class is None:
return
channel_pool.claim_channels(claimed)
channel_pool.async_new_entity(component, entity_class, unique_id, claimed)
@callback
def discover_by_cluster_id(self, channel_pool: zha_typing.ChannelPoolType) -> None:
"""Process an endpoint on a zigpy device."""
items = zha_regs.SINGLE_INPUT_CLUSTER_DEVICE_CLASS.items()
single_input_clusters = {
cluster_class: match
for cluster_class, match in items
if not isinstance(cluster_class, int)
}
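        # only class-based matchers are kept here; exact cluster-id matches are looked up
        # directly inside the loop below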
remaining_channels = channel_pool.unclaimed_channels()
for channel in remaining_channels:
if channel.cluster.cluster_id in zha_regs.CHANNEL_ONLY_CLUSTERS:
channel_pool.claim_channels([channel])
continue
component = zha_regs.SINGLE_INPUT_CLUSTER_DEVICE_CLASS.get(
channel.cluster.cluster_id
)
if component is None:
for cluster_class, match in single_input_clusters.items():
if isinstance(channel.cluster, cluster_class):
component = match
break
self.probe_single_cluster(component, channel, channel_pool)
        # until we can get rid of registries
self.handle_on_off_output_cluster_exception(channel_pool)
@staticmethod
def probe_single_cluster(
component: str,
channel: zha_typing.ChannelType,
ep_channels: zha_typing.ChannelPoolType,
) -> None:
"""Probe specified cluster for specific component."""
if component is None or component not in zha_const.COMPONENTS:
return
channel_list = [channel]
unique_id = f"{ep_channels.unique_id}-{channel.cluster.cluster_id}"
entity_class, claimed = zha_regs.ZHA_ENTITIES.get_entity(
component, ep_channels.manufacturer, ep_channels.model, channel_list
)
if entity_class is None:
return
ep_channels.claim_channels(claimed)
ep_channels.async_new_entity(component, entity_class, unique_id, claimed)
def handle_on_off_output_cluster_exception(
self, ep_channels: zha_typing.ChannelPoolType
) -> None:
"""Process output clusters of the endpoint."""
profile_id = ep_channels.endpoint.profile_id
device_type = ep_channels.endpoint.device_type
if device_type in zha_regs.REMOTE_DEVICE_TYPES.get(profile_id, []):
return
for cluster_id, cluster in ep_channels.endpoint.out_clusters.items():
component = zha_regs.SINGLE_OUTPUT_CLUSTER_DEVICE_CLASS.get(
cluster.cluster_id
)
if component is None:
continue
channel_class = zha_regs.ZIGBEE_CHANNEL_REGISTRY.get(
cluster_id, base.ZigbeeChannel
)
channel = channel_class(cluster, ep_channels)
self.probe_single_cluster(component, channel, ep_channels)
def initialize(self, hass: HomeAssistantType) -> None:
"""Update device overrides config."""
zha_config = hass.data[zha_const.DATA_ZHA].get(zha_const.DATA_ZHA_CONFIG, {})
overrides = zha_config.get(zha_const.CONF_DEVICE_CONFIG)
if overrides:
self._device_configs.update(overrides)
class GroupProbe:
"""Determine the appropriate component for a group."""
def __init__(self):
"""Initialize instance."""
self._hass = None
self._unsubs = []
def initialize(self, hass: HomeAssistantType) -> None:
"""Initialize the group probe."""
self._hass = hass
self._unsubs.append(
async_dispatcher_connect(
hass, zha_const.SIGNAL_GROUP_ENTITY_REMOVED, self._reprobe_group
)
)
def cleanup(self):
"""Clean up on when zha shuts down."""
for unsub in self._unsubs[:]:
unsub()
self._unsubs.remove(unsub)
def _reprobe_group(self, group_id: int) -> None:
"""Reprobe a group for entities after its members change."""
zha_gateway = self._hass.data[zha_const.DATA_ZHA][zha_const.DATA_ZHA_GATEWAY]
zha_group = zha_gateway.groups.get(group_id)
if zha_group is None:
return
self.discover_group_entities(zha_group)
@callback
def discover_group_entities(self, group: zha_typing.ZhaGroupType) -> None:
"""Process a group and create any entities that are needed."""
# only create a group entity if there are 2 or more members in a group
if len(group.members) < 2:
_LOGGER.debug(
"Group: %s:0x%04x has less than 2 members - skipping entity discovery",
group.name,
group.group_id,
)
return
entity_domains = GroupProbe.determine_entity_domains(self._hass, group)
if not entity_domains:
return
zha_gateway = self._hass.data[zha_const.DATA_ZHA][zha_const.DATA_ZHA_GATEWAY]
for domain in entity_domains:
entity_class = zha_regs.ZHA_ENTITIES.get_group_entity(domain)
if entity_class is None:
continue
self._hass.data[zha_const.DATA_ZHA][domain].append(
(
entity_class,
(
group.get_domain_entity_ids(domain),
f"{domain}_zha_group_0x{group.group_id:04x}",
group.group_id,
zha_gateway.coordinator_zha_device,
),
)
)
async_dispatcher_send(self._hass, zha_const.SIGNAL_ADD_ENTITIES)
@staticmethod
def determine_entity_domains(
hass: HomeAssistantType, group: zha_typing.ZhaGroupType
) -> List[str]:
"""Determine the entity domains for this group."""
entity_domains: List[str] = []
zha_gateway = hass.data[zha_const.DATA_ZHA][zha_const.DATA_ZHA_GATEWAY]
all_domain_occurrences = []
for member in group.members:
if member.device.is_coordinator:
continue
entities = async_entries_for_device(
zha_gateway.ha_entity_registry, member.device.device_id
)
all_domain_occurrences.extend(
[
entity.domain
for entity in entities
if entity.domain in zha_regs.GROUP_ENTITY_DOMAINS
]
)
if not all_domain_occurrences:
return entity_domains
        # keep the domains that occur for at least two entities across the group members
counts = Counter(all_domain_occurrences)
entity_domains = [domain[0] for domain in counts.items() if domain[1] >= 2]
_LOGGER.debug(
"The entity domains are: %s for group: %s:0x%04x",
entity_domains,
group.name,
group.group_id,
)
return entity_domains
PROBE = ProbeEndpoint()
GROUP_PROBE = GroupProbe()
| 36.022305
| 88
| 0.637771
|
41c8b06a8d34d35e1d652a0da73c2c5ded3b9bd7
| 170
|
py
|
Python
|
katas/beta/flatten_me.py
|
the-zebulan/CodeWars
|
1eafd1247d60955a5dfb63e4882e8ce86019f43a
|
[
"MIT"
] | 40
|
2016-03-09T12:26:20.000Z
|
2022-03-23T08:44:51.000Z
|
katas/beta/flatten_me.py
|
akalynych/CodeWars
|
1eafd1247d60955a5dfb63e4882e8ce86019f43a
|
[
"MIT"
] | null | null | null |
katas/beta/flatten_me.py
|
akalynych/CodeWars
|
1eafd1247d60955a5dfb63e4882e8ce86019f43a
|
[
"MIT"
] | 36
|
2016-11-07T19:59:58.000Z
|
2022-03-31T11:18:27.000Z
|
def flatten_me(lst):
result = []
for a in lst:
try:
result.extend(a)
except TypeError:
result.append(a)
return result
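# illustrative usage (not part of the original kata): flatten_me([1, [2, 3], 'ab'])
# returns [1, 2, 3, 'a', 'b'] -- strings are iterable, so extend() splits them into characters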
| 18.888889
| 28
| 0.511765
|
cdbc8b30b33df138e6fda49806c5028526a0e11b
| 1,114
|
py
|
Python
|
src/lstm_models/sine_curve_fitting.py
|
mfkiwl/ConvLSTM-Computer-Vision-for-Structural-Health-Monitoring-SHM-and-NonDestructive-Testing-NDT
|
551f6afd2f4207a4a6a717cabc13fe51f31eb410
|
[
"MIT"
] | 17
|
2020-02-25T05:41:41.000Z
|
2022-03-25T06:48:30.000Z
|
src/lstm_models/sine_curve_fitting.py
|
SubChange/ConvLSTM-Computer-Vision-for-Structural-Health-Monitoring-SHM-and-NonDestructive-Testing-NDT
|
0f00291fd7d20d3472709f2941adba722b35f8d5
|
[
"MIT"
] | 1
|
2021-01-13T06:07:02.000Z
|
2021-01-13T06:07:02.000Z
|
src/lstm_models/sine_curve_fitting.py
|
SubChange/ConvLSTM-Computer-Vision-for-Structural-Health-Monitoring-SHM-and-NonDestructive-Testing-NDT
|
0f00291fd7d20d3472709f2941adba722b35f8d5
|
[
"MIT"
] | 5
|
2020-11-22T12:58:23.000Z
|
2021-06-16T14:20:10.000Z
|
import numpy as np
import matplotlib.pyplot as plt
def sine_func(x_i):
y = np.sin(2*np.pi*x_i)
return y
def gen_toy_sine_curve_dataset(N, std, periods=1):
"""
:param N: Number of data points/samples
    :param std: Standard deviation for (white/Gaussian) noise input
    :param periods: Number of sine periods to generate
    :return: 2-tuple (x_vector, y_vector)
"""
y_vector = []
x_vector = []
for period_i in range(periods):
for x_i in np.linspace(0, 1, N):
x_i = x_i + period_i
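            # shift x into the current period so consecutive periods extend the curve over [0, periods]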
noise = np.random.normal(0, std)
y_i = sine_func(x_i) + noise
x_vector.append(x_i)
y_vector.append(y_i)
return x_vector, y_vector
N = 50
periods = 3
std_noise = 0
x_train, y_train = gen_toy_sine_curve_dataset(N, std_noise, periods)
# x_vector_test, y_hat_vector = gen_toy_sine_curve_dataset(10, 0.25)
fig_dims = (12, 12)
fig, ax = plt.subplots(figsize=fig_dims)
# fig, axes = plt.subplots(1, 3, figsize = fig_dims)
ax.plot(x_train, y_train, "g", label="Sine curve plot (x_train vs y_train) - num data points = {0} std = {1}".format(N, std_noise))
ax.grid(True)
ax.legend()
# plt.plot(x_train, y_train, "g")
# plt.plot(x_vector_test, y_hat_vector, "r")
plt.show()
| 27.170732
| 131
| 0.70377
|
8df25bf53add782f90ae03d3bcb25ce0f2ae10f3
| 1,700
|
py
|
Python
|
pyscf/dft/uks_symm.py
|
tmash/pyscf
|
89c101c1c963e8247808635c61cd165bffab42d6
|
[
"Apache-2.0"
] | 1
|
2020-01-05T13:50:50.000Z
|
2020-01-05T13:50:50.000Z
|
pyscf/dft/uks_symm.py
|
tmash/pyscf
|
89c101c1c963e8247808635c61cd165bffab42d6
|
[
"Apache-2.0"
] | null | null | null |
pyscf/dft/uks_symm.py
|
tmash/pyscf
|
89c101c1c963e8247808635c61cd165bffab42d6
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python
# Copyright 2014-2019 The PySCF Developers. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Author: Qiming Sun <osirpt.sun@gmail.com>
#
'''
Non-relativistic Unrestricted Kohn-Sham
'''
from pyscf.lib import logger
from pyscf.scf import uhf_symm
from pyscf.dft import uks
from pyscf.dft import rks
class SymAdaptedUKS(uhf_symm.UHF, rks.KohnShamDFT):
    ''' Unrestricted Kohn-Sham '''
def __init__(self, mol):
uhf_symm.UHF.__init__(self, mol)
rks.KohnShamDFT.__init__(self)
def dump_flags(self, verbose=None):
uhf_symm.UHF.dump_flags(self, verbose)
rks.KohnShamDFT.dump_flags(self, verbose)
return self
get_veff = uks.get_veff
energy_elec = uks.energy_elec
def nuc_grad_method(self):
from pyscf.grad import uks
return uks.Gradients(self)
UKS = SymAdaptedUKS
if __name__ == '__main__':
from pyscf import gto
mol = gto.Mole()
mol.verbose = 7
mol.output = '/dev/null'
mol.atom.extend([['He', (0.,0.,0.)], ])
mol.basis = { 'He': 'cc-pvdz'}
mol.symmetry = 1
mol.build()
m = UKS(mol)
m.xc = 'b3lyp'
print(m.scf()) # -2.89992555753
| 26.153846
| 74
| 0.686471
|
d0b2460f436befcd9684c1a5d46413d97fe5880c
| 1,734
|
py
|
Python
|
google-cloud-gke_hub-v1beta1/synth.py
|
trambui09/google-cloud-ruby
|
9c5f5fc27cbfbb4c4fc55d1171f450d1af3226aa
|
[
"Apache-2.0"
] | null | null | null |
google-cloud-gke_hub-v1beta1/synth.py
|
trambui09/google-cloud-ruby
|
9c5f5fc27cbfbb4c4fc55d1171f450d1af3226aa
|
[
"Apache-2.0"
] | null | null | null |
google-cloud-gke_hub-v1beta1/synth.py
|
trambui09/google-cloud-ruby
|
9c5f5fc27cbfbb4c4fc55d1171f450d1af3226aa
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2021 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This script is used to synthesize generated parts of this library."""
import synthtool as s
import synthtool.gcp as gcp
import synthtool.languages.ruby as ruby
import logging
logging.basicConfig(level=logging.DEBUG)
gapic = gcp.GAPICMicrogenerator()
library = gapic.ruby_library(
"gkehub", "v1beta1",
extra_proto_files=[
"google/cloud/common_resources.proto",
],
generator_args={
"ruby-cloud-gem-name": "google-cloud-gke_hub-v1beta1",
"ruby-cloud-title": "GKE Hub V1beta1",
"ruby-cloud-description": "The GKE Hub API centrally manages features and services on all your Kubernetes clusters running in a variety of environments, including Google cloud, on premises in customer datacenters, or other third party clouds.",
"ruby-cloud-env-prefix": "GKE_HUB",
"ruby-cloud-grpc-service-config": "google/cloud/gkehub/v1beta1/membership_grpc_service_config.json",
"ruby-cloud-product-url": "https://cloud.google.com/anthos/clusters/docs",
"ruby-cloud-api-id": "gkehub.googleapis.com",
"ruby-cloud-api-shortname": "gkehub",
}
)
s.copy(library, merge=ruby.global_merge)
| 40.325581
| 252
| 0.731257
|
c4336db71ed6b5e453b20b0f5c7d3b20992dd3df
| 9,284
|
py
|
Python
|
tests/unit/test_static.py
|
matt-land/warehouse
|
0acb5d94528099ed5356253457cf8dc0b4e50aad
|
[
"Apache-2.0"
] | 1
|
2018-06-11T23:29:13.000Z
|
2018-06-11T23:29:13.000Z
|
tests/unit/test_static.py
|
matt-land/warehouse
|
0acb5d94528099ed5356253457cf8dc0b4e50aad
|
[
"Apache-2.0"
] | 8
|
2019-12-26T16:45:00.000Z
|
2022-03-21T22:17:13.000Z
|
tests/unit/test_static.py
|
matt-land/warehouse
|
0acb5d94528099ed5356253457cf8dc0b4e50aad
|
[
"Apache-2.0"
] | 1
|
2019-08-26T06:52:55.000Z
|
2019-08-26T06:52:55.000Z
|
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
from unittest import mock
import pretend
import pytest
from pyramid.tweens import EXCVIEW, INGRESS
from warehouse import static
class TestWhiteNoise:
def test_resolves_manifest_path(self, monkeypatch):
resolver = pretend.stub(
resolve=pretend.call_recorder(
lambda p: pretend.stub(
abspath=lambda: "/path/to/manifest.json",
),
),
)
monkeypatch.setattr(static, "resolver", resolver)
whitenoise = static.WhiteNoise(
None,
manifest="warehouse:manifest.json",
)
assert whitenoise.manifest_path == "/path/to/manifest.json"
assert resolver.resolve.calls == [
pretend.call("warehouse:manifest.json"),
]
def test_empty_manifest_when_no_manifest_provided(self):
whitenoise = static.WhiteNoise(None)
assert whitenoise.manifest == set()
def test_loads_manifest(self, tmpdir):
manifest_path = str(tmpdir.join("manifest.json"))
with open(manifest_path, "w", encoding="utf8") as fp:
json.dump({"file.txt": "file.hash.txt"}, fp)
whitenoise = static.WhiteNoise(None, manifest=manifest_path)
assert whitenoise.manifest_path == manifest_path
assert whitenoise.manifest == {"file.hash.txt"}
@pytest.mark.parametrize("autorefresh", [True, False])
def test_caches_manifest(self, tmpdir, autorefresh):
manifest_path = str(tmpdir.join("manifest.json"))
with open(manifest_path, "w", encoding="utf8") as fp:
json.dump({"file.txt": "file.hash.txt"}, fp)
whitenoise = static.WhiteNoise(
None,
manifest=manifest_path,
autorefresh=autorefresh,
)
assert whitenoise.manifest_path == manifest_path
assert whitenoise.manifest == {"file.hash.txt"}
with open(manifest_path, "w", encoding="utf8") as fp:
json.dump({"file.txt": "file.newhash.txt"}, fp)
assert whitenoise.manifest == \
({"file.newhash.txt"} if autorefresh else {"file.hash.txt"})
def test_is_immutable_file_no_manifest(self):
whitenoise = static.WhiteNoise(None)
assert not whitenoise.is_immutable_file(None, None)
def test_is_immutable_file_wrong_path(self):
whitenoise = static.WhiteNoise(None, manifest="/path/to/manifest.json")
assert not whitenoise.is_immutable_file(
"/path/in/another/dir",
"/static/another/dir",
)
def test_is_immutable_file_not_in_manifest(self):
whitenoise = static.WhiteNoise(None, manifest="/path/to/manifest.json")
whitenoise._manifest = {"another/file.txt"}
assert not whitenoise.is_immutable_file(
"/path/to/the/file.txt",
"static/the/file.txt",
)
def test_is_immutable_file_in_manifest(self):
whitenoise = static.WhiteNoise(None, manifest="/path/to/manifest.json")
whitenoise._manifest = {"the/file.txt"}
assert whitenoise.is_immutable_file(
"/path/to/the/file.txt",
"static/the/file.txt",
)
class TestWhitenoiseTween:
@pytest.mark.parametrize("autorefresh", [True, False])
def test_bypasses(self, autorefresh):
whitenoise = static.WhiteNoise(None, autorefresh=autorefresh)
whitenoise.add_files(
static.resolver.resolve("warehouse:/static/dist/").abspath(),
prefix="/static/",
)
response = pretend.stub()
handler = pretend.call_recorder(lambda request: response)
registry = pretend.stub(whitenoise=whitenoise)
request = pretend.stub(path_info="/other/", registry=registry)
tween = static.whitenoise_tween_factory(handler, registry)
resp = tween(request)
assert resp is response
@pytest.mark.parametrize("autorefresh", [True, False])
def test_method_not_allowed(self, autorefresh):
whitenoise = static.WhiteNoise(None, autorefresh=autorefresh)
whitenoise.add_files(
static.resolver.resolve("warehouse:/static/dist/").abspath(),
prefix="/static/",
)
response = pretend.stub()
handler = pretend.call_recorder(lambda request: response)
registry = pretend.stub(whitenoise=whitenoise)
request = pretend.stub(
method="POST",
environ={"HTTP_ACCEPT_ENCODING": "gzip"},
path_info="/static/manifest.json",
registry=registry,
)
tween = static.whitenoise_tween_factory(handler, registry)
resp = tween(request)
assert resp.status_code == 405
def test_serves(self):
whitenoise = static.WhiteNoise(None, autorefresh=True)
whitenoise.add_files(
static.resolver.resolve("warehouse:/static/dist/").abspath(),
prefix="/static/",
)
path, headers = (whitenoise.find_file("/static/manifest.json")
.get_path_and_headers({}))
headers = dict(headers)
response = pretend.stub()
handler = pretend.call_recorder(lambda request: response)
registry = pretend.stub(whitenoise=whitenoise)
request = pretend.stub(
method="GET",
environ={},
path_info="/static/manifest.json",
registry=registry,
)
tween = static.whitenoise_tween_factory(handler, registry)
resp = tween(request)
assert resp.status_code == 200
assert resp.headers["Content-Type"] == "application/json"
assert (
set(i.strip() for i in resp.headers["Cache-Control"].split(",")) ==
{"public", "max-age=60"}
)
assert resp.headers["Vary"] == "Accept-Encoding"
with open(path, "rb") as fp:
assert resp.body == fp.read()
class TestDirectives:
def test_whitenoise_serve_static_unsupported_kwarg(self):
with pytest.raises(TypeError):
static.whitenoise_serve_static(pretend.stub(), lol_fake=True)
def test_whitenoise_serve_static(self, monkeypatch):
whitenoise_obj = pretend.stub()
whitenoise_cls = pretend.call_recorder(lambda *a, **kw: whitenoise_obj)
whitenoise_cls.config_attrs = ["autorefresh"]
monkeypatch.setattr(static, "WhiteNoise", whitenoise_cls)
config = pretend.stub(
action=pretend.call_recorder(lambda d, f: None),
registry=pretend.stub(),
)
static.whitenoise_serve_static(config, autorefresh=True)
assert config.action.calls == [
pretend.call(("whitenoise", "create instance"), mock.ANY),
]
config.action.calls[0].args[1]()
assert whitenoise_cls.calls == [pretend.call(None, autorefresh=True)]
assert config.registry.whitenoise is whitenoise_obj
def test_whitenoise_add_files(self):
config = pretend.stub(
action=pretend.call_recorder(lambda d, f: None),
registry=pretend.stub(
whitenoise=pretend.stub(
add_files=pretend.call_recorder(lambda path, prefix: None),
),
),
)
static.whitenoise_add_files(config, "/static/foo/", "/lol/")
assert config.action.calls == [
pretend.call(
("whitenoise", "add files", "/static/foo/", "/lol/"),
mock.ANY,
),
]
config.action.calls[0].args[1]()
assert config.registry.whitenoise.add_files.calls == [
pretend.call("/static/foo", prefix="/lol/"),
]
def test_includeme():
config = pretend.stub(
add_directive=pretend.call_recorder(lambda name, callable: None),
add_tween=pretend.call_recorder(lambda tween, over, under: None),
)
static.includeme(config)
assert config.add_directive.calls == [
pretend.call(
"whitenoise_serve_static",
static.whitenoise_serve_static,
),
pretend.call(
"whitenoise_add_files",
static.whitenoise_add_files,
),
]
assert config.add_tween.calls == [
pretend.call(
"warehouse.static.whitenoise_tween_factory",
over=[
"warehouse.utils.compression.compression_tween_factory",
EXCVIEW,
],
under=[
"warehouse.csp.content_security_policy_tween_factory",
"warehouse.referrer_policy.referrer_policy_tween_factory",
"warehouse.config.require_https_tween_factory",
INGRESS,
],
),
]
| 33.039146
| 79
| 0.61816
|
5c8884d745934429b58fd771f9a3c6b214e44f31
| 1,791
|
py
|
Python
|
src/random_forest.py
|
jackred/CW2_GERMAN_SIGN
|
988a99d6012ae95bec778a91785c76a2ca40ba87
|
[
"MIT"
] | null | null | null |
src/random_forest.py
|
jackred/CW2_GERMAN_SIGN
|
988a99d6012ae95bec778a91785c76a2ca40ba87
|
[
"MIT"
] | null | null | null |
src/random_forest.py
|
jackred/CW2_GERMAN_SIGN
|
988a99d6012ae95bec778a91785c76a2ca40ba87
|
[
"MIT"
] | null | null | null |
from sklearn.ensemble import RandomForestClassifier
from sklearn import metrics
from helper import pre_processed_data_all, pre_processed_label_all, \
print_result, tree_to_png, run_function, extract_measures, \
plot_experiment
from arg import rForest_args
import numpy as np
import random
def random_forest(data_train, label_train, data_test, max_depth):
clf = RandomForestClassifier(
max_depth=max_depth,
max_features=10
)
clf.fit(data_train, label_train)
return clf.predict(data_test) # , clf.predict_proba(data_test)
def server():
args = rForest_args()
rand = np.random.randint(10000000)
data_train, data_test = pre_processed_data_all(args, rand)
label_train, label_test = pre_processed_label_all(args, rand)
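    # the same random seed is passed to both helpers, presumably so the data and label
    # train/test splits stay aligned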
res = []
for i in range(2, 6):
        print('===\n===== Max depth: %d =====\n===' % (i + 1))
res.append(run_function(random_forest,
args.cross_validate,
data_train, label_train,
data_test, label_test, max_depth=i+1))
print(res)
res = extract_measures(res)
print(res)
    plot_experiment('random_forest_test_9000', 'max depth', res)
if __name__ == "__main__":
server()
# print('start random tree classification')
# args = rForest_args()
# rand = np.random.randint(100000)
# data_train, data_test = pre_processed_data_all(args, rand)
# label_train, label_test = pre_processed_label_all(args, rand)
# print('data loaded')
# found, confidence = random_forest(data_train, label_train, data_test)
# print('random tree done')
# compare_class_true_positive(found, label_test)
# compare_class(found, label_test)
# measure(found, label_test, confidence, True)
| 34.442308
| 75
| 0.677275
|
ef8b5f35df60d0c3752d886bc6c755cb0ec51d4b
| 873
|
py
|
Python
|
Chapter_05/chap05_prog_02_Sympy_SolEquation.py
|
rojassergio/Prealgebra-via-Python-Programming
|
8d1cdf103af2a39f6ae7bba76e9a6a0182eb39d3
|
[
"MIT"
] | 1
|
2018-06-19T11:54:15.000Z
|
2018-06-19T11:54:15.000Z
|
Chapter_05/chap05_prog_02_Sympy_SolEquation.py
|
rojassergio/Prealgebra-via-Python-Programming
|
8d1cdf103af2a39f6ae7bba76e9a6a0182eb39d3
|
[
"MIT"
] | null | null | null |
Chapter_05/chap05_prog_02_Sympy_SolEquation.py
|
rojassergio/Prealgebra-via-Python-Programming
|
8d1cdf103af2a39f6ae7bba76e9a6a0182eb39d3
|
[
"MIT"
] | 3
|
2020-03-02T22:31:18.000Z
|
2021-04-05T05:06:39.000Z
|
"""
Content under Creative Commons Attribution license CC-BY 4.0,
code under MIT license (c)2018 Sergio Rojas (srojas@usb.ve)
http://en.wikipedia.org/wiki/MIT_License
http://creativecommons.org/licenses/by/4.0/
Created on april, 2018
Last Modified on: may 15, 2018
This program finds the solution of the equation
7/5 = 5x + 9
The trick to finding rational solutions to equations is to
write the numerical fractions using the SymPy S function
"""
from sympy import symbols, Eq, solveset, S, sympify
x = symbols('x')
LHS = S('7')/5
RHS = S('5')*x + S('9')
thesol = list( solveset( Eq(LHS, RHS), x) )
print('thesol =', thesol)
newLHS = LHS - RHS #rearrange the equation to read: LHS - RHS = 0
print('newLHS =', newLHS)
newLHS = newLHS.subs(x, thesol[0])
if newLHS.simplify() == 0:
print('The solution of {0} = {1}, is x = {2}'.format(LHS,RHS,thesol[0]))
| 26.454545
| 76
| 0.682703
|
c8aaf5d4cb0d5d45d3c88aaa84cae48d7485987b
| 5,103
|
py
|
Python
|
00-lesson-1/fakelib/nr_cube.py
|
tonybutzer/hauge
|
7cee4c8edda9a2a5a11e69425516c7c47bb21cbf
|
[
"MIT"
] | null | null | null |
00-lesson-1/fakelib/nr_cube.py
|
tonybutzer/hauge
|
7cee4c8edda9a2a5a11e69425516c7c47bb21cbf
|
[
"MIT"
] | null | null | null |
00-lesson-1/fakelib/nr_cube.py
|
tonybutzer/hauge
|
7cee4c8edda9a2a5a11e69425516c7c47bb21cbf
|
[
"MIT"
] | null | null | null |
import sys
import requests
import pandas as pd
import geopandas
import xarray as xr
from ks_cool_stac import ks_filter_stacquery, ks_filter_ascending, ks_convert_llurl
from fm_map import Fmap
from xra_func import load_all_bands_from_df_as_array_of_dataarrays
def get_bbox_from_geojson(geojson_file):
url = geojson_file
gdf = geopandas.read_file(url)
bbox = (gdf['geometry'].bounds['minx'][0], gdf['geometry'].bounds['miny'][0],
gdf['geometry'].bounds['maxx'][0], gdf['geometry'].bounds['maxy'][0])
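    # bbox ordered (minx, miny, maxx, maxy), i.e. west/south/east/north, as the STAC bbox parameter expects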
return(bbox)
def get_my_alber_scenes(dates, bbox,min_clouds=0, max_clouds=90):
clouds = max_clouds
filtered = ks_filter_stacquery({
'bbox': bbox,
'limit': 500,
'time': dates,
        # this did not work: 'collection': 'landsat-c2l2-sr',
'query': {'eo:cloud_cover': {'lt': clouds},
'collection': {'eq': 'landsat-c2l2alb-bt'}}},
#'query': {'eo:cloud_cover': {'lt': clouds},
#'eo:instrument': {'eq': 'OLI_TIRS'},
#'collection': {'eq': 'landsat-c2l2alb-bt'}}},
filters=ks_filter_ascending)
return(filtered)
import json
def get_href_url_for_asset(assets, asset_band):
try:
jstr = assets[asset_band]
href = jstr['href']
except:
jstr='NotAvail'
href = None
return ks_convert_llurl(href)
def get_measurement_band_assets(assets):
asset_dict = {}
for key in assets.keys():
if 'SR_' in key:
# print('ASSET', key)
asset_dict[key] = get_href_url_for_asset(assets, key)
# print(asset_dict)
return asset_dict
def create_simple_df_instead(meas_dict, my_scenes):
dict_list=[]
print(len(my_scenes), 'LENGTH')
print(my_scenes[0].keys())
for scene in my_scenes:
prop_df = pd.DataFrame(scene['properties'])
prune_list = ['datetime', 'eo:cloud_cover', 'eo:platform', 'eo:instrument', 'landsat:wrs_path', 'landsat:wrs_row', 'landsat:scene_id']
prune_prop_df = prop_df[prune_list]
print(prune_prop_df)
prune_prop_df.reset_index(drop=True, inplace=True)
my_dict = prune_prop_df.to_dict('records')
s_dict = my_dict[0]
asset_dict = get_measurement_band_assets(scene['assets'])
for akey in meas_dict:
try:
print(meas_dict[akey])
s_dict[akey] = asset_dict[meas_dict[akey]]
except:
print('key error', scene['assets'])
dict_list.append(s_dict)
my_df = pd.DataFrame(dict_list)
#print(my_df)
return(my_df)
class NR():
def __init__(self):
self.sat_api_url = "https://landsatlook.usgs.gov/sat-api"
print("creating Nathan Roberts Class")
self.verify_api()
self.set_measurements()
def verify_api(self):
satAPI = requests.post(self.sat_api_url)
if satAPI.status_code == 200:
print('Sat-API is Available')
else:
print('Sat-API is not Available')
sys.exit()
def set_measurements(self):
# this could be done from a yml
self.measures = {
'coastal':'SR_B1.TIF',
'red':'SR_B2.TIF',
'green':'SR_B3.TIF',
'blue':'SR_B4.TIF',
'nir':'SR_B5.TIF',
'swir1':'SR_B6.TIF',
'swir2':'SR_B7.TIF',
'pixqa':'SR_QA_AEROSOL.TIF', }
def measurements(self):
display_measures = pd.DataFrame(self.measures, index=[0])
return(display_measures)
def collections(self):
satAPICollections = requests.post('https://landsatlook.usgs.gov/sat-api/collections')
sat_collections = satAPICollections.json()
my_df = pd.DataFrame(sat_collections['collections'])
s_df = my_df[['id','title','description']]
pd.set_option('display.max_colwidth', None) ### Keeps pandas from truncating text
return(s_df)
def search(self, geoj_file, date_min, date_max, min_cloud, max_cloud, requested_measures):
print(geoj_file, date_min, date_max, min_cloud, max_cloud, requested_measures)
bbox = get_bbox_from_geojson(geoj_file)
dates = f'{date_min}/{date_max}'
my_scenes = get_my_alber_scenes(dates, bbox, min_cloud, max_cloud)
print(my_scenes)
simple_df = create_simple_df_instead(self.measures, my_scenes)
#print(simple_df)
return(my_scenes, simple_df)
def load(self, aoi_geojson_file, work_df, my_measures='ALL'):
if 'ALL' in my_measures:
my_measures = self.measures.keys()
ldatasets = load_all_bands_from_df_as_array_of_dataarrays(aoi_geojson_file, work_df, my_measures)
from datetime import datetime
my_text_list=[]
my_list = work_df.datetime.tolist()
for dt in my_list:
str_dt = dt.strftime('%Y-%m-%d')
my_text_list.append(str_dt)
DS = xr.concat(ldatasets, dim= pd.DatetimeIndex(my_text_list, name='time'))
return DS
def map(self, geoj):
fm=Fmap()
return(fm.map_geojson(geoj))
def sat(self, geoj):
fm=Fmap()
return(fm.sat_geojson(geoj))
| 32.711538
| 142
| 0.630805
|
fd15ff916ae18f9aa8f92a32111ea4889beb19e3
| 11,730
|
py
|
Python
|
python/tvm/target/target.py
|
retamia/tvm
|
5d25dc54d874bf2ddf0e8cf34c4748e9e2656fd8
|
[
"Apache-2.0"
] | 5
|
2020-06-19T03:22:24.000Z
|
2021-03-17T22:16:48.000Z
|
python/tvm/target/target.py
|
retamia/tvm
|
5d25dc54d874bf2ddf0e8cf34c4748e9e2656fd8
|
[
"Apache-2.0"
] | 2
|
2020-07-08T12:34:59.000Z
|
2020-07-11T15:54:47.000Z
|
python/tvm/target/target.py
|
retamia/tvm
|
5d25dc54d874bf2ddf0e8cf34c4748e9e2656fd8
|
[
"Apache-2.0"
] | 2
|
2019-08-24T00:06:36.000Z
|
2022-03-03T02:07:27.000Z
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Target data structure."""
import os
import re
import warnings
import tvm._ffi
from tvm.runtime import Object
from . import _ffi_api
@tvm._ffi.register_object
class Target(Object):
"""Target device information, use through TVM API.
Note
----
Do not use class constructor, you can create target using the following functions
- :py:func:`tvm.target.create` create target from string
- :py:func:`tvm.target.arm_cpu` create arm_cpu target
- :py:func:`tvm.target.cuda` create CUDA target
- :py:func:`tvm.target.rocm` create ROCM target
- :py:func:`tvm.target.mali` create Mali target
- :py:func:`tvm.target.intel_graphics` create Intel Graphics target
"""
def __new__(cls):
# Always override new to enable class
obj = Object.__new__(cls)
obj._keys = None
obj._options = None
obj._libs = None
return obj
@property
def keys(self):
if not self._keys:
self._keys = [str(k) for k in self.keys_array]
return self._keys
@property
def options(self):
if not self._options:
self._options = [str(o) for o in self.options_array]
return self._options
@property
def libs(self):
if not self._libs:
self._libs = [str(l) for l in self.libs_array]
return self._libs
@property
def model(self):
for opt in self.options_array:
if opt.startswith('-model='):
return opt[7:]
return 'unknown'
@property
def mcpu(self):
"""Returns the mcpu from the target if it exists."""
mcpu = ''
if self.options is not None:
for opt in self.options:
if 'mcpu' in opt:
mcpu = opt.split('=')[1]
return mcpu
def __enter__(self):
_ffi_api.EnterTargetScope(self)
return self
def __exit__(self, ptype, value, trace):
_ffi_api.ExitTargetScope(self)
@staticmethod
def current(allow_none=True):
"""Returns the current target.
Parameters
----------
allow_none : bool
Whether allow the current target to be none
Raises
------
ValueError if current target is not set.
"""
return _ffi_api.GetCurrentTarget(allow_none)
def _merge_opts(opts, new_opts):
"""Helper function to merge options"""
if isinstance(new_opts, str):
new_opts = new_opts.split()
if new_opts:
opt_set = set(opts)
new_opts = [opt for opt in new_opts if opt not in opt_set]
return opts + new_opts
return opts
def cuda(model='unknown', options=None):
"""Returns a cuda target.
Parameters
----------
model: str
The model of cuda device (e.g. 1080ti)
options : str or list of str
Additional options
"""
opts = _merge_opts(['-model=%s' % model], options)
return _ffi_api.TargetCreate("cuda", *opts)
def rocm(model='unknown', options=None):
"""Returns a ROCM target.
Parameters
----------
model: str
The model of this device
options : str or list of str
Additional options
"""
opts = _merge_opts(["-model=%s" % model], options)
return _ffi_api.TargetCreate("rocm", *opts)
def mali(model='unknown', options=None):
"""Returns a ARM Mali GPU target.
Parameters
----------
model: str
The model of this device
options : str or list of str
Additional options
"""
opts = ["-device=mali", '-model=%s' % model]
opts = _merge_opts(opts, options)
return _ffi_api.TargetCreate("opencl", *opts)
def intel_graphics(model='unknown', options=None):
"""Returns an Intel Graphics target.
Parameters
----------
model: str
The model of this device
options : str or list of str
Additional options
"""
opts = ["-device=intel_graphics", '-model=%s' % model]
opts = _merge_opts(opts, options)
return _ffi_api.TargetCreate("opencl", *opts)
def opengl(model='unknown', options=None):
"""Returns a OpenGL target.
Parameters
----------
options : str or list of str
Additional options
"""
opts = _merge_opts(["-model=%s" % model], options)
return _ffi_api.TargetCreate("opengl", *opts)
def arm_cpu(model='unknown', options=None):
"""Returns a ARM CPU target.
This function will also download pre-tuned op parameters when there is none.
Parameters
----------
model: str
SoC name or phone name of the arm board.
options : str or list of str
Additional options
"""
trans_table = {
"pixel2": ["-model=snapdragon835", "-target=arm64-linux-android -mattr=+neon"],
"mate10": ["-model=kirin970", "-target=arm64-linux-android -mattr=+neon"],
"mate10pro": ["-model=kirin970", "-target=arm64-linux-android -mattr=+neon"],
"p20": ["-model=kirin970", "-target=arm64-linux-android -mattr=+neon"],
"p20pro": ["-model=kirin970", "-target=arm64-linux-android -mattr=+neon"],
"rasp3b": ["-model=bcm2837", "-target=armv7l-linux-gnueabihf -mattr=+neon"],
"rasp4b": ["-model=bcm2711", "-target=arm-linux-gnueabihf -mattr=+neon"],
"rk3399": ["-model=rk3399", "-target=aarch64-linux-gnu -mattr=+neon"],
"pynq": ["-model=pynq", "-target=armv7a-linux-eabi -mattr=+neon"],
"ultra96": ["-model=ultra96", "-target=aarch64-linux-gnu -mattr=+neon"],
}
pre_defined_opt = trans_table.get(model, ["-model=%s" % model])
opts = ["-device=arm_cpu"] + pre_defined_opt
opts = _merge_opts(opts, options)
return _ffi_api.TargetCreate("llvm", *opts)
def rasp(options=None):
"""Return a Raspberry 3b target.
Parameters
----------
options : str or list of str
Additional options
"""
warnings.warn('tvm.target.rasp() is going to be deprecated. '
'Please use tvm.target.arm_cpu("rasp3b")')
return arm_cpu('rasp3b', options)
def vta(model='unknown', options=None):
opts = ["-device=vta", '-keys=cpu', '-model=%s' % model]
opts = _merge_opts(opts, options)
ret = _ffi_api.TargetCreate("ext_dev", *opts)
return ret
def bifrost(model='unknown', options=None):
"""Return an ARM Mali GPU target (Bifrost architecture).
Parameters
----------
options : str or list of str
Additional options
"""
opts = ["-device=bifrost", '-model=%s' % model]
opts = _merge_opts(opts, options)
return _ffi_api.TargetCreate("opencl", *opts)
def hexagon(cpu_ver='v66', sim_args=None, hvx=128):
"""Returns a Hexagon target.
Parameters
----------
cpu_ver : str
CPU version used for code generation. Not all allowed cpu str
will be valid, LLVM will throw an error.
sim_args : str or list of str
User defined sim arguments. CPU version defaults to cpu_ver.
Otherwise, separate versions are used for codegen and sim. Not
all allowed cpu strings will be valid, simulator will throw an
error if invalid. Does not affect codegen.
hvx : int
Size of hvx register. Value of 0 indicates disabled hvx.
"""
# Example compiler arguments
# llvm -target=hexagon -mcpu=hexagonv66 -mattr=+hvxv66,+hvx-length128b
# Check for valid codegen cpu
valid_hex = ['v60', 'v62', 'v65', 'v66', 'v67', 'v67t']
try:
cpu_ver = cpu_ver[cpu_ver.index('v'):].lower()
assert 3 <= len(cpu_ver) <= 4
except:
msg = '{} is not a valid Hexagon version\nvalid versions include {}'
raise ValueError(msg.format(cpu_ver, valid_hex)) from None
assert hvx in [0, 64, 128]
# Target string
def create_target(cpu_ver):
target = ' -target=hexagon'
mcpu = ' -mcpu=hexagon' + cpu_ver
mattr = ''
# HVX enable
if hvx:
mattr = ' -mattr=+hvx' + cpu_ver + ',+hvx-length' + str(hvx) + 'b'
return 'llvm' + target + mcpu + mattr
# Simulator string
def create_sim(cpu_ver, sim_args):
def validate_hvx_length(codegen_hvx, sim_args):
if sim_args and '--hvx_length' in sim_args:
# If --hvx_length was specified, check HVX length of sim
# vs codegen
i = sim_args.index('hvx_length') + len('hvx_length') + 1
sim_hvx = sim_args[i:i+3]
if sim_hvx != str(codegen_hvx):
print('WARNING: sim hvx {} and codegen hvx {} mismatch!' \
.format(sim_hvx, codegen_hvx))
elif codegen_hvx != 0:
# If --hvx_length was not given, add it if HVX is enabled
sim_args = sim_args + ' ' if isinstance(sim_args, str) else ''
sim_args += '--hvx_length ' + str(codegen_hvx)
return sim_args or ''
if not sim_args:
return cpu_ver + ' ' + validate_hvx_length(hvx, sim_args)
sim_cpu = cpu_ver + ' '
# Add user defined args
if isinstance(sim_args, list):
sim_args = ' '.join(sim_args)
# Check for supplied sim cpu version
if 'v6' in sim_args:
sim_cpu = ''
# Regex match for allowed cpus
valid_cpu_str_regex = r'(?P<pre>--.*\s)?(--m)?' + \
r'(?P<base_version>v6[25678])(?P<sub_version>[a-z])?' + \
r'(?P<l2_size>_[0-9]+)?(?P<rev>_rev[0-9])?\s?(?P<post>--.*)?'
m = re.match(valid_cpu_str_regex, sim_args.lower())
if not m:
raise ValueError(
'Invalid simulator argument string "{}"'.format(sim_args))
# Parse options into correct order
cpu_attr = {x: str(m.groupdict()[x] or '') for x in m.groupdict()}
sim_args = cpu_attr['base_version'] + \
cpu_attr['sub_version'] + \
cpu_attr['l2_size'] + \
cpu_attr['rev'] + ' ' + \
cpu_attr['pre'] + cpu_attr['post']
return sim_cpu + ' ' + validate_hvx_length(hvx, sim_args)
# Sim args
os.environ['HEXAGON_SIM_ARGS'] = create_sim(cpu_ver, sim_args)
target_str = create_target(cpu_ver)
args_list = target_str.split()
return _ffi_api.TargetCreate("hexagon", *args_list)
def create(target_str):
"""Get a target given target string.
Parameters
----------
target_str : str
The target string.
Returns
-------
target : Target
The target object
Note
----
See the note on :py:mod:`tvm.target` on target string format.
"""
if isinstance(target_str, Target):
return target_str
if not isinstance(target_str, str):
raise ValueError("target_str has to be string type")
return _ffi_api.TargetFromString(target_str)
| 31.702703
| 90
| 0.597698
|
da533b686d2f9670220338fb2699e5fb4e88711c
| 1,911
|
py
|
Python
|
create-rss.py
|
barentsen/wwcpsx-rss
|
f872cc4d054c1441941cb194c1828e8ca724a021
|
[
"MIT"
] | null | null | null |
create-rss.py
|
barentsen/wwcpsx-rss
|
f872cc4d054c1441941cb194c1828e8ca724a021
|
[
"MIT"
] | null | null | null |
create-rss.py
|
barentsen/wwcpsx-rss
|
f872cc4d054c1441941cb194c1828e8ca724a021
|
[
"MIT"
] | null | null | null |
"""Creates an RSS feed for the CPSX Western Worlds podcast.
Crafted with ♥ for Jon Kissi and Raymond Francis.
"""
try:
from urllib import request # Python 3
except ImportError:
import urllib2 as request # Legacy Python
import re
from feedgen import feed
PODCAST_URL = "http://cpsx.uwo.ca/outreach/western_worlds/western_worlds_episodes.html"
if __name__ == "__main__":
fg = feed.FeedGenerator()
fg.id(PODCAST_URL)
fg.title("CPSX Western Worlds")
fg.description("""Western Worlds offers bi-weekly podcast programming
that features an interview with space-relevant
researchers, engineers, scientists, or advocates
representing the local, national and global planetary
science and exploration communities. Interview content
is designed to be accessible and interesting for a wide
range of listeners and will be followed by a round-table
discussion involving several Western Worlds co-hosts, who
have a wide-variety of educational and professional
backgrounds.""")
fg.link(href=PODCAST_URL, rel='alternate')
fg.load_extension('podcast')
fg.podcast.itunes_category('Science & Medicine', 'Natural Sciences')
html = request.urlopen(PODCAST_URL).readlines()
for line in html:
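        # The pattern below assumes episode lines shaped roughly like
        # (illustrative example, not taken from the real page):
        #   <p><a href="../img/episodes/ep01.mp3">Episode 1</a><br/>Summary</p>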
        groups = re.search(r"<p>.*\.\./img(.*mp3).*\">(.*)</a><br/>(.*)</p>", str(line))
if groups is not None:
url = "http://cpsx.uwo.ca/img" + groups.group(1)
title = groups.group(2)
desc = groups.group(3)
# Add to the feed
fe = fg.add_entry()
fe.id(url)
fe.title(title)
fe.description(desc)
fe.enclosure(url, 0, 'audio/mpeg')
fg.rss_file('wwcpsx-rss.xml', pretty=True)
| 39
| 87
| 0.610675
|
4757118cf4fa15f2912d20431e30d6ac139689aa
| 251
|
bzl
|
Python
|
scala/private/phases/phase_unused_deps_checker.bzl
|
psilospore/rules_scala-rebloopy
|
69327c55e8095882e59e5c0fe948cb31693b4e6f
|
[
"Apache-2.0"
] | null | null | null |
scala/private/phases/phase_unused_deps_checker.bzl
|
psilospore/rules_scala-rebloopy
|
69327c55e8095882e59e5c0fe948cb31693b4e6f
|
[
"Apache-2.0"
] | null | null | null |
scala/private/phases/phase_unused_deps_checker.bzl
|
psilospore/rules_scala-rebloopy
|
69327c55e8095882e59e5c0fe948cb31693b4e6f
|
[
"Apache-2.0"
] | null | null | null |
#
# PHASE: unused deps checker
#
# DOCUMENT THIS
#
load(
"@io_bazel_rules_scala//scala/private:rule_impls.bzl",
"get_unused_dependency_checker_mode",
)
def phase_unused_deps_checker(ctx, p):
return get_unused_dependency_checker_mode(ctx)
| 19.307692
| 58
| 0.76494
|
eb15c36608d7202ded4e94cc6d8dc46888d90f87
| 2,219
|
py
|
Python
|
utils/inferenceutils.py
|
n-fallahinia/finger-detection
|
ebc3a5165b34156fa8f2abb44fde5d48b5405f95
|
[
"Apache-2.0"
] | 1
|
2021-02-22T00:22:05.000Z
|
2021-02-22T00:22:05.000Z
|
utils/inferenceutils.py
|
n-fallahinia/finger-detection
|
ebc3a5165b34156fa8f2abb44fde5d48b5405f95
|
[
"Apache-2.0"
] | null | null | null |
utils/inferenceutils.py
|
n-fallahinia/finger-detection
|
ebc3a5165b34156fa8f2abb44fde5d48b5405f95
|
[
"Apache-2.0"
] | null | null | null |
import io
import os
import scipy.misc
import numpy as np
import six
import time
import glob
from IPython.display import display
from six import BytesIO
import matplotlib
import matplotlib.pyplot as plt
from PIL import Image, ImageDraw, ImageFont
import tensorflow as tf
from object_detection.utils import ops as utils_ops
from object_detection.utils import label_map_util
from object_detection.utils import visualization_utils as vis_util
def load_image_into_numpy_array(path):
img_data = tf.io.gfile.GFile(path, 'rb').read()
image = Image.open(BytesIO(img_data))
(im_width, im_height) = image.size
return np.array(image.getdata()).reshape(
(im_height, im_width, 3)).astype(np.uint8)
def run_inference_for_single_image(model, image):
image = np.asarray(image)
# The input needs to be a tensor, convert it using `tf.convert_to_tensor`.
input_tensor = tf.convert_to_tensor(image)
# The model expects a batch of images, so add an axis with `tf.newaxis`.
input_tensor = input_tensor[tf.newaxis,...]
# Run inference
model_fn = model.signatures['serving_default']
output_dict = model_fn(input_tensor)
    # All outputs are batch tensors.
# Convert to numpy arrays, and take index [0] to remove the batch dimension.
# We're only interested in the first num_detections.
num_detections = int(output_dict.pop('num_detections'))
output_dict = {key:value[0, :num_detections].numpy()
for key,value in output_dict.items()}
output_dict['num_detections'] = num_detections
# detection_classes should be ints.
output_dict['detection_classes'] = \
output_dict['detection_classes'].astype(np.int64)
# Handle models with masks:
if 'detection_masks' in output_dict:
        # Reframe the bbox mask to the image size.
detection_masks_reframed = utils_ops.reframe_box_masks_to_image_masks(
output_dict['detection_masks'], output_dict['detection_boxes'],
image.shape[0], image.shape[1])
detection_masks_reframed = tf.cast(detection_masks_reframed > 0.5,
tf.uint8)
output_dict['detection_masks_reframed'] = detection_masks_reframed.numpy()
return output_dict
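# --- Usage sketch (added for illustration). ---
# The SavedModel directory and image path below are placeholders, not real files.
if __name__ == "__main__":
    detection_model = tf.saved_model.load("path/to/saved_model")
    image_np = load_image_into_numpy_array("path/to/image.jpg")
    detections = run_inference_for_single_image(detection_model, image_np)
    # Typical keys include 'detection_boxes', 'detection_classes' and 'detection_scores'.
    print(detections['num_detections'])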
| 35.222222
| 78
| 0.733213
|
98e53f83bbb14a8417eeffbcddd1b5e63cf64c2d
| 11,032
|
py
|
Python
|
anchorecli/cli/system.py
|
ayanes/anchore-cli
|
b052f447cdb46193dca43683f47ecf0c768867fa
|
[
"Apache-2.0"
] | 1
|
2019-07-19T22:14:18.000Z
|
2019-07-19T22:14:18.000Z
|
anchorecli/cli/system.py
|
kumarchatla/anchore-cli
|
0cab3cc203c7295663b20981559e68b7283bc84a
|
[
"Apache-2.0"
] | null | null | null |
anchorecli/cli/system.py
|
kumarchatla/anchore-cli
|
0cab3cc203c7295663b20981559e68b7283bc84a
|
[
"Apache-2.0"
] | null | null | null |
import sys
import os
import re
import json
import time
import click
import logging
import anchorecli.clients.apiexternal
import anchorecli.cli.utils
config = {}
_logger = logging.getLogger(__name__)
@click.group(name='system', short_help='System operations')
@click.pass_context
@click.pass_obj
def system(ctx_config, ctx):
global config
config = ctx_config
if ctx.invoked_subcommand not in ['wait']:
try:
anchorecli.cli.utils.check_access(config)
except Exception as err:
print(anchorecli.cli.utils.format_error_output(config, 'system', {}, err))
sys.exit(2)
@system.command(name='status', short_help="Check current anchore-engine system status")
def status():
ecode = 0
try:
ret = anchorecli.clients.apiexternal.system_status(config)
ecode = anchorecli.cli.utils.get_ecode(ret)
if ret['success']:
print(anchorecli.cli.utils.format_output(config, 'system_status', {}, ret['payload']))
else:
raise Exception(json.dumps(ret['error'], indent=4))
except Exception as err:
print(anchorecli.cli.utils.format_error_output(config, 'system_status', {}, err))
if not ecode:
ecode = 2
anchorecli.cli.utils.doexit(ecode)
@system.command(name='errorcodes', short_help="Describe available anchore system error code names and descriptions")
def describe_errorcodes():
ecode = 0
try:
ret = anchorecli.clients.apiexternal.describe_error_codes(config)
ecode = anchorecli.cli.utils.get_ecode(ret)
if ret['success']:
print(anchorecli.cli.utils.format_output(config, 'system_describe_error_codes', {}, ret['payload']))
else:
raise Exception(json.dumps(ret['error'], indent=4))
except Exception as err:
print(anchorecli.cli.utils.format_error_output(config, 'system_describe_error_codes', {}, err))
if not ecode:
ecode = 2
anchorecli.cli.utils.doexit(ecode)
@system.command(name='wait', short_help="Blocking operation that will return when anchore-engine is available and ready")
@click.option('--timeout', type=float, default=-1.0, help="Time to wait, in seconds. If < 0, wait forever (default=-1)")
@click.option('--interval', type=float, default=5.0, help="Interval between checks, in seconds (default=5)")
@click.option("--feedsready", default='vulnerabilities', help='In addition to API and set of core services being available, wait until at least one full feed sync has been completed for the CSV list of feeds (default="vulnerabilities").')
@click.option("--servicesready", default='catalog,apiext,policy_engine,simplequeue,analyzer', help='Wait for the specified CSV list of anchore-engine services to have at least one service reporting as available (default="catalog,apiext,policy_engine,simplequeue,analyzer")')
def wait(timeout, interval, feedsready, servicesready):
"""
    Wait for the anchore-engine API, required services, and optionally feed syncs to become available, within a specific timeout
:param timeout:
:param interval:
:param feedsready:
:return:
"""
global config
ecode = 0
try:
sys.stderr.write("Starting checks to wait for anchore-engine to be available timeout={} interval={}\n".format(timeout, interval))
ts = time.time()
while timeout < 0 or time.time() - ts < timeout:
sys.stderr.write("API availability: Checking anchore-engine URL ({})...\n".format(config['url']))
_logger.debug("Checking API availability for anchore-engine URL ({})".format(config['url']))
try:
anchorecli.cli.utils.check_access(config)
_logger.debug("check access success")
break;
except Exception as err:
_logger.debug("check access failed, trying again")
time.sleep(interval)
else:
raise Exception("timed out after {} seconds.".format(timeout))
sys.stderr.write("API availability: Success.\n")
while timeout < 0 or time.time() - ts < timeout:
all_up = {}
try:
services_to_check = [x for x in servicesready.split(',') if x]
for f in services_to_check:
all_up[f] = False
except:
all_up = {}
sys.stderr.write("Service availability: Checking for service set ({})...\n".format(','.join(all_up.keys())))
_logger.debug("Checking service set availability for anchore-engine URL ({})".format(config['url']))
try:
ret = anchorecli.clients.apiexternal.system_status(config)
ecode = anchorecli.cli.utils.get_ecode(ret)
if ret['success']:
for service_record in ret.get('payload', {}).get('service_states', []):
s = service_record.get('servicename', None)
if s:
if s not in all_up:
all_up[s] = False
try:
s_up = service_record.get('service_detail', {}).get('up', False)
except:
s_up = False
if s_up:
all_up[s] = s_up
if False not in all_up.values():
_logger.debug("full set of available engine services detected")
break;
else:
_logger.debug("service set not yet available {}".format(all_up))
elif ret.get('httpcode', 500) in [401]:
raise Exception("service responded with 401 Unauthorized - please check anchore-engine credentials and try again")
except Exception as err:
print ("service status failed {}".format(err))
time.sleep(interval)
else:
raise Exception("timed out after {} seconds.".format(timeout))
sys.stderr.write("Service availability: Success.\n")
if feedsready:
all_up = {}
try:
feeds_to_check = feedsready.split(',')
for f in feeds_to_check:
all_up[f] = False
except:
all_up = {}
while timeout < 0 or time.time() - ts < timeout:
sys.stderr.write("Feed sync: Checking sync completion for feed set ({})...\n".format(','.join(all_up.keys())))
_logger.debug("Checking feed sync status for anchore-engine URL ({})".format(config['url']))
try:
ret = anchorecli.clients.apiexternal.system_feeds_list(config)
if ret['success']:
for feed_record in ret.get('payload', []):
_logger.debug("response show feed name={} was last_full_sync={}".format(feed_record.get('name'), feed_record.get('last_full_sync')))
if feed_record.get('name', None) in all_up:
if feed_record.get('last_full_sync', None):
all_up[feed_record.get('name')] = True
if False not in all_up.values():
_logger.debug("all requests feeds have been synced")
break
else:
_logger.debug("some feeds not yet synced {}".format(all_up))
except Exception as err:
print ("service feeds list failed {}".format(err))
time.sleep(interval)
else:
raise Exception("timed out after {} seconds.".format(timeout))
sys.stderr.write("Feed sync: Success.\n")
except Exception as err:
print(anchorecli.cli.utils.format_error_output(config, 'system_wait', {}, err))
if not ecode:
ecode = 2
anchorecli.cli.utils.doexit(ecode)
@system.command(name='del', short_help="Delete a non-active service from anchore-engine")
@click.argument('host_id', nargs=1)
@click.argument('servicename', nargs=1)
def delete(host_id, servicename):
ecode = 0
try:
ret = anchorecli.clients.apiexternal.delete_system_service(config, host_id, servicename)
ecode = anchorecli.cli.utils.get_ecode(ret)
if ret['success']:
print(anchorecli.cli.utils.format_output(config, 'delete_system_service', {}, ret['payload']))
else:
raise Exception(json.dumps(ret['error'], indent=4))
except Exception as err:
print(anchorecli.cli.utils.format_error_output(config, 'delete_system_service', {}, err))
if not ecode:
ecode = 2
anchorecli.cli.utils.doexit(ecode)
@system.group(name="feeds", short_help="Feed data operations")
def feeds():
pass
@feeds.command(name="list", short_help="Get a list of loaded data feeds.")
def list():
ecode = 0
try:
ret = anchorecli.clients.apiexternal.system_feeds_list(config)
ecode = anchorecli.cli.utils.get_ecode(ret)
if ret['success']:
print(anchorecli.cli.utils.format_output(config, 'system_feeds_list', {}, ret['payload']))
else:
raise Exception(json.dumps(ret['error'], indent=4))
except Exception as err:
print(anchorecli.cli.utils.format_error_output(config, 'system_feeds_list', {}, err))
if not ecode:
ecode = 2
anchorecli.cli.utils.doexit(ecode)
@feeds.command(name="sync", short_help="Fetch latest updates from the feed service")
@click.option("--flush", is_flag=True, help="Flush all previous data, including CVE matches, and resync from scratch")
def feedsync(flush):
global input
ecode = 0
try:
answer = "n"
try:
print("\nWARNING: This operation should not normally need to be performed except when the anchore-engine operator is certain that it is required - the operation will take a long time (hours) to complete, and there may be an impact on anchore-engine performance during the re-sync/flush.\n")
try:
input = raw_input
except NameError:
pass
answer = input("Really perform a manual feed data sync/flush? (y/N)")
except Exception as err:
answer = "n"
if 'y' == answer.lower():
ret = anchorecli.clients.apiexternal.system_feeds_sync(config, flush)
ecode = anchorecli.cli.utils.get_ecode(ret)
if ret['success']:
print(anchorecli.cli.utils.format_output(config, 'system_feeds_flush', {}, ret['payload']))
else:
raise Exception(json.dumps(ret['error'], indent=4))
except Exception as err:
print(anchorecli.cli.utils.format_error_output(config, 'system_feeds_flush', {}, err))
if not ecode:
ecode = 2
anchorecli.cli.utils.doexit(ecode)
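# Example invocations (illustrative; the sub-commands and flags are the ones defined
# above, but the executable name may differ depending on how the package is installed):
#   anchore-cli system status
#   anchore-cli system wait --timeout 300 --interval 10 --feedsready vulnerabilities
#   anchore-cli system feeds list
#   anchore-cli system feeds sync --flush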
| 42.75969
| 302
| 0.599257
|
3d9f384e7faebac7214c387ca7b50522bb90d573
| 379
|
py
|
Python
|
exercises/ex083.py
|
mouraa0/python-exercises
|
78ecf1cb0d1dfd7dfbdd05574cce5cd6a5cba0f1
|
[
"MIT"
] | null | null | null |
exercises/ex083.py
|
mouraa0/python-exercises
|
78ecf1cb0d1dfd7dfbdd05574cce5cd6a5cba0f1
|
[
"MIT"
] | null | null | null |
exercises/ex083.py
|
mouraa0/python-exercises
|
78ecf1cb0d1dfd7dfbdd05574cce5cd6a5cba0f1
|
[
"MIT"
] | null | null | null |
def ex083():
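    # Reads an expression and compares the count of '(' with the count of ')'.
    # Note: only the totals are compared, not the nesting order, so ")(" would
    # still be reported as valid.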
    expressao = input('Enter the expression:\n')
paren_a = 0
paren_f = 0
for i in expressao:
if i == '(':
paren_a += 1
elif i == ')':
paren_f += 1
    if paren_a == paren_f:
        resultado = 'VALID'
    else:
        resultado = 'INVALID'
    print(f'The expression entered is {resultado}')
ex083()
| 18.047619
| 46
| 0.48285
|
94f527b5cfd92465aaafba3507c26bc165b4b400
| 920
|
py
|
Python
|
saleor/api/table/serializers.py
|
glosoftgroup/restaurant
|
5b10a8f5199103e5bee01b45952c9638e63f28af
|
[
"BSD-3-Clause"
] | 1
|
2018-05-03T06:17:02.000Z
|
2018-05-03T06:17:02.000Z
|
saleor/api/table/serializers.py
|
glosoftgroup/restaurant
|
5b10a8f5199103e5bee01b45952c9638e63f28af
|
[
"BSD-3-Clause"
] | 8
|
2018-05-07T16:42:35.000Z
|
2022-02-26T03:31:56.000Z
|
saleor/api/table/serializers.py
|
glosoftgroup/tenants
|
a6b229ad1f6d567b7078f83425a532830b71e1bb
|
[
"BSD-3-Clause"
] | null | null | null |
# table rest api serializers
from rest_framework import serializers
from django.contrib.auth import get_user_model
from ...table.models import Table
from ...orders.models import Orders
User = get_user_model()
class TableListSerializer(serializers.ModelSerializer):
orders_url = serializers.HyperlinkedIdentityField(view_name='order-api:api-table-orders')
new_orders = serializers.SerializerMethodField()
fully_paid_orders = serializers.SerializerMethodField()
class Meta:
model = Table
fields = ('id',
'name',
'sale_point',
'orders_url',
'new_orders',
'fully_paid_orders'
)
def get_new_orders(self, obj):
return len(Orders.objects.get_table_new_orders(obj.pk))
def get_fully_paid_orders(self, obj):
return len(Orders.objects.get_table_orders(obj.pk))
| 30.666667
| 93
| 0.66413
|
3689b04ca987e1b2607af3fdf7572b633f2b232d
| 1,836
|
py
|
Python
|
Manteia/ActiveLearning.py
|
ym001/Manteia
|
e6ed466617933047aecf3071dec1d8bef9b0eeaa
|
[
"MIT"
] | 4
|
2020-04-19T15:57:56.000Z
|
2021-07-03T09:13:32.000Z
|
Manteia/ActiveLearning.py
|
ym001/Manteia
|
e6ed466617933047aecf3071dec1d8bef9b0eeaa
|
[
"MIT"
] | null | null | null |
Manteia/ActiveLearning.py
|
ym001/Manteia
|
e6ed466617933047aecf3071dec1d8bef9b0eeaa
|
[
"MIT"
] | null | null | null |
"""
.. module:: ActiveLearning
:platform: Unix, Windows
   :synopsis: Query strategies for active learning (random, entropy-based, discriminative).
.. moduleauthor:: Yves Mercadier <manteia.ym001@gmail.com>
"""
import numpy as np
import math
from operator import itemgetter
import random
class RandomSampling():
"""
A random sampling query strategy baseline.
"""
def __init__(self,verbose=False):
self.verbose=verbose
if self.verbose:
print('Randomsampling')
def query(self, unlabeled_idx,nb_question):
random.shuffle(unlabeled_idx)
selected_indices= unlabeled_idx[:nb_question]
return selected_indices
class UncertaintyEntropySampling():
"""
The basic uncertainty sampling query strategy, querying the examples with the top entropy.
"""
def __init__(self,verbose=False):
self.verbose=verbose
if self.verbose:
print('UncertaintyEntropySampling')
def query(self,predictions,unlabeled_idx,nb_question):
entropie=[]
for prediction,idx in zip(predictions,unlabeled_idx):
summ=0
for proba in prediction:
if proba>0:
summ=summ-proba*math.log(proba)
entropie.append((summ,idx))
entropie_trie=sorted(entropie, key=itemgetter(0),reverse=True)
idx_entropie=[tup[1] for tup in entropie_trie[:nb_question]]
if self.verbose:
print(entropie_trie[:nb_question])
return idx_entropie
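# Worked example (added for clarity): for a two-class prediction [0.5, 0.5] the
# entropy is -(0.5*ln(0.5) + 0.5*ln(0.5)) ≈ 0.693, while [0.9, 0.1] gives ≈ 0.325,
# so the more uncertain example is ranked first and queried before the confident one.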
class DAL():
"""
The basic discriminative strategy.
"""
def __init__(self,verbose=False):
self.verbose=verbose
if self.verbose:
print('DAL')
def query(self,predictions,unlabeled_idx,nb_question):
		# dal is a list of tuples (unlabeled idx, probability of not being labeled)
dal=[(idx,p[1])for idx,p in zip(unlabeled_idx,predictions)]
dal=sorted(dal, key=itemgetter(1),reverse=True)
print(dal[:3])
idx_dal=[tup[0] for tup in dal[:nb_question]]
if self.verbose:
print(dal[:nb_question])
return idx_dal
| 23.844156
| 91
| 0.737473
|
48807de5934006f3864512d03b687c30b4c45081
| 24,064
|
py
|
Python
|
appqos/tests/test_power.py
|
zzhou612/intel-cmt-cat
|
c351ffa565df56f5057c40ace46436bf3e776101
|
[
"BSD-3-Clause"
] | 397
|
2017-11-30T17:11:14.000Z
|
2022-03-31T22:55:32.000Z
|
appqos/tests/test_power.py
|
zzhou612/intel-cmt-cat
|
c351ffa565df56f5057c40ace46436bf3e776101
|
[
"BSD-3-Clause"
] | 150
|
2017-10-30T10:49:33.000Z
|
2022-03-30T21:34:41.000Z
|
appqos/tests/test_power.py
|
zzhou612/intel-cmt-cat
|
c351ffa565df56f5057c40ace46436bf3e776101
|
[
"BSD-3-Clause"
] | 132
|
2017-11-03T11:29:49.000Z
|
2022-03-21T10:55:10.000Z
|
################################################################################
# BSD LICENSE
#
# Copyright(c) 2019-2021 Intel Corporation. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in
# the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Intel Corporation nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
################################################################################
import mock
import pytest
import power
import power_common
import sstbf
class TestRestPowerProfiles:
def test_is_sstcp_enabled(self):
class SYS:
def __init__(self, enabled):
self.epp_enabled = enabled
self.sst_bf_enabled = enabled
with mock.patch("pwr.get_system", return_value=SYS(True)) as mock_get_system:
assert power.is_sstcp_enabled()
mock_get_system.assert_called_once()
with mock.patch("pwr.get_system", return_value=SYS(False)) as mock_get_system:
assert not power.is_sstcp_enabled()
mock_get_system.assert_called_once()
with mock.patch("pwr.get_system", return_value=None) as mock_get_system:
assert not power.is_sstcp_enabled()
mock_get_system.assert_called_once()
with mock.patch("pwr.get_system", side_effect = IOError('Test')) as mock_get_system:
assert not power.is_sstcp_enabled()
mock_get_system.assert_called_once()
def test__get_pwr_cpus(self):
class CPU:
def __init__(self, core_list):
self.core_list=core_list
CPUS_LIST = [CPU([]), CPU([])]
with mock.patch("pwr.get_cpus", return_value=CPUS_LIST) as mock_get_cpus:
assert power_common.get_pwr_cpus() == CPUS_LIST
mock_get_cpus.assert_called_once()
with mock.patch("pwr.get_cpus", return_value=[]) as mock_get_cpus:
assert not power_common.get_pwr_cpus()
mock_get_cpus.assert_called_once()
with mock.patch("pwr.get_cpus", return_value=None) as mock_get_cpus:
assert not power_common.get_pwr_cpus()
mock_get_cpus.assert_called_once()
with mock.patch("pwr.get_cpus", side_effect = IOError('Test')) as mock_get_cpus:
assert not power_common.get_pwr_cpus()
mock_get_cpus.assert_called_once()
with mock.patch("pwr.get_cpus", side_effect = ValueError('Test')) as mock_get_cpus:
assert not power_common.get_pwr_cpus()
mock_get_cpus.assert_called_once()
def test__get_pwr_cores(self):
class CORE:
def __init__(self, id):
self.core_id=id
CORES_LIST = [CORE(0), CORE(1)]
with mock.patch("pwr.get_cores", return_value=CORES_LIST) as mock_get_cores:
assert power_common.get_pwr_cores() == CORES_LIST
mock_get_cores.assert_called_once()
with mock.patch("pwr.get_cores", return_value=[]) as mock_get_cores:
assert not power_common.get_pwr_cores()
mock_get_cores.assert_called_once()
with mock.patch("pwr.get_cores", return_value=None) as mock_get_cores:
assert not power_common.get_pwr_cores()
mock_get_cores.assert_called_once()
with mock.patch("pwr.get_cores", side_effect = IOError('Test')) as mock_get_cores:
assert not power_common.get_pwr_cores()
mock_get_cores.assert_called_once()
with mock.patch("pwr.get_cores", side_effect = ValueError('Test')) as mock_get_cores:
assert not power_common.get_pwr_cores()
mock_get_cores.assert_called_once()
def test__set_freqs_epp(self):
class CORE:
def __init__(self, id):
self.core_id=id
self.commit = mock.MagicMock()
CORES_LIST = [CORE(0), CORE(1), CORE(2), CORE(3), CORE(4)]
assert -1 == power._set_freqs_epp(cores=None, min_freq=1000)
assert -1 == power._set_freqs_epp(cores=[], min_freq=1000)
assert -1 == power._set_freqs_epp(cores=[1,2,3], min_freq=None, max_freq=None, epp=None)
for ret_val in [[], None]:
with mock.patch("power_common.get_pwr_cores", return_value=ret_val) as mock_get_cores:
assert -1 == power._set_freqs_epp(cores=[1,2,3], min_freq=1000, max_freq=2300, epp="performance")
mock_get_cores.assert_called_once()
with mock.patch("power_common.get_pwr_cores", return_value=CORES_LIST) as mock_get_cores:
assert 0 == power._set_freqs_epp(cores=[1,2,3], min_freq=1000, max_freq=2300, epp="performance")
mock_get_cores.assert_called_once()
for core in CORES_LIST:
if core.core_id in [1, 2, 3]:
assert core.min_freq == 1000
assert core.max_freq == 2300
assert core.epp == "performance"
core.commit.assert_called_once()
else:
assert not hasattr(core, 'min_freq')
assert not hasattr(core, 'max_freq')
assert not hasattr(core, 'epp')
core.commit.assert_not_called()
def test_reset(self):
class CORE:
def __init__(self, id):
self.core_id=id
self.commit = mock.MagicMock()
CORES_LIST = [CORE(0), CORE(1), CORE(2), CORE(3), CORE(4)]
for val in [[], None]:
assert -1 == power.reset(val)
with mock.patch("power_common.get_pwr_cores", return_value=CORES_LIST) as mock_get_cores:
assert 0 == power.reset([0, 1, 2])
mock_get_cores.assert_called_once()
for core in CORES_LIST:
if core.core_id in [0, 1, 2]:
core.commit.assert_called_once_with("default")
else:
core.commit.assert_not_called()
def test__get_lowest_freq(self):
class CPU:
def __init__(self, freq):
self.lowest_freq = freq
for ret_val in [[], None]:
with mock.patch("power_common.get_pwr_cpus", return_value = ret_val) as mock_get_cpus:
assert None == power_common.get_pwr_lowest_freq()
mock_get_cpus.assert_called_once()
for freq in [1000, 1500, 2000]:
with mock.patch("power_common.get_pwr_cpus", return_value=[CPU(freq)]) as mock_get_cpus:
assert freq == power_common.get_pwr_lowest_freq()
mock_get_cpus.assert_called_once()
def test__get_base_freq(self):
class CPU:
def __init__(self, freq):
self.base_freq = freq
for ret_val in [[], None]:
with mock.patch("power_common.get_pwr_cpus", return_value = ret_val) as mock_get_cpus:
assert None == power_common.get_pwr_base_freq()
mock_get_cpus.assert_called_once()
for freq in [2000, 2500, 3000]:
with mock.patch("power_common.get_pwr_cpus", return_value=[CPU(freq)]) as mock_get_cpus:
assert freq == power_common.get_pwr_base_freq()
mock_get_cpus.assert_called_once()
def test__get_highest_freq(self):
class CPU:
def __init__(self, freq):
self.highest_freq = freq
for ret_val in [[], None]:
with mock.patch("power_common.get_pwr_cpus", return_value = ret_val) as mock_get_cpus:
assert None == power_common.get_pwr_highest_freq()
mock_get_cpus.assert_called_once()
for freq in [3000, 3500, 4000]:
with mock.patch("power_common.get_pwr_cpus", return_value=[CPU(freq)]) as mock_get_cpus:
assert freq == power_common.get_pwr_highest_freq()
mock_get_cpus.assert_called_once()
def test__is_min_freq_valid(self):
for ret_val in [[], None]:
with mock.patch("power_common.get_pwr_lowest_freq", return_value = ret_val) as mock_get:
assert None == power._is_min_freq_valid(1000)
mock_get.assert_called_once()
for freq in [500, 1000, 1499]:
with mock.patch("power_common.get_pwr_lowest_freq", return_value = 1500) as mock_get:
assert False == power._is_min_freq_valid(freq)
mock_get.assert_called_once()
for freq in [1000, 1500, 2000]:
with mock.patch("power_common.get_pwr_lowest_freq", return_value = 1000) as mock_get:
assert True == power._is_min_freq_valid(freq)
mock_get.assert_called_once()
def test__is_max_freq_valid(self):
for ret_val in [[], None]:
with mock.patch("power_common.get_pwr_highest_freq", return_value = ret_val) as mock_get:
assert None == power._is_max_freq_valid(1000)
mock_get.assert_called_once()
for freq in [3501, 3750, 4000]:
with mock.patch("power_common.get_pwr_highest_freq", return_value = 3500) as mock_get:
assert False == power._is_max_freq_valid(freq)
mock_get.assert_called_once()
for freq in [2000, 3000, 3500]:
with mock.patch("power_common.get_pwr_highest_freq", return_value = 3500) as mock_get:
assert True == power._is_max_freq_valid(freq)
mock_get.assert_called_once()
def test__is_epp_valid(self):
for epp in [None, "", 1000, "TEST", "inValid_EPP", []]:
assert False == power._is_epp_valid(epp)
for epp in power.VALID_EPP + [power.DEFAULT_EPP]:
assert True == power._is_epp_valid(epp)
def test__do_admission_control_check(self):
for sstbf in [True, False]:
result = not sstbf
with mock.patch('sstbf.is_sstbf_configured', return_value = sstbf):
assert result == power._do_admission_control_check()
def test_validate_power_profiles(self):
data = {
"power_profiles": [
{
"id": 0,
"min_freq": 1500,
"max_freq": 2500,
"epp": "performance",
"name": "default"
},
{
"id": 0,
"min_freq": 1000,
"max_freq": 1000,
"epp": "power",
"name": "low_priority"
}
]
}
with mock.patch('power._is_max_freq_valid', return_value = True) as mock_is_max,\
mock.patch('power._is_min_freq_valid', return_value = True) as mock_is_min,\
mock.patch('power._is_epp_valid', return_value = True) as mock_is_epp,\
mock.patch('power._do_admission_control_check', return_value = True),\
mock.patch('power._admission_control_check', return_value = True) as mock_admission_control_check:
with pytest.raises(ValueError, match="Power Profile 0, multiple profiles with same id."):
power.validate_power_profiles(data, True)
mock_admission_control_check.assert_not_called()
# fix profile ID issue
data['power_profiles'][-1]['id'] = 1
with mock.patch('power._is_max_freq_valid', return_value = True) as mock_is_max,\
mock.patch('power._is_min_freq_valid', return_value = True) as mock_is_min,\
mock.patch('power._is_epp_valid', return_value = True) as mock_is_epp,\
mock.patch('power._do_admission_control_check', return_value = True),\
mock.patch('power._admission_control_check', return_value = True) as mock__admission_control_check:
power.validate_power_profiles(data, True)
mock_is_max.assert_any_call(2500)
mock_is_max.assert_any_call(1000)
mock_is_min.assert_any_call(1500)
mock_is_min.assert_any_call(1000)
mock_is_epp.assert_any_call("performance")
mock_is_epp.assert_any_call("power")
mock__admission_control_check.assert_called_once()
with mock.patch('power._is_max_freq_valid', return_value = True) as mock_is_max,\
mock.patch('power._is_min_freq_valid', return_value = True) as mock_is_min,\
mock.patch('power._is_epp_valid', return_value = True) as mock_is_epp,\
mock.patch('power._do_admission_control_check', return_value = False),\
mock.patch('power._admission_control_check') as mock__admission_control_check:
power.validate_power_profiles(data, True)
mock__admission_control_check.assert_not_called()
with mock.patch('power._is_max_freq_valid', return_value = True) as mock_is_max,\
mock.patch('power._is_min_freq_valid', return_value = True) as mock_is_min,\
mock.patch('power._is_epp_valid', return_value = True) as mock_is_epp,\
mock.patch('power._do_admission_control_check', return_value = True),\
mock.patch('power._admission_control_check') as mock__admission_control_check:
power.validate_power_profiles(data, False)
mock__admission_control_check.assert_not_called()
sys = mock.MagicMock()
sys.request_config = mock.MagicMock(return_value=True)
sys.refresh_all = mock.MagicMock()
cores = []
for id in range(0,3):
core = mock.MagicMock()
core.core_id = id
cores.append(core)
data.update({
"pools": [
{
"id": 0,
"cores": [0,1,2,3],
"power_profile": 0
},
{
"id": 1,
"cores": [4,5,6,7]
}]
})
with mock.patch('power._is_max_freq_valid', return_value = True) as mock_is_max,\
mock.patch('power._is_min_freq_valid', return_value = True) as mock_is_min,\
mock.patch('power._is_epp_valid', return_value = True) as mock_is_epp,\
mock.patch('power._do_admission_control_check', return_value = True),\
mock.patch('power_common.get_pwr_cores', return_value=cores) as mock_get_cores,\
mock.patch('power_common.get_pwr_sys', return_value=sys) as mock_get_sys:
power.validate_power_profiles(data, True)
sys.request_config.assert_called_once()
sys.refresh_all.assert_called_once()
for core in cores:
assert hasattr(core, "min_freq")
assert hasattr(core, "max_freq")
assert hasattr(core, "epp")
sys.request_config = mock.MagicMock(return_value=False)
sys.refresh_all.reset_mock()
with mock.patch('power._is_max_freq_valid', return_value = True) as mock_is_max,\
mock.patch('power._is_min_freq_valid', return_value = True) as mock_is_min,\
mock.patch('power._is_epp_valid', return_value = True) as mock_is_epp,\
mock.patch('power._do_admission_control_check', return_value = True),\
mock.patch('power_common.get_pwr_cores', return_value=cores) as mock_get_cores,\
mock.patch('power_common.get_pwr_sys', return_value=sys) as mock_get_sys:
with pytest.raises(power.AdmissionControlError, match="Power Profiles configuration would cause CPU to be oversubscribed."):
power.validate_power_profiles(data, True)
sys.request_config.assert_called_once()
sys.refresh_all.assert_called_once()
with mock.patch('power._is_max_freq_valid', return_value = False ) as mock_is_max,\
mock.patch('power._is_min_freq_valid', return_value = True) as mock_is_min,\
mock.patch('power._is_epp_valid', return_value = True) as mock_is_epp,\
mock.patch('power._do_admission_control_check', return_value = True),\
mock.patch('power._admission_control_check', return_value = True) as mock__admission_control_check:
with pytest.raises(ValueError, match="Power Profile 0, Invalid max. freq 2500."):
power.validate_power_profiles(data, True)
mock__admission_control_check.assert_not_called()
with mock.patch('power._is_max_freq_valid', return_value = True ) as mock_is_max,\
mock.patch('power._is_min_freq_valid', return_value = False) as mock_is_min,\
mock.patch('power._is_epp_valid', return_value = True) as mock_is_epp,\
mock.patch('power._do_admission_control_check', return_value = True),\
mock.patch('power._admission_control_check', return_value = True) as mock__admission_control_check:
with pytest.raises(ValueError, match="Power Profile 0, Invalid min. freq 1500."):
power.validate_power_profiles(data, True)
mock__admission_control_check.assert_not_called()
with mock.patch('power._is_max_freq_valid', return_value = True ) as mock_is_max,\
mock.patch('power._is_min_freq_valid', return_value = True) as mock_is_min,\
mock.patch('power._is_epp_valid', return_value = False) as mock_is_epp,\
mock.patch('power._do_admission_control_check', return_value = True),\
mock.patch('power._admission_control_check', return_value = True) as mock__admission_control_check:
with pytest.raises(ValueError, match="Power Profile 0, Invalid EPP value performance."):
power.validate_power_profiles(data, True)
mock__admission_control_check.assert_not_called()
# set invalid min. freq, higher than max. freq
data['power_profiles'][-1]['min_freq'] = data['power_profiles'][-1]['max_freq'] + 1
with mock.patch('power._is_max_freq_valid', return_value = True) as mock_is_max,\
mock.patch('power._is_min_freq_valid', return_value = True) as mock_is_min,\
mock.patch('power._is_epp_valid', return_value = True) as mock_is_epp,\
mock.patch('power._do_admission_control_check', return_value = True),\
mock.patch('power._admission_control_check', return_value = True) as mock__admission_control_check:
with pytest.raises(ValueError, match="Power Profile 1, Invalid freqs! min. freq is higher than max. freq."):
power.validate_power_profiles(data, True)
mock__admission_control_check.assert_not_called()
def test_configure_power(self):
pool_ids = [0, 1]
profile_ids = [5, 6]
pool_to_cores = {0: [2,3], 1: [4, 5]}
pool_to_profiles = {0: 5, 1: 6}
profiles = {5: {"min_freq": 1500, "max_freq": 2500, "epp": "performance", "id": 5},
6 : {"min_freq": 1000, "max_freq": 1200, "epp": "power", "id": 6}}
def get_pool_attr(attr, pool_id):
if attr == 'id':
if pool_id is None:
return pool_ids
else:
return None
elif attr == 'cores':
return pool_to_cores[pool_id]
elif attr == 'power_profile':
if pool_id is None:
return profile_ids
else:
return pool_to_profiles[pool_id]
return None
def get_power_profile(id):
return profiles[id]
# ERROR POWER: No Pools configured...
power.PREV_PROFILES.clear()
with mock.patch('common.CONFIG_STORE.get_pool_attr', return_value=None) as mock_get_pool_attr,\
mock.patch('common.CONFIG_STORE.get_power_profile') as mock_get_power_profile,\
mock.patch('power.reset') as mock_reset,\
mock.patch('power._set_freqs_epp') as mock_set_freqs_epp:
assert -1 == power.configure_power()
mock_get_pool_attr.assert_called_once_with('id', None)
mock_get_power_profile.assert_not_called()
mock_reset.assert_not_called()
mock_set_freqs_epp.assert_not_called()
# ERROR POWER: Profile 5 does not exist!
power.PREV_PROFILES.clear()
with mock.patch('common.CONFIG_STORE.get_pool_attr', new=get_pool_attr),\
mock.patch('common.CONFIG_STORE.get_power_profile', return_value=None) as mock_get_power_profile,\
mock.patch('power.reset') as mock_reset,\
mock.patch('power._set_freqs_epp') as mock_set_freqs_epp:
assert -1 == power.configure_power()
mock_get_power_profile.assert_called_once_with(5)
mock_reset.assert_not_called()
mock_set_freqs_epp.assert_not_called()
# All OK!
power.PREV_PROFILES.clear()
with mock.patch('common.CONFIG_STORE.get_pool_attr', new=get_pool_attr),\
mock.patch('common.CONFIG_STORE.get_power_profile', new=get_power_profile),\
mock.patch('power.reset') as mock_reset,\
mock.patch('power._set_freqs_epp') as mock_set_freqs_epp:
assert 0 == power.configure_power()
mock_reset.assert_not_called()
mock_set_freqs_epp.assert_any_call(pool_to_cores[0], profiles[5]["min_freq"], profiles[5]["max_freq"], profiles[5]["epp"])
mock_set_freqs_epp.assert_any_call(pool_to_cores[1], profiles[6]["min_freq"], profiles[6]["max_freq"], profiles[6]["epp"])
# POWER: Skipping Pool 0, no cores assigned
power.PREV_PROFILES.clear()
pool_to_cores[0] = []
with mock.patch('common.CONFIG_STORE.get_pool_attr', new=get_pool_attr),\
mock.patch('common.CONFIG_STORE.get_power_profile', new=get_power_profile),\
mock.patch('power.reset') as mock_reset,\
mock.patch('power._set_freqs_epp') as mock_set_freqs_epp:
assert 0 == power.configure_power()
mock_reset.assert_not_called()
mock_set_freqs_epp.assert_called_once_with(pool_to_cores[1], profiles[6]["min_freq"], profiles[6]["max_freq"], profiles[6]["epp"])
# POWER: Pool 1, no power profile assigned. Resetting to defaults.
power.PREV_PROFILES.clear()
pool_to_profiles[1] = None
with mock.patch('common.CONFIG_STORE.get_pool_attr', new=get_pool_attr),\
mock.patch('common.CONFIG_STORE.get_power_profile', new=get_power_profile),\
mock.patch('power.reset') as mock_reset,\
mock.patch('power._set_freqs_epp') as mock_set_freqs_epp:
assert 0 == power.configure_power()
mock_reset.assert_called_once_with(pool_to_cores[1])
mock_set_freqs_epp.assert_not_called()
| 44.316759
| 142
| 0.630111
|
5ae419168f1d56a51d1a97e5a29d05ce165fce67
| 5,786
|
py
|
Python
|
megengine_release/layers/det/rcnn.py
|
megvii-research/ICD
|
a97e0ecd9b69dbc0e3c2b8168c1d72ea79c6641b
|
[
"Apache-2.0"
] | 32
|
2021-11-09T11:19:21.000Z
|
2022-03-21T17:37:32.000Z
|
layers/det/rcnn.py
|
Senwang98/ICD
|
fdda393088fa31ac6dc9ddbd7ec3e7008ea32ff4
|
[
"Apache-2.0"
] | 3
|
2022-02-28T08:51:13.000Z
|
2022-03-30T09:16:41.000Z
|
layers/det/rcnn.py
|
Senwang98/ICD
|
fdda393088fa31ac6dc9ddbd7ec3e7008ea32ff4
|
[
"Apache-2.0"
] | 4
|
2021-11-11T11:59:05.000Z
|
2022-03-30T03:26:41.000Z
|
# -*- coding:utf-8 -*-
# This repo is licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import megengine as mge
import megengine.functional as F
import megengine.module as M
import layers
class RCNN(M.Module):
def __init__(self, cfg):
super().__init__()
self.cfg = cfg
self.box_coder = layers.BoxCoder(cfg.rcnn_reg_mean, cfg.rcnn_reg_std)
# roi head
self.in_features = cfg.rcnn_in_features
self.stride = cfg.rcnn_stride
self.pooling_method = cfg.pooling_method
self.pooling_size = cfg.pooling_size
self.fc1 = M.Linear(256 * self.pooling_size[0] * self.pooling_size[1], 1024)
self.fc2 = M.Linear(1024, 1024)
for l in [self.fc1, self.fc2]:
M.init.normal_(l.weight, std=0.01)
M.init.fill_(l.bias, 0)
# box predictor
self.pred_cls = M.Linear(1024, cfg.num_classes + 1)
self.pred_delta = M.Linear(1024, cfg.num_classes * 4)
M.init.normal_(self.pred_cls.weight, std=0.01)
M.init.normal_(self.pred_delta.weight, std=0.001)
for l in [self.pred_cls, self.pred_delta]:
M.init.fill_(l.bias, 0)
def forward(self, fpn_fms, rcnn_rois, im_info=None, gt_boxes=None):
rcnn_rois, labels, bbox_targets = self.get_ground_truth(
rcnn_rois, im_info, gt_boxes
)
fpn_fms = [fpn_fms[x] for x in self.in_features]
pool_features = layers.roi_pool(
fpn_fms, rcnn_rois, self.stride, self.pooling_size, self.pooling_method,
)
flatten_feature = F.flatten(pool_features, start_axis=1)
roi_feature = F.relu(self.fc1(flatten_feature))
roi_feature = F.relu(self.fc2(roi_feature))
pred_logits = self.pred_cls(roi_feature)
pred_offsets = self.pred_delta(roi_feature)
if self.training:
# loss for rcnn classification
loss_rcnn_cls = F.loss.cross_entropy(pred_logits, labels, axis=1)
# loss for rcnn regression
pred_offsets = pred_offsets.reshape(-1, self.cfg.num_classes, 4)
num_samples = labels.shape[0]
fg_mask = labels > 0
loss_rcnn_bbox = layers.smooth_l1_loss(
pred_offsets[fg_mask, labels[fg_mask] - 1],
bbox_targets[fg_mask],
self.cfg.rcnn_smooth_l1_beta,
).sum() / F.maximum(num_samples, mge.tensor(1))
loss_dict = {
"loss_rcnn_cls": loss_rcnn_cls,
"loss_rcnn_bbox": loss_rcnn_bbox,
}
return loss_dict
else:
# slice 1 for removing background
pred_scores = F.softmax(pred_logits, axis=1)[:, 1:]
pred_offsets = pred_offsets.reshape(-1, 4)
target_shape = (rcnn_rois.shape[0], self.cfg.num_classes, 4)
# rois (N, 4) -> (N, 1, 4) -> (N, 80, 4) -> (N * 80, 4)
base_rois = F.broadcast_to(
F.expand_dims(rcnn_rois[:, 1:5], axis=1), target_shape).reshape(-1, 4)
pred_bbox = self.box_coder.decode(base_rois, pred_offsets)
return pred_bbox, pred_scores
def get_ground_truth(self, rpn_rois, im_info, gt_boxes):
if not self.training:
return rpn_rois, None, None
return_rois = []
return_labels = []
return_bbox_targets = []
# get per image proposals and gt_boxes
for bid in range(gt_boxes.shape[0]):
num_valid_boxes = im_info[bid, 4].astype("int32")
gt_boxes_per_img = gt_boxes[bid, :num_valid_boxes, :]
batch_inds = F.full((gt_boxes_per_img.shape[0], 1), bid)
gt_rois = F.concat([batch_inds, gt_boxes_per_img[:, :4]], axis=1)
batch_roi_mask = rpn_rois[:, 0] == bid
# all_rois : [batch_id, x1, y1, x2, y2]
all_rois = F.concat([rpn_rois[batch_roi_mask], gt_rois])
overlaps = layers.get_iou(all_rois[:, 1:], gt_boxes_per_img)
max_overlaps = overlaps.max(axis=1)
gt_assignment = F.argmax(overlaps, axis=1).astype("int32")
labels = gt_boxes_per_img[gt_assignment, 4]
# ---------------- get the fg/bg labels for each roi ---------------#
fg_mask = (max_overlaps >= self.cfg.fg_threshold) & (labels >= 0)
bg_mask = (
(max_overlaps >= self.cfg.bg_threshold_low)
& (max_overlaps < self.cfg.bg_threshold_high)
)
num_fg_rois = int(self.cfg.num_rois * self.cfg.fg_ratio)
fg_inds_mask = layers.sample_labels(fg_mask, num_fg_rois, True, False)
num_bg_rois = int(self.cfg.num_rois - fg_inds_mask.sum())
bg_inds_mask = layers.sample_labels(bg_mask, num_bg_rois, True, False)
labels[bg_inds_mask] = 0
keep_mask = fg_inds_mask | bg_inds_mask
labels = labels[keep_mask].astype("int32")
rois = all_rois[keep_mask]
target_boxes = gt_boxes_per_img[gt_assignment[keep_mask], :4]
bbox_targets = self.box_coder.encode(rois[:, 1:], target_boxes)
bbox_targets = bbox_targets.reshape(-1, 4)
return_rois.append(rois)
return_labels.append(labels)
return_bbox_targets.append(bbox_targets)
return (
F.concat(return_rois, axis=0).detach(),
F.concat(return_labels, axis=0).detach(),
F.concat(return_bbox_targets, axis=0).detach(),
)
| 41.035461
| 88
| 0.605773
|
fb0b423673a0aa120104de8c8bc50c513a85886d
| 13,185
|
py
|
Python
|
src/m1.py
|
hickshj/05a-Debugging
|
d37ce18683b716ca7031df50f230a63118354bc6
|
[
"MIT"
] | null | null | null |
src/m1.py
|
hickshj/05a-Debugging
|
d37ce18683b716ca7031df50f230a63118354bc6
|
[
"MIT"
] | null | null | null |
src/m1.py
|
hickshj/05a-Debugging
|
d37ce18683b716ca7031df50f230a63118354bc6
|
[
"MIT"
] | null | null | null |
"""
This module lets you practice DEBUGGING when RUN-TIME EXCEPTIONS occur.
Authors: David Mutchler, Dave Fisher, Valerie Galluzzi, Amanda Stouder,
their colleagues and Hunter Hicks.
""" # Done: 1. PUT YOUR NAME IN THE ABOVE LINE.
import rosegraphics as rg
########################################################################
#
# Done: 2. READ these instructions, ASKING QUESTIONS as needed.
#
# This module contains 7 "broken" functions.
#
# For each broken function, running it will generate what's called
# an EXCEPTION -- an error that occurs when the program runs and
# (left to itself) causes the program to end its execution.
#
# We have written tests for each broken function.
# *** DO NOT MODIFY THE TESTS. ***
# In fact, you do not even need to read the tests.
# Instead, look at the file m1_pictures.pdf to see what output
# the tests should generate.
#
# To do this exercise, do the following:
#
# Step 1:
# -- Read the doc-string (but NOT the code) of the broken_1 function.
# -- Look at the m1_pictures.pdf file
# to see what output the tests should produce.
# -- ASK QUESTIONS AS NEEDED until you understand the specification
# of the broken_1 function.
#
# Step 2: Run this module.
# You will see that the code "breaks" and prints on the Console:
# -- a STACK TRACEBACK of the code that led to the exception
# -- an error message that attempts to explain the exception
# Right-click in the Console and select "Word Wrap" to make
# the stack traceback and error message more readable.
#
# Step 3: READ the error message. Try to make sense of it.
# ASK QUESTIONS AS NEEDED!
#
# Step 4: Click on the BOTTOM blue link in the Console.
# It will take you to the line at which the code broke.
# (BUT if the line refers to a line in rosegraphics.py,
# work your way UP the blue links until you reach
# the lowermost one that refers to a line in THIS module.)
#
# Step 5: Looking at the line at which the code broke,
# figure out what the error message is telling you.
# ASK QUESTIONS AS NEEDED!
#
# Step 6: Thinking about the green specification of the broken_1
# function, and thinking about what the error message tells you,
# correct the mistake(s).
#
# Sometimes the mistake will be on the line at which the code broke,
# sometimes at a line that executed before that line executed.
#
# After correcting the mistake(s), run the program again.
# Continue until you believe that the broken_1 function produces
# the correct output (per the m1_pictures.pdf file)
# AND you believe that the code for the function is correct.
#
# ** IMPORTANT: **
# Resist the urge to "fiddle" with the code until you stumble
# upon something that works. This exercise will be helpful
# to you ONLY if you use it as an opportunity to learn
# what the error messages mean and how to react to them.
#
# *** ASK QUESTIONS AS NEEDED! ***
#
# Once you have corrected the broken_1 function, continue to
# the next function, again proceeding according to the above steps.
#
# When you believe you understand these instructions,
# change the above TO DO to DONE.
#
########################################################################
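# ----------------------------------------------------------------------
# For reference, a run-time exception prints a stack traceback of roughly
# the following shape in the Console (the file, line numbers, attribute
# name and message below are HYPOTHETICAL, shown only to illustrate the
# layout, not taken from this module):
#
#     Traceback (most recent call last):
#       File "m1.py", line 81, in run_test_all
#         broken_1(circle1, window)
#       File "m1.py", line 124, in broken_1
#         circle.atach_to(window)
#     AttributeError: 'Circle' object has no attribute 'atach_to'
#
# The lowermost entry that refers to THIS module points at the line where
# the code broke; the final line names the exception and tries to explain it.
# ----------------------------------------------------------------------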
def main():
""" Calls the TEST functions in this module. """
run_test_all()
########################################################################
# Students: Do NOT change the following tests.
# There are NO errors in the TESTS.
########################################################################
def run_test_all():
""" Tests ALL the functions in this module. """
# Test broken_1:
window = rg.RoseWindow(title='Testing BROKEN_1')
circle1 = rg.Circle(rg.Point(50, 50), 15)
circle1.fill_color = 'blue'
broken_1(circle1, window) # Test 1 of broken_1
circle2 = rg.Circle(rg.Point(70, 150), 30)
circle2.fill_color = 'red'
broken_1(circle2, window) # Test 2 of broken_1
window.close_on_mouse_click()
# Test broken_2:
window = rg.RoseWindow(title='Testing BROKEN_2')
broken_2(50, 75, window) # Test 1 of broken_2
broken_2(100, 150, window) # Test 2 of broken_2
window.close_on_mouse_click()
# Test broken_3:
window = rg.RoseWindow(title='Testing BROKEN_3')
broken_3(5, rg.Point(100, 50), 80, 20, window) # Test 1 of broken_3
broken_3(3, rg.Point(50, 150), 40, 50, window) # Test 2 of broken_3
window.close_on_mouse_click()
# Test broken_4:
window = rg.RoseWindow(title='Testing BROKEN_4')
broken_4(50, 75, 40, window) # Test 1 of broken_4
broken_4(100, 150, 75, window) # Test 2 of broken_4
window.close_on_mouse_click()
# Test broken_5:
window = rg.RoseWindow(title='Testing BROKEN_5')
circle = rg.Circle(rg.Point(100, 50), 30)
circle.fill_color = 'pink'
broken_5(circle, window) # Test 1 of broken_5
circle = rg.Circle(rg.Point(250, 100), 80)
circle.fill_color = 'red'
broken_5(circle, window) # Test 2 of broken_5
window.close_on_mouse_click()
# Test broken_6:
expected = 1.8333333
actual = broken_6(3) # Test 1 of broken_6
print("Testing BROKEN_6:\n")
print('Expected for BROKEN_6, Test 1:', expected, '(approximately)')
print(' Actual for BROKEN_6, Test 1:', actual)
expected = 5.1873775
actual = broken_6(100) # Test 2 of broken_6
print()
print('Expected for BROKEN_6, Test 2:', expected, '(approximately)')
print(' Actual for BROKEN_6, Test 2:', actual)
print()
# Test broken_7:
window = rg.RoseWindow(title='Testing BROKEN_7')
broken_7(5, rg.Point(100, 50), 80, 20, window) # Test 1 of broken_7
broken_7(3, rg.Point(50, 150), 40, 50, window) # Test 2 of broken_7
window.close_on_mouse_click()
# ----------------------------------------------------------------------
# Done: 3. Follow the INSTRUCTIONS AT THE TOP OF THIS MODULE
# to correct the mistake(s) in the following function.
# ----------------------------------------------------------------------
def broken_1(circle, window):
"""
What comes in: an rg.Circle and an rg.RoseWindow.
What goes out: Nothing (i.e., None).
Side effects:
Draws the given rg.Circle on the given rg.RoseWindow,
then draws another rg.Circle whose RADIUS
is TWICE that of the given rg.Circle
and whose center is the same as that of the given rg.Circle.
Must ** render ** but ** NOT close ** the window.
Type hints:
:type circle: rg.Circle
:type window: rg.RoseWindow
"""
circle.attach_to(window)
circle2 = rg.Circle(circle.center, circle.radius * 2)
circle2.attach_to(window)
window.render()
# ----------------------------------------------------------------------
# Done: 4. Follow the INSTRUCTIONS AT THE TOP OF THIS MODULE
# to correct the mistake(s) in the following function.
# ----------------------------------------------------------------------
def broken_2(x, y, window):
"""
What comes in: Positive integers x and y, and an rg.RoseWindow.
What goes out: Nothing (i.e., None).
Side effects:
Draws a rg.Circle with radius 33, centered at (x, y),
on the given rg.RoseWindow.
Must ** render ** but ** NOT close ** the window.
Type hints:
:type x: int
:type y: int
:type window: rg.RoseWindow
"""
circle = rg.Circle(rg.Point(x, y), 33)
circle.attach_to(window)
window.render()
# ----------------------------------------------------------------------
# Done: 5. Follow the INSTRUCTIONS AT THE TOP OF THIS MODULE
# to correct the mistake(s) in the following function.
# ----------------------------------------------------------------------
def broken_3(n, point, length, distance_between_lines, window):
"""
    What comes in:  The five arguments are:
      -- A positive integer n.
      -- An rg.Point.
      -- A positive integer length.
      -- A positive integer distance_between_lines.
      -- An rg.RoseWindow.
What goes out: Nothing (i.e., None).
Side effects:
Draws n vertical rg.Lines on the given rg.RoseWindow,
where the leftmost rg.Line has the given point as its topmost
point and all the rg.Lines have the given length
and they are the given distance apart.
Each line is drawn with a 0.5 second pause after drawing it.
Must ** render ** but ** NOT close ** the window.
Type hints:
:type n: int
:type point: rg.Point
:type length: int
:type distance_between_lines: int
:type window: rg.RoseWindow
"""
a = rg.Point(point.x, point.y)
b = rg.Point(point.x, point.y + length)
for _ in range(n):
line = rg.Line(a, b)
line.attach_to(window)
window.render(0.5)
a.x = a.x + distance_between_lines
b.x = b.x + distance_between_lines
# ----------------------------------------------------------------------
# Done: 6. Follow the INSTRUCTIONS AT THE TOP OF THIS MODULE
# to correct the mistake(s) in the following function.
# ----------------------------------------------------------------------
def broken_4(x, y, radius, window):
"""
    What comes in: Positive integers x, y and radius, and an rg.RoseWindow.
What goes out: Nothing (i.e., None).
Side effects:
Draws a green-filled rg.Circle with the given radius,
centered at (x, y), on the given rg.RoseWindow
Must ** render ** but ** NOT close ** the window.
Type hints:
:type x: int
:type y: int
:type radius: int
:type window: rg.RoseWindow
"""
circle = rg.Circle(rg.Point(x, y), radius)
circle.fill_color = 'green'
circle.attach_to(window)
window.render()
# ----------------------------------------------------------------------
# Done: 7. Follow the INSTRUCTIONS AT THE TOP OF THIS MODULE
# to correct the mistake(s) in the following function.
# ----------------------------------------------------------------------
def broken_5(circle, window):
"""
What comes in: an rg.Circle and an rg.RoseWindow.
What goes out: Nothing (i.e., None).
Side effects:
Draws the given rg.Circle and an rg.Square that circumscribes it,
both on the given rg.RoseWindow.
Must ** render ** but ** NOT close ** the window.
Type hints:
:type circle: rg.Circle
:type window: rg.RoseWindow
"""
circle.attach_to(window)
square = rg.Square(circle.center, (2 * circle.radius))
square.outline_color = circle.fill_color
square.attach_to(window)
window.render()
# ----------------------------------------------------------------------
# Done: 8. Follow the INSTRUCTIONS AT THE TOP OF THIS MODULE
# to correct the mistake(s) in the following function.
# ----------------------------------------------------------------------
def broken_6(n):
"""
What comes in: A positive integer n.
What goes out: Returns the sum:
1 + 1/2 + 1/3 + ... + 1/n.
Side effects: None.
"""
total = 0
for k in range(n):
total = total + (1 / (k + 1))
return total
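# Worked example (illustration only): broken_6(3) computes 1 + 1/2 + 1/3
# = 11/6, approximately 1.8333333, which matches the expected value used
# for Test 1 of broken_6 in run_test_all above.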
# ----------------------------------------------------------------------
# Done: 9. Follow the INSTRUCTIONS AT THE TOP OF THIS MODULE
# to correct the mistake(s) in the following function.
# ----------------------------------------------------------------------
def broken_7(n, point, length, distance_between_lines, window):
"""
    What comes in:  The five arguments are:
      -- A positive integer n.
      -- An rg.Point.
      -- A positive integer length.
      -- A positive integer distance_between_lines.
      -- An rg.RoseWindow.
What goes out: Nothing (i.e., None).
Side effects:
Draws n horizontal rg.Lines on the given rg.RoseWindow,
where the topmost rg.Line has the given point as its leftmost
point and all the rg.Lines have the given length
and they are the given distance apart.
Each line is drawn with a 0.5 second pause after drawing it.
Must ** render ** but ** NOT close ** the window.
Type hints:
:type n: int
:type point: rg.Point
:type length: int
:type distance_between_lines: int
:type window: rg.RoseWindow
"""
left = rg.Point(point.x, point.y)
right = rg.Point(point.x + length, point.y)
for _ in range(n):
line = rg.Line(left, right)
line.attach_to(window)
window.render(0.5)
left = rg.Point(left.x, left.y + distance_between_lines)
right = rg.Point(right.x, right.y + distance_between_lines)
window.render()
# ----------------------------------------------------------------------
# Calls main to start the ball rolling.
# ----------------------------------------------------------------------
main()
| 36.932773
| 75
| 0.564581
|
e9898ff70e1e0b55fc4a5abed8f8020ff117de4f
| 2,394
|
py
|
Python
|
pydantic/__init__.py
|
MihanixA/pydantic
|
91278419f913a596d4bf567d8d16126112f24236
|
[
"MIT"
] | null | null | null |
pydantic/__init__.py
|
MihanixA/pydantic
|
91278419f913a596d4bf567d8d16126112f24236
|
[
"MIT"
] | null | null | null |
pydantic/__init__.py
|
MihanixA/pydantic
|
91278419f913a596d4bf567d8d16126112f24236
|
[
"MIT"
] | null | null | null |
# flake8: noqa
from . import dataclasses
from .annotated_types import create_model_from_namedtuple, create_model_from_typeddict
from .class_validators import root_validator, validator
from .decorator import validate_arguments
from .env_settings import BaseSettings
from .error_wrappers import ValidationError
from .errors import *
from .fields import Field, PrivateAttr, Required
from .main import *
from .networks import *
from .parse import Protocol
from .tools import *
from .types import *
from .version import VERSION
# WARNING __all__ from .errors is not included here, it will be removed as an export here in v2
# please use "from pydantic.errors import ..." instead
__all__ = [
# annotated types utils
'create_model_from_namedtuple',
'create_model_from_typeddict',
# dataclasses
'dataclasses',
# class_validators
'root_validator',
'validator',
# decorator
'validate_arguments',
# env_settings
'BaseSettings',
# error_wrappers
'ValidationError',
# fields
'Field',
'Required',
# main
'BaseConfig',
'BaseModel',
'Extra',
'compiled',
'create_model',
'validate_model',
# network
'AnyUrl',
'AnyHttpUrl',
'HttpUrl',
'stricturl',
'EmailStr',
'NameEmail',
'IPvAnyAddress',
'IPvAnyInterface',
'IPvAnyNetwork',
'PostgresDsn',
'RedisDsn',
'KafkaDsn',
'validate_email',
# parse
'Protocol',
# tools
'parse_file_as',
'parse_obj_as',
'parse_raw_as',
# types
'NoneStr',
'NoneBytes',
'StrBytes',
'NoneStrBytes',
'StrictStr',
'ConstrainedBytes',
'conbytes',
'ConstrainedList',
'conlist',
'ConstrainedSet',
'conset',
'ConstrainedStr',
'constr',
'PyObject',
'ConstrainedInt',
'conint',
'PositiveInt',
'NegativeInt',
'NonNegativeInt',
'NonPositiveInt',
'ConstrainedFloat',
'confloat',
'PositiveFloat',
'NegativeFloat',
'NonNegativeFloat',
'NonPositiveFloat',
'ConstrainedDecimal',
'condecimal',
'UUID1',
'UUID3',
'UUID4',
'UUID5',
'FilePath',
'DirectoryPath',
'Json',
'JsonWrapper',
'SecretStr',
'SecretBytes',
'StrictBool',
'StrictBytes',
'StrictInt',
'StrictFloat',
'PaymentCardNumber',
'PrivateAttr',
'ByteSize',
# version
'VERSION',
]
| 21.185841
| 95
| 0.641604
|
37b20d9d82fd1bc715d92148c21099399ce06378
| 1,951
|
py
|
Python
|
glance/api/versions.py
|
rcbops/glance-buildpackage
|
13e52178fb25d6062db6c7fad9df122d279320ab
|
[
"Apache-2.0"
] | 2
|
2015-09-30T09:43:37.000Z
|
2017-06-26T14:36:21.000Z
|
glance/api/versions.py
|
rcbops/glance-buildpackage
|
13e52178fb25d6062db6c7fad9df122d279320ab
|
[
"Apache-2.0"
] | null | null | null |
glance/api/versions.py
|
rcbops/glance-buildpackage
|
13e52178fb25d6062db6c7fad9df122d279320ab
|
[
"Apache-2.0"
] | null | null | null |
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2011 OpenStack LLC.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Controller that returns information on the Glance API versions
"""
import httplib
import json
import webob.dec
from glance.common import wsgi
class Controller(object):
"""
A controller that produces information on the Glance API versions.
"""
def __init__(self, conf):
self.conf = conf
@webob.dec.wsgify
def __call__(self, req):
"""Respond to a request for all OpenStack API versions."""
version_objs = [
{
"id": "v1.1",
"status": "CURRENT",
"links": [
{
"rel": "self",
"href": self.get_href(req)}]},
{
"id": "v1.0",
"status": "SUPPORTED",
"links": [
{
"rel": "self",
"href": self.get_href(req)}]}]
body = json.dumps(dict(versions=version_objs))
response = webob.Response(request=req,
status=httplib.MULTIPLE_CHOICES,
content_type='application/json')
response.body = body
return response
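    # Example of the JSON body built above (a host_url of
    # "http://localhost:9292" is assumed purely for illustration):
    #   {"versions": [
    #       {"id": "v1.1", "status": "CURRENT",
    #        "links": [{"rel": "self", "href": "http://localhost:9292/v1/"}]},
    #       {"id": "v1.0", "status": "SUPPORTED",
    #        "links": [{"rel": "self", "href": "http://localhost:9292/v1/"}]}]}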
def get_href(self, req):
return "%s/v1/" % req.host_url
| 28.275362
| 78
| 0.561763
|
20716025f37ee6382edb7694747e38be848056a9
| 5,932
|
py
|
Python
|
gans1dset.py
|
AgamChopra/WGAN-GP
|
cbe15f4d2ef2ebaef477524103cbda0741098186
|
[
"MIT"
] | null | null | null |
gans1dset.py
|
AgamChopra/WGAN-GP
|
cbe15f4d2ef2ebaef477524103cbda0741098186
|
[
"MIT"
] | null | null | null |
gans1dset.py
|
AgamChopra/WGAN-GP
|
cbe15f4d2ef2ebaef477524103cbda0741098186
|
[
"MIT"
] | null | null | null |
import cv2
import numpy as np
import torch
from matplotlib import pyplot as plt
import random
import sys
sys.path.append(r"E:\ML\Dog-Cat-GANs\Dataset")
def load_human_bw():
human_list = []
for i in range(1,5233):
img = cv2.imread('E:\ML\Dog-Cat-GANs\Dataset\human\humans (%d).jpg'%(i+1))[:,:,0:1]
human_list.append(cv2.resize(img, dsize=(128, 128), interpolation=cv2.INTER_CUBIC))
print('.human data loaded')
return human_list
def load_celeb():
human_list = []
for i in range(1,202598):
idx = str(i).zfill(6)
img = cv2.imread('E:\ML\Dog-Cat-GANs\Dataset\img_align_celeba\%s.jpg'%(idx))
human_list.append(cv2.resize(img, dsize=(128, 128), interpolation=cv2.INTER_CUBIC))
print('.celeb data loaded')
return human_list
def load_celeb_sample(N=10):
human_list = []
sample = np.random.randint(low=0, high=202598, size=N, dtype=int)
for i in sample:
idx = str(i).zfill(6)
img = cv2.imread('E:\ML\Dog-Cat-GANs\Dataset\img_align_celeba\%s.jpg'%(idx))
human_list.append(cv2.resize(img, dsize=(128, 128), interpolation=cv2.INTER_CUBIC))
print('.celeb data loaded')
return human_list
def load_cats():
cat_list = []
for i in range(5650):
img = cv2.imread('E:\ML\Dog-Cat-GANs\Dataset\cat_hq\cat (%d).jpg'%(i+1))
cat_list.append(cv2.resize(img, dsize=(128, 128), interpolation=cv2.INTER_CUBIC))
print('.cat data loaded')
return cat_list
def load_not_cats():
not_cat_list = []
for i in range(5000):
img = cv2.imread('E:\ML\Dog-Cat-GANs\Dataset\cats\catnt (%d).jpg'%(i+1))
not_cat_list.append(cv2.resize(img, dsize=(128, 128), interpolation=cv2.INTER_CUBIC))
print('..not cat data loaded')
return not_cat_list
def load_photos():
cat_list = []
for i in range(7036):
img = cv2.imread('E:\ML\Dog-Cat-GANs\Dataset\photo_jpg\photo (%d).jpg'%(i+1))
cat_list.append(cv2.resize(img, dsize=(128, 128), interpolation=cv2.INTER_CUBIC))
print('.photo data loaded')
return cat_list
def load_art():
cat_list = []
for i in range(300):
img = cv2.imread('E:\ML\Dog-Cat-GANs\Dataset\monet_jpg\photo (%d).jpg'%(i+1))
cat_list.append(cv2.resize(img, dsize=(128, 128), interpolation=cv2.INTER_CUBIC))
print('.art data loaded')
return cat_list
def dataset():
cat = load_cats()
catnt = load_not_cats()
return np.swapaxes(np.asanyarray(catnt), 1, -1), np.swapaxes(np.asanyarray(cat), 1, -1)
def cat_dataset():
cat = load_cats()
return np.swapaxes(np.asanyarray(cat), 1, -1)
def dog_dataset():
dog = load_not_cats()
return np.swapaxes(np.asanyarray(dog), 1, -1)
def human_dataset_bw():
human = load_human_bw()
human = np.expand_dims(np.asanyarray(human), axis=1)
return np.swapaxes(human, 2, -1)
def photo_dataset():
cat = load_photos()
return np.swapaxes(np.asanyarray(cat), 1, -1)
def art_dataset():
cat = load_art()
return np.swapaxes(np.asanyarray(cat), 1, -1)
def celeb_dataset():
cat = load_celeb()
return np.swapaxes(np.asanyarray(cat), 1, -1)
def celeb_dataset_sample(N=10):
cat = load_celeb_sample(N)
return np.swapaxes(np.asanyarray(cat), 1, -1)
def load_dataset():
random.seed(15)
cat = load_cats()
catnt = load_not_cats()
rand_seed = random.sample(range(10000), 10000)
x = []
y = []
for i in range(10000):
if rand_seed[i] < 5000:
x.append(cat[rand_seed[i]].T)
y.append(1)
else:
x.append(catnt[rand_seed[i]-5000].T)
y.append(0)
print('...data stitching and randomization finished')
return x,y
def dataset_():
x,y = load_dataset()
print('....train test data loaded')
return np.stack(x[:9900]),np.stack(y[:9900]),np.stack(x[9900:]),np.stack(y[9900:])
def visualize(x,dark=True,title=None):
if dark:
plt.style.use('dark_background')
else:
plt.style.use('default')
plt.imshow(cv2.cvtColor(x.T, cv2.COLOR_BGR2RGB))
plt.axis('off')
if title != None:
plt.title(title)
plt.show()
def visualize_25(x,dark=True):
if dark:
plt.style.use('dark_background')
else:
plt.style.use('default')
r = 5
c = 5
fig = plt.figure(figsize=(20,20))
for i in range(x.shape[0]):
fig.add_subplot(r,c,i+1)
plt.imshow(cv2.cvtColor(x[i].T, cv2.COLOR_BGR2RGB))
plt.axis('off')
plt.show()
def visualize_16(x,dark=True):
if dark:
plt.style.use('dark_background')
else:
plt.style.use('default')
r = 4
c = 4
fig = plt.figure(figsize=(10,10))
for i in range(x.shape[0]):
fig.add_subplot(r,c,i+1)
plt.imshow(cv2.cvtColor(x[i].T, cv2.COLOR_BGR2RGB))
plt.axis('off')
plt.show()
def img_load(path, show = True):
img = cv2.imread(path)
x = cv2.resize(img, dsize=(128, 128), interpolation=cv2.INTER_CUBIC)
if show:
plt.imshow(cv2.cvtColor(x, cv2.COLOR_BGR2RGB))
plt.show()
print('image loaded!')
return x
def torch_celeb_dataset():
data = celeb_dataset()
data = torch.from_numpy(data).to(dtype = torch.float)
return data
def torch_celeb_dataset_sample(N=10):
data = celeb_dataset_sample(N)
data = torch.from_numpy(data).to(dtype = torch.float)
return data
def torch_cat_dataset():
data = cat_dataset()
data = torch.from_numpy(data).to(dtype = torch.float)
return data
def torch_photo_dataset():
data = photo_dataset()
data = torch.from_numpy(data).to(dtype = torch.float)
return data
def main():
data = celeb_dataset()
print(data.shape)
visualize_25(data[0:25])
visualize(data[0])
#data = torch_celeb_dataset()
#print(data[0], data.max(),data.min())
if __name__ == '__main__':
main()
| 25.90393
| 93
| 0.623399
|
0a0b686edb06d4d7689c9c4e1aba04e3e934a913
| 2,622
|
py
|
Python
|
arxiv/taxonomy/__init__.py
|
cul-it/arxiv-base
|
a5beadf44c24f72e21313299bfafc1ffb9d28ac7
|
[
"MIT"
] | 23
|
2019-01-10T22:01:18.000Z
|
2022-02-02T10:28:25.000Z
|
arxiv/taxonomy/__init__.py
|
arXiv/arxiv-base
|
b59490abc1656c240025e19af86d6a246926914a
|
[
"MIT"
] | 57
|
2018-12-17T16:45:38.000Z
|
2021-12-14T14:20:58.000Z
|
arxiv/taxonomy/__init__.py
|
cul-it/arxiv-base-ui
|
a5beadf44c24f72e21313299bfafc1ffb9d28ac7
|
[
"MIT"
] | 5
|
2019-01-10T22:01:28.000Z
|
2021-11-05T12:25:31.000Z
|
"""
arXiv group, archive and category definitions.
arXiv categories are arranged in a hierarchy where there are archives
(astro-ph, cs, math, etc.) that contain subject classes (astro-ph has subject
classes CO, GA, etc.). We now use the term category to refer to any archive or
archive.subject_class that one can submit to (so hep-th and math.IT are both
categories). No subject class can be in more than one archive. However, our
scientific advisors identify some categories that should appear in more than
one archive because they bridge major subject areas. Examples include math.MP
== math-ph and stat.TH == math.ST. These are called category aliases, and the
idea is that any article classified in one of the alias categories also
appears in the other (canonical), but that most of the arXiv code for display,
search, etc. does not need to understand the break with hierarchy.
"""
from .definitions import CATEGORIES, CATEGORIES_ACTIVE, ARCHIVES, \
ARCHIVES_ACTIVE, ARCHIVES_SUBSUMED, CATEGORY_ALIASES
from .category import Category, Archive, Group
def get_category_display(category: str, canonical: bool = True) -> str:
"""
Get the display name of an arXiv category.
Parameters
----------
category : str
Category identifier, e.g. ``nlin.AO``.
canonical : bool
If True (default) and the category is subsumed, the display name for
the canonical category will be returned instead.
Returns
-------
str
Display name for the category, e.g. ``Adaptation and Self-Organizing
Systems (nlin.AO)``.
"""
if canonical:
return Category(category).canonical.display
return Category(category).display
def get_archive_display(archive: str, canonical: bool = True) -> str:
"""
Get the display name of an arXiv archive.
Parameters
----------
archive : str
Archive identifier, e.g. ``astro-ph``.
canonical : bool
If True (default) and the archive is subsumed, the display name for
the canonical archive will be returned instead.
Returns
-------
str
Display name for the category, e.g. ``Astrophysics (astro-ph)``.
"""
if canonical:
return Archive(archive).canonical.display
return Archive(archive).display
def get_group_display(group: str) -> str:
"""
Get the display name of an arXiv group.
Parameters
----------
group : str
Group identifier, e.g. ``grp_math``.
Returns
-------
str
Display name for the group, e.g. ``Mathematics (grp_math)``.
"""
return Group(group).display
| 30.847059
| 78
| 0.680397
|
b185c73c34565575bf0482e43bc5ea66833b9c38
| 5,165
|
py
|
Python
|
libs/cocos/camera.py
|
HieuLsw/blobjob.editor
|
c33473ffb7836a70ba3a1b2a9dd9452a9d3a1b81
|
[
"Naumen",
"Condor-1.1",
"MS-PL"
] | null | null | null |
libs/cocos/camera.py
|
HieuLsw/blobjob.editor
|
c33473ffb7836a70ba3a1b2a9dd9452a9d3a1b81
|
[
"Naumen",
"Condor-1.1",
"MS-PL"
] | null | null | null |
libs/cocos/camera.py
|
HieuLsw/blobjob.editor
|
c33473ffb7836a70ba3a1b2a9dd9452a9d3a1b81
|
[
"Naumen",
"Condor-1.1",
"MS-PL"
] | null | null | null |
# ----------------------------------------------------------------------------
# cocos2d
# Copyright (c) 2008 Daniel Moisset, Ricardo Quesada, Rayentray Tappa, Lucio Torre
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in
# the documentation and/or other materials provided with the
# distribution.
# * Neither the name of cocos2d nor the names of its
# contributors may be used to endorse or promote products
# derived from this software without specific prior written
# permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
# ----------------------------------------------------------------------------
'''Camera object'''
__docformat__ = 'restructuredtext'
from director import director
from euclid import Point3
from pyglet.gl import *
__all__ = ['Camera']
class Camera(object):
"""
Camera used in every `CocosNode`.
Useful to look at the object from different views.
The OpenGL gluLookAt() function is used to locate the
camera.
If the object is transformed by any of the scale, rotation or
position attributes, then they will override the camera.
"""
def __init__(self):
self.restore()
@classmethod
def get_z_eye( cls ):
'''Returns the best distance for the camera for the current window size
        cocos2d uses a Field Of View (fov) of 60
'''
width, height = director.get_window_size()
eye_z = height / 1.1566
return eye_z
def restore( self ):
'''Restore the camera to the initial position
        and sets its ``dirty`` and ``once`` attributes back to their defaults.
        If you have used the camera for a while and want to stop using it,
        call this method.
'''
width, height = director.get_window_size()
# tuple (x,y,z) that says where is the eye of the camera.
# used by ``gluLookAt()``
self._eye = Point3( width /2.0, height /2.0, self.get_z_eye() )
# tuple (x,y,z) that says where is pointing to the camera.
# used by ``gluLookAt()``
self._center = Point3( width /2.0, height /2.0, 0.0 )
# tuple (x,y,z) that says the up vector for the camera.
# used by ``gluLookAt()``
self._up_vector = Point3( 0.0, 1.0, 0.0)
#: whether or not the camera is 'dirty'
#: It is dirty if it is not in the original position
self.dirty = False
#: optimization. Only renders the camera once
self.once = False
def locate( self, force=False ):
'''Sets the camera using gluLookAt using its eye, center and up_vector
:Parameters:
`force` : bool
whether or not the camera will be located even if it is not dirty
'''
if force or self.dirty or self.once:
glLoadIdentity()
gluLookAt( self._eye.x, self._eye.y, self._eye.z, # camera eye
self._center.x, self._center.y, self._center.z, # camera center
self._up_vector.x, self._up_vector.y, self._up_vector.z # camera up vector
)
self.once = False
def _get_eye( self ):
return self._eye
def _set_eye( self, eye ):
self._eye = eye
self.dirty = True
eye = property(_get_eye, _set_eye, doc='''Eye of the camera in x,y,z coordinates
    :type: float, float, float
''')
def _get_center( self ):
return self._center
def _set_center( self, center ):
self._center = center
self.dirty = True
center = property(_get_center, _set_center, doc='''Center of the camera in x,y,z coordinates
    :type: float, float, float
''')
def _get_up_vector( self ):
return self._up_vector
def _set_up_vector( self, up_vector ):
self._up_vector = up_vector
self.dirty = True
up_vector = property(_get_up_vector, _set_up_vector, doc='''Up vector of the camera in x,y,z coordinates
    :type: float, float, float
''')
| 35.376712
| 108
| 0.634656
|
1a029e645bdfde79841cafd73fd76849b96ca744
| 1,330
|
py
|
Python
|
tests/anat/script/kancolle/expedition.py
|
coppelia517/orlov
|
d7ed6c061432b99ab2b75e0262db293e444fe6be
|
[
"MIT"
] | null | null | null |
tests/anat/script/kancolle/expedition.py
|
coppelia517/orlov
|
d7ed6c061432b99ab2b75e0262db293e444fe6be
|
[
"MIT"
] | null | null | null |
tests/anat/script/kancolle/expedition.py
|
coppelia517/orlov
|
d7ed6c061432b99ab2b75e0262db293e444fe6be
|
[
"MIT"
] | null | null | null |
""" Script base for orlov anat package. """
import logging
import pytest
# pylint: disable=E0401
from anat.script.kancolle.testcase_kancolle import KancolleNormal
logger = logging.getLogger(__name__)
@pytest.mark.usefixtures('conftests_fixture', 'orlov_fixture', 'anat_fixture')
# pylint: disable=E1101, C0302, R0914
class TestExpedition(KancolleNormal):
""" Test Case Expedition in `anat` package.
"""
def test_000_expedition(self):
""" Test Expedition. """
logger.info(' *** Start TestCase : %s *** ', __file__)
self.start()
logger.info(' *** Test SetUp. *** ')
assert self.initialize()
logger.info(' *** Supply Fleet. *** ')
while self.expedition_result():
self.sleep()
result, fleets = self.supply_all()
assert result
logger.info(' *** Quest Check *** ')
while self.expedition_result():
self.sleep()
assert self.quest_receipts(['DP01', 'DP02', 'WP01', 'WP02', 'WP03'])
logger.info(' *** Expedition Start. *** ')
while self.expedition_result():
self.sleep()
assert self.expedition_all(fleets)
logger.info(' *** Test TearDown. *** ')
while self.expedition_result():
self.sleep()
| 30.227273
| 79
| 0.583459
|
151b09250a4e4f1b076b36f5ed79fbf81db6bc18
| 18,473
|
py
|
Python
|
src/asyncio_helpers/_version.py
|
sci-bots/asyncio-helpers
|
3eab1250275e1a0f35369e6a7775f81d4ce3640f
|
[
"BSD-3-Clause"
] | null | null | null |
src/asyncio_helpers/_version.py
|
sci-bots/asyncio-helpers
|
3eab1250275e1a0f35369e6a7775f81d4ce3640f
|
[
"BSD-3-Clause"
] | 3
|
2018-09-12T17:51:54.000Z
|
2019-04-25T17:28:00.000Z
|
src/asyncio_helpers/_version.py
|
sci-bots/asyncio-helpers
|
3eab1250275e1a0f35369e6a7775f81d4ce3640f
|
[
"BSD-3-Clause"
] | 1
|
2021-06-03T22:26:41.000Z
|
2021-06-03T22:26:41.000Z
|
# This file helps to compute a version number in source trees obtained from
# git-archive tarball (such as those provided by githubs download-from-tag
# feature). Distribution tarballs (built by setup.py sdist) and build
# directories (produced by setup.py build) will contain a much shorter file
# that just contains the computed version number.
# This file is released into the public domain. Generated by
# versioneer-0.18 (https://github.com/warner/python-versioneer)
"""Git implementation of _version.py."""
import errno
import os
import re
import subprocess
import sys
def get_keywords():
"""Get the keywords needed to look up the version information."""
# these strings will be replaced by git during git-archive.
# setup.py/versioneer.py will grep for the variable names, so they must
# each be defined on a line of their own. _version.py will just call
# get_keywords().
git_refnames = "$Format:%d$"
git_full = "$Format:%H$"
git_date = "$Format:%ci$"
keywords = {"refnames": git_refnames, "full": git_full, "date": git_date}
return keywords
class VersioneerConfig:
"""Container for Versioneer configuration parameters."""
def get_config():
"""Create, populate and return the VersioneerConfig() object."""
# these strings are filled in when 'setup.py versioneer' creates
# _version.py
cfg = VersioneerConfig()
cfg.VCS = "git"
cfg.style = "pep440"
cfg.tag_prefix = "v"
cfg.parentdir_prefix = "asyncio-helpers-"
cfg.versionfile_source = "src/asyncio_helpers/_version.py"
cfg.verbose = False
return cfg
class NotThisMethod(Exception):
"""Exception raised if a method is not valid for the current scenario."""
LONG_VERSION_PY = {}
HANDLERS = {}
def register_vcs_handler(vcs, method): # decorator
"""Decorator to mark a method as the handler for a particular VCS."""
def decorate(f):
"""Store f in HANDLERS[vcs][method]."""
if vcs not in HANDLERS:
HANDLERS[vcs] = {}
HANDLERS[vcs][method] = f
return f
return decorate
def run_command(commands, args, cwd=None, verbose=False, hide_stderr=False,
env=None):
"""Call the given command(s)."""
assert isinstance(commands, list)
p = None
for c in commands:
try:
dispcmd = str([c] + args)
# remember shell=False, so use git.cmd on windows, not just git
p = subprocess.Popen([c] + args, cwd=cwd, env=env,
stdout=subprocess.PIPE,
stderr=(subprocess.PIPE if hide_stderr
else None))
break
except EnvironmentError:
e = sys.exc_info()[1]
if e.errno == errno.ENOENT:
continue
if verbose:
print("unable to run %s" % dispcmd)
print(e)
return None, None
else:
if verbose:
print("unable to find command, tried %s" % (commands,))
return None, None
stdout = p.communicate()[0].strip()
if sys.version_info[0] >= 3:
stdout = stdout.decode()
if p.returncode != 0:
if verbose:
print("unable to run %s (error)" % dispcmd)
print("stdout was %s" % stdout)
return None, p.returncode
return stdout, p.returncode
def versions_from_parentdir(parentdir_prefix, root, verbose):
"""Try to determine the version from the parent directory name.
Source tarballs conventionally unpack into a directory that includes both
the project name and a version string. We will also support searching up
two directory levels for an appropriately named parent directory
"""
rootdirs = []
for i in range(3):
dirname = os.path.basename(root)
if dirname.startswith(parentdir_prefix):
return {"version": dirname[len(parentdir_prefix):],
"full-revisionid": None,
"dirty": False, "error": None, "date": None}
else:
rootdirs.append(root)
root = os.path.dirname(root) # up a level
if verbose:
print("Tried directories %s but none started with prefix %s" %
(str(rootdirs), parentdir_prefix))
raise NotThisMethod("rootdir doesn't start with parentdir_prefix")
@register_vcs_handler("git", "get_keywords")
def git_get_keywords(versionfile_abs):
"""Extract version information from the given file."""
# the code embedded in _version.py can just fetch the value of these
# keywords. When used from setup.py, we don't want to import _version.py,
# so we do it with a regexp instead. This function is not used from
# _version.py.
keywords = {}
try:
f = open(versionfile_abs, "r")
for line in f.readlines():
if line.strip().startswith("git_refnames ="):
mo = re.search(r'=\s*"(.*)"', line)
if mo:
keywords["refnames"] = mo.group(1)
if line.strip().startswith("git_full ="):
mo = re.search(r'=\s*"(.*)"', line)
if mo:
keywords["full"] = mo.group(1)
if line.strip().startswith("git_date ="):
mo = re.search(r'=\s*"(.*)"', line)
if mo:
keywords["date"] = mo.group(1)
f.close()
except EnvironmentError:
pass
return keywords
@register_vcs_handler("git", "keywords")
def git_versions_from_keywords(keywords, tag_prefix, verbose):
"""Get version information from git keywords."""
if not keywords:
raise NotThisMethod("no keywords at all, weird")
date = keywords.get("date")
if date is not None:
# git-2.2.0 added "%cI", which expands to an ISO-8601 -compliant
# datestamp. However we prefer "%ci" (which expands to an "ISO-8601
# -like" string, which we must then edit to make compliant), because
# it's been around since git-1.5.3, and it's too difficult to
# discover which version we're using, or to work around using an
# older one.
date = date.strip().replace(" ", "T", 1).replace(" ", "", 1)
refnames = keywords["refnames"].strip()
if refnames.startswith("$Format"):
if verbose:
print("keywords are unexpanded, not using")
raise NotThisMethod("unexpanded keywords, not a git-archive tarball")
refs = set([r.strip() for r in refnames.strip("()").split(",")])
# starting in git-1.8.3, tags are listed as "tag: foo-1.0" instead of
# just "foo-1.0". If we see a "tag: " prefix, prefer those.
TAG = "tag: "
tags = set([r[len(TAG):] for r in refs if r.startswith(TAG)])
if not tags:
# Either we're using git < 1.8.3, or there really are no tags. We use
# a heuristic: assume all version tags have a digit. The old git %d
# expansion behaves like git log --decorate=short and strips out the
# refs/heads/ and refs/tags/ prefixes that would let us distinguish
# between branches and tags. By ignoring refnames without digits, we
# filter out many common branch names like "release" and
# "stabilization", as well as "HEAD" and "master".
tags = set([r for r in refs if re.search(r'\d', r)])
if verbose:
print("discarding '%s', no digits" % ",".join(refs - tags))
if verbose:
print("likely tags: %s" % ",".join(sorted(tags)))
for ref in sorted(tags):
# sorting will prefer e.g. "2.0" over "2.0rc1"
if ref.startswith(tag_prefix):
r = ref[len(tag_prefix):]
if verbose:
print("picking %s" % r)
return {"version": r,
"full-revisionid": keywords["full"].strip(),
"dirty": False, "error": None,
"date": date}
# no suitable tags, so version is "0+unknown", but full hex is still there
if verbose:
print("no suitable tags, using unknown + full revision id")
return {"version": "0+unknown",
"full-revisionid": keywords["full"].strip(),
"dirty": False, "error": "no suitable tags", "date": None}
@register_vcs_handler("git", "pieces_from_vcs")
def git_pieces_from_vcs(tag_prefix, root, verbose, run_command=run_command):
"""Get version from 'git describe' in the root of the source tree.
This only gets called if the git-archive 'subst' keywords were *not*
expanded, and _version.py hasn't already been rewritten with a short
version string, meaning we're inside a checked out source tree.
"""
GITS = ["git"]
if sys.platform == "win32":
GITS = ["git.cmd", "git.exe"]
out, rc = run_command(GITS, ["rev-parse", "--git-dir"], cwd=root,
hide_stderr=True)
if rc != 0:
if verbose:
print("Directory %s not under git control" % root)
raise NotThisMethod("'git rev-parse --git-dir' returned error")
# if there is a tag matching tag_prefix, this yields TAG-NUM-gHEX[-dirty]
# if there isn't one, this yields HEX[-dirty] (no NUM)
describe_out, rc = run_command(GITS, ["describe", "--tags", "--dirty",
"--always", "--long",
"--match", "%s*" % tag_prefix],
cwd=root)
# --long was added in git-1.5.5
if describe_out is None:
raise NotThisMethod("'git describe' failed")
describe_out = describe_out.strip()
full_out, rc = run_command(GITS, ["rev-parse", "HEAD"], cwd=root)
if full_out is None:
raise NotThisMethod("'git rev-parse' failed")
full_out = full_out.strip()
pieces = {}
pieces["long"] = full_out
pieces["short"] = full_out[:7] # maybe improved later
pieces["error"] = None
# parse describe_out. It will be like TAG-NUM-gHEX[-dirty] or HEX[-dirty]
# TAG might have hyphens.
git_describe = describe_out
# look for -dirty suffix
dirty = git_describe.endswith("-dirty")
pieces["dirty"] = dirty
if dirty:
git_describe = git_describe[:git_describe.rindex("-dirty")]
# now we have TAG-NUM-gHEX or HEX
if "-" in git_describe:
# TAG-NUM-gHEX
mo = re.search(r'^(.+)-(\d+)-g([0-9a-f]+)$', git_describe)
if not mo:
# unparseable. Maybe git-describe is misbehaving?
pieces["error"] = ("unable to parse git-describe output: '%s'"
% describe_out)
return pieces
# tag
full_tag = mo.group(1)
if not full_tag.startswith(tag_prefix):
if verbose:
fmt = "tag '%s' doesn't start with prefix '%s'"
print(fmt % (full_tag, tag_prefix))
pieces["error"] = ("tag '%s' doesn't start with prefix '%s'"
% (full_tag, tag_prefix))
return pieces
pieces["closest-tag"] = full_tag[len(tag_prefix):]
# distance: number of commits since tag
pieces["distance"] = int(mo.group(2))
# commit: short hex revision ID
pieces["short"] = mo.group(3)
else:
# HEX: no tags
pieces["closest-tag"] = None
count_out, rc = run_command(GITS, ["rev-list", "HEAD", "--count"],
cwd=root)
pieces["distance"] = int(count_out) # total number of commits
# commit date: see ISO-8601 comment in git_versions_from_keywords()
date = run_command(GITS, ["show", "-s", "--format=%ci", "HEAD"],
cwd=root)[0].strip()
pieces["date"] = date.strip().replace(" ", "T", 1).replace(" ", "", 1)
return pieces
def plus_or_dot(pieces):
"""Return a + if we don't already have one, else return a ."""
if "+" in pieces.get("closest-tag", ""):
return "."
return "+"
def render_pep440(pieces):
"""Build up version string, with post-release "local version identifier".
Our goal: TAG[+DISTANCE.gHEX[.dirty]] . Note that if you
get a tagged build and then dirty it, you'll get TAG+0.gHEX.dirty
Exceptions:
1: no tags. git_describe was just HEX. 0+untagged.DISTANCE.gHEX[.dirty]
"""
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
if pieces["distance"] or pieces["dirty"]:
rendered += plus_or_dot(pieces)
rendered += "%d.g%s" % (pieces["distance"], pieces["short"])
if pieces["dirty"]:
rendered += ".dirty"
else:
# exception #1
rendered = "0+untagged.%d.g%s" % (pieces["distance"],
pieces["short"])
if pieces["dirty"]:
rendered += ".dirty"
return rendered
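# Worked example (illustration only, values are hypothetical): for pieces with
# closest-tag "1.2.1", distance 3, short "abc1234" and a dirty tree, render_pep440
# yields "1.2.1+3.gabc1234.dirty"; with no tag at all and distance 7 it yields
# "0+untagged.7.gabc1234".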
def render_pep440_pre(pieces):
"""TAG[.post.devDISTANCE] -- No -dirty.
Exceptions:
1: no tags. 0.post.devDISTANCE
"""
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
if pieces["distance"]:
rendered += ".post.dev%d" % pieces["distance"]
else:
# exception #1
rendered = "0.post.dev%d" % pieces["distance"]
return rendered
def render_pep440_post(pieces):
"""TAG[.postDISTANCE[.dev0]+gHEX] .
The ".dev0" means dirty. Note that .dev0 sorts backwards
(a dirty tree will appear "older" than the corresponding clean one),
but you shouldn't be releasing software with -dirty anyways.
Exceptions:
1: no tags. 0.postDISTANCE[.dev0]
"""
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
if pieces["distance"] or pieces["dirty"]:
rendered += ".post%d" % pieces["distance"]
if pieces["dirty"]:
rendered += ".dev0"
rendered += plus_or_dot(pieces)
rendered += "g%s" % pieces["short"]
else:
# exception #1
rendered = "0.post%d" % pieces["distance"]
if pieces["dirty"]:
rendered += ".dev0"
rendered += "+g%s" % pieces["short"]
return rendered
def render_pep440_old(pieces):
"""TAG[.postDISTANCE[.dev0]] .
The ".dev0" means dirty.
    Exceptions:
1: no tags. 0.postDISTANCE[.dev0]
"""
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
if pieces["distance"] or pieces["dirty"]:
rendered += ".post%d" % pieces["distance"]
if pieces["dirty"]:
rendered += ".dev0"
else:
# exception #1
rendered = "0.post%d" % pieces["distance"]
if pieces["dirty"]:
rendered += ".dev0"
return rendered
def render_git_describe(pieces):
"""TAG[-DISTANCE-gHEX][-dirty].
Like 'git describe --tags --dirty --always'.
Exceptions:
1: no tags. HEX[-dirty] (note: no 'g' prefix)
"""
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
if pieces["distance"]:
rendered += "-%d-g%s" % (pieces["distance"], pieces["short"])
else:
# exception #1
rendered = pieces["short"]
if pieces["dirty"]:
rendered += "-dirty"
return rendered
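# Worked example (illustration only, values are hypothetical): closest-tag "1.2.1",
# distance 3, short "abc1234" and a dirty tree render as "1.2.1-3-gabc1234-dirty";
# with no tags the bare short hash is used, e.g. "abc1234-dirty".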
def render_git_describe_long(pieces):
"""TAG-DISTANCE-gHEX[-dirty].
    Like 'git describe --tags --dirty --always --long'.
The distance/hash is unconditional.
Exceptions:
1: no tags. HEX[-dirty] (note: no 'g' prefix)
"""
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
rendered += "-%d-g%s" % (pieces["distance"], pieces["short"])
else:
# exception #1
rendered = pieces["short"]
if pieces["dirty"]:
rendered += "-dirty"
return rendered
def render(pieces, style):
"""Render the given version pieces into the requested style."""
if pieces["error"]:
return {"version": "unknown",
"full-revisionid": pieces.get("long"),
"dirty": None,
"error": pieces["error"],
"date": None}
if not style or style == "default":
style = "pep440" # the default
if style == "pep440":
rendered = render_pep440(pieces)
elif style == "pep440-pre":
rendered = render_pep440_pre(pieces)
elif style == "pep440-post":
rendered = render_pep440_post(pieces)
elif style == "pep440-old":
rendered = render_pep440_old(pieces)
elif style == "git-describe":
rendered = render_git_describe(pieces)
elif style == "git-describe-long":
rendered = render_git_describe_long(pieces)
else:
raise ValueError("unknown style '%s'" % style)
return {"version": rendered, "full-revisionid": pieces["long"],
"dirty": pieces["dirty"], "error": None,
"date": pieces.get("date")}
def get_versions():
"""Get version information or return default if unable to do so."""
# I am in _version.py, which lives at ROOT/VERSIONFILE_SOURCE. If we have
# __file__, we can work backwards from there to the root. Some
# py2exe/bbfreeze/non-CPython implementations don't do __file__, in which
# case we can only use expanded keywords.
cfg = get_config()
verbose = cfg.verbose
try:
return git_versions_from_keywords(get_keywords(), cfg.tag_prefix,
verbose)
except NotThisMethod:
pass
try:
root = os.path.realpath(__file__)
# versionfile_source is the relative path from the top of the source
# tree (where the .git directory might live) to this file. Invert
# this to find the root from __file__.
for i in cfg.versionfile_source.split('/'):
root = os.path.dirname(root)
except NameError:
return {"version": "0+unknown", "full-revisionid": None,
"dirty": None,
"error": "unable to find root of source tree",
"date": None}
try:
pieces = git_pieces_from_vcs(cfg.tag_prefix, root, verbose)
return render(pieces, cfg.style)
except NotThisMethod:
pass
try:
if cfg.parentdir_prefix:
return versions_from_parentdir(cfg.parentdir_prefix, root, verbose)
except NotThisMethod:
pass
return {"version": "0+unknown", "full-revisionid": None,
"dirty": None,
"error": "unable to compute version", "date": None}
| 35.456814
| 79
| 0.584908
|
824ea88d2339e16be8b4e188c393e8359e5c8bd2
| 1,131
|
py
|
Python
|
GATHER_PYv2.py
|
rgupta9/RANT
|
d49882e71a6f04e71e9796320007dc78a346e5bd
|
[
"MIT"
] | 3
|
2021-02-24T21:01:40.000Z
|
2021-09-14T00:35:48.000Z
|
GATHER_PYv2.py
|
rgupta9/RANT
|
d49882e71a6f04e71e9796320007dc78a346e5bd
|
[
"MIT"
] | null | null | null |
GATHER_PYv2.py
|
rgupta9/RANT
|
d49882e71a6f04e71e9796320007dc78a346e5bd
|
[
"MIT"
] | 2
|
2021-04-19T00:33:34.000Z
|
2021-08-29T15:24:35.000Z
|
#! /usr/bin/python
# OLD PYTHON VERSION 2.7 - NO LONGER MAINTAINED
import os, sys, getpass
from netmiko import ConnectHandler
from netmiko.ssh_exception import NetMikoTimeoutException
from paramiko.ssh_exception import SSHException
platform = 'arista_eos'
username = raw_input('Username? ')
passy = getpass.getpass()
def openfile(file):
f = open(file,'r')
x = f.read()
x = x.strip()
x = x.split('\n')
return x
commandfile = raw_input('command file? ')
targetfile = raw_input('target file? ')
show_commands = openfile(commandfile)
hostlist = openfile(targetfile)
outfile = raw_input('output filename? ')
for host in hostlist:
try:
device = ConnectHandler(device_type=platform, ip=host, username=username, password=passy)
except Exception:
continue
try:
device.find_prompt()
except Exception:
continue
sys.stdout=open(outfile,"a")
for item in show_commands:
try:
output = device.send_command(item)
except:
continue
output = output.split('\n')
for line in output:
print host + " " + "|" + item + "|" + " " + line
device.disconnect()
sys.stdout.close()
| 22.62
| 91
| 0.693192
|
e9bc9337d31a7766061c591c2af09510b4c7a9b0
| 7,861
|
py
|
Python
|
poincarefm_rank.py
|
heygrain/HFM
|
0bc5c0a44bdbad72febc53c6839df116b8fe433f
|
[
"MIT"
] | 2
|
2020-06-19T01:58:12.000Z
|
2020-07-29T11:50:11.000Z
|
poincarefm_rank.py
|
heygrain/HFM
|
0bc5c0a44bdbad72febc53c6839df116b8fe433f
|
[
"MIT"
] | 1
|
2021-10-03T22:13:00.000Z
|
2021-10-03T22:13:00.000Z
|
poincarefm_rank.py
|
heygrain/HFM
|
0bc5c0a44bdbad72febc53c6839df116b8fe433f
|
[
"MIT"
] | 2
|
2020-12-03T00:19:02.000Z
|
2021-10-03T22:13:02.000Z
|
import torch
from torch import optim
import argparse
import os
import time
from datetime import datetime
from model import PoincareFM
from model.loss import bpr_loss
from model.metric import AUC
from utils.utils import load_data, batcher, construct_features, set_random_seed, save_results, hit_eval
def train(model, data, output_path, args, loss_func, score_func):
# optimizer
all_parameters = model.parameters()
poincare_parameters = []
for pname, p in model.named_parameters():
if 'feature_embedding' in pname:
poincare_parameters.append(p)
poincare_parameters_id = list(map(id, poincare_parameters))
other_parameters = list(filter(lambda p: id(p) not in poincare_parameters_id,
all_parameters))
params = [{'params': poincare_parameters, 'lr': args.lr_poincare}, {'params': other_parameters, 'lr': args.lr}]
if args.optimizer == 'SGD':
optimizer = optim.SGD(params, lr=args.lr)
elif args.optimizer == 'Adam':
optimizer = optim.Adam(params, lr=args.lr)
elif args.optimizer == 'Momentum':
optimizer = optim.SGD(params, lr=args.lr, momentum=0.8)
elif args.optimizer == 'Adagrad':
optimizer = optim.Adagrad(params, lr=args.lr)
# train
print('before train:')
best_dev_score = evaluate(model, data, score_func, on='dev')
best_epoch = -1
print()
for epoch in range(args.n_epochs):
print("Epoch {:} out of {:}".format(epoch + 1, args.n_epochs))
train_for_epoch(model, data, loss_func, optimizer, args.batch_size, args.reg_l2)
# evaluate on dev_set
cond1 = not(epoch % args.eval_every) and args.eval_every > 0
cond2 = epoch == args.n_epochs and not args.eval_every
if cond1 or cond2:
dev_score = evaluate(model, data, score_func, on='dev')
if best_dev_score < dev_score:
print("New best dev score! Saving model.")
torch.save(model.state_dict(), output_path)
best_dev_score = dev_score
best_epoch = epoch
if epoch >= best_epoch + 5:
print("Early stopping at epoch {:}.".format(epoch + 1))
print()
break
print()
print("- Best epoch: {:}, best dev score: {:.5f}.".format(best_epoch + 1, best_dev_score))
return best_dev_score
def train_for_epoch(model, data, loss_func, optimizer, batch_size, reg_l2):
train_user, train_item = data['train_user'], data['train_item']
pos_features = construct_features(train_user, train_item, data['item_idx_min'], user_demo=data['user_demo'], item_demo=data['item_demo'])
neg_item = torch.randint(low=data['item_idx_min'], high=data['item_idx_max'] + 1, size=train_user.shape, device=train_user.device)
neg_features = construct_features(train_user, neg_item, data['item_idx_min'], user_demo=data['user_demo'], item_demo=data['item_demo'])
model.train()
epoch_loss = 0
for i, (pos_x, neg_x) in enumerate(batcher(pos_features, neg_features, batch_size=batch_size, shuffle=True)):
optimizer.zero_grad()
pos_pred = model.forward(pos_x)
neg_pred = model.forward(neg_x)
loss = loss_func(pos_pred, neg_pred)
loss += 0.5 * reg_l2 * model.l2_sqnorm()
epoch_loss += loss * batch_size
loss.backward()
model.riemannian_grads()
optimizer.step()
model.project_embedding()
print("Average Train Loss: {}".format(epoch_loss / pos_features.shape[0]))
def evaluate(model, data, score_func, on='dev'):
print('Evaluate on', on, 'set',)
key_user = on + '_user'
key_item = on + '_item'
user, item = data[key_user], data[key_item]
pos_features = construct_features(user, item, data['item_idx_min'], user_demo=data['user_demo'], item_demo=data['item_demo'])
neg_item = torch.randint(low=data['item_idx_min'], high=data['item_idx_max'] + 1, size=user.shape, device=user.device)
neg_features = construct_features(user, neg_item, data['item_idx_min'], user_demo=data['user_demo'], item_demo=data['item_demo'])
model.eval()
with torch.no_grad():
pos_pred = model(pos_features)
neg_pred = model(neg_features)
score = score_func(pos_pred, neg_pred)
print("-", on, "score: {:.5f}".format(score))
return score
def parse_args():
parser = argparse.ArgumentParser(description="Run the model.")
parser.add_argument('--seed', type=int, default=205,
help='Seed for random, numpy, torch and cuda.')
parser.add_argument('--use_cuda', type=bool, default=True,
help='Whether use cuda.')
parser.add_argument('--embedding_dim', type=int, default=10,
help='Set embedding dimension for the model.')
parser.add_argument('--batch_size', type=int, default=4096,
help='Batch size.')
parser.add_argument('--n_epochs', type=int, default=10,
help='Number of epochs.')
parser.add_argument('--optimizer', default='Adam',
choices=['Adam', 'SGD', 'Momentum', 'Adagrad'],
help='Specify an optimizer type (Adam, Adagrad, SGD, Momentum).')
parser.add_argument('--lr', type=float, default=1e-3,
help='Learning rate.')
parser.add_argument('--lr_poincare', type=float, default=1e-3,
help='Learning rate for hyperbolic embeddings')
parser.add_argument('--reg_l2', type=float, default=0,
help='L2 regularization parameter.')
parser.add_argument('--dataset', default='TX',
help='Specify a dataset (ml100k, lastfm, amazon_cloths, ...)')
parser.add_argument('--use_content', type=int, default=0,
help='Whether using content features')
parser.add_argument('--eval_every', type=int, default=1,
help='Frequency of evaluating the performance on dev data \
(-1: never, 0: at the end, n: every n epochs)')
parser.add_argument('--plot', type=bool, default=False,
help='Whether to plot the hyperbolic embeddings')
parser.add_argument('--process', default='train',
choices=['train', 'eval'],
help='Process type: train, evaluate.')
return parser.parse_args()
if __name__ == "__main__":
args = parse_args()
# fix random seed
set_random_seed(args.seed)
# use cuda or not
if args.use_cuda:
device = torch.device('cuda')
else:
device = torch.device('cpu')
# load data
data, n_fields, n_features = load_data(args.dataset, device=device, use_content=args.use_content, use_rating=False, print_info=True)
# create model
model = PoincareFM(n_features=n_features, n_fields=n_fields, embedding_dim=args.embedding_dim)
model.to(device=device)
# output dir
output_dir = "./results/{:%Y%m%d_%H%M%S}/".format(datetime.now())
output_path = output_dir + "model.weights"
if not os.path.exists(output_dir):
os.makedirs(output_dir)
# train
print("Training start ...")
start = time.time()
best_dev_score = train(model, data, output_path=output_path, args=args, loss_func=bpr_loss, score_func=AUC)
print("Training process took {:.2f} seconds\n".format(time.time() - start))
# load best model and analysis on test set
model.load_state_dict(torch.load(output_path))
test_score = evaluate(model, data, AUC, on='test')
dev_hit5, dev_hit10 = hit_eval(model, data, on='dev')
test_hit5, test_hit10 = hit_eval(model, data, on='test')
# write results
save_results('./' + args.dataset + '_pfm.txt', args, best_dev_score, test_score, dev_hit5, dev_hit10, test_hit5, test_hit10)
| 43.916201
| 141
| 0.641013
|
9d23cb542a375f41b8e202cbcdc5a791703636ab
| 22,476
|
py
|
Python
|
Distances_Study/Comparison_Between_Networks.py
|
Serdobe/Probabilistic_Graphlets
|
ce4aca495751970089490b745069dbf6113e9f56
|
[
"Apache-2.0"
] | 1
|
2021-01-04T08:41:59.000Z
|
2021-01-04T08:41:59.000Z
|
Distances_Study/Comparison_Between_Networks.py
|
Serdobe/Probabilistic_Graphlets
|
ce4aca495751970089490b745069dbf6113e9f56
|
[
"Apache-2.0"
] | null | null | null |
Distances_Study/Comparison_Between_Networks.py
|
Serdobe/Probabilistic_Graphlets
|
ce4aca495751970089490b745069dbf6113e9f56
|
[
"Apache-2.0"
] | null | null | null |
'''
This script is for the last part of the protocol. Here we get the final correlation matrices and calculate the distances.
With these distances we can perform many different analyses.
'''
from itertools import product
from scipy.spatial.distance import pdist, squareform
from sklearn.metrics import precision_recall_curve, auc, roc_curve, roc_auc_score
import matplotlib.pyplot as plt
import seaborn as sns
import pandas as pd
import numpy as np
import os
from scipy.stats import mannwhitneyu
from statsmodels.distributions.empirical_distribution import ECDF
# Function To compare Distributions (Uniform Vs PPI):
'''
Finish these comparisons with all the distributions
'''
def Compare_Distributions(save_path):
PPI = pd.read_csv("/home/sergio/workspace_Eclipse/Lucky_GOGO_Extra/Results/Testeos_Varios/Network_Distances/Tests/Real_Distributions/Empirical_PPI/Work_Network_Prob_Low_PPI.txt",
sep = " ", header = None)
CoEx = pd.read_csv("/home/sergio/workspace_Eclipse/Lucky_GOGO_Extra/Results/Testeos_Varios/Network_Distances/Tests/Real_Distributions/Empirical_CoEx/Work_Network_Prob_Coex_Net_Original_Medium_Rank_5%.txt",
sep = " ", header = None)
GI = pd.read_csv("/home/sergio/workspace_Eclipse/Lucky_GOGO_Extra/Results/Testeos_Varios/Network_Distances/Tests/Real_Distributions/Empirical_GI/Work_Network_Prob_Network_Medium_Confidence_Named.txt",
sep = " ", header = None)
Uniform = pd.read_csv("/home/sergio/workspace_Eclipse/Lucky_GOGO_Extra/Results/Testeos_Varios/Network_Distances/Work_Network_Prob_UniformEta_0.0350076103500761_Mu_0.27_5000_Repetition_0_0.003_ER_.txt",
sep = " ", header = None)
Beta_1 = pd.read_csv("/home/sergio/workspace_Eclipse/Lucky_GOGO_Extra/Results/Testeos_Varios/Network_Distances/Work_Network_Prob_BetaEta_0.001_Mu_0.9_5000_Repetition_0_0.003_ER_.txt",
sep = " ", header = None)
Beta_2 = pd.read_csv("/home/sergio/workspace_Eclipse/Lucky_GOGO_Extra/Results/Testeos_Varios/Network_Distances/Tests/Real_Distributions/Beta_Mean_0.27_Var_0.0069/Work_Network_Prob_BetaEta_0.0350076103500761_Mu_0.27_5000_Repetition_0_0.003_ER_.txt",
sep = " ", header = None)
Beta_3 = pd.read_csv("/home/sergio/workspace_Eclipse/Lucky_GOGO_Extra/Results/Testeos_Varios/Network_Distances/Tests/Real_Distributions/Beta_Mean_0.78_Var_0.0028/Work_Network_Prob_BetaEta_0.01631701631701632_Mu_0.78_5000_Repetition_0_0.003_ER_.txt",
sep = " ", header = None)
### Table to get Means and Variances: ###
# Probabilities Distributions:
PPI = PPI.iloc[:, 2]
CoEx = CoEx.iloc[:, 2]
GI = GI.iloc[:, 2]
Uniform = Uniform.iloc[:, 2]
Beta_1 = Beta_1.iloc[:, 2]
Beta_2 = Beta_2.iloc[:, 2]
Beta_3 = Beta_3.iloc[:, 2]
# Means:
means_final = []
means_final += [np.mean(PPI)]
means_final += [np.mean(CoEx)]
means_final += [np.mean(GI)]
means_final += [np.mean(Uniform)]
means_final += [np.mean(Beta_1)]
means_final += [np.mean(Beta_2)]
means_final += [np.mean(Beta_3)]
# Variance:
variance_final = []
variance_final += [np.var(PPI)]
variance_final += [np.var(CoEx)]
variance_final += [np.var(GI)]
variance_final += [np.var(Uniform)]
variance_final += [np.var(Beta_1)]
variance_final += [np.var(Beta_2)]
variance_final += [np.var(Beta_3)]
# Networks:
Distributions = ["PPI", "CoEx", "GI", "Uniform", "Beta_1", "Beta_2", "Beta_3"]
# Final dataframe and save it:
Final_Mean_Variance_db = pd.DataFrame({"Dist" : Distributions,
"Mean" : means_final ,
"Variance" : variance_final})
Final_Mean_Variance_db.to_csv(save_path + "Table_Mean_Variance.txt", index = None, sep = " ")
### Table to compare Distributions with the Mann-Whitney test: ###
Options1 = [0,1,2,3,4,5,6]
Options2 = [0,1,2,3,4,5,6]
Result_P_values = pd.DataFrame(index=np.arange(7), columns=np.arange(7))
for option1, option2 in product(Options1, Options2):
# For First Comparison:
if option1 == 0:
test1 = PPI
name = "PPI"
if option1 == 1:
test1 = CoEx
name = "CoEx"
if option1 == 2:
test1 = GI
name = "GI"
if option1 == 3:
test1 = Uniform
name = "Uniform"
if option1 == 4:
test1 = Beta_1
name = "Beta_1"
if option1 == 5:
test1 = Beta_2
name = "Beta_2"
if option1 == 6:
test1 = Beta_3
name = "Beta_3"
# For second Comparison:
if option2 == 0:
test2 = PPI
name2 = "PPI"
if option2 == 1:
test2 = CoEx
name2 = "CoEx"
if option2 == 2:
test2 = GI
name2 = "GI"
if option2 == 3:
test2 = Uniform
name2 = "Uniform"
if option2 == 4:
test2 = Beta_1
name2 = "Beta_1"
if option2 == 5:
test2 = Beta_2
name2 = "Beta_2"
if option2 == 6:
test2 = Beta_3
name2 = "Beta_2"
_, p_value= mannwhitneyu(test1, test2, use_continuity=True, alternative='two-sided')
Result_P_values.iloc[option1,option2] = p_value
# Plot comparisons:
cdf1 = ECDF(test1)
cdf2 = ECDF(test2)
x = np.linspace(0,1,2**10)
y1 = cdf1(x)
y2 = cdf2(x)
plt.plot(x,y1)
plt.plot(x,y2)
plt.savefig(save_path + "Distribution_Cumulative_Comparisons_" + str(name) + "_" + str(name2) + ".png")
Result_P_values.columns = ["PPI", "CoEx", "GI", "Uniform", "Beta_1", "Beta_2", "Beta_3"]
Result_P_values.index = ["PPI", "CoEx", "GI", "Uniform", "Beta_1", "Beta_2", "Beta_3"]
Result_P_values.to_csv(save_path + "Table_ManWhitsen.txt", sep = " ")
# Function to prepare the Matrices:
def Prepare_The_Matrices(Option1, Option2, Option3, empirical = False):
# Read File:
file_Bin1 = pd.read_csv(Option1 + "Final_Correlation_Bin.txt")
file_Prob1 = pd.read_csv(Option1 + "Final_Correlation_Prob.txt")
file_Bin2 = pd.read_csv(Option2 + "Final_Correlation_Bin.txt")
file_Prob2 = pd.read_csv(Option2 + "Final_Correlation_Prob.txt")
file_Bin3 = pd.read_csv(Option3 + "Final_Correlation_Bin.txt")
file_Prob3 = pd.read_csv(Option3 + "Final_Correlation_Prob.txt")
# Categories Distribution:
if empirical == False:
# Concatenate the three models before grouping (otherwise file_Bin / file_Prob would be undefined here):
file_Bin = file_Bin1.append(file_Bin2).append(file_Bin3)
file_Prob = file_Prob1.append(file_Prob2).append(file_Prob3)
Group_Bin = file_Bin["Variance"].map(str) + file_Bin["Mean"].map(str)
Group_Prob = file_Prob["Variance"].map(str) + file_Prob["Mean"].map(str)
Final_Cat_Bin_Dist = Group_Bin.reset_index()
Final_Cat_Prob_Dist = Group_Prob.reset_index()
else:
# Model 1:
file_Bin1 = file_Bin1.drop_duplicates()
file_Prob1 = file_Prob1.drop_duplicates()
Group_Bin1 = file_Bin1.Real.fillna("Uniform")
Group_Prob1 = file_Prob1.Real.fillna("Uniform")
Group_Bin1 = Group_Bin1.reset_index(drop = True)
Group_Prob1 = Group_Prob1.reset_index(drop = True)
file_Bin1 = file_Bin1.reset_index(drop = True)
file_Prob1 = file_Prob1.reset_index(drop = True)
# Model 2:
file_Bin2 = file_Bin2.drop_duplicates()
file_Prob2 = file_Prob2.drop_duplicates()
Group_Bin2 = file_Bin2.Real.fillna("Uniform")
Group_Prob2 = file_Prob2.Real.fillna("Uniform")
Group_Bin2 = Group_Bin2.reset_index(drop = True)
Group_Prob2 = Group_Prob2.reset_index(drop = True)
file_Bin2 = file_Bin2.reset_index(drop = True)
file_Prob2 = file_Prob2.reset_index(drop = True)
# Model 3:
file_Bin3 = file_Bin3.drop_duplicates()
file_Prob3 = file_Prob3.drop_duplicates()
Group_Bin3 = file_Bin3.Real.fillna("Uniform")
Group_Prob3 = file_Prob3.Real.fillna("Uniform")
Group_Bin3 = Group_Bin3.reset_index(drop = True)
Group_Prob3 = Group_Prob3.reset_index(drop = True)
file_Bin3 = file_Bin3.reset_index(drop = True)
file_Prob3 = file_Prob3.reset_index(drop = True)
# Put together:
file_Bin = file_Bin1.append(file_Bin2)
file_Bin = file_Bin.append(file_Bin3)
file_Prob = file_Prob1.append(file_Prob2)
file_Prob = file_Prob.append(file_Prob3)
Group_Bin = Group_Bin1.append(Group_Bin2)
Group_Bin = Group_Bin.append(Group_Bin3)
Group_Prob = Group_Prob1.append(Group_Prob2)
Group_Prob = Group_Prob.append(Group_Prob3)
# Add New Category Distribution:
if empirical == False:
Final_Cat_Bin_Dist.columns = ["T", "G"]
Final_Cat_Prob_Dist.columns = ["T", "G"]
file_Bin["Density"] = Final_Cat_Bin_Dist["G"]
file_Prob["Density"] = Final_Cat_Prob_Dist["G"]
# Delete non used Variables:
file_Bin = file_Bin.drop(['Nodes', 'Densities', 'Distribution','Repetitions', 'Variance', 'Mean'], axis=1)
file_Prob = file_Prob.drop(['Nodes', 'Densities', 'Distribution','Repetitions', 'Variance', 'Mean'], axis=1)
# Relabel Distributions:
Number2Labels = {
"0.00.0" : "Unif",
"0.99900000000000010.5": "Var : 0.999 Mean : 0.5",
"0.99900000000000010.1": "Var : 0.999 Mean : 0.1",
"0.99900000000000010.9": "Var : 0.999 Mean : 0.9",
"0.50.5" : "Var : 0.5 Mean : 0.5",
"0.50.1" : "Var : 0.5 Mean : 0.1",
"0.50.9" : "Var : 0.5 Mean : 0.9",
"0.0010.5" : "Var : 0.001 Mean : 0.5",
"0.0010.1" : "Var : 0.001 Mean : 0.1",
"0.0010.9" : "Var : 0.001 Mean : 0.9",
}
file_Bin.Density = file_Bin.Density.apply(Number2Labels.get)
file_Prob.Density = file_Prob.Density.apply(Number2Labels.get)
else:
file_Bin = file_Bin.drop(['Nodes', 'Densities', 'Distribution','Repetitions', 'Variance', 'Mean', 'Real'], axis=1)
file_Prob = file_Prob.drop(['Nodes', 'Densities', 'Distribution','Repetitions', 'Variance', 'Mean', 'Real'], axis=1)
file_Bin["Density"] = Group_Bin
file_Prob["Density"] = Group_Prob
# Return Prepared Information:
return(file_Bin, file_Prob)
# Function To Calculate the Distances:
def Similarity_Infor_for_One_Model(file, Option, Network, empirical = False, group = "ALL"):
# Save the information in a different Variable Before Distance:
Info_Categories_Dens= file["Network"] + file["Density"]
# Drop The Variables:
file_Distance = file.drop(["Network", "Density"], axis=1)
# Calculate the distance:
distances = pdist(file_Distance.values, metric='euclidean')
dist_matrix = squareform(distances)
Data_Frame_Distance = pd.DataFrame(dist_matrix)
# Go to similarity Matrix (Normalize):
Data_Frame_Similarity = 1 - Data_Frame_Distance
Data_Frame_Similarity = Data_Frame_Similarity / Data_Frame_Similarity.max()[0]
Data_Frame_Similarity_np = np.array(Data_Frame_Similarity)
# Save Similarity Matrix:
Data_Frame_Similarity.to_csv( Option + "Correlation_Distance"+ Network + ".txt", header = True, index = False)
# Y_Score (without diagonal):
y_score = Data_Frame_Similarity_np[np.triu_indices_from(Data_Frame_Similarity_np, k = 1)]
# Group Labels (Y_True):
if empirical == False:
Number2Numbers= {
"Unif": 1,
"Var : 0.999 Mean : 0.5" : 2,
"Var : 0.999 Mean : 0.1" : 3,
"Var : 0.999 Mean : 0.9" : 4,
"Var : 0.5 Mean : 0.5" : 5,
"Var : 0.5 Mean : 0.1" : 6,
"Var : 0.5 Mean : 0.9" : 7,
"Var : 0.001 Mean : 0.5" : 8,
"Var : 0.001 Mean : 0.1" : 9,
"Var : 0.001 Mean : 0.9" : 10,
}
y_true = Info_Categories_Dens
y_true = Info_Categories_Dens.apply(Number2Numbers.get)
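# pdist with 'cityblock' on the 1-D label vector gives |label_i - label_j| for every pair;
# casting to bool and negating marks pairs that share a group as True.
# e.g. labels [1, 1, 2] -> pairwise |diff| [0, 1, 1] -> same-group mask [True, False, False].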
my_y_true = ~pdist(np.array(y_true).reshape(-1,1), 'cityblock').astype(bool)
else:
if group == "ALL":
Number2Numbers= {
"_ER_Uniform" : 2,
"_ER_PPI" : 2,
"_ER_GI" : 3,
"_ER_CoEx" : 4,
"_BA_Uniform" : 5,
"_BA_PPI" : 5,
"_BA_GI" : 6,
"_BA_CoEx" : 7,
"_RG_Uniform" : 8,
"_RG_PPI" : 8,
"_RG_GI" : 10,
"_RG_CoEx" : 11
}
y_true = Info_Categories_Dens
y_true = Info_Categories_Dens.apply(Number2Numbers.get)
my_y_true = ~pdist(np.array(y_true).reshape(-1,1), 'cityblock').astype(bool)
if group == "Density":
Number2Numbers= {
"Uniform" : 1,
"GI" : 2,
"CoEx" : 3,
"PPI" : 1
}
y_true = file["Density"]
y_true = file["Density"].apply(Number2Numbers.get)
my_y_true = ~pdist(np.array(y_true).reshape(-1,1), 'cityblock').astype(bool)
if group == "Topology":
Number2Numbers= {
"_ER_" : 1,
"_BA_" : 2,
"_RG_" : 3,
}
y_true = file["Network"]
y_true = file["Network"].apply(Number2Numbers.get)
my_y_true = ~pdist(np.array(y_true).reshape(-1,1), 'cityblock').astype(bool)
# Return the information:
return(y_score, my_y_true)
# Function to Make the Plots:
def Precision_Recall_plot(x_bin,
x_prob,
y_bin,
y_prob,
Option,
save_directory):
# Groups:
Bin_group = pd.Series(['Bin']).repeat(len(x_bin))
Prob_group = pd.Series(['Prob']).repeat(len(x_prob))
Final_Groups = Bin_group.append(Prob_group)
# Data frame:
precision_Bin = pd.Series(y_bin)
precision_Prob = pd.Series(y_prob)
precision_Bin = precision_Bin.append(precision_Prob)
recall_Bin = pd.Series(x_bin)
recall_Prob = pd.Series(x_prob)
recall_Bin = recall_Bin.append(recall_Prob)
Final_Groups = Final_Groups.reset_index(drop = True)
recall_Bin = recall_Bin.reset_index(drop = True)
precision_Bin = precision_Bin.reset_index(drop = True)
Plot_df = pd.DataFrame()
Plot_df["Recall"] = recall_Bin
Plot_df["Precision"] = precision_Bin
Plot_df["Group"] = Final_Groups
Plot_df = Plot_df.reset_index(drop=True)
# Plot:
fig, ax = plt.subplots(figsize=(12,8))
plt.grid(0)
paper_rc = {'lines.linewidth': 1, 'lines.markersize': 10}
sns.set_context("paper", rc = paper_rc)
sns.lineplot(x = "Recall", y = "Precision", hue = "Group", data = Plot_df )
ax.spines['top'].set_visible(False)
ax.spines['right'].set_visible(False)
ax.spines['left'].set_linewidth(2.5)
ax.spines['bottom'].set_linewidth(2.5)
ax.xaxis.set_tick_params(width=2.5)
ax.yaxis.set_tick_params(width=2.5)
plt.figtext(.5,.9,'Precision-Recall', fontsize=30, ha='center')
ax.set_xlabel('# Recall',fontsize = 20)
ax.set_ylabel('# Precision', fontsize = 20)
fig.savefig(save_directory + "_Correlation_Plot_Prob_" + Option + ".png")
# ROC Curve Plot:
def Plot_ROC(fpr_bin, tpr_bin,
fpr_prob, tpr_prob,
save_path, Model):
# Groups:
Bin_group = pd.Series(['Bin']).repeat(len(fpr_bin))
Prob_group = pd.Series(['Prob']).repeat(len(fpr_prob))
Final_Groups = Bin_group.append(Prob_group)
Final_Groups = Final_Groups.reset_index(drop = True)
# DataFrame:
fpr_prob = pd.Series(fpr_prob)
fpr_bin = pd.Series(fpr_bin)
tpr_prob = pd.Series(tpr_prob)
tpr_bin = pd.Series(tpr_bin)
Final_Data = pd.DataFrame()
Final_Data["fpr"] = fpr_bin.append(fpr_prob)
Final_Data["tpr"] = tpr_bin.append(tpr_prob)
Final_Data = Final_Data.reset_index(drop = True)
Final_Data["Network"] = Final_Groups
Final_Data_Bin = Final_Data[Final_Data.Network == "Bin"]
Final_Data_Prob = Final_Data[Final_Data.Network == "Prob"]
fig, ax = plt.subplots(figsize=(12,8))
lw = 2
plt.plot(Final_Data_Bin["fpr"], Final_Data_Bin["tpr"], c = "orange",
lw=lw)
plt.plot(Final_Data_Prob["fpr"], Final_Data_Prob["tpr"], c = "green",
lw=lw)
plt.plot([0, 1], [0, 1], color='navy', lw=lw, linestyle='--')
plt.xlim([-0.05, 1.0])
plt.ylim([0.0, 1.05])
plt.xlabel('False Positive Rate', fontsize=20)
plt.ylabel('True Positive Rate', fontsize=20)
plt.title('ROC', fontsize=20)
plt.legend(["Bin", "Prob", "Random"], loc="lower right", prop={'size': 15})
plt.savefig(save_path + "ROC_Curve_From_" + Model + ".png")
plt.show()
# Main:
# Work Directory:
os.chdir("/home/sergio/workspace_Eclipse/Lucky_GOGO_Extra/Results/Testeos_Varios/Network_Distances/Tests/Real_Distributions/Considering_Random/")
save_directory = "/home/sergio/workspace_Eclipse/Lucky_GOGO_Extra/Results/Testeos_Varios/Network_Distances/Tests/Real_Distributions/Considering_Random/"
# Preparing the Data:
Bin_Corr, Prob_Corr = Prepare_The_Matrices(Option1 = "ER_1/",
Option2 = "BA_1/",
Option3 = "RG_1/",
empirical = True)
# Getting the rest of information:
## GROUP = ALL ##
# Getting the variables for the ROC Curve:
y_score_Bin, y_true_Bin = Similarity_Infor_for_One_Model(Bin_Corr, Option = "ALL", Network = "Bin", empirical = True)
y_score_Prob, y_true_Prob = Similarity_Infor_for_One_Model(Prob_Corr, Option = "ALL", Network = "Prob", empirical = True)
# Precision-Recall:
precision_Bin, recall_Bin, thresholds_Bin = precision_recall_curve(y_true_Bin, y_score_Bin)
precision_Prob, recall_Prob, thresholds_Prob = precision_recall_curve(y_true_Prob, y_score_Prob)
Precision_Recall_plot( recall_Bin, recall_Prob,
precision_Bin, precision_Prob,
Option = "ALL", save_directory = save_directory)
auc(recall_Bin, precision_Bin) # 0.3673 Precision-Recall
auc(recall_Prob, precision_Prob) # 0.8711 Precision-Recall
# ROC Curve:
fpr_Bin, tpr_ER_Bin, thresholds_ER_Bin = roc_curve(y_true_Bin, y_score_Bin)
fpr_Prob, tpr_ER_Prob, thresholds_ER_Prob = roc_curve(y_true_Prob, y_score_Prob)
roc_auc_score(y_true_Bin, y_score_Bin) # 0.8807 Area under ROC
roc_auc_score(y_true_Prob, y_score_Prob) # 0.9803 Area Under ROC
Plot_ROC(fpr_Bin, tpr_ER_Bin, fpr_Prob, tpr_ER_Prob, save_directory, Model= "ALL")
## GROUP = Density ##
# Getting the variables for the ROC Curve:
y_score_Bin, y_true_Bin = Similarity_Infor_for_One_Model(Bin_Corr, Option = "ALL", Network = "Bin", empirical = True, group = "Density")
y_score_Prob, y_true_Prob = Similarity_Infor_for_One_Model(Prob_Corr, Option = "ALL", Network = "Prob", empirical = True, group = "Density")
# Precision-Recall:
precision_Bin, recall_Bin, thresholds_Bin = precision_recall_curve(y_true_Bin, y_score_Bin)
precision_Prob, recall_Prob, thresholds_Prob = precision_recall_curve(y_true_Prob, y_score_Prob)
Precision_Recall_plot( recall_Bin, recall_Prob,
precision_Bin, precision_Prob,
Option = "ALL_Group_Density", save_directory = save_directory)
auc(recall_Bin, precision_Bin) # 0.3711 Precision-Recall
auc(recall_Prob, precision_Prob) # 0.5363 Precision-Recall
# ROC Curve:
fpr_Bin, tpr_ER_Bin, thresholds_ER_Bin = roc_curve(y_true_Bin, y_score_Bin)
fpr_Prob, tpr_ER_Prob, thresholds_ER_Prob = roc_curve(y_true_Prob, y_score_Prob)
roc_auc_score(y_true_Bin, y_score_Bin) # 0.4981 Area under ROC
roc_auc_score(y_true_Prob, y_score_Prob) # 0.5424 Area Under ROC
Plot_ROC(fpr_Bin, tpr_ER_Bin, fpr_Prob, tpr_ER_Prob, save_directory, Model= "ALL_Group_Density")
## GROUP = Topology ##
# Getting the variables for the ROC Curve:
y_score_Bin, y_true_Bin = Similarity_Infor_for_One_Model(Bin_Corr, Option = "ALL", Network = "Bin", empirical = True, group = "Topology")
y_score_Prob, y_true_Prob = Similarity_Infor_for_One_Model(Prob_Corr, Option = "ALL", Network = "Prob", empirical = True, group = "Topology")
# Precision-Recall:
precision_Bin, recall_Bin, thresholds_Bin = precision_recall_curve(y_true_Bin, y_score_Bin)
precision_Prob, recall_Prob, thresholds_Prob = precision_recall_curve(y_true_Prob, y_score_Prob)
Precision_Recall_plot( recall_Bin, recall_Prob,
precision_Bin, precision_Prob,
Option = "ALL_Group_Topology", save_directory = save_directory)
auc(recall_Bin, precision_Bin) # 1 Precision-Recall
auc(recall_Prob, precision_Prob) # 0.999 Precision-Recall
# ROC Curve:
fpr_Bin, tpr_ER_Bin, thresholds_ER_Bin = roc_curve(y_true_Bin, y_score_Bin)
fpr_Prob, tpr_ER_Prob, thresholds_ER_Prob = roc_curve(y_true_Prob, y_score_Prob)
roc_auc_score(y_true_Bin, y_score_Bin) # 1 Area under ROC
roc_auc_score(y_true_Prob, y_score_Prob) # 1 Area Under ROC
Plot_ROC(fpr_Bin, tpr_ER_Bin, fpr_Prob, tpr_ER_Prob, save_directory, Model= "ALL_Group_Topology")
| 36.487013
| 253
| 0.605846
|
c8c0d1d80efe59d08a0c155e2586094ee7c5118a
| 2,904
|
py
|
Python
|
pyloxi/loxi/of13/util.py
|
floodlight/loxigen-artifacts
|
1822ec984cb6da342bbaa381677071cbbe53cee6
|
[
"Apache-2.0"
] | 1
|
2017-06-01T09:41:07.000Z
|
2017-06-01T09:41:07.000Z
|
pyloxi/loxi/of13/util.py
|
floodlight/loxigen-artifacts
|
1822ec984cb6da342bbaa381677071cbbe53cee6
|
[
"Apache-2.0"
] | 2
|
2017-07-03T08:50:56.000Z
|
2018-03-12T16:16:19.000Z
|
pyloxi/loxi/of14/util.py
|
floodlight/loxigen-artifacts
|
1822ec984cb6da342bbaa381677071cbbe53cee6
|
[
"Apache-2.0"
] | 20
|
2015-02-16T15:23:04.000Z
|
2022-03-15T20:06:10.000Z
|
# Copyright (c) 2008 The Board of Trustees of The Leland Stanford Junior University
# Copyright (c) 2011, 2012 Open Networking Foundation
# Copyright (c) 2012, 2013 Big Switch Networks, Inc.
# See the file LICENSE.pyloxi which should have been included in the source distribution
# Automatically generated by LOXI from template util.py
# Do not modify
import struct
from . import const
def pretty_mac(mac):
return ':'.join(["%02x" % x for x in mac])
def pretty_ipv4(v):
return "%d.%d.%d.%d" % ((v >> 24) & 0xFF, (v >> 16) & 0xFF, (v >> 8) & 0xFF, v & 0xFF)
def pretty_ipv6(v):
return ":".join(["%0.2x%0.2x" % (ord(v[i]), ord(v[i+1])) for i in range(0, len(v), 2)])
def pretty_flags(v, flag_names):
set_flags = []
for flag_name in flag_names:
flag_value = getattr(const, flag_name)
if v & flag_value == flag_value:
set_flags.append(flag_name)
elif v & flag_value:
set_flags.append('%s&%#x' % (flag_name, v & flag_value))
v &= ~flag_value
if v:
set_flags.append("%#x" % v)
return '|'.join(set_flags) or '0'
def pretty_port(v):
named_ports = [(k,v2) for (k,v2) in const.__dict__.items() if k.startswith('OFPP_')]
for (k, v2) in named_ports:
if v == v2:
return k
return v
def pack_port_no(value):
return struct.pack("!L", value)
def unpack_port_no(reader):
return reader.read("!L")[0]
def pack_fm_cmd(value):
return struct.pack("!B", value)
def unpack_fm_cmd(reader):
return reader.read("!B")[0]
def init_wc_bmap():
return 0
def pack_wc_bmap(value):
return struct.pack("!Q", value)
def unpack_wc_bmap(reader):
return reader.read("!Q")[0]
def init_match_bmap():
return 0
def pack_match_bmap(value):
return struct.pack("!Q", value)
def unpack_match_bmap(reader):
return reader.read("!Q")[0]
MASK64 = (1 << 64) - 1
def pack_bitmap_128(value):
x = 0l
for y in value:
x |= 1 << y
return struct.pack("!QQ", (x >> 64) & MASK64, x & MASK64)
def unpack_bitmap_128(reader):
hi, lo = reader.read("!QQ")
x = (hi << 64) | lo
i = 0
value = set()
while x != 0:
if x & 1 == 1:
value.add(i)
i += 1
x >>= 1
return value
def pack_bitmap_512(value):
words = [0] * 8
for v in value:
assert v < 512
words[7-int(v/64)] |= 1 << (v % 64)
return struct.pack("!8Q", *words)
def unpack_bitmap_512(reader):
words = reader.read("!8Q")
x = 0l
for word in words:
x <<= 64
x |= word
i = 0
value = set()
while x != 0:
if x & 1 == 1:
value.add(i)
i += 1
x >>= 1
return value
def pack_checksum_128(value):
return struct.pack("!QQ", (value >> 64) & MASK64, value & MASK64)
def unpack_checksum_128(reader):
hi, lo = reader.read("!QQ")
return (hi << 64) | lo
| 24.403361
| 91
| 0.583333
|
376f0c416e67a80bc8909f11fa5a3d1c2a41d63c
| 27,516
|
py
|
Python
|
tests/cupy_tests/creation_tests/test_from_data.py
|
prkhrsrvstv1/cupy
|
ea86c8225b575af9d2855fb77a306cf86fd098ea
|
[
"MIT"
] | null | null | null |
tests/cupy_tests/creation_tests/test_from_data.py
|
prkhrsrvstv1/cupy
|
ea86c8225b575af9d2855fb77a306cf86fd098ea
|
[
"MIT"
] | null | null | null |
tests/cupy_tests/creation_tests/test_from_data.py
|
prkhrsrvstv1/cupy
|
ea86c8225b575af9d2855fb77a306cf86fd098ea
|
[
"MIT"
] | null | null | null |
import tempfile
import unittest
import pytest
import cupy
from cupy import cuda
from cupy import testing
import numpy
class TestFromData(unittest.TestCase):
@testing.for_orders('CFAK')
@testing.for_all_dtypes()
@testing.numpy_cupy_array_equal()
def test_array(self, xp, dtype, order):
return xp.array([[1, 2, 3], [2, 3, 4]], dtype=dtype, order=order)
@testing.for_orders('CFAK')
@testing.for_all_dtypes()
@testing.numpy_cupy_array_equal()
def test_array_from_empty_list(self, xp, dtype, order):
return xp.array([], dtype=dtype, order=order)
@testing.for_orders('CFAK')
@testing.for_all_dtypes()
@testing.numpy_cupy_array_equal()
def test_array_from_nested_empty_list(self, xp, dtype, order):
return xp.array([[], []], dtype=dtype, order=order)
@testing.for_orders('CFAK')
@testing.for_all_dtypes()
@testing.numpy_cupy_array_equal()
def test_array_from_numpy(self, xp, dtype, order):
a = testing.shaped_arange((2, 3, 4), numpy, dtype)
return xp.array(a, order=order)
@testing.for_orders('CFAK')
@testing.for_all_dtypes()
@testing.numpy_cupy_array_equal()
def test_array_from_numpy_scalar(self, xp, dtype, order):
a = numpy.array(2, dtype=dtype)
return xp.array(a, order=order)
@testing.for_orders('CFAK')
@testing.for_all_dtypes()
@testing.numpy_cupy_array_equal()
def test_array_from_numpy_broad_cast(self, xp, dtype, order):
a = testing.shaped_arange((2, 1, 4), numpy, dtype)
a = numpy.broadcast_to(a, (2, 3, 4))
return xp.array(a, order=order)
@testing.for_orders('CFAK', name='src_order')
@testing.for_orders('CFAK', name='dst_order')
@testing.for_all_dtypes()
@testing.numpy_cupy_array_equal(strides_check=True)
def test_array_from_list_of_numpy(self, xp, dtype, src_order, dst_order):
# compares numpy.array(<list of numpy.ndarray>) with
# cupy.array(<list of numpy.ndarray>)
a = [
testing.shaped_arange((3, 4), numpy, dtype, src_order) + (12 * i)
for i in range(2)]
return xp.array(a, order=dst_order)
@testing.for_orders('CFAK', name='src_order')
@testing.for_orders('CFAK', name='dst_order')
@testing.for_all_dtypes()
@testing.numpy_cupy_array_equal(strides_check=True)
def test_array_from_list_of_numpy_view(self, xp, dtype, src_order,
dst_order):
# compares numpy.array(<list of numpy.ndarray>) with
# cupy.array(<list of numpy.ndarray>)
# create a list of view of ndarrays
a = [
(testing.shaped_arange((3, 8), numpy,
dtype, src_order) + (24 * i))[:, ::2]
for i in range(2)]
return xp.array(a, order=dst_order)
@testing.for_orders('CFAK')
@testing.for_all_dtypes()
@testing.numpy_cupy_array_equal(strides_check=True)
def test_array_from_list_of_numpy_scalar(self, xp, dtype, order):
# compares numpy.array(<list of numpy.ndarray>) with
# cupy.array(<list of numpy.ndarray>)
a = [numpy.array(i, dtype=dtype) for i in range(2)]
return xp.array(a, order=order)
@testing.for_orders('CFAK', name='src_order')
@testing.for_orders('CFAK', name='dst_order')
@testing.for_all_dtypes()
@testing.numpy_cupy_array_equal(strides_check=True)
def test_array_from_nested_list_of_numpy(self, xp, dtype, src_order,
dst_order):
# compares numpy.array(<list of numpy.ndarray>) with
# cupy.array(<list of numpy.ndarray>)
a = [
[testing.shaped_arange(
(3, 4), numpy, dtype, src_order) + (12 * i)]
for i in range(2)]
return xp.array(a, order=dst_order)
@testing.for_orders('CFAK', name='src_order')
@testing.for_orders('CFAK', name='dst_order')
@testing.for_all_dtypes_combination(names=('dtype1', 'dtype2'))
@testing.numpy_cupy_array_equal(strides_check=True)
def test_array_from_list_of_cupy(
self, xp, dtype1, dtype2, src_order, dst_order):
# compares numpy.array(<list of numpy.ndarray>) with
# cupy.array(<list of cupy.ndarray>)
a = [
testing.shaped_arange((3, 4), xp, dtype1, src_order),
testing.shaped_arange((3, 4), xp, dtype2, src_order),
]
return xp.array(a, order=dst_order)
@testing.for_orders('CFAK', name='src_order')
@testing.for_orders('CFAK', name='dst_order')
@testing.for_all_dtypes()
@testing.numpy_cupy_array_equal(strides_check=True)
def test_array_from_list_of_cupy_view(self, xp, dtype, src_order,
dst_order):
# compares numpy.array(<list of numpy.ndarray>) with
# cupy.array(<list of cupy.ndarray>)
# create a list of view of ndarrays
a = [
(testing.shaped_arange((3, 8), xp,
dtype, src_order) + (24 * i))[:, ::2]
for i in range(2)]
return xp.array(a, order=dst_order)
@testing.for_orders('CFAK', name='src_order')
@testing.for_orders('CFAK', name='dst_order')
@testing.for_all_dtypes()
@testing.numpy_cupy_array_equal(strides_check=True)
def test_array_from_nested_list_of_cupy(self, xp, dtype, src_order,
dst_order):
# compares numpy.array(<list of numpy.ndarray>) with
# cupy.array(<list of cupy.ndarray>)
a = [
[testing.shaped_arange((3, 4), xp, dtype, src_order) + (12 * i)]
for i in range(2)]
return xp.array(a, order=dst_order)
@testing.for_orders('CFAK')
@testing.for_all_dtypes()
@testing.numpy_cupy_array_equal(strides_check=True)
def test_array_from_list_of_cupy_scalar(self, xp, dtype, order):
# compares numpy.array(<list of numpy.ndarray>) with
# cupy.array(<list of cupy.ndarray>)
a = [xp.array(i, dtype=dtype) for i in range(2)]
return xp.array(a, order=order)
@testing.for_orders('CFAK')
@testing.for_all_dtypes()
@testing.numpy_cupy_array_equal()
def test_array_from_nested_list_of_cupy_scalar(self, xp, dtype, order):
# compares numpy.array(<list of numpy.ndarray>) with
# cupy.array(<list of cupy.ndarray>)
a = [[xp.array(i, dtype=dtype)] for i in range(2)]
return xp.array(a, order=order)
@testing.for_orders('CFAK')
@testing.for_all_dtypes()
@testing.numpy_cupy_array_equal()
def test_array_copy(self, xp, dtype, order):
a = testing.shaped_arange((2, 3, 4), xp, dtype)
return xp.array(a, order=order)
@testing.for_orders('CFAK')
@testing.for_all_dtypes()
@testing.numpy_cupy_array_equal()
def test_array_copy_is_copied(self, xp, dtype, order):
a = testing.shaped_arange((2, 3, 4), xp, dtype)
b = xp.array(a, order=order)
a.fill(0)
return b
@testing.for_orders('CFAK')
@testing.for_all_dtypes(name='dtype1', no_complex=True)
@testing.for_all_dtypes(name='dtype2')
@testing.numpy_cupy_array_equal()
def test_array_copy_with_dtype(self, xp, dtype1, dtype2, order):
# complex to real makes no sense
a = testing.shaped_arange((2, 3, 4), xp, dtype1)
return xp.array(a, dtype=dtype2, order=order)
@testing.for_orders('CFAK')
@testing.for_all_dtypes(name='dtype1', no_complex=True)
@testing.for_all_dtypes(name='dtype2')
@testing.numpy_cupy_array_equal()
def test_array_copy_with_dtype_char(self, xp, dtype1, dtype2, order):
# complex to real makes no sense
a = testing.shaped_arange((2, 3, 4), xp, dtype1)
return xp.array(a, dtype=numpy.dtype(dtype2).char, order=order)
@testing.for_orders('CFAK')
@testing.numpy_cupy_array_equal()
def test_array_copy_with_dtype_being_none(self, xp, order):
a = testing.shaped_arange((2, 3, 4), xp)
return xp.array(a, dtype=None, order=order)
@testing.for_orders('CFAK', name='src_order')
@testing.for_orders('CFAK', name='dst_order')
@testing.for_all_dtypes(name='dtype1', no_complex=True)
@testing.for_all_dtypes(name='dtype2')
@testing.numpy_cupy_array_equal(strides_check=True)
def test_array_copy_list_of_numpy_with_dtype(self, xp, dtype1, dtype2,
src_order, dst_order):
# compares numpy.array(<list of numpy.ndarray>) with
# cupy.array(<list of numpy.ndarray>)
a = [
testing.shaped_arange((3, 4), numpy, dtype1, src_order) + (12 * i)
for i in range(2)]
return xp.array(a, dtype=dtype2, order=dst_order)
@testing.for_orders('CFAK', name='src_order')
@testing.for_orders('CFAK', name='dst_order')
@testing.for_all_dtypes(name='dtype1', no_complex=True)
@testing.for_all_dtypes(name='dtype2')
@testing.numpy_cupy_array_equal(strides_check=True)
def test_array_copy_list_of_numpy_with_dtype_char(self, xp, dtype1,
dtype2, src_order,
dst_order):
# compares numpy.array(<list of numpy.ndarray>) with
# cupy.array(<list of numpy.ndarray>)
a = [
testing.shaped_arange((3, 4), numpy, dtype1, src_order) + (12 * i)
for i in range(2)]
return xp.array(a, dtype=numpy.dtype(dtype2).char, order=dst_order)
@testing.for_orders('CFAK', name='src_order')
@testing.for_orders('CFAK', name='dst_order')
@testing.for_all_dtypes(name='dtype1', no_complex=True)
@testing.for_all_dtypes(name='dtype2')
@testing.numpy_cupy_array_equal(strides_check=True)
def test_array_copy_list_of_cupy_with_dtype(self, xp, dtype1, dtype2,
src_order, dst_order):
# compares numpy.array(<list of numpy.ndarray>) with
# cupy.array(<list of cupy.ndarray>)
a = [
testing.shaped_arange((3, 4), xp, dtype1, src_order) + (12 * i)
for i in range(2)]
return xp.array(a, dtype=dtype2, order=dst_order)
@testing.for_orders('CFAK', name='src_order')
@testing.for_orders('CFAK', name='dst_order')
@testing.for_all_dtypes(name='dtype1', no_complex=True)
@testing.for_all_dtypes(name='dtype2')
@testing.numpy_cupy_array_equal(strides_check=True)
def test_array_copy_list_of_cupy_with_dtype_char(self, xp, dtype1, dtype2,
src_order, dst_order):
# compares numpy.array(<list of numpy.ndarray>) with
# cupy.array(<list of cupy.ndarray>)
a = [
testing.shaped_arange((3, 4), xp, dtype1, src_order) + (12 * i)
for i in range(2)]
return xp.array(a, dtype=numpy.dtype(dtype2).char, order=dst_order)
@testing.for_orders('CFAK')
@testing.for_all_dtypes()
@testing.numpy_cupy_array_equal()
def test_array_no_copy(self, xp, dtype, order):
a = testing.shaped_arange((2, 3, 4), xp, dtype)
b = xp.array(a, copy=False, order=order)
a.fill(0)
return b
@testing.for_orders('CFAK')
@testing.for_all_dtypes()
@testing.numpy_cupy_array_equal()
def test_array_f_contiguous_input(self, xp, dtype, order):
a = testing.shaped_arange((2, 3, 4), xp, dtype, order='F')
b = xp.array(a, copy=False, order=order)
return b
@testing.for_all_dtypes()
@testing.numpy_cupy_array_equal()
def test_array_f_contiguous_output(self, xp, dtype):
a = testing.shaped_arange((2, 3, 4), xp, dtype)
b = xp.array(a, copy=False, order='F')
assert b.flags.f_contiguous
return b
@testing.multi_gpu(2)
def test_array_multi_device(self):
with cuda.Device(0):
x = testing.shaped_arange((2, 3, 4), cupy, dtype='f')
with cuda.Device(1):
y = cupy.array(x)
assert isinstance(y, cupy.ndarray)
assert x is not y # Do copy
assert int(x.device) == 0
assert int(y.device) == 1
testing.assert_array_equal(x, y)
@testing.multi_gpu(2)
def test_array_multi_device_zero_size(self):
with cuda.Device(0):
x = testing.shaped_arange((0,), cupy, dtype='f')
with cuda.Device(1):
y = cupy.array(x)
assert isinstance(y, cupy.ndarray)
assert x is not y # Do copy
assert x.device.id == 0
assert y.device.id == 1
testing.assert_array_equal(x, y)
@testing.for_all_dtypes()
@testing.numpy_cupy_array_equal()
def test_array_no_copy_ndmin(self, xp, dtype):
a = testing.shaped_arange((2, 3, 4), xp, dtype)
b = xp.array(a, copy=False, ndmin=5)
assert a.shape == (2, 3, 4)
a.fill(0)
return b
@testing.for_all_dtypes()
@testing.numpy_cupy_array_equal()
def test_array_from_big_endian(self, xp, dtype):
dtype = numpy.dtype(dtype).newbyteorder('>')
a = testing.shaped_arange((2, 3, 4), numpy, dtype)
b = xp.array(a)
# Make a computation here as just moving big-endian data back and forth
# happens to work before the change in #5828
return b + b
@testing.for_all_dtypes()
@testing.numpy_cupy_array_equal()
def test_array_from_list_of_numpy_big_endian(self, xp, dtype):
dtype = numpy.dtype(dtype).newbyteorder('>')
a = [testing.shaped_arange((3, 4), numpy, dtype) for i in range(2)]
b = xp.array(a)
# Make a computation here as just moving big-endian data back and forth
# happens to work before the change in #5828
return b + b
@testing.for_all_dtypes()
@testing.numpy_cupy_array_equal()
def test_asarray(self, xp, dtype):
a = testing.shaped_arange((2, 3, 4), xp, dtype)
return xp.asarray(a)
@testing.for_all_dtypes()
@testing.numpy_cupy_array_equal()
def test_asarray_is_not_copied(self, xp, dtype):
a = testing.shaped_arange((2, 3, 4), xp, dtype)
b = xp.asarray(a)
a.fill(0)
return b
@testing.for_CF_orders()
@testing.for_all_dtypes()
@testing.numpy_cupy_array_equal()
def test_asarray_with_order(self, xp, dtype, order):
a = testing.shaped_arange((2, 3, 4), xp, dtype)
b = xp.asarray(a, order=order)
if order in ['F', 'f']:
assert b.flags.f_contiguous
else:
assert b.flags.c_contiguous
return b
@testing.for_CF_orders()
@testing.for_all_dtypes()
@testing.numpy_cupy_array_equal()
def test_asarray_preserves_numpy_array_order(self, xp, dtype, order):
a_numpy = testing.shaped_arange((2, 3, 4), numpy, dtype, order)
b = xp.asarray(a_numpy)
assert b.flags.f_contiguous == a_numpy.flags.f_contiguous
assert b.flags.c_contiguous == a_numpy.flags.c_contiguous
return b
@testing.for_CF_orders()
@testing.for_all_dtypes()
@testing.numpy_cupy_array_equal()
def test_asanyarray_with_order(self, xp, dtype, order):
a = testing.shaped_arange((2, 3, 4), xp, dtype)
b = xp.asanyarray(a, order=order)
if order in ['F', 'f']:
assert b.flags.f_contiguous
else:
assert b.flags.c_contiguous
return b
@testing.for_all_dtypes()
@testing.numpy_cupy_array_equal()
def test_asanyarray_from_big_endian(self, xp, dtype):
dtype = numpy.dtype(dtype).newbyteorder('>')
a = testing.shaped_arange((2, 3, 4), numpy, dtype)
b = xp.asanyarray(a)
# Make a computation here as just moving big-endian data back and forth
# happens to work before the change in #5828
return b + b
@testing.for_CF_orders()
@testing.for_all_dtypes()
@testing.numpy_cupy_array_equal()
def test_asarray_from_numpy(self, xp, dtype, order):
a = testing.shaped_arange((2, 3, 4), numpy, dtype)
b = xp.asarray(a, order=order)
if order in ['F', 'f']:
assert b.flags.f_contiguous
else:
assert b.flags.c_contiguous
return b
@testing.for_CF_orders()
@testing.for_all_dtypes()
@testing.numpy_cupy_array_equal()
def test_asarray_with_order_copy_behavior(self, xp, dtype, order):
a = testing.shaped_arange((2, 3, 4), xp, dtype)
b = xp.asarray(a, order=order)
a.fill(0)
return b
def test_ascontiguousarray_on_noncontiguous_array(self):
a = testing.shaped_arange((2, 3, 4))
b = a.transpose(2, 0, 1)
c = cupy.ascontiguousarray(b)
assert c.flags.c_contiguous
testing.assert_array_equal(b, c)
def test_ascontiguousarray_on_contiguous_array(self):
a = testing.shaped_arange((2, 3, 4))
b = cupy.ascontiguousarray(a)
assert a is b
@testing.numpy_cupy_array_equal()
def test_asarray_cuda_array_zero_dim(self, xp):
a = xp.ones(())
return xp.ascontiguousarray(a)
@testing.numpy_cupy_array_equal()
def test_asarray_cuda_array_zero_dim_dtype(self, xp):
a = xp.ones((), dtype=numpy.float64)
return xp.ascontiguousarray(a, dtype=numpy.int64)
@testing.for_all_dtypes()
@testing.numpy_cupy_array_equal()
def test_asarray_from_big_endian(self, xp, dtype):
dtype = numpy.dtype(dtype).newbyteorder('>')
a = testing.shaped_arange((2, 3, 4), numpy, dtype)
b = xp.asarray(a)
# Make a computation here as just moving big-endian data back and forth
# happens to work before the change in #5828
return b + b
@testing.for_CF_orders()
@testing.for_all_dtypes()
@testing.numpy_cupy_array_equal()
def test_copy(self, xp, dtype, order):
a = xp.zeros((2, 3, 4), dtype=dtype)
b = xp.copy(a, order=order)
a[1] = 1
return b
@testing.multi_gpu(2)
@testing.for_CF_orders()
@testing.for_all_dtypes()
def test_copy_multigpu(self, dtype, order):
with cuda.Device(0):
src = cupy.random.uniform(-1, 1, (2, 3)).astype(dtype)
with cuda.Device(1):
dst = cupy.copy(src, order)
testing.assert_allclose(src, dst, rtol=0, atol=0)
@testing.for_CF_orders()
@testing.numpy_cupy_equal()
def test_copy_order(self, xp, order):
a = xp.zeros((2, 3, 4), order=order)
b = xp.copy(a)
return (b.flags.c_contiguous, b.flags.f_contiguous)
@testing.numpy_cupy_array_equal()
def test_asfortranarray_cuda_array_zero_dim(self, xp):
a = xp.ones(())
return xp.asfortranarray(a)
@testing.for_all_dtypes_combination(['dtype_a', 'dtype_b'],
no_complex=True)
@testing.numpy_cupy_array_equal()
def test_asfortranarray_cuda_array_zero_dim_dtype(
self, xp, dtype_a, dtype_b):
a = xp.ones((), dtype=dtype_a)
return xp.asfortranarray(a, dtype=dtype_b)
@testing.numpy_cupy_array_equal()
def test_fromfile(self, xp):
with tempfile.TemporaryFile() as fh:
fh.write(b"\x00\x01\x02\x03\x04")
fh.flush()
fh.seek(0)
return xp.fromfile(fh, dtype="u1")
@testing.numpy_cupy_array_equal()
def test_fromfile_big_endian(self, xp):
with tempfile.TemporaryFile() as fh:
fh.write(b"\x00\x00\x00\x01")
fh.flush()
fh.seek(0)
a = xp.fromfile(fh, dtype='>u4')
# Make a computation here as just moving big-endian data back and
# forth happens to work before the change in #5828
return a + a
max_cuda_array_interface_version = 3
@testing.parameterize(*testing.product({
'ver': tuple(range(max_cuda_array_interface_version+1)),
'strides': (False, None, True),
}))
@pytest.mark.skipif(
cupy.cuda.runtime.is_hip, reason='HIP does not support this')
class TestCudaArrayInterface(unittest.TestCase):
@testing.for_all_dtypes()
def test_base(self, dtype):
a = testing.shaped_arange((2, 3, 4), cupy, dtype)
b = cupy.asarray(
DummyObjectWithCudaArrayInterface(a, self.ver, self.strides))
testing.assert_array_equal(a, b)
@testing.for_all_dtypes()
def test_not_copied(self, dtype):
a = testing.shaped_arange((2, 3, 4), cupy, dtype)
b = cupy.asarray(
DummyObjectWithCudaArrayInterface(a, self.ver, self.strides))
a.fill(0)
testing.assert_array_equal(a, b)
@testing.for_all_dtypes()
def test_order(self, dtype):
a = testing.shaped_arange((2, 3, 4), cupy, dtype)
b = cupy.asarray(
DummyObjectWithCudaArrayInterface(a, self.ver, self.strides),
order='F')
assert b.flags.f_contiguous
testing.assert_array_equal(a, b)
@testing.for_all_dtypes()
def test_with_strides(self, dtype):
a = testing.shaped_arange((2, 3, 4), cupy, dtype).T
b = cupy.asarray(
DummyObjectWithCudaArrayInterface(a, self.ver, self.strides))
assert a.strides == b.strides
assert a.nbytes == b.data.mem.size
@testing.for_all_dtypes()
def test_with_zero_size_array(self, dtype):
a = testing.shaped_arange((0,), cupy, dtype)
b = cupy.asarray(
DummyObjectWithCudaArrayInterface(a, self.ver, self.strides))
assert a.strides == b.strides
assert a.nbytes == b.data.mem.size
assert a.data.ptr == 0
assert a.size == 0
@testing.for_all_dtypes()
def test_asnumpy(self, dtype):
a = testing.shaped_arange((2, 3, 4), cupy, dtype)
b = DummyObjectWithCudaArrayInterface(a, self.ver, self.strides)
a_cpu = cupy.asnumpy(a)
b_cpu = cupy.asnumpy(b)
testing.assert_array_equal(a_cpu, b_cpu)
def test_big_endian(self):
a = cupy.array([0x1, 0x0, 0x0, 0x0], dtype=numpy.int8)
dtype = numpy.dtype('>i4')
shape = 1,
strides = 4,
data = a.data.ptr
b = DummyObjectWithCudaArrayInterface(
(shape, strides, dtype.str, dtype.descr, data),
self.ver, self.strides)
with pytest.raises(ValueError):
cupy.asarray(b)
@testing.parameterize(*testing.product({
'ver': tuple(range(1, max_cuda_array_interface_version+1)),
'strides': (False, None, True),
}))
@pytest.mark.skipif(
cupy.cuda.runtime.is_hip, reason='HIP does not support this')
class TestCudaArrayInterfaceMaskedArray(unittest.TestCase):
# TODO(leofang): update this test when masked array is supported
@testing.for_all_dtypes()
def test_masked_array(self, dtype):
a = testing.shaped_arange((2, 3, 4), cupy, dtype)
mask = testing.shaped_arange((2, 3, 4), cupy, dtype)
a = DummyObjectWithCudaArrayInterface(a, self.ver, self.strides, mask)
with pytest.raises(ValueError) as ex:
b = cupy.asarray(a) # noqa
assert 'does not support' in str(ex.value)
# marked slow as either numpy or cupy could go OOM in this test
@testing.slow
@pytest.mark.skipif(
cupy.cuda.runtime.is_hip, reason='HIP does not support this')
class TestCudaArrayInterfaceBigArray(unittest.TestCase):
def test_with_over_size_array(self):
# real example from #3009
size = 5 * 10**8
a = testing.shaped_random((size,), cupy, cupy.float64)
b = cupy.asarray(DummyObjectWithCudaArrayInterface(a, 2, None))
testing.assert_array_equal(a, b)
@pytest.mark.skipif(
cupy.cuda.runtime.is_hip, reason='HIP does not support this')
class DummyObjectWithCudaArrayInterface(object):
def __init__(self, a, ver, include_strides=False, mask=None, stream=None):
assert ver in tuple(range(max_cuda_array_interface_version+1))
self.a = None
if isinstance(a, cupy.ndarray):
self.a = a
else:
self.shape, self.strides, self.typestr, self.descr, self.data = a
self.ver = ver
self.include_strides = include_strides
self.mask = mask
self.stream = stream
@property
def __cuda_array_interface__(self):
if self.a is not None:
desc = {
'shape': self.a.shape,
'typestr': self.a.dtype.str,
'descr': self.a.dtype.descr,
'data': (self.a.data.ptr, False),
'version': self.ver,
}
if self.a.flags.c_contiguous:
if self.include_strides is True:
desc['strides'] = self.a.strides
elif self.include_strides is None:
desc['strides'] = None
else: # self.include_strides is False
pass
else: # F contiguous or neither
desc['strides'] = self.a.strides
else:
desc = {
'shape': self.shape,
'typestr': self.typestr,
'descr': self.descr,
'data': (self.data, False),
'version': self.ver,
}
if self.include_strides is True:
desc['strides'] = self.strides
elif self.include_strides is None:
desc['strides'] = None
else: # self.include_strides is False
pass
if self.mask is not None:
desc['mask'] = self.mask
# The stream field is kept here for compliance. However, the
# synchronization is done via a cpdef function call, which cannot
# be mock-tested here.
if self.stream is not None:
if self.stream is cuda.Stream.null:
desc['stream'] = cuda.runtime.streamLegacy
elif (not cuda.runtime.is_hip) and self.stream is cuda.Stream.ptds:
desc['stream'] = cuda.runtime.streamPerThread
else:
desc['stream'] = self.stream.ptr
return desc
@testing.parameterize(
*testing.product({
'ndmin': [0, 1, 2, 3],
'copy': [True, False],
'xp': [numpy, cupy]
})
)
class TestArrayPreservationOfShape(unittest.TestCase):
@testing.for_all_dtypes()
def test_cupy_array(self, dtype):
shape = 2, 3
a = testing.shaped_arange(shape, self.xp, dtype)
cupy.array(a, copy=self.copy, ndmin=self.ndmin)
# Check if cupy.ndarray does not alter
# the shape of the original array.
assert a.shape == shape
@testing.parameterize(
*testing.product({
'ndmin': [0, 1, 2, 3],
'copy': [True, False],
'xp': [numpy, cupy]
})
)
class TestArrayCopy(unittest.TestCase):
@testing.for_all_dtypes()
def test_cupy_array(self, dtype):
a = testing.shaped_arange((2, 3), self.xp, dtype)
actual = cupy.array(a, copy=self.copy, ndmin=self.ndmin)
should_copy = (self.xp is numpy) or self.copy
# TODO(Kenta Oono): Better determination of copy.
is_copied = not ((actual is a) or (actual.base is a) or
(actual.base is a.base and a.base is not None))
assert should_copy == is_copied
class TestArrayInvalidObject(unittest.TestCase):
def test_invalid_type(self):
a = numpy.array([1, 2, 3], dtype=object)
with self.assertRaises(ValueError):
cupy.array(a)
| 37.953103
| 79
| 0.619603
|
c23a5bcb3ebed2dcfabb49a4f25e69837a6efeaa
| 17,771
|
py
|
Python
|
libraries/gui/tab_quiver.py
|
loslab/hware
|
85771404a9a75982e3196bfe2acc26ecb30350e8
|
[
"BSD-3-Clause"
] | 3
|
2019-01-23T13:11:51.000Z
|
2022-03-16T22:44:52.000Z
|
libraries/gui/tab_quiver.py
|
loslab/hware
|
85771404a9a75982e3196bfe2acc26ecb30350e8
|
[
"BSD-3-Clause"
] | 1
|
2022-03-06T15:16:40.000Z
|
2022-03-11T20:27:10.000Z
|
libraries/gui/tab_quiver.py
|
loslab/hware
|
85771404a9a75982e3196bfe2acc26ecb30350e8
|
[
"BSD-3-Clause"
] | 6
|
2019-01-23T13:12:00.000Z
|
2022-03-19T13:15:45.000Z
|
import sys
from matplotlib.backends.backend_qt5agg import FigureCanvasQTAgg as FigureCanvas
import matplotlib.pyplot as plt
from PyQt5 import QtCore
from PyQt5.QtWidgets import (QApplication, QCheckBox, QComboBox, QGridLayout,
QLabel, QLineEdit, QProgressBar, QPushButton, QSizePolicy, QSlider,
QSpinBox, QTextEdit, QWidget)
from PyQt5.QtCore import Qt
from PyQt5.QtGui import QFont
from libraries import helpfunctions
class TabQuiver(QWidget):
"""
display + save motion as heatmap and quiver plot
"""
def __init__(self, parent):
super(TabQuiver, self).__init__(parent)
self.initUI()
self.parent=parent
def initUI(self):
# define + place gui elements
self.info = QTextEdit()
self.info.setText('In this tab, motion is visualized by heatmaps and quiver plots. Use the slider to look at different frames. You can save individual frames or the whole video.')
self.info.setReadOnly(True)
self.info.setMaximumHeight(50)
self.info.setFixedWidth(700)
self.info.setStyleSheet("background-color: LightSkyBlue")
"""
#create a label for choosing the ROI
label_advanced_choose_ROI = QLabel('Choose the ROI to be displayed: ')
label_advanced_choose_ROI.setFont(QFont("Times",weight=QFont.Bold))
#create a drop-down menu for choosing the ROI to be displayed
self.advanced_combobox = QComboBox()
self.advanced_combobox.addItem('Full image')
#self.advanced_combobox.activated[str].connect(self.on_chooseROI)
self.advanced_combobox.currentIndexChanged[int].connect(self.on_chooseROI)
"""
self.label_heatmaps = QLabel('Heatmaps')
self.label_heatmaps.setFont(QFont("Times",weight=QFont.Bold))
self.label_quivers = QLabel('Quivers')
self.label_quivers.setFont(QFont("Times",weight=QFont.Bold))
self.btn_heatmap_vid = QPushButton('Export Heatmap Video')
self.btn_heatmap_vid.clicked.connect(self.on_saveHeatmapvideo)
#slider to switch between heatmaps
self.slider_heatmaps = QSlider(Qt.Horizontal)
self.slider_heatmaps.setMinimum(0)
self.slider_heatmaps.setMaximum(100)
self.slider_heatmaps.setValue(0)
#self.slider_heatmaps.setTickPosition(QSlider.TicksBelow)
#self.slider_heatmaps.setTickInterval(5)
self.slider_heatmaps.setFixedWidth(500)
self.slider_heatmaps.valueChanged.connect(self.slider_heatmaps_valueChanged)
self.btn_heatmap_save = QPushButton('Save this frame')
self.btn_heatmap_save.clicked.connect(self.on_saveHeatmap)
self.btn_quiver_save = QPushButton('Save this frame')
self.btn_quiver_save.clicked.connect(self.on_saveQuiver)
self.slider_quiver = QSlider(Qt.Horizontal)
self.slider_quiver.setMinimum(0)
self.slider_quiver.setMaximum(100)
self.slider_quiver.setValue(0)
self.slider_quiver.setFixedWidth(500)
#self.slider_quiver.setTickPosition(QSlider.TicksBelow)
self.slider_quiver.valueChanged.connect(self.slider_quiver_valueChanged)
#self.slider_quiver.setTickPosition(QSlider.TicksBelow)
self.slider_quiver.setTickInterval(5)
#display the chosen heatmap
self.label_heatmap_result = QLabel('Heatmap result:')
self.fig_heatmaps, self.ax_heatmaps = plt.subplots(1,1, figsize = (16,12))
self.fig_heatmaps.subplots_adjust(bottom=0, top=1, left=0, right=1)
self.canvas_heatmaps = FigureCanvas(self.fig_heatmaps)
#button for changing the quiver export settings
self.btn_quiver_settings = QPushButton('Change quiver export settings')
#self.btn_quiver_settings.clicked.connect(self.on_change_quiverSettings)
#Button for starting the creation of quiver plots and quiver video
self.btn_quivers_video = QPushButton('Export quiver video')
self.btn_quivers_video.clicked.connect(self.on_saveQuivervideo)
#display the chosen quiver plot
self.label_quiver_result = QLabel('Quiver result: ')
# display figures for quivers in Canvas
self.fig_quivers, self.ax_quivers = plt.subplots(1,1, figsize = (16,12))
self.fig_quivers.subplots_adjust(bottom=0, top=1, left=0, right=1)
self.canvas_quivers = FigureCanvas(self.fig_quivers)
self.canvas_heatmaps.setFixedSize(500,500)
self.canvas_quivers.setFixedSize(500,500)
"""
#succed-button
self.button_succeed_heatmaps = QPushButton('Heatmap-video creation was successful')
self.button_succeed_heatmaps.setStyleSheet("background-color: IndianRed")
self.button_succeed_quivers = QPushButton('Quiver-video creation was successful')
self.button_succeed_quivers.setStyleSheet("background-color: IndianRed")
"""
#progressbar for heatmaps
self.progressbar_heatmaps = QProgressBar(self)
self.progressbar_heatmaps.setValue(0)
self.progressbar_heatmaps.setFixedWidth(300)
#progressbar for quivers
self.progressbar_quivers = QProgressBar(self)
self.progressbar_quivers.setValue(0)
self.progressbar_quivers.setFixedWidth(300)
#define layout
colHeatmap = 0
colQuiver = 2
self.grid_overall = QGridLayout()
self.grid_overall.addWidget(self.info, 0,0,1,4)
self.grid_overall.addWidget(self.label_heatmaps, 1, colHeatmap,1,2)
self.grid_overall.addWidget(self.label_quivers, 1, colQuiver,1,2)
self.grid_overall.addWidget(self.canvas_heatmaps, 2, colHeatmap,1,2)
self.grid_overall.addWidget(self.canvas_quivers, 2, colQuiver,1,2)
self.grid_overall.addWidget(self.label_heatmap_result, 3, colHeatmap,1,2)
self.grid_overall.addWidget(self.label_quiver_result, 3, colQuiver,1,2)
self.grid_overall.addWidget(self.slider_heatmaps, 4, colHeatmap,1,2)
self.grid_overall.addWidget(self.slider_quiver, 4, colQuiver,1,2)
self.grid_overall.addWidget(self.btn_heatmap_vid, 5, colHeatmap)
self.grid_overall.addWidget(self.btn_quivers_video, 5, colQuiver)
self.grid_overall.addWidget(self.progressbar_heatmaps, 5, colHeatmap+1)
self.grid_overall.addWidget(self.progressbar_quivers, 5, colQuiver+1)
#self.grid_overall.addWidget(self.btn_quiver_settings, 2, colQuiver)
self.grid_overall.addWidget(self.btn_heatmap_save, 6, colHeatmap)
self.grid_overall.addWidget(self.btn_quiver_save, 6, colQuiver)
self.grid_overall.setSpacing(15)
self.grid_overall.setAlignment(Qt.AlignTop|Qt.AlignLeft)
self.setLayout(self.grid_overall)
for label in self.findChildren(QLabel):
label.setSizePolicy(QSizePolicy.Fixed, QSizePolicy.Fixed)
for LineEdit in self.findChildren(QLineEdit):
LineEdit.setSizePolicy(QSizePolicy.Fixed, QSizePolicy.Fixed)
for Slider in self.findChildren(QSlider):
Slider.setSizePolicy(QSizePolicy.Fixed, QSizePolicy.Fixed)
for SpinBox in self.findChildren(QSpinBox):
SpinBox.setSizePolicy(QSizePolicy.Fixed, QSizePolicy.Fixed)
for CheckBox in self.findChildren(QCheckBox):
CheckBox.setSizePolicy(QSizePolicy.Fixed, QSizePolicy.Fixed)
for progbar in self.findChildren(QProgressBar):
progbar.setSizePolicy(QSizePolicy.Fixed, QSizePolicy.Fixed)
#for btn in self.findChildren(QPushButton):
# btn.setSizePolicy(QSizePolicy.Fixed, QSizePolicy.Fixed)
def init_ohw(self):
"""
set values from current_ohw
enable sliders/ plot if corresponding data is present
"""
self.current_ohw = self.parent.current_ohw #check if this works... looks good
self.clear_heatmaps()
self.clear_quivers()
# init heatmap part
if self.parent.current_ohw.analysis_meta["motion_calculated"]:
self.btn_heatmap_vid.setEnabled(True)
self.btn_heatmap_save.setEnabled(True)
self.slider_heatmaps.setMaximum(self.current_ohw.absMotions.shape[0]-1)
self.slider_heatmaps.setValue(0)
self.slider_heatmaps.setEnabled(True)
self.init_heatmaps()
else:
self.btn_heatmap_vid.setEnabled(False)
self.btn_heatmap_save.setEnabled(False)
self.slider_heatmaps.setEnabled(False)
self.placeholder_heatmaps()
# init quiver part
if self.parent.current_ohw.analysis_meta["has_MVs"] and self.parent.current_ohw.video_loaded:
self.btn_quiver_save.setEnabled(True)
self.btn_quivers_video.setEnabled(True)
self.slider_quiver.setMaximum(self.current_ohw.mean_absMotions.shape[0]-1)# or introduce new variable which counts the amount of motion timepoints
self.slider_quiver.setValue(0)
self.slider_quiver.setEnabled(True)
self.init_quivers()
else:
self.btn_quiver_save.setEnabled(False)
self.btn_quivers_video.setEnabled(False)
self.slider_quiver.setEnabled(False)
self.placeholder_quivers()
def init_heatmaps(self):
scale_max = helpfunctions.get_scale_maxMotion2(self.current_ohw.absMotions) #decide on which scale to use
self.imshow_heatmaps = self.ax_heatmaps.imshow(self.current_ohw.absMotions[0],
vmin = 0, vmax = scale_max, cmap = 'jet', interpolation = 'bilinear')
self.canvas_heatmaps.draw()
"""
# don't add title in gui yet/ adjust size...
cbar_heatmaps = self.fig_heatmaps.colorbar(self.imshow_heatmaps)
cbar_heatmaps.ax.tick_params(labelsize=20)
for l in cbar_heatmaps.ax.yaxis.get_ticklabels():
l.set_weight("bold")
self.ax_heatmaps.set_title('Motion [µm/s]', fontsize = 16, fontweight = 'bold')
"""
def clear_heatmaps(self):
self.ax_heatmaps.clear()
self.ax_heatmaps.axis('off')
self.canvas_heatmaps.draw()
def placeholder_heatmaps(self):
self.ax_heatmaps.text(0.5, 0.5,'no motion calculated/ loaded',
size=16, ha='center', va='center', backgroundcolor='indianred', color='w')
self.canvas_heatmaps.draw()
def init_quivers(self):
blockwidth = self.current_ohw.analysis_meta["MV_parameters"]["blockwidth"]
microns_per_px = self.current_ohw.videometa["microns_per_px"]
scalingfactor = self.current_ohw.analysis_meta["scalingfactor"]
scale_max = helpfunctions.get_scale_maxMotion2(self.current_ohw.absMotions)
skipquivers = int(self.parent.config["DEFAULT QUIVER SETTINGS"]['quiver_density']) # store in ohw object!
distance_between_arrows = blockwidth * skipquivers
arrowscale = 1 / (distance_between_arrows / scale_max)
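# e.g. with blockwidth = 16 px and quiver_density = 4 (hypothetical values), arrows sit
# 64 px apart; with scale_max = 8 the arrowscale is 8 / 64 = 0.125, so the largest
# motion vector is drawn roughly as long as the gap between neighbouring arrows.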
#self.MotionCoordinatesX, self.MotionCoordinatesY = np.meshgrid(np.arange(blockwidth/2, self.current_ohw.scaledImageStack.shape[2]-blockwidth/2, blockwidth)+1, np.arange(blockwidth/2, self.current_ohw.scaledImageStack.shape[1]-blockwidth/2+1, blockwidth)) #changed arange range, double check!
self.qslice=(slice(None,None,skipquivers),slice(None,None,skipquivers))
qslice = self.qslice
self.imshow_quivers = self.ax_quivers.imshow(
self.current_ohw.analysisImageStack[0], cmap = "gray",
vmin = self.current_ohw.videometa["Blackval"], vmax = self.current_ohw.videometa["Whiteval"])
self.quiver_quivers = self.ax_quivers.quiver(
self.current_ohw.MotionCoordinatesX[qslice],
self.current_ohw.MotionCoordinatesY[qslice],
self.current_ohw.QuiverMotionX[0][qslice],
self.current_ohw.QuiverMotionY[0][qslice],
pivot='mid', color='r', units ="xy", scale_units = "xy", angles = "xy",
scale = arrowscale, width = 3, headwidth = 2, headlength = 3) #adjust scale to max. movement #width = blockwidth / 4?
self.canvas_quivers.draw()
def clear_quivers(self):
self.ax_quivers.clear()
self.ax_quivers.axis('off')
self.canvas_quivers.draw()
def placeholder_quivers(self):
self.ax_quivers.text(0.5, 0.5,'no motion + video calculated/ loaded',
size=16, ha='center', va='center', backgroundcolor='indianred', color='w')
self.canvas_quivers.draw()
def slider_quiver_valueChanged(self):
frame = self.slider_quiver.value()
time = round(frame / self.current_ohw.videometa["fps"], 3)
self.updateQuiver(frame)
self.label_quiver_result.setText('Quiverplot of frame ' + str(frame) + ' at t = ' + str(time) + ' s')
def slider_heatmaps_valueChanged(self):
frame = self.slider_heatmaps.value()
time = round(frame / self.current_ohw.videometa["fps"], 3)
self.updateHeatMap(frame)
self.label_heatmap_result.setText('Heatmap of frame ' + str(frame) + ' at t = ' + str(time) + ' s')
def updateQuiver(self, frame):
#callback when slider is moved
self.imshow_quivers.set_data(self.current_ohw.analysisImageStack[frame]) #introduce a displayImageStack here?
self.quiver_quivers.set_UVC(self.current_ohw.QuiverMotionX[frame][self.qslice], self.current_ohw.QuiverMotionY[frame][self.qslice])
self.canvas_quivers.draw()
def updateHeatMap(self, frame):
#callback when slider is moved
self.imshow_heatmaps.set_data(self.current_ohw.absMotions[frame])
self.canvas_heatmaps.draw()
def updateQuiverBrightness(self,vmin,vmax):
self.imshow_quivers.set_clim(vmin=vmin, vmax=vmax)
self.canvas_quivers.draw()
def on_saveQuivervideo(self):
"""
saves the quiver video
"""
#reset the color of success-button
#self.button_succeed_quivers.setStyleSheet("background-color: IndianRed")
#self.progressbar_quivers.setValue(0)
"""
if self.quiver_settings['one_view']:
#export one view quivers
save_quiver1_thread = self.current_ohw.save_quiver_thread(singleframe = False, skipquivers = int(self.quiver_settings['quiver_density']), t_cut=float(self.quiver_settings['video_length']))
save_quiver1_thread.start()
self.progressbar_quivers.setRange(0,0)
save_quiver1_thread.finished.connect(self.finish_saveQuivervideo)
if self.quiver_settings['three_views']:
#export three views quivers
save_quiver3_thread = self.current_ohw.save_quivervid3_thread(skipquivers = int(self.quiver_settings['quiver_density']), t_cut=float(self.quiver_settings['video_length']))
save_quiver3_thread.start()
self.progressbar_quivers.setRange(0,0)
save_quiver3_thread.finished.connect(self.finish_saveQuivervideo)
"""
save_quiver_thread = self.current_ohw.save_quiver3_thread(singleframe = False, skipquivers = 4)
save_quiver_thread.start()
self.progressbar_quivers.setRange(0,0)
save_quiver_thread.finished.connect(self.finish_saveQuivervideo)
def finish_saveQuivervideo(self):
self.progressbar_quivers.setRange(0,1)
self.progressbar_quivers.setValue(1)
helpfunctions.msgbox(self, 'Quiver was saved successfully')
#self.button_succeed_quivers.setStyleSheet("background-color: YellowGreen")
def on_saveQuiver(self):
singleframe = int(self.slider_quiver.value())
"""
#save the different views if chosen by the user
if self.quiver_settings['one_view']:
self.current_ohw.save_quiver(singleframe = singleframe, skipquivers = int(self.quiver_settings['quiver_density']))
if self.quiver_settings['three_views']:
self.current_ohw.save_quivervid3(singleframe = singleframe, skipquivers = int(self.quiver_settings['quiver_density']))
"""
self.current_ohw.save_quiver3(singleframe = singleframe)
helpfunctions.msgbox(self, 'Quiver of frame ' + str(singleframe) + ' was saved successfully')
def on_saveHeatmapvideo(self):
"""
saves the heatmap video
"""
self.progressbar_heatmaps.setValue(0)
save_heatmap_thread = self.current_ohw.save_heatmap_thread(singleframe = False)
save_heatmap_thread.start()
self.progressbar_heatmaps.setRange(0,0)
save_heatmap_thread.finished.connect(self.finish_saveHeatmapvideo)
def finish_saveHeatmapvideo(self):
self.progressbar_heatmaps.setRange(0,1)
self.progressbar_heatmaps.setValue(1)
helpfunctions.msgbox(self, 'Heatmap video was saved successfully')
def on_saveHeatmap(self):
"""
saves the selected frame (singleframe = framenumber)
"""
singleframe=self.slider_heatmaps.value()
self.current_ohw.save_heatmap(singleframe = singleframe)
helpfunctions.msgbox(self, 'Heatmap of frame ' + str(singleframe) + ' was saved successfully')
| 48.02973
| 301
| 0.670643
|
fddae63d2f96384c2c2c0d53a64ecd4ad8c9a4c8
| 2,025
|
py
|
Python
|
ludwig/data/dataframe/modin.py
|
dantreiman/ludwig
|
daeffd21f9eef524afb2037763abd07a93228c2a
|
[
"Apache-2.0"
] | 7,739
|
2019-02-11T14:06:31.000Z
|
2020-12-16T18:30:29.000Z
|
ludwig/data/dataframe/modin.py
|
dantreiman/ludwig
|
daeffd21f9eef524afb2037763abd07a93228c2a
|
[
"Apache-2.0"
] | 769
|
2019-02-11T16:13:20.000Z
|
2020-12-16T17:26:11.000Z
|
ludwig/data/dataframe/modin.py
|
dantreiman/ludwig
|
daeffd21f9eef524afb2037763abd07a93228c2a
|
[
"Apache-2.0"
] | 975
|
2019-02-11T15:55:54.000Z
|
2020-12-14T21:45:39.000Z
|
#! /usr/bin/env python
# Copyright (c) 2022 Predibase, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
import modin.pandas as pd
import numpy as np
from ludwig.data.dataframe.base import DataFrameEngine
class ModinEngine(DataFrameEngine):
def __init__(self, **kwargs):
super().__init__()
def df_like(self, df, proc_cols):
# df argument unused for pandas, which can instantiate df directly
return pd.DataFrame(proc_cols)
def parallelize(self, data):
return data
def persist(self, data):
return data
def compute(self, data):
return data
def from_pandas(self, df):
return df
def map_objects(self, series, map_fn, meta=None):
return series.map(map_fn)
def map_partitions(self, series, map_fn, meta=None):
return map_fn(series)
def apply_objects(self, df, apply_fn, meta=None):
return df.apply(apply_fn, axis=1)
def reduce_objects(self, series, reduce_fn):
return reduce_fn(series)
def to_parquet(self, df, path):
df.to_parquet(path, engine="pyarrow")
def to_ray_dataset(self, df):
from ray.data import from_modin
return from_modin(df)
@property
def array_lib(self):
return np
@property
def df_lib(self):
return pd
@property
def partitioned(self):
return False
def set_parallelism(self, parallelism):
pass
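# A minimal, illustrative usage sketch: it assumes modin (with a working Ray or
# Dask backend) is installed, and the column names below are placeholders. It
# shows roughly how the DataFrameEngine helpers compose: map a plain Python
# function over a Series, then assemble processed columns into a new frame.
if __name__ == "__main__":
    engine = ModinEngine()
    source = pd.DataFrame({"text": ["a", "bb", "ccc"]})
    lengths = engine.map_objects(source["text"], len)
    processed = engine.df_like(source, {"text_length": lengths})
    print(engine.compute(processed))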
| 26.298701
| 80
| 0.652346
|
f08ca06a07e69c6e68f6891baa43317a399765d3
| 465
|
py
|
Python
|
subscription/migrations/0012_auto_20200603_1405.py
|
BuildForSDG/dijiElimu
|
514a90d6f0480516aa209891224cf9dfe6be1607
|
[
"MIT"
] | null | null | null |
subscription/migrations/0012_auto_20200603_1405.py
|
BuildForSDG/dijiElimu
|
514a90d6f0480516aa209891224cf9dfe6be1607
|
[
"MIT"
] | 20
|
2020-05-12T12:11:01.000Z
|
2022-03-12T00:33:12.000Z
|
subscription/migrations/0012_auto_20200603_1405.py
|
BuildForSDG/dijiElimu
|
514a90d6f0480516aa209891224cf9dfe6be1607
|
[
"MIT"
] | 4
|
2020-05-06T03:57:04.000Z
|
2020-05-20T06:30:28.000Z
|
# Generated by Django 3.0.6 on 2020-06-03 14:05
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('subscription', '0011_auto_20200603_1304'),
]
operations = [
migrations.RemoveField(
model_name='subscription',
name='subscribed_courses',
),
migrations.RemoveField(
model_name='subscription',
name='subscribers',
),
]
| 21.136364
| 52
| 0.591398
|
5c07d1c66d8bb22472b2bc26c85d0702f272fcd1
| 3,377
|
py
|
Python
|
app/main/views.py
|
candycrushpro/Pitches
|
9224e9ecc11ab673d8478b5bcd7cec3ed6116013
|
[
"MIT"
] | null | null | null |
app/main/views.py
|
candycrushpro/Pitches
|
9224e9ecc11ab673d8478b5bcd7cec3ed6116013
|
[
"MIT"
] | null | null | null |
app/main/views.py
|
candycrushpro/Pitches
|
9224e9ecc11ab673d8478b5bcd7cec3ed6116013
|
[
"MIT"
] | 2
|
2019-07-02T10:36:41.000Z
|
2019-09-21T13:53:49.000Z
|
from flask import render_template,request,redirect,url_for, abort
from . import main
from ..models import User, Pitch, Category, Vote, Comment
from flask_login import login_required, current_user
from .forms import UpdateProfile, PitchForm, CommentForm, CategoryForm
from .. import db, photos
#Views
@main.route('/')
def index():
'''
View root page function that returns the index page and its data
'''
category = Category.get_categories()
return render_template('index.html', category = category)
@main.route('/add/category', methods=['GET','POST'])
@login_required
def new_category():
'''
    View function for the new category route; returns a page with a form to create a category
'''
form = CategoryForm()
if form.validate_on_submit():
name = form.name.data
new_category = Category(name=name)
new_category.save_category()
return redirect(url_for('.index'))
title = 'New category'
return render_template('new_category.html', category_form = form,title=title)
@main.route('/categories/<int:id>')
def category(id):
category_ = Category.query.get(id)
pitches = Pitch.query.filter_by(category=category_.id).all()
# pitches=Pitch.get_pitches(id)
# title = f'{category.name} page'
return render_template('category.html', pitches=pitches, category=category_)
#Route for adding a new pitch
@main.route('/categories/view_pitch/add/<int:id>', methods=['GET', 'POST'])
@login_required
def new_pitch(id):
'''
    Checks the pitch form and fetches data from its fields
'''
form = PitchForm()
category = Category.query.filter_by(id=id).first()
if category is None:
abort(404)
if form.validate_on_submit():
content = form.content.data
new_pitch= Pitch(content=content,category= category.id,user_id=current_user.id)
new_pitch.save_pitch()
return redirect(url_for('.category', id=category.id))
title = 'New Pitch'
return render_template('new_pitch.html', title = title, pitch_form = form, category = category)
#viewing a Pitch with its comments
@main.route('/categories/view_pitch/<int:id>', methods=['GET', 'POST'])
@login_required
def view_pitch(id):
'''
    Returns a single pitch so that comments can be added to it
'''
print(id)
pitches = Pitch.query.get(id)
    # pitches = Pitch.query.filter_by(id=id).all()
if pitches is None:
abort(404)
    comment = Comment.get_comments(id)
return render_template('pitch.html', pitches=pitches, comment=comment, category_id=id)
@main.route('/user/<uname>/update/pic', methods = ['POST'])
@login_required
def profile(uname):
user = User.query.filter_by(username = uname).first()
if user is None:
abort(404)
form = UpdateProfile()
if form.validate_on_submit():
user.bio = form.bio.data
db.session.add(user)
db.session.commit()
return redirect(url_for('.profile', uname = user.username))
if 'photo' in request.files:
filename = photos.save(request.files['photo'])
path = f'photos/{filename}'
user.profile_pic_path = path
db.session.commit()
return redirect(url_for('main.profile',uname=uname))
return render_template("profile/profile.html", user = user)
| 29.112069
| 99
| 0.66716
|
1259f27d2880e8183704da490a9cf5f53f82d1bf
| 1,521
|
py
|
Python
|
bifrost/registry.py
|
snek-shipyard/wagtai-bifrost
|
2a32467c922110aff5b79edaf84f38ee9f684369
|
[
"Unlicense",
"MIT"
] | null | null | null |
bifrost/registry.py
|
snek-shipyard/wagtai-bifrost
|
2a32467c922110aff5b79edaf84f38ee9f684369
|
[
"Unlicense",
"MIT"
] | 3
|
2021-01-17T17:58:01.000Z
|
2021-02-19T09:15:15.000Z
|
bifrost/registry.py
|
snek-shipyard/wagtail-bifrost
|
2a32467c922110aff5b79edaf84f38ee9f684369
|
[
"MIT",
"Unlicense"
] | 1
|
2021-04-27T12:46:09.000Z
|
2021-04-27T12:46:09.000Z
|
class RegistryItem(dict):
@property
def types(self) -> tuple:
return tuple(self.values())
class Registry:
apps = []
queries = []
mutations = []
subscriptions = []
pages = RegistryItem()
documents = RegistryItem()
images = RegistryItem()
snippets = RegistryItem()
streamfield_blocks = RegistryItem()
django_models = RegistryItem()
settings = RegistryItem()
forms = RegistryItem()
# The items in the registry that should be lazy loaded.
lazy_types = (
"pages",
"documents",
"images",
"snippets",
"django_models",
"settings",
)
# Internal use only, do not add to .models method
schema = []
@property
def class_models(self) -> dict:
models: dict = {}
models.update(self.pages)
models.update(self.documents)
models.update(self.images)
models.update(self.snippets)
models.update(self.django_models)
models.update(self.settings)
models.update(self.forms)
return models
@property
def models(self) -> dict:
models: dict = {}
models.update(self.pages)
models.update(self.documents)
models.update(self.images)
models.update(self.snippets)
models.update(self.streamfield_blocks)
models.update(self.django_models)
models.update(self.settings)
models.update(self.forms)
return models
# Singleton Registry
registry = Registry()
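# A minimal, illustrative sketch of how the singleton is meant to be populated:
# a model class is mapped to its GraphQL type so that ``registry.models`` and
# ``registry.pages.types`` can resolve it later. ``FakePage`` and
# ``FakePageType`` are placeholder classes, not real Wagtail or Graphene objects.
if __name__ == "__main__":
    class FakePage:
        pass
    class FakePageType:
        pass
    registry.pages[FakePage] = FakePageType
    assert FakePageType in registry.pages.types
    assert FakePage in registry.models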
| 24.532258
| 59
| 0.608153
|
cdf329a03e5f44f791a544ec582d1c2894188c79
| 21,924
|
py
|
Python
|
tests/__init__.py
|
DarioLodeiros/doodba
|
5f2d5a0c475f6877b6bfffd850a3470ee9955e30
|
[
"Apache-2.0"
] | null | null | null |
tests/__init__.py
|
DarioLodeiros/doodba
|
5f2d5a0c475f6877b6bfffd850a3470ee9955e30
|
[
"Apache-2.0"
] | null | null | null |
tests/__init__.py
|
DarioLodeiros/doodba
|
5f2d5a0c475f6877b6bfffd850a3470ee9955e30
|
[
"Apache-2.0"
] | 1
|
2022-01-02T00:46:47.000Z
|
2022-01-02T00:46:47.000Z
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""Run tests for this base image.
Each test must be a valid docker-compose.yaml file with a ``odoo`` service.
"""
import logging
import unittest
from itertools import product
from os import environ
from os.path import dirname, join
from subprocess import Popen
logging.basicConfig(level=logging.DEBUG)
DIR = dirname(__file__)
ODOO_PREFIX = ("odoo", "--stop-after-init", "--workers=0")
ODOO_VERSIONS = frozenset(
environ.get("DOCKER_TAG", "7.0 8.0 9.0 10.0 11.0 12.0 13.0").split()
)
PG_VERSIONS = frozenset(environ.get("PG_VERSIONS", "12").split())
SCAFFOLDINGS_DIR = join(DIR, "scaffoldings")
GEOIP_CREDENTIALS_PROVIDED = environ.get("GEOIP_LICENSE_KEY", False) and environ.get(
"GEOIP_ACCOUNT_ID", False
)
# This decorator skips tests that will fail until some branches and/or addons
# are migrated to the next release. It is used in situations where Doodba is
# preparing the pre-release for the next version of Odoo, which hasn't been
# released yet.
prerelease_skip = unittest.skipIf(
ODOO_VERSIONS == {"13.0"}, "Tests not supported in pre-release"
)
def matrix(
odoo=ODOO_VERSIONS, pg=PG_VERSIONS, odoo_skip=frozenset(), pg_skip=frozenset()
):
"""All possible combinations.
We compute the variable matrix here instead of in ``.travis.yml`` because
    this generates faster builds, given that the scripts in the ``hooks``
    directory are already multi-version-build aware.
"""
return map(
dict,
product(
product(("ODOO_MINOR",), ODOO_VERSIONS & odoo - odoo_skip),
product(("DB_VERSION",), PG_VERSIONS & pg - pg_skip),
),
)
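# A short, illustrative helper (not exercised by the suite): with the default
# DOCKER_TAG and PG_VERSIONS environment, matrix(odoo={"12.0", "13.0"}, pg={"12"})
# expands to dicts such as {"ODOO_MINOR": "13.0", "DB_VERSION": "12"}, one per
# Odoo/Postgres combination, ready to be merged into the subprocess environment.
def _matrix_example():  # pragma: no cover - documentation only
    return list(matrix(odoo={"12.0", "13.0"}, pg={"12"}))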
class ScaffoldingCase(unittest.TestCase):
def setUp(self):
super().setUp()
self.compose_run = ("docker-compose", "run", "--rm", "odoo")
def popen(self, *args, **kwargs):
"""Shortcut to open a subprocess and ensure it works."""
logging.info("Subtest execution: %s", self._subtest)
self.assertFalse(Popen(*args, **kwargs).wait())
def compose_test(self, workdir, sub_env, *commands):
"""Execute commands in a docker-compose environment.
:param workdir:
Path where the docker compose commands will be executed. It should
contain a valid ``docker-compose.yaml`` file.
:param dict sub_env:
Specific environment variables that will be appended to current
ones to execute the ``docker-compose`` tests.
You can set in this dict a ``COMPOSE_FILE`` key to choose different
docker-compose files in the same directory.
:param tuple()... commands:
List of commands to be tested in the odoo container.
"""
full_env = dict(environ, **sub_env)
with self.subTest(PWD=workdir, **sub_env):
try:
self.popen(("docker-compose", "build"), cwd=workdir, env=full_env)
for command in commands:
with self.subTest(command=command):
self.popen(
self.compose_run + command, cwd=workdir, env=full_env
)
finally:
self.popen(("docker-compose", "down", "-v"), cwd=workdir, env=full_env)
def test_addons_filtered(self):
"""Test addons filtering with ``ONLY`` keyword in ``addons.yaml``."""
project_dir = join(SCAFFOLDINGS_DIR, "dotd")
for sub_env in matrix():
self.compose_test(
project_dir,
dict(sub_env, DBNAME="prod"),
("test", "-e", "auto/addons/web"),
("test", "-e", "auto/addons/private_addon"),
(
"bash",
"-xc",
'test "$(addons list -p)" == disabled_addon,private_addon',
),
("bash", "-xc", 'test "$(addons list -ip)" == private_addon'),
("bash", "-xc", "addons list -c | grep ,crm,"),
# absent_addon is missing and should fail
("bash", "-xc", "! addons list -px"),
# Test addon inclusion, exclusion, dependencies...
(
"bash",
"-xc",
'test "$(addons list -dw private_addon)" == base,dummy_addon,website',
),
(
"bash",
"-xc",
'test "$(addons list -dwprivate_addon -Wwebsite)" == base,dummy_addon',
),
(
"bash",
"-xc",
'test "$(addons list -dw private_addon -W dummy_addon)" == base,website',
),
("bash", "-xc", 'test "$(addons list -nd)" == base,iap',),
(
"bash",
"-xc",
'test "$(addons list --enterprise)" == make_odoo_rich',
),
)
self.compose_test(
project_dir,
dict(sub_env, DBNAME="limited_private"),
("test", "-e", "auto/addons/web"),
("test", "!", "-e", "auto/addons/private_addon"),
("bash", "-xc", 'test -z "$(addons list -p)"'),
(
"bash",
"-xc",
'[ "$(addons list -s. -pwfake1 -wfake2)" == fake1.fake2 ]',
),
("bash", "-xc", "! addons list -wrepeat -Wrepeat"),
("bash", "-xc", "addons list -c | grep ,crm,"),
)
self.compose_test(
project_dir,
dict(sub_env, DBNAME="limited_core"),
("test", "!", "-e", "auto/addons/web"),
("test", "!", "-e", "auto/addons/private_addon"),
("bash", "-xc", 'test -z "$(addons list -p)"'),
("bash", "-xc", 'test "$(addons list -c)" == crm,sale'),
)
# Skip Odoo versions that don't support __manifest__.py files
for sub_env in matrix(odoo_skip={"7.0", "8.0", "9.0"}):
self.compose_test(
project_dir,
dict(sub_env, DBNAME="prod"),
("bash", "-xc", 'test "$(addons list -ped)" == base,web,website'),
# ``dummy_addon`` and ``private_addon`` exist
("test", "-d", "auto/addons/dummy_addon"),
("test", "-h", "auto/addons/dummy_addon"),
("test", "-f", "auto/addons/dummy_addon/__init__.py"),
("test", "-e", "auto/addons/dummy_addon"),
# Addon from extra repo takes higher priority than core version
("realpath", "auto/addons/product"),
(
"bash",
"-xc",
'test "$(realpath auto/addons/product)" == '
"/opt/odoo/custom/src/other-doodba/odoo/src/private/product",
),
("bash", "-xc", 'test "$(addons list -e)" == dummy_addon,product'),
)
self.compose_test(
project_dir,
dict(sub_env, DBNAME="limited_private"),
("test", "-e", "auto/addons/dummy_addon"),
("bash", "-xc", 'test "$(addons list -e)" == dummy_addon,product'),
)
self.compose_test(
project_dir,
dict(sub_env, DBNAME="limited_core"),
("test", "-e", "auto/addons/dummy_addon"),
(
"bash",
"-xc",
'[ "$(addons list -s. -pwfake1 -wfake2)" == fake1.fake2 ]',
),
("bash", "-xc", 'test "$(addons list -e)" == dummy_addon,product'),
("bash", "-xc", 'test "$(addons list -c)" == crm,sale'),
("bash", "-xc", 'test "$(addons list -cWsale)" == crm'),
)
@prerelease_skip
def test_qa(self):
"""Test that QA tools are in place and work as expected."""
folder = join(SCAFFOLDINGS_DIR, "settings")
commands = (
("./custom/scripts/qa-insider-test",),
("/qa/node_modules/.bin/eslint", "--version"),
("/qa/venv/bin/flake8", "--version"),
("/qa/venv/bin/pylint", "--version"),
("/qa/venv/bin/python", "--version"),
("/qa/venv/bin/python", "-c", "import pylint_odoo"),
("test", "-d", "/qa/mqt"),
)
for sub_env in matrix():
self.compose_test(folder, sub_env, *commands)
@prerelease_skip
def test_settings(self):
"""Test settings are filled OK"""
folder = join(SCAFFOLDINGS_DIR, "settings")
commands = (
# Odoo should install
("--stop-after-init",),
# Odoo settings work
("./custom/scripts/test_settings.py",),
)
# Odoo 8.0 has no shell, and --load-language doesn't work fine in 9.0
for sub_env in matrix(odoo={"9.0"}):
self.compose_test(folder, sub_env, *commands)
# Extra tests for versions >= 10.0, that support --load-language fine
commands += (
# DB was created with the correct language
(
"bash",
"-xc",
"""test "$(psql -Atqc "SELECT code FROM res_lang
WHERE active = TRUE")" == es_ES""",
),
)
for sub_env in matrix(odoo_skip={"7.0", "8.0", "9.0"}):
self.compose_test(folder, sub_env, *commands)
def test_smallest(self):
"""Tests for the smallest possible environment."""
liberation = 'Liberation{0}-Regular.ttf: "Liberation {0}" "Regular"'
commands = (
# Must generate a configuration file
("test", "-f", "/opt/odoo/auto/odoo.conf"),
("test", "-d", "/opt/odoo/custom/src/private"),
("test", "-d", "/opt/odoo/custom/ssh"),
("addons", "list", "-cpix"),
("pg_activity", "--version"),
# Default fonts must be liberation
(
"bash",
"-xc",
"""test "$(fc-match monospace)" == '{}'""".format(
liberation.format("Mono")
),
),
(
"bash",
"-xc",
"""test "$(fc-match sans-serif)" == '{}'""".format(
liberation.format("Sans")
),
),
(
"bash",
"-xc",
"""test "$(fc-match serif)" == '{}'""".format(
liberation.format("Serif")
),
),
# Must be able to install base addon
ODOO_PREFIX + ("--init", "base"),
# Auto updater must work
("click-odoo-update",),
# Needed tools exist
("curl", "--version"),
("git", "--version"),
("pg_activity", "--version"),
("psql", "--version"),
("ssh", "-V"),
("python", "-c", "import plumbum"),
# We are able to dump
("pg_dump", "-f/var/lib/odoo/prod.sql", "prod"),
# Geoip should not be activated
("bash", "-xc", 'test "$(which geoipupdate)" != ""'),
("test", "!", "-e", "/usr/share/GeoIP/GeoLite2-City.mmdb"),
("bash", "-xc", "! geoipupdate"),
)
smallest_dir = join(SCAFFOLDINGS_DIR, "smallest")
for sub_env in matrix(odoo_skip={"7.0", "8.0"}):
self.compose_test(
smallest_dir, sub_env, *commands, ("python", "-c", "import watchdog")
)
for sub_env in matrix(odoo={"8.0"}):
self.compose_test(
smallest_dir,
sub_env,
# Odoo <= 8.0 does not autocreate the database
("createdb",),
*commands,
)
def test_addons_env(self):
"""Test environment variables in addons.yaml"""
        # 7.0 is skipped because the module is absent in that branch
for sub_env in matrix(odoo_skip={"7.0"}):
self.compose_test(
join(SCAFFOLDINGS_DIR, "addons_env"),
sub_env,
# check module from custom repo pattern
("test", "-d", "custom/src/misc-addons"),
("test", "-d", "custom/src/misc-addons/web_debranding"),
("test", "-e", "auto/addons/web_debranding"),
# Migrations folder is only in OpenUpgrade
("test", "-e", "auto/addons/crm"),
("test", "-d", "auto/addons/crm/migrations"),
)
def test_dotd(self):
"""Test environment with common ``*.d`` directories."""
for sub_env in matrix():
self.compose_test(
join(SCAFFOLDINGS_DIR, "dotd"),
sub_env,
# ``custom/build.d`` was properly executed
("test", "-f", "/home/odoo/created-at-build"),
# ``custom/entrypoint.d`` was properly executed
("test", "-f", "/home/odoo/created-at-entrypoint"),
# ``custom/conf.d`` was properly concatenated
("grep", "test-conf", "auto/odoo.conf"),
# ``custom/dependencies`` were installed
("test", "!", "-e", "/usr/sbin/sshd"),
("test", "!", "-e", "/var/lib/apt/lists/lock"),
("busybox", "whoami"),
("bash", "-xc", "echo $NODE_PATH"),
("node", "-e", "require('test-npm-install')"),
("aloha_world",),
("python", "-xc", "import Crypto; print(Crypto.__version__)"),
("sh", "-xc", "rst2html.py --version | grep 'Docutils 0.14'"),
# ``requirements.txt`` from addon repos were processed
("python", "-c", "import cfssl"),
# Local executable binaries found in $PATH
("sh", "-xc", "pip install --user -q flake8 && which flake8"),
# Addon cleanup works correctly
("test", "!", "-e", "custom/src/private/dummy_addon"),
("test", "!", "-e", "custom/src/dummy_repo/dummy_link"),
("test", "-d", "custom/src/private/private_addon"),
("test", "-f", "custom/src/private/private_addon/__init__.py"),
("test", "-e", "auto/addons/private_addon"),
# ``odoo`` command works
("odoo", "--version"),
# Implicit ``odoo`` command also works
("--version",),
)
# TODO Remove decorator when OCB 13.0 is released and server-tools 13.0
# has a valid module to test
@prerelease_skip
def test_dependencies(self):
"""Test dependencies installation."""
dependencies_dir = join(SCAFFOLDINGS_DIR, "dependencies")
for sub_env in matrix(odoo_skip={"7.0"}):
self.compose_test(
dependencies_dir,
sub_env,
("test", "!", "-f", "custom/dependencies/apt.txt"),
("test", "!", "-f", "custom/dependencies/gem.txt"),
("test", "!", "-f", "custom/dependencies/npm.txt"),
("test", "!", "-f", "custom/dependencies/pip.txt"),
# It should have module_auto_update available
("test", "-d", "custom/src/server-tools/module_auto_update"),
# Patched Werkzeug version
(
"bash",
"-xc",
(
'test "$(python -c "import werkzeug; '
'print(werkzeug.__version__)")" == 0.14.1'
),
),
# apt_build.txt
("test", "-f", "custom/dependencies/apt_build.txt"),
("test", "!", "-e", "/usr/sbin/sshd"),
# apt-without-sequence.txt
("test", "-f", "custom/dependencies/apt-without-sequence.txt"),
("test", "!", "-e", "/bin/busybox"),
# 070-apt-bc.txt
("test", "-f", "custom/dependencies/070-apt-bc.txt"),
("test", "-e", "/usr/bin/bc"),
# 150-npm-aloha_world-install.txt
(
"test",
"-f",
("custom/dependencies/" "150-npm-aloha_world-install.txt"),
),
("node", "-e", "require('test-npm-install')"),
# 200-pip-without-ext
("test", "-f", "custom/dependencies/200-pip-without-ext"),
("python", "-c", "import Crypto; print(Crypto.__version__)"),
("sh", "-xc", "rst2html.py --version | grep 'Docutils 0.14'"),
# 270-gem.txt
("test", "-f", "custom/dependencies/270-gem.txt"),
("aloha_world",),
)
def test_modified_uids(self):
"""tests if we can build an image with a custom uid and gid of odoo"""
uids_dir = join(SCAFFOLDINGS_DIR, "uids_1001")
for sub_env in matrix():
self.compose_test(
uids_dir,
sub_env,
# verify that odoo user has the given ids
("bash", "-xc", 'test "$(id -u)" == "1001"'),
("bash", "-xc", 'test "$(id -g)" == "1002"'),
("bash", "-xc", 'test "$(id -u -n)" == "odoo"'),
# all those directories need to belong to odoo (user or group odoo)
(
"bash",
"-xc",
'test "$(stat -c \'%U:%G\' /var/lib/odoo)" == "odoo:odoo"',
),
(
"bash",
"-xc",
'test "$(stat -c \'%U:%G\' /opt/odoo/auto/addons)" == "root:odoo"',
),
(
"bash",
"-xc",
'test "$(stat -c \'%U:%G\' /opt/odoo/custom/src)" == "root:odoo"',
),
)
def test_uids_mac_os(self):
"""tests if we can build an image with a custom uid and gid of odoo"""
uids_dir = join(SCAFFOLDINGS_DIR, "uids_mac_os")
for sub_env in matrix():
self.compose_test(
uids_dir,
sub_env,
# verify that odoo user has the given ids
("bash", "-c", 'test "$(id -u)" == "501"'),
("bash", "-c", 'test "$(id -g)" == "20"'),
("bash", "-c", 'test "$(id -u -n)" == "odoo"'),
# all those directories need to belong to odoo (user or group odoo/dialout)
(
"bash",
"-c",
'test "$(stat -c \'%U:%g\' /var/lib/odoo)" == "odoo:20"',
),
(
"bash",
"-c",
'test "$(stat -c \'%U:%g\' /opt/odoo/auto/addons)" == "root:20"',
),
(
"bash",
"-c",
'test "$(stat -c \'%U:%g\' /opt/odoo/custom/src)" == "root:20"',
),
)
def test_default_uids(self):
uids_dir = join(SCAFFOLDINGS_DIR, "uids_default")
for sub_env in matrix():
self.compose_test(
uids_dir,
sub_env,
# verify that odoo user has the given ids
("bash", "-xc", 'test "$(id -u)" == "1000"'),
("bash", "-xc", 'test "$(id -g)" == "1000"'),
("bash", "-xc", 'test "$(id -u -n)" == "odoo"'),
# all those directories need to belong to odoo (user or group odoo)
(
"bash",
"-xc",
'test "$(stat -c \'%U:%G\' /var/lib/odoo)" == "odoo:odoo"',
),
(
"bash",
"-xc",
'test "$(stat -c \'%U:%G\' /opt/odoo/auto/addons)" == "root:odoo"',
),
(
"bash",
"-xc",
'test "$(stat -c \'%U:%G\' /opt/odoo/custom/src)" == "root:odoo"',
),
)
@unittest.skipIf(
    not GEOIP_CREDENTIALS_PROVIDED, "GeoIP credentials missing in environment"
)
def test_geoip(self):
geoip_dir = join(SCAFFOLDINGS_DIR, "geoip")
for sub_env in matrix():
self.compose_test(
geoip_dir,
sub_env,
# verify that geoipupdate works after waiting for entrypoint to finish its update
(
"bash",
"-c",
"timeout 60s bash -c 'while (ls -l /proc/*/exe 2>&1 | grep geoipupdate); do sleep 1; done' &&"
" geoipupdate",
),
# verify that geoip database exists after entrypoint finished its update
# using ls and /proc because ps is missing in image for 13.0
(
"bash",
"-c",
"timeout 60s bash -c 'while (ls -l /proc/*/exe 2>&1 | grep geoipupdate); do sleep 1; done' &&"
" test -e /opt/odoo/auto/geoip/GeoLite2-City.mmdb",
),
# verify that geoip database is configured
(
"grep",
"-R",
"geoip_database = /opt/odoo/auto/geoip/GeoLite2-City.mmdb",
"/opt/odoo/auto/odoo.conf",
),
)
if __name__ == "__main__":
unittest.main()
| 41.522727
| 114
| 0.456623
|
45c9abb029acf9285f5483fb5099b72633903410
| 730
|
py
|
Python
|
test/aio/test_shutdown_event.py
|
jayvdb/i3ipc-python
|
dc2144418499eebbc1aa08f224fbcdfd76670d62
|
[
"BSD-3-Clause"
] | null | null | null |
test/aio/test_shutdown_event.py
|
jayvdb/i3ipc-python
|
dc2144418499eebbc1aa08f224fbcdfd76670d62
|
[
"BSD-3-Clause"
] | null | null | null |
test/aio/test_shutdown_event.py
|
jayvdb/i3ipc-python
|
dc2144418499eebbc1aa08f224fbcdfd76670d62
|
[
"BSD-3-Clause"
] | null | null | null |
from .ipctest import IpcTest
import pytest
import asyncio
class TestShutdownEvent(IpcTest):
events = []
def restart_func(self, i3):
asyncio.ensure_future(i3.command('restart'))
def on_shutdown(self, i3, e):
self.events.append(e)
if len(self.events) == 1:
i3._loop.call_later(0.1, self.restart_func, i3)
elif len(self.events) == 2:
i3.main_quit()
@pytest.mark.asyncio
async def test_shutdown_event_reconnect(self, i3):
i3._auto_reconnect = True
self.events = []
i3.on('shutdown::restart', self.on_shutdown)
i3._loop.call_later(0.1, self.restart_func, i3)
await i3.main()
assert len(self.events) == 2
| 25.172414
| 59
| 0.621918
|
77a0e594663bf536c69ff41ab5b192020d45bf9d
| 9,480
|
py
|
Python
|
tornado/test/log_test.py
|
bgerrity/tornado
|
208672f3bf6cbb7e37f54c356e02a71ca29f1e02
|
[
"Apache-2.0"
] | 15,056
|
2015-01-01T03:08:16.000Z
|
2022-03-31T14:44:56.000Z
|
tornado/test/log_test.py
|
bgerrity/tornado
|
208672f3bf6cbb7e37f54c356e02a71ca29f1e02
|
[
"Apache-2.0"
] | 1,645
|
2015-01-05T08:15:32.000Z
|
2022-03-24T20:30:10.000Z
|
tornado/test/log_test.py
|
bgerrity/tornado
|
208672f3bf6cbb7e37f54c356e02a71ca29f1e02
|
[
"Apache-2.0"
] | 5,098
|
2015-01-02T15:43:36.000Z
|
2022-03-30T06:04:43.000Z
|
#
# Copyright 2012 Facebook
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import contextlib
import glob
import logging
import os
import re
import subprocess
import sys
import tempfile
import unittest
import warnings
from tornado.escape import utf8
from tornado.log import LogFormatter, define_logging_options, enable_pretty_logging
from tornado.options import OptionParser
from tornado.util import basestring_type
@contextlib.contextmanager
def ignore_bytes_warning():
with warnings.catch_warnings():
warnings.simplefilter("ignore", category=BytesWarning)
yield
class LogFormatterTest(unittest.TestCase):
# Matches the output of a single logging call (which may be multiple lines
# if a traceback was included, so we use the DOTALL option)
LINE_RE = re.compile(
b"(?s)\x01\\[E [0-9]{6} [0-9]{2}:[0-9]{2}:[0-9]{2} log_test:[0-9]+\\]\x02 (.*)"
)
def setUp(self):
self.formatter = LogFormatter(color=False)
# Fake color support. We can't guarantee anything about the $TERM
# variable when the tests are run, so just patch in some values
# for testing. (testing with color off fails to expose some potential
# encoding issues from the control characters)
self.formatter._colors = {logging.ERROR: u"\u0001"}
self.formatter._normal = u"\u0002"
# construct a Logger directly to bypass getLogger's caching
self.logger = logging.Logger("LogFormatterTest")
self.logger.propagate = False
self.tempdir = tempfile.mkdtemp()
self.filename = os.path.join(self.tempdir, "log.out")
self.handler = self.make_handler(self.filename)
self.handler.setFormatter(self.formatter)
self.logger.addHandler(self.handler)
def tearDown(self):
self.handler.close()
os.unlink(self.filename)
os.rmdir(self.tempdir)
def make_handler(self, filename):
# Base case: default setup without explicit encoding.
# In python 2, supports arbitrary byte strings and unicode objects
# that contain only ascii. In python 3, supports ascii-only unicode
# strings (but byte strings will be repr'd automatically).
return logging.FileHandler(filename)
def get_output(self):
with open(self.filename, "rb") as f:
line = f.read().strip()
m = LogFormatterTest.LINE_RE.match(line)
if m:
return m.group(1)
else:
raise Exception("output didn't match regex: %r" % line)
def test_basic_logging(self):
self.logger.error("foo")
self.assertEqual(self.get_output(), b"foo")
def test_bytes_logging(self):
with ignore_bytes_warning():
# This will be "\xe9" on python 2 or "b'\xe9'" on python 3
self.logger.error(b"\xe9")
self.assertEqual(self.get_output(), utf8(repr(b"\xe9")))
def test_utf8_logging(self):
with ignore_bytes_warning():
self.logger.error(u"\u00e9".encode("utf8"))
if issubclass(bytes, basestring_type):
# on python 2, utf8 byte strings (and by extension ascii byte
# strings) are passed through as-is.
self.assertEqual(self.get_output(), utf8(u"\u00e9"))
else:
# on python 3, byte strings always get repr'd even if
# they're ascii-only, so this degenerates into another
# copy of test_bytes_logging.
self.assertEqual(self.get_output(), utf8(repr(utf8(u"\u00e9"))))
def test_bytes_exception_logging(self):
try:
raise Exception(b"\xe9")
except Exception:
self.logger.exception("caught exception")
# This will be "Exception: \xe9" on python 2 or
# "Exception: b'\xe9'" on python 3.
output = self.get_output()
self.assertRegex(output, br"Exception.*\\xe9")
# The traceback contains newlines, which should not have been escaped.
self.assertNotIn(br"\n", output)
class UnicodeLogFormatterTest(LogFormatterTest):
def make_handler(self, filename):
# Adding an explicit encoding configuration allows non-ascii unicode
# strings in both python 2 and 3, without changing the behavior
# for byte strings.
return logging.FileHandler(filename, encoding="utf8")
def test_unicode_logging(self):
self.logger.error(u"\u00e9")
self.assertEqual(self.get_output(), utf8(u"\u00e9"))
class EnablePrettyLoggingTest(unittest.TestCase):
def setUp(self):
super().setUp()
self.options = OptionParser()
define_logging_options(self.options)
self.logger = logging.Logger("tornado.test.log_test.EnablePrettyLoggingTest")
self.logger.propagate = False
def test_log_file(self):
tmpdir = tempfile.mkdtemp()
try:
self.options.log_file_prefix = tmpdir + "/test_log"
enable_pretty_logging(options=self.options, logger=self.logger)
self.assertEqual(1, len(self.logger.handlers))
self.logger.error("hello")
self.logger.handlers[0].flush()
filenames = glob.glob(tmpdir + "/test_log*")
self.assertEqual(1, len(filenames))
with open(filenames[0]) as f:
self.assertRegex(f.read(), r"^\[E [^]]*\] hello$")
finally:
for handler in self.logger.handlers:
handler.flush()
handler.close()
for filename in glob.glob(tmpdir + "/test_log*"):
os.unlink(filename)
os.rmdir(tmpdir)
def test_log_file_with_timed_rotating(self):
tmpdir = tempfile.mkdtemp()
try:
self.options.log_file_prefix = tmpdir + "/test_log"
self.options.log_rotate_mode = "time"
enable_pretty_logging(options=self.options, logger=self.logger)
self.logger.error("hello")
self.logger.handlers[0].flush()
filenames = glob.glob(tmpdir + "/test_log*")
self.assertEqual(1, len(filenames))
with open(filenames[0]) as f:
self.assertRegex(f.read(), r"^\[E [^]]*\] hello$")
finally:
for handler in self.logger.handlers:
handler.flush()
handler.close()
for filename in glob.glob(tmpdir + "/test_log*"):
os.unlink(filename)
os.rmdir(tmpdir)
def test_wrong_rotate_mode_value(self):
try:
self.options.log_file_prefix = "some_path"
self.options.log_rotate_mode = "wrong_mode"
self.assertRaises(
ValueError,
enable_pretty_logging,
options=self.options,
logger=self.logger,
)
finally:
for handler in self.logger.handlers:
handler.flush()
handler.close()
class LoggingOptionTest(unittest.TestCase):
"""Test the ability to enable and disable Tornado's logging hooks."""
def logs_present(self, statement, args=None):
# Each test may manipulate and/or parse the options and then logs
# a line at the 'info' level. This level is ignored in the
# logging module by default, but Tornado turns it on by default
# so it is the easiest way to tell whether tornado's logging hooks
# ran.
IMPORT = "from tornado.options import options, parse_command_line"
LOG_INFO = 'import logging; logging.info("hello")'
program = ";".join([IMPORT, statement, LOG_INFO])
proc = subprocess.Popen(
[sys.executable, "-c", program] + (args or []),
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT,
)
stdout, stderr = proc.communicate()
self.assertEqual(proc.returncode, 0, "process failed: %r" % stdout)
return b"hello" in stdout
def test_default(self):
self.assertFalse(self.logs_present("pass"))
def test_tornado_default(self):
self.assertTrue(self.logs_present("parse_command_line()"))
def test_disable_command_line(self):
self.assertFalse(self.logs_present("parse_command_line()", ["--logging=none"]))
def test_disable_command_line_case_insensitive(self):
self.assertFalse(self.logs_present("parse_command_line()", ["--logging=None"]))
def test_disable_code_string(self):
self.assertFalse(
self.logs_present('options.logging = "none"; parse_command_line()')
)
def test_disable_code_none(self):
self.assertFalse(
self.logs_present("options.logging = None; parse_command_line()")
)
def test_disable_override(self):
# command line trumps code defaults
self.assertTrue(
self.logs_present(
"options.logging = None; parse_command_line()", ["--logging=info"]
)
)
| 38.536585
| 87
| 0.633017
|
56e6569552ee7c0481a269938dd7e39f713278e7
| 6,278
|
py
|
Python
|
src/deepymod/training/sparsity_scheduler.py
|
remykusters/DeePyMoD
|
c53ce939c5e6a5f0207b042d8d42bc0197d66073
|
[
"MIT"
] | null | null | null |
src/deepymod/training/sparsity_scheduler.py
|
remykusters/DeePyMoD
|
c53ce939c5e6a5f0207b042d8d42bc0197d66073
|
[
"MIT"
] | null | null | null |
src/deepymod/training/sparsity_scheduler.py
|
remykusters/DeePyMoD
|
c53ce939c5e6a5f0207b042d8d42bc0197d66073
|
[
"MIT"
] | null | null | null |
""" Contains classes that schedule when the sparsity mask should be applied """
import torch
import numpy as np
class Periodic:
"""Periodically applies sparsity every periodicity iterations
    after initial_iteration.
"""
def __init__(self, periodicity=50, initial_iteration=1000):
"""Periodically applies sparsity every periodicity iterations
        after initial_iteration.
Args:
            periodicity (int): after initial_iteration, apply the sparsity mask every periodicity iterations
            initial_iteration (int): number of iterations to wait before applying the sparsity mask
"""
self.periodicity = periodicity
self.initial_iteration = initial_iteration
def __call__(self, iteration, loss, model, optimizer):
# Update periodically
apply_sparsity = False # we overwrite it if we need to update
if (iteration - self.initial_iteration) % self.periodicity == 0:
apply_sparsity = True
return apply_sparsity
class TrainTest:
"""Early stops the training if validation loss doesn't improve after a given patience.
    Note that periodicity should be a multiple of write_iterations."""
def __init__(self, patience=200, delta=1e-5, path='checkpoint.pt'):
"""Early stops the training if validation loss doesn't improve after a given patience.
        Note that periodicity should be a multiple of write_iterations.
Args:
            patience (int): number of iterations without improvement to wait before applying sparsity
            delta (float): minimum decrease in the loss that counts as an improvement
            path (str): prefix prepended to the saved checkpoint file name ('checkpoint.pt')
"""
self.path = path
self.patience = patience
self.delta = delta
self.best_iteration = None
self.best_loss = None
def __call__(self, iteration, loss, model, optimizer):
apply_sparsity = False # we overwrite it if we need to update
        # Initialize if it doesn't exist yet
if self.best_loss is None:
self.best_loss = loss
self.best_iteration = iteration
self.save_checkpoint(model, optimizer)
        # If it didn't improve, check if we're past patience
elif (self.best_loss - loss) < self.delta:
if (iteration - self.best_iteration) >= self.patience:
# We reload the model to the best point and reset the scheduler
self.load_checkpoint(model, optimizer) # reload model to best point
self.best_loss = None
self.best_iteration = None
apply_sparsity = True
# If not, keep going
else:
self.best_loss = loss
self.best_iteration = iteration
self.save_checkpoint(model, optimizer)
return apply_sparsity
def save_checkpoint(self, model, optimizer):
        '''Saves model when validation loss decreases.'''
checkpoint_path = self.path + 'checkpoint.pt'
torch.save({'model_state_dict': model.state_dict(), 'optimizer_state_dict': optimizer.state_dict(),}, checkpoint_path)
def load_checkpoint(self, model, optimizer):
'''Loads model from disk'''
checkpoint_path = self.path + 'checkpoint.pt'
checkpoint = torch.load(checkpoint_path)
model.load_state_dict(checkpoint['model_state_dict'])
optimizer.load_state_dict(checkpoint['optimizer_state_dict'])
class TrainTestPeriodic:
"""Early stops the training if validation loss doesn't improve after a given patience.
    Note that periodicity should be a multiple of write_iterations."""
def __init__(self, periodicity=50, patience=200, delta=1e-5, path='checkpoint.pt'):
"""Early stops the training if validation loss doesn't improve after a given patience.
        Note that periodicity should be a multiple of write_iterations.
Args:
            periodicity (int): once in the periodic regime, apply the sparsity mask every periodicity iterations
            patience (int): number of iterations without improvement to wait before applying sparsity
            delta (float): minimum decrease in the loss that counts as an improvement
            path (str): prefix prepended to the saved checkpoint file name ('checkpoint.pt')"""
self.path = path
self.patience = patience
self.delta = delta
self.periodicity = periodicity
self.best_iteration = None
self.best_loss = None
self.periodic = False
def __call__(self, iteration, loss, model, optimizer):
# Update periodically if we have updated once
apply_sparsity = False # we overwrite it if we need to update
if self.periodic is True:
if (iteration - self.best_iteration) % self.periodicity == 0:
apply_sparsity = True
        # Check for improvements if we haven't updated yet.
        # Initialize if it doesn't exist yet
elif self.best_loss is None:
self.best_loss = loss
self.best_iteration = iteration
self.save_checkpoint(model, optimizer)
        # If it didn't improve, check if we're past patience
elif (self.best_loss - loss) < self.delta:
if (iteration - self.best_iteration) >= self.patience:
self.load_checkpoint(model, optimizer) # reload model to best point
self.periodic = True # switch to periodic regime
                self.best_iteration = iteration # because the iterator doesn't reset
apply_sparsity = True
# If not, keep going
else:
self.best_loss = loss
self.best_iteration = iteration
self.save_checkpoint(model, optimizer)
return apply_sparsity
def save_checkpoint(self, model, optimizer):
        '''Saves model when validation loss decreases.'''
checkpoint_path = self.path + 'checkpoint.pt'
torch.save({'model_state_dict': model.state_dict(), 'optimizer_state_dict': optimizer.state_dict(),}, checkpoint_path)
def load_checkpoint(self, model, optimizer):
'''Loads model from disk'''
checkpoint_path = self.path + 'checkpoint.pt'
checkpoint = torch.load(checkpoint_path)
model.load_state_dict(checkpoint['model_state_dict'])
optimizer.load_state_dict(checkpoint['optimizer_state_dict'])
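# A minimal, illustrative demo of how a scheduler gates mask application in a
# training loop; the linear model, Adam optimizer and decreasing fake loss are
# stand-ins for the real DeePyMoD training objects.
if __name__ == "__main__":
    demo_model = torch.nn.Linear(1, 1)
    demo_optimizer = torch.optim.Adam(demo_model.parameters())
    scheduler = Periodic(periodicity=100, initial_iteration=0)
    for iteration in range(301):
        fake_loss = torch.tensor(1.0 / (iteration + 1))
        if scheduler(iteration, fake_loss, demo_model, demo_optimizer):
            # In real training the model's sparsity mask would be applied here.
            print('apply sparsity mask at iteration', iteration)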
| 42.707483
| 126
| 0.651959
|
59269461d9f3658566fe51f42861ecdc66716467
| 8,586
|
py
|
Python
|
sympy/polys/orderings.py
|
MartinThoma/sympy
|
009d0031bec7222ffa472e52148a2b4e441cd3a5
|
[
"BSD-3-Clause"
] | 2
|
2019-05-18T22:36:49.000Z
|
2019-05-24T05:56:16.000Z
|
sympy/polys/orderings.py
|
mmelotti/sympy
|
bea29026d27cc50c2e6a5501b6a70a9629ed3e18
|
[
"BSD-3-Clause"
] | 1
|
2020-04-22T12:45:26.000Z
|
2020-04-22T12:45:26.000Z
|
sympy/polys/orderings.py
|
mmelotti/sympy
|
bea29026d27cc50c2e6a5501b6a70a9629ed3e18
|
[
"BSD-3-Clause"
] | 3
|
2021-02-16T16:40:49.000Z
|
2022-03-07T18:28:41.000Z
|
"""Definitions of monomial orderings. """
from __future__ import print_function, division
from typing import Optional
__all__ = ["lex", "grlex", "grevlex", "ilex", "igrlex", "igrevlex"]
from sympy.core import Symbol
from sympy.core.compatibility import iterable
class MonomialOrder(object):
"""Base class for monomial orderings. """
alias = None # type: Optional[str]
is_global = None # type: Optional[bool]
is_default = False
def __repr__(self):
return self.__class__.__name__ + "()"
def __str__(self):
return self.alias
def __call__(self, monomial):
raise NotImplementedError
def __eq__(self, other):
return self.__class__ == other.__class__
def __hash__(self):
return hash(self.__class__)
def __ne__(self, other):
return not (self == other)
class LexOrder(MonomialOrder):
"""Lexicographic order of monomials. """
alias = 'lex'
is_global = True
is_default = True
def __call__(self, monomial):
return monomial
class GradedLexOrder(MonomialOrder):
"""Graded lexicographic order of monomials. """
alias = 'grlex'
is_global = True
def __call__(self, monomial):
return (sum(monomial), monomial)
class ReversedGradedLexOrder(MonomialOrder):
"""Reversed graded lexicographic order of monomials. """
alias = 'grevlex'
is_global = True
def __call__(self, monomial):
return (sum(monomial), tuple(reversed([-m for m in monomial])))
class ProductOrder(MonomialOrder):
"""
A product order built from other monomial orders.
Given (not necessarily total) orders O1, O2, ..., On, their product order
    P is defined as M1 > M2 iff there exists i such that O1(M1) = O1(M2),
..., Oi(M1) = Oi(M2), O{i+1}(M1) > O{i+1}(M2).
Product orders are typically built from monomial orders on different sets
of variables.
ProductOrder is constructed by passing a list of pairs
[(O1, L1), (O2, L2), ...] where Oi are MonomialOrders and Li are callables.
Upon comparison, the Li are passed the total monomial, and should filter
out the part of the monomial to pass to Oi.
Examples
========
We can use a lexicographic order on x_1, x_2 and also on
y_1, y_2, y_3, and their product on {x_i, y_i} as follows:
>>> from sympy.polys.orderings import lex, grlex, ProductOrder
>>> P = ProductOrder(
... (lex, lambda m: m[:2]), # lex order on x_1 and x_2 of monomial
... (grlex, lambda m: m[2:]) # grlex on y_1, y_2, y_3
... )
>>> P((2, 1, 1, 0, 0)) > P((1, 10, 0, 2, 0))
True
Here the exponent `2` of `x_1` in the first monomial
(`x_1^2 x_2 y_1`) is bigger than the exponent `1` of `x_1` in the
second monomial (`x_1 x_2^10 y_2^2`), so the first monomial is greater
in the product ordering.
>>> P((2, 1, 1, 0, 0)) < P((2, 1, 0, 2, 0))
True
Here the exponents of `x_1` and `x_2` agree, so the grlex order on
`y_1, y_2, y_3` is used to decide the ordering. In this case the monomial
`y_2^2` is ordered larger than `y_1`, since for the grlex order the degree
of the monomial is most important.
"""
def __init__(self, *args):
self.args = args
def __call__(self, monomial):
return tuple(O(lamda(monomial)) for (O, lamda) in self.args)
def __repr__(self):
contents = [repr(x[0]) for x in self.args]
return self.__class__.__name__ + '(' + ", ".join(contents) + ')'
def __str__(self):
contents = [str(x[0]) for x in self.args]
return self.__class__.__name__ + '(' + ", ".join(contents) + ')'
def __eq__(self, other):
if not isinstance(other, ProductOrder):
return False
return self.args == other.args
def __hash__(self):
return hash((self.__class__, self.args))
@property
def is_global(self):
if all(o.is_global is True for o, _ in self.args):
return True
if all(o.is_global is False for o, _ in self.args):
return False
return None
class InverseOrder(MonomialOrder):
"""
The "inverse" of another monomial order.
If O is any monomial order, we can construct another monomial order iO
such that `A >_{iO} B` if and only if `B >_O A`. This is useful for
constructing local orders.
Note that many algorithms only work with *global* orders.
For example, in the inverse lexicographic order on a single variable `x`,
high powers of `x` count as small:
>>> from sympy.polys.orderings import lex, InverseOrder
>>> ilex = InverseOrder(lex)
>>> ilex((5,)) < ilex((0,))
True
"""
def __init__(self, O):
self.O = O
def __str__(self):
return "i" + str(self.O)
def __call__(self, monomial):
def inv(l):
if iterable(l):
return tuple(inv(x) for x in l)
return -l
return inv(self.O(monomial))
@property
def is_global(self):
if self.O.is_global is True:
return False
if self.O.is_global is False:
return True
return None
def __eq__(self, other):
return isinstance(other, InverseOrder) and other.O == self.O
def __hash__(self):
return hash((self.__class__, self.O))
lex = LexOrder()
grlex = GradedLexOrder()
grevlex = ReversedGradedLexOrder()
ilex = InverseOrder(lex)
igrlex = InverseOrder(grlex)
igrevlex = InverseOrder(grevlex)
_monomial_key = {
'lex': lex,
'grlex': grlex,
'grevlex': grevlex,
'ilex': ilex,
'igrlex': igrlex,
'igrevlex': igrevlex
}
def monomial_key(order=None, gens=None):
"""
Return a function defining admissible order on monomials.
The result of a call to :func:`monomial_key` is a function which should
be used as a key to :func:`sorted` built-in function, to provide order
in a set of monomials of the same length.
Currently supported monomial orderings are:
1. lex - lexicographic order (default)
2. grlex - graded lexicographic order
3. grevlex - reversed graded lexicographic order
4. ilex, igrlex, igrevlex - the corresponding inverse orders
If the ``order`` input argument is not a string but has ``__call__``
attribute, then it will pass through with an assumption that the
callable object defines an admissible order on monomials.
If the ``gens`` input argument contains a list of generators, the
resulting key function can be used to sort SymPy ``Expr`` objects.
"""
if order is None:
order = lex
if isinstance(order, Symbol):
order = str(order)
if isinstance(order, str):
try:
order = _monomial_key[order]
except KeyError:
raise ValueError("supported monomial orderings are 'lex', 'grlex' and 'grevlex', got %r" % order)
if hasattr(order, '__call__'):
if gens is not None:
def _order(expr):
return order(expr.as_poly(*gens).degree_list())
return _order
return order
else:
raise ValueError("monomial ordering specification must be a string or a callable, got %s" % order)
class _ItemGetter(object):
"""Helper class to return a subsequence of values."""
def __init__(self, seq):
self.seq = tuple(seq)
def __call__(self, m):
return tuple(m[idx] for idx in self.seq)
def __eq__(self, other):
if not isinstance(other, _ItemGetter):
return False
return self.seq == other.seq
def build_product_order(arg, gens):
"""
Build a monomial order on ``gens``.
``arg`` should be a tuple of iterables. The first element of each iterable
should be a string or monomial order (will be passed to monomial_key),
the others should be subsets of the generators. This function will build
the corresponding product order.
For example, build a product of two grlex orders:
>>> from sympy.polys.orderings import grlex, build_product_order
>>> from sympy.abc import x, y, z, t
>>> O = build_product_order((("grlex", x, y), ("grlex", z, t)), [x, y, z, t])
>>> O((1, 2, 3, 4))
((3, (1, 2)), (7, (3, 4)))
"""
gens2idx = {}
for i, g in enumerate(gens):
gens2idx[g] = i
order = []
for expr in arg:
name = expr[0]
var = expr[1:]
def makelambda(var):
return _ItemGetter(gens2idx[g] for g in var)
order.append((monomial_key(name), makelambda(var)))
return ProductOrder(*order)
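# A short, illustrative example: monomial_key returns a callable suitable as the
# ``key`` argument of sorted(); here exponent tuples of three variables are
# ordered by the graded lexicographic order.
if __name__ == "__main__":
    monoms = [(1, 2, 0), (0, 0, 2), (3, 0, 0), (0, 1, 1)]
    print(sorted(monoms, key=monomial_key('grlex')))
    # -> [(0, 0, 2), (0, 1, 1), (1, 2, 0), (3, 0, 0)]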
| 29.709343
| 109
| 0.62765
|
0453a5736615075a9d61b6d2228b0f13ff961d60
| 1,367
|
py
|
Python
|
debug/compare_map/run_fpsl_cvxpy.py
|
gfarnadi/FairPSL
|
1d262b070beb3d622676cd226c4dfd8f1a8ad7d9
|
[
"MIT"
] | null | null | null |
debug/compare_map/run_fpsl_cvxpy.py
|
gfarnadi/FairPSL
|
1d262b070beb3d622676cd226c4dfd8f1a8ad7d9
|
[
"MIT"
] | null | null | null |
debug/compare_map/run_fpsl_cvxpy.py
|
gfarnadi/FairPSL
|
1d262b070beb3d622676cd226c4dfd8f1a8ad7d9
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
import os, sys
SCRIPTDIR = os.path.dirname(__file__)
ENGINDIR = os.path.join(SCRIPTDIR, '..', '..', 'engines')
sys.path.append(os.path.abspath(ENGINDIR))
from fpsl_cvxpy import map_inference
PROBLEMDIR = os.path.join(SCRIPTDIR, '..', '..', 'problems', 'paper_review')
sys.path.append(os.path.abspath(PROBLEMDIR))
from grounding import ground
from os.path import join as ojoin
def run_model(data_path, out_path):
rules, hard_rules, _, atoms = ground(data_path)
results = map_inference(rules, hard_rules)
reviews = atoms['review']
with open(ojoin(out_path, 'POSITIVEREVIEW.txt'), 'w') as f:
for (review, paper), (vid, _) in reviews.items():
print("'%s'\t'%s'\t%f"%(review, paper, results[vid]), file=f)
acceptable = atoms['acceptable']
with open(ojoin(out_path, 'ACCEPTABLE.txt'), 'w') as f:
for paper, (vid, _) in acceptable.items():
print("'%s'\t%f"%(paper, results[vid]), file=f)
presents = atoms['presents']
with open(ojoin(out_path, 'PRESENTS.txt'), 'w') as f:
for author, (vid, _) in presents.items():
print("'%s'\t%f"%(author, results[vid]), file=f)
if __name__ == '__main__':
data_path = ojoin(PROBLEMDIR, 'data', '1')
out_path = ojoin('output', 'fpsl_cvxpy')
run_model(data_path, out_path)
| 32.547619
| 76
| 0.627652
|
b7510eabf69066f2fa55df9786d8dcfd4b2f55d2
| 482
|
py
|
Python
|
data/scripts/templates/object/building/corellia/shared_skyscraper_corellia_style_04.py
|
obi-two/GameServer
|
7d37024e2291a97d49522610cd8f1dbe5666afc2
|
[
"MIT"
] | 20
|
2015-02-23T15:11:56.000Z
|
2022-03-18T20:56:48.000Z
|
data/scripts/templates/object/building/corellia/shared_skyscraper_corellia_style_04.py
|
apathyboy/swganh
|
665128efe9154611dec4cb5efc61d246dd095984
|
[
"MIT"
] | null | null | null |
data/scripts/templates/object/building/corellia/shared_skyscraper_corellia_style_04.py
|
apathyboy/swganh
|
665128efe9154611dec4cb5efc61d246dd095984
|
[
"MIT"
] | 20
|
2015-04-04T16:35:59.000Z
|
2022-03-24T14:54:37.000Z
|
#### NOTICE: THIS FILE IS AUTOGENERATED
#### MODIFICATIONS MAY BE LOST IF DONE IMPROPERLY
#### PLEASE SEE THE ONLINE DOCUMENTATION FOR EXAMPLES
from swgpy.object import *
def create(kernel):
result = Building()
result.template = "object/building/corellia/shared_skyscraper_corellia_style_04.iff"
result.attribute_template_id = -1
result.stfName("building_name","skyscraper_corellia_style_4")
#### BEGIN MODIFICATIONS ####
#### END MODIFICATIONS ####
return result
| 28.352941
| 85
| 0.746888
|
0d48752e6d0551c8d7b4615e5780d619892af268
| 7,040
|
py
|
Python
|
tests/test_preprocessing.py
|
stas00/Megatron-DeepSpeed
|
48dcee4a4dc07da855c35d3c563f8c2eec14c737
|
[
"MIT"
] | null | null | null |
tests/test_preprocessing.py
|
stas00/Megatron-DeepSpeed
|
48dcee4a4dc07da855c35d3c563f8c2eec14c737
|
[
"MIT"
] | 1
|
2021-08-08T01:48:54.000Z
|
2021-08-08T16:43:31.000Z
|
tests/test_preprocessing.py
|
stas00/Megatron-DeepSpeed
|
48dcee4a4dc07da855c35d3c563f8c2eec14c737
|
[
"MIT"
] | null | null | null |
# Copyright 2020 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import filecmp
import io
import json
import re
import os
import unittest
import functools
from pathlib import Path
from megatron.testing_utils import (
TestCasePlus,
execute_subprocess_async,
set_seed
)
from datasets import load_dataset
set_seed(42)
def write_jsonl(path, lines_num=1000, line_length=1024):
def get_text_line(line_length):
# XXX: fix to generate line_length
return "It's a wonderful world. I'm just walking on air. Talk of heaven on earth. I've got more than my share. Haven't got a care. Happy all day through. It's a wonderful world. Loving wonderful you!"
with io.open(path, "w", encoding="utf-8") as f:
for i in range(lines_num):
rec = dict(text=get_text_line(line_length))
x = json.dumps(rec, indent=0, ensure_ascii=False)
x = re.sub(r'\n', ' ', x, 0, re.M)
f.write(x + "\n")
@functools.lru_cache()
def download_hf_dataset(dsetname):
return load_dataset(dsetname)
class MegDSTestPreprocessing(TestCasePlus):
""" """
def setUp(self):
super().setUp()
def test_preprocess_data(self):
src_dir = self.src_dir
data_dir = f"{self.data_dir}/gpt2"
output_dir = self.get_auto_remove_tmp_dir() # "./xxx", after=False)
# autogenerate "input.jsonl"
input_path = f"{output_dir}/input.jsonl"
write_jsonl(input_path)
output_prefix =f"{output_dir}/test-ds"
cmd = f"""
python {src_dir}/tools/preprocess_data.py
--input {input_path}
--output-prefix {output_prefix}
--dataset-impl mmap
--tokenizer-type GPT2BPETokenizer
--merge-file {data_dir}/gpt2-tiny-merges.txt
--vocab {data_dir}/gpt2-tiny-vocab.json
--append-eod
--workers 2
""".split()
# keep for quick debug
# print(" ".join([f"\nPYTHONPATH={self.src_dir_str}"] +cmd)); die
execute_subprocess_async(cmd, env=self.get_env())
for ext in ["bin", "idx"]:
tgt_path = f"{output_prefix}_text_document.{ext}"
self.assertTrue(Path(tgt_path).exists(), )
def compare_meg_data_files(self, tgt, ref):
for ext in ["bin", "idx"]:
tgt_path = f"{tgt}.{ext}"
ref_path = f"{ref}.{ext}"
self.assertTrue(Path(tgt_path).exists(), )
self.assertTrue(filecmp.cmp(tgt_path, ref_path, shallow=False))
def test_process_data_microsoft(self):
"""We want to be stable to Microsoft version."""
src_dir = self.src_dir
data_dir = f"{self.data_dir}/gpt2"
output_dir = self.get_auto_remove_tmp_dir() # "./xxx", after=False)
input_path = f"{self.tests_dir}/data/gpt2/openwebtext-1000.jsonl"
output_prefix = f"{output_dir}/test-ds-meg-gpt2-openwebtext"
cmd = f"""
python {src_dir}/tools/preprocess_data.py
--input {input_path}
--output-prefix {output_prefix}
--dataset-impl mmap
--tokenizer-type GPT2BPETokenizer
--merge-file {data_dir}/gpt2-tiny-merges.txt
--vocab {data_dir}/gpt2-tiny-vocab.json
--append-eod
--workers 2
""".split()
# keep for quick debug
# print(" ".join([f"\nPYTHONPATH={self.src_dir_str}"] +cmd)); die
execute_subprocess_async(cmd, env=self.get_env())
self.compare_meg_data_files(f"{output_prefix}_text_document", f"{data_dir}/meg-gpt2-openwebtext_text_document")
def test_process_data_dist_microsoft(self):
"""We want to be stable to Microsoft version."""
src_dir = self.src_dir
data_dir = f"{self.data_dir}/gpt2"
output_dir = self.get_auto_remove_tmp_dir() # "./xxx", after=False)
output_prefix = f"{output_dir}/test-ds-meg-gpt2-openwebtext_1k"
# preprocess_data_dist requires one to have already downloaded the input HF dataset.
# We do that by running this script before the test.
dsetname = 'stas/openwebtext-10k'
download_hf_dataset(dsetname)
cmd = f"""
python -m torch.distributed.launch --nproc_per_node 2 {src_dir}/tools/preprocess_data_dist.py
--input {dsetname}
--count 1000
--output-prefix {output_prefix}
--dataset-impl mmap
--tokenizer-type GPT2BPETokenizer
--merge-file {data_dir}/gpt2-tiny-merges.txt
--vocab {data_dir}/gpt2-tiny-vocab.json
--append-eod
""".split()
# keep for quick debug
# print(" ".join([f"\nPYTHONPATH={self.src_dir_str}"] +cmd)); die
execute_subprocess_async(cmd, env=self.get_env())
self.compare_meg_data_files(f"{output_prefix}_text_document", f"{data_dir}/meg-gpt2-openwebtext_text_document")
def test_process_data_dist_serial_microsoft(self):
"""We want to be stable to Microsoft version."""
src_dir = self.src_dir
data_dir = f"{self.data_dir}/gpt2"
output_dir = self.get_auto_remove_tmp_dir() # "./xxx", after=False)
output_prefix = f"{output_dir}/test-ds-meg-gpt2-openwebtext_1k"
        # preprocess_data_dist requires one to have already downloaded the input HF dataset.
# We do that by running this script before the test.
dsetname = 'stas/openwebtext-10k'
download_hf_dataset(dsetname)
cmd = f"""
python -m torch.distributed.launch --nproc_per_node 2 {src_dir}/tools/preprocess_data_dist.py
--input {dsetname}
--count 1000
--merge serial
--output-prefix {output_prefix}
--dataset-impl mmap
--tokenizer-type GPT2BPETokenizer
--merge-file {data_dir}/gpt2-tiny-merges.txt
--vocab {data_dir}/gpt2-tiny-vocab.json
--append-eod
""".split()
# keep for quick debug
# print(" ".join([f"\nPYTHONPATH={self.src_dir_str}"] +cmd)); die
execute_subprocess_async(cmd, env=self.get_env())
self.compare_meg_data_files(f"{output_prefix}_text_document", f"{data_dir}/meg-gpt2-openwebtext_text_document")
| 37.446809
| 208
| 0.613778
|
04f897ef9f317ce64fd61c2face3de2711041567
| 11,513
|
py
|
Python
|
docs/source/conf.py
|
normarivano/aiida-wannier90
|
9c672178195bb40dafcd3eca3e4b5004b49526d7
|
[
"MIT"
] | null | null | null |
docs/source/conf.py
|
normarivano/aiida-wannier90
|
9c672178195bb40dafcd3eca3e4b5004b49526d7
|
[
"MIT"
] | null | null | null |
docs/source/conf.py
|
normarivano/aiida-wannier90
|
9c672178195bb40dafcd3eca3e4b5004b49526d7
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
#
# aiida-wannier90 documentation build configuration file, created by
# sphinx-quickstart on Fri Oct 10 02:14:52 2014.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import contextlib
import os
import sys
import time
import aiida_wannier90
# -- General configuration ------------------------------------------------
on_rtd = os.environ.get('READTHEDOCS', None) == 'True'
if not on_rtd:
with contextlib.suppress(ImportError):
import sphinx_rtd_theme # pylint: disable=import-error
html_theme = 'sphinx_rtd_theme'
html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]
try:
# For AiiDA v1.1+
from aiida.manage.configuration import load_documentation_profile
load_documentation_profile()
except ImportError:
# AiiDA versions <1.1
# This can be removed when python2 support is dropped, because there
# will be no need to build the documentation with AiiDA v1.0.
sys.path.append(
os.path.join(os.path.split(__file__)[0], os.pardir, os.pardir)
)
sys.path.append(os.path.join(os.path.split(__file__)[0], os.pardir))
os.environ['DJANGO_SETTINGS_MODULE'] = 'rtd_settings'
if not on_rtd:  # building docs locally: load the real profile and database backend
# Load the database environment by first loading the profile and then loading the backend through the manager
from aiida.manage.configuration import get_config, load_profile
from aiida.manage.manager import get_manager
config = get_config()
load_profile(config.default_profile_name)
get_manager().get_backend()
else:
from aiida.manage import configuration
from aiida.manage.configuration import load_profile, reset_config
from aiida.manage.manager import get_manager
# Set the global variable to trigger shortcut behavior in `aiida.manager.configuration.load_config`
configuration.IN_RT_DOC_MODE = True
# First need to reset the config, because an empty one will have been loaded when `aiida` module got imported
reset_config()
# Load the profile: this will first load the config, which will be the dummy one for RTD purposes
load_profile()
# Finally load the database backend but without checking the schema because there is no actual database
get_manager()._load_backend(schema_check=False) # pylint: disable=protected-access
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.mathjax',
'sphinx.ext.intersphinx',
'sphinx.ext.viewcode',
]
intersphinx_mapping = {
'python': ('https://docs.python.org/2.7', None),
'aiida': ('http://aiida_core.readthedocs.io/en/latest/', None),
}
nitpick_ignore = [('py:obj', 'module')]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
#~ master_doc = 'index'
master_doc = 'index'
# General information about the project.
project = u'aiida-wannier90'
# pylint: disable=redefined-builtin
copyright = u'2015-{}, ECOLE POLYTECHNIQUE FEDERALE DE LAUSANNE (Theory and Simulation of Materials (THEOS) and ETH Zurich\
and National Centre for Computational Design and Discovery of Novel Materials (NCCR MARVEL)), Switzerland. All rights reserved.'.format(
time.localtime().tm_year # pylint: disable=redefined-builtin
)
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '.'.join(aiida_wannier90.__version__.split('.')[:2])
# The full version, including alpha/beta/rc tags.
release = aiida_wannier90.__version__
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['doc.rst']
#~ exclude_patterns = ['index.rst']
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
show_authors = True
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#~ html_theme = 'basicstrap'
## SET BELOW
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#~ html_theme_options = {
#~ 'inner_theme': True,
#~ 'inner_theme_name': 'bootswatch-darkly',
#~ 'nav_fixed_top': False
#~ }
# Add any paths that contain custom themes here, relative to this directory.
#~ html_theme_path = ["."]
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
# html_logo = "images/.png"
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
# html_favicon = "images/favicon.ico"
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
#html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
html_show_sourcelink = False
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#~ html_show_copyright = False
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
html_use_opensearch = 'http://aiida-wannier90.readthedocs.io'
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Language to be used for generating the HTML full-text search index.
# Sphinx supports the following languages:
# 'da', 'de', 'en', 'es', 'fi', 'fr', 'hu', 'it', 'ja'
# 'nl', 'no', 'pt', 'ro', 'ru', 'sv', 'tr'
html_search_language = 'en'
# A dictionary with options for the search language support, empty by default.
# Now only 'ja' uses this config value
#html_search_options = {'type': 'default'}
# The name of a javascript file (relative to the configuration directory) that
# implements a search results scorer. If empty, the default will be used.
#html_search_scorer = 'scorer.js'
# Output file base name for HTML help builder.
htmlhelp_basename = 'aiida-wannier90doc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
# Latex figure (float) alignment
#'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
# latex_documents = [
# ]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
# man_pages = [
# ]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
# texinfo_documents = [
# ]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
| 34.163205
| 140
| 0.715365
|
7039b77b8baafd0af311ef808f880c223ea3f4b2
| 2,982
|
py
|
Python
|
lang/zsh.py
|
codecat555/codecat555-fidgetingbits_knausj_talon
|
62f9be0459e6631c99d58eee97054ddd970cc5f3
|
[
"MIT"
] | 1
|
2020-12-30T00:18:18.000Z
|
2020-12-30T00:18:18.000Z
|
lang/zsh.py
|
codecat555/codecat555-fidgetingbits_knausj_talon
|
62f9be0459e6631c99d58eee97054ddd970cc5f3
|
[
"MIT"
] | null | null | null |
lang/zsh.py
|
codecat555/codecat555-fidgetingbits_knausj_talon
|
62f9be0459e6631c99d58eee97054ddd970cc5f3
|
[
"MIT"
] | null | null | null |
from talon import Context, actions
ctx = Context()
ctx.matches = r"""
# this is largely based on bash.talon. Any problem you find that isn't specific
# to zsh functionality may also exist there, so check bash.talon as well
mode: user.zsh
mode: command
and code.language: zsh
"""
@ctx.action_class("user")
class UserActions:
def code_operator_assignment():
actions.auto_insert(" = ")
def code_operator_subtraction():
actions.auto_insert(" - ")
def code_operator_subtraction_assignment():
actions.auto_insert(" -= ")
def code_operator_addition():
actions.auto_insert(" + ")
def code_operator_addition_assignment():
actions.auto_insert(" += ")
def code_operator_multiplication():
actions.auto_insert(" * ")
def code_operator_multiplication_assignment():
actions.auto_insert(" *= ")
# action(user.code_operator_exponent): " ** "
def code_operator_division():
actions.auto_insert(" / ")
def code_operator_division_assignment():
actions.auto_insert(" /= ")
def code_operator_modulo():
actions.auto_insert(" % ")
def code_operator_modulo_assignment():
actions.auto_insert(" %= ")
def code_operator_equal():
actions.auto_insert(" == ")
def code_operator_not_equal():
actions.auto_insert(" != ")
def code_operator_greater_than():
actions.auto_insert(" > ")
def code_operator_greater_than_or_equal_to():
actions.auto_insert(" >= ")
def code_operator_less_than():
actions.auto_insert(" < ")
def code_operator_less_than_or_equal_to():
actions.auto_insert(" <= ")
def code_operator_and():
actions.auto_insert(" && ")
def code_operator_or():
actions.auto_insert(" || ")
def code_operator_bitwise_and():
actions.auto_insert(" & ")
def code_operator_bitwise_and_assignment():
actions.auto_insert(" &= ")
def code_operator_bitwise_or():
actions.auto_insert(" | ")
def code_operator_bitwise_or_assignment():
actions.auto_insert(" |= ")
def code_operator_bitwise_exclusive_or():
actions.auto_insert(" ^ ")
def code_operator_bitwise_exclusive_or_assignment():
actions.auto_insert(" ^= ")
def code_operator_bitwise_left_shift():
actions.auto_insert(" << ")
def code_operator_bitwise_left_shift_assignment():
actions.auto_insert(" <<= ")
def code_operator_bitwise_right_shift():
actions.auto_insert(" >> ")
def code_operator_bitwise_right_shift_assignment():
actions.auto_insert(" >>= ")
def code_include_local():
actions.insert("source ")
def code_comment():
actions.auto_insert("#")
# XXX - redundant with snippets
def code_state_if():
actions.insert("if [];")
actions.key("left")
# XXX - redundant with snippet
# XXX - should use the env line instead
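# A hedged illustration (not in the original file): further overrides follow
# the same pattern, e.g. the exponent operator noted above could be wired up as
#
#     def code_operator_exponent():
#         actions.auto_insert(" ** ")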
| 25.706897
| 78
| 0.65057
|
a892012c3fcc015e665d249161c3e79bc4338ec3
| 45,763
|
py
|
Python
|
Lib/email/message.py
|
pelotoncycle/cpython-fork
|
1ab99a0e912aac9c3f16555f23284d7e381f2f69
|
[
"PSF-2.0"
] | 486
|
2016-05-28T18:51:54.000Z
|
2022-03-20T17:30:31.000Z
|
Lib/email/message.py
|
sky-skynet/Python3
|
b816507f56ee14b730b7ab52a61eb17f9eb9d815
|
[
"PSF-2.0"
] | 40
|
2016-05-29T00:24:56.000Z
|
2020-07-13T11:56:58.000Z
|
Lib/email/message.py
|
sky-skynet/Python3
|
b816507f56ee14b730b7ab52a61eb17f9eb9d815
|
[
"PSF-2.0"
] | 74
|
2015-05-29T17:18:53.000Z
|
2022-01-15T14:06:44.000Z
|
# Copyright (C) 2001-2007 Python Software Foundation
# Author: Barry Warsaw
# Contact: email-sig@python.org
"""Basic message object for the email package object model."""
__all__ = ['Message']
import re
import uu
import quopri
import warnings
from io import BytesIO, StringIO
# Intrapackage imports
from email import utils
from email import errors
from email._policybase import compat32
from email import charset as _charset
from email._encoded_words import decode_b
Charset = _charset.Charset
SEMISPACE = '; '
# Regular expression that matches `special' characters in parameters, the
# existence of which force quoting of the parameter value.
tspecials = re.compile(r'[ \(\)<>@,;:\\"/\[\]\?=]')
def _splitparam(param):
# Split header parameters. BAW: this may be too simple. It isn't
# strictly RFC 2045 (section 5.1) compliant, but it catches most headers
# found in the wild. We may eventually need a full fledged parser.
# RDM: we might have a Header here; for now just stringify it.
a, sep, b = str(param).partition(';')
if not sep:
return a.strip(), None
return a.strip(), b.strip()
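# A hedged illustration of _splitparam (examples assumed, not from the
# upstream test suite):
#
#   >>> _splitparam('text/plain; charset="utf-8"')
#   ('text/plain', 'charset="utf-8"')
#   >>> _splitparam('text/plain')
#   ('text/plain', None)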
def _formatparam(param, value=None, quote=True):
"""Convenience function to format and return a key=value pair.
This will quote the value if needed or if quote is true. If value is a
three tuple (charset, language, value), it will be encoded according
to RFC2231 rules. If it contains non-ascii characters it will likewise
be encoded according to RFC2231 rules, using the utf-8 charset and
a null language.
"""
if value is not None and len(value) > 0:
# A tuple is used for RFC 2231 encoded parameter values where items
# are (charset, language, value). charset is a string, not a Charset
# instance. RFC 2231 encoded values are never quoted, per RFC.
if isinstance(value, tuple):
# Encode as per RFC 2231
param += '*'
value = utils.encode_rfc2231(value[2], value[0], value[1])
return '%s=%s' % (param, value)
else:
try:
value.encode('ascii')
except UnicodeEncodeError:
param += '*'
value = utils.encode_rfc2231(value, 'utf-8', '')
return '%s=%s' % (param, value)
# BAW: Please check this. I think that if quote is set it should
# force quoting even if not necessary.
if quote or tspecials.search(value):
return '%s="%s"' % (param, utils.quote(value))
else:
return '%s=%s' % (param, value)
else:
return param
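# A hedged illustration of _formatparam (examples assumed, not from the
# upstream test suite):
#
#   >>> _formatparam('filename', 'bud.gif')
#   'filename="bud.gif"'
#   >>> _formatparam('filename', ('utf-8', '', 'Fußballer.ppt'))
#   "filename*=utf-8''Fu%C3%9Fballer.ppt"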
def _parseparam(s):
# RDM This might be a Header, so for now stringify it.
s = ';' + str(s)
plist = []
while s[:1] == ';':
s = s[1:]
end = s.find(';')
while end > 0 and (s.count('"', 0, end) - s.count('\\"', 0, end)) % 2:
end = s.find(';', end + 1)
if end < 0:
end = len(s)
f = s[:end]
if '=' in f:
i = f.index('=')
f = f[:i].strip().lower() + '=' + f[i+1:].strip()
plist.append(f.strip())
s = s[end:]
return plist
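# A hedged illustration of _parseparam (example assumed):
#
#   >>> _parseparam('text/plain; charset="utf-8"')
#   ['text/plain', 'charset="utf-8"']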
def _unquotevalue(value):
# This is different than utils.collapse_rfc2231_value() because it doesn't
# try to convert the value to a unicode. Message.get_param() and
# Message.get_params() are both currently defined to return the tuple in
# the face of RFC 2231 parameters.
if isinstance(value, tuple):
return value[0], value[1], utils.unquote(value[2])
else:
return utils.unquote(value)
class Message:
"""Basic message object.
A message object is defined as something that has a bunch of RFC 2822
headers and a payload. It may optionally have an envelope header
(a.k.a. Unix-From or From_ header). If the message is a container (i.e. a
multipart or a message/rfc822), then the payload is a list of Message
objects, otherwise it is a string.
Message objects implement part of the `mapping' interface, which assumes
there is exactly one occurrence of the header per message. Some headers
do in fact appear multiple times (e.g. Received) and for those headers,
you must use the explicit API to set or get all the headers. Not all of
the mapping methods are implemented.
"""
def __init__(self, policy=compat32):
self.policy = policy
self._headers = []
self._unixfrom = None
self._payload = None
self._charset = None
# Defaults for multipart messages
self.preamble = self.epilogue = None
self.defects = []
# Default content type
self._default_type = 'text/plain'
def __str__(self):
"""Return the entire formatted message as a string.
"""
return self.as_string()
def as_string(self, unixfrom=False, maxheaderlen=0, policy=None):
"""Return the entire formatted message as a string.
Optional 'unixfrom', when true, means include the Unix From_ envelope
header. For backward compatibility reasons, if maxheaderlen is
not specified it defaults to 0, so you must override it explicitly
if you want a different maxheaderlen. 'policy' is passed to the
Generator instance used to serialize the message; if it is not
specified the policy associated with the message instance is used.
If the message object contains binary data that is not encoded
according to RFC standards, the non-compliant data will be replaced by
unicode "unknown character" code points.
"""
from email.generator import Generator
policy = self.policy if policy is None else policy
fp = StringIO()
g = Generator(fp,
mangle_from_=False,
maxheaderlen=maxheaderlen,
policy=policy)
g.flatten(self, unixfrom=unixfrom)
return fp.getvalue()
def __bytes__(self):
"""Return the entire formatted message as a bytes object.
"""
return self.as_bytes()
def as_bytes(self, unixfrom=False, policy=None):
"""Return the entire formatted message as a bytes object.
Optional 'unixfrom', when true, means include the Unix From_ envelope
header. 'policy' is passed to the BytesGenerator instance used to
serialize the message; if not specified the policy associated with
the message instance is used.
"""
from email.generator import BytesGenerator
policy = self.policy if policy is None else policy
fp = BytesIO()
g = BytesGenerator(fp, mangle_from_=False, policy=policy)
g.flatten(self, unixfrom=unixfrom)
return fp.getvalue()
def is_multipart(self):
"""Return True if the message consists of multiple parts."""
return isinstance(self._payload, list)
#
# Unix From_ line
#
def set_unixfrom(self, unixfrom):
self._unixfrom = unixfrom
def get_unixfrom(self):
return self._unixfrom
#
# Payload manipulation.
#
def attach(self, payload):
"""Add the given payload to the current payload.
The current payload will always be a list of objects after this method
is called. If you want to set the payload to a scalar object, use
set_payload() instead.
"""
if self._payload is None:
self._payload = [payload]
else:
try:
self._payload.append(payload)
except AttributeError:
raise TypeError("Attach is not valid on a message with a"
" non-multipart payload")
def get_payload(self, i=None, decode=False):
"""Return a reference to the payload.
The payload will either be a list object or a string. If you mutate
the list object, you modify the message's payload in place. Optional
i returns that index into the payload.
Optional decode is a flag indicating whether the payload should be
decoded or not, according to the Content-Transfer-Encoding header
(default is False).
When True and the message is not a multipart, the payload will be
decoded if this header's value is `quoted-printable' or `base64'. If
some other encoding is used, or the header is missing, or if the
payload has bogus data (i.e. bogus base64 or uuencoded data), the
payload is returned as-is.
If the message is a multipart and the decode flag is True, then None
is returned.
"""
# Here is the logic table for this code, based on the email5.0.0 code:
# i decode is_multipart result
# ------ ------ ------------ ------------------------------
# None True True None
# i True True None
# None False True _payload (a list)
# i False True _payload element i (a Message)
# i False False error (not a list)
# i True False error (not a list)
# None False False _payload
# None True False _payload decoded (bytes)
# Note that Barry planned to factor out the 'decode' case, but that
# isn't so easy now that we handle the 8 bit data, which needs to be
# converted in both the decode and non-decode path.
if self.is_multipart():
if decode:
return None
if i is None:
return self._payload
else:
return self._payload[i]
# For backward compatibility, use isinstance and this error message
# instead of the more logical is_multipart test.
if i is not None and not isinstance(self._payload, list):
raise TypeError('Expected list, got %s' % type(self._payload))
payload = self._payload
# cte might be a Header, so for now stringify it.
cte = str(self.get('content-transfer-encoding', '')).lower()
# payload may be bytes here.
if isinstance(payload, str):
if utils._has_surrogates(payload):
bpayload = payload.encode('ascii', 'surrogateescape')
if not decode:
try:
payload = bpayload.decode(self.get_param('charset', 'ascii'), 'replace')
except LookupError:
payload = bpayload.decode('ascii', 'replace')
elif decode:
try:
bpayload = payload.encode('ascii')
except UnicodeError:
# This won't happen for RFC compliant messages (messages
# containing only ASCII code points in the unicode input).
# If it does happen, turn the string into bytes in a way
# guaranteed not to fail.
bpayload = payload.encode('raw-unicode-escape')
if not decode:
return payload
if cte == 'quoted-printable':
return quopri.decodestring(bpayload)
elif cte == 'base64':
# XXX: this is a bit of a hack; decode_b should probably be factored
# out somewhere, but I haven't figured out where yet.
value, defects = decode_b(b''.join(bpayload.splitlines()))
for defect in defects:
self.policy.handle_defect(self, defect)
return value
elif cte in ('x-uuencode', 'uuencode', 'uue', 'x-uue'):
in_file = BytesIO(bpayload)
out_file = BytesIO()
try:
uu.decode(in_file, out_file, quiet=True)
return out_file.getvalue()
except uu.Error:
# Some decoding problem
return bpayload
if isinstance(payload, str):
return bpayload
return payload
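# A hedged usage sketch for get_payload (sample message assumed):
#
#   >>> from email.parser import Parser
#   >>> m = Parser().parsestr('Content-Transfer-Encoding: base64\n\naGVsbG8=\n')
#   >>> m.get_payload()
#   'aGVsbG8=\n'
#   >>> m.get_payload(decode=True)
#   b'hello'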
def set_payload(self, payload, charset=None):
"""Set the payload to the given value.
Optional charset sets the message's default character set. See
set_charset() for details.
"""
if hasattr(payload, 'encode'):
if charset is None:
self._payload = payload
return
if not isinstance(charset, Charset):
charset = Charset(charset)
payload = payload.encode(charset.output_charset)
if hasattr(payload, 'decode'):
self._payload = payload.decode('ascii', 'surrogateescape')
else:
self._payload = payload
if charset is not None:
self.set_charset(charset)
def set_charset(self, charset):
"""Set the charset of the payload to a given character set.
charset can be a Charset instance, a string naming a character set, or
None. If it is a string it will be converted to a Charset instance.
If charset is None, the charset parameter will be removed from the
Content-Type field. Anything else will generate a TypeError.
The message will be assumed to be of type text/* encoded with
charset.input_charset. It will be converted to charset.output_charset
and encoded properly, if needed, when generating the plain text
representation of the message. MIME headers (MIME-Version,
Content-Type, Content-Transfer-Encoding) will be added as needed.
"""
if charset is None:
self.del_param('charset')
self._charset = None
return
if not isinstance(charset, Charset):
charset = Charset(charset)
self._charset = charset
if 'MIME-Version' not in self:
self.add_header('MIME-Version', '1.0')
if 'Content-Type' not in self:
self.add_header('Content-Type', 'text/plain',
charset=charset.get_output_charset())
else:
self.set_param('charset', charset.get_output_charset())
if charset != charset.get_output_charset():
self._payload = charset.body_encode(self._payload)
if 'Content-Transfer-Encoding' not in self:
cte = charset.get_body_encoding()
try:
cte(self)
except TypeError:
# This 'if' is for backward compatibility, it allows unicode
# through even though that won't work correctly if the
# message is serialized.
payload = self._payload
if payload:
try:
payload = payload.encode('ascii', 'surrogateescape')
except UnicodeError:
payload = payload.encode(charset.output_charset)
self._payload = charset.body_encode(payload)
self.add_header('Content-Transfer-Encoding', cte)
def get_charset(self):
"""Return the Charset instance associated with the message's payload.
"""
return self._charset
#
# MAPPING INTERFACE (partial)
#
def __len__(self):
"""Return the total number of headers, including duplicates."""
return len(self._headers)
def __getitem__(self, name):
"""Get a header value.
Return None if the header is missing instead of raising an exception.
Note that if the header appeared multiple times, exactly which
occurrence gets returned is undefined. Use get_all() to get all
the values matching a header field name.
"""
return self.get(name)
def __setitem__(self, name, val):
"""Set the value of a header.
Note: this does not overwrite an existing header with the same field
name. Use __delitem__() first to delete any existing headers.
"""
max_count = self.policy.header_max_count(name)
if max_count:
lname = name.lower()
found = 0
for k, v in self._headers:
if k.lower() == lname:
found += 1
if found >= max_count:
raise ValueError("There may be at most {} {} headers "
"in a message".format(max_count, name))
self._headers.append(self.policy.header_store_parse(name, val))
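# A hedged aside: the default compat32 policy's header_max_count() returns
# None, so duplicate headers are always allowed here. Under email.policy.default
# a second assignment such as msg['Subject'] = '...' on a message that already
# has a Subject raises ValueError ("There may be at most 1 Subject headers in
# a message"), because that policy caps unique headers at one occurrence.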
def __delitem__(self, name):
"""Delete all occurrences of a header, if present.
Does not raise an exception if the header is missing.
"""
name = name.lower()
newheaders = []
for k, v in self._headers:
if k.lower() != name:
newheaders.append((k, v))
self._headers = newheaders
def __contains__(self, name):
return name.lower() in [k.lower() for k, v in self._headers]
def __iter__(self):
for field, value in self._headers:
yield field
def keys(self):
"""Return a list of all the message's header field names.
These will be sorted in the order they appeared in the original
message, or were added to the message, and may contain duplicates.
Any fields deleted and re-inserted are always appended to the header
list.
"""
return [k for k, v in self._headers]
def values(self):
"""Return a list of all the message's header values.
These will be sorted in the order they appeared in the original
message, or were added to the message, and may contain duplicates.
Any fields deleted and re-inserted are always appended to the header
list.
"""
return [self.policy.header_fetch_parse(k, v)
for k, v in self._headers]
def items(self):
"""Get all the message's header fields and values.
These will be sorted in the order they appeared in the original
message, or were added to the message, and may contain duplicates.
Any fields deleted and re-inserted are always appended to the header
list.
"""
return [(k, self.policy.header_fetch_parse(k, v))
for k, v in self._headers]
def get(self, name, failobj=None):
"""Get a header value.
Like __getitem__() but return failobj instead of None when the field
is missing.
"""
name = name.lower()
for k, v in self._headers:
if k.lower() == name:
return self.policy.header_fetch_parse(k, v)
return failobj
#
# "Internal" methods (public API, but only intended for use by a parser
# or generator, not normal application code).
#
def set_raw(self, name, value):
"""Store name and value in the model without modification.
This is an "internal" API, intended only for use by a parser.
"""
self._headers.append((name, value))
def raw_items(self):
"""Return the (name, value) header pairs without modification.
This is an "internal" API, intended only for use by a generator.
"""
return iter(self._headers.copy())
#
# Additional useful stuff
#
def get_all(self, name, failobj=None):
"""Return a list of all the values for the named field.
These will be sorted in the order they appeared in the original
message, and may contain duplicates. Any fields deleted and
re-inserted are always appended to the header list.
If no such fields exist, failobj is returned (defaults to None).
"""
values = []
name = name.lower()
for k, v in self._headers:
if k.lower() == name:
values.append(self.policy.header_fetch_parse(k, v))
if not values:
return failobj
return values
def add_header(self, _name, _value, **_params):
"""Extended header setting.
name is the header field to add. keyword arguments can be used to set
additional parameters for the header field, with underscores converted
to dashes. Normally the parameter will be added as key="value" unless
value is None, in which case only the key will be added. If a
parameter value contains non-ASCII characters it can be specified as a
three-tuple of (charset, language, value), in which case it will be
encoded according to RFC2231 rules. Otherwise it will be encoded using
the utf-8 charset and a language of ''.
Examples:
msg.add_header('content-disposition', 'attachment', filename='bud.gif')
msg.add_header('content-disposition', 'attachment',
filename=('utf-8', '', 'Fußballer.ppt'))
msg.add_header('content-disposition', 'attachment',
filename='Fußballer.ppt')
"""
parts = []
for k, v in _params.items():
if v is None:
parts.append(k.replace('_', '-'))
else:
parts.append(_formatparam(k.replace('_', '-'), v))
if _value is not None:
parts.insert(0, _value)
self[_name] = SEMISPACE.join(parts)
def replace_header(self, _name, _value):
"""Replace a header.
Replace the first matching header found in the message, retaining
header order and case. If no matching header was found, a KeyError is
raised.
"""
_name = _name.lower()
for i, (k, v) in zip(range(len(self._headers)), self._headers):
if k.lower() == _name:
self._headers[i] = self.policy.header_store_parse(k, _value)
break
else:
raise KeyError(_name)
#
# Use these three methods instead of the three above.
#
def get_content_type(self):
"""Return the message's content type.
The returned string is coerced to lower case of the form
`maintype/subtype'. If there was no Content-Type header in the
message, the default type as given by get_default_type() will be
returned. Since according to RFC 2045, messages always have a default
type this will always return a value.
RFC 2045 defines a message's default type to be text/plain unless it
appears inside a multipart/digest container, in which case it would be
message/rfc822.
"""
missing = object()
value = self.get('content-type', missing)
if value is missing:
# This should have no parameters
return self.get_default_type()
ctype = _splitparam(value)[0].lower()
# RFC 2045, section 5.2 says if it's invalid, use text/plain
if ctype.count('/') != 1:
return 'text/plain'
return ctype
def get_content_maintype(self):
"""Return the message's main content type.
This is the `maintype' part of the string returned by
get_content_type().
"""
ctype = self.get_content_type()
return ctype.split('/')[0]
def get_content_subtype(self):
"""Returns the message's sub-content type.
This is the `subtype' part of the string returned by
get_content_type().
"""
ctype = self.get_content_type()
return ctype.split('/')[1]
def get_default_type(self):
"""Return the `default' content type.
Most messages have a default content type of text/plain, except for
messages that are subparts of multipart/digest containers. Such
subparts have a default content type of message/rfc822.
"""
return self._default_type
def set_default_type(self, ctype):
"""Set the `default' content type.
ctype should be either "text/plain" or "message/rfc822", although this
is not enforced. The default content type is not stored in the
Content-Type header.
"""
self._default_type = ctype
def _get_params_preserve(self, failobj, header):
# Like get_params() but preserves the quoting of values. BAW:
# should this be part of the public interface?
missing = object()
value = self.get(header, missing)
if value is missing:
return failobj
params = []
for p in _parseparam(value):
try:
name, val = p.split('=', 1)
name = name.strip()
val = val.strip()
except ValueError:
# Must have been a bare attribute
name = p.strip()
val = ''
params.append((name, val))
params = utils.decode_params(params)
return params
def get_params(self, failobj=None, header='content-type', unquote=True):
"""Return the message's Content-Type parameters, as a list.
The elements of the returned list are 2-tuples of key/value pairs, as
split on the `=' sign. The left hand side of the `=' is the key,
while the right hand side is the value. If there is no `=' sign in
the parameter the value is the empty string. The value is as
described in the get_param() method.
Optional failobj is the object to return if there is no Content-Type
header. Optional header is the header to search instead of
Content-Type. If unquote is True, the value is unquoted.
"""
missing = object()
params = self._get_params_preserve(missing, header)
if params is missing:
return failobj
if unquote:
return [(k, _unquotevalue(v)) for k, v in params]
else:
return params
def get_param(self, param, failobj=None, header='content-type',
unquote=True):
"""Return the parameter value if found in the Content-Type header.
Optional failobj is the object to return if there is no Content-Type
header, or the Content-Type header has no such parameter. Optional
header is the header to search instead of Content-Type.
Parameter keys are always compared case insensitively. The return
value can either be a string, or a 3-tuple if the parameter was RFC
2231 encoded. When it's a 3-tuple, the elements of the value are of
the form (CHARSET, LANGUAGE, VALUE). Note that both CHARSET and
LANGUAGE can be None, in which case you should consider VALUE to be
encoded in the us-ascii charset. You can usually ignore LANGUAGE.
The parameter value (either the returned string, or the VALUE item in
the 3-tuple) is always unquoted, unless unquote is set to False.
If your application doesn't care whether the parameter was RFC 2231
encoded, it can turn the return value into a string as follows:
rawparam = msg.get_param('foo')
param = email.utils.collapse_rfc2231_value(rawparam)
"""
if header not in self:
return failobj
for k, v in self._get_params_preserve(failobj, header):
if k.lower() == param.lower():
if unquote:
return _unquotevalue(v)
else:
return v
return failobj
def set_param(self, param, value, header='Content-Type', requote=True,
charset=None, language='', replace=False):
"""Set a parameter in the Content-Type header.
If the parameter already exists in the header, its value will be
replaced with the new value.
If header is Content-Type and has not yet been defined for this
message, it will be set to "text/plain" and the new parameter and
value will be appended as per RFC 2045.
An alternate header can be specified in the header argument, and all
parameters will be quoted as necessary unless requote is False.
If charset is specified, the parameter will be encoded according to RFC
2231. Optional language specifies the RFC 2231 language, defaulting
to the empty string. Both charset and language should be strings.
"""
if not isinstance(value, tuple) and charset:
value = (charset, language, value)
if header not in self and header.lower() == 'content-type':
ctype = 'text/plain'
else:
ctype = self.get(header)
if not self.get_param(param, header=header):
if not ctype:
ctype = _formatparam(param, value, requote)
else:
ctype = SEMISPACE.join(
[ctype, _formatparam(param, value, requote)])
else:
ctype = ''
for old_param, old_value in self.get_params(header=header,
unquote=requote):
append_param = ''
if old_param.lower() == param.lower():
append_param = _formatparam(param, value, requote)
else:
append_param = _formatparam(old_param, old_value, requote)
if not ctype:
ctype = append_param
else:
ctype = SEMISPACE.join([ctype, append_param])
if ctype != self.get(header):
if replace:
self.replace_header(header, ctype)
else:
del self[header]
self[header] = ctype
def del_param(self, param, header='content-type', requote=True):
"""Remove the given parameter completely from the Content-Type header.
The header will be re-written in place without the parameter or its
value. All values will be quoted as necessary unless requote is
False. Optional header specifies an alternative to the Content-Type
header.
"""
if header not in self:
return
new_ctype = ''
for p, v in self.get_params(header=header, unquote=requote):
if p.lower() != param.lower():
if not new_ctype:
new_ctype = _formatparam(p, v, requote)
else:
new_ctype = SEMISPACE.join([new_ctype,
_formatparam(p, v, requote)])
if new_ctype != self.get(header):
del self[header]
self[header] = new_ctype
def set_type(self, type, header='Content-Type', requote=True):
"""Set the main type and subtype for the Content-Type header.
type must be a string in the form "maintype/subtype", otherwise a
ValueError is raised.
This method replaces the Content-Type header, keeping all the
parameters in place. If requote is False, this leaves the existing
header's quoting as is. Otherwise, the parameters will be quoted (the
default).
An alternative header can be specified in the header argument. When
the Content-Type header is set, we'll always also add a MIME-Version
header.
"""
# BAW: should we be strict?
if not type.count('/') == 1:
raise ValueError
# Set the Content-Type, you get a MIME-Version
if header.lower() == 'content-type':
del self['mime-version']
self['MIME-Version'] = '1.0'
if header not in self:
self[header] = type
return
params = self.get_params(header=header, unquote=requote)
del self[header]
self[header] = type
# Skip the first param; it's the old type.
for p, v in params[1:]:
self.set_param(p, v, header, requote)
def get_filename(self, failobj=None):
"""Return the filename associated with the payload if present.
The filename is extracted from the Content-Disposition header's
`filename' parameter, and it is unquoted. If that header is missing
the `filename' parameter, this method falls back to looking for the
`name' parameter.
"""
missing = object()
filename = self.get_param('filename', missing, 'content-disposition')
if filename is missing:
filename = self.get_param('name', missing, 'content-type')
if filename is missing:
return failobj
return utils.collapse_rfc2231_value(filename).strip()
def get_boundary(self, failobj=None):
"""Return the boundary associated with the payload if present.
The boundary is extracted from the Content-Type header's `boundary'
parameter, and it is unquoted.
"""
missing = object()
boundary = self.get_param('boundary', missing)
if boundary is missing:
return failobj
# RFC 2046 says that boundaries may begin but not end in w/s
return utils.collapse_rfc2231_value(boundary).rstrip()
def set_boundary(self, boundary):
"""Set the boundary parameter in Content-Type to 'boundary'.
This is subtly different than deleting the Content-Type header and
adding a new one with a new boundary parameter via add_header(). The
main difference is that using the set_boundary() method preserves the
order of the Content-Type header in the original message.
HeaderParseError is raised if the message has no Content-Type header.
"""
missing = object()
params = self._get_params_preserve(missing, 'content-type')
if params is missing:
# There was no Content-Type header, and we don't know what type
# to set it to, so raise an exception.
raise errors.HeaderParseError('No Content-Type header found')
newparams = []
foundp = False
for pk, pv in params:
if pk.lower() == 'boundary':
newparams.append(('boundary', '"%s"' % boundary))
foundp = True
else:
newparams.append((pk, pv))
if not foundp:
# The original Content-Type header had no boundary attribute.
# Tack one on the end. BAW: should we raise an exception
# instead???
newparams.append(('boundary', '"%s"' % boundary))
# Replace the existing Content-Type header with the new value
newheaders = []
for h, v in self._headers:
if h.lower() == 'content-type':
parts = []
for k, v in newparams:
if v == '':
parts.append(k)
else:
parts.append('%s=%s' % (k, v))
val = SEMISPACE.join(parts)
newheaders.append(self.policy.header_store_parse(h, val))
else:
newheaders.append((h, v))
self._headers = newheaders
def get_content_charset(self, failobj=None):
"""Return the charset parameter of the Content-Type header.
The returned string is always coerced to lower case. If there is no
Content-Type header, or if that header has no charset parameter,
failobj is returned.
"""
missing = object()
charset = self.get_param('charset', missing)
if charset is missing:
return failobj
if isinstance(charset, tuple):
# RFC 2231 encoded, so decode it, and it better end up as ascii.
pcharset = charset[0] or 'us-ascii'
try:
# LookupError will be raised if the charset isn't known to
# Python. UnicodeError will be raised if the encoded text
# contains a character not in the charset.
as_bytes = charset[2].encode('raw-unicode-escape')
charset = str(as_bytes, pcharset)
except (LookupError, UnicodeError):
charset = charset[2]
# charset characters must be in us-ascii range
try:
charset.encode('us-ascii')
except UnicodeError:
return failobj
# RFC 2046, $4.1.2 says charsets are not case sensitive
return charset.lower()
def get_charsets(self, failobj=None):
"""Return a list containing the charset(s) used in this message.
The returned list of items describes the Content-Type headers'
charset parameter for this message and all the subparts in its
payload.
Each item will either be a string (the value of the charset parameter
in the Content-Type header of that part) or the value of the
'failobj' parameter (defaults to None), if the part does not have a
main MIME type of "text", or the charset is not defined.
The list will contain one string for each part of the message, plus
one for the container message (i.e. self), so that a non-multipart
message will still return a list of length 1.
"""
return [part.get_content_charset(failobj) for part in self.walk()]
def get_content_disposition(self):
"""Return the message's content-disposition if it exists, or None.
The return values can be either 'inline', 'attachment' or None
according to the rfc2183.
"""
value = self.get('content-disposition')
if value is None:
return None
c_d = _splitparam(value)[0].lower()
return c_d
# I.e. def walk(self): ...
from email.iterators import walk
class MIMEPart(Message):
def __init__(self, policy=None):
if policy is None:
from email.policy import default
policy = default
Message.__init__(self, policy)
def is_attachment(self):
c_d = self.get('content-disposition')
return False if c_d is None else c_d.content_disposition == 'attachment'
def _find_body(self, part, preferencelist):
if part.is_attachment():
return
maintype, subtype = part.get_content_type().split('/')
if maintype == 'text':
if subtype in preferencelist:
yield (preferencelist.index(subtype), part)
return
if maintype != 'multipart':
return
if subtype != 'related':
for subpart in part.iter_parts():
yield from self._find_body(subpart, preferencelist)
return
if 'related' in preferencelist:
yield (preferencelist.index('related'), part)
candidate = None
start = part.get_param('start')
if start:
for subpart in part.iter_parts():
if subpart['content-id'] == start:
candidate = subpart
break
if candidate is None:
subparts = part.get_payload()
candidate = subparts[0] if subparts else None
if candidate is not None:
yield from self._find_body(candidate, preferencelist)
def get_body(self, preferencelist=('related', 'html', 'plain')):
"""Return best candidate mime part for display as 'body' of message.
Do a depth first search, starting with self, looking for the first part
matching each of the items in preferencelist, and return the part
corresponding to the first item that has a match, or None if no items
have a match. If 'related' is not included in preferencelist, consider
the root part of any multipart/related encountered as a candidate
match. Ignore parts with 'Content-Disposition: attachment'.
"""
best_prio = len(preferencelist)
body = None
for prio, part in self._find_body(self, preferencelist):
if prio < best_prio:
best_prio = prio
body = part
if prio == 0:
break
return body
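# A hedged usage note: for a typical multipart/alternative message carrying a
# text/plain and a text/html part, get_body() returns the html part (because
# 'html' precedes 'plain' in the default preferencelist), while
# get_body(preferencelist=('plain',)) returns the plain-text part.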
_body_types = {('text', 'plain'),
('text', 'html'),
('multipart', 'related'),
('multipart', 'alternative')}
def iter_attachments(self):
"""Return an iterator over the non-main parts of a multipart.
Skip the first of each occurrence of text/plain, text/html,
multipart/related, or multipart/alternative in the multipart (unless
they have a 'Content-Disposition: attachment' header) and include all
remaining subparts in the returned iterator. When applied to a
multipart/related, return all parts except the root part. Return an
empty iterator when applied to a multipart/alternative or a
non-multipart.
"""
maintype, subtype = self.get_content_type().split('/')
if maintype != 'multipart' or subtype == 'alternative':
return
parts = self.get_payload()
if maintype == 'multipart' and subtype == 'related':
# For related, we treat everything but the root as an attachment.
# The root may be indicated by 'start'; if there's no start or we
# can't find the named start, treat the first subpart as the root.
start = self.get_param('start')
if start:
found = False
attachments = []
for part in parts:
if part.get('content-id') == start:
found = True
else:
attachments.append(part)
if found:
yield from attachments
return
parts.pop(0)
yield from parts
return
# Otherwise we more or less invert the remaining logic in get_body.
# This only really works in edge cases (ex: non-text relateds or
# alternatives) if the sending agent sets content-disposition.
seen = [] # Only skip the first example of each candidate type.
for part in parts:
maintype, subtype = part.get_content_type().split('/')
if ((maintype, subtype) in self._body_types and
not part.is_attachment() and subtype not in seen):
seen.append(subtype)
continue
yield part
def iter_parts(self):
"""Return an iterator over all immediate subparts of a multipart.
Return an empty iterator for a non-multipart.
"""
if self.get_content_maintype() == 'multipart':
yield from self.get_payload()
def get_content(self, *args, content_manager=None, **kw):
if content_manager is None:
content_manager = self.policy.content_manager
return content_manager.get_content(self, *args, **kw)
def set_content(self, *args, content_manager=None, **kw):
if content_manager is None:
content_manager = self.policy.content_manager
content_manager.set_content(self, *args, **kw)
def _make_multipart(self, subtype, disallowed_subtypes, boundary):
if self.get_content_maintype() == 'multipart':
existing_subtype = self.get_content_subtype()
disallowed_subtypes = disallowed_subtypes + (subtype,)
if existing_subtype in disallowed_subtypes:
raise ValueError("Cannot convert {} to {}".format(
existing_subtype, subtype))
keep_headers = []
part_headers = []
for name, value in self._headers:
if name.lower().startswith('content-'):
part_headers.append((name, value))
else:
keep_headers.append((name, value))
if part_headers:
# There is existing content, move it to the first subpart.
part = type(self)(policy=self.policy)
part._headers = part_headers
part._payload = self._payload
self._payload = [part]
else:
self._payload = []
self._headers = keep_headers
self['Content-Type'] = 'multipart/' + subtype
if boundary is not None:
self.set_param('boundary', boundary)
def make_related(self, boundary=None):
self._make_multipart('related', ('alternative', 'mixed'), boundary)
def make_alternative(self, boundary=None):
self._make_multipart('alternative', ('mixed',), boundary)
def make_mixed(self, boundary=None):
self._make_multipart('mixed', (), boundary)
def _add_multipart(self, _subtype, *args, _disp=None, **kw):
if (self.get_content_maintype() != 'multipart' or
self.get_content_subtype() != _subtype):
getattr(self, 'make_' + _subtype)()
part = type(self)(policy=self.policy)
part.set_content(*args, **kw)
if _disp and 'content-disposition' not in part:
part['Content-Disposition'] = _disp
self.attach(part)
def add_related(self, *args, **kw):
self._add_multipart('related', *args, _disp='inline', **kw)
def add_alternative(self, *args, **kw):
self._add_multipart('alternative', *args, **kw)
def add_attachment(self, *args, **kw):
self._add_multipart('mixed', *args, _disp='attachment', **kw)
def clear(self):
self._headers = []
self._payload = None
def clear_content(self):
self._headers = [(n, v) for n, v in self._headers
if not n.lower().startswith('content-')]
self._payload = None
class EmailMessage(MIMEPart):
def set_content(self, *args, **kw):
super().set_content(*args, **kw)
if 'MIME-Version' not in self:
self['MIME-Version'] = '1.0'
| 39.93281
| 96
| 0.595175
|
79f8bcaf051114c727cc12c20c5ac07e97607674
| 97,244
|
py
|
Python
|
scipy/interpolate/interpolate.py
|
isuruf/scipy
|
a767030252ba3f7c8e2924847dffa7024171657b
|
[
"BSD-3-Clause"
] | null | null | null |
scipy/interpolate/interpolate.py
|
isuruf/scipy
|
a767030252ba3f7c8e2924847dffa7024171657b
|
[
"BSD-3-Clause"
] | null | null | null |
scipy/interpolate/interpolate.py
|
isuruf/scipy
|
a767030252ba3f7c8e2924847dffa7024171657b
|
[
"BSD-3-Clause"
] | null | null | null |
from __future__ import division, print_function, absolute_import
__all__ = ['interp1d', 'interp2d', 'lagrange', 'PPoly', 'BPoly', 'NdPPoly',
'RegularGridInterpolator', 'interpn']
import itertools
import warnings
import functools
import operator
import numpy as np
from numpy import (array, transpose, searchsorted, atleast_1d, atleast_2d,
ravel, poly1d, asarray, intp)
import scipy.special as spec
from scipy.special import comb
from scipy._lib.six import xrange, integer_types, string_types
from . import fitpack
from . import dfitpack
from . import _fitpack
from .polyint import _Interpolator1D
from . import _ppoly
from .fitpack2 import RectBivariateSpline
from .interpnd import _ndim_coords_from_arrays
from ._bsplines import make_interp_spline, BSpline
def prod(x):
"""Product of a list of numbers; ~40x faster vs np.prod for Python tuples"""
if len(x) == 0:
return 1
return functools.reduce(operator.mul, x)
def lagrange(x, w):
r"""
Return a Lagrange interpolating polynomial.
Given two 1-D arrays `x` and `w`, returns the Lagrange interpolating
polynomial through the points ``(x, w)``.
Warning: This implementation is numerically unstable. Do not expect to
be able to use more than about 20 points even if they are chosen optimally.
Parameters
----------
x : array_like
`x` represents the x-coordinates of a set of datapoints.
w : array_like
`w` represents the y-coordinates of a set of datapoints, i.e. f(`x`).
Returns
-------
lagrange : `numpy.poly1d` instance
The Lagrange interpolating polynomial.
Examples
--------
Interpolate :math:`f(x) = x^3` by 3 points.
>>> from scipy.interpolate import lagrange
>>> x = np.array([0, 1, 2])
>>> y = x**3
>>> poly = lagrange(x, y)
Since there are only 3 points, the Lagrange polynomial has degree 2. Explicitly,
it is given by
.. math::
\begin{aligned}
L(x) &= 1\times \frac{x (x - 2)}{-1} + 8\times \frac{x (x-1)}{2} \\
&= x (-2 + 3x)
\end{aligned}
>>> from numpy.polynomial.polynomial import Polynomial
>>> Polynomial(poly).coef
array([ 3., -2., 0.])
"""
M = len(x)
p = poly1d(0.0)
for j in xrange(M):
pt = poly1d(w[j])
for k in xrange(M):
if k == j:
continue
fac = x[j]-x[k]
pt *= poly1d([1.0, -x[k]])/fac
p += pt
return p
# !! Need to find argument for keeping initialize. If it isn't
# !! found, get rid of it!
class interp2d(object):
"""
interp2d(x, y, z, kind='linear', copy=True, bounds_error=False,
fill_value=None)
Interpolate over a 2-D grid.
`x`, `y` and `z` are arrays of values used to approximate some function
f: ``z = f(x, y)``. This class returns a function whose call method uses
spline interpolation to find the value of new points.
If `x` and `y` represent a regular grid, consider using
RectBivariateSpline.
Note that calling `interp2d` with NaNs present in input values results in
undefined behaviour.
Methods
-------
__call__
Parameters
----------
x, y : array_like
Arrays defining the data point coordinates.
If the points lie on a regular grid, `x` can specify the column
coordinates and `y` the row coordinates, for example::
>>> x = [0,1,2]; y = [0,3]; z = [[1,2,3], [4,5,6]]
Otherwise, `x` and `y` must specify the full coordinates for each
point, for example::
>>> x = [0,1,2,0,1,2]; y = [0,0,0,3,3,3]; z = [1,2,3,4,5,6]
If `x` and `y` are multi-dimensional, they are flattened before use.
z : array_like
The values of the function to interpolate at the data points. If
`z` is a multi-dimensional array, it is flattened before use. The
length of a flattened `z` array is either
len(`x`)*len(`y`) if `x` and `y` specify the column and row coordinates
or ``len(z) == len(x) == len(y)`` if `x` and `y` specify coordinates
for each point.
kind : {'linear', 'cubic', 'quintic'}, optional
The kind of spline interpolation to use. Default is 'linear'.
copy : bool, optional
If True, the class makes internal copies of x, y and z.
If False, references may be used. The default is to copy.
bounds_error : bool, optional
If True, when interpolated values are requested outside of the
domain of the input data (x,y), a ValueError is raised.
If False, then `fill_value` is used.
fill_value : number, optional
If provided, the value to use for points outside of the
interpolation domain. If omitted (None), values outside
the domain are extrapolated via nearest-neighbor extrapolation.
See Also
--------
RectBivariateSpline :
Much faster 2D interpolation if your input data is on a grid
bisplrep, bisplev :
Spline interpolation based on FITPACK
BivariateSpline : a more recent wrapper of the FITPACK routines
interp1d : one dimension version of this function
Notes
-----
The minimum number of data points required along the interpolation
axis is ``(k+1)**2``, with k=1 for linear, k=3 for cubic and k=5 for
quintic interpolation.
The interpolator is constructed by `bisplrep`, with a smoothing factor
of 0. If more control over smoothing is needed, `bisplrep` should be
used directly.
Examples
--------
Construct a 2-D grid and interpolate on it:
>>> from scipy import interpolate
>>> x = np.arange(-5.01, 5.01, 0.25)
>>> y = np.arange(-5.01, 5.01, 0.25)
>>> xx, yy = np.meshgrid(x, y)
>>> z = np.sin(xx**2+yy**2)
>>> f = interpolate.interp2d(x, y, z, kind='cubic')
Now use the obtained interpolation function and plot the result:
>>> import matplotlib.pyplot as plt
>>> xnew = np.arange(-5.01, 5.01, 1e-2)
>>> ynew = np.arange(-5.01, 5.01, 1e-2)
>>> znew = f(xnew, ynew)
>>> plt.plot(x, z[0, :], 'ro-', xnew, znew[0, :], 'b-')
>>> plt.show()
"""
def __init__(self, x, y, z, kind='linear', copy=True, bounds_error=False,
fill_value=None):
x = ravel(x)
y = ravel(y)
z = asarray(z)
rectangular_grid = (z.size == len(x) * len(y))
if rectangular_grid:
if z.ndim == 2:
if z.shape != (len(y), len(x)):
raise ValueError("When on a regular grid with x.size = m "
"and y.size = n, if z.ndim == 2, then z "
"must have shape (n, m)")
if not np.all(x[1:] >= x[:-1]):
j = np.argsort(x)
x = x[j]
z = z[:, j]
if not np.all(y[1:] >= y[:-1]):
j = np.argsort(y)
y = y[j]
z = z[j, :]
z = ravel(z.T)
else:
z = ravel(z)
if len(x) != len(y):
raise ValueError(
"x and y must have equal lengths for non rectangular grid")
if len(z) != len(x):
raise ValueError(
"Invalid length for input z for non rectangular grid")
try:
kx = ky = {'linear': 1,
'cubic': 3,
'quintic': 5}[kind]
except KeyError:
raise ValueError("Unsupported interpolation type.")
if not rectangular_grid:
# TODO: surfit is really not meant for interpolation!
self.tck = fitpack.bisplrep(x, y, z, kx=kx, ky=ky, s=0.0)
else:
nx, tx, ny, ty, c, fp, ier = dfitpack.regrid_smth(
x, y, z, None, None, None, None,
kx=kx, ky=ky, s=0.0)
self.tck = (tx[:nx], ty[:ny], c[:(nx - kx - 1) * (ny - ky - 1)],
kx, ky)
self.bounds_error = bounds_error
self.fill_value = fill_value
self.x, self.y, self.z = [array(a, copy=copy) for a in (x, y, z)]
self.x_min, self.x_max = np.amin(x), np.amax(x)
self.y_min, self.y_max = np.amin(y), np.amax(y)
def __call__(self, x, y, dx=0, dy=0, assume_sorted=False):
"""Interpolate the function.
Parameters
----------
x : 1D array
x-coordinates of the mesh on which to interpolate.
y : 1D array
y-coordinates of the mesh on which to interpolate.
dx : int >= 0, < kx
Order of partial derivatives in x.
dy : int >= 0, < ky
Order of partial derivatives in y.
assume_sorted : bool, optional
If False, values of `x` and `y` can be in any order and they are
sorted first.
If True, `x` and `y` have to be arrays of monotonically
increasing values.
Returns
-------
z : 2D array with shape (len(y), len(x))
The interpolated values.
"""
x = atleast_1d(x)
y = atleast_1d(y)
if x.ndim != 1 or y.ndim != 1:
raise ValueError("x and y should both be 1-D arrays")
if not assume_sorted:
x = np.sort(x)
y = np.sort(y)
if self.bounds_error or self.fill_value is not None:
out_of_bounds_x = (x < self.x_min) | (x > self.x_max)
out_of_bounds_y = (y < self.y_min) | (y > self.y_max)
any_out_of_bounds_x = np.any(out_of_bounds_x)
any_out_of_bounds_y = np.any(out_of_bounds_y)
if self.bounds_error and (any_out_of_bounds_x or any_out_of_bounds_y):
raise ValueError("Values out of range; x must be in %r, y in %r"
% ((self.x_min, self.x_max),
(self.y_min, self.y_max)))
z = fitpack.bisplev(x, y, self.tck, dx, dy)
z = atleast_2d(z)
z = transpose(z)
if self.fill_value is not None:
if any_out_of_bounds_x:
z[:, out_of_bounds_x] = self.fill_value
if any_out_of_bounds_y:
z[out_of_bounds_y, :] = self.fill_value
if len(z) == 1:
z = z[0]
return array(z)
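# Illustrative usage sketch for the class above (the data and the `f_lin` name
# are made up for this comment; the values follow from bilinear interpolation):
#
#     import numpy as np
#     from scipy.interpolate import interp2d
#     x = np.arange(4)                    # column coordinates
#     y = np.arange(3)                    # row coordinates
#     z = np.arange(12).reshape(3, 4)     # shape (len(y), len(x))
#     f_lin = interp2d(x, y, z, kind='linear', fill_value=0.0)
#     f_lin(1.5, 0.5)                     # -> array([ 3.5]) inside the grid
#     f_lin(10.0, 10.0)                   # -> array([ 0.]) outside, i.e. fill_value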
def _check_broadcast_up_to(arr_from, shape_to, name):
"""Helper to check that arr_from broadcasts up to shape_to"""
shape_from = arr_from.shape
if len(shape_to) >= len(shape_from):
for t, f in zip(shape_to[::-1], shape_from[::-1]):
if f != 1 and f != t:
break
else: # all checks pass, do the upcasting that we need later
if arr_from.size != 1 and arr_from.shape != shape_to:
arr_from = np.ones(shape_to, arr_from.dtype) * arr_from
return arr_from.ravel()
# at least one check failed
raise ValueError('%s argument must be able to broadcast up '
'to shape %s but had shape %s'
% (name, shape_to, shape_from))
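# Broadcasting sketch for the helper above (shapes are illustrative): a scalar
# fill value is upcast to the non-interpolation shape, a matching 1-D value is
# passed through, and an incompatible one raises.
#
#     import numpy as np
#     _check_broadcast_up_to(np.asarray(0.0), (3,), 'fill_value')           # -> array([0., 0., 0.])
#     _check_broadcast_up_to(np.asarray([1., 2., 3.]), (3,), 'fill_value')  # -> array([1., 2., 3.])
#     _check_broadcast_up_to(np.asarray([1., 2.]), (3,), 'fill_value')      # ValueError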
def _do_extrapolate(fill_value):
"""Helper to check if fill_value == "extrapolate" without warnings"""
return (isinstance(fill_value, string_types) and
fill_value == 'extrapolate')
class interp1d(_Interpolator1D):
"""
Interpolate a 1-D function.
`x` and `y` are arrays of values used to approximate some function f:
``y = f(x)``. This class returns a function whose call method uses
interpolation to find the value of new points.
Note that calling `interp1d` with NaNs present in input values results in
undefined behaviour.
Parameters
----------
x : (N,) array_like
A 1-D array of real values.
y : (...,N,...) array_like
A N-D array of real values. The length of `y` along the interpolation
axis must be equal to the length of `x`.
kind : str or int, optional
Specifies the kind of interpolation as a string
('linear', 'nearest', 'zero', 'slinear', 'quadratic', 'cubic',
'previous', 'next', where 'zero', 'slinear', 'quadratic' and 'cubic'
refer to a spline interpolation of zeroth, first, second or third
order; 'previous' and 'next' simply return the previous or next value
of the point) or as an integer specifying the order of the spline
interpolator to use.
Default is 'linear'.
axis : int, optional
Specifies the axis of `y` along which to interpolate.
Interpolation defaults to the last axis of `y`.
copy : bool, optional
If True, the class makes internal copies of x and y.
If False, references to `x` and `y` are used. The default is to copy.
bounds_error : bool, optional
If True, a ValueError is raised any time interpolation is attempted on
a value outside of the range of x (where extrapolation is
necessary). If False, out of bounds values are assigned `fill_value`.
By default, an error is raised unless ``fill_value="extrapolate"``.
    fill_value : array_like or (array_like, array_like) or "extrapolate", optional
        - If an ndarray (or float), this value will be used to fill in for
requested points outside of the data range. If not provided, then
the default is NaN. The array-like must broadcast properly to the
dimensions of the non-interpolation axes.
- If a two-element tuple, then the first element is used as a
fill value for ``x_new < x[0]`` and the second element is used for
``x_new > x[-1]``. Anything that is not a 2-element tuple (e.g.,
list or ndarray, regardless of shape) is taken to be a single
array-like argument meant to be used for both bounds as
``below, above = fill_value, fill_value``.
.. versionadded:: 0.17.0
- If "extrapolate", then points outside the data range will be
extrapolated.
.. versionadded:: 0.17.0
assume_sorted : bool, optional
If False, values of `x` can be in any order and they are sorted first.
If True, `x` has to be an array of monotonically increasing values.
Attributes
----------
fill_value
Methods
-------
__call__
See Also
--------
splrep, splev
Spline interpolation/smoothing based on FITPACK.
UnivariateSpline : An object-oriented wrapper of the FITPACK routines.
interp2d : 2-D interpolation
Examples
--------
>>> import matplotlib.pyplot as plt
>>> from scipy import interpolate
>>> x = np.arange(0, 10)
>>> y = np.exp(-x/3.0)
>>> f = interpolate.interp1d(x, y)
>>> xnew = np.arange(0, 9, 0.1)
>>> ynew = f(xnew) # use interpolation function returned by `interp1d`
>>> plt.plot(x, y, 'o', xnew, ynew, '-')
>>> plt.show()
"""
def __init__(self, x, y, kind='linear', axis=-1,
copy=True, bounds_error=None, fill_value=np.nan,
assume_sorted=False):
""" Initialize a 1D linear interpolation class."""
_Interpolator1D.__init__(self, x, y, axis=axis)
self.bounds_error = bounds_error # used by fill_value setter
self.copy = copy
if kind in ['zero', 'slinear', 'quadratic', 'cubic']:
order = {'zero': 0, 'slinear': 1,
'quadratic': 2, 'cubic': 3}[kind]
kind = 'spline'
elif isinstance(kind, int):
order = kind
kind = 'spline'
elif kind not in ('linear', 'nearest', 'previous', 'next'):
raise NotImplementedError("%s is unsupported: Use fitpack "
"routines for other types." % kind)
x = array(x, copy=self.copy)
y = array(y, copy=self.copy)
if not assume_sorted:
ind = np.argsort(x)
x = x[ind]
y = np.take(y, ind, axis=axis)
if x.ndim != 1:
raise ValueError("the x array must have exactly one dimension.")
if y.ndim == 0:
raise ValueError("the y array must have at least one dimension.")
# Force-cast y to a floating-point type, if it's not yet one
if not issubclass(y.dtype.type, np.inexact):
y = y.astype(np.float_)
# Backward compatibility
self.axis = axis % y.ndim
# Interpolation goes internally along the first axis
self.y = y
self._y = self._reshape_yi(self.y)
self.x = x
del y, x # clean up namespace to prevent misuse; use attributes
self._kind = kind
self.fill_value = fill_value # calls the setter, can modify bounds_err
# Adjust to interpolation kind; store reference to *unbound*
# interpolation methods, in order to avoid circular references to self
# stored in the bound instance methods, and therefore delayed garbage
# collection. See: https://docs.python.org/reference/datamodel.html
if kind in ('linear', 'nearest', 'previous', 'next'):
# Make a "view" of the y array that is rotated to the interpolation
# axis.
minval = 2
if kind == 'nearest':
# Do division before addition to prevent possible integer
# overflow
self.x_bds = self.x / 2.0
self.x_bds = self.x_bds[1:] + self.x_bds[:-1]
self._call = self.__class__._call_nearest
elif kind == 'previous':
# Side for np.searchsorted and index for clipping
self._side = 'left'
self._ind = 0
# Move x by one floating point value to the left
self._x_shift = np.nextafter(self.x, -np.inf)
self._call = self.__class__._call_previousnext
elif kind == 'next':
self._side = 'right'
self._ind = 1
# Move x by one floating point value to the right
self._x_shift = np.nextafter(self.x, np.inf)
self._call = self.__class__._call_previousnext
else:
# Check if we can delegate to numpy.interp (2x-10x faster).
cond = self.x.dtype == np.float_ and self.y.dtype == np.float_
cond = cond and self.y.ndim == 1
cond = cond and not _do_extrapolate(fill_value)
if cond:
self._call = self.__class__._call_linear_np
else:
self._call = self.__class__._call_linear
else:
minval = order + 1
rewrite_nan = False
xx, yy = self.x, self._y
if order > 1:
# Quadratic or cubic spline. If input contains even a single
# nan, then the output is all nans. We cannot just feed data
# with nans to make_interp_spline because it calls LAPACK.
# So, we make up a bogus x and y with no nans and use it
# to get the correct shape of the output, which we then fill
# with nans.
# For slinear or zero order spline, we just pass nans through.
if np.isnan(self.x).any():
xx = np.linspace(min(self.x), max(self.x), len(self.x))
rewrite_nan = True
if np.isnan(self._y).any():
yy = np.ones_like(self._y)
rewrite_nan = True
self._spline = make_interp_spline(xx, yy, k=order,
check_finite=False)
if rewrite_nan:
self._call = self.__class__._call_nan_spline
else:
self._call = self.__class__._call_spline
if len(self.x) < minval:
raise ValueError("x and y arrays must have at "
"least %d entries" % minval)
@property
def fill_value(self):
"""The fill value."""
# backwards compat: mimic a public attribute
return self._fill_value_orig
@fill_value.setter
def fill_value(self, fill_value):
# extrapolation only works for nearest neighbor and linear methods
if _do_extrapolate(fill_value):
if self.bounds_error:
raise ValueError("Cannot extrapolate and raise "
"at the same time.")
self.bounds_error = False
self._extrapolate = True
else:
broadcast_shape = (self.y.shape[:self.axis] +
self.y.shape[self.axis + 1:])
if len(broadcast_shape) == 0:
broadcast_shape = (1,)
# it's either a pair (_below_range, _above_range) or a single value
# for both above and below range
if isinstance(fill_value, tuple) and len(fill_value) == 2:
below_above = [np.asarray(fill_value[0]),
np.asarray(fill_value[1])]
names = ('fill_value (below)', 'fill_value (above)')
for ii in range(2):
below_above[ii] = _check_broadcast_up_to(
below_above[ii], broadcast_shape, names[ii])
else:
fill_value = np.asarray(fill_value)
below_above = [_check_broadcast_up_to(
fill_value, broadcast_shape, 'fill_value')] * 2
self._fill_value_below, self._fill_value_above = below_above
self._extrapolate = False
if self.bounds_error is None:
self.bounds_error = True
# backwards compat: fill_value was a public attr; make it writeable
self._fill_value_orig = fill_value
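    # Fill-value sketch (sample data chosen for this comment): a (below, above)
    # pair needs bounds_error=False, while "extrapolate" disables it implicitly.
    #
    #     import numpy as np
    #     from scipy.interpolate import interp1d
    #     x = np.array([0.0, 1.0, 2.0])
    #     y = np.array([0.0, 10.0, 20.0])
    #     f = interp1d(x, y, bounds_error=False, fill_value=(-1.0, 99.0))
    #     f([-5.0, 0.5, 5.0])             # -> array([-1.,  5., 99.])
    #     g = interp1d(x, y, fill_value="extrapolate")
    #     g(3.0)                          # -> 30.0 (linear extrapolation)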
def _call_linear_np(self, x_new):
# Note that out-of-bounds values are taken care of in self._evaluate
return np.interp(x_new, self.x, self.y)
def _call_linear(self, x_new):
# 2. Find where in the original data, the values to interpolate
# would be inserted.
# Note: If x_new[n] == x[m], then m is returned by searchsorted.
x_new_indices = searchsorted(self.x, x_new)
# 3. Clip x_new_indices so that they are within the range of
# self.x indices and at least 1. Removes mis-interpolation
# of x_new[n] = x[0]
x_new_indices = x_new_indices.clip(1, len(self.x)-1).astype(int)
# 4. Calculate the slope of regions that each x_new value falls in.
lo = x_new_indices - 1
hi = x_new_indices
x_lo = self.x[lo]
x_hi = self.x[hi]
y_lo = self._y[lo]
y_hi = self._y[hi]
# Note that the following two expressions rely on the specifics of the
# broadcasting semantics.
slope = (y_hi - y_lo) / (x_hi - x_lo)[:, None]
# 5. Calculate the actual value for each entry in x_new.
y_new = slope*(x_new - x_lo)[:, None] + y_lo
return y_new
def _call_nearest(self, x_new):
""" Find nearest neighbour interpolated y_new = f(x_new)."""
# 2. Find where in the averaged data the values to interpolate
# would be inserted.
# Note: use side='left' (right) to searchsorted() to define the
# halfway point to be nearest to the left (right) neighbour
x_new_indices = searchsorted(self.x_bds, x_new, side='left')
# 3. Clip x_new_indices so that they are within the range of x indices.
x_new_indices = x_new_indices.clip(0, len(self.x)-1).astype(intp)
# 4. Calculate the actual value for each entry in x_new.
y_new = self._y[x_new_indices]
return y_new
def _call_previousnext(self, x_new):
"""Use previous/next neighbour of x_new, y_new = f(x_new)."""
# 1. Get index of left/right value
x_new_indices = searchsorted(self._x_shift, x_new, side=self._side)
# 2. Clip x_new_indices so that they are within the range of x indices.
x_new_indices = x_new_indices.clip(1-self._ind,
len(self.x)-self._ind).astype(intp)
# 3. Calculate the actual value for each entry in x_new.
y_new = self._y[x_new_indices+self._ind-1]
return y_new
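    # Sketch contrasting the step-like kinds handled above (values illustrative):
    #
    #     from scipy.interpolate import interp1d
    #     x, y = [0, 1, 2], [0, 10, 20]
    #     interp1d(x, y, kind='previous')(1.5)   # -> 10.0, last sample at or before 1.5
    #     interp1d(x, y, kind='next')(1.5)       # -> 20.0, next sample at or after 1.5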
def _call_spline(self, x_new):
return self._spline(x_new)
def _call_nan_spline(self, x_new):
out = self._spline(x_new)
out[...] = np.nan
return out
def _evaluate(self, x_new):
# 1. Handle values in x_new that are outside of x. Throw error,
# or return a list of mask array indicating the outofbounds values.
# The behavior is set by the bounds_error variable.
x_new = asarray(x_new)
y_new = self._call(self, x_new)
if not self._extrapolate:
below_bounds, above_bounds = self._check_bounds(x_new)
if len(y_new) > 0:
# Note fill_value must be broadcast up to the proper size
# and flattened to work here
y_new[below_bounds] = self._fill_value_below
y_new[above_bounds] = self._fill_value_above
return y_new
def _check_bounds(self, x_new):
"""Check the inputs for being in the bounds of the interpolated data.
Parameters
----------
x_new : array
Returns
-------
out_of_bounds : bool array
The mask on x_new of values that are out of the bounds.
"""
# If self.bounds_error is True, we raise an error if any x_new values
# fall outside the range of x. Otherwise, we return an array indicating
# which values are outside the boundary region.
below_bounds = x_new < self.x[0]
above_bounds = x_new > self.x[-1]
# !! Could provide more information about which values are out of bounds
if self.bounds_error and below_bounds.any():
raise ValueError("A value in x_new is below the interpolation "
"range.")
if self.bounds_error and above_bounds.any():
raise ValueError("A value in x_new is above the interpolation "
"range.")
# !! Should we emit a warning if some values are out of bounds?
# !! matlab does not.
return below_bounds, above_bounds
class _PPolyBase(object):
"""Base class for piecewise polynomials."""
__slots__ = ('c', 'x', 'extrapolate', 'axis')
def __init__(self, c, x, extrapolate=None, axis=0):
self.c = np.asarray(c)
self.x = np.ascontiguousarray(x, dtype=np.float64)
if extrapolate is None:
extrapolate = True
elif extrapolate != 'periodic':
extrapolate = bool(extrapolate)
self.extrapolate = extrapolate
if self.c.ndim < 2:
raise ValueError("Coefficients array must be at least "
"2-dimensional.")
if not (0 <= axis < self.c.ndim - 1):
raise ValueError("axis=%s must be between 0 and %s" %
(axis, self.c.ndim-1))
self.axis = axis
if axis != 0:
# roll the interpolation axis to be the first one in self.c
# More specifically, the target shape for self.c is (k, m, ...),
# and axis !=0 means that we have c.shape (..., k, m, ...)
# ^
# axis
# So we roll two of them.
self.c = np.rollaxis(self.c, axis+1)
self.c = np.rollaxis(self.c, axis+1)
if self.x.ndim != 1:
raise ValueError("x must be 1-dimensional")
if self.x.size < 2:
raise ValueError("at least 2 breakpoints are needed")
if self.c.ndim < 2:
raise ValueError("c must have at least 2 dimensions")
if self.c.shape[0] == 0:
raise ValueError("polynomial must be at least of order 0")
if self.c.shape[1] != self.x.size-1:
raise ValueError("number of coefficients != len(x)-1")
dx = np.diff(self.x)
if not (np.all(dx >= 0) or np.all(dx <= 0)):
raise ValueError("`x` must be strictly increasing or decreasing.")
dtype = self._get_dtype(self.c.dtype)
self.c = np.ascontiguousarray(self.c, dtype=dtype)
def _get_dtype(self, dtype):
if np.issubdtype(dtype, np.complexfloating) \
or np.issubdtype(self.c.dtype, np.complexfloating):
return np.complex_
else:
return np.float_
@classmethod
def construct_fast(cls, c, x, extrapolate=None, axis=0):
"""
Construct the piecewise polynomial without making checks.
Takes the same parameters as the constructor. Input arguments
``c`` and ``x`` must be arrays of the correct shape and type. The
``c`` array can only be of dtypes float and complex, and ``x``
array must have dtype float.
"""
self = object.__new__(cls)
self.c = c
self.x = x
self.axis = axis
if extrapolate is None:
extrapolate = True
self.extrapolate = extrapolate
return self
def _ensure_c_contiguous(self):
"""
c and x may be modified by the user. The Cython code expects
that they are C contiguous.
"""
if not self.x.flags.c_contiguous:
self.x = self.x.copy()
if not self.c.flags.c_contiguous:
self.c = self.c.copy()
def extend(self, c, x, right=None):
"""
Add additional breakpoints and coefficients to the polynomial.
Parameters
----------
c : ndarray, size (k, m, ...)
Additional coefficients for polynomials in intervals. Note that
the first additional interval will be formed using one of the
``self.x`` end points.
x : ndarray, size (m,)
Additional breakpoints. Must be sorted in the same order as
``self.x`` and either to the right or to the left of the current
breakpoints.
right
Deprecated argument. Has no effect.
.. deprecated:: 0.19
"""
if right is not None:
warnings.warn("`right` is deprecated and will be removed.")
c = np.asarray(c)
x = np.asarray(x)
if c.ndim < 2:
raise ValueError("invalid dimensions for c")
if x.ndim != 1:
raise ValueError("invalid dimensions for x")
if x.shape[0] != c.shape[1]:
raise ValueError("x and c have incompatible sizes")
if c.shape[2:] != self.c.shape[2:] or c.ndim != self.c.ndim:
raise ValueError("c and self.c have incompatible shapes")
if c.size == 0:
return
dx = np.diff(x)
if not (np.all(dx >= 0) or np.all(dx <= 0)):
raise ValueError("`x` is not sorted.")
if self.x[-1] >= self.x[0]:
if not x[-1] >= x[0]:
raise ValueError("`x` is in the different order "
"than `self.x`.")
if x[0] >= self.x[-1]:
action = 'append'
elif x[-1] <= self.x[0]:
action = 'prepend'
else:
raise ValueError("`x` is neither on the left or on the right "
"from `self.x`.")
else:
if not x[-1] <= x[0]:
raise ValueError("`x` is in the different order "
"than `self.x`.")
if x[0] <= self.x[-1]:
action = 'append'
elif x[-1] >= self.x[0]:
action = 'prepend'
else:
raise ValueError("`x` is neither on the left or on the right "
"from `self.x`.")
dtype = self._get_dtype(c.dtype)
k2 = max(c.shape[0], self.c.shape[0])
c2 = np.zeros((k2, self.c.shape[1] + c.shape[1]) + self.c.shape[2:],
dtype=dtype)
if action == 'append':
c2[k2-self.c.shape[0]:, :self.c.shape[1]] = self.c
c2[k2-c.shape[0]:, self.c.shape[1]:] = c
self.x = np.r_[self.x, x]
elif action == 'prepend':
c2[k2-self.c.shape[0]:, :c.shape[1]] = c
c2[k2-c.shape[0]:, c.shape[1]:] = self.c
self.x = np.r_[x, self.x]
self.c = c2
def __call__(self, x, nu=0, extrapolate=None):
"""
Evaluate the piecewise polynomial or its derivative.
Parameters
----------
x : array_like
Points to evaluate the interpolant at.
nu : int, optional
Order of derivative to evaluate. Must be non-negative.
extrapolate : {bool, 'periodic', None}, optional
If bool, determines whether to extrapolate to out-of-bounds points
based on first and last intervals, or to return NaNs.
If 'periodic', periodic extrapolation is used.
If None (default), use `self.extrapolate`.
Returns
-------
y : array_like
Interpolated values. Shape is determined by replacing
the interpolation axis in the original array with the shape of x.
Notes
-----
Derivatives are evaluated piecewise for each polynomial
segment, even if the polynomial is not differentiable at the
breakpoints. The polynomial intervals are considered half-open,
``[a, b)``, except for the last interval which is closed
``[a, b]``.
"""
if extrapolate is None:
extrapolate = self.extrapolate
x = np.asarray(x)
x_shape, x_ndim = x.shape, x.ndim
x = np.ascontiguousarray(x.ravel(), dtype=np.float_)
# With periodic extrapolation we map x to the segment
# [self.x[0], self.x[-1]].
if extrapolate == 'periodic':
x = self.x[0] + (x - self.x[0]) % (self.x[-1] - self.x[0])
extrapolate = False
out = np.empty((len(x), prod(self.c.shape[2:])), dtype=self.c.dtype)
self._ensure_c_contiguous()
self._evaluate(x, nu, extrapolate, out)
out = out.reshape(x_shape + self.c.shape[2:])
if self.axis != 0:
# transpose to move the calculated values to the interpolation axis
l = list(range(out.ndim))
l = l[x_ndim:x_ndim+self.axis] + l[:x_ndim] + l[x_ndim+self.axis:]
out = out.transpose(l)
return out
class PPoly(_PPolyBase):
"""
Piecewise polynomial in terms of coefficients and breakpoints
The polynomial between ``x[i]`` and ``x[i + 1]`` is written in the
local power basis::
S = sum(c[m, i] * (xp - x[i])**(k-m) for m in range(k+1))
where ``k`` is the degree of the polynomial.
Parameters
----------
c : ndarray, shape (k, m, ...)
Polynomial coefficients, order `k` and `m` intervals
x : ndarray, shape (m+1,)
Polynomial breakpoints. Must be sorted in either increasing or
decreasing order.
extrapolate : bool or 'periodic', optional
If bool, determines whether to extrapolate to out-of-bounds points
based on first and last intervals, or to return NaNs. If 'periodic',
periodic extrapolation is used. Default is True.
axis : int, optional
Interpolation axis. Default is zero.
Attributes
----------
x : ndarray
Breakpoints.
c : ndarray
Coefficients of the polynomials. They are reshaped
to a 3-dimensional array with the last dimension representing
the trailing dimensions of the original coefficient array.
axis : int
Interpolation axis.
Methods
-------
__call__
derivative
antiderivative
integrate
solve
roots
extend
from_spline
from_bernstein_basis
construct_fast
See also
--------
BPoly : piecewise polynomials in the Bernstein basis
Notes
-----
High-order polynomials in the power basis can be numerically
unstable. Precision problems can start to appear for orders
larger than 20-30.
"""
def _evaluate(self, x, nu, extrapolate, out):
_ppoly.evaluate(self.c.reshape(self.c.shape[0], self.c.shape[1], -1),
self.x, x, nu, bool(extrapolate), out)
def derivative(self, nu=1):
"""
Construct a new piecewise polynomial representing the derivative.
Parameters
----------
nu : int, optional
Order of derivative to evaluate. Default is 1, i.e. compute the
first derivative. If negative, the antiderivative is returned.
Returns
-------
pp : PPoly
Piecewise polynomial of order k2 = k - n representing the derivative
of this polynomial.
Notes
-----
Derivatives are evaluated piecewise for each polynomial
segment, even if the polynomial is not differentiable at the
breakpoints. The polynomial intervals are considered half-open,
``[a, b)``, except for the last interval which is closed
``[a, b]``.
"""
if nu < 0:
return self.antiderivative(-nu)
# reduce order
if nu == 0:
c2 = self.c.copy()
else:
c2 = self.c[:-nu, :].copy()
if c2.shape[0] == 0:
# derivative of order 0 is zero
c2 = np.zeros((1,) + c2.shape[1:], dtype=c2.dtype)
# multiply by the correct rising factorials
factor = spec.poch(np.arange(c2.shape[0], 0, -1), nu)
c2 *= factor[(slice(None),) + (None,)*(c2.ndim-1)]
# construct a compatible polynomial
return self.construct_fast(c2, self.x, self.extrapolate, self.axis)
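    # Order-reduction sketch for derivative() (single illustrative interval):
    #
    #     import numpy as np
    #     from scipy.interpolate import PPoly
    #     pp = PPoly(np.array([[1.0], [0.0], [0.0]]), [0, 1])   # x**2 on [0, 1]
    #     dpp = pp.derivative()
    #     dpp.c.shape                     # -> (2, 1), one degree lower
    #     dpp(0.5)                        # -> 1.0, i.e. d/dx x**2 at x = 0.5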
def antiderivative(self, nu=1):
"""
Construct a new piecewise polynomial representing the antiderivative.
Antiderivative is also the indefinite integral of the function,
and derivative is its inverse operation.
Parameters
----------
nu : int, optional
Order of antiderivative to evaluate. Default is 1, i.e. compute
the first integral. If negative, the derivative is returned.
Returns
-------
pp : PPoly
Piecewise polynomial of order k2 = k + n representing
the antiderivative of this polynomial.
Notes
-----
The antiderivative returned by this function is continuous and
continuously differentiable to order n-1, up to floating point
rounding error.
If antiderivative is computed and ``self.extrapolate='periodic'``,
it will be set to False for the returned instance. This is done because
the antiderivative is no longer periodic and its correct evaluation
outside of the initially given x interval is difficult.
"""
if nu <= 0:
return self.derivative(-nu)
c = np.zeros((self.c.shape[0] + nu, self.c.shape[1]) + self.c.shape[2:],
dtype=self.c.dtype)
c[:-nu] = self.c
# divide by the correct rising factorials
factor = spec.poch(np.arange(self.c.shape[0], 0, -1), nu)
c[:-nu] /= factor[(slice(None),) + (None,)*(c.ndim-1)]
# fix continuity of added degrees of freedom
self._ensure_c_contiguous()
_ppoly.fix_continuity(c.reshape(c.shape[0], c.shape[1], -1),
self.x, nu - 1)
if self.extrapolate == 'periodic':
extrapolate = False
else:
extrapolate = self.extrapolate
# construct a compatible polynomial
return self.construct_fast(c, self.x, extrapolate, self.axis)
def integrate(self, a, b, extrapolate=None):
"""
Compute a definite integral over a piecewise polynomial.
Parameters
----------
a : float
Lower integration bound
b : float
Upper integration bound
extrapolate : {bool, 'periodic', None}, optional
If bool, determines whether to extrapolate to out-of-bounds points
based on first and last intervals, or to return NaNs.
If 'periodic', periodic extrapolation is used.
If None (default), use `self.extrapolate`.
Returns
-------
ig : array_like
Definite integral of the piecewise polynomial over [a, b]
"""
if extrapolate is None:
extrapolate = self.extrapolate
# Swap integration bounds if needed
sign = 1
if b < a:
a, b = b, a
sign = -1
range_int = np.empty((prod(self.c.shape[2:]),), dtype=self.c.dtype)
self._ensure_c_contiguous()
# Compute the integral.
if extrapolate == 'periodic':
# Split the integral into the part over period (can be several
# of them) and the remaining part.
xs, xe = self.x[0], self.x[-1]
period = xe - xs
interval = b - a
n_periods, left = divmod(interval, period)
if n_periods > 0:
_ppoly.integrate(
self.c.reshape(self.c.shape[0], self.c.shape[1], -1),
self.x, xs, xe, False, out=range_int)
range_int *= n_periods
else:
range_int.fill(0)
# Map a to [xs, xe], b is always a + left.
a = xs + (a - xs) % period
b = a + left
            # If b <= xe then we need to integrate over [a, b]; otherwise
            # over [a, xe] and then from xs over the remaining length.
remainder_int = np.empty_like(range_int)
if b <= xe:
_ppoly.integrate(
self.c.reshape(self.c.shape[0], self.c.shape[1], -1),
self.x, a, b, False, out=remainder_int)
range_int += remainder_int
else:
_ppoly.integrate(
self.c.reshape(self.c.shape[0], self.c.shape[1], -1),
self.x, a, xe, False, out=remainder_int)
range_int += remainder_int
_ppoly.integrate(
self.c.reshape(self.c.shape[0], self.c.shape[1], -1),
self.x, xs, xs + left + a - xe, False, out=remainder_int)
range_int += remainder_int
else:
_ppoly.integrate(
self.c.reshape(self.c.shape[0], self.c.shape[1], -1),
self.x, a, b, bool(extrapolate), out=range_int)
# Return
range_int *= sign
return range_int.reshape(self.c.shape[2:])
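    # Integration sketch (illustrative data): the definite integral of x**2
    # over [0, 1].
    #
    #     import numpy as np
    #     from scipy.interpolate import PPoly
    #     pp = PPoly(np.array([[1.0], [0.0], [0.0]]), [0, 1])
    #     pp.integrate(0, 1)              # -> 0.333..., i.e. 1/3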
def solve(self, y=0., discontinuity=True, extrapolate=None):
"""
        Find real solutions of the equation ``pp(x) == y``.
Parameters
----------
y : float, optional
Right-hand side. Default is zero.
discontinuity : bool, optional
Whether to report sign changes across discontinuities at
breakpoints as roots.
extrapolate : {bool, 'periodic', None}, optional
If bool, determines whether to return roots from the polynomial
extrapolated based on first and last intervals, 'periodic' works
the same as False. If None (default), use `self.extrapolate`.
Returns
-------
roots : ndarray
Roots of the polynomial(s).
If the PPoly object describes multiple polynomials, the
return value is an object array whose each element is an
ndarray containing the roots.
Notes
-----
This routine works only on real-valued polynomials.
If the piecewise polynomial contains sections that are
identically zero, the root list will contain the start point
of the corresponding interval, followed by a ``nan`` value.
If the polynomial is discontinuous across a breakpoint, and
there is a sign change across the breakpoint, this is reported
        if the `discontinuity` parameter is True.
Examples
--------
Finding roots of ``[x**2 - 1, (x - 1)**2]`` defined on intervals
``[-2, 1], [1, 2]``:
>>> from scipy.interpolate import PPoly
>>> pp = PPoly(np.array([[1, -4, 3], [1, 0, 0]]).T, [-2, 1, 2])
>>> pp.solve()
array([-1., 1.])
"""
if extrapolate is None:
extrapolate = self.extrapolate
self._ensure_c_contiguous()
if np.issubdtype(self.c.dtype, np.complexfloating):
raise ValueError("Root finding is only for "
"real-valued polynomials")
y = float(y)
r = _ppoly.real_roots(self.c.reshape(self.c.shape[0], self.c.shape[1], -1),
self.x, y, bool(discontinuity),
bool(extrapolate))
if self.c.ndim == 2:
return r[0]
else:
r2 = np.empty(prod(self.c.shape[2:]), dtype=object)
# this for-loop is equivalent to ``r2[...] = r``, but that's broken
# in numpy 1.6.0
for ii, root in enumerate(r):
r2[ii] = root
return r2.reshape(self.c.shape[2:])
def roots(self, discontinuity=True, extrapolate=None):
"""
        Find real roots of the piecewise polynomial.
Parameters
----------
discontinuity : bool, optional
Whether to report sign changes across discontinuities at
breakpoints as roots.
extrapolate : {bool, 'periodic', None}, optional
If bool, determines whether to return roots from the polynomial
extrapolated based on first and last intervals, 'periodic' works
the same as False. If None (default), use `self.extrapolate`.
Returns
-------
roots : ndarray
Roots of the polynomial(s).
If the PPoly object describes multiple polynomials, the
return value is an object array whose each element is an
ndarray containing the roots.
See Also
--------
PPoly.solve
"""
return self.solve(0, discontinuity, extrapolate)
@classmethod
def from_spline(cls, tck, extrapolate=None):
"""
Construct a piecewise polynomial from a spline
Parameters
----------
tck
A spline, as returned by `splrep` or a BSpline object.
extrapolate : bool or 'periodic', optional
If bool, determines whether to extrapolate to out-of-bounds points
based on first and last intervals, or to return NaNs.
If 'periodic', periodic extrapolation is used. Default is True.
"""
if isinstance(tck, BSpline):
t, c, k = tck.tck
if extrapolate is None:
extrapolate = tck.extrapolate
else:
t, c, k = tck
cvals = np.empty((k + 1, len(t)-1), dtype=c.dtype)
for m in xrange(k, -1, -1):
y = fitpack.splev(t[:-1], tck, der=m)
cvals[k - m, :] = y/spec.gamma(m+1)
return cls.construct_fast(cvals, t, extrapolate)
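    # Conversion sketch (sample data made up for this comment): build an
    # interpolating cubic spline with splrep and re-express it as a PPoly.
    #
    #     import numpy as np
    #     from scipy.interpolate import splrep, PPoly
    #     x = np.linspace(0, 10, 11)
    #     y = np.sin(x)
    #     pp = PPoly.from_spline(splrep(x, y))
    #     pp(5.3)                         # close to np.sin(5.3)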
@classmethod
def from_bernstein_basis(cls, bp, extrapolate=None):
"""
Construct a piecewise polynomial in the power basis
from a polynomial in Bernstein basis.
Parameters
----------
bp : BPoly
A Bernstein basis polynomial, as created by BPoly
extrapolate : bool or 'periodic', optional
If bool, determines whether to extrapolate to out-of-bounds points
based on first and last intervals, or to return NaNs.
If 'periodic', periodic extrapolation is used. Default is True.
"""
dx = np.diff(bp.x)
k = bp.c.shape[0] - 1 # polynomial order
rest = (None,)*(bp.c.ndim-2)
c = np.zeros_like(bp.c)
for a in range(k+1):
factor = (-1)**a * comb(k, a) * bp.c[a]
for s in range(a, k+1):
val = comb(k-a, s-a) * (-1)**s
c[k-s] += factor * val / dx[(slice(None),)+rest]**s
if extrapolate is None:
extrapolate = bp.extrapolate
return cls.construct_fast(c, bp.x, extrapolate, bp.axis)
class BPoly(_PPolyBase):
"""Piecewise polynomial in terms of coefficients and breakpoints.
The polynomial between ``x[i]`` and ``x[i + 1]`` is written in the
Bernstein polynomial basis::
S = sum(c[a, i] * b(a, k; x) for a in range(k+1)),
where ``k`` is the degree of the polynomial, and::
b(a, k; x) = binom(k, a) * t**a * (1 - t)**(k - a),
with ``t = (x - x[i]) / (x[i+1] - x[i])`` and ``binom`` is the binomial
coefficient.
Parameters
----------
c : ndarray, shape (k, m, ...)
Polynomial coefficients, order `k` and `m` intervals
x : ndarray, shape (m+1,)
Polynomial breakpoints. Must be sorted in either increasing or
decreasing order.
    extrapolate : bool or 'periodic', optional
If bool, determines whether to extrapolate to out-of-bounds points
based on first and last intervals, or to return NaNs. If 'periodic',
periodic extrapolation is used. Default is True.
axis : int, optional
Interpolation axis. Default is zero.
Attributes
----------
x : ndarray
Breakpoints.
c : ndarray
Coefficients of the polynomials. They are reshaped
to a 3-dimensional array with the last dimension representing
the trailing dimensions of the original coefficient array.
axis : int
Interpolation axis.
Methods
-------
__call__
extend
derivative
antiderivative
integrate
construct_fast
from_power_basis
from_derivatives
See also
--------
PPoly : piecewise polynomials in the power basis
Notes
-----
Properties of Bernstein polynomials are well documented in the literature,
see for example [1]_ [2]_ [3]_.
References
----------
.. [1] https://en.wikipedia.org/wiki/Bernstein_polynomial
.. [2] Kenneth I. Joy, Bernstein polynomials,
http://www.idav.ucdavis.edu/education/CAGDNotes/Bernstein-Polynomials.pdf
.. [3] E. H. Doha, A. H. Bhrawy, and M. A. Saker, Boundary Value Problems,
vol 2011, article ID 829546, :doi:`10.1155/2011/829543`.
Examples
--------
>>> from scipy.interpolate import BPoly
>>> x = [0, 1]
>>> c = [[1], [2], [3]]
>>> bp = BPoly(c, x)
This creates a 2nd order polynomial
.. math::
B(x) = 1 \\times b_{0, 2}(x) + 2 \\times b_{1, 2}(x) + 3 \\times b_{2, 2}(x) \\\\
= 1 \\times (1-x)^2 + 2 \\times 2 x (1 - x) + 3 \\times x^2
"""
def _evaluate(self, x, nu, extrapolate, out):
_ppoly.evaluate_bernstein(
self.c.reshape(self.c.shape[0], self.c.shape[1], -1),
self.x, x, nu, bool(extrapolate), out)
def derivative(self, nu=1):
"""
Construct a new piecewise polynomial representing the derivative.
Parameters
----------
nu : int, optional
Order of derivative to evaluate. Default is 1, i.e. compute the
first derivative. If negative, the antiderivative is returned.
Returns
-------
bp : BPoly
Piecewise polynomial of order k - nu representing the derivative of
this polynomial.
"""
if nu < 0:
return self.antiderivative(-nu)
if nu > 1:
bp = self
for k in range(nu):
bp = bp.derivative()
return bp
# reduce order
if nu == 0:
c2 = self.c.copy()
else:
# For a polynomial
# B(x) = \sum_{a=0}^{k} c_a b_{a, k}(x),
# we use the fact that
# b'_{a, k} = k ( b_{a-1, k-1} - b_{a, k-1} ),
# which leads to
# B'(x) = \sum_{a=0}^{k-1} (c_{a+1} - c_a) b_{a, k-1}
#
# finally, for an interval [y, y + dy] with dy != 1,
# we need to correct for an extra power of dy
rest = (None,)*(self.c.ndim-2)
k = self.c.shape[0] - 1
dx = np.diff(self.x)[(None, slice(None))+rest]
c2 = k * np.diff(self.c, axis=0) / dx
if c2.shape[0] == 0:
# derivative of order 0 is zero
c2 = np.zeros((1,) + c2.shape[1:], dtype=c2.dtype)
# construct a compatible polynomial
return self.construct_fast(c2, self.x, self.extrapolate, self.axis)
def antiderivative(self, nu=1):
"""
Construct a new piecewise polynomial representing the antiderivative.
Parameters
----------
nu : int, optional
Order of antiderivative to evaluate. Default is 1, i.e. compute
the first integral. If negative, the derivative is returned.
Returns
-------
bp : BPoly
Piecewise polynomial of order k + nu representing the
antiderivative of this polynomial.
Notes
-----
If antiderivative is computed and ``self.extrapolate='periodic'``,
it will be set to False for the returned instance. This is done because
the antiderivative is no longer periodic and its correct evaluation
outside of the initially given x interval is difficult.
"""
if nu <= 0:
return self.derivative(-nu)
if nu > 1:
bp = self
for k in range(nu):
bp = bp.antiderivative()
return bp
# Construct the indefinite integrals on individual intervals
c, x = self.c, self.x
k = c.shape[0]
c2 = np.zeros((k+1,) + c.shape[1:], dtype=c.dtype)
c2[1:, ...] = np.cumsum(c, axis=0) / k
delta = x[1:] - x[:-1]
c2 *= delta[(None, slice(None)) + (None,)*(c.ndim-2)]
# Now fix continuity: on the very first interval, take the integration
# constant to be zero; on an interval [x_j, x_{j+1}) with j>0,
# the integration constant is then equal to the jump of the `bp` at x_j.
# The latter is given by the coefficient of B_{n+1, n+1}
# *on the previous interval* (other B. polynomials are zero at the
# breakpoint). Finally, use the fact that BPs form a partition of unity.
c2[:,1:] += np.cumsum(c2[k, :], axis=0)[:-1]
if self.extrapolate == 'periodic':
extrapolate = False
else:
extrapolate = self.extrapolate
return self.construct_fast(c2, x, extrapolate, axis=self.axis)
def integrate(self, a, b, extrapolate=None):
"""
Compute a definite integral over a piecewise polynomial.
Parameters
----------
a : float
Lower integration bound
b : float
Upper integration bound
extrapolate : {bool, 'periodic', None}, optional
Whether to extrapolate to out-of-bounds points based on first
and last intervals, or to return NaNs. If 'periodic', periodic
extrapolation is used. If None (default), use `self.extrapolate`.
Returns
-------
array_like
Definite integral of the piecewise polynomial over [a, b]
"""
# XXX: can probably use instead the fact that
# \int_0^{1} B_{j, n}(x) \dx = 1/(n+1)
ib = self.antiderivative()
if extrapolate is None:
extrapolate = self.extrapolate
        # ib.extrapolate shouldn't be 'periodic'; it is converted to
        # False for 'periodic' in the antiderivative() call.
if extrapolate != 'periodic':
ib.extrapolate = extrapolate
if extrapolate == 'periodic':
# Split the integral into the part over period (can be several
# of them) and the remaining part.
# For simplicity and clarity convert to a <= b case.
if a <= b:
sign = 1
else:
a, b = b, a
sign = -1
xs, xe = self.x[0], self.x[-1]
period = xe - xs
interval = b - a
n_periods, left = divmod(interval, period)
res = n_periods * (ib(xe) - ib(xs))
# Map a and b to [xs, xe].
a = xs + (a - xs) % period
b = a + left
            # If b <= xe then we need to integrate over [a, b]; otherwise
            # over [a, xe] and then from xs over the remaining length.
if b <= xe:
res += ib(b) - ib(a)
else:
res += ib(xe) - ib(a) + ib(xs + left + a - xe) - ib(xs)
return sign * res
else:
return ib(b) - ib(a)
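    # Integration sketch (coefficients as in the class docstring example, which
    # expand to B(x) = 1 + 2*x on [0, 1]):
    #
    #     from scipy.interpolate import BPoly
    #     bp = BPoly([[1], [2], [3]], [0, 1])
    #     bp.integrate(0, 1)              # -> 2.0
    #     bp.antiderivative()(1.0)        # -> 2.0 as well, since the constant is 0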
def extend(self, c, x, right=None):
k = max(self.c.shape[0], c.shape[0])
self.c = self._raise_degree(self.c, k - self.c.shape[0])
c = self._raise_degree(c, k - c.shape[0])
return _PPolyBase.extend(self, c, x, right)
extend.__doc__ = _PPolyBase.extend.__doc__
@classmethod
def from_power_basis(cls, pp, extrapolate=None):
"""
Construct a piecewise polynomial in Bernstein basis
from a power basis polynomial.
Parameters
----------
pp : PPoly
A piecewise polynomial in the power basis
extrapolate : bool or 'periodic', optional
If bool, determines whether to extrapolate to out-of-bounds points
based on first and last intervals, or to return NaNs.
If 'periodic', periodic extrapolation is used. Default is True.
"""
dx = np.diff(pp.x)
k = pp.c.shape[0] - 1 # polynomial order
rest = (None,)*(pp.c.ndim-2)
c = np.zeros_like(pp.c)
for a in range(k+1):
factor = pp.c[a] / comb(k, k-a) * dx[(slice(None),)+rest]**(k-a)
for j in range(k-a, k+1):
c[j] += factor * comb(j, k-a)
if extrapolate is None:
extrapolate = pp.extrapolate
return cls.construct_fast(c, pp.x, extrapolate, pp.axis)
@classmethod
def from_derivatives(cls, xi, yi, orders=None, extrapolate=None):
"""Construct a piecewise polynomial in the Bernstein basis,
compatible with the specified values and derivatives at breakpoints.
Parameters
----------
xi : array_like
sorted 1D array of x-coordinates
yi : array_like or list of array_likes
``yi[i][j]`` is the ``j``-th derivative known at ``xi[i]``
orders : None or int or array_like of ints. Default: None.
Specifies the degree of local polynomials. If not None, some
derivatives are ignored.
extrapolate : bool or 'periodic', optional
If bool, determines whether to extrapolate to out-of-bounds points
based on first and last intervals, or to return NaNs.
If 'periodic', periodic extrapolation is used. Default is True.
Notes
-----
If ``k`` derivatives are specified at a breakpoint ``x``, the
constructed polynomial is exactly ``k`` times continuously
differentiable at ``x``, unless the ``order`` is provided explicitly.
In the latter case, the smoothness of the polynomial at
the breakpoint is controlled by the ``order``.
Deduces the number of derivatives to match at each end
from ``order`` and the number of derivatives available. If
possible it uses the same number of derivatives from
each end; if the number is odd it tries to take the
        extra one from y2. In any case, if not enough derivatives
        are available at one end, it draws enough from the other end
        to make up the total.
If the order is too high and not enough derivatives are available,
an exception is raised.
Examples
--------
>>> from scipy.interpolate import BPoly
>>> BPoly.from_derivatives([0, 1], [[1, 2], [3, 4]])
Creates a polynomial `f(x)` of degree 3, defined on `[0, 1]`
such that `f(0) = 1, df/dx(0) = 2, f(1) = 3, df/dx(1) = 4`
>>> BPoly.from_derivatives([0, 1, 2], [[0, 1], [0], [2]])
Creates a piecewise polynomial `f(x)`, such that
`f(0) = f(1) = 0`, `f(2) = 2`, and `df/dx(0) = 1`.
Based on the number of derivatives provided, the order of the
local polynomials is 2 on `[0, 1]` and 1 on `[1, 2]`.
Notice that no restriction is imposed on the derivatives at
``x = 1`` and ``x = 2``.
Indeed, the explicit form of the polynomial is::
f(x) = | x * (1 - x), 0 <= x < 1
| 2 * (x - 1), 1 <= x <= 2
So that f'(1-0) = -1 and f'(1+0) = 2
"""
xi = np.asarray(xi)
if len(xi) != len(yi):
raise ValueError("xi and yi need to have the same length")
        if np.any(xi[1:] - xi[:-1] <= 0):
raise ValueError("x coordinates are not in increasing order")
# number of intervals
m = len(xi) - 1
# global poly order is k-1, local orders are <=k and can vary
try:
k = max(len(yi[i]) + len(yi[i+1]) for i in range(m))
except TypeError:
raise ValueError("Using a 1D array for y? Please .reshape(-1, 1).")
if orders is None:
orders = [None] * m
else:
if isinstance(orders, (integer_types, np.integer)):
orders = [orders] * m
k = max(k, max(orders))
if any(o <= 0 for o in orders):
raise ValueError("Orders must be positive.")
c = []
for i in range(m):
y1, y2 = yi[i], yi[i+1]
if orders[i] is None:
n1, n2 = len(y1), len(y2)
else:
n = orders[i]+1
n1 = min(n//2, len(y1))
n2 = min(n - n1, len(y2))
                n1 = min(n - n2, len(y1))
if n1+n2 != n:
mesg = ("Point %g has %d derivatives, point %g"
" has %d derivatives, but order %d requested" % (
xi[i], len(y1), xi[i+1], len(y2), orders[i]))
raise ValueError(mesg)
if not (n1 <= len(y1) and n2 <= len(y2)):
raise ValueError("`order` input incompatible with"
" length y1 or y2.")
b = BPoly._construct_from_derivatives(xi[i], xi[i+1],
y1[:n1], y2[:n2])
if len(b) < k:
b = BPoly._raise_degree(b, k - len(b))
c.append(b)
c = np.asarray(c)
return cls(c.swapaxes(0, 1), xi, extrapolate)
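    # Quick check of the first docstring example (the value 1.75 follows from
    # the cubic Hermite form of the interpolant):
    #
    #     bp = BPoly.from_derivatives([0, 1], [[1, 2], [3, 4]])
    #     bp(0.5)                                        # -> 1.75
    #     bp.derivative()(0.0), bp.derivative()(1.0)     # -> 2.0 and 4.0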
@staticmethod
def _construct_from_derivatives(xa, xb, ya, yb):
r"""Compute the coefficients of a polynomial in the Bernstein basis
given the values and derivatives at the edges.
Return the coefficients of a polynomial in the Bernstein basis
defined on ``[xa, xb]`` and having the values and derivatives at the
        endpoints `xa` and `xb` as specified by `ya` and `yb`.
The polynomial constructed is of the minimal possible degree, i.e.,
if the lengths of `ya` and `yb` are `na` and `nb`, the degree
of the polynomial is ``na + nb - 1``.
Parameters
----------
xa : float
Left-hand end point of the interval
xb : float
Right-hand end point of the interval
ya : array_like
Derivatives at `xa`. `ya[0]` is the value of the function, and
`ya[i]` for ``i > 0`` is the value of the ``i``-th derivative.
yb : array_like
Derivatives at `xb`.
Returns
-------
array
coefficient array of a polynomial having specified derivatives
Notes
-----
This uses several facts from life of Bernstein basis functions.
First of all,
.. math:: b'_{a, n} = n (b_{a-1, n-1} - b_{a, n-1})
If B(x) is a linear combination of the form
.. math:: B(x) = \sum_{a=0}^{n} c_a b_{a, n},
        then :math:`B'(x) = n \sum_{a=0}^{n-1} (c_{a+1} - c_{a}) b_{a, n-1}`.
        Iterating the latter, one finds for the q-th derivative
.. math:: B^{q}(x) = n!/(n-q)! \sum_{a=0}^{n-q} Q_a b_{a, n-q},
with
.. math:: Q_a = \sum_{j=0}^{q} (-)^{j+q} comb(q, j) c_{j+a}
        This way, only `a=0` contributes to :math:`B^{q}(x = xa)`, and
`c_q` are found one by one by iterating `q = 0, ..., na`.
At ``x = xb`` it's the same with ``a = n - q``.
"""
ya, yb = np.asarray(ya), np.asarray(yb)
if ya.shape[1:] != yb.shape[1:]:
raise ValueError('ya and yb have incompatible dimensions.')
dta, dtb = ya.dtype, yb.dtype
if (np.issubdtype(dta, np.complexfloating) or
np.issubdtype(dtb, np.complexfloating)):
dt = np.complex_
else:
dt = np.float_
na, nb = len(ya), len(yb)
n = na + nb
c = np.empty((na+nb,) + ya.shape[1:], dtype=dt)
# compute coefficients of a polynomial degree na+nb-1
# walk left-to-right
for q in range(0, na):
c[q] = ya[q] / spec.poch(n - q, q) * (xb - xa)**q
for j in range(0, q):
c[q] -= (-1)**(j+q) * comb(q, j) * c[j]
# now walk right-to-left
for q in range(0, nb):
c[-q-1] = yb[q] / spec.poch(n - q, q) * (-1)**q * (xb - xa)**q
for j in range(0, q):
c[-q-1] -= (-1)**(j+1) * comb(q, j+1) * c[-q+j]
return c
@staticmethod
def _raise_degree(c, d):
r"""Raise a degree of a polynomial in the Bernstein basis.
Given the coefficients of a polynomial degree `k`, return (the
coefficients of) the equivalent polynomial of degree `k+d`.
Parameters
----------
c : array_like
coefficient array, 1D
d : integer
Returns
-------
array
coefficient array, 1D array of length `c.shape[0] + d`
Notes
-----
This uses the fact that a Bernstein polynomial `b_{a, k}` can be
identically represented as a linear combination of polynomials of
a higher degree `k+d`:
.. math:: b_{a, k} = comb(k, a) \sum_{j=0}^{d} b_{a+j, k+d} \
comb(d, j) / comb(k+d, a+j)
"""
if d == 0:
return c
k = c.shape[0] - 1
out = np.zeros((c.shape[0] + d,) + c.shape[1:], dtype=c.dtype)
for a in range(c.shape[0]):
f = c[a] * comb(k, a)
for j in range(d+1):
out[a+j] += f * comb(d, j) / comb(k+d, a+j)
return out
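    # Degree-elevation sketch: a linear Bernstein polynomial with coefficients
    # [c0, c1] becomes [c0, (c0 + c1)/2, c1] one degree up, representing the
    # same function.
    #
    #     import numpy as np
    #     BPoly._raise_degree(np.array([0.0, 2.0]), 1)   # -> array([0., 1., 2.])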
class NdPPoly(object):
"""
Piecewise tensor product polynomial
The value at point ``xp = (x', y', z', ...)`` is evaluated by first
computing the interval indices `i` such that::
x[0][i[0]] <= x' < x[0][i[0]+1]
x[1][i[1]] <= y' < x[1][i[1]+1]
...
and then computing::
S = sum(c[k0-m0-1,...,kn-mn-1,i[0],...,i[n]]
* (xp[0] - x[0][i[0]])**m0
* ...
* (xp[n] - x[n][i[n]])**mn
for m0 in range(k[0]+1)
...
for mn in range(k[n]+1))
where ``k[j]`` is the degree of the polynomial in dimension j. This
representation is the piecewise multivariate power basis.
Parameters
----------
c : ndarray, shape (k0, ..., kn, m0, ..., mn, ...)
Polynomial coefficients, with polynomial order `kj` and
`mj+1` intervals for each dimension `j`.
x : ndim-tuple of ndarrays, shapes (mj+1,)
Polynomial breakpoints for each dimension. These must be
sorted in increasing order.
extrapolate : bool, optional
Whether to extrapolate to out-of-bounds points based on first
and last intervals, or to return NaNs. Default: True.
Attributes
----------
x : tuple of ndarrays
Breakpoints.
c : ndarray
Coefficients of the polynomials.
Methods
-------
__call__
construct_fast
See also
--------
PPoly : piecewise polynomials in 1D
Notes
-----
High-order polynomials in the power basis can be numerically
unstable.
"""
def __init__(self, c, x, extrapolate=None):
self.x = tuple(np.ascontiguousarray(v, dtype=np.float64) for v in x)
self.c = np.asarray(c)
if extrapolate is None:
extrapolate = True
self.extrapolate = bool(extrapolate)
ndim = len(self.x)
if any(v.ndim != 1 for v in self.x):
raise ValueError("x arrays must all be 1-dimensional")
if any(v.size < 2 for v in self.x):
raise ValueError("x arrays must all contain at least 2 points")
if c.ndim < 2*ndim:
raise ValueError("c must have at least 2*len(x) dimensions")
if any(np.any(v[1:] - v[:-1] < 0) for v in self.x):
raise ValueError("x-coordinates are not in increasing order")
if any(a != b.size - 1 for a, b in zip(c.shape[ndim:2*ndim], self.x)):
raise ValueError("x and c do not agree on the number of intervals")
dtype = self._get_dtype(self.c.dtype)
self.c = np.ascontiguousarray(self.c, dtype=dtype)
@classmethod
def construct_fast(cls, c, x, extrapolate=None):
"""
Construct the piecewise polynomial without making checks.
Takes the same parameters as the constructor. Input arguments
``c`` and ``x`` must be arrays of the correct shape and type. The
``c`` array can only be of dtypes float and complex, and ``x``
array must have dtype float.
"""
self = object.__new__(cls)
self.c = c
self.x = x
if extrapolate is None:
extrapolate = True
self.extrapolate = extrapolate
return self
def _get_dtype(self, dtype):
if np.issubdtype(dtype, np.complexfloating) \
or np.issubdtype(self.c.dtype, np.complexfloating):
return np.complex_
else:
return np.float_
def _ensure_c_contiguous(self):
if not self.c.flags.c_contiguous:
self.c = self.c.copy()
if not isinstance(self.x, tuple):
self.x = tuple(self.x)
def __call__(self, x, nu=None, extrapolate=None):
"""
Evaluate the piecewise polynomial or its derivative
Parameters
----------
x : array-like
Points to evaluate the interpolant at.
nu : tuple, optional
Orders of derivatives to evaluate. Each must be non-negative.
extrapolate : bool, optional
Whether to extrapolate to out-of-bounds points based on first
and last intervals, or to return NaNs.
Returns
-------
y : array-like
Interpolated values. Shape is determined by replacing
the interpolation axis in the original array with the shape of x.
Notes
-----
Derivatives are evaluated piecewise for each polynomial
segment, even if the polynomial is not differentiable at the
breakpoints. The polynomial intervals are considered half-open,
``[a, b)``, except for the last interval which is closed
``[a, b]``.
"""
if extrapolate is None:
extrapolate = self.extrapolate
else:
extrapolate = bool(extrapolate)
ndim = len(self.x)
x = _ndim_coords_from_arrays(x)
x_shape = x.shape
x = np.ascontiguousarray(x.reshape(-1, x.shape[-1]), dtype=np.float_)
if nu is None:
nu = np.zeros((ndim,), dtype=np.intc)
else:
nu = np.asarray(nu, dtype=np.intc)
if nu.ndim != 1 or nu.shape[0] != ndim:
raise ValueError("invalid number of derivative orders nu")
dim1 = prod(self.c.shape[:ndim])
dim2 = prod(self.c.shape[ndim:2*ndim])
dim3 = prod(self.c.shape[2*ndim:])
ks = np.array(self.c.shape[:ndim], dtype=np.intc)
out = np.empty((x.shape[0], dim3), dtype=self.c.dtype)
self._ensure_c_contiguous()
_ppoly.evaluate_nd(self.c.reshape(dim1, dim2, dim3),
self.x,
ks,
x,
nu,
bool(extrapolate),
out)
return out.reshape(x_shape[:-1] + self.c.shape[2*ndim:])
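    # Minimal evaluation sketch (a single constant 2-D cell, values illustrative):
    #
    #     import numpy as np
    #     from scipy.interpolate import NdPPoly
    #     c = np.full((1, 1, 1, 1), 5.0)          # degree (0, 0), one cell per axis
    #     p = NdPPoly(c, ([0.0, 1.0], [0.0, 1.0]))
    #     p(np.array([[0.25, 0.75]]))             # -> array([ 5.])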
def _derivative_inplace(self, nu, axis):
"""
Compute 1D derivative along a selected dimension in-place
        May result in a non-contiguous c array.
"""
if nu < 0:
return self._antiderivative_inplace(-nu, axis)
ndim = len(self.x)
axis = axis % ndim
# reduce order
if nu == 0:
# noop
return
else:
sl = [slice(None)]*ndim
sl[axis] = slice(None, -nu, None)
c2 = self.c[tuple(sl)]
if c2.shape[axis] == 0:
# derivative of order 0 is zero
shp = list(c2.shape)
shp[axis] = 1
c2 = np.zeros(shp, dtype=c2.dtype)
# multiply by the correct rising factorials
factor = spec.poch(np.arange(c2.shape[axis], 0, -1), nu)
sl = [None]*c2.ndim
sl[axis] = slice(None)
c2 *= factor[tuple(sl)]
self.c = c2
def _antiderivative_inplace(self, nu, axis):
"""
Compute 1D antiderivative along a selected dimension
        May result in a non-contiguous c array.
"""
if nu <= 0:
return self._derivative_inplace(-nu, axis)
ndim = len(self.x)
axis = axis % ndim
perm = list(range(ndim))
perm[0], perm[axis] = perm[axis], perm[0]
perm = perm + list(range(ndim, self.c.ndim))
c = self.c.transpose(perm)
c2 = np.zeros((c.shape[0] + nu,) + c.shape[1:],
dtype=c.dtype)
c2[:-nu] = c
# divide by the correct rising factorials
factor = spec.poch(np.arange(c.shape[0], 0, -1), nu)
c2[:-nu] /= factor[(slice(None),) + (None,)*(c.ndim-1)]
# fix continuity of added degrees of freedom
perm2 = list(range(c2.ndim))
perm2[1], perm2[ndim+axis] = perm2[ndim+axis], perm2[1]
c2 = c2.transpose(perm2)
c2 = c2.copy()
_ppoly.fix_continuity(c2.reshape(c2.shape[0], c2.shape[1], -1),
self.x[axis], nu-1)
c2 = c2.transpose(perm2)
c2 = c2.transpose(perm)
# Done
self.c = c2
def derivative(self, nu):
"""
Construct a new piecewise polynomial representing the derivative.
Parameters
----------
nu : ndim-tuple of int
Order of derivatives to evaluate for each dimension.
If negative, the antiderivative is returned.
Returns
-------
pp : NdPPoly
Piecewise polynomial of orders (k[0] - nu[0], ..., k[n] - nu[n])
representing the derivative of this polynomial.
Notes
-----
Derivatives are evaluated piecewise for each polynomial
segment, even if the polynomial is not differentiable at the
breakpoints. The polynomial intervals in each dimension are
considered half-open, ``[a, b)``, except for the last interval
which is closed ``[a, b]``.
"""
p = self.construct_fast(self.c.copy(), self.x, self.extrapolate)
for axis, n in enumerate(nu):
p._derivative_inplace(n, axis)
p._ensure_c_contiguous()
return p
def antiderivative(self, nu):
"""
Construct a new piecewise polynomial representing the antiderivative.
Antiderivative is also the indefinite integral of the function,
and derivative is its inverse operation.
Parameters
----------
nu : ndim-tuple of int
Order of derivatives to evaluate for each dimension.
If negative, the derivative is returned.
Returns
-------
pp : PPoly
Piecewise polynomial of order k2 = k + n representing
the antiderivative of this polynomial.
Notes
-----
The antiderivative returned by this function is continuous and
continuously differentiable to order n-1, up to floating point
rounding error.
"""
p = self.construct_fast(self.c.copy(), self.x, self.extrapolate)
for axis, n in enumerate(nu):
p._antiderivative_inplace(n, axis)
p._ensure_c_contiguous()
return p
def integrate_1d(self, a, b, axis, extrapolate=None):
r"""
Compute NdPPoly representation for one dimensional definite integral
The result is a piecewise polynomial representing the integral:
.. math::
p(y, z, ...) = \int_a^b dx\, p(x, y, z, ...)
where the dimension integrated over is specified with the
`axis` parameter.
Parameters
----------
a, b : float
Lower and upper bound for integration.
axis : int
Dimension over which to compute the 1D integrals
extrapolate : bool, optional
Whether to extrapolate to out-of-bounds points based on first
and last intervals, or to return NaNs.
Returns
-------
ig : NdPPoly or array-like
Definite integral of the piecewise polynomial over [a, b].
If the polynomial was 1-dimensional, an array is returned,
otherwise, an NdPPoly object.
"""
if extrapolate is None:
extrapolate = self.extrapolate
else:
extrapolate = bool(extrapolate)
ndim = len(self.x)
axis = int(axis) % ndim
# reuse 1D integration routines
c = self.c
swap = list(range(c.ndim))
swap.insert(0, swap[axis])
del swap[axis + 1]
swap.insert(1, swap[ndim + axis])
del swap[ndim + axis + 1]
c = c.transpose(swap)
p = PPoly.construct_fast(c.reshape(c.shape[0], c.shape[1], -1),
self.x[axis],
extrapolate=extrapolate)
out = p.integrate(a, b, extrapolate=extrapolate)
# Construct result
if ndim == 1:
return out.reshape(c.shape[2:])
else:
c = out.reshape(c.shape[2:])
x = self.x[:axis] + self.x[axis+1:]
return self.construct_fast(c, x, extrapolate=extrapolate)
def integrate(self, ranges, extrapolate=None):
"""
Compute a definite integral over a piecewise polynomial.
Parameters
----------
ranges : ndim-tuple of 2-tuples float
Sequence of lower and upper bounds for each dimension,
``[(a[0], b[0]), ..., (a[ndim-1], b[ndim-1])]``
extrapolate : bool, optional
Whether to extrapolate to out-of-bounds points based on first
and last intervals, or to return NaNs.
Returns
-------
ig : array_like
Definite integral of the piecewise polynomial over
[a[0], b[0]] x ... x [a[ndim-1], b[ndim-1]]
"""
ndim = len(self.x)
if extrapolate is None:
extrapolate = self.extrapolate
else:
extrapolate = bool(extrapolate)
if not hasattr(ranges, '__len__') or len(ranges) != ndim:
raise ValueError("Range not a sequence of correct length")
self._ensure_c_contiguous()
# Reuse 1D integration routine
c = self.c
for n, (a, b) in enumerate(ranges):
swap = list(range(c.ndim))
swap.insert(1, swap[ndim - n])
del swap[ndim - n + 1]
c = c.transpose(swap)
p = PPoly.construct_fast(c, self.x[n], extrapolate=extrapolate)
out = p.integrate(a, b, extrapolate=extrapolate)
c = out.reshape(c.shape[2:])
return c
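# --- Hedged usage sketch (not part of the original module) ---------------------
# A minimal standalone example of the derivative/antiderivative/integration
# helpers defined above, assuming scipy.interpolate exposes NdPPoly as public
# API. The coefficients build the simplest 2-D piecewise polynomial,
# p(x, y) = 1 on every cell of a [0, 2] x [0, 2] grid, so the results are easy
# to check by hand. The helper name is made up for illustration.
def _ndppoly_calculus_sketch():
    import numpy as np
    from scipy.interpolate import NdPPoly

    x = (np.array([0.0, 1.0, 2.0]), np.array([0.0, 1.0, 2.0]))  # breakpoints per axis
    c = np.zeros((2, 2, 2, 2))      # (kx+1, ky+1, mx, my) local power-basis coefficients
    c[1, 1] = 1.0                   # constant term only: p(x, y) = 1 on every cell
    p = NdPPoly(c, x)

    q = p.antiderivative((1, 0))    # indefinite integral along axis 0
    r = q.derivative((1, 0))        # differentiating recovers p, up to rounding
    pt = np.array([0.5, 1.5])
    print(p(pt), q(pt), r(pt))      # expected roughly 1.0, 0.5, 1.0

    print(p.integrate_1d(0.0, 2.0, axis=0)(np.array([1.5])))  # 1-D result in y, value 2.0
    print(p.integrate(((0.0, 2.0), (0.0, 2.0))))              # area of the square: 4.0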
class RegularGridInterpolator(object):
"""
Interpolation on a regular grid in arbitrary dimensions
The data must be defined on a regular grid; the grid spacing however may be
uneven. Linear and nearest-neighbour interpolation are supported. After
setting up the interpolator object, the interpolation method (*linear* or
*nearest*) may be chosen at each evaluation.
Parameters
----------
points : tuple of ndarray of float, with shapes (m1, ), ..., (mn, )
The points defining the regular grid in n dimensions.
values : array_like, shape (m1, ..., mn, ...)
The data on the regular grid in n dimensions.
method : str, optional
The method of interpolation to perform. Supported are "linear" and
"nearest". This parameter will become the default for the object's
``__call__`` method. Default is "linear".
bounds_error : bool, optional
If True, when interpolated values are requested outside of the
domain of the input data, a ValueError is raised.
If False, then `fill_value` is used.
fill_value : number, optional
If provided, the value to use for points outside of the
interpolation domain. If None, values outside
the domain are extrapolated.
Methods
-------
__call__
Notes
-----
Contrary to LinearNDInterpolator and NearestNDInterpolator, this class
avoids expensive triangulation of the input data by taking advantage of the
regular grid structure.
If any of `points` have a dimension of size 1, linear interpolation will
return an array of `nan` values. Nearest-neighbor interpolation will work
as usual in this case.
.. versionadded:: 0.14
Examples
--------
Evaluate a simple example function on the points of a 3D grid:
>>> from scipy.interpolate import RegularGridInterpolator
>>> def f(x, y, z):
... return 2 * x**3 + 3 * y**2 - z
>>> x = np.linspace(1, 4, 11)
>>> y = np.linspace(4, 7, 22)
>>> z = np.linspace(7, 9, 33)
>>> data = f(*np.meshgrid(x, y, z, indexing='ij', sparse=True))
``data`` is now a 3D array with ``data[i,j,k] = f(x[i], y[j], z[k])``.
Next, define an interpolating function from this data:
>>> my_interpolating_function = RegularGridInterpolator((x, y, z), data)
Evaluate the interpolating function at the two points
``(x,y,z) = (2.1, 6.2, 8.3)`` and ``(3.3, 5.2, 7.1)``:
>>> pts = np.array([[2.1, 6.2, 8.3], [3.3, 5.2, 7.1]])
>>> my_interpolating_function(pts)
array([ 125.80469388, 146.30069388])
which is indeed a close approximation to
``[f(2.1, 6.2, 8.3), f(3.3, 5.2, 7.1)]``.
See also
--------
NearestNDInterpolator : Nearest neighbour interpolation on unstructured
data in N dimensions
LinearNDInterpolator : Piecewise linear interpolant on unstructured data
in N dimensions
References
----------
.. [1] Python package *regulargrid* by Johannes Buchner, see
https://pypi.python.org/pypi/regulargrid/
.. [2] Wikipedia, "Trilinear interpolation",
https://en.wikipedia.org/wiki/Trilinear_interpolation
.. [3] Weiser, Alan, and Sergio E. Zarantonello. "A note on piecewise linear
and multilinear table interpolation in many dimensions." MATH.
COMPUT. 50.181 (1988): 189-196.
https://www.ams.org/journals/mcom/1988-50-181/S0025-5718-1988-0917826-0/S0025-5718-1988-0917826-0.pdf
"""
# this class is based on code originally programmed by Johannes Buchner,
# see https://github.com/JohannesBuchner/regulargrid
def __init__(self, points, values, method="linear", bounds_error=True,
fill_value=np.nan):
if method not in ["linear", "nearest"]:
raise ValueError("Method '%s' is not defined" % method)
self.method = method
self.bounds_error = bounds_error
if not hasattr(values, 'ndim'):
# allow reasonable duck-typed values
values = np.asarray(values)
if len(points) > values.ndim:
raise ValueError("There are %d point arrays, but values has %d "
"dimensions" % (len(points), values.ndim))
if hasattr(values, 'dtype') and hasattr(values, 'astype'):
if not np.issubdtype(values.dtype, np.inexact):
values = values.astype(float)
self.fill_value = fill_value
if fill_value is not None:
fill_value_dtype = np.asarray(fill_value).dtype
if (hasattr(values, 'dtype') and not
np.can_cast(fill_value_dtype, values.dtype,
casting='same_kind')):
raise ValueError("fill_value must be either 'None' or "
"of a type compatible with values")
for i, p in enumerate(points):
if not np.all(np.diff(p) > 0.):
raise ValueError("The points in dimension %d must be strictly "
"ascending" % i)
if not np.asarray(p).ndim == 1:
raise ValueError("The points in dimension %d must be "
"1-dimensional" % i)
if not values.shape[i] == len(p):
raise ValueError("There are %d points and %d values in "
"dimension %d" % (len(p), values.shape[i], i))
self.grid = tuple([np.asarray(p) for p in points])
self.values = values
def __call__(self, xi, method=None):
"""
Interpolation at coordinates
Parameters
----------
xi : ndarray of shape (..., ndim)
The coordinates to sample the gridded data at
method : str
The method of interpolation to perform. Supported are "linear" and
"nearest".
"""
method = self.method if method is None else method
if method not in ["linear", "nearest"]:
raise ValueError("Method '%s' is not defined" % method)
ndim = len(self.grid)
xi = _ndim_coords_from_arrays(xi, ndim=ndim)
if xi.shape[-1] != len(self.grid):
raise ValueError("The requested sample points xi have dimension "
"%d, but this RegularGridInterpolator has "
"dimension %d" % (xi.shape[1], ndim))
xi_shape = xi.shape
xi = xi.reshape(-1, xi_shape[-1])
if self.bounds_error:
for i, p in enumerate(xi.T):
if not np.logical_and(np.all(self.grid[i][0] <= p),
np.all(p <= self.grid[i][-1])):
raise ValueError("One of the requested xi is out of bounds "
"in dimension %d" % i)
indices, norm_distances, out_of_bounds = self._find_indices(xi.T)
if method == "linear":
result = self._evaluate_linear(indices,
norm_distances,
out_of_bounds)
elif method == "nearest":
result = self._evaluate_nearest(indices,
norm_distances,
out_of_bounds)
if not self.bounds_error and self.fill_value is not None:
result[out_of_bounds] = self.fill_value
return result.reshape(xi_shape[:-1] + self.values.shape[ndim:])
def _evaluate_linear(self, indices, norm_distances, out_of_bounds):
# slice for broadcasting over trailing dimensions in self.values
vslice = (slice(None),) + (None,)*(self.values.ndim - len(indices))
# find relevant values
        # each i and i+1 represents an edge
edges = itertools.product(*[[i, i + 1] for i in indices])
values = 0.
for edge_indices in edges:
weight = 1.
for ei, i, yi in zip(edge_indices, indices, norm_distances):
weight *= np.where(ei == i, 1 - yi, yi)
values += np.asarray(self.values[edge_indices]) * weight[vslice]
return values
def _evaluate_nearest(self, indices, norm_distances, out_of_bounds):
idx_res = [np.where(yi <= .5, i, i + 1)
for i, yi in zip(indices, norm_distances)]
return self.values[tuple(idx_res)]
def _find_indices(self, xi):
# find relevant edges between which xi are situated
indices = []
# compute distance to lower edge in unity units
norm_distances = []
# check for out of bounds xi
out_of_bounds = np.zeros((xi.shape[1]), dtype=bool)
# iterate through dimensions
for x, grid in zip(xi, self.grid):
i = np.searchsorted(grid, x) - 1
i[i < 0] = 0
i[i > grid.size - 2] = grid.size - 2
indices.append(i)
norm_distances.append((x - grid[i]) /
(grid[i + 1] - grid[i]))
if not self.bounds_error:
out_of_bounds += x < grid[0]
out_of_bounds += x > grid[-1]
return indices, norm_distances, out_of_bounds
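# --- Hedged sketch (not part of the original module) ---------------------------
# _evaluate_linear above forms per-dimension weights (1 - t) and t and sums over
# the 2**ndim corners of the containing cell.  A quick 2-D sanity check against
# the explicit bilinear formula, wrapped in a helper so nothing runs at import
# time (the helper name and data are made up for illustration):
def _bilinear_weight_sketch():
    import numpy as np

    x = np.array([0.0, 1.0])
    y = np.array([0.0, 1.0])
    v = np.array([[0.0, 1.0],
                  [2.0, 3.0]])              # v[i, j] = f(x[i], y[j]) = 2*x + y
    rgi = RegularGridInterpolator((x, y), v)

    tx, ty = 0.25, 0.75                     # normalized distances inside the single cell
    bilinear = ((1 - tx) * (1 - ty) * v[0, 0] + (1 - tx) * ty * v[0, 1]
                + tx * (1 - ty) * v[1, 0] + tx * ty * v[1, 1])
    print(rgi([[tx, ty]])[0], bilinear)     # both evaluate to 1.25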
def interpn(points, values, xi, method="linear", bounds_error=True,
fill_value=np.nan):
"""
Multidimensional interpolation on regular grids.
Parameters
----------
points : tuple of ndarray of float, with shapes (m1, ), ..., (mn, )
The points defining the regular grid in n dimensions.
values : array_like, shape (m1, ..., mn, ...)
The data on the regular grid in n dimensions.
xi : ndarray of shape (..., ndim)
The coordinates to sample the gridded data at
method : str, optional
The method of interpolation to perform. Supported are "linear" and
"nearest", and "splinef2d". "splinef2d" is only supported for
2-dimensional data.
bounds_error : bool, optional
If True, when interpolated values are requested outside of the
domain of the input data, a ValueError is raised.
If False, then `fill_value` is used.
fill_value : number, optional
If provided, the value to use for points outside of the
interpolation domain. If None, values outside
the domain are extrapolated. Extrapolation is not supported by method
"splinef2d".
Returns
-------
values_x : ndarray, shape xi.shape[:-1] + values.shape[ndim:]
Interpolated values at input coordinates.
Notes
-----
.. versionadded:: 0.14
See also
--------
NearestNDInterpolator : Nearest neighbour interpolation on unstructured
data in N dimensions
LinearNDInterpolator : Piecewise linear interpolant on unstructured data
in N dimensions
RegularGridInterpolator : Linear and nearest-neighbor Interpolation on a
regular grid in arbitrary dimensions
RectBivariateSpline : Bivariate spline approximation over a rectangular mesh
"""
# sanity check 'method' kwarg
if method not in ["linear", "nearest", "splinef2d"]:
raise ValueError("interpn only understands the methods 'linear', "
"'nearest', and 'splinef2d'. You provided %s." %
method)
if not hasattr(values, 'ndim'):
values = np.asarray(values)
ndim = values.ndim
if ndim > 2 and method == "splinef2d":
raise ValueError("The method spline2fd can only be used for "
"2-dimensional input data")
if not bounds_error and fill_value is None and method == "splinef2d":
raise ValueError("The method spline2fd does not support extrapolation.")
# sanity check consistency of input dimensions
if len(points) > ndim:
raise ValueError("There are %d point arrays, but values has %d "
"dimensions" % (len(points), ndim))
if len(points) != ndim and method == 'splinef2d':
raise ValueError("The method spline2fd can only be used for "
"scalar data with one point per coordinate")
# sanity check input grid
for i, p in enumerate(points):
if not np.all(np.diff(p) > 0.):
raise ValueError("The points in dimension %d must be strictly "
"ascending" % i)
if not np.asarray(p).ndim == 1:
raise ValueError("The points in dimension %d must be "
"1-dimensional" % i)
if not values.shape[i] == len(p):
raise ValueError("There are %d points and %d values in "
"dimension %d" % (len(p), values.shape[i], i))
grid = tuple([np.asarray(p) for p in points])
# sanity check requested xi
xi = _ndim_coords_from_arrays(xi, ndim=len(grid))
if xi.shape[-1] != len(grid):
raise ValueError("The requested sample points xi have dimension "
"%d, but this RegularGridInterpolator has "
"dimension %d" % (xi.shape[1], len(grid)))
for i, p in enumerate(xi.T):
if bounds_error and not np.logical_and(np.all(grid[i][0] <= p),
np.all(p <= grid[i][-1])):
raise ValueError("One of the requested xi is out of bounds "
"in dimension %d" % i)
# perform interpolation
if method == "linear":
interp = RegularGridInterpolator(points, values, method="linear",
bounds_error=bounds_error,
fill_value=fill_value)
return interp(xi)
elif method == "nearest":
interp = RegularGridInterpolator(points, values, method="nearest",
bounds_error=bounds_error,
fill_value=fill_value)
return interp(xi)
elif method == "splinef2d":
xi_shape = xi.shape
xi = xi.reshape(-1, xi.shape[-1])
# RectBivariateSpline doesn't support fill_value; we need to wrap here
idx_valid = np.all((grid[0][0] <= xi[:, 0], xi[:, 0] <= grid[0][-1],
grid[1][0] <= xi[:, 1], xi[:, 1] <= grid[1][-1]),
axis=0)
result = np.empty_like(xi[:, 0])
# make a copy of values for RectBivariateSpline
interp = RectBivariateSpline(points[0], points[1], values[:])
result[idx_valid] = interp.ev(xi[idx_valid, 0], xi[idx_valid, 1])
result[np.logical_not(idx_valid)] = fill_value
return result.reshape(xi_shape[:-1])
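# --- Hedged usage sketch (not part of the original module) ---------------------
# interpn on a small made-up grid; "linear" reproduces the linear test function
# exactly, while "nearest" snaps to the closest grid node.  Wrapped in a helper
# (hypothetical name) so it does not run at import time:
def _interpn_sketch():
    import numpy as np

    x = np.linspace(0.0, 1.0, 5)
    y = np.linspace(0.0, 1.0, 7)
    vals = np.add.outer(x, y)               # f(x, y) = x + y sampled on the grid
    pts = np.array([[0.3, 0.4], [0.9, 0.1]])

    print(interpn((x, y), vals, pts, method="linear"))    # approximately [0.7, 1.0]
    print(interpn((x, y), vals, pts, method="nearest"))   # values at the nearest nodes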
# backward compatibility wrapper
class _ppform(PPoly):
"""
Deprecated piecewise polynomial class.
New code should use the `PPoly` class instead.
"""
def __init__(self, coeffs, breaks, fill=0.0, sort=False):
warnings.warn("_ppform is deprecated -- use PPoly instead",
category=DeprecationWarning)
if sort:
breaks = np.sort(breaks)
else:
breaks = np.asarray(breaks)
PPoly.__init__(self, coeffs, breaks)
self.coeffs = self.c
self.breaks = self.x
self.K = self.coeffs.shape[0]
self.fill = fill
self.a = self.breaks[0]
self.b = self.breaks[-1]
def __call__(self, x):
return PPoly.__call__(self, x, 0, False)
def _evaluate(self, x, nu, extrapolate, out):
PPoly._evaluate(self, x, nu, extrapolate, out)
out[~((x >= self.a) & (x <= self.b))] = self.fill
return out
@classmethod
def fromspline(cls, xk, cvals, order, fill=0.0):
# Note: this spline representation is incompatible with FITPACK
N = len(xk)-1
sivals = np.empty((order+1, N), dtype=float)
for m in xrange(order, -1, -1):
fact = spec.gamma(m+1)
res = _fitpack._bspleval(xk[:-1], xk, cvals, order, m)
res /= fact
sivals[order-m, :] = res
return cls(sivals, xk, fill=fill)
| 35.790946
| 112
| 0.554841
|
29fd118ccaf1a1d136f9695e39f0e23b6427e873
| 624
|
py
|
Python
|
saleor/dashboard/group/forms.py
|
nobism/saleor
|
a199bee5ef8cb2ecd1d7fd4bcc708f3b4903aa2d
|
[
"BSD-3-Clause"
] | 3
|
2018-06-14T22:52:38.000Z
|
2019-01-24T03:48:46.000Z
|
saleor/dashboard/group/forms.py
|
nobism/saleor
|
a199bee5ef8cb2ecd1d7fd4bcc708f3b4903aa2d
|
[
"BSD-3-Clause"
] | 1
|
2022-02-13T22:52:38.000Z
|
2022-02-13T22:52:38.000Z
|
saleor/dashboard/group/forms.py
|
nobism/saleor
|
a199bee5ef8cb2ecd1d7fd4bcc708f3b4903aa2d
|
[
"BSD-3-Clause"
] | 1
|
2021-12-02T18:49:31.000Z
|
2021-12-02T18:49:31.000Z
|
from django import forms
from django.contrib.auth.models import Group
from django.utils.translation import pgettext_lazy
from ...core.permissions import get_permissions
from ..forms import PermissionMultipleChoiceField
class GroupPermissionsForm(forms.ModelForm):
class Meta:
model = Group
fields = ['name', 'permissions']
labels = {
'name': pgettext_lazy('Item name', 'Name'),
'permissions': pgettext_lazy('Permissions', 'Permissions')}
permissions = PermissionMultipleChoiceField(
queryset=get_permissions(),
widget=forms.CheckboxSelectMultiple)
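# --- Hedged usage sketch (not part of the original module) ---------------------
# Inside a configured Django project (for example ``manage.py shell`` or a view),
# the form is bound and saved like any ModelForm; the permission checkboxes are
# limited to the queryset returned by get_permissions().  The helper name and the
# permission primary keys are placeholders.
def _group_form_sketch(permission_pks):
    form = GroupPermissionsForm(data={'name': 'Editors',
                                      'permissions': permission_pks})
    if form.is_valid():
        return form.save()      # creates the Group with the selected permissions
    return form.errors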
| 31.2
| 71
| 0.711538
|
c7e570417af6b904b6c822405240c86b3f716e8e
| 873
|
py
|
Python
|
nlp/readabilityIndex.py
|
numankh/HypeBeastDashboard
|
8b30fe2cb972a603b6ce1d84004b418d52471a7e
|
[
"MIT"
] | null | null | null |
nlp/readabilityIndex.py
|
numankh/HypeBeastDashboard
|
8b30fe2cb972a603b6ce1d84004b418d52471a7e
|
[
"MIT"
] | null | null | null |
nlp/readabilityIndex.py
|
numankh/HypeBeastDashboard
|
8b30fe2cb972a603b6ce1d84004b418d52471a7e
|
[
"MIT"
] | null | null | null |
# import spacy
from textstat.textstat import textstatistics, legacy_round
import textstat
import en_core_web_sm
# While the maximum score is 121.22, there is no limit on how low the score can be. A negative score is valid.
def flesch_reading_ease_score(text):
return textstat.flesch_reading_ease(text)
# Average of all grade level readability indices
def average_grade_score(text):
total = textstat.flesch_kincaid_grade(text) + textstat.gunning_fog(text) + textstat.automated_readability_index(text) + textstat.coleman_liau_index(text) + textstat.linsear_write_formula(text) + textstat.dale_chall_readability_score(text)
return round(total / 6, 2)
if __name__ == "__main__":
sample_text = "As the years pass by, we all know owners look more and more like their dogs"
print(textstat.flesch_reading_ease(sample_text))
print(average_grade_score(sample_text))
| 48.5
| 239
| 0.802978
|
3b7e9c1083f6138386c2c8fbd2a6932567b8b0ff
| 3,832
|
py
|
Python
|
torchgeometry/losses/depth_smooth.py
|
tina300399/torchgeometry
|
48d8026f0a5f3d4ac5567b7b2738390892b3cc8d
|
[
"Apache-2.0"
] | null | null | null |
torchgeometry/losses/depth_smooth.py
|
tina300399/torchgeometry
|
48d8026f0a5f3d4ac5567b7b2738390892b3cc8d
|
[
"Apache-2.0"
] | null | null | null |
torchgeometry/losses/depth_smooth.py
|
tina300399/torchgeometry
|
48d8026f0a5f3d4ac5567b7b2738390892b3cc8d
|
[
"Apache-2.0"
] | 1
|
2019-10-04T05:05:28.000Z
|
2019-10-04T05:05:28.000Z
|
import torch
import torch.nn as nn
import torch.nn.functional as F
# Based on
# https://github.com/tensorflow/models/blob/master/research/struct2depth/model.py#L625-L641
class InverseDepthSmoothnessLoss(nn.Module):
r"""Criterion that computes image-aware inverse depth smoothness loss.
.. math::
\text{loss} = \left | \partial_x d_{ij} \right | e^{-\left \|
\partial_x I_{ij} \right \|} + \left |
\partial_y d_{ij} \right | e^{-\left \| \partial_y I_{ij} \right \|}
Shape:
- Inverse Depth: :math:`(N, 1, H, W)`
- Image: :math:`(N, 3, H, W)`
- Output: scalar
Examples::
>>> idepth = torch.rand(1, 1, 4, 5)
>>> image = torch.rand(1, 3, 4, 5)
        >>> smooth = tgm.losses.InverseDepthSmoothnessLoss()
>>> loss = smooth(idepth, image)
"""
def __init__(self) -> None:
super(InverseDepthSmoothnessLoss, self).__init__()
@staticmethod
def gradient_x(img: torch.Tensor) -> torch.Tensor:
assert len(img.shape) == 4, img.shape
return img[:, :, :, :-1] - img[:, :, :, 1:]
@staticmethod
def gradient_y(img: torch.Tensor) -> torch.Tensor:
assert len(img.shape) == 4, img.shape
return img[:, :, :-1, :] - img[:, :, 1:, :]
def forward( # type: ignore
self,
idepth: torch.Tensor,
image: torch.Tensor) -> torch.Tensor:
if not torch.is_tensor(idepth):
raise TypeError("Input idepth type is not a torch.Tensor. Got {}"
.format(type(idepth)))
if not torch.is_tensor(image):
raise TypeError("Input image type is not a torch.Tensor. Got {}"
.format(type(image)))
if not len(idepth.shape) == 4:
raise ValueError("Invalid idepth shape, we expect BxCxHxW. Got: {}"
.format(idepth.shape))
if not len(image.shape) == 4:
raise ValueError("Invalid image shape, we expect BxCxHxW. Got: {}"
.format(image.shape))
if not idepth.shape[-2:] == image.shape[-2:]:
raise ValueError("idepth and image shapes must be the same. Got: {}"
.format(idepth.shape, image.shape))
if not idepth.device == image.device:
raise ValueError(
"idepth and image must be in the same device. Got: {}" .format(
idepth.device, image.device))
if not idepth.dtype == image.dtype:
raise ValueError(
"idepth and image must be in the same dtype. Got: {}" .format(
idepth.dtype, image.dtype))
# compute the gradients
idepth_dx: torch.Tensor = self.gradient_x(idepth)
idepth_dy: torch.Tensor = self.gradient_y(idepth)
image_dx: torch.Tensor = self.gradient_x(image)
image_dy: torch.Tensor = self.gradient_y(image)
# compute image weights
weights_x: torch.Tensor = torch.exp(
-torch.mean(torch.abs(image_dx), dim=1, keepdim=True))
weights_y: torch.Tensor = torch.exp(
-torch.mean(torch.abs(image_dy), dim=1, keepdim=True))
# apply image weights to depth
smoothness_x: torch.Tensor = torch.abs(idepth_dx * weights_x)
smoothness_y: torch.Tensor = torch.abs(idepth_dy * weights_y)
return torch.mean(smoothness_x) + torch.mean(smoothness_y)
######################
# functional interface
######################
def inverse_depth_smoothness_loss(
idepth: torch.Tensor,
image: torch.Tensor) -> torch.Tensor:
r"""Computes image-aware inverse depth smoothness loss.
    See :class:`~torchgeometry.losses.InverseDepthSmoothnessLoss` for details.
"""
return InverseDepthSmoothnessLoss()(idepth, image)
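# --- Hedged usage sketch (not part of the original module) ---------------------
# Quick check of the class-based and functional interfaces defined above with
# random tensors shaped as documented; both calls compute the same scalar.
# The helper name is made up for illustration.
def _smoothness_loss_sketch():
    import torch

    idepth = torch.rand(1, 1, 4, 5)
    image = torch.rand(1, 3, 4, 5)
    criterion = InverseDepthSmoothnessLoss()
    print(criterion(idepth, image))
    print(inverse_depth_smoothness_loss(idepth, image))   # same value, functional form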
| 37.203883
| 91
| 0.5762
|
8ab5b498065e1ffa4e16c5237e8d8d2e09e0744f
| 4,853
|
py
|
Python
|
mrdomino/__init__.py
|
knighton/mapreduce
|
3349fe2da0d01400b464aebb6ee239fb962d8def
|
[
"MIT"
] | 1
|
2015-07-28T18:22:30.000Z
|
2015-07-28T18:22:30.000Z
|
mrdomino/__init__.py
|
knighton/mapreduce
|
3349fe2da0d01400b464aebb6ee239fb962d8def
|
[
"MIT"
] | null | null | null |
mrdomino/__init__.py
|
knighton/mapreduce
|
3349fe2da0d01400b464aebb6ee239fb962d8def
|
[
"MIT"
] | null | null | null |
import os
import sys
import imp
import logging
from pkg_resources import resource_filename
from tempfile import mkdtemp
from abc import abstractmethod
from mrdomino import util
from mrdomino.util import MRCounter
class protocol(object):
JSONProtocol = 0
JSONValueProtocol = 1
PickleProtocol = 2 # unsupported
PickleValueProtocol = 3 # unsupported
RawProtocol = 4 # unsupported
RawValueProtocol = 5 # unsupported
ReprProtocol = 6 # unsupported
ReprValueProtocol = 7 # unsupported
logger = logging.getLogger('mrdomino')
logger.setLevel(logging.INFO)
handler = logging.StreamHandler(sys.stderr)
formatter = logging.Formatter('%(asctime)s: %(levelname)s: %(message)s')
handler.setFormatter(formatter)
logger.addHandler(handler)
EXEC_SCRIPT = resource_filename(__name__, "exec.sh")
def get_instance(args):
job_module = imp.load_source('job_module', args.job_module)
job_class = getattr(job_module, args.job_class)
return job_class()
def get_step(args):
return get_instance(args).steps()[args.step_idx]
class MRStep(object):
def __init__(self, mapper, reducer, combiner=None, n_mappers=2,
n_reducers=2):
# do some basic type checking to verify that we pass callables.
assert hasattr(mapper, '__call__')
self.mapper = mapper
assert hasattr(reducer, '__call__')
self.reducer = reducer
assert combiner is None or hasattr(combiner, '__call__')
self.combiner = combiner
assert isinstance(n_mappers, int)
self.n_mappers = n_mappers
assert isinstance(n_reducers, int)
self.n_reducers = n_reducers
class MRSettings(object):
def __init__(self, input_files, output_dir, tmp_dir, use_domino=False,
n_concurrent_machines=2, n_shards_per_machine=4):
assert isinstance(input_files, list)
self.input_files = input_files
assert isinstance(output_dir, str)
self.output_dir = output_dir
assert isinstance(tmp_dir, str)
self.tmp_dir = tmp_dir
assert isinstance(use_domino, bool)
self.use_domino = use_domino
assert isinstance(n_concurrent_machines, int)
self.n_concurrent_machines = n_concurrent_machines
assert isinstance(n_shards_per_machine, int)
self.n_shards_per_machine = n_shards_per_machine
def mapreduce(job_class):
job = job_class()
step_count = len(job._steps)
# if temporary directory root does not exist, create one
tmp_root = job._settings.tmp_dir
if not os.path.exists(tmp_root):
os.makedirs(tmp_root)
tmp_dirs = [mkdtemp(dir=tmp_root, prefix="step%d." % i)
for i in range(step_count)]
input_file_lists = [job._settings.input_files]
for step, out_dir in zip(job._steps, tmp_dirs):
n_reducers = step.n_reducers
reduce_format = os.path.join(out_dir, 'reduce.out.%d')
ff = [reduce_format % n for n in range(n_reducers)]
input_file_lists.append(ff)
logger.info("Input files: {}".format(input_file_lists))
# if output directory root does not exist, create one
output_dir = job._settings.output_dir
if not os.path.exists(output_dir):
os.makedirs(output_dir)
for i, step in enumerate(job._steps):
cmd_opts = [
EXEC_SCRIPT, 'mrdomino.step',
'--step_idx', i,
'--total_steps', step_count,
'--input_files', ' '.join(input_file_lists[i]),
'--work_dir', tmp_dirs[i],
'--output_dir', output_dir,
'--job_module', sys.modules[job.__module__].__file__,
'--job_class', job.__class__.__name__,
'--use_domino', int(job._settings.use_domino),
'--n_concurrent_machines', job._settings.n_concurrent_machines,
'--n_shards_per_machine', job._settings.n_shards_per_machine
]
cmd = util.create_cmd(cmd_opts)
logger.info("Starting step %d with command: %s" % (i, cmd))
util.wait_cmd(cmd, logger, "Step %d" % i)
logger.info('All done.')
class MRJob(object):
INPUT_PROTOCOL = protocol.JSONValueProtocol
INTERNAL_PROTOCOL = protocol.JSONProtocol
OUTPUT_PROTOCOL = protocol.JSONValueProtocol
def __init__(self, counters=None):
self._settings = self.settings()
self._steps = self.steps()
self._counters = MRCounter()
@classmethod
def run(cls):
mapreduce(cls)
@abstractmethod
def steps(self):
"""define steps necessary to run the job"""
@abstractmethod
def settings(self):
"""define settings"""
def increment_counter(self, group, counter, amount=1):
self._counters.incr(group, counter, amount)
def get_step(self, step_idx):
return self.steps()[step_idx]
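# --- Hedged usage sketch (not part of the original module) ---------------------
# A hypothetical job definition wiring the classes above together.  The mapper
# and reducer bodies are placeholders; this module only requires them to be
# callables, so no calling convention is assumed here.  All paths are made up
# for illustration.
def _example_mapper(*args, **kwargs):
    pass    # would emit intermediate key/value pairs
def _example_reducer(*args, **kwargs):
    pass    # would combine the values collected for each key
class _ExampleJob(MRJob):
    def settings(self):
        return MRSettings(input_files=['data/input.txt'],
                          output_dir='out',
                          tmp_dir='tmp')
    def steps(self):
        return [MRStep(mapper=_example_mapper, reducer=_example_reducer,
                       n_mappers=2, n_reducers=2)]
# _ExampleJob.run() would then shard the input and launch each step via exec.sh.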
| 31.512987
| 75
| 0.667422
|
280df66de612622bafcfeec906a80b6745aacaa1
| 1,143
|
py
|
Python
|
.tox/scenario/lib/python2.7/site-packages/tempest_lib/tests/fake_auth_provider.py
|
bdrich/neutron-lbaas
|
b4711abfe0207c4fdd5d7fb7ecbf017e753abbfd
|
[
"Apache-2.0"
] | null | null | null |
.tox/scenario/lib/python2.7/site-packages/tempest_lib/tests/fake_auth_provider.py
|
bdrich/neutron-lbaas
|
b4711abfe0207c4fdd5d7fb7ecbf017e753abbfd
|
[
"Apache-2.0"
] | null | null | null |
.tox/scenario/lib/python2.7/site-packages/tempest_lib/tests/fake_auth_provider.py
|
bdrich/neutron-lbaas
|
b4711abfe0207c4fdd5d7fb7ecbf017e753abbfd
|
[
"Apache-2.0"
] | 1
|
2020-07-21T02:18:23.000Z
|
2020-07-21T02:18:23.000Z
|
# Copyright 2014 Hewlett-Packard Development Company, L.P.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
class FakeAuthProvider(object):
def __init__(self, creds_dict={}):
self.credentials = FakeCredentials(creds_dict)
def auth_request(self, method, url, headers=None, body=None, filters=None):
return url, headers, body
def base_url(self, filters, auth_data=None):
return "https://example.com"
class FakeCredentials(object):
def __init__(self, creds_dict):
for key in creds_dict.keys():
setattr(self, key, creds_dict[key])
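# --- Hedged usage sketch (not part of the original module) ---------------------
# The fake provider simply echoes requests and exposes whatever credentials it
# was given as attributes, which is all the unit tests need.  The helper name
# and credential values are made up for illustration.
def _fake_auth_sketch():
    provider = FakeAuthProvider({'username': 'alice', 'tenant_name': 'demo'})
    print(provider.credentials.username)                      # 'alice'
    print(provider.base_url(filters={}))                      # 'https://example.com'
    print(provider.auth_request('GET', 'https://example.com/v2/servers'))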
| 33.617647
| 79
| 0.705162
|
0f939937a7aba24f5d25e7c0d16fcdb0eb6cbf97
| 81,825
|
py
|
Python
|
sdk/storage/azure-storage-file-share/azure/storage/fileshare/_generated/operations/_share_operations.py
|
xolve/azure-sdk-for-python
|
9f5baa19c392f77f811d936ee43450e4ea524002
|
[
"MIT"
] | 2,728
|
2015-01-09T10:19:32.000Z
|
2022-03-31T14:50:33.000Z
|
sdk/storage/azure-storage-file-share/azure/storage/fileshare/_generated/operations/_share_operations.py
|
v-xuto/azure-sdk-for-python
|
9c6296d22094c5ede410bc83749e8df8694ccacc
|
[
"MIT"
] | 17,773
|
2015-01-05T15:57:17.000Z
|
2022-03-31T23:50:25.000Z
|
sdk/storage/azure-storage-file-share/azure/storage/fileshare/_generated/operations/_share_operations.py
|
v-xuto/azure-sdk-for-python
|
9c6296d22094c5ede410bc83749e8df8694ccacc
|
[
"MIT"
] | 1,916
|
2015-01-19T05:05:41.000Z
|
2022-03-31T19:36:44.000Z
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import TYPE_CHECKING
import warnings
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import HttpRequest, HttpResponse
from .. import models as _models
if TYPE_CHECKING:
# pylint: disable=unused-import,ungrouped-imports
from typing import Any, Callable, Dict, Generic, List, Optional, TypeVar, Union
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
class ShareOperations(object):
"""ShareOperations operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.storage.fileshare.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
def __init__(self, client, config, serializer, deserializer):
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
def create(
self,
timeout=None, # type: Optional[int]
metadata=None, # type: Optional[str]
quota=None, # type: Optional[int]
access_tier=None, # type: Optional[Union[str, "_models.ShareAccessTier"]]
enabled_protocols=None, # type: Optional[str]
root_squash=None, # type: Optional[Union[str, "_models.ShareRootSquash"]]
**kwargs # type: Any
):
# type: (...) -> None
"""Creates a new share under the specified account. If the share with the same name already
exists, the operation fails.
:param timeout: The timeout parameter is expressed in seconds. For more information, see
:code:`<a
href="https://docs.microsoft.com/en-us/rest/api/storageservices/Setting-Timeouts-for-File-Service-Operations?redirectedfrom=MSDN">Setting
Timeouts for File Service Operations.</a>`.
:type timeout: int
:param metadata: A name-value pair to associate with a file storage object.
:type metadata: str
:param quota: Specifies the maximum size of the share, in gigabytes.
:type quota: int
:param access_tier: Specifies the access tier of the share.
:type access_tier: str or ~azure.storage.fileshare.models.ShareAccessTier
:param enabled_protocols: Protocols to enable on the share.
:type enabled_protocols: str
:param root_squash: Root squash to set on the share. Only valid for NFS shares.
:type root_squash: str or ~azure.storage.fileshare.models.ShareRootSquash
:keyword callable cls: A custom type or function that will be passed the direct response
:return: None, or the result of cls(response)
:rtype: None
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
restype = "share"
accept = "application/xml"
# Construct URL
url = self.create.metadata['url'] # type: ignore
path_format_arguments = {
'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['restype'] = self._serialize.query("restype", restype, 'str')
if timeout is not None:
query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0)
# Construct headers
header_parameters = {} # type: Dict[str, Any]
if metadata is not None:
header_parameters['x-ms-meta'] = self._serialize.header("metadata", metadata, 'str')
if quota is not None:
header_parameters['x-ms-share-quota'] = self._serialize.header("quota", quota, 'int', minimum=1)
if access_tier is not None:
header_parameters['x-ms-access-tier'] = self._serialize.header("access_tier", access_tier, 'str')
header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str')
if enabled_protocols is not None:
header_parameters['x-ms-enabled-protocols'] = self._serialize.header("enabled_protocols", enabled_protocols, 'str')
if root_squash is not None:
header_parameters['x-ms-root-squash'] = self._serialize.header("root_squash", root_squash, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.put(url, query_parameters, header_parameters)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [201]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.StorageError, response)
raise HttpResponseError(response=response, model=error)
response_headers = {}
response_headers['ETag']=self._deserialize('str', response.headers.get('ETag'))
response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified'))
response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id'))
response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version'))
response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date'))
if cls:
return cls(pipeline_response, None, response_headers)
create.metadata = {'url': '/{shareName}'} # type: ignore
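    # Hedged note (not part of the generated code): callers normally do not use
    # ShareOperations directly.  The public ShareClient in azure-storage-file-share
    # wraps it, roughly along the lines of (connection string and share name are
    # placeholders):
    #
    #     from azure.storage.fileshare import ShareClient
    #     share = ShareClient.from_connection_string("<connection-string>", "myshare")
    #     share.create_share(metadata={"env": "test"}, quota=1)
    #
    # which ends up invoking the create() operation above through the pipeline.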
def get_properties(
self,
sharesnapshot=None, # type: Optional[str]
timeout=None, # type: Optional[int]
lease_access_conditions=None, # type: Optional["_models.LeaseAccessConditions"]
**kwargs # type: Any
):
# type: (...) -> None
"""Returns all user-defined metadata and system properties for the specified share or share
snapshot. The data returned does not include the share's list of files.
:param sharesnapshot: The snapshot parameter is an opaque DateTime value that, when present,
specifies the share snapshot to query.
:type sharesnapshot: str
:param timeout: The timeout parameter is expressed in seconds. For more information, see
:code:`<a
href="https://docs.microsoft.com/en-us/rest/api/storageservices/Setting-Timeouts-for-File-Service-Operations?redirectedfrom=MSDN">Setting
Timeouts for File Service Operations.</a>`.
:type timeout: int
:param lease_access_conditions: Parameter group.
:type lease_access_conditions: ~azure.storage.fileshare.models.LeaseAccessConditions
:keyword callable cls: A custom type or function that will be passed the direct response
:return: None, or the result of cls(response)
:rtype: None
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
_lease_id = None
if lease_access_conditions is not None:
_lease_id = lease_access_conditions.lease_id
restype = "share"
accept = "application/xml"
# Construct URL
url = self.get_properties.metadata['url'] # type: ignore
path_format_arguments = {
'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['restype'] = self._serialize.query("restype", restype, 'str')
if sharesnapshot is not None:
query_parameters['sharesnapshot'] = self._serialize.query("sharesnapshot", sharesnapshot, 'str')
if timeout is not None:
query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0)
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str')
if _lease_id is not None:
header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", _lease_id, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.StorageError, response)
raise HttpResponseError(response=response, model=error)
response_headers = {}
response_headers['x-ms-meta']=self._deserialize('str', response.headers.get('x-ms-meta'))
response_headers['ETag']=self._deserialize('str', response.headers.get('ETag'))
response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified'))
response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id'))
response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version'))
response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date'))
response_headers['x-ms-share-quota']=self._deserialize('int', response.headers.get('x-ms-share-quota'))
response_headers['x-ms-share-provisioned-iops']=self._deserialize('int', response.headers.get('x-ms-share-provisioned-iops'))
response_headers['x-ms-share-provisioned-ingress-mbps']=self._deserialize('int', response.headers.get('x-ms-share-provisioned-ingress-mbps'))
response_headers['x-ms-share-provisioned-egress-mbps']=self._deserialize('int', response.headers.get('x-ms-share-provisioned-egress-mbps'))
response_headers['x-ms-share-next-allowed-quota-downgrade-time']=self._deserialize('rfc-1123', response.headers.get('x-ms-share-next-allowed-quota-downgrade-time'))
response_headers['x-ms-share-provisioned-bandwidth-mibps']=self._deserialize('int', response.headers.get('x-ms-share-provisioned-bandwidth-mibps'))
response_headers['x-ms-lease-duration']=self._deserialize('str', response.headers.get('x-ms-lease-duration'))
response_headers['x-ms-lease-state']=self._deserialize('str', response.headers.get('x-ms-lease-state'))
response_headers['x-ms-lease-status']=self._deserialize('str', response.headers.get('x-ms-lease-status'))
response_headers['x-ms-access-tier']=self._deserialize('str', response.headers.get('x-ms-access-tier'))
response_headers['x-ms-access-tier-change-time']=self._deserialize('rfc-1123', response.headers.get('x-ms-access-tier-change-time'))
response_headers['x-ms-access-tier-transition-state']=self._deserialize('str', response.headers.get('x-ms-access-tier-transition-state'))
response_headers['x-ms-enabled-protocols']=self._deserialize('str', response.headers.get('x-ms-enabled-protocols'))
response_headers['x-ms-root-squash']=self._deserialize('str', response.headers.get('x-ms-root-squash'))
if cls:
return cls(pipeline_response, None, response_headers)
get_properties.metadata = {'url': '/{shareName}'} # type: ignore
def delete(
self,
sharesnapshot=None, # type: Optional[str]
timeout=None, # type: Optional[int]
delete_snapshots=None, # type: Optional[Union[str, "_models.DeleteSnapshotsOptionType"]]
lease_access_conditions=None, # type: Optional["_models.LeaseAccessConditions"]
**kwargs # type: Any
):
# type: (...) -> None
"""Operation marks the specified share or share snapshot for deletion. The share or share snapshot
and any files contained within it are later deleted during garbage collection.
:param sharesnapshot: The snapshot parameter is an opaque DateTime value that, when present,
specifies the share snapshot to query.
:type sharesnapshot: str
:param timeout: The timeout parameter is expressed in seconds. For more information, see
:code:`<a
href="https://docs.microsoft.com/en-us/rest/api/storageservices/Setting-Timeouts-for-File-Service-Operations?redirectedfrom=MSDN">Setting
Timeouts for File Service Operations.</a>`.
:type timeout: int
        :param delete_snapshots: Specifies whether to delete the base share and all of its
snapshots.
:type delete_snapshots: str or ~azure.storage.fileshare.models.DeleteSnapshotsOptionType
:param lease_access_conditions: Parameter group.
:type lease_access_conditions: ~azure.storage.fileshare.models.LeaseAccessConditions
:keyword callable cls: A custom type or function that will be passed the direct response
:return: None, or the result of cls(response)
:rtype: None
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
_lease_id = None
if lease_access_conditions is not None:
_lease_id = lease_access_conditions.lease_id
restype = "share"
accept = "application/xml"
# Construct URL
url = self.delete.metadata['url'] # type: ignore
path_format_arguments = {
'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['restype'] = self._serialize.query("restype", restype, 'str')
if sharesnapshot is not None:
query_parameters['sharesnapshot'] = self._serialize.query("sharesnapshot", sharesnapshot, 'str')
if timeout is not None:
query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0)
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str')
if delete_snapshots is not None:
header_parameters['x-ms-delete-snapshots'] = self._serialize.header("delete_snapshots", delete_snapshots, 'str')
if _lease_id is not None:
header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", _lease_id, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.delete(url, query_parameters, header_parameters)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [202]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.StorageError, response)
raise HttpResponseError(response=response, model=error)
response_headers = {}
response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id'))
response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version'))
response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date'))
if cls:
return cls(pipeline_response, None, response_headers)
delete.metadata = {'url': '/{shareName}'} # type: ignore
def acquire_lease(
self,
timeout=None, # type: Optional[int]
duration=None, # type: Optional[int]
proposed_lease_id=None, # type: Optional[str]
sharesnapshot=None, # type: Optional[str]
request_id_parameter=None, # type: Optional[str]
**kwargs # type: Any
):
# type: (...) -> None
"""The Lease Share operation establishes and manages a lock on a share, or the specified snapshot
for set and delete share operations.
:param timeout: The timeout parameter is expressed in seconds. For more information, see
:code:`<a
href="https://docs.microsoft.com/en-us/rest/api/storageservices/Setting-Timeouts-for-File-Service-Operations?redirectedfrom=MSDN">Setting
Timeouts for File Service Operations.</a>`.
:type timeout: int
:param duration: Specifies the duration of the lease, in seconds, or negative one (-1) for a
lease that never expires. A non-infinite lease can be between 15 and 60 seconds. A lease
duration cannot be changed using renew or change.
:type duration: int
:param proposed_lease_id: Proposed lease ID, in a GUID string format. The File service returns
400 (Invalid request) if the proposed lease ID is not in the correct format. See Guid
Constructor (String) for a list of valid GUID string formats.
:type proposed_lease_id: str
:param sharesnapshot: The snapshot parameter is an opaque DateTime value that, when present,
specifies the share snapshot to query.
:type sharesnapshot: str
:param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character
limit that is recorded in the analytics logs when storage analytics logging is enabled.
:type request_id_parameter: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: None, or the result of cls(response)
:rtype: None
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
comp = "lease"
action = "acquire"
restype = "share"
accept = "application/xml"
# Construct URL
url = self.acquire_lease.metadata['url'] # type: ignore
path_format_arguments = {
'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['comp'] = self._serialize.query("comp", comp, 'str')
query_parameters['restype'] = self._serialize.query("restype", restype, 'str')
if timeout is not None:
query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0)
if sharesnapshot is not None:
query_parameters['sharesnapshot'] = self._serialize.query("sharesnapshot", sharesnapshot, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['x-ms-lease-action'] = self._serialize.header("action", action, 'str')
if duration is not None:
header_parameters['x-ms-lease-duration'] = self._serialize.header("duration", duration, 'int')
if proposed_lease_id is not None:
header_parameters['x-ms-proposed-lease-id'] = self._serialize.header("proposed_lease_id", proposed_lease_id, 'str')
header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str')
if request_id_parameter is not None:
header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.put(url, query_parameters, header_parameters)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [201]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.StorageError, response)
raise HttpResponseError(response=response, model=error)
response_headers = {}
response_headers['ETag']=self._deserialize('str', response.headers.get('ETag'))
response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified'))
response_headers['x-ms-lease-id']=self._deserialize('str', response.headers.get('x-ms-lease-id'))
response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id'))
response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id'))
response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version'))
response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date'))
if cls:
return cls(pipeline_response, None, response_headers)
acquire_lease.metadata = {'url': '/{shareName}'} # type: ignore
def release_lease(
self,
lease_id, # type: str
timeout=None, # type: Optional[int]
sharesnapshot=None, # type: Optional[str]
request_id_parameter=None, # type: Optional[str]
**kwargs # type: Any
):
# type: (...) -> None
"""The Lease Share operation establishes and manages a lock on a share, or the specified snapshot
for set and delete share operations.
:param lease_id: Specifies the current lease ID on the resource.
:type lease_id: str
:param timeout: The timeout parameter is expressed in seconds. For more information, see
:code:`<a
href="https://docs.microsoft.com/en-us/rest/api/storageservices/Setting-Timeouts-for-File-Service-Operations?redirectedfrom=MSDN">Setting
Timeouts for File Service Operations.</a>`.
:type timeout: int
:param sharesnapshot: The snapshot parameter is an opaque DateTime value that, when present,
specifies the share snapshot to query.
:type sharesnapshot: str
:param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character
limit that is recorded in the analytics logs when storage analytics logging is enabled.
:type request_id_parameter: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: None, or the result of cls(response)
:rtype: None
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
comp = "lease"
action = "release"
restype = "share"
accept = "application/xml"
# Construct URL
url = self.release_lease.metadata['url'] # type: ignore
path_format_arguments = {
'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['comp'] = self._serialize.query("comp", comp, 'str')
query_parameters['restype'] = self._serialize.query("restype", restype, 'str')
if timeout is not None:
query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0)
if sharesnapshot is not None:
query_parameters['sharesnapshot'] = self._serialize.query("sharesnapshot", sharesnapshot, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['x-ms-lease-action'] = self._serialize.header("action", action, 'str')
header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", lease_id, 'str')
header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str')
if request_id_parameter is not None:
header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.put(url, query_parameters, header_parameters)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.StorageError, response)
raise HttpResponseError(response=response, model=error)
response_headers = {}
response_headers['ETag']=self._deserialize('str', response.headers.get('ETag'))
response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified'))
response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id'))
response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id'))
response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version'))
response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date'))
if cls:
return cls(pipeline_response, None, response_headers)
release_lease.metadata = {'url': '/{shareName}'} # type: ignore
def change_lease(
self,
lease_id, # type: str
timeout=None, # type: Optional[int]
proposed_lease_id=None, # type: Optional[str]
sharesnapshot=None, # type: Optional[str]
request_id_parameter=None, # type: Optional[str]
**kwargs # type: Any
):
# type: (...) -> None
"""The Lease Share operation establishes and manages a lock on a share, or the specified snapshot
for set and delete share operations.
:param lease_id: Specifies the current lease ID on the resource.
:type lease_id: str
:param timeout: The timeout parameter is expressed in seconds. For more information, see
:code:`<a
href="https://docs.microsoft.com/en-us/rest/api/storageservices/Setting-Timeouts-for-File-Service-Operations?redirectedfrom=MSDN">Setting
Timeouts for File Service Operations.</a>`.
:type timeout: int
:param proposed_lease_id: Proposed lease ID, in a GUID string format. The File service returns
400 (Invalid request) if the proposed lease ID is not in the correct format. See Guid
Constructor (String) for a list of valid GUID string formats.
:type proposed_lease_id: str
:param sharesnapshot: The snapshot parameter is an opaque DateTime value that, when present,
specifies the share snapshot to query.
:type sharesnapshot: str
:param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character
limit that is recorded in the analytics logs when storage analytics logging is enabled.
:type request_id_parameter: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: None, or the result of cls(response)
:rtype: None
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
comp = "lease"
action = "change"
restype = "share"
accept = "application/xml"
# Construct URL
url = self.change_lease.metadata['url'] # type: ignore
path_format_arguments = {
'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['comp'] = self._serialize.query("comp", comp, 'str')
query_parameters['restype'] = self._serialize.query("restype", restype, 'str')
if timeout is not None:
query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0)
if sharesnapshot is not None:
query_parameters['sharesnapshot'] = self._serialize.query("sharesnapshot", sharesnapshot, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['x-ms-lease-action'] = self._serialize.header("action", action, 'str')
header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", lease_id, 'str')
if proposed_lease_id is not None:
header_parameters['x-ms-proposed-lease-id'] = self._serialize.header("proposed_lease_id", proposed_lease_id, 'str')
header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str')
if request_id_parameter is not None:
header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.put(url, query_parameters, header_parameters)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.StorageError, response)
raise HttpResponseError(response=response, model=error)
response_headers = {}
response_headers['ETag']=self._deserialize('str', response.headers.get('ETag'))
response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified'))
response_headers['x-ms-lease-id']=self._deserialize('str', response.headers.get('x-ms-lease-id'))
response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id'))
response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id'))
response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version'))
response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date'))
if cls:
return cls(pipeline_response, None, response_headers)
change_lease.metadata = {'url': '/{shareName}'} # type: ignore
def renew_lease(
self,
lease_id, # type: str
timeout=None, # type: Optional[int]
sharesnapshot=None, # type: Optional[str]
request_id_parameter=None, # type: Optional[str]
**kwargs # type: Any
):
# type: (...) -> None
"""The Lease Share operation establishes and manages a lock on a share, or the specified snapshot
for set and delete share operations.
:param lease_id: Specifies the current lease ID on the resource.
:type lease_id: str
:param timeout: The timeout parameter is expressed in seconds. For more information, see
:code:`<a
href="https://docs.microsoft.com/en-us/rest/api/storageservices/Setting-Timeouts-for-File-Service-Operations?redirectedfrom=MSDN">Setting
Timeouts for File Service Operations.</a>`.
:type timeout: int
:param sharesnapshot: The snapshot parameter is an opaque DateTime value that, when present,
specifies the share snapshot to query.
:type sharesnapshot: str
:param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character
limit that is recorded in the analytics logs when storage analytics logging is enabled.
:type request_id_parameter: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: None, or the result of cls(response)
:rtype: None
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
comp = "lease"
action = "renew"
restype = "share"
accept = "application/xml"
# Construct URL
url = self.renew_lease.metadata['url'] # type: ignore
path_format_arguments = {
'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['comp'] = self._serialize.query("comp", comp, 'str')
query_parameters['restype'] = self._serialize.query("restype", restype, 'str')
if timeout is not None:
query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0)
if sharesnapshot is not None:
query_parameters['sharesnapshot'] = self._serialize.query("sharesnapshot", sharesnapshot, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['x-ms-lease-action'] = self._serialize.header("action", action, 'str')
header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", lease_id, 'str')
header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str')
if request_id_parameter is not None:
header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.put(url, query_parameters, header_parameters)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.StorageError, response)
raise HttpResponseError(response=response, model=error)
response_headers = {}
response_headers['ETag']=self._deserialize('str', response.headers.get('ETag'))
response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified'))
response_headers['x-ms-lease-id']=self._deserialize('str', response.headers.get('x-ms-lease-id'))
response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id'))
response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id'))
response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version'))
response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date'))
if cls:
return cls(pipeline_response, None, response_headers)
renew_lease.metadata = {'url': '/{shareName}'} # type: ignore
def break_lease(
self,
timeout=None, # type: Optional[int]
break_period=None, # type: Optional[int]
request_id_parameter=None, # type: Optional[str]
sharesnapshot=None, # type: Optional[str]
lease_access_conditions=None, # type: Optional["_models.LeaseAccessConditions"]
**kwargs # type: Any
):
# type: (...) -> None
"""The Lease Share operation establishes and manages a lock on a share, or the specified snapshot
for set and delete share operations.
:param timeout: The timeout parameter is expressed in seconds. For more information, see
:code:`<a
href="https://docs.microsoft.com/en-us/rest/api/storageservices/Setting-Timeouts-for-File-Service-Operations?redirectedfrom=MSDN">Setting
Timeouts for File Service Operations.</a>`.
:type timeout: int
:param break_period: For a break operation, proposed duration the lease should continue before
it is broken, in seconds, between 0 and 60. This break period is only used if it is shorter
than the time remaining on the lease. If longer, the time remaining on the lease is used. A new
lease will not be available before the break period has expired, but the lease may be held for
longer than the break period. If this header does not appear with a break operation, a
fixed-duration lease breaks after the remaining lease period elapses, and an infinite lease
breaks immediately.
:type break_period: int
:param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character
limit that is recorded in the analytics logs when storage analytics logging is enabled.
:type request_id_parameter: str
:param sharesnapshot: The snapshot parameter is an opaque DateTime value that, when present,
specifies the share snapshot to query.
:type sharesnapshot: str
:param lease_access_conditions: Parameter group.
:type lease_access_conditions: ~azure.storage.fileshare.models.LeaseAccessConditions
:keyword callable cls: A custom type or function that will be passed the direct response
:return: None, or the result of cls(response)
:rtype: None
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
_lease_id = None
if lease_access_conditions is not None:
_lease_id = lease_access_conditions.lease_id
comp = "lease"
action = "break"
restype = "share"
accept = "application/xml"
# Construct URL
url = self.break_lease.metadata['url'] # type: ignore
path_format_arguments = {
'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['comp'] = self._serialize.query("comp", comp, 'str')
query_parameters['restype'] = self._serialize.query("restype", restype, 'str')
if timeout is not None:
query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0)
if sharesnapshot is not None:
query_parameters['sharesnapshot'] = self._serialize.query("sharesnapshot", sharesnapshot, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['x-ms-lease-action'] = self._serialize.header("action", action, 'str')
if break_period is not None:
header_parameters['x-ms-lease-break-period'] = self._serialize.header("break_period", break_period, 'int')
if _lease_id is not None:
header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", _lease_id, 'str')
header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str')
if request_id_parameter is not None:
header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.put(url, query_parameters, header_parameters)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [202]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.StorageError, response)
raise HttpResponseError(response=response, model=error)
response_headers = {}
response_headers['ETag']=self._deserialize('str', response.headers.get('ETag'))
response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified'))
response_headers['x-ms-lease-time']=self._deserialize('int', response.headers.get('x-ms-lease-time'))
response_headers['x-ms-lease-id']=self._deserialize('str', response.headers.get('x-ms-lease-id'))
response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id'))
response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id'))
response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version'))
response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date'))
if cls:
return cls(pipeline_response, None, response_headers)
break_lease.metadata = {'url': '/{shareName}'} # type: ignore
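# A minimal usage sketch (editor's illustration, not part of the generated client): share leases are
# normally acquired, renewed and broken through the hand-written ShareClient/ShareLeaseClient wrappers,
# which in turn call renew_lease/break_lease above. Assuming the public azure-storage-file-share surface
# and a connection string named `conn_str`:
#
#     from azure.storage.fileshare import ShareClient
#
#     share = ShareClient.from_connection_string(conn_str, share_name="myshare")
#     lease = share.acquire_lease()   # returns a ShareLeaseClient holding the lease id
#     lease.renew()                   # drives renew_lease (x-ms-lease-action: renew)
#     lease.break_lease()             # drives break_lease (x-ms-lease-action: break)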
def create_snapshot(
self,
timeout=None, # type: Optional[int]
metadata=None, # type: Optional[str]
**kwargs # type: Any
):
# type: (...) -> None
"""Creates a read-only snapshot of a share.
:param timeout: The timeout parameter is expressed in seconds. For more information, see
:code:`<a
href="https://docs.microsoft.com/en-us/rest/api/storageservices/Setting-Timeouts-for-File-Service-Operations?redirectedfrom=MSDN">Setting
Timeouts for File Service Operations.</a>`.
:type timeout: int
:param metadata: A name-value pair to associate with a file storage object.
:type metadata: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: None, or the result of cls(response)
:rtype: None
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
restype = "share"
comp = "snapshot"
accept = "application/xml"
# Construct URL
url = self.create_snapshot.metadata['url'] # type: ignore
path_format_arguments = {
'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['restype'] = self._serialize.query("restype", restype, 'str')
query_parameters['comp'] = self._serialize.query("comp", comp, 'str')
if timeout is not None:
query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0)
# Construct headers
header_parameters = {} # type: Dict[str, Any]
if metadata is not None:
header_parameters['x-ms-meta'] = self._serialize.header("metadata", metadata, 'str')
header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.put(url, query_parameters, header_parameters)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [201]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.StorageError, response)
raise HttpResponseError(response=response, model=error)
response_headers = {}
response_headers['x-ms-snapshot']=self._deserialize('str', response.headers.get('x-ms-snapshot'))
response_headers['ETag']=self._deserialize('str', response.headers.get('ETag'))
response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified'))
response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id'))
response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version'))
response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date'))
if cls:
return cls(pipeline_response, None, response_headers)
create_snapshot.metadata = {'url': '/{shareName}'} # type: ignore
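# Illustrative sketch only: a share snapshot is usually taken through the public ShareClient wrapper,
# which ends up calling create_snapshot above. Assuming a ShareClient named `share`:
#
#     snapshot_info = share.create_snapshot(metadata={"purpose": "backup"})
#     # snapshot_info is a dict-like result; the new snapshot id is returned under "snapshot"
#     # (the x-ms-snapshot response header deserialized above)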
def create_permission(
self,
share_permission, # type: "_models.SharePermission"
timeout=None, # type: Optional[int]
**kwargs # type: Any
):
# type: (...) -> None
"""Create a permission (a security descriptor).
:param share_permission: A permission (a security descriptor) at the share level.
:type share_permission: ~azure.storage.fileshare.models.SharePermission
:param timeout: The timeout parameter is expressed in seconds. For more information, see
:code:`<a
href="https://docs.microsoft.com/en-us/rest/api/storageservices/Setting-Timeouts-for-File-Service-Operations?redirectedfrom=MSDN">Setting
Timeouts for File Service Operations.</a>`.
:type timeout: int
:keyword callable cls: A custom type or function that will be passed the direct response
:return: None, or the result of cls(response)
:rtype: None
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
restype = "share"
comp = "filepermission"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/xml"
# Construct URL
url = self.create_permission.metadata['url'] # type: ignore
path_format_arguments = {
'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['restype'] = self._serialize.query("restype", restype, 'str')
query_parameters['comp'] = self._serialize.query("comp", comp, 'str')
if timeout is not None:
query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0)
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str')
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(share_permission, 'SharePermission')
body_content_kwargs['content'] = body_content
request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [201]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.StorageError, response)
raise HttpResponseError(response=response, model=error)
response_headers = {}
response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id'))
response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version'))
response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date'))
response_headers['x-ms-file-permission-key']=self._deserialize('str', response.headers.get('x-ms-file-permission-key'))
if cls:
return cls(pipeline_response, None, response_headers)
create_permission.metadata = {'url': '/{shareName}'} # type: ignore
def get_permission(
self,
file_permission_key, # type: str
timeout=None, # type: Optional[int]
**kwargs # type: Any
):
# type: (...) -> "_models.SharePermission"
"""Returns the permission (security descriptor) for a given key.
:param file_permission_key: Key of the permission to be set for the directory/file.
:type file_permission_key: str
:param timeout: The timeout parameter is expressed in seconds. For more information, see
:code:`<a
href="https://docs.microsoft.com/en-us/rest/api/storageservices/Setting-Timeouts-for-File-Service-Operations?redirectedfrom=MSDN">Setting
Timeouts for File Service Operations.</a>`.
:type timeout: int
:keyword callable cls: A custom type or function that will be passed the direct response
:return: SharePermission, or the result of cls(response)
:rtype: ~azure.storage.fileshare.models.SharePermission
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.SharePermission"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
restype = "share"
comp = "filepermission"
accept = "application/json"
# Construct URL
url = self.get_permission.metadata['url'] # type: ignore
path_format_arguments = {
'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['restype'] = self._serialize.query("restype", restype, 'str')
query_parameters['comp'] = self._serialize.query("comp", comp, 'str')
if timeout is not None:
query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0)
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['x-ms-file-permission-key'] = self._serialize.header("file_permission_key", file_permission_key, 'str')
header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.StorageError, response)
raise HttpResponseError(response=response, model=error)
response_headers = {}
response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id'))
response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version'))
response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date'))
deserialized = self._deserialize('SharePermission', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, response_headers)
return deserialized
get_permission.metadata = {'url': '/{shareName}'} # type: ignore
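# Illustrative sketch only: create_permission/get_permission round-trip a security descriptor (an SDDL
# string) through a server-side permission key. Assuming the public ShareClient wrapper named `share`
# and a hypothetical descriptor used purely for illustration:
#
#     sddl = "O:SYG:SYD:(A;;FA;;;SY)"                   # example descriptor, illustration only
#     key = share.create_permission_for_share(sddl)     # maps to create_permission above
#     same_sddl = share.get_permission_for_share(key)   # maps to get_permission above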
def set_properties(
self,
timeout=None, # type: Optional[int]
quota=None, # type: Optional[int]
access_tier=None, # type: Optional[Union[str, "_models.ShareAccessTier"]]
root_squash=None, # type: Optional[Union[str, "_models.ShareRootSquash"]]
lease_access_conditions=None, # type: Optional["_models.LeaseAccessConditions"]
**kwargs # type: Any
):
# type: (...) -> None
"""Sets properties for the specified share.
:param timeout: The timeout parameter is expressed in seconds. For more information, see
:code:`<a
href="https://docs.microsoft.com/en-us/rest/api/storageservices/Setting-Timeouts-for-File-Service-Operations?redirectedfrom=MSDN">Setting
Timeouts for File Service Operations.</a>`.
:type timeout: int
:param quota: Specifies the maximum size of the share, in gigabytes.
:type quota: int
:param access_tier: Specifies the access tier of the share.
:type access_tier: str or ~azure.storage.fileshare.models.ShareAccessTier
:param root_squash: Root squash to set on the share. Only valid for NFS shares.
:type root_squash: str or ~azure.storage.fileshare.models.ShareRootSquash
:param lease_access_conditions: Parameter group.
:type lease_access_conditions: ~azure.storage.fileshare.models.LeaseAccessConditions
:keyword callable cls: A custom type or function that will be passed the direct response
:return: None, or the result of cls(response)
:rtype: None
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
_lease_id = None
if lease_access_conditions is not None:
_lease_id = lease_access_conditions.lease_id
restype = "share"
comp = "properties"
accept = "application/xml"
# Construct URL
url = self.set_properties.metadata['url'] # type: ignore
path_format_arguments = {
'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['restype'] = self._serialize.query("restype", restype, 'str')
query_parameters['comp'] = self._serialize.query("comp", comp, 'str')
if timeout is not None:
query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0)
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str')
if quota is not None:
header_parameters['x-ms-share-quota'] = self._serialize.header("quota", quota, 'int', minimum=1)
if access_tier is not None:
header_parameters['x-ms-access-tier'] = self._serialize.header("access_tier", access_tier, 'str')
if _lease_id is not None:
header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", _lease_id, 'str')
if root_squash is not None:
header_parameters['x-ms-root-squash'] = self._serialize.header("root_squash", root_squash, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.put(url, query_parameters, header_parameters)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.StorageError, response)
raise HttpResponseError(response=response, model=error)
response_headers = {}
response_headers['ETag']=self._deserialize('str', response.headers.get('ETag'))
response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified'))
response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id'))
response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version'))
response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date'))
if cls:
return cls(pipeline_response, None, response_headers)
set_properties.metadata = {'url': '/{shareName}'} # type: ignore
def set_metadata(
self,
timeout=None, # type: Optional[int]
metadata=None, # type: Optional[str]
lease_access_conditions=None, # type: Optional["_models.LeaseAccessConditions"]
**kwargs # type: Any
):
# type: (...) -> None
"""Sets one or more user-defined name-value pairs for the specified share.
:param timeout: The timeout parameter is expressed in seconds. For more information, see
:code:`<a
href="https://docs.microsoft.com/en-us/rest/api/storageservices/Setting-Timeouts-for-File-Service-Operations?redirectedfrom=MSDN">Setting
Timeouts for File Service Operations.</a>`.
:type timeout: int
:param metadata: A name-value pair to associate with a file storage object.
:type metadata: str
:param lease_access_conditions: Parameter group.
:type lease_access_conditions: ~azure.storage.fileshare.models.LeaseAccessConditions
:keyword callable cls: A custom type or function that will be passed the direct response
:return: None, or the result of cls(response)
:rtype: None
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
_lease_id = None
if lease_access_conditions is not None:
_lease_id = lease_access_conditions.lease_id
restype = "share"
comp = "metadata"
accept = "application/xml"
# Construct URL
url = self.set_metadata.metadata['url'] # type: ignore
path_format_arguments = {
'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['restype'] = self._serialize.query("restype", restype, 'str')
query_parameters['comp'] = self._serialize.query("comp", comp, 'str')
if timeout is not None:
query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0)
# Construct headers
header_parameters = {} # type: Dict[str, Any]
if metadata is not None:
header_parameters['x-ms-meta'] = self._serialize.header("metadata", metadata, 'str')
header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str')
if _lease_id is not None:
header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", _lease_id, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.put(url, query_parameters, header_parameters)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.StorageError, response)
raise HttpResponseError(response=response, model=error)
response_headers = {}
response_headers['ETag']=self._deserialize('str', response.headers.get('ETag'))
response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified'))
response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id'))
response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version'))
response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date'))
if cls:
return cls(pipeline_response, None, response_headers)
set_metadata.metadata = {'url': '/{shareName}'} # type: ignore
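# Illustrative sketch only: share quota and user-defined metadata are normally set through the public
# ShareClient wrapper, which drives set_properties and set_metadata above. Assuming a ShareClient named
# `share`:
#
#     share.set_share_quota(quota=100)                        # size cap in GB (x-ms-share-quota)
#     share.set_share_metadata(metadata={"team": "storage"})  # sent to the service as x-ms-meta-* headers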
def get_access_policy(
self,
timeout=None, # type: Optional[int]
lease_access_conditions=None, # type: Optional["_models.LeaseAccessConditions"]
**kwargs # type: Any
):
# type: (...) -> List["_models.SignedIdentifier"]
"""Returns information about stored access policies specified on the share.
:param timeout: The timeout parameter is expressed in seconds. For more information, see
:code:`<a
href="https://docs.microsoft.com/en-us/rest/api/storageservices/Setting-Timeouts-for-File-Service-Operations?redirectedfrom=MSDN">Setting
Timeouts for File Service Operations.</a>`.
:type timeout: int
:param lease_access_conditions: Parameter group.
:type lease_access_conditions: ~azure.storage.fileshare.models.LeaseAccessConditions
:keyword callable cls: A custom type or function that will be passed the direct response
:return: list of SignedIdentifier, or the result of cls(response)
:rtype: list[~azure.storage.fileshare.models.SignedIdentifier]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType[List["_models.SignedIdentifier"]]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
_lease_id = None
if lease_access_conditions is not None:
_lease_id = lease_access_conditions.lease_id
restype = "share"
comp = "acl"
accept = "application/xml"
# Construct URL
url = self.get_access_policy.metadata['url'] # type: ignore
path_format_arguments = {
'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['restype'] = self._serialize.query("restype", restype, 'str')
query_parameters['comp'] = self._serialize.query("comp", comp, 'str')
if timeout is not None:
query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0)
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str')
if _lease_id is not None:
header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", _lease_id, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.StorageError, response)
raise HttpResponseError(response=response, model=error)
response_headers = {}
response_headers['ETag']=self._deserialize('str', response.headers.get('ETag'))
response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified'))
response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id'))
response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version'))
response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date'))
deserialized = self._deserialize('[SignedIdentifier]', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, response_headers)
return deserialized
get_access_policy.metadata = {'url': '/{shareName}'} # type: ignore
def set_access_policy(
self,
timeout=None, # type: Optional[int]
share_acl=None, # type: Optional[List["_models.SignedIdentifier"]]
lease_access_conditions=None, # type: Optional["_models.LeaseAccessConditions"]
**kwargs # type: Any
):
# type: (...) -> None
"""Sets a stored access policy for use with shared access signatures.
:param timeout: The timeout parameter is expressed in seconds. For more information, see
:code:`<a
href="https://docs.microsoft.com/en-us/rest/api/storageservices/Setting-Timeouts-for-File-Service-Operations?redirectedfrom=MSDN">Setting
Timeouts for File Service Operations.</a>`.
:type timeout: int
:param share_acl: The ACL for the share.
:type share_acl: list[~azure.storage.fileshare.models.SignedIdentifier]
:param lease_access_conditions: Parameter group.
:type lease_access_conditions: ~azure.storage.fileshare.models.LeaseAccessConditions
:keyword callable cls: A custom type or function that will be passed the direct response
:return: None, or the result of cls(response)
:rtype: None
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
_lease_id = None
if lease_access_conditions is not None:
_lease_id = lease_access_conditions.lease_id
restype = "share"
comp = "acl"
content_type = kwargs.pop("content_type", "application/xml")
accept = "application/xml"
# Construct URL
url = self.set_access_policy.metadata['url'] # type: ignore
path_format_arguments = {
'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['restype'] = self._serialize.query("restype", restype, 'str')
query_parameters['comp'] = self._serialize.query("comp", comp, 'str')
if timeout is not None:
query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0)
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str')
if _lease_id is not None:
header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", _lease_id, 'str')
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
serialization_ctxt = {'xml': {'name': 'SignedIdentifiers', 'wrapped': True}}
if share_acl is not None:
body_content = self._serialize.body(share_acl, '[SignedIdentifier]', is_xml=True, serialization_ctxt=serialization_ctxt)
else:
body_content = None
body_content_kwargs['content'] = body_content
request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.StorageError, response)
raise HttpResponseError(response=response, model=error)
response_headers = {}
response_headers['ETag']=self._deserialize('str', response.headers.get('ETag'))
response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified'))
response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id'))
response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version'))
response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date'))
if cls:
return cls(pipeline_response, None, response_headers)
set_access_policy.metadata = {'url': '/{shareName}'} # type: ignore
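# Illustrative sketch only: stored access policies are usually managed through the public ShareClient
# wrapper, which drives get_access_policy/set_access_policy above. Assuming a ShareClient named `share`:
#
#     from datetime import datetime, timedelta, timezone
#     from azure.storage.fileshare import AccessPolicy, ShareSasPermissions
#
#     policy = AccessPolicy(permission=ShareSasPermissions(read=True),
#                           expiry=datetime.now(timezone.utc) + timedelta(hours=1))
#     share.set_share_access_policy(signed_identifiers={"read-only": policy})
#     current = share.get_share_access_policy()   # returns the stored signed identifiers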
def get_statistics(
self,
timeout=None, # type: Optional[int]
lease_access_conditions=None, # type: Optional["_models.LeaseAccessConditions"]
**kwargs # type: Any
):
# type: (...) -> "_models.ShareStats"
"""Retrieves statistics related to the share.
:param timeout: The timeout parameter is expressed in seconds. For more information, see
:code:`<a
href="https://docs.microsoft.com/en-us/rest/api/storageservices/Setting-Timeouts-for-File-Service-Operations?redirectedfrom=MSDN">Setting
Timeouts for File Service Operations.</a>`.
:type timeout: int
:param lease_access_conditions: Parameter group.
:type lease_access_conditions: ~azure.storage.fileshare.models.LeaseAccessConditions
:keyword callable cls: A custom type or function that will be passed the direct response
:return: ShareStats, or the result of cls(response)
:rtype: ~azure.storage.fileshare.models.ShareStats
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.ShareStats"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
_lease_id = None
if lease_access_conditions is not None:
_lease_id = lease_access_conditions.lease_id
restype = "share"
comp = "stats"
accept = "application/xml"
# Construct URL
url = self.get_statistics.metadata['url'] # type: ignore
path_format_arguments = {
'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['restype'] = self._serialize.query("restype", restype, 'str')
query_parameters['comp'] = self._serialize.query("comp", comp, 'str')
if timeout is not None:
query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0)
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str')
if _lease_id is not None:
header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", _lease_id, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.StorageError, response)
raise HttpResponseError(response=response, model=error)
response_headers = {}
response_headers['ETag']=self._deserialize('str', response.headers.get('ETag'))
response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified'))
response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id'))
response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version'))
response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date'))
deserialized = self._deserialize('ShareStats', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, response_headers)
return deserialized
get_statistics.metadata = {'url': '/{shareName}'} # type: ignore
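# Illustrative sketch only: share usage statistics are exposed on the public ShareClient wrapper, which
# drives get_statistics above. Assuming a ShareClient named `share`:
#
#     used_bytes = share.get_share_stats()   # approximate size of the data stored on the share, in bytes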
def restore(
self,
timeout=None, # type: Optional[int]
request_id_parameter=None, # type: Optional[str]
deleted_share_name=None, # type: Optional[str]
deleted_share_version=None, # type: Optional[str]
**kwargs # type: Any
):
# type: (...) -> None
"""Restores a previously deleted Share.
:param timeout: The timeout parameter is expressed in seconds. For more information, see
:code:`<a
href="https://docs.microsoft.com/en-us/rest/api/storageservices/Setting-Timeouts-for-File-Service-Operations?redirectedfrom=MSDN">Setting
Timeouts for File Service Operations.</a>`.
:type timeout: int
:param request_id_parameter: Provides a client-generated, opaque value with a 1 KB character
limit that is recorded in the analytics logs when storage analytics logging is enabled.
:type request_id_parameter: str
:param deleted_share_name: Specifies the name of the previously-deleted share.
:type deleted_share_name: str
:param deleted_share_version: Specifies the version of the previously-deleted share.
:type deleted_share_version: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: None, or the result of cls(response)
:rtype: None
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
restype = "share"
comp = "undelete"
accept = "application/xml"
# Construct URL
url = self.restore.metadata['url'] # type: ignore
path_format_arguments = {
'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['restype'] = self._serialize.query("restype", restype, 'str')
query_parameters['comp'] = self._serialize.query("comp", comp, 'str')
if timeout is not None:
query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0)
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str')
if request_id_parameter is not None:
header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id_parameter", request_id_parameter, 'str')
if deleted_share_name is not None:
header_parameters['x-ms-deleted-share-name'] = self._serialize.header("deleted_share_name", deleted_share_name, 'str')
if deleted_share_version is not None:
header_parameters['x-ms-deleted-share-version'] = self._serialize.header("deleted_share_version", deleted_share_version, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.put(url, query_parameters, header_parameters)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [201]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.StorageError, response)
raise HttpResponseError(response=response, model=error)
response_headers = {}
response_headers['ETag']=self._deserialize('str', response.headers.get('ETag'))
response_headers['Last-Modified']=self._deserialize('rfc-1123', response.headers.get('Last-Modified'))
response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id'))
response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id'))
response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version'))
response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date'))
if cls:
return cls(pipeline_response, None, response_headers)
restore.metadata = {'url': '/{shareName}'} # type: ignore
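# Illustrative sketch only: restoring a soft-deleted share is normally done from the account-level
# ShareServiceClient wrapper, which drives restore above with the x-ms-deleted-share-name/-version
# headers. Assuming a connection string named `conn_str` and a deleted-share version obtained from the
# service (the version string below is a placeholder, illustration only):
#
#     from azure.storage.fileshare import ShareServiceClient
#
#     service = ShareServiceClient.from_connection_string(conn_str)
#     restored = service.undelete_share(deleted_share_name="myshare",
#                                       deleted_share_version="01D64EB9886F00C4")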
| 54.26061 | 172 | 0.667705
2421ae44c8410ae22db7694240c9dabfe98997c4 | 3,452 | py | Python | profiles_project/settings.py | JasonK1561/profiles-rest-api | 5cdf148d55c5610c944e48abd203885e92d4b741 | ["MIT"] | null | null | null | profiles_project/settings.py | JasonK1561/profiles-rest-api | 5cdf148d55c5610c944e48abd203885e92d4b741 | ["MIT"] | null | null | null | profiles_project/settings.py | JasonK1561/profiles-rest-api | 5cdf148d55c5610c944e48abd203885e92d4b741 | ["MIT"] | null | null | null |
"""
Django settings for profiles_project project.
Generated by 'django-admin startproject' using Django 2.2.5.
For more information on this file, see
https://docs.djangoproject.com/en/2.2/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.2/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.2/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'sm4(&y%5j&)st03l125i)^b2wyw^^ib!n*1g2y)x7j0)0+#*se'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'rest_framework',  # The Django REST Framework app installed as a dependency
'rest_framework.authtoken',  # Lets us use the token authentication that ships with DRF
'profiles_api',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'profiles_project.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'profiles_project.wsgi.application'
# Database
# https://docs.djangoproject.com/en/2.2/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/2.2/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/2.2/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.2/howto/static-files/
STATIC_URL = '/static/'
# Configure Django to use this custom user model instead of the default one provided
AUTH_USER_MODEL = 'profiles_api.UserProfile'
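# A short illustration (editor's addition, not part of the original settings): with AUTH_USER_MODEL
# swapped to profiles_api.UserProfile, other code should reference the user model indirectly, e.g.:
#
#     from django.conf import settings
#     from django.contrib.auth import get_user_model
#     from django.db import models
#
#     User = get_user_model()   # resolves to profiles_api.UserProfile
#
#     class Feed(models.Model):  # hypothetical model, for illustration only
#         owner = models.ForeignKey(settings.AUTH_USER_MODEL, on_delete=models.CASCADE)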
| 27.181102 | 102 | 0.705678
4ebfbc4fee1e04634408737c452de9e17245d0c3 | 14,570 | py | Python | net/views.py | dehu4ka/lna | f5ee176bdb5c7507b76fba5ae651ce333b71c3db | ["MIT"] | null | null | null | net/views.py | dehu4ka/lna | f5ee176bdb5c7507b76fba5ae651ce333b71c3db | ["MIT"] | null | null | null | net/views.py | dehu4ka/lna | f5ee176bdb5c7507b76fba5ae651ce333b71c3db | ["MIT"] | null | null | null |
from django.http import HttpResponseRedirect
from django.shortcuts import render
from django.contrib.auth.mixins import LoginRequiredMixin
from django.urls import reverse_lazy
from django.views.generic import TemplateView, ListView, FormView, DetailView
from django.core.exceptions import PermissionDenied
from net.models import Scripts, Job, Equipment, Subnets, EquipmentConfig
from net.forms import TaskForm, ArchiveTasksForm, SubnetForm, NEListForm, ConfigSearchForm, CMDRunnerForm
from django.contrib import messages
from net.equipment.generic import GenericEquipment
from net.lib import celery_job_starter, scan_nets_with_fping, discover_vendor, cmd_to_celery
from argus.models import Client, ASTU
import re
# Create your views here.
class Demo(LoginRequiredMixin, TemplateView):
template_name = 'net/demo.html'
def get(self, request, *args, **kwargs):
eq_device = Equipment.objects.get(ne_ip='10.205.18.247') # equipment object
eq = GenericEquipment(eq_device)
eq.set_io_timeout(1)
eq.suggest_login(resuggest=False)
eq.do_login()
eq.discover_vendor()
return render(request, self.template_name, *args, **kwargs)
class PickNE(LoginRequiredMixin, TemplateView):
template_name = 'net/pick_ne.html'
def get_context_data(self, **kwargs):
context = super(PickNE, self).get_context_data(**kwargs)
possible_scripts = Scripts.objects.all().exclude(is_hidden=True)
context['possible_scripts'] = possible_scripts
return context
class DoTask(LoginRequiredMixin, TemplateView):
template_name = 'net/do_task.html'
def get(self, *args, **kwargs):
raise PermissionDenied
def post(self, request):
"""
We need to start the job starter, which receives a list of destination IDs, the name of the script
to execute and, possibly, some additional arguments.
:param request:
:return:
"""
destinations_ids = request.POST.getlist('destinations')
script_id = request.POST['script_select']
celery_job_starter(destinations_ids, script_id)
args = dict()
return render(request, self.template_name, args)
class ActiveTasks(LoginRequiredMixin, ListView, FormView):
model = Job
template_name = 'net/active_tasks.html'
form_class = TaskForm
paginate_by = 9
success_url = '/net/active_tasks'
def post(self, request, *args, **kwargs):
return self.get(request, *args, **kwargs)
def get_queryset(self):
if self.request.method == 'POST':
form = TaskForm(self.request.POST)
if form.is_valid():
task_status = form.cleaned_data['task_status']
if task_status != '':
return Job.objects.filter(status=task_status)
return Job.objects.all().exclude(status='ARCHIVED').exclude(status='TERMINATED')
if self.request.method == 'GET':
if self.request.GET.get('task_status') and (self.request.GET.get('task_status') != 'None'):
return Job.objects.filter(status=self.request.GET.get('task_status'))
return Job.objects.all().exclude(status='ARCHIVED').exclude(status='TERMINATED')
def get_context_data(self, **kwargs):
context = super(ActiveTasks, self).get_context_data(**kwargs)
task_status = None
if self.request.method == 'POST':
form = TaskForm(self.request.POST)
if form.is_valid():
task_status = form.cleaned_data['task_status']
if self.request.method == 'GET':
task_status = self.request.GET.get('task_status')
context['task_status'] = task_status
return context
class ArchiveTasks(LoginRequiredMixin, FormView):
template_name = 'net/archive_tasks.html'
form_class = ArchiveTasksForm
def post(self, request, *args, **kwargs):
return self.get(request, *args, **kwargs)
def get_context_data(self, **kwargs):
context = super(ArchiveTasks, self).get_context_data(**kwargs)
if self.request.method == 'POST':
Job.objects.filter(status='SUCCESS').update(status='ARCHIVED')
messages.add_message(self.request, messages.INFO, 'Archiving completed')
return context
class TaskDetail(LoginRequiredMixin, TemplateView):
template_name = 'net/task_detail.html'
class DiscoverSubnets(LoginRequiredMixin, FormView):
template_name = 'net/discover_subnets.html'
form_class = SubnetForm
def post(self, request, *args, **kwargs):
return self.get(request, *args, **kwargs)
def get_context_data(self, **kwargs):
context = super(DiscoverSubnets, self).get_context_data(**kwargs)
context['new'] = False
context['found'] = False
if self.request.method == 'POST':
form = SubnetForm(self.request.POST)
if form.is_valid():
subnets = form.cleaned_data['subnets'].split("\r\n") # lists with subnet
cast_to_celery = form.cleaned_data['cast_to_celery'] # "Send discovery task to Celery" checkbox
discover_task = form.cleaned_data['discover_task'] # Task type
context['cast_to_celery'] = cast_to_celery
if discover_task == 'fping':
if not cast_to_celery:
found, new = scan_nets_with_fping(subnets)
context['found'] = found
context['new'] = new
else:
celery_job_starter(subnets, '999')  # script id 999 sends the subnet scan task to Celery
if discover_task == 'vendor':
if not cast_to_celery:
discover_vendor(subnets)
else:
celery_job_starter(subnets, '1000')
pass
if discover_task == 'config':
if not cast_to_celery:
# discover_config(subnets)
pass
else:
celery_job_starter(subnets, '1001')
pass
if discover_task == 'put_syslocation':
if not cast_to_celery:
# only in celery
pass
else:
celery_job_starter(subnets, '1002')
return context
class ClientsCount(LoginRequiredMixin, TemplateView):
template_name = 'net/clients_count.html'
def get_context_data(self, **kwargs):
result_dict = dict()
clients = Client.objects.all()
for client in clients:
hostname = client.hostname
hostname_parts = hostname.split('-')
try:
node_name = hostname_parts[0] + '-' + hostname_parts[1] + '-' + hostname_parts[2]
if node_name in result_dict:
result_dict[node_name] += 1
else:
result_dict[node_name] = 1
except IndexError:
# skip
# print(hostname)
pass
result_str = ''
for node in result_dict:
try:
astu_objects = ASTU.objects.filter(hostname__contains=node).filter(status='эксплуатация')
astu_first_object = astu_objects[0]
address = astu_first_object.address
except IndexError:
address = 'Unknown'
# print(node + ';' + str(result_dict[node]) + ';"' + address + '"')
result_str += node + ';' + str(result_dict[node]) + ';"' + address + '"' + "\n"
context = super(ClientsCount, self).get_context_data(**kwargs)
context['result_str'] = result_str
return context
class NEList(LoginRequiredMixin, ListView, FormView):
template_name = 'net/ne_list.html'
form_class = NEListForm
model = Equipment
success_url = 'net/ne_list'
paginate_by = 20
context_object_name = 'ne_list'
def post(self, request, *args, **kwargs):
return self.get(request, *args, **kwargs)
def get_queryset(self):
ne_list = Equipment.objects.all()
# defaults
is_login_discovered = 'any' # any value
is_vendor_discovered = 'any'
ip_or_subnet = ''
if self.request.method == 'POST':
form = NEListForm(self.request.POST)
if form.is_valid():
is_login_discovered = form.cleaned_data['is_login_discovered']
is_vendor_discovered = form.cleaned_data['is_vendor_discovered']
ip_or_subnet = form.cleaned_data['ip_or_subnet']
if self.request.method == 'GET':
is_login_discovered = self.request.GET.get('is_login_discovered')
is_vendor_discovered = self.request.GET.get('is_vendor_discovered')
ip_or_subnet = self.request.GET.get('ip_or_subnet')
# Filter login_discovered
if is_login_discovered == 'yes':
ne_list = ne_list.filter(credentials_id__isnull=False)
elif is_login_discovered == 'no':
ne_list = ne_list.filter(credentials_id__isnull=True)
else: # 'any'
pass
# Filter vendor discovered
if is_vendor_discovered == 'yes':
ne_list = ne_list.filter(vendor__isnull=False)
elif is_vendor_discovered == 'no':
ne_list = ne_list.filter(vendor__isnull=True)
else: # any
pass
ip_re = r'^([0-9]+\.){3}[0-9]+$'
mask_re = r'^([0-9]+\.){3}[0-9]+\/\d{1,2}$'
# IP / hostname / subnet filtering
if ip_or_subnet and (ip_or_subnet is not None) and (ip_or_subnet != 'None'):
if re.match(ip_re, ip_or_subnet): # IP-address only
ne_list = ne_list.filter(ne_ip=ip_or_subnet)
elif re.match(mask_re, ip_or_subnet): # Subnet
try:
ne_list = ne_list.filter(ne_ip__net_contained=ip_or_subnet)
except ValueError as err:
messages.add_message(self.request, messages.ERROR, 'Subnet search error. ' + str(err))
else: # filtering by hostname
ne_list = ne_list.filter(hostname__icontains=ip_or_subnet)
# return result
return ne_list
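# Editor's note (illustrative): ip_re above matches a bare IPv4 address, mask_re matches CIDR notation,
# and anything else falls through to the hostname icontains filter, e.g.:
#
#     re.match(ip_re, "10.205.18.247")     # match -> exact-IP filter (ne_ip=...)
#     re.match(mask_re, "10.205.18.0/24")  # match -> subnet filter (ne_ip__net_contained=...)
#     "sw-node-01"                         # neither -> hostname__icontains filter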
def get_context_data(self, **kwargs):
context = super(NEList, self).get_context_data(**kwargs)
context['row_count'] = self.get_queryset().count()
if self.request.method == 'GET':
context['is_login_discovered'] = self.request.GET.get('is_login_discovered')
context['is_vendor_discovered'] = self.request.GET.get('is_vendor_discovered')
context['ip_or_subnet'] = self.request.GET.get('ip_or_subnet')
if self.request.method == 'POST':
form = NEListForm(self.request.POST)
if form.is_valid():
context['is_login_discovered'] = form.cleaned_data['is_login_discovered']
context['is_vendor_discovered'] = form.cleaned_data['is_vendor_discovered']
context['ip_or_subnet'] = form.cleaned_data['ip_or_subnet']
return context
class NEDetail(LoginRequiredMixin, DetailView):
template_name = 'net/ne_detail.html'
model = Equipment
def get_context_data(self, **kwargs):
context = super().get_context_data(**kwargs)
ip = str(context['object'].ne_ip).replace('/32', '') # removing /32 from IPv4 addr
try:
astu_object = ASTU.objects.get(ne_ip=ip) # check if NE with this IP exists in ASTU table
address = astu_object.address # getting address
except ASTU.DoesNotExist:
address = 'Not found'
context['address'] = address # return it to the context
config_archives = EquipmentConfig.objects.filter(equipment_id=context['object'].id)
context['config_archives'] = config_archives[:20] # Last 20 configurations
return context
class SubnetsList(LoginRequiredMixin, ListView):
template_name = 'net/subnets_list.html'
model = Subnets
def get_queryset(self):
subnets_list = Subnets.objects.all()
return subnets_list
def get_context_data(self, **kwargs):
context = super(SubnetsList, self).get_context_data(**kwargs)
context['row_count'] = self.get_queryset().count()
return context
class ConfigSearch(LoginRequiredMixin, ListView, FormView):
template_name = 'net/config_search.html'
form_class = ConfigSearchForm
model = Equipment
success_url = 'net/config_search'
paginate_by = 20
context_object_name = 'ne_list'
def post(self, request, *args, **kwargs):
return self.get(request, *args, **kwargs)
def get_search_term(self):
"""
Returns the search term from the POST form or the GET parameters; falls back to an empty string.
:return: search term or ''
"""
if self.request.method == 'POST':
form = ConfigSearchForm(self.request.POST)
if form.is_valid():
search = form.cleaned_data['search'] or ''
return search
if self.request.method == 'GET':
return self.request.GET.get('search') or ''
def get_queryset(self):
ne_list = Equipment.objects.all() # all NE's
search = self.get_search_term()
if search:
ne_list = ne_list.filter(current_config__icontains=search)
return ne_list
return Equipment.objects.none() # otherwise return empty queryset / list
def get_context_data(self, **kwargs):
context = super(ConfigSearch, self).get_context_data(**kwargs)
context['row_count'] = self.get_queryset().count()
context['search'] = self.get_search_term()
return context
class CMDRunner(LoginRequiredMixin, FormView):
template_name = 'net/cmd_runner.html'
form_class = CMDRunnerForm
def post(self, request, *args, **kwargs):
return self.get(request, *args, **kwargs)
def get_context_data(self, **kwargs):
context = super().get_context_data(**kwargs)
if self.request.method == 'POST':
form = CMDRunnerForm(self.request.POST)
if form.is_valid():
ips = form.cleaned_data['ips_textfield']
cmds = form.cleaned_data['commands_list']
vendor = form.cleaned_data['vendor_choices']
cmd_to_celery(vendor, ips, cmds)
return context
| 38.853333 | 118 | 0.617708
cd8cd449f9002a31a3a3cc9560cb46e6985bc0e0 | 16,366 | py | Python | airflow/providers/microsoft/azure/operators/azure_container_instances.py | ChaseKnowlden/airflow | 6b71eac1997a7c0db3b8e3aed6b4e65d01871440 | ["Apache-2.0"] | 15,947 | 2019-01-05T13:51:02.000Z | 2022-03-31T23:33:16.000Z | airflow/providers/microsoft/azure/operators/azure_container_instances.py | ChaseKnowlden/airflow | 6b71eac1997a7c0db3b8e3aed6b4e65d01871440 | ["Apache-2.0"] | 14,603 | 2019-01-05T09:43:19.000Z | 2022-03-31T23:11:59.000Z | airflow/providers/microsoft/azure/operators/azure_container_instances.py | ChaseKnowlden/airflow | 6b71eac1997a7c0db3b8e3aed6b4e65d01871440 | ["Apache-2.0"] | 8,429 | 2019-01-05T19:45:47.000Z | 2022-03-31T22:13:01.000Z |
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import re
from collections import namedtuple
from time import sleep
from typing import Any, Dict, List, Optional, Sequence, Union
from azure.mgmt.containerinstance.models import (
Container,
ContainerGroup,
ContainerPort,
EnvironmentVariable,
IpAddress,
ResourceRequests,
ResourceRequirements,
VolumeMount,
)
from msrestazure.azure_exceptions import CloudError
from airflow.exceptions import AirflowException, AirflowTaskTimeout
from airflow.models import BaseOperator
from airflow.providers.microsoft.azure.hooks.azure_container_instance import AzureContainerInstanceHook
from airflow.providers.microsoft.azure.hooks.azure_container_registry import AzureContainerRegistryHook
from airflow.providers.microsoft.azure.hooks.azure_container_volume import AzureContainerVolumeHook
Volume = namedtuple(
'Volume',
['conn_id', 'account_name', 'share_name', 'mount_path', 'read_only'],
)
DEFAULT_ENVIRONMENT_VARIABLES: Dict[str, str] = {}
DEFAULT_SECURED_VARIABLES: Sequence[str] = []
DEFAULT_VOLUMES: Sequence[Volume] = []
DEFAULT_MEMORY_IN_GB = 2.0
DEFAULT_CPU = 1.0
class AzureContainerInstancesOperator(BaseOperator):
"""
Start a container on Azure Container Instances
:param ci_conn_id: connection id of a service principal which will be used
to start the container instance
:type ci_conn_id: str
    :param registry_conn_id: connection id of a user which can login to a
        private docker registry. For Azure use :ref:`Azure connection id<howto/connection:azure>`.
        If None, we assume a public registry.
    :type registry_conn_id: Optional[str]
:param resource_group: name of the resource group wherein this container
instance should be started
:type resource_group: str
:param name: name of this container instance. Please note this name has
to be unique in order to run containers in parallel.
:type name: str
:param image: the docker image to be used
:type image: str
:param region: the region wherein this container instance should be started
:type region: str
:param environment_variables: key,value pairs containing environment
variables which will be passed to the running container
:type environment_variables: Optional[dict]
:param secured_variables: names of environmental variables that should not
be exposed outside the container (typically passwords).
:type secured_variables: Optional[str]
:param volumes: list of ``Volume`` tuples to be mounted to the container.
Currently only Azure Fileshares are supported.
:type volumes: list[<conn_id, account_name, share_name, mount_path, read_only>]
:param memory_in_gb: the amount of memory to allocate to this container
:type memory_in_gb: double
:param cpu: the number of cpus to allocate to this container
:type cpu: double
:param gpu: GPU Resource for the container.
:type gpu: azure.mgmt.containerinstance.models.GpuResource
:param command: the command to run inside the container
:type command: Optional[List[str]]
:param container_timeout: max time allowed for the execution of
the container instance.
:type container_timeout: datetime.timedelta
:param tags: azure tags as dict of str:str
:type tags: Optional[dict[str, str]]
:param os_type: The operating system type required by the containers
in the container group. Possible values include: 'Windows', 'Linux'
:type os_type: str
:param restart_policy: Restart policy for all containers within the container group.
Possible values include: 'Always', 'OnFailure', 'Never'
:type restart_policy: str
:param ip_address: The IP address type of the container group.
:type ip_address: IpAddress
**Example**::
AzureContainerInstancesOperator(
ci_conn_id = "azure_service_principal",
registry_conn_id = "azure_registry_user",
resource_group = "my-resource-group",
name = "my-container-name-{{ ds }}",
image = "myprivateregistry.azurecr.io/my_container:latest",
region = "westeurope",
environment_variables = {"MODEL_PATH": "my_value",
"POSTGRES_LOGIN": "{{ macros.connection('postgres_default').login }}",
"POSTGRES_PASSWORD": "{{ macros.connection('postgres_default').password }}",
"JOB_GUID": "{{ ti.xcom_pull(task_ids='task1', key='guid') }}" },
secured_variables = ['POSTGRES_PASSWORD'],
volumes = [("azure_container_instance_conn_id",
"my_storage_container",
"my_fileshare",
"/input-data",
True),],
memory_in_gb=14.0,
cpu=4.0,
gpu=GpuResource(count=1, sku='K80'),
command=["/bin/echo", "world"],
task_id="start_container"
)
"""
template_fields = ('name', 'image', 'command', 'environment_variables')
template_fields_renderers = {"command": "bash", "environment_variables": "json"}
def __init__(
self,
*,
ci_conn_id: str,
registry_conn_id: Optional[str],
resource_group: str,
name: str,
image: str,
region: str,
environment_variables: Optional[dict] = None,
secured_variables: Optional[str] = None,
volumes: Optional[list] = None,
memory_in_gb: Optional[Any] = None,
cpu: Optional[Any] = None,
gpu: Optional[Any] = None,
command: Optional[List[str]] = None,
remove_on_error: bool = True,
fail_if_exists: bool = True,
tags: Optional[Dict[str, str]] = None,
os_type: str = 'Linux',
restart_policy: str = 'Never',
ip_address: Optional[IpAddress] = None,
ports: Optional[List[ContainerPort]] = None,
**kwargs,
) -> None:
super().__init__(**kwargs)
self.ci_conn_id = ci_conn_id
self.resource_group = resource_group
self.name = self._check_name(name)
self.image = image
self.region = region
self.registry_conn_id = registry_conn_id
self.environment_variables = environment_variables or DEFAULT_ENVIRONMENT_VARIABLES
self.secured_variables = secured_variables or DEFAULT_SECURED_VARIABLES
self.volumes = volumes or DEFAULT_VOLUMES
self.memory_in_gb = memory_in_gb or DEFAULT_MEMORY_IN_GB
self.cpu = cpu or DEFAULT_CPU
self.gpu = gpu
self.command = command
self.remove_on_error = remove_on_error
self.fail_if_exists = fail_if_exists
self._ci_hook: Any = None
self.tags = tags
self.os_type = os_type
if self.os_type not in ['Linux', 'Windows']:
raise AirflowException(
"Invalid value for the os_type argument. "
"Please set 'Linux' or 'Windows' as the os_type. "
f"Found `{self.os_type}`."
)
self.restart_policy = restart_policy
if self.restart_policy not in ['Always', 'OnFailure', 'Never']:
raise AirflowException(
"Invalid value for the restart_policy argument. "
"Please set one of 'Always', 'OnFailure','Never' as the restart_policy. "
f"Found `{self.restart_policy}`"
)
self.ip_address = ip_address
self.ports = ports
def execute(self, context: dict) -> int:
# Check name again in case it was templated.
self._check_name(self.name)
self._ci_hook = AzureContainerInstanceHook(self.ci_conn_id)
if self.fail_if_exists:
self.log.info("Testing if container group already exists")
if self._ci_hook.exists(self.resource_group, self.name):
raise AirflowException("Container group exists")
if self.registry_conn_id:
registry_hook = AzureContainerRegistryHook(self.registry_conn_id)
image_registry_credentials: Optional[list] = [
registry_hook.connection,
]
else:
image_registry_credentials = None
environment_variables = []
for key, value in self.environment_variables.items():
if key in self.secured_variables:
e = EnvironmentVariable(name=key, secure_value=value)
else:
e = EnvironmentVariable(name=key, value=value)
environment_variables.append(e)
        volumes: List[Volume] = []
        volume_mounts: List[VolumeMount] = []
for conn_id, account_name, share_name, mount_path, read_only in self.volumes:
hook = AzureContainerVolumeHook(conn_id)
mount_name = "mount-%d" % len(volumes)
volumes.append(hook.get_file_volume(mount_name, share_name, account_name, read_only))
volume_mounts.append(VolumeMount(name=mount_name, mount_path=mount_path, read_only=read_only))
exit_code = 1
try:
self.log.info("Starting container group with %.1f cpu %.1f mem", self.cpu, self.memory_in_gb)
if self.gpu:
self.log.info("GPU count: %.1f, GPU SKU: %s", self.gpu.count, self.gpu.sku)
resources = ResourceRequirements(
requests=ResourceRequests(memory_in_gb=self.memory_in_gb, cpu=self.cpu, gpu=self.gpu)
)
if self.ip_address and not self.ports:
self.ports = [ContainerPort(port=80)]
self.log.info("Default port set. Container will listen on port 80")
container = Container(
name=self.name,
image=self.image,
resources=resources,
command=self.command,
environment_variables=environment_variables,
volume_mounts=volume_mounts,
ports=self.ports,
)
container_group = ContainerGroup(
location=self.region,
containers=[
container,
],
image_registry_credentials=image_registry_credentials,
volumes=volumes,
restart_policy=self.restart_policy,
os_type=self.os_type,
tags=self.tags,
ip_address=self.ip_address,
)
self._ci_hook.create_or_update(self.resource_group, self.name, container_group)
self.log.info("Container group started %s/%s", self.resource_group, self.name)
exit_code = self._monitor_logging(self.resource_group, self.name)
self.log.info("Container had exit code: %s", exit_code)
if exit_code != 0:
raise AirflowException(f"Container had a non-zero exit code, {exit_code}")
return exit_code
except CloudError:
self.log.exception("Could not start container group")
raise AirflowException("Could not start container group")
finally:
if exit_code == 0 or self.remove_on_error:
self.on_kill()
def on_kill(self) -> None:
if self.remove_on_error:
self.log.info("Deleting container group")
try:
self._ci_hook.delete(self.resource_group, self.name)
except Exception:
self.log.exception("Could not delete container group")
def _monitor_logging(self, resource_group: str, name: str) -> int:
last_state = None
last_message_logged = None
last_line_logged = None
while True:
try:
cg_state = self._ci_hook.get_state(resource_group, name)
instance_view = cg_state.containers[0].instance_view
# If there is no instance view, we show the provisioning state
if instance_view is not None:
c_state = instance_view.current_state
state, exit_code, detail_status = (
c_state.state,
c_state.exit_code,
c_state.detail_status,
)
else:
state = cg_state.provisioning_state
exit_code = 0
detail_status = "Provisioning"
if instance_view is not None and instance_view.events is not None:
messages = [event.message for event in instance_view.events]
last_message_logged = self._log_last(messages, last_message_logged)
if state != last_state:
self.log.info("Container group state changed to %s", state)
last_state = state
if state in ["Running", "Terminated", "Succeeded"]:
try:
logs = self._ci_hook.get_logs(resource_group, name)
last_line_logged = self._log_last(logs, last_line_logged)
except CloudError:
self.log.exception(
"Exception while getting logs from container instance, retrying..."
)
if state == "Terminated":
self.log.info("Container exited with detail_status %s", detail_status)
return exit_code
if state == "Failed":
self.log.error("Azure provision failure")
return 1
except AirflowTaskTimeout:
raise
except CloudError as err:
if 'ResourceNotFound' in str(err):
self.log.warning(
"ResourceNotFound, container is probably removed "
"by another process "
"(make sure that the name is unique)."
)
return 1
else:
self.log.exception("Exception while getting container groups")
except Exception:
self.log.exception("Exception while getting container groups")
sleep(1)
def _log_last(self, logs: Optional[list], last_line_logged: Any) -> Optional[Any]:
if logs:
# determine the last line which was logged before
last_line_index = 0
for i in range(len(logs) - 1, -1, -1):
if logs[i] == last_line_logged:
# this line is the same, hence print from i+1
last_line_index = i + 1
break
# log all new ones
for line in logs[last_line_index:]:
self.log.info(line.rstrip())
return logs[-1]
return None
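    # Illustrative trace of the de-duplication above (values assumed):
    #   first call : logs=["a", "b"],      last_line_logged=None -> logs "a", "b"; returns "b"
    #   second call: logs=["a", "b", "c"], last_line_logged="b"  -> logs only "c"; returns "c"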
@staticmethod
def _check_name(name: str) -> str:
if '{{' in name:
# Let macros pass as they cannot be checked at construction time
return name
regex_check = re.match("[a-z0-9]([-a-z0-9]*[a-z0-9])?", name)
if regex_check is None or regex_check.group() != name:
raise AirflowException('ACI name must match regex [a-z0-9]([-a-z0-9]*[a-z0-9])? (like "my-name")')
if len(name) > 63:
raise AirflowException('ACI name cannot be longer than 63 characters')
return name
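    # Illustrative outcomes of _check_name (example names are made up):
    #   _check_name("my-container-1")  -> "my-container-1"         (accepted)
    #   _check_name("My_Container")    -> raises AirflowException  (uppercase/underscore rejected)
    #   _check_name("job-{{ ds }}")    -> returned unchanged       (templated; re-checked in execute)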
| 41.856777
| 110
| 0.611878
|
17fa338fad14d58a178996bfaadf00950f6e5981
| 1,463
|
py
|
Python
|
pytorch/models/classifier.py
|
cmonserr/Why_Difficulty
|
7b34cc3556a1b99ac67cb155fba8d0837c9b7b10
|
[
"MIT"
] | 1
|
2022-02-04T11:33:41.000Z
|
2022-02-04T11:33:41.000Z
|
pytorch/models/classifier.py
|
dcastf01/creating_adversarial_images
|
01564f7b4ff9f19021986e57f5bfad827213c8a6
|
[
"MIT"
] | null | null | null |
pytorch/models/classifier.py
|
dcastf01/creating_adversarial_images
|
01564f7b4ff9f19021986e57f5bfad827213c8a6
|
[
"MIT"
] | null | null | null |
import torch
import torch.nn.functional as F
from torch import nn
import pytorch_pfn_extras as ppe
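# The loss and accuracy helpers used below are not defined in this excerpt.
# Minimal stand-in implementations (assumptions, not the original definitions):
def cross_entropy_with_logits(input, target):
    # Cross entropy for raw logits with one-hot / soft targets.
    return -(target * F.log_softmax(input, dim=-1)).sum(dim=-1).mean()
def accuracy_with_logits(input, target):
    # Fraction of samples whose predicted class matches the (one-hot) target class.
    return (torch.argmax(input, dim=1) == torch.argmax(target, dim=1)).float().mean()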
class Classifier(nn.Module):
"""two class classfication"""
def __init__(self, predictor, lossfun=cross_entropy_with_logits):
super().__init__()
self.predictor = predictor
self.lossfun = lossfun
self.prefix = ""
def forward(self, image, targets):
outputs = self.predictor(image)
loss = self.lossfun(outputs, targets)
metrics = {
f"{self.prefix}loss": loss.item(),
f"{self.prefix}acc": accuracy_with_logits(outputs, targets).item()
}
ppe.reporting.report(metrics, self)
return loss, metrics
def predict(self, data_loader):
pred = self.predict_proba(data_loader)
label = torch.argmax(pred, dim=1)
return label
def predict_proba(self, data_loader):
device: torch.device = next(self.parameters()).device
y_list = []
self.eval()
with torch.no_grad():
for batch in data_loader:
if isinstance(batch, (tuple, list)):
# Assumes first argument is "image"
batch = batch[0].to(device)
else:
batch = batch.to(device)
y = self.predictor(batch)
y = torch.softmax(y, dim=-1)
y_list.append(y)
pred = torch.cat(y_list)
return pred
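# Illustrative usage (names are assumptions, not from this file):
#   model = Classifier(predictor=my_backbone).to(device)
#   loss, metrics = model(images, one_hot_targets)   # training forward pass
#   labels = model.predict(test_loader)              # arg-max class per sample
#   probs = model.predict_proba(test_loader)         # softmax probabilities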
| 32.511111
| 78
| 0.574846
|
76782e70811ce5cd512d3c5a760a4bb0182a0071
| 1,568
|
py
|
Python
|
qcdb/driver/pe.py
|
nuwandesilva/qcdb
|
b47fb2ed550fc4176198ddb1dbea3724d6704d23
|
[
"BSD-3-Clause"
] | null | null | null |
qcdb/driver/pe.py
|
nuwandesilva/qcdb
|
b47fb2ed550fc4176198ddb1dbea3724d6704d23
|
[
"BSD-3-Clause"
] | null | null | null |
qcdb/driver/pe.py
|
nuwandesilva/qcdb
|
b47fb2ed550fc4176198ddb1dbea3724d6704d23
|
[
"BSD-3-Clause"
] | null | null | null |
import collections
from qcelemental.util import which, which_import
from .. import data_dir
from ..molecule import Molecule
from ..moptions.read_options2 import RottenOptions, load_qcdb_defaults
from ..intf_psi4.options import load_cfour_defaults_from_psi4, load_psi4_defaults
from ..intf_nwchem.options import load_nwchem_defaults
from ..intf_gamess.options import load_gamess_defaults
def clean_nu_options():
global nu_options
nu_options = RottenOptions()
def load_nu_options():
global nu_options
load_options(nu_options)
#print('OPTIONS LOADED')
#print(nu_options)
def load_options(options):
"""Initialize program options defaults onto `options`."""
load_qcdb_defaults(options)
if which('xcfour') and which_import('psi4'):
load_cfour_defaults_from_psi4(options)
if which('nwchem'):
load_nwchem_defaults(options)
if which('rungms'):
load_gamess_defaults(options)
if which('psi4') and which_import('psi4'):
load_psi4_defaults(options)
    if which_import('resp'):
        import resp  # deferred import; assumed importable since which_import('resp') succeeded
        resp.load_defaults(nu_options)
nu_options = None
clean_nu_options()
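# Illustrative use of the module-level option store (a sketch):
#   from qcdb.driver import pe
#   pe.load_nu_options()     # populate defaults for whichever programs are installed
#   pe.nu_options            # RottenOptions holding qcdb/cfour/nwchem/gamess/psi4 defaults
#   pe.clean_nu_options()    # reset to an empty RottenOptions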
def clean_options():
global active_options
active_options = collections.defaultdict(lambda: collections.defaultdict(dict))
active_options['GLOBALS']
# here liveth the options _during_ function calls
active_options = None
clean_options()
# here liveth the molecule between function calls
active_molecule = Molecule("""H 0 0 0\nH 0.74 0 0""")
# here liveth the QCVariables when not attached to jobrec
active_qcvars = {}
| 26.133333
| 83
| 0.753189
|
f0c9f4eb3806365595409e4a7d10933f90de563f
| 2,105
|
py
|
Python
|
utility/testSiO-SiO2-quant-2.py
|
jrminter/dtsa2scripts
|
a7a4f3a63f47f0a8abe7ee13c72f5a27196c3a1b
|
[
"MIT"
] | 2
|
2018-04-19T12:25:29.000Z
|
2018-11-24T12:55:46.000Z
|
utility/testSiO-SiO2-quant-2.py
|
jrminter/dtsa2Scripts
|
a7a4f3a63f47f0a8abe7ee13c72f5a27196c3a1b
|
[
"MIT"
] | null | null | null |
utility/testSiO-SiO2-quant-2.py
|
jrminter/dtsa2Scripts
|
a7a4f3a63f47f0a8abe7ee13c72f5a27196c3a1b
|
[
"MIT"
] | null | null | null |
# This Python file uses the following encoding: utf-8
# testSiO-SiO2-quant-2.py
# This version reads from disk
# 2018-10-16
import sys
import os
import time
import shutil
# import dtsa2 as dt2
import dtsa2.mcSimulate3 as mc3
import dtsa2.jmGen as jmg
gitDir = os.environ['GIT_HOME']
relPrj = "/dtsa2Scripts/utility"
prjDir = gitDir + relPrj
rptDir = prjDir + '/testSiO-SiO2-quant-2 Results/'
spcDir = gitDir + relPrj + "/sim-quant-sio-w-sio2"
det = findDetector("Oxford p4 05eV 2K")
e0 = 4.0 # kV
nDigits = 5
DataManager.clearSpectrumList()
start = time.time()
sio2 = material("SiO2", density=2.196)
# sio = material("SiO", density=2.13)
# Read standard
fi = spcDir + "/SiO2 std.msa"
spc_sio2_std = readSpectrum(fi)
spc_sio2_std.display()
lDoseUnk = [50, 100, 200, 500, 1000, 2500, 5000]
xrts = []
trs = mc3.suggestTransitions(sio2, e0)
for tr in trs:
xrts.append(tr)
stds = { element("O"): spc_sio2_std, element("Si"): spc_sio2_std }
qus = multiQuant(det, e0, stds, {})
for doseUnk in lDoseUnk:
sName = "SiO Unk %g nA-sec.msa" % (doseUnk)
fi = spcDir + "/" + sName
spc_sio_unk = readSpectrum(fi)
spc_sio_unk.display()
res = qus.compute(spc_sio_unk)
print(sName)
print("Weight Fraction")
print("Si")
siWF = res.getComposition().weightFractionU(element("Si"), True)
print(jmg.pretty_print_unc_val(siWF, nDigits))
print("O")
oWF = res.getComposition().weightFractionU(element("O"), True)
print(jmg.pretty_print_unc_val(oWF, nDigits))
print("")
print("Atomic Percent")
print("Si")
siAP = res.getComposition().atomicPercentU(element("Si"), True)
print(jmg.pretty_print_unc_val(siAP, nDigits))
print("O")
oAP = res.getComposition().atomicPercentU(element("O"), True)
print(jmg.pretty_print_unc_val(oAP, nDigits))
print("\n")
# clean up cruft
shutil.rmtree(rptDir)
print "Done!"
end = time.time()
delta = (end-start)/60
msg = "This script required %.3f min" % delta
print(msg)
if(delta > 60):
delta = delta/60
msg = "...or %.3f hr" % delta
    print(msg)
| 24.764706
| 68
| 0.664133
|
be4e026862513dc237b901b17602e6e32314c79b
| 964
|
py
|
Python
|
disturbance/migrations/0155_auto_20200911_0941.py
|
thakurpriya1990/disturbance
|
47f9ce5ae5f1b02d97ace11f1041e96daf7e4556
|
[
"Apache-2.0"
] | 1
|
2020-06-30T04:47:42.000Z
|
2020-06-30T04:47:42.000Z
|
disturbance/migrations/0155_auto_20200911_0941.py
|
thakurpriya1990/disturbance
|
47f9ce5ae5f1b02d97ace11f1041e96daf7e4556
|
[
"Apache-2.0"
] | 16
|
2020-03-11T08:25:46.000Z
|
2022-03-02T08:14:40.000Z
|
disturbance/migrations/0155_auto_20200911_0941.py
|
thakurpriya1990/disturbance
|
47f9ce5ae5f1b02d97ace11f1041e96daf7e4556
|
[
"Apache-2.0"
] | 9
|
2020-01-30T17:37:38.000Z
|
2021-09-30T02:22:24.000Z
|
# -*- coding: utf-8 -*-
# Generated by Django 1.10.8 on 2020-09-11 01:41
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('disturbance', '0154_apiarysiteonapproval_wkb_geometry'),
]
operations = [
migrations.RenameField(
model_name='approval',
old_name='beehive_sites',
new_name='apiary_sites',
),
migrations.RenameField(
model_name='proposalapiary',
old_name='beehive_sites',
new_name='apiary_sites',
),
migrations.RemoveField(
model_name='apiarysite',
name='approval',
),
migrations.RemoveField(
model_name='apiarysite',
name='proposal_apiaries',
),
migrations.RemoveField(
model_name='apiarysite',
name='proposal_apiary',
),
]
| 25.368421
| 66
| 0.574689
|
bbc00859eedabe8a5a3bec12b998c0e13fd445c5
| 996
|
py
|
Python
|
app/recipe/serializers.py
|
aldhiramdans/recipe-app-api
|
2c9f0902372a5eb23c4a19c06611379e7540ed73
|
[
"MIT"
] | null | null | null |
app/recipe/serializers.py
|
aldhiramdans/recipe-app-api
|
2c9f0902372a5eb23c4a19c06611379e7540ed73
|
[
"MIT"
] | null | null | null |
app/recipe/serializers.py
|
aldhiramdans/recipe-app-api
|
2c9f0902372a5eb23c4a19c06611379e7540ed73
|
[
"MIT"
] | null | null | null |
from rest_framework import serializers
from core.models import Tag, Ingridient, Recipe
class TagSerializer(serializers.ModelSerializer):
"""Serializer for tag objects"""
class Meta:
model = Tag
fields = ('id', 'name')
read_only_fields = ('id',)
class IngridientSerializer(serializers.ModelSerializer):
"""Serializer for ingridient objects"""
class Meta:
model = Ingridient
fields = ('id', 'name')
read_only_fields = ('id',)
class RecipeSerializer(serializers.ModelSerializer):
"""Serializer for recipe objects"""
ingridient = serializers.PrimaryKeyRelatedField(
many=True,
queryset=Ingridient.objects.all()
)
tag = serializers.PrimaryKeyRelatedField(
many=True,
queryset=Tag.objects.all()
)
class Meta:
model = Recipe
fields = ('id', 'title', 'ingridient', 'tag', 'time_minutes',
'price', 'link')
read_only_fields = ('id',)
| 24.9
| 69
| 0.62751
|
f9c5d6842160a4443aded597030343e05bf1470b
| 7,896
|
py
|
Python
|
submit_muti.py
|
Complicateddd/Complicateddd-ROITransformer
|
2adfbf98892d569c460d100c6e2169c5fa3a9b82
|
[
"Apache-2.0"
] | null | null | null |
submit_muti.py
|
Complicateddd/Complicateddd-ROITransformer
|
2adfbf98892d569c460d100c6e2169c5fa3a9b82
|
[
"Apache-2.0"
] | null | null | null |
submit_muti.py
|
Complicateddd/Complicateddd-ROITransformer
|
2adfbf98892d569c460d100c6e2169c5fa3a9b82
|
[
"Apache-2.0"
] | 1
|
2021-12-17T12:49:06.000Z
|
2021-12-17T12:49:06.000Z
|
from mmdet.apis import init_detector, inference_detector, show_result, draw_poly_detections,inference_detector_2
from mmdet.apis import draw_poly_detections_2,init_detector_2
import mmcv
from mmcv import Config
from mmdet.datasets import get_dataset
import cv2
import os
import numpy as np
from tqdm import tqdm
import DOTA_devkit.polyiou as polyiou
import math
import pdb
def py_cpu_nms_poly_fast_np(dets, thresh):
obbs = dets[:, 0:-1]
x1 = np.min(obbs[:, 0::2], axis=1)
y1 = np.min(obbs[:, 1::2], axis=1)
x2 = np.max(obbs[:, 0::2], axis=1)
y2 = np.max(obbs[:, 1::2], axis=1)
scores = dets[:, 8]
areas = (x2 - x1 + 1) * (y2 - y1 + 1)
polys = []
for i in range(len(dets)):
tm_polygon = polyiou.VectorDouble([dets[i][0], dets[i][1],
dets[i][2], dets[i][3],
dets[i][4], dets[i][5],
dets[i][6], dets[i][7]])
polys.append(tm_polygon)
order = scores.argsort()[::-1]
keep = []
while order.size > 0:
ovr = []
i = order[0]
keep.append(i)
xx1 = np.maximum(x1[i], x1[order[1:]])
yy1 = np.maximum(y1[i], y1[order[1:]])
xx2 = np.minimum(x2[i], x2[order[1:]])
yy2 = np.minimum(y2[i], y2[order[1:]])
w = np.maximum(0.0, xx2 - xx1)
h = np.maximum(0.0, yy2 - yy1)
hbb_inter = w * h
hbb_ovr = hbb_inter / (areas[i] + areas[order[1:]] - hbb_inter)
h_inds = np.where(hbb_ovr > 0)[0]
tmp_order = order[h_inds + 1]
for j in range(tmp_order.size):
iou = polyiou.iou_poly(polys[i], polys[tmp_order[j]])
hbb_ovr[h_inds[j]] = iou
try:
if math.isnan(ovr[0]):
pdb.set_trace()
except:
pass
inds = np.where(hbb_ovr <= thresh)[0]
order = order[inds + 1]
return keep
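# Illustrative call (shapes inferred from the indexing above): `dets` is an (N, 9)
# array whose rows are [x1, y1, x2, y2, x3, y3, x4, y4, score]; the function returns
# the indices of detections kept after rotated-polygon NMS at the given IoU threshold.
#   keep = py_cpu_nms_poly_fast_np(dets, 0.1)
#   dets = dets[keep]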
class DetectorModel():
def __init__(self,
config_file,
checkpoint_file):
# init RoITransformer
self.config_file = config_file
self.checkpoint_file = checkpoint_file
self.cfg = Config.fromfile(self.config_file)
self.cfg_2=Config.fromfile(self.config_file)
self.data_test = self.cfg.data['test']
self.dataset = get_dataset(self.data_test)
# self.classnames = self.dataset.CLASSES
self.classnames = ('1', '2', '3', '4', '5')
self.model = init_detector(config_file, checkpoint_file, device='cuda:0')
self.cfg_2.data['test']['img_scale']=(1666,1666)
self.cfg_2.test_cfg['rcnn']['score_thr']=0.25
self.model_2=init_detector_2(self.cfg_2, checkpoint_file, device='cuda:0')
# config.test_cfg
# print(self.cfg.data['test']['img_scale'])
def inference_single(self, imagname):
img = mmcv.imread(imagname)
height, width, channel = img.shape
# slide_h, slide_w = slide_size
# hn, wn = chip_size
# TODO: check the corner case
# import pdb; pdb.set_trace()
total_detections = np.zeros((0, 9))
# print(self.classnames)
chip_detections = inference_detector(self.model, img)
chip_detections_2=inference_detector(self.model_2, img)
# for i in range(5):
# print('result: ', chip_detections[i])
# for i in tqdm(range(int(width / slide_w + 1))):
# for j in range(int(height / slide_h) + 1):
# subimg = np.zeros((hn, wn, channel))
# # print('i: ', i, 'j: ', j)
# chip = img[j*slide_h:j*slide_h + hn, i*slide_w:i*slide_w + wn, :3]
# subimg[:chip.shape[0], :chip.shape[1], :] = chip
# chip_detections = inference_detector(self.model, subimg)
# print('result: ', chip_detections)
# for cls_id, name in enumerate(self.classnames):
# # chip_detections[cls_id][:, :8][:, ::2] = chip_detections[cls_id][:, :8][:, ::2] + i * slide_w
# # chip_detections[cls_id][:, :8][:, 1::2] = chip_detections[cls_id][:, :8][:, 1::2] + j * slide_h
# # import pdb;pdb.set_trace()
# # try:
# total_detections[cls_id] = chip_detections[cls_id]
# except:
# import pdb; pdb.set_trace()
# nms
# total_detections=chip_detections
# print(chip_detections.shape)
# for i in range(5):
# # print(len(chip_detections[i]))
# if len(chip_detections[i]):
# # print(chip_detections[i].shape)
# # print(total_detections)
# total_detections=np.concatenate((total_detections,chip_detections[i]))
# # print(total_detections[1:].shape)
# total_detections_=total_detections[1:]
# print(chip_detections)
# totol_class=np.zeros((0,1))
# for i in range(5):
# total_detections=np.concatenate((total_detections,chip_detections[i]))
# total_detections=np.concatenate((total_detections,chip_detections_2[i]))
# # print(chip_detections[i].shape[0])
# temp_class=np.ones((chip_detections[i].shape[0],1))*i
# totol_class=np.concatenate((totol_class,temp_class))
# temp_class=np.ones((chip_detections_2[i].shape[0],1))*i
# totol_class=np.concatenate((totol_class,temp_class))
# print(total_detections.shape)
# keep = py_cpu_nms_poly_fast_np(total_detections, 0.1)
# totol_class=totol_class[keep]
# # print(totol_class.shape)
# total_detections=total_detections[keep]
# print(total_detections.shape)
for i in range(5):
# print(chip_detections[i].shape)
chip_detections[i]=np.concatenate((chip_detections[i],chip_detections_2[i]))
keep = py_cpu_nms_poly_fast_np(chip_detections[i], 0.1)
chip_detections[i] = chip_detections[i][keep]
return chip_detections
#
def inference_single_vis(self, srcpath, dstpath, slide_size, chip_size):
        # inference_single() takes only the image path; slide_size / chip_size are
        # accepted here for compatibility but are not used.
        detections = self.inference_single(srcpath)
        # print(detections)
        img = draw_poly_detections(srcpath, detections, self.classnames, scale=1, threshold=0.05)
cv2.imwrite(dstpath, img)
if __name__ == '__main__':
import tqdm
roitransformer = DetectorModel(r'work_dirs/faster_rcnn_RoITrans_r101_fpn_1x_all_aug/faster_rcnn_RoITrans_r101x_fpn_1x_anchors_augs_augfpn.py',
r'work_dirs/faster_rcnn_RoITrans_r101_fpn_1x_all_aug/epoch_140.pth')
threshold=0.05
class_names=('1', '2', '3', '4', '5')
import os
path="/media/ubuntu/data/huojianjun/科目四初赛第一阶段/test1"
file_img_name=os.listdir(path)
result_file=open("./科目四_莘莘学子.txt",'w')
# print(file_img_name)
count=0
for name in tqdm.tqdm(file_img_name):
# count+=1
path_img=os.path.join(path,name)
detection_result=roitransformer.inference_single(path_img)
for j, name_cls in enumerate(class_names):
dets = detection_result[j]
for det in dets:
bbox = det[:8]
score = round(det[-1],2)
if score < threshold:
continue
bbox = list(map(int, bbox))
# print(bbox)
# print(score)
# print(name_cls)
result_file.writelines(name+" "+str(name_cls)+" "+str(score)+" "
+str(bbox[0])
+" "+str(bbox[1])+" "+str(bbox[2])+" "+str(bbox[3])
+" "+str(bbox[4])+" "+str(bbox[5])+" "+str(bbox[6])
+" "+str(bbox[7]))
result_file.writelines("\n")
count+=1
| 38.144928
| 146
| 0.566489
|
2d9c15f50f03bd7aa918736bbdf996baf98a9275
| 88,401
|
py
|
Python
|
google/cloud/bigquery/table.py
|
quentin-sommer/python-bigquery
|
1312093855b1b6bd81c5f6e9e358151cfbd366b8
|
[
"Apache-2.0"
] | null | null | null |
google/cloud/bigquery/table.py
|
quentin-sommer/python-bigquery
|
1312093855b1b6bd81c5f6e9e358151cfbd366b8
|
[
"Apache-2.0"
] | null | null | null |
google/cloud/bigquery/table.py
|
quentin-sommer/python-bigquery
|
1312093855b1b6bd81c5f6e9e358151cfbd366b8
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2015 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Define API Tables."""
from __future__ import absolute_import
import copy
import datetime
import functools
import operator
import pytz
import typing
from typing import Any, Dict, Iterable, Iterator, Optional, Tuple
import warnings
try:
import pandas
except ImportError: # pragma: NO COVER
pandas = None
try:
import pyarrow
except ImportError: # pragma: NO COVER
pyarrow = None
import google.api_core.exceptions
from google.api_core.page_iterator import HTTPIterator
import google.cloud._helpers
from google.cloud.bigquery import _helpers
from google.cloud.bigquery import _pandas_helpers
from google.cloud.bigquery.exceptions import LegacyBigQueryStorageError
from google.cloud.bigquery.schema import _build_schema_resource
from google.cloud.bigquery.schema import _parse_schema_resource
from google.cloud.bigquery.schema import _to_schema_fields
from google.cloud.bigquery._tqdm_helpers import get_progress_bar
from google.cloud.bigquery.external_config import ExternalConfig
from google.cloud.bigquery.encryption_configuration import EncryptionConfiguration
if typing.TYPE_CHECKING: # pragma: NO COVER
# Unconditionally import optional dependencies again to tell pytype that
# they are not None, avoiding false "no attribute" errors.
import pandas
import pyarrow
from google.cloud import bigquery_storage
_NO_PANDAS_ERROR = (
"The pandas library is not installed, please install "
"pandas to use the to_dataframe() function."
)
_NO_PYARROW_ERROR = (
"The pyarrow library is not installed, please install "
"pyarrow to use the to_arrow() function."
)
_TABLE_HAS_NO_SCHEMA = 'Table has no schema: call "client.get_table()"'
def _reference_getter(table):
"""A :class:`~google.cloud.bigquery.table.TableReference` pointing to
this table.
Returns:
google.cloud.bigquery.table.TableReference: pointer to this table.
"""
from google.cloud.bigquery import dataset
dataset_ref = dataset.DatasetReference(table.project, table.dataset_id)
return TableReference(dataset_ref, table.table_id)
def _view_use_legacy_sql_getter(table):
"""bool: Specifies whether to execute the view with Legacy or Standard SQL.
This boolean specifies whether to execute the view with Legacy SQL
(:data:`True`) or Standard SQL (:data:`False`). The client side default is
:data:`False`. The server-side default is :data:`True`. If this table is
not a view, :data:`None` is returned.
Raises:
ValueError: For invalid value types.
"""
view = table._properties.get("view")
if view is not None:
# The server-side default for useLegacySql is True.
return view.get("useLegacySql", True)
    # In some cases, such as in a table list, no view object is present, but the
# resource still represents a view. Use the type as a fallback.
if table.table_type == "VIEW":
# The server-side default for useLegacySql is True.
return True
class TableReference(object):
"""TableReferences are pointers to tables.
See
https://cloud.google.com/bigquery/docs/reference/rest/v2/tables#tablereference
Args:
dataset_ref (google.cloud.bigquery.dataset.DatasetReference):
A pointer to the dataset
table_id (str): The ID of the table
"""
def __init__(self, dataset_ref, table_id):
self._project = dataset_ref.project
self._dataset_id = dataset_ref.dataset_id
self._table_id = table_id
@property
def project(self):
"""str: Project bound to the table"""
return self._project
@property
def dataset_id(self):
"""str: ID of dataset containing the table."""
return self._dataset_id
@property
def table_id(self):
"""str: The table ID."""
return self._table_id
@property
def path(self):
"""str: URL path for the table's APIs."""
return "/projects/%s/datasets/%s/tables/%s" % (
self._project,
self._dataset_id,
self._table_id,
)
@classmethod
def from_string(
cls, table_id: str, default_project: str = None
) -> "TableReference":
"""Construct a table reference from table ID string.
Args:
table_id (str):
A table ID in standard SQL format. If ``default_project``
                is not specified, this must include a project ID, dataset
ID, and table ID, each separated by ``.``.
default_project (Optional[str]):
The project ID to use when ``table_id`` does not
include a project ID.
Returns:
TableReference: Table reference parsed from ``table_id``.
Examples:
>>> TableReference.from_string('my-project.mydataset.mytable')
TableRef...(DatasetRef...('my-project', 'mydataset'), 'mytable')
Raises:
ValueError:
If ``table_id`` is not a fully-qualified table ID in
standard SQL format.
"""
from google.cloud.bigquery.dataset import DatasetReference
(
output_project_id,
output_dataset_id,
output_table_id,
) = _helpers._parse_3_part_id(
table_id, default_project=default_project, property_name="table_id"
)
return cls(
DatasetReference(output_project_id, output_dataset_id), output_table_id
)
@classmethod
def from_api_repr(cls, resource: dict) -> "TableReference":
"""Factory: construct a table reference given its API representation
Args:
resource (Dict[str, object]):
Table reference representation returned from the API
Returns:
google.cloud.bigquery.table.TableReference:
Table reference parsed from ``resource``.
"""
from google.cloud.bigquery.dataset import DatasetReference
project = resource["projectId"]
dataset_id = resource["datasetId"]
table_id = resource["tableId"]
return cls(DatasetReference(project, dataset_id), table_id)
def to_api_repr(self) -> dict:
"""Construct the API resource representation of this table reference.
Returns:
Dict[str, object]: Table reference represented as an API resource
"""
return {
"projectId": self._project,
"datasetId": self._dataset_id,
"tableId": self._table_id,
}
def to_bqstorage(self) -> str:
"""Construct a BigQuery Storage API representation of this table.
Install the ``google-cloud-bigquery-storage`` package to use this
feature.
If the ``table_id`` contains a partition identifier (e.g.
``my_table$201812``) or a snapshot identifier (e.g.
``mytable@1234567890``), it is ignored. Use
:class:`google.cloud.bigquery_storage.types.ReadSession.TableReadOptions`
to filter rows by partition. Use
:class:`google.cloud.bigquery_storage.types.ReadSession.TableModifiers`
to select a specific snapshot to read from.
Returns:
str: A reference to this table in the BigQuery Storage API.
"""
table_id, _, _ = self._table_id.partition("@")
table_id, _, _ = table_id.partition("$")
table_ref = "projects/{}/datasets/{}/tables/{}".format(
self._project, self._dataset_id, table_id,
)
return table_ref
def _key(self):
"""A tuple key that uniquely describes this field.
Used to compute this instance's hashcode and evaluate equality.
Returns:
            Tuple[str]: The contents of this :class:`TableReference`.
"""
return (self._project, self._dataset_id, self._table_id)
def __eq__(self, other):
if not isinstance(other, TableReference):
return NotImplemented
return self._key() == other._key()
def __ne__(self, other):
return not self == other
def __hash__(self):
return hash(self._key())
def __str__(self):
return f"{self.project}.{self.dataset_id}.{self.table_id}"
def __repr__(self):
from google.cloud.bigquery.dataset import DatasetReference
dataset_ref = DatasetReference(self._project, self._dataset_id)
return "TableReference({}, '{}')".format(repr(dataset_ref), self._table_id)
class Table(object):
"""Tables represent a set of rows whose values correspond to a schema.
See
https://cloud.google.com/bigquery/docs/reference/rest/v2/tables#resource-table
Args:
table_ref (Union[google.cloud.bigquery.table.TableReference, str]):
A pointer to a table. If ``table_ref`` is a string, it must
            include a project ID, dataset ID, and table ID, each separated
by ``.``.
schema (Optional[Sequence[Union[ \
:class:`~google.cloud.bigquery.schema.SchemaField`, \
Mapping[str, Any] \
]]]):
The table's schema. If any item is a mapping, its content must be
compatible with
:meth:`~google.cloud.bigquery.schema.SchemaField.from_api_repr`.
"""
_PROPERTY_TO_API_FIELD = {
"clustering_fields": "clustering",
"created": "creationTime",
"dataset_id": ["tableReference", "datasetId"],
"description": "description",
"encryption_configuration": "encryptionConfiguration",
"etag": "etag",
"expires": "expirationTime",
"external_data_configuration": "externalDataConfiguration",
"friendly_name": "friendlyName",
"full_table_id": "id",
"labels": "labels",
"location": "location",
"modified": "lastModifiedTime",
"mview_enable_refresh": "materializedView",
"mview_last_refresh_time": ["materializedView", "lastRefreshTime"],
"mview_query": "materializedView",
"mview_refresh_interval": "materializedView",
"num_bytes": "numBytes",
"num_rows": "numRows",
"partition_expiration": "timePartitioning",
"partitioning_type": "timePartitioning",
"project": ["tableReference", "projectId"],
"range_partitioning": "rangePartitioning",
"time_partitioning": "timePartitioning",
"schema": "schema",
"snapshot_definition": "snapshotDefinition",
"streaming_buffer": "streamingBuffer",
"self_link": "selfLink",
"table_id": ["tableReference", "tableId"],
"time_partitioning": "timePartitioning",
"type": "type",
"view_use_legacy_sql": "view",
"view_query": "view",
"require_partition_filter": "requirePartitionFilter",
}
def __init__(self, table_ref, schema=None):
table_ref = _table_arg_to_table_ref(table_ref)
self._properties = {"tableReference": table_ref.to_api_repr(), "labels": {}}
# Let the @property do validation.
if schema is not None:
self.schema = schema
@property
def project(self):
"""str: Project bound to the table."""
return _helpers._get_sub_prop(
self._properties, self._PROPERTY_TO_API_FIELD["project"]
)
@property
def dataset_id(self):
"""str: ID of dataset containing the table."""
return _helpers._get_sub_prop(
self._properties, self._PROPERTY_TO_API_FIELD["dataset_id"]
)
@property
def table_id(self):
"""str: ID of the table."""
return _helpers._get_sub_prop(
self._properties, self._PROPERTY_TO_API_FIELD["table_id"]
)
reference = property(_reference_getter)
@property
def path(self):
"""str: URL path for the table's APIs."""
return "/projects/%s/datasets/%s/tables/%s" % (
self.project,
self.dataset_id,
self.table_id,
)
@property
def require_partition_filter(self):
"""bool: If set to true, queries over the partitioned table require a
partition filter that can be used for partition elimination to be
specified.
"""
return self._properties.get(
self._PROPERTY_TO_API_FIELD["require_partition_filter"]
)
@require_partition_filter.setter
def require_partition_filter(self, value):
self._properties[
self._PROPERTY_TO_API_FIELD["require_partition_filter"]
] = value
@property
def schema(self):
"""Sequence[Union[ \
:class:`~google.cloud.bigquery.schema.SchemaField`, \
Mapping[str, Any] \
]]:
Table's schema.
Raises:
Exception:
If ``schema`` is not a sequence, or if any item in the sequence
is not a :class:`~google.cloud.bigquery.schema.SchemaField`
instance or a compatible mapping representation of the field.
"""
prop = self._properties.get(self._PROPERTY_TO_API_FIELD["schema"])
if not prop:
return []
else:
return _parse_schema_resource(prop)
@schema.setter
def schema(self, value):
api_field = self._PROPERTY_TO_API_FIELD["schema"]
if value is None:
self._properties[api_field] = None
else:
value = _to_schema_fields(value)
self._properties[api_field] = {"fields": _build_schema_resource(value)}
@property
def labels(self):
"""Dict[str, str]: Labels for the table.
This method always returns a dict. To change a table's labels,
modify the dict, then call ``Client.update_table``. To delete a
label, set its value to :data:`None` before updating.
Raises:
ValueError: If ``value`` type is invalid.
"""
return self._properties.setdefault(self._PROPERTY_TO_API_FIELD["labels"], {})
@labels.setter
def labels(self, value):
if not isinstance(value, dict):
raise ValueError("Pass a dict")
self._properties[self._PROPERTY_TO_API_FIELD["labels"]] = value
@property
def encryption_configuration(self):
"""google.cloud.bigquery.encryption_configuration.EncryptionConfiguration: Custom
encryption configuration for the table.
Custom encryption configuration (e.g., Cloud KMS keys) or :data:`None`
if using default encryption.
See `protecting data with Cloud KMS keys
<https://cloud.google.com/bigquery/docs/customer-managed-encryption>`_
in the BigQuery documentation.
"""
prop = self._properties.get(
self._PROPERTY_TO_API_FIELD["encryption_configuration"]
)
if prop is not None:
prop = EncryptionConfiguration.from_api_repr(prop)
return prop
@encryption_configuration.setter
def encryption_configuration(self, value):
api_repr = value
if value is not None:
api_repr = value.to_api_repr()
self._properties[
self._PROPERTY_TO_API_FIELD["encryption_configuration"]
] = api_repr
@property
def created(self):
"""Union[datetime.datetime, None]: Datetime at which the table was
created (:data:`None` until set from the server).
"""
creation_time = self._properties.get(self._PROPERTY_TO_API_FIELD["created"])
if creation_time is not None:
# creation_time will be in milliseconds.
return google.cloud._helpers._datetime_from_microseconds(
1000.0 * float(creation_time)
)
@property
def etag(self):
"""Union[str, None]: ETag for the table resource (:data:`None` until
set from the server).
"""
return self._properties.get(self._PROPERTY_TO_API_FIELD["etag"])
@property
def modified(self):
"""Union[datetime.datetime, None]: Datetime at which the table was last
modified (:data:`None` until set from the server).
"""
modified_time = self._properties.get(self._PROPERTY_TO_API_FIELD["modified"])
if modified_time is not None:
# modified_time will be in milliseconds.
return google.cloud._helpers._datetime_from_microseconds(
1000.0 * float(modified_time)
)
@property
def num_bytes(self):
"""Union[int, None]: The size of the table in bytes (:data:`None` until
set from the server).
"""
return _helpers._int_or_none(
self._properties.get(self._PROPERTY_TO_API_FIELD["num_bytes"])
)
@property
def num_rows(self):
"""Union[int, None]: The number of rows in the table (:data:`None`
until set from the server).
"""
return _helpers._int_or_none(
self._properties.get(self._PROPERTY_TO_API_FIELD["num_rows"])
)
@property
def self_link(self):
"""Union[str, None]: URL for the table resource (:data:`None` until set
from the server).
"""
return self._properties.get(self._PROPERTY_TO_API_FIELD["self_link"])
@property
def full_table_id(self):
"""Union[str, None]: ID for the table (:data:`None` until set from the
server).
In the format ``project-id:dataset_id.table_id``.
"""
return self._properties.get(self._PROPERTY_TO_API_FIELD["full_table_id"])
@property
def table_type(self):
"""Union[str, None]: The type of the table (:data:`None` until set from
the server).
Possible values are ``'TABLE'``, ``'VIEW'``, ``'MATERIALIZED_VIEW'`` or
``'EXTERNAL'``.
"""
return self._properties.get(self._PROPERTY_TO_API_FIELD["type"])
@property
def range_partitioning(self):
"""Optional[google.cloud.bigquery.table.RangePartitioning]:
Configures range-based partitioning for a table.
.. note::
**Beta**. The integer range partitioning feature is in a
pre-release state and might change or have limited support.
Only specify at most one of
:attr:`~google.cloud.bigquery.table.Table.time_partitioning` or
:attr:`~google.cloud.bigquery.table.Table.range_partitioning`.
Raises:
ValueError:
If the value is not
:class:`~google.cloud.bigquery.table.RangePartitioning` or
:data:`None`.
"""
resource = self._properties.get(
self._PROPERTY_TO_API_FIELD["range_partitioning"]
)
if resource is not None:
return RangePartitioning(_properties=resource)
@range_partitioning.setter
def range_partitioning(self, value):
resource = value
if isinstance(value, RangePartitioning):
resource = value._properties
elif value is not None:
raise ValueError(
"Expected value to be RangePartitioning or None, got {}.".format(value)
)
self._properties[self._PROPERTY_TO_API_FIELD["range_partitioning"]] = resource
@property
def time_partitioning(self):
"""Optional[google.cloud.bigquery.table.TimePartitioning]: Configures time-based
partitioning for a table.
Only specify at most one of
:attr:`~google.cloud.bigquery.table.Table.time_partitioning` or
:attr:`~google.cloud.bigquery.table.Table.range_partitioning`.
Raises:
ValueError:
If the value is not
:class:`~google.cloud.bigquery.table.TimePartitioning` or
:data:`None`.
"""
prop = self._properties.get(self._PROPERTY_TO_API_FIELD["time_partitioning"])
if prop is not None:
return TimePartitioning.from_api_repr(prop)
@time_partitioning.setter
def time_partitioning(self, value):
api_repr = value
if isinstance(value, TimePartitioning):
api_repr = value.to_api_repr()
elif value is not None:
raise ValueError(
"value must be google.cloud.bigquery.table.TimePartitioning " "or None"
)
self._properties[self._PROPERTY_TO_API_FIELD["time_partitioning"]] = api_repr
@property
def partitioning_type(self):
"""Union[str, None]: Time partitioning of the table if it is
partitioned (Defaults to :data:`None`).
"""
warnings.warn(
"This method will be deprecated in future versions. Please use "
"Table.time_partitioning.type_ instead.",
PendingDeprecationWarning,
stacklevel=2,
)
if self.time_partitioning is not None:
return self.time_partitioning.type_
@partitioning_type.setter
def partitioning_type(self, value):
warnings.warn(
"This method will be deprecated in future versions. Please use "
"Table.time_partitioning.type_ instead.",
PendingDeprecationWarning,
stacklevel=2,
)
api_field = self._PROPERTY_TO_API_FIELD["partitioning_type"]
if self.time_partitioning is None:
self._properties[api_field] = {}
self._properties[api_field]["type"] = value
@property
def partition_expiration(self):
"""Union[int, None]: Expiration time in milliseconds for a partition.
If :attr:`partition_expiration` is set and :attr:`type_` is
not set, :attr:`type_` will default to
:attr:`~google.cloud.bigquery.table.TimePartitioningType.DAY`.
"""
warnings.warn(
"This method will be deprecated in future versions. Please use "
"Table.time_partitioning.expiration_ms instead.",
PendingDeprecationWarning,
stacklevel=2,
)
if self.time_partitioning is not None:
return self.time_partitioning.expiration_ms
@partition_expiration.setter
def partition_expiration(self, value):
warnings.warn(
"This method will be deprecated in future versions. Please use "
"Table.time_partitioning.expiration_ms instead.",
PendingDeprecationWarning,
stacklevel=2,
)
api_field = self._PROPERTY_TO_API_FIELD["partition_expiration"]
if self.time_partitioning is None:
self._properties[api_field] = {"type": TimePartitioningType.DAY}
self._properties[api_field]["expirationMs"] = str(value)
@property
def clustering_fields(self):
"""Union[List[str], None]: Fields defining clustering for the table
(Defaults to :data:`None`).
Clustering fields are immutable after table creation.
.. note::
BigQuery supports clustering for both partitioned and
non-partitioned tables.
"""
prop = self._properties.get(self._PROPERTY_TO_API_FIELD["clustering_fields"])
if prop is not None:
return list(prop.get("fields", ()))
@clustering_fields.setter
def clustering_fields(self, value):
"""Union[List[str], None]: Fields defining clustering for the table
(Defaults to :data:`None`).
"""
api_field = self._PROPERTY_TO_API_FIELD["clustering_fields"]
if value is not None:
prop = self._properties.setdefault(api_field, {})
prop["fields"] = value
else:
# In order to allow unsetting clustering fields completely, we explicitly
            # set this property to None (as opposed to merely removing the key).
self._properties[api_field] = None
@property
def description(self):
"""Union[str, None]: Description of the table (defaults to
:data:`None`).
Raises:
ValueError: For invalid value types.
"""
return self._properties.get(self._PROPERTY_TO_API_FIELD["description"])
@description.setter
def description(self, value):
if not isinstance(value, str) and value is not None:
raise ValueError("Pass a string, or None")
self._properties[self._PROPERTY_TO_API_FIELD["description"]] = value
@property
def expires(self):
"""Union[datetime.datetime, None]: Datetime at which the table will be
deleted.
Raises:
ValueError: For invalid value types.
"""
expiration_time = self._properties.get(self._PROPERTY_TO_API_FIELD["expires"])
if expiration_time is not None:
# expiration_time will be in milliseconds.
return google.cloud._helpers._datetime_from_microseconds(
1000.0 * float(expiration_time)
)
@expires.setter
def expires(self, value):
if not isinstance(value, datetime.datetime) and value is not None:
raise ValueError("Pass a datetime, or None")
value_ms = google.cloud._helpers._millis_from_datetime(value)
self._properties[
self._PROPERTY_TO_API_FIELD["expires"]
] = _helpers._str_or_none(value_ms)
@property
def friendly_name(self):
"""Union[str, None]: Title of the table (defaults to :data:`None`).
Raises:
ValueError: For invalid value types.
"""
return self._properties.get(self._PROPERTY_TO_API_FIELD["friendly_name"])
@friendly_name.setter
def friendly_name(self, value):
if not isinstance(value, str) and value is not None:
raise ValueError("Pass a string, or None")
self._properties[self._PROPERTY_TO_API_FIELD["friendly_name"]] = value
@property
def location(self):
"""Union[str, None]: Location in which the table is hosted
Defaults to :data:`None`.
"""
return self._properties.get(self._PROPERTY_TO_API_FIELD["location"])
@property
def view_query(self):
"""Union[str, None]: SQL query defining the table as a view (defaults
to :data:`None`).
By default, the query is treated as Standard SQL. To use Legacy
SQL, set :attr:`view_use_legacy_sql` to :data:`True`.
Raises:
ValueError: For invalid value types.
"""
api_field = self._PROPERTY_TO_API_FIELD["view_query"]
return _helpers._get_sub_prop(self._properties, [api_field, "query"])
@view_query.setter
def view_query(self, value):
if not isinstance(value, str):
raise ValueError("Pass a string")
api_field = self._PROPERTY_TO_API_FIELD["view_query"]
_helpers._set_sub_prop(self._properties, [api_field, "query"], value)
view = self._properties[api_field]
# The service defaults useLegacySql to True, but this
# client uses Standard SQL by default.
if view.get("useLegacySql") is None:
view["useLegacySql"] = False
@view_query.deleter
def view_query(self):
"""Delete SQL query defining the table as a view."""
self._properties.pop(self._PROPERTY_TO_API_FIELD["view_query"], None)
view_use_legacy_sql = property(_view_use_legacy_sql_getter)
@view_use_legacy_sql.setter
def view_use_legacy_sql(self, value):
if not isinstance(value, bool):
raise ValueError("Pass a boolean")
api_field = self._PROPERTY_TO_API_FIELD["view_query"]
if self._properties.get(api_field) is None:
self._properties[api_field] = {}
self._properties[api_field]["useLegacySql"] = value
@property
def mview_query(self):
"""Optional[str]: SQL query defining the table as a materialized
view (defaults to :data:`None`).
"""
api_field = self._PROPERTY_TO_API_FIELD["mview_query"]
return _helpers._get_sub_prop(self._properties, [api_field, "query"])
@mview_query.setter
def mview_query(self, value):
api_field = self._PROPERTY_TO_API_FIELD["mview_query"]
_helpers._set_sub_prop(self._properties, [api_field, "query"], str(value))
@mview_query.deleter
def mview_query(self):
"""Delete SQL query defining the table as a materialized view."""
self._properties.pop(self._PROPERTY_TO_API_FIELD["mview_query"], None)
@property
def mview_last_refresh_time(self):
"""Optional[datetime.datetime]: Datetime at which the materialized view was last
refreshed (:data:`None` until set from the server).
"""
refresh_time = _helpers._get_sub_prop(
self._properties, self._PROPERTY_TO_API_FIELD["mview_last_refresh_time"]
)
if refresh_time is not None:
# refresh_time will be in milliseconds.
return google.cloud._helpers._datetime_from_microseconds(
1000 * int(refresh_time)
)
@property
def mview_enable_refresh(self):
"""Optional[bool]: Enable automatic refresh of the materialized view
when the base table is updated. The default value is :data:`True`.
"""
api_field = self._PROPERTY_TO_API_FIELD["mview_enable_refresh"]
return _helpers._get_sub_prop(self._properties, [api_field, "enableRefresh"])
@mview_enable_refresh.setter
def mview_enable_refresh(self, value):
api_field = self._PROPERTY_TO_API_FIELD["mview_enable_refresh"]
return _helpers._set_sub_prop(
self._properties, [api_field, "enableRefresh"], value
)
@property
def mview_refresh_interval(self):
"""Optional[datetime.timedelta]: The maximum frequency at which this
materialized view will be refreshed. The default value is 1800000
milliseconds (30 minutes).
"""
api_field = self._PROPERTY_TO_API_FIELD["mview_refresh_interval"]
refresh_interval = _helpers._get_sub_prop(
self._properties, [api_field, "refreshIntervalMs"]
)
if refresh_interval is not None:
return datetime.timedelta(milliseconds=int(refresh_interval))
@mview_refresh_interval.setter
def mview_refresh_interval(self, value):
if value is None:
refresh_interval_ms = None
else:
refresh_interval_ms = str(value // datetime.timedelta(milliseconds=1))
api_field = self._PROPERTY_TO_API_FIELD["mview_refresh_interval"]
_helpers._set_sub_prop(
self._properties, [api_field, "refreshIntervalMs"], refresh_interval_ms,
)
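    # For example, datetime.timedelta(minutes=30) // datetime.timedelta(milliseconds=1)
    # equals 1800000, so it is stored as refreshIntervalMs="1800000".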
@property
def streaming_buffer(self):
"""google.cloud.bigquery.StreamingBuffer: Information about a table's
streaming buffer.
"""
sb = self._properties.get(self._PROPERTY_TO_API_FIELD["streaming_buffer"])
if sb is not None:
return StreamingBuffer(sb)
@property
def external_data_configuration(self):
"""Union[google.cloud.bigquery.ExternalConfig, None]: Configuration for
an external data source (defaults to :data:`None`).
Raises:
ValueError: For invalid value types.
"""
prop = self._properties.get(
self._PROPERTY_TO_API_FIELD["external_data_configuration"]
)
if prop is not None:
prop = ExternalConfig.from_api_repr(prop)
return prop
@external_data_configuration.setter
def external_data_configuration(self, value):
if not (value is None or isinstance(value, ExternalConfig)):
raise ValueError("Pass an ExternalConfig or None")
api_repr = value
if value is not None:
api_repr = value.to_api_repr()
self._properties[
self._PROPERTY_TO_API_FIELD["external_data_configuration"]
] = api_repr
@property
def snapshot_definition(self) -> Optional["SnapshotDefinition"]:
"""Information about the snapshot. This value is set via snapshot creation.
See: https://cloud.google.com/bigquery/docs/reference/rest/v2/tables#Table.FIELDS.snapshot_definition
"""
snapshot_info = self._properties.get(
self._PROPERTY_TO_API_FIELD["snapshot_definition"]
)
if snapshot_info is not None:
snapshot_info = SnapshotDefinition(snapshot_info)
return snapshot_info
@classmethod
def from_string(cls, full_table_id: str) -> "Table":
"""Construct a table from fully-qualified table ID.
Args:
full_table_id (str):
A fully-qualified table ID in standard SQL format. Must
                include a project ID, dataset ID, and table ID, each
separated by ``.``.
Returns:
Table: Table parsed from ``full_table_id``.
Examples:
>>> Table.from_string('my-project.mydataset.mytable')
Table(TableRef...(D...('my-project', 'mydataset'), 'mytable'))
Raises:
ValueError:
If ``full_table_id`` is not a fully-qualified table ID in
standard SQL format.
"""
return cls(TableReference.from_string(full_table_id))
@classmethod
def from_api_repr(cls, resource: dict) -> "Table":
"""Factory: construct a table given its API representation
Args:
resource (Dict[str, object]):
Table resource representation from the API
Returns:
google.cloud.bigquery.table.Table: Table parsed from ``resource``.
Raises:
KeyError:
If the ``resource`` lacks the key ``'tableReference'``, or if
the ``dict`` stored within the key ``'tableReference'`` lacks
the keys ``'tableId'``, ``'projectId'``, or ``'datasetId'``.
"""
from google.cloud.bigquery import dataset
if (
"tableReference" not in resource
or "tableId" not in resource["tableReference"]
):
raise KeyError(
"Resource lacks required identity information:"
'["tableReference"]["tableId"]'
)
project_id = _helpers._get_sub_prop(
resource, cls._PROPERTY_TO_API_FIELD["project"]
)
table_id = _helpers._get_sub_prop(
resource, cls._PROPERTY_TO_API_FIELD["table_id"]
)
dataset_id = _helpers._get_sub_prop(
resource, cls._PROPERTY_TO_API_FIELD["dataset_id"]
)
dataset_ref = dataset.DatasetReference(project_id, dataset_id)
table = cls(dataset_ref.table(table_id))
table._properties = resource
return table
def to_api_repr(self) -> dict:
"""Constructs the API resource of this table
Returns:
Dict[str, object]: Table represented as an API resource
"""
return copy.deepcopy(self._properties)
def to_bqstorage(self) -> str:
"""Construct a BigQuery Storage API representation of this table.
Returns:
str: A reference to this table in the BigQuery Storage API.
"""
return self.reference.to_bqstorage()
def _build_resource(self, filter_fields):
"""Generate a resource for ``update``."""
return _helpers._build_resource_from_properties(self, filter_fields)
def __repr__(self):
return "Table({})".format(repr(self.reference))
class TableListItem(object):
"""A read-only table resource from a list operation.
For performance reasons, the BigQuery API only includes some of the table
properties when listing tables. Notably,
:attr:`~google.cloud.bigquery.table.Table.schema` and
:attr:`~google.cloud.bigquery.table.Table.num_rows` are missing.
For a full list of the properties that the BigQuery API returns, see the
`REST documentation for tables.list
<https://cloud.google.com/bigquery/docs/reference/rest/v2/tables/list>`_.
Args:
resource (Dict[str, object]):
A table-like resource object from a table list response. A
``tableReference`` property is required.
Raises:
ValueError:
If ``tableReference`` or one of its required members is missing
from ``resource``.
"""
def __init__(self, resource):
if "tableReference" not in resource:
raise ValueError("resource must contain a tableReference value")
if "projectId" not in resource["tableReference"]:
raise ValueError(
"resource['tableReference'] must contain a projectId value"
)
if "datasetId" not in resource["tableReference"]:
raise ValueError(
"resource['tableReference'] must contain a datasetId value"
)
if "tableId" not in resource["tableReference"]:
raise ValueError("resource['tableReference'] must contain a tableId value")
self._properties = resource
@property
def created(self):
"""Union[datetime.datetime, None]: Datetime at which the table was
created (:data:`None` until set from the server).
"""
creation_time = self._properties.get("creationTime")
if creation_time is not None:
# creation_time will be in milliseconds.
return google.cloud._helpers._datetime_from_microseconds(
1000.0 * float(creation_time)
)
@property
def expires(self):
"""Union[datetime.datetime, None]: Datetime at which the table will be
deleted.
"""
expiration_time = self._properties.get("expirationTime")
if expiration_time is not None:
# expiration_time will be in milliseconds.
return google.cloud._helpers._datetime_from_microseconds(
1000.0 * float(expiration_time)
)
@property
def project(self):
"""str: Project bound to the table."""
return self._properties["tableReference"]["projectId"]
@property
def dataset_id(self):
"""str: ID of dataset containing the table."""
return self._properties["tableReference"]["datasetId"]
@property
def table_id(self):
"""str: ID of the table."""
return self._properties["tableReference"]["tableId"]
reference = property(_reference_getter)
@property
def labels(self):
"""Dict[str, str]: Labels for the table.
This method always returns a dict. To change a table's labels,
modify the dict, then call ``Client.update_table``. To delete a
label, set its value to :data:`None` before updating.
"""
return self._properties.setdefault("labels", {})
@property
def full_table_id(self):
"""Union[str, None]: ID for the table (:data:`None` until set from the
server).
In the format ``project_id:dataset_id.table_id``.
"""
return self._properties.get("id")
@property
def table_type(self):
"""Union[str, None]: The type of the table (:data:`None` until set from
the server).
Possible values are ``'TABLE'``, ``'VIEW'``, or ``'EXTERNAL'``.
"""
return self._properties.get("type")
@property
def time_partitioning(self):
"""google.cloud.bigquery.table.TimePartitioning: Configures time-based
partitioning for a table.
"""
prop = self._properties.get("timePartitioning")
if prop is not None:
return TimePartitioning.from_api_repr(prop)
@property
def partitioning_type(self):
"""Union[str, None]: Time partitioning of the table if it is
partitioned (Defaults to :data:`None`).
"""
warnings.warn(
"This method will be deprecated in future versions. Please use "
"TableListItem.time_partitioning.type_ instead.",
PendingDeprecationWarning,
stacklevel=2,
)
if self.time_partitioning is not None:
return self.time_partitioning.type_
@property
def partition_expiration(self):
"""Union[int, None]: Expiration time in milliseconds for a partition.
If this property is set and :attr:`type_` is not set, :attr:`type_`
will default to :attr:`TimePartitioningType.DAY`.
"""
warnings.warn(
"This method will be deprecated in future versions. Please use "
"TableListItem.time_partitioning.expiration_ms instead.",
PendingDeprecationWarning,
stacklevel=2,
)
if self.time_partitioning is not None:
return self.time_partitioning.expiration_ms
@property
def friendly_name(self):
"""Union[str, None]: Title of the table (defaults to :data:`None`)."""
return self._properties.get("friendlyName")
view_use_legacy_sql = property(_view_use_legacy_sql_getter)
@property
def clustering_fields(self):
"""Union[List[str], None]: Fields defining clustering for the table
(Defaults to :data:`None`).
Clustering fields are immutable after table creation.
.. note::
BigQuery supports clustering for both partitioned and
non-partitioned tables.
"""
prop = self._properties.get("clustering")
if prop is not None:
return list(prop.get("fields", ()))
@classmethod
def from_string(cls, full_table_id: str) -> "TableListItem":
"""Construct a table from fully-qualified table ID.
Args:
full_table_id (str):
A fully-qualified table ID in standard SQL format. Must
                include a project ID, dataset ID, and table ID, each
separated by ``.``.
Returns:
            TableListItem: Table list item parsed from ``full_table_id``.
Examples:
>>> Table.from_string('my-project.mydataset.mytable')
Table(TableRef...(D...('my-project', 'mydataset'), 'mytable'))
Raises:
ValueError:
If ``full_table_id`` is not a fully-qualified table ID in
standard SQL format.
"""
return cls(
{"tableReference": TableReference.from_string(full_table_id).to_api_repr()}
)
def to_bqstorage(self) -> str:
"""Construct a BigQuery Storage API representation of this table.
Returns:
str: A reference to this table in the BigQuery Storage API.
"""
return self.reference.to_bqstorage()
def to_api_repr(self) -> dict:
"""Constructs the API resource of this table
Returns:
Dict[str, object]: Table represented as an API resource
"""
return copy.deepcopy(self._properties)
def _row_from_mapping(mapping, schema):
"""Convert a mapping to a row tuple using the schema.
Args:
mapping (Dict[str, object])
Mapping of row data: must contain keys for all required fields in
the schema. Keys which do not correspond to a field in the schema
are ignored.
schema (List[google.cloud.bigquery.schema.SchemaField]):
The schema of the table destination for the rows
Returns:
Tuple[object]:
Tuple whose elements are ordered according to the schema.
Raises:
ValueError: If schema is empty.
"""
if len(schema) == 0:
raise ValueError(_TABLE_HAS_NO_SCHEMA)
row = []
for field in schema:
if field.mode == "REQUIRED":
row.append(mapping[field.name])
elif field.mode == "REPEATED":
row.append(mapping.get(field.name, ()))
elif field.mode == "NULLABLE":
row.append(mapping.get(field.name))
else:
raise ValueError("Unknown field mode: {}".format(field.mode))
return tuple(row)
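# Usage sketch (illustrative only, not part of the library source): converting
# a mapping with the helper above. The schema and values are assumptions made
# for the example; keys not present in the schema are dropped.
#
#   schema = [
#       SchemaField("full_name", "STRING", mode="REQUIRED"),
#       SchemaField("age", "INTEGER", mode="NULLABLE"),
#   ]
#   _row_from_mapping({"full_name": "Ada", "age": 36, "extra": 1}, schema)
#   # -> ('Ada', 36)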
class StreamingBuffer(object):
"""Information about a table's streaming buffer.
See https://cloud.google.com/bigquery/streaming-data-into-bigquery.
Args:
resource (Dict[str, object]):
streaming buffer representation returned from the API
"""
def __init__(self, resource):
self.estimated_bytes = None
if "estimatedBytes" in resource:
self.estimated_bytes = int(resource["estimatedBytes"])
self.estimated_rows = None
if "estimatedRows" in resource:
self.estimated_rows = int(resource["estimatedRows"])
self.oldest_entry_time = None
if "oldestEntryTime" in resource:
self.oldest_entry_time = google.cloud._helpers._datetime_from_microseconds(
1000.0 * int(resource["oldestEntryTime"])
)
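# Usage sketch (illustrative only, not part of the library source): the API
# reports streaming-buffer statistics as strings, which the class above
# coerces to integers and a datetime. The values are made up for the example.
#
#   sb = StreamingBuffer({
#       "estimatedBytes": "2048",
#       "estimatedRows": "10",
#       "oldestEntryTime": "1610000000000",  # milliseconds since the epoch
#   })
#   sb.estimated_rows   # -> 10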
class SnapshotDefinition:
"""Information about base table and snapshot time of the snapshot.
See https://cloud.google.com/bigquery/docs/reference/rest/v2/tables#snapshotdefinition
Args:
resource: Snapshot definition representation returned from the API.
"""
def __init__(self, resource: Dict[str, Any]):
self.base_table_reference = None
if "baseTableReference" in resource:
self.base_table_reference = TableReference.from_api_repr(
resource["baseTableReference"]
)
self.snapshot_time = None
if "snapshotTime" in resource:
self.snapshot_time = google.cloud._helpers._rfc3339_to_datetime(
resource["snapshotTime"]
)
class Row(object):
"""A BigQuery row.
Values can be accessed by position (index), by key like a dict,
or as properties.
Args:
values (Sequence[object]): The row values
field_to_index (Dict[str, int]):
A mapping from schema field names to indexes
"""
# Choose unusual field names to try to avoid conflict with schema fields.
__slots__ = ("_xxx_values", "_xxx_field_to_index")
def __init__(self, values, field_to_index):
self._xxx_values = values
self._xxx_field_to_index = field_to_index
def values(self):
"""Return the values included in this row.
Returns:
Sequence[object]: A sequence of length ``len(row)``.
"""
return copy.deepcopy(self._xxx_values)
def keys(self) -> Iterable[str]:
"""Return the keys for using a row as a dict.
Returns:
Iterable[str]: The keys corresponding to the columns of a row
Examples:
>>> list(Row(('a', 'b'), {'x': 0, 'y': 1}).keys())
['x', 'y']
"""
return self._xxx_field_to_index.keys()
def items(self) -> Iterable[Tuple[str, Any]]:
"""Return items as ``(key, value)`` pairs.
Returns:
Iterable[Tuple[str, object]]:
The ``(key, value)`` pairs representing this row.
Examples:
>>> list(Row(('a', 'b'), {'x': 0, 'y': 1}).items())
[('x', 'a'), ('y', 'b')]
"""
for key, index in self._xxx_field_to_index.items():
yield (key, copy.deepcopy(self._xxx_values[index]))
def get(self, key: str, default: Any = None) -> Any:
"""Return a value for key, with a default value if it does not exist.
Args:
key (str): The key of the column to access
default (object):
The default value to use if the key does not exist. (Defaults
to :data:`None`.)
Returns:
object:
The value associated with the provided key, or a default value.
Examples:
When the key exists, the value associated with it is returned.
>>> Row(('a', 'b'), {'x': 0, 'y': 1}).get('x')
'a'
The default value is :data:`None` when the key does not exist.
>>> Row(('a', 'b'), {'x': 0, 'y': 1}).get('z')
None
            The default value can be overridden with the ``default`` parameter.
>>> Row(('a', 'b'), {'x': 0, 'y': 1}).get('z', '')
''
>>> Row(('a', 'b'), {'x': 0, 'y': 1}).get('z', default = '')
''
"""
index = self._xxx_field_to_index.get(key)
if index is None:
return default
return self._xxx_values[index]
def __getattr__(self, name):
value = self._xxx_field_to_index.get(name)
if value is None:
raise AttributeError("no row field {!r}".format(name))
return self._xxx_values[value]
def __len__(self):
return len(self._xxx_values)
def __getitem__(self, key):
if isinstance(key, str):
value = self._xxx_field_to_index.get(key)
if value is None:
raise KeyError("no row field {!r}".format(key))
key = value
return self._xxx_values[key]
def __eq__(self, other):
if not isinstance(other, Row):
return NotImplemented
return (
self._xxx_values == other._xxx_values
and self._xxx_field_to_index == other._xxx_field_to_index
)
def __ne__(self, other):
return not self == other
def __repr__(self):
# sort field dict by value, for determinism
items = sorted(self._xxx_field_to_index.items(), key=operator.itemgetter(1))
f2i = "{" + ", ".join("%r: %d" % item for item in items) + "}"
return "Row({}, {})".format(self._xxx_values, f2i)
class _NoopProgressBarQueue(object):
"""A fake Queue class that does nothing.
This is used when there is no progress bar to send updates to.
"""
def put_nowait(self, item):
"""Don't actually do anything with the item."""
class RowIterator(HTTPIterator):
"""A class for iterating through HTTP/JSON API row list responses.
Args:
client (Optional[google.cloud.bigquery.Client]):
The API client instance. This should always be non-`None`, except for
subclasses that do not use it, namely the ``_EmptyRowIterator``.
api_request (Callable[google.cloud._http.JSONConnection.api_request]):
The function to use to make API requests.
path (str): The method path to query for the list of items.
schema (Sequence[Union[ \
:class:`~google.cloud.bigquery.schema.SchemaField`, \
Mapping[str, Any] \
]]):
The table's schema. If any item is a mapping, its content must be
compatible with
:meth:`~google.cloud.bigquery.schema.SchemaField.from_api_repr`.
page_token (str): A token identifying a page in a result set to start
fetching results from.
max_results (Optional[int]): The maximum number of results to fetch.
page_size (Optional[int]): The maximum number of rows in each page
of results from this request. Non-positive values are ignored.
Defaults to a sensible value set by the API.
extra_params (Optional[Dict[str, object]]):
Extra query string parameters for the API call.
table (Optional[Union[ \
google.cloud.bigquery.table.Table, \
google.cloud.bigquery.table.TableReference, \
]]):
The table which these rows belong to, or a reference to it. Used to
call the BigQuery Storage API to fetch rows.
selected_fields (Optional[Sequence[google.cloud.bigquery.schema.SchemaField]]):
A subset of columns to select from this table.
total_rows (Optional[int]):
Total number of rows in the table.
first_page_response (Optional[dict]):
API response for the first page of results. These are returned when
the first page is requested.
"""
def __init__(
self,
client,
api_request,
path,
schema,
page_token=None,
max_results=None,
page_size=None,
extra_params=None,
table=None,
selected_fields=None,
total_rows=None,
first_page_response=None,
):
super(RowIterator, self).__init__(
client,
api_request,
path,
item_to_value=_item_to_row,
items_key="rows",
page_token=page_token,
max_results=max_results,
extra_params=extra_params,
page_start=_rows_page_start,
next_token="pageToken",
)
schema = _to_schema_fields(schema)
self._field_to_index = _helpers._field_to_index_mapping(schema)
self._page_size = page_size
self._preserve_order = False
self._project = client.project if client is not None else None
self._schema = schema
self._selected_fields = selected_fields
self._table = table
self._total_rows = total_rows
self._first_page_response = first_page_response
def _is_completely_cached(self):
"""Check if all results are completely cached.
This is useful to know, because we can avoid alternative download
mechanisms.
"""
if self._first_page_response is None or self.next_page_token:
return False
return self._first_page_response.get(self._next_token) is None
def _validate_bqstorage(self, bqstorage_client, create_bqstorage_client):
"""Returns if the BigQuery Storage API can be used.
Returns:
bool
True if the BigQuery Storage client can be used or created.
"""
using_bqstorage_api = bqstorage_client or create_bqstorage_client
if not using_bqstorage_api:
return False
if self._is_completely_cached():
return False
if self.max_results is not None:
warnings.warn(
"Cannot use bqstorage_client if max_results is set, "
"reverting to fetching data with the REST endpoint.",
stacklevel=2,
)
return False
try:
from google.cloud import bigquery_storage # noqa: F401
except ImportError:
return False
try:
_helpers.BQ_STORAGE_VERSIONS.verify_version()
except LegacyBigQueryStorageError as exc:
warnings.warn(str(exc))
return False
return True
def _get_next_page_response(self):
"""Requests the next page from the path provided.
Returns:
Dict[str, object]:
The parsed JSON response of the next page's contents.
"""
if self._first_page_response:
response = self._first_page_response
self._first_page_response = None
return response
params = self._get_query_params()
if self._page_size is not None:
if self.page_number and "startIndex" in params:
del params["startIndex"]
params["maxResults"] = self._page_size
return self.api_request(
method=self._HTTP_METHOD, path=self.path, query_params=params
)
@property
def schema(self):
"""List[google.cloud.bigquery.schema.SchemaField]: The subset of
columns to be read from the table."""
return list(self._schema)
@property
def total_rows(self):
"""int: The total number of rows in the table."""
return self._total_rows
def _to_page_iterable(
self, bqstorage_download, tabledata_list_download, bqstorage_client=None
):
if not self._validate_bqstorage(bqstorage_client, False):
bqstorage_client = None
result_pages = (
bqstorage_download()
if bqstorage_client is not None
else tabledata_list_download()
)
yield from result_pages
def _to_arrow_iterable(self, bqstorage_client=None):
"""Create an iterable of arrow RecordBatches, to process the table as a stream."""
bqstorage_download = functools.partial(
_pandas_helpers.download_arrow_bqstorage,
self._project,
self._table,
bqstorage_client,
preserve_order=self._preserve_order,
selected_fields=self._selected_fields,
)
tabledata_list_download = functools.partial(
_pandas_helpers.download_arrow_row_iterator, iter(self.pages), self.schema
)
return self._to_page_iterable(
bqstorage_download,
tabledata_list_download,
bqstorage_client=bqstorage_client,
)
# If changing the signature of this method, make sure to apply the same
# changes to job.QueryJob.to_arrow()
def to_arrow(
self,
progress_bar_type: str = None,
bqstorage_client: "bigquery_storage.BigQueryReadClient" = None,
create_bqstorage_client: bool = True,
) -> "pyarrow.Table":
"""[Beta] Create a class:`pyarrow.Table` by loading all pages of a
table or query.
Args:
progress_bar_type (Optional[str]):
If set, use the `tqdm <https://tqdm.github.io/>`_ library to
display a progress bar while the data downloads. Install the
``tqdm`` package to use this feature.
Possible values of ``progress_bar_type`` include:
``None``
No progress bar.
``'tqdm'``
Use the :func:`tqdm.tqdm` function to print a progress bar
to :data:`sys.stderr`.
``'tqdm_notebook'``
Use the :func:`tqdm.tqdm_notebook` function to display a
progress bar as a Jupyter notebook widget.
``'tqdm_gui'``
Use the :func:`tqdm.tqdm_gui` function to display a
progress bar as a graphical dialog box.
bqstorage_client (Optional[google.cloud.bigquery_storage_v1.BigQueryReadClient]):
A BigQuery Storage API client. If supplied, use the faster BigQuery
Storage API to fetch rows from BigQuery. This API is a billable API.
This method requires the ``pyarrow`` and
``google-cloud-bigquery-storage`` libraries.
This method only exposes a subset of the capabilities of the
BigQuery Storage API. For full access to all features
(projections, filters, snapshots) use the Storage API directly.
create_bqstorage_client (Optional[bool]):
If ``True`` (default), create a BigQuery Storage API client using
the default API settings. The BigQuery Storage API is a faster way
to fetch rows from BigQuery. See the ``bqstorage_client`` parameter
for more information.
This argument does nothing if ``bqstorage_client`` is supplied.
                .. versionadded:: 1.24.0
Returns:
pyarrow.Table
A :class:`pyarrow.Table` populated with row data and column
headers from the query results. The column headers are derived
from the destination table's schema.
Raises:
ValueError: If the :mod:`pyarrow` library cannot be imported.
        .. versionadded:: 1.17.0
"""
if pyarrow is None:
raise ValueError(_NO_PYARROW_ERROR)
if not self._validate_bqstorage(bqstorage_client, create_bqstorage_client):
create_bqstorage_client = False
bqstorage_client = None
owns_bqstorage_client = False
if not bqstorage_client and create_bqstorage_client:
bqstorage_client = self.client._ensure_bqstorage_client()
owns_bqstorage_client = bqstorage_client is not None
try:
progress_bar = get_progress_bar(
progress_bar_type, "Downloading", self.total_rows, "rows"
)
record_batches = []
for record_batch in self._to_arrow_iterable(
bqstorage_client=bqstorage_client
):
record_batches.append(record_batch)
if progress_bar is not None:
# In some cases, the number of total rows is not populated
# until the first page of rows is fetched. Update the
# progress bar's total to keep an accurate count.
progress_bar.total = progress_bar.total or self.total_rows
progress_bar.update(record_batch.num_rows)
if progress_bar is not None:
# Indicate that the download has finished.
progress_bar.close()
finally:
if owns_bqstorage_client:
bqstorage_client._transport.grpc_channel.close()
if record_batches:
return pyarrow.Table.from_batches(record_batches)
else:
# No records, use schema based on BigQuery schema.
arrow_schema = _pandas_helpers.bq_to_arrow_schema(self._schema)
return pyarrow.Table.from_batches(record_batches, schema=arrow_schema)
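    # Usage sketch (illustrative only, not part of the library source): a
    # RowIterator is normally obtained from ``Client.list_rows`` or from query
    # results; ``client`` and ``table`` below are assumed to exist already.
    #
    #   rows = client.list_rows(table)   # RowIterator
    #   arrow_table = rows.to_arrow(create_bqstorage_client=False)
    #   arrow_table.num_rows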
def to_dataframe_iterable(
self,
bqstorage_client: "bigquery_storage.BigQueryReadClient" = None,
dtypes: Dict[str, Any] = None,
max_queue_size: int = _pandas_helpers._MAX_QUEUE_SIZE_DEFAULT,
) -> "pandas.DataFrame":
"""Create an iterable of pandas DataFrames, to process the table as a stream.
Args:
bqstorage_client (Optional[google.cloud.bigquery_storage_v1.BigQueryReadClient]):
A BigQuery Storage API client. If supplied, use the faster
BigQuery Storage API to fetch rows from BigQuery.
This method requires the ``pyarrow`` and
``google-cloud-bigquery-storage`` libraries.
This method only exposes a subset of the capabilities of the
BigQuery Storage API. For full access to all features
(projections, filters, snapshots) use the Storage API directly.
dtypes (Optional[Map[str, Union[str, pandas.Series.dtype]]]):
                A dictionary of column names to pandas ``dtype``s. The provided
``dtype`` is used when constructing the series for the column
specified. Otherwise, the default pandas behavior is used.
max_queue_size (Optional[int]):
The maximum number of result pages to hold in the internal queue when
streaming query results over the BigQuery Storage API. Ignored if
Storage API is not used.
By default, the max queue size is set to the number of BQ Storage streams
created by the server. If ``max_queue_size`` is :data:`None`, the queue
size is infinite.
                .. versionadded:: 2.14.0
Returns:
            Iterator[pandas.DataFrame]:
A generator of :class:`~pandas.DataFrame`.
Raises:
ValueError:
If the :mod:`pandas` library cannot be imported.
"""
if pandas is None:
raise ValueError(_NO_PANDAS_ERROR)
if dtypes is None:
dtypes = {}
column_names = [field.name for field in self._schema]
bqstorage_download = functools.partial(
_pandas_helpers.download_dataframe_bqstorage,
self._project,
self._table,
bqstorage_client,
column_names,
dtypes,
preserve_order=self._preserve_order,
selected_fields=self._selected_fields,
max_queue_size=max_queue_size,
)
tabledata_list_download = functools.partial(
_pandas_helpers.download_dataframe_row_iterator,
iter(self.pages),
self.schema,
dtypes,
)
return self._to_page_iterable(
bqstorage_download,
tabledata_list_download,
bqstorage_client=bqstorage_client,
)
# If changing the signature of this method, make sure to apply the same
# changes to job.QueryJob.to_dataframe()
def to_dataframe(
self,
bqstorage_client: "bigquery_storage.BigQueryReadClient" = None,
dtypes: Dict[str, Any] = None,
progress_bar_type: str = None,
create_bqstorage_client: bool = True,
date_as_object: bool = True,
) -> "pandas.DataFrame":
"""Create a pandas DataFrame by loading all pages of a query.
Args:
bqstorage_client (Optional[google.cloud.bigquery_storage_v1.BigQueryReadClient]):
A BigQuery Storage API client. If supplied, use the faster
BigQuery Storage API to fetch rows from BigQuery.
This method requires the ``pyarrow`` and
``google-cloud-bigquery-storage`` libraries.
This method only exposes a subset of the capabilities of the
BigQuery Storage API. For full access to all features
(projections, filters, snapshots) use the Storage API directly.
dtypes (Optional[Map[str, Union[str, pandas.Series.dtype]]]):
                A dictionary of column names to pandas ``dtype``s. The provided
``dtype`` is used when constructing the series for the column
specified. Otherwise, the default pandas behavior is used.
progress_bar_type (Optional[str]):
If set, use the `tqdm <https://tqdm.github.io/>`_ library to
display a progress bar while the data downloads. Install the
``tqdm`` package to use this feature.
Possible values of ``progress_bar_type`` include:
``None``
No progress bar.
``'tqdm'``
Use the :func:`tqdm.tqdm` function to print a progress bar
to :data:`sys.stderr`.
``'tqdm_notebook'``
Use the :func:`tqdm.tqdm_notebook` function to display a
progress bar as a Jupyter notebook widget.
``'tqdm_gui'``
Use the :func:`tqdm.tqdm_gui` function to display a
progress bar as a graphical dialog box.
                .. versionadded:: 1.11.0
create_bqstorage_client (Optional[bool]):
If ``True`` (default), create a BigQuery Storage API client
using the default API settings. The BigQuery Storage API
is a faster way to fetch rows from BigQuery. See the
``bqstorage_client`` parameter for more information.
This argument does nothing if ``bqstorage_client`` is supplied.
                .. versionadded:: 1.24.0
date_as_object (Optional[bool]):
If ``True`` (default), cast dates to objects. If ``False``, convert
to datetime64[ns] dtype.
                .. versionadded:: 1.26.0
Returns:
pandas.DataFrame:
A :class:`~pandas.DataFrame` populated with row data and column
headers from the query results. The column headers are derived
from the destination table's schema.
Raises:
ValueError:
If the :mod:`pandas` library cannot be imported, or the
:mod:`google.cloud.bigquery_storage_v1` module is
required but cannot be imported.
"""
if pandas is None:
raise ValueError(_NO_PANDAS_ERROR)
if dtypes is None:
dtypes = {}
if not self._validate_bqstorage(bqstorage_client, create_bqstorage_client):
create_bqstorage_client = False
bqstorage_client = None
record_batch = self.to_arrow(
progress_bar_type=progress_bar_type,
bqstorage_client=bqstorage_client,
create_bqstorage_client=create_bqstorage_client,
)
# When converting timestamp values to nanosecond precision, the result
# can be out of pyarrow bounds. To avoid the error when converting to
# Pandas, we set the timestamp_as_object parameter to True, if necessary.
types_to_check = {
pyarrow.timestamp("us"),
pyarrow.timestamp("us", tz=pytz.UTC),
}
for column in record_batch:
if column.type in types_to_check:
try:
column.cast("timestamp[ns]")
except pyarrow.lib.ArrowInvalid:
timestamp_as_object = True
break
else:
timestamp_as_object = False
extra_kwargs = {"timestamp_as_object": timestamp_as_object}
df = record_batch.to_pandas(date_as_object=date_as_object, **extra_kwargs)
for column in dtypes:
df[column] = pandas.Series(df[column], dtype=dtypes[column])
return df
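    # Usage sketch (illustrative only, not part of the library source):
    # downloading query results into pandas without the Storage API; ``client``
    # is assumed to be an existing ``bigquery.Client``.
    #
    #   df = client.query("SELECT 1 AS x").result().to_dataframe(
    #       create_bqstorage_client=False,
    #   )
    #   df["x"]   # -> a pandas Series containing 1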
class _EmptyRowIterator(RowIterator):
"""An empty row iterator.
This class prevents API requests when there are no rows to fetch or rows
are impossible to fetch, such as with query results for DDL CREATE VIEW
statements.
"""
schema = ()
pages = ()
total_rows = 0
def __init__(
self, client=None, api_request=None, path=None, schema=(), *args, **kwargs
):
super().__init__(
client=client,
api_request=api_request,
path=path,
schema=schema,
*args,
**kwargs,
)
def to_arrow(
self,
progress_bar_type=None,
bqstorage_client=None,
create_bqstorage_client=True,
) -> "pyarrow.Table":
"""[Beta] Create an empty class:`pyarrow.Table`.
Args:
progress_bar_type (str): Ignored. Added for compatibility with RowIterator.
bqstorage_client (Any): Ignored. Added for compatibility with RowIterator.
create_bqstorage_client (bool): Ignored. Added for compatibility with RowIterator.
Returns:
pyarrow.Table: An empty :class:`pyarrow.Table`.
"""
if pyarrow is None:
raise ValueError(_NO_PYARROW_ERROR)
return pyarrow.Table.from_arrays(())
def to_dataframe(
self,
bqstorage_client=None,
dtypes=None,
progress_bar_type=None,
create_bqstorage_client=True,
date_as_object=True,
) -> "pandas.DataFrame":
"""Create an empty dataframe.
Args:
bqstorage_client (Any): Ignored. Added for compatibility with RowIterator.
dtypes (Any): Ignored. Added for compatibility with RowIterator.
progress_bar_type (Any): Ignored. Added for compatibility with RowIterator.
create_bqstorage_client (bool): Ignored. Added for compatibility with RowIterator.
date_as_object (bool): Ignored. Added for compatibility with RowIterator.
Returns:
pandas.DataFrame: An empty :class:`~pandas.DataFrame`.
"""
if pandas is None:
raise ValueError(_NO_PANDAS_ERROR)
return pandas.DataFrame()
def to_dataframe_iterable(
self,
bqstorage_client: Optional["bigquery_storage.BigQueryReadClient"] = None,
dtypes: Optional[Dict[str, Any]] = None,
max_queue_size: Optional[int] = None,
) -> Iterator["pandas.DataFrame"]:
"""Create an iterable of pandas DataFrames, to process the table as a stream.
        .. versionadded:: 2.21.0
Args:
bqstorage_client:
Ignored. Added for compatibility with RowIterator.
dtypes (Optional[Map[str, Union[str, pandas.Series.dtype]]]):
Ignored. Added for compatibility with RowIterator.
max_queue_size:
Ignored. Added for compatibility with RowIterator.
Returns:
An iterator yielding a single empty :class:`~pandas.DataFrame`.
Raises:
ValueError:
If the :mod:`pandas` library cannot be imported.
"""
if pandas is None:
raise ValueError(_NO_PANDAS_ERROR)
return iter((pandas.DataFrame(),))
def __iter__(self):
return iter(())
class PartitionRange(object):
"""Definition of the ranges for range partitioning.
.. note::
**Beta**. The integer range partitioning feature is in a pre-release
state and might change or have limited support.
Args:
start (Optional[int]):
Sets the
:attr:`~google.cloud.bigquery.table.PartitionRange.start`
property.
end (Optional[int]):
Sets the
:attr:`~google.cloud.bigquery.table.PartitionRange.end`
property.
interval (Optional[int]):
Sets the
:attr:`~google.cloud.bigquery.table.PartitionRange.interval`
property.
_properties (Optional[dict]):
Private. Used to construct object from API resource.
"""
def __init__(self, start=None, end=None, interval=None, _properties=None):
if _properties is None:
_properties = {}
self._properties = _properties
if start is not None:
self.start = start
if end is not None:
self.end = end
if interval is not None:
self.interval = interval
@property
def start(self):
"""int: The start of range partitioning, inclusive."""
return _helpers._int_or_none(self._properties.get("start"))
@start.setter
def start(self, value):
self._properties["start"] = _helpers._str_or_none(value)
@property
def end(self):
"""int: The end of range partitioning, exclusive."""
return _helpers._int_or_none(self._properties.get("end"))
@end.setter
def end(self, value):
self._properties["end"] = _helpers._str_or_none(value)
@property
def interval(self):
"""int: The width of each interval."""
return _helpers._int_or_none(self._properties.get("interval"))
@interval.setter
def interval(self, value):
self._properties["interval"] = _helpers._str_or_none(value)
def _key(self):
return tuple(sorted(self._properties.items()))
def __eq__(self, other):
if not isinstance(other, PartitionRange):
return NotImplemented
return self._key() == other._key()
def __ne__(self, other):
return not self == other
def __repr__(self):
key_vals = ["{}={}".format(key, val) for key, val in self._key()]
return "PartitionRange({})".format(", ".join(key_vals))
__hash__ = None
class RangePartitioning(object):
"""Range-based partitioning configuration for a table.
.. note::
**Beta**. The integer range partitioning feature is in a pre-release
state and might change or have limited support.
Args:
range_ (Optional[google.cloud.bigquery.table.PartitionRange]):
Sets the
:attr:`google.cloud.bigquery.table.RangePartitioning.range_`
property.
field (Optional[str]):
Sets the
:attr:`google.cloud.bigquery.table.RangePartitioning.field`
property.
_properties (Optional[dict]):
Private. Used to construct object from API resource.
"""
def __init__(self, range_=None, field=None, _properties=None):
if _properties is None:
_properties = {}
self._properties = _properties
if range_ is not None:
self.range_ = range_
if field is not None:
self.field = field
# Trailing underscore to prevent conflict with built-in range() function.
@property
def range_(self):
"""google.cloud.bigquery.table.PartitionRange: Defines the
ranges for range partitioning.
Raises:
ValueError:
If the value is not a :class:`PartitionRange`.
"""
range_properties = self._properties.setdefault("range", {})
return PartitionRange(_properties=range_properties)
@range_.setter
def range_(self, value):
if not isinstance(value, PartitionRange):
raise ValueError("Expected a PartitionRange, but got {}.".format(value))
self._properties["range"] = value._properties
@property
def field(self):
"""str: The table is partitioned by this field.
The field must be a top-level ``NULLABLE`` / ``REQUIRED`` field. The
only supported type is ``INTEGER`` / ``INT64``.
"""
return self._properties.get("field")
@field.setter
def field(self, value):
self._properties["field"] = value
def _key(self):
return (("field", self.field), ("range_", self.range_))
def __eq__(self, other):
if not isinstance(other, RangePartitioning):
return NotImplemented
return self._key() == other._key()
def __ne__(self, other):
return not self == other
def __repr__(self):
key_vals = ["{}={}".format(key, repr(val)) for key, val in self._key()]
return "RangePartitioning({})".format(", ".join(key_vals))
__hash__ = None
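# Usage sketch (illustrative only, not part of the library source): configuring
# integer range partitioning on a table; the field name and bounds are
# assumptions made for the example.
#
#   table.range_partitioning = RangePartitioning(
#       field="customer_id",
#       range_=PartitionRange(start=0, end=100000, interval=10),
#   )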
class TimePartitioningType(object):
"""Specifies the type of time partitioning to perform."""
DAY = "DAY"
"""str: Generates one partition per day."""
HOUR = "HOUR"
"""str: Generates one partition per hour."""
MONTH = "MONTH"
"""str: Generates one partition per month."""
YEAR = "YEAR"
"""str: Generates one partition per year."""
class TimePartitioning(object):
"""Configures time-based partitioning for a table.
Args:
type_ (Optional[google.cloud.bigquery.table.TimePartitioningType]):
Specifies the type of time partitioning to perform. Defaults to
:attr:`~google.cloud.bigquery.table.TimePartitioningType.DAY`.
Supported values are:
* :attr:`~google.cloud.bigquery.table.TimePartitioningType.HOUR`
* :attr:`~google.cloud.bigquery.table.TimePartitioningType.DAY`
* :attr:`~google.cloud.bigquery.table.TimePartitioningType.MONTH`
* :attr:`~google.cloud.bigquery.table.TimePartitioningType.YEAR`
field (Optional[str]):
If set, the table is partitioned by this field. If not set, the
table is partitioned by pseudo column ``_PARTITIONTIME``. The field
must be a top-level ``TIMESTAMP``, ``DATETIME``, or ``DATE``
field. Its mode must be ``NULLABLE`` or ``REQUIRED``.
See the `time-unit column-partitioned tables guide
<https://cloud.google.com/bigquery/docs/creating-column-partitions>`_
in the BigQuery documentation.
expiration_ms(Optional[int]):
Number of milliseconds for which to keep the storage for a
partition.
require_partition_filter (Optional[bool]):
DEPRECATED: Use
:attr:`~google.cloud.bigquery.table.Table.require_partition_filter`,
instead.
"""
def __init__(
self, type_=None, field=None, expiration_ms=None, require_partition_filter=None
):
self._properties = {}
if type_ is None:
self.type_ = TimePartitioningType.DAY
else:
self.type_ = type_
if field is not None:
self.field = field
if expiration_ms is not None:
self.expiration_ms = expiration_ms
if require_partition_filter is not None:
self.require_partition_filter = require_partition_filter
@property
def type_(self):
"""google.cloud.bigquery.table.TimePartitioningType: The type of time
partitioning to use.
"""
return self._properties.get("type")
@type_.setter
def type_(self, value):
self._properties["type"] = value
@property
def field(self):
"""str: Field in the table to use for partitioning"""
return self._properties.get("field")
@field.setter
def field(self, value):
self._properties["field"] = value
@property
def expiration_ms(self):
"""int: Number of milliseconds to keep the storage for a partition."""
return _helpers._int_or_none(self._properties.get("expirationMs"))
@expiration_ms.setter
def expiration_ms(self, value):
if value is not None:
# Allow explicitly setting the expiration to None.
value = str(value)
self._properties["expirationMs"] = value
@property
def require_partition_filter(self):
"""bool: Specifies whether partition filters are required for queries
DEPRECATED: Use
:attr:`~google.cloud.bigquery.table.Table.require_partition_filter`,
instead.
"""
warnings.warn(
(
"TimePartitioning.require_partition_filter will be removed in "
"future versions. Please use Table.require_partition_filter "
"instead."
),
PendingDeprecationWarning,
stacklevel=2,
)
return self._properties.get("requirePartitionFilter")
@require_partition_filter.setter
def require_partition_filter(self, value):
warnings.warn(
(
"TimePartitioning.require_partition_filter will be removed in "
"future versions. Please use Table.require_partition_filter "
"instead."
),
PendingDeprecationWarning,
stacklevel=2,
)
self._properties["requirePartitionFilter"] = value
@classmethod
def from_api_repr(cls, api_repr: dict) -> "TimePartitioning":
"""Return a :class:`TimePartitioning` object deserialized from a dict.
This method creates a new ``TimePartitioning`` instance that points to
the ``api_repr`` parameter as its internal properties dict. This means
that when a ``TimePartitioning`` instance is stored as a property of
another object, any changes made at the higher level will also appear
here::
>>> time_partitioning = TimePartitioning()
>>> table.time_partitioning = time_partitioning
>>> table.time_partitioning.field = 'timecolumn'
>>> time_partitioning.field
'timecolumn'
Args:
api_repr (Mapping[str, str]):
The serialized representation of the TimePartitioning, such as
what is output by :meth:`to_api_repr`.
Returns:
google.cloud.bigquery.table.TimePartitioning:
The ``TimePartitioning`` object.
"""
instance = cls()
instance._properties = api_repr
return instance
def to_api_repr(self) -> dict:
"""Return a dictionary representing this object.
This method returns the properties dict of the ``TimePartitioning``
instance rather than making a copy. This means that when a
``TimePartitioning`` instance is stored as a property of another
object, any changes made at the higher level will also appear here.
Returns:
dict:
A dictionary representing the TimePartitioning object in
serialized form.
"""
return self._properties
def _key(self):
        # Because we are only "renaming" top-level keys, a shallow copy is
        # sufficient here.
properties = self._properties.copy()
# calling repr for non built-in type objects.
properties["type_"] = repr(properties.pop("type"))
if "field" in properties:
# calling repr for non built-in type objects.
properties["field"] = repr(properties["field"])
if "requirePartitionFilter" in properties:
properties["require_partition_filter"] = properties.pop(
"requirePartitionFilter"
)
if "expirationMs" in properties:
properties["expiration_ms"] = properties.pop("expirationMs")
return tuple(sorted(properties.items()))
def __eq__(self, other):
if not isinstance(other, TimePartitioning):
return NotImplemented
return self._key() == other._key()
def __ne__(self, other):
return not self == other
def __hash__(self):
return hash(self._key())
def __repr__(self):
key_vals = ["{}={}".format(key, val) for key, val in self._key()]
return "TimePartitioning({})".format(",".join(key_vals))
def _item_to_row(iterator, resource):
"""Convert a JSON row to the native object.
.. note::
This assumes that the ``schema`` attribute has been
added to the iterator after being created, which
should be done by the caller.
Args:
iterator (google.api_core.page_iterator.Iterator): The iterator that is currently in use.
resource (Dict): An item to be converted to a row.
Returns:
google.cloud.bigquery.table.Row: The next row in the page.
"""
return Row(
_helpers._row_tuple_from_json(resource, iterator.schema),
iterator._field_to_index,
)
def _row_iterator_page_columns(schema, response):
"""Make a generator of all the columns in a page from tabledata.list.
This enables creating a :class:`pandas.DataFrame` and other
column-oriented data structures such as :class:`pyarrow.RecordBatch`
"""
columns = []
rows = response.get("rows", [])
def get_column_data(field_index, field):
for row in rows:
yield _helpers._field_from_json(row["f"][field_index]["v"], field)
for field_index, field in enumerate(schema):
columns.append(get_column_data(field_index, field))
return columns
# pylint: disable=unused-argument
def _rows_page_start(iterator, page, response):
"""Grab total rows when :class:`~google.cloud.iterator.Page` starts.
Args:
iterator (google.api_core.page_iterator.Iterator): The iterator that is currently in use.
page (google.api_core.page_iterator.Page): The page that was just created.
response (Dict): The JSON API response for a page of rows in a table.
"""
# Make a (lazy) copy of the page in column-oriented format for use in data
# science packages.
page._columns = _row_iterator_page_columns(iterator._schema, response)
total_rows = response.get("totalRows")
if total_rows is not None:
total_rows = int(total_rows)
iterator._total_rows = total_rows
# pylint: enable=unused-argument
def _table_arg_to_table_ref(value, default_project=None):
"""Helper to convert a string or Table to TableReference.
This function keeps TableReference and other kinds of objects unchanged.
"""
if isinstance(value, str):
value = TableReference.from_string(value, default_project=default_project)
if isinstance(value, (Table, TableListItem)):
value = value.reference
return value
def _table_arg_to_table(value, default_project=None):
"""Helper to convert a string or TableReference to a Table.
This function keeps Table and other kinds of objects unchanged.
"""
if isinstance(value, str):
value = TableReference.from_string(value, default_project=default_project)
if isinstance(value, TableReference):
value = Table(value)
if isinstance(value, TableListItem):
newvalue = Table(value.reference)
newvalue._properties = value._properties
value = newvalue
return value
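# Usage sketch (illustrative only, not part of the library source): both
# helpers above accept a string, TableReference, Table, or TableListItem and
# normalize it; the table name is an assumption made for the example.
#
#   _table_arg_to_table_ref("my-project.mydataset.mytable")
#   # -> a TableReference for that table
#   _table_arg_to_table("my-project.mydataset.mytable")
#   # -> a Table wrapping the same reference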
| 35.431263 | 109 | 0.621181 |
a3e234ecd6e035d8e93540b9bbc4c39277986474 | 4,829 | py | Python | doc/conf.py | thefab/mfcom-1 | f6a209edb22a782dfb0cf63cc0f62433b1b6e961 | ["BSD-3-Clause"] | null | null | null | doc/conf.py | thefab/mfcom-1 | f6a209edb22a782dfb0cf63cc0f62433b1b6e961 | ["BSD-3-Clause"] | null | null | null | doc/conf.py | thefab/mfcom-1 | f6a209edb22a782dfb0cf63cc0f62433b1b6e961 | ["BSD-3-Clause"] | null | null | null |
# -*- coding: utf-8 -*-
import os
import sphinx_rtd_theme
# sys.path.insert(0, os.path.abspath('.'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = ['sphinx.ext.autodoc',
'sphinx.ext.viewcode',
'sphinx.ext.napoleon']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffixes as a list of strings:
#
#source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'mfcom'
copyright = u'2017, MetWork'
author = u'MetWork'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = ".".join(os.environ.get('MODULE_VERSION',
'unknown.unknown').split('.')[0:-1])
# The full version, including alpha/beta/rc tags.
release = os.environ.get('MODULE_VERSION', 'unknown')
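# For example (an assumption, not taken from the original file): with
# MODULE_VERSION="1.2.3" the lines above yield version = "1.2" and
# release = "1.2.3"; when the variable is unset, both fall back to "unknown".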
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# These patterns also affect html_static_path and html_extra_path.
exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
# html_theme = 'alabaster'
html_theme = 'sphinx_rtd_theme'
html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#
# html_theme_options = {}
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# -- Options for HTMLHelp output ------------------------------------------
# Output file base name for HTML help builder.
htmlhelp_basename = 'mfcomdoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#
# 'preamble': '',
# Latex figure (float) alignment
#
# 'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'mfcom.tex',
u'mfcom Documentation',
u'MetWork', 'manual'),
]
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'mfcom', u'mfcom Documentation',
[author], 1)
]
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'mfcom', u'mfcom Documentation',
author, 'mfcom', 'One line description of project.',
'Miscellaneous'),
]
napoleon_google_docstring = True
napoleon_numpy_docstring = False
napoleon_include_private_with_doc = False
napoleon_include_special_with_doc = False
napoleon_use_admonition_for_examples = False
napoleon_use_admonition_for_notes = False
napoleon_use_admonition_for_references = False
napoleon_use_ivar = True
napoleon_use_param = False
def skip(app, what, name, obj, skip, options):
if name == "__init__":
return False
return skip
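# Sketch of how a handler like skip() is usually registered with autodoc
# (an assumption -- the original file may wire it up differently):
#
# def setup(app):
#     app.connect("autodoc-skip-member", skip)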
| 29.993789 | 78 | 0.674881 |
ff91304d7511f7521e26f81283629ce1d723360d | 30,598 | py | Python | test/test_frames.py | to-bee/hyperframe | 554fcad68beffae57dc8335ed5e02c177bfd6000 | ["MIT"] | 30 | 2015-09-21T09:31:15.000Z | 2022-01-23T18:31:34.000Z | test/test_frames.py | to-bee/hyperframe | 554fcad68beffae57dc8335ed5e02c177bfd6000 | ["MIT"] | 50 | 2015-07-20T07:52:27.000Z | 2021-06-06T14:55:16.000Z | test/test_frames.py | to-bee/hyperframe | 554fcad68beffae57dc8335ed5e02c177bfd6000 | ["MIT"] | 25 | 2015-09-17T15:54:37.000Z | 2022-03-18T21:24:13.000Z |
# -*- coding: utf-8 -*-
from hyperframe.frame import (
Frame, Flags, DataFrame, PriorityFrame, RstStreamFrame, SettingsFrame,
PushPromiseFrame, PingFrame, GoAwayFrame, WindowUpdateFrame, HeadersFrame,
ContinuationFrame, AltSvcFrame, ExtensionFrame
)
from hyperframe.exceptions import (
UnknownFrameError, InvalidPaddingError, InvalidFrameError, InvalidDataError
)
import pytest
def decode_frame(frame_data):
f, length = Frame.parse_frame_header(frame_data[:9])
f.parse_body(memoryview(frame_data[9:9 + length]))
assert 9 + length == len(frame_data)
return f
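# Illustrative round trip through the helper above (a sketch, not one of the
# original tests): serialize a DATA frame and parse it back.
#
#   f = DataFrame(1, b"hello")
#   f.flags = set(["END_STREAM"])
#   assert decode_frame(f.serialize()).data == b"hello"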
class TestGeneralFrameBehaviour:
def test_base_frame_ignores_flags(self):
f = Frame(0)
flags = f.parse_flags(0xFF)
assert not flags
assert isinstance(flags, Flags)
def test_base_frame_cant_serialize(self):
f = Frame(0)
with pytest.raises(NotImplementedError):
f.serialize()
def test_base_frame_cant_parse_body(self):
data = b''
f = Frame(0)
with pytest.raises(NotImplementedError):
f.parse_body(data)
def test_parse_frame_header_unknown_type_strict(self):
with pytest.raises(UnknownFrameError) as excinfo:
Frame.parse_frame_header(
b'\x00\x00\x59\xFF\x00\x00\x00\x00\x01',
strict=True
)
exception = excinfo.value
assert exception.frame_type == 0xFF
assert exception.length == 0x59
assert str(exception) == (
"UnknownFrameError: Unknown frame type 0xFF received, "
"length 89 bytes"
)
def test_parse_frame_header_ignore_first_bit_of_stream_id(self):
s = b'\x00\x00\x00\x06\x01\x80\x00\x00\x00'
f, _ = Frame.parse_frame_header(s)
assert f.stream_id == 0
def test_parse_frame_header_unknown_type(self):
frame, length = Frame.parse_frame_header(
b'\x00\x00\x59\xFF\x00\x00\x00\x00\x01'
)
assert frame.type == 0xFF
assert length == 0x59
assert isinstance(frame, ExtensionFrame)
assert frame.stream_id == 1
def test_flags_are_persisted(self):
frame, length = Frame.parse_frame_header(
b'\x00\x00\x59\xFF\x09\x00\x00\x00\x01'
)
assert frame.type == 0xFF
assert length == 0x59
assert frame.flag_byte == 0x09
def test_parse_body_unknown_type(self):
frame = decode_frame(
b'\x00\x00\x0C\xFF\x00\x00\x00\x00\x01hello world!'
)
assert frame.body == b'hello world!'
assert frame.body_len == 12
assert frame.stream_id == 1
def test_can_round_trip_unknown_frames(self):
frame_data = b'\x00\x00\x0C\xFF\x00\x00\x00\x00\x01hello world!'
f = decode_frame(frame_data)
assert f.serialize() == frame_data
def test_repr(self, monkeypatch):
f = Frame(0)
monkeypatch.setattr(Frame, "serialize_body", lambda _: b"body")
assert repr(f) == (
"Frame(stream_id=0, flags=[]): <hex:626f6479>"
)
f.stream_id = 42
f.flags = ["END_STREAM", "PADDED"]
assert repr(f) == (
"Frame(stream_id=42, flags=['END_STREAM', 'PADDED']): <hex:626f6479>"
)
monkeypatch.setattr(Frame, "serialize_body", lambda _: b"A"*25)
assert repr(f) == (
"Frame(stream_id=42, flags=['END_STREAM', 'PADDED']): <hex:{}...>".format("41"*10)
)
def test_frame_explain(self, capsys):
d = b'\x00\x00\x08\x00\x01\x00\x00\x00\x01testdata'
Frame.explain(memoryview(d))
captured = capsys.readouterr()
assert captured.out.strip() == "DataFrame(stream_id=1, flags=['END_STREAM']): <hex:7465737464617461>"
def test_cannot_parse_invalid_frame_header(self):
with pytest.raises(InvalidFrameError):
Frame.parse_frame_header(b'\x00\x00\x08\x00\x01\x00\x00\x00')
class TestDataFrame:
payload = b'\x00\x00\x08\x00\x01\x00\x00\x00\x01testdata'
payload_with_padding = (
b'\x00\x00\x13\x00\x09\x00\x00\x00\x01\x0Atestdata' + b'\0' * 10
)
def test_repr(self):
f = DataFrame(1, b"testdata")
assert repr(f).endswith("<hex:7465737464617461>")
def test_data_frame_has_correct_flags(self):
f = DataFrame(1)
flags = f.parse_flags(0xFF)
assert flags == set([
'END_STREAM', 'PADDED'
])
@pytest.mark.parametrize('data', [
b'testdata',
memoryview(b'testdata')
])
def test_data_frame_serializes_properly(self, data):
f = DataFrame(1)
f.flags = set(['END_STREAM'])
f.data = data
s = f.serialize()
assert s == self.payload
def test_data_frame_with_padding_serializes_properly(self):
f = DataFrame(1)
f.flags = set(['END_STREAM', 'PADDED'])
f.data = b'testdata'
f.pad_length = 10
s = f.serialize()
assert s == self.payload_with_padding
def test_data_frame_parses_properly(self):
f = decode_frame(self.payload)
assert isinstance(f, DataFrame)
assert f.flags == set(['END_STREAM'])
assert f.pad_length == 0
assert f.data == b'testdata'
assert f.body_len == 8
def test_data_frame_with_padding_parses_properly(self):
f = decode_frame(self.payload_with_padding)
assert isinstance(f, DataFrame)
assert f.flags == set(['END_STREAM', 'PADDED'])
assert f.pad_length == 10
assert f.data == b'testdata'
assert f.body_len == 19
def test_data_frame_with_invalid_padding_errors(self):
with pytest.raises(InvalidFrameError):
decode_frame(self.payload_with_padding[:9])
def test_data_frame_with_padding_calculates_flow_control_len(self):
f = DataFrame(1)
f.flags = set(['PADDED'])
f.data = b'testdata'
f.pad_length = 10
assert f.flow_controlled_length == 19
def test_data_frame_zero_length_padding_calculates_flow_control_len(self):
f = DataFrame(1)
f.flags = set(['PADDED'])
f.data = b'testdata'
f.pad_length = 0
assert f.flow_controlled_length == len(b'testdata') + 1
def test_data_frame_without_padding_calculates_flow_control_len(self):
f = DataFrame(1)
f.data = b'testdata'
assert f.flow_controlled_length == 8
def test_data_frame_comes_on_a_stream(self):
with pytest.raises(InvalidDataError):
DataFrame(0)
def test_long_data_frame(self):
f = DataFrame(1)
# Use more than 256 bytes of data to force setting higher bits.
f.data = b'\x01' * 300
data = f.serialize()
# The top three bytes should be numerically equal to 300. That means
# they should read 00 01 2C.
# The weird double index trick is to ensure this test behaves equally
# on Python 2 and Python 3.
assert data[0] == b'\x00'[0]
assert data[1] == b'\x01'[0]
assert data[2] == b'\x2C'[0]
def test_body_length_behaves_correctly(self):
f = DataFrame(1)
f.data = b'\x01' * 300
# Initially the body length is zero. For now this is incidental, but
# I'm going to test it to ensure that the behaviour is codified. We
# should change this test if we change that.
assert f.body_len == 0
f.serialize()
assert f.body_len == 300
def test_data_frame_with_invalid_padding_fails_to_parse(self):
# This frame has a padding length of 6 bytes, but a total length of
# only 5.
data = b'\x00\x00\x05\x00\x0b\x00\x00\x00\x01\x06\x54\x65\x73\x74'
with pytest.raises(InvalidPaddingError):
decode_frame(data)
def test_data_frame_with_no_length_parses(self):
# Fixes issue with empty data frames raising InvalidPaddingError.
f = DataFrame(1)
f.data = b''
data = f.serialize()
new_frame = decode_frame(data)
assert new_frame.data == b''
class TestPriorityFrame:
payload = b'\x00\x00\x05\x02\x00\x00\x00\x00\x01\x80\x00\x00\x04\x40'
def test_repr(self):
f = PriorityFrame(1)
assert repr(f).endswith("exclusive=False, depends_on=0, stream_weight=0")
f.exclusive = True
f.depends_on = 0x04
f.stream_weight = 64
assert repr(f).endswith("exclusive=True, depends_on=4, stream_weight=64")
def test_priority_frame_has_no_flags(self):
f = PriorityFrame(1)
flags = f.parse_flags(0xFF)
assert flags == set()
assert isinstance(flags, Flags)
def test_priority_frame_default_serializes_properly(self):
f = PriorityFrame(1)
assert f.serialize() == (
b'\x00\x00\x05\x02\x00\x00\x00\x00\x01\x00\x00\x00\x00\x00'
)
def test_priority_frame_with_all_data_serializes_properly(self):
f = PriorityFrame(1)
f.depends_on = 0x04
f.stream_weight = 64
f.exclusive = True
assert f.serialize() == self.payload
def test_priority_frame_with_all_data_parses_properly(self):
f = decode_frame(self.payload)
assert isinstance(f, PriorityFrame)
assert f.flags == set()
assert f.depends_on == 4
assert f.stream_weight == 64
assert f.exclusive is True
assert f.body_len == 5
def test_priority_frame_invalid(self):
with pytest.raises(InvalidFrameError):
decode_frame(
b'\x00\x00\x06\x02\x00\x00\x00\x00\x01\x80\x00\x00\x04\x40\xFF'
)
def test_priority_frame_comes_on_a_stream(self):
with pytest.raises(InvalidDataError):
PriorityFrame(0)
def test_short_priority_frame_errors(self):
with pytest.raises(InvalidFrameError):
decode_frame(self.payload[:-2])
class TestRstStreamFrame:
def test_repr(self):
f = RstStreamFrame(1)
assert repr(f).endswith("error_code=0")
f.error_code = 420
assert repr(f).endswith("error_code=420")
def test_rst_stream_frame_has_no_flags(self):
f = RstStreamFrame(1)
flags = f.parse_flags(0xFF)
assert not flags
assert isinstance(flags, Flags)
def test_rst_stream_frame_serializes_properly(self):
f = RstStreamFrame(1)
f.error_code = 420
s = f.serialize()
assert s == b'\x00\x00\x04\x03\x00\x00\x00\x00\x01\x00\x00\x01\xa4'
def test_rst_stream_frame_parses_properly(self):
s = b'\x00\x00\x04\x03\x00\x00\x00\x00\x01\x00\x00\x01\xa4'
f = decode_frame(s)
assert isinstance(f, RstStreamFrame)
assert f.flags == set()
assert f.error_code == 420
assert f.body_len == 4
def test_rst_stream_frame_comes_on_a_stream(self):
with pytest.raises(InvalidDataError):
RstStreamFrame(0)
def test_rst_stream_frame_must_have_body_length_four(self):
f = RstStreamFrame(1)
with pytest.raises(InvalidFrameError):
f.parse_body(b'\x01')
class TestSettingsFrame:
serialized = (
b'\x00\x00\x2A\x04\x01\x00\x00\x00\x00' + # Frame header
b'\x00\x01\x00\x00\x10\x00' + # HEADER_TABLE_SIZE
b'\x00\x02\x00\x00\x00\x00' + # ENABLE_PUSH
b'\x00\x03\x00\x00\x00\x64' + # MAX_CONCURRENT_STREAMS
b'\x00\x04\x00\x00\xFF\xFF' + # INITIAL_WINDOW_SIZE
b'\x00\x05\x00\x00\x40\x00' + # MAX_FRAME_SIZE
b'\x00\x06\x00\x00\xFF\xFF' + # MAX_HEADER_LIST_SIZE
b'\x00\x08\x00\x00\x00\x01' # ENABLE_CONNECT_PROTOCOL
)
settings = {
SettingsFrame.HEADER_TABLE_SIZE: 4096,
SettingsFrame.ENABLE_PUSH: 0,
SettingsFrame.MAX_CONCURRENT_STREAMS: 100,
SettingsFrame.INITIAL_WINDOW_SIZE: 65535,
SettingsFrame.MAX_FRAME_SIZE: 16384,
SettingsFrame.MAX_HEADER_LIST_SIZE: 65535,
SettingsFrame.ENABLE_CONNECT_PROTOCOL: 1,
}
def test_repr(self):
f = SettingsFrame()
assert repr(f).endswith("settings={}")
f.settings[SettingsFrame.MAX_FRAME_SIZE] = 16384
assert repr(f).endswith("settings={5: 16384}")
def test_settings_frame_has_only_one_flag(self):
f = SettingsFrame()
flags = f.parse_flags(0xFF)
assert flags == set(['ACK'])
def test_settings_frame_serializes_properly(self):
f = SettingsFrame()
f.parse_flags(0xFF)
f.settings = self.settings
s = f.serialize()
assert s == self.serialized
def test_settings_frame_with_settings(self):
f = SettingsFrame(settings=self.settings)
assert f.settings == self.settings
def test_settings_frame_without_settings(self):
f = SettingsFrame()
assert f.settings == {}
def test_settings_frame_with_ack(self):
f = SettingsFrame(flags=('ACK',))
assert 'ACK' in f.flags
def test_settings_frame_ack_and_settings(self):
with pytest.raises(InvalidDataError):
SettingsFrame(settings=self.settings, flags=('ACK',))
with pytest.raises(InvalidDataError):
decode_frame(self.serialized)
def test_settings_frame_parses_properly(self):
# unset the ACK flag to allow correct parsing
data = self.serialized[:4] + b"\x00" + self.serialized[5:]
f = decode_frame(data)
assert isinstance(f, SettingsFrame)
assert f.flags == set()
assert f.settings == self.settings
assert f.body_len == 42
def test_settings_frame_invalid_body_length(self):
with pytest.raises(InvalidFrameError):
decode_frame(
b'\x00\x00\x2A\x04\x00\x00\x00\x00\x00\xFF\xFF\xFF\xFF'
)
def test_settings_frames_never_have_streams(self):
with pytest.raises(InvalidDataError):
SettingsFrame(1)
def test_short_settings_frame_errors(self):
with pytest.raises(InvalidDataError):
decode_frame(self.serialized[:-2])
class TestPushPromiseFrame:
def test_repr(self):
f = PushPromiseFrame(1)
assert repr(f).endswith("promised_stream_id=0, data=None")
f.promised_stream_id = 4
f.data = b"testdata"
assert repr(f).endswith("promised_stream_id=4, data=<hex:7465737464617461>")
def test_push_promise_frame_flags(self):
f = PushPromiseFrame(1)
flags = f.parse_flags(0xFF)
assert flags == set(['END_HEADERS', 'PADDED'])
def test_push_promise_frame_serializes_properly(self):
f = PushPromiseFrame(1)
f.flags = set(['END_HEADERS'])
f.promised_stream_id = 4
f.data = b'hello world'
s = f.serialize()
assert s == (
b'\x00\x00\x0F\x05\x04\x00\x00\x00\x01' +
b'\x00\x00\x00\x04' +
b'hello world'
)
def test_push_promise_frame_parses_properly(self):
s = (
b'\x00\x00\x0F\x05\x04\x00\x00\x00\x01' +
b'\x00\x00\x00\x04' +
b'hello world'
)
f = decode_frame(s)
assert isinstance(f, PushPromiseFrame)
assert f.flags == set(['END_HEADERS'])
assert f.promised_stream_id == 4
assert f.data == b'hello world'
assert f.body_len == 15
def test_push_promise_frame_with_padding(self):
s = (
b'\x00\x00\x17\x05\x0C\x00\x00\x00\x01' +
b'\x07\x00\x00\x00\x04' +
b'hello world' +
b'padding'
)
f = decode_frame(s)
assert isinstance(f, PushPromiseFrame)
assert f.flags == set(['END_HEADERS', 'PADDED'])
assert f.promised_stream_id == 4
assert f.data == b'hello world'
assert f.body_len == 23
def test_push_promise_frame_with_invalid_padding_fails_to_parse(self):
# This frame has a padding length of 6 bytes, but a total length of
# only 5.
data = b'\x00\x00\x05\x05\x08\x00\x00\x00\x01\x06\x54\x65\x73\x74'
with pytest.raises(InvalidPaddingError):
decode_frame(data)
def test_push_promise_frame_with_no_length_parses(self):
        # Fixes issue with empty push promise frames raising InvalidPaddingError.
f = PushPromiseFrame(1, 2)
f.data = b''
data = f.serialize()
new_frame = decode_frame(data)
assert new_frame.data == b''
def test_push_promise_frame_invalid(self):
data = PushPromiseFrame(1, 0).serialize()
with pytest.raises(InvalidDataError):
decode_frame(data)
data = PushPromiseFrame(1, 3).serialize()
with pytest.raises(InvalidDataError):
decode_frame(data)
def test_short_push_promise_errors(self):
s = (
b'\x00\x00\x0F\x05\x04\x00\x00\x00\x01' +
b'\x00\x00\x00' # One byte short
)
with pytest.raises(InvalidFrameError):
decode_frame(s)
class TestPingFrame:
def test_repr(self):
f = PingFrame()
assert repr(f).endswith("opaque_data=b''")
f.opaque_data = b'hello'
assert repr(f).endswith("opaque_data=b'hello'")
def test_ping_frame_has_only_one_flag(self):
f = PingFrame()
flags = f.parse_flags(0xFF)
assert flags == set(['ACK'])
def test_ping_frame_serializes_properly(self):
f = PingFrame()
f.parse_flags(0xFF)
f.opaque_data = b'\x01\x02'
s = f.serialize()
assert s == (
b'\x00\x00\x08\x06\x01\x00\x00\x00\x00\x01\x02\x00\x00\x00\x00\x00'
b'\x00'
)
def test_no_more_than_8_octets(self):
f = PingFrame()
f.opaque_data = b'\x01\x02\x03\x04\x05\x06\x07\x08\x09'
with pytest.raises(InvalidFrameError):
f.serialize()
def test_ping_frame_parses_properly(self):
s = (
b'\x00\x00\x08\x06\x01\x00\x00\x00\x00\x01\x02\x00\x00\x00\x00\x00'
b'\x00'
)
f = decode_frame(s)
assert isinstance(f, PingFrame)
assert f.flags == set(['ACK'])
assert f.opaque_data == b'\x01\x02\x00\x00\x00\x00\x00\x00'
assert f.body_len == 8
def test_ping_frame_never_has_a_stream(self):
with pytest.raises(InvalidDataError):
PingFrame(1)
def test_ping_frame_has_no_more_than_body_length_8(self):
f = PingFrame()
with pytest.raises(InvalidFrameError):
f.parse_body(b'\x01\x02\x03\x04\x05\x06\x07\x08\x09')
def test_ping_frame_has_no_less_than_body_length_8(self):
f = PingFrame()
with pytest.raises(InvalidFrameError):
f.parse_body(b'\x01\x02\x03\x04\x05\x06\x07')
class TestGoAwayFrame:
def test_repr(self):
f = GoAwayFrame()
assert repr(f).endswith("last_stream_id=0, error_code=0, additional_data=b''")
f.last_stream_id = 64
f.error_code = 32
f.additional_data = b'hello'
assert repr(f).endswith("last_stream_id=64, error_code=32, additional_data=b'hello'")
def test_go_away_has_no_flags(self):
f = GoAwayFrame()
flags = f.parse_flags(0xFF)
assert not flags
assert isinstance(flags, Flags)
def test_goaway_serializes_properly(self):
f = GoAwayFrame()
f.last_stream_id = 64
f.error_code = 32
f.additional_data = b'hello'
s = f.serialize()
assert s == (
b'\x00\x00\x0D\x07\x00\x00\x00\x00\x00' + # Frame header
b'\x00\x00\x00\x40' + # Last Stream ID
b'\x00\x00\x00\x20' + # Error Code
b'hello' # Additional data
)
def test_goaway_frame_parses_properly(self):
s = (
b'\x00\x00\x0D\x07\x00\x00\x00\x00\x00' + # Frame header
b'\x00\x00\x00\x40' + # Last Stream ID
b'\x00\x00\x00\x20' + # Error Code
b'hello' # Additional data
)
f = decode_frame(s)
assert isinstance(f, GoAwayFrame)
assert f.flags == set()
assert f.additional_data == b'hello'
assert f.body_len == 13
s = (
b'\x00\x00\x08\x07\x00\x00\x00\x00\x00' + # Frame header
b'\x00\x00\x00\x40' + # Last Stream ID
b'\x00\x00\x00\x20' + # Error Code
b'' # Additional data
)
f = decode_frame(s)
assert isinstance(f, GoAwayFrame)
assert f.flags == set()
assert f.additional_data == b''
assert f.body_len == 8
def test_goaway_frame_never_has_a_stream(self):
with pytest.raises(InvalidDataError):
GoAwayFrame(1)
def test_short_goaway_frame_errors(self):
s = (
b'\x00\x00\x0D\x07\x00\x00\x00\x00\x00' + # Frame header
b'\x00\x00\x00\x40' + # Last Stream ID
b'\x00\x00\x00' # short Error Code
)
with pytest.raises(InvalidFrameError):
decode_frame(s)
class TestWindowUpdateFrame:
def test_repr(self):
f = WindowUpdateFrame(0)
assert repr(f).endswith("window_increment=0")
f.stream_id = 1
f.window_increment = 512
assert repr(f).endswith("window_increment=512")
def test_window_update_has_no_flags(self):
f = WindowUpdateFrame(0)
flags = f.parse_flags(0xFF)
assert not flags
assert isinstance(flags, Flags)
def test_window_update_serializes_properly(self):
f = WindowUpdateFrame(0)
f.window_increment = 512
s = f.serialize()
assert s == b'\x00\x00\x04\x08\x00\x00\x00\x00\x00\x00\x00\x02\x00'
def test_windowupdate_frame_parses_properly(self):
s = b'\x00\x00\x04\x08\x00\x00\x00\x00\x00\x00\x00\x02\x00'
f = decode_frame(s)
assert isinstance(f, WindowUpdateFrame)
assert f.flags == set()
assert f.window_increment == 512
assert f.body_len == 4
def test_short_windowupdate_frame_errors(self):
s = b'\x00\x00\x04\x08\x00\x00\x00\x00\x00\x00\x00\x02' # -1 byte
with pytest.raises(InvalidFrameError):
decode_frame(s)
s = b'\x00\x00\x05\x08\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02'
with pytest.raises(InvalidFrameError):
decode_frame(s)
with pytest.raises(InvalidDataError):
decode_frame(WindowUpdateFrame(0).serialize())
with pytest.raises(InvalidDataError):
decode_frame(WindowUpdateFrame(2**31).serialize())
class TestHeadersFrame:
def test_repr(self):
f = HeadersFrame(1)
assert repr(f).endswith("exclusive=False, depends_on=0, stream_weight=0, data=None")
f.data = b'hello'
f.exclusive = True
f.depends_on = 42
f.stream_weight = 64
assert repr(f).endswith("exclusive=True, depends_on=42, stream_weight=64, data=<hex:68656c6c6f>")
def test_headers_frame_flags(self):
f = HeadersFrame(1)
flags = f.parse_flags(0xFF)
assert flags == set(['END_STREAM', 'END_HEADERS',
'PADDED', 'PRIORITY'])
def test_headers_frame_serializes_properly(self):
f = HeadersFrame(1)
f.flags = set(['END_STREAM', 'END_HEADERS'])
f.data = b'hello world'
s = f.serialize()
assert s == (
b'\x00\x00\x0B\x01\x05\x00\x00\x00\x01' +
b'hello world'
)
def test_headers_frame_parses_properly(self):
s = (
b'\x00\x00\x0B\x01\x05\x00\x00\x00\x01' +
b'hello world'
)
f = decode_frame(s)
assert isinstance(f, HeadersFrame)
assert f.flags == set(['END_STREAM', 'END_HEADERS'])
assert f.data == b'hello world'
assert f.body_len == 11
def test_headers_frame_with_priority_parses_properly(self):
# This test also tests that we can receive a HEADERS frame with no
# actual headers on it. This is technically possible.
s = (
b'\x00\x00\x05\x01\x20\x00\x00\x00\x01' +
b'\x80\x00\x00\x04\x40'
)
f = decode_frame(s)
assert isinstance(f, HeadersFrame)
assert f.flags == set(['PRIORITY'])
assert f.data == b''
assert f.depends_on == 4
assert f.stream_weight == 64
assert f.exclusive is True
assert f.body_len == 5
def test_headers_frame_with_priority_serializes_properly(self):
# This test also tests that we can receive a HEADERS frame with no
# actual headers on it. This is technically possible.
s = (
b'\x00\x00\x05\x01\x20\x00\x00\x00\x01' +
b'\x80\x00\x00\x04\x40'
)
f = HeadersFrame(1)
f.flags = set(['PRIORITY'])
f.data = b''
f.depends_on = 4
f.stream_weight = 64
f.exclusive = True
assert f.serialize() == s
def test_headers_frame_with_invalid_padding_fails_to_parse(self):
# This frame has a padding length of 6 bytes, but a total length of
# only 5.
data = b'\x00\x00\x05\x01\x08\x00\x00\x00\x01\x06\x54\x65\x73\x74'
with pytest.raises(InvalidPaddingError):
decode_frame(data)
def test_headers_frame_with_no_length_parses(self):
        # Fixes issue with empty headers frames raising InvalidPaddingError.
f = HeadersFrame(1)
f.data = b''
data = f.serialize()
new_frame = decode_frame(data)
assert new_frame.data == b''
class TestContinuationFrame:
def test_repr(self):
f = ContinuationFrame(1)
assert repr(f).endswith("data=None")
f.data = b'hello'
assert repr(f).endswith("data=<hex:68656c6c6f>")
def test_continuation_frame_flags(self):
f = ContinuationFrame(1)
flags = f.parse_flags(0xFF)
assert flags == set(['END_HEADERS'])
def test_continuation_frame_serializes(self):
f = ContinuationFrame(1)
f.parse_flags(0x04)
f.data = b'hello world'
s = f.serialize()
assert s == (
b'\x00\x00\x0B\x09\x04\x00\x00\x00\x01' +
b'hello world'
)
def test_continuation_frame_parses_properly(self):
s = b'\x00\x00\x0B\x09\x04\x00\x00\x00\x01hello world'
f = decode_frame(s)
assert isinstance(f, ContinuationFrame)
assert f.flags == set(['END_HEADERS'])
assert f.data == b'hello world'
assert f.body_len == 11
class TestAltSvcFrame:
payload_with_origin = (
b'\x00\x00\x31' # Length
b'\x0A' # Type
b'\x00' # Flags
b'\x00\x00\x00\x00' # Stream ID
b'\x00\x0B' # Origin len
b'example.com' # Origin
b'h2="alt.example.com:8000", h2=":443"' # Field Value
)
payload_without_origin = (
b'\x00\x00\x13' # Length
b'\x0A' # Type
b'\x00' # Flags
b'\x00\x00\x00\x01' # Stream ID
b'\x00\x00' # Origin len
b'' # Origin
b'h2=":8000"; ma=60' # Field Value
)
payload_with_origin_and_stream = (
b'\x00\x00\x36' # Length
b'\x0A' # Type
b'\x00' # Flags
b'\x00\x00\x00\x01' # Stream ID
b'\x00\x0B' # Origin len
b'example.com' # Origin
b'Alt-Svc: h2=":443"; ma=2592000; persist=1' # Field Value
)
def test_repr(self):
f = AltSvcFrame(0)
assert repr(f).endswith("origin=b'', field=b''")
f.field = b'h2="alt.example.com:8000", h2=":443"'
assert repr(f).endswith("origin=b'', field=b'h2=\"alt.example.com:8000\", h2=\":443\"'")
f.origin = b'example.com'
assert repr(f).endswith("origin=b'example.com', field=b'h2=\"alt.example.com:8000\", h2=\":443\"'")
def test_altsvc_frame_flags(self):
f = AltSvcFrame(0)
flags = f.parse_flags(0xFF)
assert flags == set()
def test_altsvc_frame_with_origin_serializes_properly(self):
f = AltSvcFrame(0)
f.origin = b'example.com'
f.field = b'h2="alt.example.com:8000", h2=":443"'
s = f.serialize()
assert s == self.payload_with_origin
def test_altsvc_frame_with_origin_parses_properly(self):
f = decode_frame(self.payload_with_origin)
assert isinstance(f, AltSvcFrame)
assert f.origin == b'example.com'
assert f.field == b'h2="alt.example.com:8000", h2=":443"'
assert f.body_len == 49
assert f.stream_id == 0
def test_altsvc_frame_without_origin_serializes_properly(self):
f = AltSvcFrame(1, origin=b'', field=b'h2=":8000"; ma=60')
s = f.serialize()
assert s == self.payload_without_origin
def test_altsvc_frame_without_origin_parses_properly(self):
f = decode_frame(self.payload_without_origin)
assert isinstance(f, AltSvcFrame)
assert f.origin == b''
assert f.field == b'h2=":8000"; ma=60'
assert f.body_len == 19
assert f.stream_id == 1
def test_altsvc_frame_with_origin_and_stream_serializes_properly(self):
# This frame is not valid, but we allow it to be serialized anyway.
f = AltSvcFrame(1)
f.origin = b'example.com'
f.field = b'Alt-Svc: h2=":443"; ma=2592000; persist=1'
assert f.serialize() == self.payload_with_origin_and_stream
def test_short_altsvc_frame_errors(self):
with pytest.raises(InvalidFrameError):
decode_frame(self.payload_with_origin[:12])
with pytest.raises(InvalidFrameError):
decode_frame(self.payload_with_origin[:10])
def test_altsvc_with_unicode_origin_fails(self):
with pytest.raises(InvalidDataError):
AltSvcFrame(
stream_id=0, origin=u'hello', field=b'h2=":8000"; ma=60'
)
def test_altsvc_with_unicode_field_fails(self):
with pytest.raises(InvalidDataError):
AltSvcFrame(
stream_id=0, origin=b'hello', field=u'h2=":8000"; ma=60'
)
class TestExtensionFrame:
def test_repr(self):
f = ExtensionFrame(0xFF, 1, 42, b'hello')
assert repr(f).endswith("type=255, flag_byte=42, body=<hex:68656c6c6f>")
| 32.865736
| 109
| 0.605432
|
7607ab6b187fb9c402bb8a8d57b9520d45c84a4f
| 161
|
py
|
Python
|
ejercicioA/empresa.py
|
Germiprogramer/EJERCICIOS-DE-AGREGACI-N-Y-COMPOSICI-N-DE-POO
|
0bead93f86801614c3aa3c2ed29a5559da90990c
|
[
"Apache-2.0"
] | null | null | null |
ejercicioA/empresa.py
|
Germiprogramer/EJERCICIOS-DE-AGREGACI-N-Y-COMPOSICI-N-DE-POO
|
0bead93f86801614c3aa3c2ed29a5559da90990c
|
[
"Apache-2.0"
] | null | null | null |
ejercicioA/empresa.py
|
Germiprogramer/EJERCICIOS-DE-AGREGACI-N-Y-COMPOSICI-N-DE-POO
|
0bead93f86801614c3aa3c2ed29a5559da90990c
|
[
"Apache-2.0"
] | null | null | null |
class Empresa:
def __init__(self, nombre, edificio, empleado):
self.nombre = nombre
self.edificio = edificio
self.empleado = empleado
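# Illustrative usage sketch (added; the Edificio and Empleado objects are
# assumed to be defined elsewhere in this exercise):
#   sede = Edificio(...)
#   gerente = Empleado(...)
#   acme = Empresa("Acme S.A.", sede, gerente)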
| 32.2
| 51
| 0.652174
|
c7c6d5acf77500a56a683ebd5472777799bb3d6d
| 375
|
py
|
Python
|
citrination_client/views/descriptors/__init__.py
|
nequalszero/python-citrination-client
|
634796717692af35e6b406a7014fc82ad6baaa34
|
[
"Apache-2.0"
] | 20
|
2016-06-15T18:40:50.000Z
|
2022-03-21T11:59:13.000Z
|
citrination_client/views/descriptors/__init__.py
|
nequalszero/python-citrination-client
|
634796717692af35e6b406a7014fc82ad6baaa34
|
[
"Apache-2.0"
] | 91
|
2015-12-23T18:13:43.000Z
|
2020-07-21T21:33:13.000Z
|
citrination_client/views/descriptors/__init__.py
|
nequalszero/python-citrination-client
|
634796717692af35e6b406a7014fc82ad6baaa34
|
[
"Apache-2.0"
] | 18
|
2016-07-19T15:33:18.000Z
|
2022-03-02T19:42:24.000Z
|
from .alloy_composition_descriptor import AlloyCompositionDescriptor
from .categorical_descriptor import CategoricalDescriptor
from .inorganic_descriptor import InorganicDescriptor
from .int_descriptor import IntDescriptor
from .organic_descriptor import OrganicDescriptor
from .real_descriptor import RealDescriptor
from .formulation_descriptor import FormulationDescriptor
| 46.875
| 68
| 0.906667
|
b01619e603894f790f3b16487e33e2d2ac3f6c8f
| 15,858
|
py
|
Python
|
bin/sam_stats.py
|
KyleLevi/BAM_Scripts
|
71805b57ee81cb3bd4e30f96a6236d8d8e148df7
|
[
"MIT"
] | null | null | null |
bin/sam_stats.py
|
KyleLevi/BAM_Scripts
|
71805b57ee81cb3bd4e30f96a6236d8d8e148df7
|
[
"MIT"
] | 2
|
2020-01-23T14:58:03.000Z
|
2020-01-23T15:02:14.000Z
|
bin/sam_stats.py
|
KyleLevi/BAM_Scripts
|
71805b57ee81cb3bd4e30f96a6236d8d8e148df7
|
[
"MIT"
] | 1
|
2020-01-23T14:54:14.000Z
|
2020-01-23T14:54:14.000Z
|
import sys
import os
import argparse
import subprocess
import pysam
class Sam_Reader:
def __init__(self, file_or_folder, **kwargs):
"""
        Initialize with the path to a single SAM/BAM file or to a folder of
        SAM/BAM files. If a SAM file is given, it is converted to a sorted,
        indexed BAM file first.
        :param file_or_folder: path to a SAM/BAM file or to a directory of them
"""
convert = kwargs.get('convert', True)
check_files = kwargs.get('check_files', True)
# Generate a list of files in dir, and convert sam to bam
if not os.path.isdir(file_or_folder):
if file_or_folder.endswith('.sam'):
file_or_folder = self.sam_to_bam(file_or_folder)
input_files = [file_or_folder]
else:
if not file_or_folder.endswith('/'):
file_or_folder = file_or_folder + '/'
# Get the names of every SAM and BAM file in the input dir
input_files = [file_or_folder + file_name for file_name in os.listdir(file_or_folder) if
file_name.endswith(".sam") or file_name.endswith('.bam')]
# Trim sam files from the list that have a bam file of the same name in the list
input_files = [file_name for file_name in input_files if not
(file_name.endswith('.sam') and file_name.replace('.sam','.bam') in input_files)]
# Convert any sam files to bam files, sort, index and add the new file names to the input_files
input_files = [file_name if file_name.endswith('.bam') else self.sam_to_bam(file_name) for file_name in input_files]
self.input_files = input_files
# Check if every BAM files has an index
#TODO
# Check if every file can be opened and record genomes & lengths
genome_lengths = {}
removed_files = []
for f in self.input_files:
try:
bamfile = pysam.AlignmentFile(f, 'rb')
except Exception as e:
sys.stderr.write('File {} could not be opened by pysam because...:\n{}\n'.format(f, e))
sys.stderr.write('Removing {} from input list and continuing.\n'.format(f))
removed_files.append(f)
continue
for l, r in zip(bamfile.lengths, bamfile.references):
genome_lengths[r] = l
if not check_files:
break
self.input_files = list(set(self.input_files)-set(removed_files))
self.broken_files = removed_files
self.genome_lengths = genome_lengths
def __str__(self):
return "{} BAM file(s): (use .input_files)\n{} Organism(s)/Genome_Length {}\n".format(len(self.input_files), len(self.genome_lengths.keys()), str(self.genome_lengths))
def remove_short_reads(self, new_dir = None, min_length = 50):
"""
        Read each BAM file, drop any reads shorter than min_length, and write
        the remaining reads to a new file.
:param min_length:
:return:
"""
@staticmethod
def sam_to_bam(infile, outdir = None):
"""
Converts a SAM file to a BAM file, sorts it, and Indexes it.
:param infile: path to SAM file
:param outdir: (optional) path to write BAM file to
:return: path to new BAM file
"""
if infile.endswith('.sam'):
# Changing the output file name and location
bamfile = infile.replace('.sam', '.bam')
if outdir:
infile = infile.split('/')[-1].replace('.sam', '')
bamfile = outdir + infile + '.bam'
# These are the commands to be run, edit them here!
convert_to_bam = ["samtools", "view", "-bS", infile]
sort_bamfile = ["samtools", "sort", bamfile, bamfile.replace('.bam', '')]
index_bamfile = ["samtools", "index", bamfile, bamfile.replace('.bam', '')]
sys.stdout.write('Converting {} to BAM file, sorting, and indexing...'.format(infile))
ret_code = subprocess.call(convert_to_bam, stdout=open(bamfile, 'w'))
if ret_code != 0:
sys.stderr.write("Error running command \"{}\"\n".format(' '.join(convert_to_bam)))
return None
ret_code = subprocess.call(sort_bamfile)
if ret_code != 0:
sys.stderr.write("Error running command \"{}\"\n".format(' '.join(sort_bamfile)))
return None
ret_code = subprocess.call(index_bamfile)
if ret_code != 0:
sys.stderr.write("Error running command \"{}\"\n".format(' '.join(index_bamfile)))
return None
return bamfile
else:
sys.stderr.write('File: "{}" does not end with .sam, cannot convert to .bam'.format(infile))
return None
@staticmethod
def read_counts(bam_file_name, n=50):
bamfile = pysam.AlignmentFile(bam_file_name, 'rb', check_sq=False)
stats_dict = {} # {genome_name: [total_reads_mapped, reads > n base pairs long]}
for read in bamfile.fetch():
if not read.reference_name in stats_dict:
stats_dict[read.reference_name] = [0, 0]# index 0 is count of all reads, index 1 is all reads > n length
total_len = int(sum(read.get_cigar_stats()[0]))
if total_len > n:
stats_dict[read.reference_name][1] += 1
stats_dict[read.reference_name][0] += 1
if stats_dict == {}:
return {'None': [0, 0]}
return stats_dict
def quick_percent_coverages(self, bam_file_name, organism=None, MIN_POSITIONAL_COVERAGE=1):
bamfile = pysam.AlignmentFile(bam_file_name, 'rb', check_sq=False)
# Loop over every read, and calculate coverage an organism if it's the first read found
organism_coverage = {}
for read in bamfile.fetch():
genome_name = read.reference_name
if genome_name in organism_coverage:
# print('exists')
continue
if organism != None and organism != genome_name:
# print('specified and not{}{}'.format(genome_name,organism))
continue
# Process one organism
base_depth = []
for p in bamfile.pileup(contig=genome_name):
for pilups in p.pileups:
                    if pilups.query_position is not None:
# Expand array while insert pos is out of list bounds
if p.reference_pos >= len(base_depth):
base_depth += [0] * (p.reference_pos - len(base_depth) + 1)
# while p.reference_pos >= len(base_depth):
# base_depth.append(0)
base_depth[p.reference_pos] += 1
if base_depth[p.reference_pos] > MIN_POSITIONAL_COVERAGE:
continue
bins_covered = len([x for x in base_depth if x > 0])
organism_coverage[genome_name] = (bins_covered / self.genome_lengths[genome_name]) * 100
if organism_coverage == {}:
return {'None': 0}
return organism_coverage
def hits(self, **kwargs):
"""
File | Genome | Percent Coverage | Total Mapped Reads | Mapped Reads > 50 bp
:param kwargs:
:return:
"""
# Setting Kwargs and defaults
organism = kwargs.get('organism', None)
only_this_file = kwargs.get('file_name', None)
min_read_len = kwargs.get('min_read_length', 50)
min_cov_depth = kwargs.get('min_coverage_depth', 1)
header = ['file', 'genome', 'percent_coverage', 'total reads mapped', 'reads mapped > {} bp'.format(min_read_len)]
results = []
for f in self.input_files:
# if a specific file is specified and this file isn't it, continue
if only_this_file != None and f != only_this_file:
continue
f_coverages = self.quick_percent_coverages(f, organism, min_cov_depth)
for genome, stats in Sam_Reader.read_counts(f, min_read_len).items():
line = [f, genome, round(f_coverages.get(genome,0), 1), stats[0], stats[1]]
results.append(line)
if kwargs.get('write_file', False):
if len(results) < 1:
print("no results?")
return
with open(kwargs['write_file'], 'w') as outfile:
outfile.write('\t'.join(header) + '\n')
for line in results:
line = [str(x) for x in line]
line = '\t'.join(line)
outfile.write(line + '\n')
return results
def per_base_stats(self, **kwargs):
"""
:param kwargs:
:return:
"""
# Setting Kwargs and defaults
kwargs['write_file'] = kwargs.get('write_file', False)
organism = kwargs.get('organism', None)
file_name = kwargs.get('file_name', None)
min_len = kwargs.get('min_len', 50)
if organism == None and len(self.genome_lengths.keys()) > 1:
sys.stderr.write("Organism name not specified for per_base_stats and more than one organism is present,\n"
"Available organism names are: {}".format(', '.join(self.genome_lengths.keys())))
organism = input("\n\nOrganism name not specified for .per_base_stats(organism=...) and more than one organism is present,\n"+
"Enter the name of an organism to analyze. (available names listed above):\n")
        elif organism is None:
            organism = list(self.genome_lengths.keys())[0]
if organism == 'all':
sys.stdout.write("All Organisms chosen, this could take a long time and a lot of memory. I hope you know what you are doing...\n")
all_d = {}
for organism in self.genome_lengths.keys():
all_d[organism] = self.per_base_stats(organism=organism, write_file=kwargs['write_file'])
return all_d
# Initialize a list for every position in the genome, with an empty dictionary
base_positions = [{"A": 0, "C": 0, "G": 0, "T": 0, "N": 0, "Gap": 0} for i in range(self.genome_lengths[organism])]
empty = True
# Loop over each file and add each base to the correct position in base_positions
for f in self.input_files:
try:
# if a specific file is specified and this file isn't it, continue
if file_name != None and f != file_name:
continue
bamfile = pysam.AlignmentFile(f, 'rb')
for p in bamfile.pileup(contig=organism):
for pilups in p.pileups:
                        if pilups.query_position is not None:
bp = pilups.alignment.query_sequence[pilups.query_position]
else:
bp = '-'
base_positions[p.reference_pos][bp] = base_positions[p.reference_pos].get(bp, 0) + 1
empty = False
except Exception as e:
                sys.stderr.write('{}\nReading file: {} failed for Organism: {} -- skipping.\n'.format(e, f, organism))
continue
if kwargs['write_file']:
if empty:
print('\n\nempty')
with open(kwargs['write_file'] + organism + '.csv', 'w') as outfile:
header = "\t".join(['Position', 'Consensus', 'Percent', 'A', 'C', 'G', 'T', 'N', 'Gap\n'])
outfile.write(header)
for index, pos_dict in enumerate(base_positions):
consensus = max(pos_dict, key=pos_dict.get)
try:
percent = float(pos_dict[consensus]) / sum(list(pos_dict.values()))
except:
percent = 0.0
line = [index, consensus, round(percent * 100, 2), pos_dict['A'], pos_dict['C'], pos_dict['G'],
pos_dict['T'], pos_dict['N'], pos_dict['Gap']]
line = [str(x) for x in line]
line[-1] = line[-1] + '\n'
outfile.write('\t'.join(line))
return base_positions
def reads(self, **kwargs):
"""
For a full list of things to do with yielded reads:
http://pysam.readthedocs.io/en/latest/api.html#pysam.AlignedSegment
:param kwargs: organism, min_read_len, only_this_file
:return:
"""
organism = kwargs.get('organism', None)
only_this_file = kwargs.get('file_name', None)
min_read_len = kwargs.get('min_len', None)
verb = kwargs.get('verbose', False)
for bam_file_name in self.input_files:
if only_this_file != None and bam_file_name != only_this_file:
continue
bamfile = pysam.AlignmentFile(bam_file_name, 'rb', check_sq=False)
if verb:
print('Opening file: {}'.format(bam_file_name))
for read in bamfile.fetch():
if organism is not None and read.reference_name != organism:
continue
if min_read_len != None and read.infer_query_length() < min_read_len:
continue
yield read
def write_reads(self, new_filename, **kwargs):
organism = kwargs.get('organism', None)
only_this_file = kwargs.get('file_name', None)
min_read_len = kwargs.get('min_len', None)
out = pysam.Samfile(new_filename, 'w', template=pysam.AlignmentFile(self.input_files[0]))
        for read in self.reads(min_len=min_read_len, organism=organism, file_name=only_this_file, verbose=True):
out.write(read)
if not new_filename.endswith('.sam'):
new_filename = new_filename + '.sam'
bamfile = new_filename.replace('.sam', '.bam')
# These are the commands to be run, edit them here!
convert_to_bam = ["samtools", "view", "-bS", new_filename]
sort_bamfile = ["samtools", "sort", bamfile, bamfile.replace('.bam', '')]
index_bamfile = ["samtools", "index", bamfile, bamfile.replace('.bam', '.bai')]
        sys.stdout.write('Converting {} to BAM file, sorting, and indexing...'.format(new_filename))
ret_code = subprocess.call(convert_to_bam, stdout=open(bamfile, 'w'))
if ret_code != 0:
sys.stderr.write("Error running command \"{}\"\n".format(' '.join(convert_to_bam)))
return None
ret_code = subprocess.call(sort_bamfile)
if ret_code != 0:
sys.stderr.write("Error running command \"{}\"\n".format(' '.join(sort_bamfile)))
return None
ret_code = subprocess.call(index_bamfile)
if ret_code != 0:
sys.stderr.write("Error running command \"{}\"\n".format(' '.join(index_bamfile)))
return None
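# Added usage sketch (paths and file names here are hypothetical):
#   data = Sam_Reader('alignments/')           # converts/loads every SAM/BAM file
#   print(data)                                # genomes found and their lengths
#   data.hits(write_file='hits.tsv')           # per-file, per-genome summary table
#   data.per_base_stats(organism='all', write_file='per_base_')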
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='')
parser.add_argument('-i', '--input', help='Input File', required=True)
parser.add_argument('-o', '--output', help='output directory')
parser.add_argument('-n', help='Some Number', type=int)
parser.add_argument('-v', help='Verbose', action='store_true')
try:
args = parser.parse_args()
except:
parser.print_help()
sys.exit(1)
data = Sam_Reader(args.input)
if not args.output:
        args.output = None
data.per_base_stats(write_file=args.output)
| 45.700288
| 176
| 0.557573
|
1e4dac23cccfffdbb98022a4ce00a319028c7687
| 567
|
py
|
Python
|
djangoui/user/signals.py
|
ceexon/django-frontend
|
1aa72b8cdc119acd50922c1978c7abaa47d9b1ed
|
[
"MIT"
] | null | null | null |
djangoui/user/signals.py
|
ceexon/django-frontend
|
1aa72b8cdc119acd50922c1978c7abaa47d9b1ed
|
[
"MIT"
] | 8
|
2021-04-08T19:21:53.000Z
|
2022-03-11T23:50:51.000Z
|
djangoui/user/signals.py
|
kburudi/django-frontend
|
1aa72b8cdc119acd50922c1978c7abaa47d9b1ed
|
[
"MIT"
] | null | null | null |
"""Signals for user actions."""
from django.contrib.auth.models import User
from django.db.models.signals import post_save
from django.dispatch import receiver
from .models import Profile
@receiver(post_save, sender=User)
def create_user_profile(sender, instance, created, **kwargs):
"""Create user profile on saving the user."""
if created: # noqa
Profile.objects.create(user=instance)
@receiver(post_save, sender=User)
def save_profile(sender, instance, **kwargs):
"""Create user profile on saving the user."""
instance.profile.save()
| 28.35
| 61
| 0.738977
|
e745e2d717cffe48514128f36b80c5317775087e
| 654
|
py
|
Python
|
bbtools/callvariants/wrapper.py
|
avilab/vs-wrappers
|
65524b3566969da7909e0d08c423b6eecadea039
|
[
"MIT"
] | 1
|
2021-04-25T08:20:14.000Z
|
2021-04-25T08:20:14.000Z
|
bbtools/callvariants/wrapper.py
|
avilab/vs-wrappers
|
65524b3566969da7909e0d08c423b6eecadea039
|
[
"MIT"
] | 2
|
2018-12-28T08:40:03.000Z
|
2019-02-22T13:28:34.000Z
|
bbtools/callvariants/wrapper.py
|
avilab/vs-wrappers
|
65524b3566969da7909e0d08c423b6eecadea039
|
[
"MIT"
] | null | null | null |
__author__ = "Taavi Päll"
__copyright__ = "Copyright 2021, Taavi Päll"
__email__ = "tapa741@gmail.com"
__license__ = "MIT"
from snakemake.shell import shell
from snakemake_wrapper_utils.java import get_java_opts
java_opts = get_java_opts(snakemake)
def parseIO(d):
return " ".join([("in" if k == "input" else k) + "=" + v for k, v in d.items()])
inputs = parseIO(snakemake.input)
outputs = parseIO(snakemake.output)
# Get extra arguments
extra = snakemake.params.get("extra", "")
# Setup log
log = snakemake.log_fmt_shell(stdout=False, stderr=True)
shell(
"""
(callvariants.sh {inputs} {outputs} {extra} {java_opts}) {log}
"""
)
| 21.8
| 84
| 0.697248
|
235e2a8ac6171d94793c40231b2df3f800d482b5
| 2,026
|
py
|
Python
|
compile.py
|
honzajavorek/blackbeard
|
1252de7f2a7cbf74b432002509087c09f54c84b4
|
[
"ISC"
] | null | null | null |
compile.py
|
honzajavorek/blackbeard
|
1252de7f2a7cbf74b432002509087c09f54c84b4
|
[
"ISC"
] | null | null | null |
compile.py
|
honzajavorek/blackbeard
|
1252de7f2a7cbf74b432002509087c09f54c84b4
|
[
"ISC"
] | null | null | null |
#!/usr/bin/python
# -*- coding: utf-8 -*-
"""Compile script."""
import datetime
import urllib2
import re
import os
VERSION = datetime.datetime.now().strftime('ver%Y-%m-%d')
DIR = os.path.dirname(os.path.realpath(__file__))
SRC_DIR = DIR + '/src'
RELEASE_FILE = DIR + '/releases/blackbeard_%s.html' % VERSION
HTML_FILE = SRC_DIR + '/blackbeard.html'
LOGO_FILE = 'https://github.com/Littlemaple/blackbeard/raw/master/static/logo.jpg'
CSS_FILES = ('https://github.com/Littlemaple/css/raw/master/base.css',
SRC_DIR + '/style.css')
JAVASCRIPT_FILES = ('http://ajax.googleapis.com/ajax/libs/jquery/1.6.1/jquery.min.js',
'https://github.com/jamespadolsey/jQuery-Plugins/raw/master/cross-domain-ajax/jquery.xdomainajax.js',
SRC_DIR + '/app.js')
def get_file_contents(file):
if re.match(r'https?://', file):
f = urllib2.urlopen(file)
contents = f.read()
else:
with open(file, 'r') as f:
contents = f.read()
if isinstance(contents, basestring):
if not isinstance(contents, unicode):
contents = unicode(contents, 'utf-8')
return contents
def compress_css(css_code):
css_code = re.sub(r'/\*[^*]*\*+([^/][^*]*\*+)*/', '', css_code)
css_code = re.sub(r'[\r\n\t]+', '', css_code)
css_code = re.sub(r' +', ' ', css_code)
return css_code.strip()
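# Added note: compress_css strips /* ... */ comments, removes newlines and
# tabs, and collapses runs of spaces, so "a {\n  color: red; /* note */\n}"
# becomes "a { color: red; }".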
template = get_file_contents(HTML_FILE)
data = {'logo': LOGO_FILE, 'version': VERSION}
css = ''
for f in CSS_FILES:
css += get_file_contents(f) + '\n\n'
data['css'] = compress_css(css)
javascript = ''
for f in JAVASCRIPT_FILES:
javascript += get_file_contents(f) + '\n\n'
data['javascript'] = javascript.strip()
def replace(match):
global data
match = match.group(1)
return data[match]
def file_put_contents(file, contents):
with open(file, 'w') as f:
f.write(contents.encode('utf-8'))
template = re.sub(r'{(\w+)}', replace, template)
file_put_contents(RELEASE_FILE, template)
| 25.325
| 121
| 0.628332
|
4fa844d981324956dfa9181a56aa2b99d8d1f781
| 2,738
|
py
|
Python
|
src/tools/generate-eclipse.py
|
cucosion/polyglot
|
66a491da6393411c7ee1b8ef1d4780136b139da2
|
[
"BSD-3-Clause"
] | 489
|
2016-06-21T21:28:06.000Z
|
2022-03-17T20:26:27.000Z
|
src/tools/generate-eclipse.py
|
cucosion/polyglot
|
66a491da6393411c7ee1b8ef1d4780136b139da2
|
[
"BSD-3-Clause"
] | 65
|
2016-06-25T11:22:13.000Z
|
2022-02-12T08:34:17.000Z
|
src/tools/generate-eclipse.py
|
cucosion/polyglot
|
66a491da6393411c7ee1b8ef1d4780136b139da2
|
[
"BSD-3-Clause"
] | 66
|
2016-06-11T12:38:50.000Z
|
2022-02-24T12:35:11.000Z
|
#! /usr/bin/env python
import os
import subprocess
from xml.etree.ElementTree import Element, SubElement, tostring
LIBRARY_JAR_ROOT = os.path.join('bazel-genfiles', 'external')
BUILD_EVERYTHING_COMMAND = ['bazel', 'build', 'src/...']
PROTO_JAR_ROOT = os.path.join('bazel-bin', 'src', 'main', 'proto')
def main():
# Using relative paths for certain things makes our lives much easier, but
# this requires being run from the root of the Bazel workspace.
if not os.path.isfile(os.path.join(os.getcwd(), 'WORKSPACE')):
print('This script must be invoked from the WORKSPACE root.')
return
# Build the project to make sure all jars are present.
print('Building project...')
subprocess.check_output(BUILD_EVERYTHING_COMMAND)
print('Generating .classpath file ...')
with open('.classpath', 'w') as file:
file.write(generate_classpath_contents())
print('Generating .project file ...')
with open('.project', 'w') as file:
file.write(generate_project_contents())
print('Done')
def generate_project_contents():
project_name = os.path.basename(os.getcwd())
print('Using project name: ' + project_name)
return PROJECT_FILE_TEMPLATE % { 'project_name': project_name }
def generate_classpath_contents():
jar_paths = discover_jars(LIBRARY_JAR_ROOT)
jar_entries = '\n'.join([CLASSPATH_INDENT + jar_entry(p) for p in jar_paths])
return CLASSPATH_TEMPLATE % { 'jar_entries': jar_entries }
def jar_entry(jar_path):
return JAR_CLASSPATH_ENTRY_TEMPLATE % {'path': jar_path}
def discover_jars(root):
    """Collect absolute paths of every .jar file under root and PROTO_JAR_ROOT."""
    jar_paths = []
    for tree in (root, PROTO_JAR_ROOT):
        for dirpath, _, files in os.walk(tree):
            for file in files:
                if os.path.splitext(file)[1] == '.jar':
                    jar_paths.append(os.path.abspath(os.path.join(dirpath, file)))
    return jar_paths
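# Added note: each discovered jar path is rendered by jar_entry() as a line like
#   <classpathentry kind="lib" path="/abs/path/foo.jar"/>
# and spliced into the .classpath template below.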
CLASSPATH_TEMPLATE = """<?xml version="1.0" encoding="UTF-8"?>
<classpath>
<classpathentry kind="src" path="src/main/java"/>
<classpathentry kind="src" path="src/test/java"/>
<classpathentry kind="con" path="org.eclipse.jdt.launching.JRE_CONTAINER"/>
%(jar_entries)s
</classpath>
"""
CLASSPATH_INDENT = """ """
JAR_CLASSPATH_ENTRY_TEMPLATE = '<classpathentry kind="lib" path="%(path)s"/>'
PROJECT_FILE_TEMPLATE = """<?xml version="1.0" encoding="UTF-8"?>
<projectDescription>
<name>%(project_name)s</name>
<comment></comment>
<projects>
</projects>
<buildSpec>
</buildSpec>
<natures>
<nature>org.eclipse.jdt.core.javanature</nature>
</natures>
</projectDescription>
"""
if __name__ == '__main__':
main()
| 32.211765
| 83
| 0.661432
|
b326366651cb460514022a4b83da46c0a9d4ac52
| 2,581
|
py
|
Python
|
homework_fall2019/hw1/cs285/infrastructure/tf_utils.py
|
grandintegrator/cs285-deeprlcourse-fa19-hw
|
4abd57eb9da8978b576300b69865e52862e4eaab
|
[
"MIT"
] | 1
|
2021-04-05T11:40:10.000Z
|
2021-04-05T11:40:10.000Z
|
homework_fall2019/hw1/cs285/infrastructure/tf_utils.py
|
grandintegrator/cs285-deeprlcourse-fa19-hw
|
4abd57eb9da8978b576300b69865e52862e4eaab
|
[
"MIT"
] | null | null | null |
homework_fall2019/hw1/cs285/infrastructure/tf_utils.py
|
grandintegrator/cs285-deeprlcourse-fa19-hw
|
4abd57eb9da8978b576300b69865e52862e4eaab
|
[
"MIT"
] | null | null | null |
import tensorflow as tf
import os
############################################
############################################
def build_mlp(input_placeholder, output_size, scope, n_layers, size, activation=tf.tanh, output_activation=None):
"""
Builds a feedforward neural network
arguments:
input_placeholder: placeholder variable for the state (batch_size, input_size)
scope: variable scope of the network
n_layers: number of hidden layers
size: dimension of each hidden layer
activation: activation of each hidden layer
output_size: size of the output layer
output_activation: activation of the output layer
returns:
output_placeholder: the result of a forward pass through the hidden layers + the output layer
"""
output_placeholder = input_placeholder
with tf.variable_scope(scope):
for _ in range(n_layers):
output_placeholder = tf.layers.dense(inputs=output_placeholder,
units=size,
activation=activation)
output_placeholder = tf.layers.dense(inputs=output_placeholder,
units=output_size,
activation=output_activation)
return output_placeholder
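# Added usage sketch (hypothetical shapes): a 2-layer, 64-unit MLP mapping a
# 10-dimensional observation placeholder to 4 output logits:
#   obs_ph = tf.placeholder(tf.float32, shape=[None, 10])
#   logits = build_mlp(obs_ph, output_size=4, scope='policy', n_layers=2, size=64)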
############################################
############################################
def create_tf_session(use_gpu=False, gpu_frac=0.6,
allow_gpu_growth=True, which_gpu=0):
if use_gpu:
# gpu options
gpu_options = tf.GPUOptions(
per_process_gpu_memory_fraction=gpu_frac,
allow_growth=allow_gpu_growth)
# TF config
config = tf.ConfigProto(
gpu_options=gpu_options,
log_device_placement=False,
allow_soft_placement=True,
inter_op_parallelism_threads=1,
intra_op_parallelism_threads=1)
# set env variable to specify which gpu to use
os.environ["CUDA_VISIBLE_DEVICES"] = str(which_gpu)
else:
# TF config without gpu
config = tf.ConfigProto(device_count={'GPU': 0})
# use config to create TF session
sess = tf.Session(config=config)
return sess
def lrelu(x, leak=0.2):
f1 = 0.5 * (1 + leak)
f2 = 0.5 * (1 - leak)
return f1 * x + f2 * abs(x)
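# Added note: with the default leak=0.2, f1=0.6 and f2=0.4, so lrelu(x) equals
# x for x >= 0 and 0.2 * x for x < 0, i.e. a leaky ReLU.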
| 35.847222
| 119
| 0.565285
|
8d07a8e9bb8de79ea6d75ec3ebf698d1eaf3a1ea
| 9,102
|
py
|
Python
|
model_zoo/official/cv/mobilenetv3/train.py
|
kungfu-team/mindspore-bert
|
71501cf52ae01db9d6a73fb64bcfe68a6509dc32
|
[
"Apache-2.0"
] | 2
|
2021-07-08T13:10:42.000Z
|
2021-11-08T02:48:57.000Z
|
model_zoo/official/cv/mobilenetv3/train.py
|
peixinhou/mindspore
|
fcb2ec2779b753e95c762cf292b23bd81d1f561b
|
[
"Apache-2.0"
] | null | null | null |
model_zoo/official/cv/mobilenetv3/train.py
|
peixinhou/mindspore
|
fcb2ec2779b753e95c762cf292b23bd81d1f561b
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2020 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""train_imagenet."""
import time
import argparse
import ast
import numpy as np
from mindspore import context
from mindspore import Tensor
from mindspore import nn
from mindspore.nn.optim.momentum import Momentum
from mindspore.nn.loss import SoftmaxCrossEntropyWithLogits
from mindspore.nn.loss.loss import _Loss
from mindspore.ops import operations as P
from mindspore.ops import functional as F
from mindspore.common import dtype as mstype
from mindspore.train.model import Model
from mindspore.context import ParallelMode
from mindspore.train.callback import ModelCheckpoint, CheckpointConfig, Callback
from mindspore.train.loss_scale_manager import FixedLossScaleManager
from mindspore.train.serialization import load_checkpoint, load_param_into_net
from mindspore.common import set_seed
from mindspore.communication.management import init, get_group_size, get_rank
from src.dataset import create_dataset
from src.dataset import create_dataset_cifar
from src.lr_generator import get_lr
from src.config import config_gpu
from src.config import config_cpu
from src.mobilenetV3 import mobilenet_v3_large
set_seed(1)
parser = argparse.ArgumentParser(description='Image classification')
parser.add_argument('--dataset_path', type=str, default=None, help='Dataset path')
parser.add_argument('--pre_trained', type=str, default=None, help='Pretrained checkpoint path')
parser.add_argument('--device_target', type=str, default="GPU", help='run device_target')
parser.add_argument('--run_distribute', type=ast.literal_eval, default=True, help='Run distribute')
args_opt = parser.parse_args()
if args_opt.device_target == "GPU":
context.set_context(mode=context.GRAPH_MODE,
device_target="GPU",
save_graphs=False)
if args_opt.run_distribute:
init()
context.set_auto_parallel_context(device_num=get_group_size(),
parallel_mode=ParallelMode.DATA_PARALLEL,
gradients_mean=True)
elif args_opt.device_target == "CPU":
context.set_context(mode=context.GRAPH_MODE,
device_target="CPU",
save_graphs=False)
else:
raise ValueError("Unsupported device_target.")
class CrossEntropyWithLabelSmooth(_Loss):
"""
CrossEntropyWith LabelSmooth.
Args:
smooth_factor (float): smooth factor for label smooth. Default is 0.
num_classes (int): number of classes. Default is 1000.
Returns:
None.
Examples:
>>> CrossEntropyWithLabelSmooth(smooth_factor=0., num_classes=1000)
"""
def __init__(self, smooth_factor=0., num_classes=1000):
super(CrossEntropyWithLabelSmooth, self).__init__()
self.onehot = P.OneHot()
self.on_value = Tensor(1.0 - smooth_factor, mstype.float32)
self.off_value = Tensor(1.0 * smooth_factor /
(num_classes - 1), mstype.float32)
self.ce = nn.SoftmaxCrossEntropyWithLogits()
self.mean = P.ReduceMean(False)
self.cast = P.Cast()
def construct(self, logit, label):
one_hot_label = self.onehot(self.cast(label, mstype.int32), F.shape(logit)[1],
self.on_value, self.off_value)
out_loss = self.ce(logit, one_hot_label)
out_loss = self.mean(out_loss, 0)
return out_loss
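# Added note: with smooth_factor = eps and num_classes = K, the smoothed target
# assigns 1 - eps to the true class and eps / (K - 1) to every other class
# before the softmax cross-entropy is computed, which is standard label smoothing.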
class Monitor(Callback):
"""
Monitor loss and time.
Args:
lr_init (numpy array): train lr
Returns:
None
Examples:
>>> Monitor(100,lr_init=Tensor([0.05]*100).asnumpy())
"""
def __init__(self, lr_init=None):
super(Monitor, self).__init__()
self.lr_init = lr_init
self.lr_init_len = len(lr_init)
def epoch_begin(self, run_context):
self.losses = []
self.epoch_time = time.time()
def epoch_end(self, run_context):
cb_params = run_context.original_args()
epoch_mseconds = (time.time() - self.epoch_time) * 1000
per_step_mseconds = epoch_mseconds / cb_params.batch_num
print("epoch time: {:5.3f}, per step time: {:5.3f}, avg loss: {:5.3f}".format(epoch_mseconds,
per_step_mseconds,
np.mean(self.losses)))
def step_begin(self, run_context):
self.step_time = time.time()
def step_end(self, run_context):
cb_params = run_context.original_args()
step_mseconds = (time.time() - self.step_time) * 1000
step_loss = cb_params.net_outputs
if isinstance(step_loss, (tuple, list)) and isinstance(step_loss[0], Tensor):
step_loss = step_loss[0]
if isinstance(step_loss, Tensor):
step_loss = np.mean(step_loss.asnumpy())
self.losses.append(step_loss)
cur_step_in_epoch = (cb_params.cur_step_num - 1) % cb_params.batch_num
print("epoch: [{:3d}/{:3d}], step:[{:5d}/{:5d}], loss:[{:5.3f}/{:5.3f}], time:[{:5.3f}], lr:[{:5.3f}]".format(
cb_params.cur_epoch_num -
1, cb_params.epoch_num, cur_step_in_epoch, cb_params.batch_num, step_loss,
np.mean(self.losses), step_mseconds, self.lr_init[cb_params.cur_step_num - 1]))
if __name__ == '__main__':
config_ = None
if args_opt.device_target == "GPU":
config_ = config_gpu
elif args_opt.device_target == "CPU":
config_ = config_cpu
else:
raise ValueError("Unsupported device_target.")
# train on device
print("train args: ", args_opt)
print("cfg: ", config_)
# define net
net = mobilenet_v3_large(num_classes=config_.num_classes)
# define loss
if config_.label_smooth > 0:
loss = CrossEntropyWithLabelSmooth(
smooth_factor=config_.label_smooth, num_classes=config_.num_classes)
else:
loss = SoftmaxCrossEntropyWithLogits(sparse=True, reduction='mean')
# define dataset
epoch_size = config_.epoch_size
if args_opt.device_target == "GPU":
dataset = create_dataset(dataset_path=args_opt.dataset_path,
do_train=True,
config=config_,
device_target=args_opt.device_target,
repeat_num=1,
batch_size=config_.batch_size,
run_distribute=args_opt.run_distribute)
elif args_opt.device_target == "CPU":
dataset = create_dataset_cifar(args_opt.dataset_path,
do_train=True,
batch_size=config_.batch_size)
else:
raise ValueError("Unsupported device_target.")
step_size = dataset.get_dataset_size()
# resume
if args_opt.pre_trained:
param_dict = load_checkpoint(args_opt.pre_trained)
load_param_into_net(net, param_dict)
# define optimizer
loss_scale = FixedLossScaleManager(
config_.loss_scale, drop_overflow_update=False)
lr = Tensor(get_lr(global_step=0,
lr_init=0,
lr_end=0,
lr_max=config_.lr,
warmup_epochs=config_.warmup_epochs,
total_epochs=epoch_size,
steps_per_epoch=step_size))
opt = Momentum(filter(lambda x: x.requires_grad, net.get_parameters()), lr, config_.momentum,
config_.weight_decay, config_.loss_scale)
# define model
model = Model(net, loss_fn=loss, optimizer=opt,
loss_scale_manager=loss_scale)
cb = [Monitor(lr_init=lr.asnumpy())]
if args_opt.run_distribute and args_opt.device_target != "CPU":
        ckpt_save_dir = config_.save_checkpoint_path + "ckpt_" + str(get_rank()) + "/"
else:
        ckpt_save_dir = config_.save_checkpoint_path + "ckpt_" + "/"
if config_.save_checkpoint:
config_ck = CheckpointConfig(save_checkpoint_steps=config_.save_checkpoint_epochs * step_size,
keep_checkpoint_max=config_.keep_checkpoint_max)
ckpt_cb = ModelCheckpoint(prefix="mobilenetV3", directory=ckpt_save_dir, config=config_ck)
cb += [ckpt_cb]
    # begin training
model.train(epoch_size, dataset, callbacks=cb)
| 39.921053
| 118
| 0.642826
|
a104852d056ed351485d21e914439c76be9d8086
| 7,132
|
py
|
Python
|
seditor.py
|
vxsl/picotui
|
768eac16883bb682715dcd7b9fa490317d32df2d
|
[
"MIT"
] | null | null | null |
seditor.py
|
vxsl/picotui
|
768eac16883bb682715dcd7b9fa490317d32df2d
|
[
"MIT"
] | null | null | null |
seditor.py
|
vxsl/picotui
|
768eac16883bb682715dcd7b9fa490317d32df2d
|
[
"MIT"
] | null | null | null |
#
# Very simple VT100 terminal text editor widget
# Copyright (c) 2015 Paul Sokolovsky
# Distributed under MIT License
#
import sys
import os
from picotui.defs import VIM_KEYMAP
KEY_UP = 1
KEY_DOWN = 2
KEY_LEFT = 3
KEY_RIGHT = 4
KEY_HOME = 5
KEY_END = 6
KEY_PGUP = 7
KEY_PGDN = 8
KEY_QUIT = 9
KEY_ENTER = 10
KEY_BACKSPACE = 11
KEY_DELETE = 12
KEYMAP = {
b"\x1b[A": KEY_UP,
b"\x1b[B": KEY_DOWN,
b"\x1b[D": KEY_LEFT,
b"\x1b[C": KEY_RIGHT,
b"\x1bOH": KEY_HOME,
b"\x1bOF": KEY_END,
b"\x1b[1~": KEY_HOME,
b"\x1b[4~": KEY_END,
b"\x1b[5~": KEY_PGUP,
b"\x1b[6~": KEY_PGDN,
b"\x03": KEY_QUIT,
b"\r": KEY_ENTER,
b"\x7f": KEY_BACKSPACE,
b"\x1b[3~": KEY_DELETE,
}
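# Added note: KEYMAP translates the raw escape sequences a VT100-compatible
# terminal emits into logical key codes, e.g. pressing the Up arrow sends
# b"\x1b[A", which maps to KEY_UP, and Ctrl-C (b"\x03") maps to KEY_QUIT.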
class Editor:
def __init__(self):
self.top_line = 0
self.cur_line = 0
self.row = 0
self.col = 0
self.height = 25
@staticmethod
def wr(s):
# TODO: When Python is 3.5, update this to use only bytes
if isinstance(s, str):
s = bytes(s, "utf-8")
os.write(1, s)
@staticmethod
def cls():
Editor.wr(b"\x1b[2J")
@staticmethod
def goto(row, col):
# TODO: When Python is 3.5, update this to use bytes
Editor.wr("\x1b[%d;%dH" % (row + 1, col + 1))
@staticmethod
def clear_to_eol():
Editor.wr(b"\x1b[0K")
@staticmethod
def cursor(onoff):
if onoff:
Editor.wr(b"\x1b[?25h")
else:
Editor.wr(b"\x1b[?25l")
def set_cursor(self):
self.goto(self.row, self.col)
def adjust_cursor_eol(self):
l = len(self.content[self.cur_line])
if self.col > l:
self.col = l
def set_lines(self, lines):
self.content = lines
self.total_lines = len(lines)
def update_screen(self):
self.cursor(False)
self.goto(0, 0)
self.cls()
i = self.top_line
for c in range(self.height):
self.show_line(self.content[i])
self.wr(b"\r\n")
i += 1
if i == self.total_lines:
break
self.set_cursor()
self.cursor(True)
def update_line(self):
self.cursor(False)
self.wr(b"\r")
self.show_line(self.content[self.cur_line])
self.clear_to_eol()
self.set_cursor()
self.cursor(True)
def show_line(self, l):
self.wr(l)
def next_line(self):
if self.row + 1 == self.height:
self.top_line += 1
return True
else:
self.row += 1
return False
def handle_cursor_keys(self, key):
if key == KEY_DOWN:
if self.cur_line + 1 != self.total_lines:
self.cur_line += 1
self.adjust_cursor_eol()
if self.next_line():
self.update_screen()
else:
self.set_cursor()
elif key == KEY_UP:
if self.cur_line > 0:
self.cur_line -= 1
self.adjust_cursor_eol()
if self.row == 0:
if self.top_line > 0:
self.top_line -= 1
self.update_screen()
else:
self.row -= 1
self.set_cursor()
elif key == KEY_LEFT:
if self.col > 0:
self.col -= 1
self.set_cursor()
elif key == KEY_RIGHT:
self.col += 1
self.adjust_cursor_eol()
self.set_cursor()
elif key == KEY_HOME:
self.col = 0
self.set_cursor()
elif key == KEY_END:
self.col = len(self.content[self.cur_line])
self.set_cursor()
elif key == KEY_PGUP:
self.cur_line -= self.height
self.top_line -= self.height
if self.top_line < 0:
self.top_line = 0
self.cur_line = 0
self.row = 0
elif self.cur_line < 0:
self.cur_line = 0
self.row = 0
self.adjust_cursor_eol()
self.update_screen()
elif key == KEY_PGDN:
self.cur_line += self.height
self.top_line += self.height
if self.cur_line >= self.total_lines:
self.top_line = self.total_lines - self.height
self.cur_line = self.total_lines - 1
if self.top_line >= 0:
self.row = self.height - 1
else:
self.top_line = 0
self.row = self.cur_line
self.adjust_cursor_eol()
self.update_screen()
else:
return False
return True
def loop(self):
self.update_screen()
while True:
buf = os.read(0, 32)
sz = len(buf)
i = 0
while i < sz:
if buf[0] == 0x1b:
key = buf
i = len(buf)
else:
key = buf[i:i + 1]
i += 1
#self.show_status(repr(key))
if key in KEYMAP:
key = KEYMAP[key]
elif key in VIM_KEYMAP:
                    key = VIM_KEYMAP[key]
if key == KEY_QUIT:
return key
if self.handle_cursor_keys(key):
continue
res = self.handle_key(key)
if res is not None:
return res
def handle_key(self, key):
l = self.content[self.cur_line]
if key == KEY_ENTER:
self.content[self.cur_line] = l[:self.col]
self.cur_line += 1
self.content[self.cur_line:self.cur_line] = [l[self.col:]]
self.total_lines += 1
self.col = 0
self.next_line()
self.update_screen()
elif key == KEY_BACKSPACE:
if self.col:
self.col -= 1
l = l[:self.col] + l[self.col + 1:]
self.content[self.cur_line] = l
self.update_line()
elif key == KEY_DELETE:
l = l[:self.col] + l[self.col + 1:]
self.content[self.cur_line] = l
self.update_line()
else:
l = l[:self.col] + str(key, "utf-8") + l[self.col:]
self.content[self.cur_line] = l
self.col += 1
self.update_line()
def init_tty(self):
import tty, termios
self.org_termios = termios.tcgetattr(0)
tty.setraw(0)
def deinit_tty(self):
# Don't leave cursor in the middle of screen
self.goto(self.height, 0)
import termios
termios.tcsetattr(0, termios.TCSANOW, self.org_termios)
if __name__ == "__main__":
with open(sys.argv[1]) as f:
content = f.read().splitlines()
#content = f.readlines()
e = Editor()
e.init_tty()
e.set_lines(content)
e.loop()
e.deinit_tty()
| 27.32567
| 74
| 0.482894
|
19d71bb43a705e60847a82435c92abb4e384e7f0
| 1,125
|
py
|
Python
|
beacon/beacon/migrations/0001_initial.py
|
gEndelf/beacon
|
53b0e47bf136f10fca2a2b3a399ac6e473fdb840
|
[
"MIT"
] | null | null | null |
beacon/beacon/migrations/0001_initial.py
|
gEndelf/beacon
|
53b0e47bf136f10fca2a2b3a399ac6e473fdb840
|
[
"MIT"
] | null | null | null |
beacon/beacon/migrations/0001_initial.py
|
gEndelf/beacon
|
53b0e47bf136f10fca2a2b3a399ac6e473fdb840
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
]
operations = [
migrations.CreateModel(
name='Repo',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('repo_type', models.CharField(default=b'github', max_length=15, choices=[(b'github', b'github'), (b'bitbucket', b'bitbucket')])),
('organization', models.CharField(max_length=70)),
('repo', models.CharField(max_length=70)),
('title', models.CharField(max_length=70)),
('is_active', models.BooleanField(default=True)),
('created_at', models.DateTimeField(auto_now_add=True)),
],
options={
'ordering': ('-created_at',),
},
),
migrations.AlterUniqueTogether(
name='repo',
unique_together=set([('repo_type', 'organization', 'repo')]),
),
]
| 34.090909
| 146
| 0.557333
|
7f06d325a6051521c2dd45b4ccd738c6f6dff4c7
| 30,318
|
py
|
Python
|
miper/api/xmlutil.py
|
MountainWei/miper
|
4dd6df51cb0e7d041b8dc7decebbcfb47a79f210
|
[
"Apache-2.0"
] | 1
|
2016-01-13T04:23:20.000Z
|
2016-01-13T04:23:20.000Z
|
miper/api/xmlutil.py
|
MountainWei/miper
|
4dd6df51cb0e7d041b8dc7decebbcfb47a79f210
|
[
"Apache-2.0"
] | 1
|
2015-12-17T09:58:04.000Z
|
2016-08-01T15:23:27.000Z
|
miper/api/xmlutil.py
|
MountainWei/miper
|
4dd6df51cb0e7d041b8dc7decebbcfb47a79f210
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2011 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import os.path
import re
from lxml import etree
import six
from miper.i18n import _
from miper import utils
XMLNS_V10 = 'https://www.openstack.org/mediawiki/miper/api/v1.0'
XMLNS_V11 = 'https://www.openstack.org/mediawiki/miper/api/v1.0'
XMLNS_COMMON_V10 = 'https://www.openstack.org/mediawiki/miper/api/v1.0'
XMLNS_ATOM = 'http://www.w3.org/2005/Atom'
XMLNS_MIPER_V1 = ('https://www.openstack.org/mediawiki/miper/1.0/content')
_split_pattern = re.compile(r'([^:{]*{[^}]*}[^:]*|[^:]+)')
def validate_schema(xml, schema_name):
if isinstance(xml, str):
xml = etree.fromstring(xml)
base_path = 'miper/api/schemas/v1.1/'
if schema_name in ('atom', 'atom-link'):
base_path = 'miper/api/schemas/'
schema_path = os.path.join(utils.miperdir(),
'%s%s.rng' % (base_path, schema_name))
schema_doc = etree.parse(schema_path)
relaxng = etree.RelaxNG(schema_doc)
relaxng.assertValid(xml)
class Selector(object):
"""Selects datum to operate on from an object."""
def __init__(self, *chain):
"""Initialize the selector.
Each argument is a subsequent index into the object.
"""
self.chain = chain
def __repr__(self):
"""Return a representation of the selector."""
return "Selector" + repr(self.chain)
def __call__(self, obj, do_raise=False):
"""Select a datum to operate on.
Selects the relevant datum within the object.
:param obj: The object from which to select the object.
:param do_raise: If False (the default), return None if the
indexed datum does not exist. Otherwise,
raise a KeyError.
"""
# Walk the selector list
for elem in self.chain:
# If it's callable, call it
if callable(elem):
obj = elem(obj)
else:
# Use indexing
try:
obj = obj[elem]
except (KeyError, IndexError):
# No sense going any further
if do_raise:
# Convert to a KeyError, for consistency
raise KeyError(elem)
return None
# Return the finally-selected object
return obj
def get_items(obj):
"""Get items in obj."""
return list(obj.items())
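# Illustrative sketch (not part of the original module): how Selector and
# get_items behave on a plain dictionary. The 'server' payload below is
# hypothetical.
def _selector_usage_example():
    """Demonstrate chained selection on a nested dict."""
    server = {'server': {'name': 'demo', 'addresses': ['10.0.0.1']}}
    name = Selector('server', 'name')(server)               # -> 'demo'
    first_ip = Selector('server', 'addresses', 0)(server)   # -> '10.0.0.1'
    missing = Selector('server', 'flavor')(server)          # -> None (no raise)
    pairs = get_items(server['server'])                     # -> [(key, value), ...]
    return name, first_ip, missing, pairs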
class EmptyStringSelector(Selector):
"""Returns the empty string if Selector would return None."""
def __call__(self, obj, do_raise=False):
"""Returns empty string if the selected value does not exist."""
try:
return super(EmptyStringSelector, self).__call__(obj, True)
except KeyError:
return ""
class ConstantSelector(object):
"""Returns a constant."""
def __init__(self, value):
"""Initialize the selector.
:param value: The value to return.
"""
self.value = value
def __repr__(self):
"""Return a representation of the selector."""
return repr(self.value)
def __call__(self, _obj, _do_raise=False):
"""Select a datum to operate on.
Returns a constant value. Compatible with
Selector.__call__().
"""
return self.value
class TemplateElement(object):
"""Represent an element in the template."""
def __init__(self, tag, attrib=None, selector=None, subselector=None,
**extra):
"""Initialize an element.
Initializes an element in the template. Keyword arguments
specify attributes to be set on the element; values must be
callables. See TemplateElement.set() for more information.
:param tag: The name of the tag to create.
:param attrib: An optional dictionary of element attributes.
:param selector: An optional callable taking an object and
optional boolean do_raise indicator and
returning the object bound to the element.
:param subselector: An optional callable taking an object and
optional boolean do_raise indicator and
returning the object bound to the element.
This is used to further refine the datum
object returned by selector in the event
that it is a list of objects.
"""
# Convert selector into a Selector
if selector is None:
selector = Selector()
elif not callable(selector):
selector = Selector(selector)
# Convert subselector into a Selector
if subselector is not None and not callable(subselector):
subselector = Selector(subselector)
self.tag = tag
self.selector = selector
self.subselector = subselector
self.attrib = {}
self._text = None
self._children = []
self._childmap = {}
# Run the incoming attributes through set() so that they
# become selectorized
if not attrib:
attrib = {}
attrib.update(extra)
for k, v in attrib.items():
self.set(k, v)
def __repr__(self):
"""Return a representation of the template element."""
return ('<%s.%s %r at %#x>' %
(self.__class__.__module__, self.__class__.__name__,
self.tag, id(self)))
def __len__(self):
"""Return the number of child elements."""
return len(self._children)
def __contains__(self, key):
"""Determine whether a child node named by key exists."""
return key in self._childmap
def __getitem__(self, idx):
"""Retrieve a child node by index or name."""
if isinstance(idx, six.string_types):
# Allow access by node name
return self._childmap[idx]
else:
return self._children[idx]
def append(self, elem):
"""Append a child to the element."""
# Unwrap templates...
elem = elem.unwrap()
# Avoid duplications
if elem.tag in self._childmap:
raise KeyError(elem.tag)
self._children.append(elem)
self._childmap[elem.tag] = elem
def extend(self, elems):
"""Append children to the element."""
# Pre-evaluate the elements
elemmap = {}
elemlist = []
for elem in elems:
# Unwrap templates...
elem = elem.unwrap()
# Avoid duplications
if elem.tag in self._childmap or elem.tag in elemmap:
raise KeyError(elem.tag)
elemmap[elem.tag] = elem
elemlist.append(elem)
# Update the children
self._children.extend(elemlist)
self._childmap.update(elemmap)
def insert(self, idx, elem):
"""Insert a child element at the given index."""
# Unwrap templates...
elem = elem.unwrap()
# Avoid duplications
if elem.tag in self._childmap:
raise KeyError(elem.tag)
self._children.insert(idx, elem)
self._childmap[elem.tag] = elem
def remove(self, elem):
"""Remove a child element."""
# Unwrap templates...
elem = elem.unwrap()
# Check if element exists
if elem.tag not in self._childmap or self._childmap[elem.tag] != elem:
raise ValueError(_('element is not a child'))
self._children.remove(elem)
del self._childmap[elem.tag]
def get(self, key):
"""Get an attribute.
Returns a callable which performs datum selection.
:param key: The name of the attribute to get.
"""
return self.attrib[key]
def set(self, key, value=None):
"""Set an attribute.
:param key: The name of the attribute to set.
:param value: A callable taking an object and optional boolean
do_raise indicator and returning the datum bound
to the attribute. If None, a Selector() will be
constructed from the key. If a string, a
Selector() will be constructed from the string.
"""
# Convert value to a selector
if value is None:
value = Selector(key)
elif not callable(value):
value = Selector(value)
self.attrib[key] = value
def keys(self):
"""Return the attribute names."""
return self.attrib.keys()
def items(self):
"""Return the attribute names and values."""
return self.attrib.items()
def unwrap(self):
"""Unwraps a template to return a template element."""
# We are a template element
return self
def wrap(self):
"""Wraps a template element to return a template."""
# Wrap in a basic Template
return Template(self)
def apply(self, elem, obj):
"""Apply text and attributes to an etree.Element.
Applies the text and attribute instructions in the template
element to an etree.Element instance.
:param elem: An etree.Element instance.
:param obj: The base object associated with this template
element.
"""
# Start with the text...
if self.text is not None:
elem.text = six.text_type(self.text(obj))
# Now set up all the attributes...
for key, value in self.attrib.items():
try:
elem.set(key, six.text_type(value(obj, True)))
except KeyError:
# Attribute has no value, so don't include it
pass
def getAttrib(self, obj):
"""Get attribute."""
tmpattrib = {}
# Now set up all the attributes...
for key, value in self.attrib.items():
try:
tmpattrib[key] = value(obj)
except KeyError:
# Attribute has no value, so don't include it
pass
return tmpattrib
@staticmethod
def _splitTagName(name):
return _split_pattern.findall(name)
def _render(self, parent, datum, patches, nsmap):
"""Internal rendering.
Renders the template node into an etree.Element object.
Returns the etree.Element object.
:param parent: The parent etree.Element instance.
:param datum: The datum associated with this template element.
:param patches: A list of other template elements that must
also be applied.
:param nsmap: An optional namespace dictionary to be
associated with the etree.Element instance.
"""
# Allocate a node
if callable(self.tag):
tagname = self.tag(datum)
else:
tagname = self.tag
        # Collect the attributes only when we actually have a datum
if datum is not None:
tmpattrib = self.getAttrib(datum)
else:
tmpattrib = {}
tagnameList = self._splitTagName(tagname)
insertIndex = 0
        # If the parent is not None and already has a matching child, descend into it
if parent is not None:
for i in range(0, len(tagnameList)):
tmpInsertPos = parent.find(tagnameList[i])
if tmpInsertPos is None:
break
                elif parent.attrib != tmpattrib:
break
parent = tmpInsertPos
insertIndex = i + 1
if insertIndex >= len(tagnameList):
insertIndex = insertIndex - 1
# Create root elem
elem = etree.Element(tagnameList[insertIndex], nsmap=nsmap)
rootelem = elem
subelem = elem
# Create subelem
for i in range((insertIndex + 1), len(tagnameList)):
subelem = etree.SubElement(elem, tagnameList[i])
elem = subelem
# If we have a parent, append the node to the parent
if parent is not None:
# If we can merge this element, then insert
if insertIndex > 0:
parent.insert(len(list(parent)), rootelem)
else:
parent.append(rootelem)
# If the datum is None, do nothing else
if datum is None:
return rootelem
# Apply this template element to the element
self.apply(subelem, datum)
# Additionally, apply the patches
for patch in patches:
patch.apply(subelem, datum)
# We have fully rendered the element; return it
return rootelem
def render(self, parent, obj, patches=None, nsmap=None):
"""Render an object.
Renders an object against this template node. Returns a list
of two-item tuples, where the first item is an etree.Element
instance and the second item is the datum associated with that
instance.
:param parent: The parent for the etree.Element instances.
:param obj: The object to render this template element
against.
:param patches: A list of other template elements to apply
when rendering this template element.
:param nsmap: An optional namespace dictionary to attach to
the etree.Element instances.
"""
patches = patches or []
# First, get the datum we're rendering
data = None if obj is None else self.selector(obj)
# Check if we should render at all
if not self.will_render(data):
return []
elif data is None:
return [(self._render(parent, None, patches, nsmap), None)]
# Make the data into a list if it isn't already
if not isinstance(data, list):
data = [data]
elif parent is None:
raise ValueError(_('root element selecting a list'))
# Render all the elements
elems = []
for datum in data:
if self.subselector is not None:
datum = self.subselector(datum)
elems.append((self._render(parent, datum, patches, nsmap), datum))
# Return all the elements rendered, as well as the
# corresponding datum for the next step down the tree
return elems
def will_render(self, datum):
"""Hook method.
An overridable hook method to determine whether this template
element will be rendered at all. By default, returns False
(inhibiting rendering) if the datum is None.
:param datum: The datum associated with this template element.
"""
# Don't render if datum is None
return datum is not None
def _text_get(self):
"""Template element text.
Either None or a callable taking an object and optional
boolean do_raise indicator and returning the datum bound to
the text of the template element.
"""
return self._text
def _text_set(self, value):
# Convert value to a selector
if value is not None and not callable(value):
value = Selector(value)
self._text = value
def _text_del(self):
self._text = None
text = property(_text_get, _text_set, _text_del)
def tree(self):
"""Return string representation of the template tree.
Returns a representation of the template rooted at this
element as a string, suitable for inclusion in debug logs.
"""
# Build the inner contents of the tag...
contents = [self.tag, '!selector=%r' % self.selector]
# Add the text...
if self.text is not None:
contents.append('!text=%r' % self.text)
# Add all the other attributes
for key, value in self.attrib.items():
contents.append('%s=%r' % (key, value))
# If there are no children, return it as a closed tag
if len(self) == 0:
return '<%s/>' % ' '.join([str(i) for i in contents])
# OK, recurse to our children
children = [c.tree() for c in self]
# Return the result
return ('<%s>%s</%s>' %
(' '.join(contents), ''.join(children), self.tag))
def SubTemplateElement(parent, tag, attrib=None, selector=None,
subselector=None, **extra):
"""Create a template element as a child of another.
Corresponds to the etree.SubElement interface. Parameters are as
for TemplateElement, with the addition of the parent.
"""
# Convert attributes
attrib = attrib or {}
attrib.update(extra)
# Get a TemplateElement
elem = TemplateElement(tag, attrib=attrib, selector=selector,
subselector=subselector)
# Append the parent safely
if parent is not None:
parent.append(elem)
return elem
class Template(object):
"""Represent a template."""
def __init__(self, root, nsmap=None):
"""Initialize a template.
:param root: The root element of the template.
:param nsmap: An optional namespace dictionary to be
associated with the root element of the
template.
"""
self.root = root.unwrap() if root is not None else None
self.nsmap = nsmap or {}
self.serialize_options = dict(encoding='UTF-8', xml_declaration=True)
def _serialize(self, parent, obj, siblings, nsmap=None):
"""Internal serialization.
Recursive routine to build a tree of etree.Element instances
from an object based on the template. Returns the first
etree.Element instance rendered, or None.
:param parent: The parent etree.Element instance. Can be
None.
:param obj: The object to render.
:param siblings: The TemplateElement instances against which
to render the object.
:param nsmap: An optional namespace dictionary to be
associated with the etree.Element instance
rendered.
"""
# First step, render the element
elems = siblings[0].render(parent, obj, siblings[1:], nsmap)
# Now, traverse all child elements
seen = set()
for idx, sibling in enumerate(siblings):
for child in sibling:
# Have we handled this child already?
if child.tag in seen:
continue
seen.add(child.tag)
# Determine the child's siblings
nieces = [child]
for sib in siblings[idx + 1:]:
if child.tag in sib:
nieces.append(sib[child.tag])
# Now call this function for all data elements recursively
for elem, datum in elems:
self._serialize(elem, datum, nieces)
# Return the first element; at the top level, this will be the
# root element
if elems:
return elems[0][0]
def serialize(self, obj, *args, **kwargs):
"""Serialize an object.
Serializes an object against the template. Returns a string
with the serialized XML. Positional and keyword arguments are
passed to etree.tostring().
:param obj: The object to serialize.
"""
elem = self.make_tree(obj)
if elem is None:
return ''
for k, v in self.serialize_options.items():
kwargs.setdefault(k, v)
# Serialize it into XML
return etree.tostring(elem, *args, **kwargs)
def make_tree(self, obj):
"""Create a tree.
Serializes an object against the template. Returns an Element
node with appropriate children.
:param obj: The object to serialize.
"""
# If the template is empty, return the empty string
if self.root is None:
return None
# Get the siblings and nsmap of the root element
siblings = self._siblings()
nsmap = self._nsmap()
# Form the element tree
return self._serialize(None, obj, siblings, nsmap)
def _siblings(self):
"""Hook method for computing root siblings.
An overridable hook method to return the siblings of the root
element. By default, this is the root element itself.
"""
return [self.root]
def _nsmap(self):
"""Hook method for computing the namespace dictionary.
An overridable hook method to return the namespace dictionary.
"""
return self.nsmap.copy()
def unwrap(self):
"""Unwraps a template to return a template element."""
# Return the root element
return self.root
def wrap(self):
"""Wraps a template element to return a template."""
# We are a template
return self
def apply(self, master):
"""Hook method for determining slave applicability.
An overridable hook method used to determine if this template
is applicable as a slave to a given master template.
:param master: The master template to test.
"""
return True
def tree(self):
"""Return string representation of the template tree.
Returns a representation of the template as a string, suitable
for inclusion in debug logs.
"""
return "%r: %s" % (self, self.root.tree())
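# Illustrative sketch (not part of the original module): building a tiny
# template and serializing a dictionary with it. The 'server' resource and
# its fields are hypothetical.
def _template_usage_example():
    """Serialize {'server': {...}} into a <server id=... name=.../> element."""
    root = TemplateElement('server', selector='server', id='id', name='name')
    tmpl = Template(root)
    # Returns bytes along the lines of:
    #   <?xml version='1.0' encoding='UTF-8'?><server id="1" name="demo"/>
    return tmpl.serialize({'server': {'id': '1', 'name': 'demo'}})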
class MasterTemplate(Template):
"""Represent a master template.
Master templates are versioned derivatives of templates that
additionally allow slave templates to be attached. Slave
templates allow modification of the serialized result without
directly changing the master.
"""
def __init__(self, root, version, nsmap=None):
"""Initialize a master template.
:param root: The root element of the template.
:param version: The version number of the template.
:param nsmap: An optional namespace dictionary to be
associated with the root element of the
template.
"""
super(MasterTemplate, self).__init__(root, nsmap)
self.version = version
self.slaves = []
def __repr__(self):
"""Return string representation of the template."""
return ("<%s.%s object version %s at %#x>" %
(self.__class__.__module__, self.__class__.__name__,
self.version, id(self)))
def _siblings(self):
"""Hook method for computing root siblings.
An overridable hook method to return the siblings of the root
element. This is the root element plus the root elements of
all the slave templates.
"""
return [self.root] + [slave.root for slave in self.slaves]
def _nsmap(self):
"""Hook method for computing the namespace dictionary.
An overridable hook method to return the namespace dictionary.
The namespace dictionary is computed by taking the master
template's namespace dictionary and updating it from all the
slave templates.
"""
nsmap = self.nsmap.copy()
for slave in self.slaves:
nsmap.update(slave._nsmap())
return nsmap
def attach(self, *slaves):
"""Attach one or more slave templates.
Attaches one or more slave templates to the master template.
Slave templates must have a root element with the same tag as
the master template. The slave template's apply() method will
be called to determine if the slave should be applied to this
master; if it returns False, that slave will be skipped.
(This allows filtering of slaves based on the version of the
master template.)
"""
slave_list = []
for slave in slaves:
slave = slave.wrap()
# Make sure we have a tree match
if slave.root.tag != self.root.tag:
msg = (_("Template tree mismatch; adding slave %(slavetag)s "
"to master %(mastertag)s") %
{'slavetag': slave.root.tag,
'mastertag': self.root.tag})
raise ValueError(msg)
# Make sure slave applies to this template
if not slave.apply(self):
continue
slave_list.append(slave)
# Add the slaves
self.slaves.extend(slave_list)
def copy(self):
"""Return a copy of this master template."""
# Return a copy of the MasterTemplate
tmp = self.__class__(self.root, self.version, self.nsmap)
tmp.slaves = self.slaves[:]
return tmp
class SlaveTemplate(Template):
"""Represent a slave template.
Slave templates are versioned derivatives of templates. Each
slave has a minimum version and optional maximum version of the
master template to which they can be attached.
"""
def __init__(self, root, min_vers, max_vers=None, nsmap=None):
"""Initialize a slave template.
:param root: The root element of the template.
:param min_vers: The minimum permissible version of the master
template for this slave template to apply.
:param max_vers: An optional upper bound for the master
template version.
:param nsmap: An optional namespace dictionary to be
associated with the root element of the
template.
"""
super(SlaveTemplate, self).__init__(root, nsmap)
self.min_vers = min_vers
self.max_vers = max_vers
def __repr__(self):
"""Return string representation of the template."""
return ("<%s.%s object versions %s-%s at %#x>" %
(self.__class__.__module__, self.__class__.__name__,
self.min_vers, self.max_vers, id(self)))
def apply(self, master):
"""Hook method for determining slave applicability.
An overridable hook method used to determine if this template
is applicable as a slave to a given master template. This
version requires the master template to have a version number
between min_vers and max_vers.
:param master: The master template to test.
"""
# Does the master meet our minimum version requirement?
if master.version < self.min_vers:
return False
# How about our maximum version requirement?
if self.max_vers is not None and master.version > self.max_vers:
return False
return True
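# Illustrative sketch (not part of the original module): extending a master
# template with a slave that contributes a <status> child element. The tags
# and data are hypothetical.
def _master_slave_usage_example():
    master_root = TemplateElement('server', selector='server', name='name')
    master = MasterTemplate(master_root, 1)
    slave_root = TemplateElement('server', selector='server')
    status = SubTemplateElement(slave_root, 'status')
    status.text = 'status'
    master.attach(SlaveTemplate(slave_root, 1))
    # Yields XML along the lines of:
    #   <server name="demo"><status>ACTIVE</status></server>
    return master.serialize({'server': {'name': 'demo', 'status': 'ACTIVE'}})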
class TemplateBuilder(object):
"""Template builder.
This class exists to allow templates to be lazily built without
having to build them each time they are needed. It must be
subclassed, and the subclass must implement the construct()
method, which must return a Template (or subclass) instance. The
constructor will always return the template returned by
construct(), or, if it has a copy() method, a copy of that
template.
"""
_tmpl = None
def __new__(cls, copy=True):
"""Construct and return a template.
:param copy: If True (the default), a copy of the template
will be constructed and returned, if possible.
"""
# Do we need to construct the template?
if cls._tmpl is None:
tmp = super(TemplateBuilder, cls).__new__(cls)
# Construct the template
cls._tmpl = tmp.construct()
# If the template has a copy attribute, return the result of
# calling it
if copy and hasattr(cls._tmpl, 'copy'):
return cls._tmpl.copy()
# Return the template
return cls._tmpl
def construct(self):
"""Construct a template.
Called to construct a template instance, which it must return.
Only called once.
"""
raise NotImplementedError(_("subclasses must implement construct()!"))
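# Illustrative sketch (not part of the original module): a concrete
# TemplateBuilder subclass that lazily builds and caches a MasterTemplate for
# a hypothetical 'server' resource. Instantiating it returns a copy of the
# cached template.
class _ServerTemplateBuilder(TemplateBuilder):
    def construct(self):
        root = TemplateElement('server', selector='server', name='name')
        return MasterTemplate(root, 1)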
def make_links(parent, selector=None):
"""Attach an Atom <links> element to the parent."""
elem = SubTemplateElement(parent, '{%s}link' % XMLNS_ATOM,
selector=selector)
elem.set('rel')
elem.set('type')
elem.set('href')
# Just for completeness...
return elem
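# Illustrative sketch (not part of the original module): attaching Atom
# <link> elements to a template. The 'links' data below is hypothetical.
def _make_links_usage_example():
    root = TemplateElement('server', selector='server')
    make_links(root, 'links')
    tmpl = MasterTemplate(root, 1, nsmap={'atom': XMLNS_ATOM})
    # Each dict under obj['server']['links'] is rendered as an atom:link
    # element carrying its rel/type/href keys as attributes, e.g.:
    #   <server xmlns:atom="..."><atom:link href="..." rel="self"/></server>
    return tmpl.serialize(
        {'server': {'links': [{'rel': 'self',
                               'href': 'http://example.com/servers/1'}]}})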
def make_flat_dict(name, selector=None, subselector=None, ns=None):
"""Utility for simple XML templates.
Simple templates are templates that traditionally used
XMLDictSerializer with no metadata.
Returns a template element where the top-level element has the
given tag name, and where sub-elements have tag names derived
from the object's keys and text derived from the object's values.
This only works for flat dictionary objects, not dictionaries
containing nested lists or dictionaries.
"""
# Set up the names we need...
if ns is None:
elemname = name
tagname = Selector(0)
else:
elemname = '{%s}%s' % (ns, name)
tagname = lambda obj, do_raise=False: '{%s}%s' % (ns, obj[0])
if selector is None:
selector = name
# Build the root element
root = TemplateElement(elemname, selector=selector,
subselector=subselector)
# Build an element to represent all the keys and values
elem = SubTemplateElement(root, tagname, selector=get_items)
elem.text = 1
# Return the template
return root
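# Illustrative sketch (not part of the original module): make_flat_dict turns
# a flat dictionary into sibling elements named after its keys. The
# 'metadata' payload is hypothetical.
def _flat_dict_usage_example():
    tmpl = MasterTemplate(make_flat_dict('metadata'), 1)
    # Yields XML along the lines of:
    #   <metadata><region>us-east</region><tier>gold</tier></metadata>
    return tmpl.serialize({'metadata': {'region': 'us-east', 'tier': 'gold'}})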
| 31.385093
| 78
| 0.594663
|
5a9246987c5fd1244b0fe91b4b2e14856bc33a4e
| 178
|
py
|
Python
|
allennlp/common/testing/__init__.py
|
annaproxy/udify-metalearning
|
55206a3aac0aba74a3615a36192d03b6467cfd6f
|
[
"MIT"
] | 65
|
2020-11-13T05:36:29.000Z
|
2022-03-26T22:45:46.000Z
|
allennlp/common/testing/__init__.py
|
annaproxy/udify-metalearning
|
55206a3aac0aba74a3615a36192d03b6467cfd6f
|
[
"MIT"
] | 11
|
2021-05-26T16:22:17.000Z
|
2022-03-02T04:03:18.000Z
|
allennlp/common/testing/__init__.py
|
annaproxy/udify-metalearning
|
55206a3aac0aba74a3615a36192d03b6467cfd6f
|
[
"MIT"
] | 10
|
2019-12-06T11:32:37.000Z
|
2022-01-06T15:39:09.000Z
|
"""
Utilities and helpers for writing tests.
"""
from allennlp.common.testing.test_case import AllenNlpTestCase
from allennlp.common.testing.model_test_case import ModelTestCase
| 29.666667
| 65
| 0.837079
|
743d69e389a72fce9401caa823ba6554fa6e06af
| 4,724
|
py
|
Python
|
classify/model.py
|
gallupliu/QA
|
0e284dd17e27ea9384a1e4d7a4c206eb95e4bf7f
|
[
"Apache-2.0"
] | 3
|
2017-09-06T07:10:05.000Z
|
2019-08-01T03:27:39.000Z
|
classify/model.py
|
gallupliu/QA
|
0e284dd17e27ea9384a1e4d7a4c206eb95e4bf7f
|
[
"Apache-2.0"
] | 2
|
2018-01-25T14:46:40.000Z
|
2018-01-25T14:53:13.000Z
|
classify/model.py
|
gallupliu/QA
|
0e284dd17e27ea9384a1e4d7a4c206eb95e4bf7f
|
[
"Apache-2.0"
] | null | null | null |
# encoding: utf-8
"""
@author: gallupliu
@contact: gallup-liu@hotmail.com
@version: 1.0
@license: Apache Licence
@file: model.py
@time: 2018/3/5 22:26
"""
import tensorflow as tf
import numpy as np
class TextCNN(object):
"""
A CNN for text classification.
Uses an embedding layer, followed by a convolutional, max-pooling and softmax layer.
"""
    def __init__(
            self, inputs, labels, w2v_model, sequence_length, num_classes,
            filter_sizes, num_filters, l2_reg_lambda=0.0):
# Placeholders for input, output and dropout
self.embeddings = w2v_model
self.input_x = inputs
self.input_y = tf.cast(labels,tf.float32)
# self.input_x = tf.placeholder(tf.int32, [None, sequence_length], name="input_x")
# self.input_y = tf.placeholder(tf.float32, [None, num_classes], name="input_y")
self.dropout_keep_prob = tf.placeholder(tf.float32, name="dropout_keep_prob")
# Keeping track of l2 regularization loss (optional)
l2_loss = tf.constant(0.0)
# Embedding layer
with tf.device('/cpu:0'), tf.name_scope("embedding"):
self.W = tf.Variable(tf.to_float(self.embeddings), trainable=True, name="W")
self.embedded_chars = tf.nn.embedding_lookup(self.W, self.input_x)
self.embedded_chars_expanded = tf.expand_dims(self.embedded_chars, -1)
# Create a convolution + maxpool layer for each filter size
pooled_outputs = []
for i, filter_size in enumerate(filter_sizes):
with tf.name_scope("conv-maxpool-%s" % filter_size):
# Convolution Layer
filter_shape = [filter_size, 50, 1, num_filters]
W = tf.Variable(tf.truncated_normal(filter_shape, stddev=0.1),dtype=tf.float32, name="W")
b = tf.Variable(tf.constant(0.1, shape=[num_filters]),dtype=tf.float32, name="b")
conv = tf.nn.conv2d(
self.embedded_chars_expanded,
W,
strides=[1, 1, 1, 1],
padding="VALID",
name="conv")
# Apply nonlinearity
h = tf.nn.relu(tf.nn.bias_add(conv, b), name="relu")
# Maxpooling over the outputs
pooled = tf.nn.max_pool(
h,
ksize=[1, sequence_length - filter_size + 1, 1, 1],
strides=[1, 1, 1, 1],
padding='VALID',
name="pool")
pooled_outputs.append(pooled)
# Combine all the pooled features
num_filters_total = num_filters * len(filter_sizes)
self.h_pool = tf.concat(pooled_outputs, 3)
self.h_pool_flat = tf.reshape(self.h_pool, [-1, num_filters_total])
# Add dropout
with tf.name_scope("dropout"):
self.h_drop = tf.nn.dropout(self.h_pool_flat, self.dropout_keep_prob)
# Final (unnormalized) scores and predictions
with tf.name_scope("output"):
W = tf.get_variable(
"W",
shape=[num_filters_total, num_classes],
initializer=tf.contrib.layers.xavier_initializer())
b = tf.Variable(tf.constant(0.1, shape=[num_classes]), name="b")
l2_loss += tf.nn.l2_loss(b)
self.scores = tf.nn.xw_plus_b(self.h_drop, W, b, name="scores")
self.predictions = tf.argmax(self.scores, 1, name="predictions")
# CalculateMean cross-entropy loss
with tf.name_scope("loss"):
losses = tf.nn.softmax_cross_entropy_with_logits(logits=self.scores, labels=self.input_y)
self.loss = tf.reduce_mean(losses) + l2_reg_lambda * l2_loss
# Accuracy
with tf.name_scope("accuracy"):
correct_predictions = tf.equal(self.predictions, tf.argmax(self.input_y, 1))
self.accuracy = tf.reduce_mean(tf.cast(correct_predictions, "float"), name="accuracy")
self.train_op = self.train_op(self.loss)
    def train_op(self, loss):
        """Build the training op.
        :param loss: scalar loss tensor to minimize.
        :return: an op that applies globally-clipped gradients with SGD and
            an exponentially decaying learning rate.
        """
        self.global_step = tf.Variable(0, name="global_step", trainable=False)
tvars = tf.trainable_variables()
grads, _ = tf.clip_by_global_norm(tf.gradients(loss, tvars),5)
# optimizer = tf.train.GradientDescentOptimizer(lstm.lr)
self.lr = tf.train.exponential_decay(0.1,self.global_step,1000,0.95,staircase=True)
        optimizer = tf.train.GradientDescentOptimizer(self.lr)
        # Apply the clipped gradients once and advance the global step.
        train_op = optimizer.apply_gradients(zip(grads, tvars), global_step=self.global_step)
return train_op
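if __name__ == '__main__':
    # Minimal usage sketch (not part of the original file). Assumes
    # TensorFlow 1.x; the vocabulary size, sequence length and class count
    # are illustrative. The convolution filters above hard-code an embedding
    # width of 50, so the embedding matrix must have 50 columns.
    vocab_size, seq_len, n_classes = 5000, 100, 2
    w2v = np.random.uniform(-1.0, 1.0, (vocab_size, 50)).astype(np.float32)
    x = tf.placeholder(tf.int32, [None, seq_len], name='input_x')
    y = tf.placeholder(tf.float32, [None, n_classes], name='input_y')
    model = TextCNN(x, y, w2v, sequence_length=seq_len, num_classes=n_classes,
                    filter_sizes=[3, 4, 5], num_filters=128)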
| 41.078261
| 112
| 0.606689
|