| column | dtype | range |
|---|---|---|
| hexsha | string | length 40 to 40 |
| size | int64 | 3 to 1.03M |
| ext | string | 10 classes |
| lang | string | 1 class |
| max_stars_repo_path | string | length 3 to 972 |
| max_stars_repo_name | string | length 6 to 130 |
| max_stars_repo_head_hexsha | string | length 40 to 78 |
| max_stars_repo_licenses | list | length 1 to 10 |
| max_stars_count | int64 | 1 to 191k (nullable ⌀) |
| max_stars_repo_stars_event_min_datetime | string | length 24 to 24 (nullable ⌀) |
| max_stars_repo_stars_event_max_datetime | string | length 24 to 24 (nullable ⌀) |
| max_issues_repo_path | string | length 3 to 972 |
| max_issues_repo_name | string | length 6 to 130 |
| max_issues_repo_head_hexsha | string | length 40 to 78 |
| max_issues_repo_licenses | list | length 1 to 10 |
| max_issues_count | int64 | 1 to 116k (nullable ⌀) |
| max_issues_repo_issues_event_min_datetime | string | length 24 to 24 (nullable ⌀) |
| max_issues_repo_issues_event_max_datetime | string | length 24 to 24 (nullable ⌀) |
| max_forks_repo_path | string | length 3 to 972 |
| max_forks_repo_name | string | length 6 to 130 |
| max_forks_repo_head_hexsha | string | length 40 to 78 |
| max_forks_repo_licenses | list | length 1 to 10 |
| max_forks_count | int64 | 1 to 105k (nullable ⌀) |
| max_forks_repo_forks_event_min_datetime | string | length 24 to 24 (nullable ⌀) |
| max_forks_repo_forks_event_max_datetime | string | length 24 to 24 (nullable ⌀) |
| content | string | length 3 to 1.03M |
| avg_line_length | float64 | 1.13 to 941k |
| max_line_length | int64 | 2 to 941k |
| alphanum_fraction | float64 | 0 to 1 |
hexsha: a3153a767e4fae2ec5fa88a9d97f1d0ae0b5c7fe | size: 2,657 | ext: bzl | lang: Python
repo path: iree/lit_test.bzl | repo name: rsuderman/iree | head hexsha: fa5faf0a254db3311dafacc70c383a7469376095 | licenses: ["Apache-2.0"] (same values in the max_stars / max_issues / max_forks column groups)
max_stars_count: 1 | stars events: 2020-08-16T17:38:49.000Z to 2020-08-16T17:38:49.000Z | max_issues_count: null | max_forks_count: null | issues/forks event datetimes: null
content:
# Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Bazel macros for running lit tests."""
def iree_lit_test(
name,
test_file,
data,
size = "small",
driver = "//iree/tools:run_lit.sh",
**kwargs):
"""Creates a lit test from the specified source file.
Args:
name: name of the generated test suite.
test_file: the test file with the lit test
data: binaries used in the lit tests.
size: size of the tests.
driver: the shell runner for the lit tests.
**kwargs: Any additional arguments that will be passed to the underlying sh_test.
"""
native.sh_test(
name = name,
srcs = [driver],
size = size,
data = data + [test_file],
args = ["$(location %s)" % (test_file,)],
**kwargs
)
def iree_lit_test_suite(
name,
data,
srcs,
size = "small",
driver = "//iree/tools:run_lit.sh",
tags = [],
**kwargs):
"""Creates one lit test per source file and a test suite that bundles them.
Args:
name: name of the generated test suite.
data: binaries used in the lit tests.
srcs: test file sources.
size: size of the tests.
driver: the shell runner for the lit tests.
tags: tags to apply to the test. Note that as in standard test suites, manual
is treated specially and will also apply to the test suite itself.
**kwargs: Any additional arguments that will be passed to the underlying tests.
"""
tests = []
for test_file in srcs:
test_name = "%s_%s_test" % (name, test_file)
iree_lit_test(
name = test_name,
test_file = test_file,
size = size,
data = data,
driver = driver,
**kwargs
)
tests.append(test_name)
native.test_suite(
name = name,
tests = tests,
# Note that only the manual tag really has any effect here. Others are
# used for test suite filtering, but all tests are passed the same tags.
tags = tags,
)
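# A minimal usage sketch of the macro above (hypothetical: the target names,
# file glob, and data dependency below are illustrative, not taken from the
# IREE repository):
#
#   load("//iree:lit_test.bzl", "iree_lit_test_suite")
#
#   iree_lit_test_suite(
#       name = "lit",
#       srcs = glob(["*.mlir"]),
#       data = ["//iree/tools:some_test_tool"],
#   )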
avg_line_length: 32.012048 | max_line_length: 87 | alphanum_fraction: 0.614603
hexsha: 9f1f13c5c0bde4da98520b7508fbb0851295a4a0 | size: 3,991 | ext: py | lang: Python
repo path: pykeyvi/tests/dictionary/near_test.py | repo name: ssherko/keyvi | head hexsha: b8e57e6207244cc570e0d7d3970015aa1faf4254 | licenses: ["Apache-2.0"] (same values in the max_stars / max_issues / max_forks column groups)
max_stars_count: null | max_issues_count: null | max_forks_count: null | all event datetimes: null
content:
# -*- coding: utf-8 -*-
# Usage: py.test tests
import contextlib
import os
import pykeyvi
import sys
root = os.path.dirname(os.path.abspath(__file__))
sys.path.append(os.path.join(root, "../"))
from test_tools import tmp_dictionary
def test_near():
c=pykeyvi.JsonDictionaryCompiler()
c.Add("zahnarzt:u0we9yykdyum", '["a" : 2]')
c.Add("zahnarzt:u1h2fde2kct3", '["a" : 3]')
c.Add("zahnarzt:u1huf1q5cnxn", '["a" : 4]')
c.Add("zahnarzt:u0y2dvey61sw", '["a" : 5]')
c.Add("zahnarzt:u1hvqmmj801r", '["a" : 6]')
c.Add("zahnarzt:u0vvmknrwgmj", '["a" : 7]')
c.Add("zahnarzt:u0ypv22fb9q3", '["a" : 8]')
c.Add("zahnarzt:u1qcvvw0hxe1", '["a" : 9]')
c.Add("zahnarzt:u1xjx6yfvfz2", '["a" : 10]')
c.Add("zahnarzt:u1q0gkqsenhf", '["a" : 11]')
with tmp_dictionary(c, 'near_simple.kv') as d:
assert(len(list(d.GetNear("zahnarzt:u1q0gkqsenhf", 12))) == 1)
assert(len(list(d.GetNear("zahnarzt:u1h0gkqsenhf", 12))) == 3)
assert(len(list(d.GetNear("zahnarzt:u1h0gkqsenhf", 13))) == 0)
assert(len(list(d.GetNear("zahnarzt:u0h0gkqsenhf", 10))) == 4)
def test_near_greedy():
c=pykeyvi.JsonDictionaryCompiler()
c.Add("zahnarzt:u0we9yykdyum", '["a" : 2]')
c.Add("zahnarzt:u1h2fde2kct3", '["a" : 3]')
c.Add("zahnarzt:u1huf1q5cnxn", '["a" : 4]')
c.Add("zahnarzt:u0y2dvey61sw", '["a" : 5]')
c.Add("zahnarzt:u1hvqmmj801r", '["a" : 6]')
c.Add("zahnarzt:u0vvmknrwgmj", '["a" : 7]')
c.Add("zahnarzt:u0ypv22fb9q3", '["a" : 8]')
c.Add("zahnarzt:u1qcvvw0hxe1", '["a" : 9]')
c.Add("zahnarzt:u1xjx6yfvfz2", '["a" : 10]')
c.Add("zahnarzt:u1q0gkqsenhf", '["a" : 11]')
with tmp_dictionary(c, 'near_greedy.kv') as d:
assert(len(list(d.GetNear("zahnarzt:u1q0gkqsenhf", 12, True))) == 2)
assert(len(list(d.GetNear("zahnarzt:u1h0gkqsenhf", 12, True))) == 3)
assert(len(list(d.GetNear("zahnarzt:u1h0gkqsenhf", 13, True))) == 0)
assert(len(list(d.GetNear("zahnarzt:u0h0gkqsenhf", 10, True))) == 10)
greedy = [x.GetMatchedString() for x in d.GetNear("zahnarzt:u0h0gkqsenhf", 10, True)]
non_greedy = [x.GetMatchedString() for x in d.GetNear("zahnarzt:u0h0gkqsenhf", 10, False)]
assert greedy[:len(non_greedy)] == non_greedy
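# Note on the assert above: with the third (greedy) argument set to True,
# GetNear first yields exactly the matches of the non-greedy call and then
# keeps yielding additional ones (10 instead of 4 for the same query above).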
def test_near_score():
c=pykeyvi.JsonDictionaryCompiler()
c.Add("zahnarzt:u0we9yykdyum", '["a" : 2]')
c.Add("zahnarzt:u1h2fde2kct3", '["a" : 3]')
c.Add("zahnarzt:u1huf1q5cnxn", '["a" : 4]')
c.Add("zahnarzt:u0y2dvey61sw", '["a" : 5]')
c.Add("zahnarzt:u1hvqmmj801r", '["a" : 6]')
c.Add("zahnarzt:u0vvmknrwgmj", '["a" : 7]')
c.Add("zahnarzt:u0ypv22fb9q3", '["a" : 8]')
c.Add("zahnarzt:u1qcvvw0hxe1", '["a" : 9]')
c.Add("zahnarzt:u1xjx6yfvfz2", '["a" : 10]')
c.Add("zahnarzt:u1q0gkqsenhf", '["a" : 11]')
c.Add("zahnarzt:u0h0gkqsenhf", '["a" : 11]')
with tmp_dictionary(c, 'near_score.kv') as d:
greedy = list(d.GetNear("zahnarzt:u0h0gkqsenhf", 10, True))
assert greedy[0].GetScore() == 21
for m in greedy[1:5]:
assert m.GetScore() == 11
for m in greedy[5:]:
assert m.GetScore() == 10
def test_near_less_precission():
c=pykeyvi.JsonDictionaryCompiler()
c.Add("zahnarzt:u0we9", '["a" : 2]')
c.Add("zahnarzt:u1h2f", '["a" : 3]')
c.Add("zahnarzt:u1huf", '["a" : 4]')
with tmp_dictionary(c, 'near_less_precission.kv') as d:
assert(len(list(d.GetNear("zahnarzt:u1h0gkqsenhf", 12))) == 2)
assert(len(list(d.GetNear("zahnarzt:u1h0gkqsenhf", 13))) == 0)
def test_near_broken_input():
c=pykeyvi.JsonDictionaryCompiler()
c.Add("zahnarzt:u0we9", '["a" : 2]')
c.Add("zahnarzt:u1h2f", '["a" : 3]')
c.Add("zahnarzt:u1huf", '["a" : 4]')
with tmp_dictionary(c, 'near_broken.kv') as d:
assert(len(list(d.GetNear("zahnarzt:u1h", 12))) == 2)
assert(len(list(d.GetNear("zahnarzt:u", 13))) == 0)
assert(len(list(d.GetNear("zahnarzt:u1", 12))) == 0)
avg_line_length: 40.72449 | max_line_length: 98 | alphanum_fraction: 0.59985
hexsha: 7c39bcc595854e43391b207e30ba90c61471a59a | size: 55,582 | ext: py | lang: Python
repo path: qiskit/visualization/matplotlib.py | repo name: errvnd/qiskit-terra | head hexsha: c9c6d46cae3e48f06f4513be9dc0de3a49128424 | licenses: ["Apache-2.0"] (same values in the max_stars / max_issues / max_forks column groups)
max_stars_count: 1 | stars events: 2021-10-13T14:37:54.000Z to 2021-10-13T14:37:54.000Z | max_issues_count: null | max_forks_count: null | issues/forks event datetimes: null
content:
# -*- coding: utf-8 -*-
# This code is part of Qiskit.
#
# (C) Copyright IBM 2017, 2018.
#
# This code is licensed under the Apache License, Version 2.0. You may
# obtain a copy of this license in the LICENSE.txt file in the root directory
# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
#
# Any modifications or derivative works of this code must retain this
# copyright notice, and modified files need to carry a notice indicating
# that they have been altered from the originals.
# pylint: disable=invalid-name,anomalous-backslash-in-string,missing-docstring
"""mpl circuit visualization backend."""
import collections
import fractions
import itertools
import json
import logging
import math
import numpy as np
try:
from matplotlib import patches
    from matplotlib import pyplot as plt
    from matplotlib import gridspec
HAS_MATPLOTLIB = True
except ImportError:
HAS_MATPLOTLIB = False
from qiskit.visualization import exceptions
from qiskit.visualization import interpolation
from qiskit.visualization.qcstyle import (OPStylePulse, OPStyleSched,
DefaultStyle, BWStyle)
from qiskit.pulse.channels import (DriveChannel, ControlChannel, MeasureChannel,
AcquireChannel, SnapshotChannel)
from qiskit.pulse import (SamplePulse, FrameChange, PersistentValue, Snapshot, Acquire,
PulseError)
from qiskit import user_config
logger = logging.getLogger(__name__)
Register = collections.namedtuple('Register', 'reg index')
WID = 0.65
HIG = 0.65
DEFAULT_SCALE = 4.3
PORDER_GATE = 5
PORDER_LINE = 2
PORDER_GRAY = 3
PORDER_TEXT = 6
PORDER_SUBP = 4
class Anchor:
def __init__(self, reg_num, yind, fold):
self.__yind = yind
self.__fold = fold
self.__reg_num = reg_num
self.__gate_placed = []
self.gate_anchor = 0
def plot_coord(self, index, gate_width):
h_pos = index % self.__fold + 1
# check folding
if self.__fold > 0:
if h_pos + (gate_width - 1) > self.__fold:
index += self.__fold - (h_pos - 1)
x_pos = index % self.__fold + 1 + 0.5 * (gate_width - 1)
y_pos = self.__yind - (index // self.__fold) * (self.__reg_num + 1)
else:
x_pos = index + 1 + 0.5 * (gate_width - 1)
y_pos = self.__yind
# could have been updated, so need to store
self.gate_anchor = index
return x_pos, y_pos
def is_locatable(self, index, gate_width):
hold = [index + i for i in range(gate_width)]
for p in hold:
if p in self.__gate_placed:
return False
return True
def set_index(self, index, gate_width):
h_pos = index % self.__fold + 1
if h_pos + (gate_width - 1) > self.__fold:
_index = index + self.__fold - (h_pos - 1)
else:
_index = index
for ii in range(gate_width):
if _index + ii not in self.__gate_placed:
self.__gate_placed.append(_index + ii)
self.__gate_placed.sort()
def get_index(self):
if self.__gate_placed:
return self.__gate_placed[-1] + 1
return 0
class MatplotlibDrawer:
def __init__(self, qregs, cregs, ops,
scale=1.0, style=None, plot_barriers=True,
reverse_bits=False):
if not HAS_MATPLOTLIB:
raise ImportError('The class MatplotlibDrawer needs matplotlib. '
'Run "pip install matplotlib" before.')
self._ast = None
self._scale = DEFAULT_SCALE * scale
self._creg = []
self._qreg = []
self._registers(cregs, qregs)
self._ops = ops
self._qreg_dict = collections.OrderedDict()
self._creg_dict = collections.OrderedDict()
self._cond = {
'n_lines': 0,
'xmax': 0,
'ymax': 0,
}
config = user_config.get_config()
if config:
config_style = config.get('circuit_mpl_style', 'default')
if config_style == 'default':
self._style = DefaultStyle()
elif config_style == 'bw':
self._style = BWStyle()
if not config and style is None:
self._style = DefaultStyle()
elif style is False:
self._style = BWStyle()
self.plot_barriers = plot_barriers
self.reverse_bits = reverse_bits
if style:
if isinstance(style, dict):
self._style.set_style(style)
elif isinstance(style, str):
with open(style, 'r') as infile:
dic = json.load(infile)
self._style.set_style(dic)
self.figure = plt.figure()
self.figure.patch.set_facecolor(color=self._style.bg)
self.ax = self.figure.add_subplot(111)
self.ax.axis('off')
self.ax.set_aspect('equal')
self.ax.tick_params(labelbottom=False, labeltop=False,
labelleft=False, labelright=False)
def _registers(self, creg, qreg):
self._creg = []
for r in creg:
self._creg.append(Register(reg=r[0], index=r[1]))
self._qreg = []
for r in qreg:
self._qreg.append(Register(reg=r[0], index=r[1]))
@property
def ast(self):
return self._ast
def _custom_multiqubit_gate(self, xy, fc=None, wide=True, text=None,
subtext=None):
xpos = min([x[0] for x in xy])
ypos = min([y[1] for y in xy])
ypos_max = max([y[1] for y in xy])
if wide:
if subtext:
boxes_length = round(max([len(text), len(subtext)]) / 8) or 1
else:
boxes_length = round(len(text) / 8) or 1
wid = WID * 2.8 * boxes_length
else:
wid = WID
if fc:
_fc = fc
else:
_fc = self._style.gc
qubit_span = abs(ypos) - abs(ypos_max) + 1
height = HIG + (qubit_span - 1)
box = patches.Rectangle(
xy=(xpos - 0.5 * wid, ypos - .5 * HIG),
width=wid, height=height, fc=_fc, ec=self._style.lc,
linewidth=1.5, zorder=PORDER_GATE)
self.ax.add_patch(box)
# Annotate inputs
for bit, y in enumerate([x[1] for x in xy]):
self.ax.text(xpos - 0.45 * wid, y, str(bit), ha='left', va='center',
fontsize=self._style.fs, color=self._style.gt,
clip_on=True, zorder=PORDER_TEXT)
if text:
disp_text = text
if subtext:
self.ax.text(xpos, ypos + 0.15 * height, disp_text, ha='center',
va='center', fontsize=self._style.fs,
color=self._style.gt, clip_on=True,
zorder=PORDER_TEXT)
self.ax.text(xpos, ypos - 0.3 * height, subtext, ha='center',
va='center', fontsize=self._style.sfs,
color=self._style.sc, clip_on=True,
zorder=PORDER_TEXT)
else:
self.ax.text(xpos, ypos + .5 * (qubit_span - 1), disp_text,
ha='center',
va='center',
fontsize=self._style.fs,
color=self._style.gt,
clip_on=True,
zorder=PORDER_TEXT)
def _gate(self, xy, fc=None, wide=False, text=None, subtext=None):
xpos, ypos = xy
if wide:
if subtext:
wid = WID * 2.8
else:
boxes_wide = round(len(text) / 10) or 1
wid = WID * 2.8 * boxes_wide
else:
wid = WID
if fc:
_fc = fc
elif text and text in self._style.dispcol:
_fc = self._style.dispcol[text]
else:
_fc = self._style.gc
box = patches.Rectangle(
xy=(xpos - 0.5 * wid, ypos - 0.5 * HIG), width=wid, height=HIG,
fc=_fc, ec=self._style.lc, linewidth=1.5, zorder=PORDER_GATE)
self.ax.add_patch(box)
if text:
if text in self._style.dispcol:
disp_text = "${}$".format(self._style.disptex[text])
else:
disp_text = text
if subtext:
self.ax.text(xpos, ypos + 0.15 * HIG, disp_text, ha='center',
va='center', fontsize=self._style.fs,
color=self._style.gt, clip_on=True,
zorder=PORDER_TEXT)
self.ax.text(xpos, ypos - 0.3 * HIG, subtext, ha='center',
va='center', fontsize=self._style.sfs,
color=self._style.sc, clip_on=True,
zorder=PORDER_TEXT)
else:
self.ax.text(xpos, ypos, disp_text, ha='center', va='center',
fontsize=self._style.fs,
color=self._style.gt,
clip_on=True,
zorder=PORDER_TEXT)
def _subtext(self, xy, text):
xpos, ypos = xy
self.ax.text(xpos, ypos - 0.3 * HIG, text, ha='center', va='top',
fontsize=self._style.sfs,
color=self._style.tc,
clip_on=True,
zorder=PORDER_TEXT)
def _sidetext(self, xy, text):
xpos, ypos = xy
# 0.15 = the initial gap, each char means it needs to move
# another 0.0375 over
xp = xpos + 0.15 + (0.0375 * len(text))
self.ax.text(xp, ypos+HIG, text, ha='center', va='top',
fontsize=self._style.sfs,
color=self._style.tc,
clip_on=True,
zorder=PORDER_TEXT)
def _line(self, xy0, xy1, lc=None, ls=None):
x0, y0 = xy0
x1, y1 = xy1
if lc is None:
linecolor = self._style.lc
else:
linecolor = lc
if ls is None:
linestyle = 'solid'
else:
linestyle = ls
if linestyle == 'doublet':
theta = np.arctan2(np.abs(x1 - x0), np.abs(y1 - y0))
dx = 0.05 * WID * np.cos(theta)
dy = 0.05 * WID * np.sin(theta)
self.ax.plot([x0 + dx, x1 + dx], [y0 + dy, y1 + dy],
color=linecolor,
linewidth=1.0,
linestyle='solid',
zorder=PORDER_LINE)
self.ax.plot([x0 - dx, x1 - dx], [y0 - dy, y1 - dy],
color=linecolor,
linewidth=1.0,
linestyle='solid',
zorder=PORDER_LINE)
else:
self.ax.plot([x0, x1], [y0, y1],
color=linecolor,
linewidth=1.0,
linestyle=linestyle,
zorder=PORDER_LINE)
def _measure(self, qxy, cxy, cid):
qx, qy = qxy
cx, cy = cxy
self._gate(qxy, fc=self._style.dispcol['meas'])
# add measure symbol
arc = patches.Arc(xy=(qx, qy - 0.15 * HIG), width=WID * 0.7,
height=HIG * 0.7, theta1=0, theta2=180, fill=False,
ec=self._style.lc, linewidth=1.5,
zorder=PORDER_GATE)
self.ax.add_patch(arc)
self.ax.plot([qx, qx + 0.35 * WID],
[qy - 0.15 * HIG, qy + 0.20 * HIG],
color=self._style.lc, linewidth=1.5, zorder=PORDER_GATE)
# arrow
self._line(qxy, [cx, cy + 0.35 * WID], lc=self._style.cc,
ls=self._style.cline)
arrowhead = patches.Polygon(((cx - 0.20 * WID, cy + 0.35 * WID),
(cx + 0.20 * WID, cy + 0.35 * WID),
(cx, cy)),
fc=self._style.cc,
ec=None)
self.ax.add_artist(arrowhead)
# target
if self._style.bundle:
self.ax.text(cx + .25, cy + .1, str(cid), ha='left', va='bottom',
fontsize=0.8 * self._style.fs,
color=self._style.tc,
clip_on=True,
zorder=PORDER_TEXT)
def _conds(self, xy, istrue=False):
xpos, ypos = xy
if istrue:
_fc = self._style.lc
else:
_fc = self._style.gc
box = patches.Circle(xy=(xpos, ypos), radius=WID * 0.15,
fc=_fc, ec=self._style.lc,
linewidth=1.5, zorder=PORDER_GATE)
self.ax.add_patch(box)
def _ctrl_qubit(self, xy):
xpos, ypos = xy
box = patches.Circle(xy=(xpos, ypos), radius=WID * 0.15,
fc=self._style.lc, ec=self._style.lc,
linewidth=1.5, zorder=PORDER_GATE)
self.ax.add_patch(box)
def _tgt_qubit(self, xy):
xpos, ypos = xy
box = patches.Circle(xy=(xpos, ypos), radius=HIG * 0.35,
fc=self._style.dispcol['target'],
ec=self._style.lc, linewidth=1.5,
zorder=PORDER_GATE)
self.ax.add_patch(box)
# add '+' symbol
self.ax.plot([xpos, xpos], [ypos - 0.35 * HIG, ypos + 0.35 * HIG],
color=self._style.lc, linewidth=1.0, zorder=PORDER_GATE)
self.ax.plot([xpos - 0.35 * HIG, xpos + 0.35 * HIG], [ypos, ypos],
color=self._style.lc, linewidth=1.0, zorder=PORDER_GATE)
def _swap(self, xy):
xpos, ypos = xy
self.ax.plot([xpos - 0.20 * WID, xpos + 0.20 * WID],
[ypos - 0.20 * WID, ypos + 0.20 * WID],
color=self._style.lc, linewidth=1.5, zorder=PORDER_LINE)
self.ax.plot([xpos - 0.20 * WID, xpos + 0.20 * WID],
[ypos + 0.20 * WID, ypos - 0.20 * WID],
color=self._style.lc, linewidth=1.5, zorder=PORDER_LINE)
def _barrier(self, config, anc):
xys = config['coord']
group = config['group']
y_reg = []
for qreg in self._qreg_dict.values():
if qreg['group'] in group:
y_reg.append(qreg['y'])
x0 = xys[0][0]
box_y0 = min(y_reg) - int(anc / self._style.fold) * (self._cond['n_lines'] + 1) - 0.5
box_y1 = max(y_reg) - int(anc / self._style.fold) * (self._cond['n_lines'] + 1) + 0.5
box = patches.Rectangle(xy=(x0 - 0.3 * WID, box_y0),
width=0.6 * WID, height=box_y1 - box_y0,
fc=self._style.bc, ec=None, alpha=0.6,
linewidth=1.5, zorder=PORDER_GRAY)
self.ax.add_patch(box)
for xy in xys:
xpos, ypos = xy
self.ax.plot([xpos, xpos], [ypos + 0.5, ypos - 0.5],
linewidth=1, linestyle="dashed",
color=self._style.lc,
zorder=PORDER_TEXT)
def _linefeed_mark(self, xy):
xpos, ypos = xy
self.ax.plot([xpos - .1, xpos - .1],
[ypos, ypos - self._cond['n_lines'] + 1],
color=self._style.lc, zorder=PORDER_LINE)
self.ax.plot([xpos + .1, xpos + .1],
[ypos, ypos - self._cond['n_lines'] + 1],
color=self._style.lc, zorder=PORDER_LINE)
def draw(self, filename=None, verbose=False):
self._draw_regs()
self._draw_ops(verbose)
_xl = - self._style.margin[0]
_xr = self._cond['xmax'] + self._style.margin[1]
_yb = - self._cond['ymax'] - self._style.margin[2] + 1 - 0.5
_yt = self._style.margin[3] + 0.5
self.ax.set_xlim(_xl, _xr)
self.ax.set_ylim(_yb, _yt)
# update figure size
fig_w = _xr - _xl
fig_h = _yt - _yb
if self._style.figwidth < 0.0:
self._style.figwidth = fig_w * self._scale * self._style.fs / 72 / WID
self.figure.set_size_inches(self._style.figwidth, self._style.figwidth * fig_h / fig_w)
if filename:
self.figure.savefig(filename, dpi=self._style.dpi,
bbox_inches='tight')
plt.close(self.figure)
return self.figure
def _draw_regs(self):
# quantum register
for ii, reg in enumerate(self._qreg):
if len(self._qreg) > 1:
label = '${}_{{{}}}$'.format(reg.reg.name, reg.index)
else:
label = '${}$'.format(reg.reg.name)
pos = -ii
self._qreg_dict[ii] = {
'y': pos,
'label': label,
'index': reg.index,
'group': reg.reg
}
self._cond['n_lines'] += 1
# classical register
if self._creg:
n_creg = self._creg.copy()
n_creg.pop(0)
idx = 0
y_off = -len(self._qreg)
for ii, (reg, nreg) in enumerate(itertools.zip_longest(
self._creg, n_creg)):
pos = y_off - idx
if self._style.bundle:
label = '${}$'.format(reg.reg.name)
self._creg_dict[ii] = {
'y': pos,
'label': label,
'index': reg.index,
'group': reg.reg
}
if not (not nreg or reg.reg != nreg.reg):
continue
else:
label = '${}_{{{}}}$'.format(reg.reg.name, reg.index)
self._creg_dict[ii] = {
'y': pos,
'label': label,
'index': reg.index,
'group': reg.reg
}
self._cond['n_lines'] += 1
idx += 1
def _draw_regs_sub(self, n_fold, feedline_l=False, feedline_r=False):
# quantum register
for qreg in self._qreg_dict.values():
if n_fold == 0:
label = qreg['label'] + ' : $\\left|0\\right\\rangle$'
else:
label = qreg['label']
y = qreg['y'] - n_fold * (self._cond['n_lines'] + 1)
self.ax.text(-0.5, y, label, ha='right', va='center',
fontsize=self._style.fs,
color=self._style.tc,
clip_on=True,
zorder=PORDER_TEXT)
self._line([0, y], [self._cond['xmax'], y])
# classical register
this_creg_dict = {}
for creg in self._creg_dict.values():
if n_fold == 0:
label = creg['label'] + ' : 0 '
else:
label = creg['label']
y = creg['y'] - n_fold * (self._cond['n_lines'] + 1)
if y not in this_creg_dict.keys():
this_creg_dict[y] = {'val': 1, 'label': label}
else:
this_creg_dict[y]['val'] += 1
for y, this_creg in this_creg_dict.items():
# bundle
if this_creg['val'] > 1:
self.ax.plot([.6, .7], [y - .1, y + .1],
color=self._style.cc,
zorder=PORDER_LINE)
self.ax.text(0.5, y + .1, str(this_creg['val']), ha='left',
va='bottom',
fontsize=0.8 * self._style.fs,
color=self._style.tc,
clip_on=True,
zorder=PORDER_TEXT)
self.ax.text(-0.5, y, this_creg['label'], ha='right', va='center',
fontsize=self._style.fs,
color=self._style.tc,
clip_on=True,
zorder=PORDER_TEXT)
self._line([0, y], [self._cond['xmax'], y], lc=self._style.cc,
ls=self._style.cline)
# lf line
if feedline_r:
self._linefeed_mark((self._style.fold + 1 - 0.1,
- n_fold * (self._cond['n_lines'] + 1)))
if feedline_l:
self._linefeed_mark((0.1,
- n_fold * (self._cond['n_lines'] + 1)))
def _draw_ops(self, verbose=False):
_wide_gate = ['u2', 'u3', 'cu2', 'cu3']
_barriers = {'coord': [], 'group': []}
#
# generate coordinate manager
#
q_anchors = {}
for key, qreg in self._qreg_dict.items():
q_anchors[key] = Anchor(reg_num=self._cond['n_lines'],
yind=qreg['y'],
fold=self._style.fold)
c_anchors = {}
for key, creg in self._creg_dict.items():
c_anchors[key] = Anchor(reg_num=self._cond['n_lines'],
yind=creg['y'],
fold=self._style.fold)
#
# draw gates
#
prev_anc = -1
for layer in self._ops:
layer_width = 1
for op in layer:
if op.name in _wide_gate:
if layer_width < 2:
layer_width = 2
# if custom gate with a longer than standard name determine
# width
elif op.name not in ['barrier', 'snapshot', 'load', 'save',
'noise', 'cswap', 'swap'] and len(
op.name) >= 4:
box_width = round(len(op.name) / 8)
# If more than 4 characters min width is 2
if box_width <= 1:
box_width = 2
if layer_width < box_width:
if box_width > 2:
layer_width = box_width * 2
else:
layer_width = 2
this_anc = prev_anc + 1
for op in layer:
_iswide = op.name in _wide_gate
if op.name not in ['barrier', 'snapshot', 'load', 'save',
'noise', 'cswap', 'swap'] and len(
op.name) >= 4:
_iswide = True
# get qreg index
q_idxs = []
for qarg in op.qargs:
for index, reg in self._qreg_dict.items():
if (reg['group'] == qarg[0] and
reg['index'] == qarg[1]):
q_idxs.append(index)
break
# get creg index
c_idxs = []
for carg in op.cargs:
for index, reg in self._creg_dict.items():
if (reg['group'] == carg[0] and
reg['index'] == carg[1]):
c_idxs.append(index)
break
for ii in q_idxs:
q_anchors[ii].set_index(this_anc, layer_width)
# qreg coordinate
q_xy = [q_anchors[ii].plot_coord(this_anc, layer_width) for ii in q_idxs]
# creg coordinate
c_xy = [c_anchors[ii].plot_coord(this_anc, layer_width) for ii in c_idxs]
# bottom and top point of qreg
qreg_b = min(q_xy, key=lambda xy: xy[1])
qreg_t = max(q_xy, key=lambda xy: xy[1])
# update index based on the value from plotting
this_anc = q_anchors[q_idxs[0]].gate_anchor
if verbose:
print(op)
if op.type == 'op' and hasattr(op.op, 'params'):
param = self.param_parse(op.op.params, self._style.pimode)
else:
param = None
# conditional gate
if op.condition:
c_xy = [c_anchors[ii].plot_coord(this_anc, layer_width) for
ii in self._creg_dict]
mask = 0
for index, cbit in enumerate(self._creg):
if cbit.reg == op.condition[0]:
mask |= (1 << index)
val = op.condition[1]
# cbit list to consider
fmt_c = '{{:0{}b}}'.format(len(c_xy))
cmask = list(fmt_c.format(mask))[::-1]
# value
fmt_v = '{{:0{}b}}'.format(cmask.count('1'))
vlist = list(fmt_v.format(val))[::-1]
# plot conditionals
v_ind = 0
xy_plot = []
for xy, m in zip(c_xy, cmask):
if m == '1':
if xy not in xy_plot:
if vlist[v_ind] == '1' or self._style.bundle:
self._conds(xy, istrue=True)
else:
self._conds(xy, istrue=False)
xy_plot.append(xy)
v_ind += 1
creg_b = sorted(xy_plot, key=lambda xy: xy[1])[0]
self._subtext(creg_b, hex(val))
self._line(qreg_t, creg_b, lc=self._style.cc,
ls=self._style.cline)
#
# draw special gates
#
if op.name == 'measure':
vv = self._creg_dict[c_idxs[0]]['index']
self._measure(q_xy[0], c_xy[0], vv)
elif op.name in ['barrier', 'snapshot', 'load', 'save',
'noise']:
_barriers = {'coord': [], 'group': []}
for index, qbit in enumerate(q_idxs):
q_group = self._qreg_dict[qbit]['group']
if q_group not in _barriers['group']:
_barriers['group'].append(q_group)
_barriers['coord'].append(q_xy[index])
if self.plot_barriers:
self._barrier(_barriers, this_anc)
#
# draw single qubit gates
#
elif len(q_xy) == 1:
disp = op.name
if param:
prm = '{}'.format(param)
if len(prm) < 20:
self._gate(q_xy[0], wide=_iswide, text=disp,
subtext=prm)
else:
self._gate(q_xy[0], wide=_iswide, text=disp)
else:
self._gate(q_xy[0], wide=_iswide, text=disp)
#
# draw multi-qubit gates (n=2)
#
elif len(q_xy) == 2:
# cx
if op.name == 'cx':
self._ctrl_qubit(q_xy[0])
self._tgt_qubit(q_xy[1])
# add qubit-qubit wiring
self._line(qreg_b, qreg_t)
# cz for latexmode
elif op.name == 'cz':
if self._style.latexmode:
self._ctrl_qubit(q_xy[0])
self._ctrl_qubit(q_xy[1])
else:
disp = op.name.replace('c', '')
self._ctrl_qubit(q_xy[0])
self._gate(q_xy[1], wide=_iswide, text=disp)
# add qubit-qubit wiring
self._line(qreg_b, qreg_t)
# control gate
elif op.name in ['cy', 'ch', 'cu3', 'crz']:
disp = op.name.replace('c', '')
self._ctrl_qubit(q_xy[0])
if param:
self._gate(q_xy[1], wide=_iswide, text=disp,
subtext='{}'.format(param))
else:
self._gate(q_xy[1], wide=_iswide, text=disp)
# add qubit-qubit wiring
self._line(qreg_b, qreg_t)
# cu1
elif op.name == 'cu1':
self._ctrl_qubit(q_xy[0])
self._ctrl_qubit(q_xy[1])
self._sidetext(qreg_b, param)
# add qubit-qubit wiring
self._line(qreg_b, qreg_t)
# rzz gate
elif op.name == 'rzz':
self._ctrl_qubit(q_xy[0])
self._ctrl_qubit(q_xy[1])
self._sidetext(qreg_b, text='zz({})'.format(param))
# add qubit-qubit wiring
self._line(qreg_b, qreg_t)
# swap gate
elif op.name == 'swap':
self._swap(q_xy[0])
self._swap(q_xy[1])
# add qubit-qubit wiring
self._line(qreg_b, qreg_t)
# Custom gate
else:
self._custom_multiqubit_gate(q_xy, wide=_iswide,
text=op.name)
#
# draw multi-qubit gates (n=3)
#
elif len(q_xy) == 3:
# cswap gate
if op.name == 'cswap':
self._ctrl_qubit(q_xy[0])
self._swap(q_xy[1])
self._swap(q_xy[2])
# add qubit-qubit wiring
self._line(qreg_b, qreg_t)
# ccx gate
elif op.name == 'ccx':
self._ctrl_qubit(q_xy[0])
self._ctrl_qubit(q_xy[1])
self._tgt_qubit(q_xy[2])
# add qubit-qubit wiring
self._line(qreg_b, qreg_t)
# custom gate
else:
self._custom_multiqubit_gate(q_xy, wide=_iswide,
text=op.name)
# draw custom multi-qubit gate
elif len(q_xy) > 3:
self._custom_multiqubit_gate(q_xy, wide=_iswide,
text=op.name)
else:
logger.critical('Invalid gate %s', op)
raise exceptions.VisualizationError('invalid gate {}'.format(op))
prev_anc = this_anc + layer_width - 1
#
# adjust window size and draw horizontal lines
#
anchors = [q_anchors[ii].get_index() for ii in self._qreg_dict]
if anchors:
max_anc = max(anchors)
else:
max_anc = 0
n_fold = max(0, max_anc - 1) // self._style.fold
# window size
if max_anc > self._style.fold > 0:
self._cond['xmax'] = self._style.fold + 1
self._cond['ymax'] = (n_fold + 1) * (self._cond['n_lines'] + 1) - 1
else:
self._cond['xmax'] = max_anc + 1
self._cond['ymax'] = self._cond['n_lines']
# add horizontal lines
for ii in range(n_fold + 1):
feedline_r = (n_fold > 0 and n_fold > ii)
feedline_l = (ii > 0)
self._draw_regs_sub(ii, feedline_l, feedline_r)
# draw gate number
if self._style.index:
for ii in range(max_anc):
if self._style.fold > 0:
x_coord = ii % self._style.fold + 1
y_coord = - (ii // self._style.fold) * (self._cond['n_lines'] + 1) + 0.7
else:
x_coord = ii + 1
y_coord = 0.7
self.ax.text(x_coord, y_coord, str(ii + 1), ha='center',
va='center', fontsize=self._style.sfs,
color=self._style.tc, clip_on=True,
zorder=PORDER_TEXT)
@staticmethod
def param_parse(v, pimode=False):
# create an empty list to store the parameters in
param_parts = [None] * len(v)
for i, e in enumerate(v):
if pimode:
try:
param_parts[i] = MatplotlibDrawer.format_pi(e)
except TypeError:
param_parts[i] = str(e)
else:
try:
param_parts[i] = MatplotlibDrawer.format_numeric(e)
except TypeError:
param_parts[i] = str(e)
if param_parts[i].startswith('-'):
param_parts[i] = '$-$' + param_parts[i][1:]
param_parts = ', '.join(param_parts)
return param_parts
@staticmethod
def format_pi(val):
fracvals = MatplotlibDrawer.fraction(val)
buf = ''
if fracvals:
nmr, dnm = fracvals.numerator, fracvals.denominator
if nmr == 1:
buf += '$\\pi$'
elif nmr == -1:
buf += '-$\\pi$'
else:
buf += '{}$\\pi$'.format(nmr)
if dnm > 1:
buf += '/{}'.format(dnm)
return buf
else:
coef = MatplotlibDrawer.format_numeric(val / np.pi)
if coef == '0':
return '0'
return '{}$\\pi$'.format(coef)
@staticmethod
def format_numeric(val, tol=1e-5):
abs_val = abs(val)
if math.isclose(abs_val, 0.0, abs_tol=1e-100):
return '0'
if math.isclose(math.fmod(abs_val, 1.0),
0.0, abs_tol=tol) and 0.5 < abs_val < 9999.5:
return str(int(val))
if 0.1 <= abs_val < 100.0:
return '{:.2f}'.format(val)
return '{:.1e}'.format(val)
@staticmethod
def fraction(val, base=np.pi, n=100, tol=1e-5):
abs_val = abs(val)
for i in range(1, n):
for j in range(1, n):
if math.isclose(abs_val, i / j * base, rel_tol=tol):
if val < 0:
i *= -1
return fractions.Fraction(i, j)
return None
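    # Illustrative results for the three helpers above (worked out from the
    # code itself, not from external documentation):
    #   format_pi(1.5 * np.pi)  -> '3$\pi$/2'   (fraction() returns Fraction(3, 2))
    #   format_pi(0.5)          -> '0.16$\pi$'  (no small fraction of pi is found,
    #                                            so it falls back to format_numeric(0.5 / np.pi))
    #   format_numeric(0.00001) -> '1.0e-05'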
class EventsOutputChannels:
"""Pulse dataset for channel."""
def __init__(self, t0, tf):
"""Create new channel dataset.
Args:
t0 (int): starting time of plot
tf (int): ending time of plot
"""
self.pulses = {}
self.t0 = t0
self.tf = tf
self._waveform = None
self._framechanges = None
self._conditionals = None
self._snapshots = None
self._labels = None
self.enable = False
def add_instruction(self, start_time, pulse):
"""Add new pulse instruction to channel.
Args:
start_time (int): Starting time of instruction
pulse (Instruction): Instruction object to be added
"""
if start_time in self.pulses.keys():
self.pulses[start_time].append(pulse.command)
else:
self.pulses[start_time] = [pulse.command]
@property
def waveform(self):
"""Get waveform."""
if self._waveform is None:
self._build_waveform()
return self._waveform[self.t0:self.tf]
@property
def framechanges(self):
"""Get frame changes."""
if self._framechanges is None:
self._build_waveform()
return self._trim(self._framechanges)
@property
def conditionals(self):
"""Get conditionals."""
if self._conditionals is None:
self._build_waveform()
return self._trim(self._conditionals)
@property
def snapshots(self):
"""Get snapshots."""
if self._snapshots is None:
self._build_waveform()
return self._trim(self._snapshots)
@property
def labels(self):
"""Get labels."""
if self._labels is None:
self._build_waveform()
return self._trim(self._labels)
def is_empty(self):
"""Return if pulse is empty.
Returns:
bool: if the channel has nothing to plot
"""
if any(self.waveform) or self.framechanges or self.conditionals or self.snapshots:
return False
return True
def to_table(self, name):
"""Get table contains.
Args:
name (str): name of channel
Returns:
dict: dictionary of events in the channel
"""
time_event = []
framechanges = self.framechanges
conditionals = self.conditionals
snapshots = self.snapshots
for key, val in framechanges.items():
data_str = 'framechange: %.2f' % val
time_event.append((key, name, data_str))
for key, val in conditionals.items():
data_str = 'conditional, %s' % val
time_event.append((key, name, data_str))
for key, val in snapshots.items():
data_str = 'snapshot: %s' % val
time_event.append((key, name, data_str))
return time_event
def _build_waveform(self):
"""Create waveform from stored pulses.
"""
self._framechanges = {}
self._conditionals = {}
self._snapshots = {}
self._labels = {}
fc = 0
pv = np.zeros(self.tf + 1, dtype=np.complex128)
wf = np.zeros(self.tf + 1, dtype=np.complex128)
last_pv = None
for time, commands in sorted(self.pulses.items()):
if time > self.tf:
break
tmp_fc = 0
for command in commands:
if isinstance(command, FrameChange):
tmp_fc += command.phase
pv[time:] = 0
elif isinstance(command, Snapshot):
self._snapshots[time] = command.name
if tmp_fc != 0:
self._framechanges[time] = tmp_fc
fc += tmp_fc
for command in commands:
if isinstance(command, PersistentValue):
pv[time:] = np.exp(1j*fc) * command.value
last_pv = (time, command)
break
for command in commands:
duration = command.duration
tf = min(time + duration, self.tf)
if isinstance(command, SamplePulse):
wf[time:tf] = np.exp(1j*fc) * command.samples[:tf-time]
pv[time:] = 0
self._labels[time] = (tf, command)
if last_pv is not None:
pv_cmd = last_pv[1]
self._labels[last_pv[0]] = (time, pv_cmd)
last_pv = None
elif isinstance(command, Acquire):
wf[time:tf] = np.ones(command.duration)
self._labels[time] = (tf, command)
self._waveform = wf + pv
def _trim(self, events):
"""Return events during given `time_range`.
Args:
events (dict): time and operation of events
Returns:
dict: dictionary of events within the time
"""
events_in_time_range = {}
for k, v in events.items():
if self.t0 <= k <= self.tf:
events_in_time_range[k] = v
return events_in_time_range
class SamplePulseDrawer:
"""A class to create figure for sample pulse."""
def __init__(self, style):
"""Create new figure.
Args:
style (OPStylePulse): style sheet
"""
self.style = style or OPStylePulse()
def draw(self, pulse, dt, interp_method, scaling=1):
"""Draw figure.
Args:
pulse (SamplePulse): SamplePulse to draw
dt (float): time interval
interp_method (Callable): interpolation function
See `qiskit.visualization.interpolation` for more information
scaling (float): Relative visual scaling of waveform amplitudes
Returns:
matplotlib.figure: A matplotlib figure object of the pulse envelope
"""
figure = plt.figure()
interp_method = interp_method or interpolation.step_wise
figure.set_size_inches(self.style.figsize[0], self.style.figsize[1])
ax = figure.add_subplot(111)
ax.set_facecolor(self.style.bg_color)
samples = pulse.samples
time = np.arange(0, len(samples) + 1, dtype=float) * dt
time, re, im = interp_method(time, samples, self.style.num_points)
# plot
ax.fill_between(x=time, y1=re, y2=np.zeros_like(time),
facecolor=self.style.wave_color[0], alpha=0.3,
edgecolor=self.style.wave_color[0], linewidth=1.5,
label='real part')
ax.fill_between(x=time, y1=im, y2=np.zeros_like(time),
facecolor=self.style.wave_color[1], alpha=0.3,
edgecolor=self.style.wave_color[1], linewidth=1.5,
label='imaginary part')
ax.set_xlim(0, pulse.duration * dt)
if scaling:
ax.set_ylim(-scaling, scaling)
else:
v_max = max(max(np.abs(re)), max(np.abs(im)))
ax.set_ylim(-1.2 * v_max, 1.2 * v_max)
return figure
class ScheduleDrawer:
"""A class to create figure for schedule and channel."""
def __init__(self, style):
"""Create new figure.
Args:
style (OPStyleSched): style sheet
"""
self.style = style or OPStyleSched()
def _build_channels(self, schedule, t0, tf):
# prepare waveform channels
drive_channels = collections.OrderedDict()
measure_channels = collections.OrderedDict()
control_channels = collections.OrderedDict()
acquire_channels = collections.OrderedDict()
snapshot_channels = collections.OrderedDict()
for chan in schedule.channels:
if isinstance(chan, DriveChannel):
try:
drive_channels[chan] = EventsOutputChannels(t0, tf)
except PulseError:
pass
elif isinstance(chan, MeasureChannel):
try:
measure_channels[chan] = EventsOutputChannels(t0, tf)
except PulseError:
pass
elif isinstance(chan, ControlChannel):
try:
control_channels[chan] = EventsOutputChannels(t0, tf)
except PulseError:
pass
elif isinstance(chan, AcquireChannel):
try:
acquire_channels[chan] = EventsOutputChannels(t0, tf)
except PulseError:
pass
elif isinstance(chan, SnapshotChannel):
try:
snapshot_channels[chan] = EventsOutputChannels(t0, tf)
except PulseError:
pass
output_channels = {**drive_channels, **measure_channels,
**control_channels, **acquire_channels}
channels = {**output_channels, **acquire_channels, **snapshot_channels}
# sort by index then name to group qubits together.
output_channels = collections.OrderedDict(sorted(output_channels.items(),
key=lambda x: (x[0].index, x[0].name)))
channels = collections.OrderedDict(sorted(channels.items(),
key=lambda x: (x[0].index, x[0].name)))
for start_time, instruction in schedule.instructions:
for channel in instruction.channels:
if channel in output_channels:
output_channels[channel].add_instruction(start_time, instruction)
elif channel in snapshot_channels:
snapshot_channels[channel].add_instruction(start_time, instruction)
return channels, output_channels, snapshot_channels
def _count_valid_waveforms(self, channels, scaling=1, channels_to_plot=None,
plot_all=False):
# count numbers of valid waveform
n_valid_waveform = 0
v_max = 0
for channel, events in channels.items():
if channels_to_plot:
if channel in channels_to_plot:
waveform = events.waveform
v_max = max(v_max,
max(np.abs(np.real(waveform))),
max(np.abs(np.imag(waveform))))
n_valid_waveform += 1
events.enable = True
else:
if not events.is_empty() or plot_all:
waveform = events.waveform
v_max = max(v_max,
max(np.abs(np.real(waveform))),
max(np.abs(np.imag(waveform))))
n_valid_waveform += 1
events.enable = True
if scaling:
v_max = 0.5 * scaling
else:
v_max = 0.5 / (1.2 * v_max)
return n_valid_waveform, v_max
# pylint: disable=unused-argument
def _draw_table(self, figure, channels, dt, n_valid_waveform):
# create table
table_data = []
if self.style.use_table:
for channel, events in channels.items():
if events.enable:
table_data.extend(events.to_table(channel.name))
table_data = sorted(table_data, key=lambda x: x[0])
# plot table
if table_data:
# table area size
ncols = self.style.table_columns
nrows = int(np.ceil(len(table_data)/ncols))
# fig size
h_table = nrows * self.style.fig_unit_h_table
h_waves = (self.style.figsize[1] - h_table)
# create subplots
gs = gridspec.GridSpec(2, 1, height_ratios=[h_table, h_waves], hspace=0)
tb = plt.subplot(gs[0])
ax = plt.subplot(gs[1])
# configure each cell
tb.axis('off')
cell_value = [['' for _kk in range(ncols * 3)] for _jj in range(nrows)]
cell_color = [self.style.table_color * ncols for _jj in range(nrows)]
cell_width = [*([0.2, 0.2, 0.5] * ncols)]
for ii, data in enumerate(table_data):
# pylint: disable=unbalanced-tuple-unpacking
r, c = np.unravel_index(ii, (nrows, ncols), order='f')
# pylint: enable=unbalanced-tuple-unpacking
time, ch_name, data_str = data
# item
                cell_value[r][3 * c + 0] = 't = %s' % (time * dt)
cell_value[r][3 * c + 1] = 'ch %s' % ch_name
cell_value[r][3 * c + 2] = data_str
table = tb.table(cellText=cell_value,
cellLoc='left',
rowLoc='center',
colWidths=cell_width,
bbox=[0, 0, 1, 1],
cellColours=cell_color)
table.auto_set_font_size(False)
            table.set_fontsize(self.style.table_font_size)
else:
ax = figure.add_subplot(111)
figure.set_size_inches(self.style.figsize[0], self.style.figsize[1])
return ax
def _draw_snapshots(self, ax, snapshot_channels, dt, y0):
for events in snapshot_channels.values():
snapshots = events.snapshots
if snapshots:
for time in snapshots:
ax.annotate(s=u"\u25D8", xy=(time*dt, y0), xytext=(time*dt, y0+0.08),
arrowprops={'arrowstyle': 'wedge'}, ha='center')
def _draw_framechanges(self, ax, fcs, dt, y0):
framechanges_present = True
for time in fcs.keys():
ax.text(x=time*dt, y=y0, s=r'$\circlearrowleft$',
fontsize=self.style.icon_font_size,
ha='center', va='center')
return framechanges_present
def _get_channel_color(self, channel):
# choose color
if isinstance(channel, DriveChannel):
color = self.style.d_ch_color
elif isinstance(channel, ControlChannel):
color = self.style.u_ch_color
elif isinstance(channel, MeasureChannel):
color = self.style.m_ch_color
elif isinstance(channel, AcquireChannel):
color = self.style.a_ch_color
else:
color = 'black'
return color
def _prev_label_at_time(self, prev_labels, time):
for _, labels in enumerate(prev_labels):
for t0, (tf, _) in labels.items():
if time in (t0, tf):
return True
return False
def _draw_labels(self, ax, labels, prev_labels, dt, y0):
for t0, (tf, cmd) in labels.items():
if isinstance(cmd, PersistentValue):
name = cmd.name if cmd.name else 'pv'
elif isinstance(cmd, Acquire):
name = cmd.name if cmd.name else 'acquire'
else:
name = cmd.name
ax.annotate(r'%s' % name,
xy=((t0+tf)//2*dt, y0),
xytext=((t0+tf)//2*dt, y0-0.07),
fontsize=self.style.label_font_size,
ha='center', va='center')
linestyle = self.style.label_ch_linestyle
alpha = self.style.label_ch_alpha
color = self.style.label_ch_color
if not self._prev_label_at_time(prev_labels, t0):
ax.axvline(t0*dt, -1, 1, color=color,
linestyle=linestyle, alpha=alpha)
if not (self._prev_label_at_time(prev_labels, tf) or tf in labels):
ax.axvline(tf*dt, -1, 1, color=color,
linestyle=linestyle, alpha=alpha)
def _draw_channels(self, ax, output_channels, interp_method, t0, tf, dt, v_max,
label=False, framechange=True):
y0 = 0
prev_labels = []
for channel, events in output_channels.items():
if events.enable:
# plot waveform
waveform = events.waveform
time = np.arange(t0, tf + 1, dtype=float) * dt
time, re, im = interp_method(time, waveform, self.style.num_points)
color = self._get_channel_color(channel)
# scaling and offset
re = v_max * re + y0
im = v_max * im + y0
offset = np.zeros_like(time) + y0
# plot
ax.fill_between(x=time, y1=re, y2=offset,
facecolor=color[0], alpha=0.3,
edgecolor=color[0], linewidth=1.5,
label='real part')
ax.fill_between(x=time, y1=im, y2=offset,
facecolor=color[1], alpha=0.3,
edgecolor=color[1], linewidth=1.5,
label='imaginary part')
ax.plot((t0, tf), (y0, y0), color='#000000', linewidth=1.0)
# plot frame changes
fcs = events.framechanges
if fcs and framechange:
self._draw_framechanges(ax, fcs, dt, y0)
# plot labels
labels = events.labels
if labels and label:
self._draw_labels(ax, labels, prev_labels, dt, y0)
prev_labels.append(labels)
else:
continue
# plot label
ax.text(x=0, y=y0, s=channel.name,
fontsize=self.style.axis_font_size,
ha='right', va='center')
y0 -= 1
return y0
def draw(self, schedule, dt, interp_method, plot_range,
scaling=1, channels_to_plot=None, plot_all=True,
table=True, label=False, framechange=True):
"""Draw figure.
Args:
schedule (ScheduleComponent): Schedule to draw
dt (float): time interval
interp_method (Callable): interpolation function
See `qiskit.visualization.interpolation` for more information
plot_range (tuple[float]): plot range
scaling (float): Relative visual scaling of waveform amplitudes
channels_to_plot (list[OutputChannel]): channels to draw
plot_all (bool): if plot all channels even it is empty
table (bool): Draw event table
label (bool): Label individual instructions
framechange (bool): Add framechange indicators
Returns:
matplotlib.figure: A matplotlib figure object for the pulse schedule
Raises:
VisualizationError: when schedule cannot be drawn
"""
figure = plt.figure()
if not channels_to_plot:
channels_to_plot = []
interp_method = interp_method or interpolation.step_wise
# setup plot range
if plot_range:
t0 = int(np.floor(plot_range[0]/dt))
tf = int(np.floor(plot_range[1]/dt))
else:
t0 = 0
tf = schedule.stop_time
# prepare waveform channels
(channels, output_channels,
snapshot_channels) = self._build_channels(schedule, t0, tf)
# count numbers of valid waveform
n_valid_waveform, v_max = self._count_valid_waveforms(output_channels, scaling=scaling,
channels_to_plot=channels_to_plot,
plot_all=plot_all)
if table:
ax = self._draw_table(figure, channels, dt, n_valid_waveform)
else:
ax = figure.add_subplot(111)
figure.set_size_inches(self.style.figsize[0], self.style.figsize[1])
ax.set_facecolor(self.style.bg_color)
y0 = self._draw_channels(ax, output_channels, interp_method,
t0, tf, dt, v_max, label=label,
framechange=framechange)
self._draw_snapshots(ax, snapshot_channels, dt, y0)
ax.set_xlim(t0 * dt, tf * dt)
ax.set_ylim(y0, 1)
ax.set_yticklabels([])
return figure
avg_line_length: 38.305996 | max_line_length: 96 | alphanum_fraction: 0.481469
hexsha: f239e48af19b0b4c8b2d209d28288fe113f831c3 | size: 6,242 | ext: py | lang: Python
repo path: pcraster/pcraster-4.2.0/pcraster-4.2.0/source/python_modelling_framework/UnitTests/mcFrameworkTest.py | repo name: quanpands/wflow | head hexsha: b454a55e4a63556eaac3fbabd97f8a0b80901e5a | licenses: ["MIT"] (same values in the max_stars / max_issues / max_forks column groups)
max_stars_count: null | max_issues_count: null | max_forks_count: null | all event datetimes: null
content:
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import os
import shutil
import sys
import unittest
import pcraster
import pcraster.framework.staticFramework as sf
import pcraster.framework.dynamicFramework as df
import pcraster.framework.mcFramework as mf
import pcraster.framework.frameworkBase as fb
import staticTestModels
import dynamicTestModels
import mcTestModels
## \brief Unit tests for the Monte Carlo framework.
class mcFrameworkTestScript(unittest.TestCase):
def test_1(self):
"""test type of user model"""
myModel = mcTestModels.T0()
try:
mcFw = mf.MonteCarloFramework(myModel, 5)
except fb.FrameworkError as e:
self.assertEqual(str(e),"Cannot run MonteCarlo framework: User model must be type of StaticFramework or DynamicFramework")
def test_2(self):
"""test existence of added methods and attributes"""
myModel = mcTestModels.staticModel()
statFrw = sf.StaticFramework(myModel)
mcFw = mf.MonteCarloFramework(statFrw, 5)
self.assert_(hasattr(myModel, "nrSamples"))
self.assert_(hasattr(myModel, "_d_firstSampleNumber"))
self.assert_(hasattr(myModel, "_d_lastSampleNumber"))
self.assert_(hasattr(myModel, "_d_currentSampleNumber"))
self.assert_(hasattr(myModel, "_d_inSample"))
self.assert_(hasattr(myModel, "currentSampleNumber"))
self.assert_(hasattr(myModel, "_lastSampleNumber"))
self.assert_(hasattr(myModel, "_firstSampleNumber"))
self.assert_(hasattr(myModel, "_setCurrentSample"))
self.assert_(hasattr(myModel, "_inSample"))
self.assert_(hasattr(myModel, "sampleNumbers"))
self.assert_(hasattr(myModel, "report"))
self.assert_(hasattr(myModel, "readmap"), "todo")
def test_3(self):
"""test framework methods"""
myModel = mcTestModels.dynamicModel()
dynFrw = df.DynamicFramework(myModel, 10)
mcFw = mf.MonteCarloFramework(dynFrw, 5)
self.assert_(myModel.sampleNumbers() == range(1,6))
def test_4(self):
"""test generation of sample directories"""
myModel = mcTestModels.staticModel()
statFrw = sf.StaticFramework(myModel)
mcFw = mf.MonteCarloFramework(statFrw, 5)
directoriesCreated = True
for directory in range(1,6):
if not os.path.isdir(str(directory)):
directoriesCreated = False
self.assert_(directoriesCreated)
for directory in range(1,6):
shutil.rmtree(str(directory))
myModel = mcTestModels.dynamicModel()
dynFrw = df.DynamicFramework(myModel, 10)
mcFw = mf.MonteCarloFramework(dynFrw, 5)
directoriesCreated = True
for directory in range(1,6):
if not os.path.isdir(str(directory)):
directoriesCreated = False
self.assert_(directoriesCreated)
for directory in range(1,6):
shutil.rmtree(str(directory))
def test_5(self):
"""test execution of sections, report, readmap for a static model"""
myModel = mcTestModels.staticModel()
statFrw = sf.StaticFramework(myModel)
mcFw = mf.MonteCarloFramework(statFrw, 5)
mcFw.setQuiet(True)
mcFw.run()
filesInitialCreated = True
filesPremcCreated = True
filesPostmcCreated = True
for sample in range(1,6):
nameInit = "mcsi%d.map" % (sample)
namePre = "premc%d.map" % (sample)
namePost ="postmc%d.map" % (sample)
if not os.path.isfile(os.path.join(str(sample), nameInit)):
filesInitialCreated = False
if not os.path.isfile(namePre):
filesPremcCreated = False
if not os.path.isfile(namePost):
filesPostmcCreated = False
self.assert_(filesInitialCreated)
self.assert_(filesPremcCreated)
self.assert_(filesPostmcCreated)
def test_6(self):
"""test execution of sections, report, readmap for a static model"""
myModel = mcTestModels.dynamicModel()
dynFrw = df.DynamicFramework(myModel, 10)
dynFrw.setQuiet(True)
mcFw = mf.MonteCarloFramework(dynFrw, 5)
mcFw.setQuiet(True)
# see if existing directories were emptied
for directory in range(1,6):
assert len(os.listdir(str(directory))) == 0
mcFw.run()
filesInitialCreated = True
filesDynamicCreated = True
filesPremcCreated = True
filesPostmcCreated = True
for sample in range(1,6):
nameInit = "mcdi%d.map" % (sample)
if not os.path.isfile(os.path.join(str(sample), nameInit)):
filesInitialCreated = False
for timestep in range(1,11):
nameDyn = fb.generateNameT("mcdd%d" % (sample), timestep)
if not os.path.isfile(os.path.join(str(sample), nameDyn)):
filesDynamicCreated = False
for timestep in range(1,11):
namePre = "premc_%d_%d.map" % (sample, timestep)
namePost ="postmc_%d_%d.map" % (sample, timestep)
if not os.path.isfile(namePre):
filesPremcCreated = False
if not os.path.isfile(namePost):
filesPostmcCreated = False
self.assert_(filesInitialCreated)
self.assert_(filesPremcCreated)
self.assert_(filesPostmcCreated)
self.assert_(filesDynamicCreated)
  # It is important to reset the seed values of the random number generators
  # when a model is used in combination with forking, because forking creates
  # clones of the process and the clones would otherwise draw identical numbers.
  # The reset of the seed is done in forkscript.py in DEVENV.
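  # A minimal sketch (hypothetical, not part of this test suite) of such a
  # per-sample reset inside a model's premcloop, using the currentSampleNumber()
  # method added by the MonteCarloFramework:
  #
  #   def premcloop(self):
  #     seed = self.currentSampleNumber()
  #     random.seed(seed)           # requires `import random`
  #     numpy.random.seed(seed)     # requires `import numpy`
  #     pcraster.setrandomseed(seed)  # if the PCRaster build provides it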
def test_7(self):
""" test random seed reset while forking """
myModel = mcTestModels.randomModel()
dynFrw = df.DynamicFramework(myModel, 2)
dynFrw.setQuiet(True)
mcFw = mf.MonteCarloFramework(dynFrw, 2)
mcFw.setQuiet(True)
mcFw.setForkSamples(True)
mcFw.run()
pyVal1 = pcraster.cellvalue(pcraster.readmap(os.path.join("1","pyVal.map")),1,1)[0]
pyVal2 = pcraster.cellvalue(pcraster.readmap(os.path.join("2","pyVal.map")),1,1)[0]
pcrVal1 = pcraster.cellvalue(pcraster.readmap(os.path.join("1","pcrVal.map")),1,1)[0]
pcrVal2 = pcraster.cellvalue(pcraster.readmap(os.path.join("2","pcrVal.map")),1,1)[0]
npVal1 = pcraster.cellvalue(pcraster.readmap(os.path.join("1","npVal.map")),1,1)[0]
npVal2 = pcraster.cellvalue(pcraster.readmap(os.path.join("2","npVal.map")),1,1)[0]
self.assertNotEqual(pyVal1, pyVal2)
self.assertNotEqual(pcrVal1, pcrVal2)
self.assertNotEqual(npVal1, npVal2)
avg_line_length: 34.677778 | max_line_length: 128 | alphanum_fraction: 0.698814
hexsha: 6142ae84f188a502365735bbc29a065066d34ba8 | size: 2,587 | ext: py | lang: Python
repo path: nipype/interfaces/freesurfer/tests/test_auto_ApplyVolTransform.py | repo name: sebastientourbier/nipype | head hexsha: 99c5904176481520c5bf42a501aae1a12184e672 | licenses: ["Apache-2.0"] (same values in the max_stars / max_issues / max_forks column groups)
max_stars_count: 2 | stars events: 2019-01-25T18:20:51.000Z to 2019-07-30T20:51:51.000Z
max_issues_count: null | issues event datetimes: null
max_forks_count: 2 | forks events: 2018-01-25T19:48:17.000Z to 2019-01-25T18:20:52.000Z
content:
# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT
from __future__ import unicode_literals
from ..preprocess import ApplyVolTransform
def test_ApplyVolTransform_inputs():
input_map = dict(args=dict(argstr='%s',
),
environ=dict(nohash=True,
usedefault=True,
),
fs_target=dict(argstr='--fstarg',
mandatory=True,
requires=['reg_file'],
xor=('target_file', 'tal', 'fs_target'),
),
fsl_reg_file=dict(argstr='--fsl %s',
mandatory=True,
xor=('reg_file', 'fsl_reg_file', 'xfm_reg_file', 'reg_header', 'subject'),
),
ignore_exception=dict(nohash=True,
usedefault=True,
),
interp=dict(argstr='--interp %s',
),
inverse=dict(argstr='--inv',
),
invert_morph=dict(argstr='--inv-morph',
requires=['m3z_file'],
),
m3z_file=dict(argstr='--m3z %s',
),
no_ded_m3z_path=dict(argstr='--noDefM3zPath',
requires=['m3z_file'],
),
no_resample=dict(argstr='--no-resample',
),
reg_file=dict(argstr='--reg %s',
mandatory=True,
xor=('reg_file', 'fsl_reg_file', 'xfm_reg_file', 'reg_header', 'subject'),
),
reg_header=dict(argstr='--regheader',
mandatory=True,
xor=('reg_file', 'fsl_reg_file', 'xfm_reg_file', 'reg_header', 'subject'),
),
source_file=dict(argstr='--mov %s',
copyfile=False,
mandatory=True,
),
subject=dict(argstr='--s %s',
mandatory=True,
xor=('reg_file', 'fsl_reg_file', 'xfm_reg_file', 'reg_header', 'subject'),
),
subjects_dir=dict(),
tal=dict(argstr='--tal',
mandatory=True,
xor=('target_file', 'tal', 'fs_target'),
),
tal_resolution=dict(argstr='--talres %.10f',
),
target_file=dict(argstr='--targ %s',
mandatory=True,
xor=('target_file', 'tal', 'fs_target'),
),
terminal_output=dict(nohash=True,
),
transformed_file=dict(argstr='--o %s',
genfile=True,
),
xfm_reg_file=dict(argstr='--xfm %s',
mandatory=True,
xor=('reg_file', 'fsl_reg_file', 'xfm_reg_file', 'reg_header', 'subject'),
),
)
inputs = ApplyVolTransform.input_spec()
for key, metadata in list(input_map.items()):
for metakey, value in list(metadata.items()):
assert getattr(inputs.traits()[key], metakey) == value
def test_ApplyVolTransform_outputs():
output_map = dict(transformed_file=dict(),
)
outputs = ApplyVolTransform.output_spec()
for key, metadata in list(output_map.items()):
for metakey, value in list(metadata.items()):
assert getattr(outputs.traits()[key], metakey) == value
avg_line_length: 28.744444 | max_line_length: 78 | alphanum_fraction: 0.624662
hexsha: 9bc9e03090bc77aea963849a08e0e19015ed4864 | size: 39,139 | ext: py | lang: Python
repo path: cvpods/export/shared.py | repo name: lxtGH/BorderDet | head hexsha: ff425172a35a25e733d0facd79c2fd6f378052b0 | licenses: ["Apache-2.0"] (same values in the max_stars / max_issues / max_forks column groups)
max_stars_count: null | max_issues_count: null | max_forks_count: null | all event datetimes: null
content:
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
import collections
import contextlib
import copy
import functools
import logging
import mock
import numpy as np
import os
from typing import Any, Callable, Dict, List, Optional, Tuple, Union
import caffe2.python.utils as putils
import torch
import torch.nn.functional as F
from caffe2.proto import caffe2_pb2
from caffe2.python import core, net_drawer, workspace
from torch.nn.functional import interpolate as interp
logger = logging.getLogger(__name__)
# ==== torch/utils_toffee/cast.py =======================================
def to_device(t, device_str):
"""
    This function is a replacement for .to(another_device) that allows the cast
    to be traced properly by explicitly calling the underlying copy ops.
    It also avoids introducing an unnecessary op when casting to the same device.
"""
src = t.device
dst = torch.device(device_str)
if src == dst:
return t
elif src.type == "cuda" and dst.type == "cpu":
return torch.ops._caffe2.CopyGPUToCPU(t)
elif src.type == "cpu" and dst.type == "cuda":
return torch.ops._caffe2.CopyCPUToGPU(t)
else:
raise RuntimeError(
"Can't cast tensor from device {} to device {}".format(src, dst))
# ==== torch/utils_toffee/interpolate.py =======================================
# Note: borrowed from vision/detection/fair/detectron/detectron/modeling/detector.py
def BilinearInterpolation(tensor_in, up_scale):
assert up_scale % 2 == 0, "Scale should be even"
def upsample_filt(size):
factor = (size + 1) // 2
if size % 2 == 1:
center = factor - 1
else:
center = factor - 0.5
og = np.ogrid[:size, :size]
return (1 - abs(og[0] - center) / factor) * (
1 - abs(og[1] - center) / factor)
kernel_size = int(up_scale) * 2
bil_filt = upsample_filt(kernel_size)
dim = int(tensor_in.shape[1])
kernel = np.zeros((dim, dim, kernel_size, kernel_size), dtype=np.float32)
kernel[range(dim), range(dim), :, :] = bil_filt
tensor_out = F.conv_transpose2d(
tensor_in,
weight=to_device(torch.Tensor(kernel), tensor_in.device),
bias=None,
stride=int(up_scale),
padding=int(up_scale / 2),
)
return tensor_out
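# A minimal usage sketch (illustrative only, not from the original sources):
# upsampling a 4x4 single-channel tensor by 2x with the helper above.
def _example_bilinear_interpolation():
    x = torch.arange(16, dtype=torch.float32).reshape(1, 1, 4, 4)
    y = BilinearInterpolation(x, up_scale=2)
    # stride=2, kernel=4, padding=1 => spatial size doubles to 8x8
    assert y.shape == (1, 1, 8, 8)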
# NOTE: ONNX is incompatible with traced torch.nn.functional.interpolate if
# using dynamic `scale_factor` rather than static `size`. (T43166860)
# NOTE: Caffe2 Int8 conversion might not be able to quantize `size` properly.
def onnx_compatibale_interpolate(input,
size=None,
scale_factor=None,
mode="nearest",
align_corners=None):
# NOTE: The input dimensions are interpreted in the form:
# `mini-batch x channels x [optional depth] x [optional height] x width`.
if size is None and scale_factor is not None:
if input.dim() == 4:
if isinstance(scale_factor, (int, float)):
height_scale, width_scale = (scale_factor, scale_factor)
else:
assert isinstance(scale_factor, (tuple, list))
assert len(scale_factor) == 2
height_scale, width_scale = scale_factor
assert not align_corners, "No matching C2 op for align_corners == True"
if mode == "nearest":
return torch.ops._caffe2.ResizeNearest(
input,
order="NCHW",
width_scale=width_scale,
height_scale=height_scale,
)
elif mode == "bilinear":
logger.warning(
"Use F.conv_transpose2d for bilinear interpolate"
" because there's no such C2 op, this may cause significant"
" slowdown and the boundary pixels won't be as same as"
" using F.interpolate due to padding.")
assert height_scale == width_scale
return BilinearInterpolation(input, up_scale=height_scale)
logger.warning(
"Output size is not static, it might cause ONNX conversion issue")
return interp(input, size, scale_factor, mode, align_corners)
@contextlib.contextmanager
def mock_torch_nn_functional_interpolate():
if torch.onnx.is_in_onnx_export():
with mock.patch("torch.nn.functional.interpolate",
side_effect=onnx_compatibale_interpolate):
yield
else:
yield
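# A minimal usage sketch (illustrative only): outside of ONNX export the context
# manager is a no-op, so F.interpolate behaves normally inside the block.
def _example_mocked_interpolate():
    x = torch.rand(1, 3, 8, 8)
    with mock_torch_nn_functional_interpolate():
        y = F.interpolate(x, scale_factor=2.0, mode="nearest")
    assert y.shape == (1, 3, 16, 16)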
# ==== torch/utils_caffe2/ws_utils.py ==========================================
class ScopedWS(object):
def __init__(self, ws_name, is_reset, is_cleanup=False):
self.ws_name = ws_name
self.is_reset = is_reset
self.is_cleanup = is_cleanup
self.org_ws = ""
def __enter__(self):
self.org_ws = workspace.CurrentWorkspace()
if self.ws_name is not None:
workspace.SwitchWorkspace(self.ws_name, True)
if self.is_reset:
workspace.ResetWorkspace()
return workspace
def __exit__(self, *args):
if self.is_cleanup:
workspace.ResetWorkspace()
if self.ws_name is not None:
workspace.SwitchWorkspace(self.org_ws)
def fetch_any_blob(name):
bb = None
try:
bb = workspace.FetchBlob(name)
except TypeError:
bb = workspace.FetchInt8Blob(name)
except Exception as e:
logger.error("Get blob {} error: {}".format(name, e))
return bb
# ==== torch/utils_caffe2/protobuf.py ==========================================
def get_pb_arg(pb, arg_name):
for x in pb.arg:
if x.name == arg_name:
return x
return None
def get_pb_arg_valf(pb, arg_name, default_val):
arg = get_pb_arg(pb, arg_name)
return arg.f if arg is not None else default_val
def get_pb_arg_floats(pb, arg_name, default_val):
arg = get_pb_arg(pb, arg_name)
return list(map(float, arg.floats)) if arg is not None else default_val
def get_pb_arg_ints(pb, arg_name, default_val):
arg = get_pb_arg(pb, arg_name)
return list(map(int, arg.ints)) if arg is not None else default_val
def get_pb_arg_vali(pb, arg_name, default_val):
arg = get_pb_arg(pb, arg_name)
return arg.i if arg is not None else default_val
def get_pb_arg_vals(pb, arg_name, default_val):
arg = get_pb_arg(pb, arg_name)
return arg.s if arg is not None else default_val
def get_pb_arg_valstrings(pb, arg_name, default_val):
arg = get_pb_arg(pb, arg_name)
return list(arg.strings) if arg is not None else default_val
def check_set_pb_arg(pb, arg_name, arg_attr, arg_value, allow_override=False):
arg = get_pb_arg(pb, arg_name)
if arg is None:
arg = putils.MakeArgument(arg_name, arg_value)
assert hasattr(arg, arg_attr)
pb.arg.extend([arg])
if allow_override and getattr(arg, arg_attr) != arg_value:
logger.warning("Override argument {}: {} -> {}".format(
arg_name, getattr(arg, arg_attr), arg_value))
setattr(arg, arg_attr, arg_value)
else:
assert arg is not None
assert getattr(
arg,
arg_attr) == arg_value, "Existing value {}, new value {}".format(
getattr(arg, arg_attr), arg_value)
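# A minimal usage sketch (illustrative only): reading and setting protobuf args
# on an OperatorDef with the helpers above. The op and arg names are made up.
def _example_pb_arg_helpers():
    op = core.CreateOperator("Conv", ["X", "W"], ["Y"], kernel=3)
    assert get_pb_arg_vali(op, "kernel", None) == 3
    # missing args fall back to the provided default
    assert get_pb_arg_valf(op, "alpha", 1.0) == 1.0
    # check_set_pb_arg adds the arg if absent; otherwise it asserts consistency
    check_set_pb_arg(op, "group", "i", 1)
    assert get_pb_arg_vali(op, "group", None) == 1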
def _create_const_fill_op_from_numpy(name, tensor, device_option=None):
assert type(tensor) == np.ndarray
kTypeNameMapper = {
np.dtype("float32"): "GivenTensorFill",
np.dtype("int32"): "GivenTensorIntFill",
np.dtype("int64"): "GivenTensorInt64Fill",
np.dtype("uint8"): "GivenTensorStringFill",
}
args_dict = {}
if tensor.dtype == np.dtype("uint8"):
args_dict.update({"values": [str(tensor.data)], "shape": [1]})
else:
args_dict.update({"values": tensor, "shape": tensor.shape})
if device_option is not None:
args_dict["device_option"] = device_option
return core.CreateOperator(kTypeNameMapper[tensor.dtype], [], [name],
**args_dict)
def _create_const_fill_op_from_c2_int8_tensor(name, int8_tensor):
assert type(int8_tensor) == workspace.Int8Tensor
kTypeNameMapper = {
np.dtype("int32"): "Int8GivenIntTensorFill",
np.dtype("uint8"): "Int8GivenTensorFill",
}
tensor = int8_tensor.data
assert tensor.dtype in [np.dtype("uint8"), np.dtype("int32")]
values = tensor.tobytes() if tensor.dtype == np.dtype("uint8") else tensor
return core.CreateOperator(
kTypeNameMapper[tensor.dtype],
[],
[name],
values=values,
shape=tensor.shape,
Y_scale=int8_tensor.scale,
Y_zero_point=int8_tensor.zero_point,
)
def create_const_fill_op(
name: str,
blob: Union[np.ndarray, workspace.Int8Tensor],
device_option: Optional[caffe2_pb2.DeviceOption] = None,
) -> caffe2_pb2.OperatorDef:
"""
Given a blob object, return the Caffe2 operator that creates this blob
    as a constant. Currently supports NumPy tensors and Caffe2 Int8Tensor.
"""
tensor_type = type(blob)
assert tensor_type in [
np.ndarray, workspace.Int8Tensor
], ('Error when creating const fill op for "{}", unsupported blob type: {}'
).format(name, type(blob))
if tensor_type == np.ndarray:
return _create_const_fill_op_from_numpy(name, blob, device_option)
elif tensor_type == workspace.Int8Tensor:
assert device_option is None
return _create_const_fill_op_from_c2_int8_tensor(name, blob)
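# A minimal usage sketch (illustrative only): materializing a NumPy array as a
# constant blob in a scratch workspace. The blob name "w" is arbitrary.
def _example_const_fill_op():
    w = np.random.rand(2, 3).astype(np.float32)
    op = create_const_fill_op("w", w)
    with ScopedWS("__const_fill_example__", is_reset=True, is_cleanup=True) as ws:
        ws.RunOperatorOnce(op)
        assert np.allclose(ws.FetchBlob("w"), w)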
def construct_init_net_from_params(
params: Dict[str, Any],
device_options: Optional[Dict[str, caffe2_pb2.DeviceOption]] = None
) -> caffe2_pb2.NetDef:
"""
Construct the init_net from params dictionary
"""
init_net = caffe2_pb2.NetDef()
device_options = device_options or {}
for name, blob in params.items():
if isinstance(blob, str):
logger.warning((
"Blob {} with type {} is not supported in generating init net,"
" skipped.".format(name, type(blob))))
continue
init_net.op.extend([
create_const_fill_op(name,
blob,
device_option=device_options.get(name, None))
])
init_net.external_output.append(name)
return init_net
def get_producer_map(ssa):
"""
Return dict from versioned blob to (i, j),
where i is index of producer op, j is the index of output of that op.
"""
producer_map = {}
for i in range(len(ssa)):
outputs = ssa[i][1]
for j, outp in enumerate(outputs):
producer_map[outp] = (i, j)
return producer_map
def get_consumer_map(ssa):
"""
Return dict from versioned blob to list of (i, j),
where i is index of consumer op, j is the index of input of that op.
"""
consumer_map = collections.defaultdict(list)
for i in range(len(ssa)):
inputs = ssa[i][0]
for j, inp in enumerate(inputs):
consumer_map[inp].append((i, j))
return consumer_map
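# A minimal usage sketch (illustrative only): the producer/consumer maps for a
# two-op net. Blob "y" (version 1) is produced by op#0 and consumed by op#1.
def _example_ssa_maps():
    net = caffe2_pb2.NetDef()
    net.op.extend([
        core.CreateOperator("Relu", ["x"], ["y"]),
        core.CreateOperator("Relu", ["y"], ["z"]),
    ])
    ssa, versions = core.get_ssa(net)
    assert get_producer_map(ssa)[("y", 1)] == (0, 0)
    assert get_consumer_map(ssa)[("y", 1)] == [(1, 0)]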
def get_params_from_init_net(
init_net: caffe2_pb2.NetDef
) -> [Dict[str, Any], Dict[str, caffe2_pb2.DeviceOption]]:
"""
Take the output blobs from init_net by running it.
Outputs:
params: dict from blob name to numpy array
device_options: dict from blob name to the device option of its creating op
"""
    # NOTE: this assumes that the params are determined by the producer op, with
    # the only exception being CopyGPUToCPU, which is a CUDA op but returns a CPU tensor.
def _get_device_option(producer_op):
if producer_op.type == "CopyGPUToCPU":
return caffe2_pb2.DeviceOption()
else:
return producer_op.device_option
with ScopedWS("__get_params_from_init_net__",
is_reset=True,
is_cleanup=True) as ws:
ws.RunNetOnce(init_net)
params = {b: fetch_any_blob(b) for b in init_net.external_output}
ssa, versions = core.get_ssa(init_net)
producer_map = get_producer_map(ssa)
device_options = {
b: _get_device_option(init_net.op[producer_map[(b, versions[b])][0]])
for b in init_net.external_output
}
return params, device_options
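# A minimal round-trip sketch (illustrative only): build an init_net from a
# params dict, then recover the params by running it in a scratch workspace.
def _example_init_net_roundtrip():
    params = {"w": np.ones((2, 2), dtype=np.float32)}
    init_net = construct_init_net_from_params(params)
    fetched, device_options = get_params_from_init_net(init_net)
    assert np.array_equal(fetched["w"], params["w"])
    assert "w" in device_options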
def _updater_raise(op, input_types, output_types):
raise RuntimeError(
"Failed to apply updater for op {} given input_types {} and"
" output_types {}".format(op, input_types, output_types))
def _generic_status_identifier(
predict_net: caffe2_pb2.NetDef,
status_updater: Callable,
known_status: Dict[Tuple[str, int], Any],
) -> Dict[Tuple[str, int], Any]:
"""
    Statically infer the status of each blob; the status can be, for example, device type
    (CPU/GPU), layout (NCHW/NHWC), or data type (float32/int8). "Blob" here
is versioned blob (Tuple[str, int]) in the format compatible with ssa.
Inputs:
predict_net: the caffe2 network
status_updater: a callable, given an op and the status of its input/output,
it returns the updated status of input/output. `None` is used for
representing unknown status.
known_status: a dict containing known status, used as initialization.
Outputs:
A dict mapping from versioned blob to its status
"""
ssa, versions = core.get_ssa(predict_net)
versioned_ext_input = [(b, 0) for b in predict_net.external_input]
versioned_ext_output = [(b, versions[b])
for b in predict_net.external_output]
all_versioned_blobs = set().union(*[set(x[0] + x[1]) for x in ssa])
allowed_vbs = all_versioned_blobs.union(versioned_ext_input).union(
versioned_ext_output)
assert all(k in allowed_vbs for k in known_status)
assert all(v is not None for v in known_status.values())
_known_status = copy.deepcopy(known_status)
def _check_and_update(key, value):
assert value is not None
if key in _known_status:
if not _known_status[key] == value:
raise RuntimeError(
"Confilict status for {}, existing status {}, new status {}"
.format(key, _known_status[key], value))
_known_status[key] = value
def _update_i(op, ssa_i):
versioned_inputs = ssa_i[0]
versioned_outputs = ssa_i[1]
inputs_status = [_known_status.get(b, None) for b in versioned_inputs]
outputs_status = [
_known_status.get(b, None) for b in versioned_outputs
]
new_inputs_status, new_outputs_status = status_updater(
op, inputs_status, outputs_status)
for versioned_blob, status in zip(
versioned_inputs + versioned_outputs,
new_inputs_status + new_outputs_status):
if status is not None:
_check_and_update(versioned_blob, status)
for op, ssa_i in zip(predict_net.op, ssa):
_update_i(op, ssa_i)
for op, ssa_i in zip(reversed(predict_net.op), reversed(ssa)):
_update_i(op, ssa_i)
    # NOTE: This strictly checks that all the blobs from predict_net must be assigned
    # a known status. However sometimes it's impossible (e.g. having dead-end ops),
    # so we may relax this constraint if necessary.
for k in all_versioned_blobs:
if k not in _known_status:
raise NotImplementedError(
"Can not infer the status for {}. Currently only support the case where"
" a single forward and backward pass can identify status for all blobs."
.format(k))
return _known_status
def infer_device_type(
predict_net: caffe2_pb2.NetDef,
known_status: Dict[Tuple[str, int], Any],
device_name_style: str = "caffe2",
) -> Dict[Tuple[str, int], str]:
""" Return the device type ("cpu" or "gpu"/"cuda") of each (versioned) blob """
assert device_name_style in ["caffe2", "pytorch"]
_CPU_STR = "cpu"
_GPU_STR = "gpu" if device_name_style == "caffe2" else "cuda"
def _copy_cpu_to_gpu_updater(op, input_types, output_types):
if input_types[0] == _GPU_STR or output_types[0] == _CPU_STR:
_updater_raise(op, input_types, output_types)
return ([_CPU_STR], [_GPU_STR])
def _copy_gpu_to_cpu_updater(op, input_types, output_types):
if input_types[0] == _CPU_STR or output_types[0] == _GPU_STR:
_updater_raise(op, input_types, output_types)
return ([_GPU_STR], [_CPU_STR])
def _other_ops_updater(op, input_types, output_types):
non_none_types = [
x for x in input_types + output_types if x is not None
]
if len(non_none_types) > 0:
the_type = non_none_types[0]
if not all(x == the_type for x in non_none_types):
_updater_raise(op, input_types, output_types)
else:
the_type = None
return ([the_type for _ in op.input], [the_type for _ in op.output])
def _device_updater(op, *args, **kwargs):
return {
"CopyCPUToGPU": _copy_cpu_to_gpu_updater,
"CopyGPUToCPU": _copy_gpu_to_cpu_updater,
}.get(op.type, _other_ops_updater)(op, *args, **kwargs)
return _generic_status_identifier(predict_net, _device_updater,
known_status)
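# A minimal usage sketch (illustrative only): inferring the device of every
# versioned blob, given only the device of the external input.
def _example_infer_device_type():
    net = caffe2_pb2.NetDef()
    net.op.extend([
        core.CreateOperator("CopyCPUToGPU", ["x_cpu"], ["x_gpu"]),
        core.CreateOperator("Relu", ["x_gpu"], ["y_gpu"]),
    ])
    net.external_input.extend(["x_cpu"])
    net.external_output.extend(["y_gpu"])
    devices = infer_device_type(
        net, known_status={("x_cpu", 0): "cpu"}, device_name_style="pytorch")
    assert devices[("x_gpu", 1)] == "cuda" and devices[("y_gpu", 1)] == "cuda"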
# ==== torch/utils_caffe2/vis.py ===============================================
def _modify_blob_names(ops, blob_rename_f):
ret = []
def _replace_list(blob_list, replaced_list):
del blob_list[:]
blob_list.extend(replaced_list)
for x in ops:
cur = copy.deepcopy(x)
_replace_list(cur.input, list(map(blob_rename_f, cur.input)))
_replace_list(cur.output, list(map(blob_rename_f, cur.output)))
ret.append(cur)
return ret
def _rename_blob(name, blob_sizes, blob_ranges):
def _list_to_str(bsize):
ret = ", ".join([str(x) for x in bsize])
ret = "[" + ret + "]"
return ret
ret = name
if blob_sizes is not None and name in blob_sizes:
ret += "\n" + _list_to_str(blob_sizes[name])
if blob_ranges is not None and name in blob_ranges:
ret += "\n" + _list_to_str(blob_ranges[name])
return ret
# graph_name could not contain word 'graph'
def save_graph(net,
file_name,
graph_name="net",
op_only=True,
blob_sizes=None,
blob_ranges=None):
blob_rename_f = functools.partial(_rename_blob,
blob_sizes=blob_sizes,
blob_ranges=blob_ranges)
return save_graph_base(net, file_name, graph_name, op_only, blob_rename_f)
def save_graph_base(net,
file_name,
graph_name="net",
op_only=True,
blob_rename_func=None):
graph = None
ops = net.op
if blob_rename_func is not None:
ops = _modify_blob_names(ops, blob_rename_func)
if not op_only:
graph = net_drawer.GetPydotGraph(ops, graph_name, rankdir="TB")
else:
graph = net_drawer.GetPydotGraphMinimal(ops,
graph_name,
rankdir="TB",
minimal_dependency=True)
try:
par_dir = os.path.dirname(file_name)
if not os.path.exists(par_dir):
os.makedirs(par_dir)
format = os.path.splitext(os.path.basename(file_name))[-1]
if format == ".png":
graph.write_png(file_name)
elif format == ".pdf":
graph.write_pdf(file_name)
elif format == ".svg":
graph.write_svg(file_name)
else:
print("Incorrect format {}".format(format))
except Exception as e:
print("Error when writing graph to image {}".format(e))
return graph
# ==== torch/utils_toffee/aten_to_caffe2.py ====================================
def group_norm_replace_aten_with_caffe2(predict_net: caffe2_pb2.NetDef):
"""
    For an ONNX-exported model, GroupNorm will be represented as an ATen op;
    this function replaces the ATen op with Caffe2's GroupNorm op in place.
"""
count = 0
for op in predict_net.op:
if op.type == "ATen":
op_name = get_pb_arg_vals(op, "operator",
None) # return byte in py3
if op_name and op_name.decode() == "group_norm":
op.arg.remove(get_pb_arg(op, "operator"))
if get_pb_arg_vali(op, "cudnn_enabled", None):
op.arg.remove(get_pb_arg(op, "cudnn_enabled"))
num_groups = get_pb_arg_vali(op, "num_groups", None)
if num_groups is not None:
op.arg.remove(get_pb_arg(op, "num_groups"))
check_set_pb_arg(op, "group", "i", num_groups)
op.type = "GroupNorm"
count += 1
if count > 1:
logger.info("Replaced {} ATen operator to GroupNormOp".format(count))
# ==== torch/utils_toffee/alias.py =============================================
def alias(x, name, is_backward=False):
if not torch.onnx.is_in_onnx_export():
return x
assert isinstance(x, torch.Tensor)
return torch.ops._caffe2.AliasWithName(x, name, is_backward=is_backward)
def fuse_alias_placeholder(predict_net, init_net):
""" Remove AliasWithName placeholder and rename the input/output of it """
# First we finish all the re-naming
for i, op in enumerate(predict_net.op):
if op.type == "AliasWithName":
assert len(op.input) == 1
assert len(op.output) == 1
name = get_pb_arg_vals(op, "name", None).decode()
is_backward = bool(get_pb_arg_vali(op, "is_backward", 0))
rename_op_input(predict_net,
init_net,
i,
0,
name,
from_producer=is_backward)
rename_op_output(predict_net, i, 0, name)
# Remove AliasWithName, should be very safe since it's a non-op
new_ops = []
for op in predict_net.op:
if op.type != "AliasWithName":
new_ops.append(op)
else:
# safety check
assert op.input == op.output
assert op.input[0] == op.arg[0].s.decode()
del predict_net.op[:]
predict_net.op.extend(new_ops)
# ==== torch/utils_caffe2/graph_transform.py ===================================
class IllegalGraphTransformError(ValueError):
""" When a graph transform function call can't be executed. """
def _rename_versioned_blob_in_proto(
proto: caffe2_pb2.NetDef,
old_name: str,
new_name: str,
version: int,
ssa: List[Tuple[List[Tuple[str, int]], List[Tuple[str, int]]]],
start_versions: Dict[str, int],
end_versions: Dict[str, int],
):
""" In given proto, rename all blobs with matched version """
# Operater list
for op, i_th_ssa in zip(proto.op, ssa):
versioned_inputs, versioned_outputs = i_th_ssa
for i in range(len(op.input)):
if versioned_inputs[i] == (old_name, version):
op.input[i] = new_name
for i in range(len(op.output)):
if versioned_outputs[i] == (old_name, version):
op.output[i] = new_name
# external_input
if start_versions.get(old_name, 0) == version:
for i in range(len(proto.external_input)):
if proto.external_input[i] == old_name:
proto.external_input[i] = new_name
# external_output
if end_versions.get(old_name, 0) == version:
for i in range(len(proto.external_output)):
if proto.external_output[i] == old_name:
proto.external_output[i] = new_name
def rename_op_input(
predict_net: caffe2_pb2.NetDef,
init_net: caffe2_pb2.NetDef,
op_id: int,
input_id: int,
new_name: str,
from_producer: bool = False,
):
"""
    Rename the op_id-th operator in predict_net, changing its input_id-th input's
    name to new_name. It also does automatic re-routing and changes
    external_input and init_net if necessary.
    - It requires that the input is only consumed by this op.
    - This function modifies predict_net and init_net in-place.
    - When from_producer is enabled, this also updates other operators that consume
      the same input. Be cautious because it may trigger unintended behaviour.
"""
assert isinstance(predict_net, caffe2_pb2.NetDef)
assert isinstance(init_net, caffe2_pb2.NetDef)
init_net_ssa, init_net_versions = core.get_ssa(init_net)
predict_net_ssa, predict_net_versions = core.get_ssa(
predict_net, copy.deepcopy(init_net_versions))
versioned_inputs, versioned_outputs = predict_net_ssa[op_id]
old_name, version = versioned_inputs[input_id]
if from_producer:
producer_map = get_producer_map(predict_net_ssa)
if not (old_name, version) in producer_map:
raise NotImplementedError(
"Can't find producer, the input {} is probably from"
" init_net, this is not supported yet.".format(old_name))
producer = producer_map[(old_name, version)]
rename_op_output(predict_net, producer[0], producer[1], new_name)
return
def contain_targets(op_ssa):
return (old_name, version) in op_ssa[0]
is_consumer = [contain_targets(op_ssa) for op_ssa in predict_net_ssa]
if sum(is_consumer) > 1:
raise IllegalGraphTransformError((
"Input '{}' of operator(#{}) are consumed by other ops, please use"
+ " rename_op_output on the producer instead. Offending op: \n{}"
).format(old_name, op_id, predict_net.op[op_id]))
# update init_net
_rename_versioned_blob_in_proto(init_net, old_name, new_name, version,
init_net_ssa, {}, init_net_versions)
# update predict_net
_rename_versioned_blob_in_proto(
predict_net,
old_name,
new_name,
version,
predict_net_ssa,
init_net_versions,
predict_net_versions,
)
def rename_op_output(predict_net: caffe2_pb2.NetDef, op_id: int,
output_id: int, new_name: str):
"""
    Rename the op_id-th operator in predict_net, changing its output_id-th output's
    name to new_name. It also does automatic re-routing and changes
    external_output if necessary.
- It allows multiple consumers of its output.
- This function modifies predict_net in-place, doesn't need init_net.
"""
assert isinstance(predict_net, caffe2_pb2.NetDef)
ssa, blob_versions = core.get_ssa(predict_net)
versioned_inputs, versioned_outputs = ssa[op_id]
old_name, version = versioned_outputs[output_id]
# update predict_net
_rename_versioned_blob_in_proto(predict_net, old_name, new_name, version,
ssa, {}, blob_versions)
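# A minimal usage sketch (illustrative only): renaming op#0's output re-routes
# every downstream consumer of that versioned blob automatically.
def _example_rename_op_output():
    net = caffe2_pb2.NetDef()
    net.op.extend([
        core.CreateOperator("Relu", ["x"], ["y"]),
        core.CreateOperator("Relu", ["y"], ["z"]),
    ])
    net.external_output.extend(["z"])
    rename_op_output(net, op_id=0, output_id=0, new_name="y_renamed")
    assert net.op[0].output[0] == "y_renamed"
    assert net.op[1].input[0] == "y_renamed"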
def get_sub_graph_external_input_output(
predict_net: caffe2_pb2.NetDef, sub_graph_op_indices: List[int]
) -> Tuple[List[Tuple[str, int]], List[Tuple[str, int]]]:
"""
Return the list of external input/output of sub-graph,
each element is tuple of the name and corresponding version in predict_net.
external input/output is defined the same way as caffe2 NetDef.
"""
ssa, versions = core.get_ssa(predict_net)
all_inputs = []
all_outputs = []
for op_id in sub_graph_op_indices:
all_inputs += [inp for inp in ssa[op_id][0] if inp not in all_inputs]
all_outputs += list(ssa[op_id][1]) # ssa output won't repeat
# for versioned blobs, external inputs are just those blob in all_inputs
# but not in all_outputs
ext_inputs = [inp for inp in all_inputs if inp not in all_outputs]
# external outputs are essentially outputs of this subgraph that are used
# outside of this sub-graph (including predict_net.external_output)
all_other_inputs = sum(
(ssa[i][0] for i in range(len(ssa)) if i not in sub_graph_op_indices),
[(outp, versions[outp]) for outp in predict_net.external_output],
)
ext_outputs = [
outp for outp in all_outputs if outp in set(all_other_inputs)
]
return ext_inputs, ext_outputs
class DiGraph:
""" A DAG representation of caffe2 graph, each vertice is a versioned blob. """
def __init__(self):
self.vertices = set()
self.graph = collections.defaultdict(list)
def add_edge(self, u, v):
self.graph[u].append(v)
self.vertices.add(u)
self.vertices.add(v)
# grab from https://www.geeksforgeeks.org/find-paths-given-source-destination/
def get_all_paths(self, s, d):
visited = {k: False for k in self.vertices}
path = []
all_paths = []
def _get_all_paths_util(graph, u, d, visited, path):
visited[u] = True
path.append(u)
if u == d:
all_paths.append(copy.deepcopy(path))
else:
for i in graph[u]:
if not visited[i]:
_get_all_paths_util(graph, i, d, visited, path)
path.pop()
visited[u] = False
_get_all_paths_util(self.graph, s, d, visited, path)
return all_paths
@staticmethod
def from_ssa(ssa):
graph = DiGraph()
for op_id in range(len(ssa)):
for inp in ssa[op_id][0]:
for outp in ssa[op_id][1]:
graph.add_edge(inp, outp)
return graph
def _get_dependency_chain(ssa, versioned_target, versioned_source):
"""
    Return the list of indices of the operators needed to produce the target blob
    from the source blob; if there's no dependency, return an empty list.
"""
# finding all paths between nodes can be O(N!), thus we can only search
# in the subgraph using the op starting from the first consumer of source blob
# to the producer of the target blob.
consumer_map = get_consumer_map(ssa)
producer_map = get_producer_map(ssa)
start_op = min(x[0] for x in consumer_map[versioned_source]) - 15
end_op = (producer_map[versioned_target][0] +
15 if versioned_target in producer_map else start_op)
sub_graph_ssa = ssa[start_op:end_op + 1]
if len(sub_graph_ssa) > 30:
logger.warning(
"Subgraph bebetween {} and {} is large (from op#{} to op#{}), it"
" might take non-trival time to find all paths between them.".
format(versioned_source, versioned_target, start_op, end_op))
dag = DiGraph.from_ssa(sub_graph_ssa)
paths = dag.get_all_paths(versioned_source,
versioned_target) # include two ends
ops_in_paths = [[producer_map[blob][0] for blob in path[1:]]
for path in paths]
return sorted(set().union(*[set(ops) for ops in ops_in_paths]))
def identify_reshape_sub_graph(
predict_net: caffe2_pb2.NetDef, ) -> List[List[int]]:
"""
    Identify the reshape sub-graph in a protobuf.
The reshape sub-graph is defined as matching the following pattern:
(input_blob) -> Op_1 -> ... -> Op_N -> (new_shape) -─┐
└-------------------------------------------> Reshape -> (output_blob)
Return:
List of sub-graphs, each sub-graph is represented as a list of indices
        of the relevant ops, [Op_1, Op_2, ..., Op_N, Reshape]
"""
ssa, _ = core.get_ssa(predict_net)
ret = []
for i, op in enumerate(predict_net.op):
if op.type == "Reshape":
assert len(op.input) == 2
input_ssa = ssa[i][0]
data_source = input_ssa[0]
shape_source = input_ssa[1]
op_indices = _get_dependency_chain(ssa, shape_source, data_source)
ret.append(op_indices + [i])
return ret
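# A minimal usage sketch (illustrative only): a Shape op feeding a Reshape is
# identified as one sub-graph, returned as the op indices [0, 1].
def _example_identify_reshape_sub_graph():
    net = caffe2_pb2.NetDef()
    net.op.extend([
        core.CreateOperator("Shape", ["x"], ["shape"]),
        core.CreateOperator("Reshape", ["x", "shape"], ["y", "old_shape"]),
    ])
    assert identify_reshape_sub_graph(net) == [[0, 1]]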
def remove_reshape_for_fc(predict_net, params):
"""
    In PyTorch, nn.Linear must take a 2D tensor; this often leads to reshaping
    a 4D tensor to 2D by calling .view(). However, this (dynamic) reshaping
    doesn't work well with ONNX and Int8 tools, and causes extra
    ops (e.g. ExpandDims) that might not be available on mobile.
    Luckily Caffe2 supports 4D tensors for FC, so we can remove those reshapes
    after exporting the ONNX model.
"""
from caffe2.python import core
# find all reshape sub-graph that can be removed, which is now all Reshape
# sub-graph whose output is only consumed by FC.
# TODO: to make it safer, we may need the actually value to better determine
# if a Reshape before FC is removable.
reshape_sub_graphs = identify_reshape_sub_graph(predict_net)
sub_graphs_to_remove = []
for reshape_sub_graph in reshape_sub_graphs:
reshape_op_id = reshape_sub_graph[-1]
assert predict_net.op[reshape_op_id].type == "Reshape"
ssa, _ = core.get_ssa(predict_net)
reshape_output = ssa[reshape_op_id][1][0]
consumers = [i for i in range(len(ssa)) if reshape_output in ssa[i][0]]
if all(predict_net.op[consumer].type == "FC"
for consumer in consumers):
# safety check if the sub-graph is isolated, for this reshape sub-graph,
# it means it has one non-param external input and one external output.
ext_inputs, ext_outputs = get_sub_graph_external_input_output(
predict_net, reshape_sub_graph)
non_params_ext_inputs = [inp for inp in ext_inputs if inp[1] != 0]
if len(non_params_ext_inputs) == 1 and len(ext_outputs) == 1:
sub_graphs_to_remove.append(reshape_sub_graph)
# perform removing subgraph by:
    # 1: rename the Reshape's output to its input, then the graph can be
    # seen as an in-place identity, i.e. its external input/output are the same.
# 2: simply remove those ops.
remove_op_ids = []
params_to_remove = []
for sub_graph in sub_graphs_to_remove:
logger.info("Remove Reshape sub-graph:\n{}".format("".join(
["(#{:>4})\n{}".format(i, predict_net.op[i]) for i in sub_graph])))
reshape_op_id = sub_graph[-1]
        new_reshape_output = predict_net.op[reshape_op_id].input[0]
        rename_op_output(predict_net, reshape_op_id, 0, new_reshape_output)
ext_inputs, ext_outputs = get_sub_graph_external_input_output(
predict_net, sub_graph)
non_params_ext_inputs = [inp for inp in ext_inputs if inp[1] != 0]
params_ext_inputs = [inp for inp in ext_inputs if inp[1] == 0]
assert len(non_params_ext_inputs) == 1 and len(ext_outputs) == 1
assert ext_outputs[0][0] == non_params_ext_inputs[0][0]
assert ext_outputs[0][1] == non_params_ext_inputs[0][1] + 1
remove_op_ids.extend(sub_graph)
params_to_remove.extend(params_ext_inputs)
predict_net = copy.deepcopy(predict_net)
new_ops = [
op for i, op in enumerate(predict_net.op) if i not in remove_op_ids
]
del predict_net.op[:]
predict_net.op.extend(new_ops)
for versioned_params in params_to_remove:
name = versioned_params[0]
logger.info(
"Remove params: {} from init_net and predict_net.external_input".
format(name))
del params[name]
predict_net.external_input.remove(name)
return predict_net, params
def fuse_copy_between_cpu_and_gpu(predict_net: caffe2_pb2.NetDef):
"""
In-place fuse extra copy ops between cpu/gpu for the following case:
a -CopyAToB-> b -CopyBToA> c1 -NextOp1-> d1
-CopyBToA> c2 -NextOp2-> d2
The fused network will look like:
a -NextOp1-> d1
-NextOp2-> d2
"""
_COPY_OPS = ["CopyCPUToGPU", "CopyGPUToCPU"]
def _fuse_once(predict_net):
ssa, blob_versions = core.get_ssa(predict_net)
consumer_map = get_consumer_map(ssa)
versioned_external_output = [(name, blob_versions[name])
for name in predict_net.external_output]
for op_id, op in enumerate(predict_net.op):
if op.type in _COPY_OPS:
fw_copy_versioned_output = ssa[op_id][1][0]
consumer_ids = [
x[0] for x in consumer_map[fw_copy_versioned_output]
]
reverse_op_type = _COPY_OPS[1 - _COPY_OPS.index(op.type)]
is_fusable = (
len(consumer_ids) > 0 and
fw_copy_versioned_output not in versioned_external_output
and all(predict_net.op[_op_id].type == reverse_op_type and
ssa[_op_id][1][0] not in versioned_external_output
for _op_id in consumer_ids))
if is_fusable:
for rv_copy_op_id in consumer_ids:
# making each NextOp uses "a" directly and removing Copy ops
rs_copy_versioned_output = ssa[rv_copy_op_id][1][0]
next_op_id, inp_id = consumer_map[
rs_copy_versioned_output][0]
predict_net.op[next_op_id].input[inp_id] = op.input[0]
# remove CopyOps
new_ops = [
op for i, op in enumerate(predict_net.op)
if i != op_id and i not in consumer_ids
]
del predict_net.op[:]
predict_net.op.extend(new_ops)
return True
return False
    # _fuse_once returns False if nothing can be fused
while _fuse_once(predict_net):
pass
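# A minimal usage sketch (illustrative only): a CopyCPUToGPU immediately undone
# by a CopyGPUToCPU is fused away, and the consumer reads the original blob.
def _example_fuse_copy():
    net = caffe2_pb2.NetDef()
    net.op.extend([
        core.CreateOperator("CopyCPUToGPU", ["a"], ["b"]),
        core.CreateOperator("CopyGPUToCPU", ["b"], ["c"]),
        core.CreateOperator("Relu", ["c"], ["d"]),
    ])
    net.external_input.extend(["a"])
    net.external_output.extend(["d"])
    fuse_copy_between_cpu_and_gpu(net)
    assert [op.type for op in net.op] == ["Relu"]
    assert net.op[0].input[0] == "a"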
def remove_dead_end_ops(net_def: caffe2_pb2.NetDef):
""" remove ops if its output is not used or not in external_output """
ssa, versions = core.get_ssa(net_def)
versioned_external_output = [(name, versions[name])
for name in net_def.external_output]
consumer_map = get_consumer_map(ssa)
removed_op_ids = set()
def _is_dead_end(versioned_blob):
return not (versioned_blob in versioned_external_output or
(len(consumer_map[versioned_blob]) > 0
and all(x[0] not in removed_op_ids
for x in consumer_map[versioned_blob])))
for i, ssa_i in reversed(list(enumerate(ssa))):
versioned_outputs = ssa_i[1]
if all(_is_dead_end(outp) for outp in versioned_outputs):
removed_op_ids.add(i)
    # simply removing those dead-end ops should have no effect on external_output
new_ops = [
op for i, op in enumerate(net_def.op) if i not in removed_op_ids
]
del net_def.op[:]
net_def.op.extend(new_ops)
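# A minimal usage sketch (illustrative only): the op producing "unused" is a
# dead end (never consumed, not an external output) and gets removed in place.
def _example_remove_dead_end_ops():
    net = caffe2_pb2.NetDef()
    net.op.extend([
        core.CreateOperator("Relu", ["x"], ["y"]),
        core.CreateOperator("Relu", ["x"], ["unused"]),
    ])
    net.external_output.extend(["y"])
    remove_dead_end_ops(net)
    assert len(net.op) == 1 and net.op[0].output[0] == "y"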
| 36.888784
| 88
| 0.620966
|
b6707dd93b49b6d0e1205564f681e12cc5118345
| 1,532
|
py
|
Python
|
CodeArena/security.py
|
SaberSz/WeByte
|
3f88d572990b8342d2f28065fbb1d092449bf0ea
|
[
"MIT"
] | null | null | null |
CodeArena/security.py
|
SaberSz/WeByte
|
3f88d572990b8342d2f28065fbb1d092449bf0ea
|
[
"MIT"
] | null | null | null |
CodeArena/security.py
|
SaberSz/WeByte
|
3f88d572990b8342d2f28065fbb1d092449bf0ea
|
[
"MIT"
] | null | null | null |
from itsdangerous import TimedJSONWebSignatureSerializer as Serializer
from CodeArena import app, mail
import re
from flask_mail import Message
ts = Serializer(app.config["SECRET_KEY"], 86400)
def send_mail(email, subject, html):
# token = ts.dumps({'user_id': email}).decode('utf-8')
msg = Message(subject,
sender='noreply@WeByte.com',
recipients=[email])
msg.body = f''' Hey, this is the TechLead. You made a great decision by signing up on WeByte. To activate your account click here {html}
If you did not make this request then simply ignore this email and no changes will be made
'''
mail.send(msg)
# print("Mail sent")
def send_mail_content(email, subject, html):
msg = Message(subject,
sender='noreply@WeByte.com',
recipients=[email])
msg.body = html
mail.send(msg)
# print("Mail sent")
def check_pass_strength(password):
flag = 0
if (len(password) < 8):
flag = -1
# print("z1")
elif not re.search("[a-z]", password):
flag = -1
# print("z2")
elif not re.search("[A-Z]", password):
flag = -1
# print("z3")
elif not re.search("[0-9]", password):
flag = -1
# print("z4")
elif not re.search("[_@$#]", password):
flag = -1
# print("z5")
elif re.search("\s", password):
flag = -1
# print("z6")
else:
flag = 0
return True
if flag == -1:
return False
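# A minimal usage sketch (illustrative only; the sample passwords are made up):
# a password passes only with >= 8 chars, lower and upper case, a digit, one of
# _@$#, and no whitespace.
def _example_password_checks():
    assert check_pass_strength("Str0ng_pass") is True
    assert check_pass_strength("weakpass") is False   # no upper case, digit or special char
    assert check_pass_strength("Sh0rt_") is False     # fewer than 8 characters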
| 26.413793
| 140
| 0.574413
|
ec47c81206535d89cc5fb1443bd160b651c22ab0
| 2,573
|
py
|
Python
|
src/flaskz/utils/_response.py
|
taozh1982/flaskz
|
daf900027bd0bf83e19ba963c3d218072d0ebc1e
|
[
"MIT"
] | null | null | null |
src/flaskz/utils/_response.py
|
taozh1982/flaskz
|
daf900027bd0bf83e19ba963c3d218072d0ebc1e
|
[
"MIT"
] | null | null | null |
src/flaskz/utils/_response.py
|
taozh1982/flaskz
|
daf900027bd0bf83e19ba963c3d218072d0ebc1e
|
[
"MIT"
] | null | null | null |
from flask import current_app
from ._app import get_app_config
__all__ = ['create_response', 'get_status_msg', 'ResponseManager']
def create_response(success, data, data_wrapped=False):
"""
Create the response json result.
:param success:
:param data:
:param data_wrapped:
:return:
"""
if success is True:
return _create_success_response(data, data_wrapped)
else:
return _create_fail_response(data)
def _create_success_response(data, data_wrapped=False):
"""
:param data:
:param data_wrapped:
:return:
"""
status = get_app_config('FLASKZ_RES_SUCCESS_STATUS') or 'success'
if data_wrapped is True:
_data = {
'status': status,
}
_data.update(data)
return _data
else:
return {
'status': status,
'data': data
}
def _create_fail_response(status_code, msg=None):
"""
:param msg:
:param status_code:
:return:
"""
status = get_app_config('FLASKZ_RES_FAIL_STATUS') or 'fail'
msg = msg or get_status_msg(status_code)
if type(status_code) == tuple:
status_code = status_code[0]
return {
'status': status,
'status_code': status_code,
'message': str(msg),
}
def get_status_msg(status_code):
"""
Get the specified message by status_code.
    Can be used to return internationalized text; the locale can be fixed, or taken from the request.
:param status_code:
:return:
"""
response_callback = get_current_response_manager('get_response_callback')
if response_callback:
return response_callback(status_code)
if type(status_code) == tuple:
len_ = len(status_code)
if len_ > 1:
return status_code[1] or status_code[0]
elif len_ > 0:
return status_code[0]
return status_code
def get_current_response_manager(callback_name):
response_manager = getattr(current_app, 'response_manager', None)
if response_manager:
return getattr(response_manager, callback_name)
class ResponseManager:
"""
Used to generate response.
"""
def __init__(self):
self._get_response = None
def init_app(self, app):
app.response_manager = self
def get_response(self, get_response):
self._get_response = get_response
@property
def get_response_callback(self):
return self._get_response
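# A minimal usage sketch (illustrative only; the app name, status code and
# message below are made up, and the default "fail" status is assumed when no
# FLASKZ_RES_FAIL_STATUS is configured). It wires a ResponseManager into a
# Flask app and registers a callback mapping status codes to messages.
def _example_response_manager():
    from flask import Flask
    app = Flask(__name__)
    manager = ResponseManager()
    manager.init_app(app)
    manager.get_response(lambda status_code: {'db-error': 'Database operation failed'}
                         .get(status_code, str(status_code)))
    with app.app_context():
        result = create_response(False, 'db-error')
    # With the default statuses this yields:
    # {'status': 'fail', 'status_code': 'db-error', 'message': 'Database operation failed'}
    return result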
| 24.740385
| 100
| 0.620288
|
e6cffe0e3990a44d8d9029f334da45322a349d1c
| 26,927
|
py
|
Python
|
src/sage/rings/polynomial/flatten.py
|
Ivo-Maffei/DistanceRegular
|
d4dedd5c3e7da73111168fcce60d1f180fe24019
|
[
"BSL-1.0"
] | 1
|
2020-05-19T22:34:03.000Z
|
2020-05-19T22:34:03.000Z
|
src/sage/rings/polynomial/flatten.py
|
Ivo-Maffei/DistanceRegular
|
d4dedd5c3e7da73111168fcce60d1f180fe24019
|
[
"BSL-1.0"
] | null | null | null |
src/sage/rings/polynomial/flatten.py
|
Ivo-Maffei/DistanceRegular
|
d4dedd5c3e7da73111168fcce60d1f180fe24019
|
[
"BSL-1.0"
] | 3
|
2020-03-29T17:13:36.000Z
|
2021-05-03T18:11:28.000Z
|
# -*- coding: utf-8 -*-
r"""
Class to flatten polynomial rings over polynomial ring
For example ``QQ['a','b'],['x','y']`` flattens to ``QQ['a','b','x','y']``.
EXAMPLES::
sage: R = QQ['x']['y']['s','t']['X']
sage: from sage.rings.polynomial.flatten import FlatteningMorphism
sage: phi = FlatteningMorphism(R); phi
Flattening morphism:
From: Univariate Polynomial Ring in X over Multivariate Polynomial Ring in s, t over Univariate Polynomial Ring in y over Univariate Polynomial Ring in x over Rational Field
To: Multivariate Polynomial Ring in x, y, s, t, X over Rational Field
sage: phi('x*y*s + t*X').parent()
Multivariate Polynomial Ring in x, y, s, t, X over Rational Field
Authors:
Vincent Delecroix, Ben Hutz (July 2016): initial implementation
"""
#*****************************************************************************
# Copyright (C) 2016
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
# http://www.gnu.org/licenses/
#*****************************************************************************
from __future__ import absolute_import, print_function
import itertools, six
from sage.categories.homset import Homset
from sage.categories.morphism import Morphism
from sage.misc.cachefunc import cached_method
from .polynomial_ring_constructor import PolynomialRing
from .polynomial_ring import is_PolynomialRing
from .multi_polynomial_ring_base import is_MPolynomialRing
from sage.rings.fraction_field import is_FractionField
from sage.rings.fraction_field_element import FractionFieldElement
from sage.rings.polynomial.polydict import ETuple
class FlatteningMorphism(Morphism):
r"""
EXAMPLES::
sage: R = QQ['a','b']['x','y','z']['t1','t2']
sage: from sage.rings.polynomial.flatten import FlatteningMorphism
sage: f = FlatteningMorphism(R)
sage: f.codomain()
Multivariate Polynomial Ring in a, b, x, y, z, t1, t2 over Rational Field
sage: p = R('(a+b)*x + (a^2-b)*t2*(z+y)')
sage: p
((a^2 - b)*y + (a^2 - b)*z)*t2 + (a + b)*x
sage: f(p)
a^2*y*t2 + a^2*z*t2 - b*y*t2 - b*z*t2 + a*x + b*x
sage: f(p).parent()
Multivariate Polynomial Ring in a, b, x, y, z, t1, t2 over Rational Field
Also works when univariate polynomial ring are involved::
sage: R = QQ['x']['y']['s','t']['X']
sage: from sage.rings.polynomial.flatten import FlatteningMorphism
sage: f = FlatteningMorphism(R)
sage: f.codomain()
Multivariate Polynomial Ring in x, y, s, t, X over Rational Field
sage: p = R('((x^2 + 1) + (x+2)*y + x*y^3)*(s+t) + x*y*X')
sage: p
x*y*X + (x*y^3 + (x + 2)*y + x^2 + 1)*s + (x*y^3 + (x + 2)*y + x^2 + 1)*t
sage: f(p)
x*y^3*s + x*y^3*t + x^2*s + x*y*s + x^2*t + x*y*t + x*y*X + 2*y*s + 2*y*t + s + t
sage: f(p).parent()
Multivariate Polynomial Ring in x, y, s, t, X over Rational Field
"""
def __init__(self, domain):
"""
The Python constructor
EXAMPLES::
sage: R = ZZ['a', 'b', 'c']['x', 'y', 'z']
sage: from sage.rings.polynomial.flatten import FlatteningMorphism
sage: FlatteningMorphism(R)
Flattening morphism:
From: Multivariate Polynomial Ring in x, y, z over Multivariate Polynomial Ring in a, b, c over Integer Ring
To: Multivariate Polynomial Ring in a, b, c, x, y, z over Integer Ring
::
sage: R = ZZ['a']['b']['c']
sage: from sage.rings.polynomial.flatten import FlatteningMorphism
sage: FlatteningMorphism(R)
Flattening morphism:
From: Univariate Polynomial Ring in c over Univariate Polynomial Ring in b over Univariate Polynomial Ring in a over Integer Ring
To: Multivariate Polynomial Ring in a, b, c over Integer Ring
::
sage: R = ZZ['a']['a','b']
sage: from sage.rings.polynomial.flatten import FlatteningMorphism
sage: FlatteningMorphism(R)
Flattening morphism:
From: Multivariate Polynomial Ring in a, b over Univariate Polynomial Ring in a over Integer Ring
To: Multivariate Polynomial Ring in a, a0, b over Integer Ring
::
sage: K.<v> = NumberField(x^3 - 2)
sage: R = K['x','y']['a','b']
sage: from sage.rings.polynomial.flatten import FlatteningMorphism
sage: f = FlatteningMorphism(R)
sage: f(R('v*a*x^2 + b^2 + 1/v*y'))
(v)*x^2*a + b^2 + (1/2*v^2)*y
::
sage: R = QQbar['x','y']['a','b']
sage: from sage.rings.polynomial.flatten import FlatteningMorphism
sage: f = FlatteningMorphism(R)
sage: f(R('QQbar(sqrt(2))*a*x^2 + b^2 + QQbar(I)*y'))
1.414213562373095?*x^2*a + b^2 + I*y
::
sage: R.<z> = PolynomialRing(QQbar,1)
sage: from sage.rings.polynomial.flatten import FlatteningMorphism
sage: f = FlatteningMorphism(R)
sage: f.domain(), f.codomain()
(Multivariate Polynomial Ring in z over Algebraic Field,
Multivariate Polynomial Ring in z over Algebraic Field)
::
sage: R.<z> = PolynomialRing(QQbar)
sage: from sage.rings.polynomial.flatten import FlatteningMorphism
sage: f = FlatteningMorphism(R)
sage: f.domain(), f.codomain()
(Univariate Polynomial Ring in z over Algebraic Field,
Univariate Polynomial Ring in z over Algebraic Field)
TESTS::
sage: Pol = QQ['x']['x0']['x']
sage: fl = FlatteningMorphism(Pol)
sage: fl
Flattening morphism:
From: Univariate Polynomial Ring in x over Univariate Polynomial Ring in x0 over Univariate Polynomial Ring in x over Rational Field
To: Multivariate Polynomial Ring in x, x0, x1 over Rational Field
sage: p = Pol([[[1,2],[3,4]],[[5,6],[7,8]]])
sage: fl.section()(fl(p)) == p
True
"""
if not is_PolynomialRing(domain) and not is_MPolynomialRing(domain):
raise ValueError("domain should be a polynomial ring")
ring = domain
variables = []
intermediate_rings = []
while is_PolynomialRing(ring) or is_MPolynomialRing(ring):
intermediate_rings.append(ring)
v = ring.variable_names()
variables.extend(reversed(v))
ring = ring.base_ring()
self._intermediate_rings = intermediate_rings
variables.reverse()
for i, a in enumerate(variables):
if a in variables[:i]:
for index in itertools.count():
b = a + str(index)
if b not in variables: # not just variables[:i]!
break
variables[i] = b
if is_MPolynomialRing(domain):
codomain = PolynomialRing(ring, variables, len(variables))
else:
codomain = PolynomialRing(ring, variables)
hom = Homset(domain, codomain, base=ring, check=False)
Morphism.__init__(self, hom)
self._repr_type_str = 'Flattening'
def _call_(self, p):
r"""
Evaluate a flattening morphism.
EXAMPLES::
sage: R = QQ['a','b','c']['x','y','z']
sage: from sage.rings.polynomial.flatten import FlatteningMorphism
sage: h = FlatteningMorphism(R)('2*a*x + b*z'); h
2*a*x + b*z
sage: h.parent()
Multivariate Polynomial Ring in a, b, c, x, y, z over Rational Field
TESTS::
sage: R = QQ['x']['y']['s','t']
sage: p = R('s*x + y*t + x^2*s + 1 + t')
sage: from sage.rings.polynomial.flatten import FlatteningMorphism
sage: f = FlatteningMorphism(R)
sage: f._call_(p)
x^2*s + x*s + y*t + t + 1
"""
#If we are just specializing a univariate polynomial, then
#the flattening morphism is the identity
if self.codomain().ngens()==1:
return p
p = {(): p}
for ring in self._intermediate_rings:
new_p = {}
if is_PolynomialRing(ring):
for mon,pp in six.iteritems(p):
assert pp.parent() is ring
for i,j in six.iteritems(pp.dict()):
new_p[(i,)+(mon)] = j
elif is_MPolynomialRing(ring):
for mon,pp in six.iteritems(p):
assert pp.parent() is ring
for mmon,q in six.iteritems(pp.dict()):
new_p[tuple(mmon)+mon] = q
else:
raise RuntimeError
p = new_p
return self.codomain()(p, check=False)
@cached_method
def section(self):
"""
Inverse of this flattening morphism.
EXAMPLES::
sage: R = QQ['a','b','c']['x','y','z']
sage: from sage.rings.polynomial.flatten import FlatteningMorphism
sage: h = FlatteningMorphism(R)
sage: h.section()
Unflattening morphism:
From: Multivariate Polynomial Ring in a, b, c, x, y, z over Rational Field
To: Multivariate Polynomial Ring in x, y, z over Multivariate Polynomial Ring in a, b, c over Rational Field
::
sage: R = ZZ['a']['b']['c']
sage: from sage.rings.polynomial.flatten import FlatteningMorphism
sage: FlatteningMorphism(R).section()
Unflattening morphism:
From: Multivariate Polynomial Ring in a, b, c over Integer Ring
To: Univariate Polynomial Ring in c over Univariate Polynomial Ring in b over Univariate Polynomial Ring in a over Integer Ring
"""
        phi = UnflatteningMorphism(self.codomain(), self.domain())
return phi
class UnflatteningMorphism(Morphism):
r"""
Inverses for :class:`FlatteningMorphism`
EXAMPLES::
sage: R = QQ['c','x','y','z']
sage: S = QQ['c']['x','y','z']
sage: from sage.rings.polynomial.flatten import UnflatteningMorphism
sage: f = UnflatteningMorphism(R, S)
sage: g = f(R('x^2 + c*y^2 - z^2'));g
x^2 + c*y^2 - z^2
sage: g.parent()
Multivariate Polynomial Ring in x, y, z over Univariate Polynomial Ring in c over Rational Field
::
sage: R = QQ['a','b', 'x','y']
sage: S = QQ['a','b']['x','y']
sage: from sage.rings.polynomial.flatten import UnflatteningMorphism
sage: UnflatteningMorphism(R, S)
Unflattening morphism:
From: Multivariate Polynomial Ring in a, b, x, y over Rational Field
To: Multivariate Polynomial Ring in x, y over Multivariate Polynomial Ring in a, b over Rational Field
"""
def __init__(self, domain, codomain):
"""
The Python constructor
EXAMPLES::
sage: R = QQ['x']['y']['s','t']['X']
sage: p = R.random_element()
sage: from sage.rings.polynomial.flatten import FlatteningMorphism
sage: f = FlatteningMorphism(R)
sage: g = f.section()
sage: g(f(p)) == p
True
::
sage: R = QQ['a','b','x','y']
sage: S = ZZ['a','b']['x','z']
sage: from sage.rings.polynomial.flatten import UnflatteningMorphism
sage: UnflatteningMorphism(R, S)
Traceback (most recent call last):
...
ValueError: rings must have same base ring
::
sage: R = QQ['a','b','x','y']
sage: S = QQ['a','b']['x','z','w']
sage: from sage.rings.polynomial.flatten import UnflatteningMorphism
sage: UnflatteningMorphism(R, S)
Traceback (most recent call last):
...
ValueError: rings must have the same number of variables
"""
if not is_MPolynomialRing(domain):
raise ValueError("domain should be a multivariate polynomial ring")
if not is_PolynomialRing(codomain) and not is_MPolynomialRing(codomain):
raise ValueError("codomain should be a polynomial ring")
ring = codomain
intermediate_rings = []
while True:
is_polynomial_ring = is_PolynomialRing(ring)
if not (is_polynomial_ring or is_MPolynomialRing(ring)):
break
intermediate_rings.append((ring, is_polynomial_ring))
ring = ring.base_ring()
if domain.base_ring() != intermediate_rings[-1][0].base_ring():
raise ValueError("rings must have same base ring")
if domain.ngens() != sum([R.ngens() for R, _ in intermediate_rings]):
raise ValueError("rings must have the same number of variables")
self._intermediate_rings = intermediate_rings
hom = Homset(domain, codomain, base=ring, check=False)
Morphism.__init__(self, hom)
self._repr_type_str = 'Unflattening'
def _call_(self, p):
"""
Evaluate an unflattening morphism.
TESTS::
sage: from sage.rings.polynomial.flatten import FlatteningMorphism
sage: for R in [ZZ['x']['y']['a,b,c'], GF(4)['x','y']['a','b'],
....: AA['x']['a','b']['y'], QQbar['a1','a2']['t']['X','Y']]:
....: f = FlatteningMorphism(R)
....: g = f.section()
....: for _ in range(10):
....: p = R.random_element()
....: assert p == g(f(p))
....: z = R.zero()
....: assert z == g(f(z))
"""
index = [0]
for R, _ in reversed(self._intermediate_rings):
index.append(index[-1] + len(R.gens()))
newpol = [{} for _ in self._intermediate_rings]
expo = sorted(p.exponents(), key=lambda e: tuple(reversed(e)))
for i in range(len(expo)):
cur_exp = expo[i]
for l in range(len(self._intermediate_rings)):
R, univariate = self._intermediate_rings[-1-l]
sub_exp = (cur_exp[index[l]] if univariate
else cur_exp[index[l]:index[l+1]])
if l == 0:
newpol[l][sub_exp] = p[cur_exp]
else:
newpol[l][sub_exp] = newpol[l-1]
newpol[l-1] = {}
if (i == len(expo) - 1
or expo[i+1][index[l+1]:] != cur_exp[index[l+1]:]):
newpol[l] = R(newpol[l], check=False)
else:
break
return R(newpol[-1], check=False)
class SpecializationMorphism(Morphism):
r"""
Morphisms to specialize parameters in (stacked) polynomial rings
EXAMPLES::
sage: R.<c> = PolynomialRing(QQ)
sage: S.<x,y,z> = PolynomialRing(R)
sage: D = dict({c:1})
sage: from sage.rings.polynomial.flatten import SpecializationMorphism
sage: f = SpecializationMorphism(S, D)
sage: g = f(x^2 + c*y^2 - z^2); g
x^2 + y^2 - z^2
sage: g.parent()
Multivariate Polynomial Ring in x, y, z over Rational Field
::
sage: R.<c> = PolynomialRing(QQ)
sage: S.<z> = PolynomialRing(R)
sage: from sage.rings.polynomial.flatten import SpecializationMorphism
sage: xi = SpecializationMorphism(S, {c:0}); xi
Specialization morphism:
From: Univariate Polynomial Ring in z over Univariate Polynomial Ring in c over Rational Field
To: Univariate Polynomial Ring in z over Rational Field
sage: xi(z^2+c)
z^2
::
sage: R1.<u,v> = PolynomialRing(QQ)
sage: R2.<a,b,c> = PolynomialRing(R1)
sage: S.<x,y,z> = PolynomialRing(R2)
sage: D = dict({a:1, b:2, x:0, u:1})
sage: from sage.rings.polynomial.flatten import SpecializationMorphism
sage: xi = SpecializationMorphism(S, D); xi
Specialization morphism:
From: Multivariate Polynomial Ring in x, y, z over Multivariate Polynomial Ring in a, b, c over Multivariate Polynomial Ring in u, v over Rational Field
To: Multivariate Polynomial Ring in y, z over Univariate Polynomial Ring in c over Univariate Polynomial Ring in v over Rational Field
sage: xi(a*(x*z+y^2)*u+b*v*u*(x*z+y^2)*y^2*c+c*y^2*z^2)
2*v*c*y^4 + c*y^2*z^2 + y^2
"""
def __init__(self, domain, D):
"""
The Python constructor
EXAMPLES::
sage: S.<x,y> = PolynomialRing(QQ)
sage: D = dict({x:1})
sage: from sage.rings.polynomial.flatten import SpecializationMorphism
sage: phi = SpecializationMorphism(S, D); phi
Specialization morphism:
From: Multivariate Polynomial Ring in x, y over Rational Field
To: Univariate Polynomial Ring in y over Rational Field
sage: phi(x^2 + y^2)
y^2 + 1
::
sage: R.<a,b,c> = PolynomialRing(ZZ)
sage: S.<x,y,z> = PolynomialRing(R)
sage: from sage.rings.polynomial.flatten import SpecializationMorphism
sage: xi = SpecializationMorphism(S, {a:1/2})
Traceback (most recent call last):
...
TypeError: no conversion of this rational to integer
The following was fixed in :trac:`23811`::
sage: R.<c>=RR[]
sage: P.<z>=AffineSpace(R,1)
sage: H=End(P)
sage: f=H([z^2+c])
sage: f.specialization({c:1})
Scheme endomorphism of Affine Space of dimension 1 over Real Field with 53 bits of precision
Defn: Defined on coordinates by sending (z) to
(z^2 + 1.00000000000000)
"""
if not is_PolynomialRing(domain) and not is_MPolynomialRing(domain):
raise TypeError("domain should be a polynomial ring")
# use only the generators that are in the stack somewhere,
# and ignore the rest
all_gens = domain.gens_dict_recursive()
new_D = {}
for gen in D:
if str(gen) in all_gens:
new_D[gen] = D[gen]
D = new_D
# _sub_specialization is a specialization morphism (recursive)
# which is applied to the base Fraction field, or None if it's
# any other base ring
self._sub_specialization = None
# We use this composition where "flat" is a flattened
# polynomial ring.
#
# phi D psi
# domain → flat → flat → R
# │ │ │
# └─────────┴───────────────┘
# _flattening_morph _eval_morph
# = phi = psi ∘ D
phi = FlatteningMorphism(domain)
flat = phi.codomain()
base = flat.base_ring()
# Change domain of D to "flat" and ensure that the values lie
# in the base ring.
D = {phi(k): base(D[k]) for k in D}
# Construct unflattened codomain R
new_vars = []
R = domain
while is_PolynomialRing(R) or is_MPolynomialRing(R) or is_FractionField(R):
if is_FractionField(R):
# We've hit base_ring, so set _sub_specialization and exit the loop
field_over = R.base()
applicable_vars = {key: val for key,val in D.items() if key not in flat.gens()}
# If there are any variables in D to set in _sub_specialization
if len(applicable_vars) != 0:
# Coerce the generators to be in the right ring
# This un-does changing the domain of D to be in the flat base ring
tmp = {}
for var, val in applicable_vars.items():
for gstr, gen in field_over.gens_dict_recursive().items():
if str(var) == gstr:
tmp[gen] = val
break
else:
# Should have been caught earlier
raise NameError("argument " + str(var) + " is not a generator anywhere in the polynomial tower")
applicable_vars = tmp
self._sub_specialization = FractionSpecializationMorphism(R, applicable_vars)
break
# We're still in the polynomials, so keep track of the tower
old = R.gens()
new = [t for t in old if t not in D]
force_multivariate = ((len(old) == 1) and is_MPolynomialRing(R))
new_vars.append((new, force_multivariate, old))
R = R.base_ring()
if self._sub_specialization:
# The sub_specialization range will be different
# if it applied some variables from D
R = self._sub_specialization.codomain().fraction_field()
# Construct unflattening map psi (only defined on the variables
# of "flat" which are not involved in D)
psi = dict()
# Reconstruct the proper domain of this morphism
# based on the sub_specialization domains
new_domain = R
for new, force_multivariate, old in reversed(new_vars):
if self._sub_specialization:
if force_multivariate:
new_domain = PolynomialRing(new_domain, old, len(old))
else:
new_domain = PolynomialRing(new_domain, old)
if not new:
continue
var_names = [str(var) for var in new]
if force_multivariate:
R = PolynomialRing(R, var_names, len(var_names))
else:
R = PolynomialRing(R, var_names)
# Map variables in "new" to R
psi.update(zip([phi(w) for w in new], R.gens()))
# Fix domain of eval_morph
# (note: phi's domain is correct)
if self._sub_specialization:
phi_prime = FlatteningMorphism(new_domain)
flat_old = flat
flat = phi_prime.codomain()
base_prime = flat.base_ring()
D = {phi(k): base_prime(D[k]) for k in D}
else:
# The bottom of our tower hasn't changed
flat_old = lambda x: x
# Compose D with psi
vals = []
for t in flat.gens():
if t in D:
vals.append(R.coerce(D[t]))
else:
# Make sure keys are in the old domain
# or else they won't match exactly
vals.append(psi[flat_old(t)])
self._flattening_morph = phi
self._eval_morph = flat.hom(vals, R)
self._repr_type_str = 'Specialization'
Morphism.__init__(self, domain, R)
def _call_(self, p):
"""
Evaluate a specialization morphism.
EXAMPLES::
sage: R.<a,b,c> = PolynomialRing(ZZ)
sage: S.<x,y,z> = PolynomialRing(R)
sage: D = dict({a:1, b:2, c:3})
sage: from sage.rings.polynomial.flatten import SpecializationMorphism
sage: xi = SpecializationMorphism(S, D)
sage: xi(a*x + b*y + c*z)
x + 2*y + 3*z
"""
flat = self._flattening_morph(p)
if self._sub_specialization is not None:
# The base_ring should be a fraction field, so
# apply _sub_specialization to each coefficient
# in the flattened polynomial
tmp = {}
for exponent, coefficient in flat.dict().items():
# Fix the type of exponent from (a,) to a
# (necessary for R(tmp) later)
if isinstance(exponent, ETuple) and len(exponent) == 1:
exponent = exponent[0]
# Coefficient should be a fraction
tmp[exponent] = self._sub_specialization._call_(coefficient)
# tmp's parent should be the same construction as flat
# but over _sub_specialization's codomain
ring_constructor = flat.parent().construction()[0]
fraction_type = self._sub_specialization.codomain()
R = ring_constructor(fraction_type)
flat = R(tmp)
return self._eval_morph(flat)
class FractionSpecializationMorphism(Morphism):
"""
A specialization morphism for fraction fields over (stacked) polynomial rings
"""
def __init__(self, domain, D):
"""
Initialize the morphism with a domain and dictionary of specializations
EXAMPLES::
sage: R.<a,c> = QQ[]
sage: S.<x,y> = R[]
sage: from sage.rings.polynomial.flatten import FractionSpecializationMorphism
sage: phi = FractionSpecializationMorphism(Frac(S), {c:3})
sage: phi
Fraction Specialization morphism:
From: Fraction Field of Multivariate Polynomial Ring in x, y over Multivariate Polynomial Ring in a, c over Rational Field
To: Fraction Field of Multivariate Polynomial Ring in x, y over Univariate Polynomial Ring in a over Rational Field
"""
if not is_FractionField(domain):
raise TypeError("domain must be a fraction field")
self._specialization = SpecializationMorphism(domain.base(), D)
self._repr_type_str = 'Fraction Specialization'
Morphism.__init__(self, domain, self._specialization.codomain().fraction_field())
def _call_(self, p):
"""
Evaluate a fraction specialization morphism
EXAMPLES::
sage: R.<a,b,c> = QQ[]
sage: S.<x,y,z> = R[]
sage: from sage.rings.polynomial.flatten import FractionSpecializationMorphism
sage: phi = FractionSpecializationMorphism(Frac(S), {a:3, b:2, c:-2})
sage: spec = phi((a*x + b*y) / (c*z))
sage: spec
(3*x + 2*y)/(-2*z)
sage: spec.parent()
Fraction Field of Multivariate Polynomial Ring in x, y, z over Rational Field
"""
if not isinstance(p, FractionFieldElement):
raise TypeError("p must be a fraction field element")
numerator = self._specialization._call_(p.numerator())
denominator = self._specialization._call_(p.denominator())
return numerator / denominator
| 40.010401
| 179
| 0.556467
|
222e8c9ab0252751f658071380aa8b99e1d16b91
| 3,707
|
py
|
Python
|
ambari-server/src/test/resources/stacks/HDP/2.0.7/services/HIVE/package/scripts/hive.py
|
zyclove/ambari
|
1032f0f54cb7b312b9a3b37570cd840f4e1e89d4
|
[
"Apache-2.0"
] | null | null | null |
ambari-server/src/test/resources/stacks/HDP/2.0.7/services/HIVE/package/scripts/hive.py
|
zyclove/ambari
|
1032f0f54cb7b312b9a3b37570cd840f4e1e89d4
|
[
"Apache-2.0"
] | null | null | null |
ambari-server/src/test/resources/stacks/HDP/2.0.7/services/HIVE/package/scripts/hive.py
|
zyclove/ambari
|
1032f0f54cb7b312b9a3b37570cd840f4e1e89d4
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/python2
"""
Licensed to the Apache Software Foundation (ASF) under one
or more contributor license agreements. See the NOTICE file
distributed with this work for additional information
regarding copyright ownership. The ASF licenses this file
to you under the Apache License, Version 2.0 (the
"License"); you may not use this file except in compliance
with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from resource_management import *
import sys
def hive(name=None):
import params
if name == 'metastore' or name == 'hiveserver2':
hive_config_dir = params.hive_server_conf_dir
config_file_mode = 0600
jdbc_connector()
else:
hive_config_dir = params.hive_conf_dir
config_file_mode = 0644
Directory(hive_config_dir,
owner=params.hive_user,
group=params.user_group,
create_parents = True
)
XmlConfig("hive-site.xml",
conf_dir=hive_config_dir,
configurations=params.config['configurations']['hive-site'],
configuration_attributes=params.config['configuration_attributes']['hive-site'],
owner=params.hive_user,
group=params.user_group,
mode=config_file_mode
)
cmd = format("/bin/sh -c 'cd /usr/lib/ambari-agent/ && curl -kf --retry 5 "
"{jdk_location}/{check_db_connection_jar_name} -o {check_db_connection_jar_name}'")
Execute(cmd,
          not_if=format("[ -f {check_db_connection_jar_name} ]"))
if name == 'metastore':
File(params.start_metastore_path,
mode=0755,
content=StaticFile('startMetastore.sh')
)
elif name == 'hiveserver2':
File(params.start_hiveserver2_path,
mode=0755,
content=StaticFile('startHiveserver2.sh')
)
if name != "client":
crt_directory(params.hive_pid_dir)
crt_directory(params.hive_log_dir)
crt_directory(params.hive_var_lib)
File(format("{hive_config_dir}/hive-env.sh"),
owner=params.hive_user,
group=params.user_group,
content=Template('hive-env.sh.j2', conf_dir=hive_config_dir)
)
crt_file(format("{hive_conf_dir}/hive-default.xml.template"))
crt_file(format("{hive_conf_dir}/hive-env.sh.template"))
crt_file(format("{hive_conf_dir}/hive-exec-log4j.properties.template"))
crt_file(format("{hive_conf_dir}/hive-log4j.properties.template"))
def crt_directory(name):
import params
Directory(name,
create_parents = True,
owner=params.hive_user,
group=params.user_group,
mode=0755)
def crt_file(name):
import params
File(name,
owner=params.hive_user,
group=params.user_group
)
def jdbc_connector():
import params
if params.hive_jdbc_driver == "com.mysql.jdbc.Driver":
cmd = format("hive mkdir -p {artifact_dir} ; cp /usr/share/java/{jdbc_jar_name} {target}")
Execute(cmd,
not_if=format("test -f {target}"),
creates=params.target,
path=["/bin", "/usr/bin/"])
elif params.hive_jdbc_driver == "oracle.jdbc.driver.OracleDriver":
cmd = format(
"mkdir -p {artifact_dir} ; curl -kf --retry 10 {driver_curl_source} -o {driver_curl_target} && "
"cp {driver_curl_target} {target}")
Execute(cmd,
not_if=format("test -f {target}"),
path=["/bin", "/usr/bin/"])
| 29.895161
| 103
| 0.680874
|
6c1bd0ab62fdcea31c29b3e371f8324ec23eff69
| 1,631
|
py
|
Python
|
openquake.hazardlib/openquake/hazardlib/tests/gsim/fukushima_tanaka_1990_test.py
|
rainzhop/ConvNetQuake
|
a3e6de3f7992eac72f1b9883fec36b8c7fdefd48
|
[
"MIT"
] | null | null | null |
openquake.hazardlib/openquake/hazardlib/tests/gsim/fukushima_tanaka_1990_test.py
|
rainzhop/ConvNetQuake
|
a3e6de3f7992eac72f1b9883fec36b8c7fdefd48
|
[
"MIT"
] | null | null | null |
openquake.hazardlib/openquake/hazardlib/tests/gsim/fukushima_tanaka_1990_test.py
|
rainzhop/ConvNetQuake
|
a3e6de3f7992eac72f1b9883fec36b8c7fdefd48
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
# vim: tabstop=4 shiftwidth=4 softtabstop=4
#
# Copyright (C) 2013-2016 GEM Foundation
#
# OpenQuake is free software: you can redistribute it and/or modify it
# under the terms of the GNU Affero General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# OpenQuake is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with OpenQuake. If not, see <http://www.gnu.org/licenses/>.
from openquake.hazardlib.gsim.fukushima_tanaka_1990 import (
FukushimaTanaka1990,
FukushimaTanakaSite1990
)
from openquake.hazardlib.tests.gsim.utils import BaseGSIMTestCase
class FukushimaTanaka1990TestCase(BaseGSIMTestCase):
GSIM_CLASS = FukushimaTanaka1990
def test_mean(self):
self.check('FT1990/FT1990_MEAN.csv',
max_discrep_percentage=0.1)
def test_std_total(self):
self.check('FT1990/FT1990_STDTOTAL.csv',
max_discrep_percentage=0.1)
class FukushimaTanaka1990SiteTestCase(BaseGSIMTestCase):
GSIM_CLASS = FukushimaTanakaSite1990
def test_mean(self):
self.check('FT1990/FT1990Site_MEAN.csv',
max_discrep_percentage=0.1)
def test_std_total(self):
self.check('FT1990/FT1990Site_STDTOTAL.csv',
max_discrep_percentage=0.1)
| 33.979167
| 74
| 0.729614
|
be8fea2350d5384086a66a475230b6cc337b8889
| 7,019
|
py
|
Python
|
src/config/vnc_openstack/vnc_openstack/tests/test_vnc_plugin_db.py
|
Mirantis/contrail-controller
|
6a8ce71bde9f30e14241027dc89fcd9ca6ac0673
|
[
"Apache-2.0"
] | 3
|
2019-01-11T06:16:40.000Z
|
2021-02-24T23:48:21.000Z
|
src/config/vnc_openstack/vnc_openstack/tests/test_vnc_plugin_db.py
|
Mirantis/contrail-controller
|
6a8ce71bde9f30e14241027dc89fcd9ca6ac0673
|
[
"Apache-2.0"
] | null | null | null |
src/config/vnc_openstack/vnc_openstack/tests/test_vnc_plugin_db.py
|
Mirantis/contrail-controller
|
6a8ce71bde9f30e14241027dc89fcd9ca6ac0673
|
[
"Apache-2.0"
] | 2
|
2019-02-06T12:52:00.000Z
|
2019-04-11T23:19:28.000Z
|
import unittest
import uuid
from flexmock import flexmock
import fake_neutron
from vnc_openstack import neutron_plugin_db as db
class MockDbInterface(db.DBInterface):
def __init__(self):
class MockConnection(object):
def wait(self):
return
self._connected_to_api_server = MockConnection()
pass
class TestDbInterface(unittest.TestCase):
_tenant_ids = ['tenant_id_1',
'tenant_id_2']
def _list_resource(self, resource, ret_count=0):
def _list_others(parent_id, count):
self.assertEqual(count, True)
self.assertTrue(parent_id in self._tenant_ids)
self.assertTrue(resource in ['virtual_networks',
'virtual_machine_interfaces',
'logical_routers',
'network_policys',
'network_ipams',
'route_tables'])
r = resource.replace("_", "-")
return {r: {'count': ret_count}}
def _list_fip(back_ref_id, count):
self.assertEqual(count, True)
self.assertTrue(back_ref_id in self._tenant_ids)
r = resource.replace("_", "-")
return {r: {'count': ret_count}}
if resource == "floating_ips":
return _list_fip
return _list_others
def _test_for(self, resource):
dbi = MockDbInterface()
kwargs={"operational": True,
resource + "_list": self._list_resource(resource, 1),
}
dbi._vnc_lib = flexmock(**kwargs)
ret = dbi._resource_count_optimized(resource,
filters={'tenant_id': self._tenant_ids[0]})
self.assertEqual(ret, 1)
ret = dbi._resource_count_optimized(resource,
filters={'tenant_id': self._tenant_ids})
self.assertEqual(ret, 2)
def test_resource_count_optimized(self):
dbi = MockDbInterface()
ret = dbi._resource_count_optimized('virtual-networks',
filters={'f': 'some-filter'})
self.assertEqual(ret, None)
ret = dbi._resource_count_optimized('virtual-networks',
filters={'tenant_id': 'some-id',
'f': 'some_filter'})
self.assertEqual(ret, None)
self._test_for("virtual_networks")
self._test_for("virtual_machine_interfaces")
self._test_for("floating_ips")
self._test_for("logical_routers")
self._test_for("network_policys")
self._test_for("network_ipams")
self._test_for("route_tables")
def test_floating_show_router_id(self):
dbi = MockDbInterface()
vmi_obj = None
def fake_virtual_machine_interface_properties():
return None
def fake_virtual_machine_read(id, fq_name=None, fields=None,
parent_id=None):
if id == 'fip_port_uuid1':
net_uuid = 'match_vn_uuid'
elif id == 'fip_port_uuid2':
net_uuid = 'miss_vn_uuid'
elif id == 'router_port_uuid':
net_uuid = 'match_vn_uuid'
return flexmock(uuid=id,
get_virtual_machine_interface_properties=\
fake_virtual_machine_interface_properties,
get_virtual_network_refs=\
lambda: [{'uuid': net_uuid}])
def fake_virtual_machine_interface_list(*args, **kwargs):
if kwargs.get('obj_uuids', []) == ['router_port_uuid']:
return [flexmock(
uuid='router_port_uuid',
get_virtual_machine_interface_properties=\
fake_virtual_machine_interface_properties,
get_virtual_network_refs=\
lambda: [{'uuid': 'match_vn_uuid'}])]
dbi._vnc_lib = flexmock(
fq_name_to_id=lambda res, name: 'fip_pool_uuid',
virtual_machine_interface_read=fake_virtual_machine_read,
virtual_machine_interfaces_list=fake_virtual_machine_interface_list,
logical_routers_list=lambda parent_id, detail: [
flexmock(uuid='router_uuid',
get_virtual_machine_interface_refs=\
lambda: [{'uuid': 'router_port_uuid'}])])
fip_obj = flexmock(
uuid = 'fip_uuid',
get_fq_name=lambda: ['domain', 'project', 'fip'],
get_project_refs=lambda: [{'uuid': str(uuid.uuid4())}],
get_floating_ip_address=lambda: 'fip_ip',
get_floating_ip_fixed_ip_address= lambda: 'fip_port_ip')
fip_obj.get_virtual_machine_interface_refs = \
lambda: [{'uuid': 'fip_port_uuid1'}]
fip_neutron = dbi._floatingip_vnc_to_neutron(fip_obj)
self.assertEqual(fip_neutron['router_id'], 'router_uuid')
fip_obj.get_virtual_machine_interface_refs = \
lambda: [{'uuid': 'fip_port_uuid2'}]
fip_neutron = dbi._floatingip_vnc_to_neutron(fip_obj)
self.assertIsNone(fip_neutron['router_id'])
def test_default_security_group_delete(self):
dbi = MockDbInterface()
sg_obj = None
delete_called_for = [""]
def _sg_delete(id):
delete_called_for[0] = id
dbi._vnc_lib = flexmock(operational=True,
security_group_read = lambda id: sg_obj,
security_group_delete = _sg_delete)
# sg_delete should be called when sg_name != default
tenant_uuid = str(uuid.uuid4())
sg_uuid = str(uuid.uuid4())
sg_obj = flexmock(operational=True,
name="non-default",
parent_uuid=tenant_uuid)
context = {'tenant_id': tenant_uuid}
dbi.security_group_delete(context, sg_uuid)
self.assertEqual(delete_called_for[0], sg_uuid)
delete_called_for = [""]
sg_obj = flexmock(operational=True,
name="non-default",
parent_uuid=str(uuid.uuid4()))
dbi.security_group_delete(context, sg_uuid)
self.assertEqual(delete_called_for[0], sg_uuid)
delete_called_for = [""]
sg_obj = flexmock(operational=True,
name="default",
parent_uuid=str(uuid.uuid4()))
dbi.security_group_delete(context, sg_uuid)
self.assertEqual(delete_called_for[0], sg_uuid)
with self.assertRaises(Exception):
delete_called_for = [""]
sg_obj = flexmock(operational=True,
name="default",
parent_uuid=tenant_uuid)
dbi.security_group_delete(context, sg_uuid)
| 38.994444
| 87
| 0.564895
|
4fb25145601df643b411f3f209217332d8839ac0
| 545
|
py
|
Python
|
code_all/day17/exercise02.py
|
testcg/python
|
4db4bd5d0e44af807d2df80cf8c8980b40cc03c4
|
[
"MIT"
] | null | null | null |
code_all/day17/exercise02.py
|
testcg/python
|
4db4bd5d0e44af807d2df80cf8c8980b40cc03c4
|
[
"MIT"
] | null | null | null |
code_all/day17/exercise02.py
|
testcg/python
|
4db4bd5d0e44af807d2df80cf8c8980b40cc03c4
|
[
"MIT"
] | null | null | null |
"""
练习2. 定义函数,在列表中找出所有数字
[43,"悟空",True,56,"八戒",87.5,98]
"""
# 适用性
# 函数有一个结果使用return
# 函数有多个结果使用yield
def get_number1(list_number):
result = []
for item in list_number:
if type(item) in (int, float):
result.append(item)
return result
def get_number2(list_number):
for item in list_number:
if type(item) in (int, float):
yield item
list01 = [43, "悟空", True, 56, "八戒", 87.5, 98]
for item in get_number1(list01):
print(item)
for item in get_number2(list01):
print(item)
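
# Editor's sketch (not part of the original exercise): the same filter written as a
# generator expression. `type(item) in (int, float)` is kept on purpose: it excludes
# booleans such as True, whereas isinstance(item, (int, float)) would accept them
# because bool is a subclass of int.
def get_number3(list_number):
    return (item for item in list_number if type(item) in (int, float))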
| 18.166667
| 45
| 0.609174
|
493f73ba9f6eb5c961d5c737636e858f26997227
| 73
|
py
|
Python
|
benchmarks/benchmarks/__init__.py
|
Ennosigaeon/scipy
|
2d872f7cf2098031b9be863ec25e366a550b229c
|
[
"BSD-3-Clause"
] | 9,095
|
2015-01-02T18:24:23.000Z
|
2022-03-31T20:35:31.000Z
|
benchmarks/benchmarks/__init__.py
|
Ennosigaeon/scipy
|
2d872f7cf2098031b9be863ec25e366a550b229c
|
[
"BSD-3-Clause"
] | 11,500
|
2015-01-01T01:15:30.000Z
|
2022-03-31T23:07:35.000Z
|
benchmarks/benchmarks/__init__.py
|
Ennosigaeon/scipy
|
2d872f7cf2098031b9be863ec25e366a550b229c
|
[
"BSD-3-Clause"
] | 5,838
|
2015-01-05T11:56:42.000Z
|
2022-03-31T23:21:19.000Z
|
import numpy as np
import random
np.random.seed(1234)
random.seed(1234)
| 12.166667
| 20
| 0.780822
|
a72573e54f44f9e83ac2254e225bfb3660686da5
| 967
|
py
|
Python
|
exercicios2/ex062j.py
|
LuanGermano/Mundo-2-Curso-em-Video-Python
|
8e860beb1d3b31f1c5ac369767d32b353935b8b0
|
[
"MIT"
] | null | null | null |
exercicios2/ex062j.py
|
LuanGermano/Mundo-2-Curso-em-Video-Python
|
8e860beb1d3b31f1c5ac369767d32b353935b8b0
|
[
"MIT"
] | null | null | null |
exercicios2/ex062j.py
|
LuanGermano/Mundo-2-Curso-em-Video-Python
|
8e860beb1d3b31f1c5ac369767d32b353935b8b0
|
[
"MIT"
] | null | null | null |
termo = int(input('Digite o Termo da PA: '))
razao = int(input('Digite a razão: '))
ulti = int(input('Qual o termo da PA para ser mostrado: '))  # input values for the arithmetic progression (PA).
while ulti != 0:
resp = "n/a"
    while resp not in 'SsNn':  # keep asking while the answer is invalid
        resp = str(input('Vc gostaria de ver o caminho até o termo solicitado? [S/N]: ')).upper().strip()  # decision that splits the flow between the "yes" and "no" cases.
        if resp in 'Ss':  # "yes" case
for c in range(1, ulti+1):
pa = termo + (c - 1) * razao
print(pa, end=' ',)
print(end='\n')
break
        elif resp in 'Nn':  # "no" case
pa = termo + (ulti-1) * razao
print(pa, end='\n')
break
print('Por favor Responda se gostaria de ver o termo solicitado')
ulti = int(input('Qual o termo da PA para ser mostrado: '))
print('Encerrando o Programa')
print('=-='*12)
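
# Editor's sketch (not part of the original exercise): the loops above use the
# arithmetic-progression formula a_n = a_1 + (n - 1) * r. A hypothetical helper with
# a worked check of that formula:
def pa_term(first_term, difference, n):
    """E.g. pa_term(2, 3, 5) == 2 + (5 - 1) * 3 == 14."""
    return first_term + (n - 1) * difference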
| 43.954545
| 167
| 0.563599
|
eae9f565ee9e731f72609a3c0a8ec66dc0792ab3
| 958
|
py
|
Python
|
src/RealmServer.py
|
Ekkoz/DofusProxy
|
00369a73dc30f613d900348cb013de377fad2c7f
|
[
"MIT"
] | null | null | null |
src/RealmServer.py
|
Ekkoz/DofusProxy
|
00369a73dc30f613d900348cb013de377fad2c7f
|
[
"MIT"
] | null | null | null |
src/RealmServer.py
|
Ekkoz/DofusProxy
|
00369a73dc30f613d900348cb013de377fad2c7f
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
import socket
from src.RealmLocalClient import RealmLocalClient
from src.Logger import Logger
class RealmServer:
def __init__(self):
self.socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
def start(self):
try:
self.socket.bind(("127.0.0.1", 12345))
except socket.error as msg:
            Logger.error('[REALM] Bind failed. Error: ' + str(msg))
return (False)
self.socket.listen(16)
Logger.info("[REALM] Listening on port 12345...")
while True:
fd, addr = self.socket.accept()
ip, port = str(addr[0]), str(addr[1])
Logger.info('[REALM] New incomming connection: ' + ip + ':' + port)
newClient = RealmLocalClient(fd, addr, ip, port)
newClient.start()
self.socket.close()
| 29.030303
| 79
| 0.587683
|
19550defe2e955d4f23de0292b622327edad4a30
| 211
|
py
|
Python
|
test/manualAreplTests/longStrings.py
|
manuth/LiveCode
|
4f55e4cf4bf1b230407caf29bd5b7cd263d292ea
|
[
"MIT"
] | 203
|
2018-02-12T00:51:47.000Z
|
2022-03-14T17:28:26.000Z
|
test/manualAreplTests/longStrings.py
|
manuth/LiveCode
|
4f55e4cf4bf1b230407caf29bd5b7cd263d292ea
|
[
"MIT"
] | 404
|
2018-01-17T03:50:36.000Z
|
2022-03-24T23:46:06.000Z
|
test/manualAreplTests/longStrings.py
|
manuth/LiveCode
|
4f55e4cf4bf1b230407caf29bd5b7cd263d292ea
|
[
"MIT"
] | 21
|
2018-07-17T05:55:28.000Z
|
2022-02-21T08:25:45.000Z
|
x = "fe\nfa"
y = "fafafafaf \nfafafa\nfaffafafafaffafafafaffafafafaffafafafaffafafafaffafafafaffafafafaf\nfafafafaffafafafaf"
# first string should show \n
# second string should only go multiline when expanded
| 42.2
| 112
| 0.834123
|
15a5980f8ba9ebdfa87313cfcab3932b365f2d48
| 1,497
|
py
|
Python
|
rlgym/utils/common_values.py
|
pizzalord22/rocket-league-gym
|
c07d237643fdb1264a04147fc9a556bcb786a816
|
[
"Apache-2.0"
] | 63
|
2021-03-07T00:00:09.000Z
|
2022-03-27T16:11:26.000Z
|
rlgym_compat/common_values.py
|
VirxEC/rlgym-compat
|
db7b0a41492dcbb1aaa2dbfa5385dae3b9e10558
|
[
"Apache-2.0"
] | 21
|
2021-03-12T16:02:30.000Z
|
2022-03-29T01:59:32.000Z
|
rlgym_compat/common_values.py
|
VirxEC/rlgym-compat
|
db7b0a41492dcbb1aaa2dbfa5385dae3b9e10558
|
[
"Apache-2.0"
] | 23
|
2021-03-12T16:04:35.000Z
|
2022-03-28T15:49:24.000Z
|
SIDE_WALL_X = 4096 # +/-
BACK_WALL_Y = 5120 # +/-
CEILING_Z = 2044
BACK_NET_Y = 6000 # +/-
GOAL_HEIGHT = 642.775
ORANGE_GOAL_CENTER = (0, BACK_WALL_Y, GOAL_HEIGHT / 2)
BLUE_GOAL_CENTER = (0, -BACK_WALL_Y, GOAL_HEIGHT / 2)
# Often more useful than center
ORANGE_GOAL_BACK = (0, BACK_NET_Y, GOAL_HEIGHT / 2)
BLUE_GOAL_BACK = (0, -BACK_NET_Y, GOAL_HEIGHT / 2)
BALL_RADIUS = 92.75
BALL_MAX_SPEED = 6000
CAR_MAX_SPEED = 2300
SUPERSONIC_THRESHOLD = 2200
CAR_MAX_ANG_VEL = 5.5
BLUE_TEAM = 0
ORANGE_TEAM = 1
NUM_ACTIONS = 8
BOOST_LOCATIONS = (
(0.0, -4240.0, 70.0),
(-1792.0, -4184.0, 70.0),
(1792.0, -4184.0, 70.0),
(-3072.0, -4096.0, 73.0),
(3072.0, -4096.0, 73.0),
(- 940.0, -3308.0, 70.0),
(940.0, -3308.0, 70.0),
(0.0, -2816.0, 70.0),
(-3584.0, -2484.0, 70.0),
(3584.0, -2484.0, 70.0),
(-1788.0, -2300.0, 70.0),
(1788.0, -2300.0, 70.0),
(-2048.0, -1036.0, 70.0),
(0.0, -1024.0, 70.0),
(2048.0, -1036.0, 70.0),
(-3584.0, 0.0, 73.0),
(-1024.0, 0.0, 70.0),
(1024.0, 0.0, 70.0),
(3584.0, 0.0, 73.0),
(-2048.0, 1036.0, 70.0),
(0.0, 1024.0, 70.0),
(2048.0, 1036.0, 70.0),
(-1788.0, 2300.0, 70.0),
(1788.0, 2300.0, 70.0),
(-3584.0, 2484.0, 70.0),
(3584.0, 2484.0, 70.0),
(0.0, 2816.0, 70.0),
(- 940.0, 3310.0, 70.0),
(940.0, 3308.0, 70.0),
(-3072.0, 4096.0, 73.0),
(3072.0, 4096.0, 73.0),
(-1792.0, 4184.0, 70.0),
(1792.0, 4184.0, 70.0),
(0.0, 4240.0, 70.0),
)
| 24.145161
| 54
| 0.537742
|
5d1e5c990fed71e7555b03d4bd2c4a4c7425548e
| 5,345
|
py
|
Python
|
test/test_package_identification_python.py
|
esteve/colcon-core
|
c11ce9cb27482dac85f05b50b3cd2c1b459ae6b3
|
[
"Apache-2.0"
] | null | null | null |
test/test_package_identification_python.py
|
esteve/colcon-core
|
c11ce9cb27482dac85f05b50b3cd2c1b459ae6b3
|
[
"Apache-2.0"
] | 13
|
2020-04-02T21:11:56.000Z
|
2022-01-27T05:47:30.000Z
|
test/test_package_identification_python.py
|
esteve/colcon-core
|
c11ce9cb27482dac85f05b50b3cd2c1b459ae6b3
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2016-2018 Dirk Thomas
# Licensed under the Apache License, Version 2.0
from pathlib import Path
from tempfile import TemporaryDirectory
from colcon_core.package_augmentation.python \
import create_dependency_descriptor
from colcon_core.package_augmentation.python \
import PythonPackageAugmentation
from colcon_core.package_descriptor import PackageDescriptor
from colcon_core.package_identification.python \
import PythonPackageIdentification
import pytest
def test_identify():
extension = PythonPackageIdentification()
augmentation_extension = PythonPackageAugmentation()
with TemporaryDirectory(prefix='test_colcon_') as basepath:
desc = PackageDescriptor(basepath)
desc.type = 'other'
assert extension.identify(desc) is None
assert desc.name is None
desc.type = None
assert extension.identify(desc) is None
assert desc.name is None
assert desc.type is None
basepath = Path(basepath)
(basepath / 'setup.py').write_text('setup()')
assert extension.identify(desc) is None
assert desc.name is None
assert desc.type is None
(basepath / 'setup.cfg').write_text('')
assert extension.identify(desc) is None
assert desc.name is None
assert desc.type is None
(basepath / 'setup.cfg').write_text(
'[metadata]\n'
'name = pkg-name\n')
assert extension.identify(desc) is None
assert desc.name == 'pkg-name'
assert desc.type == 'python'
assert not desc.dependencies
assert not desc.metadata
augmentation_extension.augment_package(desc)
assert set(desc.dependencies.keys()) == {'build', 'run', 'test'}
assert not desc.dependencies['build']
assert not desc.dependencies['run']
assert not desc.dependencies['test']
desc = PackageDescriptor(basepath)
desc.name = 'other-name'
with pytest.raises(RuntimeError) as e:
extension.identify(desc)
assert str(e.value).endswith(
'Package name already set to different value')
(basepath / 'setup.cfg').write_text(
'[metadata]\n'
'name = other-name\n'
'[options]\n'
'setup_requires =\n'
" build; sys_platform != 'win32'\n"
" build-windows; sys_platform == 'win32'\n"
'install_requires =\n'
' runA > 1.2.3\n'
' runB\n'
'tests_require = test == 2.0.0\n'
'zip_safe = false\n'
'[options.extras_require]\n'
'test = test2 == 3.0.0\n'
'tests = test3\n'
'testing = test4\n'
'other = not-test\n')
assert extension.identify(desc) is None
assert desc.name == 'other-name'
assert desc.type == 'python'
assert not desc.dependencies
assert not desc.metadata
augmentation_extension.augment_package(desc)
assert set(desc.dependencies.keys()) == {'build', 'run', 'test'}
assert desc.dependencies['build'] == {'build', 'build-windows'}
assert desc.dependencies['run'] == {'runA', 'runB'}
dep = next(x for x in desc.dependencies['run'] if x == 'runA')
assert dep.metadata['version_gt'] == '1.2.3'
assert desc.dependencies['test'] == {'test', 'test2', 'test3', 'test4'}
assert callable(desc.metadata['get_python_setup_options'])
options = desc.metadata['get_python_setup_options'](None)
assert 'zip_safe' in options
def test_create_dependency_descriptor():
eq_str = 'pkgname==2.2.0'
dep = create_dependency_descriptor(eq_str)
assert dep.metadata['version_eq'] == '2.2.0'
lt_str = 'pkgname<2.3.0'
dep = create_dependency_descriptor(lt_str)
assert dep.metadata['version_lt'] == '2.3.0'
lte_str = 'pkgname<=2.2.0'
dep = create_dependency_descriptor(lte_str)
assert dep.metadata['version_lte'] == '2.2.0'
gt_str = 'pkgname>2.3.0'
dep = create_dependency_descriptor(gt_str)
assert dep.metadata['version_gt'] == '2.3.0'
gte_str = 'pkgname>=2.2.0'
dep = create_dependency_descriptor(gte_str)
assert dep.metadata['version_gte'] == '2.2.0'
neq_str = 'pkgname!=1.2.1'
dep = create_dependency_descriptor(neq_str)
assert dep.metadata['version_neq'] == '1.2.1'
compat_str = 'pkgname~=1.4.1a4'
dep = create_dependency_descriptor(compat_str)
assert dep.metadata['version_gte'] == '1.4.1a4'
assert dep.metadata['version_lt'] == '1.5'
compat_str = 'pkgname~=1.4.1'
dep = create_dependency_descriptor(compat_str)
assert dep.metadata['version_gte'] == '1.4.1'
assert dep.metadata['version_lt'] == '1.5'
compat_str = 'pkgname~=1.4.1.4'
dep = create_dependency_descriptor(compat_str)
assert dep.metadata['version_gte'] == '1.4.1.4'
assert dep.metadata['version_lt'] == '1.4.2'
compat_str = 'pkgname~=11.12'
dep = create_dependency_descriptor(compat_str)
assert dep.metadata['version_gte'] == '11.12'
assert dep.metadata['version_lt'] == '12.0'
multi_str = 'pkgname<=3.2.0, >=2.2.0'
dep = create_dependency_descriptor(multi_str)
assert dep.metadata['version_gte'] == '2.2.0'
assert dep.metadata['version_lte'] == '3.2.0'
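
# Editor's sketch (illustration only; not used by the tests above): the '~=' cases
# encode PEP 440 "compatible release" semantics -- '~=X.Y.Z' behaves like
# '>=X.Y.Z, <X.(Y+1)'. A minimal, hypothetical helper deriving that upper bound,
# assuming every segment except the last is a plain integer:
def _compatible_release_upper_bound(version):
    """E.g. '1.4.1a4' -> '1.5', '1.4.1.4' -> '1.4.2', '11.12' -> '12.0'."""
    parts = version.split('.')
    head = parts[:-1]                  # drop the final release segment
    head[-1] = str(int(head[-1]) + 1)  # bump the new last segment
    return '.'.join(head) if len(head) > 1 else head[0] + '.0'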
| 35.633333
| 79
| 0.635547
|
2ea6de1cec12fec6e7a6e31bcc5825edb87dbe64
| 7,174
|
py
|
Python
|
raisimGym/helper/env_helper.py
|
Pala-hmd/drl_mi_quadcopter
|
62836ebd78ec4df7af2309f0a8457023dd78daa0
|
[
"BSD-3-Clause-Clear"
] | null | null | null |
raisimGym/helper/env_helper.py
|
Pala-hmd/drl_mi_quadcopter
|
62836ebd78ec4df7af2309f0a8457023dd78daa0
|
[
"BSD-3-Clause-Clear"
] | null | null | null |
raisimGym/helper/env_helper.py
|
Pala-hmd/drl_mi_quadcopter
|
62836ebd78ec4df7af2309f0a8457023dd78daa0
|
[
"BSD-3-Clause-Clear"
] | null | null | null |
import torch
import numpy as np
import os
import ntpath
from shutil import copyfile
from raisimGymTorch.env.RaisimGymVecEnv import RunningMeanStd
class helper:
def __init__(self, env, num_obs, normalize_ob=True, update_mean=True, scale_action=True, clip_action=False,
scale_obs_rms=False):
self.num_envs = env.num_envs
self.num_obs = num_obs
self.obs_rms = RunningMeanStd(shape=[self.num_envs, self.num_obs])
self.clip_obs = env.clip_obs
self.normalize_ob = normalize_ob
self.update_mean = update_mean
self.scale_action = scale_action
self.clip_action = clip_action
self.scale_obs_rms = scale_obs_rms
# action scaling
def limit_action(self, actions):
if self.clip_action:
return np.clip(actions.cpu().detach().numpy(), -1, 1)
elif self.scale_action:
for i in range(0, len(actions)):
min = torch.min(actions[i][:])
max = torch.max(actions[i][:])
if torch.abs(min) > 1 or torch.abs(max) > 1:
if torch.abs(min) < torch.abs(max):
actions[i][:] /= torch.abs(max)
else:
actions[i][:] /= torch.abs(min)
return actions.cpu().detach().numpy()
else:
return actions.cpu().detach().numpy()
""" works as an environment wrapper, uses methods of env to normalize the observation and update the RMS.
when to use: If the observation vector of env.observe has more entries than the actual neural network input.
The respective method of the env object normalizes also the additional entries of the observation vector
passed from the environment (defined in Environment.hpp)"""
def normalize_observation(self, observation):
        if self.normalize_ob:
if self.update_mean:
# update observation scaling based on the parallel algorithm
self.obs_rms.update(observation)
""" if scale_obs_rms = True:
observation RMS will be scaled which has the effect of projecting the target point further away or closer
to the agent with a scaling factor of (0.2 + norm(pos)/10), since the agent is trained on target
points which are 10 m away."""
obs_rms_var = self.obs_rms.var.copy()
if self.scale_obs_rms:
if len(observation) > 1:
for i in range(len(obs_rms_var)):
obs_rms_var[i][0:3] = self.obs_rms.var[i][0:3] * (0.2+(np.linalg.norm(observation[i][0:3])/10)*0.8)
observation_norm = np.clip((observation - self.obs_rms.mean) / np.sqrt(obs_rms_var + 1e-8), - self.clip_obs,
self.clip_obs)
else:
obs_rms_var[0:3] = self.obs_rms.var[0:3] * (0.2+(np.linalg.norm(observation[0:3])/10)*0.8)
observation_norm = np.clip((observation - self.obs_rms.mean[0]) / np.sqrt(obs_rms_var[0] + 1e-8), - self.clip_obs,
self.clip_obs)
else:
observation_norm = np.clip((observation - self.obs_rms.mean) / np.sqrt(obs_rms_var + 1e-8), - self.clip_obs,
self.clip_obs)
return observation_norm
else:
return observation
# save and load observation scaling
def save_scaling(self, dir_name, iteration):
mean_file_name = dir_name + "/mean" + iteration + ".csv"
var_file_name = dir_name + "/var" + iteration + ".csv"
np.savetxt(mean_file_name, self.obs_rms.mean)
np.savetxt(var_file_name, self.obs_rms.var)
def load_scaling(self, weight_dir, iteration, count=1e5):
dir_name = weight_dir.rsplit('/', 1)[0]
mean_file_name = dir_name + "/mean" + str(iteration) + ".csv"
var_file_name = dir_name + "/var" + str(iteration) + ".csv"
self.obs_rms.count = count
self.obs_rms.mean = np.loadtxt(mean_file_name, dtype=np.float32)
self.obs_rms.var = np.loadtxt(var_file_name, dtype=np.float32)
# load the neural network model parameters and save setup config of pre-trained model
def load_param(self, weight_path, actor, critic, learner, data_dir, file_name, save_items=True):
if weight_path == "":
raise Exception("\nCan't find the pre-trained weight, please provide a pre-trained weight with --weight switch\n")
print("\nRetraining from the checkpoint:", weight_path+"\n")
iteration_number = weight_path.rsplit('/', 1)[1].split('_', 1)[1].rsplit('.', 1)[0]
weight_dir = weight_path.rsplit('/', 1)[0] + '/'
mean_csv_path = weight_dir + 'mean' + iteration_number + '.csv'
var_csv_path = weight_dir + 'var' + iteration_number + '.csv'
items_to_save = [weight_path, mean_csv_path, var_csv_path, weight_dir +file_name + "cfg.yaml", weight_dir +
"Environment.hpp"]
if items_to_save is not None and save_items:
pretrained_data_dir = data_dir + '/pretrained_' + weight_path.rsplit('/', 1)[0].rsplit('/', 1)[1]
os.makedirs(pretrained_data_dir)
for item_to_save in items_to_save:
copyfile(item_to_save, pretrained_data_dir+'/'+item_to_save.rsplit('/', 1)[1])
# load actor and critic parameters from full checkpoint
checkpoint = torch.load(weight_path)
actor.architecture.load_state_dict(checkpoint['actor_architecture_state_dict'])
actor.distribution.load_state_dict(checkpoint['actor_distribution_state_dict'])
critic.architecture.load_state_dict(checkpoint['critic_architecture_state_dict'])
#learner.optimizer.load_state_dict(checkpoint['optimizer_state_dict'])
#learner.scheduler.load_state_dict(checkpoint['scheduler_state_dict'])
""" Only useful in combination with DAgger: A method to prevent learning from failed environments and restoring
from the last checkpoint"""
def restart_from_last_checkpoint(self, env, saver, actor, critic, learner, update_num):
# Reset update number
update_modulo = update_num % 10
# Reset learner params
learner.storage.clear()
learner.beta += update_modulo * learner.beta_scheduler
learner.scheduler.step(epoch=(update_num-update_modulo)*learner.num_learning_epochs*learner.num_mini_batches)
learner.beta += learner.beta_scheduler
# Set new environment target
for i in range(10 - update_modulo - 1):
env.reset()
# Restore weights from last checkpoint
weight_path = saver.data_dir + "/full_" + str(update_num - update_modulo) + '.pt'
self.load_param(weight_path, actor, critic, learner, saver.data_dir, 'dagger', False)
return update_modulo + 1
def identify_failed_envs(self, dones) -> object:
failed_envs = np.where(dones == 1)
index = list(dict.fromkeys(failed_envs[1].tolist()))
return len(index), index
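

# Editor's sketch (added for illustration; not part of the original class): the
# normalization performed in helper.normalize_observation reduces to the standard
# running-mean/std formula. A minimal standalone version of that formula, assuming
# plain numpy arrays for mean and var:
def _normalize_obs_sketch(observation, mean, var, clip_obs, eps=1e-8):
    """Hypothetical helper: clip((obs - mean) / sqrt(var + eps), -clip_obs, clip_obs)."""
    return np.clip((observation - mean) / np.sqrt(var + eps), -clip_obs, clip_obs)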
| 48.802721
| 134
| 0.627265
|
d61076db57af678bdbcb76125eb263995408f600
| 2,947
|
py
|
Python
|
PrePorcessor/Preprocessor.py
|
Skyho1d/covid
|
f21b0c30be1144f667e69d582f968abe6b4e360f
|
[
"Apache-2.0"
] | 12
|
2020-03-23T18:26:45.000Z
|
2021-03-19T08:40:57.000Z
|
PrePorcessor/Preprocessor.py
|
Skyho1d/covid
|
f21b0c30be1144f667e69d582f968abe6b4e360f
|
[
"Apache-2.0"
] | 2
|
2021-05-21T16:18:56.000Z
|
2022-02-10T01:16:50.000Z
|
PrePorcessor/Preprocessor.py
|
Skyho1d/covid
|
f21b0c30be1144f667e69d582f968abe6b4e360f
|
[
"Apache-2.0"
] | 3
|
2020-03-24T13:23:57.000Z
|
2020-04-07T12:18:59.000Z
|
import cv2
import numpy as np
class SimplePreprocessor:
def __init__(self, width, height, inter=cv2.INTER_AREA):
self.width = width
self.height = height
self.inter = inter
def preprocess(self, input_image):
try:
            # If training for 100 epochs at size 50 gives poor results, remove all of the
            # commented lines below and just resize/convert the whole dataset before training the network.
# input_image = cv2.imread(image_paths)
# Type = image_paths.split(os.path.sep)[-2]
# image_blur = cv2.GaussianBlur(input_image, (7, 7), 0)
# image_blur_hsv = cv2.cvtColor(image_blur, cv2.COLOR_RGB2HSV)
# min_red = np.array([80, 60, 140])
# max_red = np.array([255, 255, 255])
# image_red1 = cv2.inRange(image_blur_hsv, min_red, max_red)
# big_contour, mask = self.find_biggest_contour(image_red1)
# (x, y), radius = cv2.minEnclosingCircle(big_contour)
# center = (int(x), int(y))
# radius = int(radius)
# imCircle = input_image.copy()
# cv2.circle(imCircle, center, radius, (0, 255, 0), 1)
# height, width, channels = imCircle.shape
# border = [0, 0, 0, 0]
# if center[0] + radius > width:
# extera = (center[0] + radius) - width
# border[3] = extera + 1
#
# if (center[0] - radius < 0):
# extera = width - (center[0] + radius)
# border[2] = extera + 1
#
# if center[1] + radius > height:
# extera = (center[1] + radius) - height
# border[1] = extera + 1
#
# if center[1] + radius < 0:
# extera = height - (center[1] + radius)
# border[0] = extera + 1
#
# y = center[1] - radius
# if y < 0:
# y = 0
# y2 = center[1] + radius
# x = center[0] - radius
# if x < 0:
# x = 0
#
# x2 = center[0] + radius
#
# cropped_image = input_image[y:y2, x:x2]
return cv2.resize(input_image, (self.width, self.height),
interpolation=self.inter)
except Exception as a:
print("preprocessor", a)
def find_biggest_contour(self, image):
image = image.copy()
s, contours, hierarchy = cv2.findContours(image, cv2.RETR_LIST, cv2.CHAIN_APPROX_SIMPLE)
biggest_contour = max(contours, key=cv2.contourArea)
mask = np.zeros(image.shape, np.uint8)
cv2.drawContours(mask, [biggest_contour], -1, 255, -1)
return biggest_contour, mask
    def overlay_mask(self, mask, image):
        rgb_mask = cv2.cvtColor(mask, cv2.COLOR_GRAY2RGB)
        img = cv2.addWeighted(rgb_mask, 0.5, image, 0.5, 0)
        # show(img)
        return img
| 39.824324
| 151
| 0.519172
|
b98b76b89b983f67fd15ae383803f47a76dcc15a
| 2,219
|
py
|
Python
|
tests/integration_tests/data_steward/utils/sandbox_test.py
|
lrwb-aou/curation
|
e80447e56d269dc2c9c8bc79e78218d4b0dc504c
|
[
"MIT"
] | 16
|
2017-06-30T20:05:05.000Z
|
2022-03-08T21:03:19.000Z
|
tests/integration_tests/data_steward/utils/sandbox_test.py
|
lrwb-aou/curation
|
e80447e56d269dc2c9c8bc79e78218d4b0dc504c
|
[
"MIT"
] | 342
|
2017-06-23T21:37:40.000Z
|
2022-03-30T16:44:16.000Z
|
tests/integration_tests/data_steward/utils/sandbox_test.py
|
lrwb-aou/curation
|
e80447e56d269dc2c9c8bc79e78218d4b0dc504c
|
[
"MIT"
] | 33
|
2017-07-01T00:12:20.000Z
|
2022-01-26T18:06:53.000Z
|
# Python imports
import os
import unittest
import app_identity
# Project Imports
from utils import sandbox
from utils.bq import get_client
class SandboxTest(unittest.TestCase):
@classmethod
def setUpClass(cls):
print('**************************************************************')
print(cls.__name__)
print('**************************************************************')
def setUp(self):
self.project_id = app_identity.get_application_id()
self.dataset_id = os.environ.get('UNIONED_DATASET_ID')
self.sandbox_id = sandbox.get_sandbox_dataset_id(self.dataset_id)
self.fq_sandbox_id = f'{self.project_id}.{self.sandbox_id}'
# Removing any existing datasets that might interfere with the test
self.client = get_client(self.project_id)
self.client.delete_dataset(self.fq_sandbox_id,
delete_contents=True,
not_found_ok=True)
def test_create_sandbox_dataset(self):
# pre-conditions
pre_test_datasets_obj = list(self.client.list_datasets(self.project_id))
pre_test_datasets = [d.dataset_id for d in pre_test_datasets_obj]
# Create sandbox dataset
sandbox_dataset = sandbox.create_sandbox_dataset(
self.project_id, self.dataset_id)
# Post condition checks
post_test_datasets_obj = list(self.client.list_datasets(
self.project_id))
post_test_datasets = [d.dataset_id for d in post_test_datasets_obj]
# make sure the dataset didn't already exist
self.assertTrue(sandbox_dataset not in pre_test_datasets)
# make sure it was actually created
self.assertTrue(sandbox_dataset in post_test_datasets)
# Try to create same sandbox, which now already exists
self.assertRaises(RuntimeError, sandbox.create_sandbox_dataset,
self.project_id, self.dataset_id)
def tearDown(self):
# Remove fake dataset created in project
self.client.delete_dataset(self.fq_sandbox_id,
delete_contents=True,
not_found_ok=True)
| 38.258621
| 80
| 0.621902
|
e721be15faf05bd74e4b3ab092e7c5295fc04a4f
| 10,070
|
py
|
Python
|
src/oci/opsi/models/summarize_host_insight_resource_statistics_aggregation_collection.py
|
Manny27nyc/oci-python-sdk
|
de60b04e07a99826254f7255e992f41772902df7
|
[
"Apache-2.0",
"BSD-3-Clause"
] | null | null | null |
src/oci/opsi/models/summarize_host_insight_resource_statistics_aggregation_collection.py
|
Manny27nyc/oci-python-sdk
|
de60b04e07a99826254f7255e992f41772902df7
|
[
"Apache-2.0",
"BSD-3-Clause"
] | null | null | null |
src/oci/opsi/models/summarize_host_insight_resource_statistics_aggregation_collection.py
|
Manny27nyc/oci-python-sdk
|
de60b04e07a99826254f7255e992f41772902df7
|
[
"Apache-2.0",
"BSD-3-Clause"
] | null | null | null |
# coding: utf-8
# Copyright (c) 2016, 2021, Oracle and/or its affiliates. All rights reserved.
# This software is dual-licensed to you under the Universal Permissive License (UPL) 1.0 as shown at https://oss.oracle.com/licenses/upl or Apache License 2.0 as shown at http://www.apache.org/licenses/LICENSE-2.0. You may choose either license.
from oci.util import formatted_flat_dict, NONE_SENTINEL, value_allowed_none_or_none_sentinel # noqa: F401
from oci.decorators import init_model_state_from_kwargs
@init_model_state_from_kwargs
class SummarizeHostInsightResourceStatisticsAggregationCollection(object):
"""
Returns list of hosts with resource statistics like usage, capacity, utilization, usage change percent and load.
"""
#: A constant which can be used with the resource_metric property of a SummarizeHostInsightResourceStatisticsAggregationCollection.
#: This constant has a value of "CPU"
RESOURCE_METRIC_CPU = "CPU"
#: A constant which can be used with the resource_metric property of a SummarizeHostInsightResourceStatisticsAggregationCollection.
#: This constant has a value of "MEMORY"
RESOURCE_METRIC_MEMORY = "MEMORY"
#: A constant which can be used with the resource_metric property of a SummarizeHostInsightResourceStatisticsAggregationCollection.
#: This constant has a value of "LOGICAL_MEMORY"
RESOURCE_METRIC_LOGICAL_MEMORY = "LOGICAL_MEMORY"
#: A constant which can be used with the usage_unit property of a SummarizeHostInsightResourceStatisticsAggregationCollection.
#: This constant has a value of "CORES"
USAGE_UNIT_CORES = "CORES"
#: A constant which can be used with the usage_unit property of a SummarizeHostInsightResourceStatisticsAggregationCollection.
#: This constant has a value of "GB"
USAGE_UNIT_GB = "GB"
#: A constant which can be used with the usage_unit property of a SummarizeHostInsightResourceStatisticsAggregationCollection.
#: This constant has a value of "MBPS"
USAGE_UNIT_MBPS = "MBPS"
#: A constant which can be used with the usage_unit property of a SummarizeHostInsightResourceStatisticsAggregationCollection.
#: This constant has a value of "PERCENT"
USAGE_UNIT_PERCENT = "PERCENT"
def __init__(self, **kwargs):
"""
Initializes a new SummarizeHostInsightResourceStatisticsAggregationCollection object with values from keyword arguments.
The following keyword arguments are supported (corresponding to the getters/setters of this class):
:param time_interval_start:
The value to assign to the time_interval_start property of this SummarizeHostInsightResourceStatisticsAggregationCollection.
:type time_interval_start: datetime
:param time_interval_end:
The value to assign to the time_interval_end property of this SummarizeHostInsightResourceStatisticsAggregationCollection.
:type time_interval_end: datetime
:param resource_metric:
The value to assign to the resource_metric property of this SummarizeHostInsightResourceStatisticsAggregationCollection.
Allowed values for this property are: "CPU", "MEMORY", "LOGICAL_MEMORY", 'UNKNOWN_ENUM_VALUE'.
Any unrecognized values returned by a service will be mapped to 'UNKNOWN_ENUM_VALUE'.
:type resource_metric: str
:param usage_unit:
The value to assign to the usage_unit property of this SummarizeHostInsightResourceStatisticsAggregationCollection.
Allowed values for this property are: "CORES", "GB", "MBPS", "PERCENT", 'UNKNOWN_ENUM_VALUE'.
Any unrecognized values returned by a service will be mapped to 'UNKNOWN_ENUM_VALUE'.
:type usage_unit: str
:param items:
The value to assign to the items property of this SummarizeHostInsightResourceStatisticsAggregationCollection.
:type items: list[oci.opsi.models.HostInsightResourceStatisticsAggregation]
"""
self.swagger_types = {
'time_interval_start': 'datetime',
'time_interval_end': 'datetime',
'resource_metric': 'str',
'usage_unit': 'str',
'items': 'list[HostInsightResourceStatisticsAggregation]'
}
self.attribute_map = {
'time_interval_start': 'timeIntervalStart',
'time_interval_end': 'timeIntervalEnd',
'resource_metric': 'resourceMetric',
'usage_unit': 'usageUnit',
'items': 'items'
}
self._time_interval_start = None
self._time_interval_end = None
self._resource_metric = None
self._usage_unit = None
self._items = None
@property
def time_interval_start(self):
"""
**[Required]** Gets the time_interval_start of this SummarizeHostInsightResourceStatisticsAggregationCollection.
The start timestamp that was passed into the request.
:return: The time_interval_start of this SummarizeHostInsightResourceStatisticsAggregationCollection.
:rtype: datetime
"""
return self._time_interval_start
@time_interval_start.setter
def time_interval_start(self, time_interval_start):
"""
Sets the time_interval_start of this SummarizeHostInsightResourceStatisticsAggregationCollection.
The start timestamp that was passed into the request.
:param time_interval_start: The time_interval_start of this SummarizeHostInsightResourceStatisticsAggregationCollection.
:type: datetime
"""
self._time_interval_start = time_interval_start
@property
def time_interval_end(self):
"""
**[Required]** Gets the time_interval_end of this SummarizeHostInsightResourceStatisticsAggregationCollection.
The end timestamp that was passed into the request.
:return: The time_interval_end of this SummarizeHostInsightResourceStatisticsAggregationCollection.
:rtype: datetime
"""
return self._time_interval_end
@time_interval_end.setter
def time_interval_end(self, time_interval_end):
"""
Sets the time_interval_end of this SummarizeHostInsightResourceStatisticsAggregationCollection.
The end timestamp that was passed into the request.
:param time_interval_end: The time_interval_end of this SummarizeHostInsightResourceStatisticsAggregationCollection.
:type: datetime
"""
self._time_interval_end = time_interval_end
@property
def resource_metric(self):
"""
**[Required]** Gets the resource_metric of this SummarizeHostInsightResourceStatisticsAggregationCollection.
Defines the type of resource metric (CPU, Physical Memory, Logical Memory)
Allowed values for this property are: "CPU", "MEMORY", "LOGICAL_MEMORY", 'UNKNOWN_ENUM_VALUE'.
Any unrecognized values returned by a service will be mapped to 'UNKNOWN_ENUM_VALUE'.
:return: The resource_metric of this SummarizeHostInsightResourceStatisticsAggregationCollection.
:rtype: str
"""
return self._resource_metric
@resource_metric.setter
def resource_metric(self, resource_metric):
"""
Sets the resource_metric of this SummarizeHostInsightResourceStatisticsAggregationCollection.
Defines the type of resource metric (CPU, Physical Memory, Logical Memory)
:param resource_metric: The resource_metric of this SummarizeHostInsightResourceStatisticsAggregationCollection.
:type: str
"""
allowed_values = ["CPU", "MEMORY", "LOGICAL_MEMORY"]
if not value_allowed_none_or_none_sentinel(resource_metric, allowed_values):
resource_metric = 'UNKNOWN_ENUM_VALUE'
self._resource_metric = resource_metric
@property
def usage_unit(self):
"""
**[Required]** Gets the usage_unit of this SummarizeHostInsightResourceStatisticsAggregationCollection.
Displays usage unit.
Allowed values for this property are: "CORES", "GB", "MBPS", "PERCENT", 'UNKNOWN_ENUM_VALUE'.
Any unrecognized values returned by a service will be mapped to 'UNKNOWN_ENUM_VALUE'.
:return: The usage_unit of this SummarizeHostInsightResourceStatisticsAggregationCollection.
:rtype: str
"""
return self._usage_unit
@usage_unit.setter
def usage_unit(self, usage_unit):
"""
Sets the usage_unit of this SummarizeHostInsightResourceStatisticsAggregationCollection.
Displays usage unit.
:param usage_unit: The usage_unit of this SummarizeHostInsightResourceStatisticsAggregationCollection.
:type: str
"""
allowed_values = ["CORES", "GB", "MBPS", "PERCENT"]
if not value_allowed_none_or_none_sentinel(usage_unit, allowed_values):
usage_unit = 'UNKNOWN_ENUM_VALUE'
self._usage_unit = usage_unit
@property
def items(self):
"""
**[Required]** Gets the items of this SummarizeHostInsightResourceStatisticsAggregationCollection.
Collection of Resource Statistics items
:return: The items of this SummarizeHostInsightResourceStatisticsAggregationCollection.
:rtype: list[oci.opsi.models.HostInsightResourceStatisticsAggregation]
"""
return self._items
@items.setter
def items(self, items):
"""
Sets the items of this SummarizeHostInsightResourceStatisticsAggregationCollection.
Collection of Resource Statistics items
:param items: The items of this SummarizeHostInsightResourceStatisticsAggregationCollection.
:type: list[oci.opsi.models.HostInsightResourceStatisticsAggregation]
"""
self._items = items
def __repr__(self):
return formatted_flat_dict(self)
def __eq__(self, other):
if other is None:
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
return not self == other
| 42.133891
| 245
| 0.719762
|
327a50741387f3a31cef74d7811b9450a135ab2d
| 253
|
py
|
Python
|
benefactor_challenge/benefactor_challenge/doctype/company/company.py
|
IRANKUND/Benefactor-challenge
|
78c364fcd9e776f50319e2de4eaf269d64a6508f
|
[
"MIT"
] | null | null | null |
benefactor_challenge/benefactor_challenge/doctype/company/company.py
|
IRANKUND/Benefactor-challenge
|
78c364fcd9e776f50319e2de4eaf269d64a6508f
|
[
"MIT"
] | null | null | null |
benefactor_challenge/benefactor_challenge/doctype/company/company.py
|
IRANKUND/Benefactor-challenge
|
78c364fcd9e776f50319e2de4eaf269d64a6508f
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
# Copyright (c) 2021, patrick and contributors
# For license information, please see license.txt
from __future__ import unicode_literals
# import frappe
from frappe.model.document import Document
class company(Document):
pass
| 23
| 49
| 0.770751
|
5b58bb9c22522b79d92f3dde69f41f830a00ff52
| 100
|
py
|
Python
|
Django_UCSD/Registration/apps.py
|
yotohoshi/UCSDweb_design
|
eb6e9b61b78b33456c083bb3cb910b00fd527665
|
[
"MIT"
] | null | null | null |
Django_UCSD/Registration/apps.py
|
yotohoshi/UCSDweb_design
|
eb6e9b61b78b33456c083bb3cb910b00fd527665
|
[
"MIT"
] | null | null | null |
Django_UCSD/Registration/apps.py
|
yotohoshi/UCSDweb_design
|
eb6e9b61b78b33456c083bb3cb910b00fd527665
|
[
"MIT"
] | null | null | null |
from django.apps import AppConfig
class RegistrationConfig(AppConfig):
name = 'Registration'
| 14.285714
| 36
| 0.77
|
179a869460810b07be8b67bc8f5f0c55b7cd87ba
| 1,859
|
py
|
Python
|
ch10/1003_replace_nested_conditional_with_guard_clauses.py
|
twotwo/refactoring-python
|
d329601b963c80bf1c16cd36b39048cbed3e2e06
|
[
"MIT"
] | null | null | null |
ch10/1003_replace_nested_conditional_with_guard_clauses.py
|
twotwo/refactoring-python
|
d329601b963c80bf1c16cd36b39048cbed3e2e06
|
[
"MIT"
] | null | null | null |
ch10/1003_replace_nested_conditional_with_guard_clauses.py
|
twotwo/refactoring-python
|
d329601b963c80bf1c16cd36b39048cbed3e2e06
|
[
"MIT"
] | null | null | null |
"""
1003_replace_nested_conditional_with_guard_clauses.py
范例 将条件反转
我们常常可以将条件表达式反转,从而实现以卫语句取代嵌套条件表达式
"""
from dataclasses import dataclass
@dataclass
class Instrument:
capital: int
income: int
duration: int
interest_rate: float
adjustment_factor: float
def adjusted_capital(instrument):
result = 0
if instrument.capital > 0:
if instrument.interest_rate > 0 and instrument.duration > 0:
result = (instrument.income / instrument.duration) * instrument.adjustment_factor
return result
def adjusted_capital_stage1(instrument):
result = 0
if instrument.capital <= 0: # reverse 1
return result
if not (instrument.interest_rate > 0 and instrument.duration > 0): # reverse 2
return result
result = (instrument.income / instrument.duration) * instrument.adjustment_factor
return result
def adjusted_capital_stage2(instrument):
result = 0
if instrument.capital <= 0:
return result
if instrument.interest_rate <= 0 or instrument.duration <= 0: # elimination of not logic
return result
result = (instrument.income / instrument.duration) * instrument.adjustment_factor
return result
def adjusted_capital_stage3(instrument):
if (
instrument.capital <= 0 or instrument.interest_rate <= 0 or instrument.duration <= 0
): # 10.2 Consolidate Conditional Expression
return 0
return (instrument.income / instrument.duration) * instrument.adjustment_factor
if __name__ == "__main__":
instrument = Instrument(capital=100, income=10, duration=10, interest_rate=0.2, adjustment_factor=0.8)
print("origin", adjusted_capital(instrument))
print("stage1", adjusted_capital_stage1(instrument))
print("stage2", adjusted_capital_stage2(instrument))
print("stage3", adjusted_capital_stage3(instrument))
| 29.983871
| 106
| 0.723507
|
f8e0e7d8c1df0d0f8d57a2f6a72a793547108e5c
| 9,250
|
py
|
Python
|
utils/rule_dir_stats.py
|
deperrone/content
|
caaff27f01a1d6c15da461f9fafe26090e8fdd18
|
[
"BSD-3-Clause"
] | 1,138
|
2018-09-05T06:31:44.000Z
|
2022-03-31T03:38:24.000Z
|
utils/rule_dir_stats.py
|
deperrone/content
|
caaff27f01a1d6c15da461f9fafe26090e8fdd18
|
[
"BSD-3-Clause"
] | 4,743
|
2018-09-04T15:14:04.000Z
|
2022-03-31T23:17:57.000Z
|
utils/rule_dir_stats.py
|
deperrone/content
|
caaff27f01a1d6c15da461f9fafe26090e8fdd18
|
[
"BSD-3-Clause"
] | 400
|
2018-09-08T20:08:49.000Z
|
2022-03-30T20:54:32.000Z
|
#!/usr/bin/env python
from __future__ import print_function
import argparse
import os
import sys
import json
import pprint
import ssg.build_yaml
import ssg.build_remediations
import ssg.products
import ssg.rule_dir_stats as rds
import ssg.rules
import ssg.yaml
SSG_ROOT = os.path.abspath(os.path.join(os.path.dirname(__file__), ".."))
def parse_args():
parser = argparse.ArgumentParser()
parser.add_argument("-i", "--input", type=str, action="store", default="build/rule_dirs.json",
help="File to read json output of rule_dir_json from (defaults to build/rule_dirs.json)")
parser.add_argument("-p", "--products", type=str, action="store", default="all",
help="Products to inquire about, as a comma separated list")
parser.add_argument("-t", "--strict", action="store_true",
help="Enforce strict --products checking against rule.yml prodtype only")
parser.add_argument("-q", "--query", type=str, action="store", default=None,
help="Limit actions to only act on a comma separated list of rule_ids")
parser.add_argument("-m", "--missing", action="store_true",
help="List rules which are missing OVALs or fixes")
parser.add_argument("-2", "--two-plus", action="store_true",
help="List rules which have two or more OVALs or fixes")
parser.add_argument("-r", "--prodtypes", action="store_true",
help="List rules which have different YAML prodtypes from checks+fix prodtypes")
parser.add_argument("-n", "--product-names", action="store_true",
help="List rules which have product specific objects with broader accepted products")
parser.add_argument("-?", "--introspect", action="store_true",
help="Dump raw objects for explicitly queried rule_ids")
parser.add_argument("-u", "--unassociated", action="store_true",
help="Search for rules without any product association")
parser.add_argument("-o", "--ovals-only", action="store_true",
help="Only output information about OVALs")
parser.add_argument("-f", "--fixes-only", action="store_true",
help="Only output information about fixes")
parser.add_argument("-s", "--summary-only", action="store_true",
help="Only output summary information")
return parser.parse_args()
def process_missing(args, known_rules):
result = rds.walk_rules_stats(args, known_rules, rds.missing_oval, rds.missing_remediation)
affected_rules = result[0]
affected_ovals = result[1]
affected_remediations = result[3]
affected_remediations_type = result[4]
verbose_output = result[5]
if not args.summary_only:
print("Missing Objects Specifics:")
for line in verbose_output:
print(line)
print("\n")
print("Missing Objects Summary:")
print("Total affected rules: %d" % affected_rules)
if not args.fixes_only:
print("Rules with no OVALs: %d / %d" % (affected_ovals, affected_rules))
if not args.ovals_only:
print("Rules without any remediations: %d / %d" % (affected_remediations, affected_rules))
for r_type in ssg.build_remediations.REMEDIATION_TO_EXT_MAP:
r_missing = affected_remediations_type[r_type]
print("Rules with no %s remediations: %d / %d" % (r_type, r_missing, affected_rules))
print("\n")
def process_two_plus(args, known_rules):
result = rds.walk_rules_stats(args, known_rules, rds.two_plus_oval, rds.two_plus_remediation)
affected_rules = result[0]
affected_ovals = result[1]
affected_remediations = result[2]
affected_remediations_type = result[4]
verbose_output = result[5]
if not args.summary_only:
print("Two Plus Object Specifics:")
for line in verbose_output:
print(line)
print("\n")
print("Two Plus Objects Summary:")
print("Total affected rules: %d" % affected_rules)
if not args.fixes_only:
print("Rules with two or more OVALs: %d / %d" % (affected_ovals, affected_rules))
if not args.ovals_only:
print("Rules with two or more remediations: %d / %d" % (affected_remediations, affected_rules))
for r_type in ssg.build_remediations.REMEDIATION_TO_EXT_MAP:
r_missing = affected_remediations_type[r_type]
print("Rules with two or more %s remediations: %d / %d" % (r_type, r_missing, affected_rules))
print("\n")
def process_prodtypes(args, known_rules):
result = rds.walk_rules_stats(args, known_rules, rds.prodtypes_oval, rds.prodtypes_remediation)
affected_rules = result[0]
affected_ovals = result[1]
affected_remediations = result[2]
affected_remediations_type = result[4]
verbose_output = result[5]
if not args.summary_only:
print("Prodtypes Object Specifics:")
for line in verbose_output:
print(line)
print("\n")
print("Prodtypes Objects Summary:")
print("Total affected rules: %d" % affected_rules)
if not args.fixes_only:
print("Rules with differing prodtypes between YAML and OVALs: %d / %d" % (affected_ovals, affected_rules))
if not args.ovals_only:
print("Rules with differing prodtypes between YAML and remediations: %d / %d" % (affected_remediations, affected_rules))
for r_type in ssg.build_remediations.REMEDIATION_TO_EXT_MAP:
r_missing = affected_remediations_type[r_type]
print("Rules with differing prodtypes between YAML and %s remediations: %d / %d" % (r_type, r_missing, affected_rules))
print("\n")
def process_product_names(args, known_rules):
result = rds.walk_rules_stats(args, known_rules, rds.product_names_oval, rds.product_names_remediation)
affected_rules = result[0]
affected_ovals = result[1]
affected_remediations = result[2]
affected_remediations_type = result[4]
verbose_output = result[5]
if not args.summary_only:
print("Product Names Specifics:")
for line in verbose_output:
print(line)
print("\n")
print("Product Names Summary:")
print("Total affected rules: %d" % affected_rules)
if not args.fixes_only:
print("Rules with differing products and OVAL names: %d / %d" % (affected_ovals, affected_rules))
if not args.ovals_only:
print("Rules with differing product and remediation names: %d / %d" % (affected_remediations, affected_rules))
for r_type in ssg.build_remediations.REMEDIATION_TO_EXT_MAP:
r_missing = affected_remediations_type[r_type]
print("Rules with differing product and %s remediation names: %d / %d" % (r_type, r_missing, affected_rules))
print("\n")
def process_introspection(args, known_rules):
for rule_id in args.query:
if not args.summary_only:
pprint.pprint(known_rules[rule_id])
print("\n")
else:
print(rule_id)
def process_unassociated(args, known_rules, all_products):
save_ovals_only = args.ovals_only
save_fixes_only = args.fixes_only
save_strict = args.strict
args.ovals_only = False
args.fixes_only = False
args.strict = False
for rule_id in known_rules:
rule_obj = known_rules[rule_id]
affected_products = rds.get_all_affected_products(args, rule_obj)
if affected_products.intersection(all_products):
continue
print("Unassociated Rule: rule_id:%s" % rule_id)
args.ovals_only = save_ovals_only
args.fixes_only = save_fixes_only
    args.strict = save_strict
def main():
args = parse_args()
linux_products, other_products = ssg.products.get_all(SSG_ROOT)
all_products = linux_products.union(other_products)
json_file = open(args.input, 'r')
known_rules = json.load(json_file)
if args.products.lower() == 'all':
args.products = all_products
elif args.products.lower() == 'linux':
args.products = linux_products
elif args.products.lower() == 'other':
args.products = other_products
else:
args.products = args.products.split(',')
args.products = set(args.products)
args.query = rds.filter_rule_ids(set(known_rules), args.query)
if not args.missing and not args.two_plus and not args.prodtypes and not args.introspect and not args.unassociated and not args.product_names:
args.missing = True
args.two_plus = True
args.prodtypes = True
print("Total number of known rule directories: %d" % len(known_rules))
print("Total number of queried rules: %d\n" % len(args.query))
if args.missing:
process_missing(args, known_rules)
if args.two_plus:
process_two_plus(args, known_rules)
if args.prodtypes:
process_prodtypes(args, known_rules)
if args.product_names:
process_product_names(args, known_rules)
if args.introspect and args.query:
process_introspection(args, known_rules)
if args.unassociated:
process_unassociated(args, known_rules, all_products)
if __name__ == "__main__":
main()
| 38.381743
| 146
| 0.672757
|
8c0e9505cc0cfa2d29f4ef967d8337efddcb2383
| 8,154
|
py
|
Python
|
src/custom_oauth.py
|
EOEPCA/um-user-profile
|
790487a50fc7d3bca70c2444a6b3b1567574812c
|
[
"Apache-2.0"
] | 1
|
2020-06-30T14:46:36.000Z
|
2020-06-30T14:46:36.000Z
|
src/custom_oauth.py
|
EOEPCA/um-user-profile
|
790487a50fc7d3bca70c2444a6b3b1567574812c
|
[
"Apache-2.0"
] | 2
|
2022-02-28T16:32:44.000Z
|
2022-03-08T10:16:12.000Z
|
src/custom_oauth.py
|
EOEPCA/um-user-profile
|
790487a50fc7d3bca70c2444a6b3b1567574812c
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/python3
import requests
import base64
import json
from eoepca_scim import *
import logging
import WellKnownHandler as wkh
from base64 import b64encode
import generic
from jwkest.jws import JWS
from jwkest.jwk import SYMKey, KEYS
from jwkest.jwk import RSAKey, import_rsa_key_from_file, load_jwks_from_url, import_rsa_key
from jwkest.jwk import load_jwks
from jwkest.jwk import rsa_load
from Crypto.PublicKey import RSA
from jwt_verification.signature_verification import JWT_Verification
class Singleton(type):
_instances = {}
def __call__(cls, *args, **kwargs):
if cls not in cls._instances:
cls._instances[cls] = super(Singleton, cls).__call__(*args, **kwargs)
return cls._instances[cls]
class OAuthClient(metaclass=Singleton):
def __init__(self, config,use_env_var):
if use_env_var is False:
self.scopes = self._get_valid_url_scopes(config["scopes"])
else:
config["scopes"] = config["scopes"].split(" ")
self.scopes = self._get_valid_url_scopes(config["scopes"])
sso_url = self._get_valid_https_url(config["sso_url"])
self.url= sso_url
self.wkhandler = wkh.WellKnownHandler(sso_url,secure=not config["debug_mode"]) # Force HTTPS if not debug mode
scim_client2 = EOEPCA_Scim(sso_url)
grantTypes=["client_credentials", "urn:ietf:params:oauth:grant-type:uma-ticket", "authorization_code", "refresh_token", "implicit", "password"]
redirectURIs=["https://"+config["sso_url"]+"/web_ui/oauth/callback"]
logoutURI="http://"+config["sso_url"]+"/web_ui"
responseTypes=["code", "token", "id_token"]
scopes=["openid", "user_name", "permission", "email", "eoepca", "is_operator"]
sectorIdentifier="https://"+config["sso_url"]+"/oxauth/sectoridentifier/9b473868-fa96-4fd1-a662-76e3663c9726"
token_endpoint_auth_method=ENDPOINT_AUTH_CLIENT_POST
scim_client2.registerClient("UserClient", grantTypes, redirectURIs, logoutURI, responseTypes, scopes, token_endpoint_auth_method, sectorIdentifier=sectorIdentifier)
self.client_id = self._get_valid_url_client_id(scim_client2.client_id)
self.redirect_uri = config["redirect_uri"]
self.client_secret = scim_client2.client_secret
self.post_logout_redirect_uri = config["post_logout_redirect_uri"]
def _get_valid_url_client_id(self, client_id):
return client_id.replace("@","%40")
    def _get_valid_url_scopes(self, scopes):
        # str.rstrip("%20") would strip any trailing '%', '2' or '0' characters rather
        # than the literal "%20" suffix, so join the scopes with the encoded space instead.
        return_scopes = "%20".join(scopes)
        if "is_operator" not in return_scopes:
            return_scopes = return_scopes + "%20is_operator"
        return return_scopes
    def _get_valid_https_url(self, url):
        if "http" not in url:
            return "https://" + url
        return url
def get_login_url(self):
auth_endpoint = self.wkhandler.get(wkh.TYPE_OIDC, wkh.KEY_OIDC_AUTHORIZATION_ENDPOINT)
return auth_endpoint + "?scope="+self.scopes+"&client_id="+self.client_id+"&redirect_uri="+self.redirect_uri+"&response_type=code"
def get_token(self, code):
token_endpoint = self.wkhandler.get(wkh.TYPE_OIDC, wkh.KEY_OIDC_TOKEN_ENDPOINT)
payload = "grant_type=authorization_code&client_id="+self.client_id+"&code="+code+"&client_secret="+self.client_secret+"&scope="+self.scopes+"&redirect_uri="+self.redirect_uri
headers = {"content-type": "application/x-www-form-urlencoded", 'cache-control': "no-cache"}
response = requests.request("POST", token_endpoint, data=payload, headers=headers, verify=False)
self.isOperator = self.verify_uid_headers(self.url, json.loads(response.text),'isOperator')
return json.loads(response.text)
def refresh_token(self, refresh_token):
"Gets a new token, using a previous refresh token"
token_endpoint = self.wkhandler.get(wkh.TYPE_OIDC, wkh.KEY_OIDC_TOKEN_ENDPOINT)
payload = "grant_type=refresh_token&refresh_token="+refresh_token+"&client_id="+self.client_id+"&client_secret="+self.client_secret
headers = {"content-type": "application/x-www-form-urlencoded", 'cache-control': "no-cache"}
response = requests.request("POST", token_endpoint, data=payload, headers=headers, verify=False)
return json.loads(response.text)
def get_user_info(self,access_token):
user_info_endpoint = self.wkhandler.get(wkh.TYPE_OIDC, wkh.KEY_OIDC_USERINFO_ENDPOINT)
response = requests.request("GET", user_info_endpoint+"?access_token="+access_token, verify=False)
status = response.status_code
if status > 199 and status < 300:
return json.loads(response.text)
else:
return None
def end_session_url(self, id_token):
end_session_endpoint = self.wkhandler.get(wkh.TYPE_OIDC, wkh.KEY_OIDC_END_SESSION_ENDPOINT)
return end_session_endpoint +"?post_logout_redirect_uri="+self.post_logout_redirect_uri+"&id_token_hint="+id_token
def verify_JWT_token(self,url, token, key):
try:
header = str(token).split(".")[0]
            paddedHeader = header + '=' * (-len(header) % 4)
decodedHeader = base64.b64decode(paddedHeader)
#to remove byte-code
decodedHeader_format = decodedHeader.decode('utf-8')
decoded_str_header = json.loads(decodedHeader_format)
payload = str(token).split(".")[1]
            paddedPayload = payload + '=' * (-len(payload) % 4)
decoded = base64.b64decode(paddedPayload)
#to remove byte-code
decoded = decoded.decode('utf-8')
decoded_str = json.loads(decoded)
if decoded_str_header['kid'] != "RSA1":
verificator = JWT_Verification(url)
result = verificator.verify_signature_JWT(token)
else:
#validate signature for rpt
rsajwk = RSAKey(kid="RSA1", key=import_rsa_key_from_file("config/public.pem"))
dict_rpt_values = JWS().verify_compact(token, keys=[rsajwk], sigalg="RS256")
if dict_rpt_values == decoded_str:
result = True
else:
result = False
if result == False:
print("Verification of the signature for the JWT failed!")
raise Exception
else:
print("Signature verification is correct!")
if decoded_str_header['kid'] != "RSA1":
if key in decoded_str.keys():
if decoded_str[key] != None:
user_value = decoded_str[key]
else:
raise Exception
else:
user_value = decoded_str['pct_claims'][key]
else:
if decoded_str[key] == None:
if decoded_str['pct_claims'][key][0] == None:
raise Exception
else:
user_value = decoded_str['pct_claims'][key][0]
else:
user_value = decoded_str[key]
return user_value
except Exception as e:
print("Authenticated RPT Resource. No Valid JWT id token passed! " +str(e))
return None
    def verify_OAuth_token(self, token, key):
        headers = {'content-type': "application/json", 'Authorization': 'Bearer ' + token}
        url = self.wkhandler.get(wkh.TYPE_OIDC, wkh.KEY_OIDC_USERINFO_ENDPOINT)
        try:
            res = requests.get(url, headers=headers, verify=False)
            user = res.json()
            return user[key]
        except Exception:
            print("OIDC Handler: Get User " + key + ": Exception occurred!")
            return None
def verify_uid_headers(self,url, jwt, key):
value = None
token_protected = None
myJWT = jwt['id_token']
value=self.verify_JWT_token(url, myJWT, key)
return value
| 44.075676
| 183
| 0.636988
|
5e33179e1ee8472c9d87f0b0be89a0da6157ac06
| 202
|
py
|
Python
|
Chapter16/parse_web_page.py
|
add54/ADMIN_SYS_PYTHON
|
5a6d9705537c8663c8f7b0f45d29ccc87b6096e7
|
[
"MIT"
] | 116
|
2018-12-21T01:05:47.000Z
|
2022-03-23T21:41:41.000Z
|
Chapter16/parse_web_page.py
|
add54/ADMIN_SYS_PYTHON
|
5a6d9705537c8663c8f7b0f45d29ccc87b6096e7
|
[
"MIT"
] | 2
|
2021-03-31T19:36:19.000Z
|
2021-06-10T22:29:26.000Z
|
Chapter16/parse_web_page.py
|
add54/ADMIN_SYS_PYTHON
|
5a6d9705537c8663c8f7b0f45d29ccc87b6096e7
|
[
"MIT"
] | 147
|
2018-12-19T14:10:32.000Z
|
2022-03-20T11:03:20.000Z
|
import requests
from bs4 import BeautifulSoup
page_result = requests.get('https://www.imdb.com/news/top?ref_=nv_nw_tp')
parse_obj = BeautifulSoup(page_result.content, 'html.parser')
print(parse_obj)
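# A small illustrative extension (not part of the original chapter script): once the
# page is parsed, BeautifulSoup can pull out individual pieces of the document. Only
# generic tags (<title>, <a>) are assumed here, not any IMDB-specific markup.
print(parse_obj.title.text)                  # the page's <title> text
for link in parse_obj.find_all('a')[:10]:    # first ten hyperlinks on the page
    print(link.get('href'))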
| 22.444444
| 73
| 0.792079
|
0b3247284309293fb1ec2674ecea0cacc053bce8
| 1,648
|
py
|
Python
|
etabotsite/etabotapp/TMSlib/Atlassian_API.py
|
aradnaev/pmp
|
2043c61d3b8bd21e49b5b07a4aa85d833d9531e8
|
[
"Apache-2.0"
] | 3
|
2018-07-05T00:04:59.000Z
|
2021-03-08T03:04:44.000Z
|
etabotsite/etabotapp/TMSlib/Atlassian_API.py
|
aradnaev/pmp
|
2043c61d3b8bd21e49b5b07a4aa85d833d9531e8
|
[
"Apache-2.0"
] | 20
|
2020-01-25T03:58:30.000Z
|
2022-03-02T01:05:52.000Z
|
etabotsite/etabotapp/TMSlib/Atlassian_API.py
|
aradnaev/pmp
|
2043c61d3b8bd21e49b5b07a4aa85d833d9531e8
|
[
"Apache-2.0"
] | 4
|
2020-04-08T00:00:49.000Z
|
2021-03-08T03:24:04.000Z
|
"""Atlassian API helper functions."""
import requests
ATLASSIAN_CLOUD_BASE = "https://api.atlassian.com/"
ATLASSIAN_CLOUD_PROFILE = ATLASSIAN_CLOUD_BASE + "me"
class AtlassianAPI:
def __init__(self, token):
self.token = token
self.accessible_resources_api = \
"https://api.atlassian.com/oauth/token/accessible-resources"
def default_headers(self):
return {
'Authorization': 'Bearer {}'.format(self.token.access_token),
'Accept': 'application/json'
}
@staticmethod
    def mock_get_accessible_resources():
return [{
"id": "d1083787-4491-40c9-9581-8625f52baf7e",
"url": "https://etabot.atlassian.net",
"name": "etabot",
"scopes": ["write:jira-work", "read:jira-work", "read:jira-user"],
"avatarUrl":"https://site-admin-avatar-cdn.prod.public.atl-paas.net/avatars/240/site.png"}]
def get_accessible_resources(self):
"""Example:
[{"id":"d1083787-4491-40c9-9581-8625f52baf7e","url":"https://etabot.atlassian.net","name":"etabot","scopes":["write:jira-work","read:jira-work","read:jira-user"],"avatarUrl":"https://site-admin-avatar-cdn.prod.public.atl-paas.net/avatars/240/site.png"}]
"""
headers = self.default_headers()
res = requests.get(
self.accessible_resources_api, headers=headers)
if res.status_code == 200:
return res.json()
else:
raise NameError('Could not get resources with given token. Status code: {}, message {}:'.format(
res.status_code, res.json()
))
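# A minimal usage sketch (not part of the original module), assuming a token object that
# exposes an `access_token` attribute as expected by default_headers(); a real Atlassian
# OAuth 2.0 access token would have to be substituted for the dummy value below.
if __name__ == "__main__":
    from collections import namedtuple
    OAuthToken = namedtuple("OAuthToken", ["access_token"])  # hypothetical token holder
    api = AtlassianAPI(OAuthToken(access_token="dummy-access-token"))
    print(api.get_accessible_resources())  # lists the Jira Cloud sites this token can reach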
| 37.454545
| 261
| 0.615898
|
d58572d41159dac6085595d61d9686ef1499e0ac
| 3,780
|
py
|
Python
|
manga_py/info.py
|
theincognito-inc/manga-dl
|
899905bafb6c6891815b58cce41eaff32a682570
|
[
"MIT"
] | 1
|
2020-11-19T00:40:49.000Z
|
2020-11-19T00:40:49.000Z
|
manga_py/info.py
|
eduhoribe/manga-py
|
fe7eb2e08532b3c75b4f7ac8cc4132f0e7a65eb4
|
[
"MIT"
] | null | null | null |
manga_py/info.py
|
eduhoribe/manga-py
|
fe7eb2e08532b3c75b4f7ac8cc4132f0e7a65eb4
|
[
"MIT"
] | null | null | null |
from argparse import Namespace
from datetime import datetime
from sys import argv
from typing import Union
from manga_py import meta
class Info:
__doc__ = """
--print-json argument helper
{
'site': 'https://example.org/kumo-desu-ga-nani-ka',
'downloader': [
'https://manga-py.com/manga-py/',
'https://github.com/manga-py/manga-py',
'https://github.com/yuru-yuri/manga-py',
'https://yuru-yuri.github.io/manga-py',
],
'version': '1.1.4',
'delta': '0:00:00.003625',
'start': '2018-06-08 17:22:24.419565',
'end': '2018-06-08 17:22:24.423190',
'user_agent': 'Mozilla/5.0',
'cookies': {'cf_clearance': 'ec-1528654923-86400', '__cfduid': '21528654914'},
'args': {
'_raw_params': 'manga-py --cbz https://example.org/kumo-desu-ga-nani-ka',
'url': 'https://example.org/kumo-desu-ga-nani-ka',
'name': None,
'destination': None,
'no-progress': False,
'cbz': False,
'skip-volumes': None,
'max-volumes': None,
'user-agent': None,
'proxy': None,
'reverse-downloading': None,
'rewrite-exists-archives': None,
'no-multi-threads': None,
},
'error': False,
'error_msg': '',
'volumes': [
{
'name': 'Kumo desu ga, nani ka? - 0',
'path': 'Manga/kumo-desu-ga-nani-ka/vol_000.zip',
},
{
'name': 'Kumo desu ga, nani ka? - 1',
'path': 'Manga/kumo-desu-ga-nani-ka/vol_001.zip',
},
],
}
"""
_data = None
_start_time = None
@staticmethod
def _dt(dt, fmt: str = '%A, %d. %B %Y %H:%M:%S'):
return dt.strftime(fmt)
def __init__(self, args: Union[Namespace, dict]): # see manga_py.cli arguments
        _args = args.__dict__ if isinstance(args, Namespace) else args
_args['_raw_params'] = ' '.join(argv)
self._data = {
'site': args.url,
'downloader': meta.repo_url,
'version': meta.version,
'delta': None,
'init': self._dt(datetime.now()),
'start': None,
'end': None,
'user_agent': None,
'cookies': None,
'args': _args,
'return_code': 0,
'error': False,
'error_msg': None,
'volumes': [],
}
self._volumes = []
def set_ua(self, ua):
self._data['user_agent'] = ua
def set_error(self, e, rc: int = 1):
self._data['return_code'] = rc
self._data['error'] = e
def start(self):
self._start_time = datetime.now()
def set_cookies(self, cookies):
self._data['cookies'] = cookies
def set_volumes(self, volumes: list):
self._data['volumes'] = volumes
def set_last_volume_error(self, error_message):
try:
self._data['volumes'][-1]['error'] = True
self._data['volumes'][-1]['error_message'] = error_message
except IndexError:
pass
def add_volume(self, url: str, path: str, files: list = None):
volume = {
'url': url,
'path': path,
'error': False,
'error_message': '',
}
if files is not None:
volume['files'] = files
volume['num_files'] = len(files)
self._data['volumes'].append(volume)
def get(self):
self._data['delta'] = str(datetime.now() - self._start_time)
self._data['start'] = self._dt(self._start_time)
self._data['end'] = self._dt(datetime.now())
return self._data
| 30
| 86
| 0.504497
|
5409225d71dd1fbf8c41038ff40017a37f3e3829
| 849
|
py
|
Python
|
api/migrations/0002_auto_20170528_0513.py
|
sai29/friendster
|
f89bb0c27a29b7fc35c3c3746ccb2a51fabe2286
|
[
"MIT"
] | null | null | null |
api/migrations/0002_auto_20170528_0513.py
|
sai29/friendster
|
f89bb0c27a29b7fc35c3c3746ccb2a51fabe2286
|
[
"MIT"
] | null | null | null |
api/migrations/0002_auto_20170528_0513.py
|
sai29/friendster
|
f89bb0c27a29b7fc35c3c3746ccb2a51fabe2286
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
# Generated by Django 1.11.1 on 2017-05-28 05:13
from __future__ import unicode_literals
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('api', '0001_initial'),
]
operations = [
migrations.AlterField(
model_name='connectionrequest',
name='from_user',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='sender', to=settings.AUTH_USER_MODEL),
),
migrations.AlterField(
model_name='connectionrequest',
name='to_user',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='reciever', to=settings.AUTH_USER_MODEL),
),
]
| 30.321429
| 135
| 0.665489
|
7c5526a277bd1072974067fd8a3778cf68f355ac
| 1,119
|
py
|
Python
|
server/djangoapp/urls.py
|
mrmicrosoft/agfzb-CloudAppDevelopment_Capstone
|
2b0de5544284e139ccbc4a9448333a47a373012b
|
[
"Apache-2.0"
] | null | null | null |
server/djangoapp/urls.py
|
mrmicrosoft/agfzb-CloudAppDevelopment_Capstone
|
2b0de5544284e139ccbc4a9448333a47a373012b
|
[
"Apache-2.0"
] | null | null | null |
server/djangoapp/urls.py
|
mrmicrosoft/agfzb-CloudAppDevelopment_Capstone
|
2b0de5544284e139ccbc4a9448333a47a373012b
|
[
"Apache-2.0"
] | null | null | null |
from django.urls import path
from django.conf.urls.static import static
from django.conf import settings
from . import views
app_name = 'djangoapp'
urlpatterns = [
# route is a string contains a URL pattern
# view refers to the view function
# name the URL
# path for about view
path(route='about/', view=views.about, name='about'),
# path for contact us view
path(route='contact/', view=views.contact, name='contact'),
# path for registration
path(route='register/', view=views.registration_request, name='register'),
# path for login
path(route='login/', view=views.login_request, name='login'),
# path for logout
path(route='logout/', view=views.logout_request, name='logout'),
# path for index
path(route='', view=views.get_dealerships, name='index'),
# path for dealer reviews view
path('dealer/<int:dealer_id>/', views.get_dealer_details, name='dealer_details'),
# path for add a review view
path('review/<int:dealer_id>/', views.add_review, name='add_review'),
] + static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
| 31.083333
| 85
| 0.696157
|
5a496b32d0e4868e4154248907bba47b02fc5ce6
| 8,281
|
py
|
Python
|
HuggingFace-TSE/model.py
|
ywu94/Code-Notes
|
8c4e9cbfe86163d39608ddce63e550e0e9797a8e
|
[
"MIT"
] | 2
|
2020-09-11T10:40:43.000Z
|
2020-10-08T04:02:26.000Z
|
HuggingFace-TSE/model.py
|
ywu94/Code-Notes
|
8c4e9cbfe86163d39608ddce63e550e0e9797a8e
|
[
"MIT"
] | null | null | null |
HuggingFace-TSE/model.py
|
ywu94/Code-Notes
|
8c4e9cbfe86163d39608ddce63e550e0e9797a8e
|
[
"MIT"
] | null | null | null |
import os
import re
import pickle
import argparse
from collections import namedtuple
import numpy as np
import torch
from torch import nn
import torch.nn.functional as F
import transformers
from transformers import BertModel, BertConfig
import pytorch_lightning as pl
from pytorch_lightning.callbacks import ModelCheckpoint
from pytorch_lightning.callbacks.early_stopping import EarlyStopping
from utils import initiate_logger
from dataset import get_TSE_dataloader_kfold
from callback import PyLoggerCallback
from config import get_config
class BERT_TSE(nn.Module):
def __init__(self, config):
super().__init__()
self.config = config
self.transformer = config.model_cls(config.model_config).from_pretrained(config.pretrain_wt)
if 'distilbert' in config.model_name.split('-')[0]:
self.n_feature = self.transformer.transformer.layer[-1].ffn.lin2.out_features
elif 'albert' in config.model_name.split('-')[0]:
self.n_feature = self.transformer.encoder.albert_layer_groups[-1].albert_layers[-1].ffn_output.out_features
elif 'bert' in config.model_name.split('-')[0]:
self.n_feature = self.transformer.pooler.dense.out_features
elif 'roberta' in config.model_name.split('-')[0]:
self.n_feature = self.transformer.pooler.dense.out_features
self.logits = nn.Sequential(
nn.Linear(self.config.n_layer*self.n_feature, 128),
nn.Tanh(),
nn.Linear(128, 2)
)
self.dropout = nn.Dropout(p=0.5)
def _set_feature_extract(self):
for name, param in self.named_parameters():
if re.search('logits', name):
param.requires_grad = True
else:
param.requires_grad = False
def _set_fine_tune(self):
for _, param in self.named_parameters():
param.requires_grad = True
def _check_parameter_requires_grad(self):
params_to_update = []
for name, param in self.named_parameters():
if param.requires_grad == True:
params_to_update.append(param)
return params_to_update
def forward(self, token_ids, token_type_ids, mask):
if 'distil' in self.config.model_name.split('-')[0]:
hidden_states = self.transformer(
token_ids,
attention_mask=mask,
output_hidden_states=True
)[-1]
else:
hidden_states = self.transformer(
token_ids,
attention_mask=mask,
token_type_ids=token_type_ids,
output_hidden_states=True
)[-1]
features = torch.cat(hidden_states[:self.config.n_layer], dim=-1)
if self.config.multi_sample_dropout and self.training:
logits = torch.mean(torch.stack([self.logits(self.dropout(features)) for _ in range(5)], dim=0), dim=0)
else:
logits = self.logits(features)
start_logits, end_logits = logits[:,:,0], logits[:,:,1]
return start_logits, end_logits
def _pred_token_to_char(self, text, offsets, token_pred):
char_pred = np.zeros(len(text))
for i, offset in enumerate(offsets):
if offset[0] or offset[1]: char_pred[offset[0]:offset[1]]=token_pred[i]
return char_pred
def _pred_selected(self, text, offsets, start_token_pred, end_token_pred):
start_pred = self._pred_token_to_char(text, offsets, start_token_pred)
end_pred = self._pred_token_to_char(text, offsets, end_token_pred)
start_idx = np.argmax(start_pred)
end_idx = len(end_pred) - 1 - np.argmax(end_pred[::-1])
return text[start_idx:end_idx+1]
def _jaccard(self, str1, str2):
a = set(str1.lower().split())
b = set(str2.lower().split())
c = a.intersection(b)
return float(len(c)) / (len(a) + len(b) - len(c))
def _evaluate_pred_selected(self, pred, actual):
return self._jaccard(actual, pred)
class PL_BERT_TSE(pl.LightningModule):
def __init__(self, fold, dl_config, ml_config, batch_size=64, learning_rate=1e-4, *args, **kwargs):
super().__init__()
self.batch_size = batch_size
self.learning_rate = learning_rate
self.save_hyperparameters()
self.hparams.num_warmup_steps = int(22000/self.batch_size*0.3)
self.hparams.num_training_steps = int(5*22000/self.batch_size)
self.model = BERT_TSE(ml_config)
self._reset_metric_state()
def _reset_metric_state(self):
self.stat = {
'train': {'loss': [], 'start_loss': [], 'end_loss': []},
'validation': {'loss': [], 'start_loss': [], 'end_loss': [], 'jaccard_score': []}
}
def forward(self, token_ids, token_type_ids, mask):
start_logits, end_logits = self.model(token_ids, token_type_ids, mask)
return start_logits, end_logits
def training_step(self, batch, batch_idx):
token_ids = batch['ids']
token_type_ids = batch['type_ids']
mask = batch['mask']
start_label = batch['start_label']
end_label = batch['end_label']
start_logits, end_logits = self.model(token_ids, token_type_ids, mask)
start_probs = torch.softmax(start_logits, dim=1)
end_probs = torch.softmax(end_logits, dim=1)
start_loss = F.kl_div(torch.log(start_probs), start_label, reduction='batchmean')
end_loss = F.kl_div(torch.log(end_probs), end_label, reduction='batchmean')
return {'loss': start_loss+end_loss, 'start_loss': start_loss, 'end_loss': end_loss}
def training_epoch_end(self, outputs):
loss_mean = torch.stack([x['callback_metrics']['loss'] for x in outputs]).mean()
start_loss_mean = torch.stack([x['callback_metrics']['start_loss'] for x in outputs]).mean()
end_loss_mean = torch.stack([x['callback_metrics']['end_loss'] for x in outputs]).mean()
self.stat['train']['loss'].append(float(loss_mean.cpu().detach().numpy()))
self.stat['train']['start_loss'].append(float(start_loss_mean.cpu().detach().numpy()))
self.stat['train']['end_loss'].append(float(end_loss_mean.cpu().detach().numpy()))
return {'loss': loss_mean}
def validation_step(self, batch, batch_idx):
token_ids = batch['ids']
token_type_ids = batch['type_ids']
mask = batch['mask']
start_label = batch['start_label']
end_label = batch['end_label']
texts = batch['text']
offsets = batch['offsets']
selected_texts = batch['selected_text']
start_logits, end_logits = self.model(token_ids, token_type_ids, mask)
start_probs = torch.softmax(start_logits, dim=1)
end_probs = torch.softmax(end_logits, dim=1)
start_loss = F.kl_div(torch.log(start_probs), start_label, reduction='batchmean')
end_loss = F.kl_div(torch.log(end_probs), end_label, reduction='batchmean')
start_probs = start_probs.cpu().detach().numpy()
end_probs = end_probs.cpu().detach().numpy()
selected_pred = [self.model._pred_selected(text, offset, start_token_pred, end_token_pred)
for text, offset, start_token_pred, end_token_pred in zip(texts, offsets, start_probs, end_probs)]
jaccard_score = np.mean([self.model._evaluate_pred_selected(p,t) for p, t in zip(selected_pred, selected_texts)])
return {'val_loss': start_loss+end_loss, 'val_start_loss': start_loss, 'val_end_loss': end_loss, 'val_jaccard_score': jaccard_score}
def validation_epoch_end(self, outputs):
loss_mean = torch.stack([x['val_loss'] for x in outputs]).mean()
start_loss_mean = torch.stack([x['val_start_loss'] for x in outputs]).mean()
end_loss_mean = torch.stack([x['val_end_loss'] for x in outputs]).mean()
jaccard_score_mean = np.mean([x['val_jaccard_score'] for x in outputs])
self.stat['validation']['loss'].append(float(loss_mean.cpu().detach().numpy()))
self.stat['validation']['start_loss'].append(float(start_loss_mean.cpu().detach().numpy()))
self.stat['validation']['end_loss'].append(float(end_loss_mean.cpu().detach().numpy()))
self.stat['validation']['jaccard_score'].append(jaccard_score_mean)
return {'val_loss': loss_mean, 'val_jaccard_score': jaccard_score_mean}
def train_dataloader(self):
dataloader = get_TSE_dataloader_kfold('train', self.hparams.fold, self.hparams.batch_size, self.hparams.dl_config)
return dataloader
def val_dataloader(self):
dataloader = get_TSE_dataloader_kfold('validation', self.hparams.fold, self.hparams.batch_size, self.hparams.dl_config)
return dataloader
def configure_optimizers(self):
params_to_update = self.model._check_parameter_requires_grad()
opt = transformers.AdamW(params_to_update, lr=self.hparams.learning_rate, weight_decay=1e-2)
sch = {
'scheduler': transformers.get_linear_schedule_with_warmup(
opt,
num_warmup_steps=self.hparams.num_warmup_steps,
num_training_steps=self.hparams.num_training_steps
),
'interval': 'step',
'frequency': 1,
}
return [opt], [sch]
| 37.640909
| 134
| 0.739162
|
7789071087f5b7eb8939df0561a5923c41565fb6
| 2,933
|
py
|
Python
|
backend/api/controller/user/add.py
|
Vedant1202/sepsis
|
ed73d897192e561d1742109e642af72f6f26cfdf
|
[
"MIT"
] | null | null | null |
backend/api/controller/user/add.py
|
Vedant1202/sepsis
|
ed73d897192e561d1742109e642af72f6f26cfdf
|
[
"MIT"
] | 2
|
2020-01-28T23:16:01.000Z
|
2020-09-26T00:36:53.000Z
|
backend/api/controller/user/add.py
|
Vedant1202/sepsis
|
ed73d897192e561d1742109e642af72f6f26cfdf
|
[
"MIT"
] | null | null | null |
# import pymysql
from app import app
from db import mysql
import json
from flask import jsonify
from flask import flash, request
from werkzeug.security import generate_password_hash, check_password_hash
from flask_cors import CORS
import time
from utils.utils import not_found, create_session, calculate_age, verify_session, upload_file
from werkzeug.utils import secure_filename
import os
def user_add():
try:
_fname = request.form.getlist("fname")[0]
_lname = request.form.getlist("lname")[0]
_email = request.form.getlist("email")[0]
_password = request.form.getlist("password")[0]
_dept = request.form.getlist("dept")[0]
_dob = request.form.getlist("dob")[0]
_gender = request.form.getlist("gender")[0]
_phone = request.form.getlist("phone")[0]
_type = request.form.getlist("type")[0]
_specialization = request.form.getlist("specialization")[0]
_experience = request.form.getlist("experience")[0]
_registration = request.form.getlist("registration")[0]
file = request.files.to_dict()['profImg']
filename = secure_filename(file.filename)
filenamefull = filename
# filename = os.path.join('E:/HackerEarth/Missing/WebApp/backend/files/missing', filename)
filename = 'E:/sih2020/sepsis/backend/files/doctor/profile-pics/images/' + filename
print('hello')
print(filename)
# validate the received values
if _fname and _lname and _email and _password and _dept and _dob and _gender and _phone and _type and _specialization and _experience and _registration and request.method == "POST":
# do not save password as a plain text
_hashed_password = generate_password_hash(_password)
# save edits
sql = "INSERT INTO user(fname, lname, email, password, dept, dob, gender, phone, type, specialization, experience, registration, profimg, filename) VALUES(%s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s)"
data = (_fname, _lname, _email, _hashed_password, _dept, _dob, _gender, _phone, _type, _specialization, _experience, _registration, filename, filenamefull)
conn = mysql.connect()
cursor = conn.cursor()
cursor.execute(sql, data)
print(cursor.lastrowid)
uid = cursor.lastrowid
conn.commit()
upload_file('doctor/profile-pics/images')
skey = create_session(uid)
resp = jsonify(uid=uid, skey=skey)
resp.status_code = 200
# print(resp)
return resp
else:
return not_found()
except Exception as e:
print('====================== EXCEPTION ========================')
print(e)
    finally:
        # Only close the cursor and connection if they were actually created; an early
        # failure (or invalid form data) would otherwise raise NameError here.
        if 'cursor' in locals():
            cursor.close()
        if 'conn' in locals():
            conn.close()
| 43.776119
| 223
| 0.638595
|
5940d83d11f0b5c764f8c71360c8ec2217c61215
| 1,299
|
py
|
Python
|
setup.py
|
milonoir/yaml_rulz
|
ee8e3f4d2892aa636832970dddc044d0ca86c691
|
[
"MIT"
] | 1
|
2017-07-14T16:43:10.000Z
|
2017-07-14T16:43:10.000Z
|
setup.py
|
milonoir/yaml_rulz
|
ee8e3f4d2892aa636832970dddc044d0ca86c691
|
[
"MIT"
] | 4
|
2016-09-13T15:14:51.000Z
|
2017-05-22T10:57:20.000Z
|
setup.py
|
milonoir/yaml_rulz
|
ee8e3f4d2892aa636832970dddc044d0ca86c691
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
from setuptools import setup
setup(
name="yaml_rulz",
version="0.0.1",
description="A YAML validator",
license="MIT",
author="Milan Boleradszki",
author_email="bmilan1985@gmail.com",
maintainer="Milan Boleradszki",
maintainer_email="bmilan1985@gmail.com",
url="https://github.com/milonoir/yaml_rulz",
packages=["yaml_rulz"],
install_requires=["PyYAML", "prettytable"],
tests_require=["mock"],
classifiers=[
# "Development Status :: 1 - Planning",
# "Development Status :: 2 - Pre-Alpha",
# "Development Status :: 3 - Alpha",
"Development Status :: 4 - Beta",
# "Development Status :: 5 - Production/Stable",
# "Development Status :: 6 - Mature",
# "Development Status :: 7 - Inactive",
"Environment :: Console",
"Intended Audience :: System Administrators",
"License :: OSI Approved :: MIT License",
"Natural Language :: English",
"Operating System :: Unix",
"Programming Language :: Python :: 2.7",
"Programming Language :: Python :: 3.5",
"Topic :: Text Processing :: Markup",
],
entry_points={
"console_scripts": [
"yaml_rulz = yaml_rulz.cli:main",
],
},
)
| 30.928571
| 56
| 0.582756
|
33688a71047f4c9d50f5025064846e0ee5d3648e
| 1,248
|
py
|
Python
|
jax/scipy/stats/expon.py
|
ecreager/jax
|
948def817fd7cc2ee7a988b5142401a580b1bbd3
|
[
"ECL-2.0",
"Apache-2.0"
] | 2
|
2021-06-13T20:51:49.000Z
|
2021-06-14T02:37:06.000Z
|
jax/scipy/stats/expon.py
|
ecreager/jax
|
948def817fd7cc2ee7a988b5142401a580b1bbd3
|
[
"ECL-2.0",
"Apache-2.0"
] | null | null | null |
jax/scipy/stats/expon.py
|
ecreager/jax
|
948def817fd7cc2ee7a988b5142401a580b1bbd3
|
[
"ECL-2.0",
"Apache-2.0"
] | 1
|
2019-03-14T10:07:22.000Z
|
2019-03-14T10:07:22.000Z
|
# Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as onp
import scipy.stats as osp_stats
from ... import lax
from ...numpy.lax_numpy import _promote_args_like, _wraps, where, inf
@_wraps(osp_stats.expon.logpdf)
def logpdf(x, loc=0, scale=1):
x, loc, scale = _promote_args_like(osp_stats.expon.logpdf, x, loc, scale)
log_scale = lax.log(scale)
linear_term = lax.div(lax.sub(x, loc), scale)
log_probs = lax.neg(lax.add(linear_term, log_scale))
return where(lax.lt(x, loc), -inf, log_probs)
@_wraps(osp_stats.expon.pdf)
def pdf(x, loc=0, scale=1):
return lax.exp(logpdf(x, loc, scale))
| 33.72973
| 75
| 0.754808
|
875502dc3c94c25d89245ccb6fb17b332d505c04
| 4,147
|
py
|
Python
|
PythonCodes/ParallExtraction/ExtractPlanarTimeProfileData.py
|
Nicolucas/C-Scripts
|
2608df5c2e635ad16f422877ff440af69f98f960
|
[
"MIT"
] | null | null | null |
PythonCodes/ParallExtraction/ExtractPlanarTimeProfileData.py
|
Nicolucas/C-Scripts
|
2608df5c2e635ad16f422877ff440af69f98f960
|
[
"MIT"
] | null | null | null |
PythonCodes/ParallExtraction/ExtractPlanarTimeProfileData.py
|
Nicolucas/C-Scripts
|
2608df5c2e635ad16f422877ff440af69f98f960
|
[
"MIT"
] | null | null | null |
import os, time, sys
import numpy as np
from scipy.interpolate import RectBivariateSpline
import multiprocessing as mp
sys.path.insert(0,"/import/freenas-m-03-geodynamics/jhayek/petsc-3.12.5/lib/petsc/bin/")
sys.path.insert(0,"/import/freenas-m-03-geodynamics/jhayek/TEAR/se2wave/utils/python")
sys.path.insert(0,"/import/freenas-m-03-geodynamics/jhayek/TEAR/processing/TEAR/PythonCodes/")
from Lib_GeneralFunctions import *
from Lib_ProfilePlotting import *
from Lib_ProfileProcessing import *
from se2waveload import *
def ExtractFieldsPerTS_tilting(ListTimeProfileObj, w_filename, se2_coor,TiltAngle):
se2_field = se2wave_load_wavefield(w_filename,True,True)
TimeStep = se2_field["time"].item()
LCoorX, LCoorY = SeparateList(se2_coor['coor'], se2_coor['nx'].item(), se2_coor['ny'].item())
LFieldX, LFieldY = Tilt_SeparateList(se2_field['displ'], se2_field['nx'].item(), se2_field['ny'].item(), TiltAngle)
LFieldvelX, LFieldvelY = Tilt_SeparateList(se2_field['vel'], se2_field['nx'].item(), se2_field['ny'].item(), TiltAngle)
SplineDispl = [RectBivariateSpline(LCoorX[:,0], LCoorY[0,:], LFieldX, kx=1, ky=1),
RectBivariateSpline(LCoorX[:,0], LCoorY[0,:], LFieldY, kx=1, ky=1)]
SplineVel = [RectBivariateSpline(LCoorX[:,0], LCoorY[0,:], LFieldvelX, kx=1, ky=1),
RectBivariateSpline(LCoorX[:,0], LCoorY[0,:], LFieldvelY, kx=1, ky=1)]
for OBJitem in ListTimeProfileObj:
CompDispX,CompDispY = GetLocDataTilt(OBJitem.Coord,OBJitem.TwinCoord, SplineDispl, True)
CompvelX,CompVelY = GetLocDataTilt(OBJitem.Coord,OBJitem.TwinCoord, SplineVel, True)
OBJitem.appendFieldValues(TimeStep, CompDispX, CompDispY, CompvelX, CompVelY)
def FillObjectInTime(ListTimeProfileObj, freq, maxtimestep, fname, path, TiltAngle, MeshFilename = "default_mesh_coor.pbin", NumProcessors=1):
TSList = np.arange(0, maxtimestep+1, freq).tolist()
FilenameList = [os.path.join(path,fname.format(timestep=i)) for i in TSList]
filename = os.path.join(path, MeshFilename)
se2_coor = se2wave_load_coordinates(filename)
ParallExtractField_per_TS = lambda lst: [ExtractFieldsPerTS_tilting(ListTimeProfileObj, x, se2_coor, TiltAngle) for x in lst]
FilenameListChunks = np.array_split(FilenameList, NumProcessors)
with mp.Pool(processes=NumProcessors) as pool:
        chunk_processes = [pool.apply_async(ParallExtractField_per_TS, args=(chunk,)) for chunk in FilenameListChunks]
[chunk.get() for chunk in chunk_processes]
start_time = time.time()
##########################################
ThickVal = "025"
TiltAngle = 0.00
OrderP = 3
thickness = float(ThickVal)*1.001
InFolder = "TEAR49_TPV_T0_P3_025x025_A12phi65_Delt1.001_7s"
fname = "step-{timestep:04}_wavefield.pbin"
NameWrapper = "{}/".format(InFolder)
path = "/import/freenas-m-03-geodynamics/jhayek/TEAR/Results/T2/Runs/{}".format(NameWrapper)
TimeStepList = GetListPatternFiles(path,fname,"{timestep:04}")
freq = int(TimeStepList[1])-int(TimeStepList[0])
maxtimestep = int(TimeStepList[-1])
OutputFolder = "/import/freenas-m-03-geodynamics/jhayek/SharedWolfel/PaperData/CorrectedSimulations/" + GetTodayDate() + "-2/"
OutFileName = "{InFolder}-Tilt{Tilt}-P{order}-TPList_t{timestep}_d{d}.pickle".format(InFolder=InFolder,order=OrderP, Tilt = TiltAngle, timestep = maxtimestep, d = thickness)
#############################
print("\n>>START: "+OutFileName+"\n")
# Locations relative to the fault
Locations = [[8000,thickness],[6000,thickness],[4000,thickness],[2000,thickness],[0,thickness]]
#Locations in the real domain
TwinLocations = [list(ApplyTilting(TiltAngle,Loc[0],-Loc[1])) for Loc in Locations]
Locations = [list(ApplyTilting(TiltAngle,Loc[0],Loc[1])) for Loc in Locations]
ListTimeProfileObj = [SingleTimeProfile(Loc) for Loc in Locations]
[ListTimeProfileObj[idx].AddTwin(TLoc) for idx,TLoc in enumerate(TwinLocations)]
FillObjectInTime(ListTimeProfileObj, freq, maxtimestep, fname, path, -TiltAngle, NumProcessors=60)
SavePickleFile(OutputFolder, OutFileName, ListTimeProfileObj)
print("--- %s seconds ---" % (time.time() - start_time))
| 43.197917
| 173
| 0.729684
|
63edbc5bd31d84cd2460550e259f1f92174e1406
| 4,636
|
py
|
Python
|
resources/lib/addon.py
|
dagwieers/plugin.video.fosdem
|
d2f7758159eb040b85dd4dccac009f10f519ca28
|
[
"MIT"
] | null | null | null |
resources/lib/addon.py
|
dagwieers/plugin.video.fosdem
|
d2f7758159eb040b85dd4dccac009f10f519ca28
|
[
"MIT"
] | null | null | null |
resources/lib/addon.py
|
dagwieers/plugin.video.fosdem
|
d2f7758159eb040b85dd4dccac009f10f519ca28
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
from __future__ import absolute_import, division, unicode_literals
from datetime import datetime, timedelta
import routing
from xbmcgui import Dialog, ListItem
from xbmcplugin import addDirectoryItem, endOfDirectory, getSetting, setContent, setResolvedUrl
from fosdem import fetch_xml, contains_videos
FORMAT_URL = 'https://fosdem.org/{}/schedule/xml'
FORMATS = ['mp4', 'webm']
YEARS_SHOWN = 5
plugin = routing.Plugin() # pylint: disable=invalid-name
def years():
now = datetime.now()
year = now.year
# Determine if FOSDEM happened this year already
if now.month < 2 and now.day < 3:
year -= 1
# Range does not include the end.
year += 1
return range(year - YEARS_SHOWN, year)
def get_setting_int(name):
val = getSetting(plugin.handle, name)
if not val:
val = '0'
return int(val)
def get_format():
return FORMATS[get_setting_int('format')]
@plugin.route('/')
@plugin.route('/dir/<path:subdir>')
def show_dir(subdir=''):
if subdir == '':
for year in years():
year = str(year)
url = plugin.url_for(show_dir, subdir=year)
addDirectoryItem(plugin.handle, url, ListItem(year), True)
else:
root = fetch_xml(subdir)
for day in root.findall('day'):
number = day.attrib['index']
date = day.attrib['date']
text = 'Day {} ({})'.format(number, date)
url = plugin.url_for(show_day, year=subdir, day=number)
addDirectoryItem(plugin.handle, url,
ListItem(text), True)
endOfDirectory(plugin.handle)
@plugin.route('/day/<year>/<day>')
def show_day(year, day):
exp = './day[@index="{}"]/room'.format(day)
root = fetch_xml(year)
for room in root.findall(exp):
if not contains_videos(room.findall('./event/links/link')):
continue
name = room.attrib['name']
genre = room.find('./event/track').text
text = '{} - {}'.format(name, genre)
url = plugin.url_for(show_room, year=year, day=day, room=name)
addDirectoryItem(plugin.handle, url,
ListItem(text), True)
endOfDirectory(plugin.handle)
@plugin.route('/room/<year>/<day>/<room>')
def show_room(day, year, room):
exp = './day[@index="{}"]/room[@name="{}"]/event'.format(day, room)
root = fetch_xml(year)
for event in root.findall(exp):
if not contains_videos(event.findall('./links/link')):
continue
event_id = event.attrib['id']
title = event.find('title').text
track = event.find('track').text
subtitle = event.find('subtitle').text
        person_items = event.findall('./persons/person')
        persons = [p.text for p in person_items] if person_items else []
abstract = event.find('abstract').text
duration = event.find('duration').text or '0:0'
if abstract:
abstract = abstract.replace('<p>', '').replace('</p>', '')
item = ListItem(title)
item.setProperty('IsPlayable', 'true')
item.setInfo('video', {
'cast': persons,
'genre': track,
'plot': abstract,
'tagline': subtitle,
'title': title,
})
# duration is formatted as 01:30
hour, minute = duration.split(':')
seconds = timedelta(hours=int(hour), minutes=int(minute)).total_seconds()
item.addStreamInfo('video', {
'duration': seconds
})
url = plugin.url_for(show_event,
year=year,
event_id=event_id)
addDirectoryItem(plugin.handle, url, item, False)
setContent(plugin.handle, 'videos')
endOfDirectory(plugin.handle)
@plugin.route('/event/<year>/<event_id>')
def show_event(year, event_id):
root = fetch_xml(year)
event = root.find('.//event[@id="{}"]'.format(event_id))
videos = [link.attrib['href'] for link in event.findall('./links/link') if 'video.fosdem.org' in link.attrib['href']]
if not videos:
Dialog().ok('Error playing video', 'FOSDEM event {id} in {year} has no videos.'.format(id=event_id, year=year))
endOfDirectory(plugin.handle)
return
video_format = get_format()
urls = [video for video in videos if video.endswith(video_format)]
if urls:
url = urls[0]
else:
        # Fall back to the first available video
url = videos[0]
setResolvedUrl(plugin.handle, True, ListItem(path=url))
def run(argv):
"""Addon entry point from wrapper"""
plugin.run(argv)
| 31.324324
| 121
| 0.599439
|
5c67fe8590f9b572f4ddf87ff786be8ef2f7947e
| 5,583
|
py
|
Python
|
main.py
|
dasfreak/npm-most-dependend-upon
|
bf08308209f91d7f99aa9ee39f19a9ea4d2bb017
|
[
"MIT"
] | null | null | null |
main.py
|
dasfreak/npm-most-dependend-upon
|
bf08308209f91d7f99aa9ee39f19a9ea4d2bb017
|
[
"MIT"
] | null | null | null |
main.py
|
dasfreak/npm-most-dependend-upon
|
bf08308209f91d7f99aa9ee39f19a9ea4d2bb017
|
[
"MIT"
] | 1
|
2021-10-11T05:37:08.000Z
|
2021-10-11T05:37:08.000Z
|
import json
import logging
import argparse
import asciitree
parser = argparse.ArgumentParser(
description='Calculate the most-depended upon packages on npm.',
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument('--loglevel',
choices=['DEBUG', 'INFO', 'WARNING', 'ERROR', 'CRITICAL'],
default='INFO',
                    help='Which loglevel to use')
parser.add_argument('--preprocess', default=False, action='store_true', help='Preprocess the package index to speed up processing')
parser.add_argument('--infile',
type=str,
help='Filename of the package list you downloaded from npm')
parser.add_argument('--dependency_tree', default=False, action='store_true', help='Build a dependency tree for a given package')
parser.add_argument('--package',
type=str,
help='Package for which a dependency tree should be build')
parser.add_argument('--outfile',
type=str,
default='most_depended_upon.json',
help='Filename to which results will be written')
parser.add_argument('--limit',
type=int,
default=-1,
help=
                    'Return the n most depended-upon packages only, use -1 for untruncated results')
args = parser.parse_args()
loglevel = {
'DEBUG': logging.DEBUG,
'INFO': logging.INFO,
'WARNING': logging.WARNING,
'ERROR': logging.ERROR,
'CRITICAL': logging.CRITICAL
}
logger = logging.getLogger(__name__)
logger.setLevel(loglevel[args.loglevel])
logging.basicConfig(format='%(asctime)s - %(levelname)s - %(message)s')
# get the package index from npm:
# curl -o package_index_$(date --iso-8601=seconds).json https://replicate.npmjs.com/_all_docs?include_docs=true
def get_packages():
logger.info(f'Reading from {args.infile}')
with open(args.infile, 'r') as infile:
# skip first line
line = infile.readline()
# remove trailing newline and comma
line = infile.readline().replace(',\n', '')
        # read the huge JSON line by line and yield a single package's metadata
while line:
try:
package = json.loads(line)
except BaseException as exc:
logger.warning(f'Could not parse JSON: {line.strip()}: {exc}')
continue
finally:
line = infile.readline().replace(',\n', '')
yield package
def determine_most_depended_upon():
logger.info(f'Starting to count dependencies')
most_depended_upon = {}
with open('preprocessed.json', 'r') as infile:
preprocessed = json.load(infile)
for package in preprocessed:
logger.debug(f'{package} got {len(preprocessed[package])} dependencies')
for dependency in preprocessed[package]:
if most_depended_upon.get(dependency):
most_depended_upon[dependency] += 1
else:
most_depended_upon[dependency] = 1
logger.info('Sorting results by dependency count')
if args.limit > 0:
logger.info(f'Only returning the {args.limit} most depended upon packages')
most_depended_upon = dict(
sorted(most_depended_upon.items(),
key=lambda item: item[1],
reverse=True)[:args.limit])
else:
most_depended_upon = dict(
sorted(most_depended_upon.items(),
key=lambda item: item[1],
reverse=True))
logger.info(f'Writing results to file: {args.outfile}')
with open(args.outfile, 'w') as outfile:
json.dump(most_depended_upon, outfile)
logger.info('Goodbye')
def preprocess(package):
name = package['id']
try:
latest_version = package['doc']['dist-tags']['latest']
except KeyError:
# sometimes packages don't have a 'latest' version
logger.warning(f'{name} does not have a latest version')
return {name: []}
try:
dependencies = list(package['doc']['versions'][latest_version].get('dependencies', {}).keys())
except KeyError:
# sometimes packages list versions as latest that do not exist
logger.warning(f'{name} does not have version {latest_version}')
return {name: []}
return {name: dependencies}
def get_dependencies(package, preprocessed):
try:
return {dependency: get_dependencies(dependency, preprocessed) for dependency in preprocessed[package]}
except KeyError:
logger.error(f'{package} is not in the package index')
def build_dependency_tree(package):
logger.info(f'Building dependency tree for {package}')
with open('preprocessed.json', 'r') as infile:
preprocessed = json.load(infile)
dependency_tree = {package: get_dependencies(package, preprocessed)}
tr = asciitree.LeftAligned()
print(tr(dependency_tree))
if __name__ == '__main__':
if args.preprocess:
if not args.infile:
parser.error("--preprocess requires --infile")
preprocessed = {}
for package in get_packages():
preprocessed.update(preprocess(package))
with open('preprocessed.json', 'w') as outfile:
json.dump(preprocessed, outfile)
elif args.dependency_tree:
if not args.package:
parser.error("--dependency_tree requires --package")
build_dependency_tree(args.package)
else:
determine_most_depended_upon()
| 35.113208
| 131
| 0.626545
|
e8ad09050a796be7b85d8f5675f71c5906b20b59
| 2,213
|
py
|
Python
|
DecisionTree.py
|
denizgurzihin/Decision_Tree
|
7be1697a36113f44164b6d26701f85ab175738b9
|
[
"MIT"
] | null | null | null |
DecisionTree.py
|
denizgurzihin/Decision_Tree
|
7be1697a36113f44164b6d26701f85ab175738b9
|
[
"MIT"
] | null | null | null |
DecisionTree.py
|
denizgurzihin/Decision_Tree
|
7be1697a36113f44164b6d26701f85ab175738b9
|
[
"MIT"
] | null | null | null |
import pandas as pd
from sklearn.tree import DecisionTreeClassifier # Import Decision Tree Classifier
from sklearn.model_selection import train_test_split # Import train_test_split function
from sklearn.metrics import *
from time import time
col_names = ['att1', 'att2', 'att3', 'att4', 'att5', 'att6', 'att7', 'att8', 'att9', 'att10', 'att11', 'att12', 'att13', 'att14', 'att15', 'att16','label']
# load dataset
DATA = pd.read_csv("Final_Data.csv", header=None, names=col_names)
#DATA.info()
feature_cols = ['att1', 'att2', 'att3', 'att4', 'att5', 'att6', 'att7', 'att8', 'att9', 'att10', 'att11', 'att12', 'att13', 'att14', 'att15', 'att16']
X = DATA[feature_cols] # Features
y = DATA.label # Target variable
accuracy = 0
Confusion_Matrix = 0
f1 = 0
precision = 0
recall = 0
timee = 0
for x in range(10):
test_start = time()
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=x) # 80% training and 20% test
# Create Decision Tree classifer object
clf = DecisionTreeClassifier(criterion="entropy")
# Train Decision Tree Classifer
clf = clf.fit(X_train, y_train)
#Predict the response for test dataset
y_pred = clf.predict(X_test)
accuracy = accuracy + accuracy_score(y_test, y_pred)
f1 = f1 + f1_score(y_test, y_pred, average='weighted')
precision = precision + precision_score(y_test, y_pred, average='weighted')
recall = recall + recall_score(y_test, y_pred, average='weighted')
Confusion_Matrix = Confusion_Matrix + confusion_matrix(y_test, y_pred)
test_finish = time()
timee = timee + test_finish-test_start
print("Overall accuracy for decision tree with gain ratio and hold out method(10 times) :", accuracy/10)
print("Overall f1_score for decision tree with gain ratio and hold out method(10 times) :", f1/10)
print("Overall precision_score for decision tree with gain ratio and hold out method(10 times) :", precision/10)
print("Overall recall_score for decision tree with gain ratio and hold out method(10 times) :", recall/10)
print(" Confusion Matrix : ")
print(Confusion_Matrix/10)
print("Overall time needed for decision tree with gain ratio and hold out method(10 times) :", ((timee/10)*10*10*10), " miliseconds")
| 40.981481
| 155
| 0.724356
|
67579fa4053d78e49a909fd2e7dd83d67942a610
| 162
|
py
|
Python
|
Python_Programing_Excercise/Day_1/Anagram_string.py
|
Ena-Sharma/Meraki_Solution
|
1bfff62f6aeb69354712d0b5a9e46ddacff357f5
|
[
"MIT"
] | null | null | null |
Python_Programing_Excercise/Day_1/Anagram_string.py
|
Ena-Sharma/Meraki_Solution
|
1bfff62f6aeb69354712d0b5a9e46ddacff357f5
|
[
"MIT"
] | null | null | null |
Python_Programing_Excercise/Day_1/Anagram_string.py
|
Ena-Sharma/Meraki_Solution
|
1bfff62f6aeb69354712d0b5a9e46ddacff357f5
|
[
"MIT"
] | null | null | null |
'''
You are given 2 words as input; your program should report whether the two words are anagrams of each other.
Eg:
Input-1: pears
Input-2: spear
Output: They are anagrams
'''
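# A minimal sketch of one possible solution (not part of the original exercise file),
# assuming a case-insensitive comparison that ignores surrounding whitespace.
def are_anagrams(word1, word2):
    # Two words are anagrams when their sorted letters match.
    return sorted(word1.strip().lower()) == sorted(word2.strip().lower())

if __name__ == "__main__":
    first = input("Input-1: ")
    second = input("Input-2: ")
    print("They are anagrams" if are_anagrams(first, second) else "They are not anagrams")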
| 16.2
| 95
| 0.691358
|
c6e77a460575a94cd0666bcb6b839ae22b1994f7
| 1,613
|
py
|
Python
|
Other/tm1 rest api stress test.py
|
DJHig/TM1py-samples
|
da4050380447472a02e2a107a2c5be79ac284d0a
|
[
"MIT"
] | 1
|
2019-05-30T10:10:20.000Z
|
2019-05-30T10:10:20.000Z
|
Other/tm1 rest api stress test.py
|
DJHig/TM1py-samples
|
da4050380447472a02e2a107a2c5be79ac284d0a
|
[
"MIT"
] | null | null | null |
Other/tm1 rest api stress test.py
|
DJHig/TM1py-samples
|
da4050380447472a02e2a107a2c5be79ac284d0a
|
[
"MIT"
] | 1
|
2017-09-01T03:35:18.000Z
|
2017-09-01T03:35:18.000Z
|
"""
Do REST API operations in parallel. Can be handy when troubleshooting REST API bugs.
"""
import configparser
config = configparser.ConfigParser()
config.read('../config.ini')
import asyncio
cube = "General Ledger"
view = "Default"
from TM1py.Services import TM1Service
# define functions
def get_server_name(tm1):
for i in range(1000):
data = tm1.server.get_server_name()
def execute_mdx(tm1):
mdx = "SELECT { [}Clients].Members } ON ROWS, { [}Groups].Members } ON COLUMNS FROM [}ClientGroups]"
for i in range(1000):
data = tm1.cubes.cells.execute_mdx(mdx)
def get_all_dimension_names(tm1):
for i in range(1000):
data = tm1.dimensions.get_all_names()
def get_all_process_names(tm1):
for i in range(1000):
data = tm1.processes.get_all_names()
def read_pnl(tm1):
for i in range(1000):
data = tm1.cubes.cells.execute_view(cube, view, private=False)
# fire requests asynchronously
async def main():
loop = asyncio.get_event_loop()
with TM1Service(**config['tm1srv01']) as tm1:
future1 = loop.run_in_executor(None, execute_mdx, tm1)
future2 = loop.run_in_executor(None, get_server_name, tm1)
future3 = loop.run_in_executor(None, read_pnl, tm1)
future4 = loop.run_in_executor(None, get_all_dimension_names, tm1)
future5 = loop.run_in_executor(None, get_all_process_names, tm1)
        response1, response2, response3, response4, response5 = \
await future1, await future2, await future3, await future4, await future5
loop = asyncio.get_event_loop()
loop.run_until_complete(main())
| 31.019231
| 104
| 0.704278
|
65e93ac8ca514e52a29ee9ad5eb8cb5c21ce92b4
| 15,372
|
py
|
Python
|
tests/util/test_short_client_token.py
|
NYPL-Simplified/library-registry
|
adf1d24872459fd21f8e41f22c22a56a4558b6aa
|
[
"Apache-2.0"
] | null | null | null |
tests/util/test_short_client_token.py
|
NYPL-Simplified/library-registry
|
adf1d24872459fd21f8e41f22c22a56a4558b6aa
|
[
"Apache-2.0"
] | null | null | null |
tests/util/test_short_client_token.py
|
NYPL-Simplified/library-registry
|
adf1d24872459fd21f8e41f22c22a56a4558b6aa
|
[
"Apache-2.0"
] | null | null | null |
from datetime import datetime, timedelta
import pytest
from library_registry.util.short_client_token import (
ShortClientTokenDecoder,
ShortClientTokenEncoder,
ShortClientTokenTool,
)
from library_registry.model import DelegatedPatronIdentifier
TEST_NODE_VALUE = 114740953091845
SCT_TESTLIB_SHORT_NAME = 'LIBRARY'
SCT_TESTLIB_SECRET = 'LIBSECRET'
GENERIC_PATRONID = 'PATRONID'
@pytest.fixture
def encoder():
encoder_obj = ShortClientTokenEncoder()
yield encoder_obj
@pytest.fixture
def decoder():
decoder_obj = ShortClientTokenDecoder(TEST_NODE_VALUE, [])
yield decoder_obj
@pytest.fixture
def sct_test_library(db_session, create_test_library):
library_obj = create_test_library(db_session, short_name=SCT_TESTLIB_SHORT_NAME)
library_obj.shared_secret = SCT_TESTLIB_SECRET
yield library_obj
db_session.delete(library_obj)
db_session.commit()
class TestShortClientTokenTool:
@pytest.mark.parametrize(
'input,output',
[
pytest.param(b'alphabravocharliedelta', b'YWxwaGFicmF2b2NoYXJsaWVkZWx0YQ@@', id='simple_bytes'),
pytest.param('alphabravocharliedelta', b'YWxwaGFicmF2b2NoYXJsaWVkZWx0YQ@@', id='simple_string'),
pytest.param(chr(2110).encode('utf8'), b'4KC:', id='degree_symbol_includes_plus_sign'),
pytest.param(chr(3647).encode('utf8'), b'4Li;', id='thai_bhat_symbol_includes_forward_slash'),
pytest.param(chr(97).encode('utf8'), b'YQ@@', id='lowercase_a_includes_equals_sign')
]
)
def test_adobe_base64_encode(self, input, output):
"""
GIVEN: A string or bytestring to encode
WHEN: ShortClientTokenTool.adobe_base64_encode() is called on that string
THEN: A base64 encoded bytestring should be returned with the following changes:
- Any plus character ('+') should be replaced with a colon character (':')
- Any forward slash character ('/') should be replaced with a semicolon (';')
- Any equals sign character ('=') should be replaced with an at sign character ('@')
- Newlines should be stripped
Note that the substitutions are made in the base64 *output*, not the input string.
"""
assert ShortClientTokenTool.adobe_base64_encode(input) == output
@pytest.mark.parametrize(
'input,output',
[
pytest.param(b'YWxwaGFicmF2b2NoYXJsaWVkZWx0YQ@@', b'alphabravocharliedelta', id='simple_bytes'),
pytest.param(b'4KC:', chr(2110).encode('utf8'), id='degree_symbol_includes_plus_sign'),
pytest.param(b'4Li;', chr(3647).encode('utf8'), id='thai_bhat_symbol_includes_forward_slash'),
pytest.param(b'YQ@@', chr(97).encode('utf8'), id='lowercase_a_includes_equals_sign')
]
)
def test_adobe_base64_decode(self, input, output):
"""
GIVEN: A bytestring encoded by ShortClientTokenTool.adobe_base64_encode
WHEN: ShortClientTokenTool.adobe_base64_decode() is called on that bytestring
THEN: After the following substitutions are performed on the input, a decoded bytestring should return:
- Any colon character (':') should be replaced with a plus character ('+')
- Any semicolon character (';') should be replaced with a forward slash ('/')
- Any at sign ('@') should be replaced with an equals sign ('=')
"""
assert ShortClientTokenTool.adobe_base64_decode(input) == output
@pytest.mark.parametrize(
'input,output',
[
pytest.param(datetime(2018, 1, 1, 12, 30, 0, 0), 526350, id='jan_1_2018'),
pytest.param(ShortClientTokenTool.SCT_EPOCH - timedelta(days=365), 0, id='time_before_sct_epoch'),
]
)
def test_sct_numericdate(self, input, output):
"""
GIVEN: A datetime object
WHEN: ShortClientTokenTool.sct_numericdate() is called on that object
THEN: An integer representing the number of minutes since the epoch should be returned, where
the epoch datetime is defined in ShortClientTokenTool.SCT_EPOCH
"""
assert ShortClientTokenTool.sct_numericdate(input) == output
@pytest.mark.parametrize(
'input,output',
[
pytest.param(datetime(2018, 1, 1, 12, 30, 0, 0), 1514809800, id='jan_1_2018'),
pytest.param(ShortClientTokenTool.JWT_EPOCH - timedelta(days=365), 0, id='time_before_jwt_epoch'),
]
)
def test_jwt_numericdate(self, input, output):
"""
GIVEN: A datetime object
WHEN: ShortClientTokenTool.jwt_numericdate() is called on that object
THEN: An integer representing the number of seconds since the epoch should be returned, where
the epoch datetime is defined in ShortClientTokenTool.JWT_EPOCH
"""
assert ShortClientTokenTool.jwt_numericdate(input) == output
class TestShortClientTokenEncoder:
def test_encode_well_formed_result(self, encoder):
"""
GIVEN: Three strings, representing
- a library short name
- a library secret
- a patron identifier
WHEN: ShortClientTokenEncoder().encode() is called on those strings
THEN: A four part, pipe-delimited string should be returned, representing
<LIB_SHORT_NAME>|<EXPIRY>|<PATRON_ID>|<B64_ENCODED_SIGNATURE>, where:
- LIB_SHORT_NAME is the string passed in
- EXPIRY is an epoch time in minutes
- PATRON_ID is the string passed in
- B64_ENCODED_SIGNATURE is an encoded string signed with a signing key derived
from the library secret passed in, which can be decoded by ShortClientTokenDecoder.decode()
"""
lib_short_name = 'LIBSHORTNAME'
lib_secret = 'LIBSECRET'
patron_id = 'PATRONID'
result = encoder.encode(lib_short_name, lib_secret, patron_id).split('|')
assert len(result) == 4
assert result[0] == lib_short_name
try:
int(result[1])
except ValueError:
assert False
assert result[2] == patron_id
def test_encode_bad_parameters(self, encoder):
"""
GIVEN: An instance of ShortClientTokenEncoder
WHEN: .encode() is called with missing parameters, or None values
THEN: An appropriate ValueError should be raised
"""
with pytest.raises(ValueError) as exc:
encoder.encode(None, None, None)
assert "Both library short name and secret must be specified." in str(exc)
with pytest.raises(ValueError) as exc:
encoder.encode('LIBSHORTNAME', None, None)
assert "Both library short name and secret must be specified." in str(exc)
with pytest.raises(ValueError) as exc:
encoder.encode('LIBSHORTNAME', 'LIBSECRET', None)
assert "No patron identifier specified." in str(exc)
def test_encode_short_client_token_uses_adobe_base64_encoding(self, encoder):
class MockSigner:
def prepare_key(self, key):
return key
def sign(self, value, key):
"""Always return the same signature, crafted to contain a
plus sign, a slash and an equal sign when base64-encoded.
"""
return "!\tFN6~'Es52?X!#)Z*_S"
encoder.signer = MockSigner()
token = encoder._encode("lib", "My library secret", "1234", 0)
# The signature part of the token has been encoded with our
# custom encoding, not vanilla base64.
assert token == 'lib|0|1234|IQlGTjZ:J0VzNTI;WCEjKVoqX1M@'
class TestShortClientTokenDecoder:
def test_uuid(self, decoder):
"""
GIVEN: An instance of ShortClientTokenDecoder
WHEN: The .uuid() method is called
THEN: A string should be returned in the format 'urn:uuid:0' + uuid, where the uuid
value is seeded based on the node value the decoder was instantiated with.
"""
u = decoder.uuid()
# All UUIDs need to start with a 0 and end with the same node value.
assert u.startswith('urn:uuid:0')
assert u.endswith('685b35c00f05')
def test_decode(self, db_session, encoder, decoder, sct_test_library):
"""
GIVEN: A four part, pipe-delimited string produced by ShortClientTokenEncoder().encode(),
based on a known shared secret, and an instance of ShortClientTokenDecoder.
        WHEN: That token string is passed to the .decode() method of the ShortClientTokenDecoder instance
THEN: An instance of DelegatedPatronIdentifier is returned
"""
token = encoder.encode(SCT_TESTLIB_SHORT_NAME, SCT_TESTLIB_SECRET, GENERIC_PATRONID)
identifier = decoder.decode(db_session, token)
assert isinstance(identifier, DelegatedPatronIdentifier)
assert identifier.library == sct_test_library
assert identifier.patron_identifier == GENERIC_PATRONID
assert identifier.delegated_identifier.startswith('urn:uuid:')
# Do the lookup again and verify we get the same DelegatedPatronIdentifier.
identifier2 = decoder.decode(db_session, token)
assert identifier2 == identifier
def test_decode_two_part(self, db_session, encoder, decoder, sct_test_library):
"""
GIVEN: A username and password derived from a short client token produced by ShortClientTokenEncoder.encode,
and an instance of ShortClientTokenDecoder, where the username is the pipe delimited, left-most portion
of the token, containing '<LIBRARY_SHORT_NAME>|<EXPIRY>|<PATRON_ID>' and the password is the signature
portion of the token.
WHEN: The username and password are passed to the .decode_two_part() method of the ShortClientTokenDecoder
THEN: An instance of DelegatedPatronIdentifier is returned
"""
token = encoder.encode(SCT_TESTLIB_SHORT_NAME, SCT_TESTLIB_SECRET, GENERIC_PATRONID)
(username, password) = token.rsplit('|', 1)
identifier = decoder.decode_two_part(db_session, username, password)
assert isinstance(identifier, DelegatedPatronIdentifier)
assert identifier.library == sct_test_library
assert identifier.patron_identifier == GENERIC_PATRONID
assert identifier.delegated_identifier.startswith('urn:uuid:')
# Do the lookup again and verify we get the same DelegatedPatronIdentifier.
identifier2 = decoder.decode(db_session, token)
assert identifier2 == identifier
def test__split_token_bad_parameter(self, db_session, decoder, sct_test_library):
"""
GIVEN: A corrupt or missing short client token string and an instance of ShortClientTokenDecoder
WHEN: The string is passed to the ._split_token() method of the ShortClientTokenDecoder instance
THEN: An appropriate exception should be raised
"""
# A token has to contain at least two pipe characters.
with pytest.raises(ValueError) as exc:
decoder._split_token(db_session, "foo|")
assert "Invalid client token" in str(exc.value)
# A library with the short name obtained from the token must exist
nonexistent_library = "NONEXISTENT_LIBRARY"
with pytest.raises(ValueError) as exc:
decoder._split_token(db_session, f"{nonexistent_library}|12345|patron")
assert f'I don\'t know how to handle tokens from library "{nonexistent_library}"' in str(exc.value)
# The expiration time must be numeric.
with pytest.raises(ValueError) as exc:
decoder._split_token(db_session, f"{sct_test_library.short_name}|a time|patron")
assert 'Expiration time "a time" is not numeric' in str(exc.value)
@pytest.mark.skip(reason="TODO")
def test_decode_two_part_bad_parameters(self):
"""
GIVEN: A short client token with a signature that cannot be decoded by any delegate or by
ShortClientTokenTool.adobe_base64_decode().
WHEN: ShortClientTokenDecoder.decode_two_part() is called with that signature
THEN: An exception should be raised
"""
@pytest.mark.skip(reason="TODO")
def test__decode(self):
"""
GIVEN: A valid short client token / signature and an instance of ShortClientTokenDecoder
WHEN: The ._decode() method is called on that token and signature
THEN: A DelegatedPatronIdentifier instance should be returned
"""
def test__decode_bad_parameters(self, db_session, decoder, sct_test_library):
"""
GIVEN: A corrupt or missing token string and an instance of ShortClientTokenDecoder
WHEN: That string is passed to the ._decode() method of the ShortClientTokenDecoder instance
THEN: An appropriate exception should be raised
"""
# The patron identifier must not be blank.
with pytest.raises(ValueError) as exc:
decoder._decode(db_session, f"{sct_test_library.short_name}|1234|", "signature")
assert f'Token {sct_test_library.short_name}|1234| has empty patron identifier' in str(exc.value)
# The token must not have expired.
with pytest.raises(ValueError) as exc:
decoder._decode(db_session, f"{sct_test_library.short_name}|1234|patron", "signature")
assert f'Token {sct_test_library.short_name}|1234|patron expired at 2017-01-01 20:34:00' in str(exc.value)
# (Even though the expiration number here is much higher, this token is also expired, because
# the expiration date calculation for an old-style token starts at a different epoch and treats
# the expiration number as seconds rather than minutes.)
with pytest.raises(ValueError) as exc:
decoder._decode(db_session, f"{sct_test_library.short_name}|1500000000|patron", "signature")
assert f'Token {sct_test_library.short_name}|1500000000|patron expired at 2017-07-14 02:40:00' in str(exc.value)
# Finally, the signature must be valid.
with pytest.raises(ValueError) as exc:
decoder._decode(db_session, f"{sct_test_library.short_name}|99999999999|patron", "signature")
assert 'Invalid signature for' in str(exc.value)
def test_decode_bad_parameter(self, db_session, decoder):
"""
GIVEN: A missing or corrupted token and an instance of ShortClientTokenDecoder
        WHEN: The token is passed to the .decode() method of the ShortClientTokenDecoder instance
THEN: An appropriate exception should be raised
"""
with pytest.raises(ValueError) as exc:
decoder.decode(db_session, "")
assert 'Cannot decode an empty token.' in str(exc.value)
with pytest.raises(ValueError) as exc:
decoder.decode(db_session, "no pipes")
assert 'Supposed client token "no pipes" does not contain a pipe.' in str(exc.value)
# The library must be a known one.
with pytest.raises(ValueError) as exc:
decoder._decode(db_session, "unknown|1234|patron", "signature")
assert 'I don\'t know how to handle tokens from library "UNKNOWN"' in str(exc.value)
| 48.188088
| 120
| 0.674668
|
e94f73239f13af66f4ed628974b1683cbaac2c1c
| 13,006
|
py
|
Python
|
tests/components/doorbird/test_config_flow.py
|
tbarbette/core
|
8e58c3aa7bc8d2c2b09b6bd329daa1c092d52d3c
|
[
"Apache-2.0"
] | 6
|
2017-08-02T19:26:39.000Z
|
2020-03-14T22:47:41.000Z
|
tests/components/doorbird/test_config_flow.py
|
tbarbette/core
|
8e58c3aa7bc8d2c2b09b6bd329daa1c092d52d3c
|
[
"Apache-2.0"
] | 58
|
2020-08-03T07:33:02.000Z
|
2022-03-31T06:02:05.000Z
|
tests/components/doorbird/test_config_flow.py
|
tbarbette/core
|
8e58c3aa7bc8d2c2b09b6bd329daa1c092d52d3c
|
[
"Apache-2.0"
] | 14
|
2018-08-19T16:28:26.000Z
|
2021-09-02T18:26:53.000Z
|
"""Test the DoorBird config flow."""
from unittest.mock import MagicMock, patch
import urllib
from homeassistant import config_entries, data_entry_flow, setup
from homeassistant.components.doorbird import CONF_CUSTOM_URL, CONF_TOKEN
from homeassistant.components.doorbird.const import CONF_EVENTS, DOMAIN
from homeassistant.const import CONF_HOST, CONF_NAME, CONF_PASSWORD, CONF_USERNAME
from tests.common import MockConfigEntry, init_recorder_component
VALID_CONFIG = {
CONF_HOST: "1.2.3.4",
CONF_USERNAME: "friend",
CONF_PASSWORD: "password",
CONF_NAME: "mydoorbird",
}
def _get_mock_doorbirdapi_return_values(ready=None, info=None):
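    # Build a MagicMock of the DoorBird API whose ready()/info() calls return the given values.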
doorbirdapi_mock = MagicMock()
type(doorbirdapi_mock).ready = MagicMock(return_value=ready)
type(doorbirdapi_mock).info = MagicMock(return_value=info)
return doorbirdapi_mock
def _get_mock_doorbirdapi_side_effects(ready=None, info=None):
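    # Build a MagicMock of the DoorBird API whose ready()/info() calls raise the given side effects.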
doorbirdapi_mock = MagicMock()
type(doorbirdapi_mock).ready = MagicMock(side_effect=ready)
type(doorbirdapi_mock).info = MagicMock(side_effect=info)
return doorbirdapi_mock
async def test_user_form(hass):
"""Test we get the user form."""
await hass.async_add_executor_job(
init_recorder_component, hass
) # force in memory db
await setup.async_setup_component(hass, "persistent_notification", {})
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": config_entries.SOURCE_USER}
)
assert result["type"] == data_entry_flow.RESULT_TYPE_FORM
assert result["errors"] == {}
doorbirdapi = _get_mock_doorbirdapi_return_values(
ready=[True], info={"WIFI_MAC_ADDR": "macaddr"}
)
with patch(
"homeassistant.components.doorbird.config_flow.DoorBird",
return_value=doorbirdapi,
), patch(
"homeassistant.components.doorbird.async_setup", return_value=True
) as mock_setup, patch(
"homeassistant.components.doorbird.async_setup_entry",
return_value=True,
) as mock_setup_entry:
result2 = await hass.config_entries.flow.async_configure(
result["flow_id"],
VALID_CONFIG,
)
await hass.async_block_till_done()
assert result2["type"] == "create_entry"
assert result2["title"] == "1.2.3.4"
assert result2["data"] == {
"host": "1.2.3.4",
"name": "mydoorbird",
"password": "password",
"username": "friend",
}
assert len(mock_setup.mock_calls) == 1
assert len(mock_setup_entry.mock_calls) == 1
async def test_form_import(hass):
"""Test we get the form with import source."""
await hass.async_add_executor_job(
init_recorder_component, hass
) # force in memory db
await setup.async_setup_component(hass, "persistent_notification", {})
import_config = VALID_CONFIG.copy()
import_config[CONF_EVENTS] = ["event1", "event2", "event3"]
import_config[CONF_TOKEN] = "imported_token"
import_config[
CONF_CUSTOM_URL
] = "http://legacy.custom.url/should/only/come/in/from/yaml"
doorbirdapi = _get_mock_doorbirdapi_return_values(
ready=[True], info={"WIFI_MAC_ADDR": "macaddr"}
)
with patch(
"homeassistant.components.doorbird.config_flow.DoorBird",
return_value=doorbirdapi,
), patch("homeassistant.components.logbook.async_setup", return_value=True), patch(
"homeassistant.components.doorbird.async_setup", return_value=True
) as mock_setup, patch(
"homeassistant.components.doorbird.async_setup_entry",
return_value=True,
) as mock_setup_entry:
result = await hass.config_entries.flow.async_init(
DOMAIN,
context={"source": config_entries.SOURCE_IMPORT},
data=import_config,
)
await hass.async_block_till_done()
assert result["type"] == "create_entry"
assert result["title"] == "1.2.3.4"
assert result["data"] == {
"host": "1.2.3.4",
"name": "mydoorbird",
"password": "password",
"username": "friend",
"events": ["event1", "event2", "event3"],
"token": "imported_token",
# This will go away once we convert to cloud hooks
"hass_url_override": "http://legacy.custom.url/should/only/come/in/from/yaml",
}
# It is not possible to import options at this time
# so they end up in the config entry data and are
    # used as a fallback when they are not in options
assert len(mock_setup.mock_calls) == 1
assert len(mock_setup_entry.mock_calls) == 1
async def test_form_import_with_zeroconf_already_discovered(hass):
"""Test we get the form with import source."""
await hass.async_add_executor_job(
init_recorder_component, hass
) # force in memory db
await setup.async_setup_component(hass, "persistent_notification", {})
# Running the zeroconf init will make the unique id
# in progress
zero_conf = await hass.config_entries.flow.async_init(
DOMAIN,
context={"source": config_entries.SOURCE_ZEROCONF},
data={
"properties": {"macaddress": "1CCAE3DOORBIRD"},
"name": "Doorstation - abc123._axis-video._tcp.local.",
"host": "192.168.1.5",
},
)
assert zero_conf["type"] == data_entry_flow.RESULT_TYPE_FORM
assert zero_conf["step_id"] == "user"
assert zero_conf["errors"] == {}
import_config = VALID_CONFIG.copy()
import_config[CONF_EVENTS] = ["event1", "event2", "event3"]
import_config[CONF_TOKEN] = "imported_token"
import_config[
CONF_CUSTOM_URL
] = "http://legacy.custom.url/should/only/come/in/from/yaml"
doorbirdapi = _get_mock_doorbirdapi_return_values(
ready=[True], info={"WIFI_MAC_ADDR": "1CCAE3DOORBIRD"}
)
with patch(
"homeassistant.components.doorbird.config_flow.DoorBird",
return_value=doorbirdapi,
), patch("homeassistant.components.logbook.async_setup", return_value=True), patch(
"homeassistant.components.doorbird.async_setup", return_value=True
) as mock_setup, patch(
"homeassistant.components.doorbird.async_setup_entry",
return_value=True,
) as mock_setup_entry:
result = await hass.config_entries.flow.async_init(
DOMAIN,
context={"source": config_entries.SOURCE_IMPORT},
data=import_config,
)
await hass.async_block_till_done()
assert result["type"] == "create_entry"
assert result["title"] == "1.2.3.4"
assert result["data"] == {
"host": "1.2.3.4",
"name": "mydoorbird",
"password": "password",
"username": "friend",
"events": ["event1", "event2", "event3"],
"token": "imported_token",
# This will go away once we convert to cloud hooks
"hass_url_override": "http://legacy.custom.url/should/only/come/in/from/yaml",
}
# It is not possible to import options at this time
# so they end up in the config entry data and are
    # used as a fallback when they are not in options
assert len(mock_setup.mock_calls) == 1
assert len(mock_setup_entry.mock_calls) == 1
async def test_form_zeroconf_wrong_oui(hass):
"""Test we abort when we get the wrong OUI via zeroconf."""
await hass.async_add_executor_job(
init_recorder_component, hass
) # force in memory db
await setup.async_setup_component(hass, "persistent_notification", {})
result = await hass.config_entries.flow.async_init(
DOMAIN,
context={"source": config_entries.SOURCE_ZEROCONF},
data={
"properties": {"macaddress": "notdoorbirdoui"},
"host": "192.168.1.8",
"name": "Doorstation - abc123._axis-video._tcp.local.",
},
)
assert result["type"] == "abort"
assert result["reason"] == "not_doorbird_device"
async def test_form_zeroconf_link_local_ignored(hass):
"""Test we abort when we get a link local address via zeroconf."""
await hass.async_add_executor_job(
init_recorder_component, hass
) # force in memory db
await setup.async_setup_component(hass, "persistent_notification", {})
result = await hass.config_entries.flow.async_init(
DOMAIN,
context={"source": config_entries.SOURCE_ZEROCONF},
data={
"properties": {"macaddress": "1CCAE3DOORBIRD"},
"host": "169.254.103.61",
"name": "Doorstation - abc123._axis-video._tcp.local.",
},
)
assert result["type"] == "abort"
assert result["reason"] == "link_local_address"
async def test_form_zeroconf_correct_oui(hass):
"""Test we can setup from zeroconf with the correct OUI source."""
await hass.async_add_executor_job(
init_recorder_component, hass
) # force in memory db
await setup.async_setup_component(hass, "persistent_notification", {})
result = await hass.config_entries.flow.async_init(
DOMAIN,
context={"source": config_entries.SOURCE_ZEROCONF},
data={
"properties": {"macaddress": "1CCAE3DOORBIRD"},
"name": "Doorstation - abc123._axis-video._tcp.local.",
"host": "192.168.1.5",
},
)
assert result["type"] == data_entry_flow.RESULT_TYPE_FORM
assert result["step_id"] == "user"
assert result["errors"] == {}
doorbirdapi = _get_mock_doorbirdapi_return_values(
ready=[True], info={"WIFI_MAC_ADDR": "macaddr"}
)
with patch(
"homeassistant.components.doorbird.config_flow.DoorBird",
return_value=doorbirdapi,
), patch("homeassistant.components.logbook.async_setup", return_value=True), patch(
"homeassistant.components.doorbird.async_setup", return_value=True
) as mock_setup, patch(
"homeassistant.components.doorbird.async_setup_entry",
return_value=True,
) as mock_setup_entry:
result2 = await hass.config_entries.flow.async_configure(
result["flow_id"], VALID_CONFIG
)
await hass.async_block_till_done()
assert result2["type"] == "create_entry"
assert result2["title"] == "1.2.3.4"
assert result2["data"] == {
"host": "1.2.3.4",
"name": "mydoorbird",
"password": "password",
"username": "friend",
}
assert len(mock_setup.mock_calls) == 1
assert len(mock_setup_entry.mock_calls) == 1
async def test_form_user_cannot_connect(hass):
"""Test we handle cannot connect error."""
await hass.async_add_executor_job(
init_recorder_component, hass
) # force in memory db
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": config_entries.SOURCE_USER}
)
doorbirdapi = _get_mock_doorbirdapi_side_effects(ready=OSError)
with patch(
"homeassistant.components.doorbird.config_flow.DoorBird",
return_value=doorbirdapi,
):
result2 = await hass.config_entries.flow.async_configure(
result["flow_id"],
VALID_CONFIG,
)
assert result2["type"] == data_entry_flow.RESULT_TYPE_FORM
assert result2["errors"] == {"base": "cannot_connect"}
async def test_form_user_invalid_auth(hass):
"""Test we handle cannot invalid auth error."""
await hass.async_add_executor_job(
init_recorder_component, hass
) # force in memory db
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": config_entries.SOURCE_USER}
)
mock_urllib_error = urllib.error.HTTPError(
"http://xyz.tld", 401, "login failed", {}, None
)
doorbirdapi = _get_mock_doorbirdapi_side_effects(ready=mock_urllib_error)
with patch(
"homeassistant.components.doorbird.config_flow.DoorBird",
return_value=doorbirdapi,
):
result2 = await hass.config_entries.flow.async_configure(
result["flow_id"],
VALID_CONFIG,
)
assert result2["type"] == "form"
assert result2["errors"] == {"base": "invalid_auth"}
async def test_options_flow(hass):
"""Test config flow options."""
config_entry = MockConfigEntry(
domain=DOMAIN,
unique_id="abcde12345",
data=VALID_CONFIG,
options={CONF_EVENTS: ["event1", "event2", "event3"]},
)
config_entry.add_to_hass(hass)
with patch(
"homeassistant.components.doorbird.async_setup_entry", return_value=True
):
result = await hass.config_entries.options.async_init(config_entry.entry_id)
assert result["type"] == data_entry_flow.RESULT_TYPE_FORM
assert result["step_id"] == "init"
result = await hass.config_entries.options.async_configure(
result["flow_id"], user_input={CONF_EVENTS: "eventa, eventc, eventq"}
)
assert result["type"] == data_entry_flow.RESULT_TYPE_CREATE_ENTRY
assert config_entry.options == {CONF_EVENTS: ["eventa", "eventc", "eventq"]}
| 35.438692
| 87
| 0.664924
|
797c93a728304d926ba6e3a81a8ab8956466548f
| 1,551
|
py
|
Python
|
tests/test_fast_bernoulli.py
|
altosaar/deep-exponential-families-gluon
|
80d69b54081f622c0012bb181aa6d8ab9a740f15
|
[
"MIT"
] | 19
|
2017-10-17T01:04:23.000Z
|
2021-11-14T20:50:56.000Z
|
tests/test_fast_bernoulli.py
|
afcarl/deep-exponential-families-gluon
|
80d69b54081f622c0012bb181aa6d8ab9a740f15
|
[
"MIT"
] | null | null | null |
tests/test_fast_bernoulli.py
|
afcarl/deep-exponential-families-gluon
|
80d69b54081f622c0012bb181aa6d8ab9a740f15
|
[
"MIT"
] | 1
|
2018-07-05T21:17:34.000Z
|
2018-07-05T21:17:34.000Z
|
import numpy as np
import time
import distributions
import scipy.stats
import scipy.special
import mxnet as mx
from mxnet import nd
mx.random.seed(13343)
np.random.seed(2324)
def test_bernoulli_sampling():
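    # Draw many samples from FastBernoulli and check that the empirical mean
    # is close to the distribution's analytic mean.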
n_samples = 10000
K = 10 # num factors
C = 2 # num classes
# latent variable is of size [n_samples, batch_size, latent_size]
positive_latent = nd.ones((1, 1, K)) * 0.01
weight = nd.ones((K, C)) * 0.1
bias = nd.ones(C) * 0.01
p = distributions.FastBernoulli(
positive_latent=positive_latent, weight=weight, bias=bias)
samples = p.sample(n_samples)
print(samples.shape)
mean = nd.mean(samples, 0).asnumpy()
print('sampling mean, mean', mean, p.mean.asnumpy())
np.testing.assert_allclose(mean, p.mean.asnumpy(), rtol=1e-1)
def test_bernoulli_log_prob():
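    # Check that FastBernoulli.log_prob_sum, computed from the nonzero indices,
    # matches scipy's Bernoulli logpmf summed over the full data vector.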
K = 10 # num factors
C = 100 # num classes
positive_latent = nd.ones((1, 1, K)) * nd.array(np.random.rand(K))
weight = nd.ones((K, C)) * nd.array(np.random.rand(K, C))
bias = nd.ones(C) * 0.01
data = np.random.binomial(n=1, p=0.1, size=C)
assert np.sum(data) > 0
nonzero_idx = np.nonzero(data)[0]
p = distributions.FastBernoulli(
positive_latent=positive_latent, weight=weight, bias=bias)
np_log_prob_sum = scipy.stats.bernoulli.logpmf(
np.array(data), p=p.mean.asnumpy()).sum()
mx_log_prob_sum = p.log_prob_sum(
nonzero_index=nd.array(nonzero_idx)).asnumpy()
print('mx log prob sum, np log prob sum', mx_log_prob_sum, np_log_prob_sum)
np.testing.assert_allclose(mx_log_prob_sum, np_log_prob_sum, rtol=1e-3)
| 32.3125
| 77
| 0.702772
|
bbeb677e5f2be509ccaaf2043fc03e2b85b88762
| 9,579
|
py
|
Python
|
docs/source/conf.py
|
spidezad/google_screener_data_extract
|
8efe14e73918808182d8745ef38c38f1ac686f6e
|
[
"BSD-3-Clause"
] | 28
|
2015-09-27T21:11:23.000Z
|
2021-05-17T06:33:20.000Z
|
docs/source/conf.py
|
spidezad/google_screener_data_extract
|
8efe14e73918808182d8745ef38c38f1ac686f6e
|
[
"BSD-3-Clause"
] | 1
|
2015-10-18T23:11:03.000Z
|
2018-03-27T05:58:10.000Z
|
docs/source/conf.py
|
spidezad/google_screener_data_extract
|
8efe14e73918808182d8745ef38c38f1ac686f6e
|
[
"BSD-3-Clause"
] | 24
|
2016-01-14T09:53:48.000Z
|
2018-05-17T02:00:56.000Z
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# google_screener_data_extract documentation build configuration file, created by
# cookiecutter pipproject
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
sys.path.insert(0, os.path.abspath('../..'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = 'google_screener_data_extract'
copyright = '2016, Tan Kok Hua'
author = 'Tan Kok Hua'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '0.0.1'
# The full version, including alpha/beta/rc tags.
release = '0.0.1'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This patterns also effect to html_static_path and html_extra_path
exclude_patterns = []
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'sphinx_rtd_theme'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents.
# "<project> v<release> documentation" by default.
#html_title = 'google_screener_data_extract v0.0.1'
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (relative to this directory) to use as a favicon of
# the docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#html_extra_path = []
# If not None, a 'Last updated on:' timestamp is inserted at every page
# bottom, using the given strftime format.
# The empty string is equivalent to '%b %d, %Y'.
#html_last_updated_fmt = None
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Language to be used for generating the HTML full-text search index.
# Sphinx supports the following languages:
# 'da', 'de', 'en', 'es', 'fi', 'fr', 'hu', 'it', 'ja'
# 'nl', 'no', 'pt', 'ro', 'ru', 'sv', 'tr', 'zh'
#html_search_language = 'en'
# A dictionary with options for the search language support, empty by default.
# 'ja' uses this config value.
# 'zh' user can custom change `jieba` dictionary path.
#html_search_options = {'type': 'default'}
# The name of a javascript file (relative to the configuration directory) that
# implements a search results scorer. If empty, the default will be used.
#html_search_scorer = 'scorer.js'
# Output file base name for HTML help builder.
htmlhelp_basename = 'google_screener_data_extractdoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
# Latex figure (float) alignment
#'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'google_screener_data_extract.tex', 'google_screener_data_extract Documentation',
'Tan Kok Hua', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'google_screener_data_extract', 'google_screener_data_extract Documentation',
[author], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'google_screener_data_extract', 'google_screener_data_extract Documentation',
author, 'google_screener_data_extract', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
| 33.031034
| 98
| 0.723875
|
21250894db6b98dd1265573165d5299f694f5633
| 2,123
|
py
|
Python
|
clinica/pipelines/t1_freesurfer_longitudinal/t1_freesurfer_template_cli.py
|
Chengwei94/clinica
|
0e9d837baf9064a626198422b2a70fe120f227f0
|
[
"MIT"
] | null | null | null |
clinica/pipelines/t1_freesurfer_longitudinal/t1_freesurfer_template_cli.py
|
Chengwei94/clinica
|
0e9d837baf9064a626198422b2a70fe120f227f0
|
[
"MIT"
] | null | null | null |
clinica/pipelines/t1_freesurfer_longitudinal/t1_freesurfer_template_cli.py
|
Chengwei94/clinica
|
0e9d837baf9064a626198422b2a70fe120f227f0
|
[
"MIT"
] | null | null | null |
# coding: utf8
import clinica.engine as ce
class T1FreeSurferTemplateCLI(ce.CmdParser):
def define_name(self):
"""Define the sub-command name to run this pipeline."""
self._name = "t1-freesurfer-template"
def define_description(self):
"""Define a description of this pipeline."""
self._description = (
"Creation of unbiased template with FreeSurfer:\n"
"https://aramislab.paris.inria.fr/clinica/docs/public/latest/Pipelines/T1_FreeSurfer_Longitudinal/"
)
def define_options(self):
from clinica.engine.cmdparser import PIPELINE_CATEGORIES
# Clinica compulsory arguments (e.g. BIDS, CAPS, group_label)
clinica_comp = self._args.add_argument_group(
PIPELINE_CATEGORIES["CLINICA_COMPULSORY"]
)
clinica_comp.add_argument("caps_directory", help="Path to the CAPS directory.")
# Clinica standard arguments (e.g. --n_procs)
self.add_clinica_standard_arguments(add_overwrite_flag=True)
def run_command(self, args):
"""Run the pipeline with defined args."""
from networkx import Graph
from clinica.utils.ux import print_crash_files_and_exit, print_end_pipeline
from .t1_freesurfer_template_pipeline import T1FreeSurferTemplate
pipeline = T1FreeSurferTemplate(
caps_directory=self.absolute_path(args.caps_directory),
tsv_file=self.absolute_path(args.subjects_sessions_tsv),
base_dir=self.absolute_path(args.working_directory),
name="t1-freesurfer-template",
overwrite_caps=args.overwrite_outputs,
)
if args.n_procs:
exec_pipeline = pipeline.run(
plugin="MultiProc", plugin_args={"n_procs": args.n_procs}
)
else:
exec_pipeline = pipeline.run()
if isinstance(exec_pipeline, Graph):
print_end_pipeline(
self.name, pipeline.base_dir, pipeline.base_dir_was_specified
)
else:
print_crash_files_and_exit(args.logname, pipeline.base_dir)
| 36.603448
| 111
| 0.663212
|
76841d0854cc0bf4fac1998414c9cecf0d563966
| 70
|
py
|
Python
|
src/rpi_ati_net_ft/__init__.py
|
rpiRobotics/rpi_ati_net_ft
|
fdf202d5c2d66dff4ef92f7ed6eb6a9f3121d911
|
[
"BSD-3-Clause"
] | 4
|
2018-05-25T13:07:06.000Z
|
2022-01-05T21:26:55.000Z
|
src/rpi_ati_net_ft/__init__.py
|
rpiRobotics/rpi_ati_net_ft
|
fdf202d5c2d66dff4ef92f7ed6eb6a9f3121d911
|
[
"BSD-3-Clause"
] | 1
|
2018-01-08T23:44:17.000Z
|
2018-01-08T23:44:17.000Z
|
src/rpi_ati_net_ft/__init__.py
|
rpiRobotics/rpi_ati_net_ft
|
fdf202d5c2d66dff4ef92f7ed6eb6a9f3121d911
|
[
"BSD-3-Clause"
] | 1
|
2020-11-20T07:09:34.000Z
|
2020-11-20T07:09:34.000Z
|
from __future__ import absolute_import
from .rpi_ati_net_ft import *
| 17.5
| 38
| 0.842857
|
aa56ab57123d686c1cb0904c2f788f7abf049bdb
| 5,197
|
py
|
Python
|
imagetagger/imagetagger/images/models.py
|
plieningerweb/imagetagger
|
89c49ef557fa8b4f1f3fd9f2cd58988ce88afee2
|
[
"MIT"
] | 1
|
2019-05-06T11:14:39.000Z
|
2019-05-06T11:14:39.000Z
|
imagetagger/imagetagger/images/models.py
|
plieningerweb/imagetagger
|
89c49ef557fa8b4f1f3fd9f2cd58988ce88afee2
|
[
"MIT"
] | null | null | null |
imagetagger/imagetagger/images/models.py
|
plieningerweb/imagetagger
|
89c49ef557fa8b4f1f3fd9f2cd58988ce88afee2
|
[
"MIT"
] | null | null | null |
from typing import Set
from django.conf import settings
from django.contrib.auth import get_user_model
from django.db import models
import os
from imagetagger.users.models import Team
class Image(models.Model):
image_set = models.ForeignKey(
'ImageSet', on_delete=models.CASCADE, related_name='images')
name = models.CharField(max_length=100)
filename = models.CharField(max_length=100, unique=True)
time = models.DateTimeField(auto_now_add=True)
checksum = models.BinaryField()
width = models.IntegerField(default=800)
height = models.IntegerField(default=600)
def path(self):
return os.path.join(self.image_set.root_path(), self.filename)
def relative_path(self):
return os.path.join(self.image_set.path, self.filename)
def __str__(self):
return u'Image: {0}'.format(self.name)
class ImageSet(models.Model):
class Meta:
unique_together = [
'name',
'team',
]
PRIORITIES = (
(1, 'High'),
(0, 'Normal'),
(-1, 'Low'),
)
path = models.CharField(max_length=100, unique=True, null=True)
name = models.CharField(max_length=100)
location = models.CharField(max_length=100, null=True, blank=True)
description = models.TextField(max_length=1000, null=True, blank=True)
time = models.DateTimeField(auto_now_add=True)
team = models.ForeignKey(
Team,
on_delete=models.SET_NULL,
related_name='image_sets',
null=True,
)
creator = models.ForeignKey(settings.AUTH_USER_MODEL,
default=None,
on_delete=models.SET_NULL,
null=True,
blank=True)
public = models.BooleanField(default=False)
public_collaboration = models.BooleanField(default=False)
image_lock = models.BooleanField(default=False)
priority = models.IntegerField(choices=PRIORITIES, default=0)
main_annotation_type = models.ForeignKey(
to='annotations.AnnotationType',
on_delete=models.SET_NULL,
null=True,
blank=True,
default=None
)
pinned_by = models.ManyToManyField(settings.AUTH_USER_MODEL, related_name='pinned_sets')
def root_path(self):
return os.path.join(settings.IMAGE_PATH, self.path)
@property
def image_count(self):
if hasattr(self, 'image_count_agg'):
return self.image_count_agg
return self.images.count()
def get_perms(self, user: get_user_model()) -> Set[str]:
"""Get all permissions of the user."""
perms = set()
if self.team is not None:
if self.team.is_admin(user):
perms.update({
'verify',
'annotate',
'create_export',
'delete_annotation',
'delete_export',
'delete_set',
'delete_images',
'edit_annotation',
'edit_set',
'read',
})
if self.team.is_member(user):
perms.update({
'verify',
'annotate',
'create_export',
'delete_annotation',
'delete_export',
'edit_annotation',
'edit_set',
'read',
})
if user == self.creator:
perms.update({
'verify',
'annotate',
'create_export',
'delete_annotation',
'delete_export',
'delete_set',
'delete_images',
'edit_annotation',
'edit_set',
'read',
})
if self.public:
perms.update({
'read',
'create_export',
})
if self.public_collaboration:
perms.update({
'verify',
'annotate',
'delete_annotation',
'edit_annotation',
})
return perms
def has_perm(self, permission: str, user: get_user_model()) -> bool:
"""Check whether user has specified permission."""
return permission in self.get_perms(user)
def __str__(self):
return u'Imageset: {0}'.format(self.name)
@property
def prio_symbol(self):
        if self.priority == -1:
            return '<span class="glyphicon glyphicon-download" data-toggle="tooltip" data-placement="right" title="Low labeling priority"></span>'
        elif self.priority == 0:
            return ''
        elif self.priority == 1:
            return '<span class="glyphicon glyphicon-exclamation-sign" data-toggle="tooltip" data-placement="right" title="High labeling priority"></span>'
class SetTag(models.Model):
name = models.CharField(max_length=100, unique=True)
imagesets = models.ManyToManyField(ImageSet, related_name='set_tags')
| 33.314103
| 155
| 0.548393
|
6661e92eb1d4b470603b9d5fd7b0f819450f6492
| 444
|
py
|
Python
|
save_restore_model/variable/save_model.py
|
zlpmichelle/crackingtensorflow
|
66c3517b60c3793ef06f904e5d58e4d044628182
|
[
"Apache-2.0"
] | 3
|
2017-10-19T23:41:26.000Z
|
2019-10-22T08:59:35.000Z
|
save_restore_model/variable/save_model.py
|
zlpmichelle/crackingtensorflow
|
66c3517b60c3793ef06f904e5d58e4d044628182
|
[
"Apache-2.0"
] | null | null | null |
save_restore_model/variable/save_model.py
|
zlpmichelle/crackingtensorflow
|
66c3517b60c3793ef06f904e5d58e4d044628182
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
'''
For example, if the model we want to save consists of the parameters v1 and v2,
we only need the following save code (save_model.py).
'''
import tensorflow as tf
v1 = tf.Variable(1.1, name="v1")
v2 = tf.Variable(1.2, name="v2")
init = tf.initialize_all_variables()
saver = tf.train.Saver()
with tf.Session() as sess:
sess.run(init)
    print(v2.eval(sess))
    save_path = "/Users/lipingzhang/Downloads/model.ckpt"
    saver.save(sess, save_path)
    print("Model stored....")
| 23.368421
| 55
| 0.682432
|
e06047655833312df2eb44152e844af391358d61
| 3,926
|
py
|
Python
|
flo/commands/run.py
|
deanmalmgren/flo
|
40ba3ce29a03cecb74bf809e40061e5e5c9d6a6b
|
[
"MIT"
] | 15
|
2015-03-26T07:45:24.000Z
|
2019-09-09T13:07:29.000Z
|
flo/commands/run.py
|
deanmalmgren/flo
|
40ba3ce29a03cecb74bf809e40061e5e5c9d6a6b
|
[
"MIT"
] | 3
|
2015-09-16T09:33:30.000Z
|
2016-08-24T06:39:56.000Z
|
flo/commands/run.py
|
deanmalmgren/flo
|
40ba3ce29a03cecb74bf809e40061e5e5c9d6a6b
|
[
"MIT"
] | 4
|
2016-07-07T18:32:56.000Z
|
2020-06-19T07:24:11.000Z
|
import os
import sys
from ..exceptions import ShellError, CommandLineException
from ..notify import notify
from .base import BaseCommand
class Command(BaseCommand):
help_text = "Run the task workflow."
def manipulate_task_graph(self, task_id, start_at, skip, only):
# --only is a synonym for setting start_at and task_id to be
# the same thing. enforce logic here as it is not possible (?)
# to do so with argparse directly
# http://stackoverflow.com/q/14985474/564709
if only is not None:
if start_at is not None or task_id is not None:
self.option_parser.error((
"--only can not be used with --start-at or specifying a "
"TASK_ID"
))
else:
start_at = task_id = only
# restrict task graph as necessary for the purposes of running
# the workflow
if task_id is not None or start_at is not None:
self.task_graph = self.task_graph.subgraph_needed_for(start_at,
task_id)
# if we are skipping a task, remove it from the task graph to
# take it out of execution flow and avoid updating its status
# in .flo/state.csv
if skip:
self.task_graph.remove_node_substituting_dependencies(skip)
def inner_execute(self, task_id, start_at, skip, only, force,
mock_run=False):
self.manipulate_task_graph(task_id, start_at, skip, only)
# when the workflow is --force'd, this runs all
# tasks. Otherwise, only runs tasks that are out of sync.
if force:
self.task_graph.run_all(mock_run=mock_run)
else:
self.task_graph.run_all_out_of_sync(mock_run=mock_run)
# mark the self.task_graph as completing successfully to send the
# correct email message
self.task_graph.successful = True
def execute(self, task_id=None, start_at=None, skip=None, only=None,
force=False, notify_emails=None, **kwargs):
super(Command, self).execute(**kwargs)
try:
self.inner_execute(task_id, start_at, skip, only, force)
except CommandLineException:
raise
finally:
if notify_emails:
notify(*notify_emails)
def add_common_run_options(self):
# these options are used by both the `run` and `status` command
self.option_parser.add_argument(
'-f', '--force',
action="store_true",
help="Rerun entire workflow, regardless of task state.",
)
self.add_task_id_option('Specify a particular task to run.')
self.add_task_id_argument(
'--start-at',
type=str,
metavar='TASK_ID',
choices=self.available_task_ids,
help=(
'Specify a task to start from (run everything downstream, '
'ignore everything upstream).'
),
)
self.add_task_id_argument(
'--skip',
type=str,
metavar='TASK_ID',
choices=self.available_task_ids,
help='Skip the specified task and ignore whether it is in sync.',
)
self.add_task_id_argument(
'--only',
type=str,
metavar='TASK_ID',
choices=self.available_task_ids,
help='Only run the specified task.',
)
def add_command_line_options(self):
super(Command, self).add_command_line_options()
self.add_common_run_options()
self.option_parser.add_argument(
'--notify',
type=str,
metavar='EMAIL',
dest="notify_emails",
nargs=1,
help='Specify an email address to notify on completion.',
)
| 36.018349
| 77
| 0.582527
|
2649f77dede4b0aa6ee3d9bd86310fc408a6cbe6
| 23,917
|
py
|
Python
|
alpaca_backtrader_api/alpacastore.py
|
axey733/alpaca-backtrader-api
|
9e7ff0f61c92832b93a0a83ca97505c3637c3272
|
[
"Apache-2.0"
] | null | null | null |
alpaca_backtrader_api/alpacastore.py
|
axey733/alpaca-backtrader-api
|
9e7ff0f61c92832b93a0a83ca97505c3637c3272
|
[
"Apache-2.0"
] | null | null | null |
alpaca_backtrader_api/alpacastore.py
|
axey733/alpaca-backtrader-api
|
9e7ff0f61c92832b93a0a83ca97505c3637c3272
|
[
"Apache-2.0"
] | null | null | null |
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import collections
from datetime import datetime, timedelta
from dateutil.parser import parse as date_parse
import time as _time
import threading
import asyncio
import alpaca_trade_api as tradeapi
import pytz
import requests
import pandas as pd
import backtrader as bt
from alpaca_trade_api.entity import Aggs
from alpaca_trade_api.polygon.entity import NY
from backtrader.metabase import MetaParams
from backtrader.utils.py3 import queue, with_metaclass
# Extend the exceptions to support extra cases
class AlpacaError(Exception):
""" Generic error class, catches Alpaca response errors
"""
def __init__(self, error_response):
self.error_response = error_response
msg = "Alpaca API returned error code %s (%s) " % \
(error_response['code'], error_response['message'])
super(AlpacaError, self).__init__(msg)
class AlpacaRequestError(AlpacaError):
def __init__(self):
er = dict(code=599, message='Request Error', description='')
super(self.__class__, self).__init__(er)
class AlpacaStreamError(AlpacaError):
def __init__(self, content=''):
er = dict(code=598, message='Failed Streaming', description=content)
super(self.__class__, self).__init__(er)
class AlpacaTimeFrameError(AlpacaError):
    def __init__(self, content=''):
        er = dict(code=597, message='Not supported TimeFrame',
                  description=content)
        super(self.__class__, self).__init__(er)
class AlpacaNetworkError(AlpacaError):
def __init__(self):
er = dict(code=596, message='Network Error', description='')
super(self.__class__, self).__init__(er)
class API(tradeapi.REST):
def _request(self,
method,
path,
data=None,
base_url=None,
api_version=None):
# Added the try block
try:
return super(API, self)._request(
method, path, data, base_url, api_version)
except requests.RequestException as e:
resp = AlpacaRequestError().error_response
resp['description'] = str(e)
return resp
except tradeapi.rest.APIError as e:
# changed from raise to return
return e._error
except Exception as e:
resp = AlpacaNetworkError().error_response
resp['description'] = str(e)
return resp
return None
class Streamer:
conn = None
def __init__(
self,
q,
api_key='',
api_secret='',
instrument='',
method='',
base_url='',
data_stream='',
*args,
**kwargs):
try:
# make sure we have an event loop, if not create a new one
asyncio.get_event_loop()
except RuntimeError:
asyncio.set_event_loop(asyncio.new_event_loop())
self.data_stream = data_stream
self.conn = tradeapi.StreamConn(api_key,
api_secret,
base_url,
data_stream=self.data_stream)
self.instrument = instrument
self.method = method
self.q = q
self.conn.on('authenticated')(self.on_auth)
self.conn.on(r'Q.*')(self.on_quotes)
self.conn.on(r'account_updates')(self.on_account)
self.conn.on(r'trade_updates')(self.on_trade)
def run(self):
channels = []
if not self.method:
channels = ['trade_updates'] # 'account_updates'
else:
if self.data_stream == 'polygon':
maps = {"quote": "Q."}
elif self.data_stream == 'alpacadatav1':
maps = {"quote": "alpacadatav1/Q."}
channels = [maps[self.method] + self.instrument]
loop = asyncio.new_event_loop()
asyncio.set_event_loop(loop)
self.conn.run(channels)
# Setup event handlers
async def on_auth(self, conn, stream, msg):
pass
async def on_listen(self, conn, stream, msg):
pass
async def on_quotes(self, conn, subject, msg):
msg._raw['time'] = msg.timestamp.to_pydatetime().timestamp()
self.q.put(msg._raw)
async def on_agg_sec(self, conn, subject, msg):
self.q.put(msg)
async def on_agg_min(self, conn, subject, msg):
self.q.put(msg)
async def on_account(self, conn, stream, msg):
self.q.put(msg)
async def on_trade(self, conn, stream, msg):
self.q.put(msg)
class MetaSingleton(MetaParams):
'''Metaclass to make a metaclassed class a singleton'''
def __init__(cls, name, bases, dct):
super(MetaSingleton, cls).__init__(name, bases, dct)
cls._singleton = None
def __call__(cls, *args, **kwargs):
if cls._singleton is None:
cls._singleton = (
super(MetaSingleton, cls).__call__(*args, **kwargs))
return cls._singleton
class AlpacaStore(with_metaclass(MetaSingleton, object)):
'''Singleton class wrapping to control the connections to Alpaca.
Params:
- ``key_id`` (default:``None``): Alpaca API key id
- ``secret_key`` (default: ``None``): Alpaca API secret key
- ``paper`` (default: ``False``): use the paper trading environment
- ``account_tmout`` (default: ``10.0``): refresh period for account
value/cash refresh
'''
BrokerCls = None # broker class will autoregister
DataCls = None # data class will auto register
params = (
('key_id', ''),
('secret_key', ''),
('paper', False),
('usePolygon', False),
('account_tmout', 10.0), # account balance refresh timeout
('api_version', None)
)
_DTEPOCH = datetime(1970, 1, 1)
_ENVPRACTICE = 'paper'
_ENVLIVE = 'live'
_ENV_PRACTICE_URL = 'https://paper-api.alpaca.markets'
_ENV_LIVE_URL = ''
@classmethod
def getdata(cls, *args, **kwargs):
'''Returns ``DataCls`` with args, kwargs'''
return cls.DataCls(*args, **kwargs)
@classmethod
def getbroker(cls, *args, **kwargs):
'''Returns broker with *args, **kwargs from registered ``BrokerCls``'''
return cls.BrokerCls(*args, **kwargs)
def __init__(self):
super(AlpacaStore, self).__init__()
self.notifs = collections.deque() # store notifications for cerebro
self._env = None # reference to cerebro for general notifications
self.broker = None # broker instance
self.datas = list() # datas that have registered over start
self._orders = collections.OrderedDict() # map order.ref to oid
self._ordersrev = collections.OrderedDict() # map oid to order.ref
self._transpend = collections.defaultdict(collections.deque)
if self.p.paper:
self._oenv = self._ENVPRACTICE
self.p.base_url = self._ENV_PRACTICE_URL
else:
self._oenv = self._ENVLIVE
self.p.base_url = self._ENV_LIVE_URL
self.oapi = API(self.p.key_id,
self.p.secret_key,
self.p.base_url,
self.p.api_version)
self._cash = 0.0
self._value = 0.0
self._evt_acct = threading.Event()
def start(self, data=None, broker=None):
# Datas require some processing to kickstart data reception
if data is None and broker is None:
self.cash = None
return
if data is not None:
self._env = data._env
# For datas simulate a queue with None to kickstart co
self.datas.append(data)
if self.broker is not None:
self.broker.data_started(data)
elif broker is not None:
self.broker = broker
self.streaming_events()
self.broker_threads()
def stop(self):
# signal end of thread
if self.broker is not None:
self.q_ordercreate.put(None)
self.q_orderclose.put(None)
self.q_account.put(None)
def put_notification(self, msg, *args, **kwargs):
self.notifs.append((msg, args, kwargs))
def get_notifications(self):
'''Return the pending "store" notifications'''
self.notifs.append(None) # put a mark / threads could still append
return [x for x in iter(self.notifs.popleft, None)]
# Alpaca supported granularities
_GRANULARITIES = {
(bt.TimeFrame.Minutes, 1): '1Min',
(bt.TimeFrame.Minutes, 5): '5Min',
(bt.TimeFrame.Minutes, 15): '15Min',
(bt.TimeFrame.Minutes, 60): '1H',
(bt.TimeFrame.Days, 1): '1D',
}
def get_positions(self):
try:
positions = self.oapi.list_positions()
except (AlpacaError, AlpacaRequestError,):
return []
if positions:
if 'code' in positions[0]._raw:
return []
# poslist = positions.get('positions', [])
return positions
def get_granularity(self, timeframe, compression):
if timeframe == bt.TimeFrame.Minutes:
return "minute"
elif timeframe == bt.TimeFrame.Days:
return "day"
return None
def get_instrument(self, dataname):
try:
insts = self.oapi.get_asset(dataname)
except (AlpacaError, AlpacaRequestError,):
return None
return insts or None
def streaming_events(self, tmout=None):
q = queue.Queue()
kwargs = {'q': q, 'tmout': tmout}
t = threading.Thread(target=self._t_streaming_listener, kwargs=kwargs)
t.daemon = True
t.start()
t = threading.Thread(target=self._t_streaming_events, kwargs=kwargs)
t.daemon = True
t.start()
return q
def _t_streaming_listener(self, q, tmout=None):
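        # Consume raw trade-update events from the streaming queue and hand the
        # embedded order off to the transaction handler.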
while True:
trans = q.get()
self._transaction(trans.order)
def _t_streaming_events(self, q, tmout=None):
if tmout is not None:
_time.sleep(tmout)
streamer = Streamer(q,
api_key=self.p.key_id,
api_secret=self.p.secret_key,
base_url=self.p.base_url,
data_stream='polygon' if self.p.usePolygon else
'alpacadatav1'
)
streamer.run()
def candles(self, dataname, dtbegin, dtend, timeframe, compression,
candleFormat, includeFirst):
"""
:param dataname: symbol name. e.g AAPL
:param dtbegin: datetime start
:param dtend: datetime end
:param timeframe: bt.TimeFrame
:param compression: distance between samples. e.g if 1 =>
get sample every day. if 3 => get sample every 3 days
:param candleFormat: (bidask, midpoint, trades)
:param includeFirst:
:return:
"""
kwargs = locals().copy()
kwargs.pop('self')
kwargs['q'] = q = queue.Queue()
t = threading.Thread(target=self._t_candles, kwargs=kwargs)
t.daemon = True
t.start()
return q
@staticmethod
def iso_date(date_str):
"""
this method will make sure that dates are formatted properly
as with isoformat
:param date_str:
:return: YYYY-MM-DD date formatted
"""
return date_parse(date_str).date().isoformat()
def _t_candles(self, dataname, dtbegin, dtend, timeframe, compression,
candleFormat, includeFirst, q):
granularity = self.get_granularity(timeframe, compression)
if granularity is None:
e = AlpacaTimeFrameError()
q.put(e.error_response)
return
dtkwargs = {'start': None, 'end': None}
if not dtend:
dtend = datetime.utcnow()
if not dtbegin:
days = 30 if 'd' in granularity else 3
delta = timedelta(days=days)
dtbegin = dtend - delta
dtkwargs['start'] = dtbegin
end_dt = None
dtkwargs['end'] = dtend
end_dt = dtend.isoformat()
cdl = pd.DataFrame()
prevdt = 0
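        # Page through the history: keep requesting from the timestamp of the
        # last bar received until a request returns no new data.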
while True:
try:
start_dt = None
if dtkwargs['start']:
start_dt = dtkwargs['start'].isoformat()
if self.p.usePolygon:
response = \
self.oapi.polygon.historic_agg_v2(
dataname,
compression,
granularity,
_from=self.iso_date(start_dt),
to=self.iso_date(end_dt))
else:
# response = self.oapi.get_aggs(dataname,
# compression,
# granularity,
# self.iso_date(start_dt),
# self.iso_date(end_dt))
                # get_aggs() works nicely for days but not for minutes, and
                # it is not a documented API. get_barset(), on the other hand,
                # works for both, but we need to massage its result so we can
                # work with it smoothly and return data the same way polygon does
response = self.oapi.get_barset(
dataname,
granularity,
start=start_dt,
end=end_dt)[dataname]._raw
for bar in response:
# Aggs are in milliseconds, we multiply by 1000 to
# change seconds to ms
bar['t'] *= 1000
response = Aggs({"results": response})
except AlpacaError as e:
print(str(e))
q.put(e.error_response)
q.put(None)
return
except Exception as e:
print(str(e))
q.put({'code': 'error'})
q.put(None)
return
# No result from the server, most likely error
if response.df.shape[0] == 0:
print(response)
q.put({'code': 'error'})
q.put(None)
return
temp = response.df
cdl.update(temp)
cdl = pd.concat([cdl, temp])
cdl = cdl[~cdl.index.duplicated()]
prevdt = dtkwargs['start']
dtkwargs['start'] = cdl.index[-1].to_pydatetime()
if prevdt == dtkwargs['start']: # end of the data
break
freq = str(compression) + ('D' if 'd' in granularity else 'T')
cdl = cdl.resample(freq).agg({'open': 'first',
'high': 'max',
'low': 'min',
'close': 'last',
'volume': 'sum'})
# don't use dt.replace. use localize
# (https://stackoverflow.com/a/1592837/2739124)
cdl = cdl.loc[
pytz.timezone(NY).localize(dtbegin):
pytz.timezone(NY).localize(dtend)
].dropna(subset=['high'])
records = cdl.reset_index().to_dict('records')
for r in records:
r['time'] = r['timestamp']
q.put(r)
q.put({}) # end of transmission
def streaming_prices(self, dataname, tmout=None):
q = queue.Queue()
kwargs = {'q': q, 'dataname': dataname, 'tmout': tmout}
t = threading.Thread(target=self._t_streaming_prices, kwargs=kwargs)
t.daemon = True
t.start()
return q
def _t_streaming_prices(self, dataname, q, tmout):
if tmout is not None:
_time.sleep(tmout)
streamer = Streamer(q,
api_key=self.p.key_id,
api_secret=self.p.secret_key,
instrument=dataname,
method='quote',
base_url=self.p.base_url,
data_stream='polygon' if self.p.usePolygon else
'alpacadatav1')
streamer.run()
def get_cash(self):
return self._cash
def get_value(self):
return self._value
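    # Map backtrader execution types to the order types expected by the Alpaca API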
_ORDEREXECS = {
bt.Order.Market: 'market',
bt.Order.Limit: 'limit',
bt.Order.Stop: 'stop',
bt.Order.StopLimit: 'stop_limit',
}
def broker_threads(self):
self.q_account = queue.Queue()
self.q_account.put(True) # force an immediate update
t = threading.Thread(target=self._t_account)
t.daemon = True
t.start()
self.q_ordercreate = queue.Queue()
t = threading.Thread(target=self._t_order_create)
t.daemon = True
t.start()
self.q_orderclose = queue.Queue()
t = threading.Thread(target=self._t_order_cancel)
t.daemon = True
t.start()
# Wait once for the values to be set
self._evt_acct.wait(self.p.account_tmout)
def _t_account(self):
while True:
try:
msg = self.q_account.get(timeout=self.p.account_tmout)
if msg is None:
break # end of thread
except queue.Empty: # tmout -> time to refresh
pass
try:
accinfo = self.oapi.get_account()
except Exception as e:
self.put_notification(e)
continue
if 'code' in accinfo._raw:
self.put_notification(accinfo.message)
continue
try:
self._cash = float(accinfo.cash)
self._value = float(accinfo.portfolio_value)
except KeyError:
pass
self._evt_acct.set()
def order_create(self, order, stopside=None, takeside=None, **kwargs):
okwargs = dict()
# different data feeds may set _name or _dataname so we cover both
okwargs['symbol'] = order.data._name if order.data._name else \
order.data._dataname
okwargs['qty'] = abs(int(order.created.size))
okwargs['side'] = 'buy' if order.isbuy() else 'sell'
okwargs['type'] = self._ORDEREXECS[order.exectype]
okwargs['time_in_force'] = "gtc"
if order.exectype != bt.Order.Market:
okwargs['limit_price'] = str(order.created.price)
if order.exectype in [bt.Order.StopLimit, bt.Order.Stop]:
okwargs['stop_price'] = order.created.pricelimit
# Not supported in the alpaca api
# if order.exectype == bt.Order.StopTrail:
# okwargs['trailingStop'] = order.trailamount
if stopside:
okwargs['stop_loss'] = {'stop_price': stopside.price}
if takeside:
okwargs['take_profit'] = {'limit_price': takeside.price}
if stopside or takeside:
okwargs['order_class'] = "bracket"
okwargs.update(**kwargs) # anything from the user
self.q_ordercreate.put((order.ref, okwargs,))
return order
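    # Illustrative shape of the kwargs assembled above for a bracket limit buy
    # order (summary comment only; the symbol and prices are made up):
    #
    #   {'symbol': 'AAPL', 'qty': 10, 'side': 'buy', 'type': 'limit',
    #    'time_in_force': 'gtc', 'limit_price': '130.0',
    #    'stop_loss': {'stop_price': 120.0},
    #    'take_profit': {'limit_price': 140.0},
    #    'order_class': 'bracket'}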
def _t_order_create(self):
def _check_if_transaction_occurred(order_id):
# a transaction may have happened and was stored. if so let's
# process it
tpending = self._transpend[order_id]
tpending.append(None) # eom marker
while True:
trans = tpending.popleft()
if trans is None:
break
self._process_transaction(order_id, trans)
while True:
try:
if self.q_ordercreate.empty():
continue
msg = self.q_ordercreate.get()
if msg is None:
continue
oref, okwargs = msg
try:
o = self.oapi.submit_order(**okwargs)
except Exception as e:
self.put_notification(e)
self.broker._reject(oref)
continue
try:
oid = o.id
except Exception:
if 'code' in o._raw:
self.put_notification(o.message)
else:
self.put_notification(
"General error from the Alpaca server")
self.broker._reject(oref)
continue
if okwargs['type'] == 'market':
self.broker._accept(oref) # taken immediately
self._orders[oref] = oid
self._ordersrev[oid] = oref # maps ids to backtrader order
_check_if_transaction_occurred(oid)
if o.legs:
index = 1
for leg in o.legs:
self._orders[oref + index] = leg.id
self._ordersrev[leg.id] = oref + index
_check_if_transaction_occurred(leg.id)
self.broker._submit(oref) # inside it submits the legs too
if okwargs['type'] == 'market':
self.broker._accept(oref) # taken immediately
except Exception as e:
print(str(e))
def order_cancel(self, order):
self.q_orderclose.put(order.ref)
return order
def _t_order_cancel(self):
while True:
oref = self.q_orderclose.get()
if oref is None:
break
oid = self._orders.get(oref, None)
if oid is None:
continue # the order is no longer there
try:
self.oapi.cancel_order(oid)
except Exception as e:
self.put_notification(
"Order not cancelled: {}, {}".format(
oid, e))
continue
self.broker._cancel(oref)
_X_ORDER_CREATE = (
'new',
'accepted',
'pending_new',
'accepted_for_bidding',
)
def _transaction(self, trans):
# Invoked from Streaming Events. May actually receive an event for an
# oid which has not yet been returned after creating an order. Hence
        # store it if not yet seen, else forward it to the processor
oid = trans['id']
if not self._ordersrev.get(oid, False):
self._transpend[oid].append(trans)
self._process_transaction(oid, trans)
_X_ORDER_FILLED = ('partially_filled', 'filled', )
def _process_transaction(self, oid, trans):
try:
oref = self._ordersrev.pop(oid)
except KeyError:
return
ttype = trans['status']
if ttype in self._X_ORDER_FILLED:
size = float(trans['filled_qty'])
if trans['side'] == 'sell':
size = -size
price = float(trans['filled_avg_price'])
self.broker._fill(oref, size, price, ttype=ttype)
elif ttype in self._X_ORDER_CREATE:
self.broker._accept(oref)
self._ordersrev[oid] = oref
elif ttype == 'calculated':
return
elif ttype == 'expired':
self.broker._expire(oref)
else: # default action ... if nothing else
print("Process transaction - Order type: {}".format(ttype))
self.broker._reject(oref)
| 33.264256 | 79 | 0.53878 |
d0d780dcb52f90157dda753776d638447530e2fd | 27,845 | py | Python | test/unit/test_jinja.py | vogt4nick/dbt | 1bd82d4914fd80fcc6fe17140e46554ad677eab0 | ["Apache-2.0"] | null | null | null | test/unit/test_jinja.py | vogt4nick/dbt | 1bd82d4914fd80fcc6fe17140e46554ad677eab0 | ["Apache-2.0"] | null | null | null | test/unit/test_jinja.py | vogt4nick/dbt | 1bd82d4914fd80fcc6fe17140e46554ad677eab0 | ["Apache-2.0"] | null | null | null |
from contextlib import contextmanager
import pytest
import unittest
import yaml
from dbt.clients.jinja import get_rendered
from dbt.clients.jinja import get_template
from dbt.clients.jinja import extract_toplevel_blocks
from dbt.exceptions import CompilationException, JinjaRenderingException
@contextmanager
def returns(value):
yield value
@contextmanager
def raises(value):
with pytest.raises(value) as exc:
yield exc
def expected_id(arg):
if isinstance(arg, list):
return '_'.join(arg)
jinja_tests = [
# strings
(
'''foo: bar''',
returns('bar'),
returns('bar'),
),
(
'''foo: "bar"''',
returns('bar'),
returns('bar'),
),
(
'''foo: "'bar'"''',
returns("'bar'"),
returns("'bar'"),
),
(
"""foo: '"bar"'""",
returns('"bar"'),
returns('"bar"'),
),
(
'''foo: "{{ 'bar' | as_text }}"''',
returns('bar'),
returns('bar'),
),
(
'''foo: "{{ 'bar' | as_bool }}"''',
returns('bar'),
raises(JinjaRenderingException),
),
(
'''foo: "{{ 'bar' | as_number }}"''',
returns('bar'),
raises(JinjaRenderingException),
),
(
'''foo: "{{ 'bar' | as_native }}"''',
returns('bar'),
returns('bar'),
),
# ints
(
'''foo: 1''',
returns('1'),
returns('1'),
),
(
'''foo: "1"''',
returns('1'),
returns('1'),
),
(
'''foo: "'1'"''',
returns("'1'"),
returns("'1'"),
),
(
"""foo: '"1"'""",
returns('"1"'),
returns('"1"'),
),
(
'''foo: "{{ 1 }}"''',
returns('1'),
returns('1'),
),
(
'''foo: "{{ '1' }}"''',
returns('1'),
returns('1'),
),
(
'''foo: "'{{ 1 }}'"''',
returns("'1'"),
returns("'1'"),
),
(
'''foo: "'{{ '1' }}'"''',
returns("'1'"),
returns("'1'"),
),
(
'''foo: "{{ 1 | as_text }}"''',
returns('1'),
returns('1'),
),
(
'''foo: "{{ 1 | as_bool }}"''',
returns('1'),
raises(JinjaRenderingException),
),
(
'''foo: "{{ 1 | as_number }}"''',
returns('1'),
returns(1),
),
(
'''foo: "{{ 1 | as_native }}"''',
returns('1'),
returns(1),
),
(
'''foo: "{{ '1' | as_text }}"''',
returns('1'),
returns('1'),
),
(
'''foo: "{{ '1' | as_bool }}"''',
returns('1'),
raises(JinjaRenderingException),
),
(
'''foo: "{{ '1' | as_number }}"''',
returns('1'),
returns(1),
),
(
'''foo: "{{ '1' | as_native }}"''',
returns('1'),
returns(1),
),
    # booleans.
    # Note the discrepancy with true vs True: `true` is recognized by jinja
    # but not by ast.literal_eval, while `True` is recognized by both.
    # For extra fun, yaml recognizes both (demonstrated right after this list).
    # unquoted True
(
'''foo: "{{ True }}"''',
returns('True'),
returns('True'),
),
(
'''foo: "{{ True | as_text }}"''',
returns('True'),
returns('True'),
),
(
'''foo: "{{ True | as_bool }}"''',
returns('True'),
returns(True),
),
(
'''foo: "{{ True | as_number }}"''',
returns('True'),
raises(JinjaRenderingException),
),
(
'''foo: "{{ True | as_native }}"''',
returns('True'),
returns(True),
),
# unquoted true
(
'''foo: "{{ true }}"''',
returns("True"),
returns("True"),
),
(
'''foo: "{{ true | as_text }}"''',
returns("True"),
returns("True"),
),
(
'''foo: "{{ true | as_bool }}"''',
returns("True"),
returns(True),
),
(
'''foo: "{{ true | as_number }}"''',
returns("True"),
raises(JinjaRenderingException),
),
(
'''foo: "{{ true | as_native }}"''',
returns("True"),
returns(True),
),
(
'''foo: "{{ 'true' | as_text }}"''',
returns("true"),
returns("true"),
),
# quoted 'true'
(
'''foo: "'{{ true }}'"''',
returns("'True'"),
returns("'True'"),
), # jinja true -> python True -> str(True) -> "True" -> quoted
(
'''foo: "'{{ true | as_text }}'"''',
returns("'True'"),
returns("'True'"),
),
(
'''foo: "'{{ true | as_bool }}'"''',
returns("'True'"),
returns("'True'"),
),
(
'''foo: "'{{ true | as_number }}'"''',
returns("'True'"),
returns("'True'"),
),
(
'''foo: "'{{ true | as_native }}'"''',
returns("'True'"),
returns("'True'"),
),
# unquoted True
(
'''foo: "{{ True }}"''',
returns('True'),
returns('True'),
),
(
'''foo: "{{ True | as_text }}"''',
returns("True"),
returns("True"),
), # True -> string 'True' -> text -> str('True') -> 'True'
(
'''foo: "{{ True | as_bool }}"''',
returns("True"),
returns(True),
),
(
'''foo: "{{ True | as_number }}"''',
returns("True"),
raises(JinjaRenderingException),
),
(
'''foo: "{{ True | as_native }}"''',
returns("True"),
returns(True),
),
# quoted 'True' within rendering
(
'''foo: "{{ 'True' | as_text }}"''',
returns("True"),
returns("True"),
),
# 'True' -> string 'True' -> text -> str('True') -> 'True'
(
'''foo: "{{ 'True' | as_bool }}"''',
returns('True'),
returns(True),
),
# quoted 'True' outside rendering
(
'''foo: "'{{ True }}'"''',
returns("'True'"),
returns("'True'"),
),
(
'''foo: "'{{ True | as_bool }}'"''',
returns("'True'"),
returns("'True'"),
),
# yaml turns 'yes' into a boolean true
(
'''foo: yes''',
returns('True'),
returns('True'),
),
(
'''foo: "yes"''',
returns('yes'),
returns('yes'),
),
# concatenation
(
'''foo: "{{ (a_int + 100) | as_native }}"''',
returns('200'),
returns(200),
),
(
'''foo: "{{ (a_str ~ 100) | as_native }}"''',
returns('100100'),
returns(100100),
),
(
'''foo: "{{( a_int ~ 100) | as_native }}"''',
returns('100100'),
returns(100100),
),
# multiple nodes -> always str
(
'''foo: "{{ a_str | as_native }}{{ a_str | as_native }}"''',
returns('100100'),
returns('100100'),
),
(
'''foo: "{{ a_int | as_native }}{{ a_int | as_native }}"''',
returns('100100'),
returns('100100'),
),
(
'''foo: "'{{ a_int | as_native }}{{ a_int | as_native }}'"''',
returns("'100100'"),
returns("'100100'"),
),
(
'''foo:''',
returns('None'),
returns('None'),
),
(
'''foo: null''',
returns('None'),
returns('None'),
),
(
'''foo: ""''',
returns(''),
returns(''),
),
(
'''foo: "{{ '' | as_native }}"''',
returns(''),
returns(''),
),
# very annoying, but jinja 'none' is yaml 'null'.
(
'''foo: "{{ none | as_native }}"''',
returns('None'),
returns(None),
),
]
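# Illustrative demonstration (not part of the original test module) of the
# true/True discrepancy described in the comment inside jinja_tests:
#
#   >>> import ast, yaml
#   >>> ast.literal_eval('True')
#   True
#   >>> ast.literal_eval('true')    # not a Python literal -> ValueError
#   Traceback (most recent call last):
#     ...
#   ValueError: malformed node or string: ...
#   >>> yaml.safe_load('true'), yaml.safe_load('True')
#   (True, True)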
@pytest.mark.parametrize(
'value,text_expectation,native_expectation',
jinja_tests,
ids=expected_id
)
def test_jinja_rendering(value, text_expectation, native_expectation):
foo_value = yaml.safe_load(value)['foo']
ctx = {
'a_str': '100',
'a_int': 100,
'b_str': 'hello'
}
with text_expectation as text_result:
assert text_result == get_rendered(foo_value, ctx, native=False)
with native_expectation as native_result:
assert native_result == get_rendered(foo_value, ctx, native=True)
class TestJinja(unittest.TestCase):
def test_do(self):
s = '{% set my_dict = {} %}\n{% do my_dict.update(a=1) %}'
template = get_template(s, {})
mod = template.make_module()
self.assertEqual(mod.my_dict, {'a': 1})
def test_regular_render(self):
s = '{{ "some_value" | as_native }}'
value = get_rendered(s, {}, native=False)
assert value == 'some_value'
s = '{{ 1991 | as_native }}'
value = get_rendered(s, {}, native=False)
assert value == '1991'
s = '{{ "some_value" | as_text }}'
value = get_rendered(s, {}, native=False)
assert value == 'some_value'
s = '{{ 1991 | as_text }}'
value = get_rendered(s, {}, native=False)
assert value == '1991'
def test_native_render(self):
s = '{{ "some_value" | as_native }}'
value = get_rendered(s, {}, native=True)
assert value == 'some_value'
s = '{{ 1991 | as_native }}'
value = get_rendered(s, {}, native=True)
assert value == 1991
s = '{{ "some_value" | as_text }}'
value = get_rendered(s, {}, native=True)
assert value == 'some_value'
s = '{{ 1991 | as_text }}'
value = get_rendered(s, {}, native=True)
assert value == '1991'
class TestBlockLexer(unittest.TestCase):
def test_basic(self):
body = '{{ config(foo="bar") }}\r\nselect * from this.that\r\n'
block_data = ' \n\r\t{%- mytype foo %}'+body+'{%endmytype -%}'
blocks = extract_toplevel_blocks(block_data, allowed_blocks={'mytype'}, collect_raw_data=False)
self.assertEqual(len(blocks), 1)
self.assertEqual(blocks[0].block_type_name, 'mytype')
self.assertEqual(blocks[0].block_name, 'foo')
self.assertEqual(blocks[0].contents, body)
self.assertEqual(blocks[0].full_block, block_data)
def test_multiple(self):
body_one = '{{ config(foo="bar") }}\r\nselect * from this.that\r\n'
body_two = (
'{{ config(bar=1)}}\r\nselect * from {% if foo %} thing '
'{% else %} other_thing {% endif %}'
)
block_data = (
' {% mytype foo %}' + body_one + '{% endmytype %}' +
'\r\n{% othertype bar %}' + body_two + '{% endothertype %}'
)
blocks = extract_toplevel_blocks(block_data, allowed_blocks={'mytype', 'othertype'}, collect_raw_data=False)
self.assertEqual(len(blocks), 2)
def test_comments(self):
body = '{{ config(foo="bar") }}\r\nselect * from this.that\r\n'
comment = '{# my comment #}'
block_data = ' \n\r\t{%- mytype foo %}'+body+'{%endmytype -%}'
blocks = extract_toplevel_blocks(comment+block_data, allowed_blocks={'mytype'}, collect_raw_data=False)
self.assertEqual(len(blocks), 1)
self.assertEqual(blocks[0].block_type_name, 'mytype')
self.assertEqual(blocks[0].block_name, 'foo')
self.assertEqual(blocks[0].contents, body)
self.assertEqual(blocks[0].full_block, block_data)
def test_evil_comments(self):
body = '{{ config(foo="bar") }}\r\nselect * from this.that\r\n'
comment = '{# external comment {% othertype bar %} select * from thing.other_thing{% endothertype %} #}'
block_data = ' \n\r\t{%- mytype foo %}'+body+'{%endmytype -%}'
blocks = extract_toplevel_blocks(comment+block_data, allowed_blocks={'mytype'}, collect_raw_data=False)
self.assertEqual(len(blocks), 1)
self.assertEqual(blocks[0].block_type_name, 'mytype')
self.assertEqual(blocks[0].block_name, 'foo')
self.assertEqual(blocks[0].contents, body)
self.assertEqual(blocks[0].full_block, block_data)
def test_nested_comments(self):
body = '{# my comment #} {{ config(foo="bar") }}\r\nselect * from {# my other comment embedding {% endmytype %} #} this.that\r\n'
block_data = ' \n\r\t{%- mytype foo %}'+body+'{% endmytype -%}'
comment = '{# external comment {% othertype bar %} select * from thing.other_thing{% endothertype %} #}'
blocks = extract_toplevel_blocks(comment+block_data, allowed_blocks={'mytype'}, collect_raw_data=False)
self.assertEqual(len(blocks), 1)
self.assertEqual(blocks[0].block_type_name, 'mytype')
self.assertEqual(blocks[0].block_name, 'foo')
self.assertEqual(blocks[0].contents, body)
self.assertEqual(blocks[0].full_block, block_data)
def test_complex_file(self):
blocks = extract_toplevel_blocks(complex_snapshot_file, allowed_blocks={'mytype', 'myothertype'}, collect_raw_data=False)
self.assertEqual(len(blocks), 3)
self.assertEqual(blocks[0].block_type_name, 'mytype')
self.assertEqual(blocks[0].block_name, 'foo')
self.assertEqual(blocks[0].full_block, '{% mytype foo %} some stuff {% endmytype %}')
self.assertEqual(blocks[0].contents, ' some stuff ')
self.assertEqual(blocks[1].block_type_name, 'mytype')
self.assertEqual(blocks[1].block_name, 'bar')
self.assertEqual(blocks[1].full_block, bar_block)
self.assertEqual(blocks[1].contents, bar_block[16:-15].rstrip())
self.assertEqual(blocks[2].block_type_name, 'myothertype')
self.assertEqual(blocks[2].block_name, 'x')
self.assertEqual(blocks[2].full_block, x_block.strip())
self.assertEqual(blocks[2].contents, x_block[len('\n{% myothertype x %}'):-len('{% endmyothertype %}\n')])
def test_peaceful_macro_coexistence(self):
body = '{# my macro #} {% macro foo(a, b) %} do a thing {%- endmacro %} {# my model #} {% a b %} test {% enda %}'
blocks = extract_toplevel_blocks(body, allowed_blocks={'macro', 'a'}, collect_raw_data=True)
self.assertEqual(len(blocks), 4)
self.assertEqual(blocks[0].full_block, '{# my macro #} ')
self.assertEqual(blocks[1].block_type_name, 'macro')
self.assertEqual(blocks[1].block_name, 'foo')
self.assertEqual(blocks[1].contents, ' do a thing')
self.assertEqual(blocks[2].full_block, ' {# my model #} ')
self.assertEqual(blocks[3].block_type_name, 'a')
self.assertEqual(blocks[3].block_name, 'b')
self.assertEqual(blocks[3].contents, ' test ')
def test_macro_with_trailing_data(self):
body = '{# my macro #} {% macro foo(a, b) %} do a thing {%- endmacro %} {# my model #} {% a b %} test {% enda %} raw data so cool'
blocks = extract_toplevel_blocks(body, allowed_blocks={'macro', 'a'}, collect_raw_data=True)
self.assertEqual(len(blocks), 5)
self.assertEqual(blocks[0].full_block, '{# my macro #} ')
self.assertEqual(blocks[1].block_type_name, 'macro')
self.assertEqual(blocks[1].block_name, 'foo')
self.assertEqual(blocks[1].contents, ' do a thing')
self.assertEqual(blocks[2].full_block, ' {# my model #} ')
self.assertEqual(blocks[3].block_type_name, 'a')
self.assertEqual(blocks[3].block_name, 'b')
self.assertEqual(blocks[3].contents, ' test ')
self.assertEqual(blocks[4].full_block, ' raw data so cool')
def test_macro_with_crazy_args(self):
body = '''{% macro foo(a, b=asdf("cool this is 'embedded'" * 3) + external_var, c)%}cool{# block comment with {% endmacro %} in it #} stuff here {% endmacro %}'''
blocks = extract_toplevel_blocks(body, allowed_blocks={'macro'}, collect_raw_data=False)
self.assertEqual(len(blocks), 1)
self.assertEqual(blocks[0].block_type_name, 'macro')
self.assertEqual(blocks[0].block_name, 'foo')
self.assertEqual(blocks[0].contents, 'cool{# block comment with {% endmacro %} in it #} stuff here ')
def test_materialization_parse(self):
body = '{% materialization xxx, default %} ... {% endmaterialization %}'
blocks = extract_toplevel_blocks(body, allowed_blocks={'materialization'}, collect_raw_data=False)
self.assertEqual(len(blocks), 1)
self.assertEqual(blocks[0].block_type_name, 'materialization')
self.assertEqual(blocks[0].block_name, 'xxx')
self.assertEqual(blocks[0].full_block, body)
body = '{% materialization xxx, adapter="other" %} ... {% endmaterialization %}'
blocks = extract_toplevel_blocks(body, allowed_blocks={'materialization'}, collect_raw_data=False)
self.assertEqual(len(blocks), 1)
self.assertEqual(blocks[0].block_type_name, 'materialization')
self.assertEqual(blocks[0].block_name, 'xxx')
self.assertEqual(blocks[0].full_block, body)
def test_nested_not_ok(self):
# we don't allow nesting same blocks
body = '{% myblock a %} {% myblock b %} {% endmyblock %} {% endmyblock %}'
with self.assertRaises(CompilationException):
extract_toplevel_blocks(body, allowed_blocks={'myblock'})
def test_incomplete_block_failure(self):
fullbody = '{% myblock foo %} {% endmyblock %}'
for length in range(len('{% myblock foo %}'), len(fullbody)-1):
body = fullbody[:length]
with self.assertRaises(CompilationException):
extract_toplevel_blocks(body, allowed_blocks={'myblock'})
def test_wrong_end_failure(self):
body = '{% myblock foo %} {% endotherblock %}'
with self.assertRaises(CompilationException):
extract_toplevel_blocks(body, allowed_blocks={'myblock', 'otherblock'})
def test_comment_no_end_failure(self):
body = '{# '
with self.assertRaises(CompilationException):
extract_toplevel_blocks(body)
def test_comment_only(self):
body = '{# myblock #}'
blocks = extract_toplevel_blocks(body)
self.assertEqual(len(blocks), 1)
blocks = extract_toplevel_blocks(body, collect_raw_data=False)
self.assertEqual(len(blocks), 0)
def test_comment_block_self_closing(self):
# test the case where a comment start looks a lot like it closes itself
# (but it doesn't in jinja!)
body = '{#} {% myblock foo %} {#}'
blocks = extract_toplevel_blocks(body, collect_raw_data=False)
self.assertEqual(len(blocks), 0)
def test_embedded_self_closing_comment_block(self):
body = '{% myblock foo %} {#}{% endmyblock %} {#}{% endmyblock %}'
blocks = extract_toplevel_blocks(body, allowed_blocks={'myblock'}, collect_raw_data=False)
self.assertEqual(len(blocks), 1)
self.assertEqual(blocks[0].full_block, body)
self.assertEqual(blocks[0].contents, ' {#}{% endmyblock %} {#}')
def test_set_statement(self):
body = '{% set x = 1 %}{% myblock foo %}hi{% endmyblock %}'
blocks = extract_toplevel_blocks(body, allowed_blocks={'myblock'}, collect_raw_data=False)
self.assertEqual(len(blocks), 1)
self.assertEqual(blocks[0].full_block, '{% myblock foo %}hi{% endmyblock %}')
def test_set_block(self):
body = '{% set x %}1{% endset %}{% myblock foo %}hi{% endmyblock %}'
blocks = extract_toplevel_blocks(body, allowed_blocks={'myblock'}, collect_raw_data=False)
self.assertEqual(len(blocks), 1)
self.assertEqual(blocks[0].full_block, '{% myblock foo %}hi{% endmyblock %}')
def test_crazy_set_statement(self):
body = '{% set x = (thing("{% myblock foo %}")) %}{% otherblock bar %}x{% endotherblock %}{% set y = otherthing("{% myblock foo %}") %}'
blocks = extract_toplevel_blocks(body, allowed_blocks={'otherblock'}, collect_raw_data=False)
self.assertEqual(len(blocks), 1)
self.assertEqual(blocks[0].full_block, '{% otherblock bar %}x{% endotherblock %}')
self.assertEqual(blocks[0].block_type_name, 'otherblock')
def test_do_statement(self):
body = '{% do thing.update() %}{% myblock foo %}hi{% endmyblock %}'
blocks = extract_toplevel_blocks(body, allowed_blocks={'myblock'}, collect_raw_data=False)
self.assertEqual(len(blocks), 1)
self.assertEqual(blocks[0].full_block, '{% myblock foo %}hi{% endmyblock %}')
def test_deceptive_do_statement(self):
body = '{% do thing %}{% myblock foo %}hi{% endmyblock %}'
blocks = extract_toplevel_blocks(body, allowed_blocks={'myblock'}, collect_raw_data=False)
self.assertEqual(len(blocks), 1)
self.assertEqual(blocks[0].full_block, '{% myblock foo %}hi{% endmyblock %}')
def test_do_block(self):
body = '{% do %}thing.update(){% enddo %}{% myblock foo %}hi{% endmyblock %}'
blocks = extract_toplevel_blocks(body, allowed_blocks={'do', 'myblock'}, collect_raw_data=False)
self.assertEqual(len(blocks), 2)
self.assertEqual(blocks[0].contents, 'thing.update()')
self.assertEqual(blocks[0].block_type_name, 'do')
self.assertEqual(blocks[1].full_block, '{% myblock foo %}hi{% endmyblock %}')
def test_crazy_do_statement(self):
body = '{% do (thing("{% myblock foo %}")) %}{% otherblock bar %}x{% endotherblock %}{% do otherthing("{% myblock foo %}") %}{% myblock x %}hi{% endmyblock %}'
blocks = extract_toplevel_blocks(body, allowed_blocks={'myblock', 'otherblock'}, collect_raw_data=False)
self.assertEqual(len(blocks), 2)
self.assertEqual(blocks[0].full_block, '{% otherblock bar %}x{% endotherblock %}')
self.assertEqual(blocks[0].block_type_name, 'otherblock')
self.assertEqual(blocks[1].full_block, '{% myblock x %}hi{% endmyblock %}')
self.assertEqual(blocks[1].block_type_name, 'myblock')
def test_awful_jinja(self):
blocks = extract_toplevel_blocks(
if_you_do_this_you_are_awful,
allowed_blocks={'snapshot', 'materialization'},
collect_raw_data=False
)
self.assertEqual(len(blocks), 2)
self.assertEqual(len([b for b in blocks if b.block_type_name == '__dbt__data']), 0)
self.assertEqual(blocks[0].block_type_name, 'snapshot')
self.assertEqual(blocks[0].contents, '\n '.join([
'''{% set x = ("{% endsnapshot %}" + (40 * '%})')) %}''',
'{# {% endsnapshot %} #}',
'{% embedded %}',
' some block data right here',
'{% endembedded %}'
]))
self.assertEqual(blocks[1].block_type_name, 'materialization')
self.assertEqual(blocks[1].contents, '\nhi\n')
def test_quoted_endblock_within_block(self):
body = '{% myblock something -%} {% set x = ("{% endmyblock %}") %} {% endmyblock %}'
blocks = extract_toplevel_blocks(body, allowed_blocks={'myblock'}, collect_raw_data=False)
self.assertEqual(len(blocks), 1)
self.assertEqual(blocks[0].block_type_name, 'myblock')
self.assertEqual(blocks[0].contents, '{% set x = ("{% endmyblock %}") %} ')
def test_docs_block(self):
body = '{% docs __my_doc__ %} asdf {# nope {% enddocs %}} #} {% enddocs %} {% docs __my_other_doc__ %} asdf "{% enddocs %}'
blocks = extract_toplevel_blocks(body, allowed_blocks={'docs'}, collect_raw_data=False)
self.assertEqual(len(blocks), 2)
self.assertEqual(blocks[0].block_type_name, 'docs')
self.assertEqual(blocks[0].contents, ' asdf {# nope {% enddocs %}} #} ')
self.assertEqual(blocks[0].block_name, '__my_doc__')
self.assertEqual(blocks[1].block_type_name, 'docs')
self.assertEqual(blocks[1].contents, ' asdf "')
self.assertEqual(blocks[1].block_name, '__my_other_doc__')
def test_docs_block_expr(self):
body = '{% docs more_doc %} asdf {{ "{% enddocs %}" ~ "}}" }}{% enddocs %}'
blocks = extract_toplevel_blocks(body, allowed_blocks={'docs'}, collect_raw_data=False)
self.assertEqual(len(blocks), 1)
self.assertEqual(blocks[0].block_type_name, 'docs')
self.assertEqual(blocks[0].contents, ' asdf {{ "{% enddocs %}" ~ "}}" }}')
self.assertEqual(blocks[0].block_name, 'more_doc')
def test_unclosed_model_quotes(self):
# test case for https://github.com/fishtown-analytics/dbt/issues/1533
body = '{% model my_model -%} select * from "something"."something_else{% endmodel %}'
blocks = extract_toplevel_blocks(body, allowed_blocks={'model'}, collect_raw_data=False)
self.assertEqual(len(blocks), 1)
self.assertEqual(blocks[0].block_type_name, 'model')
self.assertEqual(blocks[0].contents, 'select * from "something"."something_else')
self.assertEqual(blocks[0].block_name, 'my_model')
def test_if(self):
# if you conditionally define your macros/models, don't
body = '{% if true %}{% macro my_macro() %} adsf {% endmacro %}{% endif %}'
with self.assertRaises(CompilationException):
extract_toplevel_blocks(body)
def test_if_innocuous(self):
body = '{% if true %}{% something %}asdfasd{% endsomething %}{% endif %}'
blocks = extract_toplevel_blocks(body)
self.assertEqual(len(blocks), 1)
self.assertEqual(blocks[0].full_block, body)
def test_for(self):
# no for-loops over macros.
body = '{% for x in range(10) %}{% macro my_macro() %} adsf {% endmacro %}{% endfor %}'
with self.assertRaises(CompilationException):
extract_toplevel_blocks(body)
def test_for_innocuous(self):
# no for-loops over macros.
body = '{% for x in range(10) %}{% something my_something %} adsf {% endsomething %}{% endfor %}'
blocks = extract_toplevel_blocks(body)
self.assertEqual(len(blocks), 1)
self.assertEqual(blocks[0].full_block, body)
def test_endif(self):
body = '{% snapshot foo %}select * from thing{% endsnapshot%}{% endif %}'
with self.assertRaises(CompilationException) as err:
extract_toplevel_blocks(body)
self.assertIn('Got an unexpected control flow end tag, got endif but never saw a preceeding if (@ 1:53)', str(err.exception))
def test_if_endfor(self):
body = '{% if x %}...{% endfor %}{% endif %}'
with self.assertRaises(CompilationException) as err:
extract_toplevel_blocks(body)
self.assertIn('Got an unexpected control flow end tag, got endfor but expected endif next (@ 1:13)', str(err.exception))
def test_if_endfor_newlines(self):
body = '{% if x %}\n ...\n {% endfor %}\n{% endif %}'
with self.assertRaises(CompilationException) as err:
extract_toplevel_blocks(body)
self.assertIn('Got an unexpected control flow end tag, got endfor but expected endif next (@ 3:4)', str(err.exception))
bar_block = '''{% mytype bar %}
{# a comment
that inside it has
{% mytype baz %}
{% endmyothertype %}
{% endmytype %}
{% endmytype %}
{#
{% endmytype %}#}
some other stuff
{%- endmytype%}'''
x_block = '''
{% myothertype x %}
before
{##}
and after
{% endmyothertype %}
'''
complex_snapshot_file = '''
{#some stuff {% mytype foo %} #}
{% mytype foo %} some stuff {% endmytype %}
'''+bar_block+x_block
if_you_do_this_you_are_awful = '''
{#} here is a comment with a block inside {% block x %} asdf {% endblock %} {#}
{% do
set('foo="bar"')
%}
{% set x = ("100" + "hello'" + '%}') %}
{% snapshot something -%}
{% set x = ("{% endsnapshot %}" + (40 * '%})')) %}
{# {% endsnapshot %} #}
{% embedded %}
some block data right here
{% endembedded %}
{%- endsnapshot %}
{% raw %}
{% set x = SYNTAX ERROR}
{% endraw %}
{% materialization whatever, adapter='thing' %}
hi
{% endmaterialization %}
'''
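# Illustrative recap (not part of the original test module) of the
# extract_toplevel_blocks API exercised throughout TestBlockLexer above:
#
#   >>> blocks = extract_toplevel_blocks(
#   ...     '{% mytype foo %}hi{% endmytype %}',
#   ...     allowed_blocks={'mytype'}, collect_raw_data=False)
#   >>> blocks[0].block_type_name, blocks[0].block_name, blocks[0].contents
#   ('mytype', 'foo', 'hi')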
| 35.561941 | 170 | 0.560783 |
cfc419de9bb47e9ef4174692fcb76c99137bacb8 | 1,208 | py | Python | analysis/visualize.py | metaShen/AlphaPose | 662a72e4831778f97d3845273c63782d4b5236af | ["Apache-2.0"] | 1 | 2021-05-13T04:50:54.000Z | 2021-05-13T04:50:54.000Z | analysis/visualize.py | metaShen/AlphaPose | 662a72e4831778f97d3845273c63782d4b5236af | ["Apache-2.0"] | 5 | 2021-03-19T01:46:35.000Z | 2022-03-11T23:52:41.000Z | analysis/visualize.py | metaStor/AlphaPose | 662a72e4831778f97d3845273c63782d4b5236af | ["Apache-2.0"] | null | null | null |
import json
from pyecharts import Line, Overlap
path1 = r'/home/meta/software/AlphaPose/examples/demo/res1/video1.json'
path2 = r'/home/meta/software/AlphaPose/examples/demo/res2/video2.json'
path3 = r'/home/meta/software/AlphaPose/examples/demo/res3/video3.json'
def read(path):
with open(path, 'r') as f:
return json.load(f)
def extract(data):
x = []
for i in data:
x.append(i['keypoints'][36])
return x
x1, x2, x3 = extract(read(path1)), extract(read(path2)), extract(read(path3))
line = Line('line')
line.add('1', list(range(len(x1))), x1, mark_point=['max', 'min'],
is_datazoom_show=True, is_datazoom_extra_show=True, datazoom_type='both')
line.add('2', list(range(len(x2))), x2, mark_point=['max', 'min'],
is_datazoom_show=True, is_datazoom_extra_show=True, datazoom_type='both')
line.add('3', list(range(len(x3))), x3, mark_point=['max', 'min'],
is_datazoom_show=True, is_datazoom_extra_show=True, datazoom_type='both')
# line.add('y-coordinate change', attr, y, mark_point=['max', 'min'],
#          is_datazoom_show=True, is_datazoom_extra_show=True, datazoom_type='both')
line.render('./examples/demo/res1/video1.html')
line.show_config()
| 30.2 | 84 | 0.683775 |
ed02e47668fb3b7187e6d1e016038e9c519eca30 | 2,469 | py | Python | pytorchtools.py | jeffreyng99/early-stopping-pytorch | 2e54511f24386310fd66aac737e14fa45305c618 | ["MIT"] | null | null | null | pytorchtools.py | jeffreyng99/early-stopping-pytorch | 2e54511f24386310fd66aac737e14fa45305c618 | ["MIT"] | null | null | null | pytorchtools.py | jeffreyng99/early-stopping-pytorch | 2e54511f24386310fd66aac737e14fa45305c618 | ["MIT"] | null | null | null |
import numpy as np
import torch
class EarlyStopping:
"""Early stops the training if validation loss doesn't improve after a given patience."""
def __init__(self, patience=7, verbose=False, delta=0, general_checkpoint=False):
"""
Args:
patience (int): How long to wait after last time validation loss improved.
Default: 7
verbose (bool): If True, prints a message for each validation loss improvement.
Default: False
delta (float): Minimum change in the monitored quantity to qualify as an improvement.
Default: 0
            general_checkpoint (bool): Saves additional information that can be used to resume training.
Default: False
"""
self.patience = patience
self.verbose = verbose
self.counter = 0
self.best_score = None
self.early_stop = False
self.val_loss_min = np.Inf
self.delta = delta
self.general_checkpoint = general_checkpoint
def __call__(self, val_loss, model, epoch=None, optimizer=None):
score = -val_loss
if self.best_score is None:
self.best_score = score
self.save_checkpoint(val_loss, model)
elif score < self.best_score + self.delta:
self.counter += 1
print(f'EarlyStopping counter: {self.counter} out of {self.patience}')
if self.counter >= self.patience:
self.early_stop = True
else:
self.best_score = score
self.save_checkpoint(val_loss, model, epoch=epoch, optimizer=optimizer)
self.counter = 0
def save_checkpoint(self, val_loss, model, epoch=None, optimizer=None):
        '''Saves model when the validation loss decreases.'''
if self.verbose:
print(f'Validation loss decreased ({self.val_loss_min:.6f} --> {val_loss:.6f}). Saving model ...')
if self.general_checkpoint and epoch is not None and optimizer is not None:
torch.save({
'epoch': epoch,
'model_state_dict': model.state_dict(),
'optimizer_state_dict': optimizer.state_dict(),
'loss': val_loss
}, 'checkpoint.tar')
else:
torch.save(model.state_dict(), 'checkpoint.pt')
self.val_loss_min = val_loss
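# Minimal usage sketch (not part of the original file). `model`, `optimizer`,
# `train_one_epoch` and `evaluate` are hypothetical stand-ins for the caller's
# own training code; only the EarlyStopping API above is assumed.
#
#   early_stopping = EarlyStopping(patience=10, verbose=True)
#   for epoch in range(max_epochs):
#       train_one_epoch(model, optimizer)
#       val_loss = evaluate(model)
#       early_stopping(val_loss, model, epoch=epoch, optimizer=optimizer)
#       if early_stopping.early_stop:
#           print("Early stopping")
#           break
#   model.load_state_dict(torch.load('checkpoint.pt'))  # reload best weights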
| 42.568966 | 111 | 0.581612 |
bc90f80c2efb6b79c8ffaadf69b3f5187ccc4c7f | 263 | py | Python | 02-python-oo/aula-05/exemplos/sync.py | opensanca/trilha-python | 9ffadd266e22a920a3c1acfdbc6a5a4645fce9d6 | ["MIT"] | 47 | 2016-05-19T22:37:18.000Z | 2022-02-22T02:34:18.000Z | 02-python-oo/aula-05/exemplos/sync.py | opensanca/trilha-python | 9ffadd266e22a920a3c1acfdbc6a5a4645fce9d6 | ["MIT"] | 21 | 2016-05-20T12:35:25.000Z | 2016-07-26T00:23:33.000Z | 02-python-oo/aula-05/exemplos/sync.py | lamenezes/python-intro | 9ffadd266e22a920a3c1acfdbc6a5a4645fce9d6 | ["MIT"] | 25 | 2016-05-19T22:52:32.000Z | 2022-01-08T15:15:36.000Z |
import requests
if __name__ == '__main__':
import sys
verbose = '-v' in sys.argv
for _ in range(25):
response = requests.get('http://httpbin.org/get')
print(response.status_code)
if verbose:
print(response.text)
| 20.230769 | 57 | 0.596958 |
28beb6760d11514fe41d5c6c4b65e5f1772b84f8 | 5,482 | py | Python | tensorflow/python/keras/initializers.py | elielhojman/tensorflow | 163aae337c875efce2518c3cd0fecb61968fe408 | ["Apache-2.0"] | 4 | 2019-04-12T00:49:38.000Z | 2020-06-12T07:12:00.000Z | tensorflow/python/keras/initializers.py | elielhojman/tensorflow | 163aae337c875efce2518c3cd0fecb61968fe408 | ["Apache-2.0"] | 1 | 2020-11-25T21:29:56.000Z | 2021-06-11T05:31:49.000Z | tensorflow/python/keras/initializers.py | elielhojman/tensorflow | 163aae337c875efce2518c3cd0fecb61968fe408 | ["Apache-2.0"] | 4 | 2019-11-11T13:46:27.000Z | 2020-03-14T05:36:53.000Z |
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Keras initializer classes (soon to be replaced with core TF initializers).
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import six
from tensorflow.python.keras.utils.generic_utils import deserialize_keras_object
from tensorflow.python.keras.utils.generic_utils import serialize_keras_object
from tensorflow.python.ops.init_ops import Constant
from tensorflow.python.ops.init_ops import glorot_normal_initializer
from tensorflow.python.ops.init_ops import glorot_uniform_initializer
from tensorflow.python.ops.init_ops import Identity
from tensorflow.python.ops.init_ops import Initializer # pylint: disable=unused-import
from tensorflow.python.ops.init_ops import Ones
from tensorflow.python.ops.init_ops import Orthogonal
from tensorflow.python.ops.init_ops import RandomNormal
from tensorflow.python.ops.init_ops import RandomUniform
from tensorflow.python.ops.init_ops import TruncatedNormal
from tensorflow.python.ops.init_ops import VarianceScaling
from tensorflow.python.ops.init_ops import Zeros
from tensorflow.python.util.tf_export import tf_export
@tf_export('keras.initializers.lecun_normal')
def lecun_normal(seed=None):
"""LeCun normal initializer.
It draws samples from a truncated normal distribution centered on 0
with `stddev = sqrt(1 / fan_in)`
where `fan_in` is the number of input units in the weight tensor.
Arguments:
seed: A Python integer. Used to seed the random generator.
Returns:
An initializer.
References:
- [Self-Normalizing Neural Networks](https://arxiv.org/abs/1706.02515)
- [Efficient
Backprop](http://yann.lecun.com/exdb/publis/pdf/lecun-98b.pdf)
"""
return VarianceScaling(
scale=1., mode='fan_in', distribution='normal', seed=seed)
@tf_export('keras.initializers.lecun_uniform')
def lecun_uniform(seed=None):
"""LeCun uniform initializer.
It draws samples from a uniform distribution within [-limit, limit]
where `limit` is `sqrt(3 / fan_in)`
where `fan_in` is the number of input units in the weight tensor.
Arguments:
seed: A Python integer. Used to seed the random generator.
Returns:
An initializer.
References:
LeCun 98, Efficient Backprop,
http://yann.lecun.com/exdb/publis/pdf/lecun-98b.pdf
"""
return VarianceScaling(
scale=1., mode='fan_in', distribution='uniform', seed=seed)
@tf_export('keras.initializers.he_normal')
def he_normal(seed=None):
"""He normal initializer.
It draws samples from a truncated normal distribution centered on 0
with `stddev = sqrt(2 / fan_in)`
where `fan_in` is the number of input units in the weight tensor.
Arguments:
seed: A Python integer. Used to seed the random generator.
Returns:
An initializer.
References:
He et al., http://arxiv.org/abs/1502.01852
"""
return VarianceScaling(
scale=2., mode='fan_in', distribution='normal', seed=seed)
@tf_export('keras.initializers.he_uniform')
def he_uniform(seed=None):
"""He uniform variance scaling initializer.
It draws samples from a uniform distribution within [-limit, limit]
where `limit` is `sqrt(6 / fan_in)`
where `fan_in` is the number of input units in the weight tensor.
Arguments:
seed: A Python integer. Used to seed the random generator.
Returns:
An initializer.
References:
He et al., http://arxiv.org/abs/1502.01852
"""
return VarianceScaling(
scale=2., mode='fan_in', distribution='uniform', seed=seed)
# Compatibility aliases
# pylint: disable=invalid-name
zero = zeros = Zeros
one = ones = Ones
constant = Constant
uniform = random_uniform = RandomUniform
normal = random_normal = RandomNormal
truncated_normal = TruncatedNormal
identity = Identity
orthogonal = Orthogonal
glorot_normal = glorot_normal_initializer
glorot_uniform = glorot_uniform_initializer
# pylint: enable=invalid-name
# Utility functions
@tf_export('keras.initializers.serialize')
def serialize(initializer):
return serialize_keras_object(initializer)
@tf_export('keras.initializers.deserialize')
def deserialize(config, custom_objects=None):
return deserialize_keras_object(
config,
module_objects=globals(),
custom_objects=custom_objects,
printable_module_name='initializer')
@tf_export('keras.initializers.get')
def get(identifier):
if identifier is None:
return None
if isinstance(identifier, dict):
return deserialize(identifier)
elif isinstance(identifier, six.string_types):
config = {'class_name': str(identifier), 'config': {}}
return deserialize(config)
elif callable(identifier):
return identifier
else:
raise ValueError('Could not interpret initializer identifier: ' +
str(identifier))
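# Minimal usage sketch (illustrative addition, not part of the original
# module). The helpers above are thin wrappers around VarianceScaling, and
# `get` also resolves plain string identifiers against this module's globals:
#
#   init = lecun_normal(seed=0)      # VarianceScaling(scale=1., mode='fan_in', distribution='normal')
#   same_kind = get('lecun_normal')  # string identifier -> the same kind of initializer
#   layer = tf.keras.layers.Dense(10, kernel_initializer=he_uniform(seed=1))  # assumes tf.keras is importable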
| 31.325714 | 87 | 0.743889 |
4cfc4c8c2894bc07ca1129483810a186a5d4a6d5 | 62,592 | py | Python | run_list.py | DocOneMedical/bioasq-biobert | ac0383f223a5968443c38b877207011588503de0 | ["Apache-2.0"] | null | null | null | run_list.py | DocOneMedical/bioasq-biobert | ac0383f223a5968443c38b877207011588503de0 | ["Apache-2.0"] | null | null | null | run_list.py | DocOneMedical/bioasq-biobert | ac0383f223a5968443c38b877207011588503de0 | ["Apache-2.0"] | null | null | null |
# coding=utf-8
# Copyright 2018 The Google AI Language Team Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Run BERT on SQuAD 1.1 and SQuAD 2.0."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
from datetime import datetime
import json
import math
import os
import random
import modeling
import optimization
import tokenization
import six
import tensorflow as tf
flags = tf.flags
FLAGS = flags.FLAGS
# Required parameters
flags.DEFINE_string("bert_config_file", None,
"The config json file corresponding to the pre-trained BERT model. "
"This specifies the model architecture.")
flags.DEFINE_string("vocab_file", None,
"The vocabulary file that the BERT model was trained on.")
flags.DEFINE_string("output_dir", None,
"The output directory where the model checkpoints will be written.")
# Other parameters
flags.DEFINE_string("train_file", None,
"SQuAD json for training. E.g., train-v1.1.json")
flags.DEFINE_string("predict_file", None,
"SQuAD json for predictions. E.g., dev-v1.1.json or test-v1.1.json")
flags.DEFINE_string("init_checkpoint", None,
"Initial checkpoint (usually from a pre-trained BERT model).")
flags.DEFINE_bool("do_lower_case", True,
"Whether to lower case the input text. Should be True for uncased "
"models and False for cased models.")
flags.DEFINE_integer("max_seq_length", 384,
"The maximum total input sequence length after WordPiece tokenization. "
"Sequences longer than this will be truncated, and sequences shorter "
"than this will be padded.")
flags.DEFINE_integer("doc_stride", 128,
"When splitting up a long document into chunks, how much stride to "
"take between chunks.")
flags.DEFINE_integer("max_query_length", 64,
"The maximum number of tokens for the question. Questions longer than "
"this will be truncated to this length.")
flags.DEFINE_bool("do_train", False, "Whether to run training.")
flags.DEFINE_bool("do_predict", False, "Whether to run eval on the dev set.")
flags.DEFINE_integer("train_batch_size", 12, "Total batch size for training.")
flags.DEFINE_integer("predict_batch_size", 8, "Total batch size for predictions.")
flags.DEFINE_float("learning_rate", 1e-5, "The initial learning rate for Adam.")
flags.DEFINE_float("num_train_epochs", 5.0, "Total number of training epochs to perform.")
flags.DEFINE_float("warmup_proportion", 0.1,
"Proportion of training to perform linear learning rate warmup for. "
"E.g., 0.1 = 10% of training.")
flags.DEFINE_integer("save_checkpoints_steps", 1000,
"How often to save the model checkpoint.")
flags.DEFINE_integer("iterations_per_loop", 1000,
"How many steps to make in each estimator call.")
flags.DEFINE_integer("n_best_size", 20,
"The total number of n-best predictions to generate in the "
"nbest_predictions.json output file.")
flags.DEFINE_integer("max_answer_length", 30,
"The maximum length of an answer that can be generated. This is needed "
"because the start and end predictions are not conditioned on one another.")
flags.DEFINE_bool("use_tpu", False, "Whether to use TPU or GPU/CPU.")
tf.flags.DEFINE_string("tpu_name", None,
"The Cloud TPU to use for training. This should be either the name "
"used when creating the Cloud TPU, or a grpc://ip.address.of.tpu:8470 "
"url.")
tf.flags.DEFINE_string("tpu_zone", None,
"[Optional] GCE zone where the Cloud TPU is located in. If not "
"specified, we will attempt to automatically detect the GCE project from "
"metadata.")
tf.flags.DEFINE_string("gcp_project", None,
"[Optional] Project name for the Cloud TPU-enabled project. If not "
"specified, we will attempt to automatically detect the GCE project from "
"metadata.")
tf.flags.DEFINE_string("master", None, "[Optional] TensorFlow master URL.")
flags.DEFINE_integer("num_tpu_cores", 8,
"Only used if `use_tpu` is True. Total number of TPU cores to use.")
flags.DEFINE_bool("verbose_logging", False,
"If true, all of the warnings related to data processing will be printed. "
"A number of warnings are expected for a normal SQuAD evaluation.")
flags.DEFINE_bool("version_2_with_negative", False,
"If true, the SQuAD examples contain some that do not have an answer.")
flags.DEFINE_float("null_score_diff_threshold", 0.0,
"If null_score - best_non_null is greater than the threshold predict null.")
flags.DEFINE_integer("input_shuffle_seed", 12345, "")
# Docone additions
flags.DEFINE_bool("docone", False, "If true, use the docone set for prediction.")
flags.DEFINE_string("docone_directory", None, "SQuAD json for training. E.g., train-v1.1.json")
flags.DEFINE_integer("docone_chunk", None, "chunk to start at")
class SquadExample(object):
"""A single training/test example for simple sequence classification.
For examples without an answer, the start and end position are -1.
"""
def __init__(self,
qas_id,
question_text,
doc_tokens,
orig_answer_text=None,
start_position=None,
end_position=None,
is_impossible=False):
self.qas_id = qas_id
self.question_text = question_text
self.doc_tokens = doc_tokens
self.orig_answer_text = orig_answer_text
self.start_position = start_position
self.end_position = end_position
self.is_impossible = is_impossible
def __str__(self):
return self.__repr__()
def __repr__(self):
s = ""
s += "qas_id: %s" % (tokenization.printable_text(self.qas_id))
s += ", question_text: %s" % (
tokenization.printable_text(self.question_text))
s += ", doc_tokens: [%s]" % (" ".join(self.doc_tokens))
if self.start_position:
s += ", start_position: %d" % (self.start_position)
if self.start_position:
s += ", end_position: %d" % (self.end_position)
if self.start_position:
s += ", is_impossible: %r" % (self.is_impossible)
return s
class DoconeIter:
def __init__(self, docone_data_path: str):
if not tf.io.gfile.exists(docone_data_path):
raise ValueError(f"Data folder doesn't exist {docone_data_path}")
self.data_path = docone_data_path
def iterate(self):
files = tf.io.gfile.glob(f"{self.data_path}/*")
for js in files:
with tf.io.gfile.GFile(js, 'r') as js_handle:
examples = self.load_examples_from_json(json.load(js_handle))
yield examples
def load_examples_from_json(self, js_data: dict):
paragraphs = js_data["data"][0]["paragraphs"]
examples = []
# Basically just used for prediction now.
start_position = None
end_position = None
orig_answer_text = None
is_impossible = False
for p in paragraphs:
text = p["context"]
questions = p["qas"]
doc_tokens = self.process_text(text)
for q in questions:
example = SquadExample(
qas_id=q['id'],
question_text=q['question'],
doc_tokens=doc_tokens,
orig_answer_text=orig_answer_text,
start_position=start_position,
end_position=end_position,
is_impossible=is_impossible)
examples.append(example)
return examples
@staticmethod
def process_text(paragraph_text, is_bioasq: bool = True):
def is_whitespace(c):
if c == " " or c == "\t" or c == "\r" or c == "\n" or ord(c) == 0x202F:
return True
return False
doc_tokens = []
char_to_word_offset = []
prev_is_whitespace = True
if is_bioasq:
            paragraph_text = paragraph_text.replace('/', ' ')  # str.replace returns a new string; keep the result
for c in paragraph_text:
if is_whitespace(c):
prev_is_whitespace = True
else:
if prev_is_whitespace:
doc_tokens.append(c)
else:
doc_tokens[-1] += c
prev_is_whitespace = False
char_to_word_offset.append(len(doc_tokens) - 1)
return doc_tokens
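# Illustrative sketch (not part of the original file): consuming DoconeIter.
# The bucket path below is hypothetical; iterate() yields one list of
# SquadExample objects per JSON chunk found under the directory.
def _example_iterate_docone(docone_directory="gs://my-bucket/docone_chunks"):
    for chunk_examples in DoconeIter(docone_directory).iterate():
        for example in chunk_examples:
            print(example.qas_id, example.question_text)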
class InputFeatures(object):
"""A single set of features of data."""
def __init__(self,
unique_id,
example_index,
doc_span_index,
tokens,
token_to_orig_map,
token_is_max_context,
input_ids,
input_mask,
segment_ids,
start_position=None,
end_position=None,
is_impossible=None):
self.unique_id = unique_id
self.example_index = example_index
self.doc_span_index = doc_span_index
self.tokens = tokens
self.token_to_orig_map = token_to_orig_map
self.token_is_max_context = token_is_max_context
self.input_ids = input_ids
self.input_mask = input_mask
self.segment_ids = segment_ids
self.start_position = start_position
self.end_position = end_position
self.is_impossible = is_impossible
def read_squad_examples(input_file, is_training):
"""Read a SQuAD json file into a list of SquadExample."""
is_bioasq = True
with tf.gfile.Open(input_file, "r") as reader:
input_data = json.load(reader)["data"]
def is_whitespace(c):
if c == " " or c == "\t" or c == "\r" or c == "\n" or ord(c) == 0x202F:
return True
return False
examples = []
for entry in input_data:
for paragraph in entry["paragraphs"]:
paragraph_text = paragraph["context"]
doc_tokens = []
char_to_word_offset = []
prev_is_whitespace = True
if is_bioasq:
        paragraph_text = paragraph_text.replace('/', ' ')  # keep the result; str.replace is not in-place
for c in paragraph_text:
if is_whitespace(c):
prev_is_whitespace = True
else:
if prev_is_whitespace:
doc_tokens.append(c)
else:
doc_tokens[-1] += c
prev_is_whitespace = False
char_to_word_offset.append(len(doc_tokens) - 1)
for qa in paragraph["qas"]:
qas_id = qa["id"]
question_text = qa["question"]
start_position = None
end_position = None
orig_answer_text = None
is_impossible = False
if is_training:
if FLAGS.version_2_with_negative:
is_impossible = qa["is_impossible"]
if (len(qa["answers"]) != 1) and (not is_impossible):
raise ValueError(
"For training, each question should have exactly 1 answer.")
if not is_impossible:
answer = qa["answers"][0]
orig_answer_text = answer["text"]
answer_offset = answer["answer_start"]
answer_length = len(orig_answer_text)
start_position = char_to_word_offset[answer_offset]
end_position = char_to_word_offset[answer_offset + answer_length - 1]
# Only add answers where the text can be exactly recovered from the
# document. If this CAN'T happen it's likely due to weird Unicode
# stuff so we will just skip the example.
#
# Note that this means for training mode, every example is NOT
# guaranteed to be preserved.
actual_text = " ".join(doc_tokens[start_position:(end_position + 1)])
cleaned_answer_text = " ".join(tokenization.whitespace_tokenize(orig_answer_text))
if actual_text.find(cleaned_answer_text) == -1:
tf.logging.warning("Could not find answer: '%s' vs. '%s'", actual_text, cleaned_answer_text)
continue
else:
start_position = -1
end_position = -1
orig_answer_text = ""
example = SquadExample(
qas_id=qas_id,
question_text=question_text,
doc_tokens=doc_tokens,
orig_answer_text=orig_answer_text,
start_position=start_position,
end_position=end_position,
is_impossible=is_impossible)
examples.append(example)
return examples
def convert_examples_to_features(examples, tokenizer, max_seq_length,
doc_stride, max_query_length, is_training,
output_fn):
"""Loads a data file into a list of `InputBatch`s."""
unique_id = 1000000000
for (example_index, example) in enumerate(examples):
query_tokens = tokenizer.tokenize(example.question_text)
if len(query_tokens) > max_query_length:
query_tokens = query_tokens[0:max_query_length]
tok_to_orig_index = []
orig_to_tok_index = []
all_doc_tokens = []
for (i, token) in enumerate(example.doc_tokens):
orig_to_tok_index.append(len(all_doc_tokens))
sub_tokens = tokenizer.tokenize(token)
for sub_token in sub_tokens:
tok_to_orig_index.append(i)
all_doc_tokens.append(sub_token)
tok_start_position = None
tok_end_position = None
if is_training and example.is_impossible:
tok_start_position = -1
tok_end_position = -1
if is_training and not example.is_impossible:
tok_start_position = orig_to_tok_index[example.start_position]
if example.end_position < len(example.doc_tokens) - 1:
tok_end_position = orig_to_tok_index[example.end_position + 1] - 1
else:
tok_end_position = len(all_doc_tokens) - 1
(tok_start_position, tok_end_position) = _improve_answer_span(
all_doc_tokens, tok_start_position, tok_end_position, tokenizer,
example.orig_answer_text)
# The -3 accounts for [CLS], [SEP] and [SEP]
max_tokens_for_doc = max_seq_length - len(query_tokens) - 3
# We can have documents that are longer than the maximum sequence length.
# To deal with this we do a sliding window approach, where we take chunks
# of the up to our max length with a stride of `doc_stride`.
_DocSpan = collections.namedtuple( # pylint: disable=invalid-name
"DocSpan", ["start", "length"])
doc_spans = []
start_offset = 0
while start_offset < len(all_doc_tokens):
length = len(all_doc_tokens) - start_offset
if length > max_tokens_for_doc:
length = max_tokens_for_doc
doc_spans.append(_DocSpan(start=start_offset, length=length))
if start_offset + length == len(all_doc_tokens):
break
start_offset += min(length, doc_stride)
for (doc_span_index, doc_span) in enumerate(doc_spans):
tokens = []
token_to_orig_map = {}
token_is_max_context = {}
segment_ids = []
tokens.append("[CLS]")
segment_ids.append(0)
for token in query_tokens:
tokens.append(token)
segment_ids.append(0)
tokens.append("[SEP]")
segment_ids.append(0)
for i in range(doc_span.length):
split_token_index = doc_span.start + i
token_to_orig_map[len(tokens)] = tok_to_orig_index[split_token_index]
is_max_context = _check_is_max_context(doc_spans, doc_span_index, split_token_index)
token_is_max_context[len(tokens)] = is_max_context
tokens.append(all_doc_tokens[split_token_index])
segment_ids.append(1)
tokens.append("[SEP]")
segment_ids.append(1)
input_ids = tokenizer.convert_tokens_to_ids(tokens)
# The mask has 1 for real tokens and 0 for padding tokens. Only real
# tokens are attended to.
input_mask = [1] * len(input_ids)
# Zero-pad up to the sequence length.
while len(input_ids) < max_seq_length:
input_ids.append(0)
input_mask.append(0)
segment_ids.append(0)
assert len(input_ids) == max_seq_length
assert len(input_mask) == max_seq_length
assert len(segment_ids) == max_seq_length
start_position = None
end_position = None
if is_training and not example.is_impossible:
# For training, if our document chunk does not contain an annotation
# we throw it out, since there is nothing to predict.
doc_start = doc_span.start
doc_end = doc_span.start + doc_span.length - 1
out_of_span = False
if not (tok_start_position >= doc_start and tok_end_position <= doc_end):
out_of_span = True
if out_of_span:
start_position = 0
end_position = 0
else:
doc_offset = len(query_tokens) + 2
start_position = tok_start_position - doc_start + doc_offset
end_position = tok_end_position - doc_start + doc_offset
if is_training and example.is_impossible:
start_position = 0
end_position = 0
if example_index < 20:
tf.logging.info("*** Example ***")
tf.logging.info("unique_id: %s" % (unique_id))
tf.logging.info("example_index: %s" % (example_index))
tf.logging.info("doc_span_index: %s" % (doc_span_index))
tf.logging.info("tokens: %s" % " ".join(
[tokenization.printable_text(x) for x in tokens]))
tf.logging.info("token_to_orig_map: %s" % " ".join(
["%d:%d" % (x, y) for (x, y) in six.iteritems(token_to_orig_map)]))
tf.logging.info("token_is_max_context: %s" % " ".join([
"%d:%s" % (x, y) for (x, y) in six.iteritems(token_is_max_context)
]))
tf.logging.info("input_ids: %s" % " ".join([str(x) for x in input_ids]))
tf.logging.info(
"input_mask: %s" % " ".join([str(x) for x in input_mask]))
tf.logging.info(
"segment_ids: %s" % " ".join([str(x) for x in segment_ids]))
if is_training and example.is_impossible:
tf.logging.info("impossible example")
if is_training and not example.is_impossible:
answer_text = " ".join(tokens[start_position:(end_position + 1)])
tf.logging.info("start_position: %d" % (start_position))
tf.logging.info("end_position: %d" % (end_position))
tf.logging.info(
"answer: %s" % (tokenization.printable_text(answer_text)))
feature = InputFeatures(
unique_id=unique_id,
example_index=example_index,
doc_span_index=doc_span_index,
tokens=tokens,
token_to_orig_map=token_to_orig_map,
token_is_max_context=token_is_max_context,
input_ids=input_ids,
input_mask=input_mask,
segment_ids=segment_ids,
start_position=start_position,
end_position=end_position,
is_impossible=example.is_impossible)
# Run callback
output_fn(feature)
unique_id += 1
def _improve_answer_span(doc_tokens, input_start, input_end, tokenizer, orig_answer_text):
"""Returns tokenized answer spans that better match the annotated answer."""
# The SQuAD annotations are character based. We first project them to
# whitespace-tokenized words. But then after WordPiece tokenization, we can
# often find a "better match". For example:
#
# Question: What year was John Smith born?
# Context: The leader was John Smith (1895-1943).
# Answer: 1895
#
# The original whitespace-tokenized answer will be "(1895-1943).". However
# after tokenization, our tokens will be "( 1895 - 1943 ) .". So we can match
# the exact answer, 1895.
#
# However, this is not always possible. Consider the following:
#
  # Question: What country is the top exporter of electronics?
  # Context: The Japanese electronics industry is the largest in the world.
# Answer: Japan
#
# In this case, the annotator chose "Japan" as a character sub-span of
# the word "Japanese". Since our WordPiece tokenizer does not split
# "Japanese", we just use "Japanese" as the annotation. This is fairly rare
# in SQuAD, but does happen.
tok_answer_text = " ".join(tokenizer.tokenize(orig_answer_text))
for new_start in range(input_start, input_end + 1):
for new_end in range(input_end, new_start - 1, -1):
text_span = " ".join(doc_tokens[new_start:(new_end + 1)])
if text_span == tok_answer_text:
return (new_start, new_end)
return (input_start, input_end)
def _check_is_max_context(doc_spans, cur_span_index, position):
"""Check if this is the 'max context' doc span for the token."""
# Because of the sliding window approach taken to scoring documents, a single
# token can appear in multiple documents. E.g.
# Doc: the man went to the store and bought a gallon of milk
# Span A: the man went to the
# Span B: to the store and bought
# Span C: and bought a gallon of
# ...
#
# Now the word 'bought' will have two scores from spans B and C. We only
# want to consider the score with "maximum context", which we define as
# the *minimum* of its left and right context (the *sum* of left and
# right context will always be the same, of course).
#
# In the example the maximum context for 'bought' would be span C since
# it has 1 left context and 3 right context, while span B has 4 left context
# and 0 right context.
best_score = None
best_span_index = None
for (span_index, doc_span) in enumerate(doc_spans):
end = doc_span.start + doc_span.length - 1
if position < doc_span.start:
continue
if position > end:
continue
num_left_context = position - doc_span.start
num_right_context = end - position
score = min(num_left_context, num_right_context) + 0.01 * doc_span.length
if best_score is None or score > best_score:
best_score = score
best_span_index = span_index
return cur_span_index == best_span_index
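# Worked example for the comment above (illustrative addition, not part of the
# original file): for spans A, B and C and token position 7 ('bought'), the
# score min(left, right) + 0.01 * length is 0.05 in span B and 1.05 in span C,
# so span C (index 2) is the "max context" span.
def _example_check_is_max_context():
  DocSpan = collections.namedtuple("DocSpan", ["start", "length"])
  spans = [DocSpan(0, 5), DocSpan(3, 5), DocSpan(6, 5)]  # spans A, B, C above
  return [_check_is_max_context(spans, i, 7) for i in range(3)]  # [False, False, True]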
def create_model(bert_config, is_training, input_ids, input_mask, segment_ids,
use_one_hot_embeddings):
"""Creates a classification model."""
model = modeling.BertModel(
config=bert_config,
is_training=is_training,
input_ids=input_ids,
input_mask=input_mask,
token_type_ids=segment_ids,
use_one_hot_embeddings=use_one_hot_embeddings)
final_hidden = model.get_sequence_output()
final_hidden_shape = modeling.get_shape_list(final_hidden, expected_rank=3)
batch_size = final_hidden_shape[0]
seq_length = final_hidden_shape[1]
hidden_size = final_hidden_shape[2]
output_weights = tf.get_variable(
"cls/squad/output_weights", [2, hidden_size],
initializer=tf.truncated_normal_initializer(stddev=0.02))
output_bias = tf.get_variable(
"cls/squad/output_bias", [2], initializer=tf.zeros_initializer())
final_hidden_matrix = tf.reshape(final_hidden, [batch_size * seq_length, hidden_size])
logits = tf.matmul(final_hidden_matrix, output_weights, transpose_b=True)
logits = tf.nn.bias_add(logits, output_bias)
logits = tf.reshape(logits, [batch_size, seq_length, 2])
logits = tf.transpose(logits, [2, 0, 1])
unstacked_logits = tf.unstack(logits, axis=0)
(start_logits, end_logits) = (unstacked_logits[0], unstacked_logits[1])
return (start_logits, end_logits)
def model_fn_builder(bert_config, init_checkpoint, learning_rate,
num_train_steps, num_warmup_steps, use_tpu,
use_one_hot_embeddings):
"""Returns `model_fn` closure for TPUEstimator."""
def model_fn(features, labels, mode, params): # pylint: disable=unused-argument
"""The `model_fn` for TPUEstimator."""
tf.logging.info("*** Features ***")
for name in sorted(features.keys()):
tf.logging.info(" name = %s, shape = %s" % (name, features[name].shape))
unique_ids = features["unique_ids"]
input_ids = features["input_ids"]
input_mask = features["input_mask"]
segment_ids = features["segment_ids"]
is_training = (mode == tf.estimator.ModeKeys.TRAIN)
(start_logits, end_logits) = create_model(
bert_config=bert_config,
is_training=is_training,
input_ids=input_ids,
input_mask=input_mask,
segment_ids=segment_ids,
use_one_hot_embeddings=use_one_hot_embeddings)
tvars = tf.trainable_variables()
initialized_variable_names = {}
scaffold_fn = None
if init_checkpoint:
(assignment_map, initialized_variable_names) = modeling.get_assignment_map_from_checkpoint(tvars, init_checkpoint)
if use_tpu:
def tpu_scaffold():
tf.train.init_from_checkpoint(init_checkpoint, assignment_map)
return tf.train.Scaffold()
scaffold_fn = tpu_scaffold
else:
tf.train.init_from_checkpoint(init_checkpoint, assignment_map)
tf.logging.info("**** Trainable Variables ****")
for var in tvars:
init_string = ""
if var.name in initialized_variable_names:
init_string = ", *INIT_FROM_CKPT*"
tf.logging.info(" name = %s, shape = %s%s", var.name, var.shape, init_string)
if mode == tf.estimator.ModeKeys.TRAIN:
seq_length = modeling.get_shape_list(input_ids)[1]
def compute_loss(logits, positions):
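        # Cross-entropy between the one-hot gold position and the predicted
        # distribution over sequence positions, averaged over the batch.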
one_hot_positions = tf.one_hot(positions, depth=seq_length, dtype=tf.float32)
log_probs = tf.nn.log_softmax(logits, axis=-1)
loss = -tf.reduce_mean(tf.reduce_sum(one_hot_positions * log_probs, axis=-1))
return loss
start_positions = features["start_positions"]
end_positions = features["end_positions"]
start_loss = compute_loss(start_logits, start_positions)
end_loss = compute_loss(end_logits, end_positions)
total_loss = (start_loss + end_loss) / 2.0
train_op = optimization.create_optimizer(
total_loss, learning_rate, num_train_steps, num_warmup_steps, use_tpu)
# https://github.com/google-research/bert/issues/70#issuecomment-436533131
logging_hook = tf.train.LoggingTensorHook(
tensors={"loss": total_loss}, every_n_iter=100)
output_spec = tf.contrib.tpu.TPUEstimatorSpec(
mode=mode,
loss=total_loss,
train_op=train_op,
scaffold_fn=scaffold_fn,
training_hooks=[logging_hook])
elif mode == tf.estimator.ModeKeys.PREDICT:
predictions = {
"unique_ids": unique_ids,
"start_logits": start_logits,
"end_logits": end_logits,
}
output_spec = tf.contrib.tpu.TPUEstimatorSpec(
mode=mode,
predictions=predictions,
scaffold_fn=scaffold_fn)
else:
raise ValueError("Only TRAIN and PREDICT modes are supported: %s" % (mode))
return output_spec
return model_fn
def input_fn_builder(input_file, seq_length, is_training, drop_remainder):
"""Creates an `input_fn` closure to be passed to TPUEstimator."""
name_to_features = {
"unique_ids": tf.FixedLenFeature([], tf.int64),
"input_ids": tf.FixedLenFeature([seq_length], tf.int64),
"input_mask": tf.FixedLenFeature([seq_length], tf.int64),
"segment_ids": tf.FixedLenFeature([seq_length], tf.int64),
}
if is_training:
name_to_features["start_positions"] = tf.FixedLenFeature([], tf.int64)
name_to_features["end_positions"] = tf.FixedLenFeature([], tf.int64)
def _decode_record(record, name_to_features):
"""Decodes a record to a TensorFlow example."""
example = tf.parse_single_example(record, name_to_features)
# tf.Example only supports tf.int64, but the TPU only supports tf.int32.
# So cast all int64 to int32.
for name in list(example.keys()):
t = example[name]
if t.dtype == tf.int64:
t = tf.to_int32(t)
example[name] = t
return example
def input_fn(params):
"""The actual input function."""
batch_size = params["batch_size"]
# For training, we want a lot of parallel reading and shuffling.
# For eval, we want no shuffling and parallel reading doesn't matter.
d = tf.data.TFRecordDataset(input_file)
if is_training:
d = d.repeat()
d = d.shuffle(buffer_size=100)
d = d.apply(
tf.contrib.data.map_and_batch(
lambda record: _decode_record(record, name_to_features),
batch_size=batch_size,
drop_remainder=drop_remainder))
return d
return input_fn
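# Example (illustrative values): input_fn_builder("train.tf_record", seq_length=384,
# is_training=True, drop_remainder=True) returns an input_fn that TPUEstimator calls
# with params={"batch_size": ...}; training data is repeated and shuffled, while
# eval data is read once in order.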
RawResult = collections.namedtuple("RawResult", ["unique_id", "start_logits", "end_logits"])
def write_predictions(all_examples, all_features, all_results, n_best_size,
max_answer_length, do_lower_case, output_prediction_file,
output_nbest_file, output_null_log_odds_file):
"""Write final predictions to the json file and log-odds of null if needed."""
tf.logging.info("Writing predictions to: %s" % (output_prediction_file))
tf.logging.info("Writing nbest to: %s" % (output_nbest_file))
example_index_to_features = collections.defaultdict(list)
for feature in all_features:
example_index_to_features[feature.example_index].append(feature)
unique_id_to_result = {}
for result in all_results:
unique_id_to_result[result.unique_id] = result
_PrelimPrediction = collections.namedtuple( # pylint: disable=invalid-name
"PrelimPrediction",
["feature_index", "start_index", "end_index", "start_logit", "end_logit", "question_text"]
)
all_predictions = collections.OrderedDict()
all_nbest_json = collections.OrderedDict()
scores_diff_json = collections.OrderedDict()
for (example_index, example) in enumerate(all_examples):
features = example_index_to_features[example_index]
prelim_predictions = []
# keep track of the minimum score of null start+end of position 0
score_null = 1000000 # large and positive
    min_null_feature_index = 0  # the paragraph slice with min null score
null_start_logit = 0 # the start logit at the slice with min null score
null_end_logit = 0 # the end logit at the slice with min null score
for (feature_index, feature) in enumerate(features):
result = unique_id_to_result[feature.unique_id]
start_indexes = _get_best_indexes(result.start_logits, n_best_size)
end_indexes = _get_best_indexes(result.end_logits, n_best_size)
# if we could have irrelevant answers, get the min score of irrelevant
if FLAGS.version_2_with_negative:
feature_null_score = result.start_logits[0] + result.end_logits[0]
if feature_null_score < score_null:
score_null = feature_null_score
min_null_feature_index = feature_index
null_start_logit = result.start_logits[0]
null_end_logit = result.end_logits[0]
for start_index in start_indexes:
for end_index in end_indexes:
# We could hypothetically create invalid predictions, e.g., predict
# that the start of the span is in the question. We throw out all
# invalid predictions.
if start_index >= len(feature.tokens):
continue
if end_index >= len(feature.tokens):
continue
if start_index not in feature.token_to_orig_map:
continue
if end_index not in feature.token_to_orig_map:
continue
if not feature.token_is_max_context.get(start_index, False):
continue
if end_index < start_index:
continue
length = end_index - start_index + 1
if length > max_answer_length:
continue
prelim_predictions.append(
_PrelimPrediction(
feature_index=feature_index,
start_index=start_index,
end_index=end_index,
start_logit=result.start_logits[start_index],
end_logit=result.end_logits[end_index],
question_text=example.question_text
))
if FLAGS.version_2_with_negative:
prelim_predictions.append(
_PrelimPrediction(
feature_index=min_null_feature_index,
start_index=0,
end_index=0,
start_logit=null_start_logit,
end_logit=null_end_logit,
question_text=''
))
prelim_predictions = sorted(
prelim_predictions,
key=lambda x: (x.start_logit + x.end_logit),
reverse=True)
# pylint: disable=invalid-name
_NbestPrediction = collections.namedtuple(
"NbestPrediction", ["text", "start_logit", "end_logit", "question_text"])
seen_predictions = {}
nbest = []
for pred in prelim_predictions:
if len(nbest) >= n_best_size:
break
feature = features[pred.feature_index]
if pred.start_index > 0: # this is a non-null prediction
tok_tokens = feature.tokens[pred.start_index:(pred.end_index + 1)]
orig_doc_start = feature.token_to_orig_map[pred.start_index]
orig_doc_end = feature.token_to_orig_map[pred.end_index]
orig_tokens = example.doc_tokens[orig_doc_start:(orig_doc_end + 1)]
tok_text = " ".join(tok_tokens)
# De-tokenize WordPieces that have been split off.
tok_text = tok_text.replace(" ##", "")
tok_text = tok_text.replace("##", "")
# Clean whitespace
tok_text = tok_text.strip()
tok_text = " ".join(tok_text.split())
orig_text = " ".join(orig_tokens)
final_text = get_final_text(tok_text, orig_text, do_lower_case)
if final_text in seen_predictions:
continue
seen_predictions[final_text] = True
else:
final_text = ""
seen_predictions[final_text] = True
nbest.append(
_NbestPrediction(
text=final_text,
start_logit=pred.start_logit,
end_logit=pred.end_logit,
question_text=pred.question_text
))
    # if we didn't include the empty option in the n-best, include it
if FLAGS.version_2_with_negative:
if "" not in seen_predictions:
        nbest.append(
            _NbestPrediction(
                text="",
                start_logit=null_start_logit,
                end_logit=null_end_logit,
                question_text=""
            ))
# In very rare edge cases we could have no valid predictions. So we
# just create a nonce prediction in this case to avoid failure.
if not nbest:
      nbest.append(_NbestPrediction(
          text="empty",
          start_logit=0.0,
          end_logit=0.0,
          question_text=example.question_text
      ))
assert len(nbest) >= 1
total_scores = []
best_non_null_entry = None
for entry in nbest:
total_scores.append(entry.start_logit + entry.end_logit)
if not best_non_null_entry:
if entry.text:
best_non_null_entry = entry
probs = _compute_softmax(total_scores)
nbest_json = []
for (i, entry) in enumerate(nbest):
output = collections.OrderedDict()
output["text"] = entry.text
output["probability"] = probs[i]
output["start_logit"] = entry.start_logit
output["end_logit"] = entry.end_logit
output["question_text"] = entry.question_text
nbest_json.append(output)
assert len(nbest_json) >= 1
if not FLAGS.version_2_with_negative:
all_predictions[example.qas_id] = nbest_json[0]["text"]
else:
# predict "" iff the null score - the score of best non-null > threshold
score_diff = score_null - best_non_null_entry.start_logit - (
best_non_null_entry.end_logit)
scores_diff_json[example.qas_id] = score_diff
if score_diff > FLAGS.null_score_diff_threshold:
all_predictions[example.qas_id] = ""
else:
all_predictions[example.qas_id] = best_non_null_entry.text
all_nbest_json[example.qas_id] = nbest_json
with tf.gfile.GFile(output_prediction_file, "w") as writer:
writer.write(json.dumps(all_predictions, indent=4) + "\n")
with tf.gfile.GFile(output_nbest_file, "w") as writer:
writer.write(json.dumps(all_nbest_json, indent=4) + "\n")
if FLAGS.version_2_with_negative:
with tf.gfile.GFile(output_null_log_odds_file, "w") as writer:
writer.write(json.dumps(scores_diff_json, indent=4) + "\n")
def get_final_text(pred_text, orig_text, do_lower_case):
"""Project the tokenized prediction back to the original text."""
# When we created the data, we kept track of the alignment between original
# (whitespace tokenized) tokens and our WordPiece tokenized tokens. So
# now `orig_text` contains the span of our original text corresponding to the
# span that we predicted.
#
# However, `orig_text` may contain extra characters that we don't want in
# our prediction.
#
# For example, let's say:
# pred_text = steve smith
# orig_text = Steve Smith's
#
# We don't want to return `orig_text` because it contains the extra "'s".
#
# We don't want to return `pred_text` because it's already been normalized
# (the SQuAD eval script also does punctuation stripping/lower casing but
# our tokenizer does additional normalization like stripping accent
# characters).
#
# What we really want to return is "Steve Smith".
#
  # Therefore, we have to apply a semi-complicated alignment heuristic between
  # `pred_text` and `orig_text` to get a character-to-character alignment. This
# can fail in certain cases in which case we just return `orig_text`.
def _strip_spaces(text):
ns_chars = []
ns_to_s_map = collections.OrderedDict()
for (i, c) in enumerate(text):
if c == " ":
continue
ns_to_s_map[len(ns_chars)] = i
ns_chars.append(c)
ns_text = "".join(ns_chars)
return (ns_text, ns_to_s_map)
# We first tokenize `orig_text`, strip whitespace from the result
# and `pred_text`, and check if they are the same length. If they are
# NOT the same length, the heuristic has failed. If they are the same
# length, we assume the characters are one-to-one aligned.
tokenizer = tokenization.BasicTokenizer(do_lower_case=do_lower_case)
tok_text = " ".join(tokenizer.tokenize(orig_text))
start_position = tok_text.find(pred_text)
if start_position == -1:
if FLAGS.verbose_logging:
tf.logging.info(
"Unable to find text: '%s' in '%s'" % (pred_text, orig_text))
return orig_text
end_position = start_position + len(pred_text) - 1
(orig_ns_text, orig_ns_to_s_map) = _strip_spaces(orig_text)
(tok_ns_text, tok_ns_to_s_map) = _strip_spaces(tok_text)
if len(orig_ns_text) != len(tok_ns_text):
if FLAGS.verbose_logging:
tf.logging.info("Length not equal after stripping spaces: '%s' vs '%s'", orig_ns_text, tok_ns_text)
return orig_text
# We then project the characters in `pred_text` back to `orig_text` using
# the character-to-character alignment.
tok_s_to_ns_map = {}
for (i, tok_index) in six.iteritems(tok_ns_to_s_map):
tok_s_to_ns_map[tok_index] = i
orig_start_position = None
if start_position in tok_s_to_ns_map:
ns_start_position = tok_s_to_ns_map[start_position]
if ns_start_position in orig_ns_to_s_map:
orig_start_position = orig_ns_to_s_map[ns_start_position]
if orig_start_position is None:
if FLAGS.verbose_logging:
tf.logging.info("Couldn't map start position")
return orig_text
orig_end_position = None
if end_position in tok_s_to_ns_map:
ns_end_position = tok_s_to_ns_map[end_position]
if ns_end_position in orig_ns_to_s_map:
orig_end_position = orig_ns_to_s_map[ns_end_position]
if orig_end_position is None:
if FLAGS.verbose_logging:
tf.logging.info("Couldn't map end position")
return orig_text
output_text = orig_text[orig_start_position:(orig_end_position + 1)]
return output_text
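def _get_final_text_example():
  # Illustrative sketch only: the "Steve Smith" case described in the comments above.
  # With do_lower_case=True the BasicTokenizer turns "Steve Smith's" into
  # "steve smith ' s", so "steve smith" is found at character offset 0 and projected
  # back onto the original text, yielding "Steve Smith".
  return get_final_text("steve smith", "Steve Smith's", do_lower_case=True)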
def _get_best_indexes(logits, n_best_size):
"""Get the n-best logits from a list."""
index_and_score = sorted(enumerate(logits), key=lambda x: x[1], reverse=True)
best_indexes = []
for i in range(len(index_and_score)):
if i >= n_best_size:
break
best_indexes.append(index_and_score[i][0])
return best_indexes
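# Example: _get_best_indexes([0.1, 2.5, 1.3, -0.7], n_best_size=2) returns [1, 2].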
def _compute_softmax(scores):
"""Compute softmax probability over raw logits."""
if not scores:
return []
max_score = None
for score in scores:
if max_score is None or score > max_score:
max_score = score
exp_scores = []
total_sum = 0.0
for score in scores:
x = math.exp(score - max_score)
exp_scores.append(x)
total_sum += x
probs = []
for score in exp_scores:
probs.append(score / total_sum)
return probs
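# Example: _compute_softmax([1.0, 2.0, 3.0]) returns roughly [0.090, 0.245, 0.665];
# subtracting max_score first keeps math.exp from overflowing on large logits.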
class FeatureWriter(object):
"""Writes InputFeature to TF example file."""
def __init__(self, filename, is_training):
self.filename = filename
self.is_training = is_training
self.num_features = 0
self._writer = tf.python_io.TFRecordWriter(filename)
def process_feature(self, feature):
"""Write a InputFeature to the TFRecordWriter as a tf.train.Example."""
self.num_features += 1
def create_int_feature(values):
feature = tf.train.Feature(int64_list=tf.train.Int64List(value=list(values)))
return feature
features = collections.OrderedDict()
features["unique_ids"] = create_int_feature([feature.unique_id])
features["input_ids"] = create_int_feature(feature.input_ids)
features["input_mask"] = create_int_feature(feature.input_mask)
features["segment_ids"] = create_int_feature(feature.segment_ids)
if self.is_training:
features["start_positions"] = create_int_feature([feature.start_position])
features["end_positions"] = create_int_feature([feature.end_position])
impossible = 0
if feature.is_impossible:
impossible = 1
features["is_impossible"] = create_int_feature([impossible])
tf_example = tf.train.Example(features=tf.train.Features(feature=features))
self._writer.write(tf_example.SerializeToString())
def close(self):
self._writer.close()
def validate_flags_or_throw(bert_config):
"""Validate the input FLAGS or throw an exception."""
tokenization.validate_case_matches_checkpoint(FLAGS.do_lower_case, FLAGS.init_checkpoint)
if not FLAGS.do_train and not FLAGS.do_predict:
raise ValueError("At least one of `do_train` or `do_predict` must be True.")
if FLAGS.do_train:
if not FLAGS.train_file:
raise ValueError("If `do_train` is True, then `train_file` must be specified.")
if FLAGS.do_predict:
if not FLAGS.predict_file:
raise ValueError("If `do_predict` is True, then `predict_file` must be specified.")
if FLAGS.max_seq_length > bert_config.max_position_embeddings:
raise ValueError("Cannot use sequence length %d because the BERT model "
"was only trained up to sequence length %d" %
(FLAGS.max_seq_length, bert_config.max_position_embeddings))
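  # The "+ 3" below reserves room for the [CLS] token and the two [SEP] tokens that
  # frame the question and the document in each input sequence.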
if FLAGS.max_seq_length <= FLAGS.max_query_length + 3:
raise ValueError("The max_seq_length (%d) must be greater than max_query_length "
"(%d) + 3" % (FLAGS.max_seq_length, FLAGS.max_query_length))
def main(_):
tf.logging.set_verbosity(tf.logging.WARN)
start_t = datetime.now()
print(start_t)
bert_config = modeling.BertConfig.from_json_file(FLAGS.bert_config_file)
validate_flags_or_throw(bert_config)
tf.gfile.MakeDirs(FLAGS.output_dir)
tokenizer = tokenization.FullTokenizer(
vocab_file=FLAGS.vocab_file, do_lower_case=FLAGS.do_lower_case)
tpu_cluster_resolver = None
if FLAGS.use_tpu and FLAGS.tpu_name:
tpu_cluster_resolver = tf.contrib.cluster_resolver.TPUClusterResolver(
FLAGS.tpu_name, zone=FLAGS.tpu_zone, project=FLAGS.gcp_project)
session_config = tf.ConfigProto()
session_config.gpu_options.allow_growth = True
is_per_host = tf.contrib.tpu.InputPipelineConfig.PER_HOST_V2
run_config = tf.contrib.tpu.RunConfig(
cluster=tpu_cluster_resolver,
master=FLAGS.master,
model_dir=FLAGS.output_dir,
session_config=session_config,
save_checkpoints_steps=FLAGS.save_checkpoints_steps,
keep_checkpoint_max=50,
tpu_config=tf.contrib.tpu.TPUConfig(
iterations_per_loop=FLAGS.iterations_per_loop,
num_shards=FLAGS.num_tpu_cores,
per_host_input_for_training=is_per_host))
train_examples = None
num_train_steps = None
num_warmup_steps = None
if FLAGS.do_train:
train_examples = read_squad_examples(
input_file=FLAGS.train_file, is_training=True)
num_train_steps = int(
len(train_examples) / FLAGS.train_batch_size * FLAGS.num_train_epochs)
num_warmup_steps = int(num_train_steps * FLAGS.warmup_proportion)
# Pre-shuffle the input to avoid having to make a very large shuffle
    # buffer in the `input_fn`.
rng = random.Random(FLAGS.input_shuffle_seed)
rng.shuffle(train_examples)
print('#train_examples', len(train_examples))
print('#train_steps', num_train_steps)
model_fn = model_fn_builder(
bert_config=bert_config,
init_checkpoint=FLAGS.init_checkpoint,
learning_rate=FLAGS.learning_rate,
num_train_steps=num_train_steps,
num_warmup_steps=num_warmup_steps,
use_tpu=FLAGS.use_tpu,
use_one_hot_embeddings=FLAGS.use_tpu)
# If TPU is not available, this will fall back to normal Estimator on CPU
# or GPU.
estimator = tf.contrib.tpu.TPUEstimator(
use_tpu=FLAGS.use_tpu,
model_fn=model_fn,
config=run_config,
train_batch_size=FLAGS.train_batch_size,
predict_batch_size=FLAGS.predict_batch_size)
if FLAGS.do_train:
# We write to a temporary file to avoid storing very large constant tensors
# in memory.
train_writer = FeatureWriter(
filename=os.path.join(FLAGS.output_dir, "train.tf_record"),
is_training=True)
convert_examples_to_features(
examples=train_examples,
tokenizer=tokenizer,
max_seq_length=FLAGS.max_seq_length,
doc_stride=FLAGS.doc_stride,
max_query_length=FLAGS.max_query_length,
is_training=True,
output_fn=train_writer.process_feature)
train_writer.close()
tf.logging.info("***** Running training *****")
tf.logging.info(" Num orig examples = %d", len(train_examples))
tf.logging.info(" Num split examples = %d", train_writer.num_features)
tf.logging.info(" Batch size = %d", FLAGS.train_batch_size)
tf.logging.info(" Num steps = %d", num_train_steps)
del train_examples
train_input_fn = input_fn_builder(
input_file=train_writer.filename,
seq_length=FLAGS.max_seq_length,
is_training=True,
drop_remainder=True)
estimator.train(input_fn=train_input_fn, max_steps=num_train_steps)
pdir = os.path.join(FLAGS.output_dir, 'predictions')
if not tf.io.gfile.exists(pdir):
tf.io.gfile.makedirs(pdir)
if FLAGS.do_predict:
if FLAGS.docone:
do_iter = DoconeIter(FLAGS.docone_directory)
for i, eval_examples in enumerate(do_iter.iterate()):
tf.logging.warning(f"Processing chunk {i}".format(i))
if FLAGS.docone_chunk and i < FLAGS.docone_chunk:
continue
eval_writer = FeatureWriter(
filename=os.path.join(FLAGS.output_dir, "eval.tf_record"),
is_training=False)
eval_features = []
def append_feature(feature):
eval_features.append(feature)
eval_writer.process_feature(feature)
convert_examples_to_features(
examples=eval_examples,
tokenizer=tokenizer,
max_seq_length=FLAGS.max_seq_length,
doc_stride=FLAGS.doc_stride,
max_query_length=FLAGS.max_query_length,
is_training=False,
output_fn=append_feature)
eval_writer.close()
tf.logging.info("***** Running predictions *****")
tf.logging.info(" Num orig examples = %d", len(eval_examples))
tf.logging.info(" Num split examples = %d", len(eval_features))
tf.logging.info(" Batch size = %d", FLAGS.predict_batch_size)
all_results = []
predict_input_fn = input_fn_builder(
input_file=eval_writer.filename,
seq_length=FLAGS.max_seq_length,
is_training=False,
drop_remainder=False)
# If running eval on the TPU, you will need to specify the number of
# steps.
all_results = []
for result in estimator.predict(predict_input_fn, yield_single_examples=True):
if len(all_results) % 100 == 0:
tf.logging.info("Processing example: %d" % (len(all_results)))
unique_id = int(result["unique_ids"])
start_logits = [float(x) for x in result["start_logits"].flat]
end_logits = [float(x) for x in result["end_logits"].flat]
all_results.append(
RawResult(
unique_id=unique_id,
start_logits=start_logits,
end_logits=end_logits
))
output_prediction_file = os.path.join(pdir, f"predictions_{i}.json")
output_nbest_file = os.path.join(pdir, f"nbest_predictions_{i}.json")
output_null_log_odds_file = os.path.join(pdir, f"null_odds_{i}.json")
write_predictions(eval_examples, eval_features, all_results,
FLAGS.n_best_size, FLAGS.max_answer_length,
FLAGS.do_lower_case, output_prediction_file,
output_nbest_file, output_null_log_odds_file)
else:
eval_examples = read_squad_examples(
input_file=FLAGS.predict_file, is_training=False)
eval_writer = FeatureWriter(
filename=os.path.join(FLAGS.output_dir, "eval.tf_record"),
is_training=False)
eval_features = []
def append_feature(feature):
eval_features.append(feature)
eval_writer.process_feature(feature)
convert_examples_to_features(
examples=eval_examples,
tokenizer=tokenizer,
max_seq_length=FLAGS.max_seq_length,
doc_stride=FLAGS.doc_stride,
max_query_length=FLAGS.max_query_length,
is_training=False,
output_fn=append_feature)
eval_writer.close()
tf.logging.info("***** Running predictions *****")
tf.logging.info(" Num orig examples = %d", len(eval_examples))
tf.logging.info(" Num split examples = %d", len(eval_features))
tf.logging.info(" Batch size = %d", FLAGS.predict_batch_size)
predict_input_fn = input_fn_builder(
input_file=eval_writer.filename,
seq_length=FLAGS.max_seq_length,
is_training=False,
drop_remainder=False)
# If running eval on the TPU, you will need to specify the number of
# steps.
all_results = []
for result in estimator.predict(
predict_input_fn, yield_single_examples=True):
if len(all_results) % 1000 == 0:
tf.logging.info("Processing example: %d" % (len(all_results)))
unique_id = int(result["unique_ids"])
start_logits = [float(x) for x in result["start_logits"].flat]
end_logits = [float(x) for x in result["end_logits"].flat]
all_results.append(
RawResult(
unique_id=unique_id,
start_logits=start_logits,
end_logits=end_logits
))
"""
# example: BioASQ-test-list-6b-1.json
# example: BioASQ-test-list-7b-3-snippet.json
# example: BioASQ-test-list-6b-snippet-all.json
pred_filename = os.path.split(FLAGS.predict_file)[1]
pred_filename = pred_filename.replace('BioASQ-test-list-', '')
pred_filename = pred_filename.replace('.json', '')
if '-all' == pred_filename[-4:]:
task_batch = pred_filename[0] + 'B' # no batch number
snp = pred_filename[2:-4]
golden = '/hdd1/bioasq/{}b_test/{}_total_golden.json'.format(
pred_filename[0], task_batch)
else:
task_batch = pred_filename.replace('b-', 'B')
snp = task_batch[3:]
task_batch = task_batch[:3]
golden = '/hdd1/biobert/BioASQ/{}_golden.json'.format(task_batch)
output_prediction_file = os.path.join(
FLAGS.output_dir, "{}_predictions.json".format(task_batch))
output_nbest_file = os.path.join(
FLAGS.output_dir, "{}_nbest_predictions.json".format(task_batch))
output_null_log_odds_file = os.path.join(
FLAGS.output_dir, "{}_null_odds.json".format(task_batch))
"""
output_prediction_file = os.path.join(
FLAGS.output_dir, "predictions.json")
output_nbest_file = os.path.join(
FLAGS.output_dir, "nbest_predictions.json")
output_null_log_odds_file = os.path.join(
FLAGS.output_dir, "null_odds.json")
write_predictions(eval_examples, eval_features, all_results,
FLAGS.n_best_size, FLAGS.max_answer_length,
FLAGS.do_lower_case, output_prediction_file,
output_nbest_file, output_null_log_odds_file)
"""
# convert
print("\nConvert to BioASQ format")
import subprocess
outdir = FLAGS.output_dir[:-1] if FLAGS.output_dir[-1] == '/' \
else FLAGS.output_dir
out_file = '{}_list_result{}.json'.format(task_batch, snp)
print('BioASQ format output', os.path.join(outdir, out_file))
eval_proc = subprocess.Popen(
['python3', './biocodes/transform_n2b_list.py',
'--nbest_path={}/{}_nbest_predictions.json'.format(outdir, task_batch),
# '-s',
'--output_path=' + outdir,
'--output_file=' + out_file],
cwd='.',
stdout=subprocess.PIPE
)
stdout, stderr = eval_proc.communicate()
print(stdout.decode('utf-8'))
if stderr is not None:
print(stderr.decode('utf-8'))
print("\nEvaluation")
# https://github.com/BioASQ/Evaluation-Measures/blob/master/flat/BioASQEvaluation/src/evaluation/EvaluatorTask1b.java#L59
print('pred ', os.path.join(outdir, out_file))
print('golden', golden)
if os.path.exists(golden):
# 1: [1, 2], 3: [3, 4], 5: [5, 6]
task_e = {
'1': 1,
'2': 1,
'3': 3,
'4': 3,
'5': 5,
'6': 5,
}
evalproc1 = subprocess.Popen(
['java', '-Xmx10G', '-cp',
'$CLASSPATH:./flat/BioASQEvaluation/dist/BioASQEvaluation.jar',
'evaluation.EvaluatorTask1b', '-phaseB',
'-e', '{}'.format(task_e[task_batch[0]]),
golden, os.path.join(outdir, out_file)],
cwd='/hdd1/biobert/Evaluation-Measures',
stdout=subprocess.PIPE
)
stdout, _ = evalproc1.communicate()
print('\t'.join(
['{:.4f}'.format(float(v))
for v in stdout.decode('utf-8').split(' ')][4:7]), sep='\t')
"""
print('\nElapsed time', datetime.now() - start_t)
if __name__ == "__main__":
flags.mark_flag_as_required("vocab_file")
flags.mark_flag_as_required("bert_config_file")
flags.mark_flag_as_required("output_dir")
tf.app.run()
| 41.589369
| 133
| 0.591609
|
fcf21f8f3fc8572f606036cd40396b86c4bf2acd
| 305
|
py
|
Python
|
bookapi/api/schemas/user.py
|
Fersubair40/bookapi
|
587b512edfad3e05fe648719c6ac176027a630f4
|
[
"MIT"
] | null | null | null |
bookapi/api/schemas/user.py
|
Fersubair40/bookapi
|
587b512edfad3e05fe648719c6ac176027a630f4
|
[
"MIT"
] | null | null | null |
bookapi/api/schemas/user.py
|
Fersubair40/bookapi
|
587b512edfad3e05fe648719c6ac176027a630f4
|
[
"MIT"
] | null | null | null |
from bookapi.models import User
from bookapi.extensions import ma, db
class UserSchema(ma.SQLAlchemyAutoSchema):
id = ma.Int(dump_only=True)
password = ma.String(load_only=True, required=True)
class Meta:
model = User
sqla_session = db.session
load_instance = True
| 21.785714
| 55
| 0.695082
|
93ab8e22b6bfb1396dfedc141a2d800d19ff8828
| 10,300
|
py
|
Python
|
pictures/substorms/sea_onset_comparison.py
|
jhaiduce/defense_slides_www
|
55ca9ea5ebd3bf8f91d5211cc87eb5a24e79c688
|
[
"Apache-2.0"
] | null | null | null |
pictures/substorms/sea_onset_comparison.py
|
jhaiduce/defense_slides_www
|
55ca9ea5ebd3bf8f91d5211cc87eb5a24e79c688
|
[
"Apache-2.0"
] | null | null | null |
pictures/substorms/sea_onset_comparison.py
|
jhaiduce/defense_slides_www
|
55ca9ea5ebd3bf8f91d5211cc87eb5a24e79c688
|
[
"Apache-2.0"
] | null | null | null |
import matplotlib
matplotlib.use('agg')
from substorm_utils.signature_lists import get_model_signature_lists, get_obs_signature_lists
from substorm_utils.bin_listings import find_convolution_onsets, find_substorms_convolution
from datetime import datetime, timedelta
from substorm_utils.forecast_stats import dump_stats
import numpy as np
from spacepy.pybats import ImfInput
from matplotlib import pyplot as plt
import os
from pytz import UTC
from matplotlib_utils import remove_overhanging_labels
from substorm_utils.parsers.mpb_parsers import parse_index
from sea_functions import get_sea_curves
from scipy.io import loadmat
matplotlib.rcParams['font.size']=8
matplotlib.rcParams['legend.handlelength']=1
matplotlib.rcParams['legend.borderpad']=0.2
matplotlib.rcParams['legend.borderaxespad']=0.2
matplotlib.rcParams['legend.handletextpad']=0.4
matplotlib.rcParams['legend.labelspacing']=0.25
matplotlib.rcParams['lines.linewidth']=0.75
run_properties=[
{
'name':'Hi-res w/ RCM',
'displayname':'SWMF',
'path':'/data2/jhaiduce/substorms_Jan2005_young-comp'
},
#{
# 'name':'Hi-res w/o RCM',
# 'path':'/data2/jhaiduce/Jan2005_rerun'
#},
#{
# 'name':'SWPC',
# 'path':'/data1/jhaiduce/Jan2005_swpc'
#},
]
signature_filters=('AL','MPB','dipolarizations','plasmoids','epdata','image')
mandatory_signatures=()
model_threshold=2.5
obs_threshold=2.5
tstep=timedelta(0,1800)
from decouple import config
datadir=config('DATADIR')
supermag_data=np.loadtxt(os.path.join(datadir,'20160728-19-38-supermag.txt'),skiprows=88)
supermag_times=np.array([datetime(2005,1,1,tzinfo=UTC)+timedelta(seconds=60*i) for i in range(1440*31+1)])
sml=supermag_data[:,6]
imfdata=ImfInput(os.path.join(datadir,'imf_jan2005_merged_zeroed.dat'))
imfdata['time']=[t.replace(tzinfo=UTC) for t in imfdata['time']]
imf_clockangle=np.arctan2(imfdata['by'],imfdata['bz'])
imf_bmag=np.sqrt((imfdata['bx']**2+imfdata['by']**2+imfdata['bz']**2))*1e-9
mu0=4*np.pi*1e-7
imf_bz=imfdata['bz']*1e-9
imf_ux=imfdata['ux']*1000
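# Epsilon-style solar wind coupling parameter: -u_x * |B|^2 * sin^4(clock angle / 2) / mu0,
# scaled by 1e6 so the values are in microW/m^2, matching the seadata label below.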
imf_epsilon=-imf_ux*imf_bmag**2*np.sin(imf_clockangle/2)**4/mu0*1e6
obs_mpb_t,obs_mpb_v=parse_index(os.path.join(datadir,'obs_mpb_index.txt'))
obs_signatures=get_obs_signature_lists(datadir=datadir)
seadata={
'bz':['IMF $B_z$ (nT)',imfdata['bz'],imfdata['time']],
'al':['AL (nT)',sml,supermag_times],
'ux':['Solar wind $u_x$ (km/s)',-imfdata['ux'],imfdata['time']],
'rho':[r'Solar wind $\rho$ ($cm^{-3}$)',imfdata['rho'],imfdata['time']],
    'epsilon':[r'Solar wind $\epsilon$ ($\mu W/m^2$)',imf_epsilon,imfdata['time']],
'MPB':['MPB ($nT^4$)',obs_mpb_v,obs_mpb_t]
}
def plot_sea(ax,onsets,data,times,color,**kwargs):
x,median,bound_low,bound_high=get_sea_curves(data,times,onsets)
show_iqr=False
    if show_iqr:
        # shade the interquartile range with the same color as the median line
        iqr_color=color
        polies=ax.fill_between(x,bound_low,bound_high,facecolor=iqr_color,alpha=0.5,edgecolor=iqr_color)
else:
iqr_color='none'
hatch=None
polies=ax.fill_between(x,median,median,facecolor=iqr_color,alpha=0.5,edgecolor=iqr_color)
#polies=ax.plot(mysea.x,mysea.bound_low.ravel(),linestyle='--',color=color,alpha=0.5)
#polies=ax.plot(mysea.x,mysea.bound_high.ravel(),linestyle='--',color=color,alpha=0.5)
line,=ax.plot(x,median,color=color,**kwargs)
return line,polies
def plot_onset_sea(signatures,threshold,data,times,ylabel,ax):
onsets=find_convolution_onsets(signatures,threshold)
onsets=[datetime(2005,1,1)+timedelta(0,s) for s in onsets]
line,polies=plot_sea(ax,onsets,data,times,color=run_colors['All'],
linestyle=run_linestyles['All'],linewidth=2)
lines=[line]
polycols=[polies]
for key in signature_types:
if key=='All': continue
if key in signatures:
onsets=signatures[key]
onsets=[datetime(2005,1,1)+timedelta(0,s) for s in onsets]
if len(onsets)==0: continue
line,polies=plot_sea(ax,onsets,data,times,color=run_colors[key],
linestyle=run_linestyles[key])
lines.append(line)
polycols.append(polies)
else:
from matplotlib.lines import Line2D
from matplotlib.patches import Patch
lines.append(Line2D([],[],color=run_colors[key],
linestyle=run_linestyles[key]))
polycols.append(Patch(color='none',edgecolor='none'))
ax.autoscale(False)
ax.axhline(0,color='k',linestyle=':')
ax.axvline(0,color='k',linestyle=':')
ax.set_ylabel(ylabel)
ax.set_xlabel('Time since onset (h)')
return zip(polycols,lines),[signature_type_labels[key] for key in signature_types]
obs_substorms,obs_onsets=find_substorms_convolution(obs_signatures,obs_threshold,tstep=tstep,return_times=True)
run_onsets={}
run_signatures=get_model_signature_lists(run_properties[0],datadir=datadir)
signature_type_labels={
'All':'All',
'AL':'AL',
'image':'IMAGE/FUV',
'plasmoids':'Plasmoids',
'dipolarizations':'Dipolarizations',
'epdata':'LANL',
'MPB':'MPB'
}
signature_types=set(['All']+list(obs_signatures.keys())+list(run_signatures.keys()))
run_colors={run:color for run,color in zip(
signature_types,
matplotlib.rcParams['axes.prop_cycle'].by_key()['color']
)}
run_linestyles={run:linestyle for run,linestyle in zip(
signature_types,
['-','-.','--',':',
(0, (3, 1, 1, 1, 1, 1)),
(0, (3, 1, 1, 1)),
(0, (5, 1))]
)}
def plot_sea_onset_comparison(run_name,var,ax):
if run_name=='obs':
seadata['MPB']=['MPB ($nT^4$)',obs_mpb_v,obs_mpb_t]
seadata['al']=['AL (nT)',sml,supermag_times]
ylabel,data,times=seadata[var]
artists,labels=plot_onset_sea(obs_signatures,obs_threshold,data,times,ylabel,ax)
else:
names=[runprops['name'] for runprops in run_properties]
runprops=run_properties[names.index(run_name)]
run_signatures=get_model_signature_lists(runprops,datadir=datadir)
from spacepy import datamodel as dm
auroral_inds=dm.fromHDF5(os.path.join(datadir,runprops['name'].replace('/','').replace(' ','_')+'_auroral_inds.h5'))
al_time=[datetime(2005,1,1,tzinfo=UTC)+timedelta(seconds=60*m) for m in range(0,1440*31)]
#try:
# mpbdata=loadmat(os.path.join(datadir,'John Haiducek - '+runprops['name'].replace('/','').replace(' ','_')+'_mag_grid_lat=33_mpb.mat'))
#except:
# raise
#mpb_t=[datetime(2005,1,1,tzinfo=UTC)+timedelta(seconds=m*60) for m in range(0,31*24*60)]
#mpb_v=mpbdata['mpb']
mpb_t,mpb_v=parse_index(os.path.join(datadir,'mpb_index.txt'))
seadata['MPB']=['MPB ($nT^4$)',mpb_v,mpb_t]
seadata['al']=['AL (nT)',auroral_inds['AL'],al_time]
ylabel,data,times=seadata[var]
artists,labels=plot_onset_sea(run_signatures,model_threshold,data,times,ylabel,ax)
return artists,labels
def plot_all_all_tiled_sea():
from matplotlib.gridspec import GridSpec
fig=plt.figure(figsize=[5.5,6.5])
varlist=['bz','epsilon','al','MPB']
gs=GridSpec(len(varlist),len(run_properties)+1,hspace=0,right=0.98,top=0.95,wspace=0,left=0.12,bottom=0.09)
axes=[]
run_names=['obs']+[runprops['name'] for runprops in run_properties]
labelpos=(0.94,0.94)
from string import ascii_lowercase
subplot_labels=[ascii_lowercase[i] for i in range(len(varlist)*len(run_names))]
for i in range(len(varlist)):
axes.append([])
for j in range(len(run_names)):
if j>0:
ax_kwargs={'sharey':axes[i][0]}
else:
ax_kwargs={}
ax=fig.add_subplot(gs[i,j],**ax_kwargs)
axes[i].append(ax)
# Add a label to the axis
label=subplot_labels[i+j*len(varlist)]
text=ax.text(labelpos[0],labelpos[1],label,transform=ax.transAxes,weight='bold',fontsize=11,verticalalignment='top',color='k',horizontalalignment='right')
var=varlist[i]
run_name=run_names[j]
artists,labels=plot_sea_onset_comparison(run_name,var,ax)
ylabel,data,times=seadata[var]
if j==0:
ax.set_ylabel(ylabel)
else:
plt.setp(ax.get_yticklabels(),visible=False)
ax.set_ylabel('')
if i==len(varlist)-1:
ax.set_xlabel('Time since\nonset (h)')
else:
plt.setp(ax.get_xticklabels(),visible=False)
ax.tick_params('x',which='both',direction='inout',top=True)
ax.tick_params('y',which='both',direction='inout',top=True)
if i==0:
if run_name=='obs':
ax.set_title('Observations')
else:
if len(run_properties)>1:
ax.set_title(run_name)
else:
ax.set_title('MHD')
#axes[0][0].set_ylim(-6.5,6.5)
#axes[1][0].set_ylim(0,50)
#axes[2][0].set_ylim(-650,0)
#axes[3][0].set_ylim(0,3500)
fig.canvas.draw()
for i in range(len(varlist)):
for j in range(len(run_names)):
ax=axes[i][j]
remove_overhanging_labels(ax,fig,'x')
remove_overhanging_labels(ax,fig,'y')
axes[3][0].legend(artists,labels,loc='best')
fig.savefig('all_all_tiled_onsetcomp_sea.pdf')
def plot_onsetcomp_fig(run_name,var):
fig=plt.figure()
ax=fig.add_subplot(1,1,1)
plot_sea_onset_comparison(run_name,var,ax)
namestr=run_name.replace('/','').replace(' ','_')
fig.savefig(namestr+'_'+var+'_onsetcomp_sea.pdf')
if __name__=='__main__':
from sys import argv
filename=argv[1]
import re
if filename=='all_all_tiled_onsetcomp_sea.pdf':
plot_all_all_tiled_sea()
else:
match=re.match('(.+)_([^_]+)_onsetcomp_sea.pdf',filename)
namestr=match.group(1)
var=match.group(2)
if namestr=='obs':
plot_onsetcomp_fig('obs',var)
else:
namestrs=[runprops['name'].replace('/','').replace(' ','_') for runprops in run_properties]
runprops=run_properties[namestrs.index(namestr)]
plot_onsetcomp_fig(runprops['name'],var)
| 35.153584
| 166
| 0.645922
|
0494f10d01c7dd99ca42a6d12dd2a718f1b2c832
| 5,032
|
py
|
Python
|
src/olympia/amo/tests/test_sitemap.py
|
anik31/addons-server
|
cecb61da98d6e830fb45a2b1d61b41e72812137e
|
[
"BSD-3-Clause"
] | null | null | null |
src/olympia/amo/tests/test_sitemap.py
|
anik31/addons-server
|
cecb61da98d6e830fb45a2b1d61b41e72812137e
|
[
"BSD-3-Clause"
] | null | null | null |
src/olympia/amo/tests/test_sitemap.py
|
anik31/addons-server
|
cecb61da98d6e830fb45a2b1d61b41e72812137e
|
[
"BSD-3-Clause"
] | null | null | null |
import datetime
import os
from collections import namedtuple
from unittest import mock
from django.conf import settings
from django.urls import reverse
from olympia import amo
from olympia.amo.sitemap import (
AddonSitemap,
AMOSitemap,
build_sitemap,
CollectionSitemap,
get_sitemap_path,
get_sitemap_section_pages,
sitemaps,
)
from olympia.amo.tests import (
addon_factory,
collection_factory,
license_factory,
user_factory,
)
from .test_views import TEST_SITEMAPS_DIR
def test_addon_sitemap():
addon_a = addon_factory(
privacy_policy='privacy!',
eula='eula!',
version_kw={'license': license_factory()},
)
# addon_factory generates licenses by default, but always with a builtin >0
addon_b = addon_factory()
addon_b.update(last_updated=datetime.datetime(2020, 1, 1, 1, 1, 1))
addon_c = addon_factory(
eula='only eula', version_kw={'license': license_factory(builtin=1)}
)
addon_d = addon_factory(privacy_policy='only privacy')
addon_factory(status=amo.STATUS_NOMINATED) # shouldn't show up
sitemap = AddonSitemap()
assert list(sitemap.items()) == [
(addon_d.last_updated, addon_d.slug, 'detail'),
(addon_c.last_updated, addon_c.slug, 'detail'),
(addon_a.last_updated, addon_a.slug, 'detail'),
(addon_b.last_updated, addon_b.slug, 'detail'),
(addon_d.last_updated, addon_d.slug, 'privacy'),
(addon_a.last_updated, addon_a.slug, 'privacy'),
(addon_c.last_updated, addon_c.slug, 'eula'),
(addon_a.last_updated, addon_a.slug, 'eula'),
(addon_a.last_updated, addon_a.slug, 'license'),
]
for item in sitemap.items():
assert sitemap.location(item) == reverse(
'addons.' + item.urlname, args=[item.slug]
)
assert '/en-US/firefox/' in sitemap.location(item)
assert sitemap.lastmod(item) == item.last_updated
def test_amo_sitemap():
sitemap = AMOSitemap()
for item in sitemap.items():
assert sitemap.location(item) == reverse(item)
def test_collection_sitemap(mozilla_user):
collection_a = collection_factory(
author=mozilla_user, modified=datetime.datetime(2020, 1, 1, 1, 1, 1)
)
collection_b = collection_factory(
author=mozilla_user, modified=datetime.datetime(2020, 2, 2, 2, 2, 2)
)
collection_factory(author=user_factory()) # not mozilla user
sitemap = CollectionSitemap()
assert list(sitemap.items()) == [
(collection_b.modified, collection_b.slug, mozilla_user.id),
(collection_a.modified, collection_a.slug, mozilla_user.id),
]
for item in sitemap.items():
assert sitemap.location(item) == reverse(
'collections.detail', args=[mozilla_user.id, item.slug]
)
assert '/en-US/firefox/' in sitemap.location(item)
assert sitemap.lastmod(item) == item.modified
def test_get_sitemap_section_pages():
addon_factory()
addon_factory()
addon_factory()
assert list(sitemaps.keys()) == ['amo', 'addons', 'collections']
pages = get_sitemap_section_pages()
assert pages == [
('amo', 1),
('addons', 1),
('collections', 1),
]
with mock.patch.object(AddonSitemap, 'limit', 2):
pages = get_sitemap_section_pages()
assert pages == [
('amo', 1),
('addons', 1),
('addons', 2),
('collections', 1),
]
def test_build_sitemap():
# test the index sitemap build first
with mock.patch('olympia.amo.sitemap.get_sitemap_section_pages') as pages_mock:
pages_mock.return_value = [
('amo', 1),
('addons', 1),
('addons', 2),
]
built = build_sitemap()
with open(os.path.join(TEST_SITEMAPS_DIR, 'sitemap.xml')) as sitemap:
assert built == sitemap.read()
# then a section build
def items_mock(self):
AddonValuesList = namedtuple('AddonValuesList', 'last_updated,slug,urlname')
return [
AddonValuesList(
datetime.datetime(2020, 10, 2, 0, 0, 0), 'delicious-pierogi', 'detail'
),
AddonValuesList(
datetime.datetime(2020, 10, 1, 0, 0, 0), 'swanky-curry', 'detail'
),
AddonValuesList(
datetime.datetime(2020, 9, 30, 0, 0, 0), 'spicy-pierogi', 'detail'
),
]
with mock.patch.object(AddonSitemap, 'items', items_mock):
built = build_sitemap('addons')
with open(os.path.join(TEST_SITEMAPS_DIR, 'sitemap-addons-2.xml')) as sitemap:
assert built == sitemap.read()
def test_get_sitemap_path():
path = settings.SITEMAP_STORAGE_PATH
assert get_sitemap_path() == f'{path}/sitemap.xml'
assert get_sitemap_path('foo') == f'{path}/sitemap-foo.xml'
assert get_sitemap_path('foo', 1) == f'{path}/sitemap-foo.xml'
assert get_sitemap_path('foo', 2) == f'{path}/sitemap-foo-2.xml'
| 32.675325
| 86
| 0.632154
|
a4b215a0383aa17cd7f9cd39d310d6009c08966e
| 211
|
py
|
Python
|
scripts/item/consume_2434545.py
|
G00dBye/YYMS
|
1de816fc842b6598d5b4b7896b6ab0ee8f7cdcfb
|
[
"MIT"
] | 54
|
2019-04-16T23:24:48.000Z
|
2021-12-18T11:41:50.000Z
|
scripts/item/consume_2434545.py
|
G00dBye/YYMS
|
1de816fc842b6598d5b4b7896b6ab0ee8f7cdcfb
|
[
"MIT"
] | 3
|
2019-05-19T15:19:41.000Z
|
2020-04-27T16:29:16.000Z
|
scripts/item/consume_2434545.py
|
G00dBye/YYMS
|
1de816fc842b6598d5b4b7896b6ab0ee8f7cdcfb
|
[
"MIT"
] | 49
|
2020-11-25T23:29:16.000Z
|
2022-03-26T16:20:24.000Z
|
# Created by MechAviv
# Hayato Damage Skin | (2434545)
if sm.addDamageSkin(2434545):
sm.chat("'Hayato Damage Skin' Damage Skin has been added to your account's damage skin collection.")
sm.consumeItem()
| 42.2
| 105
| 0.734597
|
a99ea7569f5a4ca153ceef5a6033d88dbc09e0ec
| 1,240
|
py
|
Python
|
apps/terreno/views.py
|
charles7aponteunillanos/sifos
|
7c7804cb26da3f82ab591d7668b5b0d43b7f805b
|
[
"MIT"
] | null | null | null |
apps/terreno/views.py
|
charles7aponteunillanos/sifos
|
7c7804cb26da3f82ab591d7668b5b0d43b7f805b
|
[
"MIT"
] | null | null | null |
apps/terreno/views.py
|
charles7aponteunillanos/sifos
|
7c7804cb26da3f82ab591d7668b5b0d43b7f805b
|
[
"MIT"
] | null | null | null |
from django.shortcuts import render, redirect
from django.http import HttpResponse, HttpResponseRedirect
# Create your views here.
from apps.terreno.models import Poligono
def index(request):
obj = Poligono.objects.all()
obj_nombre = Poligono.nombre
obj_coordenadas_puntos = Poligono.coordenadas_puntos
obj_area = Poligono.area
obj_perimetro = Poligono.perimetro
obj_tipo_patron = Poligono.tipo_patron
obj_municipio = Poligono.municipio
obj_usuario = Poligono.usuario
for abc in obj:
obj_nombre = abc.nombre
obj_coordenadas_puntos = abc.coordenadas_puntos
obj_area = abc.area
obj_perimetro= abc.perimetro
obj_tipo_patron = abc.tipo_patron
obj_municipio = abc.municipio
obj_usuario = abc.usuario
context = {
"obj":obj,
"obj_nombre":obj_nombre,
"obj_coordenadas_puntos":obj_coordenadas_puntos,
"obj_area":obj_area,
"obj_perimetro":obj_perimetro,
"obj_tipo_patron":obj_tipo_patron,
"obj_municipio":obj_municipio,
"obj_usuario":obj_usuario
}
return render(request, "terreno/index.html", context)
def registro(request):
return render(request, "terreno/registrar.html")
| 31.794872
| 58
| 0.707258
|
122d1ae4507bb90f9735ddccd1cea6cd731df161
| 3,743
|
py
|
Python
|
PythonClient/airsim/pedestrian.py
|
tobhil98/MastersProject-AirSim
|
e4576a84919a4a4baa57473e68a7a65dafcdbc30
|
[
"MIT"
] | null | null | null |
PythonClient/airsim/pedestrian.py
|
tobhil98/MastersProject-AirSim
|
e4576a84919a4a4baa57473e68a7a65dafcdbc30
|
[
"MIT"
] | null | null | null |
PythonClient/airsim/pedestrian.py
|
tobhil98/MastersProject-AirSim
|
e4576a84919a4a4baa57473e68a7a65dafcdbc30
|
[
"MIT"
] | null | null | null |
import msgpackrpc #install as admin: pip install msgpack-rpc-python
import numpy as np #pip install numpy
import msgpack
from .utils import *
from .types import *
class PedestrianClient:
def __init__(self, ip = "", port = 41452, timeout_value = 3600):
if (ip == ""):
ip = "127.0.0.1"
self.client = msgpackrpc.Client(msgpackrpc.Address(ip, port), timeout = timeout_value, pack_encoding = 'utf-8', unpack_encoding = 'utf-8')
# ----------------------------------- Common vehicle APIs ---------------------------------------------
def reset(self, pedestrian_name = ''):
"""
        Reset the pedestrian to its original starting state
Note that you must call `enableApiControl` and `armDisarm` again after the call to reset
"""
self.client.call('PedestrianReset', pedestrian_name)
def ping(self):
"""
If connection is established then this call will return true otherwise it will be blocked until timeout
Returns:
bool:
"""
return self.client.call('PedestrianPing')
def getClientVersion(self):
return 1 # sync with C++ client
def getServerVersion(self):
return self.client.call('getServerVersion')
def setPedestrianPose(self, pose, ignore_collison, pedestrian_name):
"""
Set the pose of the vehicle
If you don't want to change position (or orientation) then just set components of position (or orientation) to floating point nan values
Args:
            pose (Pose): Desired Pose of the pedestrian
            ignore_collision (bool): Whether to ignore any collision or not
            pedestrian_name (str, optional): Name of the pedestrian to move
"""
self.client.call('SetPedestrianPose', pose, ignore_collison, pedestrian_name)
def getPedestianPose(self, pedestrian_name):
"""
Args:
            pedestrian_name (str, optional): Name of the pedestrian to get the Pose of
Returns:
Pose:
"""
pose = self.client.call('GetPedestrianPose', pedestrian_name)
return Pose.from_msgpack(pose)
def enableApiControl(self, is_enabled, pedestrian_name = ''):
"""
        Enables or disables API control for the pedestrian corresponding to pedestrian_name
        Args:
            is_enabled (bool): True to enable, False to disable API control
            pedestrian_name (str, optional): Name of the pedestrian to send this command to
"""
return self.client.call('PedestrianEnableApiControl', is_enabled, pedestrian_name)
def setPedestrianControl(self, controls, pedestrian_name = ''):
"""
        Control the pedestrian using throttle, steering, brake, etc.
        Args:
            controls (PedestrianControls): Struct containing control values
            pedestrian_name (str, optional): Name of the pedestrian to be controlled
"""
self.client.call('setPedestrianControls', controls, pedestrian_name)
# Pedestrian camera
def simGetImages(self, requests, vehicle_name = ''):
"""
Get multiple images
See https://microsoft.github.io/AirSim/image_apis/ for details and examples
Args:
requests (list[ImageRequest]): Images required
vehicle_name (str, optional): Name of vehicle associated with the camera
Returns:
list[ImageResponse]:
"""
responses_raw = self.client.call('simGetImages', requests, vehicle_name)
return [ImageResponse.from_msgpack(response_raw) for response_raw in responses_raw]
def simGetCameras(self, vehicle_name):
return self.client.call('getCameras', vehicle_name)
# Get a list of all pedestrians
| 34.981308
| 146
| 0.636121
|
6e101776847cc1924d732c80249616f63eade00e
| 900
|
py
|
Python
|
pythonAlgorithm/dp/LongestPalindromicSubsequence.py
|
Sky-zzt/lintcodePractice
|
d6436b296c5865d85e55c8ad4fcdbb0165d4ebb1
|
[
"MIT"
] | 1
|
2020-09-15T07:58:55.000Z
|
2020-09-15T07:58:55.000Z
|
pythonAlgorithm/dp/LongestPalindromicSubsequence.py
|
Sky-zzt/lintcodePractice
|
d6436b296c5865d85e55c8ad4fcdbb0165d4ebb1
|
[
"MIT"
] | null | null | null |
pythonAlgorithm/dp/LongestPalindromicSubsequence.py
|
Sky-zzt/lintcodePractice
|
d6436b296c5865d85e55c8ad4fcdbb0165d4ebb1
|
[
"MIT"
] | null | null | null |
class Solution:
"""
@param s: the maximum length of s is 1000
@return: the longest palindromic subsequence's length
"""
    '''
    Let f[i][j] be the length of the longest palindromic subsequence of s[i..j].
    Note that f[i][j] stores the length of that subsequence; it does not mean that
    s[i..j] itself is a palindrome.
    Recurrence: f[i][j] = max(f[i+1][j], f[i][j-1]), and if s[i] == s[j],
    also consider f[i+1][j-1] + 2.
    '''
def longestPalindromeSubseq(self, s):
# write your code here
l = len(s)
f = [[0] * (l) for _ in range(l)]
for i in range(l):
f[i][i]=1
for i in range(l-1):
if s[i]==s[i+1]:
f[i][i+1]=2
else:
f[i][i+1]=1
for length in range(3, l+1):
for i in range(l - length+1):
j = i + length - 1
f[i][j] = max(f[i + 1][j], f[i][j - 1])
                if s[i] == s[j]:
f[i][j] = max(f[i][j], f[i + 1][j - 1] + 2)
print(f)
return f[0][l-1]
s=Solution()
print(s.longestPalindromeSubseq("bbbab"))
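# Expected output: 4 (the longest palindromic subsequence of "bbbab" is "bbbb").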
| 27.272727
| 63
| 0.432222
|
f1daaccac1e1558695b49167349232325a1c42b4
| 5,199
|
py
|
Python
|
stlapp/views/project.py
|
SahalaProject/stl_simple
|
9fde58c5987dbf97687a95953282e20b8c117b24
|
[
"MIT"
] | null | null | null |
stlapp/views/project.py
|
SahalaProject/stl_simple
|
9fde58c5987dbf97687a95953282e20b8c117b24
|
[
"MIT"
] | null | null | null |
stlapp/views/project.py
|
SahalaProject/stl_simple
|
9fde58c5987dbf97687a95953282e20b8c117b24
|
[
"MIT"
] | null | null | null |
from django.core.exceptions import ObjectDoesNotExist
from django.utils.decorators import method_decorator
from rest_framework.views import APIView
from rest_framework.viewsets import GenericViewSet
from stlapp import models, serializers
from STL import pagination
from rest_framework.response import Response
from stlapp.utils import response
# from stlapp.utils import prepare
from stlapp.utils.decorator import request_log
class ProjectView(GenericViewSet):
"""
    Project CRUD (create, update, delete, list, and detail views).
"""
queryset = models.Project.objects.all().order_by('-update_time')
serializer_class = serializers.ProjectSerializer
pagination_class = pagination.MyCursorPagination
@method_decorator(request_log(level='DEBUG'))
def list(self, request):
"""
        List project information.
"""
field_message = request.user.project_field
        is_not_page = request.query_params.get('is_not_page')  # clone modal: list all projects without pagination
try:
if is_not_page:
if field_message == 'all':
project_s = models.Project.objects.all().order_by('-create_time')
else:
project_s = models.Project.objects.filter(id__in=eval(field_message)).order_by('-create_time')
serializer_projects = serializers.ProjectSerializer(instance=project_s, many=True).data
return Response({'results': serializer_projects})
else:
if field_message == 'all':
project_s = models.Project.objects.filter(id__in=eval(field_message)).order_by('-create_time')
else:
project_s = models.Project.objects.filter(id__in=eval(field_message)).order_by('-create_time')
page_projects = self.paginate_queryset(project_s)
except:
if field_message == 'all':
projects = self.get_queryset()
page_projects = self.paginate_queryset(projects)
if not field_message:
projects = models.Project.objects.filter(id__in=[])
page_projects = self.paginate_queryset(projects)
serializer = self.get_serializer(page_projects, many=True)
return self.get_paginated_response(serializer.data)
@method_decorator(request_log(level='INFO'))
def add(self, request):
"""添加项目 {
name: str
}
"""
if request.user.user_type != 3:
return Response(response.PROJECT_CREATE_POWER)
name = request.data["name"]
if models.Project.objects.filter(name=name).first():
response.PROJECT_EXISTS["name"] = name
return Response(response.PROJECT_EXISTS)
request.data["responsible"] = request.user.real_name
        # deserialize the request payload
serializer = serializers.ProjectSerializer(data=request.data)
if serializer.is_valid():
serializer.save()
project = models.Project.objects.get(name=name)
# prepare.project_init(project)
return Response(response.PROJECT_ADD_SUCCESS)
return Response(response.SYSTEM_ERROR)
@method_decorator(request_log(level='INFO'))
def update(self, request):
"""
        Edit a project.
"""
if request.user.user_type != 3:
return Response(response.PROJECT_UPDATE_POWER)
try:
project = models.Project.objects.get(id=request.data['id'])
except (KeyError, ObjectDoesNotExist):
return Response(response.SYSTEM_ERROR)
if request.data['name'] != project.name:
if models.Project.objects.filter(name=request.data['name']).first():
return Response(response.PROJECT_EXISTS)
        # call save() so that the update_time field is refreshed automatically
project.name = request.data['name']
project.responsible = request.user.real_name
project.desc = request.data['desc']
project.save()
return Response(response.PROJECT_UPDATE_SUCCESS)
@method_decorator(request_log(level='INFO'))
def delete(self, request):
"""
        Delete a project.
"""
if request.user.user_type != 3:
return Response(response.PROJECT_DELETE_POWER)
try:
project = models.Project.objects.get(id=request.data['id'])
project.delete()
return Response(response.PROJECT_DELETE_SUCCESS)
except ObjectDoesNotExist:
return Response(response.SYSTEM_ERROR)
@method_decorator(request_log(level='INFO'))
def single(self, request, **kwargs):
"""
        Get statistics for a single project.
"""
pk = kwargs.pop('pk')
field_message = request.user.project_field
if (str(pk) in field_message) or (field_message == 'all'):
try:
queryset = models.Project.objects.get(id=pk)
except ObjectDoesNotExist:
return Response(response.PROJECT_NOT_EXISTS)
serializer = self.get_serializer(queryset, many=False)
# project_info = prepare.get_project_detail(pk)
project_info = {}
project_info.update(serializer.data)
return Response(project_info)
return Response(response.PROJECT_SINGLE_POWER)
| 35.367347
| 114
| 0.633583
|
69ece29089d6dc4e529d3285eeb10fed2048b6eb
| 12,839
|
py
|
Python
|
tests/interpreters/test_python.py
|
jiehuan/m2cgen
|
e0e628bdd0453a8b11f3607bb3a0be60924f515f
|
[
"MIT"
] | 2,161
|
2019-01-13T02:37:56.000Z
|
2022-03-30T13:24:09.000Z
|
tests/interpreters/test_python.py
|
jiehuan/m2cgen
|
e0e628bdd0453a8b11f3607bb3a0be60924f515f
|
[
"MIT"
] | 380
|
2019-01-17T15:59:29.000Z
|
2022-03-31T20:59:20.000Z
|
tests/interpreters/test_python.py
|
jiehuan/m2cgen
|
e0e628bdd0453a8b11f3607bb3a0be60924f515f
|
[
"MIT"
] | 201
|
2019-02-13T19:06:44.000Z
|
2022-03-12T09:45:46.000Z
|
import pytest
from m2cgen import ast
from m2cgen.interpreters import PythonInterpreter
from tests.utils import assert_code_equal
def test_if_expr():
expr = ast.IfExpr(
ast.CompExpr(ast.NumVal(1), ast.FeatureRef(0), ast.CompOpType.EQ),
ast.NumVal(2),
ast.NumVal(3))
expected_code = """
def score(input):
if (1.0) == (input[0]):
var0 = 2.0
else:
var0 = 3.0
return var0
"""
interpreter = PythonInterpreter()
assert_code_equal(interpreter.interpret(expr), expected_code)
def test_bin_num_expr():
expr = ast.BinNumExpr(
ast.BinNumExpr(
ast.FeatureRef(0), ast.NumVal(-2), ast.BinNumOpType.DIV),
ast.NumVal(2),
ast.BinNumOpType.MUL)
expected_code = """
def score(input):
return ((input[0]) / (-2.0)) * (2.0)
"""
interpreter = PythonInterpreter()
assert_code_equal(interpreter.interpret(expr), expected_code)
def test_dependable_condition():
left = ast.BinNumExpr(
ast.IfExpr(
ast.CompExpr(ast.NumVal(1),
ast.NumVal(1),
ast.CompOpType.EQ),
ast.NumVal(1),
ast.NumVal(2)),
ast.NumVal(2),
ast.BinNumOpType.ADD)
right = ast.BinNumExpr(ast.NumVal(1), ast.NumVal(2), ast.BinNumOpType.DIV)
bool_test = ast.CompExpr(left, right, ast.CompOpType.GTE)
expr = ast.IfExpr(bool_test, ast.NumVal(1), ast.FeatureRef(0))
expected_code = """
def score(input):
if (1.0) == (1.0):
var1 = 1.0
else:
var1 = 2.0
if ((var1) + (2.0)) >= ((1.0) / (2.0)):
var0 = 1.0
else:
var0 = input[0]
return var0
"""
interpreter = PythonInterpreter()
assert_code_equal(interpreter.interpret(expr), expected_code)
def test_nested_condition():
left = ast.BinNumExpr(
ast.IfExpr(
ast.CompExpr(ast.NumVal(1),
ast.NumVal(1),
ast.CompOpType.EQ),
ast.NumVal(1),
ast.NumVal(2)),
ast.NumVal(2),
ast.BinNumOpType.ADD)
bool_test = ast.CompExpr(ast.NumVal(1), left, ast.CompOpType.EQ)
expr_nested = ast.IfExpr(bool_test, ast.FeatureRef(2), ast.NumVal(2))
expr = ast.IfExpr(bool_test, expr_nested, ast.NumVal(2))
expected_code = """
def score(input):
if (1.0) == (1.0):
var1 = 1.0
else:
var1 = 2.0
if (1.0) == ((var1) + (2.0)):
if (1.0) == (1.0):
var2 = 1.0
else:
var2 = 2.0
if (1.0) == ((var2) + (2.0)):
var0 = input[2]
else:
var0 = 2.0
else:
var0 = 2.0
return var0
"""
interpreter = PythonInterpreter()
assert_code_equal(interpreter.interpret(expr), expected_code)
def test_raw_array():
expr = ast.VectorVal([ast.NumVal(3), ast.NumVal(4)])
expected_code = """
def score(input):
return [3.0, 4.0]
"""
interpreter = PythonInterpreter()
assert_code_equal(interpreter.interpret(expr), expected_code)
def test_multi_output():
expr = ast.IfExpr(
ast.CompExpr(
ast.NumVal(1),
ast.NumVal(1),
ast.CompOpType.NOT_EQ),
ast.VectorVal([ast.NumVal(1), ast.NumVal(2)]),
ast.VectorVal([ast.NumVal(3), ast.NumVal(4)]))
expected_code = """
def score(input):
if (1.0) != (1.0):
var0 = [1.0, 2.0]
else:
var0 = [3.0, 4.0]
return var0
"""
interpreter = PythonInterpreter()
assert_code_equal(interpreter.interpret(expr), expected_code)
def test_bin_vector_expr():
expr = ast.BinVectorExpr(
ast.VectorVal([ast.NumVal(1), ast.NumVal(2)]),
ast.VectorVal([ast.NumVal(3), ast.NumVal(4)]),
ast.BinNumOpType.ADD)
expected_code = """
def add_vectors(v1, v2):
return [sum(i) for i in zip(v1, v2)]
def mul_vector_number(v1, num):
return [i * num for i in v1]
def score(input):
return add_vectors([1.0, 2.0], [3.0, 4.0])
"""
interpreter = PythonInterpreter()
assert_code_equal(interpreter.interpret(expr), expected_code)
def test_bin_vector_num_expr():
expr = ast.BinVectorNumExpr(
ast.VectorVal([ast.NumVal(1), ast.NumVal(2)]),
ast.NumVal(1),
ast.BinNumOpType.MUL)
expected_code = """
def add_vectors(v1, v2):
return [sum(i) for i in zip(v1, v2)]
def mul_vector_number(v1, num):
return [i * num for i in v1]
def score(input):
return mul_vector_number([1.0, 2.0], 1.0)
"""
interpreter = PythonInterpreter()
assert_code_equal(interpreter.interpret(expr), expected_code)
class CustomPythonInterpreter(PythonInterpreter):
bin_depth_threshold = 2
def test_depth_threshold_with_bin_expr():
expr = ast.NumVal(1)
for _ in range(4):
expr = ast.BinNumExpr(ast.NumVal(1), expr, ast.BinNumOpType.ADD)
expected_code = """
def score(input):
var0 = (1.0) + ((1.0) + (1.0))
return (1.0) + ((1.0) + (var0))
"""
interpreter = CustomPythonInterpreter()
assert_code_equal(interpreter.interpret(expr), expected_code)
def test_depth_threshold_with_reused_bin_expr():
reused_expr = ast.BinNumExpr(ast.NumVal(1), ast.NumVal(1), ast.BinNumOpType.ADD, to_reuse=True)
expr = ast.BinNumExpr(ast.NumVal(1), reused_expr, ast.BinNumOpType.ADD)
expr = ast.BinNumExpr(expr, expr, ast.BinNumOpType.ADD)
expected_code = """
def score(input):
var0 = (1.0) + (1.0)
var1 = var0
return ((1.0) + (var1)) + ((1.0) + (var0))
"""
interpreter = CustomPythonInterpreter()
assert_code_equal(interpreter.interpret(expr), expected_code)
def test_depth_threshold_without_bin_expr():
expr = ast.NumVal(1)
for _ in range(4):
expr = ast.IfExpr(
ast.CompExpr(
ast.NumVal(1), ast.NumVal(1), ast.CompOpType.EQ),
ast.NumVal(1),
expr)
expected_code = """
def score(input):
if (1.0) == (1.0):
var0 = 1.0
else:
if (1.0) == (1.0):
var0 = 1.0
else:
if (1.0) == (1.0):
var0 = 1.0
else:
if (1.0) == (1.0):
var0 = 1.0
else:
var0 = 1.0
return var0
"""
interpreter = CustomPythonInterpreter()
assert_code_equal(interpreter.interpret(expr), expected_code)
def test_deep_mixed_exprs_not_reaching_threshold():
expr = ast.NumVal(1)
for _ in range(4):
inner = ast.NumVal(1)
for __ in range(2):
inner = ast.BinNumExpr(ast.NumVal(1), inner, ast.BinNumOpType.ADD)
expr = ast.IfExpr(
ast.CompExpr(
inner, ast.NumVal(1), ast.CompOpType.EQ),
ast.NumVal(1),
expr)
expected_code = """
def score(input):
if ((1.0) + ((1.0) + (1.0))) == (1.0):
var0 = 1.0
else:
if ((1.0) + ((1.0) + (1.0))) == (1.0):
var0 = 1.0
else:
if ((1.0) + ((1.0) + (1.0))) == (1.0):
var0 = 1.0
else:
if ((1.0) + ((1.0) + (1.0))) == (1.0):
var0 = 1.0
else:
var0 = 1.0
return var0
"""
interpreter = CustomPythonInterpreter()
assert_code_equal(interpreter.interpret(expr), expected_code)
def test_deep_mixed_exprs_exceeding_threshold():
expr = ast.NumVal(1)
for i in range(4):
inner = ast.NumVal(1)
for _ in range(4):
inner = ast.BinNumExpr(ast.NumVal(i), inner, ast.BinNumOpType.ADD)
expr = ast.IfExpr(
ast.CompExpr(
inner, ast.NumVal(1), ast.CompOpType.EQ),
ast.NumVal(1),
expr)
expected_code = """
def score(input):
var1 = (3.0) + ((3.0) + (1.0))
if ((3.0) + ((3.0) + (var1))) == (1.0):
var0 = 1.0
else:
var2 = (2.0) + ((2.0) + (1.0))
if ((2.0) + ((2.0) + (var2))) == (1.0):
var0 = 1.0
else:
var3 = (1.0) + ((1.0) + (1.0))
if ((1.0) + ((1.0) + (var3))) == (1.0):
var0 = 1.0
else:
var4 = (0.0) + ((0.0) + (1.0))
if ((0.0) + ((0.0) + (var4))) == (1.0):
var0 = 1.0
else:
var0 = 1.0
return var0
"""
interpreter = CustomPythonInterpreter()
assert_code_equal(interpreter.interpret(expr), expected_code)
def test_deep_expression():
expr = ast.NumVal(1)
for _ in range(120):
expr = ast.BinNumExpr(expr, ast.NumVal(1), ast.BinNumOpType.ADD)
interpreter = PythonInterpreter()
result_code = interpreter.interpret(expr)
result_code += """
result = score(None)
"""
scope = {}
exec(result_code, scope)
assert scope["result"] == 121
def test_abs_expr():
expr = ast.AbsExpr(ast.NumVal(-1.0))
expected_code = """
def score(input):
return abs(-1.0)
"""
interpreter = PythonInterpreter()
assert_code_equal(interpreter.interpret(expr), expected_code)
def test_exp_expr():
expr = ast.ExpExpr(ast.NumVal(1.0))
expected_code = """
import math
def score(input):
return math.exp(1.0)
"""
interpreter = PythonInterpreter()
assert_code_equal(interpreter.interpret(expr), expected_code)
def test_pow_expr():
expr = ast.PowExpr(ast.NumVal(2.0), ast.NumVal(3.0))
expected_code = """
import math
def score(input):
return math.pow(2.0, 3.0)
"""
interpreter = PythonInterpreter()
assert_code_equal(interpreter.interpret(expr), expected_code)
def test_sqrt_expr():
expr = ast.SqrtExpr(ast.NumVal(2.0))
expected_code = """
import math
def score(input):
return math.sqrt(2.0)
"""
interpreter = PythonInterpreter()
assert_code_equal(interpreter.interpret(expr), expected_code)
def test_tanh_expr():
expr = ast.TanhExpr(ast.NumVal(2.0))
expected_code = """
import math
def score(input):
return math.tanh(2.0)
"""
interpreter = PythonInterpreter()
assert_code_equal(interpreter.interpret(expr), expected_code)
def test_log_expr():
expr = ast.LogExpr(ast.NumVal(2.0))
expected_code = """
import math
def score(input):
return math.log(2.0)
"""
interpreter = PythonInterpreter()
assert_code_equal(interpreter.interpret(expr), expected_code)
def test_log1p_expr():
expr = ast.Log1pExpr(ast.NumVal(2.0))
expected_code = """
import math
def score(input):
return math.log1p(2.0)
"""
interpreter = PythonInterpreter()
assert_code_equal(interpreter.interpret(expr), expected_code)
def test_atan_expr():
expr = ast.AtanExpr(ast.NumVal(2.0))
expected_code = """
import math
def score(input):
return math.atan(2.0)
"""
interpreter = PythonInterpreter()
assert_code_equal(interpreter.interpret(expr), expected_code)
def test_softmax_expr():
expr = ast.SoftmaxExpr([ast.NumVal(2.0), ast.NumVal(3.0)])
expected_code = """
import math
def softmax(x):
m = max(x)
exps = [math.exp(i - m) for i in x]
s = sum(exps)
for idx, _ in enumerate(exps):
exps[idx] /= s
return exps
def score(input):
return softmax([2.0, 3.0])
"""
interpreter = PythonInterpreter()
assert_code_equal(interpreter.interpret(expr), expected_code)
def test_sigmoid_expr():
expr = ast.SigmoidExpr(ast.NumVal(2.0))
expected_code = """
import math
def sigmoid(x):
if x < 0.0:
z = math.exp(x)
return z / (1.0 + z)
return 1.0 / (1.0 + math.exp(-x))
def score(input):
return sigmoid(2.0)
"""
interpreter = PythonInterpreter()
assert_code_equal(interpreter.interpret(expr), expected_code)
def test_reused_expr():
reused_expr = ast.ExpExpr(ast.NumVal(1.0), to_reuse=True)
expr = ast.BinNumExpr(reused_expr, reused_expr, ast.BinNumOpType.DIV)
expected_code = """
import math
def score(input):
var0 = math.exp(1.0)
return (var0) / (var0)
"""
interpreter = PythonInterpreter()
assert_code_equal(interpreter.interpret(expr), expected_code)
def test_unsupported_exprs():
interpreter = PythonInterpreter()
expr = ast.Expr()
with pytest.raises(NotImplementedError, match="No handler found for 'Expr'"):
interpreter.interpret(expr)
expr = ast.BinVectorNumExpr(
ast.VectorVal([ast.NumVal(1), ast.NumVal(2)]),
ast.NumVal(1),
ast.BinNumOpType.ADD)
with pytest.raises(NotImplementedError, match="Op 'ADD' is unsupported"):
interpreter.interpret(expr)
expr = ast.BinVectorExpr(
ast.VectorVal([ast.NumVal(1), ast.NumVal(2)]),
ast.VectorVal([ast.NumVal(3), ast.NumVal(4)]),
ast.BinNumOpType.MUL)
with pytest.raises(NotImplementedError, match="Op 'MUL' is unsupported"):
interpreter.interpret(expr)
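# --- Hedged usage sketch (illustrative, not part of the original test suite) ---
# The end-to-end pattern the tests above exercise: build an AST, render it with
# PythonInterpreter, then exec() the generated scorer. Only names already used
# in this module are assumed.
def _example_round_trip():
    expr = ast.BinNumExpr(ast.FeatureRef(0), ast.NumVal(2), ast.BinNumOpType.MUL)
    code = PythonInterpreter().interpret(expr)
    scope = {}
    exec(code, scope)
    return scope["score"]([21.0])  # -> 42.0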
| 24.833656
| 99
| 0.586962
|
c3a82459471dc869057c03114ed773a0b015c608
| 3,285
|
py
|
Python
|
hippynn/layers/pairs/analysis.py
|
tautomer/hippynn
|
df4504a5ea4680cfc61f490984dcddeac7ed99ee
|
[
"BSD-3-Clause"
] | 21
|
2021-11-17T00:56:35.000Z
|
2022-03-22T05:57:11.000Z
|
hippynn/layers/pairs/analysis.py
|
tautomer/hippynn
|
df4504a5ea4680cfc61f490984dcddeac7ed99ee
|
[
"BSD-3-Clause"
] | 4
|
2021-12-17T16:16:53.000Z
|
2022-03-16T23:50:38.000Z
|
hippynn/layers/pairs/analysis.py
|
tautomer/hippynn
|
df4504a5ea4680cfc61f490984dcddeac7ed99ee
|
[
"BSD-3-Clause"
] | 6
|
2021-11-30T21:09:31.000Z
|
2022-03-18T07:07:32.000Z
|
"""
Modules for analyzing pair-valued data
"""
import torch
class RDFBins(torch.nn.Module):
def __init__(self, bins, species_set):
super().__init__()
if bins is None:
raise TypeError("Bins must not be None!")
bins = torch.as_tensor(bins)
species_set = torch.as_tensor(species_set)
self.register_buffer("bins", bins.to(torch.float))
self.register_buffer("species_set", species_set.to(torch.int))
def bin_info(self):
# Note: widths don't make perfect sense for non-evenly-spaced bins.
centers = (self.bins[1:] + self.bins[:-1]) / 2
widths = self.bins[1:] - self.bins[:-1]
return centers, widths
def forward(self, pair_dist, pair_first, pair_second, one_hot, n_molecules):
n_species = one_hot.shape[-1]
n_bins = self.bins.shape[0] - 1
rdf = torch.zeros((n_species, n_species, n_bins), dtype=pair_dist.dtype, device=pair_dist.device)
for i in range(n_species):
for j in range(n_species):
mask = one_hot[:, i][pair_first] & one_hot[:, j][pair_second]
maskpairs = pair_dist[mask]
less = maskpairs.unsqueeze(-1) < self.bins.unsqueeze(0)
less_counts = less.sum(dim=0)
rdf[i, j] = less_counts[..., 1:] - less_counts[..., :-1]
return (rdf / n_molecules).unsqueeze(0)
def min_dist_info(rij_list, j_list, mol_index, atom_index, inv_real_atoms, n_atoms_max, n_molecules):
n_atoms = rij_list.shape[0]
dev = rij_list.device
if rij_list.shape[1] == 0:
# empty neighbors list
min_dist_mol = torch.zeros(n_molecules, dtype=rij_list.dtype, device=dev)
min_dist_atom = torch.zeros(n_atoms, dtype=rij_list.dtype, device=dev)
min_dist_mol_atom_locs = torch.zeros(n_molecules, dtype=torch.int64, device=dev)
min_dist_atomneigh = torch.zeros((n_atoms), dtype=torch.int64, device=dev)
return min_dist_mol, min_dist_mol_atom_locs, min_dist_atom, min_dist_atomneigh
rmag_list = rij_list.norm(dim=2)
maxr = rmag_list.max()
rmaglist_new = rmag_list.clone()
rmaglist_new[rmaglist_new == 0] = maxr
min_dist_atom, where_min_dist_atom = rmaglist_new.min(dim=1)
ara = torch.arange(n_atoms, dtype=where_min_dist_atom.dtype, device=dev)
min_dist_atomneigh = j_list[ara, where_min_dist_atom]
min_dist_molatom = torch.full((n_molecules, n_atoms_max), maxr, device=rmag_list.device, dtype=rmag_list.dtype)
min_dist_molatom[mol_index, atom_index] = min_dist_atom
min_dist_mol, where_min_dist_mol = min_dist_molatom.min(dim=1)
atom1 = where_min_dist_mol
atom1_batchloc = torch.arange(n_molecules, device=dev, dtype=torch.int64) * n_atoms_max + atom1
atom1_atomloc = inv_real_atoms[atom1_batchloc]
atom2 = atom_index[min_dist_atomneigh[atom1_atomloc]]
min_dist_mol_atom_locs = torch.stack([atom1, atom2], dim=1)
return min_dist_mol, min_dist_mol_atom_locs, min_dist_atom, min_dist_atomneigh
class MinDistModule(torch.nn.Module):
def forward(self, rmag_list, j_list, mol_index, atom_index, inv_real_atoms, n_atoms_max, n_molecules):
return min_dist_info(rmag_list, j_list, mol_index, atom_index, inv_real_atoms, n_atoms_max, n_molecules)
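# --- Hedged usage sketch (illustrative, not part of the original module) ---
# A tiny 3-atom, 2-species system with two precomputed pairs; shapes follow the
# RDFBins.forward() signature above. All concrete values are assumptions.
if __name__ == "__main__":
    bins = torch.linspace(0.0, 5.0, steps=6)             # 5 distance bins
    species_set = torch.tensor([1, 6])                   # e.g. H and C
    rdf_mod = RDFBins(bins, species_set)
    one_hot = torch.tensor([[True, False],
                            [False, True],
                            [True, False]])              # species flag per atom
    pair_first = torch.tensor([0, 1])
    pair_second = torch.tensor([1, 2])
    pair_dist = torch.tensor([1.2, 2.7])
    rdf = rdf_mod(pair_dist, pair_first, pair_second, one_hot, n_molecules=1)
    print(rdf.shape)                                      # torch.Size([1, 2, 2, 5])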
| 42.115385
| 115
| 0.690715
|
cbf6e8f4cd6b47661d58d562e10061005b71d760
| 2,764
|
py
|
Python
|
examples/poleval_ner/train.py
|
sdadas/yast
|
f9cd471ae3c915acb8111dd85a53acc72348c355
|
[
"Apache-2.0"
] | 2
|
2018-12-18T03:12:13.000Z
|
2018-12-31T18:03:27.000Z
|
examples/poleval_ner/train.py
|
sdadas/yast
|
f9cd471ae3c915acb8111dd85a53acc72348c355
|
[
"Apache-2.0"
] | 6
|
2020-01-28T21:59:18.000Z
|
2022-02-09T23:29:00.000Z
|
examples/poleval_ner/train.py
|
sdadas/yast
|
f9cd471ae3c915acb8111dd85a53acc72348c355
|
[
"Apache-2.0"
] | 1
|
2020-07-07T18:25:15.000Z
|
2020-07-07T18:25:15.000Z
|
import argparse
import os
from typing import List
from dataset import DataSet
from feature.base import OneHotFeature, Feature
from feature.casing import CasingFeature
from feature.chars import CharsFeature, CharCNNFeature
from feature.elmo import ELMoEmbeddingFeature
from feature.fstlexicon import FSTFeature
from model import TaggingModel, TaggingPrediction, ModelParams
from utils.files import ProjectPath
def parse_args() -> argparse.Namespace:
default_path = os.path.join(os.path.dirname(os.path.realpath(__file__)), "data")
parser = argparse.ArgumentParser()
parser.add_argument("--data-path", type=str, default=default_path)
parser.add_argument("--submodel", action="store_true")
parser.add_argument("--no-wikipedia", action="store_true")
parser.add_argument("--no-lexicons", action="store_true")
return parser.parse_args()
def create_features(dataset: DataSet, base_path: ProjectPath, args: argparse.Namespace) -> List[Feature]:
fst = lambda name: base_path.join("lexicons", name + ".fst")
labels = lambda name: dataset.labels(name)
features = [
ELMoEmbeddingFeature("value", base_path.join("elmo")),
CharCNNFeature("value", CharsFeature.default_alphabet()),
CasingFeature("value")
]
lexicons = [
FSTFeature("base", "naming", labels("naming"), fst("names"), to_lower="all", otag="other"),
FSTFeature("value", "polimorf", labels("polimorf"), fst("polimorf"), to_lower="first", otag="O"),
FSTFeature("value", "gazetteer", labels("gazetteer"), fst("gazetteer"), to_lower="no", otag="other"),
FSTFeature("value", "nelexicon", labels("nelexicon"), fst("nelexicon"), to_lower="no", otag="O"),
FSTFeature("value", "extras", labels('extras'), fst("extras"), to_lower="no", otag="O")
]
if not args.no_lexicons: features.extend(lexicons)
if not args.no_wikipedia: features.append(OneHotFeature("wikipedia", labels("wikipedia")))
if args.submodel: features.append(OneHotFeature("type", labels("type")))
return features
if __name__ == '__main__':
args = parse_args()
os.environ["NER_PATH"] = args.data_path
path: ProjectPath = ProjectPath("NER_PATH")
meta_path = path.join("meta.json").get()
train: DataSet = DataSet(path.join("nkjp.txt").get(), meta_path, padding=80)
train, valid = train.train_test_split(0.95)
features: List[Feature] = create_features(train, path, args)
model = TaggingModel(features, train.column("subtype" if args.submodel else "type"))
model.train(train, valid=valid, epochs=50)
TaggingModel.save(model, path.join("submodel" if args.submodel else "model").get())
pred: TaggingPrediction = model.test(train)
pred.evaluate(ignore_tagging_scheme=args.submodel)
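# --- Hedged usage note (illustrative; the data path is an assumption) ---
# The script is driven by the argparse flags defined above, e.g.:
#     python train.py --data-path /path/to/data --submodel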
| 46.847458
| 109
| 0.710203
|
c7826df739aa1b0a96eacf322f958e0b11a3bbf9
| 1,121
|
py
|
Python
|
auth-api/src/auth_api/schemas/suspension_reason_code.py
|
karthik-aot/sbc-auth
|
f24028040fda67d4f10ae9b608b8832c15d2a8ad
|
[
"Apache-2.0"
] | null | null | null |
auth-api/src/auth_api/schemas/suspension_reason_code.py
|
karthik-aot/sbc-auth
|
f24028040fda67d4f10ae9b608b8832c15d2a8ad
|
[
"Apache-2.0"
] | null | null | null |
auth-api/src/auth_api/schemas/suspension_reason_code.py
|
karthik-aot/sbc-auth
|
f24028040fda67d4f10ae9b608b8832c15d2a8ad
|
[
"Apache-2.0"
] | 1
|
2019-07-25T18:20:41.000Z
|
2019-07-25T18:20:41.000Z
|
# Copyright © 2019 Province of British Columbia
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Manager for suspension reason schema and export."""
from auth_api.models import SuspensionReasonCode as SuspensionReasonCodeModel
from .base_schema import BaseSchema
class SuspensionReasonCodeSchema(BaseSchema): # pylint: disable=too-many-ancestors, too-few-public-methods
"""This is the schema for the SuspensionReasonCode model."""
class Meta: # pylint: disable=too-few-public-methods
"""Maps all of the SuspensionReasonCode fields to a default schema."""
model = SuspensionReasonCodeModel
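# --- Hedged usage sketch (illustrative only; nothing outside this file is guaranteed) ---
# Assuming BaseSchema follows the usual marshmallow pattern, serialising a model
# instance fetched elsewhere would look roughly like:
#
#     data = SuspensionReasonCodeSchema().dump(code)   # `code` is a SuspensionReasonCodeModel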
| 40.035714
| 107
| 0.764496
|
76cef1311627f42e7217a0181c1df77854d00703
| 8,311
|
py
|
Python
|
tests/io/test_parquet_local.py
|
yhzqb/kedro
|
619d7f0ccb51895d3bb43d30e3dee9d4d0cebcab
|
[
"Apache-2.0"
] | 1
|
2021-11-19T05:36:47.000Z
|
2021-11-19T05:36:47.000Z
|
tests/io/test_parquet_local.py
|
yhzqb/kedro
|
619d7f0ccb51895d3bb43d30e3dee9d4d0cebcab
|
[
"Apache-2.0"
] | null | null | null |
tests/io/test_parquet_local.py
|
yhzqb/kedro
|
619d7f0ccb51895d3bb43d30e3dee9d4d0cebcab
|
[
"Apache-2.0"
] | 1
|
2021-11-19T05:36:49.000Z
|
2021-11-19T05:36:49.000Z
|
# Copyright 2018-2019 QuantumBlack Visual Analytics Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
# OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE, AND
# NONINFRINGEMENT. IN NO EVENT WILL THE LICENSOR OR OTHER CONTRIBUTORS
# BE LIABLE FOR ANY CLAIM, DAMAGES, OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF, OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
# The QuantumBlack Visual Analytics Limited ("QuantumBlack") name and logo
# (either separately or in combination, "QuantumBlack Trademarks") are
# trademarks of QuantumBlack. The License does not grant you any right or
# license to the QuantumBlack Trademarks. You may not use the QuantumBlack
# Trademarks or any confusingly similar mark as a trademark for your product,
# or use the QuantumBlack Trademarks in any other manner that might cause
# confusion in the marketplace, including but not limited to in advertising,
# on websites, or on software.
#
# See the License for the specific language governing permissions and
# limitations under the License.
from random import randint
import numpy as np
import pandas as pd
import pytest
from pandas.util.testing import assert_frame_equal
from kedro.io import DataSetError, ParquetLocalDataSet
from kedro.io.core import Version
def _generate_sample_pandas_df(
randomised: bool = False, with_nan: bool = False
) -> pd.DataFrame:
"""
    Return a dummy data frame with two columns [Name, Age]
Args:
randomised: Set to true to randomise ages.
with_nan: Include an entry with nan age value.
Returns:
Dataframe as specified by the arguments.
"""
def age(default):
return randint(1, 120) if randomised else default
return pd.DataFrame(
{
"Name": ["Alex", "Bob", "Clarke", "Dave"],
"Age": [age(31), age(12), age(65), np.nan if with_nan else age(29)],
}
)
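# Hedged illustration (not part of the original tests): with the defaults
# (randomised=False, with_nan=False) the helper above returns roughly
#
#      Name  Age
# 0    Alex   31
# 1     Bob   12
# 2  Clarke   65
# 3    Dave   29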
@pytest.fixture(params=[(False, False)])
def input_data(request):
randomised, with_nan = request.param
return _generate_sample_pandas_df(randomised, with_nan)
@pytest.fixture
def data_path(tmp_path):
return str(tmp_path / "data")
@pytest.fixture(params=[dict()])
def parquet_data_set(data_path, request):
return ParquetLocalDataSet(filepath=data_path, **request.param)
@pytest.fixture
def versioned_parquet_data_set(data_path, load_version, save_version):
return ParquetLocalDataSet(
filepath=data_path, version=Version(load_version, save_version)
)
class TestParquetLocalDataSet:
_AGE_COLUMN = "Age"
@pytest.mark.parametrize("input_data", [(False, False)], indirect=True)
def test_save(self, parquet_data_set, input_data):
"""Test saving and overriding the data set."""
parquet_data_set.save(input_data)
# Check that it is loaded correctly
loaded_data = parquet_data_set.load()
assert_frame_equal(loaded_data, input_data)
# Save on top of existing
new_data = _generate_sample_pandas_df(randomised=True)
parquet_data_set.save(new_data)
# Assert data has been overwritten
reloaded_data = parquet_data_set.load()
assert_frame_equal(reloaded_data, new_data)
@pytest.mark.parametrize("input_data", [(False, True)], indirect=True)
@pytest.mark.parametrize(
"parquet_data_set", [dict(load_args={"columns": [_AGE_COLUMN]})], indirect=True
)
def test_load_with_args(self, parquet_data_set, input_data):
"""Test loading the data set with extra load arguments specified."""
parquet_data_set.save(input_data)
loaded_data = parquet_data_set.load()
assert_frame_equal(loaded_data, input_data[[self._AGE_COLUMN]])
@pytest.mark.parametrize(
"parquet_data_set", [dict(save_args={"compression": "GZIP"})], indirect=True
)
def test_save_with_args(self, parquet_data_set, input_data):
"""Test loading the data set with extra save arguments specified."""
parquet_data_set.save(input_data)
loaded_data = parquet_data_set.load()
assert_frame_equal(loaded_data, input_data)
def test_save_none(self, parquet_data_set):
"""Check the error when trying to save None."""
pattern = r"Saving `None` to a `DataSet` is not allowed"
with pytest.raises(DataSetError, match=pattern):
parquet_data_set.save(None)
def test_str_representation(self):
"""Test string representation of the data set instance."""
parquet_data_set = ParquetLocalDataSet("test_file.parquet")
pattern = (
"ParquetLocalDataSet(engine=auto, "
"filepath=test_file.parquet, save_args={})"
)
assert pattern in str(parquet_data_set)
def test_exists(self, parquet_data_set, input_data):
"""Test `exists` method invocation."""
assert not parquet_data_set.exists()
parquet_data_set.save(input_data)
assert parquet_data_set.exists()
class TestParquetLocalDataSetVersioned:
def test_save_and_load(self, versioned_parquet_data_set, input_data):
"""Test that saved and reloaded data matches the original one for
the versioned data set."""
versioned_parquet_data_set.save(input_data)
reloaded_df = versioned_parquet_data_set.load()
assert_frame_equal(reloaded_df, input_data)
def test_no_versions(self, versioned_parquet_data_set):
"""Check the error if no versions are available for load."""
pattern = r"Did not find any versions for ParquetLocalDataSet\(.+\)"
with pytest.raises(DataSetError, match=pattern):
versioned_parquet_data_set.load()
def test_exists(self, versioned_parquet_data_set, input_data):
"""Test `exists` method invocation for versioned data set."""
assert not versioned_parquet_data_set.exists()
versioned_parquet_data_set.save(input_data)
assert versioned_parquet_data_set.exists()
def test_prevent_overwrite(self, versioned_parquet_data_set, input_data):
"""Check the error when attempting to override the data set if the
corresponding parquet file for a given save version already exists."""
versioned_parquet_data_set.save(input_data)
pattern = (
r"Save path \`.+\` for ParquetLocalDataSet\(.+\) must "
r"not exist if versioning is enabled\."
)
with pytest.raises(DataSetError, match=pattern):
versioned_parquet_data_set.save(input_data)
@pytest.mark.parametrize(
"load_version", ["2019-01-01T23.59.59.999Z"], indirect=True
)
@pytest.mark.parametrize(
"save_version", ["2019-01-02T00.00.00.000Z"], indirect=True
)
def test_save_version_warning(
self, versioned_parquet_data_set, load_version, save_version, input_data
):
"""Check the warning when saving to the path that differs from
the subsequent load path."""
pattern = (
r"Save path `.*/{}/data` did not match load path "
r"`.*/{}/data` for ParquetLocalDataSet\(.+\)".format(
save_version, load_version
)
)
with pytest.warns(UserWarning, match=pattern):
versioned_parquet_data_set.save(input_data)
def test_version_str_repr(self, load_version, save_version):
"""Test that version is in string representation of the class instance
when applicable."""
filepath = "data"
ds = ParquetLocalDataSet(filepath=filepath)
ds_versioned = ParquetLocalDataSet(
filepath=filepath, version=Version(load_version, save_version)
)
assert filepath in str(ds)
assert "version" not in str(ds)
assert filepath in str(ds_versioned)
ver_str = "version=Version(load={}, save='{}')".format(
load_version, save_version
)
assert ver_str in str(ds_versioned)
| 38.299539
| 87
| 0.694862
|
39976539502be29b0dc4668a3a8f5004bdc28fe6
| 28,460
|
py
|
Python
|
e7cataTradeBot.py
|
svzhukov/angelica-the-bot
|
c4923a9f58b2b4eadf009b9844b87bbd2c8e885f
|
[
"MIT"
] | null | null | null |
e7cataTradeBot.py
|
svzhukov/angelica-the-bot
|
c4923a9f58b2b4eadf009b9844b87bbd2c8e885f
|
[
"MIT"
] | null | null | null |
e7cataTradeBot.py
|
svzhukov/angelica-the-bot
|
c4923a9f58b2b4eadf009b9844b87bbd2c8e885f
|
[
"MIT"
] | null | null | null |
from __future__ import annotations
import json
import uuid
import os
import random
import boto3
import discord
import jsonpickle
import configparser
import traceback
import inspect
import sys
from enum import IntEnum
from typing import List
from typing import Optional
from typing import Union
from discord.ext import commands
from pyaztro import Aztro
from time import time
from botocore.exceptions import ClientError
# region Context Event
class EventLogger():
def __init__(self):
self.pfx = ''
self.target = ''
self.invoker = ''
self.guild = ''
self.action = ''
self.function = ''
def __repr__(self):
return "{}{}{}{}{}".format(self.action, self.target, self.function, self.invoker, self.guild)
@staticmethod
def logger(target: str, action: str = None, ctx=None):
logger = EventLogger()
try:
logger.invoker = "{} <{}>".format(ctx.message.author, ctx.message.author.id)
logger.guild = "{} <{}>".format(ctx.guild, ctx.guild.id)
logger.action = action if action else "{}{}".format(ctx.prefix, ctx.command)
except AttributeError:
logger.action = action
finally:
logger.target = target
logger.function = inspect.getframeinfo(inspect.currentframe().f_back.f_back).function
return logger
@staticmethod
def log(target: str, action: str = None, ctx=None):
logger = EventLogger.logger(target, action, ctx)
print(logger)
# region Properties
@property
def function(self):
return " -> [{}]".format(self._function) if self._function else ''
@function.setter
def function(self, value):
self._function = value
@property
def target(self):
return " {}".format(self._target) if self._target else ''
@target.setter
def target(self, value):
self._target = value
@property
def invoker(self):
return " by {}".format(self._invoker) if self._invoker else ''
@invoker.setter
def invoker(self, value):
self._invoker = value
@property
def guild(self):
return " in {}".format(self._guild) if self._guild else ''
@guild.setter
def guild(self, value):
self._guild = value
@property
def action(self):
return "{} -".format(self._action) if self._action else ''
@action.setter
def action(self, value):
self._action = value
# endregion
# endregion
# region Classes
class Config:
config = None
@staticmethod
def load():
Config.setup_config()
Config.set_env_vars()
@staticmethod
def setup_config():
Config.config = configparser.ConfigParser()
Config.config.read('config.ini')
@staticmethod
def set_env_vars():
try:
os.environ['CLOUDCUBE_ACCESS_KEY_ID'] = Config.config['DEFAULT']['CLOUDCUBE_ACCESS_KEY_ID']
os.environ['CLOUDCUBE_SECRET_ACCESS_KEY'] = Config.config['DEFAULT']['CLOUDCUBE_SECRET_ACCESS_KEY']
os.environ['DISCORD_BOT_TOKEN'] = Config.config['DEFAULT']['DISCORD_BOT_TOKEN']
os.environ['DISCORD_BOT_PREFIX'] = Config.config['DEFAULT']['DISCORD_BOT_PREFIX']
os.environ['DISCORD_BOT_PREFIX_SECOND'] = Config.config['DEFAULT']['DISCORD_BOT_PREFIX_SECOND']
EventLogger.log("From config.ini", action="load")
except KeyError:
EventLogger.log("From os", action="load")
class S3FileManager:
client = None
bucket = 'cloud-cube-eu'
key = 'ln75ki813ek6/public/'
guild_ids_file_name = 'guild_ids.txt'
guild_file_prefix = 'guild-'
guild_file_suffix = '.json'
guild_ids: List[int] = list()
@staticmethod
def load():
EventLogger.log("S3FileManager", action="load")
S3FileManager.setup_client()
S3FileManager.download()
@staticmethod
def setup_client():
S3FileManager.client = boto3.client(
's3',
aws_access_key_id=os.environ['CLOUDCUBE_ACCESS_KEY_ID'],
aws_secret_access_key=os.environ['CLOUDCUBE_SECRET_ACCESS_KEY'],
region_name='eu-west-1')
@staticmethod
def download():
S3FileManager.download_guild_ids()
S3FileManager.download_guilds()
@staticmethod
def upload(ctx):
# Save and upload guild_ids.txt if needed
if ctx.guild.id not in S3FileManager.guild_ids:
S3FileManager.guild_ids.append(ctx.guild.id)
S3FileManager.upload_guild_ids()
# Save and upload guild-<id>.json
S3FileManager.upload_guild(ctx)
@staticmethod
def download_guild_ids():
EventLogger.log(S3FileManager.guild_ids_file_name, action="download")
try:
S3FileManager.client.download_file(S3FileManager.bucket, S3FileManager.key + S3FileManager.guild_ids_file_name,
S3FileManager.guild_ids_file_name)
with open(S3FileManager.guild_ids_file_name, 'r') as f:
S3FileManager.guild_ids = json.load(f)
except ClientError as e:
if e.response['Error']['Code'] == "404":
S3FileManager.guild_ids = []
S3FileManager.upload_guild_ids()
@staticmethod
def download_guilds():
EventLogger.log("All guild-<id>.json", action="download")
for guild in S3FileManager.guild_ids:
S3FileManager.client.download_file(S3FileManager.bucket, S3FileManager.key + S3FileManager.file_name(guild),
S3FileManager.file_name(guild))
@staticmethod
def upload_guild_ids():
EventLogger.log(S3FileManager.guild_ids_file_name, action="upload")
with open(S3FileManager.guild_ids_file_name, 'w') as f:
json.dump(S3FileManager.guild_ids, f)
S3FileManager.client.upload_file(S3FileManager.guild_ids_file_name, S3FileManager.bucket,
S3FileManager.key + S3FileManager.guild_ids_file_name)
@staticmethod
def upload_guild(ctx):
file = S3FileManager.file_name(ctx.guild.id)
EventLogger.log(file, ctx=ctx, action="upload")
with open(file, 'w') as f:
json.dump(jsonpickle.encode(Guild.guild(ctx)), f)
S3FileManager.client.upload_file(file, S3FileManager.bucket, S3FileManager.key + file)
@staticmethod
def file_name(guild_id) -> str:
return S3FileManager.guild_file_prefix + str(guild_id) + S3FileManager.guild_file_suffix
class Guild:
guilds: List[Guild] = list()
def __init__(self, guild_id: int, name: str, users: List[User] = None, requests: List[Request] = None, bot_var: BotVar = None):
self.id = guild_id
self.name = name
self.users = users if users else []
self.requests = requests if requests else []
self.bot_var = bot_var if bot_var else BotVar()
def __repr__(self):
return "{}-{}".format(self.name, self.id)
def assign_admin_role(self, ctx, role: Union[int, str]):
self.bot_var.admin_role = role
S3FileManager.upload(ctx)
def assign_min_score(self, ctx, score: int):
self.bot_var.min_score = score
S3FileManager.upload(ctx)
@staticmethod
    # Finds the guild, creating a new one if it does not exist
def guild(ctx) -> Guild:
gld = Guild.find_guild(ctx)
return gld if gld else Guild.add(ctx)
@staticmethod
def find_guild(ctx) -> Optional[Guild]:
return next((guild for guild in Guild.guilds if guild.id == ctx.guild.id), None)
@staticmethod
def add(ctx) -> Guild:
EventLogger.log("NEW trade hub {} is now open!".format(ctx.guild), ctx=ctx)
gld = Guild(ctx.guild.id, ctx.guild.name)
Guild.guilds.append(gld)
return gld
@staticmethod
def load():
EventLogger.log("Guilds[{}]".format(len(S3FileManager.guild_ids)), action="load")
Guild.guilds = []
for guild in S3FileManager.guild_ids:
with open(S3FileManager.file_name(guild), 'r') as f:
Guild.guilds.append(jsonpickle.decode(json.load(f)))
class Request:
total_stages = 2
points_per_stage = 1
def __init__(self, guild_id: int, user_id: int, cata_id: uuid.UUID, date_created: float = None,
req_id: uuid.UUID = None, stage: int = None, active: bool = None):
self.guild_id = guild_id
self.id = req_id if req_id else uuid.uuid4()
self.user_id = user_id
self.cata_id = cata_id
self.stage = stage if stage else 0
self.active = active if active else True
self.date_created = date_created if date_created else time()
def __repr__(self):
finished = " - finished" if self.is_complete() else ''
return "{name} [{stage}/{total}{finished}]".format(name=self.name(), stage=self.stage, total=Request.total_stages, finished=finished)
def stage_advance(self) -> bool:
self.stage += 1
self.active = False if self.is_complete() else True
return self.active
def name(self) -> str:
return Catalyst.catalyst(self.cata_id).name
def is_complete(self) -> bool:
return self.stage == Request.total_stages
def cancel(self):
self.active = False
@staticmethod
def repr(ctx, req_id: uuid.UUID) -> str:
return Request.find_request(ctx, req_id).__repr__() if Request.find_request(ctx, req_id) else "None"
@staticmethod
def requests(ctx) -> List[Request]:
return Guild.guild(ctx).requests
@staticmethod
def find_request(ctx, req_id: uuid.UUID) -> Optional[Request]:
return next((req for req in Request.requests(ctx) if req.id == req_id), None)
@staticmethod
def add(ctx, user_id: int, cata_id: uuid.UUID) -> Request:
req = Request(ctx.guild.id, user_id, cata_id)
EventLogger.log("Don't miss the fresh deal for {}<{}>!".format(req.name(), req.id), ctx=ctx)
Guild.guild(ctx).requests.append(req)
return req
class User:
bot_dev_id = 118435077477761029
class AdminRoleCheckError(commands.CommandError):
def __init__(self, message: str = None):
self.message = message if message else "Comand is restricted to bot admin role"
def __repr__(self):
return self.message
class RoleManagementCheckError(commands.CommandError):
def __init__(self, message: str = None):
self.message = message if message else "Command requires discord role management permissions"
def __repr__(self):
return self.message
def __init__(self, guild_id: int, user_id: int, name: str, score: int = None, request_id: uuid.UUID = None, assistance: int = None):
self.guild_id = guild_id
self.id = user_id
self.name = name
self.score = score if score else 0
self.request_id = request_id if request_id else None
self.assistance = assistance if assistance else 0
def __repr__(self):
return "{}-{}".format(self.name, self.id)
def finished_request_count(self, ctx) -> int:
return len([req for req in Request.requests(ctx) if req.user_id == self.id and not req.active and req.is_complete()])
def assign_request(self, ctx, request: Request):
self.request_id = request.id
self.score -= Request.points_per_stage * Request.total_stages
S3FileManager.upload(ctx)
def request_cancel(self, ctx) -> int:
score_refund = Request.points_per_stage * (Request.total_stages - Request.find_request(ctx, self.request_id).stage)
self.score += score_refund
Request.find_request(ctx, self.request_id).cancel()
self.request_id = None
S3FileManager.upload(ctx)
return score_refund
def thank(self, ctx, user: User):
if user.id == self.id: return
active = Request.find_request(ctx, self.request_id).stage_advance()
self.request_id = self.request_id if active else None
if user.id == bot.user.id:
self.score += Request.points_per_stage
elif user.id != self.id:
user.score += Request.points_per_stage
user.assistance += Request.points_per_stage
S3FileManager.upload(ctx)
def gift(self, ctx, user: User):
if user.id != bot.user.id and user.id != self.id:
self.score -= Request.points_per_stage
self.assistance += Request.points_per_stage
user.score += Request.points_per_stage
S3FileManager.upload(ctx)
def set_score(self, ctx, score: int):
self.score = score
S3FileManager.upload(ctx)
@staticmethod
def users(ctx) -> List[User]:
return Guild.guild(ctx).users
@staticmethod
    # Finds the user, creating a new one if it does not exist
def user(ctx, dc_user) -> User:
usr = User.find_user(ctx, dc_user.id)
return usr if usr else User.add(ctx, dc_user.id, dc_user.name)
@staticmethod
def find_user(ctx, user_id: int) -> Optional[User]:
return next((usr for usr in User.users(ctx) if usr.id == user_id), None)
@staticmethod
def add(ctx, user_id: int, user_name: str) -> User:
usr = User(ctx.guild.id, user_id, user_name)
EventLogger.log("Welcome, new trader {} <{}>!".format(usr.name, usr.id), ctx=ctx)
Guild.guild(ctx).users.append(usr)
return usr
@staticmethod
def remove(ctx, user: User):
EventLogger.log("Farewell, {} <{}>!".format(user.name, user.id), ctx=ctx)
if user.request_id:
Request.find_request(ctx, user.request_id).cancel()
Guild.guild(ctx).users.remove(user)
S3FileManager.upload(ctx)
# Checks
@staticmethod
def has_bot_admin_role(ctx) -> bool:
role = Guild.guild(ctx).bot_var.admin_role
try:
if int(role) in [role.id for role in ctx.message.author.roles]:
return True
else:
raise User.AdminRoleCheckError
except ValueError:
if role in [role.name for role in ctx.message.author.roles]:
return True
else:
raise User.AdminRoleCheckError
@staticmethod
def has_role_management_permissions(ctx) -> bool:
if True in [role.permissions.manage_roles for role in ctx.message.author.roles] or ctx.message.author.id == ctx.guild.owner.id:
return True
else:
raise User.RoleManagementCheckError
class Catalyst:
catalysts: List[Catalyst] = list()
class Rarity(IntEnum):
rare = 1
epic = 4
def __init__(self, cata_id: uuid.UUID, sign_id: uuid.UUID, name: str, rarity_id: Catalyst.Rarity):
self.id = cata_id
self.sign_id = sign_id
self.name = name
self.rarity_id = rarity_id
def __repr__(self):
return "{} <{}>".format(self.name, self.id)
@staticmethod
def search(query: str) -> List[Catalyst]:
return [cata for cata in Catalyst.catalysts if query.lower() in cata.name.lower()]
@staticmethod
def catalyst(cata_id: uuid.UUID) -> Catalyst:
return next(cata for cata in Catalyst.catalysts if cata.id == cata_id)
class Sign:
signs: List[Sign] = list()
def __init__(self, sign_id: uuid.UUID, name: str, catas: List[Catalyst]):
self.id = sign_id
self.name = name
self.catas = catas
@staticmethod
def all_names() -> List[str]:
return [sign.name.lower() for sign in Sign.signs]
@staticmethod
def load():
with open('catalysts.json', 'r') as f:
Sign.signs = jsonpickle.decode(json.load(f))
Catalyst.catalysts = [cata for cata_list in [sign.catas for sign in Sign.signs] for cata in cata_list]
EventLogger.log("Signs[{}], Catalysts[{}]".format(len(Sign.signs), len(Catalyst.catalysts)), action="load")
class BotVar:
default_role = 'Angelica\'s Crew'
default_min_score = -6
def __init__(self, admin_role: Union[str, int] = None, min_score: int = None):
self.admin_role = admin_role if admin_role else BotVar.default_role
self.min_score = min_score if min_score else BotVar.default_min_score
# endregion
# region Boot up
def load():
Config.load()
S3FileManager.load()
Sign.load()
Guild.load()
load()
# endregion
# region Commands
# Admin
bot = commands.Bot(command_prefix=(os.environ['DISCORD_BOT_PREFIX'], os.environ['DISCORD_BOT_PREFIX_SECOND']))
@bot.command(name='adminrole')
@commands.check(User.has_role_management_permissions)
async def com_admin_role(ctx, *args):
if len(args):
Guild.guild(ctx).assign_admin_role(ctx, ' '.join(args))
await ctx.send("New bot admin role has been set to **" + ' '.join(args) + "**")
else:
await ctx.send("Current bot admin role is **{}**, to set a new one specify either role id or role name"
.format(Guild.guild(ctx).bot_var.admin_role))
@bot.command(name='minscore')
@commands.check(User.has_bot_admin_role)
async def com_min_score(ctx, score=None):
try:
Guild.guild(ctx).assign_min_score(ctx, int(score))
await ctx.send("New minimum score has been set to **" + score + "**")
except (ValueError, TypeError):
await ctx.send("Current minimum score is **" + str(Guild.guild(ctx).bot_var.min_score) + "**, to set a new one specify a value")
@bot.command(name='cancel')
@commands.check(User.has_bot_admin_role)
async def com_cancel(ctx):
try:
user = User.find_user(ctx, ctx.message.mentions[0].id)
score_refund = user.request_cancel(ctx)
await ctx.send("**{}**'s active request is canceled, **{}** points are refunded back".format(user.name, score_refund))
except (AttributeError, IndexError):
await ctx.send("Nothing to cancel ¯\\_(ツ)_/¯")
@bot.command(name='remove')
@commands.check(User.has_bot_admin_role)
async def com_remove(ctx, user_id=None):
try:
remove_id = ctx.message.mentions[0].id if len(ctx.message.mentions) else int(user_id)
User.remove(ctx, User.find_user(ctx, remove_id))
await ctx.send("User **{}** has been removed".format(user_id))
except (ValueError, AttributeError, TypeError):
await ctx.send("User not found")
@bot.command(name='setscore')
@commands.check(User.has_bot_admin_role)
async def com_set_score(ctx, mention=None, value=None):
try:
user = User.user(ctx, ctx.message.mentions[0])
user.set_score(ctx, int(value))
await ctx.send("**{}**'s score successfully set to **{}**".format(user.name, user.score))
except (ValueError, IndexError, TypeError, AttributeError):
await ctx.send("Please provide correct arguments")
@bot.command(name='test')
@commands.check(User.has_bot_admin_role)
async def com_test(ctx, arg=None):
pass
# All user
@bot.command(name='respond')
async def com_respond(ctx):
print("Hello, I'm alive and responding!")
await ctx.send("Hello, I'm alive and responding!")
@bot.command(name='board')
async def com_board(ctx):
msg = "{} guild catalyst exchange board:\n".format(ctx.guild.name.capitalize())
for user in User.users(ctx):
msg += "**{}** - score: **{}**, request: **{}**, assistance: **[{}]**\n". \
format(user.name, user.score, Request.repr(ctx, user.request_id), user.assistance)
await ctx.send(msg)
@bot.command(name='request', aliases=['req'])
async def com_request(ctx, *args):
query = ' '.join(args) if len(args) else ''
user = User.user(ctx, ctx.message.author)
catas = Catalyst.search(query)
if user.request_id:
await ctx.send("**{}**, you already have active request for **{}**".format(user.name, Request.repr(ctx, user.request_id)))
elif len(query) < 3:
await ctx.send("Provide at least **3** characters for catalyst name")
elif len(catas) == 0:
await ctx.send("No catalyst found")
elif len(catas) > 1:
await ctx.send("Found more than one catalyst, please specify")
elif catas[0].rarity_id == Catalyst.Rarity.epic:
await ctx.send("Can't request epic catalysts")
elif user.score <= Guild.guild(ctx).bot_var.min_score:
await ctx.send("**{}**, your exchange score **({})** has reached its minimum threshold **({})**."
" Aid other guild members to improve your score"
.format(user.name, user.score, Guild.guild(ctx).bot_var.min_score))
else:
request = Request.add(ctx, user.id, catas[0].id)
user.assign_request(ctx, request)
await ctx.send("**{}** has requested **{}**. User's new score: **{}**".format(user.name, request, user.score))
@bot.command(name='signs', aliases=['sign', 'horoscope'])
async def com_horoscope(ctx, query=None):
if not query:
await ctx.send("Here's the list of available zodiac signs: {}".format(Sign.all_names()))
else:
sign = query.lower() if query.lower() in Sign.all_names() else random.choice(Sign.all_names())
print(sign)
await ctx.send(Aztro(sign=sign).description)
@bot.command(name='thank', aliases=['thanks'])
async def com_thank(ctx):
try:
mention = User.user(ctx, ctx.message.mentions[0])
author = User.find_user(ctx, ctx.message.author.id)
request = Request.find_request(ctx, author.request_id)
if mention.id == ctx.message.author.id:
await ctx.send("Don't do that!")
else:
author.thank(ctx, mention)
msg = "No problem :blush:, here's a blessing from the Goddess for you **+1** :pray:! **{}**'s request: **{}**" \
.format(author.name, request) if mention.id == bot.user.id else \
"Thanks for the assistance, **{}**, here's your **+1** :thumbsup:! **{}**'s request: **{}**" \
.format(mention.name, author.name, request)
await ctx.send(msg)
except (IndexError, AttributeError) as e:
await ctx.send("Please mention a user you want to thank, you must have an active request")
@bot.command(name='catalysts', aliases=['catas'])
async def com_catalysts(ctx):
await send_file(ctx, 'catas.jpg')
@bot.command(name='how')
async def com_how(ctx):
await send_file(ctx, 'how.jpg')
@bot.command(name='aid')
async def com_aid(ctx):
try:
mention = User.find_user(ctx, ctx.message.mentions[0].id)
author = User.find_user(ctx, ctx.message.author.id)
request = Request.find_request(ctx, mention.request_id)
await ctx.send("**{}**, check your guild box, there might be some :gift: **{}**'s for you from **{}**!"
.format(mention.name, request.name(), author.name))
except (IndexError, AttributeError):
await ctx.send("Please mention user with an active request you want to notify")
@bot.command(name='gift')
@commands.cooldown(3, 28800, type=commands.BucketType.member)
async def com_gift(ctx):
try:
author = User.find_user(ctx, ctx.message.author.id)
mention = User.user(ctx, ctx.message.mentions[0])
if author.score < 1:
await ctx.send("Your score must be above zero to gift to other users")
elif mention.id == author.id:
await ctx.send("**{}** throws their own party, everyone is invited :partying_face:!".format(author.name))
elif mention.id == bot.user.id:
await ctx.send("Thank you, **{}** :blush:, I have a gift for you as well **+1** :heart:!".format(ctx.message.author.name))
else:
author.gift(ctx, mention)
await ctx.send("**{}** feels generous today and gifts one of their points to **{} +1** :heart:!"
.format(ctx.message.author.name, ctx.message.mentions[0].name))
except IndexError:
await ctx.send("Please mention user you want to send gifts to")
except AttributeError:
await ctx.send("Get on the board first!")
@bot.command(name='ahelp')
async def com_help(ctx):
embed = discord.Embed(title="Angelica The Bot",
description="See more info about the bot on [GitHub](https://github.com/svzhukov/angelica-the-bot)",
color=0xffc0cb)
embed.add_field(name="`[User commands]`", value="All arguments should be provided without **<**, **>** brackets", inline=False)
embed.add_field(name="!how **<--**", value="Quick visual tutorial that shows how to use the bot", inline=False)
embed.add_field(name="!request <catalyst_query>", value="Makes a request for named catalysts, **-2** to points", inline=False)
embed.add_field(name="!thanks <@user>", value="Thanks the user who provided the assistance, "
"**+1** to exchange and assistance scores of the mentioned user", inline=False)
embed.add_field(name="!aid <@user>", value="Notifies mentioned user about your aid, optional command", inline=False)
embed.add_field(name="!board", value="Guild board with user scores and active requests", inline=False)
embed.add_field(name="!gift <@user>", value="Gifts **1** of your points to the mentioned user, "
"gifter receives **+1** assistance in return. Has a cooldown", inline=False)
embed.add_field(name="!catalysts", value="Shows neat picture with all the catalysts", inline=False)
embed.add_field(name="!signs <sign_name>", value="Your daily horoscope, provide no argument to see all available signs,"
" if provided sign is not on the list random one will be chosen", inline=False)
embed.add_field(name="**\n`[Admin commands]`**", value="Requires bot admin role", inline=False)
embed.add_field(name="!adminrole <role_id> or <role_name>",
value="Sets bot admin role, requires discord role management permissions to call, "
"pass no arguments to view current role",
inline=False)
embed.add_field(name="!minscore <score>", value="Sets the minimum score threshold value, default is **-6**", inline=False)
embed.add_field(name="!setscore <@user new_score>",
value="Sets the score manually, should only be used in cases of malfunction. "
"Note that all exchange scores from active bot users, taking into account current requests,"
" should add up close to **0**", inline=False)
embed.add_field(name="!cancel <@user>", value="Cancels current request and refunds remaining points", inline=False)
embed.add_field(name="!remove <@user> or <user_discord_id>", value="Removes user from the board", inline=False)
me = bot.get_user(User.bot_dev_id)
embed.set_footer(text="Developed by {0} (Discord ID: {0.id})".format(me), icon_url=me.avatar_url)
await ctx.send(embed=embed)
# endregion
# region Events
@bot.event
async def on_guild_join(guild):
print("A NEW guild {} welcomes Angelica!".format(guild))
@bot.event
async def on_guild_remove(guild):
print("Angelica just got kicked out from {}, too bad for them!".format(guild))
@bot.event
async def on_command(ctx):
EventLogger.log('-', ctx=ctx)
@bot.event
async def on_ready():
print('Bot is ready')
await bot.change_presence(status=discord.Status.online, activity=discord.Game("!ahelp"))
@bot.event
async def on_command_error(ctx, error):
if isinstance(error, User.AdminRoleCheckError) or isinstance(error, User.RoleManagementCheckError):
EventLogger.log(error.message, ctx=ctx)
await ctx.send(error.message)
elif isinstance(error, discord.ext.commands.errors.CommandOnCooldown):
EventLogger.log(str(error), ctx=ctx)
await ctx.send("**{}**, {}".format(ctx.message.author.name, error))
elif isinstance(error, discord.ext.commands.errors.CommandNotFound):
# Disable the spam from other bots with the same prefixes
pass
else:
traceback.print_exception(type(error), error, error.__traceback__)
# endregion
# region Utils
async def send_file(ctx, file_name: str):
with open(file_name, 'rb') as f:
await ctx.send(file=discord.File(f, file_name))
# endregion
#######################################################
bot.run(os.environ['DISCORD_BOT_TOKEN'])
| 37.105606
| 141
| 0.640267
|
fe71767979c726aae4185a4c29b2db6cd9b9277e
| 2,829
|
py
|
Python
|
preprocessing.py
|
luizcartolano2/ann-option-pricing
|
94875b624987c0476feced496e67c4fa42a27767
|
[
"MIT"
] | 1
|
2021-10-02T02:32:27.000Z
|
2021-10-02T02:32:27.000Z
|
preprocessing.py
|
luizcartolano2/ann-option-pricing
|
94875b624987c0476feced496e67c4fa42a27767
|
[
"MIT"
] | 2
|
2020-09-18T03:04:20.000Z
|
2020-09-18T03:09:58.000Z
|
preprocessing.py
|
luizcartolano2/ann-option-pricing
|
94875b624987c0476feced496e67c4fa42a27767
|
[
"MIT"
] | null | null | null |
import pandas as pd
import numpy as np
from impliedVolatility.ImpliedVolatility import ImpliedVolatility
# read DataFrames with available info about options
df_contracts = pd.read_csv('data-source/contracts.csv', usecols=['commodity', 'contract', 'expiry_date'])
df_fut = pd.read_csv('data-source/und-fut.csv', usecols=['commodity', 'contract', 'date', 'future_price'])
df_opt = pd.read_csv('data-source/option-price.csv', usecols=['commodity', 'contract', 'date', 'put_or_call', 'strike_value', 'settle_price'])
# merge the contracts DataFrame into the options DataFrame so that
# every option we are looking at gets its contract expiration date
df_opt = pd.merge(df_opt, df_contracts, how='left', left_on=['commodity', 'contract'], right_on=['commodity', 'contract'])
# merge the futures DataFrame into the options DataFrame so that
# every option we are looking at gets its underlying future price
df_opt = pd.merge(df_opt, df_fut, how='left', left_on=['commodity', 'contract', 'date'], right_on=['commodity', 'contract', 'date'])
# to calculate the option tenor we first need the number of days between
# the "operation date" (i.e. the day the data was collected) and the
# expiration date of the contract
days_to_mat = (pd.to_datetime(df_opt['expiry_date']) - pd.to_datetime(df_opt['date'])).dt.days.values
# the tenor is the number of days to maturity divided by 365.25,
# since we count calendar (running) days
tenor = (days_to_mat + 1) / 365.25
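# Hedged worked example (dates are assumptions): an option priced on 2020-01-02
# that expires on 2020-03-02 has days_to_mat = 60, so
# tenor = (60 + 1) / 365.25 ≈ 0.167 years.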
# add the days to maturity and the tenor
# to the options info DataFrame
df_opt['days_to_mat'] = days_to_mat
df_opt['tenor'] = tenor
# drop rows that contain any NaN values
df_opt = df_opt[df_opt['days_to_mat'] > 0]
df_opt = df_opt.dropna(axis=0, how='any')
# to calculate the implied volatility we use the Black-Scholes model
# together with a bisection routine to find the root of the pricing equation
prices = df_opt['settle_price'].values.reshape((-1, 1))
underlying = df_opt['future_price'].values.reshape((-1, 1))
strike = df_opt['strike_value'].values.reshape((-1, 1))
call_put = df_opt['put_or_call'].values.reshape((-1, 1)).astype('float64')
tenor = df_opt['tenor'].values.reshape((-1, 1))
# instantiate the implied volatility class and run the bisection method
implied_volatility = ImpliedVolatility(np.log(1.00501), 0.0001, 2000, 0)
i, temp, vols = implied_volatility.implied_volatility_bissec(prices, underlying, strike, call_put, tenor)
# add the implied volatility
# to the options DataFrame
df_opt['volimp'] = vols
# save the options DataFrame that will later be used to train the
# machine learning model that predicts option prices
df_opt.to_csv('data-source/options-data.csv')
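# --- Hedged sketch (illustrative, not part of the original pipeline) ---
# The ImpliedVolatility helper imported above is not shown in this file.  As an
# illustration only, a generic bisection solver for the implied volatility of a
# futures option under Black-76 (zero rate assumed) could look like this; every
# name, bound and tolerance below is an assumption.
import math
def _norm_cdf(x):
    # standard normal CDF via the error function
    return 0.5 * (1.0 + math.erf(x / math.sqrt(2.0)))
def _black76_call(f, k, t, sigma):
    # Black-76 call price on a future, with discounting ignored (zero rate)
    d1 = (math.log(f / k) + 0.5 * sigma ** 2 * t) / (sigma * math.sqrt(t))
    d2 = d1 - sigma * math.sqrt(t)
    return f * _norm_cdf(d1) - k * _norm_cdf(d2)
def _implied_vol_bisection(price, f, k, t, lo=1e-4, hi=5.0, tol=1e-8, max_iter=200):
    # classic bisection: the call price is increasing in sigma, so shrink
    # [lo, hi] until the model price matches the observed price
    for _ in range(max_iter):
        mid = 0.5 * (lo + hi)
        if _black76_call(f, k, t, mid) > price:
            hi = mid
        else:
            lo = mid
        if hi - lo < tol:
            break
    return 0.5 * (lo + hi)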
| 47.949153
| 142
| 0.75327
|
f9d2e8d72798f0787f611c6795991625a3685857
| 6,406
|
py
|
Python
|
src/tutorial_scenarios/userMovement/main.py
|
vyk1/YAFS
|
514f8362c90923fa28f871fcf179b755a9315c47
|
[
"MIT"
] | null | null | null |
src/tutorial_scenarios/userMovement/main.py
|
vyk1/YAFS
|
514f8362c90923fa28f871fcf179b755a9315c47
|
[
"MIT"
] | null | null | null |
src/tutorial_scenarios/userMovement/main.py
|
vyk1/YAFS
|
514f8362c90923fa28f871fcf179b755a9315c47
|
[
"MIT"
] | null | null | null |
"""
In this simulation, users move between different nodes and are linked to other nodes.
@author: Isaac Lera
"""
import os
import time
import json
import random
import logging.config
import networkx as nx
from pathlib import Path
from yafs.core import Sim
from yafs.application import create_applications_from_json
from yafs.topology import Topology
from yafs.placement import JSONPlacement
from yafs.path_routing import DeviceSpeedAwareRouting
from yafs.distribution import deterministic_distribution, deterministicDistributionStartPoint
from collections import defaultdict
class CustomStrategy():
def __init__(self, pathResults, listIdApps):
self.activations = 0
self.pathResults = pathResults
self.listUsers = []
self.numberMaxUsers = 100
self.listIdApps = listIdApps
self.placeAt = {}
def createUser(self, sim):
app_name = random.sample(self.listIdApps, 1)[0]
app = sim.apps[app_name]
msg = app.get_message("M.USER.APP.%i" % app_name)
dist = deterministic_distribution(30, name="Deterministic")
node = random.sample(sim.topology.G.nodes(), 1)[0]
idDES = sim.deploy_source(app_name, id_node=node, msg=msg, distribution=dist)
self.listUsers.append(idDES)
self.placeAt[idDES] = node
return idDES
def __call__(self, sim, routing):
# logging.info("Activating Custom process - number %i " % self.activations)
self.activations += 1
# In this case, the new users not change the topology
# routing.invalid_cache_value = True # when the service change the cache of the Path.routing is outdated.
        # Each tick we either create a new user, move an existing one, or remove one
        # (roughly 60% create, 32% move and 8% remove, given the two independent draws below)
if len(self.listUsers) == 0:
self.createUser(sim)
if random.random() < 0.6:
# we create a new user
idDES = self.createUser(sim)
logging.info(" Creating a new user %i on node %i" % (idDES, self.placeAt[idDES]))
elif random.random() < 0.8:
# we move a user from one node to other
userDES = random.sample(self.listUsers, 1)[0]
newNode = random.sample(sim.topology.G.nodes(), 1)[0]
logging.info(" Moving a user %i from node %i to %i" % (userDES, self.placeAt[userDES], newNode))
sim.alloc_DES[self.placeAt[userDES]] = newNode
else:
# we remove an user
userDES = random.sample(self.listUsers, 1)[0]
sim.undeploy_source(userDES)
self.listUsers.remove(userDES)
logging.info(" Removing a user %i on node %i" % (userDES, self.placeAt[userDES]))
def main(stop_time, it):
folder_results = Path("results/")
folder_results.mkdir(parents=True, exist_ok=True)
folder_results = str(folder_results) + "/"
"""
TOPOLOGY
"""
t = Topology()
    # You can also create a topology from JSON files. Check out the examples folder
size = 5
    t.G = nx.generators.binomial_tree(size)  # NetworkX provides many graph generators
# Definition of mandatory attributes of a Topology
## Attr. on edges
# PR and BW are 1 unit
attPR_BW = {x: 1 for x in t.G.edges()}
nx.set_edge_attributes(t.G, name="PR", values=attPR_BW)
nx.set_edge_attributes(t.G, name="BW", values=attPR_BW)
## Attr. on nodes
# IPT
attIPT = {x: 100 for x in t.G.nodes()}
nx.set_node_attributes(t.G, name="IPT", values=attIPT)
nx.write_gexf(t.G,
folder_results + "graph_binomial_tree_%i" % size) # you can export the Graph in multiples format to view in tools like Gephi, and so on.
    print(t.G.nodes())  # node ids can be str or int
"""
APPLICATION or SERVICES
"""
dataApp = json.load(open('data/appDefinition.json'))
apps = create_applications_from_json(dataApp)
"""
SERVICE PLACEMENT
"""
placementJson = json.load(open('data/allocDefinition.json'))
placement = JSONPlacement(name="Placement", json=placementJson)
"""
    Defining the ROUTING algorithm that decides how messages are routed among modules in the topology
"""
selectorPath = DeviceSpeedAwareRouting()
"""
SIMULATION ENGINE
"""
s = Sim(t, default_results_path=folder_results + "sim_trace")
"""
Deploy services == APP's modules
"""
for aName in apps.keys():
s.deploy_app(apps[aName], placement, selectorPath)
"""
Deploy users
"""
### IN THIS CASE, We control the users from our custom strategy
# userJSON = json.load(open('data/usersDefinition.json'))
# for user in userJSON["sources"]:
# app_name = user["app"]
# app = s.apps[app_name]
# msg = app.get_message(user["message"])
# node = user["id_resource"]
# dist = deterministic_distribution(100, name="Deterministic")
# idDES = s.deploy_source(app_name, id_node=node, msg=msg, distribution=dist)
"""
This internal monitor in the simulator (a DES process) changes the sim's behaviour.
    You can have multiple monitors doing different or the same tasks.
In this case, it changes the number or movement of users.
"""
listIdApps = [x["id"] for x in dataApp]
dist = deterministicDistributionStartPoint(stop_time / 4., stop_time / 2.0 / 10.0, name="Deterministic")
evol = CustomStrategy(folder_results, listIdApps)
s.deploy_monitor("RandomAllocation",
evol,
dist,
**{"sim": s, "routing": selectorPath}) # __call__ args
"""
RUNNING - last step
"""
logging.info(" Performing simulation: %i " % it)
    s.run(stop_time)  # To test deployments, set test_initial_deploy to True
s.print_debug_assignaments()
print("Number of new users: %i" % len(evol.listUsers))
if __name__ == '__main__':
logging.config.fileConfig(os.getcwd() + '/logging.ini')
nIterations = 1 # iteration for each experiment
simulationDuration = 20000
    # Run each experiment with a different random seed
for iteration in range(nIterations):
random.seed(iteration)
logging.info("Running experiment it: - %i" % iteration)
start_time = time.time()
main(stop_time=simulationDuration,
it=iteration)
print("\n--- %s seconds ---" % (time.time() - start_time))
print("Simulation Done!")
| 33.715789
| 155
| 0.646113
|
a25aa06c9c0e5a5371f420a7ced973e39b0d38af
| 3,271
|
py
|
Python
|
scripts/books_database.py
|
scianand/Programming-With-Databases
|
4b4363be91f066f2852d78ae2a9240731c46f3b9
|
[
"Apache-2.0"
] | null | null | null |
scripts/books_database.py
|
scianand/Programming-With-Databases
|
4b4363be91f066f2852d78ae2a9240731c46f3b9
|
[
"Apache-2.0"
] | null | null | null |
scripts/books_database.py
|
scianand/Programming-With-Databases
|
4b4363be91f066f2852d78ae2a9240731c46f3b9
|
[
"Apache-2.0"
] | null | null | null |
"""
SQLAlchemy ORM example: both schema creation and data access
user: postgres
password: postgres
"""
from sqlalchemy import create_engine, Column, Integer, String, ForeignKey
from sqlalchemy.orm import sessionmaker, relationship
from sqlalchemy.ext.declarative import declarative_base
# connect to the database
engine = create_engine('postgresql://postgres:postgres@localhost/books')  # 'postgresql://' is the canonical dialect name; bare 'postgres://' is deprecated/removed in newer SQLAlchemy
Base = declarative_base()
# class representation of authors table
class Author(Base):
__tablename__ = 'authors'
author_id = Column(Integer, primary_key=True)
first_name = Column(String(length=50))
last_name = Column(String(length=50))
def __repr__(self):
return '''<Author(author_id='{0}', first_name='{1}', last_name='{2}')>'''.format(
self.author_id, self.first_name, self.last_name)
# class representation of books table
class Book(Base):
__tablename__ = 'books'
book_id = Column(Integer, primary_key=True)
title = Column(String(length=50))
number_of_pages = Column(Integer)
def __repr__(self):
return '''<Book(book_id='{0}', title='{1}', number_of_pages='{2}')>'''.format(
self.book_id, self.title, self.number_of_pages)
# class representation of bookauthors table
class BookAuthor(Base):
__tablename__ = 'bookauthors'
bookauthor_id = Column(Integer, primary_key=True)
author_id = Column(Integer, ForeignKey('authors.author_id'))
book_id = Column(Integer, ForeignKey('books.book_id'))
def __repr__(self):
return '''<BookAuthor(bookauthor_id='{0}', author_id='{1}', book_id='{2}')>'''.format(
self.bookauthor_id, self.author_id, self.book_id)
    # relationships linking a BookAuthor row to its Author and Book
author = relationship("Author")
book = relationship("Book")
# create all the above tables using engine
Base.metadata.create_all(engine)
def create_session():
Session = sessionmaker(bind=engine)
return Session()
def add_book(title, number_of_pages, first_name, last_name):
# create book record
book = Book(title=title, number_of_pages=number_of_pages)
session = create_session()
try:
existing_author = session.query(Author).filter(Author.first_name == first_name,
Author.last_name == last_name).first()
session.add(book)
if existing_author is not None:
session.flush()
pairing = BookAuthor(author_id=existing_author.author_id,
book_id = book.book_id)
else:
# create author record
author = Author(first_name=first_name, last_name=last_name)
session.add(author)
session.flush()
pairing = BookAuthor(author_id=author.author_id, book_id=book.book_id)
session.add(pairing)
session.commit()
except:
session.rollback()
raise
finally:
session.close()
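# Illustrative read-back sketch (not part of the original script): lists every
# book together with its author(s) by joining through the bookauthors table.
def list_books():
    session = create_session()
    try:
        rows = (session.query(Book, Author)
                .join(BookAuthor, BookAuthor.book_id == Book.book_id)
                .join(Author, Author.author_id == BookAuthor.author_id)
                .all())
        for book, author in rows:
            print('{0} by {1} {2}'.format(book.title, author.first_name, author.last_name))
    finally:
        session.close()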
if __name__ == '__main__':
print("Input new book!")
title = input("What's the book name?\n")
number_of_pages = int(input("How many pages are in the book?\n"))
first_name = input("What is the first name of the author?\n")
last_name = input("What is the last name of the author?")
add_book(title, number_of_pages, first_name, last_name)
print("Done!")
| 30.858491
| 94
| 0.674411
|
c741406d9837b164561a6db2eba6d6678dc8826c
| 1,268
|
py
|
Python
|
getNearestClusterAccuracy.py
|
arjish/meta-meta-classification
|
3e1df26a486094bd9cb394ff99d3c29b587b66c3
|
[
"MIT"
] | 3
|
2021-07-30T23:58:35.000Z
|
2021-11-11T02:05:58.000Z
|
getNearestClusterAccuracy.py
|
arjish/meta-meta-classification
|
3e1df26a486094bd9cb394ff99d3c29b587b66c3
|
[
"MIT"
] | null | null | null |
getNearestClusterAccuracy.py
|
arjish/meta-meta-classification
|
3e1df26a486094bd9cb394ff99d3c29b587b66c3
|
[
"MIT"
] | 1
|
2021-09-13T10:12:16.000Z
|
2021-09-13T10:12:16.000Z
|
import os
import numpy as np
import argparse
parser = argparse.ArgumentParser()
parser.add_argument('data_path', metavar='DATA',
help='path to data')
parser.add_argument('-n', '--n_clusters', default=16, type=int,
help='# of models (default=16)')
parser.add_argument('-p', '--pkl_file', default='filelist', type=str,
help='path to pickle file')
args = parser.parse_args()
data_path = args.data_path
n_clusters = args.n_clusters
pkl_file = args.pkl_file
clusterLabelsFile = 'clusterLabels_'+str(n_clusters)+'_'+pkl_file + '.npy'
labels = np.load(os.path.join(data_path, 'CLUSTER_'+str(n_clusters), clusterLabelsFile))
print('Shape of labels:', labels.shape)
acc_list = []
for i in range(n_clusters):
acc_file = os.path.join(data_path, 'CLUSTER_'+str(n_clusters),
'queryAcc_' + pkl_file + '_cluster' + str(n_clusters) + '_' + str(i) + '.npy')
acc_list.append(np.load(acc_file))
acc_list = np.stack(acc_list, axis=1)
print('Shape of acc_list:', acc_list.shape)
accs_nearest = acc_list[np.arange(len(labels)), labels]
mean_accs = np.mean(accs_nearest)
print('Mean accuracy using nearest model:', mean_accs)
stds = np.std(accs_nearest)
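# 95% confidence interval half-width for the mean accuracy (normal approximation):
# 1.96 * std / sqrt(n), scaled by 100 to express it in percentage points.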
ci95 = 1.96 * stds * 100 / np.sqrt(accs_nearest.shape[0])
print('stds:', stds)
print('ci95:', ci95)
| 31.7
| 88
| 0.711356
|
85a5e232a356a236b8ef07e31d9cb5f5202e7a22
| 538
|
py
|
Python
|
spokes_deepsense_app/config/desktop.py
|
karantkiruba/spokes_deepsense_app
|
90b23fec05866965da8b5a096d0f2084cb3ee4c5
|
[
"MIT"
] | null | null | null |
spokes_deepsense_app/config/desktop.py
|
karantkiruba/spokes_deepsense_app
|
90b23fec05866965da8b5a096d0f2084cb3ee4c5
|
[
"MIT"
] | null | null | null |
spokes_deepsense_app/config/desktop.py
|
karantkiruba/spokes_deepsense_app
|
90b23fec05866965da8b5a096d0f2084cb3ee4c5
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from frappe import _
def get_data():
return [
{
"module_name": "Spokes Deepsense App",
"color": "default",
"icon": "default",
"type": "module",
"label": _("Spokes Deepsense App")
},
{
"module_name": "Reports",
"color": "grey",
"icon": "octicon octicon-file-directory",
"type": "module",
"label": _("Reports")
}
]
| 23.391304
| 65
| 0.459108
|
b7dae768eae9b436605c893c21bd37c1da83bd56
| 3,763
|
py
|
Python
|
model-experiments/token-based-similarity-classification/src/PostProcessor/MapAtR.py
|
yingkitw/Project_CodeNet
|
565ac79043d240c70e5bb9fdfbf5eccdec4e0bc3
|
[
"Apache-2.0"
] | 1,156
|
2021-05-11T08:37:00.000Z
|
2022-03-31T11:28:51.000Z
|
model-experiments/token-based-similarity-classification/src/PostProcessor/MapAtR.py
|
yingkitw/Project_CodeNet
|
565ac79043d240c70e5bb9fdfbf5eccdec4e0bc3
|
[
"Apache-2.0"
] | 29
|
2021-05-11T18:54:49.000Z
|
2022-03-11T15:26:35.000Z
|
model-experiments/token-based-similarity-classification/src/PostProcessor/MapAtR.py
|
yingkitw/Project_CodeNet
|
565ac79043d240c70e5bb9fdfbf5eccdec4e0bc3
|
[
"Apache-2.0"
] | 149
|
2021-05-11T10:38:24.000Z
|
2022-03-21T04:52:28.000Z
|
"""
Program for computing MAP at R metric
using matrix of predicted similarity strength for all sample pairs
Input data are expected to be in the form of two pickled files:
- problem_indices.pcl -- specifies indices of tested problem
for each similarity test experiment
- similarity_probabilities.pcl -- specifies matrix of similarity test
experiment results
Each matrix column describes results
of comparing one problem solution with
solutions of all other problems
"""
import sys
import os
import argparse
import pickle
import numpy as np
def map_at_r(sim, pids):
"""
Function for computing MAP at R metric
    Parameters:
- sim -- 2D numpy array of predicted similarity measures
for all pairs of samples
- pids -- 1D numpy array of problem ids corresponding
to columns of matrix of predicted similarity measures
Returns: computed MAP at R metric
"""
#Count number of source code solutions of each problem
r = np.bincount(pids) - 1
max_r = r.max()
#Mask for ignoring the similarity predictions lying
#beyond the number of solutions of checked problem
mask = np.arange(max_r)[np.newaxis, :] < r[pids][:, np.newaxis]
np.fill_diagonal(sim,-np.inf)
#Select and sort top predictions
result = np.argpartition(-sim,
range(max_r + 1), axis=1)[:, : max_r]
#Get correct similarity predictions
tp = pids[result] == pids[:, np.newaxis]
#Remove all predictions beyond the number of
#solutions of tested problem
tp[~mask] = False
    #Keep only problems that have at least one other solution
valid = r[pids] > 0
#Compute cumulative probability of correct predictions
p = np.cumsum(tp, axis=1,
dtype = np.float32) / np.arange(1, max_r+1,
dtype = np.float32)[np.newaxis, :]
#average across similarity prediction for each tested problem
ap = (p * tp).sum(axis=1)[valid] / r[pids][valid]
val = np.mean(ap).item()
return val
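def _example_map_at_r():
    # Illustrative sketch (not part of the original program): exercises map_at_r
    # on a tiny synthetic case with three solutions of problem 0 and two of
    # problem 1. Note that map_at_r modifies the similarity matrix in place
    # (its diagonal is set to -inf), so a copy is passed here.
    pids = np.array([0, 0, 0, 1, 1])
    sim = np.random.RandomState(0).rand(5, 5)
    return map_at_r(sim.copy(), pids)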
def main(args):
"""
Main function of program for computing MAP at R metric
    Arguments are described below
"""
if not os.path.exists(args.similarities):
sys.exit(f"Directory {args.similarities} with similarity analysis does not exist")
with open(f"{args.similarities}/problem_indices.pcl", "rb") as _f:
pids = pickle.load(_f)
with open(f"{args.similarities}/similarity_probabilities.pcl", "rb") as _f:
sim = pickle.load(_f)
n_problem_solutions = pids.shape[0]
if sim.shape[0] != n_problem_solutions * n_problem_solutions:
sys.exit(
f"Number of similarity samples {n_problem_solutions.shape[0]} ",
f" is not square of number of problem solutions {n_problem_solutions}")
map_r = map_at_r(sim.reshape(n_problem_solutions, n_problem_solutions),
pids)
print("Map@R is ", map_r)
#######################################################################
# Command line arguments are described below
#######################################################################
if __name__ == '__main__':
print("\nComputation of MAP at R metric of similarity analysis")
#Command-line arguments
parser = argparse.ArgumentParser(
description = "Computation of MAP at R metric")
parser.add_argument('similarities', type=str,
help='Directory with similarity results')
args = parser.parse_args()
print("Parameter settings used:")
for k,v in sorted(vars(args).items()):
print("{}: {}".format(k,v))
main(args)
| 39.197917
| 90
| 0.610949
|
2b70563c55726013ccb81ff2c26481883d3666e3
| 47,553
|
py
|
Python
|
octavia_dashboard/api/rest/lbaasv2.py
|
TomEros/octavia-dashboard
|
7fb4bac2e835699b0b386336a9f09aeda67a5996
|
[
"Apache-2.0"
] | null | null | null |
octavia_dashboard/api/rest/lbaasv2.py
|
TomEros/octavia-dashboard
|
7fb4bac2e835699b0b386336a9f09aeda67a5996
|
[
"Apache-2.0"
] | null | null | null |
octavia_dashboard/api/rest/lbaasv2.py
|
TomEros/octavia-dashboard
|
7fb4bac2e835699b0b386336a9f09aeda67a5996
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2015 IBM Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""API over the neutron LBaaS v2 service.
"""
import _thread as thread
from time import sleep
from django.conf import settings
from django.views import generic
from horizon import conf
import octavia_dashboard
from openstack import connection
try:
from openstack import config as occ
except ImportError:
from os_client_config import config as occ
from openstack_dashboard.api import neutron
from openstack_dashboard.api.rest import urls
from openstack_dashboard.api.rest import utils as rest_utils
neutronclient = neutron.neutronclient
def _get_sdk_connection(request):
"""Creates an SDK connection based on the request.
:param request: Django request object
:returns: SDK connection object
"""
# NOTE(mordred) Nothing says love like two inverted booleans
# The config setting is NO_VERIFY which is, in fact, insecure.
# get_one_cloud wants verify, so we pass 'not insecure' to verify.
insecure = getattr(settings, 'OPENSTACK_SSL_NO_VERIFY', False)
cacert = getattr(settings, 'OPENSTACK_SSL_CACERT', None)
# Pass interface to honor 'OPENSTACK_ENDPOINT_TYPE'
interface = getattr(settings, 'OPENSTACK_ENDPOINT_TYPE', 'publicURL')
# Pass load_yaml_config as this is a Django service with its own config
# and we don't want to accidentally pick up a clouds.yaml file. We want to
# use the settings we're passing in.
cloud_config = occ.OpenStackConfig(load_yaml_config=False).get_one_cloud(
verify=not insecure,
cacert=cacert,
interface=interface,
region_name=request.user.services_region,
auth_type='token',
auth=dict(
project_id=request.user.project_id,
project_domain_id=request.user.domain_id,
token=request.user.token.unscoped_token,
auth_url=request.user.endpoint),
app_name='octavia-dashboard',
app_version=octavia_dashboard.__version__)
return connection.from_config(cloud_config=cloud_config)
def _sdk_object_to_list(object):
"""Converts an SDK generator object to a list of dictionaries.
:param object: SDK generator object
:returns: List of dictionaries
"""
result_list = []
for item in object:
result_list.append(_get_sdk_object_dict(item))
return result_list
def _get_sdk_object_dict(object):
"""Converts an SDK object to a dictionary.
Fixes any SDK imposed object oddities.
:param object: SDK object
:returns: Dictionary
"""
item_dict = object.to_dict()
if 'is_admin_state_up' in item_dict:
item_dict['admin_state_up'] = item_dict['is_admin_state_up']
return item_dict
def poll_loadbalancer_status(request, loadbalancer_id, callback,
from_state='PENDING_UPDATE', to_state='ACTIVE',
callback_kwargs=None):
"""Poll for the status of the load balancer.
Polls for the status of the load balancer and calls a function when the
status changes to a specified state.
:param request: django request object
:param loadbalancer_id: id of the load balancer to poll
:param callback: function to call when polling is complete
:param from_state: initial expected state of the load balancer
:param to_state: state to check for
:param callback_kwargs: kwargs to pass into the callback function
"""
interval = conf.HORIZON_CONFIG['ajax_poll_interval'] / 1000.0
status = from_state
while status == from_state:
sleep(interval)
conn = _get_sdk_connection(request)
lb = conn.load_balancer.get_load_balancer(loadbalancer_id)
status = lb.provisioning_status
if status == to_state:
kwargs = {'loadbalancer_id': loadbalancer_id}
if callback_kwargs:
kwargs.update(callback_kwargs)
callback(request, **kwargs)
def create_loadbalancer(request):
data = request.DATA
conn = _get_sdk_connection(request)
build_kwargs = dict(
project_id=request.user.project_id,
vip_subnet_id=data['loadbalancer']['vip_subnet_id'],
name=data['loadbalancer'].get('name'),
description=data['loadbalancer'].get('description'),
vip_address=data['loadbalancer'].get('vip_address'),
admin_state_up=data['loadbalancer'].get('admin_state_up'),
)
flavor_id = data['loadbalancer'].get('flavor_id')
if flavor_id:
build_kwargs['flavor_id'] = flavor_id
availability_zone = data['loadbalancer'].get('availability_zone')
if availability_zone:
build_kwargs['availability_zone'] = availability_zone
loadbalancer = conn.load_balancer.create_load_balancer(**build_kwargs)
if data.get('listener'):
# There is work underway to add a new API to LBaaS v2 that will
# allow us to pass in all information at once. Until that is
# available we use a separate thread to poll for the load
# balancer status and create the other resources when it becomes
# active.
args = (request, loadbalancer.id, create_listener)
kwargs = {'from_state': 'PENDING_CREATE'}
thread.start_new_thread(poll_loadbalancer_status, args, kwargs)
return _get_sdk_object_dict(loadbalancer)
def create_listener(request, **kwargs):
"""Create a new listener.
"""
data = request.DATA
try:
default_tls_ref = data['certificates'][0]
except (KeyError, IndexError):
default_tls_ref = None
conn = _get_sdk_connection(request)
# TODO(johnsom) Add SNI support
# https://bugs.launchpad.net/octavia/+bug/1714294
listener = conn.load_balancer.create_listener(
protocol=data['listener']['protocol'],
protocol_port=data['listener']['protocol_port'],
load_balancer_id=kwargs['loadbalancer_id'],
name=data['listener'].get('name'),
description=data['listener'].get('description'),
connection_limit=data['listener'].get('connection_limit'),
default_tls_container_ref=default_tls_ref,
sni_container_refs=None,
admin_state_up=data['listener'].get('admin_state_up'),
insert_headers=data['listener'].get('insert_headers'),
timeout_client_data=data['listener'].get('timeout_client_data'),
timeout_member_connect=data['listener'].get('timeout_member_connect'),
timeout_member_data=data['listener'].get('timeout_member_data'),
timeout_tcp_inspect=data['listener'].get('timeout_tcp_inspect'),
allowed_cidrs=data['listener'].get('allowed_cidrs'),
# Replace empty string by None (uses default tls cipher string)
tls_ciphers=data['listener'].get('tls_ciphers') or None,
)
if data.get('pool'):
args = (request, kwargs['loadbalancer_id'], create_pool)
kwargs = {'callback_kwargs': {'listener_id': listener.id}}
thread.start_new_thread(poll_loadbalancer_status, args, kwargs)
return _get_sdk_object_dict(listener)
def create_l7_policy(request, **kwargs):
"""Create a new l7 policy.
"""
data = request.DATA
conn = _get_sdk_connection(request)
l7_policy = conn.load_balancer.create_l7_policy(
action=data['l7policy']['action'],
admin_state_up=data['l7policy'].get('admin_state_up'),
description=data['l7policy'].get('description'),
listener_id=kwargs['listener_id'],
name=data['l7policy'].get('name'),
position=data['l7policy'].get('position'),
redirect_pool_id=data['l7policy'].get('redirect_pool_id'),
redirect_url=data['l7policy'].get('redirect_url'),
)
return _get_sdk_object_dict(l7_policy)
def create_l7_rule(request, **kwargs):
"""Create a new l7 rule.
"""
data = request.DATA
conn = _get_sdk_connection(request)
l7_rule = conn.load_balancer.create_l7_rule(
admin_state_up=data['l7rule'].get('admin_state_up'),
compare_type=data['l7rule']['compare_type'],
invert=data['l7rule'].get('invert'),
key=data['l7rule'].get('key'),
l7_policy=kwargs['l7_policy_id'],
type=data['l7rule']['type'],
rule_value=data['l7rule']['rule_value'],
)
return _get_sdk_object_dict(l7_rule)
def create_pool(request, **kwargs):
"""Create a new pool.
"""
data = request.DATA
conn = _get_sdk_connection(request)
pool = conn.load_balancer.create_pool(
protocol=data['pool']['protocol'],
lb_algorithm=data['pool']['lb_algorithm'],
session_persistence=data['pool'].get('session_persistence'),
listener_id=kwargs['listener_id'],
loadbalancer_id=kwargs['loadbalancer_id'],
name=data['pool'].get('name'),
description=data['pool'].get('description'),
admin_state_up=data['pool'].get('admin_state_up'),
tls_enabled=data['pool'].get('tls_enabled'),
# Replace empty string by None (uses default tls cipher string)
tls_ciphers=data['pool'].get('tls_ciphers') or None,
)
if data.get('members'):
args = (request, kwargs['loadbalancer_id'], add_member)
kwargs = {'callback_kwargs': {'pool_id': pool.id,
'index': 0}}
thread.start_new_thread(poll_loadbalancer_status, args, kwargs)
elif data.get('monitor'):
args = (request, kwargs['loadbalancer_id'], create_health_monitor)
kwargs = {'callback_kwargs': {'pool_id': pool.id}}
thread.start_new_thread(poll_loadbalancer_status, args, kwargs)
return _get_sdk_object_dict(pool)
def create_health_monitor(request, **kwargs):
"""Create a new health monitor for a pool.
"""
data = request.DATA
conn = _get_sdk_connection(request)
health_mon = conn.load_balancer.create_health_monitor(
type=data['monitor']['type'],
delay=data['monitor']['delay'],
timeout=data['monitor']['timeout'],
max_retries=data['monitor']['max_retries'],
max_retries_down=data['monitor']['max_retries_down'],
pool_id=kwargs['pool_id'],
http_method=data['monitor'].get('http_method'),
url_path=data['monitor'].get('url_path'),
expected_codes=data['monitor'].get('expected_codes'),
admin_state_up=data['monitor'].get('admin_state_up'),
name=data['monitor'].get('name')
)
return _get_sdk_object_dict(health_mon)
def create_flavor(request, **kwargs):
"""Create a new flavor.
"""
data = request.DATA
conn = _get_sdk_connection(request)
flavor = conn.load_balancer.create_flavor(
name=data['flavor']['name'],
flavor_profile_id=data['flavor']['flavor_profile_id'],
description=data['flavor'].get('description'),
enabled=data['flavor'].get('enabled'),
)
return _get_sdk_object_dict(flavor)
def create_flavor_profile(request, **kwargs):
"""Create a new flavor profile.
"""
data = request.DATA
conn = _get_sdk_connection(request)
flavor_profile = conn.load_balancer.create_flavor(
name=data['flavor_profile']['name'],
provider_name=data['flavor_profile']['provider_name'],
flavor_data=data['flavor_profile']['flavor_data'],
)
return _get_sdk_object_dict(flavor_profile)
def add_member(request, **kwargs):
"""Add a member to a pool.
"""
data = request.DATA
members = data.get('members')
pool_id = kwargs.get('pool_id')
if kwargs.get('members_to_add'):
members_to_add = kwargs['members_to_add']
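        # Locate, in the request's member list, the entry whose id matches the
        # next member queued for addition.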
index = [members.index(member) for member in members
if member['id'] == members_to_add[0]][0]
loadbalancer_id = data.get('loadbalancer_id')
else:
index = kwargs.get('index')
loadbalancer_id = kwargs.get('loadbalancer_id')
member = members[index]
conn = _get_sdk_connection(request)
monitor_address = member.get('monitor_address')
member = conn.load_balancer.create_member(
pool_id,
address=member['address'],
protocol_port=member['protocol_port'],
subnet_id=member['subnet_id'],
weight=member.get('weight'),
monitor_address=monitor_address if monitor_address else None,
monitor_port=member.get('monitor_port'),
admin_state_up=member.get('admin_state_up'),
backup=member.get('backup', False),
name=member.get('name'),
)
index += 1
if kwargs.get('members_to_add'):
args = (request, loadbalancer_id, update_member_list)
members_to_add = kwargs['members_to_add']
members_to_add.pop(0)
kwargs = {'callback_kwargs': {
'existing_members': kwargs.get('existing_members'),
'members_to_add': members_to_add,
'members_to_delete': kwargs.get('members_to_delete'),
'pool_id': pool_id}}
thread.start_new_thread(poll_loadbalancer_status, args, kwargs)
elif len(members) > index:
args = (request, loadbalancer_id, add_member)
kwargs = {'callback_kwargs': {'pool_id': pool_id,
'index': index}}
thread.start_new_thread(poll_loadbalancer_status, args, kwargs)
elif data.get('monitor'):
args = (request, loadbalancer_id, create_health_monitor)
kwargs = {'callback_kwargs': {'pool_id': pool_id}}
thread.start_new_thread(poll_loadbalancer_status, args, kwargs)
return _get_sdk_object_dict(member)
def remove_member(request, **kwargs):
"""Remove a member from the pool.
"""
data = request.DATA
loadbalancer_id = data.get('loadbalancer_id')
pool_id = kwargs.get('pool_id')
if kwargs.get('members_to_delete'):
members_to_delete = kwargs['members_to_delete']
member_id = members_to_delete.pop(0)
conn = _get_sdk_connection(request)
conn.load_balancer.delete_member(member_id, pool_id,
ignore_missing=True)
args = (request, loadbalancer_id, update_member_list)
kwargs = {'callback_kwargs': {
'existing_members': kwargs.get('existing_members'),
'members_to_add': kwargs.get('members_to_add'),
'members_to_delete': members_to_delete,
'pool_id': pool_id}}
thread.start_new_thread(poll_loadbalancer_status, args, kwargs)
def update_loadbalancer(request, **kwargs):
"""Update a load balancer.
"""
data = request.DATA
loadbalancer_id = kwargs.get('loadbalancer_id')
conn = _get_sdk_connection(request)
loadbalancer = conn.load_balancer.update_load_balancer(
loadbalancer_id,
name=data['loadbalancer'].get('name'),
description=data['loadbalancer'].get('description'),
admin_state_up=data['loadbalancer'].get('admin_state_up'))
return _get_sdk_object_dict(loadbalancer)
def update_listener(request, **kwargs):
"""Update a listener.
"""
data = request.DATA
listener_id = data['listener'].get('id')
loadbalancer_id = data.get('loadbalancer_id')
default_pool_id = data['listener'].get('default_pool_id')
if not default_pool_id:
default_pool_id = None
else:
default_pool_id = default_pool_id[:36]
try:
default_tls_ref = data['certificates'][0]
except (KeyError, IndexError):
default_tls_ref = None
conn = _get_sdk_connection(request)
listener = conn.load_balancer.update_listener(
listener=listener_id,
name=data['listener'].get('name'),
description=data['listener'].get('description'),
connection_limit=data['listener'].get('connection_limit'),
default_tls_container_ref=default_tls_ref,
sni_container_refs=None,
admin_state_up=data['listener'].get('admin_state_up'),
default_pool_id=default_pool_id,
insert_headers=data['listener'].get('insert_headers'),
timeout_client_data=data['listener'].get('timeout_client_data'),
timeout_member_connect=data['listener'].get('timeout_member_connect'),
timeout_member_data=data['listener'].get('timeout_member_data'),
timeout_tcp_inspect=data['listener'].get('timeout_tcp_inspect'),
allowed_cidrs=data['listener'].get('allowed_cidrs'),
# Replace empty string by None (uses default tls cipher string)
tls_ciphers=data['listener'].get('tls_ciphers') or None,
)
if data.get('pool'):
args = (request, loadbalancer_id, update_pool)
thread.start_new_thread(poll_loadbalancer_status, args)
return _get_sdk_object_dict(listener)
def update_l7_policy(request, **kwargs):
"""Update a l7 policy.
"""
data = request.DATA
l7_policy_id = data['l7policy'].get('id')
conn = _get_sdk_connection(request)
l7_policy = conn.load_balancer.update_l7_policy(
action=data['l7policy']['action'],
admin_state_up=data['l7policy'].get('admin_state_up'),
description=data['l7policy'].get('description'),
l7_policy=l7_policy_id,
name=data['l7policy'].get('name'),
position=data['l7policy'].get('position'),
redirect_pool_id=data['l7policy'].get('redirect_pool_id'),
redirect_url=data['l7policy'].get('redirect_url'),
)
return _get_sdk_object_dict(l7_policy)
def update_l7_rule(request, **kwargs):
"""Update a l7 rule.
"""
data = request.DATA
l7_rule_id = data['l7rule'].get('id')
conn = _get_sdk_connection(request)
l7_rule = conn.load_balancer.update_l7_rule(
admin_state_up=data['l7rule'].get('admin_state_up'),
compare_type=data['l7rule']['compare_type'],
invert=data['l7rule'].get('invert'),
key=data['l7rule'].get('key'),
l7_policy=kwargs['l7_policy_id'],
l7rule=l7_rule_id,
type=data['l7rule']['type'],
rule_value=data['l7rule']['rule_value'],
)
return _get_sdk_object_dict(l7_rule)
def update_pool(request, **kwargs):
"""Update a pool.
"""
data = request.DATA
pool_id = data['pool'].get('id')
loadbalancer_id = data.get('loadbalancer_id')
conn = _get_sdk_connection(request)
pool = conn.load_balancer.update_pool(
pool=pool_id,
lb_algorithm=data['pool']['lb_algorithm'],
session_persistence=data['pool'].get('session_persistence'),
name=data['pool'].get('name'),
description=data['pool'].get('description'),
admin_state_up=data['pool'].get('admin_state_up'),
tls_enabled=data['pool'].get('tls_enabled'),
# Replace empty string by None (uses default tls cipher string)
tls_ciphers=data['pool'].get('tls_ciphers') or None,
)
# Assemble the lists of member id's to add and remove, if any exist
request_member_data = data.get('members', [])
existing_members = _sdk_object_to_list(conn.load_balancer.members(pool_id))
(members_to_add, members_to_delete) = get_members_to_add_remove(
request_member_data, existing_members)
if members_to_add or members_to_delete:
args = (request, loadbalancer_id, update_member_list)
kwargs = {'callback_kwargs': {'existing_members': existing_members,
'members_to_add': members_to_add,
'members_to_delete': members_to_delete,
'pool_id': pool_id}}
thread.start_new_thread(poll_loadbalancer_status, args, kwargs)
elif data.get('monitor'):
args = (request, loadbalancer_id, update_monitor)
thread.start_new_thread(poll_loadbalancer_status, args)
return _get_sdk_object_dict(pool)
def update_monitor(request, **kwargs):
"""Update a health monitor.
"""
data = request.DATA
monitor_id = data['monitor']['id']
conn = _get_sdk_connection(request)
healthmonitor = conn.load_balancer.update_health_monitor(
monitor_id,
delay=data['monitor'].get('delay'),
timeout=data['monitor'].get('timeout'),
max_retries=data['monitor'].get('max_retries'),
max_retries_down=data['monitor'].get('max_retries_down'),
http_method=data['monitor'].get('http_method'),
url_path=data['monitor'].get('url_path'),
expected_codes=data['monitor'].get('expected_codes'),
admin_state_up=data['monitor'].get('admin_state_up'),
name=data['monitor'].get('name')
)
return _get_sdk_object_dict(healthmonitor)
def update_flavor(request, **kwargs):
"""Update a flavor.
"""
data = request.DATA
flavor_id = data['flavor']['id']
conn = _get_sdk_connection(request)
flavor = conn.load_balancer.update_flavor(
flavor_id,
name=data['flavor'].get('name'),
description=data['flavor'].get('description'),
enabled=data['flavor'].get('enabled'),
)
return _get_sdk_object_dict(flavor)
def update_flavor_profile(request, **kwargs):
"""Update a flavor profile.
"""
data = request.DATA
flavor_profile_id = data['flavor_profile']['id']
conn = _get_sdk_connection(request)
flavor_profile = conn.load_balancer.update_flavor_profile(
flavor_profile_id,
name=data['flavor_profile'].get('name'),
provider_name=data['flavor_profile'].get('provider_name'),
flavor_data=data['flavor_profile'].get('flavor_data'),
)
return _get_sdk_object_dict(flavor_profile)
def update_member_list(request, **kwargs):
"""Update the list of members by adding or removing the necessary members.
"""
data = request.DATA
loadbalancer_id = data.get('loadbalancer_id')
pool_id = kwargs.get('pool_id')
existing_members = kwargs.get('existing_members')
members_to_add = kwargs.get('members_to_add')
members_to_delete = kwargs.get('members_to_delete')
if members_to_delete:
kwargs = {'existing_members': existing_members,
'members_to_add': members_to_add,
'members_to_delete': members_to_delete,
'pool_id': pool_id}
remove_member(request, **kwargs)
elif members_to_add:
kwargs = {'existing_members': existing_members,
'members_to_add': members_to_add,
'members_to_delete': members_to_delete,
'pool_id': pool_id}
add_member(request, **kwargs)
elif data.get('monitor'):
args = (request, loadbalancer_id, update_monitor)
thread.start_new_thread(poll_loadbalancer_status, args)
def get_members_to_add_remove(request_member_data, existing_members):
new_member_ids = [member['id'] for member in request_member_data]
existing_member_ids = [member['id'] for member in existing_members]
members_to_add = [member_id for member_id in new_member_ids
if member_id not in existing_member_ids]
members_to_delete = [member_id for member_id in existing_member_ids
if member_id not in new_member_ids]
return members_to_add, members_to_delete
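# Illustrative example (not part of the original module): with request members
# [{'id': 'a'}, {'id': 'b'}] and existing members [{'id': 'b'}, {'id': 'c'}],
# get_members_to_add_remove returns (['a'], ['c']).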
def add_floating_ip_info(request, loadbalancers):
"""Add floating IP address info to each load balancer.
"""
floating_ips = neutron.tenant_floating_ip_list(request)
for lb in loadbalancers:
floating_ip = {}
associated_ip = next((fip for fip in floating_ips
if fip['port_id'] == lb['vip_port_id']), None)
if associated_ip is not None:
floating_ip['id'] = associated_ip['id']
floating_ip['ip'] = associated_ip['ip']
lb['floating_ip'] = floating_ip
@urls.register
class LoadBalancers(generic.View):
"""API for load balancers.
"""
url_regex = r'lbaas/loadbalancers/$'
@rest_utils.ajax()
def get(self, request):
"""List load balancers for current project.
The listing result is an object with property "items".
"""
conn = _get_sdk_connection(request)
lb_list = _sdk_object_to_list(conn.load_balancer.load_balancers(
project_id=request.user.project_id))
if request.GET.get('full') and neutron.floating_ip_supported(request):
add_floating_ip_info(request, lb_list)
return {'items': lb_list}
@rest_utils.ajax()
def post(self, request):
"""Create a new load balancer.
Creates a new load balancer as well as other optional resources such as
a listener, pool, monitor, etc.
"""
return create_loadbalancer(request)
@urls.register
class LoadBalancer(generic.View):
"""API for retrieving, updating, and deleting a single load balancer.
"""
url_regex = r'lbaas/loadbalancers/(?P<loadbalancer_id>[^/]+)/$'
@rest_utils.ajax()
def get(self, request, loadbalancer_id):
"""Get a specific load balancer.
http://localhost/api/lbaas/loadbalancers/cc758c90-3d98-4ea1-af44-aab405c9c915
"""
conn = _get_sdk_connection(request)
loadbalancer = conn.load_balancer.find_load_balancer(loadbalancer_id)
loadbalancer_dict = _get_sdk_object_dict(loadbalancer)
if request.GET.get('full') and neutron.floating_ip_supported(request):
add_floating_ip_info(request, [loadbalancer_dict])
return loadbalancer_dict
@rest_utils.ajax()
def put(self, request, loadbalancer_id):
"""Edit a load balancer.
"""
kwargs = {'loadbalancer_id': loadbalancer_id}
update_loadbalancer(request, **kwargs)
@rest_utils.ajax()
def delete(self, request, loadbalancer_id):
"""Delete a specific load balancer.
http://localhost/api/lbaas/loadbalancers/cc758c90-3d98-4ea1-af44-aab405c9c915
"""
conn = _get_sdk_connection(request)
conn.load_balancer.delete_load_balancer(loadbalancer_id,
ignore_missing=True,
cascade=True)
@urls.register
class Listeners(generic.View):
"""API for load balancer listeners.
"""
url_regex = r'lbaas/listeners/$'
@rest_utils.ajax()
def get(self, request):
"""List of listeners for the current project.
The listing result is an object with property "items".
"""
loadbalancer_id = request.GET.get('loadbalancerId')
conn = _get_sdk_connection(request)
listener_list = _sdk_object_to_list(conn.load_balancer.listeners(
project_id=request.user.project_id))
if loadbalancer_id:
listener_list = self._filter_listeners(listener_list,
loadbalancer_id)
return {'items': listener_list}
@rest_utils.ajax()
def post(self, request):
"""Create a new listener.
Creates a new listener as well as other optional resources such as
a pool, members, and health monitor.
"""
kwargs = {'loadbalancer_id': request.DATA.get('loadbalancer_id')}
return create_listener(request, **kwargs)
def _filter_listeners(self, listener_list, loadbalancer_id):
filtered_listeners = []
for listener in listener_list:
if listener['load_balancers'][0]['id'] == loadbalancer_id:
filtered_listeners.append(listener)
return filtered_listeners
@urls.register
class Listener(generic.View):
"""API for retrieving, updating, and deleting a single listener.
"""
url_regex = r'lbaas/listeners/(?P<listener_id>[^/]+)/$'
@rest_utils.ajax()
def get(self, request, listener_id):
"""Get a specific listener.
If the param 'includeChildResources' is passed in as a truthy value,
the details of all resources that exist under the listener will be
returned along with the listener details.
http://localhost/api/lbaas/listeners/cc758c90-3d98-4ea1-af44-aab405c9c915
"""
conn = _get_sdk_connection(request)
listener = conn.load_balancer.find_listener(listener_id)
listener = _get_sdk_object_dict(listener)
if request.GET.get('includeChildResources'):
resources = {}
resources['listener'] = listener
if listener.get('default_pool_id'):
pool_id = listener['default_pool_id']
pool = conn.load_balancer.find_pool(pool_id)
pool = _get_sdk_object_dict(pool)
resources['pool'] = pool
if pool.get('members'):
member_list = _sdk_object_to_list(
conn.load_balancer.members(pool_id))
resources['members'] = member_list
if pool.get('health_monitor_id'):
monitor_id = pool['health_monitor_id']
monitor = conn.load_balancer.find_health_monitor(
monitor_id)
monitor = _get_sdk_object_dict(monitor)
resources['monitor'] = monitor
return resources
else:
return listener
@rest_utils.ajax()
def put(self, request, listener_id):
"""Edit a listener as well as any resources below it.
"""
kwargs = {'listener_id': listener_id}
update_listener(request, **kwargs)
@rest_utils.ajax()
def delete(self, request, listener_id):
"""Delete a specific listener.
http://localhost/api/lbaas/listeners/cc758c90-3d98-4ea1-af44-aab405c9c915
"""
conn = _get_sdk_connection(request)
conn.load_balancer.delete_listener(listener_id, ignore_missing=True)
@urls.register
class L7Policies(generic.View):
"""API for load balancer l7 policies.
"""
url_regex = r'lbaas/l7policies/$'
@rest_utils.ajax()
def get(self, request):
"""List of l7 policies for the current project.
The listing result is an object with property "items".
"""
listener_id = request.GET.get('listenerId')
conn = _get_sdk_connection(request)
l7_policy_list = _sdk_object_to_list(conn.load_balancer.l7_policies(
listener_id=listener_id))
return {'items': l7_policy_list}
@rest_utils.ajax()
def post(self, request):
"""Create a new l7 policy.
Creates a new l7 policy as well as other optional resources such as
l7 rules.
"""
kwargs = {'listener_id': request.DATA.get('parentResourceId')}
return create_l7_policy(request, **kwargs)
@urls.register
class L7Policy(generic.View):
"""API for retrieving a single l7 policy.
"""
url_regex = r'lbaas/l7policies/(?P<l7_policy_id>[^/]+)/$'
@rest_utils.ajax()
def get(self, request, l7_policy_id):
"""Get a specific l7 policy.
If the param 'includeChildResources' is passed in as a truthy value,
the details of all resources that exist under the l7 policy will be
returned along with the l7 policy details.
http://localhost/api/lbaas/l7policies/cc758c90-3d98-4ea1-af44-aab405c9c915
"""
conn = _get_sdk_connection(request)
l7_policy = conn.load_balancer.find_l7_policy(l7_policy_id)
l7_policy = _get_sdk_object_dict(l7_policy)
if request.GET.get('includeChildResources'):
resources = {}
if l7_policy.get('rules'):
l7_rules_list = _sdk_object_to_list(
conn.load_balancer.l7_rules(l7_policy_id))
l7_policy['rules'] = l7_rules_list
resources['l7policy'] = l7_policy
return resources
else:
return l7_policy
@rest_utils.ajax()
def put(self, request, l7_policy_id):
"""Edit a l7 policy as well as any resources below it.
"""
kwargs = {'l7_policy_id': l7_policy_id}
update_l7_policy(request, **kwargs)
@rest_utils.ajax()
def delete(self, request, l7_policy_id):
"""Delete a specific l7 policy.
http://localhost/api/lbaas/l7policies/cc758c90-3d98-4ea1-af44-aab405c9c915
"""
conn = _get_sdk_connection(request)
conn.load_balancer.delete_l7_policy(l7_policy_id)
@urls.register
class L7Rules(generic.View):
"""API for load balancer l7 rules.
"""
url_regex = r'lbaas/l7policies/(?P<l7_policy_id>[^/]+)/l7rules/$'
@rest_utils.ajax()
def get(self, request, l7_policy_id):
"""List of l7 rules for the current project.
The listing result is an object with property "items".
"""
conn = _get_sdk_connection(request)
l7_rule_list = _sdk_object_to_list(conn.load_balancer.l7_rules(
l7_policy_id))
return {'items': l7_rule_list}
@rest_utils.ajax()
def post(self, request, l7_policy_id):
"""Create a new l7 rule.
        Creates a new l7 rule under the specified l7 policy.
"""
kwargs = {'l7_policy_id': l7_policy_id}
return create_l7_rule(request, **kwargs)
@urls.register
class L7Rule(generic.View):
"""API for retrieving a single l7 rule.
"""
url_regex = (
r'lbaas/l7policies/(?P<l7_policy_id>[^/]+)'
r'/l7rules/(?P<l7_rule_id>[^/]+)/$'
)
@rest_utils.ajax()
def get(self, request, l7_rule_id, l7_policy_id):
"""Get a specific l7 rule."""
conn = _get_sdk_connection(request)
l7_rule = conn.load_balancer.find_l7_rule(l7_rule_id, l7_policy_id)
return _get_sdk_object_dict(l7_rule)
@rest_utils.ajax()
def put(self, request, l7_rule_id, l7_policy_id):
"""Edit a specific l7 rule."""
kwargs = {'l7_rule_id': l7_rule_id, 'l7_policy_id': l7_policy_id}
update_l7_rule(request, **kwargs)
@rest_utils.ajax()
def delete(self, request, l7_rule_id, l7_policy_id):
"""Delete a specific l7 rule."""
conn = _get_sdk_connection(request)
conn.load_balancer.delete_l7_rule(l7_rule_id, l7_policy_id)
@urls.register
class Pools(generic.View):
"""API for load balancer pools.
"""
url_regex = r'lbaas/pools/$'
@rest_utils.ajax()
def get(self, request):
"""List of pools for the current project.
The listing result is an object with property "items".
"""
loadbalancer_id = request.GET.get('loadbalancerId')
listener_id = request.GET.get('listenerId')
conn = _get_sdk_connection(request)
pool_list = _sdk_object_to_list(conn.load_balancer.pools(
project_id=request.user.project_id))
if loadbalancer_id or listener_id:
pool_list = self._filter_pools(pool_list,
loadbalancer_id,
listener_id)
return {'items': pool_list}
@rest_utils.ajax()
def post(self, request):
"""Create a new pool.
Creates a new pool as well as other optional resources such as
members and health monitor.
"""
kwargs = {'loadbalancer_id': request.DATA.get('loadbalancer_id'),
'listener_id': request.DATA.get('parentResourceId')}
return create_pool(request, **kwargs)
def _filter_pools(self, pool_list, loadbalancer_id, listener_id):
filtered_pools = []
for pool in pool_list:
if loadbalancer_id:
if pool['loadbalancers'][0]['id'] == loadbalancer_id:
if listener_id:
if (pool['listeners'] and
pool['listeners'][0]['id'] == listener_id):
filtered_pools.append(pool)
else:
filtered_pools.append(pool)
elif (pool['listeners'] and
pool['listeners'][0]['id'] == listener_id):
filtered_pools.append(pool)
return filtered_pools
@urls.register
class Pool(generic.View):
"""API for retrieving a single pool.
"""
url_regex = r'lbaas/pools/(?P<pool_id>[^/]+)/$'
@rest_utils.ajax()
def get(self, request, pool_id):
"""Get a specific pool.
If the param 'includeChildResources' is passed in as a truthy value,
the details of all resources that exist under the pool will be
returned along with the pool details.
http://localhost/api/lbaas/pools/cc758c90-3d98-4ea1-af44-aab405c9c915
"""
conn = _get_sdk_connection(request)
pool = conn.load_balancer.find_pool(pool_id)
pool = _get_sdk_object_dict(pool)
if request.GET.get('includeChildResources'):
resources = {}
resources['pool'] = pool
if pool.get('members'):
member_list = _sdk_object_to_list(
conn.load_balancer.members(pool_id))
resources['members'] = member_list
if pool.get('health_monitor_id'):
monitor_id = pool['health_monitor_id']
monitor = conn.load_balancer.find_health_monitor(
monitor_id)
monitor = _get_sdk_object_dict(monitor)
resources['monitor'] = monitor
return resources
else:
return pool
@rest_utils.ajax()
def put(self, request, pool_id):
"""Edit a listener as well as any resources below it.
"""
kwargs = {'pool_id': pool_id}
update_pool(request, **kwargs)
@rest_utils.ajax()
def delete(self, request, pool_id):
"""Delete a specific pool.
http://localhost/api/lbaas/pools/cc758c90-3d98-4ea1-af44-aab405c9c915
"""
conn = _get_sdk_connection(request)
conn.load_balancer.delete_pool(pool_id)
@urls.register
class Members(generic.View):
"""API for load balancer members.
"""
url_regex = r'lbaas/pools/(?P<pool_id>[^/]+)/members/$'
@rest_utils.ajax()
def get(self, request, pool_id):
"""List of members for the current project.
The listing result is an object with property "items".
"""
conn = _get_sdk_connection(request)
members_list = _sdk_object_to_list(conn.load_balancer.members(pool_id))
return {'items': members_list}
@rest_utils.ajax()
def put(self, request, pool_id):
"""Update the list of members for the current project.
"""
# Assemble the lists of member id's to add and remove, if any exist
request_member_data = request.DATA.get('members', [])
conn = _get_sdk_connection(request)
existing_members = _sdk_object_to_list(
conn.load_balancer.members(pool_id))
(members_to_add, members_to_delete) = get_members_to_add_remove(
request_member_data, existing_members)
if members_to_add or members_to_delete:
kwargs = {'existing_members': existing_members,
'members_to_add': members_to_add,
'members_to_delete': members_to_delete,
'pool_id': pool_id}
update_member_list(request, **kwargs)
@urls.register
class Member(generic.View):
"""API for retrieving a single member.
"""
url_regex = r'lbaas/pools/(?P<pool_id>[^/]+)' + \
'/members/(?P<member_id>[^/]+)/$'
@rest_utils.ajax()
def get(self, request, member_id, pool_id):
"""Get a specific member belonging to a specific pool.
"""
conn = _get_sdk_connection(request)
member = conn.load_balancer.find_member(member_id, pool_id)
return _get_sdk_object_dict(member)
@rest_utils.ajax()
def delete(self, request, member_id, pool_id):
"""Delete a specific member belonging to a specific pool.
"""
conn = _get_sdk_connection(request)
conn.load_balancer.delete_member(member_id, pool_id)
@rest_utils.ajax()
def put(self, request, member_id, pool_id):
"""Edit a pool member.
"""
data = request.DATA
conn = _get_sdk_connection(request)
monitor_address = data.get('monitor_address')
member = conn.load_balancer.update_member(
member_id, pool_id, weight=data.get('weight'),
monitor_address=monitor_address if monitor_address else None,
monitor_port=data.get('monitor_port'),
admin_state_up=data.get('admin_state_up'),
backup=data.get('backup', False),
name=data.get('name'),
)
return _get_sdk_object_dict(member)
@urls.register
class HealthMonitors(generic.View):
"""API for load balancer pool health monitors.
"""
url_regex = r'lbaas/healthmonitors/$'
@rest_utils.ajax()
def get(self, request):
"""List of health monitors for the current project.
The listing result is an object with property "items".
"""
pool_id = request.GET.get('poolId')
conn = _get_sdk_connection(request)
health_monitor_list = _sdk_object_to_list(
conn.load_balancer.health_monitors(
project_id=request.user.project_id
)
)
if pool_id:
health_monitor_list = self._filter_health_monitors(
health_monitor_list,
pool_id)
return {'items': health_monitor_list}
@rest_utils.ajax()
def post(self, request):
"""Create a new health monitor.
"""
kwargs = {'loadbalancer_id': request.DATA.get('loadbalancer_id'),
'pool_id': request.DATA.get('parentResourceId')}
return create_health_monitor(request, **kwargs)
def _filter_health_monitors(self, health_monitor_list, pool_id):
filtered_health_monitors = []
for health_monitor in health_monitor_list:
if health_monitor['pools'][0]['id'] == pool_id:
filtered_health_monitors.append(health_monitor)
return filtered_health_monitors
@urls.register
class HealthMonitor(generic.View):
"""API for retrieving a single health monitor.
"""
url_regex = r'lbaas/healthmonitors/(?P<health_monitor_id>[^/]+)/$'
@rest_utils.ajax()
def get(self, request, health_monitor_id):
"""Get a specific health monitor.
"""
conn = _get_sdk_connection(request)
health_mon = conn.load_balancer.find_health_monitor(health_monitor_id)
return _get_sdk_object_dict(health_mon)
@rest_utils.ajax()
def delete(self, request, health_monitor_id):
"""Delete a specific health monitor.
http://localhost/api/lbaas/healthmonitors/cc758c90-3d98-4ea1-af44-aab405c9c915
"""
conn = _get_sdk_connection(request)
conn.load_balancer.delete_health_monitor(health_monitor_id,
ignore_missing=True)
@rest_utils.ajax()
def put(self, request, health_monitor_id):
"""Edit a health monitor.
"""
update_monitor(request)
@urls.register
class Flavors(generic.View):
"""API for load balancer flavors.
"""
url_regex = r'lbaas/flavors/$'
@rest_utils.ajax()
def get(self, request):
"""List of flavors for the current project.
The listing result is an object with property "items".
"""
conn = _get_sdk_connection(request)
flavor_list = _sdk_object_to_list(
conn.load_balancer.flavors()
)
return {'items': flavor_list}
@rest_utils.ajax()
def post(self, request):
"""Create a new flavor.
"""
kwargs = {
'flavor': request.DATA.get('flavor')
}
return create_flavor(request, **kwargs)
@urls.register
class Flavor(generic.View):
"""API for retrieving a single flavor.
"""
url_regex = r'lbaas/flavors/(?P<flavor_id>[^/]+)/$'
@rest_utils.ajax()
def get(self, request, flavor_id):
"""Get a specific flavor.
"""
conn = _get_sdk_connection(request)
flavor = conn.load_balancer.find_flavor(flavor_id)
return _get_sdk_object_dict(flavor)
@rest_utils.ajax()
def delete(self, request, flavor_id):
"""Delete a specific flavor.
http://localhost/api/lbaas/flavors/3971d368-ca9b-4770-929a-3adca5bf89eb
"""
conn = _get_sdk_connection(request)
conn.load_balancer.delete_flavor(flavor_id,
ignore_missing=True)
@rest_utils.ajax()
def put(self, request, flavor_id):
"""Edit a flavor.
"""
update_flavor(request)
@urls.register
class FlavorProfiles(generic.View):
"""API for load balancer flavor profiles.
"""
url_regex = r'lbaas/flavorprofiles/$'
@rest_utils.ajax()
def get(self, request):
"""List of flavor profiles for the current project.
The listing result is an object with property "items".
"""
conn = _get_sdk_connection(request)
flavor_profile_list = _sdk_object_to_list(
conn.load_balancer.flavor_profiles()
)
return {'items': flavor_profile_list}
@rest_utils.ajax()
def post(self, request):
"""Create a new flavor_profile.
"""
kwargs = {
'flavor_profile': request.DATA.get('flavor_profile')
}
return create_flavor_profile(request, **kwargs)
@urls.register
class FlavorProfile(generic.View):
"""API for retrieving a single flavor profile.
"""
url_regex = r'lbaas/flavorprofiles/(?P<flavor_profile_id>[^/]+)/$'
@rest_utils.ajax()
def get(self, request, flavor_profile_id):
"""Get a specific flavor profile.
"""
conn = _get_sdk_connection(request)
flavor_profile = conn.load_balancer.find_flavor_profile(
flavor_profile_id)
return _get_sdk_object_dict(flavor_profile)
@rest_utils.ajax()
def delete(self, request, flavor_profile_id):
"""Delete a specific flavor profile.
http://localhost/api/lbaas/flavorprofiles/e8150eab-aefa-42cc-867e-3fb336da52bd
"""
conn = _get_sdk_connection(request)
conn.load_balancer.delete_flavor_profile(flavor_profile_id,
ignore_missing=True)
@rest_utils.ajax()
def put(self, request, flavor_profile_id):
"""Edit a flavor profile.
"""
update_flavor_profile(request)
@urls.register
class AvailabilityZones(generic.View):
"""API for load balancer availability zones.
"""
url_regex = r'lbaas/availabilityzones/$'
@rest_utils.ajax()
def get(self, request):
"""List of availability zones for the current project.
The listing result is an object with property "items".
"""
conn = _get_sdk_connection(request)
availability_zone_list = _sdk_object_to_list(
conn.load_balancer.availability_zones()
)
return {'items': availability_zone_list}
| 33.347125
| 86
| 0.644229
|
97dc00bae8d02e8f139b5f6677d017e9fef4491a
| 1,994
|
py
|
Python
|
application.py
|
amfox/af_sbs
|
327a0a68731e905818b24aaf4efb35cfd0d29637
|
[
"MIT"
] | null | null | null |
application.py
|
amfox/af_sbs
|
327a0a68731e905818b24aaf4efb35cfd0d29637
|
[
"MIT"
] | null | null | null |
application.py
|
amfox/af_sbs
|
327a0a68731e905818b24aaf4efb35cfd0d29637
|
[
"MIT"
] | null | null | null |
# coding=utf-8
import os
import tornado
from apscheduler.events import EVENT_JOB_EXECUTED, EVENT_JOB_ERROR, EVENT_JOB_ADDED
from apscheduler.schedulers.tornado import TornadoScheduler
from handler.AddJobHandler import AddJobHandler
from handler.BackUIHandler import BackUIHandler
from handler.DebugHandler import DebugHandler
from handler.GetJobHandler import GetJobHandler
from handler.GetJobsHandler import GetJobsHandler
from handler.ModifyJobHandler import ModifyJobHandler
from handler.PauseJobHandler import PauseJobHandler
from handler.RemoveJobHandler import RemoveJobHandler
from handler.ResumeJobHandler import ResumeJobHandler
from handler.RunJobHandler import RunJobHandler
from settings import jobstores, executors, job_defaults
class Application(tornado.web.Application):
def __init__(self):
handlers = [
(r"/", DebugHandler),
(r"/backui", BackUIHandler),
(r"/job/getjob", GetJobHandler),
(r"/job/getjobs", GetJobsHandler),
(r"/job/addjob", AddJobHandler),
(r"/job/removejob", RemoveJobHandler),
(r"/job/runjob", RunJobHandler),
(r"/job/modifyjob", ModifyJobHandler),
(r"/job/pausejob", PauseJobHandler),
(r"/job/resumejob", ResumeJobHandler),
]
settings = dict(
template_path=os.path.join(os.path.dirname(__file__), "templates"),
static_path=os.path.join(os.path.dirname(__file__), "static"),
debug=True,
)
tornado.web.Application.__init__(self, handlers, **settings)
# Scheduler of jobs
self.scheduler = TornadoScheduler()
self.scheduler.configure(jobstores=jobstores, executors=executors, job_defaults=job_defaults)
self.scheduler.add_listener(self.listener, EVENT_JOB_ADDED | EVENT_JOB_EXECUTED | EVENT_JOB_ERROR)
def listener(self, event):
print "jobid:", event.job_id, "jobstore", event.jobstore, "eventcode:", event.code
| 39.098039
| 106
| 0.713641
|
dca618e4aee2a10126bee1646d0196d38f716d03
| 4,114
|
py
|
Python
|
hello.py
|
julencastro37/PianoPython
|
1e72d375f9f5f94c6f4fcad1892df574befe9d8b
|
[
"CC0-1.0"
] | null | null | null |
hello.py
|
julencastro37/PianoPython
|
1e72d375f9f5f94c6f4fcad1892df574befe9d8b
|
[
"CC0-1.0"
] | null | null | null |
hello.py
|
julencastro37/PianoPython
|
1e72d375f9f5f94c6f4fcad1892df574befe9d8b
|
[
"CC0-1.0"
] | null | null | null |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from tkinter import *
from tkinter import ttk
# The 'Aplicacion' class has grown. The example adds new widgets in the
# __init__() constructor method: one of them is the 'Info' button which,
# when pressed, calls the 'verinfo' method to display information in the
# other widget, a text box: an event triggers an action:
class Aplicacion():
def __init__(self):
        # The example uses the 'self' prefix to declare some variables
        # bound to the object ('mi_app') of the 'Aplicacion' class. Using it
        # is essential so that their values can be accessed from other
        # methods:
self.raiz = Tk()
self.raiz.geometry('600x400')
        # Prevent the borders from being dragged to enlarge or shrink
        # the 'self.raiz' window:
self.raiz.resizable(width=False,height=False)
self.raiz.title('Piano')
        # Define the Text widget 'self.tinfo', in which several lines
        # of text can be entered:
self.tinfo = Text(self.raiz, width=60, height=20)
        # Place the text box 'self.tinfo' at the top of the
        # 'self.raiz' window:
self.tinfo.pack(side=TOP)
        # Define the Button widget 'self.binfo', which will call the
        # 'self.verinfo' method when it is pressed
self.binfo = ttk.Button(self.raiz, text='Info',
command=self.verinfo)
        # Place the 'self.binfo' button below and to the left of the
        # previous widget
self.binfo.pack(side=LEFT)
        # Define the 'self.bsalir' button. In this case, when it is
        # pressed, the method destroys (terminates) the application
        # window 'self.raiz' with 'self.raiz.destroy'
self.bsalir = ttk.Button(self.raiz, text='Salir',
command=self.raiz.destroy)
        # Place the 'self.bsalir' button to the right of the
        # previous object.
self.bsalir.pack(side=RIGHT)
        # The application focus is set on the 'self.binfo' button,
        # highlighting its border. If the space bar is pressed, the
        # button that has the focus is activated. Focus can be moved
        # from one widget to another with the tabulator key [tab]
self.binfo.focus_set()
self.raiz.mainloop()
def verinfo(self):
        # Clears whatever content the text box holds at any
        # given moment
self.tinfo.delete("1.0", END)
        # Gathers information about the 'self.raiz' window:
info1 = self.raiz.winfo_class()
info2 = self.raiz.winfo_geometry()
info3 = str(self.raiz.winfo_width())
info4 = str(self.raiz.winfo_height())
info5 = str(self.raiz.winfo_rootx())
info6 = str(self.raiz.winfo_rooty())
info7 = str(self.raiz.winfo_id())
info8 = self.raiz.winfo_name()
info9 = self.raiz.winfo_manager()
        # Builds a text string with all the information
        # gathered:
texto_info = "Clase de 'raiz': " + info1 + "\n"
texto_info += "Resolución y posición: " + info2 + "\n"
texto_info += "Anchura ventana: " + info3 + "\n"
texto_info += "Altura ventana: " + info4 + "\n"
texto_info += "Pos. Ventana X: " + info5 + "\n"
texto_info += "Pos. Ventana Y: " + info6 + "\n"
texto_info += "Id. de 'raiz': " + info7 + "\n"
texto_info += "Nombre objeto: " + info8 + "\n"
texto_info += "Gestor ventanas: " + info9 + "\n"
        # Inserts the information into the text box:
self.tinfo.insert("1.0", texto_info)
def main():
mi_app = Aplicacion()
return 0
if __name__ == '__main__':
main()
| 35.773913
| 64
| 0.56174
|
8a8e5d9aed1cf923d116e0e4e0c589b14041b24f
| 4,642
|
py
|
Python
|
mmedit/models/mattors/indexnet.py
|
Jian137/mmediting-1
|
e1ac6c93441ec96696d0b530f040b91b809015b6
|
[
"Apache-2.0"
] | 1,884
|
2020-07-09T18:53:43.000Z
|
2022-03-31T12:06:18.000Z
|
mmedit/models/mattors/indexnet.py
|
Jian137/mmediting-1
|
e1ac6c93441ec96696d0b530f040b91b809015b6
|
[
"Apache-2.0"
] | 622
|
2020-07-09T18:52:27.000Z
|
2022-03-31T14:41:09.000Z
|
mmedit/models/mattors/indexnet.py
|
Jian137/mmediting-1
|
e1ac6c93441ec96696d0b530f040b91b809015b6
|
[
"Apache-2.0"
] | 361
|
2020-07-09T19:21:47.000Z
|
2022-03-31T09:58:27.000Z
|
# Copyright (c) OpenMMLab. All rights reserved.
import torch
from mmcv.runner import auto_fp16
from ..builder import build_loss
from ..registry import MODELS
from .base_mattor import BaseMattor
from .utils import get_unknown_tensor
@MODELS.register_module()
class IndexNet(BaseMattor):
"""IndexNet matting model.
This implementation follows:
Indices Matter: Learning to Index for Deep Image Matting
Args:
backbone (dict): Config of backbone.
train_cfg (dict): Config of training. In 'train_cfg', 'train_backbone'
should be specified.
test_cfg (dict): Config of testing.
pretrained (str): path of pretrained model.
loss_alpha (dict): Config of the alpha prediction loss. Default: None.
loss_comp (dict): Config of the composition loss. Default: None.
"""
def __init__(self,
backbone,
train_cfg=None,
test_cfg=None,
pretrained=None,
loss_alpha=None,
loss_comp=None):
super().__init__(backbone, None, train_cfg, test_cfg, pretrained)
self.loss_alpha = (
build_loss(loss_alpha) if loss_alpha is not None else None)
self.loss_comp = (
build_loss(loss_comp) if loss_comp is not None else None)
# support fp16
self.fp16_enabled = False
def forward_dummy(self, inputs):
return self.backbone(inputs)
@auto_fp16(apply_to=('merged', 'trimap'))
def forward_train(self, merged, trimap, meta, alpha, ori_merged, fg, bg):
"""Forward function for training IndexNet model.
Args:
merged (Tensor): Input images tensor with shape (N, C, H, W).
Typically these should be mean centered and std scaled.
trimap (Tensor): Tensor of trimap with shape (N, 1, H, W).
meta (list[dict]): Meta data about the current data batch.
alpha (Tensor): Tensor of alpha with shape (N, 1, H, W).
ori_merged (Tensor): Tensor of origin merged images (not
normalized) with shape (N, C, H, W).
fg (Tensor): Tensor of foreground with shape (N, C, H, W).
bg (Tensor): Tensor of background with shape (N, C, H, W).
Returns:
dict: Contains the loss items and batch information.
"""
pred_alpha = self.backbone(torch.cat((merged, trimap), 1))
losses = dict()
weight = get_unknown_tensor(trimap, meta)
if self.loss_alpha is not None:
losses['loss_alpha'] = self.loss_alpha(pred_alpha, alpha, weight)
if self.loss_comp is not None:
losses['loss_comp'] = self.loss_comp(pred_alpha, fg, bg,
ori_merged, weight)
return {'losses': losses, 'num_samples': merged.size(0)}
def forward_test(self,
merged,
trimap,
meta,
save_image=False,
save_path=None,
iteration=None):
"""Defines the computation performed at every test call.
Args:
merged (Tensor): Image to predict alpha matte.
trimap (Tensor): Trimap of the input image.
meta (list[dict]): Meta data about the current data batch.
Currently only batch_size 1 is supported. It may contain
information needed to calculate metrics (``ori_alpha`` and
``ori_trimap``) or save predicted alpha matte
(``merged_path``).
save_image (bool, optional): Whether save predicted alpha matte.
Defaults to False.
save_path (str, optional): The directory to save predicted alpha
matte. Defaults to None.
iteration (int, optional): If given as None, the saved alpha matte
will have the same file name with ``merged_path`` in meta dict.
                If given as an int, the saved alpha matte will be named with the
postfix ``_{iteration}.png``. Defaults to None.
Returns:
dict: Contains the predicted alpha and evaluation result.
"""
pred_alpha = self.backbone(torch.cat((merged, trimap), 1))
pred_alpha = pred_alpha.cpu().numpy().squeeze()
pred_alpha = self.restore_shape(pred_alpha, meta)
eval_result = self.evaluate(pred_alpha, meta)
if save_image:
self.save_image(pred_alpha, meta, save_path, iteration)
return {'pred_alpha': pred_alpha, 'eval_result': eval_result}
| 40.017241
| 79
| 0.596726
|
10c4c08fa6ec45f89c60c38f06511319a9ac77cc
| 1,617
|
py
|
Python
|
example/test_clustering.py
|
anandbhoraskar/img2vec
|
5ae18d32fa54c46dd10bc01c0d340143283296da
|
[
"MIT"
] | 1
|
2020-05-09T09:11:36.000Z
|
2020-05-09T09:11:36.000Z
|
example/test_clustering.py
|
anandbhoraskar/img2vec
|
5ae18d32fa54c46dd10bc01c0d340143283296da
|
[
"MIT"
] | null | null | null |
example/test_clustering.py
|
anandbhoraskar/img2vec
|
5ae18d32fa54c46dd10bc01c0d340143283296da
|
[
"MIT"
] | null | null | null |
import sys
import os
from shutil import copyfile
sys.path.append("../img2vec_pytorch") # Adds higher directory to python modules path.
from img_to_vec import Img2Vec
from PIL import Image
import numpy as np
from sklearn.cluster import KMeans
from sklearn.decomposition import PCA
input_path = './test_images'
files = os.listdir(input_path)
img2vec = Img2Vec()
vec_length = 512 # Using resnet-18 as default
samples = len(files) # Amount of samples to take from input path
k_value = 2 # How many clusters
# Matrix to hold the image vectors
vec_mat = np.zeros((samples, vec_length))
# If samples is < the number of files in the folder, we sample them randomly
sample_indices = np.random.choice(range(0, len(files)), size=samples, replace=False)
print('Reading images...')
for index, i in enumerate(sample_indices):
file = files[i]
filename = os.fsdecode(file)
img = Image.open(os.path.join(input_path, filename))
vec = img2vec.get_vec(img)
vec_mat[index, :] = vec
print('Applying PCA...')
reduced_data = PCA(n_components=2).fit_transform(vec_mat)
kmeans = KMeans(init='k-means++', n_clusters=k_value, n_init=10)
kmeans.fit(reduced_data)
# Create a folder for each cluster (0, 1, 2, ..)
for i in set(kmeans.labels_):
try:
os.mkdir('./' + str(i))
except FileExistsError:
continue
print('Predicting...')
preds = kmeans.predict(reduced_data)
print('Copying images...')
for index, i in enumerate(sample_indices):
file = files[i]
filename = os.fsdecode(file)
copyfile(input_path + '/' + filename, './' + str(preds[index]) + '/' + filename)
print('Done!')
| 28.875
| 86
| 0.709957
|
d9f3cdcbfe85fbd33bca90cf16c29336075d942c
| 2,701
|
py
|
Python
|
final_benchmark.py
|
Freeha-S/Algorithm
|
61d08de9b7ac2899d29ba7ba8a29730923e1f193
|
[
"MIT"
] | null | null | null |
final_benchmark.py
|
Freeha-S/Algorithm
|
61d08de9b7ac2899d29ba7ba8a29730923e1f193
|
[
"MIT"
] | null | null | null |
final_benchmark.py
|
Freeha-S/Algorithm
|
61d08de9b7ac2899d29ba7ba8a29730923e1f193
|
[
"MIT"
] | null | null | null |
import numpy as np
from numpy.random import randint
from time import time
from sorting import insertion_sort, merge_sort, quick_sort,selection_sort,radix_sort,radix_Sort
from numpy import mean
import pandas as pd
import matplotlib.pyplot as plt
import math
"""
def random_array(ArraySize):
numpy.random.seed(0)
testArray = randint(1,ArraySize*2,ArraySize)
return testArray
"""
def benchmark(func,array):
results=[]
unsorted=list(array)
#print("unsorted",unsorted)# to check results
for j in range(0,10):
#unsorted = random_array(i)
start =time() #start time
sorted1= list(func(unsorted))
end=time()#end time
res=end-start #time elapsed
np.random.shuffle(unsorted)#shuffle array for next run
        #print("sorted",sorted1)# to confirm it is working fine
results.append(res)
print(results)
    return round(mean(results)*1000,3)# return the average time in milliseconds, rounded to 3 digits
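# Illustrative only (not part of the original script): a quick self-check of
# benchmark() using the builtin sorted(), so it runs even without the custom
# sorting module; the array size of 100 is an arbitrary choice.
def _demo_benchmark():
    sample = [randint(1, 200) for _ in range(100)]
    print("builtin sorted, 100 elements:", benchmark(sorted, sample), "ms")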
if __name__ == "__main__":
# create a dataframe
size = [100, 250, 500, 750, 1000, 1250, 2500, 3750, 5000, 6250, 7500, 8750, 10000]
df = pd.DataFrame(columns = size, index = ["selection_sort","insertion_sort","merge_sort","quick_sort","radix_sort","radix_Sort"])
for ArraySize in (100, 250, 500, 750, 1000, 1250, 2500, 3750, 5000, 6250, 7500, 8750, 10000):
results=[]
        # create a random array of values to use in the tests below
array1 = [randint(1,ArraySize*2)for i in range(ArraySize)]
#array = [randint(0, 1000) for i in range(ArraySize)]
results.append(ArraySize)
#print (ArraySize)
#call the benchmark function and put the returned value in appropriate row in dataframe
df.loc["selection_sort",ArraySize] = benchmark(selection_sort,array1)
df.loc["insertion_sort",ArraySize] = benchmark(insertion_sort,array1)
df.loc["merge_sort",ArraySize]= benchmark(merge_sort,array1)
df.loc["quick_sort",ArraySize]= benchmark(quick_sort,array1)
df.loc["radix_sort",ArraySize]=benchmark(radix_sort,array1)
df.loc["radix_Sort",ArraySize]=benchmark(radix_Sort,array1)
#df.loc[[0,3],'Z'] =used this as reference how to fill dataframe
print()
print("Average time taken for 10 runs for each array size")
pd.set_option('display.max_columns',None)
with pd.option_context('display.float_format', '{:0.3f}'.format):
print(df)
df.to_csv("resultsort.csv",header=True)
df2=df.T
df2.plot(kind='line')
plt.title("Average Time taken by sorting algorithms to sort list")
plt.xlabel("n-Input size")
plt.ylabel("T(n) in milliseconds")
plt.show()
| 39.144928
| 134
| 0.681229
|
d04936ca4503fae4eacfff41230f9dc60eebde16
| 9,239
|
py
|
Python
|
sopel/tools/time.py
|
HumorBaby/sopel
|
c78d85197538ac4d74389ce1ebe48a67c9c55978
|
[
"EFL-2.0"
] | 1
|
2019-01-14T13:05:24.000Z
|
2019-01-14T13:05:24.000Z
|
sopel/tools/time.py
|
HumorBaby/sopel
|
c78d85197538ac4d74389ce1ebe48a67c9c55978
|
[
"EFL-2.0"
] | null | null | null |
sopel/tools/time.py
|
HumorBaby/sopel
|
c78d85197538ac4d74389ce1ebe48a67c9c55978
|
[
"EFL-2.0"
] | null | null | null |
# coding=utf-8
"""Tools for getting and displaying the time."""
from __future__ import unicode_literals, absolute_import, print_function, division
import datetime
import pytz
def validate_timezone(zone):
"""Return an IETF timezone from the given IETF zone or common abbreviation.
:param str zone: in a strict or a human-friendly format
:return: the valid IETF timezone properly formatted
:raise ValueError: when ``zone`` is not a valid timezone
Prior to checking timezones, two transformations are made to make the zone
names more human-friendly:
1. the string is split on ``', '``, the pieces reversed, and then joined
with ``/`` (*"New York, America"* becomes *"America/New York"*)
2. Remaining spaces are replaced with ``_``
This means ``new york, america`` becomes ``America/New_York``, and ``utc``
becomes ``UTC``. In the majority of user-facing interactions, such
case-insensitivity will be expected.
If the zone is not valid, ``ValueError`` will be raised.
"""
if zone is None:
return None
zone = '/'.join(reversed(zone.split(', '))).replace(' ', '_')
try:
tz = pytz.timezone(zone)
except pytz.exceptions.UnknownTimeZoneError:
raise ValueError('Invalid time zone.')
return tz.zone
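# Illustrative only; not part of the original sopel module. A tiny self-check of
# the "City, Region" convenience handling described above (assumes a pytz release
# with case-insensitive zone lookup, which the docstring relies on).
def _demo_validate_timezone():
    assert validate_timezone('new york, america') == 'America/New_York'
    assert validate_timezone('utc') == 'UTC'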
def validate_format(tformat):
"""Validate a time format string.
:param str tformat: the format string to validate
:return: the format string, if valid
:raise ValueError: when ``tformat`` is not a valid time format string
"""
try:
time = datetime.datetime.utcnow()
time.strftime(tformat)
except (ValueError, TypeError):
raise ValueError('Invalid time format.')
return tformat
def get_nick_timezone(db, nick):
"""Get a nick's timezone from database.
:param db: Bot's database handler (usually ``bot.db``)
:type db: :class:`~sopel.db.SopelDB`
:param nick: IRC nickname
:type nick: :class:`~sopel.tools.Identifier`
:return: the timezone associated with the ``nick``
If a timezone cannot be found for ``nick``, or if it is invalid, ``None``
will be returned.
"""
try:
return validate_timezone(db.get_nick_value(nick, 'timezone'))
except ValueError:
return None
def get_channel_timezone(db, channel):
"""Get a channel's timezone from database.
:param db: Bot's database handler (usually ``bot.db``)
:type db: :class:`~sopel.db.SopelDB`
:param channel: IRC channel name
:type channel: :class:`~sopel.tools.Identifier`
:return: the timezone associated with the ``channel``
If a timezone cannot be found for ``channel``, or if it is invalid,
``None`` will be returned.
"""
try:
return validate_timezone(db.get_channel_value(channel, 'timezone'))
except ValueError:
return None
def get_timezone(db=None, config=None, zone=None, nick=None, channel=None):
"""Find, and return, the appropriate timezone.
:param db: bot database object (optional)
:type db: :class:`~.db.SopelDB`
:param config: bot config object (optional)
:type config: :class:`~.config.Config`
:param str zone: preferred timezone name (optional)
:param str nick: nick whose timezone to use, if set (optional)
:param str channel: channel whose timezone to use, if set (optional)
Timezone is pulled in the following priority:
1. ``zone``, if it is valid
2. The timezone for the channel or nick ``zone`` in ``db`` if one is set
and valid.
3. The timezone for the nick ``nick`` in ``db``, if one is set and valid.
4. The timezone for the channel ``channel`` in ``db``, if one is set and
valid.
5. The default timezone in ``config``, if one is set and valid.
    If ``db`` is not given, or given but not set up, steps 2 through 4 will
    be skipped. If ``config`` is not given, step 5 will be skipped. If no
    step yields a valid timezone, ``None`` is returned.
Valid timezones are those present in the IANA Time Zone Database.
.. seealso::
The :func:`validate_timezone` function handles the validation and
formatting of the timezone.
"""
def _check(zone):
try:
return validate_timezone(zone)
except ValueError:
return None
tz = None
if zone:
tz = _check(zone)
if not tz:
tz = _check(
db.get_nick_or_channel_value(zone, 'timezone'))
if not tz and nick:
tz = _check(db.get_nick_value(nick, 'timezone'))
if not tz and channel:
tz = _check(db.get_channel_value(channel, 'timezone'))
if not tz and config and config.core.default_timezone:
tz = _check(config.core.default_timezone)
return tz
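# Illustrative only; not part of the original module. When ``zone`` is already a
# valid IANA name, none of the database or config fallbacks above are consulted,
# so the call works without a db handle.
def _demo_get_timezone():
    assert get_timezone(zone='Europe/Paris') == 'Europe/Paris'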
def format_time(db=None, config=None, zone=None, nick=None, channel=None,
time=None):
"""Return a formatted string of the given time in the given zone.
:param db: bot database object (optional)
:type db: :class:`~.db.SopelDB`
:param config: bot config object (optional)
:type config: :class:`~.config.Config`
:param str zone: name of timezone to use (optional)
:param str nick: nick whose time format to use, if set (optional)
:param str channel: channel whose time format to use, if set (optional)
:param time: the time value to format (optional)
:type time: :class:`~datetime.datetime`
``time``, if given, should be a naive ``datetime.datetime`` object and will
be treated as being in the UTC timezone. If it is not given, the current
time will be used. If ``zone`` is given it must be present in the IANA Time
Zone Database; ``get_timezone`` can be helpful for this. If ``zone`` is not
given, UTC will be assumed.
The format for the string is chosen in the following order:
1. The format for the nick ``nick`` in ``db``, if one is set and valid.
2. The format for the channel ``channel`` in ``db``, if one is set and
valid.
3. The default format in ``config``, if one is set and valid.
4. ISO-8601
If ``db`` is not given or is not set up, steps 1 and 2 are skipped. If
config is not given, step 3 will be skipped.
"""
tformat = None
if db:
if nick:
tformat = db.get_nick_value(nick, 'time_format')
if not tformat and channel:
tformat = db.get_channel_value(channel, 'time_format')
if not tformat and config and config.core.default_time_format:
tformat = config.core.default_time_format
if not tformat:
tformat = '%Y-%m-%d - %T%Z'
if not time:
time = datetime.datetime.utcnow()
if not zone:
return time.strftime(tformat)
else:
if not time.tzinfo:
utc = pytz.timezone('UTC')
time = utc.localize(time)
zone = pytz.timezone(zone)
return time.astimezone(zone).strftime(tformat)
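# Illustrative only; not part of the original module. With no db or config the
# module's default format ('%Y-%m-%d - %T%Z') is used and a naive datetime is
# interpreted as UTC; the timestamp below is an arbitrary example value.
def _demo_format_time():
    moment = datetime.datetime(2019, 4, 8, 12, 0, 0)
    print(format_time(zone='UTC', time=moment))  # e.g. 2019-04-08 - 12:00:00UTC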
def seconds_to_human(secs):
"""Format :class:`~datetime.timedelta` as a human-readable relative time.
:param secs: time difference to format
:type secs: :class:`~datetime.timedelta` or integer
Inspiration for function structure from:
https://gist.github.com/Highstaker/280a09591df4a5fb1363b0bbaf858f0d
Example outputs are:
.. code-block:: text
2 years, 1 month ago
in 4 hours, 45 minutes
in 8 days, 5 hours
1 year ago
"""
if isinstance(secs, datetime.timedelta):
secs = secs.total_seconds()
future = False
if secs < 0:
future = True
secs = int(secs)
secs = abs(secs)
if secs == 0:
# zero is a special case that the algorithm below won't handle correctly (#1841)
result = "0 seconds"
else:
years = secs // 31536000
months = (secs - years * 31536000) // 2635200
days = (secs - years * 31536000 - months * 2635200) // 86400
hours = (secs - years * 31536000 - months * 2635200 - days * 86400) // 3600
minutes = (secs - years * 31536000 - months * 2635200 - days * 86400 - hours * 3600) // 60
seconds = secs - years * 31536000 - months * 2635200 - days * 86400 - hours * 3600 - minutes * 60
years_text = "year{}".format("s" if years != 1 else "")
months_text = "month{}".format("s" if months != 1 else "")
days_text = "day{}".format("s" if days != 1 else "")
hours_text = "hour{}".format("s" if hours != 1 else "")
minutes_text = "minute{}".format("s" if minutes != 1 else "")
seconds_text = "second{}".format("s" if seconds != 1 else "")
result = ", ".join(filter(lambda x: bool(x), [
"{0} {1}".format(years, years_text) if years else "",
"{0} {1}".format(months, months_text) if months else "",
"{0} {1}".format(days, days_text) if days else "",
"{0} {1}".format(hours, hours_text) if hours else "",
"{0} {1}".format(minutes, minutes_text) if minutes else "",
"{0} {1}".format(seconds, seconds_text) if seconds else ""
]))
# Granularity
result = ", ".join(result.split(", ")[:2])
if future is False:
result += " ago"
else:
result = "in " + result
return result
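# Illustrative only; not part of the original module. Mirrors the docstring
# examples: positive inputs render as "... ago", negative ones as "in ...",
# and the output is truncated to the two largest units.
def _demo_seconds_to_human():
    print(seconds_to_human(31536000 + 2635200))     # 1 year, 1 month ago
    print(seconds_to_human(-(4 * 3600 + 45 * 60)))  # in 4 hours, 45 minutes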
| 34.864151
| 105
| 0.630696
|
92e8475d0f77a10ea2fe8c9ea35b307b176af0b5
| 3,808
|
py
|
Python
|
frameit/frame.py
|
biggorilla-gh/frameit
|
9cae56b4089fff6c955f8b86c699aa402a1390ac
|
[
"Apache-2.0"
] | 3
|
2018-12-29T01:10:37.000Z
|
2020-06-12T09:45:11.000Z
|
frameit/frame.py
|
biggorilla-gh/frameit
|
9cae56b4089fff6c955f8b86c699aa402a1390ac
|
[
"Apache-2.0"
] | null | null | null |
frameit/frame.py
|
biggorilla-gh/frameit
|
9cae56b4089fff6c955f8b86c699aa402a1390ac
|
[
"Apache-2.0"
] | null | null | null |
import json
from frameit.models import Model
from frameit.utterance import Utterance
from frameit.frameattr import FrameAttribute
class Frame(object):
def __init__(self, name, lang='en'):
self.name = name
self.utterances = set()
self.models = []
self.weights = []
self.sum_weights = 0.0
self.num_models = 0
self.attributes = set()
self.lang = lang
def addExample(self, utterance):
assert isinstance(utterance, Utterance)
self.utterances.add(utterance)
def addExamples(self, utterances):
for utterance in utterances:
self.addExample(utterance)
def addAttribute(self, attrib):
self.attributes.add(attrib)
def getAttribute(self, name):
for att in self.attributes:
if att.name == name:
return att
return None
@property
def model(self):
if self.num_models == 0:
self.addModel(Model(lang=self.lang))
return self.models[0]
def addModel(self, model, weight=1.0):
self.models.append(model)
self.weights.append(weight)
self.num_models += 1
self.sum_weights += weight
def predict(self, data):
for x in data:
assert isinstance(x, Utterance)
labels = []
for x in data:
score = 0.0
for i in range(self.num_models):
                score += self.weights[i] * self.models[i].predict([x])[0][1]
score /= self.sum_weights
labels.append([1-score, score])
return labels
def trainModel(self, corpus, neg_set=None, reg_param=None,
batch_size=128, epochs=4, scale_to=4000, index=0):
while(self.num_models <= index):
self.addModel(Model(lang=self.lang))
history = self.models[index].train(self.utterances, set(corpus.utterances),
neg_set, reg_param=reg_param,
batch_size=batch_size, epochs=epochs,
scale_to=scale_to)
return history
def trainAll(self, corpus):
        self.trainModel(corpus)
for attrib in self.attributes:
attrib.trainModel(corpus)
# ------------------------------------------
def __str__(self):
s = "Frame: " + self.name + "\n"
for attr in self.attributes:
s += "\t" + str(attr)
return s
def save(self, filename):
with open(filename, "w") as handle:
json.dump(self.get_state(), handle, indent=2)
@classmethod
def load(cls, filename):
with open(filename, "r") as handle:
state = json.load(handle)
frame = cls(None)
frame.set_state(state)
return frame
def get_state(self):
state = {'name': self.name,
'lang': self.lang,
'models': [model.get_state() for model in self.models],
'weights': self.weights,
'attributes': [attr.get_state() for attr in self.attributes]}
return state
def set_state(self, state):
self.name = state['name']
self.lang = state.get('lang', 'en')
self.attributes = []
for attr_state in state['attributes']:
attribute = FrameAttribute(None, None)
attribute.set_state(attr_state)
self.attributes.append(attribute)
model_states = state['models'] if 'models' in state else [state['model']]
model_weights = state['weights'] if 'weights' in state else [1.0]
for i in range(len(model_states)):
model = Model(lang=self.lang)
model.set_state(model_states[i])
self.addModel(model, model_weights[i])
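# Illustrative only; not part of the original module, and the file name below is
# hypothetical. A Frame round-trips through JSON via save()/load(), which wrap
# get_state()/set_state().
def _demo_frame_roundtrip(path='frame_state.json'):
    frame = Frame('greeting')
    frame.save(path)
    restored = Frame.load(path)
    print(restored.name, restored.lang)  # greeting en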
| 31.471074
| 83
| 0.555147
|
db7cf28eb48f844ce7d2fe90fec5cf468e6eb4ad
| 2,100
|
py
|
Python
|
gan_training/eval.py
|
MiaoyunZhao/GANTransferLimitedData
|
5545bc37a1d7d4f28a9c3588aaa12a616bbddd88
|
[
"MIT"
] | 41
|
2020-06-10T03:49:35.000Z
|
2022-03-24T02:06:38.000Z
|
gan_training/eval.py
|
nianweijie/GANTransferLimitedData
|
5545bc37a1d7d4f28a9c3588aaa12a616bbddd88
|
[
"MIT"
] | 5
|
2020-11-16T23:50:17.000Z
|
2021-05-13T09:43:49.000Z
|
gan_training/eval.py
|
nianweijie/GANTransferLimitedData
|
5545bc37a1d7d4f28a9c3588aaa12a616bbddd88
|
[
"MIT"
] | 6
|
2020-11-08T16:14:07.000Z
|
2022-03-11T14:31:44.000Z
|
import torch
from gan_training.metrics import inception_score
from gan_training.metrics.fid_score import calculate_fid_given_images
import numpy as np
class Evaluator(object):
def __init__(self, generator, zdist, ydist, batch_size=64,
inception_nsamples=10000, device=None, fid_real_samples=None,
fid_sample_size=10000):
self.generator = generator
self.zdist = zdist
self.ydist = ydist
self.inception_nsamples = inception_nsamples
self.batch_size = batch_size
self.device = device
if fid_real_samples is not None:
self.fid_real_samples = fid_real_samples.numpy()
self.fid_sample_size = fid_sample_size
def compute_inception_score(self):
self.generator.eval()
imgs = []
while(len(imgs) < self.inception_nsamples):
ztest = self.zdist.sample((self.batch_size,))
ytest = self.ydist.sample((self.batch_size,))
samples, _ = self.generator(ztest, ytest)
samples = [s.data.cpu().numpy() for s in samples]
imgs.extend(samples)
inception_imgs = imgs[:self.inception_nsamples]
score, score_std = inception_score(
inception_imgs, device=self.device, resize=True, splits=10,
batch_size=self.batch_size)
fid_imgs = np.array(imgs[:self.fid_sample_size])
if self.fid_real_samples is not None:
fid = calculate_fid_given_images(
self.fid_real_samples,
fid_imgs,
batch_size=self.batch_size,
cuda=True)
return score, score_std, fid
def create_samples(self, z, y=None):
self.generator.eval()
batch_size = z.size(0)
# Parse y
if y is None:
y = self.ydist.sample((batch_size,))
elif isinstance(y, int):
y = torch.full((batch_size,), y,
device=self.device, dtype=torch.int64)
# Sample x
with torch.no_grad():
x = self.generator(z, y)
return x
| 35
| 78
| 0.608571
|
73e018fc8c400db9ac968f1f816273190fdf58b3
| 7,125
|
py
|
Python
|
rfvision/datasets/pipelines/imvotenet_pipeline.py
|
tycoer/rfvision-1
|
db6e28746d8251d1f394544c32b9e0af388d9964
|
[
"Apache-2.0"
] | 6
|
2021-09-25T03:53:06.000Z
|
2022-02-19T03:25:11.000Z
|
rfvision/datasets/pipelines/imvotenet_pipeline.py
|
tycoer/rfvision-1
|
db6e28746d8251d1f394544c32b9e0af388d9964
|
[
"Apache-2.0"
] | 1
|
2021-07-21T13:14:54.000Z
|
2021-07-21T13:14:54.000Z
|
rfvision/datasets/pipelines/imvotenet_pipeline.py
|
tycoer/rfvision-1
|
db6e28746d8251d1f394544c32b9e0af388d9964
|
[
"Apache-2.0"
] | 2
|
2021-07-16T03:25:04.000Z
|
2021-11-22T06:04:01.000Z
|
import numpy as np
import rflib
import os
from rfvision.datasets.builder import PIPELINES
import torch
@PIPELINES.register_module()
class LoadImVote:
def __init__(self,
data_root,
class_names,
max_imvote_per_pixel=3,
):
self.data_root = data_root
self.max_imvote_per_pixel = max_imvote_per_pixel
self.vote_dims = 1+self.max_imvote_per_pixel*4
self.cat2label = {cat: class_names.index(cat) for cat in class_names}
self.MAX_NUM_2D_DET = 100
self.MAX_NUM_PIXEL = 530 * 730
self.NUM_CLS = len(class_names)
self.cls_id_map = {}
self.cls_score_map = {}
self.bbox_2d_map = {}
bbox2d_train_dir = os.path.join(data_root, 'sunrgbd_2d_bbox_50k_v1_train')
bbox2d_test_dir = os.path.join(data_root, 'sunrgbd_2d_bbox_50k_v1_val')
cls_id_map_train , cls_score_map_train, bbox_2d_map_train = self._pre_load_bbox2d(bbox2d_train_dir)
cls_id_map_test , cls_score_map_test, bbox_2d_map_test = self._pre_load_bbox2d(bbox2d_test_dir)
self.cls_id_map.update(cls_id_map_train)
self.cls_id_map.update(cls_id_map_test)
self.cls_score_map.update(cls_score_map_train)
self.cls_score_map.update(cls_score_map_test)
self.bbox_2d_map.update(bbox_2d_map_train)
self.bbox_2d_map.update(bbox_2d_map_test)
def _pre_load_bbox2d(self,bbox2d_dir):
cls_id_map = {}
cls_score_map = {}
bbox_2d_map = {}
for filename in os.listdir(bbox2d_dir):
# Read 2D object detection boxes and scores
cls_id_list = []
cls_score_list = []
bbox_2d_list = []
idx = int(filename[:6])
for line in open(os.path.join(bbox2d_dir , filename), 'r'):
det_info = line.rstrip().split(" ")
prob = float(det_info[-1])
# Filter out low-confidence 2D detections
if prob < 0.1:
continue
cls_id_list.append(self.cat2label[det_info[0]])
cls_score_list.append(prob)
bbox_2d_list.append(np.float32(det_info[4:8]).astype('int32'))
cls_id_map[idx] = cls_id_list
cls_score_map[idx] = cls_score_list
bbox_2d_map[idx] = bbox_2d_list
return cls_id_map, cls_score_map, bbox_2d_map
def get_imvote(self, idx):
# Read image
full_img = rflib.imread(os.path.join(self.data_root, 'sunrgbd_trainval/image/', f'{idx:06d}.jpg'))
full_img_height = full_img.shape[0]
full_img_width = full_img.shape[1]
# Read camera parameters
# ------------------------------- 2D IMAGE VOTES ------------------------------
cls_id_list = self.cls_id_map[idx]
cls_score_list = self.cls_score_map[idx]
bbox_2d_list =self. bbox_2d_map[idx]
obj_img_list = []
for i2d, (cls2d, box2d) in enumerate(zip(cls_id_list, bbox_2d_list)):
xmin, ymin, xmax, ymax = box2d
# During training we randomly drop 2D boxes to reduce over-fitting
if np.random.random()>0.5:
continue
obj_img = full_img[ymin:ymax, xmin:xmax, :]
obj_h = obj_img.shape[0]
obj_w = obj_img.shape[1]
# Bounding box coordinates (4 values), class id, index to the semantic cues
meta_data = (xmin, ymin, obj_h, obj_w, cls2d, i2d)
if obj_h == 0 or obj_w == 0:
continue
# Use 2D box center as approximation
uv_centroid = np.array([int(obj_w/2), int(obj_h/2)])
uv_centroid = np.expand_dims(uv_centroid, 0)
v_coords, u_coords = np.meshgrid(range(obj_h), range(obj_w), indexing='ij')
img_vote = np.transpose(np.array([u_coords, v_coords]), (1,2,0))
img_vote = np.expand_dims(uv_centroid, 0) - img_vote
obj_img_list.append((meta_data, img_vote))
full_img_votes = np.zeros((full_img_height,full_img_width,self.vote_dims), dtype=np.float32)
# Empty votes: 2d box index is set to -1
full_img_votes[:,:,3::4] = -1.
for obj_img_data in obj_img_list:
meta_data, img_vote = obj_img_data
u0, v0, h, w, cls2d, i2d = meta_data
for u in range(u0, u0+w):
for v in range(v0, v0+h):
iidx = int(full_img_votes[v,u,0])
if iidx >= self.max_imvote_per_pixel:
continue
full_img_votes[v,u,(1+iidx*4):(1+iidx*4+2)] = img_vote[v-v0,u-u0,:]
full_img_votes[v,u,(1+iidx*4+2)] = cls2d
full_img_votes[v,u,(1+iidx*4+3)] = i2d + 1 # add +1 here as we need a dummy feature for pixels outside all boxes
full_img_votes[v0:(v0+h), u0:(u0+w), 0] += 1
full_img_votes_1d = np.zeros((self.MAX_NUM_PIXEL*self.vote_dims), dtype=np.float32)
full_img_votes_1d[0:full_img_height*full_img_width*self.vote_dims] = full_img_votes.flatten()
        full_img = (full_img - 128) / 255.
        full_img_1d = np.zeros((self.MAX_NUM_PIXEL*3), dtype=np.float32)
        full_img_1d[:full_img_height*full_img_width*3] = full_img.flatten()
# Semantic cues: one-hot vector for class scores
cls_score_feats = np.zeros((1+self.MAX_NUM_2D_DET,self.NUM_CLS), dtype=np.float32)
        # First row is a dummy feature
len_obj = len(cls_id_list)
if len_obj:
ind_obj = np.arange(1,len_obj+1)
ind_cls = np.array(cls_id_list)
cls_score_feats[ind_obj, ind_cls] = np.array(cls_score_list)
imvote_dict = {}
imvote_dict['cls_score_feats'] = cls_score_feats.astype(np.float32)
imvote_dict['full_img_votes_1d'] = full_img_votes_1d.astype(np.float32)
imvote_dict['full_img_1d'] = full_img_1d.astype(np.float32)
imvote_dict['full_img_width'] = full_img_width
return imvote_dict
def __call__(self, results):
info = results['ann_info']['info']
imvote_dict = self.get_imvote(results['sample_idx'])
# update Rtilt according to aug
Rtilt = info['calib']['Rt']
rot_mat_T = np.eye(3).T
# rotation
if 'pcd_rotation' in results:
rot_mat_T = results['pcd_rotation']
# filp
if results['pcd_horizontal_flip'] == True:
rot_mat_T[0,:] *= -1
Rtilt = np.dot(rot_mat_T.T, Rtilt)
# scale
pcd_scale_factor = np.float32([results['pcd_scale_factor']])
Rtilt = np.dot(np.eye(3) * pcd_scale_factor, Rtilt)
# add additional info to imvote_dict
imvote_dict['scale'] = pcd_scale_factor.astype('float32')
imvote_dict['calib_Rtilt'] = Rtilt.astype('float32')
imvote_dict['calib_K'] = info['calib']['K'].reshape(3, 3, order = 'F').astype('float32')
results['imvote_dict'] = imvote_dict
return results
| 43.981481
| 132
| 0.597754
|
3b9a07adc0a77609caff823a3149f8be00f3f357
| 256
|
py
|
Python
|
django_summernote/__init__.py
|
yuong1979/django-summernote
|
cf9b2d838310211738162c2acbc7b9145d3970b2
|
[
"MIT"
] | null | null | null |
django_summernote/__init__.py
|
yuong1979/django-summernote
|
cf9b2d838310211738162c2acbc7b9145d3970b2
|
[
"MIT"
] | null | null | null |
django_summernote/__init__.py
|
yuong1979/django-summernote
|
cf9b2d838310211738162c2acbc7b9145d3970b2
|
[
"MIT"
] | null | null | null |
version_info = (0, 8, 8, 6)
__version__ = version = '.'.join(map(str, version_info))
__project__ = PROJECT = 'django-summernote'
__author__ = AUTHOR = "django-summernote contributors"
default_app_config = 'django_summernote.apps.DjangoSummernoteConfig'
| 28.444444
| 68
| 0.761719
|
534411eb83793cc72583dc3ab8c9c887d49831f1
| 4,699
|
py
|
Python
|
tests/examples/minlplib/cvxnonsep_psig20r.py
|
ouyang-w-19/decogo
|
52546480e49776251d4d27856e18a46f40c824a1
|
[
"MIT"
] | 2
|
2021-07-03T13:19:10.000Z
|
2022-02-06T10:48:13.000Z
|
tests/examples/minlplib/cvxnonsep_psig20r.py
|
ouyang-w-19/decogo
|
52546480e49776251d4d27856e18a46f40c824a1
|
[
"MIT"
] | 1
|
2021-07-04T14:52:14.000Z
|
2021-07-15T10:17:11.000Z
|
tests/examples/minlplib/cvxnonsep_psig20r.py
|
ouyang-w-19/decogo
|
52546480e49776251d4d27856e18a46f40c824a1
|
[
"MIT"
] | null | null | null |
# MINLP written by GAMS Convert at 04/21/18 13:51:24
#
# Equation counts
# Total E G L N X C B
# 23 1 0 22 0 0 0 0
#
# Variable counts
# x b i s1s s2s sc si
# Total cont binary integer sos1 sos2 scont sint
# 43 33 0 10 0 0 0 0
# FX 0 0 0 0 0 0 0 0
#
# Nonzero counts
# Total const NL DLL
# 85 64 21 0
#
# Reformulation has removed 1 variable and 1 equation
from pyomo.environ import *
model = m = ConcreteModel()
m.i1 = Var(within=Integers,bounds=(1,10),initialize=1)
m.i2 = Var(within=Integers,bounds=(1,10),initialize=1)
m.i3 = Var(within=Integers,bounds=(1,10),initialize=1)
m.i4 = Var(within=Integers,bounds=(1,10),initialize=1)
m.i5 = Var(within=Integers,bounds=(1,10),initialize=1)
m.i6 = Var(within=Integers,bounds=(1,10),initialize=1)
m.i7 = Var(within=Integers,bounds=(1,10),initialize=1)
m.i8 = Var(within=Integers,bounds=(1,10),initialize=1)
m.i9 = Var(within=Integers,bounds=(1,10),initialize=1)
m.i10 = Var(within=Integers,bounds=(1,10),initialize=1)
m.x11 = Var(within=Reals,bounds=(1,10),initialize=1)
m.x12 = Var(within=Reals,bounds=(1,10),initialize=1)
m.x13 = Var(within=Reals,bounds=(1,10),initialize=1)
m.x14 = Var(within=Reals,bounds=(1,10),initialize=1)
m.x15 = Var(within=Reals,bounds=(1,10),initialize=1)
m.x16 = Var(within=Reals,bounds=(1,10),initialize=1)
m.x17 = Var(within=Reals,bounds=(1,10),initialize=1)
m.x18 = Var(within=Reals,bounds=(1,10),initialize=1)
m.x19 = Var(within=Reals,bounds=(1,10),initialize=1)
m.x20 = Var(within=Reals,bounds=(1,10),initialize=1)
m.x21 = Var(within=Reals,bounds=(1E-8,None),initialize=1E-8)
m.x22 = Var(within=Reals,bounds=(None,0),initialize=0)
m.x23 = Var(within=Reals,bounds=(None,0),initialize=0)
m.x24 = Var(within=Reals,bounds=(None,0),initialize=0)
m.x25 = Var(within=Reals,bounds=(None,0),initialize=0)
m.x26 = Var(within=Reals,bounds=(None,0),initialize=0)
m.x27 = Var(within=Reals,bounds=(None,0),initialize=0)
m.x28 = Var(within=Reals,bounds=(None,0),initialize=0)
m.x29 = Var(within=Reals,bounds=(None,0),initialize=0)
m.x30 = Var(within=Reals,bounds=(None,0),initialize=0)
m.x31 = Var(within=Reals,bounds=(None,0),initialize=0)
m.x32 = Var(within=Reals,bounds=(None,0),initialize=0)
m.x33 = Var(within=Reals,bounds=(None,0),initialize=0)
m.x34 = Var(within=Reals,bounds=(None,0),initialize=0)
m.x35 = Var(within=Reals,bounds=(None,0),initialize=0)
m.x36 = Var(within=Reals,bounds=(None,0),initialize=0)
m.x37 = Var(within=Reals,bounds=(None,0),initialize=0)
m.x38 = Var(within=Reals,bounds=(None,0),initialize=0)
m.x39 = Var(within=Reals,bounds=(None,0),initialize=0)
m.x40 = Var(within=Reals,bounds=(None,0),initialize=0)
m.x41 = Var(within=Reals,bounds=(None,0),initialize=0)
m.x42 = Var(within=Reals,bounds=(None,None),initialize=0)
m.obj = Objective(expr= m.i1 + m.i2 + m.i3 + m.i4 + m.i5 + m.i6 + m.i7 + m.i8 + m.i9 + m.i10 + m.x11 + m.x12 + m.x13
+ m.x14 + m.x15 + m.x16 + m.x17 + m.x18 + m.x19 + m.x20 + 20000*m.x21, sense=minimize)
m.c2 = Constraint(expr= m.x22 + m.x23 + m.x24 + m.x25 + m.x26 + m.x27 + m.x28 + m.x29 + m.x30 + m.x31 + m.x32 + m.x33
+ m.x34 + m.x35 + m.x36 + m.x37 + m.x38 + m.x39 + m.x40 + m.x41 + m.x42 <= 0)
m.c3 = Constraint(expr=-0.32*log(m.i1) - m.x22 <= 0)
m.c4 = Constraint(expr=-0.19*log(m.i2) - m.x23 <= 0)
m.c5 = Constraint(expr=-0.405*log(m.i3) - m.x24 <= 0)
m.c6 = Constraint(expr=-0.265*log(m.i4) - m.x25 <= 0)
m.c7 = Constraint(expr=-0.175*log(m.i5) - m.x26 <= 0)
m.c8 = Constraint(expr=-0.44*log(m.i6) - m.x27 <= 0)
m.c9 = Constraint(expr=-0.275*log(m.i7) - m.x28 <= 0)
m.c10 = Constraint(expr=-0.47*log(m.i8) - m.x29 <= 0)
m.c11 = Constraint(expr=-0.31*log(m.i9) - m.x30 <= 0)
m.c12 = Constraint(expr=-0.295*log(m.i10) - m.x31 <= 0)
m.c13 = Constraint(expr=-0.105*log(m.x11) - m.x32 <= 0)
m.c14 = Constraint(expr=-0.15*log(m.x12) - m.x33 <= 0)
m.c15 = Constraint(expr=-0.235*log(m.x13) - m.x34 <= 0)
m.c16 = Constraint(expr=-0.115*log(m.x14) - m.x35 <= 0)
m.c17 = Constraint(expr=-0.42*log(m.x15) - m.x36 <= 0)
m.c18 = Constraint(expr=-0.095*log(m.x16) - m.x37 <= 0)
m.c19 = Constraint(expr=-0.115*log(m.x17) - m.x38 <= 0)
m.c20 = Constraint(expr=-0.085*log(m.x18) - m.x39 <= 0)
m.c21 = Constraint(expr=-0.115*log(m.x19) - m.x40 <= 0)
m.c22 = Constraint(expr=-0.022*log(m.x20) - m.x41 <= 0)
m.c23 = Constraint(expr=-log(m.x21) - m.x42 <= 0)
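# Illustrative only: the GAMS-converted file above just declares the model. The
# helper below sketches how it might be solved; the solver name ('bonmin') is an
# assumption and must be available on the local PATH. SolverFactory and value come
# from the `pyomo.environ` wildcard import at the top of the file.
def _solve_model(solver_name='bonmin'):
    results = SolverFactory(solver_name).solve(m, tee=True)
    print('objective value:', value(m.obj))
    return results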
| 40.86087
| 119
| 0.606299
|
564c46c7d039fb692ab48b4d826556ad55ff1b69
| 11,157
|
py
|
Python
|
src/se3_distributions/bbTrans/testGaussianMMBounds.py
|
Mostafa-Mansour/se3_distributions
|
3c1c2c754e9102a031ae6ff14b703cee0163c413
|
[
"MIT"
] | 1
|
2021-09-19T18:35:42.000Z
|
2021-09-19T18:35:42.000Z
|
src/se3_distributions/bbTrans/testGaussianMMBounds.py
|
Mostafa-Mansour/se3_distributions
|
3c1c2c754e9102a031ae6ff14b703cee0163c413
|
[
"MIT"
] | null | null | null |
src/se3_distributions/bbTrans/testGaussianMMBounds.py
|
Mostafa-Mansour/se3_distributions
|
3c1c2c754e9102a031ae6ff14b703cee0163c413
|
[
"MIT"
] | 3
|
2021-11-07T12:51:20.000Z
|
2022-01-07T10:37:07.000Z
|
import numpy as np
from scipy.linalg import det, solve, inv
class Box(object):
def __init__(self, ld, ru):
self.ld = ld
self.ru = ru
self.lu = np.array([self.ld[0],self.ru[1]])
self.rd = np.array([self.ru[0],self.ld[1]])
def Inside(self, x):
return (self.ld <= x).all() and (x <= self.ru).all()
def GetEdge(self, i):
if i == 0:
return self.ld, -self.ld + self.lu
elif i == 1:
return self.lu, -self.lu + self.ru
elif i == 2:
return self.ru, -self.ru + self.rd
elif i == 3:
return self.rd, -self.rd + self.ld
def GetMiddle(self):
return (self.ld+self.ru)*0.5
class Gaussian(object):
def __init__(self, mu, Sigma, pi):
self.pi = pi
self.mu = mu
self.Sigma = Sigma
self.D = mu.size
def pdf(self, x):
return (2.*np.pi)**(-self.D*0.5) / np.sqrt(det(self.Sigma)) \
* np.exp(-0.5*(x-self.mu).T.dot(solve(self.Sigma, x-self.mu)))
def logPdf(self, x):
return np.log(2.*np.pi)*(-self.D*0.5) - 0.5*np.log(det(self.Sigma))\
-0.5*(x-self.mu).T.dot(solve(self.Sigma, x-self.mu))
def GetZ(self):
return (2.*np.pi)**(-self.D*0.5) / np.sqrt(det(self.Sigma))
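# Illustrative only; not part of the original script. A quick numerical sanity
# check of Gaussian.pdf against scipy's multivariate normal density (scipy is
# already a dependency here via scipy.linalg); the test point is arbitrary.
def _check_gaussian_pdf():
  from scipy.stats import multivariate_normal
  g = Gaussian(np.array([[0.],[1.]]), np.eye(2)*0.5, 1.0)
  x = np.array([[0.3],[0.7]])
  ref = multivariate_normal(mean=g.mu.ravel(), cov=g.Sigma).pdf(x.ravel())
  assert np.allclose(g.pdf(x), ref)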
def ComputeGmmForT(gmmA, gmmB, R):
gmmT = []
for gA in gmmA:
for gB in gmmB:
gmmT.append(Gaussian(-gA.mu + R.dot(gB.mu), gA.Sigma + \
R.dot(gB.Sigma.dot(R.T)), gA.pi*gB.pi))
print(gmmT[-1].mu.ravel(), gmmT[-1].pi)
# Gamma_global = np.zeros(len(gmmT))
Gamma_jk = np.zeros((len(gmmT),len(gmmT)))
for k, gTk in enumerate(gmmT):
# tU = FindMaxTinBox(inv(gTk.Sigma), solve(gTk.Sigma, gTk.mu),
# box_max)
if False:
print('--', t.ravel())
plt.figure()
for i in range(4):
a,_ = box.GetEdge(i)
plt.plot(a[0],a[1],'ro')
print(a.ravel())
plt.plot(tU[0],tU[1],'bx')
plt.plot(gTk.mu[0],gTk.mu[1],'gx')
plt.xlim([-0.1,2.1])
plt.ylim([-0.1,2.1])
plt.show()
for j, gTj in enumerate(gmmT):
if not k == j:
Gamma_jk[j,k] = gTj.pi*gTj.GetZ()/(gTk.pi*gTk.GetZ())
# Gamma_global[k] += Gamma_jk[j,k] * \
# np.exp(0.5*(tU-gTk.mu).T.dot(solve(gTk.Sigma, tU-gTk.mu)))
A = []
b = []
for k,gT in enumerate(gmmT):
A.append(inv(gT.Sigma))
b.append(solve(gT.Sigma, gT.mu))
# A += Gamma_global[k] * inv(gT.Sigma)
# b += Gamma_global[k] * solve(gT.Sigma, gT.mu)
return gmmT, A, b, Gamma_jk
def LowerBound(gmmT, box):
lb = 0;
for gT in gmmT:
lb += gT.pi * gT.pdf(box.GetMiddle())
lbs = np.zeros(len(gmmT))
for k,gT in enumerate(gmmT):
lbs[k] = np.log(gT.pi) + gT.logPdf(box.GetMiddle())
print("LB", lb, np.exp(lbs-lbs.max()).sum()*np.exp(lbs.max()))
if False:
print('--', t.ravel())
plt.figure()
for i in range(4):
a,_ = box.GetEdge(i)
plt.plot(a[0],a[1],'ro')
plt.plot(box.GetMiddle()[0],box.GetMiddle()[1],'bx')
plt.xlim([-0.1,2.1])
plt.ylim([-0.1,2.1])
plt.show()
return lb
def JensenLowerBound(gmmT, box):
lbs = np.ones(len(gmmT));
for i,gT in enumerate(gmmT):
lbs[i] = gT.pi * gT.logPdf(box.GetMiddle())
return (lbs.sum())
def UpperBound(gmmT, box):
ubs = np.ones(len(gmmT));
for i,gT in enumerate(gmmT):
t = FindMinTinBox(inv(gT.Sigma), solve(gT.Sigma, gT.mu), box)
ubs[i] = gT.pi * gT.pdf(t)
# if box.Inside(gT.mu):
# ubs[i] = gT.pi * gT.pdf(gT.mu) # can make this faster
# else:
# vals = np.zeros(4)
# for i in range(4):
# a,d = box.GetEdge(i)
# # This is finding the max!
## beta = 2.*(a+gT.mu).T.dot(solve(gT.Sigma, d))
## alpha = d.T.dot(solve(gT.Sigma, d))
## tau = -2.*beta/alpha
## print alpha, tau
## tau = min(1.,max(0.,tau))
## vals[i] = gT.pdf(a+tau*d)
# vals[i] = gT.pdf(a)
# ubs[i] = np.min(vals)
if not box.Inside(t):
print('--', t.ravel())
plt.figure()
for i in range(4):
a,_ = box.GetEdge(i)
plt.plot(a[0],a[1],'ro')
print(a.ravel())
plt.plot(t[0],t[1],'bx')
plt.plot(gT.mu[0],gT.mu[1],'kx')
plt.show()
return ubs.sum()
def FindMinTinBox(A,b,box,All=False):
ts = []
vals =[]
t = solve(A, b)
#print A,b,t
if not box.Inside(t):
# check the sides for the min
for i in range(4):
a, d = box.GetEdge(i)
alpha = (d.T.dot(b) - d.T.dot(A).dot(a))/(d.T.dot(A).dot(d))
if 0. <= alpha and alpha <= 1.:
t = a+alpha*d
ts.append(t)
vals.append( t.T.dot(A).dot(t) -2.*t.T.dot(b))
# print "box edge: ", a,d
# print (d.T.dot(b)), "over", (d.T.dot(A).dot(d))
# print b.ravel(), d.ravel()
# print alpha, t, vals[-1]
for i in range(4):
t,_ = box.GetEdge(i)
ts.append(t)
vals.append( t.T.dot(A).dot(t) -2.*t.T.dot(b))
i_min = np.argmin(vals)
t = ts[i_min]
# print vals
# print ts
if not box.Inside(t):
print("WARNING sth is wrong here - computed t outside of given box.")
print(t)
if All:
return t, ts, vals
return t
def FindMaxTinBox(A,b,box):
# check the corners for the max
vals = []
ts = []
for i in range(4):
t,_ = box.GetEdge(i)
ts.append(t)
vals.append(t.T.dot(A).dot(t) -2.*t.T.dot(b))
i_max = np.argmax(vals)
t = ts[i_max]
return ts[i_max]
def UpperBound2(gmmT, A, b, Gamma_jk, box):
''' log inequality '''
Gamma = np.ones(len(gmmT))
for k,gTk in enumerate(gmmT):
tU = FindMaxTinBox(A[k], b[k], box)
logU = 0.5*(tU-gTk.mu).T.dot(solve(gTk.Sigma, tU-gTk.mu))
# tL = FindMinTinBox(A[k], b[k], box)
for j,gTj in enumerate(gmmT):
tL = FindMinTinBox(A[k], b[k], box)
logL = 0.5*(tL-gTj.mu).T.dot(solve(gTj.Sigma, tL-gTj.mu))
print(k,"logs", logU, logL, np.exp(logU-logL), Gamma_jk[j,k])
Gamma[k] += Gamma_jk[j,k] * np.exp(logU - logL)
Gamma = 1./Gamma
A_=np.zeros((2,2))
b_=np.zeros((2,1))
for k,gTk in enumerate(gmmT):
A_ += Gamma[k] * A[k]
b_ += Gamma[k] * b[k]
print(A_, b_)
t = FindMinTinBox(A_,b_,box)
print("gamma",Gamma)
ubs = np.zeros(len(gmmT))
for k,gT in enumerate(gmmT):
print("logpdf at ", t.ravel(), gT.logPdf(t), Gamma[k])
ubs[k] = Gamma[k] * (np.log(gT.pi) + gT.logPdf(t))
print(t, ubs)
if False:
plt.figure()
for i in range(4):
a,_ = box.GetEdge(i)
plt.plot(a[0],a[1],'ro')
# print a
plt.plot(t[0],t[1],'bx')
plt.xlim([0,2])
plt.ylim([0,2])
plt.show()
return np.exp(ubs.sum()) * len(gmmT)
def UpperBoundConvexity(gmmT, box):
''' log inequality '''
A_=np.zeros((2,2))
b_=np.zeros((2,1))
c_ = 0
for k,gT in enumerate(gmmT):
A, b = inv(gT.Sigma), solve(gT.Sigma, gT.mu)
tU = FindMinTinBox(A, b, box)
tL = FindMaxTinBox(A, b, box)
L = -0.5*(tL-gT.mu).T.dot(solve(gT.Sigma, tL-gT.mu))
U = -0.5*(tU-gT.mu).T.dot(solve(gT.Sigma, tU-gT.mu))
g = (1.-np.exp(L-U))*np.exp(U)/(U-L)
h = (np.exp(L-U)*U-L)*np.exp(U)/(U-L)
print('L,U', L,U, g, h)
D = gT.pi * (2.*np.pi)**(-1.) / np.sqrt(det(gT.Sigma))
A_ -= 0.5*D*g*A
b_ += D*g*b
c_ += D*(h-0.5*g*gT.mu.T.dot(b))
# print g, h, -0.5*D*g,D*g, D*(h-0.5*g*gT.mu.T.dot(b))
if False:
plt.figure()
for i in range(4):
a,_ = box.GetEdge(i)
plt.plot(a[0],a[1],'ro', ms=6.)
plt.plot(tU[0],tU[1],'bx', ms=20.)
plt.plot(tL[0],tL[1],'gx', ms=20.)
plt.plot(gT.mu[0],gT.mu[1],'bo', ms=20.)
plt.xlim([-0.1,2.1])
plt.ylim([-0.1,2.1])
plt.show()
# Not this one?
t,ts,vals = FindMinTinBox(-A_,0.5*b_,box, True)
ub1 = t.T.dot(A_.dot(t)) + b_.T.dot(t) + c_
print('ub', ub1, t.T.dot(A_.dot(t)), b_.T.dot(t), c_)
print(-0.25*b_.T.dot(solve(A_,b_)) + c_)
# ubs = np.zeros(len(gmmT))
# for k,gT in enumerate(gmmT):
# A, b = inv(gT.Sigma), solve(gT.Sigma, gT.mu)
# tL = FindMaxTinBox(A, b, box)
# tU = FindMinTinBox(A, b, box)
# L = -0.5*(tL-gT.mu).T.dot(solve(gT.Sigma, tL-gT.mu))
# U = -0.5*(tU-gT.mu).T.dot(solve(gT.Sigma, tU-gT.mu))
## print L,U
# g = (1.-np.exp(L-U))*np.exp(U)/(U-L)
# h = (np.exp(L-U)*U-L)*np.exp(U)/(U-L)
## print g, h
# D = gT.pi * (2.*np.pi)**(-1.5) / det(gT.Sigma)
## print D*g
# A_ = -0.5*D*g*A
# b_ = D*g*b
# c_ = D*(h-0.5*g*gT.mu.T.dot(b))
# ubs[k] = t.T.dot(A_.dot(t)) + b_.T.dot(t) + c_
# print ubs
# print ub, ub1
if False:
plt.figure()
for i in range(4):
a,d = box.GetEdge(i)
plt.plot(a[0],a[1],'ro')
plt.plot([a[0], a[0]+d[0]],[a[1],a[1]+d[1]],'r-')
# print a
m = solve(A_,-0.5*b_)
M = solve(-A_, 0.5*b_)
print("m", m.ravel(), M.ravel())
print(vals)
plt.plot(t[0],t[1],'bx',ms=6)
plt.plot(m[0],m[1],'bo',ms=11)
plt.plot(M[0],M[1],'rx',ms=15)
for ti in ts:
plt.plot(ti[0],ti[1],'bx',ms=3)
for k,gT in enumerate(gmmT):
plt.plot(gT.mu[0],gT.mu[1],'go', ms=8.)
plt.xlim([-0.1,2.1])
plt.ylim([-0.1,2.1])
plt.show()
return ub1
def CostFunction(gmmT, t):
c = 0.
for i,gT in enumerate(gmmT):
c += gT.pi * gT.pdf(t)
return c
import matplotlib.pyplot as plt
t = np.ones((2,1))
gmmA = [Gaussian(np.array([[0.],[0.]]), np.eye(2)*0.001, 0.3),
Gaussian(np.array([[1.],[0.]]), np.eye(2)*0.01, 0.3),
Gaussian(np.array([[0.],[1.]]), np.eye(2)*0.01, 0.4)]
gmmB = [Gaussian(np.array([[0.],[0.]])+t, np.eye(2)*0.001, 0.3),
Gaussian(np.array([[1.],[0.]])+t, np.eye(2)*0.01, 0.3),
Gaussian(np.array([[0.],[1.]])+t, np.eye(2)*0.01, 0.4)]
gmmA = [Gaussian(np.array([[0.],[0.]]), np.eye(2)*0.1, 0.3),
Gaussian(np.array([[0.],[1.]]), np.eye(2)*0.01, 0.7)]
gmmB = [Gaussian(np.array([[0.],[0.]])+t, np.eye(2)*0.1, 0.3),
Gaussian(np.array([[0.],[1.]])+t, np.eye(2)*0.01, 0.7)]
box = Box(np.array([[0.],[0.]]),
np.array([[2.],[2.]]))
gmmT, A, b, Gamma_jk = ComputeGmmForT(gmmA, gmmB, \
np.eye(2))
print(Gamma_jk)
plt.figure()
for res in [180]:
#for res in [11, 45,180]:
#for res in [10]:
ubs = np.zeros((res,res))
ubs2 = np.zeros((res,res))
cs = np.zeros((res,res))
lbs = np.zeros((res,res))
  i=res//2
  tx = np.linspace(0,2,res)[res//2]
#for i,tx in enumerate(np.linspace(0,2,res)):
Ty = np.linspace(0,2,res)
devs = np.zeros_like(Ty)
for j,ty in enumerate(Ty):
box = Box(np.array([[tx-1./res],[ty-1./res]]),
np.array([[tx+1./res],[ty+1./res]]))
ubs[i,j] = UpperBound(gmmT, box)
ubs2[i,j] = UpperBoundConvexity(gmmT, box)
# ubs2[i,j] = UpperBound2(gmmT, A, b, Gamma_jk, box)
lbs[i,j] = LowerBound(gmmT, box)
cs[i,j] = CostFunction(gmmT, box.GetMiddle())
devs[j] = np.sqrt((box.GetMiddle()[0]-1.)**2 +
(box.GetMiddle()[1]-1.)**2)
# plt.figure()
# plt.subplot(1,2,1)
# plt.imshow(ubs, interpolation="nearest")
# plt.colorbar()
# plt.subplot(1,2,2)
# plt.imshow(lbs, interpolation="nearest")
# plt.colorbar()
  print(ubs2[res//2,:])
idx = np.argsort(devs)
#
plt.subplot(2,1,1)
  plt.plot(Ty,ubs[res//2,idx], '-', label="ub indep")
  plt.plot(Ty,ubs2[res//2,idx], '--', label="ub joint")
  plt.plot(Ty,(lbs[res//2,idx]), '-.', label="lb")
# plt.plot(Ty,np.log(cs[res/2,:]), 'b-', label="c")
plt.legend()
plt.subplot(2,1,2)
plt.plot(Ty, devs[idx])
plt.show()
| 30.072776
| 73
| 0.536883
|
4a72a740a1149e1e10f7195dee83162c941bddf1
| 7,050
|
py
|
Python
|
fruitbat/catalogue.py
|
abatten/frbz
|
ca350a3a65aaf6033efe094fbf23f8a319a65fb7
|
[
"BSD-3-Clause"
] | 17
|
2019-02-22T02:56:52.000Z
|
2022-03-10T01:10:09.000Z
|
fruitbat/catalogue.py
|
abatten/frbz
|
ca350a3a65aaf6033efe094fbf23f8a319a65fb7
|
[
"BSD-3-Clause"
] | 22
|
2019-02-21T12:15:56.000Z
|
2021-12-15T03:56:16.000Z
|
fruitbat/catalogue.py
|
abatten/frbz
|
ca350a3a65aaf6033efe094fbf23f8a319a65fb7
|
[
"BSD-3-Clause"
] | 8
|
2019-02-19T05:52:59.000Z
|
2021-10-05T01:58:24.000Z
|
import os
import pandas as pd
from e13tools import docstring_substitute
from fruitbat import Frb, methods, cosmologies
__all__ = ["create_analysis_catalogue", "create_methods_catalogue",
"read_frb_row"]
@docstring_substitute(meth=methods.available_methods(),
cosmo=cosmologies.available_cosmologies())
def create_analysis_catalogue(filename="fruitbat_analysis_catalogue",
dataset='default', method='Inoue2004',
cosmology='Planck18'):
"""
Analyses an FRB dataset and produces a CSV file containing the
estimated redshift, fluence, energy and luminosity for each FRB in
additional to its measured quantities.
Parameters
----------
filename: str, optional
The output file name. Default: 'fruitbat_analysis_catalogue'
dataset: str, optional
The path to the FRBCAT dataset. The dataset is required to have
the following columns: 'frb_name', 'utc', 'telescope',
'rop_raj', 'rop_decj', 'rop_gl', 'rop_gb', 'rop_bandwidth',
'rop_centre_frequency', 'rmp_dm', 'rmp_width', 'rmp_snr',
'rmp_flux'. If ``dataset='default'`` then the builtin dataset
will be used. The builtin dataset was last updated: 2019-04-08.
Default: 'default'
method: str, optional
The dispersion measure - redshift relation to use when
        calculating the redshift. Available methods: %(meth)s.
Default: 'Inoue2004'
cosmology: str, optional
The cosmology to assume when calculating redshift.
        Available cosmologies: %(cosmo)s. Default: 'Planck18'
Generates
---------
    A CSV file with the output of the analysis.
"""
if dataset == 'default':
dataset = os.path.join(os.path.dirname(__file__), 'data',
'frbcat_database_20190408.csv')
df = pd.read_csv(dataset)
columns = ["Name", "Telescope", "RAJ", "DECJ", "gl", "gb",
"DM (pc/cm3)", "Width (ms)", "Bandwidth (MHz)",
"Centre_Frequency (MHz)", "Flux (Jy)", "SNR",
"DM_galaxy (pc/cm3)", "z", "Fluence (Jy ms)", "Energy (erg)",
"Luminosity (erg/s)", "Method", "Cosmology"]
df_out = pd.DataFrame(index=range(0, len(df)), columns=columns)
for item, row in df.iterrows():
data = read_frb_row(row)
frb = Frb(data["dm"], raj=data["raj"], decj=data["decj"],
gl=data["gl"], gb=data["gb"], width=data["width"],
peak_flux=data["flux"], obs_bandwidth=data["bw"],
obs_freq_central=data["centre_freq"], name=data["name"])
# Calculate FRB properties
frb.calc_dm_galaxy()
        frb.calc_redshift(method=method, cosmology=cosmology)
energy = frb.calc_energy()
luminosity = frb.calc_luminosity()
df_out.iloc[item] = [frb.name,
data["telescope"],
frb.raj,
frb.decj,
frb.gl,
frb.gb,
frb.dm.value,
frb.width.value,
frb.obs_bandwidth.value,
frb.obs_freq_central.value,
frb.peak_flux.value,
frb.snr,
frb.dm_galaxy.value,
frb.z.value,
frb.fluence.value,
energy.value,
luminosity.value,
method,
cosmology]
output_name = "".join([filename, ".csv"])
df_out.to_csv(output_name)
@docstring_substitute(cosmo=cosmologies.available_cosmologies())
def create_methods_catalogue(filename="fruitbat_methods_catalogue",
dataset='default', cosmology='Planck18'):
"""
Analyses an FRB dataset and produces a CSV file containing the
estimated redshift for each method.
Parameters
----------
filename: str, optional
The output file name. Default: 'fruitbat_methods_catalogue'
dataset: str, optional
The path to the FRBCAT dataset. The dataset is required to have
the following columns: 'frb_name', 'utc', 'telescope',
'rop_raj', 'rop_decj', 'rop_gl', 'rop_gb', 'rmp_dm'.
If ``dataset='default'`` then the builtin dataset
will be used. The builtin dataset was last updated: 2019-04-08.
Default: 'default'
cosmology: str, optional
The cosmology to assume when calculating redshift.
        Available cosmologies: %(cosmo)s. Default: 'Planck18'
Generates
---------
    A CSV file with the output of the analysis.
"""
if dataset == 'default':
dataset = os.path.join(os.path.dirname(__file__), 'data',
'frbcat_database_20190408.csv')
df = pd.read_csv(dataset)
columns = ["Name", "Telescope", "RAJ", "DECJ", "gl", "gb",
"DM (pc/cm3)", "DM_galaxy (pc/cm3)", "z (Ioka 2003)",
"z (Inoue 2004)", "z (Zhang 2018)"]
df_out = pd.DataFrame(index=range(0, len(df)), columns=columns)
for item, row in df.iterrows():
data = read_frb_row(row)
frb = Frb(data["dm"], raj=data["raj"], decj=data["decj"],
gl=data["gl"], gb=data["gb"], name=data["name"])
# Calculate FRB properties
frb.calc_dm_galaxy()
z_ioka = frb.calc_redshift(method="Ioka2003", cosmology=cosmology)
z_inoue = frb.calc_redshift(method="Inoue2004", cosmology=cosmology)
z_zhang = frb.calc_redshift(method="Zhang2018", cosmology=cosmology)
df_out.iloc[item] = [frb.name, data["telescope"], frb.raj,
frb.decj, frb.gl, frb.gb, frb.dm.value,
frb.dm_galaxy.value, z_ioka.value,
z_inoue.value, z_zhang.value]
output_name = "".join([filename, ".csv"])
df_out.to_csv(output_name)
def read_frb_row(row):
"""
Reads the row of a :obj:`~pandas.DataFrame` and retrieves the
data in the correct format.
Parameters
----------
row: :obj:`~pandas.core.series.Series`
The series containing FRB data.
Returns
-------
    A dictionary containing the FRB parameters.
"""
drow = {"name": row['frb_name'],
"utc": row['utc'],
"telescope": row['telescope'],
"dm": float(row['rmp_dm'].split('±')[0]),
"gl": row['rop_gl'],
"gb": row['rop_gb'],
"raj": row['rop_raj'],
"decj": row['rop_decj'],
"bw": float(row['rop_bandwidth']),
"width": float(row['rmp_width']),
"snr": float(row['rmp_snr']),
"flux": float(row['rmp_flux']),
"centre_freq": float(row['rop_centre_frequency'])
}
return drow
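# Illustrative only; not part of the original module. The row below uses made-up
# placeholder values purely to show the FRBCAT column names read_frb_row() expects.
def _demo_read_frb_row():
    row = pd.Series({
        'frb_name': 'FRB000000', 'utc': '2019-01-01 00:00:00', 'telescope': 'parkes',
        'rmp_dm': '500.0±1.0', 'rop_gl': 10.0, 'rop_gb': -20.0,
        'rop_raj': '05:00:00', 'rop_decj': '-30:00:00', 'rop_bandwidth': 340,
        'rmp_width': 2.0, 'rmp_snr': 15.0, 'rmp_flux': 0.5,
        'rop_centre_frequency': 1352})
    print(read_frb_row(row)['dm'])  # 500.0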
| 36.340206
| 77
| 0.553333
|
95275cc4abf6b56c5be577d3b581fe04fa4bab96
| 31,656
|
py
|
Python
|
sdk/python/pulumi_docker/docker.py
|
stevesloka/pulumi-docker
|
6628c2dfab8ab3b6293da0b8f6befd143d19bd47
|
[
"Apache-2.0"
] | null | null | null |
sdk/python/pulumi_docker/docker.py
|
stevesloka/pulumi-docker
|
6628c2dfab8ab3b6293da0b8f6befd143d19bd47
|
[
"Apache-2.0"
] | null | null | null |
sdk/python/pulumi_docker/docker.py
|
stevesloka/pulumi-docker
|
6628c2dfab8ab3b6293da0b8f6befd143d19bd47
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2016-2020, Pulumi Corporation.
#
# Licensed under the Apache License, Version 2.0 (the "License")
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import asyncio
import json
import math
import os
import re
from random import random
from typing import Optional, Union, List, Mapping, Sequence
from distutils.version import LooseVersion
import pulumi
from .utils import get_image_name_and_tag
class Registry:
registry: pulumi.Input[str]
"""
Registry server url
"""
username: pulumi.Input[str]
"""
Username for the registry
"""
password: pulumi.Input[str]
"""
Password for the registry
"""
def __init__(self, registry: pulumi.Input[str], username: pulumi.Input[str], password: pulumi.Input[str]):
"""
Registry is the information required to login to a Docker registry.
:param pulumi.Input[str] registry: Registry server url
:param pulumi.Input[str] username: Username for the registry
:param pulumi.Input[str] password: Password for the registry
"""
self.registry = registry
self.username = username
self.password = password
class CacheFrom:
"""
CacheFrom may be used to specify build stages to use for the Docker build cache. The final image
is always implicitly included.
"""
stages: Optional[Sequence[pulumi.Input[pulumi.Input[str]]]]
"""
An optional list of build stages to use for caching. Each build stage in this list will be
built explicitly and pushed to the target repository. A given stage's image will be tagged as
"[stage-name]".
"""
def __init__(self, stages: Optional[Sequence[pulumi.Input[pulumi.Input[str]]]] = None):
self.stages = stages
class DockerBuild:
context: Optional[pulumi.Input[str]]
"""
context is a path to a directory to use for the Docker build context, usually the directory
in which the Dockerfile resides (although dockerfile may be used to choose a custom location
independent of this choice). If not specified, the context defaults to the current working
directory if a relative path is used, it is relative to the current working directory that
Pulumi is evaluating.
"""
dockerfile: Optional[pulumi.Input[str]]
"""
dockerfile may be used to override the default Dockerfile name and/or location. By default,
it is assumed to be a file named Dockerfile in the root of the build context.
"""
args: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]
"""
An optional map of named build-time argument variables to set during the Docker build. This
flag allows you to pass built-time variables that can be accessed like environment variables
inside the `RUN` instruction.
"""
cache_from: Optional[pulumi.Input[Union[bool, CacheFrom]]]
"""
An optional CacheFrom object with information about the build stages to use for the Docker
build cache. This parameter maps to the --cache-from argument to the Docker CLI. If this
    parameter is `true`, only the final image will be pulled and passed to --cache-from; if it is
    a CacheFrom object, the stages named therein will also be pulled and passed to --cache-from.
"""
extra_options: Optional[Sequence[pulumi.Input[pulumi.Input[str]]]]
"""
An optional catch-all list of arguments to provide extra CLI options to the docker build command. For
example `['--network', 'host']`.
"""
env: Optional[Mapping[str, str]]
"""
Environment variables to set on the invocation of `docker build`, for example to support
`DOCKER_BUILDKIT=1 docker build`.
"""
target: Optional[pulumi.Input[str]]
"""
The target of the dockerfile to build
"""
def __init__(self, context=None, dockerfile=None, args=None, cache_from=None, extra_options=None, env=None,
target=None):
"""
DockerBuild may be used to specify detailed instructions about how to build a container.
:param Optional[pulumi.Input[str]] context: context is a path to a directory to use for the Docker build
context, usually the directory in which the Dockerfile resides (although dockerfile may be used to choose
a custom location independent of this choice). If not specified, the context defaults to the current working
directory; if a relative path is used, it is relative to the current working directory that
Pulumi is evaluating in.
:param Optional[pulumi.Input[str]] dockerfile: dockerfile may be used to override the default Dockerfile name
and/or location. By default, it is assumed to be a file named Dockerfile in the root of the build context.
:param Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] args: An optional map of named build-time
argument variables to set during the Docker build. This flag allows you to pass build-time variables that
can be accessed like environment variables inside the `RUN` instruction.
:param Optional[pulumi.Input[Union[bool, CacheFrom]]] cache_from: An optional CacheFrom object with information
about the build stages to use for the Docker build cache. This parameter maps to the --cache-from argument
to the Docker CLI. If this parameter is `true`, only the final image will be pulled and passed to
--cache-from; if it is a CacheFrom object, the stages named therein will also be pulled and passed to
--cache-from.
:param Optional[Sequence[pulumi.Input[pulumi.Input[str]]]] extra_options: An optional catch-all list of arguments
to provide extra CLI options to the docker build command. For example `['--network', 'host']`.
:param Optional[Mapping[str, str]] env: Environment variables to set on the invocation of `docker build`, for
example to support `DOCKER_BUILDKIT=1 docker build`.
:param Optional[pulumi.Input[str]] target: The target of the dockerfile to build
"""
self.context = context
self.dockerfile = dockerfile
self.args = args
self.cache_from = cache_from
self.extra_options = extra_options
self.env = env
self.target = target
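# NOTE (illustrative sketch, not part of the original module): one way a DockerBuild might be
# populated. The paths, arg names, and stage names below are hypothetical placeholders chosen
# only to show how the fields above map onto `docker build` behavior.
def _example_docker_build() -> DockerBuild:
    return DockerBuild(
        context="./app",                         # directory containing the Dockerfile
        dockerfile="./app/Dockerfile.prod",      # hypothetical custom Dockerfile location
        args={"NODE_ENV": "production"},         # becomes --build-arg NODE_ENV=production
        cache_from=CacheFrom(stages=["build"]),  # also pull/push the "build" stage for caching
        extra_options=["--network", "host"],
        env={"DOCKER_BUILDKIT": "1"},
        target="runtime",                        # hypothetical final stage name
    )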
class Error(Exception):
pass
class ResourceError(Error):
def __init__(self, message: str, resource: Optional[pulumi.Resource], hide_stack: Optional[bool] = False):
self.resource = resource
self.hide_stack = hide_stack
super().__init__(message)
async def use_docker_password_stdin(log_resource: pulumi.Resource):
# Verify that 'docker' is on the PATH and get the client/server versions
try:
docker_version_str = await run_command_that_must_succeed(
"docker", ["version", "-f", "{{json .}}"], log_resource)
# IDEA: In the future we could warn here on out-of-date versions of Docker which may not support key
# features we want to use.
pulumi.log.debug(f'\'docker version\' => {docker_version_str}', log_resource)
except Exception:
raise ResourceError("No 'docker' command available on PATH: Please install to use container 'build' mode.",
log_resource)
# Decide whether to use --password or --password-stdin based on the client version.
try:
version_data = json.loads(docker_version_str)
client_version: str = version_data['Client']['Version']
return LooseVersion(client_version) >= LooseVersion("17.07.0")
except Exception as err:
pulumi.log.info(f'Could not process Docker version ({err})', log_resource)
return False
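# NOTE (illustrative sketch, not part of the original module): the gate above is a plain
# LooseVersion comparison against 17.07.0, the client version treated here as the minimum
# that accepts --password-stdin. The sample version strings are arbitrary.
def _example_version_gate() -> None:
    assert LooseVersion("18.09.2") >= LooseVersion("17.07.0")        # new enough: use --password-stdin
    assert not LooseVersion("17.05.0") >= LooseVersion("17.07.0")    # too old: fall back to -p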
async def build_and_push_image(
base_image_name: str,
path_or_build: pulumi.Input[Union[str, DockerBuild]],
repository_url: pulumi.Input[str],
log_resource: pulumi.Resource,
registry: Optional[Registry],
skip_push: bool = False
) -> str:
"""
build_and_push_image will build and push the Dockerfile and context from [pathOrBuild] into the
requested docker repo [repository_url]. It returns the unique target image name for the image in
the docker repository. During preview this will build the image, and return the target image
name, without pushing. During a normal update, it will do the same, as well as tag and push the
image.
"""
# Give an initial message indicating what we're about to do. That way, if anything
# takes a while, the user has an idea about what's going on.
log_debug("Starting docker build and push...", log_resource)
check_repository_url(repository_url)
_, tag = get_image_name_and_tag(base_image_name)
# login immediately if we're going to have to actually communicate with a remote registry.
#
# We know we have to login if:
#
# 1. We're doing an update. In that case, we'll always want to login so we can push our
# images to the remote registry.
#
# 2. We're in preview or update and the build information contains 'cache from' information. In
# that case, we'll want to pull from the registry and will need to login for that.
pull_from_cache = not isinstance(path_or_build,
str) and path_or_build and path_or_build.cache_from and repository_url is not None
# If no `registry` info was passed in we simply assume docker is already
# logged-in to the correct registry (or uses auto-login via credential helpers).
if registry:
if not pulumi.runtime.is_dry_run() or pull_from_cache:
log_debug("Logging in to registry...", log_resource)
await login_to_registry(registry, log_resource)
# If the container specified a cache_from parameter, first set up the cached stages.
cache_from = None
if pull_from_cache:
_cache_from_param = CacheFrom() if isinstance(path_or_build.cache_from, bool) else path_or_build.cache_from
cache_from_param = _cache_from_param if _cache_from_param else CacheFrom()
cache_from = await pull_cache(base_image_name, cache_from_param, repository_url, log_resource)
# Next, build the image.
log_ephemeral(f"Building image '{ path_or_build if isinstance(path_or_build, str) else path_or_build.context or '.'}'...", log_resource)
build_result = await build_image(base_image_name, path_or_build, log_resource, cache_from)
image_id, stages = build_result.image_id, build_result.stages
log_ephemeral("Image build succeeded.", log_resource)
if image_id is None:
raise Error("Internal error: docker build did not produce an imageId.")
# Generate a name that uniquely will identify this built image. This is similar in purpose to
# the name@digest form that can normally be retrieved from a docker repository. However,
# this tag doesn't require actually pushing the image, nor does it require communicating with
# some external system, making it suitable for unique identification, even during preview.
# This also means that if docker produces a new imageId, we'll get a new name here, ensuring that
# resources (like docker.Image and cloud.Service) will be appropriately replaced.
unique_tagged_image_name = create_tagged_image_name(repository_url, tag, image_id)
# Use those to push the image. Then just return the unique target name as the final result
# for our caller to use. Only push the image during an update, do not push during a preview.
if not pulumi.runtime.is_dry_run() and not skip_push:
# Push the final image first, then push the stage images to use for caching.
log_ephemeral(f"Pushing image '{base_image_name}'...", log_resource)
# First, push with both the optionally-requested-tag *and* imageId (which is guaranteed to
# be defined). By using the imageId we give the image a fully unique location that we can
# successfully pull regardless of whatever else has happened at this repository_url.
# Next, push only with the optionally-requested-tag. Users of this API still want to get a
# nice and simple url that they can reach this image at, without having the explicit imageId
# hash added to it. Note: this location is not guaranteed to be idempotent. For example,
# pushes on other machines might overwrite that location.
await tag_and_push_image(base_image_name, repository_url, tag, image_id, log_resource=log_resource)
await tag_and_push_image(base_image_name, repository_url, tag, image_id=None, log_resource=log_resource)
for stage in stages:
await tag_and_push_image(
local_stage_image_name(base_image_name, stage),
repository_url,
stage,
image_id=None,
log_resource=log_resource
)
log_ephemeral("Image push succeeded.", log_resource)
# If we got here, then building/pushing didn't throw any errors. Update the status bar
# indicating that things worked properly. That way, the info bar isn't stuck showing the very
# last thing printed by some subcommand we launched.
log_debug("Successfully pushed to docker", log_resource)
return unique_tagged_image_name
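# NOTE (illustrative sketch, not part of the original module): a hypothetical caller wiring
# build_and_push_image together. The registry host, credentials, and image names are
# placeholders; in the real provider this function is driven by the docker.Image resource
# rather than called directly.
async def _example_build_and_push(log_resource: pulumi.Resource) -> str:
    registry = Registry(
        registry="my-registry.example.com:5000",
        username="ci-bot",
        password="s3cret",  # normally a secret config value; hard-coded here only for illustration
    )
    build = DockerBuild(context="./app", args={"NODE_ENV": "production"})
    return await build_and_push_image(
        base_image_name="my-registry.example.com:5000/team/app:dev",
        path_or_build=build,
        repository_url="my-registry.example.com:5000/team/app",
        log_resource=log_resource,
        registry=registry,
    )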
def log_ephemeral(message: str, log_resource: pulumi.Resource):
try:
pulumi.log.info(message, log_resource, stream_id=None, ephemeral=True)
except TypeError:
# that pulumi version does not support ephemeral
pulumi.log.info(message, log_resource, stream_id=None)
def log_debug(message: str, log_resource: pulumi.Resource):
try:
pulumi.log.debug(message, log_resource, stream_id=None, ephemeral=True)
except TypeError:
# that pulumi version does not support ephemeral
pulumi.log.debug(message, log_resource, stream_id=None)
def check_repository_url(repository_url: str):
_, tag = get_image_name_and_tag(repository_url)
# We want to report an advisory error to users so that they don't accidentally include a 'tag'
# in the repo url they supply. i.e. their repo url can be:
#
# docker.mycompany.com/namespace/myimage
#
# but should not be:
#
# docker.mycompany.com/namespace/myimage:latest
#
# We could consider removing this check entirely. However, it is likely valuable to catch
# clear mistakes where a tag was included in a repo url inappropriately.
#
# However, since we do have the check, we need to ensure that we do allow the user to specify
# a *port* on their repository that they are communicating with. i.e. it's fine to have:
#
# docker.mycompany.com:5000 or
# docker.mycompany.com:5000/namespace/myimage
#
# So check if this actually does look like a port, and don't report an error in that case.
#
# From: https://www.w3.org/Addressing/URL/url-spec.txt
#
# port digits
#
# Regex = any number of digits, optionally followed by / and any remainder.
if tag and not re.match(r'^\d+(/.*)?', tag):
raise Error(f'[repository_url] should not contain a tag: {tag}')
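# NOTE (illustrative sketch, not part of the original module): how the tag check above behaves
# for the example URLs in the comments, assuming get_image_name_and_tag (imported from .utils,
# not shown here) splits on the trailing ':'.
def _example_repository_url_check() -> None:
    check_repository_url("docker.mycompany.com/namespace/myimage")       # ok: no tag at all
    check_repository_url("docker.mycompany.com:5000/namespace/myimage")  # ok: ":5000" parses as a port
    try:
        check_repository_url("docker.mycompany.com/namespace/myimage:latest")
    except Error:
        pass  # rejected: "latest" looks like a tag, not a port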
def local_stage_image_name(image_name: str, stage: str):
return f'{image_name}-{stage}'
def create_tagged_image_name(repository_url: str, tag: Optional[str], image_id: Optional[str]) -> str:
pieces: List = []
if tag:
pieces.append(tag)
if image_id:
pieces.append(image_id)
# Note: we don't do any validation that the tag is well formed, as per:
# https://docs.docker.com/engine/reference/commandline/tag
#
# If there are any issues with it, we'll just let docker report the problem.
full_tag = "-".join(pieces)
return f'{repository_url}:{full_tag}' if full_tag else repository_url
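# NOTE (illustrative sketch, not part of the original module): the names produced by
# create_tagged_image_name for a hypothetical repository, tag, and image id.
def _example_tagged_names() -> None:
    repo = "docker.mycompany.com/namespace/myimage"
    assert create_tagged_image_name(repo, "dev", "abc123") == f"{repo}:dev-abc123"
    assert create_tagged_image_name(repo, "dev", None) == f"{repo}:dev"
    assert create_tagged_image_name(repo, None, None) == repo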
async def pull_cache(
image_name: str,
cache_from,
repo_url: str,
log_resource: pulumi.Resource
) -> Optional[Sequence[str]]:
# Ensure that we have a repository URL. If we don't, we won't be able to pull anything.
if not repo_url:
return None
pulumi.log.debug(f'pulling cache for {image_name} from {repo_url}', log_resource)
cache_from_images: List = []
stages = (cache_from.stages if cache_from.stages else []) + [""]
for stage in stages:
tag = f':{stage}' if stage else ""
image = f'{repo_url}{tag}'
# Try to pull the existing image if it exists. This may fail if the image does not exist.
# That's fine, just move onto the next stage. Also, pass along a flag saying that we
# should print that error as a warning instead. We don't want the update to succeed but
# the user to then get a nasty "error:" message at the end.
command_result = await run_command_that_can_fail(
"docker", ["pull", image], log_resource,
report_full_command_line=True, report_error_as_warning=True
)
if command_result.code:
continue
cache_from_images.append(image)
return cache_from_images
class BuildResult:
image_id: str
stages: Sequence[str]
def __init__(self, image_id, stages):
self.image_id = image_id
self.stages = stages
async def build_image(
image_name: str,
path_or_build: Union[str, DockerBuild],
log_resource: pulumi.Resource,
cache_from: Optional[Sequence[str]]
) -> BuildResult:
if isinstance(path_or_build, str):
build = DockerBuild(context=path_or_build)
elif path_or_build:
build = path_or_build
else:
raise ResourceError(f'Cannot build a container with an empty build specification', log_resource)
# If the build context is missing, default it to the working directory.
if not build.context:
build.context = "."
log_debug(
f'Building container image \'{image_name}\': context={build.context}' +
(f', dockerfile={build.dockerfile}' if build.dockerfile else "") +
(f', args={json.dumps(build.args)}' if build.args else "") +
(f', target={build.target}' if build.target else ""), log_resource)
# If the container build specified build stages to cache, build each in turn.
stages = []
if build.cache_from and not isinstance(build.cache_from, bool) and build.cache_from.stages:
for stage in build.cache_from.stages:
await docker_build(
local_stage_image_name(image_name, stage), build,
cache_from=cache_from, log_resource=log_resource, target=stage)
stages.append(stage)
# Invoke Docker CLI commands to build.
await docker_build(image_name, build, log_resource, cache_from)
# Finally, inspect the image so we can return the SHA digest. Do not forward the output of this
# command to the CLI to show the user.
inspect_result = await run_command_that_must_succeed(
"docker", ["image", "inspect", "-f", "{{.Id}}", image_name], log_resource)
if not inspect_result:
raise ResourceError(
f'No digest available for image {image_name}', log_resource)
# From https://docs.docker.com/registry/spec/api/#content-digests
#
# the image id will be an "algorithm:hex" pair. We don't care about the algorithm part. All we
# want is the unique portion we can use elsewhere. Since we are also going to place this in an
# image tag, we also don't want the colon, as that's not legal there. So simply grab the hex
# portion after the colon and return that.
image_id = inspect_result.strip()
colon_index = image_id.rfind(":")
image_id = image_id if colon_index < 0 else image_id[colon_index + 1:]
return BuildResult(image_id, stages)
async def docker_build(
image_name: str,
build: DockerBuild,
log_resource: pulumi.Resource,
cache_from: Optional[Sequence[str]],
target: Optional[str] = None
) -> str:
# Prepare the build arguments.
build_args: List[str] = ["build"]
if build.dockerfile:
build_args.extend(["-f", build.dockerfile]) # add a custom Dockerfile location.
if build.args:
for arg, build_arg in build.args.items():
build_args.extend(["--build-arg", f'{arg}={build_arg}'])
if build.target:
build_args.extend(["--target", build.target])
if cache_from:
for image in cache_from:
build_args.extend(["--cache-from", image])
if build.extra_options:
build_args.extend(build.extra_options)
build_args.extend(["-t", image_name]) # tag the image with the chosen name.
if target:
build_args.extend(["--target", target])
if build.context:
build_args.append(build.context) # push the docker build context onto the path.
return await run_command_that_must_succeed("docker", build_args, log_resource, env=build.env)
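# NOTE (illustrative trace, not part of the original module): the argv that docker_build
# assembles for a hypothetical build, before "docker" itself is prepended. Given
#
#   DockerBuild(context="./app", dockerfile="./app/Dockerfile",
#               args={"NODE_ENV": "production"}, target="runtime")
#
# with cache_from=["repo:build"] and image_name "repo:dev", the list is roughly:
#
#   ["build", "-f", "./app/Dockerfile",
#    "--build-arg", "NODE_ENV=production",
#    "--target", "runtime",
#    "--cache-from", "repo:build",
#    "-t", "repo:dev", "./app"]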
class LoginResult:
registry_name: str
username: str
done: asyncio.Task
def __init__(self, registry_name: str, username: str, done: asyncio.Task):
self.registry_name = registry_name
self.username = username
self.done = done
# Keep track of registries and users that have been logged in. If we've already logged into that
# registry with that user, there's no need to do it again.
login_results: List[LoginResult] = []
async def login_to_registry(registry: Registry, log_resource: pulumi.Resource):
registry_name = registry.registry
username = registry.username
password = registry.password
# See if we've issued an outstanding request to log in to this registry. If so, just
# await the results of that login request. Otherwise, create a new request and keep it
# around so that future login requests will see it.
result: LoginResult = None
for existing in login_results:
if existing.registry_name == registry_name and existing.username == username:
log_debug(f'Reusing existing login for {username}@{registry_name}', log_resource)
result = existing
break
if not result:
# An existing login wasn't found; WARNING: we must not await anything between this check
# for existing logins, and appending our new login attempt, otherwise an async interleaving
# could sneak in, log into the same server, and yield a redundant login (which will error out).
docker_password_stdin = await use_docker_password_stdin(log_resource)
# pass 'report_full_command_line: false' here so that if we fail to login we don't emit the
# username/password in our logs. Instead, we'll just say "'docker login' failed with code ..."
coro: asyncio.Coroutine = None
if docker_password_stdin:
coro = run_command_that_must_succeed("docker", ["login", "-u", username, "--password-stdin", registry_name],
log_resource, report_full_command_line=False, stdin=password)
else:
coro = run_command_that_must_succeed("docker", ["login", "-u", username, "-p", password, registry_name],
log_resource, report_full_command_line=False)
done = asyncio.create_task(coro)
result = LoginResult(registry_name, username, done)
login_results.append(result)
await result.done
async def tag_and_push_image(
image_name: str, repository_url: str,
tag: Optional[str], image_id: Optional[str],
log_resource: pulumi.Resource
):
async def do_tag_and_push(target_name: str):
await run_command_that_must_succeed("docker", ["tag", image_name, target_name], log_resource)
await run_command_that_must_succeed("docker", ["push", target_name], log_resource)
# Ensure we have a unique target name for this image, and tag and push to that unique target.
await do_tag_and_push(create_tagged_image_name(repository_url, tag, image_id))
# If the user provided a tag themselves (like "x/y:dev") then also tag and push directly to
# that 'dev' tag. This is not going to be a unique location, and future pushes will overwrite
# this location. However, that's ok as there's still the unique target we generated above.
#
# Note: don't need to do this if imageId was 'undefined' as the above line will have already
# taken care of things for us.
if tag is not None and image_id is not None:
await do_tag_and_push(create_tagged_image_name(repository_url, tag, image_id=None))
return
class CommandResult:
code: int
stdout: str
def __init__(self, code, stdout):
self.code = code
self.stdout = stdout
def get_command_line_message(
cmd: str, args: Sequence[str], report_full_command_line: bool, env: Optional[Mapping[str, str]] = None
):
elements = []
if env:
elements.append(" ".join(map(lambda k: f'{k}={env[k]}', env.keys())))
elements.append(cmd)
argstr = " ".join(args) if report_full_command_line else args[0]
elements.append(argstr)
return f"'{' '.join(elements)}'"
def get_failure_message(
cmd: str, args: Sequence[str], report_full_command_line: bool, code: int, env: Optional[Mapping[str, str]] = None
):
return f'{get_command_line_message(cmd, args, report_full_command_line, env)} failed with exit code {code}'
# [report_full_command_line] is used to determine if the full command line should be reported
# when an error happens. In general reporting the full command line is fine. But it should be set
# to false if it might contain sensitive information (like a username/password)
async def run_command_that_must_succeed(
cmd: str,
args: Sequence[str],
log_resource: pulumi.Resource,
report_full_command_line: bool = True,
stdin: Optional[str] = None,
env: Optional[Mapping] = None
) -> str:
command_result = await run_command_that_can_fail(
cmd, args, log_resource, report_full_command_line, False, stdin, env)
code, stdout = command_result.code, command_result.stdout
if code != 0:
# Fail the entire build and push. This includes the full output of the command so that at
# the end the user can review the full docker message about what the problem was.
#
# Note: a message about the command failing will have already been ephemerally reported to
# the status column.
raise ResourceError(
f'{get_failure_message(cmd, args, report_full_command_line, code)}\n{stdout}', log_resource)
return stdout
async def run_command_that_can_fail(
cmd_name: str,
args: Sequence[str],
log_resource: pulumi.Resource,
report_full_command_line: bool,
report_error_as_warning: bool,
stdin: Optional[str] = None,
env: Optional[Mapping[str, str]] = None
) -> CommandResult:
"""
Runs a CLI command in a child process, returning a future for the process's exit. Both stdout
and stderr are redirected to process.stdout and process.stderr by default.
If the [stdin] argument is defined, its contents are piped into stdin for the child process.
[log_resource] is used to specify the resource to associate command output with. Stderr messages
are always sent (since they may contain important information about something that's gone wrong).
Stdout messages will be logged ephemerally to this resource. This lets the user know there is
progress, without having that dumped on them at the end. If an error occurs though, the stdout
content will be printed.
"""
# Let the user ephemerally know the command we're going to execute.
log_debug(f"Executing {get_command_line_message(cmd_name, args, report_full_command_line, env)}", log_resource)
# Generate a unique stream-ID that we'll associate all the docker output with. This will allow
# each spawned CLI command's output to be associated with 'resource' and also streamed to the UI
# in pieces so that it can be displayed live. The stream-ID is so that the UI knows these
# messages are all related and should be considered as one large message (just one that was
# sent over in chunks).
#
# We use random() here in case our package is loaded multiple times in memory (i.e. because
# different downstream dependencies depend on different versions of us). By being random we
# effectively make it completely unlikely that any two cli outputs could map to the same stream
# id.
#
# Pick a reasonably distributed number between 0 and 2^30. This will fit as an int32
# which the grpc layer needs.
stream_id = math.floor(random() * (1 << 30))
if env is not None:
env = {**os.environ, **env}
process = await asyncio.create_subprocess_exec(
cmd_name, *args, env=env,
stdout=asyncio.subprocess.PIPE, stderr=asyncio.subprocess.PIPE, stdin=asyncio.subprocess.PIPE)
# If stdin input is present, we need to ensure it's encoded into bytes.
if isinstance(stdin, str):
stdin = stdin.encode('utf-8')
# We store the results from stdout in memory and will return them as a str.
stdout_chunks: List[str] = []
stderr_chunks: List[str] = []
# A None value for process.returncode indicates that the process hasn't terminated yet
while process.returncode is None:
outs, errs = await process.communicate(input=stdin)
if outs:
# Report all stdout messages as ephemeral messages. That way they show up in the
# info bar as they're happening. But they do not overwhelm the user at the end
# of the run.
for line in outs.splitlines():
log_ephemeral(line.decode('utf-8', errors='replace'), log_resource)
stdout_chunks.append(outs.rstrip().decode('utf-8'))
if errs:
# We can't stream these stderr messages as we receive them because we don't know at
# this point whether they are errors or warnings; Docker uses stderr for both. So, instead, we
# just collect the messages, and wait for the process to end to decide how to report
# them.
stderr_chunks.append(errs.rstrip().decode('utf-8'))
code = process.returncode
# Collapse our stored stdout/stderr messages into single strings.
stderr = ''.join(stderr_chunks)
stdout = ''.join(stdout_chunks)
# If we got any stderr messages, report them as an error/warning depending on the
# result of the operation.
if stderr:
if code and not report_error_as_warning:
# Command returned non-zero code. Treat these stderr messages as an error.
pulumi.log.error(stderr, log_resource, stream_id)
else:
# command succeeded. These were just warnings.
pulumi.log.warn(stderr, log_resource, stream_id)
# If the command failed report an ephemeral message indicating which command it was.
# That way the user can immediately see something went wrong in the info bar. The
# caller (normally run_command_that_must_succeed) can choose to also report this
# non-ephemerally.
if code:
log_ephemeral(get_failure_message(cmd_name, args, report_full_command_line, code), log_resource)
return CommandResult(code, stdout)
| 43.723757
| 140
| 0.693676
|
a262e53287691d4dc9c18021db1dbcf6251a0122
| 7,393
|
py
|
Python
|
pytest_lambda/impl.py
|
theY4Kman/pytest-lambda
|
a64de8e9d57176878756686ac128aa93a9e88290
|
[
"MIT"
] | 10
|
2018-07-31T14:57:23.000Z
|
2021-05-24T12:45:51.000Z
|
pytest_lambda/impl.py
|
theY4Kman/pytest-lambda
|
a64de8e9d57176878756686ac128aa93a9e88290
|
[
"MIT"
] | 3
|
2019-01-16T00:41:42.000Z
|
2020-11-02T21:50:25.000Z
|
pytest_lambda/impl.py
|
theY4Kman/pytest-lambda
|
a64de8e9d57176878756686ac128aa93a9e88290
|
[
"MIT"
] | 3
|
2019-01-16T00:41:47.000Z
|
2021-05-01T09:50:36.000Z
|
import functools
from types import ModuleType
from typing import Callable, Union
import pytest
import wrapt
_IDENTITY_LAMBDA_FORMAT = '''
{name} = lambda {argnames}: ({argnames})
'''
def create_identity_lambda(name, *argnames):
source = _IDENTITY_LAMBDA_FORMAT.format(name=name, argnames=', '.join(argnames))
context = {}
exec(source, context)
fixture_func = context[name]
return fixture_func
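# NOTE (illustrative sketch, not part of the original module): the function produced by
# create_identity_lambda for a fixture that simply forwards two other fixtures. The names
# 'a' and 'b' are arbitrary.
def _example_identity_lambda() -> None:
    fn = create_identity_lambda('fixture__a__b', 'a', 'b')
    # Generated source is equivalent to: fixture__a__b = lambda a, b: (a, b)
    assert fn(1, 2) == (1, 2)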
class LambdaFixture(wrapt.ObjectProxy):
# NOTE: pytest won't apply marks unless the markee has a __call__ and a
# __name__ defined.
__name__ = '<lambda-fixture>'
bind: bool
fixture_kwargs: dict
fixture_func: Callable
has_fixture_func: bool
parent: Union[type, ModuleType]
def __init__(self, fixture_names_or_lambda, bind=False, **fixture_kwargs):
self.bind = bind
self.fixture_kwargs = fixture_kwargs
self.fixture_func = self._not_implemented
self.has_fixture_func = False
self.parent = None
#: pytest fixture info definition
self._pytestfixturefunction = pytest.fixture(**fixture_kwargs)
if fixture_names_or_lambda is not None:
self.set_fixture_func(fixture_names_or_lambda)
elif fixture_kwargs.get('params'):
# Shortcut to allow `lambda_fixture(params=[1,2,3])`
self.set_fixture_func(lambda request: request.param)
def __call__(self, *args, **kwargs):
if self.bind:
args = (self.parent,) + args
return self.fixture_func(*args, **kwargs)
def _not_implemented(self):
raise NotImplementedError(
'The fixture_func for this LambdaFixture has not been defined. '
'This is a catastrophic error!')
def set_fixture_func(self, fixture_names_or_lambda):
self.fixture_func = self.build_fixture_func(fixture_names_or_lambda)
self.has_fixture_func = True
# NOTE: this initializes the ObjectProxy
super().__init__(self.fixture_func)
def build_fixture_func(self, fixture_names_or_lambda):
if callable(fixture_names_or_lambda):
real_fixture_func = fixture_names_or_lambda
# We create a new method with the same signature as the passed
# method, which simply calls the passed method – this is so we can
# modify __name__ and other properties of the function without fear
# of overwriting functions unrelated to the fixture. (A lambda need
# not be used – a method imported from another module can be used.)
@functools.wraps(real_fixture_func)
def insulator(*args, **kwargs):
return real_fixture_func(*args, **kwargs)
return insulator
else:
if self.bind:
raise ValueError(
'bind must be False if requesting a fixture by name')
fixture_names = fixture_names_or_lambda
if isinstance(fixture_names, str):
fixture_names = (fixture_names,)
# Create a new method with the requested parameter, so pytest can
# determine its dependencies at parse time. If we instead use
# request.getfixturevalue, pytest won't know to include the fixture
# in its dependency graph, and will vomit with "The requested
# fixture has no parameter defined for the current test."
name = 'fixture__' + '__'.join(fixture_names) # XXX: will this conflict in certain circumstances?
return create_identity_lambda(name, *fixture_names)
def contribute_to_parent(self, parent: Union[type, ModuleType], name: str, **kwargs):
"""Setup the LambdaFixture for the given class/module
This method is called during collection, when a LambdaFixture is
encountered in a module or class. This method is responsible for saving
any names and setting any attributes on parent as necessary.
"""
is_in_class = isinstance(parent, type)
is_in_module = isinstance(parent, ModuleType)
assert is_in_class or is_in_module
if is_in_module and self.bind:
raise ValueError(f'bind=True cannot be used at the module level. '
f'Please remove this arg in the {name} fixture in {parent.__file__}')
if not self.has_fixture_func:
# If no fixture definition was passed to lambda_fixture, it's our
# responsibility to define it as the name of the attribute. This is
# handy if ya just wanna force a fixture to be used, e.g.:
# do_the_thing = lambda_fixture(autouse=True)
self.set_fixture_func(name)
self.__name__ = name
self.__module__ = self.fixture_func.__module__ = (
parent.__module__ if is_in_class else parent.__name__)
self.parent = parent
# With --doctest-modules enabled, the doctest finder will enumerate all objects
# in all relevant modules, and use `isinstance(obj, ...)` to determine whether
# the object has doctests to collect. Under the hood, isinstance retrieves the
# value of the `obj.__class__` attribute.
#
# When using implicit referential lambda fixtures (e.g. `name = lambda_fixture()`),
# the LambdaFixture object doesn't initialize its underlying object proxy until the
# pytest collection phase. Unfortunately, doctest's scanning occurs before this.
# When doctest attempts `isinstance(lfix, ...)` on an implicit referential
# lambda fixture and accesses `__class__`, the object proxy tries to curry
# the access to its wrapped object — but there isn't one, so it raises an error.
#
# To address this, we override __class__ to return LambdaFixture when the
# object proxy has not yet been initialized.
@property
def __class__(self):
try:
self.__wrapped__
except ValueError:
return LambdaFixture
else:
return self.__wrapped__.__class__
@__class__.setter
def __class__(self, val):
self.__wrapped__.__class__ = val
# These properties are required in order to expose attributes stored on the
# LambdaFixture proxying instance without prefixing them with _self_
@property
def bind(self):
return self._self_bind
@bind.setter
def bind(self, value):
self._self_bind = value
@property
def fixture_kwargs(self):
return self._self_fixture_kwargs
@fixture_kwargs.setter
def fixture_kwargs(self, value):
self._self_fixture_kwargs = value
@property
def fixture_func(self):
return self._self_fixture_func
@fixture_func.setter
def fixture_func(self, value):
self._self_fixture_func = value
@property
def has_fixture_func(self):
return self._self_has_fixture_func
@has_fixture_func.setter
def has_fixture_func(self, value):
self._self_has_fixture_func = value
@property
def parent(self):
return self._self_parent
@parent.setter
def parent(self, value):
self._self_parent = value
@property
def _pytestfixturefunction(self):
return self._self__pytestfixturefunction
@_pytestfixturefunction.setter
def _pytestfixturefunction(self, value):
self._self__pytestfixturefunction = value
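# NOTE (illustrative sketch, not part of the original module): how LambdaFixture instances are
# typically created in user code. The public helper `lambda_fixture` (defined elsewhere in this
# package) builds LambdaFixture objects, and the collection plugin later calls
# contribute_to_parent() with the owning module/class and the attribute name, e.g.:
#
#     # conftest.py (hypothetical)
#     first_name = lambda_fixture(lambda: 'Jane')    # lambda-bodied fixture
#     name = lambda_fixture('first_name')            # referential fixture aliasing first_name
#     do_the_thing = lambda_fixture(autouse=True)    # no definition: requests the existing
#                                                    # fixture named "do_the_thing", forcing its use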
| 36.063415
| 110
| 0.670634
|
cfcefddf77738011e14575bd5f93ddda875a540d
| 1,359
|
py
|
Python
|
tests/types/test_permissions.py
|
nyejon/strawberry
|
664fde292fe2186c3e33e8cae79964b866fa5822
|
[
"MIT"
] | 1
|
2021-05-26T18:31:11.000Z
|
2021-05-26T18:31:11.000Z
|
tests/types/test_permissions.py
|
nyejon/strawberry
|
664fde292fe2186c3e33e8cae79964b866fa5822
|
[
"MIT"
] | 43
|
2021-07-05T22:51:03.000Z
|
2022-03-29T10:44:58.000Z
|
tests/types/test_permissions.py
|
nyejon/strawberry
|
664fde292fe2186c3e33e8cae79964b866fa5822
|
[
"MIT"
] | null | null | null |
from typing import Any
import strawberry
from strawberry.permission import BasePermission
from strawberry.types import Info
def test_permission_classes_basic_fields():
class IsAuthenticated(BasePermission):
message = "User is not authenticated"
def has_permission(self, source: Any, info: Info, **kwargs) -> bool:
return False
@strawberry.type
class Query:
user: str = strawberry.field(permission_classes=[IsAuthenticated])
definition = Query._type_definition
assert definition.name == "Query"
assert len(definition.fields) == 1
assert definition.fields[0].graphql_name == "user"
assert definition.fields[0].permission_classes == [IsAuthenticated]
def test_permission_classes():
class IsAuthenticated(BasePermission):
message = "User is not authenticated"
def has_permission(self, source: Any, info: Info, **kwargs) -> bool:
return False
@strawberry.type
class Query:
@strawberry.field(permission_classes=[IsAuthenticated])
def user(self) -> str:
return "patrick"
definition = Query._type_definition
assert definition.name == "Query"
assert len(definition.fields) == 1
assert definition.fields[0].graphql_name == "user"
assert definition.fields[0].permission_classes == [IsAuthenticated]
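# NOTE (illustrative sketch, not part of the original tests): a permission class whose
# has_permission actually consults the execution context, in the same shape the tests above
# use. How the authenticated user is stored on info.context depends on the integration, so the
# lookup below is an assumption, not a strawberry guarantee.
def _example_context_permission():
    class IsLoggedIn(BasePermission):
        message = "User is not authenticated"

        def has_permission(self, source: Any, info: Info, **kwargs) -> bool:
            context = info.context or {}
            user = context.get("user") if isinstance(context, dict) else getattr(context, "user", None)
            return user is not None

    @strawberry.type
    class Query:
        secret: str = strawberry.field(permission_classes=[IsLoggedIn])

    return Query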
| 28.3125
| 76
| 0.698308
|
a37b848c0be19172fa620388c5d545c3873183ec
| 28,298
|
py
|
Python
|
sdk/synapse/azure-mgmt-synapse/azure/mgmt/synapse/aio/operations/_sql_pool_sensitivity_labels_operations.py
|
xolve/azure-sdk-for-python
|
9f5baa19c392f77f811d936ee43450e4ea524002
|
[
"MIT"
] | 1
|
2021-09-07T18:39:05.000Z
|
2021-09-07T18:39:05.000Z
|
sdk/synapse/azure-mgmt-synapse/azure/mgmt/synapse/aio/operations/_sql_pool_sensitivity_labels_operations.py
|
xolve/azure-sdk-for-python
|
9f5baa19c392f77f811d936ee43450e4ea524002
|
[
"MIT"
] | null | null | null |
sdk/synapse/azure-mgmt-synapse/azure/mgmt/synapse/aio/operations/_sql_pool_sensitivity_labels_operations.py
|
xolve/azure-sdk-for-python
|
9f5baa19c392f77f811d936ee43450e4ea524002
|
[
"MIT"
] | 1
|
2022-03-04T06:21:56.000Z
|
2022-03-04T06:21:56.000Z
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
import functools
from typing import Any, AsyncIterable, Callable, Dict, Generic, Optional, TypeVar, Union
import warnings
from azure.core.async_paging import AsyncItemPaged, AsyncList
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import AsyncHttpResponse
from azure.core.rest import HttpRequest
from azure.core.tracing.decorator import distributed_trace
from azure.core.tracing.decorator_async import distributed_trace_async
from azure.mgmt.core.exceptions import ARMErrorFormat
from ... import models as _models
from ..._vendor import _convert_request
from ...operations._sql_pool_sensitivity_labels_operations import build_create_or_update_request, build_delete_request, build_disable_recommendation_request, build_enable_recommendation_request, build_get_request, build_list_current_request, build_list_recommended_request, build_update_request
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
class SqlPoolSensitivityLabelsOperations:
"""SqlPoolSensitivityLabelsOperations async operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.synapse.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
def __init__(self, client, config, serializer, deserializer) -> None:
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
@distributed_trace
def list_current(
self,
resource_group_name: str,
workspace_name: str,
sql_pool_name: str,
filter: Optional[str] = None,
**kwargs: Any
) -> AsyncIterable["_models.SensitivityLabelListResult"]:
"""Gets SQL pool sensitivity labels.
Gets SQL pool sensitivity labels.
:param resource_group_name: The name of the resource group. The name is case insensitive.
:type resource_group_name: str
:param workspace_name: The name of the workspace.
:type workspace_name: str
:param sql_pool_name: SQL pool name.
:type sql_pool_name: str
:param filter: An OData filter expression that filters elements in the collection.
:type filter: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either SensitivityLabelListResult or the result of
cls(response)
:rtype:
~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.synapse.models.SensitivityLabelListResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.SensitivityLabelListResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
def prepare_request(next_link=None):
if not next_link:
request = build_list_current_request(
subscription_id=self._config.subscription_id,
resource_group_name=resource_group_name,
workspace_name=workspace_name,
sql_pool_name=sql_pool_name,
filter=filter,
template_url=self.list_current.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
else:
request = build_list_current_request(
subscription_id=self._config.subscription_id,
resource_group_name=resource_group_name,
workspace_name=workspace_name,
sql_pool_name=sql_pool_name,
filter=filter,
template_url=next_link,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
request.method = "GET"
return request
async def extract_data(pipeline_response):
deserialized = self._deserialize("SensitivityLabelListResult", pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, AsyncList(list_of_elem)
async def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
return pipeline_response
return AsyncItemPaged(
get_next, extract_data
)
list_current.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Synapse/workspaces/{workspaceName}/sqlPools/{sqlPoolName}/currentSensitivityLabels'} # type: ignore
@distributed_trace_async
async def update(
self,
resource_group_name: str,
workspace_name: str,
sql_pool_name: str,
parameters: "_models.SensitivityLabelUpdateList",
**kwargs: Any
) -> None:
"""Update sensitivity labels of a given SQL Pool using an operations batch.
:param resource_group_name: The name of the resource group. The name is case insensitive.
:type resource_group_name: str
:param workspace_name: The name of the workspace.
:type workspace_name: str
:param sql_pool_name: SQL pool name.
:type sql_pool_name: str
:param parameters:
:type parameters: ~azure.mgmt.synapse.models.SensitivityLabelUpdateList
:keyword callable cls: A custom type or function that will be passed the direct response
:return: None, or the result of cls(response)
:rtype: None
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
content_type = kwargs.pop('content_type', "application/json") # type: Optional[str]
_json = self._serialize.body(parameters, 'SensitivityLabelUpdateList')
request = build_update_request(
subscription_id=self._config.subscription_id,
resource_group_name=resource_group_name,
workspace_name=workspace_name,
sql_pool_name=sql_pool_name,
content_type=content_type,
json=_json,
template_url=self.update.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Synapse/workspaces/{workspaceName}/sqlPools/{sqlPoolName}/currentSensitivityLabels'} # type: ignore
@distributed_trace
def list_recommended(
self,
resource_group_name: str,
workspace_name: str,
sql_pool_name: str,
include_disabled_recommendations: Optional[bool] = None,
skip_token: Optional[str] = None,
filter: Optional[str] = None,
**kwargs: Any
) -> AsyncIterable["_models.SensitivityLabelListResult"]:
"""Gets sensitivity labels of a given SQL pool.
Gets sensitivity labels of a given SQL pool.
:param resource_group_name: The name of the resource group. The name is case insensitive.
:type resource_group_name: str
:param workspace_name: The name of the workspace.
:type workspace_name: str
:param sql_pool_name: SQL pool name.
:type sql_pool_name: str
:param include_disabled_recommendations: Specifies whether to include disabled recommendations
or not.
:type include_disabled_recommendations: bool
:param skip_token: An OData query option to indicate how many elements to skip in the
collection.
:type skip_token: str
:param filter: An OData filter expression that filters elements in the collection.
:type filter: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either SensitivityLabelListResult or the result of
cls(response)
:rtype:
~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.synapse.models.SensitivityLabelListResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.SensitivityLabelListResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
def prepare_request(next_link=None):
if not next_link:
request = build_list_recommended_request(
subscription_id=self._config.subscription_id,
resource_group_name=resource_group_name,
workspace_name=workspace_name,
sql_pool_name=sql_pool_name,
include_disabled_recommendations=include_disabled_recommendations,
skip_token=skip_token,
filter=filter,
template_url=self.list_recommended.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
else:
request = build_list_recommended_request(
subscription_id=self._config.subscription_id,
resource_group_name=resource_group_name,
workspace_name=workspace_name,
sql_pool_name=sql_pool_name,
include_disabled_recommendations=include_disabled_recommendations,
skip_token=skip_token,
filter=filter,
template_url=next_link,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
request.method = "GET"
return request
async def extract_data(pipeline_response):
deserialized = self._deserialize("SensitivityLabelListResult", pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, AsyncList(list_of_elem)
async def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return AsyncItemPaged(
get_next, extract_data
)
list_recommended.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Synapse/workspaces/{workspaceName}/sqlPools/{sqlPoolName}/recommendedSensitivityLabels'} # type: ignore
@distributed_trace_async
async def create_or_update(
self,
resource_group_name: str,
workspace_name: str,
sql_pool_name: str,
schema_name: str,
table_name: str,
column_name: str,
parameters: "_models.SensitivityLabel",
**kwargs: Any
) -> "_models.SensitivityLabel":
"""Creates or updates the sensitivity label of a given column in a Sql pool.
:param resource_group_name: The name of the resource group. The name is case insensitive.
:type resource_group_name: str
:param workspace_name: The name of the workspace.
:type workspace_name: str
:param sql_pool_name: SQL pool name.
:type sql_pool_name: str
:param schema_name: The name of the schema.
:type schema_name: str
:param table_name: The name of the table.
:type table_name: str
:param column_name: The name of the column.
:type column_name: str
:param parameters: The column sensitivity label resource.
:type parameters: ~azure.mgmt.synapse.models.SensitivityLabel
:keyword callable cls: A custom type or function that will be passed the direct response
:return: SensitivityLabel, or the result of cls(response)
:rtype: ~azure.mgmt.synapse.models.SensitivityLabel
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.SensitivityLabel"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
content_type = kwargs.pop('content_type', "application/json") # type: Optional[str]
_json = self._serialize.body(parameters, 'SensitivityLabel')
request = build_create_or_update_request(
subscription_id=self._config.subscription_id,
resource_group_name=resource_group_name,
workspace_name=workspace_name,
sql_pool_name=sql_pool_name,
schema_name=schema_name,
table_name=table_name,
column_name=column_name,
content_type=content_type,
json=_json,
template_url=self.create_or_update.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 201]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if response.status_code == 200:
deserialized = self._deserialize('SensitivityLabel', pipeline_response)
if response.status_code == 201:
deserialized = self._deserialize('SensitivityLabel', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
create_or_update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Synapse/workspaces/{workspaceName}/sqlPools/{sqlPoolName}/schemas/{schemaName}/tables/{tableName}/columns/{columnName}/sensitivityLabels/{sensitivityLabelSource}'} # type: ignore
@distributed_trace_async
async def delete(
self,
resource_group_name: str,
workspace_name: str,
sql_pool_name: str,
schema_name: str,
table_name: str,
column_name: str,
**kwargs: Any
) -> None:
"""Deletes the sensitivity label of a given column in a Sql pool.
:param resource_group_name: The name of the resource group. The name is case insensitive.
:type resource_group_name: str
:param workspace_name: The name of the workspace.
:type workspace_name: str
:param sql_pool_name: SQL pool name.
:type sql_pool_name: str
:param schema_name: The name of the schema.
:type schema_name: str
:param table_name: The name of the table.
:type table_name: str
:param column_name: The name of the column.
:type column_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: None, or the result of cls(response)
:rtype: None
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
request = build_delete_request(
subscription_id=self._config.subscription_id,
resource_group_name=resource_group_name,
workspace_name=workspace_name,
sql_pool_name=sql_pool_name,
schema_name=schema_name,
table_name=table_name,
column_name=column_name,
template_url=self.delete.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 204]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
delete.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Synapse/workspaces/{workspaceName}/sqlPools/{sqlPoolName}/schemas/{schemaName}/tables/{tableName}/columns/{columnName}/sensitivityLabels/{sensitivityLabelSource}'} # type: ignore
@distributed_trace_async
async def get(
self,
resource_group_name: str,
workspace_name: str,
sql_pool_name: str,
schema_name: str,
table_name: str,
column_name: str,
sensitivity_label_source: Union[str, "_models.SensitivityLabelSource"],
**kwargs: Any
) -> "_models.SensitivityLabel":
"""Gets the sensitivity label of a given column.
:param resource_group_name: The name of the resource group. The name is case insensitive.
:type resource_group_name: str
:param workspace_name: The name of the workspace.
:type workspace_name: str
:param sql_pool_name: SQL pool name.
:type sql_pool_name: str
:param schema_name: The name of the schema.
:type schema_name: str
:param table_name: The name of the table.
:type table_name: str
:param column_name: The name of the column.
:type column_name: str
:param sensitivity_label_source: The source of the sensitivity label.
:type sensitivity_label_source: str or ~azure.mgmt.synapse.models.SensitivityLabelSource
:keyword callable cls: A custom type or function that will be passed the direct response
:return: SensitivityLabel, or the result of cls(response)
:rtype: ~azure.mgmt.synapse.models.SensitivityLabel
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.SensitivityLabel"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
request = build_get_request(
subscription_id=self._config.subscription_id,
resource_group_name=resource_group_name,
workspace_name=workspace_name,
sql_pool_name=sql_pool_name,
schema_name=schema_name,
table_name=table_name,
column_name=column_name,
sensitivity_label_source=sensitivity_label_source,
template_url=self.get.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('SensitivityLabel', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Synapse/workspaces/{workspaceName}/sqlPools/{sqlPoolName}/schemas/{schemaName}/tables/{tableName}/columns/{columnName}/sensitivityLabels/{sensitivityLabelSource}'} # type: ignore
@distributed_trace_async
async def enable_recommendation(
self,
resource_group_name: str,
workspace_name: str,
sql_pool_name: str,
schema_name: str,
table_name: str,
column_name: str,
**kwargs: Any
) -> None:
"""Enables sensitivity recommendations on a given column (recommendations are enabled by default
on all columns).
:param resource_group_name: The name of the resource group. The name is case insensitive.
:type resource_group_name: str
:param workspace_name: The name of the workspace.
:type workspace_name: str
:param sql_pool_name: SQL pool name.
:type sql_pool_name: str
:param schema_name: The name of the schema.
        :type schema_name: str
        :param table_name: The name of the table.
        :type table_name: str
        :param column_name: The name of the column.
        :type column_name: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: None, or the result of cls(response)
        :rtype: None
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None)  # type: ClsType[None]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))

        request = build_enable_recommendation_request(
            subscription_id=self._config.subscription_id,
            resource_group_name=resource_group_name,
            workspace_name=workspace_name,
            sql_pool_name=sql_pool_name,
            schema_name=schema_name,
            table_name=table_name,
            column_name=column_name,
            template_url=self.enable_recommendation.metadata['url'],
        )
        request = _convert_request(request)
        request.url = self._client.format_url(request.url)

        pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response

        if response.status_code not in [200]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response, error_format=ARMErrorFormat)

        if cls:
            return cls(pipeline_response, None, {})

    enable_recommendation.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Synapse/workspaces/{workspaceName}/sqlPools/{sqlPoolName}/schemas/{schemaName}/tables/{tableName}/columns/{columnName}/sensitivityLabels/{sensitivityLabelSource}/enable'}  # type: ignore

    @distributed_trace_async
    async def disable_recommendation(
        self,
        resource_group_name: str,
        workspace_name: str,
        sql_pool_name: str,
        schema_name: str,
        table_name: str,
        column_name: str,
        **kwargs: Any
    ) -> None:
        """Disables sensitivity recommendations on a given column.

        :param resource_group_name: The name of the resource group. The name is case insensitive.
        :type resource_group_name: str
        :param workspace_name: The name of the workspace.
        :type workspace_name: str
        :param sql_pool_name: SQL pool name.
        :type sql_pool_name: str
        :param schema_name: The name of the schema.
        :type schema_name: str
        :param table_name: The name of the table.
        :type table_name: str
        :param column_name: The name of the column.
        :type column_name: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: None, or the result of cls(response)
        :rtype: None
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None)  # type: ClsType[None]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))

        request = build_disable_recommendation_request(
            subscription_id=self._config.subscription_id,
            resource_group_name=resource_group_name,
            workspace_name=workspace_name,
            sql_pool_name=sql_pool_name,
            schema_name=schema_name,
            table_name=table_name,
            column_name=column_name,
            template_url=self.disable_recommendation.metadata['url'],
        )
        request = _convert_request(request)
        request.url = self._client.format_url(request.url)

        pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response

        if response.status_code not in [200]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response, error_format=ARMErrorFormat)

        if cls:
            return cls(pipeline_response, None, {})

    disable_recommendation.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Synapse/workspaces/{workspaceName}/sqlPools/{sqlPoolName}/schemas/{schemaName}/tables/{tableName}/columns/{columnName}/sensitivityLabels/{sensitivityLabelSource}/disable'}  # type: ignore
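A minimal usage sketch for the two operations above, assuming they are exposed on the async SynapseManagementClient as the sql_pool_sensitivity_labels operations group (the client import path, the attribute name, and all resource names are assumptions for illustration, not taken from this file):

import asyncio

from azure.identity.aio import DefaultAzureCredential
from azure.mgmt.synapse.aio import SynapseManagementClient


async def main():
    # Placeholder subscription and resource names; replace with real values.
    async with DefaultAzureCredential() as credential:
        async with SynapseManagementClient(credential, "<subscription-id>") as client:
            ops = client.sql_pool_sensitivity_labels  # assumed operations-group attribute
            # Turn the sensitivity recommendation on, then off, for one column.
            await ops.enable_recommendation(
                resource_group_name="my-rg",
                workspace_name="my-workspace",
                sql_pool_name="my-sqlpool",
                schema_name="dbo",
                table_name="customers",
                column_name="email",
            )
            await ops.disable_recommendation(
                resource_group_name="my-rg",
                workspace_name="my-workspace",
                sql_pool_name="my-sqlpool",
                schema_name="dbo",
                table_name="customers",
                column_name="email",
            )

asyncio.run(main())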
avg_line_length: 44.634069 | max_line_length: 321 | alphanum_fraction: 0.667326

hexsha: 3bc5c4e73118ba83cf7c40222f661c592921978e | size: 420 | ext: py | lang: Python
max_stars_repo_path: setup.py | max_stars_repo_name: franaguila/DSCI_524-square-python | max_stars_repo_head_hexsha: 93a79472a72fc957100b1945949b2443aed1bdac | max_stars_repo_licenses: ["MIT"] | max_stars_count: null | max_stars_repo_stars_event_min_datetime: null | max_stars_repo_stars_event_max_datetime: null
max_issues_repo_path: setup.py | max_issues_repo_name: franaguila/DSCI_524-square-python | max_issues_repo_head_hexsha: 93a79472a72fc957100b1945949b2443aed1bdac | max_issues_repo_licenses: ["MIT"] | max_issues_count: null | max_issues_repo_issues_event_min_datetime: null | max_issues_repo_issues_event_max_datetime: null
max_forks_repo_path: setup.py | max_forks_repo_name: franaguila/DSCI_524-square-python | max_forks_repo_head_hexsha: 93a79472a72fc957100b1945949b2443aed1bdac | max_forks_repo_licenses: ["MIT"] | max_forks_count: null | max_forks_repo_forks_event_min_datetime: null | max_forks_repo_forks_event_max_datetime: null
from distutils.core import setup

setup(
    name='square',
    version='0.1.0',
    author='Fran Aguila',
    author_email='francescaaguila@gmail.com',
    packages=['square'],
    scripts=['square/utils.py'],
    url='https://github.com/franguila/DSCI_524-square-python',
    license='LICENSE.txt',
    description='Get the area of a square',
    long_description=open('README.txt').read(),
    install_requires=[],
)
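One detail worth noting in the setup() call above: long_description is read with a bare open('README.txt'), which resolves against the current working directory and fails if the file is missing. A small defensive variant (a sketch, assuming README.txt sits next to setup.py) reads it relative to the setup script instead:

from pathlib import Path

here = Path(__file__).resolve().parent
long_description = (here / "README.txt").read_text(encoding="utf-8")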
avg_line_length: 26.25 | max_line_length: 62 | alphanum_fraction: 0.666667

hexsha: d71461e7cfae43e7ea103a8bb19d4fe04483cce9 | size: 2,254 | ext: py | lang: Python
max_stars_repo_path: ceph_deploy/tests/parser/test_config.py | max_stars_repo_name: SUSE/ceph-deploy-to-be-deleted | max_stars_repo_head_hexsha: a7a540668fcd23eea6b9e8d079ae60f32e5426e3 | max_stars_repo_licenses: ["MIT"] | max_stars_count: 1 | max_stars_repo_stars_event_min_datetime: 2020-07-29T15:09:23.000Z | max_stars_repo_stars_event_max_datetime: 2020-07-29T15:09:23.000Z
max_issues_repo_path: ceph_deploy/tests/parser/test_config.py | max_issues_repo_name: SUSE/ceph-deploy-to-be-deleted | max_issues_repo_head_hexsha: a7a540668fcd23eea6b9e8d079ae60f32e5426e3 | max_issues_repo_licenses: ["MIT"] | max_issues_count: null | max_issues_repo_issues_event_min_datetime: null | max_issues_repo_issues_event_max_datetime: null
max_forks_repo_path: ceph_deploy/tests/parser/test_config.py | max_forks_repo_name: SUSE/ceph-deploy-to-be-deleted | max_forks_repo_head_hexsha: a7a540668fcd23eea6b9e8d079ae60f32e5426e3 | max_forks_repo_licenses: ["MIT"] | max_forks_count: null | max_forks_repo_forks_event_min_datetime: null | max_forks_repo_forks_event_max_datetime: null
import pytest

from ceph_deploy.cli import get_parser

SUBCMDS_WITH_ARGS = ['push', 'pull']


class TestParserConfig(object):

    def setup(self):
        self.parser = get_parser()

    def test_config_help(self, capsys):
        with pytest.raises(SystemExit):
            self.parser.parse_args('config --help'.split())
        out, err = capsys.readouterr()
        assert 'usage: ceph-deploy config' in out
        assert 'positional arguments:' in out
        assert 'optional arguments:' in out

    @pytest.mark.parametrize('cmd', SUBCMDS_WITH_ARGS)
    def test_config_subcommands_with_args(self, cmd):
        self.parser.parse_args(['config'] + ['%s' % cmd] + ['host1'])

    def test_config_invalid_subcommand(self, capsys):
        with pytest.raises(SystemExit):
            self.parser.parse_args('config bork'.split())
        out, err = capsys.readouterr()
        assert 'invalid choice' in err

    @pytest.mark.skipif(reason="http://tracker.ceph.com/issues/12150")
    def test_config_push_host_required(self, capsys):
        with pytest.raises(SystemExit):
            self.parser.parse_args('config push'.split())
        out, err = capsys.readouterr()
        assert "error: too few arguments" in err

    def test_config_push_one_host(self):
        args = self.parser.parse_args('config push host1'.split())
        assert args.client == ['host1']

    def test_config_push_multiple_hosts(self):
        hostnames = ['host1', 'host2', 'host3']
        args = self.parser.parse_args('config push'.split() + hostnames)
        assert args.client == hostnames

    @pytest.mark.skipif(reason="http://tracker.ceph.com/issues/12150")
    def test_config_pull_host_required(self, capsys):
        with pytest.raises(SystemExit):
            self.parser.parse_args('config pull'.split())
        out, err = capsys.readouterr()
        assert "error: too few arguments" in err

    def test_config_pull_one_host(self):
        args = self.parser.parse_args('config pull host1'.split())
        assert args.client == ['host1']

    def test_config_pull_multiple_hosts(self):
        hostnames = ['host1', 'host2', 'host3']
        args = self.parser.parse_args('config pull'.split() + hostnames)
        assert args.client == hostnames
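The parser behaviour these tests assert can also be checked interactively; a short sketch (assuming ceph_deploy is importable in the current environment):

from ceph_deploy.cli import get_parser

parser = get_parser()

# 'config push' and 'config pull' collect the trailing host names into
# args.client, which is exactly what the tests above verify.
args = parser.parse_args(['config', 'push', 'host1', 'host2'])
print(args.client)  # expected: ['host1', 'host2']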
avg_line_length: 36.354839 | max_line_length: 72 | alphanum_fraction: 0.657054

hexsha: c8c2897679cce1d9021d9eeace5671ce7765ae92 | size: 1,679 | ext: py | lang: Python
max_stars_repo_path: packettcp.py | max_stars_repo_name: sekharkaredla/cnlab | max_stars_repo_head_hexsha: 8e1ef9a904dea5260f6b6cf968508a2847f9f15c | max_stars_repo_licenses: ["MIT"] | max_stars_count: null | max_stars_repo_stars_event_min_datetime: null | max_stars_repo_stars_event_max_datetime: null
max_issues_repo_path: packettcp.py | max_issues_repo_name: sekharkaredla/cnlab | max_issues_repo_head_hexsha: 8e1ef9a904dea5260f6b6cf968508a2847f9f15c | max_issues_repo_licenses: ["MIT"] | max_issues_count: null | max_issues_repo_issues_event_min_datetime: null | max_issues_repo_issues_event_max_datetime: null
max_forks_repo_path: packettcp.py | max_forks_repo_name: sekharkaredla/cnlab | max_forks_repo_head_hexsha: 8e1ef9a904dea5260f6b6cf968508a2847f9f15c | max_forks_repo_licenses: ["MIT"] | max_forks_count: 2 | max_forks_repo_forks_event_min_datetime: 2018-04-16T13:25:12.000Z | max_forks_repo_forks_event_max_datetime: 2018-04-16T14:49:38.000Z
# Packet sniffer in Python for Linux
# Sniffs only incoming TCP packets (raw sockets require root / CAP_NET_RAW)
import socket
import sys
from struct import unpack

# create a raw INET socket that delivers TCP packets including their IP headers
try:
    s = socket.socket(socket.AF_INET, socket.SOCK_RAW, socket.IPPROTO_TCP)
except socket.error as msg:
    print('Socket could not be created. Error Code : ' + str(msg.errno) + ' Message ' + str(msg.strerror))
    sys.exit()

# receive packets
while True:
    packet = s.recvfrom(65565)

    # packet bytes from the (data, address) tuple
    packet = packet[0]

    # take the first 20 bytes for the IP header
    ip_header = packet[0:20]
    print(ip_header)

    # now unpack them
    iph = unpack('!BBHHHBBH4s4s', ip_header)
    print(iph)

    version_ihl = iph[0]
    version = version_ihl >> 4
    ihl = version_ihl & 0xF
    iph_length = ihl * 4

    ttl = iph[5]
    protocol = iph[6]
    s_addr = socket.inet_ntoa(iph[8])
    d_addr = socket.inet_ntoa(iph[9])

    print('Version : ' + str(version) + ' IP Header Length : ' + str(ihl) + ' TTL : ' + str(ttl) +
          ' Protocol : ' + str(protocol) + ' Source Address : ' + str(s_addr) +
          ' Destination Address : ' + str(d_addr))

    tcp_header = packet[iph_length:iph_length + 20]

    # now unpack them
    tcph = unpack('!HHLLBBHHH', tcp_header)

    source_port = tcph[0]
    dest_port = tcph[1]
    sequence = tcph[2]
    acknowledgement = tcph[3]
    doff_reserved = tcph[4]
    tcph_length = doff_reserved >> 4

    print('Source Port : ' + str(source_port) + ' Dest Port : ' + str(dest_port) +
          ' Sequence Number : ' + str(sequence) + ' Acknowledgement : ' + str(acknowledgement) +
          ' TCP header length : ' + str(tcph_length))

    # everything past the IP and TCP headers is payload
    h_size = iph_length + tcph_length * 4
    data_size = len(packet) - h_size
    data = packet[h_size:]

    print('Data : ' + str(data))
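The header layout the sniffer decodes can be exercised without root privileges or a raw socket; a small self-contained sketch that packs a synthetic 20-byte IPv4 header and unpacks it with the same '!BBHHHBBH4s4s' format string (all field values below are made up for illustration):

import socket
from struct import pack, unpack

# Build a fake IPv4 header: version 4, IHL 5 (5 * 4 = 20 bytes), TTL 64, protocol TCP.
header = pack(
    '!BBHHHBBH4s4s',
    (4 << 4) | 5,                     # version / IHL
    0,                                # type of service
    40,                               # total length
    0x1234,                           # identification
    0,                                # flags / fragment offset
    64,                               # TTL
    socket.IPPROTO_TCP,               # protocol number (6)
    0,                                # checksum left as zero here
    socket.inet_aton('192.0.2.1'),    # source address (documentation range)
    socket.inet_aton('192.0.2.2'),    # destination address
)

iph = unpack('!BBHHHBBH4s4s', header)
version_ihl = iph[0]
print('version', version_ihl >> 4, 'ihl', version_ihl & 0xF,
      'ttl', iph[5], 'protocol', iph[6],
      'src', socket.inet_ntoa(iph[8]), 'dst', socket.inet_ntoa(iph[9]))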
avg_line_length: 33.58 | max_line_length: 208 | alphanum_fraction: 0.671233