hexsha (string, len 40) | size (int64, 4..1.02M) | ext (8 classes) | lang (1 class) | max_stars_repo_path (string, len 4..209) | max_stars_repo_name (string, len 5..121) | max_stars_repo_head_hexsha (string, len 40) | max_stars_repo_licenses (list, len 1..10) | max_stars_count (int64, 1..191k, nullable) | max_stars_repo_stars_event_min_datetime (string, len 24, nullable) | max_stars_repo_stars_event_max_datetime (string, len 24, nullable) | max_issues_repo_path (string, len 4..209) | max_issues_repo_name (string, len 5..121) | max_issues_repo_head_hexsha (string, len 40) | max_issues_repo_licenses (list, len 1..10) | max_issues_count (int64, 1..67k, nullable) | max_issues_repo_issues_event_min_datetime (string, len 24, nullable) | max_issues_repo_issues_event_max_datetime (string, len 24, nullable) | max_forks_repo_path (string, len 4..209) | max_forks_repo_name (string, len 5..121) | max_forks_repo_head_hexsha (string, len 40) | max_forks_repo_licenses (list, len 1..10) | max_forks_count (int64, 1..105k, nullable) | max_forks_repo_forks_event_min_datetime (string, len 24, nullable) | max_forks_repo_forks_event_max_datetime (string, len 24, nullable) | content (string, len 4..1.02M) | avg_line_length (float64, 1.07..66.1k) | max_line_length (int64, 4..266k) | alphanum_fraction (float64, 0.01..1) |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
a1384974ccd700a4a2a547d6245e9c75c028797e | 14,123 | py | Python | greatcirclelibrary.py | nathanielselub/great-circle-library | 1f665245238c21cca614498320fd05d799654f98 | ["MIT"] | 1 | 2020-11-03T21:01:44.000Z | 2020-11-03T21:01:44.000Z | greatcirclelibrary.py | nathanielselub/great_circle_analysis_library | 1f665245238c21cca614498320fd05d799654f98 | ["MIT"] | null | null | null | greatcirclelibrary.py | nathanielselub/great_circle_analysis_library | 1f665245238c21cca614498320fd05d799654f98 | ["MIT"] | null | null | null |
"""A module to help analyze great circles in the CMB sky."""
import numpy as np
import healpy as hp
import camb
from scipy.optimize import differential_evolution
from scipy.spatial.transform import Rotation as R
from numba import guvectorize, int64, float64, prange, jit
NSIDE = 256
N_GC = 20000
N_P = 2000
PARS = camb.CAMBparams(min_l=1)
PARS.set_cosmology(H0=67.4, ombh2=0.0224, omch2=0.120, mnu=0.06, omk=0.001,
tau=0.054)
MAX_INF_DIPOLE_AMP = 100
@jit(parallel=True)
def preprocess_maps(paths, new_l_max, nside=NSIDE):
"""Preprocess maps of the full CMB sky.
Parameters
----------
paths : np.ndarray
Paths to full CMB maps in .FITS format.
new_l_max : int
Maximum ell of multipole moments to extract from the full CMB maps.
nside : int
NSIDE of the maps when the extractions occur.
Returns
-------
np.ndarray
Returns the specified multipole moments of the full CMB maps, excluding
their respective monopole and dipole moments.
"""
new_alms = np.zeros([len(paths), hp.sphtfunc.Alm.getsize(new_l_max)],
dtype='complex')
for i in prange(len(paths)):
dgraded_map = hp.pixelfunc.ud_grade(hp.read_map(paths[i]) * (10**6), nside)
old_alms = hp.sphtfunc.map2alm(dgraded_map)
old_l_max = hp.sphtfunc.Alm.getlmax(old_alms.shape[0])
for ell in range(2, new_l_max + 1):
for m in range(ell + 1):
old_index = hp.Alm.getidx(old_l_max, ell, m)
new_index = hp.Alm.getidx(new_l_max, ell, m)
new_alms[i][new_index] = old_alms[old_index]
return new_alms
@jit(parallel=True)
def generate_same_cl_sims(alms, n):
"""Generate same cl spectrum simulations.
Parameters
----------
alms : np.ndarray
Alms from which the same cl spectrum simulations are generated.
n : int
Number of simulations to generate.
Returns
-------
np.ndarray
Returns n same cl spectrum simulations.
"""
l_max = hp.sphtfunc.Alm.getlmax(alms.shape[0])
cl_vals = hp.alm2cl(alms)
simulations = np.zeros([n, alms.shape[0]], dtype='complex')
for i in prange(n):
simulations[i] = hp.sphtfunc.synalm(cl_vals, lmax=l_max, verbose=False)
for ell in np.flatnonzero(cl_vals):
cl_sum = 0
for m in range(ell + 1):
cl_sum += abs(simulations[i][hp.Alm.getidx(l_max, ell, m)])**2
scaling_factor = np.sqrt(cl_vals[ell] * (1 + 2 * ell)/(2 * cl_sum))
for m in range(ell + 1):
simulations[i][hp.Alm.getidx(l_max, ell, m)] *= scaling_factor
simulations[i][hp.Alm.getidx(l_max, ell, 0)] *= np.sqrt(2)
return simulations
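# Hypothetical usage sketch (not part of the original module): draw constrained
# realizations that share the power spectrum of a toy set of alms. Assumes the
# healpy/numba versions this module was written against; toy C_l values are
# illustrative only.
def _example_same_cl_sims(n_sims=10, l_max=4):
    toy_cls = np.array([0., 0., 1e3, 5e2, 2e2])  # C_0..C_4 in muK^2
    toy_alms = hp.sphtfunc.synalm(toy_cls, lmax=l_max, verbose=False)
    sims = generate_same_cl_sims(toy_alms, n_sims)
    return sims  # shape: (n_sims, number of alm coefficients)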
@jit(parallel=True)
def generate_standard_dipole_sims(alms, n, pars=PARS):
"""Generate simulations with dipoles from the standard model of cosmology.
Parameters
----------
alms : np.ndarray
Alms which the standard model dipoles are added to.
n : int
Number of simulations to generate.
pars : camb.model.CAMBparams
Parameters for the CAMB angular power spectrum generation.
Returns
-------
np.ndarray
Returns n simulations that have had a standard model dipole added to
them.
"""
l_max = hp.sphtfunc.Alm.getlmax(alms.shape[0])
results = camb.get_results(pars)
powers = results.get_cmb_power_spectra(pars, CMB_unit='muK')
dipole_cl = np.zeros(l_max + 1)
dipole_cl[1] = np.pi * powers['total'][1, 0]
new_alms = remove_l(alms, 1)
simulations = np.zeros([n, alms.shape[0]], dtype='complex')
for i in prange(n):
simulations[i] = new_alms + hp.sphtfunc.synalm(dipole_cl, lmax=l_max,
verbose=False)
return simulations
@jit(parallel=True)
def generate_gcs(n_gc=N_GC, n_p=N_P, nside=NSIDE):
"""Generate a set of great circles.
Parameters
----------
n_gc : int
Number of great circles to be generated.
n_p : int
Number of points to be sampled from each great circle.
nside : int
NSIDE of the map on which the great circles will be valid.
Returns
-------
np.ndarray
Returns an array containing great circles.
"""
phi = np.random.uniform(0., 2 * np.pi, (n_gc, 1))
theta = np.arcsin(np.random.uniform(np.sin(- np.pi / 2), np.sin(np.pi/2),
(n_gc, 1))) + np.pi / 2
rotation_1 = R.from_rotvec(np.pad(theta, [(0, 0), (1, 1)]))
rotation_2 = R.from_rotvec(np.pad(phi, [(0, 0), (2, 0)]))
random_rotation = rotation_2 * rotation_1
circ_angs = np.linspace(0, 2 * np.pi, n_p)
circ_coords = np.stack((np.cos(circ_angs), np.sin(circ_angs),
np.zeros_like(circ_angs)), axis=-1)
gcs = np.empty([n_gc, n_p], dtype=int)
for i in prange(n_gc):
gc = random_rotation[i].apply(circ_coords)
gcs[i] = hp.vec2pix(nside, gc[:, 0], gc[:, 1], gc[:, 2])
return gcs
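# Hypothetical sanity check (not part of the original module): a small batch of
# great circles should come back as an (n_gc, n_p) array of valid HEALPix pixel
# indices for the module-level NSIDE.
def _example_generate_gcs(n_gc=5, n_p=100):
    gcs = generate_gcs(n_gc=n_gc, n_p=n_p)
    assert gcs.shape == (n_gc, n_p)
    assert gcs.min() >= 0
    assert gcs.max() < hp.nside2npix(NSIDE)
    return gcs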
@guvectorize([(int64[:], float64[:], float64[:])], '(m),(n)->()',
target='parallel', nopython=True)
def gc_vars(gcs, my_map, res):
"""Get the biased sample variances of great circles from a single map.
Parameters
----------
gcs : np.ndarray
Great circles whose biased sample variances are calculated.
my_map : np.ndarray
Map from which the biased sample variances of the great circles
are calculated.
res : np.ndarray
Returns the biased sample variances of the great circles.
"""
res[0] = np.var(my_map[gcs])
@jit(parallel=True)
def multi_gc_vars(gcs, alms, nside=NSIDE):
"""Get the variance of great circle variances from multiple maps.
Parameters
----------
gcs : np.ndarray
Great circles whose unbiased sample variances are calculated.
alms : np.ndarray
Set of maps in alm form from which the unbiased sample variances of the
great circles are calculated.
nside : int
NSIDE of the maps when the unbiased sample variances of the great
circles are calculated.
Returns
-------
np.ndarray
Returns the variance of great circle variances from multiple maps.
"""
vars_sims = np.zeros(alms.shape[0])
for i in prange(alms.shape[0]):
vars_sims[i] = np.var(gc_vars(gcs, hp.sphtfunc.alm2map(alms[i], nside,
verbose=False)),
ddof=1)
n = gcs.shape[1]
return ((n / (n - 1)) ** 2) * vars_sims
def correlation_function(cl_vals):
"""Calculate the angular correlation function from a power spectrum.
Parameters
----------
cl_vals : np.ndarray
Cls with which the angular correlation function is calculated.
Returns
-------
function
Returns the angular correlation function calculated from the given cls.
"""
cl_vals_scaled = np.copy(cl_vals)
for ell in range(cl_vals_scaled.shape[0]):
cl_vals_scaled[ell] *= ((2. * ell + 1.) / (4. * np.pi))
def C(theta):
return np.polynomial.legendre.legval(np.cos(theta), cl_vals_scaled)
return C
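# Hypothetical usage sketch (not part of the original module): evaluate the
# angular correlation function implied by a toy power spectrum at a few angles.
# Because P_l(1) = 1, C(0) equals sum_l (2l + 1) / (4 pi) * C_l.
def _example_correlation_function():
    toy_cls = np.array([0., 0., 1e3, 5e2, 2e2])  # illustrative C_0..C_4
    C = correlation_function(toy_cls)
    thetas = np.linspace(0., np.pi, 5)
    return C(thetas)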
@jit(parallel=True)
def get_inf_versions(gcs, alms, nside=NSIDE):
"""Get the inferred versions of multiple maps.
Parameters
----------
gcs : np.ndarray
Great circles whose biased sample variances are used to perform the
calculations.
alms : np.ndarray
Set of maps in alm form whose inferred versions are calculated.
nside : int
NSIDE of the maps when the biased sample variances of the great circles
are calculated.
Returns
-------
np.ndarray
Returns the respective inferred versions of the maps in alm form.
"""
new_alms = np.copy(alms)
l_max = hp.sphtfunc.Alm.getlmax(alms.shape[1])
dipole_index = hp.Alm.getidx(l_max, 1, 0)
for i in prange(new_alms.shape[0]):
new_alms[i] = remove_l(new_alms[i], 1)
dipole = np.zeros_like(new_alms[i])
res = differential_evolution(inf_dipole_evaluator,
[(0, MAX_INF_DIPOLE_AMP), (0, np.pi),
(0, 2 * np.pi)],
args=(gcs, new_alms[i], dipole_index,
nside),
workers=-1,
popsize=3)
dipole[dipole_index] = res.x[0]
hp.rotate_alm(dipole, 0, res.x[1], res.x[2])
new_alms[i] += dipole
return new_alms
def inf_dipole_evaluator(dipole, gcs, alms, index, nside):
"""Find the variance of great circle variances, given a dipole.
Parameters
----------
dipole : np.ndarray
Array containing the dipole amplitude and orientation.
gcs : np.ndarray
Great circles whose biased sample variances are calculated.
alms : np.ndarray
Alms used to generate the map that the dipole is added to.
index : int
Index of m = 0 for the dipole.
nside : int
NSIDE of the map when the biased sample variances of the great circles
are calculated.
Returns
-------
float
Returns the biased sample variance of the biased sample variances of
the great circles.
"""
new_dipole = np.zeros_like(alms)
new_dipole[index] = dipole[0]
hp.rotate_alm(new_dipole, 0, dipole[1], dipole[2])
return np.var(gc_vars(gcs, hp.sphtfunc.alm2map(alms + new_dipole, nside,
verbose=False)))
@jit(parallel=True)
def get_axis_of_max_sect(alms, ell):
"""Get the axis of maximum sectorality for a given multipole moment.
Parameters
----------
alms : np.ndarray
Alms from which the multipole moments are taken.
ell : int
Ell of the multipole moment whose axis of maximum sectorality is
calculated.
Returns
-------
np.ndarray
Returns the locations of the respective axes of maximum sectorality for
the specified multipole moment from each set of alms.
"""
l_max = hp.sphtfunc.Alm.getlmax(alms.shape[1])
a_l_l_index = hp.Alm.getidx(l_max, ell, ell)
locs = np.zeros([alms.shape[0], 3])
for i in range(alms.shape[0]):
isolated_moment = get_l(alms[i], ell)
res = differential_evolution(axis_of_max_sect_evaluator,
[(0, 2 * np.pi), (0, np.pi / 2)],
args=(isolated_moment, a_l_l_index),
workers=-1,
popsize=15)
ams_rot = R.from_euler('zyz', [0, -res.x[1], -res.x[0]])
locs[i] = ams_rot.apply([0, 0, -1])
return locs
def axis_of_max_sect_evaluator(loc, isolated_moment, a_l_l_index):
"""Find the sectorality of a multipole, given a coordinate system rotation.
Parameters
----------
loc : np.ndarray
Array containing the rotation to the new coordinate system in which the
sectorality is calculated.
isolated_moment : np.ndarray
The given multipole moment in alm form.
a_l_l_index : int
Index of a_l_l for the given multipole moment.
Returns
-------
float
Returns the reciprocal of the magnitude of the a_l_l coefficient in
the new coordinate system.
"""
new_moment = np.copy(isolated_moment)
hp.rotate_alm(new_moment, loc[0], loc[1], 0)
return 1 / abs(new_moment[a_l_l_index])
def get_nat_rot(alms):
"""Find a rotation to the natural coordinate system of the alms.
Parameters
----------
alms : np.ndarray
Alms whose natural coordinate system rotation is found.
Returns
-------
np.ndarray
Returns a vector containing Euler angles that describe an extrinsic
z-y-z rotation to the natural coordinate system.
"""
rotation_copy = np.copy(alms)
temp_map = hp.sphtfunc.alm2map(alms, NSIDE)
dipole_ang = hp.pixelfunc.vec2ang(hp.pixelfunc.fit_dipole(temp_map)[1])
nat_rotation = [-dipole_ang[1][0], -dipole_ang[0][0], 0]
hp.rotate_alm(rotation_copy, *nat_rotation)
l_max = hp.sphtfunc.Alm.getlmax(alms.shape[0])
a_3_3 = rotation_copy[hp.Alm.getidx(l_max, 3, 3)]
nat_rotation[2] = np.arctan2(np.imag(a_3_3), np.real(a_3_3)) / 3
return nat_rotation
def rot_to_nat_coords(alms):
"""Rotate the alms to their natural coordinate system.
Parameters
----------
alms : np.ndarray
Alms to rotate to natural coordinates.
Returns
-------
np.ndarray
Returns the alms in their natural coordinate system.
"""
new_alms = np.copy(alms)
hp.rotate_alm(new_alms, *get_nat_rot(alms))
return new_alms
def get_l(alms, ell):
"""Get the specified multipole moment from the alms.
Parameters
----------
alms : np.ndarray
Alms from which the specified multipole moment is returned.
ell : int
Multipole moment to return.
Returns
-------
np.ndarray
Returns the specified multipole moment.
"""
l_max = hp.sphtfunc.Alm.getlmax(alms.shape[0])
isolated_l = np.zeros_like(alms)
for m in range(ell + 1):
index = hp.Alm.getidx(l_max, ell, m)
isolated_l[index] = alms[index]
return isolated_l
def remove_l(alms, ell):
"""Remove the specified multipole moment from the alms.
Parameters
----------
alms : np.ndarray
Alms from which the specified multipole moment is removed.
ell : int
Multipole moment to remove.
Returns
-------
np.ndarray
Return the alms with the specified multipole moment removed.
"""
l_max = hp.sphtfunc.Alm.getlmax(alms.shape[0])
new_alms = np.copy(alms)
for m in range(ell + 1):
new_alms[hp.Alm.getidx(l_max, ell, m)] = 0
return new_alms
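# Hypothetical end-to-end sketch (not part of the original module): preprocess a
# set of full-sky maps, draw random great circles, and compare the variance of
# great-circle variances between the data and same-cl simulations. The map paths
# are caller-supplied placeholders; library versions as assumed above.
def _example_gc_variance_analysis(map_paths, l_max=10, n_sims=100):
    alms = preprocess_maps(np.asarray(map_paths), l_max)
    gcs = generate_gcs()
    data_vars = multi_gc_vars(gcs, alms)
    sims = generate_same_cl_sims(alms[0], n_sims)
    sim_vars = multi_gc_vars(gcs, sims)
    return data_vars, sim_vars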
| 27.317215 | 83 | 0.604546 |
254b79618218a2f7ad7a817ca0db4f8a4d6c8dd2 | 6,230 | py | Python | hookbox/user.py | gameclosure/hookbox | c0087632b8035a1bff5c8dc55a2b64ba44c2cfbb | ["MIT"] | 10 | 2015-06-26T01:41:41.000Z | 2019-04-16T09:07:29.000Z | hookbox/user.py | play-co/hookbox | c0087632b8035a1bff5c8dc55a2b64ba44c2cfbb | ["MIT"] | null | null | null | hookbox/user.py | play-co/hookbox | c0087632b8035a1bff5c8dc55a2b64ba44c2cfbb | ["MIT"] | null | null | null |
import eventlet
from errors import ExpectedException
try:
import json
except ImportError:
import simplejson as json
import datetime
def get_now():
return datetime.datetime.now().strftime('%Y-%m-%dT%H:%M:%S')
class User(object):
_options = {
'reflective': True,
'moderated_message': True,
}
def __init__(self, server, name, **options):
self.server = server
self.name = name
self.connections = []
self.channels = []
self._temp_cookie = ""
self.update_options(**self._options)
self.update_options(**options)
def serialize(self):
return {
'channels': [ chan.name for chan in self.channels ],
'connections': [ conn.id for conn in self.connections ],
'name': self.name,
'options': dict([ (key, getattr(self, key)) for key in self._options])
}
def update_options(self, **options):
# TODO: this can't remain so generic forever. At some point we need
# better checks on values, such as the list of dictionaries
# for history, or the polling options.
# TODO: add support for lists (we only have dicts now)
# TODO: Probably should make this whole function recursive... though
# we only really have one level of nesting now.
# TODO: most of this function is duplicated from Channel#update_options
# (including the TODOs above), could be a lot DRYer
for key, val in options.items():
if key not in self._options:
raise ValueError("Invalid keyword argument %s" % (key))
default = self._options[key]
cls = default.__class__
if cls in (unicode, str):
cls = basestring
if not isinstance(val, cls):
raise ValueError("Invalid type for %s (should be %s)" % (key, default.__class__))
if key == 'state':
self.state_replace(val)
continue
if isinstance(val, dict):
for _key, _val in val.items():
if _key not in self._options[key]:
raise ValueError("Invalid keyword argument %s" % (_key))
default = self._options[key][_key]
cls = default.__class__
if isinstance(default, float) and isinstance(_val, int):
_val = float(_val)
if cls in (unicode, str):
cls = basestring
if not isinstance(_val, cls):
raise ValueError("%s is Invalid type for %s (should be %s)" % (_val, _key, default.__class__))
# Two loops force the exception *before* any of the options are set.
for key, val in options.items():
# this should create copies of any dicts or lists that are options
if isinstance(val, dict) and hasattr(self, key):
getattr(self, key).update(val)
else:
setattr(self, key, val.__class__(val))
def add_connection(self, conn):
self.connections.append(conn)
conn.user = self
# call later...
eventlet.spawn(self._send_initial_subscriptions, conn)
def _send_initial_subscriptions(self, conn):
for channel in self.channels:
frame = channel._build_subscribe_frame(self)
conn.send_frame('SUBSCRIBE', frame)
def remove_connection(self, conn):
self.connections.remove(conn)
if not self.connections:
# each call to user_disconnected might result in an immediate call
# to self.channel_unsubscribed, thus modifying self.channels and
# messing up our loop. So we loop over a copy of self.channels...
for channel in self.channels[:]:
channel.user_disconnected(self)
# print 'tell server to remove user...'
# so the disconnect callback has a cookie
self._temp_cookie = conn.get_cookie()
self.server.remove_user(self.name)
def channel_subscribed(self, channel):
self.channels.append(channel)
def channel_unsubscribed(self, channel):
self.channels.remove(channel)
def get_name(self):
return self.name
def send_frame(self, name, args={}, omit=None):
for conn in self.connections:
if conn is not omit:
conn.send_frame(name, args)
def get_cookie(self, conn=None):
if conn:
return conn.get_cookie()
return self._temp_cookie or ""
def send_message(self, recipient_name, payload, conn=None, needs_auth=True):
try:
encoded_payload = json.loads(payload)
except:
raise ExpectedException("Invalid json for payload")
payload = encoded_payload
if needs_auth and self.moderated_message:
form = { 'sender': self.get_name(), 'recipient': recipient_name, 'recipient_exists': self.server.exists_user(recipient_name), 'payload': json.dumps(payload) }
success, options = self.server.http_request('message', self.get_cookie(conn), form, conn=conn)
self.server.maybe_auto_subscribe(self, options, conn=conn)
if not success:
raise ExpectedException(options.get('error', 'Unauthorized'))
payload = options.get('override_payload', payload)
recipient_name = options.get('override_recipient_name', recipient_name)
elif not self.server.exists_user(recipient_name):
raise ExpectedException('Invalid user name')
recipient = self.server.get_user(recipient_name) if self.server.exists_user(recipient_name) else None
frame = {"sender": self.get_name(), "recipient": recipient.get_name() if recipient else "null", "payload": payload, "datetime": get_now()}
if recipient:
recipient.send_frame('MESSAGE', frame)
if self.reflective and (not recipient or recipient.name != self.name):
self.send_frame('MESSAGE', frame)
| 42.380952 | 170 | 0.592616 |
eed5866335ce19bbb1271c4f76e26d5e9e6b7d3a | 5,477 | bzl | Python | bazel_tools/fat_cc_library.bzl | flokli/daml | 92f2dbf2eb8985290a3bb9702a3193e6f9296b22 | ["Apache-2.0"] | null | null | null | bazel_tools/fat_cc_library.bzl | flokli/daml | 92f2dbf2eb8985290a3bb9702a3193e6f9296b22 | ["Apache-2.0"] | null | null | null | bazel_tools/fat_cc_library.bzl | flokli/daml | 92f2dbf2eb8985290a3bb9702a3193e6f9296b22 | ["Apache-2.0"] | null | null | null |
# Copyright (c) 2019 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved.
# SPDX-License-Identifier: Apache-2.0
load("@os_info//:os_info.bzl", "is_darwin", "is_windows")
def _fat_cc_library_impl(ctx):
input_lib = ctx.attr.input_lib
cc_info = input_lib[CcInfo]
static_libs = []
# For now we assume that we have static PIC libs for all libs.
# It should be possible to extend this but we do not have a need
# for it so far and it would complicate things.
for lib in cc_info.linking_context.libraries_to_link.to_list():
static_lib = None
if lib.pic_static_library:
static_lib = lib.pic_static_library
elif is_windows and lib.static_library:
# On Windows we don't seem to have `pic_static_library`s available.
static_lib = lib.static_library
else:
fail("No (PIC) static library found for '{}'.".format(
str(lib.dynamic_library.path),
))
static_libs += [static_lib]
dyn_lib = ctx.outputs.dynamic_library
static_lib = ctx.outputs.static_library
toolchain = ctx.attr._cc_toolchain[cc_common.CcToolchainInfo]
feature_configuration = cc_common.configure_features(ctx = ctx, cc_toolchain = toolchain)
compiler = None
if is_darwin:
# toolchain.compiler_executable() fails on MacOS, see https://github.com/bazelbuild/bazel/issues/7105
compiler = ctx.executable._cc_compiler
elif is_windows:
compiler = toolchain.compiler_executable() + ".exe"
else:
compiler = toolchain.compiler_executable()
ctx.actions.run(
mnemonic = "CppLinkFatDynLib",
outputs = [dyn_lib],
executable = compiler,
arguments =
["-o", dyn_lib.path, "-shared"] +
ctx.attr.whole_archive_flag +
[f.path for f in static_libs] +
ctx.attr.no_whole_archive_flag +
# Some libs seem to depend on libstdc++ implicitly
["-lstdc++"] +
(["-framework", "CoreFoundation"] if is_darwin else []) +
# On Windows some libs seem to depend on Windows sockets
(["-lws2_32"] if is_windows else []),
inputs = static_libs,
env = {"PATH": ""},
)
mri_script_content = "\n".join(
["create {}".format(static_lib.path)] +
["addlib {}".format(lib.path) for lib in static_libs] +
["save", "end"],
) + "\n"
mri_script = ctx.actions.declare_file(ctx.label.name + "_mri")
ctx.actions.write(mri_script, mri_script_content)
ar = toolchain.ar_executable()
if ar.find("libtool") >= 0:
# We are on MacOS where ar_executable is actually libtool, see
# https://github.com/bazelbuild/bazel/issues/5127.
ctx.actions.run(
mnemonic = "CppLinkFatStaticLib",
outputs = [static_lib],
executable = ar,
inputs = static_libs,
arguments =
["-no_warning_for_no_symbols", "-static", "-o", static_lib.path] +
[f.path for f in static_libs],
)
else:
ctx.actions.run_shell(
mnemonic = "CppLinkFatStaticLib",
outputs = [static_lib],
inputs = [mri_script] + static_libs,
command = "{ar} -M < {mri_script}".format(ar = ar, mri_script = mri_script.path),
)
fat_lib = cc_common.create_library_to_link(
actions = ctx.actions,
feature_configuration = feature_configuration,
cc_toolchain = toolchain,
dynamic_library = dyn_lib,
static_library = static_lib,
)
new_linking_context = cc_common.create_linking_context(
libraries_to_link = [fat_lib],
)
new_cc_info = CcInfo(
linking_context = new_linking_context,
compilation_context = cc_info.compilation_context,
)
return [new_cc_info]
# Shared libraries built with Bazel do not declare their dependencies on other libraries properly.
# Instead that dependency is tracked in Bazel internally. This breaks the GHCi linker if
# RTLD_LAZY doesn’t work which happens quite often. To make matters worse, we also cannot use
# linkstatic = True, since the GHCi linker cannot handle some relocations.
# To work around this mess, we create fat libraries that do not have additional dependencies.
# See https://github.com/tweag/rules_haskell/issues/720
fat_cc_library = rule(
_fat_cc_library_impl,
attrs = dict({
"input_lib": attr.label(),
"_cc_toolchain": attr.label(
default = Label("@bazel_tools//tools/cpp:current_cc_toolchain"),
),
"_cc_compiler": attr.label(
allow_files = True,
executable = True,
cfg = "host",
default =
# bin/cc is gcc on Darwin which fails to find libc++
Label("@nixpkgs_cc_toolchain//:bin/clang") if is_darwin else None,
),
"whole_archive_flag": attr.string_list(
# ld on MacOS doesn’t understand --whole-archive
default = ["-Wl,-all_load"] if is_darwin else ["-Wl,--whole-archive"],
),
"no_whole_archive_flag": attr.string_list(
default = [] if is_darwin else ["-Wl,--no-whole-archive"],
),
}),
fragments = ["cpp"],
outputs = {
"dynamic_library": "lib%{name}.dll" if is_windows else "lib%{name}.so",
"static_library": "lib%{name}.a",
},
)
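# Hypothetical BUILD-file usage sketch (not part of the original .bzl file): the
# rule would be loaded from this file and pointed at an existing cc_library.
# The target name and dependency label below are illustrative placeholders.
#
#   load("//bazel_tools:fat_cc_library.bzl", "fat_cc_library")
#
#   fat_cc_library(
#       name = "fat_example",
#       input_lib = ":some_cc_library",
#   )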
| 38.570423 | 109 | 0.622969 |
507280826d7d5cb43c822ac43cf98da6ec1f5ecf | 2,742 | py | Python | external/bayesopt/python/demo_quad.py | pchrapka/brain-modelling | f232b5a858e45f10b0b0735269010454129ab017 | ["MIT"] | 1 | 2017-10-13T19:37:52.000Z | 2017-10-13T19:37:52.000Z | external/bayesopt/python/demo_quad.py | pchrapka/brain-modelling | f232b5a858e45f10b0b0735269010454129ab017 | ["MIT"] | null | null | null | external/bayesopt/python/demo_quad.py | pchrapka/brain-modelling | f232b5a858e45f10b0b0735269010454129ab017 | ["MIT"] | 1 | 2019-11-25T12:22:05.000Z | 2019-11-25T12:22:05.000Z |
#!/usr/bin/env python
# -------------------------------------------------------------------------
# This file is part of BayesOpt, an efficient C++ library for
# Bayesian optimization.
#
# Copyright (C) 2011-2015 Ruben Martinez-Cantin <rmcantin@unizar.es>
#
# BayesOpt is free software: you can redistribute it and/or modify it
# under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# BayesOpt is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with BayesOpt. If not, see <http://www.gnu.org/licenses/>.
# ------------------------------------------------------------------------
import sys
import bayesopt
from bayesoptmodule import BayesOptContinuous
import numpy as np
from time import clock
# Python3 compat
if sys.version_info[0] == 3:
raw_input = input
# Function for testing.
def testfunc(Xin):
total = 5.0
for value in Xin:
total = total + (value -0.33)*(value-0.33)
return total
# Class for OO testing.
class BayesOptTest(BayesOptContinuous):
def evaluateSample(self,Xin):
return testfunc(Xin)
# Let's define the parameters
# For different options: see parameters.h and cpp
# If a parameter is not defined, it will be automatically set
# to a default value.
params = {}
params['n_iterations'] = 50
params['n_iter_relearn'] = 5
params['n_init_samples'] = 2
print("Callback implementation")
n = 5 # n dimensions
lb = np.zeros((n,))
ub = np.ones((n,))
start = clock()
mvalue, x_out, error = bayesopt.optimize(testfunc, n, lb, ub, params)
print("Result", mvalue, "at", x_out)
print("Running time:", clock() - start, "seconds")
raw_input('Press INTRO to continue')
print("OO implementation")
bo_test = BayesOptTest(n)
bo_test.parameters = params
bo_test.lower_bound = lb
bo_test.upper_bound = ub
start = clock()
mvalue, x_out, error = bo_test.optimize()
print("Result", mvalue, "at", x_out)
print("Running time:", clock() - start, "seconds")
raw_input('Press INTRO to continue')
print("Callback discrete implementation")
x_set = np.random.rand(100,n)
start = clock()
mvalue, x_out, error = bayesopt.optimize_discrete(testfunc, x_set, params)
print("Result", mvalue, "at", x_out)
print("Running time:", clock() - start, "seconds")
value = np.array([testfunc(i) for i in x_set])
print("Optimum", value.min(), "at", x_set[value.argmin()])
| 29.804348 | 77 | 0.667031 |
f4de3f19c87be81953805a4b77da4a4bf0195399 | 2,611 | py | Python | korbit/public_api.py | HoonJin/korbit-python | 496131d1320361e8ddf4e60ec8d7269ca8451afe | ["MIT"] | 24 | 2016-03-07T06:01:33.000Z | 2022-03-19T06:18:43.000Z | korbit/public_api.py | HoonJin/korbit-python | 496131d1320361e8ddf4e60ec8d7269ca8451afe | ["MIT"] | 1 | 2018-01-05T08:48:37.000Z | 2018-01-18T05:36:10.000Z | korbit/public_api.py | HoonJin/korbit-python | 496131d1320361e8ddf4e60ec8d7269ca8451afe | ["MIT"] | 13 | 2017-07-27T15:44:21.000Z | 2020-03-12T19:08:44.000Z |
# -*- coding: utf-8 -*-
import requests
import json
import logging
try:
from urllib.parse import urljoin
except ImportError:
from urlparse import urljoin
class PublicAPI:
def __init__(self, production=True, version="v1", timeout=20):
self.__host = production and "https://api.korbit.co.kr/%s/" % version \
or "https://api.korbit-test.com/%s/" % version
self.__timeout = timeout
# https://apidocs.korbit.co.kr/#public
def ticker(self, currency_pair="btc_krw"):
params = {
'currency_pair': currency_pair
}
return self.request_get("ticker", params=params)
def detailed_ticker(self, currency_pair="btc_krw"):
params = {
'currency_pair': currency_pair
}
return self.request_get("ticker/detailed", params=params)
def all_detailed_ticker(self):
return self.request_get("ticker/detailed/all")
def orderbook(self, currency_pair="btc_krw", category="all", group=True):
params = {
'group': group,
'category': category,
'currency_pair': currency_pair
}
return self.request_get("orderbook", params=params)
def bids_orderbook(self, currency_pair="btc_krw", group=True):
return self.orderbook(currency_pair=currency_pair, category="bid", group=group)
def asks_orderbook(self, currency_pair="btc_krw", group=True):
return self.orderbook(currency_pair=currency_pair, category="ask", group=group)
def list_of_filled_orders(self, currency_pair="btc_krw", interval="hour"):
params = {
'time': interval,
'currency_pair': currency_pair
}
return self.request_get("transactions", params=params)
def request_get(self, path, headers=None, params=None):
response = requests.get(urljoin(self.host, path), headers=headers, params=params, timeout=self.__timeout)
try:
return response.json()
except json.decoder.JSONDecodeError as e:
logging.error("exception: {}, response_text: {}".format(e, response.text))
return response.text
def request_post(self, path, headers=None, data=None):
response = requests.post(urljoin(self.host, path), headers=headers, data=data, timeout=self.__timeout)
try:
return response.json()
except json.decoder.JSONDecodeError as e:
logging.error("exception: {}, response_text: {}".format(e, response.text))
return response.text
@property
def host(self):
return self.__host
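# Hypothetical usage sketch (not part of the original module): query a few of
# the public endpoints defined above. Assumes network access to the Korbit API;
# the currency pair is the module's own default.
def _example_public_api_usage():
    api = PublicAPI(production=True)
    print(api.ticker("btc_krw"))
    print(api.orderbook("btc_krw"))
    print(api.list_of_filled_orders("btc_krw", interval="hour"))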
| 35.283784 | 113 | 0.641134 |
7e4dc6f661b51bffd677607379a0f7dff4a9e51f | 7,054 | py | Python | tests/integration/test_specfile.py | packit/specfile | 66608618fc044c3dd3cd73353f75f5943d5681b0 | ["MIT"] | 2 | 2022-02-16T09:37:45.000Z | 2022-02-19T10:33:19.000Z | tests/integration/test_specfile.py | packit/specfile | 66608618fc044c3dd3cd73353f75f5943d5681b0 | ["MIT"] | 37 | 2021-11-23T12:21:06.000Z | 2022-03-31T11:20:34.000Z | tests/integration/test_specfile.py | packit/specfile | 66608618fc044c3dd3cd73353f75f5943d5681b0 | ["MIT"] | 2 | 2022-01-11T15:04:55.000Z | 2022-03-22T10:11:32.000Z |
# Copyright Contributors to the Packit project.
# SPDX-License-Identifier: MIT
import datetime
import subprocess
import pytest
import rpm
from flexmock import flexmock
from specfile.exceptions import SpecfileException
from specfile.prep import AutopatchMacro, AutosetupMacro, PatchMacro, SetupMacro
from specfile.sections import Section
from specfile.specfile import Specfile
def test_parse(spec_multiple_sources):
spec = Specfile(spec_multiple_sources)
prep = spec._spec.prep
# remove all sources
for path in spec.sourcedir.iterdir():
if not path.samefile(spec.path):
path.unlink()
spec = Specfile(spec_multiple_sources)
assert spec._spec.prep == prep
def test_prep_traditional(spec_traditional):
spec = Specfile(spec_traditional)
with spec.prep() as prep:
assert AutosetupMacro not in prep.macros
assert AutopatchMacro not in prep.macros
assert isinstance(prep.macros[0], SetupMacro)
for i, m in enumerate(prep.macros[1:]):
assert isinstance(m, PatchMacro)
assert m.index == i
assert m.options.p == 1
prep.remove_patch_macro(0)
assert len([m for m in prep.macros if isinstance(m, PatchMacro)]) == 2
prep.add_patch_macro(0, p=2, b=".test")
assert len(prep.macros) == 4
with spec.sections() as sections:
assert sections.prep[1] == "%patch0 -p2 -b .test"
def test_prep_autosetup(spec_autosetup):
spec = Specfile(spec_autosetup)
with spec.prep() as prep:
assert len(prep.macros) == 1
assert AutosetupMacro in prep.macros
assert SetupMacro not in prep.macros
assert PatchMacro not in prep.macros
assert prep.macros[0].options.p == 1
def test_sources(spec_minimal):
spec = Specfile(spec_minimal)
source = "test.tar.gz"
with spec.sources() as sources:
assert not sources
sources.append(source)
assert sources.count(source) == len(sources) == 1
with spec.tags() as tags:
assert [source] == [t.value for t in tags if t.name.startswith("Source")]
with spec.sources() as sources:
sources.remove(source)
assert not sources
sources.insert(0, source)
assert sources[0].location == source
sources.clear()
assert not sources
def test_patches(spec_patchlist):
spec = Specfile(spec_patchlist)
patch = "test.patch"
with spec.patches() as patches:
patches.insert(0, patch)
assert patches[0].location == patch
assert patches[1].number == 1
with spec.tags() as tags:
assert len([t for t in tags if t.name.startswith("Patch")]) == 2
with spec.patches() as patches:
patches.remove(patch)
patches.insert(1, patch)
patches[1].comments.append("test")
with spec.sections() as sections:
assert len([sl for sl in sections.patchlist if sl]) == 4
assert sections.patchlist[0] == "# test"
@pytest.mark.parametrize(
"rpmdev_packager_available, entry, author, email, timestamp, result",
[
(False, None, None, None, None, None),
(
True,
"test",
None,
None,
datetime.date(2022, 2, 1),
Section(
"changelog",
["* Tue Feb 01 2022 John Doe <john@doe.net> - 0.1-1", "test"],
),
),
(
True,
"test",
"Bill Packager",
None,
datetime.date(2022, 2, 1),
Section("changelog", ["* Tue Feb 01 2022 Bill Packager - 0.1-1", "test"]),
),
(
True,
"test",
"Bill Packager",
"bill@packager.net",
datetime.date(2022, 2, 1),
Section(
"changelog",
["* Tue Feb 01 2022 Bill Packager <bill@packager.net> - 0.1-1", "test"],
),
),
(
True,
"test",
"Bill Packager",
"bill@packager.net",
datetime.datetime(2022, 2, 1, 9, 28, 13),
Section(
"changelog",
[
"* Tue Feb 01 09:28:13 UTC 2022 Bill Packager <bill@packager.net> - 0.1-1",
"test",
],
),
),
(
True,
["line 1", "line 2"],
"Bill Packager",
"bill@packager.net",
datetime.datetime(2022, 2, 1, 9, 28, 13),
Section(
"changelog",
[
"* Tue Feb 01 09:28:13 UTC 2022 Bill Packager <bill@packager.net> - 0.1-1",
"line 1",
"line 2",
],
),
),
],
)
def test_add_changelog_entry(
spec_minimal, rpmdev_packager_available, entry, author, email, timestamp, result
):
if not rpmdev_packager_available:
flexmock(subprocess).should_receive("check_output").with_args(
"rpmdev-packager"
).and_raise(FileNotFoundError)
elif author is None:
flexmock(subprocess).should_receive("check_output").with_args(
"rpmdev-packager"
).and_return(b"John Doe <john@doe.net>")
spec = Specfile(spec_minimal)
if not rpmdev_packager_available:
with pytest.raises(SpecfileException):
spec.add_changelog_entry(entry, author, email, timestamp)
else:
spec.add_changelog_entry(entry, author, email, timestamp)
with spec.sections() as sections:
assert sections.changelog[: len(result)] == result
@pytest.mark.parametrize(
"version, release",
[
("0.2", "3"),
("67", "1"),
("1.4.6", "0.1rc5"),
],
)
def test_set_version_and_release(spec_minimal, version, release):
spec = Specfile(spec_minimal)
spec.set_version_and_release(version, release)
assert spec.version == version
assert spec.release == release
assert spec.raw_release.startswith(release)
with spec.tags() as tags:
assert tags.version.value == spec.version
assert tags.release.value == spec.raw_release
assert spec._spec.sourceHeader[rpm.RPMTAG_VERSION] == spec.expanded_version
assert spec._spec.sourceHeader[rpm.RPMTAG_RELEASE] == spec.expanded_raw_release
spec.raw_release = release
with spec.tags() as tags:
assert tags.release.value == release
assert spec._spec.sourceHeader[rpm.RPMTAG_RELEASE] == spec.expanded_raw_release
@pytest.mark.skipif(
rpm.__version__ < "4.16", reason="%autochangelog requires rpm 4.16 or higher"
)
def test_autochangelog(spec_rpmautospec):
spec = Specfile(spec_rpmautospec)
assert spec.has_autochangelog
with spec.changelog() as changelog:
assert len(changelog) == 0
with spec.sections() as sections:
changelog = sections.changelog.copy()
spec.add_changelog_entry("test")
with spec.sections() as sections:
assert sections.changelog == changelog
| 32.657407 | 95 | 0.595407 |
bdc8e37e0b1dae37410ec590cd90de4aac00570d | 37,058 | py | Python | sdks/python/apache_beam/transforms/trigger_test.py | dexterchan/beam | 01e500c2dd0d699aea0434154b69fd59d824700f | ["Apache-2.0"] | 1 | 2020-01-08T19:17:06.000Z | 2020-01-08T19:17:06.000Z | sdks/python/apache_beam/transforms/trigger_test.py | dexterchan/beam | 01e500c2dd0d699aea0434154b69fd59d824700f | ["Apache-2.0"] | 1 | 2019-09-16T06:43:55.000Z | 2019-09-16T06:43:55.000Z | sdks/python/apache_beam/transforms/trigger_test.py | dexterchan/beam | 01e500c2dd0d699aea0434154b69fd59d824700f | ["Apache-2.0"] | 1 | 2020-02-09T02:51:50.000Z | 2020-02-09T02:51:50.000Z |
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Unit tests for the triggering classes."""
from __future__ import absolute_import
import collections
import json
import os.path
import pickle
import random
import unittest
from builtins import range
from builtins import zip
# patches unittest.TestCase to be python3 compatible
import future.tests.base # pylint: disable=unused-import
import yaml
import apache_beam as beam
from apache_beam import coders
from apache_beam.options.pipeline_options import PipelineOptions
from apache_beam.options.pipeline_options import StandardOptions
from apache_beam.portability import common_urns
from apache_beam.runners import pipeline_context
from apache_beam.runners.direct.clock import TestClock
from apache_beam.testing.test_pipeline import TestPipeline
from apache_beam.testing.test_stream import TestStream
from apache_beam.testing.util import assert_that
from apache_beam.testing.util import equal_to
from apache_beam.transforms import ptransform
from apache_beam.transforms import trigger
from apache_beam.transforms.core import Windowing
from apache_beam.transforms.trigger import AccumulationMode
from apache_beam.transforms.trigger import AfterAll
from apache_beam.transforms.trigger import AfterAny
from apache_beam.transforms.trigger import AfterCount
from apache_beam.transforms.trigger import AfterEach
from apache_beam.transforms.trigger import AfterProcessingTime
from apache_beam.transforms.trigger import AfterWatermark
from apache_beam.transforms.trigger import DefaultTrigger
from apache_beam.transforms.trigger import GeneralTriggerDriver
from apache_beam.transforms.trigger import InMemoryUnmergedState
from apache_beam.transforms.trigger import Repeatedly
from apache_beam.transforms.trigger import TriggerFn
from apache_beam.transforms.window import FixedWindows
from apache_beam.transforms.window import IntervalWindow
from apache_beam.transforms.window import Sessions
from apache_beam.transforms.window import TimestampCombiner
from apache_beam.transforms.window import TimestampedValue
from apache_beam.transforms.window import WindowedValue
from apache_beam.transforms.window import WindowFn
from apache_beam.utils.timestamp import MAX_TIMESTAMP
from apache_beam.utils.timestamp import MIN_TIMESTAMP
from apache_beam.utils.timestamp import Duration
from apache_beam.utils.windowed_value import PaneInfoTiming
class CustomTimestampingFixedWindowsWindowFn(FixedWindows):
"""WindowFn for testing custom timestamping."""
def get_transformed_output_time(self, unused_window, input_timestamp):
return input_timestamp + 100
class TriggerTest(unittest.TestCase):
def run_trigger_simple(self, window_fn, trigger_fn, accumulation_mode,
timestamped_data, expected_panes, *groupings,
**kwargs):
# Groupings is a list of integers indicating the (uniform) size of bundles
# to try. For example, if timestamped_data has elements [a, b, c, d, e]
# then groupings=(5, 2) would first run the test with everything in the same
# bundle, and then re-run the test with bundling [a, b], [c, d], [e].
# A negative value will reverse the order, e.g. -2 would result in bundles
# [e, d], [c, b], [a]. This is useful for deterministic triggers in testing
# that the output is not a function of ordering or bundling.
# If empty, defaults to bundles of size 1 in the given order.
late_data = kwargs.pop('late_data', [])
assert not kwargs
def bundle_data(data, size):
if size < 0:
data = list(data)[::-1]
size = -size
bundle = []
for timestamp, elem in data:
windows = window_fn.assign(WindowFn.AssignContext(timestamp, elem))
bundle.append(WindowedValue(elem, timestamp, windows))
if len(bundle) == size:
yield bundle
bundle = []
if bundle:
yield bundle
if not groupings:
groupings = [1]
for group_by in groupings:
self.run_trigger(window_fn, trigger_fn, accumulation_mode,
bundle_data(timestamped_data, group_by),
bundle_data(late_data, group_by),
expected_panes)
def run_trigger(self, window_fn, trigger_fn, accumulation_mode,
bundles, late_bundles,
expected_panes):
actual_panes = collections.defaultdict(list)
allowed_lateness = Duration(micros=int(
common_urns.constants.MAX_TIMESTAMP_MILLIS.constant)*1000)
driver = GeneralTriggerDriver(
Windowing(window_fn, trigger_fn, accumulation_mode,
allowed_lateness=allowed_lateness), TestClock())
state = InMemoryUnmergedState()
for bundle in bundles:
for wvalue in driver.process_elements(state, bundle, MIN_TIMESTAMP,
MIN_TIMESTAMP):
window, = wvalue.windows
self.assertEqual(window.max_timestamp(), wvalue.timestamp)
actual_panes[window].append(set(wvalue.value))
while state.timers:
for timer_window, (name, time_domain, timestamp) in (
state.get_and_clear_timers()):
for wvalue in driver.process_timer(
timer_window, name, time_domain, timestamp, state, MIN_TIMESTAMP):
window, = wvalue.windows
self.assertEqual(window.max_timestamp(), wvalue.timestamp)
actual_panes[window].append(set(wvalue.value))
for bundle in late_bundles:
for wvalue in driver.process_elements(state, bundle, MAX_TIMESTAMP,
MAX_TIMESTAMP):
window, = wvalue.windows
self.assertEqual(window.max_timestamp(), wvalue.timestamp)
actual_panes[window].append(set(wvalue.value))
while state.timers:
for timer_window, (name, time_domain, timestamp) in (
state.get_and_clear_timers()):
for wvalue in driver.process_timer(
timer_window, name, time_domain, timestamp, state, MAX_TIMESTAMP):
window, = wvalue.windows
self.assertEqual(window.max_timestamp(), wvalue.timestamp)
actual_panes[window].append(set(wvalue.value))
self.assertEqual(expected_panes, actual_panes)
def test_fixed_watermark(self):
self.run_trigger_simple(
FixedWindows(10), # pyformat break
AfterWatermark(),
AccumulationMode.ACCUMULATING,
[(1, 'a'), (2, 'b'), (13, 'c')],
{IntervalWindow(0, 10): [set('ab')],
IntervalWindow(10, 20): [set('c')]},
1,
2,
3,
-3,
-2,
-1)
def test_fixed_watermark_with_early(self):
self.run_trigger_simple(
FixedWindows(10), # pyformat break
AfterWatermark(early=AfterCount(2)),
AccumulationMode.ACCUMULATING,
[(1, 'a'), (2, 'b'), (3, 'c')],
{IntervalWindow(0, 10): [set('ab'), set('abc')]},
2)
self.run_trigger_simple(
FixedWindows(10), # pyformat break
AfterWatermark(early=AfterCount(2)),
AccumulationMode.ACCUMULATING,
[(1, 'a'), (2, 'b'), (3, 'c')],
{IntervalWindow(0, 10): [set('abc'), set('abc')]},
3)
def test_fixed_watermark_with_early_late(self):
self.run_trigger_simple(
FixedWindows(100), # pyformat break
AfterWatermark(early=AfterCount(3),
late=AfterCount(2)),
AccumulationMode.DISCARDING,
zip(range(9), 'abcdefghi'),
{IntervalWindow(0, 100): [
set('abcd'), set('efgh'), # early
set('i'), # on time
set('vw'), set('xy') # late
]},
2,
late_data=zip(range(5), 'vwxyz'))
def test_sessions_watermark_with_early_late(self):
self.run_trigger_simple(
Sessions(10), # pyformat break
AfterWatermark(early=AfterCount(2),
late=AfterCount(1)),
AccumulationMode.ACCUMULATING,
[(1, 'a'), (15, 'b'), (7, 'c'), (30, 'd')],
{
IntervalWindow(1, 25): [
set('abc'), # early
set('abc'), # on time
set('abcxy') # late
],
IntervalWindow(30, 40): [
set('d'), # on time
],
IntervalWindow(1, 40): [
set('abcdxyz') # late
],
},
2,
late_data=[(1, 'x'), (2, 'y'), (21, 'z')])
def test_fixed_after_count(self):
self.run_trigger_simple(
FixedWindows(10), # pyformat break
AfterCount(2),
AccumulationMode.ACCUMULATING,
[(1, 'a'), (2, 'b'), (3, 'c'), (11, 'z')],
{IntervalWindow(0, 10): [set('ab')]},
1,
2)
self.run_trigger_simple(
FixedWindows(10), # pyformat break
AfterCount(2),
AccumulationMode.ACCUMULATING,
[(1, 'a'), (2, 'b'), (3, 'c'), (11, 'z')],
{IntervalWindow(0, 10): [set('abc')]},
3,
4)
def test_fixed_after_first(self):
self.run_trigger_simple(
FixedWindows(10), # pyformat break
AfterAny(AfterCount(2), AfterWatermark()),
AccumulationMode.ACCUMULATING,
[(1, 'a'), (2, 'b'), (3, 'c')],
{IntervalWindow(0, 10): [set('ab')]},
1,
2)
self.run_trigger_simple(
FixedWindows(10), # pyformat break
AfterAny(AfterCount(5), AfterWatermark()),
AccumulationMode.ACCUMULATING,
[(1, 'a'), (2, 'b'), (3, 'c')],
{IntervalWindow(0, 10): [set('abc')]},
1,
2,
late_data=[(1, 'x'), (2, 'y'), (3, 'z')])
def test_repeatedly_after_first(self):
self.run_trigger_simple(
FixedWindows(100), # pyformat break
Repeatedly(AfterAny(AfterCount(3), AfterWatermark())),
AccumulationMode.ACCUMULATING,
zip(range(7), 'abcdefg'),
{IntervalWindow(0, 100): [
set('abc'),
set('abcdef'),
set('abcdefg'),
set('abcdefgx'),
set('abcdefgxy'),
set('abcdefgxyz')]},
1,
late_data=zip(range(3), 'xyz'))
def test_sessions_after_all(self):
self.run_trigger_simple(
Sessions(10), # pyformat break
AfterAll(AfterCount(2), AfterWatermark()),
AccumulationMode.ACCUMULATING,
[(1, 'a'), (2, 'b'), (3, 'c')],
{IntervalWindow(1, 13): [set('abc')]},
1,
2)
self.run_trigger_simple(
Sessions(10), # pyformat break
AfterAll(AfterCount(5), AfterWatermark()),
AccumulationMode.ACCUMULATING,
[(1, 'a'), (2, 'b'), (3, 'c')],
{IntervalWindow(1, 13): [set('abcxy')]},
1,
2,
late_data=[(1, 'x'), (2, 'y'), (3, 'z')])
def test_sessions_default(self):
self.run_trigger_simple(
Sessions(10), # pyformat break
DefaultTrigger(),
AccumulationMode.ACCUMULATING,
[(1, 'a'), (2, 'b')],
{IntervalWindow(1, 12): [set('ab')]},
1,
2,
-2,
-1)
self.run_trigger_simple(
Sessions(10), # pyformat break
AfterWatermark(),
AccumulationMode.ACCUMULATING,
[(1, 'a'), (2, 'b'), (15, 'c'), (16, 'd'), (30, 'z'), (9, 'e'),
(10, 'f'), (30, 'y')],
{IntervalWindow(1, 26): [set('abcdef')],
IntervalWindow(30, 40): [set('yz')]},
1,
2,
3,
4,
5,
6,
-4,
-2,
-1)
def test_sessions_watermark(self):
self.run_trigger_simple(
Sessions(10), # pyformat break
AfterWatermark(),
AccumulationMode.ACCUMULATING,
[(1, 'a'), (2, 'b')],
{IntervalWindow(1, 12): [set('ab')]},
1,
2,
-2,
-1)
def test_sessions_after_count(self):
self.run_trigger_simple(
Sessions(10), # pyformat break
AfterCount(2),
AccumulationMode.ACCUMULATING,
[(1, 'a'), (15, 'b'), (6, 'c'), (30, 's'), (31, 't'), (50, 'z'),
(50, 'y')],
{IntervalWindow(1, 25): [set('abc')],
IntervalWindow(30, 41): [set('st')],
IntervalWindow(50, 60): [set('yz')]},
1,
2,
3)
def test_sessions_repeatedly_after_count(self):
self.run_trigger_simple(
Sessions(10), # pyformat break
Repeatedly(AfterCount(2)),
AccumulationMode.ACCUMULATING,
[(1, 'a'), (15, 'b'), (6, 'c'), (2, 'd'), (7, 'e')],
{IntervalWindow(1, 25): [set('abc'), set('abcde')]},
1,
3)
self.run_trigger_simple(
Sessions(10), # pyformat break
Repeatedly(AfterCount(2)),
AccumulationMode.DISCARDING,
[(1, 'a'), (15, 'b'), (6, 'c'), (2, 'd'), (7, 'e')],
{IntervalWindow(1, 25): [set('abc'), set('de')]},
1,
3)
def test_sessions_after_each(self):
self.run_trigger_simple(
Sessions(10), # pyformat break
AfterEach(AfterCount(2), AfterCount(3)),
AccumulationMode.ACCUMULATING,
zip(range(10), 'abcdefghij'),
{IntervalWindow(0, 11): [set('ab')],
IntervalWindow(0, 15): [set('abcdef')]},
2)
self.run_trigger_simple(
Sessions(10), # pyformat break
Repeatedly(AfterEach(AfterCount(2), AfterCount(3))),
AccumulationMode.ACCUMULATING,
zip(range(10), 'abcdefghij'),
{IntervalWindow(0, 11): [set('ab')],
IntervalWindow(0, 15): [set('abcdef')],
IntervalWindow(0, 17): [set('abcdefgh')]},
2)
def test_picklable_output(self):
global_window = (trigger.GlobalWindow(),)
driver = trigger.BatchGlobalTriggerDriver()
unpicklable = (WindowedValue(k, 0, global_window)
for k in range(10))
with self.assertRaises(TypeError):
pickle.dumps(unpicklable)
for unwindowed in driver.process_elements(None, unpicklable, None, None):
self.assertEqual(pickle.loads(pickle.dumps(unwindowed)).value,
list(range(10)))
class RunnerApiTest(unittest.TestCase):
def test_trigger_encoding(self):
for trigger_fn in (
DefaultTrigger(),
AfterAll(AfterCount(1), AfterCount(10)),
AfterAny(AfterCount(10), AfterCount(100)),
AfterWatermark(early=AfterCount(1000)),
AfterWatermark(early=AfterCount(1000), late=AfterCount(1)),
Repeatedly(AfterCount(100)),
trigger.OrFinally(AfterCount(3), AfterCount(10))):
context = pipeline_context.PipelineContext()
self.assertEqual(
trigger_fn,
TriggerFn.from_runner_api(trigger_fn.to_runner_api(context), context))
class TriggerPipelineTest(unittest.TestCase):
def test_after_count(self):
with TestPipeline() as p:
def construct_timestamped(k_t):
return TimestampedValue((k_t[0], k_t[1]), k_t[1])
def format_result(k_v):
return ('%s-%s' % (k_v[0], len(k_v[1])), set(k_v[1]))
result = (p
| beam.Create([1, 2, 3, 4, 5, 10, 11])
| beam.FlatMap(lambda t: [('A', t), ('B', t + 5)])
| beam.Map(construct_timestamped)
| beam.WindowInto(FixedWindows(10), trigger=AfterCount(3),
accumulation_mode=AccumulationMode.DISCARDING)
| beam.GroupByKey()
| beam.Map(format_result))
assert_that(result, equal_to(
list(
{
'A-5': {1, 2, 3, 4, 5},
# A-10, A-11 never emitted due to AfterCount(3) never firing.
'B-4': {6, 7, 8, 9},
'B-3': {10, 15, 16},
}.items()
)))
def test_multiple_accumulating_firings(self):
# PCollection will contain elements from 1 to 10.
elements = [i for i in range(1, 11)]
ts = TestStream().advance_watermark_to(0)
for i in elements:
ts.add_elements([('key', str(i))])
if i % 5 == 0:
ts.advance_watermark_to(i)
ts.advance_processing_time(5)
ts.advance_watermark_to_infinity()
options = PipelineOptions()
options.view_as(StandardOptions).streaming = True
with TestPipeline(options=options) as p:
records = (p
| ts
| beam.WindowInto(
FixedWindows(10),
accumulation_mode=trigger.AccumulationMode.ACCUMULATING,
trigger=AfterWatermark(
early=AfterAll(
AfterCount(1), AfterProcessingTime(5))
))
| beam.GroupByKey()
| beam.FlatMap(lambda x: x[1]))
# The trigger should fire twice. Once after 5 seconds, and once after 10.
# The firings should accumulate the output.
first_firing = [str(i) for i in elements if i <= 5]
second_firing = [str(i) for i in elements]
assert_that(records, equal_to(first_firing + second_firing))
class TranscriptTest(unittest.TestCase):
# We must prepend an underscore to this name so that the open-source unittest
# runner does not execute this method directly as a test.
@classmethod
def _create_test(cls, spec):
counter = 0
name = spec.get('name', 'unnamed')
unique_name = 'test_' + name
while hasattr(cls, unique_name):
counter += 1
unique_name = 'test_%s_%d' % (name, counter)
test_method = lambda self: self._run_log_test(spec)
test_method.__name__ = unique_name
test_method.__test__ = True
setattr(cls, unique_name, test_method)
# We must prepend an underscore to this name so that the open-source unittest
# runner does not execute this method directly as a test.
@classmethod
def _create_tests(cls, transcript_filename):
for spec in yaml.load_all(open(transcript_filename)):
cls._create_test(spec)
def _run_log_test(self, spec):
if 'error' in spec:
self.assertRaisesRegex(
Exception, spec['error'], self._run_log, spec)
else:
self._run_log(spec)
def _run_log(self, spec):
def parse_int_list(s):
"""Parses strings like '[1, 2, 3]'."""
s = s.strip()
assert s[0] == '[' and s[-1] == ']', s
if not s[1:-1].strip():
return []
return [int(x) for x in s[1:-1].split(',')]
def split_args(s):
"""Splits 'a, b, [c, d]' into ['a', 'b', '[c, d]']."""
args = []
start = 0
depth = 0
for ix in range(len(s)):
c = s[ix]
if c in '({[':
depth += 1
elif c in ')}]':
depth -= 1
elif c == ',' and depth == 0:
args.append(s[start:ix].strip())
start = ix + 1
assert depth == 0, s
args.append(s[start:].strip())
return args
def parse(s, names):
"""Parse (recursive) 'Foo(arg, kw=arg)' for Foo in the names dict."""
s = s.strip()
if s in names:
return names[s]
elif s[0] == '[':
return parse_int_list(s)
elif '(' in s:
assert s[-1] == ')', s
callee = parse(s[:s.index('(')], names)
posargs = []
kwargs = {}
for arg in split_args(s[s.index('(') + 1:-1]):
if '=' in arg:
kw, value = arg.split('=', 1)
kwargs[kw] = parse(value, names)
else:
posargs.append(parse(arg, names))
return callee(*posargs, **kwargs)
else:
try:
return int(s)
except ValueError:
raise ValueError('Unknown function: %s' % s)
def parse_fn(s, names):
"""Like parse(), but implicitly calls no-arg constructors."""
fn = parse(s, names)
if isinstance(fn, type):
return fn()
return fn
# pylint: disable=wrong-import-order, wrong-import-position
from apache_beam.transforms import window as window_module
# pylint: enable=wrong-import-order, wrong-import-position
window_fn_names = dict(window_module.__dict__)
window_fn_names.update({'CustomTimestampingFixedWindowsWindowFn':
CustomTimestampingFixedWindowsWindowFn})
trigger_names = {'Default': DefaultTrigger}
trigger_names.update(trigger.__dict__)
window_fn = parse_fn(spec.get('window_fn', 'GlobalWindows'),
window_fn_names)
trigger_fn = parse_fn(spec.get('trigger_fn', 'Default'), trigger_names)
accumulation_mode = getattr(
AccumulationMode, spec.get('accumulation_mode', 'ACCUMULATING').upper())
timestamp_combiner = getattr(
TimestampCombiner,
spec.get('timestamp_combiner', 'OUTPUT_AT_EOW').upper())
allowed_lateness = spec.get('allowed_lateness', 0.000)
def only_element(xs):
x, = list(xs)
return x
transcript = [only_element(line.items()) for line in spec['transcript']]
self._execute(
window_fn, trigger_fn, accumulation_mode, timestamp_combiner,
allowed_lateness, transcript, spec)
def _windowed_value_info(windowed_value):
# Currently some runners operate at the millisecond level, and some at the
# microsecond level. Trigger transcript timestamps are expressed as
# integral units of the finest granularity, whatever that may be.
# In these tests we interpret them as integral seconds and then truncate
# the results to integral seconds to allow for portability across
# different sub-second resolutions.
window, = windowed_value.windows
return {
'window': [int(window.start), int(window.max_timestamp())],
'values': sorted(windowed_value.value),
'timestamp': int(windowed_value.timestamp),
'index': windowed_value.pane_info.index,
'nonspeculative_index': windowed_value.pane_info.nonspeculative_index,
'early': windowed_value.pane_info.timing == PaneInfoTiming.EARLY,
'late': windowed_value.pane_info.timing == PaneInfoTiming.LATE,
'final': windowed_value.pane_info.is_last,
}
def _windowed_value_info_map_fn(
k, vs,
window=beam.DoFn.WindowParam,
t=beam.DoFn.TimestampParam,
p=beam.DoFn.PaneInfoParam):
return (
k,
_windowed_value_info(WindowedValue(
vs, windows=[window], timestamp=t, pane_info=p)))
def _windowed_value_info_check(actual, expected, key=None):
key_string = ' for %s' % key if key else ''
def format(panes):
return '\n[%s]\n' % '\n '.join(str(pane) for pane in sorted(
panes, key=lambda pane: pane.get('timestamp', None)))
if len(actual) > len(expected):
raise AssertionError(
'Unexpected output%s: expected %s but got %s' % (
key_string, format(expected), format(actual)))
elif len(expected) > len(actual):
raise AssertionError(
'Unmatched output%s: expected %s but got %s' % (
key_string, format(expected), format(actual)))
else:
def diff(actual, expected):
for key in sorted(expected.keys(), reverse=True):
if key in actual:
if actual[key] != expected[key]:
return key
for output in actual:
diffs = [diff(output, pane) for pane in expected]
if all(diffs):
raise AssertionError(
'Unmatched output%s: %s not found in %s (diffs in %s)' % (
key_string, output, format(expected), diffs))
class _ConcatCombineFn(beam.CombineFn):
create_accumulator = lambda self: []
add_input = lambda self, acc, element: acc.append(element) or acc
merge_accumulators = lambda self, accs: sum(accs, [])
extract_output = lambda self, acc: acc
class TriggerDriverTranscriptTest(TranscriptTest):
def _execute(
self, window_fn, trigger_fn, accumulation_mode, timestamp_combiner,
allowed_lateness, transcript, unused_spec):
driver = GeneralTriggerDriver(
Windowing(window_fn, trigger_fn, accumulation_mode,
timestamp_combiner, allowed_lateness), TestClock())
state = InMemoryUnmergedState()
output = []
watermark = MIN_TIMESTAMP
def fire_timers():
to_fire = state.get_and_clear_timers(watermark)
while to_fire:
for timer_window, (name, time_domain, t_timestamp) in to_fire:
for wvalue in driver.process_timer(
timer_window, name, time_domain, t_timestamp, state):
output.append(_windowed_value_info(wvalue))
to_fire = state.get_and_clear_timers(watermark)
for action, params in transcript:
if action != 'expect':
# Fail if we have output that was not expected in the transcript.
self.assertEqual(
[], output, msg='Unexpected output: %s before %s: %s' % (
output, action, params))
if action == 'input':
bundle = [
WindowedValue(t, t, window_fn.assign(WindowFn.AssignContext(t, t)))
for t in params]
output = [
_windowed_value_info(wv)
for wv in driver.process_elements(state, bundle, watermark,
watermark)]
fire_timers()
elif action == 'watermark':
watermark = params
fire_timers()
elif action == 'expect':
for expected_output in params:
for candidate in output:
if all(candidate[k] == expected_output[k]
for k in candidate if k in expected_output):
output.remove(candidate)
break
else:
self.fail('Unmatched output %s in %s' % (expected_output, output))
elif action == 'state':
# TODO(robertwb): Implement once we support allowed lateness.
pass
else:
self.fail('Unknown action: ' + action)
# Fail if we have output that was not expected in the transcript.
self.assertEqual([], output, msg='Unexpected output: %s' % output)
class BaseTestStreamTranscriptTest(TranscriptTest):
"""A suite of TestStream-based tests based on trigger transcript entries.
"""
def _execute(
self, window_fn, trigger_fn, accumulation_mode, timestamp_combiner,
allowed_lateness, transcript, spec):
runner_name = TestPipeline().runner.__class__.__name__
if runner_name in spec.get('broken_on', ()):
self.skipTest('Known to be broken on %s' % runner_name)
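    # With the default trigger and accumulating mode, the final panes do not depend on element order or bundling, so permuted and re-bundled copies of the input can be tested as well.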
is_order_agnostic = (
isinstance(trigger_fn, DefaultTrigger)
and accumulation_mode == AccumulationMode.ACCUMULATING)
if is_order_agnostic:
reshuffle_seed = random.randrange(1 << 20)
keys = [
u'original', u'reversed', u'reshuffled(%s)' % reshuffle_seed,
u'one-element-bundles', u'one-element-bundles-reversed',
u'two-element-bundles']
else:
keys = [u'key1', u'key2']
# Elements are encoded as a json strings to allow other languages to
# decode elements while executing the test stream.
# TODO(BEAM-8600): Eliminate these gymnastics.
test_stream = TestStream(coder=coders.StrUtf8Coder()).with_output_types(str)
for action, params in transcript:
if action == 'expect':
test_stream.add_elements([json.dumps(('expect', params))])
else:
test_stream.add_elements([json.dumps(('expect', []))])
if action == 'input':
def keyed(key, values):
return [json.dumps(('input', (key, v))) for v in values]
if is_order_agnostic:
# Must match keys above.
test_stream.add_elements(keyed('original', params))
test_stream.add_elements(keyed('reversed', reversed(params)))
r = random.Random(reshuffle_seed)
reshuffled = list(params)
r.shuffle(reshuffled)
test_stream.add_elements(keyed(
'reshuffled(%s)' % reshuffle_seed, reshuffled))
for v in params:
test_stream.add_elements(keyed('one-element-bundles', [v]))
for v in reversed(params):
test_stream.add_elements(
keyed('one-element-bundles-reversed', [v]))
for ix in range(0, len(params), 2):
test_stream.add_elements(
keyed('two-element-bundles', params[ix:ix+2]))
else:
for key in keys:
test_stream.add_elements(keyed(key, params))
elif action == 'watermark':
test_stream.advance_watermark_to(params)
elif action == 'clock':
test_stream.advance_processing_time(params)
elif action == 'state':
pass # Requires inspection of implementation details.
else:
raise ValueError('Unexpected action: %s' % action)
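    # A trailing empty 'expect' lets the Check DoFn assert that no unmatched actual output is left over.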
test_stream.add_elements([json.dumps(('expect', []))])
test_stream.advance_watermark_to_infinity()
read_test_stream = test_stream | beam.Map(json.loads)
class Check(beam.DoFn):
"""A StatefulDoFn that verifies outputs are produced as expected.
This DoFn takes in two kinds of inputs, actual outputs and
expected outputs. When an actual output is received, it is buffered
into state, and when an expected output is received, this buffered
state is retrieved and compared against the expected value(s) to ensure
they match.
The key is ignored, but all items must be on the same key to share state.
"""
def __init__(self, allow_out_of_order=True):
# Some runners don't support cross-stage TestStream semantics.
self.allow_out_of_order = allow_out_of_order
def process(
self,
element,
seen=beam.DoFn.StateParam(
beam.transforms.userstate.BagStateSpec(
'seen',
beam.coders.FastPrimitivesCoder())),
expected=beam.DoFn.StateParam(
beam.transforms.userstate.BagStateSpec(
'expected',
beam.coders.FastPrimitivesCoder()))):
key, (action, data) = element
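        # Some runners deliver the 'expect' marker before the matching actual outputs; buffer expectations and replay them once enough actual outputs have been seen.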
if self.allow_out_of_order:
if action == 'expect' and not list(seen.read()):
if data:
expected.add(data)
return
elif action == 'actual' and list(expected.read()):
seen.add(data)
all_data = list(seen.read())
all_expected = list(expected.read())
if len(all_data) == len(all_expected[0]):
expected.clear()
for expect in all_expected[1:]:
expected.add(expect)
action, data = 'expect', all_expected[0]
else:
return
if action == 'actual':
seen.add(data)
elif action == 'expect':
actual = list(seen.read())
seen.clear()
_windowed_value_info_check(actual, data, key)
else:
raise ValueError('Unexpected action: %s' % action)
@ptransform.ptransform_fn
def CheckAggregation(inputs_and_expected, aggregation):
# Split the test stream into a branch of to-be-processed elements, and
# a branch of expected results.
inputs, expected = (
inputs_and_expected
| beam.MapTuple(
lambda tag, value: beam.pvalue.TaggedOutput(tag, value),
).with_outputs('input', 'expect'))
# Process the inputs with the given windowing to produce actual outputs.
outputs = (
inputs
| beam.MapTuple(
lambda key, value: TimestampedValue((key, value), value))
| beam.WindowInto(
window_fn,
trigger=trigger_fn,
accumulation_mode=accumulation_mode,
timestamp_combiner=timestamp_combiner,
allowed_lateness=allowed_lateness)
| aggregation
| beam.MapTuple(_windowed_value_info_map_fn)
# Place outputs back into the global window to allow flattening
# and share a single state in Check.
| 'Global' >> beam.WindowInto(beam.transforms.window.GlobalWindows()))
# Feed both the expected and actual outputs to Check() for comparison.
tagged_expected = (
expected | beam.FlatMap(
lambda value: [(key, ('expect', value)) for key in keys]))
tagged_outputs = (
outputs | beam.MapTuple(lambda key, value: (key, ('actual', value))))
# pylint: disable=expression-not-assigned
([tagged_expected, tagged_outputs]
| beam.Flatten()
| beam.ParDo(Check(self.allow_out_of_order)))
with TestPipeline() as p:
# TODO(BEAM-8601): Pass this during pipeline construction.
p.options.view_as(StandardOptions).streaming = True
# We can have at most one test stream per pipeline, so we share it.
inputs_and_expected = p | read_test_stream
_ = inputs_and_expected | CheckAggregation(beam.GroupByKey())
_ = inputs_and_expected | CheckAggregation(beam.CombinePerKey(
_ConcatCombineFn()))
class TestStreamTranscriptTest(BaseTestStreamTranscriptTest):
allow_out_of_order = False
class WeakTestStreamTranscriptTest(BaseTestStreamTranscriptTest):
allow_out_of_order = True
class BatchTranscriptTest(TranscriptTest):
def _execute(
self, window_fn, trigger_fn, accumulation_mode, timestamp_combiner,
allowed_lateness, transcript, spec):
if timestamp_combiner == TimestampCombiner.OUTPUT_AT_EARLIEST_TRANSFORMED:
self.skipTest(
'Non-fnapi timestamp combiner: %s' % spec.get('timestamp_combiner'))
if accumulation_mode != AccumulationMode.ACCUMULATING:
self.skipTest('Batch mode only makes sense for accumulating.')
watermark = MIN_TIMESTAMP
for action, params in transcript:
if action == 'watermark':
watermark = params
elif action == 'input':
if any(t <= watermark for t in params):
self.skipTest('Batch mode never has late data.')
inputs = sum([vs for action, vs in transcript if action == 'input'], [])
final_panes_by_window = {}
for action, params in transcript:
if action == 'expect':
for expected in params:
trimmed = {}
for field in ('window', 'values', 'timestamp'):
if field in expected:
trimmed[field] = expected[field]
final_panes_by_window[tuple(expected['window'])] = trimmed
final_panes = list(final_panes_by_window.values())
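    # For merging window fns, windows that get merged away never emit a final pane, so drop them from the expected output.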
if window_fn.is_merging():
merged_away = set()
class MergeContext(WindowFn.MergeContext):
def merge(_, to_be_merged, merge_result):
for window in to_be_merged:
if window != merge_result:
merged_away.add(window)
all_windows = [IntervalWindow(*pane['window']) for pane in final_panes]
window_fn.merge(MergeContext(all_windows))
final_panes = [
pane for pane in final_panes
if IntervalWindow(*pane['window']) not in merged_away]
with TestPipeline() as p:
input_pc = (
p
| beam.Create(inputs)
| beam.Map(lambda t: TimestampedValue(('key', t), t))
| beam.WindowInto(
window_fn,
trigger=trigger_fn,
accumulation_mode=accumulation_mode,
timestamp_combiner=timestamp_combiner,
allowed_lateness=allowed_lateness))
grouped = input_pc | 'Grouped' >> (
beam.GroupByKey()
| beam.MapTuple(_windowed_value_info_map_fn)
| beam.MapTuple(lambda _, value: value))
combined = input_pc | 'Combined' >> (
beam.CombinePerKey(_ConcatCombineFn())
| beam.MapTuple(_windowed_value_info_map_fn)
| beam.MapTuple(lambda _, value: value))
assert_that(
grouped,
lambda actual: _windowed_value_info_check(actual, final_panes),
label='CheckGrouped')
assert_that(
combined,
lambda actual: _windowed_value_info_check(actual, final_panes),
label='CheckCombined')
TRANSCRIPT_TEST_FILE = os.path.join(
os.path.dirname(__file__), '..', 'testing', 'data',
'trigger_transcripts.yaml')
if os.path.exists(TRANSCRIPT_TEST_FILE):
TriggerDriverTranscriptTest._create_tests(TRANSCRIPT_TEST_FILE)
TestStreamTranscriptTest._create_tests(TRANSCRIPT_TEST_FILE)
WeakTestStreamTranscriptTest._create_tests(TRANSCRIPT_TEST_FILE)
BatchTranscriptTest._create_tests(TRANSCRIPT_TEST_FILE)
if __name__ == '__main__':
unittest.main()
| 36.40275
| 80
| 0.618571
|
b63d6ab206eb531809a47a6c96f010262e52afea
| 415
|
py
|
Python
|
chat/migrations/0007_chatroom_name.py
|
yccye/CT_AI_web
|
267553d3aaaef78f7dbdd652c0f1868ec60862c2
|
[
"MulanPSL-1.0"
] | 5
|
2021-05-25T07:53:36.000Z
|
2021-11-23T13:04:51.000Z
|
chat/migrations/0007_chatroom_name.py
|
yccye/CT_AI_web
|
267553d3aaaef78f7dbdd652c0f1868ec60862c2
|
[
"MulanPSL-1.0"
] | 1
|
2021-11-07T14:41:52.000Z
|
2021-11-07T15:34:28.000Z
|
chat/migrations/0007_chatroom_name.py
|
yccye/CT_AI_web
|
267553d3aaaef78f7dbdd652c0f1868ec60862c2
|
[
"MulanPSL-1.0"
] | 2
|
2021-11-07T13:29:13.000Z
|
2022-03-10T12:13:04.000Z
|
# Generated by Django 3.1.7 on 2021-05-28 11:27
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('chat', '0006_auto_20210527_1934'),
]
operations = [
migrations.AddField(
model_name='chatroom',
name='name',
            field=models.CharField(max_length=255, null=True, verbose_name='Chat room name'),
),
]
| 21.842105
| 84
| 0.607229
|
d7875d47de5f28f764f045da9ee42995cbc757df
| 1,191
|
py
|
Python
|
main.py
|
avidale/alice-stretching
|
619c66ff14d5c2712b2fce82d84c05ebb8d3e545
|
[
"MIT"
] | 2
|
2021-01-09T13:28:45.000Z
|
2022-01-11T18:29:43.000Z
|
main.py
|
avidale/alice-stretching
|
619c66ff14d5c2712b2fce82d84c05ebb8d3e545
|
[
"MIT"
] | null | null | null |
main.py
|
avidale/alice-stretching
|
619c66ff14d5c2712b2fce82d84c05ebb8d3e545
|
[
"MIT"
] | 1
|
2021-01-14T10:15:26.000Z
|
2021-01-14T10:15:26.000Z
|
import logging
import os
import sentry_sdk
import tgalice
from tgalice.dialog import Context
from dm import StretchDM
logging.basicConfig(level=logging.DEBUG)
if os.getenv('SENTRY_DSN', None) is not None:
sentry_sdk.init(os.environ['SENTRY_DSN'])
class CustomLogger(tgalice.storage.message_logging.MongoMessageLogger):
def should_ignore_message(self, context: Context = None, **kwargs) -> bool:
if super(CustomLogger, self).should_ignore_message(context=context, **kwargs):
return True
if context.yandex and context.yandex.request and context.yandex.request.type == 'Show.Pull':
return True
return False
manager = StretchDM()
db = tgalice.message_logging.get_mongo_or_mock()
connector = tgalice.dialog_connector.DialogConnector(
dialog_manager=manager,
storage=tgalice.storage.session_storage.BaseStorage(),
log_storage=CustomLogger(collection=db.get_collection('message_logs'), detect_pings=True),
alice_native_state='user',
)
handler = connector.serverless_alice_handler
if __name__ == '__main__':
server = tgalice.server.flask_server.FlaskServer(connector=connector)
server.parse_args_and_run()
| 28.357143
| 100
| 0.758186
|
f8b79daea70836555a77fce75ef0fc923e967272
| 278
|
py
|
Python
|
torchplasma/linear/__init__.py
|
hdkai/Plasma
|
1942d7fe5f6b41c9a16c8e2d1b6c7cf263307c39
|
[
"Apache-2.0"
] | null | null | null |
torchplasma/linear/__init__.py
|
hdkai/Plasma
|
1942d7fe5f6b41c9a16c8e2d1b6c7cf263307c39
|
[
"Apache-2.0"
] | null | null | null |
torchplasma/linear/__init__.py
|
hdkai/Plasma
|
1942d7fe5f6b41c9a16c8e2d1b6c7cf263307c39
|
[
"Apache-2.0"
] | null | null | null |
#
# Plasma
# Copyright (c) 2021 Yusuf Olokoba.
#
from .chromaticity import chromatic_adaptation
from .pointwise import contrast, exposure, saturation, color_balance
from .selective import selective_color
from .spectral import clarity, highlights, shadows, sharpen, texture
| 30.888889
| 68
| 0.798561
|
7921d85b049e935aa1af15b11d8757ffadcc33d5
| 2,504
|
py
|
Python
|
modules/exercise_gui_shop/authentication.py
|
PetkoAndreev/Python-advanced
|
94fd0fa69b366b978921ee8d4e214d3d3b4335d6
|
[
"MIT"
] | null | null | null |
modules/exercise_gui_shop/authentication.py
|
PetkoAndreev/Python-advanced
|
94fd0fa69b366b978921ee8d4e214d3d3b4335d6
|
[
"MIT"
] | null | null | null |
modules/exercise_gui_shop/authentication.py
|
PetkoAndreev/Python-advanced
|
94fd0fa69b366b978921ee8d4e214d3d3b4335d6
|
[
"MIT"
] | null | null | null |
import json
from canvas import tk
from tkinter import Button, Entry, Label
from helpers import clean_screen
from products import render_products
def login(username, password):
with open('db/user_credentials_db.txt', 'r') as file:
lines = file.readlines()
for line in lines:
user, pas = line[:-1].split(', ')
if user == username and pas == password:
with open('db/current_user.txt', 'w') as current_user_file:
current_user_file.write(username)
render_products()
return
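        # for/else: this branch runs only when the loop finishes without finding a matching credential.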
else:
render_login(errors=True)
def register(**user):
user.update({'products': []})
with open('db/users.txt', 'a') as file:
file.write(json.dumps(user))
file.write('\n')
with open('db/user_credentials_db.txt', 'a') as file:
file.write(f'{user.get("username")}, {user.get("password")}')
file.write('\n')
def render_register():
clean_screen()
Label(text='Enter your username:').grid(row=0, column=0)
username = Entry(tk)
username.grid(row=0, column=1)
Label(text='Enter your password:').grid(row=1, column=0)
password = Entry(tk, show='*')
password.grid(row=1, column=1)
Label(text='Enter your firstname:').grid(row=2, column=0)
firstname = Entry(tk)
firstname.grid(row=2, column=1)
Label(text='Enter your lastname:').grid(row=3, column=0)
lastname = Entry(tk)
lastname.grid(row=3, column=1)
Button(tk, text='Register', bg='green',
command=lambda: register(username=username.get(), password=password.get(), firstname=firstname.get(),
lastname=lastname.get())).grid(row=4, column=0)
def render_login(errors=None):
clean_screen()
Label(text='Enter your username:').grid(row=0, column=0)
username = Entry(tk)
username.grid(row=0, column=1)
Label(text='Enter your password:').grid(row=1, column=0)
password = Entry(tk, show='*')
password.grid(row=1, column=1)
Button(tk, text='Enter', bg='green', command=lambda: login(username=username.get(), password=password.get())).grid(
row=2, column=0)
if errors:
Label(text='Invalid username or password.').grid(row=3, column=0)
def render_main_enter_screen():
Button(tk, text='Login', bg='green', fg='white', command=render_login).grid(row=0, column=0)
Button(tk, text='Register', bg='yellow', command=render_register).grid(row=0, column=1)
| 35.771429
| 119
| 0.625399
|
e328418738bcded4e44bee0d41b21da2ea06947d
| 29,721
|
py
|
Python
|
src/NL_BERT/finetune.py
|
Anonym96/Negotiation_Learning
|
a819f2e688879c0afb3b17964f4fec9ab37161de
|
[
"MIT"
] | null | null | null |
src/NL_BERT/finetune.py
|
Anonym96/Negotiation_Learning
|
a819f2e688879c0afb3b17964f4fec9ab37161de
|
[
"MIT"
] | null | null | null |
src/NL_BERT/finetune.py
|
Anonym96/Negotiation_Learning
|
a819f2e688879c0afb3b17964f4fec9ab37161de
|
[
"MIT"
] | 1
|
2021-06-20T16:34:31.000Z
|
2021-06-20T16:34:31.000Z
|
"""
The main file used to train student and teacher models. Mainly based on [GitHub repository](https://github.com/intersun/PKD-for-BERT-Model-Compression) for [Patient Knowledge Distillation for BERT Model Compression](https://arxiv.org/abs/1908.09355).
"""
import logging
import os
import random
import pickle
import numpy as np
import torch
from torch.utils.data import RandomSampler, SequentialSampler
from tqdm import tqdm, trange
import torch.nn as nn
from BERT.pytorch_pretrained_bert.modeling import BertConfig
from BERT.pytorch_pretrained_bert.optimization import BertAdam, warmup_linear
from BERT.pytorch_pretrained_bert.tokenization import BertTokenizer
from BERT.pytorch_pretrained_bert.quantization_modules import calculate_next_quantization_parts
from utils.argument_parser import default_parser, get_predefine_argv, complete_argument
from utils.nli_data_processing import processors, output_modes
from utils.data_processing import init_model, get_task_dataloader
from utils.modeling import BertForSequenceClassificationEncoder, FCClassifierForSequenceClassification, FullFCClassifierForSequenceClassification
from utils.utils import load_model, count_parameters, eval_model_dataloader_nli, eval_model_dataloader, compute_metrics, load_model_finetune
from utils.KD_loss import distillation_loss, patience_loss
from envs import HOME_DATA_FOLDER
from BERT.pytorch_pretrained_bert.quantization_modules import quantization
logging.basicConfig(format='%(asctime)s - %(levelname)s - %(name)s - %(message)s',
datefmt='%m/%d/%Y %H:%M:%S',
level=logging.INFO)
logger = logging.getLogger(__name__)
#########################################################################
# Prepare Parser
##########################################################################
parser = default_parser()
DEBUG = True
args = parser.parse_args()
# The code might not be that clean in the current version.
train_seed_fixed = args.train_seed
saving_criterion_acc_fixed = args.saving_criterion_acc
saving_criterion_loss_fixed = args.saving_criterion_loss
train_batch_size_fixed = args.train_batch_size
eval_batch_size_fixed = args.eval_batch_size
model_type_fixed = args.model_type
save_model_dir_fixed = args.save_model_dir
output_dir_fixed = args.output_dir
load_model_dir_fixed = args.load_model_dir
layer_initialization_fixed = args.layer_initialization
freeze_layer_fixed = args.freeze_layer
fp16_fixed = args.fp16
learning_rate_fixed = args.learning_rate
teacher_prediction_fixed = args.teacher_prediction
num_train_epochs_fixed = args.num_train_epochs
#teacher_num = args.teacher_numb
task_name_fixed = args.task
if DEBUG:
logger.info("IN DEBUG MODE")
argv = get_predefine_argv(args, 'glue', args.task, args.train_type, args.student_hidden_layers)
try:
args = parser.parse_args(argv)
except NameError:
raise ValueError('please uncomment one of option above to start training')
else:
logger.info("IN CMD MODE")
args = parser.parse_args()
args.output_dir = output_dir_fixed
if load_model_dir_fixed is not None:
args.load_model_dir = load_model_dir_fixed
args = complete_argument(args, args.output_dir, args.load_model_dir)
if train_seed_fixed is not None:
args.train_seed = train_seed_fixed
if saving_criterion_acc_fixed is not None:
args.saving_criterion_acc = saving_criterion_acc_fixed
if saving_criterion_loss_fixed is not None:
args.saving_criterion_loss = saving_criterion_loss_fixed
if train_batch_size_fixed is not None:
args.train_batch_size = train_batch_size_fixed
if eval_batch_size_fixed is not None:
args.eval_batch_size = eval_batch_size_fixed
if save_model_dir_fixed is not None:
args.save_model_dir = save_model_dir_fixed
if args.load_model_dir is not None:
args.encoder_checkpoint = args.load_model_dir
if task_name_fixed is not None:
args.task_name = task_name_fixed
args.task = task_name_fixed
if layer_initialization_fixed is not None:
args.layer_initialization = layer_initialization_fixed
if freeze_layer_fixed is not None:
args.freeze_layer = freeze_layer_fixed
if fp16_fixed is not None:
args.fp16 = fp16_fixed
if learning_rate_fixed is not None:
args.learning_rate = learning_rate_fixed
if teacher_prediction_fixed is not None:
args.teacher_prediction = teacher_prediction_fixed
if num_train_epochs_fixed is not None:
args.num_train_epochs = num_train_epochs_fixed
args.model_type = model_type_fixed
args.raw_data_dir = os.path.join(HOME_DATA_FOLDER, 'data_raw', args.task_name)
args.feat_data_dir = os.path.join(HOME_DATA_FOLDER, 'data_feat', args.task_name)
args.train_batch_size = args.train_batch_size // args.gradient_accumulation_steps
logger.info('actual batch size on all GPU = %d' % args.train_batch_size)
device, n_gpu = args.device, args.n_gpu
###################################################################################################################################
random.seed(args.train_seed)
np.random.seed(args.train_seed)
torch.manual_seed(args.train_seed)
if args.n_gpu > 0:
torch.cuda.manual_seed_all(args.train_seed)
if args.model_type == 'Original':
if args.student_hidden_layers == 3:
# args.fc_layer_idx = '3,7'
args.fc_layer_idx = '1,3'
elif args.student_hidden_layers == 6:
args.fc_layer_idx = '1,3,5,7,9'
logger.info('Input Argument Information')
args_dict = vars(args)
for a in args_dict:
logger.info('%-28s %s' % (a, args_dict[a]))
#########################################################################
# Prepare Data
##########################################################################
task_name = args.task_name.lower()
if task_name not in processors and 'race' not in task_name:
raise ValueError("Task not found: %s" % (task_name))
if 'race' in task_name:
pass
else:
processor = processors[task_name]()
output_mode = output_modes[task_name]
label_list = processor.get_labels()
num_labels = len(label_list)
tokenizer = BertTokenizer.from_pretrained(args.bert_model, do_lower_case=True)
if args.do_train:
train_sampler = SequentialSampler if DEBUG else RandomSampler
read_set = 'train'
if args.teacher_prediction is not None and args.alpha > 0:
logger.info('loading teacher\'s prediction')
teacher_predictions = pickle.load(open(args.teacher_prediction, 'rb'))['train'] if args.teacher_prediction is not None else None
        #teacher_predictions = pickle.load(open(args.real_teacher, 'rb'))['train'] if args.real_teacher is not None else None
logger.info('teacher acc = %.2f, teacher loss = %.5f' % (teacher_predictions['acc']*100, teacher_predictions['loss']))
teacher_predictions_ = pickle.load(open(args.teacher_prediction, 'rb'))['dev'] if args.teacher_prediction is not None else None
#teacher_predictions_ = pickle.load(open(args.real_teacher, 'rb'))['dev'] if args.real_teacher is not None else None
logger.info('teacher acc = %.2f, teacher loss = %.5f' % (teacher_predictions_['acc']*100, teacher_predictions_['loss']))
if args.kd_model == 'kd':
train_examples, train_dataloader, _ = get_task_dataloader(task_name, read_set, tokenizer, args, SequentialSampler,
batch_size=args.train_batch_size,
knowledge=teacher_predictions['pred_logit'])
else:
train_examples, train_dataloader, _ = get_task_dataloader(task_name, read_set, tokenizer, args, SequentialSampler,
batch_size=args.train_batch_size,
knowledge=teacher_predictions['pred_logit'],
extra_knowledge=teacher_predictions['feature_maps'])
else:
if args.alpha > 0:
raise ValueError('please specify teacher\'s prediction file for KD training')
logger.info('runing simple fine-tuning because teacher\'s prediction is not provided')
train_examples, train_dataloader, _ = get_task_dataloader(task_name, read_set, tokenizer, args, SequentialSampler,
batch_size=args.train_batch_size)
num_train_optimization_steps = int(len(train_examples) / args.train_batch_size / args.gradient_accumulation_steps) * args.num_train_epochs
logger.info("***** Running training *****")
logger.info(" Num examples = %d", len(train_examples))
logger.info(" Batch size = %d", args.train_batch_size)
logger.info(" Num steps = %d", num_train_optimization_steps)
# Run prediction for full data
eval_examples, eval_dataloader, eval_label_ids = get_task_dataloader(task_name, 'dev', tokenizer, args, SequentialSampler, batch_size=args.eval_batch_size)
logger.info("***** Running evaluation *****")
logger.info(" Num examples = %d", len(eval_examples))
logger.info(" Batch size = %d", args.eval_batch_size)
# if args.do_eval:
# test_examples, test_dataloader, test_label_ids = get_task_dataloader(task_name, 'test', tokenizer, args, SequentialSampler, batch_size=args.eval_batch_size)
# logger.info("***** Running evaluation *****")
# logger.info(" Num examples = %d", len(test_examples))
# logger.info(" Batch size = %d", args.eval_batch_size)
#########################################################################
# Prepare model
#########################################################################
student_config = BertConfig(os.path.join(args.bert_model, 'bert_config.json'))
if args.kd_model.lower() in ['kd', 'kd.cls', 'kd.u', 'kd.i']:
logger.info('using normal Knowledge Distillation')
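    # The patient-KD variants ('kd.cls', 'kd.u', 'kd.i') need every intermediate hidden state from the student encoder, not just the pooled output.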
output_all_layers = (args.kd_model.lower() in ['kd.cls', 'kd.u', 'kd.i'])
# if original model
if args.model_type == 'Original':
student_encoder, student_classifier = init_model(task_name, output_all_layers, args.student_hidden_layers, student_config)
n_student_layer = len(student_encoder.bert.encoder.layer)
layer_initialization = args.layer_initialization.split(',')
for i in range(len(layer_initialization)):
layer_initialization[i] = int(layer_initialization[i])
student_encoder = load_model_finetune(student_encoder, layer_initialization, args.encoder_checkpoint, args, 'student', verbose= True)
logger.info('*' * 77)
student_classifier = load_model(student_classifier, args.cls_checkpoint, args, 'classifier', verbose= True)
elif args.kd_model.lower() == 'kd.full':
logger.info('using FULL Knowledge Distillation')
layer_idx = [int(i) for i in args.fc_layer_idx.split(',')]
num_fc_layer = len(layer_idx)
if args.weights is None or args.weights.lower() in ['none']:
weights = np.array([1] * (num_fc_layer-1) + [num_fc_layer-1]) / 2 / (num_fc_layer-1)
else:
weights = [float(w) for w in args.weights.split(',')]
weights = np.array(weights) / sum(weights)
assert len(weights) == num_fc_layer, 'number of weights and number of FC layer must be equal to each other'
# weights = torch.tensor(np.array([1, 1, 1, 1, 2, 6])/12, dtype=torch.float, device=device, requires_grad=False)
if args.fp16:
weights = weights.half()
student_encoder = BertForSequenceClassificationEncoder(student_config, output_all_encoded_layers=True,
num_hidden_layers=args.student_hidden_layers,
fix_pooler=True)
n_student_layer = len(student_encoder.bert.encoder.layer)
student_encoder = load_model(student_encoder, args.encoder_checkpoint, args, 'student', verbose=True)
logger.info('*' * 77)
student_classifier = FullFCClassifierForSequenceClassification(student_config, num_labels, student_config.hidden_size,
student_config.hidden_size, 6)
student_classifier = load_model(student_classifier, args.cls_checkpoint, args, 'exact', verbose=True)
assert max(layer_idx) <= n_student_layer - 1, 'selected FC layer idx cannot exceed the number of transformers'
else:
raise ValueError('%s KD not found, please use kd or kd.full' % args.kd)
n_param_student = count_parameters(student_encoder) + count_parameters(student_classifier)
logger.info('number of layers in student model = %d' % n_student_layer)
logger.info('num parameters in student model are %d and %d' % (count_parameters(student_encoder), count_parameters(student_classifier)))
#########################################################################
# Prepare optimizer
#########################################################################
if task_name == 'rte':
log_per_step = 1
elif task_name == 'mrpc':
log_per_step = 1
elif task_name == 'cola':
log_per_step = 10
elif task_name == 'sst-2':
log_per_step = 10
else:
log_per_step = 200
if args.do_train:
param_optimizer = list(student_encoder.named_parameters()) + list(student_classifier.named_parameters())
no_decay = ['bias', 'LayerNorm.bias', 'LayerNorm.weight']
optimizer_grouped_parameters = [
{'params': [p for n, p in param_optimizer if not any(nd in n for nd in no_decay)], 'weight_decay': 0.01},
{'params': [p for n, p in param_optimizer if any(nd in n for nd in no_decay)], 'weight_decay': 0.0}
]
if args.fp16:
logger.info('FP16 activate, use apex FusedAdam')
try:
from apex.contrib.optimizers import FP16_Optimizer
from apex.contrib.optimizers import FusedAdam
except ImportError:
raise ImportError(
"Please install apex from https://www.github.com/nvidia/apex to use distributed and fp16 training.")
optimizer = FusedAdam(optimizer_grouped_parameters,
lr=args.learning_rate,
bias_correction=False,
max_grad_norm=1.0)
if args.loss_scale == 0:
optimizer = FP16_Optimizer(optimizer, dynamic_loss_scale=True)
else:
optimizer = FP16_Optimizer(optimizer, static_loss_scale=args.loss_scale)
else:
logger.info('FP16 is not activated, use BertAdam')
optimizer = BertAdam(optimizer_grouped_parameters,
lr=args.learning_rate,
warmup=args.warmup_proportion,
t_total=num_train_optimization_steps)
#########################################################################
# Model Training
#########################################################################
if args.do_train:
global_step = 0
nb_tr_steps = 0
tr_loss = 0
student_encoder.train()
student_classifier.train()
eval_loss_min = 100
eval_best_acc = 0
eval_best_acc_and_f1 = 0
eval_best_f1 = 0
loss_acc = 0
for epoch in trange(int(args.num_train_epochs), desc="Epoch"):
tr_loss, tr_ce_loss, tr_kd_loss, tr_acc = 0, 0, 0, 0
nb_tr_examples, nb_tr_steps = 0, 0
for step, batch in enumerate(tqdm(train_dataloader, desc="Iteration")):
student_encoder.train()
student_classifier.train()
batch = tuple(t.to(device) for t in batch)
if args.alpha == 0:
input_ids, input_mask, segment_ids, label_ids = batch
teacher_pred, teacher_patience = None, None
else:
if args.kd_model == 'kd':
input_ids, input_mask, segment_ids, label_ids, teacher_pred = batch
teacher_patience = None
else:
input_ids, input_mask, segment_ids, label_ids, teacher_pred, teacher_patience = batch
if args.fp16:
teacher_patience = teacher_patience.half()
if args.fp16:
teacher_pred = teacher_pred.half()
full_output, pooled_output = student_encoder(input_ids, segment_ids, input_mask)
if args.kd_model.lower() in['kd', 'kd.cls']:
logits_pred_student = student_classifier(pooled_output)
if args.kd_model.lower() == 'kd.cls':
student_patience = torch.stack(full_output[:-1]).transpose(0,1)
else:
student_patience = None
elif args.kd_model.lower() == 'kd.full':
logits_pred_student = student_classifier(full_output, weights, layer_idx)
else:
raise ValueError(f'{args.kd_model} not implemented yet')
loss_dl, kd_loss, ce_loss = distillation_loss(logits_pred_student, label_ids, teacher_pred, T=args.T, alpha=args.alpha)
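            # Patient KD: when beta > 0, additionally penalize the distance between teacher and student intermediate ("patience") representations.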
if args.beta > 0:
if student_patience.shape[0] != input_ids.shape[0]:
# For RACE
n_layer = student_patience.shape[1]
student_patience = student_patience.transpose(0, 1).contiguous().view(n_layer, input_ids.shape[0], -1).transpose(0,1)
pt_loss = args.beta * patience_loss(teacher_patience, student_patience, args.normalize_patience)
loss = loss_dl + pt_loss
else:
pt_loss = torch.tensor(0.0)
loss = loss_dl
if n_gpu > 1:
loss = loss.mean() # mean() to average on multi-gpu.
if args.fp16:
optimizer.backward(loss)
else:
loss.backward()
n_sample = input_ids.shape[0]
tr_loss += loss.item() * n_sample
if isinstance(kd_loss, float):
tr_kd_loss += kd_loss * n_sample
else:
tr_kd_loss += kd_loss.item() * n_sample
tr_ce_loss += ce_loss.item() * n_sample
tr_loss_pt = pt_loss.item() * n_sample
pred_cls = logits_pred_student.data.max(1)[1]
tr_acc += pred_cls.eq(label_ids).sum().cpu().item()
nb_tr_examples += n_sample
nb_tr_steps += 1
if args.gradient_accumulation_steps > 1:
loss = loss / args.gradient_accumulation_steps
if (step + 1) % args.gradient_accumulation_steps == 0:
if args.fp16:
lr_this_step = args.learning_rate * warmup_linear(global_step / num_train_optimization_steps,
args.warmup_proportion)
for param_group in optimizer.param_groups:
param_group['lr'] = lr_this_step
else:
lr_this_step = args.learning_rate * warmup_linear(global_step / num_train_optimization_steps, args.warmup_proportion)
for param_group in optimizer.param_groups:
param_group['lr'] = lr_this_step
optimizer.step()
optimizer.zero_grad()
global_step += 1
if args.num_train_epochs == 10:
if (epoch == 8):
logger.info("*"*77)
logger.info("Best Acc: "+str(eval_best_acc)+", Best Loss: "+str(eval_loss_min))
if task_name == 'mrpc':
logger.info("Best acc and f1: "+str(eval_best_acc_and_f1))
logger.info("*"*77)
raise ValueError("Skipping the rest.")
elif args.num_train_epochs == 4:
if (epoch == 4):
logger.info("*"*77)
logger.info("Best Acc: "+str(eval_best_acc)+", Best Loss: "+str(eval_loss_min))
if task_name == 'mrpc':
logger.info("Best acc and f1: "+str(eval_best_acc_and_f1))
logger.info("*"*77)
raise ValueError("Skipping the rest.")
#Validate the model on dev set every log_per_step and save the model if criterion is met.
if (global_step % log_per_step == 0) & (epoch > 0):
if 'race' in task_name:
result = eval_model_dataloader_nli(student_encoder, student_classifier, eval_dataloader, device, False)
else:
test_res = eval_model_dataloader_nli(args.task_name.lower(), eval_label_ids, student_encoder, student_classifier, eval_dataloader, args.kd_model, num_labels, device, args.weights, args.fc_layer_idx, output_mode)
# Saving checkpoints when the conditions below are met.
if task_name == 'cola':
if test_res['mcc'] > eval_best_acc:
logger.info("")
logger.info('='*77)
logger.info("Validation mcc improved! "+str(eval_best_acc)+" -> "+str(test_res['mcc']))
logger.info('='*77)
eval_best_acc = test_res['mcc']
if eval_best_acc > args.saving_criterion_acc:
if args.n_gpu > 1:
torch.save(student_encoder.module.state_dict(), os.path.join(args.output_dir, 'BERT'+f'.encoder_acc.pkl'))
torch.save(student_classifier.module.state_dict(), os.path.join(args.output_dir, 'BERT'+f'.cls_acc.pkl'))
else:
torch.save(student_encoder.state_dict(), os.path.join(args.output_dir, 'BERT'+f'.encoder_acc.pkl'))
torch.save(student_classifier.state_dict(), os.path.join(args.output_dir, 'BERT'+f'.cls_acc.pkl'))
logger.info("Saving the model...")
if test_res['eval_loss']< eval_loss_min:
logger.info("")
logger.info('='*77)
logger.info("Validation Loss improved! "+str(eval_loss_min)+" -> "+str(test_res['eval_loss']))
logger.info('='*77)
eval_loss_min = test_res['eval_loss']
if eval_loss_min < args.saving_criterion_loss:
if args.n_gpu > 1:
torch.save(student_encoder.module.state_dict(), os.path.join(args.output_dir, 'BERT'+f'.encoder_loss.pkl'))
torch.save(student_classifier.module.state_dict(), os.path.join(args.output_dir, 'BERT'+f'.cls_loss.pkl'))
else:
torch.save(student_encoder.state_dict(), os.path.join(args.output_dir, 'BERT'+f'.encoder_loss.pkl'))
torch.save(student_classifier.state_dict(), os.path.join(args.output_dir, 'BERT'+f'.cls_loss.pkl'))
logger.info("Saving the model...")
elif task_name == 'mrpc':
if test_res['f1'] > eval_best_acc:
logger.info("")
logger.info('='*77)
logger.info("Validation f1 improved! "+str(eval_best_acc)+" -> "+str(test_res['f1']))
logger.info('='*77)
eval_best_acc = test_res['f1']
print("ACC= "+str(test_res['acc']))
if eval_best_acc > args.saving_criterion_acc:
if args.n_gpu > 1:
torch.save(student_encoder.module.state_dict(), os.path.join(args.output_dir, 'BERT'+f'.encoder_acc.pkl'))
torch.save(student_classifier.module.state_dict(), os.path.join(args.output_dir, 'BERT'+f'.cls_acc.pkl'))
else:
torch.save(student_encoder.state_dict(), os.path.join(args.output_dir, 'BERT'+f'.encoder_acc.pkl'))
torch.save(student_classifier.state_dict(), os.path.join(args.output_dir, 'BERT'+f'.cls_acc.pkl'))
logger.info("Saving the model...")
if test_res['acc_and_f1'] > eval_best_acc_and_f1:
logger.info("")
logger.info('='*77)
logger.info("Validation acc_and_f1 improved! "+str(eval_best_acc_and_f1)+" -> "+str(test_res['acc_and_f1']))
logger.info('='*77)
eval_best_acc_and_f1 = test_res['acc_and_f1']
logger.info("ACC= "+str(test_res['acc']))
logger.info("f1= "+str(test_res['f1']))
if eval_best_acc_and_f1 > args.saving_criterion_acc:
if args.n_gpu > 1:
torch.save(student_encoder.module.state_dict(), os.path.join(args.output_dir, 'BERT'+f'.encoder_acc_and_f1.pkl'))
torch.save(student_classifier.module.state_dict(), os.path.join(args.output_dir, 'BERT'+f'.cls_acc_and_f1.pkl'))
else:
torch.save(student_encoder.state_dict(), os.path.join(args.output_dir, 'BERT'+f'.encoder_acc_and_f1.pkl'))
torch.save(student_classifier.state_dict(), os.path.join(args.output_dir, 'BERT'+f'.cls_acc_and_f1.pkl'))
logger.info("Saving the model...")
if test_res['eval_loss']< eval_loss_min:
logger.info("")
logger.info('='*77)
logger.info("Validation Loss improved! "+str(eval_loss_min)+" -> "+str(test_res['eval_loss']))
logger.info('='*77)
eval_loss_min = test_res['eval_loss']
print("ACC= "+str(test_res['acc']))
if eval_loss_min < args.saving_criterion_loss:
if args.n_gpu > 1:
torch.save(student_encoder.module.state_dict(), os.path.join(args.output_dir, 'BERT'+f'.encoder_loss.pkl'))
torch.save(student_classifier.module.state_dict(), os.path.join(args.output_dir, 'BERT'+f'.cls_loss.pkl'))
else:
torch.save(student_encoder.state_dict(), os.path.join(args.output_dir, 'BERT'+f'.encoder_loss.pkl'))
torch.save(student_classifier.state_dict(), os.path.join(args.output_dir, 'BERT'+f'.cls_loss.pkl'))
logger.info("Saving the model...")
else:
if test_res['acc'] > eval_best_acc:
logger.info("")
logger.info('='*77)
logger.info("Validation acc improved! "+str(eval_best_acc)+" -> "+str(test_res['acc']))
logger.info('='*77)
eval_best_acc = test_res['acc']
if eval_best_acc > args.saving_criterion_acc:
if args.n_gpu > 1:
torch.save(student_encoder.module.state_dict(), os.path.join(args.output_dir, 'BERT'+f'.encoder_acc.pkl'))
torch.save(student_classifier.module.state_dict(), os.path.join(args.output_dir, 'BERT'+f'.cls_acc.pkl'))
else:
torch.save(student_encoder.state_dict(), os.path.join(args.output_dir, 'BERT'+f'.encoder_acc.pkl'))
torch.save(student_classifier.state_dict(), os.path.join(args.output_dir, 'BERT'+f'.cls_acc.pkl'))
logger.info("Saving the model...")
if test_res['eval_loss']< eval_loss_min:
logger.info("")
logger.info('='*77)
logger.info("Validation Loss improved! "+str(eval_loss_min)+" -> "+str(test_res['eval_loss']))
logger.info('='*77)
eval_loss_min = test_res['eval_loss']
if eval_loss_min < args.saving_criterion_loss:
if args.n_gpu > 1:
torch.save(student_encoder.module.state_dict(), os.path.join(args.output_dir, 'BERT'+f'.encoder_loss.pkl'))
torch.save(student_classifier.module.state_dict(), os.path.join(args.output_dir, 'BERT'+f'.cls_loss.pkl'))
else:
torch.save(student_encoder.state_dict(), os.path.join(args.output_dir, 'BERT'+f'.encoder_loss.pkl'))
torch.save(student_classifier.state_dict(), os.path.join(args.output_dir, 'BERT'+f'.cls_loss.pkl'))
logger.info("Saving the model...")
logger.info("")
logger.info('='*77)
logger.info("Validation Accuracy : "+str(eval_best_acc)+" Validation Loss : "+str(eval_loss_min))
logger.info("The seed is : "+str(args.seed))
logger.info('='*77)
| 53.168157
| 250
| 0.586555
|
4c0b3ab53bd0e0a437983858877aa1c6af1824d1
| 2,882
|
py
|
Python
|
tests/keras/applications/applications_test.py
|
mikezsx/dlstudy
|
6572934f9a7c4ba498300186c2d297994c43900d
|
[
"MIT"
] | 3
|
2018-01-27T06:15:26.000Z
|
2019-12-27T16:51:54.000Z
|
tests/keras/applications/applications_test.py
|
candleinwindsteve/keras
|
9eb7ecd3e525c9cff31ebd59a96794f212ca5e1e
|
[
"MIT"
] | null | null | null |
tests/keras/applications/applications_test.py
|
candleinwindsteve/keras
|
9eb7ecd3e525c9cff31ebd59a96794f212ca5e1e
|
[
"MIT"
] | 3
|
2020-02-24T15:16:05.000Z
|
2020-05-09T05:29:53.000Z
|
import pytest
from keras.utils.test_utils import keras_test
from keras import applications
from keras import backend as K
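# weights=None builds each architecture without downloading pretrained ImageNet weights; the tests only check output shapes.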
@keras_test
def test_resnet50():
model = applications.ResNet50(weights=None)
assert model.output_shape == (None, 1000)
@keras_test
def test_resnet50_notop():
model = applications.ResNet50(weights=None, include_top=False)
assert model.output_shape == (None, None, None, 2048)
@keras_test
def test_resnet50_pooling():
model = applications.ResNet50(weights=None,
include_top=False,
pooling='avg')
assert model.output_shape == (None, 2048)
@keras_test
def test_vgg16():
model = applications.VGG16(weights=None)
assert model.output_shape == (None, 1000)
@keras_test
def test_vgg16_notop():
model = applications.VGG16(weights=None, include_top=False)
assert model.output_shape == (None, None, None, 512)
@keras_test
def test_vgg16_pooling():
model = applications.VGG16(weights=None, include_top=False, pooling='avg')
assert model.output_shape == (None, 512)
@keras_test
def test_vgg19():
model = applications.VGG19(weights=None)
assert model.output_shape == (None, 1000)
@keras_test
def test_vgg19_notop():
    model = applications.VGG19(weights=None, include_top=False)
assert model.output_shape == (None, None, None, 512)
@keras_test
def test_vgg19_pooling():
    model = applications.VGG19(weights=None, include_top=False, pooling='avg')
assert model.output_shape == (None, 512)
@keras_test
@pytest.mark.skipif((K.backend() != 'tensorflow'),
reason='Requires tensorflow backend')
def test_xception():
model = applications.Xception(weights=None)
assert model.output_shape == (None, 1000)
@keras_test
@pytest.mark.skipif((K.backend() != 'tensorflow'),
reason='Requires tensorflow backend')
def test_xception_notop():
model = applications.Xception(weights=None, include_top=False)
assert model.output_shape == (None, None, None, 2048)
@keras_test
@pytest.mark.skipif((K.backend() != 'tensorflow'),
reason='Requires tensorflow backend')
def test_xception_pooling():
model = applications.Xception(weights=None, include_top=False, pooling='avg')
assert model.output_shape == (None, 2048)
@keras_test
def test_inceptionv3():
model = applications.InceptionV3(weights=None)
assert model.output_shape == (None, 1000)
@keras_test
def test_inceptionv3_notop():
model = applications.InceptionV3(weights=None, include_top=False)
assert model.output_shape == (None, None, None, 2048)
@keras_test
def test_inceptionv3_pooling():
model = applications.InceptionV3(weights=None, include_top=False, pooling='avg')
assert model.output_shape == (None, 2048)
if __name__ == '__main__':
pytest.main([__file__])
| 26.934579
| 84
| 0.703678
|
e5f3cf597f1d1f5628fc1ac3643ce5414512b73f
| 8,709
|
py
|
Python
|
src/dsc/thesis.py
|
ghw329/DDSC
|
97262b7fe0f507a7860828060e43ae2e0c1f1495
|
[
"MIT"
] | null | null | null |
src/dsc/thesis.py
|
ghw329/DDSC
|
97262b7fe0f507a7860828060e43ae2e0c1f1495
|
[
"MIT"
] | null | null | null |
src/dsc/thesis.py
|
ghw329/DDSC
|
97262b7fe0f507a7860828060e43ae2e0c1f1495
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python2.7
# -*- coding: utf-8 -*-
# --- Program internal modules -------------------------------------------
from __future__ import division
from dsc import DSC
import numpy as np
import pandas as pd
import click
import pickle
############ plotting
import matplotlib.pyplot as plt
plt.style.use('ggplot')
## plotting basis functions
import matplotlib.cm as cm
# plotly
import plotly.plotly as py
from plotly.graph_objs import *
###############################
# plots directory
figure_directory= '../../../doc/thesis/figures/'
# set a consistent default font size for all figures
plt.rcParams.update({'font.size': 18})
# figure size always the same
plt.figure(figsize=(16,12))
# --- Locally installed modules -----------------------------------------
from reader import Reader
class Plotter(object):
def __init__(self,n,t,acc,data):
self.n = n
self.T = t
self.acc = acc
self.data = data
def appliances(self, x_train, x_test, x_test_use, x_predict):
# row and column sharing
f, ((ax1, ax2, ax3, ax4, ax5, ax6)) = plt.subplots(6, 1, sharex='col', sharey='row', figsize=(16,18))
## piechart
f2, ((axes1, axes2)) = plt.subplots(2, 1, sharex='col', sharey='row', figsize=(16,18))
pie_chart_true = []
pie_chart_pred = []
x = range(x_train[x_train.keys()[0]].shape[0])
plt.rcParams.update({'font.size': 15})
y = np.asarray(x_test_use)[0:,0]
ax1.plot(x, y, color='b',label='Actual energy')
y=[-1]*x_train[x_train.keys()[0]].shape[0]
ax1.plot(x, y, color='r', label='Predicted')
#ax1.set_ylim([0,2])
ax1.set_ylabel('Whole Home')
ax1.legend()
##
y = np.asarray(x_test[x_test.keys()[0]])[0:,0]
pie_chart_true.append(y.sum())
ax2.plot(x, y, color='b')
y = np.asarray(x_predict[0])[0:,0]
ax2.plot(x , y, color='r')
#ax2.get_yaxis().set_visible(False)
ax2.set_ylabel('Refrigerator')
pie_chart_pred.append(y.sum())
##
y = np.asarray(x_test[x_test.keys()[1]])[0:,0]
pie_chart_true.append(y.sum())
ax3.plot(x, y, color='b')
y = np.asarray(x_predict[1])[0:,0]
ax3.plot(x,y, color='r')
#ax3.get_yaxis().set_visible(False)
ax3.set_ylabel('Dishwasher')
pie_chart_pred.append(y.sum())
##
y = np.asarray(x_test[x_test.keys()[2]])[0:,0]
pie_chart_true.append(y.sum())
ax4.plot(x, y, color='b')
y = np.asarray(x_predict[2])[0:,0]
ax4.plot(x,y, color='r')
#ax4.get_yaxis().set_visible(False)
ax4.set_ylabel('Furnace')
pie_chart_pred.append(y.sum())
##
y = np.asarray(x_test[x_test.keys()[3]])[0:,0]
pie_chart_true.append(y.sum())
ax5.plot(x, y, color='b')
y = np.asarray(x_predict[3])[0:,0]
ax5.plot(x,y, color='r')
#ax5.get_yaxis().set_visible(False)
ax5.set_ylabel('Air')
pie_chart_pred.append(y.sum())
##
y = np.asarray(x_test[x_test.keys()[4]])[0:,0]
pie_chart_true.append(y.sum())
ax6.plot(x, y, color='b')
y = np.asarray(x_predict[4])[0:,0]
ax6.plot(x,y, color='r')
#ax6.get_yaxis().set_visible(False)
ax6.set_ylabel('Others')
ax6.set_xlabel('Hours')
pie_chart_pred.append(y.sum())
if self.data == 0:
f.savefig(figure_directory+'normal_appliances_'+str(self.n)+'_'+str(self.T) + '.png')
elif self.data == 1:
f.savefig(figure_directory+'end_appliances_'+str(self.n)+'_'+str(self.T) + '.png')
elif self.data == 2:
f.savefig(figure_directory+'days_appliances_'+str(self.n)+'_'+str(self.T) + '.png')
## pie-charts
labels = x_test.keys()
self.pie_chart(axes1,pie_chart_true,labels)
axes1.set_title('True usage')
self.pie_chart(axes2,pie_chart_pred,labels)
axes2.set_title('Predicted usage')
axes2.text(0.95, 0.01, 'Accuracy of ' + str(round(self.acc[0],1)),
verticalalignment='center', horizontalalignment='right',
transform=axes2.transAxes,
color='black', fontsize=15)
if self.data == 0:
f2.savefig(figure_directory+'normal_pie_chart_'+str(self.n)+'_'+str(self.T) + '.png')
elif self.data == 1:
f2.savefig(figure_directory+'end_pie_chart_'+str(self.n)+'_'+str(self.T) + '.png')
elif self.data == 2:
f2.savefig(figure_directory+'days_pie_chart_'+str(self.n)+'_'+str(self.T) + '.png')
def pie_chart(self, subplot, pie_chart, labels):
# The slices will be ordered and plotted counter-clockwise.
## --- Plotting the true-piechart
pie_chart_sum = sum(pie_chart)
pie_chart = map(lambda x: x/pie_chart_sum,pie_chart)
cmap = plt.cm.prism
colors = cmap(np.linspace(0., 1., len(pie_chart)))
colors = ['yellowgreen', 'gold', 'lightskyblue', 'lightcoral']
pie_wedge_collection = subplot.pie(pie_chart, colors=colors, labels=labels, labeldistance=1.05);
for pie_wedge in pie_wedge_collection[0]:
pie_wedge.set_edgecolor('white')
# Set aspect ratio to be equal so that pie is drawn as a circle.
@click.command()
@click.option('--t', default=14,help='Timeframe; note that half will be test')
@click.option('--ph',default=None, help='Portion of houses to investigate')
def main(t,ph):
'''
Program to train disaggregation using Sparse Coding
'''
# read dataset
datasets = [0,1,2]
for data in datasets:
dataset = ['cleanpecanhour2014','weekendpecanhour2014','weekdayspecanhour2014']
reader = Reader(dataset[data])
##returning a datafile pandasObject
df = reader.dataParser()
print "parsed the data"
# returns a dictionary of all of the appliances
d = reader.format_data(df,other=True)
print "formated data"
portion = 0.5
factor_n_t = 0.1 # heuristically determined
timeframes = [14,30,60]
timeframes = [x*24 for x in timeframes]
alphas = [0.00001, 0.00001, 0.000001]
portion = 0.5
# Good values (t,n,alpha)
# (14,40, alpha = 0.0001)
# (336,800, alpha = 0.00001)
# (720,,1400, alpha = )
for timeframe, alpha in zip(timeframes,alphas):
n = int(factor_n_t*timeframe)
x_train, x_test = reader.split(d,portion,timeframe)
# use in whole house disaggregation step
x_train_use = x_train.pop('use',None)
x_test_use = x_test.pop('use',None)
x_train_localhour = x_train.pop('localhour',None)
x_test_localhour = x_test.pop('localhour',None)
# algorithm starts
# parameters
train_set = x_train
test_set = x_test
train_sum = sum(x_train.values())
k = len(x_train.keys())
T,m = x_train[x_train.keys()[0]].shape
rp = 0.0005
epsilon = 0.001
steps = 10 # steps must be higher than k
n_components = n
# Sparse Coding pre_training
dsc = DSC(train_set,train_sum,alpha,epsilon,rp,steps,n_components,m,T,k)
print "started the pre-training"
A_list,B_list = dsc.pre_training(x_train.values())
print "done pre_training"
            # Discriminative Disaggregation training
B_cat = dsc.DD(x_train_use.values,B_list,A_list)
print "done DD"
# Given test examples x_test
A_prime = dsc.F(x_test_use.values,B_cat,A=np.vstack(A_list))
A_last = np.split(A_prime,k,axis=0)
x_predict = dsc.predict(A_last,B_list)
x_predict_sum = sum(x_predict)
print "the shape of the first predicted appliances is :%s" %(x_predict[0].shape,)
# energy disaggregation accuracy
acc = dsc.accuracy(x_train.values(),train_sum,B_list,A_last)
# energy disaggregation error
error, error_star = dsc.error(x_train.values(),train_sum,B_list,A_list)
print "error: %s, error_star: %s" % (sum(error),sum(error_star))
acc_nndsc, acc_dddsc = dsc.get_accuracy_plot()
err_nndsc, err_dddsc = dsc.get_error_plot()
# plotting acc/err
a_nndsc, a_ddsc = dsc.get_a()
b_nndsc, b_ddsc = dsc.get_b()
hours = timeframe/2
plot_it = Plotter(n,hours,acc,data)
plot_it.appliances(x_train, x_test, x_test_use, x_predict)
if __name__ == '__main__':
main()
| 37.377682
| 109
| 0.584683
|
8792eafe59bbed1124bde4a3925c65867f4dfd34
| 2,227
|
py
|
Python
|
src/chat/consumers.py
|
hafizsameed/django-realtime-chat-app
|
88ed5a8bc06411ed770fe6cf9f12d71a97108fa6
|
[
"MIT"
] | null | null | null |
src/chat/consumers.py
|
hafizsameed/django-realtime-chat-app
|
88ed5a8bc06411ed770fe6cf9f12d71a97108fa6
|
[
"MIT"
] | 2
|
2020-06-06T01:45:47.000Z
|
2021-06-10T22:48:29.000Z
|
src/chat/consumers.py
|
hafizsameed/django-realtime-chat-app
|
88ed5a8bc06411ed770fe6cf9f12d71a97108fa6
|
[
"MIT"
] | null | null | null |
import asyncio
import json
from django.contrib.auth import get_user_model
from channels.consumer import AsyncConsumer
from channels.db import database_sync_to_async
from .models import Thread, ChatMessage
class ChatConsumer(AsyncConsumer):
async def websocket_connect(self, event):
print("connected", event)
other_user = self.scope['url_route']['kwargs']['username']
me = self.scope['user']
print(other_user,me)
thread_obj = await self.get_thread(me,other_user)
print(thread_obj)
self.thread_obj = thread_obj
chat_room = f'thread_{thread_obj.id}'
self.chat_room = chat_room
# await asyncio.sleep(10)
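        # Join the per-thread channel group so that both participants in the thread receive broadcast messages.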
await self.channel_layer.group_add(
chat_room,
self.channel_name
)
await self.send({
"type":"websocket.accept"
})
async def websocket_receive(self, event):
print("recieve", event)
front_text = event.get("text",None)
if front_text is not None:
loaded_dict_data = json.loads(front_text)
msg = loaded_dict_data.get('message')
user = self.scope['user']
username='default'
if user.is_authenticated:
username = user.username
myResponse = {
'message':msg,
'username':username
}
await self.create_chat_message(user,msg)
await self.channel_layer.group_send(
self.chat_room,
{
'type':'chat_message',
'text':json.dumps(myResponse)
}
)
async def chat_message(self,event):
await self.send({
'type':'websocket.send',
'text':event['text']
})
async def websocket_disconnect(self, event):
print("disconnected", event)
@database_sync_to_async
def get_thread(self,user,other_user):
return Thread.objects.get_or_new(user,other_user)[0]
@database_sync_to_async
def create_chat_message(self,me,msg):
thread_obj = self.thread_obj
return ChatMessage.objects.create(thread=thread_obj,user=me,message=msg)
| 32.275362
| 80
| 0.597665
|
a36bd712c2820aab5d9337d67fdce9c3f9b5f961
| 247
|
py
|
Python
|
sms_voting/admin.py
|
Fedor-Lyanguzov/Voting
|
98f8fd958624fa9ba3835836c525fbb4abb6b5e5
|
[
"MIT"
] | null | null | null |
sms_voting/admin.py
|
Fedor-Lyanguzov/Voting
|
98f8fd958624fa9ba3835836c525fbb4abb6b5e5
|
[
"MIT"
] | 1
|
2021-12-04T11:43:25.000Z
|
2021-12-04T11:43:25.000Z
|
sms_voting/admin.py
|
Fedor-Lyanguzov/Voting
|
98f8fd958624fa9ba3835836c525fbb4abb6b5e5
|
[
"MIT"
] | null | null | null |
from django.contrib import admin
from .models import Poll, Bulletin
@admin.register(Poll)
class PollAdmin(admin.ModelAdmin):
fields = ('title', 'face_participants')
@admin.register(Bulletin)
class BulletinAdmin(admin.ModelAdmin):
pass
| 19
| 43
| 0.761134
|
371687637bd4acefcd2077142ab684d98afce408
| 4,604
|
py
|
Python
|
mainEmberrite/people/characterClass.py
|
evvanErb/Emberrite
|
1e65ef69188619684e093f01febc6f92f8b02716
|
[
"Apache-2.0"
] | null | null | null |
mainEmberrite/people/characterClass.py
|
evvanErb/Emberrite
|
1e65ef69188619684e093f01febc6f92f8b02716
|
[
"Apache-2.0"
] | null | null | null |
mainEmberrite/people/characterClass.py
|
evvanErb/Emberrite
|
1e65ef69188619684e093f01febc6f92f8b02716
|
[
"Apache-2.0"
] | null | null | null |
import random
#Character building class
class character:
def __init__(self,name,gender,age,race,classType,alignment,stats,health,maxHealth):
self.name = name
self.gender = gender
self.age = age
self.race = race
self.classType = classType
self.alignment = alignment
self.stats = stats
self.health = health
self.maxHealth = maxHealth
#Return character attributes
def returnGender(self):
return(self.gender)
def returnAge(self):
return(self.age)
def returnRace(self):
return(self.race)
def returnClassType(self):
return(self.classType)
def returnClassName(self):
return(self.classType.titleReturn())
def returnAlignment(self):
return(self.alignment)
def returnStats(self):
return(self.stats)
def returnHealth(self):
return(self.health)
def returnMaxHealth(self):
return(self.maxHealth)
#Change health
def damage(self, damage):
self.health -= damage
return(self.health)
def heal(self, heal):
while ((heal > 0) and (self.health < self.maxHealth)):
self.health += 1
heal -= 1
return(self.health)
#Determine vision type
def vision(self):
if (self.race == "half-elf"):
return("infra")
elif (self.race == "elf"):
return("infra")
elif (self.race == "dwarf"):
return("low-light")
else:
return("regular")
#Determine weapon bonuses from class and race
def weaponBonus(self):
#Weapon bonuses from class
if (self.classType == "hunter"):
crBonus = {"bow":2, "short sword":2}
elif (self.classType == "paladin"):
crBonus = {"hammer":2}
elif (self.classType == "rogue"):
crBonus = {"knife":2}
elif (self.classType == "warrior"):
crBonus = {"short swort":2, "axe":2}
else:
crBonus = {}
#Weapon bonuses from race
if (self.race == "elf"):
if ("bow" in crBonus):
crBonus["bow"] += 2
else:
crBonus.update({"bow":2})
elif (self.race == "half-elf"):
if ("short sword" in crBonus):
crBonus["short sword"] += 2
else:
crBonus.update({"short sword":2})
elif (self.race == "dwarf"):
if ("hammer" in crBonus):
crBonus["hammer"] += 2
else:
crBonus.update({"hammer":2})
return(crBonus)
#Chance to pick lock
def pickLock(self):
if ((self.stats["dexterity"] >= 16) and (self.classType == "rogue")):
return(.90)
elif ((self.stats["dexterity"] < 16) and (self.classType == "rogue")):
return(.80)
elif (self.stats["dexterity"] >= 17):
return(.75)
elif (self.stats["dexterity"] >= 15):
return(.25)
elif (self.stats["dexterity"] >= 12):
return(.15)
else:
return(.05)
#Chance to seduce character
def seduce(self):
if (self.stats["charisma"] >= 17):
return(.75)
elif (self.stats["charisma"] >= 15):
return(.50)
elif (self.stats["charisma"] >= 12):
return(.35)
else:
return(.15)
#Saving throws
def saveVpoison(self):
if (self.stats["constitution"] >= 17):
return(.65)
elif (self.stats["constitution"] >= 15):
return(.45)
elif (self.stats["constitution"] >= 12):
return(.35)
else:
return(.15)
def saveVstun(self):
if (self.stats["dexterity"] >= 17):
return(.65)
elif (self.stats["dexterity"] >= 15):
return(.45)
elif (self.stats["dexterity"] >= 12):
return(.35)
else:
return(.15)
def saveVmagic(self):
if (self.stats["wisdom"] >= 17):
return(.65)
elif (self.stats["wisdom"] >= 15):
return(.45)
elif (self.stats["wisdom"] >= 12):
return(.35)
else:
return(.15)
def saveVcrush(self):
if (self.stats["strength"] >= 17):
return(.65)
elif (self.stats["strength"] >= 15):
return(.45)
elif (self.stats["strength"] >= 12):
return(.35)
else:
return(.15)
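
# A minimal usage sketch (not part of the original file). It assumes classType is
# passed as a plain string such as "rogue", which is how weaponBonus() and
# pickLock() above compare it; the name "hero" and the stat values are illustrative.
if __name__ == "__main__":
    hero = character("Aria", "female", 27, "half-elf", "rogue", "chaotic good",
                     {"strength": 12, "dexterity": 17, "constitution": 10,
                      "intelligence": 14, "wisdom": 11, "charisma": 16}, 20, 20)
    print(hero.vision())       # "infra" for a half-elf
    print(hero.weaponBonus())  # {"knife": 2, "short sword": 2}
    print(hero.pickLock())     # 0.9 (dexterity >= 16 and rogue)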
| 28.073171
| 87
| 0.497828
|
f425ad008d52317cc0c7132e3ebfbb8a92c5ab23
| 5,686
|
py
|
Python
|
package/tests/test_govee_parser.py
|
s-limo/bleparser
|
a1f5b55ab1c1185877ac50c45dd3685e11994cb2
|
[
"MIT"
] | null | null | null |
package/tests/test_govee_parser.py
|
s-limo/bleparser
|
a1f5b55ab1c1185877ac50c45dd3685e11994cb2
|
[
"MIT"
] | null | null | null |
package/tests/test_govee_parser.py
|
s-limo/bleparser
|
a1f5b55ab1c1185877ac50c45dd3685e11994cb2
|
[
"MIT"
] | null | null | null |
"""The tests for the Govee ble_parser."""
from bleparser import BleParser
class TestGovee:
def test_Govee_H5051(self):
"""Test Govee H5051 parser."""
data_string = "043e1902010400aabb615960e30d0cff88ec00ba0af90f63020101b7"
data = bytes(bytearray.fromhex(data_string))
# pylint: disable=unused-variable
ble_parser = BleParser()
sensor_msg, tracker_msg = ble_parser.parse_data(data)
assert sensor_msg["firmware"] == "Govee"
assert sensor_msg["type"] == "H5051"
assert sensor_msg["mac"] == "E3605961BBAA"
assert sensor_msg["packet"] == "no packet id"
assert sensor_msg["data"]
assert sensor_msg["temperature"] == 27.46
assert sensor_msg["humidity"] == 40.89
assert sensor_msg["battery"] == 99
assert sensor_msg["rssi"] == -73
def test_Govee_H5074(self):
"""Test Govee H5074 parser."""
data_string = "043e1702010400aabb611d12e00b0aff88ec0088078c116402a6"
data = bytes(bytearray.fromhex(data_string))
# pylint: disable=unused-variable
ble_parser = BleParser()
sensor_msg, tracker_msg = ble_parser.parse_data(data)
assert sensor_msg["firmware"] == "Govee"
assert sensor_msg["type"] == "H5074"
assert sensor_msg["mac"] == "E0121D61BBAA"
assert sensor_msg["packet"] == "no packet id"
assert sensor_msg["data"]
assert sensor_msg["temperature"] == 19.28
assert sensor_msg["humidity"] == 44.92
assert sensor_msg["battery"] == 100
assert sensor_msg["rssi"] == -90
def test_Govee_H5102(self):
"""Test Govee H5102 parser."""
data_string = "043e2b02010000aabb611d12e11f0d09475648353130325f43423942030388ec02010509ff0100010103cb0164aa"
data = bytes(bytearray.fromhex(data_string))
# pylint: disable=unused-variable
ble_parser = BleParser()
sensor_msg, tracker_msg = ble_parser.parse_data(data)
assert sensor_msg["firmware"] == "Govee"
assert sensor_msg["type"] == "H5101/H5102/H5177"
assert sensor_msg["mac"] == "E1121D61BBAA"
assert sensor_msg["packet"] == "no packet id"
assert sensor_msg["data"]
assert sensor_msg["temperature"] == 24.8577
assert sensor_msg["humidity"] == 57.7
assert sensor_msg["battery"] == 100
assert sensor_msg["rssi"] == -86
def test_Govee_H5075(self):
"""Test Govee H5075 parser."""
data_string = "043e2b02010000aabb6138c1a41f0d09475648353037355f43423942030388ec02010509ff88ec0003215d6400aa"
data = bytes(bytearray.fromhex(data_string))
# pylint: disable=unused-variable
ble_parser = BleParser()
sensor_msg, tracker_msg = ble_parser.parse_data(data)
assert sensor_msg["firmware"] == "Govee"
assert sensor_msg["type"] == "H5072/H5075"
assert sensor_msg["mac"] == "A4C13861BBAA"
assert sensor_msg["packet"] == "no packet id"
assert sensor_msg["data"]
assert sensor_msg["temperature"] == 20.5149
assert sensor_msg["humidity"] == 14.9
assert sensor_msg["battery"] == 100
assert sensor_msg["rssi"] == -86
def test_Govee_H5178_sensor_0(self):
"""Test Govee H5178 parser."""
data_string = "043E2B0201000045C5DF38C1A41F0A09423531373843353435030388EC0201050CFF010001010003A00F640000BF"
data = bytes(bytearray.fromhex(data_string))
# pylint: disable=unused-variable
ble_parser = BleParser()
sensor_msg, tracker_msg = ble_parser.parse_data(data)
assert sensor_msg["firmware"] == "Govee"
assert sensor_msg["type"] == "H5178"
assert sensor_msg["mac"] == "A4C138DFC545"
assert sensor_msg["packet"] == "no packet id"
assert sensor_msg["data"]
assert sensor_msg["temperature"] == 23.7583
assert sensor_msg["humidity"] == 58.3
assert sensor_msg["sensor id"] == 0
assert sensor_msg["battery"] == 100
assert sensor_msg["rssi"] == -65
def test_Govee_H5178_sensor_1(self):
"""Test Govee H5178 parser."""
data_string = "043E2B0201000045C5DF38C1A41F0A09423531373843353435030388EC0201050CFF010001010102FC87640002BF"
data = bytes(bytearray.fromhex(data_string))
# pylint: disable=unused-variable
ble_parser = BleParser()
sensor_msg, tracker_msg = ble_parser.parse_data(data)
assert sensor_msg["firmware"] == "Govee"
assert sensor_msg["type"] == "H5178"
assert sensor_msg["mac"] == "A4C138DFC545"
assert sensor_msg["packet"] == "no packet id"
assert sensor_msg["data"]
assert sensor_msg["temperature outdoor"] == 19.5719
assert sensor_msg["humidity outdoor"] == 71.9
assert sensor_msg["sensor id"] == 1
assert sensor_msg["battery"] == 100
assert sensor_msg["rssi"] == -65
def test_Govee_H5179(self):
"""Test Govee H5179 parser."""
data_string = "043E19020104006F18128132E30D0CFF0188EC000101A00AA2175BB6"
data = bytes(bytearray.fromhex(data_string))
# pylint: disable=unused-variable
ble_parser = BleParser()
sensor_msg, tracker_msg = ble_parser.parse_data(data)
assert sensor_msg["firmware"] == "Govee"
assert sensor_msg["type"] == "H5179"
assert sensor_msg["mac"] == "E3328112186F"
assert sensor_msg["packet"] == "no packet id"
assert sensor_msg["data"]
assert sensor_msg["temperature"] == 27.2
assert sensor_msg["humidity"] == 60.5
assert sensor_msg["battery"] == 91
assert sensor_msg["rssi"] == -74
| 42.432836
| 116
| 0.646324
|
70a29a19d2f354e9501d0ffc0a29a32e75a5a804
| 6,980
|
py
|
Python
|
app/templates/four_g_core/mme_user_data.py
|
kukkalli/orchestrator
|
0b53e3f95c0a886a739cf08d611ea76c958bc691
|
[
"Apache-2.0"
] | 1
|
2022-03-02T09:43:45.000Z
|
2022-03-02T09:43:45.000Z
|
app/templates/four_g_core/mme_user_data.py
|
kukkalli/orchestrator
|
0b53e3f95c0a886a739cf08d611ea76c958bc691
|
[
"Apache-2.0"
] | null | null | null |
app/templates/four_g_core/mme_user_data.py
|
kukkalli/orchestrator
|
0b53e3f95c0a886a739cf08d611ea76c958bc691
|
[
"Apache-2.0"
] | null | null | null |
import logging
LOG = logging.getLogger(__name__)
class MMEUserData:
USERDATA = """#!/bin/bash
cat > /home/ubuntu/.ssh/authorized_keys << EOF
ecdsa-sha2-nistp521 AAAAE2VjZHNhLXNoYTItbmlzdHA1MjEAAAAIbmlzdHA1MjEAAACFBAGxlZsduAGeKqz3UhzHeXiJOsRlBQTZIyOxA0DrXso9ncDveooDqUr+Xw5XZx44nHFNjWocoQowDdaA8jj0DYEs9wF5ELGj/rm4n6a1b6tXVAlb3Vojb5C0mZfx2gUA6i5GNnNXONRttaW53XeOoD/VDM9tlgBnpa04bBQ1naTiLbQsQg== os@controller
ecdsa-sha2-nistp521 AAAAE2VjZHNhLXNoYTItbmlzdHA1MjEAAAAIbmlzdHA1MjEAAACFBAFJ/TSfJegktNbVbCF2L1hte8qfDtgk/zArlNq4vgEAKRePSEYnoFldlGVn5zDqnvLP2xy6WrcFUjO2TOeTnmqQ1gEzcBOjUXeYdA7LO1J8yARvvAMOk4IiuVTvGUdCIW8uDpXwfqCxqeKbSudo3LVLgt/ZcRg1QENyRLP/zqixIJoEsA== os@compute01
ecdsa-sha2-nistp521 AAAAE2VjZHNhLXNoYTItbmlzdHA1MjEAAAAIbmlzdHA1MjEAAACFBACnHtnIvTXuTG2I5ngNXKUYu/h7izkEGPmfZpqIeuXcQIY0miX7k+9snBvPXKuxp5nYspOZuOzsHs4JEE3l/+ftcgHvF7w3SD5CtdTfGMhUwGHtcpWtKfj18+FiDwh9wK4m6exBChpfBTU1q14LPZBR7xg9KZULWGddugmffUK1SMoWdg== os@compute02
ecdsa-sha2-nistp521 AAAAE2VjZHNhLXNoYTItbmlzdHA1MjEAAAAIbmlzdHA1MjEAAACFBAG6PCMQcMNvSA4yRmHETOYdj60fsJo4n8FOBKmlw2fJR7xWMND0DQWTVvPssv3bw1iKn5zLbx4aeVd7idKT00HsjwB4mX1/+UBVUeP/21tp50J3XsG5Pdwz4JL6LeRWvurKoU66bpBR5u0Iuo9VrJlHfn3GbCiHzke7uUt3QBmBWkxroQ== sdn@sdnc
ecdsa-sha2-nistp521 AAAAE2VjZHNhLXNoYTItbmlzdHA1MjEAAAAIbmlzdHA1MjEAAACFBACU7DStTMa4kHmnZ6kiNkQHrW4CYW9kOKkR8xQa3yBvPDG0IYv0MuUJg2lY5TfdhmXNWELPYHlZxieOC60HTD/vzACoV3268mlJNYGE+ju4iQq+QXaUSwog4YkQs4aDCpylyDRJYWFe8YP97/xFOzR5P5bxCYcJZQLlwWa/+294kW29hQ== hanif@openstack
ecdsa-sha2-nistp521 AAAAE2VjZHNhLXNoYTItbmlzdHA1MjEAAAAIbmlzdHA1MjEAAACFBAEBCbxZcyGw+PD3S/HoPx/WKhfGOz4Mos3OGQ4Q2rvh7UpNBE4UVp/xOBcFoL0WveHI+WskQV0jKa7TnErjVwEsOAAX6O4DxaskATGq6XioPv2XmRGKb5UZ28NUCE+VLhUvnFLLn2IMiCSiNzCU8hX0rjsU6/hHjDyV01Iahq2gAY6E7Q== hanif@openstack
ecdsa-sha2-nistp521 AAAAE2VjZHNhLXNoYTItbmlzdHA1MjEAAAAIbmlzdHA1MjEAAACFBAHLT0AS5MHwwJ6hX1Up5stfz361+IWA/8/MhZBH+mYA32h/Bp5hSWkQDXow4aDiHRlxVV1WLlHHup+GPBBA9XLTRwHP8gAjbP5EM4EVxR9EbDh5Hz13xcN0/n9J9rasefHS8UgTJUgRrWeNRCSAhkbNfDfSeQzk8NWlzhiwwCIUacKnzg== hanif@kukkalli
EOF
DOMAIN="@@domain@@"
INTERFACES=$(find /sys/class/net -mindepth 1 -maxdepth 1 ! -name lo ! -name docker -printf "%P " -execdir cat {}/address \;)
first=true
interface_name=""
sudo rm /etc/netplan/50-cloud-init.yaml
sudo -- sh -c "echo 'network:' >> /etc/netplan/50-cloud-init.yaml"
sudo -- sh -c "echo ' ethernets:' >> /etc/netplan/50-cloud-init.yaml"
# shellcheck disable=SC2068
for i in ${INTERFACES[@]};
do
if ${first}
then
first=false
interface_name=${i}
sudo -- sh -c "echo ' ${interface_name}:' >> /etc/netplan/50-cloud-init.yaml"
sudo -- sh -c "echo ' dhcp4: true' >> /etc/netplan/50-cloud-init.yaml"
else
first=true
sudo -- sh -c "echo ' match:' >> /etc/netplan/50-cloud-init.yaml"
sudo -- sh -c "echo ' macaddress: ${i}' >> /etc/netplan/50-cloud-init.yaml"
sudo -- sh -c "echo ' set-name: ${interface_name}' >> /etc/netplan/50-cloud-init.yaml"
fi
done
sudo -- sh -c "echo ' version: 2' >> /etc/netplan/50-cloud-init.yaml"
sudo -- sh -c "echo 'network: {config: disabled}' >> /etc/cloud/cloud.cfg.d/99-disable-network-config.cfg"
sudo netplan apply
HOSTNAME=$(hostname -s)
sudo hostnamectl set-hostname "$HOSTNAME"."$DOMAIN"
FQDN_HOSTNAME=$(hostname)
sudo rm /etc/hosts
cat > /etc/hosts << EOF
127.0.0.1 localhost
127.0.1.1 ${FQDN_HOSTNAME} ${HOSTNAME}
# The following lines are desirable for IPv6 capable hosts'
::1 ip6-localhost ip6-loopback
fe00::0 ip6-localnet
ff00::0 ip6-mcastprefix
ff02::1 ip6-allnodes
ff02::2 ip6-allrouters
ff02::3 ip6-allhosts
EOF
# : <<'END'
sudo apt-get update
sudo apt-get install -y ca-certificates curl gnupg lsb-release
curl -fsSL https://download.docker.com/linux/ubuntu/gpg | sudo gpg --dearmor -o /usr/share/keyrings/docker-archive-keyring.gpg
echo \
"deb [arch=$(dpkg --print-architecture) signed-by=/usr/share/keyrings/docker-archive-keyring.gpg] https://download.docker.com/linux/ubuntu \
$(lsb_release -cs) stable" | sudo tee /etc/apt/sources.list.d/docker.list > /dev/null
sudo apt-get update
sudo apt-get install docker-ce docker-ce-cli containerd.io -y
docker --version
cat > /etc/docker/daemon.json << EOF
{
"log-driver": "json-file",
"log-opts": {
"max-size": "1m",
"max-file": "9"
},
"ipv6": true,
"fixed-cidr-v6": "2001:db8:1::/64"
}
EOF
sudo usermod -aG docker ubuntu
sudo systemctl daemon-reload
sudo systemctl restart docker
IP_ADDR=$(ip address |grep ens|grep inet|awk '{print $2}'| awk -F / '{print $1}')
echo "MME FQDN $FQDN_HOSTNAME"
sudo -- sh -c "echo '' >> /etc/hosts"
for i in $IP_ADDR; do
sudo -- sh -c "echo $i $HOSTNAME $FQDN_HOSTNAME >> /etc/hosts"
if [[ $i == "10.10"* ]];
then
MANAGEMENT_IP=$i
fi
if [[ $i == "10.11"* ]];
then
export FABRIC_IP=$i
fi
done
sudo curl -L "https://github.com/docker/compose/releases/download/1.29.2/docker-compose-$(uname -s)-$(uname -m)" -o /usr/local/bin/docker-compose
sudo chmod +x /usr/local/bin/docker-compose
echo "--------------- docker-compose version is: ---------------"
docker-compose --version
HSS_IP="@@hss_ip@@"
HSS_HOSTNAME="@@hss_hostname@@"
sudo -- sh -c "echo $HSS_IP $HSS_HOSTNAME $HSS_FQDN >> /etc/hosts"
su - ubuntu
export MANAGEMENT_IP="$MANAGEMENT_IP"
export FABRIC_IP="$FABRIC_IP"
export FABRIC_IP="$MANAGEMENT_IP"
# DOCKER_PASS="@@docker_pass@@"
# docker login -u kukkalli -p ${DOCKER_PASS}
cd /home/ubuntu/ || exit
git clone https://github.com/kukkalli/oai-docker-compose.git
chown ubuntu:ubuntu -R oai-docker-compose
cd oai-docker-compose/4g/mme/ || exit
export DOMAIN="$DOMAIN"
echo "DOMAIN is: $DOMAIN"
export REALM="$DOMAIN"
echo "REALM is: $REALM"
export HSS_IP="$HSS_IP"
echo "HSS IP is: $HSS_IP"
export HSS_HOSTNAME="$HSS_HOSTNAME"
echo "HSS HOSTNAME is:" $HSS_HOSTNAME
export HSS_FQDN="$HSS_HOSTNAME"."$DOMAIN"
echo "HSS FQDN is: $HSS_FQDN"
MCC="@@mcc@@"
export MCC="$MCC"
echo "MCC is: $MCC"
MNC="@@mnc@@"
export MNC="$MNC"
echo "MNC is: $MNC"
MME_GID="@@mme_gid@@" # 32768
export MME_GID="$MME_GID"
echo "MME GID is: $MME_GID"
MME_CODE="@@mme_code@@" # 3
export MME_CODE="$MME_CODE"
echo "MME CODE is: $MME_CODE"
SGWC_IP_ADDRESS="@@sgwc_ip_address@@"
export SGWC_IP_ADDRESS="$SGWC_IP_ADDRESS"
echo "SGWC IP is: $SGWC_IP_ADDRESS"
MME_HOSTNAME="$(hostname -s)"
export MME_HOSTNAME="$MME_HOSTNAME"
echo "MME hostname is $MME_HOSTNAME"
export TZ="Europe/Berlin"
echo "Timezone is $TZ"
export HSS_REALM="$DOMAIN"
echo "HSS Realm is $HSS_REALM"
export MME_FQDN="$FQDN_HOSTNAME"
echo "MME FQDN is $MME_FQDN"
# Update mme.conf file before pushing it to docker
echo "Update mme.conf file before pushing it to docker"
./update_mme_conf.sh
# Wait for HSS to be up and running
echo "Waiting for HSS at IP: $HSS_IP to be up and running"
./wait-for-hss.sh "$HSS_IP"
echo "HSS at IP: $HSS_IP is up and running"
docker-compose up -d magma_mme
docker ps
exit 0
"""
| 33.397129
| 268
| 0.743696
|
097f8a1842e4111c4e3ade7538b0d707be625cf3
| 6,732
|
py
|
Python
|
app/analyzers/html_parsers/generic.py
|
MinistereSupRecherche/bso
|
82467213f531d2aee3a08258c5eb9c4467470960
|
[
"MIT"
] | 17
|
2019-10-21T13:14:46.000Z
|
2022-01-14T16:32:07.000Z
|
app/analyzers/html_parsers/generic.py
|
MinistereSupRecherche/bso
|
82467213f531d2aee3a08258c5eb9c4467470960
|
[
"MIT"
] | 2
|
2021-03-31T19:19:09.000Z
|
2021-12-13T20:22:00.000Z
|
app/analyzers/html_parsers/generic.py
|
MinistereSupRecherche/bso
|
82467213f531d2aee3a08258c5eb9c4467470960
|
[
"MIT"
] | 1
|
2021-05-17T06:47:04.000Z
|
2021-05-17T06:47:04.000Z
|
import bs4
from bs4 import BeautifulSoup
import re
import datetime
from doi_utils import *
from app.utils.logger import create_logger
logger = create_logger(__name__)
affiliation_keywords = ['affiliation','affiliations','institution','institutions',\
'author','authors','country', 'auteur', 'auteurs', 'authoraffiliates', 'profile', 'affilia',\
'affiliationscontainer', 'contributors', 'contributor', 'authoraff', 'lblaffiliates', \
'affiliates', 'affiliate', 'scalex']
affiliation_regex = construct_regex(affiliation_keywords)
negative_match = "^_gaq|\(1[0-9][0-9]{2}|\(20[0-9]{2}|(;|,|\.).1[0-9][0-9]{2}|(;|,|\.).20[0-9]{2}|p\..[0-9]|^, «|France 2,|[0-9].p\.|^,.avec|resses universitaires|^,.catalogue|\(éd\.|par exemple|ibl\. |^,.éd\.|,.ed\.|,^Éd\.|^,(e|é|E|É)dit|^, et |il est |il a |notamment|, PUF|Dunod|Gallimard|Grasset|Hachette|Harmattan|Honoré Champion|Presses de|Larousse|Le Seuil|Albin Michel|Armand Collin|Belin|Belles Lettres|, (E|É)dition|Flammarion|Grasset|mprimerie|Karthala|La Découverte|Le Robert|Les Liens qui libèrent|Masson|Payot|Plon|Pocket|, Seuil|, Vrin|Odile Jacob|Fayard|^, thèse|(V|v)ol\.| eds\. |Octarès|Ellipses|Dalloz|Syros|^In |^(L|l)a |(L|l)es |logo cnrs|[0-9]{2}–[0-9]{2}| et al\.|Monod|National agreement in France|ovh-france|ar exemple|Pygmalion|Minuit|Puf|Karthala|^(P|p)our |^Presented|^(R|r)apport|^Reproducted from|^Revue|^Rev|^We thank|Zartman| price | VAT |^Support|^Sur |(S|s)pringer|^siècle|@|\.png|http|^(t|T)he |^\\n|^Does|(T|t)èse de"
def find_fr_affiliation (elt, verbose = False):
if type(elt) == bs4.element.Tag:
return find_fr_affiliation_elt(elt, verbose)
#elif type(elt)==bs4.element.NavigableString:
else:
return find_fr_affiliation_txt(elt, verbose)
logger.debug ('error')
logger.debug(type(elt))
logger.debug(elt)
return None
def find_fr_affiliation_elt(elt, verbose = False):
is_fr = False
affiliations = []
forbidden_words = ['reference', 'http','bibliog']
for att in elt.attrs:
attribute_value = elt.attrs[att]
if type(attribute_value)==str:
attribute_value = attribute_value.lower()
elif type(attribute_value)==list:
attribute_value = (" ".join(attribute_value)).lower()
if 'title' in attribute_value.lower():
return False, []
for w in forbidden_words:
if w in attribute_value or w in att.lower():
return False, []
if re.search(fr_regex, attribute_value):
if(verbose):
logger.debug('fr_kw in attribute value: ' + attribute_value)
affiliations.append(attribute_value)
is_fr = True
if elt.findChildren()==[] and re.search(fr_regex, elt.get_text().lower()):
if(verbose):
logger.debug('fr_kw in elt - text: ' + elt.get_text().lower())
affiliations.append(elt.get_text())
is_fr = True
return is_fr, list(set(affiliations))
def find_fr_affiliation_txt(elt, verbose = False):
is_fr = False
affiliations = []
# for w in forbidden_words:
# if w in elt.lower():
# return False, []
if re.search(fr_regex, elt.lower()):
if(verbose):
logger.debug('fr_kw in text: ' + elt.lower())
affiliations.append(elt)
is_fr = True
return is_fr, list(set(affiliations))
def post_filter(x):
affiliations_fr = x['affiliations_fr']
affiliations_fr_filtered = []
for e in affiliations_fr:
if re.search(negative_match, e)==None and len(e)<250:
affiliations_fr_filtered.append(e)
is_french = (len(affiliations_fr_filtered) >0)
return {'is_french':is_french, 'affiliations_fr':affiliations_fr_filtered}
def handler(signum, frame):
logger.debug("Forever is over!")
raise Exception("end of time")
def parse_generic(soup, verbose = False):
#remove all options
[x.extract() for x in soup.findAll('option')]
[x.extract() for x in soup.findAll('title')]
[x.extract() for x in soup.findAll(class_ = 'fig-caption')]
[x.extract() for x in soup.findAll(class_ = 'materials-methods')]
[x.extract() for x in soup.findAll(class_ = 'toc-section')]
is_french=False
all_affiliations=[]
possible_elts = [e for e in soup.descendants if type(e)==bs4.element.Tag]
if(possible_elts == []):
return {'is_french': is_french, 'affiliations_fr':all_affiliations}
try:
elt_to_check = []
for elt in possible_elts:
is_affiliation_elt = False
if elt==None:
continue
if len(elt.findChildren())==0 and re.search(affiliation_regex, elt.get_text().lower()):
is_affiliation_elt = True
if(verbose):
logger.debug('kw 1 affiliation in '+ elt.get_text().lower())
for sub_elt in elt.findChildren():
if sub_elt.find('sup'):
is_affiliation_elt = True
if(verbose):
logger.debug('kw sup affiliation in '+ elt.get_text().lower())
for att in elt.attrs:
attribute_value = elt.attrs[att]
if type(attribute_value)==str:
attribute_value = attribute_value.lower()
elif type(attribute_value)==list:
attribute_value = (" ".join(attribute_value)).lower()
if re.search(affiliation_regex, att.lower()) or re.search(affiliation_regex, attribute_value):
is_affiliation_elt = True
if(verbose):
logger.debug('*********************')
logger.debug('kw2 affiliation in ')
logger.debug(att.lower())
logger.debug(attribute_value)
logger.debug('*********************')
if(is_affiliation_elt):
elt_to_check.append(elt)
elt_to_check += [e for e in elt.descendants]
all_affiliations = []
for elt in list(set(elt_to_check)):
is_french, affiliations = find_fr_affiliation(elt, verbose)
if is_french:
all_affiliations += affiliations
all_affiliations = list(set(all_affiliations))
if len(all_affiliations) > 0:
is_french = True
else:
is_french = False
except:
pass
return post_filter({'is_french': is_french, 'affiliations_fr':all_affiliations})
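
# Rough flow of parse_generic (summary added for clarity): candidate elements are
# collected when their text or attributes match affiliation_regex, or when they
# contain a <sup> child; each candidate and its descendants are then scanned for
# French markers with find_fr_affiliation, and post_filter drops matches that hit
# negative_match or are at least 250 characters long.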
| 37.608939
| 953
| 0.5918
|
f4df80bf881cee1b2dbe74cbb08065f404641f5a
| 419
|
py
|
Python
|
Section 4 - Regular Expression/regex3.py
|
nachordo/Hands-On-Natural-Language-Processing-with-Python
|
e37e25a41a122bdb9b4c5619c7651f33a9c09eb8
|
[
"MIT"
] | null | null | null |
Section 4 - Regular Expression/regex3.py
|
nachordo/Hands-On-Natural-Language-Processing-with-Python
|
e37e25a41a122bdb9b4c5619c7651f33a9c09eb8
|
[
"MIT"
] | null | null | null |
Section 4 - Regular Expression/regex3.py
|
nachordo/Hands-On-Natural-Language-Processing-with-Python
|
e37e25a41a122bdb9b4c5619c7651f33a9c09eb8
|
[
"MIT"
] | null | null | null |
# Introduction to Python Regular Expressions
# Importing Libraries
import re
pattern1 = "I love Avengers" #I love Justice League
print(re.sub(r"Avengers","Justice League",pattern1))
print(re.sub(r"[a-z]","0",pattern1,1,flags=re.I)) #re.I --- Case insensitive
#############################
print(re.sub(r"[a-z]","0",pattern1))
print(re.sub(r"[a-z]","0",pattern1,flags=re.I))
print(re.sub(r"[a-z]","0",pattern1,7))
| 24.647059
| 76
| 0.637232
|
ac1c85668a9bd509748b1a53906039614d33f427
| 95
|
py
|
Python
|
modelflow/notused__init__.py
|
IbHansen/ModelFlow
|
09b1f911332f3d0af700ec65d46e8d4a53335e19
|
[
"X11"
] | 2
|
2019-06-13T15:50:42.000Z
|
2019-06-13T15:51:05.000Z
|
modelflow/notused__init__.py
|
IbHansen/modelflow
|
09b1f911332f3d0af700ec65d46e8d4a53335e19
|
[
"X11"
] | null | null | null |
modelflow/notused__init__.py
|
IbHansen/modelflow
|
09b1f911332f3d0af700ec65d46e8d4a53335e19
|
[
"X11"
] | 1
|
2019-05-10T09:35:59.000Z
|
2019-05-10T09:35:59.000Z
|
# -*- coding: utf-8 -*-
"""
Created on Sat Feb 29 09:54:51 2020
@author: bruger
"""
| 10.555556
| 36
| 0.515789
|
6e26bb70d4d4445b3c8cf3acd64316b9da1fe84e
| 157
|
py
|
Python
|
wush/web/__init__.py
|
wxnacy/wush
|
30620144f7a6fb676d210dd9463b77894f956b38
|
[
"MIT"
] | null | null | null |
wush/web/__init__.py
|
wxnacy/wush
|
30620144f7a6fb676d210dd9463b77894f956b38
|
[
"MIT"
] | null | null | null |
wush/web/__init__.py
|
wxnacy/wush
|
30620144f7a6fb676d210dd9463b77894f956b38
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
# -*- coding:utf-8 -*-
# Author: wxnacy@gmail.com
"""
"""
from .wush import Wapi
from .common import functions
__all__ = ['Wapi', ]
| 13.083333
| 29
| 0.630573
|
8847fb635670d68bdd2c499b95d710027817a6c8
| 237
|
py
|
Python
|
learning/ecc/_init_.py
|
Han-xv/RGGCNet
|
5b1ccf9be4bd1fe91381624ae0b4b7e16296df89
|
[
"MIT"
] | 1
|
2020-03-26T08:41:11.000Z
|
2020-03-26T08:41:11.000Z
|
learning/ecc/_init_.py
|
Han-xv/RGGCNet
|
5b1ccf9be4bd1fe91381624ae0b4b7e16296df89
|
[
"MIT"
] | null | null | null |
learning/ecc/_init_.py
|
Han-xv/RGGCNet
|
5b1ccf9be4bd1fe91381624ae0b4b7e16296df89
|
[
"MIT"
] | 2
|
2020-03-25T11:47:29.000Z
|
2020-03-26T08:42:25.000Z
|
from .GraphConvInfo import GraphConvInfo
from .GraphConvModule import GraphConvModule, GraphConvFunction
from .GraphPoolInfo import GraphPoolInfo
from .GraphPoolModule import GraphAvgPoolModule, GraphMaxPoolModule
from .utils import *
| 29.625
| 67
| 0.864979
|
d42222ed097811a46e0759bf8768769da7a98fa2
| 805
|
py
|
Python
|
OpenGLWrapper_JE/venv/Lib/site-packages/OpenGL/WGL/ATI/pixel_format_float.py
|
JE-Chen/je_old_repo
|
a8b2f1ac2eec25758bd15b71c64b59b27e0bcda5
|
[
"MIT"
] | null | null | null |
OpenGLWrapper_JE/venv/Lib/site-packages/OpenGL/WGL/ATI/pixel_format_float.py
|
JE-Chen/je_old_repo
|
a8b2f1ac2eec25758bd15b71c64b59b27e0bcda5
|
[
"MIT"
] | null | null | null |
OpenGLWrapper_JE/venv/Lib/site-packages/OpenGL/WGL/ATI/pixel_format_float.py
|
JE-Chen/je_old_repo
|
a8b2f1ac2eec25758bd15b71c64b59b27e0bcda5
|
[
"MIT"
] | null | null | null |
'''OpenGL extension ATI.pixel_format_float
This module customises the behaviour of the
OpenGL.raw.WGL.ATI.pixel_format_float to provide a more
Python-friendly API
The official definition of this extension is available here:
http://www.opengl.org/registry/specs/ATI/pixel_format_float.txt
'''
from OpenGL import platform, constant, arrays
from OpenGL import extensions, wrapper
import ctypes
from OpenGL.raw.WGL import _types, _glgets
from OpenGL.raw.WGL.ATI.pixel_format_float import *
from OpenGL.raw.WGL.ATI.pixel_format_float import _EXTENSION_NAME
def glInitPixelFormatFloatATI():
'''Return boolean indicating whether this extension is available'''
from OpenGL import extensions
return extensions.hasGLExtension( _EXTENSION_NAME )
### END AUTOGENERATED SECTION
| 35
| 72
| 0.790062
|
8bf53f89bfd22feb0565b40a41992d1ad40a4ec1
| 552
|
py
|
Python
|
samples/python/llist.py
|
peterzhou84/datastructure
|
89cca5fcd887dc06ba16074999193367bcc825e3
|
[
"MIT"
] | null | null | null |
samples/python/llist.py
|
peterzhou84/datastructure
|
89cca5fcd887dc06ba16074999193367bcc825e3
|
[
"MIT"
] | null | null | null |
samples/python/llist.py
|
peterzhou84/datastructure
|
89cca5fcd887dc06ba16074999193367bcc825e3
|
[
"MIT"
] | null | null | null |
#coding=utf-8
'''
Linked list example, from teacher Zhou Haijun
'''
class lnode:
def __init__(self,elem,nexte=None):
self.elem=elem
self.next=nexte
def inserhead(head,x): # insert element x at the head of the list
p=lnode(x)
p.next=head
head=p
return head
def printli(head):
p=head
# print(p.elem)
a=[]
while p!=None:
#print(p.elem)
a.append(p.elem)
p=p.next
print(a)
head=lnode(1)
b=[]
b.append(head.elem)
for i in range(2,9):
head = inserhead(head,i)
#p=lnode(i)#b.append(p.elem)
#p.next=head
#head=p
printli(head)
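
# Expected output of printli(head) above (added for clarity): 2..8 are pushed onto
# the head in order, so traversal from the head yields [8, 7, 6, 5, 4, 3, 2, 1].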
| 17.25
| 39
| 0.572464
|
8c47b9afd6c078106102de57e44fe21711fbc135
| 4,545
|
py
|
Python
|
pixloc/run_CMU.py
|
jkabalar/pixloc
|
f9ca721977a5c3246ee5cf40fe3a214fef7cf3f2
|
[
"Apache-2.0"
] | 457
|
2021-03-17T00:39:33.000Z
|
2022-03-30T02:38:19.000Z
|
pixloc/run_CMU.py
|
jkabalar/pixloc
|
f9ca721977a5c3246ee5cf40fe3a214fef7cf3f2
|
[
"Apache-2.0"
] | 31
|
2021-03-17T07:35:34.000Z
|
2022-03-31T07:07:56.000Z
|
pixloc/run_CMU.py
|
jkabalar/pixloc
|
f9ca721977a5c3246ee5cf40fe3a214fef7cf3f2
|
[
"Apache-2.0"
] | 56
|
2021-03-17T05:55:09.000Z
|
2022-03-15T01:38:35.000Z
|
import pickle
from . import set_logging_debug, logger
from .localization import RetrievalLocalizer, PoseLocalizer
from .utils.data import Paths, create_argparser, parse_paths, parse_conf
from .utils.io import write_pose_results, concat_results
default_paths = Paths(
query_images='slice{slice}/query/',
reference_images='slice{slice}/database',
reference_sfm='slice{slice}/sfm_superpoint+superglue/model/',
query_list='slice{slice}/queries_with_intrinsics.txt',
global_descriptors='slice{slice}/cmu-slice{slice}_tf-netvlad.h5',
retrieval_pairs='slice{slice}/pairs-query-netvlad10.txt',
hloc_logs='slice{slice}/CMU_hloc_superpoint+superglue_netvlad10.txt_logs.pkl',
results='pixloc_CMU_slice{slice}.txt',
)
experiment = 'pixloc_cmu'
default_confs = {
'from_retrieval': {
'experiment': experiment,
'features': {},
'optimizer': {
'num_iters': 100,
'pad': 2,
},
'refinement': {
'num_dbs': 2,
'point_selection': 'all',
'normalize_descriptors': True,
'average_observations': False,
'filter_covisibility': False,
'do_pose_approximation': False,
},
},
'from_poses': {
'experiment': experiment,
'features': {},
'optimizer': {
'num_iters': 100,
'pad': 2,
},
'refinement': {
'num_dbs': 5,
'min_points_opt': 100,
'point_selection': 'inliers',
'normalize_descriptors': True,
'average_observations': False,
'layer_indices': [0, 1],
},
},
}
TEST_URBAN = [2, 3, 4, 5, 6]
TEST_SUBURBAN = [13, 14, 15, 16, 17]
TEST_PARK = [18, 19, 20, 21]
TEST_SLICES_CMU = TEST_URBAN + TEST_SUBURBAN + TEST_PARK
TRAINING_SLICES_CMU = [7, 8, 9, 10, 11, 12, 22, 23, 24, 25]
def generate_query_list(paths, slice_):
cameras = {}
with open(paths.dataset / 'intrinsics.txt', 'r') as f:
for line in f.readlines():
if line[0] == '#' or line == '\n':
continue
data = line.split()
cameras[data[0]] = data[1:]
assert len(cameras) == 2
queries = paths.dataset / f'slice{slice_}/test-images-slice{slice_}.txt'
with open(queries, 'r') as f:
queries = [q.rstrip('\n') for q in f.readlines()]
out = [[q] + cameras[q.split('_')[2]] for q in queries]
with open(paths.query_list, 'w') as f:
f.write('\n'.join(map(' '.join, out)))
def parse_slice_arg(slice_str):
if slice_str is None:
slices = TEST_SLICES_CMU
logger.info(
'No slice list given, will evaluate all %d test slices; '
'this might take a long time.', len(slices))
elif '-' in slice_str:
min_, max_ = slice_str.split('-')
slices = list(range(int(min_), int(max_)+1))
else:
slices = eval(slice_str)
if isinstance(slices, int):
slices = [slices]
return slices
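
# Examples of how parse_slice_arg interprets --slices (illustrative only):
#   None -> all test slices (TEST_SLICES_CMU)
#   "2-6" -> [2, 3, 4, 5, 6]
#   "3" -> [3] (eval yields an int, which is wrapped in a list)
#   "[2, 4]" -> [2, 4] (any Python-style list literal is accepted)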
def main():
parser = create_argparser('CMU')
parser.add_argument('--slices', type=str,
help='a single number, an interval (e.g. 2-6), '
'or a Python-style list or int (e.g. [2, 3, 4]')
args = parser.parse_intermixed_args()
set_logging_debug(args.verbose)
paths = parse_paths(args, default_paths)
conf = parse_conf(args, default_confs)
slices = parse_slice_arg(args.slices)
all_results = []
logger.info('Will evaluate slices %s.', slices)
for slice_ in slices:
logger.info('Working on slice %s.', slice_)
paths_slice = paths.interpolate(slice=slice_)
all_results.append(paths_slice.results)
if paths_slice.results.exists():
continue
if not paths_slice.query_list.exists():
generate_query_list(paths_slice, slice_)
if args.from_poses:
localizer = PoseLocalizer(paths_slice, conf)
else:
localizer = RetrievalLocalizer(paths_slice, conf)
poses, logs = localizer.run_batched(skip=args.skip)
write_pose_results(poses, paths_slice.results)
with open(f'{paths_slice.results}_logs.pkl', 'wb') as f:
pickle.dump(logs, f)
output_path = concat_results(all_results, slices, paths.results, 'slice')
logger.info(
'Finished evaluating all slices, you can now submit the file %s to '
'https://www.visuallocalization.net/submission/', output_path)
if __name__ == '__main__':
main()
| 32.697842
| 82
| 0.60484
|
106ce621fa17ecd41c95e704ece1b7a7d470aa5f
| 167
|
py
|
Python
|
settings.py
|
salmanwahed/salman-wahed.appspot.com
|
f854f4d023509e0cf4923ca1cddfaf2cc368799c
|
[
"Apache-2.0"
] | null | null | null |
settings.py
|
salmanwahed/salman-wahed.appspot.com
|
f854f4d023509e0cf4923ca1cddfaf2cc368799c
|
[
"Apache-2.0"
] | null | null | null |
settings.py
|
salmanwahed/salman-wahed.appspot.com
|
f854f4d023509e0cf4923ca1cddfaf2cc368799c
|
[
"Apache-2.0"
] | null | null | null |
__author__ = 'salman'
SMTPserver = 'smtp.sendgrid.net'
USERNAME = "salmanwahed"
PASSWORD = "smw@$endGrid"
TO_ADDRESS = '<salman2312@gmail.com>'
text_subtype = 'plain'
| 23.857143
| 37
| 0.736527
|
45de05ea3d776be3844762518a668797fe9b31af
| 7,858
|
py
|
Python
|
bluebottle/fsm/state.py
|
jayvdb/bluebottle
|
305fea238e6aa831598a8b227223a1a2f34c4fcc
|
[
"BSD-3-Clause"
] | null | null | null |
bluebottle/fsm/state.py
|
jayvdb/bluebottle
|
305fea238e6aa831598a8b227223a1a2f34c4fcc
|
[
"BSD-3-Clause"
] | null | null | null |
bluebottle/fsm/state.py
|
jayvdb/bluebottle
|
305fea238e6aa831598a8b227223a1a2f34c4fcc
|
[
"BSD-3-Clause"
] | null | null | null |
from builtins import str
from builtins import object
from django.utils.translation import ugettext_lazy as _
from django.dispatch import Signal
from future.utils import with_metaclass
from stripe.six import python_2_unicode_compatible
class TransitionNotPossible(Exception):
pass
@python_2_unicode_compatible
class BaseTransition(object):
def __init__(self, sources, target, name='', description='',
automatic=True, conditions=None, effects=None, **options):
self.name = name
if not isinstance(sources, (list, tuple)):
sources = (sources, )
self.sources = sources
self.target = target
self.automatic = automatic
self.conditions = conditions or []
self.effects = effects or []
self.description = description
assert not (not self.automatic and not self.name), 'Automatic transitions should have a name'
self.options = options
@property
def source_values(self):
return [source.value for source in self.sources]
def is_valid(self, machine):
if not all(condition(machine) for condition in self.conditions):
raise TransitionNotPossible(
_('Conditions not met for transition')
)
def can_execute(self, machine, automatic=True, **kwargs):
self.is_valid(machine)
if not automatic and self.automatic:
raise TransitionNotPossible(
_('Cannot transition from {} to {}').format(machine.state, self.target)
)
if not (
machine.state in self.source_values or
(AllStates() in self.sources)
):
raise TransitionNotPossible(
_('Cannot transition from {} to {}').format(machine.state, self.target)
)
def on_execute(self, machine):
machine.state = self.target.value
def execute(self, machine, **kwargs):
self.can_execute(machine, **kwargs)
self.on_execute(machine, **kwargs)
def __get__(self, instance, owner):
if instance:
def func(**kwargs):
return self.execute(instance, **kwargs)
return func
else:
return self
def __repr__(self):
return '<Transition from {} to {}>'.format(self.sources, self.target)
def __str__(self):
return str(self.name or self.field)
pre_state_transition = Signal(providing_args=['instance', 'transition', 'kwargs'])
post_state_transition = Signal(providing_args=['instance', 'transition', 'kwargs'])
class Transition(BaseTransition):
def __init__(self, sources, target, *args, **kwargs):
self.permission = kwargs.get('permission')
super(Transition, self).__init__(sources, target, *args, **kwargs)
def can_execute(self, machine, user=None, **kwargs):
result = super(Transition, self).can_execute(machine, **kwargs)
if self.permission and user and not user.is_staff and not self.permission(machine, user):
raise TransitionNotPossible(
_('You are not allowed to perform this transition')
)
        if self.permission:
            return result and (not user or self.permission(machine, user))
        else:
            return result
def on_execute(self, machine, save=False, effects=True, **kwargs):
pre_state_transition.send(
sender=machine.instance.__class__,
instance=machine.instance,
transition=self,
**kwargs
)
super(Transition, self).on_execute(machine)
if effects:
for effect in self.effects:
for effect in effect(machine.instance, **kwargs).all_effects():
if effect not in machine.instance._effects:
machine.instance._effects.append(effect)
if save:
machine.save()
post_state_transition.send(
sender=machine.instance.__class__,
instance=machine.instance,
transition=self,
**kwargs
)
@python_2_unicode_compatible
class State(object):
transition_class = Transition
def __init__(self, name, value=None, description=''):
self.name = name
self.value = value
self.description = description
def __repr__(self):
return '<State {}>'.format(self.name)
def __str__(self):
return str(self.name)
class EmptyState(State):
_instance = None
def __new__(cls, *args, **kwargs):
if not cls._instance:
cls._instance = super(EmptyState, cls).__new__(
cls, *args, **kwargs)
return cls._instance
def __init__(self):
super(EmptyState, self).__init__('empty', '')
def __repr__(self):
return '<EmptyState {}>'.format(self.name)
class AllStates(State):
_instance = None
def __new__(cls, *args, **kwargs):
if not cls._instance:
cls._instance = super(AllStates, cls).__new__(
cls, *args, **kwargs)
return cls._instance
def __init__(self):
super(AllStates, self).__init__('all', '')
def __repr__(self):
return '<All States {}>'.format(self.name)
class StateMachineMeta(type):
def __new__(cls, name, bases, dct):
result = type.__new__(cls, name, bases, dct)
states = dict(
(key, getattr(result, key))
for key in dir(result)
if isinstance(getattr(result, key), State) and key != 'initial'
)
result.states = states
transitions = dict(
(key, getattr(result, key))
for key in dir(result)
if isinstance(getattr(result, key), Transition)
)
for key, transition in list(transitions.items()):
transition.field = key
result.transitions = transitions
return result
class StateMachine(with_metaclass(StateMachineMeta, object)):
@property
def initial_transition(self):
initial_transitions = [
transition
for transition in list(self.transitions.values())
if EmptyState() in transition.sources
]
if (len(initial_transitions)) > 1:
raise AssertionError(
'Found multiple transitions from empty state'
)
if initial_transitions:
return initial_transitions[0]
@property
def current_state(self):
for state in list(self.states.values()):
if state.value == self.state:
return state
def possible_transitions(self, **kwargs):
result = []
for transition in list(self.transitions.values()):
try:
transition.can_execute(self, **kwargs)
result.append(transition)
except TransitionNotPossible:
pass
return result
class ModelStateMachineMeta(StateMachineMeta):
def __new__(cls, name, bases, dct):
if 'field' not in dct:
dct['field'] = 'status'
if 'name' not in dct:
dct['name'] = 'states'
result = StateMachineMeta.__new__(cls, name, bases, dct)
if hasattr(result, 'model'):
if not hasattr(result.model, '_state_machines'):
result.model._state_machines = {}
result.model._state_machines[result.name] = result
return result
class ModelStateMachine(with_metaclass(ModelStateMachineMeta, StateMachine)):
def __init__(self, instance):
self.instance = instance
super(ModelStateMachine, self).__init__()
@property
def state(self):
return getattr(self.instance, self.field)
@state.setter
def state(self, state):
setattr(self.instance, self.field, state)
def save(self):
self.instance.save()
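
# A minimal sketch (not from this module) of how these pieces are meant to compose;
# Payment is a hypothetical Django model with a `status` field:
#
#   class PaymentStateMachine(ModelStateMachine):
#       model = Payment
#       new = State(_('new'), 'new')
#       succeeded = State(_('succeeded'), 'succeeded')
#       initiate = Transition(EmptyState(), new, name=_('Initiate'))
#       succeed = Transition(new, succeeded, name=_('Succeed'), automatic=False)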
| 29.103704
| 101
| 0.607152
|
cf74792c854b3c3776af91c7198f0f105d07eaaa
| 2,173
|
py
|
Python
|
wzrd/wzrd/users/migrations/0001_initial.py
|
umarochki/dwellers-and-donkeys
|
0777b040754412402c0fd96048263087eff13b14
|
[
"MIT"
] | null | null | null |
wzrd/wzrd/users/migrations/0001_initial.py
|
umarochki/dwellers-and-donkeys
|
0777b040754412402c0fd96048263087eff13b14
|
[
"MIT"
] | null | null | null |
wzrd/wzrd/users/migrations/0001_initial.py
|
umarochki/dwellers-and-donkeys
|
0777b040754412402c0fd96048263087eff13b14
|
[
"MIT"
] | null | null | null |
# Generated by Django 3.0.5 on 2020-12-06 12:17
from django.db import migrations, models
import django.utils.timezone
class Migration(migrations.Migration):
initial = True
dependencies = [
('auth', '0011_update_proxy_permissions'),
]
operations = [
migrations.CreateModel(
name='User',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('password', models.CharField(max_length=128, verbose_name='password')),
('last_login', models.DateTimeField(blank=True, null=True, verbose_name='last login')),
('is_superuser', models.BooleanField(default=False, help_text='Designates that this user has all permissions without explicitly assigning them.', verbose_name='superuser status')),
('first_name', models.CharField(blank=True, max_length=30, verbose_name='first name')),
('last_name', models.CharField(blank=True, max_length=150, verbose_name='last name')),
('email', models.EmailField(blank=True, max_length=254, verbose_name='email address')),
('date_joined', models.DateTimeField(default=django.utils.timezone.now, verbose_name='date joined')),
('username', models.TextField(unique=True)),
('is_active', models.BooleanField(default=True)),
('is_staff', models.BooleanField(default=False)),
('groups', models.ManyToManyField(blank=True, help_text='The groups this user belongs to. A user will get all permissions granted to each of their groups.', related_name='user_set', related_query_name='user', to='auth.Group', verbose_name='groups')),
('user_permissions', models.ManyToManyField(blank=True, help_text='Specific permissions for this user.', related_name='user_set', related_query_name='user', to='auth.Permission', verbose_name='user permissions')),
],
options={
'verbose_name': 'user',
'verbose_name_plural': 'users',
'abstract': False,
},
),
]
| 54.325
| 266
| 0.638288
|
7ff4961905b010d31e8bcac0bc49fc5431deb1ca
| 3,490
|
py
|
Python
|
src/python2/request/device_request_builder.py
|
microsoftarchive/msgraph-sdk-python
|
1320ba9116be0d00a1d7fce3484ea979e24ee82d
|
[
"MIT"
] | 7
|
2019-07-17T06:59:53.000Z
|
2021-05-13T15:23:37.000Z
|
src/python2/request/device_request_builder.py
|
microsoftarchive/msgraph-sdk-python
|
1320ba9116be0d00a1d7fce3484ea979e24ee82d
|
[
"MIT"
] | null | null | null |
src/python2/request/device_request_builder.py
|
microsoftarchive/msgraph-sdk-python
|
1320ba9116be0d00a1d7fce3484ea979e24ee82d
|
[
"MIT"
] | 2
|
2020-06-30T13:06:59.000Z
|
2021-06-03T09:47:35.000Z
|
# -*- coding: utf-8 -*-
"""
# Copyright (c) Microsoft Corporation. All Rights Reserved. Licensed under the MIT License. See License in the project root for license information.
#
# This file was generated and any changes will be overwritten.
"""
from __future__ import unicode_literals
from .device_request import DeviceRequest
from ..request_builder_base import RequestBuilderBase
from ..request import directory_object_collection
class DeviceRequestBuilder(RequestBuilderBase):
def __init__(self, request_url, client):
"""Initialize the DeviceRequestBuilder
Args:
request_url (str): The url to perform the DeviceRequest
on
client (:class:`GraphClient<microsoft.msgraph.request.graph_client.GraphClient>`):
The client which will be used for the request
"""
super(DeviceRequestBuilder, self).__init__(request_url, client)
def request(self, expand=None, select=None, options=None):
"""Builds the DeviceRequest
Args:
expand (str): Default None, comma-separated list of relationships
to expand in the response.
select (str): Default None, comma-separated list of properties to
include in the response.
options (list of :class:`Option<microsoft.msgraph.options.Option>`):
A list of options to pass into the request. Defaults to None.
Returns:
:class:`DeviceRequest<microsoft.msgraph.request.device_request.DeviceRequest>`:
The DeviceRequest
"""
req = DeviceRequest(self._request_url, self._client, options)
req._set_query_options(expand=expand, select=select)
return req
def delete(self):
"""Deletes the specified Device."""
self.request().delete()
def get(self):
"""Gets the specified Device.
Returns:
:class:`Device<microsoft.msgraph.model.device.Device>`:
The Device.
"""
return self.request().get()
def update(self, device):
"""Updates the specified Device.
Args:
device (:class:`Device<microsoft.msgraph.model.device.Device>`):
The Device to update.
Returns:
:class:`Device<microsoft.msgraph.model.device.Device>`:
The updated Device
"""
return self.request().update(device)
@property
def registered_owners(self):
"""The registered_owners for the DeviceRequestBuilder
Returns:
:class:`DirectoryObjectCollectionRequestBuilder<microsoft.msgraph.request.registered_owners_collection.DirectoryObjectCollectionRequestBuilder>`:
A request builder created from the DeviceRequestBuilder
"""
return directory_object_collection.DirectoryObjectCollectionRequestBuilder(self.append_to_request_url("registeredOwners"), self._client)
@property
def registered_users(self):
"""The registered_users for the DeviceRequestBuilder
Returns:
:class:`DirectoryObjectCollectionRequestBuilder<microsoft.msgraph.request.registered_users_collection.DirectoryObjectCollectionRequestBuilder>`:
A request builder created from the DeviceRequestBuilder
"""
return directory_object_collection.DirectoryObjectCollectionRequestBuilder(self.append_to_request_url("registeredUsers"), self._client)
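
# Usage sketch (illustrative; base_url, device_id and client are placeholders, not
# part of this module):
#   builder = DeviceRequestBuilder(base_url + '/devices/' + device_id, client)
#   device = builder.get()   # fetch the Device
#   builder.delete()         # or remove it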
| 37.526882
| 157
| 0.668768
|
185924c67023218c5de748c00417e1365f18f223
| 10,476
|
py
|
Python
|
torchrec/distributed/planner/stats.py
|
mreso/torchrec
|
a06ad87676e1dfdfd529787c9cc4b023c02cbca2
|
[
"BSD-3-Clause"
] | null | null | null |
torchrec/distributed/planner/stats.py
|
mreso/torchrec
|
a06ad87676e1dfdfd529787c9cc4b023c02cbca2
|
[
"BSD-3-Clause"
] | null | null | null |
torchrec/distributed/planner/stats.py
|
mreso/torchrec
|
a06ad87676e1dfdfd529787c9cc4b023c02cbca2
|
[
"BSD-3-Clause"
] | 1
|
2022-02-09T23:52:22.000Z
|
2022-02-09T23:52:22.000Z
|
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
import logging
from collections import defaultdict
from typing import Union, Tuple, Optional, Any, List, Dict, cast
from torchrec.distributed.planner.types import (
ShardingOption,
Stats,
Topology,
ParameterConstraints,
Storage,
)
from torchrec.distributed.planner.utils import bytes_to_gb
from torchrec.distributed.types import ShardingType, ParameterSharding, ShardingPlan
logger: logging.Logger = logging.getLogger(__name__)
STATS_DIVIDER = "####################################################################################################"
STATS_BAR = f"#{'------------------------------------------------------------------------------------------------': ^98}#"
class EmbeddingStats(Stats):
"""
Stats for a sharding planner execution.
"""
def log(
self,
sharding_plan: ShardingPlan,
topology: Topology,
num_proposals: int,
num_plans: int,
best_plan: List[ShardingOption],
constraints: Optional[Dict[str, ParameterConstraints]] = None,
) -> None:
"""
Log stats for a given sharding plan to stdout.
Provide a tabular view of stats for the given sharding plan with per device
storage usage (HBM and DDR), perf, input (pooling factors), output (embedding
dimension), and number and type of shards.
Args:
sharding_plan (ShardingPlan): sharding plan chosen by the ShardingPlanner.
topology (Topology): device topology.
num_proposals (int): number of proposals evaluated
num_plans (int): number of proposals successfully partitioned
best_plan (List[ShardingOption]): plan with expected performance
constraints (Optional[Dict[str, ParameterConstraints]]): dict of parameter
names to provided ParameterConstraints.
"""
shard_by_fqn = {
module_name + "." + param_name: value
for module_name, param_dict in sharding_plan.plan.items()
for param_name, value in param_dict.items()
}
stats: Dict[int, Dict[str, Any]] = {
rank: {"type": {}, "pooling_factor": 0.0, "embedding_dims": 0}
for rank in range(topology.world_size)
}
used_sharding_types = set()
compute_kernels_to_count = defaultdict(int)
for sharding_option in best_plan:
fqn = sharding_option.fqn
if shard_by_fqn.get(fqn) is None:
continue
shard: ParameterSharding = shard_by_fqn[fqn]
ranks, pooling_factor, emb_dims = self._get_shard_stats(
shard=shard,
sharding_option=sharding_option,
world_size=topology.world_size,
local_size=topology.local_world_size,
constraints=constraints,
)
sharding_type_abbr = _get_sharding_type_abbr(shard.sharding_type)
used_sharding_types.add(sharding_type_abbr)
compute_kernels_to_count[sharding_option.compute_kernel] += 1
for i, rank in enumerate(ranks):
count = stats[rank]["type"].get(sharding_type_abbr, 0)
stats[rank]["type"][sharding_type_abbr] = count + 1
stats[rank]["pooling_factor"] += pooling_factor[i]
stats[rank]["embedding_dims"] += emb_dims[i]
used_hbm = [0] * topology.world_size
used_ddr = [0] * topology.world_size
perf = [0.0] * topology.world_size
for sharding_option in best_plan:
for shard in sharding_option.shards:
storage = cast(Storage, shard.storage)
rank = cast(int, shard.rank)
used_hbm[rank] += storage.hbm
used_ddr[rank] += storage.ddr
perf[rank] += cast(float, shard.perf)
table: List[List[Union[str, int]]] = [
["Rank", "HBM (GB)", "DDR (GB)", "Perf", "Input", "Output", "Shards"],
[
"------",
"----------",
"----------",
"------",
"-------",
"--------",
"--------",
],
]
for rank, device in enumerate(topology.devices):
used_hbm_gb = bytes_to_gb(used_hbm[rank])
used_hbm_ratio = (
used_hbm[rank] / device.storage.hbm
if topology.compute_device == "cuda"
else 0
)
used_ddr_gb = bytes_to_gb(used_ddr[rank])
used_ddr_ratio = used_ddr[rank] / device.storage.ddr
for sharding_type in used_sharding_types:
if sharding_type not in stats[rank]["type"]:
stats[rank]["type"][sharding_type] = 0
rank_hbm = f"{used_hbm_gb:.1f} ({used_hbm_ratio:.0%})"
rank_ddr = f"{used_ddr_gb:.1f} ({used_ddr_ratio:.0%})"
rank_perf = f"{perf[rank] / 1000:,.0f}"
rank_pooling = f"{int(stats[rank]['pooling_factor']):,}"
rank_dims = f"{stats[rank]['embedding_dims']:,}"
rank_shards = " ".join(
f"{sharding_type}: {num_tables}"
for sharding_type, num_tables in sorted(stats[rank]["type"].items())
)
table.append(
[
rank,
rank_hbm,
rank_ddr,
rank_perf,
rank_pooling,
rank_dims,
rank_shards,
]
)
logger.info(STATS_DIVIDER)
header_text = "--- Planner Statistics ---"
logger.info(f"#{header_text: ^98}#")
iter_text = (
f"--- Evalulated {num_proposals} proposal(s), "
f"found {num_plans} possible plan(s) ---"
)
logger.info(f"#{iter_text: ^98}#")
logger.info(STATS_BAR)
formatted_table = _format_table(table)
for row in formatted_table:
logger.info(f"# {row: <97}#")
logger.info(f"#{'' : ^98}#")
legend = "Input: pooling factor, Output: embedding dimension, Shards: number of tables"
logger.info(f"# {legend: <97}#")
logger.info(f"#{'' : ^98}#")
compute_kernels_count = [
f"{compute_kernel}: {count}"
for compute_kernel, count in sorted(compute_kernels_to_count.items())
]
logger.info(f"# {'Compute Kernels:' : <97}#")
for compute_kernel_count in compute_kernels_count:
logger.info(f"# {compute_kernel_count : <95}#")
logger.info(STATS_DIVIDER)
def _get_shard_stats(
self,
shard: ParameterSharding,
sharding_option: ShardingOption,
world_size: int,
local_size: int,
constraints: Optional[Dict[str, ParameterConstraints]] = None,
) -> Tuple[List[int], List[float], List[int]]:
"""
Gets ranks, pooling factors, and embedding dimensions per shard.
Returns:
ranks: list of ranks.
pooling_factor: list of pooling factors across ranks.
emb_dims: list of embedding dimensions across ranks.
"""
ranks = list(range(world_size))
pooling_factor = [
sum(constraints[sharding_option.name].pooling_factors)
if constraints and constraints.get(sharding_option.name)
else 0.0
]
emb_dims = [sharding_option.tensor.shape[1]]
if shard.sharding_type == ShardingType.DATA_PARALLEL.value:
emb_dims = emb_dims * len(ranks)
pooling_factor = pooling_factor * len(ranks)
elif shard.sharding_type == ShardingType.TABLE_WISE.value:
assert shard.ranks
ranks = shard.ranks
elif shard.sharding_type == ShardingType.COLUMN_WISE.value:
assert shard.ranks
ranks = shard.ranks
emb_dims = [
int(shard.shard_sizes[1])
# pyre-ignore [16]
for shard in shard.sharding_spec.shards
]
pooling_factor = pooling_factor * len(ranks)
elif shard.sharding_type == ShardingType.ROW_WISE.value:
pooling_factor = [pooling_factor[0] / world_size] * len(ranks)
emb_dims = emb_dims * len(ranks)
elif shard.sharding_type == ShardingType.TABLE_ROW_WISE.value:
assert shard.ranks
host_id = shard.ranks[0] // local_size
ranks = list(range(host_id * local_size, (host_id + 1) * local_size))
pooling_factor = [pooling_factor[0] / local_size] * len(ranks)
emb_dims = emb_dims * len(ranks)
elif shard.sharding_type == ShardingType.TABLE_COLUMN_WISE.value:
assert shard.ranks
ranks = shard.ranks
pooling_factor = pooling_factor * len(ranks)
emb_dims = [
int(shard.shard_sizes[1]) for shard in shard.sharding_spec.shards
]
else:
raise ValueError(
f"Unrecognized or unsupported sharding type provided: {shard.sharding_type}"
)
return ranks, pooling_factor, emb_dims
def _get_sharding_type_abbr(sharding_type: str) -> str:
if sharding_type == ShardingType.DATA_PARALLEL.value:
return "DP"
elif sharding_type == ShardingType.TABLE_WISE.value:
return "TW"
elif sharding_type == ShardingType.COLUMN_WISE.value:
return "CW"
elif sharding_type == ShardingType.ROW_WISE.value:
return "RW"
elif sharding_type == ShardingType.TABLE_ROW_WISE.value:
return "TWRW"
elif sharding_type == ShardingType.TABLE_COLUMN_WISE.value:
return "TWCW"
else:
raise ValueError(
f"Unrecognized or unsupported sharding type provided: {sharding_type}"
)
def _format_table(table: List[List[Union[str, int]]]) -> List[str]:
longest_cols = [
(max([len(str(row[i])) for row in table]) + 3) for i in range(len(table[0]))
]
row_format = "".join(
["{:>" + str(longest_col) + "}" for longest_col in longest_cols]
)
return [row_format.format(*row) for row in table]
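
# Illustration (not part of the original module): _format_table pads every column
# to the width of its longest cell plus three spaces and right-aligns each value,
# so _format_table([["Rank", "HBM (GB)"], [0, "1.2 (40%)"]]) returns
# ['   Rank    HBM (GB)', '      0   1.2 (40%)'].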
| 37.017668
| 122
| 0.568347
|
5fb390b0b2fb46c0777352ad903ac8aff19d17a5
| 7,544
|
py
|
Python
|
benchmarks/f3_wrong_hints/scaling_software_termination/15-2Nested_false-termination_30.py
|
EnricoMagnago/F3
|
c863215c318d7d5f258eb9be38c6962cf6863b52
|
[
"MIT"
] | 3
|
2021-04-23T23:29:26.000Z
|
2022-03-23T10:00:30.000Z
|
benchmarks/f3_wrong_hints/scaling_software_termination/15-2Nested_false-termination_30.py
|
EnricoMagnago/F3
|
c863215c318d7d5f258eb9be38c6962cf6863b52
|
[
"MIT"
] | null | null | null |
benchmarks/f3_wrong_hints/scaling_software_termination/15-2Nested_false-termination_30.py
|
EnricoMagnago/F3
|
c863215c318d7d5f258eb9be38c6962cf6863b52
|
[
"MIT"
] | 1
|
2021-11-17T22:02:56.000Z
|
2021-11-17T22:02:56.000Z
|
from typing import Tuple, FrozenSet
from pysmt.environment import Environment as PysmtEnv
from pysmt.fnode import FNode
import pysmt.typing as types
from utils import symb_to_next
from hint import Hint, Location
def transition_system(env: PysmtEnv) -> Tuple[FrozenSet[FNode], FNode, FNode,
FNode]:
assert isinstance(env, PysmtEnv)
mgr = env.formula_manager
pc = mgr.Symbol("pc", types.INT)
x = mgr.Symbol("x", types.INT)
y = mgr.Symbol("y", types.INT)
x_pc = symb_to_next(mgr, pc)
x_x = symb_to_next(mgr, x)
x_y = symb_to_next(mgr, y)
symbols = frozenset([pc, x, y])
m_1 = mgr.Int(-1)
n_locs = 3
max_int = n_locs
ints = []
pcs = []
x_pcs = []
for idx in range(n_locs):
num = mgr.Int(idx)
ints.append(num)
pcs.append(mgr.Equals(pc, num))
x_pcs.append(mgr.Equals(x_pc, num))
for idx in range(n_locs, max_int):
num = mgr.Int(idx)
ints.append(num)
pcend = mgr.Equals(pc, m_1)
x_pcend = mgr.Equals(x_pc, m_1)
init = pcs[0]
cfg = []
# pc = 0 & (x >= 0) -> pc' = 1
cond = mgr.GE(x, ints[0])
cfg.append(mgr.Implies(mgr.And(pcs[0], cond), x_pcs[1]))
# pc = 0 & !(x >= 0) -> pc' = -1
cfg.append(mgr.Implies(mgr.And(pcs[0], mgr.Not(cond)), x_pcend))
# pc = 1 -> pc' = 2
cfg.append(mgr.Implies(pcs[1], x_pcs[2]))
# pc = 2 -> pc' = 0
cfg.append(mgr.Implies(pcs[2], x_pcs[0]))
# pc = -1 -> pc' = -1
cfg.append(mgr.Implies(pcend, x_pcend))
trans = []
same_x = mgr.Equals(x_x, x)
same_y = mgr.Equals(x_y, y)
same = mgr.And(same_x, same_y)
# pc = 0 -> same
trans.append(mgr.Implies(pcs[0], same))
# pc = 1 -> x' = x + y & same_y
trans.append(mgr.Implies(pcs[1],
mgr.And(mgr.Equals(x_x, mgr.Plus(x, y)),
same_y)))
# pc = 2 -> same_x & y' = y + 1
trans.append(mgr.Implies(pcs[2],
mgr.And(same_x,
mgr.Equals(x_y, mgr.Plus(y, ints[1])))))
# pc = end -> same
trans.append(mgr.Implies(pcend, same))
trans = mgr.And(*cfg, *trans)
fairness = mgr.Not(mgr.Equals(pc, m_1))
return symbols, init, trans, fairness
def hints(env: PysmtEnv) -> FrozenSet[Hint]:
assert isinstance(env, PysmtEnv)
mgr = env.formula_manager
pc = mgr.Symbol("pc", types.INT)
x = mgr.Symbol("x", types.INT)
y = mgr.Symbol("y", types.INT)
symbs = frozenset([pc, x, y])
m_100 = mgr.Int(-100)
m_1 = mgr.Int(-1)
i_0 = mgr.Int(0)
i_1 = mgr.Int(1)
i_2 = mgr.Int(2)
i_4 = mgr.Int(4)
i_20 = mgr.Int(20)
x_pc = symb_to_next(mgr, pc)
x_x = symb_to_next(mgr, x)
x_y = symb_to_next(mgr, y)
res = []
stutter = mgr.Equals(x_y, y)
loc = Location(env, mgr.TRUE(), mgr.LE(x, i_20), stutterT=stutter)
loc.set_progress(0, mgr.Equals(x_y, mgr.Plus(x, y)))
h_y = Hint("h_y1", env, frozenset([y]), symbs)
h_y.set_locs([loc])
res.append(h_y)
stutter = mgr.Equals(x_x, x)
loc = Location(env, mgr.GE(x, i_1), mgr.GE(y, i_1), stutterT=stutter)
loc.set_progress(0, mgr.Equals(x_x, mgr.Plus(x, y)))
h_x = Hint("h_x1", env, frozenset([x]), symbs)
h_x.set_locs([loc])
res.append(h_x)
loc0 = Location(env, mgr.GE(y, m_100), mgr.LE(x, i_20))
loc0.set_progress(1, mgr.Equals(x_y, mgr.Plus(x, y)))
loc1 = Location(env, mgr.TRUE(), mgr.GE(x, m_100))
loc1.set_progress(0, mgr.Equals(x_y, m_100))
h_y = Hint("h_y2", env, frozenset([y]), symbs)
h_y.set_locs([loc0, loc1])
res.append(h_y)
loc0 = Location(env, mgr.GE(y, m_100), mgr.LE(x, i_20))
loc0.set_progress(1, mgr.Equals(x_y, mgr.Times(x, y)))
loc1 = Location(env, mgr.TRUE(), mgr.GE(x, m_100))
loc1.set_progress(0, mgr.Equals(x_y, m_100))
h_y = Hint("h_y3", env, frozenset([y]), symbs)
h_y.set_locs([loc0, loc1])
res.append(h_y)
loc0 = Location(env, mgr.GE(x, i_1), mgr.GE(y, i_1))
loc0.set_progress(1, mgr.Equals(x_x, mgr.Times(x, y)))
loc1 = Location(env, mgr.GE(x, i_1), mgr.GE(y, i_1))
loc1.set_progress(0, mgr.Equals(x_x, y))
h_x = Hint("h_x3", env, frozenset([x]), symbs)
h_x.set_locs([loc0, loc1])
res.append(h_x)
loc0 = Location(env, mgr.GE(y, m_100), mgr.LE(x, i_20))
loc0.set_progress(1, mgr.Equals(x_y, mgr.Times(x, y)))
loc1 = Location(env, mgr.TRUE(), mgr.GE(x, m_100))
loc1.set_progress(2, mgr.GE(x_y, i_20))
loc2 = Location(env, mgr.TRUE())
loc2.set_progress(0, mgr.And(mgr.GE(x_y, m_100), mgr.LE(x_y, i_0)))
h_y = Hint("h_y4", env, frozenset([y]), symbs)
h_y.set_locs([loc0, loc1, loc2])
res.append(h_y)
loc0 = Location(env, mgr.GE(x, i_1), mgr.GE(y, i_1))
loc0.set_progress(1, mgr.Equals(x_x, mgr.Times(x, y)))
loc1 = Location(env, mgr.GE(x, i_1), mgr.GE(y, i_1))
loc1.set_progress(2, mgr.GT(x_x, y))
loc2 = Location(env, mgr.GE(x, i_2))
loc2.set_progress(0, mgr.GE(x_x, i_20))
h_x = Hint("h_x4", env, frozenset([x]), symbs)
h_x.set_locs([loc0, loc1, loc2])
res.append(h_x)
loc0 = Location(env, mgr.TRUE())
loc0.set_progress(0, mgr.TRUE())
h_pc = Hint("h_pc1", env, frozenset([pc]), symbs)
h_pc.set_locs([loc0])
res.append(h_pc)
loc0 = Location(env, mgr.GE(y, m_100))
loc0.set_progress(0, mgr.Equals(x_y, mgr.Times(y, y)))
h_y = Hint("h_y5", env, frozenset([y]), symbs)
h_y.set_locs([loc0])
res.append(h_y)
loc0 = Location(env, mgr.LE(x, i_0))
loc0.set_progress(1, mgr.Equals(x_x, mgr.Times(x, x)))
loc1 = Location(env, mgr.GE(x, i_0))
loc1.set_progress(0, mgr.LT(x_x, mgr.Times(m_1, x, x)))
h_x = Hint("h_x5", env, frozenset([x]), symbs)
h_x.set_locs([loc0, loc1])
res.append(h_x)
loc0 = Location(env, mgr.Equals(pc, i_1))
loc0.set_progress(1, mgr.GT(x_pc, pc))
loc1 = Location(env, mgr.GE(pc, i_2))
loc1.set_progress(0, mgr.Equals(x_pc, mgr.Div(pc, pc)))
h_pc = Hint("h_pc2", env, frozenset([pc]), symbs)
h_pc.set_locs([loc0, loc1])
res.append(h_pc)
loc0 = Location(env, mgr.LE(x, i_20))
loc0.set_progress(1, mgr.Equals(x_x, mgr.Plus(mgr.Times(x, x), i_1)))
loc1 = Location(env, mgr.GE(x, i_20))
loc1.set_progress(0, mgr.LT(x_x, mgr.Times(m_1, x, x)))
h_x = Hint("h_x6", env, frozenset([x]), symbs)
h_x.set_locs([loc0, loc1])
res.append(h_x)
loc0 = Location(env, mgr.LE(pc, i_1))
loc0.set_progress(1, mgr.GT(x_pc, pc))
loc1 = Location(env, mgr.LE(pc, i_2))
loc1.set_progress(0, mgr.Equals(x_pc, mgr.Div(pc, pc)))
h_pc = Hint("h_pc3", env, frozenset([pc]), symbs)
h_pc.set_locs([loc0, loc1])
res.append(h_pc)
loc0 = Location(env, mgr.GE(y, i_0), mgr.GE(pc, i_1))
loc0.set_progress(1, mgr.Equals(x_y, mgr.Plus(y, pc)))
loc1 = Location(env, mgr.GE(y, i_1))
loc1.set_progress(0, mgr.Equals(x_y, y))
h_y = Hint("h_y7", env, frozenset([y]), symbs)
h_y.set_locs([loc0, loc1])
res.append(h_y)
loc0 = Location(env, mgr.GE(x, i_1), mgr.GT(y, i_1))
loc0.set_progress(1, mgr.Equals(x_x, mgr.Plus(mgr.Times(x, y), i_1)))
loc1 = Location(env, mgr.GE(x, i_2))
loc1.set_progress(2, mgr.LT(x_x, mgr.Times(m_1, x, x)))
loc2 = Location(env, mgr.LE(x, i_4))
loc2.set_progress(0, mgr.GE(x_x, mgr.Div(x, x)))
h_x = Hint("h_x7", env, frozenset([x]), symbs)
h_x.set_locs([loc0, loc1, loc2])
res.append(h_x)
return frozenset(res)
| 31.302905
| 77
| 0.587487
|
212b996bcd031b2378d64f2d18a422bd5c431a05
| 3,801
|
py
|
Python
|
lotrpc/rpcgen.py
|
wtnb75/lotrpc
|
4209722d237eba8863bd777653a3390969552e8e
|
[
"MIT"
] | 1
|
2020-02-13T01:09:31.000Z
|
2020-02-13T01:09:31.000Z
|
lotrpc/rpcgen.py
|
wtnb75/lotrpc
|
4209722d237eba8863bd777653a3390969552e8e
|
[
"MIT"
] | null | null | null |
lotrpc/rpcgen.py
|
wtnb75/lotrpc
|
4209722d237eba8863bd777653a3390969552e8e
|
[
"MIT"
] | null | null | null |
import os
import sys
import subprocess
import tempfile
import importlib
import click
import yaml
from logging import getLogger, basicConfig, DEBUG
import lotrpc.sunrpc.parse
import lotrpc.sunrpc.rpcgen
log = getLogger(__name__)
@click.group(invoke_without_command=True)
@click.pass_context
def cli(ctx):
if ctx.invoked_subcommand is None:
print(ctx.get_help())
@cli.command()
@click.argument("file", type=click.File('r'))
@click.option("--verbose/--no-verbose", default=False)
def lex(file, verbose):
if verbose:
basicConfig(level=DEBUG)
for token in lotrpc.sunrpc.parse.get_lexer(file):
log.info("token %s", token)
def parse_file(file, cpp, defs, verbose):
defs = yaml.load(defs, Loader=yaml.FullLoader)
    if cpp:
with subprocess.Popen(["cpp"], stdin=subprocess.PIPE,
stdout=subprocess.PIPE) as p:
p.stdin.write(file.read().encode('utf-8'))
p.stdin.close()
file = p.stdout
return lotrpc.sunrpc.parse.parse_file(
p.stdout, debug=verbose, defines=defs)
return lotrpc.sunrpc.parse.parse_file(file, debug=verbose, defines=defs)
defdef = "{LM_MAXSTRLEN: 1024, MAXNAMELEN: 1025, MAXNETNAMELEN: 255}"
@cli.command()
@click.option("--cpp/--no-cpp", default=False)
@click.option("--defs", default=defdef)
@click.option("--verbose/--no-verbose", default=False)
@click.argument("file", type=click.File('r'))
def parse(file, cpp, defs, verbose):
if verbose:
basicConfig(level=DEBUG)
result = parse_file(file, cpp, defs, verbose)
print(yaml.dump(result))
@cli.command()
@click.option("--cpp/--no-cpp", default=False)
@click.option("--defs", default=defdef)
@click.option("--template", default=None, type=click.File('r'))
@click.option("--verbose/--no-verbose", default=False)
@click.argument("file", type=click.File('r'))
def rpcgen(file, cpp, defs, template, verbose):
if verbose:
basicConfig(level=DEBUG)
data = parse_file(file, cpp, defs, verbose)
tmpl = None
if template is not None:
tmpl = template.read()
res = lotrpc.sunrpc.rpcgen.generate_proto(data, tmpl)
print(res)
@cli.command()
@click.option("--cpp/--no-cpp", default=False)
@click.option("--defs", default=defdef)
@click.option("--template", default=None, type=click.File('r'))
@click.option("--verbose/--no-verbose", default=False)
@click.argument("file", type=click.File('r'))
def rpcgen_autopep(file, cpp, defs, template, verbose):
if verbose:
basicConfig(level=DEBUG)
data = parse_file(file, cpp, defs, verbose)
tmpl = None
if template is not None:
tmpl = template.read()
res = lotrpc.sunrpc.rpcgen.generate_proto(data, tmpl)
with tempfile.NamedTemporaryFile(suffix=".py", mode="w") as tf:
        tf.write(res)
        tf.flush()
with subprocess.Popen(["autopep8", "--diff", tf.name], stdin=subprocess.DEVNULL) as p:
p.wait()
@cli.command()
@click.option("--cpp/--no-cpp", default=False)
@click.option("--defs", default=defdef)
@click.option("--template", default=None, type=click.File('r'))
@click.option("--verbose/--no-verbose", default=False)
@click.argument("file", type=click.File('r'))
def rpcgen_help(file, cpp, defs, template, verbose):
if verbose:
basicConfig(level=DEBUG)
data = parse_file(file, cpp, defs, verbose)
tmpl = None
if template is not None:
tmpl = template.read()
with tempfile.TemporaryDirectory() as tmpd:
with open(os.path.join(tmpd, "mymodule.py"), "w") as ofp:
res = lotrpc.sunrpc.rpcgen.generate_proto(data, tmpl)
ofp.write(res)
sys.path.append(tmpd)
mod = importlib.import_module("mymodule")
help(mod)
if __name__ == '__main__':
cli()
| 30.653226
| 94
| 0.660089
|
f62b7c024de33d741e6fae7b5bd86f82493f12f5
| 4,942
|
py
|
Python
|
nova/api/openstack/compute/contrib/instance_actions.py
|
nkrinner/nova
|
1372397d4f5f8c155af6f1f4ab5dc68be00c9c01
|
[
"Apache-2.0"
] | 1
|
2015-01-09T10:49:56.000Z
|
2015-01-09T10:49:56.000Z
|
nova/api/openstack/compute/contrib/instance_actions.py
|
Semihalf/openstack-nova
|
ff6c7e806d12c4d00990b28438bd8b30e2b978ee
|
[
"Apache-2.0"
] | null | null | null |
nova/api/openstack/compute/contrib/instance_actions.py
|
Semihalf/openstack-nova
|
ff6c7e806d12c4d00990b28438bd8b30e2b978ee
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2013 Rackspace Hosting
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from webob import exc
from nova.api.openstack import common
from nova.api.openstack import extensions
from nova.api.openstack import wsgi
from nova.api.openstack import xmlutil
from nova import compute
authorize_actions = extensions.extension_authorizer('compute',
'instance_actions')
authorize_events = extensions.soft_extension_authorizer('compute',
'instance_actions:events')
ACTION_KEYS = ['action', 'instance_uuid', 'request_id', 'user_id',
'project_id', 'start_time', 'message']
EVENT_KEYS = ['event', 'start_time', 'finish_time', 'result', 'traceback']
def make_actions(elem):
for key in ACTION_KEYS:
elem.set(key)
def make_action(elem):
for key in ACTION_KEYS:
elem.set(key)
event = xmlutil.TemplateElement('events', selector='events')
for key in EVENT_KEYS:
event.set(key)
elem.append(event)
class InstanceActionsTemplate(xmlutil.TemplateBuilder):
def construct(self):
root = xmlutil.TemplateElement('instanceActions')
elem = xmlutil.SubTemplateElement(root, 'instanceAction',
selector='instanceActions')
make_actions(elem)
return xmlutil.MasterTemplate(root, 1)
class InstanceActionTemplate(xmlutil.TemplateBuilder):
def construct(self):
root = xmlutil.TemplateElement('instanceAction',
selector='instanceAction')
make_action(root)
return xmlutil.MasterTemplate(root, 1)
class InstanceActionsController(wsgi.Controller):
def __init__(self):
super(InstanceActionsController, self).__init__()
self.compute_api = compute.API()
self.action_api = compute.InstanceActionAPI()
def _format_action(self, action_raw):
action = {}
for key in ACTION_KEYS:
action[key] = action_raw.get(key)
return action
def _format_event(self, event_raw):
event = {}
for key in EVENT_KEYS:
event[key] = event_raw.get(key)
return event
@wsgi.serializers(xml=InstanceActionsTemplate)
def index(self, req, server_id):
"""Returns the list of actions recorded for a given instance."""
context = req.environ["nova.context"]
instance = common.get_instance(self.compute_api, context, server_id)
authorize_actions(context, target=instance)
actions_raw = self.action_api.actions_get(context, instance)
actions = [self._format_action(action) for action in actions_raw]
return {'instanceActions': actions}
@wsgi.serializers(xml=InstanceActionTemplate)
def show(self, req, server_id, id):
"""Return data about the given instance action."""
context = req.environ['nova.context']
instance = common.get_instance(self.compute_api, context, server_id)
authorize_actions(context, target=instance)
action = self.action_api.action_get_by_request_id(context, instance,
id)
if action is None:
raise exc.HTTPNotFound()
action_id = action['id']
action = self._format_action(action)
if authorize_events(context):
events_raw = self.action_api.action_events_get(context, instance,
action_id)
action['events'] = [self._format_event(evt) for evt in events_raw]
return {'instanceAction': action}
class Instance_actions(extensions.ExtensionDescriptor):
"""View a log of actions and events taken on an instance."""
name = "InstanceActions"
alias = "os-instance-actions"
namespace = ("http://docs.openstack.org/compute/ext/"
"instance-actions/api/v1.1")
updated = "2013-02-08T00:00:00+00:00"
def get_resources(self):
ext = extensions.ResourceExtension('os-instance-actions',
InstanceActionsController(),
parent=dict(
member_name='server',
collection_name='servers'))
return [ext]
| 38.015385
| 78
| 0.62849
|
a486ce7fc8c530bb8791e6a9740cd6991ed0200a
| 2,383
|
py
|
Python
|
netbox/extras/urls.py
|
aslafy-z/netbox
|
a5512dd4c46c005df8752fc330c1382ac22b31ea
|
[
"Apache-2.0"
] | 1
|
2021-09-23T00:06:51.000Z
|
2021-09-23T00:06:51.000Z
|
netbox/extras/urls.py
|
aslafy-z/netbox
|
a5512dd4c46c005df8752fc330c1382ac22b31ea
|
[
"Apache-2.0"
] | 4
|
2021-06-08T22:29:06.000Z
|
2022-03-12T00:48:51.000Z
|
netbox/extras/urls.py
|
aslafy-z/netbox
|
a5512dd4c46c005df8752fc330c1382ac22b31ea
|
[
"Apache-2.0"
] | null | null | null |
from django.urls import path
from extras import views
from extras.models import Tag
app_name = 'extras'
urlpatterns = [
# Tags
path('tags/', views.TagListView.as_view(), name='tag_list'),
path('tags/edit/', views.TagBulkEditView.as_view(), name='tag_bulk_edit'),
path('tags/delete/', views.TagBulkDeleteView.as_view(), name='tag_bulk_delete'),
path('tags/<str:slug>/', views.TagView.as_view(), name='tag'),
path('tags/<str:slug>/edit/', views.TagEditView.as_view(), name='tag_edit'),
path('tags/<str:slug>/delete/', views.TagDeleteView.as_view(), name='tag_delete'),
path('tags/<str:slug>/changelog/', views.ObjectChangeLogView.as_view(), name='tag_changelog', kwargs={'model': Tag}),
# Config contexts
path('config-contexts/', views.ConfigContextListView.as_view(), name='configcontext_list'),
path('config-contexts/add/', views.ConfigContextCreateView.as_view(), name='configcontext_add'),
path('config-contexts/edit/', views.ConfigContextBulkEditView.as_view(), name='configcontext_bulk_edit'),
path('config-contexts/delete/', views.ConfigContextBulkDeleteView.as_view(), name='configcontext_bulk_delete'),
path('config-contexts/<int:pk>/', views.ConfigContextView.as_view(), name='configcontext'),
path('config-contexts/<int:pk>/edit/', views.ConfigContextEditView.as_view(), name='configcontext_edit'),
path('config-contexts/<int:pk>/delete/', views.ConfigContextDeleteView.as_view(), name='configcontext_delete'),
# Image attachments
path('image-attachments/<int:pk>/edit/', views.ImageAttachmentEditView.as_view(), name='imageattachment_edit'),
path('image-attachments/<int:pk>/delete/', views.ImageAttachmentDeleteView.as_view(), name='imageattachment_delete'),
# Change logging
path('changelog/', views.ObjectChangeListView.as_view(), name='objectchange_list'),
path('changelog/<int:pk>/', views.ObjectChangeView.as_view(), name='objectchange'),
# Reports
path('reports/', views.ReportListView.as_view(), name='report_list'),
path('reports/<str:name>/', views.ReportView.as_view(), name='report'),
path('reports/<str:name>/run/', views.ReportRunView.as_view(), name='report_run'),
# Scripts
path('scripts/', views.ScriptListView.as_view(), name='script_list'),
path('scripts/<str:module>/<str:name>/', views.ScriptView.as_view(), name='script'),
]
| 51.804348
| 121
| 0.716744
|
afd2ed3325b41fbc1aca7883d011a81c7255b6ca
| 2,122
|
py
|
Python
|
hackerrank/dynamicAarray.py
|
shrijaltamrakar/Descent_py
|
82cd89ebeca37d079fc5d072010d3a3120420b2a
|
[
"MIT"
] | 2
|
2018-11-17T10:03:25.000Z
|
2019-01-24T08:40:10.000Z
|
hackerrank/dynamicAarray.py
|
shrijaltamrakar/Descent_py
|
82cd89ebeca37d079fc5d072010d3a3120420b2a
|
[
"MIT"
] | 1
|
2021-06-02T00:16:58.000Z
|
2021-06-02T00:16:58.000Z
|
hackerrank/dynamicAarray.py
|
shrijaltamrakar/Descent_py
|
82cd89ebeca37d079fc5d072010d3a3120420b2a
|
[
"MIT"
] | 1
|
2020-10-03T08:22:40.000Z
|
2020-10-03T08:22:40.000Z
|
"""
____ _ _
| _ \ _ _ _ __ __ _ _ __ ___ (_) ___ / \ _ __ _ __ __ _ _ _
| | | | | | | '_ \ / _` | '_ ` _ \| |/ __| / _ \ | '__| '__/ _` | | | |
| |_| | |_| | | | | (_| | | | | | | | (__ / ___ \| | | | | (_| | |_| |
|____/ \__, |_| |_|\__,_|_| |_| |_|_|\___| /_/ \_\_| |_| \__,_|\__, |
|___/ |___/
Create a list, seqList, of N empty sequences, where each sequence is indexed from 0 to N-1. The elements within each of the N sequences also use 0-indexing.
Create an integer, lastAns, and initialize it to 0.
The 2 types of queries that can be performed on your list of sequences (seqList) are described below:
Query: 1 x y
Find the sequence, seq, at index ((x ⊕ lastAns) % N) in seqList.
Append integer y to sequence seq.
Query: 2 x y
Find the sequence, seq, at index ((x ⊕ lastAns) % N) in seqList.
Find the value of element (y%size) in seq (where size is the size of seq) and assign it to lastAns.
Print the new value of lastAns on a new line
Sample Input:
2 5
1 0 5
1 1 7
1 0 3
2 1 0
2 1 1
Sample Output:
7
3
Explanation:
Initial Values:
N=2
lastAns=0
S0={}
S1={}
Query 0: Append 5 to sequence ((0 ⊕ 0) % 2) =0.
lastAns=0
S0={5}
S1={}
Query 1: Append 7 to sequence ((1 ⊕ 0) % 2) =1.
lastAns=0
S0={5}
S1={7}
Query 2: Append 3 to sequence ((0 ⊕ 0) % 2) =0.
lastAns=0
S0={5,3}
S1={7}
Query 3: Assign the value at index 0 of sequence ((1 ⊕ 0) % 2) =1 to lastAns, print lastAns. lastAns=7
S0={5,3}
S1={7}
7
Query 4: Assign the value at index 1 of sequence ((1 ⊕ 7) % 2) =0 to lastAns, print lastAns. lastAns=3
S0={5,3}
S1={7}
3
"""
def dynamicArray(n, queries):
    # one list per sequence index; lastAns carries the XOR-derived index state
lastAns = 0
seqList = [[] for _ in range(n)]
for q, x, y in queries:
index = (x ^ lastAns) % n
seq = seqList[index]
if q == 1:
seq.append(y)
elif q == 2:
size = len(seq)
lastAns = seq[y % size]
yield lastAns
else:
raise ValueError()
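# A minimal, hedged usage sketch (not part of the original submission): feeding
# the sample input from the docstring above should print 7 and then 3.
if __name__ == '__main__':
    sample_queries = [[1, 0, 5], [1, 1, 7], [1, 0, 3], [2, 1, 0], [2, 1, 1]]
    for answer in dynamicArray(2, sample_queries):
        print(answer)  # prints 7, then 3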
| 23.065217
| 156
| 0.530631
|
b50d1672eabccc117774a4ae4a783268e9619dea
| 3,951
|
py
|
Python
|
src/dimmer/dimmer.py
|
deklungel/iRulez
|
eca073d0af55f8bf7006bf37c2cd69116e926211
|
[
"MIT"
] | 1
|
2018-03-21T15:08:22.000Z
|
2018-03-21T15:08:22.000Z
|
src/dimmer/dimmer.py
|
deklungel/iRulez
|
eca073d0af55f8bf7006bf37c2cd69116e926211
|
[
"MIT"
] | 1
|
2017-08-25T06:12:08.000Z
|
2017-08-25T06:13:58.000Z
|
src/dimmer/dimmer.py
|
deklungel/iRulez
|
eca073d0af55f8bf7006bf37c2cd69116e926211
|
[
"MIT"
] | 1
|
2020-07-23T11:57:06.000Z
|
2020-07-23T11:57:06.000Z
|
import src.irulez.log as log
import src.irulez.constants as constants
import src.irulez.util as util
import paho.mqtt.client as mqtt
import src.irulez.configuration as configuration
import src.dimmer.processor as dimmer_processor
import src.dimmer.mqtt_sender as mqtt_sender
import src.output_status.ServiceClient as ServiceClient
logger = log.get_logger('dimmer Module')
# Get config
config = configuration.Configuration()
mqttConfig = config.get_mqtt_config()
serviceConfig = config.get_service_client_config()
# Create client
client = mqtt.Client()
StatusService = ServiceClient.StatusServiceClient(serviceConfig['url'], serviceConfig['port'])
sender = mqtt_sender.MqttSender(client)
processor = dimmer_processor.DimmerActionProcessor(client, sender, StatusService)
def on_connect(connected_client, _, __, rc) -> None:
"""Callback function for when the mqtt client is connected."""
logger.info("Connected client with result code " + str(rc))
# Subscribe in on_connect callback to automatically re-subscribe if the connection was lost
# Subscribe to all arduino hexnumber actions
# '+' means single level wildcard. '#' means multi level wildcard.
# See http://www.hivemq.com/blog/mqtt-essentials-part-5-mqtt-topics-best-practices
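    # Hedged illustration (the topics below are made up, not from this project):
    # 'home/+/dimmer' matches 'home/kitchen/dimmer' but not 'home/a/b/dimmer',
    # while 'home/#' matches every topic nested under 'home/'.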
# Receives message from button about dimmer state switch
subscribe = constants.iRulezTopic + '/' + constants.actionTopic + '/' + constants.dimmerModuleTopic
logger.debug("Subscribing to " + str(subscribe))
connected_client.subscribe(subscribe)
# Receives message from timer module when a dimmer pin should be updated.
subscribe = constants.iRulezTopic + '/' + constants.actionTopic + '/' + constants.dimmerTimerFired
logger.debug("Subscribing to " + str(subscribe))
connected_client.subscribe(subscribe)
subscribe = constants.iRulezTopic + '/' + constants.dimmerCancelled + '/' + constants.dimmerModuleTopic
logger.debug("Subscribing to " + str(subscribe))
connected_client.subscribe(subscribe)
# # Subscribe to real time dimmer
# subscribe = constants.arduinoTopic + '/' + constants.actionTopic + '/' + constants.dimmerRealTimeModuleTopic
# logger.debug("Subscribing to " + str(subscribe))
# connected_client.subscribe(subscribe)
def on_subscribe(_, __, mid, granted_qos) -> None:
logger.debug("Subscribed: " + str(mid) + " " + str(granted_qos))
def on_message(_, __, msg) -> None:
"""Callback function for when a new message is received."""
logger.debug(f"Received message {msg.topic}: {msg.payload}")
# Find arduino name of topic
logger.debug(f"Process dimmer message")
# Check if message is a command message from the button module to start dimming
if util.is_arduino_dimmer_action_topic(msg.topic):
processor.process_dimmer_message(msg.payload.decode("utf-8"))
return
# Check if message is a timer fired message from the timer module to send the next dimming message
if util.is_arduino_dimmer_timer_fired_topic(msg.topic):
processor.process_dimmer_timer_fired(msg.payload.decode("utf-8"))
return
if util.is_arduino_dimmer_cancelled_topic(msg.topic):
processor.process_dimmer_cancelled(msg.payload.decode("utf-8"))
return
# # Check if message is for real time dimmer
# elif util.is_arduino_real_time_dimmer_topic(msg.topic):
# dimmer_processor.process_dimmer_real_time_message(msg.payload.decode("utf-8"))
# Unknown topic
logger.warning(f"Topic '{msg.topic}' is of no interest to us. Are we subscribed to too much?")
# Set callback functions
client.on_connect = on_connect
client.on_message = on_message
client.on_subscribe = on_subscribe
client.username_pw_set(mqttConfig['username'], mqttConfig['password'])
client.connect(mqttConfig['ip'], int(mqttConfig['port']), 60)
logger.info("Starting loop forever")
# Blocking class that loops forever
# Also handles reconnecting
client.loop_forever()
| 40.316327
| 114
| 0.747659
|
28e89953caba2ae7d1bd0c7665458c78ebac0006
| 35,884
|
py
|
Python
|
src/ploomber/sources/notebooksource.py
|
Vinay26k/ploomber
|
dea26619662449beb62a4e0ba5c717b2357d1045
|
[
"Apache-2.0"
] | null | null | null |
src/ploomber/sources/notebooksource.py
|
Vinay26k/ploomber
|
dea26619662449beb62a4e0ba5c717b2357d1045
|
[
"Apache-2.0"
] | null | null | null |
src/ploomber/sources/notebooksource.py
|
Vinay26k/ploomber
|
dea26619662449beb62a4e0ba5c717b2357d1045
|
[
"Apache-2.0"
] | null | null | null |
"""
On languages and kernels
------------------------
NotebookSource represents source code in a Jupyter notebook format (language
agnostic). Apart from .ipynb, we also support any other extension supported
by jupytext.
Given a notebook, we have to know which language it is written in to extract
upstream/product variables (though this only happens when the option of
extracting dependencies automatically is on), we also have to determine the
Jupyter kernel to use (this is always needed).
The unequivocal place to store this information is in the notebook metadata
section, but given that we advocate for the use of scripts (converted to
notebooks via jupytext), they most likely won't contain metadata (metadata
saving is turned off by default in jupytext), so we have to infer this
ourselves.
To make things more complex, jupytext adds its own metadata section but we are
ignoring that for now.
Given that there are many places where this information might be stored, we
have a few rules to automatically determine language and kernel given a
script/notebook.
"""
from functools import wraps
import ast
from pathlib import Path
import warnings
from contextlib import redirect_stdout
from io import StringIO
from copy import deepcopy
# papermill is importing a deprecated module from pyarrow
with warnings.catch_warnings():
warnings.simplefilter('ignore', FutureWarning)
from papermill.parameterize import parameterize_notebook
import nbformat
import jupytext
from jupytext import cli as jupytext_cli
from jupytext.formats import long_form_one_format, short_form_one_format
from jupytext.config import JupytextConfiguration
import parso
from ploomber.exceptions import (SourceInitializationError,
MissingParametersCellError)
from ploomber.placeholders.placeholder import Placeholder
from ploomber.util import requires
from ploomber.sources.abc import Source
from ploomber.sources.nb_utils import find_cell_with_tag, find_cell_with_tags
from ploomber.static_analysis.extractors import extractor_class_for_language
from ploomber.static_analysis.pyflakes import check_notebook
from ploomber.sources import docstring
from ploomber.io import pretty_print
def _jupytext_fmt(primitive, extension):
"""
Determine the jupytext fmt string to use based on the content and extension
"""
if extension != 'ipynb':
fmt, _ = jupytext.guess_format(primitive, f'.{extension}')
fmt_final = f'{extension}:{fmt}'
else:
fmt_final = '.ipynb'
return fmt_final
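# Hedged illustration of the helper above (hypothetical function, not part of
# the original module): a '# %%'-marked script is usually detected as the
# 'percent' format, while ipynb content keeps the plain '.ipynb' format.
def _example_jupytext_fmt():  # pragma: no cover
    assert _jupytext_fmt('# %%\nx = 1\n', 'py') == 'py:percent'
    assert _jupytext_fmt('{"cells": []}', 'ipynb') == '.ipynb'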
# TODO: we should unit test that this function is called, as opposed to vanilla
# .read_text
def _read_primitive(path):
"""
    We read using UTF-8 instead of the default encoding, since notebooks are
always stored in UTF-8.
We can see this in nbformat, which always reads as UTF-8:
https://github.com/jupyter/nbformat/blob/df63593b64a15ee1c37b522973c39e8674f93c5b/nbformat/__init__.py#L125
Scripts are a different story since they may have other encodings, however,
modern editors have UTF-8 as default (example: VSCode
https://docs.microsoft.com/en-us/powershell/scripting/dev-cross-plat/vscode/understanding-file-encoding?view=powershell-7.2#configuring-vs-code)
so it's safer to use UTF-8 than the default encoding.
jupytext already does this:
https://github.com/mwouts/jupytext/issues/896
"""
return Path(path).read_text(encoding='utf-8')
def _get_last_cell(nb):
"""
Get last cell, ignores cells with empty source (unless the notebook only
has one cell and it's empty)
"""
# iterate in reverse order
for idx in range(-1, -len(nb.cells) - 1, -1):
cell = nb.cells[idx]
# only return it if it has some code
if cell['source'].strip():
return cell
# otherwise return the first cell
return nb.cells[0]
def _get_cell_suggestion(nb):
format_name = nb.metadata.get('jupytext', {}).get('text_representation',
{}).get('format_name')
preamble = 'Add a new cell with your code'
if format_name == 'light':
message = f'{preamble}:\n' + """
# + tags=["parameters"]
# your parameters here...
# -
# +
# your code here...
# -
"""
elif format_name == 'percent':
message = f'{preamble}:\n' + """
# %% tags=["parameters"]
# your parameters here...
# %%
# your code here...
"""
else:
message = preamble + '.'
return message
def requires_path(func):
"""
Checks if NotebookSource instance was initialized from a file, raises
an error if not
"""
@wraps(func)
def wrapper(self, *args, **kwargs):
if self._path is None:
raise ValueError(f'Cannot use {func.__name__!r} if notebook was '
'not initialized from a file')
return func(self, *args, **kwargs)
return wrapper
class NotebookSource(Source):
"""
A source object representing a jupyter notebook (or any format supported
by jupytext)
Parameters
----------
hot_reload : bool, optional
Makes the notebook always read the file before rendering
kernelspec_name : str, optional
Which kernel to use for executing the notebook, it overrides any
existing kernelspec metadata in the notebook. If the notebook does
not have kernelspec info, this parameter is required. Defaults to None.
To see which kernelspecs are available run "jupyter kernelspec list"
check_if_kernel_installed : bool, optional
        Check if the kernel is installed during initialization
Notes
-----
The render method prepares the notebook for execution: it adds the
parameters and it makes sure kernelspec is defined
"""
@requires([
'parso', 'pyflakes', 'jupytext', 'nbformat', 'papermill',
'jupyter_client'
])
def __init__(self,
primitive,
hot_reload=False,
ext_in=None,
kernelspec_name=None,
static_analysis='regular',
check_if_kernel_installed=True):
# any non-py file must first be converted using jupytext, we need
# that representation for validation, if input is already a .py file
# do not convert. If passed a string, try to guess format using
# jupytext. We also need ipynb representation for .develop(),
# but do lazy loading in case we don't need both
self._primitive = primitive
self._check_if_kernel_installed = check_if_kernel_installed
# this happens if using SourceLoader
if isinstance(primitive, Placeholder):
self._path = primitive.path
self._primitive = str(primitive)
elif isinstance(primitive, str):
self._path = None
self._primitive = primitive
elif isinstance(primitive, Path):
self._path = primitive
if primitive.is_dir():
raise SourceInitializationError(
f'Failed to initialize {str(primitive)!r}. '
'Expected a file, got a directory.' +
_suggest_ploomber_scaffold_is_dir())
if not primitive.exists():
raise SourceInitializationError(
f'Failed to initialize {str(primitive)!r}. '
'File does not exist.' +
_suggest_ploomber_scaffold_missing_file())
self._primitive = _read_primitive(primitive)
else:
raise TypeError('Notebooks must be initialized from strings, '
'Placeholder or pathlib.Path, got {}'.format(
type(primitive)))
static_analysis_vals = {'disable', 'regular', 'strict'}
if static_analysis not in static_analysis_vals:
raise ValueError(f'{static_analysis!r} is not a '
"valid 'static_analysis' value, choose one from: "
f'{pretty_print.iterable(static_analysis_vals)}')
self.static_analysis = static_analysis
self._kernelspec_name = kernelspec_name
self._hot_reload = hot_reload
# TODO: validate ext_in values and extensions
if self._path is None and hot_reload:
            raise ValueError('hot_reload only works if the notebook was '
'loaded from a file')
if self._path is not None and ext_in is None:
self._ext_in = self._path.suffix[1:]
elif self._path is None and ext_in is None:
if Path(self._primitive).exists():
path = str(self._primitive)
raise ValueError(
f'The file {path!r} you passed looks like '
'a path to a file. Perhaps you meant passing a '
'pathlib.Path object? Example:\n\n'
'from pathlib import Path\n'
f'NotebookRunner(Path({path!r}))')
else:
raise ValueError(
'"ext_in" cannot be None if the notebook is '
'initialized from a string. Either pass '
'a pathlib.Path object with the notebook file '
'location or pass the source code as string '
'and include the "ext_in" parameter')
elif self._path is not None and ext_in is not None:
raise ValueError('"ext_in" must be None if notebook is '
'initialized from a pathlib.Path object')
elif self._path is None and ext_in is not None:
self._ext_in = ext_in
        # try to determine the language from the extension, though this test
        # might be inconclusive for an ipynb file. We only use this to pick
        # the appropriate jupyter kernel when initializing from a string;
        # when initializing from a file, the extension is used to determine
        # the kernel
self._language = determine_language(self._ext_in)
self._loc = None
self._params = None
self._nb_str_unrendered = None
self._nb_obj_unrendered = None
self._nb_str_rendered = None
self._nb_obj_rendered = None
# this will raise an error if kernelspec_name is invalid
self._read_nb_str_unrendered()
self._post_init_validation(str(self._primitive))
@property
def primitive(self):
if self._hot_reload:
self._primitive = _read_primitive(self._path)
return self._primitive
def render(self, params):
"""Render notebook (fill parameters using papermill)
"""
self._params = json_serializable_params(params)
self._render()
def _render(self):
# _read_nb_str_unrendered uses hot_reload, this ensures we always get
# the latest version
_, nb = self._read_nb_str_unrendered()
if 'parameters' in _get_last_cell(nb).metadata.get('tags', []):
cell_suggestion = _get_cell_suggestion(nb)
kind = 'notebook' if self._ext_in == 'ipynb' else 'script'
raise SourceInitializationError(
f'Error processing {str(self._path)!r}: the last cell '
f'in the {kind} is the parameters cell. {cell_suggestion}')
# this is needed for parameterize_notebook to work
for cell in nb.cells:
if not hasattr(cell.metadata, 'tags'):
cell.metadata['tags'] = []
nb.metadata['papermill'] = dict()
# NOTE: we use parameterize_notebook instead of execute_notebook
# with the prepare_only option because the latter adds a "papermill"
# section on each cell's metadata, which makes it too verbose when
# using NotebookRunner.develop() when the source is script (each cell
# will have an empty "papermill" metadata dictionary)
nb = parameterize_notebook(nb, self._params)
# delete empty tags to prevent cluttering the notebooks
for cell in nb.cells:
if not len(cell.metadata['tags']):
cell.metadata.pop('tags')
self._nb_str_rendered = nbformat.writes(nb)
self._post_render_validation()
def _read_nb_str_unrendered(self):
"""
Returns the notebook representation (JSON string), this is the raw
source code passed, does not contain injected parameters.
Adds kernelspec info if not present based on the kernelspec_name,
this metadata is required for papermill to know which kernel to use.
An exception is raised if we cannot determine kernel information.
"""
        # hot_reload causes the notebook representation to always be re-evaluated
if self._nb_str_unrendered is None or self._hot_reload:
# this is the notebook node representation
nb = _to_nb_obj(
self.primitive,
ext=self._ext_in,
# passing the underscored version
# because that's the only one available
# when this is initialized
language=self._language,
kernelspec_name=self._kernelspec_name,
check_if_kernel_installed=self._check_if_kernel_installed,
path=self._path)
            # if the user injected cells manually (with ploomber nb --inject)
# the source will contain the injected cell, remove it because
# it should not be considered part of the source code
self._nb_obj_unrendered = _cleanup_rendered_nb(nb, print_=False)
# get the str representation. always write from nb_obj, even if
# this was initialized with a ipynb file, nb_obj contains
# kernelspec info
self._nb_str_unrendered = nbformat.writes(
self._nb_obj_unrendered, version=nbformat.NO_CONVERT)
return self._nb_str_unrendered, self._nb_obj_unrendered
def _post_init_validation(self, value):
"""
Validate notebook after initialization (run pyflakes to detect
syntax errors)
"""
# NOTE: what happens if I pass source code with errors to parso?
# maybe we don't need to use pyflakes after all
# we can also use compile. can pyflakes detect things that
# compile cannot?
params_cell, _ = find_cell_with_tag(self._nb_obj_unrendered,
'parameters')
if params_cell is None:
loc = ' "{}"'.format(self.loc) if self.loc else ''
msg = ('Notebook{} does not have a cell tagged '
'"parameters"'.format(loc))
if self.loc and Path(self.loc).suffix == '.py':
msg += """.
Add a cell at the top like this:
# %% tags=["parameters"]
upstream = None
product = None
Go to: https://ploomber.io/s/params for more information
"""
if self.loc and Path(self.loc).suffix == '.ipynb':
msg += ('. Add a cell at the top and tag it as "parameters". '
'Go to the next URL for '
'details: https://ploomber.io/s/params')
raise MissingParametersCellError(msg)
def _post_render_validation(self):
"""
Validate params passed against parameters in the notebook
"""
# NOTE: maybe static_analysis = off should not turn off everything
# but only warn
# strict mode: raise and check signature
# regular mode: _check_notebook called in NotebookRunner.run
if self.static_analysis == 'strict':
self._check_notebook(raise_=True, check_signature=True)
else:
# otherwise, only warn on unused parameters
_warn_on_unused_params(self._nb_obj_unrendered, self._params)
def _check_notebook(self, raise_, check_signature):
if self.static_analysis and self.language == 'python':
# warn if errors (e.g., undeclared variables, syntax errors)
check_notebook(self._nb_str_to_obj(self._nb_str_rendered),
self._params,
filename=self._path or 'notebook',
raise_=raise_,
check_signature=check_signature)
@property
def doc(self):
"""
Returns notebook docstring parsed either from a triple quoted string
in the top cell or a top markdown markdown cell
"""
return docstring.extract_from_nb(self._nb_obj_unrendered)
@property
def loc(self):
return self._path
@property
def name(self):
# filename without extension(e.g., plot.py -> plot)
if self._path:
return self._path.stem
@property
def nb_str_rendered(self):
"""
Returns the notebook (as a string) with parameters injected, hot
        reloading if necessary
"""
if self._nb_str_rendered is None:
raise RuntimeError('Attempted to get location for an unrendered '
'notebook, render it first')
if self._hot_reload:
self._render()
return self._nb_str_rendered
@property
def nb_obj_rendered(self):
"""
        Returns the notebook (as an object) with parameters injected, hot
        reloading if necessary
"""
if self._nb_obj_rendered is None:
# using self.nb_str_rendered triggers hot reload if needed
self._nb_obj_rendered = self._nb_str_to_obj(self.nb_str_rendered)
return self._nb_obj_rendered
def __str__(self):
# reload if empty or hot_reload=True
self._read_nb_str_unrendered()
# FIXME: this should ignore changes to the markdown cells
return '\n'.join([c.source for c in self._nb_obj_unrendered.cells])
def __repr__(self):
if self.loc is not None:
return "{}('{}')".format(type(self).__name__, self.loc)
else:
return "{}(loaded from string)".format(type(self).__name__)
@property
def variables(self):
raise NotImplementedError
@property
def extension(self):
        # this can be Python, R, Julia, etc. We handle them all the same;
        # for now, no normalization is done.
# One approach is to use the ext if loaded from file, otherwise None
return None
# FIXME: add this to the abstract class, probably get rid of "extension"
# since it's not informative (ipynb files can be Python, R, etc)
@property
def language(self):
"""
        Notebook language (Python, R, etc.). This is a best-effort property
        and can be None if we could not determine the language
"""
if self._language is None:
self._read_nb_str_unrendered()
try:
# make sure you return "r" instead of "R"
return (self._nb_obj_unrendered.metadata.kernelspec.language.
lower())
except AttributeError:
return None
else:
return self._language
def _nb_str_to_obj(self, nb_str):
return nbformat.reads(nb_str, as_version=nbformat.NO_CONVERT)
def _get_parameters_cell(self):
self._read_nb_str_unrendered()
cell, _ = find_cell_with_tag(self._nb_obj_unrendered, tag='parameters')
return cell.source
def extract_upstream(self):
extractor_class = extractor_class_for_language(self.language)
return extractor_class(self._get_parameters_cell()).extract_upstream()
def extract_product(self):
extractor_class = extractor_class_for_language(self.language)
return extractor_class(self._get_parameters_cell()).extract_product()
@requires_path
def save_injected_cell(self):
"""
Inject cell, overwrite the source file (and any paired files)
"""
fmt_ = _jupytext_fmt(self._primitive, self._ext_in)
# add metadata to flag that the cell was injected manually
recursive_update(
self.nb_obj_rendered,
dict(metadata=dict(ploomber=dict(injected_manually=True))))
# Are we updating a text file that has a metadata filter? If so,
# add ploomber as a section that must be stored
if (self.nb_obj_rendered.metadata.get(
'jupytext', {}).get('notebook_metadata_filter') == '-all'):
recursive_update(
self.nb_obj_rendered,
dict(metadata=dict(jupytext=dict(
notebook_metadata_filter='ploomber,-all'))))
# overwrite
jupytext.write(self.nb_obj_rendered, self._path, fmt=fmt_)
# overwrite all paired files
for path, fmt_ in iter_paired_notebooks(self.nb_obj_rendered, fmt_,
self._path.stem):
jupytext.write(self.nb_obj_rendered, fp=path, fmt=fmt_)
@requires_path
def remove_injected_cell(self):
"""
Delete injected cell, overwrite the source file (and any paired files)
"""
nb_clean = _cleanup_rendered_nb(self._nb_obj_unrendered)
# remove metadata
recursive_update(
nb_clean,
dict(metadata=dict(ploomber=dict(injected_manually=None))))
fmt_ = _jupytext_fmt(self._primitive, self._ext_in)
# overwrite
jupytext.write(nb_clean, self._path, fmt=fmt_)
# overwrite all paired files
for path, fmt_ in iter_paired_notebooks(self._nb_obj_unrendered, fmt_,
self._path.stem):
jupytext.write(nb_clean, fp=path, fmt=fmt_)
@requires_path
def format(self, fmt):
"""Change source format
Returns
-------
str
The path if the extension changed, None otherwise
"""
nb_clean = _cleanup_rendered_nb(self._nb_obj_unrendered)
ext_file = self._path.suffix
ext_format = long_form_one_format(fmt)['extension']
extension_changed = ext_file != ext_format
if extension_changed:
path = self._path.with_suffix(ext_format)
Path(self._path).unlink()
else:
path = self._path
jupytext.write(nb_clean, path, fmt=fmt)
return path if extension_changed else None
@requires_path
def pair(self, base_path):
"""Pairs with an ipynb file
"""
# TODO: add unit test
if self._ext_in == 'ipynb':
raise ValueError(
'pairing only works with .py files, got .ipynb. '
                'You may convert the .ipynb to .py and try again.')
fmt, _ = jupytext.guess_format(self._primitive, f'.{self._ext_in}')
fmt_ = f'{self._ext_in}:{fmt}'
# mute jupytext's output
with redirect_stdout(StringIO()):
jupytext_cli.jupytext(args=[
'--set-formats', f'{base_path}//ipynb,{fmt_}',
str(self._path)
])
@requires_path
def sync(self):
"""Pairs with and ipynb file
"""
# mute jupytext's output
with redirect_stdout(StringIO()):
jupytext_cli.jupytext(args=['--sync', str(self._path)])
def json_serializable_params(params):
# papermill only allows JSON serializable parameters
# convert Params object to dict
params = params.to_dict()
params['product'] = params['product'].to_json_serializable()
if params.get('upstream'):
params['upstream'] = params['upstream'].to_json_serializable()
return params
def _to_nb_obj(source,
language,
ext=None,
kernelspec_name=None,
check_if_kernel_installed=True,
path=None):
"""
    Convert to a jupyter notebook via jupytext. If the notebook does not
    contain kernel information and the user did not pass a kernelspec_name
    explicitly, we will try to infer the language and select a kernel
    appropriately.
    If a valid kernel is found, it is added to the notebook. If none of this
    works, an exception is raised.
    It also converts the code string to its notebook node representation,
    adding kernel data accordingly.
Parameters
----------
source : str
Jupyter notebook (or jupytext compatible formatted) document
language : str
Programming language
path : str, default=None
Script/notebook path. If not None, it's used to throw an informative
error if the notebook fails to load
Returns
-------
nb
Notebook object
Raises
------
RenderError
If the notebook has no kernelspec metadata and kernelspec_name is
None. A notebook without kernelspec metadata will not display in
jupyter notebook correctly. We have to make sure all notebooks
have this.
"""
import jupytext
# let jupytext figure out the format
try:
nb = jupytext.reads(source, fmt=ext)
except Exception as e:
what = 'notebook' if ext == 'ipynb' else 'script'
err = f'Failed to read {what}'
if path is not None:
err += f' from {str(path)!r}'
raise SourceInitializationError(err) from e
# NOTE: I can add the cell with parameters here, but what happens if
# extract_upstream is false? would that be a problem?
check_nb_kernelspec_info(nb,
kernelspec_name,
ext,
language,
check_if_installed=check_if_kernel_installed)
return nb
def check_nb_kernelspec_info(nb,
kernelspec_name,
ext,
language,
check_if_installed=True):
"""Make sure the passed notebook has kernel info
Parameters
----------
check_if_installed : bool
        Also check if the kernelspec is installed; nb.metadata.kernelspec is
        replaced by whatever information jupyter returns when requesting
        the kernelspec
"""
import jupyter_client
kernel_name = determine_kernel_name(nb, kernelspec_name, ext, language)
# cannot keep going if we don't have the kernel name
if kernel_name is None:
raise SourceInitializationError(
'Notebook does not contain kernelspec metadata and '
'kernelspec_name was not specified, either add '
'kernelspec info to your source file or specify '
'a kernelspec by name. To see list of installed kernels run '
'"jupyter kernelspec list" in the terminal (first column '
'indicates the name). Python is usually named "python3", '
'R usually "ir"')
if check_if_installed:
kernelspec = jupyter_client.kernelspec.get_kernel_spec(kernel_name)
nb.metadata.kernelspec = {
"display_name": kernelspec.display_name,
"language": kernelspec.language,
"name": kernel_name
}
else:
if 'metadata' not in nb:
nb['metadata'] = dict()
if 'kernelspec' not in nb['metadata']:
nb['metadata']['kernelspec'] = dict()
# we cannot ask jupyter, so we fill this in ourselves
nb.metadata.kernelspec = {
"display_name": 'R' if kernel_name == 'ir' else 'Python 3',
"language": 'R' if kernel_name == 'ir' else 'python',
"name": kernel_name
}
def determine_kernel_name(nb, kernelspec_name, ext, language):
"""
Determines the kernel name by using the following data (returns whatever
gives kernel info first): 1) explicit kernel from the user 2) notebook's
metadata 3) file extension 4) language 5) best guess
"""
# explicit kernelspec name
if kernelspec_name is not None:
return kernelspec_name
# use metadata info
try:
return nb.metadata.kernelspec.name
except AttributeError:
pass
# use language from extension if passed, otherwise use language variable
if ext:
language = determine_language(ext)
lang2kernel = {'python': 'python3', 'r': 'ir'}
if language in lang2kernel:
return lang2kernel[language]
# nothing worked, try to guess if it's python...
is_python_ = is_python(nb)
if is_python_:
return 'python3'
else:
return None
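# Hedged illustration of the resolution order documented above (hypothetical
# helper, not part of ploomber's API):
def _example_determine_kernel_name():  # pragma: no cover
    nb = nbformat.v4.new_notebook()
    # 1) an explicit kernelspec_name always wins
    assert determine_kernel_name(nb, 'ir', ext=None, language=None) == 'ir'
    # 3) with no kernelspec metadata and no explicit name, the extension decides
    assert determine_kernel_name(nb, None, ext='py', language=None) == 'python3'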
def inject_cell(model, params):
"""Inject params (by adding a new cell) to a model
Notes
-----
A model is different than a notebook:
https://jupyter-notebook.readthedocs.io/en/stable/extending/contents.html
"""
nb = nbformat.from_dict(model['content'])
# we must ensure nb has kernelspec info, otherwise papermill will fail to
# parametrize
ext = model['name'].split('.')[-1]
check_nb_kernelspec_info(nb, kernelspec_name=None, ext=ext, language=None)
# papermill adds a bunch of things before calling parameterize_notebook
# if we don't add those things, parameterize_notebook breaks
# https://github.com/nteract/papermill/blob/0532d499e13e93d8990211be33e9593f1bffbe6c/papermill/iorw.py#L400
if not hasattr(nb.metadata, 'papermill'):
nb.metadata['papermill'] = {
'parameters': dict(),
'environment_variables': dict(),
'version': None,
}
for cell in nb.cells:
if not hasattr(cell.metadata, 'tags'):
cell.metadata['tags'] = []
params = json_serializable_params(params)
comment = ('This cell was injected automatically based on your stated '
'upstream dependencies (cell above) and pipeline.yaml '
'preferences. It is temporary and will be removed when you '
'save this notebook')
model['content'] = parameterize_notebook(nb,
params,
report_mode=False,
comment=comment)
def _cleanup_rendered_nb(nb, print_=True):
"""
Cleans up a rendered notebook object. Removes cells with tags:
injected-parameters, debugging-settings, and metadata injected by
papermill
"""
out = find_cell_with_tags(nb,
['injected-parameters', 'debugging-settings'])
if print_:
for key in out.keys():
print(f'Removing {key} cell...')
idxs = set(cell['index'] for cell in out.values())
nb['cells'] = [
cell for idx, cell in enumerate(nb['cells']) if idx not in idxs
]
# papermill adds "tags" to all cells that don't have them, remove them
# if they are empty to avoid cluttering the script
for cell in nb['cells']:
if 'tags' in cell.get('metadata', {}):
if not len(cell['metadata']['tags']):
del cell['metadata']['tags']
return nb
def is_python(nb):
"""
    Determine whether a given notebook object contains Python code. Look at
    metadata.kernelspec.language first; if it is not defined, try to guess.
    The guess is conservative: it returns False if the code is valid Python
    but contains "<-", in which case it is much more likely to be R
"""
is_python_ = None
# check metadata first
try:
language = nb.metadata.kernelspec.language
except AttributeError:
pass
else:
is_python_ = language == 'python'
# no language defined in metadata, check if it's valid python
if is_python_ is None:
code_str = '\n'.join([c.source for c in nb.cells])
try:
ast.parse(code_str)
except SyntaxError:
is_python_ = False
else:
            # there is a lot of R code which is also valid Python code! So
            # let's run a quick test. It is very unlikely to have "<-" in
            # Python ({less than} {negative}) but it is extremely common in R
            # ({assignment})
if '<-' not in code_str:
is_python_ = True
# inconclusive test...
if is_python_ is None:
is_python_ = False
return is_python_
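# Hedged sketch of the heuristic above (hypothetical helper, not part of the
# original module): 'x <- 1' parses as valid Python ('x < -1'), but the '<-'
# check still classifies it as likely R, keeping the guess conservative.
def _example_is_python():  # pragma: no cover
    r_nb = nbformat.v4.new_notebook(
        cells=[nbformat.v4.new_code_cell('x <- 1')])
    assert is_python(r_nb) is False
    py_nb = nbformat.v4.new_notebook(
        cells=[nbformat.v4.new_code_cell('x = 1')])
    assert is_python(py_nb) is True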
def determine_language(extension):
"""
    Determine the programming language for a given file extension. Returns
    the language name (all lowercase) if it could be determined, or None if
    the test is inconclusive
"""
if extension.startswith('.'):
extension = extension[1:]
mapping = {'py': 'python', 'r': 'r', 'R': 'r', 'Rmd': 'r', 'rmd': 'r'}
# ipynb can be many languages, it must return None
return mapping.get(extension)
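# Hedged illustration of the mapping above (hypothetical helper, not part of
# ploomber's API):
def _example_determine_language():  # pragma: no cover
    assert determine_language('.py') == 'python'
    assert determine_language('Rmd') == 'r'
    # ipynb notebooks can hold many languages, so the test is inconclusive
    assert determine_language('ipynb') is None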
def recursive_update(target, update):
"""Recursively update a dictionary. Taken from jupytext.header
"""
for key in update:
value = update[key]
if value is None:
# remove if it exists
target.pop(key, None)
elif isinstance(value, dict):
target[key] = recursive_update(target.get(key, {}), value)
else:
target[key] = value
return target
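# Hedged illustration (hypothetical helper, not part of ploomber's API): a
# None value removes the key, and nested dictionaries are merged recursively.
def _example_recursive_update():  # pragma: no cover
    target = {'a': {'b': 1, 'c': 2}, 'd': 3}
    updated = recursive_update(target, {'a': {'b': None, 'e': 4}, 'd': None})
    assert updated == {'a': {'c': 2, 'e': 4}}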
def parse_jupytext_format(fmt, name):
"""
Parse a jupytext format string (such as notebooks//ipynb) and return the
path to the file and the extension
"""
fmt_parsed = long_form_one_format(fmt)
path = Path(fmt_parsed['prefix'], f'{name}{fmt_parsed["extension"]}')
del fmt_parsed['prefix']
return path, short_form_one_format(fmt_parsed)
def iter_paired_notebooks(nb, fmt_, name):
formats = nb.metadata.get('jupytext', {}).get('formats', '')
if not formats:
return
formats = formats.split(',')
formats.remove(fmt_)
# overwrite all paired files
for path, fmt_current in (parse_jupytext_format(fmt, name)
for fmt in formats):
yield path, fmt_current
def _nb2codestr(nb):
return '\n'.join([c.source for c in nb.cells if c.cell_type == 'code'])
def _warn_on_unused_params(nb, params):
nb = deepcopy(nb)
_, idx = find_cell_with_tag(nb, 'parameters')
del nb.cells[idx]
code = _nb2codestr(nb)
    # NOTE: if there is a syntax error we cannot accurately check this
m = parso.parse(code)
names = set(m.get_used_names())
# remove product since it may not be required
# FIXME: maybe only remove it if it's a dictionary with >2 keys
unused = set(params) - names - {'product'}
if unused:
warnings.warn('These parameters are not used in the '
f'task\'s source code: {pretty_print.iterable(unused)}')
def add_parameters_cell(path, extract_upstream, extract_product):
"""
Add parameters cell to a script/notebook in the given path, overwrites the
original file
"""
source = ''
if extract_upstream:
source += """\
# declare a list of tasks whose products you want to use as inputs
upstream = None
"""
if extract_product:
source += """\
# declare a dictionary with the outputs of this task
product = None
"""
c = JupytextConfiguration()
c.notebook_metadata_filter
c.cell_metadata_filter = 'all'
nb = jupytext.read(path)
new_cell = nbformat.v4.new_code_cell(source,
metadata={'tags': ['parameters']})
nb.cells.insert(0, new_cell)
jupytext.write(nb, path, config=c)
def _suggest_ploomber_scaffold_missing_file():
if Path('pipeline.yaml').is_file():
return '\nTo create it, run: ploomber scaffold'
else:
return ''
def _suggest_ploomber_scaffold_is_dir():
if Path('pipeline.yaml').is_file():
return ('\nTo create it, delete the directory, '
'then run: ploomber scaffold')
else:
return ''
| 34.305927
| 148
| 0.625237
|
b0704a0487fe62b838fa728b127c6f2a091a2355
| 5,061
|
py
|
Python
|
tests/vfs/vshadow_file_entry.py
|
Defense-Cyber-Crime-Center/dfvfs
|
da2ccbc4c989ced5ad651057bd8f5a4b18af6d37
|
[
"Apache-2.0"
] | 2
|
2016-02-18T12:46:26.000Z
|
2022-03-13T03:05:05.000Z
|
tests/vfs/vshadow_file_entry.py
|
Defense-Cyber-Crime-Center/dfvfs
|
da2ccbc4c989ced5ad651057bd8f5a4b18af6d37
|
[
"Apache-2.0"
] | null | null | null |
tests/vfs/vshadow_file_entry.py
|
Defense-Cyber-Crime-Center/dfvfs
|
da2ccbc4c989ced5ad651057bd8f5a4b18af6d37
|
[
"Apache-2.0"
] | 5
|
2016-12-18T08:05:39.000Z
|
2019-11-19T21:18:00.000Z
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
"""Tests for the file entry implementation using the pyvshadow."""
import os
import unittest
from dfvfs.path import os_path_spec
from dfvfs.path import qcow_path_spec
from dfvfs.path import vshadow_path_spec
from dfvfs.resolver import context
from dfvfs.vfs import vshadow_file_entry
from dfvfs.vfs import vshadow_file_system
class VShadowFileEntryTest(unittest.TestCase):
"""The unit test for the VSS file entry object."""
def setUp(self):
"""Sets up the needed objects used throughout the test."""
self._resolver_context = context.Context()
test_file = os.path.join(u'test_data', u'vsstest.qcow2')
path_spec = os_path_spec.OSPathSpec(location=test_file)
self._qcow_path_spec = qcow_path_spec.QcowPathSpec(parent=path_spec)
self._vshadow_path_spec = vshadow_path_spec.VShadowPathSpec(
location=u'/', parent=self._qcow_path_spec)
self._file_system = vshadow_file_system.VShadowFileSystem(
self._resolver_context)
self._file_system.Open(path_spec=self._vshadow_path_spec)
def tearDown(self):
"""Cleans up the needed objects used throughout the test."""
self._file_system.Close()
# qcowmount test_data/vsstest.qcow2 fuse/
# vshadowinfo fuse/qcow1
#
# Volume Shadow Snapshot information:
# Number of stores: 2
#
# Store: 1
# ...
# Identifier : 600f0b69-5bdf-11e3-9d6c-005056c00008
# Shadow copy set ID : 0a4e3901-6abb-48fc-95c2-6ab9e38e9e71
# Creation time : Dec 03, 2013 06:35:09.736378700 UTC
# Shadow copy ID : 4e3c03c2-7bc6-4288-ad96-c1eac1a55f71
# Volume size : 1073741824 bytes
# Attribute flags : 0x00420009
#
# Store: 2
# Identifier : 600f0b6d-5bdf-11e3-9d6c-005056c00008
# Shadow copy set ID : 8438a0ee-0f06-443b-ac0c-2905647ca5d6
# Creation time : Dec 03, 2013 06:37:48.919058300 UTC
# Shadow copy ID : 18f1ac6e-959d-436f-bdcc-e797a729e290
# Volume size : 1073741824 bytes
# Attribute flags : 0x00420009
  def testInitialize(self):
"""Test the initialize functionality."""
file_entry = vshadow_file_entry.VShadowFileEntry(
self._resolver_context, self._file_system, self._vshadow_path_spec)
self.assertNotEqual(file_entry, None)
def testGetParentFileEntry(self):
"""Test the get parent file entry functionality."""
path_spec = vshadow_path_spec.VShadowPathSpec(
store_index=1, parent=self._qcow_path_spec)
file_entry = self._file_system.GetFileEntryByPathSpec(path_spec)
self.assertNotEqual(file_entry, None)
parent_file_entry = file_entry.GetParentFileEntry()
self.assertEqual(parent_file_entry, None)
def testGetStat(self):
"""Test the get stat functionality."""
path_spec = vshadow_path_spec.VShadowPathSpec(
store_index=1, parent=self._qcow_path_spec)
file_entry = self._file_system.GetFileEntryByPathSpec(path_spec)
stat_object = file_entry.GetStat()
self.assertNotEqual(stat_object, None)
self.assertEqual(stat_object.type, stat_object.TYPE_FILE)
def testIsFunctions(self):
"""Test the Is? functionality."""
path_spec = vshadow_path_spec.VShadowPathSpec(
store_index=1, parent=self._qcow_path_spec)
file_entry = self._file_system.GetFileEntryByPathSpec(path_spec)
self.assertFalse(file_entry.IsRoot())
self.assertFalse(file_entry.IsVirtual())
self.assertTrue(file_entry.IsAllocated())
self.assertFalse(file_entry.IsDevice())
self.assertFalse(file_entry.IsDirectory())
self.assertTrue(file_entry.IsFile())
self.assertFalse(file_entry.IsLink())
self.assertFalse(file_entry.IsPipe())
self.assertFalse(file_entry.IsSocket())
path_spec = vshadow_path_spec.VShadowPathSpec(
location=u'/', parent=self._qcow_path_spec)
file_entry = self._file_system.GetFileEntryByPathSpec(path_spec)
self.assertTrue(file_entry.IsRoot())
self.assertTrue(file_entry.IsVirtual())
self.assertTrue(file_entry.IsAllocated())
self.assertFalse(file_entry.IsDevice())
self.assertTrue(file_entry.IsDirectory())
self.assertFalse(file_entry.IsFile())
self.assertFalse(file_entry.IsLink())
self.assertFalse(file_entry.IsPipe())
self.assertFalse(file_entry.IsSocket())
def testSubFileEntries(self):
"""Test the sub file entries iteration functionality."""
path_spec = vshadow_path_spec.VShadowPathSpec(
location=u'/', parent=self._qcow_path_spec)
file_entry = self._file_system.GetFileEntryByPathSpec(path_spec)
self.assertNotEqual(file_entry, None)
self.assertEqual(file_entry.number_of_sub_file_entries, 2)
expected_sub_file_entry_names = [u'vss1', u'vss2']
sub_file_entry_names = []
for sub_file_entry in file_entry.sub_file_entries:
sub_file_entry_names.append(sub_file_entry.name)
self.assertEqual(
len(sub_file_entry_names), len(expected_sub_file_entry_names))
self.assertEqual(
sorted(sub_file_entry_names), sorted(expected_sub_file_entry_names))
if __name__ == '__main__':
unittest.main()
| 34.903448
| 76
| 0.742343
|
d7d969d13441f34ff644b0751fe22652bfbe141b
| 10,545
|
py
|
Python
|
src/oscarcch/calculator.py
|
thelabnyc/django-oscar-cch
|
d98832c9cf642c6d241e3aaf4b1dc631c3d5ce0e
|
[
"0BSD"
] | null | null | null |
src/oscarcch/calculator.py
|
thelabnyc/django-oscar-cch
|
d98832c9cf642c6d241e3aaf4b1dc631c3d5ce0e
|
[
"0BSD"
] | 14
|
2020-02-11T21:53:07.000Z
|
2022-01-13T00:40:33.000Z
|
src/oscarcch/calculator.py
|
thelabnyc/django-oscar-cch
|
d98832c9cf642c6d241e3aaf4b1dc631c3d5ce0e
|
[
"0BSD"
] | 1
|
2016-05-31T10:02:38.000Z
|
2016-05-31T10:02:38.000Z
|
from datetime import datetime
from decimal import Decimal
from . import exceptions, settings
import logging
import soap
logger = logging.getLogger(__name__)
POSTCODE_LEN = 5
PLUS4_LEN = 4
class CCHTaxCalculator(object):
"""
Simple interface between Python and the CCH Sales Tax Office SOAP API.
"""
precision = settings.CCH_PRECISION
wsdl = settings.CCH_WSDL
entity_id = settings.CCH_ENTITY
    division_id = settings.CCH_DIVISION
max_retries = settings.CCH_MAX_RETRIES
def __init__(self, breaker=None):
"""
Construct a CCHTaxCalculator instance
You may optionally supply a ``pybreaker.CircuitBreaker`` instance. If you do so, it will be used to
implement the CircuitBreaker pattern around the SOAP calls to the CCH web service.
:param breaker: Optional :class:`CircuitBreaker <pybreaker.CircuitBreaker>` instance
"""
self.breaker = breaker
def apply_taxes(self, shipping_address, basket=None, shipping_charge=None):
"""
Apply taxes to a Basket instance using the given shipping address.
Pass return value of this method to :func:`OrderTaxation.save_details <oscarcch.models.OrderTaxation.save_details>`
to persist the taxation details, CCH transaction ID, etc in the database.
:param shipping_address: :class:`ShippingAddress <oscar.apps.order.models.ShippingAddress>` instance
:param basket: :class:`Basket <oscar.apps.basket.models.Basket>` instance
:param shipping_charge: :class:`ShippingCharge <oscarcch.prices.ShippingCharge>` instance
:return: SOAP Response.
"""
response = self._get_response(shipping_address, basket, shipping_charge)
# Check the response for errors
respOK = self._check_response_messages(response)
if not respOK:
response = None
# Build map of line IDs to line tax details
cch_line_map = {}
if response and response.LineItemTaxes:
cch_line_map = {
item.ID: item for item in response.LineItemTaxes.LineItemTax
}
# Apply taxes to line items
if basket is not None:
for line in basket.all_lines():
line_id = str(line.id)
taxes = cch_line_map.get(line_id)
self._apply_taxes_to_price(
taxes, line.purchase_info.price, line.quantity
)
# Apply taxes to shipping charge
if shipping_charge is not None:
for shipping_charge_component in shipping_charge.components:
shipping_taxes = cch_line_map.get(shipping_charge_component.cch_line_id)
self._apply_taxes_to_price(shipping_taxes, shipping_charge_component, 1)
# Return CCH response
return response
def _apply_taxes_to_price(self, taxes, price, quantity):
# Taxes come in two forms: quantity and percentage based
# We need to handle both of those here. The tricky part is that CCH returns data
# for an entire line item (inclusive quantity), but Oscar needs the tax info for
# each unit in the line (exclusive quantity). So, we use the details provided to
# derive the per-unit taxes before applying them.
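        # Hedged numeric illustration (values are made up): if CCH reports
        # TaxApplied=1.50 and FeeApplied=0.30 for a line of quantity 3, each
        # unit gets 0.50 tax and 0.10 fee, and the consistency check below
        # re-multiplies by 3 to confirm the 1.50 line total.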
price.clear_taxes()
if taxes:
for tax in taxes.TaxDetails.TaxDetail:
unit_fee = Decimal(str(tax.FeeApplied)) / quantity
unit_tax = Decimal(str(tax.TaxApplied)) / quantity
price.add_tax(
authority_name=tax.AuthorityName,
tax_name=tax.TaxName,
tax_applied=unit_tax,
fee_applied=unit_fee,
)
# Check our work and make sure the total we arrived at matches the total CCH gave us
total_line_tax = (price.tax * quantity).quantize(self.precision)
total_applied_tax = Decimal(taxes.TotalTaxApplied).quantize(self.precision)
if total_applied_tax != total_line_tax:
raise RuntimeError(
(
"Taxation miscalculation occurred! "
"Details sum to %s, which doesn't match given sum of %s"
)
% (total_line_tax, taxes.TotalTaxApplied)
)
else:
price.tax = Decimal("0.00")
def _get_response(self, shipping_address, basket, shipping_charge):
"""Fetch CCH tax data for the given basket and shipping address"""
response = None
retry_count = 0
while response is None and retry_count <= self.max_retries:
response = self._get_response_inner(
shipping_address, basket, shipping_charge, retry_count=retry_count
)
retry_count += 1
return response
def _get_response_inner(
self, shipping_address, basket, shipping_charge, retry_count
):
response = None
def _call_service():
order = self._build_order(shipping_address, basket, shipping_charge)
if order is None:
return None
response = self.client.service.CalculateRequest(
self.entity_id, self.divsion_id, order
)
return response
try:
if self.breaker is not None:
response = self.breaker.call(_call_service)
else:
response = _call_service()
except Exception as e:
logger.exception(e)
return response
def _check_response_messages(self, response):
"""Raise an exception if response messages contains any reported errors."""
if response is None:
return False
if response.Messages:
for message in response.Messages.Message:
if message.Code > 0:
exc = exceptions.build(message.Severity, message.Code, message.Info)
logger.exception(exc)
return False
return True
@property
def client(self):
"""Lazy constructor for SOAP client"""
return soap.get_client(self.wsdl, "CCH")
def _build_order(self, shipping_address, basket, shipping_charge):
"""Convert an Oscar Basket and ShippingAddresss into a CCH Order object"""
order = self.client.factory.create("ns15:Order")
order.InvoiceDate = datetime.now(settings.CCH_TIME_ZONE)
order.SourceSystem = settings.CCH_SOURCE_SYSTEM
order.TestTransaction = settings.CCH_TEST_TRANSACTIONS
order.TransactionType = settings.CCH_TRANSACTION_TYPE
order.CustomerType = settings.CCH_CUSTOMER_TYPE
order.ProviderType = settings.CCH_PROVIDER_TYPE
order.TransactionID = 0
order.finalize = settings.CCH_FINALIZE_TRANSACTION
# Add CCH lines for each basket line
if basket is not None:
for line in basket.all_lines():
qty = getattr(line, "cch_quantity", line.quantity)
if qty <= 0:
continue
# Line Info
item = self.client.factory.create("ns11:LineItem")
item.ID = line.id
item.AvgUnitPrice = Decimal(
line.line_price_excl_tax_incl_discounts / qty
).quantize(Decimal("0.00001"))
item.Quantity = qty
item.ExemptionCode = None
item.SKU = self._get_product_data("sku", line)
# Product Info
item.ProductInfo = self.client.factory.create("ns21:ProductInfo")
item.ProductInfo.ProductGroup = self._get_product_data("group", line)
item.ProductInfo.ProductItem = self._get_product_data("item", line)
# Ship From/To Addresses
item.NexusInfo = self.client.factory.create("ns14:NexusInfo")
warehouse = line.stockrecord.partner.primary_address
if warehouse:
item.NexusInfo.ShipFromAddress = self._build_address(warehouse)
item.NexusInfo.ShipToAddress = self._build_address(shipping_address)
# Add line to order
order.LineItems.LineItem.append(item)
# Add CCH lines for shipping charges
if shipping_charge is not None and settings.CCH_SHIPPING_TAXES_ENABLED:
for shipping_charge_component in shipping_charge.components:
shipping_line = self.client.factory.create("ns11:LineItem")
shipping_line.ID = shipping_charge_component.cch_line_id
shipping_line.AvgUnitPrice = (
shipping_charge_component.excl_tax.quantize(Decimal("0.00001"))
)
shipping_line.Quantity = 1
shipping_line.ExemptionCode = None
shipping_line.SKU = shipping_charge_component.cch_sku
shipping_line.NexusInfo = self.client.factory.create("ns14:NexusInfo")
shipping_line.NexusInfo.ShipToAddress = self._build_address(
shipping_address
)
# Add shipping line to order
order.LineItems.LineItem.append(shipping_line)
# Must include at least 1 line item
if len(order.LineItems.LineItem) <= 0:
return None
# Return order
return order
def _build_address(self, oscar_address):
addr = self.client.factory.create("ns0:Address")
addr.Line1 = oscar_address.line1
addr.Line2 = oscar_address.line2
addr.City = oscar_address.city
addr.StateOrProvince = oscar_address.state
postcode, plus4 = self.format_postcode(oscar_address.postcode)
addr.PostalCode = postcode
addr.Plus4 = plus4
addr.CountryCode = oscar_address.country.code
return addr
def _get_product_data(self, key, line):
key = "cch_product_%s" % key
sku = getattr(settings, key.upper())
sku = getattr(line.product.attr, key.lower(), sku)
return sku
def format_postcode(self, raw_postcode):
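        # Examples: "02139" -> ("02139", None); "02139-4307" -> ("02139", "4307");
        # any other long form keeps only the first five characters and leaves Plus4 unset.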
if not raw_postcode:
return "", ""
postcode, plus4 = raw_postcode[:POSTCODE_LEN], None
# Set Plus4 if PostalCode provided as 9 digits separated by hyphen
if len(raw_postcode) == POSTCODE_LEN + PLUS4_LEN + 1:
plus4 = raw_postcode[POSTCODE_LEN + 1 :]
return postcode, plus4
| 41.845238
| 123
| 0.621527
|
8eca530edb334f1f4a8b573a4e4ec1bdbb57b04d
| 3,602
|
py
|
Python
|
ask-smapi-model/ask_smapi_model/v0/catalog/upload/create_content_upload_request.py
|
Signal-Kinetics/alexa-apis-for-python
|
abb8d3dce18a5510c48b215406ed36c024f01495
|
[
"Apache-2.0"
] | null | null | null |
ask-smapi-model/ask_smapi_model/v0/catalog/upload/create_content_upload_request.py
|
Signal-Kinetics/alexa-apis-for-python
|
abb8d3dce18a5510c48b215406ed36c024f01495
|
[
"Apache-2.0"
] | null | null | null |
ask-smapi-model/ask_smapi_model/v0/catalog/upload/create_content_upload_request.py
|
Signal-Kinetics/alexa-apis-for-python
|
abb8d3dce18a5510c48b215406ed36c024f01495
|
[
"Apache-2.0"
] | null | null | null |
# coding: utf-8
#
# Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You may not use this file
# except in compliance with the License. A copy of the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for
# the specific language governing permissions and limitations under the License.
#
import pprint
import re # noqa: F401
import six
import typing
from enum import Enum
if typing.TYPE_CHECKING:
from typing import Dict, List, Optional, Union
from datetime import datetime
class CreateContentUploadRequest(object):
"""
:param number_of_upload_parts: Provides the number of parts the file will be split into. An equal number of presigned upload urls are generated in response to facilitate each part's upload.
:type number_of_upload_parts: (optional) int
"""
deserialized_types = {
'number_of_upload_parts': 'int'
} # type: Dict
attribute_map = {
'number_of_upload_parts': 'numberOfUploadParts'
} # type: Dict
supports_multiple_types = False
def __init__(self, number_of_upload_parts=None):
# type: (Optional[int]) -> None
"""
:param number_of_upload_parts: Provides the number of parts the file will be split into. An equal number of presigned upload urls are generated in response to facilitate each part's upload.
:type number_of_upload_parts: (optional) int
"""
self.__discriminator_value = None # type: str
self.number_of_upload_parts = number_of_upload_parts
def to_dict(self):
# type: () -> Dict[str, object]
"""Returns the model properties as a dict"""
result = {} # type: Dict
for attr, _ in six.iteritems(self.deserialized_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else
x.value if isinstance(x, Enum) else x,
value
))
elif isinstance(value, Enum):
result[attr] = value.value
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else
(item[0], item[1].value)
if isinstance(item[1], Enum) else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
# type: () -> str
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
# type: () -> str
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
# type: (object) -> bool
"""Returns true if both objects are equal"""
if not isinstance(other, CreateContentUploadRequest):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
# type: (object) -> bool
"""Returns true if both objects are not equal"""
return not self == other
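# Usage sketch (illustrative value): CreateContentUploadRequest(number_of_upload_parts=3).to_dict()
# returns {'number_of_upload_parts': 3}; __eq__ compares instances attribute-by-attribute.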
| 33.663551
| 201
| 0.608828
|
41f3839deb5e6c4e67ead390ddf56bc5b889399d
| 7,373
|
py
|
Python
|
src/TheLanguage/Grammars/v0_0_1/Statements/TryStatement.py
|
davidbrownell/DavidBrownell_TheLanguage
|
07170b448a0ebd7fa2325c9ccd4cefdb3cf7eb98
|
[
"BSL-1.0"
] | null | null | null |
src/TheLanguage/Grammars/v0_0_1/Statements/TryStatement.py
|
davidbrownell/DavidBrownell_TheLanguage
|
07170b448a0ebd7fa2325c9ccd4cefdb3cf7eb98
|
[
"BSL-1.0"
] | null | null | null |
src/TheLanguage/Grammars/v0_0_1/Statements/TryStatement.py
|
davidbrownell/DavidBrownell_TheLanguage
|
07170b448a0ebd7fa2325c9ccd4cefdb3cf7eb98
|
[
"BSL-1.0"
] | 1
|
2021-06-18T18:58:57.000Z
|
2021-06-18T18:58:57.000Z
|
# ----------------------------------------------------------------------
# |
# | TryStatement.py
# |
# | David Brownell <db@DavidBrownell.com>
# | 2021-10-14 11:16:28
# |
# ----------------------------------------------------------------------
# |
# | Copyright David Brownell 2021
# | Distributed under the Boost Software License, Version 1.0. See
# | accompanying file LICENSE_1_0.txt or copy at
# | http://www.boost.org/LICENSE_1_0.txt.
# |
# ----------------------------------------------------------------------
"""Contains the TryStatement object"""
import os
from typing import Callable, cast, List, Optional, Tuple, Union
import CommonEnvironment
from CommonEnvironment import Interface
from CommonEnvironmentEx.Package import InitRelativeImports
# ----------------------------------------------------------------------
_script_fullpath = CommonEnvironment.ThisFullpath()
_script_dir, _script_name = os.path.split(_script_fullpath)
# ----------------------------------------------------------------------
with InitRelativeImports():
from ..Common import StatementsPhraseItem
from ..Common import Tokens as CommonTokens
from ...GrammarInfo import AST, DynamicPhrasesType, GrammarPhrase, ParserInfo
from ....Lexer.Phrases.DSL import (
CreatePhrase,
ExtractDynamic,
ExtractOptional,
ExtractRepeat,
ExtractSequence,
ExtractToken,
OptionalPhraseItem,
ZeroOrMorePhraseItem,
)
from ....Parser.Parser import CreateParserRegions, GetParserInfo
from ....Parser.Statements.TryStatementParserInfo import (
TryStatementClauseParserInfo,
TryStatementParserInfo,
TypeParserInfo,
)
# ----------------------------------------------------------------------
class TryStatement(GrammarPhrase):
"""\
Try/except blocks.
'try' ':'
<statement>+
(
'except' <type> <name>? ':'
<statement>+
)*
(
'except' ':'
<statement>+
)?
Examples:
try:
Func1()
except Exception ex:
Func2()
except (Exception1 | Exception2) ex:
Func3()
except Exception4:
Func4()
except:
Func5()
"""
PHRASE_NAME = "Try Except Statement"
# ----------------------------------------------------------------------
def __init__(self):
statements_phrase_item = StatementsPhraseItem.Create()
super(TryStatement, self).__init__(
DynamicPhrasesType.Statements,
CreatePhrase(
name=self.PHRASE_NAME,
item=[
# 'try' ':'
# <statement>+
"try",
statements_phrase_item,
# (
# 'except' <type> <name>? ':'
# <statement>+
# )*
ZeroOrMorePhraseItem.Create(
name="Typed Except Clause",
item=[
"except",
DynamicPhrasesType.Types,
OptionalPhraseItem.Create(CommonTokens.VariableName),
statements_phrase_item,
],
),
# (
# 'except' ':'
# <statement>+
# )?
OptionalPhraseItem.Create(
name="Except Clause",
item=[
"except",
statements_phrase_item,
],
),
],
),
)
# ----------------------------------------------------------------------
@staticmethod
@Interface.override
def ExtractParserInfo(
node: AST.Node,
) -> Union[
None,
ParserInfo,
Callable[[], ParserInfo],
Tuple[ParserInfo, Callable[[], ParserInfo]],
]:
# ----------------------------------------------------------------------
def Impl():
nodes = ExtractSequence(node)
assert len(nodes) == 4
# 'try'...
try_statements_node = cast(AST.Node, nodes[1])
try_statements_info = StatementsPhraseItem.ExtractParserInfo(try_statements_node)
# 'except' <type>...
except_clauses_node = cast(Optional[AST.Node], nodes[2])
if except_clauses_node is None:
except_clauses_info = None # type: ignore
else:
except_clauses_info: List[TryStatementClauseParserInfo] = []
for except_clause_node in cast(List[AST.Node], ExtractRepeat(except_clauses_node)):
except_clause_nodes = ExtractSequence(except_clause_node)
assert len(except_clause_nodes) == 4
# <type>
type_node = cast(AST.Node, ExtractDynamic(cast(AST.Node, except_clause_nodes[1])))
type_info = cast(TypeParserInfo, GetParserInfo(type_node))
# <name>?
name_node = cast(Optional[AST.Node], ExtractOptional(cast(Optional[AST.Node], except_clause_nodes[2])))
if name_node is None:
name_info = None
else:
name_info = cast(str, ExtractToken(cast(AST.Leaf, name_node)))
# <statement>+
statements_node = cast(AST.Node, except_clause_nodes[3])
statements_info = StatementsPhraseItem.ExtractParserInfo(statements_node)
except_clauses_info.append(
# pylint: disable=too-many-function-args
TryStatementClauseParserInfo(
CreateParserRegions(except_clause_node, type_node, name_node, statements_node), # type: ignore
type_info,
name_info,
statements_info,
),
)
assert except_clauses_info
# 'except'...
except_node = cast(Optional[AST.Node], ExtractOptional(cast(Optional[AST.Node], nodes[3])))
if except_node is None:
except_info = None
else:
except_nodes = ExtractSequence(except_node)
assert len(except_nodes) == 2
except_info = StatementsPhraseItem.ExtractParserInfo(cast(AST.Node, except_nodes[1]))
return TryStatementParserInfo(
CreateParserRegions(node, try_statements_node, except_clauses_node, except_node), # type: ignore
try_statements_info,
except_clauses_info,
except_info,
)
# ----------------------------------------------------------------------
return Impl
| 35.109524
| 124
| 0.448122
|
bce592274f384a8d0954168f9a8bde8e5848be27
| 23,783
|
py
|
Python
|
ciscoisesdk/api/v3_0_0/px_grid_node.py
|
CiscoISE/ciscoisesdk
|
860b0fc7cc15d0c2a39c64608195a7ab3d5f4885
|
[
"MIT"
] | 36
|
2021-05-18T16:24:19.000Z
|
2022-03-05T13:44:41.000Z
|
ciscoisesdk/api/v3_0_0/px_grid_node.py
|
CiscoISE/ciscoisesdk
|
860b0fc7cc15d0c2a39c64608195a7ab3d5f4885
|
[
"MIT"
] | 15
|
2021-06-08T19:03:37.000Z
|
2022-02-25T14:47:33.000Z
|
ciscoisesdk/api/v3_0_0/px_grid_node.py
|
CiscoISE/ciscoisesdk
|
860b0fc7cc15d0c2a39c64608195a7ab3d5f4885
|
[
"MIT"
] | 6
|
2021-06-10T09:32:01.000Z
|
2022-01-12T08:34:39.000Z
|
# -*- coding: utf-8 -*-
"""Cisco Identity Services Engine pxGridNode API wrapper.
Copyright (c) 2021 Cisco and/or its affiliates.
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
from __future__ import (
absolute_import,
division,
print_function,
unicode_literals,
)
from builtins import *
from past.builtins import basestring
from ...restsession import RestSession
from ...utils import (
check_type,
dict_from_items_with_values,
apply_path_params,
dict_of_str,
get_next_page,
)
class PxGridNode(object):
"""Identity Services Engine pxGridNode API (version: 3.0.0).
Wraps the Identity Services Engine pxGridNode
API and exposes the API as native Python
methods that return native Python objects.
"""
def __init__(self, session, object_factory, request_validator):
"""Initialize a new PxGridNode
object with the provided RestSession.
Args:
session(RestSession): The RESTful session object to be used for
API calls to the Identity Services Engine service.
Raises:
TypeError: If the parameter types are incorrect.
"""
check_type(session, RestSession)
super(PxGridNode, self).__init__()
self._session = session
self._object_factory = object_factory
self._request_validator = request_validator
def approve_px_grid_node(self,
name,
headers=None,
**query_parameters):
"""This API allows the client to approve a pxGrid node. Only
pending pxGrid nodes can be approved.
Args:
name(basestring): name path parameter.
headers(dict): Dictionary of HTTP Headers to send with the Request
.
**query_parameters: Additional query parameters (provides
support for parameters that may be added in the future).
Returns:
RestResponse: REST response with following properties:
- headers(MyDict): response headers.
- response(MyDict): response body as a MyDict object. Access the object's properties by using the dot notation
or the bracket notation.
- content(bytes): representation of the request's response
- text(str): representation of the request's response
Raises:
TypeError: If the parameter types are incorrect.
MalformedRequest: If the request body created is invalid.
ApiError: If the Identity Services Engine cloud returns an error.
"""
check_type(headers, dict)
if headers is not None:
if 'Content-Type' in headers:
check_type(headers.get('Content-Type'),
basestring, may_be_none=False)
if 'Accept' in headers:
check_type(headers.get('Accept'),
basestring, may_be_none=False)
if 'ERS-Media-Type' in headers:
check_type(headers.get('ERS-Media-Type'),
basestring, may_be_none=False)
if 'X-CSRF-TOKEN' in headers:
check_type(headers.get('X-CSRF-TOKEN'),
basestring)
with_custom_headers = False
_headers = self._session.headers or {}
if headers:
_headers.update(dict_of_str(headers))
with_custom_headers = True
check_type(name, basestring,
may_be_none=False)
_params = {
}
_params.update(query_parameters)
_params = dict_from_items_with_values(_params)
path_params = {
'name': name,
}
e_url = ('/ers/config/pxgridnode/name/{name}/approve')
endpoint_full_url = apply_path_params(e_url, path_params)
if with_custom_headers:
_api_response = self._session.put(endpoint_full_url, params=_params,
headers=_headers)
else:
_api_response = self._session.put(endpoint_full_url, params=_params)
return self._object_factory('bpm_f47d656ed0805859a85e5cc082c78dcf_v3_0_0', _api_response)
def approve(self,
name,
headers=None,
**query_parameters):
"""Alias for `approve_px_grid_node <#ciscoisesdk.
api.v3_0_0.px_grid_node.
PxGridNode.approve_px_grid_node>`_
"""
return self.approve_px_grid_node(
name=name,
headers=headers,
**query_parameters
)
def get_px_grid_node_by_name(self,
name,
headers=None,
**query_parameters):
"""This API allows the client to get a pxGrid node by name.
Args:
name(basestring): name path parameter.
headers(dict): Dictionary of HTTP Headers to send with the Request
.
**query_parameters: Additional query parameters (provides
support for parameters that may be added in the future).
Returns:
RestResponse: REST response with following properties:
- headers(MyDict): response headers.
- response(MyDict): response body as a MyDict object. Access the object's properties by using the dot notation
or the bracket notation.
- content(bytes): representation of the request's response
- text(str): representation of the request's response
Raises:
TypeError: If the parameter types are incorrect.
MalformedRequest: If the request body created is invalid.
ApiError: If the Identity Services Engine cloud returns an error.
"""
check_type(headers, dict)
if headers is not None:
if 'Content-Type' in headers:
check_type(headers.get('Content-Type'),
basestring, may_be_none=False)
if 'Accept' in headers:
check_type(headers.get('Accept'),
basestring, may_be_none=False)
if 'ERS-Media-Type' in headers:
check_type(headers.get('ERS-Media-Type'),
basestring, may_be_none=False)
if 'X-CSRF-TOKEN' in headers:
check_type(headers.get('X-CSRF-TOKEN'),
basestring)
with_custom_headers = False
_headers = self._session.headers or {}
if headers:
_headers.update(dict_of_str(headers))
with_custom_headers = True
check_type(name, basestring,
may_be_none=False)
_params = {
}
_params.update(query_parameters)
_params = dict_from_items_with_values(_params)
path_params = {
'name': name,
}
e_url = ('/ers/config/pxgridnode/name/{name}')
endpoint_full_url = apply_path_params(e_url, path_params)
if with_custom_headers:
_api_response = self._session.get(endpoint_full_url, params=_params,
headers=_headers)
else:
_api_response = self._session.get(endpoint_full_url, params=_params)
return self._object_factory('bpm_a9d109aac585a89bdd3fae400064b_v3_0_0', _api_response)
def get_by_name(self,
name,
headers=None,
**query_parameters):
"""Alias for `get_px_grid_node_by_name <#ciscoisesdk.
api.v3_0_0.px_grid_node.
PxGridNode.get_px_grid_node_by_name>`_
"""
return self.get_px_grid_node_by_name(
name=name,
headers=headers,
**query_parameters
)
def delete_px_grid_node_by_name(self,
name,
headers=None,
**query_parameters):
"""This API deletes a pxGrid node by name.
Args:
name(basestring): name path parameter.
headers(dict): Dictionary of HTTP Headers to send with the Request
.
**query_parameters: Additional query parameters (provides
support for parameters that may be added in the future).
Returns:
RestResponse: REST response with following properties:
- headers(MyDict): response headers.
- response(MyDict): response body as a MyDict object. Access the object's properties by using the dot notation
or the bracket notation.
- content(bytes): representation of the request's response
- text(str): representation of the request's response
Raises:
TypeError: If the parameter types are incorrect.
MalformedRequest: If the request body created is invalid.
ApiError: If the Identity Services Engine cloud returns an error.
"""
check_type(headers, dict)
if headers is not None:
if 'Content-Type' in headers:
check_type(headers.get('Content-Type'),
basestring, may_be_none=False)
if 'Accept' in headers:
check_type(headers.get('Accept'),
basestring, may_be_none=False)
if 'ERS-Media-Type' in headers:
check_type(headers.get('ERS-Media-Type'),
basestring, may_be_none=False)
if 'X-CSRF-TOKEN' in headers:
check_type(headers.get('X-CSRF-TOKEN'),
basestring)
with_custom_headers = False
_headers = self._session.headers or {}
if headers:
_headers.update(dict_of_str(headers))
with_custom_headers = True
check_type(name, basestring,
may_be_none=False)
_params = {
}
_params.update(query_parameters)
_params = dict_from_items_with_values(_params)
path_params = {
'name': name,
}
e_url = ('/ers/config/pxgridnode/name/{name}')
endpoint_full_url = apply_path_params(e_url, path_params)
if with_custom_headers:
_api_response = self._session.delete(endpoint_full_url, params=_params,
headers=_headers)
else:
_api_response = self._session.delete(endpoint_full_url, params=_params)
return self._object_factory('bpm_e718d5054593b94a2fef39461c24a_v3_0_0', _api_response)
def delete_by_name(self,
name,
headers=None,
**query_parameters):
"""Alias for `delete_px_grid_node_by_name <#ciscoisesdk.
api.v3_0_0.px_grid_node.
PxGridNode.delete_px_grid_node_by_name>`_
"""
return self.delete_px_grid_node_by_name(
name=name,
headers=headers,
**query_parameters
)
def get_px_grid_node_by_id(self,
id,
headers=None,
**query_parameters):
"""This API allows the client to get a pxGrid node by ID.
Args:
id(basestring): id path parameter.
headers(dict): Dictionary of HTTP Headers to send with the Request
.
**query_parameters: Additional query parameters (provides
support for parameters that may be added in the future).
Returns:
RestResponse: REST response with following properties:
- headers(MyDict): response headers.
- response(MyDict): response body as a MyDict object. Access the object's properties by using the dot notation
or the bracket notation.
- content(bytes): representation of the request's response
- text(str): representation of the request's response
Raises:
TypeError: If the parameter types are incorrect.
MalformedRequest: If the request body created is invalid.
ApiError: If the Identity Services Engine cloud returns an error.
"""
check_type(headers, dict)
if headers is not None:
if 'Content-Type' in headers:
check_type(headers.get('Content-Type'),
basestring, may_be_none=False)
if 'Accept' in headers:
check_type(headers.get('Accept'),
basestring, may_be_none=False)
if 'ERS-Media-Type' in headers:
check_type(headers.get('ERS-Media-Type'),
basestring, may_be_none=False)
if 'X-CSRF-TOKEN' in headers:
check_type(headers.get('X-CSRF-TOKEN'),
basestring)
with_custom_headers = False
_headers = self._session.headers or {}
if headers:
_headers.update(dict_of_str(headers))
with_custom_headers = True
check_type(id, basestring,
may_be_none=False)
_params = {
}
_params.update(query_parameters)
_params = dict_from_items_with_values(_params)
path_params = {
'id': id,
}
e_url = ('/ers/config/pxgridnode/{id}')
endpoint_full_url = apply_path_params(e_url, path_params)
if with_custom_headers:
_api_response = self._session.get(endpoint_full_url, params=_params,
headers=_headers)
else:
_api_response = self._session.get(endpoint_full_url, params=_params)
return self._object_factory('bpm_d24ade0b53405fbc898cb0cc1ea57fb8_v3_0_0', _api_response)
def get_by_id(self,
id,
headers=None,
**query_parameters):
"""Alias for `get_px_grid_node_by_id <#ciscoisesdk.
api.v3_0_0.px_grid_node.
PxGridNode.get_px_grid_node_by_id>`_
"""
return self.get_px_grid_node_by_id(
id=id,
headers=headers,
**query_parameters
)
def get_px_grid_node(self,
page=None,
size=None,
headers=None,
**query_parameters):
"""This API allows the client to get all the npxGrid nodes.
Args:
page(int): page query parameter. Page number.
size(int): size query parameter. Number of objects
returned per page.
headers(dict): Dictionary of HTTP Headers to send with the Request
.
**query_parameters: Additional query parameters (provides
support for parameters that may be added in the future).
Returns:
RestResponse: REST response with following properties:
- headers(MyDict): response headers.
- response(MyDict): response body as a MyDict object. Access the object's properties by using the dot notation
or the bracket notation.
- content(bytes): representation of the request's response
- text(str): representation of the request's response
Raises:
TypeError: If the parameter types are incorrect.
MalformedRequest: If the request body created is invalid.
ApiError: If the Identity Services Engine cloud returns an error.
"""
check_type(headers, dict)
if headers is not None:
if 'Content-Type' in headers:
check_type(headers.get('Content-Type'),
basestring, may_be_none=False)
if 'Accept' in headers:
check_type(headers.get('Accept'),
basestring, may_be_none=False)
if 'ERS-Media-Type' in headers:
check_type(headers.get('ERS-Media-Type'),
basestring, may_be_none=False)
if 'X-CSRF-TOKEN' in headers:
check_type(headers.get('X-CSRF-TOKEN'),
basestring)
with_custom_headers = False
_headers = self._session.headers or {}
if headers:
_headers.update(dict_of_str(headers))
with_custom_headers = True
check_type(page, (int, basestring, list))
check_type(size, (int, basestring, list))
_params = {
'page':
page,
'size':
size,
}
_params.update(query_parameters)
_params = dict_from_items_with_values(_params)
path_params = {
}
e_url = ('/ers/config/pxgridnode')
endpoint_full_url = apply_path_params(e_url, path_params)
if with_custom_headers:
_api_response = self._session.get(endpoint_full_url, params=_params,
headers=_headers)
else:
_api_response = self._session.get(endpoint_full_url, params=_params)
return self._object_factory('bpm_d97156379640002f79b2007c_v3_0_0', _api_response)
def get_all(self,
page=None,
size=None,
headers=None,
**query_parameters):
"""Alias for `get_px_grid_node <#ciscoisesdk.
api.v3_0_0.px_grid_node.
PxGridNode.get_px_grid_node>`_
"""
return self.get_px_grid_node(
page=page,
size=size,
headers=headers,
**query_parameters
)
def get_px_grid_node_generator(self,
page=None,
size=None,
headers=None,
**query_parameters):
"""This API allows the client to get all the npxGrid nodes.
Args:
page(int): page query parameter. Page number.
size(int): size query parameter. Number of objects
returned per page.
headers(dict): Dictionary of HTTP Headers to send with the Request
.
**query_parameters: Additional query parameters (provides
support for parameters that may be added in the future).
Returns:
Generator: A generator object containing the following object.
+ RestResponse: REST response with following properties:
- headers(MyDict): response headers.
- response(MyDict): response body as a MyDict object. Access the object's properties by using the dot notation
or the bracket notation.
- content(bytes): representation of the request's response
- text(str): representation of the request's response
Raises:
TypeError: If the parameter types are incorrect.
MalformedRequest: If the request body created is invalid.
ApiError: If the Identity Services Engine cloud returns an error.
"""
yield from get_next_page(
self.get_px_grid_node, dict(
page=page,
size=size,
headers=headers,
**query_parameters
),
access_next_list=["SearchResult", "nextPage", "href"],
access_resource_list=["SearchResult", "resources"])
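        # Usage sketch (names assumed from access_next_list/access_resource_list above;
        # the generator yields one RestResponse per page):
        #   for page in px_grid_node_api.get_px_grid_node_generator(size=100):
        #       for node in page.response['SearchResult']['resources']:
        #           print(node['name'])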
def get_all_generator(self,
page=None,
size=None,
headers=None,
**query_parameters):
"""Alias for `get_px_grid_node_generator <#ciscoisesdk.
api.v3_0_0.px_grid_node.
PxGridNode.get_px_grid_node_generator>`_
"""
yield from get_next_page(
self.get_px_grid_node, dict(
page=page,
size=size,
headers=headers,
**query_parameters
),
access_next_list=["SearchResult", "nextPage", "href"],
access_resource_list=["SearchResult", "resources"])
def get_version(self,
headers=None,
**query_parameters):
"""This API helps to retrieve the version information related to
the pxGrid node.
Args:
headers(dict): Dictionary of HTTP Headers to send with the Request
.
**query_parameters: Additional query parameters (provides
support for parameters that may be added in the future).
Returns:
RestResponse: REST response with following properties:
- headers(MyDict): response headers.
- response(MyDict): response body as a MyDict object. Access the object's properties by using the dot notation
or the bracket notation.
- content(bytes): representation of the request's response
- text(str): representation of the request's response
Raises:
TypeError: If the parameter types are incorrect.
MalformedRequest: If the request body created is invalid.
ApiError: If the Identity Services Engine cloud returns an error.
"""
check_type(headers, dict)
if headers is not None:
if 'Content-Type' in headers:
check_type(headers.get('Content-Type'),
basestring, may_be_none=False)
if 'Accept' in headers:
check_type(headers.get('Accept'),
basestring, may_be_none=False)
with_custom_headers = False
_headers = self._session.headers or {}
if headers:
_headers.update(dict_of_str(headers))
with_custom_headers = True
_params = {
}
_params.update(query_parameters)
_params = dict_from_items_with_values(_params)
path_params = {
}
e_url = ('/ers/config/pxgridnode/versioninfo')
endpoint_full_url = apply_path_params(e_url, path_params)
if with_custom_headers:
_api_response = self._session.get(endpoint_full_url, params=_params,
headers=_headers)
else:
_api_response = self._session.get(endpoint_full_url, params=_params)
return self._object_factory('bpm_c2962d70ef5964be55cfeae68e5ba6_v3_0_0', _api_response)
| 37.57188
| 128
| 0.57785
|
6546a21c4a1be9823f38073f2d22cd4b9e5cfc7b
| 3,603
|
py
|
Python
|
tools/vm-migrator/src/migrator/disk.py
|
marcosgm/professional-services
|
f332b425c2f3b6538ebf65afda7e67de3bed1b3d
|
[
"Apache-2.0"
] | 2
|
2021-11-22T20:36:44.000Z
|
2021-11-22T20:36:57.000Z
|
tools/vm-migrator/src/migrator/disk.py
|
hyuatpc/professional-services
|
e5c811a8752e91fdf9f959a0414931010b0ea1ba
|
[
"Apache-2.0"
] | 16
|
2021-10-07T11:37:40.000Z
|
2021-12-23T14:07:17.000Z
|
tools/vm-migrator/src/migrator/disk.py
|
hyuatpc/professional-services
|
e5c811a8752e91fdf9f959a0414931010b0ea1ba
|
[
"Apache-2.0"
] | 3
|
2021-12-29T10:31:05.000Z
|
2022-01-06T11:37:28.000Z
|
#!/usr/bin/env python
# Copyright 2021 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
This file provides functionality related to migrating disks.
"""
import logging
from . import instance
from . import machine_image
from .exceptions import NotFoundException
from . import uri
def delete_disk(disk, project_zone: uri.ProjectZone, disk_name: str):
logging.info('Deleting Disk %s ', disk_name)
return disk.delete(project=project_zone.project, zone=project_zone.zone,
disk=disk_name).execute()
def delete(instance_uri: uri.Instance, disk_name, source_project):
try:
waited_time = instance.RATE_LIMIT.wait() # wait before starting the task
logging.info(' task: waited for %s secs', waited_time)
compute = instance.get_compute()
image = machine_image.get(instance_uri.project, instance_uri.name)
if image:
            logging.info('Found machine image; can safely delete the disk %s',
                         disk_name)
disks = compute.disks()
try:
disk = disks.get(project=instance_uri.project,
zone=instance_uri.zone,
disk=disk_name).execute()
except Exception:
disk = None
if disk:
delete_operation = delete_disk(disks, instance_uri, disk_name)
instance.wait_for_zonal_operation(compute, instance_uri,
delete_operation['name'])
return disk_name
else:
raise NotFoundException(
'Can\'t delete the disk {} as machine image {} was not found. '
' (machine project = {}, source project = {}, please report '
'if these values differ)'.format(disk_name, instance_uri.name,
instance_uri.project,
source_project))
except Exception as ex:
logging.error(ex)
raise ex
def setLabels(disk_uri: uri.Disk, labels):
logging.getLogger().setLevel(logging.DEBUG)
try:
# wait before starting the task
waited_time = instance.RATE_LIMIT.wait()
logging.info(' task: waited for %s secs', waited_time)
compute = instance.get_compute()
disk = compute.disks().get(project=disk_uri.project,
zone=disk_uri.zone,
disk=disk_uri.name).execute()
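        # setLabels requires the disk's current labelFingerprint (fetched above); GCE uses it
        # as an optimistic-locking token so that concurrent label edits fail instead of
        # silently overwriting each other.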
update_operation = compute.disks() \
.setLabels(project=disk_uri.project, zone=disk_uri.zone,
resource=disk_uri.name, body={
'labels': labels,
'labelFingerprint': disk['labelFingerprint']
}).execute()
instance.wait_for_zonal_operation(compute, disk_uri,
update_operation['name'])
return disk_uri.name
except Exception as ex:
logging.error(ex)
raise ex
| 41.413793
| 81
| 0.592839
|
fd0833eb51977cdfbba7f5dc3cbd775e07139be9
| 1,591
|
py
|
Python
|
frappe/desk/listview.py
|
linkmultiselect/frappe
|
0871db97f6aa9738e6aff169ad2b9853980b0653
|
[
"MIT"
] | null | null | null |
frappe/desk/listview.py
|
linkmultiselect/frappe
|
0871db97f6aa9738e6aff169ad2b9853980b0653
|
[
"MIT"
] | null | null | null |
frappe/desk/listview.py
|
linkmultiselect/frappe
|
0871db97f6aa9738e6aff169ad2b9853980b0653
|
[
"MIT"
] | null | null | null |
# Copyright (c) 2019, Frappe Technologies Pvt. Ltd. and Contributors
# MIT License. See license.txt
from __future__ import unicode_literals
import frappe
@frappe.whitelist()
def get_list_settings(doctype):
try:
return frappe.get_cached_doc("List View Setting", doctype)
except frappe.DoesNotExistError:
frappe.clear_messages()
@frappe.whitelist()
def set_list_settings(doctype, values):
try:
doc = frappe.get_doc("List View Setting", doctype)
except frappe.DoesNotExistError:
doc = frappe.new_doc("List View Setting")
doc.name = doctype
frappe.clear_messages()
doc.update(frappe.parse_json(values))
doc.save()
@frappe.whitelist()
def get_group_by_count(doctype, current_filters, field):
current_filters = frappe.parse_json(current_filters)
subquery_condition = ''
subquery = frappe.get_all(doctype, filters=current_filters, return_query = True)
if field == 'assigned_to':
subquery_condition = ' and `tabToDo`.reference_name in ({subquery})'.format(subquery = subquery)
return frappe.db.sql("""select `tabToDo`.owner as name, count(*) as count
from
`tabToDo`, `tabUser`
where
`tabToDo`.status='Open' and
`tabToDo`.owner = `tabUser`.name and
`tabUser`.user_type = 'System User'
{subquery_condition}
group by
`tabToDo`.owner
order by
count desc
limit 50""".format(subquery_condition = subquery_condition), as_dict=True)
else:
return frappe.db.get_list(doctype,
filters=current_filters,
group_by=field,
fields=['count(*) as count', '`{}` as name'.format(field)],
order_by='count desc',
limit=50,
)
| 27.912281
| 98
| 0.729101
|
3935d24fc9c615fb07e62f66e06c96547041f71e
| 9,243
|
py
|
Python
|
sickbeard/cherrypy/_cplogging.py
|
Branlala/docker-sickbeardfr
|
3ac85092dc4cc8a4171fb3c83e9682162245e13e
|
[
"MIT"
] | null | null | null |
sickbeard/cherrypy/_cplogging.py
|
Branlala/docker-sickbeardfr
|
3ac85092dc4cc8a4171fb3c83e9682162245e13e
|
[
"MIT"
] | null | null | null |
sickbeard/cherrypy/_cplogging.py
|
Branlala/docker-sickbeardfr
|
3ac85092dc4cc8a4171fb3c83e9682162245e13e
|
[
"MIT"
] | null | null | null |
"""CherryPy logging."""
import datetime
import logging
# Silence the no-handlers "warning" (stderr write!) in stdlib logging
logging.Logger.manager.emittedNoHandlerWarning = 1
logfmt = logging.Formatter("%(message)s")
import os
import sys
import cherrypy
from cherrypy import _cperror
class LogManager(object):
appid = None
error_log = None
access_log = None
access_log_format = \
'%(h)s %(l)s %(u)s %(t)s "%(r)s" %(s)s %(b)s "%(f)s" "%(a)s"'
def __init__(self, appid=None, logger_root="cherrypy"):
self.logger_root = logger_root
self.appid = appid
if appid is None:
self.error_log = logging.getLogger("%s.error" % logger_root)
self.access_log = logging.getLogger("%s.access" % logger_root)
else:
self.error_log = logging.getLogger("%s.error.%s" % (logger_root, appid))
self.access_log = logging.getLogger("%s.access.%s" % (logger_root, appid))
self.error_log.setLevel(logging.INFO)
self.access_log.setLevel(logging.INFO)
cherrypy.engine.subscribe('graceful', self.reopen_files)
def reopen_files(self):
"""Close and reopen all file handlers."""
for log in (self.error_log, self.access_log):
for h in log.handlers:
if isinstance(h, logging.FileHandler):
h.acquire()
h.stream.close()
h.stream = open(h.baseFilename, h.mode)
h.release()
def error(self, msg='', context='', severity=logging.INFO, traceback=False):
"""Write to the error log.
This is not just for errors! Applications may call this at any time
to log application-specific information.
"""
if traceback:
msg += _cperror.format_exc()
self.error_log.log(severity, ' '.join((self.time(), context, msg)))
def __call__(self, *args, **kwargs):
"""Write to the error log.
This is not just for errors! Applications may call this at any time
to log application-specific information.
"""
return self.error(*args, **kwargs)
def access(self):
"""Write to the access log (in Apache/NCSA Combined Log format).
Like Apache started doing in 2.0.46, non-printable and other special
characters in %r (and we expand that to all parts) are escaped using
\\xhh sequences, where hh stands for the hexadecimal representation
of the raw byte. Exceptions from this rule are " and \\, which are
escaped by prepending a backslash, and all whitespace characters,
which are written in their C-style notation (\\n, \\t, etc).
"""
request = cherrypy.serving.request
remote = request.remote
response = cherrypy.serving.response
outheaders = response.headers
inheaders = request.headers
if response.output_status is None:
status = "-"
else:
status = response.output_status.split(" ", 1)[0]
atoms = {'h': remote.name or remote.ip,
'l': '-',
'u': getattr(request, "login", None) or "-",
't': self.time(),
'r': request.request_line,
's': status,
'b': dict.get(outheaders, 'Content-Length', '') or "-",
'f': dict.get(inheaders, 'Referer', ''),
'a': dict.get(inheaders, 'User-Agent', ''),
}
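        # A filled-in example of the resulting Combined Log Format line (illustrative values):
        #   10.0.0.1 - jsmith [10/Oct/2000:13:55:36] "GET /index.html HTTP/1.1" 200 2326 "http://example.com/start" "Mozilla/5.0"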
for k, v in atoms.items():
if isinstance(v, unicode):
v = v.encode('utf8')
elif not isinstance(v, str):
v = str(v)
# Fortunately, repr(str) escapes unprintable chars, \n, \t, etc
# and backslash for us. All we have to do is strip the quotes.
v = repr(v)[1:-1]
# Escape double-quote.
atoms[k] = v.replace('"', '\\"')
try:
self.access_log.log(logging.INFO, self.access_log_format % atoms)
except:
self(traceback=True)
def time(self):
"""Return now() in Apache Common Log Format (no timezone)."""
now = datetime.datetime.now()
monthnames = ['jan', 'feb', 'mar', 'apr', 'may', 'jun',
'jul', 'aug', 'sep', 'oct', 'nov', 'dec']
month = monthnames[now.month - 1].capitalize()
return ('[%02d/%s/%04d:%02d:%02d:%02d]' %
(now.day, month, now.year, now.hour, now.minute, now.second))
def _get_builtin_handler(self, log, key):
for h in log.handlers:
if getattr(h, "_cpbuiltin", None) == key:
return h
# ------------------------- Screen handlers ------------------------- #
def _set_screen_handler(self, log, enable, stream=None):
h = self._get_builtin_handler(log, "screen")
if enable:
if not h:
if stream is None:
stream = sys.stderr
h = logging.StreamHandler(stream)
h.setFormatter(logfmt)
h._cpbuiltin = "screen"
log.addHandler(h)
elif h:
log.handlers.remove(h)
def _get_screen(self):
h = self._get_builtin_handler
has_h = h(self.error_log, "screen") or h(self.access_log, "screen")
return bool(has_h)
def _set_screen(self, newvalue):
self._set_screen_handler(self.error_log, newvalue, stream=sys.stderr)
self._set_screen_handler(self.access_log, newvalue, stream=sys.stdout)
screen = property(_get_screen, _set_screen,
doc="If True, error and access will print to stderr.")
# -------------------------- File handlers -------------------------- #
def _add_builtin_file_handler(self, log, fname):
h = logging.FileHandler(fname)
h.setFormatter(logfmt)
h._cpbuiltin = "file"
log.addHandler(h)
def _set_file_handler(self, log, filename):
h = self._get_builtin_handler(log, "file")
if filename:
if h:
if h.baseFilename != os.path.abspath(filename):
h.close()
log.handlers.remove(h)
self._add_builtin_file_handler(log, filename)
else:
self._add_builtin_file_handler(log, filename)
else:
if h:
h.close()
log.handlers.remove(h)
def _get_error_file(self):
h = self._get_builtin_handler(self.error_log, "file")
if h:
return h.baseFilename
return ''
def _set_error_file(self, newvalue):
self._set_file_handler(self.error_log, newvalue)
error_file = property(_get_error_file, _set_error_file,
doc="The filename for self.error_log.")
def _get_access_file(self):
h = self._get_builtin_handler(self.access_log, "file")
if h:
return h.baseFilename
return ''
def _set_access_file(self, newvalue):
self._set_file_handler(self.access_log, newvalue)
access_file = property(_get_access_file, _set_access_file,
doc="The filename for self.access_log.")
# ------------------------- WSGI handlers ------------------------- #
def _set_wsgi_handler(self, log, enable):
h = self._get_builtin_handler(log, "wsgi")
if enable:
if not h:
h = WSGIErrorHandler()
h.setFormatter(logfmt)
h._cpbuiltin = "wsgi"
log.addHandler(h)
elif h:
log.handlers.remove(h)
def _get_wsgi(self):
return bool(self._get_builtin_handler(self.error_log, "wsgi"))
def _set_wsgi(self, newvalue):
self._set_wsgi_handler(self.error_log, newvalue)
wsgi = property(_get_wsgi, _set_wsgi,
doc="If True, error messages will be sent to wsgi.errors.")
class WSGIErrorHandler(logging.Handler):
"A handler class which writes logging records to environ['wsgi.errors']."
def flush(self):
"""Flushes the stream."""
try:
stream = cherrypy.serving.request.wsgi_environ.get('wsgi.errors')
except (AttributeError, KeyError):
pass
else:
stream.flush()
def emit(self, record):
"""Emit a record."""
try:
stream = cherrypy.serving.request.wsgi_environ.get('wsgi.errors')
except (AttributeError, KeyError):
pass
else:
try:
msg = self.format(record)
fs = "%s\n"
import types
if not hasattr(types, "UnicodeType"): #if no unicode support...
stream.write(fs % msg)
else:
try:
stream.write(fs % msg)
except UnicodeError:
stream.write(fs % msg.encode("UTF-8"))
self.flush()
except:
self.handleError(record)
| 36.824701
| 86
| 0.54214
|
3b592d677ae3268b7d340320ae068f2889019dff
| 1,134
|
py
|
Python
|
src/gen_key_key.py
|
jasonhavenD/triplesKB
|
e591adcb60d2611eef361f8ec412ace817c683eb
|
[
"Apache-2.0"
] | 14
|
2019-05-13T09:43:56.000Z
|
2022-01-23T02:18:28.000Z
|
src/gen_key_key.py
|
jasonhavenD/triplesKB
|
e591adcb60d2611eef361f8ec412ace817c683eb
|
[
"Apache-2.0"
] | 1
|
2020-10-26T03:34:09.000Z
|
2020-10-26T06:17:10.000Z
|
src/gen_key_key.py
|
jasonhavenD/triplesKB
|
e591adcb60d2611eef361f8ec412ace817c683eb
|
[
"Apache-2.0"
] | 7
|
2019-05-27T01:51:24.000Z
|
2021-07-12T02:42:12.000Z
|
# encoding = utf-8
import re
import loguru
import json
def compare(tpl):
'''
    0: identical  1: superior (parent field)  2: subordinate (child field)  3: same domain  -1: no relation
'''
x, y = tpl
if x == y:
return x, y, 0
elif len(x.split('.')) == len(y.split('.')):
xs = x.split('.')
ys = y.split('.')
for i in range(len(xs)):
if xs[i] != ys[i]:
break
if i > 0:
return x, y, 3
else:
return x, y, -1
else:
        if y.startswith(x):  # 1: superior
            return x, y, 1
        elif x.startswith(y):  # 2: subordinate
return x, y, 2
else:
return x, y, -1
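# Examples with illustrative dot-separated field ids:
#   compare(('cs', 'cs'))        -> ('cs', 'cs', 0)        identical
#   compare(('cs', 'cs.ai'))     -> ('cs', 'cs.ai', 1)     'cs' is the superior (broader) field
#   compare(('cs.ai', 'cs'))     -> ('cs.ai', 'cs', 2)     'cs.ai' is the subordinate (narrower) field
#   compare(('cs.ai', 'cs.db'))  -> ('cs.ai', 'cs.db', 3)  same top-level domain
#   compare(('cs.ai', 'bio.ai')) -> ('cs.ai', 'bio.ai', -1) unrelated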
def main(src, des):
data = json.load(open(src,'r'))
ids = list(data.keys() )
with open(des, 'w') as f:
f.write('%s##%s##%s\n' % ('field1', 'field2', 'relation'))
n = len(ids)
tpl = [[ids[i], ids[j]] for i in range(n) for j in range(n)]
result = list(map(compare, tpl))
for k1,k2,rel in result:
f.write('%s##%s##%s\n' % (k1,k2,rel))
if __name__ == "__main__":
main('field_dict.json', 'triples/key_key_rel.csv')
| 23.625
| 68
| 0.444444
|
6da13e058d64e3b6883e8873b2b74fde535c2441
| 470
|
py
|
Python
|
src/niweb/apps/noclook/migrations/0012_role_slug.py
|
SUNET/ni
|
f652e230524346bf0801cdf8bbb6ee63f4985cc2
|
[
"BSD-2-Clause-FreeBSD"
] | null | null | null |
src/niweb/apps/noclook/migrations/0012_role_slug.py
|
SUNET/ni
|
f652e230524346bf0801cdf8bbb6ee63f4985cc2
|
[
"BSD-2-Clause-FreeBSD"
] | 2
|
2019-07-24T12:41:11.000Z
|
2020-03-31T10:10:04.000Z
|
src/niweb/apps/noclook/migrations/0012_role_slug.py
|
SUNET/ni
|
f652e230524346bf0801cdf8bbb6ee63f4985cc2
|
[
"BSD-2-Clause-FreeBSD"
] | 1
|
2019-02-25T14:58:20.000Z
|
2019-02-25T14:58:20.000Z
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.21 on 2019-07-23 07:06
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('noclook', '0011_auto_20190719_1157'),
]
operations = [
migrations.AddField(
model_name='role',
name='slug',
field=models.CharField(max_length=20, null=True, unique=True),
),
]
| 22.380952
| 74
| 0.619149
|
68dc54c3660e40e58a67f741abff61f23bf81485
| 1,028
|
py
|
Python
|
13.py
|
tsbxmw/leetcode
|
e751311b8b5f2769874351717a22c35c19b48a36
|
[
"MIT"
] | null | null | null |
13.py
|
tsbxmw/leetcode
|
e751311b8b5f2769874351717a22c35c19b48a36
|
[
"MIT"
] | null | null | null |
13.py
|
tsbxmw/leetcode
|
e751311b8b5f2769874351717a22c35c19b48a36
|
[
"MIT"
] | null | null | null |
# Roman numerals contain the following seven characters: I, V, X, L, C, D and M.
# Character    Value
# I            1
# V            5
# X            10
# L            50
# C            100
# D            500
# M            1000
# For example, the Roman numeral 2 is written as II, i.e. two ones side by side. 12 is written as XII,
# i.e. X + II. 27 is written as XXVII, i.e. XX + V + II.
# Usually, the smaller numeral appears to the right of the larger one. There are exceptions, however:
# 4 is not written as IIII but as IV. The 1 placed to the left of the 5 means 5 minus 1, i.e. 4.
# Likewise, 9 is written as IX. This special rule applies only in the following six cases:
# I can be placed before V (5) and X (10) to make 4 and 9.
# X can be placed before L (50) and C (100) to make 40 and 90.
# C can be placed before D (500) and M (1000) to make 400 and 900.
# Given a Roman numeral, convert it to an integer. The input is guaranteed to be in the range 1 to 3999.
# Example 1:
# Input: "III"
# Output: 3
# Example 2:
# Input: "IV"
# Output: 4
# Example 3:
# Input: "IX"
# Output: 9
# Example 4:
# Input: "LVIII"
# Output: 58
# Explanation: L = 50, V = 5, III = 3.
# Example 5:
# Input: "MCMXCIV"
# Output: 1994
# Explanation: M = 1000, CM = 900, XC = 90, IV = 4.
# Source: LeetCode (leetcode-cn)
# Link: https://leetcode-cn.com/problems/roman-to-integer
# Copyright belongs to LeetCode. For commercial reprints please contact them for authorization;
# for non-commercial reprints please cite the source.
class Solution:
    def romanToInt(self, s: str) -> int:
        # Map each symbol to its value; subtract a symbol's value when a larger
        # symbol follows it (e.g. IV = 4, IX = 9), otherwise add it.
        values = {'I': 1, 'V': 5, 'X': 10, 'L': 50, 'C': 100, 'D': 500, 'M': 1000}
        total = 0
        for i, ch in enumerate(s):
            if i + 1 < len(s) and values[ch] < values[s[i + 1]]:
                total -= values[ch]
            else:
                total += values[ch]
        return total
| 20.56
| 130
| 0.497082
|
ae0b5e4b8c9748b41b5f5b8c4aa66e8ba94da49f
| 90
|
py
|
Python
|
api_server/book_api/apps.py
|
roman-bezusiak/lamia-test-task-py
|
b56fb6fda51917a9c7c50bc1a07b942d358eefd1
|
[
"MIT"
] | null | null | null |
api_server/book_api/apps.py
|
roman-bezusiak/lamia-test-task-py
|
b56fb6fda51917a9c7c50bc1a07b942d358eefd1
|
[
"MIT"
] | null | null | null |
api_server/book_api/apps.py
|
roman-bezusiak/lamia-test-task-py
|
b56fb6fda51917a9c7c50bc1a07b942d358eefd1
|
[
"MIT"
] | null | null | null |
from django.apps import AppConfig
class BookApiConfig(AppConfig):
name = 'book_api'
| 15
| 33
| 0.755556
|
e052f3e9c9a67f9ef9bfba60d293fb3616889630
| 2,553
|
py
|
Python
|
poetry/console/io/inputs/run_argv_input.py
|
uda/poetry
|
30e3d7e33c20cbe2af8eda06e0db4888275caaa1
|
[
"MIT"
] | 12,347
|
2019-12-12T07:07:32.000Z
|
2022-03-31T21:08:50.000Z
|
poetry/console/io/inputs/run_argv_input.py
|
uda/poetry
|
30e3d7e33c20cbe2af8eda06e0db4888275caaa1
|
[
"MIT"
] | 3,483
|
2019-12-11T20:20:20.000Z
|
2022-03-31T23:18:18.000Z
|
poetry/console/io/inputs/run_argv_input.py
|
uda/poetry
|
30e3d7e33c20cbe2af8eda06e0db4888275caaa1
|
[
"MIT"
] | 1,399
|
2019-12-12T12:27:46.000Z
|
2022-03-31T09:12:53.000Z
|
from typing import List
from typing import Optional
from typing import Union
from cleo.io.inputs.argv_input import ArgvInput
from cleo.io.inputs.definition import Definition
class RunArgvInput(ArgvInput):
def __init__(
self, argv: Optional[List[str]] = None, definition: Optional[Definition] = None
) -> None:
super().__init__(argv, definition=definition)
self._parameter_options = []
@property
def first_argument(self) -> Optional[str]:
return "run"
def add_parameter_option(self, name: str) -> None:
self._parameter_options.append(name)
def has_parameter_option(
self, values: Union[str, List[str]], only_params: bool = False
) -> bool:
if not isinstance(values, list):
values = [values]
for token in self._tokens:
if only_params and token == "--":
return False
for value in values:
if value not in self._parameter_options:
continue
# Options with values:
# For long options, test for '--option=' at beginning
# For short options, test for '-o' at beginning
if value.find("--") == 0:
leading = value + "="
else:
leading = value
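                # e.g. value "--env" matches tokens "--env" and "--env=prod";
                #      value "-e" matches "-e" as well as "-eprod".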
if token == value or leading != "" and token.find(leading) == 0:
return True
return False
def _parse(self) -> None:
parse_options = True
self._parsed = self._tokens[:]
try:
token = self._parsed.pop(0)
except IndexError:
token = None
while token is not None:
if parse_options and token == "":
self._parse_argument(token)
elif parse_options and token == "--":
parse_options = False
elif parse_options and token.find("--") == 0:
if token in self._parameter_options:
self._parse_long_option(token)
else:
self._parse_argument(token)
elif parse_options and token[0] == "-" and token != "-":
if token in self._parameter_options:
self._parse_short_option(token)
else:
self._parse_argument(token)
else:
self._parse_argument(token)
try:
token = self._parsed.pop(0)
except IndexError:
token = None
| 31.134146
| 87
| 0.533882
|
7c68a99efa93e7083d0902cc3d2ada5bc0108ca5
| 1,595
|
py
|
Python
|
volume_cmf.py
|
EmreMicrosoft/piyasa-indikatorleri
|
fb5b4c256d173f12cb3d0659d36add75dfa7ce1f
|
[
"MIT"
] | null | null | null |
volume_cmf.py
|
EmreMicrosoft/piyasa-indikatorleri
|
fb5b4c256d173f12cb3d0659d36add75dfa7ce1f
|
[
"MIT"
] | null | null | null |
volume_cmf.py
|
EmreMicrosoft/piyasa-indikatorleri
|
fb5b4c256d173f12cb3d0659d36add75dfa7ce1f
|
[
"MIT"
] | null | null | null |
# CMF (Chaikin Money Flow)
# http://stockcharts.com/school/doku.php?id=chart_school:technical_indicators:chaikin_money_flow_cmf
# Measures the amount of Money Flow Volume over a given period.
# Arguments:
#   high (pandas.Series): dataset 'High' column.
#   low (pandas.Series): dataset 'Low' column.
#   close (pandas.Series): dataset 'Close' column.
#   volume (pandas.Series): dataset 'Volume' column.
#   window (int): n period.
#   fillna (bool): if True, fill nan values.
import pandas as pd
from _utilities import IndicatorMixin
class ChaikinMoneyFlowIndicator(IndicatorMixin):
def __init__(
self,
high: pd.Series,
low: pd.Series,
close: pd.Series,
volume: pd.Series,
window: int = 20,
fillna: bool = False,
):
self._high = high
self._low = low
self._close = close
self._volume = volume
self._window = window
self._fillna = fillna
self._run()
def _run(self):
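        # Money Flow Multiplier = ((close - low) - (high - close)) / (high - low)
        # Money Flow Volume     = multiplier * volume
        # CMF = rolling_sum(Money Flow Volume, window) / rolling_sum(volume, window)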
mfv = ((self._close - self._low) - (self._high - self._close)) / (self._high - self._low)
mfv = mfv.fillna(0.0) # float division by zero
mfv *= self._volume
min_periods = 0 if self._fillna else self._window
self._cmf = (mfv.rolling(self._window, min_periods=min_periods).sum()
/ self._volume.rolling(self._window, min_periods=min_periods).sum())
def chaikin_money_flow(self) -> pd.Series:
cmf = self._check_fillna(self._cmf, value=0)
return pd.Series(cmf, name="cmf")
| 34.673913
| 101
| 0.631348
|
576d096d21a44591cc0da7b45b64d1773d4cbb3d
| 1,595
|
py
|
Python
|
pi/gpsf(old).py
|
AideTechBot/expo
|
b616512100e2449e795f8073871efc056b92742f
|
[
"MIT"
] | 2
|
2016-07-13T21:29:33.000Z
|
2020-12-24T04:47:40.000Z
|
pi/gpsf(old).py
|
AideTechBot/expo
|
b616512100e2449e795f8073871efc056b92742f
|
[
"MIT"
] | 1
|
2018-04-24T20:29:10.000Z
|
2018-04-24T20:29:10.000Z
|
pi/gpsf(old).py
|
AideTechBot/expo
|
b616512100e2449e795f8073871efc056b92742f
|
[
"MIT"
] | null | null | null |
#!/usr/bin/python
"""
GPS.PY
written by: Manuel Dionne
credit to: the internet
"""
import wave
import PiFm
import time
import os
from subprocess import call
def send_data( filename, freq ):
call(["./pifm", filename, freq])
return
freq = 500.0
data_size = 0
frate = 11025.0 # framerate as a float
amp = 64000. # multiplier for amplitude
pos = "400 600"
fname = "packet.wav"
data_list_x = []
def cleardata():
if os.path.exists(fname):
os.remove(fname)
else:
print "No file to delete: " + fname
def writedata():
global data_list_x
wav = wave.open(fname,'w')
#padding
padding = ""
i = 0
for i in range(0,1024):
i = i + 1
padding = padding + "A"
print padding
#making the data
data = padding + "START" + "|" + str(time.time()) + "|" + pos + "|" + "END" + padding + "\n"
data_size = len(data)
print str(data) + str(data_size)
#put all the data in a list
for x in range(data_size):
data_list_x.append(data[x])
#declaring some vars
nchannels = 1
sampwidth = 2
framerate = int(frate)
nframes = data_size
comptype = "NONE"
compname = "not compressed"
#setting the params
wav.setparams((nchannels, sampwidth, framerate, nframes, comptype, compname))
#writing frames
for s in data_list_x:
wav.writeframes(s)
wav.close()
#recycle vars
data_list_x = []
while(True):
cleardata()
writedata()
print "ok"
send_data(fname,"103.3")
time.sleep(1)
| 20.448718
| 97
| 0.584953
|
4e97dee002fd3c43fcf199088a382c9113275037
| 418
|
py
|
Python
|
qrgenerator/lib/BarcodeGenerator.py
|
saraclohisey/icecap-qrcode
|
c22875a99ba50065b62d370b3aa3dcc6ebeeafab
|
[
"MIT"
] | 2
|
2020-04-06T08:39:18.000Z
|
2020-05-05T11:50:11.000Z
|
qrgenerator/lib/BarcodeGenerator.py
|
saraclohisey/icecap-qrcode
|
c22875a99ba50065b62d370b3aa3dcc6ebeeafab
|
[
"MIT"
] | 7
|
2021-03-31T19:45:55.000Z
|
2022-03-12T00:22:43.000Z
|
qrgenerator/lib/BarcodeGenerator.py
|
saraclohisey/icecap-qrcode
|
c22875a99ba50065b62d370b3aa3dcc6ebeeafab
|
[
"MIT"
] | 2
|
2020-04-21T12:28:18.000Z
|
2020-05-25T10:16:32.000Z
|
from barcode import Code39
from barcode.writer import ImageWriter
def generate(input_tuple, filename):
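    # e.g. ('ICE', 'CAP_01', '0001') -> encodes "ICE-CAP-01-0001" (underscores are
    # replaced because the Code 39 character set has no underscore).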
elements = list(input_tuple)
data_to_encode = "-".join(elements)
data_to_encode = data_to_encode.replace('_', '-')
print('Generating barcode for', data_to_encode)
print('Saving to', filename)
with open(filename, 'wb') as fh:
Code39(data_to_encode, writer=ImageWriter()).write(fh)
| 29.857143
| 62
| 0.712919
|
a8afc35f86420c5c0c3d13b32b220384686518e3
| 607
|
py
|
Python
|
common/rpc/buildserver.py
|
akshitdewan/cs61a-apps
|
155f2afe98b238fb4b1c4ca1c79610ec55e826e6
|
[
"MIT"
] | null | null | null |
common/rpc/buildserver.py
|
akshitdewan/cs61a-apps
|
155f2afe98b238fb4b1c4ca1c79610ec55e826e6
|
[
"MIT"
] | null | null | null |
common/rpc/buildserver.py
|
akshitdewan/cs61a-apps
|
155f2afe98b238fb4b1c4ca1c79610ec55e826e6
|
[
"MIT"
] | null | null | null |
from common.rpc.utils import create_service, requires_master_secret
service = create_service(__name__)
@requires_master_secret
@service.route("/api/clear_queue")
def clear_queue(*, repo: str, pr_number: int):
...
@requires_master_secret
@service.route("/api/trigger_build")
def trigger_build_sync(*, pr_number: int, target_app: str = None):
...
@requires_master_secret
@service.route("/api/deploy_prod_app_sync")
def deploy_prod_app_sync(*, target_app: str):
...
@requires_master_secret
@service.route("/api/get_base_hostname")
def get_base_hostname(*, target_app: str) -> str:
...
| 21.678571
| 67
| 0.746293
|
c7b187a3a663a73ad1dcb6b96597d02acd35a2be
| 1,452
|
py
|
Python
|
custom_loss.py
|
chenchr/PoseNet
|
d8c0d1071db21652a7cb4b2715747ef346c8f706
|
[
"MIT"
] | 2
|
2018-05-09T03:35:42.000Z
|
2018-05-10T00:13:56.000Z
|
custom_loss.py
|
chenchr/PoseNet
|
d8c0d1071db21652a7cb4b2715747ef346c8f706
|
[
"MIT"
] | null | null | null |
custom_loss.py
|
chenchr/PoseNet
|
d8c0d1071db21652a7cb4b2715747ef346c8f706
|
[
"MIT"
] | null | null | null |
import torch
import torch.nn as nn
def normalize(vec):
divided = torch.sqrt(vec.pow(2).sum(dim=1, keepdim=True)) + 1e-8
return vec/divided
def custom_loss(estimate, target, qua_weight, t_weight, test=False):
# print("estimete: {}\ntarget: {}".format(estimate[0,:], target[0,:]))
#if test:
# qua_estimate, qua_target = normalize(estimate[:, 0:3]), normalize(target[:, 0:3])
#else:
rVec_estimate, rVec_target = estimate[:, 0:3], target[:, 0:3]
# we, xe, ye, ze = qua_estimate[0,:].data.cpu().numpy()
# wt, xt, yt, zt = qua_target[0,:].data.cpu().numpy()
# print('qua estimate: {}, target: {}'.format([we,xe,ye,ze], [wt,xt,yt,zt]))
if test:
t_estimate, t_target = normalize(estimate[:, 3:6]), normalize(target[:, 3:6])
else:
t_estimate, t_target = estimate[:, 3:6], target[:, 3:6]
# xe, ye, ze = t_estimate[0,:].data.cpu().numpy()
# xt, yt, zt = t_target[0,:].data.cpu().numpy()
# print('t estimate: {}, target: {}'.format([xe,ye,ze],[xt,yt,zt]))
rVec_error = (rVec_estimate - rVec_target).pow(2).sum(dim=1).mean()
t_error = (t_estimate - t_target).pow(2).sum(dim=1).mean()
# print('t error: {}'.format(t_error.data[0]))
# all_error = qua_error * torch.exp(-qua_weight) + qua_weight + t_error * torch.exp(-t_weight) + t_weight
rVec_error.fill_(0) # only loss for translation
all_error = rVec_error + t_error
return all_error, rVec_error, t_error
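# Illustrative call (added; not in the original file).  Shapes below are
# assumptions: the loss expects [N, 6] tensors with the rotation vector in
# columns 0:3 and the translation in columns 3:6, and as written it only
# penalizes translation because rVec_error is zeroed above.
#
# est = torch.randn(4, 6, requires_grad=True)
# tgt = torch.randn(4, 6)
# total, r_err, t_err = custom_loss(est, tgt, qua_weight=None, t_weight=None)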
| 46.83871
| 109
| 0.621901
|
b226636bda27aee98c88a65070ae3817b3c8d1f5
| 882
|
py
|
Python
|
noise.py
|
mohamedsayed18/Neuroscience_Inno2021
|
d321a69583333ec596079f0e69b4d4751ab37425
|
[
"MIT"
] | null | null | null |
noise.py
|
mohamedsayed18/Neuroscience_Inno2021
|
d321a69583333ec596079f0e69b4d4751ab37425
|
[
"MIT"
] | null | null | null |
noise.py
|
mohamedsayed18/Neuroscience_Inno2021
|
d321a69583333ec596079f0e69b4d4751ab37425
|
[
"MIT"
] | null | null | null |
"""
Add Gaussian white noise
TODO:
Try the different D values(amplitude) for noise(1, 0.1, 0.01, 0.001)
"""
import numpy as np
import matplotlib.pyplot as plt
from morris_lecar import morris_lecar
class noisy_model(morris_lecar):
D = 0.01
def dv_dt(self, current):
self.I = current
return ((self.I - self.gl * (self.v - self.v_l) - self.gca * self.m_inf() * (self.v - self.v_ca)
- self.gk * self.n * (self.v - self.v_k)) / self.c) + (noisy_model.D * np.random.normal(0.001,1))
if __name__ == "__main__":
mymodel = noisy_model() # create an instance
# get the output for 2000 step
for i in range(3000):
if(i>450 and i<550):
mymodel.get_v(1)
else:
mymodel.get_v(0)
plt.plot(mymodel.volts, label="D = 0.01")
plt.xlabel("Time")
plt.ylabel("Volt")
plt.legend()
plt.show()
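# Illustrative sketch (added; not part of the original script): the TODO above
# asks to compare different noise amplitudes.  One minimal way, reusing
# noisy_model and the same stimulus window as the run above, would be:
#
# for d in (1, 0.1, 0.01, 0.001):
#     noisy_model.D = d          # class attribute shared by all instances
#     m = noisy_model()
#     for i in range(3000):
#         m.get_v(1 if 450 < i < 550 else 0)
#     plt.plot(m.volts, label="D = %g" % d)
# plt.legend(); plt.show()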
| 25.941176
| 109
| 0.604308
|
92e90f725fed9573cc3b9f10e029987b01b1b8e1
| 11,554
|
py
|
Python
|
SMBcorr/racmo_extrap_mean.py
|
SmithB/SMBcorr
|
7c35cf8383058986fd82b28beab3c5580f9f8667
|
[
"MIT"
] | null | null | null |
SMBcorr/racmo_extrap_mean.py
|
SmithB/SMBcorr
|
7c35cf8383058986fd82b28beab3c5580f9f8667
|
[
"MIT"
] | null | null | null |
SMBcorr/racmo_extrap_mean.py
|
SmithB/SMBcorr
|
7c35cf8383058986fd82b28beab3c5580f9f8667
|
[
"MIT"
] | 1
|
2020-08-06T19:48:52.000Z
|
2020-08-06T19:48:52.000Z
|
#!/usr/bin/env python
u"""
racmo_extrap_mean.py
Written by Tyler Sutterley (09/2019)
Interpolates and extrapolates downscaled RACMO products to times and coordinates
Uses fast nearest-neighbor search algorithms
https://scikit-learn.org/stable/modules/generated/sklearn.neighbors.BallTree.html
https://scikit-learn.org/stable/modules/generated/sklearn.neighbors.KDTree.html
and inverse distance weighted interpolation to extrapolate spatially
CALLING SEQUENCE:
python racmo_extrap_mean.py --directory=<path> --version=3.0 \
--product=SMB,PRECIP,RUNOFF --coordinate=[-39e4,-133e4],[-39e4,-133e4] \
--date=2016.1,2018.1
COMMAND LINE OPTIONS:
-D X, --directory=X: Working data directory
--version=X: Downscaled RACMO Version
1.0: RACMO2.3/XGRN11
2.0: RACMO2.3p2/XGRN11
3.0: RACMO2.3p2/FGRN055
--product: RACMO product to calculate
SMB: Surface Mass Balance
PRECIP: Precipitation
RUNOFF: Melt Water Runoff
SNOWMELT: Snowmelt
REFREEZE: Melt Water Refreeze
--mean: Start and end year of mean (separated by commas)
--coordinate=X: Polar Stereographic X and Y of point
--date=X: Date to interpolate in year-decimal format
--csv=X: Read dates and coordinates from a csv file
--fill-value: Replace invalid values with fill value
(default uses original fill values from data file)
PYTHON DEPENDENCIES:
numpy: Scientific Computing Tools For Python
https://numpy.org
https://numpy.org/doc/stable/user/numpy-for-matlab-users.html
scipy: Scientific Tools for Python
https://docs.scipy.org/doc/
netCDF4: Python interface to the netCDF C library
https://unidata.github.io/netcdf4-python/netCDF4/index.html
pyproj: Python interface to PROJ library
https://pypi.org/project/pyproj/
scikit-learn: Machine Learning in Python
https://scikit-learn.org/stable/index.html
https://github.com/scikit-learn/scikit-learn
UPDATE HISTORY:
Updated 04/2020: reduced to interpolation function. output masked array
Updated 09/2019: read subsets of DS1km netCDF4 file to save memory
Written 09/2019
"""
from __future__ import print_function
import sys
import os
import re
import pyproj
import getopt
import netCDF4
import numpy as np
import scipy.interpolate
from sklearn.neighbors import KDTree, BallTree
#-- PURPOSE: read and interpolate downscaled RACMO products
def extrapolate_racmo_mean(base_dir, EPSG, VERSION, PRODUCT, tdec, X, Y,
RANGE=[], SEARCH='BallTree', NN=10, POWER=2.0, FILL_VALUE=None):
#-- Full Directory Setup
DIRECTORY = 'SMB1km_v{0}'.format(VERSION)
#-- netcdf variable names
input_products = {}
input_products['SMB'] = 'SMB_rec'
input_products['PRECIP'] = 'precip'
input_products['RUNOFF'] = 'runoff'
input_products['SNOWMELT'] = 'snowmelt'
input_products['REFREEZE'] = 'refreeze'
#-- version 1 was in separate files for each year
if (VERSION == '1.0'):
RACMO_MODEL = ['XGRN11','2.3']
VARNAME = input_products[PRODUCT]
SUBDIRECTORY = '{0}_v{1}'.format(VARNAME,VERSION)
input_dir = os.path.join(base_dir, 'RACMO', DIRECTORY, SUBDIRECTORY)
elif (VERSION == '2.0'):
RACMO_MODEL = ['XGRN11','2.3p2']
var = input_products[PRODUCT]
VARNAME = var if PRODUCT in ('SMB','PRECIP') else '{0}corr'.format(var)
input_dir = os.path.join(base_dir, 'RACMO', DIRECTORY)
elif (VERSION == '3.0'):
RACMO_MODEL = ['FGRN055','2.3p2']
var = input_products[PRODUCT]
VARNAME = var if (PRODUCT == 'SMB') else '{0}corr'.format(var)
input_dir = os.path.join(base_dir, 'RACMO', DIRECTORY)
#-- read mean from netCDF4 file
arg = (RACMO_MODEL[0],RACMO_MODEL[1],VERSION,PRODUCT,RANGE[0],RANGE[1])
mean_file = '{0}_RACMO{1}_DS1km_v{2}_{3}_Mean_{4:4d}-{5:4d}.nc'.format(*arg)
with netCDF4.Dataset(os.path.join(input_dir,mean_file),'r') as fileID:
MEAN = fileID[VARNAME][:,:].copy()
#-- input cumulative netCDF4 file
args = (RACMO_MODEL[0],RACMO_MODEL[1],VERSION,PRODUCT)
input_file = '{0}_RACMO{1}_DS1km_v{2}_{3}_cumul.nc'.format(*args)
#-- Open the RACMO NetCDF file for reading
fileID = netCDF4.Dataset(os.path.join(input_dir,input_file), 'r')
#-- input shape of RACMO data
nt,ny,nx = fileID[VARNAME].shape
#-- Get data from each netCDF variable
d = {}
#-- cell origins on the bottom right
dx = np.abs(fileID.variables['x'][1]-fileID.variables['x'][0])
dy = np.abs(fileID.variables['y'][1]-fileID.variables['y'][0])
#-- latitude and longitude arrays at center of each cell
d['LON'] = fileID.variables['LON'][:,:].copy()
d['LAT'] = fileID.variables['LAT'][:,:].copy()
#-- extract time (decimal years)
d['TIME'] = fileID.variables['TIME'][:].copy()
#-- mask object for interpolating data
d['MASK'] = np.array(fileID.variables['MASK'][:],dtype=np.bool)
i,j = np.nonzero(d['MASK'])
#-- reduce mean to valid points
var1 = MEAN[i,j]
#-- convert RACMO latitude and longitude to input coordinates (EPSG)
proj1 = pyproj.Proj("+init={0}".format(EPSG))
proj2 = pyproj.Proj("+init=EPSG:{0:d}".format(4326))
xg,yg = pyproj.transform(proj2, proj1, d['LON'], d['LAT'])
#-- construct search tree from original points
#-- can use either BallTree or KDTree algorithms
xy1 = np.concatenate((xg[i,j,None],yg[i,j,None]),axis=1)
tree = BallTree(xy1) if (SEARCH == 'BallTree') else KDTree(xy1)
#-- output extrapolated arrays of variable
extrap_var = np.zeros_like(tdec,dtype=np.float)
    #-- type designating algorithm used (0: invalid, 1: extrapolated, 2: backward, 3: forward)
extrap_type = np.ones_like(tdec,dtype=np.uint8)
#-- inverse distance weighting to extrapolate in space
#-- query the search tree to find the NN closest points
xy2 = np.concatenate((X[:,None],Y[:,None]),axis=1)
dist,indices = tree.query(xy2, k=NN, return_distance=True)
count = len(tdec)
#-- normalized weights if POWER > 0 (typically between 1 and 3)
#-- in the inverse distance weighting
power_inverse_distance = dist**(-POWER)
s = np.sum(power_inverse_distance, axis=1)
w = power_inverse_distance/np.broadcast_to(s[:,None],(count,NN))
#-- spatially extrapolate using inverse distance weighting
dt = (tdec - d['TIME'][0])/(d['TIME'][1] - d['TIME'][0])
extrap_var[:] = dt*np.sum(w*var1[indices],axis=1)
#-- replace fill value if specified
if FILL_VALUE:
ind, = np.nonzero(extrap_type == 0)
extrap_var[ind] = FILL_VALUE
fv = FILL_VALUE
else:
fv = 0.0
#-- close the NetCDF files
fileID.close()
#-- return the extrapolated values
return (extrap_var,extrap_type,fv)
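#-- Added note (not in the original module): the inverse-distance weights built
#-- above reduce to w_i = d_i**(-POWER) / sum_j(d_j**(-POWER)).  For example,
#-- with POWER=2.0 and three neighbours at distances 1, 2 and 4 the weights are
#-- 16/21, 4/21 and 1/21, so the nearest neighbour dominates the extrapolation.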
#-- PURPOSE: interpolate RACMO products to a set of coordinates and times
#-- wrapper function to extract EPSG and print to terminal
def racmo_extrap_mean(base_dir, VERSION, PRODUCT, RANGE=[],
COORDINATES=None, DATES=None, CSV=None, FILL_VALUE=None):
#-- this is the projection of the coordinates being extrapolated into
EPSG = "EPSG:{0:d}".format(3413)
#-- read coordinates and dates from a csv file (X,Y,year decimal)
if CSV:
X,Y,tdec = np.loadtxt(CSV,delimiter=',').T
else:
#-- regular expression pattern for extracting x and y coordinates
numerical_regex = '([-+]?(?:(?:\d*\.\d+)|(?:\d+\.?))(?:[Ee][+-]?\d+)?)'
regex = re.compile('\[{0},{0}\]'.format(numerical_regex))
#-- number of coordinates
npts = len(regex.findall(COORDINATES))
#-- x and y coordinates of interpolation points
X = np.zeros((npts))
Y = np.zeros((npts))
for i,XY in enumerate(regex.findall(COORDINATES)):
X[i],Y[i] = np.array(XY, dtype=np.float)
        #-- convert dates to year-decimal format (as expected by extrapolate_racmo_mean)
tdec = np.array(DATES, dtype=np.float)
#-- read and interpolate/extrapolate RACMO2.3 products
vi,itype,fv = extrapolate_racmo_mean(base_dir, EPSG, VERSION, PRODUCT,
tdec, X, Y, RANGE=RANGE, FILL_VALUE=FILL_VALUE)
interpolate_types = ['invalid','extrapolated','backward','forward']
for v,t in zip(vi,itype):
print(v,interpolate_types[t])
#-- PURPOSE: help module to describe the optional input parameters
def usage():
print('\nHelp: {}'.format(os.path.basename(sys.argv[0])))
print(' -D X, --directory=X\tWorking data directory')
print(' --version=X\t\tDownscaled RACMO Version')
print('\t1.0: RACMO2.3/XGRN11')
print('\t2.0: RACMO2.3p2/XGRN11')
print('\t3.0: RACMO2.3p2/FGRN055')
print(' --product:\t\tRACMO product to calculate')
print('\tSMB: Surface Mass Balance')
print('\tPRECIP: Precipitation')
print('\tRUNOFF: Melt Water Runoff')
print('\tSNOWMELT: Snowmelt')
print('\tREFREEZE: Melt Water Refreeze')
print(' --mean:\t\tStart and end year of mean (separated by commas)')
print(' --coordinate=X\t\tPolar Stereographic X and Y of point')
print(' --date=X\t\tDates to interpolate in year-decimal format')
print(' --csv=X\t\tRead dates and coordinates from a csv file')
print(' --fill-value\t\tReplace invalid values with fill value\n')
#-- Main program that calls racmo_extrap_mean()
def main():
#-- Read the system arguments listed after the program
long_options = ['help','directory=','version=','product=','mean=',
'coordinate=','date=','csv=','fill-value=']
optlist,arglist = getopt.getopt(sys.argv[1:], 'hD:', long_options)
#-- data directory
base_dir = os.getcwd()
#-- Downscaled version
VERSION = '3.0'
#-- Products to calculate cumulative
PRODUCTS = ['SMB']
#-- mean range
RANGE = [1961,1990]
#-- coordinates and times to run
COORDINATES = None
DATES = None
#-- read coordinates and dates from csv file
CSV = None
#-- invalid value (default is nan)
FILL_VALUE = np.nan
#-- extract parameters
for opt, arg in optlist:
if opt in ('-h','--help'):
usage()
sys.exit()
elif opt in ("-D","--directory"):
base_dir = os.path.expanduser(arg)
elif opt in ("--version"):
VERSION = arg
elif opt in ("--product"):
PRODUCTS = arg.split(',')
elif opt in ("--mean"):
RANGE = np.array(arg.split(','),dtype=np.int)
elif opt in ("--coordinate"):
COORDINATES = arg
elif opt in ("--date"):
DATES = arg.split(',')
elif opt in ("--csv"):
CSV = os.path.expanduser(arg)
elif opt in ("--fill-value"):
FILL_VALUE = eval(arg)
#-- data product longnames
longname = {}
longname['SMB'] = 'Cumulative Surface Mass Balance Anomalies'
longname['PRECIP'] = 'Cumulative Precipitation Anomalies'
longname['RUNOFF'] = 'Cumulative Runoff Anomalies'
longname['SNOWMELT'] = 'Cumulative Snowmelt Anomalies'
longname['REFREEZE'] = 'Cumulative Melt Water Refreeze Anomalies'
#-- for each product
for p in PRODUCTS:
#-- check that product was entered correctly
if p not in longname.keys():
raise IOError('{0} not in valid RACMO products'.format(p))
#-- run program with parameters
racmo_extrap_mean(base_dir, VERSION, p, RANGE=RANGE,
COORDINATES=COORDINATES, DATES=DATES, CSV=CSV,
FILL_VALUE=FILL_VALUE)
#-- run main program
if __name__ == '__main__':
main()
| 40.118056
| 81
| 0.648866
|
5519b61fbec384694a50ffe7fd7032f528145c29
| 3,353
|
py
|
Python
|
teste/teste/settings.py
|
MillenaEDS/trabalhope
|
659e29f9d5ea4f3f9a740a6952d432e22f282a60
|
[
"Apache-2.0"
] | null | null | null |
teste/teste/settings.py
|
MillenaEDS/trabalhope
|
659e29f9d5ea4f3f9a740a6952d432e22f282a60
|
[
"Apache-2.0"
] | 2
|
2020-06-05T18:10:20.000Z
|
2021-06-10T20:06:29.000Z
|
teste/teste/settings.py
|
MillenaEDS/trabalholms
|
659e29f9d5ea4f3f9a740a6952d432e22f282a60
|
[
"Apache-2.0"
] | null | null | null |
"""
Django settings for teste project.
Generated by 'django-admin startproject' using Django 1.11.6.
For more information on this file, see
https://docs.djangoproject.com/en/1.11/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.11/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.11/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'y6ta0xr1_y#e_!z53*d)gxobem9#31c*m=$*e3*n)s3x_!&0re'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'testando'
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'teste.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'teste.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.11/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.mysql',
'NAME': 'lms',
'USER': 'root',
'PASSWORD': 'lu1234',
'HOST': '127.0.0.1',
'PORT': '3306'
}
}
# Password validation
# https://docs.djangoproject.com/en/1.11/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/1.11/topics/i18n/
LANGUAGE_CODE = 'pt-br'
TIME_ZONE = 'America/Sao_Paulo'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.11/howto/static-files/
STATIC_URL = '/static/'
#FileUpload
MEDIA_URL = '/media/'
MEDIA_ROOT = os.path.join(BASE_DIR, 'media')
#login
AUTH_USER_MODEL ='testando.Usuario'
LOGIN_REDIRECT_URL = 'index'
| 24.122302
| 91
| 0.686848
|
9ebb1f5f9e8d7a3b3765d23b0ddaffb1e9755492
| 7,622
|
py
|
Python
|
gammapy/irf/tests/test_energy_dispersion.py
|
qpiel/gammapy
|
cfb976909e63f4d5d578e1495245c0baad69482b
|
[
"BSD-3-Clause"
] | null | null | null |
gammapy/irf/tests/test_energy_dispersion.py
|
qpiel/gammapy
|
cfb976909e63f4d5d578e1495245c0baad69482b
|
[
"BSD-3-Clause"
] | 1
|
2020-10-29T19:55:46.000Z
|
2020-10-29T19:55:46.000Z
|
gammapy/irf/tests/test_energy_dispersion.py
|
qpiel/gammapy
|
cfb976909e63f4d5d578e1495245c0baad69482b
|
[
"BSD-3-Clause"
] | null | null | null |
# Licensed under a 3-clause BSD style license - see LICENSE.rst
from __future__ import absolute_import, division, print_function, unicode_literals
import pytest
import numpy as np
from numpy.testing import assert_allclose, assert_equal
from astropy.coordinates import Angle
import astropy.units as u
from ...utils.testing import requires_dependency, requires_data, mpl_plot_check
from ...utils.energy import EnergyBounds
from ...irf import EnergyDispersion, EnergyDispersion2D
class TestEnergyDispersion:
def setup(self):
self.e_true = np.logspace(0, 1, 101) * u.TeV
self.e_reco = self.e_true
self.resolution = 0.1
self.bias = 0
self.edisp = EnergyDispersion.from_gauss(
e_true=self.e_true,
e_reco=self.e_reco,
pdf_threshold=1e-7,
sigma=self.resolution,
bias=self.bias,
)
def test_from_diagonal_response(self):
e_true = [0.5, 1, 2, 4, 6] * u.TeV
e_reco = [2, 4, 6] * u.TeV
edisp = EnergyDispersion.from_diagonal_response(e_true, e_reco)
assert edisp.pdf_matrix.shape == (4, 2)
expected = [[0, 0], [0, 0], [1, 0], [0, 1]]
assert_equal(edisp.pdf_matrix, expected)
# Test square matrix
edisp = EnergyDispersion.from_diagonal_response(e_true)
assert_allclose(edisp.e_reco.bins.value, e_true.value)
assert edisp.e_reco.bins.unit == "TeV"
assert_equal(edisp.pdf_matrix[0][0], 1)
assert_equal(edisp.pdf_matrix[2][0], 0)
assert edisp.pdf_matrix.sum() == 4
def test_str(self):
assert "EnergyDispersion" in str(self.edisp)
def test_evaluate(self):
# Check for correct normalization
pdf = self.edisp.data.evaluate(e_true=3.34 * u.TeV)
assert_allclose(np.sum(pdf), 1, atol=1e-2)
def test_apply(self):
counts = np.arange(len(self.e_true) - 1)
actual = self.edisp.apply(counts)
assert_allclose(actual[0], 1.8612999017723058, atol=1e-3)
counts = np.arange(len(self.e_true) - 4)
with pytest.raises(ValueError) as exc:
self.edisp.apply(counts)
assert str(len(counts)) in str(exc.value)
assert_allclose(actual[0], 1.8612999017723058, atol=1e-3)
def test_get_bias(self):
bias = self.edisp.get_bias(3.34 * u.TeV)
assert_allclose(bias, self.bias, atol=1e-2)
def test_get_resolution(self):
resolution = self.edisp.get_resolution(3.34 * u.TeV)
assert_allclose(resolution, self.resolution, atol=1e-2)
def test_io(self, tmpdir):
indices = np.array([[1, 3, 6], [3, 3, 2]])
desired = self.edisp.pdf_matrix[indices]
writename = str(tmpdir / "rmf_test.fits")
self.edisp.write(writename)
edisp2 = EnergyDispersion.read(writename)
actual = edisp2.pdf_matrix[indices]
assert_allclose(actual, desired)
@requires_dependency("matplotlib")
def test_plot_matrix(self):
with mpl_plot_check():
self.edisp.plot_matrix()
@requires_dependency("matplotlib")
def test_plot_bias(self):
with mpl_plot_check():
self.edisp.plot_bias()
@requires_dependency("matplotlib")
def test_peek(self):
with mpl_plot_check():
self.edisp.peek()
@requires_data("gammapy-extra")
class TestEnergyDispersion2D:
def setup(self):
        # TODO: use from_gauss method to create a known edisp (see below)
# At the moment only 1 test uses it (test_get_response)
filename = (
"$GAMMAPY_EXTRA/test_datasets/irf/hess/pa/hess_edisp_2d_023523.fits.gz"
)
self.edisp = EnergyDispersion2D.read(filename, hdu="ENERGY DISPERSION")
# Make a test case
e_true = np.logspace(-1.0, 2.0, 51) * u.TeV
migra = np.linspace(0.0, 4.0, 1001)
offset = np.linspace(0.0, 2.5, 5) * u.deg
sigma = 0.15 / (e_true[:-1] / (1 * u.TeV)).value ** 0.3
bias = 1e-3 * (e_true[:-1] - 1 * u.TeV).value
self.edisp2 = EnergyDispersion2D.from_gauss(e_true, migra, bias, sigma, offset)
def test_str(self):
assert "EnergyDispersion2D" in str(self.edisp)
def test_evaluation(self):
# TODO: Move to tests for NDDataArray
# Check that nodes are evaluated correctly
e_node = 12
off_node = 3
m_node = 5
offset = self.edisp.data.axis("offset").nodes[off_node]
energy = self.edisp.data.axis("e_true").nodes[e_node]
migra = self.edisp.data.axis("migra").nodes[m_node]
actual = self.edisp.data.evaluate(offset=offset, e_true=energy, migra=migra)
desired = self.edisp.data.data[e_node, m_node, off_node]
assert_allclose(actual, desired, rtol=1e-06)
assert_allclose(actual, 0.09388659149, rtol=1e-06)
# Check output shape
energy = [1, 2] * u.TeV
migra = np.array([0.98, 0.97, 0.7])
offset = [0.1, 0.2, 0.3, 0.4] * u.deg
actual = self.edisp.data.evaluate(
e_true=energy.reshape(-1, 1, 1),
migra=migra.reshape(1, -1, 1),
offset=offset.reshape(1, 1, -1),
)
assert_allclose(actual.shape, (2, 3, 4))
# Check evaluation at all nodes
actual = self.edisp.data.evaluate().shape
desired = (
self.edisp.data.axis("e_true").nbins,
self.edisp.data.axis("migra").nbins,
self.edisp.data.axis("offset").nbins,
)
assert_equal(actual, desired)
def test_get_response(self):
pdf = self.edisp2.get_response(offset=0.7 * u.deg, e_true=1 * u.TeV)
assert_allclose(pdf.sum(), 1)
assert_allclose(pdf.max(), 0.013025634736094305)
def test_exporter(self):
# Check RMF exporter
offset = Angle(0.612, "deg")
e_reco = EnergyBounds.equal_log_spacing(1, 10, 6, "TeV")
e_true = EnergyBounds.equal_log_spacing(0.8, 5, 4, "TeV")
rmf = self.edisp.to_energy_dispersion(offset, e_true=e_true, e_reco=e_reco)
assert_allclose(rmf.data.data[2, 3], 0.08, atol=5e-2) # same tolerance as above
actual = rmf.pdf_matrix[2]
e_val = np.sqrt(e_true[2] * e_true[3])
desired = self.edisp.get_response(offset, e_val, e_reco)
assert_equal(actual, desired)
def test_write(self):
energy_lo = np.logspace(0, 1, 11)[:-1] * u.TeV
energy_hi = np.logspace(0, 1, 11)[1:] * u.TeV
offset_lo = np.linspace(0, 1, 4)[:-1] * u.deg
offset_hi = np.linspace(0, 1, 4)[1:] * u.deg
migra_lo = np.linspace(0, 3, 4)[:-1]
migra_hi = np.linspace(0, 3, 4)[1:]
data = (
np.ones(shape=(len(energy_lo), len(migra_lo), len(offset_lo))) * u.cm * u.cm
)
edisp = EnergyDispersion2D(
e_true_lo=energy_lo,
e_true_hi=energy_hi,
migra_lo=migra_lo,
migra_hi=migra_hi,
offset_lo=offset_lo,
offset_hi=offset_hi,
data=data,
)
hdu = edisp.to_fits()
assert_equal(hdu.data["ENERG_LO"][0], edisp.data.axis("e_true").lo.value)
assert hdu.header["TUNIT1"] == edisp.data.axis("e_true").lo.unit
@requires_dependency("matplotlib")
def test_plot_migration(self):
with mpl_plot_check():
self.edisp.plot_migration()
@requires_dependency("matplotlib")
def test_plot_bias(self):
with mpl_plot_check():
self.edisp.plot_bias()
@requires_dependency("matplotlib")
def test_peek(self):
with mpl_plot_check():
self.edisp.peek()
| 36.295238
| 88
| 0.616505
|
3b31f70fa7fd06e546f8d29db1d342c921f3b03c
| 14,535
|
py
|
Python
|
test_compare.py
|
JeanCherubini/One-Shot-Object-Detection
|
2098ad3e90cb4aa9f1dd188a40efa29927ac3ab1
|
[
"MIT"
] | 376
|
2019-11-29T02:26:39.000Z
|
2022-03-24T04:07:29.000Z
|
test_compare.py
|
JeanCherubini/One-Shot-Object-Detection
|
2098ad3e90cb4aa9f1dd188a40efa29927ac3ab1
|
[
"MIT"
] | 31
|
2019-12-03T08:05:19.000Z
|
2022-03-15T06:33:05.000Z
|
test_compare.py
|
JeanCherubini/One-Shot-Object-Detection
|
2098ad3e90cb4aa9f1dd188a40efa29927ac3ab1
|
[
"MIT"
] | 78
|
2019-11-29T05:20:31.000Z
|
2022-03-18T12:40:34.000Z
|
# --------------------------------------------------------
# Tensorflow Faster R-CNN
# Licensed under The MIT License [see LICENSE for details]
# Written by Jiasen Lu, Jianwei Yang, based on code from Ross Girshick
# --------------------------------------------------------
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import pandas as pd
import _init_paths
import os
import sys
import numpy as np
import argparse
import pprint
import pdb
import time
import cv2
import torch
from torch.autograd import Variable
import torch.nn as nn
import torch.optim as optim
import pickle
from roi_data_layer.roidb import combined_roidb
from roi_data_layer.roibatchLoader import roibatchLoader
from model.utils.config import cfg, cfg_from_file, cfg_from_list, get_output_dir
from model.rpn.bbox_transform import clip_boxes
# from model.nms.nms_wrapper import nms
from model.roi_layers import nms
from model.rpn.bbox_transform import bbox_transform_inv
from model.utils.net_utils import save_net, load_net, vis_detections
from model.faster_rcnn.vgg16 import vgg16
from model.faster_rcnn.resnet import resnet
import pdb
try:
xrange # Python 2
except NameError:
xrange = range # Python 3
def save_weight(weight, time, seen):
time = np.where(time==0, 1, time)
weight = weight/time[:,np.newaxis]
result_map = np.zeros((len(weight), len(weight)))
for i in range(len(weight)):
for j in range(len(weight)):
v1 = weight[i]
v2 = weight[j]
# v1_ = np.linalg.norm(v1)
# v2_ = np.linalg.norm(v2)
# v12 = np.sum(v1*v2)
# print(v12)
# print(v1_)
# print(v2_)
distance = np.linalg.norm(v1-v2)
if np.sum(v1*v2)== 0 :
result_map[i][j] = 0
else:
result_map[i][j] = distance
df = pd.DataFrame (result_map)
## save to xlsx file
filepath = 'similarity_%d.xlsx'%(seen)
df.to_excel(filepath, index=False)
weight = weight*255
cv2.imwrite('./weight_%d.png'%(seen), weight)
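# Added note (not in the original): result_map[i][j] above is the Euclidean
# distance between the time-averaged query-weight vectors of classes i and j,
# with 0 written when the two vectors have zero dot product (in practice, when
# a class was never seen).  The distance matrix is saved as an .xlsx table and
# the averaged weights themselves as a scaled grayscale image.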
def parse_args():
"""
Parse input arguments
"""
parser = argparse.ArgumentParser(description='Train a Fast R-CNN network')
parser.add_argument('--dataset', dest='dataset',
help='training dataset',
default='coco', type=str)
parser.add_argument('--cfg', dest='cfg_file',
help='optional config file',
default='cfgs/res101.yml', type=str)
parser.add_argument('--net', dest='net',
help='vgg16, res50, res101, res152',
default='res50', type=str)
parser.add_argument('--set', dest='set_cfgs',
help='set config keys', default=None,
nargs=argparse.REMAINDER)
parser.add_argument('--load_dir', dest='load_dir',
help='directory to load models', default="models",
type=str)
parser.add_argument('--cuda', dest='cuda',
help='whether use CUDA',
default=True)
parser.add_argument('--ls', dest='large_scale',
help='whether use large imag scale',
action='store_true')
parser.add_argument('--mGPUs', dest='mGPUs',
help='whether use multiple GPUs',
default=True)
parser.add_argument('--cag', dest='class_agnostic',
help='whether perform class_agnostic bbox regression',
default=True)
parser.add_argument('--parallel_type', dest='parallel_type',
help='which part of model to parallel, 0: all, 1: model before roi pooling',
default=0, type=int)
parser.add_argument('--s', dest='checksession',
help='checksession to load model',
default=1, type=int)
parser.add_argument('--checkepoch', dest='checkepoch',
help='checkepoch to load network',
default=10, type=int)
parser.add_argument('--p', dest='checkpoint',
help='checkpoint to load network',
default=1663, type=int)
parser.add_argument('--vis', dest='vis',
help='visualization mode',
action='store_true')
parser.add_argument('--seen', dest='seen',default=2, type=int)
parser.add_argument('--a', dest='average',default=1, type=int)
parser.add_argument('--g', dest='group',
help='which group',
default=0)
args = parser.parse_args()
return args
lr = cfg.TRAIN.LEARNING_RATE
momentum = cfg.TRAIN.MOMENTUM
weight_decay = cfg.TRAIN.WEIGHT_DECAY
if __name__ == '__main__':
args = parse_args()
print('Called with args:')
print(args)
if torch.cuda.is_available() and not args.cuda:
print("WARNING: You have a CUDA device, so you should probably run with --cuda")
np.random.seed(cfg.RNG_SEED)
if args.dataset == "pascal_voc":
args.imdb_name = "voc_2007_trainval"
args.imdbval_name = "voc_2007_test"
args.set_cfgs = ['ANCHOR_SCALES', '[8, 16, 32]', 'ANCHOR_RATIOS', '[0.5,1,2]']
elif args.dataset == "pascal_voc_0712":
args.imdb_name = "voc_2007_trainval+voc_2012_trainval"
args.imdbval_name = "voc_2007_test"
args.set_cfgs = ['ANCHOR_SCALES', '[8, 16, 32]', 'ANCHOR_RATIOS', '[0.5,1,2]']
elif args.dataset == "coco":
args.imdb_name = "coco_2017_train"
args.imdbval_name = "coco_2017_val"
args.set_cfgs = ['ANCHOR_SCALES', '[4, 8, 16, 32]', 'ANCHOR_RATIOS', '[0.5,1,2]']
elif args.dataset == "imagenet":
args.imdb_name = "imagenet_train"
args.imdbval_name = "imagenet_val"
args.set_cfgs = ['ANCHOR_SCALES', '[8, 16, 32]', 'ANCHOR_RATIOS', '[0.5,1,2]']
elif args.dataset == "vg":
args.imdb_name = "vg_150-50-50_minitrain"
args.imdbval_name = "vg_150-50-50_minival"
args.set_cfgs = ['ANCHOR_SCALES', '[4, 8, 16, 32]', 'ANCHOR_RATIOS', '[0.5,1,2]']
# args.cfg_file = "cfgs/{}_ls.yml".format(args.net) if args.large_scale else "cfgs/{}.yml".format(args.net)
args.cfg_file = "cfgs/{}_{}.yml".format(args.net, args.group) if args.group != 0 else "cfgs/{}.yml".format(args.net)
if args.cfg_file is not None:
cfg_from_file(args.cfg_file)
if args.set_cfgs is not None:
cfg_from_list(args.set_cfgs)
print('Using config:')
pprint.pprint(cfg)
cfg.TRAIN.USE_FLIPPED = False
# imdb_vs, roidb_vs, ratio_list_vs, ratio_index_vs, query_vs = combined_roidb('coco_2014_valminusminival', False)
imdb_vu, roidb_vu, ratio_list_vu, ratio_index_vu, query_vu = combined_roidb(args.imdbval_name, False, seen=args.seen)
# imdb_vs.competition_mode(on=True)
imdb_vu.competition_mode(on=True)
input_dir = args.load_dir + "/" + args.net + "/" + args.dataset
if not os.path.exists(input_dir):
raise Exception('There is no input directory for loading network from ' + input_dir)
load_name = os.path.join(input_dir,
'faster_rcnn_{}_{}_{}.pth'.format(args.checksession, args.checkepoch, args.checkpoint))
  # initialize the network here.
if args.net == 'vgg16':
fasterRCNN = vgg16(imdb_vu.classes, pretrained=False, class_agnostic=args.class_agnostic)
elif args.net == 'res101':
fasterRCNN = resnet(imdb_vu.classes, 101, pretrained=False, class_agnostic=args.class_agnostic)
elif args.net == 'res50':
fasterRCNN = resnet(imdb_vu.classes, 50, pretrained=False, class_agnostic=args.class_agnostic)
elif args.net == 'res152':
fasterRCNN = resnet(imdb_vu.classes, 152, pretrained=False, class_agnostic=args.class_agnostic)
else:
print("network is not defined")
pdb.set_trace()
fasterRCNN.create_architecture()
print("load checkpoint %s" % (load_name))
checkpoint = torch.load(load_name)
fasterRCNN.load_state_dict(checkpoint['model'])
if 'pooling_mode' in checkpoint.keys():
cfg.POOLING_MODE = checkpoint['pooling_mode']
print('load model successfully!')
  # initialize the tensor holder here.
im_data = torch.FloatTensor(1)
query = torch.FloatTensor(1)
im_info = torch.FloatTensor(1)
catgory = torch.LongTensor(1)
gt_boxes = torch.FloatTensor(1)
# ship to cuda
if args.cuda:
im_data = im_data.cuda()
query = query.cuda()
im_info = im_info.cuda()
catgory = catgory.cuda()
gt_boxes = gt_boxes.cuda()
# make variable
im_data = Variable(im_data)
query = Variable(query)
im_info = Variable(im_info)
catgory = Variable(catgory)
gt_boxes = Variable(gt_boxes)
if args.cuda:
cfg.CUDA = True
if args.cuda:
fasterRCNN.cuda()
start = time.time()
max_per_image = 100
vis = args.vis
if vis:
thresh = 0.05
else:
thresh = 0.0
save_name = 'faster_rcnn_10'
# output_dir_vs = get_output_dir(imdb_vs, 'faster_rcnn_seen')
output_dir_vu = get_output_dir(imdb_vu, 'faster_rcnn_unseen')
all_weight = np.zeros((len(ratio_index_vu[0]),1024))
all_times = np.zeros((imdb_vu.num_classes))
dataset_vu = roibatchLoader(roidb_vu, ratio_list_vu, ratio_index_vu, query_vu, 1, imdb_vu.num_classes, training=False, seen=args.seen)
fasterRCNN.eval()
all_ap = []
for avg in range(args.average):
dataset_vu.query_position = avg
dataloader_vu = torch.utils.data.DataLoader(dataset_vu, batch_size=1,shuffle=False, num_workers=0,pin_memory=True)
data_iter_vu = iter(dataloader_vu)
num_images_vu = len(imdb_vu.image_index)
num_detect = len(ratio_index_vu[0])
all_boxes = [[[] for _ in xrange(num_images_vu)]
for _ in xrange(imdb_vu.num_classes)]
_t = {'im_detect': time.time(), 'misc': time.time()}
det_file = os.path.join(output_dir_vu, 'detections_%d_%d.pkl'%(args.seen, avg))
print(det_file)
for i,index in enumerate(ratio_index_vu[0]):
data = next(data_iter_vu)
im_data.data.resize_(data[0].size()).copy_(data[0])
query.data.resize_(data[1].size()).copy_(data[1])
im_info.data.resize_(data[2].size()).copy_(data[2])
gt_boxes.data.resize_(data[3].size()).copy_(data[3])
catgory.data.resize_(data[4].size()).copy_(data[4])
det_tic = time.time()
rois, cls_prob, bbox_pred, \
rpn_loss_cls, rpn_loss_box, \
RCNN_loss_cls, _, RCNN_loss_bbox, \
rois_label, weight = fasterRCNN(im_data, query, im_info, gt_boxes, catgory)
# all_weight[data[4],:] = all_weight[data[4],:] + weight.view(-1).detach().cpu().numpy()
all_weight[i,:] = weight.view(-1).detach().cpu().numpy()
all_times[data[4]] = all_times[data[4]] + 1
scores = cls_prob.data
boxes = rois.data[:, :, 1:5]
if cfg.TEST.BBOX_REG:
# Apply bounding-box regression deltas
box_deltas = bbox_pred.data
if cfg.TRAIN.BBOX_NORMALIZE_TARGETS_PRECOMPUTED:
# Optionally normalize targets by a precomputed mean and stdev
if args.class_agnostic:
box_deltas = box_deltas.view(-1, 4) * torch.FloatTensor(cfg.TRAIN.BBOX_NORMALIZE_STDS).cuda() \
+ torch.FloatTensor(cfg.TRAIN.BBOX_NORMALIZE_MEANS).cuda()
box_deltas = box_deltas.view(1, -1, 4)
else:
box_deltas = box_deltas.view(-1, 4) * torch.FloatTensor(cfg.TRAIN.BBOX_NORMALIZE_STDS).cuda() \
+ torch.FloatTensor(cfg.TRAIN.BBOX_NORMALIZE_MEANS).cuda()
            box_deltas = box_deltas.view(1, -1, 4 * len(imdb_vu.classes))
pred_boxes = bbox_transform_inv(boxes, box_deltas, 1)
pred_boxes = clip_boxes(pred_boxes, im_info.data, 1)
else:
# Simply repeat the boxes, once for each class
pred_boxes = np.tile(boxes, (1, scores.shape[1]))
pred_boxes /= data[2][0][2].item()
scores = scores.squeeze()
pred_boxes = pred_boxes.squeeze()
det_toc = time.time()
detect_time = det_toc - det_tic
misc_tic = time.time()
if vis and i%1==0:
print(i)
im = cv2.imread(dataset_vu._roidb[dataset_vu.ratio_index[i]]['image'])
im2show = np.copy(im)
inds = torch.nonzero(scores>thresh).view(-1)
# if there is det
if inds.numel() > 0:
cls_scores = scores[inds]
_, order = torch.sort(cls_scores, 0, True)
cls_boxes = pred_boxes[inds, :]
cls_dets = torch.cat((cls_boxes, cls_scores.unsqueeze(1)), 1)
# cls_dets = torch.cat((cls_boxes, cls_scores), 1)
cls_dets = cls_dets[order]
keep = nms(cls_boxes[order, :], cls_scores[order], cfg.TEST.NMS)
cls_dets = cls_dets[keep.view(-1).long()]
all_boxes[data[4]][index] = cls_dets.cpu().numpy()
if vis:
im2show = vis_detections(im2show, 'shot', cls_dets.cpu().numpy(), 0.8)
# Limit to max_per_image detections *over all classes*
if max_per_image > 0:
try:
image_scores = all_boxes[data[4]][index][:,-1]
if len(image_scores) > max_per_image:
image_thresh = np.sort(image_scores)[-max_per_image]
keep = np.where(all_boxes[data[4]][index][:,-1] >= image_thresh)[0]
all_boxes[data[4]][index] = all_boxes[data[4]][index][keep, :]
except:
pass
misc_toc = time.time()
nms_time = misc_toc - misc_tic
sys.stdout.write('im_detect: {:d}/{:d} {:.3f}s {:.3f}s \r' \
.format(i + 1, num_detect, detect_time, nms_time))
sys.stdout.flush()
if vis and i%1==0:
o_query = data[1][0].permute(1, 2,0).contiguous().cpu().numpy()
o_query *= [0.229, 0.224, 0.225]
o_query += [0.485, 0.456, 0.406]
o_query *= 255
o_query = o_query[:,:,::-1]
(h,w,c) = im2show.shape
o_query = cv2.resize(o_query, (h, h),interpolation=cv2.INTER_LINEAR)
# im2show = np.concatenate((im2show, o_query), axis=1)
cv2.imwrite('./test_img/%d.png'%(i), im2show)
with open(det_file, 'wb') as f:
print('hi')
pickle.dump([all_boxes, all_weight, all_times], f, pickle.HIGHEST_PROTOCOL)
with open(det_file, 'rb') as fid:
[all_boxes, all_weight, all_times] = pickle.load(fid)
save_weight(all_weight, all_times, args.seen)
print('Evaluating detections')
aps = imdb_vu.evaluate_detections(all_boxes, output_dir_vu)
all_ap.append(aps)
print(aps)
print(all_ap)
end = time.time()
print("test time: %0.4fs" % (end - start))
all_ap = np.array(all_ap)
print(np.mean(all_ap, axis=0))
| 35.712531
| 136
| 0.631441
|
9d028368be9d26a0695a3766715bd0849dfeedb2
| 1,986
|
py
|
Python
|
tests/rules/test_scm_correction.py
|
RogueScholar/thefuck-termux
|
cc33d5fa0077b2b2323b8a62f3478ff8efef3fba
|
[
"MIT"
] | null | null | null |
tests/rules/test_scm_correction.py
|
RogueScholar/thefuck-termux
|
cc33d5fa0077b2b2323b8a62f3478ff8efef3fba
|
[
"MIT"
] | null | null | null |
tests/rules/test_scm_correction.py
|
RogueScholar/thefuck-termux
|
cc33d5fa0077b2b2323b8a62f3478ff8efef3fba
|
[
"MIT"
] | null | null | null |
import pytest
from thefuck.rules.scm_correction import get_new_command
from thefuck.rules.scm_correction import match
from thefuck.types import Command
@pytest.fixture
def get_actual_scm_mock(mocker):
return mocker.patch("thefuck.rules.scm_correction._get_actual_scm",
return_value=None)
@pytest.mark.parametrize(
"script, output, actual_scm",
[
(
"git log",
"fatal: Not a git repository "
"(or any of the parent directories): .git",
"hg",
),
(
"hg log",
"abort: no repository found in '/home/nvbn/exp/thefuck' "
"(.hg not found)!",
"git",
),
],
)
def test_match(get_actual_scm_mock, script, output, actual_scm):
get_actual_scm_mock.return_value = actual_scm
assert match(Command(script, output))
@pytest.mark.parametrize(
"script, output, actual_scm",
[
("git log", "", "hg"),
(
"git log",
"fatal: Not a git repository "
"(or any of the parent directories): .git",
None,
),
(
"hg log",
"abort: no repository found in '/home/nvbn/exp/thefuck' "
"(.hg not found)!",
None,
),
(
"not-scm log",
"abort: no repository found in '/home/nvbn/exp/thefuck' "
"(.hg not found)!",
"git",
),
],
)
def test_not_match(get_actual_scm_mock, script, output, actual_scm):
get_actual_scm_mock.return_value = actual_scm
assert not match(Command(script, output))
@pytest.mark.parametrize(
"script, actual_scm, result",
[("git log", "hg", "hg log"), ("hg log", "git", "git log")],
)
def test_get_new_command(get_actual_scm_mock, script, actual_scm, result):
get_actual_scm_mock.return_value = actual_scm
new_command = get_new_command(Command(script, ""))
assert new_command == result
| 27.205479
| 74
| 0.575529
|
7c9d18147bee0f9f5fa0371422cd10a8d7ad7ba8
| 12,914
|
py
|
Python
|
gan_pytorch.py
|
ReyhaneAskari/negative_momentum_for_improved_game_dynamics
|
84339c2dbbe8173cf8d004ef494700076908f18b
|
[
"MIT"
] | 6
|
2020-05-14T07:41:26.000Z
|
2022-03-06T08:49:52.000Z
|
gan_pytorch.py
|
ReyhaneAskari/negative_momentum_for_improved_game_dynamics
|
84339c2dbbe8173cf8d004ef494700076908f18b
|
[
"MIT"
] | 3
|
2020-12-28T07:51:20.000Z
|
2021-01-25T06:51:29.000Z
|
gan_pytorch.py
|
ReyhaneAskari/negative_momentum_for_improved_game_dynamics
|
84339c2dbbe8173cf8d004ef494700076908f18b
|
[
"MIT"
] | null | null | null |
import random
import numpy as np
np.random.seed(1234)
import os
import torch
torch.manual_seed(1234)
torch.cuda.manual_seed_all(1234)
import torch.nn as nn
from torch.autograd import Variable
from torch.optim import SGD, Adam
from utils import Adamp as AdamPlus
import torch.autograd as autograd
from torch.optim.optimizer import required
from scipy.stats import gaussian_kde
import argparse
import matplotlib as mpl
mpl.use('Agg')
import matplotlib.pyplot as plt
from numpy import linalg
parser = argparse.ArgumentParser()
parser.add_argument('--iterations', type=int, default=2)
parser.add_argument('--modes_idx', type=int, default=2)
parser.add_argument('--update_rule_idx', type=int, default=4)
parser.add_argument('--learning_rate_idx', type=int, default=1)
parser.add_argument('--momentum_idx', type=int, default=2)
parser.add_argument('--dis_iter_idx', type=int, default=0)
args = parser.parse_args()
iterations = args.iterations
modes = ['gp', 'no_gp', 'sat', 'co']
mode = modes[args.modes_idx]
update_rules = ['sgdnm', 'adam_plus', 'adam', 'mixed_adam_plus', 'mixed_sgd']
update_rule = update_rules[args.update_rule_idx]
learning_rates = [1e-1, 1e-2, 1e-3, 1e-4]
learning_rate = learning_rates[args.learning_rate_idx]
momentums = [-0.5, -0.1, 0, 0.5]
momentum = momentums[args.momentum_idx]
dis_iters = [1, 5]
dis_iter = dis_iters[args.dis_iter_idx]
_batch_size = 128
dim = 256
use_cuda = True
# LAMBDA = 0.1
LAMBDA = 0.05
z_dim = 8
save_directory = ('results/' + str(mode) + '_' +
str(update_rule) + '_' +
str(learning_rate) + '_' +
str(momentum) + '_' +
str(dis_iter) + '_' +
'/')
print('save_directory: ' + save_directory)
print('iterations: ' + str(iterations))
if not os.path.exists(save_directory):
os.makedirs(save_directory)
class Gen(nn.Module):
def __init__(self):
super(Gen, self).__init__()
main = nn.Sequential(
nn.Linear(z_dim, dim),
nn.ReLU(True),
nn.Linear(dim, dim),
nn.ReLU(True),
nn.Linear(dim, dim),
nn.ReLU(True),
nn.Linear(dim, 2),
)
self.main = main
def forward(self, noise):
output = self.main(noise)
return output
class Dis(nn.Module):
def __init__(self):
super(Dis, self).__init__()
main = nn.Sequential(
nn.Linear(2, dim),
nn.ReLU(True),
nn.Linear(dim, dim),
nn.ReLU(True),
nn.Linear(dim, dim),
nn.ReLU(True),
nn.Linear(dim, 1),
)
self.main = main
def forward(self, inputs):
output = self.main(inputs)
return output.view(-1)
def weights_init(m):
classname = m.__class__.__name__
if classname.find('Linear') != -1:
m.weight.data.normal_(0.0, 0.02)
m.bias.data.fill_(0)
elif classname.find('BatchNorm') != -1:
m.weight.data.normal_(1.0, 0.02)
m.bias.data.fill_(0)
# def weights_load():
def get_8gaussians(batch_size):
scale = 2.
centers = [
(1, 0),
(-1, 0),
(0, 1),
(0, -1),
(1. / np.sqrt(2), 1. / np.sqrt(2)),
(1. / np.sqrt(2), -1. / np.sqrt(2)),
(-1. / np.sqrt(2), 1. / np.sqrt(2)),
(-1. / np.sqrt(2), -1. / np.sqrt(2))
]
centers = [(scale * x, scale * y) for x, y in centers]
while True:
dataset = []
for i in range(batch_size):
point = np.random.randn(2) * .05
center = random.choice(centers)
point[0] += center[0]
point[1] += center[1]
dataset.append(point)
dataset = np.array(dataset, dtype='float32')
dataset /= 1.414
out = Variable(torch.Tensor(dataset))
if use_cuda:
out = out.cuda()
yield out
def calc_gradient_penalty(netD, real_data, fake_data):
alpha = torch.rand(_batch_size, 1)
alpha = alpha.expand(real_data.size())
alpha = alpha.cuda() if use_cuda else alpha
interpolates = alpha * real_data + ((1 - alpha) * fake_data)
if use_cuda:
interpolates = interpolates.cuda()
interpolates = autograd.Variable(interpolates, requires_grad=True)
disc_interpolates = netD(interpolates)
gradients = autograd.grad(
outputs=disc_interpolates, inputs=interpolates,
grad_outputs=torch.ones(
disc_interpolates.size()).cuda() if use_cuda else torch.ones(
disc_interpolates.size()),
create_graph=True, retain_graph=True, only_inputs=True)[0]
gradient_penalty = ((gradients.norm(2, dim=1) - 1) ** 2).mean() * LAMBDA
return gradient_penalty
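# Added note (not in the original): this is the standard WGAN-GP penalty,
# LAMBDA * E[(||grad_x D(x_hat)||_2 - 1)**2], evaluated at random interpolations
# x_hat between real and fake samples; LAMBDA is set to 0.05 near the top of
# this script.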
def apply_consensus(x, y, grad_x, grad_y):
    # Unused helper (never called in this script); grad_x / grad_y must be
    # supplied by the caller before the step.
    grad_x += 0.5 * x
    grad_y += 0.5 * y
    x -= learning_rate * grad_x
    y -= learning_rate * grad_y
    return x, y
class SGDNM(SGD):
def __init__(self, params, lr=required, momentum=0, dampening=0,
weight_decay=0, nesterov=False):
if not 0.0 <= lr:
raise ValueError("Invalid learning rate: {}".format(lr))
if not 0.0 <= weight_decay:
raise ValueError(
"Invalid weight_decay value: {}".format(weight_decay))
defaults = dict(lr=lr, momentum=momentum, dampening=dampening,
weight_decay=weight_decay, nesterov=nesterov)
if nesterov and (momentum <= 0 or dampening != 0):
raise ValueError(
"Nesterov momentum requires a momentum and zero dampening")
super(SGD, self).__init__(params, defaults)
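# Added note (not in the original): SGDNM only relaxes torch.optim.SGD's
# constructor so that a negative momentum value is accepted; the parameter
# update itself is inherited unchanged from SGD.step().  Hypothetical usage:
#
# opt = SGDNM(model.parameters(), lr=1e-2, momentum=-0.5)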
def get_dens_real(batch_size):
data = get_8gaussians(batch_size).next()
real = np.array(data.data)
kde_real = gaussian_kde(real.T, bw_method=0.22)
x, y = np.mgrid[-2:2:(200 * 1j), -2:2:(200 * 1j)]
z_real = kde_real((x.ravel(), y.ravel())).reshape(*x.shape)
return z_real
z_real = get_dens_real(1000)
def plot(fake, epoch):
plt.figure(figsize=(20, 9))
fake = np.array(fake.data)
kde_fake = gaussian_kde(fake.T, bw_method=0.22)
x, y = np.mgrid[-2:2:(200 * 1j), -2:2:(200 * 1j)]
z_fake = kde_fake((x.ravel(), y.ravel())).reshape(*x.shape)
ax1 = plt.subplot(1, 2, 1)
# ax1.hold(True)
ax1.pcolor(x, y, z_real)
ax2 = plt.subplot(1, 2, 2)
# ax2.hold(True)
ax2.pcolor(x, y, z_fake)
ax1.scatter(real[:, 0], real[:, 1])
ax2.scatter(fake[:, 0], fake[:, 1])
plt.savefig(save_directory + str(epoch) + '.png')
plt.close()
def plot_eigens(points):
fig, ax = plt.subplots()
ax.set_xlabel("real")
ax.set_ylabel("imaginaries")
reals = [p.real for p in points]
imaginaries = [p.imag for p in points]
plt.plot(reals, imaginaries, 'o', color='blue')
plt.grid()
fig.set_size_inches(10, 10)
plt.savefig('eigen_plot/eigen_init.png')
plt.close(fig)
dis = Dis()
gen = Gen()
# dis.load_state_dict(torch.load('models/dis_sat_mixed_sgd_0.01_0_1_'))
# gen.load_state_dict(torch.load('models/gen_sat_mixed_sgd_0.01_0_1_'))
dis.apply(weights_init)
gen.apply(weights_init)
if use_cuda:
dis = dis.cuda()
gen = gen.cuda()
if update_rule == 'adam':
dis_optimizer = Adam(dis.parameters(),
lr=learning_rate,
betas=(0.5, 0.9))
gen_optimizer = Adam(gen.parameters(),
lr=learning_rate,
betas=(0.5, 0.9))
elif update_rule == 'adam_plus':
dis_optimizer = AdamPlus(dis.parameters(),
lr=learning_rate,
betas=(momentum, 0.9))
gen_optimizer = AdamPlus(gen.parameters(),
lr=learning_rate,
betas=(momentum, 0.9))
elif update_rule == 'sgd':
dis_optimizer = SGD(dis.parameters(), lr=learning_rate)
gen_optimizer = SGD(gen.parameters(), lr=learning_rate)
elif update_rule == 'sgdnm':
dis_optimizer = SGDNM(
dis.parameters(), lr=learning_rate, momentum=momentum)
gen_optimizer = SGDNM(
gen.parameters(), lr=learning_rate, momentum=momentum)
elif update_rule == 'mixed_adam_plus':
dis_optimizer = AdamPlus(dis.parameters(),
lr=learning_rate,
betas=(momentum, 0.9))
gen_optimizer = Adam(gen.parameters(),
lr=learning_rate,
betas=(0.5, 0.9))
elif update_rule == 'mixed_sgd':
dis_optimizer = SGDNM(
dis.parameters(), lr=learning_rate, momentum=momentum)
gen_optimizer = SGD(gen.parameters(), lr=learning_rate)
one = torch.FloatTensor([1])
mone = one * -1
if use_cuda:
one = one.cuda()
mone = mone.cuda()
dataset = get_8gaussians(_batch_size)
criterion = nn.BCEWithLogitsLoss()
ones = Variable(torch.ones(_batch_size))
zeros = Variable(torch.zeros(_batch_size))
if use_cuda:
criterion = criterion.cuda()
ones = ones.cuda()
zeros = zeros.cuda()
gen_losses = []
dis_losses = []
points = []
for iteration in range(iterations):
for iter_d in range(dis_iter):
dis.zero_grad()
noise = torch.randn(_batch_size, z_dim)
if use_cuda:
noise = noise.cuda()
noise = autograd.Variable(noise)
real = dataset.next()
pred_real = criterion(dis(real), ones)
# fake = Variable(gen(noise).data)
fake = gen(noise)
pred_fake = criterion(dis(fake), zeros)
gradient_penalty = 0
        # import ipdb; ipdb.set_trace()  # stray debug breakpoint; keep disabled so the loop runs unattended
if mode == 'gp':
gradient_penalty = calc_gradient_penalty(
dis, real.data, fake.data)
pred_tot = pred_real + pred_fake + gradient_penalty
else:
pred_tot = pred_real + pred_fake
pred_tot.backward(create_graph=True, retain_graph=True)
# pred_tot.backward()
dis_loss = gradient_penalty + pred_real + pred_fake
dis_optimizer.step()
if iteration == 0:
dis_param = dis.parameters().next()
gen_param = gen.parameters()
for i in range(6):
gen_param.next()
gen_param = gen_param.next()
dis_param_grad = dis_param.grad.view(-1)
dis_grad_dis = torch.stack(
[torch.autograd.grad(dis_param_grad[p],
dis_param, create_graph=True, retain_graph=True)
[0].view(-1) for p in range(len(dis_param_grad))])
dis_grad_gen = torch.stack(
[torch.autograd.grad(dis_param_grad[p],
gen_param, create_graph=True, retain_graph=True)
[0].view(-1) for p in range(len(dis_param_grad))])
joc_dis = torch.cat((dis_grad_dis, dis_grad_gen), 1)
gen.zero_grad()
noise = torch.randn(_batch_size, z_dim)
ones = Variable(torch.ones(_batch_size))
zeros = Variable(torch.zeros(_batch_size))
if use_cuda:
noise = noise.cuda()
ones = ones.cuda()
zeros = zeros.cuda()
noise = autograd.Variable(noise)
fake = gen(noise)
if mode == 'sat':
gen_loss = -criterion(dis(fake), zeros)
else:
gen_loss = criterion(dis(fake), ones)
gen_loss.backward(create_graph=True, retain_graph=True)
gen_optimizer.step()
if iteration == 0:
gen_param_grad = gen_param.grad.view(-1)
gen_grad_gen = torch.stack(
[torch.autograd.grad(gen_param_grad[p],
gen_param, create_graph=True, retain_graph=True)
[0].view(-1) for p in range(len(gen_param_grad))])
gen_grad_dis = torch.stack(
[torch.autograd.grad(gen_param_grad[p],
dis_param, create_graph=True, retain_graph=True)
[0] for p in range(len(gen_param_grad))])
gen_grad_dis = gen_grad_dis.view(512, 512)
joc_gen = torch.cat((gen_grad_dis, gen_grad_gen), 1)
joc = torch.cat((joc_gen, joc_dis), 0)
eigvals = linalg.eigvals(joc.cpu().data.numpy())
points.append(eigvals)
if iteration % 250 == 0:
print("iteration: " + str(iteration) +
" gen_loss: " + str(float(gen_loss)) +
" dis_loss: " + str(float(dis_loss)))
gen_losses += [float(gen_loss)]
dis_losses += [float(dis_loss)]
if iteration < 5000:
freq = 500
elif iteration < 25000:
freq = 2000
elif iteration < 70000:
freq = 5000
else:
freq = 10000
if iteration % freq == 0:
noise = torch.randn(1000, z_dim)
if use_cuda:
noise = noise.cuda()
noise = autograd.Variable(noise)
fake_for_plot = gen(noise)
plot(fake_for_plot, iteration)
plot_eigens(points)
# torch.save(gen.state_dict(), save_directory + 'gen')
# torch.save(dis.state_dict(), save_directory + 'dis')
# np.save(save_directory + 'dis_losses', dis_losses)
# np.save(save_directory + 'gen_losses', gen_losses)
| 30.821002
| 77
| 0.598962
|
b446b3cebfbcde35897ae84f34d7209c56a3fed4
| 4,669
|
py
|
Python
|
lib/demo/statstools.py
|
uogbuji/akara
|
d308bf745458c5b54a203af25faebca5f48311ca
|
[
"Apache-2.0"
] | 1
|
2018-05-24T06:13:53.000Z
|
2018-05-24T06:13:53.000Z
|
lib/demo/statstools.py
|
uogbuji/akara
|
d308bf745458c5b54a203af25faebca5f48311ca
|
[
"Apache-2.0"
] | null | null | null |
lib/demo/statstools.py
|
uogbuji/akara
|
d308bf745458c5b54a203af25faebca5f48311ca
|
[
"Apache-2.0"
] | 1
|
2020-09-18T13:14:02.000Z
|
2020-09-18T13:14:02.000Z
|
# -*- encoding: utf-8 -*-
'''
See also:
'''
from __future__ import with_statement
import sys, time
import urllib, urlparse
import tempfile
import os
import re
import csv
import cgi
from cStringIO import StringIO
from gettext import gettext as _
from itertools import *
from functools import *
from subprocess import *
import amara
from amara.xslt import transform
from amara.xpath.util import simplify
from amara.bindery import html
from amara.lib.util import *
# Requires Python 2.6 or http://code.google.com/p/json/
from amara.thirdparty import json
import akara
from akara.services import simple_service
VAR_PAT = re.compile('VARIABLE\s+LABELS\s+(((\w+)\s+"([^"]+)"\s*)+)\.')
VAR_DEF_PAT = re.compile('(\w+)\s+"([^"]+)"')
VALUE_PAT = re.compile('VALUE\s+LABELS\s+((/(\w+)\s+(\'(\w+)\'\s+"([^"]+)"\s*)+)+)\.')
VALUE_DEF_SET_PAT = re.compile('/(\w+)\s+((\'(\w+)\'\s+"([^"]+)"\s*)+)')
VALUE_DEF_PAT = re.compile('\'(\w+)\'\s+"([^"]+)"')
VALUE_SET_TYPE = 'value_set'
VARIABLE_LABELS_TYPE = 'variable_labels'
VALUE_LABELS_TYPE = 'value_labels'
#R_SCRIPT = '''library(foreign)
#mydata <- read.spss(file='%s')
#write.csv2(mydata)
#'''
R_SCRIPT = '''library(Hmisc)
mydata <- spss.get(file='%s')
write.csv2(mydata)
'''
R_FILE_CMD = akara.module_config(__name__).get('r_command', 'r')
POR_REQUIRED = _("The 'POR' POST parameter is mandatory.")
SERVICE_ID = 'http://purl.org/akara/services/demo/spss.json'
@simple_service('POST', SERVICE_ID, 'spss.json', 'application/json')
def spss2json(body, ctype, **params):
'''
Uses GNU R to convert SPSS to JSON
Optionally tries to guess long labels from an original .SPS file
Requires POST body of multipart/form-data
Sample request:
curl -F "POR=@foo.por" http://localhost:8880/spss.json
curl -F "POR=@foo.por" -F "SPSS=@foo.sps" http://localhost:8880/spss.json
'''
#curl --request POST -F "POR=@lat506.por" -F "SPSS=@LAT506.SPS" http://labs.zepheira.com:8880/spss.json
#Useful:
# * [[http://wiki.math.yorku.ca/index.php/R:_Data_conversion_from_SPSS|R: Data conversion from SPSS]]
body = StringIO(body)
form = cgi.FieldStorage(fp=body, environ=WSGI_ENVIRON)
#for k in form:
# print >> sys.stderr, (k, form[k][:100])
por = form.getvalue('POR')
assert_not_equal(por, None, msg=POR_REQUIRED)
spss = form.getvalue('SPSS')
(items, varlabels, valuelabels) = parse_spss(por, spss)
for count, item in enumerate(items):
#print >> sys.stderr, row
item['id'] = item['label'] = '_' + str(count)
item['type'] = VALUE_SET_TYPE
return json.dumps({'items': items, VARIABLE_LABELS_TYPE: varlabels, VALUE_LABELS_TYPE: valuelabels}, indent=4)
def parse_spss(spss_por, spss_syntax=None):
'''
Uses GNU R to convert SPSS to a simple Python data structure
Optionally tries to guess long labels from an original .SPS file
'''
varlabels = {}
valuelabels = {}
if spss_syntax:
matchinfo = VAR_PAT.search(spss_syntax)
if matchinfo:
#print >> sys.stderr, matchinfo.groups
defns = matchinfo.group(1)
for defn in VAR_DEF_PAT.finditer(defns):
varlabels[defn.group(1)] = defn.group(2)
matchinfo = VALUE_PAT.search(spss_syntax)
defsets = matchinfo.group(1)
for defset in VALUE_DEF_SET_PAT.finditer(defsets):
valuelabelset = {}
for defn in VALUE_DEF_PAT.finditer(defset.group(2)):
valuelabelset[defn.group(1)] = defn.group(2)
valuelabels[defset.group(1)] = valuelabelset
#print >> sys.stderr, varlabels
#print >> sys.stderr, valuelabels
#print >> sys.stderr, por[:100]
#print >> sys.stderr, spss[:100]
temp = tempfile.mkstemp(suffix=".por")
os.write(temp[0], spss_por)
cmdline = R_FILE_CMD
process = Popen(cmdline, stdin=PIPE, stdout=PIPE, stderr=PIPE, universal_newlines=True, shell=True)
csvdata, perr = process.communicate(input=R_SCRIPT%temp[1])
os.close(temp[0])
os.remove(temp[1])
if not csvdata:
print >> sys.stderr, R_SCRIPT%temp[1]
print >> sys.stderr, perr
#FIXME: L10N
raise ValueError('Empty output from the command line. Probably a failure. Command line: "%s"'%cmdline)
def value(k, v):
if k in valuelabels and v in valuelabels[k]:
return valuelabels[k][v]
else:
return v
r_reader = csv.DictReader(csvdata.splitlines(), delimiter=';')
rows = [
dict(((k, value(k, v.strip())) for (k, v) in row.iteritems()))
for row in r_reader
]
return (rows, varlabels, valuelabels)
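# Added note (not in the original): the pipeline above is POR bytes -> temp
# .por file -> GNU R (Hmisc::spss.get) -> ';'-separated CSV on stdout (hence
# delimiter=';') -> list of row dicts, with coded values replaced by their
# long labels from the .SPS syntax file when one was supplied.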
| 31.33557
| 114
| 0.64532
|
66079fb1b5a8978813afc2b0ca918326b523f868
| 9,152
|
py
|
Python
|
mutatio/share/kivy-examples/widgets/tabbed_panel_showcase.py
|
SmatMan/mutatio
|
e6e24c45054321f9cb753499f0842ee8c8969c56
|
[
"MIT"
] | null | null | null |
mutatio/share/kivy-examples/widgets/tabbed_panel_showcase.py
|
SmatMan/mutatio
|
e6e24c45054321f9cb753499f0842ee8c8969c56
|
[
"MIT"
] | null | null | null |
mutatio/share/kivy-examples/widgets/tabbed_panel_showcase.py
|
SmatMan/mutatio
|
e6e24c45054321f9cb753499f0842ee8c8969c56
|
[
"MIT"
] | null | null | null |
'''
TabbedPanel
============
Test of the widget TabbedPanel showing all capabilities.
'''
from kivy.app import App
from kivy.animation import Animation
from kivy.uix.floatlayout import FloatLayout
from kivy.uix.tabbedpanel import TabbedPanel, TabbedPanelHeader
from kivy.factory import Factory
class StandingHeader(TabbedPanelHeader):
pass
class CloseableHeader(TabbedPanelHeader):
pass
Factory.register('StandingHeader', cls=StandingHeader)
Factory.register('CloseableHeader', cls=CloseableHeader)
from kivy.lang import Builder
Builder.load_string('''
<TabShowcase>
but: _but
Button:
id: _but
text: 'Press to show Tabbed Panel'
on_release: root.show_tab()
<StandingHeader>
color: 0,0,0,0
disabled_color: self.color
Scatter:
do_translation: False
do_scale: False
do_rotation: False
auto_bring_to_front: False
rotation: 70
size_hint: None, None
size: lbl.size
center_x: root.center_x
center_y: root.center_y
Label:
id: lbl
text: root.text
size: root.size
color: 1, 1, 1, .5 if self.disabled else 1
pos: 0,0
<PanelLeft>
size_hint: (.45, .45)
pos_hint: {'center_x': .25, 'y': .55}
# replace the default tab with our custom tab class
default_tab_cls: sh.__class__
do_default_tab: True
default_tab_content: default_content.__self__
tab_width: 40
tab_height: 70
FloatLayout:
RstDocument:
id: default_content
text: '\\n'.join(("Standing tabs", "-------------",\
"Tabs in \\'%s\\' position" %root.tab_pos))
Image:
id: tab_2_content
source: 'data/images/defaulttheme-0.png'
Image:
id: tab_3_content
source: 'data/images/image-loading.zip'
StandingHeader:
id: sh
content: tab_2_content.__self__
text: 'tab 2'
StandingHeader:
content: tab_3_content
text: 'tab 3'
<CloseableHeader>
color: 0,0,0,0
disabled_color: self.color
# variable tab_width
text: 'tabx'
size_hint_x: None
width: self.texture_size[0] + 40
BoxLayout:
pos: root.pos
size_hint: None, None
size: root.size
padding: 3
Label:
id: lbl
text: root.text
BoxLayout:
size_hint: None, 1
orientation: 'vertical'
width: 22
Image:
source: 'tools/theming/defaulttheme/close.png'
on_touch_down:
if self.collide_point(*args[1].pos) :\
root.panel.remove_widget(root)
<PanelRight>
tab_pos: 'top_right'
size_hint: (.45, .45)
pos_hint: {'center_x': .75, 'y': .55}
# replace the default tab with our custom tab
default_tab: def_tab
# allow variable tab width
tab_width: None
FloatLayout:
RstDocument:
id: default_content
text: '\\n'.join(("Closeable tabs", "---------------",\
"- The tabs above are also scrollable",\
"- Tabs in \\'%s\\' position" %root.tab_pos))
Image:
id: tab_2_content
source: 'data/images/defaulttheme-0.png'
BoxLayout:
id: tab_3_content
BubbleButton:
text: 'Press to add new tab'
on_release: root.add_header()
BubbleButton:
text: 'Press set this tab as default'
on_release: root.default_tab = tab3
CloseableHeader:
id: def_tab
text: 'default tab'
content:default_content.__self__
panel: root
CloseableHeader:
text: 'tab2'
content: tab_2_content.__self__
panel: root
CloseableHeader:
id: tab3
text: 'tab3'
content: tab_3_content.__self__
panel: root
CloseableHeader:
panel: root
CloseableHeader:
panel: root
CloseableHeader:
panel: root
CloseableHeader:
panel: root
CloseableHeader:
panel: root
CloseableHeader:
panel: root
CloseableHeader:
panel: root
<PanelbLeft>
tab_pos: 'bottom_left'
size_hint: (.45, .45)
pos_hint: {'center_x': .25, 'y': .02}
do_default_tab: False
TabbedPanelItem:
id: settings
text: 'Settings'
RstDocument:
text: '\\n'.join(("Normal tabs", "-------------",\
"Tabs in \\'%s\\' position" %root.tab_pos))
TabbedPanelItem:
text: 'tab2'
BubbleButton:
text: 'switch to settings'
on_press: root.switch_to(settings)
TabbedPanelItem:
text: 'tab3'
Image:
source: 'data/images/image-loading.zip'
<PanelbRight>
tab_pos: 'right_top'
size_hint: (.45, .45)
pos_hint: {'center_x': .75, 'y': .02}
default_tab: def_tab
tab_height: img.width
FloatLayout:
RstDocument:
id: default_content
text: '\\n'.join(("Image tabs","-------------",\
"1. Normal image tab","2. Image with Text","3. Rotated Image",\
"4. Tabs in \\'%s\\' position" %root.tab_pos))
Image:
id: tab_2_content
source: 'data/images/defaulttheme-0.png'
VideoPlayer:
id: tab_3_content
source: 'cityCC0.mpg'
TabbedPanelHeader:
id: def_tab
content:default_content.__self__
border: 0, 0, 0, 0
background_down: 'cityCC0.png'
background_normal:'sequenced_images/data/images/info.png'
TabbedPanelHeader:
id: tph
content: tab_2_content.__self__
BoxLayout:
pos: tph.pos
size: tph.size
orientation: 'vertical'
Image:
source: 'sequenced_images/data/images/info.png'\
if tph.state == 'normal' else 'cityCC0.png'
Label:
text: 'text & img'
TabbedPanelHeader:
id: my_header
content: tab_3_content.__self__
Scatter:
do_translation: False
do_scale: False
do_rotation: False
auto_bring_to_front: False
rotation: 90
size_hint: None, None
size: img.size
center: my_header.center
Image:
id: img
source: 'sequenced_images/data/images/info.png'\
if my_header.state == 'normal' else 'cityCC0.png'
size: my_header.size
allow_stretch: True
keep_ratio: False
''')
class Tp(TabbedPanel):
# override tab switching method to animate on tab switch
def switch_to(self, header):
anim = Animation(opacity=0, d=.24, t='in_out_quad')
def start_anim(_anim, child, in_complete, *lt):
_anim.start(child)
def _on_complete(*lt):
if header.content:
header.content.opacity = 0
anim = Animation(opacity=1, d=.43, t='in_out_quad')
start_anim(anim, header.content, True)
super(Tp, self).switch_to(header)
anim.bind(on_complete=_on_complete)
if self.current_tab.content:
start_anim(anim, self.current_tab.content, False)
else:
_on_complete()
class PanelLeft(Tp):
pass
class PanelRight(Tp):
def add_header(self):
self.add_widget(CloseableHeader(panel=self))
class PanelbLeft(Tp):
pass
class PanelbRight(Tp):
pass
class TabShowcase(FloatLayout):
def show_tab(self):
if not hasattr(self, 'tab'):
self.tab = tab = PanelLeft()
self.add_widget(tab)
self.tab1 = tab = PanelRight()
self.add_widget(tab)
self.tab2 = tab = PanelbRight()
self.add_widget(tab)
self.tab3 = tab = PanelbLeft()
self.add_widget(tab)
self.but.text = \
'Tabs in variable positions, press to change to top_left'
else:
values = ('left_top', 'left_mid', 'left_bottom', 'top_left',
'top_mid', 'top_right', 'right_top', 'right_mid',
'right_bottom', 'bottom_left', 'bottom_mid', 'bottom_right')
index = values.index(self.tab.tab_pos)
self.tab.tab_pos = self.tab1.tab_pos = self.tab2.tab_pos\
= self.tab3.tab_pos = values[(index + 1) % len(values)]
self.but.text = 'Tabs in \'%s\' position,' % self.tab.tab_pos\
+ '\n press to change to next pos'
class TestTabApp(App):
def build(self):
return TabShowcase()
if __name__ == '__main__':
TestTabApp().run()
| 28.779874
| 80
| 0.542177
|
4698200abf52958b493379a5c57599f1ff3c0ff5
| 16,331
|
py
|
Python
|
qiskit/opflow/primitive_ops/pauli_sum_op.py
|
ewinston/qiskit-sdk-py
|
4d64125aba4ff31f15d0054b90437bcef352782e
|
[
"Apache-2.0"
] | null | null | null |
qiskit/opflow/primitive_ops/pauli_sum_op.py
|
ewinston/qiskit-sdk-py
|
4d64125aba4ff31f15d0054b90437bcef352782e
|
[
"Apache-2.0"
] | 2
|
2022-03-30T10:09:44.000Z
|
2022-03-30T10:09:45.000Z
|
qiskit/opflow/primitive_ops/pauli_sum_op.py
|
ewinston/qiskit-sdk-py
|
4d64125aba4ff31f15d0054b90437bcef352782e
|
[
"Apache-2.0"
] | null | null | null |
# This code is part of Qiskit.
#
# (C) Copyright IBM 2020.
#
# This code is licensed under the Apache License, Version 2.0. You may
# obtain a copy of this license in the LICENSE.txt file in the root directory
# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
#
# Any modifications or derivative works of this code must retain this
# copyright notice, and modified files need to carry a notice indicating
# that they have been altered from the originals.
"""PauliSumOp Class """
from collections import defaultdict
from typing import Dict, List, Optional, Set, Tuple, Union, cast
import numpy as np
from scipy.sparse import spmatrix
from qiskit.circuit import Instruction, ParameterExpression
from qiskit.opflow.exceptions import OpflowError
from qiskit.opflow.list_ops.summed_op import SummedOp
from qiskit.opflow.list_ops.tensored_op import TensoredOp
from qiskit.opflow.operator_base import OperatorBase
from qiskit.opflow.primitive_ops.pauli_op import PauliOp
from qiskit.opflow.primitive_ops.primitive_op import PrimitiveOp
from qiskit.quantum_info import Pauli, SparsePauliOp, Statevector
from qiskit.quantum_info.operators.custom_iterator import CustomIterator
from qiskit.quantum_info.operators.symplectic.pauli_table import PauliTable
class PauliSumOp(PrimitiveOp):
"""Class for Operators backend by Terra's ``SparsePauliOp`` class."""
primitive: SparsePauliOp
def __init__(
self,
primitive: SparsePauliOp,
coeff: Union[complex, ParameterExpression] = 1.0,
grouping_type: str = "None",
) -> None:
"""
Args:
primitive: The SparsePauliOp which defines the behavior of the underlying function.
coeff: A coefficient multiplying the primitive.
grouping_type: The type of grouping. If None, the operator is not grouped.
Raises:
TypeError: invalid parameters.
"""
if not isinstance(primitive, SparsePauliOp):
raise TypeError(
f"PauliSumOp can only be instantiated with SparsePauliOp, not {type(primitive)}"
)
super().__init__(primitive, coeff=coeff)
self._grouping_type = grouping_type
def primitive_strings(self) -> Set[str]:
return {"SparsePauliOp"}
@property
def grouping_type(self) -> str:
"""
Returns: Type of Grouping
"""
return self._grouping_type
@property
def num_qubits(self) -> int:
return self.primitive.num_qubits
@property
def coeffs(self):
"""Return the Pauli coefficients."""
return self.coeff * self.primitive.coeffs
def matrix_iter(self, sparse=False):
"""Return a matrix representation iterator.
This is a lazy iterator that converts each term in the PauliSumOp
into a matrix as it is used. To convert to a single matrix use the
:meth:`to_matrix` method.
Args:
sparse (bool): optionally return sparse CSR matrices if True,
otherwise return Numpy array matrices
(Default: False)
Returns:
MatrixIterator: matrix iterator object for the PauliTable.
"""
class MatrixIterator(CustomIterator):
"""Matrix representation iteration and item access."""
def __repr__(self):
return "<PauliSumOp_matrix_iterator at {}>".format(hex(id(self)))
def __getitem__(self, key):
sumopcoeff = self.obj.coeff * self.obj.primitive.coeffs[key]
mat = PauliTable._to_matrix(self.obj.primitive.table.array[key], sparse=sparse)
return sumopcoeff * mat
return MatrixIterator(self)
def add(self, other: OperatorBase) -> OperatorBase:
if not self.num_qubits == other.num_qubits:
raise ValueError(
f"Sum of operators with different numbers of qubits, {self.num_qubits} and "
f"{other.num_qubits}, is not well defined"
)
if isinstance(other, PauliSumOp):
return PauliSumOp(self.coeff * self.primitive + other.coeff * other.primitive, coeff=1)
if isinstance(other, PauliOp):
return PauliSumOp(
self.coeff * self.primitive + other.coeff * SparsePauliOp(other.primitive)
)
return SummedOp([self, other])
def mul(self, scalar: Union[complex, ParameterExpression]) -> OperatorBase:
if isinstance(scalar, (int, float, complex)) and scalar != 0:
return PauliSumOp(scalar * self.primitive, coeff=self.coeff)
return super().mul(scalar)
def adjoint(self) -> "PauliSumOp":
return PauliSumOp(self.primitive.adjoint(), coeff=self.coeff.conjugate())
def equals(self, other: OperatorBase) -> bool:
self_reduced, other_reduced = self.reduce(), other.reduce()
if not isinstance(other_reduced, PauliSumOp):
return False
if isinstance(self_reduced.coeff, ParameterExpression) or isinstance(
other_reduced.coeff, ParameterExpression
):
return (
self_reduced.coeff == other_reduced.coeff
and self_reduced.primitive == other_reduced.primitive
)
return (
len(self_reduced) == len(other_reduced)
and self_reduced.primitive == other_reduced.primitive
)
def _expand_dim(self, num_qubits: int) -> "PauliSumOp":
return PauliSumOp(
self.primitive.tensor(SparsePauliOp(Pauli("I" * num_qubits))),
coeff=self.coeff,
)
def tensor(self, other: OperatorBase) -> Union["PauliSumOp", TensoredOp]:
if isinstance(other, PauliSumOp):
return PauliSumOp(
self.primitive.tensor(other.primitive),
coeff=self.coeff * other.coeff,
)
return TensoredOp([self, other])
def permute(self, permutation: List[int]) -> "PauliSumOp":
"""Permutes the sequence of ``PauliSumOp``.
Args:
permutation: A list defining where each Pauli should be permuted. The Pauli at index
j of the primitive should be permuted to position permutation[j].
Returns:
A new PauliSumOp representing the permuted operator. For operator (X ^ Y ^ Z) and
indices=[1,2,4], it returns (X ^ I ^ Y ^ Z ^ I).
Raises:
OpflowError: if indices do not define a new index for each qubit.
"""
if len(permutation) != self.num_qubits:
raise OpflowError(
"List of indices to permute must have the " "same size as Pauli Operator"
)
length = max(permutation) + 1
spop = self.primitive.tensor(SparsePauliOp(Pauli("I" * (length - self.num_qubits))))
permutation = [i for i in range(length) if i not in permutation] + permutation
permu_arr = np.arange(length)[np.argsort(permutation)]
permu_arr = np.hstack([permu_arr, permu_arr + length])
spop.table.array = spop.table.array[:, permu_arr]
return PauliSumOp(spop, self.coeff)
def compose(
self,
other: OperatorBase,
permutation: Optional[List[int]] = None,
front: bool = False,
) -> OperatorBase:
new_self, other = self._expand_shorter_operator_and_permute(other, permutation)
new_self = cast(PauliSumOp, new_self)
if front:
return other.compose(new_self)
# If self is identity, just return other.
if not np.any(new_self.primitive.table.array):
return other * new_self.coeff * sum(new_self.primitive.coeffs)
# Both PauliSumOps
if isinstance(other, PauliSumOp):
return PauliSumOp(
new_self.primitive * other.primitive,
coeff=new_self.coeff * other.coeff,
)
if isinstance(other, PauliOp):
other_primitive = SparsePauliOp(other.primitive)
return PauliSumOp(
new_self.primitive * other_primitive,
coeff=new_self.coeff * other.coeff,
)
# pylint: disable=cyclic-import
from ..state_fns.circuit_state_fn import CircuitStateFn
from .circuit_op import CircuitOp
if isinstance(other, (CircuitOp, CircuitStateFn)):
pauli_op = cast(Union[PauliOp, SummedOp], new_self.to_pauli_op())
return pauli_op.to_circuit_op().compose(other)
return super(PauliSumOp, new_self).compose(other)
def to_matrix(self, massive: bool = False) -> np.ndarray:
OperatorBase._check_massive("to_matrix", True, self.num_qubits, massive)
if isinstance(self.coeff, ParameterExpression):
return (self.primitive.to_matrix(sparse=True)).toarray() * self.coeff
return (self.primitive.to_matrix(sparse=True) * self.coeff).toarray()
def __str__(self) -> str:
def format_sign(x):
return x.real if np.isreal(x) else x
def format_number(x):
x = format_sign(x)
if isinstance(x, (int, float)) and x < 0:
return f"- {-x}"
return f"+ {x}"
indent = "" if self.coeff == 1 else " "
prim_list = self.primitive.to_list()
if prim_list:
first = prim_list[0]
if isinstance(first[1], (int, float)) and first[1] < 0:
main_string = indent + f"- {-first[1].real} * {first[0]}"
else:
main_string = indent + f"{format_sign(first[1])} * {first[0]}"
main_string += "".join([f"\n{indent}{format_number(c)} * {p}" for p, c in prim_list[1:]])
return f"{main_string}" if self.coeff == 1 else f"{self.coeff} * (\n{main_string}\n)"
def eval(
self,
front: Optional[
Union[str, Dict[str, complex], np.ndarray, OperatorBase, Statevector]
] = None,
) -> Union[OperatorBase, complex]:
if front is None:
return self.to_matrix_op()
# pylint: disable=cyclic-import
from ..list_ops.list_op import ListOp
from ..state_fns.circuit_state_fn import CircuitStateFn
from ..state_fns.dict_state_fn import DictStateFn
from ..state_fns.state_fn import StateFn
from .circuit_op import CircuitOp
# For now, always do this. If it's not performant, we can be more granular.
if not isinstance(front, OperatorBase):
front = StateFn(front, is_measurement=False)
if isinstance(front, ListOp) and front.distributive:
return front.combo_fn(
[self.eval(front.coeff * front_elem) for front_elem in front.oplist]
)
else:
if self.num_qubits != front.num_qubits:
raise ValueError(
"eval does not support operands with differing numbers of qubits, "
"{} and {}, respectively.".format(self.num_qubits, front.num_qubits)
)
if isinstance(front, DictStateFn):
new_dict: Dict[str, int] = defaultdict(int)
corrected_x_bits = self.primitive.table.X[::, ::-1]
corrected_z_bits = self.primitive.table.Z[::, ::-1]
coeffs = self.primitive.coeffs
for bstr, v in front.primitive.items():
bitstr = np.fromiter(bstr, dtype=int).astype(bool)
new_b_str = np.logical_xor(bitstr, corrected_x_bits)
new_str = ["".join([str(b) for b in bs]) for bs in new_b_str.astype(int)]
z_factor = np.product(1 - 2 * np.logical_and(bitstr, corrected_z_bits), axis=1)
y_factor = np.product(
np.sqrt(1 - 2 * np.logical_and(corrected_x_bits, corrected_z_bits) + 0j),
axis=1,
)
for i, n_str in enumerate(new_str):
new_dict[n_str] += v * z_factor[i] * y_factor[i] * coeffs[i]
return DictStateFn(new_dict, coeff=self.coeff * front.coeff)
elif isinstance(front, StateFn) and front.is_measurement:
raise ValueError("Operator composed with a measurement is undefined.")
# Composable types with PauliOp
elif isinstance(front, (PauliSumOp, PauliOp, CircuitOp, CircuitStateFn)):
return self.compose(front).eval()
# Covers VectorStateFn and OperatorStateFn
front = cast(StateFn, front)
return self.to_matrix_op().eval(front.to_matrix_op())
def exp_i(self) -> OperatorBase:
""" Return a ``CircuitOp`` equivalent to e^-iH for this operator H. """
# TODO: optimize for some special cases
from ..evolutions.evolved_op import EvolvedOp
return EvolvedOp(self)
def to_instruction(self) -> Instruction:
return self.to_matrix_op().to_circuit().to_instruction() # type: ignore
def to_pauli_op(self, massive: bool = False) -> Union[PauliOp, SummedOp]:
def to_native(x):
return x.item() if isinstance(x, np.generic) else x
if len(self.primitive) == 1:
return PauliOp(
Pauli((self.primitive.table.Z[0], self.primitive.table.X[0])),
to_native(np.real_if_close(self.primitive.coeffs[0])) * self.coeff,
)
tables = self.primitive.table
coeffs = np.real_if_close(self.primitive.coeffs)
return SummedOp(
[
PauliOp(
Pauli((t.Z[0], t.X[0])),
to_native(c),
)
for t, c in zip(tables, coeffs)
],
coeff=self.coeff,
)
def __getitem__(self, offset: Union[int, slice]) -> "PauliSumOp":
"""Allows array-indexing style access to the ``PauliSumOp``.
Args:
offset: The index of ``PauliSumOp``.
Returns:
The ``PauliSumOp`` at index ``offset``.
"""
return PauliSumOp(self.primitive[offset], self.coeff)
def __iter__(self):
for i in range(len(self)):
yield self[i]
def __len__(self) -> int:
"""Length of ``SparsePauliOp``.
Returns:
An int equal to the length of SparsePauliOp.
"""
return len(self.primitive)
# pylint: disable=arguments-differ
def reduce(self, atol: Optional[float] = None, rtol: Optional[float] = None) -> "PauliSumOp":
"""Simplify the primitive ``SparsePauliOp``.
Args:
atol: Absolute tolerance for checking if coefficients are zero (Default: 1e-8).
rtol: Relative tolerance for checking if coefficients are zero (Default: 1e-5).
Returns:
The simplified ``PauliSumOp``.
"""
if isinstance(self.coeff, (int, float, complex)):
primitive = self.coeff * self.primitive
return PauliSumOp(primitive.simplify(atol=atol, rtol=rtol))
return PauliSumOp(self.primitive.simplify(atol=atol, rtol=rtol), self.coeff)
def to_spmatrix(self) -> spmatrix:
"""Returns SciPy sparse matrix representation of the ``PauliSumOp``.
Returns:
CSR sparse matrix representation of the ``PauliSumOp``.
Raises:
ValueError: invalid parameters.
"""
return self.primitive.to_matrix(sparse=True) * self.coeff
@classmethod
def from_list(
cls,
pauli_list: List[Tuple[str, Union[complex]]],
coeff: Union[complex, ParameterExpression] = 1.0,
) -> "PauliSumOp":
"""Construct from a pauli_list with the form [(pauli_str, coeffs)]
Args:
pauli_list: A list of Tuple of pauli_str and coefficient.
coeff: A coefficient multiplying the primitive.
Returns:
The PauliSumOp constructed from the pauli_list.
"""
return cls(SparsePauliOp.from_list(pauli_list), coeff=coeff)
def is_zero(self) -> bool:
"""
Return whether this operator is the zero operator.
"""
op = self.reduce()
primitive: SparsePauliOp = op.primitive
return op.coeff == 1 and len(op) == 1 and primitive.coeffs[0] == 0
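# --- Hedged usage sketch (not part of the original Qiskit module) ---
# A small example of the PauliSumOp class above: build a two-qubit operator
# from Pauli-string/coefficient pairs, add it to itself, simplify, and
# compose. It only uses methods defined in this file and assumes qiskit is
# installed.
if __name__ == "__main__":
    op = PauliSumOp.from_list([("XX", 1.0), ("ZZ", 0.5)])
    doubled = op.add(op)                       # PauliSumOp + PauliSumOp
    print(doubled.reduce())                    # merges the duplicated terms
    print(op.compose(op).to_matrix().shape)    # dense (4, 4) matrix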
| 38.067599
| 99
| 0.609454
|
4d67acac46cf6b233c195f30d725fed3d2f2763d
| 5,402
|
py
|
Python
|
sdk/compute/azure-mgmt-compute/azure/mgmt/compute/v2016_04_30_preview/operations/_usage_operations.py
|
vbarbaresi/azure-sdk-for-python
|
397ba46c51d001ff89c66b170f5576cf8f49c05f
|
[
"MIT"
] | 8
|
2021-01-13T23:44:08.000Z
|
2021-03-17T10:13:36.000Z
|
sdk/compute/azure-mgmt-compute/azure/mgmt/compute/v2016_04_30_preview/operations/_usage_operations.py
|
vbarbaresi/azure-sdk-for-python
|
397ba46c51d001ff89c66b170f5576cf8f49c05f
|
[
"MIT"
] | null | null | null |
sdk/compute/azure-mgmt-compute/azure/mgmt/compute/v2016_04_30_preview/operations/_usage_operations.py
|
vbarbaresi/azure-sdk-for-python
|
397ba46c51d001ff89c66b170f5576cf8f49c05f
|
[
"MIT"
] | null | null | null |
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import TYPE_CHECKING
import warnings
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.paging import ItemPaged
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import HttpRequest, HttpResponse
from azure.mgmt.core.exceptions import ARMErrorFormat
from .. import models
if TYPE_CHECKING:
# pylint: disable=unused-import,ungrouped-imports
from typing import Any, Callable, Dict, Generic, Iterable, Optional, TypeVar
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
class UsageOperations(object):
"""UsageOperations operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.compute.v2016_04_30_preview.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = models
def __init__(self, client, config, serializer, deserializer):
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
def list(
self,
location, # type: str
**kwargs # type: Any
):
# type: (...) -> Iterable["models.ListUsagesResult"]
"""Gets, for the specified location, the current compute resource usage information as well as the
limits for compute resources under the subscription.
:param location: The location for which resource usage is queried.
:type location: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either ListUsagesResult or the result of cls(response)
:rtype: ~azure.core.paging.ItemPaged[~azure.mgmt.compute.v2016_04_30_preview.models.ListUsagesResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["models.ListUsagesResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2016-04-30-preview"
accept = "application/json"
def prepare_request(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
# Construct URL
url = self.list.metadata['url'] # type: ignore
path_format_arguments = {
'location': self._serialize.url("location", location, 'str', pattern=r'^[-\w\._]+$'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
request = self._client.get(url, query_parameters, header_parameters)
return request
def extract_data(pipeline_response):
deserialized = self._deserialize('ListUsagesResult', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, iter(list_of_elem)
def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return ItemPaged(
get_next, extract_data
)
list.metadata = {'url': '/subscriptions/{subscriptionId}/providers/Microsoft.Compute/locations/{location}/usages'} # type: ignore
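# --- Hedged usage sketch (not part of the generated SDK file) ---
# One plausible way to reach this operation group is via the versioned
# ComputeManagementClient; the credential, subscription id and location below
# are placeholders, and exact import paths can differ between SDK releases.
if __name__ == "__main__":
    from azure.identity import DefaultAzureCredential  # assumed available
    from azure.mgmt.compute import ComputeManagementClient

    client = ComputeManagementClient(DefaultAzureCredential(), "<subscription-id>")
    for usage in client.usage.list("eastus"):           # ItemPaged, fetched lazily
        print(usage.name.value, usage.current_value, usage.limit)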
| 45.394958
| 134
| 0.653277
|
31dc6d15ec939d5bf40c53cedc738344a7bd965a
| 1,394
|
py
|
Python
|
Python Control Flow: Medical Insurance Project/script.py
|
dilanozer/Codecademy_DataScientist
|
253a77d63ba45194e1c5e55723d0048863a57e7d
|
[
"MIT"
] | null | null | null |
Python Control Flow: Medical Insurance Project/script.py
|
dilanozer/Codecademy_DataScientist
|
253a77d63ba45194e1c5e55723d0048863a57e7d
|
[
"MIT"
] | null | null | null |
Python Control Flow: Medical Insurance Project/script.py
|
dilanozer/Codecademy_DataScientist
|
253a77d63ba45194e1c5e55723d0048863a57e7d
|
[
"MIT"
] | null | null | null |
# Add your code here
def analyze_smoker(smoker_status):
if smoker_status == 1:
print("To lower your cost, you should consider quitting smoking.")
else:
print("Smoking is not an issue for you.")
def analyze_bmi(bmi_value):
if bmi_value > 30:
print("Your BMI is in the obese range. To lower your cost, you should significantly lower your BMI.")
elif bmi_value >= 25 and bmi_value <= 30:
print("Your BMI is in the overweight range. To lower your cost, you should lower your BMI.")
elif bmi_value >= 18.5 and bmi_value < 25:
print("Your BMI is in a healthy range.")
else:
print("Your BMI is in the underweight range. Increasing your BMI will not help lower your cost, but it will help improve your health.")
# Function to estimate insurance cost:
def estimate_insurance_cost(name, age, sex, bmi, num_of_children, smoker):
estimated_cost = 250*age - 128*sex + 370*bmi + 425*num_of_children + 24000*smoker - 12500
print(name + "'s Estimated Insurance Cost: " + str(estimated_cost) + " dollars.")
analyze_smoker(smoker)
analyze_bmi(bmi)
return estimated_cost
# Estimate Keanu's insurance cost
keanu_insurance_cost = estimate_insurance_cost(name = 'Keanu', age = 29, sex = 1, bmi = 26.2, num_of_children = 3, smoker = 1)
dilan_insurance_cost = estimate_insurance_cost(name = "Dilan", age = 23, sex = 0, bmi = 20.1, num_of_children = 0, smoker = 0)
| 48.068966
| 139
| 0.719512
|
bc10cd97de34a1cc78f77b5ebfb1178689ee4f77
| 8,575
|
py
|
Python
|
app/user/utils.py
|
CityOfNewYork/NYCOpenRecords
|
476a236a573e6f3a2f96c6537a30ee27b2bd3a2b
|
[
"Apache-2.0"
] | 37
|
2016-01-21T18:33:56.000Z
|
2021-10-24T01:43:20.000Z
|
app/user/utils.py
|
CityOfNewYork/NYCOpenRecords
|
476a236a573e6f3a2f96c6537a30ee27b2bd3a2b
|
[
"Apache-2.0"
] | 179
|
2016-01-21T21:33:31.000Z
|
2022-02-15T21:31:35.000Z
|
app/user/utils.py
|
CityOfNewYork/NYCOpenRecords
|
476a236a573e6f3a2f96c6537a30ee27b2bd3a2b
|
[
"Apache-2.0"
] | 13
|
2017-05-19T17:27:31.000Z
|
2020-07-05T00:55:29.000Z
|
from datetime import datetime
from elasticsearch.helpers import bulk
from flask import current_app
from psycopg2 import OperationalError
from sqlalchemy.exc import SQLAlchemyError
from sqlalchemy.orm import joinedload
from app import (
celery,
db,
es
)
from app.constants import (bulk_updates, event_type, role_name, user_type_request)
from app.lib.email_utils import (
get_agency_admin_emails,
send_email,
)
from app.models import (Agencies, Events, Requests, Roles, UserRequests, Users)
@celery.task(bind=True, name='app.user.utils.make_user_admin', autoretry_for=(OperationalError, SQLAlchemyError,),
retry_kwargs={'max_retries': 5}, retry_backoff=True)
def make_user_admin(self, modified_user_guid: str, current_user_guid: str, agency_ein: str):
"""
Make the specified user an admin for the agency.
Args:
modified_user_guid (str): GUID of the user to be modified
current_user_guid (str): GUID of the current user
agency_ein (str): Agency the user is being added to
Returns:
"""
permissions = Roles.query.filter_by(name=role_name.AGENCY_ADMIN).one().permissions
user = Users.query.filter_by(guid=modified_user_guid).one()
requests = [request.id for request in user.agencies.filter_by(ein=agency_ein).one().requests]
new_user_requests = []
new_user_requests_events = []
update_user_requests = []
update_user_requests_events = []
for request in requests:
existing_value = UserRequests.query.filter_by(request_id=request, user_guid=user.guid).one_or_none()
if existing_value and existing_value.permissions != permissions:
user_request = bulk_updates.UserRequestsDict(
user_guid=user.guid,
request_id=request,
request_user_type=user_type_request.AGENCY,
permissions=permissions,
point_of_contact=existing_value.point_of_contact
)
update_user_requests.append(user_request)
previous_value = {
'user_guid': modified_user_guid,
'permissions': existing_value.permissions
}
new_value = {
'user_guid': modified_user_guid,
'permissions': permissions
}
user_request_event = bulk_updates.UserRequestsEventDict(
request_id=request,
user_guid=user.guid,
response_id=None,
type=event_type.USER_PERM_CHANGED,
timestamp=datetime.utcnow(),
previous_value=previous_value,
new_value=new_value,
)
update_user_requests_events.append(user_request_event)
elif existing_value is None:
user_request = bulk_updates.UserRequestsDict(
user_guid=user.guid,
request_id=request,
request_user_type=user_type_request.AGENCY,
permissions=permissions,
point_of_contact=None
)
new_user_requests.append(user_request)
new_value = {
'user_guid': user.guid,
'request_id': request,
'request_user_type': user_type_request.AGENCY,
'permissions': permissions,
'point_of_contact': None
}
user_request_event = bulk_updates.UserRequestsEventDict(
request_id=request,
user_guid=current_user_guid,
response_id=None,
type=event_type.USER_ADDED,
timestamp=datetime.utcnow(),
previous_value=None,
new_value=new_value
)
new_user_requests_events.append(user_request_event)
try:
UserRequests.query.filter(UserRequests.user_guid == user.guid).update([('permissions', permissions)])
db.session.bulk_insert_mappings(Events, update_user_requests_events)
db.session.bulk_insert_mappings(UserRequests, new_user_requests)
db.session.bulk_insert_mappings(Events, new_user_requests_events)
db.session.commit()
agency = Agencies.query.filter_by(ein=agency_ein).one()
admin_users = get_agency_admin_emails(agency)
es_update_assigned_users.apply_async(args=[requests])
send_email(
subject='User {name} Made Admin'.format(name=user.name),
to=admin_users,
template='email_templates/email_user_made_agency_admin',
agency_name=agency.name,
name=user.name
)
except SQLAlchemyError:
db.session.rollback()
@celery.task(bind=True, name='app.user.utils.remove_user_permissions',
autoretry_for=(OperationalError, SQLAlchemyError,), retry_kwargs={'max_retries': 5}, retry_backoff=True)
def remove_user_permissions(self, modified_user_guid: str, current_user_guid: str, agency_ein: str, action: str = None):
"""
Remove the specified user's permissions for the agency identified by agency_ein.
Args:
modified_user_guid (str): GUID of the user to be modified
current_user_guid (str): GUID of the current user
agency_ein (str): Agency the user is being removed from
Returns:
"""
user_requests = db.session.query(UserRequests, Requests).join(Requests).with_entities(
UserRequests.request_id, UserRequests.permissions, UserRequests.point_of_contact).filter(
Requests.agency_ein == agency_ein, UserRequests.user_guid == modified_user_guid).all()
request_ids = [ur.request_id for ur in user_requests]
user = Users.query.filter_by(guid=modified_user_guid).one()
remove_user_request_events = [bulk_updates.UserRequestsEventDict(
request_id=ur.request_id,
user_guid=current_user_guid,
response_id=None,
type=event_type.USER_REMOVED,
timestamp=datetime.utcnow(),
previous_value={
'user_guid': modified_user_guid,
'permissions': ur.permissions,
'point_of_contact': ur.point_of_contact
},
new_value={
'user_guid': modified_user_guid,
'point_of_contact': False
}
) for ur in user_requests]
try:
db.session.query(UserRequests).filter(UserRequests.user_guid == modified_user_guid,
UserRequests.request_id.in_(request_ids)).delete(
synchronize_session=False)
db.session.bulk_insert_mappings(Events, remove_user_request_events)
db.session.commit()
es_update_assigned_users.apply_async(args=[request_ids])
agency = Agencies.query.filter_by(ein=agency_ein).one()
admin_users = get_agency_admin_emails(agency)
if action == event_type.AGENCY_USER_DEACTIVATED:
send_email(
subject='User {name} Deactivated'.format(name=user.name),
to=admin_users,
template='email_templates/email_agency_user_deactivated',
agency_name=agency.name,
name=user.name
)
elif action == event_type.USER_MADE_AGENCY_USER:
send_email(
subject='User {name} Made Regular User'.format(name=user.name),
to=admin_users,
template='email_templates/email_user_removed_agency_admin',
agency_name=agency.name,
name=user.name
)
except SQLAlchemyError:
db.session.rollback()
@celery.task(bind=True, name='app.user.utils.es_update_assigned_users',
autoretry_for=(OperationalError, SQLAlchemyError,), retry_kwargs={'max_retries': 5}, retry_backoff=True)
def es_update_assigned_users(self, request_ids: list):
"""
Update the ElasticSearch index assigned_users for the provided request IDs.
Args:
request_ids (list): List of Request IDs
"""
try:
actions = [{
'_op_type': 'update',
'_id': request.id,
'doc': {
'assigned_users': [user.get_id() for user in request.agency_users]
}
} for request in
Requests.query.filter(Requests.id.in_(request_ids)).options(joinedload(Requests.agency_users)).all()]
except SQLAlchemyError:
        db.session.rollback()
        raise  # re-raise so Celery autoretry handles it and `bulk` below never sees an undefined `actions`
bulk(
es,
actions,
index=current_app.config['ELASTICSEARCH_INDEX'],
doc_type='request',
chunk_size=current_app.config['ELASTICSEARCH_CHUNK_SIZE']
)
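# --- Hedged usage sketch (not part of the original module) ---
# The functions above are Celery tasks, so callers queue them with
# apply_async instead of calling them directly (mirroring how
# es_update_assigned_users is dispatched inside this module); the GUIDs and
# agency EIN below are placeholders.
def _example_dispatch():
    make_user_admin.apply_async(
        args=['modified-user-guid', 'current-user-guid', '0002'])
    remove_user_permissions.apply_async(
        args=['modified-user-guid', 'current-user-guid', '0002',
              event_type.USER_MADE_AGENCY_USER])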
| 37.609649
| 120
| 0.645248
|
9d8431a4f0b2fdb3400ce469604bea9a6d99ea9a
| 11,513
|
py
|
Python
|
networkapi/api_network/views/v3/networkv4.py
|
vinicius-marinho/GloboNetworkAPI
|
94651d3b4dd180769bc40ec966814f3427ccfb5b
|
[
"Apache-2.0"
] | 73
|
2015-04-13T17:56:11.000Z
|
2022-03-24T06:13:07.000Z
|
networkapi/api_network/views/v3/networkv4.py
|
leopoldomauricio/GloboNetworkAPI
|
3b5b2e336d9eb53b2c113977bfe466b23a50aa29
|
[
"Apache-2.0"
] | 99
|
2015-04-03T01:04:46.000Z
|
2021-10-03T23:24:48.000Z
|
networkapi/api_network/views/v3/networkv4.py
|
shildenbrand/GloboNetworkAPI
|
515d5e961456cee657c08c275faa1b69b7452719
|
[
"Apache-2.0"
] | 64
|
2015-08-05T21:26:29.000Z
|
2022-03-22T01:06:28.000Z
|
# -*- coding: utf-8 -*-
import logging
from django.db.transaction import commit_on_success
from rest_framework import status
from rest_framework.permissions import IsAuthenticated
from rest_framework.response import Response
from networkapi.api_equipment import permissions as perm_eqpt
from networkapi.api_network import permissions
from networkapi.api_network import tasks
from networkapi.api_network.facade import v3 as facade
from networkapi.api_network.serializers import v3 as serializers
from networkapi.settings import SPECS
from networkapi.util.classes import CustomAPIView
from networkapi.util.decorators import logs_method_apiview
from networkapi.util.decorators import permission_classes_apiview
from networkapi.util.decorators import permission_obj_apiview
from networkapi.util.decorators import prepare_search
from networkapi.util.geral import render_to_json
from networkapi.util.json_validate import json_validate
from networkapi.util.json_validate import raise_json_validate
log = logging.getLogger(__name__)
class NetworkIPv4View(CustomAPIView):
@logs_method_apiview
@raise_json_validate()
@permission_classes_apiview((IsAuthenticated, permissions.Read))
@prepare_search
def get(self, request, *args, **kwargs):
"""Returns a list of networkv4 by ids ou dict."""
if not kwargs.get('obj_ids'):
obj_model = facade.get_networkipv4_by_search(self.search)
networks = obj_model['query_set']
only_main_property = False
else:
obj_ids = kwargs.get('obj_ids').split(';')
networks = facade.get_networkipv4_by_ids(obj_ids)
only_main_property = True
obj_model = None
# serializer networks
serializer_net = serializers.NetworkIPv4V3Serializer(
networks,
many=True,
fields=self.fields,
include=self.include,
exclude=self.exclude,
kind=self.kind
)
# prepare serializer with customized properties
data = render_to_json(
serializer_net,
main_property='networks',
obj_model=obj_model,
request=request,
only_main_property=only_main_property
)
return Response(data, status=status.HTTP_200_OK)
@logs_method_apiview
@raise_json_validate('networkv4_post')
@permission_classes_apiview((IsAuthenticated, permissions.Write))
@permission_obj_apiview([permissions.write_objv4_permission])
@commit_on_success
def post(self, request, *args, **kwargs):
"""Creates list of networkv4."""
data = request.DATA
json_validate(SPECS.get('networkv4_post')).validate(data)
response = list()
for networkv4 in data['networks']:
vl = facade.create_networkipv4(networkv4, request.user)
response.append({'id': vl.id})
return Response(response, status=status.HTTP_201_CREATED)
@logs_method_apiview
@raise_json_validate('')
@permission_classes_apiview((IsAuthenticated, permissions.Write))
@permission_obj_apiview([permissions.write_objv4_permission])
@commit_on_success
def delete(self, request, *args, **kwargs):
"""Deletes list of networkv4."""
response = list()
obj_ids = kwargs['obj_ids'].split(';')
facade.delete_networkipv4(obj_ids, request.user)
return Response(response, status=status.HTTP_200_OK)
@logs_method_apiview
@raise_json_validate('networkv4_put')
@permission_classes_apiview((IsAuthenticated, permissions.Write))
@permission_obj_apiview([permissions.write_objv4_permission])
@commit_on_success
def put(self, request, *args, **kwargs):
"""Updates list of networkv4."""
data = request.DATA
json_validate(SPECS.get('networkv4_put')).validate(data)
response = list()
for networkv4 in data['networks']:
vl = facade.update_networkipv4(networkv4, request.user)
response.append({'id': vl.id})
return Response(response, status=status.HTTP_200_OK)
class Networkv4AsyncView(CustomAPIView):
@logs_method_apiview
@raise_json_validate('ipv4_post')
@permission_classes_apiview((IsAuthenticated, permissions.Write))
@permission_obj_apiview([permissions.write_objv4_permission])
@commit_on_success
def post(self, request, *args, **kwargs):
"""Create NetworkV4."""
response = list()
nets = request.DATA
json_validate(SPECS.get('networkv4_post')).validate(nets)
user = request.user
for net in nets['networks']:
task_obj = tasks.create_networkv4.apply_async(args=[net, user.id],
queue='napi.network')
task = {
'task_id': task_obj.id
}
response.append(task)
return Response(response, status=status.HTTP_202_ACCEPTED)
@logs_method_apiview
@raise_json_validate('ipv4_put')
@permission_classes_apiview((IsAuthenticated, permissions.Write))
@permission_obj_apiview([permissions.write_objv4_permission])
@commit_on_success
def put(self, request, *args, **kwargs):
"""Edit NetworkV4."""
response = list()
nets = request.DATA
json_validate(SPECS.get('networkv4_put')).validate(nets)
user = request.user
for net in nets['networks']:
task_obj = tasks.update_networkv4.apply_async(args=[net, user.id],
queue='napi.network')
task = {
'task_id': task_obj.id
}
response.append(task)
return Response(response, status=status.HTTP_202_ACCEPTED)
@logs_method_apiview
@raise_json_validate('')
@permission_classes_apiview((IsAuthenticated, permissions.Write))
@permission_obj_apiview([permissions.write_objv4_permission])
@commit_on_success
def delete(self, request, *args, **kwargs):
"""Delete NetworkV4."""
response = list()
obj_ids = kwargs['obj_ids'].split(';')
user = request.user
for obj_id in obj_ids:
task_obj = tasks.delete_networkv4.apply_async(
args=[obj_id, user.id], queue='napi.network')
task = {
'task_id': task_obj.id
}
response.append(task)
return Response(response, status=status.HTTP_202_ACCEPTED)
class NetworkIPv4DeployView(CustomAPIView):
@logs_method_apiview
@raise_json_validate('')
@permission_classes_apiview((IsAuthenticated, permissions.Write,
perm_eqpt.Write))
@permission_obj_apiview([permissions.deploy_objv4_permission])
@commit_on_success
def post(self, request, *args, **kwargs):
"""Deploy NetworkV6."""
response = list()
obj_ids = kwargs['obj_ids'].split(';')
for obj_id in obj_ids:
# deploy network configuration
status_deploy = facade.deploy_networkipv4(obj_id, request.user)
response.append({
'status': status_deploy,
'id': obj_id,
})
return Response(response, status=status.HTTP_200_OK)
@logs_method_apiview
@raise_json_validate('')
@permission_classes_apiview((IsAuthenticated, permissions.Write,
perm_eqpt.Write))
@permission_obj_apiview([permissions.deploy_objv4_permission])
@commit_on_success
def delete(self, request, *args, **kwargs):
"""Undeploy NetworkV6."""
response = list()
obj_ids = kwargs['obj_ids'].split(';')
for obj_id in obj_ids:
# deploy network configuration
status_deploy = facade.undeploy_networkipv4(obj_id, request.user)
response.append({
'status': status_deploy,
'id': obj_id,
})
return Response(response, status=status.HTTP_200_OK)
class Networkv4DeployAsyncView(CustomAPIView):
@logs_method_apiview
@raise_json_validate('')
@permission_classes_apiview((IsAuthenticated, permissions.Write,
perm_eqpt.Write))
@permission_obj_apiview([permissions.deploy_objv4_permission])
@commit_on_success
def post(self, request, *args, **kwargs):
"""Deploy NetworkV4."""
response = list()
obj_ids = kwargs['obj_ids'].split(';')
user = request.user
for obj_id in obj_ids:
task_obj = tasks.deploy_networkv4.apply_async(
args=[obj_id, user.id], queue='napi.network')
task = {
'task_id': task_obj.id
}
response.append(task)
return Response(response, status=status.HTTP_202_ACCEPTED)
@logs_method_apiview
@raise_json_validate('')
@permission_classes_apiview((IsAuthenticated, permissions.Write,
perm_eqpt.Write))
@permission_obj_apiview([permissions.deploy_objv4_permission])
@commit_on_success
def delete(self, request, *args, **kwargs):
"""Undeploy NetworkV4."""
response = list()
obj_ids = kwargs['obj_ids'].split(';')
user = request.user
for obj_id in obj_ids:
task_obj = tasks.undeploy_networkv4.apply_async(
args=[obj_id, user.id], queue='napi.network')
task = {
'task_id': task_obj.id
}
response.append(task)
return Response(response, status=status.HTTP_202_ACCEPTED)
class NetworkIPv4ForceView(CustomAPIView):
@logs_method_apiview
@raise_json_validate('networkv4_post')
@permission_classes_apiview((IsAuthenticated, permissions.WriteForce))
@commit_on_success
def post(self, request, *args, **kwargs):
"""Creates list of networkv4."""
data = request.DATA
json_validate(SPECS.get('networkv4_post')).validate(data)
response = list()
for networkv4 in data['networks']:
vl = facade.create_networkipv4(networkv4, request.user,
force=True)
response.append({'id': vl.id})
return Response(response, status=status.HTTP_201_CREATED)
@logs_method_apiview
@raise_json_validate('networkv4_put')
@permission_classes_apiview((IsAuthenticated, permissions.WriteForce))
@commit_on_success
def put(self, request, *args, **kwargs):
"""Updates list of networkv4."""
data = request.DATA
json_validate(SPECS.get('networkv4_put')).validate(data)
response = list()
for networkv4 in data['networks']:
vl = facade.update_networkipv4(networkv4, request.user,
force=True)
response.append({'id': vl.id})
return Response(response, status=status.HTTP_200_OK)
@logs_method_apiview
@raise_json_validate('')
@permission_classes_apiview((IsAuthenticated, permissions.WriteForce))
@commit_on_success
def delete(self, request, *args, **kwargs):
"""Deletes list of networkv4."""
response = list()
obj_ids = kwargs['obj_ids'].split(';')
facade.delete_networkipv4(obj_ids, request.user,
force=True)
return Response(response, status=status.HTTP_200_OK)
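# --- Hedged illustration (not part of the original module) ---
# The POST handlers above validate request.DATA against the 'networkv4_post'
# spec and then iterate data['networks'], so a request body has roughly the
# envelope below; the per-network fields are defined by the SPECS json files
# and are deliberately not guessed here.
EXAMPLE_NETWORKV4_POST_BODY = {
    'networks': [
        # one dict per network to create, with the fields required by
        # SPECS['networkv4_post']
        {},
    ],
}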
| 32.614731
| 79
| 0.642578
|
33fa5caa0a894924155fcbc606b20425d0aae7d8
| 17,623
|
py
|
Python
|
emailgen/themes.py
|
heqingy/emailgen
|
17f5089bc8c7446c87a611083c9fefe37717fa39
|
[
"Apache-2.0"
] | 2
|
2018-08-02T06:42:00.000Z
|
2019-09-21T17:00:44.000Z
|
emailgen/themes.py
|
heqingy/emailgen
|
17f5089bc8c7446c87a611083c9fefe37717fa39
|
[
"Apache-2.0"
] | 2
|
2017-08-03T09:36:00.000Z
|
2019-10-05T01:25:51.000Z
|
emailgen/themes.py
|
heqingy/emailgen
|
17f5089bc8c7446c87a611083c9fefe37717fa39
|
[
"Apache-2.0"
] | 4
|
2018-04-25T18:01:01.000Z
|
2021-06-07T09:19:33.000Z
|
DEFAULT_THEME = {
"name": "Default",
"html": """<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Transitional//EN"
"http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd">
<html xmlns="http://www.w3.org/1999/xhtml">
<head>
<meta name="viewport" content="width=device-width, initial-scale=1.0"/>
<meta http-equiv="Content-Type" content="text/html; charset=UTF-8"/>
<style type="text/css" rel="stylesheet" media="all">
/* Base ------------------------------ */
*:not(br):not(tr):not(html) {
font-family: Arial, 'Helvetica Neue', Helvetica, sans-serif;
-webkit-box-sizing: border-box;
box-sizing: border-box;
}
body {
width: 100% !important;
height: 100%;
margin: 0;
line-height: 1.4;
background-color: #F2F4F6;
color: #74787E;
-webkit-text-size-adjust: none;
}
a {
color: #3869D4;
}
/* Layout ------------------------------ */
.email-wrapper {
width: 100%;
margin: 0;
padding: 0;
background-color: #F2F4F6;
}
.email-content {
width: 100%;
margin: 0;
padding: 0;
}
/* Masthead ----------------------- */
.email-masthead {
padding: 25px 0;
text-align: center;
}
.email-masthead_logo {
max-width: 400px;
border: 0;
}
.email-masthead_name {
font-size: 16px;
font-weight: bold;
color: #2F3133;
text-decoration: none;
text-shadow: 0 1px 0 white;
}
.email-logo {
max-height: 50px;
}
/* Body ------------------------------ */
.email-body {
width: 100%;
margin: 0;
padding: 0;
border-top: 1px solid #EDEFF2;
border-bottom: 1px solid #EDEFF2;
background-color: #FFF;
}
.email-body_inner {
width: 570px;
margin: 0 auto;
padding: 0;
}
.email-footer {
width: 570px;
margin: 0 auto;
padding: 0;
text-align: center;
}
.email-footer p {
color: #AEAEAE;
}
.body-action {
width: 100%;
margin: 30px auto;
padding: 0;
text-align: center;
}
.body-dictionary {
width: 100%;
overflow: hidden;
margin: 20px auto 10px;
padding: 0;
}
.body-dictionary dd {
margin: 0 0 10px 0;
}
.body-dictionary dt {
clear: both;
color: #000;
font-weight: bold;
}
.body-dictionary dd {
margin-left: 0;
margin-bottom: 10px;
}
.body-sub {
margin-top: 25px;
padding-top: 25px;
border-top: 1px solid #EDEFF2;
table-layout: fixed;
}
.body-sub a {
word-break: break-all;
}
.content-cell {
padding: 35px;
}
.align-right {
text-align: right;
}
/* Type ------------------------------ */
h1 {
margin-top: 0;
color: #2F3133;
font-size: 19px;
font-weight: bold;
}
h2 {
margin-top: 0;
color: #2F3133;
font-size: 16px;
font-weight: bold;
}
h3 {
margin-top: 0;
color: #2F3133;
font-size: 14px;
font-weight: bold;
}
blockquote {
margin: 1.7rem 0;
padding-left: 0.85rem;
border-left: 10px solid #F0F2F4;
}
blockquote p {
font-size: 1.1rem;
color: #999;
}
blockquote cite {
display: block;
text-align: right;
color: #666;
font-size: 1.2rem;
}
cite {
display: block;
font-size: 0.925rem;
}
cite:before {
content: "\\2014 \\0020";
}
p {
margin-top: 0;
color: #74787E;
font-size: 16px;
line-height: 1.5em;
}
p.sub {
font-size: 12px;
}
p.center {
text-align: center;
}
table {
width: 100%;
}
th {
padding: 0px 5px;
padding-bottom: 8px;
border-bottom: 1px solid #EDEFF2;
}
th p {
margin: 0;
color: #9BA2AB;
font-size: 12px;
}
td {
padding: 10px 5px;
color: #74787E;
font-size: 15px;
line-height: 18px;
}
.content {
align: center;
padding: 0;
}
/* Data table ------------------------------ */
.data-wrapper {
width: 100%;
margin: 0;
padding: 35px 0;
}
.data-table {
width: 100%;
margin: 0;
}
.data-table th {
text-align: left;
padding: 0px 5px;
padding-bottom: 8px;
border-bottom: 1px solid #EDEFF2;
}
.data-table th p {
margin: 0;
color: #9BA2AB;
font-size: 12px;
}
.data-table td {
padding: 10px 5px;
color: #74787E;
font-size: 15px;
line-height: 18px;
}
/* Buttons ------------------------------ */
.button {
display: inline-block;
width: 200px;
background-color: #3869D4;
border-radius: 3px;
color: #ffffff;
font-size: 15px;
line-height: 45px;
text-align: center;
text-decoration: none;
-webkit-text-size-adjust: none;
mso-hide: all;
}
/*Media Queries ------------------------------ */
@media only screen and (max-width: 600px) {
.email-body_inner,
.email-footer {
width: 100% !important;
}
}
@media only screen and (max-width: 500px) {
.button {
width: 100% !important;
}
}
</style>
</head>
<body dir="{{ generator.text_direction }}">
<table class="email-wrapper" width="100%" cellpadding="0" cellspacing="0">
<tr>
<td class="content">
<table class="email-content" width="100%" cellpadding="0" cellspacing="0">
<!-- Logo -->
<tr>
<td class="email-masthead">
<a class="email-masthead_name" href="{{ generator.product[" link"] }}" target="_blank">
{% if generator.product["logo"] %}
<img src="{{ generator.product[" logo"] }}" class="email-logo" />
{% else %}
{{ generator.product["name"] }}
{% endif %}
</a>
</td>
</tr>
<!-- Email Body -->
<tr>
<td class="email-body" width="100%">
<table class="email-body_inner" align="center" width="570" cellpadding="0"
cellspacing="0">
<!-- Body content -->
<tr>
<td class="content-cell">
<h1>{% if email.title %}{{ email.title }}{% else %}{{ email.greeting }} {{ email.name }},{% endif %}</h1>
<!-- Intro -->
{% for intro in email.intros %}
<p>{{ intro }}</p>
{% endfor %}
{% if email.free_markdown %}
<!-- Free Markdown -->
{{ email.free_markdown }}
{% else %}
<!-- Dictionary -->
{% if email.dictionary|length != 0 %}
<dl class="body-dictionary">
{% for d in email.dictionary %}
<dt>{{ d[0] }}</dt>
<dd>{{ d[1] }}</dd>
{% endfor %}
</dl>
{% endif %}
<!-- Table -->
{% if email.table %}
<table class="data-wrapper" width="100%" cellpadding="0" cellspacing="0">
<tr>
<td colspan="2">
<table class="data-table" width="100%" cellpadding="0" cellspacing="0">
<tr>
{% for header in email.table.headers %}
<th>{{ header }}</th>
{% endfor %}
</tr>
{% for row in email.table.rows %}
<tr>
{% for column in row %}
<td>
{{ column }}
</td>
{% endfor %}
</tr>
{% endfor %}
</table>
</td>
</tr>
</table>
{% endif %}
<!-- Action -->
{% for action in email.actions %}
<p>{{ action[0] }}</p>
<table class="body-action" align="center" width="100%" cellpadding="0"
cellspacing="0">
<tr>
<td align="center">
<div>
<a href="{{ action[1].link }}" class="button"
style="background-color: {{ action[1].color }}"
target="_blank">
{{ action[1].text }}
</a>
</div>
</td>
</tr>
</table>
{% endfor %}
{% endif %}
{% for outro in email.outros %}
<p>{{ outro }}</p>
{% endfor %}
<p>{{ email.signature }},<br/>{{ generator.product["name"] }}</p>
{% if email.free_markdown %}
<table class="body-sub">
<tbody>
{% for action in email.actions %}
<tr>
<td>
<p class="sub">{{
generator.product["trouble"]|replace("[ACTION]",
action[1].text) }}</p>
<p class="sub"><a href="{{ action[1].link }}">{{
action[1].link }}</a></p>
</td>
</tr>
{% endfor %}
</tbody>
</table>
{% endif %}
</td>
</tr>
</table>
</td>
</tr>
<tr>
<td>
<table class="email-footer" align="center" width="570" cellpadding="0" cellspacing="0">
<tr>
<td class="content-cell">
<p class="sub center">
{{ generator.product["copyright"] }}
</p>
</td>
</tr>
</table>
</td>
</tr>
</table>
</td>
</tr>
</table>
</body>
</html>""",
"plain": """
{% if email.title -%}
{{ email.title }}
{%- else -%}
{{ email.greeting }} {{ email.name }}
{%- endif %},
{% for intro in email.intros %}
{{ intro }}
{% endfor %}
{% if email.free_markdown -%}
{{ email.raw_markdown }}
{%- else -%}
{% for item in email.dictionary %}
{{ item[0] }}: {{ item[1] }}
{% endfor %}
{% if email.table %}
{{ email.table.headers[0] }}{% if email.table.headers|length > 1 %}{% for header in email.table.headers[1:] %} | {{ header }}{% endfor %}{% endif %}
{% for row in email.table.rows %}
{{ row[0] }}{% if row|length > 1 %}{% for column in row[1:] %} | {{ column }}{% endfor %}{% endif %}
{% endfor %}
{% endif %}
{% for action in email.actions %}
{{ action[0] }} {{ action[1].link }}
{% endfor %}
{%- endif %}
{% for outro in email.outros %}
{{ outro }}
{% endfor %}
{{ email.signature }},
{{ generator.product["name"] }} - {{ generator.product["link"] }}
{{ generator.product["copyright"] }}
"""
}
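# --- Hedged rendering sketch (not part of the original module) ---
# DEFAULT_THEME holds Jinja2 templates, so the plain-text variant can be
# previewed by rendering it directly with jinja2 (assumed installed). The
# `email` and `generator` objects below are stand-ins built with
# SimpleNamespace; the real emailgen classes may expose more fields.
if __name__ == "__main__":
    from types import SimpleNamespace
    from jinja2 import Template

    generator = SimpleNamespace(
        text_direction="ltr",
        product={"name": "Acme", "link": "https://example.com",
                 "logo": None, "copyright": "(c) Acme Inc.",
                 "trouble": "If you're having trouble with the button '[ACTION]', copy the URL below."},
    )
    email = SimpleNamespace(
        title=None, greeting="Hi", name="Ada", intros=["Welcome aboard."],
        free_markdown=None, raw_markdown=None, dictionary=[], table=None,
        actions=[], outros=[], signature="Thanks",
    )
    print(Template(DEFAULT_THEME["plain"]).render(email=email, generator=generator))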
| 36.638254
| 152
| 0.272258
|
74959afe627e5acf06f07b3e95f0979dc540e02f
| 2,339
|
py
|
Python
|
tools/cr/cr/actions/builder.py
|
SlimKatLegacy/android_external_chromium_org
|
ee480ef5039d7c561fc66ccf52169ead186f1bea
|
[
"BSD-3-Clause-No-Nuclear-License-2014",
"BSD-3-Clause"
] | 2
|
2015-03-04T02:36:53.000Z
|
2016-06-25T11:22:17.000Z
|
tools/cr/cr/actions/builder.py
|
j4ckfrost/android_external_chromium_org
|
a1a3dad8b08d1fcf6b6b36c267158ed63217c780
|
[
"BSD-3-Clause-No-Nuclear-License-2014",
"BSD-3-Clause"
] | null | null | null |
tools/cr/cr/actions/builder.py
|
j4ckfrost/android_external_chromium_org
|
a1a3dad8b08d1fcf6b6b36c267158ed63217c780
|
[
"BSD-3-Clause-No-Nuclear-License-2014",
"BSD-3-Clause"
] | 4
|
2015-02-09T08:49:30.000Z
|
2017-08-26T02:03:34.000Z
|
# Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""A module for the Builder base class."""
import cr
class Builder(cr.Action, cr.Plugin.Type):
"""Base class for implementing builders.
Builder implementations must override the Build and Clean methods at a
minimum to build a target and clean up back to a pristine state respectively.
They can also override Rebuild if they are able to handle it in a more
efficient way than a Clean Build sequence.
They should override the GetTargets method to return the set of valid targets
the build system knows about, and override IsTarget if they can implement it
more efficiently than checking for presence in the result of GetTargets.
"""
SELECTOR_ARG = '--builder'
SELECTOR = 'CR_BUILDER'
SELECTOR_HELP = 'Sets the builder to use to update dependencies.'
@cr.Plugin.activemethod
def Build(self, context, targets, arguments):
raise NotImplementedError('Must be overridden.')
@cr.Plugin.activemethod
def Clean(self, context, targets, arguments):
"""Clean temporary files built by a target."""
raise NotImplementedError('Must be overridden.')
@cr.Plugin.activemethod
def Rebuild(self, context, targets, arguments):
"""Make a target build even if it is up to date.
Default implementation is to do a Clean and Build sequence.
Do not call the base version if you implement a more efficient one.
"""
self.Clean(context, targets, [])
self.Build(context, targets, arguments)
@cr.Plugin.activemethod
def GetTargets(self, context):
"""Gets the full set of targets supported by this builder.
Used in automatic target name transformations, and also in offering the
user choices.
"""
return []
@cr.Plugin.activemethod
def IsTarget(self, context, target_name):
"""Check if a target name is on the builder knows about."""
return target_name in self.GetTargets(context)
class SkipBuilder(Builder):
"""The "skip" version of a Builder, causes the build step to be skipped."""
@property
def priority(self):
return super(SkipBuilder, self).priority - 1
def Build(self, context, targets, arguments):
pass
def Clean(self, context, targets, arguments):
pass
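# --- Hedged example (not part of the original Chromium tooling) ---
# A minimal sketch of a concrete Builder plugin: per the base class docstring
# it only has to override Build and Clean; GetTargets is optional. The body
# just prints placeholders instead of invoking a real build system.
class EchoBuilder(Builder):
  """Toy builder sketch that only reports what it would do."""

  def Build(self, context, targets, arguments):
    print('would build %s with %s' % (targets, arguments))

  def Clean(self, context, targets, arguments):
    print('would clean %s' % (targets,))

  def GetTargets(self, context):
    return ['all']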
| 32.041096
| 79
| 0.728516
|
302fd1689fba5b0e1507a1fefc31bceb07f194a3
| 1,968
|
py
|
Python
|
p330_patching_array.py
|
feigaochn/leetcode
|
abf0877fae02aa9c2549051f0b68df0ace952512
|
[
"MIT"
] | null | null | null |
p330_patching_array.py
|
feigaochn/leetcode
|
abf0877fae02aa9c2549051f0b68df0ace952512
|
[
"MIT"
] | null | null | null |
p330_patching_array.py
|
feigaochn/leetcode
|
abf0877fae02aa9c2549051f0b68df0ace952512
|
[
"MIT"
] | null | null | null |
# coding: utf-8
# author: Fei Gao <leetcode.com@feigao.xyz>
# Problem: patching array
#
# Given a sorted positive integer array nums and an integer n, add/patch
# elements to the array such that any number in range [1, n] inclusive can be
# formed by the sum of some elements in the array. Return the minimum number
# of patches required.
#
# Example 1:
# nums = [1, 3], n = 6
# Return 1.
# Combinations of nums are [1], [3], [1,3], which form possible sums of: 1, 3,
# 4.
# Now if we add/patch 2 to nums, the combinations are: [1], [2], [3], [1,3],
# [2,3], [1,2,3].
# Possible sums are 1, 2, 3, 4, 5, 6, which now covers the range [1, 6].
# So we only need 1 patch.
# Example 2:
# nums = [1, 5, 10], n = 20
# Return 2.
# The two patches can be [2, 4].
# Example 3:
# nums = [1, 2, 2], n = 5
# Return 0.
# Credits: Special thanks to @dietpepsi for adding this problem and creating
# all test cases.
#
#
# Show Tags
#
# Greedy
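# Greedy invariant used below: `s` is the largest value such that every sum
# in [1, s] can already be formed. If the next array element is <= s + 1 it
# extends that range (s += nums[idx]); otherwise s + 1 itself is unreachable
# and must be patched in, which at least doubles the reachable range.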
class Solution(object):
def minPatches(self, nums, n):
"""
:type nums: list[int]
:type n: int
:rtype: int
"""
len1 = len(nums)
idx = 0
s = 0
while True:
if s >= n:
break
if idx >= len(nums) or s + 1 < nums[idx]:
nums.insert(idx, s + 1)
s += nums[idx]
idx += 1
# print(nums)
return len(nums) - len1
def main():
solver = Solution()
tests = [
(([1, 3], 6), 1),
(([], 7), 3),
(([1, 5, 10], 20), 2),
(([1, 2, 2], 5), 0),
(([1, 2, 31, 33], 2147483647), '?'),
]
for params, expect in tests:
print('-' * 5 + 'TEST' + '-' * 5)
print('Input: ' + str(params))
print('Expect: ' + str(expect))
result = solver.minPatches(*params)
print('Result: ' + str(result))
pass
if __name__ == '__main__':
main()
pass
| 24.296296
| 78
| 0.526931
|
a969d27da9b6f671f6d56625e9fd8ae75009febd
| 3,366
|
py
|
Python
|
src/djangoreactredux/djrenv/lib/python3.5/site-packages/prospector/message.py
|
m2jobe/c_x
|
ba914449a9a85d82703895fc884733ca20454034
|
[
"MIT"
] | 1
|
2019-11-01T11:45:22.000Z
|
2019-11-01T11:45:22.000Z
|
src/djangoreactredux/djrenv/lib/python3.5/site-packages/prospector/message.py
|
m2jobe/c_x
|
ba914449a9a85d82703895fc884733ca20454034
|
[
"MIT"
] | 7
|
2020-02-12T03:06:52.000Z
|
2021-06-10T19:33:14.000Z
|
src/djangoreactredux/djrenv/lib/python3.5/site-packages/prospector/message.py
|
m2jobe/c_x
|
ba914449a9a85d82703895fc884733ca20454034
|
[
"MIT"
] | 1
|
2019-11-01T11:38:54.000Z
|
2019-11-01T11:38:54.000Z
|
# -*- coding: utf-8 -*-
import os
import sys
class Location(object):
def __init__(self, path, module, function, line, character, absolute_path=True):
if sys.version_info.major == 2 and isinstance(path, str):
# If this is not a unicode object, make it one! Some tools return
# paths as unicode, some as bytestring, so to ensure that they are
# all the same, we normalise here. For Python3 this is (probably)
# always a str so no need to do anything.
path = path.decode(sys.getfilesystemencoding())
self.path = path
self._path_is_absolute = absolute_path
self.module = module or None
self.function = function or None
self.line = None if line == -1 else line
self.character = None if character == -1 else character
def to_absolute_path(self, root):
if self._path_is_absolute:
return
self.path = os.path.abspath(os.path.join(root, self.path))
self._path_is_absolute = True
def to_relative_path(self, root):
if not self._path_is_absolute:
return
self.path = os.path.relpath(self.path, root)
self._path_is_absolute = False
def as_dict(self):
return {
'path': self.path,
'module': self.module,
'function': self.function,
'line': self.line,
'character': self.character
}
def __hash__(self):
return hash((self.path, self.line, self.character))
def __eq__(self, other):
return self.path == other.path and self.line == other.line and self.character == other.character
def __lt__(self, other):
if self.path == other.path:
if self.line == other.line:
return (self.character or -1) < (other.character or -1)
return (self.line or -1) < (other.line or -1)  # line can be None if it is a file-global warning
return self.path < other.path
class Message(object):
def __init__(self, source, code, location, message):
self.source = source
self.code = code
self.location = location
self.message = message
def to_absolute_path(self, root):
self.location.to_absolute_path(root)
def to_relative_path(self, root):
self.location.to_relative_path(root)
def as_dict(self):
return {
'source': self.source,
'code': self.code,
'location': self.location.as_dict(),
'message': self.message
}
def __repr__(self):
return "%s-%s" % (self.source, self.code)
def __eq__(self, other):
if self.location == other.location:
return self.code == other.code
else:
return False
def __lt__(self, other):
if self.location == other.location:
return self.code < other.code
return self.location < other.location
def make_tool_error_message(filepath, source, code, message,
line=0, character=0, module=None, function=None):
location = Location(
path=filepath,
module=module,
function=function,
line=line,
character=character
)
return Message(
source=source,
code=code,
location=location,
message=message
)
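# A minimal usage sketch (the path, tool name and message code below are made
# up for illustration):
#
#   msg = make_tool_error_message('pkg/mod.py', 'pylint', 'E0001',
#                                 'invalid syntax', line=3, character=7)
#   msg.as_dict()['location']['line']   # -> 3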
| 30.880734
| 105
| 0.592395
|
8f6889433136a50209823c1c24cd720003ea4d1b
| 3,225
|
py
|
Python
|
test/vanilla/legacy/Expected/AcceptanceTests/UrlMultiCollectionFormat/urlmulticollectionformat/aio/_auto_rest_url_mutli_collection_format_test_service.py
|
cfculhane/autorest.python
|
8cbca95faee88d933a58bbbd17b76834faa8d387
|
[
"MIT"
] | 35
|
2018-04-03T12:15:53.000Z
|
2022-03-11T14:03:34.000Z
|
test/vanilla/legacy/Expected/AcceptanceTests/UrlMultiCollectionFormat/urlmulticollectionformat/aio/_auto_rest_url_mutli_collection_format_test_service.py
|
cfculhane/autorest.python
|
8cbca95faee88d933a58bbbd17b76834faa8d387
|
[
"MIT"
] | 652
|
2017-08-28T22:44:41.000Z
|
2022-03-31T21:20:31.000Z
|
test/vanilla/legacy/Expected/AcceptanceTests/UrlMultiCollectionFormat/urlmulticollectionformat/aio/_auto_rest_url_mutli_collection_format_test_service.py
|
cfculhane/autorest.python
|
8cbca95faee88d933a58bbbd17b76834faa8d387
|
[
"MIT"
] | 29
|
2017-08-28T20:57:01.000Z
|
2022-03-11T14:03:38.000Z
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from copy import deepcopy
from typing import Any, Awaitable, Optional
from azure.core import AsyncPipelineClient
from azure.core.rest import AsyncHttpResponse, HttpRequest
from msrest import Deserializer, Serializer
from .. import models
from ._configuration import AutoRestUrlMutliCollectionFormatTestServiceConfiguration
from .operations import QueriesOperations
class AutoRestUrlMutliCollectionFormatTestService:
"""Test Infrastructure for AutoRest.
:ivar queries: QueriesOperations operations
:vartype queries: urlmulticollectionformat.aio.operations.QueriesOperations
:param base_url: Service URL. Default value is 'http://localhost:3000'.
:type base_url: str
"""
def __init__(self, base_url: str = "http://localhost:3000", **kwargs: Any) -> None:
self._config = AutoRestUrlMutliCollectionFormatTestServiceConfiguration(**kwargs)
self._client = AsyncPipelineClient(base_url=base_url, config=self._config, **kwargs)
client_models = {k: v for k, v in models.__dict__.items() if isinstance(v, type)}
self._serialize = Serializer(client_models)
self._deserialize = Deserializer(client_models)
self._serialize.client_side_validation = False
self.queries = QueriesOperations(self._client, self._config, self._serialize, self._deserialize)
def _send_request(self, request: HttpRequest, **kwargs: Any) -> Awaitable[AsyncHttpResponse]:
"""Runs the network request through the client's chained policies.
>>> from azure.core.rest import HttpRequest
>>> request = HttpRequest("GET", "https://www.example.org/")
<HttpRequest [GET], url: 'https://www.example.org/'>
>>> response = await client._send_request(request)
<AsyncHttpResponse: 200 OK>
For more information on this code flow, see https://aka.ms/azsdk/python/protocol/quickstart
:param request: The network request you want to make. Required.
:type request: ~azure.core.rest.HttpRequest
:keyword bool stream: Whether the response payload will be streamed. Defaults to False.
:return: The response of your network call. Does not do error handling on your response.
:rtype: ~azure.core.rest.AsyncHttpResponse
"""
request_copy = deepcopy(request)
request_copy.url = self._client.format_url(request_copy.url)
return self._client.send_request(request_copy, **kwargs)
async def close(self) -> None:
await self._client.close()
async def __aenter__(self) -> "AutoRestUrlMutliCollectionFormatTestService":
await self._client.__aenter__()
return self
async def __aexit__(self, *exc_details) -> None:
await self._client.__aexit__(*exc_details)
| 45.422535
| 104
| 0.694884
|
27cffb75223367a52adcb8a8587889820b60cad6
| 424
|
py
|
Python
|
homeassistant/components/melcloud/const.py
|
domwillcode/home-assistant
|
f170c80bea70c939c098b5c88320a1c789858958
|
[
"Apache-2.0"
] | 30,023
|
2016-04-13T10:17:53.000Z
|
2020-03-02T12:56:31.000Z
|
homeassistant/components/melcloud/const.py
|
jagadeeshvenkatesh/core
|
1bd982668449815fee2105478569f8e4b5670add
|
[
"Apache-2.0"
] | 31,101
|
2020-03-02T13:00:16.000Z
|
2022-03-31T23:57:36.000Z
|
homeassistant/components/melcloud/const.py
|
jagadeeshvenkatesh/core
|
1bd982668449815fee2105478569f8e4b5670add
|
[
"Apache-2.0"
] | 11,956
|
2016-04-13T18:42:31.000Z
|
2020-03-02T09:32:12.000Z
|
"""Constants for the MELCloud Climate integration."""
DOMAIN = "melcloud"
CONF_POSITION = "position"
ATTR_STATUS = "status"
ATTR_VANE_HORIZONTAL = "vane_horizontal"
ATTR_VANE_HORIZONTAL_POSITIONS = "vane_horizontal_positions"
ATTR_VANE_VERTICAL = "vane_vertical"
ATTR_VANE_VERTICAL_POSITIONS = "vane_vertical_positions"
SERVICE_SET_VANE_HORIZONTAL = "set_vane_horizontal"
SERVICE_SET_VANE_VERTICAL = "set_vane_vertical"
| 28.266667
| 60
| 0.832547
|
514ac217e81a254fe238af6113192d9da04f13f3
| 1,040
|
py
|
Python
|
util/trajectory.py
|
SwagJ/FCGF
|
18cfb4d4e9f7a18487d6ef8941fcaeda387ab55f
|
[
"MIT"
] | null | null | null |
util/trajectory.py
|
SwagJ/FCGF
|
18cfb4d4e9f7a18487d6ef8941fcaeda387ab55f
|
[
"MIT"
] | null | null | null |
util/trajectory.py
|
SwagJ/FCGF
|
18cfb4d4e9f7a18487d6ef8941fcaeda387ab55f
|
[
"MIT"
] | null | null | null |
import os
import numpy as np
class CameraPose:
def __init__(self, meta, mat):
self.metadata = meta
self.pose = mat
def __str__(self):
return 'metadata : ' + ' '.join(map(str, self.metadata)) + '\n' + \
"pose : " + "\n" + np.array_str(self.pose)
def read_trajectory(filename, dim=4):
traj = []
print(filename)
assert os.path.exists(filename)
with open(filename, 'r') as f:
metastr = f.readline()
while metastr:
metadata = list(map(int, metastr.split()))
mat = np.zeros(shape=(dim, dim))
for i in range(dim):
matstr = f.readline()
mat[i, :] = np.fromstring(matstr, dtype=float, sep=' \t')
traj.append(CameraPose(metadata, mat))
metastr = f.readline()
return traj
def write_trajectory(traj, filename, dim=4):
with open(filename, 'w') as f:
for x in traj:
p = x.pose.tolist()
f.write(' '.join(map(str, x.metadata)) + '\n')
f.write('\n'.join(' '.join(map('{0:.12f}'.format, p[i])) for i in range(dim)))
f.write('\n')
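# File layout expected by read_trajectory / produced by write_trajectory
# (dim = 4): each block is one metadata line of whitespace-separated integers
# followed by a dim x dim pose matrix, one row per line. Illustrative block:
#
#   0 1 37
#   1.000000000000 0.0 0.0 0.0
#   0.0 1.000000000000 0.0 0.0
#   0.0 0.0 1.000000000000 0.0
#   0.0 0.0 0.0 1.000000000000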
| 26
| 84
| 0.586538
|
509f9ae9931271a2fa66ae7c3f289442ed22d3e1
| 395
|
py
|
Python
|
BOJ/exaustive_search_boj/snow_white.py
|
mrbartrns/swacademy_structure
|
778f0546030385237c383d81ec37d5bd9ed1272d
|
[
"MIT"
] | null | null | null |
BOJ/exaustive_search_boj/snow_white.py
|
mrbartrns/swacademy_structure
|
778f0546030385237c383d81ec37d5bd9ed1272d
|
[
"MIT"
] | null | null | null |
BOJ/exaustive_search_boj/snow_white.py
|
mrbartrns/swacademy_structure
|
778f0546030385237c383d81ec37d5bd9ed1272d
|
[
"MIT"
] | null | null | null |
# BOJ 2309
import sys
# Since removing two of the nine leaves seven, approach the problem by choosing the two to exclude
si = sys.stdin.readline
arr = [int(si()) for _ in range(9)]
res = []
s = sum(arr)
for i in range(9):
for j in range(9):
if i == j:
continue
if s - arr[i] - arr[j] == 100:
res = [arr[k] for k in range(9) if k != i and k != j]
break
res.sort()
print("\n".join(list(map(str, res))))
| 20.789474
| 65
| 0.513924
|
53140e05038c349ad9219f4bf139d602efa6aaa6
| 33,256
|
py
|
Python
|
sdk/python/pulumi_azure_native/network/v20190201/application_gateway.py
|
pulumi-bot/pulumi-azure-native
|
f7b9490b5211544318e455e5cceafe47b628e12c
|
[
"Apache-2.0"
] | null | null | null |
sdk/python/pulumi_azure_native/network/v20190201/application_gateway.py
|
pulumi-bot/pulumi-azure-native
|
f7b9490b5211544318e455e5cceafe47b628e12c
|
[
"Apache-2.0"
] | null | null | null |
sdk/python/pulumi_azure_native/network/v20190201/application_gateway.py
|
pulumi-bot/pulumi-azure-native
|
f7b9490b5211544318e455e5cceafe47b628e12c
|
[
"Apache-2.0"
] | null | null | null |
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union
from ... import _utilities, _tables
from . import outputs
from ._enums import *
from ._inputs import *
__all__ = ['ApplicationGateway']
class ApplicationGateway(pulumi.CustomResource):
def __init__(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
application_gateway_name: Optional[pulumi.Input[str]] = None,
authentication_certificates: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['ApplicationGatewayAuthenticationCertificateArgs']]]]] = None,
autoscale_configuration: Optional[pulumi.Input[pulumi.InputType['ApplicationGatewayAutoscaleConfigurationArgs']]] = None,
backend_address_pools: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['ApplicationGatewayBackendAddressPoolArgs']]]]] = None,
backend_http_settings_collection: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['ApplicationGatewayBackendHttpSettingsArgs']]]]] = None,
custom_error_configurations: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['ApplicationGatewayCustomErrorArgs']]]]] = None,
enable_fips: Optional[pulumi.Input[bool]] = None,
enable_http2: Optional[pulumi.Input[bool]] = None,
etag: Optional[pulumi.Input[str]] = None,
firewall_policy: Optional[pulumi.Input[pulumi.InputType['SubResourceArgs']]] = None,
frontend_ip_configurations: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['ApplicationGatewayFrontendIPConfigurationArgs']]]]] = None,
frontend_ports: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['ApplicationGatewayFrontendPortArgs']]]]] = None,
gateway_ip_configurations: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['ApplicationGatewayIPConfigurationArgs']]]]] = None,
http_listeners: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['ApplicationGatewayHttpListenerArgs']]]]] = None,
id: Optional[pulumi.Input[str]] = None,
identity: Optional[pulumi.Input[pulumi.InputType['ManagedServiceIdentityArgs']]] = None,
location: Optional[pulumi.Input[str]] = None,
probes: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['ApplicationGatewayProbeArgs']]]]] = None,
provisioning_state: Optional[pulumi.Input[str]] = None,
redirect_configurations: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['ApplicationGatewayRedirectConfigurationArgs']]]]] = None,
request_routing_rules: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['ApplicationGatewayRequestRoutingRuleArgs']]]]] = None,
resource_group_name: Optional[pulumi.Input[str]] = None,
resource_guid: Optional[pulumi.Input[str]] = None,
rewrite_rule_sets: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['ApplicationGatewayRewriteRuleSetArgs']]]]] = None,
sku: Optional[pulumi.Input[pulumi.InputType['ApplicationGatewaySkuArgs']]] = None,
ssl_certificates: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['ApplicationGatewaySslCertificateArgs']]]]] = None,
ssl_policy: Optional[pulumi.Input[pulumi.InputType['ApplicationGatewaySslPolicyArgs']]] = None,
tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
trusted_root_certificates: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['ApplicationGatewayTrustedRootCertificateArgs']]]]] = None,
url_path_maps: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['ApplicationGatewayUrlPathMapArgs']]]]] = None,
web_application_firewall_configuration: Optional[pulumi.Input[pulumi.InputType['ApplicationGatewayWebApplicationFirewallConfigurationArgs']]] = None,
zones: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
__props__=None,
__name__=None,
__opts__=None):
"""
Application gateway resource
:param str resource_name: The name of the resource.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[str] application_gateway_name: The name of the application gateway.
:param pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['ApplicationGatewayAuthenticationCertificateArgs']]]] authentication_certificates: Authentication certificates of the application gateway resource. For default limits, see [Application Gateway limits](https://docs.microsoft.com/azure/azure-subscription-service-limits#application-gateway-limits).
:param pulumi.Input[pulumi.InputType['ApplicationGatewayAutoscaleConfigurationArgs']] autoscale_configuration: Autoscale Configuration.
:param pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['ApplicationGatewayBackendAddressPoolArgs']]]] backend_address_pools: Backend address pool of the application gateway resource. For default limits, see [Application Gateway limits](https://docs.microsoft.com/azure/azure-subscription-service-limits#application-gateway-limits).
:param pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['ApplicationGatewayBackendHttpSettingsArgs']]]] backend_http_settings_collection: Backend http settings of the application gateway resource. For default limits, see [Application Gateway limits](https://docs.microsoft.com/azure/azure-subscription-service-limits#application-gateway-limits).
:param pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['ApplicationGatewayCustomErrorArgs']]]] custom_error_configurations: Custom error configurations of the application gateway resource.
:param pulumi.Input[bool] enable_fips: Whether FIPS is enabled on the application gateway resource.
:param pulumi.Input[bool] enable_http2: Whether HTTP2 is enabled on the application gateway resource.
:param pulumi.Input[str] etag: A unique read-only string that changes whenever the resource is updated.
:param pulumi.Input[pulumi.InputType['SubResourceArgs']] firewall_policy: Reference of the FirewallPolicy resource.
:param pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['ApplicationGatewayFrontendIPConfigurationArgs']]]] frontend_ip_configurations: Frontend IP addresses of the application gateway resource. For default limits, see [Application Gateway limits](https://docs.microsoft.com/azure/azure-subscription-service-limits#application-gateway-limits).
:param pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['ApplicationGatewayFrontendPortArgs']]]] frontend_ports: Frontend ports of the application gateway resource. For default limits, see [Application Gateway limits](https://docs.microsoft.com/azure/azure-subscription-service-limits#application-gateway-limits).
:param pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['ApplicationGatewayIPConfigurationArgs']]]] gateway_ip_configurations: Subnets of the application gateway resource. For default limits, see [Application Gateway limits](https://docs.microsoft.com/azure/azure-subscription-service-limits#application-gateway-limits).
:param pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['ApplicationGatewayHttpListenerArgs']]]] http_listeners: Http listeners of the application gateway resource. For default limits, see [Application Gateway limits](https://docs.microsoft.com/azure/azure-subscription-service-limits#application-gateway-limits).
:param pulumi.Input[str] id: Resource ID.
:param pulumi.Input[pulumi.InputType['ManagedServiceIdentityArgs']] identity: The identity of the application gateway, if configured.
:param pulumi.Input[str] location: Resource location.
:param pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['ApplicationGatewayProbeArgs']]]] probes: Probes of the application gateway resource.
:param pulumi.Input[str] provisioning_state: Provisioning state of the application gateway resource. Possible values are: 'Updating', 'Deleting', and 'Failed'.
:param pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['ApplicationGatewayRedirectConfigurationArgs']]]] redirect_configurations: Redirect configurations of the application gateway resource. For default limits, see [Application Gateway limits](https://docs.microsoft.com/azure/azure-subscription-service-limits#application-gateway-limits).
:param pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['ApplicationGatewayRequestRoutingRuleArgs']]]] request_routing_rules: Request routing rules of the application gateway resource.
:param pulumi.Input[str] resource_group_name: The name of the resource group.
:param pulumi.Input[str] resource_guid: Resource GUID property of the application gateway resource.
:param pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['ApplicationGatewayRewriteRuleSetArgs']]]] rewrite_rule_sets: Rewrite rules for the application gateway resource.
:param pulumi.Input[pulumi.InputType['ApplicationGatewaySkuArgs']] sku: SKU of the application gateway resource.
:param pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['ApplicationGatewaySslCertificateArgs']]]] ssl_certificates: SSL certificates of the application gateway resource. For default limits, see [Application Gateway limits](https://docs.microsoft.com/azure/azure-subscription-service-limits#application-gateway-limits).
:param pulumi.Input[pulumi.InputType['ApplicationGatewaySslPolicyArgs']] ssl_policy: SSL policy of the application gateway resource.
:param pulumi.Input[Mapping[str, pulumi.Input[str]]] tags: Resource tags.
:param pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['ApplicationGatewayTrustedRootCertificateArgs']]]] trusted_root_certificates: Trusted Root certificates of the application gateway resource. For default limits, see [Application Gateway limits](https://docs.microsoft.com/azure/azure-subscription-service-limits#application-gateway-limits).
:param pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['ApplicationGatewayUrlPathMapArgs']]]] url_path_maps: URL path map of the application gateway resource. For default limits, see [Application Gateway limits](https://docs.microsoft.com/azure/azure-subscription-service-limits#application-gateway-limits).
:param pulumi.Input[pulumi.InputType['ApplicationGatewayWebApplicationFirewallConfigurationArgs']] web_application_firewall_configuration: Web application firewall configuration.
:param pulumi.Input[Sequence[pulumi.Input[str]]] zones: A list of availability zones denoting where the resource needs to come from.
"""
if __name__ is not None:
warnings.warn("explicit use of __name__ is deprecated", DeprecationWarning)
resource_name = __name__
if __opts__ is not None:
warnings.warn("explicit use of __opts__ is deprecated, use 'opts' instead", DeprecationWarning)
opts = __opts__
if opts is None:
opts = pulumi.ResourceOptions()
if not isinstance(opts, pulumi.ResourceOptions):
raise TypeError('Expected resource options to be a ResourceOptions instance')
if opts.version is None:
opts.version = _utilities.get_version()
if opts.id is None:
if __props__ is not None:
raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
__props__ = dict()
__props__['application_gateway_name'] = application_gateway_name
__props__['authentication_certificates'] = authentication_certificates
__props__['autoscale_configuration'] = autoscale_configuration
__props__['backend_address_pools'] = backend_address_pools
__props__['backend_http_settings_collection'] = backend_http_settings_collection
__props__['custom_error_configurations'] = custom_error_configurations
__props__['enable_fips'] = enable_fips
__props__['enable_http2'] = enable_http2
__props__['etag'] = etag
__props__['firewall_policy'] = firewall_policy
__props__['frontend_ip_configurations'] = frontend_ip_configurations
__props__['frontend_ports'] = frontend_ports
__props__['gateway_ip_configurations'] = gateway_ip_configurations
__props__['http_listeners'] = http_listeners
__props__['id'] = id
__props__['identity'] = identity
__props__['location'] = location
__props__['probes'] = probes
__props__['provisioning_state'] = provisioning_state
__props__['redirect_configurations'] = redirect_configurations
__props__['request_routing_rules'] = request_routing_rules
if resource_group_name is None and not opts.urn:
raise TypeError("Missing required property 'resource_group_name'")
__props__['resource_group_name'] = resource_group_name
__props__['resource_guid'] = resource_guid
__props__['rewrite_rule_sets'] = rewrite_rule_sets
__props__['sku'] = sku
__props__['ssl_certificates'] = ssl_certificates
__props__['ssl_policy'] = ssl_policy
__props__['tags'] = tags
__props__['trusted_root_certificates'] = trusted_root_certificates
__props__['url_path_maps'] = url_path_maps
__props__['web_application_firewall_configuration'] = web_application_firewall_configuration
__props__['zones'] = zones
__props__['name'] = None
__props__['operational_state'] = None
__props__['type'] = None
alias_opts = pulumi.ResourceOptions(aliases=[pulumi.Alias(type_="azure-nextgen:network/v20190201:ApplicationGateway"), pulumi.Alias(type_="azure-native:network:ApplicationGateway"), pulumi.Alias(type_="azure-nextgen:network:ApplicationGateway"), pulumi.Alias(type_="azure-native:network/latest:ApplicationGateway"), pulumi.Alias(type_="azure-nextgen:network/latest:ApplicationGateway"), pulumi.Alias(type_="azure-native:network/v20150501preview:ApplicationGateway"), pulumi.Alias(type_="azure-nextgen:network/v20150501preview:ApplicationGateway"), pulumi.Alias(type_="azure-native:network/v20150615:ApplicationGateway"), pulumi.Alias(type_="azure-nextgen:network/v20150615:ApplicationGateway"), pulumi.Alias(type_="azure-native:network/v20160330:ApplicationGateway"), pulumi.Alias(type_="azure-nextgen:network/v20160330:ApplicationGateway"), pulumi.Alias(type_="azure-native:network/v20160601:ApplicationGateway"), pulumi.Alias(type_="azure-nextgen:network/v20160601:ApplicationGateway"), pulumi.Alias(type_="azure-native:network/v20160901:ApplicationGateway"), pulumi.Alias(type_="azure-nextgen:network/v20160901:ApplicationGateway"), pulumi.Alias(type_="azure-native:network/v20161201:ApplicationGateway"), pulumi.Alias(type_="azure-nextgen:network/v20161201:ApplicationGateway"), pulumi.Alias(type_="azure-native:network/v20170301:ApplicationGateway"), pulumi.Alias(type_="azure-nextgen:network/v20170301:ApplicationGateway"), pulumi.Alias(type_="azure-native:network/v20170601:ApplicationGateway"), pulumi.Alias(type_="azure-nextgen:network/v20170601:ApplicationGateway"), pulumi.Alias(type_="azure-native:network/v20170801:ApplicationGateway"), pulumi.Alias(type_="azure-nextgen:network/v20170801:ApplicationGateway"), pulumi.Alias(type_="azure-native:network/v20170901:ApplicationGateway"), pulumi.Alias(type_="azure-nextgen:network/v20170901:ApplicationGateway"), pulumi.Alias(type_="azure-native:network/v20171001:ApplicationGateway"), pulumi.Alias(type_="azure-nextgen:network/v20171001:ApplicationGateway"), pulumi.Alias(type_="azure-native:network/v20171101:ApplicationGateway"), pulumi.Alias(type_="azure-nextgen:network/v20171101:ApplicationGateway"), pulumi.Alias(type_="azure-native:network/v20180101:ApplicationGateway"), pulumi.Alias(type_="azure-nextgen:network/v20180101:ApplicationGateway"), pulumi.Alias(type_="azure-native:network/v20180201:ApplicationGateway"), pulumi.Alias(type_="azure-nextgen:network/v20180201:ApplicationGateway"), pulumi.Alias(type_="azure-native:network/v20180401:ApplicationGateway"), pulumi.Alias(type_="azure-nextgen:network/v20180401:ApplicationGateway"), pulumi.Alias(type_="azure-native:network/v20180601:ApplicationGateway"), pulumi.Alias(type_="azure-nextgen:network/v20180601:ApplicationGateway"), pulumi.Alias(type_="azure-native:network/v20180701:ApplicationGateway"), pulumi.Alias(type_="azure-nextgen:network/v20180701:ApplicationGateway"), pulumi.Alias(type_="azure-native:network/v20180801:ApplicationGateway"), pulumi.Alias(type_="azure-nextgen:network/v20180801:ApplicationGateway"), pulumi.Alias(type_="azure-native:network/v20181001:ApplicationGateway"), pulumi.Alias(type_="azure-nextgen:network/v20181001:ApplicationGateway"), pulumi.Alias(type_="azure-native:network/v20181101:ApplicationGateway"), pulumi.Alias(type_="azure-nextgen:network/v20181101:ApplicationGateway"), pulumi.Alias(type_="azure-native:network/v20181201:ApplicationGateway"), pulumi.Alias(type_="azure-nextgen:network/v20181201:ApplicationGateway"), 
pulumi.Alias(type_="azure-native:network/v20190401:ApplicationGateway"), pulumi.Alias(type_="azure-nextgen:network/v20190401:ApplicationGateway"), pulumi.Alias(type_="azure-native:network/v20190601:ApplicationGateway"), pulumi.Alias(type_="azure-nextgen:network/v20190601:ApplicationGateway"), pulumi.Alias(type_="azure-native:network/v20190701:ApplicationGateway"), pulumi.Alias(type_="azure-nextgen:network/v20190701:ApplicationGateway"), pulumi.Alias(type_="azure-native:network/v20190801:ApplicationGateway"), pulumi.Alias(type_="azure-nextgen:network/v20190801:ApplicationGateway"), pulumi.Alias(type_="azure-native:network/v20190901:ApplicationGateway"), pulumi.Alias(type_="azure-nextgen:network/v20190901:ApplicationGateway"), pulumi.Alias(type_="azure-native:network/v20191101:ApplicationGateway"), pulumi.Alias(type_="azure-nextgen:network/v20191101:ApplicationGateway"), pulumi.Alias(type_="azure-native:network/v20191201:ApplicationGateway"), pulumi.Alias(type_="azure-nextgen:network/v20191201:ApplicationGateway"), pulumi.Alias(type_="azure-native:network/v20200301:ApplicationGateway"), pulumi.Alias(type_="azure-nextgen:network/v20200301:ApplicationGateway"), pulumi.Alias(type_="azure-native:network/v20200401:ApplicationGateway"), pulumi.Alias(type_="azure-nextgen:network/v20200401:ApplicationGateway"), pulumi.Alias(type_="azure-native:network/v20200501:ApplicationGateway"), pulumi.Alias(type_="azure-nextgen:network/v20200501:ApplicationGateway"), pulumi.Alias(type_="azure-native:network/v20200601:ApplicationGateway"), pulumi.Alias(type_="azure-nextgen:network/v20200601:ApplicationGateway"), pulumi.Alias(type_="azure-native:network/v20200701:ApplicationGateway"), pulumi.Alias(type_="azure-nextgen:network/v20200701:ApplicationGateway"), pulumi.Alias(type_="azure-native:network/v20200801:ApplicationGateway"), pulumi.Alias(type_="azure-nextgen:network/v20200801:ApplicationGateway")])
opts = pulumi.ResourceOptions.merge(opts, alias_opts)
super(ApplicationGateway, __self__).__init__(
'azure-native:network/v20190201:ApplicationGateway',
resource_name,
__props__,
opts)
@staticmethod
def get(resource_name: str,
id: pulumi.Input[str],
opts: Optional[pulumi.ResourceOptions] = None) -> 'ApplicationGateway':
"""
Get an existing ApplicationGateway resource's state with the given name, id, and optional extra
properties used to qualify the lookup.
:param str resource_name: The unique name of the resulting resource.
:param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
__props__ = dict()
__props__["authentication_certificates"] = None
__props__["autoscale_configuration"] = None
__props__["backend_address_pools"] = None
__props__["backend_http_settings_collection"] = None
__props__["custom_error_configurations"] = None
__props__["enable_fips"] = None
__props__["enable_http2"] = None
__props__["etag"] = None
__props__["firewall_policy"] = None
__props__["frontend_ip_configurations"] = None
__props__["frontend_ports"] = None
__props__["gateway_ip_configurations"] = None
__props__["http_listeners"] = None
__props__["identity"] = None
__props__["location"] = None
__props__["name"] = None
__props__["operational_state"] = None
__props__["probes"] = None
__props__["provisioning_state"] = None
__props__["redirect_configurations"] = None
__props__["request_routing_rules"] = None
__props__["resource_guid"] = None
__props__["rewrite_rule_sets"] = None
__props__["sku"] = None
__props__["ssl_certificates"] = None
__props__["ssl_policy"] = None
__props__["tags"] = None
__props__["trusted_root_certificates"] = None
__props__["type"] = None
__props__["url_path_maps"] = None
__props__["web_application_firewall_configuration"] = None
__props__["zones"] = None
return ApplicationGateway(resource_name, opts=opts, __props__=__props__)
@property
@pulumi.getter(name="authenticationCertificates")
def authentication_certificates(self) -> pulumi.Output[Optional[Sequence['outputs.ApplicationGatewayAuthenticationCertificateResponse']]]:
"""
Authentication certificates of the application gateway resource. For default limits, see [Application Gateway limits](https://docs.microsoft.com/azure/azure-subscription-service-limits#application-gateway-limits).
"""
return pulumi.get(self, "authentication_certificates")
@property
@pulumi.getter(name="autoscaleConfiguration")
def autoscale_configuration(self) -> pulumi.Output[Optional['outputs.ApplicationGatewayAutoscaleConfigurationResponse']]:
"""
Autoscale Configuration.
"""
return pulumi.get(self, "autoscale_configuration")
@property
@pulumi.getter(name="backendAddressPools")
def backend_address_pools(self) -> pulumi.Output[Optional[Sequence['outputs.ApplicationGatewayBackendAddressPoolResponse']]]:
"""
Backend address pool of the application gateway resource. For default limits, see [Application Gateway limits](https://docs.microsoft.com/azure/azure-subscription-service-limits#application-gateway-limits).
"""
return pulumi.get(self, "backend_address_pools")
@property
@pulumi.getter(name="backendHttpSettingsCollection")
def backend_http_settings_collection(self) -> pulumi.Output[Optional[Sequence['outputs.ApplicationGatewayBackendHttpSettingsResponse']]]:
"""
Backend http settings of the application gateway resource. For default limits, see [Application Gateway limits](https://docs.microsoft.com/azure/azure-subscription-service-limits#application-gateway-limits).
"""
return pulumi.get(self, "backend_http_settings_collection")
@property
@pulumi.getter(name="customErrorConfigurations")
def custom_error_configurations(self) -> pulumi.Output[Optional[Sequence['outputs.ApplicationGatewayCustomErrorResponse']]]:
"""
Custom error configurations of the application gateway resource.
"""
return pulumi.get(self, "custom_error_configurations")
@property
@pulumi.getter(name="enableFips")
def enable_fips(self) -> pulumi.Output[Optional[bool]]:
"""
Whether FIPS is enabled on the application gateway resource.
"""
return pulumi.get(self, "enable_fips")
@property
@pulumi.getter(name="enableHttp2")
def enable_http2(self) -> pulumi.Output[Optional[bool]]:
"""
Whether HTTP2 is enabled on the application gateway resource.
"""
return pulumi.get(self, "enable_http2")
@property
@pulumi.getter
def etag(self) -> pulumi.Output[Optional[str]]:
"""
A unique read-only string that changes whenever the resource is updated.
"""
return pulumi.get(self, "etag")
@property
@pulumi.getter(name="firewallPolicy")
def firewall_policy(self) -> pulumi.Output[Optional['outputs.SubResourceResponse']]:
"""
Reference of the FirewallPolicy resource.
"""
return pulumi.get(self, "firewall_policy")
@property
@pulumi.getter(name="frontendIPConfigurations")
def frontend_ip_configurations(self) -> pulumi.Output[Optional[Sequence['outputs.ApplicationGatewayFrontendIPConfigurationResponse']]]:
"""
Frontend IP addresses of the application gateway resource. For default limits, see [Application Gateway limits](https://docs.microsoft.com/azure/azure-subscription-service-limits#application-gateway-limits).
"""
return pulumi.get(self, "frontend_ip_configurations")
@property
@pulumi.getter(name="frontendPorts")
def frontend_ports(self) -> pulumi.Output[Optional[Sequence['outputs.ApplicationGatewayFrontendPortResponse']]]:
"""
Frontend ports of the application gateway resource. For default limits, see [Application Gateway limits](https://docs.microsoft.com/azure/azure-subscription-service-limits#application-gateway-limits).
"""
return pulumi.get(self, "frontend_ports")
@property
@pulumi.getter(name="gatewayIPConfigurations")
def gateway_ip_configurations(self) -> pulumi.Output[Optional[Sequence['outputs.ApplicationGatewayIPConfigurationResponse']]]:
"""
Subnets of the application gateway resource. For default limits, see [Application Gateway limits](https://docs.microsoft.com/azure/azure-subscription-service-limits#application-gateway-limits).
"""
return pulumi.get(self, "gateway_ip_configurations")
@property
@pulumi.getter(name="httpListeners")
def http_listeners(self) -> pulumi.Output[Optional[Sequence['outputs.ApplicationGatewayHttpListenerResponse']]]:
"""
Http listeners of the application gateway resource. For default limits, see [Application Gateway limits](https://docs.microsoft.com/azure/azure-subscription-service-limits#application-gateway-limits).
"""
return pulumi.get(self, "http_listeners")
@property
@pulumi.getter
def identity(self) -> pulumi.Output[Optional['outputs.ManagedServiceIdentityResponse']]:
"""
The identity of the application gateway, if configured.
"""
return pulumi.get(self, "identity")
@property
@pulumi.getter
def location(self) -> pulumi.Output[Optional[str]]:
"""
Resource location.
"""
return pulumi.get(self, "location")
@property
@pulumi.getter
def name(self) -> pulumi.Output[str]:
"""
Resource name.
"""
return pulumi.get(self, "name")
@property
@pulumi.getter(name="operationalState")
def operational_state(self) -> pulumi.Output[str]:
"""
Operational state of the application gateway resource.
"""
return pulumi.get(self, "operational_state")
@property
@pulumi.getter
def probes(self) -> pulumi.Output[Optional[Sequence['outputs.ApplicationGatewayProbeResponse']]]:
"""
Probes of the application gateway resource.
"""
return pulumi.get(self, "probes")
@property
@pulumi.getter(name="provisioningState")
def provisioning_state(self) -> pulumi.Output[Optional[str]]:
"""
Provisioning state of the application gateway resource. Possible values are: 'Updating', 'Deleting', and 'Failed'.
"""
return pulumi.get(self, "provisioning_state")
@property
@pulumi.getter(name="redirectConfigurations")
def redirect_configurations(self) -> pulumi.Output[Optional[Sequence['outputs.ApplicationGatewayRedirectConfigurationResponse']]]:
"""
Redirect configurations of the application gateway resource. For default limits, see [Application Gateway limits](https://docs.microsoft.com/azure/azure-subscription-service-limits#application-gateway-limits).
"""
return pulumi.get(self, "redirect_configurations")
@property
@pulumi.getter(name="requestRoutingRules")
def request_routing_rules(self) -> pulumi.Output[Optional[Sequence['outputs.ApplicationGatewayRequestRoutingRuleResponse']]]:
"""
Request routing rules of the application gateway resource.
"""
return pulumi.get(self, "request_routing_rules")
@property
@pulumi.getter(name="resourceGuid")
def resource_guid(self) -> pulumi.Output[Optional[str]]:
"""
Resource GUID property of the application gateway resource.
"""
return pulumi.get(self, "resource_guid")
@property
@pulumi.getter(name="rewriteRuleSets")
def rewrite_rule_sets(self) -> pulumi.Output[Optional[Sequence['outputs.ApplicationGatewayRewriteRuleSetResponse']]]:
"""
Rewrite rules for the application gateway resource.
"""
return pulumi.get(self, "rewrite_rule_sets")
@property
@pulumi.getter
def sku(self) -> pulumi.Output[Optional['outputs.ApplicationGatewaySkuResponse']]:
"""
SKU of the application gateway resource.
"""
return pulumi.get(self, "sku")
@property
@pulumi.getter(name="sslCertificates")
def ssl_certificates(self) -> pulumi.Output[Optional[Sequence['outputs.ApplicationGatewaySslCertificateResponse']]]:
"""
SSL certificates of the application gateway resource. For default limits, see [Application Gateway limits](https://docs.microsoft.com/azure/azure-subscription-service-limits#application-gateway-limits).
"""
return pulumi.get(self, "ssl_certificates")
@property
@pulumi.getter(name="sslPolicy")
def ssl_policy(self) -> pulumi.Output[Optional['outputs.ApplicationGatewaySslPolicyResponse']]:
"""
SSL policy of the application gateway resource.
"""
return pulumi.get(self, "ssl_policy")
@property
@pulumi.getter
def tags(self) -> pulumi.Output[Optional[Mapping[str, str]]]:
"""
Resource tags.
"""
return pulumi.get(self, "tags")
@property
@pulumi.getter(name="trustedRootCertificates")
def trusted_root_certificates(self) -> pulumi.Output[Optional[Sequence['outputs.ApplicationGatewayTrustedRootCertificateResponse']]]:
"""
Trusted Root certificates of the application gateway resource. For default limits, see [Application Gateway limits](https://docs.microsoft.com/azure/azure-subscription-service-limits#application-gateway-limits).
"""
return pulumi.get(self, "trusted_root_certificates")
@property
@pulumi.getter
def type(self) -> pulumi.Output[str]:
"""
Resource type.
"""
return pulumi.get(self, "type")
@property
@pulumi.getter(name="urlPathMaps")
def url_path_maps(self) -> pulumi.Output[Optional[Sequence['outputs.ApplicationGatewayUrlPathMapResponse']]]:
"""
URL path map of the application gateway resource. For default limits, see [Application Gateway limits](https://docs.microsoft.com/azure/azure-subscription-service-limits#application-gateway-limits).
"""
return pulumi.get(self, "url_path_maps")
@property
@pulumi.getter(name="webApplicationFirewallConfiguration")
def web_application_firewall_configuration(self) -> pulumi.Output[Optional['outputs.ApplicationGatewayWebApplicationFirewallConfigurationResponse']]:
"""
Web application firewall configuration.
"""
return pulumi.get(self, "web_application_firewall_configuration")
@property
@pulumi.getter
def zones(self) -> pulumi.Output[Optional[Sequence[str]]]:
"""
A list of availability zones denoting where the resource needs to come from.
"""
return pulumi.get(self, "zones")
def translate_output_property(self, prop):
return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
def translate_input_property(self, prop):
return _tables.SNAKE_TO_CAMEL_CASE_TABLE.get(prop) or prop
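# A minimal construction sketch (resource and group names are illustrative;
# only resource_group_name is required by this constructor):
#
#   gw = ApplicationGateway(
#       "example-appgw",
#       resource_group_name="example-rg",
#       application_gateway_name="example-appgw",
#       location="westus2")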
| 71.059829
| 5,407
| 0.732259
|
b3725e700d793710cc0abe4b754dc6df865cf203
| 8,652
|
py
|
Python
|
kubernetes/client/models/v1_policy_rule.py
|
venukarnati92/python-1
|
3fabf9ed9f4758fb5133975a58fc147471e91d9d
|
[
"Apache-2.0"
] | 1
|
2022-02-07T21:57:20.000Z
|
2022-02-07T21:57:20.000Z
|
kubernetes/client/models/v1_policy_rule.py
|
venukarnati92/python-1
|
3fabf9ed9f4758fb5133975a58fc147471e91d9d
|
[
"Apache-2.0"
] | 1
|
2022-03-01T03:37:57.000Z
|
2022-03-01T03:37:57.000Z
|
kubernetes/client/models/v1_policy_rule.py
|
venukarnati92/python-1
|
3fabf9ed9f4758fb5133975a58fc147471e91d9d
|
[
"Apache-2.0"
] | null | null | null |
# coding: utf-8
"""
Kubernetes
No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
The version of the OpenAPI document: release-1.22
Generated by: https://openapi-generator.tech
"""
import pprint
import re # noqa: F401
import six
from kubernetes.client.configuration import Configuration
class V1PolicyRule(object):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
openapi_types = {
'api_groups': 'list[str]',
'non_resource_ur_ls': 'list[str]',
'resource_names': 'list[str]',
'resources': 'list[str]',
'verbs': 'list[str]'
}
attribute_map = {
'api_groups': 'apiGroups',
'non_resource_ur_ls': 'nonResourceURLs',
'resource_names': 'resourceNames',
'resources': 'resources',
'verbs': 'verbs'
}
def __init__(self, api_groups=None, non_resource_ur_ls=None, resource_names=None, resources=None, verbs=None, local_vars_configuration=None): # noqa: E501
"""V1PolicyRule - a model defined in OpenAPI""" # noqa: E501
if local_vars_configuration is None:
local_vars_configuration = Configuration()
self.local_vars_configuration = local_vars_configuration
self._api_groups = None
self._non_resource_ur_ls = None
self._resource_names = None
self._resources = None
self._verbs = None
self.discriminator = None
if api_groups is not None:
self.api_groups = api_groups
if non_resource_ur_ls is not None:
self.non_resource_ur_ls = non_resource_ur_ls
if resource_names is not None:
self.resource_names = resource_names
if resources is not None:
self.resources = resources
self.verbs = verbs
@property
def api_groups(self):
"""Gets the api_groups of this V1PolicyRule. # noqa: E501
APIGroups is the name of the APIGroup that contains the resources. If multiple API groups are specified, any action requested against one of the enumerated resources in any API group will be allowed. # noqa: E501
:return: The api_groups of this V1PolicyRule. # noqa: E501
:rtype: list[str]
"""
return self._api_groups
@api_groups.setter
def api_groups(self, api_groups):
"""Sets the api_groups of this V1PolicyRule.
APIGroups is the name of the APIGroup that contains the resources. If multiple API groups are specified, any action requested against one of the enumerated resources in any API group will be allowed. # noqa: E501
:param api_groups: The api_groups of this V1PolicyRule. # noqa: E501
:type: list[str]
"""
self._api_groups = api_groups
@property
def non_resource_ur_ls(self):
"""Gets the non_resource_ur_ls of this V1PolicyRule. # noqa: E501
NonResourceURLs is a set of partial urls that a user should have access to. *s are allowed, but only as the full, final step in the path Since non-resource URLs are not namespaced, this field is only applicable for ClusterRoles referenced from a ClusterRoleBinding. Rules can either apply to API resources (such as \"pods\" or \"secrets\") or non-resource URL paths (such as \"/api\"), but not both. # noqa: E501
:return: The non_resource_ur_ls of this V1PolicyRule. # noqa: E501
:rtype: list[str]
"""
return self._non_resource_ur_ls
@non_resource_ur_ls.setter
def non_resource_ur_ls(self, non_resource_ur_ls):
"""Sets the non_resource_ur_ls of this V1PolicyRule.
NonResourceURLs is a set of partial urls that a user should have access to. *s are allowed, but only as the full, final step in the path Since non-resource URLs are not namespaced, this field is only applicable for ClusterRoles referenced from a ClusterRoleBinding. Rules can either apply to API resources (such as \"pods\" or \"secrets\") or non-resource URL paths (such as \"/api\"), but not both. # noqa: E501
:param non_resource_ur_ls: The non_resource_ur_ls of this V1PolicyRule. # noqa: E501
:type: list[str]
"""
self._non_resource_ur_ls = non_resource_ur_ls
@property
def resource_names(self):
"""Gets the resource_names of this V1PolicyRule. # noqa: E501
ResourceNames is an optional white list of names that the rule applies to. An empty set means that everything is allowed. # noqa: E501
:return: The resource_names of this V1PolicyRule. # noqa: E501
:rtype: list[str]
"""
return self._resource_names
@resource_names.setter
def resource_names(self, resource_names):
"""Sets the resource_names of this V1PolicyRule.
ResourceNames is an optional white list of names that the rule applies to. An empty set means that everything is allowed. # noqa: E501
:param resource_names: The resource_names of this V1PolicyRule. # noqa: E501
:type: list[str]
"""
self._resource_names = resource_names
@property
def resources(self):
"""Gets the resources of this V1PolicyRule. # noqa: E501
Resources is a list of resources this rule applies to. '*' represents all resources. # noqa: E501
:return: The resources of this V1PolicyRule. # noqa: E501
:rtype: list[str]
"""
return self._resources
@resources.setter
def resources(self, resources):
"""Sets the resources of this V1PolicyRule.
Resources is a list of resources this rule applies to. '*' represents all resources. # noqa: E501
:param resources: The resources of this V1PolicyRule. # noqa: E501
:type: list[str]
"""
self._resources = resources
@property
def verbs(self):
"""Gets the verbs of this V1PolicyRule. # noqa: E501
Verbs is a list of Verbs that apply to ALL the ResourceKinds and AttributeRestrictions contained in this rule. '*' represents all verbs. # noqa: E501
:return: The verbs of this V1PolicyRule. # noqa: E501
:rtype: list[str]
"""
return self._verbs
@verbs.setter
def verbs(self, verbs):
"""Sets the verbs of this V1PolicyRule.
Verbs is a list of Verbs that apply to ALL the ResourceKinds and AttributeRestrictions contained in this rule. '*' represents all verbs. # noqa: E501
:param verbs: The verbs of this V1PolicyRule. # noqa: E501
:type: list[str]
"""
if self.local_vars_configuration.client_side_validation and verbs is None: # noqa: E501
raise ValueError("Invalid value for `verbs`, must not be `None`") # noqa: E501
self._verbs = verbs
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, V1PolicyRule):
return False
return self.to_dict() == other.to_dict()
def __ne__(self, other):
"""Returns true if both objects are not equal"""
if not isinstance(other, V1PolicyRule):
return True
return self.to_dict() != other.to_dict()
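# A minimal usage sketch (the group, resource and verb values are
# illustrative):
#
#   rule = V1PolicyRule(api_groups=[""], resources=["pods"],
#                       verbs=["get", "list", "watch"])
#   rule.to_dict()["verbs"]   # -> ['get', 'list', 'watch']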
| 36.661017
| 423
| 0.636385
|
79670921ffeca430a258b090b06d90f8406d663e
| 4,922
|
py
|
Python
|
avaya/loadIQ.py
|
nd1/odds-and-ends
|
8be5c6dfab733038bb3b7a35eaa6ae4f630d2ace
|
[
"MIT"
] | null | null | null |
avaya/loadIQ.py
|
nd1/odds-and-ends
|
8be5c6dfab733038bb3b7a35eaa6ae4f630d2ace
|
[
"MIT"
] | null | null | null |
avaya/loadIQ.py
|
nd1/odds-and-ends
|
8be5c6dfab733038bb3b7a35eaa6ae4f630d2ace
|
[
"MIT"
] | null | null | null |
'''
Load Avaya IQ report data to a MS SQL db.
Nicole Donnelly 20170530
'''
import os
import sys
import pandas as pd
from sqlalchemy import create_engine, text, types
def convert_time(x):
# change a hh:mm:ss string to number of seconds
times = x.split(':')
return (3600*int(times[0])+60*int(times[1]))+int(times[2])
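# e.g. convert_time('01:02:03') -> 3600 + 120 + 3 == 3723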
def get_count(sql_connection):
# get the current number of rows in the table, if it exists
try:
sql = text('select count(1) from call_data')
result = sql_connection.execute(sql)
names = []
for row in result:
names.append(row[0])
current_row_count = names[0]
except:
current_row_count = 0
return current_row_count
def load_data(data):
# define variables
# labels for the dataframe
col_label = ['start', 'duration', 'customer', 'direction',
'first_routing', 'first_queue', 'disposition', 'wait',
'self_service', 'active', 'on_hold', 'contact_id', 'source',
'agent']
# column types for the database using sqlalchemy types
col_types = {'start': types.DateTime, 'duration': types.Integer,
'customer': types.String(32), 'direction': types.String(32),
'first_routing': types.String(128),
'first_queue': types.String(128),
'disposition': types.String(128), 'wait': types.Integer,
'self_service': types.Integer, 'active': types.Integer,
'on_hold': types.Integer, 'contact_id': types.Integer,
'source': types.String(8), 'agent': types.String(128)}
# define the db connection
# replace host, port, db to run
engine = create_engine('mssql+pyodbc://@HOST:PORT/DB?driver=SQL+Server')
connection = engine.connect()
# load the file to a dataframe. skip header and footer data in the report.
# convert the time fields to number of seconds
df = pd.read_excel('./exported_data/' + data, header=None, skiprows=13,
skip_footer=2, names=col_label,
converters={1: convert_time, 7: convert_time,
8: convert_time, 9: convert_time,
10: convert_time})
# file contains merged cells. use fillna to fill in the missing values
# by default, pandas will put the value in the first row of the merge
# and populate the rest as na. ffill will fill down to next value
df.disposition = pd.Series(df.disposition).fillna(method='ffill')
# get the current row count in table for error checking purposes
current_row_count = get_count(connection)
print("The current table has {0} rows. You are adding {1} rows."
.format(current_row_count, len(df)))
# write the data to the db using pandas to_sql
print("Updating the db.")
df.to_sql(name='call_data', con=connection, index=False,
if_exists='append', dtype=col_types)
new_row_count = get_count(connection)
# close the connection and verify the results
connection.close()
print("The table now has {0} rows.".format(new_row_count))
if (current_row_count + len(df)) == new_row_count:
print("Table updated as expected.")
return 0
else:
print("Something went wrong in the update. Expected {0} rows but have\
{1}.".format((current_row_count + len(df)), new_row_count))
return 1
if __name__ == '__main__':
# check if there are files to Load
if len(os.listdir('./exported_data')) == 0:
print("There are no files to load.")
sys.exit()
else:
for new_report in os.listdir('./exported_data'):
# load all excel files
if new_report.endswith('.xls') or new_report.endswith('.xlsx'):
print("Begin processing: {0}".format(new_report))
load_status = load_data(new_report)
if load_status == 0:
# if the load was successful, move the loaded file
print("Moving {0} to the processed directory."
.format(new_report))
os.rename('./exported_data/' + new_report,
'./processed_data/' + new_report)
else:
# if the load was not successful, do not move the file
print("Left {0} in the exported_data directory. Please rev\
iew for errors.".format(new_report))
# create an error log of files that did not load properly
log = 'error.log'
if os.path.exists(log):
append_write = 'a'
else:
append_write = 'w'
f = open(log, append_write)
f.write(new_report + '\n')
f.close()
| 39.063492
| 79
| 0.58208
|
a5edf1e3a1045a2c7f05264358693e44141638f2
| 532
|
py
|
Python
|
jms_oidc_rp/__init__.py
|
east-district-f2c-shangqi/jumpserver-django-oidc-rp
|
d3df68f21d002b07ad7d6776b38256a042ea3831
|
[
"MIT"
] | null | null | null |
jms_oidc_rp/__init__.py
|
east-district-f2c-shangqi/jumpserver-django-oidc-rp
|
d3df68f21d002b07ad7d6776b38256a042ea3831
|
[
"MIT"
] | null | null | null |
jms_oidc_rp/__init__.py
|
east-district-f2c-shangqi/jumpserver-django-oidc-rp
|
d3df68f21d002b07ad7d6776b38256a042ea3831
|
[
"MIT"
] | null | null | null |
__version__ = '0.3.7.5'
# Deploying a new version:
# 1. remove the ".dev" from the current version number
# 2. create a new commit (eg. "Prepared 0.1.1 release")
# 3. run "git tag x.y.z" (eg. "git tag 0.1.1")
# 4. run "python setup.py sdist bdist_wheel upload"
# 5. bump the version (increment the version and append a ".dev" to it). eg. "0.1.2.dev"
# 6. create a new commit (eg. "Bumped version to 0.1.2.dev")
# 7. run "git push" and "git push --tags"
default_app_config = 'jms_oidc_rp.apps.JumpServerOIDCRelyingPartyAppConfig'
| 38
| 88
| 0.68985
|
ef97678d071b0ca4a00b4fb16f80165b710b8f79
| 9,663
|
py
|
Python
|
services/core/MasterDriverAgent/tests/test_modbus_driver.py
|
craig8/volttron
|
2a954311d323effa3b79c2a53f6e8c3bb9664e1c
|
[
"Apache-2.0",
"BSD-2-Clause"
] | 1
|
2020-06-08T16:54:28.000Z
|
2020-06-08T16:54:28.000Z
|
services/core/MasterDriverAgent/tests/test_modbus_driver.py
|
craig8/volttron
|
2a954311d323effa3b79c2a53f6e8c3bb9664e1c
|
[
"Apache-2.0",
"BSD-2-Clause"
] | 8
|
2016-10-07T22:49:28.000Z
|
2022-02-23T00:57:58.000Z
|
services/core/MasterDriverAgent/tests/test_modbus_driver.py
|
craig8/volttron
|
2a954311d323effa3b79c2a53f6e8c3bb9664e1c
|
[
"Apache-2.0",
"BSD-2-Clause"
] | null | null | null |
import pytest
import gevent
import logging
import time
from struct import pack, unpack
from volttron.platform import get_services_core, jsonapi
from master_driver.interfaces.modbus_tk.server import Server
from master_driver.interfaces.modbus_tk.client import Client, Field
from master_driver.interfaces.modbus_tk import helpers
from volttrontesting.utils.utils import get_rand_ip_and_port
from volttron.platform.agent.known_identities import PLATFORM_DRIVER
logger = logging.getLogger(__name__)
IP, _port = get_rand_ip_and_port().split(":")
PORT = int(_port)
DRIVER_CONFIG = {
"driver_config": {
"device_address": IP,
"port": PORT,
"slave_id": 1
},
"driver_type": "modbus",
"registry_config": "config://modbus.csv",
"interval": 120,
"timezone": "UTC"
}
# This registry configuration contains only required fields
REGISTRY_CONFIG_STRING = """Volttron Point Name,Units,Modbus Register,Writable,Point Address
BigUShort,PPM,>H,TRUE,0
BigUInt,PPM,>I,TRUE,1
BigULong,PPM,>Q,TRUE,3
BigShort,PPM,>h,TRUE,7
BigInt,PPM,>i,TRUE,8
BigFloat,PPM,>f,TRUE,10
BigLong,PPM,>q,TRUE,12
LittleUShort,PPM,<H,TRUE,100
LittleUInt,PPM,<I,TRUE,101
LittleULong,PPM,<Q,TRUE,103
LittleShort,PPM,<h,TRUE,107
LittleInt,PPM,<i,TRUE,108
LittleFloat,PPM,<f,TRUE,110
LittleLong,PPM,<q,TRUE,112"""
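# The "Modbus Register" column above uses Python struct format strings: the
# first character selects byte order ('>' big-endian, '<' little-endian) and
# the second the C type (H = unsigned short, I = unsigned int, Q = unsigned
# long long, h/i/q = the signed counterparts, f = float). A quick sketch of
# what that means at the byte level:
#     from struct import pack
#     pack('>H', 1)   # -> b'\x00\x01'  (big-endian unsigned short)
#     pack('<H', 1)   # -> b'\x01\x00'  (little-endian unsigned short)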
# Register values dictionary for testing set_point and get_point
registers_dict = {"BigUShort": 2**16-1,
"BigUInt": 2**32-1,
"BigULong": 2**64-1,
"BigShort": -(2**16)//2,
"BigInt": -(2**32)//2,
"BigFloat": -1234.0,
"BigLong": -(2**64)//2,
"LittleUShort": 0,
"LittleUInt": 0,
"LittleULong": 0,
"LittleShort": (2**16)//2-1,
"LittleInt": (2**32)//2-1,
"LittleFloat": 1.0,
"LittleLong": (2**64)//2-1
}
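# These are the extreme representable values for each register format above,
# e.g. 2**16-1 (65535) is the largest unsigned short and -(2**16)//2 (-32768)
# is the smallest signed short, so the set_point/get_point round trip below
# exercises the full range of every type.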
@pytest.fixture(scope="module")
def agent(request, volttron_instance):
"""
Build MasterDriverAgent, add Modbus driver & csv configurations
"""
# Build master driver agent
md_agent = volttron_instance.build_agent(identity="test_md_agent")
capabilities = {'edit_config_store': {'identity': PLATFORM_DRIVER}}
volttron_instance.add_capabilities(md_agent.core.publickey, capabilities)
gevent.sleep(1)
# Clean out master driver configurations
# wait for it to return before adding new config
md_agent.vip.rpc.call('config.store',
'manage_delete_store',
PLATFORM_DRIVER).get()
# Add driver configurations
md_agent.vip.rpc.call('config.store',
'manage_store',
PLATFORM_DRIVER,
'devices/modbus',
jsonapi.dumps(DRIVER_CONFIG),
config_type='json')
# Add csv configurations
md_agent.vip.rpc.call('config.store',
'manage_store',
PLATFORM_DRIVER,
'modbus.csv',
REGISTRY_CONFIG_STRING,
config_type='csv')
master_uuid = volttron_instance.install_agent(
agent_dir=get_services_core("MasterDriverAgent"),
config_file={},
start=True)
gevent.sleep(10) # wait for the agent to start and start the devices
def stop():
"""Stop master driver agent
"""
volttron_instance.stop_agent(master_uuid)
md_agent.core.stop()
request.addfinalizer(stop)
return md_agent
class PPSPi32Client(Client):
"""
Define some registers for PPSPi32Client
"""
def __init__(self, *args, **kwargs):
super(PPSPi32Client, self).__init__(*args, **kwargs)
byte_order = helpers.BIG_ENDIAN
addressing = helpers.ADDRESS_OFFSET
BigUShort = Field("BigUShort", 0, helpers.USHORT, 'PPM', 2, helpers.no_op,
helpers.REGISTER_READ_WRITE, helpers.OP_MODE_READ_WRITE)
BigUInt = Field("BigUInt", 1, helpers.UINT, 'PPM', 2, helpers.no_op,
helpers.REGISTER_READ_WRITE, helpers.OP_MODE_READ_WRITE)
BigULong = Field("BigULong", 3, helpers.UINT64, 'PPM', 2, helpers.no_op,
helpers.REGISTER_READ_WRITE, helpers.OP_MODE_READ_WRITE)
BigShort = Field("BigShort", 7, helpers.SHORT, 'PPM', 2, helpers.no_op,
helpers.REGISTER_READ_WRITE, helpers.OP_MODE_READ_WRITE)
BigInt = Field("BigInt", 8, helpers.INT, 'PPM', 2, helpers.no_op,
helpers.REGISTER_READ_WRITE, helpers.OP_MODE_READ_WRITE)
BigFloat = Field("BigFloat", 10, helpers.FLOAT, 'PPM', 2, helpers.no_op,
helpers.REGISTER_READ_WRITE, helpers.OP_MODE_READ_WRITE)
BigLong = Field("BigLong", 12, helpers.INT64, 'PPM', 2, helpers.no_op,
helpers.REGISTER_READ_WRITE, helpers.OP_MODE_READ_WRITE)
LittleUShort = Field(
"LittleUShort", 100, helpers.USHORT, 'PPM', 2, helpers.no_op,
helpers.REGISTER_READ_WRITE, helpers.OP_MODE_READ_WRITE)
LittleUInt = Field(
"LittleUInt", 101, helpers.UINT, 'PPM', 2, helpers.no_op,
helpers.REGISTER_READ_WRITE, helpers.OP_MODE_READ_WRITE)
LittleULong = Field(
"LittleULong", 103, helpers.UINT64, 'PPM', 2, helpers.no_op,
helpers.REGISTER_READ_WRITE, helpers.OP_MODE_READ_WRITE)
LittleShort = Field(
"LittleShort", 107, helpers.SHORT, 'PPM', 2, helpers.no_op,
helpers.REGISTER_READ_WRITE, helpers.OP_MODE_READ_WRITE)
LittleInt = Field(
"LittleInt", 108, helpers.INT, 'PPM', 2, helpers.no_op,
helpers.REGISTER_READ_WRITE, helpers.OP_MODE_READ_WRITE)
LittleFloat = Field(
"LittleFloat", 110, helpers.FLOAT, 'PPM', 2, helpers.no_op,
helpers.REGISTER_READ_WRITE, helpers.OP_MODE_READ_WRITE)
LittleLong = Field(
"LittleLong", 112, helpers.INT64, 'PPM', 2, helpers.no_op,
helpers.REGISTER_READ_WRITE, helpers.OP_MODE_READ_WRITE)
@pytest.fixture
def modbus_server(request):
modbus_server = Server(address=IP, port=PORT)
modbus_server.define_slave(1, PPSPi32Client, unsigned=True)
# Set default values for the server's registers
modbus_server.set_values(1, PPSPi32Client().field_by_name("BigUShort"), 0)
modbus_server.set_values(1, PPSPi32Client().field_by_name("BigUInt"), 0)
modbus_server.set_values(1, PPSPi32Client().field_by_name("BigULong"), 0)
modbus_server.set_values(1, PPSPi32Client().field_by_name("BigShort"), 0)
modbus_server.set_values(1, PPSPi32Client().field_by_name("BigInt"), 0)
modbus_server.set_values(1, PPSPi32Client().field_by_name("BigFloat"), 0)
modbus_server.set_values(1, PPSPi32Client().field_by_name("BigLong"), 0)
modbus_server.set_values(1, PPSPi32Client().field_by_name("LittleUShort"), unpack('<H', pack('>H', 0)))
modbus_server.set_values(1, PPSPi32Client().field_by_name("LittleUInt"), unpack('<HH', pack('>I', 0)))
modbus_server.set_values(1, PPSPi32Client().field_by_name("LittleULong"), unpack('<HHHH', pack('>Q', 0)))
modbus_server.set_values(1, PPSPi32Client().field_by_name("LittleShort"), unpack('<H', pack('>h', 0)))
modbus_server.set_values(1, PPSPi32Client().field_by_name("LittleInt"), unpack('<HH', pack('>i', 0)))
modbus_server.set_values(1, PPSPi32Client().field_by_name("LittleFloat"), unpack('<HH', pack('>f', 0)))
modbus_server.set_values(1, PPSPi32Client().field_by_name("LittleLong"), unpack('<HHHH', pack('>q', 0)))
modbus_server.start()
time.sleep(1)
yield modbus_server
modbus_server.stop()
@pytest.mark.usefixtures("modbus_server")
class TestModbusDriver:
"""
Regression tests for the modbus driver interface.
"""
def get_point(self, agent, point_name):
"""
Issue a get_point RPC call for the named point and return the result.
@param agent: The test Agent.
@param point_name: The name of the point to query.
@return: The returned value from the RPC call.
"""
return agent.vip.rpc.call(PLATFORM_DRIVER, 'get_point', 'modbus', point_name).get(timeout=10)
def set_point(self, agent, point_name, point_value):
"""
Issue a set_point RPC call for the named point and value, and return
the result.
@param agent: The test Agent.
@param point_name: The name of the point to query.
@param point_value: The value to set on the point.
@return: The returned value from the RPC call.
"""
return agent.vip.rpc.call(PLATFORM_DRIVER, 'set_point', 'modbus', point_name, point_value).get(timeout=10)
def scrape_all(self, agent):
"""
Issue a scrape_all RPC call for the modbus device and return the result.
@param agent: The test Agent.
@return: The returned value from the RPC call.
"""
return agent.vip.rpc.call(PLATFORM_DRIVER, 'scrape_all', 'modbus').get(timeout=10)
def test_default_values(self, agent):
"""
With the default server settings, all register values are 0.
"""
default_values = self.scrape_all(agent)
assert type(default_values) is dict
for key in default_values.keys():
assert default_values[key] == 0  # 0 == 0.0 in Python, so this also covers the float registers
def test_set_point(self, agent):
for key in registers_dict.keys():
self.set_point(agent, key, registers_dict[key])
assert self.get_point(agent, key) == registers_dict[key]
assert self.scrape_all(agent) == registers_dict
| 39.765432
| 114
| 0.650523
|
bac7fd08bde7e603fc5e9e032cbf0dbb92560d7e
| 84
|
py
|
Python
|
apps/setting/helper.py
|
zibinklin/weekly_self-evaluation
|
ed3c15e8261014ecc40068999e385860c0f8166e
|
[
"MIT"
] | null | null | null |
apps/setting/helper.py
|
zibinklin/weekly_self-evaluation
|
ed3c15e8261014ecc40068999e385860c0f8166e
|
[
"MIT"
] | null | null | null |
apps/setting/helper.py
|
zibinklin/weekly_self-evaluation
|
ed3c15e8261014ecc40068999e385860c0f8166e
|
[
"MIT"
] | null | null | null |
import pandas as pd
def set(param):
# NOTE: stub helper, not yet implemented; the name shadows the built-in set().
activity = ''
achievement = ''
| 12
| 19
| 0.595238
|
b6f9f5f144a746fa3edfe7dc301f70916132991f
| 4,880
|
py
|
Python
|
volatility/volatility/plugins/gui/vtypes/vista.py
|
williamclot/MemoryVisualizer
|
2ff9f30f07519d6578bc36c12f8d08acc9cb4383
|
[
"MIT"
] | 2
|
2018-07-16T13:30:40.000Z
|
2018-07-17T12:02:05.000Z
|
volatility/volatility/plugins/gui/vtypes/vista.py
|
williamclot/MemoryVisualizer
|
2ff9f30f07519d6578bc36c12f8d08acc9cb4383
|
[
"MIT"
] | null | null | null |
volatility/volatility/plugins/gui/vtypes/vista.py
|
williamclot/MemoryVisualizer
|
2ff9f30f07519d6578bc36c12f8d08acc9cb4383
|
[
"MIT"
] | null | null | null |
# Volatility
# Copyright (C) 2007-2013 Volatility Foundation
# Copyright (C) 2010,2011,2012 Michael Hale Ligh <michael.ligh@mnin.org>
#
# This file is part of Volatility.
#
# Volatility is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# Volatility is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Volatility. If not, see <http://www.gnu.org/licenses/>.
#
import volatility.obj as obj
import volatility.plugins.gui.vtypes.win7_sp0_x64_vtypes_gui as win7_sp0_x64_vtypes_gui
import volatility.plugins.gui.constants as consts
class Vista2008x64GuiVTypes(obj.ProfileModification):
before = ["XP2003x64BaseVTypes", "Win32Kx64VTypes"]
conditions = {'os': lambda x: x == 'windows',
'memory_model': lambda x: x == '64bit',
'major': lambda x: x == 6,
'minor': lambda x: x == 0}
def modification(self, profile):
# Enough stayed the same between Vista/2008 and Windows 7,
## so we can re-use the Windows 7 types. This is a bit unconventional
## because typically, when we re-use, we do it forward (i.e. use
## an older OS's types for a newer OS). However since the win32k.sys
## vtypes were never public until Windows 7, we're re-using backward.
profile.vtypes.update(win7_sp0_x64_vtypes_gui.win32k_types)
# We don't want to overlay, or HeEntrySize from Win7 will
# appear to be a valid member of the Vista structure.
profile.vtypes.update({
'tagSHAREDINFO' : [ 0x238, {
'psi' : [ 0x0, ['pointer64', ['tagSERVERINFO']]],
'aheList' : [ 0x8, ['pointer64', ['_HANDLEENTRY']]],
'ulSharedDelta' : [ 0x18, ['unsigned long long']],
}],
})
profile.merge_overlay({
# From Win7SP0x64
'tagDESKTOP' : [ None, {
'pheapDesktop' : [ 0x78, ['pointer64', ['tagWIN32HEAP']]],
'ulHeapSize' : [ 0x80, ['unsigned long']],
}],
'tagTHREADINFO' : [ None, {
'ppi' : [ 0x68, ['pointer64', ['tagPROCESSINFO']]],
'PtiLink' : [ 0x160, ['_LIST_ENTRY']],
}],
'tagHOOK': [ None, {
'flags': [ None, ['Flags', {'bitmap': consts.HOOK_FLAGS}]]
}],
'_HANDLEENTRY': [ None, {
'bType': [ None, ['Enumeration', dict(target = 'unsigned char', choices = consts.HANDLE_TYPE_ENUM)]],
}],
'tagWINDOWSTATION' : [ None, {
'pClipBase' : [ None, ['pointer', ['array', lambda x : x.cNumClipFormats, ['tagCLIP']]]],
}],
'tagCLIP': [ None, {
'fmt' : [ 0x0, ['Enumeration', dict(target = 'unsigned long', choices = consts.CLIPBOARD_FORMAT_ENUM)]],
}],
})
class Vista2008x86GuiVTypes(obj.ProfileModification):
before = ["XP2003x86BaseVTypes", "Win32Kx86VTypes"]
conditions = {'os': lambda x: x == 'windows',
'memory_model': lambda x: x == '32bit',
'major': lambda x: x == 6,
'minor': lambda x: x == 0}
def modification(self, profile):
profile.merge_overlay({
# The size is very important since we carve from bottom up
'tagWINDOWSTATION' : [ 0x54, {
'pClipBase' : [ None, ['pointer', ['array', lambda x : x.cNumClipFormats, ['tagCLIP']]]],
}],
'tagDESKTOP' : [ None, {
'PtiList' : [ 0x64, ['_LIST_ENTRY']],
'hsectionDesktop' : [ 0x3c, ['pointer', ['void']]],
'pheapDesktop' : [ 0x40, ['pointer', ['tagWIN32HEAP']]],
'ulHeapSize' : [ 0x44, ['unsigned long']],
}],
'tagTHREADINFO' : [ None, { # same as win2003x86
'PtiLink' : [ 0xB0, ['_LIST_ENTRY']],
'fsHooks' : [ 0x9C, ['unsigned long']],
'aphkStart' : [ 0xF8, ['array', 16, ['pointer', ['tagHOOK']]]],
}],
'tagSERVERINFO' : [ None, {
'cHandleEntries' : [ 0x4, ['unsigned long']],
'cbHandleTable' : [ 0x1c8, ['unsigned long']],
}],
'tagSHAREDINFO' : [ 0x11c, { # From Win7SP0x86
'psi' : [ 0x0, ['pointer', ['tagSERVERINFO']]],
'aheList' : [ 0x4, ['pointer', ['_HANDLEENTRY']]],
'ulSharedDelta' : [ 0xC, ['unsigned long']],
}],
'tagCLIP' : [ 16, { # just a size change
}]})
| 42.807018
| 116
| 0.559836
|
b858c29359da81b8929f41b33136b158111e7e8d
| 2,698
|
py
|
Python
|
python/ray/tune/result.py
|
haje01/ray
|
e1c9a869ed3eb83f41f142b841854192ce65f0c3
|
[
"Apache-2.0"
] | 1
|
2019-10-07T17:20:01.000Z
|
2019-10-07T17:20:01.000Z
|
python/ray/tune/result.py
|
GitHubBeinner/ray
|
d20696300e2c4aad90c0deece2b3af7e70fc6056
|
[
"Apache-2.0"
] | null | null | null |
python/ray/tune/result.py
|
GitHubBeinner/ray
|
d20696300e2c4aad90c0deece2b3af7e70fc6056
|
[
"Apache-2.0"
] | 2
|
2019-09-04T13:27:51.000Z
|
2019-09-17T04:20:38.000Z
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
# yapf: disable
# __sphinx_doc_begin__
# (Optional/Auto-filled) Whether training is terminated. Auto-filled only if not provided.
DONE = "done"
# (Optional) Enum for user controlled checkpoint
SHOULD_CHECKPOINT = "should_checkpoint"
# (Auto-filled) The hostname of the machine hosting the training process.
HOSTNAME = "hostname"
# (Auto-filled) The auto-assigned id of the trial.
TRIAL_ID = "trial_id"
# (Auto-filled) The node ip of the machine hosting the training process.
NODE_IP = "node_ip"
# (Auto-filled) The pid of the training process.
PID = "pid"
# (Optional) Mean reward for current training iteration
EPISODE_REWARD_MEAN = "episode_reward_mean"
# (Optional) Mean loss for training iteration
MEAN_LOSS = "mean_loss"
# (Optional) Mean accuracy for training iteration
MEAN_ACCURACY = "mean_accuracy"
# Number of episodes in this iteration.
EPISODES_THIS_ITER = "episodes_this_iter"
# (Optional/Auto-filled) Accumulated number of episodes for this experiment.
EPISODES_TOTAL = "episodes_total"
# Number of timesteps in this iteration.
TIMESTEPS_THIS_ITER = "timesteps_this_iter"
# (Auto-filled) Accumulated number of timesteps for this entire experiment.
TIMESTEPS_TOTAL = "timesteps_total"
# (Auto-filled) Time in seconds this iteration took to run.
# This may be overridden to replace the system-computed time difference.
TIME_THIS_ITER_S = "time_this_iter_s"
# (Auto-filled) Accumulated time in seconds for this entire experiment.
TIME_TOTAL_S = "time_total_s"
# (Auto-filled) The index of this training iteration.
TRAINING_ITERATION = "training_iteration"
# __sphinx_doc_end__
# yapf: enable
# __duplicate__ is a magic keyword used internally to
# avoid double-logging results when using the Function API.
RESULT_DUPLICATE = "__duplicate__"
# Where Tune writes result files by default
DEFAULT_RESULTS_DIR = (os.environ.get("TUNE_RESULT_DIR")
or os.path.expanduser("~/ray_results"))
# Meta file about status under each experiment directory, can be
# parsed by automlboard if exists.
JOB_META_FILE = "job_status.json"
# Meta file about status under each trial directory, can be parsed
# by automlboard if exists.
EXPR_META_FILE = "trial_status.json"
# File that stores parameters of the trial.
EXPR_PARAM_FILE = "params.json"
# Pickle File that stores parameters of the trial.
EXPR_PARAM_PICKLE_FILE = "params.pkl"
# File that stores the progress of the trial.
EXPR_PROGRESS_FILE = "progress.csv"
# File that stores results of the trial.
EXPR_RESULT_FILE = "result.json"
# Config prefix when using Analysis.
CONFIG_PREFIX = "config/"
| 29.977778
| 77
| 0.778354
|
6b7c7524366729aac07ffa9c4f5690c6edc372a4
| 161
|
py
|
Python
|
src/Services/FaceRecognition/face_recognition/engines/__init__.py
|
Amir-Shamsi/facial-recognition-login-api
|
e37e9cdf63f98a53693b3d07a0fb79b1304d4ab9
|
[
"CC0-1.0"
] | 8
|
2019-02-25T14:09:21.000Z
|
2021-12-24T07:48:18.000Z
|
src/Services/FaceRecognition/face_recognition/engines/__init__.py
|
Amir-Shamsi/facial-recognition-login-api
|
e37e9cdf63f98a53693b3d07a0fb79b1304d4ab9
|
[
"CC0-1.0"
] | 7
|
2019-03-20T14:40:34.000Z
|
2021-10-03T13:13:09.000Z
|
src/Services/FaceRecognition/face_recognition/engines/__init__.py
|
Amir-Shamsi/facial-recognition-login-api
|
e37e9cdf63f98a53693b3d07a0fb79b1304d4ab9
|
[
"CC0-1.0"
] | 3
|
2021-02-10T20:25:39.000Z
|
2022-02-10T14:11:54.000Z
|
"""
Defines engines for processing requests/responses to/from FacePP.
"""
from .base import BaseEngine
from .sync import SyncEngine
DefaultEngine = SyncEngine
| 17.888889
| 65
| 0.78882
|
3d8324d8985a6fa9ef3000282896c3d854c01fd9
| 1,743
|
py
|
Python
|
studies/curso_em_video/ex059-criando-um-menu-de-opcoes.py
|
alexander-colaneri/Python
|
fbc84a2818d8324b712014e1a5abfc01474e49ea
|
[
"MIT"
] | null | null | null |
studies/curso_em_video/ex059-criando-um-menu-de-opcoes.py
|
alexander-colaneri/Python
|
fbc84a2818d8324b712014e1a5abfc01474e49ea
|
[
"MIT"
] | null | null | null |
studies/curso_em_video/ex059-criando-um-menu-de-opcoes.py
|
alexander-colaneri/Python
|
fbc84a2818d8324b712014e1a5abfc01474e49ea
|
[
"MIT"
] | null | null | null |
class Calculadora():
'''Performs calculations on the entered numbers, according to the given options.'''
def __init__(self):
self.x = 0
self.y = 0
self.menu = '''Escolha uma opção:
[ 1 ] somar
[ 2 ] multiplicar
[ 3 ] maior
[ 4 ] novos números
[ 5 ] sair do programa'''
def iniciar(self):
'''Main menu loop of the program.'''
self.definir_argumentos()
while True:
print(self.menu)
opcao = input()
if opcao == '1':
print(f'Soma de {self.x} por {self.y} é {self.somar(self.x, self.y)}\n')
elif opcao == '2':
print(f'Multiplicação de {self.x} por {self.y} é {self.multiplicar(self.x, self.y)}\n')
elif opcao == '3':
print(f'O maior número entre {self.x} e {self.y} é {self.indicar_maior_numero(self.x, self.y)}\n')
elif opcao == '4':
self.definir_argumentos()
elif opcao == '5':
print('Tenha um bom dia!')
break
else:
print('Opção Inválida!')
def definir_argumentos(self):
'''Prompts the user for the two numbers to operate on.'''
print('Digite os números: ')
self.x = int(input())
self.y = int(input())
def somar(self, x, y):
'''Sum of two numbers.'''
return x + y
def multiplicar(self, x, y):
'''Product of two numbers.'''
return x * y
def indicar_maior_numero(self, x, y):
'''Returns the larger of the two numbers.'''
if x > y:
return x
elif x < y:
return y
else:
return 'nenhum dos dois.'
calculadora = Calculadora()
calculadora.iniciar()
| 29.542373
| 114
| 0.506598
|
be26ce63456970c81cd97f0c7c31be2b7239cc16
| 799
|
py
|
Python
|
apps/badge/tests.py
|
lestrato/badgepack
|
7432c0ead1d5f63dd509620a0bb06bd76828b590
|
[
"MS-PL"
] | 3
|
2016-10-21T01:35:46.000Z
|
2020-11-07T01:20:05.000Z
|
apps/badge/tests.py
|
lestrato/badgepack
|
7432c0ead1d5f63dd509620a0bb06bd76828b590
|
[
"MS-PL"
] | 31
|
2016-10-31T19:28:53.000Z
|
2017-01-19T16:55:49.000Z
|
apps/badge/tests.py
|
lestrato/badgepack
|
7432c0ead1d5f63dd509620a0bb06bd76828b590
|
[
"MS-PL"
] | 1
|
2020-11-07T01:20:07.000Z
|
2020-11-07T01:20:07.000Z
|
from django.test import TestCase
from badge.models import BadgeClass, BadgeInstance
from badge.forms import BadgeCreationForm, UserBadgeAssignForm, OneBadgeAssignForm, BadgeSetAvailabilityForm
from django.core.files.uploadedfile import SimpleUploadedFile
#
# class BadgeClassTestCase(TestCase):
#
# def test_forms(self):
# test_image.image = SimpleUploadedFile(
# name='test_image.jpg',
# content=open(image_path, 'rb').read(),
# content_type='image/jpeg'
# )
# badge_creation_form_data = {
# 'image':test_image,
# 'name':'test_name',
# 'description':'test_description',
# }
# BCForm = BadgeCreationForm(data = badge_creation_form_data)
# self.assertTrue(BCForm.is_valid())
| 34.73913
| 108
| 0.659574
|
ad5896c91cda90df3886cba674cde084cb28c8ba
| 5,375
|
py
|
Python
|
gta_driver.py
|
GodelBose/GTAV_self_driving_network
|
14156ab2f5f7033e6a2566dd58c9e6692088adf5
|
[
"MIT"
] | 1
|
2017-08-21T11:44:03.000Z
|
2017-08-21T11:44:03.000Z
|
gta_driver.py
|
GodelBose/GTAV_self_driving_network
|
14156ab2f5f7033e6a2566dd58c9e6692088adf5
|
[
"MIT"
] | null | null | null |
gta_driver.py
|
GodelBose/GTAV_self_driving_network
|
14156ab2f5f7033e6a2566dd58c9e6692088adf5
|
[
"MIT"
] | null | null | null |
import numpy as np
from grabscreen import grab_screen
import cv2
import time
import os
import pyxinput
import matplotlib.pyplot as plt
import pygame
from getkeys import key_check
from directkeys import PressKey,ReleaseKey, W, A, S, D
from keras.models import Model
class GTA_driver:
def __init__(self, data_gen, epochs, load_model_name, save_model_name, batch_size, compiled_model, cam_resolution, frame_rate, cam_region=None, map_region=None, speed_region=None):
self.data_gen = data_gen
self.epochs = epochs
self.load_model_name = load_model_name
self.save_model_name = save_model_name
self.batch_size = batch_size
self.model = compiled_model
self.save_per_iteration = 10
self.cam_resolution = cam_resolution
self.inputs = {'map_view':data_gen.map_view, 'cam_view':data_gen.cam_view, 'speed_view':data_gen.speed_view}
self.clock = pygame.time.Clock()
self.frame_rate = frame_rate
self.cam_region = cam_region
self.speed_region = speed_region
self.map_region = map_region
def train_model(self):
iteration = 0
for epoch in range(self.epochs):
instances = 0
print("Starting epoch %d"%epoch)
while True:
X,y = self.data_gen.yield_data()
try:
self.model.fit(X, y, epochs=1, batch_size=self.batch_size, shuffle=True)
instances += self.data_gen.files_per_batch * 550
if iteration % self.save_per_iteration == 0:
print('SAVING MODEL!')
np.save(self.save_model_name, self.model.get_weights())
plt.plot(self.model.predict(X))
plt.savefig("temp_imgs/predictions %d"%0)
plt.close()
print(y[0].shape)
plt.plot(y[0][:,0])
plt.plot(y[0][:,1])
plt.savefig("temp_imgs/labels %d"%0)
plt.close()
iteration += 1
except Exception as e:
print(str(e))
# epoch end
if instances >= 550 * self.data_gen.total_files:
break
def load_model(self):
if self.load_model_name:
self.model.set_weights(np.load(self.load_model_name))
x = self.model
def predict(self,X):
return self.model.predict(X)
def round(self, x):
if x < -1:
return -1
if x > 1:
return 1
else:
return x
def visualize_filters(self, images):
self.load_model()
visualizer = Model(inputs=self.model.input, outputs=self.model.get_layer('conv2d_2').output)
visualizer.compile(optimizer='rmsprop', loss='mean_squared_error')
i = 0
screen = [x[0] for x in images]
speed = [x[2] for x in images]
for image, speed in zip(screen, speed):
if self.data_gen.view_resize:
image = cv2.resize(image, (self.data_gen.view_resize))
speed = speed[:, :, None]
filters = visualizer.predict([image[None,:,:,:], speed[None,:,:,:]])
f = np.random.randint(0,filters.shape[3]-4)
plt.imshow(filters[0][:,:,f:f+3])#np.save('temp_imgs/filter_{}'.format(i), filters)
plt.savefig('temp_imgs/filter_{}'.format(i))
plt.close()
plt.imshow(image)#np.save('temp_imgs/filter_{}'.format(i), filters)
plt.savefig('temp_imgs/filter_{}_image'.format(i))
plt.close()
i += 1
def make_input(self, ax_predictions, speed_predictions, controller):
ax_value = {0:'AxisLx', 1:'AxisLy', 2:'AxisRx', 3:'AxisRy', 4:'TriggerR', 5:'TriggerL'}
for j,i in enumerate(self.data_gen.axis_indices):
# make sure only forward or backward acceleration is applied
if i==5:
controller.set_value(ax_value[i], -1)
'''
if self.round(ax_predictions[j]) > self.round(ax_predictions[j-1]):
controller.set_value(ax_value[i], self.round(ax_predictions[j]))
controller.set_value(ax_value[4], -1)
'''
else:
controller.set_value(ax_value[i], self.round(ax_predictions[j]))
def live_driving(self):
controller = pyxinput.vController()
paused = False
old_screens = []
while(True):
if not paused:
map_screen = grab_screen(region=self.map_region)
map_screen = cv2.cvtColor(map_screen, cv2.COLOR_BGR2RGB)
cam_screen = grab_screen(region=self.cam_region)
cam_screen = cv2.cvtColor(cam_screen, cv2.COLOR_BGR2RGB)
cam_screen = cv2.resize(cam_screen, (360,240))
if self.data_gen.view_resize:
cam_screen = cv2.resize(cam_screen, self.data_gen.view_resize)
speed_screen = grab_screen(region=self.speed_region)
speed_screen = cv2.cvtColor(speed_screen, cv2.COLOR_BGR2RGB)
speed_screen = cv2.cvtColor(speed_screen,cv2.COLOR_RGB2GRAY)[:,:,None]
if self.data_gen.seq_len == 1:
X = [x for name,x in zip(['map_view', 'cam_view', 'speed_view'],[map_screen[None,:,:,:], cam_screen[None,:,:,:], speed_screen[None,:,:,:]]) if self.inputs[name]]
else:
if old_screens == []:
old_screens = [cam_screen for i in range(self.data_gen.seq_len)]
old_screens.append(cam_screen)
old_screens = old_screens[-self.data_gen.seq_len:]
X = np.array(old_screens)[None,:,:,:,:]
if self.data_gen.return_buttons:
ax_predictions, button_predictions = self.predict(X)
else:
ax_predictions = self.predict(X)
button_predictions = []
print(ax_predictions)#, X[0].shape, X[1].shape)
self.make_input(ax_predictions[0], button_predictions, controller)
self.clock.tick(self.frame_rate)
keys = key_check()
# p pauses game and can get annoying.
if 'T' in keys:
if paused:
print("unpausing")
paused = False
time.sleep(1)
else:
print("pausing")
paused = True
time.sleep(1)
if 'Q' in keys:
break
| 33.385093
| 181
| 0.691907
|
246f8dfa7481c9939a9fac4ae74430c948d64a31
| 9,273
|
py
|
Python
|
yolox/data/data_augment.py
|
itec-hust/MusicYOLO
|
d980a0c0a3723a6c25772c2f7150a82baa1a4ec3
|
[
"Apache-2.0"
] | 1
|
2022-01-29T15:47:50.000Z
|
2022-01-29T15:47:50.000Z
|
yolox/data/data_augment.py
|
xk-wang/MusicYOLO
|
d980a0c0a3723a6c25772c2f7150a82baa1a4ec3
|
[
"Apache-2.0"
] | null | null | null |
yolox/data/data_augment.py
|
xk-wang/MusicYOLO
|
d980a0c0a3723a6c25772c2f7150a82baa1a4ec3
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python3
# -*- coding:utf-8 -*-
# Copyright (c) Megvii, Inc. and its affiliates.
"""
Data augmentation functionality. Passed as callable transformations to
Dataset classes.
The data augmentation procedures were interpreted from @weiliu89's SSD paper
http://arxiv.org/abs/1512.02325
"""
import math
import random
import cv2
import numpy as np
from yolox.utils import xyxy2cxcywh
def augment_hsv(img, hgain=0.015, sgain=0.7, vgain=0.4):
r = np.random.uniform(-1, 1, 3) * [hgain, sgain, vgain] + 1 # random gains
hue, sat, val = cv2.split(cv2.cvtColor(img, cv2.COLOR_BGR2HSV))
dtype = img.dtype # uint8
x = np.arange(0, 256, dtype=np.int16)
lut_hue = ((x * r[0]) % 180).astype(dtype)
lut_sat = np.clip(x * r[1], 0, 255).astype(dtype)
lut_val = np.clip(x * r[2], 0, 255).astype(dtype)
img_hsv = cv2.merge(
(cv2.LUT(hue, lut_hue), cv2.LUT(sat, lut_sat), cv2.LUT(val, lut_val))
).astype(dtype)
cv2.cvtColor(img_hsv, cv2.COLOR_HSV2BGR, dst=img) # no return needed
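# Minimal usage sketch (hypothetical input, not part of the pipeline): the
# function jitters hue/saturation/value in place via per-channel lookup tables,
# so there is no return value.
#     img = np.random.randint(0, 255, (64, 64, 3), dtype=np.uint8)
#     augment_hsv(img)   # img is modified in place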
def box_candidates(box1, box2, wh_thr=2, ar_thr=20, area_thr=0.2):
# box1(4,n), box2(4,n)
# Compute candidate boxes which include the following 5 things:
# box1 before augment, box2 after augment, wh_thr (pixels), aspect_ratio_thr, area_ratio
w1, h1 = box1[2] - box1[0], box1[3] - box1[1]
w2, h2 = box2[2] - box2[0], box2[3] - box2[1]
ar = np.maximum(w2 / (h2 + 1e-16), h2 / (w2 + 1e-16)) # aspect ratio
return (
(w2 > wh_thr)
& (h2 > wh_thr)
& (w2 * h2 / (w1 * h1 + 1e-16) > area_thr)
& (ar < ar_thr)
) # candidates
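# Worked example (illustrative numbers): a 10x10 box shrunk to 4x4 by the
# augmentation fails the area-ratio test (16/100 < 0.2) and is filtered out:
#     box1 = np.array([[0], [0], [10], [10]])   # shape (4, n) with n = 1
#     box2 = np.array([[0], [0], [4], [4]])
#     box_candidates(box1, box2)                # -> array([False])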
def random_perspective(
img,
targets=(),
degrees=10,
translate=0.1,
scale=0.1,
shear=10,
perspective=0.0,
border=(0, 0),
):
# targets = [cls, xyxy]
height = img.shape[0] + border[0] * 2 # shape(h,w,c)
width = img.shape[1] + border[1] * 2
# Center
C = np.eye(3)
C[0, 2] = -img.shape[1] / 2 # x translation (pixels)
C[1, 2] = -img.shape[0] / 2 # y translation (pixels)
# Rotation and Scale
R = np.eye(3)
a = random.uniform(-degrees, degrees)
# a += random.choice([-180, -90, 0, 90]) # add 90deg rotations to small rotations
s = random.uniform(scale[0], scale[1])
# s = 2 ** random.uniform(-scale, scale)
R[:2] = cv2.getRotationMatrix2D(angle=a, center=(0, 0), scale=s)
# Shear
S = np.eye(3)
S[0, 1] = math.tan(random.uniform(-shear, shear) * math.pi / 180) # x shear (deg)
S[1, 0] = math.tan(random.uniform(-shear, shear) * math.pi / 180) # y shear (deg)
# Translation
T = np.eye(3)
T[0, 2] = (
random.uniform(0.5 - translate, 0.5 + translate) * width
) # x translation (pixels)
T[1, 2] = (
random.uniform(0.5 - translate, 0.5 + translate) * height
) # y translation (pixels)
# Combined rotation matrix
M = T @ S @ R @ C # order of operations (right to left) is IMPORTANT
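# Reading M = T @ S @ R @ C right to left: points are first shifted so the
# image centre sits at the origin (C), then rotated and scaled (R), then
# sheared (S), and finally translated onto the output canvas (T).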
###########################
# For Aug out of Mosaic
# s = 1.
# M = np.eye(3)
###########################
if (border[0] != 0) or (border[1] != 0) or (M != np.eye(3)).any(): # image changed
if perspective:
img = cv2.warpPerspective(
img, M, dsize=(width, height), borderValue=(114, 114, 114)
)
else: # affine
img = cv2.warpAffine(
img, M[:2], dsize=(width, height), borderValue=(114, 114, 114)
)
# Transform label coordinates
n = len(targets)
if n:
# warp points
xy = np.ones((n * 4, 3))
xy[:, :2] = targets[:, [0, 1, 2, 3, 0, 3, 2, 1]].reshape(
n * 4, 2
) # x1y1, x2y2, x1y2, x2y1
xy = xy @ M.T # transform
if perspective:
xy = (xy[:, :2] / xy[:, 2:3]).reshape(n, 8) # rescale
else: # affine
xy = xy[:, :2].reshape(n, 8)
# create new boxes
x = xy[:, [0, 2, 4, 6]]
y = xy[:, [1, 3, 5, 7]]
xy = np.concatenate((x.min(1), y.min(1), x.max(1), y.max(1))).reshape(4, n).T
# clip boxes
xy[:, [0, 2]] = xy[:, [0, 2]].clip(0, width)
xy[:, [1, 3]] = xy[:, [1, 3]].clip(0, height)
# filter candidates
i = box_candidates(box1=targets[:, :4].T * s, box2=xy.T)
targets = targets[i]
targets[:, :4] = xy[i]
return img, targets
def _distort(image):
def _convert(image, alpha=1, beta=0):
tmp = image.astype(float) * alpha + beta
tmp[tmp < 0] = 0
tmp[tmp > 255] = 255
image[:] = tmp
image = image.copy()
if random.randrange(2):
_convert(image, beta=random.uniform(-32, 32))
if random.randrange(2):
_convert(image, alpha=random.uniform(0.5, 1.5))
image = cv2.cvtColor(image, cv2.COLOR_BGR2HSV)
if random.randrange(2):
tmp = image[:, :, 0].astype(int) + random.randint(-18, 18)
tmp %= 180
image[:, :, 0] = tmp
if random.randrange(2):
_convert(image[:, :, 1], alpha=random.uniform(0.5, 1.5))
image = cv2.cvtColor(image, cv2.COLOR_HSV2BGR)
return image
def _mirror(image, boxes):
_, width, _ = image.shape
if random.randrange(2):
image = image[:, ::-1]
boxes = boxes.copy()
boxes[:, 0::2] = width - boxes[:, 2::-2]
return image, boxes
def preproc(image, input_size, mean, std, swap=(2, 0, 1)):
if len(image.shape) == 3:
padded_img = np.ones((input_size[0], input_size[1], 3)) * 114.0
else:
padded_img = np.ones(input_size) * 114.0
img = np.array(image)
r = min(input_size[0] / img.shape[0], input_size[1] / img.shape[1])
resized_img = cv2.resize(
img,
(int(img.shape[1] * r), int(img.shape[0] * r)),
interpolation=cv2.INTER_LINEAR,
).astype(np.float32)
padded_img[: int(img.shape[0] * r), : int(img.shape[1] * r)] = resized_img
padded_img = padded_img[:, :, ::-1]
padded_img /= 255.0
if mean is not None:
padded_img -= mean
if std is not None:
padded_img /= std
padded_img = padded_img.transpose(swap)
padded_img = np.ascontiguousarray(padded_img, dtype=np.float32)
return padded_img, r
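# Worked example (illustrative sizes): a 480x640 image fed into a (416, 416)
# canvas gives r = min(416/480, 416/640) = 0.65, so it is resized to 312x416
# and the remaining rows keep the pad value 114 (before the /255 normalization
# and channel swap):
#     img = np.zeros((480, 640, 3), dtype=np.uint8)
#     padded, r = preproc(img, (416, 416), mean=None, std=None)
#     padded.shape   # -> (3, 416, 416), with r == 0.65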
class TrainTransform:
def __init__(self, p=0.5, rgb_means=None, std=None, max_labels=50):
self.means = rgb_means
self.std = std
self.p = p
self.max_labels = max_labels
def __call__(self, image, targets, input_dim):
boxes = targets[:, :4].copy()
labels = targets[:, 4].copy()
if len(boxes) == 0:
targets = np.zeros((self.max_labels, 5), dtype=np.float32)
image, r_o = preproc(image, input_dim, self.means, self.std)
image = np.ascontiguousarray(image, dtype=np.float32)
return image, targets
image_o = image.copy()
targets_o = targets.copy()
height_o, width_o, _ = image_o.shape
boxes_o = targets_o[:, :4]
labels_o = targets_o[:, 4]
# bbox_o: [xyxy] to [c_x,c_y,w,h]
boxes_o = xyxy2cxcywh(boxes_o)
image_t = _distort(image)
image_t, boxes = _mirror(image_t, boxes)
height, width, _ = image_t.shape
image_t, r_ = preproc(image_t, input_dim, self.means, self.std)
# boxes [xyxy] 2 [cx,cy,w,h]
boxes = xyxy2cxcywh(boxes)
boxes *= r_
mask_b = np.minimum(boxes[:, 2], boxes[:, 3]) > 8
boxes_t = boxes[mask_b]
labels_t = labels[mask_b]
if len(boxes_t) == 0:
image_t, r_o = preproc(image_o, input_dim, self.means, self.std)
boxes_o *= r_o
boxes_t = boxes_o
labels_t = labels_o
labels_t = np.expand_dims(labels_t, 1)
targets_t = np.hstack((labels_t, boxes_t))
padded_labels = np.zeros((self.max_labels, 5))
padded_labels[range(len(targets_t))[: self.max_labels]] = targets_t[
: self.max_labels
]
padded_labels = np.ascontiguousarray(padded_labels, dtype=np.float32)
image_t = np.ascontiguousarray(image_t, dtype=np.float32)
return image_t, padded_labels
class ValTransform:
"""
Defines the transformations that should be applied to test PIL image
for input into the network
dimension -> tensorize -> color adj
Arguments:
resize (int): input dimension to SSD
rgb_means ((int,int,int)): average RGB of the dataset
(104,117,123)
swap ((int,int,int)): final order of channels
Returns:
transform (transform) : callable transform to be applied to test/val
data
"""
def __init__(self, rgb_means=None, std=None, swap=(2, 0, 1)):
self.means = rgb_means
self.swap = swap
self.std = std
# assume input is cv2 img for now
def __call__(self, img, res, input_size):
img, _ = preproc(img, input_size, self.means, self.std, self.swap)
return img, np.zeros((1, 5))
| 32.197917
| 93
| 0.548151
|
f4e7c86823c20db58285c8cc2d866ab0a07990c4
| 26,789
|
py
|
Python
|
bikeshed/h/dom.py
|
kvark/bikeshed
|
d5538ca3371cf31be3b46f89b497e55d45859a0d
|
[
"CC0-1.0"
] | null | null | null |
bikeshed/h/dom.py
|
kvark/bikeshed
|
d5538ca3371cf31be3b46f89b497e55d45859a0d
|
[
"CC0-1.0"
] | null | null | null |
bikeshed/h/dom.py
|
kvark/bikeshed
|
d5538ca3371cf31be3b46f89b497e55d45859a0d
|
[
"CC0-1.0"
] | null | null | null |
import collections.abc
import hashlib
import re
import html5lib
from lxml import etree
from lxml.cssselect import CSSSelector
from lxml.html import tostring
from ..DefaultOrderedDict import DefaultOrderedDict
from ..messages import *
from .. import constants  # assumed import: findAll() below references constants.specClass
def flatten(arr):
for el in arr:
if isinstance(el, collections.abc.Iterable) and not isinstance(el, str) and not etree.iselement(el):
yield from flatten(el)
else:
yield el
def unescape(string):
import html
return html.unescape(string)
def findAll(sel, context):
if isinstance(context, constants.specClass):
context = context.document
try:
return CSSSelector(sel, namespaces={"svg": "http://www.w3.org/2000/svg"})(context)
except Exception as e:
die("The selector '{0}' returned an error:\n{1}", sel, e)
return []
def find(sel, context=None):
result = findAll(sel, context)
if result:
return result[0]
else:
return None
def escapeCSSIdent(val):
if len(val) == 0:
die("Programming error: can't escape an empty ident.")
return ""
ident = ""
firstCode = val[0]
for i, code in enumerate(ord(x) for x in val):
if code == 0:
die("Invalid character: the string '{0}' somehow has a NUL in it.", val)
return ""
if (
0x1 <= code <= 0x1F
or code == 0x7F
or (i == 0 and 0x30 <= code <= 0x39)
or (i == 1 and 0x30 <= code <= 0x39 and firstCode == 0x2D)
):
ident += fr"\{code:x} "
elif (
code >= 0x80
or code == 0x2D
or code == 0x5F
or 0x30 <= code <= 0x39
or 0x41 <= code <= 0x5A
or 0x61 <= code <= 0x7A
):
ident += chr(code)
else:
ident += r"\{}".format(chr(code))
return ident
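# Example: an ident that starts with a digit gets its leading code point
# hex-escaped, as CSS requires:
#     escapeCSSIdent("1st")   # -> "\31 st"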
def escapeUrlFrag(val):
result = ""
for char in val:
if validUrlUnit(char):
result += char
else:
for b in char.encode("utf-8"):
result += f"%{b:0>2x}"
return result
def validUrlUnit(char):
c = ord(char)
if c < 0xA0:
# ASCII range
if (
c == 0x21
or c == 0x24
or 0x26 <= c <= 0x29
or 0x2A <= c <= 0x3B
or c == 0x3D
or 0x3F <= c <= 0x5A
or c == 0x5F
or 0x61 <= c <= 0x7A
or c == 0x7E
):
return True
return False
else:
if 0xD800 <= c <= 0xDFFF or 0xFDD0 <= c <= 0xFDEF:
return False
if (c % 0xFFFF) in [0xFFFE, 0xFFFF]:
# Last two bytes are FFFE or FFFF
return False
return True
def textContent(el, exact=False):
# If exact is False, then any elements with data-deco attribute
# get ignored in the textContent.
# This allows me to ignore things added by Bikeshed by default.
if len(el) == 0:
return el.text or ""
if exact:
return tostring(el, method="text", with_tail=False, encoding="unicode")
else:
return textContentIgnoringDecorative(el)
def textContentIgnoringDecorative(el):
str = el.text or ""
for child in childElements(el):
if child.get("data-deco") is None:
str += textContentIgnoringDecorative(child)
str += child.tail or ""
return str
def innerHTML(el):
if el is None:
return ""
return (el.text or "") + "".join(tostring(x, encoding="unicode") for x in el)
def outerHTML(el, literal=False, with_tail=False):
if el is None:
return ""
if isinstance(el, str):
return el
if isinstance(el, list):
return "".join(outerHTML(x) for x in el)
if el.get("bs-autolink-syntax") is not None and not literal:
return el.get("bs-autolink-syntax")
return tostring(el, with_tail=with_tail, encoding="unicode")
def serializeTag(el):
# Serialize *just* the opening tag for the element.
# Use when you want to output the HTML,
# but it might be a container with a lot of content.
tag = "<" + el.tag
for n, v in el.attrib.items():
tag += ' {n}="{v}"'.format(n=n, v=escapeAttr(v))
tag += ">"
return tag
def foldWhitespace(text):
return re.sub(r"(\s|\xa0)+", " ", text)
def parseHTML(text):
doc = html5lib.parse(text, treebuilder="lxml", namespaceHTMLElements=False)
head = doc.getroot()[0]
body = doc.getroot()[1]
if len(body) > 0 or body.text is not None:
# Body contains something, so return that.
contents = [body.text] if body.text is not None else []
contents.extend(body.iterchildren())
return contents
elif len(head) > 0 or head.text is not None:
# Okay, anything in the head?
contents = [head.text] if head.text is not None else []
contents.extend(head.iterchildren())
return contents
else:
return []
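# Usage sketch: fragments parse into a list of top-level nodes, with bare text
# coming back as a plain string, e.g.
#     parseHTML("<p>hi</p>")   # -> [<Element p>]
#     parseHTML("hi")          # -> ["hi"]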
def parseDocument(text):
doc = html5lib.parse(text, treebuilder="lxml", namespaceHTMLElements=False)
return doc
def escapeHTML(text):
# Escape HTML
return text.replace("&", "&").replace("<", "<")
def escapeAttr(text):
return text.replace("&", "&").replace("'", "'").replace('"', """)
def clearContents(el):
del el[:]
el.text = ""
return el
def parentElement(el):
return el.getparent()
def appendChild(parent, *children):
# Appends either text or an element.
children = list(flatten(children))
for child in children:
if isinstance(child, str):
if len(parent) > 0:
parent[-1].tail = (parent[-1].tail or "") + child
else:
parent.text = (parent.text or "") + child
else:
if len(parent) == 0 and parent.text is not None:
# LXML "helpfully" assumes you meant to insert it before the text,
# and so moves the text into the element's tail when you append.
text, parent.text = parent.text, None
parent.append(child)
parent.text = text
else:
# For some reason it doesn't make any weird assumptions about text
# when the parent already has children; the last child's tail
# doesn't get moved into the appended child or anything.
parent.append(child)
return children[-1] if len(children) > 0 else None
def prependChild(parent, child):
# Prepends either text or an element to the parent.
if isinstance(child, str):
if parent.text is None:
parent.text = child
else:
parent.text = child + parent.text
else:
removeNode(child)
parent.insert(0, child)
if parent.text is not None:
child.tail = (child.tail or "") + parent.text
parent.text = None
def insertBefore(target, *els):
parent = target.getparent()
index = parent.index(target)
prevSibling = parent[index - 1] if index > 0 else None
for el in els:
if isinstance(el, str):
if prevSibling is not None:
prevSibling.tail = (prevSibling.tail or "") + el
else:
parent.text = (parent.text or "") + el
else:
parent.insert(index, el)
index += 1
prevSibling = el
return target
def insertAfter(target, *els):
parent = target.getparent()
for el in els:
if isinstance(el, str):
target.tail = (target.tail or "") + el
else:
parent.insert(parent.index(target) + 1, el)
target = el
return target
def removeNode(node):
parent = node.getparent()
if parent is None:
return node
index = parent.index(node)
text = node.tail or ""
node.tail = None
if index == 0:
parent.text = (parent.text or "") + text
else:
prevsibling = parent[index - 1]
prevsibling.tail = (prevsibling.tail or "") + text
parent.remove(node)
return node
def replaceNode(node, *replacements):
insertBefore(node, *replacements)
removeNode(node)
if replacements:
return replacements[0]
def appendContents(el, container):
# Accepts either an iterable *or* a container element
if isElement(container):
container = childNodes(container, clear=True)
appendChild(el, *container)
return el
def replaceContents(el, newElements):
clearContents(el)
return appendContents(el, newElements)
def moveContents(toEl, fromEl):
replaceContents(toEl, fromEl)
fromEl.text = ""
def wrapContents(parentEl, wrapperEl):
appendContents(wrapperEl, parentEl)
appendChild(parentEl, wrapperEl)
return parentEl
def headingLevelOfElement(el):
for heading in relevantHeadings(el, levels=[2, 3, 4, 5, 6]):
if heading.get("data-level") is not None:
return heading.get("data-level")
return None
def relevantHeadings(startEl, levels=None):
if levels is None:
levels = [1, 2, 3, 4, 5, 6]
levels = ["h" + str(level) for level in levels]
currentHeadingLevel = float("inf")
for el in scopingElements(startEl, *levels):
tagLevel = int(el.tag[1])
if tagLevel < currentHeadingLevel:
yield el
currentHeadingLevel = tagLevel
if tagLevel == 2:
return
def sectionName(el):
"""
Return the name of the nearest section to el,
or None if that section isn't meant to be referenced.
"""
h = nextIter(relevantHeadings(el))
if h is None:
return "Unnamed section"
if hasClass(h, "no-ref"):
return None
return textContent(h)
def scopingElements(startEl, *tags):
# Elements that could form a "scope" for the startEl
# Ancestors, and preceding siblings of ancestors.
# Maps to the things that can establish a counter scope.
tagFilter = set(tags)
for el in startEl.itersiblings(preceding=True, *tags):
yield el
for el in startEl.iterancestors():
if el.tag in tagFilter:
yield el
for el in el.itersiblings(preceding=True, *tags):
yield el
def previousElements(startEl, tag=None, *tags):
# Elements preceding the startEl in document order.
# Like .iter(), but in the opposite direction.
els = []
for el in startEl.getroottree().getroot().iter(tag=tag, *tags):
if el == startEl:
return reversed(els)
els.append(el)
return els
def childElements(parentEl, tag="*", *tags, **stuff):
if len(parentEl) == 0:
return iter(())
return parentEl.iterchildren(tag=tag, *tags, **stuff)
def childNodes(parentEl, clear=False, skipOddNodes=True):
"""
This function returns all the nodes in a parent element in the DOM sense,
mixing text nodes (strings) and other nodes together
(rather than LXML's default stupid handling of text).
If you set "clear" to True, it'll
1. remove all of parentEl's children,
so you can append nodes back to it safely, and
2. Set parentEl.text and child elements' .tail to null,
again so you can safely append text to parentEl.
In other words, the following is a no-op:
```
appendChild(parentEl, *childNodes(parentEl, clear=True))
```
Using clear=True is required if you're going to be modifying the element or its children,
otherwise you'll get weird results (repeated/misplaced text).
But if you're just reading nodes,
it's not necessary.
skipOddNodes ensures that the return value will only be text and Element nodes;
if it's false, there might be comments, PIs, etc.
"""
if isinstance(parentEl, list):
ret = []
for c in parentEl:
if isinstance(c, str):
ret.append(c)
continue
if skipOddNodes and isOddNode(c):
pass
else:
ret.append(c)
if not emptyText(c.tail, wsAllowed=False):
ret.append(c.tail)
if clear:
c.tail = None
if clear:
parentEl[:] = []
return ret
ret = []
if not emptyText(parentEl.text, wsAllowed=False):
ret.append(parentEl.text)
if clear:
parentEl.text = None
for c in childElements(parentEl, tag=None):
if skipOddNodes and isOddNode(c):
pass
else:
ret.append(c)
if not emptyText(c.tail, wsAllowed=False):
ret.append(c.tail)
if clear:
c.tail = None
if clear:
clearContents(parentEl)
return ret
def nodeIter(el, clear=False, skipOddNodes=True):
# Iterates thru an element and all its descendants,
# yielding up each child node it sees in depth-first order.
# (In other words, same as el.iter(),
# but returning nodes+strings rather than the stupid LXML model.)
# Takes the same kwargs as childNodes
if isinstance(el, str):
yield el
return
if isinstance(el, etree.ElementTree):
el = el.getroot()
text = el.text
tail = el.tail
if clear:
el.text = None
el.tail = None
yield el
if text is not None:
yield text
for c in childElements(el, tag=None):
if skipOddNodes and isOddNode(c):
continue
# yield from nodeIter(c, clear=clear, skipOddNodes=skipOddNodes)
yield from nodeIter(c, clear=clear, skipOddNodes=skipOddNodes)
if tail is not None:
yield tail
def treeAttr(el, attrName):
# Find the nearest instance of the given attr in the tree
# Useful for when you can put an attr on an ancestor and apply it to all contents.
# Returns attrValue or None if nothing is found.
import itertools as it
for target in it.chain([el], el.iterancestors()):
if target.get(attrName) is not None:
return target.get(attrName)
def closestAttr(el, *attrs):
# Like treeAttr, but can provide multiple attr names, and returns the first one found.
# Useful with combos like highlight/nohighlight
# If multiple target attrs show up on same element, priority is calling order.
# Returns a tuple of (attrName, attrValue) or (None, None) if nothing is found.
import itertools as it
for target in it.chain([el], el.iterancestors()):
for attrName in attrs:
if target.get(attrName) is not None:
return attrName, target.get(attrName)
return None, None
def closestAncestor(el, pred):
# Finds the nearest ancestor matching a predicate
for target in el.iterancestors():
if pred(target):
return target
def filterAncestors(el, pred):
# Returns all ancestors that match the predicate
for target in el.iterancestors():
if pred(target):
yield target
def hasAncestor(el, pred):
return closestAncestor(el, pred) is not None
def removeAttr(el, *attrNames):
# Remove an attribute, silently ignoring if attr doesn't exist.
for attrName in attrNames:
if attrName in el.attrib:
del el.attrib[attrName]
return el
def hasAttr(el, *attrNames):
# Returns True if the element has at least one of the named attributes
for attrName in attrNames:
if attrName in el.attrib:
return True
return False
def hasAttrs(el):
return bool(el.attrib)
def addClass(el, cls):
if el.get("class") is None:
el.set("class", cls)
elif hasClass(el, cls):
pass
else:
el.set("class", "{} {}".format(el.get("class"), cls))
_classMap = {}
def hasClass(el, cls, classMap=_classMap):
elClass = el.get("class")
if elClass is None:
return False
if cls not in elClass:
return False
key = cls, elClass
if key in classMap:
return classMap[key]
ret = re.search(r"(^|\s)" + cls + r"($|\s)", elClass)
classMap[key] = ret
return ret
def removeClass(el, cls):
oldClass = el.get("class")
if oldClass is None:
return
newClass = " ".join(c for c in oldClass.split() if c != cls)
if newClass == "":
del el.attrib["class"]
else:
el.set("class", newClass)
def isElement(node):
# LXML HAS THE DUMBEST XML TREE DATA MODEL IN THE WORLD
return etree.iselement(node) and isinstance(node.tag, str)
def isOddNode(node):
# Something other than an element node or string.
if isinstance(node, str):
return False
if isElement(node):
return False
return True
def isNormative(el, doc):
# Returns whether the element is "informative" or "normative" with a crude algo.
# Currently just tests whether the element is in a class=example or class=note block, or not.
if el in _normativeElCache:
return _normativeElCache[el]
informativeClasses = [
"note",
"example",
"non-normative",
"informative",
] + doc.md.informativeClasses
for cls in informativeClasses:
if hasClass(el, cls):
_normativeElCache[el] = False
return False
if hasClass(el, "normative"):
_normativeElCache[el] = True
return True
parent = parentElement(el)
if not isElement(parent):
# Went past the root without finding any indicator,
# so normative by default.
_normativeElCache[el] = True
return True
# Otherwise, walk the tree
norm = isNormative(parent, doc)
_normativeElCache[el] = norm
return norm
_normativeElCache = {}
def isEmpty(el):
# Returns whether the element is empty - no text or children.
return (el.text is None or el.text.strip() == "") and len(el) == 0
def hasChildElements(el):
try:
next(childElements(el))
return True
except StopIteration:
return False
# If the element has one child element, returns it.
# Otherwise, returns None
def hasOnlyChild(el, wsAllowed=True):
if not emptyText(el.text, wsAllowed):
# Has significant child text
return None
children = childElements(el)
single = next(children, None)
if single is None:
# No children
return None
if not emptyText(single.tail, wsAllowed):
# Has significant child text following the child element
return None
if next(children, None) is not None:
# At least two children
return None
return single
def fixTypography(text):
# Replace straight aposes with curly quotes for possessives and contractions.
text = re.sub(r"([\w])'([\w])", r"\1’\2", text)
text = re.sub(r"(</[\w]+>)'([\w])", r"\1’\2", text)
# Fix line-ending em dashes, or --, by moving the previous line up, so no space.
text = re.sub(r"([^<][^!])(—|--)\r?\n\s*(\S)", r"\1—<wbr>\3", text)
return text
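# Example: straight apostrophes between word characters become curly quotes,
# e.g. fixTypography("it's fine") -> "it’s fine".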
def fixSurroundingTypography(el):
# Applies some of the fixTypography changes to the content surrounding an element.
# Used when a shorthand prevented fixTypography from firing previously.
if el.tail is not None and el.tail.startswith("'"):
el.tail = "’" + el.tail[1:]
return el
def unfixTypography(text):
# Replace curly quotes with straight quotes, and emdashes with double dashes.
text = re.sub(r"’", r"'", text)
# Undo the line-ending em dash fix, restoring the original "--".
text = re.sub(r"—<wbr>", r"--", text)
return text
def emptyText(text, wsAllowed=True):
# Because LXML represents a complete lack of text as None,
# you can't do something like `el.text.strip() == ""` to test for emptiness.
# wsAllowed controls whether whitespace-only strings count as empty or not
if text is None:
return True
if not wsAllowed:
return text == ""
return text.strip() == ""
def hashContents(el):
# Hash the contents of an element into an 8-character alphanum string.
# Generally used for generating probably-unique IDs.
# Normalize whitespace away to avoid git-related newline normalization issues.
text = re.sub(r"\s+", " ", textContent(el).strip()).encode("ascii", "xmlcharrefreplace")
return hashlib.md5(text).hexdigest()[0:8]
def replaceMacros(text, macros):
# `macros` is a dict of {lowercaseMacroName => replacementText}
# Macro syntax is [FOO], where FOO is /[A-Z0-9-]+/
# If written as [FOO?], failure to find a matching macro just replaced it with nothing;
# otherwise, it throws a fatal error.
def macroReplacer(match):
fullText = match.group(0)
innerText = match.group(2).lower() or ""
optional = match.group(3) == "?"
if fullText.startswith("\\"):
# Escaped
return fullText[1:]
if fullText.startswith("[["):
# Actually a biblio link
return fullText
if re.match(r"[\d-]+$", innerText):
# No refs are all-digits (this is probably JS code, or a regex/grammar).
return fullText
if innerText in macros:
# For some reason I store all the macros in lowercase,
# despite requiring them to be spelled with uppercase.
return str(macros[innerText])
# Nothing has matched, so start failing the macros.
if optional:
return ""
die(
"Found unmatched text macro {0}. Correct the macro, or escape it with a leading backslash.",
fullText,
)
return fullText
return re.sub(r"(\\|\[)?\[([A-Z0-9-]+)(\??)\]", macroReplacer, text)
def replaceAwkwardCSSShorthands(text):
# Replace the <<production>> shortcuts, because they won't survive the HTML parser.
def replaceProduction(match):
syntaxAttr = escapeAttr(match.group(0))
escape, text = match.groups()
if escape:
return escapeHTML(match.group(0)[1:])
return f"<fake-production-placeholder class=production bs-autolink-syntax='{syntaxAttr}' data-opaque>{text}</fake-production-placeholder>"
text = re.sub(r"(\\)?<<([^>\n]+)>>", replaceProduction, text)
# Replace the ''maybe link'' shortcuts.
# They'll survive the HTML parser,
# but the current shorthand-recognizer code won't find them if they contain an element.
# (The other shortcuts are "atomic" and can't contain elements.)
def replaceMaybe(match):
syntaxAttr = escapeAttr(match.group(0))
escape, text = match.groups()
if escape:
return escapeHTML(match.group(0)[1:])
return f"<fake-maybe-placeholder bs-autolink-syntax='{syntaxAttr}'>{text}</fake-maybe-placeholder>"
text = re.sub(r"(\\)?''([^=\n]+?)''", replaceMaybe, text)
return text
def fixupIDs(doc, els):
addOldIDs(els)
dedupIDs(doc)
def safeID(transOrDoc, id):
if isinstance(transOrDoc, dict):
trans = transOrDoc
else:
trans = transOrDoc.md.translateIDs
if id in trans:
return trans[id]
return id
def addOldIDs(els):
for el in els:
if not el.get("oldids"):
continue
oldIDs = [id.strip() for id in el.get("oldids").strip().split(",")]
for oldID in oldIDs:
appendChild(el, E.span({"id": oldID}))
removeAttr(el, "oldids")
def dedupIDs(doc):
import itertools as iter
ids = DefaultOrderedDict(list)
for el in findAll("[id]", doc):
ids[el.get("id")].append(el)
for dupeId, els in list(ids.items()):
if len(els) < 2:
# Only one instance, so nothing to do.
continue
warnAboutDupes = True
if re.match(r"issue-[0-9a-fA-F]{8}$", dupeId):
# Don't warn about issues, it's okay if they have the same ID because they're identical text.
warnAboutDupes = False
ints = iter.count(1)
for el in els[1:]:
# If I registered an alternate ID, try to use that.
if el.get("data-alternate-id"):
altId = el.get("data-alternate-id")
if altId not in ids:
el.set("id", safeID(doc, el.get("data-alternate-id")))
ids[altId].append(el)
continue
if el.get("data-silently-dedup") is not None:
warnAboutDupes = False
if dupeId.startswith("ref-for-"):
warnAboutDupes = False
# Try to de-dup the id by appending an integer after it.
if warnAboutDupes:
warn(
"Multiple elements have the same ID '{0}'.\nDeduping, but this ID may not be stable across revisions.",
dupeId,
el=el,
)
for x in ints:
altId = "{}{}".format(dupeId, circledDigits(x))
if altId not in ids:
el.set("id", safeID(doc, altId))
ids[altId].append(el)
break
def approximateLineNumber(el, setIntermediate=True):
if el.get("line-number"):
return el.get("line-number")
parent = parentElement(el)
if not isElement(parent):
if el.tag == "html":
return None
return None
approx = approximateLineNumber(parent, setIntermediate=setIntermediate)
if approx is None:
return None
if approx[0].isdigit():
approx = "~" + approx
if setIntermediate:
el.set("line-number", approx)
return approx
def circledDigits(num):
"""
Converts a base-10 number into a string using unicode circled digits.
That is, 123 becomes "①②③"
"""
num = int(num)
assert num >= 0
digits = ["⓪", "①", "②", "③", "④", "⑤", "⑥", "⑦", "⑧", "⑨"]
result = "".join(digits[int(d)] for d in str(num))
return result
def nextIter(it, default=None):
"""
Returns the next element of the iterator,
returning the default value if it's empty,
rather than throwing an error.
"""
try:
return next(iter(it))
except StopIteration:
return default
def createElement(tag, attrs={}, *children):
el = etree.Element(tag, {n: v for n, v in attrs.items() if v is not None})
for child in children:
appendChild(el, child)
return el
class ElementCreationHelper:
def __getattr__(self, name):
def _creater(*children):
if children and not (isinstance(children[0], str) or isElement(children[0])):
attrs = children[0]
children = children[1:]
else:
attrs = {}
return createElement(name, attrs, *children)
return _creater
E = ElementCreationHelper()
| 29.965324
| 146
| 0.597932
|
53e8b4b239957272469aa25420cb0b1b814742f7
| 48,591
|
py
|
Python
|
openstack_dashboard/dashboards/project/instances/tables.py
|
sahil-awasthi/tcpro-wallaby
|
5375f2da1d7548210a465a817ec74df4acf6e821
|
[
"Apache-2.0"
] | null | null | null |
openstack_dashboard/dashboards/project/instances/tables.py
|
sahil-awasthi/tcpro-wallaby
|
5375f2da1d7548210a465a817ec74df4acf6e821
|
[
"Apache-2.0"
] | null | null | null |
openstack_dashboard/dashboards/project/instances/tables.py
|
sahil-awasthi/tcpro-wallaby
|
5375f2da1d7548210a465a817ec74df4acf6e821
|
[
"Apache-2.0"
] | 1
|
2022-03-31T15:13:27.000Z
|
2022-03-31T15:13:27.000Z
|
# Copyright 2012 Nebula, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import logging
from django.conf import settings
from django.http import HttpResponse
from django import shortcuts
from django import template
from django.template.defaultfilters import title
from django import urls
from django.utils.http import urlencode
from django.utils.safestring import mark_safe
from django.utils.text import format_lazy
from django.utils.translation import npgettext_lazy
from django.utils.translation import pgettext_lazy
from django.utils.translation import ugettext_lazy as _
from django.utils.translation import ungettext_lazy
import netaddr
from horizon import exceptions
from horizon import messages
from horizon import tables
from horizon.templatetags import sizeformat
from horizon.utils import filters
from openstack_dashboard import api
from openstack_dashboard.dashboards.project.floating_ips import workflows
from openstack_dashboard.dashboards.project.instances import tabs
from openstack_dashboard.dashboards.project.instances \
import utils as instance_utils
from openstack_dashboard.dashboards.project.instances.workflows \
import resize_instance
from openstack_dashboard.dashboards.project.instances.workflows \
import update_instance
from openstack_dashboard import policy
from openstack_dashboard.views import get_url_with_pagination
LOG = logging.getLogger(__name__)
ACTIVE_STATES = ("ACTIVE",)
VOLUME_ATTACH_READY_STATES = ("ACTIVE", "SHUTOFF")
SNAPSHOT_READY_STATES = ("ACTIVE", "SHUTOFF", "PAUSED", "SUSPENDED")
SHELVE_READY_STATES = ("ACTIVE", "SHUTOFF", "PAUSED", "SUSPENDED")
POWER_STATES = {
0: "NO STATE",
1: "RUNNING",
2: "BLOCKED",
3: "PAUSED",
4: "SHUTDOWN",
5: "SHUTOFF",
6: "CRASHED",
7: "SUSPENDED",
8: "FAILED",
9: "BUILDING",
}
PAUSE = 0
UNPAUSE = 1
SUSPEND = 0
RESUME = 1
SHELVE = 0
UNSHELVE = 1
def is_deleting(instance):
task_state = getattr(instance, "OS-EXT-STS:task_state", None)
if not task_state:
return False
return task_state.lower() == "deleting"
class DeleteInstance(policy.PolicyTargetMixin, tables.DeleteAction):
policy_rules = (("compute", "os_compute_api:servers:delete"),)
help_text = _("Deleted instances are not recoverable.")
default_message_level = "info"
@staticmethod
def action_present(count):
return ungettext_lazy(
"Delete Instance",
"Delete Instances",
count
)
@staticmethod
def action_past(count):
return ungettext_lazy(
"Scheduled deletion of Instance",
"Scheduled deletion of Instances",
count
)
def allowed(self, request, instance=None):
error_state = False
if instance:
error_state = (instance.status == 'ERROR')
return error_state or not is_deleting(instance)
def action(self, request, obj_id):
api.nova.server_delete(request, obj_id)
class RebootInstance(policy.PolicyTargetMixin, tables.BatchAction):
name = "reboot"
classes = ('btn-reboot',)
policy_rules = (("compute", "os_compute_api:servers:reboot"),)
help_text = _("Restarted instances will lose any data"
" not saved in persistent storage.")
action_type = "danger"
@staticmethod
def action_present(count):
return ungettext_lazy(
"Hard Reboot Instance",
"Hard Reboot Instances",
count
)
@staticmethod
def action_past(count):
return ungettext_lazy(
"Hard Rebooted Instance",
"Hard Rebooted Instances",
count
)
def allowed(self, request, instance=None):
if instance is None:
return True
return ((instance.status in ACTIVE_STATES or
instance.status == 'SHUTOFF') and
not is_deleting(instance))
def action(self, request, obj_id):
api.nova.server_reboot(request, obj_id, soft_reboot=False)
class SoftRebootInstance(RebootInstance):
name = "soft_reboot"
@staticmethod
def action_present(count):
return ungettext_lazy(
"Soft Reboot Instance",
"Soft Reboot Instances",
count
)
@staticmethod
def action_past(count):
return ungettext_lazy(
"Soft Rebooted Instance",
"Soft Rebooted Instances",
count
)
def action(self, request, obj_id):
api.nova.server_reboot(request, obj_id, soft_reboot=True)
def allowed(self, request, instance=None):
if instance is not None:
return instance.status in ACTIVE_STATES
return True
class RescueInstance(policy.PolicyTargetMixin, tables.LinkAction):
name = "rescue"
verbose_name = _("Rescue Instance")
policy_rules = (("compute", "os_compute_api:os-rescue"),)
classes = ("btn-rescue", "ajax-modal")
url = "horizon:project:instances:rescue"
def get_link_url(self, datum):
instance_id = self.table.get_object_id(datum)
return urls.reverse(self.url, args=[instance_id])
def allowed(self, request, instance):
return instance.status in ACTIVE_STATES
class UnRescueInstance(tables.BatchAction):
name = 'unrescue'
classes = ("btn-unrescue",)
@staticmethod
def action_present(count):
return ungettext_lazy(
"Unrescue Instance",
"Unrescue Instances",
count
)
@staticmethod
def action_past(count):
return ungettext_lazy(
"Unrescued Instance",
"Unrescued Instances",
count
)
def action(self, request, obj_id):
api.nova.server_unrescue(request, obj_id)
def allowed(self, request, instance=None):
if instance:
return instance.status == "RESCUE"
return False
class TogglePause(tables.BatchAction):
name = "pause"
icon = "pause"
@staticmethod
def action_present(count):
return (
ungettext_lazy(
"Pause Instance",
"Pause Instances",
count
),
ungettext_lazy(
"Resume Instance",
"Resume Instances",
count
),
)
@staticmethod
def action_past(count):
return (
ungettext_lazy(
"Paused Instance",
"Paused Instances",
count
),
ungettext_lazy(
"Resumed Instance",
"Resumed Instances",
count
),
)
def allowed(self, request, instance=None):
if not instance:
return False
self.paused = instance.status == "PAUSED"
if self.paused:
self.current_present_action = UNPAUSE
policy_rules = (
("compute", "os_compute_api:os-pause-server:unpause"),)
else:
self.current_present_action = PAUSE
policy_rules = (
("compute", "os_compute_api:os-pause-server:pause"),)
has_permission = policy.check(
policy_rules, request,
target={'project_id': getattr(instance, 'tenant_id', None)})
return (has_permission and
(instance.status in ACTIVE_STATES or self.paused) and
not is_deleting(instance))
def action(self, request, obj_id):
if self.paused:
api.nova.server_unpause(request, obj_id)
self.current_past_action = UNPAUSE
else:
api.nova.server_pause(request, obj_id)
self.current_past_action = PAUSE
class ToggleSuspend(tables.BatchAction):
name = "suspend"
classes = ("btn-suspend",)
@staticmethod
def action_present(count):
return (
ungettext_lazy(
"Suspend Instance",
"Suspend Instances",
count
),
ungettext_lazy(
"Resume Instance",
"Resume Instances",
count
),
)
@staticmethod
def action_past(count):
return (
ungettext_lazy(
"Suspended Instance",
"Suspended Instances",
count
),
ungettext_lazy(
"Resumed Instance",
"Resumed Instances",
count
),
)
def allowed(self, request, instance=None):
if not instance:
return False
self.suspended = instance.status == "SUSPENDED"
if self.suspended:
self.current_present_action = RESUME
policy_rules = (
("compute", "os_compute_api:os-suspend-server:resume"),)
else:
self.current_present_action = SUSPEND
policy_rules = (
("compute", "os_compute_api:os-suspend-server:suspend"),)
has_permission = policy.check(
policy_rules, request,
target={'project_id': getattr(instance, 'tenant_id', None)})
return (has_permission and
(instance.status in ACTIVE_STATES or self.suspended) and
not is_deleting(instance))
def action(self, request, obj_id):
if self.suspended:
api.nova.server_resume(request, obj_id)
self.current_past_action = RESUME
else:
api.nova.server_suspend(request, obj_id)
self.current_past_action = SUSPEND
class ToggleShelve(tables.BatchAction):
name = "shelve"
icon = "shelve"
@staticmethod
def action_present(count):
return (
ungettext_lazy(
"Shelve Instance",
"Shelve Instances",
count
),
ungettext_lazy(
"Unshelve Instance",
"Unshelve Instances",
count
),
)
@staticmethod
def action_past(count):
return (
ungettext_lazy(
"Shelved Instance",
"Shelved Instances",
count
),
ungettext_lazy(
"Unshelved Instance",
"Unshelved Instances",
count
),
)
def allowed(self, request, instance=None):
if not instance:
return False
if not request.user.is_superuser and getattr(
instance, 'locked', False):
return False
self.shelved = instance.status == "SHELVED_OFFLOADED"
if self.shelved:
self.current_present_action = UNSHELVE
policy_rules = (("compute", "os_compute_api:os-shelve:unshelve"),)
else:
self.current_present_action = SHELVE
policy_rules = (("compute", "os_compute_api:os-shelve:shelve"),)
has_permission = policy.check(
policy_rules, request,
target={'project_id': getattr(instance, 'tenant_id', None)})
return (has_permission and
(instance.status in SHELVE_READY_STATES or self.shelved) and
not is_deleting(instance))
def action(self, request, obj_id):
if self.shelved:
api.nova.server_unshelve(request, obj_id)
self.current_past_action = UNSHELVE
else:
api.nova.server_shelve(request, obj_id)
self.current_past_action = SHELVE
class LaunchLink(tables.LinkAction):
name = "launch"
verbose_name = _("Launch Instance")
url = "horizon:project:instances:launch"
classes = ("ajax-modal", "btn-launch")
icon = "cloud-upload"
policy_rules = (("compute", "os_compute_api:servers:create"),)
ajax = True
def __init__(self, attrs=None, **kwargs):
kwargs['preempt'] = True
super().__init__(attrs, **kwargs)
def allowed(self, request, datum):
try:
limits = api.nova.tenant_absolute_limits(request, reserved=True)
instances_available = limits['maxTotalInstances'] \
- limits['totalInstancesUsed']
cores_available = limits['maxTotalCores'] \
- limits['totalCoresUsed']
ram_available = limits['maxTotalRAMSize'] - limits['totalRAMUsed']
if instances_available <= 0 or cores_available <= 0 \
or ram_available <= 0:
if "disabled" not in self.classes:
self.classes = list(self.classes) + ['disabled']
self.verbose_name = format_lazy(
'{verbose_name} {quota_exceeded}',
verbose_name=self.verbose_name,
quota_exceeded=_("(Quota exceeded)"))
else:
self.verbose_name = _("Launch Instance")
classes = [c for c in self.classes if c != "disabled"]
self.classes = classes
except Exception:
LOG.exception("Failed to retrieve quota information")
# If we can't get the quota information, leave it to the
# API to check when launching
return True # The action should always be displayed
def single(self, table, request, object_id=None):
self.allowed(request, None)
return HttpResponse(self.render(is_table_action=True))
class LaunchLinkNG(LaunchLink):
name = "launch-ng"
url = "horizon:project:instances:index"
ajax = False
classes = ("btn-launch", )
def get_default_attrs(self):
url = urls.reverse(self.url)
ngclick = "modal.openLaunchInstanceWizard(" \
"{ successUrl: '%s' })" % url
self.attrs.update({
'ng-controller': 'LaunchInstanceModalController as modal',
'ng-click': ngclick
})
return super().get_default_attrs()
def get_link_url(self, datum=None):
return "javascript:void(0);"
class EditInstance(policy.PolicyTargetMixin, tables.LinkAction):
name = "edit"
verbose_name = _("Edit Instance")
url = "horizon:project:instances:update"
classes = ("ajax-modal",)
icon = "pencil"
policy_rules = (("compute", "os_compute_api:servers:update"),)
def get_link_url(self, project):
return self._get_link_url(project, 'instance_info')
def _get_link_url(self, project, step_slug):
base_url = urls.reverse(self.url, args=[project.id])
next_url = self.table.get_full_url()
params = {"step": step_slug,
update_instance.UpdateInstance.redirect_param_name: next_url}
param = urlencode(params)
return "?".join([base_url, param])
def allowed(self, request, instance):
return not is_deleting(instance)
class EditInstanceSecurityGroups(EditInstance):
name = "edit_secgroups"
verbose_name = _("Edit Security Groups")
def get_link_url(self, project):
return self._get_link_url(project, 'update_security_groups')
def allowed(self, request, instance=None):
if not api.base.is_service_enabled(request, 'network'):
return False
return (instance.status in ACTIVE_STATES and
not is_deleting(instance) and
request.user.tenant_id == instance.tenant_id)
class EditPortSecurityGroups(tables.LinkAction):
name = "edit_port_secgroups"
verbose_name = _("Edit Port Security Groups")
policy_rules = (("network", "update_security_group"),)
url = "horizon:project:instances:detail"
icon = "pencil"
def get_link_url(self, instance):
base_url = urls.reverse(self.url, args=[instance.id])
return '%s?tab=%s__%s' % (base_url, 'instance_details', 'interfaces')
class CreateSnapshot(policy.PolicyTargetMixin, tables.LinkAction):
name = "snapshot"
verbose_name = _("Create Snapshot")
url = "horizon:project:images:snapshots:create"
classes = ("ajax-modal",)
icon = "camera"
policy_rules = (("compute", "os_compute_api:snapshot"),)
def allowed(self, request, instance=None):
return instance.status in SNAPSHOT_READY_STATES \
and not is_deleting(instance)
class ConsoleLink(policy.PolicyTargetMixin, tables.LinkAction):
name = "console"
verbose_name = _("Console")
url = "horizon:project:instances:detail"
classes = ("btn-console",)
policy_rules = (("compute", "os_compute_api:os-consoles:index"),)
def allowed(self, request, instance=None):
# We check if ConsoleLink is allowed only if settings.CONSOLE_TYPE is
# not set at all, or if it's set to any value other than None or False.
return (bool(settings.CONSOLE_TYPE) and
instance.status in ACTIVE_STATES and
not is_deleting(instance))
def get_link_url(self, datum):
base_url = super().get_link_url(datum)
tab_query_string = tabs.ConsoleTab(
tabs.InstanceDetailTabs).get_query_string()
return "?".join([base_url, tab_query_string])
class LogLink(policy.PolicyTargetMixin, tables.LinkAction):
name = "log"
verbose_name = _("View Log")
url = "horizon:project:instances:detail"
classes = ("btn-log",)
policy_rules = (("compute", "os_compute_api:os-console-output"),)
def allowed(self, request, instance=None):
return instance.status in ACTIVE_STATES and not is_deleting(instance)
def get_link_url(self, datum):
base_url = super().get_link_url(datum)
tab_query_string = tabs.LogTab(
tabs.InstanceDetailTabs).get_query_string()
return "?".join([base_url, tab_query_string])
class ResizeLink(policy.PolicyTargetMixin, tables.LinkAction):
name = "resize"
verbose_name = _("Resize Instance")
url = "horizon:project:instances:resize"
classes = ("ajax-modal", "btn-resize")
policy_rules = (("compute", "os_compute_api:servers:resize"),)
action_type = "danger"
def get_link_url(self, project):
return self._get_link_url(project, 'flavor_choice')
def _get_link_url(self, project, step_slug):
base_url = urls.reverse(self.url, args=[project.id])
next_url = self.table.get_full_url()
params = {"step": step_slug,
resize_instance.ResizeInstance.redirect_param_name: next_url}
param = urlencode(params)
return "?".join([base_url, param])
def allowed(self, request, instance):
return ((instance.status in ACTIVE_STATES or
instance.status == 'SHUTOFF') and
not is_deleting(instance))
class ConfirmResize(policy.PolicyTargetMixin, tables.Action):
name = "confirm"
verbose_name = _("Confirm Resize/Migrate")
classes = ("btn-confirm", "btn-action-required")
policy_rules = (("compute", "os_compute_api:servers:confirm_resize"),)
def allowed(self, request, instance):
return instance.status == 'VERIFY_RESIZE'
def single(self, table, request, obj_id):
instance = table.get_object_by_id(obj_id)
try:
api.nova.server_confirm_resize(request, instance.id)
except Exception:
exceptions.handle(request,
_('Unable to confirm resize instance "%s".')
% (instance.name or instance.id))
return shortcuts.redirect(request.get_full_path())
class RevertResize(policy.PolicyTargetMixin, tables.Action):
name = "revert"
verbose_name = _("Revert Resize/Migrate")
classes = ("btn-revert", "btn-action-required")
policy_rules = (("compute", "os_compute_api:servers:revert_resize"),)
def allowed(self, request, instance):
return instance.status == 'VERIFY_RESIZE'
def single(self, table, request, obj_id):
instance = table.get_object_by_id(obj_id)
try:
api.nova.server_revert_resize(request, instance.id)
except Exception:
exceptions.handle(request,
_('Unable to revert resize instance "%s".')
% (instance.name or instance.id))
class RebuildInstance(policy.PolicyTargetMixin, tables.LinkAction):
name = "rebuild"
verbose_name = _("Rebuild Instance")
classes = ("btn-rebuild", "ajax-modal")
url = "horizon:project:instances:rebuild"
policy_rules = (("compute", "os_compute_api:servers:rebuild"),)
action_type = "danger"
def allowed(self, request, instance):
return ((instance.status in ACTIVE_STATES or
instance.status == 'SHUTOFF') and
not is_deleting(instance))
def get_link_url(self, datum):
instance_id = self.table.get_object_id(datum)
return urls.reverse(self.url, args=[instance_id])
class DecryptInstancePassword(tables.LinkAction):
name = "decryptpassword"
verbose_name = _("Retrieve Password")
classes = ("btn-decrypt", "ajax-modal")
url = "horizon:project:instances:decryptpassword"
def allowed(self, request, instance):
return (settings.OPENSTACK_ENABLE_PASSWORD_RETRIEVE and
(instance.status in ACTIVE_STATES or
instance.status == 'SHUTOFF') and
not is_deleting(instance) and
get_keyname(instance) is not None)
def get_link_url(self, datum):
instance_id = self.table.get_object_id(datum)
keypair_name = get_keyname(datum)
return urls.reverse(self.url, args=[instance_id,
keypair_name])
class AssociateIP(policy.PolicyTargetMixin, tables.LinkAction):
name = "associate"
verbose_name = _("Associate Floating IP")
url = "horizon:project:floating_ips:associate"
classes = ("ajax-modal",)
icon = "link"
policy_rules = (("network", "update_floatingip"),)
def allowed(self, request, instance):
if not api.base.is_service_enabled(request, 'network'):
return False
if not api.neutron.floating_ip_supported(request):
return False
if api.neutron.floating_ip_simple_associate_supported(request):
return False
if instance.status == "ERROR":
return False
for addresses in instance.addresses.values():
for address in addresses:
if address.get('OS-EXT-IPS:type') == "floating":
return False
return not is_deleting(instance)
def get_link_url(self, datum):
base_url = urls.reverse(self.url)
next_url = self.table.get_full_url()
params = {
"instance_id": self.table.get_object_id(datum),
workflows.IPAssociationWorkflow.redirect_param_name: next_url}
params = urlencode(params)
return "?".join([base_url, params])
class DisassociateIP(tables.LinkAction):
name = "disassociate"
verbose_name = _("Disassociate Floating IP")
url = "horizon:project:instances:disassociate"
classes = ("btn-disassociate", 'ajax-modal')
policy_rules = (("network", "update_floatingip"),)
action_type = "danger"
def allowed(self, request, instance):
if not api.base.is_service_enabled(request, 'network'):
return False
if not api.neutron.floating_ip_supported(request):
return False
for addresses in instance.addresses.values():
for address in addresses:
if address.get('OS-EXT-IPS:type') == "floating":
return not is_deleting(instance)
return False
class UpdateMetadata(policy.PolicyTargetMixin, tables.LinkAction):
name = "update_metadata"
verbose_name = _("Update Metadata")
ajax = False
icon = "pencil"
attrs = {"ng-controller": "MetadataModalHelperController as modal"}
policy_rules = (("compute", "os_compute_api:server-metadata:update"),)
def __init__(self, attrs=None, **kwargs):
kwargs['preempt'] = True
super().__init__(attrs, **kwargs)
def get_link_url(self, datum):
instance_id = self.table.get_object_id(datum)
self.attrs['ng-click'] = (
"modal.openMetadataModal('instance', '%s', true, 'metadata')"
% instance_id)
return "javascript:void(0);"
def allowed(self, request, instance=None):
return (instance and
instance.status.lower() != 'error')
def instance_fault_to_friendly_message(instance):
fault = getattr(instance, 'fault', {})
message = fault.get('message', _("Unknown"))
default_message = _("Please try again later [Error: %s].") % message
fault_map = {
'NoValidHost': _("There is not enough capacity for this "
"flavor in the selected availability zone. "
"Try again later or select a different availability "
"zone.")
}
return fault_map.get(message, default_message)
def get_instance_error(instance):
if instance.status.lower() != 'error':
return None
message = instance_fault_to_friendly_message(instance)
preamble = _('Failed to perform requested operation on instance "%s", the '
'instance has an error status') % (instance.name or instance.id)
message = format_lazy('{preamble}: {message}',
preamble=preamble, message=message)
return message
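# Illustrative example (hypothetical instance named "web-1"): if the instance
# is in ERROR state with fault message 'NoValidHost', get_instance_error
# returns the availability-zone capacity hint above, prefixed with
# 'Failed to perform requested operation on instance "web-1", ...'; any other
# fault message falls back to "Please try again later [Error: <message>]".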
class UpdateRow(tables.Row):
ajax = True
def get_data(self, request, instance_id):
instance = api.nova.server_get(request, instance_id)
try:
instance.full_flavor = instance_utils.resolve_flavor(request,
instance)
except Exception:
exceptions.handle(request,
_('Unable to retrieve flavor information '
'for instance "%s".') % instance_id,
ignore=True)
try:
api.network.servers_update_addresses(request, [instance])
except Exception:
exceptions.handle(request,
_('Unable to retrieve Network information '
'for instance "%s".') % instance_id,
ignore=True)
error = get_instance_error(instance)
if error:
messages.error(request, error)
return instance
class StartInstance(policy.PolicyTargetMixin, tables.BatchAction):
name = "start"
classes = ('btn-confirm',)
policy_rules = (("compute", "os_compute_api:servers:start"),)
@staticmethod
def action_present(count):
return ungettext_lazy(
"Start Instance",
"Start Instances",
count
)
@staticmethod
def action_past(count):
return ungettext_lazy(
"Started Instance",
"Started Instances",
count
)
def allowed(self, request, instance):
return ((instance is None) or
(instance.status in ("SHUTDOWN", "SHUTOFF", "CRASHED")))
def action(self, request, obj_id):
api.nova.server_start(request, obj_id)
class StopInstance(policy.PolicyTargetMixin, tables.BatchAction):
name = "stop"
policy_rules = (("compute", "os_compute_api:servers:stop"),)
help_text = _("The instance(s) will be shut off.")
action_type = "danger"
@staticmethod
def action_present(count):
return npgettext_lazy(
"Action to perform (the instance is currently running)",
"Shut Off Instance",
"Shut Off Instances",
count
)
@staticmethod
def action_past(count):
return npgettext_lazy(
"Past action (the instance is currently already Shut Off)",
"Shut Off Instance",
"Shut Off Instances",
count
)
def allowed(self, request, instance):
return (instance is None or
(get_power_state(instance) in ("RUNNING", "SUSPENDED") and
not is_deleting(instance)))
def action(self, request, obj_id):
api.nova.server_stop(request, obj_id)
class LockInstance(policy.PolicyTargetMixin, tables.BatchAction):
name = "lock"
policy_rules = (("compute", "os_compute_api:os-lock-server:lock"),)
@staticmethod
def action_present(count):
return ungettext_lazy(
"Lock Instance",
"Lock Instances",
count
)
@staticmethod
def action_past(count):
return ungettext_lazy(
"Locked Instance",
"Locked Instances",
count
)
# to only allow unlocked instances to be locked
def allowed(self, request, instance):
if getattr(instance, 'locked', False):
return False
if not api.nova.is_feature_available(request, "locked_attribute"):
return False
return True
def action(self, request, obj_id):
api.nova.server_lock(request, obj_id)
class UnlockInstance(policy.PolicyTargetMixin, tables.BatchAction):
name = "unlock"
policy_rules = (("compute", "os_compute_api:os-lock-server:unlock"),)
@staticmethod
def action_present(count):
return ungettext_lazy(
"Unlock Instance",
"Unlock Instances",
count
)
@staticmethod
def action_past(count):
return ungettext_lazy(
"Unlocked Instance",
"Unlocked Instances",
count
)
# to only allow locked instances to be unlocked
def allowed(self, request, instance):
if not getattr(instance, 'locked', True):
return False
if not api.nova.is_feature_available(request, "locked_attribute"):
return False
return True
def action(self, request, obj_id):
api.nova.server_unlock(request, obj_id)
class AttachVolume(tables.LinkAction):
name = "attach_volume"
verbose_name = _("Attach Volume")
url = "horizon:project:instances:attach_volume"
classes = ("ajax-modal",)
policy_rules = (
("compute", "os_compute_api:os-volumes-attachments:create"),)
# This action should be disabled if the instance
# is not active, or the instance is being deleted
# or cinder is not enabled
def allowed(self, request, instance=None):
return (instance.status in ("ACTIVE") and
not is_deleting(instance) and
api.cinder.is_volume_service_enabled(request))
class DetachVolume(AttachVolume):
name = "detach_volume"
verbose_name = _("Detach Volume")
url = "horizon:project:instances:detach_volume"
policy_rules = (
("compute", "os_compute_api:os-volumes-attachments:delete"),)
# This action should be disabled if the instance
# is not active, or the instance is being deleted
# or cinder is not enabled
def allowed(self, request, instance=None):
return (instance.status in ("ACTIVE") and
not is_deleting(instance) and
api.cinder.is_volume_service_enabled(request))
class AttachInterface(policy.PolicyTargetMixin, tables.LinkAction):
name = "attach_interface"
verbose_name = _("Attach Interface")
classes = ("btn-confirm", "ajax-modal")
url = "horizon:project:instances:attach_interface"
policy_rules = (("compute", "os_compute_api:os-attach-interfaces"),)
def allowed(self, request, instance):
return ((instance.status in ACTIVE_STATES or
instance.status == 'SHUTOFF') and
not is_deleting(instance) and
api.base.is_service_enabled(request, 'network'))
def get_link_url(self, datum):
instance_id = self.table.get_object_id(datum)
return urls.reverse(self.url, args=[instance_id])
class DetachInterface(policy.PolicyTargetMixin, tables.LinkAction):
name = "detach_interface"
verbose_name = _("Detach Interface")
classes = ("btn-confirm", "ajax-modal")
url = "horizon:project:instances:detach_interface"
policy_rules = (("compute", "os_compute_api:os-attach-interfaces:delete"),)
def allowed(self, request, instance):
if not api.base.is_service_enabled(request, 'network'):
return False
if is_deleting(instance):
return False
if (instance.status not in ACTIVE_STATES and
instance.status != 'SHUTOFF'):
return False
for addresses in instance.addresses.values():
for address in addresses:
if address.get('OS-EXT-IPS:type') == "fixed":
return True
return False
def get_link_url(self, datum):
instance_id = self.table.get_object_id(datum)
return urls.reverse(self.url, args=[instance_id])
def get_ips(instance):
template_name = 'project/instances/_instance_ips.html'
ip_groups = {}
for ip_group, addresses in instance.addresses.items():
ips = [addr['addr'] for addr in addresses]
ips.sort(key=lambda ip: netaddr.IPAddress(ip).version)
ip_groups[ip_group] = ips
context = {
"ip_groups": ip_groups,
}
return template.loader.render_to_string(template_name, context)
def get_flavor(instance):
if hasattr(instance, "full_flavor"):
template_name = 'project/instances/_instance_flavor.html'
size_ram = sizeformat.mb_float_format(instance.full_flavor.ram)
if instance.full_flavor.disk > 0:
size_disk = sizeformat.diskgbformat(instance.full_flavor.disk)
else:
size_disk = _("%s GB") % "0"
context = {
"name": instance.full_flavor.name,
"id": instance.id,
"size_disk": size_disk,
"size_ram": size_ram,
"vcpus": instance.full_flavor.vcpus,
"flavor_id": getattr(instance.full_flavor, 'id', None)
}
return template.loader.render_to_string(template_name, context)
return _("Not available")
def get_keyname(instance):
if hasattr(instance, "key_name"):
keyname = instance.key_name
return keyname
return _("Not available")
def get_power_state(instance):
return POWER_STATES.get(getattr(instance, "OS-EXT-STS:power_state", 0), '')
STATUS_DISPLAY_CHOICES = (
("deleted", pgettext_lazy("Current status of an Instance", "Deleted")),
("active", pgettext_lazy("Current status of an Instance", "Active")),
("shutoff", pgettext_lazy("Current status of an Instance", "Shutoff")),
("suspended", pgettext_lazy("Current status of an Instance",
"Suspended")),
("paused", pgettext_lazy("Current status of an Instance", "Paused")),
("error", pgettext_lazy("Current status of an Instance", "Error")),
("resize", pgettext_lazy("Current status of an Instance",
"Resize/Migrate")),
("verify_resize", pgettext_lazy("Current status of an Instance",
"Confirm or Revert Resize/Migrate")),
("revert_resize", pgettext_lazy(
"Current status of an Instance", "Revert Resize/Migrate")),
("reboot", pgettext_lazy("Current status of an Instance", "Reboot")),
("hard_reboot", pgettext_lazy("Current status of an Instance",
"Hard Reboot")),
("password", pgettext_lazy("Current status of an Instance", "Password")),
("rebuild", pgettext_lazy("Current status of an Instance", "Rebuild")),
("migrating", pgettext_lazy("Current status of an Instance",
"Migrating")),
("build", pgettext_lazy("Current status of an Instance", "Build")),
("rescue", pgettext_lazy("Current status of an Instance", "Rescue")),
("soft-delete", pgettext_lazy("Current status of an Instance",
"Soft Deleted")),
("shelved", pgettext_lazy("Current status of an Instance", "Shelved")),
("shelved_offloaded", pgettext_lazy("Current status of an Instance",
"Shelved Offloaded")),
# these vm states are used when generating CSV usage summary
("building", pgettext_lazy("Current status of an Instance", "Building")),
("stopped", pgettext_lazy("Current status of an Instance", "Stopped")),
("rescued", pgettext_lazy("Current status of an Instance", "Rescued")),
("resized", pgettext_lazy("Current status of an Instance", "Resized")),
)
TASK_DISPLAY_NONE = pgettext_lazy("Task status of an Instance", "None")
# Mapping of task states taken from Nova's nova/compute/task_states.py
TASK_DISPLAY_CHOICES = (
("scheduling", pgettext_lazy("Task status of an Instance",
"Scheduling")),
("block_device_mapping", pgettext_lazy("Task status of an Instance",
"Block Device Mapping")),
("networking", pgettext_lazy("Task status of an Instance",
"Networking")),
("spawning", pgettext_lazy("Task status of an Instance", "Spawning")),
("image_snapshot", pgettext_lazy("Task status of an Instance",
"Snapshotting")),
("image_snapshot_pending", pgettext_lazy("Task status of an Instance",
"Image Snapshot Pending")),
("image_pending_upload", pgettext_lazy("Task status of an Instance",
"Image Pending Upload")),
("image_uploading", pgettext_lazy("Task status of an Instance",
"Image Uploading")),
("image_backup", pgettext_lazy("Task status of an Instance",
"Image Backup")),
("updating_password", pgettext_lazy("Task status of an Instance",
"Updating Password")),
("resize_prep", pgettext_lazy("Task status of an Instance",
"Preparing Resize or Migrate")),
("resize_migrating", pgettext_lazy("Task status of an Instance",
"Resizing or Migrating")),
("resize_migrated", pgettext_lazy("Task status of an Instance",
"Resized or Migrated")),
("resize_finish", pgettext_lazy("Task status of an Instance",
"Finishing Resize or Migrate")),
("resize_reverting", pgettext_lazy("Task status of an Instance",
"Reverting Resize or Migrate")),
("resize_confirming", pgettext_lazy("Task status of an Instance",
"Confirming Resize or Migrate")),
("rebooting", pgettext_lazy("Task status of an Instance", "Rebooting")),
("reboot_pending", pgettext_lazy("Task status of an Instance",
"Reboot Pending")),
("reboot_started", pgettext_lazy("Task status of an Instance",
"Reboot Started")),
("rebooting_hard", pgettext_lazy("Task status of an Instance",
"Hard Rebooting")),
("reboot_pending_hard", pgettext_lazy("Task status of an Instance",
"Hard Reboot Pending")),
("reboot_started_hard", pgettext_lazy("Task status of an Instance",
"Hard Reboot Started")),
("pausing", pgettext_lazy("Task status of an Instance", "Pausing")),
("unpausing", pgettext_lazy("Task status of an Instance", "Resuming")),
("suspending", pgettext_lazy("Task status of an Instance",
"Suspending")),
("resuming", pgettext_lazy("Task status of an Instance", "Resuming")),
("powering-off", pgettext_lazy("Task status of an Instance",
"Powering Off")),
("powering-on", pgettext_lazy("Task status of an Instance",
"Powering On")),
("rescuing", pgettext_lazy("Task status of an Instance", "Rescuing")),
("unrescuing", pgettext_lazy("Task status of an Instance",
"Unrescuing")),
("rebuilding", pgettext_lazy("Task status of an Instance",
"Rebuilding")),
("rebuild_block_device_mapping", pgettext_lazy(
"Task status of an Instance", "Rebuild Block Device Mapping")),
("rebuild_spawning", pgettext_lazy("Task status of an Instance",
"Rebuild Spawning")),
("migrating", pgettext_lazy("Task status of an Instance", "Migrating")),
("deleting", pgettext_lazy("Task status of an Instance", "Deleting")),
("soft-deleting", pgettext_lazy("Task status of an Instance",
"Soft Deleting")),
("restoring", pgettext_lazy("Task status of an Instance", "Restoring")),
("shelving", pgettext_lazy("Task status of an Instance", "Shelving")),
("shelving_image_pending_upload", pgettext_lazy(
"Task status of an Instance", "Shelving Image Pending Upload")),
("shelving_image_uploading", pgettext_lazy("Task status of an Instance",
"Shelving Image Uploading")),
("shelving_offloading", pgettext_lazy("Task status of an Instance",
"Shelving Offloading")),
("unshelving", pgettext_lazy("Task status of an Instance",
"Unshelving")),
)
POWER_DISPLAY_CHOICES = (
("NO STATE", pgettext_lazy("Power state of an Instance", "No State")),
("RUNNING", pgettext_lazy("Power state of an Instance", "Running")),
("BLOCKED", pgettext_lazy("Power state of an Instance", "Blocked")),
("PAUSED", pgettext_lazy("Power state of an Instance", "Paused")),
("SHUTDOWN", pgettext_lazy("Power state of an Instance", "Shut Down")),
("SHUTOFF", pgettext_lazy("Power state of an Instance", "Shut Off")),
("CRASHED", pgettext_lazy("Power state of an Instance", "Crashed")),
("SUSPENDED", pgettext_lazy("Power state of an Instance", "Suspended")),
("FAILED", pgettext_lazy("Power state of an Instance", "Failed")),
("BUILDING", pgettext_lazy("Power state of an Instance", "Building")),
)
INSTANCE_FILTER_CHOICES = (
('uuid', _("Instance ID ="), True),
('name', _("Instance Name ="), True),
('image', _("Image ID ="), True),
('image_name', _("Image Name ="), True),
('ip', _("IPv4 Address ="), True),
('ip6', _("IPv6 Address ="), True, None,
api.neutron.is_enabled_by_config('enable_ipv6')),
('flavor', _("Flavor ID ="), True),
('flavor_name', _("Flavor Name ="), True),
('key_name', _("Key Pair Name ="), True),
('status', _("Status ="), True),
('availability_zone', _("Availability Zone ="), True),
('changes-since', _("Changes Since"), True,
_("Filter by an ISO 8061 formatted time, e.g. 2016-06-14T06:27:59Z")),
('vcpus', _("vCPUs ="), True),
)
class InstancesFilterAction(tables.FilterAction):
filter_type = "server"
filter_choices = INSTANCE_FILTER_CHOICES
def render_locked(instance):
if not hasattr(instance, 'locked'):
return ""
if instance.locked:
icon_classes = "fa fa-fw fa-lock"
help_tooltip = _("This instance is currently locked. To enable more "
"actions on it, please unlock it by selecting Unlock "
"Instance from the actions menu.")
else:
icon_classes = "fa fa-fw fa-unlock text-muted"
help_tooltip = _("This instance is unlocked.")
locked_status = ('<span data-toggle="tooltip" title="{}" class="{}">'
'</span>').format(help_tooltip, icon_classes)
return mark_safe(locked_status)
def get_server_detail_link(obj, request):
return get_url_with_pagination(request,
InstancesTable._meta.pagination_param,
InstancesTable._meta.prev_pagination_param,
'horizon:project:instances:detail',
obj.id)
class InstancesTable(tables.DataTable):
TASK_STATUS_CHOICES = (
(None, True),
("none", True)
)
STATUS_CHOICES = (
("active", True),
("shutoff", True),
("suspended", True),
("paused", True),
("error", False),
("rescue", True),
("shelved", True),
("shelved_offloaded", True),
)
name = tables.WrappingColumn("name",
link=get_server_detail_link,
verbose_name=_("Instance Name"))
image_name = tables.WrappingColumn("image_name",
verbose_name=_("Image Name"))
ip = tables.Column(get_ips,
verbose_name=_("IP Address"),
attrs={'data-type': "ip"})
flavor = tables.Column(get_flavor,
sortable=False,
verbose_name=_("Flavor"))
keypair = tables.Column(get_keyname, verbose_name=_("Key Pair"))
status = tables.Column("status",
filters=(title, filters.replace_underscores),
verbose_name=_("Status"),
status=True,
status_choices=STATUS_CHOICES,
display_choices=STATUS_DISPLAY_CHOICES)
locked = tables.Column(render_locked,
verbose_name="",
sortable=False)
az = tables.Column("availability_zone",
verbose_name=_("Availability Zone"))
task = tables.Column("OS-EXT-STS:task_state",
verbose_name=_("Task"),
empty_value=TASK_DISPLAY_NONE,
status=True,
status_choices=TASK_STATUS_CHOICES,
display_choices=TASK_DISPLAY_CHOICES)
state = tables.Column(get_power_state,
filters=(title, filters.replace_underscores),
verbose_name=_("Power State"),
display_choices=POWER_DISPLAY_CHOICES)
created = tables.Column("created",
verbose_name=_("Age"),
filters=(filters.parse_isotime,
filters.timesince_sortable),
attrs={'data-type': 'timesince'})
class Meta(object):
name = "instances"
verbose_name = _("Instances")
status_columns = ["status", "task"]
row_class = UpdateRow
table_actions_menu = (StartInstance, StopInstance, SoftRebootInstance)
launch_actions = ()
if settings.LAUNCH_INSTANCE_LEGACY_ENABLED:
launch_actions = (LaunchLink,) + launch_actions
if settings.LAUNCH_INSTANCE_NG_ENABLED:
launch_actions = (LaunchLinkNG,) + launch_actions
table_actions = launch_actions + (DeleteInstance,
InstancesFilterAction)
row_actions = (StartInstance, ConfirmResize, RevertResize,
CreateSnapshot, AssociateIP, DisassociateIP,
AttachInterface, DetachInterface, EditInstance,
AttachVolume, DetachVolume,
UpdateMetadata, DecryptInstancePassword,
EditInstanceSecurityGroups,
EditPortSecurityGroups,
ConsoleLink, LogLink,
RescueInstance, UnRescueInstance,
TogglePause, ToggleSuspend, ToggleShelve,
ResizeLink, LockInstance, UnlockInstance,
SoftRebootInstance, RebootInstance,
StopInstance, RebuildInstance, DeleteInstance)
| 36.811364
| 79
| 0.603383
|
69bcea51016c986c43081adc9aca49bdb2a59959
| 5,069
|
py
|
Python
|
software/python/multirobot/robot/old/pi-20161009/robot-client-4.py
|
lucas137/trackedrobot
|
e315d2811e1f13d92aac079048e5931755335054
|
[
"MIT"
] | null | null | null |
software/python/multirobot/robot/old/pi-20161009/robot-client-4.py
|
lucas137/trackedrobot
|
e315d2811e1f13d92aac079048e5931755335054
|
[
"MIT"
] | null | null | null |
software/python/multirobot/robot/old/pi-20161009/robot-client-4.py
|
lucas137/trackedrobot
|
e315d2811e1f13d92aac079048e5931755335054
|
[
"MIT"
] | null | null | null |
#------------------------------------------------------------------------------
# robot-client-4.py
#------------------------------------------------------------------------------
#!/usr/bin/python3
# robot client emulator
from __future__ import print_function
import time # time-related functions
import socket # networking interface
import sys # system-specific parameters
# Motor command format "m1,m2"
# Value of 0 is motor stopped
# 10 is forward max speed
# -10 is reverse max speed
from pololu_drv8835_rpi import motors, MAX_SPEED
#------------------------------------------------------------------------------
robot = 4 # ID number
host = '192.168.2.100' # remote host
port = 64000 # server port
timeout = 1 # connection timeout in seconds
# blocking mode: timeout = None
if len(sys.argv) > 1:
host = sys.argv[1]
if len(sys.argv) > 2:
port = sys.argv[2]
print('remote host:', host, sep=' ')
print('port number:', port, sep=' ')
#------------------------------------------------------------------------------
class RobotClient:
def __init__(self, robot):
self.id = robot # robot ID number
self.name = 'robot:'+str(robot) # robot name
self.ack = 'ack:'+str(robot) # acknowledge message
self.motors = motors # motor driver
self.sock = None # socket
print("robot", self.id, sep=' ') # print robot ID
self.motors.setSpeeds(0,0) # make sure motors are stopped
def connect(self, host, port):
while self.sock == None:
try:
print("robot", self.id, "connect...", sep=' ')
# create socket object and connect to server
self.sock = socket.create_connection((host, port), timeout)
# send robot ID to server
self.send_str(self.name)
print(self.name)
print("robot", self.id, "connected")
except:
print("robot", self.id, "timeout", sep=' ')
def disconnect(self):
self.motors.setSpeeds(0,0) # stop motors
if self.sock != None:
self.sock.close() # close socket
self.sock = None
print("robot", self.id, "connection closed", sep=' ')
def is_connected(self):
return (self.sock != None)
def send_str(self, string):
self.sock.sendall(string.encode('utf-8'))
#def receive_bytes(self):
# try:
# byte_array = self.sock.recv(1024) # receive message from server
# #print("received", len(byte_array), "bytes", sep=' ')
# #print("recv: ", byte_array) # debug
# return byte_array
# except:
# print("socket.recv error")
# return b''
def receive_str(self):
try:
byte_array = self.sock.recv(1024) # receive message from server
return byte_array.decode('utf-8')
except:
print("robot", self.id, "socket.recv error", sep=' ')
return ''
def motor_command(self, string):
try:
motor_values = string.split(',')
except:
print("robot", self.id, "string.split error", sep=' ')
else:
motor_1 = int((MAX_SPEED * int(motor_values[0]))/10)
motor_2 = int((MAX_SPEED * int(motor_values[1]))/10)
self.motors.setSpeeds(motor_1, motor_2)
print("robot", self.id, "motors:", motor_1, ',', motor_2, sep=' ')
#------------------------------------------------------------------------------
client = RobotClient(robot)
# To break loop: Ctrl + Shift + \
# socket connection loop
while True:
client.connect(host, port) # connect to server
# message loop
while client.is_connected():
recv_str = client.receive_str() # receive message from server
if not recv_str: # check if no bytes received
client.disconnect() # stop motors and close connection
break
for msg in recv_str.split():
if msg == 'close': # server closed
print("close")
client.disconnect() # stop motors and close connection
break
elif msg == 'shutdown':
client.disconnect()
print("shutdown")
sys.exit()
elif msg == 'syn': # handshake synchronize message
client.send_str(client.ack) # send acknowledge response
print("syn -> ack")
else:
client.motor_command(msg)
client.disconnect() # stop motors and close connection
print("exit")
#------------------------------------------------------------------------------
| 35.950355
| 80
| 0.473072
|
ae105ef56fc8a9365fc211032af1a611140dcaea
| 8,732
|
py
|
Python
|
evaluation/ms_ssim/msssim_score_txt_consistency.py
|
pedroryzewski/TCC
|
ba5167928d5a14dc0c6e144d0927c050090950a6
|
[
"MIT"
] | null | null | null |
evaluation/ms_ssim/msssim_score_txt_consistency.py
|
pedroryzewski/TCC
|
ba5167928d5a14dc0c6e144d0927c050090950a6
|
[
"MIT"
] | null | null | null |
evaluation/ms_ssim/msssim_score_txt_consistency.py
|
pedroryzewski/TCC
|
ba5167928d5a14dc0c6e144d0927c050090950a6
|
[
"MIT"
] | null | null | null |
import sys
import os
import numpy as np
import torch
import torch.nn as nn
import argparse
from torch.utils.data import DataLoader
import random
import pickle
from scipy import signal
from scipy.ndimage.filters import convolve
proj_root = '.'
sys.path.insert(0, proj_root)
data_root = 'data'
model_root = 'models'
from gan.data_loader import BirdsDataset
from gan.networks import Generator
from gan.networks import ImgEncoder
from gan.proj_utils.local_utils import save_images
from gan.proj_utils.torch_utils import to_torch, to_numpy, to_binary, roll
from PIL import Image
def _FSpecialGauss(size, sigma):
"""Function to mimic the 'fspecial' gaussian MATLAB function."""
radius = size // 2
offset = 0.0
start, stop = -radius, radius + 1
if size % 2 == 0:
offset = 0.5
stop -= 1
x, y = np.mgrid[offset + start:stop, offset + start:stop]
assert len(x) == size
g = np.exp(-((x ** 2 + y ** 2) / (2.0 * sigma ** 2)))
return g / g.sum()
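# Illustrative check (not part of the original script): the kernel is a
# normalized 2-D Gaussian, e.g.
#   k = _FSpecialGauss(3, 1.5)   # 3x3 kernel
#   k.sum() is ~1.0 and k[1, 1] is the largest (centre) entry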
def SSIM(img1, img2, seg1, seg2, max_val=255, filter_size=11,
filter_sigma=1.5, k1=0.01, k2=0.03):
"""Return the Structural Similarity Map between `img1` and `img2`.
This function attempts to match the functionality of ssim_index_new.m by
Zhou Wang: http://www.cns.nyu.edu/~lcv/ssim/msssim.zip
Arguments:
img1: Numpy array holding the first RGB image batch.
img2: Numpy array holding the second RGB image batch.
seg1: Numpy array holding the segmentation mask for `img1` (same spatial
size, single channel); after local averaging, values at or below ~0.008
(roughly one foreground pixel per 11x11 window) count as background.
seg2: Numpy array holding the segmentation mask for `img2`, same convention.
max_val: the dynamic range of the images (i.e., the difference between the
maximum and the minimum allowed values).
filter_size: Size of blur kernel to use (will be reduced for small
images).
filter_sigma: Standard deviation for Gaussian blur kernel (will be reduced
for small images).
k1: Constant used to maintain stability in the SSIM calculation (0.01 in
the original paper).
k2: Constant used to maintain stability in the SSIM calculation (0.03 in
the original paper).
Returns:
The mean SSIM between `img1` and `img2`, averaged only over pixels that
are background in both `seg1` and `seg2`.
Raises:
RuntimeError: If input images don't have the same shape or don't have four
dimensions: [batch_size, height, width, depth].
"""
if img1.shape != img2.shape:
raise RuntimeError('Input images must have the same shape (%s vs. %s).',
img1.shape, img2.shape)
if img1.ndim != 4:
raise RuntimeError('Input images must have four dimensions, not %d',
img1.ndim)
img1 = img1.astype(np.float64)
img2 = img2.astype(np.float64)
_, height, width, _ = img1.shape
# Filter size can't be larger than height or width of images.
size = min(filter_size, height, width)
# Scale down sigma if a smaller filter size is used.
sigma = size * filter_sigma / filter_size if filter_size else 0
if filter_size:
window = np.reshape(_FSpecialGauss(size, sigma), (1, size, size, 1))
s_window = np.ones_like(window) / (filter_size*filter_size)
mu1 = signal.fftconvolve(img1, window, mode='valid')
mu2 = signal.fftconvolve(img2, window, mode='valid')
sigma11 = signal.fftconvolve(img1 * img1, window, mode='valid')
sigma22 = signal.fftconvolve(img2 * img2, window, mode='valid')
sigma12 = signal.fftconvolve(img1 * img2, window, mode='valid')
seg1 = signal.convolve(seg1, s_window, mode='valid')
seg2 = signal.convolve(seg2, s_window, mode='valid')
else:
# Empty blur kernel so no need to convolve.
mu1, mu2 = img1, img2
sigma11 = img1 * img1
sigma22 = img2 * img2
sigma12 = img1 * img2
mu11 = mu1 * mu1
mu22 = mu2 * mu2
mu12 = mu1 * mu2
sigma11 -= mu11
sigma22 -= mu22
sigma12 -= mu12
# Calculate intermediate values used by both ssim and cs_map.
c1 = (k1 * max_val) ** 2
c2 = (k2 * max_val) ** 2
bkg_seg1 = (seg1 <= 0.008) # aprox. threshold for 1 pixel
bkg_seg2 = (seg2 <= 0.008) # aprox. threshold for 1 pixel
mask = (bkg_seg1 & bkg_seg2)
v1 = 2.0 * sigma12 + c2
v2 = sigma11 + sigma22 + c2
ssim = (((2.0 * mu12 + c1) * v1) / ((mu11 + mu22 + c1) * v2))
mask_ssim = (ssim * mask).sum()/(mask.sum() * 3) # 3 channels
return mask_ssim
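# Hedged usage sketch (shapes are assumptions, mirroring the loop below):
# images are [batch, height, width, channels] with values in [0, max_val];
# each mask is [batch, height, width, 1], and only pixels that are background
# in *both* masks contribute to the returned mean SSIM.
if False:
    _img_a = np.random.rand(1, 64, 64, 3) * 255.
    _img_b = np.random.rand(1, 64, 64, 3) * 255.
    _seg_a = np.zeros((1, 64, 64, 1))  # all-background mask
    _seg_b = np.zeros((1, 64, 64, 1))
    print(SSIM(_img_a, _img_b, _seg_a, _seg_b, max_val=255))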
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Gans')
parser.add_argument('--epoch', type=int, default=0,
help='load from epoch')
parser.add_argument('--model', type=str, default='',
help='model name')
parser.add_argument('--batch_size', type=int, default=10,
help='batch_size')
parser.add_argument('--align', type=str, choices=['shape', 'background', 'all', 'none'],
help='Which concept to align during generation.')
args = parser.parse_args()
epoch = args.epoch
model_name = args.model
batch_size = args.batch_size
align = args.align
# set file name
file = 'epoch_%d' % epoch
sample_name = file
png_file = file + '.png'
txt_file = file + '.txt'
z_file = file + '.pickle'
# cfgs
data_name = 'birds'
emb_dim = 128
scode_dim = 1024 # segmentation enconded dim
# folders
datadir = os.path.join(data_root, data_name)
model_name = '{}_{}'.format(model_name, data_name)
model_folder = os.path.join(model_root, model_name)
# NNs
netG = Generator(tcode_dim=512, scode_dim=scode_dim, emb_dim=emb_dim, hid_dim=128)
netEs = ImgEncoder(num_chan=1, out_dim=scode_dim)
netEb = ImgEncoder(num_chan=3, out_dim=scode_dim)
# Dataset
dataset = BirdsDataset(datadir, mode='test')
dataloader = DataLoader(dataset, batch_size=batch_size, shuffle=True)
# load models from checkpoint
G_weightspath = os.path.join(model_folder, 'G_epoch{}.pth'.format(epoch))
D_weightspath = os.path.join(model_folder, 'D_epoch{}.pth'.format(epoch))
Es_weightspath = os.path.join(model_folder, 'Es_epoch{}.pth'.format(epoch))
Eb_weightspath = os.path.join(model_folder, 'Eb_epoch{}.pth'.format(epoch))
netG.load_state_dict(torch.load(G_weightspath))
netEs.load_state_dict(torch.load(Es_weightspath))
netEb.load_state_dict(torch.load(Eb_weightspath))
# set to GPU
netG = netG.cuda()
netEs = netEs.cuda()
netEb = netEb.cuda()
scores = []
with torch.no_grad():
for i in range(100):
print('%d/100' % i)
# get batch of test samples
images, _, segs, txt_data, txt_len, captions, *_ = next(iter(dataloader))
images = images.cuda()
segs = segs.cuda()
txt_data = txt_data.cuda()
bimages = images
netG.eval()
netEs.eval()
netEb.eval()
# alignment
if align == 'shape':
bimages = roll(images, 2, dim=0) # for text and seg mismatched backgrounds
bsegs = roll(segs, 2, dim=0) # for text and seg mismatched backgrounds
elif align == 'background':
segs = roll(segs, 1, dim=0) # for text mismatched segmentations
elif align == 'all':
bimages = images.clone()
bsegs = segs.clone()
elif align == 'none':
bimages = roll(images, 2, dim=0) # for text and seg mismatched backgrounds
segs = roll(segs, 1, dim=0) # for text mismatched segmentations
bsegs = roll(segs, 2, dim=0) # for text and seg mismatched backgrounds
np_segs = to_numpy(segs)
np_bsegs = to_numpy(bsegs)
np_images = to_numpy(images)
np_bimages = to_numpy(bimages)
segs_code = netEs(segs)
bkgs_code = netEb(bimages)
*_, f_images, z_list = netG(txt_data, txt_len, segs_code, bkgs_code)
np_fakes = to_numpy(f_images)
for x, b, s, sb in zip(np_fakes, np_bimages, np_segs, np_bsegs):
x = (x.transpose(1,2,0) + 1)/2. * 255.
b = (b.transpose(1,2,0) + 1)/2. * 255.
s = s.transpose(1,2,0)
sb = sb.transpose(1,2,0)
ssim = SSIM(x[np.newaxis,:,:,:], b[np.newaxis,:,:,:], s[np.newaxis,:,:,:], sb[np.newaxis,:,:,:])
# ssim = SSIM(x[np.newaxis,:,:,:], x[np.newaxis,:,:,:], s[np.newaxis,:,:,:], s[np.newaxis,:,:,:])
if not np.isnan(ssim):
scores.append(ssim)
print('SSSIM = %f +- %f' % (np.array(scores).mean(), np.array(scores).std()))
| 35.640816
| 113
| 0.60284
|
7de6a4f1c99f4e8d92a14b1bc542ea68cf5ef2ec
| 6,530
|
py
|
Python
|
holland/evolution/evolution.py
|
lambdalife/holland
|
3ce9510aec92b409cae655c90721f5b1ca067637
|
[
"MIT"
] | 1
|
2021-05-04T09:16:49.000Z
|
2021-05-04T09:16:49.000Z
|
holland/evolution/evolution.py
|
lambdalife/holland
|
3ce9510aec92b409cae655c90721f5b1ca067637
|
[
"MIT"
] | 5
|
2018-10-13T02:09:36.000Z
|
2019-11-13T22:27:41.000Z
|
holland/evolution/evolution.py
|
lambdalife/holland
|
3ce9510aec92b409cae655c90721f5b1ca067637
|
[
"MIT"
] | null | null | null |
import math
import logging
from .evaluation import Evaluator
from .breeding import PopulationGenerator
from ..storage import StorageManager
class Evolver:
"""
Handles evolution for a population
:param fitness_function: the fitness function used to evaluate individuals; see :ref:`fitness-function`
:type fitness_function: function
:param genome_params: a dictionary specifying genome parameters; see :ref:`genome-params`
:type genome_params: dict
:param selection_strategy: a dictionary specifying selection parameters; see :ref:`selection-strategy`
:type selection_strategy: dict
:param should_maximize_fitness: whether fitness should be maximized or minimized
:type should_maximize_fitness: bool
"""
def __init__(
self, fitness_function, genome_params, selection_strategy, should_maximize_fitness=True
):
self.fitness_function = fitness_function
self.genome_params = genome_params
self.selection_strategy = selection_strategy
self.should_maximize_fitness = should_maximize_fitness
def evolve(
self,
generation_params={},
initial_population=None,
stop_conditions={"n_generations": 100, "target_fitness": math.inf},
storage_options={},
logging_options={"level": logging.INFO, "format": "%(message)s"},
):
"""
The heart of Holland.
:param generation_params: a dictionary specifying how to create each generation; see :ref:`generation-params`
:type generation_params: dict
:param initial_population: an initial population
:type initial_population: list
:param stop_conditions: conditions for stopping execution; will stop if *any* of the conditions is met; see Stop Conditions below
:type stop_conditions: dict
:param storage_options: configuration options for storing fitness and genomes (should contain keys ``"fitness"`` and ``"genomes"``); see :ref:`fitness-storage-options` and :ref:`genome-storage-options`
:type storage_options: dict
:param logging_options: options for logging passed to `logging.basicConfig <https://docs.python.org/3/library/logging.html#logging.basicConfig>`_ as ``kwargs``
:type logging_options: dict
:Stop Conditions:
* **n_generations** (*int*) -- the number of generations to run evolution over
* **target_fitness** (*int*) -- the target fitness score, will stop once the fittest individual reaches this score
:returns:
* a list of fitness scores and genomes ``[(fitness, genome), ...]`` (fitness results); or
* a tuple of fitness results (previous bullet) and list of historical fitness statistics ``(fitness_results, fitness_history)``, if ``storage_options["fitness"]`` has ``'should_record_fitness': True`` and ``'format': 'memory'``
:raises ValueError: if ``generation_params["n_random"] < 0`` or ``generation_params["n_elite"] < 0``
:raises ValueError: if ``population_size < 1``
:raises ValueError: if ``n_generations < 1``
.. todo:: If an initial population is given but does not match the given genome parameters, some kind of error should be raised
.. todo:: If an initial population is given and some genomes are missing parameters, a warning is given unless a flag is set to fill those values randomly
Dependencies:
* :func:`~holland.evolution.PopulationGenerator.generate_random_genomes`
* :func:`~holland.evolution.Evaluator.evaluate_fitness`
* :func:`~holland.evolution.PopulationGenerator.generate_next_generation`
* :func:`~holland.storage.StorageManager.update_storage`
* :func:`~holland.storage.StorageManager.react_to_interruption`
Example:
.. literalinclude:: examples/basic_example.py
:linenos:
:emphasize-lines: 16
"""
n_random_per_generation = generation_params.get("n_random", 0)
n_elite_per_generation = generation_params.get("n_elite", 0)
population_size = generation_params.get("population_size", 1000)
n_generations = stop_conditions.get("n_generations", math.inf)
target_fitness = stop_conditions.get("target_fitness", math.inf)
should_stop = (
lambda gen_num, max_fit: gen_num == n_generations - 1 or max_fit == target_fitness
)
if n_random_per_generation < 0 or n_elite_per_generation < 0:
raise ValueError("Number of random and elite genomes per generation cannot be negative")
if population_size < 1:
raise ValueError("Population size must be at least 1")
if n_generations < 1:
raise ValueError("Number of generations must be at least 1")
logging.basicConfig(**logging_options)
logger = logging.getLogger(__name__)
evaluator = Evaluator(self.fitness_function, ascending=self.should_maximize_fitness)
storage_manager = StorageManager(
fitness_storage_options=storage_options.get("fitness", {}),
genome_storage_options=storage_options.get("genomes", {}),
)
population_generator = PopulationGenerator(
self.genome_params, self.selection_strategy, generation_params=generation_params
)
population = initial_population
if population is None:
population = population_generator.generate_random_genomes(population_size)
generation_num = 0
while True:
try:
fitness_results = evaluator.evaluate_fitness(population)
best_fitness = fitness_results[-1][0]
logger.info(f"Generation: {generation_num}; Top Score: {best_fitness}")
storage_manager.update_storage(generation_num, fitness_results)
if should_stop(generation_num, best_fitness):
break
population = population_generator.generate_next_generation(fitness_results)
generation_num += 1
except:
storage_manager.react_to_interruption(generation_num, fitness_results)
raise
if (
storage_options.get("fitness", {}).get("should_record_fitness", False)
and storage_options.get("fitness", {}).get("format") == "memory"
):
return fitness_results, storage_manager.fitness_history
return fitness_results
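# Hedged usage sketch (placeholders, not working values: the genome_params and
# selection_strategy schemas are documented elsewhere in the docs):
#
#   evolver = Evolver(
#       fitness_function=my_fitness_fn,   # hypothetical callable
#       genome_params={...},              # see :ref:`genome-params`
#       selection_strategy={...},         # see :ref:`selection-strategy`
#   )
#   results = evolver.evolve(
#       generation_params={"population_size": 100, "n_elite": 2},
#       stop_conditions={"n_generations": 50},
#   )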
| 43.825503
| 240
| 0.675191
|
ebed74ca81d5277b7404f56c556064a95a943453
| 10,946
|
py
|
Python
|
main.py
|
Atrix256/InverseDFTProblems
|
1742ad3614d563ef67d0bac1be4b8728dba80f5f
|
[
"MIT"
] | 7
|
2021-12-29T08:35:24.000Z
|
2021-12-31T16:07:28.000Z
|
main.py
|
Atrix256/InverseDFTProblems
|
1742ad3614d563ef67d0bac1be4b8728dba80f5f
|
[
"MIT"
] | null | null | null |
main.py
|
Atrix256/InverseDFTProblems
|
1742ad3614d563ef67d0bac1be4b8728dba80f5f
|
[
"MIT"
] | null | null | null |
import os
import imageio
import matplotlib.pyplot as plt
import numpy as np
import random
import sys
import cmath
import math
from PIL import Image, ImageFont, ImageDraw
os.makedirs("out", exist_ok=True)
sourceImgs = [
"bn8",
"bn16",
"bn32",
"bn64",
"bn128",
"bn256",
"bn10240",
]
generatedSizes=[8, 16, 32, 64, 128, 256, 10240]
# Generate noise via IDFT
for generatedSize in generatedSizes:
sourceImg = "idft" + str(generatedSize)
print(sourceImg)
# Make random complex numbers that are conjugate-symmetric, so the inverse DFT below is real-valued
randomlist = np.empty([generatedSize],dtype=complex)
for i in range(int(generatedSize/2)):
angle = 0.0
if i > 0:
angle = np.random.random(1)* math.pi * 2
randomlist[i] = cmath.rect(np.random.random(1), angle)
for i in range(int(generatedSize/2)-1):
randomlist[int(generatedSize/2)+1+i] = np.conj(randomlist[int(generatedSize/2)-1-i])
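    # Editor's note (hedged self-check, off by default like the DEBUG blocks below):
    # the mirroring above relies on conjugate symmetry about the centre bin, i.e.
    # X[n/2 + k] == conj(X[n/2 - k]) with real DC/Nyquist bins, which makes the
    # inverse DFT real up to floating-point error.
    if False:
        _n = 16
        _spec = np.zeros(_n, dtype=complex)
        _spec[_n // 2] = 1.0                              # real DC bin
        _spec[_n // 2 + 3] = 0.5 + 0.25j                  # one positive frequency
        _spec[_n // 2 - 3] = np.conj(_spec[_n // 2 + 3])  # and its conjugate mirror
        _sig = np.fft.ifftn(np.fft.ifftshift(_spec))
        assert np.max(np.abs(np.imag(_sig))) < 1e-12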
# make a 1d gaussian with a peak where 0hz DC is going to be
l = generatedSize+1
sig = 1.9 * float(generatedSize) / 8.0
ax = np.linspace(-(l - 1) / 2., (l - 1) / 2., l)
gauss = np.exp(-0.5 * np.square(ax) / np.square(sig))
gauss = gauss[:-1].copy()
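    # (Editor's comment) Building the window with generatedSize+1 samples and then
    # dropping the last one keeps the Gaussian peak exactly on index generatedSize/2,
    # which is where the DC bin sits in this fftshift-centered layout.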
# DEBUG: show the gaussian curve
if False:
x = np.arange(gauss.shape[0])
plt.plot(x, gauss)
plt.show()
# make LPF and HPF
LPF = randomlist * gauss
HPF = randomlist - LPF
# Make DC be 1/2 * pixelCount
HPF = HPF * generatedSize
LPF = LPF * generatedSize
LPF[int(generatedSize/2)] = (generatedSize-1)/2
HPF[int(generatedSize/2)] = (generatedSize-1)/2
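    # (Editor's comment) Scaling both spectra by generatedSize keeps the IDFT
    # amplitudes comparable across sizes, and forcing the DC bin to
    # (generatedSize-1)/2 gives each reconstructed signal a mean of roughly 0.5,
    # since the mean of the IDFT is the DC bin divided by the sample count.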
DFTs = [LPF, HPF]
labels = ["LPF", "HPF"]
for dft, label in zip(DFTs, labels):
sourceImg = "idft" + str(generatedSize) + "_" + label
print(label)
# IDFT
signal = np.fft.ifftn(np.fft.ifftshift(dft))
# DEBUG: make sure the imaginary part of the signal isn't significant
if False:
for signalSample in signal:
if np.imag(signalSample) > 0.1:
print("===============")
print("randomlist = " + str(randomlist))
print("gauss = " + str(gauss))
print("dft = " + str(dft))
print("signal = " + str(signal))
sys.exit()
dfty = np.abs(dft)
# cosmetic modifications to the DFT
dfty[int(dfty.shape[0] / 2)] = 0 # zero out DC
dfty = np.append(dfty, dfty[0]) # duplicate the negative dft frequency to the positive
# Graph the DFT
plt.title(sourceImg + " DFT")
plt.xlabel("Hertz")
plt.ylabel("Magnitude")
x = np.arange(dfty.shape[0])
x = x - int((dfty.shape[0]-1) / 2)
plt.plot(x, dfty)
fig = plt.gcf()
fig.savefig("out/" + sourceImg + ".dft.png", bbox_inches='tight')
plt.close(fig)
# Graph the raw values
signal = np.real(signal)
y = signal
x = np.arange(y.shape[0])
plt.plot(x, y)
ax = plt.gca()
ax.set_xlim([0.0, y.shape[0]-1])
ax.set_ylim([0.0, 1.0])
plt.title(sourceImg + " IDFT Raw Values")
plt.xlabel("Index")
plt.ylabel("Value")
fig = plt.gcf()
fig.savefig("out/" + sourceImg + ".valuesraw.png", bbox_inches='tight')
plt.close(fig)
# Make Raw Histogram
if False:
plt.title(sourceImg + " Histogram")
plt.xlabel("Value")
plt.ylabel("Count")
plt.hist(y, 256, facecolor='blue', alpha=0.5)
fig = plt.gcf()
fig.savefig("out/" + sourceImg + ".histogramraw.png", bbox_inches='tight')
plt.close(fig)
# DEBUG: verify dft
if False:
# Make DFT
dfty = np.abs(np.fft.fftshift(np.fft.fftn(signal)))
dfty[int(dfty.shape[0] / 2)] = 0 # zero out DC
dfty = np.append(dfty, dfty[0])
# Graph the DFT
plt.title(sourceImg + " DFT")
plt.xlabel("Hertz")
plt.ylabel("Magnitude")
x = np.arange(dfty.shape[0])
x = x - int((dfty.shape[0]-1) / 2)
plt.plot(x, dfty)
fig = plt.gcf()
fig.savefig("out/" + sourceImg + ".dft2raw.png", bbox_inches='tight')
plt.close(fig)
# Normalize the signal
signalmin = np.amin(signal)
signalmax = np.amax(signal)
signal = (signal - signalmin) / (signalmax - signalmin)
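        # (Editor's comment) Min-max normalization maps the raw IDFT output into
        # [0, 1] so it can be plotted on the fixed axes, histogrammed, and later
        # written out as an 8-bit image.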
# Graph the values
signal = np.real(signal)
y = signal
x = np.arange(y.shape[0])
plt.plot(x, y)
ax = plt.gca()
ax.set_xlim([0.0, y.shape[0]-1])
ax.set_ylim([0.0, 1.0])
plt.title(sourceImg + " IDFT Normalized Values")
plt.xlabel("Index")
plt.ylabel("Value")
fig = plt.gcf()
fig.savefig("out/" + sourceImg + ".values.png", bbox_inches='tight')
plt.close(fig)
# Make Histogram
if True:
plt.title(sourceImg + " Histogram")
plt.xlabel("Value")
plt.ylabel("Count")
plt.hist(y, 256, facecolor='blue', alpha=0.5)
fig = plt.gcf()
fig.savefig("out/" + sourceImg + ".histogram.png", bbox_inches='tight')
plt.close(fig)
# DEBUG: verify dft
if False:
# Make DFT
dfty = np.abs(np.fft.fftshift(np.fft.fftn(signal)))
dfty[int(dfty.shape[0] / 2)] = 0 # zero out DC
dfty = np.append(dfty, dfty[0])
# Graph the DFT
plt.title(sourceImg + " DFT")
plt.xlabel("Hertz")
plt.ylabel("Magnitude")
x = np.arange(dfty.shape[0])
x = x - int((dfty.shape[0]-1) / 2)
plt.plot(x, dfty)
fig = plt.gcf()
fig.savefig("out/" + sourceImg + ".dft2.png", bbox_inches='tight')
plt.close(fig)
# tile the signal to be a 256x256 image
generatedSize_ = min(generatedSize, 256)
if True: #generatedSize <= 256:
row = Image.new('L', (1, 256), (255))
signalImage = Image.fromarray(np.uint8(signal*255))
for i in range(int(256/generatedSize_)):
row.paste(signalImage, (0, i * generatedSize_))
out = Image.new('L', (256, 256), (255))
for i in range(256):
out.paste(row, (i, 0))
out.save("out/" + sourceImg + ".png")
# Make the composite diagram
im1 = Image.open("out/" + sourceImg + ".png")
im2 = Image.open("out/" + sourceImg + ".valuesraw.png")
im3 = Image.open("out/" + sourceImg + ".values.png")
im4 = Image.open("out/" + sourceImg + ".histogram.png")
im5 = Image.open("out/" + sourceImg + ".dft.png")
height = max(im1.size[1], max(im2.size[1], max(im3.size[1], max(im4.size[1], im5.size[1])))) + 6
width = im1.size[0] + im2.size[0] + im3.size[0] + im4.size[0] + im5.size[0] + 18
imout = Image.new('RGB',(width, height), (255, 255, 255))
x = 3
imout.paste(im1, (x, int((height - im1.size[1]) / 2)))
x = x + im1.size[0] + 3
imout.paste(im2, (x, int((height - im2.size[1]) / 2)))
x = x + im2.size[0] + 3
imout.paste(im3, (x, int((height - im3.size[1]) / 2)))
x = x + im3.size[0] + 3
imout.paste(im4, (x, int((height - im4.size[1]) / 2)))
x = x + im4.size[0] + 3
imout.paste(im5, (x, int((height - im5.size[1]) / 2)))
imout.save("out/_" + sourceImg + ".png")
# Process blue noise made with void and cluster
for sourceImg in sourceImgs:
# load the data
fileName = "source/" + sourceImg + ".png"
print(fileName)
y = (imageio.imread(fileName).astype(float) / 255.0)[0,:]
# Graph the values
x = np.arange(y.shape[0])
plt.plot(x, y)
ax = plt.gca()
ax.set_xlim([0.0, y.shape[0]-1])
ax.set_ylim([0.0, 1.0])
plt.title(sourceImg + " Values")
plt.xlabel("Index")
plt.ylabel("Value")
fig = plt.gcf()
fig.savefig("out/" + sourceImg + ".values.png", bbox_inches='tight')
plt.close(fig)
# Make Histogram
plt.title(sourceImg + " Histogram")
plt.xlabel("Value")
plt.ylabel("Count")
plt.hist(y, 256, facecolor='blue', alpha=0.5)
fig = plt.gcf()
fig.savefig("out/" + sourceImg + ".histogram.png", bbox_inches='tight')
plt.close(fig)
# Make DFT
dfty = np.abs(np.fft.fftshift(np.fft.fftn(y)))
dfty[int(dfty.shape[0] / 2)] = 0 # zero out DC
dfty = np.append(dfty, dfty[0])
# Graph the DFT
plt.title(sourceImg + " DFT")
plt.xlabel("Hertz")
plt.ylabel("Magnitude")
x = np.arange(dfty.shape[0])
x = x - int((dfty.shape[0]-1) / 2)
plt.plot(x, dfty)
fig = plt.gcf()
fig.savefig("out/" + sourceImg + ".dft.png", bbox_inches='tight')
plt.close(fig)
# tile the signal to be a 256x256 image
generatedSize = min(y.shape[0], 256)
if True: #generatedSize <= 256:
row = Image.new('L', (1, 256), (255))
signalImage = Image.fromarray(np.uint8(y*255))
for i in range(int(256/generatedSize)):
row.paste(signalImage, (0, i * generatedSize))
out = Image.new('L', (256, 256), (255))
for i in range(256):
out.paste(row, (i, 0))
out.save("out/" + sourceImg + ".png")
# Make the composite diagram
im1 = Image.open("out/" + sourceImg + ".png")
im2 = Image.open("out/" + sourceImg + ".values.png")
im3 = Image.open("out/" + sourceImg + ".histogram.png")
im4 = Image.open("out/" + sourceImg + ".dft.png")
height = max(im1.size[1], max(im2.size[1], max(im3.size[1], im4.size[1]))) + 6
width = im1.size[0] + im2.size[0] + im3.size[0] + im4.size[0] + 15
imout = Image.new('RGB',(width, height), (255, 255, 255))
x = 3
imout.paste(im1, (x, int((height - im1.size[1]) / 2)))
x = x + im1.size[0] + 3
imout.paste(im2, (x, int((height - im2.size[1]) / 2)))
x = x + im2.size[0] + 3
imout.paste(im3, (x, int((height - im3.size[1]) / 2)))
x = x + im3.size[0] + 3
imout.paste(im4, (x, int((height - im4.size[1]) / 2)))
imout.save("out/_" + sourceImg + ".png")
focus10240 = ["bn10240", "idft10240_HPF"]
for sourceImg in focus10240:
im1 = Image.open("out/" + sourceImg + ".histogram.png")
im2 = Image.open("out/" + sourceImg + ".dft.png")
height = max(im1.size[1], im2.size[1])
width = im1.size[0] + im2.size[0] + 9
imout = Image.new('RGB',(width, height), (255, 255, 255))
x = 3
imout.paste(im1, (x, int((height - im1.size[1]) / 2)))
x = x + im1.size[0] + 3
imout.paste(im2, (x, int((height - im2.size[1]) / 2)))
imout.save("out/_focus10240_" + sourceImg + ".png")
| 34.099688 | 104 | 0.53161 |
24d26879de388626eef38990af6b10e5717c0ade | 2185 | py | Python | tensor2tensor/tpu/tpu_trainer_lib_test.py | webdeveloper0012/Tensor2tensor | 48bce065278eba461c8a2840e4132becbc822c7c | ["Apache-2.0"] | 9 | 2019-11-21T02:48:21.000Z | 2021-08-18T07:30:34.000Z | tensor2tensor/tpu/tpu_trainer_lib_test.py | webdeveloper0012/Tensor2tensor | 48bce065278eba461c8a2840e4132becbc822c7c | ["Apache-2.0"] | 3 | 2020-01-14T16:37:49.000Z | 2020-04-14T06:24:56.000Z | tensor2tensor/tpu/tpu_trainer_lib_test.py | webdeveloper0012/Tensor2tensor | 48bce065278eba461c8a2840e4132becbc822c7c | ["Apache-2.0"] | 1 | 2020-06-19T09:34:01.000Z | 2020-06-19T09:34:01.000Z |
# coding=utf-8
# Copyright 2017 The Tensor2Tensor Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for tpu_trainer_lib."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# Dependency imports
from tensor2tensor.tpu import tpu_trainer_lib as lib
from tensor2tensor.utils import trainer_utils
from tensor2tensor.utils import trainer_utils_test
import tensorflow as tf
class TpuTrainerTest(tf.test.TestCase):
@classmethod
def setUpClass(cls):
trainer_utils_test.TrainerUtilsTest.setUpClass()
def testSmoke(self):
data_dir = trainer_utils_test.TrainerUtilsTest.data_dir
problem_name = "tiny_algo"
model_name = "transformer"
hparams_set = "transformer_tpu"
hparams = trainer_utils.create_hparams(hparams_set, data_dir)
trainer_utils.add_problem_hparams(hparams, problem_name)
model_fn = lib.get_model_fn(model_name, hparams, use_tpu=False)
input_fn = lib.get_input_fn(tf.estimator.ModeKeys.TRAIN, hparams)
params = {"batch_size": 16}
config = tf.contrib.tpu.RunConfig(
tpu_config=tf.contrib.tpu.TPUConfig(num_shards=2))
features, targets = input_fn(params)
with tf.variable_scope("training"):
spec = model_fn(features, targets, tf.estimator.ModeKeys.TRAIN, params,
config)
self.assertTrue(spec.loss is not None)
self.assertTrue(spec.train_op is not None)
with tf.variable_scope("eval"):
spec = model_fn(features, targets, tf.estimator.ModeKeys.EVAL, params,
config)
self.assertTrue(spec.eval_metric_ops is not None)
if __name__ == "__main__":
tf.test.main()
| 32.132353 | 77 | 0.745538 |
5148c7a1dea90be18ab9dc7308493a09f82362d9 | 16001 | py | Python | tests/bbo/unit/test_heuristics.py | ValentinGaut/shaman | 754e9eef3c097f3e58b0f06e7c08716bc1b11edd | ["Apache-2.0"] | null | null | null | tests/bbo/unit/test_heuristics.py | ValentinGaut/shaman | 754e9eef3c097f3e58b0f06e7c08716bc1b11edd | ["Apache-2.0"] | null | null | null | tests/bbo/unit/test_heuristics.py | ValentinGaut/shaman | 754e9eef3c097f3e58b0f06e7c08716bc1b11edd | ["Apache-2.0"] | null | null | null |
# Copyright 2020 BULL SAS All rights reserved
"""
Tests each possible heuristic.
WARNING:
If you want to add a new heuristic, please use the following format when writing out
unittests:
- Create a test_my_heuristic.py file for unit testing of the methods included in the heuristic.
- Add a section in this file for testing that the new heuristic works properly
when used through the optimizer interface (a hedged skeleton of such a section is
sketched just before the __main__ block at the end of this file).
"""
# Disable the could be a function for unit testing
# pylint: disable=no-self-use
# Disable name too longs (necessary for clarity in testing)
# pylint: disable=invalid-name
import unittest
import numpy as np
import time
from sklearn.neighbors import KNeighborsRegressor
from sklearn.gaussian_process import GaussianProcessRegressor
from bbo.optimizer import BBOptimizer
from bbo.heuristics.heuristics import Heuristic
# Imports for genetic algorithm
from bbo.heuristics.genetic_algorithm.selections import (
tournament_pick,
probabilistic_pick,
)
from bbo.heuristics.genetic_algorithm.mutations import mutate_chromosome_to_neighbor
from bbo.heuristics.genetic_algorithm.crossover import (
single_point_crossover,
double_point_crossover,
)
# Imports for simulated_annealing
from bbo.heuristics.simulated_annealing.restart_functions import random_restart
from bbo.heuristics.simulated_annealing.neighbor_functions import hop_to_next_value
from bbo.heuristics.simulated_annealing.cooldown_functions import (
multiplicative_schedule,
)
# Imports for surrogate models
from bbo.heuristics.surrogate_models.next_parameter_strategies import (
maximum_probability_improvement,
expected_improvement,
l_bfgs_b_minimizer,
)
from bbo.heuristics.surrogate_models.regression_models import (
DecisionTreeSTDRegressor,
CensoredGaussianProcesses,
)
# Use parabola as fake black-box
class Parabola:
"""
    Black box class that will be used for testing purposes
"""
def __init__(self):
"""
Initialization of the black-box
"""
print("I'm the Parabola function ! Good luck finding my optimum !")
def compute(self, array_2d):
"""
Computes the value of the parabola at data point array_2d
"""
return array_2d[0] ** 2 + array_2d[1] ** 2
class AsyncParabola:
"""
    Black-box class that will be used for testing heuristics that censor some data.
"""
def __init__(self):
"""
Initialization of the black-box
"""
print("I'm the async Parabola function ! Good luck finding my optimum !")
def compute(self, array_2d):
"""
Computes the value of the parabola at data point array_2d
"""
random_time = np.round(
np.abs(
np.random.normal(
size=1,
loc=3,
scale=0.5)))[0]
time.sleep(random_time)
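        # (Editor's comment) The random sleep emulates a black box whose evaluation
        # time varies from call to call, which is what the asynchronous/censored
        # heuristics exercised below rely on.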
return array_2d[0] ** 2 + array_2d[1] ** 2
class TestHeuristic(unittest.TestCase):
"""
    Tests that the different abstract methods raise an exception if called without being overridden.
"""
def setUp(self):
"""
Sets up the initialization of the Heuristic class.
"""
self.heuristic = Heuristic()
def test_init(self):
"""
Tests that the attributes are well set.
"""
self.assertFalse(self.heuristic.stop)
def test_choose_next_parameter(self):
"""
Tests that the "choose_next_parameter" method raises a NotImplementedError.
"""
with self.assertRaises(NotImplementedError):
self.heuristic.choose_next_parameter({}, [])
def test_summary(self):
"""
Tests that the "summary" method raises a NotImplementedError.
"""
self.assertRaises(NotImplementedError, self.heuristic.summary)
def test_reset(self):
"""
Tests that the "reset" method raises a NotImplementedError.
"""
self.assertRaises(NotImplementedError, self.heuristic.reset)
class TestGeneticAlgorithms(unittest.TestCase):
"""
Tests the different selection methods for selecting the two fittest parents in the population.
"""
def setUp(self):
"""
Sets up the testing procedure by initializing the parabola as the black-box function.
"""
self.fake_black_box = Parabola()
def test_tournament_pick_single_crossover(self):
"""
Tests that the optimization works properly when using the tournament pick method for
selection of the fittest parents + a single crossover method.
"""
bb_obj = BBOptimizer(
black_box=self.fake_black_box,
parameter_space=np.array(
[np.arange(-5, 5, 1), np.arange(-6, 6, 1), np.arange(-6, 6, 1)]
).T,
heuristic="genetic_algorithm",
initial_sample_size=2,
max_iteration=10,
selection_method=tournament_pick,
crossover_method=single_point_crossover,
mutation_method=mutate_chromosome_to_neighbor,
pool_size=5,
mutation_rate=0.1,
elitism=False,
)
bb_obj.optimize()
def test_probabilistic_pick_single_crossover(self):
"""
Tests that the optimization works properly when using the probabilistic pick method for
selection of the fittest parents + a single crossover method.
"""
bb_obj = BBOptimizer(
black_box=self.fake_black_box,
parameter_space=np.array(
[
np.arange(-5, 5, 1),
np.arange(-6, 6, 1),
np.arange(-5, 5, 1),
np.arange(-6, 6, 1),
np.arange(-5, 5, 1),
np.arange(-6, 6, 1),
np.arange(-5, 5, 1),
np.arange(-6, 6, 1),
]
).T,
heuristic="genetic_algorithm",
initial_sample_size=2,
max_iteration=10,
selection_method=probabilistic_pick,
crossover_method=single_point_crossover,
mutation_method=mutate_chromosome_to_neighbor,
mutation_rate=0.2,
)
bb_obj.optimize()
def test_tournament_pick_double_crossover(self):
"""
Tests that the optimization works properly when using the tournament pick method for
selection of the fittest parents + double crossover method.
"""
bb_obj = BBOptimizer(
black_box=self.fake_black_box,
parameter_space=np.array(
[np.arange(-5, 5, 1), np.arange(-6, 6, 1), np.arange(-6, 6, 1)]
).T,
heuristic="genetic_algorithm",
initial_sample_size=2,
max_iteration=10,
selection_method=tournament_pick,
crossover_method=double_point_crossover,
mutation_method=mutate_chromosome_to_neighbor,
pool_size=5,
mutation_rate=0.1,
elitism=False,
)
bb_obj.optimize()
def test_probabilistic_pick_double_crossover(self):
"""
Tests that the optimization works properly when using the tournament pick method for
selection of the fittest parents + double crossover method.
"""
bb_obj = BBOptimizer(
black_box=self.fake_black_box,
parameter_space=np.array(
[
np.arange(-5, 5, 1),
np.arange(-6, 6, 1),
np.arange(-6, 6, 1),
np.arange(-6, 6, 1),
]
).T,
heuristic="genetic_algorithm",
initial_sample_size=2,
max_iteration=10,
selection_method=tournament_pick,
crossover_method=double_point_crossover,
mutation_method=mutate_chromosome_to_neighbor,
pool_size=5,
mutation_rate=0.1,
elitism=False,
)
bb_obj.optimize()
class TestSimulatedAnnealing(unittest.TestCase):
"""
    Tests that the optimization process works properly when using simulated annealing as
    the heuristic.
"""
def setUp(self):
"""
Sets up the testing procedure by initializing the parabola as the black-box function.
"""
self.fake_black_box = Parabola()
def test_simulated_annealing_no_restart(self):
"""
Tests that the simulated annealing algorithm works properly when used through the
optimizer interface and without using any restart.
"""
bb_obj = BBOptimizer(
black_box=self.fake_black_box,
parameter_space=np.array(
[
np.arange(-5, 5, 1),
np.arange(-6, 6, 1),
np.arange(-6, 6, 1),
np.arange(-6, 6, 1),
]
).T,
heuristic="simulated_annealing",
initial_sample_size=2,
max_iteration=10,
initial_temperature=1000,
cooldown_function=multiplicative_schedule,
neighbor_function=hop_to_next_value,
cooling_factor=3,
)
bb_obj.optimize()
def test_simulated_annealing_with_restart(self):
"""
Tests that the simulated annealing algorithm works properly when used through the
optimizer interface when using a restart.
"""
bb_obj = BBOptimizer(
black_box=self.fake_black_box,
parameter_space=np.array(
[
np.arange(-5, 5, 1),
np.arange(-6, 6, 1),
np.arange(-6, 6, 1),
np.arange(-6, 6, 1),
]
).T,
heuristic="simulated_annealing",
initial_sample_size=2,
max_iteration=10,
initial_temperature=1000,
cooldown_function=multiplicative_schedule,
neighbor_function=hop_to_next_value,
cooling_factor=3,
restart=random_restart,
bernouilli_parameter=0.2,
)
bb_obj.optimize()
print(bb_obj.history["fitness"])
class TestSurrogateModels(unittest.TestCase):
"""
    Tests that the optimization process works properly when using a surrogate model as
    the heuristic.
"""
def setUp(self):
"""
Sets up the testing procedure by initializing the parabola as the black-box function.
"""
self.fake_black_box = Parabola()
self.fake_async_black_box = AsyncParabola()
def test_surrogate_model_gp_ei(self):
"""
        Tests that the surrogate model heuristic integrates properly with the
        BBOptimizer when using GP + EI.
"""
bb_obj = BBOptimizer(
black_box=self.fake_black_box,
parameter_space=np.array(
[
np.arange(-5, 5, 1),
np.arange(-6, 6, 1),
np.arange(-6, 6, 1),
np.arange(-6, 6, 1),
]
).T,
heuristic="surrogate_model",
regression_model=GaussianProcessRegressor,
next_parameter_strategy=expected_improvement,
initial_sample_size=2,
max_iteration=10,
)
bb_obj.optimize()
def test_surrogate_model_gp_mpi(self):
"""
        Tests that the surrogate model heuristic integrates properly with the
        BBOptimizer when using GP + MPI.
"""
bb_obj = BBOptimizer(
black_box=self.fake_black_box,
parameter_space=np.array(
[
np.arange(-5, 5, 1),
np.arange(-6, 6, 1),
np.arange(-6, 6, 1),
np.arange(-6, 6, 1),
]
).T,
heuristic="surrogate_model",
regression_model=GaussianProcessRegressor,
next_parameter_strategy=maximum_probability_improvement,
initial_sample_size=2,
max_iteration=10,
)
bb_obj.optimize()
# def test_surrogate_model_knn_min(self):
# """
# Tests that the optimization process surrogate models integrates properly in the
# BBOptimizer when using KNearestNeighbor regressor and the regressed function as the merit
# one.
# """
# bb_obj = BBOptimizer(black_box=self.fake_black_box,
# parameter_space=np.array([np.arange(-5, 5, 1), np.arange(-6, 6, 1),
# np.arange(-6, 6, 1), np.arange(-6, 6, 1)]).T,
# heuristic="surrogate_model",
# regression_model=KNeighborsRegressor,
# next_parameter_strategy=l_bfgs_b_minimizer,
# initial_sample_size=6,
# max_iteration=10)
# bb_obj.optimize()
def test_surrogate_model_regression_tree_ei(self):
"""
Tests that the optimization with regression trees and expected improvement behaves as
expected.
"""
bb_obj = BBOptimizer(
black_box=self.fake_black_box,
parameter_space=np.array(
[
np.arange(-5, 5, 1),
np.arange(-6, 6, 1),
np.arange(-6, 6, 1),
np.arange(-6, 6, 1),
]
).T,
heuristic="surrogate_model",
regression_model=DecisionTreeSTDRegressor,
next_parameter_strategy=expected_improvement,
initial_sample_size=5,
max_iteration=10,
max_retry=10,
)
bb_obj.optimize()
print(bb_obj.history)
def test_surrogate_model_censored_bayesian_ei(self):
"""
        Tests that the optimization with censored Gaussian processes and expected improvement
        behaves as expected.
"""
bb_obj = BBOptimizer(
black_box=self.fake_async_black_box,
parameter_space=np.array(
[
np.arange(-5, 5, 1),
np.arange(-6, 6, 1),
np.arange(-6, 6, 1),
np.arange(-6, 6, 1),
]
).T,
heuristic="surrogate_model",
regression_model=CensoredGaussianProcesses,
next_parameter_strategy=expected_improvement,
initial_sample_size=5,
async_optim=True,
max_step_cost=1,
max_iteration=10,
max_retry=10,
)
bb_obj.optimize()
print(bb_obj.history)
class TestExhaustiveSearch(unittest.TestCase):
"""Tests the exhaustive_search heuristic.
"""
def setUp(self):
"""
Sets up the testing procedure by initializing the parabola as the black-box function.
"""
self.fake_black_box = Parabola()
def test_exhaustive_search(self):
"""
        Tests that the exhaustive search heuristic tests every parametrization when the budget is
equal to the size of the parametric grid.
"""
parametric_grid = np.array(
[np.arange(-5, 5, 1), np.arange(-6, 6, 1), ]).T
bb_obj = BBOptimizer(
black_box=self.fake_black_box,
parameter_space=parametric_grid,
heuristic="exhaustive_search",
initial_sample_size=2,
max_iteration=120,
)
bb_obj.optimize()
exhaustive_grid = np.array(np.meshgrid(*parametric_grid)).T.reshape(
-1, len(parametric_grid)
)
np.testing.assert_array_equal(
bb_obj.history["parameters"][2:], exhaustive_grid)
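

# ---------------------------------------------------------------------------
# Editor's sketch of the kind of section the module docstring asks for when a
# new heuristic is added. "my_new_heuristic" and its wiring are hypothetical
# placeholders, so the class is skipped and never executed.
# ---------------------------------------------------------------------------
@unittest.skip("Illustrative skeleton only; 'my_new_heuristic' does not exist.")
class TestMyNewHeuristic(unittest.TestCase):
    """
    Skeleton showing how a new heuristic would be exercised through the optimizer
    interface, mirroring the test classes above.
    """
    def setUp(self):
        """
        Sets up the testing procedure by initializing the parabola as the black-box function.
        """
        self.fake_black_box = Parabola()
    def test_my_new_heuristic(self):
        """
        Would test that the optimization works properly with the new heuristic.
        """
        bb_obj = BBOptimizer(
            black_box=self.fake_black_box,
            parameter_space=np.array(
                [np.arange(-5, 5, 1), np.arange(-5, 5, 1)]
            ).T,
            heuristic="my_new_heuristic",  # hypothetical heuristic name
            initial_sample_size=2,
            max_iteration=10,
        )
        bb_obj.optimize()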
if __name__ == "__main__":
unittest.main()
| 33.474895 | 102 | 0.583776 |